Commit 00cda56d authored by Linus Torvalds

Merge branch 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/libata-dev

* 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/libata-dev:
  [libata] AHCI: fix newly introduced host-reset bug
  [libata] sata_nv: fix SWNCQ enabling
  libata: add MAXTOR 7V300F0/VA111900 to NCQ blacklist
  libata: no need to speed down if already at PIO0
  libata: relocate forcing PIO0 on reset
  pata_ns87415: define SUPERIO_IDE_MAX_RETRIES
  [libata] Address some checkpatch-spotted issues
  [libata] fix 'if(' and similar areas that lack whitespace
  libata: implement ata_wait_after_reset()
  libata: track SLEEP state and issue SRST to wake it up
  libata: relocate and fix post-command processing
parents da8e5aa2 ab6fc95f
......@@ -898,8 +898,10 @@ static int ahci_reset_controller(struct ata_host *host)
* AHCI-specific, such as HOST_RESET.
*/
tmp = readl(mmio + HOST_CTL);
if (!(tmp & HOST_AHCI_EN))
writel(tmp | HOST_AHCI_EN, mmio + HOST_CTL);
if (!(tmp & HOST_AHCI_EN)) {
tmp |= HOST_AHCI_EN;
writel(tmp, mmio + HOST_CTL);
}
/* global controller reset */
if ((tmp & HOST_RESET) == 0) {
......@@ -1153,15 +1155,8 @@ static int ahci_do_softreset(struct ata_link *link, unsigned int *class,
tf.ctl &= ~ATA_SRST;
ahci_exec_polled_cmd(ap, pmp, &tf, 0, 0, 0);
/* spec mandates ">= 2ms" before checking status.
* We wait 150ms, because that was the magic delay used for
* ATAPI devices in Hale Landis's ATADRVR, for the period of time
* between when the ATA command register is written, and then
* status is checked. Because waiting for "a while" before
* checking status is fine, post SRST, we perform this magic
* delay here as well.
*/
msleep(150);
/* wait a while before checking status */
ata_wait_after_reset(ap, deadline);
rc = ata_wait_ready(ap, deadline);
/* link occupied, -ENODEV too is an error */
......
......@@ -2219,6 +2219,25 @@ int ata_bus_probe(struct ata_port *ap)
tries[dev->devno] = ATA_PROBE_MAX_TRIES;
retry:
ata_link_for_each_dev(dev, &ap->link) {
/* If we issue an SRST then an ATA drive (not ATAPI)
* may change configuration and be in PIO0 timing. If
* we do a hard reset (or are coming from power on)
* this is true for ATA or ATAPI. Until we've set a
* suitable controller mode we should not touch the
* bus as we may be talking too fast.
*/
dev->pio_mode = XFER_PIO_0;
/* If the controller has a pio mode setup function
* then use it to set the chipset to rights. Don't
* touch the DMA setup as that will be dealt with when
* configuring devices.
*/
if (ap->ops->set_piomode)
ap->ops->set_piomode(ap, dev);
}
/* reset and determine device classes */
ap->ops->phy_reset(ap);
......@@ -2234,12 +2253,6 @@ int ata_bus_probe(struct ata_port *ap)
ata_port_probe(ap);
/* after the reset the device state is PIO 0 and the controller
state is undefined. Record the mode */
ata_link_for_each_dev(dev, &ap->link)
dev->pio_mode = XFER_PIO_0;
/* read IDENTIFY page and configure devices. We have to do the identify
specific sequence bass-ackwards so that PDIAG- is released by
the slave device */
......@@ -3117,6 +3130,55 @@ int ata_busy_sleep(struct ata_port *ap,
return 0;
}
/**
* ata_wait_after_reset - wait before checking status after reset
* @ap: port containing status register to be polled
* @deadline: deadline jiffies for the operation
*
* After reset, we need to pause a while before reading status.
* Also, certain combination of controller and device report 0xff
* for some duration (e.g. until SATA PHY is up and running)
* which is interpreted as empty port in ATA world. This
* function also waits for such devices to get out of 0xff
* status.
*
* LOCKING:
* Kernel thread context (may sleep).
*/
void ata_wait_after_reset(struct ata_port *ap, unsigned long deadline)
{
unsigned long until = jiffies + ATA_TMOUT_FF_WAIT;
if (time_before(until, deadline))
deadline = until;
/* Spec mandates ">= 2ms" before checking status. We wait
* 150ms, because that was the magic delay used for ATAPI
* devices in Hale Landis's ATADRVR, for the period of time
* between when the ATA command register is written, and then
* status is checked. Because waiting for "a while" before
* checking status is fine, post SRST, we perform this magic
* delay here as well.
*
* Old drivers/ide uses the 2mS rule and then waits for ready.
*/
msleep(150);
/* Wait for 0xff to clear. Some SATA devices take a long time
* to clear 0xff after reset. For example, HHD424020F7SV00
* iVDR needs >= 800ms while Quantum GoVault needs even more
* than that.
*/
while (1) {
u8 status = ata_chk_status(ap);
if (status != 0xff || time_after(jiffies, deadline))
return;
msleep(50);
}
}
/**
* ata_wait_ready - sleep until BSY clears, or timeout
* @ap: port containing status register to be polled
......@@ -3223,8 +3285,6 @@ static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
unsigned long deadline)
{
struct ata_ioports *ioaddr = &ap->ioaddr;
struct ata_device *dev;
int i = 0;
DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
......@@ -3235,36 +3295,8 @@ static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
udelay(20); /* FIXME: flush */
iowrite8(ap->ctl, ioaddr->ctl_addr);
/* If we issued an SRST then an ATA drive (not ATAPI)
* may have changed configuration and be in PIO0 timing. If
* we did a hard reset (or are coming from power on) this is
* true for ATA or ATAPI. Until we've set a suitable controller
* mode we should not touch the bus as we may be talking too fast.
*/
ata_link_for_each_dev(dev, &ap->link)
dev->pio_mode = XFER_PIO_0;
/* If the controller has a pio mode setup function then use
it to set the chipset to rights. Don't touch the DMA setup
as that will be dealt with when revalidating */
if (ap->ops->set_piomode) {
ata_link_for_each_dev(dev, &ap->link)
if (devmask & (1 << i++))
ap->ops->set_piomode(ap, dev);
}
/* spec mandates ">= 2ms" before checking status.
* We wait 150ms, because that was the magic delay used for
* ATAPI devices in Hale Landis's ATADRVR, for the period of time
* between when the ATA command register is written, and then
* status is checked. Because waiting for "a while" before
* checking status is fine, post SRST, we perform this magic
* delay here as well.
*
* Old drivers/ide uses the 2mS rule and then waits for ready
*/
msleep(150);
/* wait a while before checking status */
ata_wait_after_reset(ap, deadline);
/* Before we perform post reset processing we want to see if
* the bus shows 0xFF because the odd clown forgets the D7
......@@ -3691,8 +3723,8 @@ int sata_std_hardreset(struct ata_link *link, unsigned int *class,
return 0;
}
/* wait a while before checking status, see SRST for more info */
msleep(150);
/* wait a while before checking status */
ata_wait_after_reset(ap, deadline);
/* If PMP is supported, we have to do follow-up SRST. Note
* that some PMPs don't send D2H Reg FIS after hardreset at
......@@ -3992,6 +4024,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "ST3160812AS", "3.ADJ", ATA_HORKAGE_NONCQ, },
{ "ST980813AS", "3.ADB", ATA_HORKAGE_NONCQ, },
{ "SAMSUNG HD401LJ", "ZZ100-15", ATA_HORKAGE_NONCQ, },
{ "Maxtor 7V300F0", "VA111900", ATA_HORKAGE_NONCQ, },
/* devices which puke on READ_NATIVE_MAX */
{ "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
......@@ -5595,6 +5628,9 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
* taken care of.
*/
if (ap->ops->error_handler) {
struct ata_device *dev = qc->dev;
struct ata_eh_info *ehi = &dev->link->eh_info;
WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
if (unlikely(qc->err_mask))
......@@ -5613,6 +5649,27 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
if (qc->flags & ATA_QCFLAG_RESULT_TF)
fill_result_tf(qc);
/* Some commands need post-processing after successful
* completion.
*/
switch (qc->tf.command) {
case ATA_CMD_SET_FEATURES:
if (qc->tf.feature != SETFEATURES_WC_ON &&
qc->tf.feature != SETFEATURES_WC_OFF)
break;
/* fall through */
case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
case ATA_CMD_SET_MULTI: /* multi_count changed */
/* revalidate device */
ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
ata_port_schedule_eh(ap);
break;
case ATA_CMD_SLEEP:
dev->flags |= ATA_DFLAG_SLEEPING;
break;
}
__ata_qc_complete(qc);
} else {
if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
......@@ -5750,6 +5807,14 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
qc->flags &= ~ATA_QCFLAG_DMAMAP;
}
/* if device is sleeping, schedule softreset and abort the link */
if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
link->eh_info.action |= ATA_EH_SOFTRESET;
ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
ata_link_abort(link);
return;
}
ap->ops->qc_prep(qc);
qc->err_mask |= ap->ops->qc_issue(qc);
......@@ -7327,6 +7392,7 @@ EXPORT_SYMBOL_GPL(ata_port_disable);
EXPORT_SYMBOL_GPL(ata_ratelimit);
EXPORT_SYMBOL_GPL(ata_wait_register);
EXPORT_SYMBOL_GPL(ata_busy_sleep);
EXPORT_SYMBOL_GPL(ata_wait_after_reset);
EXPORT_SYMBOL_GPL(ata_wait_ready);
EXPORT_SYMBOL_GPL(ata_port_queue_task);
EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
......
......@@ -2083,6 +2083,25 @@ int ata_eh_reset(struct ata_link *link, int classify,
ata_eh_about_to_do(link, NULL, ehc->i.action & ATA_EH_RESET_MASK);
ata_link_for_each_dev(dev, link) {
/* If we issue an SRST then an ATA drive (not ATAPI)
* may change configuration and be in PIO0 timing. If
* we do a hard reset (or are coming from power on)
* this is true for ATA or ATAPI. Until we've set a
* suitable controller mode we should not touch the
* bus as we may be talking too fast.
*/
dev->pio_mode = XFER_PIO_0;
/* If the controller has a pio mode setup function
* then use it to set the chipset to rights. Don't
* touch the DMA setup as that will be dealt with when
* configuring devices.
*/
if (ap->ops->set_piomode)
ap->ops->set_piomode(ap, dev);
}
/* Determine which reset to use and record in ehc->i.action.
* prereset() may examine and modify it.
*/
......@@ -2208,9 +2227,11 @@ int ata_eh_reset(struct ata_link *link, int classify,
ata_link_for_each_dev(dev, link) {
/* After the reset, the device state is PIO 0
* and the controller state is undefined.
* Record the mode.
* Reset also wakes up drives from sleeping
* mode.
*/
dev->pio_mode = XFER_PIO_0;
dev->flags &= ~ATA_DFLAG_SLEEPING;
if (ata_link_offline(link))
continue;
......@@ -2416,7 +2437,7 @@ static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
/* give it just one more chance */
ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1);
case -EIO:
if (ehc->tries[dev->devno] == 1) {
if (ehc->tries[dev->devno] == 1 && dev->pio_mode > XFER_PIO_0) {
/* This is the last chance, better to slow
* down than lose it.
*/
......
......@@ -1361,33 +1361,10 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct ata_eh_info *ehi = &qc->dev->link->eh_info;
struct scsi_cmnd *cmd = qc->scsicmd;
u8 *cdb = cmd->cmnd;
int need_sense = (qc->err_mask != 0);
/* We snoop the SET_FEATURES - Write Cache ON/OFF command, and
* schedule EH_REVALIDATE operation to update the IDENTIFY DEVICE
* cache
*/
if (ap->ops->error_handler && !need_sense) {
switch (qc->tf.command) {
case ATA_CMD_SET_FEATURES:
if ((qc->tf.feature == SETFEATURES_WC_ON) ||
(qc->tf.feature == SETFEATURES_WC_OFF)) {
ehi->action |= ATA_EH_REVALIDATE;
ata_port_schedule_eh(ap);
}
break;
case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
case ATA_CMD_SET_MULTI: /* multi_count changed */
ehi->action |= ATA_EH_REVALIDATE;
ata_port_schedule_eh(ap);
break;
}
}
/* For ATA pass thru (SAT) commands, generate a sense block if
* user mandated it or if there's an error. Note that if we
* generate because the user forced us to, a check condition
......
......@@ -181,7 +181,7 @@ static void pacpi_set_piomode(struct ata_port *ap, struct ata_device *adev)
int unit = adev->devno;
struct pata_acpi *acpi = ap->private_data;
if(!(acpi->gtm.flags & 0x10))
if (!(acpi->gtm.flags & 0x10))
unit = 0;
/* Now stuff the nS values into the structure */
......@@ -202,7 +202,7 @@ static void pacpi_set_dmamode(struct ata_port *ap, struct ata_device *adev)
int unit = adev->devno;
struct pata_acpi *acpi = ap->private_data;
if(!(acpi->gtm.flags & 0x10))
if (!(acpi->gtm.flags & 0x10))
unit = 0;
/* Now stuff the nS values into the structure */
......
......@@ -215,6 +215,8 @@ static int ns87415_check_atapi_dma(struct ata_queued_cmd *qc)
#include <asm/superio.h>
#define SUPERIO_IDE_MAX_RETRIES 25
/**
* ns87560_read_buggy - workaround buggy Super I/O chip
* @port: Port to read
......
......@@ -449,7 +449,7 @@ static int optiplus_with_udma(struct pci_dev *pdev)
/* Find function 1 */
dev1 = pci_get_device(0x1045, 0xC701, NULL);
if(dev1 == NULL)
if (dev1 == NULL)
return 0;
/* Rev must be >= 0x10 */
......
......@@ -74,8 +74,7 @@ static int pcmcia_set_mode(struct ata_link *link, struct ata_device **r_failed_d
return ata_do_set_mode(link, r_failed_dev);
if (memcmp(master->id + ATA_ID_FW_REV, slave->id + ATA_ID_FW_REV,
ATA_ID_FW_REV_LEN + ATA_ID_PROD_LEN) == 0)
{
ATA_ID_FW_REV_LEN + ATA_ID_PROD_LEN) == 0) {
/* Suspicious match, but could be two cards from
the same vendor - check serial */
if (memcmp(master->id + ATA_ID_SERNO, slave->id + ATA_ID_SERNO,
......@@ -248,7 +247,8 @@ static int pcmcia_init_one(struct pcmcia_device *pdev)
goto next_entry;
io_base = pdev->io.BasePort1;
ctl_base = pdev->io.BasePort1 + 0x0e;
} else goto next_entry;
} else
goto next_entry;
/* If we've got this far, we're done */
break;
}
......@@ -285,8 +285,8 @@ static int pcmcia_init_one(struct pcmcia_device *pdev)
printk(KERN_WARNING DRV_NAME ": second channel not yet supported.\n");
/*
* Having done the PCMCIA plumbing the ATA side is relatively
* sane.
* Having done the PCMCIA plumbing the ATA side is relatively
* sane.
*/
ret = -ENOMEM;
host = ata_host_alloc(&pdev->dev, 1);
......@@ -363,7 +363,7 @@ static struct pcmcia_device_id pcmcia_devices[] = {
PCMCIA_DEVICE_MANF_CARD(0x0098, 0x0000), /* Toshiba */
PCMCIA_DEVICE_MANF_CARD(0x00a4, 0x002d),
PCMCIA_DEVICE_MANF_CARD(0x00ce, 0x0000), /* Samsung */
PCMCIA_DEVICE_MANF_CARD(0x0319, 0x0000), /* Hitachi */
PCMCIA_DEVICE_MANF_CARD(0x0319, 0x0000), /* Hitachi */
PCMCIA_DEVICE_MANF_CARD(0x2080, 0x0001),
PCMCIA_DEVICE_MANF_CARD(0x4e01, 0x0100), /* Viking CFA */
PCMCIA_DEVICE_MANF_CARD(0x4e01, 0x0200), /* Lexar, Viking CFA */
......
......@@ -348,7 +348,7 @@ static unsigned long pdc2027x_mode_filter(struct ata_device *adev, unsigned long
ata_id_c_string(pair->id, model_num, ATA_ID_PROD,
ATA_ID_PROD_LEN + 1);
/* If the master is a maxtor in UDMA6 then the slave should not use UDMA 6 */
if(strstr(model_num, "Maxtor") == 0 && pair->dma_mode == XFER_UDMA_6)
if (strstr(model_num, "Maxtor") == 0 && pair->dma_mode == XFER_UDMA_6)
mask &= ~ (1 << (6 + ATA_SHIFT_UDMA));
return ata_pci_default_filter(adev, mask);
......
......@@ -351,9 +351,9 @@ static int pdc202xx_init_one(struct pci_dev *dev, const struct pci_device_id *id
struct pci_dev *bridge = dev->bus->self;
/* Don't grab anything behind a Promise I2O RAID */
if (bridge && bridge->vendor == PCI_VENDOR_ID_INTEL) {
if( bridge->device == PCI_DEVICE_ID_INTEL_I960)
if (bridge->device == PCI_DEVICE_ID_INTEL_I960)
return -ENODEV;
if( bridge->device == PCI_DEVICE_ID_INTEL_I960RM)
if (bridge->device == PCI_DEVICE_ID_INTEL_I960RM)
return -ENODEV;
}
}
......
......@@ -570,17 +570,8 @@ static unsigned int scc_bus_softreset(struct ata_port *ap, unsigned int devmask,
udelay(20);
out_be32(ioaddr->ctl_addr, ap->ctl);
/* spec mandates ">= 2ms" before checking status.
* We wait 150ms, because that was the magic delay used for
* ATAPI devices in Hale Landis's ATADRVR, for the period of time
* between when the ATA command register is written, and then
* status is checked. Because waiting for "a while" before
* checking status is fine, post SRST, we perform this magic
* delay here as well.
*
* Old drivers/ide uses the 2mS rule and then waits for ready
*/
msleep(150);
/* wait a while before checking status */
ata_wait_after_reset(ap, deadline);
/* Before we perform post reset processing we want to see if
* the bus shows 0xFF because the odd clown forgets the D7
......
......@@ -176,7 +176,7 @@ static int via_cable_detect(struct ata_port *ap) {
if ((config->flags & VIA_UDMA) < VIA_UDMA_66)
return ATA_CBL_PATA40;
/* UDMA 66 chips have only drive side logic */
else if((config->flags & VIA_UDMA) < VIA_UDMA_100)
else if ((config->flags & VIA_UDMA) < VIA_UDMA_100)
return ATA_CBL_PATA_UNK;
/* UDMA 100 or later */
pci_read_config_dword(pdev, 0x50, &ata66);
......
......@@ -279,7 +279,7 @@ static __init int winbond_init(void)
if (request_region(port, 2, "pata_winbond")) {
ret = winbond_init_one(port);
if(ret <= 0)
if (ret <= 0)
release_region(port, 2);
else ct+= ret;
}
......
......@@ -47,10 +47,10 @@
#define DRV_VERSION "1.0"
/* macro to calculate base address for ATA regs */
#define ADMA_ATA_REGS(base,port_no) ((base) + ((port_no) * 0x40))
#define ADMA_ATA_REGS(base, port_no) ((base) + ((port_no) * 0x40))
/* macro to calculate base address for ADMA regs */
#define ADMA_REGS(base,port_no) ((base) + 0x80 + ((port_no) * 0x20))
#define ADMA_REGS(base, port_no) ((base) + 0x80 + ((port_no) * 0x20))
/* macro to obtain addresses from ata_port */
#define ADMA_PORT_REGS(ap) \
......@@ -128,7 +128,7 @@ struct adma_port_priv {
adma_state_t state;
};
static int adma_ata_init_one (struct pci_dev *pdev,
static int adma_ata_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent);
static int adma_port_start(struct ata_port *ap);
static void adma_host_stop(struct ata_host *host);
......@@ -340,8 +340,8 @@ static int adma_fill_sg(struct ata_queued_cmd *qc)
buf[i++] = 0; /* pPKLW */
buf[i++] = 0; /* reserved */
*(__le32 *)(buf + i)
= (pFLAGS & pEND) ? 0 : cpu_to_le32(pp->pkt_dma + i + 4);
*(__le32 *)(buf + i) =
(pFLAGS & pEND) ? 0 : cpu_to_le32(pp->pkt_dma + i + 4);
i += 4;
VPRINTK("PRD[%u] = (0x%lX, 0x%X)\n", i/4,
......@@ -617,7 +617,7 @@ static int adma_port_start(struct ata_port *ap)
return -ENOMEM;
/* paranoia? */
if ((pp->pkt_dma & 7) != 0) {
printk("bad alignment for pp->pkt_dma: %08x\n",
printk(KERN_ERR "bad alignment for pp->pkt_dma: %08x\n",
(u32)pp->pkt_dma);
return -ENOMEM;
}
......
......@@ -143,7 +143,7 @@ static const int scr_map[] = {
[SCR_CONTROL] = 2,
};
static void __iomem * inic_port_base(struct ata_port *ap)
static void __iomem *inic_port_base(struct ata_port *ap)
{
return ap->host->iomap[MMIO_BAR] + ap->port_no * PORT_SIZE;
}
......@@ -448,7 +448,7 @@ static int inic_hardreset(struct ata_link *link, unsigned int *class,
struct ata_taskfile tf;
/* wait a while before checking status */
msleep(150);
ata_wait_after_reset(ap, deadline);
rc = ata_wait_ready(ap, deadline);
/* link occupied, -ENODEV too is an error */
......
......@@ -1156,7 +1156,7 @@ static void mv_fill_sg(struct ata_queued_cmd *qc)
last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
}
static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
(last ? CRQB_CMD_LAST : 0);
......@@ -2429,7 +2429,7 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
struct mv_host_priv *hpriv = host->private_data;
u32 hp_flags = hpriv->hp_flags;
switch(board_idx) {
switch (board_idx) {
case chip_5080:
hpriv->ops = &mv5xxx_ops;
hp_flags |= MV_HP_GEN_I;
......@@ -2510,7 +2510,8 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
break;
default:
printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx);
dev_printk(KERN_ERR, &pdev->dev,
"BUG: invalid board index %u\n", board_idx);
return 1;
}
......
......@@ -291,7 +291,7 @@ struct nv_swncq_port_priv {
};
#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & ( 1 << (19 + (12 * (PORT)))))
#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))
static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
#ifdef CONFIG_PM
......@@ -884,8 +884,9 @@ static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
/* Notifier bits set without a command may indicate the drive
is misbehaving. Raise host state machine violation on this
condition. */
ata_port_printk(ap, KERN_ERR, "notifier for tag %d with no command?\n",
cpb_num);
ata_port_printk(ap, KERN_ERR,
"notifier for tag %d with no cmd?\n",
cpb_num);
ehi->err_mask |= AC_ERR_HSM;
ehi->action |= ATA_EH_SOFTRESET;
ata_port_freeze(ap);
......@@ -1012,7 +1013,7 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
u32 check_commands;
int pos, error = 0;
if(ata_tag_valid(ap->link.active_tag))
if (ata_tag_valid(ap->link.active_tag))
check_commands = 1 << ap->link.active_tag;
else
check_commands = ap->link.sactive;
......@@ -1021,14 +1022,14 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
while ((pos = ffs(check_commands)) && !error) {
pos--;
error = nv_adma_check_cpb(ap, pos,
notifier_error & (1 << pos) );
check_commands &= ~(1 << pos );
notifier_error & (1 << pos));
check_commands &= ~(1 << pos);
}
}
}
}
if(notifier_clears[0] || notifier_clears[1]) {
if (notifier_clears[0] || notifier_clears[1]) {
/* Note: Both notifier clear registers must be written
if either is set, even if one is zero, according to NVIDIA. */
struct nv_adma_port_priv *pp = host->ports[0]->private_data;
......@@ -1061,7 +1062,7 @@ static void nv_adma_freeze(struct ata_port *ap)
tmp = readw(mmio + NV_ADMA_CTL);
writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
mmio + NV_ADMA_CTL);
readw(mmio + NV_ADMA_CTL ); /* flush posted write */
readw(mmio + NV_ADMA_CTL); /* flush posted write */
}
static void nv_adma_thaw(struct ata_port *ap)
......@@ -1079,7 +1080,7 @@ static void nv_adma_thaw(struct ata_port *ap)
tmp = readw(mmio + NV_ADMA_CTL);
writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
mmio + NV_ADMA_CTL);
readw(mmio + NV_ADMA_CTL ); /* flush posted write */
readw(mmio + NV_ADMA_CTL); /* flush posted write */
}
static void nv_adma_irq_clear(struct ata_port *ap)
......@@ -1119,7 +1120,7 @@ static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
{
struct nv_adma_port_priv *pp = qc->ap->private_data;
if(pp->flags & NV_ADMA_PORT_REGISTER_MODE)
if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
ata_bmdma_post_internal_cmd(qc);
}
......@@ -1165,7 +1166,7 @@ static int nv_adma_port_start(struct ata_port *ap)
pp->cpb_dma = mem_dma;
writel(mem_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
writel((mem_dma >> 16 ) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);
writel((mem_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);
mem += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
......@@ -1189,15 +1190,15 @@ static int nv_adma_port_start(struct ata_port *ap)
/* clear GO for register mode, enable interrupt */
tmp = readw(mmio + NV_ADMA_CTL);
writew( (tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
tmp = readw(mmio + NV_ADMA_CTL);
writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
readw( mmio + NV_ADMA_CTL ); /* flush posted write */
readw(mmio + NV_ADMA_CTL); /* flush posted write */
udelay(1);
writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
readw( mmio + NV_ADMA_CTL ); /* flush posted write */
readw(mmio + NV_ADMA_CTL); /* flush posted write */
return 0;
}
......@@ -1237,7 +1238,7 @@ static int nv_adma_port_resume(struct ata_port *ap)
/* set CPB block location */
writel(pp->cpb_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
writel((pp->cpb_dma >> 16 ) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);
writel((pp->cpb_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);
/* clear any outstanding interrupt conditions */
writew(0xffff, mmio + NV_ADMA_STAT);
......@@ -1250,15 +1251,15 @@ static int nv_adma_port_resume(struct ata_port *ap)
/* clear GO for register mode, enable interrupt */
tmp = readw(mmio + NV_ADMA_CTL);
writew( (tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
tmp = readw(mmio + NV_ADMA_CTL);
writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
readw( mmio + NV_ADMA_CTL ); /* flush posted write */
readw(mmio + NV_ADMA_CTL); /* flush posted write */
udelay(1);
writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
readw( mmio + NV_ADMA_CTL ); /* flush posted write */
readw(mmio + NV_ADMA_CTL); /* flush posted write */
return 0;
}
......@@ -1342,7 +1343,8 @@ static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
idx = 0;
ata_for_each_sg(sg, qc) {
aprd = (idx < 5) ? &cpb->aprd[idx] : &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (idx-5)];
aprd = (idx < 5) ? &cpb->aprd[idx] :
&pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (idx-5)];
nv_adma_fill_aprd(qc, sg, idx, aprd);
idx++;
}
......@@ -1359,12 +1361,12 @@ static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
/* ADMA engine can only be used for non-ATAPI DMA commands,
or interrupt-driven no-data commands, where a result taskfile
is not required. */
if((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
(qc->tf.flags & ATA_TFLAG_POLLING) ||
(qc->flags & ATA_QCFLAG_RESULT_TF))
return 1;
if((qc->flags & ATA_QCFLAG_DMAMAP) ||
if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
(qc->tf.protocol == ATA_PROT_NODATA))
return 0;
......@@ -1401,14 +1403,14 @@ static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
nv_adma_tf_to_cpb(&qc->tf, cpb->tf);
if(qc->flags & ATA_QCFLAG_DMAMAP) {
if (qc->flags & ATA_QCFLAG_DMAMAP) {
nv_adma_fill_sg(qc, cpb);
ctl_flags |= NV_CPB_CTL_APRD_VALID;
} else
memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);
/* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID until we are
finished filling in all of the contents */
/* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID
until we are finished filling in all of the contents */
wmb();
cpb->ctl_flags = ctl_flags;
wmb();
......@@ -1435,16 +1437,16 @@ static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
and (number of cpbs to append -1) in top 8 bits */
wmb();
if(curr_ncq != pp->last_issue_ncq) {
/* Seems to need some delay before switching between NCQ and non-NCQ
commands, else we get command timeouts and such. */
if (curr_ncq != pp->last_issue_ncq) {
/* Seems to need some delay before switching between NCQ and
non-NCQ commands, else we get command timeouts and such. */
udelay(20);
pp->last_issue_ncq = curr_ncq;
}
writew(qc->tag, mmio + NV_ADMA_APPEND);
DPRINTK("Issued tag %u\n",qc->tag);
DPRINTK("Issued tag %u\n", qc->tag);
return 0;
}
......@@ -1641,12 +1643,12 @@ static void nv_error_handler(struct ata_port *ap)
static void nv_adma_error_handler(struct ata_port *ap)
{
struct nv_adma_port_priv *pp = ap->private_data;
if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
void __iomem *mmio = pp->ctl_block;
int i;
u16 tmp;
if(ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
......@@ -1654,16 +1656,17 @@ static void nv_adma_error_handler(struct ata_port *ap)
u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);
ata_port_printk(ap, KERN_ERR, "EH in ADMA mode, notifier 0x%X "
ata_port_printk(ap, KERN_ERR,
"EH in ADMA mode, notifier 0x%X "
"notifier_error 0x%X gen_ctl 0x%X status 0x%X "
"next cpb count 0x%X next cpb idx 0x%x\n",
notifier, notifier_error, gen_ctl, status,
cpb_count, next_cpb_idx);
for( i=0;i<NV_ADMA_MAX_CPBS;i++) {
for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
struct nv_adma_cpb *cpb = &pp->cpb[i];
if( (ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
ap->link.sactive & (1 << i) )
if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
ap->link.sactive & (1 << i))
ata_port_printk(ap, KERN_ERR,
"CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
i, cpb->ctl_flags, cpb->resp_flags);
......@@ -1673,8 +1676,9 @@ static void nv_adma_error_handler(struct ata_port *ap)
/* Push us back into port register mode for error handling. */
nv_adma_register_mode(ap);
/* Mark all of the CPBs as invalid to prevent them from being executed */
for( i=0;i<NV_ADMA_MAX_CPBS;i++)
/* Mark all of the CPBs as invalid to prevent them from
being executed */
for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;
/* clear CPB fetch count */
......@@ -1683,10 +1687,10 @@ static void nv_adma_error_handler(struct ata_port *ap)
/* Reset channel */
tmp = readw(mmio + NV_ADMA_CTL);
writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
readw( mmio + NV_ADMA_CTL ); /* flush posted write */
readw(mmio + NV_ADMA_CTL); /* flush posted write */
udelay(1);
writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
readw( mmio + NV_ADMA_CTL ); /* flush posted write */
readw(mmio + NV_ADMA_CTL); /* flush posted write */
}
ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
......@@ -2350,9 +2354,9 @@ static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
return IRQ_RETVAL(handled);
}
static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
static int printed_version = 0;
static int printed_version;
const struct ata_port_info *ppi[] = { NULL, NULL };
struct ata_host *host;
struct nv_host_priv *hpriv;
......@@ -2364,7 +2368,7 @@ static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
// Make sure this is a SATA controller by counting the number of bars
// (NVIDIA SATA controllers will always have six bars). Otherwise,
// it's an IDE controller and we ignore it.
for (bar=0; bar<6; bar++)
for (bar = 0; bar < 6; bar++)
if (pci_resource_start(pdev, bar) == 0)
return -ENODEV;
......@@ -2381,6 +2385,14 @@ static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
type = ADMA;
}
if (type == SWNCQ) {
if (swncq_enabled)
dev_printk(KERN_NOTICE, &pdev->dev,
"Using SWNCQ mode\n");
else
type = GENERIC;
}
ppi[0] = &nv_port_info[type];
rc = ata_pci_prepare_sff_host(pdev, ppi, &host);
if (rc)
......@@ -2422,10 +2434,8 @@ static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
rc = nv_adma_host_init(host);
if (rc)
return rc;
} else if (type == SWNCQ && swncq_enabled) {
dev_printk(KERN_NOTICE, &pdev->dev, "Using SWNCQ mode\n");
} else if (type == SWNCQ)
nv_swncq_host_init(host);
}
pci_set_master(pdev);
return ata_host_activate(host, pdev->irq, ppi[0]->irq_handler,
......@@ -2440,37 +2450,37 @@ static int nv_pci_device_resume(struct pci_dev *pdev)
int rc;
rc = ata_pci_device_do_resume(pdev);
if(rc)
if (rc)
return rc;
if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
if(hpriv->type >= CK804) {
if (hpriv->type >= CK804) {
u8 regval;
pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
}
if(hpriv->type == ADMA) {
if (hpriv->type == ADMA) {
u32 tmp32;
struct nv_adma_port_priv *pp;
/* enable/disable ADMA on the ports appropriately */
pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
pp = host->ports[0]->private_data;
if(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
else
tmp32 |= (NV_MCP_SATA_CFG_20_PORT0_EN |
NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
pp = host->ports[1]->private_data;
if(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
else
tmp32 |= (NV_MCP_SATA_CFG_20_PORT1_EN |
NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
}
......
......@@ -83,10 +83,12 @@ enum {
PDC_PCI_SYS_ERR = (1 << 22), /* PCI system error */
PDC1_PCI_PARITY_ERR = (1 << 23), /* PCI parity error (from SATA150 driver) */
PDC1_ERR_MASK = PDC1_PCI_PARITY_ERR,
PDC2_ERR_MASK = PDC2_HTO_ERR | PDC2_ATA_HBA_ERR | PDC2_ATA_DMA_CNT_ERR,
PDC_ERR_MASK = (PDC_PH_ERR | PDC_SH_ERR | PDC_DH_ERR | PDC_OVERRUN_ERR
| PDC_UNDERRUN_ERR | PDC_DRIVE_ERR | PDC_PCI_SYS_ERR
| PDC1_ERR_MASK | PDC2_ERR_MASK),
PDC2_ERR_MASK = PDC2_HTO_ERR | PDC2_ATA_HBA_ERR |
PDC2_ATA_DMA_CNT_ERR,
PDC_ERR_MASK = PDC_PH_ERR | PDC_SH_ERR | PDC_DH_ERR |
PDC_OVERRUN_ERR | PDC_UNDERRUN_ERR |
PDC_DRIVE_ERR | PDC_PCI_SYS_ERR |
PDC1_ERR_MASK | PDC2_ERR_MASK,
board_2037x = 0, /* FastTrak S150 TX2plus */
board_2037x_pata = 1, /* FastTrak S150 TX2plus PATA port */
......@@ -695,19 +697,20 @@ static void pdc_irq_clear(struct ata_port *ap)
readl(mmio + PDC_INT_SEQMASK);
}
static inline int pdc_is_sataii_tx4(unsigned long flags)
static int pdc_is_sataii_tx4(unsigned long flags)
{
const unsigned long mask = PDC_FLAG_GEN_II | PDC_FLAG_4_PORTS;
return (flags & mask) == mask;
}
static inline unsigned int pdc_port_no_to_ata_no(unsigned int port_no, int is_sataii_tx4)
static unsigned int pdc_port_no_to_ata_no(unsigned int port_no,
int is_sataii_tx4)
{
static const unsigned char sataii_tx4_port_remap[4] = { 3, 1, 0, 2};
return is_sataii_tx4 ? sataii_tx4_port_remap[port_no] : port_no;
}
static irqreturn_t pdc_interrupt (int irq, void *dev_instance)
static irqreturn_t pdc_interrupt(int irq, void *dev_instance)
{
struct ata_host *host = dev_instance;
struct ata_port *ap;
......@@ -839,15 +842,16 @@ static unsigned int pdc_qc_issue_prot(struct ata_queued_cmd *qc)
static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
{
WARN_ON (tf->protocol == ATA_PROT_DMA ||
tf->protocol == ATA_PROT_ATAPI_DMA);
WARN_ON(tf->protocol == ATA_PROT_DMA ||
tf->protocol == ATA_PROT_ATAPI_DMA);
ata_tf_load(ap, tf);
}
static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
static void pdc_exec_command_mmio(struct ata_port *ap,
const struct ata_taskfile *tf)
{
WARN_ON (tf->protocol == ATA_PROT_DMA ||
tf->protocol == ATA_PROT_ATAPI_DMA);
WARN_ON(tf->protocol == ATA_PROT_DMA ||
tf->protocol == ATA_PROT_ATAPI_DMA);
ata_exec_command(ap, tf);
}
......@@ -870,8 +874,11 @@ static int pdc_check_atapi_dma(struct ata_queued_cmd *qc)
}
/* -45150 (FFFF4FA2) to -1 (FFFFFFFF) shall use PIO mode */
if (scsicmd[0] == WRITE_10) {
unsigned int lba;
lba = (scsicmd[2] << 24) | (scsicmd[3] << 16) | (scsicmd[4] << 8) | scsicmd[5];
unsigned int lba =
(scsicmd[2] << 24) |
(scsicmd[3] << 16) |
(scsicmd[4] << 8) |
scsicmd[5];
if (lba >= 0xFFFF4FA2)
pio = 1;
}
......@@ -956,7 +963,8 @@ static void pdc_host_init(struct ata_host *host)
writel(tmp, mmio + PDC_SLEW_CTL);
}
static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
static int pdc_ata_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
static int printed_version;
const struct ata_port_info *pi = &pdc_port_info[ent->driver_data];
......
......@@ -113,7 +113,7 @@ struct qs_port_priv {
static int qs_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val);
static int qs_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);
static int qs_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
static int qs_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static int qs_port_start(struct ata_port *ap);
static void qs_host_stop(struct ata_host *host);
static void qs_phy_reset(struct ata_port *ap);
......@@ -135,7 +135,6 @@ static struct scsi_host_template qs_ata_sht = {
.sg_tablesize = QS_MAX_PRD,
.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
.emulated = ATA_SHT_EMULATED,
//FIXME .use_clustering = ATA_SHT_USE_CLUSTERING,
.use_clustering = ENABLE_CLUSTERING,
.proc_name = DRV_NAME,
.dma_boundary = QS_DMA_BOUNDARY,
......
......@@ -111,7 +111,7 @@ enum {
SIL_QUIRK_UDMA5MAX = (1 << 1),
};
static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
static int sil_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
#ifdef CONFIG_PM
static int sil_pci_device_resume(struct pci_dev *pdev);
#endif
......@@ -138,7 +138,7 @@ static const struct pci_device_id sil_pci_tbl[] = {
/* TODO firmware versions should be added - eric */
static const struct sil_drivelist {
const char * product;
const char *product;
unsigned int quirk;
} sil_blacklist [] = {
{ "ST320012AS", SIL_QUIRK_MOD15WRITE },
......@@ -279,7 +279,7 @@ MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, sil_pci_tbl);
MODULE_VERSION(DRV_VERSION);
static int slow_down = 0;
static int slow_down;
module_param(slow_down, int, 0444);
MODULE_PARM_DESC(slow_down, "Sledgehammer used to work around random problems, by limiting commands to 15 sectors (0=off, 1=on)");
......@@ -332,7 +332,8 @@ static int sil_set_mode(struct ata_link *link, struct ata_device **r_failed)
return 0;
}
static inline void __iomem *sil_scr_addr(struct ata_port *ap, unsigned int sc_reg)
static inline void __iomem *sil_scr_addr(struct ata_port *ap,
unsigned int sc_reg)
{
void __iomem *offset = ap->ioaddr.scr_addr;
......@@ -643,7 +644,7 @@ static void sil_init_controller(struct ata_host *host)
}
}
static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
static int sil_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
static int printed_version;
int board_id = ent->driver_data;
......