Commit 488a9d01 authored by Linus Torvalds

Merge branch 'upstream-linus' of git://github.com/jgarzik/libata-dev

* 'upstream-linus' of git://github.com/jgarzik/libata-dev:
  [libata] ata_piix: Add Toshiba Satellite Pro A120 to the quirks list due to broken suspend functionality.
  [libata] add DVRTD08A and DVR-215 to NOSETXFER device quirk list
  [libata] pata_bf54x: Support sg list in bmdma transfer.
  [libata] sata_fsl: fix the controller operating mode
  [libata] enable ata port async suspend
parents 6015ff10 b73fa463
drivers/ata/ata_piix.c
@@ -1116,6 +1116,13 @@ static int piix_broken_suspend(void)
 			DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE U205"),
 		},
 	},
+	{
+		.ident = "Satellite Pro A120",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Satellite Pro A120"),
+		},
+	},
 	{
 		.ident = "Portege M500",
 		.matches = {
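For context, the quirk table above is consumed through the kernel's DMI interface. A minimal sketch, assuming a table shaped like the one in ata_piix; the table and function names here are illustrative, not the driver's exact code:

#include <linux/dmi.h>

static const struct dmi_system_id broken_suspend_sysids[] = {
	{
		.ident = "Satellite Pro A120",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Satellite Pro A120"),
		},
	},
	{ }	/* zeroed terminator ends the table */
};

static int example_broken_suspend(void)
{
	/* dmi_check_system() walks the table and returns the number of
	 * entries whose DMI_MATCH() substrings all occur in the firmware's
	 * DMI fields; non-zero means the quirk applies to this machine. */
	return dmi_check_system(broken_suspend_sysids) != 0;
}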
drivers/ata/libata-core.c
@@ -4125,6 +4125,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
 	 * device and controller are SATA.
 	 */
 	{ "PIONEER DVD-RW  DVRTD08",	NULL,	ATA_HORKAGE_NOSETXFER },
+	{ "PIONEER DVD-RW  DVRTD08A",	NULL,	ATA_HORKAGE_NOSETXFER },
+	{ "PIONEER DVD-RW  DVR-215",	NULL,	ATA_HORKAGE_NOSETXFER },
 	{ "PIONEER DVD-RW  DVR-212D",	NULL,	ATA_HORKAGE_NOSETXFER },
 	{ "PIONEER DVD-RW  DVR-216D",	NULL,	ATA_HORKAGE_NOSETXFER },
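ATA_HORKAGE_NOSETXFER makes libata skip the SET FEATURES - XFER MODE command for these drives, which (per the comment above) is safe when both device and controller are SATA, since the negotiated link rate already fixes the transfer speed and some Pioneer drives misbehave when the command is sent. A hedged sketch of the gating logic, simplified from libata and not the exact upstream code:

#include <linux/libata.h>

static bool example_skip_setxfer(struct ata_device *dev)
{
	/* the blacklist match sets the horkage bit at device scan time;
	 * ata_id_is_sata() checks the IDENTIFY data, because on a PATA
	 * link the SETXFER command still has to be issued */
	return (dev->horkage & ATA_HORKAGE_NOSETXFER) &&
	       ata_id_is_sata(dev->id);
}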
drivers/ata/libata-transport.c
@@ -291,6 +291,7 @@ int ata_tport_add(struct device *parent,
 		goto tport_err;
 	}
 
+	device_enable_async_suspend(dev);
 	pm_runtime_set_active(dev);
 	pm_runtime_enable(dev);
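device_enable_async_suspend() flags the transport device so the PM core may run its suspend and resume callbacks in parallel with other async-capable devices, shortening system suspend on hosts with several ports. A minimal usage sketch; the surrounding function is illustrative:

#include <linux/device.h>
#include <linux/pm.h>

static void example_register_port_dev(struct device *dev)
{
	/* sets dev->power.async_suspend; ordering against parent and
	 * child devices is still enforced by the PM core */
	device_enable_async_suspend(dev);
}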
drivers/ata/pata_bf54x.c
@@ -251,6 +251,8 @@ static const u32 udma_tenvmin = 20;
 static const u32 udma_tackmin = 20;
 static const u32 udma_tssmin = 50;
 
+#define BFIN_MAX_SG_SEGMENTS 4
+
 /**
  *
  * Function: num_clocks_min
@@ -829,79 +831,61 @@ static void bfin_set_devctl(struct ata_port *ap, u8 ctl)
 
 static void bfin_bmdma_setup(struct ata_queued_cmd *qc)
 {
-	unsigned short config = WDSIZE_16;
+	struct ata_port *ap = qc->ap;
+	struct dma_desc_array *dma_desc_cpu = (struct dma_desc_array *)ap->bmdma_prd;
+	void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
+	unsigned short config = DMAFLOW_ARRAY | NDSIZE_5 | RESTART | WDSIZE_16 | DMAEN;
 	struct scatterlist *sg;
 	unsigned int si;
+	unsigned int channel;
+	unsigned int dir;
+	unsigned int size = 0;
 
 	dev_dbg(qc->ap->dev, "in atapi dma setup\n");
 	/* Program the ATA_CTRL register with dir */
 	if (qc->tf.flags & ATA_TFLAG_WRITE) {
-		/* fill the ATAPI DMA controller */
-		set_dma_config(CH_ATAPI_TX, config);
-		set_dma_x_modify(CH_ATAPI_TX, 2);
-		for_each_sg(qc->sg, sg, qc->n_elem, si) {
-			set_dma_start_addr(CH_ATAPI_TX, sg_dma_address(sg));
-			set_dma_x_count(CH_ATAPI_TX, sg_dma_len(sg) >> 1);
-		}
+		channel = CH_ATAPI_TX;
+		dir = DMA_TO_DEVICE;
 	} else {
+		channel = CH_ATAPI_RX;
+		dir = DMA_FROM_DEVICE;
 		config |= WNR;
-		/* fill the ATAPI DMA controller */
-		set_dma_config(CH_ATAPI_RX, config);
-		set_dma_x_modify(CH_ATAPI_RX, 2);
-		for_each_sg(qc->sg, sg, qc->n_elem, si) {
-			set_dma_start_addr(CH_ATAPI_RX, sg_dma_address(sg));
-			set_dma_x_count(CH_ATAPI_RX, sg_dma_len(sg) >> 1);
-		}
 	}
-}
 
-/**
- *	bfin_bmdma_start - Start an IDE DMA transaction
- *	@qc: Info associated with this ATA transaction.
- *
- *	Note: Original code is ata_bmdma_start().
- */
+	dma_map_sg(ap->dev, qc->sg, qc->n_elem, dir);
 
-static void bfin_bmdma_start(struct ata_queued_cmd *qc)
-{
-	struct ata_port *ap = qc->ap;
-	void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
-	struct scatterlist *sg;
-	unsigned int si;
+	/* fill the ATAPI DMA controller */
+	for_each_sg(qc->sg, sg, qc->n_elem, si) {
+		dma_desc_cpu[si].start_addr = sg_dma_address(sg);
+		dma_desc_cpu[si].cfg = config;
+		dma_desc_cpu[si].x_count = sg_dma_len(sg) >> 1;
+		dma_desc_cpu[si].x_modify = 2;
+		size += sg_dma_len(sg);
+	}
 
-	dev_dbg(qc->ap->dev, "in atapi dma start\n");
-	if (!(ap->udma_mask || ap->mwdma_mask))
-		return;
+	/* Set the last descriptor to stop mode */
+	dma_desc_cpu[qc->n_elem - 1].cfg &= ~(DMAFLOW | NDSIZE);
 
-	/* start ATAPI DMA controller*/
-	if (qc->tf.flags & ATA_TFLAG_WRITE) {
-		/*
-		 * On blackfin arch, uncacheable memory is not
-		 * allocated with flag GFP_DMA. DMA buffer from
-		 * common kenel code should be flushed if WB
-		 * data cache is enabled. Otherwise, this loop
-		 * is an empty loop and optimized out.
-		 */
-		for_each_sg(qc->sg, sg, qc->n_elem, si) {
-			flush_dcache_range(sg_dma_address(sg),
-				sg_dma_address(sg) + sg_dma_len(sg));
-		}
-		enable_dma(CH_ATAPI_TX);
-		dev_dbg(qc->ap->dev, "enable udma write\n");
+	flush_dcache_range((unsigned int)dma_desc_cpu,
+		(unsigned int)dma_desc_cpu +
+		qc->n_elem * sizeof(struct dma_desc_array));
 
-		/* Send ATA DMA write command */
-		bfin_exec_command(ap, &qc->tf);
+	/* Enable ATA DMA operation*/
+	set_dma_curr_desc_addr(channel, (unsigned long *)ap->bmdma_prd_dma);
+	set_dma_x_count(channel, 0);
+	set_dma_x_modify(channel, 0);
+	set_dma_config(channel, config);
+	SSYNC();
 
+	/* Send ATA DMA command */
+	bfin_exec_command(ap, &qc->tf);
+
+	if (qc->tf.flags & ATA_TFLAG_WRITE) {
 		/* set ATA DMA write direction */
 		ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base)
 			| XFER_DIR));
 	} else {
-		enable_dma(CH_ATAPI_RX);
-		dev_dbg(qc->ap->dev, "enable udma read\n");
-
-		/* Send ATA DMA read command */
-		bfin_exec_command(ap, &qc->tf);
-
 		/* set ATA DMA read direction */
 		ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base)
 			& ~XFER_DIR));
@@ -913,12 +897,28 @@ static void bfin_bmdma_start(struct ata_queued_cmd *qc)
 
 	/* Set ATAPI state machine contorl in terminate sequence */
 	ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base) | END_ON_TERM);
 
-	/* Set transfer length to buffer len */
-	for_each_sg(qc->sg, sg, qc->n_elem, si) {
-		ATAPI_SET_XFER_LEN(base, (sg_dma_len(sg) >> 1));
-	}
+	/* Set transfer length to the total size of sg buffers */
+	ATAPI_SET_XFER_LEN(base, size >> 1);
+}
 
-	/* Enable ATA DMA operation*/
+/**
+ *	bfin_bmdma_start - Start an IDE DMA transaction
+ *	@qc: Info associated with this ATA transaction.
+ *
+ *	Note: Original code is ata_bmdma_start().
+ */
+
+static void bfin_bmdma_start(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
+
+	dev_dbg(qc->ap->dev, "in atapi dma start\n");
+	if (!(ap->udma_mask || ap->mwdma_mask))
+		return;
+
+	/* start ATAPI transfer*/
 	if (ap->udma_mask)
 		ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base)
 			| ULTRA_START);
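The rewrite above replaces one-segment-at-a-time channel programming (which effectively used only the last scatterlist entry) with a Blackfin descriptor-array transfer: every sg segment becomes one hardware descriptor, and clearing DMAFLOW/NDSIZE in the final descriptor stops the chain after the last segment. A hedged sketch of the map-and-fill lifecycle, assuming the Blackfin struct dma_desc_array layout used above; everything except the kernel DMA API calls is illustrative:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int example_fill_desc_array(struct device *dev,
				   struct scatterlist *sgl, int nents,
				   struct dma_desc_array *desc,
				   enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i, mapped;

	/* hand the buffers to the DMA layer; afterwards each entry's bus
	 * address and length come from sg_dma_address()/sg_dma_len() */
	mapped = dma_map_sg(dev, sgl, nents, dir);
	if (!mapped)
		return -ENOMEM;

	for_each_sg(sgl, sg, mapped, i) {
		desc[i].start_addr = sg_dma_address(sg);
		desc[i].x_count = sg_dma_len(sg) >> 1;	/* 16-bit words */
		desc[i].x_modify = 2;			/* byte stride */
	}
	/* caller clears DMAFLOW/NDSIZE in desc[mapped - 1].cfg so the
	 * engine halts after the last segment, then flushes the array and
	 * points the channel's current-descriptor register at it */
	return mapped;
}

The matching dma_unmap_sg() in bfin_bmdma_stop() below completes this lifecycle once the transfer is done.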
@@ -935,34 +935,23 @@ static void bfin_bmdma_start(struct ata_queued_cmd *qc)
 
 static void bfin_bmdma_stop(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
-	struct scatterlist *sg;
-	unsigned int si;
+	unsigned int dir;
 
 	dev_dbg(qc->ap->dev, "in atapi dma stop\n");
 	if (!(ap->udma_mask || ap->mwdma_mask))
 		return;
 
 	/* stop ATAPI DMA controller*/
-	if (qc->tf.flags & ATA_TFLAG_WRITE)
+	if (qc->tf.flags & ATA_TFLAG_WRITE) {
+		dir = DMA_TO_DEVICE;
 		disable_dma(CH_ATAPI_TX);
-	else {
+	} else {
+		dir = DMA_FROM_DEVICE;
 		disable_dma(CH_ATAPI_RX);
-		if (ap->hsm_task_state & HSM_ST_LAST) {
-			/*
-			 * On blackfin arch, uncacheable memory is not
-			 * allocated with flag GFP_DMA. DMA buffer from
-			 * common kenel code should be invalidated if
-			 * data cache is enabled. Otherwise, this loop
-			 * is an empty loop and optimized out.
-			 */
-			for_each_sg(qc->sg, sg, qc->n_elem, si) {
-				invalidate_dcache_range(
-					sg_dma_address(sg),
-					sg_dma_address(sg)
-					+ sg_dma_len(sg));
-			}
-		}
 	}
+
+	dma_unmap_sg(ap->dev, qc->sg, qc->n_elem, dir);
 }
 
 /**
@@ -1260,6 +1249,11 @@ static void bfin_port_stop(struct ata_port *ap)
 {
 	dev_dbg(ap->dev, "in atapi port stop\n");
 	if (ap->udma_mask != 0 || ap->mwdma_mask != 0) {
+		dma_free_coherent(ap->dev,
+			BFIN_MAX_SG_SEGMENTS * sizeof(struct dma_desc_array),
+			ap->bmdma_prd,
+			ap->bmdma_prd_dma);
+
 		free_dma(CH_ATAPI_RX);
 		free_dma(CH_ATAPI_TX);
 	}
@@ -1271,14 +1265,29 @@ static int bfin_port_start(struct ata_port *ap)
 	if (!(ap->udma_mask || ap->mwdma_mask))
 		return 0;
 
+	ap->bmdma_prd = dma_alloc_coherent(ap->dev,
+		BFIN_MAX_SG_SEGMENTS * sizeof(struct dma_desc_array),
+		&ap->bmdma_prd_dma,
+		GFP_KERNEL);
+
+	if (ap->bmdma_prd == NULL) {
+		dev_info(ap->dev, "Unable to allocate DMA descriptor array.\n");
+		goto out;
+	}
+
 	if (request_dma(CH_ATAPI_RX, "BFIN ATAPI RX DMA") >= 0) {
 		if (request_dma(CH_ATAPI_TX,
 			"BFIN ATAPI TX DMA") >= 0)
 			return 0;
 
 		free_dma(CH_ATAPI_RX);
+		dma_free_coherent(ap->dev,
+			BFIN_MAX_SG_SEGMENTS * sizeof(struct dma_desc_array),
+			ap->bmdma_prd,
+			ap->bmdma_prd_dma);
 	}
 
+out:
 	ap->udma_mask = 0;
 	ap->mwdma_mask = 0;
 	dev_err(ap->dev, "Unable to request ATAPI DMA!"
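The descriptor array itself lives in coherent DMA memory allocated once per port; every failure path, as well as bfin_port_stop(), must release it with arguments matching the allocation. A minimal sketch of the alloc/free pairing; the function names are illustrative:

#include <linux/dma-mapping.h>

static struct dma_desc_array *example_alloc_prd(struct device *dev,
						dma_addr_t *handle)
{
	/* one descriptor per possible sg segment; *handle receives the
	 * bus address later passed to set_dma_curr_desc_addr() */
	return dma_alloc_coherent(dev,
		BFIN_MAX_SG_SEGMENTS * sizeof(struct dma_desc_array),
		handle, GFP_KERNEL);
}

static void example_free_prd(struct device *dev,
			     struct dma_desc_array *prd, dma_addr_t handle)
{
	/* size, CPU pointer and bus address must match the allocation */
	dma_free_coherent(dev,
		BFIN_MAX_SG_SEGMENTS * sizeof(struct dma_desc_array),
		prd, handle);
}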
@@ -1400,7 +1409,7 @@ static irqreturn_t bfin_ata_interrupt(int irq, void *dev_instance)
 
 static struct scsi_host_template bfin_sht = {
 	ATA_BASE_SHT(DRV_NAME),
-	.sg_tablesize		= SG_NONE,
+	.sg_tablesize		= BFIN_MAX_SG_SEGMENTS,
 	.dma_boundary		= ATA_DMA_BOUNDARY,
 };
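Raising sg_tablesize from SG_NONE to BFIN_MAX_SG_SEGMENTS is what actually lets the SCSI midlayer build multi-segment requests; qc->n_elem is bounded by this value, so the four-entry descriptor array cannot overflow. An illustrative guard (not in the driver) that makes the invariant explicit:

#include <linux/bug.h>
#include <linux/libata.h>

static void example_check_nelem(struct ata_queued_cmd *qc)
{
	/* libata caps qc->n_elem at the sht->sg_tablesize advertised
	 * above, so this warning should never fire */
	WARN_ON_ONCE(qc->n_elem > BFIN_MAX_SG_SEGMENTS);
}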
drivers/ata/sata_fsl.c
@@ -140,6 +140,7 @@ enum {
 	 */
 	HCONTROL_ONLINE_PHY_RST = (1 << 31),
 	HCONTROL_FORCE_OFFLINE = (1 << 30),
+	HCONTROL_LEGACY = (1 << 28),
 	HCONTROL_PARITY_PROT_MOD = (1 << 14),
 	HCONTROL_DPATH_PARITY = (1 << 12),
 	HCONTROL_SNOOP_ENABLE = (1 << 10),
@@ -1223,6 +1224,10 @@ static int sata_fsl_init_controller(struct ata_host *host)
 	 * part of the port_start() callback
 	 */
 
+	/* sata controller to operate in enterprise mode */
+	temp = ioread32(hcr_base + HCONTROL);
+	iowrite32(temp & ~HCONTROL_LEGACY, hcr_base + HCONTROL);
+
 	/* ack. any pending IRQs for this controller/port */
 	temp = ioread32(hcr_base + HSTATUS);
 	if (temp & 0x3F)
@@ -1421,6 +1426,12 @@ static int sata_fsl_resume(struct platform_device *op)
 	/* Recovery the CHBA register in host controller cmd register set */
 	iowrite32(pp->cmdslot_paddr & 0xffffffff, hcr_base + CHBA);
 
+	iowrite32((ioread32(hcr_base + HCONTROL)
+		| HCONTROL_ONLINE_PHY_RST
+		| HCONTROL_SNOOP_ENABLE
+		| HCONTROL_PMP_ATTACHED),
+		hcr_base + HCONTROL);
+
 	ata_host_resume(host);
 
 	return 0;
 }
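Both sata_fsl hunks follow the same read-modify-write pattern on HCONTROL: load the register, flip only the mode bits concerned, and write the word back so unrelated control bits survive. A condensed sketch, assuming the HCONTROL offset and bit names from the enum above; the wrapper function itself is illustrative:

#include <linux/io.h>

static void example_hcontrol_modes(void __iomem *hcr_base)
{
	u32 temp = ioread32(hcr_base + HCONTROL);

	/* init: clear LEGACY so the controller runs in enterprise mode */
	iowrite32(temp & ~HCONTROL_LEGACY, hcr_base + HCONTROL);

	/* resume: the suspended block loses its configuration, so the
	 * PHY-reset and snoop bits must be set again on the way back up */
	iowrite32(ioread32(hcr_base + HCONTROL)
		| HCONTROL_ONLINE_PHY_RST
		| HCONTROL_SNOOP_ENABLE,
		hcr_base + HCONTROL);
}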