Commit c54c719b authored by Quentin Lambert, committed by Tejun Heo

ata: remove deprecated use of pci api

Replace occurrences of the PCI API with the appropriate calls to the DMA API.

A simplified version of the semantic patch that finds this problem is as
follows: (http://coccinelle.lip6.fr)

@deprecated@
idexpression id;
position p;
@@

(
  pci_dma_supported@p ( id, ...)
|
  pci_alloc_consistent@p ( id, ...)
)

@bad1@
idexpression id;
position deprecated.p;
@@
...when != &id->dev
   when != pci_get_drvdata ( id )
   when != pci_enable_device ( id )
(
  pci_dma_supported@p ( id, ...)
|
  pci_alloc_consistent@p ( id, ...)
)

@depends on !bad1@
idexpression id;
expression direction;
position deprecated.p;
@@

(
- pci_dma_supported@p ( id,
+ dma_supported ( &id->dev,
...
+ , GFP_ATOMIC
  )
|
- pci_alloc_consistent@p ( id,
+ dma_alloc_coherent ( &id->dev,
...
+ , GFP_ATOMIC
  )
)
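
For drivers making the same conversion by hand, the resulting pattern looks roughly like the sketch below. This is a hypothetical fragment for illustration only, not part of this patch; the function name and mask width are made up, and <linux/pci.h> plus <linux/dma-mapping.h> provide the declarations used:

/* Hypothetical example; not taken from this commit. */
static int example_configure_dma(struct pci_dev *pdev)
{
        int rc;

        /* was: rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); */
        rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
        if (rc)
                return rc;

        /* was: rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); */
        return dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
}

dma_set_mask_and_coherent() could combine the two calls, but the hunks below keep the two-step form of the original code.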
Signed-off-by: Quentin Lambert <lambert.quentin@gmail.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
parent 9a1e75e1
@@ -181,10 +181,10 @@ static int acard_ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
 int rc;
 if (using_dac &&
-!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
-rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
 if (rc) {
-rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
 if (rc) {
 dev_err(&pdev->dev,
 "64-bit DMA enable failed\n");
@@ -192,12 +192,12 @@ static int acard_ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
 }
 }
 } else {
-rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 if (rc) {
 dev_err(&pdev->dev, "32-bit DMA enable failed\n");
 return rc;
 }
-rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
 if (rc) {
 dev_err(&pdev->dev,
 "32-bit consistent DMA enable failed\n");
@@ -738,10 +738,10 @@ static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
 return 0;
 if (using_dac &&
-!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
-rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
 if (rc) {
-rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
 if (rc) {
 dev_err(&pdev->dev,
 "64-bit DMA enable failed\n");
@@ -749,12 +749,12 @@ static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
 }
 }
 } else {
-rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 if (rc) {
 dev_err(&pdev->dev, "32-bit DMA enable failed\n");
 return rc;
 }
-rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
 if (rc) {
 dev_err(&pdev->dev,
 "32-bit consistent DMA enable failed\n");
@@ -3220,11 +3220,11 @@ void ata_pci_bmdma_init(struct ata_host *host)
 * ->sff_irq_clear method. Try to initialize bmdma_addr
 * regardless of dma masks.
 */
-rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
+rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
 if (rc)
 ata_bmdma_nodma(host, "failed to set dma mask");
 if (!rc) {
-rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
+rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK);
 if (rc)
 ata_bmdma_nodma(host,
 "failed to set consistent dma mask");
@@ -475,11 +475,11 @@ static int atp867x_ata_pci_sff_init_host(struct ata_host *host)
 atp867x_fixup(host);
-rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
+rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
 if (rc)
 return rc;
-rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
+rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK);
 return rc;
 }
@@ -164,11 +164,11 @@ static int cs5520_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 return -ENODEV;
 }
-if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
 printk(KERN_ERR DRV_NAME ": unable to configure DMA mask.\n");
 return -ENODEV;
 }
-if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
+if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32))) {
 printk(KERN_ERR DRV_NAME ": unable to configure consistent DMA mask.\n");
 return -ENODEV;
 }
@@ -221,10 +221,10 @@ static int hpt3x3_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 if (rc)
 return rc;
 host->iomap = pcim_iomap_table(pdev);
-rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
+rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
 if (rc)
 return rc;
-rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
+rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK);
 if (rc)
 return rc;
@@ -122,10 +122,10 @@ static int ninja32_init_one(struct pci_dev *dev, const struct pci_device_id *id)
 return rc;
 host->iomap = pcim_iomap_table(dev);
-rc = pci_set_dma_mask(dev, ATA_DMA_MASK);
+rc = dma_set_mask(&dev->dev, ATA_DMA_MASK);
 if (rc)
 return rc;
-rc = pci_set_consistent_dma_mask(dev, ATA_DMA_MASK);
+rc = dma_set_coherent_mask(&dev->dev, ATA_DMA_MASK);
 if (rc)
 return rc;
 pci_set_master(dev);
@@ -730,11 +730,11 @@ static int pdc2027x_init_one(struct pci_dev *pdev,
 return rc;
 host->iomap = pcim_iomap_table(pdev);
-rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
+rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
 if (rc)
 return rc;
-rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
+rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK);
 if (rc)
 return rc;
@@ -1029,10 +1029,10 @@ static int scc_host_init(struct ata_host *host)
 if (rc)
 return rc;
-rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
+rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
 if (rc)
 return rc;
-rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
+rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK);
 if (rc)
 return rc;
@@ -374,10 +374,10 @@ static int sil680_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 host->iomap = pcim_iomap_table(pdev);
 /* Setup DMA masks */
-rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
+rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
 if (rc)
 return rc;
-rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
+rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK);
 if (rc)
 return rc;
 pci_set_master(pdev);
@@ -593,12 +593,12 @@ static int adma_set_dma_masks(struct pci_dev *pdev, void __iomem *mmio_base)
 {
 int rc;
-rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 if (rc) {
 dev_err(&pdev->dev, "32-bit DMA enable failed\n");
 return rc;
 }
-rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
 if (rc) {
 dev_err(&pdev->dev, "32-bit consistent DMA enable failed\n");
 return rc;
@@ -856,13 +856,13 @@ static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 }
 /* Set dma_mask. This devices doesn't support 64bit addressing. */
-rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 if (rc) {
 dev_err(&pdev->dev, "32-bit DMA enable failed\n");
 return rc;
 }
-rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
 if (rc) {
 dev_err(&pdev->dev, "32-bit consistent DMA enable failed\n");
 return rc;
@@ -4320,10 +4320,10 @@ static int pci_go_64(struct pci_dev *pdev)
 {
 int rc;
-if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
-rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
 if (rc) {
-rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
 if (rc) {
 dev_err(&pdev->dev,
 "64-bit DMA enable failed\n");
@@ -4331,12 +4331,12 @@ static int pci_go_64(struct pci_dev *pdev)
 }
 }
 } else {
-rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 if (rc) {
 dev_err(&pdev->dev, "32-bit DMA enable failed\n");
 return rc;
 }
-rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
 if (rc) {
 dev_err(&pdev->dev,
 "32-bit consistent DMA enable failed\n");
@@ -756,10 +756,10 @@ static int nv_adma_slave_config(struct scsi_device *sdev)
 blk_queue_bounce_limit(sdev1->request_queue,
 ATA_DMA_MASK);
-pci_set_dma_mask(pdev, ATA_DMA_MASK);
+dma_set_mask(&pdev->dev, ATA_DMA_MASK);
 } else {
 /** This shouldn't fail as it was set to this value before */
-pci_set_dma_mask(pdev, pp->adma_dma_mask);
+dma_set_mask(&pdev->dev, pp->adma_dma_mask);
 if (sdev0)
 blk_queue_bounce_limit(sdev0->request_queue,
 pp->adma_dma_mask);
@@ -1133,10 +1133,10 @@ static int nv_adma_port_start(struct ata_port *ap)
 /* Ensure DMA mask is set to 32-bit before allocating legacy PRD and
 pad buffers */
-rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 if (rc)
 return rc;
-rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
 if (rc)
 return rc;
@@ -1161,8 +1161,8 @@ static int nv_adma_port_start(struct ata_port *ap)
 These are allowed to fail since we store the value that ends up
 being used to set as the bounce limit in slave_config later if
 needed. */
-pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
-pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
 pp->adma_dma_mask = *dev->dma_mask;
 mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
@@ -1246,10 +1246,10 @@ static int pdc_ata_init_one(struct pci_dev *pdev,
 /* initialize adapter */
 pdc_host_init(host);
-rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
+rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
 if (rc)
 return rc;
-rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
+rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK);
 if (rc)
 return rc;
@@ -557,10 +557,10 @@ static int qs_set_dma_masks(struct pci_dev *pdev, void __iomem *mmio_base)
 int rc, have_64bit_bus = (bus_info & QS_HPHY_64BIT);
 if (have_64bit_bus &&
-!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
-rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
 if (rc) {
-rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
 if (rc) {
 dev_err(&pdev->dev,
 "64-bit DMA enable failed\n");
@@ -568,12 +568,12 @@ static int qs_set_dma_masks(struct pci_dev *pdev, void __iomem *mmio_base)
 }
 }
 } else {
-rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 if (rc) {
 dev_err(&pdev->dev, "32-bit DMA enable failed\n");
 return rc;
 }
-rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
 if (rc) {
 dev_err(&pdev->dev,
 "32-bit consistent DMA enable failed\n");
@@ -770,10 +770,10 @@ static int sil_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 return rc;
 host->iomap = pcim_iomap_table(pdev);
-rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
+rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
 if (rc)
 return rc;
-rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
+rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK);
 if (rc)
 return rc;
@@ -1312,10 +1312,10 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 host->iomap = iomap;
 /* configure and activate the device */
-if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
-rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
 if (rc) {
-rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
 if (rc) {
 dev_err(&pdev->dev,
 "64-bit DMA enable failed\n");
@@ -1323,12 +1323,12 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 }
 }
 } else {
-rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 if (rc) {
 dev_err(&pdev->dev, "32-bit DMA enable failed\n");
 return rc;
 }
-rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
 if (rc) {
 dev_err(&pdev->dev,
 "32-bit consistent DMA enable failed\n");
@@ -496,10 +496,10 @@ static int k2_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *en
 ata_port_pbar_desc(ap, 5, offset, "port");
 }
-rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
+rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
 if (rc)
 return rc;
-rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
+rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK);
 if (rc)
 return rc;
@@ -1476,10 +1476,10 @@ static int pdc_sata_init_one(struct pci_dev *pdev,
 }
 /* configure and activate */
-rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
+rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
 if (rc)
 return rc;
-rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
+rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK);
 if (rc)
 return rc;
@@ -502,10 +502,10 @@ static int vt6421_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
 for (i = 0; i < host->n_ports; i++)
 vt6421_init_addrs(host->ports[i]);
-rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
+rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
 if (rc)
 return rc;
-rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
+rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK);
 if (rc)
 return rc;
@@ -387,10 +387,10 @@ static int vsc_sata_init_one(struct pci_dev *pdev,
 /*
 * Use 32 bit DMA mask, because 64 bit address support is poor.
 */
-rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 if (rc)
 return rc;
-rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
 if (rc)
 return rc;