ide: switch to DMA-mapping API

* pci_map_sg() -> dma_map_sg() in ide_build_sglist().

* pci_unmap_sg() -> dma_unmap_sg() in ide_destroy_dmatable().

There should be no functionality changes caused by this patch except
for blackfin arch whose dma_[un]map_sg() implementation differs from
pci_[un]map_sg() one (on s390 arch there is no PCI, on avr32 and h8300
archs PCI is currently unsupported, on m32r arch PCI support depends
on BROKEN, on m68k arch PCI support depends on HADES which in turn
depends on BROKEN, on all other archs dma_[un]map_sg() functionality
matches with pci_[un]map_sg() one).

blackfin behavior change was ack-ed by Bryan Wu.

Cc: Bryan Wu <bryan.wu@analog.com>
Signed-off-by: Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
parent f8341c1c
...@@ -85,6 +85,7 @@ ...@@ -85,6 +85,7 @@
#include <linux/ide.h> #include <linux/ide.h>
#include <linux/delay.h> #include <linux/delay.h>
#include <linux/scatterlist.h> #include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <asm/io.h> #include <asm/io.h>
#include <asm/irq.h> #include <asm/irq.h>
...@@ -175,26 +176,26 @@ static int ide_dma_good_drive(ide_drive_t *drive) ...@@ -175,26 +176,26 @@ static int ide_dma_good_drive(ide_drive_t *drive)
 * @drive: the drive to build the DMA table for
 * @rq: the request holding the sg list
 *
 * Perform the DMA mapping magic necessary to access the source or
 * target buffers of a request via DMA. The lower layers of the
 * kernel provide the necessary cache management so that we can
 * operate in a portable fashion.
 */
int ide_build_sglist(ide_drive_t *drive, struct request *rq)
{
	ide_hwif_t *hwif = HWIF(drive);
	struct scatterlist *sg = hwif->sg_table;

	ide_map_sg(drive, rq);

	/* Direction is from the device's point of view: a READ request
	 * means the device writes into memory (DMA_FROM_DEVICE). */
	if (rq_data_dir(rq) == READ)
		hwif->sg_dma_direction = DMA_FROM_DEVICE;
	else
		hwif->sg_dma_direction = DMA_TO_DEVICE;

	/* Generic DMA-mapping API (dma_map_sg) replaces the PCI-specific
	 * pci_map_sg(); hwif->dev is the generic struct device, so no
	 * to_pci_dev() conversion is needed. Returns the number of mapped
	 * entries (0 on failure). */
	return dma_map_sg(hwif->dev, sg, hwif->sg_nents,
			  hwif->sg_dma_direction);
}
EXPORT_SYMBOL_GPL(ide_build_sglist);
...@@ -308,9 +309,8 @@ EXPORT_SYMBOL_GPL(ide_build_dmatable); ...@@ -308,9 +309,8 @@ EXPORT_SYMBOL_GPL(ide_build_dmatable);
/*
 * ide_destroy_dmatable - tear down the scatter/gather DMA mapping
 * @drive: the drive whose sg mapping should be released
 *
 * Undo the mapping created by ide_build_sglist() using the generic
 * DMA-mapping API (dma_unmap_sg), mirroring the direction and entry
 * count recorded in the hwif at map time.
 */
void ide_destroy_dmatable (ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;

	dma_unmap_sg(hwif->dev, hwif->sg_table, hwif->sg_nents,
		     hwif->sg_dma_direction);
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment