[ide] use ide_map_sg()

* make Etrax ide.c, icside.c and ide-dma.c use ide_map_sg()
* use one sg for REQ_DRIVE_TASKFILE requests in ide-dma.c
  (there is no reason for the 128-sectors-per-sg limit)
Signed-off-by: Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
parent 35ebc8b9
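
For orientation (not part of the patch itself): ide_map_sg() centralizes the scatterlist setup that each converted driver open-coded. A rough sketch of its behaviour, reconstructed from the code removed in the hunks below and assumed rather than quoted from the helper's actual body:

/*
 * Sketch only: approximate behaviour of ide_map_sg(), inferred from the
 * call sites removed in this patch (not the helper's verbatim source).
 */
void ide_map_sg(ide_drive_t *drive, struct request *rq)
{
	ide_hwif_t *hwif = drive->hwif;
	struct scatterlist *sg = hwif->sg_table;

	if (rq->flags & REQ_DRIVE_TASKFILE) {
		/* taskfile request: one sg entry covering the raw buffer */
		sg_init_one(sg, rq->buffer, rq->nr_sectors * SECTOR_SIZE);
		hwif->sg_nents = 1;
	} else {
		/* normal block request: let the block layer build the list */
		hwif->sg_nents = blk_rq_map_sg(drive->queue, rq, sg);
	}
}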
@@ -656,15 +656,9 @@ static int e100_ide_build_dmatable (ide_drive_t *drive)
 	ata_tot_size = 0;
 
-	if (HWGROUP(drive)->rq->flags & REQ_DRIVE_TASKFILE) {
-		sg_init_one(&sg[0], rq->buffer, rq->nr_sectors * SECTOR_SIZE);
-		hwif->sg_nents = i = 1;
-	}
-	else
-	{
-		hwif->sg_nents = i = blk_rq_map_sg(drive->queue, rq, hwif->sg_table);
-	}
+	ide_map_sg(drive, rq);
+
+	i = hwif->sg_nents;
 
 	while(i) {
 		/*
...
@@ -212,33 +212,18 @@ static void icside_build_sglist(ide_drive_t *drive, struct request *rq)
 	ide_hwif_t *hwif = drive->hwif;
 	struct icside_state *state = hwif->hwif_data;
 	struct scatterlist *sg = hwif->sg_table;
-	int nents;
 
-	if (rq->flags & REQ_DRIVE_TASKFILE) {
-		ide_task_t *args = rq->special;
+	ide_map_sg(drive, rq);
 
-		if (args->command_type == IDE_DRIVE_TASK_RAW_WRITE)
-			hwif->sg_dma_direction = DMA_TO_DEVICE;
-		else
-			hwif->sg_dma_direction = DMA_FROM_DEVICE;
+	if (rq_data_dir(rq) == READ)
+		hwif->sg_dma_direction = DMA_FROM_DEVICE;
+	else
+		hwif->sg_dma_direction = DMA_TO_DEVICE;
 
-		sg_init_one(sg, rq->buffer, rq->nr_sectors * SECTOR_SIZE);
-		nents = 1;
-	} else {
-		nents = blk_rq_map_sg(drive->queue, rq, sg);
-
-		if (rq_data_dir(rq) == READ)
-			hwif->sg_dma_direction = DMA_FROM_DEVICE;
-		else
-			hwif->sg_dma_direction = DMA_TO_DEVICE;
-	}
-
-	nents = dma_map_sg(state->dev, sg, nents, hwif->sg_dma_direction);
-
-	hwif->sg_nents = nents;
+	hwif->sg_nents = dma_map_sg(state->dev, sg, hwif->sg_nents,
+				    hwif->sg_dma_direction);
 }
 
 /*
  * Configure the IOMD to give the appropriate timings for the transfer
  * mode being requested.  We take the advice of the ATA standards, and
...
@@ -207,16 +207,15 @@ int ide_build_sglist(ide_drive_t *drive, struct request *rq)
 {
 	ide_hwif_t *hwif = HWIF(drive);
 	struct scatterlist *sg = hwif->sg_table;
-	int nents;
 
-	nents = blk_rq_map_sg(drive->queue, rq, hwif->sg_table);
+	ide_map_sg(drive, rq);
 
 	if (rq_data_dir(rq) == READ)
 		hwif->sg_dma_direction = PCI_DMA_FROMDEVICE;
 	else
 		hwif->sg_dma_direction = PCI_DMA_TODEVICE;
 
-	return pci_map_sg(hwif->pci_dev, sg, nents, hwif->sg_dma_direction);
+	return pci_map_sg(hwif->pci_dev, sg, hwif->sg_nents, hwif->sg_dma_direction);
 }
 
 EXPORT_SYMBOL_GPL(ide_build_sglist);
@@ -236,33 +235,18 @@ int ide_raw_build_sglist(ide_drive_t *drive, struct request *rq)
 {
 	ide_hwif_t *hwif = HWIF(drive);
 	struct scatterlist *sg = hwif->sg_table;
-	int nents = 0;
 	ide_task_t *args = rq->special;
-	u8 *virt_addr = rq->buffer;
-	int sector_count = rq->nr_sectors;
 
 	if (args->command_type == IDE_DRIVE_TASK_RAW_WRITE)
 		hwif->sg_dma_direction = PCI_DMA_TODEVICE;
 	else
 		hwif->sg_dma_direction = PCI_DMA_FROMDEVICE;
 
-#if 1
-	if (sector_count > 256)
-		BUG();
+	BUG_ON(rq->nr_sectors > 256);
 
-	if (sector_count > 128) {
-#else
-	while (sector_count > 128) {
-#endif
-		sg_init_one(&sg[nents], virt_addr, 128 * SECTOR_SIZE);
-		nents++;
-		virt_addr = virt_addr + (128 * SECTOR_SIZE);
-		sector_count -= 128;
-	}
-	sg_init_one(&sg[nents], virt_addr, sector_count * SECTOR_SIZE);
-	nents++;
+	ide_map_sg(drive, rq);
 
-	return pci_map_sg(hwif->pci_dev, sg, nents, hwif->sg_dma_direction);
+	return pci_map_sg(hwif->pci_dev, sg, hwif->sg_nents, hwif->sg_dma_direction);
 }
 
 EXPORT_SYMBOL_GPL(ide_raw_build_sglist);
...
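
For illustration only, a hypothetical caller that is not part of this patch: a DMA setup path typically calls ide_build_sglist() and then walks hwif->sg_table for the returned number of mapped segments, reading each segment's bus address and length with sg_dma_address()/sg_dma_len():

/*
 * Hypothetical example (sketch, not from this patch): consume the mapped
 * scatterlist to program a controller's descriptor (PRD) table.
 */
static int example_walk_segments(ide_drive_t *drive, struct request *rq)
{
	ide_hwif_t *hwif = HWIF(drive);
	struct scatterlist *sg = hwif->sg_table;
	int i, count;

	count = ide_build_sglist(drive, rq);	/* number of DMA-mapped entries */
	for (i = 0; i < count; i++, sg++) {
		u32 cur_addr = sg_dma_address(sg);
		u32 cur_len  = sg_dma_len(sg);

		/* here a real driver would write cur_addr/cur_len into hardware */
		printk(KERN_DEBUG "seg %d: %08x + %u\n", i, cur_addr, cur_len);
	}
	return count;
}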