[ide] sg PIO for fs requests

Convert CONFIG_IDE_TASKFILE_IO=n code
to use scatterlists for PIO transfers.

Fixes longstanding 'data integrity on error'
issue and makes barriers work with PIO.
Signed-off-by: Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
parent 680b8968
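In rough terms: the old CONFIG_IDE_TASKFILE_IO=n path copied data through per-request ide_map_buffer()/ide_unmap_buffer() mappings and mutated rq->sector/rq->current_nr_sectors as it went, so a mid-transfer error left the request counters already advanced. The new path records the request in the hwif scatterlist once (ide_init_sg_cmd() plus ide_map_sg()) and walks it a sector at a time with ide_pio_sector()/ide_pio_multi(), completing or failing the request from hwif->nsect/hwif->nleft via ide_end_request()/task_error(). A minimal sketch of that per-sector walk, assuming lowmem buffers and eliding the kmap and bounds checks the real helper performs (pio_one_sector is a hypothetical name, not the kernel function):

/* Hypothetical sketch of the scatterlist-cursor walk; not the kernel's exact code. */
static void pio_one_sector(ide_drive_t *drive, unsigned int write)
{
        ide_hwif_t *hwif = drive->hwif;
        struct scatterlist *sg = &hwif->sg_table[hwif->cursg];
        /* assumes a lowmem page; the real helper kmaps highmem bio pages */
        char *buf = (char *)page_address(sg->page) + sg->offset +
                    (hwif->cursg_ofs << 9);

        if (write)
                taskfile_output_data(drive, buf, SECTOR_WORDS);
        else
                taskfile_input_data(drive, buf, SECTOR_WORDS);

        hwif->nleft--;
        if (++hwif->cursg_ofs == (sg->length >> 9)) {
                /* current segment exhausted, advance the cursor */
                hwif->cursg++;
                hwif->cursg_ofs = 0;
        }
}

Because the request itself is no longer consumed during the transfer, an error reported through task_error() is handled without the good/bad boundary having been lost, which is the 'data integrity on error' issue the changelog refers to.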
@@ -1172,8 +1172,7 @@ PIO drivers (or drivers that need to revert to PIO transfer once in a
 while (IDE for example)), where the CPU is doing the actual data
 transfer a virtual mapping is needed. If the driver supports highmem I/O,
 (Sec 1.1, (ii) ) it needs to use __bio_kmap_atomic and bio_kmap_irq to
-temporarily map a bio into the virtual address space. See how IDE handles
-this with ide_map_buffer.
+temporarily map a bio into the virtual address space.
 8. Prior/Related/Impacted patches
......
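The biodoc.txt paragraph above still directs PIO drivers to __bio_kmap_atomic/bio_kmap_irq for highmem; only the stale ide_map_buffer reference is dropped. As a rough illustration of the pattern that text describes, a sketch using the 2.6-era interfaces named in this patch (the helper name pio_out_bio_segment is hypothetical and error handling is omitted):

/* Illustrative only: PIO out the current segment of a possibly-highmem bio. */
static void pio_out_bio_segment(ide_drive_t *drive, struct bio *bio)
{
        unsigned long flags;
        char *buf = bio_kmap_irq(bio, &flags);  /* temporary atomic mapping */

        taskfile_output_data(drive, buf, bio_cur_sectors(bio) * SECTOR_WORDS);

        bio_kunmap_irq(buf, &flags);            /* drop the mapping promptly */
}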
@@ -297,8 +297,10 @@ static int e100_dma_setup(ide_drive_t *drive)
         }
         /* set up the Etrax DMA descriptors */
-        if (e100_ide_build_dmatable(drive))
+        if (e100_ide_build_dmatable(drive)) {
+                ide_map_sg(drive, rq);
                 return 1;
+        }
         return 0;
 }
......
@@ -128,55 +128,30 @@ static int lba_capacity_is_ok (struct hd_driveid *id)
 static ide_startstop_t read_intr (ide_drive_t *drive)
 {
         ide_hwif_t *hwif = HWIF(drive);
-        u32 i = 0, nsect = 0, msect = drive->mult_count;
-        struct request *rq;
-        unsigned long flags;
+        struct request *rq = hwif->hwgroup->rq;
         u8 stat;
-        char *to;
         /* new way for dealing with premature shared PCI interrupts */
         if (!OK_STAT(stat=hwif->INB(IDE_STATUS_REG),DATA_READY,BAD_R_STAT)) {
                 if (stat & (ERR_STAT|DRQ_STAT)) {
-                        return DRIVER(drive)->error(drive, "read_intr", stat);
+                        return task_error(drive, rq, __FUNCTION__, stat);
                 }
                 /* no data yet, so wait for another interrupt */
                 ide_set_handler(drive, &read_intr, WAIT_CMD, NULL);
                 return ide_started;
         }
-read_next:
-        rq = HWGROUP(drive)->rq;
-        if (msect) {
-                if ((nsect = rq->current_nr_sectors) > msect)
-                        nsect = msect;
-                msect -= nsect;
-        } else
-                nsect = 1;
-        to = ide_map_buffer(rq, &flags);
-        taskfile_input_data(drive, to, nsect * SECTOR_WORDS);
-#ifdef DEBUG
-        printk("%s: read: sectors(%ld-%ld), buffer=0x%08lx, remaining=%ld\n",
-                drive->name, rq->sector, rq->sector+nsect-1,
-                (unsigned long) rq->buffer+(nsect<<9), rq->nr_sectors-nsect);
-#endif
-        ide_unmap_buffer(rq, to, &flags);
-        rq->sector += nsect;
+        if (drive->mult_count)
+                ide_pio_multi(drive, 0);
+        else
+                ide_pio_sector(drive, 0);
         rq->errors = 0;
-        i = (rq->nr_sectors -= nsect);
-        if (((long)(rq->current_nr_sectors -= nsect)) <= 0)
-                ide_end_request(drive, 1, rq->hard_cur_sectors);
-        /*
-         * Another BH Page walker and DATA INTEGRITY Questioned on ERROR.
-         * If passed back up on multimode read, BAD DATA could be ACKED
-         * to FILE SYSTEMS above ...
-         */
-        if (i > 0) {
-                if (msect)
-                        goto read_next;
-                ide_set_handler(drive, &read_intr, WAIT_CMD, NULL);
-                return ide_started;
+        if (!hwif->nleft) {
+                ide_end_request(drive, 1, hwif->nsect);
+                return ide_stopped;
         }
-        return ide_stopped;
+        ide_set_handler(drive, &read_intr, WAIT_CMD, NULL);
+        return ide_started;
 }
 /*
@@ -187,106 +162,27 @@ static ide_startstop_t write_intr (ide_drive_t *drive)
         ide_hwgroup_t *hwgroup = HWGROUP(drive);
         ide_hwif_t *hwif = HWIF(drive);
         struct request *rq = hwgroup->rq;
-        u32 i = 0;
         u8 stat;
         if (!OK_STAT(stat = hwif->INB(IDE_STATUS_REG),
                         DRIVE_READY, drive->bad_wstat)) {
-                printk("%s: write_intr error1: nr_sectors=%ld, stat=0x%02x\n",
-                        drive->name, rq->nr_sectors, stat);
+                printk("%s: write_intr error1: nr_sectors=%u, stat=0x%02x\n",
+                        drive->name, hwif->nleft, stat);
         } else {
-#ifdef DEBUG
-                printk("%s: write: sector %ld, buffer=0x%08lx, remaining=%ld\n",
-                        drive->name, rq->sector, (unsigned long) rq->buffer,
-                        rq->nr_sectors-1);
-#endif
-                if ((rq->nr_sectors == 1) ^ ((stat & DRQ_STAT) != 0)) {
-                        rq->sector++;
+                if ((hwif->nleft == 0) ^ ((stat & DRQ_STAT) != 0)) {
                         rq->errors = 0;
-                        i = --rq->nr_sectors;
-                        --rq->current_nr_sectors;
-                        if (((long)rq->current_nr_sectors) <= 0)
-                                ide_end_request(drive, 1, rq->hard_cur_sectors);
-                        if (i > 0) {
-                                unsigned long flags;
-                                char *to = ide_map_buffer(rq, &flags);
-                                taskfile_output_data(drive, to, SECTOR_WORDS);
-                                ide_unmap_buffer(rq, to, &flags);
-                                ide_set_handler(drive, &write_intr, WAIT_CMD, NULL);
-                                return ide_started;
+                        if (!hwif->nleft) {
+                                ide_end_request(drive, 1, hwif->nsect);
+                                return ide_stopped;
                         }
-                        return ide_stopped;
+                        ide_pio_sector(drive, 1);
+                        ide_set_handler(drive, &write_intr, WAIT_CMD, NULL);
+                        return ide_started;
                 }
                 /* the original code did this here (?) */
                 return ide_stopped;
         }
-        return DRIVER(drive)->error(drive, "write_intr", stat);
-}
-/*
- * ide_multwrite() transfers a block of up to mcount sectors of data
- * to a drive as part of a disk multiple-sector write operation.
- *
- * Note that we may be called from two contexts - __ide_do_rw_disk() context
- * and IRQ context. The IRQ can happen any time after we've output the
- * full "mcount" number of sectors, so we must make sure we update the
- * state _before_ we output the final part of the data!
- *
- * The update and return to BH is a BLOCK Layer Fakey to get more data
- * to satisfy the hardware atomic segment. If the hardware atomic segment
- * is shorter or smaller than the BH segment then we should be OKAY.
- * This is only valid if we can rewind the rq->current_nr_sectors counter.
- */
-static void ide_multwrite(ide_drive_t *drive, unsigned int mcount)
-{
-        ide_hwgroup_t *hwgroup = HWGROUP(drive);
-        struct request *rq = &hwgroup->wrq;
-        do {
-                char *buffer;
-                int nsect = rq->current_nr_sectors;
-                unsigned long flags;
-                if (nsect > mcount)
-                        nsect = mcount;
-                mcount -= nsect;
-                buffer = ide_map_buffer(rq, &flags);
-                rq->sector += nsect;
-                rq->nr_sectors -= nsect;
-                rq->current_nr_sectors -= nsect;
-                /* Do we move to the next bh after this? */
-                if (!rq->current_nr_sectors) {
-                        struct bio *bio = rq->bio;
-                        /*
-                         * only move to next bio, when we have processed
-                         * all bvecs in this one.
-                         */
-                        if (++bio->bi_idx >= bio->bi_vcnt) {
-                                bio->bi_idx = bio->bi_vcnt - rq->nr_cbio_segments;
-                                bio = bio->bi_next;
-                        }
-                        /* end early early we ran out of requests */
-                        if (!bio) {
-                                mcount = 0;
-                        } else {
-                                rq->bio = bio;
-                                rq->nr_cbio_segments = bio_segments(bio);
-                                rq->current_nr_sectors = bio_cur_sectors(bio);
-                                rq->hard_cur_sectors = rq->current_nr_sectors;
-                        }
-                }
-                /*
-                 * Ok, we're all setup for the interrupt
-                 * re-entering us on the last transfer.
-                 */
-                taskfile_output_data(drive, buffer, nsect<<7);
-                ide_unmap_buffer(rq, buffer, &flags);
-        } while (mcount);
+        return task_error(drive, rq, __FUNCTION__, stat);
 }
 /*
@@ -294,42 +190,29 @@ static void ide_multwrite(ide_drive_t *drive, unsigned int mcount)
  */
 static ide_startstop_t multwrite_intr (ide_drive_t *drive)
 {
-        ide_hwgroup_t *hwgroup = HWGROUP(drive);
         ide_hwif_t *hwif = HWIF(drive);
-        struct request *rq = &hwgroup->wrq;
-        struct bio *bio = rq->bio;
+        struct request *rq = hwif->hwgroup->rq;
         u8 stat;
         stat = hwif->INB(IDE_STATUS_REG);
         if (OK_STAT(stat, DRIVE_READY, drive->bad_wstat)) {
                 if (stat & DRQ_STAT) {
-                        /*
-                         * The drive wants data. Remember rq is the copy
-                         * of the request
-                         */
-                        if (rq->nr_sectors) {
-                                ide_multwrite(drive, drive->mult_count);
+                        /* The drive wants data. */
+                        if (hwif->nleft) {
+                                ide_pio_multi(drive, 1);
                                 ide_set_handler(drive, &multwrite_intr, WAIT_CMD, NULL);
                                 return ide_started;
                         }
                 } else {
-                        /*
-                         * If the copy has all the blocks completed then
-                         * we can end the original request.
-                         */
-                        if (!rq->nr_sectors) {  /* all done? */
-                                bio->bi_idx = bio->bi_vcnt - rq->nr_cbio_segments;
-                                rq = hwgroup->rq;
-                                ide_end_request(drive, 1, rq->nr_sectors);
+                        if (!hwif->nleft) {     /* all done? */
+                                ide_end_request(drive, 1, hwif->nsect);
                                 return ide_stopped;
                         }
                 }
-                bio->bi_idx = bio->bi_vcnt - rq->nr_cbio_segments;
                 /* the original code did this here (?) */
                 return ide_stopped;
         }
-        bio->bi_idx = bio->bi_vcnt - rq->nr_cbio_segments;
-        return DRIVER(drive)->error(drive, "multwrite_intr", stat);
+        return task_error(drive, rq, __FUNCTION__, stat);
 }
 /*
@@ -352,6 +235,11 @@ ide_startstop_t __ide_do_rw_disk (ide_drive_t *drive, struct request *rq, sector
                 dma = 0;
         }
+        if (!dma) {
+                ide_init_sg_cmd(drive, rq);
+                ide_map_sg(drive, rq);
+        }
         if (IDE_CONTROL_REG)
                 hwif->OUTB(drive->ctl, IDE_CONTROL_REG);
@@ -435,20 +323,32 @@ ide_startstop_t __ide_do_rw_disk (ide_drive_t *drive, struct request *rq, sector
                         return ide_started;
                 }
                 /* fallback to PIO */
+                ide_init_sg_cmd(drive, rq);
         }
         if (rq_data_dir(rq) == READ) {
-                command = ((drive->mult_count) ?
-                           ((lba48) ? WIN_MULTREAD_EXT : WIN_MULTREAD) :
-                           ((lba48) ? WIN_READ_EXT : WIN_READ));
+                if (drive->mult_count) {
+                        hwif->data_phase = TASKFILE_MULTI_IN;
+                        command = lba48 ? WIN_MULTREAD_EXT : WIN_MULTREAD;
+                } else {
+                        hwif->data_phase = TASKFILE_IN;
+                        command = lba48 ? WIN_READ_EXT : WIN_READ;
+                }
                 ide_execute_command(drive, command, &read_intr, WAIT_CMD, NULL);
                 return ide_started;
         } else {
                 ide_startstop_t startstop;
-                command = ((drive->mult_count) ?
-                           ((lba48) ? WIN_MULTWRITE_EXT : WIN_MULTWRITE) :
-                           ((lba48) ? WIN_WRITE_EXT : WIN_WRITE));
+                if (drive->mult_count) {
+                        hwif->data_phase = TASKFILE_MULTI_OUT;
+                        command = lba48 ? WIN_MULTWRITE_EXT : WIN_MULTWRITE;
+                } else {
+                        hwif->data_phase = TASKFILE_OUT;
+                        command = lba48 ? WIN_WRITE_EXT : WIN_WRITE;
+                }
                 hwif->OUTB(command, IDE_COMMAND_REG);
                 if (ide_wait_stat(&startstop, drive, DATA_READY,
@@ -461,17 +361,11 @@ ide_startstop_t __ide_do_rw_disk (ide_drive_t *drive, struct request *rq, sector
                 if (!drive->unmask)
                         local_irq_disable();
                 if (drive->mult_count) {
-                        ide_hwgroup_t *hwgroup = HWGROUP(drive);
-                        hwgroup->wrq = *rq; /* scratchpad */
                         ide_set_handler(drive, &multwrite_intr, WAIT_CMD, NULL);
-                        ide_multwrite(drive, drive->mult_count);
+                        ide_pio_multi(drive, 1);
                 } else {
-                        unsigned long flags;
-                        char *to = ide_map_buffer(rq, &flags);
                         ide_set_handler(drive, &write_intr, WAIT_CMD, NULL);
-                        taskfile_output_data(drive, to, SECTOR_WORDS);
-                        ide_unmap_buffer(rq, to, &flags);
+                        ide_pio_sector(drive, 1);
                 }
                 return ide_started;
         }
@@ -516,8 +410,10 @@ static u8 get_command(ide_drive_t *drive, struct request *rq, ide_task_t *task)
                 dma = 0;
         }
-        if (!dma)
+        if (!dma) {
                 ide_init_sg_cmd(drive, rq);
+                ide_map_sg(drive, rq);
+        }
         if (rq_data_dir(rq) == READ) {
                 task->command_type = IDE_DRIVE_TASK_IN;
......
@@ -610,8 +610,10 @@ int ide_dma_setup(ide_drive_t *drive)
                 reading = 1 << 3;
         /* fall back to pio! */
-        if (!ide_build_dmatable(drive, rq))
+        if (!ide_build_dmatable(drive, rq)) {
+                ide_map_sg(drive, rq);
                 return 1;
+        }
         /* PRD table */
         hwif->OUTL(hwif->dmatable_dma, hwif->dma_prdtable);
......
@@ -675,7 +675,7 @@ ide_startstop_t do_special (ide_drive_t *drive)
 EXPORT_SYMBOL(do_special);
-static void ide_map_sg(ide_drive_t *drive, struct request *rq)
+void ide_map_sg(ide_drive_t *drive, struct request *rq)
 {
         ide_hwif_t *hwif = drive->hwif;
         struct scatterlist *sg = hwif->sg_table;
@@ -688,14 +688,14 @@ static void ide_map_sg(ide_drive_t *drive, struct request *rq)
         }
 }
+EXPORT_SYMBOL_GPL(ide_map_sg);
 void ide_init_sg_cmd(ide_drive_t *drive, struct request *rq)
 {
         ide_hwif_t *hwif = drive->hwif;
         hwif->nsect = hwif->nleft = rq->nr_sectors;
         hwif->cursg = hwif->cursg_ofs = 0;
-        ide_map_sg(drive, rq);
 }
 EXPORT_SYMBOL_GPL(ide_init_sg_cmd);
@@ -729,6 +729,7 @@ ide_startstop_t execute_drive_cmd (ide_drive_t *drive, struct request *rq)
         case TASKFILE_MULTI_IN:
         case TASKFILE_IN:
                 ide_init_sg_cmd(drive, rq);
+                ide_map_sg(drive, rq);
         default:
                 break;
         }
......
@@ -273,7 +273,7 @@ static u8 wait_drive_not_busy(ide_drive_t *drive)
         return stat;
 }
-static void ide_pio_sector(ide_drive_t *drive, unsigned int write)
+void ide_pio_sector(ide_drive_t *drive, unsigned int write)
 {
         ide_hwif_t *hwif = drive->hwif;
         struct scatterlist *sg = hwif->sg_table;
@@ -310,7 +310,9 @@ static void ide_pio_sector(ide_drive_t *drive, unsigned int write)
 #endif
 }
-static void ide_pio_multi(ide_drive_t *drive, unsigned int write)
+EXPORT_SYMBOL_GPL(ide_pio_sector);
+
+void ide_pio_multi(ide_drive_t *drive, unsigned int write)
 {
         unsigned int nsect;
@@ -319,6 +321,8 @@ static void ide_pio_multi(ide_drive_t *drive, unsigned int write)
                 ide_pio_sector(drive, write);
 }
+EXPORT_SYMBOL_GPL(ide_pio_multi);
 static inline void ide_pio_datablock(ide_drive_t *drive, struct request *rq,
                                      unsigned int write)
 {
@@ -336,9 +340,8 @@ static inline void ide_pio_datablock(ide_drive_t *drive, struct request *rq,
         }
 }
-#ifdef CONFIG_IDE_TASKFILE_IO
-static ide_startstop_t task_error(ide_drive_t *drive, struct request *rq,
-                                  const char *s, u8 stat)
+ide_startstop_t task_error(ide_drive_t *drive, struct request *rq,
+                           const char *s, u8 stat)
 {
         if (rq->bio) {
                 ide_hwif_t *hwif = drive->hwif;
@@ -367,9 +370,8 @@ static ide_startstop_t task_error(ide_drive_t *drive, struct request *rq,
         }
         return drive->driver->error(drive, s, stat);
 }
-#else
-# define task_error(d, rq, s, stat) drive->driver->error(d, s, stat)
-#endif
+EXPORT_SYMBOL_GPL(task_error);
 static void task_end_request(ide_drive_t *drive, struct request *rq, u8 stat)
 {
......
@@ -577,6 +577,7 @@ static int sgiioc4_ide_dma_setup(ide_drive_t *drive)
         if (!(count = sgiioc4_build_dma_table(drive, rq, ddir))) {
                 /* try PIO instead of DMA */
+                ide_map_sg(drive, rq);
                 return 1;
         }
......
@@ -1885,8 +1885,10 @@ pmac_ide_dma_setup(ide_drive_t *drive)
                 return 1;
         ata4 = (pmif->kind == controller_kl_ata4);
-        if (!pmac_ide_build_dmatable(drive, rq))
+        if (!pmac_ide_build_dmatable(drive, rq)) {
+                ide_map_sg(drive, rq);
                 return 1;
+        }
         /* Apple adds 60ns to wrDataSetup on reads */
         if (ata4 && (pmif->timings[unit] & TR_66_UDMA_EN)) {
......
@@ -789,27 +789,6 @@ typedef struct ide_drive_s {
         struct gendisk *disk;
 } ide_drive_t;
-/*
- * mapping stuff, prepare for highmem...
- *
- * temporarily mapping a (possible) highmem bio for PIO transfer
- */
-#ifndef CONFIG_IDE_TASKFILE_IO
-#define ide_rq_offset(rq) \
-        (((rq)->hard_cur_sectors - (rq)->current_nr_sectors) << 9)
-static inline void *ide_map_buffer(struct request *rq, unsigned long *flags)
-{
-        return bio_kmap_irq(rq->bio, flags) + ide_rq_offset(rq);
-}
-static inline void ide_unmap_buffer(struct request *rq, char *buffer, unsigned long *flags)
-{
-        bio_kunmap_irq(buffer, flags);
-}
-#endif /* !CONFIG_IDE_TASKFILE_IO */
 #define IDE_CHIPSET_PCI_MASK \
         ((1<<ide_pci)|(1<<ide_cmd646)|(1<<ide_ali14xx))
 #define IDE_CHIPSET_IS_PCI(c)   ((IDE_CHIPSET_PCI_MASK >> (c)) & 1)
@@ -1375,6 +1354,11 @@ extern void atapi_output_bytes(ide_drive_t *, void *, u32);
 extern void taskfile_input_data(ide_drive_t *, void *, u32);
 extern void taskfile_output_data(ide_drive_t *, void *, u32);
+void ide_pio_sector(ide_drive_t *, unsigned int);
+void ide_pio_multi(ide_drive_t *, unsigned int);
+ide_startstop_t task_error(ide_drive_t *, struct request *, const char *, u8);
 extern int drive_is_ready(ide_drive_t *);
 extern int wait_for_ready(ide_drive_t *, int /* timeout */);
@@ -1505,6 +1489,7 @@ typedef struct ide_pci_device_s {
 extern void ide_setup_pci_device(struct pci_dev *, ide_pci_device_t *);
 extern void ide_setup_pci_devices(struct pci_dev *, struct pci_dev *, ide_pci_device_t *);
+void ide_map_sg(ide_drive_t *, struct request *);
 void ide_init_sg_cmd(ide_drive_t *, struct request *);
 #define BAD_DMA_DRIVE 0
......