Commit 4507e7c8 authored by Martin Dalecki, committed by Linus Torvalds

[PATCH] 2.5.11 IDE 48

Tue Apr 30 13:23:13 CEST 2002 ide-clean-48

This fixes the "performance" degradation partially, because we don't
miss that many jiffies in choose_urgent_device() anymore.  However,
choose_urgent_device() still has to be fixed for an off-by-one error,
so that it doesn't loop for a whole 1/100 second before submitting the
next request.
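
The off-by-one in question is the usual jiffies deadline comparison. A
minimal user-space sketch (illustrative only, not code from this patch)
of the wraparound-safe signed-difference test the scheduling code below
builds on, and of why a strict comparison wastes a tick:

#include <stdio.h>

/*
 * Wraparound-safe "has this deadline passed?" test -- the same signed-
 * difference trick the scheduling code below uses with jiffies.  With a
 * strict '<' a deadline equal to the current tick is not yet treated as
 * expired, which is the kind of off-by-one that can make the loop wait
 * one extra tick before submitting the next request.
 */
static int deadline_passed(unsigned long now, unsigned long deadline)
{
	return (long)(now - deadline) >= 0;
}

int main(void)
{
	unsigned long now = ~0UL - 8;	/* pretend jiffies is about to wrap */

	printf("%d\n", deadline_passed(now, now));	/* 1: due exactly now */
	printf("%d\n", deadline_passed(now, now + 16));	/* 0: future, beyond the wrap */
	printf("%d\n", deadline_passed(now, now - 4));	/* 1: four ticks overdue */
	return 0;
}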

- Include small declaration bits for Jens (the WIN_NOP fix especially).

- Fix ide-pmac to conform to the recent API changes.

- Prepare and improve the handling of the request queue. It now sucks in as
   many requests as possible, which improves performance.
parent f7520fcf
@@ -27,6 +27,7 @@
  * symbols or by storing hooks at arch level).
  *
  */
 #include <linux/config.h>
 #include <linux/types.h>
 #include <linux/kernel.h>
@@ -77,8 +78,8 @@ struct pmac_ide_hwif {
 	struct scatterlist	*sg_table;
 	int			sg_nents;
 	int			sg_dma_direction;
-#endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
+#endif
 } pmac_ide[MAX_HWIFS] __pmacdata;
 
 static int pmac_ide_count;
@@ -255,11 +256,11 @@ struct {
 #define IDE_WAKEUP_DELAY_MS	2000
 
 static void pmac_ide_setup_dma(struct device_node *np, int ix);
-static int pmac_ide_dmaproc(ide_dma_action_t func, ide_drive_t *drive);
-static int pmac_ide_build_dmatable(ide_drive_t *drive, int ix, int wr);
-static int pmac_ide_tune_chipset(ide_drive_t *drive, byte speed);
-static void pmac_ide_tuneproc(ide_drive_t *drive, byte pio);
-static void pmac_ide_selectproc(ide_drive_t *drive);
+static int pmac_ide_dmaproc(ide_dma_action_t func, struct ata_device *drive, struct request *rq);
+static int pmac_ide_build_dmatable(struct ata_device *drive, struct request *rq, int ix, int wr);
+static int pmac_ide_tune_chipset(struct ata_device *drive, byte speed);
+static void pmac_ide_tuneproc(struct ata_device *drive, byte pio);
+static void pmac_ide_selectproc(struct ata_device *drive);
 #endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
@@ -322,7 +323,7 @@ pmac_ide_init_hwif_ports(hw_regs_t *hw,
 	ide_hwifs[ix].selectproc = pmac_ide_selectproc;
 	ide_hwifs[ix].speedproc = &pmac_ide_tune_chipset;
 	if (pmac_ide[ix].dma_regs && pmac_ide[ix].dma_table_cpu) {
-		ide_hwifs[ix].dmaproc = &pmac_ide_dmaproc;
+		ide_hwifs[ix].udma = pmac_ide_dmaproc;
 #ifdef CONFIG_BLK_DEV_IDEDMA_PMAC_AUTO
 		if (!noautodma)
 			ide_hwifs[ix].autodma = 1;
@@ -405,7 +406,7 @@ pmac_ide_do_setfeature(ide_drive_t *drive, byte command)
 {
 	int result = 1;
 	unsigned long flags;
-	struct ata_channel *hwif = HWIF(drive);
+	struct ata_channel *hwif = drive->channel;
 
 	disable_irq(hwif->irq);	/* disable_irq_nosync ?? */
 	udelay(1);
@@ -431,7 +432,7 @@ pmac_ide_do_setfeature(ide_drive_t *drive, byte command)
 	if (result)
 		printk(KERN_ERR "pmac_ide_do_setfeature disk not ready after SET_FEATURE !\n");
 out:
-	SELECT_MASK(HWIF(drive), drive, 0);
+	SELECT_MASK(drive->channel, drive, 0);
 	if (result == 0) {
 		drive->id->dma_ultra &= ~0xFF00;
 		drive->id->dma_mword &= ~0x0F00;
@@ -1024,7 +1025,7 @@ pmac_ide_setup_dma(struct device_node *np, int ix)
 			pmif->dma_table_cpu, pmif->dma_table_dma);
 		return;
 	}
-	ide_hwifs[ix].dmaproc = &pmac_ide_dmaproc;
+	ide_hwifs[ix].udma = pmac_ide_dmaproc;
 #ifdef CONFIG_BLK_DEV_IDEDMA_PMAC_AUTO
 	if (!noautodma)
 		ide_hwifs[ix].autodma = 1;
@@ -1092,11 +1093,10 @@ pmac_raw_build_sglist (int ix, struct request *rq)
  * for a transfer and sets the DBDMA channel to point to it.
  */
 static int
-pmac_ide_build_dmatable(ide_drive_t *drive, int ix, int wr)
+pmac_ide_build_dmatable(struct ata_device *drive, struct request *rq, int ix, int wr)
 {
 	struct dbdma_cmd *table;
 	int i, count = 0;
-	struct request *rq = HWGROUP(drive)->rq;
 	volatile struct dbdma_regs *dma = pmac_ide[ix].dma_regs;
 	struct scatterlist *sg;
@@ -1166,7 +1166,7 @@ pmac_ide_build_dmatable(ide_drive_t *drive, int ix, int wr)
 static void
 pmac_ide_destroy_dmatable (ide_drive_t *drive, int ix)
 {
-	struct pci_dev *dev = HWIF(drive)->pci_dev;
+	struct pci_dev *dev = drive->channel->pci_dev;
 	struct scatterlist *sg = pmac_ide[ix].sg_table;
 	int nents = pmac_ide[ix].sg_nents;
@@ -1326,17 +1326,17 @@ static void ide_toggle_bounce(ide_drive_t *drive, int on)
 {
 	dma64_addr_t addr = BLK_BOUNCE_HIGH;
 
-	if (on && drive->type == ATA_DISK && HWIF(drive)->highmem) {
+	if (on && drive->type == ATA_DISK && drive->channel->highmem) {
 		if (!PCI_DMA_BUS_IS_PHYS)
 			addr = BLK_BOUNCE_ANY;
 		else
-			addr = HWIF(drive)->pci_dev->dma_mask;
+			addr = drive->channel->pci_dev->dma_mask;
 	}
 
 	blk_queue_bounce_limit(&drive->queue, addr);
 }
 
-int pmac_ide_dmaproc(ide_dma_action_t func, ide_drive_t *drive)
+static int pmac_ide_dmaproc(ide_dma_action_t func, struct ata_device *drive, struct request *rq)
 {
 	int ix, dstat, reading, ata4;
 	volatile struct dbdma_regs *dma;
@@ -1369,10 +1369,10 @@ int pmac_ide_dmaproc(ide_dma_action_t func, ide_drive_t *drive)
 	case ide_dma_write:
 		/* this almost certainly isn't needed since we don't
 		   appear to have a rwproc */
-		if (HWIF(drive)->rwproc)
-			HWIF(drive)->rwproc(drive, func);
+		if (drive->channel->rwproc)
+			drive->channel->rwproc(drive, func);
 		reading = (func == ide_dma_read);
-		if (!pmac_ide_build_dmatable(drive, ix, !reading))
+		if (!pmac_ide_build_dmatable(drive, rq, ix, !reading))
 			return 1;
 		/* Apple adds 60ns to wrDataSetup on reads */
 		if (ata4 && (pmac_ide[ix].timings[unit] & TR_66_UDMA_EN)) {
@@ -1385,9 +1385,9 @@ int pmac_ide_dmaproc(ide_dma_action_t func, ide_drive_t *drive)
 		if (drive->type != ATA_DISK)
 			return 0;
 		ide_set_handler(drive, ide_dma_intr, WAIT_CMD, NULL);
-		if ((HWGROUP(drive)->rq->flags & REQ_DRIVE_ACB) &&
+		if ((rq->flags & REQ_DRIVE_ACB) &&
 		    (drive->addressing == 1)) {
-			struct ata_taskfile *args = HWGROUP(drive)->rq->special;
+			struct ata_taskfile *args = rq->special;
 			OUT_BYTE(args->taskfile.command, IDE_COMMAND_REG);
 		} else if (drive->addressing) {
 			OUT_BYTE(reading ? WIN_READDMA_EXT : WIN_WRITEDMA_EXT, IDE_COMMAND_REG);
...
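
A note on the API change running through the hunks above: the pmac DMA
paths no longer dig the active request out of HWGROUP(drive)->rq, they
receive it as an explicit parameter. A toy user-space mock of that
design change (the structs and names below are illustrative, not the
kernel's):

#include <stdio.h>

/* Toy mock, not kernel code: contrast the old implicit-global style
 * (HWGROUP(drive)->rq) with the new explicit-parameter style. */
struct request { const char *name; };
struct ata_device { const char *name; };

static struct request *hwgroup_rq;	/* stand-in for HWGROUP(drive)->rq */

/* old style: the hook silently depends on shared hwgroup state */
static void dma_start_old(struct ata_device *drive)
{
	printf("%s: DMA for \"%s\" (fetched from shared state)\n",
	       drive->name, hwgroup_rq->name);
}

/* new style: the caller hands over the request it owns */
static void dma_start_new(struct ata_device *drive, struct request *rq)
{
	printf("%s: DMA for \"%s\" (passed explicitly)\n",
	       drive->name, rq->name);
}

int main(void)
{
	struct ata_device hda = { "hda" };
	struct request rq = { "read, 8 sectors" };

	hwgroup_rq = &rq;
	dma_start_old(&hda);
	dma_start_new(&hda, &rq);
	return 0;
}

Passing rq down the call chain removes a hidden dependency on shared
hwgroup state and makes reentrancy problems easier to spot.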
@@ -829,6 +829,20 @@ static void init_gendisk(struct ata_channel *hwif)
 	return;
 }
 
+/*
+ * Returns the queue which corresponds to a given device.
+ *
+ * FIXME: this should take struct block_device * as argument in future.
+ */
+static request_queue_t *ata_get_queue(kdev_t dev)
+{
+	struct ata_channel *ch = (struct ata_channel *)blk_dev[major(dev)].data;
+
+	/* FIXME: ALERT: This discriminates between master and slave! */
+	return &ch->drives[DEVICE_NR(dev) & 1].queue;
+}
+
 static void channel_init(struct ata_channel *ch)
 {
 	if (!ch->present)
@@ -882,7 +896,7 @@ static void channel_init(struct ata_channel *ch)
 	init_gendisk(ch);
 
 	blk_dev[ch->major].data = ch;
-	blk_dev[ch->major].queue = ide_get_queue;
+	blk_dev[ch->major].queue = ata_get_queue;
 
 	/* all went well, flag this channel entry as valid */
 	ch->present = 1;
...
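
The DEVICE_NR(dev) & 1 in ata_get_queue() above picks master or slave
from the minor number. A small user-space illustration, assuming the
classic IDE layout of 2^6 = 64 minors per unit (PARTN_BITS == 6):

#include <stdio.h>

/* Illustration only: the 2.4/2.5 IDE driver reserves 2^6 = 64 minors per
 * unit, so DEVICE_NR(dev) is minor >> 6 and bit 0 of the result selects
 * master (0) or slave (1) on the channel -- hence the "& 1" above. */
#define PARTN_BITS	6
#define DEVICE_NR(minor)	((minor) >> PARTN_BITS)

int main(void)
{
	unsigned int minors[] = { 0, 1, 63, 64, 65 };
	unsigned int i;

	for (i = 0; i < sizeof(minors) / sizeof(minors[0]); i++)
		printf("minor %3u -> unit %u (%s)\n", minors[i],
		       DEVICE_NR(minors[i]) & 1,
		       (DEVICE_NR(minors[i]) & 1) ? "slave" : "master");
	return 0;
}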
@@ -522,9 +522,12 @@ ide_startstop_t task_no_data_intr(struct ata_device *drive, struct request *rq)
 	ide__sti();	/* local CPU only */
 
-	if (!OK_STAT(stat = GET_STAT(), READY_STAT, BAD_STAT))
-		return ide_error(drive, "task_no_data_intr", stat);
-		/* calls ide_end_drive_cmd */
+	if (!OK_STAT(stat = GET_STAT(), READY_STAT, BAD_STAT)) {
+		/* Keep quiet for NOP because it is expected to fail. */
+		if (args && args->taskfile.command != WIN_NOP)
+			return ide_error(drive, "task_no_data_intr", stat);
+	}
+
 	if (args)
 		ide_end_drive_cmd (drive, stat, GET_ERR());
@@ -854,6 +857,7 @@ void ide_cmd_type_parser(struct ata_taskfile *args)
 		return;
 
 	case WIN_NOP:
+		args->handler = task_no_data_intr;
 		args->command_type = IDE_DRIVE_TASK_NO_DATA;
 		return;
...
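
Since task_no_data_intr() now keeps quiet for WIN_NOP, a failing NOP no
longer spams the log. A user-space sketch of provoking exactly that path
through the HDIO_DRIVE_TASK ioctl (assumes a 2.4/2.5-era ide driver and
/dev/hda; since NOP is defined to abort, the failing ioctl is the
expected outcome):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/hdreg.h>

int main(void)
{
	/* HDIO_DRIVE_TASK argument layout: command, feature, nsector,
	 * sector, lcyl, hcyl, select.  WIN_NOP is opcode 0x00. */
	unsigned char args[7] = { WIN_NOP, 0, 0, 0, 0, 0, 0 };
	int fd = open("/dev/hda", O_RDONLY | O_NONBLOCK);

	if (fd < 0) {
		perror("open /dev/hda");
		return 1;
	}
	/* NOP is specified to abort with the error bit set, so a failing
	 * ioctl here is the expected outcome, not a driver bug -- which
	 * is exactly why task_no_data_intr() keeps quiet about it. */
	if (ioctl(fd, HDIO_DRIVE_TASK, args) < 0)
		perror("HDIO_DRIVE_TASK (NOP, expected to abort)");
	close(fd);
	return 0;
}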
@@ -1190,12 +1190,49 @@ void ide_stall_queue(ide_drive_t *drive, unsigned long timeout)
 	drive->PADAM_sleep = timeout + jiffies;
 }
 
+/*
+ * Determine the longest sleep time for the devices in our hwgroup.
+ */
+static unsigned long longest_sleep(struct ata_channel *channel)
+{
+	unsigned long sleep = 0;
+	int i;
+
+	for (i = 0; i < MAX_HWIFS; ++i) {
+		int unit;
+		struct ata_channel *ch = &ide_hwifs[i];
+
+		if (!ch->present)
+			continue;
+
+		if (ch->hwgroup != channel->hwgroup)
+			continue;
+
+		for (unit = 0; unit < MAX_DRIVES; ++unit) {
+			struct ata_device *drive = &ch->drives[unit];
+
+			if (!drive->present)
+				continue;
+
+			/* This device is sleeping and waiting to be serviced
+			 * later than any other device we checked thus far.
+			 */
+			if (drive->PADAM_sleep && (!sleep || time_after(sleep, drive->PADAM_sleep)))
+				sleep = drive->PADAM_sleep;
+		}
+	}
+
+	return sleep;
+}
+
 /*
  * Select the next device which will be serviced.
  */
 static struct ata_device *choose_urgent_device(struct ata_channel *channel)
 {
-	struct ata_device *best = NULL;
+	struct ata_device *choice = NULL;
+	unsigned long sleep = 0;
 	int i;
 
 	for (i = 0; i < MAX_HWIFS; ++i) {
@@ -1227,54 +1264,152 @@ static struct ata_device *choose_urgent_device(struct ata_channel *channel)
 			/* Take this device, if there is no device chosen thus far or
 			 * it's more urgent.
 			 */
-			if (!best || (drive->PADAM_sleep && (!best->PADAM_sleep || time_after(best->PADAM_sleep, drive->PADAM_sleep))))
+			if (!choice || (drive->PADAM_sleep && (!choice->PADAM_sleep || time_after(choice->PADAM_sleep, drive->PADAM_sleep))))
 			{
 				if (!blk_queue_plugged(&drive->queue))
-					best = drive;
+					choice = drive;
 			}
 		}
 	}
 
-	return best;
+	if (choice)
+		return choice;
+
+	channel->hwgroup->rq = NULL;
+	sleep = longest_sleep(channel);
+
+	if (sleep) {
+		/*
+		 * Take a short snooze, and then wake up again. Just in case
+		 * there are big differences in relative throughputs.. don't
+		 * want to hog the cpu too much.
+		 */
+		if (0 < (signed long)(jiffies + WAIT_MIN_SLEEP - sleep))
+			sleep = jiffies + WAIT_MIN_SLEEP;
+#if 1
+		if (timer_pending(&channel->hwgroup->timer))
+			printk(KERN_ERR "ide_set_handler: timer already active\n");
+#endif
+		set_bit(IDE_SLEEP, &channel->hwgroup->flags);
+		mod_timer(&channel->hwgroup->timer, sleep);
+		/* we purposely leave hwgroup busy while sleeping */
+	} else {
+		/* Ugly, but how can we sleep for the lock otherwise? perhaps
+		 * from tq_disk? */
+		ide_release_lock(&irq_lock);	/* for atari only */
+		clear_bit(IDE_BUSY, &channel->hwgroup->flags);
+	}
+
+	return NULL;
 }
 
+/* Place holders for later expansion of functionality.
+ */
+#define ata_pending_commands(drive)	(0)
+#define ata_can_queue(drive)		(1)
+
 /*
- * Determine the longes sleep time for the devices in our hwgroup.
+ * Feed commands to a drive until it barfs. Called with ide_lock/DRIVE_LOCK
+ * held and busy channel.
  */
-static unsigned long longest_sleep(struct ata_channel *channel)
+static void queue_commands(struct ata_device *drive, int masked_irq)
 {
-	unsigned long sleep = 0;
-	int i;
-
-	for (i = 0; i < MAX_HWIFS; ++i) {
-		int unit;
-		struct ata_channel *ch = &ide_hwifs[i];
-
-		if (!ch->present)
-			continue;
-
-		if (ch->hwgroup != channel->hwgroup)
-			continue;
-
-		for (unit = 0; unit < MAX_DRIVES; ++unit) {
-			struct ata_device *drive = &ch->drives[unit];
-
-			if (!drive->present)
-				continue;
-
-			/* This device is sleeping and waiting to be serviced
-			 * later than any other device we checked thus far.
-			 */
-			if (drive->PADAM_sleep && (!sleep || time_after(sleep, drive->PADAM_sleep)))
-				sleep = drive->PADAM_sleep;
-		}
-	}
-
-	return sleep;
-}
+	ide_hwgroup_t *hwgroup = drive->channel->hwgroup;
+	ide_startstop_t startstop = -1;
+
+	for (;;) {
+		struct request *rq = NULL;
+
+		if (!test_bit(IDE_BUSY, &hwgroup->flags))
+			printk(KERN_ERR "%s: hwgroup not busy while queueing\n", drive->name);
+
+		/* Abort early if we can't queue another command. For non-tcq,
+		 * ata_can_queue is always 1 since we never get here unless the
+		 * drive is idle.
+		 */
+		if (!ata_can_queue(drive)) {
+			if (!ata_pending_commands(drive))
+				clear_bit(IDE_BUSY, &hwgroup->flags);
+			break;
+		}
+
+		drive->PADAM_sleep = 0;
+
+		if (test_bit(IDE_DMA, &hwgroup->flags)) {
+			printk("ide_do_request: DMA in progress...\n");
+			break;
+		}
+
+		/* There's a small window between where the queue could be
+		 * replugged while we are in here when using tcq (in which
+		 * case the queue is probably empty anyways...), so check
+		 * and leave if appropriate. When not using tcq, this is
+		 * still a severe BUG!
+		 */
+		if (blk_queue_plugged(&drive->queue)) {
+			BUG();
+			break;
+		}
+
+		if (!(rq = elv_next_request(&drive->queue))) {
+			if (!ata_pending_commands(drive))
+				clear_bit(IDE_BUSY, &hwgroup->flags);
+			hwgroup->rq = NULL;
+			break;
+		}
+
+		/* If there are queued commands, we can't start a non-fs
+		 * request (really, a non-queueable command) until the
+		 * queue is empty.
+		 */
+		if (!(rq->flags & REQ_CMD) && ata_pending_commands(drive))
+			break;
+
+		hwgroup->rq = rq;
+
+		/* Some systems have trouble with IDE IRQs arriving while the
+		 * driver is still setting things up. So, here we disable the
+		 * IRQ used by this interface while the request is being
+		 * started. This may look bad at first, but pretty much the
+		 * same thing happens anyway when any interrupt comes in, IDE
+		 * or otherwise -- the kernel masks the IRQ while it is being
+		 * handled.
+		 */
+		if (masked_irq && drive->channel->irq != masked_irq)
+			disable_irq_nosync(drive->channel->irq);
+		spin_unlock(&ide_lock);
+		ide__sti();	/* allow other IRQs while we start this request */
+		startstop = start_request(drive, rq);
+		spin_lock_irq(&ide_lock);
+		if (masked_irq && drive->channel->irq != masked_irq)
+			enable_irq(drive->channel->irq);
+
+		/* command started, we are busy */
+		if (startstop == ide_started)
+			break;
+
+		/* start_request() can return either ide_stopped (no command
+		 * was started), ide_started (command started, don't queue
+		 * more), or ide_released (command started, try and queue
+		 * more).
+		 */
+#if 0
+		if (startstop == ide_stopped)
+			set_bit(IDE_BUSY, &hwgroup->flags);
+#endif
+	}
+}
 /*
- * Issue a new request to a drive from hwgroup.
+ * Issue a new request.
  * Caller must have already done spin_lock_irqsave(&ide_lock, ...)
  *
  * A hwgroup is a serialized group of IDE interfaces. Usually there is
@@ -1312,42 +1447,14 @@ static void ide_do_request(struct ata_channel *channel, int masked_irq)
 	while (!test_and_set_bit(IDE_BUSY, &hwgroup->flags)) {
 		struct ata_channel *ch;
-		ide_startstop_t startstop;
-		struct ata_device *drive = choose_urgent_device(channel);
-
-		if (drive == NULL) {
-			unsigned long sleep = 0;
-
-			hwgroup->rq = NULL;
-			sleep = longest_sleep(channel);
-
-			if (sleep) {
-				/*
-				 * Take a short snooze, and then wake up again.
-				 * Just in case there are big differences in
-				 * relative throughputs.. don't want to hog the
-				 * cpu too much.
-				 */
-				if (0 < (signed long)(jiffies + WAIT_MIN_SLEEP - sleep))
-					sleep = jiffies + WAIT_MIN_SLEEP;
-#if 1
-				if (timer_pending(&hwgroup->timer))
-					printk("ide_set_handler: timer already active\n");
-#endif
-				set_bit(IDE_SLEEP, &hwgroup->flags);
-				mod_timer(&hwgroup->timer, sleep);
-				/* we purposely leave hwgroup busy while sleeping */
-			} else {
-				/* Ugly, but how can we sleep for the lock
-				 * otherwise? perhaps from tq_disk? */
-				ide_release_lock(&irq_lock);	/* for atari only */
-				clear_bit(IDE_BUSY, &hwgroup->flags);
-			}
-
-			return;
-		}
+		struct ata_device *drive;
+
+		/* this will clear IDE_BUSY, if appropriate */
+		drive = choose_urgent_device(channel);
+		if (!drive)
+			break;
 
 		ch = drive->channel;
 
 		if (hwgroup->XXX_drive->channel->sharing_irq && ch != hwgroup->XXX_drive->channel && ch->io_ports[IDE_CONTROL_OFFSET]) {
@@ -1364,51 +1471,10 @@ static void ide_do_request(struct ata_channel *channel, int masked_irq)
 		 */
 		hwgroup->XXX_drive = drive;
 
-		/* Reset wait timeout.
-		 */
-		drive->PADAM_sleep = 0;
-
-		if (blk_queue_plugged(&drive->queue))
-			BUG();
-
-		/* Just continuing an interrupted request maybe.
-		 */
-		hwgroup->rq = elv_next_request(&drive->queue);
-
-		/*
-		 * Some systems have trouble with IDE IRQs arriving while the
-		 * driver is still setting things up. So, here we disable the
-		 * IRQ used by this interface while the request is being
-		 * started. This may look bad at first, but pretty much the
-		 * same thing happens anyway when any interrupt comes in, IDE
-		 * or otherwise -- the kernel masks the IRQ while it is being
-		 * handled.
-		 */
-		if (masked_irq && ch->irq != masked_irq)
-			disable_irq_nosync(ch->irq);
-		spin_unlock(&ide_lock);
-		ide__sti();	/* allow other IRQs while we start this request */
-		startstop = start_request(drive, hwgroup->rq);
-		spin_lock_irq(&ide_lock);
-		if (masked_irq && ch->irq != masked_irq)
-			enable_irq(ch->irq);
-
-		if (startstop == ide_stopped)
-			clear_bit(IDE_BUSY, &hwgroup->flags);
+		queue_commands(drive, masked_irq);
 	}
 }
 
-/*
- * Returns the queue which corresponds to a given device.
- */
-request_queue_t *ide_get_queue(kdev_t dev)
-{
-	struct ata_channel *ch = (struct ata_channel *)blk_dev[major(dev)].data;
-
-	/* FIXME: ALLERT: This discriminates between master and slave! */
-	return &ch->drives[DEVICE_NR(dev) & 1].queue;
-}
-
 void do_ide_request(request_queue_t *q)
 {
 	ide_do_request(q->queuedata, 0);
@@ -1419,20 +1485,20 @@ void do_ide_request(request_queue_t *q)
  * retry the current request in PIO mode instead of risking tossing it
  * all away
  */
-static void dma_timeout_retry(ide_drive_t *drive, struct request *rq)
+static void dma_timeout_retry(struct ata_device *drive, struct request *rq)
 {
-	struct ata_channel *hwif = drive->channel;
+	struct ata_channel *ch = drive->channel;
 
 	/*
 	 * end current dma transaction
 	 */
-	hwif->udma(ide_dma_end, drive, rq);
+	ch->udma(ide_dma_end, drive, rq);
 
 	/*
 	 * complain a little, later we might remove some of this verbosity
 	 */
 	printk("%s: timeout waiting for DMA\n", drive->name);
-	hwif->udma(ide_dma_timeout, drive, rq);
+	ch->udma(ide_dma_timeout, drive, rq);
 
 	/*
 	 * Disable dma for now, but remember that we did so because of
@@ -1441,7 +1507,7 @@ static void dma_timeout_retry(ide_drive_t *drive, struct request *rq)
 	 */
 	drive->retry_pio++;
 	drive->state = DMA_PIO_RETRY;
-	hwif->udma(ide_dma_off_quietly, drive, rq);
+	ch->udma(ide_dma_off_quietly, drive, rq);
 
 	/*
 	 * un-busy drive etc (hwgroup->busy is cleared on return) and
@@ -1713,11 +1779,11 @@ ide_drive_t *get_info_ptr(kdev_t i_rdev)
 	int h;
 
 	for (h = 0; h < MAX_HWIFS; ++h) {
-		struct ata_channel *hwif = &ide_hwifs[h];
-		if (hwif->present && major == hwif->major) {
+		struct ata_channel *ch = &ide_hwifs[h];
+		if (ch->present && major == ch->major) {
 			int unit = DEVICE_NR(i_rdev);
 			if (unit < MAX_DRIVES) {
-				struct ata_device *drive = &hwif->drives[unit];
+				struct ata_device *drive = &ch->drives[unit];
 				if (drive->present)
 					return drive;
 			}
@@ -2029,7 +2095,7 @@ void ide_unregister(struct ata_channel *ch)
 	int unit, i;
 	unsigned long flags;
 	unsigned int p, minor;
-	struct ata_channel old_hwif;
+	struct ata_channel old;
 	int n = 0;
 
 	spin_lock_irqsave(&ide_lock, flags);
@@ -2156,40 +2222,41 @@ void ide_unregister(struct ata_channel *ch)
 	 * it.
 	 */
-	old_hwif = *ch;
+	old = *ch;
 	init_hwif_data(ch, ch->index);
-	ch->hwgroup = old_hwif.hwgroup;
-	ch->tuneproc = old_hwif.tuneproc;
-	ch->speedproc = old_hwif.speedproc;
-	ch->selectproc = old_hwif.selectproc;
-	ch->resetproc = old_hwif.resetproc;
-	ch->intrproc = old_hwif.intrproc;
-	ch->maskproc = old_hwif.maskproc;
-	ch->quirkproc = old_hwif.quirkproc;
-	ch->rwproc = old_hwif.rwproc;
-	ch->ata_read = old_hwif.ata_read;
-	ch->ata_write = old_hwif.ata_write;
-	ch->atapi_read = old_hwif.atapi_read;
-	ch->atapi_write = old_hwif.atapi_write;
-	ch->udma = old_hwif.udma;
-	ch->busproc = old_hwif.busproc;
-	ch->bus_state = old_hwif.bus_state;
-	ch->dma_base = old_hwif.dma_base;
-	ch->dma_extra = old_hwif.dma_extra;
-	ch->config_data = old_hwif.config_data;
-	ch->select_data = old_hwif.select_data;
-	ch->proc = old_hwif.proc;
+	ch->hwgroup = old.hwgroup;
+	ch->tuneproc = old.tuneproc;
+	ch->speedproc = old.speedproc;
+	ch->selectproc = old.selectproc;
+	ch->resetproc = old.resetproc;
+	ch->intrproc = old.intrproc;
+	ch->maskproc = old.maskproc;
+	ch->quirkproc = old.quirkproc;
+	ch->rwproc = old.rwproc;
+	ch->ata_read = old.ata_read;
+	ch->ata_write = old.ata_write;
+	ch->atapi_read = old.atapi_read;
+	ch->atapi_write = old.atapi_write;
+	ch->udma = old.udma;
+	ch->busproc = old.busproc;
+	ch->bus_state = old.bus_state;
+	ch->dma_base = old.dma_base;
+	ch->dma_extra = old.dma_extra;
+	ch->config_data = old.config_data;
+	ch->select_data = old.select_data;
+	ch->proc = old.proc;
+	/* FIXME: most probably this is always right! */
 #ifndef CONFIG_BLK_DEV_IDECS
-	ch->irq = old_hwif.irq;
+	ch->irq = old.irq;
 #endif
-	ch->major = old_hwif.major;
-	ch->chipset = old_hwif.chipset;
-	ch->autodma = old_hwif.autodma;
-	ch->udma_four = old_hwif.udma_four;
+	ch->major = old.major;
+	ch->chipset = old.chipset;
+	ch->autodma = old.autodma;
+	ch->udma_four = old.udma_four;
 #ifdef CONFIG_BLK_DEV_IDEPCI
-	ch->pci_dev = old_hwif.pci_dev;
+	ch->pci_dev = old.pci_dev;
 #endif
-	ch->straight8 = old_hwif.straight8;
+	ch->straight8 = old.straight8;
 
 abort:
 	spin_unlock_irqrestore(&ide_lock, flags);
@@ -3352,7 +3419,6 @@ devfs_handle_t ide_devfs_handle;
 EXPORT_SYMBOL(ide_lock);
 EXPORT_SYMBOL(drive_is_flashcard);
 EXPORT_SYMBOL(ide_timer_expiry);
-EXPORT_SYMBOL(ide_get_queue);
 EXPORT_SYMBOL(ide_add_generic_settings);
 EXPORT_SYMBOL(do_ide_request);
 
 /*
...
@@ -34,6 +34,7 @@
 #define ECC_STAT		0x04	/* Corrected error */
 #define DRQ_STAT		0x08
 #define SEEK_STAT		0x10
+#define SERVICE_STAT		SEEK_STAT
 #define WRERR_STAT		0x20
 #define READY_STAT		0x40
 #define BUSY_STAT		0x80
@@ -49,6 +50,13 @@
 #define BBD_ERR			0x80	/* pre-EIDE meaning: block marked bad */
 #define ICRC_ERR		0x80	/* new meaning: CRC error during transfer */
 
+/*
+ * sector count bits
+ */
+#define NSEC_CD			0x01
+#define NSEC_IO			0x02
+#define NSEC_REL		0x04
+
 /*
  * Command Header sizes for IOCTL commands
  */
...
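
For reference, the new NSEC_* bits name the ATA/ATAPI "interrupt reason"
encoding of the Sector Count register (CoD, IO, REL), and SERVICE_STAT
aliases the status bit the overlapped/queued feature set reuses. A
hedged user-space sketch of the conventional decode (illustrative only;
the CoD/IO/REL mapping is the ATA/ATAPI convention, not code from this
patch):

#include <stdio.h>

#define NSEC_CD		0x01	/* CoD: 1 = command packet, 0 = data */
#define NSEC_IO		0x02	/* IO:  1 = device-to-host, 0 = host-to-device */
#define NSEC_REL	0x04	/* REL: device released the bus (overlap) */

static void decode_ireason(unsigned char nsec)
{
	printf("CoD=%d IO=%d REL=%d -> ",
	       !!(nsec & NSEC_CD), !!(nsec & NSEC_IO), !!(nsec & NSEC_REL));

	if (nsec & NSEC_REL)
		printf("bus released, expect a SERVICE interrupt later\n");
	else if ((nsec & (NSEC_CD | NSEC_IO)) == (NSEC_CD | NSEC_IO))
		printf("command complete, status available\n");
	else if (nsec & NSEC_CD)
		printf("device wants the command packet\n");
	else if (nsec & NSEC_IO)
		printf("data ready for the host\n");
	else
		printf("device wants data from the host\n");
}

int main(void)
{
	decode_ireason(NSEC_CD);		/* awaiting packet */
	decode_ireason(NSEC_IO);		/* data in */
	decode_ireason(NSEC_CD | NSEC_IO);	/* status phase */
	decode_ireason(NSEC_REL);		/* bus released */
	return 0;
}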