Commit 64d3cfb4 authored by Anton Blanchard

Merge samba.org:/scratch/anton/linux-2.5

into samba.org:/scratch/anton/linux-2.5_ppc64
parents 79a5d877 3980957c
......@@ -280,7 +280,7 @@ init_e100_ide (void)
hwif->tuneproc = &tune_e100_ide;
hwif->dmaproc = &e100_dmaproc;
hwif->ata_read = e100_ide_input_data;
hwif->ata_write = e100_ide_input_data;
hwif->ata_write = e100_ide_output_data;
hwif->atapi_read = e100_atapi_read;
hwif->atapi_write = e100_atapi_write;
}
......@@ -560,32 +560,6 @@ e100_ide_output_data (ide_drive_t *drive, void *buffer, unsigned int wcount)
e100_atapi_write(drive, buffer, wcount << 2);
}
/*
* The multiplexor for ide_xxxput_data and atapi calls
*/
static void
e100_ideproc (ide_ide_action_t func, ide_drive_t *drive,
void *buffer, unsigned int length)
{
switch (func) {
case ideproc_ide_input_data:
e100_ide_input_data(drive, buffer, length);
break;
case ideproc_ide_output_data:
e100_ide_input_data(drive, buffer, length);
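/* Note: this removed case dispatched output through e100_ide_input_data()
 * -- the same copy-paste bug that the hwif->ata_write fix above corrects. */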
break;
case ideproc_atapi_read:
e100_atapi_read(drive, buffer, length);
break;
case ideproc_atapi_write:
e100_atapi_write(drive, buffer, length);
break;
default:
printk("e100_ideproc: unsupported func %d!\n", func);
break;
}
}
/* we only have one DMA channel on the chip for ATA, so we can keep these static */
static etrax_dma_descr ata_descrs[MAX_DMA_DESCRS];
static unsigned int ata_tot_size;
......
......@@ -1659,6 +1659,7 @@ static inline void check_timer(void)
printk(KERN_INFO "...trying to set up timer as ExtINT IRQ...");
timer_ack = 0;
init_8259A(0);
make_8259A_irq(0);
apic_write_around(APIC_LVT0, APIC_DM_EXTINT);
......
......@@ -422,9 +422,10 @@ void __init ide_init_amd74xx(struct ata_channel *hwif)
hwif->speedproc = &amd_set_drive;
hwif->autodma = 0;
hwif->io_32bit = 1;
hwif->unmask = 1;
for (i = 0; i < 2; i++) {
hwif->drives[i].io_32bit = 1;
hwif->drives[i].unmask = 1;
hwif->drives[i].autotune = 1;
hwif->drives[i].dn = hwif->unit * 2 + i;
}
......
......@@ -21,7 +21,7 @@
*
* A.Hartgers@stud.tue.nl, JZDQC@CUNYVM.CUNY.edu, abramov@cecmow.enet.dec.com,
* bardj@utopia.ppp.sn.no, bart@gaga.tue.nl, bbol001@cs.auckland.ac.nz,
* chrisc@dbass.demon.co.uk, dalecki@evision-ventures.com,
* chrisc@dbass.demon.co.uk, martin@dalecki.de,
* derekn@vw.ece.cmu.edu, florian@btp2x3.phy.uni-bayreuth.de,
* flynn@dei.unipd.it, gadio@netvision.net.il, godzilla@futuris.net,
* j@pobox.com, jkemp1@mises.uni-paderborn.de, jtoppe@hiwaay.net,
......@@ -403,19 +403,19 @@ void cmd640_dump_regs (void)
*/
static void __init check_prefetch (unsigned int index)
{
ide_drive_t *drive = cmd_drives[index];
struct ata_device *drive = cmd_drives[index];
byte b = get_cmd640_reg(prefetch_regs[index]);
if (b & prefetch_masks[index]) { /* is prefetch off? */
drive->no_unmask = 0;
drive->no_io_32bit = 1;
drive->io_32bit = 0;
drive->channel->no_unmask = 0;
drive->channel->no_io_32bit = 1;
drive->channel->io_32bit = 0;
} else {
#if CMD640_PREFETCH_MASKS
drive->no_unmask = 1;
drive->unmask = 0;
drive->channel->no_unmask = 1;
drive->channel->unmask = 0;
#endif
drive->no_io_32bit = 0;
drive->channel->no_io_32bit = 0;
}
}
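In the hunk above the sense of the test is inverted relative to its wording: a set bit in prefetch_masks[] means prefetch is disabled. With prefetch off the channel may unmask IRQs but must not use 32-bit I/O; with prefetch on (and CMD640_PREFETCH_MASKS defined) the constraints flip. A compact user-space model of that rule, for illustration only:

	#include <stdio.h>

	struct chan { int no_unmask, unmask, no_io_32bit, io_32bit; };

	/* Model of check_prefetch(): mask bit set == prefetch off. */
	static void apply_prefetch_rule(struct chan *ch, unsigned char reg,
					unsigned char mask)
	{
		if (reg & mask) {		/* prefetch off */
			ch->no_unmask = 0;	/* unmasking allowed */
			ch->no_io_32bit = 1;	/* 32-bit I/O forbidden */
			ch->io_32bit = 0;
		} else {			/* prefetch on */
			ch->no_unmask = 1;	/* unmasking forbidden */
			ch->unmask = 0;
			ch->no_io_32bit = 0;	/* 32-bit I/O allowed */
		}
	}

	int main(void)
	{
		struct chan ch = { 0 };

		apply_prefetch_rule(&ch, 0x10, 0x10);
		printf("prefetch off: no_io_32bit=%d no_unmask=%d\n",
		       ch.no_io_32bit, ch.no_unmask);
		return 0;
	}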
......@@ -460,15 +460,15 @@ static void set_prefetch_mode (unsigned int index, int mode)
b = get_cmd640_reg(reg);
if (mode) { /* want prefetch on? */
#if CMD640_PREFETCH_MASKS
drive->no_unmask = 1;
drive->unmask = 0;
drive->channel->no_unmask = 1;
drive->channel->unmask = 0;
#endif
drive->no_io_32bit = 0;
drive->channel->no_io_32bit = 0;
b &= ~prefetch_masks[index]; /* enable prefetch */
} else {
drive->no_unmask = 0;
drive->no_io_32bit = 1;
drive->io_32bit = 0;
drive->channel->no_unmask = 0;
drive->channel->no_io_32bit = 1;
drive->channel->io_32bit = 0;
b |= prefetch_masks[index]; /* disable prefetch */
}
put_cmd640_reg(reg, b);
......@@ -827,7 +827,7 @@ int __init ide_probe_for_cmd640x(void)
retrieve_drive_counts (index);
check_prefetch (index);
printk("cmd640: drive%d timings/prefetch(%s) preserved",
index, drive->no_io_32bit ? "off" : "on");
index, drive->channel->no_io_32bit ? "off" : "on");
display_clocks(index);
}
#else
......@@ -836,7 +836,7 @@ int __init ide_probe_for_cmd640x(void)
*/
check_prefetch (index);
printk("cmd640: drive%d timings/prefetch(%s) preserved\n",
index, drive->no_io_32bit ? "off" : "on");
index, drive->channel->no_io_32bit ? "off" : "on");
#endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */
}
......
......@@ -88,8 +88,7 @@ static void tune_dtc2278 (ide_drive_t *drive, byte pio)
/*
* 32bit I/O has to be enabled for *both* drives at the same time.
*/
drive->io_32bit = 1;
drive->channel->drives[!drive->select.b.unit].io_32bit = 1;
drive->channel->io_32bit = 1;
}
void __init init_dtc2278 (void)
......@@ -120,10 +119,11 @@ void __init init_dtc2278 (void)
ide_hwifs[0].chipset = ide_dtc2278;
ide_hwifs[1].chipset = ide_dtc2278;
ide_hwifs[0].tuneproc = &tune_dtc2278;
ide_hwifs[0].drives[0].no_unmask = 1;
ide_hwifs[0].drives[1].no_unmask = 1;
ide_hwifs[1].drives[0].no_unmask = 1;
ide_hwifs[1].drives[1].no_unmask = 1;
/* FIXME: What about the following?!
ide_hwifs[1].tuneproc = &tune_dtc2278;
*/
ide_hwifs[0].no_unmask = 1;
ide_hwifs[1].no_unmask = 1;
ide_hwifs[0].unit = ATA_PRIMARY;
ide_hwifs[1].unit = ATA_SECONDARY;
}
......@@ -261,11 +261,11 @@ static void ht_set_prefetch(ide_drive_t *drive, byte state)
*/
if (state) {
drive->drive_data |= t; /* enable prefetch mode */
drive->no_unmask = 1;
drive->unmask = 0;
drive->channel->no_unmask = 1;
drive->channel->unmask = 0;
} else {
drive->drive_data &= ~t; /* disable prefetch mode */
drive->no_unmask = 0;
drive->channel->no_unmask = 0;
}
restore_flags (flags); /* all CPUs */
......
......@@ -669,6 +669,12 @@ static int cdrom_decode_status (ide_startstop_t *startstop, ide_drive_t *drive,
request or data protect error.*/
ide_dump_status (drive, "command error", stat);
cdrom_end_request(drive, 0);
} else if (sense_key == MEDIUM_ERROR) {
/* No point in re-trying a zillion times on a bad
* sector. The error is not correctable at all.
*/
ide_dump_status (drive, "media error (bad sector)", stat);
cdrom_end_request(drive, 0);
} else if ((err & ~ABRT_ERR) != 0) {
/* Go to the default handler
for other errors. */
......
......@@ -755,8 +755,6 @@ static void idedisk_pre_reset (ide_drive_t *drive)
drive->special.b.recalibrate = legacy;
if (OK_TO_RESET_CONTROLLER)
drive->mult_count = 0;
if (!drive->keep_settings && !drive->using_dma)
drive->mult_req = 0;
if (drive->mult_req != drive->mult_count)
drive->special.b.set_multmode = 1;
}
......@@ -1231,7 +1229,14 @@ static void idedisk_setup(ide_drive_t *drive)
drive->special.b.set_multmode = 1;
#endif
}
drive->no_io_32bit = id->dword_io ? 1 : 0;
/* FIXME: Nowadays there are many chipsets out there which *require* 32
* bit IO. Those will most probably not work properly with drives not
* supporting this. But right now we don't do anything about this. We
* don't even *warn* the user!
*/
drive->channel->no_io_32bit = id->dword_io ? 1 : 0;
if (drive->id->cfs_enable_2 & 0x3000)
write_cache(drive, (id->cfs_enable_2 & 0x3000));
......
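The FIXME above notes that drives which do not advertise dword_io get 32-bit I/O switched off silently, even on chipsets that require it. A minimal sketch of the warning it asks for; channel_requires_32bit() is a hypothetical helper, not a function in this tree:

	/* Hypothetical sketch only: channel_requires_32bit() does not exist here. */
	if (!id->dword_io && channel_requires_32bit(drive->channel))
		printk(KERN_WARNING "%s: drive does not support 32-bit I/O, "
		       "but the chipset requires it\n", drive->name);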
......@@ -599,13 +599,19 @@ int ide_dmaproc (ide_dma_action_t func, ide_drive_t *drive)
printk("%s: DMA disabled\n", drive->name);
case ide_dma_off_quietly:
set_high = 0;
drive->using_tcq = 0;
outb(inb(dma_base+2) & ~(1<<(5+unit)), dma_base+2);
#ifdef CONFIG_BLK_DEV_IDE_TCQ
hwif->dmaproc(ide_dma_queued_off, drive);
#endif
case ide_dma_on:
ide_toggle_bounce(drive, set_high);
drive->using_dma = (func == ide_dma_on);
if (drive->using_dma)
if (drive->using_dma) {
outb(inb(dma_base+2)|(1<<(5+unit)), dma_base+2);
#ifdef CONFIG_BLK_DEV_IDE_TCQ_DEFAULT
hwif->dmaproc(ide_dma_queued_on, drive);
#endif
}
return 0;
case ide_dma_check:
return config_drive_for_dma (drive);
......
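The inb()/outb() pair above toggles the per-drive "DMA capable" flag in the busmaster status register at dma_base+2: bit 5 for unit 0, bit 6 for unit 1. A small user-space model of that bit manipulation:

	#include <stdio.h>

	/* Model of the register update: bit 5+unit is the per-drive
	 * "DMA capable" flag in the busmaster status register. */
	static unsigned char set_dma_capable(unsigned char reg, int unit, int on)
	{
		unsigned char bit = 1 << (5 + unit);

		return on ? (reg | bit) : (reg & ~bit);
	}

	int main(void)
	{
		unsigned char reg = 0;

		reg = set_dma_capable(reg, 0, 1);	/* 0x20: drive 0 capable */
		reg = set_dma_capable(reg, 1, 1);	/* 0x60: both drives */
		reg = set_dma_capable(reg, 0, 0);	/* 0x40: drive 0 cleared */
		printf("0x%02x\n", reg);
		return 0;
	}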
......@@ -156,7 +156,7 @@ void ata_read(ide_drive_t *drive, void *buffer, unsigned int wcount)
return;
}
io_32bit = drive->io_32bit;
io_32bit = drive->channel->io_32bit;
if (io_32bit) {
#if SUPPORT_VLB_SYNC
......@@ -167,7 +167,7 @@ void ata_read(ide_drive_t *drive, void *buffer, unsigned int wcount)
ata_read_32(drive, buffer, wcount);
} else {
#if SUPPORT_SLOW_DATA_PORTS
if (drive->slow)
if (drive->channel->slow)
ata_read_slow(drive, buffer, wcount);
else
#endif
......@@ -187,7 +187,7 @@ void ata_write(ide_drive_t *drive, void *buffer, unsigned int wcount)
return;
}
io_32bit = drive->io_32bit;
io_32bit = drive->channel->io_32bit;
if (io_32bit) {
#if SUPPORT_VLB_SYNC
......@@ -198,7 +198,7 @@ void ata_write(ide_drive_t *drive, void *buffer, unsigned int wcount)
ata_write_32(drive, buffer, wcount);
} else {
#if SUPPORT_SLOW_DATA_PORTS
if (drive->slow)
if (drive->channel->slow)
ata_write_slow(drive, buffer, wcount);
else
#endif
......@@ -413,6 +413,20 @@ ide_startstop_t ata_taskfile(ide_drive_t *drive,
struct hd_driveid *id = drive->id;
u8 HIHI = (drive->addressing) ? 0xE0 : 0xEF;
#if 0
printk("ata_taskfile ... %p\n", args->handler);
printk(" sector feature %02x\n", args->taskfile.feature);
printk(" sector count %02x\n", args->taskfile.sector_count);
printk(" drive/head %02x\n", args->taskfile.device_head);
printk(" command %02x\n", args->taskfile.command);
if (rq)
printk(" rq->nr_sectors %2li\n", rq->nr_sectors);
else
printk(" rq-> = null\n");
#endif
/* (ks/hs): Moved to start, do not use for multiple out commands */
if (args->handler != task_mulout_intr) {
if (IDE_CONTROL_REG)
......@@ -577,18 +591,22 @@ static ide_startstop_t task_in_intr (ide_drive_t *drive)
ata_read(drive, pBuf, SECTOR_WORDS);
ide_unmap_rq(rq, pBuf, &flags);
/*
* first segment of the request is complete. note that this does not
* necessarily mean that the entire request is done!! this is only
* true if ide_end_request() returns 0.
*/
if (--rq->current_nr_sectors <= 0) {
/* (hs): swapped next 2 lines */
DTF("Request Ended stat: %02x\n", GET_STAT());
if (ide_end_request(drive, 1)) {
ide_set_handler(drive, &task_in_intr, WAIT_CMD, NULL);
return ide_started;
if (!ide_end_request(drive, 1))
return ide_stopped;
}
} else {
/*
* still data left to transfer
*/
ide_set_handler(drive, &task_in_intr, WAIT_CMD, NULL);
return ide_started;
}
return ide_stopped;
}
static ide_startstop_t pre_task_out_intr(ide_drive_t *drive, struct request *rq)
......@@ -874,7 +892,6 @@ void ide_cmd_type_parser(struct ata_taskfile *args)
return;
case WIN_NOP:
args->command_type = IDE_DRIVE_TASK_NO_DATA;
return;
......@@ -904,11 +921,6 @@ int ide_raw_taskfile(ide_drive_t *drive, struct ata_taskfile *args, byte *buf)
struct ata_request star;
ata_ar_init(drive, &star);
/* Don't put this request on free_req list after usage.
*/
star.ar_flags |= ATA_AR_STATIC;
init_taskfile_request(&rq);
rq.buffer = buf;
......@@ -976,6 +988,7 @@ int ide_cmd_ioctl(ide_drive_t *drive, unsigned long arg)
if (argbuf == NULL)
return -ENOMEM;
memcpy(argbuf, vals, 4);
memset(argbuf + 4, 0, argsize - 4);
}
if (set_transfer(drive, &args)) {
......@@ -986,14 +999,8 @@ int ide_cmd_ioctl(ide_drive_t *drive, unsigned long arg)
/* Issue ATA command and wait for completion.
*/
/* FIXME: Do we really have to zero out the buffer?
*/
memset(argbuf, 4, SECTOR_WORDS * 4 * vals[3]);
ide_init_drive_cmd(&rq);
rq.buffer = argbuf;
memcpy(argbuf, vals, 4);
err = ide_do_drive_cmd(drive, &rq, ide_wait);
if (!err && xfer_rate) {
......
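The two hunks above fix the argument buffer setup in ide_cmd_ioctl(): the removed memset(argbuf, 4, ...) filled the buffer with the byte 0x04 rather than zeroing it, while the new code copies the four command bytes once and zeroes only the data area. A user-space model of the corrected layout:

	#include <stdlib.h>
	#include <string.h>

	/* Model of the fixed setup: four command bytes up front, the rest
	 * of the buffer zeroed for data returned by the drive. */
	static unsigned char *make_argbuf(const unsigned char vals[4], size_t argsize)
	{
		unsigned char *argbuf = malloc(argsize);

		if (!argbuf)
			return NULL;
		memcpy(argbuf, vals, 4);		/* command header */
		memset(argbuf + 4, 0, argsize - 4);	/* zero the data area */
		return argbuf;
	}

	int main(void)
	{
		unsigned char vals[4] = { 0xec, 0, 0, 1 };	/* IDENTIFY, 1 sector */
		unsigned char *buf = make_argbuf(vals, 4 + 512);

		free(buf);
		return 0;
	}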
......@@ -51,21 +51,17 @@
*/
#undef IDE_TCQ_FIDDLE_SI
/*
* wait for data phase before starting DMA or not
*/
#undef IDE_TCQ_WAIT_DATAPHASE
ide_startstop_t ide_dmaq_intr(ide_drive_t *drive);
ide_startstop_t ide_service(ide_drive_t *drive);
static inline void drive_ctl_nien(ide_drive_t *drive, int clear)
{
#ifdef IDE_TCQ_NIEN
if (IDE_CONTROL_REG) {
int mask = clear ? 0x00 : 0x02;
if (IDE_CONTROL_REG)
OUT_BYTE(drive->ctl | mask, IDE_CONTROL_REG);
}
#endif
}
......@@ -123,7 +119,6 @@ static void ide_tcq_invalidate_queue(ide_drive_t *drive)
init_taskfile_request(ar->ar_rq);
ar->ar_rq->rq_dev = mk_kdev(drive->channel->major, (drive->select.b.unit)<<PARTN_BITS);
ar->ar_rq->special = ar;
ar->ar_flags |= ATA_AR_RETURN;
_elv_add_request(q, ar->ar_rq, 0, 0);
/*
......@@ -222,7 +217,7 @@ ide_startstop_t ide_service(ide_drive_t *drive)
{
struct ata_request *ar;
byte feat, stat;
int tag;
int tag, ret;
TCQ_PRINTK("%s: started service\n", drive->name);
......@@ -272,9 +267,6 @@ ide_startstop_t ide_service(ide_drive_t *drive)
return ide_stopped;
}
/*
* start dma
*/
tag = feat >> 3;
IDE_SET_CUR_TAG(drive, tag);
......@@ -293,16 +285,16 @@ ide_startstop_t ide_service(ide_drive_t *drive)
*/
if (rq_data_dir(ar->ar_rq) == READ) {
TCQ_PRINTK("ide_service: starting READ %x\n", stat);
drive->channel->dmaproc(ide_dma_read_queued, drive);
ret = drive->channel->dmaproc(ide_dma_read_queued, drive);
} else {
TCQ_PRINTK("ide_service: starting WRITE %x\n", stat);
drive->channel->dmaproc(ide_dma_write_queued, drive);
ret = drive->channel->dmaproc(ide_dma_write_queued, drive);
}
/*
* dmaproc set intr handler
*/
return ide_started;
return !ret ? ide_started : ide_stopped;
}
ide_startstop_t ide_check_service(ide_drive_t *drive)
......@@ -410,14 +402,15 @@ ide_startstop_t ide_dmaq_intr(ide_drive_t *drive)
*/
static int ide_tcq_configure(ide_drive_t *drive)
{
int tcq_mask = 1 << 1 | 1 << 14;
int tcq_bits = tcq_mask | 1 << 15;
struct ata_taskfile args;
int tcq_supp = 1 << 1 | 1 << 14;
/*
* bits 14 and 1 must be set in word 83 of the device id to indicate
* support for dma queued protocol
* support for dma queued protocol, and bit 15 must be cleared
*/
if ((drive->id->command_set_2 & tcq_supp) != tcq_supp)
if ((drive->id->command_set_2 & tcq_bits) ^ tcq_mask)
return -EIO;
memset(&args, 0, sizeof(args));
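The reworked check requires bits 1 and 14 of identify word 83 (command_set_2) to be set and the validity bit 15 to be clear; XORing the masked word against tcq_mask yields zero exactly in that case. A user-space model of the test:

	#include <stdio.h>

	/* Returns 1 if word 83 advertises the DMA queued protocol: bits 1
	 * and 14 set, validity bit 15 clear. Mirrors the kernel's
	 * ((command_set_2 & tcq_bits) ^ tcq_mask) test. */
	static int tcq_supported(unsigned short command_set_2)
	{
		const unsigned short tcq_mask = 1 << 1 | 1 << 14;
		const unsigned short tcq_bits = tcq_mask | 1 << 15;

		return ((command_set_2 & tcq_bits) ^ tcq_mask) == 0;
	}

	int main(void)
	{
		printf("%d\n", tcq_supported(0x4002));	/* 1: bits 1+14 set */
		printf("%d\n", tcq_supported(0xc002));	/* 0: bit 15 set, word invalid */
		return 0;
	}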
......@@ -477,10 +470,13 @@ static int ide_tcq_configure(ide_drive_t *drive)
*/
static int ide_enable_queued(ide_drive_t *drive, int on)
{
int depth = drive->using_tcq ? drive->queue_depth : 0;
/*
* disable or adjust queue depth
*/
if (!on) {
if (drive->using_tcq)
printk("%s: TCQ disabled\n", drive->name);
drive->using_tcq = 0;
return 0;
......@@ -491,25 +487,33 @@ static int ide_enable_queued(ide_drive_t *drive, int on)
return 1;
}
/*
* possibly expand command list
*/
if (ide_build_commandlist(drive))
return 1;
if (depth != drive->queue_depth)
printk("%s: tagged command queueing enabled, command queue depth %d\n", drive->name, drive->queue_depth);
drive->using_tcq = 1;
/*
* clear stats
*/
drive->tcq->max_depth = 0;
return 0;
}
int ide_tcq_wait_dataphase(ide_drive_t *drive)
{
#ifdef IDE_TCQ_WAIT_DATAPHASE
ide_startstop_t foo;
if (ide_wait_stat(&startstop, drive, READY_STAT | DRQ_STAT, BUSY_STAT, WAIT_READY)) {
if (ide_wait_stat(&foo, drive, READY_STAT | DRQ_STAT, BUSY_STAT, WAIT_READY)) {
printk("%s: timeout waiting for data phase\n", drive->name);
return 1;
}
#endif
return 0;
}
......@@ -595,6 +599,8 @@ int ide_tcq_dmaproc(ide_dma_action_t func, ide_drive_t *drive)
if (ide_start_dma(hwif, drive, func))
return ide_stopped;
TCQ_PRINTK("IMMED in queued_start\n");
/*
* need to arm handler before starting dma engine,
* transfer could complete right away
......@@ -612,6 +618,8 @@ int ide_tcq_dmaproc(ide_dma_action_t func, ide_drive_t *drive)
case ide_dma_queued_off:
enable_tcq = 0;
case ide_dma_queued_on:
if (enable_tcq && !drive->using_dma)
return 1;
return ide_enable_queued(drive, enable_tcq);
default:
break;
......
......@@ -479,12 +479,9 @@ static void ata_pre_reset(ide_drive_t *drive)
if (ata_ops(drive) && ata_ops(drive)->pre_reset)
ata_ops(drive)->pre_reset(drive);
if (!drive->keep_settings && !drive->using_dma) {
drive->unmask = 0;
drive->io_32bit = 0;
}
if (!drive->using_dma)
return;
if (drive->using_dma) {
/* check the DMA crc count */
if (drive->crc_count) {
drive->channel->dmaproc(ide_dma_off_quietly, drive);
......@@ -494,7 +491,6 @@ static void ata_pre_reset(ide_drive_t *drive)
drive->channel->dmaproc(ide_dma_on, drive);
} else
drive->channel->dmaproc(ide_dma_off, drive);
}
}
/*
......@@ -807,7 +803,6 @@ void ide_end_drive_cmd(ide_drive_t *drive, byte stat, byte err)
args->hobfile.high_cylinder = IN_BYTE(IDE_HCYL_REG);
}
}
if (ar->ar_flags & ATA_AR_RETURN)
ata_ar_put(drive, ar);
}
......@@ -905,17 +900,15 @@ byte ide_dump_status (ide_drive_t *drive, const char *msg, byte stat)
*/
static void try_to_flush_leftover_data (ide_drive_t *drive)
{
int i = (drive->mult_count ? drive->mult_count : 1);
int i;
if (drive->type != ATA_DISK)
return;
while (i > 0) {
for (i = (drive->mult_count ? drive->mult_count : 1); i > 0; --i) {
u32 buffer[SECTOR_WORDS];
unsigned int count = (i > 1) ? 1 : i;
ata_read(drive, buffer, count * SECTOR_WORDS);
i -= count;
ata_read(drive, buffer, SECTOR_WORDS);
}
}
......@@ -999,7 +992,7 @@ void ide_cmd(ide_drive_t *drive, byte cmd, byte nsect, ide_handler_t *handler)
/*
* Invoked on completion of a special DRIVE_CMD.
*/
static ide_startstop_t drive_cmd_intr (ide_drive_t *drive)
static ide_startstop_t drive_cmd_intr(ide_drive_t *drive)
{
struct request *rq = HWGROUP(drive)->rq;
u8 *args = rq->buffer;
......@@ -1008,11 +1001,7 @@ static ide_startstop_t drive_cmd_intr (ide_drive_t *drive)
ide__sti(); /* local CPU only */
if ((stat & DRQ_STAT) && args && args[3]) {
int io_32bit = drive->io_32bit;
drive->io_32bit = 0;
ata_read(drive, &args[4], args[3] * SECTOR_WORDS);
drive->io_32bit = io_32bit;
while (((stat = GET_STAT()) & BUSY_STAT) && retries--)
udelay(100);
......@@ -1824,7 +1813,7 @@ void ide_intr(int irq, void *dev_id, struct pt_regs *regs)
del_timer(&hwgroup->timer);
spin_unlock(&ide_lock);
if (drive->unmask)
if (hwif->unmask)
ide__sti(); /* local CPU only */
startstop = handler(drive); /* service this interrupt, may set handler for next interrupt */
spin_lock_irq(&ide_lock);
......@@ -2572,14 +2561,10 @@ int ide_write_setting (ide_drive_t *drive, ide_settings_t *setting, int val)
static int set_io_32bit(struct ata_device *drive, int arg)
{
if (drive->no_io_32bit)
if (drive->channel->no_io_32bit)
return -EIO;
drive->io_32bit = arg;
#ifdef CONFIG_BLK_DEV_DTC2278
if (drive->channel->chipset == ide_dtc2278)
drive->channel->drives[!drive->select.b.unit].io_32bit = arg;
#endif
drive->channel->io_32bit = arg;
return 0;
}
......@@ -2613,11 +2598,10 @@ static int set_pio_mode (ide_drive_t *drive, int arg)
void ide_add_generic_settings (ide_drive_t *drive)
{
/* drive setting name read/write access read ioctl write ioctl data type min max mul_factor div_factor data pointer set function */
ide_add_setting(drive, "io_32bit", drive->no_io_32bit ? SETTING_READ : SETTING_RW, HDIO_GET_32BIT, HDIO_SET_32BIT, TYPE_BYTE, 0, 1 + (SUPPORT_VLB_SYNC << 1), 1, 1, &drive->io_32bit, set_io_32bit);
ide_add_setting(drive, "keepsettings", SETTING_RW, HDIO_GET_KEEPSETTINGS, HDIO_SET_KEEPSETTINGS, TYPE_BYTE, 0, 1, 1, 1, &drive->keep_settings, NULL);
ide_add_setting(drive, "io_32bit", drive->channel->no_io_32bit ? SETTING_READ : SETTING_RW, HDIO_GET_32BIT, HDIO_SET_32BIT, TYPE_BYTE, 0, 1 + (SUPPORT_VLB_SYNC << 1), 1, 1, &drive->channel->io_32bit, set_io_32bit);
ide_add_setting(drive, "pio_mode", SETTING_WRITE, -1, HDIO_SET_PIO_MODE, TYPE_BYTE, 0, 255, 1, 1, NULL, set_pio_mode);
ide_add_setting(drive, "slow", SETTING_RW, -1, -1, TYPE_BYTE, 0, 1, 1, 1, &drive->slow, NULL);
ide_add_setting(drive, "unmaskirq", drive->no_unmask ? SETTING_READ : SETTING_RW, HDIO_GET_UNMASKINTR, HDIO_SET_UNMASKINTR, TYPE_BYTE, 0, 1, 1, 1, &drive->unmask, NULL);
ide_add_setting(drive, "slow", SETTING_RW, -1, -1, TYPE_BYTE, 0, 1, 1, 1, &drive->channel->slow, NULL);
ide_add_setting(drive, "unmaskirq", drive->channel->no_unmask ? SETTING_READ : SETTING_RW, HDIO_GET_UNMASKINTR, HDIO_SET_UNMASKINTR, TYPE_BYTE, 0, 1, 1, 1, &drive->channel->unmask, NULL);
ide_add_setting(drive, "using_dma", SETTING_RW, HDIO_GET_DMA, HDIO_SET_DMA, TYPE_BYTE, 0, 1, 1, 1, &drive->using_dma, set_using_dma);
ide_add_setting(drive, "ide_scsi", SETTING_RW, -1, -1, TYPE_BYTE, 0, 1, 1, 1, &drive->scsi, NULL);
ide_add_setting(drive, "init_speed", SETTING_RW, -1, -1, TYPE_BYTE, 0, 69, 1, 1, &drive->init_speed, NULL);
......@@ -3182,7 +3166,7 @@ int __init ide_setup (char *s)
drive->autotune = 2;
goto done;
case -8: /* "slow" */
drive->slow = 1;
hwif->slow = 1;
goto done;
case -9: /* "flash" */
drive->ata_flash = 1;
......
......@@ -1268,11 +1268,8 @@ void __init ide_init_pdc202xx(struct ata_channel *hwif)
#undef CONFIG_PDC202XX_32_UNMASK
#ifdef CONFIG_PDC202XX_32_UNMASK
hwif->drives[0].io_32bit = 1;
hwif->drives[1].io_32bit = 1;
hwif->drives[0].unmask = 1;
hwif->drives[1].unmask = 1;
hwif->io_32bit = 1;
hwif->unmask = 1;
#endif
#ifdef CONFIG_BLK_DEV_IDEDMA
......
......@@ -250,11 +250,9 @@ int __init setup_pdc4030(struct ata_channel *hwif)
memcpy(hwif2->io_ports, hwif->hw.io_ports, sizeof(hwif2->io_ports));
hwif2->irq = hwif->irq;
hwif2->hw.irq = hwif->hw.irq = hwif->irq;
hwif->io_32bit = 3;
hwif2->io_32bit = 3;
for (i=0; i<2 ; i++) {
hwif->drives[i].io_32bit = 3;
hwif2->drives[i].io_32bit = 3;
hwif->drives[i].keep_settings = 1;
hwif2->drives[i].keep_settings = 1;
if (!ident.current_tm[i].cyl)
hwif->drives[i].noprobe = 1;
if (!ident.current_tm[i+2].cyl)
......@@ -634,7 +632,7 @@ ide_startstop_t do_pdc4030_io(ide_drive_t *drive, struct ata_taskfile *task)
"PROMISE_WRITE\n", drive->name);
return startstop;
}
if (!drive->unmask)
if (!drive->channel->unmask)
__cli(); /* local CPU only */
HWGROUP(drive)->wrq = *rq; /* scratchpad */
return promise_write(drive);
......
......@@ -545,10 +545,9 @@ void __init ide_init_piix(struct ata_channel *hwif)
hwif->tuneproc = &piix_tune_drive;
hwif->speedproc = &piix_set_drive;
hwif->autodma = 0;
hwif->io_32bit = 1;
hwif->unmask = 1;
for (i = 0; i < 2; i++) {
hwif->drives[i].io_32bit = 1;
hwif->drives[i].unmask = 1;
hwif->drives[i].autotune = 1;
hwif->drives[i].dn = hwif->unit * 2 + i;
}
......
......@@ -370,8 +370,7 @@ int __init probe (int base)
hwif->config_data = config;
hwif->drives[0].drive_data =
hwif->drives[1].drive_data = QD6500_DEF_DATA;
hwif->drives[0].io_32bit =
hwif->drives[1].io_32bit = 1;
hwif->io_32bit = 1;
hwif->tuneproc = &qd6500_tune_drive;
return 1;
}
......@@ -403,8 +402,7 @@ int __init probe (int base)
hwif->config_data = config | (control <<8);
hwif->drives[0].drive_data =
hwif->drives[1].drive_data = QD6580_DEF_DATA;
hwif->drives[0].io_32bit =
hwif->drives[1].io_32bit = 1;
hwif->io_32bit = 1;
hwif->tuneproc = &qd6580_tune_drive;
qd_write_reg(QD_DEF_CONTR,QD_CONTROL_PORT);
......@@ -426,11 +424,11 @@ int __init probe (int base)
ide_hwifs[i].select_data = base;
ide_hwifs[i].config_data = config | (control <<8);
ide_hwifs[i].tuneproc = &qd6580_tune_drive;
ide_hwifs[i].io_32bit = 1;
for (j = 0; j < 2; j++) {
ide_hwifs[i].drives[j].drive_data =
i?QD6580_DEF_DATA2:QD6580_DEF_DATA;
ide_hwifs[i].drives[j].io_32bit = 1;
}
}
......
......@@ -40,8 +40,7 @@ void __init ide_init_rz1000(struct ata_channel *hwif) /* called from ide-pci.c *
printk("%s: disabled chipset read-ahead (buggy RZ1000/RZ1001)\n", hwif->name);
} else {
hwif->serialized = 1;
hwif->drives[0].no_unmask = 1;
hwif->drives[1].no_unmask = 1;
hwif->no_unmask = 1;
printk("%s: serialized, disabled unmasking (buggy RZ1000/RZ1001)\n", hwif->name);
}
}
......
......@@ -535,10 +535,10 @@ void __init ide_init_via82cxxx(struct ata_channel *hwif)
hwif->tuneproc = &via82cxxx_tune_drive;
hwif->speedproc = &via_set_drive;
hwif->autodma = 0;
hwif->io_32bit = 1;
hwif->unmask = (via_config->flags & VIA_NO_UNMASK) ? 0 : 1;
for (i = 0; i < 2; i++) {
hwif->drives[i].io_32bit = 1;
hwif->drives[i].unmask = (via_config->flags & VIA_NO_UNMASK) ? 0 : 1;
hwif->drives[i].autotune = 1;
hwif->drives[i].dn = hwif->unit * 2 + i;
}
......
......@@ -3,27 +3,23 @@
#
O_TARGET := aic7xxx_drv.o
MOD_TARGET = aic7xxx.o
obj-$(CONFIG_SCSI_AIC7XXX) += aic7xxx_mod.o
#EXTRA_CFLAGS += -g
# Core files
obj-$(CONFIG_SCSI_AIC7XXX) += aic7xxx.o aic7xxx_93cx6.o aic7770.o
# Platform Specific Files
AIC7XXX_OBJS = aic7xxx_linux.o
AIC7XXX_OBJS += aic7xxx_proc.o aic7770_linux.o
#PCI Specific Platform Files
ifeq ($(CONFIG_PCI),y)
AIC7XXX_OBJS += aic7xxx_linux_pci.o
endif
# Core Files
AIC7XXX_OBJS += aic7xxx.o aic7xxx_93cx6.o aic7770.o
#PCI Specific Core Files
obj-$(CONFIG_SCSI_AIC7XXX) += aic7xxx_linux.o aic7xxx_proc.o aic7770_linux.o
# PCI Specific Files
ifeq ($(CONFIG_PCI),y)
AIC7XXX_OBJS += aic7xxx_pci.o
# Core PCI files
obj-$(CONFIG_SCSI_AIC7XXX) += aic7xxx_pci.o
# Platform Specific PCI Files
obj-$(CONFIG_SCSI_AIC7XXX) += aic7xxx_linux_pci.o
endif
# Override our module destination
MOD_TARGET = aic7xxx.o
#EXTRA_CFLAGS += -g
include $(TOPDIR)/Rules.make
......
......@@ -91,32 +91,6 @@ static inline void lock_metapage(struct metapage *mp)
__lock_metapage(mp);
}
/* We're currently re-evaluating the method we use to write metadata
* pages. Currently, we have to make sure there are no dirty buffer_heads
* hanging around after we free the metadata page, since the same
* physical disk blocks may be used in a different address space and we
* can't write old data over the good data.
*
* The best way to do this now is with block_invalidate_page. However,
* this is only available in the newer kernels and is not exported
* to modules. block_flushpage is the next best, but it too is not exported
* to modules.
*
* In a module, about the best we have is generic_buffer_fdatasync. This
* synchronously writes any dirty buffers. This is not optimal, but it will
* keep old dirty buffers from overwriting newer data.
*/
static inline void invalidate_page(metapage_t *mp)
{
#ifdef MODULE
generic_buffer_fdatasync(mp->mapping->host, mp->index, mp->index + 1);
#else
lock_page(mp->page);
block_flushpage(mp->page, 0);
UnlockPage(mp->page);
#endif
}
int __init metapage_init(void)
{
int i;
......@@ -559,8 +533,11 @@ void release_metapage(metapage_t * mp)
clear_bit(META_sync, &mp->flag);
}
if (test_bit(META_discard, &mp->flag))
invalidate_page(mp);
if (test_bit(META_discard, &mp->flag)) {
lock_page(mp->page);
block_flushpage(mp->page, 0);
UnlockPage(mp->page);
}
page_cache_release(mp->page);
INCREMENT(mpStat.pagefree);
......@@ -593,9 +570,7 @@ void invalidate_metapages(struct inode *ip, unsigned long addr,
int l2BlocksPerPage = PAGE_CACHE_SHIFT - ip->i_sb->s_blocksize_bits;
struct address_space *mapping = ip->i_mapping;
metapage_t *mp;
#ifndef MODULE
struct page *page;
#endif
/*
* First, mark metapages to discard. They will eventually be
......@@ -612,27 +587,14 @@ void invalidate_metapages(struct inode *ip, unsigned long addr,
/*
* If in the metapage cache, we've got the page locked
*/
#ifdef MODULE
UnlockPage(mp->page);
generic_buffer_fdatasync(mp->mapping->host, mp->index,
mp->index+1);
lock_page(mp->page);
#else
block_flushpage(mp->page, 0);
#endif
} else {
spin_unlock(&meta_lock);
#ifdef MODULE
generic_buffer_fdatasync(ip, lblock << l2BlocksPerPage,
(lblock + 1) << l2BlocksPerPage);
#else
page = find_lock_page(mapping,
lblock >> l2BlocksPerPage);
page = find_lock_page(mapping, lblock>>l2BlocksPerPage);
if (page) {
block_flushpage(page, 0);
UnlockPage(page);
}
#endif
}
}
}
......
......@@ -296,7 +296,6 @@ struct hd_big_geometry {
#define HDIO_GET_MULTCOUNT 0x0304 /* get current IDE blockmode setting */
#define HDIO_GET_QDMA 0x0305 /* get use-qdma flag */
#define HDIO_OBSOLETE_IDENTITY 0x0307 /* OBSOLETE, DO NOT USE: returns 142 bytes */
#define HDIO_GET_KEEPSETTINGS 0x0308 /* get keep-settings-on-reset flag */
#define HDIO_GET_32BIT 0x0309 /* get current io_32bit setting */
#define HDIO_GET_NOWERR 0x030a /* get ignore-write-error flag */
#define HDIO_GET_DMA 0x030b /* get use-dma flag */
......@@ -316,7 +315,6 @@ struct hd_big_geometry {
/* hd/ide ctl's that pass (arg) non-ptr values are numbered 0x032n/0x033n */
#define HDIO_SET_MULTCOUNT 0x0321 /* change IDE blockmode */
#define HDIO_SET_UNMASKINTR 0x0322 /* permit other irqs during I/O */
#define HDIO_SET_KEEPSETTINGS 0x0323 /* keep ioctl settings on reset */
#define HDIO_SET_32BIT 0x0324 /* change io_32bit flags */
#define HDIO_SET_NOWERR 0x0325 /* change ignore-write-error flag */
#define HDIO_SET_DMA 0x0326 /* change use-dma flag */
......
......@@ -342,12 +342,10 @@ struct ata_device {
unsigned long PADAM_timeout; /* max time to wait for irq */
special_t special; /* special action flags */
byte keep_settings; /* restore settings after drive reset */
byte using_dma; /* disk is using dma for read/write */
byte using_tcq; /* disk is using queued dma operations*/
byte retry_pio; /* retrying dma capable host in pio */
byte state; /* retry state */
byte unmask; /* flag: okay to unmask other irqs */
byte dsc_overlap; /* flag: DSC overlap */
unsigned waiting_for_dma: 1; /* dma currently in progress */
......@@ -358,7 +356,6 @@ struct ata_device {
unsigned noprobe : 1; /* from: hdx=noprobe */
unsigned removable : 1; /* 1 if need to do check_media_change */
unsigned forced_geom : 1; /* 1 if hdx=c,h,s was given at boot */
unsigned no_unmask : 1; /* disallow setting unmask bit */
unsigned nobios : 1; /* flag: do not probe bios for drive */
unsigned revalidate : 1; /* request revalidation */
unsigned atapi_overlap : 1; /* flag: ATAPI overlap (not supported) */
......@@ -388,13 +385,6 @@ struct ata_device {
unsigned long long capacity48; /* total number of sectors */
unsigned int drive_data; /* for use by tuneproc/selectproc as needed */
/* FIXME: Those are properties of a channel and not a drive! Move them
* later there.
*/
byte slow; /* flag: slow data port */
unsigned no_io_32bit : 1; /* disallow enabling 32bit I/O */
byte io_32bit; /* 0=16-bit, 1=32-bit, 2/3=32bit+sync */
wait_queue_head_t wqueue; /* used to wait for drive in open() */
struct hd_driveid *id; /* drive model identification info */
......@@ -523,6 +513,12 @@ struct ata_channel {
unsigned autodma : 1; /* automatically try to enable DMA at boot */
unsigned udma_four : 1; /* 1=ATA-66 capable, 0=default */
unsigned highmem : 1; /* can do full 32-bit dma */
byte slow; /* flag: slow data port */
unsigned no_io_32bit : 1; /* disallow enabling 32bit I/O */
byte io_32bit; /* 0=16-bit, 1=32-bit, 2/3=32bit+sync */
unsigned no_unmask : 1; /* disallow setting unmask bit */
byte unmask; /* flag: okay to unmask other irqs */
#if (DISK_RECOVERY_TIME > 0)
unsigned long last_time; /* time when previous rq was done */
#endif
......@@ -972,10 +968,9 @@ extern void revalidate_drives(void);
/*
* ata_request flag bits
*/
#define ATA_AR_QUEUED 1
#define ATA_AR_SETUP 2
#define ATA_AR_RETURN 4
#define ATA_AR_STATIC 8
#define ATA_AR_QUEUED 1 /* was queued */
#define ATA_AR_SETUP 2 /* dma table mapped */
#define ATA_AR_POOL 4 /* originated from drive pool */
/*
* if turn-around time is longer than this, halve queue depth
......@@ -1007,8 +1002,10 @@ static inline struct ata_request *ata_ar_get(ide_drive_t *drive)
if (!list_empty(&drive->free_req)) {
ar = list_ata_entry(drive->free_req.next);
list_del(&ar->ar_queue);
ata_ar_init(drive, ar);
ar->ar_flags |= ATA_AR_POOL;
}
return ar;
......@@ -1016,7 +1013,7 @@ static inline struct ata_request *ata_ar_get(ide_drive_t *drive)
static inline void ata_ar_put(ide_drive_t *drive, struct ata_request *ar)
{
if (!(ar->ar_flags & ATA_AR_STATIC))
if (ar->ar_flags & ATA_AR_POOL)
list_add(&ar->ar_queue, &drive->free_req);
if (ar->ar_flags & ATA_AR_QUEUED) {
......
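The renamed flag makes the ownership rule explicit: only requests taken from the drive's free list carry ATA_AR_POOL, and only those are returned to it, so the stack-allocated request in ide_raw_taskfile() (whose ATA_AR_STATIC marking is removed above) is simply dropped on completion. A small user-space model of the pattern:

	#include <stdio.h>

	#define AR_POOL 4		/* mirrors ATA_AR_POOL */

	struct req { int flags; };

	static struct req pool[2];
	static int pool_top = 2;	/* both entries free initially */

	static struct req *req_get(void)
	{
		if (!pool_top)
			return NULL;		/* pool exhausted */
		struct req *r = &pool[--pool_top];
		r->flags = AR_POOL;		/* mark pool ownership */
		return r;
	}

	static void req_put(struct req *r)
	{
		if (r->flags & AR_POOL)		/* only pool requests go back */
			pool[pool_top++] = *r;
	}

	int main(void)
	{
		struct req on_stack = { 0 };	/* like ide_raw_taskfile()'s */
		struct req *r = req_get();

		if (r)
			req_put(r);		/* pooled request goes back */
		req_put(&on_stack);		/* dropped, not pooled */
		printf("pool size %d\n", pool_top);	/* still 2 */
		return 0;
	}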
......@@ -211,6 +211,7 @@ EXPORT_SYMBOL(unlock_buffer);
EXPORT_SYMBOL(__wait_on_buffer);
EXPORT_SYMBOL(___wait_on_page);
EXPORT_SYMBOL(generic_direct_IO);
EXPORT_SYMBOL(discard_bh_page);
EXPORT_SYMBOL(block_write_full_page);
EXPORT_SYMBOL(block_read_full_page);
EXPORT_SYMBOL(block_prepare_write);
......
......@@ -1672,10 +1672,9 @@ void set_cpus_allowed(task_t *p, unsigned long new_mask)
preempt_enable();
}
static volatile unsigned long migration_mask;
static int migration_thread(void * unused)
static int migration_thread(void * bind_cpu)
{
int cpu = cpu_logical_map((int) (long) bind_cpu);
struct sched_param param = { sched_priority: 99 };
runqueue_t *rq;
int ret;
......@@ -1683,36 +1682,20 @@ static int migration_thread(void * unused)
daemonize();
sigfillset(&current->blocked);
set_fs(KERNEL_DS);
ret = setscheduler(0, SCHED_FIFO, &param);
/*
* We have to migrate manually - there is no migration thread
* to do this for us yet :-)
*
* We use the following property of the Linux scheduler. At
* this point no other task is running, so by keeping all
* migration threads running, the load-balancer will distribute
* them between all CPUs equally. At that point every migration
* task binds itself to the current CPU.
* The first migration thread is started on CPU #0. This one can migrate
* the other migration threads to their destination CPUs.
*/
/* wait for all migration threads to start up. */
while (!migration_mask)
if (cpu != 0) {
while (!cpu_rq(cpu_logical_map(0))->migration_thread)
yield();
for (;;) {
preempt_disable();
if (test_and_clear_bit(smp_processor_id(), &migration_mask))
current->cpus_allowed = 1 << smp_processor_id();
if (test_thread_flag(TIF_NEED_RESCHED))
schedule();
if (!migration_mask)
break;
preempt_enable();
set_cpus_allowed(current, 1UL << cpu);
}
printk("migration_task %d on cpu=%d\n",cpu,smp_processor_id());
ret = setscheduler(0, SCHED_FIFO, &param);
rq = this_rq();
rq->migration_thread = current;
preempt_enable();
sprintf(current->comm, "migration_CPU%d", smp_processor_id());
......@@ -1743,9 +1726,11 @@ static int migration_thread(void * unused)
cpu_src = p->thread_info->cpu;
rq_src = cpu_rq(cpu_src);
local_irq_save(flags);
double_rq_lock(rq_src, rq_dest);
if (p->thread_info->cpu != cpu_src) {
double_rq_unlock(rq_src, rq_dest);
local_irq_restore(flags);
goto repeat;
}
if (rq_src == rq) {
......@@ -1756,6 +1741,7 @@ static int migration_thread(void * unused)
}
}
double_rq_unlock(rq_src, rq_dest);
local_irq_restore(flags);
up(&req->sem);
}
......@@ -1763,33 +1749,18 @@ static int migration_thread(void * unused)
void __init migration_init(void)
{
unsigned long tmp, orig_cache_decay_ticks;
int cpu;
tmp = 0;
current->cpus_allowed = 1UL << cpu_logical_map(0);
for (cpu = 0; cpu < smp_num_cpus; cpu++) {
if (kernel_thread(migration_thread, NULL,
if (kernel_thread(migration_thread, (void *) (long) cpu,
CLONE_FS | CLONE_FILES | CLONE_SIGNAL) < 0)
BUG();
tmp |= (1UL << cpu_logical_map(cpu));
}
current->cpus_allowed = -1L;
migration_mask = tmp;
orig_cache_decay_ticks = cache_decay_ticks;
cache_decay_ticks = 0;
for (cpu = 0; cpu < smp_num_cpus; cpu++) {
int logical = cpu_logical_map(cpu);
while (!cpu_rq(logical)->migration_thread) {
set_current_state(TASK_INTERRUPTIBLE);
for (cpu = 0; cpu < smp_num_cpus; cpu++)
while (!cpu_rq(cpu)->migration_thread)
schedule_timeout(2);
}
}
if (migration_mask)
BUG();
cache_decay_ticks = orig_cache_decay_ticks;
}
#endif
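The new comment describes the bootstrap order: the migration thread for CPU 0 comes up first, and every later thread spins until CPU 0's run queue has its thread registered before binding itself with set_cpus_allowed(). The same synchronization idea, modeled in user space with pthreads (an illustration only, compile with -pthread; the busy-wait stands in for the kernel's yield loop):

	#include <pthread.h>
	#include <sched.h>
	#include <stdio.h>

	static volatile int cpu0_up;	/* stands in for cpu_rq(0)->migration_thread */

	static void *migration_model(void *arg)
	{
		long cpu = (long) arg;

		if (cpu != 0)
			while (!cpu0_up)	/* later threads wait for thread 0 */
				sched_yield();
		else
			cpu0_up = 1;		/* thread 0 publishes itself first */
		printf("migration thread for cpu %ld up\n", cpu);
		return NULL;
	}

	int main(void)
	{
		pthread_t t[4];
		long cpu;

		for (cpu = 0; cpu < 4; cpu++)
			pthread_create(&t[cpu], NULL, migration_model, (void *) cpu);
		for (cpu = 0; cpu < 4; cpu++)
			pthread_join(t[cpu], NULL);
		return 0;
	}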
......@@ -24,6 +24,7 @@
#include <linux/module.h>
#include <linux/radix-tree.h>
#include <linux/slab.h>
#include <linux/string.h>
/*
* Radix tree node definition.
......
......@@ -797,9 +797,9 @@ struct page *find_trylock_page(struct address_space *mapping, unsigned long offs
}
/*
* Must be called with the pagecache lock held,
* will return with it held (but it may be dropped
* during blocking operations..
* Must be called with the mapping lock held for writing.
* Will return with it held for writing, but it may be dropped
* while locking the page.
*/
static struct page *__find_lock_page(struct address_space *mapping,
unsigned long offset)
......@@ -815,11 +815,11 @@ static struct page *__find_lock_page(struct address_space *mapping,
if (page) {
page_cache_get(page);
if (TryLockPage(page)) {
read_unlock(&mapping->page_lock);
write_unlock(&mapping->page_lock);
lock_page(page);
read_lock(&mapping->page_lock);
write_lock(&mapping->page_lock);
/* Has the page been re-allocated while we slept? */
/* Has the page been truncated while we slept? */
if (page->mapping != mapping || page->index != offset) {
UnlockPage(page);
page_cache_release(page);
......@@ -830,25 +830,53 @@ static struct page *__find_lock_page(struct address_space *mapping,
return page;
}
/**
* find_lock_page - locate, pin and lock a pagecache page
*
* @mapping - the address_space to search
* @offset - the page index
*
* Locates the desired pagecache page, locks it, increments its reference
* count and returns its address.
*
* Returns zero if the page was not present. find_lock_page() may sleep.
*/
/*
* Same as the above, but lock the page too, verifying that
* it's still valid once we own it.
* The write_lock is unfortunate, but __find_lock_page() requires that on
* behalf of find_or_create_page(). We could just clone __find_lock_page() -
* one for find_lock_page(), one for find_or_create_page()...
*/
struct page * find_lock_page(struct address_space *mapping, unsigned long offset)
struct page *find_lock_page(struct address_space *mapping,
unsigned long offset)
{
struct page *page;
read_lock(&mapping->page_lock);
write_lock(&mapping->page_lock);
page = __find_lock_page(mapping, offset);
read_unlock(&mapping->page_lock);
write_unlock(&mapping->page_lock);
return page;
}
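For reference, a hypothetical caller of the function documented above, using the 2.5-era page calls that appear elsewhere in this diff (UnlockPage, page_cache_release); a sketch, not code from this tree:

	/* Sketch: look up and lock a cached page, use it, then unlock and
	 * drop the reference find_lock_page() took on our behalf. */
	static void touch_cached_page(struct address_space *mapping,
				      unsigned long index)
	{
		struct page *page = find_lock_page(mapping, index);

		if (!page)
			return;			/* page not in the cache */
		/* ... operate on the locked page ... */
		UnlockPage(page);
		page_cache_release(page);
	}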
/*
* Same as above, but create the page if required..
/**
* find_or_create_page - locate or add a pagecache page
*
* @mapping - the page's address_space
* @index - the page's index into the mapping
* @gfp_mask - page allocation mode
*
* Locates a page in the pagecache. If the page is not present, a new page
* is allocated using @gfp_mask and is added to the pagecache and to the VM's
* LRU list. The returned page is locked and has its reference count
* incremented.
*
* find_or_create_page() may sleep, even if @gfp_mask specifies an atomic
* allocation!
*
* find_or_create_page() returns the desired page's address, or zero on
* memory exhaustion.
*/
struct page * find_or_create_page(struct address_space *mapping,
struct page *find_or_create_page(struct address_space *mapping,
unsigned long index, unsigned int gfp_mask)
{
struct page *page;
......
......@@ -340,6 +340,8 @@ struct page * __alloc_pages(unsigned int gfp_mask, unsigned int order, zonelist_
zone = zonelist->zones;
classzone = *zone;
if (classzone == NULL)
return NULL;
min = 1UL << order;
for (;;) {
zone_t *z = *(zone++);
......
......@@ -78,11 +78,11 @@ static inline void free_area_pmd(pgd_t * dir, unsigned long address, unsigned lo
} while (address < end);
}
void vmfree_area_pages(unsigned long address, unsigned long size)
void vmfree_area_pages(unsigned long start, unsigned long size)
{
pgd_t * dir;
unsigned long start = address;
unsigned long end = address + size;
unsigned long address = start;
unsigned long end = start + size;
dir = pgd_offset_k(address);
flush_cache_all();
......