Commit 39f6e22a authored by Richard Henderson

Merge ssh://kanga/work/linux/axp-2.5

into are.twiddle.net:/home/rth/BK/axp-2.5
parents 788fbf37 f727aff5
@@ -250,6 +250,9 @@ static int ide_raw_build_sglist (ide_drive_t *drive, struct request *rq)
BUG();
if (sector_count > 128) {
#else
while (sector_count > 128) {
#endif
memset(&sg[nents], 0, sizeof(*sg));
sg[nents].page = virt_to_page(virt_addr);
sg[nents].offset = (unsigned long) virt_addr & ~PAGE_MASK;
@@ -263,22 +266,7 @@ static int ide_raw_build_sglist (ide_drive_t *drive, struct request *rq)
sg[nents].offset = (unsigned long) virt_addr & ~PAGE_MASK;
sg[nents].length = sector_count * SECTOR_SIZE;
nents++;
#else
while (sector_count > 128) {
memset(&sg[nents], 0, sizeof(*sg));
sg[nents].address = virt_to_page(virt_addr);
sg[nents].offset = (unsigned long)virt_addr & ~PAGE_MASK;
sg[nents].length = 128 * SECTOR_SIZE;
nents++;
virt_addr = virt_addr + (128 * SECTOR_SIZE);
sector_count -= 128;
};
memset(&sg[nents], 0, sizeof(*sg));
sg[nents].page = virt_to_page(virt_addr);
sg[nents].offset = (unsigned long) virt_addr & ~PAGE_MASK;
sg[nents].length = sector_count * SECTOR_SIZE;
nents++;
#endif
return pci_map_sg(hwif->pci_dev, sg, nents, hwif->sg_dma_direction);
}
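As a side note on the sglist hunks above: each chunk of the raw request is described as a page/offset/length triple before the table is handed to pci_map_sg(). Below is a minimal sketch of that conversion for a single virtually contiguous lowmem buffer, not taken from this diff; it assumes the usual IDE-layer headers and uses the 2.5-era struct scatterlist fields shown in the hunk, with an illustrative function name.

/* Sketch only: describe one lowmem buffer as a scatterlist entry the way
 * ide_raw_build_sglist() does above. */
static void fill_sg_entry(struct scatterlist *sg, void *virt_addr,
			  unsigned int sectors)
{
	memset(sg, 0, sizeof(*sg));
	sg->page   = virt_to_page(virt_addr);			/* backing page */
	sg->offset = (unsigned long)virt_addr & ~PAGE_MASK;	/* byte offset inside that page */
	sg->length = sectors * SECTOR_SIZE;			/* transfer length in bytes */
}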
@@ -523,7 +511,6 @@ int __ide_dma_off_quietly (ide_drive_t *drive)
if (HWIF(drive)->ide_dma_host_off(drive))
return 1;
if (drive->queue_setup)
HWIF(drive)->ide_dma_queued_off(drive);
return 0;
@@ -585,8 +572,9 @@ int __ide_dma_on (ide_drive_t *drive)
if (HWIF(drive)->ide_dma_host_on(drive))
return 1;
if (drive->queue_setup)
#ifdef CONFIG_BLK_DEV_IDE_TCQ_DEFAULT
HWIF(drive)->ide_dma_queued_on(drive);
#endif
return 0;
}
......
@@ -1264,7 +1264,7 @@ irqreturn_t ide_intr (int irq, void *dev_id, struct pt_regs *regs)
#endif /* CONFIG_BLK_DEV_IDEPCI */
}
spin_unlock_irqrestore(&ide_lock, flags);
return IRQ_HANDLED;
return IRQ_NONE;
}
drive = hwgroup->drive;
if (!drive) {
@@ -1286,7 +1286,7 @@ irqreturn_t ide_intr (int irq, void *dev_id, struct pt_regs *regs)
* enough advance overhead that the latter isn't a problem.
*/
spin_unlock_irqrestore(&ide_lock, flags);
return IRQ_HANDLED;
return IRQ_NONE;
}
if (!hwgroup->busy) {
hwgroup->busy = 1; /* paranoia */
......
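For context: the two return-value changes above replace IRQ_HANDLED with IRQ_NONE on the paths where the interrupt did not come from this hwgroup. Under the 2.5 irqreturn_t interface a handler should only claim interrupts it actually serviced, so the core can spot stuck or misrouted shared lines. A minimal sketch of that convention follows; it is not IDE code, the struct and flag names are hypothetical, and it assumes the usual <linux/interrupt.h> definitions.

struct my_dev {
	int irq_was_raised;	/* hypothetical flag standing in for a status-register test */
};

static irqreturn_t my_shared_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct my_dev *dev = dev_id;

	if (!dev->irq_was_raised)	/* device says it did not interrupt */
		return IRQ_NONE;	/* not ours: let the core account for it */

	dev->irq_was_raised = 0;	/* acknowledge and service it */
	return IRQ_HANDLED;		/* only claim interrupts we serviced */
}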
@@ -634,64 +634,8 @@ static inline u8 probe_for_drive (ide_drive_t *drive)
return drive->present;
}
static int hwif_check_region(ide_hwif_t *hwif, unsigned long addr, int num)
{
int err;
if(hwif->mmio)
err = check_mem_region(addr, num);
else
err = check_region(addr, num);
if(err)
{
printk("%s: %s resource 0x%lX-0x%lX not free.\n",
hwif->name, hwif->mmio?"MMIO":"I/O", addr, addr+num-1);
}
return err;
}
/**
* hwif_check_regions - check resources for IDE
* @hwif: interface to use
*
* Checks if all the needed resources for an interface are free
* providing the interface is PIO. Right now core IDE code does
* this work which is deeply wrong. MMIO leaves it to the controller
* driver, PIO will migrate this way over time
*/
static int hwif_check_regions (ide_hwif_t *hwif)
{
u32 i = 0;
int addr_errs = 0;
if (hwif->mmio == 2)
return 0;
addr_errs = hwif_check_region(hwif, hwif->io_ports[IDE_DATA_OFFSET], 1);
for (i = IDE_ERROR_OFFSET; i <= IDE_STATUS_OFFSET; i++)
addr_errs += hwif_check_region(hwif, hwif->io_ports[i], 1);
if (hwif->io_ports[IDE_CONTROL_OFFSET])
addr_errs += hwif_check_region(hwif, hwif->io_ports[IDE_CONTROL_OFFSET], 1);
#if defined(CONFIG_AMIGA) || defined(CONFIG_MAC)
if (hwif->io_ports[IDE_IRQ_OFFSET])
addr_errs += hwif_check_region(hwif, hwif->io_ports[IDE_IRQ_OFFSET], 1);
#endif /* (CONFIG_AMIGA) || (CONFIG_MAC) */
/* If any errors are returned, we drop the hwif interface. */
hwif->straight8 = 0;
return(addr_errs);
}
//EXPORT_SYMBOL(hwif_check_regions);
#define hwif_request_region(addr, num, name) \
((hwif->mmio) ? request_mem_region((addr),(num),(name)) : request_region((addr),(num),(name)))
static void hwif_register (ide_hwif_t *hwif)
{
u32 i = 0;
/* register with global device tree */
strlcpy(hwif->gendev.bus_id,hwif->name,BUS_ID_SIZE);
snprintf(hwif->gendev.name,DEVICE_NAME_SIZE,"IDE Controller");
@@ -701,24 +645,6 @@ static void hwif_register (ide_hwif_t *hwif)
else
hwif->gendev.parent = NULL; /* Would like to do = &device_legacy */
device_register(&hwif->gendev);
if (hwif->mmio == 2)
return;
if (hwif->io_ports[IDE_CONTROL_OFFSET])
hwif_request_region(hwif->io_ports[IDE_CONTROL_OFFSET], 1, hwif->name);
#if defined(CONFIG_AMIGA) || defined(CONFIG_MAC)
if (hwif->io_ports[IDE_IRQ_OFFSET])
hwif_request_region(hwif->io_ports[IDE_IRQ_OFFSET], 1, hwif->name);
#endif /* (CONFIG_AMIGA) || (CONFIG_MAC) */
if (((unsigned long)hwif->io_ports[IDE_DATA_OFFSET] | 7) ==
((unsigned long)hwif->io_ports[IDE_STATUS_OFFSET])) {
hwif_request_region(hwif->io_ports[IDE_DATA_OFFSET], 8, hwif->name);
hwif->straight8 = 1;
return;
}
for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++)
hwif_request_region(hwif->io_ports[i], 1, hwif->name);
}
//EXPORT_SYMBOL(hwif_register);
@@ -778,7 +704,7 @@ void probe_hwif (ide_hwif_t *hwif)
#ifdef CONFIG_BLK_DEV_PDC4030
(hwif->chipset != ide_pdc4030 || hwif->channel == 0) &&
#endif /* CONFIG_BLK_DEV_PDC4030 */
(hwif_check_regions(hwif))) {
(ide_hwif_request_regions(hwif))) {
u16 msgout = 0;
for (unit = 0; unit < MAX_DRIVES; ++unit) {
ide_drive_t *drive = &hwif->drives[unit];
@@ -869,6 +795,11 @@ void probe_hwif (ide_hwif_t *hwif)
if (irqd)
enable_irq(irqd);
if (!hwif->present) {
ide_hwif_release_regions(hwif);
return;
}
for (unit = 0; unit < MAX_DRIVES; ++unit) {
ide_drive_t *drive = &hwif->drives[unit];
int enable_dma = 1;
@@ -983,7 +914,6 @@ static void ide_init_queue(ide_drive_t *drive)
blk_init_queue(q, do_ide_request, &ide_lock);
q->queuedata = HWGROUP(drive);
drive->queue_setup = 1;
blk_queue_segment_boundary(q, 0xffff);
if (!hwif->rqsize)
@@ -1005,10 +935,6 @@ static void ide_init_queue(ide_drive_t *drive)
static void ide_init_drive(ide_drive_t *drive)
{
ide_toggle_bounce(drive, 1);
#ifdef CONFIG_BLK_DEV_IDE_TCQ_DEFAULT
HWIF(drive)->ide_dma_queued_on(drive);
#endif
}
/*
......
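Read together, the probe_hwif() hunks above swap the old check-only pass (hwif_check_regions) for the exported ide_hwif_request_regions(), and add a matching ide_hwif_release_regions() when probing turns up no drives. Here is a simplified sketch of the resulting probe-time flow, not copied from the commit: probe_drives() is a placeholder for the real per-unit probe loop and the error reporting is trimmed.

/* Sketch of the resource flow in probe_hwif() after this change. */
if (ide_hwif_request_regions(hwif)) {
	/* ports already claimed elsewhere: skip this interface */
	return;
}
probe_drives(hwif);			/* placeholder for the per-drive probe loop */
if (!hwif->present) {
	ide_hwif_release_regions(hwif);	/* nothing found: hand the ports back */
	return;
}
/* drives found: the regions stay held until ide_unregister() releases them */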
@@ -954,6 +954,9 @@ ide_startstop_t pre_task_out_intr (ide_drive_t *drive, struct request *rq)
return startstop;
}
if (!drive->unmask)
local_irq_disable();
return task_out_intr(drive);
}
EXPORT_SYMBOL(pre_task_out_intr);
@@ -1030,6 +1033,9 @@ ide_startstop_t pre_task_mulout_intr (ide_drive_t *drive, struct request *rq)
return startstop;
}
if (!drive->unmask)
local_irq_disable();
return task_mulout_intr(drive);
}
EXPORT_SYMBOL(pre_task_mulout_intr);
@@ -1361,8 +1367,6 @@ void ide_init_drive_taskfile (struct request *rq)
EXPORT_SYMBOL(ide_init_drive_taskfile);
#if 1
int ide_diag_taskfile (ide_drive_t *drive, ide_task_t *args, unsigned long data_size, u8 *buf)
{
struct request rq;
@@ -1401,69 +1405,6 @@ int ide_diag_taskfile (ide_drive_t *drive, ide_task_t *args, unsigned long data_
return ide_do_drive_cmd(drive, &rq, ide_wait);
}
#else
int ide_diag_taskfile (ide_drive_t *drive, ide_task_t *args, unsigned long data_size, u8 *buf)
{
struct request *rq;
unsigned long flags;
ide_hwgroup_t *hwgroup = HWGROUP(drive);
struct list_head *queue_head = &drive->queue.queue_head;
DECLARE_COMPLETION(wait);
if (HWIF(drive)->chipset == ide_pdc4030 && buf != NULL)
return -ENOSYS; /* special drive cmds not supported */
memset(rq, 0, sizeof(*rq));
rq->flags = REQ_DRIVE_TASKFILE;
rq->buffer = buf;
/*
* (ks) We transfer currently only whole sectors.
* This is sufficient for now. But, it would be great,
* if we would find a solution to transfer any size.
* To support special commands like READ LONG.
*/
if (args->command_type != IDE_DRIVE_TASK_NO_DATA) {
if (data_size == 0) {
ata_nsector_t nsector;
nsector.b.low = args->hobRegister[IDE_NSECTOR_OFFSET_HOB];
nsector.b.high = args->tfRegister[IDE_NSECTOR_OFFSET];
rq.nr_sectors = nsector.all;
} else {
rq.nr_sectors = data_size / SECTOR_SIZE;
}
rq.current_nr_sectors = rq.nr_sectors;
// rq.hard_cur_sectors = rq.nr_sectors;
}
if (args->tf_out_flags.all == 0) {
/*
* clean up kernel settings for driver sanity, regardless.
* except for discrete diag services.
*/
args->posthandler = ide_post_handler_parser(
(struct hd_drive_task_hdr *) args->tfRegister,
(struct hd_drive_hob_hdr *) args->hobRegister);
}
rq->special = args;
rq->errors = 0;
rq->rq_status = RQ_ACTIVE;
rq->rq_disk = drive->disk;
rq->waiting = &wait;
spin_lock_irqsave(&ide_lock, flags);
queue_head = queue_head->prev;
list_add(&rq->queue, queue_head);
ide_do_request(hwgroup, 0);
spin_unlock_irqrestore(&ide_lock, flags);
wait_for_completion(&wait); /* wait for it to be serviced */
return rq->errors ? -EIO : 0; /* return -EIO if errors */
}
#endif
EXPORT_SYMBOL(ide_diag_taskfile);
int ide_raw_taskfile (ide_drive_t *drive, ide_task_t *args, u8 *buf)
......
@@ -501,8 +501,10 @@ static int ide_tcq_configure(ide_drive_t *drive)
* bits 14 and 1 must be set in word 83 of the device id to indicate
* support for dma queued protocol, and bit 15 must be cleared
*/
if ((drive->id->command_set_2 & tcq_bits) ^ tcq_mask)
if ((drive->id->command_set_2 & tcq_bits) ^ tcq_mask) {
printk(KERN_INFO "%s: TCQ not supported\n", drive->name);
return -EIO;
}
args = kmalloc(sizeof(*args), GFP_ATOMIC);
if (!args)
@@ -655,21 +657,24 @@ static int ide_tcq_check_blacklist(ide_drive_t *drive)
int __ide_dma_queued_on(ide_drive_t *drive)
{
ide_hwif_t *hwif = HWIF(drive);
if (drive->media != ide_disk)
return 1;
if (!drive->using_dma)
return 1;
if (HWIF(drive)->chipset == ide_pdc4030)
if (hwif->chipset == ide_pdc4030)
return 1;
if (ide_tcq_check_blacklist(drive)) {
printk(KERN_WARNING "%s: tcq forbidden by blacklist\n",
drive->name);
return 1;
}
if (drive->next != drive) {
if (hwif->drives[0].present && hwif->drives[1].present) {
printk(KERN_WARNING "%s: only one drive on a channel supported"
" for tcq\n", drive->name);
return 1;
}
if (ata_pending_commands(drive)) {
printk(KERN_WARNING "ide-tcq; can't toggle tcq feature on "
"busy drive\n");
@@ -681,6 +686,8 @@ int __ide_dma_queued_on(ide_drive_t *drive)
int __ide_dma_queued_off(ide_drive_t *drive)
{
if (drive->media != ide_disk)
return 1;
if (ata_pending_commands(drive)) {
printk("ide-tcq; can't toggle tcq feature on busy drive\n");
return 1;
......
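The word-83 test in ide_tcq_configure() above follows the usual ATA rule for optional feature words: the feature bit (bit 1, READ/WRITE DMA QUEUED) and the validity bit 14 must be set while bit 15 must be clear. The tcq_bits/tcq_mask definitions sit outside this hunk, so the standalone function below is only an assumed reconstruction of the same check, with illustrative names.

/* Sketch: does identify-data word 83 (command_set_2) advertise TCQ? */
static int w83_supports_tcq(u16 word83)
{
	const u16 care = (1 << 15) | (1 << 14) | (1 << 1);	/* bits we inspect */
	const u16 want = (1 << 14) | (1 << 1);			/* required pattern */

	/* zero result means bits 14 and 1 are set and bit 15 is clear */
	return ((word83 & care) ^ want) == 0;
}

For example, 0x4002 (bits 14 and 1 set) passes, while 0xC002 (bit 15 also set) fails and would hit the new "TCQ not supported" -EIO path above.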
@@ -508,12 +508,87 @@ ide_proc_entry_t generic_subdriver_entries[] = {
};
#endif
static struct resource* hwif_request_region(ide_hwif_t *hwif,
unsigned long addr, int num)
{
struct resource *res;
if (hwif->mmio)
res = request_mem_region(addr, num, hwif->name);
else
res = request_region(addr, num, hwif->name);
if (!res)
printk(KERN_ERR "%s: %s resource 0x%lX-0x%lX not free.\n",
hwif->name, hwif->mmio ? "MMIO" : "I/O",
addr, addr+num-1);
return res;
}
#define hwif_release_region(addr, num) \
((hwif->mmio) ? release_mem_region((addr),(num)) : release_region((addr),(num)))
/**
* hwif_unregister - free IDE resources
* ide_hwif_request_regions - request resources for IDE
* @hwif: interface to use
*
* Requests all the needed resources for an interface.
* Right now core IDE code does this work which is deeply wrong.
* MMIO leaves it to the controller driver,
* PIO will migrate this way over time.
*/
int ide_hwif_request_regions(ide_hwif_t *hwif)
{
unsigned long addr;
unsigned int i;
if (hwif->mmio == 2)
return 0;
addr = hwif->io_ports[IDE_CONTROL_OFFSET];
if (addr && !hwif_request_region(hwif, addr, 1))
goto control_region_busy;
#if defined(CONFIG_AMIGA) || defined(CONFIG_MAC)
addr = hwif->io_ports[IDE_IRQ_OFFSET];
if (addr && !hwif_request_region(hwif, addr, 1))
goto irq_region_busy;
#endif /* (CONFIG_AMIGA) || (CONFIG_MAC) */
hwif->straight8 = 0;
addr = hwif->io_ports[IDE_DATA_OFFSET];
if ((addr | 7) == hwif->io_ports[IDE_STATUS_OFFSET]) {
if (!hwif_request_region(hwif, addr, 8))
goto data_region_busy;
hwif->straight8 = 1;
return 0;
}
for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) {
addr = hwif->io_ports[i];
if (!hwif_request_region(hwif, addr, 1)) {
while (--i)
hwif_release_region(addr, 1);
goto data_region_busy;
}
}
return 0;
data_region_busy:
#if defined(CONFIG_AMIGA) || defined(CONFIG_MAC)
addr = hwif->io_ports[IDE_IRQ_OFFSET];
if (addr)
hwif_release_region(addr, 1);
irq_region_busy:
#endif /* (CONFIG_AMIGA) || (CONFIG_MAC) */
addr = hwif->io_ports[IDE_CONTROL_OFFSET];
if (addr)
hwif_release_region(addr, 1);
control_region_busy:
/* If any errors are returned, we drop the hwif interface. */
return -EBUSY;
}
EXPORT_SYMBOL(ide_hwif_request_regions);
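Unlike the removed hwif_check_region(), which used check_region()/check_mem_region() and left the actual claim to hwif_register(), the helper above reserves each range directly; check-then-request is racy because another driver can grab the ports between the two calls. The idiom in isolation is sketched below, not taken from this diff, with 0x1f0 used only as the classic primary-IDE example base.

/* Sketch only: the claim/teardown idiom ide_hwif_request_regions() relies on. */
static int claim_ide_ports_example(void)
{
	/*
	 * check_region() merely reports availability, so a separate
	 * request_region() issued later can still lose a race.
	 * request_region() below tests and reserves in a single step.
	 */
	if (!request_region(0x1f0, 8, "ide-example"))
		return -EBUSY;		/* ports already owned by another driver */

	/* ... program the interface ... */

	release_region(0x1f0, 8);	/* matching teardown */
	return 0;
}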
/**
* ide_hwif_release_regions - free IDE resources
*
* Note that we only release the standard ports,
* and do not even try to handle any extra ports
@@ -523,8 +598,7 @@ ide_proc_entry_t generic_subdriver_entries[] = {
* importantly our caller should be doing this so we need to
* restructure this as a helper function for drivers.
*/
void hwif_unregister (ide_hwif_t *hwif)
void ide_hwif_release_regions(ide_hwif_t *hwif)
{
u32 i = 0;
@@ -548,7 +622,7 @@ void hwif_unregister (ide_hwif_t *hwif)
}
}
EXPORT_SYMBOL(hwif_unregister);
EXPORT_SYMBOL(ide_hwif_release_regions);
extern void init_hwif_data(unsigned int index);
@@ -635,7 +709,7 @@ void ide_unregister (unsigned int index)
* and do not even try to handle any extra ports
* allocated for weird IDE interface chipsets.
*/
hwif_unregister(hwif);
ide_hwif_release_regions(hwif);
/*
* Remove us from the hwgroup, and free
@@ -2098,10 +2172,9 @@ int __init ide_setup (char *s)
#ifdef CONFIG_BLK_DEV_IDEPCI
hwif->udma_four = 1;
goto done;
#else /* !CONFIG_BLK_DEV_IDEPCI */
hwif->udma_four = 0;
#else
goto bad_hwif;
#endif /* CONFIG_BLK_DEV_IDEPCI */
#endif
case -6: /* dma */
hwif->autodma = 1;
goto done;
@@ -2520,13 +2593,9 @@ struct bus_type ide_bus_type = {
*/
int __init ide_init (void)
{
static char banner_printed;
if (!banner_printed) {
printk(KERN_INFO "Uniform Multi-Platform E-IDE driver " REVISION "\n");
devfs_mk_dir("ide");
system_bus_speed = ide_system_bus_speed();
banner_printed = 1;
}
bus_register(&ide_bus_type);
......
@@ -726,7 +726,6 @@ typedef struct ide_drive_s {
unsigned ata_flash : 1; /* 1=present, 0=default */
unsigned blocked : 1; /* 1=power management told us not to do anything, so sleep nicely */
unsigned vdma : 1; /* 1=doing PIO over DMA 0=doing normal DMA */
unsigned queue_setup : 1;
unsigned addressing; /* : 3;
* 0=28-bit
* 1=48-bit
@@ -1777,7 +1776,8 @@ static inline int __ide_dma_queued_off(ide_drive_t *drive)
static inline void ide_release_dma(ide_hwif_t *drive) {;}
#endif
extern void hwif_unregister(ide_hwif_t *);
extern int ide_hwif_request_regions(ide_hwif_t *hwif);
extern void ide_hwif_release_regions(ide_hwif_t* hwif);
extern void ide_unregister (unsigned int index);
extern void export_ide_init_queue(ide_drive_t *);
......