Commit 17b581c6 authored by Martin Dalecki, committed by Linus Torvalds

[PATCH] 2.5.8-pre3 IDE 31

- Integrate the TCQ work from Jens Axboe: resolve the conflicts and apply some
   cosmetic changes. We are still not at a stage where we could immediately
   integrate ata_request and ata_taskfile, but we are no longer far away.

- Clean up the data transfer function in ide-disk to use ata_request structures
   directly.

- Kill useless leading version information in ide-disk.c.

- Replace the ATA_AR_INIT macro with an inline ata_ar_init() function (this and
   the two tag helpers below are sketched at the end of this log message).

- Replace IDE_CLEAR_TAG with ata_clear_tag().

- Replace IDE_SET_TAG with ata_set_tag().

- Kill the gorgeous ide_dmafunc_verbose().

- Fix a typo in ide_enable_queued() (ide-tcq.c!).

Apparently there are still problems with a TCQ-enabled device and a non-TCQ
device on the same channel, but let's first synchronize with Jens.
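
The helpers named in the log above are not spelled out here, so as a rough
illustration only, this is what inline replacements for ATA_AR_INIT,
IDE_SET_TAG and IDE_CLEAR_TAG typically look like. The struct layouts, field
names and bitmap bookkeeping below are assumptions made for the sketch, not
the actual 2.5.8-pre3 definitions.

    /* Illustrative sketch only: layouts and names are assumed,
     * not copied from the 2.5.8-pre3 headers. */
    #include <string.h>

    #define MAX_TAGS 32

    struct ata_taskfile {
            unsigned char command;          /* opcode to issue */
            /* ... rest of the register file omitted ... */
    };

    struct ata_request {
            struct ata_taskfile ar_task;    /* taskfile for this request */
            int ar_tag;                     /* tag owned by this request, or -1 */
    };

    struct example_drive {
            unsigned long tag_mask;                 /* bit n set => tag n in flight */
            struct ata_request *ar[MAX_TAGS];       /* outstanding requests, by tag */
    };

    /* ATA_AR_INIT as a function: reset a request before it is queued. */
    static inline void ata_ar_init(struct example_drive *drive,
                                   struct ata_request *ar)
    {
            (void)drive;
            memset(ar, 0, sizeof(*ar));
            ar->ar_tag = -1;
    }

    /* IDE_SET_TAG as a function: record that a tag is now owned by a request. */
    static inline void ata_set_tag(struct example_drive *drive,
                                   struct ata_request *ar, int tag)
    {
            drive->tag_mask |= 1UL << tag;
            drive->ar[tag] = ar;
            ar->ar_tag = tag;
    }

    /* IDE_CLEAR_TAG as a function: release the tag once the queued command
     * has completed. */
    static inline void ata_clear_tag(struct example_drive *drive,
                                     struct ata_request *ar)
    {
            if (ar->ar_tag >= 0) {
                    drive->tag_mask &= ~(1UL << ar->ar_tag);
                    drive->ar[ar->ar_tag] = NULL;
                    ar->ar_tag = -1;
            }
    }
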
parent a87cd868
......@@ -744,6 +744,28 @@ CONFIG_IDEDMA_ONLYDISK
Generally say N here.
CONFIG_BLK_DEV_IDE_TCQ
Support for tagged command queueing on ATA disk drives. This enables
the IDE layer to have multiple in-flight requests on hardware that
supports it. For now this includes the IBM Deskstar series drives,
such as the GXP75, 40GV, GXP60, and GXP120 (i.e. any Deskstar made in
the last couple of years).
If you have such a drive, say Y here.
CONFIG_BLK_DEV_IDE_TCQ_DEFAULT
Enable tagged command queueing unconditionally on drives that report
support for it.
Generally say Y here.
CONFIG_BLK_DEV_IDE_TCQ_DEPTH
Maximum number of queued commands to enable per drive. Any value between 1
and 32 is valid, with 32 being the maximum that the hardware supports.
You probably just want the default of 32 here. If you enter an invalid
number, the default value will be used. (A short sketch of this clamping
follows below.)
CONFIG_BLK_DEV_IT8172
Say Y here to support the on-board IDE controller on the Integrated
Technology Express, Inc. ITE8172 SBC. Vendor page at
......
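
As an aside, here is a small stand-alone sketch of the clamping described for
CONFIG_BLK_DEV_IDE_TCQ_DEPTH above, matching the do_identify() change later in
this patch. The decoding of IDENTIFY word 75 (bits 4:0 hold the maximum queue
depth minus one) follows the ATA specification; the EXAMPLE_* macros and the
helper name are made up for illustration and are not part of the patch.

    /* Sketch: turn the drive-reported queue depth into the depth actually used.
     * EXAMPLE_* stand in for CONFIG_BLK_DEV_IDE_TCQ_DEPTH and IDE_MAX_TAG. */
    #include <stdio.h>

    #define EXAMPLE_CONFIG_DEPTH    32
    #define EXAMPLE_MAX_TAG         32

    static int tcq_depth_from_identify(unsigned short word75)
    {
            int depth = (word75 & 0x1f) + 1;        /* bits 4:0 = max depth - 1 */

            if (depth > EXAMPLE_CONFIG_DEPTH)       /* honour the configured limit */
                    depth = EXAMPLE_CONFIG_DEPTH;
            if (depth < 1 || depth > EXAMPLE_MAX_TAG)
                    depth = EXAMPLE_MAX_TAG;        /* fall back to the hardware maximum */
            return depth;
    }

    int main(void)
    {
            /* a drive with a 32-deep queue reports 31 in bits 4:0 of word 75 */
            printf("depth = %d\n", tcq_depth_from_identify(31));
            return 0;
    }
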
......@@ -47,6 +47,11 @@ if [ "$CONFIG_BLK_DEV_IDE" != "n" ]; then
dep_bool ' Use PCI DMA by default when available' CONFIG_IDEDMA_PCI_AUTO $CONFIG_BLK_DEV_IDEDMA_PCI
dep_bool ' Enable DMA only for disks ' CONFIG_IDEDMA_ONLYDISK $CONFIG_IDEDMA_PCI_AUTO
define_bool CONFIG_BLK_DEV_IDEDMA $CONFIG_BLK_DEV_IDEDMA_PCI
dep_bool ' ATA tagged command queueing' CONFIG_BLK_DEV_IDE_TCQ $CONFIG_BLK_DEV_IDEDMA_PCI
dep_bool ' TCQ on by default' CONFIG_BLK_DEV_IDE_TCQ_DEFAULT $CONFIG_BLK_DEV_IDE_TCQ
if [ $CONFIG_BLK_DEV_IDE_TCQ_DEFAULT != "n" ]; then
int ' Default queue depth' CONFIG_BLK_DEV_IDE_TCQ_DEPTH 32
fi
dep_bool ' ATA Work(s) In Progress (EXPERIMENTAL)' CONFIG_IDEDMA_PCI_WIP $CONFIG_BLK_DEV_IDEDMA_PCI $CONFIG_EXPERIMENTAL
dep_bool ' Good-Bad DMA Model-Firmware (WIP)' CONFIG_IDEDMA_NEW_DRIVE_LISTINGS $CONFIG_IDEDMA_PCI_WIP
dep_bool ' AEC62XX chipset support' CONFIG_BLK_DEV_AEC62XX $CONFIG_BLK_DEV_IDEDMA_PCI
......
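
With the three new prompts above answered Y (and the suggested depth
accepted), the resulting .config fragment would look roughly like this; the
values shown are only the defaults offered by the prompts:

    CONFIG_BLK_DEV_IDE_TCQ=y
    CONFIG_BLK_DEV_IDE_TCQ_DEFAULT=y
    CONFIG_BLK_DEV_IDE_TCQ_DEPTH=32
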
......@@ -45,6 +45,7 @@ ide-obj-$(CONFIG_BLK_DEV_HPT366) += hpt366.o
ide-obj-$(CONFIG_BLK_DEV_HT6560B) += ht6560b.o
ide-obj-$(CONFIG_BLK_DEV_IDE_ICSIDE) += icside.o
ide-obj-$(CONFIG_BLK_DEV_IDEDMA_PCI) += ide-dma.o
ide-obj-$(CONFIG_BLK_DEV_IDE_TCQ) += ide-tcq.o
ide-obj-$(CONFIG_BLK_DEV_IDEPCI) += ide-pci.o
ide-obj-$(CONFIG_BLK_DEV_ISAPNP) += ide-pnp.o
ide-obj-$(CONFIG_BLK_DEV_IDE_PMAC) += ide-pmac.o
......
......@@ -75,8 +75,6 @@
#include <linux/proc_fs.h>
#endif /* defined(DISPLAY_HPT366_TIMINGS) && defined(CONFIG_PROC_FS) */
extern char *ide_dmafunc_verbose(ide_dma_action_t dmafunc);
const char *quirk_drives[] = {
"QUANTUM FIREBALLlct08 08",
"QUANTUM FIREBALLP KA6.4",
......@@ -815,10 +813,8 @@ int hpt366_dmaproc (ide_dma_action_t func, ide_drive_t *drive)
pci_read_config_byte(drive->channel->pci_dev, 0x50, &reg50h);
pci_read_config_byte(drive->channel->pci_dev, 0x52, &reg52h);
pci_read_config_byte(drive->channel->pci_dev, 0x5a, &reg5ah);
printk("%s: (%s) reg50h=0x%02x, reg52h=0x%02x, reg5ah=0x%02x\n",
drive->name,
ide_dmafunc_verbose(func),
reg50h, reg52h, reg5ah);
printk("%s: (ide_dma_lostirq) reg50h=0x%02x, reg52h=0x%02x, reg5ah=0x%02x\n",
drive->name, reg50h, reg52h, reg5ah);
if (reg5ah & 0x10)
pci_write_config_byte(drive->channel->pci_dev, 0x5a, reg5ah & ~0x10);
/* fall through to a reset */
......
......@@ -27,7 +27,6 @@
#include <asm/io.h>
extern char *ide_xfer_verbose (byte xfer_rate);
extern char *ide_dmafunc_verbose(ide_dma_action_t dmafunc);
/*
* Maximum number of interfaces per card
......@@ -467,8 +466,7 @@ icside_dmaproc(ide_dma_action_t func, ide_drive_t *drive)
case ide_dma_timeout:
default:
printk("icside_dmaproc: unsupported %s func: %d\n",
ide_dmafunc_verbose(func), func);
printk("icside_dmaproc: unsupported function: %d\n", func);
}
return 1;
}
......
......@@ -69,34 +69,6 @@ char *ide_xfer_verbose (byte xfer_rate)
}
}
/*
* A Verbose noise maker for debugging on the attempted dmaing calls.
*/
char *ide_dmafunc_verbose (ide_dma_action_t dmafunc)
{
switch (dmafunc) {
case ide_dma_read: return("ide_dma_read");
case ide_dma_write: return("ide_dma_write");
case ide_dma_begin: return("ide_dma_begin");
case ide_dma_end: return("ide_dma_end:");
case ide_dma_check: return("ide_dma_check");
case ide_dma_on: return("ide_dma_on");
case ide_dma_off: return("ide_dma_off");
case ide_dma_off_quietly: return("ide_dma_off_quietly");
case ide_dma_test_irq: return("ide_dma_test_irq");
case ide_dma_bad_drive: return("ide_dma_bad_drive");
case ide_dma_good_drive: return("ide_dma_good_drive");
case ide_dma_verbose: return("ide_dma_verbose");
case ide_dma_retune: return("ide_dma_retune");
case ide_dma_lostirq: return("ide_dma_lostirq");
case ide_dma_timeout: return("ide_dma_timeout");
default: return("unknown");
}
}
/*
*
*/
byte ide_auto_reduce_xfer (ide_drive_t *drive)
{
if (!drive->crc_count)
......@@ -122,9 +94,6 @@ byte ide_auto_reduce_xfer (ide_drive_t *drive)
}
}
/*
* Update the
*/
int ide_driveid_update (ide_drive_t *drive)
{
/*
......@@ -195,10 +164,10 @@ int ide_ata66_check (ide_drive_t *drive, struct ata_taskfile *args)
}
#ifndef CONFIG_IDEDMA_IVB
if ((drive->id->hw_config & 0x6000) == 0) {
#else /* !CONFIG_IDEDMA_IVB */
#else
if (((drive->id->hw_config & 0x2000) == 0) ||
((drive->id->hw_config & 0x4000) == 0)) {
#endif /* CONFIG_IDEDMA_IVB */
#endif
printk("%s: Speed warnings UDMA 3/4/5 is not functional.\n", drive->name);
return 1;
}
......@@ -232,7 +201,7 @@ byte eighty_ninty_three (ide_drive_t *drive)
return ((byte) ((drive->channel->udma_four) &&
#ifndef CONFIG_IDEDMA_IVB
(drive->id->hw_config & 0x4000) &&
#endif /* CONFIG_IDEDMA_IVB */
#endif
(drive->id->hw_config & 0x6000)) ? 1 : 0);
}
......
......@@ -52,7 +52,6 @@
#endif
#include "ata-timing.h"
extern char *ide_dmafunc_verbose(ide_dma_action_t dmafunc);
extern spinlock_t ide_lock;
#undef IDE_PMAC_DEBUG
......@@ -1460,10 +1459,10 @@ int pmac_ide_dmaproc(ide_dma_action_t func, ide_drive_t *drive)
case ide_dma_retune:
case ide_dma_lostirq:
case ide_dma_timeout:
printk(KERN_WARNING "ide_pmac_dmaproc: chipset supported %s func only: %d\n", ide_dmafunc_verbose(func), func);
printk(KERN_WARNING "ide_pmac_dmaproc: chipset supported func only: %d\n", func);
return 1;
default:
printk(KERN_WARNING "ide_pmac_dmaproc: unsupported %s func: %d\n", ide_dmafunc_verbose(func), func);
printk(KERN_WARNING "ide_pmac_dmaproc: unsupported func: %d\n", func);
return 1;
}
return 0;
......
......@@ -189,6 +189,21 @@ static inline void do_identify (ide_drive_t *drive, byte cmd)
if (drive->channel->quirkproc)
drive->quirk_list = drive->channel->quirkproc(drive);
/*
* it's an ata drive, build command list
*/
#ifndef CONFIG_BLK_DEV_IDE_TCQ
drive->queue_depth = 1;
#else
drive->queue_depth = drive->id->queue_depth + 1;
if (drive->queue_depth > CONFIG_BLK_DEV_IDE_TCQ_DEPTH)
drive->queue_depth = CONFIG_BLK_DEV_IDE_TCQ_DEPTH;
if (drive->queue_depth < 1 || drive->queue_depth > IDE_MAX_TAG)
drive->queue_depth = IDE_MAX_TAG;
#endif
if (ide_build_commandlist(drive))
goto err_misc;
return;
err_misc:
......@@ -593,10 +608,10 @@ static void ide_init_queue(ide_drive_t *drive)
blk_queue_max_sectors(q, max_sectors);
/* IDE DMA can do PRD_ENTRIES number of segments. */
blk_queue_max_hw_segments(q, PRD_ENTRIES);
blk_queue_max_hw_segments(q, PRD_SEGMENTS);
/* This is a driver limit and could be eliminated. */
blk_queue_max_phys_segments(q, PRD_ENTRIES);
blk_queue_max_phys_segments(q, PRD_SEGMENTS);
}
#if MAX_HWIFS > 1
......
......@@ -291,7 +291,8 @@ void ata_poll_drive_ready(ide_drive_t *drive)
static ide_startstop_t pre_task_mulout_intr(ide_drive_t *drive, struct request *rq)
{
struct ata_taskfile *args = rq->special;
struct ata_request *ar = rq->special;
struct ata_taskfile *args = &ar->ar_task;
ide_startstop_t startstop;
/*
......@@ -434,11 +435,35 @@ ide_startstop_t ata_taskfile(ide_drive_t *drive,
if (args->prehandler != NULL)
return args->prehandler(drive, rq);
} else {
/* for dma commands we down set the handler */
if (drive->using_dma &&
!(drive->channel->dmaproc(((args->taskfile.command == WIN_WRITEDMA)
|| (args->taskfile.command == WIN_WRITEDMA_EXT))
? ide_dma_write : ide_dma_read, drive)));
ide_dma_action_t dmaaction;
u8 command;
if (!drive->using_dma)
return ide_started;
command = args->taskfile.command;
#ifdef CONFIG_BLK_DEV_IDE_TCQ
if (drive->using_tcq) {
if (command == WIN_READDMA_QUEUED
|| command == WIN_READDMA_QUEUED_EXT
|| command == WIN_WRITEDMA_QUEUED
|| command == WIN_WRITEDMA_QUEUED_EXT)
return ide_start_tag(ide_dma_queued_start, drive, rq->special);
}
#endif
if (command == WIN_WRITEDMA || command == WIN_WRITEDMA_EXT)
dmaaction = ide_dma_write;
else if (command == WIN_READDMA || command == WIN_READDMA_EXT)
dmaaction = ide_dma_read;
else
return ide_stopped;
if (!drive->channel->dmaproc(dmaaction, drive))
return ide_started;
return ide_stopped;
}
return ide_started;
......@@ -495,8 +520,9 @@ ide_startstop_t recal_intr(ide_drive_t *drive)
*/
ide_startstop_t task_no_data_intr (ide_drive_t *drive)
{
struct ata_taskfile *args = HWGROUP(drive)->rq->special;
byte stat = GET_STAT();
struct ata_request *ar = HWGROUP(drive)->rq->special;
struct ata_taskfile *args = &ar->ar_task;
u8 stat = GET_STAT();
ide__sti(); /* local CPU only */
......@@ -555,7 +581,8 @@ static ide_startstop_t task_in_intr (ide_drive_t *drive)
static ide_startstop_t pre_task_out_intr(ide_drive_t *drive, struct request *rq)
{
struct ata_taskfile *args = rq->special;
struct ata_request *ar = rq->special;
struct ata_taskfile *args = &ar->ar_task;
ide_startstop_t startstop;
if (ide_wait_stat(&startstop, drive, DATA_READY, drive->bad_wstat, WAIT_DRQ)) {
......@@ -644,7 +671,7 @@ static ide_startstop_t task_mulin_intr(ide_drive_t *drive)
pBuf = ide_map_rq(rq, &flags);
DTF("Multiread: %p, nsect: %d , rq->current_nr_sectors: %ld\n",
DTF("Multiread: %p, nsect: %d , rq->current_nr_sectors: %d\n",
pBuf, nsect, rq->current_nr_sectors);
drive->io_32bit = 0;
taskfile_input_data(drive, pBuf, nsect * SECTOR_WORDS);
......@@ -858,7 +885,7 @@ void ide_cmd_type_parser(struct ata_taskfile *args)
/*
* This function is intended to be used prior to invoking ide_do_drive_cmd().
*/
static void init_taskfile_request(struct request *rq)
void init_taskfile_request(struct request *rq)
{
memset(rq, 0, sizeof(*rq));
rq->flags = REQ_DRIVE_TASKFILE;
......@@ -875,23 +902,24 @@ static void init_taskfile_request(struct request *rq)
int ide_wait_taskfile(ide_drive_t *drive, struct hd_drive_task_hdr *taskfile, struct hd_drive_hob_hdr *hobfile, byte *buf)
{
struct request rq;
/* FIXME: This is on stack! */
struct ata_taskfile args;
struct ata_request ar;
struct ata_taskfile *args = &ar.ar_task;
memset(&args, 0, sizeof(args));
ata_ar_init(drive, &ar);
args.taskfile = *taskfile;
args.hobfile = *hobfile;
memcpy(&args->taskfile, taskfile, sizeof(*taskfile));
if (hobfile)
memcpy(&args->hobfile, hobfile, sizeof(*hobfile));
init_taskfile_request(&rq);
/* This is kept for internal use only !!! */
ide_cmd_type_parser(&args);
if (args.command_type != IDE_DRIVE_TASK_NO_DATA)
ide_cmd_type_parser(args);
if (args->command_type != IDE_DRIVE_TASK_NO_DATA)
rq.current_nr_sectors = rq.nr_sectors = (hobfile->sector_count << 8) | taskfile->sector_count;
rq.buffer = buf;
rq.special = &args;
rq.special = &ar;
return ide_do_drive_cmd(drive, &rq, ide_wait);
}
......@@ -899,15 +927,19 @@ int ide_wait_taskfile(ide_drive_t *drive, struct hd_drive_task_hdr *taskfile, st
int ide_raw_taskfile(ide_drive_t *drive, struct ata_taskfile *args, byte *buf)
{
struct request rq;
struct ata_request ar;
ata_ar_init(drive, &ar);
init_taskfile_request(&rq);
rq.buffer = buf;
memcpy(&ar.ar_task, args, sizeof(*args));
if (args->command_type != IDE_DRIVE_TASK_NO_DATA)
rq.current_nr_sectors = rq.nr_sectors
= (args->hobfile.sector_count << 8)
| args->taskfile.sector_count;
rq.special = args;
rq.special = &ar;
return ide_do_drive_cmd(drive, &rq, ide_wait);
}
......
......@@ -1057,6 +1057,12 @@ int pdc202xx_dmaproc (ide_dma_action_t func, ide_drive_t *drive)
case ide_dma_timeout:
if (drive->channel->resetproc != NULL)
drive->channel->resetproc(drive);
/*
 * we cannot support queued operations on promise, so fail
 * to enable it...
 */
case ide_dma_queued_on:
return 1;
default:
break;
}
......
......@@ -34,6 +34,7 @@
#define ECC_STAT 0x04 /* Corrected error */
#define DRQ_STAT 0x08
#define SEEK_STAT 0x10
#define SERVICE_STAT SEEK_STAT
#define WRERR_STAT 0x20
#define READY_STAT 0x40
#define BUSY_STAT 0x80
......@@ -49,6 +50,13 @@
#define BBD_ERR 0x80 /* pre-EIDE meaning: block marked bad */
#define ICRC_ERR 0x80 /* new meaning: CRC error during transfer */
/*
* bits of NSECTOR reg
*/
#define NSEC_CD 0x1
#define NSEC_IO 0x2
#define NSEC_REL 0x4
/*
* Command Header sizes for IOCTL commands
* HDIO_DRIVE_CMD and HDIO_DRIVE_TASK
......