Commit d8bffa93 authored by Martin Dalecki, committed by Linus Torvalds

[PATCH] 2.5.8 IDE 39

Synchronize with Jens Axboe:

- Congruent ATA_AR_POOL fix to the ATA_AR_STATIC memory corruption fix.

- Multi-sector write handling fix.

- Fix drive capability deduction (the TCQ support check against identify word 83).

- Various other minor fixes.
parent 43c1ab59
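
Editor's note on the ATA_AR_POOL change: the patch inverts how ata_request ownership is marked. Instead of every on-stack request having to set ATA_AR_STATIC so that ata_ar_put() would skip it (forgetting the flag let a stack object be linked into the free list and corrupt memory), ata_ar_get() now tags requests that really came from the drive's free pool, and ata_ar_put() only returns those. A minimal standalone sketch of that idea follows; it is not kernel code, and the struct, the singly linked free list and main() are simplified stand-ins:

/*
 * Standalone sketch (not kernel code) of the ATA_AR_STATIC -> ATA_AR_POOL
 * inversion: pool origin is recorded at get time, so a request that was
 * never taken from the pool can never be put back into it.
 */
#include <stdio.h>

#define ATA_AR_POOL 4                   /* originated from drive pool */

struct ata_request {
        unsigned int ar_flags;
        struct ata_request *next;       /* simplified free list link */
};

static struct ata_request pool[2] = { { 0, &pool[1] }, { 0, NULL } };
static struct ata_request *free_req = &pool[0];

static struct ata_request *ata_ar_get(void)
{
        struct ata_request *ar = free_req;

        if (ar) {
                free_req = ar->next;
                ar->ar_flags = ATA_AR_POOL;     /* remember where it came from */
        }
        return ar;
}

static void ata_ar_put(struct ata_request *ar)
{
        /* a stack or static request simply never re-enters the pool */
        if (ar->ar_flags & ATA_AR_POOL) {
                ar->next = free_req;
                free_req = ar;
        }
}

int main(void)
{
        struct ata_request star = { 0, NULL };  /* on-stack, like ide_raw_taskfile() uses */
        struct ata_request *ar = ata_ar_get();

        ata_ar_put(&star);      /* no-op: not pool-born, nothing gets corrupted */
        ata_ar_put(ar);         /* returned to the free list */
        printf("head is pool[0]: %d\n", free_req == &pool[0]);
        return 0;
}
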
@@ -599,13 +599,19 @@ int ide_dmaproc (ide_dma_action_t func, ide_drive_t *drive)
                 printk("%s: DMA disabled\n", drive->name);
         case ide_dma_off_quietly:
                 set_high = 0;
+                drive->using_tcq = 0;
                 outb(inb(dma_base+2) & ~(1<<(5+unit)), dma_base+2);
+#ifdef CONFIG_BLK_DEV_IDE_TCQ
+                hwif->dmaproc(ide_dma_queued_off, drive);
+#endif
         case ide_dma_on:
                 ide_toggle_bounce(drive, set_high);
                 drive->using_dma = (func == ide_dma_on);
-                if (drive->using_dma)
+                if (drive->using_dma) {
                         outb(inb(dma_base+2)|(1<<(5+unit)), dma_base+2);
+#ifdef CONFIG_BLK_DEV_IDE_TCQ_DEFAULT
+                        hwif->dmaproc(ide_dma_queued_on, drive);
+#endif
+                }
                 return 0;
         case ide_dma_check:
                 return config_drive_for_dma (drive);
...
@@ -413,6 +413,20 @@ ide_startstop_t ata_taskfile(ide_drive_t *drive,
         struct hd_driveid *id = drive->id;
         u8 HIHI = (drive->addressing) ? 0xE0 : 0xEF;
+#if 0
+        printk("ata_taskfile ... %p\n", args->handler);
+        printk(" sector feature %02x\n", args->taskfile.feature);
+        printk(" sector count %02x\n", args->taskfile.sector_count);
+        printk(" drive/head %02x\n", args->taskfile.device_head);
+        printk(" command %02x\n", args->taskfile.command);
+        if (rq)
+                printk(" rq->nr_sectors %2li\n", rq->nr_sectors);
+        else
+                printk(" rq-> = null\n");
+#endif
         /* (ks/hs): Moved to start, do not use for multiple out commands */
         if (args->handler != task_mulout_intr) {
                 if (IDE_CONTROL_REG)
@@ -577,18 +591,22 @@ static ide_startstop_t task_in_intr (ide_drive_t *drive)
         ata_read(drive, pBuf, SECTOR_WORDS);
         ide_unmap_rq(rq, pBuf, &flags);
+        /*
+         * first segment of the request is complete. note that this does not
+         * necessarily mean that the entire request is done!! this is only
+         * true if ide_end_request() returns 0.
+         */
         if (--rq->current_nr_sectors <= 0) {
-                /* (hs): swapped next 2 lines */
                 DTF("Request Ended stat: %02x\n", GET_STAT());
-                if (ide_end_request(drive, 1)) {
-                        ide_set_handler(drive, &task_in_intr, WAIT_CMD, NULL);
-                        return ide_started;
-                }
-        } else {
-                ide_set_handler(drive, &task_in_intr, WAIT_CMD, NULL);
-                return ide_started;
+                if (!ide_end_request(drive, 1))
+                        return ide_stopped;
         }
-        return ide_stopped;
+        /*
+         * still data left to transfer
+         */
+        ide_set_handler(drive, &task_in_intr, WAIT_CMD, NULL);
+        return ide_started;
 }
 static ide_startstop_t pre_task_out_intr(ide_drive_t *drive, struct request *rq)
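
Editor's note: the reworked task_in_intr() above relies on ide_end_request() returning 0 only when the whole request is finished, as the added comment states; ending the current segment otherwise just rearms the handler for the next one. A toy model of that control flow, assuming those return-value semantics; the toy_* names and the numbers are made up for illustration:

#include <stdio.h>

struct toy_rq {
        int nr_segments;                /* segments still queued */
        int current_nr_sectors;         /* sectors left in this segment */
};

/* returns 1 while more segments remain, 0 once the request is complete */
static int toy_end_request(struct toy_rq *rq)
{
        if (--rq->nr_segments > 0) {
                rq->current_nr_sectors = 2;     /* start the next segment */
                return 1;
        }
        return 0;
}

/* one "interrupt": consume a sector, decide whether to rearm the handler */
static int toy_task_in_intr(struct toy_rq *rq)
{
        if (--rq->current_nr_sectors <= 0)
                if (!toy_end_request(rq))
                        return 0;               /* ide_stopped */
        return 1;                               /* ide_started: handler rearmed */
}

int main(void)
{
        struct toy_rq rq = { .nr_segments = 2, .current_nr_sectors = 2 };
        int irqs = 0;

        while (toy_task_in_intr(&rq))
                irqs++;
        printf("stopped after %d interrupts\n", irqs + 1);      /* 4 */
        return 0;
}
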
@@ -874,7 +892,6 @@ void ide_cmd_type_parser(struct ata_taskfile *args)
                 return;
         case WIN_NOP:
                 args->command_type = IDE_DRIVE_TASK_NO_DATA;
                 return;
@@ -904,11 +921,6 @@ int ide_raw_taskfile(ide_drive_t *drive, struct ata_taskfile *args, byte *buf)
         struct ata_request star;
         ata_ar_init(drive, &star);
-        /* Don't put this request on free_req list after usage.
-         */
-        star.ar_flags |= ATA_AR_STATIC;
         init_taskfile_request(&rq);
         rq.buffer = buf;
...
@@ -51,21 +51,17 @@
  */
 #undef IDE_TCQ_FIDDLE_SI
-/*
- * wait for data phase before starting DMA or not
- */
-#undef IDE_TCQ_WAIT_DATAPHASE
 ide_startstop_t ide_dmaq_intr(ide_drive_t *drive);
 ide_startstop_t ide_service(ide_drive_t *drive);
 static inline void drive_ctl_nien(ide_drive_t *drive, int clear)
 {
 #ifdef IDE_TCQ_NIEN
-        int mask = clear ? 0x00 : 0x02;
-        if (IDE_CONTROL_REG)
+        if (IDE_CONTROL_REG) {
+                int mask = clear ? 0x00 : 0x02;
                 OUT_BYTE(drive->ctl | mask, IDE_CONTROL_REG);
+        }
 #endif
 }
@@ -123,7 +119,6 @@ static void ide_tcq_invalidate_queue(ide_drive_t *drive)
         init_taskfile_request(ar->ar_rq);
         ar->ar_rq->rq_dev = mk_kdev(drive->channel->major, (drive->select.b.unit)<<PARTN_BITS);
         ar->ar_rq->special = ar;
-        ar->ar_flags |= ATA_AR_RETURN;
         _elv_add_request(q, ar->ar_rq, 0, 0);
         /*
@@ -222,7 +217,7 @@ ide_startstop_t ide_service(ide_drive_t *drive)
 {
         struct ata_request *ar;
         byte feat, stat;
-        int tag;
+        int tag, ret;
         TCQ_PRINTK("%s: started service\n", drive->name);
@@ -272,9 +267,6 @@ ide_startstop_t ide_service(ide_drive_t *drive)
                 return ide_stopped;
         }
-        /*
-         * start dma
-         */
         tag = feat >> 3;
         IDE_SET_CUR_TAG(drive, tag);
@@ -293,16 +285,16 @@ ide_startstop_t ide_service(ide_drive_t *drive)
         */
         if (rq_data_dir(ar->ar_rq) == READ) {
                 TCQ_PRINTK("ide_service: starting READ %x\n", stat);
-                drive->channel->dmaproc(ide_dma_read_queued, drive);
+                ret = drive->channel->dmaproc(ide_dma_read_queued, drive);
         } else {
                 TCQ_PRINTK("ide_service: starting WRITE %x\n", stat);
-                drive->channel->dmaproc(ide_dma_write_queued, drive);
+                ret = drive->channel->dmaproc(ide_dma_write_queued, drive);
         }
         /*
          * dmaproc set intr handler
          */
-        return ide_started;
+        return !ret ? ide_started : ide_stopped;
 }
 ide_startstop_t ide_check_service(ide_drive_t *drive)
@@ -410,14 +402,15 @@ ide_startstop_t ide_dmaq_intr(ide_drive_t *drive)
  */
 static int ide_tcq_configure(ide_drive_t *drive)
 {
+        int tcq_mask = 1 << 1 | 1 << 14;
+        int tcq_bits = tcq_mask | 1 << 15;
         struct ata_taskfile args;
-        int tcq_supp = 1 << 1 | 1 << 14;
         /*
          * bit 14 and 1 must be set in word 83 of the device id to indicate
-         * support for dma queued protocol
+         * support for dma queued protocol, and bit 15 must be cleared
          */
-        if ((drive->id->command_set_2 & tcq_supp) != tcq_supp)
+        if ((drive->id->command_set_2 & tcq_bits) ^ tcq_mask)
                 return -EIO;
         memset(&args, 0, sizeof(args));
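
Editor's note: the new check requires bits 1 and 14 of identify word 83 to be set and bit 15 to be clear (bits 15:14 = 01 mark the word as containing valid data), which the single expression (command_set_2 & tcq_bits) ^ tcq_mask captures. A standalone sketch of the same predicate; supports_tcq() is a made-up helper for illustration, not a driver function:

#include <stdio.h>

static int supports_tcq(unsigned short command_set_2)
{
        const unsigned short tcq_mask = 1 << 1 | 1 << 14;
        const unsigned short tcq_bits = tcq_mask | 1 << 15;

        /* non-zero result: a required bit is missing or bit 15 is set */
        return ((command_set_2 & tcq_bits) ^ tcq_mask) == 0;
}

int main(void)
{
        printf("%d\n", supports_tcq(0x4002));   /* 1: word valid, DMA QUEUED bit set */
        printf("%d\n", supports_tcq(0x4000));   /* 0: DMA QUEUED bit missing */
        printf("%d\n", supports_tcq(0xC002));   /* 0: word 83 not valid (bit 15 set) */
        return 0;
}
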
@@ -477,10 +470,13 @@ static int ide_tcq_configure(ide_drive_t *drive)
  */
 static int ide_enable_queued(ide_drive_t *drive, int on)
 {
+        int depth = drive->using_tcq ? drive->queue_depth : 0;
         /*
          * disable or adjust queue depth
          */
         if (!on) {
-                printk("%s: TCQ disabled\n", drive->name);
+                if (drive->using_tcq)
+                        printk("%s: TCQ disabled\n", drive->name);
                 drive->using_tcq = 0;
                 return 0;
@@ -491,25 +487,33 @@ static int ide_enable_queued(ide_drive_t *drive, int on)
                 return 1;
         }
+        /*
+         * possibly expand command list
+         */
         if (ide_build_commandlist(drive))
                 return 1;
-        printk("%s: tagged command queueing enabled, command queue depth %d\n", drive->name, drive->queue_depth);
+        if (depth != drive->queue_depth)
+                printk("%s: tagged command queueing enabled, command queue depth %d\n", drive->name, drive->queue_depth);
         drive->using_tcq = 1;
+        /*
+         * clear stats
+         */
         drive->tcq->max_depth = 0;
         return 0;
 }
 int ide_tcq_wait_dataphase(ide_drive_t *drive)
 {
-#ifdef IDE_TCQ_WAIT_DATAPHASE
         ide_startstop_t foo;
-        if (ide_wait_stat(&startstop, drive, READY_STAT | DRQ_STAT, BUSY_STAT, WAIT_READY)) {
+        if (ide_wait_stat(&foo, drive, READY_STAT | DRQ_STAT, BUSY_STAT, WAIT_READY)) {
                 printk("%s: timeout waiting for data phase\n", drive->name);
                 return 1;
         }
-#endif
         return 0;
 }
@@ -595,6 +599,8 @@ int ide_tcq_dmaproc(ide_dma_action_t func, ide_drive_t *drive)
                 if (ide_start_dma(hwif, drive, func))
                         return ide_stopped;
+                TCQ_PRINTK("IMMED in queued_start\n");
                 /*
                  * need to arm handler before starting dma engine,
                  * transfer could complete right away
@@ -612,6 +618,8 @@ int ide_tcq_dmaproc(ide_dma_action_t func, ide_drive_t *drive)
         case ide_dma_queued_off:
                 enable_tcq = 0;
         case ide_dma_queued_on:
+                if (enable_tcq && !drive->using_dma)
+                        return 1;
                 return ide_enable_queued(drive, enable_tcq);
         default:
                 break;
...
@@ -803,7 +803,6 @@ void ide_end_drive_cmd(ide_drive_t *drive, byte stat, byte err)
                         args->hobfile.high_cylinder = IN_BYTE(IDE_HCYL_REG);
                 }
         }
-        if (ar->ar_flags & ATA_AR_RETURN)
-                ata_ar_put(drive, ar);
+        ata_ar_put(drive, ar);
 }
...
@@ -968,10 +968,9 @@ extern void revalidate_drives(void);
 /*
  * ata_request flag bits
  */
-#define ATA_AR_QUEUED 1
-#define ATA_AR_SETUP 2
-#define ATA_AR_RETURN 4
-#define ATA_AR_STATIC 8
+#define ATA_AR_QUEUED 1 /* was queued */
+#define ATA_AR_SETUP 2 /* dma table mapped */
+#define ATA_AR_POOL 4 /* originated from drive pool */
 /*
  * if turn-around time is longer than this, halve queue depth
@@ -1003,8 +1002,10 @@ static inline struct ata_request *ata_ar_get(ide_drive_t *drive)
         if (!list_empty(&drive->free_req)) {
                 ar = list_ata_entry(drive->free_req.next);
                 list_del(&ar->ar_queue);
                 ata_ar_init(drive, ar);
+                ar->ar_flags |= ATA_AR_POOL;
         }
         return ar;
@@ -1012,7 +1013,7 @@ static inline struct ata_request *ata_ar_get(ide_drive_t *drive)
 static inline void ata_ar_put(ide_drive_t *drive, struct ata_request *ar)
 {
-        if (!(ar->ar_flags & ATA_AR_STATIC))
+        if (ar->ar_flags & ATA_AR_POOL)
                 list_add(&ar->ar_queue, &drive->free_req);
         if (ar->ar_flags & ATA_AR_QUEUED) {
...