Commit 99a974e6 authored by Ondrej Zary, committed by Martin K. Petersen

scsi: g_NCR5380: Re-work PDMA loops

The polling loops in pread() and pwrite() can easily become infinite
loops and hang the machine.

Merge the IRQ check into host buffer wait loop and add polling limit.

Also place a limit on polling for 53C80 register accessibility.

[Use NCR5380_poll_politely2() for register polling. Rely on polling for
gated IRQ rather than polling for phase error, like the algorithm in the
53c400 datasheet. Move DTC436 workarounds into a separate patch.
Factor out common code as wait_for_53c80_access(). Rework the residual
calculations. -- F.T.]
Signed-off-by: Ondrej Zary <linux@rainbow-software.org>
Signed-off-by: Finn Thain <fthain@telegraphics.com.au>
Tested-by: Ondrej Zary <linux@rainbow-software.org>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent ab2ace2d
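
The reworked loops rely on bounded polling instead of unbounded busy-waiting. Below is a minimal, self-contained sketch of that pattern; it only models the behaviour the patch depends on (poll two register/mask/value conditions, give up after a deadline, return a negative error), and the names poll_politely2_model(), example_read_reg() and now_ms() are illustrative stand-ins rather than the in-tree NCR5380_poll_politely()/NCR5380_poll_politely2() helpers.

#include <errno.h>
#include <stdint.h>
#include <time.h>

/* Illustrative stand-in for the driver's register read (e.g. an ioread8()). */
static uint8_t example_read_reg(unsigned int reg)
{
        (void)reg;
        return 0xff;            /* pretend every status bit is set */
}

/* Coarse monotonic clock in milliseconds, standing in for jiffies. */
static long now_ms(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
}

/*
 * Wait until (reg1 & bit1) == val1 or (reg2 & bit2) == val2, giving up
 * after timeout_ms.  Returns 0 on success and -ETIMEDOUT on timeout, so
 * a "< 0" check in the caller means "stop transferring and compute the
 * residual" instead of spinning forever.
 */
static int poll_politely2_model(unsigned int reg1, uint8_t bit1, uint8_t val1,
                                unsigned int reg2, uint8_t bit2, uint8_t val2,
                                long timeout_ms)
{
        long deadline = now_ms() + timeout_ms;

        do {
                if ((example_read_reg(reg1) & bit1) == val1)
                        return 0;
                if ((example_read_reg(reg2) & bit2) == val2)
                        return 0;
        } while (now_ms() < deadline);

        return -ETIMEDOUT;
}

In the patch itself, NCR5380_poll_politely2() is used this way to wait for the host buffer to become ready while simultaneously watching for the gated 53C80 interrupt, NCR5380_poll_politely() covers the single-condition waits (Last Byte Sent, End of DMA), and wait_for_53c80_access() bounds the wait for 53C80 register access with a simple retry count.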
@@ -480,6 +480,28 @@ static void generic_NCR5380_release_resources(struct Scsi_Host *instance)
 		release_mem_region(base, region_size);
 }
 
+/* wait_for_53c80_access - wait for 53C80 registers to become accessible
+ * @hostdata: scsi host private data
+ *
+ * The registers within the 53C80 logic block are inaccessible until
+ * bit 7 in the 53C400 control status register gets asserted.
+ */
+
+static void wait_for_53c80_access(struct NCR5380_hostdata *hostdata)
+{
+	int count = 10000;
+
+	do {
+		if (NCR5380_read(hostdata->c400_ctl_status) & CSR_53C80_REG)
+			return;
+	} while (--count > 0);
+
+	scmd_printk(KERN_ERR, hostdata->connected,
+	            "53c80 registers not accessible, device will be reset\n");
+	NCR5380_write(hostdata->c400_ctl_status, CSR_RESET);
+	NCR5380_write(hostdata->c400_ctl_status, CSR_BASE);
+}
+
 /**
  * generic_NCR5380_precv - pseudo DMA receive
  * @hostdata: scsi host private data
@@ -492,18 +514,27 @@ static void generic_NCR5380_release_resources(struct Scsi_Host *instance)
 static inline int generic_NCR5380_precv(struct NCR5380_hostdata *hostdata,
                                         unsigned char *dst, int len)
 {
-	int blocks = len / 128;
+	int residual;
 	int start = 0;
 
 	NCR5380_write(hostdata->c400_ctl_status, CSR_BASE | CSR_TRANS_DIR);
-	NCR5380_write(hostdata->c400_blk_cnt, blocks);
-	while (1) {
-		if (NCR5380_read(hostdata->c400_blk_cnt) == 0)
-			break;
-		if (NCR5380_read(hostdata->c400_ctl_status) & CSR_GATED_53C80_IRQ)
-			goto out_wait;
-		while (NCR5380_read(hostdata->c400_ctl_status) & CSR_HOST_BUF_NOT_RDY)
-			; /* FIXME - no timeout */
+	NCR5380_write(hostdata->c400_blk_cnt, len / 128);
+
+	do {
+		if (start == len - 128) {
+			/* Ignore End of DMA interrupt for the final buffer */
+			if (NCR5380_poll_politely(hostdata, hostdata->c400_ctl_status,
+			                          CSR_HOST_BUF_NOT_RDY, 0, HZ / 64) < 0)
+				break;
+		} else {
+			if (NCR5380_poll_politely2(hostdata, hostdata->c400_ctl_status,
+			                           CSR_HOST_BUF_NOT_RDY, 0,
+			                           hostdata->c400_ctl_status,
+			                           CSR_GATED_53C80_IRQ,
+			                           CSR_GATED_53C80_IRQ, HZ / 64) < 0 ||
+			    NCR5380_read(hostdata->c400_ctl_status) & CSR_HOST_BUF_NOT_RDY)
+				break;
+		}
 
 		if (hostdata->io_port && hostdata->io_width == 2)
 			insw(hostdata->io_port + hostdata->c400_host_buf,
@@ -514,44 +545,26 @@ static inline int generic_NCR5380_precv(struct NCR5380_hostdata *hostdata,
 		else
 			memcpy_fromio(dst + start,
 			              hostdata->io + NCR53C400_host_buffer, 128);
 
 		start += 128;
-		blocks--;
-	}
-	if (blocks) {
-		while (NCR5380_read(hostdata->c400_ctl_status) & CSR_HOST_BUF_NOT_RDY)
-			; /* FIXME - no timeout */
-
-		if (hostdata->io_port && hostdata->io_width == 2)
-			insw(hostdata->io_port + hostdata->c400_host_buf,
-			     dst + start, 64);
-		else if (hostdata->io_port)
-			insb(hostdata->io_port + hostdata->c400_host_buf,
-			     dst + start, 128);
-		else
-			memcpy_fromio(dst + start,
-			              hostdata->io + NCR53C400_host_buffer, 128);
-
-		start += 128;
-		blocks--;
-	}
-
-	if (!(NCR5380_read(hostdata->c400_ctl_status) & CSR_GATED_53C80_IRQ))
-		printk("53C400r: no 53C80 gated irq after transfer");
-
-out_wait:
-	hostdata->pdma_residual = len - start;
-
-	/* wait for 53C80 registers to be available */
-	while (!(NCR5380_read(hostdata->c400_ctl_status) & CSR_53C80_REG))
-		;
-
-	if (NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG,
-	                          BASR_END_DMA_TRANSFER, BASR_END_DMA_TRANSFER,
-	                          HZ / 64) < 0)
-		scmd_printk(KERN_ERR, hostdata->connected, "%s: End of DMA timeout (%d)\n",
-		            __func__, hostdata->pdma_residual);
+	} while (start < len);
+
+	residual = len - start;
+
+	if (residual != 0) {
+		/* 53c80 interrupt or transfer timeout. Reset 53c400 logic. */
+		NCR5380_write(hostdata->c400_ctl_status, CSR_RESET);
+		NCR5380_write(hostdata->c400_ctl_status, CSR_BASE);
+	}
+	wait_for_53c80_access(hostdata);
+
+	if (residual == 0 && NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG,
+	                                           BASR_END_DMA_TRANSFER,
+	                                           BASR_END_DMA_TRANSFER,
+	                                           HZ / 64) < 0)
+		scmd_printk(KERN_ERR, hostdata->connected, "%s: End of DMA timeout\n",
+		            __func__);
+
+	hostdata->pdma_residual = residual;
 
 	return 0;
 }
@@ -568,36 +581,39 @@ static inline int generic_NCR5380_precv(struct NCR5380_hostdata *hostdata,
 static inline int generic_NCR5380_psend(struct NCR5380_hostdata *hostdata,
                                         unsigned char *src, int len)
 {
-	int blocks = len / 128;
+	int residual;
 	int start = 0;
 
 	NCR5380_write(hostdata->c400_ctl_status, CSR_BASE);
-	NCR5380_write(hostdata->c400_blk_cnt, blocks);
-	while (1) {
-		if (NCR5380_read(hostdata->c400_ctl_status) & CSR_GATED_53C80_IRQ)
-			goto out_wait;
+	NCR5380_write(hostdata->c400_blk_cnt, len / 128);
+
+	do {
+		if (NCR5380_poll_politely2(hostdata, hostdata->c400_ctl_status,
+		                           CSR_HOST_BUF_NOT_RDY, 0,
+		                           hostdata->c400_ctl_status,
+		                           CSR_GATED_53C80_IRQ,
+		                           CSR_GATED_53C80_IRQ, HZ / 64) < 0 ||
+		    NCR5380_read(hostdata->c400_ctl_status) & CSR_HOST_BUF_NOT_RDY) {
+			/* Both 128 B buffers are in use */
+			if (start >= 128)
+				start -= 128;
+			if (start >= 128)
+				start -= 128;
+			break;
+		}
 
-		if (NCR5380_read(hostdata->c400_blk_cnt) == 0)
+		if (start >= len && NCR5380_read(hostdata->c400_blk_cnt) == 0)
 			break;
-		while (NCR5380_read(hostdata->c400_ctl_status) & CSR_HOST_BUF_NOT_RDY)
-			; // FIXME - timeout
 
-		if (hostdata->io_port && hostdata->io_width == 2)
-			outsw(hostdata->io_port + hostdata->c400_host_buf,
-			      src + start, 64);
-		else if (hostdata->io_port)
-			outsb(hostdata->io_port + hostdata->c400_host_buf,
-			      src + start, 128);
-		else
-			memcpy_toio(hostdata->io + NCR53C400_host_buffer,
-			            src + start, 128);
+		if (NCR5380_read(hostdata->c400_ctl_status) & CSR_GATED_53C80_IRQ) {
+			/* Host buffer is empty, other one is in use */
+			if (start >= 128)
+				start -= 128;
+			break;
+		}
 
-		start += 128;
-		blocks--;
-	}
-	if (blocks) {
-		while (NCR5380_read(hostdata->c400_ctl_status) & CSR_HOST_BUF_NOT_RDY)
-			; // FIXME - no timeout
+		if (start >= len)
+			continue;
 
 		if (hostdata->io_port && hostdata->io_width == 2)
 			outsw(hostdata->io_port + hostdata->c400_host_buf,
@@ -608,28 +624,33 @@ static inline int generic_NCR5380_psend(struct NCR5380_hostdata *hostdata,
 		else
 			memcpy_toio(hostdata->io + NCR53C400_host_buffer,
 			            src + start, 128);
 
 		start += 128;
-		blocks--;
-	}
-
-out_wait:
-	hostdata->pdma_residual = len - start;
-
-	/* wait for 53C80 registers to be available */
-	while (!(NCR5380_read(hostdata->c400_ctl_status) & CSR_53C80_REG)) {
-		udelay(4); /* DTC436 chip hangs without this */
-		/* FIXME - no timeout */
-	}
+	} while (1);
 
-	while (!(NCR5380_read(TARGET_COMMAND_REG) & TCR_LAST_BYTE_SENT))
-		; // TIMEOUT
-	if (NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG,
-	                          BASR_END_DMA_TRANSFER, BASR_END_DMA_TRANSFER,
-	                          HZ / 64) < 0)
-		scmd_printk(KERN_ERR, hostdata->connected, "%s: End of DMA timeout (%d)\n",
-		            __func__, hostdata->pdma_residual);
+	residual = len - start;
+
+	if (residual != 0) {
+		/* 53c80 interrupt or transfer timeout. Reset 53c400 logic. */
+		NCR5380_write(hostdata->c400_ctl_status, CSR_RESET);
+		NCR5380_write(hostdata->c400_ctl_status, CSR_BASE);
+	}
+	wait_for_53c80_access(hostdata);
+
+	if (residual == 0) {
+		if (NCR5380_poll_politely(hostdata, TARGET_COMMAND_REG,
+		                          TCR_LAST_BYTE_SENT, TCR_LAST_BYTE_SENT,
+		                          HZ / 64) < 0)
+			scmd_printk(KERN_ERR, hostdata->connected,
+			            "%s: Last Byte Sent timeout\n", __func__);
+
+		if (NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG,
+		                          BASR_END_DMA_TRANSFER, BASR_END_DMA_TRANSFER,
+		                          HZ / 64) < 0)
+			scmd_printk(KERN_ERR, hostdata->connected, "%s: End of DMA timeout\n",
+			            __func__);
+	}
+
+	hostdata->pdma_residual = residual;
 
 	return 0;
 }