Commit 7eb141c4 authored by Doug Ledford, committed by Linus Torvalds

[PATCH] make SCSI queue depth adjustable

Linus, this has been tested by some people in the field and doesn't break
things, and it's the start of some other changes I'm making, so please put
this in your tree so that instead of merging huge patches I'm merging a
little as I go.
parent 5c4936fa
@@ -976,7 +976,7 @@ struct aic7xxx_host {
#define DEVICE_DTR_SCANNED 0x40
volatile unsigned char dev_flags[MAX_TARGETS];
volatile unsigned char dev_active_cmds[MAX_TARGETS];
volatile unsigned char dev_temp_queue_depth[MAX_TARGETS];
volatile unsigned short dev_temp_queue_depth[MAX_TARGETS];
unsigned char dev_commands_sent[MAX_TARGETS];
unsigned int dev_timer_active; /* Which devs have a timer set */
@@ -988,7 +988,9 @@ struct aic7xxx_host {
unsigned char dev_last_queue_full[MAX_TARGETS];
unsigned char dev_last_queue_full_count[MAX_TARGETS];
unsigned char dev_max_queue_depth[MAX_TARGETS];
unsigned char dev_lun_queue_depth[MAX_TARGETS];
unsigned short dev_scbs_needed[MAX_TARGETS];
unsigned short dev_max_queue_depth[MAX_TARGETS];
volatile scb_queue_type delayed_scbs[MAX_TARGETS];
@@ -1035,6 +1037,7 @@ struct aic7xxx_host {
ahc_chip chip; /* chip type */
ahc_bugs bugs;
dma_addr_t fifo_dma; /* DMA handle for fifo arrays */
Scsi_Device *Scsi_Dev[MAX_TARGETS][MAX_LUNS];
/*
* Statistics Kept:
@@ -2820,94 +2823,6 @@ aic7xxx_done(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
cmd->result |= (DID_RESET << 16);
}
if (!(p->dev_flags[tindex] & DEVICE_PRESENT))
{
if ( (cmd->cmnd[0] == INQUIRY) && (cmd->result == DID_OK) )
{
p->dev_flags[tindex] |= DEVICE_PRESENT;
#define WIDE_INQUIRY_BITS 0x60
#define SYNC_INQUIRY_BITS 0x10
#define SCSI_VERSION_BITS 0x07
#define SCSI_DT_BIT 0x04
if(!(p->dev_flags[tindex] & DEVICE_DTR_SCANNED)) {
char *buffer;
if(cmd->use_sg)
BUG();
buffer = (char *)cmd->request_buffer;
if ( (buffer[7] & WIDE_INQUIRY_BITS) &&
(p->features & AHC_WIDE) )
{
p->needwdtr |= (1<<tindex);
p->needwdtr_copy |= (1<<tindex);
p->transinfo[tindex].goal_width = p->transinfo[tindex].user_width;
}
else
{
p->needwdtr &= ~(1<<tindex);
p->needwdtr_copy &= ~(1<<tindex);
pause_sequencer(p);
aic7xxx_set_width(p, cmd->target, cmd->channel, cmd->lun,
MSG_EXT_WDTR_BUS_8_BIT, (AHC_TRANS_ACTIVE |
AHC_TRANS_GOAL |
AHC_TRANS_CUR) );
unpause_sequencer(p, FALSE);
}
if ( (buffer[7] & SYNC_INQUIRY_BITS) &&
p->transinfo[tindex].user_offset )
{
p->transinfo[tindex].goal_period = p->transinfo[tindex].user_period;
p->transinfo[tindex].goal_options = p->transinfo[tindex].user_options;
if (p->features & AHC_ULTRA2)
p->transinfo[tindex].goal_offset = MAX_OFFSET_ULTRA2;
else if (p->transinfo[tindex].goal_width == MSG_EXT_WDTR_BUS_16_BIT)
p->transinfo[tindex].goal_offset = MAX_OFFSET_16BIT;
else
p->transinfo[tindex].goal_offset = MAX_OFFSET_8BIT;
if ( (((buffer[2] & SCSI_VERSION_BITS) >= 3) ||
(buffer[56] & SCSI_DT_BIT) ||
(p->dev_flags[tindex] & DEVICE_SCSI_3) ) &&
(p->transinfo[tindex].user_period <= 9) &&
(p->transinfo[tindex].user_options) )
{
p->needppr |= (1<<tindex);
p->needppr_copy |= (1<<tindex);
p->needsdtr &= ~(1<<tindex);
p->needsdtr_copy &= ~(1<<tindex);
p->needwdtr &= ~(1<<tindex);
p->needwdtr_copy &= ~(1<<tindex);
p->dev_flags[tindex] |= DEVICE_SCSI_3;
}
else
{
p->needsdtr |= (1<<tindex);
p->needsdtr_copy |= (1<<tindex);
p->transinfo[tindex].goal_period =
MAX(10, p->transinfo[tindex].goal_period);
p->transinfo[tindex].goal_options = 0;
}
}
else
{
p->needsdtr &= ~(1<<tindex);
p->needsdtr_copy &= ~(1<<tindex);
p->transinfo[tindex].goal_period = 255;
p->transinfo[tindex].goal_offset = 0;
p->transinfo[tindex].goal_options = 0;
}
p->dev_flags[tindex] |= DEVICE_DTR_SCANNED;
p->dev_flags[tindex] |= DEVICE_PRINT_DTR;
}
#undef WIDE_INQUIRY_BITS
#undef SYNC_INQUIRY_BITS
#undef SCSI_VERSION_BITS
#undef SCSI_DT_BIT
}
}
if ((scb->flags & SCB_MSGOUT_BITS) != 0)
{
unsigned short mask;
@@ -4919,15 +4834,29 @@ aic7xxx_handle_seqint(struct aic7xxx_host *p, unsigned char intstat)
if ( (p->dev_last_queue_full_count[tindex] > 14) &&
(p->dev_active_cmds[tindex] > 4) )
{
int diff, lun;
if (p->dev_active_cmds[tindex] > p->dev_lun_queue_depth[tindex])
/* We don't know what to do here, so bail. */
break;
if (aic7xxx_verbose & VERBOSE_NEGOTIATION2)
printk(INFO_LEAD "Queue depth reduced to %d\n", p->host_no,
CTL_OF_SCB(scb), p->dev_active_cmds[tindex]);
p->dev_max_queue_depth[tindex] =
p->dev_active_cmds[tindex];
diff = p->dev_lun_queue_depth[tindex] -
p->dev_active_cmds[tindex];
p->dev_lun_queue_depth[tindex] -= diff;
for(lun = 0; lun < p->host->max_lun; lun++)
{
if(p->Scsi_Dev[tindex][lun] != NULL)
{
p->dev_max_queue_depth[tindex] -= diff;
scsi_adjust_queue_depth(p->Scsi_Dev[tindex][lun], 1,
p->dev_lun_queue_depth[tindex]);
if(p->dev_temp_queue_depth[tindex] > p->dev_max_queue_depth[tindex])
p->dev_temp_queue_depth[tindex] = p->dev_max_queue_depth[tindex];
}
}
p->dev_last_queue_full[tindex] = 0;
p->dev_last_queue_full_count[tindex] = 0;
p->dev_temp_queue_depth[tindex] =
p->dev_active_cmds[tindex];
}
else if (p->dev_active_cmds[tindex] == 0)
{
@@ -7023,10 +6952,10 @@ do_aic7xxx_isr(int irq, void *dev_id, struct pt_regs *regs)
* with queue depths for individual devices. It also allows tagged
* queueing to be [en|dis]abled for a specific adapter.
*-F*************************************************************************/
static int
static void
aic7xxx_device_queue_depth(struct aic7xxx_host *p, Scsi_Device *device)
{
int default_depth = 3;
int default_depth = p->host->hostt->cmd_per_lun;
unsigned char tindex;
unsigned short target_mask;
@@ -7036,12 +6965,69 @@ aic7xxx_device_queue_depth(struct aic7xxx_host *p, Scsi_Device *device)
if (p->dev_max_queue_depth[tindex] > 1)
{
/*
* We've already scanned this device, leave it alone
* We've already scanned some lun on this device and enabled tagged
* queueing on it. So, as long as this lun also supports tagged
* queueing, enable it here with the same depth. Call SCSI mid layer
* to adjust depth on this device, and add enough to the max_queue_depth
* to cover the commands for this lun.
*
* Note: there is a shortcoming here. The aic7xxx driver really assumes
* that if any lun on a device supports tagged queueing, then they *all*
do. Our p->tagenable field is kept on a per target ID basis and doesn't
differentiate between luns. If we end up with one lun that
* doesn't support tagged queueing, it's going to disable tagged queueing
* on *all* the luns on that target ID :-(
*/
return(p->dev_max_queue_depth[tindex]);
if(device->tagged_supported) {
if (aic7xxx_verbose & VERBOSE_NEGOTIATION2)
{
printk(INFO_LEAD "Enabled tagged queuing, queue depth %d.\n",
p->host_no, device->channel, device->id,
device->lun, device->queue_depth);
}
p->dev_max_queue_depth[tindex] += p->dev_lun_queue_depth[tindex];
p->dev_temp_queue_depth[tindex] += p->dev_lun_queue_depth[tindex];
scsi_adjust_queue_depth(device, 1, p->dev_lun_queue_depth[tindex]);
}
else
{
int lun;
/*
Uh oh, this is what I was talking about. All the other devices on
* this target ID that support tagged queueing are going to end up
* getting tagged queueing turned off because of this device. Print
* out a message to this effect for the user, then disable tagged
* queueing on all the devices on this ID.
*/
printk(WARN_LEAD "does not support tagged queuing while other luns on\n"
" the same target ID do!! Tagged queueing will be disabled for\n"
" all luns on this target ID!!\n", p->host_no,
device->channel, device->id, device->lun);
p->dev_lun_queue_depth[tindex] = default_depth;
p->dev_scbs_needed[tindex] = 0;
p->dev_temp_queue_depth[tindex] = 1;
p->dev_max_queue_depth[tindex] = 1;
p->tagenable &= ~target_mask;
for(lun=0; lun < p->host->max_lun; lun++)
{
if(p->Scsi_Dev[tindex][lun] != NULL)
{
printk(WARN_LEAD "disabling tagged queuing.\n", p->host_no,
p->Scsi_Dev[tindex][lun]->channel,
p->Scsi_Dev[tindex][lun]->id,
p->Scsi_Dev[tindex][lun]->lun);
scsi_adjust_queue_depth(p->Scsi_Dev[tindex][lun], 0, default_depth);
p->dev_scbs_needed[tindex] += default_depth;
}
}
}
return;
}
device->queue_depth = default_depth;
p->dev_lun_queue_depth[tindex] = default_depth;
p->dev_scbs_needed[tindex] = default_depth;
p->dev_temp_queue_depth[tindex] = 1;
p->dev_max_queue_depth[tindex] = 1;
p->tagenable &= ~target_mask;
@@ -7051,7 +7037,7 @@ aic7xxx_device_queue_depth(struct aic7xxx_host *p, Scsi_Device *device)
int tag_enabled = TRUE;
default_depth = AIC7XXX_CMDS_PER_DEVICE;
if (!(p->discenable & target_mask))
{
if (aic7xxx_verbose & VERBOSE_NEGOTIATION2)
@@ -7072,7 +7058,7 @@ aic7xxx_device_queue_depth(struct aic7xxx_host *p, Scsi_Device *device)
" the aic7xxx.c source file.\n");
print_warning = FALSE;
}
device->queue_depth = default_depth;
p->dev_lun_queue_depth[tindex] = default_depth;
}
else
{
@@ -7080,19 +7066,18 @@ aic7xxx_device_queue_depth(struct aic7xxx_host *p, Scsi_Device *device)
if (aic7xxx_tag_info[p->instance].tag_commands[tindex] == 255)
{
tag_enabled = FALSE;
device->queue_depth = 3; /* Tagged queueing is disabled. */
}
else if (aic7xxx_tag_info[p->instance].tag_commands[tindex] == 0)
{
device->queue_depth = default_depth;
p->dev_lun_queue_depth[tindex] = default_depth;
}
else
{
device->queue_depth =
p->dev_lun_queue_depth[tindex] =
aic7xxx_tag_info[p->instance].tag_commands[tindex];
}
}
if ((device->tagged_queue == 0) && tag_enabled)
if (tag_enabled)
{
if (aic7xxx_verbose & VERBOSE_NEGOTIATION2)
{
@@ -7100,46 +7085,70 @@ aic7xxx_device_queue_depth(struct aic7xxx_host *p, Scsi_Device *device)
p->host_no, device->channel, device->id,
device->lun, device->queue_depth);
}
p->dev_max_queue_depth[tindex] = device->queue_depth;
p->dev_temp_queue_depth[tindex] = device->queue_depth;
p->dev_max_queue_depth[tindex] = p->dev_lun_queue_depth[tindex];
p->dev_temp_queue_depth[tindex] = p->dev_lun_queue_depth[tindex];
p->dev_scbs_needed[tindex] = p->dev_lun_queue_depth[tindex];
p->tagenable |= target_mask;
p->orderedtag |= target_mask;
device->tagged_queue = 1;
device->current_tag = SCB_LIST_NULL;
scsi_adjust_queue_depth(device, 1, p->dev_lun_queue_depth[tindex]);
}
}
}
return(p->dev_max_queue_depth[tindex]);
return;
}
/*+F*************************************************************************
* Function:
* aic7xxx_select_queue_depth
* aic7xxx_slave_detach
*
* Description:
* Sets the queue depth for each SCSI device hanging off the input
* host adapter. We use a queue depth of 2 for devices that do not
* support tagged queueing. If AIC7XXX_CMDS_PER_LUN is defined, we
* use that for tagged queueing devices; otherwise we use our own
* algorithm for determining the queue depth based on the maximum
* SCBs for the controller.
Prepare for this device to go away.
*-F*************************************************************************/
static void
aic7xxx_select_queue_depth(struct Scsi_Host *host,
Scsi_Device *scsi_devs)
void
aic7xxx_slave_detach(Scsi_Device *sdpnt)
{
Scsi_Device *device;
struct aic7xxx_host *p = (struct aic7xxx_host *) host->hostdata;
int scbnum;
struct aic7xxx_host *p = (struct aic7xxx_host *) sdpnt->host->hostdata;
int lun, tindex;
tindex = sdpnt->id | (sdpnt->channel << 3);
lun = sdpnt->lun;
if(p->Scsi_Dev[tindex][lun] == NULL)
return;
scbnum = 0;
for (device = scsi_devs; device != NULL; device = device->next)
if(p->tagenable & (1 << tindex))
{
if (device->host == host)
{
scbnum += aic7xxx_device_queue_depth(p, device);
}
p->dev_max_queue_depth[tindex] -= p->dev_lun_queue_depth[tindex];
if(p->dev_temp_queue_depth[tindex] > p->dev_max_queue_depth[tindex])
p->dev_temp_queue_depth[tindex] = p->dev_max_queue_depth[tindex];
}
p->dev_scbs_needed[tindex] -= p->dev_lun_queue_depth[tindex];
p->Scsi_Dev[tindex][lun] = NULL;
return;
}
/*+F*************************************************************************
* Function:
* aic7xxx_slave_attach
*
* Description:
* Configure the device we are attaching to the controller. This is
* where we get to do things like scan the INQUIRY data, set queue
* depths, allocate command structs, etc.
*-F*************************************************************************/
int
aic7xxx_slave_attach(Scsi_Device *sdpnt)
{
struct aic7xxx_host *p = (struct aic7xxx_host *) sdpnt->host->hostdata;
int scbnum, tindex, i;
tindex = sdpnt->id | (sdpnt->channel << 3);
p->dev_flags[tindex] |= DEVICE_PRESENT;
p->Scsi_Dev[tindex][sdpnt->lun] = sdpnt;
aic7xxx_device_queue_depth(p, sdpnt);
for(i = 0, scbnum = 0; i < p->host->max_id; i++)
scbnum += p->dev_scbs_needed[i];
while (scbnum > p->scb_data->numscbs)
{
/*
@@ -7148,8 +7157,77 @@ aic7xxx_select_queue_depth(struct Scsi_Host *host,
* the SCB in order to perform a swap operation (possible deadlock)
*/
if ( aic7xxx_allocate_scb(p) == 0 )
return;
break;
}
/*
* We only need to check INQUIRY data on one lun of multi-lun devices
* since speed negotiations are not lun specific. Once we've checked this
* particular target ID, the DEVICE_DTR_SCANNED flag will be set.
*/
if (!(p->dev_flags[tindex] & DEVICE_DTR_SCANNED))
{
p->dev_flags[tindex] |= DEVICE_DTR_SCANNED;
if ( sdpnt->wdtr && (p->features & AHC_WIDE) )
{
p->needwdtr |= (1<<tindex);
p->needwdtr_copy |= (1<<tindex);
p->transinfo[tindex].goal_width = p->transinfo[tindex].user_width;
}
else
{
p->needwdtr &= ~(1<<tindex);
p->needwdtr_copy &= ~(1<<tindex);
pause_sequencer(p);
aic7xxx_set_width(p, sdpnt->id, sdpnt->channel, sdpnt->lun,
MSG_EXT_WDTR_BUS_8_BIT, (AHC_TRANS_ACTIVE |
AHC_TRANS_GOAL |
AHC_TRANS_CUR) );
unpause_sequencer(p, FALSE);
}
if ( sdpnt->sdtr && p->transinfo[tindex].user_offset )
{
p->transinfo[tindex].goal_period = p->transinfo[tindex].user_period;
p->transinfo[tindex].goal_options = p->transinfo[tindex].user_options;
if (p->features & AHC_ULTRA2)
p->transinfo[tindex].goal_offset = MAX_OFFSET_ULTRA2;
else if (p->transinfo[tindex].goal_width == MSG_EXT_WDTR_BUS_16_BIT)
p->transinfo[tindex].goal_offset = MAX_OFFSET_16BIT;
else
p->transinfo[tindex].goal_offset = MAX_OFFSET_8BIT;
if ( sdpnt->ppr && p->transinfo[tindex].user_period <= 9 &&
p->transinfo[tindex].user_options )
{
p->needppr |= (1<<tindex);
p->needppr_copy |= (1<<tindex);
p->needsdtr &= ~(1<<tindex);
p->needsdtr_copy &= ~(1<<tindex);
p->needwdtr &= ~(1<<tindex);
p->needwdtr_copy &= ~(1<<tindex);
p->dev_flags[tindex] |= DEVICE_SCSI_3;
}
else
{
p->needsdtr |= (1<<tindex);
p->needsdtr_copy |= (1<<tindex);
p->transinfo[tindex].goal_period =
MAX(10, p->transinfo[tindex].goal_period);
p->transinfo[tindex].goal_options = 0;
}
}
else
{
p->needsdtr &= ~(1<<tindex);
p->needsdtr_copy &= ~(1<<tindex);
p->transinfo[tindex].goal_period = 255;
p->transinfo[tindex].goal_offset = 0;
p->transinfo[tindex].goal_options = 0;
}
p->dev_flags[tindex] |= DEVICE_PRINT_DTR;
}
return(0);
}
/*+F*************************************************************************
@@ -8246,7 +8324,6 @@ aic7xxx_register(Scsi_Host_Template *template, struct aic7xxx_host *p,
host->can_queue = AIC7XXX_MAXSCB;
host->cmd_per_lun = 3;
host->sg_tablesize = AIC7XXX_MAX_SG;
host->select_queue_depths = aic7xxx_select_queue_depth;
host->this_id = p->scsi_id;
host->io_port = p->base;
host->n_io_port = 0xFF;
@@ -46,7 +46,9 @@
eh_host_reset_handler: NULL, \
abort: aic7xxx_abort, \
reset: aic7xxx_reset, \
slave_attach: NULL, \
select_queue_depths: NULL, \
slave_attach: aic7xxx_slave_attach, \
slave_detach: aic7xxx_slave_detach, \
bios_param: aic7xxx_biosparam, \
can_queue: 255, /* max simultaneous cmds */\
this_id: -1, /* scsi id of host adapter */\
@@ -64,6 +66,8 @@ extern int aic7xxx_command(Scsi_Cmnd *);
extern int aic7xxx_reset(Scsi_Cmnd *, unsigned int);
extern int aic7xxx_abort(Scsi_Cmnd *);
extern int aic7xxx_release(struct Scsi_Host *);
extern int aic7xxx_slave_attach(Scsi_Device *);
extern void aic7xxx_slave_detach(Scsi_Device *);
extern const char *aic7xxx_info(struct Scsi_Host *);
@@ -97,6 +97,10 @@ typedef struct SHT
*/
int (* detect)(struct SHT *);
/*
* This function is only used by one driver and will be going away
* once it switches over to using the slave_detach() function instead.
*/
int (*revoke)(Scsi_Device *);
/* Used with loadable modules to unload the host structures. Note:
@@ -200,11 +204,59 @@ typedef struct SHT
int (* reset)(Scsi_Cmnd *, unsigned int);
/*
* This function is used to select synchronous communications,
* which will result in a higher data throughput. Not implemented
* yet.
*/
int (* slave_attach)(int, int);
* Once the device has responded to an INQUIRY and we know the device
* is online, call into the low level driver with the Scsi_Device *
* (so that the low level driver may save it off in a safe location
* for later use in calling scsi_adjust_queue_depth() or possibly
* other scsi_* functions) and char * to the INQUIRY return data buffer.
* This way, low level drivers will no longer have to snoop INQUIRY data
* to see if a drive supports PPR message protocol for Ultra160 speed
* negotiations or other similar items. Instead it can simply wait until
* the scsi mid layer calls them with the data in hand and then it can
* do its checking of INQUIRY data. This will happen once for each new
* device added on this controller (including once for each lun on
* multi-lun devices, so low level drivers that do tagged queueing on a
* per physical unit basis instead of a per logical unit basis should
* take care to have the mid layer allocate tags accordingly).
*
* Things currently recommended to be handled at this time include:
*
* 1. Checking for tagged queueing capability and if able then calling
* scsi_adjust_queue_depth() with the device pointer and the
* suggested new queue depth.
* 2. Checking for things such as SCSI level or DT bit in order to
* determine if PPR message protocols are appropriate on this
* device (or any other scsi INQUIRY data specific things the
* driver wants to know in order to properly handle this device).
* 3. Allocating command structs that the device will need.
* 4. Setting the default timeout on this device (if needed).
* 5. Saving the Scsi_Device pointer so that the low level driver
* will be able to easily call back into scsi_adjust_queue_depth
* again should it be determined that the queue depth for this
* device should be lower or higher than it is initially set to.
* 6. Allocate device data structures as needed that can be attached
* to the Scsi_Device * via SDpnt->host_device_ptr
* 7. Anything else the low level driver might want to do on a device
* specific setup basis...
* 8. Return 0 on success, non-0 on error. The device will be marked
* as offline on error so that no access will occur.
*/
int (* slave_attach)(Scsi_Device *);
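
For illustration only (not part of this patch), a minimal slave_attach()
written against the interface described above might look like the sketch
below; the mydrv_* names and MYDRV_TAGGED_DEPTH are hypothetical:

	static int mydrv_slave_attach(Scsi_Device *sdpnt)
	{
		struct mydrv_host *mh =
			(struct mydrv_host *) sdpnt->host->hostdata;

		/* Item 5: save the pointer so we can call
		 * scsi_adjust_queue_depth() again later. */
		mh->sdev[sdpnt->id][sdpnt->lun] = sdpnt;

		/* Item 1: pick a tagged or untagged depth based on the
		 * INQUIRY data the mid layer already parsed for us. */
		if (sdpnt->tagged_supported)
			scsi_adjust_queue_depth(sdpnt, 1, MYDRV_TAGGED_DEPTH);
		else
			scsi_adjust_queue_depth(sdpnt, 0,
						sdpnt->host->cmd_per_lun);

		/* Item 8: return 0 so the device stays online. */
		return 0;
	}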
/*
* If we are getting ready to remove a device from the scsi chain then
* we call into the low level driver to let them know. Once a low
* level driver has been informed that a drive is going away, the low
* level driver *must* remove its pointer to the Scsi_Device because
* it is going to be kfree()'ed shortly. It is no longer safe to call
* any mid layer functions with this Scsi_Device *. Additionally, the
* mid layer will not make any more calls into the low level driver's
* queue routine with this device, so it is safe for the device driver
* to deallocate all structs/commands/etc that it has allocated
* specifically for this device at the time of this call.
*/
void (* slave_detach)(Scsi_Device *);
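
Again purely as an illustrative sketch (not part of this patch), the
matching slave_detach() for the hypothetical driver above only needs to
forget its saved pointer, since the Scsi_Device is about to be
kfree()'ed by the mid layer:

	static void mydrv_slave_detach(Scsi_Device *sdpnt)
	{
		struct mydrv_host *mh =
			(struct mydrv_host *) sdpnt->host->hostdata;

		/* Drop our reference; no further mid layer calls may be
		 * made with this Scsi_Device after we return. */
		mh->sdev[sdpnt->id][sdpnt->lun] = NULL;
	}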
/*
* This function determines the bios parameters for a given
@@ -217,6 +269,8 @@ typedef struct SHT
/*
* Used to set the queue depth for a specific device.
*
* Once the slave_attach() function is in full use, this will go away.
*/
void (*select_queue_depths)(struct Scsi_Host *, Scsi_Device *);
@@ -551,6 +551,7 @@ inline void __scsi_release_command(Scsi_Cmnd * SCpnt)
{
unsigned long flags;
Scsi_Device * SDpnt;
int alloc_cmd = 0;
spin_lock_irqsave(&device_request_lock, flags);
@@ -567,6 +568,25 @@ inline void __scsi_release_command(Scsi_Cmnd * SCpnt)
atomic_read(&SCpnt->host->host_active),
SCpnt->host->host_failed));
if(SDpnt->queue_depth > SDpnt->new_queue_depth) {
Scsi_Cmnd *prev, *next;
/*
* Release the command block and decrement the queue
* depth.
*/
for(prev = NULL, next = SDpnt->device_queue;
next != SCpnt;
prev = next, next = next->next) ;
if(prev == NULL)
SDpnt->device_queue = next->next;
else
prev->next = next->next;
kfree((char *)SCpnt);
SDpnt->queue_depth--;
} else if(SDpnt->queue_depth < SDpnt->new_queue_depth) {
alloc_cmd = 1;
SDpnt->queue_depth++;
}
spin_unlock_irqrestore(&device_request_lock, flags);
/*
@@ -575,6 +595,48 @@ inline void __scsi_release_command(Scsi_Cmnd * SCpnt)
* they wake up.
*/
wake_up(&SDpnt->scpnt_wait);
/*
* We are happy to release command blocks in the scope of the
* device_request_lock since that's nice and quick, but allocation
* can take more time so do it outside that scope instead.
*/
if(alloc_cmd) {
Scsi_Cmnd *newSCpnt;
newSCpnt = kmalloc(sizeof(Scsi_Cmnd), GFP_ATOMIC |
(SDpnt->host->unchecked_isa_dma ?
GFP_DMA : 0));
if(newSCpnt) {
memset(newSCpnt, 0, sizeof(Scsi_Cmnd));
newSCpnt->host = SDpnt->host;
newSCpnt->device = SDpnt;
newSCpnt->target = SDpnt->id;
newSCpnt->lun = SDpnt->lun;
newSCpnt->channel = SDpnt->channel;
newSCpnt->request = NULL;
newSCpnt->use_sg = 0;
newSCpnt->old_use_sg = 0;
newSCpnt->old_cmd_len = 0;
newSCpnt->underflow = 0;
newSCpnt->old_underflow = 0;
newSCpnt->transfersize = 0;
newSCpnt->resid = 0;
newSCpnt->serial_number = 0;
newSCpnt->serial_number_at_timeout = 0;
newSCpnt->host_scribble = NULL;
newSCpnt->state = SCSI_STATE_UNUSED;
newSCpnt->owner = SCSI_OWNER_NOBODY;
spin_lock_irqsave(&device_request_lock, flags);
newSCpnt->next = SDpnt->device_queue;
SDpnt->device_queue = newSCpnt;
spin_unlock_irqrestore(&device_request_lock, flags);
} else {
spin_lock_irqsave(&device_request_lock, flags);
SDpnt->queue_depth--;
spin_unlock_irqrestore(&device_request_lock, flags);
}
}
}
/*
@@ -1447,8 +1509,8 @@ void scsi_release_commandblocks(Scsi_Device * SDpnt)
SDpnt->device_queue = SCnext = SCpnt->next;
kfree((char *) SCpnt);
}
SDpnt->has_cmdblocks = 0;
SDpnt->queue_depth = 0;
SDpnt->new_queue_depth = 0;
spin_unlock_irqrestore(&device_request_lock, flags);
}
@@ -1463,63 +1525,115 @@ void scsi_release_commandblocks(Scsi_Device * SDpnt)
*
* Lock status: No locking assumed or required.
*
* Notes:
* Notes: We really only allocate one command here. We will allocate
* more commands as needed once the device goes into real use.
*/
void scsi_build_commandblocks(Scsi_Device * SDpnt)
{
unsigned long flags;
struct Scsi_Host *host = SDpnt->host;
int j;
Scsi_Cmnd *SCpnt;
if (SDpnt->queue_depth != 0)
return;
SCpnt = (Scsi_Cmnd *) kmalloc(sizeof(Scsi_Cmnd), GFP_ATOMIC |
(SDpnt->host->unchecked_isa_dma ? GFP_DMA : 0));
if (NULL == SCpnt) {
/*
* Since we don't currently have *any* command blocks on this
* device, go ahead and try an atomic allocation...
*/
SCpnt = (Scsi_Cmnd *) kmalloc(sizeof(Scsi_Cmnd), GFP_ATOMIC |
(SDpnt->host->unchecked_isa_dma ? GFP_DMA : 0));
if (NULL == SCpnt)
return; /* Oops, we aren't going anywhere for now */
}
memset(SCpnt, 0, sizeof(Scsi_Cmnd));
SCpnt->host = SDpnt->host;
SCpnt->device = SDpnt;
SCpnt->target = SDpnt->id;
SCpnt->lun = SDpnt->lun;
SCpnt->channel = SDpnt->channel;
SCpnt->request = NULL;
SCpnt->use_sg = 0;
SCpnt->old_use_sg = 0;
SCpnt->old_cmd_len = 0;
SCpnt->underflow = 0;
SCpnt->old_underflow = 0;
SCpnt->transfersize = 0;
SCpnt->resid = 0;
SCpnt->serial_number = 0;
SCpnt->serial_number_at_timeout = 0;
SCpnt->host_scribble = NULL;
SCpnt->state = SCSI_STATE_UNUSED;
SCpnt->owner = SCSI_OWNER_NOBODY;
spin_lock_irqsave(&device_request_lock, flags);
if(SDpnt->new_queue_depth == 0)
SDpnt->new_queue_depth = 1;
SDpnt->queue_depth++;
SCpnt->next = SDpnt->device_queue;
SDpnt->device_queue = SCpnt;
spin_unlock_irqrestore(&device_request_lock, flags);
}
if (SDpnt->queue_depth == 0)
{
SDpnt->queue_depth = host->cmd_per_lun;
if (SDpnt->queue_depth == 0)
SDpnt->queue_depth = 1; /* live to fight another day */
}
SDpnt->device_queue = NULL;
/*
* Function: scsi_adjust_queue_depth()
*
* Purpose: Allow low level drivers to tell us to change the queue depth
* on a specific SCSI device
*
* Arguments: SDpnt - SCSI Device in question
* tagged - Do we use tagged queueing (non-0) or do we treat
* this device as an untagged device (0)
* tags - Number of tags allowed if tagged queueing enabled,
* or number of commands the low level driver can
* queue up in non-tagged mode (as per cmd_per_lun).
*
* Returns: Nothing
*
* Lock Status: None held on entry
*
* Notes: Low level drivers may call this at any time and we will do
* the right thing depending on whether or not the device is
* currently active and whether or not it even has the
* command blocks built yet.
*
If queue_depth != 0 then we are a live device. We just set the
* new_queue_depth variable and when the scsi completion handler
* notices that queue_depth != new_queue_depth it will work to
* rectify the situation. If new_queue_depth is less than current
* queue_depth, then it will free the completed command instead of
putting it back on the free list and decrement queue_depth. Otherwise
* it will try to allocate a new command block for the device and
* put it on the free list along with the command that is being
* completed. Obviously, if the device isn't doing anything then
neither is this code, so it will bring the device's queue depth
* back into line when the device is actually being used. This
* keeps us from needing to fire off a kernel thread or some such
* nonsense (this routine can be called from interrupt code, so
* handling allocations here would be tricky and risky, making
* a kernel thread a much safer way to go if we wanted to handle
* the work immediately instead of letting it get done a little
* at a time in the completion handler).
*/
void scsi_adjust_queue_depth(Scsi_Device *SDpnt, int tagged, int tags)
{
unsigned long flags;
for (j = 0; j < SDpnt->queue_depth; j++) {
SCpnt = (Scsi_Cmnd *)
kmalloc(sizeof(Scsi_Cmnd),
GFP_ATOMIC |
(host->unchecked_isa_dma ? GFP_DMA : 0));
if (NULL == SCpnt)
break; /* If not, the next line will oops ... */
memset(SCpnt, 0, sizeof(Scsi_Cmnd));
SCpnt->host = host;
SCpnt->device = SDpnt;
SCpnt->target = SDpnt->id;
SCpnt->lun = SDpnt->lun;
SCpnt->channel = SDpnt->channel;
SCpnt->request = NULL;
SCpnt->use_sg = 0;
SCpnt->old_use_sg = 0;
SCpnt->old_cmd_len = 0;
SCpnt->underflow = 0;
SCpnt->old_underflow = 0;
SCpnt->transfersize = 0;
SCpnt->resid = 0;
SCpnt->serial_number = 0;
SCpnt->serial_number_at_timeout = 0;
SCpnt->host_scribble = NULL;
SCpnt->next = SDpnt->device_queue;
SDpnt->device_queue = SCpnt;
SCpnt->state = SCSI_STATE_UNUSED;
SCpnt->owner = SCSI_OWNER_NOBODY;
}
if (j < SDpnt->queue_depth) { /* low on space (D.Gilbert 990424) */
printk(KERN_WARNING "scsi_build_commandblocks: want=%d, space for=%d blocks\n",
SDpnt->queue_depth, j);
SDpnt->queue_depth = j;
SDpnt->has_cmdblocks = (0 != j);
} else {
SDpnt->has_cmdblocks = 1;
}
/*
* refuse to set tagged depth to an unworkable size
*/
if(tags == 0)
return;
spin_lock_irqsave(&device_request_lock, flags);
SDpnt->new_queue_depth = tags;
SDpnt->tagged_queue = tagged;
spin_unlock_irqrestore(&device_request_lock, flags);
if(SDpnt->queue_depth == 0)
{
scsi_build_commandblocks(SDpnt);
}
}
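
As an illustrative usage note (not part of this patch): a low level
driver that saved the Scsi_Device pointer in slave_attach() can
rethrottle a device at any time, even from interrupt context, e.g. on a
QUEUE_FULL condition:

	/* new_depth is hypothetical; it must be non-zero, since
	 * scsi_adjust_queue_depth() ignores a tag count of 0. */
	scsi_adjust_queue_depth(SDpnt, 1, new_depth);

The change then converges lazily: each pass through
__scsi_release_command() frees or allocates one command block until
queue_depth matches new_queue_depth.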
void __init scsi_host_no_insert(char *str, int n)
@@ -1758,13 +1872,6 @@ static int proc_scsi_gen_write(struct file * file, const char * buf,
goto out; /* We do not yet support unplugging */
scan_scsis(HBA_ptr, 1, channel, id, lun);
/* FIXME (DB) This assumes that the queue_depth routines can be used
in this context as well, while they were all designed to be
called only once after the detect routine. (DB) */
/* queue_depth routine moved to inside scan_scsis(,1,,,) so
it is called before build_commandblocks() */
err = length;
goto out;
}
@@ -1826,6 +1933,8 @@ static int proc_scsi_gen_write(struct file * file, const char * buf,
*/
if (HBA_ptr->hostt->revoke)
HBA_ptr->hostt->revoke(scd);
if (HBA_ptr->hostt->slave_detach)
(*HBA_ptr->hostt->slave_detach) (scd);
devfs_unregister (scd->de);
scsi_release_commandblocks(scd);
@@ -1963,9 +2072,6 @@ int scsi_register_host(Scsi_Host_Template * tpnt)
/* first register parent with driverfs */
device_register(&shpnt->host_driverfs_dev);
scan_scsis(shpnt, 0, 0, 0, 0);
if (shpnt->select_queue_depths != NULL) {
(shpnt->select_queue_depths) (shpnt, shpnt->host_queue);
}
}
}
@@ -1985,7 +2091,7 @@ int scsi_register_host(Scsi_Host_Template * tpnt)
(*sdtpnt->attach) (SDpnt);
if (SDpnt->attached) {
scsi_build_commandblocks(SDpnt);
if (0 == SDpnt->has_cmdblocks)
if (SDpnt->queue_depth == 0)
out_of_space = 1;
}
}
@@ -2116,6 +2222,8 @@ int scsi_unregister_host(Scsi_Host_Template * tpnt)
printk(KERN_ERR "Attached usage count = %d\n", SDpnt->attached);
goto err_out;
}
if (shpnt->hostt->slave_detach)
(*shpnt->hostt->slave_detach) (SDpnt);
devfs_unregister (SDpnt->de);
put_device(&SDpnt->sdev_driverfs_dev);
}
@@ -2272,10 +2380,10 @@ int scsi_register_device(struct Scsi_Device_Template *tpnt)
* If this driver attached to the device, and don't have any
* command blocks for this device, allocate some.
*/
if (SDpnt->attached && SDpnt->has_cmdblocks == 0) {
if (SDpnt->attached && SDpnt->queue_depth == 0) {
SDpnt->online = TRUE;
scsi_build_commandblocks(SDpnt);
if (0 == SDpnt->has_cmdblocks)
if (SDpnt->queue_depth == 0)
out_of_space = 1;
}
}
@@ -2325,6 +2433,8 @@ int scsi_unregister_device(struct Scsi_Device_Template *tpnt)
* Nobody is using this device any more. Free all of the
* command structures.
*/
if (shpnt->hostt->slave_detach)
(*shpnt->hostt->slave_detach) (SDpnt);
scsi_release_commandblocks(SDpnt);
}
}
@@ -2678,9 +2788,13 @@ Scsi_Device * scsi_get_host_dev(struct Scsi_Host * SHpnt)
SDpnt->host = SHpnt;
SDpnt->id = SHpnt->this_id;
SDpnt->type = -1;
SDpnt->queue_depth = 1;
SDpnt->new_queue_depth = 1;
scsi_build_commandblocks(SDpnt);
if(SDpnt->queue_depth == 0) {
kfree(SDpnt);
return NULL;
}
scsi_initialize_queue(SDpnt, SHpnt);
@@ -481,6 +481,7 @@ extern int scsi_dispatch_cmd(Scsi_Cmnd * SCpnt);
extern void scsi_bottom_half_handler(void);
extern void scsi_release_commandblocks(Scsi_Device * SDpnt);
extern void scsi_build_commandblocks(Scsi_Device * SDpnt);
extern void scsi_adjust_queue_depth(Scsi_Device *, int, int);
extern void scsi_done(Scsi_Cmnd * SCpnt);
extern void scsi_finish_command(Scsi_Cmnd *);
extern int scsi_retry_command(Scsi_Cmnd *);
@@ -563,6 +564,8 @@ struct scsi_device {
volatile unsigned short device_busy; /* commands actually active on low-level */
Scsi_Cmnd *device_queue; /* queue of SCSI Command structures */
Scsi_Cmnd *current_cmnd; /* currently active command */
unsigned short queue_depth; /* How deep of a queue we have */
unsigned short new_queue_depth; /* How deep of a queue we want */
unsigned int id, lun, channel;
@@ -586,24 +589,25 @@ struct scsi_device {
unsigned char current_tag; /* current tag */
unsigned char sync_min_period; /* Not less than this period */
unsigned char sync_max_offset; /* Not greater than this offset */
unsigned char queue_depth; /* How deep a queue to use */
unsigned online:1;
unsigned writeable:1;
unsigned removable:1;
unsigned random:1;
unsigned has_cmdblocks:1;
unsigned changed:1; /* Data invalid due to media change */
unsigned busy:1; /* Used to prevent races */
unsigned lockable:1; /* Able to prevent media removal */
unsigned borken:1; /* Tell the Seagate driver to be
* painfully slow on this device */
unsigned tagged_supported:1; /* Supports SCSI-II tagged queuing */
unsigned tagged_queue:1; /* SCSI-II tagged queuing enabled */
unsigned disconnect:1; /* can disconnect */
unsigned soft_reset:1; /* Uses soft reset option */
unsigned sync:1; /* Negotiate for sync transfers */
unsigned wide:1; /* Negotiate for WIDE transfers */
unsigned sdtr:1; /* Device supports SDTR messages */
unsigned wdtr:1; /* Device supports WDTR messages */
unsigned ppr:1; /* Device supports PPR messages */
unsigned tagged_supported:1; /* Supports SCSI-II tagged queuing */
unsigned tagged_queue:1; /* SCSI-II tagged queuing enabled */
unsigned simple_tags:1; /* Device supports simple queue tag messages */
unsigned ordered_tags:1;/* Device supports ordered queue tag messages */
unsigned single_lun:1; /* Indicates we should only allow I/O to
* one of the luns for the device at a
* time. */
@@ -1409,6 +1409,14 @@ static int scsi_add_lun(Scsi_Device *sdevscan, Scsi_Device **sdevnew,
sdev->lockable = sdev->removable;
sdev->soft_reset = (inq_result[7] & 1) && ((inq_result[3] & 7) == 2);
if (sdev->scsi_level >= SCSI_3 || (sdev->inquiry_len > 56 &&
inq_result[56] & 0x04))
sdev->ppr = 1;
if (inq_result[7] & 0x60)
sdev->wdtr = 1;
if (inq_result[7] & 0x10)
sdev->sdtr = 1;
/*
* XXX maybe move the identifier and driverfs/devfs setup to a new
* function, and call them after this function is called.
@@ -1513,9 +1521,9 @@ static int scsi_probe_and_add_lun(Scsi_Device *sdevscan, Scsi_Device **sdevnew,
* XXX maybe change scsi_release_commandblocks to not reset
* queue_depth to 0.
*/
sdevscan->queue_depth = 1;
sdevscan->new_queue_depth = 1;
scsi_build_commandblocks(sdevscan);
if (sdevscan->has_cmdblocks == 0)
if (sdevscan->queue_depth == 0)
goto alloc_failed;
sreq = scsi_allocate_request(sdevscan);
@@ -1589,7 +1597,7 @@ static int scsi_probe_and_add_lun(Scsi_Device *sdevscan, Scsi_Device **sdevnew,
kfree(scsi_result);
if (sreq != NULL)
scsi_release_request(sreq);
if (sdevscan->has_cmdblocks != 0)
if (sdevscan->queue_depth != 0)
scsi_release_commandblocks(sdevscan);
return SCSI_SCAN_NO_RESPONSE;
}
@@ -1743,9 +1751,9 @@ static int scsi_report_lun_scan(Scsi_Device *sdevscan)
if (sdevscan->scsi_level < SCSI_3)
return 1;
sdevscan->queue_depth = 1;
sdevscan->new_queue_depth = 1;
scsi_build_commandblocks(sdevscan);
if (sdevscan->has_cmdblocks == 0) {
if (sdevscan->queue_depth == 0) {
printk(ALLOC_FAILURE_MSG, __FUNCTION__);
/*
* We are out of memory, don't try scanning any further.
@@ -2018,6 +2026,17 @@ static void scsi_scan_selected_lun(struct Scsi_Host *shost, uint channel,
*/
if (shost->select_queue_depths != NULL)
(shost->select_queue_depths) (shost, shost->host_queue);
if (shost->hostt->slave_attach != NULL)
if ((shost->hostt->slave_attach) (sdev) != 0) {
/*
* Low level driver failed to attach this
* device, we've got to kick it back out
* now as a result :-(
*/
printk("scsi_scan_selected_lun: slave_attach "
"failed, marking device OFFLINE.\n");
sdev->online = FALSE;
}
for (sdt = scsi_devicelist; sdt; sdt = sdt->next)
if (sdt->init && sdt->dev_noticed)
@@ -2028,7 +2047,7 @@ static void scsi_scan_selected_lun(struct Scsi_Host *shost, uint channel,
(*sdt->attach) (sdev);
if (sdev->attached) {
scsi_build_commandblocks(sdev);
if (sdev->has_cmdblocks == 0)
if (sdev->queue_depth == 0)
printk(ALLOC_FAILURE_MSG,
__FUNCTION__);
}