Commit 78ef52ec authored by Christoph Hellwig, committed by Christoph Hellwig

[PATCH] coding style updates for scsi_lib.c

I just couldn't see the mess anymore..  Nuke the ifdefs and use sane
variable names.  Some more small nitpicks but no behaviour changes at
all.
parent baaf76dd
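
The renames applied throughout the file follow a single scheme, visible in every hunk below: the old typedef names are spelled out as struct types, and the Hungarian-style pointer names are replaced by short lowercase ones:

    Scsi_Cmnd *SCpnt                     ->  struct scsi_cmnd *cmd
    Scsi_Request *SRpnt                  ->  struct scsi_request *sreq
    Scsi_Device *SDpnt, *SDloop          ->  struct scsi_device *sdev
    struct Scsi_Host *SHpnt              ->  struct Scsi_Host *shost
    struct Scsi_Device_Template *STpnt   ->  struct Scsi_Device_Template *sdt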
@@ -41,7 +41,7 @@ struct scsi_host_sg_pool scsi_sg_pools[SG_MEMPOOL_NR] = {
* *
* Purpose: Insert pre-formed command into request queue. * Purpose: Insert pre-formed command into request queue.
* *
* Arguments: SCpnt - command that is ready to be queued. * Arguments: cmd - command that is ready to be queued.
* at_head - boolean. True if we should insert at head * at_head - boolean. True if we should insert at head
* of queue, false if we should insert at tail. * of queue, false if we should insert at tail.
* *
@@ -56,10 +56,10 @@ struct scsi_host_sg_pool scsi_sg_pools[SG_MEMPOOL_NR] = {
* for now), and then call the queue request function to actually * for now), and then call the queue request function to actually
* process it. * process it.
*/ */
int scsi_insert_special_cmd(Scsi_Cmnd * SCpnt, int at_head) int scsi_insert_special_cmd(struct scsi_cmnd *cmd, int at_head)
{ {
blk_insert_request(SCpnt->device->request_queue, SCpnt->request, blk_insert_request(cmd->device->request_queue, cmd->request,
at_head, SCpnt); at_head, cmd);
return 0; return 0;
} }
@@ -68,7 +68,7 @@ int scsi_insert_special_cmd(Scsi_Cmnd * SCpnt, int at_head)
* *
* Purpose: Insert pre-formed request into request queue. * Purpose: Insert pre-formed request into request queue.
* *
* Arguments: SRpnt - request that is ready to be queued. * Arguments: sreq - request that is ready to be queued.
* at_head - boolean. True if we should insert at head * at_head - boolean. True if we should insert at head
* of queue, false if we should insert at tail. * of queue, false if we should insert at tail.
* *
@@ -83,24 +83,24 @@ int scsi_insert_special_cmd(Scsi_Cmnd * SCpnt, int at_head)
* for now), and then call the queue request function to actually * for now), and then call the queue request function to actually
* process it. * process it.
*/ */
int scsi_insert_special_req(Scsi_Request * SRpnt, int at_head) int scsi_insert_special_req(struct scsi_request *sreq, int at_head)
{ {
/* This is used to insert SRpnt specials. Because users of /*
* this function are apt to reuse requests with no modification, * Because users of this function are apt to reuse requests with no
* we have to sanitise the request flags here * modification, we have to sanitise the request flags here
*/ */
SRpnt->sr_request->flags &= ~REQ_DONTPREP; sreq->sr_request->flags &= ~REQ_DONTPREP;
blk_insert_request(SRpnt->sr_device->request_queue, SRpnt->sr_request, blk_insert_request(sreq->sr_device->request_queue, sreq->sr_request,
at_head, SRpnt); at_head, sreq);
return 0; return 0;
} }
/* /*
* Function: scsi_init_cmd_errh() * Function: scsi_init_cmd_errh()
* *
* Purpose: Initialize SCpnt fields related to error handling. * Purpose: Initialize cmd fields related to error handling.
* *
* Arguments: SCpnt - command that is ready to be queued. * Arguments: cmd - command that is ready to be queued.
* *
* Returns: Nothing * Returns: Nothing
* *
@@ -108,21 +108,20 @@ int scsi_insert_special_req(Scsi_Request * SRpnt, int at_head)
* fields related to error handling. Typically this will * fields related to error handling. Typically this will
* be called once for each command, as required. * be called once for each command, as required.
*/ */
static int scsi_init_cmd_errh(Scsi_Cmnd * SCpnt) static int scsi_init_cmd_errh(struct scsi_cmnd *cmd)
{ {
SCpnt->owner = SCSI_OWNER_MIDLEVEL; cmd->owner = SCSI_OWNER_MIDLEVEL;
SCpnt->reset_chain = NULL; cmd->reset_chain = NULL;
SCpnt->serial_number = 0; cmd->serial_number = 0;
SCpnt->serial_number_at_timeout = 0; cmd->serial_number_at_timeout = 0;
SCpnt->flags = 0; cmd->flags = 0;
SCpnt->retries = 0; cmd->retries = 0;
cmd->abort_reason = 0;
SCpnt->abort_reason = 0;
memset((void *) SCpnt->sense_buffer, 0, sizeof SCpnt->sense_buffer); memset(cmd->sense_buffer, 0, sizeof cmd->sense_buffer);
if (SCpnt->cmd_len == 0) if (cmd->cmd_len == 0)
SCpnt->cmd_len = COMMAND_SIZE(SCpnt->cmnd[0]); cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]);
/* /*
* We need saved copies of a number of fields - this is because * We need saved copies of a number of fields - this is because
@@ -131,19 +130,16 @@ static int scsi_init_cmd_errh(Scsi_Cmnd * SCpnt)
* we will need to restore these values prior to running the actual * we will need to restore these values prior to running the actual
* command. * command.
*/ */
SCpnt->old_use_sg = SCpnt->use_sg; cmd->old_use_sg = cmd->use_sg;
SCpnt->old_cmd_len = SCpnt->cmd_len; cmd->old_cmd_len = cmd->cmd_len;
SCpnt->sc_old_data_direction = SCpnt->sc_data_direction; cmd->sc_old_data_direction = cmd->sc_data_direction;
SCpnt->old_underflow = SCpnt->underflow; cmd->old_underflow = cmd->underflow;
memcpy((void *) SCpnt->data_cmnd, memcpy(cmd->data_cmnd, cmd->cmnd, sizeof(cmd->cmnd));
(const void *) SCpnt->cmnd, sizeof(SCpnt->cmnd)); cmd->buffer = cmd->request_buffer;
SCpnt->buffer = SCpnt->request_buffer; cmd->bufflen = cmd->request_bufflen;
SCpnt->bufflen = SCpnt->request_bufflen; cmd->reset_chain = NULL;
cmd->internal_timeout = NORMAL_TIMEOUT;
SCpnt->reset_chain = NULL; cmd->abort_reason = 0;
SCpnt->internal_timeout = NORMAL_TIMEOUT;
SCpnt->abort_reason = 0;
return 1; return 1;
} }
@@ -153,23 +149,22 @@ static int scsi_init_cmd_errh(Scsi_Cmnd * SCpnt)
* *
* Purpose: Restore the command state for a retry * Purpose: Restore the command state for a retry
* *
* Arguments: SCpnt - command to be restored * Arguments: cmd - command to be restored
* *
* Returns: Nothing * Returns: Nothing
* *
* Notes: Immediately prior to retrying a command, we need * Notes: Immediately prior to retrying a command, we need
* to restore certain fields that we saved above. * to restore certain fields that we saved above.
*/ */
void scsi_setup_cmd_retry(Scsi_Cmnd *SCpnt) void scsi_setup_cmd_retry(struct scsi_cmnd *cmd)
{ {
memcpy((void *) SCpnt->cmnd, (void *) SCpnt->data_cmnd, memcpy(cmd->cmnd, cmd->data_cmnd, sizeof(cmd->data_cmnd));
sizeof(SCpnt->data_cmnd)); cmd->request_buffer = cmd->buffer;
SCpnt->request_buffer = SCpnt->buffer; cmd->request_bufflen = cmd->bufflen;
SCpnt->request_bufflen = SCpnt->bufflen; cmd->use_sg = cmd->old_use_sg;
SCpnt->use_sg = SCpnt->old_use_sg; cmd->cmd_len = cmd->old_cmd_len;
SCpnt->cmd_len = SCpnt->old_cmd_len; cmd->sc_data_direction = cmd->sc_old_data_direction;
SCpnt->sc_data_direction = SCpnt->sc_old_data_direction; cmd->underflow = cmd->old_underflow;
SCpnt->underflow = SCpnt->old_underflow;
} }
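
The fields restored here are exactly the ones saved in scsi_init_cmd_errh() above, so after the rename the two functions read as a matched save/restore pair around a retried command:

    saved in scsi_init_cmd_errh()                 restored in scsi_setup_cmd_retry()
    cmd->data_cmnd    <- cmd->cmnd                cmd->cmnd              <- cmd->data_cmnd
    cmd->buffer       <- cmd->request_buffer      cmd->request_buffer    <- cmd->buffer
    cmd->bufflen      <- cmd->request_bufflen     cmd->request_bufflen   <- cmd->bufflen
    cmd->old_use_sg   <- cmd->use_sg              cmd->use_sg            <- cmd->old_use_sg
    cmd->old_cmd_len  <- cmd->cmd_len             cmd->cmd_len           <- cmd->old_cmd_len
    cmd->sc_old_data_direction <- sc_data_direction   cmd->sc_data_direction <- sc_old_data_direction
    cmd->old_underflow <- cmd->underflow          cmd->underflow         <- cmd->old_underflow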
/* /*
@@ -177,7 +172,7 @@ void scsi_setup_cmd_retry(Scsi_Cmnd *SCpnt)
* *
* Purpose: Handle post-processing of completed commands. * Purpose: Handle post-processing of completed commands.
* *
* Arguments: SCpnt - command that may need to be requeued. * Arguments: cmd - command that may need to be requeued.
* *
* Returns: Nothing * Returns: Nothing
* *
@@ -187,7 +182,7 @@ void scsi_setup_cmd_retry(Scsi_Cmnd *SCpnt)
* that a medium error occurred, and the sectors after * that a medium error occurred, and the sectors after
* the bad block need to be re-read. * the bad block need to be re-read.
* *
* If SCpnt is NULL, it means that the previous command * If cmd is NULL, it means that the previous command
* was completely finished, and we should simply start * was completely finished, and we should simply start
* a new command, if possible. * a new command, if possible.
* *
@@ -208,17 +203,17 @@ void scsi_setup_cmd_retry(Scsi_Cmnd *SCpnt)
* permutations grows as 2**N, and if too many more special cases * permutations grows as 2**N, and if too many more special cases
* get added, we start to get screwed. * get added, we start to get screwed.
*/ */
void scsi_queue_next_request(request_queue_t * q, Scsi_Cmnd * SCpnt) void scsi_queue_next_request(request_queue_t *q, struct scsi_cmnd *cmd)
{ {
int all_clear; struct scsi_device *sdev, *sdev2;
struct Scsi_Host *shost;
unsigned long flags; unsigned long flags;
Scsi_Device *SDpnt, *SDpnt2; int all_clear;
struct Scsi_Host *SHpnt;
ASSERT_LOCK(q->queue_lock, 0); ASSERT_LOCK(q->queue_lock, 0);
spin_lock_irqsave(q->queue_lock, flags); spin_lock_irqsave(q->queue_lock, flags);
if (SCpnt != NULL) { if (cmd != NULL) {
/* /*
* For some reason, we are not done with this request. * For some reason, we are not done with this request.
@@ -226,16 +221,18 @@ void scsi_queue_next_request(request_queue_t * q, Scsi_Cmnd * SCpnt)
* in which case we need to request the blocks that come after * in which case we need to request the blocks that come after
* the bad sector. * the bad sector.
*/ */
SCpnt->request->special = (void *) SCpnt; cmd->request->special = cmd;
if(blk_rq_tagged(SCpnt->request)) if (blk_rq_tagged(cmd->request))
blk_queue_end_tag(q, SCpnt->request); blk_queue_end_tag(q, cmd->request);
/* set REQ_SPECIAL - we have a command
/*
* set REQ_SPECIAL - we have a command
* clear REQ_DONTPREP - we assume the sg table has been * clear REQ_DONTPREP - we assume the sg table has been
* nuked so we need to set it up again. * nuked so we need to set it up again.
*/ */
SCpnt->request->flags |= REQ_SPECIAL; cmd->request->flags |= REQ_SPECIAL;
SCpnt->request->flags &= ~REQ_DONTPREP; cmd->request->flags &= ~REQ_DONTPREP;
__elv_add_request(q, SCpnt->request, 0, 0); __elv_add_request(q, cmd->request, 0, 0);
} }
/* /*
@@ -243,8 +240,8 @@ void scsi_queue_next_request(request_queue_t * q, Scsi_Cmnd * SCpnt)
*/ */
__blk_run_queue(q); __blk_run_queue(q);
SDpnt = (Scsi_Device *) q->queuedata; sdev = q->queuedata;
SHpnt = SDpnt->host; shost = sdev->host;
/* /*
* If this is a single-lun device, and we are currently finished * If this is a single-lun device, and we are currently finished
@@ -253,15 +250,15 @@ void scsi_queue_next_request(request_queue_t * q, Scsi_Cmnd * SCpnt)
* with special case code, then spin off separate versions and * with special case code, then spin off separate versions and
* use function pointers to pick the right one. * use function pointers to pick the right one.
*/ */
if (SDpnt->single_lun && blk_queue_empty(q) && SDpnt->device_busy ==0 && if (sdev->single_lun && blk_queue_empty(q) && sdev->device_busy ==0 &&
!SHpnt->host_blocked && !SHpnt->host_self_blocked && !shost->host_blocked && !shost->host_self_blocked &&
!((SHpnt->can_queue > 0) && (SHpnt->host_busy >= !((shost->can_queue > 0) && (shost->host_busy >=
SHpnt->can_queue))) { shost->can_queue))) {
list_for_each_entry(SDpnt2, &SDpnt->same_target_siblings, list_for_each_entry(sdev2, &sdev->same_target_siblings,
same_target_siblings) { same_target_siblings) {
if (!SDpnt2->device_blocked && if (!sdev2->device_blocked &&
!blk_queue_empty(SDpnt2->request_queue)) { !blk_queue_empty(sdev2->request_queue)) {
__blk_run_queue(SDpnt2->request_queue); __blk_run_queue(sdev2->request_queue);
break; break;
} }
} }
@@ -276,22 +273,21 @@ void scsi_queue_next_request(request_queue_t * q, Scsi_Cmnd * SCpnt)
* other device might have become starved along the way. * other device might have become starved along the way.
*/ */
all_clear = 1; all_clear = 1;
if (SHpnt->some_device_starved) { if (shost->some_device_starved) {
list_for_each_entry(SDpnt, &SHpnt->my_devices, siblings) { list_for_each_entry(sdev, &shost->my_devices, siblings) {
if ((SHpnt->can_queue > 0 && (SHpnt->host_busy >= SHpnt->can_queue)) if (shost->can_queue > 0 &&
|| (SHpnt->host_blocked) shost->host_busy >= shost->can_queue)
|| (SHpnt->host_self_blocked)) {
break; break;
} if (shost->host_blocked || shost->host_self_blocked)
if (SDpnt->device_blocked || !SDpnt->starved) { break;
if (sdev->device_blocked || !sdev->starved)
continue; continue;
} __blk_run_queue(sdev->request_queue);
__blk_run_queue(SDpnt->request_queue);
all_clear = 0; all_clear = 0;
} }
if (SDpnt == NULL && all_clear) {
SHpnt->some_device_starved = 0; if (sdev == NULL && all_clear)
} shost->some_device_starved = 0;
} }
spin_unlock_irqrestore(q->queue_lock, flags); spin_unlock_irqrestore(q->queue_lock, flags);
} }
@@ -302,7 +298,7 @@ void scsi_queue_next_request(request_queue_t * q, Scsi_Cmnd * SCpnt)
* Purpose: Post-processing of completed commands called from interrupt * Purpose: Post-processing of completed commands called from interrupt
* handler or a bottom-half handler. * handler or a bottom-half handler.
* *
* Arguments: SCpnt - command that is complete. * Arguments: cmd - command that is complete.
* uptodate - 1 if I/O indicates success, 0 for I/O error. * uptodate - 1 if I/O indicates success, 0 for I/O error.
* sectors - number of sectors we want to mark. * sectors - number of sectors we want to mark.
* requeue - indicates whether we should requeue leftovers. * requeue - indicates whether we should requeue leftovers.
@@ -319,13 +315,11 @@ void scsi_queue_next_request(request_queue_t * q, Scsi_Cmnd * SCpnt)
* We are guaranteeing that the request queue will be goosed * We are guaranteeing that the request queue will be goosed
* at some point during this call. * at some point during this call.
*/ */
static Scsi_Cmnd *scsi_end_request(Scsi_Cmnd * SCpnt, static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
int uptodate, int sectors, int requeue)
int sectors,
int requeue)
{ {
request_queue_t *q = SCpnt->device->request_queue; request_queue_t *q = cmd->device->request_queue;
struct request *req = SCpnt->request; struct request *req = cmd->request;
unsigned long flags; unsigned long flags;
ASSERT_LOCK(q->queue_lock, 0); ASSERT_LOCK(q->queue_lock, 0);
@@ -335,15 +329,14 @@ static Scsi_Cmnd *scsi_end_request(Scsi_Cmnd * SCpnt,
* to queue the remainder of them. * to queue the remainder of them.
*/ */
if (end_that_request_first(req, uptodate, sectors)) { if (end_that_request_first(req, uptodate, sectors)) {
if (!requeue) if (requeue) {
return SCpnt;
/* /*
* Bleah. Leftovers again. Stick the leftovers in * Bleah. Leftovers again. Stick the leftovers in
* the front of the queue, and goose the queue again. * the front of the queue, and goose the queue again.
*/ */
scsi_queue_next_request(q, SCpnt); scsi_queue_next_request(q, cmd);
return SCpnt; }
return cmd;
} }
add_disk_randomness(req->rq_disk); add_disk_randomness(req->rq_disk);
@@ -358,39 +351,39 @@ static Scsi_Cmnd *scsi_end_request(Scsi_Cmnd * SCpnt,
* This will goose the queue request function at the end, so we don't * This will goose the queue request function at the end, so we don't
* need to worry about launching another command. * need to worry about launching another command.
*/ */
scsi_put_command(SCpnt); scsi_put_command(cmd);
scsi_queue_next_request(q, NULL); scsi_queue_next_request(q, NULL);
return NULL; return NULL;
} }
static struct scatterlist *scsi_alloc_sgtable(Scsi_Cmnd *SCpnt, int gfp_mask) static struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, int gfp_mask)
{ {
struct scsi_host_sg_pool *sgp; struct scsi_host_sg_pool *sgp;
struct scatterlist *sgl; struct scatterlist *sgl;
BUG_ON(!SCpnt->use_sg); BUG_ON(!cmd->use_sg);
switch (SCpnt->use_sg) { switch (cmd->use_sg) {
case 1 ... 8: case 1 ... 8:
SCpnt->sglist_len = 0; cmd->sglist_len = 0;
break; break;
case 9 ... 16: case 9 ... 16:
SCpnt->sglist_len = 1; cmd->sglist_len = 1;
break; break;
case 17 ... 32: case 17 ... 32:
SCpnt->sglist_len = 2; cmd->sglist_len = 2;
break; break;
case 33 ... 64: case 33 ... 64:
SCpnt->sglist_len = 3; cmd->sglist_len = 3;
break; break;
case 65 ... MAX_PHYS_SEGMENTS: case 65 ... MAX_PHYS_SEGMENTS:
SCpnt->sglist_len = 4; cmd->sglist_len = 4;
break; break;
default: default:
return NULL; return NULL;
} }
sgp = scsi_sg_pools + SCpnt->sglist_len; sgp = scsi_sg_pools + cmd->sglist_len;
sgl = mempool_alloc(sgp->pool, gfp_mask); sgl = mempool_alloc(sgp->pool, gfp_mask);
if (sgl) if (sgl)
memset(sgl, 0, sgp->size); memset(sgl, 0, sgp->size);
@@ -407,13 +400,12 @@ static void scsi_free_sgtable(struct scatterlist *sgl, int index)
mempool_free(sgl, sgp->pool); mempool_free(sgl, sgp->pool);
} }
/* /*
* Function: scsi_release_buffers() * Function: scsi_release_buffers()
* *
* Purpose: Completion processing for block device I/O requests. * Purpose: Completion processing for block device I/O requests.
* *
* Arguments: SCpnt - command that we are bailing. * Arguments: cmd - command that we are bailing.
* *
* Lock status: Assumed that no lock is held upon entry. * Lock status: Assumed that no lock is held upon entry.
* *
@@ -425,28 +417,28 @@ static void scsi_free_sgtable(struct scatterlist *sgl, int index)
* the scatter-gather table, and potentially any bounce * the scatter-gather table, and potentially any bounce
* buffers. * buffers.
*/ */
static void scsi_release_buffers(Scsi_Cmnd * SCpnt) static void scsi_release_buffers(struct scsi_cmnd *cmd)
{ {
struct request *req = SCpnt->request; struct request *req = cmd->request;
ASSERT_LOCK(SCpnt->device->host->host_lock, 0); ASSERT_LOCK(cmd->device->host->host_lock, 0);
/* /*
* Free up any indirection buffers we allocated for DMA purposes. * Free up any indirection buffers we allocated for DMA purposes.
*/ */
if (SCpnt->use_sg) if (cmd->use_sg)
scsi_free_sgtable(SCpnt->request_buffer, SCpnt->sglist_len); scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len);
else if (SCpnt->request_buffer != req->buffer) else if (cmd->request_buffer != req->buffer)
kfree(SCpnt->request_buffer); kfree(cmd->request_buffer);
/* /*
* Zero these out. They now point to freed memory, and it is * Zero these out. They now point to freed memory, and it is
* dangerous to hang onto the pointers. * dangerous to hang onto the pointers.
*/ */
SCpnt->buffer = NULL; cmd->buffer = NULL;
SCpnt->bufflen = 0; cmd->bufflen = 0;
SCpnt->request_buffer = NULL; cmd->request_buffer = NULL;
SCpnt->request_bufflen = 0; cmd->request_bufflen = 0;
} }
/* /*
@@ -477,7 +469,7 @@ static struct Scsi_Device_Template *scsi_get_request_dev(struct request *req)
* *
* Purpose: Completion processing for block device I/O requests. * Purpose: Completion processing for block device I/O requests.
* *
* Arguments: SCpnt - command that is finished. * Arguments: cmd - command that is finished.
* *
* Lock status: Assumed that no lock is held upon entry. * Lock status: Assumed that no lock is held upon entry.
* *
@@ -489,13 +481,13 @@ static struct Scsi_Device_Template *scsi_get_request_dev(struct request *req)
* (the normal case for most drivers), we don't need * (the normal case for most drivers), we don't need
* the logic to deal with cleaning up afterwards. * the logic to deal with cleaning up afterwards.
*/ */
void scsi_io_completion(Scsi_Cmnd * SCpnt, int good_sectors, void scsi_io_completion(struct scsi_cmnd *cmd, int good_sectors,
int block_sectors) int block_sectors)
{ {
int result = SCpnt->result; int result = cmd->result;
int this_count = SCpnt->bufflen >> 9; int this_count = cmd->bufflen >> 9;
request_queue_t *q = SCpnt->device->request_queue; request_queue_t *q = cmd->device->request_queue;
struct request *req = SCpnt->request; struct request *req = cmd->request;
int clear_errors = 1; int clear_errors = 1;
/* /*
@@ -518,44 +510,43 @@ void scsi_io_completion(Scsi_Cmnd * SCpnt, int good_sectors,
* For the case of a READ, we need to copy the data out of the * For the case of a READ, we need to copy the data out of the
* bounce buffer and into the real buffer. * bounce buffer and into the real buffer.
*/ */
if (SCpnt->use_sg) if (cmd->use_sg)
scsi_free_sgtable(SCpnt->buffer, SCpnt->sglist_len); scsi_free_sgtable(cmd->buffer, cmd->sglist_len);
else if (SCpnt->buffer != req->buffer) { else if (cmd->buffer != req->buffer) {
if (rq_data_dir(req) == READ) { if (rq_data_dir(req) == READ) {
unsigned long flags; unsigned long flags;
char *to = bio_kmap_irq(req->bio, &flags); char *to = bio_kmap_irq(req->bio, &flags);
memcpy(to, SCpnt->buffer, SCpnt->bufflen); memcpy(to, cmd->buffer, cmd->bufflen);
bio_kunmap_irq(to, &flags); bio_kunmap_irq(to, &flags);
} }
kfree(SCpnt->buffer); kfree(cmd->buffer);
} }
if (blk_pc_request(req)) { /* SG_IO ioctl from block level */ if (blk_pc_request(req)) { /* SG_IO ioctl from block level */
req->errors = (driver_byte(result) & DRIVER_SENSE) ? req->errors = (driver_byte(result) & DRIVER_SENSE) ?
(CHECK_CONDITION << 1) : (result & 0xff); (CHECK_CONDITION << 1) : (result & 0xff);
if (!result) if (result) {
req->data_len -= SCpnt->bufflen;
else {
clear_errors = 0; clear_errors = 0;
if (SCpnt->sense_buffer[0] & 0x70) { if (cmd->sense_buffer[0] & 0x70) {
int len = 8 + SCpnt->sense_buffer[7]; int len = 8 + cmd->sense_buffer[7];
if (len > SCSI_SENSE_BUFFERSIZE) if (len > SCSI_SENSE_BUFFERSIZE)
len = SCSI_SENSE_BUFFERSIZE; len = SCSI_SENSE_BUFFERSIZE;
memcpy(req->sense, SCpnt->sense_buffer, len); memcpy(req->sense, cmd->sense_buffer, len);
req->sense_len = len; req->sense_len = len;
} }
} } else
req->data_len -= cmd->bufflen;
} }
/* /*
* Zero these out. They now point to freed memory, and it is * Zero these out. They now point to freed memory, and it is
* dangerous to hang onto the pointers. * dangerous to hang onto the pointers.
*/ */
SCpnt->buffer = NULL; cmd->buffer = NULL;
SCpnt->bufflen = 0; cmd->bufflen = 0;
SCpnt->request_buffer = NULL; cmd->request_buffer = NULL;
SCpnt->request_bufflen = 0; cmd->request_bufflen = 0;
/* /*
* Next deal with any sectors which we were able to correctly * Next deal with any sectors which we were able to correctly
@@ -564,7 +555,7 @@ void scsi_io_completion(Scsi_Cmnd * SCpnt, int good_sectors,
if (good_sectors >= 0) { if (good_sectors >= 0) {
SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, %d sectors done.\n", SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, %d sectors done.\n",
req->nr_sectors, good_sectors)); req->nr_sectors, good_sectors));
SCSI_LOG_HLCOMPLETE(1, printk("use_sg is %d\n ", SCpnt->use_sg)); SCSI_LOG_HLCOMPLETE(1, printk("use_sg is %d\n ", cmd->use_sg));
if (clear_errors) if (clear_errors)
req->errors = 0; req->errors = 0;
@@ -579,13 +570,13 @@ void scsi_io_completion(Scsi_Cmnd * SCpnt, int good_sectors,
* requeueing right here - we will requeue down below * requeueing right here - we will requeue down below
* when we handle the bad sectors. * when we handle the bad sectors.
*/ */
SCpnt = scsi_end_request(SCpnt, 1, good_sectors, result == 0); cmd = scsi_end_request(cmd, 1, good_sectors, result == 0);
/* /*
* If the command completed without error, then either finish off the * If the command completed without error, then either finish off the
* rest of the command, or start a new one. * rest of the command, or start a new one.
*/ */
if (result == 0 || SCpnt == NULL ) { if (result == 0 || cmd == NULL ) {
return; return;
} }
} }
@@ -601,28 +592,28 @@ void scsi_io_completion(Scsi_Cmnd * SCpnt, int good_sectors,
* Not yet implemented. A read will fail after being remapped, * Not yet implemented. A read will fail after being remapped,
* a write will call the strategy routine again. * a write will call the strategy routine again.
*/ */
if (SCpnt->device->remap) { if (cmd->device->remap) {
result = 0; result = 0;
} }
#endif #endif
} }
if ((SCpnt->sense_buffer[0] & 0x7f) == 0x70) { if ((cmd->sense_buffer[0] & 0x7f) == 0x70) {
/* /*
* If the device is in the process of becoming ready, * If the device is in the process of becoming ready,
* retry. * retry.
*/ */
if (SCpnt->sense_buffer[12] == 0x04 && if (cmd->sense_buffer[12] == 0x04 &&
SCpnt->sense_buffer[13] == 0x01) { cmd->sense_buffer[13] == 0x01) {
scsi_queue_next_request(q, SCpnt); scsi_queue_next_request(q, cmd);
return; return;
} }
if ((SCpnt->sense_buffer[2] & 0xf) == UNIT_ATTENTION) { if ((cmd->sense_buffer[2] & 0xf) == UNIT_ATTENTION) {
if (SCpnt->device->removable) { if (cmd->device->removable) {
/* detected disc change. set a bit /* detected disc change. set a bit
* and quietly refuse further access. * and quietly refuse further access.
*/ */
SCpnt->device->changed = 1; cmd->device->changed = 1;
SCpnt = scsi_end_request(SCpnt, 0, cmd = scsi_end_request(cmd, 0,
this_count, 1); this_count, 1);
return; return;
} else { } else {
@@ -632,7 +623,7 @@ void scsi_io_completion(Scsi_Cmnd * SCpnt, int good_sectors,
* media change, so we just retry the * media change, so we just retry the
* request and see what happens. * request and see what happens.
*/ */
scsi_queue_next_request(q, SCpnt); scsi_queue_next_request(q, cmd);
return; return;
} }
} }
@@ -644,35 +635,35 @@ void scsi_io_completion(Scsi_Cmnd * SCpnt, int good_sectors,
* past the end of the disk. * past the end of the disk.
*/ */
switch (SCpnt->sense_buffer[2]) { switch (cmd->sense_buffer[2]) {
case ILLEGAL_REQUEST: case ILLEGAL_REQUEST:
if (SCpnt->device->ten) { if (cmd->device->ten) {
SCpnt->device->ten = 0; cmd->device->ten = 0;
/* /*
* This will cause a retry with a 6-byte * This will cause a retry with a 6-byte
* command. * command.
*/ */
scsi_queue_next_request(q, SCpnt); scsi_queue_next_request(q, cmd);
result = 0; result = 0;
} else { } else {
SCpnt = scsi_end_request(SCpnt, 0, this_count, 1); cmd = scsi_end_request(cmd, 0, this_count, 1);
return; return;
} }
break; break;
case NOT_READY: case NOT_READY:
printk(KERN_INFO "Device %s not ready.\n", printk(KERN_INFO "Device %s not ready.\n",
req->rq_disk ? req->rq_disk->disk_name : ""); req->rq_disk ? req->rq_disk->disk_name : "");
SCpnt = scsi_end_request(SCpnt, 0, this_count, 1); cmd = scsi_end_request(cmd, 0, this_count, 1);
return; return;
break; break;
case MEDIUM_ERROR: case MEDIUM_ERROR:
case VOLUME_OVERFLOW: case VOLUME_OVERFLOW:
printk("scsi%d: ERROR on channel %d, id %d, lun %d, CDB: ", printk("scsi%d: ERROR on channel %d, id %d, lun %d, CDB: ",
SCpnt->device->host->host_no, (int) SCpnt->device->channel, cmd->device->host->host_no, (int) cmd->device->channel,
(int) SCpnt->device->id, (int) SCpnt->device->lun); (int) cmd->device->id, (int) cmd->device->lun);
print_command(SCpnt->data_cmnd); print_command(cmd->data_cmnd);
print_sense("sd", SCpnt); print_sense("sd", cmd);
SCpnt = scsi_end_request(SCpnt, 0, block_sectors, 1); cmd = scsi_end_request(cmd, 0, block_sectors, 1);
return; return;
default: default:
break; break;
@@ -684,28 +675,28 @@ void scsi_io_completion(Scsi_Cmnd * SCpnt, int good_sectors,
* recovery reasons. Just retry the request * recovery reasons. Just retry the request
* and see what happens. * and see what happens.
*/ */
scsi_queue_next_request(q, SCpnt); scsi_queue_next_request(q, cmd);
return; return;
} }
if (result) { if (result) {
struct Scsi_Device_Template *STpnt; struct Scsi_Device_Template *sdt;
STpnt = scsi_get_request_dev(SCpnt->request); sdt = scsi_get_request_dev(cmd->request);
printk("SCSI %s error : host %d channel %d id %d lun %d return code = %x\n", printk("SCSI %s error : host %d channel %d id %d lun %d return code = %x\n",
(STpnt ? STpnt->name : "device"), (sdt ? sdt->name : "device"),
SCpnt->device->host->host_no, cmd->device->host->host_no,
SCpnt->device->channel, cmd->device->channel,
SCpnt->device->id, cmd->device->id,
SCpnt->device->lun, result); cmd->device->lun, result);
if (driver_byte(result) & DRIVER_SENSE) if (driver_byte(result) & DRIVER_SENSE)
print_sense("sd", SCpnt); print_sense("sd", cmd);
/* /*
* Mark a single buffer as not uptodate. Queue the remainder. * Mark a single buffer as not uptodate. Queue the remainder.
* We sometimes get this cruft in the event that a medium error * We sometimes get this cruft in the event that a medium error
* isn't properly reported. * isn't properly reported.
*/ */
SCpnt = scsi_end_request(SCpnt, 0, req->current_nr_sectors, 1); cmd = scsi_end_request(cmd, 0, req->current_nr_sectors, 1);
return; return;
} }
} }
@@ -715,26 +706,26 @@ void scsi_io_completion(Scsi_Cmnd * SCpnt, int good_sectors,
* *
* Purpose: SCSI I/O initialize function. * Purpose: SCSI I/O initialize function.
* *
* Arguments: SCpnt - Command descriptor we wish to initialize * Arguments: cmd - Command descriptor we wish to initialize
* *
* Returns: 0 on success * Returns: 0 on success
* BLKPREP_DEFER if the failure is retryable * BLKPREP_DEFER if the failure is retryable
* BLKPREP_KILL if the failure is fatal * BLKPREP_KILL if the failure is fatal
*/ */
static int scsi_init_io(Scsi_Cmnd *SCpnt) static int scsi_init_io(struct scsi_cmnd *cmd)
{ {
struct request *req = SCpnt->request; struct request *req = cmd->request;
struct scatterlist *sgpnt; struct scatterlist *sgpnt;
int count, ret = 0; int count;
/* /*
* if this is a rq->data based REQ_BLOCK_PC, setup for a non-sg xfer * if this is a rq->data based REQ_BLOCK_PC, setup for a non-sg xfer
*/ */
if ((req->flags & REQ_BLOCK_PC) && !req->bio) { if ((req->flags & REQ_BLOCK_PC) && !req->bio) {
SCpnt->request_bufflen = req->data_len; cmd->request_bufflen = req->data_len;
SCpnt->request_buffer = req->data; cmd->request_buffer = req->data;
req->buffer = req->data; req->buffer = req->data;
SCpnt->use_sg = 0; cmd->use_sg = 0;
return 0; return 0;
} }
@@ -743,48 +734,45 @@ static int scsi_init_io(Scsi_Cmnd *SCpnt)
* but now we do (it makes highmem I/O easier to support without * but now we do (it makes highmem I/O easier to support without
* kmapping pages) * kmapping pages)
*/ */
SCpnt->use_sg = req->nr_phys_segments; cmd->use_sg = req->nr_phys_segments;
/* /*
* if sg table allocation fails, requeue request later. * if sg table allocation fails, requeue request later.
*/ */
sgpnt = scsi_alloc_sgtable(SCpnt, GFP_ATOMIC); sgpnt = scsi_alloc_sgtable(cmd, GFP_ATOMIC);
if (unlikely(!sgpnt)) { if (unlikely(!sgpnt)) {
req->flags |= REQ_SPECIAL; req->flags |= REQ_SPECIAL;
ret = BLKPREP_DEFER; return BLKPREP_DEFER;
goto out;
} }
SCpnt->request_buffer = (char *) sgpnt; cmd->request_buffer = (char *) sgpnt;
SCpnt->request_bufflen = req->nr_sectors << 9; cmd->request_bufflen = req->nr_sectors << 9;
if (blk_pc_request(req)) if (blk_pc_request(req))
SCpnt->request_bufflen = req->data_len; cmd->request_bufflen = req->data_len;
req->buffer = NULL; req->buffer = NULL;
/* /*
* Next, walk the list, and fill in the addresses and sizes of * Next, walk the list, and fill in the addresses and sizes of
* each segment. * each segment.
*/ */
count = blk_rq_map_sg(req->q, req, SCpnt->request_buffer); count = blk_rq_map_sg(req->q, req, cmd->request_buffer);
/* /*
* mapped well, send it off * mapped well, send it off
*/ */
if (count <= SCpnt->use_sg) { if (likely(count <= cmd->use_sg)) {
SCpnt->use_sg = count; cmd->use_sg = count;
return 0; return 0;
} }
printk(KERN_ERR "Incorrect number of segments after building list\n"); printk(KERN_ERR "Incorrect number of segments after building list\n");
printk(KERN_ERR "counted %d, received %d\n", count, SCpnt->use_sg); printk(KERN_ERR "counted %d, received %d\n", count, cmd->use_sg);
printk(KERN_ERR "req nr_sec %lu, cur_nr_sec %u\n", req->nr_sectors, printk(KERN_ERR "req nr_sec %lu, cur_nr_sec %u\n", req->nr_sectors,
req->current_nr_sectors); req->current_nr_sectors);
/* release the command and kill it */ /* release the command and kill it */
scsi_put_command(SCpnt); scsi_put_command(cmd);
ret = BLKPREP_KILL; return BLKPREP_KILL;
out:
return ret;
} }
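
A representative piece of the cleanup in the scsi_init_io() hunk above: the old error handling accumulated a status in ret and jumped to an out: label, while the new code returns BLKPREP_DEFER or BLKPREP_KILL directly where the failure is detected, so the label and the extra variable go away with no change in behaviour. Reduced to the two fragments from the hunk:

    /* old */                                /* new */
    ret = BLKPREP_DEFER;                     return BLKPREP_DEFER;
    goto out;

    ret = BLKPREP_KILL;                      return BLKPREP_KILL;
    out:
            return ret;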
/* /*
@@ -805,60 +793,53 @@ static int check_all_luns(struct scsi_device *myself)
int scsi_prep_fn(struct request_queue *q, struct request *req) int scsi_prep_fn(struct request_queue *q, struct request *req)
{ {
struct Scsi_Device_Template *STpnt; struct Scsi_Device_Template *sdt;
Scsi_Cmnd *SCpnt; struct scsi_device *sdev = q->queuedata;
Scsi_Device *SDpnt; struct scsi_cmnd *cmd;
SDpnt = (Scsi_Device *) q->queuedata;
BUG_ON(!SDpnt);
/* /*
* Find the actual device driver associated with this command. * Find the actual device driver associated with this command.
* The SPECIAL requests are things like character device or * The SPECIAL requests are things like character device or
* ioctls, which did not originate from ll_rw_blk. Note that * ioctls, which did not originate from ll_rw_blk. Note that
* the special field is also used to indicate the SCpnt for * the special field is also used to indicate the cmd for
* the remainder of a partially fulfilled request that can * the remainder of a partially fulfilled request that can
* come up when there is a medium error. We have to treat * come up when there is a medium error. We have to treat
* these two cases differently. We differentiate by looking * these two cases differently. We differentiate by looking
* at request->cmd, as this tells us the real story. * at request->cmd, as this tells us the real story.
*/ */
if (req->flags & REQ_SPECIAL) { if (req->flags & REQ_SPECIAL) {
Scsi_Request *SRpnt; struct scsi_request *sreq = req->special;
STpnt = NULL;
SCpnt = (Scsi_Cmnd *) req->special;
SRpnt = (Scsi_Request *) req->special;
if (SRpnt->sr_magic == SCSI_REQ_MAGIC) { if (sreq->sr_magic == SCSI_REQ_MAGIC) {
SCpnt = scsi_get_command(SRpnt->sr_device, GFP_ATOMIC); cmd = scsi_get_command(sreq->sr_device, GFP_ATOMIC);
if (!SCpnt) if (unlikely(!cmd))
return BLKPREP_DEFER; return BLKPREP_DEFER;
scsi_init_cmd_from_req(SCpnt, SRpnt); scsi_init_cmd_from_req(cmd, sreq);
} } else
cmd = req->special;
} else if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) { } else if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) {
/* /*
* Now try and find a command block that we can use. * Now try and find a command block that we can use.
*/ */
if (!req->special) { if (!req->special) {
SCpnt = scsi_get_command(SDpnt, GFP_ATOMIC); cmd = scsi_get_command(sdev, GFP_ATOMIC);
if (unlikely(!SCpnt)) if (unlikely(!cmd))
return BLKPREP_DEFER; return BLKPREP_DEFER;
} else } else
SCpnt = req->special; cmd = req->special;
/* pull a tag out of the request if we have one */ /* pull a tag out of the request if we have one */
SCpnt->tag = req->tag; cmd->tag = req->tag;
} else { } else {
blk_dump_rq_flags(req, "SCSI bad req"); blk_dump_rq_flags(req, "SCSI bad req");
return BLKPREP_KILL; return BLKPREP_KILL;
} }
/* note the overloading of req->special. When the tag /* note the overloading of req->special. When the tag
* is active it always means SCpnt. If the tag goes * is active it always means cmd. If the tag goes
* back for re-queueing, it may be reset */ * back for re-queueing, it may be reset */
req->special = SCpnt; req->special = cmd;
SCpnt->request = req; cmd->request = req;
/* /*
* FIXME: drop the lock here because the functions below * FIXME: drop the lock here because the functions below
@@ -867,7 +848,6 @@ int scsi_prep_fn(struct request_queue *q, struct request *req)
* lock. We hope REQ_STARTED prevents anything untoward from * lock. We hope REQ_STARTED prevents anything untoward from
* happening now. * happening now.
*/ */
if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) { if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) {
int ret; int ret;
@@ -883,27 +863,30 @@ int scsi_prep_fn(struct request_queue *q, struct request *req)
* some kinds of consistency checking may cause the * some kinds of consistency checking may cause the
* request to be rejected immediately. * request to be rejected immediately.
*/ */
STpnt = scsi_get_request_dev(req); sdt = scsi_get_request_dev(req);
BUG_ON(!STpnt); BUG_ON(!sdt);
/* /*
* This sets up the scatter-gather table (allocating if * This sets up the scatter-gather table (allocating if
* required). * required).
*/ */
if ((ret = scsi_init_io(SCpnt))) ret = scsi_init_io(cmd);
/* BLKPREP_KILL return also releases the command */ if (ret) /* BLKPREP_KILL return also releases the command */
return ret; return ret;
/* /*
* Initialize the actual SCSI command for this request. * Initialize the actual SCSI command for this request.
*/ */
if (!STpnt->init_command(SCpnt)) { if (unlikely(!sdt->init_command(cmd))) {
scsi_release_buffers(SCpnt); scsi_release_buffers(cmd);
scsi_put_command(SCpnt); scsi_put_command(cmd);
return BLKPREP_KILL; return BLKPREP_KILL;
} }
} }
/* The request is now prepped, no need to come back here */
/*
* The request is now prepped, no need to come back here
*/
req->flags |= REQ_DONTPREP; req->flags |= REQ_DONTPREP;
return BLKPREP_OK; return BLKPREP_OK;
} }
@@ -911,48 +894,34 @@ int scsi_prep_fn(struct request_queue *q, struct request *req)
/* /*
* Function: scsi_request_fn() * Function: scsi_request_fn()
* *
* Purpose: Generic version of request function for SCSI hosts. * Purpose: Main strategy routine for SCSI.
* *
* Arguments: q - Pointer to actual queue. * Arguments: q - Pointer to actual queue.
* *
* Returns: Nothing * Returns: Nothing
* *
* Lock status: IO request lock assumed to be held when called. * Lock status: IO request lock assumed to be held when called.
*
* Notes: The theory is that this function is something which individual
* drivers could also supply if they wished to. The problem
* is that we have 30 some odd low-level drivers in the kernel
* tree already, and it would be most difficult to retrofit
* this crap into all of them. Thus this function has the job
* of acting as a generic queue manager for all of those existing
* drivers.
*/ */
void scsi_request_fn(request_queue_t * q) void scsi_request_fn(request_queue_t *q)
{ {
struct scsi_device *sdev = q->queuedata;
struct Scsi_Host *shost = sdev->host;
struct scsi_cmnd *cmd;
struct request *req; struct request *req;
Scsi_Cmnd *SCpnt;
Scsi_Device *SDpnt;
struct Scsi_Host *SHpnt;
ASSERT_LOCK(q->queue_lock, 1); ASSERT_LOCK(q->queue_lock, 1);
SDpnt = (Scsi_Device *) q->queuedata;
if (!SDpnt) {
panic("Missing device");
}
SHpnt = SDpnt->host;
/* /*
* To start with, we keep looping until the queue is empty, or until * To start with, we keep looping until the queue is empty, or until
* the host is no longer able to accept any more requests. * the host is no longer able to accept any more requests.
*/ */
while (1 == 1) { for (;;) {
/* /*
* Check this again - each time we loop through we will have * Check this again - each time we loop through we will have
* released the lock and grabbed it again, so each time * released the lock and grabbed it again, so each time
* we need to check to see if the queue is plugged or not. * we need to check to see if the queue is plugged or not.
*/ */
if (SHpnt->in_recovery || blk_queue_plugged(q)) if (shost->in_recovery || blk_queue_plugged(q))
return; return;
/* /*
@@ -963,39 +932,43 @@ void scsi_request_fn(request_queue_t * q)
*/ */
req = elv_next_request(q); req = elv_next_request(q);
if (SDpnt->device_busy >= SDpnt->queue_depth) if (sdev->device_busy >= sdev->queue_depth)
break; break;
if (SDpnt->single_lun && check_all_luns(SDpnt)) if (sdev->single_lun && check_all_luns(sdev))
break; break;
if(SHpnt->host_busy == 0 && SHpnt->host_blocked) { if (shost->host_busy == 0 && shost->host_blocked) {
/* unblock after host_blocked iterates to zero */ /* unblock after host_blocked iterates to zero */
if(--SHpnt->host_blocked == 0) { if (--shost->host_blocked == 0) {
SCSI_LOG_MLQUEUE(3, printk("scsi%d unblocking host at zero depth\n", SHpnt->host_no)); SCSI_LOG_MLQUEUE(3,
printk("scsi%d unblocking host at zero depth\n",
shost->host_no));
} else { } else {
blk_plug_device(q); blk_plug_device(q);
break; break;
} }
} }
if(SDpnt->device_busy == 0 && SDpnt->device_blocked) {
if (sdev->device_busy == 0 && sdev->device_blocked) {
/* unblock after device_blocked iterates to zero */ /* unblock after device_blocked iterates to zero */
if(--SDpnt->device_blocked == 0) { if (--sdev->device_blocked == 0) {
SCSI_LOG_MLQUEUE(3, printk("scsi%d (%d:%d) unblocking device at zero depth\n", SHpnt->host_no, SDpnt->id, SDpnt->lun)); SCSI_LOG_MLQUEUE(3,
printk("scsi%d (%d:%d) unblocking device at zero depth\n",
shost->host_no, sdev->id, sdev->lun));
} else { } else {
blk_plug_device(q); blk_plug_device(q);
break; break;
} }
} }
/* /*
* If the device cannot accept another request, then quit. * If the device cannot accept another request, then quit.
*/ */
if (SDpnt->device_blocked) { if (sdev->device_blocked)
break; break;
} if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
if ((SHpnt->can_queue > 0 && (SHpnt->host_busy >= SHpnt->can_queue)) shost->host_blocked || shost->host_self_blocked) {
|| (SHpnt->host_blocked)
|| (SHpnt->host_self_blocked)) {
/* /*
* If we are unable to process any commands at all for * If we are unable to process any commands at all for
* this device, then we consider it to be starved. * this device, then we consider it to be starved.
@@ -1004,14 +977,13 @@ void scsi_request_fn(request_queue_t * q)
* little help getting it started again * little help getting it started again
* once the host isn't quite so busy. * once the host isn't quite so busy.
*/ */
if (SDpnt->device_busy == 0) { if (sdev->device_busy == 0) {
SDpnt->starved = 1; sdev->starved = 1;
SHpnt->some_device_starved = 1; shost->some_device_starved = 1;
} }
break; break;
} else { } else
SDpnt->starved = 0; sdev->starved = 0;
}
/* /*
* If we couldn't find a request that could be queued, then we * If we couldn't find a request that could be queued, then we
@@ -1020,21 +992,22 @@ void scsi_request_fn(request_queue_t * q)
if (blk_queue_empty(q)) if (blk_queue_empty(q))
break; break;
if(!req) { if (!req) {
/* If the device is busy, a returning I/O /* If the device is busy, a returning I/O
* will restart the queue. Otherwise, we have * will restart the queue. Otherwise, we have
* to plug the queue */ * to plug the queue */
if(SDpnt->device_busy == 0) if(sdev->device_busy == 0)
blk_plug_device(q); blk_plug_device(q);
break; break;
} }
SCpnt = (struct scsi_cmnd *)req->special; cmd = req->special;
/* Should be impossible for a correctly prepared request /*
* Should be impossible for a correctly prepared request
* please mail the stack trace to linux-scsi@vger.kernel.org * please mail the stack trace to linux-scsi@vger.kernel.org
*/ */
BUG_ON(!SCpnt); BUG_ON(!cmd);
/* /*
* Finally, before we release the lock, we copy the * Finally, before we release the lock, we copy the
@@ -1044,27 +1017,27 @@ void scsi_request_fn(request_queue_t * q)
* reason to search the list, because all of the * reason to search the list, because all of the
* commands in this queue are for the same device. * commands in this queue are for the same device.
*/ */
if(!(blk_queue_tagged(q) && (blk_queue_start_tag(q, req) == 0))) if (!(blk_queue_tagged(q) && (blk_queue_start_tag(q, req) == 0)))
blkdev_dequeue_request(req); blkdev_dequeue_request(req);
/* /*
* Now bump the usage count for both the host and the * Now bump the usage count for both the host and the
* device. * device.
*/ */
SHpnt->host_busy++; shost->host_busy++;
SDpnt->device_busy++; sdev->device_busy++;
spin_unlock_irq(q->queue_lock); spin_unlock_irq(q->queue_lock);
/* /*
* Finally, initialize any error handling parameters, and set up * Finally, initialize any error handling parameters, and set up
* the timers for timeouts. * the timers for timeouts.
*/ */
scsi_init_cmd_errh(SCpnt); scsi_init_cmd_errh(cmd);
/* /*
* Dispatch the command to the low-level driver. * Dispatch the command to the low-level driver.
*/ */
scsi_dispatch_cmd(SCpnt); scsi_dispatch_cmd(cmd);
/* /*
* Now we need to grab the lock again. We are about to mess * Now we need to grab the lock again. We are about to mess
@@ -1080,7 +1053,7 @@ void scsi_request_fn(request_queue_t * q)
* Purpose: Utility function used by low-level drivers to prevent further * Purpose: Utility function used by low-level drivers to prevent further
* commands from being queued to the device. * commands from being queued to the device.
* *
* Arguments: SHpnt - Host in question * Arguments: shost - Host in question
* *
* Returns: Nothing * Returns: Nothing
* *
@@ -1090,9 +1063,9 @@ void scsi_request_fn(request_queue_t * q)
* get unblocked other than the low-level driver calling * get unblocked other than the low-level driver calling
* scsi_unblock_requests(). * scsi_unblock_requests().
*/ */
void scsi_block_requests(struct Scsi_Host * SHpnt) void scsi_block_requests(struct Scsi_Host *shost)
{ {
SHpnt->host_self_blocked = 1; shost->host_self_blocked = 1;
} }
/* /*
@@ -1101,7 +1074,7 @@ void scsi_block_requests(struct Scsi_Host * SHpnt)
* Purpose: Utility function used by low-level drivers to allow further * Purpose: Utility function used by low-level drivers to allow further
* commands from being queued to the device. * commands from being queued to the device.
* *
* Arguments: SHpnt - Host in question * Arguments: shost - Host in question
* *
* Returns: Nothing * Returns: Nothing
* *
@@ -1115,14 +1088,17 @@ void scsi_block_requests(struct Scsi_Host * SHpnt)
* internals of the scsi mid-layer won't require wholesale * internals of the scsi mid-layer won't require wholesale
* changes to drivers that use this feature. * changes to drivers that use this feature.
*/ */
void scsi_unblock_requests(struct Scsi_Host * SHpnt) void scsi_unblock_requests(struct Scsi_Host *shost)
{ {
Scsi_Device *SDloop; struct scsi_device *sdev;
SHpnt->host_self_blocked = 0; shost->host_self_blocked = 0;
/* Now that we are unblocked, try to start the queues. */
list_for_each_entry(SDloop, &SHpnt->my_devices, siblings) /*
scsi_queue_next_request(SDloop->request_queue, NULL); * Now that we are unblocked, try to start the queues.
*/
list_for_each_entry(sdev, &shost->my_devices, siblings)
scsi_queue_next_request(sdev->request_queue, NULL);
} }
/* /*
@@ -1131,7 +1107,7 @@ void scsi_unblock_requests(struct Scsi_Host * SHpnt)
* Purpose: Utility function used by low-level drivers to report that * Purpose: Utility function used by low-level drivers to report that
* they have observed a bus reset on the bus being handled. * they have observed a bus reset on the bus being handled.
* *
* Arguments: SHpnt - Host in question * Arguments: shost - Host in question
* channel - channel on which reset was observed. * channel - channel on which reset was observed.
* *
* Returns: Nothing * Returns: Nothing
@@ -1146,13 +1122,14 @@ void scsi_unblock_requests(struct Scsi_Host * SHpnt)
* The main purpose of this is to make sure that a CHECK_CONDITION * The main purpose of this is to make sure that a CHECK_CONDITION
* is properly treated. * is properly treated.
*/ */
void scsi_report_bus_reset(struct Scsi_Host * SHpnt, int channel) void scsi_report_bus_reset(struct Scsi_Host *shost, int channel)
{ {
Scsi_Device *SDloop; struct scsi_device *sdev;
list_for_each_entry(SDloop, &SHpnt->my_devices, siblings) {
if (channel == SDloop->channel) { list_for_each_entry(sdev, &shost->my_devices, siblings) {
SDloop->was_reset = 1; if (channel == sdev->channel) {
SDloop->expecting_cc_ua = 1; sdev->was_reset = 1;
sdev->expecting_cc_ua = 1;
} }
} }
} }
@@ -1166,11 +1143,11 @@ void scsi_report_bus_reset(struct Scsi_Host * SHpnt, int channel)
* The details of the implementation remain to be settled, however the * The details of the implementation remain to be settled, however the
* stubs are here now so that the actual drivers will properly compile. * stubs are here now so that the actual drivers will properly compile.
*/ */
void scsi_register_blocked_host(struct Scsi_Host * SHpnt) void scsi_register_blocked_host(struct Scsi_Host * shost)
{ {
} }
void scsi_deregister_blocked_host(struct Scsi_Host * SHpnt) void scsi_deregister_blocked_host(struct Scsi_Host * shost)
{ {
} }