Commit 7b253dfa authored by Doug Ledford's avatar Doug Ledford

Change all uses of device->request_queue (was struct, now pointer)

Update scsi_scan so that we don't pass around a scsi_device struct for
    	scanning.  Instead, we pass around a request_queue during
    	scanning and create and destroy device structs as needed.  This
    	allows us to have a 1:1 correlation between scsi_alloc_sdev()
    	and scsi_free_sdev() calls, which we didn't have before.
parent 3967f4c3
...@@ -160,15 +160,13 @@ void scsi_build_commandblocks(Scsi_Device * SDpnt); ...@@ -160,15 +160,13 @@ void scsi_build_commandblocks(Scsi_Device * SDpnt);
*/ */
void scsi_initialize_queue(Scsi_Device * SDpnt, struct Scsi_Host * SHpnt) void scsi_initialize_queue(Scsi_Device * SDpnt, struct Scsi_Host * SHpnt)
{ {
request_queue_t *q = &SDpnt->request_queue; request_queue_t *q = SDpnt->request_queue;
/* /*
* tell block layer about assigned host_lock for this host * tell block layer about assigned host_lock for this host
*/ */
blk_init_queue(q, scsi_request_fn, SHpnt->host_lock); blk_init_queue(q, scsi_request_fn, SHpnt->host_lock);
q->queuedata = (void *) SDpnt;
/* Hardware imposed limit. */ /* Hardware imposed limit. */
blk_queue_max_hw_segments(q, SHpnt->sg_tablesize); blk_queue_max_hw_segments(q, SHpnt->sg_tablesize);
...@@ -223,7 +221,7 @@ __setup("scsi_logging=", scsi_logging_setup); ...@@ -223,7 +221,7 @@ __setup("scsi_logging=", scsi_logging_setup);
static void scsi_wait_done(Scsi_Cmnd * SCpnt) static void scsi_wait_done(Scsi_Cmnd * SCpnt)
{ {
struct request *req = SCpnt->request; struct request *req = SCpnt->request;
struct request_queue *q = &SCpnt->device->request_queue; struct request_queue *q = SCpnt->device->request_queue;
unsigned long flags; unsigned long flags;
ASSERT_LOCK(q->queue_lock, 0); ASSERT_LOCK(q->queue_lock, 0);
...@@ -656,17 +654,14 @@ int scsi_mlqueue_insert(Scsi_Cmnd * cmd, int reason) ...@@ -656,17 +654,14 @@ int scsi_mlqueue_insert(Scsi_Cmnd * cmd, int reason)
*/ */
void scsi_release_command(Scsi_Cmnd * SCpnt) void scsi_release_command(Scsi_Cmnd * SCpnt)
{ {
request_queue_t *q = &SCpnt->device->request_queue;
__scsi_release_command(SCpnt); __scsi_release_command(SCpnt);
/* /*
* Finally, hit the queue request function to make sure that * Finally, hit the queue request function to make sure that
* the device is actually busy if there are requests present. * the device is actually busy if there are requests present.
* This won't block - if the device cannot take any more, life * This won't block - if the device cannot take any more, life
* will go on. * will go on.
*/ */
scsi_queue_next_request(q, NULL); scsi_queue_next_request(SCpnt->device->request_queue, NULL);
} }
/* /*
...@@ -810,13 +805,12 @@ void scsi_wait_req (Scsi_Request * SRpnt, const void *cmnd , ...@@ -810,13 +805,12 @@ void scsi_wait_req (Scsi_Request * SRpnt, const void *cmnd ,
int timeout, int retries) int timeout, int retries)
{ {
DECLARE_COMPLETION(wait); DECLARE_COMPLETION(wait);
request_queue_t *q = &SRpnt->sr_device->request_queue;
SRpnt->sr_request->waiting = &wait; SRpnt->sr_request->waiting = &wait;
SRpnt->sr_request->rq_status = RQ_SCSI_BUSY; SRpnt->sr_request->rq_status = RQ_SCSI_BUSY;
scsi_do_req (SRpnt, (void *) cmnd, scsi_do_req (SRpnt, (void *) cmnd,
buffer, bufflen, scsi_wait_done, timeout, retries); buffer, bufflen, scsi_wait_done, timeout, retries);
generic_unplug_device(q); generic_unplug_device(SRpnt->sr_device->request_queue);
wait_for_completion(&wait); wait_for_completion(&wait);
SRpnt->sr_request->waiting = NULL; SRpnt->sr_request->waiting = NULL;
if( SRpnt->sr_command != NULL ) if( SRpnt->sr_command != NULL )
...@@ -1912,10 +1906,8 @@ void scsi_device_put(struct scsi_device *sdev) ...@@ -1912,10 +1906,8 @@ void scsi_device_put(struct scsi_device *sdev)
*/ */
int scsi_slave_attach(struct scsi_device *sdev) int scsi_slave_attach(struct scsi_device *sdev)
{ {
/* all this code is now handled elsewhere
if (sdev->attached++ == 0) { if (sdev->attached++ == 0) {
/*
* No one was attached.
*/
scsi_build_commandblocks(sdev); scsi_build_commandblocks(sdev);
if (sdev->current_queue_depth == 0) { if (sdev->current_queue_depth == 0) {
printk(KERN_ERR "scsi: Allocation failure during" printk(KERN_ERR "scsi: Allocation failure during"
...@@ -1935,6 +1927,8 @@ int scsi_slave_attach(struct scsi_device *sdev) ...@@ -1935,6 +1927,8 @@ int scsi_slave_attach(struct scsi_device *sdev)
scsi_adjust_queue_depth(sdev, 0, scsi_adjust_queue_depth(sdev, 0,
sdev->host->cmd_per_lun); sdev->host->cmd_per_lun);
} }
*/
sdev->attached++;
return 0; return 0;
} }
...@@ -1950,9 +1944,12 @@ int scsi_slave_attach(struct scsi_device *sdev) ...@@ -1950,9 +1944,12 @@ int scsi_slave_attach(struct scsi_device *sdev)
*/ */
void scsi_slave_detach(struct scsi_device *sdev) void scsi_slave_detach(struct scsi_device *sdev)
{ {
/*
if (--sdev->attached == 0) { if (--sdev->attached == 0) {
scsi_release_commandblocks(sdev); scsi_release_commandblocks(sdev);
} }
*/
sdev->attached--;
} }
/* /*
* This entry point should be called by a loadable module if it is trying * This entry point should be called by a loadable module if it is trying
......
...@@ -569,14 +569,12 @@ struct scsi_device { ...@@ -569,14 +569,12 @@ struct scsi_device {
/* /*
* This information is private to the scsi mid-layer. * This information is private to the scsi mid-layer.
*/ */
struct scsi_device *next; /* Used for linked list */
struct scsi_device *prev; /* Used for linked list */
struct list_head siblings; /* list of all devices on this host */ struct list_head siblings; /* list of all devices on this host */
struct list_head same_target_siblings; /* just the devices sharing same target id */ struct list_head same_target_siblings; /* just the devices sharing same target id */
wait_queue_head_t scpnt_wait; /* Used to wait if wait_queue_head_t scpnt_wait; /* Used to wait if
device is busy */ device is busy */
struct Scsi_Host *host; struct Scsi_Host *host;
request_queue_t request_queue; request_queue_t *request_queue;
atomic_t device_active; /* commands checked out for device */ atomic_t device_active; /* commands checked out for device */
volatile unsigned short device_busy; /* commands actually active on low-level */ volatile unsigned short device_busy; /* commands actually active on low-level */
struct list_head free_cmnds; /* list of available Scsi_Cmnd structs */ struct list_head free_cmnds; /* list of available Scsi_Cmnd structs */
...@@ -894,11 +892,9 @@ extern int scsi_reset_provider(Scsi_Device *, int); ...@@ -894,11 +892,9 @@ extern int scsi_reset_provider(Scsi_Device *, int);
* would be adjustable from 0 to depth. * would be adjustable from 0 to depth.
**/ **/
static inline void scsi_activate_tcq(Scsi_Device *SDpnt, int depth) { static inline void scsi_activate_tcq(Scsi_Device *SDpnt, int depth) {
request_queue_t *q = &SDpnt->request_queue;
if(SDpnt->tagged_supported) { if(SDpnt->tagged_supported) {
if(!blk_queue_tagged(q)) if(!blk_queue_tagged(SDpnt->request_queue))
blk_queue_init_tags(q, depth); blk_queue_init_tags(SDpnt->request_queue, depth);
scsi_adjust_queue_depth(SDpnt, MSG_ORDERED_TAG, depth); scsi_adjust_queue_depth(SDpnt, MSG_ORDERED_TAG, depth);
} }
} }
...@@ -908,10 +904,8 @@ static inline void scsi_activate_tcq(Scsi_Device *SDpnt, int depth) { ...@@ -908,10 +904,8 @@ static inline void scsi_activate_tcq(Scsi_Device *SDpnt, int depth) {
* @SDpnt: device to turn off TCQ for * @SDpnt: device to turn off TCQ for
**/ **/
static inline void scsi_deactivate_tcq(Scsi_Device *SDpnt, int depth) { static inline void scsi_deactivate_tcq(Scsi_Device *SDpnt, int depth) {
request_queue_t *q = &SDpnt->request_queue; if(blk_queue_tagged(SDpnt->request_queue))
blk_queue_free_tags(SDpnt->request_queue);
if(blk_queue_tagged(q))
blk_queue_free_tags(q);
scsi_adjust_queue_depth(SDpnt, 0, depth); scsi_adjust_queue_depth(SDpnt, 0, depth);
} }
...@@ -957,7 +951,7 @@ static inline Scsi_Cmnd *scsi_find_tag(Scsi_Device *SDpnt, int tag) { ...@@ -957,7 +951,7 @@ static inline Scsi_Cmnd *scsi_find_tag(Scsi_Device *SDpnt, int tag) {
/* single command, look in space */ /* single command, look in space */
return SDpnt->current_cmnd; return SDpnt->current_cmnd;
req = blk_queue_find_tag(&SDpnt->request_queue, tag); req = blk_queue_find_tag(SDpnt->request_queue, tag);
if(req == NULL) if(req == NULL)
return NULL; return NULL;
......
...@@ -1487,7 +1487,7 @@ static void scsi_restart_operations(struct Scsi_Host *shost) ...@@ -1487,7 +1487,7 @@ static void scsi_restart_operations(struct Scsi_Host *shost)
break; break;
} }
__blk_run_queue(&sdev->request_queue); __blk_run_queue(sdev->request_queue);
} }
spin_unlock_irqrestore(shost->host_lock, flags); spin_unlock_irqrestore(shost->host_lock, flags);
} }
......
...@@ -57,9 +57,8 @@ struct scsi_host_sg_pool scsi_sg_pools[SG_MEMPOOL_NR] = { ...@@ -57,9 +57,8 @@ struct scsi_host_sg_pool scsi_sg_pools[SG_MEMPOOL_NR] = {
*/ */
int scsi_insert_special_cmd(Scsi_Cmnd * SCpnt, int at_head) int scsi_insert_special_cmd(Scsi_Cmnd * SCpnt, int at_head)
{ {
request_queue_t *q = &SCpnt->device->request_queue; blk_insert_request(SCpnt->device->request_queue, SCpnt->request,
at_head, SCpnt);
blk_insert_request(q, SCpnt->request, at_head, SCpnt);
return 0; return 0;
} }
...@@ -85,16 +84,13 @@ int scsi_insert_special_cmd(Scsi_Cmnd * SCpnt, int at_head) ...@@ -85,16 +84,13 @@ int scsi_insert_special_cmd(Scsi_Cmnd * SCpnt, int at_head)
*/ */
int scsi_insert_special_req(Scsi_Request * SRpnt, int at_head) int scsi_insert_special_req(Scsi_Request * SRpnt, int at_head)
{ {
request_queue_t *q = &SRpnt->sr_device->request_queue;
/* This is used to insert SRpnt specials. Because users of /* This is used to insert SRpnt specials. Because users of
* this function are apt to reuse requests with no modification, * this function are apt to reuse requests with no modification,
* we have to sanitise the request flags here * we have to sanitise the request flags here
*/ */
SRpnt->sr_request->flags &= ~REQ_DONTPREP; SRpnt->sr_request->flags &= ~REQ_DONTPREP;
blk_insert_request(SRpnt->sr_device->request_queue, SRpnt->sr_request,
blk_insert_request(q, SRpnt->sr_request, at_head, SRpnt); at_head, SRpnt);
return 0; return 0;
} }
...@@ -215,7 +211,7 @@ void scsi_queue_next_request(request_queue_t * q, Scsi_Cmnd * SCpnt) ...@@ -215,7 +211,7 @@ void scsi_queue_next_request(request_queue_t * q, Scsi_Cmnd * SCpnt)
{ {
int all_clear; int all_clear;
unsigned long flags; unsigned long flags;
Scsi_Device *SDpnt; Scsi_Device *SDpnt, *SDpnt2;
struct Scsi_Host *SHpnt; struct Scsi_Host *SHpnt;
ASSERT_LOCK(q->queue_lock, 0); ASSERT_LOCK(q->queue_lock, 0);
...@@ -256,17 +252,17 @@ void scsi_queue_next_request(request_queue_t * q, Scsi_Cmnd * SCpnt) ...@@ -256,17 +252,17 @@ void scsi_queue_next_request(request_queue_t * q, Scsi_Cmnd * SCpnt)
* with special case code, then spin off separate versions and * with special case code, then spin off separate versions and
* use function pointers to pick the right one. * use function pointers to pick the right one.
*/ */
if (SDpnt->single_lun && blk_queue_empty(q) && SDpnt->device_busy ==0) { if (SDpnt->single_lun && blk_queue_empty(q) && SDpnt->device_busy ==0 &&
list_for_each_entry(SDpnt, &SHpnt->my_devices, siblings) { !SHpnt->host_blocked && !SHpnt->host_self_blocked &&
if (((SHpnt->can_queue > 0) !((SHpnt->can_queue > 0) && (SHpnt->host_busy >=
&& (SHpnt->host_busy >= SHpnt->can_queue)) SHpnt->can_queue))) {
|| (SHpnt->host_blocked) list_for_each_entry(SDpnt2, &SDpnt->same_target_siblings,
|| (SHpnt->host_self_blocked) same_target_siblings) {
|| (SDpnt->device_blocked)) { if (!SDpnt2->device_blocked &&
!blk_queue_empty(SDpnt2->request_queue)) {
__blk_run_queue(SDpnt2->request_queue);
break; break;
} }
__blk_run_queue(&SDpnt->request_queue);
} }
} }
...@@ -289,7 +285,7 @@ void scsi_queue_next_request(request_queue_t * q, Scsi_Cmnd * SCpnt) ...@@ -289,7 +285,7 @@ void scsi_queue_next_request(request_queue_t * q, Scsi_Cmnd * SCpnt)
if (SDpnt->device_blocked || !SDpnt->starved) { if (SDpnt->device_blocked || !SDpnt->starved) {
continue; continue;
} }
__blk_run_queue(&SDpnt->request_queue); __blk_run_queue(SDpnt->request_queue);
all_clear = 0; all_clear = 0;
} }
if (SDpnt == NULL && all_clear) { if (SDpnt == NULL && all_clear) {
...@@ -327,7 +323,7 @@ static Scsi_Cmnd *scsi_end_request(Scsi_Cmnd * SCpnt, ...@@ -327,7 +323,7 @@ static Scsi_Cmnd *scsi_end_request(Scsi_Cmnd * SCpnt,
int sectors, int sectors,
int requeue) int requeue)
{ {
request_queue_t *q = &SCpnt->device->request_queue; request_queue_t *q = SCpnt->device->request_queue;
struct request *req = SCpnt->request; struct request *req = SCpnt->request;
unsigned long flags; unsigned long flags;
...@@ -497,7 +493,7 @@ void scsi_io_completion(Scsi_Cmnd * SCpnt, int good_sectors, ...@@ -497,7 +493,7 @@ void scsi_io_completion(Scsi_Cmnd * SCpnt, int good_sectors,
{ {
int result = SCpnt->result; int result = SCpnt->result;
int this_count = SCpnt->bufflen >> 9; int this_count = SCpnt->bufflen >> 9;
request_queue_t *q = &SCpnt->device->request_queue; request_queue_t *q = SCpnt->device->request_queue;
struct request *req = SCpnt->request; struct request *req = SCpnt->request;
/* /*
...@@ -1094,7 +1090,7 @@ void scsi_unblock_requests(struct Scsi_Host * SHpnt) ...@@ -1094,7 +1090,7 @@ void scsi_unblock_requests(struct Scsi_Host * SHpnt)
SHpnt->host_self_blocked = FALSE; SHpnt->host_self_blocked = FALSE;
/* Now that we are unblocked, try to start the queues. */ /* Now that we are unblocked, try to start the queues. */
list_for_each_entry(SDloop, &SHpnt->my_devices, siblings) list_for_each_entry(SDloop, &SHpnt->my_devices, siblings)
scsi_queue_next_request(&SDloop->request_queue, NULL); scsi_queue_next_request(SDloop->request_queue, NULL);
} }
/* /*
......
...@@ -371,7 +371,7 @@ static void print_inquiry(unsigned char *inq_result) ...@@ -371,7 +371,7 @@ static void print_inquiry(unsigned char *inq_result)
*/ */
static void scsi_initialize_merge_fn(struct scsi_device *sd) static void scsi_initialize_merge_fn(struct scsi_device *sd)
{ {
request_queue_t *q = &sd->request_queue; request_queue_t *q = sd->request_queue;
struct Scsi_Host *sh = sd->host; struct Scsi_Host *sh = sd->host;
struct device *dev = scsi_get_device(sh); struct device *dev = scsi_get_device(sh);
u64 bounce_limit; u64 bounce_limit;
...@@ -407,14 +407,12 @@ static void scsi_initialize_merge_fn(struct scsi_device *sd) ...@@ -407,14 +407,12 @@ static void scsi_initialize_merge_fn(struct scsi_device *sd)
* Scsi_Device pointer, or NULL on failure. * Scsi_Device pointer, or NULL on failure.
**/ **/
static struct scsi_device *scsi_alloc_sdev(struct Scsi_Host *shost, static struct scsi_device *scsi_alloc_sdev(struct Scsi_Host *shost,
uint channel, uint id, uint lun) struct request_queue **q, uint channel, uint id, uint lun)
{ {
struct scsi_device *sdev, *device; struct scsi_device *sdev, *device;
sdev = kmalloc(sizeof(*sdev), GFP_ATOMIC); sdev = kmalloc(sizeof(*sdev), GFP_ATOMIC);
if (sdev == NULL) if (sdev != NULL) {
printk(ALLOC_FAILURE_MSG, __FUNCTION__);
else {
memset(sdev, 0, sizeof(Scsi_Device)); memset(sdev, 0, sizeof(Scsi_Device));
sdev->vendor = scsi_null_device_strs; sdev->vendor = scsi_null_device_strs;
sdev->model = scsi_null_device_strs; sdev->model = scsi_null_device_strs;
...@@ -436,18 +434,32 @@ static struct scsi_device *scsi_alloc_sdev(struct Scsi_Host *shost, ...@@ -436,18 +434,32 @@ static struct scsi_device *scsi_alloc_sdev(struct Scsi_Host *shost,
* doesn't * doesn't
*/ */
sdev->borken = 1; sdev->borken = 1;
if (shost->hostt->slave_alloc)
if (shost->hostt->slave_alloc(sdev)) {
kfree(sdev);
return NULL;
}
if(!q || *q == NULL) {
sdev->request_queue = kmalloc(sizeof(struct request_queue), GFP_ATOMIC);
if(sdev->request_queue == NULL) {
goto out_bail;
}
memset(sdev->request_queue, 0,
sizeof(struct request_queue));
scsi_initialize_queue(sdev, shost); scsi_initialize_queue(sdev, shost);
sdev->request_queue.queuedata = (void *) sdev;
scsi_initialize_merge_fn(sdev); scsi_initialize_merge_fn(sdev);
} else {
sdev->request_queue = *q;
*q = NULL;
}
sdev->request_queue->queuedata = sdev;
scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
scsi_build_commandblocks(sdev);
if (sdev->current_queue_depth == 0) {
goto out_bail;
}
init_waitqueue_head(&sdev->scpnt_wait); init_waitqueue_head(&sdev->scpnt_wait);
if (shost->hostt->slave_alloc)
if (shost->hostt->slave_alloc(sdev)) {
goto out_bail;
}
/* /*
* If there are any same target siblings, add this to the * If there are any same target siblings, add this to the
* sibling list * sibling list
...@@ -457,15 +469,35 @@ static struct scsi_device *scsi_alloc_sdev(struct Scsi_Host *shost, ...@@ -457,15 +469,35 @@ static struct scsi_device *scsi_alloc_sdev(struct Scsi_Host *shost,
device->channel == sdev->channel) { device->channel == sdev->channel) {
list_add_tail(&sdev->same_target_siblings, list_add_tail(&sdev->same_target_siblings,
&device->same_target_siblings); &device->same_target_siblings);
sdev->scsi_level = device->scsi_level;
break; break;
} }
} }
/*
* If there wasn't another lun already configured at this
* target, then default this device to SCSI_2 until we
* know better
*/
if(!sdev->scsi_level)
sdev->scsi_level = SCSI_2;
/* /*
* Add it to the end of the shost->my_devices list. * Add it to the end of the shost->my_devices list.
*/ */
list_add_tail(&sdev->siblings, &shost->my_devices); list_add_tail(&sdev->siblings, &shost->my_devices);
}
return (sdev); return (sdev);
}
out_bail:
printk(ALLOC_FAILURE_MSG, __FUNCTION__);
if(q && sdev->request_queue) {
*q = sdev->request_queue;
sdev->request_queue = NULL;
} else if(sdev->request_queue) {
blk_cleanup_queue(sdev->request_queue);
kfree(sdev->request_queue);
}
scsi_release_commandblocks(sdev);
kfree(sdev);
return NULL;
} }
/** /**
...@@ -481,7 +513,10 @@ static void scsi_free_sdev(struct scsi_device *sdev) ...@@ -481,7 +513,10 @@ static void scsi_free_sdev(struct scsi_device *sdev)
list_del(&sdev->siblings); list_del(&sdev->siblings);
list_del(&sdev->same_target_siblings); list_del(&sdev->same_target_siblings);
blk_cleanup_queue(&sdev->request_queue); if(sdev->request_queue != NULL) {
blk_cleanup_queue(sdev->request_queue);
kfree(sdev->request_queue);
}
scsi_release_commandblocks(sdev); scsi_release_commandblocks(sdev);
if (sdev->host->hostt->slave_destroy) if (sdev->host->hostt->slave_destroy)
sdev->host->hostt->slave_destroy(sdev); sdev->host->hostt->slave_destroy(sdev);
...@@ -1188,18 +1223,11 @@ static void scsi_probe_lun(Scsi_Request *sreq, char *inq_result, ...@@ -1188,18 +1223,11 @@ static void scsi_probe_lun(Scsi_Request *sreq, char *inq_result,
* SCSI_SCAN_NO_RESPONSE: could not allocate or setup a Scsi_Device * SCSI_SCAN_NO_RESPONSE: could not allocate or setup a Scsi_Device
* SCSI_SCAN_LUN_PRESENT: a new Scsi_Device was allocated and initialized * SCSI_SCAN_LUN_PRESENT: a new Scsi_Device was allocated and initialized
**/ **/
static int scsi_add_lun(Scsi_Device *sdevscan, Scsi_Device **sdevnew, static int scsi_add_lun(Scsi_Device *sdev, Scsi_Request *sreq,
Scsi_Request *sreq, char *inq_result, int *bflags) char *inq_result, int *bflags)
{ {
Scsi_Device *sdev;
char devname[64]; char devname[64];
sdev = scsi_alloc_sdev(sdevscan->host, sdevscan->channel,
sdevscan->id, sdevscan->lun);
if (sdev == NULL)
return SCSI_SCAN_NO_RESPONSE;
sdev->scsi_level = sdevscan->scsi_level;
/* /*
* XXX do not save the inquiry, since it can change underneath us, * XXX do not save the inquiry, since it can change underneath us,
* save just vendor/model/rev. * save just vendor/model/rev.
...@@ -1210,10 +1238,8 @@ static int scsi_add_lun(Scsi_Device *sdevscan, Scsi_Device **sdevnew, ...@@ -1210,10 +1238,8 @@ static int scsi_add_lun(Scsi_Device *sdevscan, Scsi_Device **sdevnew,
* scanning run at their own risk, or supply a user level program * scanning run at their own risk, or supply a user level program
* that can correctly scan. * that can correctly scan.
*/ */
sdev->inquiry_len = sdevscan->inquiry_len;
sdev->inquiry = kmalloc(sdev->inquiry_len, GFP_ATOMIC); sdev->inquiry = kmalloc(sdev->inquiry_len, GFP_ATOMIC);
if (sdev->inquiry == NULL) { if (sdev->inquiry == NULL) {
scsi_free_sdev(sdev);
return SCSI_SCAN_NO_RESPONSE; return SCSI_SCAN_NO_RESPONSE;
} }
...@@ -1335,8 +1361,8 @@ static int scsi_add_lun(Scsi_Device *sdevscan, Scsi_Device **sdevnew, ...@@ -1335,8 +1361,8 @@ static int scsi_add_lun(Scsi_Device *sdevscan, Scsi_Device **sdevnew,
* function */ * function */
sdev->max_device_blocked = SCSI_DEFAULT_DEVICE_BLOCKED; sdev->max_device_blocked = SCSI_DEFAULT_DEVICE_BLOCKED;
if (sdevnew != NULL) if(sdev->host->hostt->slave_configure)
*sdevnew = sdev; sdev->host->hostt->slave_configure(sdev);
return SCSI_SCAN_LUN_PRESENT; return SCSI_SCAN_LUN_PRESENT;
} }
...@@ -1365,8 +1391,9 @@ static void scsi_remove_lun(struct scsi_device *sdev) ...@@ -1365,8 +1391,9 @@ static void scsi_remove_lun(struct scsi_device *sdev)
* attached at the LUN * attached at the LUN
* SCSI_SCAN_LUN_PRESENT: a new Scsi_Device was allocated and initialized * SCSI_SCAN_LUN_PRESENT: a new Scsi_Device was allocated and initialized
**/ **/
static int scsi_probe_and_add_lun(Scsi_Device *sdevscan, Scsi_Device **sdevnew, static int scsi_probe_and_add_lun(struct Scsi_Host *host,
int *bflagsp) struct request_queue **q, uint channel, uint id,
uint lun, int *bflagsp)
{ {
Scsi_Device *sdev = NULL; Scsi_Device *sdev = NULL;
Scsi_Request *sreq = NULL; Scsi_Request *sreq = NULL;
...@@ -1374,47 +1401,27 @@ static int scsi_probe_and_add_lun(Scsi_Device *sdevscan, Scsi_Device **sdevnew, ...@@ -1374,47 +1401,27 @@ static int scsi_probe_and_add_lun(Scsi_Device *sdevscan, Scsi_Device **sdevnew,
int bflags; int bflags;
int res; int res;
/* sdev = scsi_alloc_sdev(host, q, channel, id, lun);
* Any command blocks allocated are fixed to use sdevscan->lun, if (sdev == NULL)
* so they must be allocated and released if sdevscan->lun return SCSI_SCAN_NO_RESPONSE;
* changes. sreq = scsi_allocate_request(sdev);
* if (sreq == NULL) {
* XXX optimize and don't call build/release commandblocks, instead printk(ALLOC_FAILURE_MSG, __FUNCTION__);
* modify the LUN value of the existing command block - this means res = SCSI_SCAN_NO_RESPONSE;
* the build/release calls would be moved to the alloc/free of goto bail_out;
* sdevscan, and the modifying function would be called here. }
*
* XXX maybe change scsi_release_commandblocks to not reset
* queue_depth to 0.
*/
sdevscan->new_queue_depth = 1;
scsi_build_commandblocks(sdevscan);
if (sdevscan->current_queue_depth == 0)
goto alloc_failed;
/*
* Since we reuse the same sdevscan over and over with different
* target and lun values, we have to destroy and then recreate
* any possible low level attachments since they very well might
* also store the id and lun numbers in some form and need updating
* with each scan.
*/
if (sdevscan->host->hostt->slave_destroy)
sdevscan->host->hostt->slave_destroy(sdevscan);
if (sdevscan->host->hostt->slave_alloc)
sdevscan->host->hostt->slave_alloc(sdevscan);
sreq = scsi_allocate_request(sdevscan);
if (sreq == NULL)
goto alloc_failed;
/* /*
* The sreq is for use only with sdevscan. * The sreq is for use only with sdevscan.
*/ */
scsi_result = kmalloc(256, GFP_ATOMIC | scsi_result = kmalloc(256, GFP_ATOMIC |
(sdevscan->host->unchecked_isa_dma) ? (host->unchecked_isa_dma) ?
GFP_DMA : 0); GFP_DMA : 0);
if (scsi_result == NULL) if (scsi_result == NULL) {
goto alloc_failed; printk(ALLOC_FAILURE_MSG, __FUNCTION__);
res = SCSI_SCAN_NO_RESPONSE;
goto bail_out;
}
scsi_probe_lun(sreq, scsi_result, &bflags); scsi_probe_lun(sreq, scsi_result, &bflags);
if (sreq->sr_result) if (sreq->sr_result)
...@@ -1439,10 +1446,8 @@ static int scsi_probe_and_add_lun(Scsi_Device *sdevscan, Scsi_Device **sdevnew, ...@@ -1439,10 +1446,8 @@ static int scsi_probe_and_add_lun(Scsi_Device *sdevscan, Scsi_Device **sdevnew,
" no device added\n")); " no device added\n"));
res = SCSI_SCAN_TARGET_PRESENT; res = SCSI_SCAN_TARGET_PRESENT;
} else { } else {
res = scsi_add_lun(sdevscan, &sdev, sreq, scsi_result, res = scsi_add_lun(sdev, sreq, scsi_result, &bflags);
&bflags);
if (res == SCSI_SCAN_LUN_PRESENT) { if (res == SCSI_SCAN_LUN_PRESENT) {
BUG_ON(sdev == NULL);
if ((bflags & BLIST_KEY) != 0) { if ((bflags & BLIST_KEY) != 0) {
sdev->lockable = 0; sdev->lockable = 0;
scsi_unlock_floptical(sreq, scsi_unlock_floptical(sreq,
...@@ -1452,31 +1457,25 @@ static int scsi_probe_and_add_lun(Scsi_Device *sdevscan, Scsi_Device **sdevnew, ...@@ -1452,31 +1457,25 @@ static int scsi_probe_and_add_lun(Scsi_Device *sdevscan, Scsi_Device **sdevnew,
* the INQUIRY data. * the INQUIRY data.
*/ */
} }
/*
* "hardcoded" scans of a single LUN need
* to know the sdev just allocated.
*/
if (sdevnew != NULL)
*sdevnew = sdev;
if (bflagsp != NULL) if (bflagsp != NULL)
*bflagsp = bflags; *bflagsp = bflags;
} }
} }
} }
kfree(scsi_result); bail_out:
scsi_release_request(sreq);
scsi_release_commandblocks(sdevscan);
return res;
alloc_failed:
printk(ALLOC_FAILURE_MSG, __FUNCTION__);
if (scsi_result != NULL) if (scsi_result != NULL)
kfree(scsi_result); kfree(scsi_result);
if (sreq != NULL) if (sreq != NULL)
scsi_release_request(sreq); scsi_release_request(sreq);
if (sdevscan->current_queue_depth != 0) if (res != SCSI_SCAN_LUN_PRESENT) {
scsi_release_commandblocks(sdevscan); if(q) {
return SCSI_SCAN_NO_RESPONSE; *q = sdev->request_queue;
sdev->request_queue = NULL;
}
scsi_free_sdev(sdev);
}
return res;
} }
/** /**
...@@ -1492,16 +1491,15 @@ static int scsi_probe_and_add_lun(Scsi_Device *sdevscan, Scsi_Device **sdevnew, ...@@ -1492,16 +1491,15 @@ static int scsi_probe_and_add_lun(Scsi_Device *sdevscan, Scsi_Device **sdevnew,
* *
* Modifies sdevscan->lun. * Modifies sdevscan->lun.
**/ **/
static void scsi_sequential_lun_scan(Scsi_Device *sdevscan, int bflags, static void scsi_sequential_lun_scan(struct Scsi_Host *shost,
int lun0_res) struct request_queue **q, uint channel, uint id,
int bflags, int lun0_res, int scsi_level)
{ {
struct Scsi_Host *shost = sdevscan->host; unsigned int sparse_lun, lun, max_dev_lun;
unsigned int sparse_lun;
unsigned int max_dev_lun;
SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO "scsi scan: Sequential scan of" SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO "scsi scan: Sequential scan of"
" host %d channel %d id %d\n", sdevscan->host->host_no, " host %d channel %d id %d\n", shost->host_no,
sdevscan->channel, sdevscan->id)); channel, id));
max_dev_lun = min(max_scsi_luns, shost->max_lun); max_dev_lun = min(max_scsi_luns, shost->max_lun);
/* /*
...@@ -1526,11 +1524,19 @@ static void scsi_sequential_lun_scan(Scsi_Device *sdevscan, int bflags, ...@@ -1526,11 +1524,19 @@ static void scsi_sequential_lun_scan(Scsi_Device *sdevscan, int bflags,
* If less than SCSI_1_CSS, and no special lun scanning, stop * If less than SCSI_1_CSS, and no special lun scanning, stop
* scanning; this matches 2.4 behaviour, but could just be a bug * scanning; this matches 2.4 behaviour, but could just be a bug
* (to continue scanning a SCSI_1_CSS device). * (to continue scanning a SCSI_1_CSS device).
*/ *
* This test is broken. We might not have any device on lun0 for
* a sparselun device, and if that's the case then how would we
* know the real scsi_level, eh? It might make sense to just not
* scan any SCSI_1 device for non-0 luns, but that check would best
* go into scsi_alloc_sdev() and just have it return null when asked
* to alloc an sdev for lun > 0 on an already found SCSI_1 device.
*
if ((sdevscan->scsi_level < SCSI_1_CCS) && if ((sdevscan->scsi_level < SCSI_1_CCS) &&
((bflags & (BLIST_FORCELUN | BLIST_SPARSELUN | BLIST_MAX5LUN)) ((bflags & (BLIST_FORCELUN | BLIST_SPARSELUN | BLIST_MAX5LUN))
== 0)) == 0))
return; return;
*/
/* /*
* If this device is known to support multiple units, override * If this device is known to support multiple units, override
* the other settings, and scan all of them. * the other settings, and scan all of them.
...@@ -1546,7 +1552,7 @@ static void scsi_sequential_lun_scan(Scsi_Device *sdevscan, int bflags, ...@@ -1546,7 +1552,7 @@ static void scsi_sequential_lun_scan(Scsi_Device *sdevscan, int bflags,
* Do not scan SCSI-2 or lower device past LUN 7, unless * Do not scan SCSI-2 or lower device past LUN 7, unless
* BLIST_LARGELUN. * BLIST_LARGELUN.
*/ */
if ((sdevscan->scsi_level < SCSI_3) && !(bflags & BLIST_LARGELUN)) if (scsi_level < SCSI_3 && !(bflags & BLIST_LARGELUN))
max_dev_lun = min(8U, max_dev_lun); max_dev_lun = min(8U, max_dev_lun);
/* /*
...@@ -1554,8 +1560,8 @@ static void scsi_sequential_lun_scan(Scsi_Device *sdevscan, int bflags, ...@@ -1554,8 +1560,8 @@ static void scsi_sequential_lun_scan(Scsi_Device *sdevscan, int bflags,
* until we reach the max, or no LUN is found and we are not * until we reach the max, or no LUN is found and we are not
* sparse_lun. * sparse_lun.
*/ */
for (sdevscan->lun = 1; sdevscan->lun < max_dev_lun; ++sdevscan->lun) for (lun = 1; lun < max_dev_lun; ++lun)
if ((scsi_probe_and_add_lun(sdevscan, NULL, NULL) if ((scsi_probe_and_add_lun(shost, q, channel, id, lun, NULL)
!= SCSI_SCAN_LUN_PRESENT) && !sparse_lun) != SCSI_SCAN_LUN_PRESENT) && !sparse_lun)
return; return;
} }
...@@ -1608,7 +1614,8 @@ static int scsilun_to_int(ScsiLun *scsilun) ...@@ -1608,7 +1614,8 @@ static int scsilun_to_int(ScsiLun *scsilun)
* 0: scan completed (or no memory, so further scanning is futile) * 0: scan completed (or no memory, so further scanning is futile)
* 1: no report lun scan, or not configured * 1: no report lun scan, or not configured
**/ **/
static int scsi_report_lun_scan(Scsi_Device *sdevscan) static int scsi_report_lun_scan(Scsi_Device *sdev, struct request_queue **q,
int bflags)
{ {
#ifdef CONFIG_SCSI_REPORT_LUNS #ifdef CONFIG_SCSI_REPORT_LUNS
...@@ -1625,38 +1632,19 @@ static int scsi_report_lun_scan(Scsi_Device *sdevscan) ...@@ -1625,38 +1632,19 @@ static int scsi_report_lun_scan(Scsi_Device *sdevscan)
/* /*
* Only support SCSI-3 and up devices. * Only support SCSI-3 and up devices.
*/ */
if (sdevscan->scsi_level < SCSI_3) if (sdev->scsi_level < SCSI_3)
return 1; return 1;
if (bflags & BLIST_NOLUN)
return 0;
sdevscan->new_queue_depth = 1; sreq = scsi_allocate_request(sdev);
scsi_build_commandblocks(sdevscan); if (sreq == NULL) {
if (sdevscan->current_queue_depth == 0) {
printk(ALLOC_FAILURE_MSG, __FUNCTION__); printk(ALLOC_FAILURE_MSG, __FUNCTION__);
/*
* We are out of memory, don't try scanning any further.
*/
return 0; return 0;
} }
/*
* Since we reuse the same sdevscan over and over with different
* target and lun values, we have to destroy and then recreate
* any possible low level attachments since they very well might
* also store the id and lun numbers in some form and need updating
* with each scan.
*
* This is normally handled in probe_and_add_lun, but since this
* one particular function wants to scan lun 0 on each device
* itself and will possibly pick up a reused sdevscan when doing
* so, it also needs this hack.
*/
if (sdevscan->host->hostt->slave_destroy)
sdevscan->host->hostt->slave_destroy(sdevscan);
if (sdevscan->host->hostt->slave_alloc)
sdevscan->host->hostt->slave_alloc(sdevscan);
sreq = scsi_allocate_request(sdevscan);
sprintf(devname, "host %d channel %d id %d", sdevscan->host->host_no, sprintf(devname, "host %d channel %d id %d", sdev->host->host_no,
sdevscan->channel, sdevscan->id); sdev->channel, sdev->id);
/* /*
* Allocate enough to hold the header (the same size as one ScsiLun) * Allocate enough to hold the header (the same size as one ScsiLun)
* plus the max number of luns we are requesting. * plus the max number of luns we are requesting.
...@@ -1669,11 +1657,11 @@ static int scsi_report_lun_scan(Scsi_Device *sdevscan) ...@@ -1669,11 +1657,11 @@ static int scsi_report_lun_scan(Scsi_Device *sdevscan)
*/ */
length = (max_scsi_report_luns + 1) * sizeof(ScsiLun); length = (max_scsi_report_luns + 1) * sizeof(ScsiLun);
lun_data = (ScsiLun *) kmalloc(length, GFP_ATOMIC | lun_data = (ScsiLun *) kmalloc(length, GFP_ATOMIC |
(sdevscan->host->unchecked_isa_dma ? (sdev->host->unchecked_isa_dma ?
GFP_DMA : 0)); GFP_DMA : 0));
if (lun_data == NULL) { if (lun_data == NULL) {
printk(ALLOC_FAILURE_MSG, __FUNCTION__); printk(ALLOC_FAILURE_MSG, __FUNCTION__);
scsi_release_commandblocks(sdevscan); scsi_release_request(sreq);
/* /*
* We are out of memory, don't try scanning any further. * We are out of memory, don't try scanning any further.
*/ */
...@@ -1723,7 +1711,6 @@ static int scsi_report_lun_scan(Scsi_Device *sdevscan) ...@@ -1723,7 +1711,6 @@ static int scsi_report_lun_scan(Scsi_Device *sdevscan)
|| sreq->sr_sense_buffer[2] != UNIT_ATTENTION) || sreq->sr_sense_buffer[2] != UNIT_ATTENTION)
break; break;
} }
scsi_release_commandblocks(sdevscan);
if (sreq->sr_result) { if (sreq->sr_result) {
/* /*
...@@ -1751,8 +1738,8 @@ static int scsi_report_lun_scan(Scsi_Device *sdevscan) ...@@ -1751,8 +1738,8 @@ static int scsi_report_lun_scan(Scsi_Device *sdevscan)
num_luns = (length / sizeof(ScsiLun)); num_luns = (length / sizeof(ScsiLun));
SCSI_LOG_SCAN_BUS(3, printk (KERN_INFO "scsi scan: REPORT LUN scan of" SCSI_LOG_SCAN_BUS(3, printk (KERN_INFO "scsi scan: REPORT LUN scan of"
" host %d channel %d id %d\n", sdevscan->host->host_no, " host %d channel %d id %d\n", sdev->host->host_no,
sdevscan->channel, sdevscan->id)); sdev->channel, sdev->id));
/* /*
* Scan the luns in lun_data. The entry at offset 0 is really * Scan the luns in lun_data. The entry at offset 0 is really
* the header, so start at 1 and go up to and including num_luns. * the header, so start at 1 and go up to and including num_luns.
...@@ -1782,22 +1769,22 @@ static int scsi_report_lun_scan(Scsi_Device *sdevscan) ...@@ -1782,22 +1769,22 @@ static int scsi_report_lun_scan(Scsi_Device *sdevscan)
/* /*
* LUN 0 has already been scanned. * LUN 0 has already been scanned.
*/ */
} else if (lun > sdevscan->host->max_lun) { } else if (lun > sdev->host->max_lun) {
printk(KERN_WARNING "scsi: %s lun%d has a LUN larger" printk(KERN_WARNING "scsi: %s lun%d has a LUN larger"
" than allowed by the host adapter\n", " than allowed by the host adapter\n",
devname, lun); devname, lun);
} else { } else {
int res; int res;
sdevscan->lun = lun; res = scsi_probe_and_add_lun(sdev->host, q,
res = scsi_probe_and_add_lun(sdevscan, NULL, NULL); sdev->channel, sdev->id, lun, NULL);
if (res == SCSI_SCAN_NO_RESPONSE) { if (res == SCSI_SCAN_NO_RESPONSE) {
/* /*
* Got some results, but now none, abort. * Got some results, but now none, abort.
*/ */
printk(KERN_ERR "scsi: Unexpected response" printk(KERN_ERR "scsi: Unexpected response"
" from %s lun %d while scanning, scan" " from %s lun %d while scanning, scan"
" aborted\n", devname, sdevscan->lun); " aborted\n", devname, lun);
break; break;
} }
} }
...@@ -1814,38 +1801,22 @@ static int scsi_report_lun_scan(Scsi_Device *sdevscan) ...@@ -1814,38 +1801,22 @@ static int scsi_report_lun_scan(Scsi_Device *sdevscan)
int scsi_add_single_device(uint host, uint channel, uint id, uint lun) int scsi_add_single_device(uint host, uint channel, uint id, uint lun)
{ {
struct scsi_device *sdevscan, *sdev;
struct Scsi_Host *shost; struct Scsi_Host *shost;
int error = -ENODEV; int error = -ENODEV;
struct scsi_device *sdev;
shost = scsi_host_hn_get(host); shost = scsi_host_hn_get(host);
if (!shost) if (!shost)
return -ENODEV; return -ENODEV;
sdev = scsi_find_device(shost, channel, id, lun); if(scsi_find_device(shost, channel, id, lun) != NULL)
if (sdev)
goto out;
error = -ENOMEM;
sdevscan = scsi_alloc_sdev(shost, channel, id, lun);
if (!sdevscan)
goto out; goto out;
if(!list_empty(&sdevscan->same_target_siblings)) { if (scsi_probe_and_add_lun(shost, NULL, channel, id, lun, NULL) ==
sdev = list_entry(&sdevscan->same_target_siblings, Scsi_Device, SCSI_SCAN_LUN_PRESENT) {
same_target_siblings);
sdevscan->scsi_level = sdev->scsi_level;
sdev = NULL;
} else
sdevscan->scsi_level = SCSI_2;
error = scsi_probe_and_add_lun(sdevscan, &sdev, NULL);
scsi_free_sdev(sdevscan);
if (error != SCSI_SCAN_LUN_PRESENT)
goto out;
scsi_attach_device(sdev);
error = 0; error = 0;
sdev = scsi_find_device(shost, channel, id, lun);
scsi_attach_device(sdev);
}
out: out:
scsi_host_put(shost); scsi_host_put(shost);
return error; return error;
...@@ -1898,11 +1869,12 @@ int scsi_remove_single_device(uint host, uint channel, uint id, uint lun) ...@@ -1898,11 +1869,12 @@ int scsi_remove_single_device(uint host, uint channel, uint id, uint lun)
* First try a REPORT LUN scan, if that does not scan the target, do a * First try a REPORT LUN scan, if that does not scan the target, do a
* sequential scan of LUNs on the target id. * sequential scan of LUNs on the target id.
**/ **/
static void scsi_scan_target(Scsi_Device *sdevscan, struct Scsi_Host *shost, static void scsi_scan_target(struct Scsi_Host *shost, struct request_queue **q,
unsigned int channel, unsigned int id) unsigned int channel, unsigned int id)
{ {
int bflags; int bflags = 0;
int res; int res;
struct scsi_device *sdev;
if (shost->this_id == id) if (shost->this_id == id)
/* /*
...@@ -1910,36 +1882,29 @@ static void scsi_scan_target(Scsi_Device *sdevscan, struct Scsi_Host *shost, ...@@ -1910,36 +1882,29 @@ static void scsi_scan_target(Scsi_Device *sdevscan, struct Scsi_Host *shost,
*/ */
return; return;
sdevscan->host = shost;
sdevscan->id = id;
sdevscan->channel = channel;
/* /*
* Scan LUN 0, if there is some response, scan further. Ideally, we * Scan LUN 0, if there is some response, scan further. Ideally, we
* would not configure LUN 0 until all LUNs are scanned. * would not configure LUN 0 until all LUNs are scanned.
*
* The scsi_level is set (in scsi_probe_lun) if a target responds.
*/
sdevscan->lun = 0;
res = scsi_probe_and_add_lun(sdevscan, NULL, &bflags);
if (res != SCSI_SCAN_NO_RESPONSE) {
/*
* Some scsi devices cannot properly handle a lun != 0.
* BLIST_NOLUN also prevents a REPORT LUN from being sent.
* Any multi-lun SCSI-3 device that hangs because of a
* REPORT LUN command is seriously broken.
*/
if (!(bflags & BLIST_NOLUN))
/*
* Ending the scan here if max_scsi_luns == 1
* breaks scanning of SPARSE, FORCE, MAX5 LUN
* devices, and the report lun scan.
*/ */
if (scsi_report_lun_scan(sdevscan) != 0) res = scsi_probe_and_add_lun(shost, q, channel, id, 0, &bflags);
if (res == SCSI_SCAN_LUN_PRESENT) {
sdev = scsi_find_device(shost, channel, id, 0);
if (scsi_report_lun_scan(sdev, q, bflags) != 0)
/* /*
* The REPORT LUN did not scan the target, * The REPORT LUN did not scan the target,
* do a sequential scan. * do a sequential scan.
*/ */
scsi_sequential_lun_scan(sdevscan, bflags, res); scsi_sequential_lun_scan(shost, q, channel, id, bflags,
res, sdev->scsi_level);
} else if (res == SCSI_SCAN_TARGET_PRESENT) {
/*
* There's a target here, but lun 0 is offline so we
* can't use the report_lun scan. Fall back to a
* sequential lun scan with a bflags of SPARSELUN and
* a default scsi level of SCSI_2
*/
scsi_sequential_lun_scan(shost, q, channel, id, BLIST_SPARSELUN,
SCSI_SCAN_TARGET_PRESENT, SCSI_2);
} }
} }
...@@ -1953,22 +1918,9 @@ static void scsi_scan_target(Scsi_Device *sdevscan, struct Scsi_Host *shost, ...@@ -1953,22 +1918,9 @@ static void scsi_scan_target(Scsi_Device *sdevscan, struct Scsi_Host *shost,
**/ **/
void scsi_scan_host(struct Scsi_Host *shost) void scsi_scan_host(struct Scsi_Host *shost)
{ {
struct scsi_device *sdevscan; struct request_queue *q = NULL;
uint channel, id, order_id; uint channel, id, order_id;
/*
* The blk layer queue allocation is a bit expensive to
* repeat for each channel and id - for FCP max_id is near
* 255: each call to scsi_alloc_sdev() implies a call to
* blk_init_queue, and then blk_init_free_list, where 2 *
* queue_nr_requests requests are allocated. Don't do so
* here for scsi_scan_selected_lun, since we end up
* calling select_queue_depths with an extra Scsi_Device
* on the host_queue list.
*/
sdevscan = scsi_alloc_sdev(shost, 0, 0, 0);
if (sdevscan == NULL)
return;
/* /*
* The sdevscan host, channel, id and lun are filled in as * The sdevscan host, channel, id and lun are filled in as
* needed to scan. * needed to scan.
...@@ -1991,11 +1943,13 @@ void scsi_scan_host(struct Scsi_Host *shost) ...@@ -1991,11 +1943,13 @@ void scsi_scan_host(struct Scsi_Host *shost)
order_id = shost->max_id - id - 1; order_id = shost->max_id - id - 1;
else else
order_id = id; order_id = id;
scsi_scan_target(sdevscan, shost, channel, scsi_scan_target(shost, &q, channel, order_id);
order_id);
} }
} }
scsi_free_sdev(sdevscan); if(q) {
blk_cleanup_queue(q);
kfree(q);
}
} }
void scsi_forget_host(struct Scsi_Host *shost) void scsi_forget_host(struct Scsi_Host *shost)
...@@ -2030,19 +1984,11 @@ struct scsi_device *scsi_get_host_dev(struct Scsi_Host *shost) ...@@ -2030,19 +1984,11 @@ struct scsi_device *scsi_get_host_dev(struct Scsi_Host *shost)
{ {
struct scsi_device *sdev; struct scsi_device *sdev;
sdev = scsi_alloc_sdev(shost, 0, shost->this_id, 0); sdev = scsi_alloc_sdev(shost, NULL, 0, shost->this_id, 0);
if (sdev) { if (sdev) {
scsi_build_commandblocks(sdev);
if (sdev->current_queue_depth == 0)
goto fail;
sdev->borken = 0; sdev->borken = 0;
} }
return sdev; return sdev;
fail:
kfree(sdev);
return NULL;
} }
/* /*
......
...@@ -1060,7 +1060,7 @@ sd_read_capacity(struct scsi_disk *sdkp, char *diskname, ...@@ -1060,7 +1060,7 @@ sd_read_capacity(struct scsi_disk *sdkp, char *diskname,
*/ */
int hard_sector = sector_size; int hard_sector = sector_size;
sector_t sz = sdkp->capacity * (hard_sector/256); sector_t sz = sdkp->capacity * (hard_sector/256);
request_queue_t *queue = &sdp->request_queue; request_queue_t *queue = sdp->request_queue;
sector_t mb; sector_t mb;
blk_queue_hardsect_size(queue, hard_sector); blk_queue_hardsect_size(queue, hard_sector);
...@@ -1295,7 +1295,7 @@ static int sd_attach(struct scsi_device * sdp) ...@@ -1295,7 +1295,7 @@ static int sd_attach(struct scsi_device * sdp)
if (sdp->removable) if (sdp->removable)
gd->flags |= GENHD_FL_REMOVABLE; gd->flags |= GENHD_FL_REMOVABLE;
gd->private_data = &sdkp->driver; gd->private_data = &sdkp->driver;
gd->queue = &sdkp->device->request_queue; gd->queue = sdkp->device->request_queue;
sd_devlist_insert(sdkp); sd_devlist_insert(sdkp);
set_capacity(gd, sdkp->capacity); set_capacity(gd, sdkp->capacity);
......
...@@ -695,7 +695,7 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp, ...@@ -695,7 +695,7 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
} }
srp->my_cmdp = SRpnt; srp->my_cmdp = SRpnt;
q = &SRpnt->sr_device->request_queue; q = SRpnt->sr_device->request_queue;
SRpnt->sr_request->rq_disk = sdp->disk; SRpnt->sr_request->rq_disk = sdp->disk;
SRpnt->sr_sense_buffer[0] = 0; SRpnt->sr_sense_buffer[0] = 0;
SRpnt->sr_cmd_len = hp->cmd_len; SRpnt->sr_cmd_len = hp->cmd_len;
......
...@@ -563,7 +563,7 @@ static int sr_attach(struct scsi_device *sdev) ...@@ -563,7 +563,7 @@ static int sr_attach(struct scsi_device *sdev)
register_cdrom(&cd->cdi); register_cdrom(&cd->cdi);
set_capacity(disk, cd->capacity); set_capacity(disk, cd->capacity);
disk->private_data = &cd->driver; disk->private_data = &cd->driver;
disk->queue = &sdev->request_queue; disk->queue = sdev->request_queue;
add_disk(disk); add_disk(disk);
sr_devlist_insert(cd); sr_devlist_insert(cd);
...@@ -672,7 +672,7 @@ static void get_sectorsize(struct scsi_cd *cd) ...@@ -672,7 +672,7 @@ static void get_sectorsize(struct scsi_cd *cd)
set_capacity(cd->disk, cd->capacity); set_capacity(cd->disk, cd->capacity);
} }
queue = &cd->device->request_queue; queue = cd->device->request_queue;
blk_queue_hardsect_size(queue, sector_size); blk_queue_hardsect_size(queue, sector_size);
out: out:
kfree(buffer); kfree(buffer);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment