Commit 7b253dfa authored by Doug Ledford

Change all uses of device->request_queue (was struct, now pointer)

Update scsi_scan so that we don't pass around a scsi_device struct for
scanning.  Instead, we pass around a request_queue during scanning and
create and destroy device structs as needed.  This allows us to have a
1:1 correlation between scsi_alloc_sdev() and scsi_free_sdev() calls,
which we didn't have before.
parent 3967f4c3
......@@ -160,15 +160,13 @@ void scsi_build_commandblocks(Scsi_Device * SDpnt);
*/
void scsi_initialize_queue(Scsi_Device * SDpnt, struct Scsi_Host * SHpnt)
{
request_queue_t *q = &SDpnt->request_queue;
request_queue_t *q = SDpnt->request_queue;
/*
* tell block layer about assigned host_lock for this host
*/
blk_init_queue(q, scsi_request_fn, SHpnt->host_lock);
q->queuedata = (void *) SDpnt;
/* Hardware imposed limit. */
blk_queue_max_hw_segments(q, SHpnt->sg_tablesize);
......@@ -223,7 +221,7 @@ __setup("scsi_logging=", scsi_logging_setup);
static void scsi_wait_done(Scsi_Cmnd * SCpnt)
{
struct request *req = SCpnt->request;
struct request_queue *q = &SCpnt->device->request_queue;
struct request_queue *q = SCpnt->device->request_queue;
unsigned long flags;
ASSERT_LOCK(q->queue_lock, 0);
......@@ -656,17 +654,14 @@ int scsi_mlqueue_insert(Scsi_Cmnd * cmd, int reason)
*/
void scsi_release_command(Scsi_Cmnd * SCpnt)
{
	__scsi_release_command(SCpnt);

	/*
	 * Finally, hit the queue request function to make sure that
	 * the device is actually busy if there are requests present.
	 * This won't block - if the device cannot take any more, life
	 * will go on.
	 *
	 * device->request_queue is a pointer now, so it is passed
	 * directly instead of taking the address of an embedded struct.
	 */
	scsi_queue_next_request(SCpnt->device->request_queue, NULL);
}
/*
......@@ -810,13 +805,12 @@ void scsi_wait_req (Scsi_Request * SRpnt, const void *cmnd ,
int timeout, int retries)
{
DECLARE_COMPLETION(wait);
request_queue_t *q = &SRpnt->sr_device->request_queue;
SRpnt->sr_request->waiting = &wait;
SRpnt->sr_request->rq_status = RQ_SCSI_BUSY;
scsi_do_req (SRpnt, (void *) cmnd,
buffer, bufflen, scsi_wait_done, timeout, retries);
generic_unplug_device(q);
generic_unplug_device(SRpnt->sr_device->request_queue);
wait_for_completion(&wait);
SRpnt->sr_request->waiting = NULL;
if( SRpnt->sr_command != NULL )
......@@ -1912,10 +1906,8 @@ void scsi_device_put(struct scsi_device *sdev)
*/
int scsi_slave_attach(struct scsi_device *sdev)
{
/* all this code is now handled elsewhere
if (sdev->attached++ == 0) {
/*
* No one was attached.
*/
scsi_build_commandblocks(sdev);
if (sdev->current_queue_depth == 0) {
printk(KERN_ERR "scsi: Allocation failure during"
......@@ -1935,6 +1927,8 @@ int scsi_slave_attach(struct scsi_device *sdev)
scsi_adjust_queue_depth(sdev, 0,
sdev->host->cmd_per_lun);
}
*/
sdev->attached++;
return 0;
}
......@@ -1950,9 +1944,12 @@ int scsi_slave_attach(struct scsi_device *sdev)
*/
void scsi_slave_detach(struct scsi_device *sdev)
{
	/*
	 * Releasing the command blocks on the last detach is now
	 * handled elsewhere (the commented-out release path was
	 * removed); all that is left to do here is drop the reference
	 * taken by scsi_slave_attach().
	 */
	sdev->attached--;
}
/*
* This entry point should be called by a loadable module if it is trying
......
......@@ -569,14 +569,12 @@ struct scsi_device {
/*
* This information is private to the scsi mid-layer.
*/
struct scsi_device *next; /* Used for linked list */
struct scsi_device *prev; /* Used for linked list */
struct list_head siblings; /* list of all devices on this host */
struct list_head same_target_siblings; /* just the devices sharing same target id */
wait_queue_head_t scpnt_wait; /* Used to wait if
device is busy */
struct Scsi_Host *host;
request_queue_t request_queue;
request_queue_t *request_queue;
atomic_t device_active; /* commands checked out for device */
volatile unsigned short device_busy; /* commands actually active on low-level */
struct list_head free_cmnds; /* list of available Scsi_Cmnd structs */
......@@ -894,11 +892,9 @@ extern int scsi_reset_provider(Scsi_Device *, int);
* would be adjustable from 0 to depth.
**/
static inline void scsi_activate_tcq(Scsi_Device *SDpnt, int depth) {
	if(SDpnt->tagged_supported) {
		/* Initialize the block layer tag map once for this
		 * queue, then switch the device to ordered tags at the
		 * requested depth.  request_queue is a pointer now, so
		 * it is passed directly (no address-of). */
		if(!blk_queue_tagged(SDpnt->request_queue))
			blk_queue_init_tags(SDpnt->request_queue, depth);
		scsi_adjust_queue_depth(SDpnt, MSG_ORDERED_TAG, depth);
	}
}
......@@ -908,10 +904,8 @@ static inline void scsi_activate_tcq(Scsi_Device *SDpnt, int depth) {
* @SDpnt: device to turn off TCQ for
**/
static inline void scsi_deactivate_tcq(Scsi_Device *SDpnt, int depth) {
	/* Free the block layer tag resources if tagging was active,
	 * then fall back to untagged queueing at the given depth.
	 * request_queue is a pointer now, so it is passed directly. */
	if(blk_queue_tagged(SDpnt->request_queue))
		blk_queue_free_tags(SDpnt->request_queue);
	scsi_adjust_queue_depth(SDpnt, 0, depth);
}
......@@ -957,7 +951,7 @@ static inline Scsi_Cmnd *scsi_find_tag(Scsi_Device *SDpnt, int tag) {
/* single command, look in space */
return SDpnt->current_cmnd;
req = blk_queue_find_tag(&SDpnt->request_queue, tag);
req = blk_queue_find_tag(SDpnt->request_queue, tag);
if(req == NULL)
return NULL;
......
......@@ -1487,7 +1487,7 @@ static void scsi_restart_operations(struct Scsi_Host *shost)
break;
}
__blk_run_queue(&sdev->request_queue);
__blk_run_queue(sdev->request_queue);
}
spin_unlock_irqrestore(shost->host_lock, flags);
}
......
......@@ -57,9 +57,8 @@ struct scsi_host_sg_pool scsi_sg_pools[SG_MEMPOOL_NR] = {
*/
int scsi_insert_special_cmd(Scsi_Cmnd * SCpnt, int at_head)
{
	/* Hand the pre-formed command straight to the block layer;
	 * SCpnt rides along as the request's special data.  The
	 * device's request_queue is a pointer now, so it is passed
	 * directly instead of via address-of. */
	blk_insert_request(SCpnt->device->request_queue, SCpnt->request,
			   at_head, SCpnt);
	return 0;
}
......@@ -85,16 +84,13 @@ int scsi_insert_special_cmd(Scsi_Cmnd * SCpnt, int at_head)
*/
int scsi_insert_special_req(Scsi_Request * SRpnt, int at_head)
{
	/* This is used to insert SRpnt specials.  Because users of
	 * this function are apt to reuse requests with no modification,
	 * we have to sanitise the request flags here
	 */
	SRpnt->sr_request->flags &= ~REQ_DONTPREP;
	/* sr_device->request_queue is a pointer now; pass it directly. */
	blk_insert_request(SRpnt->sr_device->request_queue, SRpnt->sr_request,
			   at_head, SRpnt);
	return 0;
}
......@@ -215,7 +211,7 @@ void scsi_queue_next_request(request_queue_t * q, Scsi_Cmnd * SCpnt)
{
int all_clear;
unsigned long flags;
Scsi_Device *SDpnt;
Scsi_Device *SDpnt, *SDpnt2;
struct Scsi_Host *SHpnt;
ASSERT_LOCK(q->queue_lock, 0);
......@@ -256,17 +252,17 @@ void scsi_queue_next_request(request_queue_t * q, Scsi_Cmnd * SCpnt)
* with special case code, then spin off separate versions and
* use function pointers to pick the right one.
*/
if (SDpnt->single_lun && blk_queue_empty(q) && SDpnt->device_busy ==0) {
list_for_each_entry(SDpnt, &SHpnt->my_devices, siblings) {
if (((SHpnt->can_queue > 0)
&& (SHpnt->host_busy >= SHpnt->can_queue))
|| (SHpnt->host_blocked)
|| (SHpnt->host_self_blocked)
|| (SDpnt->device_blocked)) {
if (SDpnt->single_lun && blk_queue_empty(q) && SDpnt->device_busy ==0 &&
!SHpnt->host_blocked && !SHpnt->host_self_blocked &&
!((SHpnt->can_queue > 0) && (SHpnt->host_busy >=
SHpnt->can_queue))) {
list_for_each_entry(SDpnt2, &SDpnt->same_target_siblings,
same_target_siblings) {
if (!SDpnt2->device_blocked &&
!blk_queue_empty(SDpnt2->request_queue)) {
__blk_run_queue(SDpnt2->request_queue);
break;
}
__blk_run_queue(&SDpnt->request_queue);
}
}
......@@ -289,7 +285,7 @@ void scsi_queue_next_request(request_queue_t * q, Scsi_Cmnd * SCpnt)
if (SDpnt->device_blocked || !SDpnt->starved) {
continue;
}
__blk_run_queue(&SDpnt->request_queue);
__blk_run_queue(SDpnt->request_queue);
all_clear = 0;
}
if (SDpnt == NULL && all_clear) {
......@@ -327,7 +323,7 @@ static Scsi_Cmnd *scsi_end_request(Scsi_Cmnd * SCpnt,
int sectors,
int requeue)
{
request_queue_t *q = &SCpnt->device->request_queue;
request_queue_t *q = SCpnt->device->request_queue;
struct request *req = SCpnt->request;
unsigned long flags;
......@@ -497,7 +493,7 @@ void scsi_io_completion(Scsi_Cmnd * SCpnt, int good_sectors,
{
int result = SCpnt->result;
int this_count = SCpnt->bufflen >> 9;
request_queue_t *q = &SCpnt->device->request_queue;
request_queue_t *q = SCpnt->device->request_queue;
struct request *req = SCpnt->request;
/*
......@@ -1094,7 +1090,7 @@ void scsi_unblock_requests(struct Scsi_Host * SHpnt)
SHpnt->host_self_blocked = FALSE;
/* Now that we are unblocked, try to start the queues. */
list_for_each_entry(SDloop, &SHpnt->my_devices, siblings)
scsi_queue_next_request(&SDloop->request_queue, NULL);
scsi_queue_next_request(SDloop->request_queue, NULL);
}
/*
......
This diff is collapsed.
......@@ -1060,7 +1060,7 @@ sd_read_capacity(struct scsi_disk *sdkp, char *diskname,
*/
int hard_sector = sector_size;
sector_t sz = sdkp->capacity * (hard_sector/256);
request_queue_t *queue = &sdp->request_queue;
request_queue_t *queue = sdp->request_queue;
sector_t mb;
blk_queue_hardsect_size(queue, hard_sector);
......@@ -1295,7 +1295,7 @@ static int sd_attach(struct scsi_device * sdp)
if (sdp->removable)
gd->flags |= GENHD_FL_REMOVABLE;
gd->private_data = &sdkp->driver;
gd->queue = &sdkp->device->request_queue;
gd->queue = sdkp->device->request_queue;
sd_devlist_insert(sdkp);
set_capacity(gd, sdkp->capacity);
......
......@@ -695,7 +695,7 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
}
srp->my_cmdp = SRpnt;
q = &SRpnt->sr_device->request_queue;
q = SRpnt->sr_device->request_queue;
SRpnt->sr_request->rq_disk = sdp->disk;
SRpnt->sr_sense_buffer[0] = 0;
SRpnt->sr_cmd_len = hp->cmd_len;
......
......@@ -563,7 +563,7 @@ static int sr_attach(struct scsi_device *sdev)
register_cdrom(&cd->cdi);
set_capacity(disk, cd->capacity);
disk->private_data = &cd->driver;
disk->queue = &sdev->request_queue;
disk->queue = sdev->request_queue;
add_disk(disk);
sr_devlist_insert(cd);
......@@ -672,7 +672,7 @@ static void get_sectorsize(struct scsi_cd *cd)
set_capacity(cd->disk, cd->capacity);
}
queue = &cd->device->request_queue;
queue = cd->device->request_queue;
blk_queue_hardsect_size(queue, sector_size);
out:
kfree(buffer);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment