Commit 99d60d5d authored by James Bottomley

Merge patmans/axboe

parents f01d7733 099616a4
@@ -383,6 +383,7 @@ struct Scsi_Host * scsi_register(Scsi_Host_Template *shost_tp, int xtr_bytes)
scsi_assign_lock(shost, &shost->default_lock);
INIT_LIST_HEAD(&shost->my_devices);
INIT_LIST_HEAD(&shost->eh_cmd_q);
INIT_LIST_HEAD(&shost->starved_list);
init_waitqueue_head(&shost->host_wait);
shost->dma_channel = 0xff;
@@ -619,7 +620,6 @@ void scsi_host_busy_dec_and_test(struct Scsi_Host *shost, Scsi_Device *sdev)
spin_lock_irqsave(shost->host_lock, flags);
shost->host_busy--;
sdev->device_busy--;
if (shost->in_recovery && shost->host_failed &&
(shost->host_busy == shost->host_failed))
{
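The truncated conditional above is the error-handler wakeup test: during recovery, host_busy counts every outstanding command, failed or still running, so host_busy == host_failed means the last still-running command has just completed. A minimal standalone model of that test (plain C; the field names are borrowed from the hunk, everything else is illustrative):

#include <stdio.h>

struct shost { int in_recovery, host_busy, host_failed; };

/* Wake the error handler only when every command still counted as
 * busy is a failed one, i.e. nothing else is in flight. */
static int eh_should_wake(const struct shost *h)
{
	return h->in_recovery && h->host_failed &&
	       h->host_busy == h->host_failed;
}

int main(void)
{
	struct shost h = { .in_recovery = 1, .host_busy = 3, .host_failed = 2 };

	printf("%d\n", eh_should_wake(&h));	/* 0: one command still running */
	h.host_busy--;				/* that command now completes */
	printf("%d\n", eh_should_wake(&h));	/* 1: busy == failed, wake eh */
	return 0;
}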
@@ -380,6 +380,7 @@ struct Scsi_Host
struct scsi_host_cmd_pool *cmd_pool;
spinlock_t free_list_lock;
struct list_head free_list; /* backup store of cmd structs */
struct list_head starved_list;
spinlock_t default_lock;
spinlock_t *host_lock;
@@ -470,12 +471,6 @@ struct Scsi_Host
*/
unsigned reverse_ordering:1;
/*
* Indicates that one or more devices on this host were starved, and
* that we need to feed them when the host becomes less busy.
*/
unsigned some_device_starved:1;
/*
* Host has rejected a command because it was busy.
*/
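The Scsi_Host hunks above swap the advisory some_device_starved bit for an explicit FIFO: each scsi_device (see the starved_entry field added below) links itself into its host's starved_list when the host cannot take its commands. A minimal userspace sketch of the intrusive-list idiom, with simplified stand-ins for the <linux/list.h> helpers:

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for the <linux/list.h> intrusive list. */
struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }
static int list_empty(const struct list_head *h) { return h->next == h; }
static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev; n->next = h;
	h->prev->next = n; h->prev = n;
}
static void list_del_init(struct list_head *n)
{
	n->prev->next = n->next; n->next->prev = n->prev;
	INIT_LIST_HEAD(n);
}
#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct host   { struct list_head starved_list; };
struct device { int id; struct list_head starved_entry; };

/* A device is listed at most once: an empty starved_entry means
 * "not currently starved". */
static void mark_starved(struct host *h, struct device *d)
{
	if (list_empty(&d->starved_entry))
		list_add_tail(&d->starved_entry, &h->starved_list);
}

/* Detach and return the longest-starved device, or NULL. */
static struct device *pop_starved(struct host *h)
{
	struct device *d;

	if (list_empty(&h->starved_list))
		return NULL;
	d = list_entry(h->starved_list.next, struct device, starved_entry);
	list_del_init(&d->starved_entry);
	return d;
}

int main(void)
{
	struct host h;
	struct device a = { .id = 1 }, b = { .id = 2 };
	struct device *d;

	INIT_LIST_HEAD(&h.starved_list);
	INIT_LIST_HEAD(&a.starved_entry);
	INIT_LIST_HEAD(&b.starved_entry);
	mark_starved(&h, &a);
	mark_starved(&h, &b);
	mark_starved(&h, &a);			/* no-op: already listed */
	while ((d = pop_starved(&h)))
		printf("feed device %d\n", d->id);	/* 1, then 2 */
	return 0;
}

The FIFO means the longest-starved device is fed first, and the empty-node test is exactly how the new scsi_host_queue_ready() below decides whether a device is already queued.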
@@ -447,8 +447,6 @@ int scsi_dispatch_cmd(Scsi_Cmnd * SCpnt)
host = SCpnt->device->host;
ASSERT_LOCK(host->host_lock, 0);
/* Assign a unique nonzero serial_number. */
if (++serial_number == 0)
serial_number = 1;
@@ -574,8 +572,6 @@ void scsi_init_cmd_from_req(Scsi_Cmnd * SCpnt, Scsi_Request * SRpnt)
{
struct Scsi_Host *host = SCpnt->device->host;
ASSERT_LOCK(host->host_lock, 0);
SCpnt->owner = SCSI_OWNER_MIDLEVEL;
SRpnt->sr_command = SCpnt;
@@ -819,12 +815,11 @@ void scsi_finish_command(Scsi_Cmnd * SCpnt)
struct Scsi_Host *host;
Scsi_Device *device;
Scsi_Request * SRpnt;
unsigned long flags;
host = SCpnt->device->host;
device = SCpnt->device;
ASSERT_LOCK(host->host_lock, 0);
/*
* We need to protect the decrement, as otherwise a race condition
* would exist. Fiddling with SCpnt isn't a problem as the
@@ -833,6 +828,9 @@ void scsi_finish_command(Scsi_Cmnd * SCpnt)
* shared.
*/
scsi_host_busy_dec_and_test(host, device);
spin_lock_irqsave(SCpnt->device->request_queue->queue_lock, flags);
SCpnt->device->device_busy--;
spin_unlock_irqrestore(SCpnt->device->request_queue->queue_lock, flags);
/*
* Clear the flags which say that the device/host is no longer
@@ -417,7 +417,8 @@ extern void scsi_setup_cmd_retry(Scsi_Cmnd *SCpnt);
extern void scsi_io_completion(Scsi_Cmnd * SCpnt, int good_sectors,
int block_sectors);
extern int scsi_queue_insert(struct scsi_cmnd *cmd, int reason);
extern request_queue_t *scsi_alloc_queue(struct Scsi_Host *shost);
extern void scsi_queue_next_request(request_queue_t *q, struct scsi_cmnd *cmd);
extern request_queue_t *scsi_alloc_queue(struct scsi_device *sdev);
extern void scsi_free_queue(request_queue_t *q);
extern int scsi_init_queue(void);
extern void scsi_exit_queue(void);
@@ -530,6 +531,15 @@ struct scsi_dev_info_list {
extern struct list_head scsi_dev_info_list;
extern int scsi_dev_info_list_add_str(char *);
/*
* scsi_target: representation of a scsi target; for now, this is only
* used for single_lun devices.
*/
struct scsi_target {
unsigned int starget_busy;
unsigned int starget_refcnt;
};
/*
* The scsi_device struct contains what we know about each given scsi
* device.
@@ -554,8 +564,10 @@ struct scsi_device {
struct Scsi_Host *host;
request_queue_t *request_queue;
volatile unsigned short device_busy; /* commands actually active on low-level */
spinlock_t sdev_lock; /* also the request queue_lock */
spinlock_t list_lock;
struct list_head cmd_list; /* queue of in use SCSI Command structures */
struct list_head starved_entry;
Scsi_Cmnd *current_cmnd; /* currently active command */
unsigned short queue_depth; /* How deep of a queue we want */
unsigned short last_queue_full_depth; /* These two are used by */
@@ -586,6 +598,7 @@ struct scsi_device {
unsigned char current_tag; /* current tag */
// unsigned char sync_min_period; /* Not less than this period */
// unsigned char sync_max_offset; /* Not greater than this offset */
struct scsi_target *sdev_target; /* used only for single_lun */
unsigned online:1;
unsigned writeable:1;
@@ -616,8 +629,6 @@ struct scsi_device {
* because we did a bus reset. */
unsigned ten:1; /* support ten byte read / write */
unsigned remap:1; /* support remapping */
unsigned starved:1; /* unable to process commands because
host busy */
// unsigned sync:1; /* Sync transfer state, managed by host */
// unsigned wide:1; /* WIDE transfer state, managed by host */
@@ -431,8 +431,6 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, int timeout)
unsigned long flags;
int rtn = SUCCESS;
ASSERT_LOCK(host->host_lock, 0);
/*
* we will use a queued command if possible, otherwise we will
* emulate the queuing and calling of completion function ourselves.
@@ -1405,8 +1403,6 @@ static void scsi_restart_operations(struct Scsi_Host *shost)
struct scsi_device *sdev;
unsigned long flags;
ASSERT_LOCK(shost->host_lock, 0);
/*
* If the door was locked, we need to insert a door lock request
* onto the head of the SCSI request queue for the device. There
@@ -1434,18 +1430,11 @@ static void scsi_restart_operations(struct Scsi_Host *shost)
* now that error recovery is done, we will need to ensure that these
* requests are started.
*/
spin_lock_irqsave(shost->host_lock, flags);
list_for_each_entry(sdev, &shost->my_devices, siblings) {
if ((shost->can_queue > 0 &&
(shost->host_busy >= shost->can_queue))
|| (shost->host_blocked)
|| (shost->host_self_blocked)) {
break;
}
spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
__blk_run_queue(sdev->request_queue);
spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
}
spin_unlock_irqrestore(shost->host_lock, flags);
}
/**
@@ -1681,6 +1670,7 @@ scsi_reset_provider(struct scsi_device *dev, int flag)
struct scsi_cmnd *scmd = scsi_get_command(dev, GFP_KERNEL);
struct request req;
int rtn;
struct request_queue *q;
scmd->request = &req;
memset(&scmd->eh_timeout, 0, sizeof(scmd->eh_timeout));
@@ -1735,6 +1725,8 @@ scsi_reset_provider(struct scsi_device *dev, int flag)
}
scsi_delete_timer(scmd);
q = scmd->device->request_queue;
scsi_put_command(scmd);
scsi_queue_next_request(q, NULL);
return rtn;
}
@@ -92,6 +92,7 @@ int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
struct Scsi_Host *host = cmd->device->host;
struct scsi_device *device = cmd->device;
unsigned long flags;
SCSI_LOG_MLQUEUE(1,
printk("Inserting command %p into mlqueue\n", cmd));
@@ -130,6 +131,9 @@ int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
* Decrement the counters, since these commands are no longer
* active on the host/device.
*/
spin_lock_irqsave(device->request_queue->queue_lock, flags);
device->device_busy--;
spin_unlock_irqrestore(device->request_queue->queue_lock, flags);
scsi_host_busy_dec_and_test(host, device);
/*
@@ -174,14 +178,18 @@ void scsi_do_req(struct scsi_request *sreq, const void *cmnd,
void (*done)(struct scsi_cmnd *),
int timeout, int retries)
{
struct request_queue *q;
/*
* If the upper level driver is reusing these things, then
* we should release the low-level block now. Another one will
* be allocated later when this request is getting queued.
*/
if (sreq->sr_command) {
q = sreq->sr_command->device->request_queue;
scsi_put_command(sreq->sr_command);
sreq->sr_command = NULL;
scsi_queue_next_request(q, NULL);
}
/*
@@ -228,6 +236,7 @@ static void scsi_wait_done(struct scsi_cmnd *cmd)
void scsi_wait_req(struct scsi_request *sreq, const void *cmnd, void *buffer,
unsigned bufflen, int timeout, int retries)
{
struct request_queue *q;
DECLARE_COMPLETION(wait);
sreq->sr_request->waiting = &wait;
@@ -239,7 +248,9 @@ void scsi_wait_req(struct scsi_request *sreq, const void *cmnd, void *buffer,
sreq->sr_request->waiting = NULL;
if (sreq->sr_command) {
q = sreq->sr_command->device->request_queue;
scsi_put_command(sreq->sr_command);
scsi_queue_next_request(q, NULL);
sreq->sr_command = NULL;
}
}
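Note the shared ordering at all three new call sites (scsi_reset_provider above, scsi_do_req and scsi_wait_req here): the request_queue pointer is read out of the command before scsi_put_command() releases it, and only then is the queue kicked. Reading cmd->device->request_queue after the put would be a use-after-free. A tiny standalone sketch of the idiom, with hypothetical cmd_free()/queue_run() standing in for the midlayer calls:

#include <stdio.h>
#include <stdlib.h>

struct queue { const char *name; };
struct command { struct queue *queue; };

static void cmd_free(struct command *cmd) { free(cmd); }
static void queue_run(struct queue *q) { printf("kick %s\n", q->name); }

static void finish_and_kick(struct command *cmd)
{
	struct queue *q = cmd->queue;	/* capture while cmd is valid */

	cmd_free(cmd);			/* cmd is dead from here on */
	queue_run(q);			/* safe: q was saved above */
}

int main(void)
{
	static struct queue sda = { "sda" };
	struct command *cmd = malloc(sizeof(*cmd));

	if (!cmd)
		return 1;
	cmd->queue = &sda;
	finish_and_kick(cmd);
	return 0;
}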
@@ -315,6 +326,39 @@ void scsi_setup_cmd_retry(struct scsi_cmnd *cmd)
cmd->underflow = cmd->old_underflow;
}
/*
* Called for single_lun devices on IO completion: clear starget_busy and
* call __blk_run_queue for all the scsi_devices on the target, starting
* with current_sdev.
*
* Called with *no* scsi locks held.
*/
static void scsi_single_lun_run(struct scsi_device *current_sdev)
{
struct scsi_device *sdev;
unsigned long flags, flags2;
spin_lock_irqsave(current_sdev->request_queue->queue_lock, flags2);
spin_lock_irqsave(current_sdev->host->host_lock, flags);
WARN_ON(!current_sdev->sdev_target->starget_busy);
if (current_sdev->device_busy == 0)
current_sdev->sdev_target->starget_busy = 0;
spin_unlock_irqrestore(current_sdev->host->host_lock, flags);
/*
* Call __blk_run_queue for all LUNs on the target, starting with
* current_sdev.
*/
__blk_run_queue(current_sdev->request_queue);
spin_unlock_irqrestore(current_sdev->request_queue->queue_lock, flags2);
list_for_each_entry(sdev, &current_sdev->same_target_siblings,
same_target_siblings) {
spin_lock_irqsave(sdev->request_queue->queue_lock, flags2);
__blk_run_queue(sdev->request_queue);
spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags2);
}
}
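With the locking peeled away, scsi_single_lun_run() reduces to: release the target only when the finishing LUN has fully drained, then let every LUN try to issue, the current one first. A compact model under those assumptions (illustrative names, no locking):

#include <stdio.h>

struct target { int busy; };
struct lun { struct target *target; int device_busy; const char *name; };

static void run_queue(struct lun *l) { printf("run %s\n", l->name); }

static void single_lun_run(struct lun *cur, struct lun *luns, int n)
{
	if (cur->device_busy == 0)
		cur->target->busy = 0;		/* target is free again */
	run_queue(cur);				/* current LUN goes first */
	for (int i = 0; i < n; i++)
		if (&luns[i] != cur)
			run_queue(&luns[i]);	/* then its siblings */
}

int main(void)
{
	struct target t = { .busy = 1 };
	struct lun luns[2] = { { &t, 0, "lun0" }, { &t, 0, "lun1" } };

	single_lun_run(&luns[0], luns, 2);	/* runs lun0, then lun1 */
	return 0;
}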
/*
* Function: scsi_queue_next_request()
*
@@ -351,16 +395,12 @@ void scsi_setup_cmd_retry(struct scsi_cmnd *cmd)
* permutations grows as 2**N, and if too many more special cases
* get added, we start to get screwed.
*/
static void scsi_queue_next_request(request_queue_t *q, struct scsi_cmnd *cmd)
void scsi_queue_next_request(request_queue_t *q, struct scsi_cmnd *cmd)
{
struct scsi_device *sdev, *sdev2;
struct Scsi_Host *shost;
unsigned long flags;
int all_clear;
ASSERT_LOCK(q->queue_lock, 0);
spin_lock_irqsave(q->queue_lock, flags);
if (cmd != NULL) {
/*
@@ -369,6 +409,7 @@ static void scsi_queue_next_request(request_queue_t *q, struct scsi_cmnd *cmd)
* in which case we need to request the blocks that come after
* the bad sector.
*/
spin_lock_irqsave(q->queue_lock, flags);
cmd->request->special = cmd;
if (blk_rq_tagged(cmd->request))
blk_queue_end_tag(q, cmd->request);
@@ -381,62 +422,45 @@ static void scsi_queue_next_request(request_queue_t *q, struct scsi_cmnd *cmd)
cmd->request->flags |= REQ_SPECIAL;
cmd->request->flags &= ~REQ_DONTPREP;
__elv_add_request(q, cmd->request, 0, 0);
spin_unlock_irqrestore(q->queue_lock, flags);
}
/*
* Just hit the requeue function for the queue.
*/
__blk_run_queue(q);
sdev = q->queuedata;
shost = sdev->host;
/*
* If this is a single-lun device, and we are currently finished
* with this device, then see if we need to get another device
* started. FIXME(eric) - if this function gets too cluttered
* with special case code, then spin off separate versions and
* use function pointers to pick the right one.
*/
if (sdev->single_lun && sdev->device_busy == 0 &&
!shost->host_blocked && !shost->host_self_blocked &&
!((shost->can_queue > 0) && (shost->host_busy >= shost->can_queue))
&& elv_queue_empty(q)) {
list_for_each_entry(sdev2, &sdev->same_target_siblings,
same_target_siblings) {
if (!sdev2->device_blocked &&
!elv_queue_empty(sdev2->request_queue)) {
__blk_run_queue(sdev2->request_queue);
break;
}
}
}
if (sdev->single_lun)
scsi_single_lun_run(sdev);
/*
* Now see whether there are other devices on the bus which
* might be starved. If so, hit the request function. If we
* don't find any, then it is safe to reset the flag. If we
* find any device that is starved, it isn't safe to reset the
* flag as the queue function releases the lock and thus some
* other device might have become starved along the way.
*/
all_clear = 1;
if (shost->some_device_starved) {
list_for_each_entry(sdev, &shost->my_devices, siblings) {
if (shost->can_queue > 0 &&
shost->host_busy >= shost->can_queue)
break;
if (shost->host_blocked || shost->host_self_blocked)
break;
if (sdev->device_blocked || !sdev->starved)
continue;
__blk_run_queue(sdev->request_queue);
all_clear = 0;
}
shost = sdev->host;
spin_lock_irqsave(shost->host_lock, flags);
while (!list_empty(&shost->starved_list) &&
!shost->host_blocked && !shost->host_self_blocked &&
!((shost->can_queue > 0) &&
(shost->host_busy >= shost->can_queue))) {
/*
* As long as shost is accepting commands and we have
* starved queues, call __blk_run_queue. scsi_request_fn
* drops the queue_lock and can add us back to the
* starved_list.
*
* host_lock protects the starved_list and starved_entry.
* scsi_request_fn must get the host_lock before checking
* or modifying starved_list or starved_entry.
*/
sdev2 = list_entry(shost->starved_list.next,
struct scsi_device, starved_entry);
list_del_init(&sdev2->starved_entry);
spin_unlock_irqrestore(shost->host_lock, flags);
spin_lock_irqsave(sdev2->request_queue->queue_lock, flags);
__blk_run_queue(sdev2->request_queue);
spin_unlock_irqrestore(sdev2->request_queue->queue_lock, flags);
if (sdev == NULL && all_clear)
shost->some_device_starved = 0;
spin_lock_irqsave(shost->host_lock, flags);
}
spin_unlock_irqrestore(shost->host_lock, flags);
spin_lock_irqsave(q->queue_lock, flags);
__blk_run_queue(q);
spin_unlock_irqrestore(q->queue_lock, flags);
}
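The rewritten drain loop above never holds host_lock across __blk_run_queue(): it detaches one entry under host_lock, drops that lock, runs the device's queue under the queue's own lock (a window in which scsi_request_fn may legitimately re-add the device to starved_list), then re-takes host_lock and re-tests the loop condition. A hedged userspace model of that lock choreography, with pthread mutexes standing in for the spinlocks:

#include <pthread.h>
#include <stdio.h>

struct dev {
	struct dev *next;		/* starved-list linkage */
	pthread_mutex_t queue_lock;	/* per-device queue lock */
	const char *name;
};

static pthread_mutex_t host_lock = PTHREAD_MUTEX_INITIALIZER;
static struct dev *starved_head;	/* protected by host_lock */

/* Stand-in for __blk_run_queue(); called with d->queue_lock held. */
static void run_queue(struct dev *d)
{
	printf("run %s\n", d->name);
}

static void drain_starved(void)
{
	pthread_mutex_lock(&host_lock);
	while (starved_head) {
		struct dev *d = starved_head;

		starved_head = d->next;		/* detach under host_lock */
		d->next = NULL;
		pthread_mutex_unlock(&host_lock);

		pthread_mutex_lock(&d->queue_lock);
		run_queue(d);			/* may re-starve d here */
		pthread_mutex_unlock(&d->queue_lock);

		pthread_mutex_lock(&host_lock);	/* re-test under the lock */
	}
	pthread_mutex_unlock(&host_lock);
}

int main(void)
{
	struct dev b = { NULL, PTHREAD_MUTEX_INITIALIZER, "sdb" };
	struct dev a = { &b,   PTHREAD_MUTEX_INITIALIZER, "sda" };

	starved_head = &a;
	drain_starved();			/* runs sda, then sdb */
	return 0;
}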
@@ -470,8 +494,6 @@ static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
struct request *req = cmd->request;
unsigned long flags;
ASSERT_LOCK(q->queue_lock, 0);
/*
* If there are blocks left over at the end, set up the command
* to queue the remainder of them.
@@ -569,8 +591,6 @@ static void scsi_release_buffers(struct scsi_cmnd *cmd)
{
struct request *req = cmd->request;
ASSERT_LOCK(cmd->device->host->host_lock, 0);
/*
* Free up any indirection buffers we allocated for DMA purposes.
*/
@@ -651,8 +671,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, int good_sectors,
* would be used if we just wanted to retry, for example.
*
*/
ASSERT_LOCK(q->queue_lock, 0);
/*
* Free up any indirection buffers we allocated for DMA purposes.
* For the case of a READ, we need to copy the data out of the
@@ -923,22 +941,6 @@ static int scsi_init_io(struct scsi_cmnd *cmd)
return BLKPREP_KILL;
}
/*
* The target associated with myself can only handle one active command at
* a time. Scan through all of the luns on the same target as myself,
* return 1 if any are active.
*/
static int check_all_luns(struct scsi_device *myself)
{
struct scsi_device *sdev;
list_for_each_entry(sdev, &myself->same_target_siblings,
same_target_siblings)
if (sdev->device_busy)
return 1;
return 0;
}
static int scsi_prep_fn(struct request_queue *q, struct request *req)
{
struct Scsi_Device_Template *sdt;
@@ -1039,6 +1041,81 @@ static int scsi_prep_fn(struct request_queue *q, struct request *req)
return BLKPREP_OK;
}
/*
* scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
* return 0.
*
* Called with the queue_lock held.
*/
static inline int scsi_dev_queue_ready(struct request_queue *q,
struct scsi_device *sdev)
{
if (sdev->device_busy >= sdev->queue_depth)
return 0;
if (sdev->device_busy == 0 && sdev->device_blocked) {
/*
* unblock after device_blocked iterates to zero
*/
if (--sdev->device_blocked == 0) {
SCSI_LOG_MLQUEUE(3,
printk("scsi%d (%d:%d) unblocking device at"
" zero depth\n", sdev->host->host_no,
sdev->id, sdev->lun));
} else {
blk_plug_device(q);
return 0;
}
}
if (sdev->device_blocked)
return 0;
return 1;
}
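device_blocked is a countdown rather than a flag: once the device is idle, each queue run decrements it, only the run that reaches zero lets commands flow again, and every earlier run just re-plugs the queue. A standalone model of the gate, with plug() standing in for blk_plug_device():

#include <stdio.h>

struct sdev { int device_busy, device_blocked, queue_depth; };

static void plug(void) { printf("plugged, retry later\n"); }

/* Mirrors scsi_dev_queue_ready(): 1 = may issue, 0 = not now. */
static int dev_queue_ready(struct sdev *sdev)
{
	if (sdev->device_busy >= sdev->queue_depth)
		return 0;
	if (sdev->device_busy == 0 && sdev->device_blocked) {
		if (--sdev->device_blocked == 0) {
			printf("unblocked at zero depth\n");
		} else {
			plug();
			return 0;
		}
	}
	if (sdev->device_blocked)
		return 0;
	return 1;
}

int main(void)
{
	struct sdev s = { .device_busy = 0, .device_blocked = 3,
			  .queue_depth = 4 };

	/* Three idle queue runs count the block down: 0, 0, then 1. */
	for (int i = 0; i < 3; i++)
		printf("ready=%d\n", dev_queue_ready(&s));
	return 0;
}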
/*
* scsi_host_queue_ready: if we can send requests to shost, return 1 else
* return 0.
*
* Called with queue_lock and host_lock held.
*/
static inline int scsi_host_queue_ready(struct request_queue *q,
struct Scsi_Host *shost,
struct scsi_device *sdev)
{
if (shost->in_recovery)
return 0;
if (shost->host_busy == 0 && shost->host_blocked) {
/*
* unblock after host_blocked iterates to zero
*/
if (--shost->host_blocked == 0) {
SCSI_LOG_MLQUEUE(3,
printk("scsi%d unblocking host at zero depth\n",
shost->host_no));
} else {
blk_plug_device(q);
return 0;
}
}
if (!list_empty(&sdev->starved_entry))
return 0;
if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
shost->host_blocked || shost->host_self_blocked) {
SCSI_LOG_MLQUEUE(3,
printk("add starved dev <%d,%d,%d,%d>; host "
"limit %d, busy %d, blocked %d selfblocked %d\n",
sdev->host->host_no, sdev->channel,
sdev->id, sdev->lun,
shost->can_queue, shost->host_busy,
shost->host_blocked, shost->host_self_blocked));
list_add_tail(&sdev->starved_entry,
&shost->starved_list);
return 0;
}
return 1;
}
/*
* Function: scsi_request_fn()
*
@@ -1056,74 +1133,26 @@ static void scsi_request_fn(request_queue_t *q)
struct Scsi_Host *shost = sdev->host;
struct scsi_cmnd *cmd;
struct request *req;
ASSERT_LOCK(q->queue_lock, 1);
unsigned long flags;
/*
* To start with, we keep looping until the queue is empty, or until
* the host is no longer able to accept any more requests.
*/
for (;;) {
/*
* Check this again - each time we loop through we will have
* released the lock and grabbed it again, so each time
* we need to check to see if the queue is plugged or not.
*/
if (shost->in_recovery || blk_queue_plugged(q))
return;
if (sdev->device_busy >= sdev->queue_depth)
break;
if (blk_queue_plugged(q))
goto completed;
if (sdev->single_lun && check_all_luns(sdev))
break;
if (!scsi_dev_queue_ready(q, sdev))
goto completed;
if (shost->host_busy == 0 && shost->host_blocked) {
/* unblock after host_blocked iterates to zero */
if (--shost->host_blocked == 0) {
SCSI_LOG_MLQUEUE(3,
printk("scsi%d unblocking host at zero depth\n",
shost->host_no));
} else {
blk_plug_device(q);
break;
}
}
if (sdev->device_busy == 0 && sdev->device_blocked) {
/* unblock after device_blocked iterates to zero */
if (--sdev->device_blocked == 0) {
SCSI_LOG_MLQUEUE(3,
printk("scsi%d (%d:%d) unblocking device at zero depth\n",
shost->host_no, sdev->id, sdev->lun));
} else {
blk_plug_device(q);
break;
}
}
spin_lock_irqsave(shost->host_lock, flags);
if (!scsi_host_queue_ready(q, shost, sdev))
goto after_host_lock;
/*
* If the device cannot accept another request, then quit.
*/
if (sdev->device_blocked)
break;
if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
shost->host_blocked || shost->host_self_blocked) {
/*
* If we are unable to process any commands at all for
* this device, then we consider it to be starved.
* What this means is that there are no outstanding
* commands for this device and hence we need a
* little help getting it started again
* once the host isn't quite so busy.
*/
if (sdev->device_busy == 0) {
sdev->starved = 1;
shost->some_device_starved = 1;
}
break;
} else
sdev->starved = 0;
if (sdev->single_lun && !sdev->device_busy &&
sdev->sdev_target->starget_busy)
goto after_host_lock;
/*
* get next queueable request. We do this early to make sure
@@ -1137,9 +1166,9 @@ static void scsi_request_fn(request_queue_t *q)
/* If the device is busy, a returning I/O
* will restart the queue. Otherwise, we have
* to plug the queue */
if(sdev->device_busy == 0)
if (sdev->device_busy == 1)
blk_plug_device(q);
break;
goto after_host_lock;
}
cmd = req->special;
@@ -1161,11 +1190,12 @@ static void scsi_request_fn(request_queue_t *q)
if (!(blk_queue_tagged(q) && (blk_queue_start_tag(q, req) == 0)))
blkdev_dequeue_request(req);
/*
* Now bump the usage count for both the host and the
* device.
*/
if (sdev->single_lun)
sdev->sdev_target->starget_busy = 1;
shost->host_busy++;
spin_unlock_irqrestore(shost->host_lock, flags);
sdev->device_busy++;
spin_unlock_irq(q->queue_lock);
@@ -1186,6 +1216,11 @@ static void scsi_request_fn(request_queue_t *q)
*/
spin_lock_irq(q->queue_lock);
}
completed:
return;
after_host_lock:
spin_unlock_irqrestore(shost->host_lock, flags);
}
u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
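After this hunk the two busy counters live under two different locks: host_busy stays under host_lock, which is nested inside the queue lock and dropped as soon as the increment is done, while device_busy is protected by the per-device queue lock alone (as the decrements in scsi_queue_insert and scsi_finish_command above also show). A minimal pthread model of the dispatch-side accounting, assuming the caller enters with the queue lock held as scsi_request_fn does:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t host_lock  = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

static int host_busy;		/* protected by host_lock */
static int device_busy;		/* protected by queue_lock */

/* Called with queue_lock held; host_lock is taken only around the
 * host-wide counter and released before the per-device bump. */
static void account_dispatch(void)
{
	pthread_mutex_lock(&host_lock);
	host_busy++;
	pthread_mutex_unlock(&host_lock);
	device_busy++;			/* still under queue_lock */
}

int main(void)
{
	pthread_mutex_lock(&queue_lock);
	account_dispatch();
	pthread_mutex_unlock(&queue_lock);
	printf("host_busy=%d device_busy=%d\n", host_busy, device_busy);
	return 0;
}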
@@ -1207,15 +1242,20 @@ u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
return BLK_BOUNCE_HIGH;
}
request_queue_t *scsi_alloc_queue(struct Scsi_Host *shost)
request_queue_t *scsi_alloc_queue(struct scsi_device *sdev)
{
request_queue_t *q;
struct Scsi_Host *shost;
q = kmalloc(sizeof(*q), GFP_ATOMIC);
if (!q)
return NULL;
memset(q, 0, sizeof(*q));
/*
* XXX move host code to scsi_register
*/
shost = sdev->host;
if (!shost->max_sectors) {
/*
* Driver imposes no hard sector transfer limit.
@@ -1224,7 +1264,7 @@ request_queue_t *scsi_alloc_queue(struct Scsi_Host *shost)
shost->max_sectors = SCSI_DEFAULT_MAX_SECTORS;
}
blk_init_queue(q, scsi_request_fn, shost->host_lock);
blk_init_queue(q, scsi_request_fn, &sdev->sdev_lock);
blk_queue_prep_rq(q, scsi_prep_fn);
blk_queue_max_hw_segments(q, shost->sg_tablesize);
@@ -387,7 +387,7 @@ static void print_inquiry(unsigned char *inq_result)
* Scsi_Device pointer, or NULL on failure.
**/
static struct scsi_device *scsi_alloc_sdev(struct Scsi_Host *shost,
struct request_queue **q, uint channel, uint id, uint lun)
uint channel, uint id, uint lun)
{
struct scsi_device *sdev, *device;
@@ -407,6 +407,7 @@ static struct scsi_device *scsi_alloc_sdev(struct Scsi_Host *shost,
INIT_LIST_HEAD(&sdev->siblings);
INIT_LIST_HEAD(&sdev->same_target_siblings);
INIT_LIST_HEAD(&sdev->cmd_list);
INIT_LIST_HEAD(&sdev->starved_entry);
spin_lock_init(&sdev->list_lock);
/*
@@ -421,14 +422,10 @@ static struct scsi_device *scsi_alloc_sdev(struct Scsi_Host *shost,
*/
sdev->borken = 1;
if (!q || *q == NULL) {
sdev->request_queue = scsi_alloc_queue(shost);
if (!sdev->request_queue)
goto out_free_dev;
} else {
sdev->request_queue = *q;
*q = NULL;
}
spin_lock_init(&sdev->sdev_lock);
sdev->request_queue = scsi_alloc_queue(sdev);
if (!sdev->request_queue)
goto out_free_dev;
sdev->request_queue->queuedata = sdev;
scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
@@ -468,10 +465,7 @@ static struct scsi_device *scsi_alloc_sdev(struct Scsi_Host *shost,
return sdev;
out_free_queue:
if (q && sdev->request_queue) {
*q = sdev->request_queue;
sdev->request_queue = NULL;
} else if (sdev->request_queue)
if (sdev->request_queue)
scsi_free_queue(sdev->request_queue);
out_free_dev:
@@ -491,6 +485,8 @@ static struct scsi_device *scsi_alloc_sdev(struct Scsi_Host *shost,
**/
static void scsi_free_sdev(struct scsi_device *sdev)
{
unsigned long flags;
list_del(&sdev->siblings);
list_del(&sdev->same_target_siblings);
@@ -500,6 +496,14 @@ static void scsi_free_sdev(struct scsi_device *sdev)
sdev->host->hostt->slave_destroy(sdev);
if (sdev->inquiry)
kfree(sdev->inquiry);
if (sdev->single_lun) {
spin_lock_irqsave(sdev->host->host_lock, flags);
sdev->sdev_target->starget_refcnt--;
if (sdev->sdev_target->starget_refcnt == 0)
kfree(sdev->sdev_target);
spin_unlock_irqrestore(sdev->host->host_lock, flags);
}
kfree(sdev);
}
@@ -1135,6 +1139,10 @@ static void scsi_probe_lun(Scsi_Request *sreq, char *inq_result,
static int scsi_add_lun(Scsi_Device *sdev, Scsi_Request *sreq,
char *inq_result, int *bflags)
{
struct scsi_device *sdev_sibling;
struct scsi_target *starget;
unsigned long flags;
/*
* XXX do not save the inquiry, since it can change underneath us,
* save just vendor/model/rev.
@@ -1256,10 +1264,38 @@ static int scsi_add_lun(Scsi_Device *sdev, Scsi_Request *sreq,
/*
* If we need to allow I/O to only one of the luns attached to
* this target id at a time, then we set this flag.
* this target id at a time, set single_lun and allocate or modify
* sdev_target.
*/
if (*bflags & BLIST_SINGLELUN)
if (*bflags & BLIST_SINGLELUN) {
sdev->single_lun = 1;
spin_lock_irqsave(sdev->host->host_lock, flags);
starget = NULL;
/*
* Search for an existing target for this sdev.
*/
list_for_each_entry(sdev_sibling, &sdev->same_target_siblings,
same_target_siblings) {
if (sdev_sibling->sdev_target != NULL) {
starget = sdev_sibling->sdev_target;
break;
}
}
if (!starget) {
starget = kmalloc(sizeof(*starget), GFP_KERNEL);
if (!starget) {
printk(ALLOC_FAILURE_MSG, __FUNCTION__);
spin_unlock_irqrestore(sdev->host->host_lock,
flags);
return SCSI_SCAN_NO_RESPONSE;
}
starget->starget_refcnt = 0;
starget->starget_busy = 0;
}
starget->starget_refcnt++;
sdev->sdev_target = starget;
spin_unlock_irqrestore(sdev->host->host_lock, flags);
}
/* if the device needs this changing, it may do so in the detect
* function */
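single_lun siblings share one scsi_target: the first LUN probed on the target allocates it, later LUNs find it through the same_target_siblings list and bump starget_refcnt, and scsi_free_sdev() above frees it once the count reaches zero. A simplified find-or-allocate sketch of that idiom (illustrative names, and no locking, where the real code holds host_lock):

#include <stdio.h>
#include <stdlib.h>

struct target { int refcnt, busy; };

struct device {
	struct device *next_sibling;	/* circular same-target list */
	struct target *target;
};

/* Reuse a sibling's target if one exists, else allocate; returns
 * NULL only on allocation failure. */
static struct target *target_get(struct device *dev)
{
	struct target *t = NULL;

	for (struct device *s = dev->next_sibling; s && s != dev;
	     s = s->next_sibling) {
		if (s->target) {
			t = s->target;
			break;
		}
	}
	if (!t) {
		t = calloc(1, sizeof(*t));
		if (!t)
			return NULL;
	}
	t->refcnt++;
	return t;
}

static void target_put(struct target *t)
{
	if (--t->refcnt == 0)
		free(t);
}

int main(void)
{
	struct device a = { NULL, NULL }, b = { NULL, NULL };

	a.next_sibling = &b;
	b.next_sibling = &a;
	a.target = target_get(&a);	/* allocates, refcnt = 1 */
	b.target = target_get(&b);	/* shares a's target, refcnt = 2 */
	if (!a.target || !b.target)
		return 1;
	printf("shared=%d refcnt=%d\n", a.target == b.target,
	       a.target->refcnt);
	target_put(b.target);
	target_put(a.target);		/* freed at zero */
	return 0;
}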
@@ -1288,15 +1324,15 @@ static int scsi_add_lun(Scsi_Device *sdev, Scsi_Request *sreq,
* SCSI_SCAN_LUN_PRESENT: a new Scsi_Device was allocated and initialized
**/
static int scsi_probe_and_add_lun(struct Scsi_Host *host,
struct request_queue **q, uint channel, uint id, uint lun,
int *bflagsp, struct scsi_device **sdevp)
uint channel, uint id, uint lun, int *bflagsp,
struct scsi_device **sdevp)
{
struct scsi_device *sdev;
struct scsi_request *sreq;
unsigned char *result;
int bflags, res = SCSI_SCAN_NO_RESPONSE;
sdev = scsi_alloc_sdev(host, q, channel, id, lun);
sdev = scsi_alloc_sdev(host, channel, id, lun);
if (!sdev)
goto out;
sreq = scsi_allocate_request(sdev);
@@ -1350,13 +1386,8 @@ static int scsi_probe_and_add_lun(struct Scsi_Host *host,
if (res == SCSI_SCAN_LUN_PRESENT) {
if (sdevp)
*sdevp = sdev;
} else {
if (q) {
*q = sdev->request_queue;
sdev->request_queue = NULL;
}
} else
scsi_free_sdev(sdev);
}
out:
return res;
}
@@ -1374,9 +1405,8 @@ static int scsi_probe_and_add_lun(struct Scsi_Host *host,
*
* Modifies sdevscan->lun.
**/
static void scsi_sequential_lun_scan(struct Scsi_Host *shost,
struct request_queue **q, uint channel, uint id,
int bflags, int lun0_res, int scsi_level)
static void scsi_sequential_lun_scan(struct Scsi_Host *shost, uint channel,
uint id, int bflags, int lun0_res, int scsi_level)
{
unsigned int sparse_lun, lun, max_dev_lun;
@@ -1444,7 +1474,7 @@ static void scsi_sequential_lun_scan(struct Scsi_Host *shost,
* sparse_lun.
*/
for (lun = 1; lun < max_dev_lun; ++lun)
if ((scsi_probe_and_add_lun(shost, q, channel, id, lun,
if ((scsi_probe_and_add_lun(shost, channel, id, lun,
NULL, NULL) != SCSI_SCAN_LUN_PRESENT) && !sparse_lun)
return;
}
@@ -1497,8 +1527,7 @@ static int scsilun_to_int(ScsiLun *scsilun)
* 0: scan completed (or no memory, so further scanning is futile)
* 1: no report lun scan, or not configured
**/
static int scsi_report_lun_scan(Scsi_Device *sdev, struct request_queue **q,
int bflags)
static int scsi_report_lun_scan(Scsi_Device *sdev, int bflags)
{
#ifdef CONFIG_SCSI_REPORT_LUNS
@@ -1659,8 +1688,8 @@ static int scsi_report_lun_scan(Scsi_Device *sdev, struct request_queue **q,
} else {
int res;
res = scsi_probe_and_add_lun(sdev->host, q,
sdev->channel, sdev->id, lun, NULL, NULL);
res = scsi_probe_and_add_lun(sdev->host, sdev->channel,
sdev->id, lun, NULL, NULL);
if (res == SCSI_SCAN_NO_RESPONSE) {
/*
* Got some results, but now none, abort.
@@ -1688,8 +1717,7 @@ struct scsi_device *scsi_add_device(struct Scsi_Host *shost,
struct scsi_device *sdev;
int error = -ENODEV, res;
res = scsi_probe_and_add_lun(shost, NULL, channel, id, lun,
NULL, &sdev);
res = scsi_probe_and_add_lun(shost, channel, id, lun, NULL, &sdev);
if (res == SCSI_SCAN_LUN_PRESENT)
error = scsi_attach_device(sdev);
@@ -1730,8 +1758,8 @@ int scsi_remove_device(struct scsi_device *sdev)
* First try a REPORT LUN scan, if that does not scan the target, do a
* sequential scan of LUNs on the target id.
**/
static void scsi_scan_target(struct Scsi_Host *shost, struct request_queue **q,
unsigned int channel, unsigned int id)
static void scsi_scan_target(struct Scsi_Host *shost, unsigned int channel,
unsigned int id)
{
int bflags = 0;
int res;
@@ -1747,14 +1775,14 @@ static void scsi_scan_target(struct Scsi_Host *shost, struct request_queue **q,
* Scan LUN 0, if there is some response, scan further. Ideally, we
* would not configure LUN 0 until all LUNs are scanned.
*/
res = scsi_probe_and_add_lun(shost, q, channel, id, 0, &bflags, &sdev);
res = scsi_probe_and_add_lun(shost, channel, id, 0, &bflags, &sdev);
if (res == SCSI_SCAN_LUN_PRESENT) {
if (scsi_report_lun_scan(sdev, q, bflags) != 0)
if (scsi_report_lun_scan(sdev, bflags) != 0)
/*
* The REPORT LUN did not scan the target,
* do a sequential scan.
*/
scsi_sequential_lun_scan(shost, q, channel, id, bflags,
scsi_sequential_lun_scan(shost, channel, id, bflags,
res, sdev->scsi_level);
} else if (res == SCSI_SCAN_TARGET_PRESENT) {
/*
@@ -1763,7 +1791,7 @@ static void scsi_scan_target(struct Scsi_Host *shost, struct request_queue **q,
* sequential lun scan with a bflags of SPARSELUN and
* a default scsi level of SCSI_2
*/
scsi_sequential_lun_scan(shost, q, channel, id, BLIST_SPARSELUN,
scsi_sequential_lun_scan(shost, channel, id, BLIST_SPARSELUN,
SCSI_SCAN_TARGET_PRESENT, SCSI_2);
}
}
@@ -1778,7 +1806,6 @@ static void scsi_scan_target(struct Scsi_Host *shost, struct request_queue **q,
**/
void scsi_scan_host(struct Scsi_Host *shost)
{
struct request_queue *q = NULL;
uint channel, id, order_id;
/*
@@ -1803,12 +1830,9 @@ void scsi_scan_host(struct Scsi_Host *shost)
order_id = shost->max_id - id - 1;
else
order_id = id;
scsi_scan_target(shost, &q, channel, order_id);
scsi_scan_target(shost, channel, order_id);
}
}
if (q)
scsi_free_queue(q);
}
void scsi_forget_host(struct Scsi_Host *shost)
@@ -1847,7 +1871,7 @@ struct scsi_device *scsi_get_host_dev(struct Scsi_Host *shost)
{
struct scsi_device *sdev;
sdev = scsi_alloc_sdev(shost, NULL, 0, shost->this_id, 0);
sdev = scsi_alloc_sdev(shost, 0, shost->this_id, 0);
if (sdev) {
sdev->borken = 0;
}
@@ -60,6 +60,8 @@ EXPORT_SYMBOL(scsi_allocate_request);
EXPORT_SYMBOL(scsi_release_request);
EXPORT_SYMBOL(scsi_wait_req);
EXPORT_SYMBOL(scsi_do_req);
EXPORT_SYMBOL(scsi_get_command);
EXPORT_SYMBOL(scsi_put_command);
EXPORT_SYMBOL(scsi_report_bus_reset);
EXPORT_SYMBOL(scsi_block_requests);