Commit b7ccbd3e authored by Christoph Hellwig, committed by James Bottomley

[PATCH] Re: scsi_scan.c complaints...

On Fri, Dec 20, 2002 at 08:29:23PM -0500, Doug Ledford wrote:
> And I was right.  One little q = NULL; is all that was missing.  Anyway,
> here's a printout of what startup looks like with this patch in place
> under 2.5.52.  This should make you happy Justin ;-)

Okay, I looked at the patches that are in mainline and they look pretty
cool to me.  When looking over the code (in preparation for implementing
Justin's suggestion to get rid of the highmem_io flag) I found quite
a bit of small stuff to make the code in that area a lot cleaner:

- new helper scsi_calculate_bounce_limit to calculate the bounce
  limit for a scsi host, and remove a copy of that code in st.c
  (a usage sketch follows below)
- scsi_initialize_queue gets replaced with scsi_alloc_queue, which
  now takes only a struct Scsi_Host and returns the request queue;
  it's paired with a small scsi_free_queue helper (see the pairing
  sketch after the hunk that introduces the two helpers in the diff)
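
For illustration only (editor's sketch, not part of the patch): with the
helper exported, an upper-level driver can turn the host's bounce limit
into the highest page frame number it may do direct I/O to, which is what
the st.c hunk at the end of the diff now does inline in st_attach().  The
st_max_pfn() wrapper name below is invented for the example; it assumes
the usual "hosts.h"/"scsi.h" declarations are in scope.

	/* Hypothetical wrapper; st_attach() open-codes the same two steps. */
	static unsigned long st_max_pfn(struct Scsi_Host *shost)
	{
		/* Highest address the host can DMA to, as a page frame number. */
		u64 bounce_limit = scsi_calculate_bounce_limit(shost) >> PAGE_SHIFT;

		/* The result is kept in an unsigned long; clamp 64-bit limits. */
		if (bounce_limit > ULONG_MAX)
			bounce_limit = ULONG_MAX;

		return bounce_limit;
	}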

Diffstat:

 hosts.h     |    3 -
 scsi.c      |   43 ----------------------
 scsi.h      |    1 +
 scsi_scan.c |  113 ++++++++++++++++++++++++++++++++++--------------------------
 scsi_syms.c |    5 ++
 st.c        |   16 +-------
 6 files changed, 73 insertions(+), 108 deletions(-)
parent 77ea1aeb
--- a/drivers/scsi/hosts.h
+++ b/drivers/scsi/hosts.h
@@ -554,9 +554,6 @@ struct Scsi_Device_Template
 	struct device_driver scsi_driverfs_driver;
 };
 
-void scsi_initialize_queue(Scsi_Device *, struct Scsi_Host *);
-
 /*
  * Highlevel driver registration/unregistration.
  */
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -147,49 +147,6 @@ LIST_HEAD(scsi_dev_info_list);
 extern void scsi_times_out(Scsi_Cmnd * SCpnt);
 void scsi_build_commandblocks(Scsi_Device * SDpnt);
 
-/*
- * Function:    scsi_initialize_queue()
- *
- * Purpose:     Sets up the block queue for a device.
- *
- * Arguments:   SDpnt   - device for which we need a handler function.
- *
- * Returns:     Nothing
- *
- * Lock status: No locking assumed or required.
- */
-void scsi_initialize_queue(Scsi_Device * SDpnt, struct Scsi_Host * SHpnt)
-{
-	request_queue_t *q = SDpnt->request_queue;
-
-	/*
-	 * tell block layer about assigned host_lock for this host
-	 */
-	blk_init_queue(q, scsi_request_fn, SHpnt->host_lock);
-
-	/* Hardware imposed limit. */
-	blk_queue_max_hw_segments(q, SHpnt->sg_tablesize);
-
-	/*
-	 * scsi_alloc_sgtable max
-	 */
-	blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
-
-	if(!SHpnt->max_sectors)
-		/* driver imposes no hard sector transfer limit.
-		 * start at machine infinity initially */
-		SHpnt->max_sectors = SCSI_DEFAULT_MAX_SECTORS;
-
-	/* FIXME: we should also adjust this limit later on
-	 * after we know what the device capabilities are */
-	blk_queue_max_sectors(q, SHpnt->max_sectors);
-
-	if (!SHpnt->use_clustering)
-		clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
-
-	blk_queue_prep_rq(q, scsi_prep_fn);
-}
-
 #ifdef MODULE
 MODULE_PARM(scsi_logging_level, "i");
 MODULE_PARM_DESC(scsi_logging_level, "SCSI logging level; should be zero or nonzero");
--- a/drivers/scsi/scsi.h
+++ b/drivers/scsi/scsi.h
@@ -511,6 +511,7 @@ static inline void scsi_proc_host_rm(struct Scsi_Host *);
  */
 extern int scsi_add_single_device(uint, uint, uint, uint);
 extern int scsi_remove_single_device(uint, uint, uint, uint);
+extern u64 scsi_calculate_bounce_limit(struct Scsi_Host *);
 
 /*
  * Prototypes for functions in constants.c
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -28,7 +28,6 @@
 #include <linux/config.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/blk.h>
 
 #include "scsi.h"
@@ -365,34 +364,60 @@ static void print_inquiry(unsigned char *inq_result)
 	printk("\n");
 }
 
-/**
- * scsi_initialize_merge_fn() - initialize merge function for a host
- * @sd:	host descriptor
- */
-static void scsi_initialize_merge_fn(struct scsi_device *sd)
-{
-	request_queue_t *q = sd->request_queue;
-	struct Scsi_Host *sh = sd->host;
-	struct device *dev = scsi_get_device(sh);
-	u64 bounce_limit;
-
-	if (sh->highmem_io) {
-		if (dev && dev->dma_mask && PCI_DMA_BUS_IS_PHYS) {
-			bounce_limit = *dev->dma_mask;
-		} else {
-			/*
-			 * Platforms with virtual-DMA translation
-			 * hardware have no practical limit.
-			 */
-			bounce_limit = BLK_BOUNCE_ANY;
-		}
-	} else if (sh->unchecked_isa_dma) {
-		bounce_limit = BLK_BOUNCE_ISA;
-	} else {
-		bounce_limit = BLK_BOUNCE_HIGH;
-	}
-
-	blk_queue_bounce_limit(q, bounce_limit);
-}
+u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
+{
+	if (shost->highmem_io) {
+		struct device *host_dev = scsi_get_device(shost);
+
+		if (PCI_DMA_BUS_IS_PHYS && host_dev && host_dev->dma_mask)
+			return *host_dev->dma_mask;
+
+		/*
+		 * Platforms with virtual-DMA translation
+		 * hardware have no practical limit.
+		 */
+		return BLK_BOUNCE_ANY;
+	} else if (shost->unchecked_isa_dma)
+		return BLK_BOUNCE_ISA;
+
+	return BLK_BOUNCE_HIGH;
+}
+
+static request_queue_t *scsi_alloc_queue(struct Scsi_Host *shost)
+{
+	request_queue_t *q;
+
+	q = kmalloc(sizeof(*q), GFP_ATOMIC);
+	if (!q)
+		return NULL;
+	memset(q, 0, sizeof(*q));
+
+	if (!shost->max_sectors) {
+		/*
+		 * Driver imposes no hard sector transfer limit.
+		 * start at machine infinity initially.
+		 */
+		shost->max_sectors = SCSI_DEFAULT_MAX_SECTORS;
+	}
+
+	blk_init_queue(q, scsi_request_fn, shost->host_lock);
+	blk_queue_prep_rq(q, scsi_prep_fn);
+
+	blk_queue_max_hw_segments(q, shost->sg_tablesize);
+	blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
+	blk_queue_max_sectors(q, shost->max_sectors);
+	blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
+
+	if (!shost->use_clustering)
+		clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
+
+	return q;
+}
+
+static void scsi_free_queue(request_queue_t *q)
+{
+	blk_cleanup_queue(q);
+	kfree(q);
+}
 
 /**
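Editor's note, not part of the patch: the two new helpers above are meant
to be used as a strict pair, which the scsi_alloc_sdev()/scsi_free_sdev()
hunks below demonstrate.  A minimal sketch with invented function names
(example_attach_queue/example_detach_queue), assuming it lives inside
scsi_scan.c where the static helpers are visible:

	static int example_attach_queue(struct scsi_device *sdev,
					struct Scsi_Host *shost)
	{
		/* kmalloc()s the queue, runs blk_init_queue() and sets the limits. */
		request_queue_t *q = scsi_alloc_queue(shost);

		if (!q)
			return -ENOMEM;

		q->queuedata = sdev;
		sdev->request_queue = q;
		return 0;
	}

	static void example_detach_queue(struct scsi_device *sdev)
	{
		/* scsi_free_queue() is blk_cleanup_queue() plus kfree() of the queue. */
		if (sdev->request_queue)
			scsi_free_queue(sdev->request_queue);
	}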
@@ -435,19 +460,15 @@ static struct scsi_device *scsi_alloc_sdev(struct Scsi_Host *shost,
 	 */
 	sdev->borken = 1;
 
-	if(!q || *q == NULL) {
-		sdev->request_queue = kmalloc(sizeof(struct request_queue), GFP_ATOMIC);
-		if(sdev->request_queue == NULL) {
+	if (!q || *q == NULL) {
+		sdev->request_queue = scsi_alloc_queue(shost);
+		if (!sdev->request_queue)
 			goto out_bail;
-		}
-		memset(sdev->request_queue, 0,
-		       sizeof(struct request_queue));
-		scsi_initialize_queue(sdev, shost);
-		scsi_initialize_merge_fn(sdev);
 	} else {
 		sdev->request_queue = *q;
 		*q = NULL;
 	}
+
 	sdev->request_queue->queuedata = sdev;
 	scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
 	scsi_build_commandblocks(sdev);
@@ -488,13 +509,12 @@ static struct scsi_device *scsi_alloc_sdev(struct Scsi_Host *shost,
 	}
 out_bail:
 	printk(ALLOC_FAILURE_MSG, __FUNCTION__);
-	if(q && sdev->request_queue) {
+	if (q && sdev->request_queue) {
 		*q = sdev->request_queue;
 		sdev->request_queue = NULL;
-	} else if(sdev->request_queue) {
-		blk_cleanup_queue(sdev->request_queue);
-		kfree(sdev->request_queue);
-	}
+	} else if (sdev->request_queue)
+		scsi_free_queue(sdev->request_queue);
+
 	scsi_release_commandblocks(sdev);
 	kfree(sdev);
 	return NULL;
@@ -513,14 +533,12 @@ static void scsi_free_sdev(struct scsi_device *sdev)
 	list_del(&sdev->siblings);
 	list_del(&sdev->same_target_siblings);
 
-	if(sdev->request_queue != NULL) {
-		blk_cleanup_queue(sdev->request_queue);
-		kfree(sdev->request_queue);
-	}
+	if (sdev->request_queue)
+		scsi_free_queue(sdev->request_queue);
 	scsi_release_commandblocks(sdev);
 	if (sdev->host->hostt->slave_destroy)
 		sdev->host->hostt->slave_destroy(sdev);
-	if (sdev->inquiry != NULL)
+	if (sdev->inquiry)
 		kfree(sdev->inquiry);
 	kfree(sdev);
 }
@@ -1946,10 +1964,9 @@ void scsi_scan_host(struct Scsi_Host *shost)
 			scsi_scan_target(shost, &q, channel, order_id);
 		}
 	}
-	if(q) {
-		blk_cleanup_queue(q);
-		kfree(q);
-	}
+
+	if (q)
+		scsi_free_queue(q);
 }
 
 void scsi_forget_host(struct Scsi_Host *shost)
--- a/drivers/scsi/scsi_syms.c
+++ b/drivers/scsi/scsi_syms.c
@@ -97,6 +97,11 @@ EXPORT_SYMBOL(scsi_host_hn_get);
 EXPORT_SYMBOL(scsi_host_put);
 EXPORT_SYMBOL(scsi_device_types);
 
+/*
+ * This is for st to find the bounce limit
+ */
+EXPORT_SYMBOL(scsi_calculate_bounce_limit);
+
 /*
  * Externalize timers so that HBAs can safely start/restart commands.
  */
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -3764,21 +3764,9 @@ static int st_attach(Scsi_Device * SDp)
 	tpnt->nbr_partitions = 0;
 	tpnt->timeout = ST_TIMEOUT;
 	tpnt->long_timeout = ST_LONG_TIMEOUT;
 	tpnt->try_dio = try_direct_io && !SDp->host->unchecked_isa_dma;
-	bounce_limit = BLK_BOUNCE_HIGH; /* Borrowed from scsi_merge.c */
-	if (SDp->host->highmem_io) {
-		struct device *dev = scsi_get_device(SDp->host);
-		if (!PCI_DMA_BUS_IS_PHYS)
-			/* Platforms with virtual-DMA translation
-			 * hardware have no practical limit.
-			 */
-			bounce_limit = BLK_BOUNCE_ANY;
-		else if (dev && dev->dma_mask)
-			bounce_limit = *dev->dma_mask;
-	} else if (SDp->host->unchecked_isa_dma)
-		bounce_limit = BLK_BOUNCE_ISA;
-	bounce_limit >>= PAGE_SHIFT;
+
+	bounce_limit = scsi_calculate_bounce_limit(SDp->host) >> PAGE_SHIFT;
 	if (bounce_limit > ULONG_MAX)
 		bounce_limit = ULONG_MAX;
 	tpnt->max_pfn = bounce_limit;