Commit 842ca9a3 authored by Linus Torvalds

Merge bk://linux-scsi.bkbits.net/scsi-for-linus-2.6

into ppc970.osdl.org:/home/torvalds/v2.6/linux
parents c3dfb72c 76c81a56
......@@ -2,7 +2,8 @@
obj-y := core.o sys.o interface.o bus.o \
driver.o class.o class_simple.o platform.o \
cpu.o firmware.o init.o map.o dmapool.o
cpu.o firmware.o init.o map.o dmapool.o \
attribute_container.o transport_class.o
obj-y += power/
obj-$(CONFIG_FW_LOADER) += firmware_class.o
obj-$(CONFIG_NUMA) += node.o
......
/*
* attribute_container.c - implementation of a simple container for classes
*
* Copyright (c) 2005 - James Bottomley <James.Bottomley@steeleye.com>
*
* This file is licensed under GPLv2
*
* The basic idea here is to enable a device to be attached to an
* arbitrary number of classes without having to allocate storage for them.
* Instead, the contained classes select the devices they need to attach
* to via a matching function.
*/
#include <linux/attribute_container.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/module.h>
/* This is a private structure used to tie the classdev and the
* container. It should never be visible outside this file. */
struct internal_container {
struct list_head node;
struct attribute_container *cont;
struct class_device classdev;
};
/**
* attribute_container_classdev_to_container - given a classdev, return the container
*
* @classdev: the class device created by attribute_container_add_device.
*
* Returns the container associated with this classdev.
*/
struct attribute_container *
attribute_container_classdev_to_container(struct class_device *classdev)
{
struct internal_container *ic =
container_of(classdev, struct internal_container, classdev);
return ic->cont;
}
EXPORT_SYMBOL_GPL(attribute_container_classdev_to_container);
static struct list_head attribute_container_list;
static DECLARE_MUTEX(attribute_container_mutex);
/**
* attribute_container_register - register an attribute container
*
* @cont: The container to register. This must be allocated by the
* caller and should also be zeroed by it.
*/
int
attribute_container_register(struct attribute_container *cont)
{
INIT_LIST_HEAD(&cont->node);
INIT_LIST_HEAD(&cont->containers);
down(&attribute_container_mutex);
list_add_tail(&cont->node, &attribute_container_list);
up(&attribute_container_mutex);
return 0;
}
EXPORT_SYMBOL_GPL(attribute_container_register);
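/*
 * Illustrative sketch, not part of this commit: how a hypothetical
 * user of this API might define and register a container.  All names
 * here (example_class, example_match, example_cont) are assumptions
 * made for the sake of the example.
 */
static struct class example_class = {
	.name	= "example_container",
};

static int example_match(struct attribute_container *cont,
			 struct device *dev)
{
	/* hypothetical policy: attach to every device that sits on a bus */
	return dev->bus != NULL;
}

static struct attribute_container example_cont = {
	.class	= &example_class,
	.match	= example_match,
};

/* at init time: class_register(&example_class);
 *               attribute_container_register(&example_cont); */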
/**
* attribute_container_unregister - remove a container registration
*
* @cont: previously registered container to remove
*/
int
attribute_container_unregister(struct attribute_container *cont)
{
int retval = -EBUSY;
down(&attribute_container_mutex);
if (!list_empty(&cont->containers))
goto out;
retval = 0;
list_del(&cont->node);
out:
up(&attribute_container_mutex);
return retval;
}
EXPORT_SYMBOL_GPL(attribute_container_unregister);
/* private function used as class release */
static void attribute_container_release(struct class_device *classdev)
{
struct internal_container *ic
= container_of(classdev, struct internal_container, classdev);
struct device *dev = classdev->dev;
kfree(ic);
put_device(dev);
}
/**
* attribute_container_add_device - see if any container is interested in dev
*
* @dev: device to add attributes to
* @fn: function to trigger addition of class device.
*
* This function allocates storage for the class device(s) to be
* attached to dev (one for each matching attribute_container). If no
* fn is provided, the code will simply register the class device via
* class_device_add. If a function is provided, it is expected to add
* the class device at the appropriate time. One of the things that
* might be necessary is to allocate and initialise the classdev and
* then add it at a later time. To do this, call this routine for
* allocation and initialisation and then use
* attribute_container_device_trigger() to call class_device_add() on
* it. Note: after this, the class device contains a reference to dev
* which is not relinquished until the release of the classdev.
*/
void
attribute_container_add_device(struct device *dev,
int (*fn)(struct attribute_container *,
struct device *,
struct class_device *))
{
struct attribute_container *cont;
down(&attribute_container_mutex);
list_for_each_entry(cont, &attribute_container_list, node) {
struct internal_container *ic;
if (attribute_container_no_classdevs(cont))
continue;
if (!cont->match(cont, dev))
continue;
ic = kmalloc(sizeof(struct internal_container), GFP_KERNEL);
if (!ic) {
dev_printk(KERN_ERR, dev, "failed to allocate class container\n");
continue;
}
memset(ic, 0, sizeof(struct internal_container));
INIT_LIST_HEAD(&ic->node);
ic->cont = cont;
class_device_initialize(&ic->classdev);
ic->classdev.dev = get_device(dev);
ic->classdev.class = cont->class;
cont->class->release = attribute_container_release;
strcpy(ic->classdev.class_id, dev->bus_id);
if (fn)
fn(cont, dev, &ic->classdev);
else
class_device_add(&ic->classdev);
list_add_tail(&ic->node, &cont->containers);
}
up(&attribute_container_mutex);
}
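/*
 * Illustrative sketch, not part of this commit, of the two-phase add
 * described in the kerneldoc above: the first callback deliberately
 * leaves the classdev invisible, and a later trigger makes it visible
 * with class_device_add().  The names are hypothetical.
 */
static int example_defer_add(struct attribute_container *cont,
			     struct device *dev,
			     struct class_device *classdev)
{
	/* classdev is already initialised by the core; do nothing yet */
	return 0;
}

static int example_do_add(struct attribute_container *cont,
			  struct device *dev,
			  struct class_device *classdev)
{
	return class_device_add(classdev);
}

/*
 * phase 1: attribute_container_add_device(dev, example_defer_add);
 * phase 2: attribute_container_device_trigger(dev, example_do_add);
 */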
/**
* attribute_container_remove_device - make device eligible for removal.
*
* @dev: The generic device
* @fn: A function to call to remove the device
*
* This routine triggers device removal. If fn is NULL, then it is
* simply done via class_device_unregister (note that if something
* still has a reference to the classdev, then the memory occupied
* will not be freed until the classdev is released). If you want a
* two-phase release: remove from visibility and then delete the
* device, then you should use this routine with a fn that calls
* class_device_del() and then use
* attribute_container_device_trigger() to do the final put on the
* classdev.
*/
void
attribute_container_remove_device(struct device *dev,
void (*fn)(struct attribute_container *,
struct device *,
struct class_device *))
{
struct attribute_container *cont;
down(&attribute_container_mutex);
list_for_each_entry(cont, &attribute_container_list, node) {
struct internal_container *ic, *tmp;
if (attribute_container_no_classdevs(cont))
continue;
if (!cont->match(cont, dev))
continue;
list_for_each_entry_safe(ic, tmp, &cont->containers, node) {
if (dev != ic->classdev.dev)
continue;
list_del(&ic->node);
if (fn)
fn(cont, dev, &ic->classdev);
else
class_device_unregister(&ic->classdev);
}
}
up(&attribute_container_mutex);
}
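/*
 * Illustrative sketch, not part of this commit, of the two-phase
 * removal described in the kerneldoc above: first drop the classdev
 * from sysfs, then release its reference via a trigger.  The names
 * are hypothetical.
 */
static void example_remove_classdev(struct attribute_container *cont,
				    struct device *dev,
				    struct class_device *classdev)
{
	class_device_del(classdev);
}

static int example_final_put(struct attribute_container *cont,
			     struct device *dev,
			     struct class_device *classdev)
{
	class_device_put(classdev);
	return 0;
}

/*
 * phase 1: attribute_container_remove_device(dev, example_remove_classdev);
 * phase 2: attribute_container_device_trigger(dev, example_final_put);
 */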
EXPORT_SYMBOL_GPL(attribute_container_remove_device);
/**
* attribute_container_device_trigger - execute a trigger for each matching classdev
*
* @dev: The generic device to run the trigger for
* @fn: the function to execute for each classdev.
*
* This function executes a trigger when you need to know both
* the container and the classdev. If you only care about the
* container, then use attribute_container_trigger() instead.
*/
void
attribute_container_device_trigger(struct device *dev,
int (*fn)(struct attribute_container *,
struct device *,
struct class_device *))
{
struct attribute_container *cont;
down(&attribute_container_mutex);
list_for_each_entry(cont, &attribute_container_list, node) {
struct internal_container *ic, *tmp;
if (!cont->match(cont, dev))
continue;
list_for_each_entry_safe(ic, tmp, &cont->containers, node) {
if (dev == ic->classdev.dev)
fn(cont, dev, &ic->classdev);
}
}
up(&attribute_container_mutex);
}
EXPORT_SYMBOL_GPL(attribute_container_device_trigger);
/**
* attribute_container_trigger - trigger a function for each matching container
*
* @dev: The generic device to activate the trigger for
* @fn: the function to trigger
*
* This routine triggers a function that only needs to know the
* matching containers (not the classdev) associated with a device.
* It is more lightweight than attribute_container_device_trigger, so
* should be used in preference unless the triggering function
* actually needs to know the classdev.
*/
void
attribute_container_trigger(struct device *dev,
int (*fn)(struct attribute_container *,
struct device *))
{
struct attribute_container *cont;
down(&attribute_container_mutex);
list_for_each_entry(cont, &attribute_container_list, node) {
if (cont->match(cont, dev))
fn(cont, dev);
}
up(&attribute_container_mutex);
}
EXPORT_SYMBOL_GPL(attribute_container_trigger);
int __init
attribute_container_init(void)
{
INIT_LIST_HEAD(&attribute_container_list);
return 0;
}
......@@ -17,7 +17,7 @@ extern int firmware_init(void);
extern int platform_bus_init(void);
extern int system_bus_init(void);
extern int cpu_dev_init(void);
extern int attribute_container_init(void);
/**
* driver_init - initialize driver model.
*
......@@ -39,4 +39,5 @@ void __init driver_init(void)
platform_bus_init();
system_bus_init();
cpu_dev_init();
attribute_container_init();
}
/*
* transport_class.c - implementation of generic transport classes
* using attribute_containers
*
* Copyright (c) 2005 - James Bottomley <James.Bottomley@steeleye.com>
*
* This file is licensed under GPLv2
*
* The basic idea here is to allow any "device controller" (which
* would most often be a Host Bus Adapter) to use the services of one
* or more transport classes for performing transport specific
* services. Transport specific services are things that the generic
* command layer doesn't want to know about (speed settings, line
* conditioning, etc.), but which the user might be interested in.
* Thus, the HBAs use the routines exported by the transport classes
* to perform these functions. The transport classes export certain
* values to the user via sysfs using attribute containers.
*
* Note: because not every HBA will care about every transport
* attribute, there's a many-to-one relationship that goes like this:
*
* transport class<-----attribute container<----class device
*
* Usually the attribute container is per-HBA, but the design doesn't
* mandate that. Although most of the services will be specific to
* the actual external storage connection used by the HBA, the generic
* transport class is framed entirely in terms of generic devices to
* allow it to be used by any physical HBA in the system.
*/
#include <linux/attribute_container.h>
#include <linux/transport_class.h>
/**
* transport_class_register - register an initial transport class
*
* @tclass: a pointer to the transport class structure to be initialised
*
* The transport class contains an embedded class which is used to
* identify it. The caller should initialise this structure with
* zeros and the embedded class must then be initialised with the
* transport class's unique name. The DECLARE_TRANSPORT_CLASS() macro
* does this for you (declared classes still must be registered).
*
* Returns 0 on success or error on failure.
*/
int transport_class_register(struct transport_class *tclass)
{
return class_register(&tclass->class);
}
EXPORT_SYMBOL_GPL(transport_class_register);
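/*
 * Illustrative sketch, not part of this commit: a hypothetical
 * transport class set up by hand.  The DECLARE_TRANSPORT_CLASS()
 * macro mentioned above produces roughly this kind of
 * initialisation; all names below are assumptions.
 */
static int example_setup(struct device *dev)
{
	return 0;	/* allocate per-device transport state here */
}

static int example_configure(struct device *dev)
{
	return 0;	/* read back transport parameters here */
}

static int example_teardown(struct device *dev)
{
	return 0;	/* free per-device transport state here */
}

static struct transport_class example_transport_class = {
	.class = {
		.name	= "example_transport",
	},
	.setup		= example_setup,
	.configure	= example_configure,
	.remove		= example_teardown,
};

/* at module init: transport_class_register(&example_transport_class); */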
/**
* transport_class_unregister - unregister a previously registered class
*
* @tclass: The transport class to unregister
*
* Must be called prior to deallocating the memory for the transport
* class.
*/
void transport_class_unregister(struct transport_class *tclass)
{
class_unregister(&tclass->class);
}
EXPORT_SYMBOL_GPL(transport_class_unregister);
static int anon_transport_dummy_function(struct device *dev)
{
/* do nothing */
return 0;
}
/**
* anon_transport_class_register - register an anonymous class
*
* @atc: The anon transport class to register
*
* The anonymous transport class contains both a transport class and a
* container. The idea of an anonymous class is that it never
* actually has any device attributes associated with it (and thus
* saves on container storage). So it can only be used for triggering
* events. Zero the structure and then use DECLARE_ANON_TRANSPORT_CLASS() to
* initialise the anon transport class storage.
*/
int anon_transport_class_register(struct anon_transport_class *atc)
{
int error;
atc->container.class = &atc->tclass.class;
attribute_container_set_no_classdevs(&atc->container);
error = attribute_container_register(&atc->container);
if (error)
return error;
atc->tclass.setup = anon_transport_dummy_function;
atc->tclass.remove = anon_transport_dummy_function;
return 0;
}
EXPORT_SYMBOL_GPL(anon_transport_class_register);
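/*
 * Illustrative sketch, not part of this commit: a hypothetical
 * anonymous transport class.  It never carries attributes, so only
 * its match() and configure() hooks matter; setup and remove are
 * filled in with the dummy function by the register call above.
 * All names are assumptions.
 */
static int example_anon_match(struct attribute_container *cont,
			      struct device *dev)
{
	return 1;	/* hypothetically interested in every device */
}

static int example_anon_configure(struct device *dev)
{
	return 0;	/* react to the configure trigger here */
}

static struct anon_transport_class example_anon_class = {
	.tclass = {
		.configure	= example_anon_configure,
	},
	.container = {
		.match		= example_anon_match,
	},
};

/* at module init: anon_transport_class_register(&example_anon_class); */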
/**
* anon_transport_class_unregister - unregister an anon class
*
* @atc: Pointer to the anon transport class to unregister
*
* Must be called prior to deallocating the memory for the anon
* transport class.
*/
void anon_transport_class_unregister(struct anon_transport_class *atc)
{
attribute_container_unregister(&atc->container);
}
EXPORT_SYMBOL_GPL(anon_transport_class_unregister);
static int transport_setup_classdev(struct attribute_container *cont,
struct device *dev,
struct class_device *classdev)
{
struct transport_class *tclass = class_to_transport_class(cont->class);
if (tclass->setup)
tclass->setup(dev);
return 0;
}
/**
* transport_setup_device - declare a new dev for transport class association
* but don't make it visible yet.
*
* @dev: the generic device representing the entity being added
*
* Usually, dev represents some component in the HBA system (either
* the HBA itself or a device remote across the HBA bus). This
* routine is simply a trigger point to see if any set of transport
* classes wishes to associate with the added device. This allocates
* storage for the class device and initialises it, but does not yet
* add it to the system or add attributes to it (you do this with
* transport_add_device). If you have no need for separate setup
* and add operations, use transport_register_device (see
* transport_class.h).
*/
void transport_setup_device(struct device *dev)
{
attribute_container_add_device(dev, transport_setup_classdev);
}
EXPORT_SYMBOL_GPL(transport_setup_device);
static int transport_add_classdev(struct attribute_container *cont,
struct device *dev,
struct class_device *classdev)
{
struct class_device_attribute **attrs = cont->attrs;
int i, error;
error = class_device_add(classdev);
if (error)
return error;
for (i = 0; attrs[i]; i++) {
error = class_device_create_file(classdev, attrs[i]);
if (error)
return error;
}
return 0;
}
/**
* transport_add_device - declare a new dev for transport class association
*
* @dev: the generic device representing the entity being added
*
* Usually, dev represents some component in the HBA system (either
* the HBA itself or a device remote across the HBA bus). This
* routine is simply a trigger point used to add the device to the
* system and register attributes for it.
*/
void transport_add_device(struct device *dev)
{
attribute_container_device_trigger(dev, transport_add_classdev);
}
EXPORT_SYMBOL_GPL(transport_add_device);
static int transport_configure(struct attribute_container *cont,
struct device *dev)
{
struct transport_class *tclass = class_to_transport_class(cont->class);
if (tclass->configure)
tclass->configure(dev);
return 0;
}
/**
* transport_configure_device - configure an already set up device
*
* @dev: generic device representing device to be configured
*
* The idea of configure is simply to provide a point within the setup
* process to allow the transport class to extract information from a
* device after it has been setup. This is used in SCSI because we
* have to have a setup device to begin using the HBA, but after we
* send the initial inquiry, we use configure to extract the device
* parameters. The device need not have been added to be configured.
*/
void transport_configure_device(struct device *dev)
{
attribute_container_trigger(dev, transport_configure);
}
EXPORT_SYMBOL_GPL(transport_configure_device);
static int transport_remove_classdev(struct attribute_container *cont,
struct device *dev,
struct class_device *classdev)
{
struct transport_class *tclass = class_to_transport_class(cont->class);
if (tclass->remove)
tclass->remove(dev);
return 0;
}
/**
* transport_remove_device - remove the visibility of a device
*
* @dev: generic device to remove
*
* This call removes the visibility of the device (to the user from
* sysfs), but does not destroy it. To eliminate a device entirely
* you must also call transport_destroy_device. If you don't need to
* do remove and destroy as separate operations, use
* transport_unregister_device() (see transport_class.h) which will
* perform both calls for you.
*/
void transport_remove_device(struct device *dev)
{
attribute_container_device_trigger(dev, transport_remove_classdev);
}
EXPORT_SYMBOL_GPL(transport_remove_device);
static void transport_destroy_classdev(struct attribute_container *cont,
struct device *dev,
struct class_device *classdev)
{
struct transport_class *tclass = class_to_transport_class(cont->class);
if (tclass->remove != anon_transport_dummy_function)
class_device_put(classdev);
}
/**
* transport_destroy_device - destroy a removed device
*
* @dev: device to eliminate from the transport class.
*
* This call triggers the elimination of storage associated with the
* transport classdev. Note: all it really does is relinquish a
* reference to the classdev. The memory will not be freed until the
* last reference goes to zero. Note also that the classdev retains a
* reference count on dev, so dev too will remain for as long as the
* transport class device remains around.
*/
void transport_destroy_device(struct device *dev)
{
attribute_container_remove_device(dev, transport_destroy_classdev);
}
EXPORT_SYMBOL_GPL(transport_destroy_device);
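/*
 * Illustrative sketch, not part of this commit: the typical call
 * sequence a user (for instance a SCSI host driver) would drive for
 * a device using the helpers above.  Error handling is omitted and
 * the function name is hypothetical.
 */
static void example_transport_lifecycle(struct device *dev)
{
	transport_setup_device(dev);		/* allocate and initialise classdevs */
	transport_add_device(dev);		/* make them visible, create attributes */
	transport_configure_device(dev);	/* post-probe configuration hook */

	/* ... device in use ... */

	transport_remove_device(dev);		/* remove sysfs visibility */
	transport_destroy_device(dev);		/* drop the classdev references */
}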
......@@ -225,14 +225,14 @@ config SGIWD93_SCSI
config SCSI_DECNCR
tristate "DEC NCR53C94 Scsi Driver"
depends on DECSTATION && TC && SCSI
depends on MACH_DECSTATION && SCSI && TC
help
Say Y here to support the NCR53C94 SCSI controller chips on IOASIC
based TURBOchannel DECstations and TURBOchannel PMAZ-A cards.
config SCSI_DECSII
tristate "DEC SII Scsi Driver"
depends on DECSTATION && SCSI
depends on MACH_DECSTATION && SCSI && MIPS32
config BLK_DEV_3W_XXXX_RAID
tristate "3ware 5/6/7/8xxx ATA-RAID support"
......
......@@ -290,7 +290,7 @@ static inline void esp_advance_phase(Scsi_Cmnd *s, int newphase)
#endif
#ifdef DEBUG_ESP_CMDS
inline void esp_cmd(struct NCR_ESP *esp, struct ESP_regs *eregs,
static inline void esp_cmd(struct NCR_ESP *esp, struct ESP_regs *eregs,
unchar cmd)
{
esp->espcmdlog[esp->espcmdent] = cmd;
......@@ -505,7 +505,7 @@ static void esp_reset_esp(struct NCR_ESP *esp, struct ESP_regs *eregs)
}
/* This places the ESP into a known state at boot time. */
static void esp_bootup_reset(struct NCR_ESP *esp, struct ESP_regs *eregs)
void esp_bootup_reset(struct NCR_ESP *esp, struct ESP_regs *eregs)
{
volatile unchar trash;
......
......@@ -652,7 +652,7 @@ extern int nesps, esps_in_use, esps_running;
/* External functions */
extern void esp_cmd(struct NCR_ESP *esp, struct ESP_regs *eregs, unchar cmd);
extern void esp_bootup_reset(struct NCR_ESP *esp, struct ESP_regs *eregs);
extern struct NCR_ESP *esp_allocate(Scsi_Host_Template *, void *);
extern void esp_deallocate(struct NCR_ESP *);
extern void esp_release(void);
......
......@@ -48,7 +48,7 @@ struct NCR_Q720_private {
struct Scsi_Host *hosts[4];
};
struct scsi_host_template NCR_Q720_tpnt = {
static struct scsi_host_template NCR_Q720_tpnt = {
.module = THIS_MODULE,
.proc_name = "NCR_Q720",
};
......@@ -345,7 +345,7 @@ NCR_Q720_remove(struct device *dev)
static short NCR_Q720_id_table[] = { NCR_Q720_MCA_ID, 0 };
struct mca_driver NCR_Q720_driver = {
static struct mca_driver NCR_Q720_driver = {
.id_table = NCR_Q720_id_table,
.driver = {
.name = "NCR_Q720",
......
......@@ -1029,6 +1029,114 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid)
return 0;
}
static void synchronize_callback(void *context, struct fib *fibptr)
{
struct aac_synchronize_reply *synchronizereply;
struct scsi_cmnd *cmd;
cmd = context;
dprintk((KERN_DEBUG "synchronize_callback[cpu %d]: t = %ld.\n",
smp_processor_id(), jiffies));
BUG_ON(fibptr == NULL);
synchronizereply = fib_data(fibptr);
if (le32_to_cpu(synchronizereply->status) == CT_OK)
cmd->result = DID_OK << 16 |
COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
else {
struct scsi_device *sdev = cmd->device;
struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata;
u32 cid = ID_LUN_TO_CONTAINER(sdev->id, sdev->lun);
printk(KERN_WARNING
"synchronize_callback: synchronize failed, status = %d\n",
le32_to_cpu(synchronizereply->status));
cmd->result = DID_OK << 16 |
COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
set_sense((u8 *)&dev->fsa_dev[cid].sense_data,
HARDWARE_ERROR,
SENCODE_INTERNAL_TARGET_FAILURE,
ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
0, 0);
memcpy(cmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
min(sizeof(dev->fsa_dev[cid].sense_data),
sizeof(cmd->sense_buffer)));
}
fib_complete(fibptr);
fib_free(fibptr);
aac_io_done(cmd);
}
static int aac_synchronize(struct scsi_cmnd *scsicmd, int cid)
{
int status;
struct fib *cmd_fibcontext;
struct aac_synchronize *synchronizecmd;
struct scsi_cmnd *cmd;
struct scsi_device *sdev = scsicmd->device;
int active = 0;
unsigned long flags;
/*
* Wait for all commands to complete to this specific
* target (block).
*/
spin_lock_irqsave(&sdev->list_lock, flags);
list_for_each_entry(cmd, &sdev->cmd_list, list)
if (cmd != scsicmd && cmd->serial_number != 0) {
++active;
break;
}
spin_unlock_irqrestore(&sdev->list_lock, flags);
/*
* Yield the processor (requeue for later)
*/
if (active)
return SCSI_MLQUEUE_DEVICE_BUSY;
/*
* Allocate and initialize a Fib
*/
if (!(cmd_fibcontext =
fib_alloc((struct aac_dev *)scsicmd->device->host->hostdata)))
return SCSI_MLQUEUE_HOST_BUSY;
fib_init(cmd_fibcontext);
synchronizecmd = fib_data(cmd_fibcontext);
synchronizecmd->command = cpu_to_le32(VM_ContainerConfig);
synchronizecmd->type = cpu_to_le32(CT_FLUSH_CACHE);
synchronizecmd->cid = cpu_to_le32(cid);
synchronizecmd->count =
cpu_to_le32(sizeof(((struct aac_synchronize_reply *)NULL)->data));
/*
* Now send the Fib to the adapter
*/
status = fib_send(ContainerCommand,
cmd_fibcontext,
sizeof(struct aac_synchronize),
FsaNormal,
0, 1,
(fib_callback)synchronize_callback,
(void *)scsicmd);
/*
* Check that the command was queued to the controller
*/
if (status == -EINPROGRESS)
return 0;
printk(KERN_WARNING
"aac_synchronize: fib_send failed with status: %d.\n", status);
fib_complete(cmd_fibcontext);
fib_free(cmd_fibcontext);
return SCSI_MLQUEUE_HOST_BUSY;
}
/**
* aac_scsi_cmd() - Process SCSI command
......@@ -1274,6 +1382,11 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
ret = aac_write(scsicmd, cid);
spin_lock_irq(host->host_lock);
return ret;
case SYNCHRONIZE_CACHE:
/* Issue FIB to tell Firmware to flush its cache */
return aac_synchronize(scsicmd, cid);
default:
/*
* Unhandled commands
......
......@@ -1069,6 +1069,30 @@ struct aac_write_reply
u32 committed;
};
#define CT_FLUSH_CACHE 129
struct aac_synchronize {
u32 command; /* VM_ContainerConfig */
u32 type; /* CT_FLUSH_CACHE */
u32 cid;
u32 parm1;
u32 parm2;
u32 parm3;
u32 parm4;
u32 count; /* sizeof(((struct aac_synchronize_reply *)NULL)->data) */
};
struct aac_synchronize_reply {
u32 dummy0;
u32 dummy1;
u32 status; /* CT_OK */
u32 parm1;
u32 parm2;
u32 parm3;
u32 parm4;
u32 parm5;
u8 data[16];
};
struct aac_srb
{
u32 function;
......
......@@ -17,6 +17,8 @@
* data.
* 20001005 - Initialization fixes for 2.4.0-test9
* Florian Lohoff <flo@rfc822.org>
*
* Copyright (C) 2002, 2003 Maciej W. Rozycki
*/
#include <linux/kernel.h>
......@@ -26,59 +28,52 @@
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/proc_fs.h>
#include <linux/spinlock.h>
#include <linux/stat.h>
#include "scsi.h"
#include <scsi/scsi_host.h>
#include "NCR53C9x.h"
#include <asm/irq.h>
#include <asm/jazz.h>
#include <asm/jazzdma.h>
#include <asm/dma.h>
#include <asm/irq.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/dec/tc.h>
#include <asm/dec/interrupts.h>
#include <asm/dec/ioasic.h>
#include <asm/dec/ioasic_addrs.h>
#include <asm/dec/ioasic_ints.h>
#include <asm/dec/machtype.h>
#include <asm/dec/tc.h>
#define DEC_SCSI_SREG 0
#define DEC_SCSI_DMAREG 0x40000
#define DEC_SCSI_SRAM 0x80000
#define DEC_SCSI_DIAG 0xC0000
/*
* Once upon a time the pmaz code used to be working but
* it hasn't been maintained for quite some time.
* It isn't working anymore but I'll leave it here as a
* starting point. #define this and be prepared for tons
* of warnings and errors :)
*/
#include "scsi.h"
#include <scsi/scsi_host.h>
#include "NCR53C9x.h"
static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count);
static void dma_drain(struct NCR_ESP *esp);
static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd * sp);
static int dma_can_transfer(struct NCR_ESP *esp, struct scsi_cmnd *sp);
static void dma_dump_state(struct NCR_ESP *esp);
static void dma_init_read(struct NCR_ESP *esp, __u32 vaddress, int length);
static void dma_init_write(struct NCR_ESP *esp, __u32 vaddress, int length);
static void dma_init_read(struct NCR_ESP *esp, u32 vaddress, int length);
static void dma_init_write(struct NCR_ESP *esp, u32 vaddress, int length);
static void dma_ints_off(struct NCR_ESP *esp);
static void dma_ints_on(struct NCR_ESP *esp);
static int dma_irq_p(struct NCR_ESP *esp);
static int dma_ports_p(struct NCR_ESP *esp);
static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write);
static void dma_mmu_get_scsi_one(struct NCR_ESP *esp, Scsi_Cmnd * sp);
static void dma_mmu_get_scsi_sgl(struct NCR_ESP *esp, Scsi_Cmnd * sp);
static void dma_advance_sg(Scsi_Cmnd * sp);
static void dma_setup(struct NCR_ESP *esp, u32 addr, int count, int write);
static void dma_mmu_get_scsi_one(struct NCR_ESP *esp, struct scsi_cmnd * sp);
static void dma_mmu_get_scsi_sgl(struct NCR_ESP *esp, struct scsi_cmnd * sp);
static void dma_advance_sg(struct scsi_cmnd * sp);
static void pmaz_dma_drain(struct NCR_ESP *esp);
static void pmaz_dma_init_read(struct NCR_ESP *esp, __u32 vaddress, int length);
static void pmaz_dma_init_write(struct NCR_ESP *esp, __u32 vaddress, int length);
static void pmaz_dma_init_read(struct NCR_ESP *esp, u32 vaddress, int length);
static void pmaz_dma_init_write(struct NCR_ESP *esp, u32 vaddress, int length);
static void pmaz_dma_ints_off(struct NCR_ESP *esp);
static void pmaz_dma_ints_on(struct NCR_ESP *esp);
static void pmaz_dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write);
static void pmaz_dma_mmu_get_scsi_one(struct NCR_ESP *esp, Scsi_Cmnd * sp);
static void pmaz_dma_setup(struct NCR_ESP *esp, u32 addr, int count, int write);
static void pmaz_dma_mmu_get_scsi_one(struct NCR_ESP *esp, struct scsi_cmnd * sp);
#define TC_ESP_RAM_SIZE 0x20000
#define ESP_TGT_DMA_SIZE ((TC_ESP_RAM_SIZE/7) & ~(sizeof(int)-1))
......@@ -88,7 +83,7 @@ static void pmaz_dma_mmu_get_scsi_one(struct NCR_ESP *esp, Scsi_Cmnd * sp);
#define TC_ESP_DMAR_WRITE 0x80000000
#define TC_ESP_DMA_ADDR(x) ((unsigned)(x) & TC_ESP_DMAR_MASK)
__u32 esp_virt_buffer;
u32 esp_virt_buffer;
int scsi_current_length;
volatile unsigned char cmd_buffer[16];
......@@ -98,16 +93,11 @@ volatile unsigned char pmaz_cmd_buffer[16];
* via PIO.
*/
volatile unsigned long *scsi_dma_ptr;
volatile unsigned long *scsi_next_ptr;
volatile unsigned long *scsi_scr;
volatile unsigned long *ioasic_ssr;
volatile unsigned long *scsi_sdr0;
volatile unsigned long *scsi_sdr1;
static irqreturn_t scsi_dma_merr_int(int, void *, struct pt_regs *);
static irqreturn_t scsi_dma_err_int(int, void *, struct pt_regs *);
static irqreturn_t scsi_dma_int(int, void *, struct pt_regs *);
static void scsi_dma_int(int, void *, struct pt_regs *);
int dec_esp_detect(Scsi_Host_Template * tpnt);
static int dec_esp_detect(struct scsi_host_template * tpnt);
static int dec_esp_release(struct Scsi_Host *shost)
{
......@@ -119,9 +109,9 @@ static int dec_esp_release(struct Scsi_Host *shost)
return 0;
}
static Scsi_Host_Template driver_template = {
static struct scsi_host_template driver_template = {
.proc_name = "dec_esp",
.proc_info = &esp_proc_info,
.proc_info = esp_proc_info,
.name = "NCR53C94",
.detect = dec_esp_detect,
.slave_alloc = esp_slave_alloc,
......@@ -142,7 +132,7 @@ static Scsi_Host_Template driver_template = {
#include "scsi_module.c"
/***************************************************************** Detection */
int dec_esp_detect(Scsi_Host_Template * tpnt)
static int dec_esp_detect(Scsi_Host_Template * tpnt)
{
struct NCR_ESP *esp;
struct ConfigDev *esp_dev;
......@@ -152,17 +142,10 @@ int dec_esp_detect(Scsi_Host_Template * tpnt)
if (IOASIC) {
esp_dev = 0;
esp = esp_allocate(tpnt, (void *) esp_dev);
scsi_dma_ptr = (unsigned long *) (system_base + IOCTL + SCSI_DMA_P);
scsi_next_ptr = (unsigned long *) (system_base + IOCTL + SCSI_DMA_BP);
scsi_scr = (unsigned long *) (system_base + IOCTL + SCSI_SCR);
ioasic_ssr = (unsigned long *) (system_base + IOCTL + SSR);
scsi_sdr0 = (unsigned long *) (system_base + IOCTL + SCSI_SDR0);
scsi_sdr1 = (unsigned long *) (system_base + IOCTL + SCSI_SDR1);
/* Do command transfer with programmed I/O */
esp->do_pio_cmds = 1;
/* Required functions */
esp->dma_bytes_sent = &dma_bytes_sent;
esp->dma_can_transfer = &dma_can_transfer;
......@@ -185,7 +168,7 @@ int dec_esp_detect(Scsi_Host_Template * tpnt)
esp->dma_reset = 0;
esp->dma_led_off = 0;
esp->dma_led_on = 0;
/* virtual DMA functions */
esp->dma_mmu_get_scsi_one = &dma_mmu_get_scsi_one;
esp->dma_mmu_get_scsi_sgl = &dma_mmu_get_scsi_sgl;
......@@ -197,38 +180,42 @@ int dec_esp_detect(Scsi_Host_Template * tpnt)
/* SCSI chip speed */
esp->cfreq = 25000000;
/*
* we don't give the address of DMA channel, but the number
* of DMA channel, so we can use the jazz DMA functions
*
*/
esp->dregs = JAZZ_SCSI_DMA;
esp->dregs = 0;
/* ESP register base */
esp->eregs = (struct ESP_regs *) (system_base + SCSI);
esp->eregs = (struct ESP_regs *) (system_base + IOASIC_SCSI);
/* Set the command buffer */
esp->esp_command = (volatile unsigned char *) cmd_buffer;
/* get virtual dma address for command buffer */
esp->esp_command_dvma = (__u32) KSEG1ADDR((volatile unsigned char *) cmd_buffer);
esp->irq = SCSI_INT;
esp->esp_command_dvma = virt_to_phys(cmd_buffer);
esp->irq = dec_interrupt[DEC_IRQ_ASC];
esp->scsi_id = 7;
/* Check for differential SCSI-bus */
esp->diff = 0;
esp_initialize(esp);
if (request_irq(esp->irq, esp_intr, SA_INTERRUPT,
"NCR 53C94 SCSI", esp->ehost))
if (request_irq(esp->irq, esp_intr, SA_INTERRUPT,
"ncr53c94", esp->ehost))
goto err_dealloc;
if (request_irq(SCSI_DMA_INT, scsi_dma_int, SA_INTERRUPT,
"JUNKIO SCSI DMA", esp->ehost))
if (request_irq(dec_interrupt[DEC_IRQ_ASC_MERR],
scsi_dma_merr_int, SA_INTERRUPT,
"ncr53c94 error", esp->ehost))
goto err_free_irq;
if (request_irq(dec_interrupt[DEC_IRQ_ASC_ERR],
scsi_dma_err_int, SA_INTERRUPT,
"ncr53c94 overrun", esp->ehost))
goto err_free_irq_merr;
if (request_irq(dec_interrupt[DEC_IRQ_ASC_DMA],
scsi_dma_int, SA_INTERRUPT,
"ncr53c94 dma", esp->ehost))
goto err_free_irq_err;
}
if (TURBOCHANNEL) {
......@@ -241,7 +228,7 @@ int dec_esp_detect(Scsi_Host_Template * tpnt)
mem_start = get_tc_base_addr(slot);
/* Store base addr into esp struct */
esp->slot = mem_start;
esp->slot = PHYSADDR(mem_start);
esp->dregs = 0;
esp->eregs = (struct ESP_regs *) (mem_start + DEC_SCSI_SREG);
......@@ -251,7 +238,7 @@ int dec_esp_detect(Scsi_Host_Template * tpnt)
esp->esp_command = (volatile unsigned char *) pmaz_cmd_buffer;
/* get virtual dma address for command buffer */
esp->esp_command_dvma = (__u32) KSEG0ADDR((volatile unsigned char *) pmaz_cmd_buffer);
esp->esp_command_dvma = virt_to_phys(pmaz_cmd_buffer);
esp->cfreq = get_tc_speed();
......@@ -286,7 +273,7 @@ int dec_esp_detect(Scsi_Host_Template * tpnt)
esp->dma_mmu_release_scsi_sgl = 0;
esp->dma_advance_sg = 0;
if (request_irq(esp->irq, esp_intr, SA_INTERRUPT,
if (request_irq(esp->irq, esp_intr, SA_INTERRUPT,
"PMAZ_AA", esp->ehost)) {
esp_deallocate(esp);
release_tc_card(slot);
......@@ -305,231 +292,259 @@ int dec_esp_detect(Scsi_Host_Template * tpnt)
}
return 0;
err_free_irq:
err_free_irq_err:
free_irq(dec_interrupt[DEC_IRQ_ASC_ERR], scsi_dma_err_int);
err_free_irq_merr:
free_irq(dec_interrupt[DEC_IRQ_ASC_MERR], scsi_dma_merr_int);
err_free_irq:
free_irq(esp->irq, esp_intr);
err_dealloc:
err_dealloc:
esp_deallocate(esp);
return 0;
}
/************************************************************* DMA Functions */
static void scsi_dma_int(int irq, void *dev_id, struct pt_regs *regs)
static irqreturn_t scsi_dma_merr_int(int irq, void *dev_id, struct pt_regs *regs)
{
extern volatile unsigned int *isr;
unsigned int dummy;
if (*isr & SCSI_PTR_LOADED) {
/* next page */
*scsi_next_ptr = ((*scsi_dma_ptr + PAGE_SIZE) & PAGE_MASK) << 3;
*isr &= ~SCSI_PTR_LOADED;
} else {
if (*isr & SCSI_PAGOVRRUN)
*isr &= ~SCSI_PAGOVRRUN;
if (*isr & SCSI_DMA_MEMRDERR) {
printk("Got unexpected SCSI DMA Interrupt! < ");
printk("SCSI_DMA_MEMRDERR ");
printk(">\n");
*isr &= ~SCSI_DMA_MEMRDERR;
}
}
printk("Got unexpected SCSI DMA Interrupt! < ");
printk("SCSI_DMA_MEMRDERR ");
printk(">\n");
/*
* This routine will only work on IOASIC machines
* so we can avoid an indirect function call here
* and flush the writeback buffer the fast way
*/
dummy = *isr;
dummy = *isr;
return IRQ_HANDLED;
}
static irqreturn_t scsi_dma_err_int(int irq, void *dev_id, struct pt_regs *regs)
{
/* empty */
return IRQ_HANDLED;
}
static irqreturn_t scsi_dma_int(int irq, void *dev_id, struct pt_regs *regs)
{
u32 scsi_next_ptr;
scsi_next_ptr = ioasic_read(IO_REG_SCSI_DMA_P);
/* next page */
scsi_next_ptr = (((scsi_next_ptr >> 3) + PAGE_SIZE) & PAGE_MASK) << 3;
ioasic_write(IO_REG_SCSI_DMA_BP, scsi_next_ptr);
fast_iob();
return IRQ_HANDLED;
}
static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count)
{
return fifo_count;
return fifo_count;
}
static void dma_drain(struct NCR_ESP *esp)
{
unsigned long nw = *scsi_scr;
unsigned short *p = (unsigned short *)KSEG1ADDR((*scsi_dma_ptr) >> 3);
u32 nw, data0, data1, scsi_data_ptr;
u16 *p;
nw = ioasic_read(IO_REG_SCSI_SCR);
/*
/*
* Is there something in the dma buffers left?
*/
*/
if (nw) {
scsi_data_ptr = ioasic_read(IO_REG_SCSI_DMA_P) >> 3;
p = phys_to_virt(scsi_data_ptr);
switch (nw) {
case 1:
*p = (unsigned short) *scsi_sdr0;
data0 = ioasic_read(IO_REG_SCSI_SDR0);
p[0] = data0 & 0xffff;
break;
case 2:
*p++ = (unsigned short) (*scsi_sdr0);
*p = (unsigned short) ((*scsi_sdr0) >> 16);
data0 = ioasic_read(IO_REG_SCSI_SDR0);
p[0] = data0 & 0xffff;
p[1] = (data0 >> 16) & 0xffff;
break;
case 3:
*p++ = (unsigned short) (*scsi_sdr0);
*p++ = (unsigned short) ((*scsi_sdr0) >> 16);
*p = (unsigned short) (*scsi_sdr1);
data0 = ioasic_read(IO_REG_SCSI_SDR0);
data1 = ioasic_read(IO_REG_SCSI_SDR1);
p[0] = data0 & 0xffff;
p[1] = (data0 >> 16) & 0xffff;
p[2] = data1 & 0xffff;
break;
default:
printk("Strange: %d words in dma buffer left\n", (int) nw);
printk("Strange: %d words in dma buffer left\n", nw);
break;
}
}
}
static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd * sp)
static int dma_can_transfer(struct NCR_ESP *esp, struct scsi_cmnd * sp)
{
return sp->SCp.this_residual;
}
static void dma_dump_state(struct NCR_ESP *esp)
{
/*
ESPLOG(("esp%d: dma -- enable <%08x> residue <%08x\n",
esp->esp_id, vdma_get_enable((int)esp->dregs), vdma_get_resdiue((int)esp->dregs)));
*/
}
static void dma_init_read(struct NCR_ESP *esp, __u32 vaddress, int length)
static void dma_init_read(struct NCR_ESP *esp, u32 vaddress, int length)
{
extern volatile unsigned int *isr;
unsigned int dummy;
u32 scsi_next_ptr, ioasic_ssr;
unsigned long flags;
if (vaddress & 3)
panic("dec_efs.c: unable to handle partial word transfers, yet...");
panic("dec_esp.c: unable to handle partial word transfers, yet...");
dma_cache_wback_inv((unsigned long) phys_to_virt(vaddress), length);
*ioasic_ssr &= ~SCSI_DMA_EN;
*scsi_scr = 0;
*scsi_dma_ptr = vaddress << 3;
spin_lock_irqsave(&ioasic_ssr_lock, flags);
fast_mb();
ioasic_ssr = ioasic_read(IO_REG_SSR);
ioasic_ssr &= ~IO_SSR_SCSI_DMA_EN;
ioasic_write(IO_REG_SSR, ioasic_ssr);
fast_wmb();
ioasic_write(IO_REG_SCSI_SCR, 0);
ioasic_write(IO_REG_SCSI_DMA_P, vaddress << 3);
/* prepare for next page */
*scsi_next_ptr = ((vaddress + PAGE_SIZE) & PAGE_MASK) << 3;
*ioasic_ssr |= (SCSI_DMA_DIR | SCSI_DMA_EN);
scsi_next_ptr = ((vaddress + PAGE_SIZE) & PAGE_MASK) << 3;
ioasic_write(IO_REG_SCSI_DMA_BP, scsi_next_ptr);
/*
* see above
*/
dummy = *isr;
dummy = *isr;
ioasic_ssr |= (IO_SSR_SCSI_DMA_DIR | IO_SSR_SCSI_DMA_EN);
fast_wmb();
ioasic_write(IO_REG_SSR, ioasic_ssr);
fast_iob();
spin_unlock_irqrestore(&ioasic_ssr_lock, flags);
}
static void dma_init_write(struct NCR_ESP *esp, __u32 vaddress, int length)
static void dma_init_write(struct NCR_ESP *esp, u32 vaddress, int length)
{
extern volatile unsigned int *isr;
unsigned int dummy;
u32 scsi_next_ptr, ioasic_ssr;
unsigned long flags;
if (vaddress & 3)
panic("dec_efs.c: unable to handle partial word transfers, yet...");
panic("dec_esp.c: unable to handle partial word transfers, yet...");
dma_cache_wback_inv((unsigned long) phys_to_virt(vaddress), length);
*ioasic_ssr &= ~(SCSI_DMA_DIR | SCSI_DMA_EN);
*scsi_scr = 0;
*scsi_dma_ptr = vaddress << 3;
spin_lock_irqsave(&ioasic_ssr_lock, flags);
fast_mb();
ioasic_ssr = ioasic_read(IO_REG_SSR);
ioasic_ssr &= ~(IO_SSR_SCSI_DMA_DIR | IO_SSR_SCSI_DMA_EN);
ioasic_write(IO_REG_SSR, ioasic_ssr);
fast_wmb();
ioasic_write(IO_REG_SCSI_SCR, 0);
ioasic_write(IO_REG_SCSI_DMA_P, vaddress << 3);
/* prepare for next page */
*scsi_next_ptr = ((vaddress + PAGE_SIZE) & PAGE_MASK) << 3;
*ioasic_ssr |= SCSI_DMA_EN;
scsi_next_ptr = ((vaddress + PAGE_SIZE) & PAGE_MASK) << 3;
ioasic_write(IO_REG_SCSI_DMA_BP, scsi_next_ptr);
/*
* see above
*/
dummy = *isr;
dummy = *isr;
ioasic_ssr |= IO_SSR_SCSI_DMA_EN;
fast_wmb();
ioasic_write(IO_REG_SSR, ioasic_ssr);
fast_iob();
spin_unlock_irqrestore(&ioasic_ssr_lock, flags);
}
static void dma_ints_off(struct NCR_ESP *esp)
{
disable_irq(SCSI_DMA_INT);
disable_irq(dec_interrupt[DEC_IRQ_ASC_DMA]);
}
static void dma_ints_on(struct NCR_ESP *esp)
{
enable_irq(SCSI_DMA_INT);
enable_irq(dec_interrupt[DEC_IRQ_ASC_DMA]);
}
static int dma_irq_p(struct NCR_ESP *esp)
{
return (esp->eregs->esp_status & ESP_STAT_INTR);
return (esp->eregs->esp_status & ESP_STAT_INTR);
}
static int dma_ports_p(struct NCR_ESP *esp)
{
/*
* FIXME: what's this good for?
*/
/*
* FIXME: what's this good for?
*/
return 1;
}
static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write)
static void dma_setup(struct NCR_ESP *esp, u32 addr, int count, int write)
{
/*
* On the Sparc, DMA_ST_WRITE means "move data from device to memory"
* so when (write) is true, it actually means READ!
*/
if (write) {
dma_init_read(esp, addr, count);
} else {
dma_init_write(esp, addr, count);
}
/*
* DMA_ST_WRITE means "move data from device to memory"
* so when (write) is true, it actually means READ!
*/
if (write)
dma_init_read(esp, addr, count);
else
dma_init_write(esp, addr, count);
}
/*
* These aren't used yet
*/
static void dma_mmu_get_scsi_one(struct NCR_ESP *esp, Scsi_Cmnd * sp)
static void dma_mmu_get_scsi_one(struct NCR_ESP *esp, struct scsi_cmnd * sp)
{
sp->SCp.ptr = (char *)PHYSADDR(sp->SCp.buffer);
sp->SCp.ptr = (char *)virt_to_phys(sp->request_buffer);
}
static void dma_mmu_get_scsi_sgl(struct NCR_ESP *esp, Scsi_Cmnd * sp)
static void dma_mmu_get_scsi_sgl(struct NCR_ESP *esp, struct scsi_cmnd * sp)
{
int sz = sp->SCp.buffers_residual;
struct mmu_sglist *sg = (struct mmu_sglist *) sp->SCp.buffer;
while (sz >= 0) {
sg[sz].dvma_addr = PHYSADDR(sg[sz].addr);
sz--;
}
sp->SCp.ptr = (char *) ((unsigned long) sp->SCp.buffer->dvma_address);
int sz = sp->SCp.buffers_residual;
struct scatterlist *sg = sp->SCp.buffer;
while (sz >= 0) {
sg[sz].dma_address = page_to_phys(sg[sz].page) + sg[sz].offset;
sz--;
}
sp->SCp.ptr = (char *)(sp->SCp.buffer->dma_address);
}
static void dma_advance_sg(Scsi_Cmnd * sp)
static void dma_advance_sg(struct scsi_cmnd * sp)
{
sp->SCp.ptr = (char *) ((unsigned long) sp->SCp.buffer->dvma_address);
sp->SCp.ptr = (char *)(sp->SCp.buffer->dma_address);
}
static void pmaz_dma_drain(struct NCR_ESP *esp)
{
memcpy((void *) (KSEG0ADDR(esp_virt_buffer)),
(void *) ( esp->slot + DEC_SCSI_SRAM + ESP_TGT_DMA_SIZE),
memcpy(phys_to_virt(esp_virt_buffer),
(void *)KSEG1ADDR(esp->slot + DEC_SCSI_SRAM + ESP_TGT_DMA_SIZE),
scsi_current_length);
}
static void pmaz_dma_init_read(struct NCR_ESP *esp, __u32 vaddress, int length)
static void pmaz_dma_init_read(struct NCR_ESP *esp, u32 vaddress, int length)
{
volatile int *dmareg = (volatile int *) (esp->slot + DEC_SCSI_DMAREG);
volatile u32 *dmareg =
(volatile u32 *)KSEG1ADDR(esp->slot + DEC_SCSI_DMAREG);
if (length > ESP_TGT_DMA_SIZE)
length = ESP_TGT_DMA_SIZE;
*dmareg = TC_ESP_DMA_ADDR(esp->slot + DEC_SCSI_SRAM + ESP_TGT_DMA_SIZE);
*dmareg = TC_ESP_DMA_ADDR(ESP_TGT_DMA_SIZE);
iob();
esp_virt_buffer = vaddress;
scsi_current_length = length;
}
static void pmaz_dma_init_write(struct NCR_ESP *esp, __u32 vaddress, int length)
static void pmaz_dma_init_write(struct NCR_ESP *esp, u32 vaddress, int length)
{
volatile int *dmareg = (volatile int *) ( esp->slot + DEC_SCSI_DMAREG );
volatile u32 *dmareg =
(volatile u32 *)KSEG1ADDR(esp->slot + DEC_SCSI_DMAREG);
memcpy((void *)(esp->slot + DEC_SCSI_SRAM + ESP_TGT_DMA_SIZE),
(void *)KSEG0ADDR(vaddress), length);
memcpy((void *)KSEG1ADDR(esp->slot + DEC_SCSI_SRAM + ESP_TGT_DMA_SIZE),
phys_to_virt(vaddress), length);
*dmareg = TC_ESP_DMAR_WRITE |
TC_ESP_DMA_ADDR(esp->slot + DEC_SCSI_SRAM + ESP_TGT_DMA_SIZE);
wmb();
*dmareg = TC_ESP_DMAR_WRITE | TC_ESP_DMA_ADDR(ESP_TGT_DMA_SIZE);
iob();
}
static void pmaz_dma_ints_off(struct NCR_ESP *esp)
......@@ -540,20 +555,19 @@ static void pmaz_dma_ints_on(struct NCR_ESP *esp)
{
}
static void pmaz_dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write)
static void pmaz_dma_setup(struct NCR_ESP *esp, u32 addr, int count, int write)
{
/*
* On the Sparc, DMA_ST_WRITE means "move data from device to memory"
* DMA_ST_WRITE means "move data from device to memory"
* so when (write) is true, it actually means READ!
*/
if (write) {
if (write)
pmaz_dma_init_read(esp, addr, count);
} else {
else
pmaz_dma_init_write(esp, addr, count);
}
}
static void pmaz_dma_mmu_get_scsi_one(struct NCR_ESP *esp, Scsi_Cmnd * sp)
static void pmaz_dma_mmu_get_scsi_one(struct NCR_ESP *esp, struct scsi_cmnd * sp)
{
sp->SCp.ptr = (char *)KSEG0ADDR((sp->request_buffer));
sp->SCp.ptr = (char *)virt_to_phys(sp->request_buffer);
}
/* BSDI osd_util.h,v 1.8 1998/06/03 19:14:58 karels Exp */
/*
* Copyright (c) 1996-1999 Distributed Processing Technology Corporation
* All rights reserved.
*
* Redistribution and use in source form, with or without modification, are
* permitted provided that redistributions of source code must retain the
* above copyright notice, this list of conditions and the following disclaimer.
*
* This software is provided `as is' by Distributed Processing Technology and
* any express or implied warranties, including, but not limited to, the
* implied warranties of merchantability and fitness for a particular purpose,
* are disclaimed. In no event shall Distributed Processing Technology be
* liable for any direct, indirect, incidental, special, exemplary or
* consequential damages (including, but not limited to, procurement of
* substitute goods or services; loss of use, data, or profits; or business
* interruptions) however caused and on any theory of liability, whether in
* contract, strict liability, or tort (including negligence or otherwise)
* arising in any way out of the use of this driver software, even if advised
* of the possibility of such damage.
*
*/
#ifndef __OSD_UTIL_H
#define __OSD_UTIL_H
/*File - OSD_UTIL.H
****************************************************************************
*
*Description:
*
* This file contains defines and function prototypes that are
*operating system dependent. The resources defined in this file
*are not specific to any particular application.
*
*Copyright Distributed Processing Technology, Corp.
* 140 Candace Dr.
* Maitland, Fl. 32751 USA
* Phone: (407) 830-5522 Fax: (407) 260-5366
* All Rights Reserved
*
*Author: Doug Anderson
*Date: 1/7/94
*
*Editors:
*
*Remarks:
*
*
*****************************************************************************/
/*Definitions - Defines & Constants ----------------------------------------- */
/*----------------------------- */
/* Operating system selections: */
/*----------------------------- */
/*#define _DPT_MSDOS */
/*#define _DPT_WIN_3X */
/*#define _DPT_WIN_4X */
/*#define _DPT_WIN_NT */
/*#define _DPT_NETWARE */
/*#define _DPT_OS2 */
/*#define _DPT_SCO */
/*#define _DPT_UNIXWARE */
/*#define _DPT_SOLARIS */
/*#define _DPT_NEXTSTEP */
/*#define _DPT_BANYAN */
/*-------------------------------- */
/* Include the OS specific defines */
/*-------------------------------- */
/*#define OS_SELECTION From Above List */
/*#define SEMAPHORE_T ??? */
/*#define DLL_HANDLE_T ??? */
#if (defined(KERNEL) && (defined(__FreeBSD__) || defined(__bsdi__)))
# include "i386/isa/dpt_osd_defs.h"
#else
# include "osd_defs.h"
#endif
#ifndef DPT_UNALIGNED
#define DPT_UNALIGNED
#endif
#ifndef DPT_EXPORT
#define DPT_EXPORT
#endif
#ifndef DPT_IMPORT
#define DPT_IMPORT
#endif
#ifndef DPT_RUNTIME_IMPORT
#define DPT_RUNTIME_IMPORT DPT_IMPORT
#endif
/*--------------------- */
/* OS dependent defines */
/*--------------------- */
#if defined (_DPT_MSDOS) || defined (_DPT_WIN_3X)
#define _DPT_16_BIT
#else
#define _DPT_32_BIT
#endif
#if defined (_DPT_SCO) || defined (_DPT_UNIXWARE) || defined (_DPT_SOLARIS) || defined (_DPT_AIX) || defined (SNI_MIPS) || defined (_DPT_BSDI) || defined (_DPT_FREE_BSD) || defined(_DPT_LINUX)
#define _DPT_UNIX
#endif
#if defined (_DPT_WIN_3x) || defined (_DPT_WIN_4X) || defined (_DPT_WIN_NT) \
|| defined (_DPT_OS2)
#define _DPT_DLL_SUPPORT
#endif
#if !defined (_DPT_MSDOS) && !defined (_DPT_WIN_3X) && !defined (_DPT_NETWARE)
#define _DPT_PREEMPTIVE
#endif
#if !defined (_DPT_MSDOS) && !defined (_DPT_WIN_3X)
#define _DPT_MULTI_THREADED
#endif
#if !defined (_DPT_MSDOS)
#define _DPT_MULTI_TASKING
#endif
/* These exist for platforms that */
/* chunk when accessing mis-aligned */
/* data */
#if defined (SNI_MIPS) || defined (_DPT_SOLARIS)
#if defined (_DPT_BIG_ENDIAN)
#if !defined (_DPT_STRICT_ALIGN)
#define _DPT_STRICT_ALIGN
#endif
#endif
#endif
/* Determine if in C or C++ mode */
#ifdef __cplusplus
#define _DPT_CPP
#else
#define _DPT_C
#endif
/*-------------------------------------------------------------------*/
/* Under Solaris the compiler refuses to accept code like: */
/* { {"DPT"}, 0, NULL .... }, */
/* and complains about the {"DPT"} part by saying "cannot use { } */
/* to initialize char*". */
/* */
/* By defining these ugly macros we can get around this and also */
/* not have to copy and #ifdef large sections of code. I know that */
/* these macros are *really* ugly, but they should help reduce */
/* maintenance in the long run. */
/* */
/*-------------------------------------------------------------------*/
#if !defined (DPTSQO)
#if defined (_DPT_SOLARIS)
#define DPTSQO
#define DPTSQC
#else
#define DPTSQO {
#define DPTSQC }
#endif /* solaris */
#endif /* DPTSQO */
/*---------------------- */
/* OS dependent typedefs */
/*---------------------- */
#if defined (_DPT_MSDOS) || defined (_DPT_SCO)
#define BYTE unsigned char
#define WORD unsigned short
#endif
#ifndef _DPT_TYPEDEFS
#define _DPT_TYPEDEFS
typedef unsigned char uCHAR;
typedef unsigned short uSHORT;
typedef unsigned int uINT;
typedef unsigned long uLONG;
typedef union {
uCHAR u8[4];
uSHORT u16[2];
uLONG u32;
} access_U;
#endif
#if !defined (NULL)
#define NULL 0
#endif
/*Prototypes - function ----------------------------------------------------- */
#ifdef __cplusplus
extern "C" { /* Declare all these functions as "C" functions */
#endif
/*------------------------ */
/* Byte reversal functions */
/*------------------------ */
/* Reverses the byte ordering of a 2 byte variable */
#if (!defined(osdSwap2))
uSHORT osdSwap2(DPT_UNALIGNED uSHORT *);
#endif // !osdSwap2
/* Reverses the byte ordering of a 4 byte variable and shifts left 8 bits */
#if (!defined(osdSwap3))
uLONG osdSwap3(DPT_UNALIGNED uLONG *);
#endif // !osdSwap3
#ifdef _DPT_NETWARE
#include "novpass.h" /* For DPT_Bswapl() prototype */
/* Inline the byte swap */
#ifdef __cplusplus
inline uLONG osdSwap4(uLONG *inLong) {
return *inLong = DPT_Bswapl(*inLong);
}
#else
#define osdSwap4(inLong) DPT_Bswapl(inLong)
#endif // cplusplus
#else
/* Reverses the byte ordering of a 4 byte variable */
# if (!defined(osdSwap4))
uLONG osdSwap4(DPT_UNALIGNED uLONG *);
# endif // !osdSwap4
/* The following functions ALWAYS swap regardless of the *
* presence of DPT_BIG_ENDIAN */
uSHORT trueSwap2(DPT_UNALIGNED uSHORT *);
uLONG trueSwap4(DPT_UNALIGNED uLONG *);
#endif // netware
/*-------------------------------------*
* Network order swap functions *
* *
* These functions/macros will be used *
* by the structure insert()/extract() *
* functions. *
*
* We will enclose all structure *
* portability modifications inside *
* #ifdefs. When we are ready, we *
* will #define DPT_PORTABLE to begin *
* using the modifications. *
*-------------------------------------*/
uLONG netSwap4(uLONG val);
#if defined (_DPT_BIG_ENDIAN)
// for big-endian we need to swap
#ifndef NET_SWAP_2
#define NET_SWAP_2(x) (((x) >> 8) | ((x) << 8))
#endif // NET_SWAP_2
#ifndef NET_SWAP_4
#define NET_SWAP_4(x) netSwap4((x))
#endif // NET_SWAP_4
#else
// for little-endian we don't need to do anything
#ifndef NET_SWAP_2
#define NET_SWAP_2(x) (x)
#endif // NET_SWAP_2
#ifndef NET_SWAP_4
#define NET_SWAP_4(x) (x)
#endif // NET_SWAP_4
#endif // big endian
/*----------------------------------- */
/* Run-time loadable module functions */
/*----------------------------------- */
/* Loads the specified run-time loadable DLL */
DLL_HANDLE_T osdLoadModule(uCHAR *);
/* Unloads the specified run-time loadable DLL */
uSHORT osdUnloadModule(DLL_HANDLE_T);
/* Returns a pointer to a function inside a run-time loadable DLL */
void * osdGetFnAddr(DLL_HANDLE_T,uCHAR *);
/*--------------------------------------- */
/* Mutually exclusive semaphore functions */
/*--------------------------------------- */
/* Create a named semaphore */
SEMAPHORE_T osdCreateNamedSemaphore(char *);
/* Create a mutually exclusive semaphore */
SEMAPHORE_T osdCreateSemaphore(void);
/* create an event semaphore */
SEMAPHORE_T osdCreateEventSemaphore(void);
/* create a named event semaphore */
SEMAPHORE_T osdCreateNamedEventSemaphore(char *);
/* Destroy the specified mutually exclusive semaphore object */
uSHORT osdDestroySemaphore(SEMAPHORE_T);
/* Request access to the specified mutually exclusive semaphore */
uLONG osdRequestSemaphore(SEMAPHORE_T,uLONG);
/* Release access to the specified mutually exclusive semaphore */
uSHORT osdReleaseSemaphore(SEMAPHORE_T);
/* wait for a event to happen */
uLONG osdWaitForEventSemaphore(SEMAPHORE_T, uLONG);
/* signal an event */
uLONG osdSignalEventSemaphore(SEMAPHORE_T);
/* reset the event */
uLONG osdResetEventSemaphore(SEMAPHORE_T);
/*----------------- */
/* Thread functions */
/*----------------- */
/* Releases control to the task switcher in non-preemptive */
/* multitasking operating systems. */
void osdSwitchThreads(void);
/* Starts a thread function */
uLONG osdStartThread(void *,void *);
/* what is my thread id */
uLONG osdGetThreadID(void);
/* wakes up the specified thread */
void osdWakeThread(uLONG);
/* osd sleep for x milliseconds */
void osdSleep(uLONG);
#define DPT_THREAD_PRIORITY_LOWEST 0x00
#define DPT_THREAD_PRIORITY_NORMAL 0x01
#define DPT_THREAD_PRIORITY_HIGHEST 0x02
uCHAR osdSetThreadPriority(uLONG tid, uCHAR priority);
#ifdef __cplusplus
} /* end the extern "C" declaration */
#endif
#endif /* osd_util_h */
......@@ -1178,7 +1178,6 @@ static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout)
// dangerous.
status = -ETIME;
}
schedule_timeout(timeout*HZ);
}
if(pHba->host)
spin_lock_irq(pHba->host->host_lock);
......
......@@ -28,6 +28,7 @@
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/transport_class.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
......@@ -79,15 +80,8 @@ void scsi_remove_host(struct Scsi_Host *shost)
set_bit(SHOST_DEL, &shost->shost_state);
if (shost->transportt->host_destroy)
shost->transportt->host_destroy(shost);
transport_unregister_device(&shost->shost_gendev);
class_device_unregister(&shost->shost_classdev);
if (shost->transport_classdev.class) {
if (shost->transportt->host_statistics)
sysfs_remove_group(&shost->transport_classdev.kobj,
shost->transportt->host_statistics);
class_device_unregister(&shost->transport_classdev);
}
device_del(&shost->shost_gendev);
}
EXPORT_SYMBOL(scsi_remove_host);
......@@ -135,9 +129,6 @@ int scsi_add_host(struct Scsi_Host *shost, struct device *dev)
GFP_KERNEL)) == NULL)
goto out_del_classdev;
if (shost->transportt->host_setup)
shost->transportt->host_setup(shost);
error = scsi_sysfs_add_host(shost);
if (error)
goto out_destroy_host;
......@@ -146,8 +137,6 @@ int scsi_add_host(struct Scsi_Host *shost, struct device *dev)
return error;
out_destroy_host:
if (shost->transportt->host_destroy)
shost->transportt->host_destroy(shost);
out_del_classdev:
class_device_del(&shost->shost_classdev);
out_del_gendev:
......@@ -397,3 +386,9 @@ void scsi_exit_hosts(void)
{
class_unregister(&shost_class);
}
int scsi_is_host_device(const struct device *dev)
{
return dev->release == scsi_host_dev_release;
}
EXPORT_SYMBOL(scsi_is_host_device);
......@@ -6,6 +6,7 @@
* jazz_esp is based on David S. Miller's ESP driver and cyber_esp
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/types.h>
......@@ -27,7 +28,7 @@
#include <asm/pgtable.h>
static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count);
static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp);
static int dma_can_transfer(struct NCR_ESP *esp, struct scsi_cmnd *sp);
static void dma_dump_state(struct NCR_ESP *esp);
static void dma_init_read(struct NCR_ESP *esp, __u32 vaddress, int length);
static void dma_init_write(struct NCR_ESP *esp, __u32 vaddress, int length);
......@@ -36,11 +37,11 @@ static void dma_ints_on(struct NCR_ESP *esp);
static int dma_irq_p(struct NCR_ESP *esp);
static int dma_ports_p(struct NCR_ESP *esp);
static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write);
static void dma_mmu_get_scsi_one (struct NCR_ESP *esp, Scsi_Cmnd *sp);
static void dma_mmu_get_scsi_sgl (struct NCR_ESP *esp, Scsi_Cmnd *sp);
static void dma_mmu_release_scsi_one (struct NCR_ESP *esp, Scsi_Cmnd *sp);
static void dma_mmu_release_scsi_sgl (struct NCR_ESP *esp, Scsi_Cmnd *sp);
static void dma_advance_sg (Scsi_Cmnd *sp);
static void dma_mmu_get_scsi_one (struct NCR_ESP *esp, struct scsi_cmnd *sp);
static void dma_mmu_get_scsi_sgl (struct NCR_ESP *esp, struct scsi_cmnd *sp);
static void dma_mmu_release_scsi_one (struct NCR_ESP *esp, struct scsi_cmnd *sp);
static void dma_mmu_release_scsi_sgl (struct NCR_ESP *esp, struct scsi_cmnd *sp);
static void dma_advance_sg (struct scsi_cmnd *sp);
static void dma_led_off(struct NCR_ESP *);
static void dma_led_on(struct NCR_ESP *);
......@@ -52,7 +53,7 @@ static volatile unsigned char cmd_buffer[16];
*/
/***************************************************************** Detection */
int jazz_esp_detect(Scsi_Host_Template *tpnt)
static int jazz_esp_detect(struct scsi_host_template *tpnt)
{
struct NCR_ESP *esp;
struct ConfigDev *esp_dev;
......@@ -115,7 +116,7 @@ int jazz_esp_detect(Scsi_Host_Template *tpnt)
esp->esp_command = (volatile unsigned char *)cmd_buffer;
/* get virtual dma address for command buffer */
esp->esp_command_dvma = vdma_alloc(PHYSADDR(cmd_buffer), sizeof (cmd_buffer));
esp->esp_command_dvma = vdma_alloc(CPHYSADDR(cmd_buffer), sizeof (cmd_buffer));
esp->irq = JAZZ_SCSI_IRQ;
request_irq(JAZZ_SCSI_IRQ, esp_intr, SA_INTERRUPT, "JAZZ SCSI",
......@@ -157,7 +158,7 @@ static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count)
return fifo_count;
}
static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp)
static int dma_can_transfer(struct NCR_ESP *esp, struct scsi_cmnd *sp)
{
/*
* maximum DMA size is 1MB
......@@ -230,43 +231,43 @@ static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write)
}
}
static void dma_mmu_get_scsi_one (struct NCR_ESP *esp, Scsi_Cmnd *sp)
static void dma_mmu_get_scsi_one (struct NCR_ESP *esp, struct scsi_cmnd *sp)
{
sp->SCp.have_data_in = vdma_alloc(PHYSADDR(sp->SCp.buffer), sp->SCp.this_residual);
sp->SCp.have_data_in = vdma_alloc(CPHYSADDR(sp->SCp.buffer), sp->SCp.this_residual);
sp->SCp.ptr = (char *)((unsigned long)sp->SCp.have_data_in);
}
static void dma_mmu_get_scsi_sgl (struct NCR_ESP *esp, Scsi_Cmnd *sp)
static void dma_mmu_get_scsi_sgl (struct NCR_ESP *esp, struct scsi_cmnd *sp)
{
int sz = sp->SCp.buffers_residual;
struct mmu_sglist *sg = (struct mmu_sglist *) sp->SCp.buffer;
struct scatterlist *sg = (struct scatterlist *) sp->SCp.buffer;
while (sz >= 0) {
sg[sz].dvma_addr = vdma_alloc(PHYSADDR(sg[sz].addr), sg[sz].len);
sg[sz].dma_address = vdma_alloc(CPHYSADDR(page_address(sg[sz].page) + sg[sz].offset), sg[sz].length);
sz--;
}
sp->SCp.ptr=(char *)((unsigned long)sp->SCp.buffer->dvma_address);
sp->SCp.ptr=(char *)(sp->SCp.buffer->dma_address);
}
static void dma_mmu_release_scsi_one (struct NCR_ESP *esp, Scsi_Cmnd *sp)
static void dma_mmu_release_scsi_one (struct NCR_ESP *esp, struct scsi_cmnd *sp)
{
vdma_free(sp->SCp.have_data_in);
}
static void dma_mmu_release_scsi_sgl (struct NCR_ESP *esp, Scsi_Cmnd *sp)
static void dma_mmu_release_scsi_sgl (struct NCR_ESP *esp, struct scsi_cmnd *sp)
{
int sz = sp->use_sg - 1;
struct mmu_sglist *sg = (struct mmu_sglist *)sp->buffer;
struct scatterlist *sg = (struct scatterlist *)sp->buffer;
while(sz >= 0) {
vdma_free(sg[sz].dvma_addr);
vdma_free(sg[sz].dma_address);
sz--;
}
}
static void dma_advance_sg (Scsi_Cmnd *sp)
static void dma_advance_sg (struct scsi_cmnd *sp)
{
sp->SCp.ptr = (char *)((unsigned long)sp->SCp.buffer->dvma_address);
sp->SCp.ptr = (char *)(sp->SCp.buffer->dma_address);
}
#define JAZZ_HDC_LED 0xe000d100 /* FIXME, find correct address */
......@@ -285,9 +286,9 @@ static void dma_led_on(struct NCR_ESP *esp)
#endif
}
static Scsi_Host_Template driver_template = {
static struct scsi_host_template driver_template = {
.proc_name = "jazz_esp",
.proc_info = &esp_proc_info,
.proc_info = esp_proc_info,
.name = "ESP 100/100a/200",
.detect = jazz_esp_detect,
.slave_alloc = esp_slave_alloc,
......@@ -303,4 +304,4 @@ static Scsi_Host_Template driver_template = {
.cmd_per_lun = 1,
.use_clustering = DISABLE_CLUSTERING,
};
#include "scsi_module.c"
......@@ -72,8 +72,6 @@ static void dma_mmu_release_scsi_sgl(struct NCR_ESP *,Scsi_Cmnd *);
static void dma_advance_sg(Scsi_Cmnd *);
static int oktagon_notify_reboot(struct notifier_block *this, unsigned long code, void *x);
void esp_bootup_reset(struct NCR_ESP *esp,struct ESP_regs *eregs);
#ifdef USE_BOTTOM_HALF
static void dma_commit(void *opaque);
......
......@@ -63,10 +63,14 @@ const char * osst_version = "0.99.3";
in the drivers are more widely classified, this may be changed to KERN_DEBUG. */
#define OSST_DEB_MSG KERN_NOTICE
#include "scsi.h"
#include <scsi/scsi_host.h>
#include <scsi/scsi.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsi_request.h>
#define ST_KILOBYTE 1024
......@@ -152,7 +156,7 @@ static int osst_nr_dev;
static struct osst_tape **os_scsi_tapes = NULL;
static DEFINE_RWLOCK(os_scsi_tapes_lock);
static int modes_defined = FALSE;
static int modes_defined = 0;
static struct osst_buffer *new_tape_buffer(int, int, int);
static int enlarge_buffer(struct osst_buffer *, int);
......@@ -223,7 +227,7 @@ static int osst_chk_result(struct osst_tape * STp, struct scsi_request * SRpnt)
if (scode) printk(OSST_DEB_MSG "%s:D: Sense: %02x, ASC: %02x, ASCQ: %02x\n",
name, scode, sense[12], sense[13]);
if (driver_byte(result) & DRIVER_SENSE)
print_req_sense("osst ", SRpnt);
scsi_print_req_sense("osst ", SRpnt);
}
else
#endif
......@@ -238,7 +242,7 @@ static int osst_chk_result(struct osst_tape * STp, struct scsi_request * SRpnt)
SRpnt->sr_cmnd[0] != TEST_UNIT_READY)) { /* Abnormal conditions for tape */
if (driver_byte(result) & DRIVER_SENSE) {
printk(KERN_WARNING "%s:W: Command with sense data:\n", name);
print_req_sense("osst:", SRpnt);
scsi_print_req_sense("osst:", SRpnt);
}
else {
static int notyetprinted = 1;
......@@ -282,7 +286,7 @@ static int osst_chk_result(struct osst_tape * STp, struct scsi_request * SRpnt)
/* Wakeup from interrupt */
static void osst_sleep_done (Scsi_Cmnd * SCpnt)
static void osst_sleep_done (struct scsi_cmnd * SCpnt)
{
struct osst_tape * STp = container_of(SCpnt->request->rq_disk->private_data, struct osst_tape, driver);
......@@ -624,7 +628,7 @@ static int osst_wait_ready(struct osst_tape * STp, struct scsi_request ** aSRpnt
memset(cmd, 0, MAX_COMMAND_SIZE);
cmd[0] = TEST_UNIT_READY;
SRpnt = osst_do_scsi(*aSRpnt, STp, cmd, 0, SCSI_DATA_NONE, STp->timeout, MAX_RETRIES, TRUE);
SRpnt = osst_do_scsi(*aSRpnt, STp, cmd, 0, DMA_NONE, STp->timeout, MAX_RETRIES, 1);
*aSRpnt = SRpnt;
if (!SRpnt) return (-EBUSY);
......@@ -645,7 +649,7 @@ static int osst_wait_ready(struct osst_tape * STp, struct scsi_request ** aSRpnt
memset(cmd, 0, MAX_COMMAND_SIZE);
cmd[0] = TEST_UNIT_READY;
SRpnt = osst_do_scsi(SRpnt, STp, cmd, 0, SCSI_DATA_NONE, STp->timeout, MAX_RETRIES, TRUE);
SRpnt = osst_do_scsi(SRpnt, STp, cmd, 0, DMA_NONE, STp->timeout, MAX_RETRIES, 1);
}
*aSRpnt = SRpnt;
#if DEBUG
......@@ -685,7 +689,7 @@ static int osst_wait_for_medium(struct osst_tape * STp, struct scsi_request ** a
memset(cmd, 0, MAX_COMMAND_SIZE);
cmd[0] = TEST_UNIT_READY;
SRpnt = osst_do_scsi(*aSRpnt, STp, cmd, 0, SCSI_DATA_NONE, STp->timeout, MAX_RETRIES, TRUE);
SRpnt = osst_do_scsi(*aSRpnt, STp, cmd, 0, DMA_NONE, STp->timeout, MAX_RETRIES, 1);
*aSRpnt = SRpnt;
if (!SRpnt) return (-EBUSY);
......@@ -704,7 +708,7 @@ static int osst_wait_for_medium(struct osst_tape * STp, struct scsi_request ** a
memset(cmd, 0, MAX_COMMAND_SIZE);
cmd[0] = TEST_UNIT_READY;
SRpnt = osst_do_scsi(SRpnt, STp, cmd, 0, SCSI_DATA_NONE, STp->timeout, MAX_RETRIES, TRUE);
SRpnt = osst_do_scsi(SRpnt, STp, cmd, 0, DMA_NONE, STp->timeout, MAX_RETRIES, 1);
}
*aSRpnt = SRpnt;
#if DEBUG
......@@ -756,7 +760,7 @@ static int osst_flush_drive_buffer(struct osst_tape * STp, struct scsi_request *
cmd[0] = WRITE_FILEMARKS;
cmd[1] = 1;
SRpnt = osst_do_scsi(*aSRpnt, STp, cmd, 0, SCSI_DATA_NONE, STp->timeout, MAX_RETRIES, TRUE);
SRpnt = osst_do_scsi(*aSRpnt, STp, cmd, 0, DMA_NONE, STp->timeout, MAX_RETRIES, 1);
*aSRpnt = SRpnt;
if (!SRpnt) return (-EBUSY);
if (STp->buffer->syscall_result) {
......@@ -847,8 +851,8 @@ static int osst_recover_wait_frame(struct osst_tape * STp, struct scsi_request *
memset(cmd, 0, MAX_COMMAND_SIZE);
cmd[0] = WRITE_FILEMARKS;
cmd[1] = 1;
SRpnt = osst_do_scsi(*aSRpnt, STp, cmd, 0, SCSI_DATA_NONE, STp->timeout,
MAX_RETRIES, TRUE);
SRpnt = osst_do_scsi(*aSRpnt, STp, cmd, 0, DMA_NONE, STp->timeout,
MAX_RETRIES, 1);
while (retval && time_before (jiffies, startwait + 5*60*HZ)) {
......@@ -865,8 +869,8 @@ static int osst_recover_wait_frame(struct osst_tape * STp, struct scsi_request *
memset(cmd, 0, MAX_COMMAND_SIZE);
cmd[0] = READ_POSITION;
SRpnt = osst_do_scsi(SRpnt, STp, cmd, 20, SCSI_DATA_READ, STp->timeout,
MAX_RETRIES, TRUE);
SRpnt = osst_do_scsi(SRpnt, STp, cmd, 20, DMA_FROM_DEVICE, STp->timeout,
MAX_RETRIES, 1);
retval = ( STp->buffer->syscall_result || (STp->buffer)->b_data[15] > 25 );
STp->buffer->b_data = olddata; STp->buffer->buffer_size = oldsize;
......@@ -911,8 +915,8 @@ static int osst_read_frame(struct osst_tape * STp, struct scsi_request ** aSRpnt
if (debugging)
printk(OSST_DEB_MSG "%s:D: Reading frame from OnStream tape\n", name);
#endif
SRpnt = osst_do_scsi(*aSRpnt, STp, cmd, OS_FRAME_SIZE, SCSI_DATA_READ,
STp->timeout, MAX_RETRIES, TRUE);
SRpnt = osst_do_scsi(*aSRpnt, STp, cmd, OS_FRAME_SIZE, DMA_FROM_DEVICE,
STp->timeout, MAX_RETRIES, 1);
*aSRpnt = SRpnt;
if (!SRpnt)
return (-EBUSY);
......@@ -987,7 +991,7 @@ static int osst_initiate_read(struct osst_tape * STp, struct scsi_request ** aSR
#if DEBUG
printk(OSST_DEB_MSG "%s:D: Start Read Ahead on OnStream tape\n", name);
#endif
SRpnt = osst_do_scsi(*aSRpnt, STp, cmd, 0, SCSI_DATA_NONE, STp->timeout, MAX_RETRIES, TRUE);
SRpnt = osst_do_scsi(*aSRpnt, STp, cmd, 0, DMA_NONE, STp->timeout, MAX_RETRIES, 1);
*aSRpnt = SRpnt;
if ((retval = STp->buffer->syscall_result))
printk(KERN_WARNING "%s:W: Error starting read ahead\n", name);
......@@ -1120,7 +1124,7 @@ static int osst_get_logical_frame(struct osst_tape * STp, struct scsi_request **
"%s:D: Exit get logical frame (%d=>%d) from OnStream tape with code %d\n",
name, frame_seq_number, STp->frame_seq_number, STps->eof);
#endif
STp->fast_open = FALSE;
STp->fast_open = 0;
STp->read_error_frame = 0;
return (STps->eof);
}
......@@ -1368,8 +1372,8 @@ static int osst_read_back_buffer_and_rewrite(struct osst_tape * STp, struct scsi
cmd[7] = 32768 >> 8;
cmd[8] = 32768 & 0xff;
SRpnt = osst_do_scsi(SRpnt, STp, cmd, OS_FRAME_SIZE, SCSI_DATA_READ,
STp->timeout, MAX_RETRIES, TRUE);
SRpnt = osst_do_scsi(SRpnt, STp, cmd, OS_FRAME_SIZE, DMA_FROM_DEVICE,
STp->timeout, MAX_RETRIES, 1);
if ((STp->buffer)->syscall_result || !SRpnt) {
printk(KERN_ERR "%s:E: Failed to read frame back from OnStream buffer\n", name);
......@@ -1440,8 +1444,8 @@ static int osst_read_back_buffer_and_rewrite(struct osst_tape * STp, struct scsi
name, new_frame+i, frame_seq_number+i, logical_blk_num + i*blks_per_frame,
p[0], p[1], p[2], p[3]);
#endif
SRpnt = osst_do_scsi(SRpnt, STp, cmd, OS_FRAME_SIZE, SCSI_DATA_WRITE,
STp->timeout, MAX_RETRIES, TRUE);
SRpnt = osst_do_scsi(SRpnt, STp, cmd, OS_FRAME_SIZE, DMA_TO_DEVICE,
STp->timeout, MAX_RETRIES, 1);
if (STp->buffer->syscall_result)
flag = 1;
......@@ -1456,8 +1460,8 @@ static int osst_read_back_buffer_and_rewrite(struct osst_tape * STp, struct scsi
memset(cmd, 0, MAX_COMMAND_SIZE);
cmd[0] = WRITE_FILEMARKS;
cmd[1] = 1;
SRpnt = osst_do_scsi(SRpnt, STp, cmd, 0, SCSI_DATA_NONE,
STp->timeout, MAX_RETRIES, TRUE);
SRpnt = osst_do_scsi(SRpnt, STp, cmd, 0, DMA_NONE,
STp->timeout, MAX_RETRIES, 1);
#if DEBUG
if (debugging) {
printk(OSST_DEB_MSG "%s:D: Sleeping in re-write wait ready\n", name);
......@@ -1471,8 +1475,8 @@ static int osst_read_back_buffer_and_rewrite(struct osst_tape * STp, struct scsi
memset(cmd, 0, MAX_COMMAND_SIZE);
cmd[0] = TEST_UNIT_READY;
SRpnt = osst_do_scsi(SRpnt, STp, cmd, 0, SCSI_DATA_NONE, STp->timeout,
MAX_RETRIES, TRUE);
SRpnt = osst_do_scsi(SRpnt, STp, cmd, 0, DMA_NONE, STp->timeout,
MAX_RETRIES, 1);
if (SRpnt->sr_sense_buffer[2] == 2 && SRpnt->sr_sense_buffer[12] == 4 &&
(SRpnt->sr_sense_buffer[13] == 1 || SRpnt->sr_sense_buffer[13] == 8)) {
......@@ -1577,8 +1581,8 @@ static int osst_reposition_and_retry(struct osst_tape * STp, struct scsi_request
printk(OSST_DEB_MSG "%s:D: About to write pending fseq %d at fppos %d\n",
name, STp->frame_seq_number-1, STp->first_frame_position);
#endif
SRpnt = osst_do_scsi(*aSRpnt, STp, cmd, OS_FRAME_SIZE, SCSI_DATA_WRITE,
STp->timeout, MAX_RETRIES, TRUE);
SRpnt = osst_do_scsi(*aSRpnt, STp, cmd, OS_FRAME_SIZE, DMA_TO_DEVICE,
STp->timeout, MAX_RETRIES, 1);
*aSRpnt = SRpnt;
if (STp->buffer->syscall_result) { /* additional write error */
......@@ -2036,7 +2040,7 @@ static void osst_set_retries(struct osst_tape * STp, struct scsi_request ** aSRp
if (debugging)
printk(OSST_DEB_MSG "%s:D: Setting number of retries on OnStream tape to %d\n", name, retries);
SRpnt = osst_do_scsi(SRpnt, STp, cmd, cmd[4], SCSI_DATA_WRITE, STp->timeout, 0, TRUE);
SRpnt = osst_do_scsi(SRpnt, STp, cmd, cmd[4], DMA_TO_DEVICE, STp->timeout, 0, 1);
*aSRpnt = SRpnt;
if ((STp->buffer)->syscall_result)
......@@ -2576,7 +2580,7 @@ static int osst_configure_onstream(struct osst_tape *STp, struct scsi_request **
cmd[2] = BLOCK_SIZE_PAGE;
cmd[4] = BLOCK_SIZE_PAGE_LENGTH + MODE_HEADER_LENGTH;
SRpnt = osst_do_scsi(SRpnt, STp, cmd, cmd[4], SCSI_DATA_READ, STp->timeout, 0, TRUE);
SRpnt = osst_do_scsi(SRpnt, STp, cmd, cmd[4], DMA_FROM_DEVICE, STp->timeout, 0, 1);
if (SRpnt == NULL) {
#if DEBUG
printk(OSST_DEB_MSG "osst :D: Busy\n");
......@@ -2613,7 +2617,7 @@ static int osst_configure_onstream(struct osst_tape *STp, struct scsi_request **
cmd[1] = 0x10;
cmd[4] = BLOCK_SIZE_PAGE_LENGTH + MODE_HEADER_LENGTH;
SRpnt = osst_do_scsi(SRpnt, STp, cmd, cmd[4], SCSI_DATA_WRITE, STp->timeout, 0, TRUE);
SRpnt = osst_do_scsi(SRpnt, STp, cmd, cmd[4], DMA_TO_DEVICE, STp->timeout, 0, 1);
*aSRpnt = SRpnt;
if ((STp->buffer)->syscall_result != 0) {
printk (KERN_ERR "%s:E: Couldn't set tape block size mode page\n", name);
......@@ -2653,7 +2657,7 @@ static int osst_configure_onstream(struct osst_tape *STp, struct scsi_request **
(STp->buffer)->b_data[MODE_HEADER_LENGTH + 6] = 0;
(STp->buffer)->b_data[MODE_HEADER_LENGTH + 7] = 0;
SRpnt = osst_do_scsi(SRpnt, STp, cmd, cmd[4], SCSI_DATA_WRITE, STp->timeout, 0, TRUE);
SRpnt = osst_do_scsi(SRpnt, STp, cmd, cmd[4], DMA_TO_DEVICE, STp->timeout, 0, 1);
*aSRpnt = SRpnt;
if ((STp->buffer)->syscall_result != 0) {
......@@ -2668,7 +2672,7 @@ static int osst_configure_onstream(struct osst_tape *STp, struct scsi_request **
cmd[2] = CAPABILITIES_PAGE;
cmd[4] = CAPABILITIES_PAGE_LENGTH + MODE_HEADER_LENGTH;
SRpnt = osst_do_scsi(SRpnt, STp, cmd, cmd[4], SCSI_DATA_READ, STp->timeout, 0, TRUE);
SRpnt = osst_do_scsi(SRpnt, STp, cmd, cmd[4], DMA_FROM_DEVICE, STp->timeout, 0, 1);
*aSRpnt = SRpnt;
if ((STp->buffer)->syscall_result != 0) {
......@@ -2688,7 +2692,7 @@ static int osst_configure_onstream(struct osst_tape *STp, struct scsi_request **
cmd[2] = TAPE_PARAMTR_PAGE;
cmd[4] = TAPE_PARAMTR_PAGE_LENGTH + MODE_HEADER_LENGTH;
SRpnt = osst_do_scsi(SRpnt, STp, cmd, cmd[4], SCSI_DATA_READ, STp->timeout, 0, TRUE);
SRpnt = osst_do_scsi(SRpnt, STp, cmd, cmd[4], DMA_FROM_DEVICE, STp->timeout, 0, 1);
*aSRpnt = SRpnt;
if ((STp->buffer)->syscall_result != 0) {
......@@ -2762,8 +2766,8 @@ static int osst_get_frame_position(struct osst_tape *STp, struct scsi_request **
scmd[0] = READ_POSITION;
STp->buffer->b_data = mybuf; STp->buffer->buffer_size = 24;
SRpnt = osst_do_scsi(*aSRpnt, STp, scmd, 20, SCSI_DATA_READ,
STp->timeout, MAX_RETRIES, TRUE);
SRpnt = osst_do_scsi(*aSRpnt, STp, scmd, 20, DMA_FROM_DEVICE,
STp->timeout, MAX_RETRIES, 1);
if (!SRpnt) {
STp->buffer->b_data = olddata; STp->buffer->buffer_size = oldsize;
return (-EBUSY);
......@@ -2782,8 +2786,8 @@ static int osst_get_frame_position(struct osst_tape *STp, struct scsi_request **
memset (scmd, 0, MAX_COMMAND_SIZE);
scmd[0] = READ_POSITION;
STp->buffer->b_data = mybuf; STp->buffer->buffer_size = 24;
SRpnt = osst_do_scsi(SRpnt, STp, scmd, 20, SCSI_DATA_READ,
STp->timeout, MAX_RETRIES, TRUE);
SRpnt = osst_do_scsi(SRpnt, STp, scmd, 20, DMA_FROM_DEVICE,
STp->timeout, MAX_RETRIES, 1);
#if DEBUG
printk(OSST_DEB_MSG "%s:D: Reread position, reason=[%02x:%02x:%02x], result=[%s%02x:%02x:%02x]\n",
name, mysense[2], mysense[12], mysense[13], STp->buffer->syscall_result?"":"ok:",
......@@ -2861,8 +2865,8 @@ static int osst_set_frame_position(struct osst_tape *STp, struct scsi_request **
if (skip)
scmd[9] = 0x80;
SRpnt = osst_do_scsi(*aSRpnt, STp, scmd, 0, SCSI_DATA_NONE, STp->long_timeout,
MAX_RETRIES, TRUE);
SRpnt = osst_do_scsi(*aSRpnt, STp, scmd, 0, DMA_NONE, STp->long_timeout,
MAX_RETRIES, 1);
if (!SRpnt)
return (-EBUSY);
*aSRpnt = SRpnt;
......@@ -2999,8 +3003,8 @@ static int osst_flush_write_buffer(struct osst_tape *STp, struct scsi_request **
name, offset, transfer, blks);
#endif
SRpnt = osst_do_scsi(*aSRpnt, STp, cmd, transfer, SCSI_DATA_WRITE,
STp->timeout, MAX_RETRIES, TRUE);
SRpnt = osst_do_scsi(*aSRpnt, STp, cmd, transfer, DMA_TO_DEVICE,
STp->timeout, MAX_RETRIES, 1);
*aSRpnt = SRpnt;
if (!SRpnt)
return (-EBUSY);
......@@ -3082,7 +3086,7 @@ static int osst_flush_buffer(struct osst_tape * STp, struct scsi_request ** aSRp
if (!seek_next) {
if (STps->eof == ST_FM_HIT) {
result = cross_eof(STp, aSRpnt, FALSE); /* Back over the EOF hit */
result = cross_eof(STp, aSRpnt, 0); /* Back over the EOF hit */
if (!result)
STps->eof = ST_NOEOF;
else {
......@@ -3156,7 +3160,7 @@ static int osst_write_frame(struct osst_tape * STp, struct scsi_request ** aSRpn
if (!synchronous)
STp->write_pending = 1;
#endif
SRpnt = osst_do_scsi(*aSRpnt, STp, cmd, OS_FRAME_SIZE, SCSI_DATA_WRITE, STp->timeout,
SRpnt = osst_do_scsi(*aSRpnt, STp, cmd, OS_FRAME_SIZE, DMA_TO_DEVICE, STp->timeout,
MAX_RETRIES, synchronous);
if (!SRpnt)
return (-EBUSY);
......@@ -3218,7 +3222,7 @@ static void reset_state(struct osst_tape *STp)
STps->rw = ST_IDLE;
STps->eof = ST_NOEOF;
STps->at_sm = 0;
STps->last_block_valid = FALSE;
STps->last_block_valid = 0;
STps->drv_block = -1;
STps->drv_file = -1;
}
......@@ -3372,7 +3376,7 @@ static ssize_t osst_write(struct file * filp, const char __user * buf, size_t co
#endif
}
}
STp->fast_open = FALSE;
STp->fast_open = 0;
}
if (!STp->header_ok) {
#if DEBUG
......@@ -3447,7 +3451,7 @@ if (SRpnt) printk(KERN_ERR "%s:A: Not supposed to have SRpnt at line %d\n", name
blks = do_count / STp->block_size;
STp->logical_blk_num += blks; /* logical_blk_num is incremented as data is moved from user */
i = osst_write_frame(STp, &SRpnt, TRUE);
i = osst_write_frame(STp, &SRpnt, 1);
if (i == (-ENOSPC)) {
transfer = STp->buffer->writing; /* FIXME -- check this logic */
......@@ -3528,7 +3532,7 @@ if (SRpnt) printk(KERN_ERR "%s:A: Not supposed to have SRpnt at line %d\n", name
STp->dirty = !((STp->buffer)->writing ==
(STp->buffer)->buffer_bytes);
i = osst_write_frame(STp, &SRpnt, FALSE);
i = osst_write_frame(STp, &SRpnt, 0);
if (i < 0) {
retval = (-EIO);
goto out;
......@@ -3769,7 +3773,7 @@ static int osst_set_options(struct osst_tape *STp, long options)
STm = &(STp->modes[STp->current_mode]);
if (!STm->defined) {
memcpy(STm, &(STp->modes[0]), sizeof(*STm));
modes_defined = TRUE;
modes_defined = 1;
#if DEBUG
if (debugging)
printk(OSST_DEB_MSG "%s:D: Initialized mode %d definition from mode 0\n",
......@@ -3921,12 +3925,12 @@ static int osst_int_ioctl(struct osst_tape * STp, struct scsi_request ** aSRpnt,
int timeout;
long ltmp;
int i, ioctl_result;
int chg_eof = TRUE;
int chg_eof = 1;
unsigned char cmd[MAX_COMMAND_SIZE];
struct scsi_request * SRpnt = * aSRpnt;
struct st_partstat * STps;
int fileno, blkno, at_sm, frame_seq_numbr, logical_blk_num;
int datalen = 0, direction = SCSI_DATA_NONE;
int datalen = 0, direction = DMA_NONE;
char * name = tape_name(STp);
if (STp->ready != ST_READY && cmd_in != MTLOAD) {
......@@ -3946,7 +3950,7 @@ static int osst_int_ioctl(struct osst_tape * STp, struct scsi_request ** aSRpnt,
memset(cmd, 0, MAX_COMMAND_SIZE);
switch (cmd_in) {
case MTFSFM:
chg_eof = FALSE; /* Changed from the FSF after this */
chg_eof = 0; /* Changed from the FSF after this */
case MTFSF:
if (STp->raw)
return (-EIO);
......@@ -3961,7 +3965,7 @@ static int osst_int_ioctl(struct osst_tape * STp, struct scsi_request ** aSRpnt,
goto os_bypass;
case MTBSF:
chg_eof = FALSE; /* Changed from the FSF after this */
chg_eof = 0; /* Changed from the FSF after this */
case MTBSFM:
if (STp->raw)
return (-EIO);
......@@ -4176,7 +4180,7 @@ static int osst_int_ioctl(struct osst_tape * STp, struct scsi_request ** aSRpnt,
case MTSETDENSITY: /* Set tape density */
case MTSETDRVBUFFER: /* Set drive buffering */
case SET_DENS_AND_BLK: /* Set density and block size */
chg_eof = FALSE;
chg_eof = 0;
if (STp->dirty || (STp->buffer)->buffer_bytes != 0)
return (-EIO); /* Not allowed if data in buffer */
if ((cmd_in == MTSETBLK || cmd_in == SET_DENS_AND_BLK) &&
......@@ -4193,7 +4197,7 @@ static int osst_int_ioctl(struct osst_tape * STp, struct scsi_request ** aSRpnt,
return (-ENOSYS);
}
SRpnt = osst_do_scsi(SRpnt, STp, cmd, datalen, direction, timeout, MAX_RETRIES, TRUE);
SRpnt = osst_do_scsi(SRpnt, STp, cmd, datalen, direction, timeout, MAX_RETRIES, 1);
ioctl_result = (STp->buffer)->syscall_result;
......@@ -4249,7 +4253,7 @@ static int osst_int_ioctl(struct osst_tape * STp, struct scsi_request ** aSRpnt,
else if (cmd_in == MTLOAD) {
for (i=0; i < ST_NBR_PARTITIONS; i++) {
STp->ps[i].rw = ST_IDLE;
STp->ps[i].last_block_valid = FALSE;/* FIXME - where else is this field maintained? */
STp->ps[i].last_block_valid = 0;/* FIXME - where else is this field maintained? */
}
STp->partition = 0;
}
......@@ -4304,7 +4308,7 @@ static int osst_int_ioctl(struct osst_tape * STp, struct scsi_request ** aSRpnt,
static int os_scsi_tape_open(struct inode * inode, struct file * filp)
{
unsigned short flags;
int i, b_size, new_session = FALSE, retval = 0;
int i, b_size, new_session = 0, retval = 0;
unsigned char cmd[MAX_COMMAND_SIZE];
struct scsi_request * SRpnt = NULL;
struct osst_tape * STp;
......@@ -4353,7 +4357,7 @@ static int os_scsi_tape_open(struct inode * inode, struct file * filp)
printk(OSST_DEB_MSG "%s:D: Mode change from %d to %d.\n",
name, STp->current_mode, mode);
#endif
new_session = TRUE;
new_session = 1;
STp->current_mode = mode;
}
STm = &(STp->modes[STp->current_mode]);
......@@ -4403,7 +4407,7 @@ static int os_scsi_tape_open(struct inode * inode, struct file * filp)
memset (cmd, 0, MAX_COMMAND_SIZE);
cmd[0] = TEST_UNIT_READY;
SRpnt = osst_do_scsi(NULL, STp, cmd, 0, SCSI_DATA_NONE, STp->timeout, MAX_RETRIES, TRUE);
SRpnt = osst_do_scsi(NULL, STp, cmd, 0, DMA_NONE, STp->timeout, MAX_RETRIES, 1);
if (!SRpnt) {
retval = (STp->buffer)->syscall_result; /* FIXME - valid? */
goto err_out;
......@@ -4423,8 +4427,8 @@ static int os_scsi_tape_open(struct inode * inode, struct file * filp)
cmd[0] = START_STOP;
cmd[1] = 1;
cmd[4] = 1;
SRpnt = osst_do_scsi(SRpnt, STp, cmd, 0, SCSI_DATA_NONE,
STp->timeout, MAX_RETRIES, TRUE);
SRpnt = osst_do_scsi(SRpnt, STp, cmd, 0, DMA_NONE,
STp->timeout, MAX_RETRIES, 1);
}
osst_wait_ready(STp, &SRpnt, (SRpnt->sr_sense_buffer[13]==1?15:3) * 60, 0);
}
......@@ -4440,8 +4444,8 @@ static int os_scsi_tape_open(struct inode * inode, struct file * filp)
memset (cmd, 0, MAX_COMMAND_SIZE);
cmd[0] = TEST_UNIT_READY;
SRpnt = osst_do_scsi(SRpnt, STp, cmd, 0, SCSI_DATA_NONE,
STp->timeout, MAX_RETRIES, TRUE);
SRpnt = osst_do_scsi(SRpnt, STp, cmd, 0, DMA_NONE,
STp->timeout, MAX_RETRIES, 1);
if ((SRpnt->sr_sense_buffer[0] & 0x70) != 0x70 ||
(SRpnt->sr_sense_buffer[2] & 0x0f) != UNIT_ATTENTION)
break;
......@@ -4456,11 +4460,11 @@ static int os_scsi_tape_open(struct inode * inode, struct file * filp)
STps->rw = ST_IDLE; /* FIXME - seems to be redundant... */
STps->eof = ST_NOEOF;
STps->at_sm = 0;
STps->last_block_valid = FALSE;
STps->last_block_valid = 0;
STps->drv_block = 0;
STps->drv_file = 0 ;
}
new_session = TRUE;
new_session = 1;
STp->recover_count = 0;
STp->abort_count = 0;
}
......@@ -4477,7 +4481,7 @@ static int os_scsi_tape_open(struct inode * inode, struct file * filp)
cmd[2] = VENDOR_IDENT_PAGE;
cmd[4] = VENDOR_IDENT_PAGE_LENGTH + MODE_HEADER_LENGTH;
SRpnt = osst_do_scsi(SRpnt, STp, cmd, cmd[4], SCSI_DATA_READ, STp->timeout, 0, TRUE);
SRpnt = osst_do_scsi(SRpnt, STp, cmd, cmd[4], DMA_FROM_DEVICE, STp->timeout, 0, 1);
if (STp->buffer->syscall_result ||
STp->buffer->b_data[MODE_HEADER_LENGTH + 2] != 'L' ||
......@@ -4507,7 +4511,7 @@ static int os_scsi_tape_open(struct inode * inode, struct file * filp)
STp->buffer->buffer_bytes = STp->buffer->read_pointer = 0;
}
STp->buffer->buffer_blocks = OS_DATA_SIZE / STp->block_size;
STp->fast_open = TRUE;
STp->fast_open = 1;
scsi_release_request(SRpnt);
return 0;
}
......@@ -4518,7 +4522,7 @@ static int os_scsi_tape_open(struct inode * inode, struct file * filp)
#endif
STp->header_ok = 0;
}
STp->fast_open = FALSE;
STp->fast_open = 0;
if ((STp->buffer)->syscall_result != 0 && /* in all error conditions except no medium */
(SRpnt->sr_sense_buffer[2] != 2 || SRpnt->sr_sense_buffer[12] != 0x3A) ) {
......@@ -4540,7 +4544,7 @@ static int os_scsi_tape_open(struct inode * inode, struct file * filp)
#if DEBUG
printk(OSST_DEB_MSG "%s:D: Applying soft reset\n", name);
#endif
SRpnt = osst_do_scsi(SRpnt, STp, cmd, cmd[4], SCSI_DATA_WRITE, STp->timeout, 0, TRUE);
SRpnt = osst_do_scsi(SRpnt, STp, cmd, cmd[4], DMA_TO_DEVICE, STp->timeout, 0, 1);
STp->header_ok = 0;
......@@ -4549,8 +4553,8 @@ static int os_scsi_tape_open(struct inode * inode, struct file * filp)
memset (cmd, 0, MAX_COMMAND_SIZE);
cmd[0] = TEST_UNIT_READY;
SRpnt = osst_do_scsi(SRpnt, STp, cmd, 0, SCSI_DATA_NONE,
STp->timeout, MAX_RETRIES, TRUE);
SRpnt = osst_do_scsi(SRpnt, STp, cmd, 0, DMA_NONE,
STp->timeout, MAX_RETRIES, 1);
if ((SRpnt->sr_sense_buffer[0] & 0x70) != 0x70 ||
(SRpnt->sr_sense_buffer[2] & 0x0f) == NOT_READY)
break;
......@@ -4565,11 +4569,11 @@ static int os_scsi_tape_open(struct inode * inode, struct file * filp)
STps->rw = ST_IDLE;
STps->eof = ST_NOEOF;
STps->at_sm = 0;
STps->last_block_valid = FALSE;
STps->last_block_valid = 0;
STps->drv_block = 0;
STps->drv_file = 0 ;
}
new_session = TRUE;
new_session = 1;
}
}
}
......@@ -4629,8 +4633,8 @@ static int os_scsi_tape_open(struct inode * inode, struct file * filp)
if (debugging)
printk(OSST_DEB_MSG "%s:D: New Session\n", name);
#endif
STp->density_changed = STp->blksize_changed = FALSE;
STp->compression_changed = FALSE;
STp->density_changed = STp->blksize_changed = 0;
STp->compression_changed = 0;
}
/*
......@@ -4704,7 +4708,7 @@ static int os_scsi_tape_flush(struct file * filp)
if (STp->can_bsr)
result = osst_flush_buffer(STp, &SRpnt, 0); /* this is the default path */
else if (STps->eof == ST_FM_HIT) {
result = cross_eof(STp, &SRpnt, FALSE);
result = cross_eof(STp, &SRpnt, 0);
if (result) {
if (STps->drv_file >= 0)
STps->drv_file++;
......@@ -4716,7 +4720,7 @@ static int os_scsi_tape_flush(struct file * filp)
}
}
else if ((STps->eof == ST_NOEOF &&
!(result = cross_eof(STp, &SRpnt, TRUE))) ||
!(result = cross_eof(STp, &SRpnt, 1))) ||
STps->eof == ST_FM_HIT) {
if (STps->drv_file >= 0)
STps->drv_file++;
......@@ -4971,7 +4975,7 @@ static int osst_ioctl(struct inode * inode,struct file * file,
for (i=0; i < ST_NBR_PARTITIONS; i++) {
STp->ps[i].rw = ST_IDLE;
STp->ps[i].at_sm = 0;
STp->ps[i].last_block_valid = FALSE;
STp->ps[i].last_block_valid = 0;
}
STp->partition = STp->new_partition = 0;
STp->nbr_partitions = 1; /* Bad guess ?-) */
......@@ -4997,7 +5001,7 @@ static int osst_ioctl(struct inode * inode,struct file * file,
}
if (auto_weof)
cross_eof(STp, &SRpnt, FALSE);
cross_eof(STp, &SRpnt, 0);
if (mtc.mt_op == MTCOMPRESSION)
retval = -EINVAL; /* OnStream drives don't have compression hardware */
......@@ -5013,7 +5017,7 @@ static int osst_ioctl(struct inode * inode,struct file * file,
goto out;
}
if ((i = osst_flush_buffer(STp, &SRpnt, FALSE)) < 0) {
if ((i = osst_flush_buffer(STp, &SRpnt, 0)) < 0) {
retval = i;
goto out;
}
......@@ -5140,7 +5144,7 @@ static struct osst_buffer * new_tape_buffer( int from_initialization, int need_d
memset(tb, 0, i);
tb->sg_segs = tb->orig_sg_segs = 0;
tb->use_sg = max_sg;
tb->in_use = TRUE;
tb->in_use = 1;
tb->dma = need_dma;
tb->buffer_size = 0;
#if DEBUG
......@@ -5158,7 +5162,7 @@ static int enlarge_buffer(struct osst_buffer *STbuffer, int need_dma)
int segs, nbr, max_segs, b_size, priority, order, got;
if (STbuffer->buffer_size >= OS_FRAME_SIZE)
return TRUE;
return 1;
if (STbuffer->sg_segs) {
printk(KERN_WARNING "osst :A: Buffer not previously normalized.\n");
......@@ -5167,7 +5171,7 @@ static int enlarge_buffer(struct osst_buffer *STbuffer, int need_dma)
/* See how many segments we can use -- need at least two */
nbr = max_segs = STbuffer->use_sg;
if (nbr <= 2)
return FALSE;
return 0;
priority = GFP_KERNEL /* | __GFP_NOWARN */;
if (need_dma)
......@@ -5186,7 +5190,7 @@ static int enlarge_buffer(struct osst_buffer *STbuffer, int need_dma)
}
if (STbuffer->sg[0].page == NULL) {
printk(KERN_NOTICE "osst :I: Can't allocate tape buffer main segment.\n");
return FALSE;
return 0;
}
/* Got initial segment of 'bsize,order', continue with same size if possible, except for AUX */
for (segs=STbuffer->sg_segs=1, got=b_size;
......@@ -5206,7 +5210,7 @@ static int enlarge_buffer(struct osst_buffer *STbuffer, int need_dma)
STbuffer->buffer_size = got;
#endif
normalize_buffer(STbuffer);
return FALSE;
return 0;
}
STbuffer->sg[segs].length = (OS_FRAME_SIZE - got <= PAGE_SIZE / 2) ? (OS_FRAME_SIZE - got) : b_size;
got += STbuffer->sg[segs].length;
......@@ -5225,7 +5229,7 @@ static int enlarge_buffer(struct osst_buffer *STbuffer, int need_dma)
}
#endif
return TRUE;
return 1;
}
......@@ -5457,7 +5461,7 @@ static struct file_operations osst_fops = {
.release = os_scsi_tape_close,
};
static int osst_supports(Scsi_Device * SDp)
static int osst_supports(struct scsi_device * SDp)
{
struct osst_support_data {
char *vendor;
......@@ -5592,7 +5596,7 @@ static void osst_sysfs_init(void)
if ( IS_ERR(osst_sysfs_class) )
printk(KERN_WARNING "osst :W: Unable to register sysfs class\n");
else
osst_sysfs_valid = TRUE;
osst_sysfs_valid = 1;
}
static void osst_sysfs_add(dev_t dev, struct device *device, struct osst_tape * STp, char * name)
......@@ -5636,7 +5640,7 @@ static void osst_sysfs_cleanup(void)
static int osst_probe(struct device *dev)
{
Scsi_Device * SDp = to_scsi_device(dev);
struct scsi_device * SDp = to_scsi_device(dev);
struct osst_tape * tpnt;
struct st_modedef * STm;
struct st_partstat * STps;
......@@ -5691,7 +5695,7 @@ static int osst_probe(struct device *dev)
i = SDp->host->sg_tablesize;
if (osst_max_sg_segs < i)
i = osst_max_sg_segs;
buffer = new_tape_buffer(TRUE, SDp->host->unchecked_isa_dma, i);
buffer = new_tape_buffer(1, SDp->host->unchecked_isa_dma, i);
if (buffer == NULL) {
write_unlock(&os_scsi_tapes_lock);
printk(KERN_ERR "osst :E: Unable to allocate a tape buffer, device not attached.\n");
......@@ -5741,7 +5745,7 @@ static int osst_probe(struct device *dev)
for (i=0; i < ST_NBR_MODES; i++) {
STm = &(tpnt->modes[i]);
STm->defined = FALSE;
STm->defined = 0;
STm->sysv = OSST_SYSV;
STm->defaults_for_writes = 0;
STm->do_async_writes = OSST_ASYNC_WRITES;
......@@ -5757,15 +5761,15 @@ static int osst_probe(struct device *dev)
STps->rw = ST_IDLE;
STps->eof = ST_NOEOF;
STps->at_sm = 0;
STps->last_block_valid = FALSE;
STps->last_block_valid = 0;
STps->drv_block = (-1);
STps->drv_file = (-1);
}
tpnt->current_mode = 0;
tpnt->modes[0].defined = TRUE;
tpnt->modes[2].defined = TRUE;
tpnt->density_changed = tpnt->compression_changed = tpnt->blksize_changed = FALSE;
tpnt->modes[0].defined = 1;
tpnt->modes[2].defined = 1;
tpnt->density_changed = tpnt->compression_changed = tpnt->blksize_changed = 0;
init_MUTEX(&tpnt->lock);
osst_nr_dev++;
......@@ -5804,7 +5808,7 @@ static int osst_probe(struct device *dev)
static int osst_remove(struct device *dev)
{
Scsi_Device * SDp = to_scsi_device(dev);
struct scsi_device * SDp = to_scsi_device(dev);
struct osst_tape * tpnt;
int i, mode;
......
......@@ -518,7 +518,7 @@ struct osst_buffer {
int writing;
int midlevel_result;
int syscall_result;
Scsi_Request *last_SRpnt;
struct scsi_request *last_SRpnt;
unsigned char *b_data;
os_aux_t *aux; /* onstream AUX structure at end of each block */
unsigned short use_sg; /* zero or number of s/g segments for this adapter */
......@@ -531,7 +531,7 @@ struct osst_buffer {
struct osst_tape {
struct scsi_driver *driver;
unsigned capacity;
Scsi_Device* device;
struct scsi_device *device;
struct semaphore lock; /* for serialization */
struct completion wait; /* for SCSI commands */
struct osst_buffer * buffer;
......
......@@ -11,7 +11,7 @@
* (or disk like devices) sharing a common amount of RAM
*
*
* For documentation see http://www.torque.net/sg/sdebug25.html
* For documentation see http://www.torque.net/sg/sdebug26.html
*
* D. Gilbert (dpg) work for Magneto-Optical device test [20010421]
* dpg: work for devfs large number of disks [20010809]
......@@ -56,7 +56,7 @@
#include "scsi_debug.h"
#define SCSI_DEBUG_VERSION "1.75"
static const char * scsi_debug_version_date = "20041023";
static const char * scsi_debug_version_date = "20050113";
/* Additional Sense Code (ASC) used */
#define NO_ADDED_SENSE 0x0
......@@ -1675,6 +1675,7 @@ static void do_create_driverfs_files(void)
driver_create_file(&sdebug_driverfs_driver, &driver_attr_add_host);
driver_create_file(&sdebug_driverfs_driver, &driver_attr_delay);
driver_create_file(&sdebug_driverfs_driver, &driver_attr_dev_size_mb);
driver_create_file(&sdebug_driverfs_driver, &driver_attr_dsense);
driver_create_file(&sdebug_driverfs_driver, &driver_attr_every_nth);
driver_create_file(&sdebug_driverfs_driver, &driver_attr_max_luns);
driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_tgts);
......@@ -1693,6 +1694,7 @@ static void do_remove_driverfs_files(void)
driver_remove_file(&sdebug_driverfs_driver, &driver_attr_num_tgts);
driver_remove_file(&sdebug_driverfs_driver, &driver_attr_max_luns);
driver_remove_file(&sdebug_driverfs_driver, &driver_attr_every_nth);
driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dsense);
driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dev_size_mb);
driver_remove_file(&sdebug_driverfs_driver, &driver_attr_delay);
driver_remove_file(&sdebug_driverfs_driver, &driver_attr_add_host);
......
......@@ -1948,3 +1948,96 @@ int scsi_command_normalize_sense(struct scsi_cmnd *cmd,
sizeof(cmd->sense_buffer), sshdr);
}
EXPORT_SYMBOL(scsi_command_normalize_sense);
/**
* scsi_sense_desc_find - search for a given descriptor type in
* descriptor sense data format.
*
* @sense_buffer: byte array of descriptor format sense data
* @sb_len: number of valid bytes in sense_buffer
* @desc_type: value of descriptor type to find
* (e.g. 0 -> information)
*
* Notes:
* only valid when sense data is in descriptor format
*
* Return value:
* pointer to start of (first) descriptor if found else NULL
**/
const u8 * scsi_sense_desc_find(const u8 * sense_buffer, int sb_len,
int desc_type)
{
int add_sen_len, add_len, desc_len, k;
const u8 * descp;
if ((sb_len < 8) || (0 == (add_sen_len = sense_buffer[7])))
return NULL;
if ((sense_buffer[0] < 0x72) || (sense_buffer[0] > 0x73))
return NULL;
add_sen_len = (add_sen_len < (sb_len - 8)) ?
add_sen_len : (sb_len - 8);
descp = &sense_buffer[8];
for (desc_len = 0, k = 0; k < add_sen_len; k += desc_len) {
descp += desc_len;
add_len = (k < (add_sen_len - 1)) ? descp[1]: -1;
desc_len = add_len + 2;
if (descp[0] == desc_type)
return descp;
if (add_len < 0) // short descriptor ??
break;
}
return NULL;
}
EXPORT_SYMBOL(scsi_sense_desc_find);
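As a rough illustration of the calling convention, a consumer of descriptor-format sense data might use the new helper as in the sketch below; the wrapper function name and the choice of the command-specific information descriptor (type 0x1) are illustrative assumptions, only scsi_sense_desc_find() itself comes from this patch.
/* Hedged sketch: locate the command-specific information descriptor
 * (type 0x1) in descriptor-format sense data and decode its 8-byte
 * big-endian value.  The helper name is hypothetical. */
static u64 example_cmd_specific_info(const u8 *sense, int sb_len)
{
	const u8 *desc;
	u64 val = 0;
	int i;
	desc = scsi_sense_desc_find(sense, sb_len, 1 /* cmd specific */);
	if (!desc || desc[1] != 0xa)	/* descriptor must carry 8 bytes */
		return 0;
	for (i = 0; i < 8; i++)
		val = (val << 8) | desc[4 + i];
	return val;
}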
/**
* scsi_get_sense_info_fld - attempts to get information field from
* sense data (either fixed or descriptor format)
*
* @sense_buffer: byte array of sense data
* @sb_len: number of valid bytes in sense_buffer
* @info_out: pointer to a 64-bit integer where the 8 or 4 byte information
* field will be placed if found.
*
* Return value:
* 1 if information field found, 0 if not found.
**/
int scsi_get_sense_info_fld(const u8 * sense_buffer, int sb_len,
u64 * info_out)
{
int j;
const u8 * ucp;
u64 ull;
if (sb_len < 7)
return 0;
switch (sense_buffer[0] & 0x7f) {
case 0x70:
case 0x71:
if (sense_buffer[0] & 0x80) {
*info_out = (sense_buffer[3] << 24) +
(sense_buffer[4] << 16) +
(sense_buffer[5] << 8) + sense_buffer[6];
return 1;
} else
return 0;
case 0x72:
case 0x73:
ucp = scsi_sense_desc_find(sense_buffer, sb_len,
0 /* info desc */);
if (ucp && (0xa == ucp[1])) {
ull = 0;
for (j = 0; j < 8; ++j) {
if (j > 0)
ull <<= 8;
ull |= ucp[4 + j];
}
*info_out = ull;
return 1;
} else
return 0;
default:
return 0;
}
}
EXPORT_SYMBOL(scsi_get_sense_info_fld);
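A minimal sketch of how a caller might use scsi_get_sense_info_fld() regardless of which sense format the target returned; the surrounding function and printk text are assumptions, not part of this patch.
/* Hedged sketch: report the information field from a completed command's
 * sense buffer, whether fixed or descriptor format.  Everything except
 * scsi_get_sense_info_fld() itself is illustrative. */
static void example_report_info(struct scsi_cmnd *cmd)
{
	u64 info;
	if (scsi_get_sense_info_fld(cmd->sense_buffer,
				    sizeof(cmd->sense_buffer), &info))
		printk(KERN_INFO "scsi: information field 0x%llx\n",
		       (unsigned long long)info);
	else
		printk(KERN_INFO "scsi: no information field in sense data\n");
}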
......@@ -498,19 +498,17 @@ void scsi_run_host_queues(struct Scsi_Host *shost)
/*
* Function: scsi_end_request()
*
* Purpose: Post-processing of completed commands called from interrupt
* handler or a bottom-half handler.
* Purpose: Post-processing of completed commands (usually invoked at end
* of upper level post-processing and scsi_io_completion).
*
* Arguments: cmd - command that is complete.
* uptodate - 1 if I/O indicates success, 0 for I/O error.
* sectors - number of sectors we want to mark.
* uptodate - 1 if I/O indicates success, <= 0 for I/O error.
* bytes - number of bytes of completed I/O
* requeue - indicates whether we should requeue leftovers.
* frequeue - indicates that if we release the command block
* that the queue request function should be called.
*
* Lock status: Assumed that lock is not held upon entry.
*
* Returns: Nothing
* Returns: cmd if requeue done or required, NULL otherwise
*
* Notes: This is called for block device requests in order to
* mark some number of sectors as complete.
......@@ -694,8 +692,9 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
int this_count = cmd->bufflen;
request_queue_t *q = cmd->device->request_queue;
struct request *req = cmd->request;
int clear_errors = 1;
struct scsi_sense_hdr sshdr;
int sense_valid = 0;
int sense_deferred = 0;
/*
* Free up any indirection buffers we allocated for DMA purposes.
......@@ -714,11 +713,15 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
kfree(cmd->buffer);
}
if (result) {
sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
if (sense_valid)
sense_deferred = scsi_sense_is_deferred(&sshdr);
}
if (blk_pc_request(req)) { /* SG_IO ioctl from block level */
req->errors = result;
if (result) {
clear_errors = 0;
if (scsi_command_normalize_sense(cmd, &sshdr)) {
if (sense_valid) {
/*
* SG_IO wants current and deferred errors
*/
......@@ -742,6 +745,11 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
cmd->request_buffer = NULL;
cmd->request_bufflen = 0;
if (blk_pc_request(req)) { /* SG_IO ioctl from block level */
scsi_end_request(cmd, 1, good_bytes, 0);
return;
}
/*
* Next deal with any sectors which we were able to correctly
* handle.
......@@ -751,8 +759,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
req->nr_sectors, good_bytes));
SCSI_LOG_HLCOMPLETE(1, printk("use_sg is %d\n", cmd->use_sg));
if (clear_errors)
req->errors = 0;
req->errors = 0;
/*
* If multiple sectors are requested in one buffer, then
* they will have been finished off by the first command.
......@@ -779,52 +786,37 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
* sense buffer. We can extract information from this, so we
* can choose a block to remap, etc.
*/
if (driver_byte(result) != 0) {
if (scsi_command_normalize_sense(cmd, &sshdr) &&
!scsi_sense_is_deferred(&sshdr)) {
/*
* If the device is in the process of becoming ready,
* retry.
*/
if (sshdr.asc == 0x04 && sshdr.ascq == 0x01) {
if (sense_valid && !sense_deferred) {
switch (sshdr.sense_key) {
case UNIT_ATTENTION:
if (cmd->device->removable) {
/* detected disc change. set a bit
* and quietly refuse further access.
*/
cmd->device->changed = 1;
cmd = scsi_end_request(cmd, 0,
this_count, 1);
return;
} else {
/*
* Must have been a power glitch, or a
* bus reset. Could not have been a
* media change, so we just retry the
* request and see what happens.
*/
scsi_requeue_command(q, cmd);
return;
}
if (sshdr.sense_key == UNIT_ATTENTION) {
if (cmd->device->removable) {
/* detected disc change. set a bit
* and quietly refuse further access.
*/
cmd->device->changed = 1;
cmd = scsi_end_request(cmd, 0,
this_count, 1);
return;
} else {
/*
* Must have been a power glitch, or a
* bus reset. Could not have been a
* media change, so we just retry the
* request and see what happens.
*/
scsi_requeue_command(q, cmd);
return;
}
}
}
/*
* If we had an ILLEGAL REQUEST returned, then we may have
* performed an unsupported command. The only thing this
* should be would be a ten byte read where only a six byte
* read was supported. Also, on a system where READ CAPACITY
* failed, we may have read past the end of the disk.
*/
/*
* XXX: Following is probably broken since deferred errors
* fall through [dpg 20040827]
*/
switch (sshdr.sense_key) {
break;
case ILLEGAL_REQUEST:
/*
* If we had an ILLEGAL REQUEST returned, then we may
* have performed an unsupported command. The only
* thing this should be would be a ten byte read where
* only a six byte read was supported. Also, on a
* system where READ CAPACITY failed, we may have read
* past the end of the disk.
*/
if (cmd->device->use_10_for_rw &&
(cmd->cmnd[0] == READ_10 ||
cmd->cmnd[0] == WRITE_10)) {
......@@ -841,6 +833,14 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
}
break;
case NOT_READY:
/*
* If the device is in the process of becoming ready,
* retry.
*/
if (sshdr.asc == 0x04 && sshdr.ascq == 0x01) {
scsi_requeue_command(q, cmd);
return;
}
printk(KERN_INFO "Device %s not ready.\n",
req->rq_disk ? req->rq_disk->disk_name : "");
cmd = scsi_end_request(cmd, 0, this_count, 1);
......
......@@ -146,7 +146,7 @@ extern int scsi_sysfs_add_sdev(struct scsi_device *);
extern int scsi_sysfs_add_host(struct Scsi_Host *);
extern int scsi_sysfs_register(void);
extern void scsi_sysfs_unregister(void);
extern int scsi_sysfs_device_initialize(struct scsi_device *);
extern void scsi_sysfs_device_initialize(struct scsi_device *);
extern int scsi_sysfs_target_initialize(struct scsi_device *);
extern struct scsi_transport_template blank_transport_template;
......
......@@ -254,10 +254,7 @@ static struct scsi_device *scsi_alloc_sdev(struct Scsi_Host *shost,
sdev->request_queue->queuedata = sdev;
scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
if (shost->transportt->device_setup) {
if (shost->transportt->device_setup(sdev))
goto out_free_queue;
}
scsi_sysfs_device_initialize(sdev);
if (shost->hostt->slave_alloc) {
ret = shost->hostt->slave_alloc(sdev);
......@@ -272,10 +269,6 @@ static struct scsi_device *scsi_alloc_sdev(struct Scsi_Host *shost,
}
}
if (scsi_sysfs_device_initialize(sdev) != 0)
goto out_cleanup_slave;
/* NOTE: this target initialisation code depends critically on
* lun scanning being sequential. */
if (scsi_sysfs_target_initialize(sdev))
......@@ -288,13 +281,11 @@ static struct scsi_device *scsi_alloc_sdev(struct Scsi_Host *shost,
list_del(&sdev->siblings);
list_del(&sdev->same_target_siblings);
spin_unlock_irqrestore(shost->host_lock, flags);
out_cleanup_slave:
if (shost->hostt->slave_destroy)
shost->hostt->slave_destroy(sdev);
out_device_destroy:
if (shost->transportt->device_destroy)
shost->transportt->device_destroy(sdev);
out_free_queue:
transport_destroy_device(&sdev->sdev_gendev);
scsi_free_queue(sdev->request_queue);
out_free_dev:
kfree(sdev);
......@@ -629,8 +620,7 @@ static int scsi_add_lun(struct scsi_device *sdev, char *inq_result, int *bflags)
if (*bflags & BLIST_NOT_LOCKABLE)
sdev->lockable = 0;
if (sdev->host->transportt->device_configure)
sdev->host->transportt->device_configure(sdev);
transport_configure_device(&sdev->sdev_gendev);
if (sdev->host->hostt->slave_configure)
sdev->host->hostt->slave_configure(sdev);
......@@ -749,8 +739,7 @@ static int scsi_probe_and_add_lun(struct Scsi_Host *host,
} else {
if (sdev->host->hostt->slave_destroy)
sdev->host->hostt->slave_destroy(sdev);
if (sdev->host->transportt->device_destroy)
sdev->host->transportt->device_destroy(sdev);
transport_destroy_device(&sdev->sdev_gendev);
put_device(&sdev->sdev_gendev);
}
out:
......@@ -1330,8 +1319,7 @@ void scsi_free_host_dev(struct scsi_device *sdev)
if (sdev->host->hostt->slave_destroy)
sdev->host->hostt->slave_destroy(sdev);
if (sdev->host->transportt->device_destroy)
sdev->host->transportt->device_destroy(sdev);
transport_destroy_device(&sdev->sdev_gendev);
put_device(&sdev->sdev_gendev);
}
EXPORT_SYMBOL(scsi_free_host_dev);
......
......@@ -170,14 +170,12 @@ void scsi_device_dev_release(struct device *dev)
if (delete) {
struct scsi_target *starget = to_scsi_target(parent);
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
if (!starget->create) {
if (shost->transportt->target_destroy)
shost->transportt->target_destroy(starget);
transport_remove_device(&starget->dev);
device_del(parent);
if (starget->transport_classdev.class)
class_device_unregister(&starget->transport_classdev);
}
transport_destroy_device(&starget->dev);
put_device(parent);
}
if (sdev->request_queue)
......@@ -553,7 +551,6 @@ static void scsi_target_dev_release(struct device *dev)
**/
int scsi_sysfs_add_sdev(struct scsi_device *sdev)
{
struct class_device_attribute **attrs;
struct scsi_target *starget = sdev->sdev_target;
struct Scsi_Host *shost = sdev->host;
int error, i, create;
......@@ -570,31 +567,7 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
printk(KERN_ERR "Target device_add failed\n");
return error;
}
if (starget->transport_classdev.class) {
int i;
struct class_device_attribute **attrs =
sdev->host->transportt->target_attrs;
error = class_device_add(&starget->transport_classdev);
if (error) {
dev_printk(KERN_ERR, &starget->dev,
"Target transport add failed\n");
return error;
}
/* take a reference for the transport_classdev; this
* is released by the transport_class .release */
get_device(&starget->dev);
for (i = 0; attrs[i]; i++) {
error = class_device_create_file(&starget->transport_classdev,
attrs[i]);
if (error) {
dev_printk(KERN_ERR, &starget->dev,
"Target transport attr add failed\n");
return error;
}
}
}
transport_add_device(&starget->dev);
}
if ((error = scsi_device_set_state(sdev, SDEV_RUNNING)) != 0)
......@@ -606,25 +579,15 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
printk(KERN_INFO "error 1\n");
return error;
}
error = class_device_add(&sdev->sdev_classdev);
if (error) {
printk(KERN_INFO "error 2\n");
goto clean_device;
}
/* take a reference for the sdev_classdev; this is
* released by the sdev_class .release */
get_device(&sdev->sdev_gendev);
if (sdev->transport_classdev.class) {
error = class_device_add(&sdev->transport_classdev);
if (error)
goto clean_device2;
/* take a reference for the transport_classdev; this
* is released by the transport_class .release */
get_device(&sdev->sdev_gendev);
}
if (sdev->host->hostt->sdev_attrs) {
for (i = 0; sdev->host->hostt->sdev_attrs[i]; i++) {
error = attr_add(&sdev->sdev_gendev,
......@@ -650,27 +613,16 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
}
}
if (sdev->transport_classdev.class) {
attrs = sdev->host->transportt->device_attrs;
for (i = 0; attrs[i]; i++) {
error = class_device_create_file(&sdev->transport_classdev,
attrs[i]);
if (error) {
scsi_remove_device(sdev);
goto out;
}
}
}
transport_add_device(&sdev->sdev_gendev);
out:
return error;
clean_device2:
class_device_del(&sdev->sdev_classdev);
clean_device:
scsi_device_set_state(sdev, SDEV_CANCEL);
device_del(&sdev->sdev_gendev);
transport_destroy_device(&sdev->sdev_gendev);
put_device(&sdev->sdev_gendev);
return error;
......@@ -689,14 +641,11 @@ void scsi_remove_device(struct scsi_device *sdev)
goto out;
class_device_unregister(&sdev->sdev_classdev);
if (sdev->transport_classdev.class)
class_device_unregister(&sdev->transport_classdev);
device_del(&sdev->sdev_gendev);
scsi_device_set_state(sdev, SDEV_DEL);
if (sdev->host->hostt->slave_destroy)
sdev->host->hostt->slave_destroy(sdev);
if (sdev->host->transportt->device_destroy)
sdev->host->transportt->device_destroy(sdev);
transport_unregister_device(&sdev->sdev_gendev);
put_device(&sdev->sdev_gendev);
out:
......@@ -786,41 +735,11 @@ int scsi_sysfs_add_host(struct Scsi_Host *shost)
}
}
class_device_initialize(&shost->transport_classdev);
shost->transport_classdev.class = shost->transportt->host_class;
shost->transport_classdev.dev = &shost->shost_gendev;
snprintf(shost->transport_classdev.class_id, BUS_ID_SIZE,
"host%d", shost->host_no);
if (shost->transport_classdev.class) {
struct class_device_attribute **attrs =
shost->transportt->host_attrs;
error = class_device_add(&shost->transport_classdev);
if (error)
return error;
/* take a reference for the transport_classdev; this
* is released by the transport_class .release */
get_device(&shost->shost_gendev);
for (i = 0; attrs[i]; i++) {
error = class_device_create_file(&shost->transport_classdev,
attrs[i]);
if (error)
return error;
}
if (shost->transportt->host_statistics) {
error = sysfs_create_group(
&shost->transport_classdev.kobj,
shost->transportt->host_statistics);
if (error)
return error;
}
}
transport_register_device(&shost->shost_gendev);
return 0;
}
int scsi_sysfs_device_initialize(struct scsi_device *sdev)
void scsi_sysfs_device_initialize(struct scsi_device *sdev)
{
device_initialize(&sdev->sdev_gendev);
sdev->sdev_gendev.bus = &scsi_bus_type;
......@@ -836,14 +755,14 @@ int scsi_sysfs_device_initialize(struct scsi_device *sdev)
"%d:%d:%d:%d", sdev->host->host_no,
sdev->channel, sdev->id, sdev->lun);
class_device_initialize(&sdev->transport_classdev);
sdev->transport_classdev.dev = &sdev->sdev_gendev;
sdev->transport_classdev.class = sdev->host->transportt->device_class;
snprintf(sdev->transport_classdev.class_id, BUS_ID_SIZE,
"%d:%d:%d:%d", sdev->host->host_no,
sdev->channel, sdev->id, sdev->lun);
return 0;
transport_setup_device(&sdev->sdev_gendev);
}
int scsi_is_sdev_device(const struct device *dev)
{
return dev->release == scsi_device_dev_release;
}
EXPORT_SYMBOL(scsi_is_sdev_device);
int scsi_sysfs_target_initialize(struct scsi_device *sdev)
{
......@@ -886,12 +805,6 @@ int scsi_sysfs_target_initialize(struct scsi_device *sdev)
dev->release = scsi_target_dev_release;
sprintf(dev->bus_id, "target%d:%d:%d",
shost->host_no, sdev->channel, sdev->id);
class_device_initialize(&starget->transport_classdev);
starget->transport_classdev.dev = &starget->dev;
starget->transport_classdev.class = shost->transportt->target_class;
snprintf(starget->transport_classdev.class_id, BUS_ID_SIZE,
"target%d:%d:%d",
shost->host_no, sdev->channel, sdev->id);
starget->id = sdev->id;
starget->channel = sdev->channel;
create = starget->create = 1;
......@@ -907,11 +820,17 @@ int scsi_sysfs_target_initialize(struct scsi_device *sdev)
sdev->sdev_target = starget;
list_add_tail(&sdev->siblings, &shost->__devices);
spin_unlock_irqrestore(shost->host_lock, flags);
if (create && shost->transportt->target_setup)
shost->transportt->target_setup(starget);
if (create)
transport_setup_device(&starget->dev);
return 0;
}
int scsi_is_target_device(const struct device *dev)
{
return dev->release == scsi_target_dev_release;
}
EXPORT_SYMBOL(scsi_is_target_device);
/* A blank transport template that is used in drivers that don't
* yet implement Transport Attributes */
struct scsi_transport_template blank_transport_template = { NULL, };
......@@ -180,8 +180,6 @@ show_fc_fc4s (char *buf, u8 *fc4_list)
static void transport_class_release(struct class_device *class_dev);
static void host_class_release(struct class_device *class_dev);
static void fc_timeout_blocked_host(void *data);
static void fc_timeout_blocked_tgt(void *data);
......@@ -207,32 +205,9 @@ struct fc_internal {
#define to_fc_internal(tmpl) container_of(tmpl, struct fc_internal, t)
struct class fc_transport_class = {
.name = "fc_transport",
.release = transport_class_release,
};
struct class fc_host_class = {
.name = "fc_host",
.release = host_class_release,
};
static __init int fc_transport_init(void)
{
int error = class_register(&fc_host_class);
if (error)
return error;
return class_register(&fc_transport_class);
}
static void __exit fc_transport_exit(void)
{
class_unregister(&fc_transport_class);
class_unregister(&fc_host_class);
}
static int fc_setup_starget_transport_attrs(struct scsi_target *starget)
static int fc_add_target(struct device *dev)
{
struct scsi_target *starget = to_scsi_target(dev);
/*
* Set default values easily detected by the midlayer as
* failure cases. The scsi lldd is responsible for initializing
......@@ -247,15 +222,24 @@ static int fc_setup_starget_transport_attrs(struct scsi_target *starget)
return 0;
}
static void fc_destroy_starget(struct scsi_target *starget)
static int fc_remove_target(struct device *dev)
{
struct scsi_target *starget = to_scsi_target(dev);
/* Stop the target timer */
if (cancel_delayed_work(&fc_starget_dev_loss_work(starget)))
flush_scheduled_work();
return 0;
}
static int fc_setup_host_transport_attrs(struct Scsi_Host *shost)
static DECLARE_TRANSPORT_CLASS(fc_transport_class,
"fc_transport",
fc_add_target,
fc_remove_target,
NULL);
static int fc_add_host(struct device *dev)
{
struct Scsi_Host *shost = dev_to_shost(dev);
/*
* Set default values easily detected by the midlayer as
* failure cases. The scsi lldd is responsible for initializing
......@@ -297,26 +281,35 @@ static int fc_setup_host_transport_attrs(struct Scsi_Host *shost)
return 0;
}
static void fc_destroy_host(struct Scsi_Host *shost)
static int fc_remove_host(struct device *dev)
{
struct Scsi_Host *shost = dev_to_shost(dev);
/* Stop the host timer */
if (cancel_delayed_work(&fc_host_link_down_work(shost)))
flush_scheduled_work();
return 0;
}
static void transport_class_release(struct class_device *class_dev)
static DECLARE_TRANSPORT_CLASS(fc_host_class,
"fc_host",
fc_add_host,
fc_remove_host,
NULL);
static __init int fc_transport_init(void)
{
struct scsi_target *starget = transport_class_to_starget(class_dev);
put_device(&starget->dev);
int error = transport_class_register(&fc_host_class);
if (error)
return error;
return transport_class_register(&fc_transport_class);
}
static void host_class_release(struct class_device *class_dev)
static void __exit fc_transport_exit(void)
{
struct Scsi_Host *shost = transport_class_to_shost(class_dev);
put_device(&shost->shost_gendev);
transport_class_unregister(&fc_transport_class);
transport_class_unregister(&fc_host_class);
}
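The pattern above -- declare a transport class with per-device setup/remove callbacks, then register and unregister it at module init and exit -- is not FC-specific. The sketch below restates it for a hypothetical "foo" transport against the same DECLARE_TRANSPORT_CLASS / transport_class_register interface; all foo_* names are placeholders.
/* Hedged sketch of the declare-and-register pattern for a hypothetical
 * transport; foo_add_target()/foo_remove_target() stand in for
 * driver-specific setup and teardown. */
static int foo_add_target(struct device *dev)
{
	/* to_scsi_target(dev) gives the target being added; a real
	 * transport would initialize its private attributes here. */
	return 0;
}
static int foo_remove_target(struct device *dev)
{
	/* undo whatever foo_add_target() set up */
	return 0;
}
static DECLARE_TRANSPORT_CLASS(foo_transport_class,
			       "foo_transport",
			       foo_add_target,
			       foo_remove_target,
			       NULL);
static __init int foo_transport_init(void)
{
	return transport_class_register(&foo_transport_class);
}
static void __exit foo_transport_exit(void)
{
	transport_class_unregister(&foo_transport_class);
}
module_init(foo_transport_init);
module_exit(foo_transport_exit);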
/*
* Remote Port (Target) Attribute Management
*/
......@@ -731,6 +724,35 @@ static struct attribute_group fc_statistics_group = {
.attrs = fc_statistics_attrs,
};
static int fc_host_match(struct attribute_container *cont,
struct device *dev)
{
struct Scsi_Host *shost;
if (!scsi_is_host_device(dev))
return 0;
shost = dev_to_shost(dev);
if (!shost->transportt || shost->transportt->host_attrs.class
!= &fc_host_class.class)
return 0;
return 1;
}
static int fc_target_match(struct attribute_container *cont,
struct device *dev)
{
struct Scsi_Host *shost;
if (!scsi_is_target_device(dev))
return 0;
shost = dev_to_shost(dev->parent);
if (!shost->transportt || shost->transportt->host_attrs.class
!= &fc_host_class.class)
return 0;
return 1;
}
struct scsi_transport_template *
......@@ -745,16 +767,16 @@ fc_attach_transport(struct fc_function_template *ft)
memset(i, 0, sizeof(struct fc_internal));
i->t.target_attrs = &i->starget_attrs[0];
i->t.target_class = &fc_transport_class;
i->t.target_setup = &fc_setup_starget_transport_attrs;
i->t.target_destroy = &fc_destroy_starget;
i->t.target_attrs.attrs = &i->starget_attrs[0];
i->t.target_attrs.class = &fc_transport_class.class;
i->t.target_attrs.match = fc_target_match;
attribute_container_register(&i->t.target_attrs);
i->t.target_size = sizeof(struct fc_starget_attrs);
i->t.host_attrs = &i->host_attrs[0];
i->t.host_class = &fc_host_class;
i->t.host_setup = &fc_setup_host_transport_attrs;
i->t.host_destroy = &fc_destroy_host;
i->t.host_attrs.attrs = &i->host_attrs[0];
i->t.host_attrs.class = &fc_host_class.class;
i->t.host_attrs.match = fc_host_match;
attribute_container_register(&i->t.host_attrs);
i->t.host_size = sizeof(struct fc_host_attrs);
if (ft->get_fc_host_stats)
......
......@@ -40,28 +40,17 @@ struct iscsi_internal {
#define to_iscsi_internal(tmpl) container_of(tmpl, struct iscsi_internal, t)
static void iscsi_transport_class_release(struct class_device *class_dev)
{
struct scsi_target *starget = transport_class_to_starget(class_dev);
put_device(&starget->dev);
}
struct class iscsi_transport_class = {
.name = "iscsi_transport_class",
.release = iscsi_transport_class_release,
};
static void iscsi_host_class_release(struct class_device *class_dev)
{
struct Scsi_Host *shost = transport_class_to_shost(class_dev);
put_device(&shost->shost_gendev);
}
struct class iscsi_host_class = {
.name = "iscsi_host",
.release = iscsi_host_class_release,
};
static DECLARE_TRANSPORT_CLASS(iscsi_transport_class,
"iscsi_transport",
NULL,
NULL,
NULL);
static DECLARE_TRANSPORT_CLASS(iscsi_host_class,
"iscsi_host",
NULL,
NULL,
NULL);
/*
* iSCSI target and session attrs
*/
......@@ -265,6 +254,36 @@ iscsi_host_rd_str_attr(initiator_alias);
count++; \
}
static int iscsi_host_match(struct attribute_container *cont,
struct device *dev)
{
struct Scsi_Host *shost;
if (!scsi_is_host_device(dev))
return 0;
shost = dev_to_shost(dev);
if (!shost->transportt || shost->transportt->host_attrs.class
!= &iscsi_host_class.class)
return 0;
return 1;
}
static int iscsi_target_match(struct attribute_container *cont,
struct device *dev)
{
struct Scsi_Host *shost;
if (!scsi_is_target_device(dev))
return 0;
shost = dev_to_shost(dev->parent);
if (!shost->transportt || shost->transportt->host_attrs.class
!= &iscsi_host_class.class)
return 0;
return 1;
}
struct scsi_transport_template *
iscsi_attach_transport(struct iscsi_function_template *fnt)
{
......@@ -278,9 +297,10 @@ iscsi_attach_transport(struct iscsi_function_template *fnt)
memset(i, 0, sizeof(struct iscsi_internal));
i->fnt = fnt;
i->t.target_attrs = &i->session_attrs[0];
i->t.target_class = &iscsi_transport_class;
i->t.target_setup = NULL;
i->t.target_attrs.attrs = &i->session_attrs[0];
i->t.target_attrs.class = &iscsi_transport_class.class;
i->t.target_attrs.match = iscsi_target_match;
attribute_container_register(&i->t.target_attrs);
i->t.target_size = sizeof(struct iscsi_class_session);
SETUP_SESSION_RD_ATTR(tsih);
......@@ -307,9 +327,10 @@ iscsi_attach_transport(struct iscsi_function_template *fnt)
BUG_ON(count > ISCSI_SESSION_ATTRS);
i->session_attrs[count] = NULL;
i->t.host_attrs = &i->host_attrs[0];
i->t.host_class = &iscsi_host_class;
i->t.host_setup = NULL;
i->t.host_attrs.attrs = &i->host_attrs[0];
i->t.host_attrs.class = &iscsi_host_class.class;
i->t.host_attrs.match = iscsi_host_match;
attribute_container_register(&i->t.host_attrs);
i->t.host_size = 0;
count = 0;
......@@ -334,17 +355,17 @@ EXPORT_SYMBOL(iscsi_release_transport);
static __init int iscsi_transport_init(void)
{
int err = class_register(&iscsi_transport_class);
int err = transport_class_register(&iscsi_transport_class);
if (err)
return err;
return class_register(&iscsi_host_class);
return transport_class_register(&iscsi_host_class);
}
static void __exit iscsi_transport_exit(void)
{
class_unregister(&iscsi_host_class);
class_unregister(&iscsi_transport_class);
transport_class_unregister(&iscsi_host_class);
transport_class_unregister(&iscsi_transport_class);
}
module_init(iscsi_transport_init);
......
......@@ -2,6 +2,7 @@
* Parallel SCSI (SPI) transport specific attributes exported to sysfs.
*
* Copyright (c) 2003 Silicon Graphics, Inc. All rights reserved.
* Copyright (c) 2004, 2005 James Bottomley <James.Bottomley@SteelEye.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
......@@ -37,9 +38,6 @@
#define SPI_PRINTK(x, l, f, a...) dev_printk(l, &(x)->dev, f , ##a)
static void transport_class_release(struct class_device *class_dev);
static void host_class_release(struct class_device *class_dev);
#define SPI_NUM_ATTRS 10 /* increase this if you add attributes */
#define SPI_OTHER_ATTRS 1 /* Increase this if you add "always
* on" attributes */
......@@ -119,40 +117,39 @@ static inline enum spi_signal_type spi_signal_to_value(const char *name)
return SPI_SIGNAL_UNKNOWN;
}
static int spi_host_setup(struct device *dev)
{
struct Scsi_Host *shost = dev_to_shost(dev);
struct class spi_transport_class = {
.name = "spi_transport",
.release = transport_class_release,
};
struct class spi_host_class = {
.name = "spi_host",
.release = host_class_release,
};
spi_signalling(shost) = SPI_SIGNAL_UNKNOWN;
static __init int spi_transport_init(void)
{
int error = class_register(&spi_host_class);
if (error)
return error;
return class_register(&spi_transport_class);
return 0;
}
static void __exit spi_transport_exit(void)
{
class_unregister(&spi_transport_class);
class_unregister(&spi_host_class);
}
static DECLARE_TRANSPORT_CLASS(spi_host_class,
"spi_host",
spi_host_setup,
NULL,
NULL);
static int spi_setup_host_attrs(struct Scsi_Host *shost)
static int spi_host_match(struct attribute_container *cont,
struct device *dev)
{
spi_signalling(shost) = SPI_SIGNAL_UNKNOWN;
struct Scsi_Host *shost;
return 0;
if (!scsi_is_host_device(dev))
return 0;
shost = dev_to_shost(dev);
if (!shost->transportt || shost->transportt->host_attrs.class
!= &spi_host_class.class)
return 0;
return 1;
}
static int spi_configure_device(struct scsi_device *sdev)
static int spi_device_configure(struct device *dev)
{
struct scsi_device *sdev = to_scsi_device(dev);
struct scsi_target *starget = sdev->sdev_target;
/* Populate the target capability fields with the values
......@@ -168,8 +165,10 @@ static int spi_configure_device(struct scsi_device *sdev)
return 0;
}
static int spi_setup_transport_attrs(struct scsi_target *starget)
static int spi_setup_transport_attrs(struct device *dev)
{
struct scsi_target *starget = to_scsi_target(dev);
spi_period(starget) = -1; /* illegal value */
spi_offset(starget) = 0; /* async */
spi_width(starget) = 0; /* narrow */
......@@ -187,18 +186,6 @@ static int spi_setup_transport_attrs(struct scsi_target *starget)
return 0;
}
static void transport_class_release(struct class_device *class_dev)
{
struct scsi_target *starget = transport_class_to_starget(class_dev);
put_device(&starget->dev);
}
static void host_class_release(struct class_device *class_dev)
{
struct Scsi_Host *shost = transport_class_to_shost(class_dev);
put_device(&shost->shost_gendev);
}
#define spi_transport_show_function(field, format_string) \
\
static ssize_t \
......@@ -823,6 +810,48 @@ EXPORT_SYMBOL(spi_schedule_dv_device);
i->host_attrs[count] = &i->private_host_attrs[count]; \
count++
static int spi_device_match(struct attribute_container *cont,
struct device *dev)
{
struct scsi_device *sdev;
struct Scsi_Host *shost;
if (!scsi_is_sdev_device(dev))
return 0;
sdev = to_scsi_device(dev);
shost = sdev->host;
if (!shost->transportt || shost->transportt->host_attrs.class
!= &spi_host_class.class)
return 0;
return 1;
}
static int spi_target_match(struct attribute_container *cont,
struct device *dev)
{
struct Scsi_Host *shost;
if (!scsi_is_target_device(dev))
return 0;
shost = dev_to_shost(dev->parent);
if (!shost->transportt || shost->transportt->host_attrs.class
!= &spi_host_class.class)
return 0;
return 1;
}
static DECLARE_TRANSPORT_CLASS(spi_transport_class,
"spi_transport",
spi_setup_transport_attrs,
NULL,
NULL);
static DECLARE_ANON_TRANSPORT_CLASS(spi_device_class,
spi_device_match,
spi_device_configure);
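DECLARE_ANON_TRANSPORT_CLASS() builds a class that never creates sysfs class devices; it only supplies a match and a configure hook, which is all SPI needs to run spi_device_configure() on each newly added scsi_device. As a hedged sketch with hypothetical names (my_device_class, my_dev_match, my_dev_configure), another transport would use it the same way:

/* Hypothetical sketch: an anonymous class that configures matching
 * devices but exports no attributes of its own. */
static int my_dev_match(struct attribute_container *cont,
			struct device *dev)
{
	return scsi_is_sdev_device(dev);
}

static int my_dev_configure(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);

	/* purely illustrative tweak; a real transport would apply its
	 * negotiated per-device settings here */
	sdev->use_10_for_rw = 1;
	return 0;
}

static DECLARE_ANON_TRANSPORT_CLASS(my_device_class,
				    my_dev_match,
				    my_dev_configure);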
struct scsi_transport_template *
spi_attach_transport(struct spi_function_template *ft)
{
......@@ -835,14 +864,15 @@ spi_attach_transport(struct spi_function_template *ft)
memset(i, 0, sizeof(struct spi_internal));
i->t.target_attrs = &i->attrs[0];
i->t.target_class = &spi_transport_class;
i->t.target_setup = &spi_setup_transport_attrs;
i->t.device_configure = &spi_configure_device;
i->t.target_attrs.class = &spi_transport_class.class;
i->t.target_attrs.attrs = &i->attrs[0];
i->t.target_attrs.match = spi_target_match;
attribute_container_register(&i->t.target_attrs);
i->t.target_size = sizeof(struct spi_transport_attrs);
i->t.host_attrs = &i->host_attrs[0];
i->t.host_class = &spi_host_class;
i->t.host_setup = &spi_setup_host_attrs;
i->t.host_attrs.class = &spi_host_class.class;
i->t.host_attrs.attrs = &i->host_attrs[0];
i->t.host_attrs.match = spi_host_match;
attribute_container_register(&i->t.host_attrs);
i->t.host_size = sizeof(struct spi_host_attrs);
i->f = ft;
......@@ -884,6 +914,21 @@ void spi_release_transport(struct scsi_transport_template *t)
}
EXPORT_SYMBOL(spi_release_transport);
static __init int spi_transport_init(void)
{
int error = transport_class_register(&spi_transport_class);
if (error)
return error;
error = anon_transport_class_register(&spi_device_class);
return transport_class_register(&spi_host_class);
}
static void __exit spi_transport_exit(void)
{
transport_class_unregister(&spi_transport_class);
anon_transport_class_unregister(&spi_device_class);
transport_class_unregister(&spi_host_class);
}
MODULE_AUTHOR("Martin Hicks");
MODULE_DESCRIPTION("SPI Transport Attributes");
......
......@@ -87,6 +87,7 @@
* Number of allowed retries
*/
#define SD_MAX_RETRIES 5
#define SD_PASSTHROUGH_RETRIES 1
static void scsi_disk_release(struct kref *kref);
......@@ -197,9 +198,11 @@ static struct scsi_disk *scsi_disk_get(struct gendisk *disk)
static void scsi_disk_put(struct scsi_disk *sdkp)
{
struct scsi_device *sdev = sdkp->device;
down(&sd_ref_sem);
kref_put(&sdkp->kref, scsi_disk_release);
scsi_device_put(sdkp->device);
scsi_device_put(sdev);
up(&sd_ref_sem);
}
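The reordering in scsi_disk_put() is a use-after-free fix, not cosmetics: kref_put() may drop the last reference and run scsi_disk_release(), which frees sdkp, so the scsi_device pointer has to be read into a local before the put.

/* Note on the ordering above: reading sdkp->device after kref_put()
 * could touch freed memory, hence the saved sdev. The same fix is
 * applied to scsi_cd_put() in sr.c further down in this patch. */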
......@@ -217,15 +220,14 @@ static int sd_init_command(struct scsi_cmnd * SCpnt)
struct gendisk *disk;
sector_t block;
struct scsi_device *sdp = SCpnt->device;
struct request *rq = SCpnt->request;
timeout = sdp->timeout;
/*
* these are already setup, just copy cdb basically
* SG_IO from block layer already setup, just copy cdb basically
*/
if (SCpnt->request->flags & REQ_BLOCK_PC) {
struct request *rq = SCpnt->request;
if (blk_pc_request(rq)) {
if (sizeof(rq->cmd) > sizeof(SCpnt->cmnd))
return 0;
......@@ -242,26 +244,28 @@ static int sd_init_command(struct scsi_cmnd * SCpnt)
timeout = rq->timeout;
SCpnt->transfersize = rq->data_len;
SCpnt->allowed = SD_PASSTHROUGH_RETRIES;
goto queue;
}
/*
* we only do REQ_CMD and REQ_BLOCK_PC
*/
if (!(SCpnt->request->flags & REQ_CMD))
if (!blk_fs_request(rq))
return 0;
disk = SCpnt->request->rq_disk;
block = SCpnt->request->sector;
disk = rq->rq_disk;
block = rq->sector;
this_count = SCpnt->request_bufflen >> 9;
SCSI_LOG_HLQUEUE(1, printk("sd_init_command: disk=%s, block=%llu, "
"count=%d\n", disk->disk_name, (unsigned long long)block, this_count));
"count=%d\n", disk->disk_name,
(unsigned long long)block, this_count));
if (!sdp || !scsi_device_online(sdp) ||
block + SCpnt->request->nr_sectors > get_capacity(disk)) {
block + rq->nr_sectors > get_capacity(disk)) {
SCSI_LOG_HLQUEUE(2, printk("Finishing %ld sectors\n",
SCpnt->request->nr_sectors));
rq->nr_sectors));
SCSI_LOG_HLQUEUE(2, printk("Retry with 0x%p\n", SCpnt));
return 0;
}
......@@ -289,7 +293,7 @@ static int sd_init_command(struct scsi_cmnd * SCpnt)
* for this.
*/
if (sdp->sector_size == 1024) {
if ((block & 1) || (SCpnt->request->nr_sectors & 1)) {
if ((block & 1) || (rq->nr_sectors & 1)) {
printk(KERN_ERR "sd: Bad block number requested");
return 0;
} else {
......@@ -298,7 +302,7 @@ static int sd_init_command(struct scsi_cmnd * SCpnt)
}
}
if (sdp->sector_size == 2048) {
if ((block & 3) || (SCpnt->request->nr_sectors & 3)) {
if ((block & 3) || (rq->nr_sectors & 3)) {
printk(KERN_ERR "sd: Bad block number requested");
return 0;
} else {
......@@ -307,7 +311,7 @@ static int sd_init_command(struct scsi_cmnd * SCpnt)
}
}
if (sdp->sector_size == 4096) {
if ((block & 7) || (SCpnt->request->nr_sectors & 7)) {
if ((block & 7) || (rq->nr_sectors & 7)) {
printk(KERN_ERR "sd: Bad block number requested");
return 0;
} else {
......@@ -315,25 +319,24 @@ static int sd_init_command(struct scsi_cmnd * SCpnt)
this_count = this_count >> 3;
}
}
if (rq_data_dir(SCpnt->request) == WRITE) {
if (rq_data_dir(rq) == WRITE) {
if (!sdp->writeable) {
return 0;
}
SCpnt->cmnd[0] = WRITE_6;
SCpnt->sc_data_direction = DMA_TO_DEVICE;
} else if (rq_data_dir(SCpnt->request) == READ) {
} else if (rq_data_dir(rq) == READ) {
SCpnt->cmnd[0] = READ_6;
SCpnt->sc_data_direction = DMA_FROM_DEVICE;
} else {
printk(KERN_ERR "sd: Unknown command %lx\n",
SCpnt->request->flags);
/* overkill panic("Unknown sd command %lx\n", SCpnt->request->flags); */
printk(KERN_ERR "sd: Unknown command %lx\n", rq->flags);
/* overkill panic("Unknown sd command %lx\n", rq->flags); */
return 0;
}
SCSI_LOG_HLQUEUE(2, printk("%s : %s %d/%ld 512 byte blocks.\n",
disk->disk_name, (rq_data_dir(SCpnt->request) == WRITE) ?
"writing" : "reading", this_count, SCpnt->request->nr_sectors));
disk->disk_name, (rq_data_dir(rq) == WRITE) ?
"writing" : "reading", this_count, rq->nr_sectors));
SCpnt->cmnd[1] = 0;
......@@ -385,9 +388,9 @@ static int sd_init_command(struct scsi_cmnd * SCpnt)
*/
SCpnt->transfersize = sdp->sector_size;
SCpnt->underflow = this_count << 9;
SCpnt->allowed = SD_MAX_RETRIES;
queue:
SCpnt->allowed = SD_MAX_RETRIES;
SCpnt->timeout_per_command = timeout;
/*
......@@ -760,15 +763,26 @@ static void sd_rw_intr(struct scsi_cmnd * SCpnt)
int this_count = SCpnt->bufflen;
int good_bytes = (result == 0 ? this_count : 0);
sector_t block_sectors = 1;
u64 first_err_block;
sector_t error_sector;
struct scsi_sense_hdr sshdr;
int sense_valid = 0;
int sense_deferred = 0;
int info_valid;
if (result) {
sense_valid = scsi_command_normalize_sense(SCpnt, &sshdr);
if (sense_valid)
sense_deferred = scsi_sense_is_deferred(&sshdr);
}
#ifdef CONFIG_SCSI_LOGGING
SCSI_LOG_HLCOMPLETE(1, printk("sd_rw_intr: %s: res=0x%x\n",
SCpnt->request->rq_disk->disk_name, result));
if (0 != result) {
SCSI_LOG_HLCOMPLETE(1, printk("sd_rw_intr: sb[0,2,asc,ascq]"
"=%x,%x,%x,%x\n", SCpnt->sense_buffer[0],
SCpnt->sense_buffer[2], SCpnt->sense_buffer[12],
SCpnt->sense_buffer[13]));
if (sense_valid) {
SCSI_LOG_HLCOMPLETE(1, printk("sd_rw_intr: sb[respc,sk,asc,"
"ascq]=%x,%x,%x,%x\n", sshdr.response_code,
sshdr.sense_key, sshdr.asc, sshdr.ascq));
}
#endif
/*
......@@ -777,19 +791,27 @@ static void sd_rw_intr(struct scsi_cmnd * SCpnt)
unnecessary additional work such as memcpy's that could be avoided.
*/
/* An error occurred */
if (driver_byte(result) != 0 && /* An error occurred */
(SCpnt->sense_buffer[0] & 0x7f) == 0x70) { /* Sense current */
switch (SCpnt->sense_buffer[2]) {
/*
* If SG_IO from block layer then set good_bytes to stop retries;
* else if errors, check them, and if necessary prepare for
* (partial) retries.
*/
if (blk_pc_request(SCpnt->request))
good_bytes = this_count;
else if (driver_byte(result) != 0 &&
sense_valid && !sense_deferred) {
switch (sshdr.sense_key) {
case MEDIUM_ERROR:
if (!(SCpnt->sense_buffer[0] & 0x80))
break;
if (!blk_fs_request(SCpnt->request))
break;
error_sector = (SCpnt->sense_buffer[3] << 24) |
(SCpnt->sense_buffer[4] << 16) |
(SCpnt->sense_buffer[5] << 8) |
SCpnt->sense_buffer[6];
info_valid = scsi_get_sense_info_fld(
SCpnt->sense_buffer, SCSI_SENSE_BUFFERSIZE,
&first_err_block);
/*
* May want to warn and skip if following cast results
* in actual truncation (if sector_t < 64 bits)
*/
error_sector = (sector_t)first_err_block;
if (SCpnt->request->bio != NULL)
block_sectors = bio_sectors(SCpnt->request->bio);
switch (SCpnt->device->sector_size) {
......@@ -829,7 +851,7 @@ static void sd_rw_intr(struct scsi_cmnd * SCpnt)
*/
scsi_print_sense("sd", SCpnt);
SCpnt->result = 0;
SCpnt->sense_buffer[0] = 0x0;
memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
good_bytes = this_count;
break;
......@@ -858,16 +880,20 @@ static void sd_rw_intr(struct scsi_cmnd * SCpnt)
static int media_not_present(struct scsi_disk *sdkp, struct scsi_request *srp)
{
struct scsi_sense_hdr sshdr;
if (!srp->sr_result)
return 0;
if (!(driver_byte(srp->sr_result) & DRIVER_SENSE))
return 0;
if (srp->sr_sense_buffer[2] != NOT_READY &&
srp->sr_sense_buffer[2] != UNIT_ATTENTION)
return 0;
if (srp->sr_sense_buffer[12] != 0x3A) /* medium not present */
return 0;
/* not invoked for commands that could return deferred errors */
if (scsi_request_normalize_sense(srp, &sshdr)) {
if (sshdr.sense_key != NOT_READY &&
sshdr.sense_key != UNIT_ATTENTION)
return 0;
if (sshdr.asc != 0x3A) /* medium not present */
return 0;
}
set_media_not_present(sdkp);
return 1;
}
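Throughout sd.c the raw sense_buffer[2]/[12]/[13] peeks are replaced by scsi_request_normalize_sense() (or scsi_command_normalize_sense() for a struct scsi_cmnd), which fills a struct scsi_sense_hdr with response_code, sense_key, asc and ascq whether the device returned fixed or descriptor format sense data. A hedged sketch of the idiom, using the hypothetical helper name check_not_ready:

/* Hypothetical helper showing the normalized-sense idiom used above. */
static int check_not_ready(struct scsi_request *srp)
{
	struct scsi_sense_hdr sshdr;

	if (!(driver_byte(srp->sr_result) & DRIVER_SENSE))
		return 0;
	if (!scsi_request_normalize_sense(srp, &sshdr))
		return 0;	/* no parsable sense data */

	/* sense_key/asc/ascq are valid for both sense formats once
	 * normalization succeeds */
	return sshdr.sense_key == NOT_READY && sshdr.asc == 0x04;
}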
......@@ -882,6 +908,8 @@ sd_spinup_disk(struct scsi_disk *sdkp, char *diskname,
unsigned long spintime_value = 0;
int retries, spintime;
unsigned int the_result;
struct scsi_sense_hdr sshdr;
int sense_valid = 0;
spintime = 0;
......@@ -895,19 +923,22 @@ sd_spinup_disk(struct scsi_disk *sdkp, char *diskname,
memset((void *) &cmd[1], 0, 9);
SRpnt->sr_cmd_len = 0;
SRpnt->sr_sense_buffer[0] = 0;
SRpnt->sr_sense_buffer[2] = 0;
memset(SRpnt->sr_sense_buffer, 0,
SCSI_SENSE_BUFFERSIZE);
SRpnt->sr_data_direction = DMA_NONE;
scsi_wait_req (SRpnt, (void *) cmd, (void *) buffer,
0/*512*/, SD_TIMEOUT, SD_MAX_RETRIES);
the_result = SRpnt->sr_result;
if (the_result)
sense_valid = scsi_request_normalize_sense(
SRpnt, &sshdr);
retries++;
} while (retries < 3 &&
(!scsi_status_is_good(the_result) ||
((driver_byte(the_result) & DRIVER_SENSE) &&
SRpnt->sr_sense_buffer[2] == UNIT_ATTENTION)));
sense_valid && sshdr.sense_key == UNIT_ATTENTION)));
/*
* If the drive has indicated to us that it doesn't have
......@@ -921,7 +952,8 @@ sd_spinup_disk(struct scsi_disk *sdkp, char *diskname,
/* no sense, TUR either succeeded or failed
* with a status error */
if(!spintime && !scsi_status_is_good(the_result))
printk(KERN_NOTICE "%s: Unit Not Ready, error = 0x%x\n", diskname, the_result);
printk(KERN_NOTICE "%s: Unit Not Ready, "
"error = 0x%x\n", diskname, the_result);
break;
}
......@@ -936,15 +968,15 @@ sd_spinup_disk(struct scsi_disk *sdkp, char *diskname,
* If manual intervention is required, or this is an
* absent USB storage device, a spinup is meaningless.
*/
if (SRpnt->sr_sense_buffer[2] == NOT_READY &&
SRpnt->sr_sense_buffer[12] == 4 /* not ready */ &&
SRpnt->sr_sense_buffer[13] == 3) {
if (sense_valid &&
sshdr.sense_key == NOT_READY &&
sshdr.asc == 4 && sshdr.ascq == 3) {
break; /* manual intervention required */
/*
* Issue command to spin up drive when not ready
*/
} else if (SRpnt->sr_sense_buffer[2] == NOT_READY) {
} else if (sense_valid && sshdr.sense_key == NOT_READY) {
if (!spintime) {
printk(KERN_NOTICE "%s: Spinning up disk...",
diskname);
......@@ -953,8 +985,8 @@ sd_spinup_disk(struct scsi_disk *sdkp, char *diskname,
memset((void *) &cmd[2], 0, 8);
cmd[4] = 1; /* Start spin cycle */
SRpnt->sr_cmd_len = 0;
SRpnt->sr_sense_buffer[0] = 0;
SRpnt->sr_sense_buffer[2] = 0;
memset(SRpnt->sr_sense_buffer, 0,
SCSI_SENSE_BUFFERSIZE);
SRpnt->sr_data_direction = DMA_NONE;
scsi_wait_req(SRpnt, (void *)cmd,
......@@ -970,7 +1002,8 @@ sd_spinup_disk(struct scsi_disk *sdkp, char *diskname,
/* we don't understand the sense code, so it's
* probably pointless to loop */
if(!spintime) {
printk(KERN_NOTICE "%s: Unit Not Ready, sense:\n", diskname);
printk(KERN_NOTICE "%s: Unit Not Ready, "
"sense:\n", diskname);
scsi_print_req_sense("", SRpnt);
}
break;
......@@ -998,6 +1031,8 @@ sd_read_capacity(struct scsi_disk *sdkp, char *diskname,
int the_result, retries;
int sector_size = 0;
int longrc = 0;
struct scsi_sense_hdr sshdr;
int sense_valid = 0;
repeat:
retries = 3;
......@@ -1015,8 +1050,7 @@ sd_read_capacity(struct scsi_disk *sdkp, char *diskname,
}
SRpnt->sr_cmd_len = 0;
SRpnt->sr_sense_buffer[0] = 0;
SRpnt->sr_sense_buffer[2] = 0;
memset(SRpnt->sr_sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
SRpnt->sr_data_direction = DMA_FROM_DEVICE;
scsi_wait_req(SRpnt, (void *) cmd, (void *) buffer,
......@@ -1026,6 +1060,9 @@ sd_read_capacity(struct scsi_disk *sdkp, char *diskname,
return;
the_result = SRpnt->sr_result;
if (the_result)
sense_valid = scsi_request_normalize_sense(SRpnt,
&sshdr);
retries--;
} while (the_result && retries);
......@@ -1047,7 +1084,7 @@ sd_read_capacity(struct scsi_disk *sdkp, char *diskname,
/* Set dirty bit for removable devices if not ready -
* sometimes drives will not report this properly. */
if (sdp->removable &&
SRpnt->sr_sense_buffer[2] == NOT_READY)
sense_valid && sshdr.sense_key == NOT_READY)
sdp->changed = 1;
/* Either no media are present but the drive didn't tell us,
......@@ -1255,6 +1292,7 @@ sd_read_cache_type(struct scsi_disk *sdkp, char *diskname,
const int dbd = 0; /* DBD */
const int modepage = 0x08; /* current values, cache page */
struct scsi_mode_data data;
struct scsi_sense_hdr sshdr;
if (sdkp->device->skip_ms_page_8)
goto defaults;
......@@ -1304,17 +1342,14 @@ sd_read_cache_type(struct scsi_disk *sdkp, char *diskname,
}
bad_sense:
if ((SRpnt->sr_sense_buffer[0] & 0x70) == 0x70
&& (SRpnt->sr_sense_buffer[2] & 0x0f) == ILLEGAL_REQUEST
/* ASC 0x24 ASCQ 0x00: Invalid field in CDB */
&& SRpnt->sr_sense_buffer[12] == 0x24
&& SRpnt->sr_sense_buffer[13] == 0x00) {
if (scsi_request_normalize_sense(SRpnt, &sshdr) &&
sshdr.sense_key == ILLEGAL_REQUEST &&
sshdr.asc == 0x24 && sshdr.ascq == 0x0)
printk(KERN_NOTICE "%s: cache data unavailable\n",
diskname);
} else {
diskname); /* Invalid field in CDB */
else
printk(KERN_ERR "%s: asking for cache data failed\n",
diskname);
}
defaults:
printk(KERN_ERR "%s: assuming drive cache: write through\n",
......
......@@ -7,7 +7,7 @@
* Original driver (sg.c):
* Copyright (C) 1992 Lawrence Foard
* Version 2 and 3 extensions to driver:
* Copyright (C) 1998 - 2004 Douglas Gilbert
* Copyright (C) 1998 - 2005 Douglas Gilbert
*
* Modified 19-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support
*
......@@ -18,8 +18,8 @@
*
*/
static int sg_version_num = 30531; /* 2 digits for each component */
#define SG_VERSION_STR "3.5.31"
static int sg_version_num = 30532; /* 2 digits for each component */
#define SG_VERSION_STR "3.5.32"
/*
* D. P. Gilbert (dgilbert@interlog.com, dougg@triode.net.au), notes:
......@@ -60,7 +60,7 @@ static int sg_version_num = 30531; /* 2 digits for each component */
#ifdef CONFIG_SCSI_PROC_FS
#include <linux/proc_fs.h>
static char *sg_version_date = "20040516";
static char *sg_version_date = "20050117";
static int sg_proc_init(void);
static void sg_proc_cleanup(void);
......@@ -1282,6 +1282,8 @@ sg_cmd_done(Scsi_Cmnd * SCpnt)
srp->header.duration =
jiffies_to_msecs(jiffies - srp->header.duration);
if (0 != SRpnt->sr_result) {
struct scsi_sense_hdr sshdr;
memcpy(srp->sense_b, SRpnt->sr_sense_buffer,
sizeof (srp->sense_b));
srp->header.status = 0xff & SRpnt->sr_result;
......@@ -1296,11 +1298,12 @@ sg_cmd_done(Scsi_Cmnd * SCpnt)
/* Following if statement is a patch supplied by Eric Youngdale */
if (driver_byte(SRpnt->sr_result) != 0
&& (SRpnt->sr_sense_buffer[0] & 0x7f) == 0x70
&& (SRpnt->sr_sense_buffer[2] & 0xf) == UNIT_ATTENTION
&& scsi_command_normalize_sense(SCpnt, &sshdr)
&& !scsi_sense_is_deferred(&sshdr)
&& sshdr.sense_key == UNIT_ATTENTION
&& sdp->device->removable) {
/* Detected disc change. Set the bit - this may be used if */
/* there are filesystems using this device. */
/* Detected possible disc change. Set the bit - this */
/* may be used if there are filesystems using this device */
sdp->device->changed = 1;
}
}
......@@ -1573,8 +1576,8 @@ sg_remove(struct class_device *cl_dev)
* of sysfs parameters (which module_param doesn't yet support).
* Sysfs parameters defined explicitly below.
*/
module_param_named(def_reserved_size, def_reserved_size, int, 0);
module_param_named(allow_dio, sg_allow_dio, int, 0);
module_param_named(def_reserved_size, def_reserved_size, int, S_IRUGO);
module_param_named(allow_dio, sg_allow_dio, int, S_IRUGO | S_IWUSR);
MODULE_AUTHOR("Douglas Gilbert");
MODULE_DESCRIPTION("SCSI generic (sg) driver");
......@@ -2606,9 +2609,14 @@ sg_page_free(char *buff, int size)
free_pages((unsigned long) buff, order);
}
#ifndef MAINTENANCE_IN_CMD
#define MAINTENANCE_IN_CMD 0xa3
#endif
static unsigned char allow_ops[] = { TEST_UNIT_READY, REQUEST_SENSE,
INQUIRY, READ_CAPACITY, READ_BUFFER, READ_6, READ_10, READ_12,
MODE_SENSE, MODE_SENSE_10, LOG_SENSE
READ_16, MODE_SENSE, MODE_SENSE_10, LOG_SENSE, REPORT_LUNS,
SERVICE_ACTION_IN, RECEIVE_DIAGNOSTIC, READ_LONG, MAINTENANCE_IN_CMD
};
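The table above is the opcode whitelist sg uses to decide which commands a read-only, unprivileged open may issue; this patch extends it with READ_16, REPORT_LUNS, SERVICE_ACTION_IN, RECEIVE_DIAGNOSTIC, READ_LONG and MAINTENANCE_IN_CMD. A hedged sketch of the kind of check made against it (the helper name is illustrative, not sg.c's actual function):

/* Illustrative whitelist scan over the table above. */
static int sg_opcode_allowed(unsigned char opcode)
{
	int k;

	for (k = 0; k < sizeof(allow_ops); ++k)
		if (opcode == allow_ops[k])
			return 1;
	return 0;
}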
static int
......
......@@ -152,9 +152,11 @@ static inline struct scsi_cd *scsi_cd_get(struct gendisk *disk)
static inline void scsi_cd_put(struct scsi_cd *cd)
{
struct scsi_device *sdev = cd->device;
down(&sr_ref_sem);
kref_put(&cd->kref, sr_kref_release);
scsi_device_put(cd->device);
scsi_device_put(sdev);
up(&sr_ref_sem);
}
......
/*
* attribute_container.h - a generic container for all classes
*
* Copyright (c) 2005 - James Bottomley <James.Bottomley@steeleye.com>
*
* This file is licensed under GPLv2
*/
#ifndef _ATTRIBUTE_CONTAINER_H_
#define _ATTRIBUTE_CONTAINER_H_
#include <linux/device.h>
#include <linux/list.h>
struct attribute_container {
struct list_head node;
struct list_head containers;
struct class *class;
struct class_device_attribute **attrs;
int (*match)(struct attribute_container *, struct device *);
#define ATTRIBUTE_CONTAINER_NO_CLASSDEVS 0x01
unsigned long flags;
};
static inline int
attribute_container_no_classdevs(struct attribute_container *atc)
{
return atc->flags & ATTRIBUTE_CONTAINER_NO_CLASSDEVS;
}
static inline void
attribute_container_set_no_classdevs(struct attribute_container *atc)
{
atc->flags |= ATTRIBUTE_CONTAINER_NO_CLASSDEVS;
}
int attribute_container_register(struct attribute_container *cont);
int attribute_container_unregister(struct attribute_container *cont);
void attribute_container_create_device(struct device *dev,
int (*fn)(struct attribute_container *,
struct device *,
struct class_device *));
void attribute_container_add_device(struct device *dev,
int (*fn)(struct attribute_container *,
struct device *,
struct class_device *));
void attribute_container_remove_device(struct device *dev,
void (*fn)(struct attribute_container *,
struct device *,
struct class_device *));
void attribute_container_device_trigger(struct device *dev,
int (*fn)(struct attribute_container *,
struct device *,
struct class_device *));
void attribute_container_trigger(struct device *dev,
int (*fn)(struct attribute_container *,
struct device *));
struct class_device_attribute **attribute_container_classdev_to_attrs(const struct class_device *classdev);
#endif
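A container normally gets a struct class_device created for every device its match() accepts; setting ATTRIBUTE_CONTAINER_NO_CLASSDEVS suppresses that, so a container can still receive match/trigger callbacks without exporting anything to sysfs. A hedged sketch of driving this API directly, with illustrative names (my_container, my_match, my_init):

/* Illustrative direct use of the container API (hypothetical names). */
static int my_match(struct attribute_container *cont, struct device *dev)
{
	return 1;	/* claim every device, for the sake of the example */
}

static struct attribute_container my_container = {
	.match	= my_match,
};

static int __init my_init(void)
{
	/* no sysfs class devices wanted, only match/trigger callbacks */
	attribute_container_set_no_classdevs(&my_container);
	return attribute_container_register(&my_container);
}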
/*
* transport_class.h - a generic container for all transport classes
*
* Copyright (c) 2005 - James Bottomley <James.Bottomley@steeleye.com>
*
* This file is licensed under GPLv2
*/
#ifndef _TRANSPORT_CLASS_H_
#define _TRANSPORT_CLASS_H_
#include <linux/device.h>
#include <linux/attribute_container.h>
struct transport_class {
struct class class;
int (*setup)(struct device *);
int (*configure)(struct device *);
int (*remove)(struct device *);
};
#define DECLARE_TRANSPORT_CLASS(cls, nm, su, rm, cfg) \
struct transport_class cls = { \
.class = { \
.name = nm, \
}, \
.setup = su, \
.remove = rm, \
.configure = cfg, \
}
struct anon_transport_class {
struct transport_class tclass;
struct attribute_container container;
};
#define DECLARE_ANON_TRANSPORT_CLASS(cls, mtch, cfg) \
struct anon_transport_class cls = { \
.tclass = { \
.configure = cfg, \
}, \
.container = { \
.match = mtch, \
}, \
}
#define class_to_transport_class(x) \
container_of(x, struct transport_class, class)
void transport_remove_device(struct device *);
void transport_add_device(struct device *);
void transport_setup_device(struct device *);
void transport_configure_device(struct device *);
void transport_destroy_device(struct device *);
static inline void
transport_register_device(struct device *dev)
{
transport_setup_device(dev);
transport_add_device(dev);
}
static inline void
transport_unregister_device(struct device *dev)
{
transport_remove_device(dev);
transport_destroy_device(dev);
}
int transport_class_register(struct transport_class *);
int anon_transport_class_register(struct anon_transport_class *);
void transport_class_unregister(struct transport_class *);
void anon_transport_class_unregister(struct anon_transport_class *);
#endif
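Putting the pieces together: a transport registers its classes at module load, and when a device the transport claims is added, transport_setup_device() plus transport_add_device() (wrapped as transport_register_device()) attach the class devices, with the unregister pair run on removal. A hedged module-glue sketch using hypothetical names (my_transport_class, my_device_added, my_device_removed):

/* Hypothetical module glue tying the lifecycle together. */
static DECLARE_TRANSPORT_CLASS(my_transport_class, "my_transport",
			       NULL, NULL, NULL);

static int __init my_transport_init(void)
{
	return transport_class_register(&my_transport_class);
}

static void __exit my_transport_exit(void)
{
	transport_class_unregister(&my_transport_class);
}

module_init(my_transport_init);
module_exit(my_transport_exit);

/* ...and on hot-plug of a device the transport's match() claims: */
static void my_device_added(struct device *dev)
{
	transport_register_device(dev);		/* setup + add */
}

static void my_device_removed(struct device *dev)
{
	transport_unregister_device(dev);	/* remove + destroy */
}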
......@@ -123,8 +123,6 @@ struct scsi_device {
struct device sdev_gendev;
struct class_device sdev_classdev;
struct class_device transport_classdev;
enum scsi_device_state sdev_state;
unsigned long sdev_data[0];
} __attribute__((aligned(sizeof(unsigned long))));
......@@ -133,7 +131,7 @@ struct scsi_device {
#define class_to_sdev(d) \
container_of(d, struct scsi_device, sdev_classdev)
#define transport_class_to_sdev(class_dev) \
container_of(class_dev, struct scsi_device, transport_classdev)
to_scsi_device(class_dev->dev)
/*
* scsi_target: representation of a scsi target, for now, this is only
......@@ -146,7 +144,6 @@ struct scsi_target {
unsigned int channel;
unsigned int id; /* target id ... replace
* scsi_device.id eventually */
struct class_device transport_classdev;
unsigned long create:1; /* signal that it needs to be added */
unsigned long starget_data[0];
} __attribute__((aligned(sizeof(unsigned long))));
......@@ -157,7 +154,7 @@ static inline struct scsi_target *scsi_target(struct scsi_device *sdev)
return to_scsi_target(sdev->sdev_gendev.parent);
}
#define transport_class_to_starget(class_dev) \
container_of(class_dev, struct scsi_target, transport_classdev)
to_scsi_target(class_dev->dev)
extern struct scsi_device *__scsi_add_device(struct Scsi_Host *,
uint, uint, uint, void *hostdata);
......@@ -226,6 +223,8 @@ extern void scsi_device_resume(struct scsi_device *sdev);
extern void scsi_target_quiesce(struct scsi_target *);
extern void scsi_target_resume(struct scsi_target *);
extern const char *scsi_device_state_name(enum scsi_device_state);
extern int scsi_is_sdev_device(const struct device *);
extern int scsi_is_target_device(const struct device *);
static inline int scsi_device_online(struct scsi_device *sdev)
{
return sdev->sdev_state != SDEV_OFFLINE;
......
......@@ -44,6 +44,12 @@ static inline int scsi_sense_is_deferred(struct scsi_sense_hdr *sshdr)
{
return ((sshdr->response_code >= 0x70) && (sshdr->response_code & 1));
}
extern const u8 * scsi_sense_desc_find(const u8 * sense_buffer, int sb_len,
int desc_type);
extern int scsi_get_sense_info_fld(const u8 * sense_buffer, int sb_len,
u64 * info_out);
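scsi_get_sense_info_fld() pulls the Information field out of either fixed or descriptor format sense data; it is what sd_rw_intr() above now uses to locate the first failed block, and it returns non-zero only when the field is valid. A hedged usage sketch (report_bad_lba is a hypothetical helper):

/* Hypothetical helper: report the failed LBA for a medium error. */
static void report_bad_lba(struct scsi_cmnd *SCpnt)
{
	u64 bad_lba;

	if (scsi_get_sense_info_fld(SCpnt->sense_buffer,
				    SCSI_SENSE_BUFFERSIZE, &bad_lba))
		printk(KERN_WARNING "medium error at LBA %llu\n",
		       (unsigned long long)bad_lba);
}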
/*
* Reset request from external source
......
......@@ -71,7 +71,18 @@ struct scsi_host_template {
* Status: OPTIONAL
*/
int (* ioctl)(struct scsi_device *dev, int cmd, void __user *arg);
#ifdef CONFIG_COMPAT
/*
* Compat handler. Handle 32bit ABI.
* When unknown ioctl is passed return -ENOIOCTLCMD.
*
* Status: OPTIONAL
*/
int (* compat_ioctl)(struct scsi_device *dev, int cmd, void __user *arg);
#endif
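The new optional compat_ioctl hook lets 32-bit userspace ioctls reach a 64-bit host driver; a driver with nothing to translate simply returns -ENOIOCTLCMD for commands it does not recognise so the generic compat layer can deal with them. A hedged minimal sketch (my_compat_ioctl is hypothetical):

/* Hypothetical minimal handler: nothing driver-specific to translate. */
static int my_compat_ioctl(struct scsi_device *sdev, int cmd,
			   void __user *arg)
{
	switch (cmd) {
	/* translate or handle driver-private 32-bit ioctls here */
	default:
		return -ENOIOCTLCMD;	/* let the core handle or reject it */
	}
}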
/*
* The queuecommand function is used to queue up a scsi
* command block to the LLDD. When the driver finished
......@@ -528,7 +539,6 @@ struct Scsi_Host {
* separately
*/
void *shost_data;
struct class_device transport_classdev;
/*
* We should ensure that this is aligned, both for better performance
......@@ -542,8 +552,6 @@ struct Scsi_Host {
container_of(d, struct Scsi_Host, shost_gendev)
#define class_to_shost(d) \
container_of(d, struct Scsi_Host, shost_classdev)
#define transport_class_to_shost(class_dev) \
container_of(class_dev, struct Scsi_Host, transport_classdev)
extern struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *, int);
......@@ -578,6 +586,7 @@ static inline struct device *scsi_get_device(struct Scsi_Host *shost)
extern void scsi_unblock_requests(struct Scsi_Host *);
extern void scsi_block_requests(struct Scsi_Host *);
struct class_container;
/*
* These two functions are used to allocate and free a pseudo device
* which will connect to the host adapter itself rather than any
......@@ -587,6 +596,8 @@ extern void scsi_block_requests(struct Scsi_Host *);
*/
extern void scsi_free_host_dev(struct scsi_device *);
extern struct scsi_device *scsi_get_host_dev(struct Scsi_Host *);
int scsi_is_host_device(const struct device *);
/* legacy interfaces */
extern struct Scsi_Host *scsi_register(struct scsi_host_template *, int);
......
......@@ -20,31 +20,16 @@
#ifndef SCSI_TRANSPORT_H
#define SCSI_TRANSPORT_H
struct scsi_transport_template {
/* The NULL terminated list of transport attributes
* that should be exported.
*/
struct class_device_attribute **device_attrs;
struct class_device_attribute **target_attrs;
struct class_device_attribute **host_attrs;
#include <linux/transport_class.h>
/* The transport class that the device is in */
struct class *device_class;
struct class *target_class;
struct class *host_class;
struct scsi_transport_template {
/* The statistics attached to the host class only */
struct attribute_group *host_statistics;
/* Constructor functions */
int (*device_setup)(struct scsi_device *);
int (*device_configure)(struct scsi_device *);
int (*target_setup)(struct scsi_target *);
int (*host_setup)(struct Scsi_Host *);
/* Destructor functions */
void (*device_destroy)(struct scsi_device *);
void (*target_destroy)(struct scsi_target *);
void (*host_destroy)(struct Scsi_Host *);
/* the attribute containers */
struct attribute_container host_attrs;
struct attribute_container target_attrs;
struct attribute_container device_attrs;
/* The size of the specific transport attribute structure (a
* space of this size will be left at the end of the
......@@ -54,4 +39,8 @@ struct scsi_transport_template {
int host_size;
};
#define transport_class_to_shost(tc) \
dev_to_shost((tc)->dev)
#endif /* SCSI_TRANSPORT_H */
......@@ -21,6 +21,7 @@
#define SCSI_TRANSPORT_SPI_H
#include <linux/config.h>
#include <linux/transport_class.h>
struct scsi_transport_template;
......