Commit eddeb0e2 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6: (43 commits)
  firewire: cleanups
  firewire: fix synchronization of gap counts
  firewire: wait until PHY configuration packet was transmitted (fix bus reset loop)
  firewire: remove unused struct member
  firewire: use bitwise and to get reg in handle_registers
  firewire: replace more hex values with defined csr constants
  firewire: reread config ROM when device reset the bus
  firewire: replace static ROM cache by allocated cache
  firewire: fw-ohci: work around generation bug in TI controllers (fix AV/C and more)
  firewire: fw-ohci: extend logging of bus generations and node ID
  firewire: fw-ohci: conditionally log busReset interrupts
  firewire: fw-ohci: don't append to AT context when it's not active
  firewire: fw-ohci: log regAccessFail events
  firewire: fw-ohci: make sure HCControl register LPS bit is set
  firewire: fw-ohci: missing PPC PMac feature calls in failure path
  firewire: fw-ohci: untangle a mixed unsigned/signed expression
  firewire: debug interrupt events
  firewire: fw-ohci: catch self_id_count == 0
  firewire: fw-ohci: add self ID error check
  firewire: fw-ohci: refactor probe, remove, suspend, resume
  ...
parents 855d854a db8be076
......@@ -41,15 +41,19 @@ to a working state and enables physical DMA by default for all remote nodes.
This can be turned off by ohci1394's module parameter phys_dma=0.
The alternative firewire-ohci driver in drivers/firewire uses filtered physical
DMA, hence is not yet suitable for remote debugging.
DMA by default, which is more secure but not suitable for remote debugging.
Compile the driver with CONFIG_FIREWIRE_OHCI_REMOTE_DMA (Kernel hacking menu:
Remote debugging over FireWire with firewire-ohci) to get unfiltered physical
DMA.
Because ohci1394 depends on the PCI enumeration to be completed, an
initialization routine which runs pretty early (long before console_init()
which makes the printk buffer appear on the console can be called) was written.
Because ohci1394 and firewire-ohci depend on the PCI enumeration to be
completed, an initialization routine which runs pretty early has been
implemented for x86. This routine runs long before console_init() can be
called, i.e. before the printk buffer appears on the console.
To activate it, enable CONFIG_PROVIDE_OHCI1394_DMA_INIT (Kernel hacking menu:
Provide code for enabling DMA over FireWire early on boot) and pass the
parameter "ohci1394_dma=early" to the recompiled kernel on boot.
Remote debugging over FireWire early on boot) and pass the parameter
"ohci1394_dma=early" to the recompiled kernel on boot.
Tools
-----
......
......@@ -54,6 +54,11 @@ config FIREWIRE_OHCI
directive, use "install modulename /bin/true" for the modules to be
blacklisted.
config FIREWIRE_OHCI_DEBUG
bool
depends on FIREWIRE_OHCI
default y
config FIREWIRE_SBP2
tristate "Support for storage devices (SBP-2 protocol driver)"
depends on FIREWIRE && SCSI
......
......@@ -167,7 +167,6 @@ fw_core_add_descriptor(struct fw_descriptor *desc)
return 0;
}
EXPORT_SYMBOL(fw_core_add_descriptor);
void
fw_core_remove_descriptor(struct fw_descriptor *desc)
......@@ -182,7 +181,6 @@ fw_core_remove_descriptor(struct fw_descriptor *desc)
mutex_unlock(&card_mutex);
}
EXPORT_SYMBOL(fw_core_remove_descriptor);
static const char gap_count_table[] = {
63, 5, 7, 8, 10, 13, 16, 18, 21, 24, 26, 29, 32, 35, 37, 40
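A minimal sketch of how a table like this is typically consulted. The lookup site is outside this hunk, so the assumption that it is indexed by the maximum hop count between any two nodes (falling back to 63 when the topology is larger than the table covers) is inferred, and the helper name is hypothetical:

static int gap_count_for_hops(int max_hops)
{
	/* 63 is the default gap count used when the table does not apply. */
	if (max_hops >= ARRAY_SIZE(gap_count_table))
		return 63;
	return gap_count_table[max_hops];
}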
......@@ -220,7 +218,7 @@ fw_card_bm_work(struct work_struct *work)
struct bm_data bmd;
unsigned long flags;
int root_id, new_root_id, irm_id, gap_count, generation, grace;
int do_reset = 0;
bool do_reset = false;
spin_lock_irqsave(&card->lock, flags);
local_node = card->local_node;
......@@ -331,7 +329,7 @@ fw_card_bm_work(struct work_struct *work)
*/
spin_unlock_irqrestore(&card->lock, flags);
goto out;
} else if (root_device->config_rom[2] & BIB_CMC) {
} else if (root_device->cmc) {
/*
* FIXME: I suppose we should set the cmstr bit in the
* STATE_CLEAR register of this node, as described in
......@@ -360,14 +358,14 @@ fw_card_bm_work(struct work_struct *work)
gap_count = 63;
/*
* Finally, figure out if we should do a reset or not. If we've
* done less that 5 resets with the same physical topology and we
* Finally, figure out if we should do a reset or not. If we have
* done less than 5 resets with the same physical topology and we
* have either a new root or a new gap count setting, let's do it.
*/
if (card->bm_retries++ < 5 &&
(card->gap_count != gap_count || new_root_id != root_id))
do_reset = 1;
do_reset = true;
spin_unlock_irqrestore(&card->lock, flags);
......@@ -398,7 +396,6 @@ fw_card_initialize(struct fw_card *card, const struct fw_card_driver *driver,
{
static atomic_t index = ATOMIC_INIT(-1);
kref_init(&card->kref);
atomic_set(&card->device_count, 0);
card->index = atomic_inc_return(&index);
card->driver = driver;
......@@ -429,12 +426,6 @@ fw_card_add(struct fw_card *card,
card->link_speed = link_speed;
card->guid = guid;
/*
* The subsystem grabs a reference when the card is added and
* drops it when the driver calls fw_core_remove_card.
*/
fw_card_get(card);
mutex_lock(&card_mutex);
config_rom = generate_config_rom(card, &length);
list_add_tail(&card->link, &card_list);
......@@ -540,40 +531,9 @@ fw_core_remove_card(struct fw_card *card)
cancel_delayed_work_sync(&card->work);
fw_flush_transactions(card);
del_timer_sync(&card->flush_timer);
fw_card_put(card);
}
EXPORT_SYMBOL(fw_core_remove_card);
struct fw_card *
fw_card_get(struct fw_card *card)
{
kref_get(&card->kref);
return card;
}
EXPORT_SYMBOL(fw_card_get);
static void
release_card(struct kref *kref)
{
struct fw_card *card = container_of(kref, struct fw_card, kref);
kfree(card);
}
/*
* An assumption for fw_card_put() is that the card driver allocates
* the fw_card struct with kalloc and that it has been shut down
* before the last ref is dropped.
*/
void
fw_card_put(struct fw_card *card)
{
kref_put(&card->kref, release_card);
}
EXPORT_SYMBOL(fw_card_put);
int
fw_core_initiate_bus_reset(struct fw_card *card, int short_reset)
{
......
......@@ -269,21 +269,28 @@ static int ioctl_get_info(struct client *client, void *buffer)
{
struct fw_cdev_get_info *get_info = buffer;
struct fw_cdev_event_bus_reset bus_reset;
unsigned long ret = 0;
client->version = get_info->version;
get_info->version = FW_CDEV_VERSION;
down_read(&fw_device_rwsem);
if (get_info->rom != 0) {
void __user *uptr = u64_to_uptr(get_info->rom);
size_t want = get_info->rom_length;
size_t have = client->device->config_rom_length * 4;
if (copy_to_user(uptr, client->device->config_rom,
min(want, have)))
return -EFAULT;
ret = copy_to_user(uptr, client->device->config_rom,
min(want, have));
}
get_info->rom_length = client->device->config_rom_length * 4;
up_read(&fw_device_rwsem);
if (ret != 0)
return -EFAULT;
client->bus_reset_closure = get_info->bus_reset_closure;
if (get_info->bus_reset != 0) {
void __user *uptr = u64_to_uptr(get_info->bus_reset);
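For orientation, the userspace side of this ioctl looks roughly like the following. This is a sketch that assumes the standard <linux/firewire-cdev.h> ABI and a /dev/fw0 device node; neither is part of this diff:

#include <linux/firewire-cdev.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t rom[256];
	struct fw_cdev_get_info info = {
		.version    = FW_CDEV_VERSION,
		.rom        = (uintptr_t)rom,	/* kernel copies at most rom_length bytes */
		.rom_length = sizeof(rom),
		/* bus_reset left 0, so the bus reset event copy above is skipped */
	};
	int fd = open("/dev/fw0", O_RDWR);

	if (fd < 0 || ioctl(fd, FW_CDEV_IOC_GET_INFO, &info) < 0)
		return 1;
	/* on return, rom_length holds the full config ROM size in bytes */
	printf("config ROM: %u bytes, cdev ABI version %u\n",
	       info.rom_length, info.version);
	return 0;
}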
......
......@@ -25,7 +25,7 @@
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/rwsem.h>
#include <linux/string.h>
#include <asm/semaphore.h>
#include <asm/system.h>
#include <linux/ctype.h>
......@@ -160,9 +160,9 @@ static void fw_device_release(struct device *dev)
* Take the card lock so we don't set this to NULL while a
* FW_NODE_UPDATED callback is being handled.
*/
spin_lock_irqsave(&device->card->lock, flags);
spin_lock_irqsave(&card->lock, flags);
device->node->data = NULL;
spin_unlock_irqrestore(&device->card->lock, flags);
spin_unlock_irqrestore(&card->lock, flags);
fw_node_put(device->node);
kfree(device->config_rom);
......@@ -195,7 +195,9 @@ show_immediate(struct device *dev, struct device_attribute *dattr, char *buf)
container_of(dattr, struct config_rom_attribute, attr);
struct fw_csr_iterator ci;
u32 *dir;
int key, value;
int key, value, ret = -ENOENT;
down_read(&fw_device_rwsem);
if (is_fw_unit(dev))
dir = fw_unit(dev)->directory;
......@@ -204,11 +206,15 @@ show_immediate(struct device *dev, struct device_attribute *dattr, char *buf)
fw_csr_iterator_init(&ci, dir);
while (fw_csr_iterator_next(&ci, &key, &value))
if (attr->key == key)
return snprintf(buf, buf ? PAGE_SIZE : 0,
if (attr->key == key) {
ret = snprintf(buf, buf ? PAGE_SIZE : 0,
"0x%06x\n", value);
break;
}
up_read(&fw_device_rwsem);
return -ENOENT;
return ret;
}
#define IMMEDIATE_ATTR(name, key) \
......@@ -221,9 +227,11 @@ show_text_leaf(struct device *dev, struct device_attribute *dattr, char *buf)
container_of(dattr, struct config_rom_attribute, attr);
struct fw_csr_iterator ci;
u32 *dir, *block = NULL, *p, *end;
int length, key, value, last_key = 0;
int length, key, value, last_key = 0, ret = -ENOENT;
char *b;
down_read(&fw_device_rwsem);
if (is_fw_unit(dev))
dir = fw_unit(dev)->directory;
else
......@@ -238,18 +246,20 @@ show_text_leaf(struct device *dev, struct device_attribute *dattr, char *buf)
}
if (block == NULL)
return -ENOENT;
goto out;
length = min(block[0] >> 16, 256U);
if (length < 3)
return -ENOENT;
goto out;
if (block[1] != 0 || block[2] != 0)
/* Unknown encoding. */
return -ENOENT;
goto out;
if (buf == NULL)
return length * 4;
if (buf == NULL) {
ret = length * 4;
goto out;
}
b = buf;
end = &block[length + 1];
......@@ -259,8 +269,11 @@ show_text_leaf(struct device *dev, struct device_attribute *dattr, char *buf)
/* Strip trailing whitespace and add newline. */
while (b--, (isspace(*b) || *b == '\0') && b > buf);
strcpy(b + 1, "\n");
ret = b + 2 - buf;
out:
up_read(&fw_device_rwsem);
return b + 2 - buf;
return ret;
}
#define TEXT_LEAF_ATTR(name, key) \
......@@ -337,19 +350,28 @@ static ssize_t
config_rom_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct fw_device *device = fw_device(dev);
size_t length;
memcpy(buf, device->config_rom, device->config_rom_length * 4);
down_read(&fw_device_rwsem);
length = device->config_rom_length * 4;
memcpy(buf, device->config_rom, length);
up_read(&fw_device_rwsem);
return device->config_rom_length * 4;
return length;
}
static ssize_t
guid_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct fw_device *device = fw_device(dev);
int ret;
return snprintf(buf, PAGE_SIZE, "0x%08x%08x\n",
down_read(&fw_device_rwsem);
ret = snprintf(buf, PAGE_SIZE, "0x%08x%08x\n",
device->config_rom[3], device->config_rom[4]);
up_read(&fw_device_rwsem);
return ret;
}
static struct device_attribute fw_device_attributes[] = {
......@@ -388,7 +410,7 @@ read_rom(struct fw_device *device, int generation, int index, u32 *data)
init_completion(&callback_data.done);
offset = 0xfffff0000400ULL + index * 4;
offset = (CSR_REGISTER_BASE | CSR_CONFIG_ROM) + index * 4;
fw_send_request(device->card, &t, TCODE_READ_QUADLET_REQUEST,
device->node_id, generation, device->max_speed,
offset, NULL, 4, complete_transaction, &callback_data);
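The old literal and the new symbolic offset are the same address; a quick standalone check, with the constant values inferred from the literals this patch replaces:

#include <stdio.h>
#include <stdint.h>

#define CSR_REGISTER_BASE	0xfffff0000000ULL	/* replaces 0xfffff0000000ULL */
#define CSR_CONFIG_ROM		0x400			/* replaces the 0x400 part of 0xfffff0000400ULL */

int main(void)
{
	int index = 0;
	uint64_t offset = (CSR_REGISTER_BASE | CSR_CONFIG_ROM) + index * 4;

	printf("0x%012llx\n", (unsigned long long)offset);	/* prints 0xfffff0000400 */
	return 0;
}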
......@@ -400,6 +422,9 @@ read_rom(struct fw_device *device, int generation, int index, u32 *data)
return callback_data.rcode;
}
#define READ_BIB_ROM_SIZE 256
#define READ_BIB_STACK_SIZE 16
/*
* Read the bus info block, perform a speed probe, and read all of the rest of
* the config ROM. We do all this with a cached bus generation. If the bus
......@@ -409,16 +434,23 @@ read_rom(struct fw_device *device, int generation, int index, u32 *data)
*/
static int read_bus_info_block(struct fw_device *device, int generation)
{
static u32 rom[256];
u32 stack[16], sp, key;
int i, end, length;
u32 *rom, *stack, *old_rom, *new_rom;
u32 sp, key;
int i, end, length, ret = -1;
rom = kmalloc(sizeof(*rom) * READ_BIB_ROM_SIZE +
sizeof(*stack) * READ_BIB_STACK_SIZE, GFP_KERNEL);
if (rom == NULL)
return -ENOMEM;
stack = &rom[READ_BIB_ROM_SIZE];
device->max_speed = SCODE_100;
/* First read the bus info block. */
for (i = 0; i < 5; i++) {
if (read_rom(device, generation, i, &rom[i]) != RCODE_COMPLETE)
return -1;
goto out;
/*
* As per IEEE1212 7.2, during power-up, devices can
* reply with a 0 for the first quadlet of the config
......@@ -428,7 +460,7 @@ static int read_bus_info_block(struct fw_device *device, int generation)
* retry mechanism will try again later.
*/
if (i == 0 && rom[i] == 0)
return -1;
goto out;
}
device->max_speed = device->node->max_speed;
......@@ -478,26 +510,26 @@ static int read_bus_info_block(struct fw_device *device, int generation)
*/
key = stack[--sp];
i = key & 0xffffff;
if (i >= ARRAY_SIZE(rom))
if (i >= READ_BIB_ROM_SIZE)
/*
* The reference points outside the standard
* config rom area, something's fishy.
*/
return -1;
goto out;
/* Read header quadlet for the block to get the length. */
if (read_rom(device, generation, i, &rom[i]) != RCODE_COMPLETE)
return -1;
goto out;
end = i + (rom[i] >> 16) + 1;
i++;
if (end > ARRAY_SIZE(rom))
if (end > READ_BIB_ROM_SIZE)
/*
* This block extends outside standard config
* area (and the array we're reading it
* into). That's broken, so ignore this
* device.
*/
return -1;
goto out;
/*
* Now read in the block. If this is a directory
......@@ -507,9 +539,9 @@ static int read_bus_info_block(struct fw_device *device, int generation)
while (i < end) {
if (read_rom(device, generation, i, &rom[i]) !=
RCODE_COMPLETE)
return -1;
goto out;
if ((key >> 30) == 3 && (rom[i] >> 30) > 1 &&
sp < ARRAY_SIZE(stack))
sp < READ_BIB_STACK_SIZE)
stack[sp++] = i + rom[i];
i++;
}
......@@ -517,13 +549,23 @@ static int read_bus_info_block(struct fw_device *device, int generation)
length = i;
}
device->config_rom = kmalloc(length * 4, GFP_KERNEL);
if (device->config_rom == NULL)
return -1;
memcpy(device->config_rom, rom, length * 4);
old_rom = device->config_rom;
new_rom = kmemdup(rom, length * 4, GFP_KERNEL);
if (new_rom == NULL)
goto out;
down_write(&fw_device_rwsem);
device->config_rom = new_rom;
device->config_rom_length = length;
up_write(&fw_device_rwsem);
return 0;
kfree(old_rom);
ret = 0;
device->cmc = rom[2] & 1 << 30;
out:
kfree(rom);
return ret;
}
static void fw_unit_release(struct device *dev)
......@@ -592,7 +634,14 @@ static int shutdown_unit(struct device *device, void *data)
return 0;
}
static DECLARE_RWSEM(idr_rwsem);
/*
* fw_device_rwsem acts as dual purpose mutex:
* - serializes accesses to fw_device_idr,
* - serializes accesses to fw_device.config_rom/.config_rom_length and
* fw_unit.directory, unless those accesses happen at safe occasions
*/
DECLARE_RWSEM(fw_device_rwsem);
static DEFINE_IDR(fw_device_idr);
int fw_cdev_major;
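The rule documented above in practice: a reader-side sketch with a hypothetical helper name, mirroring what config_rom_show() does later in this patch:

static size_t copy_config_rom(struct fw_device *device, u32 *dst, size_t max_quadlets)
{
	size_t quadlets;

	down_read(&fw_device_rwsem);
	quadlets = min_t(size_t, device->config_rom_length, max_quadlets);
	memcpy(dst, device->config_rom, quadlets * 4);
	up_read(&fw_device_rwsem);

	return quadlets;
}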
......@@ -600,11 +649,11 @@ struct fw_device *fw_device_get_by_devt(dev_t devt)
{
struct fw_device *device;
down_read(&idr_rwsem);
down_read(&fw_device_rwsem);
device = idr_find(&fw_device_idr, MINOR(devt));
if (device)
fw_device_get(device);
up_read(&idr_rwsem);
up_read(&fw_device_rwsem);
return device;
}
......@@ -619,9 +668,9 @@ static void fw_device_shutdown(struct work_struct *work)
device_for_each_child(&device->device, NULL, shutdown_unit);
device_unregister(&device->device);
down_write(&idr_rwsem);
down_write(&fw_device_rwsem);
idr_remove(&fw_device_idr, minor);
up_write(&idr_rwsem);
up_write(&fw_device_rwsem);
fw_device_put(device);
}
......@@ -674,10 +723,10 @@ static void fw_device_init(struct work_struct *work)
err = -ENOMEM;
fw_device_get(device);
down_write(&idr_rwsem);
down_write(&fw_device_rwsem);
if (idr_pre_get(&fw_device_idr, GFP_KERNEL))
err = idr_get_new(&fw_device_idr, device, &minor);
up_write(&idr_rwsem);
up_write(&fw_device_rwsem);
if (err < 0)
goto error;
......@@ -711,7 +760,7 @@ static void fw_device_init(struct work_struct *work)
if (atomic_cmpxchg(&device->state,
FW_DEVICE_INITIALIZING,
FW_DEVICE_RUNNING) == FW_DEVICE_SHUTDOWN) {
fw_device_shutdown(&device->work.work);
fw_device_shutdown(work);
} else {
if (device->config_rom_retries)
fw_notify("created device %s: GUID %08x%08x, S%d00, "
......@@ -725,6 +774,7 @@ static void fw_device_init(struct work_struct *work)
device->device.bus_id,
device->config_rom[3], device->config_rom[4],
1 << device->max_speed);
device->config_rom_retries = 0;
}
/*
......@@ -739,9 +789,9 @@ static void fw_device_init(struct work_struct *work)
return;
error_with_cdev:
down_write(&idr_rwsem);
down_write(&fw_device_rwsem);
idr_remove(&fw_device_idr, minor);
up_write(&idr_rwsem);
up_write(&fw_device_rwsem);
error:
fw_device_put(device); /* fw_device_idr's reference */
......@@ -771,6 +821,106 @@ static void fw_device_update(struct work_struct *work)
device_for_each_child(&device->device, NULL, update_unit);
}
enum {
REREAD_BIB_ERROR,
REREAD_BIB_GONE,
REREAD_BIB_UNCHANGED,
REREAD_BIB_CHANGED,
};
/* Reread and compare bus info block and header of root directory */
static int reread_bus_info_block(struct fw_device *device, int generation)
{
u32 q;
int i;
for (i = 0; i < 6; i++) {
if (read_rom(device, generation, i, &q) != RCODE_COMPLETE)
return REREAD_BIB_ERROR;
if (i == 0 && q == 0)
return REREAD_BIB_GONE;
if (i > device->config_rom_length || q != device->config_rom[i])
return REREAD_BIB_CHANGED;
}
return REREAD_BIB_UNCHANGED;
}
static void fw_device_refresh(struct work_struct *work)
{
struct fw_device *device =
container_of(work, struct fw_device, work.work);
struct fw_card *card = device->card;
int node_id = device->node_id;
switch (reread_bus_info_block(device, device->generation)) {
case REREAD_BIB_ERROR:
if (device->config_rom_retries < MAX_RETRIES / 2 &&
atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
device->config_rom_retries++;
schedule_delayed_work(&device->work, RETRY_DELAY / 2);
return;
}
goto give_up;
case REREAD_BIB_GONE:
goto gone;
case REREAD_BIB_UNCHANGED:
if (atomic_cmpxchg(&device->state,
FW_DEVICE_INITIALIZING,
FW_DEVICE_RUNNING) == FW_DEVICE_SHUTDOWN)
goto gone;
fw_device_update(work);
device->config_rom_retries = 0;
goto out;
case REREAD_BIB_CHANGED:
break;
}
/*
* Something changed. We keep things simple and don't investigate
* further. We just destroy all previous units and create new ones.
*/
device_for_each_child(&device->device, NULL, shutdown_unit);
if (read_bus_info_block(device, device->generation) < 0) {
if (device->config_rom_retries < MAX_RETRIES &&
atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
device->config_rom_retries++;
schedule_delayed_work(&device->work, RETRY_DELAY);
return;
}
goto give_up;
}
create_units(device);
if (atomic_cmpxchg(&device->state,
FW_DEVICE_INITIALIZING,
FW_DEVICE_RUNNING) == FW_DEVICE_SHUTDOWN)
goto gone;
fw_notify("refreshed device %s\n", device->device.bus_id);
device->config_rom_retries = 0;
goto out;
give_up:
fw_notify("giving up on refresh of device %s\n", device->device.bus_id);
gone:
atomic_set(&device->state, FW_DEVICE_SHUTDOWN);
fw_device_shutdown(work);
out:
if (node_id == card->root_node->node_id)
schedule_delayed_work(&card->work, 0);
}
void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
{
struct fw_device *device;
......@@ -780,7 +930,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
case FW_NODE_LINK_ON:
if (!node->link_on)
break;
create:
device = kzalloc(sizeof(*device), GFP_ATOMIC);
if (device == NULL)
break;
......@@ -819,6 +969,23 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
schedule_delayed_work(&device->work, INITIAL_DELAY);
break;
case FW_NODE_INITIATED_RESET:
device = node->data;
if (device == NULL)
goto create;
device->node_id = node->node_id;
smp_wmb(); /* update node_id before generation */
device->generation = card->generation;
if (atomic_cmpxchg(&device->state,
FW_DEVICE_RUNNING,
FW_DEVICE_INITIALIZING) == FW_DEVICE_RUNNING) {
PREPARE_DELAYED_WORK(&device->work, fw_device_refresh);
schedule_delayed_work(&device->work,
node == card->local_node ? 0 : INITIAL_DELAY);
}
break;
case FW_NODE_UPDATED:
if (!node->link_on || node->data == NULL)
break;
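The smp_wmb() above (node_id stored before generation) implies a matching order on the read side; a sketch of what a consumer of the pair presumably looks like, with a made-up helper name and the barrier pairing taken from the comment:

static void snapshot_device_ids(struct fw_device *device, int *generation, int *node_id)
{
	*generation = device->generation;
	smp_rmb();	/* pairs with the smp_wmb() between the two stores above */
	*node_id = device->node_id;
}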
......
......@@ -21,6 +21,7 @@
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/rwsem.h>
#include <asm/atomic.h>
enum fw_device_state {
......@@ -46,6 +47,11 @@ struct fw_attribute_group {
* fw_device.node_id is guaranteed to be current too.
*
* The same applies to fw_device.card->node_id vs. fw_device.generation.
*
* fw_device.config_rom and fw_device.config_rom_length may be accessed during
* the lifetime of any fw_unit belonging to the fw_device, before device_del()
* was called on the last fw_unit. Alternatively, they may be accessed while
* holding fw_device_rwsem.
*/
struct fw_device {
atomic_t state;
......@@ -53,6 +59,7 @@ struct fw_device {
int node_id;
int generation;
unsigned max_speed;
bool cmc;
struct fw_card *card;
struct device device;
struct list_head link;
......@@ -64,28 +71,24 @@ struct fw_device {
struct fw_attribute_group attribute_group;
};
static inline struct fw_device *
fw_device(struct device *dev)
static inline struct fw_device *fw_device(struct device *dev)
{
return container_of(dev, struct fw_device, device);
}
static inline int
fw_device_is_shutdown(struct fw_device *device)
static inline int fw_device_is_shutdown(struct fw_device *device)
{
return atomic_read(&device->state) == FW_DEVICE_SHUTDOWN;
}
static inline struct fw_device *
fw_device_get(struct fw_device *device)
static inline struct fw_device *fw_device_get(struct fw_device *device)
{
get_device(&device->device);
return device;
}
static inline void
fw_device_put(struct fw_device *device)
static inline void fw_device_put(struct fw_device *device)
{
put_device(&device->device);
}
......@@ -96,20 +99,35 @@ int fw_device_enable_phys_dma(struct fw_device *device);
void fw_device_cdev_update(struct fw_device *device);
void fw_device_cdev_remove(struct fw_device *device);
extern struct rw_semaphore fw_device_rwsem;
extern int fw_cdev_major;
/*
* fw_unit.directory must not be accessed after device_del(&fw_unit.device).
*/
struct fw_unit {
struct device device;
u32 *directory;
struct fw_attribute_group attribute_group;
};
static inline struct fw_unit *
fw_unit(struct device *dev)
static inline struct fw_unit *fw_unit(struct device *dev)
{
return container_of(dev, struct fw_unit, device);
}
static inline struct fw_unit *fw_unit_get(struct fw_unit *unit)
{
get_device(&unit->device);
return unit;
}
static inline void fw_unit_put(struct fw_unit *unit)
{
put_device(&unit->device);
}
#define CSR_OFFSET 0x40
#define CSR_LEAF 0x80
#define CSR_DIRECTORY 0xc0
......
......@@ -126,7 +126,6 @@ fw_iso_context_create(struct fw_card *card, int type,
return ctx;
}
EXPORT_SYMBOL(fw_iso_context_create);
void fw_iso_context_destroy(struct fw_iso_context *ctx)
{
......@@ -134,14 +133,12 @@ void fw_iso_context_destroy(struct fw_iso_context *ctx)
card->driver->free_iso_context(ctx);
}
EXPORT_SYMBOL(fw_iso_context_destroy);
int
fw_iso_context_start(struct fw_iso_context *ctx, int cycle, int sync, int tags)
{
return ctx->card->driver->start_iso(ctx, cycle, sync, tags);
}
EXPORT_SYMBOL(fw_iso_context_start);
int
fw_iso_context_queue(struct fw_iso_context *ctx,
......@@ -153,11 +150,9 @@ fw_iso_context_queue(struct fw_iso_context *ctx,
return card->driver->queue_iso(ctx, packet, buffer, payload);
}
EXPORT_SYMBOL(fw_iso_context_queue);
int
fw_iso_context_stop(struct fw_iso_context *ctx)
{
return ctx->card->driver->stop_iso(ctx);
}
EXPORT_SYMBOL(fw_iso_context_stop);
......@@ -27,6 +27,7 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
......@@ -177,9 +178,10 @@ struct fw_ohci {
struct tasklet_struct bus_reset_tasklet;
int node_id;
int generation;
int request_generation;
int request_generation; /* for timestamping incoming requests */
u32 bus_seconds;
bool old_uninorth;
bool bus_reset_packet_quirk;
/*
* Spinlock for accessing fw_ohci data. Never call out of
......@@ -237,6 +239,196 @@ static inline struct fw_ohci *fw_ohci(struct fw_card *card)
static char ohci_driver_name[] = KBUILD_MODNAME;
#ifdef CONFIG_FIREWIRE_OHCI_DEBUG
#define OHCI_PARAM_DEBUG_AT_AR 1
#define OHCI_PARAM_DEBUG_SELFIDS 2
#define OHCI_PARAM_DEBUG_IRQS 4
#define OHCI_PARAM_DEBUG_BUSRESETS 8 /* only effective before chip init */
static int param_debug;
module_param_named(debug, param_debug, int, 0644);
MODULE_PARM_DESC(debug, "Verbose logging (default = 0"
", AT/AR events = " __stringify(OHCI_PARAM_DEBUG_AT_AR)
", self-IDs = " __stringify(OHCI_PARAM_DEBUG_SELFIDS)
", IRQs = " __stringify(OHCI_PARAM_DEBUG_IRQS)
", busReset events = " __stringify(OHCI_PARAM_DEBUG_BUSRESETS)
", or a combination, or all = -1)");
static void log_irqs(u32 evt)
{
if (likely(!(param_debug &
(OHCI_PARAM_DEBUG_IRQS | OHCI_PARAM_DEBUG_BUSRESETS))))
return;
if (!(param_debug & OHCI_PARAM_DEBUG_IRQS) &&
!(evt & OHCI1394_busReset))
return;
printk(KERN_DEBUG KBUILD_MODNAME ": IRQ "
"%08x%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
evt,
evt & OHCI1394_selfIDComplete ? " selfID" : "",
evt & OHCI1394_RQPkt ? " AR_req" : "",
evt & OHCI1394_RSPkt ? " AR_resp" : "",
evt & OHCI1394_reqTxComplete ? " AT_req" : "",
evt & OHCI1394_respTxComplete ? " AT_resp" : "",
evt & OHCI1394_isochRx ? " IR" : "",
evt & OHCI1394_isochTx ? " IT" : "",
evt & OHCI1394_postedWriteErr ? " postedWriteErr" : "",
evt & OHCI1394_cycleTooLong ? " cycleTooLong" : "",
evt & OHCI1394_cycle64Seconds ? " cycle64Seconds" : "",
evt & OHCI1394_regAccessFail ? " regAccessFail" : "",
evt & OHCI1394_busReset ? " busReset" : "",
evt & ~(OHCI1394_selfIDComplete | OHCI1394_RQPkt |
OHCI1394_RSPkt | OHCI1394_reqTxComplete |
OHCI1394_respTxComplete | OHCI1394_isochRx |
OHCI1394_isochTx | OHCI1394_postedWriteErr |
OHCI1394_cycleTooLong | OHCI1394_cycle64Seconds |
OHCI1394_regAccessFail | OHCI1394_busReset)
? " ?" : "");
}
static const char *speed[] = {
[0] = "S100", [1] = "S200", [2] = "S400", [3] = "beta",
};
static const char *power[] = {
[0] = "+0W", [1] = "+15W", [2] = "+30W", [3] = "+45W",
[4] = "-3W", [5] = " ?W", [6] = "-3..-6W", [7] = "-3..-10W",
};
static const char port[] = { '.', '-', 'p', 'c', };
static char _p(u32 *s, int shift)
{
return port[*s >> shift & 3];
}
static void log_selfids(int node_id, int generation, int self_id_count, u32 *s)
{
if (likely(!(param_debug & OHCI_PARAM_DEBUG_SELFIDS)))
return;
printk(KERN_DEBUG KBUILD_MODNAME ": %d selfIDs, generation %d, "
"local node ID %04x\n", self_id_count, generation, node_id);
for (; self_id_count--; ++s)
if ((*s & 1 << 23) == 0)
printk(KERN_DEBUG "selfID 0: %08x, phy %d [%c%c%c] "
"%s gc=%d %s %s%s%s\n",
*s, *s >> 24 & 63, _p(s, 6), _p(s, 4), _p(s, 2),
speed[*s >> 14 & 3], *s >> 16 & 63,
power[*s >> 8 & 7], *s >> 22 & 1 ? "L" : "",
*s >> 11 & 1 ? "c" : "", *s & 2 ? "i" : "");
else
printk(KERN_DEBUG "selfID n: %08x, phy %d "
"[%c%c%c%c%c%c%c%c]\n",
*s, *s >> 24 & 63,
_p(s, 16), _p(s, 14), _p(s, 12), _p(s, 10),
_p(s, 8), _p(s, 6), _p(s, 4), _p(s, 2));
}
static const char *evts[] = {
[0x00] = "evt_no_status", [0x01] = "-reserved-",
[0x02] = "evt_long_packet", [0x03] = "evt_missing_ack",
[0x04] = "evt_underrun", [0x05] = "evt_overrun",
[0x06] = "evt_descriptor_read", [0x07] = "evt_data_read",
[0x08] = "evt_data_write", [0x09] = "evt_bus_reset",
[0x0a] = "evt_timeout", [0x0b] = "evt_tcode_err",
[0x0c] = "-reserved-", [0x0d] = "-reserved-",
[0x0e] = "evt_unknown", [0x0f] = "evt_flushed",
[0x10] = "-reserved-", [0x11] = "ack_complete",
[0x12] = "ack_pending ", [0x13] = "-reserved-",
[0x14] = "ack_busy_X", [0x15] = "ack_busy_A",
[0x16] = "ack_busy_B", [0x17] = "-reserved-",
[0x18] = "-reserved-", [0x19] = "-reserved-",
[0x1a] = "-reserved-", [0x1b] = "ack_tardy",
[0x1c] = "-reserved-", [0x1d] = "ack_data_error",
[0x1e] = "ack_type_error", [0x1f] = "-reserved-",
[0x20] = "pending/cancelled",
};
static const char *tcodes[] = {
[0x0] = "QW req", [0x1] = "BW req",
[0x2] = "W resp", [0x3] = "-reserved-",
[0x4] = "QR req", [0x5] = "BR req",
[0x6] = "QR resp", [0x7] = "BR resp",
[0x8] = "cycle start", [0x9] = "Lk req",
[0xa] = "async stream packet", [0xb] = "Lk resp",
[0xc] = "-reserved-", [0xd] = "-reserved-",
[0xe] = "link internal", [0xf] = "-reserved-",
};
static const char *phys[] = {
[0x0] = "phy config packet", [0x1] = "link-on packet",
[0x2] = "self-id packet", [0x3] = "-reserved-",
};
static void log_ar_at_event(char dir, int speed, u32 *header, int evt)
{
int tcode = header[0] >> 4 & 0xf;
char specific[12];
if (likely(!(param_debug & OHCI_PARAM_DEBUG_AT_AR)))
return;
if (unlikely(evt >= ARRAY_SIZE(evts)))
evt = 0x1f;
if (evt == OHCI1394_evt_bus_reset) {
printk(KERN_DEBUG "A%c evt_bus_reset, generation %d\n",
dir, (header[2] >> 16) & 0xff);
return;
}
if (header[0] == ~header[1]) {
printk(KERN_DEBUG "A%c %s, %s, %08x\n",
dir, evts[evt], phys[header[0] >> 30 & 0x3],
header[0]);
return;
}
switch (tcode) {
case 0x0: case 0x6: case 0x8:
snprintf(specific, sizeof(specific), " = %08x",
be32_to_cpu((__force __be32)header[3]));
break;
case 0x1: case 0x5: case 0x7: case 0x9: case 0xb:
snprintf(specific, sizeof(specific), " %x,%x",
header[3] >> 16, header[3] & 0xffff);
break;
default:
specific[0] = '\0';
}
switch (tcode) {
case 0xe: case 0xa:
printk(KERN_DEBUG "A%c %s, %s\n",
dir, evts[evt], tcodes[tcode]);
break;
case 0x0: case 0x1: case 0x4: case 0x5: case 0x9:
printk(KERN_DEBUG "A%c spd %x tl %02x, "
"%04x -> %04x, %s, "
"%s, %04x%08x%s\n",
dir, speed, header[0] >> 10 & 0x3f,
header[1] >> 16, header[0] >> 16, evts[evt],
tcodes[tcode], header[1] & 0xffff, header[2], specific);
break;
default:
printk(KERN_DEBUG "A%c spd %x tl %02x, "
"%04x -> %04x, %s, "
"%s%s\n",
dir, speed, header[0] >> 10 & 0x3f,
header[1] >> 16, header[0] >> 16, evts[evt],
tcodes[tcode], specific);
}
}
#else
#define log_irqs(evt)
#define log_selfids(node_id, generation, self_id_count, sid)
#define log_ar_at_event(dir, speed, header, evt)
#endif /* CONFIG_FIREWIRE_OHCI_DEBUG */
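The field layout used by log_selfids() above can be exercised standalone. A small userspace sketch that decodes one (non-extended) self-ID quadlet with the same shifts and masks, assuming the quadlet is already in host byte order:

#include <stdio.h>
#include <stdint.h>

static void decode_self_id(uint32_t s)
{
	static const char *speed[] = { "S100", "S200", "S400", "beta" };

	printf("phy %u, link %s, gap count %u, %s, %s\n",
	       s >> 24 & 63,			/* phy ID */
	       s >> 22 & 1 ? "on" : "off",	/* link active ("L" above) */
	       s >> 16 & 63,			/* gap count ("gc=" above) */
	       speed[s >> 14 & 3],		/* phy speed */
	       s & 2 ? "initiated reset" : "did not initiate reset");
}

int main(void)
{
	decode_self_id(0x807f8cc0);	/* example value, made up for illustration */
	return 0;
}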
static inline void reg_write(const struct fw_ohci *ohci, int offset, u32 data)
{
writel(data, ohci->registers + offset);
......@@ -320,6 +512,7 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
struct fw_ohci *ohci = ctx->ohci;
struct fw_packet p;
u32 status, length, tcode;
int evt;
p.header[0] = cond_le32_to_cpu(buffer[0]);
p.header[1] = cond_le32_to_cpu(buffer[1]);
......@@ -362,12 +555,15 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
/* FIXME: What to do about evt_* errors? */
length = (p.header_length + p.payload_length + 3) / 4;
status = cond_le32_to_cpu(buffer[length]);
evt = (status >> 16) & 0x1f;
p.ack = ((status >> 16) & 0x1f) - 16;
p.ack = evt - 16;
p.speed = (status >> 21) & 0x7;
p.timestamp = status & 0xffff;
p.generation = ohci->request_generation;
log_ar_at_event('R', p.speed, p.header, evt);
/*
* The OHCI bus reset handler synthesizes a phy packet with
* the new generation number when a bus reset happens (see
......@@ -376,14 +572,19 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
* generation. We only need this for requests; for responses
* we use the unique tlabel for finding the matching
* request.
*
* Alas some chips sometimes emit bus reset packets with a
* wrong generation. We set the correct generation for these
* at a slightly incorrect time (in bus_reset_tasklet).
*/
if (p.ack + 16 == 0x09)
if (evt == OHCI1394_evt_bus_reset) {
if (!ohci->bus_reset_packet_quirk)
ohci->request_generation = (p.header[2] >> 16) & 0xff;
else if (ctx == &ohci->ar_request_ctx)
} else if (ctx == &ohci->ar_request_ctx) {
fw_core_handle_request(&ohci->card, &p);
else
} else {
fw_core_handle_response(&ohci->card, &p);
}
return buffer + length + 1;
}
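The synthesized bus-reset packet mentioned in the comment is recognized the same way log_ar_at_event() recognizes PHY packets; a minimal standalone sketch of the two checks the handler relies on, header quadlets assumed to be in host byte order:

#include <stdbool.h>
#include <stdint.h>

/* A PHY/bus-reset packet carries its own complement in the second quadlet. */
static bool is_phy_packet(const uint32_t *header)
{
	return header[0] == ~header[1];
}

/* For the synthesized bus-reset packet, the new generation sits in header[2]. */
static int bus_reset_generation(const uint32_t *header)
{
	return (header[2] >> 16) & 0xff;
}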
......@@ -770,8 +971,19 @@ at_context_queue_packet(struct context *ctx, struct fw_packet *packet)
DESCRIPTOR_IRQ_ALWAYS |
DESCRIPTOR_BRANCH_ALWAYS);
/* FIXME: Document how the locking works. */
if (ohci->generation != packet->generation) {
/*
* If the controller and packet generations don't match, we need to
* bail out and try again. If IntEvent.busReset is set, the AT context
* is halted, so appending to the context and trying to run it is
* futile. Most controllers do the right thing and just flush the AT
* queue (per section 7.2.3.2 of the OHCI 1.1 specification), but
* some controllers (like a JMicron JMB381 PCI-e) misbehave and wind
* up stalling out. So we just bail out in software and try again
* later, and everyone is happy.
* FIXME: Document how the locking works.
*/
if (ohci->generation != packet->generation ||
reg_read(ohci, OHCI1394_IntEventSet) & OHCI1394_busReset) {
if (packet->payload_length > 0)
dma_unmap_single(ohci->card.device, payload_bus,
packet->payload_length, DMA_TO_DEVICE);
......@@ -817,6 +1029,8 @@ static int handle_at_packet(struct context *context,
evt = le16_to_cpu(last->transfer_status) & 0x1f;
packet->timestamp = le16_to_cpu(last->res_count);
log_ar_at_event('T', packet->speed, packet->header, evt);
switch (evt) {
case OHCI1394_evt_timeout:
/* Async response transmit timed out. */
......@@ -1019,20 +1233,30 @@ static void bus_reset_tasklet(unsigned long data)
ohci->node_id = reg & (OHCI1394_NodeID_busNumber |
OHCI1394_NodeID_nodeNumber);
reg = reg_read(ohci, OHCI1394_SelfIDCount);
if (reg & OHCI1394_SelfIDCount_selfIDError) {
fw_notify("inconsistent self IDs\n");
return;
}
/*
* The count in the SelfIDCount register is the number of
* bytes in the self ID receive buffer. Since we also receive
* the inverted quadlets and a header quadlet, we shift one
* bit extra to get the actual number of self IDs.
*/
self_id_count = (reg_read(ohci, OHCI1394_SelfIDCount) >> 3) & 0x3ff;
self_id_count = (reg >> 3) & 0x3ff;
if (self_id_count == 0) {
fw_notify("inconsistent self IDs\n");
return;
}
generation = (cond_le32_to_cpu(ohci->self_id_cpu[0]) >> 16) & 0xff;
rmb();
for (i = 1, j = 0; j < self_id_count; i += 2, j++) {
if (ohci->self_id_cpu[i] != ~ohci->self_id_cpu[i + 1])
fw_error("inconsistent self IDs\n");
if (ohci->self_id_cpu[i] != ~ohci->self_id_cpu[i + 1]) {
fw_notify("inconsistent self IDs\n");
return;
}
ohci->self_id_buffer[j] =
cond_le32_to_cpu(ohci->self_id_cpu[i]);
}
......@@ -1067,6 +1291,9 @@ static void bus_reset_tasklet(unsigned long data)
context_stop(&ohci->at_response_ctx);
reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
if (ohci->bus_reset_packet_quirk)
ohci->request_generation = generation;
/*
* This next bit is unrelated to the AT context stuff but we
* have to do it under the spinlock also. If a new config rom
......@@ -1097,12 +1324,20 @@ static void bus_reset_tasklet(unsigned long data)
reg_write(ohci, OHCI1394_ConfigROMhdr, ohci->next_header);
}
#ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA
reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0);
reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
#endif
spin_unlock_irqrestore(&ohci->lock, flags);
if (free_rom)
dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
free_rom, free_rom_bus);
log_selfids(ohci->node_id, generation,
self_id_count, ohci->self_id_buffer);
fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation,
self_id_count, ohci->self_id_buffer);
}
......@@ -1118,7 +1353,9 @@ static irqreturn_t irq_handler(int irq, void *data)
if (!event || !~event)
return IRQ_NONE;
reg_write(ohci, OHCI1394_IntEventClear, event);
/* busReset must not be cleared yet, see OHCI 1.1 clause 7.2.3.2 */
reg_write(ohci, OHCI1394_IntEventClear, event & ~OHCI1394_busReset);
log_irqs(event);
if (event & OHCI1394_selfIDComplete)
tasklet_schedule(&ohci->bus_reset_tasklet);
......@@ -1153,6 +1390,10 @@ static irqreturn_t irq_handler(int irq, void *data)
iso_event &= ~(1 << i);
}
if (unlikely(event & OHCI1394_regAccessFail))
fw_error("Register access failure - "
"please notify linux1394-devel@lists.sf.net\n");
if (unlikely(event & OHCI1394_postedWriteErr))
fw_error("PCI posted write error\n");
......@@ -1192,6 +1433,8 @@ static int ohci_enable(struct fw_card *card, u32 *config_rom, size_t length)
{
struct fw_ohci *ohci = fw_ohci(card);
struct pci_dev *dev = to_pci_dev(card->device);
u32 lps;
int i;
if (software_reset(ohci)) {
fw_error("Failed to reset ohci card.\n");
......@@ -1203,13 +1446,24 @@ static int ohci_enable(struct fw_card *card, u32 *config_rom, size_t length)
* most of the registers. In fact, on some cards (ALI M5251),
* accessing registers in the SClk domain without LPS enabled
* will lock up the machine. Wait 50msec to make sure we have
* full link enabled.
* full link enabled. However, with some cards (well, at least
* a JMicron PCIe card), we have to try again sometimes.
*/
reg_write(ohci, OHCI1394_HCControlSet,
OHCI1394_HCControl_LPS |
OHCI1394_HCControl_postedWriteEnable);
flush_writes(ohci);
for (lps = 0, i = 0; !lps && i < 3; i++) {
msleep(50);
lps = reg_read(ohci, OHCI1394_HCControlSet) &
OHCI1394_HCControl_LPS;
}
if (!lps) {
fw_error("Failed to set Link Power Status\n");
return -EIO;
}
reg_write(ohci, OHCI1394_HCControlClear,
OHCI1394_HCControl_noByteSwapData);
......@@ -1237,7 +1491,10 @@ static int ohci_enable(struct fw_card *card, u32 *config_rom, size_t length)
OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
OHCI1394_isochRx | OHCI1394_isochTx |
OHCI1394_postedWriteErr | OHCI1394_cycleTooLong |
OHCI1394_cycle64Seconds | OHCI1394_masterIntEnable);
OHCI1394_cycle64Seconds | OHCI1394_regAccessFail |
OHCI1394_masterIntEnable);
if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS)
reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
/* Activate link_on bit and contender bit in our self ID packets.*/
if (ohci_update_phy_reg(card, 4, 0,
......@@ -1421,6 +1678,7 @@ static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
if (packet->ack != 0)
goto out;
log_ar_at_event('T', packet->speed, packet->header, 0x20);
driver_data->packet = NULL;
packet->ack = RCODE_CANCELLED;
packet->callback(packet, &ohci->card, packet->ack);
......@@ -1435,6 +1693,9 @@ static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
static int
ohci_enable_phys_dma(struct fw_card *card, int node_id, int generation)
{
#ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA
return 0;
#else
struct fw_ohci *ohci = fw_ohci(card);
unsigned long flags;
int n, retval = 0;
......@@ -1466,6 +1727,7 @@ ohci_enable_phys_dma(struct fw_card *card, int node_id, int generation)
out:
spin_unlock_irqrestore(&ohci->lock, flags);
return retval;
#endif /* CONFIG_FIREWIRE_OHCI_REMOTE_DMA */
}
static u64
......@@ -2045,17 +2307,9 @@ static const struct fw_card_driver ohci_driver = {
.stop_iso = ohci_stop_iso,
};
static int __devinit
pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
{
struct fw_ohci *ohci;
u32 bus_options, max_receive, link_speed;
u64 guid;
int err;
size_t size;
#ifdef CONFIG_PPC_PMAC
/* Necessary on some machines if fw-ohci was loaded/unloaded before */
static void ohci_pmac_on(struct pci_dev *dev)
{
if (machine_is(powermac)) {
struct device_node *ofn = pci_device_to_OF_node(dev);
......@@ -2064,8 +2318,33 @@ pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1);
}
}
}
static void ohci_pmac_off(struct pci_dev *dev)
{
if (machine_is(powermac)) {
struct device_node *ofn = pci_device_to_OF_node(dev);
if (ofn) {
pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 0);
}
}
}
#else
#define ohci_pmac_on(dev)
#define ohci_pmac_off(dev)
#endif /* CONFIG_PPC_PMAC */
static int __devinit
pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
{
struct fw_ohci *ohci;
u32 bus_options, max_receive, link_speed;
u64 guid;
int err;
size_t size;
ohci = kzalloc(sizeof(*ohci), GFP_KERNEL);
if (ohci == NULL) {
fw_error("Could not malloc fw_ohci data.\n");
......@@ -2074,10 +2353,12 @@ pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev);
ohci_pmac_on(dev);
err = pci_enable_device(dev);
if (err) {
fw_error("Failed to enable OHCI hardware.\n");
goto fail_put_card;
goto fail_free;
}
pci_set_master(dev);
......@@ -2088,6 +2369,8 @@ pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
ohci->old_uninorth = dev->vendor == PCI_VENDOR_ID_APPLE &&
dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW;
#endif
ohci->bus_reset_packet_quirk = dev->vendor == PCI_VENDOR_ID_TI;
spin_lock_init(&ohci->lock);
tasklet_init(&ohci->bus_reset_tasklet,
......@@ -2173,8 +2456,9 @@ pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
pci_release_region(dev, 0);
fail_disable:
pci_disable_device(dev);
fail_put_card:
fw_card_put(&ohci->card);
fail_free:
kfree(&ohci->card);
ohci_pmac_off(dev);
return err;
}
......@@ -2202,72 +2486,42 @@ static void pci_remove(struct pci_dev *dev)
pci_iounmap(dev, ohci->registers);
pci_release_region(dev, 0);
pci_disable_device(dev);
fw_card_put(&ohci->card);
#ifdef CONFIG_PPC_PMAC
/* On UniNorth, power down the cable and turn off the chip clock
* to save power on laptops */
if (machine_is(powermac)) {
struct device_node *ofn = pci_device_to_OF_node(dev);
if (ofn) {
pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 0);
}
}
#endif /* CONFIG_PPC_PMAC */
kfree(&ohci->card);
ohci_pmac_off(dev);
fw_notify("Removed fw-ohci device.\n");
}
#ifdef CONFIG_PM
static int pci_suspend(struct pci_dev *pdev, pm_message_t state)
static int pci_suspend(struct pci_dev *dev, pm_message_t state)
{
struct fw_ohci *ohci = pci_get_drvdata(pdev);
struct fw_ohci *ohci = pci_get_drvdata(dev);
int err;
software_reset(ohci);
free_irq(pdev->irq, ohci);
err = pci_save_state(pdev);
free_irq(dev->irq, ohci);
err = pci_save_state(dev);
if (err) {
fw_error("pci_save_state failed\n");
return err;
}
err = pci_set_power_state(pdev, pci_choose_state(pdev, state));
err = pci_set_power_state(dev, pci_choose_state(dev, state));
if (err)
fw_error("pci_set_power_state failed with %d\n", err);
/* PowerMac suspend code comes last */
#ifdef CONFIG_PPC_PMAC
if (machine_is(powermac)) {
struct device_node *ofn = pci_device_to_OF_node(pdev);
if (ofn)
pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
}
#endif /* CONFIG_PPC_PMAC */
ohci_pmac_off(dev);
return 0;
}
static int pci_resume(struct pci_dev *pdev)
static int pci_resume(struct pci_dev *dev)
{
struct fw_ohci *ohci = pci_get_drvdata(pdev);
struct fw_ohci *ohci = pci_get_drvdata(dev);
int err;
/* PowerMac resume code comes first */
#ifdef CONFIG_PPC_PMAC
if (machine_is(powermac)) {
struct device_node *ofn = pci_device_to_OF_node(pdev);
if (ofn)
pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1);
}
#endif /* CONFIG_PPC_PMAC */
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
err = pci_enable_device(pdev);
ohci_pmac_on(dev);
pci_set_power_state(dev, PCI_D0);
pci_restore_state(dev);
err = pci_enable_device(dev);
if (err) {
fw_error("pci_enable_device failed\n");
return err;
......
......@@ -30,6 +30,7 @@
#define OHCI1394_HCControl_softReset 0x00010000
#define OHCI1394_SelfIDBuffer 0x064
#define OHCI1394_SelfIDCount 0x068
#define OHCI1394_SelfIDCount_selfIDError 0x80000000
#define OHCI1394_IRMultiChanMaskHiSet 0x070
#define OHCI1394_IRMultiChanMaskHiClear 0x074
#define OHCI1394_IRMultiChanMaskLoSet 0x078
......@@ -124,6 +125,7 @@
#define OHCI1394_lockRespErr 0x00000200
#define OHCI1394_selfIDComplete 0x00010000
#define OHCI1394_busReset 0x00020000
#define OHCI1394_regAccessFail 0x00040000
#define OHCI1394_phy 0x00080000
#define OHCI1394_cycleSynch 0x00100000
#define OHCI1394_cycle64Seconds 0x00200000
......
......@@ -153,6 +153,7 @@ struct sbp2_target {
struct list_head lu_list;
u64 management_agent_address;
u64 guid;
int directory_id;
int node_id;
int address_high;
......@@ -174,9 +175,7 @@ struct sbp2_target {
#define SBP2_ORB_NULL 0x80000000
#define SBP2_MAX_SG_ELEMENT_LENGTH 0xf000
#define SBP2_RETRY_LIMIT 0xf /* 15 retries */
#define SBP2_DIRECTION_TO_MEDIA 0x0
#define SBP2_DIRECTION_FROM_MEDIA 0x1
#define SBP2_CYCLE_LIMIT (0xc8 << 12) /* 200 125us cycles */
/* Unit directory keys */
#define SBP2_CSR_UNIT_CHARACTERISTICS 0x3a
......@@ -224,8 +223,8 @@ struct sbp2_status {
};
struct sbp2_pointer {
u32 high;
u32 low;
__be32 high;
__be32 low;
};
struct sbp2_orb {
......@@ -253,8 +252,8 @@ struct sbp2_management_orb {
struct {
struct sbp2_pointer password;
struct sbp2_pointer response;
u32 misc;
u32 length;
__be32 misc;
__be32 length;
struct sbp2_pointer status_fifo;
} request;
__be32 response[4];
......@@ -263,20 +262,17 @@ struct sbp2_management_orb {
struct sbp2_status status;
};
#define LOGIN_RESPONSE_GET_LOGIN_ID(v) ((v).misc & 0xffff)
#define LOGIN_RESPONSE_GET_LENGTH(v) (((v).misc >> 16) & 0xffff)
struct sbp2_login_response {
u32 misc;
__be32 misc;
struct sbp2_pointer command_block_agent;
u32 reconnect_hold;
__be32 reconnect_hold;
};
#define COMMAND_ORB_DATA_SIZE(v) ((v))
#define COMMAND_ORB_PAGE_SIZE(v) ((v) << 16)
#define COMMAND_ORB_PAGE_TABLE_PRESENT ((1) << 19)
#define COMMAND_ORB_MAX_PAYLOAD(v) ((v) << 20)
#define COMMAND_ORB_SPEED(v) ((v) << 24)
#define COMMAND_ORB_DIRECTION(v) ((v) << 27)
#define COMMAND_ORB_DIRECTION ((1) << 27)
#define COMMAND_ORB_REQUEST_FORMAT(v) ((v) << 29)
#define COMMAND_ORB_NOTIFY ((1) << 31)
......@@ -285,7 +281,7 @@ struct sbp2_command_orb {
struct {
struct sbp2_pointer next;
struct sbp2_pointer data_descriptor;
u32 misc;
__be32 misc;
u8 command_block[12];
} request;
struct scsi_cmnd *cmd;
......@@ -459,8 +455,7 @@ sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu,
unsigned long flags;
orb->pointer.high = 0;
orb->pointer.low = orb->request_bus;
fw_memcpy_to_be32(&orb->pointer, &orb->pointer, sizeof(orb->pointer));
orb->pointer.low = cpu_to_be32(orb->request_bus);
spin_lock_irqsave(&device->card->lock, flags);
list_add_tail(&orb->link, &lu->orb_list);
......@@ -537,30 +532,30 @@ sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id,
goto fail_mapping_response;
orb->request.response.high = 0;
orb->request.response.low = orb->response_bus;
orb->request.response.low = cpu_to_be32(orb->response_bus);
orb->request.misc =
orb->request.misc = cpu_to_be32(
MANAGEMENT_ORB_NOTIFY |
MANAGEMENT_ORB_FUNCTION(function) |
MANAGEMENT_ORB_LUN(lun_or_login_id);
orb->request.length =
MANAGEMENT_ORB_RESPONSE_LENGTH(sizeof(orb->response));
MANAGEMENT_ORB_LUN(lun_or_login_id));
orb->request.length = cpu_to_be32(
MANAGEMENT_ORB_RESPONSE_LENGTH(sizeof(orb->response)));
orb->request.status_fifo.high = lu->address_handler.offset >> 32;
orb->request.status_fifo.low = lu->address_handler.offset;
orb->request.status_fifo.high =
cpu_to_be32(lu->address_handler.offset >> 32);
orb->request.status_fifo.low =
cpu_to_be32(lu->address_handler.offset);
if (function == SBP2_LOGIN_REQUEST) {
/* Ask for 2^2 == 4 seconds reconnect grace period */
orb->request.misc |=
orb->request.misc |= cpu_to_be32(
MANAGEMENT_ORB_RECONNECT(2) |
MANAGEMENT_ORB_EXCLUSIVE(sbp2_param_exclusive_login);
MANAGEMENT_ORB_EXCLUSIVE(sbp2_param_exclusive_login));
timeout = lu->tgt->mgt_orb_timeout;
} else {
timeout = SBP2_ORB_TIMEOUT;
}
fw_memcpy_to_be32(&orb->request, &orb->request, sizeof(orb->request));
init_completion(&orb->done);
orb->base.callback = complete_management_orb;
......@@ -605,8 +600,7 @@ sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id,
sizeof(orb->response), DMA_FROM_DEVICE);
fail_mapping_response:
if (response)
fw_memcpy_from_be32(response,
orb->response, sizeof(orb->response));
memcpy(response, orb->response, sizeof(orb->response));
kref_put(&orb->base.kref, free_orb);
return retval;
......@@ -701,10 +695,8 @@ static void sbp2_conditionally_block(struct sbp2_logical_unit *lu)
if (!tgt->dont_block && !lu->blocked &&
lu->generation != card->generation) {
lu->blocked = true;
if (++tgt->blocked == 1) {
if (++tgt->blocked == 1)
scsi_block_requests(shost);
fw_notify("blocked %s\n", lu->tgt->bus_id);
}
}
spin_unlock_irqrestore(&card->lock, flags);
}
......@@ -731,10 +723,8 @@ static void sbp2_conditionally_unblock(struct sbp2_logical_unit *lu)
}
spin_unlock_irqrestore(&card->lock, flags);
if (unblock) {
if (unblock)
scsi_unblock_requests(shost);
fw_notify("unblocked %s\n", lu->tgt->bus_id);
}
}
/*
......@@ -796,7 +786,7 @@ static void sbp2_release_target(struct kref *kref)
scsi_remove_host(shost);
fw_notify("released %s\n", tgt->bus_id);
put_device(&tgt->unit->device);
fw_unit_put(tgt->unit);
scsi_host_put(shost);
fw_device_put(device);
}
......@@ -825,6 +815,22 @@ complete_set_busy_timeout(struct fw_card *card, int rcode,
complete(done);
}
/*
* Write retransmit retry values into the BUSY_TIMEOUT register.
* - The single-phase retry protocol is supported by all SBP-2 devices, but the
* default retry_limit value is 0 (i.e. never retry transmission). We write a
* saner value after logging into the device.
* - The dual-phase retry protocol is optional to implement, and if not
* supported, writes to the dual-phase portion of the register will be
* ignored. We try to write the original 1394-1995 default here.
* - In the case of devices that are also SBP-3-compliant, all writes are
* ignored, as the register is read-only, but contains single-phase retry of
15, which is what we're trying to set for all SBP-2 devices anyway, so this
* write attempt is safe and yields more consistent behavior for all devices.
*
* See section 8.3.2.3.5 of the 1394-1995 spec, section 6.2 of the SBP-2 spec,
* and section 6.4 of the SBP-3 spec for further details.
*/
static void sbp2_set_busy_timeout(struct sbp2_logical_unit *lu)
{
struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
......@@ -832,8 +838,7 @@ static void sbp2_set_busy_timeout(struct sbp2_logical_unit *lu)
struct fw_transaction t;
static __be32 busy_timeout;
/* FIXME: we should try to set dual-phase cycle_limit too */
busy_timeout = cpu_to_be32(SBP2_RETRY_LIMIT);
busy_timeout = cpu_to_be32(SBP2_CYCLE_LIMIT | SBP2_RETRY_LIMIT);
fw_send_request(device->card, &t, TCODE_WRITE_QUADLET_REQUEST,
lu->tgt->node_id, lu->generation, device->max_speed,
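With the constants introduced earlier in this patch (SBP2_CYCLE_LIMIT = 0xc8 << 12, SBP2_RETRY_LIMIT = 0xf), the quadlet written to the BUSY_TIMEOUT register works out as follows; a worked example, not additional driver code:

#include <stdio.h>
#include <stdint.h>

#define SBP2_CYCLE_LIMIT	(0xc8 << 12)	/* dual-phase: 200 * 125us cycles = 25 ms */
#define SBP2_RETRY_LIMIT	0xf		/* single-phase retry limit: 15 retries */

int main(void)
{
	uint32_t busy_timeout = SBP2_CYCLE_LIMIT | SBP2_RETRY_LIMIT;

	printf("BUSY_TIMEOUT quadlet: 0x%08x\n", busy_timeout);	/* 0x000c800f */
	return 0;
}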
......@@ -885,11 +890,10 @@ static void sbp2_login(struct work_struct *work)
tgt->address_high = local_node_id << 16;
sbp2_set_generation(lu, generation);
/* Get command block agent offset and login id. */
lu->command_block_agent_address =
((u64) (response.command_block_agent.high & 0xffff) << 32) |
response.command_block_agent.low;
lu->login_id = LOGIN_RESPONSE_GET_LOGIN_ID(response);
((u64)(be32_to_cpu(response.command_block_agent.high) & 0xffff)
<< 32) | be32_to_cpu(response.command_block_agent.low);
lu->login_id = be32_to_cpu(response.misc) & 0xffff;
fw_notify("%s: logged in to LUN %04x (%d retries)\n",
tgt->bus_id, lu->lun, lu->retries);
......@@ -1111,6 +1115,7 @@ static int sbp2_probe(struct device *dev)
kref_init(&tgt->kref);
INIT_LIST_HEAD(&tgt->lu_list);
tgt->bus_id = unit->device.bus_id;
tgt->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4];
if (fw_device_enable_phys_dma(device) < 0)
goto fail_shost_put;
......@@ -1119,6 +1124,7 @@ static int sbp2_probe(struct device *dev)
goto fail_shost_put;
fw_device_get(device);
fw_unit_get(unit);
/* Initialize to values that won't match anything in our table. */
firmware_revision = 0xff000000;
......@@ -1134,8 +1140,6 @@ static int sbp2_probe(struct device *dev)
sbp2_init_workarounds(tgt, model, firmware_revision);
get_device(&unit->device);
/* Do the login in a workqueue so we can easily reschedule retries. */
list_for_each_entry(lu, &tgt->lu_list, link)
sbp2_queue_work(lu, 0);
......@@ -1367,9 +1371,12 @@ sbp2_map_scatterlist(struct sbp2_command_orb *orb, struct fw_device *device,
* tables.
*/
if (count == 1 && sg_dma_len(sg) < SBP2_MAX_SG_ELEMENT_LENGTH) {
orb->request.data_descriptor.high = lu->tgt->address_high;
orb->request.data_descriptor.low = sg_dma_address(sg);
orb->request.misc |= COMMAND_ORB_DATA_SIZE(sg_dma_len(sg));
orb->request.data_descriptor.high =
cpu_to_be32(lu->tgt->address_high);
orb->request.data_descriptor.low =
cpu_to_be32(sg_dma_address(sg));
orb->request.misc |=
cpu_to_be32(COMMAND_ORB_DATA_SIZE(sg_dma_len(sg)));
return 0;
}
......@@ -1390,16 +1397,14 @@ sbp2_map_scatterlist(struct sbp2_command_orb *orb, struct fw_device *device,
goto fail_page_table;
}
l = min(sg_len, SBP2_MAX_SG_ELEMENT_LENGTH);
orb->page_table[j].low = sg_addr;
orb->page_table[j].high = (l << 16);
orb->page_table[j].low = cpu_to_be32(sg_addr);
orb->page_table[j].high = cpu_to_be32(l << 16);
sg_addr += l;
sg_len -= l;
j++;
}
}
fw_memcpy_to_be32(orb->page_table, orb->page_table,
sizeof(orb->page_table[0]) * j);
orb->page_table_bus =
dma_map_single(device->card->device, orb->page_table,
sizeof(orb->page_table), DMA_TO_DEVICE);
......@@ -1413,11 +1418,10 @@ sbp2_map_scatterlist(struct sbp2_command_orb *orb, struct fw_device *device,
* initiator (i.e. us), but data_descriptor can refer to data
* on other nodes so we need to put our ID in descriptor.high.
*/
orb->request.data_descriptor.high = lu->tgt->address_high;
orb->request.data_descriptor.low = orb->page_table_bus;
orb->request.misc |=
COMMAND_ORB_PAGE_TABLE_PRESENT |
COMMAND_ORB_DATA_SIZE(j);
orb->request.data_descriptor.high = cpu_to_be32(lu->tgt->address_high);
orb->request.data_descriptor.low = cpu_to_be32(orb->page_table_bus);
orb->request.misc |= cpu_to_be32(COMMAND_ORB_PAGE_TABLE_PRESENT |
COMMAND_ORB_DATA_SIZE(j));
return 0;
......@@ -1463,8 +1467,7 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
orb->done = done;
orb->cmd = cmd;
orb->request.next.high = SBP2_ORB_NULL;
orb->request.next.low = 0x0;
orb->request.next.high = cpu_to_be32(SBP2_ORB_NULL);
/*
* At speed 100 we can do 512 bytes per packet, at speed 200,
* 1024 bytes per packet etc. The SBP-2 max_payload field
......@@ -1473,25 +1476,17 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
*/
max_payload = min(device->max_speed + 7,
device->card->max_receive - 1);
orb->request.misc =
orb->request.misc = cpu_to_be32(
COMMAND_ORB_MAX_PAYLOAD(max_payload) |
COMMAND_ORB_SPEED(device->max_speed) |
COMMAND_ORB_NOTIFY;
COMMAND_ORB_NOTIFY);
if (cmd->sc_data_direction == DMA_FROM_DEVICE)
orb->request.misc |=
COMMAND_ORB_DIRECTION(SBP2_DIRECTION_FROM_MEDIA);
else if (cmd->sc_data_direction == DMA_TO_DEVICE)
orb->request.misc |=
COMMAND_ORB_DIRECTION(SBP2_DIRECTION_TO_MEDIA);
orb->request.misc |= cpu_to_be32(COMMAND_ORB_DIRECTION);
if (scsi_sg_count(cmd) && sbp2_map_scatterlist(orb, device, lu) < 0)
goto out;
fw_memcpy_to_be32(&orb->request, &orb->request, sizeof(orb->request));
memset(orb->request.command_block,
0, sizeof(orb->request.command_block));
memcpy(orb->request.command_block, cmd->cmnd, COMMAND_SIZE(*cmd->cmnd));
orb->base.callback = complete_command_orb;
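A worked example of the max_payload clamp above, under the assumption stated in the comment that the encoded payload size is 512 bytes at S100 and doubles per speed step (i.e. 2^(max_payload + 2) bytes); the controller value is hypothetical:

#include <stdio.h>

int main(void)
{
	int max_speed = 2;	/* S400 */
	int max_receive = 11;	/* hypothetical controller limit, 2^(11+1) = 4096 bytes */
	int max_payload = max_speed + 7 < max_receive - 1 ?
			  max_speed + 7 : max_receive - 1;

	/* max_payload = 9, i.e. 2^(9+2) = 2048 bytes per packet at S400 */
	printf("max_payload field %d -> %d bytes\n",
	       max_payload, 1 << (max_payload + 2));
	return 0;
}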
......@@ -1519,11 +1514,8 @@ static int sbp2_scsi_slave_alloc(struct scsi_device *sdev)
sdev->allow_restart = 1;
/*
* Update the dma alignment (minimum alignment requirements for
* start and end of DMA transfers) to be a sector
*/
blk_queue_update_dma_alignment(sdev->request_queue, 511);
/* SBP-2 requires quadlet alignment of the data buffers. */
blk_queue_update_dma_alignment(sdev->request_queue, 4 - 1);
if (lu->tgt->workarounds & SBP2_WORKAROUND_INQUIRY_36)
sdev->inquiry_len = 36;
......@@ -1581,16 +1573,14 @@ sbp2_sysfs_ieee1394_id_show(struct device *dev, struct device_attribute *attr,
{
struct scsi_device *sdev = to_scsi_device(dev);
struct sbp2_logical_unit *lu;
struct fw_device *device;
if (!sdev)
return 0;
lu = sdev->hostdata;
device = fw_device(lu->tgt->unit->device.parent);
return sprintf(buf, "%08x%08x:%06x:%04x\n",
device->config_rom[3], device->config_rom[4],
return sprintf(buf, "%016llx:%06x:%04x\n",
(unsigned long long)lu->tgt->guid,
lu->tgt->directory_id, lu->lun);
}
......
......@@ -108,6 +108,7 @@ static struct fw_node *fw_node_create(u32 sid, int port_count, int color)
node->node_id = LOCAL_BUS | SELF_ID_PHY_ID(sid);
node->link_on = SELF_ID_LINK_ON(sid);
node->phy_speed = SELF_ID_PHY_SPEED(sid);
node->initiated_reset = SELF_ID_PHY_INITIATOR(sid);
node->port_count = port_count;
atomic_set(&node->ref_count, 1);
......@@ -289,12 +290,11 @@ static struct fw_node *build_tree(struct fw_card *card,
beta_repeaters_present = true;
/*
* If all PHYs does not report the same gap count
* setting, we fall back to 63 which will force a gap
* count reconfiguration and a reset.
* If PHYs report different gap counts, set an invalid count
* which will force a gap count reconfiguration and a reset.
*/
if (SELF_ID_GAP_COUNT(q) != gap_count)
gap_count = 63;
gap_count = 0;
update_hop_count(node);
......@@ -431,6 +431,8 @@ update_tree(struct fw_card *card, struct fw_node *root)
event = FW_NODE_LINK_OFF;
else if (!node0->link_on && node1->link_on)
event = FW_NODE_LINK_ON;
else if (node1->initiated_reset && node1->link_on)
event = FW_NODE_INITIATED_RESET;
else
event = FW_NODE_UPDATED;
......
......@@ -20,11 +20,12 @@
#define __fw_topology_h
enum {
FW_NODE_CREATED = 0x00,
FW_NODE_UPDATED = 0x01,
FW_NODE_DESTROYED = 0x02,
FW_NODE_LINK_ON = 0x03,
FW_NODE_LINK_OFF = 0x04,
FW_NODE_CREATED,
FW_NODE_UPDATED,
FW_NODE_DESTROYED,
FW_NODE_LINK_ON,
FW_NODE_LINK_OFF,
FW_NODE_INITIATED_RESET,
};
struct fw_node {
......
......@@ -18,6 +18,7 @@
* Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
......@@ -294,42 +295,40 @@ fw_send_request(struct fw_card *card, struct fw_transaction *t,
}
EXPORT_SYMBOL(fw_send_request);
struct fw_phy_packet {
struct fw_packet packet;
struct completion done;
};
static void
transmit_phy_packet_callback(struct fw_packet *packet,
struct fw_card *card, int status)
{
kfree(packet);
}
static void send_phy_packet(struct fw_card *card, u32 data, int generation)
{
struct fw_packet *packet;
packet = kzalloc(sizeof(*packet), GFP_ATOMIC);
if (packet == NULL)
return;
packet->header[0] = data;
packet->header[1] = ~data;
packet->header_length = 8;
packet->payload_length = 0;
packet->speed = SCODE_100;
packet->generation = generation;
packet->callback = transmit_phy_packet_callback;
struct fw_phy_packet *p =
container_of(packet, struct fw_phy_packet, packet);
card->driver->send_request(card, packet);
complete(&p->done);
}
void fw_send_phy_config(struct fw_card *card,
int node_id, int generation, int gap_count)
{
u32 q;
q = PHY_IDENTIFIER(PHY_PACKET_CONFIG) |
struct fw_phy_packet p;
u32 data = PHY_IDENTIFIER(PHY_PACKET_CONFIG) |
PHY_CONFIG_ROOT_ID(node_id) |
PHY_CONFIG_GAP_COUNT(gap_count);
send_phy_packet(card, q, generation);
p.packet.header[0] = data;
p.packet.header[1] = ~data;
p.packet.header_length = 8;
p.packet.payload_length = 0;
p.packet.speed = SCODE_100;
p.packet.generation = generation;
p.packet.callback = transmit_phy_packet_callback;
init_completion(&p.done);
card->driver->send_request(card, &p.packet);
wait_for_completion(&p.done);
}
void fw_flush_transactions(struct fw_card *card)
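The fw-transaction.c hunk above replaces the old fire-and-forget send_phy_packet() (kzalloc + kfree in the callback) with a stack-allocated packet plus a completion, so fw_send_phy_config() now blocks until the transmit callback reports that the PHY configuration packet actually left the controller (the bus-reset-loop fix listed in the shortlog). Below is a minimal sketch of that synchronization pattern; the demo_* names are invented for illustration, and only the completion API calls are the real kernel interface.

/* Sketch only -- not part of the patch.  Illustrates the
 * init_completion()/complete()/wait_for_completion() pattern that the
 * hunk above introduces; all demo_* identifiers are invented. */
#include <linux/completion.h>
#include <linux/types.h>

struct demo_packet {
	u32 header[2];
	void (*callback)(struct demo_packet *p, int status);
	struct completion done;		/* signalled from the callback */
};

static void demo_transmit_callback(struct demo_packet *p, int status)
{
	complete(&p->done);		/* packet has been transmitted */
}

static void demo_send_and_wait(struct demo_packet *p,
			       void (*submit)(struct demo_packet *p))
{
	p->callback = demo_transmit_callback;
	init_completion(&p->done);
	submit(p);			/* hand the packet to the controller */
	wait_for_completion(&p->done);	/* safe: p lives on the caller's stack */
}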
......@@ -389,21 +388,21 @@ lookup_enclosing_address_handler(struct list_head *list,
static DEFINE_SPINLOCK(address_handler_lock);
static LIST_HEAD(address_handler_list);
const struct fw_address_region fw_low_memory_region =
{ .start = 0x000000000000ULL, .end = 0x000100000000ULL, };
const struct fw_address_region fw_high_memory_region =
{ .start = 0x000100000000ULL, .end = 0xffffe0000000ULL, };
EXPORT_SYMBOL(fw_high_memory_region);
#if 0
const struct fw_address_region fw_low_memory_region =
{ .start = 0x000000000000ULL, .end = 0x000100000000ULL, };
const struct fw_address_region fw_private_region =
{ .start = 0xffffe0000000ULL, .end = 0xfffff0000000ULL, };
const struct fw_address_region fw_csr_region =
{ .start = 0xfffff0000000ULL, .end = 0xfffff0000800ULL, };
{ .start = CSR_REGISTER_BASE,
.end = CSR_REGISTER_BASE | CSR_CONFIG_ROM_END, };
const struct fw_address_region fw_unit_space_region =
{ .start = 0xfffff0000900ULL, .end = 0x1000000000000ULL, };
EXPORT_SYMBOL(fw_low_memory_region);
EXPORT_SYMBOL(fw_high_memory_region);
EXPORT_SYMBOL(fw_private_region);
EXPORT_SYMBOL(fw_csr_region);
EXPORT_SYMBOL(fw_unit_space_region);
#endif /* 0 */
/**
* Allocate a range of addresses in the node space of the OHCI
......@@ -747,7 +746,8 @@ fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
EXPORT_SYMBOL(fw_core_handle_response);
static const struct fw_address_region topology_map_region =
{ .start = 0xfffff0001000ull, .end = 0xfffff0001400ull, };
{ .start = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP,
.end = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP_END, };
static void
handle_topology_map(struct fw_card *card, struct fw_request *request,
......@@ -785,7 +785,8 @@ static struct fw_address_handler topology_map = {
};
static const struct fw_address_region registers_region =
{ .start = 0xfffff0000000ull, .end = 0xfffff0000400ull, };
{ .start = CSR_REGISTER_BASE,
.end = CSR_REGISTER_BASE | CSR_CONFIG_ROM, };
static void
handle_registers(struct fw_card *card, struct fw_request *request,
......@@ -794,7 +795,7 @@ handle_registers(struct fw_card *card, struct fw_request *request,
unsigned long long offset,
void *payload, size_t length, void *callback_data)
{
int reg = offset - CSR_REGISTER_BASE;
int reg = offset & ~CSR_REGISTER_BASE;
unsigned long long bus_time;
__be32 *data = payload;
......
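The handle_registers() hunk above switches from subtracting CSR_REGISTER_BASE to masking it off, per the "use bitwise and to get reg" commit in the shortlog. Both forms yield the same register number because the base (0xfffff0000000) has no bits in common with the small register offsets. A throwaway demonstration, using a DEMO_ copy of the constant:

/* Demo only: "offset & ~base" equals "offset - base" whenever
 * offset = base | reg and reg stays within the base's zeroed low
 * 28 bits, which the CSR register window does. */
#include <assert.h>

#define DEMO_CSR_REGISTER_BASE	0xfffff0000000ULL

static int demo_reg_from_offset(unsigned long long offset)
{
	int by_sub  = (int)(offset - DEMO_CSR_REGISTER_BASE);
	int by_mask = (int)(offset & ~DEMO_CSR_REGISTER_BASE);

	assert(by_sub == by_mask);
	return by_mask;
}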
......@@ -201,11 +201,7 @@ struct fw_address_region {
u64 end;
};
extern const struct fw_address_region fw_low_memory_region;
extern const struct fw_address_region fw_high_memory_region;
extern const struct fw_address_region fw_private_region;
extern const struct fw_address_region fw_csr_region;
extern const struct fw_address_region fw_unit_space_region;
int fw_core_add_address_handler(struct fw_address_handler *handler,
const struct fw_address_region *region);
......@@ -221,12 +217,9 @@ struct fw_card {
const struct fw_card_driver *driver;
struct device *device;
atomic_t device_count;
struct kref kref;
int node_id;
int generation;
/* This is the generation used for timestamping incoming requests. */
int request_generation;
int current_tlabel, tlabel_mask;
struct list_head transaction_list;
struct timer_list flush_timer;
......@@ -263,9 +256,6 @@ struct fw_card {
int bm_generation;
};
struct fw_card *fw_card_get(struct fw_card *card);
void fw_card_put(struct fw_card *card);
/*
* The iso packet format allows for an immediate header/payload part
* stored in 'header' immediately after the packet info plus an
......
......@@ -133,8 +133,7 @@ static void host_reset(struct hpsb_host *host)
host->csr.state &= ~0x100;
}
host->csr.topology_map[1] =
cpu_to_be32(be32_to_cpu(host->csr.topology_map[1]) + 1);
be32_add_cpu(&host->csr.topology_map[1], 1);
host->csr.topology_map[2] = cpu_to_be32(host->node_count << 16
| host->selfid_count);
host->csr.topology_map[0] =
......@@ -142,8 +141,7 @@ static void host_reset(struct hpsb_host *host)
| csr_crc16(host->csr.topology_map + 1,
host->selfid_count + 2));
host->csr.speed_map[1] =
cpu_to_be32(be32_to_cpu(host->csr.speed_map[1]) + 1);
be32_add_cpu(&host->csr.speed_map[1], 1);
host->csr.speed_map[0] = cpu_to_be32(0x3f1 << 16
| csr_crc16(host->csr.speed_map+1,
0x3f1));
......
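The csr.c hunks above replace the open-coded cpu_to_be32(be32_to_cpu(x) + 1) round trip with be32_add_cpu(), which increments a big-endian value in place. A minimal sketch of the equivalence; the demo_ wrappers are invented, be32_add_cpu() itself is the real byteorder helper:

/* Sketch only: both functions bump a big-endian counter by one;
 * the patch simply switches csr.c to the one-line helper. */
#include <linux/types.h>
#include <asm/byteorder.h>

static void demo_bump_old(__be32 *counter)
{
	*counter = cpu_to_be32(be32_to_cpu(*counter) + 1);
}

static void demo_bump_new(__be32 *counter)
{
	be32_add_cpu(counter, 1);	/* same effect, one call */
}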
......@@ -2180,7 +2180,6 @@ MODULE_DEVICE_TABLE(ieee1394, dv1394_id_table);
static struct hpsb_protocol_driver dv1394_driver = {
.name = "dv1394",
.id_table = dv1394_id_table,
};
......@@ -2568,7 +2567,6 @@ static int __init dv1394_init_module(void)
cdev_init(&dv1394_cdev, &dv1394_fops);
dv1394_cdev.owner = THIS_MODULE;
kobject_set_name(&dv1394_cdev.kobj, "dv1394");
ret = cdev_add(&dv1394_cdev, IEEE1394_DV1394_DEV, 16);
if (ret) {
printk(KERN_ERR "dv1394: unable to register character device\n");
......
......@@ -339,7 +339,7 @@ u64 hpsb_allocate_and_register_addrspace(struct hpsb_highlevel *hl,
if ((alignment & 3) || (alignment > 0x800000000000ULL) ||
(hweight64(alignment) != 1)) {
HPSB_ERR("%s called with invalid alignment: 0x%048llx",
__FUNCTION__, (unsigned long long)alignment);
__func__, (unsigned long long)alignment);
return retval;
}
......@@ -354,7 +354,7 @@ u64 hpsb_allocate_and_register_addrspace(struct hpsb_highlevel *hl,
if (((start|end) & ~align_mask) || (start >= end) ||
(end > CSR1212_ALL_SPACE_END)) {
HPSB_ERR("%s called with invalid addresses "
"(start = %012Lx end = %012Lx)", __FUNCTION__,
"(start = %012Lx end = %012Lx)", __func__,
(unsigned long long)start,(unsigned long long)end);
return retval;
}
......@@ -422,7 +422,7 @@ int hpsb_register_addrspace(struct hpsb_highlevel *hl, struct hpsb_host *host,
if (((start|end) & 3) || (start >= end) ||
(end > CSR1212_ALL_SPACE_END)) {
HPSB_ERR("%s called with invalid addresses", __FUNCTION__);
HPSB_ERR("%s called with invalid addresses", __func__);
return 0;
}
......
......@@ -242,7 +242,7 @@ int hpsb_bus_reset(struct hpsb_host *host)
{
if (host->in_bus_reset) {
HPSB_NOTICE("%s called while bus reset already in progress",
__FUNCTION__);
__func__);
return 1;
}
......@@ -373,6 +373,8 @@ static void build_speed_map(struct hpsb_host *host, int nodecount)
if (sid->port2 == SELFID_PORT_CHILD) cldcnt[n]++;
speedcap[n] = sid->speed;
if (speedcap[n] > host->csr.lnk_spd)
speedcap[n] = host->csr.lnk_spd;
n--;
}
}
......
......@@ -701,7 +701,11 @@ static int nodemgr_bus_match(struct device * dev, struct device_driver * drv)
return 0;
driver = container_of(drv, struct hpsb_protocol_driver, driver);
for (id = driver->id_table; id->match_flags != 0; id++) {
id = driver->id_table;
if (!id)
return 0;
for (; id->match_flags != 0; id++) {
if ((id->match_flags & IEEE1394_MATCH_VENDOR_ID) &&
id->vendor_id != ud->vendor_id)
continue;
......
......@@ -149,7 +149,7 @@ printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id ,
/* Module Parameters */
static int phys_dma = 1;
module_param(phys_dma, int, 0444);
MODULE_PARM_DESC(phys_dma, "Enable physical dma (default = 1).");
MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 1).");
static void dma_trm_tasklet(unsigned long data);
static void dma_trm_reset(struct dma_trm_ctx *d);
......@@ -708,7 +708,7 @@ static void insert_packet(struct ti_ohci *ohci,
/* FIXME: do something about it */
PRINT(KERN_ERR,
"%s: packet data addr: %p size %Zd bytes "
"cross page boundary", __FUNCTION__,
"cross page boundary", __func__,
packet->data, packet->data_size);
}
#endif
......@@ -2089,10 +2089,8 @@ static void dma_trm_reset(struct dma_trm_ctx *d)
spin_lock_irqsave(&d->lock, flags);
list_splice(&d->fifo_list, &packet_list);
list_splice(&d->pending_list, &packet_list);
INIT_LIST_HEAD(&d->fifo_list);
INIT_LIST_HEAD(&d->pending_list);
list_splice_init(&d->fifo_list, &packet_list);
list_splice_init(&d->pending_list, &packet_list);
d->branchAddrPtr = NULL;
d->sent_ind = d->prg_ind;
......@@ -2787,7 +2785,7 @@ alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
d->buf_bus = kzalloc(d->num_desc * sizeof(*d->buf_bus), GFP_ATOMIC);
if (d->buf_cpu == NULL || d->buf_bus == NULL) {
PRINT(KERN_ERR, "Failed to allocate dma buffer");
PRINT(KERN_ERR, "Failed to allocate %s", "DMA buffer");
free_dma_rcv_ctx(d);
return -ENOMEM;
}
......@@ -2796,7 +2794,7 @@ alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
d->prg_bus = kzalloc(d->num_desc * sizeof(*d->prg_bus), GFP_ATOMIC);
if (d->prg_cpu == NULL || d->prg_bus == NULL) {
PRINT(KERN_ERR, "Failed to allocate dma prg");
PRINT(KERN_ERR, "Failed to allocate %s", "DMA prg");
free_dma_rcv_ctx(d);
return -ENOMEM;
}
......@@ -2804,7 +2802,7 @@ alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
d->spb = kmalloc(d->split_buf_size, GFP_ATOMIC);
if (d->spb == NULL) {
PRINT(KERN_ERR, "Failed to allocate split buffer");
PRINT(KERN_ERR, "Failed to allocate %s", "split buffer");
free_dma_rcv_ctx(d);
return -ENOMEM;
}
......@@ -2830,7 +2828,7 @@ alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
memset(d->buf_cpu[i], 0, d->buf_size);
} else {
PRINT(KERN_ERR,
"Failed to allocate dma buffer");
"Failed to allocate %s", "DMA buffer");
free_dma_rcv_ctx(d);
return -ENOMEM;
}
......@@ -2841,7 +2839,7 @@ alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
memset(d->prg_cpu[i], 0, sizeof(struct dma_cmd));
} else {
PRINT(KERN_ERR,
"Failed to allocate dma prg");
"Failed to allocate %s", "DMA prg");
free_dma_rcv_ctx(d);
return -ENOMEM;
}
......@@ -2902,7 +2900,7 @@ alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
d->prg_bus = kzalloc(d->num_desc * sizeof(*d->prg_bus), GFP_KERNEL);
if (d->prg_cpu == NULL || d->prg_bus == NULL) {
PRINT(KERN_ERR, "Failed to allocate at dma prg");
PRINT(KERN_ERR, "Failed to allocate %s", "AT DMA prg");
free_dma_trm_ctx(d);
return -ENOMEM;
}
......@@ -2925,7 +2923,7 @@ alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
memset(d->prg_cpu[i], 0, sizeof(struct at_dma_prg));
} else {
PRINT(KERN_ERR,
"Failed to allocate at dma prg");
"Failed to allocate %s", "AT DMA prg");
free_dma_trm_ctx(d);
return -ENOMEM;
}
......@@ -2986,22 +2984,9 @@ static struct hpsb_host_driver ohci1394_driver = {
* PCI Driver Interface functions *
***********************************/
#define FAIL(err, fmt, args...) \
do { \
PRINT_G(KERN_ERR, fmt , ## args); \
ohci1394_pci_remove(dev); \
return err; \
} while (0)
static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
const struct pci_device_id *ent)
{
struct hpsb_host *host;
struct ti_ohci *ohci; /* shortcut to currently handled device */
resource_size_t ohci_base;
#ifdef CONFIG_PPC_PMAC
/* Necessary on some machines if ohci1394 was loaded/ unloaded before */
static void ohci1394_pmac_on(struct pci_dev *dev)
{
if (machine_is(powermac)) {
struct device_node *ofn = pci_device_to_OF_node(dev);
......@@ -3010,15 +2995,45 @@ static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1);
}
}
}
static void ohci1394_pmac_off(struct pci_dev *dev)
{
if (machine_is(powermac)) {
struct device_node *ofn = pci_device_to_OF_node(dev);
if (ofn) {
pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 0);
}
}
}
#else
#define ohci1394_pmac_on(dev)
#define ohci1394_pmac_off(dev)
#endif /* CONFIG_PPC_PMAC */
if (pci_enable_device(dev))
FAIL(-ENXIO, "Failed to enable OHCI hardware");
static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
const struct pci_device_id *ent)
{
struct hpsb_host *host;
struct ti_ohci *ohci; /* shortcut to currently handled device */
resource_size_t ohci_base;
int err = -ENOMEM;
ohci1394_pmac_on(dev);
if (pci_enable_device(dev)) {
PRINT_G(KERN_ERR, "Failed to enable OHCI hardware");
err = -ENXIO;
goto err;
}
pci_set_master(dev);
host = hpsb_alloc_host(&ohci1394_driver, sizeof(struct ti_ohci), &dev->dev);
if (!host) FAIL(-ENOMEM, "Failed to allocate host structure");
if (!host) {
PRINT_G(KERN_ERR, "Failed to allocate %s", "host structure");
goto err;
}
ohci = host->hostdata;
ohci->dev = dev;
ohci->host = host;
......@@ -3067,15 +3082,20 @@ static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
(unsigned long long)pci_resource_len(dev, 0));
if (!request_mem_region(ohci_base, OHCI1394_REGISTER_SIZE,
OHCI1394_DRIVER_NAME))
FAIL(-ENOMEM, "MMIO resource (0x%llx - 0x%llx) unavailable",
OHCI1394_DRIVER_NAME)) {
PRINT_G(KERN_ERR, "MMIO resource (0x%llx - 0x%llx) unavailable",
(unsigned long long)ohci_base,
(unsigned long long)ohci_base + OHCI1394_REGISTER_SIZE);
goto err;
}
ohci->init_state = OHCI_INIT_HAVE_MEM_REGION;
ohci->registers = ioremap(ohci_base, OHCI1394_REGISTER_SIZE);
if (ohci->registers == NULL)
FAIL(-ENXIO, "Failed to remap registers - card not accessible");
if (ohci->registers == NULL) {
PRINT_G(KERN_ERR, "Failed to remap registers");
err = -ENXIO;
goto err;
}
ohci->init_state = OHCI_INIT_HAVE_IOMAPPING;
DBGMSG("Remapped memory spaces reg 0x%p", ohci->registers);
......@@ -3083,16 +3103,20 @@ static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
ohci->csr_config_rom_cpu =
pci_alloc_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
&ohci->csr_config_rom_bus);
if (ohci->csr_config_rom_cpu == NULL)
FAIL(-ENOMEM, "Failed to allocate buffer config rom");
if (ohci->csr_config_rom_cpu == NULL) {
PRINT_G(KERN_ERR, "Failed to allocate %s", "buffer config rom");
goto err;
}
ohci->init_state = OHCI_INIT_HAVE_CONFIG_ROM_BUFFER;
/* self-id dma buffer allocation */
ohci->selfid_buf_cpu =
pci_alloc_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
&ohci->selfid_buf_bus);
if (ohci->selfid_buf_cpu == NULL)
FAIL(-ENOMEM, "Failed to allocate DMA buffer for self-id packets");
if (ohci->selfid_buf_cpu == NULL) {
PRINT_G(KERN_ERR, "Failed to allocate %s", "self-ID buffer");
goto err;
}
ohci->init_state = OHCI_INIT_HAVE_SELFID_BUFFER;
if ((unsigned long)ohci->selfid_buf_cpu & 0x1fff)
......@@ -3108,28 +3132,32 @@ static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
if (alloc_dma_rcv_ctx(ohci, &ohci->ar_req_context,
DMA_CTX_ASYNC_REQ, 0, AR_REQ_NUM_DESC,
AR_REQ_BUF_SIZE, AR_REQ_SPLIT_BUF_SIZE,
OHCI1394_AsReqRcvContextBase) < 0)
FAIL(-ENOMEM, "Failed to allocate AR Req context");
OHCI1394_AsReqRcvContextBase) < 0) {
PRINT_G(KERN_ERR, "Failed to allocate %s", "AR Req context");
goto err;
}
/* AR DMA response context allocation */
if (alloc_dma_rcv_ctx(ohci, &ohci->ar_resp_context,
DMA_CTX_ASYNC_RESP, 0, AR_RESP_NUM_DESC,
AR_RESP_BUF_SIZE, AR_RESP_SPLIT_BUF_SIZE,
OHCI1394_AsRspRcvContextBase) < 0)
FAIL(-ENOMEM, "Failed to allocate AR Resp context");
OHCI1394_AsRspRcvContextBase) < 0) {
PRINT_G(KERN_ERR, "Failed to allocate %s", "AR Resp context");
goto err;
}
/* AT DMA request context */
if (alloc_dma_trm_ctx(ohci, &ohci->at_req_context,
DMA_CTX_ASYNC_REQ, 0, AT_REQ_NUM_DESC,
OHCI1394_AsReqTrContextBase) < 0)
FAIL(-ENOMEM, "Failed to allocate AT Req context");
OHCI1394_AsReqTrContextBase) < 0) {
PRINT_G(KERN_ERR, "Failed to allocate %s", "AT Req context");
goto err;
}
/* AT DMA response context */
if (alloc_dma_trm_ctx(ohci, &ohci->at_resp_context,
DMA_CTX_ASYNC_RESP, 1, AT_RESP_NUM_DESC,
OHCI1394_AsRspTrContextBase) < 0)
FAIL(-ENOMEM, "Failed to allocate AT Resp context");
OHCI1394_AsRspTrContextBase) < 0) {
PRINT_G(KERN_ERR, "Failed to allocate %s", "AT Resp context");
goto err;
}
/* Start off with a soft reset, to clear everything to a sane
* state. */
ohci_soft_reset(ohci);
......@@ -3172,9 +3200,10 @@ static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
* by that point.
*/
if (request_irq(dev->irq, ohci_irq_handler, IRQF_SHARED,
OHCI1394_DRIVER_NAME, ohci))
FAIL(-ENOMEM, "Failed to allocate shared interrupt %d", dev->irq);
OHCI1394_DRIVER_NAME, ohci)) {
PRINT_G(KERN_ERR, "Failed to allocate interrupt %d", dev->irq);
goto err;
}
ohci->init_state = OHCI_INIT_HAVE_IRQ;
ohci_initialize(ohci);
......@@ -3194,25 +3223,28 @@ static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
host->middle_addr_space = OHCI1394_MIDDLE_ADDRESS_SPACE;
/* Tell the highlevel this host is ready */
if (hpsb_add_host(host))
FAIL(-ENOMEM, "Failed to register host with highlevel");
if (hpsb_add_host(host)) {
PRINT_G(KERN_ERR, "Failed to register host with highlevel");
goto err;
}
ohci->init_state = OHCI_INIT_DONE;
return 0;
#undef FAIL
err:
ohci1394_pci_remove(dev);
return err;
}
static void ohci1394_pci_remove(struct pci_dev *pdev)
static void ohci1394_pci_remove(struct pci_dev *dev)
{
struct ti_ohci *ohci;
struct device *dev;
struct device *device;
ohci = pci_get_drvdata(pdev);
ohci = pci_get_drvdata(dev);
if (!ohci)
return;
goto out;
dev = get_device(&ohci->host->device);
device = get_device(&ohci->host->device);
switch (ohci->init_state) {
case OHCI_INIT_DONE:
......@@ -3246,7 +3278,7 @@ static void ohci1394_pci_remove(struct pci_dev *pdev)
/* Soft reset before we start - this disables
* interrupts and clears linkEnable and LPS. */
ohci_soft_reset(ohci);
free_irq(ohci->dev->irq, ohci);
free_irq(dev->irq, ohci);
case OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE:
/* The ohci_soft_reset() stops all DMA contexts, so we
......@@ -3257,12 +3289,12 @@ static void ohci1394_pci_remove(struct pci_dev *pdev)
free_dma_trm_ctx(&ohci->at_resp_context);
case OHCI_INIT_HAVE_SELFID_BUFFER:
pci_free_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
pci_free_consistent(dev, OHCI1394_SI_DMA_BUF_SIZE,
ohci->selfid_buf_cpu,
ohci->selfid_buf_bus);
case OHCI_INIT_HAVE_CONFIG_ROM_BUFFER:
pci_free_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
pci_free_consistent(dev, OHCI_CONFIG_ROM_LEN,
ohci->csr_config_rom_cpu,
ohci->csr_config_rom_bus);
......@@ -3270,35 +3302,24 @@ static void ohci1394_pci_remove(struct pci_dev *pdev)
iounmap(ohci->registers);
case OHCI_INIT_HAVE_MEM_REGION:
release_mem_region(pci_resource_start(ohci->dev, 0),
release_mem_region(pci_resource_start(dev, 0),
OHCI1394_REGISTER_SIZE);
#ifdef CONFIG_PPC_PMAC
/* On UniNorth, power down the cable and turn off the chip clock
* to save power on laptops */
if (machine_is(powermac)) {
struct device_node* ofn = pci_device_to_OF_node(ohci->dev);
if (ofn) {
pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 0);
}
}
#endif /* CONFIG_PPC_PMAC */
case OHCI_INIT_ALLOC_HOST:
pci_set_drvdata(ohci->dev, NULL);
pci_set_drvdata(dev, NULL);
}
if (dev)
put_device(dev);
if (device)
put_device(device);
out:
ohci1394_pmac_off(dev);
}
#ifdef CONFIG_PM
static int ohci1394_pci_suspend(struct pci_dev *pdev, pm_message_t state)
static int ohci1394_pci_suspend(struct pci_dev *dev, pm_message_t state)
{
int err;
struct ti_ohci *ohci = pci_get_drvdata(pdev);
struct ti_ohci *ohci = pci_get_drvdata(dev);
if (!ohci) {
printk(KERN_ERR "%s: tried to suspend nonexisting host\n",
......@@ -3326,32 +3347,23 @@ static int ohci1394_pci_suspend(struct pci_dev *pdev, pm_message_t state)
ohci_devctl(ohci->host, RESET_BUS, LONG_RESET_NO_FORCE_ROOT);
ohci_soft_reset(ohci);
err = pci_save_state(pdev);
err = pci_save_state(dev);
if (err) {
PRINT(KERN_ERR, "pci_save_state failed with %d", err);
return err;
}
err = pci_set_power_state(pdev, pci_choose_state(pdev, state));
err = pci_set_power_state(dev, pci_choose_state(dev, state));
if (err)
DBGMSG("pci_set_power_state failed with %d", err);
/* PowerMac suspend code comes last */
#ifdef CONFIG_PPC_PMAC
if (machine_is(powermac)) {
struct device_node *ofn = pci_device_to_OF_node(pdev);
if (ofn)
pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
}
#endif /* CONFIG_PPC_PMAC */
ohci1394_pmac_off(dev);
return 0;
}
static int ohci1394_pci_resume(struct pci_dev *pdev)
static int ohci1394_pci_resume(struct pci_dev *dev)
{
int err;
struct ti_ohci *ohci = pci_get_drvdata(pdev);
struct ti_ohci *ohci = pci_get_drvdata(dev);
if (!ohci) {
printk(KERN_ERR "%s: tried to resume nonexisting host\n",
......@@ -3360,19 +3372,10 @@ static int ohci1394_pci_resume(struct pci_dev *pdev)
}
DBGMSG("resume called");
/* PowerMac resume code comes first */
#ifdef CONFIG_PPC_PMAC
if (machine_is(powermac)) {
struct device_node *ofn = pci_device_to_OF_node(pdev);
if (ofn)
pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1);
}
#endif /* CONFIG_PPC_PMAC */
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
err = pci_enable_device(pdev);
ohci1394_pmac_on(dev);
pci_set_power_state(dev, PCI_D0);
pci_restore_state(dev);
err = pci_enable_device(dev);
if (err) {
PRINT(KERN_ERR, "pci_enable_device failed with %d", err);
return err;
......
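The ohci1394_pci_probe() hunks above drop the return-from-inside-a-macro FAIL() construct in favour of a single err: label that funnels every failure through ohci1394_pci_remove(), while the PowerMac feature calls move into the shared ohci1394_pmac_on()/_off() helpers. A compact sketch of that single-exit idiom, with invented demo_* stubs:

/* Sketch only -- the "goto err" cleanup idiom the probe path now uses.
 * The demo_* functions are stand-ins, not driver code. */
#include <linux/errno.h>

static int demo_setup_a(void) { return 0; }	/* stub */
static int demo_setup_b(void) { return 0; }	/* stub */
static void demo_teardown(void) { }		/* undoes whatever succeeded */

static int demo_probe(void)
{
	int err = -ENOMEM;			/* default error code */

	if (demo_setup_a())
		goto err;			/* uses the default code */
	if (demo_setup_b()) {
		err = -ENXIO;			/* override where it matters */
		goto err;
	}
	return 0;
err:
	demo_teardown();			/* one exit path for all cleanup */
	return err;
}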
......@@ -226,7 +226,7 @@ static int get_phy_reg(struct ti_lynx *lynx, int addr)
if (addr > 15) {
PRINT(KERN_ERR, lynx->id,
"%s: PHY register address %d out of range",
__FUNCTION__, addr);
__func__, addr);
return -1;
}
......@@ -238,7 +238,7 @@ static int get_phy_reg(struct ti_lynx *lynx, int addr)
if (i > 10000) {
PRINT(KERN_ERR, lynx->id, "%s: runaway loop, aborting",
__FUNCTION__);
__func__);
retval = -1;
break;
}
......@@ -261,13 +261,13 @@ static int set_phy_reg(struct ti_lynx *lynx, int addr, int val)
if (addr > 15) {
PRINT(KERN_ERR, lynx->id,
"%s: PHY register address %d out of range", __FUNCTION__, addr);
"%s: PHY register address %d out of range", __func__, addr);
return -1;
}
if (val > 0xff) {
PRINT(KERN_ERR, lynx->id,
"%s: PHY register value %d out of range", __FUNCTION__, val);
"%s: PHY register value %d out of range", __func__, val);
return -1;
}
......@@ -287,7 +287,7 @@ static int sel_phy_reg_page(struct ti_lynx *lynx, int page)
if (page > 7) {
PRINT(KERN_ERR, lynx->id,
"%s: PHY page %d out of range", __FUNCTION__, page);
"%s: PHY page %d out of range", __func__, page);
return -1;
}
......@@ -309,7 +309,7 @@ static int sel_phy_reg_port(struct ti_lynx *lynx, int port)
if (port > 15) {
PRINT(KERN_ERR, lynx->id,
"%s: PHY port %d out of range", __FUNCTION__, port);
"%s: PHY port %d out of range", __func__, port);
return -1;
}
......@@ -738,8 +738,7 @@ static int lynx_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
spin_lock_irqsave(&lynx->async.queue_lock, flags);
reg_write(lynx, DMA_CHAN_CTRL(CHANNEL_ASYNC_SEND), 0);
list_splice(&lynx->async.queue, &packet_list);
INIT_LIST_HEAD(&lynx->async.queue);
list_splice_init(&lynx->async.queue, &packet_list);
if (list_empty(&lynx->async.pcl_queue)) {
spin_unlock_irqrestore(&lynx->async.queue_lock, flags);
......
......@@ -2959,7 +2959,6 @@ MODULE_DEVICE_TABLE(ieee1394, raw1394_id_table);
static struct hpsb_protocol_driver raw1394_driver = {
.name = "raw1394",
.id_table = raw1394_id_table,
};
/******************************************************************************/
......@@ -3004,7 +3003,6 @@ static int __init init_raw1394(void)
cdev_init(&raw1394_cdev, &raw1394_fops);
raw1394_cdev.owner = THIS_MODULE;
kobject_set_name(&raw1394_cdev.kobj, RAW1394_DEVICE_NAME);
ret = cdev_add(&raw1394_cdev, IEEE1394_RAW1394_DEV, 1);
if (ret) {
HPSB_ERR("raw1394 failed to register minor device block");
......
......@@ -615,7 +615,7 @@ static struct sbp2_command_info *sbp2util_allocate_command_orb(
cmd->Current_SCpnt = Current_SCpnt;
list_add_tail(&cmd->list, &lu->cmd_orb_inuse);
} else
SBP2_ERR("%s: no orbs available", __FUNCTION__);
SBP2_ERR("%s: no orbs available", __func__);
spin_unlock_irqrestore(&lu->cmd_orb_lock, flags);
return cmd;
}
......@@ -1294,7 +1294,7 @@ static int sbp2_set_busy_timeout(struct sbp2_lu *lu)
data = cpu_to_be32(SBP2_BUSY_TIMEOUT_VALUE);
if (hpsb_node_write(lu->ne, SBP2_BUSY_TIMEOUT_ADDRESS, &data, 4))
SBP2_ERR("%s error", __FUNCTION__);
SBP2_ERR("%s error", __func__);
return 0;
}
......@@ -1985,11 +1985,8 @@ static int sbp2scsi_slave_alloc(struct scsi_device *sdev)
lu->sdev = sdev;
sdev->allow_restart = 1;
/*
* Update the dma alignment (minimum alignment requirements for
* start and end of DMA transfers) to be a sector
*/
blk_queue_update_dma_alignment(sdev->request_queue, 511);
/* SBP-2 requires quadlet alignment of the data buffers. */
blk_queue_update_dma_alignment(sdev->request_queue, 4 - 1);
if (lu->workarounds & SBP2_WORKAROUND_INQUIRY_36)
sdev->inquiry_len = 36;
......
......@@ -1316,7 +1316,6 @@ MODULE_DEVICE_TABLE(ieee1394, video1394_id_table);
static struct hpsb_protocol_driver video1394_driver = {
.name = VIDEO1394_DRIVER_NAME,
.id_table = video1394_id_table,
};
......@@ -1504,7 +1503,6 @@ static int __init video1394_init_module (void)
cdev_init(&video1394_cdev, &video1394_fops);
video1394_cdev.owner = THIS_MODULE;
kobject_set_name(&video1394_cdev.kobj, VIDEO1394_DRIVER_NAME);
ret = cdev_add(&video1394_cdev, IEEE1394_VIDEO1394_DEV, 16);
if (ret) {
PRINT_G(KERN_ERR, "video1394: unable to get minor device block");
......
......@@ -583,7 +583,7 @@ config LATENCYTOP
to find out which userspace is blocking on what kernel operations.
config PROVIDE_OHCI1394_DMA_INIT
bool "Provide code for enabling DMA over FireWire early on boot"
bool "Remote debugging over FireWire early on boot"
depends on PCI && X86
help
If you want to debug problems which hang or crash the kernel early
......@@ -611,6 +611,17 @@ config PROVIDE_OHCI1394_DMA_INIT
See Documentation/debugging-via-ohci1394.txt for more information.
config FIREWIRE_OHCI_REMOTE_DMA
bool "Remote debugging over FireWire with firewire-ohci"
depends on FIREWIRE_OHCI
help
This option lets you use the FireWire bus for remote debugging
with the help of the firewire-ohci driver. It enables unfiltered
remote DMA in firewire-ohci.
See Documentation/debugging-via-ohci1394.txt for more information.
If unsure, say N.
source "samples/Kconfig"
source "lib/Kconfig.kgdb"