Diffstat (limited to 'drivers/firewire')
-rw-r--r--  drivers/firewire/Kconfig            55
-rw-r--r--  drivers/firewire/fw-card.c         109
-rw-r--r--  drivers/firewire/fw-cdev.c          30
-rw-r--r--  drivers/firewire/fw-device.c       323
-rw-r--r--  drivers/firewire/fw-device.h        48
-rw-r--r--  drivers/firewire/fw-iso.c            5
-rw-r--r--  drivers/firewire/fw-ohci.c         425
-rw-r--r--  drivers/firewire/fw-ohci.h           2
-rw-r--r--  drivers/firewire/fw-sbp2.c         560
-rw-r--r--  drivers/firewire/fw-topology.c      14
-rw-r--r--  drivers/firewire/fw-topology.h      11
-rw-r--r--  drivers/firewire/fw-transaction.c   84
-rw-r--r--  drivers/firewire/fw-transaction.h   18
13 files changed, 1210 insertions, 474 deletions
diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig
index fe9e768cfbc..fb4d391810b 100644
--- a/drivers/firewire/Kconfig
+++ b/drivers/firewire/Kconfig
@@ -1,5 +1,3 @@
-# -*- shell-script -*-
-
comment "An alternative FireWire stack is available with EXPERIMENTAL=y"
depends on EXPERIMENTAL=n
@@ -21,27 +19,7 @@ config FIREWIRE
NOTE:
You should only build ONE of the stacks, unless you REALLY know what
- you are doing. If you install both, you should configure them only as
- modules rather than link them statically, and you should blacklist one
- of the concurrent low-level drivers in /etc/modprobe.conf. Add either
-
- blacklist firewire-ohci
- or
- blacklist ohci1394
-
- there depending on which driver you DON'T want to have auto-loaded.
- You can optionally do the same with the other IEEE 1394/ FireWire
- drivers.
-
- If you have an old modprobe which doesn't implement the blacklist
- directive, use either
-
- install firewire-ohci /bin/true
- or
- install ohci1394 /bin/true
-
- and so on, depending on which modules you DON't want to have
- auto-loaded.
+ you are doing.
config FIREWIRE_OHCI
tristate "Support for OHCI FireWire host controllers"
@@ -57,8 +35,29 @@ config FIREWIRE_OHCI
NOTE:
- If you also build ohci1394 of the classic stack, blacklist either
- ohci1394 or firewire-ohci to let hotplug load only the desired driver.
+ You should only build ohci1394 or firewire-ohci, but not both.
+ If you nevertheless want to install both, you should configure them
+ only as modules and blacklist the driver(s) which you don't want to
+ have auto-loaded. Add either
+
+ blacklist firewire-ohci
+ or
+ blacklist ohci1394
+ blacklist video1394
+ blacklist dv1394
+
+ to /etc/modprobe.conf or /etc/modprobe.d/* and update modprobe.conf
+ depending on your distribution. The latter two modules should be
+ blacklisted together with ohci1394 because they depend on ohci1394.
+
+ If you have an old modprobe which doesn't implement the blacklist
+ directive, use "install modulename /bin/true" for the modules to be
+ blacklisted.
+
+config FIREWIRE_OHCI_DEBUG
+ bool
+ depends on FIREWIRE_OHCI
+ default y
config FIREWIRE_SBP2
tristate "Support for storage devices (SBP-2 protocol driver)"
@@ -75,9 +74,3 @@ config FIREWIRE_SBP2
You should also enable support for disks, CD-ROMs, etc. in the SCSI
configuration section.
-
- NOTE:
-
- If you also build sbp2 of the classic stack, blacklist either sbp2
- or firewire-sbp2 to let hotplug load only the desired driver.
-
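As the new help text above says, the blacklist lines belong in /etc/modprobe.conf or in a file under /etc/modprobe.d/. A hypothetical /etc/modprobe.d/firewire-alternative-stack.conf (the file name is only an example) that keeps the classic stack from auto-loading would look like:

    # use the new firewire-ohci driver; keep the old stack from auto-loading
    blacklist ohci1394
    blacklist video1394
    blacklist dv1394
    # fallback for old modprobe versions without blacklist support:
    # install ohci1394 /bin/true
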
diff --git a/drivers/firewire/fw-card.c b/drivers/firewire/fw-card.c
index 3e9719948a8..5b4c0d9f517 100644
--- a/drivers/firewire/fw-card.c
+++ b/drivers/firewire/fw-card.c
@@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/errno.h>
+#include <linux/delay.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/crc-itu-t.h>
@@ -166,7 +167,6 @@ fw_core_add_descriptor(struct fw_descriptor *desc)
return 0;
}
-EXPORT_SYMBOL(fw_core_add_descriptor);
void
fw_core_remove_descriptor(struct fw_descriptor *desc)
@@ -181,7 +181,6 @@ fw_core_remove_descriptor(struct fw_descriptor *desc)
mutex_unlock(&card_mutex);
}
-EXPORT_SYMBOL(fw_core_remove_descriptor);
static const char gap_count_table[] = {
63, 5, 7, 8, 10, 13, 16, 18, 21, 24, 26, 29, 32, 35, 37, 40
@@ -214,17 +213,29 @@ static void
fw_card_bm_work(struct work_struct *work)
{
struct fw_card *card = container_of(work, struct fw_card, work.work);
- struct fw_device *root;
+ struct fw_device *root_device;
+ struct fw_node *root_node, *local_node;
struct bm_data bmd;
unsigned long flags;
int root_id, new_root_id, irm_id, gap_count, generation, grace;
- int do_reset = 0;
+ bool do_reset = false;
spin_lock_irqsave(&card->lock, flags);
+ local_node = card->local_node;
+ root_node = card->root_node;
+
+ if (local_node == NULL) {
+ spin_unlock_irqrestore(&card->lock, flags);
+ return;
+ }
+ fw_node_get(local_node);
+ fw_node_get(root_node);
generation = card->generation;
- root = card->root_node->data;
- root_id = card->root_node->node_id;
+ root_device = root_node->data;
+ if (root_device)
+ fw_device_get(root_device);
+ root_id = root_node->node_id;
grace = time_after(jiffies, card->reset_jiffies + DIV_ROUND_UP(HZ, 10));
if (card->bm_generation + 1 == generation ||
@@ -243,14 +254,14 @@ fw_card_bm_work(struct work_struct *work)
irm_id = card->irm_node->node_id;
if (!card->irm_node->link_on) {
- new_root_id = card->local_node->node_id;
+ new_root_id = local_node->node_id;
fw_notify("IRM has link off, making local node (%02x) root.\n",
new_root_id);
goto pick_me;
}
bmd.lock.arg = cpu_to_be32(0x3f);
- bmd.lock.data = cpu_to_be32(card->local_node->node_id);
+ bmd.lock.data = cpu_to_be32(local_node->node_id);
spin_unlock_irqrestore(&card->lock, flags);
@@ -267,12 +278,12 @@ fw_card_bm_work(struct work_struct *work)
* Another bus reset happened. Just return,
* the BM work has been rescheduled.
*/
- return;
+ goto out;
}
if (bmd.rcode == RCODE_COMPLETE && bmd.old != 0x3f)
/* Somebody else is BM, let them do the work. */
- return;
+ goto out;
spin_lock_irqsave(&card->lock, flags);
if (bmd.rcode != RCODE_COMPLETE) {
@@ -282,7 +293,7 @@ fw_card_bm_work(struct work_struct *work)
* do a bus reset and pick the local node as
* root, and thus, IRM.
*/
- new_root_id = card->local_node->node_id;
+ new_root_id = local_node->node_id;
fw_notify("BM lock failed, making local node (%02x) root.\n",
new_root_id);
goto pick_me;
@@ -295,7 +306,7 @@ fw_card_bm_work(struct work_struct *work)
*/
spin_unlock_irqrestore(&card->lock, flags);
schedule_delayed_work(&card->work, DIV_ROUND_UP(HZ, 10));
- return;
+ goto out;
}
/*
@@ -305,20 +316,20 @@ fw_card_bm_work(struct work_struct *work)
*/
card->bm_generation = generation;
- if (root == NULL) {
+ if (root_device == NULL) {
/*
* Either link_on is false, or we failed to read the
* config rom. In either case, pick another root.
*/
- new_root_id = card->local_node->node_id;
- } else if (atomic_read(&root->state) != FW_DEVICE_RUNNING) {
+ new_root_id = local_node->node_id;
+ } else if (atomic_read(&root_device->state) != FW_DEVICE_RUNNING) {
/*
* If we haven't probed this device yet, bail out now
* and let's try again once that's done.
*/
spin_unlock_irqrestore(&card->lock, flags);
- return;
- } else if (root->config_rom[2] & BIB_CMC) {
+ goto out;
+ } else if (root_device->cmc) {
/*
* FIXME: I suppose we should set the cmstr bit in the
* STATE_CLEAR register of this node, as described in
@@ -332,7 +343,7 @@ fw_card_bm_work(struct work_struct *work)
* successfully read the config rom, but it's not
* cycle master capable.
*/
- new_root_id = card->local_node->node_id;
+ new_root_id = local_node->node_id;
}
pick_me:
@@ -341,20 +352,20 @@ fw_card_bm_work(struct work_struct *work)
* the typically much larger 1394b beta repeater delays though.
*/
if (!card->beta_repeaters_present &&
- card->root_node->max_hops < ARRAY_SIZE(gap_count_table))
- gap_count = gap_count_table[card->root_node->max_hops];
+ root_node->max_hops < ARRAY_SIZE(gap_count_table))
+ gap_count = gap_count_table[root_node->max_hops];
else
gap_count = 63;
/*
- * Finally, figure out if we should do a reset or not. If we've
- * done less that 5 resets with the same physical topology and we
+ * Finally, figure out if we should do a reset or not. If we have
+ * done less than 5 resets with the same physical topology and we
* have either a new root or a new gap count setting, let's do it.
*/
if (card->bm_retries++ < 5 &&
(card->gap_count != gap_count || new_root_id != root_id))
- do_reset = 1;
+ do_reset = true;
spin_unlock_irqrestore(&card->lock, flags);
@@ -364,6 +375,11 @@ fw_card_bm_work(struct work_struct *work)
fw_send_phy_config(card, new_root_id, generation, gap_count);
fw_core_initiate_bus_reset(card, 1);
}
+ out:
+ if (root_device)
+ fw_device_put(root_device);
+ fw_node_put(root_node);
+ fw_node_put(local_node);
}
static void
@@ -380,7 +396,7 @@ fw_card_initialize(struct fw_card *card, const struct fw_card_driver *driver,
{
static atomic_t index = ATOMIC_INIT(-1);
- kref_init(&card->kref);
+ atomic_set(&card->device_count, 0);
card->index = atomic_inc_return(&index);
card->driver = driver;
card->device = device;
@@ -410,12 +426,6 @@ fw_card_add(struct fw_card *card,
card->link_speed = link_speed;
card->guid = guid;
- /*
- * The subsystem grabs a reference when the card is added and
- * drops it when the driver calls fw_core_remove_card.
- */
- fw_card_get(card);
-
mutex_lock(&card_mutex);
config_rom = generate_config_rom(card, &length);
list_add_tail(&card->link, &card_list);
@@ -511,44 +521,19 @@ fw_core_remove_card(struct fw_card *card)
card->driver = &dummy_driver;
fw_destroy_nodes(card);
- flush_scheduled_work();
+ /*
+ * Wait for all device workqueue jobs to finish. Otherwise the
+ * firewire-core module could be unloaded before the jobs ran.
+ */
+ while (atomic_read(&card->device_count) > 0)
+ msleep(100);
+ cancel_delayed_work_sync(&card->work);
fw_flush_transactions(card);
del_timer_sync(&card->flush_timer);
-
- fw_card_put(card);
}
EXPORT_SYMBOL(fw_core_remove_card);
-struct fw_card *
-fw_card_get(struct fw_card *card)
-{
- kref_get(&card->kref);
-
- return card;
-}
-EXPORT_SYMBOL(fw_card_get);
-
-static void
-release_card(struct kref *kref)
-{
- struct fw_card *card = container_of(kref, struct fw_card, kref);
-
- kfree(card);
-}
-
-/*
- * An assumption for fw_card_put() is that the card driver allocates
- * the fw_card struct with kalloc and that it has been shut down
- * before the last ref is dropped.
- */
-void
-fw_card_put(struct fw_card *card)
-{
- kref_put(&card->kref, release_card);
-}
-EXPORT_SYMBOL(fw_card_put);
-
int
fw_core_initiate_bus_reset(struct fw_card *card, int short_reset)
{
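The hunks above replace the kref on struct fw_card with an atomic device_count which fw_core_remove_card() polls before it returns. A minimal sketch of that lifetime pattern, using the counter added by this patch but heavily simplified (not the literal driver code):

    #include <asm/atomic.h>
    #include <linux/delay.h>
    #include <linux/workqueue.h>
    #include "fw-transaction.h"	/* struct fw_card */

    /* fw_node_event(): a new fw_device starts referencing the card */
    static void sketch_device_created(struct fw_card *card)
    {
    	atomic_inc(&card->device_count);
    }

    /* fw_device_release(): the last reference to that device is gone */
    static void sketch_device_released(struct fw_card *card)
    {
    	atomic_dec(&card->device_count);
    }

    /* fw_core_remove_card(): wait until every device is released, so
     * firewire-core cannot be unloaded while device jobs still run */
    static void sketch_card_remove(struct fw_card *card)
    {
    	while (atomic_read(&card->device_count) > 0)
    		msleep(100);
    	cancel_delayed_work_sync(&card->work);
    }
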
diff --git a/drivers/firewire/fw-cdev.c b/drivers/firewire/fw-cdev.c
index 7e73cbaa412..4a541921a14 100644
--- a/drivers/firewire/fw-cdev.c
+++ b/drivers/firewire/fw-cdev.c
@@ -109,15 +109,17 @@ static int fw_device_op_open(struct inode *inode, struct file *file)
struct client *client;
unsigned long flags;
- device = fw_device_from_devt(inode->i_rdev);
+ device = fw_device_get_by_devt(inode->i_rdev);
if (device == NULL)
return -ENODEV;
client = kzalloc(sizeof(*client), GFP_KERNEL);
- if (client == NULL)
+ if (client == NULL) {
+ fw_device_put(device);
return -ENOMEM;
+ }
- client->device = fw_device_get(device);
+ client->device = device;
INIT_LIST_HEAD(&client->event_list);
INIT_LIST_HEAD(&client->resource_list);
spin_lock_init(&client->lock);
@@ -267,21 +269,28 @@ static int ioctl_get_info(struct client *client, void *buffer)
{
struct fw_cdev_get_info *get_info = buffer;
struct fw_cdev_event_bus_reset bus_reset;
+ unsigned long ret = 0;
client->version = get_info->version;
get_info->version = FW_CDEV_VERSION;
+ down_read(&fw_device_rwsem);
+
if (get_info->rom != 0) {
void __user *uptr = u64_to_uptr(get_info->rom);
size_t want = get_info->rom_length;
size_t have = client->device->config_rom_length * 4;
- if (copy_to_user(uptr, client->device->config_rom,
- min(want, have)))
- return -EFAULT;
+ ret = copy_to_user(uptr, client->device->config_rom,
+ min(want, have));
}
get_info->rom_length = client->device->config_rom_length * 4;
+ up_read(&fw_device_rwsem);
+
+ if (ret != 0)
+ return -EFAULT;
+
client->bus_reset_closure = get_info->bus_reset_closure;
if (get_info->bus_reset != 0) {
void __user *uptr = u64_to_uptr(get_info->bus_reset);
@@ -644,6 +653,10 @@ static int ioctl_create_iso_context(struct client *client, void *buffer)
struct fw_cdev_create_iso_context *request = buffer;
struct fw_iso_context *context;
+ /* We only support one context at this time. */
+ if (client->iso_context != NULL)
+ return -EBUSY;
+
if (request->channel > 63)
return -EINVAL;
@@ -790,8 +803,9 @@ static int ioctl_start_iso(struct client *client, void *buffer)
{
struct fw_cdev_start_iso *request = buffer;
- if (request->handle != 0)
+ if (client->iso_context == NULL || request->handle != 0)
return -EINVAL;
+
if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE) {
if (request->tags == 0 || request->tags > 15)
return -EINVAL;
@@ -808,7 +822,7 @@ static int ioctl_stop_iso(struct client *client, void *buffer)
{
struct fw_cdev_stop_iso *request = buffer;
- if (request->handle != 0)
+ if (client->iso_context == NULL || request->handle != 0)
return -EINVAL;
return fw_iso_context_stop(client->iso_context);
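fw_device_get_by_devt() now hands back a device with its reference count raised, so callers must drop that reference on every error path, as the open() hunk above does, and exactly once when they are finished with the device. A minimal sketch of the contract (hypothetical functions, simplified from fw_device_op_open()):

    #include <linux/fs.h>
    #include "fw-device.h"

    static int sketch_open(struct inode *inode, struct file *file)
    {
    	struct fw_device *device;

    	device = fw_device_get_by_devt(inode->i_rdev);	/* takes a reference */
    	if (device == NULL)
    		return -ENODEV;

    	file->private_data = device;	/* keep the reference while open */

    	return 0;
    }

    static int sketch_release(struct inode *inode, struct file *file)
    {
    	fw_device_put(file->private_data);	/* drop it exactly once */

    	return 0;
    }
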
diff --git a/drivers/firewire/fw-device.c b/drivers/firewire/fw-device.c
index de9066e69ad..d9c8daf7ae7 100644
--- a/drivers/firewire/fw-device.c
+++ b/drivers/firewire/fw-device.c
@@ -25,8 +25,9 @@
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/idr.h>
+#include <linux/string.h>
#include <linux/rwsem.h>
-#include <asm/semaphore.h>
+#include <linux/semaphore.h>
#include <asm/system.h>
#include <linux/ctype.h>
#include "fw-transaction.h"
@@ -150,35 +151,24 @@ struct bus_type fw_bus_type = {
};
EXPORT_SYMBOL(fw_bus_type);
-struct fw_device *fw_device_get(struct fw_device *device)
-{
- get_device(&device->device);
-
- return device;
-}
-
-void fw_device_put(struct fw_device *device)
-{
- put_device(&device->device);
-}
-
static void fw_device_release(struct device *dev)
{
struct fw_device *device = fw_device(dev);
+ struct fw_card *card = device->card;
unsigned long flags;
/*
* Take the card lock so we don't set this to NULL while a
* FW_NODE_UPDATED callback is being handled.
*/
- spin_lock_irqsave(&device->card->lock, flags);
+ spin_lock_irqsave(&card->lock, flags);
device->node->data = NULL;
- spin_unlock_irqrestore(&device->card->lock, flags);
+ spin_unlock_irqrestore(&card->lock, flags);
fw_node_put(device->node);
- fw_card_put(device->card);
kfree(device->config_rom);
kfree(device);
+ atomic_dec(&card->device_count);
}
int fw_device_enable_phys_dma(struct fw_device *device)
@@ -206,7 +196,9 @@ show_immediate(struct device *dev, struct device_attribute *dattr, char *buf)
container_of(dattr, struct config_rom_attribute, attr);
struct fw_csr_iterator ci;
u32 *dir;
- int key, value;
+ int key, value, ret = -ENOENT;
+
+ down_read(&fw_device_rwsem);
if (is_fw_unit(dev))
dir = fw_unit(dev)->directory;
@@ -215,11 +207,15 @@ show_immediate(struct device *dev, struct device_attribute *dattr, char *buf)
fw_csr_iterator_init(&ci, dir);
while (fw_csr_iterator_next(&ci, &key, &value))
- if (attr->key == key)
- return snprintf(buf, buf ? PAGE_SIZE : 0,
- "0x%06x\n", value);
+ if (attr->key == key) {
+ ret = snprintf(buf, buf ? PAGE_SIZE : 0,
+ "0x%06x\n", value);
+ break;
+ }
+
+ up_read(&fw_device_rwsem);
- return -ENOENT;
+ return ret;
}
#define IMMEDIATE_ATTR(name, key) \
@@ -232,9 +228,11 @@ show_text_leaf(struct device *dev, struct device_attribute *dattr, char *buf)
container_of(dattr, struct config_rom_attribute, attr);
struct fw_csr_iterator ci;
u32 *dir, *block = NULL, *p, *end;
- int length, key, value, last_key = 0;
+ int length, key, value, last_key = 0, ret = -ENOENT;
char *b;
+ down_read(&fw_device_rwsem);
+
if (is_fw_unit(dev))
dir = fw_unit(dev)->directory;
else
@@ -249,18 +247,20 @@ show_text_leaf(struct device *dev, struct device_attribute *dattr, char *buf)
}
if (block == NULL)
- return -ENOENT;
+ goto out;
length = min(block[0] >> 16, 256U);
if (length < 3)
- return -ENOENT;
+ goto out;
if (block[1] != 0 || block[2] != 0)
/* Unknown encoding. */
- return -ENOENT;
+ goto out;
- if (buf == NULL)
- return length * 4;
+ if (buf == NULL) {
+ ret = length * 4;
+ goto out;
+ }
b = buf;
end = &block[length + 1];
@@ -270,8 +270,11 @@ show_text_leaf(struct device *dev, struct device_attribute *dattr, char *buf)
/* Strip trailing whitespace and add newline. */
while (b--, (isspace(*b) || *b == '\0') && b > buf);
strcpy(b + 1, "\n");
+ ret = b + 2 - buf;
+ out:
+ up_read(&fw_device_rwsem);
- return b + 2 - buf;
+ return ret;
}
#define TEXT_LEAF_ATTR(name, key) \
@@ -348,22 +351,28 @@ static ssize_t
config_rom_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct fw_device *device = fw_device(dev);
+ size_t length;
- memcpy(buf, device->config_rom, device->config_rom_length * 4);
+ down_read(&fw_device_rwsem);
+ length = device->config_rom_length * 4;
+ memcpy(buf, device->config_rom, length);
+ up_read(&fw_device_rwsem);
- return device->config_rom_length * 4;
+ return length;
}
static ssize_t
guid_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct fw_device *device = fw_device(dev);
- u64 guid;
+ int ret;
- guid = ((u64)device->config_rom[3] << 32) | device->config_rom[4];
+ down_read(&fw_device_rwsem);
+ ret = snprintf(buf, PAGE_SIZE, "0x%08x%08x\n",
+ device->config_rom[3], device->config_rom[4]);
+ up_read(&fw_device_rwsem);
- return snprintf(buf, PAGE_SIZE, "0x%016llx\n",
- (unsigned long long)guid);
+ return ret;
}
static struct device_attribute fw_device_attributes[] = {
@@ -402,7 +411,7 @@ read_rom(struct fw_device *device, int generation, int index, u32 *data)
init_completion(&callback_data.done);
- offset = 0xfffff0000400ULL + index * 4;
+ offset = (CSR_REGISTER_BASE | CSR_CONFIG_ROM) + index * 4;
fw_send_request(device->card, &t, TCODE_READ_QUADLET_REQUEST,
device->node_id, generation, device->max_speed,
offset, NULL, 4, complete_transaction, &callback_data);
@@ -414,6 +423,9 @@ read_rom(struct fw_device *device, int generation, int index, u32 *data)
return callback_data.rcode;
}
+#define READ_BIB_ROM_SIZE 256
+#define READ_BIB_STACK_SIZE 16
+
/*
* Read the bus info block, perform a speed probe, and read all of the rest of
* the config ROM. We do all this with a cached bus generation. If the bus
@@ -423,16 +435,23 @@ read_rom(struct fw_device *device, int generation, int index, u32 *data)
*/
static int read_bus_info_block(struct fw_device *device, int generation)
{
- static u32 rom[256];
- u32 stack[16], sp, key;
- int i, end, length;
+ u32 *rom, *stack, *old_rom, *new_rom;
+ u32 sp, key;
+ int i, end, length, ret = -1;
+
+ rom = kmalloc(sizeof(*rom) * READ_BIB_ROM_SIZE +
+ sizeof(*stack) * READ_BIB_STACK_SIZE, GFP_KERNEL);
+ if (rom == NULL)
+ return -ENOMEM;
+
+ stack = &rom[READ_BIB_ROM_SIZE];
device->max_speed = SCODE_100;
/* First read the bus info block. */
for (i = 0; i < 5; i++) {
if (read_rom(device, generation, i, &rom[i]) != RCODE_COMPLETE)
- return -1;
+ goto out;
/*
* As per IEEE1212 7.2, during power-up, devices can
* reply with a 0 for the first quadlet of the config
@@ -442,7 +461,7 @@ static int read_bus_info_block(struct fw_device *device, int generation)
* retry mechanism will try again later.
*/
if (i == 0 && rom[i] == 0)
- return -1;
+ goto out;
}
device->max_speed = device->node->max_speed;
@@ -492,26 +511,26 @@ static int read_bus_info_block(struct fw_device *device, int generation)
*/
key = stack[--sp];
i = key & 0xffffff;
- if (i >= ARRAY_SIZE(rom))
+ if (i >= READ_BIB_ROM_SIZE)
/*
* The reference points outside the standard
* config rom area, something's fishy.
*/
- return -1;
+ goto out;
/* Read header quadlet for the block to get the length. */
if (read_rom(device, generation, i, &rom[i]) != RCODE_COMPLETE)
- return -1;
+ goto out;
end = i + (rom[i] >> 16) + 1;
i++;
- if (end > ARRAY_SIZE(rom))
+ if (end > READ_BIB_ROM_SIZE)
/*
* This block extends outside standard config
* area (and the array we're reading it
* into). That's broken, so ignore this
* device.
*/
- return -1;
+ goto out;
/*
* Now read in the block. If this is a directory
@@ -521,9 +540,9 @@ static int read_bus_info_block(struct fw_device *device, int generation)
while (i < end) {
if (read_rom(device, generation, i, &rom[i]) !=
RCODE_COMPLETE)
- return -1;
+ goto out;
if ((key >> 30) == 3 && (rom[i] >> 30) > 1 &&
- sp < ARRAY_SIZE(stack))
+ sp < READ_BIB_STACK_SIZE)
stack[sp++] = i + rom[i];
i++;
}
@@ -531,13 +550,23 @@ static int read_bus_info_block(struct fw_device *device, int generation)
length = i;
}
- device->config_rom = kmalloc(length * 4, GFP_KERNEL);
- if (device->config_rom == NULL)
- return -1;
- memcpy(device->config_rom, rom, length * 4);
+ old_rom = device->config_rom;
+ new_rom = kmemdup(rom, length * 4, GFP_KERNEL);
+ if (new_rom == NULL)
+ goto out;
+
+ down_write(&fw_device_rwsem);
+ device->config_rom = new_rom;
device->config_rom_length = length;
+ up_write(&fw_device_rwsem);
- return 0;
+ kfree(old_rom);
+ ret = 0;
+ device->cmc = rom[2] & 1 << 30;
+ out:
+ kfree(rom);
+
+ return ret;
}
static void fw_unit_release(struct device *dev)
@@ -606,17 +635,26 @@ static int shutdown_unit(struct device *device, void *data)
return 0;
}
-static DECLARE_RWSEM(idr_rwsem);
+/*
+ * fw_device_rwsem acts as dual purpose mutex:
+ * - serializes accesses to fw_device_idr,
+ * - serializes accesses to fw_device.config_rom/.config_rom_length and
+ * fw_unit.directory, unless those accesses happen at safe occasions
+ */
+DECLARE_RWSEM(fw_device_rwsem);
+
static DEFINE_IDR(fw_device_idr);
int fw_cdev_major;
-struct fw_device *fw_device_from_devt(dev_t devt)
+struct fw_device *fw_device_get_by_devt(dev_t devt)
{
struct fw_device *device;
- down_read(&idr_rwsem);
+ down_read(&fw_device_rwsem);
device = idr_find(&fw_device_idr, MINOR(devt));
- up_read(&idr_rwsem);
+ if (device)
+ fw_device_get(device);
+ up_read(&fw_device_rwsem);
return device;
}
@@ -627,13 +665,14 @@ static void fw_device_shutdown(struct work_struct *work)
container_of(work, struct fw_device, work.work);
int minor = MINOR(device->device.devt);
- down_write(&idr_rwsem);
- idr_remove(&fw_device_idr, minor);
- up_write(&idr_rwsem);
-
fw_device_cdev_remove(device);
device_for_each_child(&device->device, NULL, shutdown_unit);
device_unregister(&device->device);
+
+ down_write(&fw_device_rwsem);
+ idr_remove(&fw_device_idr, minor);
+ up_write(&fw_device_rwsem);
+ fw_device_put(device);
}
static struct device_type fw_device_type = {
@@ -668,7 +707,8 @@ static void fw_device_init(struct work_struct *work)
*/
if (read_bus_info_block(device, device->generation) < 0) {
- if (device->config_rom_retries < MAX_RETRIES) {
+ if (device->config_rom_retries < MAX_RETRIES &&
+ atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
device->config_rom_retries++;
schedule_delayed_work(&device->work, RETRY_DELAY);
} else {
@@ -682,10 +722,13 @@ static void fw_device_init(struct work_struct *work)
}
err = -ENOMEM;
- down_write(&idr_rwsem);
+
+ fw_device_get(device);
+ down_write(&fw_device_rwsem);
if (idr_pre_get(&fw_device_idr, GFP_KERNEL))
err = idr_get_new(&fw_device_idr, device, &minor);
- up_write(&idr_rwsem);
+ up_write(&fw_device_rwsem);
+
if (err < 0)
goto error;
@@ -717,13 +760,23 @@ static void fw_device_init(struct work_struct *work)
*/
if (atomic_cmpxchg(&device->state,
FW_DEVICE_INITIALIZING,
- FW_DEVICE_RUNNING) == FW_DEVICE_SHUTDOWN)
- fw_device_shutdown(&device->work.work);
- else
- fw_notify("created new fw device %s "
- "(%d config rom retries, S%d00)\n",
- device->device.bus_id, device->config_rom_retries,
- 1 << device->max_speed);
+ FW_DEVICE_RUNNING) == FW_DEVICE_SHUTDOWN) {
+ fw_device_shutdown(work);
+ } else {
+ if (device->config_rom_retries)
+ fw_notify("created device %s: GUID %08x%08x, S%d00, "
+ "%d config ROM retries\n",
+ device->device.bus_id,
+ device->config_rom[3], device->config_rom[4],
+ 1 << device->max_speed,
+ device->config_rom_retries);
+ else
+ fw_notify("created device %s: GUID %08x%08x, S%d00\n",
+ device->device.bus_id,
+ device->config_rom[3], device->config_rom[4],
+ 1 << device->max_speed);
+ device->config_rom_retries = 0;
+ }
/*
* Reschedule the IRM work if we just finished reading the
@@ -737,11 +790,13 @@ static void fw_device_init(struct work_struct *work)
return;
error_with_cdev:
- down_write(&idr_rwsem);
+ down_write(&fw_device_rwsem);
idr_remove(&fw_device_idr, minor);
- up_write(&idr_rwsem);
+ up_write(&fw_device_rwsem);
error:
- put_device(&device->device);
+ fw_device_put(device); /* fw_device_idr's reference */
+
+ put_device(&device->device); /* our reference */
}
static int update_unit(struct device *dev, void *data)
@@ -767,6 +822,106 @@ static void fw_device_update(struct work_struct *work)
device_for_each_child(&device->device, NULL, update_unit);
}
+enum {
+ REREAD_BIB_ERROR,
+ REREAD_BIB_GONE,
+ REREAD_BIB_UNCHANGED,
+ REREAD_BIB_CHANGED,
+};
+
+/* Reread and compare bus info block and header of root directory */
+static int reread_bus_info_block(struct fw_device *device, int generation)
+{
+ u32 q;
+ int i;
+
+ for (i = 0; i < 6; i++) {
+ if (read_rom(device, generation, i, &q) != RCODE_COMPLETE)
+ return REREAD_BIB_ERROR;
+
+ if (i == 0 && q == 0)
+ return REREAD_BIB_GONE;
+
+ if (i > device->config_rom_length || q != device->config_rom[i])
+ return REREAD_BIB_CHANGED;
+ }
+
+ return REREAD_BIB_UNCHANGED;
+}
+
+static void fw_device_refresh(struct work_struct *work)
+{
+ struct fw_device *device =
+ container_of(work, struct fw_device, work.work);
+ struct fw_card *card = device->card;
+ int node_id = device->node_id;
+
+ switch (reread_bus_info_block(device, device->generation)) {
+ case REREAD_BIB_ERROR:
+ if (device->config_rom_retries < MAX_RETRIES / 2 &&
+ atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
+ device->config_rom_retries++;
+ schedule_delayed_work(&device->work, RETRY_DELAY / 2);
+
+ return;
+ }
+ goto give_up;
+
+ case REREAD_BIB_GONE:
+ goto gone;
+
+ case REREAD_BIB_UNCHANGED:
+ if (atomic_cmpxchg(&device->state,
+ FW_DEVICE_INITIALIZING,
+ FW_DEVICE_RUNNING) == FW_DEVICE_SHUTDOWN)
+ goto gone;
+
+ fw_device_update(work);
+ device->config_rom_retries = 0;
+ goto out;
+
+ case REREAD_BIB_CHANGED:
+ break;
+ }
+
+ /*
+ * Something changed. We keep things simple and don't investigate
+ * further. We just destroy all previous units and create new ones.
+ */
+ device_for_each_child(&device->device, NULL, shutdown_unit);
+
+ if (read_bus_info_block(device, device->generation) < 0) {
+ if (device->config_rom_retries < MAX_RETRIES &&
+ atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
+ device->config_rom_retries++;
+ schedule_delayed_work(&device->work, RETRY_DELAY);
+
+ return;
+ }
+ goto give_up;
+ }
+
+ create_units(device);
+
+ if (atomic_cmpxchg(&device->state,
+ FW_DEVICE_INITIALIZING,
+ FW_DEVICE_RUNNING) == FW_DEVICE_SHUTDOWN)
+ goto gone;
+
+ fw_notify("refreshed device %s\n", device->device.bus_id);
+ device->config_rom_retries = 0;
+ goto out;
+
+ give_up:
+ fw_notify("giving up on refresh of device %s\n", device->device.bus_id);
+ gone:
+ atomic_set(&device->state, FW_DEVICE_SHUTDOWN);
+ fw_device_shutdown(work);
+ out:
+ if (node_id == card->root_node->node_id)
+ schedule_delayed_work(&card->work, 0);
+}
+
void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
{
struct fw_device *device;
@@ -776,7 +931,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
case FW_NODE_LINK_ON:
if (!node->link_on)
break;
-
+ create:
device = kzalloc(sizeof(*device), GFP_ATOMIC);
if (device == NULL)
break;
@@ -791,7 +946,8 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
*/
device_initialize(&device->device);
atomic_set(&device->state, FW_DEVICE_INITIALIZING);
- device->card = fw_card_get(card);
+ atomic_inc(&card->device_count);
+ device->card = card;
device->node = fw_node_get(node);
device->node_id = node->node_id;
device->generation = card->generation;
@@ -814,6 +970,23 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
schedule_delayed_work(&device->work, INITIAL_DELAY);
break;
+ case FW_NODE_INITIATED_RESET:
+ device = node->data;
+ if (device == NULL)
+ goto create;
+
+ device->node_id = node->node_id;
+ smp_wmb(); /* update node_id before generation */
+ device->generation = card->generation;
+ if (atomic_cmpxchg(&device->state,
+ FW_DEVICE_RUNNING,
+ FW_DEVICE_INITIALIZING) == FW_DEVICE_RUNNING) {
+ PREPARE_DELAYED_WORK(&device->work, fw_device_refresh);
+ schedule_delayed_work(&device->work,
+ node == card->local_node ? 0 : INITIAL_DELAY);
+ }
+ break;
+
case FW_NODE_UPDATED:
if (!node->link_on || node->data == NULL)
break;
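The smp_wmb() in the FW_NODE_INITIATED_RESET case orders the node_id store before the generation store. The documented reader side (see the comment block in fw-device.h) reads the two fields in the opposite order with a read barrier in between; a sketch of that pairing, assuming a consumer such as a protocol driver:

    #include <asm/system.h>	/* smp_rmb() in this kernel vintage */
    #include "fw-device.h"

    static void sketch_snapshot(struct fw_device *device,
    			    int *generation, int *node_id)
    {
    	*generation = device->generation;
    	smp_rmb();	/* pairs with smp_wmb() in fw_node_event() */
    	*node_id = device->node_id;
    	/*
    	 * If a request sent with *generation is not rejected as stale,
    	 * *node_id is guaranteed to belong to that same generation.
    	 */
    }
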
diff --git a/drivers/firewire/fw-device.h b/drivers/firewire/fw-device.h
index 0854fe2bc11..5f131f5129d 100644
--- a/drivers/firewire/fw-device.h
+++ b/drivers/firewire/fw-device.h
@@ -21,6 +21,7 @@
#include <linux/fs.h>
#include <linux/cdev.h>
+#include <linux/rwsem.h>
#include <asm/atomic.h>
enum fw_device_state {
@@ -46,6 +47,11 @@ struct fw_attribute_group {
* fw_device.node_id is guaranteed to be current too.
*
* The same applies to fw_device.card->node_id vs. fw_device.generation.
+ *
+ * fw_device.config_rom and fw_device.config_rom_length may be accessed during
+ * the lifetime of any fw_unit belonging to the fw_device, before device_del()
+ * was called on the last fw_unit. Alternatively, they may be accessed while
+ * holding fw_device_rwsem.
*/
struct fw_device {
atomic_t state;
@@ -53,6 +59,7 @@ struct fw_device {
int node_id;
int generation;
unsigned max_speed;
+ bool cmc;
struct fw_card *card;
struct device device;
struct list_head link;
@@ -64,40 +71,63 @@ struct fw_device {
struct fw_attribute_group attribute_group;
};
-static inline struct fw_device *
-fw_device(struct device *dev)
+static inline struct fw_device *fw_device(struct device *dev)
{
return container_of(dev, struct fw_device, device);
}
-static inline int
-fw_device_is_shutdown(struct fw_device *device)
+static inline int fw_device_is_shutdown(struct fw_device *device)
{
return atomic_read(&device->state) == FW_DEVICE_SHUTDOWN;
}
-struct fw_device *fw_device_get(struct fw_device *device);
-void fw_device_put(struct fw_device *device);
+static inline struct fw_device *fw_device_get(struct fw_device *device)
+{
+ get_device(&device->device);
+
+ return device;
+}
+
+static inline void fw_device_put(struct fw_device *device)
+{
+ put_device(&device->device);
+}
+
+struct fw_device *fw_device_get_by_devt(dev_t devt);
int fw_device_enable_phys_dma(struct fw_device *device);
void fw_device_cdev_update(struct fw_device *device);
void fw_device_cdev_remove(struct fw_device *device);
-struct fw_device *fw_device_from_devt(dev_t devt);
+extern struct rw_semaphore fw_device_rwsem;
extern int fw_cdev_major;
+/*
+ * fw_unit.directory must not be accessed after device_del(&fw_unit.device).
+ */
struct fw_unit {
struct device device;
u32 *directory;
struct fw_attribute_group attribute_group;
};
-static inline struct fw_unit *
-fw_unit(struct device *dev)
+static inline struct fw_unit *fw_unit(struct device *dev)
{
return container_of(dev, struct fw_unit, device);
}
+static inline struct fw_unit *fw_unit_get(struct fw_unit *unit)
+{
+ get_device(&unit->device);
+
+ return unit;
+}
+
+static inline void fw_unit_put(struct fw_unit *unit)
+{
+ put_device(&unit->device);
+}
+
#define CSR_OFFSET 0x40
#define CSR_LEAF 0x80
#define CSR_DIRECTORY 0xc0
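The new comment above struct fw_device spells out when config_rom may be dereferenced: during the lifetime of one of the device's units, or otherwise only while holding fw_device_rwsem. A minimal sketch of the locked case, mirroring what guid_show() in fw-device.c does:

    #include <linux/rwsem.h>
    #include "fw-device.h"

    static u64 sketch_read_guid(struct fw_device *device)
    {
    	u64 guid;

    	down_read(&fw_device_rwsem);	/* config_rom may be replaced on refresh */
    	guid = ((u64)device->config_rom[3] << 32) | device->config_rom[4];
    	up_read(&fw_device_rwsem);

    	return guid;
    }
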
diff --git a/drivers/firewire/fw-iso.c b/drivers/firewire/fw-iso.c
index 2b640e9be6d..bcbe794a3ea 100644
--- a/drivers/firewire/fw-iso.c
+++ b/drivers/firewire/fw-iso.c
@@ -126,7 +126,6 @@ fw_iso_context_create(struct fw_card *card, int type,
return ctx;
}
-EXPORT_SYMBOL(fw_iso_context_create);
void fw_iso_context_destroy(struct fw_iso_context *ctx)
{
@@ -134,14 +133,12 @@ void fw_iso_context_destroy(struct fw_iso_context *ctx)
card->driver->free_iso_context(ctx);
}
-EXPORT_SYMBOL(fw_iso_context_destroy);
int
fw_iso_context_start(struct fw_iso_context *ctx, int cycle, int sync, int tags)
{
return ctx->card->driver->start_iso(ctx, cycle, sync, tags);
}
-EXPORT_SYMBOL(fw_iso_context_start);
int
fw_iso_context_queue(struct fw_iso_context *ctx,
@@ -153,11 +150,9 @@ fw_iso_context_queue(struct fw_iso_context *ctx,
return card->driver->queue_iso(ctx, packet, buffer, payload);
}
-EXPORT_SYMBOL(fw_iso_context_queue);
int
fw_iso_context_stop(struct fw_iso_context *ctx)
{
return ctx->card->driver->stop_iso(ctx);
}
-EXPORT_SYMBOL(fw_iso_context_stop);
diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c
index 7ebad3c14cb..4f02c55f13e 100644
--- a/drivers/firewire/fw-ohci.c
+++ b/drivers/firewire/fw-ohci.c
@@ -27,12 +27,17 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <asm/page.h>
#include <asm/system.h>
+#ifdef CONFIG_PPC_PMAC
+#include <asm/pmac_feature.h>
+#endif
+
#include "fw-ohci.h"
#include "fw-transaction.h"
@@ -173,8 +178,10 @@ struct fw_ohci {
struct tasklet_struct bus_reset_tasklet;
int node_id;
int generation;
- int request_generation;
+ int request_generation; /* for timestamping incoming requests */
u32 bus_seconds;
+ bool old_uninorth;
+ bool bus_reset_packet_quirk;
/*
* Spinlock for accessing fw_ohci data. Never call out of
@@ -232,6 +239,196 @@ static inline struct fw_ohci *fw_ohci(struct fw_card *card)
static char ohci_driver_name[] = KBUILD_MODNAME;
+#ifdef CONFIG_FIREWIRE_OHCI_DEBUG
+
+#define OHCI_PARAM_DEBUG_AT_AR 1
+#define OHCI_PARAM_DEBUG_SELFIDS 2
+#define OHCI_PARAM_DEBUG_IRQS 4
+#define OHCI_PARAM_DEBUG_BUSRESETS 8 /* only effective before chip init */
+
+static int param_debug;
+module_param_named(debug, param_debug, int, 0644);
+MODULE_PARM_DESC(debug, "Verbose logging (default = 0"
+ ", AT/AR events = " __stringify(OHCI_PARAM_DEBUG_AT_AR)
+ ", self-IDs = " __stringify(OHCI_PARAM_DEBUG_SELFIDS)
+ ", IRQs = " __stringify(OHCI_PARAM_DEBUG_IRQS)
+ ", busReset events = " __stringify(OHCI_PARAM_DEBUG_BUSRESETS)
+ ", or a combination, or all = -1)");
+
+static void log_irqs(u32 evt)
+{
+ if (likely(!(param_debug &
+ (OHCI_PARAM_DEBUG_IRQS | OHCI_PARAM_DEBUG_BUSRESETS))))
+ return;
+
+ if (!(param_debug & OHCI_PARAM_DEBUG_IRQS) &&
+ !(evt & OHCI1394_busReset))
+ return;
+
+ printk(KERN_DEBUG KBUILD_MODNAME ": IRQ "
+ "%08x%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
+ evt,
+ evt & OHCI1394_selfIDComplete ? " selfID" : "",
+ evt & OHCI1394_RQPkt ? " AR_req" : "",
+ evt & OHCI1394_RSPkt ? " AR_resp" : "",
+ evt & OHCI1394_reqTxComplete ? " AT_req" : "",
+ evt & OHCI1394_respTxComplete ? " AT_resp" : "",
+ evt & OHCI1394_isochRx ? " IR" : "",
+ evt & OHCI1394_isochTx ? " IT" : "",
+ evt & OHCI1394_postedWriteErr ? " postedWriteErr" : "",
+ evt & OHCI1394_cycleTooLong ? " cycleTooLong" : "",
+ evt & OHCI1394_cycle64Seconds ? " cycle64Seconds" : "",
+ evt & OHCI1394_regAccessFail ? " regAccessFail" : "",
+ evt & OHCI1394_busReset ? " busReset" : "",
+ evt & ~(OHCI1394_selfIDComplete | OHCI1394_RQPkt |
+ OHCI1394_RSPkt | OHCI1394_reqTxComplete |
+ OHCI1394_respTxComplete | OHCI1394_isochRx |
+ OHCI1394_isochTx | OHCI1394_postedWriteErr |
+ OHCI1394_cycleTooLong | OHCI1394_cycle64Seconds |
+ OHCI1394_regAccessFail | OHCI1394_busReset)
+ ? " ?" : "");
+}
+
+static const char *speed[] = {
+ [0] = "S100", [1] = "S200", [2] = "S400", [3] = "beta",
+};
+static const char *power[] = {
+ [0] = "+0W", [1] = "+15W", [2] = "+30W", [3] = "+45W",
+ [4] = "-3W", [5] = " ?W", [6] = "-3..-6W", [7] = "-3..-10W",
+};
+static const char port[] = { '.', '-', 'p', 'c', };
+
+static char _p(u32 *s, int shift)
+{
+ return port[*s >> shift & 3];
+}
+
+static void log_selfids(int node_id, int generation, int self_id_count, u32 *s)
+{
+ if (likely(!(param_debug & OHCI_PARAM_DEBUG_SELFIDS)))
+ return;
+
+ printk(KERN_DEBUG KBUILD_MODNAME ": %d selfIDs, generation %d, "
+ "local node ID %04x\n", self_id_count, generation, node_id);
+
+ for (; self_id_count--; ++s)
+ if ((*s & 1 << 23) == 0)
+ printk(KERN_DEBUG "selfID 0: %08x, phy %d [%c%c%c] "
+ "%s gc=%d %s %s%s%s\n",
+ *s, *s >> 24 & 63, _p(s, 6), _p(s, 4), _p(s, 2),
+ speed[*s >> 14 & 3], *s >> 16 & 63,
+ power[*s >> 8 & 7], *s >> 22 & 1 ? "L" : "",
+ *s >> 11 & 1 ? "c" : "", *s & 2 ? "i" : "");
+ else
+ printk(KERN_DEBUG "selfID n: %08x, phy %d "
+ "[%c%c%c%c%c%c%c%c]\n",
+ *s, *s >> 24 & 63,
+ _p(s, 16), _p(s, 14), _p(s, 12), _p(s, 10),
+ _p(s, 8), _p(s, 6), _p(s, 4), _p(s, 2));
+}
+
+static const char *evts[] = {
+ [0x00] = "evt_no_status", [0x01] = "-reserved-",
+ [0x02] = "evt_long_packet", [0x03] = "evt_missing_ack",
+ [0x04] = "evt_underrun", [0x05] = "evt_overrun",
+ [0x06] = "evt_descriptor_read", [0x07] = "evt_data_read",
+ [0x08] = "evt_data_write", [0x09] = "evt_bus_reset",
+ [0x0a] = "evt_timeout", [0x0b] = "evt_tcode_err",
+ [0x0c] = "-reserved-", [0x0d] = "-reserved-",
+ [0x0e] = "evt_unknown", [0x0f] = "evt_flushed",
+ [0x10] = "-reserved-", [0x11] = "ack_complete",
+ [0x12] = "ack_pending ", [0x13] = "-reserved-",
+ [0x14] = "ack_busy_X", [0x15] = "ack_busy_A",
+ [0x16] = "ack_busy_B", [0x17] = "-reserved-",
+ [0x18] = "-reserved-", [0x19] = "-reserved-",
+ [0x1a] = "-reserved-", [0x1b] = "ack_tardy",
+ [0x1c] = "-reserved-", [0x1d] = "ack_data_error",
+ [0x1e] = "ack_type_error", [0x1f] = "-reserved-",
+ [0x20] = "pending/cancelled",
+};
+static const char *tcodes[] = {
+ [0x0] = "QW req", [0x1] = "BW req",
+ [0x2] = "W resp", [0x3] = "-reserved-",
+ [0x4] = "QR req", [0x5] = "BR req",
+ [0x6] = "QR resp", [0x7] = "BR resp",
+ [0x8] = "cycle start", [0x9] = "Lk req",
+ [0xa] = "async stream packet", [0xb] = "Lk resp",
+ [0xc] = "-reserved-", [0xd] = "-reserved-",
+ [0xe] = "link internal", [0xf] = "-reserved-",
+};
+static const char *phys[] = {
+ [0x0] = "phy config packet", [0x1] = "link-on packet",
+ [0x2] = "self-id packet", [0x3] = "-reserved-",
+};
+
+static void log_ar_at_event(char dir, int speed, u32 *header, int evt)
+{
+ int tcode = header[0] >> 4 & 0xf;
+ char specific[12];
+
+ if (likely(!(param_debug & OHCI_PARAM_DEBUG_AT_AR)))
+ return;
+
+ if (unlikely(evt >= ARRAY_SIZE(evts)))
+ evt = 0x1f;
+
+ if (evt == OHCI1394_evt_bus_reset) {
+ printk(KERN_DEBUG "A%c evt_bus_reset, generation %d\n",
+ dir, (header[2] >> 16) & 0xff);
+ return;
+ }
+
+ if (header[0] == ~header[1]) {
+ printk(KERN_DEBUG "A%c %s, %s, %08x\n",
+ dir, evts[evt], phys[header[0] >> 30 & 0x3],
+ header[0]);
+ return;
+ }
+
+ switch (tcode) {
+ case 0x0: case 0x6: case 0x8:
+ snprintf(specific, sizeof(specific), " = %08x",
+ be32_to_cpu((__force __be32)header[3]));
+ break;
+ case 0x1: case 0x5: case 0x7: case 0x9: case 0xb:
+ snprintf(specific, sizeof(specific), " %x,%x",
+ header[3] >> 16, header[3] & 0xffff);
+ break;
+ default:
+ specific[0] = '\0';
+ }
+
+ switch (tcode) {
+ case 0xe: case 0xa:
+ printk(KERN_DEBUG "A%c %s, %s\n",
+ dir, evts[evt], tcodes[tcode]);
+ break;
+ case 0x0: case 0x1: case 0x4: case 0x5: case 0x9:
+ printk(KERN_DEBUG "A%c spd %x tl %02x, "
+ "%04x -> %04x, %s, "
+ "%s, %04x%08x%s\n",
+ dir, speed, header[0] >> 10 & 0x3f,
+ header[1] >> 16, header[0] >> 16, evts[evt],
+ tcodes[tcode], header[1] & 0xffff, header[2], specific);
+ break;
+ default:
+ printk(KERN_DEBUG "A%c spd %x tl %02x, "
+ "%04x -> %04x, %s, "
+ "%s%s\n",
+ dir, speed, header[0] >> 10 & 0x3f,
+ header[1] >> 16, header[0] >> 16, evts[evt],
+ tcodes[tcode], specific);
+ }
+}
+
+#else
+
+#define log_irqs(evt)
+#define log_selfids(node_id, generation, self_id_count, sid)
+#define log_ar_at_event(dir, speed, header, evt)
+
+#endif /* CONFIG_FIREWIRE_OHCI_DEBUG */
+
static inline void reg_write(const struct fw_ohci *ohci, int offset, u32 data)
{
writel(data, ohci->registers + offset);
@@ -276,19 +473,13 @@ static int ar_context_add_page(struct ar_context *ctx)
{
struct device *dev = ctx->ohci->card.device;
struct ar_buffer *ab;
- dma_addr_t ab_bus;
+ dma_addr_t uninitialized_var(ab_bus);
size_t offset;
- ab = (struct ar_buffer *) __get_free_page(GFP_ATOMIC);
+ ab = dma_alloc_coherent(dev, PAGE_SIZE, &ab_bus, GFP_ATOMIC);
if (ab == NULL)
return -ENOMEM;
- ab_bus = dma_map_single(dev, ab, PAGE_SIZE, DMA_BIDIRECTIONAL);
- if (dma_mapping_error(ab_bus)) {
- free_page((unsigned long) ab);
- return -ENOMEM;
- }
-
memset(&ab->descriptor, 0, sizeof(ab->descriptor));
ab->descriptor.control = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
DESCRIPTOR_STATUS |
@@ -299,8 +490,6 @@ static int ar_context_add_page(struct ar_context *ctx)
ab->descriptor.res_count = cpu_to_le16(PAGE_SIZE - offset);
ab->descriptor.branch_address = 0;
- dma_sync_single_for_device(dev, ab_bus, PAGE_SIZE, DMA_BIDIRECTIONAL);
-
ctx->last_buffer->descriptor.branch_address = cpu_to_le32(ab_bus | 1);
ctx->last_buffer->next = ab;
ctx->last_buffer = ab;
@@ -311,15 +500,23 @@ static int ar_context_add_page(struct ar_context *ctx)
return 0;
}
+#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
+#define cond_le32_to_cpu(v) \
+ (ohci->old_uninorth ? (__force __u32)(v) : le32_to_cpu(v))
+#else
+#define cond_le32_to_cpu(v) le32_to_cpu(v)
+#endif
+
static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
{
struct fw_ohci *ohci = ctx->ohci;
struct fw_packet p;
u32 status, length, tcode;
+ int evt;
- p.header[0] = le32_to_cpu(buffer[0]);
- p.header[1] = le32_to_cpu(buffer[1]);
- p.header[2] = le32_to_cpu(buffer[2]);
+ p.header[0] = cond_le32_to_cpu(buffer[0]);
+ p.header[1] = cond_le32_to_cpu(buffer[1]);
+ p.header[2] = cond_le32_to_cpu(buffer[2]);
tcode = (p.header[0] >> 4) & 0x0f;
switch (tcode) {
@@ -331,7 +528,7 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
break;
case TCODE_READ_BLOCK_REQUEST :
- p.header[3] = le32_to_cpu(buffer[3]);
+ p.header[3] = cond_le32_to_cpu(buffer[3]);
p.header_length = 16;
p.payload_length = 0;
break;
@@ -340,7 +537,7 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
case TCODE_READ_BLOCK_RESPONSE:
case TCODE_LOCK_REQUEST:
case TCODE_LOCK_RESPONSE:
- p.header[3] = le32_to_cpu(buffer[3]);
+ p.header[3] = cond_le32_to_cpu(buffer[3]);
p.header_length = 16;
p.payload_length = p.header[3] >> 16;
break;
@@ -357,13 +554,16 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
/* FIXME: What to do about evt_* errors? */
length = (p.header_length + p.payload_length + 3) / 4;
- status = le32_to_cpu(buffer[length]);
+ status = cond_le32_to_cpu(buffer[length]);
+ evt = (status >> 16) & 0x1f;
- p.ack = ((status >> 16) & 0x1f) - 16;
+ p.ack = evt - 16;
p.speed = (status >> 21) & 0x7;
p.timestamp = status & 0xffff;
p.generation = ohci->request_generation;
+ log_ar_at_event('R', p.speed, p.header, evt);
+
/*
* The OHCI bus reset handler synthesizes a phy packet with
* the new generation number when a bus reset happens (see
@@ -372,14 +572,19 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
* generation. We only need this for requests; for responses
* we use the unique tlabel for finding the matching
* request.
+ *
+ * Alas some chips sometimes emit bus reset packets with a
+ * wrong generation. We set the correct generation for these
+ * at a slightly incorrect time (in bus_reset_tasklet).
*/
-
- if (p.ack + 16 == 0x09)
- ohci->request_generation = (buffer[2] >> 16) & 0xff;
- else if (ctx == &ohci->ar_request_ctx)
+ if (evt == OHCI1394_evt_bus_reset) {
+ if (!ohci->bus_reset_packet_quirk)
+ ohci->request_generation = (p.header[2] >> 16) & 0xff;
+ } else if (ctx == &ohci->ar_request_ctx) {
fw_core_handle_request(&ohci->card, &p);
- else
+ } else {
fw_core_handle_response(&ohci->card, &p);
+ }
return buffer + length + 1;
}
@@ -397,6 +602,8 @@ static void ar_context_tasklet(unsigned long data)
if (d->res_count == 0) {
size_t size, rest, offset;
+ dma_addr_t start_bus;
+ void *start;
/*
* This descriptor is finished and we may have a
@@ -405,11 +612,9 @@ static void ar_context_tasklet(unsigned long data)
*/
offset = offsetof(struct ar_buffer, data);
- dma_unmap_single(ohci->card.device,
- le32_to_cpu(ab->descriptor.data_address) - offset,
- PAGE_SIZE, DMA_BIDIRECTIONAL);
+ start = buffer = ab;
+ start_bus = le32_to_cpu(ab->descriptor.data_address) - offset;
- buffer = ab;
ab = ab->next;
d = &ab->descriptor;
size = buffer + PAGE_SIZE - ctx->pointer;
@@ -423,7 +628,8 @@ static void ar_context_tasklet(unsigned long data)
while (buffer < end)
buffer = handle_ar_packet(ctx, buffer);
- free_page((unsigned long)buffer);
+ dma_free_coherent(ohci->card.device, PAGE_SIZE,
+ start, start_bus);
ar_context_add_page(ctx);
} else {
buffer = ctx->pointer;
@@ -532,7 +738,7 @@ static int
context_add_buffer(struct context *ctx)
{
struct descriptor_buffer *desc;
- dma_addr_t bus_addr;
+ dma_addr_t uninitialized_var(bus_addr);
int offset;
/*
@@ -765,8 +971,19 @@ at_context_queue_packet(struct context *ctx, struct fw_packet *packet)
DESCRIPTOR_IRQ_ALWAYS |
DESCRIPTOR_BRANCH_ALWAYS);
- /* FIXME: Document how the locking works. */
- if (ohci->generation != packet->generation) {
+ /*
+ * If the controller and packet generations don't match, we need to
+ * bail out and try again. If IntEvent.busReset is set, the AT context
+ * is halted, so appending to the context and trying to run it is
+ * futile. Most controllers do the right thing and just flush the AT
+ * queue (per section 7.2.3.2 of the OHCI 1.1 specification), but
+ * some controllers (like a JMicron JMB381 PCI-e) misbehave and wind
+ * up stalling out. So we just bail out in software and try again
+ * later, and everyone is happy.
+ * FIXME: Document how the locking works.
+ */
+ if (ohci->generation != packet->generation ||
+ reg_read(ohci, OHCI1394_IntEventSet) & OHCI1394_busReset) {
if (packet->payload_length > 0)
dma_unmap_single(ohci->card.device, payload_bus,
packet->payload_length, DMA_TO_DEVICE);
@@ -812,6 +1029,8 @@ static int handle_at_packet(struct context *context,
evt = le16_to_cpu(last->transfer_status) & 0x1f;
packet->timestamp = le16_to_cpu(last->res_count);
+ log_ar_at_event('T', packet->speed, packet->header, evt);
+
switch (evt) {
case OHCI1394_evt_timeout:
/* Async response transmit timed out. */
@@ -1014,21 +1233,32 @@ static void bus_reset_tasklet(unsigned long data)
ohci->node_id = reg & (OHCI1394_NodeID_busNumber |
OHCI1394_NodeID_nodeNumber);
+ reg = reg_read(ohci, OHCI1394_SelfIDCount);
+ if (reg & OHCI1394_SelfIDCount_selfIDError) {
+ fw_notify("inconsistent self IDs\n");
+ return;
+ }
/*
* The count in the SelfIDCount register is the number of
* bytes in the self ID receive buffer. Since we also receive
* the inverted quadlets and a header quadlet, we shift one
* bit extra to get the actual number of self IDs.
*/
-
- self_id_count = (reg_read(ohci, OHCI1394_SelfIDCount) >> 3) & 0x3ff;
- generation = (le32_to_cpu(ohci->self_id_cpu[0]) >> 16) & 0xff;
+ self_id_count = (reg >> 3) & 0x3ff;
+ if (self_id_count == 0) {
+ fw_notify("inconsistent self IDs\n");
+ return;
+ }
+ generation = (cond_le32_to_cpu(ohci->self_id_cpu[0]) >> 16) & 0xff;
rmb();
for (i = 1, j = 0; j < self_id_count; i += 2, j++) {
- if (ohci->self_id_cpu[i] != ~ohci->self_id_cpu[i + 1])
- fw_error("inconsistent self IDs\n");
- ohci->self_id_buffer[j] = le32_to_cpu(ohci->self_id_cpu[i]);
+ if (ohci->self_id_cpu[i] != ~ohci->self_id_cpu[i + 1]) {
+ fw_notify("inconsistent self IDs\n");
+ return;
+ }
+ ohci->self_id_buffer[j] =
+ cond_le32_to_cpu(ohci->self_id_cpu[i]);
}
rmb();
@@ -1061,6 +1291,9 @@ static void bus_reset_tasklet(unsigned long data)
context_stop(&ohci->at_response_ctx);
reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
+ if (ohci->bus_reset_packet_quirk)
+ ohci->request_generation = generation;
+
/*
* This next bit is unrelated to the AT context stuff but we
* have to do it under the spinlock also. If a new config rom
@@ -1091,12 +1324,20 @@ static void bus_reset_tasklet(unsigned long data)
reg_write(ohci, OHCI1394_ConfigROMhdr, ohci->next_header);
}
+#ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA
+ reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0);
+ reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
+#endif
+
spin_unlock_irqrestore(&ohci->lock, flags);
if (free_rom)
dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
free_rom, free_rom_bus);
+ log_selfids(ohci->node_id, generation,
+ self_id_count, ohci->self_id_buffer);
+
fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation,
self_id_count, ohci->self_id_buffer);
}
@@ -1112,7 +1353,9 @@ static irqreturn_t irq_handler(int irq, void *data)
if (!event || !~event)
return IRQ_NONE;
- reg_write(ohci, OHCI1394_IntEventClear, event);
+ /* busReset must not be cleared yet, see OHCI 1.1 clause 7.2.3.2 */
+ reg_write(ohci, OHCI1394_IntEventClear, event & ~OHCI1394_busReset);
+ log_irqs(event);
if (event & OHCI1394_selfIDComplete)
tasklet_schedule(&ohci->bus_reset_tasklet);
@@ -1147,6 +1390,10 @@ static irqreturn_t irq_handler(int irq, void *data)
iso_event &= ~(1 << i);
}
+ if (unlikely(event & OHCI1394_regAccessFail))
+ fw_error("Register access failure - "
+ "please notify linux1394-devel@lists.sf.net\n");
+
if (unlikely(event & OHCI1394_postedWriteErr))
fw_error("PCI posted write error\n");
@@ -1186,6 +1433,8 @@ static int ohci_enable(struct fw_card *card, u32 *config_rom, size_t length)
{
struct fw_ohci *ohci = fw_ohci(card);
struct pci_dev *dev = to_pci_dev(card->device);
+ u32 lps;
+ int i;
if (software_reset(ohci)) {
fw_error("Failed to reset ohci card.\n");
@@ -1197,13 +1446,24 @@ static int ohci_enable(struct fw_card *card, u32 *config_rom, size_t length)
* most of the registers. In fact, on some cards (ALI M5251),
* accessing registers in the SClk domain without LPS enabled
* will lock up the machine. Wait 50msec to make sure we have
- * full link enabled.
+ * full link enabled. However, with some cards (well, at least
+ * a JMicron PCIe card), we have to try again sometimes.
*/
reg_write(ohci, OHCI1394_HCControlSet,
OHCI1394_HCControl_LPS |
OHCI1394_HCControl_postedWriteEnable);
flush_writes(ohci);
- msleep(50);
+
+ for (lps = 0, i = 0; !lps && i < 3; i++) {
+ msleep(50);
+ lps = reg_read(ohci, OHCI1394_HCControlSet) &
+ OHCI1394_HCControl_LPS;
+ }
+
+ if (!lps) {
+ fw_error("Failed to set Link Power Status\n");
+ return -EIO;
+ }
reg_write(ohci, OHCI1394_HCControlClear,
OHCI1394_HCControl_noByteSwapData);
@@ -1231,7 +1491,10 @@ static int ohci_enable(struct fw_card *card, u32 *config_rom, size_t length)
OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
OHCI1394_isochRx | OHCI1394_isochTx |
OHCI1394_postedWriteErr | OHCI1394_cycleTooLong |
- OHCI1394_cycle64Seconds | OHCI1394_masterIntEnable);
+ OHCI1394_cycle64Seconds | OHCI1394_regAccessFail |
+ OHCI1394_masterIntEnable);
+ if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS)
+ reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
/* Activate link_on bit and contender bit in our self ID packets.*/
if (ohci_update_phy_reg(card, 4, 0,
@@ -1316,7 +1579,7 @@ ohci_set_config_rom(struct fw_card *card, u32 *config_rom, size_t length)
unsigned long flags;
int retval = -EBUSY;
__be32 *next_config_rom;
- dma_addr_t next_config_rom_bus;
+ dma_addr_t uninitialized_var(next_config_rom_bus);
ohci = fw_ohci(card);
@@ -1415,6 +1678,7 @@ static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
if (packet->ack != 0)
goto out;
+ log_ar_at_event('T', packet->speed, packet->header, 0x20);
driver_data->packet = NULL;
packet->ack = RCODE_CANCELLED;
packet->callback(packet, &ohci->card, packet->ack);
@@ -1429,6 +1693,9 @@ static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
static int
ohci_enable_phys_dma(struct fw_card *card, int node_id, int generation)
{
+#ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA
+ return 0;
+#else
struct fw_ohci *ohci = fw_ohci(card);
unsigned long flags;
int n, retval = 0;
@@ -1460,6 +1727,7 @@ ohci_enable_phys_dma(struct fw_card *card, int node_id, int generation)
out:
spin_unlock_irqrestore(&ohci->lock, flags);
return retval;
+#endif /* CONFIG_FIREWIRE_OHCI_REMOTE_DMA */
}
static u64
@@ -1487,7 +1755,7 @@ static int handle_ir_dualbuffer_packet(struct context *context,
void *p, *end;
int i;
- if (db->first_res_count > 0 && db->second_res_count > 0) {
+ if (db->first_res_count != 0 && db->second_res_count != 0) {
if (ctx->excess_bytes <= le16_to_cpu(db->second_req_count)) {
/* This descriptor isn't done yet, stop iteration. */
return 0;
@@ -1513,7 +1781,7 @@ static int handle_ir_dualbuffer_packet(struct context *context,
memcpy(ctx->header + i + 4, p + 8, ctx->base.header_size - 4);
i += ctx->base.header_size;
ctx->excess_bytes +=
- (le32_to_cpu(*(u32 *)(p + 4)) >> 16) & 0xffff;
+ (le32_to_cpu(*(__le32 *)(p + 4)) >> 16) & 0xffff;
p += ctx->base.header_size + 4;
}
ctx->header_length = i;
@@ -2039,6 +2307,35 @@ static const struct fw_card_driver ohci_driver = {
.stop_iso = ohci_stop_iso,
};
+#ifdef CONFIG_PPC_PMAC
+static void ohci_pmac_on(struct pci_dev *dev)
+{
+ if (machine_is(powermac)) {
+ struct device_node *ofn = pci_device_to_OF_node(dev);
+
+ if (ofn) {
+ pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 1);
+ pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1);
+ }
+ }
+}
+
+static void ohci_pmac_off(struct pci_dev *dev)
+{
+ if (machine_is(powermac)) {
+ struct device_node *ofn = pci_device_to_OF_node(dev);
+
+ if (ofn) {
+ pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
+ pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 0);
+ }
+ }
+}
+#else
+#define ohci_pmac_on(dev)
+#define ohci_pmac_off(dev)
+#endif /* CONFIG_PPC_PMAC */
+
static int __devinit
pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
{
@@ -2056,16 +2353,24 @@ pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev);
+ ohci_pmac_on(dev);
+
err = pci_enable_device(dev);
if (err) {
fw_error("Failed to enable OHCI hardware.\n");
- goto fail_put_card;
+ goto fail_free;
}
pci_set_master(dev);
pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);
pci_set_drvdata(dev, ohci);
+#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
+ ohci->old_uninorth = dev->vendor == PCI_VENDOR_ID_APPLE &&
+ dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW;
+#endif
+ ohci->bus_reset_packet_quirk = dev->vendor == PCI_VENDOR_ID_TI;
+
spin_lock_init(&ohci->lock);
tasklet_init(&ohci->bus_reset_tasklet,
@@ -2151,8 +2456,9 @@ pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
pci_release_region(dev, 0);
fail_disable:
pci_disable_device(dev);
- fail_put_card:
- fw_card_put(&ohci->card);
+ fail_free:
+ kfree(&ohci->card);
+ ohci_pmac_off(dev);
return err;
}
@@ -2180,39 +2486,42 @@ static void pci_remove(struct pci_dev *dev)
pci_iounmap(dev, ohci->registers);
pci_release_region(dev, 0);
pci_disable_device(dev);
- fw_card_put(&ohci->card);
+ kfree(&ohci->card);
+ ohci_pmac_off(dev);
fw_notify("Removed fw-ohci device.\n");
}
#ifdef CONFIG_PM
-static int pci_suspend(struct pci_dev *pdev, pm_message_t state)
+static int pci_suspend(struct pci_dev *dev, pm_message_t state)
{
- struct fw_ohci *ohci = pci_get_drvdata(pdev);
+ struct fw_ohci *ohci = pci_get_drvdata(dev);
int err;
software_reset(ohci);
- free_irq(pdev->irq, ohci);
- err = pci_save_state(pdev);
+ free_irq(dev->irq, ohci);
+ err = pci_save_state(dev);
if (err) {
fw_error("pci_save_state failed\n");
return err;
}
- err = pci_set_power_state(pdev, pci_choose_state(pdev, state));
+ err = pci_set_power_state(dev, pci_choose_state(dev, state));
if (err)
fw_error("pci_set_power_state failed with %d\n", err);
+ ohci_pmac_off(dev);
return 0;
}
-static int pci_resume(struct pci_dev *pdev)
+static int pci_resume(struct pci_dev *dev)
{
- struct fw_ohci *ohci = pci_get_drvdata(pdev);
+ struct fw_ohci *ohci = pci_get_drvdata(dev);
int err;
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
- err = pci_enable_device(pdev);
+ ohci_pmac_on(dev);
+ pci_set_power_state(dev, PCI_D0);
+ pci_restore_state(dev);
+ err = pci_enable_device(dev);
if (err) {
fw_error("pci_enable_device failed\n");
return err;
diff --git a/drivers/firewire/fw-ohci.h b/drivers/firewire/fw-ohci.h
index dec4f04e6b2..a2fbb6240ca 100644
--- a/drivers/firewire/fw-ohci.h
+++ b/drivers/firewire/fw-ohci.h
@@ -30,6 +30,7 @@
#define OHCI1394_HCControl_softReset 0x00010000
#define OHCI1394_SelfIDBuffer 0x064
#define OHCI1394_SelfIDCount 0x068
+#define OHCI1394_SelfIDCount_selfIDError 0x80000000
#define OHCI1394_IRMultiChanMaskHiSet 0x070
#define OHCI1394_IRMultiChanMaskHiClear 0x074
#define OHCI1394_IRMultiChanMaskLoSet 0x078
@@ -124,6 +125,7 @@
#define OHCI1394_lockRespErr 0x00000200
#define OHCI1394_selfIDComplete 0x00010000
#define OHCI1394_busReset 0x00020000
+#define OHCI1394_regAccessFail 0x00040000
#define OHCI1394_phy 0x00080000
#define OHCI1394_cycleSynch 0x00100000
#define OHCI1394_cycle64Seconds 0x00200000
diff --git a/drivers/firewire/fw-sbp2.c b/drivers/firewire/fw-sbp2.c
index 19ece9b6d74..2a999373863 100644
--- a/drivers/firewire/fw-sbp2.c
+++ b/drivers/firewire/fw-sbp2.c
@@ -28,14 +28,15 @@
* and many others.
*/
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/mod_devicetable.h>
-#include <linux/device.h>
#include <linux/scatterlist.h>
-#include <linux/dma-mapping.h>
-#include <linux/blkdev.h>
#include <linux/string.h>
#include <linux/stringify.h>
#include <linux/timer.h>
@@ -47,9 +48,9 @@
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
-#include "fw-transaction.h"
-#include "fw-topology.h"
#include "fw-device.h"
+#include "fw-topology.h"
+#include "fw-transaction.h"
/*
* So far only bridges from Oxford Semiconductor are known to support
@@ -82,6 +83,9 @@ MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device "
* Avoids access beyond actual disk limits on devices with an off-by-one bug.
* Don't use this with devices which don't have this bug.
*
+ * - delay inquiry
+ * Wait extra SBP2_INQUIRY_DELAY seconds after login before SCSI inquiry.
+ *
* - override internal blacklist
* Instead of adding to the built-in blacklist, use only the workarounds
* specified in the module load parameter.
@@ -91,6 +95,8 @@ MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device "
#define SBP2_WORKAROUND_INQUIRY_36 0x2
#define SBP2_WORKAROUND_MODE_SENSE_8 0x4
#define SBP2_WORKAROUND_FIX_CAPACITY 0x8
+#define SBP2_WORKAROUND_DELAY_INQUIRY 0x10
+#define SBP2_INQUIRY_DELAY 12
#define SBP2_WORKAROUND_OVERRIDE 0x100
static int sbp2_param_workarounds;
@@ -100,6 +106,7 @@ MODULE_PARM_DESC(workarounds, "Work around device bugs (default = 0"
", 36 byte inquiry = " __stringify(SBP2_WORKAROUND_INQUIRY_36)
", skip mode page 8 = " __stringify(SBP2_WORKAROUND_MODE_SENSE_8)
", fix capacity = " __stringify(SBP2_WORKAROUND_FIX_CAPACITY)
+ ", delay inquiry = " __stringify(SBP2_WORKAROUND_DELAY_INQUIRY)
", override internal blacklist = " __stringify(SBP2_WORKAROUND_OVERRIDE)
", or a combination)");
@@ -115,7 +122,6 @@ static const char sbp2_driver_name[] = "sbp2";
struct sbp2_logical_unit {
struct sbp2_target *tgt;
struct list_head link;
- struct scsi_device *sdev;
struct fw_address_handler address_handler;
struct list_head orb_list;
@@ -132,6 +138,8 @@ struct sbp2_logical_unit {
int generation;
int retries;
struct delayed_work work;
+ bool has_sdev;
+ bool blocked;
};
/*
@@ -141,16 +149,19 @@ struct sbp2_logical_unit {
struct sbp2_target {
struct kref kref;
struct fw_unit *unit;
+ const char *bus_id;
+ struct list_head lu_list;
u64 management_agent_address;
+ u64 guid;
int directory_id;
int node_id;
int address_high;
-
- unsigned workarounds;
- struct list_head lu_list;
-
+ unsigned int workarounds;
unsigned int mgt_orb_timeout;
+
+ int dont_block; /* counter for each logical unit */
+ int blocked; /* ditto */
};
/*
@@ -160,12 +171,11 @@ struct sbp2_target {
*/
#define SBP2_MIN_LOGIN_ORB_TIMEOUT 5000U /* Timeout in ms */
#define SBP2_MAX_LOGIN_ORB_TIMEOUT 40000U /* Timeout in ms */
-#define SBP2_ORB_TIMEOUT 2000 /* Timeout in ms */
+#define SBP2_ORB_TIMEOUT 2000U /* Timeout in ms */
#define SBP2_ORB_NULL 0x80000000
#define SBP2_MAX_SG_ELEMENT_LENGTH 0xf000
-
-#define SBP2_DIRECTION_TO_MEDIA 0x0
-#define SBP2_DIRECTION_FROM_MEDIA 0x1
+#define SBP2_RETRY_LIMIT 0xf /* 15 retries */
+#define SBP2_CYCLE_LIMIT (0xc8 << 12) /* 200 125us cycles */
/* Unit directory keys */
#define SBP2_CSR_UNIT_CHARACTERISTICS 0x3a
@@ -213,8 +223,8 @@ struct sbp2_status {
};
struct sbp2_pointer {
- u32 high;
- u32 low;
+ __be32 high;
+ __be32 low;
};
struct sbp2_orb {
@@ -242,8 +252,8 @@ struct sbp2_management_orb {
struct {
struct sbp2_pointer password;
struct sbp2_pointer response;
- u32 misc;
- u32 length;
+ __be32 misc;
+ __be32 length;
struct sbp2_pointer status_fifo;
} request;
__be32 response[4];
@@ -252,20 +262,17 @@ struct sbp2_management_orb {
struct sbp2_status status;
};
-#define LOGIN_RESPONSE_GET_LOGIN_ID(v) ((v).misc & 0xffff)
-#define LOGIN_RESPONSE_GET_LENGTH(v) (((v).misc >> 16) & 0xffff)
-
struct sbp2_login_response {
- u32 misc;
+ __be32 misc;
struct sbp2_pointer command_block_agent;
- u32 reconnect_hold;
+ __be32 reconnect_hold;
};
#define COMMAND_ORB_DATA_SIZE(v) ((v))
#define COMMAND_ORB_PAGE_SIZE(v) ((v) << 16)
#define COMMAND_ORB_PAGE_TABLE_PRESENT ((1) << 19)
#define COMMAND_ORB_MAX_PAYLOAD(v) ((v) << 20)
#define COMMAND_ORB_SPEED(v) ((v) << 24)
-#define COMMAND_ORB_DIRECTION(v) ((v) << 27)
+#define COMMAND_ORB_DIRECTION ((1) << 27)
#define COMMAND_ORB_REQUEST_FORMAT(v) ((v) << 29)
#define COMMAND_ORB_NOTIFY ((1) << 31)
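
To make the encoding concrete: for a read command at S400 with max_payload 9, the ORB's misc field is COMMAND_ORB_NOTIFY | COMMAND_ORB_DIRECTION | COMMAND_ORB_SPEED(2) | COMMAND_ORB_MAX_PAYLOAD(9), i.e. 0x80000000 | 0x08000000 | 0x02000000 | 0x00900000 == 0x8a900000, now stored big-endian via cpu_to_be32() as in the queuecommand changes further down.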
@@ -274,7 +281,7 @@ struct sbp2_command_orb {
struct {
struct sbp2_pointer next;
struct sbp2_pointer data_descriptor;
- u32 misc;
+ __be32 misc;
u8 command_block[12];
} request;
struct scsi_cmnd *cmd;
@@ -297,7 +304,7 @@ struct sbp2_command_orb {
static const struct {
u32 firmware_revision;
u32 model;
- unsigned workarounds;
+ unsigned int workarounds;
} sbp2_workarounds_table[] = {
/* DViCO Momobay CX-1 with TSB42AA9 bridge */ {
.firmware_revision = 0x002800,
@@ -305,6 +312,11 @@ static const struct {
.workarounds = SBP2_WORKAROUND_INQUIRY_36 |
SBP2_WORKAROUND_MODE_SENSE_8,
},
+ /* DViCO Momobay FX-3A with TSB42AA9A bridge */ {
+ .firmware_revision = 0x002800,
+ .model = 0x000000,
+ .workarounds = SBP2_WORKAROUND_DELAY_INQUIRY,
+ },
/* Initio bridges, actually only needed for some older ones */ {
.firmware_revision = 0x000200,
.model = ~0,
@@ -315,6 +327,11 @@ static const struct {
.model = ~0,
.workarounds = SBP2_WORKAROUND_128K_MAX_TRANS,
},
+ /* Datafab MD2-FW2 with Symbios/LSILogic SYM13FW500 bridge */ {
+ .firmware_revision = 0x002600,
+ .model = ~0,
+ .workarounds = SBP2_WORKAROUND_128K_MAX_TRANS,
+ },
/*
* There are iPods (2nd gen, 3rd gen) with model_id == 0, but
@@ -438,8 +455,7 @@ sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu,
unsigned long flags;
orb->pointer.high = 0;
- orb->pointer.low = orb->request_bus;
- fw_memcpy_to_be32(&orb->pointer, &orb->pointer, sizeof(orb->pointer));
+ orb->pointer.low = cpu_to_be32(orb->request_bus);
spin_lock_irqsave(&device->card->lock, flags);
list_add_tail(&orb->link, &lu->orb_list);
@@ -501,6 +517,9 @@ sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id,
unsigned int timeout;
int retval = -ENOMEM;
+ if (function == SBP2_LOGOUT_REQUEST && fw_device_is_shutdown(device))
+ return 0;
+
orb = kzalloc(sizeof(*orb), GFP_ATOMIC);
if (orb == NULL)
return -ENOMEM;
@@ -512,31 +531,31 @@ sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id,
if (dma_mapping_error(orb->response_bus))
goto fail_mapping_response;
- orb->request.response.high = 0;
- orb->request.response.low = orb->response_bus;
+ orb->request.response.high = 0;
+ orb->request.response.low = cpu_to_be32(orb->response_bus);
- orb->request.misc =
+ orb->request.misc = cpu_to_be32(
MANAGEMENT_ORB_NOTIFY |
MANAGEMENT_ORB_FUNCTION(function) |
- MANAGEMENT_ORB_LUN(lun_or_login_id);
- orb->request.length =
- MANAGEMENT_ORB_RESPONSE_LENGTH(sizeof(orb->response));
+ MANAGEMENT_ORB_LUN(lun_or_login_id));
+ orb->request.length = cpu_to_be32(
+ MANAGEMENT_ORB_RESPONSE_LENGTH(sizeof(orb->response)));
- orb->request.status_fifo.high = lu->address_handler.offset >> 32;
- orb->request.status_fifo.low = lu->address_handler.offset;
+ orb->request.status_fifo.high =
+ cpu_to_be32(lu->address_handler.offset >> 32);
+ orb->request.status_fifo.low =
+ cpu_to_be32(lu->address_handler.offset);
if (function == SBP2_LOGIN_REQUEST) {
/* Ask for 2^2 == 4 seconds reconnect grace period */
- orb->request.misc |=
+ orb->request.misc |= cpu_to_be32(
MANAGEMENT_ORB_RECONNECT(2) |
- MANAGEMENT_ORB_EXCLUSIVE(sbp2_param_exclusive_login);
+ MANAGEMENT_ORB_EXCLUSIVE(sbp2_param_exclusive_login));
timeout = lu->tgt->mgt_orb_timeout;
} else {
timeout = SBP2_ORB_TIMEOUT;
}
- fw_memcpy_to_be32(&orb->request, &orb->request, sizeof(orb->request));
-
init_completion(&orb->done);
orb->base.callback = complete_management_orb;
@@ -553,20 +572,20 @@ sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id,
retval = -EIO;
if (sbp2_cancel_orbs(lu) == 0) {
- fw_error("orb reply timed out, rcode=0x%02x\n",
- orb->base.rcode);
+ fw_error("%s: orb reply timed out, rcode=0x%02x\n",
+ lu->tgt->bus_id, orb->base.rcode);
goto out;
}
if (orb->base.rcode != RCODE_COMPLETE) {
- fw_error("management write failed, rcode 0x%02x\n",
- orb->base.rcode);
+ fw_error("%s: management write failed, rcode 0x%02x\n",
+ lu->tgt->bus_id, orb->base.rcode);
goto out;
}
if (STATUS_GET_RESPONSE(orb->status) != 0 ||
STATUS_GET_SBP_STATUS(orb->status) != 0) {
- fw_error("error status: %d:%d\n",
+ fw_error("%s: error status: %d:%d\n", lu->tgt->bus_id,
STATUS_GET_RESPONSE(orb->status),
STATUS_GET_SBP_STATUS(orb->status));
goto out;
@@ -581,8 +600,7 @@ sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id,
sizeof(orb->response), DMA_FROM_DEVICE);
fail_mapping_response:
if (response)
- fw_memcpy_from_be32(response,
- orb->response, sizeof(orb->response));
+ memcpy(response, orb->response, sizeof(orb->response));
kref_put(&orb->base.kref, free_orb);
return retval;
@@ -590,29 +608,154 @@ sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id,
static void
complete_agent_reset_write(struct fw_card *card, int rcode,
- void *payload, size_t length, void *data)
+ void *payload, size_t length, void *done)
{
- struct fw_transaction *t = data;
+ complete(done);
+}
- kfree(t);
+static void sbp2_agent_reset(struct sbp2_logical_unit *lu)
+{
+ struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
+ DECLARE_COMPLETION_ONSTACK(done);
+ struct fw_transaction t;
+ static u32 z;
+
+ fw_send_request(device->card, &t, TCODE_WRITE_QUADLET_REQUEST,
+ lu->tgt->node_id, lu->generation, device->max_speed,
+ lu->command_block_agent_address + SBP2_AGENT_RESET,
+ &z, sizeof(z), complete_agent_reset_write, &done);
+ wait_for_completion(&done);
}
-static int sbp2_agent_reset(struct sbp2_logical_unit *lu)
+static void
+complete_agent_reset_write_no_wait(struct fw_card *card, int rcode,
+ void *payload, size_t length, void *data)
+{
+ kfree(data);
+}
+
+static void sbp2_agent_reset_no_wait(struct sbp2_logical_unit *lu)
{
struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
struct fw_transaction *t;
- static u32 zero;
+ static u32 z;
- t = kzalloc(sizeof(*t), GFP_ATOMIC);
+ t = kmalloc(sizeof(*t), GFP_ATOMIC);
if (t == NULL)
- return -ENOMEM;
+ return;
fw_send_request(device->card, t, TCODE_WRITE_QUADLET_REQUEST,
lu->tgt->node_id, lu->generation, device->max_speed,
lu->command_block_agent_address + SBP2_AGENT_RESET,
- &zero, sizeof(zero), complete_agent_reset_write, t);
+ &z, sizeof(z), complete_agent_reset_write_no_wait, t);
+}
- return 0;
+static void sbp2_set_generation(struct sbp2_logical_unit *lu, int generation)
+{
+ struct fw_card *card = fw_device(lu->tgt->unit->device.parent)->card;
+ unsigned long flags;
+
+ /* serialize with comparisons of lu->generation and card->generation */
+ spin_lock_irqsave(&card->lock, flags);
+ lu->generation = generation;
+ spin_unlock_irqrestore(&card->lock, flags);
+}
+
+static inline void sbp2_allow_block(struct sbp2_logical_unit *lu)
+{
+ /*
+ * We may access dont_block without taking card->lock here:
+ * All callers of sbp2_allow_block() and all callers of sbp2_unblock()
+ * are currently serialized against each other.
+ * And a wrong result in sbp2_conditionally_block()'s access of
+ * dont_block is rather harmless; it simply misses its first chance.
+ */
+ --lu->tgt->dont_block;
+}
+
+/*
+ * Blocks lu->tgt if all of the following conditions are met:
+ * - Login, INQUIRY, and high-level SCSI setup of all of the target's
+ * logical units have been finished (indicated by dont_block == 0).
+ * - lu->generation is stale.
+ *
+ * Note, scsi_block_requests() must be called while holding card->lock,
+ * otherwise it might foil sbp2_[conditionally_]unblock()'s attempt to
+ * unblock the target.
+ */
+static void sbp2_conditionally_block(struct sbp2_logical_unit *lu)
+{
+ struct sbp2_target *tgt = lu->tgt;
+ struct fw_card *card = fw_device(tgt->unit->device.parent)->card;
+ struct Scsi_Host *shost =
+ container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
+ unsigned long flags;
+
+ spin_lock_irqsave(&card->lock, flags);
+ if (!tgt->dont_block && !lu->blocked &&
+ lu->generation != card->generation) {
+ lu->blocked = true;
+ if (++tgt->blocked == 1)
+ scsi_block_requests(shost);
+ }
+ spin_unlock_irqrestore(&card->lock, flags);
+}
+
+/*
+ * Unblocks lu->tgt as soon as all its logical units can be unblocked.
+ * Note, it is harmless to run scsi_unblock_requests() outside the
+ * card->lock protected section. On the other hand, running it inside
+ * the section might clash with shost->host_lock.
+ */
+static void sbp2_conditionally_unblock(struct sbp2_logical_unit *lu)
+{
+ struct sbp2_target *tgt = lu->tgt;
+ struct fw_card *card = fw_device(tgt->unit->device.parent)->card;
+ struct Scsi_Host *shost =
+ container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
+ unsigned long flags;
+ bool unblock = false;
+
+ spin_lock_irqsave(&card->lock, flags);
+ if (lu->blocked && lu->generation == card->generation) {
+ lu->blocked = false;
+ unblock = --tgt->blocked == 0;
+ }
+ spin_unlock_irqrestore(&card->lock, flags);
+
+ if (unblock)
+ scsi_unblock_requests(shost);
+}
+
+/*
+ * Prevents future blocking of tgt and unblocks it.
+ * Note, it is harmless to run scsi_unblock_requests() outside the
+ * card->lock protected section. On the other hand, running it inside
+ * the section might clash with shost->host_lock.
+ */
+static void sbp2_unblock(struct sbp2_target *tgt)
+{
+ struct fw_card *card = fw_device(tgt->unit->device.parent)->card;
+ struct Scsi_Host *shost =
+ container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
+ unsigned long flags;
+
+ spin_lock_irqsave(&card->lock, flags);
+ ++tgt->dont_block;
+ spin_unlock_irqrestore(&card->lock, flags);
+
+ scsi_unblock_requests(shost);
+}
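
To make the counting concrete: a target with two logical units starts with dont_block == 2, one increment per sbp2_add_logical_unit(). Each unit decrements the counter via sbp2_allow_block() once its login and SCSI probing have finished, so only after both are set up can sbp2_conditionally_block() freeze the Scsi_Host on a stale generation. tgt->blocked then counts how many units currently hold a block, and the host is unblocked exactly when the last of them has reconnected, or when sbp2_unblock() overrides everything during target release or final login failure.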
+
+static int sbp2_lun2int(u16 lun)
+{
+ struct scsi_lun eight_bytes_lun;
+
+ memset(&eight_bytes_lun, 0, sizeof(eight_bytes_lun));
+ eight_bytes_lun.scsi_lun[0] = (lun >> 8) & 0xff;
+ eight_bytes_lun.scsi_lun[1] = lun & 0xff;
+
+ return scsilun_to_int(&eight_bytes_lun);
}
static void sbp2_release_target(struct kref *kref)
@@ -621,26 +764,31 @@ static void sbp2_release_target(struct kref *kref)
struct sbp2_logical_unit *lu, *next;
struct Scsi_Host *shost =
container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
+ struct scsi_device *sdev;
struct fw_device *device = fw_device(tgt->unit->device.parent);
- list_for_each_entry_safe(lu, next, &tgt->lu_list, link) {
- if (lu->sdev)
- scsi_remove_device(lu->sdev);
+ /* prevent deadlocks */
+ sbp2_unblock(tgt);
- if (!fw_device_is_shutdown(device))
- sbp2_send_management_orb(lu, tgt->node_id,
- lu->generation, SBP2_LOGOUT_REQUEST,
- lu->login_id, NULL);
+ list_for_each_entry_safe(lu, next, &tgt->lu_list, link) {
+ sdev = scsi_device_lookup(shost, 0, 0, sbp2_lun2int(lu->lun));
+ if (sdev) {
+ scsi_remove_device(sdev);
+ scsi_device_put(sdev);
+ }
+ sbp2_send_management_orb(lu, tgt->node_id, lu->generation,
+ SBP2_LOGOUT_REQUEST, lu->login_id, NULL);
fw_core_remove_address_handler(&lu->address_handler);
list_del(&lu->link);
kfree(lu);
}
scsi_remove_host(shost);
- fw_notify("released %s\n", tgt->unit->device.bus_id);
+ fw_notify("released %s\n", tgt->bus_id);
- put_device(&tgt->unit->device);
+ fw_unit_put(tgt->unit);
scsi_host_put(shost);
+ fw_device_put(device);
}
static struct workqueue_struct *sbp2_wq;
@@ -660,77 +808,154 @@ static void sbp2_target_put(struct sbp2_target *tgt)
kref_put(&tgt->kref, sbp2_release_target);
}
+static void
+complete_set_busy_timeout(struct fw_card *card, int rcode,
+ void *payload, size_t length, void *done)
+{
+ complete(done);
+}
+
+/*
+ * Write retransmit retry values into the BUSY_TIMEOUT register.
+ * - The single-phase retry protocol is supported by all SBP-2 devices, but the
+ * default retry_limit value is 0 (i.e. never retry transmission). We write a
+ * saner value after logging into the device.
+ * - The dual-phase retry protocol is optional to implement, and if not
+ * supported, writes to the dual-phase portion of the register will be
+ * ignored. We try to write the original 1394-1995 default here.
+ * - In the case of devices that are also SBP-3-compliant, all writes are
+ * ignored, as the register is read-only, but contains single-phase retry of
+ * 15, which is what we're trying to set for all SBP-2 devices anyway, so this
+ * write attempt is safe and yields more consistent behavior for all devices.
+ *
+ * See section 8.3.2.3.5 of the 1394-1995 spec, section 6.2 of the SBP-2 spec,
+ * and section 6.4 of the SBP-3 spec for further details.
+ */
+static void sbp2_set_busy_timeout(struct sbp2_logical_unit *lu)
+{
+ struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
+ DECLARE_COMPLETION_ONSTACK(done);
+ struct fw_transaction t;
+ static __be32 busy_timeout;
+
+ busy_timeout = cpu_to_be32(SBP2_CYCLE_LIMIT | SBP2_RETRY_LIMIT);
+
+ fw_send_request(device->card, &t, TCODE_WRITE_QUADLET_REQUEST,
+ lu->tgt->node_id, lu->generation, device->max_speed,
+ CSR_REGISTER_BASE + CSR_BUSY_TIMEOUT, &busy_timeout,
+ sizeof(busy_timeout), complete_set_busy_timeout, &done);
+ wait_for_completion(&done);
+}
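
Worked value: with the constants above, the quadlet written to CSR_BUSY_TIMEOUT is cpu_to_be32((0xc8 << 12) | 0xf) == cpu_to_be32(0x000c800f), i.e. a dual-phase cycle limit of 200 isochronous cycles (200 * 125 us = 25 ms) and a single-phase retry limit of 15.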
+
static void sbp2_reconnect(struct work_struct *work);
static void sbp2_login(struct work_struct *work)
{
struct sbp2_logical_unit *lu =
container_of(work, struct sbp2_logical_unit, work.work);
- struct Scsi_Host *shost =
- container_of((void *)lu->tgt, struct Scsi_Host, hostdata[0]);
+ struct sbp2_target *tgt = lu->tgt;
+ struct fw_device *device = fw_device(tgt->unit->device.parent);
+ struct Scsi_Host *shost;
struct scsi_device *sdev;
- struct scsi_lun eight_bytes_lun;
- struct fw_unit *unit = lu->tgt->unit;
- struct fw_device *device = fw_device(unit->device.parent);
struct sbp2_login_response response;
int generation, node_id, local_node_id;
+ if (fw_device_is_shutdown(device))
+ goto out;
+
generation = device->generation;
smp_rmb(); /* node_id must not be older than generation */
node_id = device->node_id;
local_node_id = device->card->node_id;
+ /* If this is a re-login attempt, log out, or we might be rejected. */
+ if (lu->has_sdev)
+ sbp2_send_management_orb(lu, device->node_id, generation,
+ SBP2_LOGOUT_REQUEST, lu->login_id, NULL);
+
if (sbp2_send_management_orb(lu, node_id, generation,
SBP2_LOGIN_REQUEST, lu->lun, &response) < 0) {
- if (lu->retries++ < 5)
+ if (lu->retries++ < 5) {
sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5));
- else
- fw_error("failed to login to %s LUN %04x\n",
- unit->device.bus_id, lu->lun);
+ } else {
+ fw_error("%s: failed to login to LUN %04x\n",
+ tgt->bus_id, lu->lun);
+ /* Let any waiting I/O fail from now on. */
+ sbp2_unblock(lu->tgt);
+ }
goto out;
}
- lu->generation = generation;
- lu->tgt->node_id = node_id;
- lu->tgt->address_high = local_node_id << 16;
+ tgt->node_id = node_id;
+ tgt->address_high = local_node_id << 16;
+ sbp2_set_generation(lu, generation);
- /* Get command block agent offset and login id. */
lu->command_block_agent_address =
- ((u64) (response.command_block_agent.high & 0xffff) << 32) |
- response.command_block_agent.low;
- lu->login_id = LOGIN_RESPONSE_GET_LOGIN_ID(response);
+ ((u64)(be32_to_cpu(response.command_block_agent.high) & 0xffff)
+ << 32) | be32_to_cpu(response.command_block_agent.low);
+ lu->login_id = be32_to_cpu(response.misc) & 0xffff;
- fw_notify("logged in to %s LUN %04x (%d retries)\n",
- unit->device.bus_id, lu->lun, lu->retries);
+ fw_notify("%s: logged in to LUN %04x (%d retries)\n",
+ tgt->bus_id, lu->lun, lu->retries);
-#if 0
- /* FIXME: The linux1394 sbp2 does this last step. */
- sbp2_set_busy_timeout(scsi_id);
-#endif
+ /* set appropriate retry limit(s) in BUSY_TIMEOUT register */
+ sbp2_set_busy_timeout(lu);
PREPARE_DELAYED_WORK(&lu->work, sbp2_reconnect);
sbp2_agent_reset(lu);
- memset(&eight_bytes_lun, 0, sizeof(eight_bytes_lun));
- eight_bytes_lun.scsi_lun[0] = (lu->lun >> 8) & 0xff;
- eight_bytes_lun.scsi_lun[1] = lu->lun & 0xff;
+ /* This was a re-login. */
+ if (lu->has_sdev) {
+ sbp2_cancel_orbs(lu);
+ sbp2_conditionally_unblock(lu);
+ goto out;
+ }
- sdev = __scsi_add_device(shost, 0, 0,
- scsilun_to_int(&eight_bytes_lun), lu);
- if (IS_ERR(sdev)) {
- sbp2_send_management_orb(lu, node_id, generation,
- SBP2_LOGOUT_REQUEST, lu->login_id, NULL);
- /*
- * Set this back to sbp2_login so we fall back and
- * retry login on bus reset.
- */
- PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
- } else {
- lu->sdev = sdev;
+ if (lu->tgt->workarounds & SBP2_WORKAROUND_DELAY_INQUIRY)
+ ssleep(SBP2_INQUIRY_DELAY);
+
+ shost = container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
+ sdev = __scsi_add_device(shost, 0, 0, sbp2_lun2int(lu->lun), lu);
+ /*
+ * FIXME: We are unable to perform reconnects while in sbp2_login().
+ * Therefore __scsi_add_device() will get into trouble if a bus reset
+ * happens in parallel. It will either fail or leave us with an
+ * unusable sdev. As a workaround we check for this and retry the
+ * whole login and SCSI probing.
+ */
+
+ /* Reported error during __scsi_add_device() */
+ if (IS_ERR(sdev))
+ goto out_logout_login;
+
+ /* Unreported error during __scsi_add_device() */
+ smp_rmb(); /* get current card generation */
+ if (generation != device->card->generation) {
+ scsi_remove_device(sdev);
scsi_device_put(sdev);
+ goto out_logout_login;
}
+
+ /* No error during __scsi_add_device() */
+ lu->has_sdev = true;
+ scsi_device_put(sdev);
+ sbp2_allow_block(lu);
+ goto out;
+
+ out_logout_login:
+ smp_rmb(); /* generation may have changed */
+ generation = device->generation;
+ smp_rmb(); /* node_id must not be older than generation */
+
+ sbp2_send_management_orb(lu, device->node_id, generation,
+ SBP2_LOGOUT_REQUEST, lu->login_id, NULL);
+ /*
+ * If a bus reset happened, sbp2_update will have requeued
+ * lu->work already. Reset the work from reconnect to login.
+ */
+ PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
out:
- sbp2_target_put(lu->tgt);
+ sbp2_target_put(tgt);
}
static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry)
@@ -751,10 +976,12 @@ static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry)
return -ENOMEM;
}
- lu->tgt = tgt;
- lu->sdev = NULL;
- lu->lun = lun_entry & 0xffff;
- lu->retries = 0;
+ lu->tgt = tgt;
+ lu->lun = lun_entry & 0xffff;
+ lu->retries = 0;
+ lu->has_sdev = false;
+ lu->blocked = false;
+ ++tgt->dont_block;
INIT_LIST_HEAD(&lu->orb_list);
INIT_DELAYED_WORK(&lu->work, sbp2_login);
@@ -813,7 +1040,7 @@ static int sbp2_scan_unit_dir(struct sbp2_target *tgt, u32 *directory,
if (timeout > tgt->mgt_orb_timeout)
fw_notify("%s: config rom contains %ds "
"management ORB timeout, limiting "
- "to %ds\n", tgt->unit->device.bus_id,
+ "to %ds\n", tgt->bus_id,
timeout / 1000,
tgt->mgt_orb_timeout / 1000);
break;
@@ -836,12 +1063,12 @@ static void sbp2_init_workarounds(struct sbp2_target *tgt, u32 model,
u32 firmware_revision)
{
int i;
- unsigned w = sbp2_param_workarounds;
+ unsigned int w = sbp2_param_workarounds;
if (w)
fw_notify("Please notify linux1394-devel@lists.sourceforge.net "
"if you need the workarounds parameter for %s\n",
- tgt->unit->device.bus_id);
+ tgt->bus_id);
if (w & SBP2_WORKAROUND_OVERRIDE)
goto out;
@@ -863,8 +1090,7 @@ static void sbp2_init_workarounds(struct sbp2_target *tgt, u32 model,
if (w)
fw_notify("Workarounds for %s: 0x%x "
"(firmware_revision 0x%06x, model_id 0x%06x)\n",
- tgt->unit->device.bus_id,
- w, firmware_revision, model);
+ tgt->bus_id, w, firmware_revision, model);
tgt->workarounds = w;
}
@@ -888,6 +1114,8 @@ static int sbp2_probe(struct device *dev)
tgt->unit = unit;
kref_init(&tgt->kref);
INIT_LIST_HEAD(&tgt->lu_list);
+ tgt->bus_id = unit->device.bus_id;
+ tgt->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4];
if (fw_device_enable_phys_dma(device) < 0)
goto fail_shost_put;
@@ -895,6 +1123,9 @@ static int sbp2_probe(struct device *dev)
if (scsi_add_host(shost, &unit->device) < 0)
goto fail_shost_put;
+ fw_device_get(device);
+ fw_unit_get(unit);
+
/* Initialize to values that won't match anything in our table. */
firmware_revision = 0xff000000;
model = 0xff000000;
@@ -909,8 +1140,6 @@ static int sbp2_probe(struct device *dev)
sbp2_init_workarounds(tgt, model, firmware_revision);
- get_device(&unit->device);
-
/* Do the login in a workqueue so we can easily reschedule retries. */
list_for_each_entry(lu, &tgt->lu_list, link)
sbp2_queue_work(lu, 0);
@@ -938,10 +1167,13 @@ static void sbp2_reconnect(struct work_struct *work)
{
struct sbp2_logical_unit *lu =
container_of(work, struct sbp2_logical_unit, work.work);
- struct fw_unit *unit = lu->tgt->unit;
- struct fw_device *device = fw_device(unit->device.parent);
+ struct sbp2_target *tgt = lu->tgt;
+ struct fw_device *device = fw_device(tgt->unit->device.parent);
int generation, node_id, local_node_id;
+ if (fw_device_is_shutdown(device))
+ goto out;
+
generation = device->generation;
smp_rmb(); /* node_id must not be older than generation */
node_id = device->node_id;
@@ -950,10 +1182,17 @@ static void sbp2_reconnect(struct work_struct *work)
if (sbp2_send_management_orb(lu, node_id, generation,
SBP2_RECONNECT_REQUEST,
lu->login_id, NULL) < 0) {
- if (lu->retries++ >= 5) {
- fw_error("failed to reconnect to %s\n",
- unit->device.bus_id);
- /* Fall back and try to log in again. */
+ /*
+ * If reconnect was impossible even though we are in the
+ * current generation, fall back and try to log in again.
+ *
+ * We could check for "Function rejected" status, but
+ * looking at the bus generation is simpler and more general.
+ */
+ smp_rmb(); /* get current card generation */
+ if (generation == device->card->generation ||
+ lu->retries++ >= 5) {
+ fw_error("%s: failed to reconnect\n", tgt->bus_id);
lu->retries = 0;
PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
}
@@ -961,17 +1200,18 @@ static void sbp2_reconnect(struct work_struct *work)
goto out;
}
- lu->generation = generation;
- lu->tgt->node_id = node_id;
- lu->tgt->address_high = local_node_id << 16;
+ tgt->node_id = node_id;
+ tgt->address_high = local_node_id << 16;
+ sbp2_set_generation(lu, generation);
- fw_notify("reconnected to %s LUN %04x (%d retries)\n",
- unit->device.bus_id, lu->lun, lu->retries);
+ fw_notify("%s: reconnected to LUN %04x (%d retries)\n",
+ tgt->bus_id, lu->lun, lu->retries);
sbp2_agent_reset(lu);
sbp2_cancel_orbs(lu);
+ sbp2_conditionally_unblock(lu);
out:
- sbp2_target_put(lu->tgt);
+ sbp2_target_put(tgt);
}
static void sbp2_update(struct fw_unit *unit)
@@ -986,6 +1226,7 @@ static void sbp2_update(struct fw_unit *unit)
* Iteration over tgt->lu_list is therefore safe here.
*/
list_for_each_entry(lu, &tgt->lu_list, link) {
+ sbp2_conditionally_block(lu);
lu->retries = 0;
sbp2_queue_work(lu, 0);
}
@@ -1063,7 +1304,7 @@ complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status)
if (status != NULL) {
if (STATUS_GET_DEAD(*status))
- sbp2_agent_reset(orb->lu);
+ sbp2_agent_reset_no_wait(orb->lu);
switch (STATUS_GET_RESPONSE(*status)) {
case SBP2_STATUS_REQUEST_COMPLETE:
@@ -1089,6 +1330,7 @@ complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status)
* or when sending the write (less likely).
*/
result = DID_BUS_BUSY << 16;
+ sbp2_conditionally_block(orb->lu);
}
dma_unmap_single(device->card->device, orb->base.request_bus,
@@ -1129,9 +1371,12 @@ sbp2_map_scatterlist(struct sbp2_command_orb *orb, struct fw_device *device,
* tables.
*/
if (count == 1 && sg_dma_len(sg) < SBP2_MAX_SG_ELEMENT_LENGTH) {
- orb->request.data_descriptor.high = lu->tgt->address_high;
- orb->request.data_descriptor.low = sg_dma_address(sg);
- orb->request.misc |= COMMAND_ORB_DATA_SIZE(sg_dma_len(sg));
+ orb->request.data_descriptor.high =
+ cpu_to_be32(lu->tgt->address_high);
+ orb->request.data_descriptor.low =
+ cpu_to_be32(sg_dma_address(sg));
+ orb->request.misc |=
+ cpu_to_be32(COMMAND_ORB_DATA_SIZE(sg_dma_len(sg)));
return 0;
}
@@ -1152,16 +1397,14 @@ sbp2_map_scatterlist(struct sbp2_command_orb *orb, struct fw_device *device,
goto fail_page_table;
}
l = min(sg_len, SBP2_MAX_SG_ELEMENT_LENGTH);
- orb->page_table[j].low = sg_addr;
- orb->page_table[j].high = (l << 16);
+ orb->page_table[j].low = cpu_to_be32(sg_addr);
+ orb->page_table[j].high = cpu_to_be32(l << 16);
sg_addr += l;
sg_len -= l;
j++;
}
}
- fw_memcpy_to_be32(orb->page_table, orb->page_table,
- sizeof(orb->page_table[0]) * j);
orb->page_table_bus =
dma_map_single(device->card->device, orb->page_table,
sizeof(orb->page_table), DMA_TO_DEVICE);
@@ -1175,11 +1418,10 @@ sbp2_map_scatterlist(struct sbp2_command_orb *orb, struct fw_device *device,
* initiator (i.e. us), but data_descriptor can refer to data
* on other nodes so we need to put our ID in descriptor.high.
*/
- orb->request.data_descriptor.high = lu->tgt->address_high;
- orb->request.data_descriptor.low = orb->page_table_bus;
- orb->request.misc |=
- COMMAND_ORB_PAGE_TABLE_PRESENT |
- COMMAND_ORB_DATA_SIZE(j);
+ orb->request.data_descriptor.high = cpu_to_be32(lu->tgt->address_high);
+ orb->request.data_descriptor.low = cpu_to_be32(orb->page_table_bus);
+ orb->request.misc |= cpu_to_be32(COMMAND_ORB_PAGE_TABLE_PRESENT |
+ COMMAND_ORB_DATA_SIZE(j));
return 0;
@@ -1197,7 +1439,7 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
struct sbp2_logical_unit *lu = cmd->device->hostdata;
struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
struct sbp2_command_orb *orb;
- unsigned max_payload;
+ unsigned int max_payload;
int retval = SCSI_MLQUEUE_HOST_BUSY;
/*
@@ -1225,8 +1467,7 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
orb->done = done;
orb->cmd = cmd;
- orb->request.next.high = SBP2_ORB_NULL;
- orb->request.next.low = 0x0;
+ orb->request.next.high = cpu_to_be32(SBP2_ORB_NULL);
/*
* At speed 100 we can do 512 bytes per packet, at speed 200,
* 1024 bytes per packet etc. The SBP-2 max_payload field
@@ -1235,25 +1476,17 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
*/
max_payload = min(device->max_speed + 7,
device->card->max_receive - 1);
- orb->request.misc =
+ orb->request.misc = cpu_to_be32(
COMMAND_ORB_MAX_PAYLOAD(max_payload) |
COMMAND_ORB_SPEED(device->max_speed) |
- COMMAND_ORB_NOTIFY;
+ COMMAND_ORB_NOTIFY);
if (cmd->sc_data_direction == DMA_FROM_DEVICE)
- orb->request.misc |=
- COMMAND_ORB_DIRECTION(SBP2_DIRECTION_FROM_MEDIA);
- else if (cmd->sc_data_direction == DMA_TO_DEVICE)
- orb->request.misc |=
- COMMAND_ORB_DIRECTION(SBP2_DIRECTION_TO_MEDIA);
+ orb->request.misc |= cpu_to_be32(COMMAND_ORB_DIRECTION);
if (scsi_sg_count(cmd) && sbp2_map_scatterlist(orb, device, lu) < 0)
goto out;
- fw_memcpy_to_be32(&orb->request, &orb->request, sizeof(orb->request));
-
- memset(orb->request.command_block,
- 0, sizeof(orb->request.command_block));
memcpy(orb->request.command_block, cmd->cmnd, COMMAND_SIZE(*cmd->cmnd));
orb->base.callback = complete_command_orb;
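
A worked example for the max_payload computation in this hunk: an S400 device has device->max_speed == 2, so max_payload is at most 2 + 7 == 9 and the ORB advertises 2^(9+2) = 2048 bytes per packet, four times the 512 bytes available at S100, unless the controller's max_receive caps it lower.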
@@ -1275,13 +1508,14 @@ static int sbp2_scsi_slave_alloc(struct scsi_device *sdev)
{
struct sbp2_logical_unit *lu = sdev->hostdata;
+ /* (Re-)Adding logical units via the SCSI stack is not supported. */
+ if (!lu)
+ return -ENOSYS;
+
sdev->allow_restart = 1;
- /*
- * Update the dma alignment (minimum alignment requirements for
- * start and end of DMA transfers) to be a sector
- */
- blk_queue_update_dma_alignment(sdev->request_queue, 511);
+ /* SBP-2 requires quadlet alignment of the data buffers. */
+ blk_queue_update_dma_alignment(sdev->request_queue, 4 - 1);
if (lu->tgt->workarounds & SBP2_WORKAROUND_INQUIRY_36)
sdev->inquiry_len = 36;
@@ -1319,7 +1553,7 @@ static int sbp2_scsi_abort(struct scsi_cmnd *cmd)
{
struct sbp2_logical_unit *lu = cmd->device->hostdata;
- fw_notify("sbp2_scsi_abort\n");
+ fw_notify("%s: sbp2_scsi_abort\n", lu->tgt->bus_id);
sbp2_agent_reset(lu);
sbp2_cancel_orbs(lu);
@@ -1339,16 +1573,14 @@ sbp2_sysfs_ieee1394_id_show(struct device *dev, struct device_attribute *attr,
{
struct scsi_device *sdev = to_scsi_device(dev);
struct sbp2_logical_unit *lu;
- struct fw_device *device;
if (!sdev)
return 0;
lu = sdev->hostdata;
- device = fw_device(lu->tgt->unit->device.parent);
- return sprintf(buf, "%08x%08x:%06x:%04x\n",
- device->config_rom[3], device->config_rom[4],
+ return sprintf(buf, "%016llx:%06x:%04x\n",
+ (unsigned long long)lu->tgt->guid,
lu->tgt->directory_id, lu->lun);
}
diff --git a/drivers/firewire/fw-topology.c b/drivers/firewire/fw-topology.c
index 172c1867e9a..213b0ff8f3d 100644
--- a/drivers/firewire/fw-topology.c
+++ b/drivers/firewire/fw-topology.c
@@ -21,6 +21,7 @@
#include <linux/module.h>
#include <linux/wait.h>
#include <linux/errno.h>
+#include <asm/bug.h>
#include <asm/system.h>
#include "fw-transaction.h"
#include "fw-topology.h"
@@ -107,6 +108,7 @@ static struct fw_node *fw_node_create(u32 sid, int port_count, int color)
node->node_id = LOCAL_BUS | SELF_ID_PHY_ID(sid);
node->link_on = SELF_ID_LINK_ON(sid);
node->phy_speed = SELF_ID_PHY_SPEED(sid);
+ node->initiated_reset = SELF_ID_PHY_INITIATOR(sid);
node->port_count = port_count;
atomic_set(&node->ref_count, 1);
@@ -288,12 +290,11 @@ static struct fw_node *build_tree(struct fw_card *card,
beta_repeaters_present = true;
/*
- * If all PHYs does not report the same gap count
- * setting, we fall back to 63 which will force a gap
- * count reconfiguration and a reset.
+ * If PHYs report different gap counts, set an invalid count
+ * which will force a gap count reconfiguration and a reset.
*/
if (SELF_ID_GAP_COUNT(q) != gap_count)
- gap_count = 63;
+ gap_count = 0;
update_hop_count(node);
@@ -383,6 +384,7 @@ void fw_destroy_nodes(struct fw_card *card)
card->color++;
if (card->local_node != NULL)
for_each_fw_node(card, card->local_node, report_lost_node);
+ card->local_node = NULL;
spin_unlock_irqrestore(&card->lock, flags);
}
@@ -423,12 +425,14 @@ update_tree(struct fw_card *card, struct fw_node *root)
node1 = fw_node(list1.next);
while (&node0->link != &list0) {
+ WARN_ON(node0->port_count != node1->port_count);
- /* assert(node0->port_count == node1->port_count); */
if (node0->link_on && !node1->link_on)
event = FW_NODE_LINK_OFF;
else if (!node0->link_on && node1->link_on)
event = FW_NODE_LINK_ON;
+ else if (node1->initiated_reset && node1->link_on)
+ event = FW_NODE_INITIATED_RESET;
else
event = FW_NODE_UPDATED;
diff --git a/drivers/firewire/fw-topology.h b/drivers/firewire/fw-topology.h
index cedc1ec906e..addb9f8ea77 100644
--- a/drivers/firewire/fw-topology.h
+++ b/drivers/firewire/fw-topology.h
@@ -20,11 +20,12 @@
#define __fw_topology_h
enum {
- FW_NODE_CREATED = 0x00,
- FW_NODE_UPDATED = 0x01,
- FW_NODE_DESTROYED = 0x02,
- FW_NODE_LINK_ON = 0x03,
- FW_NODE_LINK_OFF = 0x04,
+ FW_NODE_CREATED,
+ FW_NODE_UPDATED,
+ FW_NODE_DESTROYED,
+ FW_NODE_LINK_ON,
+ FW_NODE_LINK_OFF,
+ FW_NODE_INITIATED_RESET,
};
struct fw_node {
diff --git a/drivers/firewire/fw-transaction.c b/drivers/firewire/fw-transaction.c
index 7fcc59dedf0..ccf0e4cf108 100644
--- a/drivers/firewire/fw-transaction.c
+++ b/drivers/firewire/fw-transaction.c
@@ -18,6 +18,7 @@
* Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
+#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -28,7 +29,6 @@
#include <linux/list.h>
#include <linux/kthread.h>
#include <asm/uaccess.h>
-#include <asm/semaphore.h>
#include "fw-transaction.h"
#include "fw-topology.h"
@@ -294,42 +294,40 @@ fw_send_request(struct fw_card *card, struct fw_transaction *t,
}
EXPORT_SYMBOL(fw_send_request);
+struct fw_phy_packet {
+ struct fw_packet packet;
+ struct completion done;
+};
+
static void
transmit_phy_packet_callback(struct fw_packet *packet,
struct fw_card *card, int status)
{
- kfree(packet);
-}
-
-static void send_phy_packet(struct fw_card *card, u32 data, int generation)
-{
- struct fw_packet *packet;
+ struct fw_phy_packet *p =
+ container_of(packet, struct fw_phy_packet, packet);
- packet = kzalloc(sizeof(*packet), GFP_ATOMIC);
- if (packet == NULL)
- return;
-
- packet->header[0] = data;
- packet->header[1] = ~data;
- packet->header_length = 8;
- packet->payload_length = 0;
- packet->speed = SCODE_100;
- packet->generation = generation;
- packet->callback = transmit_phy_packet_callback;
-
- card->driver->send_request(card, packet);
+ complete(&p->done);
}
void fw_send_phy_config(struct fw_card *card,
int node_id, int generation, int gap_count)
{
- u32 q;
-
- q = PHY_IDENTIFIER(PHY_PACKET_CONFIG) |
- PHY_CONFIG_ROOT_ID(node_id) |
- PHY_CONFIG_GAP_COUNT(gap_count);
-
- send_phy_packet(card, q, generation);
+ struct fw_phy_packet p;
+ u32 data = PHY_IDENTIFIER(PHY_PACKET_CONFIG) |
+ PHY_CONFIG_ROOT_ID(node_id) |
+ PHY_CONFIG_GAP_COUNT(gap_count);
+
+ p.packet.header[0] = data;
+ p.packet.header[1] = ~data;
+ p.packet.header_length = 8;
+ p.packet.payload_length = 0;
+ p.packet.speed = SCODE_100;
+ p.packet.generation = generation;
+ p.packet.callback = transmit_phy_packet_callback;
+ init_completion(&p.done);
+
+ card->driver->send_request(card, &p.packet);
+ wait_for_completion(&p.done);
}
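
fw_send_phy_config() is now synchronous: it waits for the packet's completion callback before returning, and the PHY configuration packet it sends carries the ones' complement of its first quadlet in the second, as required for PHY packets. Its caller in the bus-management code typically follows it with a bus reset so the new root and gap-count settings take effect; a hedged illustration of that sequence, where fw_core_initiate_bus_reset() is assumed from fw-card.c and new_root_id/gap_count are placeholders:

    	fw_send_phy_config(card, new_root_id, generation, gap_count);
    	fw_core_initiate_bus_reset(card, 1);	/* 1 = short bus reset */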
void fw_flush_transactions(struct fw_card *card)
@@ -389,21 +387,21 @@ lookup_enclosing_address_handler(struct list_head *list,
static DEFINE_SPINLOCK(address_handler_lock);
static LIST_HEAD(address_handler_list);
-const struct fw_address_region fw_low_memory_region =
- { .start = 0x000000000000ULL, .end = 0x000100000000ULL, };
const struct fw_address_region fw_high_memory_region =
{ .start = 0x000100000000ULL, .end = 0xffffe0000000ULL, };
+EXPORT_SYMBOL(fw_high_memory_region);
+
+#if 0
+const struct fw_address_region fw_low_memory_region =
+ { .start = 0x000000000000ULL, .end = 0x000100000000ULL, };
const struct fw_address_region fw_private_region =
{ .start = 0xffffe0000000ULL, .end = 0xfffff0000000ULL, };
const struct fw_address_region fw_csr_region =
- { .start = 0xfffff0000000ULL, .end = 0xfffff0000800ULL, };
+ { .start = CSR_REGISTER_BASE,
+ .end = CSR_REGISTER_BASE | CSR_CONFIG_ROM_END, };
const struct fw_address_region fw_unit_space_region =
{ .start = 0xfffff0000900ULL, .end = 0x1000000000000ULL, };
-EXPORT_SYMBOL(fw_low_memory_region);
-EXPORT_SYMBOL(fw_high_memory_region);
-EXPORT_SYMBOL(fw_private_region);
-EXPORT_SYMBOL(fw_csr_region);
-EXPORT_SYMBOL(fw_unit_space_region);
+#endif /* 0 */
/**
* Allocate a range of addresses in the node space of the OHCI
@@ -736,12 +734,19 @@ fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
break;
}
+ /*
+ * The response handler may be executed while the request handler
+ * is still pending. Cancel the request handler.
+ */
+ card->driver->cancel_packet(card, &t->packet);
+
t->callback(card, rcode, data, data_length, t->callback_data);
}
EXPORT_SYMBOL(fw_core_handle_response);
static const struct fw_address_region topology_map_region =
- { .start = 0xfffff0001000ull, .end = 0xfffff0001400ull, };
+ { .start = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP,
+ .end = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP_END, };
static void
handle_topology_map(struct fw_card *card, struct fw_request *request,
@@ -751,7 +756,7 @@ handle_topology_map(struct fw_card *card, struct fw_request *request,
void *payload, size_t length, void *callback_data)
{
int i, start, end;
- u32 *map;
+ __be32 *map;
if (!TCODE_IS_READ_REQUEST(tcode)) {
fw_send_response(card, request, RCODE_TYPE_ERROR);
@@ -779,7 +784,8 @@ static struct fw_address_handler topology_map = {
};
static const struct fw_address_region registers_region =
- { .start = 0xfffff0000000ull, .end = 0xfffff0000400ull, };
+ { .start = CSR_REGISTER_BASE,
+ .end = CSR_REGISTER_BASE | CSR_CONFIG_ROM, };
static void
handle_registers(struct fw_card *card, struct fw_request *request,
@@ -788,7 +794,7 @@ handle_registers(struct fw_card *card, struct fw_request *request,
unsigned long long offset,
void *payload, size_t length, void *callback_data)
{
- int reg = offset - CSR_REGISTER_BASE;
+ int reg = offset & ~CSR_REGISTER_BASE;
unsigned long long bus_time;
__be32 *data = payload;
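
A worked example for the new offset arithmetic: with CSR_REGISTER_BASE == 0xfffff0000000ULL (cf. the region definitions above), a request addressed to 0xfffff0000210 (CSR_BUSY_TIMEOUT) yields reg == 0x210 whether computed by subtraction or by masking, since the register file sits aligned at the CSR base.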
diff --git a/drivers/firewire/fw-transaction.h b/drivers/firewire/fw-transaction.h
index fa7967b5740..04d3854f656 100644
--- a/drivers/firewire/fw-transaction.h
+++ b/drivers/firewire/fw-transaction.h
@@ -26,6 +26,7 @@
#include <linux/fs.h>
#include <linux/dma-mapping.h>
#include <linux/firewire-constants.h>
+#include <asm/atomic.h>
#define TCODE_IS_READ_REQUEST(tcode) (((tcode) & ~1) == 4)
#define TCODE_IS_BLOCK_PACKET(tcode) (((tcode) & 1) != 0)
@@ -85,12 +86,12 @@
static inline void
fw_memcpy_from_be32(void *_dst, void *_src, size_t size)
{
- u32 *dst = _dst;
- u32 *src = _src;
+ u32 *dst = _dst;
+ __be32 *src = _src;
int i;
for (i = 0; i < size / 4; i++)
- dst[i] = cpu_to_be32(src[i]);
+ dst[i] = be32_to_cpu(src[i]);
}
static inline void
@@ -200,11 +201,7 @@ struct fw_address_region {
u64 end;
};
-extern const struct fw_address_region fw_low_memory_region;
extern const struct fw_address_region fw_high_memory_region;
-extern const struct fw_address_region fw_private_region;
-extern const struct fw_address_region fw_csr_region;
-extern const struct fw_address_region fw_unit_space_region;
int fw_core_add_address_handler(struct fw_address_handler *handler,
const struct fw_address_region *region);
@@ -219,12 +216,10 @@ extern struct bus_type fw_bus_type;
struct fw_card {
const struct fw_card_driver *driver;
struct device *device;
- struct kref kref;
+ atomic_t device_count;
int node_id;
int generation;
- /* This is the generation used for timestamping incoming requests. */
- int request_generation;
int current_tlabel, tlabel_mask;
struct list_head transaction_list;
struct timer_list flush_timer;
@@ -261,9 +256,6 @@ struct fw_card {
int bm_generation;
};
-struct fw_card *fw_card_get(struct fw_card *card);
-void fw_card_put(struct fw_card *card);
-
/*
* The iso packet format allows for an immediate header/payload part
* stored in 'header' immediately after the packet info plus an