summary refs log tree commit diff
path: root/drivers/ieee1394
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@ppc970.osdl.org>2005-04-16 15:20:36 -0700
committerLinus Torvalds <torvalds@ppc970.osdl.org>2005-04-16 15:20:36 -0700
commit1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree0bba044c4ce775e45a88a51686b5d9f90697ea9d /drivers/ieee1394
downloadlinux-1da177e4c3f41524e886b7f1b8a0c1fc7321cac2.tar.gz
Linux-2.6.12-rc2
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.

Let it rip!
Diffstat (limited to 'drivers/ieee1394')
-rw-r--r--drivers/ieee1394/Kconfig188
-rw-r--r--drivers/ieee1394/Makefile26
-rw-r--r--drivers/ieee1394/amdtp.c1300
-rw-r--r--drivers/ieee1394/amdtp.h84
-rw-r--r--drivers/ieee1394/cmp.c311
-rw-r--r--drivers/ieee1394/cmp.h31
-rw-r--r--drivers/ieee1394/config_roms.c236
-rw-r--r--drivers/ieee1394/config_roms.h27
-rw-r--r--drivers/ieee1394/csr.c857
-rw-r--r--drivers/ieee1394/csr.h96
-rw-r--r--drivers/ieee1394/csr1212.c1612
-rw-r--r--drivers/ieee1394/csr1212.h727
-rw-r--r--drivers/ieee1394/dma.c260
-rw-r--r--drivers/ieee1394/dma.h78
-rw-r--r--drivers/ieee1394/dv1394-private.h587
-rw-r--r--drivers/ieee1394/dv1394.c2663
-rw-r--r--drivers/ieee1394/dv1394.h305
-rw-r--r--drivers/ieee1394/eth1394.c1801
-rw-r--r--drivers/ieee1394/eth1394.h236
-rw-r--r--drivers/ieee1394/highlevel.c704
-rw-r--r--drivers/ieee1394/highlevel.h190
-rw-r--r--drivers/ieee1394/hosts.c233
-rw-r--r--drivers/ieee1394/hosts.h215
-rw-r--r--drivers/ieee1394/ieee1394-ioctl.h111
-rw-r--r--drivers/ieee1394/ieee1394.h202
-rw-r--r--drivers/ieee1394/ieee1394_core.c1330
-rw-r--r--drivers/ieee1394/ieee1394_core.h228
-rw-r--r--drivers/ieee1394/ieee1394_hotplug.h33
-rw-r--r--drivers/ieee1394/ieee1394_transactions.c601
-rw-r--r--drivers/ieee1394/ieee1394_transactions.h64
-rw-r--r--drivers/ieee1394/ieee1394_types.h101
-rw-r--r--drivers/ieee1394/iso.c451
-rw-r--r--drivers/ieee1394/iso.h201
-rw-r--r--drivers/ieee1394/nodemgr.c1732
-rw-r--r--drivers/ieee1394/nodemgr.h207
-rw-r--r--drivers/ieee1394/ohci1394.c3705
-rw-r--r--drivers/ieee1394/ohci1394.h456
-rw-r--r--drivers/ieee1394/oui.db7048
-rw-r--r--drivers/ieee1394/oui2c.sh23
-rw-r--r--drivers/ieee1394/pcilynx.c1982
-rw-r--r--drivers/ieee1394/pcilynx.h516
-rw-r--r--drivers/ieee1394/raw1394-private.h86
-rw-r--r--drivers/ieee1394/raw1394.c2958
-rw-r--r--drivers/ieee1394/raw1394.h181
-rw-r--r--drivers/ieee1394/sbp2.c2864
-rw-r--r--drivers/ieee1394/sbp2.h484
-rw-r--r--drivers/ieee1394/video1394.c1527
-rw-r--r--drivers/ieee1394/video1394.h67
48 files changed, 39925 insertions, 0 deletions
diff --git a/drivers/ieee1394/Kconfig b/drivers/ieee1394/Kconfig
new file mode 100644
index 000000000000..78b201fb5e8a
--- /dev/null
+++ b/drivers/ieee1394/Kconfig
@@ -0,0 +1,188 @@
+# -*- shell-script -*-
+
+menu "IEEE 1394 (FireWire) support"
+
+config IEEE1394
+	tristate "IEEE 1394 (FireWire) support"
+	depends on PCI || BROKEN
+	select NET
+	help
+	  IEEE 1394 describes a high performance serial bus, which is also
+	  known as FireWire(tm) or i.Link(tm) and is used for connecting all
+	  sorts of devices (most notably digital video cameras) to your
+	  computer.
+
+	  If you have FireWire hardware and want to use it, say Y here.  This
+	  is the core support only, you will also need to select a driver for
+	  your IEEE 1394 adapter.
+
+	  To compile this driver as a module, say M here: the
+	  module will be called ieee1394.
+
+comment "Subsystem Options"
+	depends on IEEE1394
+
+config IEEE1394_VERBOSEDEBUG
+	bool "Excessive debugging output"
+	depends on IEEE1394
+	help
+	  If you say Y here, you will get very verbose debugging logs from
+	  the subsystem which includes a dump of the header of every sent
+	  and received packet.  This can amount to a large amount of data
+	  collected in a very short time which is usually also saved to
+	  disk by the system logging daemons.
+
+	  Say Y if you really want or need the debugging output, everyone
+	  else says N.
+
+config IEEE1394_OUI_DB
+	bool "OUI Database built-in"
+	depends on IEEE1394
+	help
+	  If you say Y here, then an OUI list (vendor unique ID's) will be
+	  compiled into the ieee1394 module. This doesn't really do much
+	  except being able to display the vendor of a hardware node. The
+	  downside is that it adds about 300k to the size of the module,
+	  or kernel (depending on whether you compile ieee1394 as a
+	  module, or static in the kernel).
+
+	  This option is not needed for userspace programs like gscanbus
+	  to show this information.
+
+config IEEE1394_EXTRA_CONFIG_ROMS
+	bool "Build in extra config rom entries for certain functionality"
+	depends on IEEE1394
+	help
+	  Some IEEE1394 functionality depends on extra config rom entries
+	  being available in the host adapter's CSR. These options will
+	  allow you to choose which ones.
+
+config IEEE1394_CONFIG_ROM_IP1394
+	bool "IP-1394 Entry"
+	depends on IEEE1394_EXTRA_CONFIG_ROMS && IEEE1394
+	help
+	  Adds an entry for using IP-over-1394. If you want to use your
+	  IEEE1394 bus as a network for IP systems (including interacting
+	  with MacOSX and WinXP IP-over-1394), enable this option and the
+	  eth1394 option below.
+
+comment "Device Drivers"
+	depends on IEEE1394
+
+comment "Texas Instruments PCILynx requires I2C"
+	depends on IEEE1394 && I2C=n
+
+config IEEE1394_PCILYNX
+	tristate "Texas Instruments PCILynx support"
+	depends on PCI && IEEE1394 && I2C
+	select I2C_ALGOBIT
+	help
+	  Say Y here if you have an IEEE-1394 controller with the Texas
+	  Instruments PCILynx chip.  Note: this driver is written for revision
+	  2 of this chip and may not work with revision 0.
+
+	  To compile this driver as a module, say M here: the
+	  module will be called pcilynx.
+
+# Non-maintained pcilynx options
+# if [ "$CONFIG_IEEE1394_PCILYNX" != "n" ]; then
+#     bool '    Use PCILynx local RAM' CONFIG_IEEE1394_PCILYNX_LOCALRAM
+#     bool '    Support for non-IEEE1394 local ports' CONFIG_IEEE1394_PCILYNX_PORTS
+# fi
+config IEEE1394_OHCI1394
+	tristate "OHCI-1394 support"
+	depends on PCI && IEEE1394
+	help
+	  Enable this driver if you have an IEEE 1394 controller based on the
+	  OHCI-1394 specification. The current driver is only tested with OHCI
+	  chipsets made by Texas Instruments and NEC. Most third-party vendors
+	  use one of these chipsets.  It should work with any OHCI-1394
+	  compliant card, however.
+
+	  To compile this driver as a module, say M here: the
+	  module will be called ohci1394.
+
+comment "Protocol Drivers"
+	depends on IEEE1394
+
+config IEEE1394_VIDEO1394
+	tristate "OHCI-1394 Video support"
+	depends on IEEE1394 && IEEE1394_OHCI1394
+	help
+	  This option enables video device usage for OHCI-1394 cards.  Enable
+	  this option only if you have an IEEE 1394 video device connected to
+	  an OHCI-1394 card.
+
+config IEEE1394_SBP2
+	tristate "SBP-2 support (Harddisks etc.)"
+	depends on IEEE1394 && SCSI && (PCI || BROKEN)
+	help
+	  This option enables you to use SBP-2 devices connected to your IEEE
+	  1394 bus.  SBP-2 devices include harddrives and DVD devices.
+
+config IEEE1394_SBP2_PHYS_DMA
+	bool "Enable Phys DMA support for SBP2 (Debug)"
+	depends on IEEE1394 && IEEE1394_SBP2
+
+config IEEE1394_ETH1394
+	tristate "Ethernet over 1394"
+	depends on IEEE1394 && EXPERIMENTAL && INET
+	select IEEE1394_CONFIG_ROM_IP1394
+	select IEEE1394_EXTRA_CONFIG_ROMS
+	help
+	  This driver implements a functional majority of RFC 2734: IPv4 over
+	  1394.  It will provide IP connectivity with implementations of RFC
+	  2734 found on other operating systems.  It will not communicate with
+	  older versions of this driver found in stock kernels prior to 2.6.3.
+	  This driver is still considered experimental.  It does not yet support
+	  MCAP, therefore multicast support is significantly limited.
+
+config IEEE1394_DV1394
+	tristate "OHCI-DV I/O support"
+	depends on IEEE1394 && IEEE1394_OHCI1394
+	help
+	  This driver allows you to transmit and receive DV (digital video)
+	  streams on an OHCI-1394 card using a simple frame-oriented
+	  interface.
+
+	  The user-space API for dv1394 is documented in dv1394.h.
+
+	  To compile this driver as a module, say M here: the
+	  module will be called dv1394.
+
+config IEEE1394_RAWIO
+	tristate "Raw IEEE1394 I/O support"
+	depends on IEEE1394
+	help
+	  Say Y here if you want support for the raw device. This is generally
+	  a good idea, so you should say Y here. The raw device enables
+	  direct communication of user programs with the IEEE 1394 bus and
+	  thus with the attached peripherals.
+
+	  To compile this driver as a module, say M here: the
+	  module will be called raw1394.
+
+config IEEE1394_CMP
+	tristate "IEC61883-1 Plug support"
+	depends on IEEE1394
+	help
+	  This option enables the Connection Management Procedures
+	  (IEC61883-1) driver, which implements input and output plugs.
+
+	  To compile this driver as a module, say M here: the
+	  module will be called cmp.
+
+config IEEE1394_AMDTP
+	tristate "IEC61883-6 (Audio transmission) support"
+	depends on IEEE1394 && IEEE1394_OHCI1394 && IEEE1394_CMP
+	help
+	  This option enables the Audio & Music Data Transmission Protocol
+	  (IEC61883-6) driver, which implements audio transmission over
+	  IEEE1394.
+
+	  The userspace interface is documented in amdtp.h.
+
+	  To compile this driver as a module, say M here: the
+	  module will be called amdtp.
+
+endmenu
diff --git a/drivers/ieee1394/Makefile b/drivers/ieee1394/Makefile
new file mode 100644
index 000000000000..e8b4d48d376e
--- /dev/null
+++ b/drivers/ieee1394/Makefile
@@ -0,0 +1,26 @@
+#
+# Makefile for the Linux IEEE 1394 implementation
+#
+
+ieee1394-objs := ieee1394_core.o ieee1394_transactions.o hosts.o \
+		 highlevel.o csr.o nodemgr.o oui.o dma.o iso.o \
+		 csr1212.o config_roms.o
+
+obj-$(CONFIG_IEEE1394) += ieee1394.o
+obj-$(CONFIG_IEEE1394_PCILYNX) += pcilynx.o
+obj-$(CONFIG_IEEE1394_OHCI1394) += ohci1394.o
+obj-$(CONFIG_IEEE1394_VIDEO1394) += video1394.o
+obj-$(CONFIG_IEEE1394_RAWIO) += raw1394.o
+obj-$(CONFIG_IEEE1394_SBP2) += sbp2.o
+obj-$(CONFIG_IEEE1394_DV1394) += dv1394.o
+obj-$(CONFIG_IEEE1394_ETH1394) += eth1394.o
+obj-$(CONFIG_IEEE1394_AMDTP) += amdtp.o
+obj-$(CONFIG_IEEE1394_CMP) += cmp.o
+
+quiet_cmd_oui2c = OUI2C   $@
+      cmd_oui2c = $(CONFIG_SHELL) $(srctree)/$(src)/oui2c.sh < $< > $@
+
+targets := oui.c
+$(obj)/oui.o: $(obj)/oui.c
+$(obj)/oui.c: $(src)/oui.db $(src)/oui2c.sh FORCE
+	$(call if_changed,oui2c)
diff --git a/drivers/ieee1394/amdtp.c b/drivers/ieee1394/amdtp.c
new file mode 100644
index 000000000000..84ae027b021a
--- /dev/null
+++ b/drivers/ieee1394/amdtp.c
@@ -0,0 +1,1300 @@
+/* -*- c-basic-offset: 8 -*-
+ *
+ * amdtp.c - Audio and Music Data Transmission Protocol Driver
+ * Copyright (C) 2001 Kristian Høgsberg
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/* OVERVIEW
+ * --------
+ *
+ * The AMDTP driver is designed to expose the IEEE1394 bus as a
+ * regular OSS soundcard, i.e. you can link /dev/dsp to /dev/amdtp and
+ * then your favourite MP3 player, game or whatever sound program will
+ * output to an IEEE1394 isochronous channel.  The signal destination
+ * could be a set of IEEE1394 loudspeakers (if and when such things
+ * become available) or an amplifier with IEEE1394 input (like the
+ * Sony STR-LSA1).  The driver only handles the actual streaming, some
+ * connection management is also required for this to actually work.
+ * That is outside the scope of this driver, and furthermore it is not
+ * really standardized yet.
+ *
+ * The Audio and Music Data Transmission Protocol is available at
+ *
+ *     http://www.1394ta.org/Download/Technology/Specifications/2001/AM20Final-jf2.pdf
+ *
+ *
+ * TODO
+ * ----
+ *
+ * - We should be able to change input sample format between LE/BE, as
+ *   we already shift the bytes around when we construct the iso
+ *   packets.
+ *
+ * - Fix DMA stop after bus reset!
+ *
+ * - Clean up iso context handling in ohci1394.
+ *
+ *
+ * MAYBE TODO
+ * ----------
+ *
+ * - Receive data for local playback or recording.  Playback requires
+ *   soft syncing with the sound card.
+ *
+ * - Signal processing, i.e. receive packets, do some processing, and
+ *   transmit them again using the same packet structure and timestamps
+ *   offset by processing time.
+ *
+ * - Maybe make an ALSA interface, that is, create a file_ops
+ *   implementation that recognizes ALSA ioctls and uses defaults for
+ *   things that can't be controlled through ALSA (iso channel).
+ *
+ *   Changes:
+ *
+ * - Audit copy_from_user in amdtp_write.
+ *                           Daniele Bellucci <bellucda@tiscali.it>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/ioctl.h>
+#include <linux/wait.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/poll.h>
+#include <linux/ioctl32.h>
+#include <linux/compat.h>
+#include <linux/cdev.h>
+#include <asm/uaccess.h>
+#include <asm/atomic.h>
+
+#include "hosts.h"
+#include "highlevel.h"
+#include "ieee1394.h"
+#include "ieee1394_core.h"
+#include "ohci1394.h"
+
+#include "amdtp.h"
+#include "cmp.h"
+
+#define FMT_AMDTP 0x10
+#define FDF_AM824 0x00
+#define FDF_SFC_32KHZ   0x00
+#define FDF_SFC_44K1HZ  0x01
+#define FDF_SFC_48KHZ   0x02
+#define FDF_SFC_88K2HZ  0x03
+#define FDF_SFC_96KHZ   0x04
+#define FDF_SFC_176K4HZ 0x05
+#define FDF_SFC_192KHZ  0x06
+
+struct descriptor_block {
+	struct output_more_immediate {
+		u32 control;
+		u32 pad0;
+		u32 skip;
+		u32 pad1;
+		u32 header[4];
+	} header_desc;
+
+	struct output_last {
+		u32 control;
+		u32 data_address;
+		u32 branch;
+		u32 status;
+	} payload_desc;
+};
+
+struct packet {
+	struct descriptor_block *db;
+	dma_addr_t db_bus;
+	struct iso_packet *payload;
+	dma_addr_t payload_bus;
+};
+
+#include <asm/byteorder.h>
+
+#if defined __BIG_ENDIAN_BITFIELD
+
+struct iso_packet {
+	/* First quadlet */
+	unsigned int dbs      : 8;
+	unsigned int eoh0     : 2;
+	unsigned int sid      : 6;
+
+	unsigned int dbc      : 8;
+	unsigned int fn       : 2;
+	unsigned int qpc      : 3;
+	unsigned int sph      : 1;
+	unsigned int reserved : 2;
+
+	/* Second quadlet */
+	unsigned int fdf      : 8;
+	unsigned int eoh1     : 2;
+	unsigned int fmt      : 6;
+
+	unsigned int syt      : 16;
+
+        quadlet_t data[0];
+};
+
+#elif defined __LITTLE_ENDIAN_BITFIELD
+
+struct iso_packet {
+	/* First quadlet */
+	unsigned int sid      : 6;
+	unsigned int eoh0     : 2;
+	unsigned int dbs      : 8;
+
+	unsigned int reserved : 2;
+	unsigned int sph      : 1;
+	unsigned int qpc      : 3;
+	unsigned int fn       : 2;
+	unsigned int dbc      : 8;
+
+	/* Second quadlet */
+	unsigned int fmt      : 6;
+	unsigned int eoh1     : 2;
+	unsigned int fdf      : 8;
+
+	unsigned int syt      : 16;
+
+	quadlet_t data[0];
+};
+
+#else
+
+#error Unknown bitfield type
+
+#endif
+
+struct fraction {
+	int integer;
+	int numerator;
+	int denominator;
+};
+
+#define PACKET_LIST_SIZE 256
+#define MAX_PACKET_LISTS 4
+
+struct packet_list {
+	struct list_head link;
+	int last_cycle_count;
+	struct packet packets[PACKET_LIST_SIZE];
+};
+
+#define BUFFER_SIZE 128
+
+/* This implements a circular buffer for incoming samples. */
+
+struct buffer {
+	size_t head, tail, length, size;
+	unsigned char data[0];
+};
+
+struct stream {
+	int iso_channel;
+	int format;
+	int rate;
+	int dimension;
+	int fdf;
+	int mode;
+	int sample_format;
+	struct cmp_pcr *opcr;
+
+	/* Input samples are copied here. */
+	struct buffer *input;
+
+	/* ISO Packer state */
+	unsigned char dbc;
+	struct packet_list *current_packet_list;
+	int current_packet;
+	struct fraction ready_samples, samples_per_cycle;
+
+	/* We use these to generate control bits when we are packing
+	 * iec958 data.
+	 */
+	int iec958_frame_count;
+	int iec958_rate_code;
+
+	/* The cycle_count and cycle_offset fields are used for the
+	 * synchronization timestamps (syt) in the cip header.  They
+	 * are incremented by at least a cycle every time we put a
+	 * time stamp in a packet.  As we don't time stamp all
+	 * packages, cycle_count isn't updated in every cycle, and
+	 * sometimes it's incremented by 2.  Thus, we have
+	 * cycle_count2, which is simply incremented by one with each
+	 * packet, so we can compare it to the transmission time
+	 * written back in the dma programs.
+	 */
+	atomic_t cycle_count, cycle_count2;
+	struct fraction cycle_offset, ticks_per_syt_offset;
+	int syt_interval;
+	int stale_count;
+
+	/* These fields control the sample output to the DMA engine.
+	 * The dma_packet_lists list holds packet lists currently
+	 * queued for dma; the head of the list is currently being
+	 * processed.  The last program in a packet list generates an
+	 * interrupt, which removes the head from dma_packet_lists and
+	 * puts it back on the free list.
+	 */
+	struct list_head dma_packet_lists;
+	struct list_head free_packet_lists;
+        wait_queue_head_t packet_list_wait;
+	spinlock_t packet_list_lock;
+	struct ohci1394_iso_tasklet iso_tasklet;
+	struct pci_pool *descriptor_pool, *packet_pool;
+
+	/* Streams at a host controller are chained through this field. */
+	struct list_head link;
+	struct amdtp_host *host;
+};
+
+struct amdtp_host {
+	struct hpsb_host *host;
+	struct ti_ohci *ohci;
+	struct list_head stream_list;
+	spinlock_t stream_list_lock;
+};
+
+static struct hpsb_highlevel amdtp_highlevel;
+
+
+/* FIXME: This doesn't belong here... */
+
+#define OHCI1394_CONTEXT_CYCLE_MATCH 0x80000000
+#define OHCI1394_CONTEXT_RUN         0x00008000
+#define OHCI1394_CONTEXT_WAKE        0x00001000
+#define OHCI1394_CONTEXT_DEAD        0x00000800
+#define OHCI1394_CONTEXT_ACTIVE      0x00000400
+
+static void ohci1394_start_it_ctx(struct ti_ohci *ohci, int ctx,
+			   dma_addr_t first_cmd, int z, int cycle_match)
+{
+	reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << ctx);
+	reg_write(ohci, OHCI1394_IsoXmitCommandPtr + ctx * 16, first_cmd | z);
+	reg_write(ohci, OHCI1394_IsoXmitContextControlClear + ctx * 16, ~0);
+	wmb();
+	reg_write(ohci, OHCI1394_IsoXmitContextControlSet + ctx * 16,
+		  OHCI1394_CONTEXT_CYCLE_MATCH | (cycle_match << 16) |
+		  OHCI1394_CONTEXT_RUN);
+}
+
+static void ohci1394_wake_it_ctx(struct ti_ohci *ohci, int ctx)
+{
+	reg_write(ohci, OHCI1394_IsoXmitContextControlSet + ctx * 16,
+		  OHCI1394_CONTEXT_WAKE);
+}
+
+static void ohci1394_stop_it_ctx(struct ti_ohci *ohci, int ctx, int synchronous)
+{
+	u32 control;
+	int wait;
+
+	reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << ctx);
+	reg_write(ohci, OHCI1394_IsoXmitContextControlClear + ctx * 16,
+		  OHCI1394_CONTEXT_RUN);
+	wmb();
+
+	if (synchronous) {
+		for (wait = 0; wait < 5; wait++) {
+			control = reg_read(ohci, OHCI1394_IsoXmitContextControlSet + ctx * 16);
+			if ((control & OHCI1394_CONTEXT_ACTIVE) == 0)
+				break;
+
+			set_current_state(TASK_INTERRUPTIBLE);
+			schedule_timeout(1);
+		}
+	}
+}
+
+/* Note: we can test if free_packet_lists is empty without acquiring
+ * the packet_list_lock.  The interrupt handler only adds to the free
+ * list, there is no race condition between testing the list non-empty
+ * and acquiring the lock.
+ */
+
+static struct packet_list *stream_get_free_packet_list(struct stream *s)
+{
+	struct packet_list *pl;
+	unsigned long flags;
+
+	if (list_empty(&s->free_packet_lists))
+		return NULL;
+
+	spin_lock_irqsave(&s->packet_list_lock, flags);
+	pl = list_entry(s->free_packet_lists.next, struct packet_list, link);
+	list_del(&pl->link);
+	spin_unlock_irqrestore(&s->packet_list_lock, flags);
+
+	return pl;
+}
+
+static void stream_start_dma(struct stream *s, struct packet_list *pl)
+{
+	u32 syt_cycle, cycle_count, start_cycle;
+
+	cycle_count = reg_read(s->host->ohci,
+			       OHCI1394_IsochronousCycleTimer) >> 12;
+	syt_cycle = (pl->last_cycle_count - PACKET_LIST_SIZE + 1) & 0x0f;
+
+	/* We program the DMA controller to start transmission at
+	 * least 17 cycles from now - this happens when the lower four
+	 * bits of cycle_count is 0x0f and syt_cycle is 0, in this
+	 * case the start cycle is cycle_count - 15 + 32. */
+	start_cycle = (cycle_count & ~0x0f) + 32 + syt_cycle;
+	if ((start_cycle & 0x1fff) >= 8000)
+		start_cycle = start_cycle - 8000 + 0x2000;
+
+	ohci1394_start_it_ctx(s->host->ohci, s->iso_tasklet.context,
+			      pl->packets[0].db_bus, 3,
+			      start_cycle & 0x7fff);
+}
+
+static void stream_put_dma_packet_list(struct stream *s,
+				       struct packet_list *pl)
+{
+	unsigned long flags;
+	struct packet_list *prev;
+
+	/* Remember the cycle_count used for timestamping the last packet. */
+	pl->last_cycle_count = atomic_read(&s->cycle_count2) - 1;
+	pl->packets[PACKET_LIST_SIZE - 1].db->payload_desc.branch = 0;
+
+	spin_lock_irqsave(&s->packet_list_lock, flags);
+	list_add_tail(&pl->link, &s->dma_packet_lists);
+	spin_unlock_irqrestore(&s->packet_list_lock, flags);
+
+	prev = list_entry(pl->link.prev, struct packet_list, link);
+	if (pl->link.prev != &s->dma_packet_lists) {
+		struct packet *last = &prev->packets[PACKET_LIST_SIZE - 1];
+		last->db->payload_desc.branch = pl->packets[0].db_bus | 3;
+		last->db->header_desc.skip = pl->packets[0].db_bus | 3;
+		ohci1394_wake_it_ctx(s->host->ohci, s->iso_tasklet.context);
+	}
+	else
+		stream_start_dma(s, pl);
+}
+
+static void stream_shift_packet_lists(unsigned long l)
+{
+	struct stream *s = (struct stream *) l;
+	struct packet_list *pl;
+	struct packet *last;
+	int diff;
+
+	if (list_empty(&s->dma_packet_lists)) {
+		HPSB_ERR("empty dma_packet_lists in %s", __FUNCTION__);
+		return;
+	}
+
+	/* Now that we know the list is non-empty, we can get the head
+	 * of the list without locking, because the process context
+	 * only adds to the tail.
+	 */
+	pl = list_entry(s->dma_packet_lists.next, struct packet_list, link);
+	last = &pl->packets[PACKET_LIST_SIZE - 1];
+
+	/* This is weird... if we stop dma processing in the middle of
+	 * a packet list, the dma context immediately generates an
+	 * interrupt if we enable it again later.  This only happens
+	 * when amdtp_release is interrupted while waiting for dma to
+	 * complete, though.  Anyway, we detect this by seeing that
+	 * the status of the dma descriptor that we expected an
+	 * interrupt from is still 0.
+	 */
+	if (last->db->payload_desc.status == 0) {
+		HPSB_INFO("weird interrupt...");
+		return;
+	}
+
+	/* If the last descriptor block does not specify a branch
+	 * address, we have a sample underflow.
+	 */
+	if (last->db->payload_desc.branch == 0)
+		HPSB_INFO("FIXME: sample underflow...");
+
+	/* Here we check when (which cycle) the last packet was sent
+	 * and compare it to what the iso packer was using at the
+	 * time.  If there is a mismatch, we adjust the cycle count in
+	 * the iso packer.  However, there are still up to
+	 * MAX_PACKET_LISTS packet lists queued with bad time stamps,
+	 * so we disable time stamp monitoring for the next
+	 * MAX_PACKET_LISTS packet lists.
+	 */
+	diff = (last->db->payload_desc.status - pl->last_cycle_count) & 0xf;
+	if (diff > 0 && s->stale_count == 0) {
+		atomic_add(diff, &s->cycle_count);
+		atomic_add(diff, &s->cycle_count2);
+		s->stale_count = MAX_PACKET_LISTS;
+	}
+
+	if (s->stale_count > 0)
+		s->stale_count--;
+
+	/* Finally, we move the packet list that was just processed
+	 * back to the free list, and notify any waiters.
+	 */
+	spin_lock(&s->packet_list_lock);
+	list_del(&pl->link);
+	list_add_tail(&pl->link, &s->free_packet_lists);
+	spin_unlock(&s->packet_list_lock);
+
+	wake_up_interruptible(&s->packet_list_wait);
+}
+
+static struct packet *stream_current_packet(struct stream *s)
+{
+	if (s->current_packet_list == NULL &&
+	    (s->current_packet_list = stream_get_free_packet_list(s)) == NULL)
+		return NULL;
+
+	return &s->current_packet_list->packets[s->current_packet];
+}
+
+static void stream_queue_packet(struct stream *s)
+{
+	s->current_packet++;
+	if (s->current_packet == PACKET_LIST_SIZE) {
+		stream_put_dma_packet_list(s, s->current_packet_list);
+		s->current_packet_list = NULL;
+		s->current_packet = 0;
+	}
+}
+
+/* Integer fractional math.  When we transmit a 44k1Hz signal we must
+ * send 5 41/80 samples per isochronous cycle, as these occur 8000
+ * times a second.  Of course, we must send an integral number of
+ * samples in a packet, so we use the integer math to alternate
+ * between sending 5 and 6 samples per packet.
+ */
+
+static void fraction_init(struct fraction *f, int numerator, int denominator)
+{
+	f->integer = numerator / denominator;
+	f->numerator = numerator % denominator;
+	f->denominator = denominator;
+}
+
+static __inline__ void fraction_add(struct fraction *dst,
+				    struct fraction *src1,
+				    struct fraction *src2)
+{
+	/* assert: src1->denominator == src2->denominator */
+
+	int sum, denom;
+
+	/* We use these two local variables to allow gcc to optimize
+	 * the division and the modulo into only one division. */
+
+	sum = src1->numerator + src2->numerator;
+	denom = src1->denominator;
+	dst->integer = src1->integer + src2->integer + sum / denom;
+	dst->numerator = sum % denom;
+	dst->denominator = denom;
+}
+
+static __inline__ void fraction_sub_int(struct fraction *dst,
+					struct fraction *src, int integer)
+{
+	dst->integer = src->integer - integer;
+	dst->numerator = src->numerator;
+	dst->denominator = src->denominator;
+}
+
+static __inline__ int fraction_floor(struct fraction *frac)
+{
+	return frac->integer;
+}
+
+static __inline__ int fraction_ceil(struct fraction *frac)
+{
+	return frac->integer + (frac->numerator > 0 ? 1 : 0);
+}
+
+static void packet_initialize(struct packet *p, struct packet *next)
+{
+	/* Here we initialize the dma descriptor block for
+	 * transferring one iso packet.  We use two descriptors per
+	 * packet: an OUTPUT_MORE_IMMEDIATE descriptor for the
+	 * IEEE1394 iso packet header and an OUTPUT_LAST descriptor
+	 * for the payload.
+	 */
+
+	p->db->header_desc.control =
+		DMA_CTL_OUTPUT_MORE | DMA_CTL_IMMEDIATE | 8;
+
+	if (next) {
+		p->db->payload_desc.control =
+			DMA_CTL_OUTPUT_LAST | DMA_CTL_BRANCH;
+		p->db->payload_desc.branch = next->db_bus | 3;
+		p->db->header_desc.skip = next->db_bus | 3;
+	}
+	else {
+		p->db->payload_desc.control =
+			DMA_CTL_OUTPUT_LAST | DMA_CTL_BRANCH |
+			DMA_CTL_UPDATE | DMA_CTL_IRQ;
+		p->db->payload_desc.branch = 0;
+		p->db->header_desc.skip = 0;
+	}
+	p->db->payload_desc.data_address = p->payload_bus;
+	p->db->payload_desc.status = 0;
+}
+
+static struct packet_list *packet_list_alloc(struct stream *s)
+{
+	int i;
+	struct packet_list *pl;
+	struct packet *next;
+
+	pl = kmalloc(sizeof *pl, SLAB_KERNEL);
+	if (pl == NULL)
+		return NULL;
+
+	for (i = 0; i < PACKET_LIST_SIZE; i++) {
+		struct packet *p = &pl->packets[i];
+		p->db = pci_pool_alloc(s->descriptor_pool, SLAB_KERNEL,
+				       &p->db_bus);
+		p->payload = pci_pool_alloc(s->packet_pool, SLAB_KERNEL,
+					    &p->payload_bus);
+	}
+
+	for (i = 0; i < PACKET_LIST_SIZE; i++) {
+		if (i < PACKET_LIST_SIZE - 1)
+			next = &pl->packets[i + 1];
+		else
+			next = NULL;
+		packet_initialize(&pl->packets[i], next);
+	}
+
+	return pl;
+}
+
+static void packet_list_free(struct packet_list *pl, struct stream *s)
+{
+	int i;
+
+	for (i = 0; i < PACKET_LIST_SIZE; i++) {
+		struct packet *p = &pl->packets[i];
+		pci_pool_free(s->descriptor_pool, p->db, p->db_bus);
+		pci_pool_free(s->packet_pool, p->payload, p->payload_bus);
+	}
+	kfree(pl);
+}
+
+static struct buffer *buffer_alloc(int size)
+{
+	struct buffer *b;
+
+	b = kmalloc(sizeof *b + size, SLAB_KERNEL);
+	if (b == NULL)
+		return NULL;
+	b->head = 0;
+	b->tail = 0;
+	b->length = 0;
+	b->size = size;
+
+	return b;
+}
+
+static unsigned char *buffer_get_bytes(struct buffer *buffer, int size)
+{
+	unsigned char *p;
+
+	if (buffer->head + size > buffer->size)
+		BUG();
+
+	p = &buffer->data[buffer->head];
+	buffer->head += size;
+	if (buffer->head == buffer->size)
+		buffer->head = 0;
+	buffer->length -= size;
+
+	return p;
+}
+
+static unsigned char *buffer_put_bytes(struct buffer *buffer,
+				       size_t max, size_t *actual)
+{
+	size_t length;
+	unsigned char *p;
+
+	p = &buffer->data[buffer->tail];
+	length = min(buffer->size - buffer->length, max);
+	if (buffer->tail + length < buffer->size) {
+		*actual = length;
+		buffer->tail += length;
+	}
+	else {
+		*actual = buffer->size - buffer->tail;
+		 buffer->tail = 0;
+	}
+
+	buffer->length += *actual;
+	return p;
+}
+
+static u32 get_iec958_header_bits(struct stream *s, int sub_frame, u32 sample)
+{
+	int csi, parity, shift;
+	int block_start;
+	u32 bits;
+
+	switch (s->iec958_frame_count) {
+	case 1:
+		csi = s->format == AMDTP_FORMAT_IEC958_AC3;
+		break;
+	case 2:
+	case 9:
+		csi = 1;
+		break;
+	case 24 ... 27:
+		csi = (s->iec958_rate_code >> (27 - s->iec958_frame_count)) & 0x01;
+		break;
+	default:
+		csi = 0;
+		break;
+	}
+
+	block_start = (s->iec958_frame_count == 0 && sub_frame == 0);
+
+	/* The parity bit is the xor of the sample bits and the
+	 * channel status info bit. */
+	for (shift = 16, parity = sample ^ csi; shift > 0; shift >>= 1)
+		parity ^= (parity >> shift);
+
+	bits =  (block_start << 5) |		/* Block start bit */
+		((sub_frame == 0) << 4) |	/* Subframe bit */
+		((parity & 1) << 3) |		/* Parity bit */
+		(csi << 2);			/* Channel status info bit */
+
+	return bits;
+}
+
+static u32 get_header_bits(struct stream *s, int sub_frame, u32 sample)
+{
+	switch (s->format) {
+	case AMDTP_FORMAT_IEC958_PCM:
+	case AMDTP_FORMAT_IEC958_AC3:
+		return get_iec958_header_bits(s, sub_frame, sample);
+
+	case AMDTP_FORMAT_RAW:
+		return 0x40;
+
+	default:
+		return 0;
+	}
+}
+
+static void fill_payload_le16(struct stream *s, quadlet_t *data, int nevents)
+{
+	quadlet_t *event, sample, bits;
+	unsigned char *p;
+	int i, j;
+
+	for (i = 0, event = data; i < nevents; i++) {
+
+		for (j = 0; j < s->dimension; j++) {
+			p = buffer_get_bytes(s->input, 2);
+			sample = (p[1] << 16) | (p[0] << 8);
+			bits = get_header_bits(s, j, sample);
+			event[j] = cpu_to_be32((bits << 24) | sample);
+		}
+
+		event += s->dimension;
+		if (++s->iec958_frame_count == 192)
+			s->iec958_frame_count = 0;
+	}
+}
+
+/* Build one iso packet in place: program the DMA payload descriptor,
+ * the 1394 iso header, the CIP header (with a syt timestamp when a
+ * syt_interval boundary falls inside this packet) and the sample
+ * payload, then advance the data block counter. */
+static void fill_packet(struct stream *s, struct packet *packet, int nevents)
+{
+	int syt_index, syt, size;
+	u32 control;
+
+	/* The two extra quadlets hold the CIP header. */
+	size = (nevents * s->dimension + 2) * sizeof(quadlet_t);
+
+	/* Update DMA descriptors */
+	packet->db->payload_desc.status = 0;
+	control = packet->db->payload_desc.control & 0xffff0000;
+	packet->db->payload_desc.control = control | size;
+
+	/* Fill IEEE1394 headers */
+	packet->db->header_desc.header[0] =
+		(IEEE1394_SPEED_100 << 16) | (0x01 << 14) |
+		(s->iso_channel << 8) | (TCODE_ISO_DATA << 4);
+	packet->db->header_desc.header[1] = size << 16;
+
+	/* Calculate synchronization timestamp (syt). First we
+	 * determine syt_index, that is, the index in the packet of
+	 * the sample for which the timestamp is valid. */
+	syt_index = (s->syt_interval - s->dbc) & (s->syt_interval - 1);
+	if (syt_index < nevents) {
+		syt = ((atomic_read(&s->cycle_count) << 12) |
+		       s->cycle_offset.integer) & 0xffff;
+		fraction_add(&s->cycle_offset,
+			     &s->cycle_offset, &s->ticks_per_syt_offset);
+
+		/* This next addition should be modulo 8000 (0x1f40),
+		 * but we only use the lower 4 bits of cycle_count, so
+		 * we don't need the modulo. */
+		atomic_add(s->cycle_offset.integer / 3072, &s->cycle_count);
+		s->cycle_offset.integer %= 3072;
+	}
+	else
+		/* No timestamped sample in this packet. */
+		syt = 0xffff;
+
+	atomic_inc(&s->cycle_count2);
+
+	/* Fill cip header */
+	packet->payload->eoh0 = 0;
+	packet->payload->sid = s->host->host->node_id & 0x3f;
+	packet->payload->dbs = s->dimension;
+	packet->payload->fn = 0;
+	packet->payload->qpc = 0;
+	packet->payload->sph = 0;
+	packet->payload->reserved = 0;
+	packet->payload->dbc = s->dbc;
+	packet->payload->eoh1 = 2;
+	packet->payload->fmt = FMT_AMDTP;
+	packet->payload->fdf = s->fdf;
+	packet->payload->syt = cpu_to_be16(syt);
+
+	switch (s->sample_format) {
+	case AMDTP_INPUT_LE16:
+		fill_payload_le16(s, packet->payload->data, nevents);
+		break;
+	}
+
+	s->dbc += nevents;
+}
+
+/* Pack as many buffered samples as possible into iso packets and
+ * queue them for DMA transmission.  Stops when the input buffer runs
+ * low or no packet slot is available. */
+static void stream_flush(struct stream *s)
+{
+	struct packet *p;
+	int nevents;
+	struct fraction next;
+
+	/* The AMDTP specifies two transmission modes: blocking and
+	 * non-blocking.  In blocking mode you always transfer
+	 * syt_interval or zero samples, whereas in non-blocking mode
+	 * you send as many samples as you have available at transfer
+	 * time.
+	 *
+	 * The fraction samples_per_cycle specifies the number of
+	 * samples that become available per cycle.  We add this to
+	 * the fraction ready_samples, which specifies the number of
+	 * leftover samples from the previous transmission.  The sum,
+	 * stored in the fraction next, specifies the number of
+	 * samples available for transmission, and from this we
+	 * determine the number of samples to actually transmit.
+	 */
+
+	while (1) {
+		fraction_add(&next, &s->ready_samples, &s->samples_per_cycle);
+		if (s->mode == AMDTP_MODE_BLOCKING) {
+			if (fraction_floor(&next) >= s->syt_interval)
+				nevents = s->syt_interval;
+			else
+				nevents = 0;
+		}
+		else
+			nevents = fraction_floor(&next);
+
+		p = stream_current_packet(s);
+		if (s->input->length < nevents * s->dimension * 2 || p == NULL)
+			break;
+
+		fill_packet(s, p, nevents);
+		stream_queue_packet(s);
+
+		/* Now that we have successfully queued the packet for
+		 * transmission, we update the fraction ready_samples. */
+		fraction_sub_int(&s->ready_samples, &next, nevents);
+	}
+}
+
+/* Create the DMA pool for iso packets, sized for the largest packet
+ * this stream configuration can produce, and pre-allocate
+ * MAX_PACKET_LISTS packet lists.  Returns 0 on success, -1 on
+ * failure; partially allocated lists are left on the free list for
+ * stream_free_packet_lists to reclaim. */
+static int stream_alloc_packet_lists(struct stream *s)
+{
+	int max_nevents, max_packet_size, i;
+
+	if (s->mode == AMDTP_MODE_BLOCKING)
+		max_nevents = s->syt_interval;
+	else
+		max_nevents = fraction_ceil(&s->samples_per_cycle);
+
+	/* 4 bytes per event quadlet per channel plus 8 bytes of CIP
+	 * header. */
+	max_packet_size = max_nevents * s->dimension * 4 + 8;
+	s->packet_pool = pci_pool_create("packet pool", s->host->ohci->dev,
+					 max_packet_size, 0, 0);
+
+	if (s->packet_pool == NULL)
+		return -1;
+
+	INIT_LIST_HEAD(&s->free_packet_lists);
+	INIT_LIST_HEAD(&s->dma_packet_lists);
+	for (i = 0; i < MAX_PACKET_LISTS; i++) {
+		struct packet_list *pl = packet_list_alloc(s);
+		if (pl == NULL)
+			break;
+		list_add_tail(&pl->link, &s->free_packet_lists);
+	}
+
+	return i < MAX_PACKET_LISTS ? -1 : 0;
+}
+
+/* Release every packet list (current, queued for DMA and free) and
+ * destroy the packet pool, then reset the bookkeeping so the stream
+ * can be reconfigured.  Safe to call on partially initialized
+ * state. */
+static void stream_free_packet_lists(struct stream *s)
+{
+	struct packet_list *packet_l, *packet_l_next;
+
+	if (s->current_packet_list != NULL)
+		packet_list_free(s->current_packet_list, s);
+	list_for_each_entry_safe(packet_l, packet_l_next, &s->dma_packet_lists, link)
+		packet_list_free(packet_l, s);
+	list_for_each_entry_safe(packet_l, packet_l_next, &s->free_packet_lists, link)
+		packet_list_free(packet_l, s);
+	if (s->packet_pool != NULL)
+		pci_pool_destroy(s->packet_pool);
+
+	s->current_packet_list = NULL;
+	INIT_LIST_HEAD(&s->free_packet_lists);
+	INIT_LIST_HEAD(&s->dma_packet_lists);
+	s->packet_pool = NULL;
+}
+
+/* Callback from the cmp module when a remote node updates our output
+ * plug.  A positive p2p_count means a connection was established, so
+ * start iso transmission on the plug's channel; zero means the last
+ * connection went away, so stop the it context. */
+static void plug_update(struct cmp_pcr *plug, void *data)
+{
+	struct stream *s = data;
+
+	HPSB_INFO("plug update: p2p_count=%d, channel=%d",
+		  plug->p2p_count, plug->channel);
+	s->iso_channel = plug->channel;
+	if (plug->p2p_count > 0) {
+		struct packet_list *pl;
+
+		pl = list_entry(s->dma_packet_lists.next, struct packet_list, link);
+		stream_start_dma(s, pl);
+	}
+	else {
+		ohci1394_stop_it_ctx(s->host->ohci, s->iso_tasklet.context, 0);
+	}
+}
+
+/* Apply an AMDTP_IOC_PLUG/AMDTP_IOC_CHANNEL ioctl: validate format,
+ * rate, mode and dimension, initialize the timestamp bookkeeping,
+ * bind the stream either to an output plug or a fixed iso channel,
+ * and (re)allocate packet lists big enough for the configured packet
+ * size.  Returns 0 on success or a negative errno. */
+static int stream_configure(struct stream *s, int cmd, struct amdtp_ioctl *cfg)
+{
+	/* Presentation delay in 24.576MHz ticks; split into whole
+	 * cycles (3072 ticks each) and a cycle offset below. */
+	const int transfer_delay = 9000;
+
+	if (cfg->format <= AMDTP_FORMAT_IEC958_AC3)
+		s->format = cfg->format;
+	else
+		return -EINVAL;
+
+	/* Per-rate sample format code (fdf), samples per iso packet
+	 * in blocking mode (syt_interval) and IEC958 channel status
+	 * rate code. */
+	switch (cfg->rate) {
+	case 32000:
+		s->syt_interval = 8;
+		s->fdf = FDF_SFC_32KHZ;
+		s->iec958_rate_code = 0x0c;
+		break;
+	case 44100:
+		s->syt_interval = 8;
+		s->fdf = FDF_SFC_44K1HZ;
+		s->iec958_rate_code = 0x00;
+		break;
+	case 48000:
+		s->syt_interval = 8;
+		s->fdf = FDF_SFC_48KHZ;
+		s->iec958_rate_code = 0x04;
+		break;
+	case 88200:
+		s->syt_interval = 16;
+		s->fdf = FDF_SFC_88K2HZ;
+		s->iec958_rate_code = 0x00;
+		break;
+	case 96000:
+		s->syt_interval = 16;
+		s->fdf = FDF_SFC_96KHZ;
+		s->iec958_rate_code = 0x00;
+		break;
+	case 176400:
+		s->syt_interval = 32;
+		s->fdf = FDF_SFC_176K4HZ;
+		s->iec958_rate_code = 0x00;
+		break;
+	case 192000:
+		s->syt_interval = 32;
+		s->fdf = FDF_SFC_192KHZ;
+		s->iec958_rate_code = 0x00;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	s->rate = cfg->rate;
+	fraction_init(&s->samples_per_cycle, s->rate, 8000);
+	fraction_init(&s->ready_samples, 0, 8000);
+
+	/* The ticks_per_syt_offset is initialized to the number of
+	 * ticks between syt_interval events.  The number of ticks per
+	 * second is 24.576e6, so the number of ticks between
+	 * syt_interval events is 24.576e6 * syt_interval / rate.
+	 */
+	fraction_init(&s->ticks_per_syt_offset,
+		      24576000 * s->syt_interval, s->rate);
+	fraction_init(&s->cycle_offset, (transfer_delay % 3072) * s->rate, s->rate);
+	atomic_set(&s->cycle_count, transfer_delay / 3072);
+	atomic_set(&s->cycle_count2, 0);
+
+	s->mode = cfg->mode;
+	s->sample_format = AMDTP_INPUT_LE16;
+
+	/* When using the AM824 raw subformat we can stream signals of
+	 * any dimension.  The IEC958 subformat, however, only
+	 * supports 2 channels.
+	 */
+	if (s->format == AMDTP_FORMAT_RAW || cfg->dimension == 2)
+		s->dimension = cfg->dimension;
+	else
+		return -EINVAL;
+
+	/* Reconfiguring drops any previously registered plug. */
+	if (s->opcr != NULL) {
+		cmp_unregister_opcr(s->host->host, s->opcr);
+		s->opcr = NULL;
+	}
+
+	switch(cmd) {
+	case AMDTP_IOC_PLUG:
+		s->opcr = cmp_register_opcr(s->host->host, cfg->u.plug,
+					   /*payload*/ 12, plug_update, s);
+		if (s->opcr == NULL)
+			return -EINVAL;
+		s->iso_channel = s->opcr->channel;
+		break;
+
+	case AMDTP_IOC_CHANNEL:
+		if (cfg->u.channel >= 0 && cfg->u.channel < 64)
+			s->iso_channel = cfg->u.channel;
+		else
+			return -EINVAL;
+		break;
+	}
+
+	/* The ioctl settings were all valid, so we realloc the packet
+	 * lists to make sure the packet size is big enough.
+	 */
+	if (s->packet_pool != NULL)
+		stream_free_packet_lists(s);
+
+	if (stream_alloc_packet_lists(s) < 0) {
+		stream_free_packet_lists(s);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+/* Allocate and initialize a stream: input ring buffer, DMA
+ * descriptor pool, iso-transmit tasklet registration, and insertion
+ * into the host's stream list.  Packet lists are allocated later, at
+ * configure time.  Returns NULL on any allocation or registration
+ * failure, with everything acquired so far released. */
+static struct stream *stream_alloc(struct amdtp_host *host)
+{
+	struct stream *s;
+	unsigned long flags;
+
+        s = kmalloc(sizeof(struct stream), SLAB_KERNEL);
+        if (s == NULL)
+                return NULL;
+
+        memset(s, 0, sizeof(struct stream));
+	s->host = host;
+
+	s->input = buffer_alloc(BUFFER_SIZE);
+	if (s->input == NULL) {
+		kfree(s);
+		return NULL;
+	}
+
+	s->descriptor_pool = pci_pool_create("descriptor pool", host->ohci->dev,
+					     sizeof(struct descriptor_block),
+					     16, 0);
+
+	if (s->descriptor_pool == NULL) {
+		kfree(s->input);
+		kfree(s);
+		return NULL;
+	}
+
+	INIT_LIST_HEAD(&s->free_packet_lists);
+	INIT_LIST_HEAD(&s->dma_packet_lists);
+
+        init_waitqueue_head(&s->packet_list_wait);
+        spin_lock_init(&s->packet_list_lock);
+
+	/* The tasklet recycles packet lists as the controller
+	 * finishes transmitting them. */
+	ohci1394_init_iso_tasklet(&s->iso_tasklet, OHCI_ISO_TRANSMIT,
+				  stream_shift_packet_lists,
+				  (unsigned long) s);
+
+	if (ohci1394_register_iso_tasklet(host->ohci, &s->iso_tasklet) < 0) {
+		pci_pool_destroy(s->descriptor_pool);
+		kfree(s->input);
+		kfree(s);
+		return NULL;
+	}
+
+	spin_lock_irqsave(&host->stream_list_lock, flags);
+	list_add_tail(&s->link, &host->stream_list);
+	spin_unlock_irqrestore(&host->stream_list_lock, flags);
+
+	return s;
+}
+
+/* Tear down a stream: drain the DMA queue, stop and unregister the
+ * iso context, disconnect any output plug, unlink from the host and
+ * free all resources. */
+static void stream_free(struct stream *s)
+{
+	unsigned long flags;
+
+	/* Stop the DMA.  We wait for the dma packet list to become
+	 * empty and let the dma controller run out of programs.  This
+	 * seems to be more reliable than stopping it directly, since
+	 * that sometimes generates an it transmit interrupt if we
+	 * later re-enable the context.
+	 */
+	/* NOTE(review): the return value is ignored — if a signal
+	 * interrupts the wait we fall through and stop the context
+	 * with the queue still non-empty. */
+	wait_event_interruptible(s->packet_list_wait,
+				 list_empty(&s->dma_packet_lists));
+
+	ohci1394_stop_it_ctx(s->host->ohci, s->iso_tasklet.context, 1);
+	ohci1394_unregister_iso_tasklet(s->host->ohci, &s->iso_tasklet);
+
+	if (s->opcr != NULL)
+		cmp_unregister_opcr(s->host->host, s->opcr);
+
+	spin_lock_irqsave(&s->host->stream_list_lock, flags);
+	list_del(&s->link);
+	spin_unlock_irqrestore(&s->host->stream_list_lock, flags);
+
+	kfree(s->input);
+
+	stream_free_packet_lists(s);
+	pci_pool_destroy(s->descriptor_pool);
+
+	kfree(s);
+}
+
+/* File operations */
+
+/* write(2) handler: copy samples from userspace into the stream's
+ * circular buffer and invoke the iso packer whenever the buffer
+ * fills.  Requires a prior configure ioctl (-EBADFD before the
+ * packet pool exists).  Returns the number of bytes consumed or a
+ * negative errno. */
+static ssize_t amdtp_write(struct file *file, const char __user *buffer, size_t count,
+			   loff_t *offset_is_ignored)
+{
+	struct stream *s = file->private_data;
+	unsigned char *p;
+	int i;
+	size_t length;
+
+	if (s->packet_pool == NULL)
+		return -EBADFD;
+
+	/* Fill the circular buffer from the input buffer and call the
+	 * iso packer when the buffer is full.  The iso packer may
+	 * leave bytes in the buffer for two reasons: either the
+	 * remaining bytes wasn't enough to build a new packet, or
+	 * there were no free packet lists.  In the first case we
+	 * re-fill the buffer and call the iso packer again or return
+	 * if we used all the data from userspace.  In the second
+	 * case, the wait_event_interruptible will block until the irq
+	 * handler frees a packet list.
+	 */
+
+	for (i = 0; i < count; i += length) {
+		p = buffer_put_bytes(s->input, count - i, &length);
+		if (copy_from_user(p, buffer + i, length))
+			return -EFAULT;
+		if (s->input->length < s->input->size)
+			continue;
+
+		stream_flush(s);
+
+		if (s->current_packet_list != NULL)
+			continue;
+
+		/* Short write rather than blocking for O_NONBLOCK. */
+		if (file->f_flags & O_NONBLOCK)
+			return i + length > 0 ? i + length : -EAGAIN;
+
+		if (wait_event_interruptible(s->packet_list_wait,
+					     !list_empty(&s->free_packet_lists)))
+			return -EINTR;
+	}
+
+	return count;
+}
+
+/* Unlocked ioctl handler: the PLUG and CHANNEL commands both carry a
+ * struct amdtp_ioctl and are forwarded to stream_configure.  All
+ * amdtp ioctls are 32/64 bit compatible, so this also serves as the
+ * compat handler. */
+static long amdtp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	struct stream *s = file->private_data;
+	struct amdtp_ioctl cfg;
+	int err;
+	/* BKL taken to serialize stream configuration. */
+	lock_kernel();
+	switch(cmd)
+	{
+	case AMDTP_IOC_PLUG:
+	case AMDTP_IOC_CHANNEL:
+		if (copy_from_user(&cfg, (struct amdtp_ioctl __user *) arg, sizeof cfg))
+			err = -EFAULT;
+		else
+			err = stream_configure(s, cmd, &cfg);
+		break;
+
+	default:
+		err = -EINVAL;
+		break;
+	}
+	unlock_kernel();
+	return err;
+}
+
+/* poll(2) support: the stream is writable whenever a free packet
+ * list is available for the iso packer. */
+static unsigned int amdtp_poll(struct file *file, poll_table *pt)
+{
+	struct stream *s = file->private_data;
+	unsigned int mask = 0;
+
+	poll_wait(file, &s->packet_list_wait, pt);
+
+	if (!list_empty(&s->free_packet_lists))
+		mask = POLLOUT | POLLWRNORM;
+
+	return mask;
+}
+
+/* open(2): look up the amdtp host selected by the device minor and
+ * attach a freshly allocated stream to the file. */
+static int amdtp_open(struct inode *inode, struct file *file)
+{
+	struct amdtp_host *host =
+		hpsb_get_hostinfo_bykey(&amdtp_highlevel,
+					ieee1394_file_to_instance(file));
+
+	if (host == NULL)
+		return -ENODEV;
+
+	file->private_data = stream_alloc(host);
+
+	return file->private_data != NULL ? 0 : -ENOMEM;
+}
+
+/* release(2): free the stream allocated at open time. */
+static int amdtp_release(struct inode *inode, struct file *file)
+{
+	stream_free(file->private_data);
+
+	return 0;
+}
+
+static struct cdev amdtp_cdev;
+static struct file_operations amdtp_fops =
+{
+	.owner =	THIS_MODULE,
+	.write =	amdtp_write,
+	.poll =		amdtp_poll,
+	.unlocked_ioctl = amdtp_ioctl,
+	.compat_ioctl = amdtp_ioctl, /* All amdtp ioctls are compatible */
+	.open =		amdtp_open,
+	.release =	amdtp_release
+};
+
+/* IEEE1394 Subsystem functions */
+
+/* Highlevel hook, called for every new 1394 host.  Only ohci1394
+ * controlled hosts are supported; for those, allocate per-host data
+ * and create the devfs node for the amdtp char device. */
+static void amdtp_add_host(struct hpsb_host *host)
+{
+	struct amdtp_host *ah;
+	int minor;
+
+	/* This driver talks directly to the ohci1394 DMA engine. */
+	if (strcmp(host->driver->name, OHCI1394_DRIVER_NAME) != 0)
+		return;
+
+	ah = hpsb_create_hostinfo(&amdtp_highlevel, host, sizeof(*ah));
+	if (!ah) {
+		/* Fixed garbled message ("Unable able to alloc"). */
+		HPSB_ERR("amdtp: Unable to allocate hostinfo");
+		return;
+	}
+
+	ah->host = host;
+	ah->ohci = host->hostdata;
+
+	hpsb_set_hostinfo_key(&amdtp_highlevel, host, ah->host->id);
+
+	minor = IEEE1394_MINOR_BLOCK_AMDTP * 16 + ah->host->id;
+
+	INIT_LIST_HEAD(&ah->stream_list);
+	spin_lock_init(&ah->stream_list_lock);
+
+	devfs_mk_cdev(MKDEV(IEEE1394_MAJOR, minor),
+			S_IFCHR|S_IRUSR|S_IWUSR, "amdtp/%d", ah->host->id);
+}
+
+/* Highlevel hook: tear down the devfs node created in
+ * amdtp_add_host.  The hostinfo itself is released by the highlevel
+ * core. */
+static void amdtp_remove_host(struct hpsb_host *host)
+{
+	struct amdtp_host *ah = hpsb_get_hostinfo(&amdtp_highlevel, host);
+
+	if (ah != NULL)
+		devfs_remove("amdtp/%d", ah->host->id);
+}
+
+static struct hpsb_highlevel amdtp_highlevel = {
+	.name =		"amdtp",
+	.add_host =	amdtp_add_host,
+	.remove_host =	amdtp_remove_host,
+};
+
+/* Module interface */
+
+MODULE_AUTHOR("Kristian Hogsberg <hogsberg@users.sf.net>");
+MODULE_DESCRIPTION("Driver for Audio & Music Data Transmission Protocol "
+		   "on OHCI boards.");
+MODULE_SUPPORTED_DEVICE("amdtp");
+MODULE_LICENSE("GPL");
+
+/* Module init: register the amdtp char device (16 minors), create
+ * the devfs directory and hook into the ieee1394 highlevel layer. */
+static int __init amdtp_init_module(void)
+{
+	cdev_init(&amdtp_cdev, &amdtp_fops);
+	amdtp_cdev.owner = THIS_MODULE;
+	kobject_set_name(&amdtp_cdev.kobj, "amdtp");
+	if (cdev_add(&amdtp_cdev, IEEE1394_AMDTP_DEV, 16)) {
+		HPSB_ERR("amdtp: unable to add char device");
+		return -EIO;
+	}
+
+	devfs_mk_dir("amdtp");
+
+	hpsb_register_highlevel(&amdtp_highlevel);
+
+	HPSB_INFO("Loaded AMDTP driver");
+
+	return 0;
+}
+
+/* Module exit: undo amdtp_init_module in reverse order. */
+static void __exit amdtp_exit_module(void)
+{
+	hpsb_unregister_highlevel(&amdtp_highlevel);
+	devfs_remove("amdtp");
+	cdev_del(&amdtp_cdev);
+
+	HPSB_INFO("Unloaded AMDTP driver");
+}
+
+module_init(amdtp_init_module);
+module_exit(amdtp_exit_module);
+MODULE_ALIAS_CHARDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_AMDTP * 16);
diff --git a/drivers/ieee1394/amdtp.h b/drivers/ieee1394/amdtp.h
new file mode 100644
index 000000000000..531f28e3ab50
--- /dev/null
+++ b/drivers/ieee1394/amdtp.h
@@ -0,0 +1,84 @@
+/* -*- c-basic-offset: 8 -*- */
+
+#ifndef __AMDTP_H
+#define __AMDTP_H
+
+#include <asm/types.h>
+#include "ieee1394-ioctl.h"
+
+/* The userspace interface for the Audio & Music Data Transmission
+ * Protocol driver is really simple. First, open /dev/amdtp, use the
+ * ioctl to configure format, rate, dimension and either plug or
+ * channel, then start writing samples.
+ *
+ * The formats supported by the driver are listed below.
+ * AMDTP_FORMAT_RAW corresponds to the AM824 raw format, which can
+ * carry any number of channels, so use this if you're streaming
+ * multichannel audio.  The AMDTP_FORMAT_IEC958_PCM corresponds to the
+ * AM824 IEC958 encapsulation without the IEC958 data bit set, using
+ * AMDTP_FORMAT_IEC958_AC3 will transmit the samples with the data bit
+ * set, suitable for transmitting compressed AC-3 audio.
+ *
+ * The rate field specifies the transmission rate; supported values
+ * are 32000, 44100, 48000, 88200, 96000, 176400 and 192000.
+ *
+ * The dimension field specifies the dimension of the signal, that is,
+ * the number of audio channels.  Only AMDTP_FORMAT_RAW supports
+ * settings greater than 2.
+ *
+ * The mode field specifies which transmission mode to use. The AMDTP
+ * specifies two different transmission modes: blocking and
+ * non-blocking.  The blocking transmission mode always send a fixed
+ * number of samples, typically 8, 16 or 32.  To exactly match the
+ * transmission rate, the driver alternates between sending empty and
+ * non-empty packets.  In non-blocking mode, the driver transmits as
+ * small packets as possible.  For example, for a transmission rate of
+ * 44100Hz, the driver should send 5 41/80 samples in every cycle, but
+ * this is not possible so instead the driver alternates between
+ * sending 5 and 6 samples.
+ *
+ * The last thing to specify is either the isochronous channel to use
+ * or the output plug to connect to.  If you know what channel the
+ * destination device will listen on, you can specify the channel
+ * directly and use the AMDTP_IOC_CHANNEL ioctl.  However, if the
+ * destination device chooses the channel and uses the IEC61883-1 plug
+ * mechanism, you can specify an output plug to connect to.  The
+ * driver will pick up the channel number from the plug once the
+ * destination device locks the output plug control register.  In this
+ * case set the plug field and use the AMDTP_IOC_PLUG ioctl.
+ *
+ * Having configured the interface, the driver now accepts writes of
+ * regular 16 bit signed little endian samples, with the channels
+ * interleaved.  For example, 4 channels would look like:
+ *
+ *   | sample 0                                      | sample 1    ...
+ *   | ch. 0     | ch. 1     | ch. 2     | ch. 3     | ch. 0     | ...
+ *   | lsb | msb | lsb | msb | lsb | msb | lsb | msb | lsb | msb | ...
+ *
+ */
+
+enum {
+	AMDTP_FORMAT_RAW,
+	AMDTP_FORMAT_IEC958_PCM,
+	AMDTP_FORMAT_IEC958_AC3
+};
+
+enum {
+	AMDTP_MODE_BLOCKING,
+	AMDTP_MODE_NON_BLOCKING,
+};
+
+enum {
+	AMDTP_INPUT_LE16,
+	AMDTP_INPUT_BE16,
+};
+
+struct amdtp_ioctl {
+	__u32 format;
+	__u32 rate;
+	__u32 dimension;
+	__u32 mode;
+	union { __u32 channel; __u32 plug; } u;
+};
+
+#endif /* __AMDTP_H */
diff --git a/drivers/ieee1394/cmp.c b/drivers/ieee1394/cmp.c
new file mode 100644
index 000000000000..69aed26e83a1
--- /dev/null
+++ b/drivers/ieee1394/cmp.c
@@ -0,0 +1,311 @@
+/* -*- c-basic-offset: 8 -*-
+ *
+ * cmp.c - Connection Management Procedures
+ * Copyright (C) 2001 Kristian Høgsberg
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/* TODO
+ * ----
+ *
+ * - Implement IEC61883-1 output plugs and connection management.
+ *   This should probably be part of the general subsystem, as it could
+ *   be shared with dv1394.
+ *
+ * - Add IEC61883 unit directory when loading this module.  This
+ *   requires a run-time changeable config rom.
+ */
+
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <linux/interrupt.h>
+
+#include "hosts.h"
+#include "highlevel.h"
+#include "ieee1394.h"
+#include "ieee1394_core.h"
+#include "cmp.h"
+
+struct plug {
+	union {
+		struct cmp_pcr pcr;
+		quadlet_t quadlet;
+	} u;
+	void (*update)(struct cmp_pcr *plug, void *data);
+	void *data;
+};
+
+struct cmp_host {
+	struct hpsb_host *host;
+
+	union {
+		struct cmp_mpr ompr;
+		quadlet_t ompr_quadlet;
+	} u;
+	struct plug opcr[2];
+
+	union {
+		struct cmp_mpr impr;
+		quadlet_t impr_quadlet;
+	} v;
+	struct plug ipcr[2];
+};
+
+enum {
+	CMP_P2P_CONNECTION,
+	CMP_BC_CONNECTION
+};
+
+#define CSR_PCR_MAP      0x900
+#define CSR_PCR_MAP_END  0x9fc
+
+static struct hpsb_highlevel cmp_highlevel;
+
+static void cmp_add_host(struct hpsb_host *host);
+static void cmp_host_reset(struct hpsb_host *host);
+static int pcr_read(struct hpsb_host *host, int nodeid, quadlet_t *buf,
+		    u64 addr, size_t length, u16 flags);
+static int pcr_lock(struct hpsb_host *host, int nodeid, quadlet_t *store,
+		    u64 addr, quadlet_t data, quadlet_t arg, int extcode, u16 flags);
+
+static struct hpsb_highlevel cmp_highlevel = {
+	.name =		"cmp",
+	.add_host =	cmp_add_host,
+	.host_reset =	cmp_host_reset,
+};
+
+static struct hpsb_address_ops pcr_ops = {
+	.read =	pcr_read,
+	.lock =	pcr_lock,
+};
+
+
+/* Reserve output plug opcr_number on host and register an update
+ * callback invoked when a remote node modifies the plug via a lock
+ * transaction.  Returns the plug, or NULL if the host has no cmp
+ * hostinfo, the plug number is out of range, or the plug is already
+ * in use. */
+struct cmp_pcr *
+cmp_register_opcr(struct hpsb_host *host, int opcr_number, int payload,
+		  void (*update)(struct cmp_pcr *pcr, void *data),
+		  void *data)
+{
+	struct cmp_host *ch;
+	struct plug *plug;
+
+	ch = hpsb_get_hostinfo(&cmp_highlevel, host);
+	/* Fix: guard against a host cmp_add_host never ran for, as
+	 * cmp_host_reset already does. */
+	if (ch == NULL)
+		return NULL;
+
+	if (opcr_number >= ch->u.ompr.nplugs ||
+	    ch->opcr[opcr_number].update != NULL)
+		return NULL;
+
+	plug = &ch->opcr[opcr_number];
+	plug->u.pcr.online = 1;
+	plug->u.pcr.bcast_count = 0;
+	plug->u.pcr.p2p_count = 0;
+	plug->u.pcr.overhead = 0;
+	plug->u.pcr.payload = payload;
+	plug->update = update;
+	plug->data = data;
+
+	return &plug->u.pcr;
+}
+
+/* Release an output plug previously obtained from cmp_register_opcr:
+ * mark it offline and clear its update callback so it can be
+ * registered again. */
+void cmp_unregister_opcr(struct hpsb_host *host, struct cmp_pcr *opcr)
+{
+	struct cmp_host *ch;
+	struct plug *plug;
+
+	ch = hpsb_get_hostinfo(&cmp_highlevel, host);
+	plug = (struct plug *)opcr;
+	/* Sanity check: opcr must point into this host's plug array. */
+	if (plug - ch->opcr >= ch->u.ompr.nplugs) BUG();
+
+	plug->u.pcr.online = 0;
+	plug->update = NULL;
+}
+
+/* Restore every output plug control register of a host to its
+ * bus-reset default, clearing any established connections. */
+static void reset_plugs(struct cmp_host *ch)
+{
+	struct plug *plug;
+	int n;
+
+	ch->u.ompr.non_persistent_ext = 0xff;
+	for (n = 0, plug = ch->opcr; n < ch->u.ompr.nplugs; n++, plug++) {
+		plug->u.pcr.bcast_count = 0;
+		plug->u.pcr.p2p_count = 0;
+		plug->u.pcr.overhead = 0;
+	}
+}
+
+/* Highlevel hook for new hosts: allocate per-host plug state, expose
+ * the PCR register map in CSR space (0x900-0x9fc) and initialize the
+ * output master plug register with 2 plugs and broadcast channel
+ * base 63. */
+static void cmp_add_host(struct hpsb_host *host)
+{
+	struct cmp_host *ch = hpsb_create_hostinfo(&cmp_highlevel, host, sizeof (*ch));
+
+	if (ch == NULL) {
+		HPSB_ERR("Failed to allocate cmp_host");
+		return;
+	}
+
+	/* NOTE(review): the return value of hpsb_register_addrspace
+	 * is not checked — confirm registration cannot fail here. */
+	hpsb_register_addrspace(&cmp_highlevel, host, &pcr_ops,
+				CSR_REGISTER_BASE + CSR_PCR_MAP,
+				CSR_REGISTER_BASE + CSR_PCR_MAP_END);
+
+	ch->host = host;
+	ch->u.ompr.rate = IEEE1394_SPEED_100;
+	ch->u.ompr.bcast_channel_base = 63;
+	ch->u.ompr.nplugs = 2;
+
+	reset_plugs(ch);
+}
+
+/* Bus reset handler: plug connections do not survive a bus reset,
+ * so put every plug back into its default state. */
+static void cmp_host_reset(struct hpsb_host *host)
+{
+	struct cmp_host *ch = hpsb_get_hostinfo(&cmp_highlevel, host);
+
+	if (ch == NULL) {
+		HPSB_ERR("cmp: Tried to reset unknown host");
+		return;
+	}
+
+	reset_plugs(ch);
+}
+
+/* Handle read requests in the PCR register map: 0x900 is the oMPR
+ * followed by the oPCRs, 0x980 is the iMPR followed by the iPCRs.
+ * Only quadlet-sized reads are allowed. */
+static int pcr_read(struct hpsb_host *host, int nodeid, quadlet_t *buf,
+		    u64 addr, size_t length, u16 flags)
+{
+	int csraddr = addr - CSR_REGISTER_BASE;
+	int plug;
+	struct cmp_host *ch;
+
+	if (length != 4)
+		return RCODE_TYPE_ERROR;
+
+	/* NOTE(review): ch is assumed non-NULL here — confirm the
+	 * address space is only registered after hostinfo creation. */
+	ch = hpsb_get_hostinfo(&cmp_highlevel, host);
+	if (csraddr == 0x900) {
+		*buf = cpu_to_be32(ch->u.ompr_quadlet);
+		return RCODE_COMPLETE;
+	}
+	else if (csraddr < 0x904 + ch->u.ompr.nplugs * 4) {
+		plug = (csraddr - 0x904) / 4;
+		*buf = cpu_to_be32(ch->opcr[plug].u.quadlet);
+		return RCODE_COMPLETE;
+	}
+	else if (csraddr < 0x980) {
+		return RCODE_ADDRESS_ERROR;
+	}
+	else if (csraddr == 0x980) {
+		*buf = cpu_to_be32(ch->v.impr_quadlet);
+		return RCODE_COMPLETE;
+	}
+	else if (csraddr < 0x984 + ch->v.impr.nplugs * 4) {
+		plug = (csraddr - 0x984) / 4;
+		*buf = cpu_to_be32(ch->ipcr[plug].u.quadlet);
+		return RCODE_COMPLETE;
+	}
+	else
+		return RCODE_ADDRESS_ERROR;
+}
+
+/* Handle lock requests to the PCR register map.  Plug registers are
+ * updated with compare_swap as IEC61883-1 requires; when a plug's
+ * value actually changes, its registered update callback runs. */
+static int pcr_lock(struct hpsb_host *host, int nodeid, quadlet_t *store,
+		    u64 addr, quadlet_t data, quadlet_t arg, int extcode, u16 flags)
+{
+	int csraddr = addr - CSR_REGISTER_BASE;
+	int plug;
+	struct cmp_host *ch;
+
+	ch = hpsb_get_hostinfo(&cmp_highlevel, host);
+
+	if (extcode != EXTCODE_COMPARE_SWAP)
+		return RCODE_TYPE_ERROR;
+
+	if (csraddr == 0x900) {
+		/* FIXME: Ignore writes to bits 30-31 and 0-7 */
+		*store = cpu_to_be32(ch->u.ompr_quadlet);
+		if (arg == cpu_to_be32(ch->u.ompr_quadlet))
+			ch->u.ompr_quadlet = be32_to_cpu(data);
+
+		return RCODE_COMPLETE;
+	}
+	if (csraddr < 0x904 + ch->u.ompr.nplugs * 4) {
+		plug = (csraddr - 0x904) / 4;
+		*store = cpu_to_be32(ch->opcr[plug].u.quadlet);
+
+		if (arg == *store)
+			ch->opcr[plug].u.quadlet = be32_to_cpu(data);
+
+		if (be32_to_cpu(*store) != ch->opcr[plug].u.quadlet &&
+		    ch->opcr[plug].update != NULL)
+			ch->opcr[plug].update(&ch->opcr[plug].u.pcr,
+					      ch->opcr[plug].data);
+
+		return RCODE_COMPLETE;
+	}
+	else if (csraddr < 0x980) {
+		return RCODE_ADDRESS_ERROR;
+	}
+	else if (csraddr == 0x980) {
+		/* FIXME: Ignore writes to bits 24-31 and 0-7 */
+		/* Fix: 0x980 is the iMPR; the original code operated
+		 * on the oMPR quadlet here (copy-and-paste from the
+		 * 0x900 branch), matching pcr_read's use of
+		 * v.impr_quadlet at this address. */
+		*store = cpu_to_be32(ch->v.impr_quadlet);
+		if (arg == cpu_to_be32(ch->v.impr_quadlet))
+			ch->v.impr_quadlet = be32_to_cpu(data);
+
+		return RCODE_COMPLETE;
+	}
+	else if (csraddr < 0x984 + ch->v.impr.nplugs * 4) {
+		plug = (csraddr - 0x984) / 4;
+		*store = cpu_to_be32(ch->ipcr[plug].u.quadlet);
+
+		if (arg == *store)
+			ch->ipcr[plug].u.quadlet = be32_to_cpu(data);
+
+		if (be32_to_cpu(*store) != ch->ipcr[plug].u.quadlet &&
+		    ch->ipcr[plug].update != NULL)
+			ch->ipcr[plug].update(&ch->ipcr[plug].u.pcr,
+					      ch->ipcr[plug].data);
+
+		return RCODE_COMPLETE;
+	}
+	else
+		return RCODE_ADDRESS_ERROR;
+}
+
+
+/* Module interface */
+
+MODULE_AUTHOR("Kristian Hogsberg <hogsberg@users.sf.net>");
+MODULE_DESCRIPTION("Connection Management Procedures (CMP)");
+MODULE_SUPPORTED_DEVICE("cmp");
+MODULE_LICENSE("GPL");
+
+EXPORT_SYMBOL(cmp_register_opcr);
+EXPORT_SYMBOL(cmp_unregister_opcr);
+
+/* Module init: hook the CMP driver into the ieee1394 highlevel
+ * layer. */
+static int __init cmp_init_module(void)
+{
+	hpsb_register_highlevel(&cmp_highlevel);
+
+	HPSB_INFO("Loaded CMP driver");
+
+	return 0;
+}
+
+/* Module exit: unhook the CMP driver from the highlevel layer. */
+static void __exit cmp_exit_module(void)
+{
+	hpsb_unregister_highlevel(&cmp_highlevel);
+
+	HPSB_INFO("Unloaded CMP driver");
+}
+
+module_init(cmp_init_module);
+module_exit(cmp_exit_module);
diff --git a/drivers/ieee1394/cmp.h b/drivers/ieee1394/cmp.h
new file mode 100644
index 000000000000..f9288bfcd494
--- /dev/null
+++ b/drivers/ieee1394/cmp.h
@@ -0,0 +1,31 @@
+#ifndef __CMP_H
+#define __CMP_H
+
+struct cmp_mpr {
+	u32 nplugs:5;
+	u32 reserved:3;
+	u32 persistent_ext:8;
+	u32 non_persistent_ext:8;
+	u32 bcast_channel_base:6;
+	u32 rate:2;
+} __attribute__((packed));
+
+struct cmp_pcr {
+	u32 payload:10;
+	u32 overhead:4;
+	u32 speed:2;
+	u32 channel:6;
+	u32 reserved:2;
+	u32 p2p_count:6;
+	u32 bcast_count:1;
+	u32 online:1;
+} __attribute__((packed));
+
+struct cmp_pcr *cmp_register_opcr(struct hpsb_host *host, int plug,
+				  int payload,
+				  void (*update)(struct cmp_pcr *plug,
+						 void *data),
+				  void *data);
+void cmp_unregister_opcr(struct hpsb_host *host, struct cmp_pcr *plug);
+
+#endif /* __CMP_H */
diff --git a/drivers/ieee1394/config_roms.c b/drivers/ieee1394/config_roms.c
new file mode 100644
index 000000000000..1017fd717248
--- /dev/null
+++ b/drivers/ieee1394/config_roms.c
@@ -0,0 +1,236 @@
+/*
+ * IEEE 1394 for Linux
+ *
+ * ConfigROM  entries
+ *
+ * Copyright (C) 2004 Ben Collins
+ *
+ * This code is licensed under the GPL.  See the file COPYING in the root
+ * directory of the kernel sources for details.
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+
+#include "csr1212.h"
+#include "ieee1394.h"
+#include "ieee1394_types.h"
+#include "hosts.h"
+#include "ieee1394_core.h"
+#include "highlevel.h"
+#include "csr.h"
+#include "config_roms.h"
+
+struct hpsb_config_rom_entry {
+	const char *name;
+
+	/* Base initialization, called at module load */
+	int (*init)(void);
+
+	/* Add entry to specified host */
+	int (*add)(struct hpsb_host *host);
+
+	/* Remove entry from specified host */
+	void (*remove)(struct hpsb_host *host);
+
+	/* Cleanup called at module exit */
+	void (*cleanup)(void);
+
+	/* The flag added to host->config_roms */
+	unsigned int flag;
+};
+
+
+/* Populate the default entries every host's config ROM must carry: a
+ * vendor id immediate with an associated text descriptor naming the
+ * driver.  Returns 0 on success or -ENOMEM; on failure the host's
+ * CSR is destroyed. */
+int hpsb_default_host_entry(struct hpsb_host *host)
+{
+	struct csr1212_keyval *root;
+	struct csr1212_keyval *vend_id = NULL;
+	struct csr1212_keyval *text = NULL;
+	char csr_name[128];
+	int ret;
+
+	/* Bounded print so an unusually long driver name cannot
+	 * overflow csr_name (the original used sprintf). */
+	snprintf(csr_name, sizeof(csr_name), "Linux - %s", host->driver->name);
+	root = host->csr.rom->root_kv;
+
+	vend_id = csr1212_new_immediate(CSR1212_KV_ID_VENDOR, host->csr.guid_hi >> 8);
+	text = csr1212_new_string_descriptor_leaf(csr_name);
+
+	if (!vend_id || !text) {
+		if (vend_id)
+			csr1212_release_keyval(vend_id);
+		if (text)
+			csr1212_release_keyval(text);
+		csr1212_destroy_csr(host->csr.rom);
+		return -ENOMEM;
+	}
+
+	/* Attach the descriptor to the vendor entry and both to the
+	 * root directory; the directory holds its own references. */
+	ret = csr1212_associate_keyval(vend_id, text);
+	csr1212_release_keyval(text);
+	ret |= csr1212_attach_keyval_to_directory(root, vend_id);
+	csr1212_release_keyval(vend_id);
+	if (ret != CSR1212_SUCCESS) {
+		csr1212_destroy_csr(host->csr.rom);
+		return -ENOMEM;
+	}
+
+	host->update_config_rom = 1;
+
+	return 0;
+}
+
+
+#ifdef CONFIG_IEEE1394_CONFIG_ROM_IP1394
+#include "eth1394.h"
+
+static struct csr1212_keyval *ip1394_ud;
+
+/* Build the shared IP-over-1394 unit directory (specifier id +
+ * "IANA" descriptor, version + "IPv4" descriptor), kept in ip1394_ud
+ * and attached to each host's ROM by the add hook.  Returns 0 on
+ * success, -ENOMEM on any allocation or attach failure. */
+static int config_rom_ip1394_init(void)
+{
+	struct csr1212_keyval *spec_id = NULL;
+	struct csr1212_keyval *spec_desc = NULL;
+	struct csr1212_keyval *ver = NULL;
+	struct csr1212_keyval *ver_desc = NULL;
+	int ret = -ENOMEM;
+
+	ip1394_ud = csr1212_new_directory(CSR1212_KV_ID_UNIT);
+
+	spec_id = csr1212_new_immediate(CSR1212_KV_ID_SPECIFIER_ID,
+					ETHER1394_GASP_SPECIFIER_ID);
+	spec_desc = csr1212_new_string_descriptor_leaf("IANA");
+	ver = csr1212_new_immediate(CSR1212_KV_ID_VERSION,
+				    ETHER1394_GASP_VERSION);
+	ver_desc = csr1212_new_string_descriptor_leaf("IPv4");
+
+	if (!ip1394_ud || !spec_id || !spec_desc || !ver || !ver_desc)
+		goto ip1394_fail;
+
+	if (csr1212_associate_keyval(spec_id, spec_desc) == CSR1212_SUCCESS &&
+	    csr1212_associate_keyval(ver, ver_desc) == CSR1212_SUCCESS &&
+	    csr1212_attach_keyval_to_directory(ip1394_ud, spec_id) == CSR1212_SUCCESS &&
+	    csr1212_attach_keyval_to_directory(ip1394_ud, ver) == CSR1212_SUCCESS)
+		ret = 0;
+
+ip1394_fail:
+	if (ret && ip1394_ud) {
+		csr1212_release_keyval(ip1394_ud);
+		ip1394_ud = NULL;
+	}
+
+	/* The directory holds its own references; drop our local
+	 * ones unconditionally. */
+	if (spec_id)
+		csr1212_release_keyval(spec_id);
+	if (spec_desc)
+		csr1212_release_keyval(spec_desc);
+	if (ver)
+		csr1212_release_keyval(ver);
+	if (ver_desc)
+		csr1212_release_keyval(ver_desc);
+
+	return ret;
+}
+
+/* Drop the module-wide reference to the IP1394 unit directory. */
+static void config_rom_ip1394_cleanup(void)
+{
+	if (!ip1394_ud)
+		return;
+
+	csr1212_release_keyval(ip1394_ud);
+	ip1394_ud = NULL;
+}
+
+/* Attach the shared IP1394 unit directory to a host's config ROM.
+ * Returns -ENODEV when the directory was never built, -ENOMEM when
+ * attaching fails, 0 on success. */
+static int config_rom_ip1394_add(struct hpsb_host *host)
+{
+	int ret = -ENODEV;
+
+	if (ip1394_ud) {
+		if (csr1212_attach_keyval_to_directory(host->csr.rom->root_kv,
+						       ip1394_ud) == CSR1212_SUCCESS)
+			ret = 0;
+		else
+			ret = -ENOMEM;
+	}
+
+	return ret;
+}
+
+/* Detach the shared IP1394 unit directory from a host's config ROM. */
+static void config_rom_ip1394_remove(struct hpsb_host *host)
+{
+	csr1212_detach_keyval_from_directory(host->csr.rom->root_kv, ip1394_ud);
+}
+
+static struct hpsb_config_rom_entry ip1394_entry = {
+	.name		= "ip1394",
+	.init		= config_rom_ip1394_init,
+	.add		= config_rom_ip1394_add,
+	.remove		= config_rom_ip1394_remove,
+	.cleanup	= config_rom_ip1394_cleanup,
+	.flag		= HPSB_CONFIG_ROM_ENTRY_IP1394,
+};
+#endif /* CONFIG_IEEE1394_CONFIG_ROM_IP1394 */
+
+
+static struct hpsb_config_rom_entry *const config_rom_entries[] = {
+#ifdef CONFIG_IEEE1394_CONFIG_ROM_IP1394
+	&ip1394_entry,
+#endif
+	NULL,
+};
+
+
+/* Run the init hook of every registered config ROM entry.  Returns 0
+ * when all succeeded, -1 if any failed (the remaining entries are
+ * still initialized). */
+int hpsb_init_config_roms(void)
+{
+	struct hpsb_config_rom_entry *const *entry;
+	int error = 0;
+
+	for (entry = config_rom_entries; *entry; entry++) {
+		if (!(*entry)->init)
+			continue;
+
+		if ((*entry)->init()) {
+			HPSB_ERR("Failed to initialize config rom entry `%s'",
+				 (*entry)->name);
+			error = -1;
+		} else
+			HPSB_DEBUG("Initialized config rom entry `%s'",
+				   (*entry)->name);
+	}
+
+	return error;
+}
+
+/* Run the cleanup hook of every registered config ROM entry. */
+void hpsb_cleanup_config_roms(void)
+{
+	struct hpsb_config_rom_entry *const *entry;
+
+	for (entry = config_rom_entries; *entry; entry++)
+		if ((*entry)->cleanup)
+			(*entry)->cleanup();
+}
+
+/* Attach every registered extra config ROM entry to a host, setting
+ * its flag in host->config_roms and marking the ROM for update.
+ * Returns 0 when all entries attached, -1 if any failed.
+ * NOTE(review): unlike the init/cleanup walkers, ->add is called
+ * unconditionally — every entry is assumed to provide it. */
+int hpsb_add_extra_config_roms(struct hpsb_host *host)
+{
+	int i, error = 0;
+
+	for (i = 0; config_rom_entries[i]; i++) {
+		if (config_rom_entries[i]->add(host)) {
+			HPSB_ERR("fw-host%d: Failed to attach config rom entry `%s'",
+				 host->id, config_rom_entries[i]->name);
+			error = -1;
+		} else {
+			host->config_roms |= config_rom_entries[i]->flag;
+			host->update_config_rom = 1;
+		}
+	}
+
+	return error;
+}
+
+/* Detach from a host every extra config ROM entry whose flag is set
+ * in host->config_roms, clearing the flag and marking the ROM for
+ * update. */
+void hpsb_remove_extra_config_roms(struct hpsb_host *host)
+{
+	int i;
+
+	for (i = 0; config_rom_entries[i]; i++) {
+		/* Skip entries that were never attached to this host. */
+		if (!(host->config_roms & config_rom_entries[i]->flag))
+			continue;
+
+		config_rom_entries[i]->remove(host);
+
+		host->config_roms &= ~config_rom_entries[i]->flag;
+		host->update_config_rom = 1;
+	}
+}
diff --git a/drivers/ieee1394/config_roms.h b/drivers/ieee1394/config_roms.h
new file mode 100644
index 000000000000..0a70544cfe65
--- /dev/null
+++ b/drivers/ieee1394/config_roms.h
@@ -0,0 +1,27 @@
+#ifndef _IEEE1394_CONFIG_ROMS_H
+#define _IEEE1394_CONFIG_ROMS_H
+
+#include "ieee1394_types.h"
+#include "hosts.h"
+
+/* The default host entry. This must succeed. */
+int hpsb_default_host_entry(struct hpsb_host *host);
+
+/* Initialize all config roms */
+int hpsb_init_config_roms(void);
+
+/* Cleanup all config roms */
+void hpsb_cleanup_config_roms(void);
+
+/* Add extra config roms to specified host */
+int hpsb_add_extra_config_roms(struct hpsb_host *host);
+
+/* Remove extra config roms from specified host */
+void hpsb_remove_extra_config_roms(struct hpsb_host *host);
+
+
+/* List of flags to check if a host contains a certain extra config rom
+ * entry. Available in the host->config_roms member. */
+#define HPSB_CONFIG_ROM_ENTRY_IP1394		0x00000001
+
+#endif /* _IEEE1394_CONFIG_ROMS_H */
diff --git a/drivers/ieee1394/csr.c b/drivers/ieee1394/csr.c
new file mode 100644
index 000000000000..1b98684aebcd
--- /dev/null
+++ b/drivers/ieee1394/csr.c
@@ -0,0 +1,857 @@
+/*
+ * IEEE 1394 for Linux
+ *
+ * CSR implementation, iso/bus manager implementation.
+ *
+ * Copyright (C) 1999 Andreas E. Bombe
+ *               2002 Manfred Weihs <weihs@ict.tuwien.ac.at>
+ *
+ * This code is licensed under the GPL.  See the file COPYING in the root
+ * directory of the kernel sources for details.
+ *
+ *
+ * Contributions:
+ *
+ * Manfred Weihs <weihs@ict.tuwien.ac.at>
+ *        configuration ROM manipulation
+ *
+ */
+
+#include <linux/string.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/param.h>
+#include <linux/spinlock.h>
+
+#include "csr1212.h"
+#include "ieee1394_types.h"
+#include "hosts.h"
+#include "ieee1394.h"
+#include "highlevel.h"
+
+/* Module Parameters */
+/* this module parameter can be used to disable mapping of the FCP registers */
+
+static int fcp = 1;
+module_param(fcp, int, 0444);
+MODULE_PARM_DESC(fcp, "Map FCP registers (default = 1, disable = 0).");
+
+static struct csr1212_keyval *node_cap = NULL;
+
+static void add_host(struct hpsb_host *host);
+static void remove_host(struct hpsb_host *host);
+static void host_reset(struct hpsb_host *host);
+static int read_maps(struct hpsb_host *host, int nodeid, quadlet_t *buffer,
+		     u64 addr, size_t length, u16 fl);
+static int write_fcp(struct hpsb_host *host, int nodeid, int dest,
+		     quadlet_t *data, u64 addr, size_t length, u16 flags);
+static int read_regs(struct hpsb_host *host, int nodeid, quadlet_t *buf,
+		     u64 addr, size_t length, u16 flags);
+static int write_regs(struct hpsb_host *host, int nodeid, int destid,
+		      quadlet_t *data, u64 addr, size_t length, u16 flags);
+static int lock_regs(struct hpsb_host *host, int nodeid, quadlet_t *store,
+		     u64 addr, quadlet_t data, quadlet_t arg, int extcode, u16 fl);
+static int lock64_regs(struct hpsb_host *host, int nodeid, octlet_t * store,
+		       u64 addr, octlet_t data, octlet_t arg, int extcode, u16 fl);
+static int read_config_rom(struct hpsb_host *host, int nodeid, quadlet_t *buffer,
+			   u64 addr, size_t length, u16 fl);
+static u64 allocate_addr_range(u64 size, u32 alignment, void *__host);
+static void release_addr_range(u64 addr, void *__host);
+
+static struct hpsb_highlevel csr_highlevel = {
+	.name =		"standard registers",
+	.add_host =	add_host,
+	.remove_host =	remove_host,
+	.host_reset =	host_reset,
+};
+
+static struct hpsb_address_ops map_ops = {
+	.read = read_maps,
+};
+
+static struct hpsb_address_ops fcp_ops = {
+	.write = write_fcp,
+};
+
+static struct hpsb_address_ops reg_ops = {
+	.read = read_regs,
+	.write = write_regs,
+	.lock = lock_regs,
+	.lock64 = lock64_regs,
+};
+
+static struct hpsb_address_ops config_rom_ops = {
+	.read = read_config_rom,
+};
+
+struct csr1212_bus_ops csr_bus_ops = {
+	.allocate_addr_range =	allocate_addr_range,
+	.release_addr =		release_addr_range,
+};
+
+
+/* Compute the 16-bit CRC used by the CSR architecture over @length
+ * big-endian quadlets at @data, four bits at a time (serial form of
+ * the algorithm; see also the topology/speed map headers built in
+ * host_reset()). */
+static u16 csr_crc16(unsigned *data, int length)
+{
+	int crc = 0;
+	int shift, sum, next = 0;
+	int i;
+
+	for (i = 0; i < length; i++, data++) {
+		for (shift = 28, next = crc; shift >= 0; shift -= 4) {
+			sum = ((next >> 12) ^ (be32_to_cpu(*data) >> shift)) & 0xf;
+			next = (next << 4) ^ (sum << 12) ^ (sum << 5) ^ sum;
+		}
+		crc = next & 0xffff;
+	}
+
+	return crc;
+}
+
+/* Bus reset callback: reinitialize the bus-dependent CSR registers and
+ * rebuild the topology and speed map headers for the new generation. */
+static void host_reset(struct hpsb_host *host)
+{
+        /* keep only bits 8-9 of STATE across the reset */
+        host->csr.state &= 0x300;
+
+        host->csr.bus_manager_id = 0x3f;
+        host->csr.bandwidth_available = 4915;
+	host->csr.channels_available_hi = 0xfffffffe;	/* pre-alloc ch 31 per 1394a-2000 */
+        host->csr.channels_available_lo = ~0;
+	host->csr.broadcast_channel = 0x80000000 | 31;
+
+	if (host->is_irm) {
+		if (host->driver->hw_csr_reg) {
+			/* mirror the channel pre-allocation into the
+			 * hardware CSR register (index 2 = channels hi) */
+			host->driver->hw_csr_reg(host, 2, 0xfffffffe, ~0);
+		}
+	}
+
+        host->csr.node_ids = host->node_id << 16;
+
+        if (!host->is_root) {
+                /* clear cmstr bit */
+                host->csr.state &= ~0x100;
+        }
+
+        /* topology_map[1] counts generations; [2] holds node and selfid
+         * counts; [0] is the length/CRC header covering the rest. */
+        host->csr.topology_map[1] =
+                cpu_to_be32(be32_to_cpu(host->csr.topology_map[1]) + 1);
+        host->csr.topology_map[2] = cpu_to_be32(host->node_count << 16
+                                                | host->selfid_count);
+        host->csr.topology_map[0] =
+                cpu_to_be32((host->selfid_count + 2) << 16
+                            | csr_crc16(host->csr.topology_map + 1,
+                                        host->selfid_count + 2));
+
+        /* same layout for the speed map: bump generation, rewrite the
+         * fixed-length (0x3f1 quadlets) header with a fresh CRC */
+        host->csr.speed_map[1] =
+                cpu_to_be32(be32_to_cpu(host->csr.speed_map[1]) + 1);
+        host->csr.speed_map[0] = cpu_to_be32(0x3f1 << 16
+                                             | csr_crc16(host->csr.speed_map+1,
+                                                         0x3f1));
+}
+
+/*
+ * HI == seconds (bits 0:2)
+ * LO == fraction units of 1/8000 of a second, as per 1394 (bits 19:31)
+ *
+ * Convert to units and then to HZ, for comparison to jiffies.
+ *
+ * By default this will end up being 800 units, or 100ms (125usec per
+ * unit).
+ *
+ * NOTE: The spec says 1/8000, but also says we can compute based on 1/8192
+ * like CSR specifies. Should make our math less complex.
+ */
+/* Recompute csr->expire (jiffies) from the SPLIT_TIMEOUT registers.
+ * Called whenever split_timeout_hi/lo change; see the block comment
+ * above for the unit derivation. */
+static inline void calculate_expire(struct csr_control *csr)
+{
+	unsigned long units;
+
+	/* Take the seconds (bits 0:2 of HI), and convert to 1/8192 units */
+	units = (unsigned long)(csr->split_timeout_hi & 0x07) << 13;
+
+	/* Add in the fractional units (bits 19:31 of LO) */
+	units += (unsigned long)(csr->split_timeout_lo >> 19);
+
+	/* Convert to jiffies */
+	csr->expire = (unsigned long)(units * HZ) >> 13UL;
+
+	/* Just to keep from rounding low */
+	csr->expire++;
+
+	HPSB_VERBOSE("CSR: setting expire to %lu, HZ=%u", csr->expire, HZ);
+}
+
+
+/* Highlevel add_host callback: register all standard CSR address ranges
+ * for @host, initialize its csr state, build the bus info block and
+ * attach the Node_Capabilities entry to the local config ROM. */
+static void add_host(struct hpsb_host *host)
+{
+	struct csr1212_keyval *root;
+	quadlet_t bus_info[CSR_BUS_INFO_SIZE];
+
+	hpsb_register_addrspace(&csr_highlevel, host, &reg_ops,
+				CSR_REGISTER_BASE,
+				CSR_REGISTER_BASE + CSR_CONFIG_ROM);
+	hpsb_register_addrspace(&csr_highlevel, host, &config_rom_ops,
+				CSR_REGISTER_BASE + CSR_CONFIG_ROM,
+				CSR_REGISTER_BASE + CSR_CONFIG_ROM_END);
+	/* FCP register mapping can be disabled via the "fcp" module param */
+	if (fcp) {
+		hpsb_register_addrspace(&csr_highlevel, host, &fcp_ops,
+					CSR_REGISTER_BASE + CSR_FCP_COMMAND,
+					CSR_REGISTER_BASE + CSR_FCP_END);
+	}
+	hpsb_register_addrspace(&csr_highlevel, host, &map_ops,
+				CSR_REGISTER_BASE + CSR_TOPOLOGY_MAP,
+				CSR_REGISTER_BASE + CSR_TOPOLOGY_MAP_END);
+	hpsb_register_addrspace(&csr_highlevel, host, &map_ops,
+				CSR_REGISTER_BASE + CSR_SPEED_MAP,
+				CSR_REGISTER_BASE + CSR_SPEED_MAP_END);
+
+        spin_lock_init(&host->csr.lock);
+
+        host->csr.state                 = 0;
+        host->csr.node_ids              = 0;
+        host->csr.split_timeout_hi      = 0;
+        host->csr.split_timeout_lo      = 800 << 19;
+	calculate_expire(&host->csr);
+        host->csr.cycle_time            = 0;
+        host->csr.bus_time              = 0;
+        host->csr.bus_manager_id        = 0x3f;
+        host->csr.bandwidth_available   = 4915;
+	host->csr.channels_available_hi = 0xfffffffe;	/* pre-alloc ch 31 per 1394a-2000 */
+        host->csr.channels_available_lo = ~0;
+	host->csr.broadcast_channel = 0x80000000 | 31;
+
+	if (host->is_irm) {
+		if (host->driver->hw_csr_reg) {
+			/* mirror channel pre-allocation into hardware CSR */
+			host->driver->hw_csr_reg(host, 2, 0xfffffffe, ~0);
+		}
+	}
+
+	/* derive the advertised max_rom read size from max_rec */
+	if (host->csr.max_rec >= 9)
+		host->csr.max_rom = 2;
+	else if (host->csr.max_rec >= 5)
+		host->csr.max_rom = 1;
+	else
+		host->csr.max_rom = 0;
+
+	host->csr.generation = 2;
+
+	/* bus_info[1] is the bus name, ASCII "1394" */
+	bus_info[1] = __constant_cpu_to_be32(0x31333934);
+	bus_info[2] = cpu_to_be32((1 << CSR_IRMC_SHIFT) |
+				  (1 << CSR_CMC_SHIFT) |
+				  (1 << CSR_ISC_SHIFT) |
+				  (0 << CSR_BMC_SHIFT) |
+				  (0 << CSR_PMC_SHIFT) |
+				  (host->csr.cyc_clk_acc << CSR_CYC_CLK_ACC_SHIFT) |
+				  (host->csr.max_rec << CSR_MAX_REC_SHIFT) |
+				  (host->csr.max_rom << CSR_MAX_ROM_SHIFT) |
+				  (host->csr.generation << CSR_GENERATION_SHIFT) |
+				  host->csr.lnk_spd);
+
+	bus_info[3] = cpu_to_be32(host->csr.guid_hi);
+	bus_info[4] = cpu_to_be32(host->csr.guid_lo);
+
+	/* The hardware copy of the bus info block will be set later when a
+	 * bus reset is issued. */
+
+	csr1212_init_local_csr(host->csr.rom, bus_info, host->csr.max_rom);
+
+	root = host->csr.rom->root_kv;
+
+	if(csr1212_attach_keyval_to_directory(root, node_cap) != CSR1212_SUCCESS) {
+		HPSB_ERR("Failed to attach Node Capabilities to root directory");
+	}
+
+	host->update_config_rom = 1;
+}
+
+/* Highlevel remove_host callback: detach the Node_Capabilities entry
+ * and rewrite the bus info block with all capability bits cleared. */
+static void remove_host(struct hpsb_host *host)
+{
+	quadlet_t bus_info[CSR_BUS_INFO_SIZE];
+
+	/* bus_info[1] is the bus name, ASCII "1394" */
+	bus_info[1] = __constant_cpu_to_be32(0x31333934);
+	bus_info[2] = cpu_to_be32((0 << CSR_IRMC_SHIFT) |
+				  (0 << CSR_CMC_SHIFT) |
+				  (0 << CSR_ISC_SHIFT) |
+				  (0 << CSR_BMC_SHIFT) |
+				  (0 << CSR_PMC_SHIFT) |
+				  (host->csr.cyc_clk_acc << CSR_CYC_CLK_ACC_SHIFT) |
+				  (host->csr.max_rec << CSR_MAX_REC_SHIFT) |
+				  (0 << CSR_MAX_ROM_SHIFT) |
+				  (0 << CSR_GENERATION_SHIFT) |
+				  host->csr.lnk_spd);
+
+	bus_info[3] = cpu_to_be32(host->csr.guid_hi);
+	bus_info[4] = cpu_to_be32(host->csr.guid_lo);
+
+	csr1212_detach_keyval_from_directory(host->csr.rom->root_kv, node_cap);
+
+	csr1212_init_local_csr(host->csr.rom, bus_info, 0);
+	host->update_config_rom = 1;
+}
+
+
+/* Deprecated interface: overwrite the generated config ROM image of
+ * @host with @new_rom (@buffersize bytes).
+ * Returns 0 on success, -1 if @rom_version does not match the current
+ * ROM generation, -2 if the buffer is larger than the ROM cache. */
+int hpsb_update_config_rom(struct hpsb_host *host, const quadlet_t *new_rom,
+	size_t buffersize, unsigned char rom_version)
+{
+	unsigned long flags;
+	int ret;
+
+	HPSB_NOTICE("hpsb_update_config_rom() is deprecated");
+
+        spin_lock_irqsave(&host->csr.lock, flags);
+	if (rom_version != host->csr.generation)
+                ret = -1;
+	else if (buffersize > host->csr.rom->cache_head->size)
+		ret = -2;
+        else {
+		/* Just overwrite the generated ConfigROM image with new data,
+		 * it can be regenerated later. */
+		memcpy(host->csr.rom->cache_head->data, new_rom, buffersize);
+		host->csr.rom->cache_head->len = buffersize;
+
+		if (host->driver->set_hw_config_rom)
+			host->driver->set_hw_config_rom(host, host->csr.rom->bus_info_data);
+		/* Increment the generation number to keep some sort of sync
+		 * with the newer ConfigROM manipulation method. */
+		host->csr.generation++;
+		/* generation must stay within 0x2..0xf; wrap back to 2 */
+		if (host->csr.generation > 0xf || host->csr.generation < 2)
+			host->csr.generation = 2;
+		ret=0;
+        }
+        spin_unlock_irqrestore(&host->csr.lock, flags);
+        return ret;
+}
+
+
+/* Read topology / speed maps and configuration ROM */
+static int read_maps(struct hpsb_host *host, int nodeid, quadlet_t *buffer,
+                     u64 addr, size_t length, u16 fl)
+{
+	unsigned long flags;
+        int csraddr = addr - CSR_REGISTER_BASE;
+        const char *src;
+
+        spin_lock_irqsave(&host->csr.lock, flags);
+
+	if (csraddr < CSR_SPEED_MAP) {
+                src = ((char *)host->csr.topology_map) + csraddr
+                        - CSR_TOPOLOGY_MAP;
+        } else {
+                src = ((char *)host->csr.speed_map) + csraddr - CSR_SPEED_MAP;
+        }
+
+        memcpy(buffer, src, length);
+        spin_unlock_irqrestore(&host->csr.lock, flags);
+        return RCODE_COMPLETE;
+}
+
+
+/* Helper macro for the register switch statements below: consume one
+ * quadlet of the request and fall through to the next register, so a
+ * multi-quadlet read/write walks consecutive registers. */
+#define out if (--length == 0) break
+
+/* Read handler for the core CSR registers.  @length is in bytes and
+ * must be quadlet-aligned, as must the register offset. */
+static int read_regs(struct hpsb_host *host, int nodeid, quadlet_t *buf,
+                     u64 addr, size_t length, u16 flags)
+{
+        int csraddr = addr - CSR_REGISTER_BASE;
+        int oldcycle;
+        quadlet_t ret;
+
+        if ((csraddr | length) & 0x3)
+                return RCODE_TYPE_ERROR;
+
+        length /= 4;
+
+        /* each case intentionally falls through via "out" above */
+        switch (csraddr) {
+        case CSR_STATE_CLEAR:
+                *(buf++) = cpu_to_be32(host->csr.state);
+                out;
+        case CSR_STATE_SET:
+                *(buf++) = cpu_to_be32(host->csr.state);
+                out;
+        case CSR_NODE_IDS:
+                *(buf++) = cpu_to_be32(host->csr.node_ids);
+                out;
+
+        case CSR_RESET_START:
+                return RCODE_TYPE_ERROR;
+
+                /* address gap - handled by default below */
+
+        case CSR_SPLIT_TIMEOUT_HI:
+                *(buf++) = cpu_to_be32(host->csr.split_timeout_hi);
+                out;
+        case CSR_SPLIT_TIMEOUT_LO:
+                *(buf++) = cpu_to_be32(host->csr.split_timeout_lo);
+                out;
+
+                /* address gap */
+                return RCODE_ADDRESS_ERROR;
+
+        case CSR_CYCLE_TIME:
+                oldcycle = host->csr.cycle_time;
+                host->csr.cycle_time =
+                        host->driver->devctl(host, GET_CYCLE_COUNTER, 0);
+
+                if (oldcycle > host->csr.cycle_time) {
+                        /* cycle time wrapped around */
+                        host->csr.bus_time += 1 << 7;
+                }
+                *(buf++) = cpu_to_be32(host->csr.cycle_time);
+                out;
+        case CSR_BUS_TIME:
+                oldcycle = host->csr.cycle_time;
+                host->csr.cycle_time =
+                        host->driver->devctl(host, GET_CYCLE_COUNTER, 0);
+
+                if (oldcycle > host->csr.cycle_time) {
+                        /* cycle time wrapped around */
+                        host->csr.bus_time += (1 << 7);
+                }
+                /* merge seconds (bus_time) with top bits of cycle_time */
+                *(buf++) = cpu_to_be32(host->csr.bus_time
+                                       | (host->csr.cycle_time >> 25));
+                out;
+
+                /* address gap */
+                return RCODE_ADDRESS_ERROR;
+
+        case CSR_BUSY_TIMEOUT:
+                /* not yet implemented */
+                return RCODE_ADDRESS_ERROR;
+
+        /* the four IRM registers prefer the hardware CSR mirror when the
+         * driver provides one (hw_csr_reg index 0..3) */
+        case CSR_BUS_MANAGER_ID:
+                if (host->driver->hw_csr_reg)
+                        ret = host->driver->hw_csr_reg(host, 0, 0, 0);
+                else
+                        ret = host->csr.bus_manager_id;
+
+                *(buf++) = cpu_to_be32(ret);
+                out;
+        case CSR_BANDWIDTH_AVAILABLE:
+                if (host->driver->hw_csr_reg)
+                        ret = host->driver->hw_csr_reg(host, 1, 0, 0);
+                else
+                        ret = host->csr.bandwidth_available;
+
+                *(buf++) = cpu_to_be32(ret);
+                out;
+        case CSR_CHANNELS_AVAILABLE_HI:
+                if (host->driver->hw_csr_reg)
+                        ret = host->driver->hw_csr_reg(host, 2, 0, 0);
+                else
+                        ret = host->csr.channels_available_hi;
+
+                *(buf++) = cpu_to_be32(ret);
+                out;
+        case CSR_CHANNELS_AVAILABLE_LO:
+                if (host->driver->hw_csr_reg)
+                        ret = host->driver->hw_csr_reg(host, 3, 0, 0);
+                else
+                        ret = host->csr.channels_available_lo;
+
+                *(buf++) = cpu_to_be32(ret);
+                out;
+
+	case CSR_BROADCAST_CHANNEL:
+		*(buf++) = cpu_to_be32(host->csr.broadcast_channel);
+		out;
+
+                /* address gap to end - fall through to default */
+        default:
+                return RCODE_ADDRESS_ERROR;
+        }
+
+        return RCODE_COMPLETE;
+}
+
+/* Write handler for the core CSR registers.  Same quadlet-alignment and
+ * "out"-macro fallthrough scheme as read_regs() above. */
+static int write_regs(struct hpsb_host *host, int nodeid, int destid,
+		      quadlet_t *data, u64 addr, size_t length, u16 flags)
+{
+        int csraddr = addr - CSR_REGISTER_BASE;
+
+        if ((csraddr | length) & 0x3)
+                return RCODE_TYPE_ERROR;
+
+        length /= 4;
+
+        switch (csraddr) {
+        case CSR_STATE_CLEAR:
+                /* FIXME FIXME FIXME */
+                printk("doh, someone wants to mess with state clear\n");
+                out;
+        case CSR_STATE_SET:
+                printk("doh, someone wants to mess with state set\n");
+                out;
+
+        case CSR_NODE_IDS:
+                /* only the bus_id field is writable; propagate it to the
+                 * host and the low-level driver */
+                host->csr.node_ids &= NODE_MASK << 16;
+                host->csr.node_ids |= be32_to_cpu(*(data++)) & (BUS_MASK << 16);
+                host->node_id = host->csr.node_ids >> 16;
+                host->driver->devctl(host, SET_BUS_ID, host->node_id >> 6);
+                out;
+
+        case CSR_RESET_START:
+                /* FIXME - perform command reset */
+                out;
+
+                /* address gap */
+                return RCODE_ADDRESS_ERROR;
+
+        case CSR_SPLIT_TIMEOUT_HI:
+                host->csr.split_timeout_hi =
+                        be32_to_cpu(*(data++)) & 0x00000007;
+		calculate_expire(&host->csr);
+                out;
+        case CSR_SPLIT_TIMEOUT_LO:
+                host->csr.split_timeout_lo =
+                        be32_to_cpu(*(data++)) & 0xfff80000;
+		calculate_expire(&host->csr);
+                out;
+
+                /* address gap */
+                return RCODE_ADDRESS_ERROR;
+
+        case CSR_CYCLE_TIME:
+                /* should only be set by cycle start packet, automatically */
+                host->csr.cycle_time = be32_to_cpu(*data);
+                host->driver->devctl(host, SET_CYCLE_COUNTER,
+                                       be32_to_cpu(*(data++)));
+                out;
+        case CSR_BUS_TIME:
+                host->csr.bus_time = be32_to_cpu(*(data++)) & 0xffffff80;
+                out;
+
+                /* address gap */
+                return RCODE_ADDRESS_ERROR;
+
+        case CSR_BUSY_TIMEOUT:
+                /* not yet implemented */
+                return RCODE_ADDRESS_ERROR;
+
+        case CSR_BUS_MANAGER_ID:
+        case CSR_BANDWIDTH_AVAILABLE:
+        case CSR_CHANNELS_AVAILABLE_HI:
+        case CSR_CHANNELS_AVAILABLE_LO:
+                /* these are not writable, only lockable */
+                return RCODE_TYPE_ERROR;
+
+	case CSR_BROADCAST_CHANNEL:
+		/* only the valid bit can be written */
+		host->csr.broadcast_channel = (host->csr.broadcast_channel & ~0x40000000)
+                        | (be32_to_cpu(*data) & 0x40000000);
+		out;
+
+                /* address gap to end - fall through */
+        default:
+                return RCODE_ADDRESS_ERROR;
+        }
+
+        return RCODE_COMPLETE;
+}
+
+#undef out
+
+
+/* 32-bit lock handler for the IRM registers (BUS_MANAGER_ID through
+ * CHANNELS_AVAILABLE_LO).  Only compare_swap is supported; the old
+ * register value is always returned via *store. */
+static int lock_regs(struct hpsb_host *host, int nodeid, quadlet_t *store,
+                     u64 addr, quadlet_t data, quadlet_t arg, int extcode, u16 fl)
+{
+        int csraddr = addr - CSR_REGISTER_BASE;
+        unsigned long flags;
+        quadlet_t *regptr = NULL;
+
+        if (csraddr & 0x3)
+		return RCODE_TYPE_ERROR;
+
+        if (csraddr < CSR_BUS_MANAGER_ID || csraddr > CSR_CHANNELS_AVAILABLE_LO
+            || extcode != EXTCODE_COMPARE_SWAP)
+                goto unsupported_lockreq;
+
+        data = be32_to_cpu(data);
+        arg = be32_to_cpu(arg);
+
+	/* Is somebody releasing the broadcast_channel on us? */
+	if (csraddr == CSR_CHANNELS_AVAILABLE_HI && (data & 0x1)) {
+		/* Note: this may not be the right way to handle
+		 * the problem, so we should look into the proper way
+		 * eventually. */
+		HPSB_WARN("Node [" NODE_BUS_FMT "] wants to release "
+			  "broadcast channel 31.  Ignoring.",
+			  NODE_BUS_ARGS(host, nodeid));
+
+		data &= ~0x1;	/* keep broadcast channel allocated */
+	}
+
+        /* delegate the whole compare-swap to hardware if available */
+        if (host->driver->hw_csr_reg) {
+                quadlet_t old;
+
+                old = host->driver->
+                        hw_csr_reg(host, (csraddr - CSR_BUS_MANAGER_ID) >> 2,
+                                   data, arg);
+
+                *store = cpu_to_be32(old);
+                return RCODE_COMPLETE;
+        }
+
+        spin_lock_irqsave(&host->csr.lock, flags);
+
+        switch (csraddr) {
+        case CSR_BUS_MANAGER_ID:
+                /* plain compare-swap */
+                regptr = &host->csr.bus_manager_id;
+		*store = cpu_to_be32(*regptr);
+		if (*regptr == arg)
+			*regptr = data;
+                break;
+
+        case CSR_BANDWIDTH_AVAILABLE:
+        {
+                quadlet_t bandwidth;
+                quadlet_t old;
+                quadlet_t new;
+
+                regptr = &host->csr.bandwidth_available;
+                old = *regptr;
+
+                /* bandwidth available algorithm adapted from IEEE 1394a-2000 spec */
+                if (arg > 0x1fff) {
+                        *store = cpu_to_be32(old);	/* change nothing */
+			break;
+                }
+                data &= 0x1fff;
+                if (arg >= data) {
+                        /* allocate bandwidth */
+                        bandwidth = arg - data;
+                        if (old >= bandwidth) {
+                                new = old - bandwidth;
+                                *store = cpu_to_be32(arg);
+                                *regptr = new;
+                        } else {
+                                *store = cpu_to_be32(old);
+                        }
+                } else {
+                        /* deallocate bandwidth */
+                        bandwidth = data - arg;
+                        if (old + bandwidth < 0x2000) {
+                                new = old + bandwidth;
+                                *store = cpu_to_be32(arg);
+                                *regptr = new;
+                        } else {
+                                *store = cpu_to_be32(old);
+                        }
+                }
+                break;
+        }
+
+        case CSR_CHANNELS_AVAILABLE_HI:
+        {
+                /* Lock algorithm for CHANNELS_AVAILABLE as recommended by 1394a-2000 */
+                quadlet_t affected_channels = arg ^ data;
+
+                regptr = &host->csr.channels_available_hi;
+
+                if ((arg & affected_channels) == (*regptr & affected_channels)) {
+                        *regptr ^= affected_channels;
+                        *store = cpu_to_be32(arg);
+                } else {
+                        *store = cpu_to_be32(*regptr);
+                }
+
+                break;
+        }
+
+        case CSR_CHANNELS_AVAILABLE_LO:
+        {
+                /* Lock algorithm for CHANNELS_AVAILABLE as recommended by 1394a-2000 */
+                quadlet_t affected_channels = arg ^ data;
+
+                regptr = &host->csr.channels_available_lo;
+
+                if ((arg & affected_channels) == (*regptr & affected_channels)) {
+                        *regptr ^= affected_channels;
+                        *store = cpu_to_be32(arg);
+                } else {
+                        *store = cpu_to_be32(*regptr);
+                }
+                break;
+        }
+        }
+
+        spin_unlock_irqrestore(&host->csr.lock, flags);
+
+        return RCODE_COMPLETE;
+
+ unsupported_lockreq:
+        switch (csraddr) {
+        case CSR_STATE_CLEAR:
+        case CSR_STATE_SET:
+        case CSR_RESET_START:
+        case CSR_NODE_IDS:
+        case CSR_SPLIT_TIMEOUT_HI:
+        case CSR_SPLIT_TIMEOUT_LO:
+        case CSR_CYCLE_TIME:
+        case CSR_BUS_TIME:
+	case CSR_BROADCAST_CHANNEL:
+                return RCODE_TYPE_ERROR;
+
+        case CSR_BUSY_TIMEOUT:
+                /* not yet implemented - fall through */
+        default:
+                return RCODE_ADDRESS_ERROR;
+        }
+}
+
+/* 64-bit lock handler: supports only compare_swap on the combined
+ * CHANNELS_AVAILABLE register pair (hi:lo as one octlet). */
+static int lock64_regs(struct hpsb_host *host, int nodeid, octlet_t * store,
+		       u64 addr, octlet_t data, octlet_t arg, int extcode, u16 fl)
+{
+	int csraddr = addr - CSR_REGISTER_BASE;
+	unsigned long flags;
+
+	data = be64_to_cpu(data);
+	arg = be64_to_cpu(arg);
+
+	if (csraddr & 0x3)
+		return RCODE_TYPE_ERROR;
+
+	if (csraddr != CSR_CHANNELS_AVAILABLE
+	    || extcode != EXTCODE_COMPARE_SWAP)
+		goto unsupported_lock64req;
+
+	/* Is somebody releasing the broadcast_channel on us?
+	 * (CSR_CHANNELS_AVAILABLE_HI and CSR_CHANNELS_AVAILABLE are both
+	 * 0x224 in csr.h, so this condition is always true after the
+	 * guard above.) */
+	if (csraddr == CSR_CHANNELS_AVAILABLE_HI && (data & 0x100000000ULL)) {
+		/* Note: this may not be the right way to handle
+		 * the problem, so we should look into the proper way
+                 * eventually. */
+		HPSB_WARN("Node [" NODE_BUS_FMT "] wants to release "
+			  "broadcast channel 31.  Ignoring.",
+			  NODE_BUS_ARGS(host, nodeid));
+
+		data &= ~0x100000000ULL;	/* keep broadcast channel allocated */
+	}
+
+	if (host->driver->hw_csr_reg) {
+		/* split the octlet into two hardware compare-swaps */
+		quadlet_t data_hi, data_lo;
+		quadlet_t arg_hi, arg_lo;
+		quadlet_t old_hi, old_lo;
+
+		data_hi = data >> 32;
+		data_lo = data & 0xFFFFFFFF;
+		arg_hi = arg >> 32;
+		arg_lo = arg & 0xFFFFFFFF;
+
+		old_hi = host->driver->hw_csr_reg(host, (csraddr - CSR_BUS_MANAGER_ID) >> 2,
+                                                  data_hi, arg_hi);
+
+		old_lo = host->driver->hw_csr_reg(host, ((csraddr + 4) - CSR_BUS_MANAGER_ID) >> 2,
+                                                  data_lo, arg_lo);
+
+		*store = cpu_to_be64(((octlet_t)old_hi << 32) | old_lo);
+	} else {
+		octlet_t old;
+		octlet_t affected_channels = arg ^ data;
+
+		spin_lock_irqsave(&host->csr.lock, flags);
+
+		old = ((octlet_t)host->csr.channels_available_hi << 32) | host->csr.channels_available_lo;
+
+		if ((arg & affected_channels) == (old & affected_channels)) {
+			host->csr.channels_available_hi ^= (affected_channels >> 32);
+			host->csr.channels_available_lo ^= (affected_channels & 0xffffffff);
+			*store = cpu_to_be64(arg);
+		} else {
+			*store = cpu_to_be64(old);
+		}
+
+		spin_unlock_irqrestore(&host->csr.lock, flags);
+	}
+
+	/* Is somebody erroneously releasing the broadcast_channel on us?
+	 * NOTE(review): this check runs outside csr.lock — presumably
+	 * benign, but verify against concurrent lock handlers. */
+	if (host->csr.channels_available_hi & 0x1)
+		host->csr.channels_available_hi &= ~0x1;
+
+	return RCODE_COMPLETE;
+
+ unsupported_lock64req:
+	switch (csraddr) {
+	case CSR_STATE_CLEAR:
+	case CSR_STATE_SET:
+	case CSR_RESET_START:
+	case CSR_NODE_IDS:
+	case CSR_SPLIT_TIMEOUT_HI:
+	case CSR_SPLIT_TIMEOUT_LO:
+	case CSR_CYCLE_TIME:
+	case CSR_BUS_TIME:
+	case CSR_BUS_MANAGER_ID:
+	case CSR_BROADCAST_CHANNEL:
+	case CSR_BUSY_TIMEOUT:
+	case CSR_BANDWIDTH_AVAILABLE:
+		return RCODE_TYPE_ERROR;
+
+	default:
+		return RCODE_ADDRESS_ERROR;
+	}
+}
+
+/* Write handler for the FCP command/response registers: forward the
+ * payload to the highlevel FCP dispatcher (direction 0 = command,
+ * 1 = response).  Payloads larger than 512 bytes are rejected. */
+static int write_fcp(struct hpsb_host *host, int nodeid, int dest,
+		     quadlet_t *data, u64 addr, size_t length, u16 flags)
+{
+	int csraddr = addr - CSR_REGISTER_BASE;
+	int direction;
+
+	if (length > 512)
+		return RCODE_TYPE_ERROR;
+
+	if (csraddr == CSR_FCP_COMMAND)
+		direction = 0;
+	else if (csraddr == CSR_FCP_RESPONSE)
+		direction = 1;
+	else
+		return RCODE_TYPE_ERROR;
+
+	highlevel_fcp_request(host, nodeid, direction, (u8 *)data, length);
+	return RCODE_COMPLETE;
+}
+
+/* Read handler for the configuration ROM range: serve the request from
+ * the csr1212-maintained local ROM image. */
+static int read_config_rom(struct hpsb_host *host, int nodeid, quadlet_t *buffer,
+			   u64 addr, size_t length, u16 fl)
+{
+	u32 offset = addr - CSR1212_REGISTER_SPACE_BASE;
+	int ok;
+
+	ok = (csr1212_read(host->csr.rom, offset, buffer, length) ==
+	      CSR1212_SUCCESS);
+	return ok ? RCODE_COMPLETE : RCODE_ADDRESS_ERROR;
+}
+
+/* csr1212 bus-ops callback: carve an address range of @size bytes
+ * (aligned to @alignment) out of the units space of the host passed
+ * as opaque context. */
+static u64 allocate_addr_range(u64 size, u32 alignment, void *__host)
+{
+	struct hpsb_host *host = __host;
+
+	return hpsb_allocate_and_register_addrspace(&csr_highlevel, host,
+						    &config_rom_ops, size,
+						    alignment,
+						    CSR1212_UNITS_SPACE_BASE,
+						    CSR1212_UNITS_SPACE_END);
+}
+
+/* csr1212 bus-ops callback: release an address range previously handed
+ * out by allocate_addr_range(). */
+static void release_addr_range(u64 addr, void *__host)
+{
+ 	struct hpsb_host *host = (struct hpsb_host*)__host;
+	hpsb_unregister_addrspace(&csr_highlevel, host, addr);
+}
+
+
+/* Module init: allocate the Node_Capabilities config ROM entry shared
+ * by all hosts and register the standard-registers highlevel driver.
+ * Returns 0 on success or -ENOMEM. */
+int init_csr(void)
+{
+	node_cap = csr1212_new_immediate(CSR1212_KV_ID_NODE_CAPABILITIES, 0x0083c0);
+	if (!node_cap) {
+		/* message previously misspelled "Capabilties" */
+		HPSB_ERR("Failed to allocate memory for Node Capabilities ConfigROM entry!");
+		return -ENOMEM;
+	}
+
+	hpsb_register_highlevel(&csr_highlevel);
+
+	return 0;
+}
+
+/* Module exit: drop our reference on the Node_Capabilities keyval and
+ * unregister the highlevel driver. */
+void cleanup_csr(void)
+{
+	if (node_cap) {
+		csr1212_release_keyval(node_cap);
+		node_cap = NULL;	/* avoid dangling pointer on re-init */
+	}
+	hpsb_unregister_highlevel(&csr_highlevel);
+}
diff --git a/drivers/ieee1394/csr.h b/drivers/ieee1394/csr.h
new file mode 100644
index 000000000000..ea9aa4f53ab6
--- /dev/null
+++ b/drivers/ieee1394/csr.h
@@ -0,0 +1,96 @@
+
+#ifndef _IEEE1394_CSR_H
+#define _IEEE1394_CSR_H
+
+#ifdef CONFIG_PREEMPT
+#include <linux/sched.h>
+#endif
+
+#include "csr1212.h"
+
+#define CSR_REGISTER_BASE  0xfffff0000000ULL
+
+/* register offsets relative to CSR_REGISTER_BASE */
+#define CSR_STATE_CLEAR           0x0
+#define CSR_STATE_SET             0x4
+#define CSR_NODE_IDS              0x8
+#define CSR_RESET_START           0xc
+#define CSR_SPLIT_TIMEOUT_HI      0x18
+#define CSR_SPLIT_TIMEOUT_LO      0x1c
+#define CSR_CYCLE_TIME            0x200
+#define CSR_BUS_TIME              0x204
+#define CSR_BUSY_TIMEOUT          0x210
+#define CSR_BUS_MANAGER_ID        0x21c
+#define CSR_BANDWIDTH_AVAILABLE   0x220
+#define CSR_CHANNELS_AVAILABLE    0x224
+#define CSR_CHANNELS_AVAILABLE_HI 0x224
+#define CSR_CHANNELS_AVAILABLE_LO 0x228
+#define CSR_BROADCAST_CHANNEL     0x234
+#define CSR_CONFIG_ROM            0x400
+#define CSR_CONFIG_ROM_END        0x800
+#define CSR_FCP_COMMAND           0xB00
+#define CSR_FCP_RESPONSE          0xD00
+#define CSR_FCP_END               0xF00
+#define CSR_TOPOLOGY_MAP          0x1000
+#define CSR_TOPOLOGY_MAP_END      0x1400
+#define CSR_SPEED_MAP             0x2000
+#define CSR_SPEED_MAP_END         0x3000
+
+/* IEEE 1394 bus specific Configuration ROM Key IDs */
+#define IEEE1394_KV_ID_POWER_REQUIREMENTS (0x30)
+
+/* IEEE 1394 Bus Information Block specifics */
+#define CSR_BUS_INFO_SIZE (5 * sizeof(quadlet_t))
+
+#define CSR_IRMC_SHIFT 31
+#define CSR_CMC_SHIFT  30
+#define CSR_ISC_SHIFT  29
+#define CSR_BMC_SHIFT  28
+#define CSR_PMC_SHIFT  27
+#define CSR_CYC_CLK_ACC_SHIFT 16
+#define CSR_MAX_REC_SHIFT 12
+#define CSR_MAX_ROM_SHIFT 8
+#define CSR_GENERATION_SHIFT 4
+
+#define CSR_SET_BUS_INFO_GENERATION(csr, gen)				\
+	((csr)->bus_info_data[2] =					\
+		cpu_to_be32((be32_to_cpu((csr)->bus_info_data[2]) &	\
+			     ~(0xf << CSR_GENERATION_SHIFT)) |          \
+			    (gen) << CSR_GENERATION_SHIFT))
+
+/* Per-host storage backing the standard CSR core registers (the
+ * quadlet fields, protected by @lock), the local bus info block
+ * parameters, and the csr1212 config ROM with its cached topology
+ * and speed maps. */
+struct csr_control {
+        spinlock_t lock;
+
+        quadlet_t state;
+        quadlet_t node_ids;
+        quadlet_t split_timeout_hi, split_timeout_lo;
+	unsigned long expire;	// Calculated from split_timeout
+        quadlet_t cycle_time;
+        quadlet_t bus_time;
+        quadlet_t bus_manager_id;
+        quadlet_t bandwidth_available;
+        quadlet_t channels_available_hi, channels_available_lo;
+	quadlet_t broadcast_channel;
+
+	/* Bus Info */
+	quadlet_t guid_hi, guid_lo;
+	u8 cyc_clk_acc;
+	u8 max_rec;
+	u8 max_rom;
+	u8 generation;	/* Only use values between 0x2 and 0xf */
+	u8 lnk_spd;
+
+	unsigned long gen_timestamp[16];
+
+	struct csr1212_csr *rom;
+
+        quadlet_t topology_map[256];
+        quadlet_t speed_map[1024];
+};
+
+extern struct csr1212_bus_ops csr_bus_ops;
+
+int init_csr(void);
+void cleanup_csr(void);
+
+#endif /* _IEEE1394_CSR_H */
diff --git a/drivers/ieee1394/csr1212.c b/drivers/ieee1394/csr1212.c
new file mode 100644
index 000000000000..7c4330e2e875
--- /dev/null
+++ b/drivers/ieee1394/csr1212.c
@@ -0,0 +1,1612 @@
+/*
+ * csr1212.c -- IEEE 1212 Control and Status Register support for Linux
+ *
+ * Copyright (C) 2003 Francois Retief <fgretief@sun.ac.za>
+ *                    Steve Kinneberg <kinnebergsteve@acmsystems.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *    1. Redistributions of source code must retain the above copyright notice,
+ *       this list of conditions and the following disclaimer.
+ *    2. Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *    3. The name of the author may not be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+/* TODO List:
+ * - Verify interface consistency: i.e., public functions that take a size
+ *   parameter expect size to be in bytes.
+ * - Convenience functions for reading a block of data from a given offset.
+ */
+
+#ifndef __KERNEL__
+#include <string.h>
+#endif
+
+#include "csr1212.h"
+
+
+/* Permitted key type for each key id */
+#define __I (1 << CSR1212_KV_TYPE_IMMEDIATE)
+#define __C (1 << CSR1212_KV_TYPE_CSR_OFFSET)
+#define __D (1 << CSR1212_KV_TYPE_DIRECTORY)
+#define __L (1 << CSR1212_KV_TYPE_LEAF)
+static const u_int8_t csr1212_key_id_type_map[0x30] = {
+	0,			/* Reserved */
+	__D | __L,		/* Descriptor */
+	__I | __D | __L,	/* Bus_Dependent_Info */
+	__I | __D | __L,	/* Vendor */
+	__I,			/* Hardware_Version */
+	0, 0,			/* Reserved */
+	__D | __L,		/* Module */
+	0, 0, 0, 0,		/* Reserved */
+	__I,			/* Node_Capabilities */
+	__L,			/* EUI_64 */
+	0, 0, 0,		/* Reserved */
+	__D,			/* Unit */
+	__I,			/* Specifier_ID */
+	__I,			/* Version */
+	__I | __C | __D | __L,	/* Dependent_Info */
+	__L,			/* Unit_Location */
+	0,			/* Reserved */
+	__I,			/* Model */
+	__D,			/* Instance */
+	__L,			/* Keyword */
+	__D,			/* Feature */
+	__L,			/* Extended_ROM */
+	__I,			/* Extended_Key_Specifier_ID */
+	__I,			/* Extended_Key */
+	__I | __C | __D | __L,	/* Extended_Data */
+	__L,			/* Modifiable_Descriptor */
+	__I,			/* Directory_ID */
+	__I,			/* Revision */
+};
+#undef __I
+#undef __C
+#undef __D
+#undef __L
+
+
+#define quads_to_bytes(_q) ((_q) * sizeof(u_int32_t))
+#define bytes_to_quads(_b) (((_b) + sizeof(u_int32_t) - 1) / sizeof(u_int32_t))
+
+/* Free a keyval and, for ordinary leaves, its payload buffer.  Extended
+ * ROM leaves point into their cache's data buffer (the cache owns that
+ * memory), so their data is deliberately not freed here. */
+static inline void free_keyval(struct csr1212_keyval *kv)
+{
+	if ((kv->key.type == CSR1212_KV_TYPE_LEAF) &&
+	    (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM))
+		CSR1212_FREE(kv->value.leaf.data);
+
+	CSR1212_FREE(kv);
+}
+
+/* Compute the IEEE 1212 16-bit CRC over 'length' big-endian quadlets.
+ * The result is returned already converted to big-endian, ready to be
+ * stored directly into a ROM image. */
+static u_int16_t csr1212_crc16(const u_int32_t *buffer, size_t length)
+{
+	int shift;
+	u_int32_t data;
+	u_int16_t sum, crc = 0;
+
+	for (; length; length--) {
+		data = CSR1212_BE32_TO_CPU(*buffer);
+		buffer++;
+		/* Fold each quadlet into the CRC 4 bits at a time, MSB first. */
+		for (shift = 28; shift >= 0; shift -= 4 ) {
+			sum = ((crc >> 12) ^ (data >> shift)) & 0xf;
+			crc = (crc << 4) ^ (sum << 12) ^ (sum << 5) ^ (sum);
+		}
+		crc &= 0xffff;
+	}
+
+	return CSR1212_CPU_TO_BE16(crc);
+}
+
+#if 0
+/* Microsoft computes the CRC with the bytes in reverse order.  Therefore we
+ * have a special version of the CRC algorithm to account for their buggy
+ * software. */
+static u_int16_t csr1212_msft_crc16(const u_int32_t *buffer, size_t length)
+{
+	int shift;
+	u_int32_t data;
+	u_int16_t sum, crc = 0;
+
+	for (; length; length--) {
+		data = CSR1212_LE32_TO_CPU(*buffer);
+		buffer++;
+		for (shift = 28; shift >= 0; shift -= 4 ) {
+			sum = ((crc >> 12) ^ (data >> shift)) & 0xf;
+			crc = (crc << 4) ^ (sum << 12) ^ (sum << 5) ^ (sum);
+		}
+		crc &= 0xffff;
+	}
+
+	return CSR1212_CPU_TO_BE16(crc);
+}
+#endif
+
+/* Return the directory entry of 'dir' that references 'kv', or NULL if
+ * 'kv' is not an entry of that directory. */
+static inline struct csr1212_dentry *csr1212_find_keyval(struct csr1212_keyval *dir,
+							 struct csr1212_keyval *kv)
+{
+	struct csr1212_dentry *pos;
+
+	for (pos = dir->value.directory.dentries_head;
+	     pos != NULL; pos = pos->next) {
+		if (pos->kv == kv)
+			return pos;
+	}
+	return NULL;
+}
+
+
+/* Search the keyval list starting after 'kv_list' for an entry laid out
+ * at ROM offset 'offset'; NULL if none matches.  The list may be
+ * circular, so the walk also stops if it wraps back to 'kv_list'. */
+static inline struct csr1212_keyval *csr1212_find_keyval_offset(struct csr1212_keyval *kv_list,
+								u_int32_t offset)
+{
+	struct csr1212_keyval *kv;
+
+	for (kv = kv_list->next; kv && (kv != kv_list); kv = kv->next) {
+		if (kv->offset == offset)
+			return kv;
+	}
+	return NULL;
+}
+
+
+/* Creation Routines */
+
+/* Allocate and initialize a CSR with a first ROM cache covering the 1K
+ * Config ROM space and an empty root directory.  'bus_info_size' is in
+ * bytes; 'private' is passed back to the bus ops callbacks.  Returns
+ * NULL on allocation failure. */
+struct csr1212_csr *csr1212_create_csr(struct csr1212_bus_ops *ops,
+				       size_t bus_info_size, void *private)
+{
+	struct csr1212_csr *csr;
+
+	csr = CSR1212_MALLOC(sizeof(*csr));
+	if (!csr)
+		return NULL;
+
+	csr->cache_head =
+		csr1212_rom_cache_malloc(CSR1212_CONFIG_ROM_SPACE_OFFSET,
+					 CSR1212_CONFIG_ROM_SPACE_SIZE);
+	if (!csr->cache_head) {
+		CSR1212_FREE(csr);
+		return NULL;
+	}
+
+	/* The keyval key id is not used for the root node, but a valid key id
+	 * that can be used for a directory needs to be passed to
+	 * csr1212_new_directory(). */
+	csr->root_kv = csr1212_new_directory(CSR1212_KV_ID_VENDOR);
+	if (!csr->root_kv) {
+		CSR1212_FREE(csr->cache_head);
+		CSR1212_FREE(csr);
+		return NULL;
+	}
+
+	/* The bus info block lives at the very start of the first cache. */
+	csr->bus_info_data = csr->cache_head->data;
+	csr->bus_info_len = bus_info_size;
+	csr->crc_len = bus_info_size;
+	csr->ops = ops;
+	csr->private = private;
+	csr->cache_tail = csr->cache_head;
+
+	return csr;
+}
+
+
+
+/* Install a local bus info block image.  'max_rom' is the 2-bit encoded
+ * field from the bus info block; mr_map translates it to a transfer
+ * size (presumably bytes per read; index 3 is reserved and maps to 0 --
+ * NOTE(review): confirm against IEEE 1394 bus info block definition). */
+void csr1212_init_local_csr(struct csr1212_csr *csr,
+			    const u_int32_t *bus_info_data, int max_rom)
+{
+	static const int mr_map[] = { 4, 64, 1024, 0 };
+
+	csr->max_rom = mr_map[max_rom];
+	memcpy(csr->bus_info_data, bus_info_data, csr->bus_info_len);
+}
+
+
+static struct csr1212_keyval *csr1212_new_keyval(u_int8_t type, u_int8_t key)
+{
+	struct csr1212_keyval *kv;
+
+	if (key < 0x30 && ((csr1212_key_id_type_map[key] & (1 << type)) == 0))
+		return NULL;
+
+	kv = CSR1212_MALLOC(sizeof(*kv));
+	if (!kv)
+		return NULL;
+
+	kv->key.type = type;
+	kv->key.id = key;
+
+	kv->associate = NULL;
+	kv->refcnt = 1;
+
+	kv->next = NULL;
+	kv->prev = NULL;
+	kv->offset = 0;
+	kv->valid = 0;
+	return kv;
+}
+
+struct csr1212_keyval *csr1212_new_immediate(u_int8_t key, u_int32_t value)
+{
+	struct csr1212_keyval *kv = csr1212_new_keyval(CSR1212_KV_TYPE_IMMEDIATE, key);
+
+	if (!kv)
+		return NULL;
+
+	kv->value.immediate = value;
+	kv->valid = 1;
+	return kv;
+}
+
+struct csr1212_keyval *csr1212_new_leaf(u_int8_t key, const void *data, size_t data_len)
+{
+	struct csr1212_keyval *kv = csr1212_new_keyval(CSR1212_KV_TYPE_LEAF, key);
+
+	if (!kv)
+		return NULL;
+
+	if (data_len > 0) {
+		kv->value.leaf.data = CSR1212_MALLOC(data_len);
+		if (!kv->value.leaf.data) {
+			CSR1212_FREE(kv);
+			return NULL;
+		}
+
+		if (data)
+			memcpy(kv->value.leaf.data, data, data_len);
+	} else {
+		kv->value.leaf.data = NULL;
+	}
+
+	kv->value.leaf.len = bytes_to_quads(data_len);
+	kv->offset = 0;
+	kv->valid = 1;
+
+	return kv;
+}
+
+struct csr1212_keyval *csr1212_new_csr_offset(u_int8_t key, u_int32_t csr_offset)
+{
+	struct csr1212_keyval *kv = csr1212_new_keyval(CSR1212_KV_TYPE_CSR_OFFSET, key);
+
+	if (!kv)
+		return NULL;
+
+	kv->value.csr_offset = csr_offset;
+
+	kv->offset = 0;
+	kv->valid = 1;
+	return kv;
+}
+
+struct csr1212_keyval *csr1212_new_directory(u_int8_t key)
+{
+	struct csr1212_keyval *kv = csr1212_new_keyval(CSR1212_KV_TYPE_DIRECTORY, key);
+
+	if (!kv)
+		return NULL;
+
+	kv->value.directory.len = 0;
+	kv->offset = 0;
+	kv->value.directory.dentries_head = NULL;
+	kv->value.directory.dentries_tail = NULL;
+	kv->valid = 1;
+	return kv;
+}
+
+int csr1212_associate_keyval(struct csr1212_keyval *kv,
+			     struct csr1212_keyval *associate)
+{
+	if (!kv || !associate)
+		return CSR1212_EINVAL;
+
+	if (kv->key.id == CSR1212_KV_ID_DESCRIPTOR ||
+	   (associate->key.id != CSR1212_KV_ID_DESCRIPTOR &&
+	    associate->key.id != CSR1212_KV_ID_DEPENDENT_INFO &&
+	    associate->key.id != CSR1212_KV_ID_EXTENDED_KEY &&
+	    associate->key.id != CSR1212_KV_ID_EXTENDED_DATA &&
+	    associate->key.id < 0x30))
+		return CSR1212_EINVAL;
+
+	if (kv->key.id == CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID &&
+	   associate->key.id != CSR1212_KV_ID_EXTENDED_KEY)
+		return CSR1212_EINVAL;
+
+	if (kv->key.id == CSR1212_KV_ID_EXTENDED_KEY &&
+	   associate->key.id != CSR1212_KV_ID_EXTENDED_DATA)
+		return CSR1212_EINVAL;
+
+	if (associate->key.id == CSR1212_KV_ID_EXTENDED_KEY &&
+	   kv->key.id != CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID)
+		return CSR1212_EINVAL;
+
+	if (associate->key.id == CSR1212_KV_ID_EXTENDED_DATA &&
+	   kv->key.id != CSR1212_KV_ID_EXTENDED_KEY)
+		return CSR1212_EINVAL;
+
+	if (kv->associate)
+		csr1212_release_keyval(kv->associate);
+
+	associate->refcnt++;
+	kv->associate = associate;
+
+	return CSR1212_SUCCESS;
+}
+
+int csr1212_attach_keyval_to_directory(struct csr1212_keyval *dir,
+				       struct csr1212_keyval *kv)
+{
+	struct csr1212_dentry *dentry;
+
+	if (!kv || !dir || dir->key.type != CSR1212_KV_TYPE_DIRECTORY)
+		return CSR1212_EINVAL;
+
+	dentry = CSR1212_MALLOC(sizeof(*dentry));
+	if (!dentry)
+		return CSR1212_ENOMEM;
+
+	dentry->kv = kv;
+
+	kv->refcnt++;
+
+	dentry->next = NULL;
+	dentry->prev = dir->value.directory.dentries_tail;
+
+	if (!dir->value.directory.dentries_head)
+		dir->value.directory.dentries_head = dentry;
+
+	if (dir->value.directory.dentries_tail)
+		dir->value.directory.dentries_tail->next = dentry;
+	dir->value.directory.dentries_tail = dentry;
+
+	return CSR1212_SUCCESS;
+}
+
+struct csr1212_keyval *csr1212_new_extended_immediate(u_int32_t spec, u_int32_t key,
+						      u_int32_t value)
+{
+	struct csr1212_keyval *kvs, *kvk, *kvv;
+
+	kvs = csr1212_new_immediate(CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID, spec);
+	kvk = csr1212_new_immediate(CSR1212_KV_ID_EXTENDED_KEY, key);
+	kvv = csr1212_new_immediate(CSR1212_KV_ID_EXTENDED_DATA, value);
+
+	if (!kvs || !kvk || !kvv) {
+		if (kvs)
+			free_keyval(kvs);
+		if (kvk)
+			free_keyval(kvk);
+		if (kvv)
+			free_keyval(kvv);
+		return NULL;
+	}
+
+	/* Don't keep a local reference to the extended key or value. */
+	kvk->refcnt = 0;
+	kvv->refcnt = 0;
+
+	csr1212_associate_keyval(kvk, kvv);
+	csr1212_associate_keyval(kvs, kvk);
+
+	return kvs;
+}
+
+struct csr1212_keyval *csr1212_new_extended_leaf(u_int32_t spec, u_int32_t key,
+						 const void *data, size_t data_len)
+{
+	struct csr1212_keyval *kvs, *kvk, *kvv;
+
+	kvs = csr1212_new_immediate(CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID, spec);
+	kvk = csr1212_new_immediate(CSR1212_KV_ID_EXTENDED_KEY, key);
+	kvv = csr1212_new_leaf(CSR1212_KV_ID_EXTENDED_DATA, data, data_len);
+
+	if (!kvs || !kvk || !kvv) {
+		if (kvs)
+			free_keyval(kvs);
+		if (kvk)
+			free_keyval(kvk);
+		if (kvv)
+			free_keyval(kvv);
+		return NULL;
+	}
+
+	/* Don't keep a local reference to the extended key or value. */
+	kvk->refcnt = 0;
+	kvv->refcnt = 0;
+
+	csr1212_associate_keyval(kvk, kvv);
+	csr1212_associate_keyval(kvs, kvk);
+
+	return kvs;
+}
+
+struct csr1212_keyval *csr1212_new_descriptor_leaf(u_int8_t dtype, u_int32_t specifier_id,
+						   const void *data, size_t data_len)
+{
+	struct csr1212_keyval *kv;
+
+	kv = csr1212_new_leaf(CSR1212_KV_ID_DESCRIPTOR, NULL,
+			      data_len + CSR1212_DESCRIPTOR_LEAF_OVERHEAD);
+	if (!kv)
+		return NULL;
+
+	CSR1212_DESCRIPTOR_LEAF_SET_TYPE(kv, dtype);
+	CSR1212_DESCRIPTOR_LEAF_SET_SPECIFIER_ID(kv, specifier_id);
+
+	if (data) {
+		memcpy(CSR1212_DESCRIPTOR_LEAF_DATA(kv), data, data_len);
+	}
+
+	return kv;
+}
+
+
+struct csr1212_keyval *csr1212_new_textual_descriptor_leaf(u_int8_t cwidth,
+							   u_int16_t cset,
+							   u_int16_t language,
+							   const void *data,
+							   size_t data_len)
+{
+	struct csr1212_keyval *kv;
+	char *lstr;
+
+	kv = csr1212_new_descriptor_leaf(0, 0, NULL, data_len +
+					 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_OVERHEAD);
+	if (!kv)
+		return NULL;
+
+	CSR1212_TEXTUAL_DESCRIPTOR_LEAF_SET_WIDTH(kv, cwidth);
+	CSR1212_TEXTUAL_DESCRIPTOR_LEAF_SET_CHAR_SET(kv, cset);
+	CSR1212_TEXTUAL_DESCRIPTOR_LEAF_SET_LANGUAGE(kv, language);
+
+	lstr = (char*)CSR1212_TEXTUAL_DESCRIPTOR_LEAF_DATA(kv);
+
+	/* make sure last quadlet is zeroed out */
+	*((u_int32_t*)&(lstr[(data_len - 1) & ~0x3])) = 0;
+
+	/* don't copy the NUL terminator */
+	memcpy(lstr, data, data_len);
+
+	return kv;
+}
+
+/* Return 0 if 's' contains only characters of the IEEE 1212 minimal
+ * ASCII set, -1 otherwise.  The table maps each permitted code point to
+ * itself and every forbidden one to 0, so a byte passes exactly when
+ * table[byte] == byte.  The '& 0x7F' mask keeps the index in range for
+ * bytes >= 0x80 (which then fail the identity check). */
+static int csr1212_check_minimal_ascii(const char *s)
+{
+	static const char minimal_ascii_table[] = {
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07,
+		0x00, 0x00, 0x0a, 0x00, 0x0C, 0x0D, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x20, 0x21, 0x22, 0x00, 0x00, 0x25, 0x26, 0x27,
+		0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+		0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+		0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+		0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+		0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+		0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
+		0x58, 0x59, 0x5a, 0x00, 0x00, 0x00, 0x00, 0x5f,
+		0x00, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
+		0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
+		0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
+		0x78, 0x79, 0x7a, 0x00, 0x00, 0x00, 0x00, 0x00,
+	};
+	for (; *s; s++) {
+		if (minimal_ascii_table[*s & 0x7F] != *s)
+			return -1; /* failed */
+	}
+	/* String conforms to minimal-ascii, as specified by IEEE 1212,
+	 * par. 7.4 */
+	return 0;
+}
+
+/* Build a textual descriptor leaf from a minimal-ASCII string, or
+ * return NULL if the string contains disallowed characters.
+ * NOTE(review): an empty string would reach
+ * csr1212_new_textual_descriptor_leaf() with data_len == 0 -- confirm
+ * that callers never pass one. */
+struct csr1212_keyval *csr1212_new_string_descriptor_leaf(const char *s)
+{
+	/* Check if string conform to minimal_ascii format */
+	if (csr1212_check_minimal_ascii(s))
+		return NULL;
+
+	/* IEEE 1212, par. 7.5.4.1  Textual descriptors (minimal ASCII) */
+	return csr1212_new_textual_descriptor_leaf(0, 0, 0, s, strlen(s));
+}
+
+struct csr1212_keyval *csr1212_new_icon_descriptor_leaf(u_int32_t version,
+							u_int8_t palette_depth,
+							u_int8_t color_space,
+							u_int16_t language,
+							u_int16_t hscan,
+							u_int16_t vscan,
+							u_int32_t *palette,
+							u_int32_t *pixels)
+{
+	static const int pd[4] = { 0, 4, 16, 256 };
+	static const int cs[16] = { 4, 2 };
+	struct csr1212_keyval *kv;
+	int palette_size = pd[palette_depth] * cs[color_space];
+	int pixel_size = (hscan * vscan + 3) & ~0x3;
+
+	if ((palette_depth && !palette) || !pixels)
+		return NULL;
+
+	kv = csr1212_new_descriptor_leaf(1, 0, NULL,
+					 palette_size + pixel_size +
+					 CSR1212_ICON_DESCRIPTOR_LEAF_OVERHEAD);
+	if (!kv)
+		return NULL;
+
+	CSR1212_ICON_DESCRIPTOR_LEAF_SET_VERSION(kv, version);
+	CSR1212_ICON_DESCRIPTOR_LEAF_SET_PALETTE_DEPTH(kv, palette_depth);
+	CSR1212_ICON_DESCRIPTOR_LEAF_SET_COLOR_SPACE(kv, color_space);
+	CSR1212_ICON_DESCRIPTOR_LEAF_SET_LANGUAGE(kv, language);
+	CSR1212_ICON_DESCRIPTOR_LEAF_SET_HSCAN(kv, hscan);
+	CSR1212_ICON_DESCRIPTOR_LEAF_SET_VSCAN(kv, vscan);
+
+	if (palette_size)
+		memcpy(CSR1212_ICON_DESCRIPTOR_LEAF_PALETTE(kv), palette,
+		       palette_size);
+
+	memcpy(CSR1212_ICON_DESCRIPTOR_LEAF_PIXELS(kv), pixels, pixel_size);
+
+	return kv;
+}
+
+struct csr1212_keyval *csr1212_new_modifiable_descriptor_leaf(u_int16_t max_size,
+							      u_int64_t address)
+{
+	struct csr1212_keyval *kv;
+
+	/* IEEE 1212, par. 7.5.4.3  Modifiable descriptors */
+	kv = csr1212_new_leaf(CSR1212_KV_ID_MODIFIABLE_DESCRIPTOR, NULL, sizeof(u_int64_t));
+	if(!kv)
+		return NULL;
+
+	CSR1212_MODIFIABLE_DESCRIPTOR_SET_MAX_SIZE(kv, max_size);
+	CSR1212_MODIFIABLE_DESCRIPTOR_SET_ADDRESS_HI(kv, address);
+	CSR1212_MODIFIABLE_DESCRIPTOR_SET_ADDRESS_LO(kv, address);
+
+	return kv;
+}
+
+static int csr1212_check_keyword(const char *s)
+{
+	for (; *s; s++) {
+
+		if (('A' <= *s) && (*s <= 'Z'))
+			continue;
+		if (('0' <= *s) && (*s <= '9'))
+			continue;
+		if (*s == '-')
+			continue;
+
+		return -1; /* failed */
+	}
+	/* String conforms to keyword, as specified by IEEE 1212,
+	 * par. 7.6.5 */
+	return CSR1212_SUCCESS;
+}
+
+struct csr1212_keyval *csr1212_new_keyword_leaf(int strc, const char *strv[])
+{
+	struct csr1212_keyval *kv;
+	char *buffer;
+	int i, data_len = 0;
+
+	/* Check all keywords to see if they conform to restrictions:
+	 * Only the following characters is allowed ['A'..'Z','0'..'9','-']
+	 * Each word is zero-terminated.
+	 * Also calculate the total length of the keywords.
+	 */
+	for (i = 0; i < strc; i++) {
+		if (!strv[i] || csr1212_check_keyword(strv[i])) {
+			return NULL;
+		}
+		data_len += strlen(strv[i]) + 1; /* Add zero-termination char. */
+	}
+
+	/* IEEE 1212, par. 7.6.5 Keyword leaves */
+	kv = csr1212_new_leaf(CSR1212_KV_ID_KEYWORD, NULL, data_len);
+	if (!kv)
+		return NULL;
+
+	buffer = (char *)kv->value.leaf.data;
+
+	/* make sure last quadlet is zeroed out */
+	*((u_int32_t*)&(buffer[(data_len - 1) & ~0x3])) = 0;
+
+	/* Copy keyword(s) into leaf data buffer */
+	for (i = 0; i < strc; i++) {
+		int len = strlen(strv[i]) + 1;
+		memcpy(buffer, strv[i], len);
+		buffer += len;
+	}
+	return kv;
+}
+
+
+/* Destruction Routines */
+
+/* Remove 'kv' from directory 'dir': unlink and free its dentry, then
+ * drop the reference the directory held on it.  A no-op if 'kv' is not
+ * actually an entry of 'dir'. */
+void csr1212_detach_keyval_from_directory(struct csr1212_keyval *dir,
+					  struct csr1212_keyval *kv)
+{
+	struct csr1212_dentry *dentry;
+
+	if (!kv || !dir || dir->key.type != CSR1212_KV_TYPE_DIRECTORY)
+		return;
+
+	dentry = csr1212_find_keyval(dir, kv);
+
+	if (!dentry)
+		return;
+
+	/* Unlink the dentry, fixing up head/tail if it was at either end. */
+	if (dentry->prev)
+		dentry->prev->next = dentry->next;
+	if (dentry->next)
+		dentry->next->prev = dentry->prev;
+	if (dir->value.directory.dentries_head == dentry)
+		dir->value.directory.dentries_head = dentry->next;
+	if (dir->value.directory.dentries_tail == dentry)
+		dir->value.directory.dentries_tail = dentry->prev;
+
+	CSR1212_FREE(dentry);
+
+	csr1212_release_keyval(kv);
+}
+
+
+/* Break the associate link of 'kv', releasing the reference taken by
+ * csr1212_associate_keyval(). */
+void csr1212_disassociate_keyval(struct csr1212_keyval *kv)
+{
+	if (kv->associate) {
+		csr1212_release_keyval(kv->associate);
+	}
+
+	kv->associate = NULL;
+}
+
+
+/* This function is used to free the memory taken by a keyval.  If the given
+ * keyval is a directory type, then any keyvals contained in that directory
+ * will be destroyed as well if their respective refcnts are 0.  By means of
+ * list manipulation, this routine will descend a directory structure in a
+ * non-recursive manner. */
+void _csr1212_destroy_keyval(struct csr1212_keyval *kv)
+{
+	struct csr1212_keyval *k, *a;
+	struct csr1212_dentry dentry;
+	struct csr1212_dentry *head, *tail;
+
+	/* Seed the work list with a stack-allocated dentry for 'kv';
+	 * directory contents are spliced onto this list as they are found. */
+	dentry.kv = kv;
+	dentry.next = NULL;
+	dentry.prev = NULL;
+
+	head = &dentry;
+	tail = head;
+
+	while (head) {
+		k = head->kv;
+
+		/* Walk the associate chain, dropping one reference per
+		 * keyval; stop at the first one that is still referenced. */
+		while (k) {
+			k->refcnt--;
+
+			if (k->refcnt > 0)
+				break;
+
+			a = k->associate;
+
+			if (k->key.type == CSR1212_KV_TYPE_DIRECTORY) {
+				/* If the current entry is a directory, then move all
+				 * the entries to the destruction list. */
+				if (k->value.directory.dentries_head) {
+					tail->next = k->value.directory.dentries_head;
+					k->value.directory.dentries_head->prev = tail;
+					tail = k->value.directory.dentries_tail;
+				}
+			}
+			free_keyval(k);
+			k = a;
+		}
+
+		/* Advance and free the dentry just processed -- except the
+		 * stack-allocated seed, which must never be CSR1212_FREE'd. */
+		head = head->next;
+		if (head) {
+			if (head->prev && head->prev != &dentry) {
+				CSR1212_FREE(head->prev);
+			}
+			head->prev = NULL;
+		} else if (tail != &dentry)
+			CSR1212_FREE(tail);
+	}
+}
+
+
+void csr1212_destroy_csr(struct csr1212_csr *csr)
+{
+	struct csr1212_csr_rom_cache *c, *oc;
+	struct csr1212_cache_region *cr, *ocr;
+
+	csr1212_release_keyval(csr->root_kv);
+
+	c = csr->cache_head;
+	while (c) {
+		oc = c;
+		cr = c->filled_head;
+		while (cr) {
+			ocr = cr;
+			cr = cr->next;
+			CSR1212_FREE(ocr);
+		}
+		c = c->next;
+		CSR1212_FREE(oc);
+	}
+
+	CSR1212_FREE(csr);
+}
+
+
+
+/* CSR Image Creation */
+
+static int csr1212_append_new_cache(struct csr1212_csr *csr, size_t romsize)
+{
+	struct csr1212_csr_rom_cache *cache;
+	u_int64_t csr_addr;
+
+	if (!csr || !csr->ops->allocate_addr_range ||
+	    !csr->ops->release_addr)
+		return CSR1212_ENOMEM;
+
+	/* ROM size must be a multiple of csr->max_rom */
+	romsize = (romsize + (csr->max_rom - 1)) & ~(csr->max_rom - 1);
+
+	csr_addr = csr->ops->allocate_addr_range(romsize, csr->max_rom, csr->private);
+	if (csr_addr == ~0ULL) {
+		return CSR1212_ENOMEM;
+	}
+	if (csr_addr < CSR1212_REGISTER_SPACE_BASE) {
+		/* Invalid address returned from allocate_addr_range(). */
+		csr->ops->release_addr(csr_addr, csr->private);
+		return CSR1212_ENOMEM;
+	}
+
+	cache = csr1212_rom_cache_malloc(csr_addr - CSR1212_REGISTER_SPACE_BASE, romsize);
+	if (!cache) {
+		csr->ops->release_addr(csr_addr, csr->private);
+		return CSR1212_ENOMEM;
+	}
+
+	cache->ext_rom = csr1212_new_keyval(CSR1212_KV_TYPE_LEAF, CSR1212_KV_ID_EXTENDED_ROM);
+	if (!cache->ext_rom) {
+		csr->ops->release_addr(csr_addr, csr->private);
+		CSR1212_FREE(cache);
+		return CSR1212_ENOMEM;
+	}
+
+	if (csr1212_attach_keyval_to_directory(csr->root_kv, cache->ext_rom) != CSR1212_SUCCESS) {
+		csr1212_release_keyval(cache->ext_rom);
+		csr->ops->release_addr(csr_addr, csr->private);
+		CSR1212_FREE(cache);
+		return CSR1212_ENOMEM;
+	}
+	cache->ext_rom->offset = csr_addr - CSR1212_REGISTER_SPACE_BASE;
+	cache->ext_rom->value.leaf.len = -1;
+	cache->ext_rom->value.leaf.data = cache->data;
+
+	/* Add cache to tail of cache list */
+	cache->prev = csr->cache_tail;
+	csr->cache_tail->next = cache;
+	csr->cache_tail = cache;
+	return CSR1212_SUCCESS;
+}
+
+static inline void csr1212_remove_cache(struct csr1212_csr *csr,
+					struct csr1212_csr_rom_cache *cache)
+{
+	if (csr->cache_head == cache)
+		csr->cache_head = cache->next;
+	if (csr->cache_tail == cache)
+		csr->cache_tail = cache->prev;
+
+	if (cache->prev)
+		cache->prev->next = cache->next;
+	if (cache->next)
+		cache->next->prev = cache->prev;
+
+	if (cache->ext_rom) {
+		csr1212_detach_keyval_from_directory(csr->root_kv, cache->ext_rom);
+		csr1212_release_keyval(cache->ext_rom);
+	}
+
+	CSR1212_FREE(cache);
+}
+
+static int csr1212_generate_layout_subdir(struct csr1212_keyval *dir,
+					  struct csr1212_keyval **layout_tail)
+{
+	struct csr1212_dentry *dentry;
+	struct csr1212_keyval *dkv;
+	struct csr1212_keyval *last_extkey_spec = NULL;
+	struct csr1212_keyval *last_extkey = NULL;
+	int num_entries = 0;
+
+	for (dentry = dir->value.directory.dentries_head; dentry;
+	     dentry = dentry->next) {
+		for (dkv = dentry->kv; dkv; dkv = dkv->associate) {
+			/* Special Case: Extended Key Specifier_ID */
+			if (dkv->key.id == CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID) {
+				if (last_extkey_spec == NULL) {
+					last_extkey_spec = dkv;
+				} else if (dkv->value.immediate != last_extkey_spec->value.immediate) {
+					last_extkey_spec = dkv;
+				} else {
+					continue;
+				}
+			/* Special Case: Extended Key */
+			} else if (dkv->key.id == CSR1212_KV_ID_EXTENDED_KEY) {
+				if (last_extkey == NULL) {
+					last_extkey = dkv;
+				} else if (dkv->value.immediate != last_extkey->value.immediate) {
+					last_extkey = dkv;
+				} else {
+					continue;
+				}
+			}
+
+			num_entries += 1;
+
+			switch(dkv->key.type) {
+			default:
+			case CSR1212_KV_TYPE_IMMEDIATE:
+			case CSR1212_KV_TYPE_CSR_OFFSET:
+				break;
+			case CSR1212_KV_TYPE_LEAF:
+			case CSR1212_KV_TYPE_DIRECTORY:
+				/* Remove from list */
+				if (dkv->prev && (dkv->prev->next == dkv))
+					dkv->prev->next = dkv->next;
+				if (dkv->next && (dkv->next->prev == dkv))
+					dkv->next->prev = dkv->prev;
+				//if (dkv == *layout_tail)
+				//	*layout_tail = dkv->prev;
+
+				/* Special case: Extended ROM leafs */
+				if (dkv->key.id == CSR1212_KV_ID_EXTENDED_ROM) {
+					dkv->value.leaf.len = -1;
+					/* Don't add Extended ROM leafs in the layout list,
+					 * they are handled differently. */
+					break;
+				}
+
+				/* Add to tail of list */
+				dkv->next = NULL;
+				dkv->prev = *layout_tail;
+				(*layout_tail)->next = dkv;
+				*layout_tail = dkv;
+				break;
+			}
+		}
+	}
+	return num_entries;
+}
+
+size_t csr1212_generate_layout_order(struct csr1212_keyval *kv)
+{
+	struct csr1212_keyval *ltail = kv;
+	size_t agg_size = 0;
+
+	while(kv) {
+		switch(kv->key.type) {
+		case CSR1212_KV_TYPE_LEAF:
+			/* Add 1 quadlet for crc/len field */
+			agg_size += kv->value.leaf.len + 1;
+			break;
+
+		case CSR1212_KV_TYPE_DIRECTORY:
+			kv->value.directory.len = csr1212_generate_layout_subdir(kv, &ltail);
+			/* Add 1 quadlet for crc/len field */
+			agg_size += kv->value.directory.len + 1;
+			break;
+		}
+		kv = kv->next;
+	}
+	return quads_to_bytes(agg_size);
+}
+
+/* Assign ROM offsets to the keyvals of the layout list, starting with
+ * 'start_kv' at byte position 'start_pos' inside 'cache'.  Keyvals are
+ * placed until the cache is full; the cache's layout_head/layout_tail
+ * and used length are recorded.  Returns the first keyval that did NOT
+ * fit (to be placed in the next cache), or NULL if all fit. */
+struct csr1212_keyval *csr1212_generate_positions(struct csr1212_csr_rom_cache *cache,
+						  struct csr1212_keyval *start_kv,
+						  int start_pos)
+{
+	struct csr1212_keyval *kv = start_kv;
+	struct csr1212_keyval *okv = start_kv;	/* last keyval that fit */
+	int pos = start_pos;
+	int kv_len = 0, okv_len = 0;
+
+	cache->layout_head = kv;
+
+	while(kv && pos < cache->size) {
+		/* Special case: Extended ROM leafs */
+		if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM) {
+			kv->offset = cache->offset + pos;
+		}
+
+		switch(kv->key.type) {
+		case CSR1212_KV_TYPE_LEAF:
+			kv_len = kv->value.leaf.len;
+			break;
+
+		case CSR1212_KV_TYPE_DIRECTORY:
+			kv_len = kv->value.directory.len;
+			break;
+
+		default:
+			/* Should never get here */
+			break;
+		}
+
+		/* +1 quadlet for the length/CRC header of the entry. */
+		pos += quads_to_bytes(kv_len + 1);
+
+		if (pos <= cache->size) {
+			okv = kv;
+			okv_len = kv_len;
+			kv = kv->next;
+		}
+	}
+
+	cache->layout_tail = okv;
+	cache->len = (okv->offset - cache->offset) + quads_to_bytes(okv_len + 1);
+
+	return kv;
+}
+
+static void csr1212_generate_tree_subdir(struct csr1212_keyval *dir,
+					 u_int32_t *data_buffer)
+{
+	struct csr1212_dentry *dentry;
+	struct csr1212_keyval *last_extkey_spec = NULL;
+	struct csr1212_keyval *last_extkey = NULL;
+	int index = 0;
+
+	for (dentry = dir->value.directory.dentries_head; dentry; dentry = dentry->next) {
+		struct csr1212_keyval *a;
+
+		for (a = dentry->kv; a; a = a->associate) {
+			u_int32_t value = 0;
+
+			/* Special Case: Extended Key Specifier_ID */
+			if (a->key.id == CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID) {
+				if (last_extkey_spec == NULL) {
+					last_extkey_spec = a;
+				} else if (a->value.immediate != last_extkey_spec->value.immediate) {
+					last_extkey_spec = a;
+				} else {
+					continue;
+				}
+			/* Special Case: Extended Key */
+			} else if (a->key.id == CSR1212_KV_ID_EXTENDED_KEY) {
+				if (last_extkey == NULL) {
+					last_extkey = a;
+				} else if (a->value.immediate != last_extkey->value.immediate) {
+					last_extkey = a;
+				} else {
+					continue;
+				}
+			}
+
+			switch(a->key.type) {
+			case CSR1212_KV_TYPE_IMMEDIATE:
+				value = a->value.immediate;
+				break;
+			case CSR1212_KV_TYPE_CSR_OFFSET:
+				value = a->value.csr_offset;
+				break;
+			case CSR1212_KV_TYPE_LEAF:
+				value = a->offset;
+				value -= dir->offset + quads_to_bytes(1+index);
+				value = bytes_to_quads(value);
+				break;
+			case CSR1212_KV_TYPE_DIRECTORY:
+				value = a->offset;
+				value -= dir->offset + quads_to_bytes(1+index);
+				value = bytes_to_quads(value);
+				break;
+			default:
+				/* Should never get here */
+				break; /* GDB breakpoint */
+			}
+
+			value |= (a->key.id & CSR1212_KV_KEY_ID_MASK) << CSR1212_KV_KEY_SHIFT;
+			value |= (a->key.type & CSR1212_KV_KEY_TYPE_MASK) <<
+				(CSR1212_KV_KEY_SHIFT + CSR1212_KV_KEY_TYPE_SHIFT);
+			data_buffer[index] = CSR1212_CPU_TO_BE32(value);
+			index++;
+		}
+	}
+}
+
+void csr1212_fill_cache(struct csr1212_csr_rom_cache *cache)
+{
+	struct csr1212_keyval *kv, *nkv;
+	struct csr1212_keyval_img *kvi;
+
+	for (kv = cache->layout_head; kv != cache->layout_tail->next; kv = nkv) {
+		kvi = (struct csr1212_keyval_img *)
+			(cache->data + bytes_to_quads(kv->offset - cache->offset));
+		switch(kv->key.type) {
+		default:
+		case CSR1212_KV_TYPE_IMMEDIATE:
+		case CSR1212_KV_TYPE_CSR_OFFSET:
+			/* Should never get here */
+			break; /* GDB breakpoint */
+
+		case CSR1212_KV_TYPE_LEAF:
+			/* Don't copy over Extended ROM areas, they are
+			 * already filled out! */
+			if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM)
+				memcpy(kvi->data, kv->value.leaf.data,
+				       quads_to_bytes(kv->value.leaf.len));
+
+			kvi->length = CSR1212_CPU_TO_BE16(kv->value.leaf.len);
+			kvi->crc = csr1212_crc16(kvi->data, kv->value.leaf.len);
+			break;
+
+		case CSR1212_KV_TYPE_DIRECTORY:
+			csr1212_generate_tree_subdir(kv, kvi->data);
+
+			kvi->length = CSR1212_CPU_TO_BE16(kv->value.directory.len);
+			kvi->crc = csr1212_crc16(kvi->data, kv->value.directory.len);
+			break;
+		}
+
+		nkv = kv->next;
+		if (kv->prev)
+			kv->prev->next = NULL;
+		if (kv->next)
+			kv->next->prev = NULL;
+		kv->prev = NULL;
+		kv->next = NULL;
+	}
+}
+
+/* Build the complete Config ROM image for 'csr': write the bus info
+ * block header/CRC, lay out all keyvals across the cache list (growing
+ * it with Extended ROM caches as needed), drop unused caches, then fill
+ * each cache's data buffer and CRC.  Returns CSR1212_SUCCESS or a
+ * CSR1212_* error code. */
+int csr1212_generate_csr_image(struct csr1212_csr *csr)
+{
+	struct csr1212_bus_info_block_img *bi;
+	struct csr1212_csr_rom_cache *cache;
+	struct csr1212_keyval *kv;
+	size_t agg_size;
+	int ret;
+	int init_offset;
+
+	if (!csr)
+		return CSR1212_EINVAL;
+
+	cache = csr->cache_head;
+
+	/* Phase 1: finalize the bus info block length and CRC. */
+	bi = (struct csr1212_bus_info_block_img*)cache->data;
+
+	bi->length = bytes_to_quads(csr->bus_info_len) - 1;
+	bi->crc_length = bi->length;
+	bi->crc = csr1212_crc16(bi->data, bi->crc_length);
+
+	csr->root_kv->next = NULL;
+	csr->root_kv->prev = NULL;
+
+	/* Phase 2: flatten the tree into a layout list and size it. */
+	agg_size = csr1212_generate_layout_order(csr->root_kv);
+
+	init_offset = csr->bus_info_len;
+
+	/* Phase 3: assign positions cache by cache, appending Extended
+	 * ROM caches (and restarting the layout) when space runs out. */
+	for (kv = csr->root_kv, cache = csr->cache_head; kv; cache = cache->next) {
+		if (!cache) {
+			/* Estimate approximate number of additional cache
+			 * regions needed (it assumes that the cache holding
+			 * the first 1K Config ROM space always exists). */
+			int est_c = agg_size / (CSR1212_EXTENDED_ROM_SIZE -
+						(2 * sizeof(u_int32_t))) + 1;
+
+			/* Add additional cache regions, extras will be
+			 * removed later */
+			for (; est_c; est_c--) {
+				ret = csr1212_append_new_cache(csr, CSR1212_EXTENDED_ROM_SIZE);
+				if (ret != CSR1212_SUCCESS)
+					return ret;
+			}
+			/* Need to re-layout for additional cache regions */
+			agg_size = csr1212_generate_layout_order(csr->root_kv);
+			kv = csr->root_kv;
+			cache = csr->cache_head;
+			init_offset = csr->bus_info_len;
+		}
+		kv = csr1212_generate_positions(cache, kv, init_offset);
+		agg_size -= cache->len;
+		init_offset = sizeof(u_int32_t);
+	}
+
+	/* Remove unused, excess cache regions */
+	while (cache) {
+		struct csr1212_csr_rom_cache *oc = cache;
+
+		cache = cache->next;
+		csr1212_remove_cache(csr, oc);
+	}
+
+	/* Phase 4: fill data buffers.
+	 * Go through the list backward so that when done, the correct CRC
+	 * will be calculated for the Extended ROM areas. */
+	for(cache = csr->cache_tail; cache; cache = cache->prev) {
+		/* Only Extended ROM caches should have this set. */
+		if (cache->ext_rom) {
+			int leaf_size;
+
+			/* Make sure the Extended ROM leaf is a multiple of
+			 * max_rom in size. */
+			leaf_size = (cache->len + (csr->max_rom - 1)) &
+				~(csr->max_rom - 1);
+
+			/* Zero out the unused ROM region */
+			memset(cache->data + bytes_to_quads(cache->len), 0x00,
+			       leaf_size - cache->len);
+
+			/* Subtract leaf header */
+			leaf_size -= sizeof(u_int32_t);
+
+			/* Update the Extended ROM leaf length */
+			cache->ext_rom->value.leaf.len =
+				bytes_to_quads(leaf_size);
+		} else {
+			/* Zero out the unused ROM region */
+			memset(cache->data + bytes_to_quads(cache->len), 0x00,
+			       cache->size - cache->len);
+		}
+
+		/* Copy the data into the cache buffer */
+		csr1212_fill_cache(cache);
+
+		if (cache != csr->cache_head) {
+			/* Set the length and CRC of the extended ROM. */
+			struct csr1212_keyval_img *kvi =
+				(struct csr1212_keyval_img*)cache->data;
+
+			kvi->length = CSR1212_CPU_TO_BE16(bytes_to_quads(cache->len) - 1);
+			kvi->crc = csr1212_crc16(kvi->data,
+						 bytes_to_quads(cache->len) - 1);
+
+		}
+	}
+
+	return CSR1212_SUCCESS;
+}
+
+/* Copy 'len' bytes of Config ROM data, starting at byte offset 'offset'
+ * from the base of the ROM image, out of whichever cache fully contains
+ * the requested range.  Returns CSR1212_ENOENT if no single cache covers
+ * the whole [offset, offset + len) span. */
+int csr1212_read(struct csr1212_csr *csr, u_int32_t offset, void *buffer, u_int32_t len)
+{
+	struct csr1212_csr_rom_cache *cache;
+
+	for (cache = csr->cache_head; cache; cache = cache->next) {
+		if (offset >= cache->offset &&
+		    (offset + len) <= (cache->offset + cache->size)) {
+			memcpy(buffer,
+			       &cache->data[bytes_to_quads(offset - cache->offset)],
+			       len);
+			return CSR1212_SUCCESS;
+		}
+	}
+	return CSR1212_ENOENT;
+}
+
+
+
+/* Parse a chunk of data as a Config ROM */
+
+/* Read the bus info block at the start of the Config ROM into the head
+ * cache, validate its advertised length, fill in csr->crc_len, and attach
+ * an initial "filled" cache region covering the bytes read so far. */
+static int csr1212_parse_bus_info_block(struct csr1212_csr *csr)
+{
+	struct csr1212_bus_info_block_img *bi;
+	struct csr1212_cache_region *cr;
+	int i;
+	int ret;
+
+	/* IEEE 1212 says that the entire bus info block should be readable in
+	 * a single transaction regardless of the max_rom value.
+	 * Unfortunately, many IEEE 1394 devices do not abide by that, so the
+	 * bus info block will be read 1 quadlet at a time.  The rest of the
+	 * ConfigROM will be read according to the max_rom field. */
+	for (i = 0; i < csr->bus_info_len; i += sizeof(csr1212_quad_t)) {
+		ret = csr->ops->bus_read(csr, CSR1212_CONFIG_ROM_SPACE_BASE + i,
+					 sizeof(csr1212_quad_t),
+					 &csr->cache_head->data[bytes_to_quads(i)],
+					 csr->private);
+		if (ret != CSR1212_SUCCESS)
+			return ret;
+	}
+
+	bi = (struct csr1212_bus_info_block_img*)csr->cache_head->data;
+	csr->crc_len = quads_to_bytes(bi->crc_length);
+
+	/* IEEE 1212 recommends that crc_len be equal to bus_info_len, but that is not
+	 * always the case, so read the rest of the crc area 1 quadlet at a time. */
+	for (i = csr->bus_info_len; i <= csr->crc_len; i += sizeof(csr1212_quad_t)) {
+		ret = csr->ops->bus_read(csr, CSR1212_CONFIG_ROM_SPACE_BASE + i,
+					 sizeof(csr1212_quad_t),
+					 &csr->cache_head->data[bytes_to_quads(i)],
+					 csr->private);
+		if (ret != CSR1212_SUCCESS)
+			return ret;
+	}
+
+	/* Sanity check: bi->length counts the quadlets after the header
+	 * quadlet, so it must match the configured bus_info_len. */
+	if (bytes_to_quads(csr->bus_info_len - sizeof(csr1212_quad_t)) != bi->length)
+		return CSR1212_EINVAL;
+
+#if 0
+	/* Apparently there are too many different wrong implementations of the
+	 * CRC algorithm that verifying them is moot. */
+	if ((csr1212_crc16(bi->data, bi->crc_length) != bi->crc) &&
+	    (csr1212_msft_crc16(bi->data, bi->crc_length) != bi->crc))
+		return CSR1212_EINVAL;
+#endif
+
+	/* Record the byte range read above as the cache's first filled
+	 * region. */
+	cr = CSR1212_MALLOC(sizeof(struct csr1212_cache_region));
+	if (!cr)
+		return CSR1212_ENOMEM;
+
+	cr->next = NULL;
+	cr->prev = NULL;
+	cr->offset_start = 0;
+	cr->offset_end = csr->crc_len + 4;	/* loop above read through byte offset crc_len inclusive */
+
+	csr->cache_head->filled_head = cr;
+	csr->cache_head->filled_tail = cr;
+
+	return CSR1212_SUCCESS;
+}
+
+/* Parse a single directory entry quadlet 'ki' located at ROM position
+ * 'kv_pos' and attach the resulting keyval to directory 'dir'.  Immediate
+ * and CSR-offset entries get fresh keyvals; Leaf/Directory entries are
+ * resolved by absolute offset against already-known keyvals, or created as
+ * placeholders (valid = 0) to be read later. */
+static int csr1212_parse_dir_entry(struct csr1212_keyval *dir,
+				   csr1212_quad_t ki,
+				   u_int32_t kv_pos)
+{
+	int ret = CSR1212_SUCCESS;
+	struct csr1212_keyval *k = NULL;
+	u_int32_t offset;
+
+	switch(CSR1212_KV_KEY_TYPE(ki)) {
+	case CSR1212_KV_TYPE_IMMEDIATE:
+		k = csr1212_new_immediate(CSR1212_KV_KEY_ID(ki),
+					  CSR1212_KV_VAL(ki));
+		if (!k) {
+			ret = CSR1212_ENOMEM;
+			goto fail;
+		}
+
+		k->refcnt = 0;	/* Don't keep local reference when parsing. */
+		break;
+
+	case CSR1212_KV_TYPE_CSR_OFFSET:
+		k = csr1212_new_csr_offset(CSR1212_KV_KEY_ID(ki),
+					   CSR1212_KV_VAL(ki));
+		if (!k) {
+			ret = CSR1212_ENOMEM;
+			goto fail;
+		}
+		k->refcnt = 0;	/* Don't keep local reference when parsing. */
+		break;
+
+	default:
+		/* Leaf or Directory: the value is a quadlet offset relative
+		 * to this entry's own position.  Compute the absolute offset
+		 * from 0xffff f000 0000. */
+		offset = quads_to_bytes(CSR1212_KV_VAL(ki)) + kv_pos;
+		if (offset == kv_pos) {
+			/* Uh-oh.  Can't have a relative offset of 0 for Leaves
+			 * or Directories.  The Config ROM image is most likely
+			 * messed up, so we'll just abort here. */
+			ret = CSR1212_EIO;
+			goto fail;
+		}
+
+		k = csr1212_find_keyval_offset(dir, offset);
+
+		if (k)
+			break;		/* Found it. */
+
+		if (CSR1212_KV_KEY_TYPE(ki) == CSR1212_KV_TYPE_DIRECTORY) {
+			k = csr1212_new_directory(CSR1212_KV_KEY_ID(ki));
+		} else {
+			k = csr1212_new_leaf(CSR1212_KV_KEY_ID(ki), NULL, 0);
+		}
+		if (!k) {
+			ret = CSR1212_ENOMEM;
+			goto fail;
+		}
+		k->refcnt = 0;	/* Don't keep local reference when parsing. */
+		k->valid = 0;	/* Contents not read yet so it's not valid. */
+		k->offset = offset;
+
+		/* Insert the placeholder into the flat list right after
+		 * 'dir' so it will be picked up and read later. */
+		k->prev = dir;
+		k->next = dir->next;
+		dir->next->prev = k;
+		dir->next = k;
+	}
+	ret = csr1212_attach_keyval_to_directory(dir, k);
+
+fail:
+	if (ret != CSR1212_SUCCESS) {
+		/* NOTE(review): if attachment fails for a keyval that was
+		 * located via csr1212_find_keyval_offset() (i.e. already
+		 * linked into the flat list), freeing it here looks unsafe —
+		 * confirm against csr1212_attach_keyval_to_directory()'s
+		 * failure modes. */
+		if (k)
+			free_keyval(k);
+	}
+	return ret;
+}
+
+
+/* Parse the image of keyval 'kv' out of 'cache', which must already hold
+ * the relevant bytes.  Directory entries are parsed one quadlet at a time
+ * via csr1212_parse_dir_entry(); Leaf data (except Extended ROM leaves,
+ * which stay backed by their own cache) is copied into a fresh buffer. */
+int csr1212_parse_keyval(struct csr1212_keyval *kv,
+			 struct csr1212_csr_rom_cache *cache)
+{
+	struct csr1212_keyval_img *kvi;
+	int i;
+	int ret = CSR1212_SUCCESS;
+	int kvi_len;
+
+	kvi = (struct csr1212_keyval_img*)&cache->data[bytes_to_quads(kv->offset -
+								      cache->offset)];
+	kvi_len = CSR1212_BE16_TO_CPU(kvi->length);
+
+#if 0
+	/* Apparently there are too many different wrong implementations of the
+	 * CRC algorithm that verifying them is moot. */
+	if ((csr1212_crc16(kvi->data, kvi_len) != kvi->crc) &&
+	    (csr1212_msft_crc16(kvi->data, kvi_len) != kvi->crc)) {
+		ret = CSR1212_EINVAL;
+		goto fail;
+	}
+#endif
+
+	switch(kv->key.type) {
+	case CSR1212_KV_TYPE_DIRECTORY:
+		for (i = 0; i < kvi_len; i++) {
+			csr1212_quad_t ki = kvi->data[i];
+
+			/* Some devices put null entries in their unit
+			 * directories.  If we come across such an entry,
+			 * then skip it. */
+			if (ki == 0x0)
+				continue;
+			/* NOTE(review): 'ret' is overwritten on every
+			 * iteration, so only the last entry's status
+			 * survives; earlier failures are lost and the
+			 * directory is still marked valid below. */
+			ret = csr1212_parse_dir_entry(kv, ki,
+						      (kv->offset +
+						       quads_to_bytes(i + 1)));
+		}
+		kv->value.directory.len = kvi_len;
+		break;
+
+	case CSR1212_KV_TYPE_LEAF:
+		if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM) {
+			kv->value.leaf.data = CSR1212_MALLOC(quads_to_bytes(kvi_len));
+			if (!kv->value.leaf.data)
+			{
+				ret = CSR1212_ENOMEM;
+				goto fail;
+			}
+
+			kv->value.leaf.len = kvi_len;
+			memcpy(kv->value.leaf.data, kvi->data, quads_to_bytes(kvi_len));
+		}
+		break;
+	}
+
+	kv->valid = 1;
+
+fail:
+	return ret;
+}
+
+
+/* Ensure the portion of the Config ROM that holds keyval 'kv' has been
+ * read from the device — reading more data as needed in max_rom-sized
+ * chunks and tracking what is present via the cache's filled-region list —
+ * then parse it with csr1212_parse_keyval().  A new cache is created on
+ * demand for Extended ROM leaves that fall outside every existing cache. */
+int _csr1212_read_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv)
+{
+	struct csr1212_cache_region *cr, *ncr, *newcr = NULL;
+	struct csr1212_keyval_img *kvi = NULL;
+	struct csr1212_csr_rom_cache *cache;
+	int cache_index;
+	u_int64_t addr;
+	u_int32_t *cache_ptr;
+	u_int16_t kv_len = 0;
+
+	if (!csr || !kv)
+		return CSR1212_EINVAL;
+
+	/* First find which cache the data should be in (or go in if not read
+	 * yet). */
+	for (cache = csr->cache_head; cache; cache = cache->next) {
+		if (kv->offset >= cache->offset &&
+		    kv->offset < (cache->offset + cache->size))
+			break;
+	}
+
+	if (!cache) {
+		csr1212_quad_t q;
+		u_int32_t cache_size;
+
+		/* Only create a new cache for Extended ROM leaves. */
+		if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM)
+			return CSR1212_EINVAL;
+
+		/* Read the leaf's header quadlet to learn its length. */
+		if (csr->ops->bus_read(csr,
+				       CSR1212_REGISTER_SPACE_BASE + kv->offset,
+				       sizeof(csr1212_quad_t), &q, csr->private)) {
+			return CSR1212_EIO;
+		}
+
+		kv->value.leaf.len = CSR1212_BE32_TO_CPU(q) >> 16;
+
+		/* Leaf size (incl. header quadlet) rounded up to a multiple
+		 * of max_rom. */
+		cache_size = (quads_to_bytes(kv->value.leaf.len + 1) +
+			      (csr->max_rom - 1)) & ~(csr->max_rom - 1);
+
+		cache = csr1212_rom_cache_malloc(kv->offset, cache_size);
+		if (!cache)
+			return CSR1212_ENOMEM;
+
+		/* Leaf data starts just past the header quadlet. */
+		kv->value.leaf.data = &cache->data[1];
+		csr->cache_tail->next = cache;
+		cache->prev = csr->cache_tail;
+		cache->next = NULL;
+		csr->cache_tail = cache;
+		cache->filled_head =
+			CSR1212_MALLOC(sizeof(struct csr1212_cache_region));
+		if (!cache->filled_head) {
+			return CSR1212_ENOMEM;
+		}
+
+		/* Mark the header quadlet as already read. */
+		cache->filled_head->offset_start = 0;
+		cache->filled_head->offset_end = sizeof(csr1212_quad_t);
+		cache->filled_tail = cache->filled_head;
+		cache->filled_head->next = NULL;
+		cache->filled_head->prev = NULL;
+		cache->data[0] = q;
+
+		/* Don't read the entire extended ROM now.  Pieces of it will
+		 * be read when entries inside it are read. */
+		return csr1212_parse_keyval(kv, cache);
+	}
+
+	cache_index = kv->offset - cache->offset;
+
+	/* Now search the already-read portions of the cache to see if the
+	 * keyval is there. */
+	for (cr = cache->filled_head; cr; cr = cr->next) {
+		if (cache_index < cr->offset_start) {
+			/* Falls in a gap before this region: start a new
+			 * (empty) region there, aligned down to max_rom.
+			 * NOTE(review): the new node is linked via cr->prev,
+			 * but the old prev's 'next' and filled_head are not
+			 * updated here — confirm the consolidation pass in
+			 * the read loop below makes this safe. */
+			newcr = CSR1212_MALLOC(sizeof(struct csr1212_cache_region));
+			if (!newcr)
+				return CSR1212_ENOMEM;
+
+			newcr->offset_start = cache_index & ~(csr->max_rom - 1);
+			newcr->offset_end = newcr->offset_start;
+			newcr->next = cr;
+			newcr->prev = cr->prev;
+			cr->prev = newcr;
+			cr = newcr;
+			break;
+		} else if ((cache_index >= cr->offset_start) &&
+			   (cache_index < cr->offset_end)) {
+			/* The keyval's header is already in the cache, so
+			 * its full length can be read from the image. */
+			kvi = (struct csr1212_keyval_img*)
+				(&cache->data[bytes_to_quads(cache_index)]);
+			kv_len = quads_to_bytes(CSR1212_BE16_TO_CPU(kvi->length) +
+						1);
+			break;
+		} else if (cache_index == cr->offset_end)
+			break;	/* Extend this region forward. */
+	}
+
+	if (!cr) {
+		/* Past the last region: append a new empty region at the
+		 * tail. */
+		cr = cache->filled_tail;
+		newcr = CSR1212_MALLOC(sizeof(struct csr1212_cache_region));
+		if (!newcr)
+			return CSR1212_ENOMEM;
+
+		newcr->offset_start = cache_index & ~(csr->max_rom - 1);
+		newcr->offset_end = newcr->offset_start;
+		newcr->prev = cr;
+		newcr->next = cr->next;
+		cr->next = newcr;
+		cr = newcr;
+		cache->filled_tail = newcr;
+	}
+
+	/* Keep reading max_rom-sized chunks until the whole keyval image
+	 * (header + kv_len bytes) is in the cache. */
+	while(!kvi || cr->offset_end < cache_index + kv_len) {
+		cache_ptr = &cache->data[bytes_to_quads(cr->offset_end &
+							~(csr->max_rom - 1))];
+
+		addr = (CSR1212_CSR_ARCH_REG_SPACE_BASE + cache->offset +
+			cr->offset_end) & ~(csr->max_rom - 1);
+
+		if (csr->ops->bus_read(csr, addr, csr->max_rom, cache_ptr,
+				       csr->private)) {
+			if (csr->max_rom == 4)
+				/* We've got problems! */
+				return CSR1212_EIO;
+
+			/* Apparently the max_rom value was a lie, set it to
+			 * do quadlet reads and try again. */
+			csr->max_rom = 4;
+			continue;
+		}
+
+		/* Advance offset_end to the next max_rom boundary. */
+		cr->offset_end += csr->max_rom - (cr->offset_end &
+						  (csr->max_rom - 1));
+
+		/* Once the header quadlet is in, compute the keyval's full
+		 * length in bytes (+1 quadlet for the header itself). */
+		if (!kvi && (cr->offset_end > cache_index)) {
+			kvi = (struct csr1212_keyval_img*)
+				(&cache->data[bytes_to_quads(cache_index)]);
+			kv_len = quads_to_bytes(CSR1212_BE16_TO_CPU(kvi->length) +
+						1);
+		}
+
+		if ((kv_len + (kv->offset - cache->offset)) > cache->size) {
+			/* The Leaf or Directory claims its length extends
+			 * beyond the ConfigROM image region and thus beyond the
+			 * end of our cache region.  Therefore, we abort now
+			 * rather than seg faulting later. */
+			return CSR1212_EIO;
+		}
+
+		ncr = cr->next;
+
+		if (ncr && (cr->offset_end >= ncr->offset_start)) {
+			/* consolidate region entries */
+			ncr->offset_start = cr->offset_start;
+
+			if (cr->prev)
+				cr->prev->next = cr->next;
+			ncr->prev = cr->prev;
+			if (cache->filled_head == cr)
+				cache->filled_head = ncr;
+			CSR1212_FREE(cr);
+			cr = ncr;
+		}
+	}
+
+	return csr1212_parse_keyval(kv, cache);
+}
+
+
+
+/* Top-level entry point for parsing a remote node's Config ROM: read the
+ * bus info block, determine max_rom, read and parse the Root directory,
+ * then pull in any Extended ROM regions referenced from it. */
+int csr1212_parse_csr(struct csr1212_csr *csr)
+{
+	/* Maps the get_max_rom() code (0, 1 or 2 per IEEE 1212) to a max
+	 * read size in bytes. */
+	static const int mr_map[] = { 4, 64, 1024, 0 };
+	struct csr1212_dentry *dentry;
+	int ret;
+
+	if (!csr || !csr->ops->bus_read)
+		return CSR1212_EINVAL;
+
+	ret = csr1212_parse_bus_info_block(csr);
+	if (ret != CSR1212_SUCCESS)
+		return ret;
+
+	if (!csr->ops->get_max_rom)
+		csr->max_rom = mr_map[0];	/* default value */
+	else
+		csr->max_rom = mr_map[csr->ops->get_max_rom(csr->bus_info_data,
+							    csr->private)];
+
+	csr->cache_head->layout_head = csr->root_kv;
+	csr->cache_head->layout_tail = csr->root_kv;
+
+	/* The Root directory immediately follows the bus info block. */
+	csr->root_kv->offset = (CSR1212_CONFIG_ROM_SPACE_BASE & 0xffff) +
+		csr->bus_info_len;
+
+	csr->root_kv->valid = 0;
+	csr->root_kv->next = csr->root_kv;
+	csr->root_kv->prev = csr->root_kv;
+	csr1212_get_keyval(csr, csr->root_kv);
+
+	/* Scan through the Root directory finding all extended ROM regions
+	 * and make cache regions for them */
+	for (dentry = csr->root_kv->value.directory.dentries_head;
+	     dentry; dentry = dentry->next) {
+		if (dentry->kv->key.id == CSR1212_KV_ID_EXTENDED_ROM) {
+			csr1212_get_keyval(csr, dentry->kv);
+
+			/* NOTE(review): 'ret' is never assigned inside this
+			 * loop, so the check below tests the stale value
+			 * from csr1212_parse_bus_info_block(); failures from
+			 * csr1212_get_keyval() are effectively ignored. */
+			if (ret != CSR1212_SUCCESS)
+				return ret;
+		}
+	}
+
+	return CSR1212_SUCCESS;
+}
diff --git a/drivers/ieee1394/csr1212.h b/drivers/ieee1394/csr1212.h
new file mode 100644
index 000000000000..e6734263a1d3
--- /dev/null
+++ b/drivers/ieee1394/csr1212.h
@@ -0,0 +1,727 @@
+/*
+ * csr1212.h -- IEEE 1212 Control and Status Register support for Linux
+ *
+ * Copyright (C) 2003 Francois Retief <fgretief@sun.ac.za>
+ *                    Steve Kinneberg <kinnebergsteve@acmsystems.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *    1. Redistributions of source code must retain the above copyright notice,
+ *       this list of conditions and the following disclaimer.
+ *    2. Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *    3. The name of the author may not be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __CSR1212_H__
+#define __CSR1212_H__
+
+
+/* Compatibility layer */
+#ifdef __KERNEL__
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/vmalloc.h>
+#include <asm/pgalloc.h>
+
+#define CSR1212_MALLOC(size)		vmalloc((size))
+#define CSR1212_FREE(ptr)		vfree(ptr)
+#define CSR1212_BE16_TO_CPU(quad)	be16_to_cpu(quad)
+#define CSR1212_CPU_TO_BE16(quad)	cpu_to_be16(quad)
+#define CSR1212_BE32_TO_CPU(quad)	be32_to_cpu(quad)
+#define CSR1212_CPU_TO_BE32(quad)	cpu_to_be32(quad)
+#define CSR1212_BE64_TO_CPU(quad)	be64_to_cpu(quad)
+#define CSR1212_CPU_TO_BE64(quad)	cpu_to_be64(quad)
+
+#define CSR1212_LE16_TO_CPU(quad)	le16_to_cpu(quad)
+#define CSR1212_CPU_TO_LE16(quad)	cpu_to_le16(quad)
+#define CSR1212_LE32_TO_CPU(quad)	le32_to_cpu(quad)
+#define CSR1212_CPU_TO_LE32(quad)	cpu_to_le32(quad)
+#define CSR1212_LE64_TO_CPU(quad)	le64_to_cpu(quad)
+#define CSR1212_CPU_TO_LE64(quad)	cpu_to_le64(quad)
+
+#include <linux/errno.h>
+#define CSR1212_SUCCESS (0)
+#define CSR1212_EINVAL	(-EINVAL)
+#define CSR1212_ENOMEM	(-ENOMEM)
+#define CSR1212_ENOENT	(-ENOENT)
+#define CSR1212_EIO	(-EIO)
+#define CSR1212_EBUSY	(-EBUSY)
+
+#else	/* Userspace */
+
+#include <sys/types.h>
+#include <malloc.h>
+#define CSR1212_MALLOC(size)		malloc(size)
+#define CSR1212_FREE(ptr)		free(ptr)
+#include <endian.h>
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+#include <byteswap.h>
+#define CSR1212_BE16_TO_CPU(quad)	bswap_16(quad)
+#define CSR1212_CPU_TO_BE16(quad)	bswap_16(quad)
+#define CSR1212_BE32_TO_CPU(quad)	bswap_32(quad)
+#define CSR1212_CPU_TO_BE32(quad)	bswap_32(quad)
+#define CSR1212_BE64_TO_CPU(quad)	bswap_64(quad)
+#define CSR1212_CPU_TO_BE64(quad)	bswap_64(quad)
+
+#define CSR1212_LE16_TO_CPU(quad)	(quad)
+#define CSR1212_CPU_TO_LE16(quad)	(quad)
+#define CSR1212_LE32_TO_CPU(quad)	(quad)
+#define CSR1212_CPU_TO_LE32(quad)	(quad)
+#define CSR1212_LE64_TO_CPU(quad)	(quad)
+#define CSR1212_CPU_TO_LE64(quad)	(quad)
+#else
+#define CSR1212_BE16_TO_CPU(quad)	(quad)
+#define CSR1212_CPU_TO_BE16(quad)	(quad)
+#define CSR1212_BE32_TO_CPU(quad)	(quad)
+#define CSR1212_CPU_TO_BE32(quad)	(quad)
+#define CSR1212_BE64_TO_CPU(quad)	(quad)
+#define CSR1212_CPU_TO_BE64(quad)	(quad)
+
+#define CSR1212_LE16_TO_CPU(quad)	bswap_16(quad)
+#define CSR1212_CPU_TO_LE16(quad)	bswap_16(quad)
+#define CSR1212_LE32_TO_CPU(quad)	bswap_32(quad)
+#define CSR1212_CPU_TO_LE32(quad)	bswap_32(quad)
+#define CSR1212_LE64_TO_CPU(quad)	bswap_64(quad)
+#define CSR1212_CPU_TO_LE64(quad)	bswap_64(quad)
+#endif
+
+#include <errno.h>
+#define CSR1212_SUCCESS (0)
+#define CSR1212_EINVAL	(EINVAL)
+#define CSR1212_ENOMEM	(ENOMEM)
+#define CSR1212_ENOENT	(ENOENT)
+#define CSR1212_EIO	(EIO)
+#define CSR1212_EBUSY	(EBUSY)
+
+#endif
+
+
+#define CSR1212_KV_VAL_MASK			0xffffff
+#define CSR1212_KV_KEY_SHIFT			24
+#define CSR1212_KV_KEY_TYPE_SHIFT		6
+#define CSR1212_KV_KEY_ID_MASK			0x3f
+#define CSR1212_KV_KEY_TYPE_MASK		0x3		/* After shift */
+
+
+/* CSR 1212 key types */
+#define CSR1212_KV_TYPE_IMMEDIATE		0
+#define CSR1212_KV_TYPE_CSR_OFFSET		1
+#define CSR1212_KV_TYPE_LEAF			2
+#define CSR1212_KV_TYPE_DIRECTORY		3
+
+
+/* CSR 1212 key ids */
+#define CSR1212_KV_ID_DESCRIPTOR		0x01
+#define CSR1212_KV_ID_BUS_DEPENDENT_INFO	0x02
+#define CSR1212_KV_ID_VENDOR			0x03
+#define CSR1212_KV_ID_HARDWARE_VERSION		0x04
+#define CSR1212_KV_ID_MODULE			0x07
+#define CSR1212_KV_ID_NODE_CAPABILITIES		0x0C
+#define CSR1212_KV_ID_EUI_64			0x0D
+#define CSR1212_KV_ID_UNIT			0x11
+#define CSR1212_KV_ID_SPECIFIER_ID		0x12
+#define CSR1212_KV_ID_VERSION			0x13
+#define CSR1212_KV_ID_DEPENDENT_INFO		0x14
+#define CSR1212_KV_ID_UNIT_LOCATION		0x15
+#define CSR1212_KV_ID_MODEL			0x17
+#define CSR1212_KV_ID_INSTANCE			0x18
+#define CSR1212_KV_ID_KEYWORD			0x19
+#define CSR1212_KV_ID_FEATURE			0x1A
+#define CSR1212_KV_ID_EXTENDED_ROM		0x1B
+#define CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID	0x1C
+#define CSR1212_KV_ID_EXTENDED_KEY		0x1D
+#define CSR1212_KV_ID_EXTENDED_DATA		0x1E
+#define CSR1212_KV_ID_MODIFIABLE_DESCRIPTOR	0x1F
+#define CSR1212_KV_ID_DIRECTORY_ID		0x20
+#define CSR1212_KV_ID_REVISION			0x21
+
+
+/* IEEE 1212 Address space map */
+#define CSR1212_ALL_SPACE_BASE			(0x000000000000ULL)
+#define CSR1212_ALL_SPACE_SIZE			(1ULL << 48)
+#define CSR1212_ALL_SPACE_END			(CSR1212_ALL_SPACE_BASE + CSR1212_ALL_SPACE_SIZE)
+
+#define  CSR1212_MEMORY_SPACE_BASE		(0x000000000000ULL)
+#define  CSR1212_MEMORY_SPACE_SIZE		((256ULL * (1ULL << 40)) - (512ULL * (1ULL << 20)))
+#define  CSR1212_MEMORY_SPACE_END		(CSR1212_MEMORY_SPACE_BASE + CSR1212_MEMORY_SPACE_SIZE)
+
+#define  CSR1212_PRIVATE_SPACE_BASE		(0xffffe0000000ULL)
+#define  CSR1212_PRIVATE_SPACE_SIZE		(256ULL * (1ULL << 20))
+#define  CSR1212_PRIVATE_SPACE_END		(CSR1212_PRIVATE_SPACE_BASE + CSR1212_PRIVATE_SPACE_SIZE)
+
+#define  CSR1212_REGISTER_SPACE_BASE		(0xfffff0000000ULL)
+#define  CSR1212_REGISTER_SPACE_SIZE		(256ULL * (1ULL << 20))
+#define  CSR1212_REGISTER_SPACE_END		(CSR1212_REGISTER_SPACE_BASE + CSR1212_REGISTER_SPACE_SIZE)
+
+#define  CSR1212_CSR_ARCH_REG_SPACE_BASE	(0xfffff0000000ULL)
+#define  CSR1212_CSR_ARCH_REG_SPACE_SIZE	(512)
+#define  CSR1212_CSR_ARCH_REG_SPACE_END		(CSR1212_CSR_ARCH_REG_SPACE_BASE + CSR1212_CSR_ARCH_REG_SPACE_SIZE)
+#define  CSR1212_CSR_ARCH_REG_SPACE_OFFSET	(CSR1212_CSR_ARCH_REG_SPACE_BASE - CSR1212_REGISTER_SPACE_BASE)
+
+#define  CSR1212_CSR_BUS_DEP_REG_SPACE_BASE	(0xfffff0000200ULL)
+#define  CSR1212_CSR_BUS_DEP_REG_SPACE_SIZE	(512)
+#define  CSR1212_CSR_BUS_DEP_REG_SPACE_END	(CSR1212_CSR_BUS_DEP_REG_SPACE_BASE + CSR1212_CSR_BUS_DEP_REG_SPACE_SIZE)
+#define  CSR1212_CSR_BUS_DEP_REG_SPACE_OFFSET	(CSR1212_CSR_BUS_DEP_REG_SPACE_BASE - CSR1212_REGISTER_SPACE_BASE)
+
+#define  CSR1212_CONFIG_ROM_SPACE_BASE		(0xfffff0000400ULL)
+#define  CSR1212_CONFIG_ROM_SPACE_SIZE		(1024)
+#define  CSR1212_CONFIG_ROM_SPACE_END		(CSR1212_CONFIG_ROM_SPACE_BASE + CSR1212_CONFIG_ROM_SPACE_SIZE)
+#define  CSR1212_CONFIG_ROM_SPACE_OFFSET	(CSR1212_CONFIG_ROM_SPACE_BASE - CSR1212_REGISTER_SPACE_BASE)
+
+#define  CSR1212_UNITS_SPACE_BASE		(0xfffff0000800ULL)
+#define  CSR1212_UNITS_SPACE_SIZE		((256ULL * (1ULL << 20)) - 2048)
+#define  CSR1212_UNITS_SPACE_END		(CSR1212_UNITS_SPACE_BASE + CSR1212_UNITS_SPACE_SIZE)
+#define  CSR1212_UNITS_SPACE_OFFSET		(CSR1212_UNITS_SPACE_BASE - CSR1212_REGISTER_SPACE_BASE)
+
+#define  CSR1212_EXTENDED_ROM_SIZE		(0x10000 * sizeof(u_int32_t))
+
+
+/* Config ROM image structures */
+/* Image of the bus info block header quadlet: length counts the quadlets
+ * that follow the header, crc_length the quadlets covered by the CRC; the
+ * raw bus-dependent data follows. */
+struct csr1212_bus_info_block_img {
+	u_int8_t length;
+	u_int8_t crc_length;
+	u_int16_t crc;
+
+	/* Must be last */
+	u_int32_t data[0];	/* older gcc can't handle [] which is standard */
+};
+
+#define CSR1212_KV_KEY(quad)		(CSR1212_BE32_TO_CPU(quad) >> CSR1212_KV_KEY_SHIFT)
+#define CSR1212_KV_KEY_TYPE(quad)	(CSR1212_KV_KEY(quad) >> CSR1212_KV_KEY_TYPE_SHIFT)
+#define CSR1212_KV_KEY_ID(quad)		(CSR1212_KV_KEY(quad) & CSR1212_KV_KEY_ID_MASK)
+#define CSR1212_KV_VAL(quad)		(CSR1212_BE32_TO_CPU(quad) & CSR1212_KV_VAL_MASK)
+
+#define CSR1212_SET_KV_KEY(quad, key)	((quad) = \
+	CSR1212_CPU_TO_BE32(CSR1212_KV_VAL(quad) | ((key) << CSR1212_KV_KEY_SHIFT)))
+#define CSR1212_SET_KV_VAL(quad, val)	((quad) = \
+	CSR1212_CPU_TO_BE32((CSR1212_KV_KEY(quad) << CSR1212_KV_KEY_SHIFT) | (val)))
+#define CSR1212_SET_KV_TYPEID(quad, type, id)	((quad) = \
+	CSR1212_CPU_TO_BE32(CSR1212_KV_VAL(quad) | \
+	(((((type) & CSR1212_KV_KEY_TYPE_MASK) << CSR1212_KV_KEY_TYPE_SHIFT) | \
+	  ((id) & CSR1212_KV_KEY_ID_MASK)) << CSR1212_KV_KEY_SHIFT)))
+
+typedef u_int32_t csr1212_quad_t;
+
+
+/* On-the-wire image of a Leaf or Directory: a header quadlet holding the
+ * length (in quadlets) and CRC, followed by the entry/data quadlets. */
+struct csr1212_keyval_img {
+	u_int16_t length;
+	u_int16_t crc;
+
+	/* Must be last */
+	csr1212_quad_t data[0];	/* older gcc can't handle [] which is standard */
+};
+
+/* In-memory value of a Leaf entry: 'len' quadlets stored at 'data'. */
+struct csr1212_leaf {
+	int len;
+	u_int32_t *data;
+};
+
+/* Doubly-linked directory entry node referencing one keyval. */
+struct csr1212_dentry {
+	struct csr1212_dentry *next, *prev;
+	struct csr1212_keyval *kv;
+};
+
+/* In-memory value of a Directory entry: entry count plus the dentry list. */
+struct csr1212_directory {
+	int len;
+	struct csr1212_dentry *dentries_head, *dentries_tail;
+};
+
+/* In-memory representation of a single Config ROM entry (key + value). */
+struct csr1212_keyval {
+	struct {
+		u_int8_t type;	/* CSR1212_KV_TYPE_* */
+		u_int8_t id;	/* CSR1212_KV_ID_* */
+	} key;
+	union {			/* interpreted according to key.type */
+		u_int32_t immediate;
+		u_int32_t csr_offset;
+		struct csr1212_leaf leaf;
+		struct csr1212_directory directory;
+	} value;
+	struct csr1212_keyval *associate;
+	int refcnt;
+
+	/* used in generating and/or parsing CSR image */
+	struct csr1212_keyval *next, *prev;	/* flat list of CSR elements */
+	u_int32_t offset;	/* position in CSR from 0xffff f000 0000 */
+	u_int8_t valid;		/* flag indicating keyval has valid data*/
+};
+
+
+/* One contiguous byte range of a ROM cache that has already been read from
+ * the device; linked into the cache's filled_head/filled_tail list. */
+struct csr1212_cache_region {
+	struct csr1212_cache_region *next, *prev;
+	u_int32_t offset_start;		/* inclusive */
+	u_int32_t offset_end;		/* exclusive */
+};
+
+/* Buffer holding (part of) the Config ROM image: the base 1K region or one
+ * Extended ROM region. */
+struct csr1212_csr_rom_cache {
+	struct csr1212_csr_rom_cache *next, *prev;
+	struct csr1212_cache_region *filled_head, *filled_tail;	/* ranges already read */
+	struct csr1212_keyval *layout_head, *layout_tail;
+	size_t size;		/* allocated size of data[] in bytes */
+	u_int32_t offset;	/* position of this region within the ROM space */
+	struct csr1212_keyval *ext_rom;	/* set only for Extended ROM caches */
+	size_t len;		/* bytes currently laid out / in use */
+
+	/* Must be last */
+	u_int32_t data[0];	/* older gcc can't handle [] which is standard */
+};
+
+/* Per-node CSR state: bus info, the root keyval tree, and the list of ROM
+ * image caches used while parsing or generating a Config ROM. */
+struct csr1212_csr {
+	size_t bus_info_len;	/* bus info block length in bytes */
+	size_t crc_len;		/* crc length in bytes */
+	u_int32_t *bus_info_data;	/* bus info data incl bus name and EUI */
+
+	void *private;		/* private, bus specific data */
+	struct csr1212_bus_ops *ops;
+
+	struct csr1212_keyval *root_kv;
+
+	int max_rom;		/* max bytes readable in Config ROM region */
+
+	/* Items below used for image parsing and generation */
+	struct csr1212_csr_rom_cache *cache_head, *cache_tail;
+};
+
+/* Callbacks supplied by the bus driver; csr1212 itself performs no bus
+ * I/O or address-space management directly. */
+struct csr1212_bus_ops {
+	/* This function is used by csr1212 to read additional information
+	 * from remote nodes when parsing a Config ROM (i.e., read Config ROM
+	 * entries located in the Units Space.  Must return 0 on success
+	 * anything else indicates an error. */
+	int (*bus_read) (struct csr1212_csr *csr, u_int64_t addr,
+			 u_int16_t length, void *buffer, void *private);
+
+	/* This function is used by csr1212 to allocate a region in units space
+	 * in the event that Config ROM entries don't all fit in the predefined
+	 * 1K region.  The void *private parameter is private member of struct
+	 * csr1212_csr. */
+	u_int64_t (*allocate_addr_range) (u_int64_t size, u_int32_t alignment,
+					  void *private);
+
+
+	/* This function is used by csr1212 to release a region in units space
+	 * that is no longer needed. */
+	void (*release_addr) (u_int64_t addr, void *private);
+
+	/* This function is used by csr1212 to determine the max read request
+	 * supported by a remote node when reading the ConfigROM space.  Must
+	 * return 0, 1, or 2 per IEEE 1212.  */
+	int (*get_max_rom) (u_int32_t *bus_info, void *private);
+};
+
+
+
+
+/* Descriptor Leaf manipulation macros */
+#define CSR1212_DESCRIPTOR_LEAF_TYPE_SHIFT 24
+#define CSR1212_DESCRIPTOR_LEAF_SPECIFIER_ID_MASK 0xffffff
+#define CSR1212_DESCRIPTOR_LEAF_OVERHEAD (1 * sizeof(u_int32_t))
+
+#define CSR1212_DESCRIPTOR_LEAF_TYPE(kv) \
+	(CSR1212_BE32_TO_CPU((kv)->value.leaf.data[0]) >> CSR1212_DESCRIPTOR_LEAF_TYPE_SHIFT)
+#define CSR1212_DESCRIPTOR_LEAF_SPECIFIER_ID(kv) \
+	(CSR1212_BE32_TO_CPU((kv)->value.leaf.data[0]) & \
+	 CSR1212_DESCRIPTOR_LEAF_SPECIFIER_ID_MASK)
+#define CSR1212_DESCRIPTOR_LEAF_DATA(kv) \
+	(&((kv)->value.leaf.data[1]))
+
+#define CSR1212_DESCRIPTOR_LEAF_SET_TYPE(kv, type) \
+	((kv)->value.leaf.data[0] = \
+	 CSR1212_CPU_TO_BE32(CSR1212_DESCRIPTOR_LEAF_SPECIFIER_ID(kv) | \
+			     ((type) << CSR1212_DESCRIPTOR_LEAF_TYPE_SHIFT)))
+#define CSR1212_DESCRIPTOR_LEAF_SET_SPECIFIER_ID(kv, spec_id) \
+	((kv)->value.leaf.data[0] = \
+	 CSR1212_CPU_TO_BE32((CSR1212_DESCRIPTOR_LEAF_TYPE(kv) << \
+			      CSR1212_DESCRIPTOR_LEAF_TYPE_SHIFT) | \
+			     ((spec_id) & CSR1212_DESCRIPTOR_LEAF_SPECIFIER_ID_MASK)))
+
+/* Text Descriptor Leaf manipulation macros */
+#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH_SHIFT 28
+#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH_MASK 0xf /* after shift */
+#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_SHIFT 16
+#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_MASK 0xfff  /* after shift */
+#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_LANGUAGE_MASK 0xffff
+#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_OVERHEAD (1 * sizeof(u_int32_t))
+
+#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH(kv) \
+	(CSR1212_BE32_TO_CPU((kv)->value.leaf.data[1]) >> \
+	 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH_SHIFT)
+#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET(kv) \
+	((CSR1212_BE32_TO_CPU((kv)->value.leaf.data[1]) >> \
+			     CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_SHIFT) & \
+			    CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_MASK)
+#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_LANGUAGE(kv) \
+	(CSR1212_BE32_TO_CPU((kv)->value.leaf.data[1]) & \
+	 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_LANGUAGE_MASK)
+#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_DATA(kv) \
+	(&((kv)->value.leaf.data[2]))
+
+#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_SET_WIDTH(kv, width) \
+	((kv)->value.leaf.data[1] = \
+	 ((kv)->value.leaf.data[1] & \
+	  CSR1212_CPU_TO_BE32(~(CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH_MASK << \
+				CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH_SHIFT))) | \
+	 CSR1212_CPU_TO_BE32(((width) & \
+			      CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH_MASK) << \
+			     CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH_SHIFT))
+#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_SET_CHAR_SET(kv, char_set) \
+	((kv)->value.leaf.data[1] = \
+	 ((kv)->value.leaf.data[1] & \
+	  CSR1212_CPU_TO_BE32(~(CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_MASK << \
+				CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_SHIFT))) | \
+	 CSR1212_CPU_TO_BE32(((char_set) & \
+			      CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_MASK) << \
+			     CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_SHIFT))
+#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_SET_LANGUAGE(kv, language) \
+	((kv)->value.leaf.data[1] = \
+	 ((kv)->value.leaf.data[1] & \
+	  CSR1212_CPU_TO_BE32(~(CSR1212_TEXTUAL_DESCRIPTOR_LEAF_LANGUAGE_MASK))) | \
+	 CSR1212_CPU_TO_BE32(((language) & \
+			      CSR1212_TEXTUAL_DESCRIPTOR_LEAF_LANGUAGE_MASK)))
+
+
+/* Icon Descriptor Leaf manipulation macros */
+#define CSR1212_ICON_DESCRIPTOR_LEAF_VERSION_MASK 0xffffff
+#define CSR1212_ICON_DESCRIPTOR_LEAF_PALETTE_DEPTH_SHIFT 30
+#define CSR1212_ICON_DESCRIPTOR_LEAF_PALETTE_DEPTH_MASK 0x3 /* after shift */
+#define CSR1212_ICON_DESCRIPTOR_LEAF_COLOR_SPACE_SHIFT 16
+#define CSR1212_ICON_DESCRIPTOR_LEAF_COLOR_SPACE_MASK 0xf /* after shift */
+#define CSR1212_ICON_DESCRIPTOR_LEAF_LANGUAGE_MASK 0xffff
+#define CSR1212_ICON_DESCRIPTOR_LEAF_HSCAN_SHIFT 16
+#define CSR1212_ICON_DESCRIPTOR_LEAF_HSCAN_MASK 0xffff /* after shift */
+#define CSR1212_ICON_DESCRIPTOR_LEAF_VSCAN_MASK 0xffff
+#define CSR1212_ICON_DESCRIPTOR_LEAF_OVERHEAD (3 * sizeof(u_int32_t))
+
+#define CSR1212_ICON_DESCRIPTOR_LEAF_VERSION(kv) \
+	(CSR1212_BE32_TO_CPU((kv)->value.leaf.data[2]) & \
+	 CSR1212_ICON_DESCRIPTOR_LEAF_VERSION_MASK)
+
+#define CSR1212_ICON_DESCRIPTOR_LEAF_PALETTE_DEPTH(kv) \
+	(CSR1212_BE32_TO_CPU((kv)->value.leaf.data[3]) >> \
+	 CSR1212_ICON_DESCRIPTOR_LEAF_PALETTE_DEPTH_SHIFT)
+
+#define CSR1212_ICON_DESCRIPTOR_LEAF_COLOR_SPACE(kv) \
+	((CSR1212_BE32_TO_CPU((kv)->value.leaf.data[3]) >> \
+	  CSR1212_ICON_DESCRIPTOR_LEAF_COLOR_SPACE_SHIFT) & \
+	 CSR1212_ICON_DESCRIPTOR_LEAF_COLOR_SPACE_MASK)
+
+#define CSR1212_ICON_DESCRIPTOR_LEAF_LANGUAGE(kv) \
+	(CSR1212_BE32_TO_CPU((kv)->value.leaf.data[3]) & \
+	 CSR1212_ICON_DESCRIPTOR_LEAF_LANGUAGE_MASK)
+
+/* Horizontal scan size of an icon descriptor leaf.
+ * Fixed: the original expansion referenced the undefined names
+ * CSR1212_ICON_DESCRIPTOR_LEAF_COLOR_HSCAN_SHIFT/_MASK; the constants are
+ * defined above as CSR1212_ICON_DESCRIPTOR_LEAF_HSCAN_SHIFT/_MASK. */
+#define CSR1212_ICON_DESCRIPTOR_LEAF_HSCAN(kv) \
+	((CSR1212_BE32_TO_CPU((kv)->value.leaf.data[4]) >> \
+	  CSR1212_ICON_DESCRIPTOR_LEAF_HSCAN_SHIFT) & \
+	 CSR1212_ICON_DESCRIPTOR_LEAF_HSCAN_MASK)
+
+#define CSR1212_ICON_DESCRIPTOR_LEAF_VSCAN(kv) \
+	(CSR1212_BE32_TO_CPU((kv)->value.leaf.data[4]) & \
+	 CSR1212_ICON_DESCRIPTOR_LEAF_VSCAN_MASK)
+
+#define CSR1212_ICON_DESCRIPTOR_LEAF_PALETTE(kv) \
+	(&((kv)->value.leaf.data[5]))
+
+/* Return a pointer to the pixel data of an icon descriptor leaf, i.e. the
+ * first quadlet past the palette area (whose size depends on palette depth
+ * and color space). */
+static inline u_int32_t *CSR1212_ICON_DESCRIPTOR_LEAF_PIXELS(struct csr1212_keyval *kv)
+{
+	/* pd: palette entries per palette_depth code; cs: bytes per palette
+	 * entry per color_space code.  NOTE(review): only cs[0] and cs[1]
+	 * are populated — any other color_space yields 0, skipping no
+	 * palette quadlets; confirm that is intended. */
+	static const int pd[4] = { 0, 4, 16, 256 };
+	static const int cs[16] = { 4, 2 };
+	int ps = pd[CSR1212_ICON_DESCRIPTOR_LEAF_PALETTE_DEPTH(kv)];
+
+	return &kv->value.leaf.data[5 +
+				    (ps * cs[CSR1212_ICON_DESCRIPTOR_LEAF_COLOR_SPACE(kv)]) /
+			   sizeof(u_int32_t)];
+}
+
+/* Read-modify-write field setters: each clears the target field inside the
+ * big-endian stored quadlet and ORs in the new (CPU-order) value. */
+
+/* replace the version field of quadlet 2 */
+#define CSR1212_ICON_DESCRIPTOR_LEAF_SET_VERSION(kv, version) \
+	((kv)->value.leaf.data[2] = \
+	 ((kv)->value.leaf.data[2] & \
+	  CSR1212_CPU_TO_BE32(~(CSR1212_ICON_DESCRIPTOR_LEAF_VERSION_MASK))) | \
+	 CSR1212_CPU_TO_BE32(((version) & \
+			      CSR1212_ICON_DESCRIPTOR_LEAF_VERSION_MASK)))
+
+/* replace the palette_depth field of quadlet 3 */
+#define CSR1212_ICON_DESCRIPTOR_LEAF_SET_PALETTE_DEPTH(kv, palette_depth) \
+	((kv)->value.leaf.data[3] = \
+	 ((kv)->value.leaf.data[3] & \
+	  CSR1212_CPU_TO_BE32(~(CSR1212_ICON_DESCRIPTOR_LEAF_PALETTE_DEPTH_MASK << \
+				CSR1212_ICON_DESCRIPTOR_LEAF_PALETTE_DEPTH_SHIFT))) | \
+	 CSR1212_CPU_TO_BE32(((palette_depth) & \
+			      CSR1212_ICON_DESCRIPTOR_LEAF_PALETTE_DEPTH_MASK) << \
+			     CSR1212_ICON_DESCRIPTOR_LEAF_PALETTE_DEPTH_SHIFT))
+
+/* replace the color_space field of quadlet 3 */
+#define CSR1212_ICON_DESCRIPTOR_LEAF_SET_COLOR_SPACE(kv, color_space) \
+	((kv)->value.leaf.data[3] = \
+	 ((kv)->value.leaf.data[3] & \
+	  CSR1212_CPU_TO_BE32(~(CSR1212_ICON_DESCRIPTOR_LEAF_COLOR_SPACE_MASK << \
+				CSR1212_ICON_DESCRIPTOR_LEAF_COLOR_SPACE_SHIFT))) | \
+	 CSR1212_CPU_TO_BE32(((color_space) & \
+			      CSR1212_ICON_DESCRIPTOR_LEAF_COLOR_SPACE_MASK) << \
+			     CSR1212_ICON_DESCRIPTOR_LEAF_COLOR_SPACE_SHIFT))
+
+/* replace the language field of quadlet 3 */
+#define CSR1212_ICON_DESCRIPTOR_LEAF_SET_LANGUAGE(kv, language) \
+	((kv)->value.leaf.data[3] = \
+	 ((kv)->value.leaf.data[3] & \
+	  CSR1212_CPU_TO_BE32(~(CSR1212_ICON_DESCRIPTOR_LEAF_LANGUAGE_MASK))) | \
+	 CSR1212_CPU_TO_BE32(((language) & \
+			      CSR1212_ICON_DESCRIPTOR_LEAF_LANGUAGE_MASK)))
+
+/* replace the hscan field of quadlet 4 */
+#define CSR1212_ICON_DESCRIPTOR_LEAF_SET_HSCAN(kv, hscan) \
+	((kv)->value.leaf.data[4] = \
+	 ((kv)->value.leaf.data[4] & \
+	  CSR1212_CPU_TO_BE32(~(CSR1212_ICON_DESCRIPTOR_LEAF_HSCAN_MASK << \
+				CSR1212_ICON_DESCRIPTOR_LEAF_HSCAN_SHIFT))) | \
+	 CSR1212_CPU_TO_BE32(((hscan) & \
+			      CSR1212_ICON_DESCRIPTOR_LEAF_HSCAN_MASK) << \
+			     CSR1212_ICON_DESCRIPTOR_LEAF_HSCAN_SHIFT))
+
+/* replace the vscan field of quadlet 4 */
+#define CSR1212_ICON_DESCRIPTOR_LEAF_SET_VSCAN(kv, vscan) \
+	((kv)->value.leaf.data[4] = \
+	 (((kv)->value.leaf.data[4] & \
+	  CSR1212_CPU_TO_BE32(~CSR1212_ICON_DESCRIPTOR_LEAF_VSCAN_MASK))) | \
+	 CSR1212_CPU_TO_BE32(((vscan) & \
+			      CSR1212_ICON_DESCRIPTOR_LEAF_VSCAN_MASK)))
+
+
+/* Modifiable Descriptor Leaf manipulation macros */
+#define CSR1212_MODIFIABLE_DESCRIPTOR_LEAF_MAX_SIZE_SHIFT 16
+#define CSR1212_MODIFIABLE_DESCRIPTOR_LEAF_MAX_SIZE_MASK 0xffff
+#define CSR1212_MODIFIABLE_DESCRIPTOR_LEAF_ADDR_HI_SHIFT 32
+#define CSR1212_MODIFIABLE_DESCRIPTOR_LEAF_ADDR_HI_MASK 0xffff
+#define CSR1212_MODIFIABLE_DESCRIPTOR_LEAF_ADDR_LO_MASK 0xffffffffULL
+
+/* max_size occupies the top 16 bits of the first quadlet.
+ * (Fixed: this previously referenced CSR1212_MODIFIABLE_DESCRIPTOR_MAX_SIZE_SHIFT,
+ * a name that is never defined - only the LEAF_ variant above exists.) */
+#define CSR1212_MODIFIABLE_DESCRIPTOR_MAX_SIZE(kv) \
+	CSR1212_BE16_TO_CPU((kv)->value.leaf.data[0] >> CSR1212_MODIFIABLE_DESCRIPTOR_LEAF_MAX_SIZE_SHIFT)
+
+/* 48-bit address: high 16 bits from quadlet 0, low 32 bits from quadlet 1.
+ * (Fixed: previously referenced the undefined CSR1212_MODIFIABLE_DESCRIPTOR_ADDR_HI_SHIFT.)
+ * NOTE(review): applying the 16-bit byte swap to the already-shifted 64-bit
+ * value looks suspect - confirm the intended byte order against callers. */
+#define CSR1212_MODIFIABLE_DESCRIPTOR_ADDRESS(kv) \
+	(CSR1212_BE16_TO_CPU(((u_int64_t)((kv)->value.leaf.data[0])) << \
+			     CSR1212_MODIFIABLE_DESCRIPTOR_LEAF_ADDR_HI_SHIFT) | \
+	 CSR1212_BE32_TO_CPU((kv)->value.leaf.data[1]))
+
+/* replace the max_size field, preserving the rest of quadlet 0 */
+#define CSR1212_MODIFIABLE_DESCRIPTOR_SET_MAX_SIZE(kv, size) \
+	((kv)->value.leaf.data[0] = \
+	 ((kv)->value.leaf.data[0] & \
+	  CSR1212_CPU_TO_BE32(~(CSR1212_MODIFIABLE_DESCRIPTOR_LEAF_MAX_SIZE_MASK << \
+				CSR1212_MODIFIABLE_DESCRIPTOR_LEAF_MAX_SIZE_SHIFT))) | \
+	 CSR1212_CPU_TO_BE32(((size) & \
+			      CSR1212_MODIFIABLE_DESCRIPTOR_LEAF_MAX_SIZE_MASK) << \
+			     CSR1212_MODIFIABLE_DESCRIPTOR_LEAF_MAX_SIZE_SHIFT))
+
+/* replace the upper 16 address bits, preserving the rest of quadlet 0 */
+#define CSR1212_MODIFIABLE_DESCRIPTOR_SET_ADDRESS_HI(kv, addr) \
+	((kv)->value.leaf.data[0] = \
+	 ((kv)->value.leaf.data[0] & \
+	  CSR1212_CPU_TO_BE32(~(CSR1212_MODIFIABLE_DESCRIPTOR_LEAF_ADDR_HI_MASK))) | \
+	  CSR1212_CPU_TO_BE32(((addr) & \
+			       CSR1212_MODIFIABLE_DESCRIPTOR_LEAF_ADDR_HI_MASK)))
+
+/* store the low 32 address bits in quadlet 1
+ * ((addr) is now parenthesized for macro-argument hygiene) */
+#define CSR1212_MODIFIABLE_DESCRIPTOR_SET_ADDRESS_LO(kv, addr) \
+	((kv)->value.leaf.data[1] = \
+	 CSR1212_CPU_TO_BE32((addr) & CSR1212_MODIFIABLE_DESCRIPTOR_LEAF_ADDR_LO_MASK))
+
+
+
+/* The following two functions are for creating new Configuration ROM trees.  The
+ * first function is used for both creating local trees and parsing remote
+ * trees.  The second function adds pertinent information to local Configuration
+ * ROM trees - namely data for the bus information block. */
+extern struct csr1212_csr *csr1212_create_csr(struct csr1212_bus_ops *ops,
+					      size_t bus_info_size,
+					      void *private);
+extern void csr1212_init_local_csr(struct csr1212_csr *csr,
+				   const u_int32_t *bus_info_data, int max_rom);
+
+
+/* The following function destroys a Configuration ROM tree and releases all
+ * memory taken by the tree. */
+extern void csr1212_destroy_csr(struct csr1212_csr *csr);
+
+
+/* The following set of functions are for creating new keyvals for placement in
+ * a Configuration ROM tree.  Code that creates new keyvals with these functions
+ * must release those keyvals with csr1212_release_keyval() when they are no
+ * longer needed. */
+extern struct csr1212_keyval *csr1212_new_immediate(u_int8_t key, u_int32_t value);
+extern struct csr1212_keyval *csr1212_new_leaf(u_int8_t key, const void *data,
+					       size_t data_len);
+extern struct csr1212_keyval *csr1212_new_csr_offset(u_int8_t key,
+						     u_int32_t csr_offset);
+extern struct csr1212_keyval *csr1212_new_directory(u_int8_t key);
+extern struct csr1212_keyval *csr1212_new_extended_immediate(u_int32_t spec,
+							     u_int32_t key,
+							     u_int32_t value);
+extern struct csr1212_keyval *csr1212_new_extended_leaf(u_int32_t spec,
+							u_int32_t key,
+							const void *data,
+							size_t data_len);
+extern struct csr1212_keyval *csr1212_new_descriptor_leaf(u_int8_t dtype,
+							  u_int32_t specifier_id,
+							  const void *data,
+							  size_t data_len);
+extern struct csr1212_keyval *csr1212_new_textual_descriptor_leaf(u_int8_t cwidth,
+								  u_int16_t cset,
+								  u_int16_t language,
+								  const void *data,
+								  size_t data_len);
+extern struct csr1212_keyval *csr1212_new_string_descriptor_leaf(const char *s);
+extern struct csr1212_keyval *csr1212_new_icon_descriptor_leaf(u_int32_t version,
+							       u_int8_t palette_depth,
+							       u_int8_t color_space,
+							       u_int16_t language,
+							       u_int16_t hscan,
+							       u_int16_t vscan,
+							       u_int32_t *palette,
+							       u_int32_t *pixels);
+extern struct csr1212_keyval *csr1212_new_modifiable_descriptor_leaf(u_int16_t max_size,
+								     u_int64_t address);
+extern struct csr1212_keyval *csr1212_new_keyword_leaf(int strc,
+						       const char *strv[]);
+
+
+/* The following functions manage association between keyvals.  Typically,
+ * Descriptor Leaves and Directories will be associated with another keyval and
+ * it is desirable for the Descriptor keyval to be placed immediately after the
+ * keyval that it is associated with. */
+extern int csr1212_associate_keyval(struct csr1212_keyval *kv,
+				    struct csr1212_keyval *associate);
+extern void csr1212_disassociate_keyval(struct csr1212_keyval *kv);
+
+
+/* The following functions manage the association of a keyval and directories.
+ * A keyval may be attached to more than one directory. */
+extern int csr1212_attach_keyval_to_directory(struct csr1212_keyval *dir,
+					      struct csr1212_keyval *kv);
+extern void csr1212_detach_keyval_from_directory(struct csr1212_keyval *dir,
+						 struct csr1212_keyval *kv);
+
+
+/* The following functions create a Configuration ROM image from the tree of
+ * keyvals provided.  csr1212_generate_csr_image() creates a complete image in
+ * the list of caches available via csr->cache_head.  The other functions are
+ * provided should there be a need to create a flat image without restrictions
+ * placed by IEEE 1212. */
+extern struct csr1212_keyval *csr1212_generate_positions(struct csr1212_csr_rom_cache *cache,
+							 struct csr1212_keyval *start_kv,
+							 int start_pos);
+extern size_t csr1212_generate_layout_order(struct csr1212_keyval *kv);
+extern void csr1212_fill_cache(struct csr1212_csr_rom_cache *cache);
+extern int csr1212_generate_csr_image(struct csr1212_csr *csr);
+
+
+/* This is a convenience function for reading a block of data out of one of the
+ * caches in the csr->cache_head list. */
+extern int csr1212_read(struct csr1212_csr *csr, u_int32_t offset, void *buffer,
+			u_int32_t len);
+
+
+/* The following functions are in place for parsing Configuration ROM images.
+ * csr1212_parse_keyval() is used should there be a need to parse a
+ * Configuration ROM directly. */
+extern int csr1212_parse_keyval(struct csr1212_keyval *kv,
+				struct csr1212_csr_rom_cache *cache);
+extern int csr1212_parse_csr(struct csr1212_csr *csr);
+
+/* These are internal functions referenced by inline functions below. */
+extern int _csr1212_read_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv);
+extern void _csr1212_destroy_keyval(struct csr1212_keyval *kv);
+
+
+/* Allocate a new ROM cache covering `size' bytes at `offset'.  The cache
+ * may be used for either parsing or generating sub-sets of Configuration
+ * ROM images.  All list pointers start out empty; returns NULL on
+ * allocation failure. */
+static inline struct csr1212_csr_rom_cache *csr1212_rom_cache_malloc(u_int32_t offset,
+								     size_t size)
+{
+	struct csr1212_csr_rom_cache *cache;
+
+	/* the cache's data area is allocated in-line after the header */
+	cache = CSR1212_MALLOC(sizeof(*cache) + size);
+	if (!cache)
+		return NULL;
+
+	cache->prev = cache->next = NULL;
+	cache->filled_tail = cache->filled_head = NULL;
+	cache->layout_tail = cache->layout_head = NULL;
+	cache->ext_rom = NULL;
+	cache->size = size;
+	cache->offset = offset;
+
+	return cache;
+}
+
+
+/* This function ensures that a keyval contains data when referencing a keyval
+ * created by parsing a Configuration ROM.  Returns NULL when kv is NULL or
+ * when (re)reading the keyval from the ROM fails. */
+static inline struct csr1212_keyval *csr1212_get_keyval(struct csr1212_csr *csr,
+							struct csr1212_keyval *kv)
+{
+	if (!kv)
+		return NULL;
+	/* lazily fetch the keyval's contents on first access */
+	if (!kv->valid)
+		if (_csr1212_read_keyval(csr, kv) != CSR1212_SUCCESS)
+			return NULL;
+	return kv;
+}
+
+
+/* Take an extra reference on kv so it survives a later
+ * csr1212_release_keyval().  Use when code must retain a parsed keyval. */
+static inline void csr1212_keep_keyval(struct csr1212_keyval *kv)
+{
+	++kv->refcnt;
+}
+
+
+/* Drop a reference on kv; the keyval is destroyed once the last user is
+ * gone.  Must be called by any code that used csr1212_keep_keyval() or one
+ * of the csr1212_new_*() creation routines. */
+static inline void csr1212_release_keyval(struct csr1212_keyval *kv)
+{
+	if (kv->refcnt <= 1)
+		_csr1212_destroy_keyval(kv);
+	else
+		--kv->refcnt;
+}
+
+
+/*
+ * This macro allows for looping over the keyval entries in a directory and it
+ * ensures that keyvals from remote ConfigROMs are parsed properly.
+ *
+ * _csr is a struct csr1212_csr * that points to CSR associated with dir.
+ * _kv is a struct csr1212_keyval * that'll point to the current keyval (loop index).
+ * _dir is a struct csr1212_keyval * that points to the directory to be looped.
+ * _pos is a struct csr1212_dentry * that is used internally for indexing.
+ *
+ * An associated keyval is visited immediately after the keyval that owns it,
+ * before the loop advances to the next directory entry.
+ *
+ * kv will be NULL upon exit of the loop.
+ */
+#define csr1212_for_each_dir_entry(_csr, _kv, _dir, _pos)			\
+	for (csr1212_get_keyval((_csr), (_dir)),				\
+	     _pos = (_dir)->value.directory.dentries_head,			\
+	     _kv = (_pos) ? csr1212_get_keyval((_csr), _pos->kv) : NULL;	\
+	     (_kv) && (_pos);							\
+	     (_kv->associate == NULL) ?						\
+		     ((_pos = _pos->next), 					\
+		      (_kv = (_pos) ? csr1212_get_keyval((_csr), _pos->kv) :	\
+                          NULL)) :						\
+		     (_kv = csr1212_get_keyval((_csr), _kv->associate)))
+
+
+
+#endif /* __CSR1212_H__ */
diff --git a/drivers/ieee1394/dma.c b/drivers/ieee1394/dma.c
new file mode 100644
index 000000000000..758819d1999d
--- /dev/null
+++ b/drivers/ieee1394/dma.c
@@ -0,0 +1,260 @@
+/*
+ * DMA region bookkeeping routines
+ *
+ * Copyright (C) 2002 Maas Digital LLC
+ *
+ * This code is licensed under the GPL.  See the file COPYING in the root
+ * directory of the kernel sources for details.
+ */
+
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include "dma.h"
+
+/* dma_prog_region */
+
+/* Reset a dma_prog_region to the unallocated state; frees nothing. */
+void dma_prog_region_init(struct dma_prog_region *prog)
+{
+	prog->bus_addr = 0;
+	prog->n_pages = 0;
+	prog->dev = NULL;
+	prog->kvirt = NULL;
+}
+
+/* Allocate a physically contiguous, coherent buffer of at least n_bytes
+ * (rounded up to whole pages).  Returns 0 on success or -ENOMEM. */
+int  dma_prog_region_alloc(struct dma_prog_region *prog, unsigned long n_bytes, struct pci_dev *dev)
+{
+	n_bytes = PAGE_ALIGN(n_bytes);	/* whole pages only */
+	prog->n_pages = n_bytes >> PAGE_SHIFT;
+
+	prog->kvirt = pci_alloc_consistent(dev, n_bytes, &prog->bus_addr);
+	if (prog->kvirt == NULL) {
+		printk(KERN_ERR "dma_prog_region_alloc: pci_alloc_consistent() failed\n");
+		dma_prog_region_free(prog);	/* resets the bookkeeping */
+		return -ENOMEM;
+	}
+
+	prog->dev = dev;
+	return 0;
+}
+
+/* Release the coherent buffer (if any) and reset all bookkeeping. */
+void dma_prog_region_free(struct dma_prog_region *prog)
+{
+	if (prog->kvirt)
+		pci_free_consistent(prog->dev, prog->n_pages << PAGE_SHIFT,
+				    prog->kvirt, prog->bus_addr);
+
+	prog->bus_addr = 0;
+	prog->n_pages = 0;
+	prog->dev = NULL;
+	prog->kvirt = NULL;
+}
+
+/* dma_region */
+
+/* Reset a dma_region to the unallocated state; frees nothing. */
+void dma_region_init(struct dma_region *dma)
+{
+	dma->sglist = NULL;
+	dma->n_dma_pages = 0;
+	dma->n_pages = 0;
+	dma->dev = NULL;
+	dma->kvirt = NULL;
+}
+
+/* Allocate a page-aligned, virtually contiguous buffer of at least n_bytes,
+ * build a scatter/gather list describing it and map it into the IOMMU.
+ * Returns 0 on success, -ENOMEM on any failure (partial state is freed). */
+int dma_region_alloc(struct dma_region *dma, unsigned long n_bytes, struct pci_dev *dev, int direction)
+{
+	unsigned int page;
+
+	n_bytes = PAGE_ALIGN(n_bytes);	/* whole pages only */
+	dma->n_pages = n_bytes >> PAGE_SHIFT;
+
+	dma->kvirt = vmalloc_32(n_bytes);
+	if (!dma->kvirt) {
+		printk(KERN_ERR "dma_region_alloc: vmalloc_32() failed\n");
+		goto err;
+	}
+	/* Clear the ram out, no junk to the user */
+	memset(dma->kvirt, 0, n_bytes);
+
+	/* allocate scatter/gather list */
+	dma->sglist = vmalloc(dma->n_pages * sizeof(*dma->sglist));
+	if (!dma->sglist) {
+		printk(KERN_ERR "dma_region_alloc: vmalloc(sglist) failed\n");
+		goto err;
+	}
+	/* just to be safe - this will become unnecessary once sglist->address goes away */
+	memset(dma->sglist, 0, dma->n_pages * sizeof(*dma->sglist));
+
+	/* one scatterlist entry per kernel page */
+	for (page = 0; page < dma->n_pages; page++) {
+		unsigned long va = (unsigned long) dma->kvirt + (page << PAGE_SHIFT);
+
+		dma->sglist[page].page = vmalloc_to_page((void *)va);
+		dma->sglist[page].length = PAGE_SIZE;
+	}
+
+	/* map sglist to the IOMMU; entries may be coalesced */
+	dma->n_dma_pages = pci_map_sg(dev, dma->sglist, dma->n_pages, direction);
+	if (dma->n_dma_pages == 0) {
+		printk(KERN_ERR "dma_region_alloc: pci_map_sg() failed\n");
+		goto err;
+	}
+
+	dma->dev = dev;
+	dma->direction = direction;
+	return 0;
+
+err:
+	dma_region_free(dma);
+	return -ENOMEM;
+}
+
+/* Unmap from the IOMMU (if mapped), then release the scatterlist and the
+ * buffer.  Safe to call on a partially initialized region. */
+void dma_region_free(struct dma_region *dma)
+{
+	if (dma->n_dma_pages) {
+		pci_unmap_sg(dma->dev, dma->sglist, dma->n_pages, dma->direction);
+		dma->n_dma_pages = 0;
+		dma->dev = NULL;
+	}
+
+	/* vfree(NULL) is a no-op, so no guards needed */
+	vfree(dma->sglist);
+	vfree(dma->kvirt);
+	dma->sglist = NULL;
+	dma->kvirt = NULL;
+	dma->n_pages = 0;
+}
+
+/* Translate a byte offset from the beginning of the buffer into the index
+ * of the scatterlist entry containing it, storing the remaining offset
+ * within that entry in *rem.  An offset past the end of the mapping hits
+ * BUG_ON(). */
+static inline int dma_region_find(struct dma_region *dma, unsigned long offset, unsigned long *rem)
+{
+	int idx = 0;
+
+	while (idx < dma->n_dma_pages &&
+	       offset >= sg_dma_len(&dma->sglist[idx])) {
+		offset -= sg_dma_len(&dma->sglist[idx]);
+		idx++;
+	}
+
+	BUG_ON(idx >= dma->n_dma_pages);
+
+	*rem = offset;
+	return idx;
+}
+
+/* Return the DMA bus address of the byte at `offset' within the region. */
+dma_addr_t dma_region_offset_to_bus(struct dma_region *dma, unsigned long offset)
+{
+	unsigned long rem;
+	int idx = dma_region_find(dma, offset, &rem);
+
+	return sg_dma_address(&dma->sglist[idx]) + rem;
+}
+
+/* Make CPU reads coherent with device writes for [offset, offset+len). */
+void dma_region_sync_for_cpu(struct dma_region *dma, unsigned long offset, unsigned long len)
+{
+	unsigned long rem;
+	int first, last;
+
+	if (len == 0)
+		len = 1;	/* sync at least one byte */
+
+	first = dma_region_find(dma, offset, &rem);
+	last = dma_region_find(dma, offset + len - 1, &rem);
+
+	pci_dma_sync_sg_for_cpu(dma->dev, &dma->sglist[first],
+				last - first + 1, dma->direction);
+}
+
+/* Hand [offset, offset+len) back to the device after CPU writes. */
+void dma_region_sync_for_device(struct dma_region *dma, unsigned long offset, unsigned long len)
+{
+	unsigned long rem;
+	int first, last;
+
+	if (len == 0)
+		len = 1;	/* sync at least one byte */
+
+	first = dma_region_find(dma, offset, &rem);
+	last = dma_region_find(dma, offset + len - 1, &rem);
+
+	pci_dma_sync_sg_for_device(dma->dev, &dma->sglist[first],
+				   last - first + 1, dma->direction);
+}
+
+#ifdef CONFIG_MMU
+
+/* nopage() handler for mmap access: translate the faulting user address
+   into the vmalloc page backing it, taking a reference on the page.
+   Returns NOPAGE_SIGBUS for addresses outside the buffer. */
+
+static struct page*
+dma_region_pagefault(struct vm_area_struct *area, unsigned long address, int *type)
+{
+	unsigned long offset;
+	unsigned long kernel_virt_addr;
+	struct page *ret = NOPAGE_SIGBUS;
+
+	struct dma_region *dma = (struct dma_region*) area->vm_private_data;
+
+	if (!dma->kvirt)
+		goto out;
+
+	/* bounds check; note ">=": the byte at vm_start + (n_pages <<
+	   PAGE_SHIFT) is already one past the end of the buffer (the old
+	   ">" test would have handed out a page beyond the allocation) */
+	if ( (address < (unsigned long) area->vm_start) ||
+	    (address >= (unsigned long) area->vm_start + (dma->n_pages << PAGE_SHIFT)) )
+		goto out;
+
+	if (type)
+		*type = VM_FAULT_MINOR;
+	offset = address - area->vm_start;
+	kernel_virt_addr = (unsigned long) dma->kvirt + offset;
+	ret = vmalloc_to_page((void*) kernel_virt_addr);
+	get_page(ret);	/* hold a reference while user space has it mapped */
+out:
+	return ret;
+}
+
+/* VM callbacks for mmap'ed regions; faults are served out of the
+   vmalloc'ed buffer by dma_region_pagefault() */
+static struct vm_operations_struct dma_region_vm_ops = {
+	.nopage	= dma_region_pagefault,
+};
+
+/* Map the region into a user process.  The mapping must start at the
+ * beginning of the buffer and may not extend past its end. */
+int dma_region_mmap(struct dma_region *dma, struct file *file, struct vm_area_struct *vma)
+{
+	unsigned long size;
+
+	if (!dma->kvirt)
+		return -EINVAL;
+
+	/* must be page-aligned at the start of the buffer */
+	if (vma->vm_pgoff != 0)
+		return -EINVAL;
+
+	/* reject mappings larger than the buffer */
+	size = vma->vm_end - vma->vm_start;
+	if (size > (dma->n_pages << PAGE_SHIFT))
+		return -EINVAL;
+
+	vma->vm_private_data = dma;
+	vma->vm_ops = &dma_region_vm_ops;
+	vma->vm_file = file;
+	vma->vm_flags |= VM_RESERVED;
+
+	return 0;
+}
+
+#else /* CONFIG_MMU */
+
+/* no MMU: user-space mapping of the vmalloc'ed buffer is not supported */
+int dma_region_mmap(struct dma_region *dma, struct file *file, struct vm_area_struct *vma)
+{
+	return -EINVAL;
+}
+
+#endif /* CONFIG_MMU */
diff --git a/drivers/ieee1394/dma.h b/drivers/ieee1394/dma.h
new file mode 100644
index 000000000000..061550a6fb99
--- /dev/null
+++ b/drivers/ieee1394/dma.h
@@ -0,0 +1,78 @@
+/*
+ * DMA region bookkeeping routines
+ *
+ * Copyright (C) 2002 Maas Digital LLC
+ *
+ * This code is licensed under the GPL.  See the file COPYING in the root
+ * directory of the kernel sources for details.
+ */
+
+#ifndef IEEE1394_DMA_H
+#define IEEE1394_DMA_H
+
+#include <linux/pci.h>
+#include <asm/scatterlist.h>
+
+/* struct dma_prog_region
+
+   a small, physically-contiguous DMA buffer with random-access,
+   synchronous usage characteristics
+*/
+
+struct dma_prog_region {
+	unsigned char    *kvirt;     /* kernel virtual address */
+	struct pci_dev   *dev;       /* PCI device */
+	unsigned int      n_pages;   /* # of kernel pages; buffer is n_pages << PAGE_SHIFT bytes */
+	dma_addr_t        bus_addr;  /* base bus address */
+};
+
+/* clear out all fields but do not allocate any memory */
+void dma_prog_region_init(struct dma_prog_region *prog);
+int  dma_prog_region_alloc(struct dma_prog_region *prog, unsigned long n_bytes, struct pci_dev *dev);
+void dma_prog_region_free(struct dma_prog_region *prog);
+
+/* The buffer is physically contiguous, so the bus address of any byte is
+ * simply the base address plus its offset. */
+static inline dma_addr_t dma_prog_region_offset_to_bus(struct dma_prog_region *prog, unsigned long offset)
+{
+	return offset + prog->bus_addr;
+}
+
+/* struct dma_region
+
+   a large, non-physically-contiguous DMA buffer with streaming,
+   asynchronous usage characteristics
+*/
+
+struct dma_region {
+	unsigned char      *kvirt;       /* kernel virtual address */
+	struct pci_dev     *dev;         /* PCI device */
+	unsigned int        n_pages;     /* # of kernel pages */
+	unsigned int        n_dma_pages; /* # of IOMMU pages (may be < n_pages if coalesced) */
+	struct scatterlist *sglist;      /* IOMMU mapping */
+	int                 direction;   /* PCI_DMA_TODEVICE, etc */
+};
+
+/* clear out all fields but do not allocate anything */
+void dma_region_init(struct dma_region *dma);
+
+/* allocate the buffer and map it to the IOMMU */
+int  dma_region_alloc(struct dma_region *dma, unsigned long n_bytes, struct pci_dev *dev, int direction);
+
+/* unmap and free the buffer */
+void dma_region_free(struct dma_region *dma);
+
+/* sync the CPU's view of the buffer */
+void dma_region_sync_for_cpu(struct dma_region *dma, unsigned long offset, unsigned long len);
+/* sync the IO bus' view of the buffer */
+void dma_region_sync_for_device(struct dma_region *dma, unsigned long offset, unsigned long len);
+
+/* map the buffer into a user space process */
+int  dma_region_mmap(struct dma_region *dma, struct file *file, struct vm_area_struct *vma);
+
+/* macro to index into a DMA region (or dma_prog_region): yields a _type*
+   pointing at element _index of the buffer */
+#define dma_region_i(_dma, _type, _index) ( ((_type*) ((_dma)->kvirt)) + (_index) )
+
+/* return the DMA bus address of the byte with the given offset
+   relative to the beginning of the dma_region */
+dma_addr_t dma_region_offset_to_bus(struct dma_region *dma, unsigned long offset);
+
+#endif /* IEEE1394_DMA_H */
diff --git a/drivers/ieee1394/dv1394-private.h b/drivers/ieee1394/dv1394-private.h
new file mode 100644
index 000000000000..80b5ac7fe383
--- /dev/null
+++ b/drivers/ieee1394/dv1394-private.h
@@ -0,0 +1,587 @@
+/*
+ * dv1394-private.h - DV input/output over IEEE 1394 on OHCI chips
+ *   Copyright (C)2001 Daniel Maas <dmaas@dcine.com>
+ *     receive by Dan Dennedy <dan@dennedy.org>
+ *
+ * based on:
+ *   video1394.h - driver for OHCI 1394 boards
+ *   Copyright (C)1999,2000 Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>
+ *                          Peter Schlaile <udbz@rz.uni-karlsruhe.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef _DV_1394_PRIVATE_H
+#define _DV_1394_PRIVATE_H
+
+#include "ieee1394.h"
+#include "ohci1394.h"
+#include "dma.h"
+
+/* data structures private to the dv1394 driver */
+/* none of this is exposed to user-space */
+
+
+/*
+   the 8-byte CIP (Common Isochronous Packet) header that precedes
+   each packet of DV data.
+
+   See the IEC 61883 standard.
+*/
+
+/* 8 raw header bytes; see fill_cip_header() for the field layout */
+struct CIP_header { unsigned char b[8]; };
+
+/* Fill in an IEC 61883 CIP header for one DV packet. */
+static inline void fill_cip_header(struct CIP_header *cip,
+				   unsigned char source_node_id,
+				   unsigned long counter,
+				   enum pal_or_ntsc format,
+				   unsigned long timestamp)
+{
+	cip->b[0] = source_node_id;
+	cip->b[1] = 0x78; /* packet size in quadlets (480/4) - even for empty packets! */
+	cip->b[2] = 0x00;
+	cip->b[3] = counter;
+
+	cip->b[4] = 0x80; /* const */
+	/* video system flag: PAL sets the high bit, NTSC clears it */
+	cip->b[5] = (format == DV1394_PAL) ? 0x80 : 0x00;
+
+	/* 16-bit SYT timestamp, big-endian */
+	cip->b[6] = (timestamp >> 8) & 0xFF;
+	cip->b[7] = timestamp & 0xFF;
+}
+
+
+
+/*
+   DMA commands used to program the OHCI's DMA engine
+
+   See the Texas Instruments OHCI 1394 chipset documentation.
+*/
+
+struct output_more_immediate { u32 q[8]; };	/* 16-byte command + 16 bytes of in-line data */
+struct output_more { u32 q[4]; };
+struct output_last { u32 q[4]; };
+struct input_more { u32 q[4]; };
+struct input_last { u32 q[4]; };
+
+/* outputs */
+
+/* Build an OUTPUT_MORE-Immediate descriptor carrying the 8-byte
+ * isochronous transmit header in-line. */
+static inline void fill_output_more_immediate(struct output_more_immediate *omi,
+					      unsigned char tag,
+					      unsigned char channel,
+					      unsigned char sync_tag,
+					      unsigned int  payload_size)
+{
+	omi->q[0] = cpu_to_le32(0x02000000 | 8); /* OUTPUT_MORE_IMMEDIATE; 8 is the size of the IT header */
+	omi->q[1] = 0;
+	omi->q[2] = 0;
+	omi->q[3] = 0;
+
+	/* IT packet header: speed | tag | channel | tcode | sync */
+	omi->q[4] = cpu_to_le32((0x0 << 16)	/* IEEE1394_SPEED_100 */
+				| (tag << 14)
+				| (channel << 8)
+				| (TCODE_ISO_DATA << 4)
+				| sync_tag);
+
+	/* reserved field; mimic behavior of my Sony DSR-40 */
+	omi->q[5] = cpu_to_le32((payload_size << 16) | (0x7F << 8) | 0xA0);
+
+	omi->q[6] = 0;
+	omi->q[7] = 0;
+}
+
+/* Build a plain OUTPUT_MORE descriptor pointing at data_size bytes. */
+static inline void fill_output_more(struct output_more *om,
+				    unsigned int data_size,
+				    unsigned long data_phys_addr)
+{
+	om->q[3] = 0;
+	om->q[2] = 0;
+	om->q[1] = cpu_to_le32(data_phys_addr);
+	om->q[0] = cpu_to_le32(data_size);
+}
+
+/* Build the final OUTPUT_LAST descriptor of a block, optionally requesting
+ * a hardware timestamp and/or a completion interrupt. */
+static inline void fill_output_last(struct output_last *ol,
+				    int want_timestamp,
+				    int want_interrupt,
+				    unsigned int data_size,
+				    unsigned long data_phys_addr)
+{
+	u32 control = (1 << 28)		/* OUTPUT_LAST */
+		    | (3 << 18)		/* must take branch */
+		    | data_size;
+
+	if (want_timestamp)		/* controller will update timestamp at DMA time */
+		control |= 1 << 27;
+
+	if (want_interrupt)
+		control |= 3 << 20;
+
+	ol->q[0] = cpu_to_le32(control);
+	ol->q[1] = cpu_to_le32(data_phys_addr);
+	ol->q[2] = 0;
+	ol->q[3] = 0;
+}
+
+/* inputs */
+
+/* Build an INPUT_MORE descriptor for packet-per-buffer receive mode.
+ * (Cleanup: the old body contained a dead "temp |= 0 << 20" inside an
+ * if (want_interrupt), i.e. a no-op - interrupts are deliberately NOT
+ * enabled here (i = 0 in packet-per-buffer mode); also fixed the broken
+ * space/tab indentation and comment typos.) */
+static inline void fill_input_more(struct input_more *im,
+				   int want_interrupt,
+				   unsigned int data_size,
+				   unsigned long data_phys_addr)
+{
+	u32 temp =  2 << 28; /* INPUT_MORE */
+	temp |= 8 << 24; /* s = 1, update xferStatus and resCount */
+	/* i = 0: no interrupt on completion, even when want_interrupt is
+	   set - packet-per-buffer mode interrupts via INPUT_LAST instead */
+	(void) want_interrupt;
+	temp |= 0x0 << 16; /* disable branch to address for packet-per-buffer mode */
+	                   /* disable wait on sync field, not used in DV :-( */
+	temp |= data_size;
+
+	im->q[0] = cpu_to_le32(temp);
+	im->q[1] = cpu_to_le32(data_phys_addr);
+	im->q[2] = 0; /* branchAddress and Z not used in packet-per-buffer mode */
+	im->q[3] = 0; /* xferStatus & resCount; NOTE(review): the original comment
+			 said resCount "must be initialized to data_size" but the
+			 code stores 0 - verify against the receive path */
+}
+ 
+/* Build the final INPUT_LAST descriptor; its branchAddress is patched in
+ * later once the next block's location is known. */
+static inline void fill_input_last(struct input_last *il,
+				    int want_interrupt,
+				    unsigned int data_size,
+				    unsigned long data_phys_addr)
+{
+	u32 control = (3 << 28)		/* INPUT_LAST */
+		    | (8 << 24)		/* s = 1, update xferStatus and resCount */
+		    | (0xC << 16)	/* enable branch to address; no wait on sync */
+		    | data_size;
+
+	if (want_interrupt)
+		control |= 3 << 20;	/* enable interrupts */
+
+	il->q[0] = cpu_to_le32(control);
+	il->q[1] = cpu_to_le32(data_phys_addr);
+	il->q[2] = cpu_to_le32(1); /* branchAddress (filled in later) and Z = 1 descriptor in next block */
+	il->q[3] = cpu_to_le32(data_size); /* xferStatus & resCount; resCount starts at data_size */
+}
+
+
+
+/*
+   A "DMA descriptor block" consists of several contiguous DMA commands.
+   struct DMA_descriptor_block encapsulates all of the commands necessary
+   to send one packet of DV data.
+
+   There are three different types of these blocks:
+
+        1) command to send an empty packet (CIP header only, no DV data):
+
+	    OUTPUT_MORE-Immediate <-- contains the iso header in-line
+	    OUTPUT_LAST           <-- points to the CIP header
+
+	2) command to send a full packet when the DV data payload does NOT
+	   cross a page boundary:
+
+	    OUTPUT_MORE-Immediate <-- contains the iso header in-line
+	    OUTPUT_MORE           <-- points to the CIP header
+	    OUTPUT_LAST           <-- points to entire DV data payload
+
+	3) command to send a full packet when the DV payload DOES cross
+	   a page boundary:
+
+	    OUTPUT_MORE-Immediate <-- contains the iso header in-line
+	    OUTPUT_MORE           <-- points to the CIP header
+	    OUTPUT_MORE           <-- points to first part of DV data payload
+	    OUTPUT_LAST           <-- points to second part of DV data payload
+
+   This struct describes all three block types using unions.
+
+   !!! It is vital that an even number of these descriptor blocks fit on one
+   page of memory, since a block cannot cross a page boundary !!!
+
+ */
+
+struct DMA_descriptor_block {
+
+	union {
+		struct {
+			/*  iso header, common to all output block types */
+			struct output_more_immediate omi;
+
+			union {
+				/* empty packet */
+				struct {
+					struct output_last ol;  /* CIP header */
+				} empty;
+
+				/* full packet */
+				struct {
+					struct output_more om;  /* CIP header */
+
+					union {
+				               /* payload does not cross page boundary */
+						struct {
+							struct output_last ol;  /* data payload */
+						} nocross;
+
+				               /* payload crosses page boundary */
+						struct {
+							struct output_more om;  /* data payload */
+							struct output_last ol;  /* data payload */
+						} cross;
+					} u;
+
+				} full;
+			} u;
+		} out;
+
+		/* receive blocks consist of a single INPUT_LAST command */
+		struct {
+			struct input_last il;
+		} in;
+
+	} u;
+
+	/* ensure that PAGE_SIZE % sizeof(struct DMA_descriptor_block) == 0
+	   by padding out to 128 bytes */
+	u32 __pad__[12];
+};
+
+
+/* struct frame contains all data associated with one frame in the
+   ringbuffer.  These are allocated when the DMA context is initialized
+   in do_dv1394_init().  They are re-used after the card finishes
+   transmitting the frame. */
+
+struct video_card; /* forward declaration */
+
+struct frame {
+
+	/* points to the struct video_card that owns this frame */
+	struct video_card *video;
+
+	/* index of this frame in video_card->frames[] */
+	unsigned int frame_num;
+
+	/* FRAME_CLEAR - DMA program not set up, waiting for data
+	   FRAME_READY - DMA program written, ready to transmit
+
+	   Changes to these should be locked against the interrupt
+	   handler (see video_card.spinlock)
+	*/
+	enum {
+		FRAME_CLEAR = 0,
+		FRAME_READY
+	} state;
+
+	/* whether this frame has been DMA'ed already; used only from
+	   the IRQ handler to determine whether the frame can be reset */
+	int done;
+
+
+	/* kernel virtual pointer to the start of this frame's data in
+	   the user ringbuffer. Use only for CPU access; to get the DMA
+	   bus address you must go through the video->user_dma mapping */
+	unsigned long data;
+
+	/* Max # of packets per frame */
+#define MAX_PACKETS 500
+
+
+	/* a PAGE_SIZE memory pool for allocating CIP headers
+	   !header_pool must be aligned to PAGE_SIZE! */
+	struct CIP_header *header_pool;
+	dma_addr_t         header_pool_dma;
+
+
+	/* a physically contiguous memory pool for allocating DMA
+	   descriptor blocks; usually around 64KB in size
+	   !descriptor_pool must be aligned to PAGE_SIZE! */
+	struct DMA_descriptor_block *descriptor_pool;
+	dma_addr_t                   descriptor_pool_dma;
+	unsigned long                descriptor_pool_size;
+
+
+	/* # of packets allocated for this frame */
+	unsigned int n_packets;
+
+
+	/* below are several pointers (kernel virtual addresses, not
+	   DMA bus addresses) to parts of the DMA program.  These are
+	   set each time the DMA program is written in
+	   frame_prepare(). They are used later on, e.g. from the
+	   interrupt handler, to check the status of the frame */
+
+	/* points to status/timestamp field of first DMA packet */
+	/* (we'll check it later to monitor timestamp accuracy) */
+	u32 *frame_begin_timestamp;
+
+	/* the timestamp we assigned to the first packet in the frame */
+	u32 assigned_timestamp;
+
+	/* pointer to the first packet's CIP header (where the timestamp goes) */
+	struct CIP_header *cip_syt1;
+
+	/* pointer to the second packet's CIP header
+	   (only set if the first packet was empty) */
+	struct CIP_header *cip_syt2;
+
+	/* in order to figure out what caused an interrupt,
+	   store pointers to the status fields of the two packets
+	   that can cause interrupts. We'll check these from the
+	   interrupt handler.
+	*/
+	u32 *mid_frame_timestamp;
+	u32 *frame_end_timestamp;
+
+	/* branch address field of final packet. This is effectively
+	   the "tail" in the chain of DMA descriptor blocks.
+	   We will fill it with the address of the first DMA descriptor
+	   block in the subsequent frame, once it is ready.
+	*/
+	u32 *frame_end_branch;
+
+	/* the number of descriptors in the first descriptor block
+	   of the frame. Needed to start DMA */
+	int first_n_descriptors;
+};
+
+
+/* per-packet receive record: status/header words, CIP header and the
+   480-byte DV payload, padded so two packets fit exactly in one page */
+struct packet {
+	u16	timestamp;
+	u16	invalid;
+	u16	iso_header;
+	u16	data_length;
+	u32	cip_h1;
+	u32	cip_h2;
+	unsigned char data[480];
+	unsigned char padding[16]; /* force struct size =512 for page alignment */
+};
+
+
+/* allocate/free a frame */
+static struct frame* frame_new(unsigned int frame_num, struct video_card *video);
+static void frame_delete(struct frame *f);
+
+/* reset f so that it can be used again */
+static void frame_reset(struct frame *f);
+
+/* struct video_card contains all data associated with one instance
+   of the dv1394 driver
+*/
+enum modes {
+	MODE_RECEIVE,	/* DV capture from the bus */
+	MODE_TRANSMIT	/* DV transmission onto the bus */
+};
+
+struct video_card {
+
+	/* ohci card to which this instance corresponds */
+	struct ti_ohci *ohci;
+
+	/* OHCI card id; the link between the VFS inode and a specific video_card
+	   (essentially the device minor number) */
+	int id;
+
+	/* entry in dv1394_cards */
+	struct list_head list;
+
+	/* OHCI card IT DMA context number, -1 if not in use */
+	int ohci_it_ctx;
+	struct ohci1394_iso_tasklet it_tasklet;
+
+	/* register offsets for current IT DMA context, 0 if not in use */
+	u32 ohci_IsoXmitContextControlSet;
+	u32 ohci_IsoXmitContextControlClear;
+	u32 ohci_IsoXmitCommandPtr;
+
+	/* OHCI card IR DMA context number, -1 if not in use */
+	struct ohci1394_iso_tasklet ir_tasklet;
+	int ohci_ir_ctx;
+
+	/* register offsets for current IR DMA context, 0 if not in use */
+	u32 ohci_IsoRcvContextControlSet;
+	u32 ohci_IsoRcvContextControlClear;
+	u32 ohci_IsoRcvCommandPtr;
+	u32 ohci_IsoRcvContextMatch;
+
+
+	/* CONCURRENCY CONTROL */
+
+	/* there are THREE levels of locking associated with video_card. */
+
+	/*
+	   1) the 'open' flag - this prevents more than one process from
+	   opening the device. (the driver currently assumes only one opener).
+	   Declared as an unsigned long so that test_and_set_bit() (on bit
+	   zero) can be used on it for atomicity.
+	 */
+	unsigned long open;
+
+	/*
+	   2) the spinlock - this provides mutual exclusion between the interrupt
+	   handler and process-context operations. Generally you must take the
+	   spinlock under the following conditions:
+	     1) DMA (and hence the interrupt handler) may be running
+	     AND
+	     2) you need to operate on the video_card, especially active_frame
+
+	     It is OK to play with video_card without taking the spinlock if
+	     you are certain that DMA is not running. Even if DMA is running,
+	     it is OK to *read* active_frame with the lock, then drop it
+	     immediately. This is safe because the interrupt handler will never
+	     advance active_frame onto a frame that is not READY (and the spinlock
+	     must be held while marking a frame READY).
+
+	     spinlock is also used to protect ohci_it_ctx and ohci_ir_ctx,
+	     which can be accessed from both process and interrupt context
+	 */
+	spinlock_t spinlock;
+
+	/* flag to prevent spurious interrupts (which OHCI seems to
+	   generate a lot :) from accessing the struct */
+	int dma_running;
+
+	/*
+	  3) the sleeping semaphore 'sem' - this is used from process context only,
+	  to serialize various operations on the video_card. Even though only one
+	  open() is allowed, we still need to prevent multiple threads of execution
+	  from entering calls like read, write, ioctl, etc.
+
+	  I honestly can't think of a good reason to use dv1394 from several threads
+	  at once, but we need to serialize anyway to prevent oopses =).
+
+	  NOTE: if you need both spinlock and sem, take sem first to avoid deadlock!
+	 */
+	struct semaphore sem;
+
+	/* people waiting for buffer space, please form a line here... */
+	wait_queue_head_t waitq;
+
+	/* support asynchronous I/O signals (SIGIO) */
+	struct fasync_struct *fasync;
+
+	/* the large, non-contiguous (rvmalloc()) ringbuffer for DV
+           data, exposed to user-space via mmap() */
+	unsigned long      dv_buf_size;
+	struct dma_region  dv_buf;
+
+	/* next byte in the ringbuffer that a write() call will fill */
+	size_t write_off;
+
+	struct frame *frames[DV1394_MAX_FRAMES];
+
+	/* n_frames also serves as an indicator that this struct video_card is
+	   initialized and ready to run DMA buffers */
+
+	int n_frames;
+
+	/* this is the frame that is currently "owned" by the OHCI DMA controller
+	   (set to -1 iff DMA is not running)
+
+	   ! must lock against the interrupt handler when accessing it !
+
+	   RULES:
+
+	       Only the interrupt handler may change active_frame if DMA
+	          is running; if not, process may change it
+
+	       If the next frame is READY, the interrupt handler will advance
+	       active_frame when the current frame is finished.
+
+	       If the next frame is CLEAR, the interrupt handler will re-transmit
+	       the current frame, and the dropped_frames counter will be incremented.
+
+	       The interrupt handler will NEVER advance active_frame to a
+	       frame that is not READY.
+	*/
+	int active_frame;
+	/* cleared once DMA has actually been started (see start_dma_receive) */
+	int first_run;
+
+	/* the same locking rules apply to these three fields also: */
+
+	/* altered ONLY from process context. Must check first_clear_frame->state;
+	   if it's READY, that means the ringbuffer is full with READY frames;
+	   if it's CLEAR, that means one or more ringbuffer frames are CLEAR */
+	unsigned int first_clear_frame;
+
+	/* altered both by process and interrupt */
+	unsigned int n_clear_frames;
+
+	/* only altered by the interrupt */
+	unsigned int dropped_frames;
+
+
+
+	/* the CIP accumulator and continuity counter are properties
+	   of the DMA stream as a whole (not a single frame), so they
+	   are stored here in the video_card */
+
+	unsigned long cip_accum;
+	unsigned long cip_n, cip_d;
+	unsigned int syt_offset;
+	unsigned int continuity_counter;
+
+	enum pal_or_ntsc pal_or_ntsc;
+
+	/* redundant, but simplifies the code somewhat */
+	unsigned int frame_size; /* in bytes */
+
+	/* the isochronous channel to use, -1 if video card is inactive */
+	int channel;
+
+
+	/* physically contiguous packet ringbuffer for receive */
+	struct dma_region packet_buf;
+	unsigned long  packet_buf_size;
+
+	unsigned int current_packet;
+	int first_frame; 	/* received first start frame marker? */
+	enum modes mode;	/* MODE_TRANSMIT or MODE_RECEIVE */
+};
+
+/*
+   if the video_card is not initialized, then the ONLY fields that are valid are:
+   ohci
+   open
+   n_frames
+*/
+
+/* a video_card counts as initialized once it has frames allocated;
+   n_frames doubles as the "ready to run DMA" indicator */
+static inline int video_card_initialized(struct video_card *v)
+{
+	return (v->n_frames > 0) ? 1 : 0;
+}
+
+static int do_dv1394_init(struct video_card *video, struct dv1394_init *init);
+static int do_dv1394_init_default(struct video_card *video);
+static void do_dv1394_shutdown(struct video_card *video, int free_user_buf);
+
+
+/* NTSC empty packet rate accurate to within 0.01%,
+   calibrated against a Sony DSR-40 DVCAM deck */
+
+#define CIP_N_NTSC   68000000
+#define CIP_D_NTSC 1068000000
+
+#define CIP_N_PAL  1
+#define CIP_D_PAL 16
+
+#endif /* _DV_1394_PRIVATE_H */
+
diff --git a/drivers/ieee1394/dv1394.c b/drivers/ieee1394/dv1394.c
new file mode 100644
index 000000000000..68c7a5f07842
--- /dev/null
+++ b/drivers/ieee1394/dv1394.c
@@ -0,0 +1,2663 @@
+/*
+ * dv1394.c - DV input/output over IEEE 1394 on OHCI chips
+ *   Copyright (C)2001 Daniel Maas <dmaas@dcine.com>
+ *     receive by Dan Dennedy <dan@dennedy.org>
+ *
+ * based on:
+ *  video1394.c - video driver for OHCI 1394 boards
+ *  Copyright (C)1999,2000 Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/*
+  OVERVIEW
+
+  I designed dv1394 as a "pipe" that you can use to shoot DV onto a
+  FireWire bus. In transmission mode, dv1394 does the following:
+
+   1. accepts contiguous frames of DV data from user-space, via write()
+      or mmap() (see dv1394.h for the complete API)
+   2. wraps IEC 61883 packets around the DV data, inserting
+      empty synchronization packets as necessary
+   3. assigns accurate SYT timestamps to the outgoing packets
+   4. shoots them out using the OHCI card's IT DMA engine
+
+   Thanks to Dan Dennedy, we now have a receive mode that does the following:
+
+   1. accepts raw IEC 61883 packets from the OHCI card
+   2. re-assembles the DV data payloads into contiguous frames,
+      discarding empty packets
+   3. sends the DV data to user-space via read() or mmap()
+*/
+
+/*
+  TODO:
+
+  - tunable frame-drop behavior: either loop last frame, or halt transmission
+
+  - use a scatter/gather buffer for DMA programs (f->descriptor_pool)
+    so that we don't rely on allocating 64KB of contiguous kernel memory
+    via pci_alloc_consistent()
+
+  DONE:
+  - during reception, better handling of dropped frames and continuity errors
+  - during reception, prevent DMA from bypassing the irq tasklets
+  - reduce irq rate during reception (1/250 packets).
+  - add many more internal buffers during reception with scatter/gather dma.
+  - add dbc (continuity) checking on receive, increment status.dropped_frames
+    if not continuous.
+  - restart IT DMA after a bus reset
+  - safely obtain and release ISO Tx channels in cooperation with OHCI driver
+  - map received DIF blocks to their proper location in DV frame (ensure
+    recovery if dropped packet)
+  - handle bus resets gracefully (OHCI card seems to take care of this itself(!))
+  - do not allow resizing the user_buf once allocated; eliminate nuke_buffer_mappings
+  - eliminated #ifdef DV1394_DEBUG_LEVEL by inventing macros debug_printk and irq_printk
+  - added wmb() and mb() to places where PCI read/write ordering needs to be enforced
+  - set video->id correctly
+  - store video_cards in an array indexed by OHCI card ID, rather than a list
+  - implement DMA context allocation to cooperate with other users of the OHCI
+  - fix all XXX showstoppers
+  - disable IR/IT DMA interrupts on shutdown
+  - flush pci writes to the card by issuing a read
+  - devfs and character device dispatching (* needs testing with Linux 2.2.x)
+  - switch over to the new kernel DMA API (pci_map_*()) (* needs testing on platforms with IOMMU!)
+  - keep all video_cards in a list (for open() via chardev), set file->private_data = video
+  - dv1394_poll should indicate POLLIN when receiving buffers are available
+  - add proc fs interface to set cip_n, cip_d, syt_offset, and video signal
+  - expose xmit and recv as separate devices (not exclusive)
+  - expose NTSC and PAL as separate devices (can be overridden)
+
+*/
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/wait.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/fs.h>
+#include <linux/poll.h>
+#include <linux/smp_lock.h>
+#include <linux/bitops.h>
+#include <asm/byteorder.h>
+#include <asm/atomic.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <linux/delay.h>
+#include <asm/pgtable.h>
+#include <asm/page.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+#include <linux/string.h>
+#include <linux/ioctl32.h>
+#include <linux/compat.h>
+#include <linux/cdev.h>
+
+#include "ieee1394.h"
+#include "ieee1394_types.h"
+#include "nodemgr.h"
+#include "hosts.h"
+#include "ieee1394_core.h"
+#include "highlevel.h"
+#include "dv1394.h"
+#include "dv1394-private.h"
+
+#include "ohci1394.h"
+
+#ifndef virt_to_page
+#define virt_to_page(x) MAP_NR(x)
+#endif
+
+#ifndef vmalloc_32
+#define vmalloc_32(x) vmalloc(x)
+#endif
+
+
+/* DEBUG LEVELS:
+   0 - no debugging messages
+   1 - some debugging messages, but none during DMA frame transmission
+   2 - lots of messages, including during DMA frame transmission
+       (will cause underflows if your machine is too slow!)
+*/
+
+#define DV1394_DEBUG_LEVEL 0
+
+/* for debugging use ONLY: allow more than one open() of the device */
+/* #define DV1394_ALLOW_MORE_THAN_ONE_OPEN 1 */
+
+#if DV1394_DEBUG_LEVEL >= 2
+#define irq_printk( args... ) printk( args )
+#else
+#define irq_printk( args... )
+#endif
+
+#if DV1394_DEBUG_LEVEL >= 1
+#define debug_printk( args... ) printk( args)
+#else
+#define debug_printk( args... )
+#endif
+
+/* issue a dummy PCI read to force the preceding write
+   to be posted to the PCI bus immediately */
+
+static inline void flush_pci_write(struct ti_ohci *ohci)
+{
+	/* barrier first, so the write we want flushed is ordered
+	   before the dummy read below */
+	mb();
+	/* the read itself forces posted PCI writes out to the card;
+	   the cycle timer register is a side-effect-free choice */
+	reg_read(ohci, OHCI1394_IsochronousCycleTimer);
+}
+
+static void it_tasklet_func(unsigned long data);
+static void ir_tasklet_func(unsigned long data);
+
+#ifdef CONFIG_COMPAT
+static long dv1394_compat_ioctl(struct file *file, unsigned int cmd,
+			       unsigned long arg);
+#endif
+
+/* GLOBAL DATA */
+
+/* list of all video_cards */
+static LIST_HEAD(dv1394_cards);
+static DEFINE_SPINLOCK(dv1394_cards_lock);
+
+/* translate from a struct file* to the corresponding struct video_card* */
+
+/* recover the video_card instance stashed in file->private_data at open() */
+static inline struct video_card* file_to_video_card(struct file *file)
+{
+	struct video_card *video = file->private_data;
+
+	return video;
+}
+
+/*** FRAME METHODS *********************************************************/
+
+static void frame_reset(struct frame *f)
+{
+	/* back to a clean, just-allocated state */
+	f->state = FRAME_CLEAR;
+	f->done = 0;
+	f->n_packets = 0;
+
+	/* forget every pointer into the previous DMA program */
+	f->frame_begin_timestamp = NULL;
+	f->mid_frame_timestamp = NULL;
+	f->frame_end_timestamp = NULL;
+	f->frame_end_branch = NULL;
+
+	/* no CIP headers are awaiting a timestamp */
+	f->assigned_timestamp = 0;
+	f->cip_syt1 = NULL;
+	f->cip_syt2 = NULL;
+}
+
+/* allocate one frame: the frame struct itself, a page of CIP headers,
+   and a pool of DMA descriptor blocks. Returns NULL on any failure,
+   with everything already-allocated released. */
+static struct frame* frame_new(unsigned int frame_num, struct video_card *video)
+{
+	struct frame *f = kmalloc(sizeof(*f), GFP_KERNEL);
+
+	if (!f)
+		return NULL;
+
+	f->video = video;
+	f->frame_num = frame_num;
+
+	/* a single page holds all of the frame's CIP headers */
+	f->header_pool = pci_alloc_consistent(f->video->ohci->dev, PAGE_SIZE, &f->header_pool_dma);
+	if (!f->header_pool) {
+		printk(KERN_ERR "dv1394: failed to allocate CIP header pool\n");
+		goto err_free_frame;
+	}
+
+	debug_printk("dv1394: frame_new: allocated CIP header pool at virt 0x%08lx (contig) dma 0x%08lx size %ld\n",
+		     (unsigned long) f->header_pool, (unsigned long) f->header_pool_dma, PAGE_SIZE);
+
+	/* round the descriptor pool up to a whole number of pages */
+	f->descriptor_pool_size = MAX_PACKETS * sizeof(struct DMA_descriptor_block);
+	f->descriptor_pool_size += PAGE_SIZE - (f->descriptor_pool_size%PAGE_SIZE);
+
+	f->descriptor_pool = pci_alloc_consistent(f->video->ohci->dev,
+						  f->descriptor_pool_size,
+						  &f->descriptor_pool_dma);
+	if (!f->descriptor_pool)
+		goto err_free_headers;
+
+	debug_printk("dv1394: frame_new: allocated DMA program memory at virt 0x%08lx (contig) dma 0x%08lx size %ld\n",
+		     (unsigned long) f->descriptor_pool, (unsigned long) f->descriptor_pool_dma, f->descriptor_pool_size);
+
+	f->data = 0;
+	frame_reset(f);
+
+	return f;
+
+err_free_headers:
+	pci_free_consistent(f->video->ohci->dev, PAGE_SIZE, f->header_pool, f->header_pool_dma);
+err_free_frame:
+	kfree(f);
+	return NULL;
+}
+
+/* free everything frame_new() allocated: both DMA-coherent pools and the
+   frame struct itself. NOTE(review): presumably callers guarantee DMA is
+   no longer referencing this frame -- verify at the call sites. */
+static void frame_delete(struct frame *f)
+{
+	pci_free_consistent(f->video->ohci->dev, PAGE_SIZE, f->header_pool, f->header_pool_dma);
+	pci_free_consistent(f->video->ohci->dev, f->descriptor_pool_size, f->descriptor_pool, f->descriptor_pool_dma);
+	kfree(f);
+}
+
+
+
+
+/*
+   frame_prepare() - build the DMA program for transmitting
+
+   Frame_prepare() must be called OUTSIDE the video->spinlock.
+   However, frame_prepare() must still be serialized, so
+   it should be called WITH the video->sem taken.
+ */
+
+static void frame_prepare(struct video_card *video, unsigned int this_frame)
+{
+	struct frame *f = video->frames[this_frame];
+	int last_frame;
+
+	struct DMA_descriptor_block *block;
+	dma_addr_t block_dma;
+	struct CIP_header *cip;
+	dma_addr_t cip_dma;
+
+	unsigned int n_descriptors, full_packets, packets_per_frame, payload_size;
+
+	/* these flags denote packets that need special attention */
+	int empty_packet, first_packet, last_packet, mid_packet;
+
+	u32 *branch_address, *last_branch_address = NULL;
+	unsigned long data_p;
+	int first_packet_empty = 0;
+	u32 cycleTimer, ct_sec, ct_cyc, ct_off;
+	unsigned long irq_flags;
+
+	irq_printk("frame_prepare( %d ) ---------------------\n", this_frame);
+
+	full_packets = 0;
+
+
+
+	if (video->pal_or_ntsc == DV1394_PAL)
+		packets_per_frame = DV1394_PAL_PACKETS_PER_FRAME;
+	else
+		packets_per_frame = DV1394_NTSC_PACKETS_PER_FRAME;
+
+	while ( full_packets < packets_per_frame ) {
+		empty_packet = first_packet = last_packet = mid_packet = 0;
+
+		data_p = f->data + full_packets * 480;
+
+		/************************************************/
+		/* allocate a descriptor block and a CIP header */
+		/************************************************/
+
+		/* note: these should NOT cross a page boundary (DMA restriction) */
+
+		if (f->n_packets >= MAX_PACKETS) {
+			printk(KERN_ERR "dv1394: FATAL ERROR: max packet count exceeded\n");
+			return;
+		}
+
+		/* the block surely won't cross a page boundary,
+		   since an even number of descriptor_blocks fit on a page */
+		block = &(f->descriptor_pool[f->n_packets]);
+
+		/* DMA address of the block = offset of block relative
+		    to the kernel base address of the descriptor pool
+		    + DMA base address of the descriptor pool */
+		block_dma = ((unsigned long) block - (unsigned long) f->descriptor_pool) + f->descriptor_pool_dma;
+
+
+		/* the whole CIP pool fits on one page, so no worries about boundaries */
+		if ( ((unsigned long) &(f->header_pool[f->n_packets]) - (unsigned long) f->header_pool)
+		    > PAGE_SIZE) {
+			printk(KERN_ERR "dv1394: FATAL ERROR: no room to allocate CIP header\n");
+			return;
+		}
+
+		cip = &(f->header_pool[f->n_packets]);
+
+		/* DMA address of the CIP header = offset of cip
+		   relative to kernel base address of the header pool
+		   + DMA base address of the header pool */
+		cip_dma = (unsigned long) cip % PAGE_SIZE + f->header_pool_dma;
+
+		/* is this an empty packet? */
+
+		if (video->cip_accum > (video->cip_d - video->cip_n)) {
+			empty_packet = 1;
+			payload_size = 8;
+			video->cip_accum -= (video->cip_d - video->cip_n);
+		} else {
+			payload_size = 488;
+			video->cip_accum += video->cip_n;
+		}
+
+		/* there are three important packets each frame:
+
+		   the first packet in the frame - we ask the card to record the timestamp when
+		                                   this packet is actually sent, so we can monitor
+						   how accurate our timestamps are. Also, the first
+						   packet serves as a semaphore to let us know that
+						   it's OK to free the *previous* frame's DMA buffer
+
+		   the last packet in the frame -  this packet is used to detect buffer underflows.
+		                                   if this is the last ready frame, the last DMA block
+						   will have a branch back to the beginning of the frame
+						   (so that the card will re-send the frame on underflow).
+						   if this branch gets taken, we know that at least one
+						   frame has been dropped. When the next frame is ready,
+						   the branch is pointed to its first packet, and the
+						   semaphore is disabled.
+
+		   a "mid" packet slightly before the end of the frame - this packet should trigger
+		                   an interrupt so we can go and assign a timestamp to the first packet
+				   in the next frame. We don't use the very last packet in the frame
+				   for this purpose, because that would leave very little time to set
+				   the timestamp before DMA starts on the next frame.
+		*/
+
+		if (f->n_packets == 0) {
+			first_packet = 1;
+		} else if ( full_packets == (packets_per_frame-1) ) {
+			last_packet = 1;
+		} else if (f->n_packets == packets_per_frame) {
+			/* n_packets counts empty packets too, so this fires
+			   shortly before the frame's last full packet */
+			mid_packet = 1;
+		}
+
+
+		/********************/
+		/* setup CIP header */
+		/********************/
+
+		/* the timestamp will be written later from the
+		   mid-frame interrupt handler. For now we just
+		   store the address of the CIP header(s) that
+		   need a timestamp. */
+
+		/* first packet in the frame needs a timestamp */
+		if (first_packet) {
+			f->cip_syt1 = cip;
+			if (empty_packet)
+				first_packet_empty = 1;
+
+		} else if (first_packet_empty && (f->n_packets == 1) ) {
+			/* if the first packet was empty, the second
+			   packet's CIP header also needs a timestamp */
+			f->cip_syt2 = cip;
+		}
+
+		fill_cip_header(cip,
+				/* the node ID number of the OHCI card */
+				reg_read(video->ohci, OHCI1394_NodeID) & 0x3F,
+				video->continuity_counter,
+				video->pal_or_ntsc,
+				0xFFFF /* the timestamp is filled in later */);
+
+		/* advance counter, only for full packets */
+		if ( ! empty_packet )
+			video->continuity_counter++;
+
+		/******************************/
+		/* setup DMA descriptor block */
+		/******************************/
+
+		/* first descriptor - OUTPUT_MORE_IMMEDIATE, for the controller's IT header */
+		fill_output_more_immediate( &(block->u.out.omi), 1, video->channel, 0, payload_size);
+
+		if (empty_packet) {
+			/* second descriptor - OUTPUT_LAST for CIP header */
+			fill_output_last( &(block->u.out.u.empty.ol),
+
+					  /* want completion status on all interesting packets */
+					  (first_packet || mid_packet || last_packet) ? 1 : 0,
+
+					  /* want interrupts on all interesting packets */
+					  (first_packet || mid_packet || last_packet) ? 1 : 0,
+
+					  sizeof(struct CIP_header), /* data size */
+					  cip_dma);
+
+			if (first_packet)
+				f->frame_begin_timestamp = &(block->u.out.u.empty.ol.q[3]);
+			else if (mid_packet)
+				f->mid_frame_timestamp = &(block->u.out.u.empty.ol.q[3]);
+			else if (last_packet) {
+				f->frame_end_timestamp = &(block->u.out.u.empty.ol.q[3]);
+				f->frame_end_branch = &(block->u.out.u.empty.ol.q[2]);
+			}
+
+			branch_address = &(block->u.out.u.empty.ol.q[2]);
+			n_descriptors = 3;
+			if (first_packet)
+				f->first_n_descriptors = n_descriptors;
+
+		} else { /* full packet */
+
+			/* second descriptor - OUTPUT_MORE for CIP header */
+			fill_output_more( &(block->u.out.u.full.om),
+					  sizeof(struct CIP_header), /* data size */
+					  cip_dma);
+
+
+			/* third (and possibly fourth) descriptor - for DV data */
+			/* the 480-byte payload can cross a page boundary; if so,
+			   we need to split it into two DMA descriptors */
+
+			/* does the 480-byte data payload cross a page boundary? */
+			if ( (PAGE_SIZE- ((unsigned long)data_p % PAGE_SIZE) ) < 480 ) {
+
+				/* page boundary crossed */
+
+				fill_output_more( &(block->u.out.u.full.u.cross.om),
+						  /* data size - how much of data_p fits on the first page */
+						  PAGE_SIZE - (data_p % PAGE_SIZE),
+
+						  /* DMA address of data_p */
+						  dma_region_offset_to_bus(&video->dv_buf,
+									   data_p - (unsigned long) video->dv_buf.kvirt));
+
+				fill_output_last( &(block->u.out.u.full.u.cross.ol),
+
+						  /* want completion status on all interesting packets */
+						  (first_packet || mid_packet || last_packet) ? 1 : 0,
+
+						  /* want interrupt on all interesting packets */
+						  (first_packet || mid_packet || last_packet) ? 1 : 0,
+
+						  /* data size - remaining portion of data_p */
+						  480 - (PAGE_SIZE - (data_p % PAGE_SIZE)),
+
+						  /* DMA address of data_p + PAGE_SIZE - (data_p % PAGE_SIZE) */
+						  dma_region_offset_to_bus(&video->dv_buf,
+									   data_p + PAGE_SIZE - (data_p % PAGE_SIZE) - (unsigned long) video->dv_buf.kvirt));
+
+				if (first_packet)
+					f->frame_begin_timestamp = &(block->u.out.u.full.u.cross.ol.q[3]);
+				else if (mid_packet)
+					f->mid_frame_timestamp = &(block->u.out.u.full.u.cross.ol.q[3]);
+				else if (last_packet) {
+					f->frame_end_timestamp = &(block->u.out.u.full.u.cross.ol.q[3]);
+					f->frame_end_branch = &(block->u.out.u.full.u.cross.ol.q[2]);
+				}
+
+				branch_address = &(block->u.out.u.full.u.cross.ol.q[2]);
+
+				n_descriptors = 5;
+				if (first_packet)
+					f->first_n_descriptors = n_descriptors;
+
+				full_packets++;
+
+			} else {
+				/* fits on one page */
+
+				fill_output_last( &(block->u.out.u.full.u.nocross.ol),
+
+						  /* want completion status on all interesting packets */
+						  (first_packet || mid_packet || last_packet) ? 1 : 0,
+
+						  /* want interrupt on all interesting packets */
+						  (first_packet || mid_packet || last_packet) ? 1 : 0,
+
+						  480, /* data size (480 bytes of DV data) */
+
+
+						  /* DMA address of data_p */
+						  dma_region_offset_to_bus(&video->dv_buf,
+									   data_p - (unsigned long) video->dv_buf.kvirt));
+
+				if (first_packet)
+					f->frame_begin_timestamp = &(block->u.out.u.full.u.nocross.ol.q[3]);
+				else if (mid_packet)
+					f->mid_frame_timestamp = &(block->u.out.u.full.u.nocross.ol.q[3]);
+				else if (last_packet) {
+					f->frame_end_timestamp = &(block->u.out.u.full.u.nocross.ol.q[3]);
+					f->frame_end_branch = &(block->u.out.u.full.u.nocross.ol.q[2]);
+				}
+
+				branch_address = &(block->u.out.u.full.u.nocross.ol.q[2]);
+
+				n_descriptors = 4;
+				if (first_packet)
+					f->first_n_descriptors = n_descriptors;
+
+				full_packets++;
+			}
+		}
+
+		/* link this descriptor block into the DMA program by filling in
+		   the branch address of the previous block */
+
+		/* note: we are not linked into the active DMA chain yet */
+
+		if (last_branch_address) {
+			*(last_branch_address) = cpu_to_le32(block_dma | n_descriptors);
+		}
+
+		last_branch_address = branch_address;
+
+
+		f->n_packets++;
+
+	}
+
+	/* when we first assemble a new frame, set the final branch
+	   to loop back up to the top */
+	*(f->frame_end_branch) = cpu_to_le32(f->descriptor_pool_dma | f->first_n_descriptors);
+
+	/* make the latest version of this frame visible to the PCI card */
+	dma_region_sync_for_device(&video->dv_buf, f->data - (unsigned long) video->dv_buf.kvirt, video->frame_size);
+
+	/* lock against DMA interrupt */
+	spin_lock_irqsave(&video->spinlock, irq_flags);
+
+	f->state = FRAME_READY;
+
+	video->n_clear_frames--;
+
+	last_frame = video->first_clear_frame - 1;
+	if (last_frame == -1)
+		last_frame = video->n_frames-1;
+
+	video->first_clear_frame = (video->first_clear_frame + 1) % video->n_frames;
+
+	irq_printk("   frame %d prepared, active_frame = %d, n_clear_frames = %d, first_clear_frame = %d\n last=%d\n",
+		   this_frame, video->active_frame, video->n_clear_frames, video->first_clear_frame, last_frame);
+
+	irq_printk("   begin_ts %08lx mid_ts %08lx end_ts %08lx end_br %08lx\n",
+		   (unsigned long) f->frame_begin_timestamp,
+		   (unsigned long) f->mid_frame_timestamp,
+		   (unsigned long) f->frame_end_timestamp,
+		   (unsigned long) f->frame_end_branch);
+
+	if (video->active_frame != -1) {
+
+		/* if DMA is already active, we are almost done */
+		/* just link us onto the active DMA chain */
+		if (video->frames[last_frame]->frame_end_branch) {
+			u32 temp;
+
+			/* point the previous frame's tail to this frame's head */
+			*(video->frames[last_frame]->frame_end_branch) = cpu_to_le32(f->descriptor_pool_dma | f->first_n_descriptors);
+
+			/* this write MUST precede the next one, or we could silently drop frames */
+			wmb();
+
+			/* disable the want_status semaphore on the last packet */
+			temp = le32_to_cpu(*(video->frames[last_frame]->frame_end_branch - 2));
+			/* NOTE(review): this mask is assumed to clear the
+			   interrupt/status-request control bits of the
+			   OUTPUT_LAST descriptor -- verify against the OHCI
+			   descriptor control-field layout */
+			temp &= 0xF7CFFFFF;
+			*(video->frames[last_frame]->frame_end_branch - 2) = cpu_to_le32(temp);
+
+			/* flush these writes to memory ASAP */
+			flush_pci_write(video->ohci);
+
+			/* NOTE:
+			   ideally the writes should be "atomic": if
+			   the OHCI card reads the want_status flag in
+			   between them, we'll falsely report a
+			   dropped frame. Hopefully this window is too
+			   small to really matter, and the consequence
+			   is rather harmless. */
+
+
+			irq_printk("     new frame %d linked onto DMA chain\n", this_frame);
+
+		} else {
+			printk(KERN_ERR "dv1394: last frame not ready???\n");
+		}
+
+	} else {
+
+		u32 transmit_sec, transmit_cyc;
+		u32 ts_cyc, ts_off;
+
+		/* DMA is stopped, so this is the very first frame */
+		video->active_frame = this_frame;
+
+	        /* set CommandPtr to address and size of first descriptor block */
+		reg_write(video->ohci, video->ohci_IsoXmitCommandPtr,
+			  video->frames[video->active_frame]->descriptor_pool_dma |
+			  f->first_n_descriptors);
+
+		/* assign a timestamp based on the current cycle time...
+		   We'll tell the card to begin DMA 100 cycles from now,
+		   and assign a timestamp 103 cycles from now */
+
+		cycleTimer = reg_read(video->ohci, OHCI1394_IsochronousCycleTimer);
+
+		ct_sec = cycleTimer >> 25;
+		ct_cyc = (cycleTimer >> 12) & 0x1FFF;
+		ct_off = cycleTimer & 0xFFF;
+
+		transmit_sec = ct_sec;
+		transmit_cyc = ct_cyc + 100;
+
+		transmit_sec += transmit_cyc/8000;
+		transmit_cyc %= 8000;
+
+		ts_off = ct_off;
+		ts_cyc = transmit_cyc + 3;
+		ts_cyc %= 8000;
+
+		f->assigned_timestamp = (ts_cyc&0xF) << 12;
+
+		/* now actually write the timestamp into the appropriate CIP headers */
+		if (f->cip_syt1) {
+			f->cip_syt1->b[6] = f->assigned_timestamp >> 8;
+			f->cip_syt1->b[7] = f->assigned_timestamp & 0xFF;
+		}
+		if (f->cip_syt2) {
+			f->cip_syt2->b[6] = f->assigned_timestamp >> 8;
+			f->cip_syt2->b[7] = f->assigned_timestamp & 0xFF;
+		}
+
+		/* --- start DMA --- */
+
+		/* clear all bits in ContextControl register */
+
+		reg_write(video->ohci, video->ohci_IsoXmitContextControlClear, 0xFFFFFFFF);
+		wmb();
+
+		/* the OHCI card has the ability to start ISO transmission on a
+		   particular cycle (start-on-cycle). This way we can ensure that
+		   the first DV frame will have an accurate timestamp.
+
+		   However, start-on-cycle only appears to work if the OHCI card
+		   is cycle master! Since the consequences of messing up the first
+		   timestamp are minimal*, just disable start-on-cycle for now.
+
+		   * my DV deck drops the first few frames before it "locks in;"
+		     so the first frame having an incorrect timestamp is inconsequential.
+		*/
+
+#if 0
+		reg_write(video->ohci, video->ohci_IsoXmitContextControlSet,
+			  (1 << 31) /* enable start-on-cycle */
+			  | ( (transmit_sec & 0x3) << 29)
+			  | (transmit_cyc << 16));
+		wmb();
+#endif
+
+		video->dma_running = 1;
+
+		/* set the 'run' bit */
+		reg_write(video->ohci, video->ohci_IsoXmitContextControlSet, 0x8000);
+		flush_pci_write(video->ohci);
+
+		/* --- DMA should be running now --- */
+
+		debug_printk("    Cycle = %4u ContextControl = %08x CmdPtr = %08x\n",
+			     (reg_read(video->ohci, OHCI1394_IsochronousCycleTimer) >> 12) & 0x1FFF,
+			     reg_read(video->ohci, video->ohci_IsoXmitContextControlSet),
+			     reg_read(video->ohci, video->ohci_IsoXmitCommandPtr));
+
+		debug_printk("    DMA start - current cycle %4u, transmit cycle %4u (%2u), assigning ts cycle %2u\n",
+			     ct_cyc, transmit_cyc, transmit_cyc & 0xF, ts_cyc & 0xF);
+
+#if DV1394_DEBUG_LEVEL >= 2
+		{
+			/* check if DMA is really running */
+			int i = 0;
+			while (i < 20) {
+				mb();
+				mdelay(1);
+				if (reg_read(video->ohci, video->ohci_IsoXmitContextControlSet) & (1 << 10)) {
+					printk("DMA ACTIVE after %d msec\n", i);
+					break;
+				}
+				i++;
+			}
+
+			printk("set = %08x, cmdPtr = %08x\n",
+			       reg_read(video->ohci, video->ohci_IsoXmitContextControlSet),
+			       reg_read(video->ohci, video->ohci_IsoXmitCommandPtr)
+			       );
+
+			if ( ! (reg_read(video->ohci, video->ohci_IsoXmitContextControlSet) &  (1 << 10)) ) {
+				printk("DMA did NOT go active after 20ms, event = %x\n",
+				       reg_read(video->ohci, video->ohci_IsoXmitContextControlSet) & 0x1F);
+			} else
+				printk("DMA is RUNNING!\n");
+		}
+#endif
+
+	}
+
+
+	spin_unlock_irqrestore(&video->spinlock, irq_flags);
+}
+
+
+
+/*** RECEIVE FUNCTIONS *****************************************************/
+
+/*
+	frame method put_packet
+
+	map and copy the packet data to its location in the frame
+	based upon DIF section and sequence
+*/
+
+/* copy one received packet's 480-byte DV payload to its proper location
+   in the frame, based on the DIF section type, sequence number, and block
+   number carried in the payload's first three bytes. Packets with
+   out-of-range sequence/block numbers or unknown section types are
+   silently dropped. (specifier order fixed: `static inline void`,
+   not `static void inline`) */
+static inline void
+frame_put_packet (struct frame *f, struct packet *p)
+{
+	int section_type = p->data[0] >> 5;           /* section type is in bits 5 - 7 */
+	int dif_sequence = p->data[1] >> 4;           /* dif sequence number is in bits 4 - 7 */
+	int dif_block = p->data[2];
+	int block_offset;                             /* DIF block index within the sequence */
+
+	/* sanity check */
+	if (dif_sequence > 11 || dif_block > 149) return;
+
+	/* each DIF sequence occupies 150 blocks of 80 bytes; compute where
+	   this packet's payload lands inside its sequence */
+	switch (section_type) {
+	case 0:           /* 1 Header block */
+	        block_offset = 0;
+	        break;
+
+	case 1:           /* 2 Subcode blocks */
+	        block_offset = 1 + dif_block;
+	        break;
+
+	case 2:           /* 3 VAUX blocks */
+	        block_offset = 3 + dif_block;
+	        break;
+
+	case 3:           /* 9 Audio blocks interleaved with video */
+	        block_offset = 6 + dif_block * 16;
+	        break;
+
+	case 4:           /* 135 Video blocks interleaved with audio */
+	        block_offset = 7 + (dif_block / 15) + dif_block;
+	        break;
+
+	default:           /* we can not handle any other data */
+	        return;
+	}
+
+	memcpy( (void *) f->data + (dif_sequence * 150 + block_offset) * 80, p->data, 480);
+}
+
+
/*
   start_dma_receive() - bring up the iso receive DMA context.

   On the first call after init (video->first_run == 1) the context
   control, match and command-pointer registers are programmed and the
   context is started.  On later calls, the context is only woken (via
   the wake bit) if the hardware has flagged it dead (ContextControl
   bit 11).
*/
static void start_dma_receive(struct video_card *video)
{
	if (video->first_run == 1) {
		video->first_run = 0;

		/* start DMA once all of the frames are READY */
		video->n_clear_frames = 0;
		video->first_clear_frame = -1;
		video->current_packet = 0;
		video->active_frame = 0;

		/* reset iso recv control register */
		reg_write(video->ohci, video->ohci_IsoRcvContextControlClear, 0xFFFFFFFF);
		wmb();

		/* clear bufferFill, set isochHeader and speed (0=100) */
		reg_write(video->ohci, video->ohci_IsoRcvContextControlSet, 0x40000000);

		/* match on all tags, listen on channel */
		reg_write(video->ohci, video->ohci_IsoRcvContextMatch, 0xf0000000 | video->channel);

		/* address and first descriptor block + Z=1 */
		reg_write(video->ohci, video->ohci_IsoRcvCommandPtr,
			  video->frames[0]->descriptor_pool_dma | 1); /* Z=1 */
		wmb();

		/* mark running before setting the run bit, so the tasklet
		   sees dma_running once interrupts start arriving */
		video->dma_running = 1;

		/* run */
		reg_write(video->ohci, video->ohci_IsoRcvContextControlSet, 0x8000);
		flush_pci_write(video->ohci);

		debug_printk("dv1394: DMA started\n");

#if DV1394_DEBUG_LEVEL >= 2
		{
			int i;

			/* debug build only: poll up to 1 second for the
			   context's "active" bit (bit 10) to come on */
			for (i = 0; i < 1000; ++i) {
				mdelay(1);
				if (reg_read(video->ohci, video->ohci_IsoRcvContextControlSet) & (1 << 10)) {
					printk("DMA ACTIVE after %d msec\n", i);
					break;
				}
			}
			/* bit 11 = dead; low 5 bits hold the event code */
			if ( reg_read(video->ohci, video->ohci_IsoRcvContextControlSet) &  (1 << 11) ) {
				printk("DEAD, event = %x\n",
					   reg_read(video->ohci, video->ohci_IsoRcvContextControlSet) & 0x1F);
			} else
				printk("RUNNING!\n");
		}
#endif
	} else if ( reg_read(video->ohci, video->ohci_IsoRcvContextControlSet) &  (1 << 11) ) {
		/* context went dead; report the event code (low 5 bits) */
		debug_printk("DEAD, event = %x\n",
			     reg_read(video->ohci, video->ohci_IsoRcvContextControlSet) & 0x1F);

		/* wake */
		reg_write(video->ohci, video->ohci_IsoRcvContextControlSet, (1 << 12));
	}
}
+
+
+/*
+   receive_packets() - build the DMA program for receiving
+*/
+
+static void receive_packets(struct video_card *video)
+{
+	struct DMA_descriptor_block *block = NULL;
+	dma_addr_t block_dma = 0;
+	struct packet *data = NULL;
+	dma_addr_t data_dma = 0;
+	u32 *last_branch_address = NULL;
+	unsigned long irq_flags;
+	int want_interrupt = 0;
+	struct frame *f = NULL;
+	int i, j;
+
+	spin_lock_irqsave(&video->spinlock, irq_flags);
+
+	for (j = 0; j < video->n_frames; j++) {
+
+		/* connect frames */
+		if (j > 0 && f != NULL && f->frame_end_branch != NULL)
+			*(f->frame_end_branch) = cpu_to_le32(video->frames[j]->descriptor_pool_dma | 1); /* set Z=1 */
+
+		f = video->frames[j];
+
+		for (i = 0; i < MAX_PACKETS; i++) {
+			/* locate a descriptor block and packet from the buffer */
+			block = &(f->descriptor_pool[i]);
+			block_dma = ((unsigned long) block - (unsigned long) f->descriptor_pool) + f->descriptor_pool_dma;
+
+			data = ((struct packet*)video->packet_buf.kvirt) + f->frame_num * MAX_PACKETS + i;
+			data_dma = dma_region_offset_to_bus( &video->packet_buf,
+							     ((unsigned long) data - (unsigned long) video->packet_buf.kvirt) );
+
+			/* setup DMA descriptor block */
+			want_interrupt = ((i % (MAX_PACKETS/2)) == 0 || i == (MAX_PACKETS-1));
+			fill_input_last( &(block->u.in.il), want_interrupt, 512, data_dma);
+
+			/* link descriptors */
+			last_branch_address = f->frame_end_branch;
+
+			if (last_branch_address != NULL)
+				*(last_branch_address) = cpu_to_le32(block_dma | 1); /* set Z=1 */
+
+			f->frame_end_branch = &(block->u.in.il.q[2]);
+		}
+
+	} /* next j */
+
+	spin_unlock_irqrestore(&video->spinlock, irq_flags);
+
+}
+
+
+
+/*** MANAGEMENT FUNCTIONS **************************************************/
+
/*
   do_dv1394_init() - validate the user-supplied parameters, claim the
   ISO channel and the IT/IR DMA contexts, allocate the frame structs
   and DMA buffers, and program the per-context register offsets.

   Called with video->sem held (all callers in this file take it first).
   On any failure the card is shut down again via do_dv1394_shutdown()
   and a negative errno is returned.
*/
static int do_dv1394_init(struct video_card *video, struct dv1394_init *init)
{
	unsigned long flags, new_buf_size;
	int i;
	u64 chan_mask;
	int retval = -EINVAL;

	debug_printk("dv1394: initialising %d\n", video->id);
	if (init->api_version != DV1394_API_VERSION)
		return -EINVAL;

	/* first sanitize all the parameters */
	if ( (init->n_frames < 2) || (init->n_frames > DV1394_MAX_FRAMES) )
		return -EINVAL;

	if ( (init->format != DV1394_NTSC) && (init->format != DV1394_PAL) )
		return -EINVAL;

	if ( (init->syt_offset == 0) || (init->syt_offset > 50) )
		/* default SYT offset is 3 cycles */
		init->syt_offset = 3;

	/* NOTE(review): if init->channel is an unsigned type the "< 0"
	   test is always false -- confirm against struct dv1394_init */
	if ( (init->channel > 63) || (init->channel < 0) )
		init->channel = 63;

	chan_mask = (u64)1 << init->channel;

	/* calculate what size DMA buffer is needed */
	if (init->format == DV1394_NTSC)
		new_buf_size = DV1394_NTSC_FRAME_SIZE * init->n_frames;
	else
		new_buf_size = DV1394_PAL_FRAME_SIZE * init->n_frames;

	/* round up to PAGE_SIZE */
	if (new_buf_size % PAGE_SIZE) new_buf_size += PAGE_SIZE - (new_buf_size % PAGE_SIZE);

	/* don't allow the user to allocate the DMA buffer more than once */
	if (video->dv_buf.kvirt && video->dv_buf_size != new_buf_size) {
		printk("dv1394: re-sizing the DMA buffer is not allowed\n");
		return -EINVAL;
	}

	/* shutdown the card if it's currently active */
	/* (the card should not be reset if the parameters are screwy) */

	do_dv1394_shutdown(video, 0);

	/* try to claim the ISO channel */
	spin_lock_irqsave(&video->ohci->IR_channel_lock, flags);
	if (video->ohci->ISO_channel_usage & chan_mask) {
		spin_unlock_irqrestore(&video->ohci->IR_channel_lock, flags);
		retval = -EBUSY;
		goto err;
	}
	video->ohci->ISO_channel_usage |= chan_mask;
	spin_unlock_irqrestore(&video->ohci->IR_channel_lock, flags);

	video->channel = init->channel;

	/* initialize misc. fields of video */
	video->n_frames = init->n_frames;
	video->pal_or_ntsc = init->format;

	video->cip_accum = 0;
	video->continuity_counter = 0;

	video->active_frame = -1;
	video->first_clear_frame = 0;
	video->n_clear_frames = video->n_frames;
	video->dropped_frames = 0;

	video->write_off = 0;

	video->first_run = 1;
	video->current_packet = -1;
	video->first_frame = 0;

	/* CIP numerator/denominator: user-supplied values, or the
	   standard-rate constants for the chosen video system */
	if (video->pal_or_ntsc == DV1394_NTSC) {
		video->cip_n = init->cip_n != 0 ? init->cip_n : CIP_N_NTSC;
		video->cip_d = init->cip_d != 0 ? init->cip_d : CIP_D_NTSC;
		video->frame_size = DV1394_NTSC_FRAME_SIZE;
	} else {
		video->cip_n = init->cip_n != 0 ? init->cip_n : CIP_N_PAL;
		video->cip_d = init->cip_d != 0 ? init->cip_d : CIP_D_PAL;
		video->frame_size = DV1394_PAL_FRAME_SIZE;
	}

	video->syt_offset = init->syt_offset;

	/* find and claim DMA contexts on the OHCI card */

	if (video->ohci_it_ctx == -1) {
		ohci1394_init_iso_tasklet(&video->it_tasklet, OHCI_ISO_TRANSMIT,
					  it_tasklet_func, (unsigned long) video);

		if (ohci1394_register_iso_tasklet(video->ohci, &video->it_tasklet) < 0) {
			printk(KERN_ERR "dv1394: could not find an available IT DMA context\n");
			retval = -EBUSY;
			goto err;
		}

		video->ohci_it_ctx = video->it_tasklet.context;
		debug_printk("dv1394: claimed IT DMA context %d\n", video->ohci_it_ctx);
	}

	if (video->ohci_ir_ctx == -1) {
		ohci1394_init_iso_tasklet(&video->ir_tasklet, OHCI_ISO_RECEIVE,
					  ir_tasklet_func, (unsigned long) video);

		if (ohci1394_register_iso_tasklet(video->ohci, &video->ir_tasklet) < 0) {
			printk(KERN_ERR "dv1394: could not find an available IR DMA context\n");
			retval = -EBUSY;
			goto err;
		}
		video->ohci_ir_ctx = video->ir_tasklet.context;
		debug_printk("dv1394: claimed IR DMA context %d\n", video->ohci_ir_ctx);
	}

	/* allocate struct frames */
	for (i = 0; i < init->n_frames; i++) {
		video->frames[i] = frame_new(i, video);

		if (!video->frames[i]) {
			printk(KERN_ERR "dv1394: Cannot allocate frame structs\n");
			retval = -ENOMEM;
			goto err;
		}
	}

	if (!video->dv_buf.kvirt) {
		/* allocate the ringbuffer */
		retval = dma_region_alloc(&video->dv_buf, new_buf_size, video->ohci->dev, PCI_DMA_TODEVICE);
		if (retval)
			goto err;

		video->dv_buf_size = new_buf_size;

		debug_printk("dv1394: Allocated %d frame buffers, total %u pages (%u DMA pages), %lu bytes\n", 
			     video->n_frames, video->dv_buf.n_pages,
			     video->dv_buf.n_dma_pages, video->dv_buf_size);
	}

	/* set up the frame->data pointers */
	for (i = 0; i < video->n_frames; i++)
		video->frames[i]->data = (unsigned long) video->dv_buf.kvirt + i * video->frame_size;

	if (!video->packet_buf.kvirt) {
		/* allocate packet buffer */
		video->packet_buf_size = sizeof(struct packet) * video->n_frames * MAX_PACKETS;
		if (video->packet_buf_size % PAGE_SIZE)
			video->packet_buf_size += PAGE_SIZE - (video->packet_buf_size % PAGE_SIZE);

		retval = dma_region_alloc(&video->packet_buf, video->packet_buf_size,
					  video->ohci->dev, PCI_DMA_FROMDEVICE);
		if (retval)
			goto err;

		debug_printk("dv1394: Allocated %d packets in buffer, total %u pages (%u DMA pages), %lu bytes\n",
				 video->n_frames*MAX_PACKETS, video->packet_buf.n_pages,
				 video->packet_buf.n_dma_pages, video->packet_buf_size);
	}

	/* set up register offsets for IT context */
	/* IT DMA context registers are spaced 16 bytes apart */
	video->ohci_IsoXmitContextControlSet = OHCI1394_IsoXmitContextControlSet+16*video->ohci_it_ctx;
	video->ohci_IsoXmitContextControlClear = OHCI1394_IsoXmitContextControlClear+16*video->ohci_it_ctx;
	video->ohci_IsoXmitCommandPtr = OHCI1394_IsoXmitCommandPtr+16*video->ohci_it_ctx;

	/* enable interrupts for IT context */
	reg_write(video->ohci, OHCI1394_IsoXmitIntMaskSet, (1 << video->ohci_it_ctx));
	debug_printk("dv1394: interrupts enabled for IT context %d\n", video->ohci_it_ctx);

	/* set up register offsets for IR context */
	/* IR DMA context registers are spaced 32 bytes apart */
	video->ohci_IsoRcvContextControlSet = OHCI1394_IsoRcvContextControlSet+32*video->ohci_ir_ctx;
	video->ohci_IsoRcvContextControlClear = OHCI1394_IsoRcvContextControlClear+32*video->ohci_ir_ctx;
	video->ohci_IsoRcvCommandPtr = OHCI1394_IsoRcvCommandPtr+32*video->ohci_ir_ctx;
	video->ohci_IsoRcvContextMatch = OHCI1394_IsoRcvContextMatch+32*video->ohci_ir_ctx;

	/* enable interrupts for IR context */
	reg_write(video->ohci, OHCI1394_IsoRecvIntMaskSet, (1 << video->ohci_ir_ctx) );
	debug_printk("dv1394: interrupts enabled for IR context %d\n", video->ohci_ir_ctx);

	return 0;

err:
	do_dv1394_shutdown(video, 1);
	return retval;
}
+
+/* if the user doesn't bother to call ioctl(INIT) before starting
+   mmap() or read()/write(), just give him some default values */
+
+static int do_dv1394_init_default(struct video_card *video)
+{
+	struct dv1394_init init;
+
+	init.api_version = DV1394_API_VERSION;
+	init.n_frames = DV1394_MAX_FRAMES / 4;
+	/* the following are now set via devfs */
+	init.channel = video->channel;
+	init.format = video->pal_or_ntsc;
+	init.cip_n = video->cip_n;
+	init.cip_d = video->cip_d;
+	init.syt_offset = video->syt_offset;
+
+	return do_dv1394_init(video, &init);
+}
+
/* stop_dma() - halt both the IT and IR DMA contexts and wait (busy-poll,
   up to 100 ms with the spinlock held) until the hardware reports the
   contexts inactive.  do NOT call from interrupt context (uses udelay) */
static void stop_dma(struct video_card *video)
{
	unsigned long flags;
	int i;

	/* no interrupts */
	spin_lock_irqsave(&video->spinlock, flags);

	video->dma_running = 0;

	/* nothing claimed, nothing to stop */
	if ( (video->ohci_it_ctx == -1) && (video->ohci_ir_ctx == -1) )
		goto out;

	/* stop DMA if in progress */
	/* (bit 10 of ContextControl = context active) */
	if ( (video->active_frame != -1) ||
	    (reg_read(video->ohci, video->ohci_IsoXmitContextControlClear) & (1 << 10)) ||
	    (reg_read(video->ohci, video->ohci_IsoRcvContextControlClear) &  (1 << 10)) ) {

		/* clear the .run bits */
		reg_write(video->ohci, video->ohci_IsoXmitContextControlClear, (1 << 15));
		reg_write(video->ohci, video->ohci_IsoRcvContextControlClear, (1 << 15));
		flush_pci_write(video->ohci);

		video->active_frame = -1;
		video->first_run = 1;

		/* wait until DMA really stops */
		i = 0;
		while (i < 1000) {

			/* wait 0.1 millisecond */
			udelay(100);

			if ( (reg_read(video->ohci, video->ohci_IsoXmitContextControlClear) & (1 << 10)) ||
			    (reg_read(video->ohci, video->ohci_IsoRcvContextControlClear)  & (1 << 10)) ) {
				/* still active */
				debug_printk("dv1394: stop_dma: DMA not stopped yet\n" );
				mb();
			} else {
				debug_printk("dv1394: stop_dma: DMA stopped safely after %d ms\n", i/10);
				break;
			}

			i++;
		}

		if (i == 1000) {
			printk(KERN_ERR "dv1394: stop_dma: DMA still going after %d ms!\n", i/10);
		}
	}
	else
		debug_printk("dv1394: stop_dma: already stopped.\n");

out:
	spin_unlock_irqrestore(&video->spinlock, flags);
}
+
+
+
/*
   do_dv1394_shutdown() - stop DMA, release the IT/IR contexts and the
   ISO channel, and free the frame structs and packet buffer.

   The main DV ringbuffer is freed only when free_dv_buf is nonzero:
   it may still be mapped into user space, in which case it must
   survive until release() (see the mmap theory-of-operation comment).
*/
static void do_dv1394_shutdown(struct video_card *video, int free_dv_buf)
{
	int i;

	debug_printk("dv1394: shutdown...\n");

	/* stop DMA if in progress */
	stop_dma(video);

	/* release the DMA contexts */
	if (video->ohci_it_ctx != -1) {
		/* zero the cached register offsets for the released context */
		video->ohci_IsoXmitContextControlSet = 0;
		video->ohci_IsoXmitContextControlClear = 0;
		video->ohci_IsoXmitCommandPtr = 0;

		/* disable interrupts for IT context */
		reg_write(video->ohci, OHCI1394_IsoXmitIntMaskClear, (1 << video->ohci_it_ctx));

		/* remove tasklet */
		ohci1394_unregister_iso_tasklet(video->ohci, &video->it_tasklet);
		debug_printk("dv1394: IT context %d released\n", video->ohci_it_ctx);
		video->ohci_it_ctx = -1;
	}

	if (video->ohci_ir_ctx != -1) {
		video->ohci_IsoRcvContextControlSet = 0;
		video->ohci_IsoRcvContextControlClear = 0;
		video->ohci_IsoRcvCommandPtr = 0;
		video->ohci_IsoRcvContextMatch = 0;

		/* disable interrupts for IR context */
		reg_write(video->ohci, OHCI1394_IsoRecvIntMaskClear, (1 << video->ohci_ir_ctx));

		/* remove tasklet */
		ohci1394_unregister_iso_tasklet(video->ohci, &video->ir_tasklet);
		debug_printk("dv1394: IR context %d released\n", video->ohci_ir_ctx);
		video->ohci_ir_ctx = -1;
	}

	/* release the ISO channel */
	if (video->channel != -1) {
		u64 chan_mask;
		unsigned long flags;

		chan_mask = (u64)1 << video->channel;

		spin_lock_irqsave(&video->ohci->IR_channel_lock, flags);
		video->ohci->ISO_channel_usage &= ~(chan_mask);
		spin_unlock_irqrestore(&video->ohci->IR_channel_lock, flags);

		video->channel = -1;
	}

	/* free the frame structs */
	for (i = 0; i < DV1394_MAX_FRAMES; i++) {
		if (video->frames[i])
			frame_delete(video->frames[i]);
		video->frames[i] = NULL;
	}

	video->n_frames = 0;

	/* we can't free the DMA buffer unless it is guaranteed that
	   no more user-space mappings exist */

	if (free_dv_buf) {
		dma_region_free(&video->dv_buf);
		video->dv_buf_size = 0;
	}

	/* free packet buffer */
	dma_region_free(&video->packet_buf);
	video->packet_buf_size = 0;

	debug_printk("dv1394: shutdown OK\n");
}
+
+/*
+       **********************************
+       *** MMAP() THEORY OF OPERATION ***
+       **********************************
+
+        The ringbuffer cannot be re-allocated or freed while
+        a user program maintains a mapping of it. (note that a mapping
+	can persist even after the device fd is closed!)
+
+	So, only let the user process allocate the DMA buffer once.
+	To resize or deallocate it, you must close the device file
+	and open it again.
+
+	Previously Dan M. hacked out a scheme that allowed the DMA
+	buffer to change by forcefully unmapping it from the user's
+	address space. It was prone to error because it's very hard to
+	track all the places the buffer could have been mapped (we
+	would have had to walk the vma list of every process in the
+	system to be sure we found all the mappings!). Instead, we
+	force the user to choose one buffer size and stick with
+	it. This small sacrifice is worth the huge reduction in
+	error-prone code in dv1394.
+*/
+
+static int dv1394_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	struct video_card *video = file_to_video_card(file);
+	int retval = -EINVAL;
+
+	/* serialize mmap */
+	down(&video->sem);
+
+	if ( ! video_card_initialized(video) ) {
+		retval = do_dv1394_init_default(video);
+		if (retval)
+			goto out;
+	}
+
+	retval = dma_region_mmap(&video->dv_buf, file, vma);
+out:
+	up(&video->sem);
+	return retval;
+}
+
+/*** DEVICE FILE INTERFACE *************************************************/
+
+/* no need to serialize, multiple threads OK */
+static unsigned int dv1394_poll(struct file *file, struct poll_table_struct *wait)
+{
+	struct video_card *video = file_to_video_card(file);
+	unsigned int mask = 0;
+	unsigned long flags;
+
+	poll_wait(file, &video->waitq, wait);
+
+	spin_lock_irqsave(&video->spinlock, flags);
+	if ( video->n_frames == 0 ) {
+
+	} else if ( video->active_frame == -1 ) {
+		/* nothing going on */
+		mask |= POLLOUT;
+	} else {
+		/* any clear/ready buffers? */
+		if (video->n_clear_frames >0)
+			mask |= POLLOUT | POLLIN;
+	}
+	spin_unlock_irqrestore(&video->spinlock, flags);
+
+	return mask;
+}
+
+static int dv1394_fasync(int fd, struct file *file, int on)
+{
+	/* I just copied this code verbatim from Alan Cox's mouse driver example
+	   (Documentation/DocBook/) */
+
+	struct video_card *video = file_to_video_card(file);
+
+	int retval = fasync_helper(fd, file, on, &video->fasync);
+
+	if (retval < 0)
+		return retval;
+        return 0;
+}
+
/*
   dv1394_write() - stream DV data into the transmit ringbuffer.

   Copies user data into the frame ringbuffer at video->write_off; each
   time the write offset crosses a frame boundary that frame is handed
   to the transmitter via frame_prepare().  Blocks waiting for a CLEAR
   frame unless O_NONBLOCK is set.  Returns the number of bytes
   consumed, or a negative errno if nothing was written.
*/
static ssize_t dv1394_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
{
	struct video_card *video = file_to_video_card(file);
	DECLARE_WAITQUEUE(wait, current);
	ssize_t ret;
	size_t cnt;
	unsigned long flags;
	int target_frame;

	/* serialize this to prevent multi-threaded mayhem */
	if (file->f_flags & O_NONBLOCK) {
		if (down_trylock(&video->sem))
			return -EAGAIN;
	} else {
		if (down_interruptible(&video->sem))
			return -ERESTARTSYS;
	}

	if ( !video_card_initialized(video) ) {
		ret = do_dv1394_init_default(video);
		if (ret) {
			up(&video->sem);
			return ret;
		}
	}

	ret = 0;
	add_wait_queue(&video->waitq, &wait);

	while (count > 0) {

		/* must set TASK_INTERRUPTIBLE *before* checking for free
		   buffers; otherwise we could miss a wakeup if the interrupt
		   fires between the check and the schedule() */

		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock_irqsave(&video->spinlock, flags);

		target_frame = video->first_clear_frame;

		spin_unlock_irqrestore(&video->spinlock, flags);

		/* NOTE(review): frame state is checked outside the spinlock
		   here -- presumably safe because only this writer moves
		   frames out of CLEAR; confirm against the tasklet */
		if (video->frames[target_frame]->state == FRAME_CLEAR) {

			/* how much room is left in the target frame buffer */
			cnt = video->frame_size - (video->write_off - target_frame * video->frame_size);

		} else {
			/* buffer is already used */
			cnt = 0;
		}

		if (cnt > count)
			cnt = count;

		if (cnt <= 0) {
			/* no room left, gotta wait */
			if (file->f_flags & O_NONBLOCK) {
				if (!ret)
					ret = -EAGAIN;
				break;
			}
			if (signal_pending(current)) {
				if (!ret)
					ret = -ERESTARTSYS;
				break;
			}

			schedule();

			continue; /* start over from 'while(count > 0)...' */
		}

		if (copy_from_user(video->dv_buf.kvirt + video->write_off, buffer, cnt)) {
			if (!ret)
				ret = -EFAULT;
			break;
		}

		/* advance the cursor, wrapping around the whole ringbuffer */
		video->write_off = (video->write_off + cnt) % (video->n_frames * video->frame_size);

		count -= cnt;
		buffer += cnt;
		ret += cnt;

		/* frame boundary crossed: submit the completed frame */
		if (video->write_off == video->frame_size * ((target_frame + 1) % video->n_frames))
				frame_prepare(video, target_frame);
	}

	remove_wait_queue(&video->waitq, &wait);
	set_current_state(TASK_RUNNING);
	up(&video->sem);
	return ret;
}
+
+
/*
   dv1394_read() - stream received DV data to user space.

   On first use, initializes the card with default parameters, builds
   the receive DMA program and starts reception.  Data is copied out of
   the frame ringbuffer at video->write_off (reused here as the read
   cursor); a frame is released (n_clear_frames--, cursor advanced)
   each time the offset crosses a frame boundary.  Blocks waiting for a
   CLEAR frame unless O_NONBLOCK is set.  Returns bytes delivered, or a
   negative errno if nothing was read.
*/
static ssize_t dv1394_read(struct file *file,  char __user *buffer, size_t count, loff_t *ppos)
{
	struct video_card *video = file_to_video_card(file);
	DECLARE_WAITQUEUE(wait, current);
	ssize_t ret;
	size_t cnt;
	unsigned long flags;
	int target_frame;

	/* serialize this to prevent multi-threaded mayhem */
	if (file->f_flags & O_NONBLOCK) {
		if (down_trylock(&video->sem))
			return -EAGAIN;
	} else {
		if (down_interruptible(&video->sem))
			return -ERESTARTSYS;
	}

	if ( !video_card_initialized(video) ) {
		ret = do_dv1394_init_default(video);
		if (ret) {
			up(&video->sem);
			return ret;
		}
		video->continuity_counter = -1;

		receive_packets(video);

		start_dma_receive(video);
	}

	ret = 0;
	add_wait_queue(&video->waitq, &wait);

	while (count > 0) {

		/* must set TASK_INTERRUPTIBLE *before* checking for free
		   buffers; otherwise we could miss a wakeup if the interrupt
		   fires between the check and the schedule() */

		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock_irqsave(&video->spinlock, flags);

		target_frame = video->first_clear_frame;

		spin_unlock_irqrestore(&video->spinlock, flags);

		if (target_frame >= 0 &&
			video->n_clear_frames > 0 &&
			video->frames[target_frame]->state == FRAME_CLEAR) {

			/* how much room is left in the target frame buffer */
			cnt = video->frame_size - (video->write_off - target_frame * video->frame_size);

		} else {
			/* buffer is already used */
			cnt = 0;
		}

		if (cnt > count)
			cnt = count;

		if (cnt <= 0) {
			/* no room left, gotta wait */
			if (file->f_flags & O_NONBLOCK) {
				if (!ret)
					ret = -EAGAIN;
				break;
			}
			if (signal_pending(current)) {
				if (!ret)
					ret = -ERESTARTSYS;
				break;
			}

			schedule();

			continue; /* start over from 'while(count > 0)...' */
		}

		if (copy_to_user(buffer, video->dv_buf.kvirt + video->write_off, cnt)) {
				if (!ret)
					ret = -EFAULT;
				break;
		}

		/* advance the cursor, wrapping around the whole ringbuffer */
		video->write_off = (video->write_off + cnt) % (video->n_frames * video->frame_size);

		count -= cnt;
		buffer += cnt;
		ret += cnt;

		/* frame boundary crossed: release the consumed frame */
		if (video->write_off == video->frame_size * ((target_frame + 1) % video->n_frames)) {
			spin_lock_irqsave(&video->spinlock, flags);
			video->n_clear_frames--;
			video->first_clear_frame = (video->first_clear_frame + 1) % video->n_frames;
			spin_unlock_irqrestore(&video->spinlock, flags);
		}
	}

	remove_wait_queue(&video->waitq, &wait);
	set_current_state(TASK_RUNNING);
	up(&video->sem);
	return ret;
}
+
+
+/*** DEVICE IOCTL INTERFACE ************************************************/
+
/*
   dv1394_ioctl() - control interface for the device.

   Handles frame submission/waiting/receiving, receive start,
   (re)initialization, shutdown and status queries.  Serialized against
   read()/write()/mmap() by video->sem, and runs under the BKL
   (lock_kernel).  SUBMIT_FRAMES and WAIT_FRAMES may sleep on
   video->waitq until the tasklet clears frames.
*/
static long dv1394_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct video_card *video;
	unsigned long flags;
	int ret = -EINVAL;
	void __user *argp = (void __user *)arg;

	DECLARE_WAITQUEUE(wait, current);

	lock_kernel();
	video = file_to_video_card(file);

	/* serialize this to prevent multi-threaded mayhem */
	if (file->f_flags & O_NONBLOCK) {
		if (down_trylock(&video->sem)) {
			unlock_kernel();
			return -EAGAIN;
		}
	} else {
		if (down_interruptible(&video->sem)) {
			unlock_kernel();
			return -ERESTARTSYS;
		}
	}

	switch(cmd)
	{
	/* queue 'arg' frames for transmission, blocking until each
	   one's buffer is CLEAR */
	case DV1394_IOC_SUBMIT_FRAMES: {
		unsigned int n_submit;

		if ( !video_card_initialized(video) ) {
			ret = do_dv1394_init_default(video);
			if (ret)
				goto out;
		}

		n_submit = (unsigned int) arg;

		if (n_submit > video->n_frames) {
			ret = -EINVAL;
			goto out;
		}

		while (n_submit > 0) {

			add_wait_queue(&video->waitq, &wait);
			set_current_state(TASK_INTERRUPTIBLE);

			spin_lock_irqsave(&video->spinlock, flags);

			/* wait until video->first_clear_frame is really CLEAR */
			while (video->frames[video->first_clear_frame]->state != FRAME_CLEAR) {

				spin_unlock_irqrestore(&video->spinlock, flags);

				if (signal_pending(current)) {
					remove_wait_queue(&video->waitq, &wait);
					set_current_state(TASK_RUNNING);
					ret = -EINTR;
					goto out;
				}

				schedule();
				set_current_state(TASK_INTERRUPTIBLE);

				spin_lock_irqsave(&video->spinlock, flags);
			}
			spin_unlock_irqrestore(&video->spinlock, flags);

			remove_wait_queue(&video->waitq, &wait);
			set_current_state(TASK_RUNNING);

			frame_prepare(video, video->first_clear_frame);

			n_submit--;
		}

		ret = 0;
		break;
	}

	/* block until at least 'arg' frames are clear */
	case DV1394_IOC_WAIT_FRAMES: {
		unsigned int n_wait;

		if ( !video_card_initialized(video) ) {
			ret = -EINVAL;
			goto out;
		}

		n_wait = (unsigned int) arg;

		/* since we re-run the last frame on underflow, we will
		   never actually have n_frames clear frames; at most only
		   n_frames - 1 */

		if (n_wait > (video->n_frames-1) ) {
			ret = -EINVAL;
			goto out;
		}

		add_wait_queue(&video->waitq, &wait);
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock_irqsave(&video->spinlock, flags);

		while (video->n_clear_frames < n_wait) {

			spin_unlock_irqrestore(&video->spinlock, flags);

			if (signal_pending(current)) {
				remove_wait_queue(&video->waitq, &wait);
				set_current_state(TASK_RUNNING);
				ret = -EINTR;
				goto out;
			}

			schedule();
			set_current_state(TASK_INTERRUPTIBLE);

			spin_lock_irqsave(&video->spinlock, flags);
		}

		spin_unlock_irqrestore(&video->spinlock, flags);

		remove_wait_queue(&video->waitq, &wait);
		set_current_state(TASK_RUNNING);
		ret = 0;
		break;
	}

	/* hand 'arg' received frames back to the driver for reuse */
	case DV1394_IOC_RECEIVE_FRAMES: {
		unsigned int n_recv;

		if ( !video_card_initialized(video) ) {
			ret = -EINVAL;
			goto out;
		}

		n_recv = (unsigned int) arg;

		/* at least one frame must be active */
		if (n_recv > (video->n_frames-1) ) {
			ret = -EINVAL;
			goto out;
		}

		spin_lock_irqsave(&video->spinlock, flags);

		/* release the clear frames */
		video->n_clear_frames -= n_recv;

		/* advance the clear frame cursor */
		video->first_clear_frame = (video->first_clear_frame + n_recv) % video->n_frames;

		/* reset dropped_frames */
		video->dropped_frames = 0;

		spin_unlock_irqrestore(&video->spinlock, flags);

		ret = 0;
		break;
	}

	/* build the receive DMA program and start reception */
	case DV1394_IOC_START_RECEIVE: {
		if ( !video_card_initialized(video) ) {
			ret = do_dv1394_init_default(video);
			if (ret)
				goto out;
		}

		video->continuity_counter = -1;

		receive_packets(video);

		start_dma_receive(video);

		ret = 0;
		break;
	}

	/* (re)initialize with user-supplied parameters, or defaults if
	   argp is NULL */
	case DV1394_IOC_INIT: {
		struct dv1394_init init;
		if (!argp) {
			ret = do_dv1394_init_default(video);
		} else {
			if (copy_from_user(&init, argp, sizeof(init))) {
				ret = -EFAULT;
				goto out;
			}
			ret = do_dv1394_init(video, &init);
		}
		break;
	}

	/* stop streaming; the DMA buffer is kept (may still be mmapped) */
	case DV1394_IOC_SHUTDOWN:
		do_dv1394_shutdown(video, 0);
		ret = 0;
		break;


	/* copy a snapshot of the driver state to user space; also resets
	   the dropped-frame counter */
        case DV1394_IOC_GET_STATUS: {
		struct dv1394_status status;

		if ( !video_card_initialized(video) ) {
			ret = -EINVAL;
			goto out;
		}

		status.init.api_version = DV1394_API_VERSION;
		status.init.channel = video->channel;
		status.init.n_frames = video->n_frames;
		status.init.format = video->pal_or_ntsc;
		status.init.cip_n = video->cip_n;
		status.init.cip_d = video->cip_d;
		status.init.syt_offset = video->syt_offset;

		status.first_clear_frame = video->first_clear_frame;

		/* the rest of the fields need to be locked against the interrupt */
		spin_lock_irqsave(&video->spinlock, flags);

		status.active_frame = video->active_frame;
		status.n_clear_frames = video->n_clear_frames;

		status.dropped_frames = video->dropped_frames;

		/* reset dropped_frames */
		video->dropped_frames = 0;

		spin_unlock_irqrestore(&video->spinlock, flags);

		if (copy_to_user(argp, &status, sizeof(status))) {
			ret = -EFAULT;
			goto out;
		}

		ret = 0;
		break;
	}

	default:
		break;
	}

 out:
	up(&video->sem);
	unlock_kernel();
	return ret;
}
+
+/*** DEVICE FILE INTERFACE CONTINUED ***************************************/
+
+static int dv1394_open(struct inode *inode, struct file *file)
+{
+	struct video_card *video = NULL;
+
+	/* if the device was opened through devfs, then file->private_data
+	   has already been set to video by devfs */
+	if (file->private_data) {
+		video = (struct video_card*) file->private_data;
+
+	} else {
+		/* look up the card by ID */
+		unsigned long flags;
+
+		spin_lock_irqsave(&dv1394_cards_lock, flags);
+		if (!list_empty(&dv1394_cards)) {
+			struct video_card *p;
+			list_for_each_entry(p, &dv1394_cards, list) {
+				if ((p->id) == ieee1394_file_to_instance(file)) {
+					video = p;
+					break;
+				}
+			}
+		}
+		spin_unlock_irqrestore(&dv1394_cards_lock, flags);
+
+		if (!video) {
+			debug_printk("dv1394: OHCI card %d not found", ieee1394_file_to_instance(file));
+			return -ENODEV;
+		}
+
+		file->private_data = (void*) video;
+	}
+
+#ifndef DV1394_ALLOW_MORE_THAN_ONE_OPEN
+
+	if ( test_and_set_bit(0, &video->open) ) {
+		/* video is already open by someone else */
+		return -EBUSY;
+ 	}
+
+#endif
+
+	return 0;
+}
+
+
+static int dv1394_release(struct inode *inode, struct file *file)
+{
+	struct video_card *video = file_to_video_card(file);
+
+	/* OK to free the DMA buffer, no more mappings can exist */
+	do_dv1394_shutdown(video, 1);
+
+	/* clean up async I/O users */
+	dv1394_fasync(-1, file, 0);
+
+	/* give someone else a turn */
+	clear_bit(0, &video->open);
+
+	return 0;
+}
+
+
+/*** DEVICE DRIVER HANDLERS ************************************************/
+
/*
   it_tasklet_func() - bottom half for iso transmit interrupts.

   Walks the READY frames starting at the active one and inspects the
   timestamp words that the DMA engine writes back into the descriptor
   pool:
   - *frame_begin_timestamp: the frame has started transmitting, so the
     previous frame can be garbage-collected (frame_reset -> CLEAR);
   - *mid_frame_timestamp: time to assign the SYT timestamp to the next
     frame (re-using the current frame on underflow);
   - *frame_end_timestamp: the frame looped, i.e. at least one frame
     was dropped.
   Wakes up writers/pollers whenever a frame becomes CLEAR.
*/
static void it_tasklet_func(unsigned long data)
{
	int wake = 0;
	struct video_card *video = (struct video_card*) data;

	spin_lock(&video->spinlock);

	/* stop_dma() may have shut us down since the IRQ fired */
	if (!video->dma_running)
		goto out;

	irq_printk("ContextControl = %08x, CommandPtr = %08x\n",
	       reg_read(video->ohci, video->ohci_IsoXmitContextControlSet),
	       reg_read(video->ohci, video->ohci_IsoXmitCommandPtr)
	       );


	/* only proceed if we still own an IT context and it is active (bit 10) */
	if ( (video->ohci_it_ctx != -1) &&
	    (reg_read(video->ohci, video->ohci_IsoXmitContextControlSet) & (1 << 10)) ) {

		struct frame *f;
		unsigned int frame, i;


		if (video->active_frame == -1)
			frame = 0;
		else
			frame = video->active_frame;

		/* check all the DMA-able frames */
		for (i = 0; i < video->n_frames; i++, frame = (frame+1) % video->n_frames) {

			irq_printk("IRQ checking frame %d...", frame);
			f = video->frames[frame];
			if (f->state != FRAME_READY) {
				irq_printk("clear, skipping\n");
				/* we don't own this frame */
				continue;
			}

			irq_printk("DMA\n");

			/* check the frame begin semaphore to see if we can free the previous frame */
			if ( *(f->frame_begin_timestamp) ) {
				int prev_frame;
				struct frame *prev_f;



				/* don't reset, need this later *(f->frame_begin_timestamp) = 0; */
				irq_printk("  BEGIN\n");

				prev_frame = frame - 1;
				if (prev_frame == -1)
					prev_frame += video->n_frames;
				prev_f = video->frames[prev_frame];

				/* make sure we can actually garbage collect
				   this frame */
				if ( (prev_f->state == FRAME_READY) &&
				    prev_f->done && (!f->done) )
				{
					frame_reset(prev_f);
					video->n_clear_frames++;
					wake = 1;
					video->active_frame = frame;

					irq_printk("  BEGIN - freeing previous frame %d, new active frame is %d\n", prev_frame, frame);
				} else {
					irq_printk("  BEGIN - can't free yet\n");
				}

				/* mark this frame so we only free its
				   predecessor once */
				f->done = 1;
			}


			/* see if we need to set the timestamp for the next frame */
			if ( *(f->mid_frame_timestamp) ) {
				struct frame *next_frame;
				u32 begin_ts, ts_cyc, ts_off;

				*(f->mid_frame_timestamp) = 0;

				begin_ts = le32_to_cpu(*(f->frame_begin_timestamp));

				irq_printk("  MIDDLE - first packet was sent at cycle %4u (%2u), assigned timestamp was (%2u) %4u\n",
					   begin_ts & 0x1FFF, begin_ts & 0xF,
					   f->assigned_timestamp >> 12, f->assigned_timestamp & 0xFFF);

				/* prepare next frame and assign timestamp */
				next_frame = video->frames[ (frame+1) % video->n_frames ];

				if (next_frame->state == FRAME_READY) {
					irq_printk("  MIDDLE - next frame is ready, good\n");
				} else {
					/* underflow: re-run the current frame
					   rather than stopping the stream */
					debug_printk("dv1394: Underflow! At least one frame has been dropped.\n");
					next_frame = f;
				}

				/* set the timestamp to the timestamp of the last frame sent,
				   plus the length of the last frame sent, plus the syt latency */
				ts_cyc = begin_ts & 0xF;
				/* advance one frame, plus syt latency (typically 2-3) */
				ts_cyc += f->n_packets + video->syt_offset ;

				ts_off = 0;

				ts_cyc += ts_off/3072;
				ts_off %= 3072;

				/* patch the SYT field into the next frame's
				   CIP headers (one or two, if the frame
				   boundary splits a packet) */
				next_frame->assigned_timestamp = ((ts_cyc&0xF) << 12) + ts_off;
				if (next_frame->cip_syt1) {
					next_frame->cip_syt1->b[6] = next_frame->assigned_timestamp >> 8;
					next_frame->cip_syt1->b[7] = next_frame->assigned_timestamp & 0xFF;
				}
				if (next_frame->cip_syt2) {
					next_frame->cip_syt2->b[6] = next_frame->assigned_timestamp >> 8;
					next_frame->cip_syt2->b[7] = next_frame->assigned_timestamp & 0xFF;
				}

			}

			/* see if the frame looped */
			if ( *(f->frame_end_timestamp) ) {

				*(f->frame_end_timestamp) = 0;

				debug_printk("  END - the frame looped at least once\n");

				video->dropped_frames++;
			}

		} /* for (each frame) */
	}

	if (wake) {
		kill_fasync(&video->fasync, SIGIO, POLL_OUT);

		/* wake readers/writers/ioctl'ers */
		wake_up_interruptible(&video->waitq);
	}

out:
	spin_unlock(&video->spinlock);
}
+
+/* Isochronous-receive (IR) tasklet.  Runs after an IR DMA interrupt:
+ * drains completed packets from the packet ring buffer, assembles
+ * them into DV frames, and re-links the DMA descriptor chain so
+ * reception keeps flowing.  "data" is the owning video_card. */
+static void ir_tasklet_func(unsigned long data)
+{
+	int wake = 0;
+	struct video_card *video = (struct video_card*) data;
+
+	spin_lock(&video->spinlock);
+
+	/* DMA may have been torn down concurrently; nothing to do then */
+	if (!video->dma_running)
+		goto out;
+
+	/* proceed only if an IR context is allocated and its 'active'
+	   flag (bit 10 of ContextControl) is set */
+	if ( (video->ohci_ir_ctx != -1) &&
+	    (reg_read(video->ohci, video->ohci_IsoRcvContextControlSet) & (1 << 10)) ) {
+
+		int sof=0; /* start-of-frame flag */
+		struct frame *f;
+		u16 packet_length, packet_time;
+		int i, dbc=0;
+		struct DMA_descriptor_block *block = NULL;
+		u16 xferstatus;
+
+		int next_i, prev_i;
+		struct DMA_descriptor_block *next = NULL;
+		dma_addr_t next_dma = 0;
+		struct DMA_descriptor_block *prev = NULL;
+
+		/* loop over all descriptors in all frames */
+		for (i = 0; i < video->n_frames*MAX_PACKETS; i++) {
+			struct packet *p = dma_region_i(&video->packet_buf, struct packet, video->current_packet);
+
+			/* make sure we are seeing the latest changes to p */
+			dma_region_sync_for_cpu(&video->packet_buf,
+						(unsigned long) p - (unsigned long) video->packet_buf.kvirt,
+						sizeof(struct packet));
+
+			packet_length = le16_to_cpu(p->data_length);
+			packet_time   = le16_to_cpu(p->timestamp);
+
+			irq_printk("received packet %02d, timestamp=%04x, length=%04x, sof=%02x%02x\n", video->current_packet,
+				   packet_time, packet_length,
+				   p->data[0], p->data[1]);
+
+			/* get the descriptor based on packet_buffer cursor */
+			f = video->frames[video->current_packet / MAX_PACKETS];
+			block = &(f->descriptor_pool[video->current_packet % MAX_PACKETS]);
+			/* xferStatus lives in the upper 16 bits of the
+			   descriptor's fourth quadlet; keep the event code */
+			xferstatus = le32_to_cpu(block->u.in.il.q[3]) >> 16;
+			xferstatus &= 0x1F;
+			irq_printk("ir_tasklet_func: xferStatus/resCount [%d] = 0x%08x\n", i, le32_to_cpu(block->u.in.il.q[3]) );
+
+			/* get the current frame */
+			f = video->frames[video->active_frame];
+
+			/* exclude empty packet */
+			if (packet_length > 8 && xferstatus == 0x11) {
+				/* check for start of frame */
+				/* DRD> Changed to check section type ([0]>>5==0)
+				   and dif sequence ([1]>>4==0) */
+				sof = ( (p->data[0] >> 5) == 0 && (p->data[1] >> 4) == 0);
+
+				/* data-block continuity counter from the CIP header */
+				dbc = (int) (p->cip_h1 >> 24);
+				/* NOTE(review): this only flags a dbc that jumped
+				   AHEAD of the expected value; a discontinuity
+				   that wraps to a smaller dbc may slip through --
+				   confirm intended behavior */
+				if ( video->continuity_counter != -1 && dbc > ((video->continuity_counter + 1) % 256) )
+				{
+					printk(KERN_WARNING "dv1394: discontinuity detected, dropping all frames\n" );
+					video->dropped_frames += video->n_clear_frames + 1;
+					video->first_frame = 0;
+					video->n_clear_frames = 0;
+					video->first_clear_frame = -1;
+				}
+				video->continuity_counter = dbc;
+
+				if (!video->first_frame) {
+					if (sof) {
+						video->first_frame = 1;
+					}
+
+				} else if (sof) {
+					/* close current frame */
+					frame_reset(f);  /* f->state = STATE_CLEAR */
+					video->n_clear_frames++;
+					if (video->n_clear_frames > video->n_frames) {
+						video->dropped_frames++;
+						printk(KERN_WARNING "dv1394: dropped a frame during reception\n" );
+						video->n_clear_frames = video->n_frames-1;
+						video->first_clear_frame = (video->first_clear_frame + 1) % video->n_frames;
+					}
+					if (video->first_clear_frame == -1)
+						video->first_clear_frame = video->active_frame;
+
+					/* get the next frame */
+					video->active_frame = (video->active_frame + 1) % video->n_frames;
+					f = video->frames[video->active_frame];
+					irq_printk("   frame received, active_frame = %d, n_clear_frames = %d, first_clear_frame = %d\n",
+						   video->active_frame, video->n_clear_frames, video->first_clear_frame);
+				}
+				if (video->first_frame) {
+					if (sof) {
+						/* open next frame */
+						f->state = FRAME_READY;
+					}
+
+					/* copy to buffer */
+					if (f->n_packets > (video->frame_size / 480)) {
+						printk(KERN_ERR "frame buffer overflow during receive\n");
+					}
+
+					frame_put_packet(f, p);
+
+				} /* first_frame */
+			}
+
+			/* stop, end of ready packets */
+			else if (xferstatus == 0) {
+				break;
+			}
+
+			/* reset xferStatus & resCount */
+			block->u.in.il.q[3] = cpu_to_le32(512);
+
+			/* terminate dma chain at this (next) packet */
+			next_i = video->current_packet;
+			f = video->frames[next_i / MAX_PACKETS];
+			next = &(f->descriptor_pool[next_i % MAX_PACKETS]);
+			next_dma = ((unsigned long) block - (unsigned long) f->descriptor_pool) + f->descriptor_pool_dma;
+			next->u.in.il.q[0] |= 3 << 20; /* enable interrupt */
+			next->u.in.il.q[2] = 0; /* disable branch */
+
+			/* link previous to next */
+			prev_i = (next_i == 0) ? (MAX_PACKETS * video->n_frames - 1) : (next_i - 1);
+			f = video->frames[prev_i / MAX_PACKETS];
+			prev = &(f->descriptor_pool[prev_i % MAX_PACKETS]);
+			/* request an interrupt only at half-frame boundaries,
+			   presumably to bound latency without an IRQ per
+			   packet -- TODO confirm */
+			if (prev_i % (MAX_PACKETS/2)) {
+				prev->u.in.il.q[0] &= ~(3 << 20); /* no interrupt */
+			} else {
+				prev->u.in.il.q[0] |= 3 << 20; /* enable interrupt */
+			}
+			prev->u.in.il.q[2] = cpu_to_le32(next_dma | 1); /* set Z=1 */
+			wmb();
+
+			/* wake up DMA in case it fell asleep */
+			reg_write(video->ohci, video->ohci_IsoRcvContextControlSet, (1 << 12));
+
+			/* advance packet_buffer cursor */
+			video->current_packet = (video->current_packet + 1) % (MAX_PACKETS * video->n_frames);
+
+		} /* for all packets */
+
+		wake = 1; /* why the hell not? */
+
+	} /* receive interrupt */
+
+	if (wake) {
+		kill_fasync(&video->fasync, SIGIO, POLL_IN);
+
+		/* wake readers/writers/ioctl'ers */
+		wake_up_interruptible(&video->waitq);
+	}
+
+out:
+	spin_unlock(&video->spinlock);
+}
+
+static struct cdev dv1394_cdev;
+/* character-device entry points for the dv1394 device nodes;
+   the device is driven via read/write, mmap and ioctl */
+static struct file_operations dv1394_fops=
+{
+	.owner =	THIS_MODULE,
+	.poll =         dv1394_poll,
+	.unlocked_ioctl = dv1394_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = dv1394_compat_ioctl,
+#endif
+	.mmap =		dv1394_mmap,
+	.open =		dv1394_open,
+	.write =        dv1394_write,
+	.read =         dv1394_read,
+	.release =	dv1394_release,
+	.fasync =       dv1394_fasync,
+};
+
+
+/*** HOTPLUG STUFF **********************************************************/
+/*
+ * Export information about protocols/devices supported by this driver.
+ */
+/* match AV/C units by config-ROM specifier-id + software version */
+static struct ieee1394_device_id dv1394_id_table[] = {
+	{
+		.match_flags	= IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
+		.specifier_id	= AVC_UNIT_SPEC_ID_ENTRY & 0xffffff,
+		.version	= AVC_SW_VERSION_ENTRY & 0xffffff
+	},
+	{ }	/* terminator */
+};
+
+MODULE_DEVICE_TABLE(ieee1394, dv1394_id_table);
+
+static struct hpsb_protocol_driver dv1394_driver = {
+	.name		= "DV/1394 Driver",
+	.id_table	= dv1394_id_table,
+	.driver         = {
+		.name	= "dv1394",
+		.bus	= &ieee1394_bus_type,
+	},
+};
+
+
+/*** IEEE1394 HPSB CALLBACKS ***********************************************/
+
+/* Create and register one dv1394 "plug".  Each host exposes four
+ * plugs -- {NTSC,PAL} x {receive,transmit} -- distinguished by the
+ * low two bits of video->id.  Returns 0 on success or a negative
+ * errno on failure.
+ *
+ * Fixes vs. the previous version: on devfs registration failure the
+ * card was kfree'd while still linked on dv1394_cards (dangling list
+ * entry), and all failures returned a bare -1 instead of an errno. */
+static int dv1394_init(struct ti_ohci *ohci, enum pal_or_ntsc format, enum modes mode)
+{
+	struct video_card *video;
+	unsigned long flags;
+	int i, ret;
+
+	video = kmalloc(sizeof(struct video_card), GFP_KERNEL);
+	if (!video) {
+		printk(KERN_ERR "dv1394: cannot allocate video_card\n");
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	memset(video, 0, sizeof(struct video_card));
+
+	video->ohci = ohci;
+	/* lower 2 bits of id indicate which of four "plugs"
+	   per host */
+	video->id = ohci->host->id << 2;
+	if (format == DV1394_NTSC)
+		video->id |= mode;
+	else
+		video->id |= 2 + mode;
+
+	/* -1 == no DMA context allocated yet */
+	video->ohci_it_ctx = -1;
+	video->ohci_ir_ctx = -1;
+
+	video->ohci_IsoXmitContextControlSet = 0;
+	video->ohci_IsoXmitContextControlClear = 0;
+	video->ohci_IsoXmitCommandPtr = 0;
+
+	video->ohci_IsoRcvContextControlSet = 0;
+	video->ohci_IsoRcvContextControlClear = 0;
+	video->ohci_IsoRcvCommandPtr = 0;
+	video->ohci_IsoRcvContextMatch = 0;
+
+	video->n_frames = 0; /* flag that video is not initialized */
+	video->channel = 63; /* default to broadcast channel */
+	video->active_frame = -1;
+
+	/* initialize the following */
+	video->pal_or_ntsc = format;
+	video->cip_n = 0; /* 0 = use builtin default */
+	video->cip_d = 0;
+	video->syt_offset = 0;
+	video->mode = mode;
+
+	for (i = 0; i < DV1394_MAX_FRAMES; i++)
+		video->frames[i] = NULL;
+
+	dma_region_init(&video->dv_buf);
+	video->dv_buf_size = 0;
+	dma_region_init(&video->packet_buf);
+	video->packet_buf_size = 0;
+
+	clear_bit(0, &video->open);
+	spin_lock_init(&video->spinlock);
+	video->dma_running = 0;
+	init_MUTEX(&video->sem);
+	init_waitqueue_head(&video->waitq);
+	video->fasync = NULL;
+
+	spin_lock_irqsave(&dv1394_cards_lock, flags);
+	INIT_LIST_HEAD(&video->list);
+	list_add_tail(&video->list, &dv1394_cards);
+	spin_unlock_irqrestore(&dv1394_cards_lock, flags);
+
+	ret = devfs_mk_cdev(MKDEV(IEEE1394_MAJOR,
+				  IEEE1394_MINOR_BLOCK_DV1394*16 + video->id),
+			    S_IFCHR|S_IRUGO|S_IWUGO,
+			    "ieee1394/dv/host%d/%s/%s",
+			    (video->id>>2),
+			    (video->pal_or_ntsc == DV1394_NTSC ? "NTSC" : "PAL"),
+			    (video->mode == MODE_RECEIVE ? "in" : "out"));
+	if (ret < 0)
+		goto err_free;
+
+	debug_printk("dv1394: dv1394_init() OK on ID %d\n", video->id);
+
+	return 0;
+
+ err_free:
+	/* the card is already on the global list -- unlink it before
+	   freeing so no dangling list entry is left behind */
+	spin_lock_irqsave(&dv1394_cards_lock, flags);
+	list_del(&video->list);
+	spin_unlock_irqrestore(&dv1394_cards_lock, flags);
+	kfree(video);
+ err:
+	return ret;
+}
+
+/* Tear down one plug: shut down its DMA state (the '1' presumably
+ * requests freeing the DV buffers too -- confirm against
+ * do_dv1394_shutdown), remove its devfs node and free the card. */
+static void dv1394_un_init(struct video_card *video)
+{
+	char buf[32];
+
+	/* obviously nobody has the driver open at this point */
+	do_dv1394_shutdown(video, 1);
+	/* rebuild the devfs path this card was registered under */
+	snprintf(buf, sizeof(buf), "dv/host%d/%s/%s", (video->id >> 2),
+		(video->pal_or_ntsc == DV1394_NTSC ? "NTSC" : "PAL"),
+		(video->mode == MODE_RECEIVE ? "in" : "out")
+		);
+
+	devfs_remove("ieee1394/%s", buf);
+	kfree(video);
+}
+
+
+/* hpsb callback: a host adapter is going away.  Unlink and destroy
+ * every video_card belonging to this host, then remove the class
+ * device and devfs directories created by dv1394_add_host. */
+static void dv1394_remove_host (struct hpsb_host *host)
+{
+	struct video_card *video;
+	unsigned long flags;
+	int id = host->id;
+
+	/* We only work with the OHCI-1394 driver */
+	if (strcmp(host->driver->name, OHCI1394_DRIVER_NAME))
+		return;
+
+	/* find the corresponding video_cards */
+	do {
+		struct video_card *tmp_vid;
+
+		video = NULL;
+
+		/* take one matching card off the list per pass; the lock
+		   is dropped before dv1394_un_init, which presumably may
+		   sleep (devfs_remove) */
+		spin_lock_irqsave(&dv1394_cards_lock, flags);
+		list_for_each_entry(tmp_vid, &dv1394_cards, list) {
+			if ((tmp_vid->id >> 2) == id) {
+				list_del(&tmp_vid->list);
+				video = tmp_vid;
+				break;
+			}
+		}
+		spin_unlock_irqrestore(&dv1394_cards_lock, flags);
+
+		if (video)
+			dv1394_un_init(video);
+	} while (video != NULL);
+
+	class_simple_device_remove(MKDEV(
+		IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_DV1394 * 16 + (id<<2)));
+	devfs_remove("ieee1394/dv/host%d/NTSC", id);
+	devfs_remove("ieee1394/dv/host%d/PAL", id);
+	devfs_remove("ieee1394/dv/host%d", id);
+}
+
+/* hpsb callback: a new host adapter appeared.  Register the class
+ * device and devfs directories, then instantiate the four plugs
+ * ({NTSC,PAL} x {receive,transmit}) for this host.
+ * NOTE(review): the dv1394_init() return values are ignored, so a
+ * partial failure silently leaves fewer than four plugs registered. */
+static void dv1394_add_host (struct hpsb_host *host)
+{
+	struct ti_ohci *ohci;
+	int id = host->id;
+
+	/* We only work with the OHCI-1394 driver */
+	if (strcmp(host->driver->name, OHCI1394_DRIVER_NAME))
+		return;
+
+	ohci = (struct ti_ohci *)host->hostdata;
+
+	class_simple_device_add(hpsb_protocol_class, MKDEV(
+		IEEE1394_MAJOR,	IEEE1394_MINOR_BLOCK_DV1394 * 16 + (id<<2)), 
+		NULL, "dv1394-%d", id);
+	devfs_mk_dir("ieee1394/dv/host%d", id);
+	devfs_mk_dir("ieee1394/dv/host%d/NTSC", id);
+	devfs_mk_dir("ieee1394/dv/host%d/PAL", id);
+
+	dv1394_init(ohci, DV1394_NTSC, MODE_RECEIVE);
+	dv1394_init(ohci, DV1394_NTSC, MODE_TRANSMIT);
+	dv1394_init(ohci, DV1394_PAL, MODE_RECEIVE);
+	dv1394_init(ohci, DV1394_PAL, MODE_TRANSMIT);
+}
+
+
+/* Bus reset handler. In the event of a bus reset, we may need to
+   re-start the DMA contexts - otherwise the user program would
+   end up waiting forever.
+*/
+
+/* hpsb callback invoked after a 1394 bus reset: if a DMA context is
+ * still RUN but no longer ACTIVE, kick it by toggling the RUN bit so
+ * user programs do not block forever.
+ * NOTE(review): only the FIRST video_card matching this host is
+ * examined, although a host owns up to four plugs -- confirm whether
+ * the other plugs can stall here too. */
+static void dv1394_host_reset(struct hpsb_host *host)
+{
+	struct ti_ohci *ohci;
+	struct video_card *video = NULL, *tmp_vid;
+	unsigned long flags;
+
+	/* We only work with the OHCI-1394 driver */
+	if (strcmp(host->driver->name, OHCI1394_DRIVER_NAME))
+		return;
+
+	ohci = (struct ti_ohci *)host->hostdata;
+
+
+	/* find the corresponding video_cards */
+	spin_lock_irqsave(&dv1394_cards_lock, flags);
+	list_for_each_entry(tmp_vid, &dv1394_cards, list) {
+		if ((tmp_vid->id >> 2) == host->id) {
+			video = tmp_vid;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&dv1394_cards_lock, flags);
+
+	if (!video)
+		return;
+
+
+	spin_lock_irqsave(&video->spinlock, flags);
+
+	if (!video->dma_running)
+		goto out;
+
+	/* check IT context */
+	if (video->ohci_it_ctx != -1) {
+		u32 ctx;
+
+		ctx = reg_read(video->ohci, video->ohci_IsoXmitContextControlSet);
+
+		/* if (RUN but not ACTIVE) */
+		if ( (ctx & (1<<15)) &&
+		    !(ctx & (1<<10)) ) {
+
+			debug_printk("dv1394: IT context stopped due to bus reset; waking it up\n");
+
+			/* to be safe, assume a frame has been dropped. User-space programs
+			   should handle this condition like an underflow. */
+			video->dropped_frames++;
+
+			/* for some reason you must clear, then re-set the RUN bit to restart DMA */
+
+			/* clear RUN */
+			reg_write(video->ohci, video->ohci_IsoXmitContextControlClear, (1 << 15));
+			flush_pci_write(video->ohci);
+
+			/* set RUN */
+			reg_write(video->ohci, video->ohci_IsoXmitContextControlSet, (1 << 15));
+			flush_pci_write(video->ohci);
+
+			/* set the WAKE bit (just in case; this isn't strictly necessary) */
+			reg_write(video->ohci, video->ohci_IsoXmitContextControlSet, (1 << 12));
+			flush_pci_write(video->ohci);
+
+			irq_printk("dv1394: AFTER IT restart ctx 0x%08x ptr 0x%08x\n",
+				   reg_read(video->ohci, video->ohci_IsoXmitContextControlSet),
+				   reg_read(video->ohci, video->ohci_IsoXmitCommandPtr));
+		}
+	}
+
+	/* check IR context */
+	if (video->ohci_ir_ctx != -1) {
+		u32 ctx;
+
+		ctx = reg_read(video->ohci, video->ohci_IsoRcvContextControlSet);
+
+		/* if (RUN but not ACTIVE) */
+		if ( (ctx & (1<<15)) &&
+		    !(ctx & (1<<10)) ) {
+
+			debug_printk("dv1394: IR context stopped due to bus reset; waking it up\n");
+
+			/* to be safe, assume a frame has been dropped. User-space programs
+			   should handle this condition like an overflow. */
+			video->dropped_frames++;
+
+			/* for some reason you must clear, then re-set the RUN bit to restart DMA */
+			/* XXX this doesn't work for me, I can't get IR DMA to restart :[ */
+
+			/* clear RUN */
+			reg_write(video->ohci, video->ohci_IsoRcvContextControlClear, (1 << 15));
+			flush_pci_write(video->ohci);
+
+			/* set RUN */
+			reg_write(video->ohci, video->ohci_IsoRcvContextControlSet, (1 << 15));
+			flush_pci_write(video->ohci);
+
+			/* set the WAKE bit (just in case; this isn't strictly necessary) */
+			reg_write(video->ohci, video->ohci_IsoRcvContextControlSet, (1 << 12));
+			flush_pci_write(video->ohci);
+
+			irq_printk("dv1394: AFTER IR restart ctx 0x%08x ptr 0x%08x\n",
+				   reg_read(video->ohci, video->ohci_IsoRcvContextControlSet),
+				   reg_read(video->ohci, video->ohci_IsoRcvCommandPtr));
+		}
+	}
+
+out:
+	spin_unlock_irqrestore(&video->spinlock, flags);
+
+	/* wake readers/writers/ioctl'ers */
+	wake_up_interruptible(&video->waitq);
+}
+
+/* callbacks registered with the ieee1394 core */
+static struct hpsb_highlevel dv1394_highlevel = {
+	.name =		"dv1394",
+	.add_host =	dv1394_add_host,
+	.remove_host =	dv1394_remove_host,
+	.host_reset =   dv1394_host_reset,
+};
+
+#ifdef CONFIG_COMPAT
+
+#define DV1394_IOC32_INIT       _IOW('#', 0x06, struct dv1394_init32)
+#define DV1394_IOC32_GET_STATUS _IOR('#', 0x0c, struct dv1394_status32)
+
+/* fixed-width mirror of struct dv1394_init for 32-bit userland:
+   cip_n/cip_d are 'unsigned long' in the native struct, so the two
+   layouts differ on 64-bit kernels */
+struct dv1394_init32 {
+	u32 api_version;
+	u32 channel;
+	u32 n_frames;
+	u32 format;
+	u32 cip_n;
+	u32 cip_d;
+	u32 syt_offset;
+};
+
+/* 32-bit layout of struct dv1394_status */
+struct dv1394_status32 {
+	struct dv1394_init32 init;
+	s32 active_frame;
+	u32 first_clear_frame;
+	u32 n_clear_frames;
+	u32 dropped_frames;
+};
+
+/* RED-PEN: this should use compat_alloc_userspace instead */
+
+/* 32-bit compat shim for DV1394_IOC_INIT: widen the u32 fields into
+ * the native struct, then re-issue the ioctl under KERNEL_DS so the
+ * handler's copy_from_user() reads our kernel-space copy. */
+static int handle_dv1394_init(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	struct dv1394_init32 dv32;
+	struct dv1394_init dv;
+	mm_segment_t old_fs;
+	int ret;
+
+	/* sanity check that this really is a dv1394 fd */
+	if (file->f_op->unlocked_ioctl != dv1394_ioctl)
+		return -EFAULT;
+
+	if (copy_from_user(&dv32, (void __user *)arg, sizeof(dv32)))
+		return -EFAULT;
+
+	/* widen field by field into the native layout */
+	dv.api_version = dv32.api_version;
+	dv.channel = dv32.channel;
+	dv.n_frames = dv32.n_frames;
+	dv.format = dv32.format;
+	dv.cip_n = (unsigned long)dv32.cip_n;
+	dv.cip_d = (unsigned long)dv32.cip_d;
+	dv.syt_offset = dv32.syt_offset;
+
+	old_fs = get_fs();
+	set_fs(KERNEL_DS);
+	ret = dv1394_ioctl(file, DV1394_IOC_INIT, (unsigned long)&dv);
+	set_fs(old_fs);
+
+	return ret;
+}
+
+/* 32-bit compat shim for DV1394_IOC_GET_STATUS: run the native ioctl
+ * into a kernel-space struct under KERNEL_DS, then narrow the fields
+ * into the fixed-width 32-bit layout and copy that out to userland. */
+static int handle_dv1394_get_status(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	struct dv1394_status32 dv32;
+	struct dv1394_status dv;
+	mm_segment_t old_fs;
+	int ret;
+
+	/* sanity check that this really is a dv1394 fd */
+	if (file->f_op->unlocked_ioctl != dv1394_ioctl)
+		return -EFAULT;
+
+	old_fs = get_fs();
+	set_fs(KERNEL_DS);
+	ret = dv1394_ioctl(file, DV1394_IOC_GET_STATUS, (unsigned long)&dv);
+	set_fs(old_fs);
+
+	if (!ret) {
+		/* narrow field by field into the 32-bit layout */
+		dv32.init.api_version = dv.init.api_version;
+		dv32.init.channel = dv.init.channel;
+		dv32.init.n_frames = dv.init.n_frames;
+		dv32.init.format = dv.init.format;
+		dv32.init.cip_n = (u32)dv.init.cip_n;
+		dv32.init.cip_d = (u32)dv.init.cip_d;
+		dv32.init.syt_offset = dv.init.syt_offset;
+		dv32.active_frame = dv.active_frame;
+		dv32.first_clear_frame = dv.first_clear_frame;
+		dv32.n_clear_frames = dv.n_clear_frames;
+		dv32.dropped_frames = dv.dropped_frames;
+
+		if (copy_to_user((struct dv1394_status32 __user *)arg, &dv32, sizeof(dv32)))
+			ret = -EFAULT;
+	}
+
+	return ret;
+}
+
+
+
+/* compat_ioctl entry point.  Commands whose argument layout is the
+ * same for 32- and 64-bit userland pass straight through; INIT and
+ * GET_STATUS carry 'unsigned long' fields and need translation. */
+static long dv1394_compat_ioctl(struct file *file, unsigned int cmd,
+			       unsigned long arg)
+{
+	switch (cmd) {
+	case DV1394_IOC_SHUTDOWN:
+	case DV1394_IOC_SUBMIT_FRAMES:
+	case DV1394_IOC_WAIT_FRAMES:
+	case DV1394_IOC_RECEIVE_FRAMES:
+	case DV1394_IOC_START_RECEIVE:
+		return dv1394_ioctl(file, cmd, arg);
+
+	case DV1394_IOC32_INIT:
+		return handle_dv1394_init(file, cmd, arg);
+	case DV1394_IOC32_GET_STATUS:
+		return handle_dv1394_get_status(file, cmd, arg);
+	default:
+		return -ENOIOCTLCMD;
+	}
+}
+
+#endif /* CONFIG_COMPAT */
+
+
+/*** KERNEL MODULE HANDLERS ************************************************/
+
+MODULE_AUTHOR("Dan Maas <dmaas@dcine.com>, Dan Dennedy <dan@dennedy.org>");
+MODULE_DESCRIPTION("driver for DV input/output on OHCI board");
+MODULE_SUPPORTED_DEVICE("dv1394");
+MODULE_LICENSE("GPL");
+
+/* module unload: tear down in reverse order of dv1394_init_module()
+   (protocol driver, highlevel hooks, cdev, devfs directory) */
+static void __exit dv1394_exit_module(void)
+{
+	hpsb_unregister_protocol(&dv1394_driver);
+
+	hpsb_unregister_highlevel(&dv1394_highlevel);
+	cdev_del(&dv1394_cdev);
+	devfs_remove("ieee1394/dv");
+}
+
+/* module load: register the character device (16 minors), the devfs
+   directory, and the ieee1394 highlevel/protocol hooks, unwinding
+   whatever was already registered on failure */
+static int __init dv1394_init_module(void)
+{
+	int ret;
+
+	cdev_init(&dv1394_cdev, &dv1394_fops);
+	dv1394_cdev.owner = THIS_MODULE;
+	kobject_set_name(&dv1394_cdev.kobj, "dv1394");
+	ret = cdev_add(&dv1394_cdev, IEEE1394_DV1394_DEV, 16);
+	if (ret) {
+		printk(KERN_ERR "dv1394: unable to register character device\n");
+		return ret;
+	}
+
+	devfs_mk_dir("ieee1394/dv");
+
+	hpsb_register_highlevel(&dv1394_highlevel);
+
+	ret = hpsb_register_protocol(&dv1394_driver);
+	if (ret) {
+		printk(KERN_ERR "dv1394: failed to register protocol\n");
+		/* unwind in reverse order */
+		hpsb_unregister_highlevel(&dv1394_highlevel);
+		devfs_remove("ieee1394/dv");
+		cdev_del(&dv1394_cdev);
+		return ret;
+	}
+
+	return 0;
+}
+
+module_init(dv1394_init_module);
+module_exit(dv1394_exit_module);
+MODULE_ALIAS_CHARDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_DV1394 * 16);
diff --git a/drivers/ieee1394/dv1394.h b/drivers/ieee1394/dv1394.h
new file mode 100644
index 000000000000..5807f5289810
--- /dev/null
+++ b/drivers/ieee1394/dv1394.h
@@ -0,0 +1,305 @@
+/*
+ * dv1394.h - DV input/output over IEEE 1394 on OHCI chips
+ *   Copyright (C)2001 Daniel Maas <dmaas@dcine.com>
+ *     receive by Dan Dennedy <dan@dennedy.org>
+ *
+ * based on:
+ *   video1394.h - driver for OHCI 1394 boards
+ *   Copyright (C)1999,2000 Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>
+ *                          Peter Schlaile <udbz@rz.uni-karlsruhe.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef _DV_1394_H
+#define _DV_1394_H
+
+/* This is the public user-space interface. Try not to break it. */
+
+#define DV1394_API_VERSION 0x20011127
+
+/* ********************
+   **                **
+   **   DV1394 API   **
+   **                **
+   ********************
+
+   There are two methods of operating the DV1394 DV output device.
+
+   1)
+
+   The simplest is an interface based on write(): simply write
+   full DV frames of data to the device, and they will be transmitted
+   as quickly as possible. The FD may be set for non-blocking I/O,
+   in which case you can use select() or poll() to wait for output
+   buffer space.
+
+   To set the DV output parameters (e.g. whether you want NTSC or PAL
+   video), use the DV1394_INIT ioctl, passing in the parameters you
+   want in a struct dv1394_init.
+
+   Example 1:
+         To play a raw .DV file:   cat foo.DV > /dev/dv1394
+	 (cat will use write() internally)
+
+   Example 2:
+           static struct dv1394_init init = {
+	      0x63,        (broadcast channel)
+              4,           (four-frame ringbuffer)
+	      DV1394_NTSC, (send NTSC video)
+	      0, 0         (default empty packet rate)
+           }
+
+	   ioctl(fd, DV1394_INIT, &init);
+
+	   while (1) {
+	          read( <a raw DV file>, buf, DV1394_NTSC_FRAME_SIZE );
+		  write( <the dv1394 FD>, buf, DV1394_NTSC_FRAME_SIZE );
+           }
+
+   2)
+
+   For more control over buffering, and to avoid unnecessary copies
+   of the DV data, you can use the more sophisticated mmap() interface.
+   First, call the DV1394_INIT ioctl to specify your parameters,
+   including the number of frames in the ringbuffer. Then, calling mmap()
+   on the dv1394 device will give you direct access to the ringbuffer
+   from which the DV card reads your frame data.
+
+   The ringbuffer is simply one large, contiguous region of memory
+   containing two or more frames of packed DV data. Each frame of DV data
+   is 120000 bytes (NTSC) or 144000 bytes (PAL).
+
+   Fill one or more frames in the ringbuffer, then use the DV1394_SUBMIT_FRAMES
+   ioctl to begin I/O. You can use either the DV1394_WAIT_FRAMES ioctl
+   or select()/poll() to wait until the frames are transmitted. Next, you'll
+   need to call the DV1394_GET_STATUS ioctl to determine which ringbuffer
+   frames are clear (ready to be filled with new DV data). Finally, use
+   DV1394_SUBMIT_FRAMES again to send the new data to the DV output.
+
+
+   Example: here is what a four-frame ringbuffer might look like
+            during DV transmission:
+
+
+         frame 0   frame 1   frame 2   frame 3
+
+	*--------------------------------------*
+        | CLEAR   | DV data | DV data | CLEAR  |
+        *--------------------------------------*
+                   <ACTIVE>
+
+	transmission goes in this direction --->>>
+
+
+   The DV hardware is currently transmitting the data in frame 1.
+   Once frame 1 is finished, it will automatically transmit frame 2.
+   (if frame 2 finishes before frame 3 is submitted, the device
+   will continue to transmit frame 2, and will increase the dropped_frames
+   counter each time it repeats the transmission).
+
+
+   If you called DV1394_GET_STATUS at this instant, you would
+   receive the following values:
+
+                  n_frames          = 4
+		  active_frame      = 1
+		  first_clear_frame = 3
+		  n_clear_frames    = 2
+
+   At this point, you should write new DV data into frame 3 and optionally
+   frame 0. Then call DV1394_SUBMIT_FRAMES to inform the device that
+   it may transmit the new frames.
+
+   ERROR HANDLING
+
+   An error (buffer underflow/overflow or a break in the DV stream due
+   to a 1394 bus reset) can be detected by checking the dropped_frames
+   field of struct dv1394_status (obtained through the
+   DV1394_GET_STATUS ioctl).
+
+   The best way to recover from such an error is to re-initialize
+   dv1394, either by using the DV1394_INIT ioctl call, or closing the
+   file descriptor and opening it again. (note that you must unmap all
+   ringbuffer mappings when closing the file descriptor, or else
+   dv1394 will still be considered 'in use').
+
+   MAIN LOOP
+
+   For maximum efficiency and robustness against bus errors, you are
+   advised to model the main loop of your application after the
+   following pseudo-code example:
+
+   (checks of system call return values omitted for brevity; always
+   check return values in your code!)
+
+   while ( frames left ) {
+
+    struct pollfd *pfd = ...;
+
+    pfd->fd = dv1394_fd;
+    pfd->revents = 0;
+    pfd->events = POLLOUT | POLLIN; (OUT for transmit, IN for receive)
+
+    (add other sources of I/O here)
+
+    poll(pfd, 1, -1); (or select(); add a timeout if you want)
+
+    if (pfd->revents) {
+         struct dv1394_status status;
+
+         ioctl(dv1394_fd, DV1394_GET_STATUS, &status);
+
+	 if (status.dropped_frames > 0) {
+	      reset_dv1394();
+         } else {
+              for (int i = 0; i < status.n_clear_frames; i++) {
+	          copy_DV_frame();
+              }
+         }
+    }
+   }
+
+   where copy_DV_frame() reads or writes on the dv1394 file descriptor
+   (read/write mode) or copies data to/from the mmap ringbuffer and
+   then calls ioctl(DV1394_SUBMIT_FRAMES) to notify dv1394 that new
+   frames are available (mmap mode).
+
+   reset_dv1394() is called in the event of a buffer
+   underflow/overflow or a halt in the DV stream (e.g. due to a 1394
+   bus reset). To guarantee recovery from the error, this function
+   should close the dv1394 file descriptor (and munmap() all
+   ringbuffer mappings, if you are using them), then re-open the
+   dv1394 device (and re-map the ringbuffer).
+
+*/
+
+
+/* maximum number of frames in the ringbuffer */
+#define DV1394_MAX_FRAMES 32
+
+/* number of *full* isochronous packets per DV frame */
+#define DV1394_NTSC_PACKETS_PER_FRAME 250
+#define DV1394_PAL_PACKETS_PER_FRAME  300
+
+/* size of one frame's worth of DV data, in bytes */
+#define DV1394_NTSC_FRAME_SIZE (480 * DV1394_NTSC_PACKETS_PER_FRAME)
+#define DV1394_PAL_FRAME_SIZE  (480 * DV1394_PAL_PACKETS_PER_FRAME)
+
+
+/* ioctl() commands */
+#include "ieee1394-ioctl.h"
+
+
+/* video standard selector, used by struct dv1394_init.format */
+enum pal_or_ntsc {
+	DV1394_NTSC = 0,
+	DV1394_PAL
+};
+
+
+
+
+/* this is the argument to DV1394_INIT */
+struct dv1394_init {
+	/* DV1394_API_VERSION */
+	unsigned int api_version;
+
+	/* isochronous transmission channel to use */
+	unsigned int channel;
+
+	/* number of frames in the ringbuffer. Must be at least 2
+	   and at most DV1394_MAX_FRAMES. */
+	unsigned int n_frames;
+
+	/* send/receive PAL or NTSC video format */
+	enum pal_or_ntsc format;
+
+	/* the following are used only for transmission */
+
+	/* set these to zero unless you want a
+	   non-default empty packet rate (see below) */
+	unsigned long cip_n;
+	unsigned long cip_d;
+
+	/* set this to zero unless you want a
+	   non-default SYT cycle offset (default = 3 cycles) */
+	unsigned int syt_offset;
+};
+
+/* NOTE: you may only allocate the DV frame ringbuffer once each time
+   you open the dv1394 device. DV1394_INIT will fail if you call it a
+   second time with different 'n_frames' or 'format' arguments (which
+   would imply a different size for the ringbuffer). If you need a
+   different buffer size, simply close and re-open the device, then
+   initialize it with your new settings. */
+
+/* Q: What are cip_n and cip_d? */
+
+/*
+  A: DV video streams do not utilize 100% of the potential bandwidth offered
+  by IEEE 1394 (FireWire). To achieve the correct rate of data transmission,
+  DV devices must periodically insert empty packets into the 1394 data stream.
+  Typically there is one empty packet per 14-16 data-carrying packets.
+
+  Some DV devices will accept a wide range of empty packet rates, while others
+  require a precise rate. If the dv1394 driver produces empty packets at
+  a rate that your device does not accept, you may see ugly patterns on the
+  DV output, or even no output at all.
+
+  The default empty packet insertion rate seems to work for many people; if
+  your DV output is stable, you can simply ignore this discussion. However,
+  we have exposed the empty packet rate as a parameter to support devices that
+  do not work with the default rate.
+
+  The decision to insert an empty packet is made with a numerator/denominator
+  algorithm. Empty packets are produced at an average rate of CIP_N / CIP_D.
+  You can alter the empty packet rate by passing non-zero values for cip_n
+  and cip_d to the INIT ioctl.
+
+ */
+
+
+
+struct dv1394_status {
+	/* this embedded init struct returns the current dv1394
+	   parameters in use */
+	struct dv1394_init init;
+
+	/* the ringbuffer frame that is currently being
+	   displayed. (-1 if the device is not transmitting anything) */
+	int active_frame;
+
+	/* index of the first buffer (ahead of active_frame) that
+	   is ready to be filled with data */
+	unsigned int first_clear_frame;
+
+	/* how many buffers, including first_clear_buffer, are
+	   ready to be filled with data */
+	unsigned int n_clear_frames;
+
+	/* how many times the DV stream has underflowed, overflowed,
+	   or otherwise encountered an error, since the previous call
+	   to DV1394_GET_STATUS */
+	unsigned int dropped_frames;
+
+	/* N.B. The dropped_frames counter is only a lower bound on the actual
+	   number of dropped frames, with the special case that if dropped_frames
+	   is zero, then it is guaranteed that NO frames have been dropped
+	   since the last call to DV1394_GET_STATUS.
+	*/
+};
+
+
+#endif /* _DV_1394_H */
diff --git a/drivers/ieee1394/eth1394.c b/drivers/ieee1394/eth1394.c
new file mode 100644
index 000000000000..654da76bf811
--- /dev/null
+++ b/drivers/ieee1394/eth1394.c
@@ -0,0 +1,1801 @@
+/*
+ * eth1394.c -- Ethernet driver for Linux IEEE-1394 Subsystem
+ *
+ * Copyright (C) 2001-2003 Ben Collins <bcollins@debian.org>
+ *               2000 Bonin Franck <boninf@free.fr>
+ *               2003 Steve Kinneberg <kinnebergsteve@acmsystems.com>
+ *
+ * Mainly based on work by Emanuel Pirker and Andreas E. Bombe
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/* This driver intends to support RFC 2734, which describes a method for
+ * transporting IPv4 datagrams over IEEE-1394 serial busses. This driver
+ * will ultimately support that method, but currently falls short in
+ * several areas.
+ *
+ * TODO:
+ * RFC 2734 related:
+ * - Add MCAP. Limited Multicast exists only to 224.0.0.1 and 224.0.0.2.
+ *
+ * Non-RFC 2734 related:
+ * - Handle fragmented skb's coming from the networking layer.
+ * - Move generic GASP reception to core 1394 code
+ * - Convert kmalloc/kfree for link fragments to use kmem_cache_* instead
+ * - Stability improvements
+ * - Performance enhancements
+ * - Consider garbage collecting old partial datagrams after X amount of time
+ */
+
+
+#include <linux/module.h>
+
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+#include <linux/if_ether.h>
+#include <linux/ip.h>
+#include <linux/in.h>
+#include <linux/tcp.h>
+#include <linux/skbuff.h>
+#include <linux/bitops.h>
+#include <linux/ethtool.h>
+#include <asm/uaccess.h>
+#include <asm/delay.h>
+#include <asm/semaphore.h>
+#include <net/arp.h>
+
+#include "csr1212.h"
+#include "ieee1394_types.h"
+#include "ieee1394_core.h"
+#include "ieee1394_transactions.h"
+#include "ieee1394.h"
+#include "highlevel.h"
+#include "iso.h"
+#include "nodemgr.h"
+#include "eth1394.h"
+#include "config_roms.h"
+
+/* printk wrappers: the _G ("global") form is for messages not tied to a
+ * particular net_device; the other form also prefixes the device name. */
+#define ETH1394_PRINT_G(level, fmt, args...) \
+	printk(level "%s: " fmt, driver_name, ## args)
+
+#define ETH1394_PRINT(level, dev_name, fmt, args...) \
+	printk(level "%s: %s: " fmt, driver_name, dev_name, ## args)
+
+/* Developer debugging aids (emitted at KERN_ERR so they are always logged). */
+#define DEBUG(fmt, args...) \
+	printk(KERN_ERR "%s:%s[%d]: " fmt "\n", driver_name, __FUNCTION__, __LINE__, ## args)
+#define TRACE() printk(KERN_ERR "%s:%s[%d] ---- TRACE\n", driver_name, __FUNCTION__, __LINE__)
+
+/* Version banner, printed once from ether1394_add_host(). */
+static char version[] __devinitdata =
+	"$Rev: 1247 $ Ben Collins <bcollins@debian.org>";
+
+/* One contiguous, already-received byte range of a fragmented datagram. */
+struct fragment_info {
+	struct list_head list;
+	int offset;		/* byte offset within the reassembled datagram */
+	int len;		/* length of this contiguous range */
+};
+
+/* Reassembly state for one in-flight fragmented datagram. */
+struct partial_datagram {
+	struct list_head list;
+	u16 dgl;		/* datagram label identifying this datagram */
+	u16 dg_size;		/* total size of the reassembled datagram */
+	u16 ether_type;
+	struct sk_buff *skb;	/* skb the fragments are copied into */
+	char *pbuf;		/* payload area inside skb (skb_put'd) */
+	struct list_head frag_info;	/* sorted list of fragment_info */
+};
+
+/* Per-node collection of partial datagrams awaiting reassembly. */
+struct pdg_list {
+	struct list_head list;		/* partial datagram list per node	*/
+	unsigned int sz;		/* partial datagram list size per node	*/
+	spinlock_t lock;		/* partial datagram lock		*/
+};
+
+/* hostinfo attached to each hpsb_host: links the 1394 host to its netdev. */
+struct eth1394_host_info {
+	struct hpsb_host *host;
+	struct net_device *dev;
+};
+
+/* Entry on priv->ip_node_list; one per IP-capable unit directory we know. */
+struct eth1394_node_ref {
+	struct unit_directory *ud;
+	struct list_head list;
+};
+
+/* Per-remote-node state, stored in ud->device.driver_data. */
+struct eth1394_node_info {
+	u16 maxpayload;			/* Max payload			*/
+	u8 sspd;			/* Max speed			*/
+	u64 fifo;			/* FIFO address			*/
+	struct pdg_list pdg;		/* partial RX datagram lists	*/
+	int dgl;			/* Outgoing datagram label	*/
+};
+
+/* Our ieee1394 highlevel driver */
+#define ETH1394_DRIVER_NAME "eth1394"
+static const char driver_name[] = ETH1394_DRIVER_NAME;
+
+/* Slab cache for packet tasks -- presumably used on the transmit path;
+ * the users are elsewhere in this file. */
+static kmem_cache_t *packet_task_cache;
+
+/* Tentative declaration; the initialized definition is further below. */
+static struct hpsb_highlevel eth1394_highlevel;
+
+/* Use common.lf to determine header len; indexed by the encapsulation
+ * header's lf (fragment type) field. */
+static const int hdr_type_len[] = {
+	sizeof (struct eth1394_uf_hdr),
+	sizeof (struct eth1394_ff_hdr),
+	sizeof (struct eth1394_sf_hdr),
+	sizeof (struct eth1394_sf_hdr)
+};
+
+/* Change this to IEEE1394_SPEED_S100 to make testing easier */
+#define ETH1394_SPEED_DEF	IEEE1394_SPEED_MAX
+
+/* For now, this needs to be 1500, so that XP works with us */
+#define ETH1394_DATA_LEN	ETH_DATA_LEN
+
+/* Maximum asynchronous payload per bus speed, indexed by IEEE1394_SPEED_*. */
+static const u16 eth1394_speedto_maxpayload[] = {
+/*     S100, S200, S400, S800, S1600, S3200 */
+	512, 1024, 2048, 4096,  4096,  4096
+};
+
+MODULE_AUTHOR("Ben Collins (bcollins@debian.org)");
+MODULE_DESCRIPTION("IEEE 1394 IPv4 Driver (IPv4-over-1394 as per RFC 2734)");
+MODULE_LICENSE("GPL");
+
+/* The max_partial_datagrams parameter is the maximum number of fragmented
+ * datagrams per node that eth1394 will keep in memory.  Providing an upper
+ * bound allows us to limit the amount of memory that partial datagrams
+ * consume in the event that some partial datagrams are never completed.
+ */
+static int max_partial_datagrams = 25;
+module_param(max_partial_datagrams, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(max_partial_datagrams,
+		 "Maximum number of partially received fragmented datagrams "
+		 "(default = 25).");
+
+/* Forward declarations for the netdevice hard-header callbacks installed
+ * in ether1394_init_dev(). */
+static int ether1394_header(struct sk_buff *skb, struct net_device *dev,
+			    unsigned short type, void *daddr, void *saddr,
+			    unsigned len);
+static int ether1394_rebuild_header(struct sk_buff *skb);
+static int ether1394_header_parse(struct sk_buff *skb, unsigned char *haddr);
+static int ether1394_header_cache(struct neighbour *neigh, struct hh_cache *hh);
+static void ether1394_header_cache_update(struct hh_cache *hh,
+					  struct net_device *dev,
+					  unsigned char * haddr);
+static int ether1394_mac_addr(struct net_device *dev, void *p);
+
+static void purge_partial_datagram(struct list_head *old);
+static int ether1394_tx(struct sk_buff *skb, struct net_device *dev);
+static void ether1394_iso(struct hpsb_iso *iso);
+
+static struct ethtool_ops ethtool_ops;
+
+static int ether1394_write(struct hpsb_host *host, int srcid, int destid,
+			   quadlet_t *data, u64 addr, size_t len, u16 flags);
+static void ether1394_add_host (struct hpsb_host *host);
+static void ether1394_remove_host (struct hpsb_host *host);
+static void ether1394_host_reset (struct hpsb_host *host);
+
+/* Function for incoming 1394 packets */
+static struct hpsb_address_ops addr_ops = {
+	.write =	ether1394_write,
+};
+
+/* Ieee1394 highlevel driver functions */
+static struct hpsb_highlevel eth1394_highlevel = {
+	.name =		driver_name,
+	.add_host =	ether1394_add_host,
+	.remove_host =	ether1394_remove_host,
+	.host_reset =	ether1394_host_reset,
+};
+
+
+/* This is called after an "ifup" */
+static int ether1394_open (struct net_device *dev)
+{
+	struct eth1394_priv *priv = netdev_priv(dev);
+	int ret = 0;
+
+	/* Something bad happened, don't even try */
+	if (priv->bc_state == ETHER1394_BC_ERROR) {
+		/* we'll try again */
+		priv->iso = hpsb_iso_recv_init(priv->host,
+					       ETHER1394_GASP_BUFFERS * 2 *
+					       (1 << (priv->host->csr.max_rec +
+						      1)),
+					       ETHER1394_GASP_BUFFERS,
+					       priv->broadcast_channel,
+					       HPSB_ISO_DMA_PACKET_PER_BUFFER,
+					       1, ether1394_iso);
+		if (priv->iso == NULL) {
+			ETH1394_PRINT(KERN_ERR, dev->name,
+				      "Could not allocate isochronous receive "
+				      "context for the broadcast channel\n");
+			priv->bc_state = ETHER1394_BC_ERROR;
+			ret = -EAGAIN;
+		} else {
+			if (hpsb_iso_recv_start(priv->iso, -1, (1 << 3), -1) < 0)
+				priv->bc_state = ETHER1394_BC_STOPPED;
+			else
+				priv->bc_state = ETHER1394_BC_RUNNING;
+		}
+	}
+
+	if (ret)
+		return ret;
+
+	netif_start_queue (dev);
+	return 0;
+}
+
+/* This is called after an "ifdown" */
+static int ether1394_stop (struct net_device *dev)
+{
+	netif_stop_queue (dev);
+	return 0;
+}
+
+/* Return statistics to the caller */
+static struct net_device_stats *ether1394_stats (struct net_device *dev)
+{
+	return &(((struct eth1394_priv *)netdev_priv(dev))->stats);
+}
+
+/* What to do if we timeout. I think a host reset is probably in order, so
+ * that's what we do. Should we increment the stat counters too?  */
+static void ether1394_tx_timeout (struct net_device *dev)
+{
+	ETH1394_PRINT (KERN_ERR, dev->name, "Timeout, resetting host %s\n",
+		       ((struct eth1394_priv *)netdev_priv(dev))->host->driver->name);
+
+	highlevel_host_reset (((struct eth1394_priv *)netdev_priv(dev))->host);
+
+	netif_wake_queue (dev);
+}
+
+/* Validate and apply a new MTU.  The lower bound 68 is the minimum IPv4
+ * MTU (RFC 791); the upper bound is the smaller of ETH1394_DATA_LEN and
+ * what fits into one async packet (2^(max_rec+1)) after subtracting our
+ * encapsulation header and the GASP overhead. */
+static int ether1394_change_mtu(struct net_device *dev, int new_mtu)
+{
+	struct eth1394_priv *priv = netdev_priv(dev);
+
+	if ((new_mtu < 68) ||
+	    (new_mtu > min(ETH1394_DATA_LEN,
+			   (int)((1 << (priv->host->csr.max_rec + 1)) -
+				 (sizeof(union eth1394_hdr) +
+				  ETHER1394_GASP_OVERHEAD)))))
+		return -EINVAL;
+	dev->mtu = new_mtu;
+	return 0;
+}
+
+/* Drop a partial datagram: free every fragment descriptor, unlink the
+ * datagram from its per-node list, and release its skb and state.
+ * Caller holds the pdg lock. */
+static void purge_partial_datagram(struct list_head *old)
+{
+	struct partial_datagram *pd = list_entry(old, struct partial_datagram, list);
+	struct fragment_info *fi, *next;
+
+	list_for_each_entry_safe(fi, next, &pd->frag_info, list) {
+		list_del(&fi->list);
+		kfree(fi);
+	}
+
+	list_del(old);
+	kfree_skb(pd->skb);
+	kfree(pd);
+}
+
+/******************************************
+ * 1394 bus activity functions
+ ******************************************/
+
+/* Look up the node reference for a given unit directory, or NULL. */
+static struct eth1394_node_ref *eth1394_find_node(struct list_head *inl,
+						  struct unit_directory *ud)
+{
+	struct eth1394_node_ref *ref;
+
+	list_for_each_entry(ref, inl, list) {
+		if (ref->ud == ud)
+			return ref;
+	}
+	return NULL;
+}
+
+/* Look up the node reference whose node entry carries this GUID, or NULL. */
+static struct eth1394_node_ref *eth1394_find_node_guid(struct list_head *inl,
+						       u64 guid)
+{
+	struct eth1394_node_ref *ref;
+
+	list_for_each_entry(ref, inl, list) {
+		if (ref->ud->ne->guid == guid)
+			return ref;
+	}
+	return NULL;
+}
+
+/* Look up the node reference currently holding this node ID, or NULL. */
+static struct eth1394_node_ref *eth1394_find_node_nodeid(struct list_head *inl,
+							 nodeid_t nodeid)
+{
+	struct eth1394_node_ref *ref;
+
+	list_for_each_entry(ref, inl, list)
+		if (ref->ud->ne->nodeid == nodeid)
+			return ref;
+
+	return NULL;
+}
+
+/* Driver-core probe: a unit directory matching eth1394_id_table appeared.
+ * Allocate per-node bookkeeping and put the node on our ip_node_list.
+ * Returns 0, -ENOENT if the host is not ours, or -ENOMEM. */
+static int eth1394_probe(struct device *dev)
+{
+	struct unit_directory *ud;
+	struct eth1394_host_info *hi;
+	struct eth1394_priv *priv;
+	struct eth1394_node_ref *new_node;
+	struct eth1394_node_info *node_info;
+
+	ud = container_of(dev, struct unit_directory, device);
+
+	hi = hpsb_get_hostinfo(&eth1394_highlevel, ud->ne->host);
+	if (!hi)
+		return -ENOENT;
+
+	new_node = kmalloc(sizeof(struct eth1394_node_ref),
+			   in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
+	if (!new_node)
+		return -ENOMEM;
+
+	node_info = kmalloc(sizeof(struct eth1394_node_info),
+			    in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
+	if (!node_info) {
+		kfree(new_node);
+		return -ENOMEM;
+	}
+
+	spin_lock_init(&node_info->pdg.lock);
+	INIT_LIST_HEAD(&node_info->pdg.list);
+	node_info->pdg.sz = 0;
+	node_info->fifo = ETHER1394_INVALID_ADDR;
+	/* Start the outgoing datagram label at 0; it was previously left as
+	 * uninitialized kmalloc memory. */
+	node_info->dgl = 0;
+
+	ud->device.driver_data = node_info;
+	new_node->ud = ud;
+
+	priv = netdev_priv(hi->dev);
+	list_add_tail(&new_node->list, &priv->ip_node_list);
+
+	return 0;
+}
+
+/* Driver-core remove: tear down the per-node state created in
+ * eth1394_probe(), purging any reassembly still in flight. */
+static int eth1394_remove(struct device *dev)
+{
+	struct unit_directory *ud;
+	struct eth1394_host_info *hi;
+	struct eth1394_priv *priv;
+	struct eth1394_node_ref *old_node;
+	struct eth1394_node_info *node_info;
+	struct list_head *lh, *n;
+	unsigned long flags;
+
+	ud = container_of(dev, struct unit_directory, device);
+	hi = hpsb_get_hostinfo(&eth1394_highlevel, ud->ne->host);
+	if (!hi)
+		return -ENOENT;
+
+	priv = netdev_priv(hi->dev);
+
+	old_node = eth1394_find_node(&priv->ip_node_list, ud);
+
+	if (old_node) {
+		list_del(&old_node->list);
+		kfree(old_node);
+
+		node_info = (struct eth1394_node_info*)ud->device.driver_data;
+
+		spin_lock_irqsave(&node_info->pdg.lock, flags);
+		/* The partial datagram list should be empty, but we'll just
+		 * make sure anyway... */
+		list_for_each_safe(lh, n, &node_info->pdg.list) {
+			purge_partial_datagram(lh);
+		}
+		spin_unlock_irqrestore(&node_info->pdg.lock, flags);
+
+		kfree(node_info);
+		ud->device.driver_data = NULL;
+	}
+	return 0;
+}
+
+/* Called when a known unit directory is updated (e.g. after a bus reset).
+ * If the node is not yet on our ip_node_list, create its bookkeeping just
+ * as eth1394_probe() does.  Returns 0, -ENOENT, or -ENOMEM. */
+static int eth1394_update(struct unit_directory *ud)
+{
+	struct eth1394_host_info *hi;
+	struct eth1394_priv *priv;
+	struct eth1394_node_ref *node;
+	struct eth1394_node_info *node_info;
+
+	hi = hpsb_get_hostinfo(&eth1394_highlevel, ud->ne->host);
+	if (!hi)
+		return -ENOENT;
+
+	priv = netdev_priv(hi->dev);
+
+	node = eth1394_find_node(&priv->ip_node_list, ud);
+
+	if (!node) {
+		node = kmalloc(sizeof(struct eth1394_node_ref),
+			       in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
+		if (!node)
+			return -ENOMEM;
+
+		node_info = kmalloc(sizeof(struct eth1394_node_info),
+				    in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
+		if (!node_info) {
+			kfree(node);
+			return -ENOMEM;
+		}
+
+		spin_lock_init(&node_info->pdg.lock);
+		INIT_LIST_HEAD(&node_info->pdg.list);
+		node_info->pdg.sz = 0;
+		/* Mirror eth1394_probe(): previously neither fifo nor dgl was
+		 * initialized here, leaving a garbage FIFO address for nodes
+		 * first seen through an update. */
+		node_info->fifo = ETHER1394_INVALID_ADDR;
+		node_info->dgl = 0;
+
+		ud->device.driver_data = node_info;
+		node->ud = ud;
+
+		priv = netdev_priv(hi->dev);
+		list_add_tail(&node->list, &priv->ip_node_list);
+	}
+
+	return 0;
+}
+
+
+/* Match any unit directory advertising the RFC 2734 GASP specifier/version. */
+static struct ieee1394_device_id eth1394_id_table[] = {
+	{
+		.match_flags = (IEEE1394_MATCH_SPECIFIER_ID |
+				IEEE1394_MATCH_VERSION),
+		.specifier_id =	ETHER1394_GASP_SPECIFIER_ID,
+		.version = ETHER1394_GASP_VERSION,
+	},
+	{}
+};
+
+MODULE_DEVICE_TABLE(ieee1394, eth1394_id_table);
+
+/* Protocol driver glue: binds eth1394_probe/remove/update to matching
+ * unit directories on the ieee1394 bus. */
+static struct hpsb_protocol_driver eth1394_proto_driver = {
+	.name		= "IPv4 over 1394 Driver",
+	.id_table	= eth1394_id_table,
+	.update		= eth1394_update,
+	.driver		= {
+		.name		= ETH1394_DRIVER_NAME,
+		.bus		= &ieee1394_bus_type,
+		.probe		= eth1394_probe,
+		.remove		= eth1394_remove,
+	},
+};
+
+
+/* Re-derive per-host parameters (broadcast payload, speed limit, and --
+ * when set_mtu is nonzero -- MTU and hardware address).  Called at device
+ * init and again after every bus reset (then with set_mtu == 0). */
+static void ether1394_reset_priv (struct net_device *dev, int set_mtu)
+{
+	unsigned long flags;
+	int i;
+	struct eth1394_priv *priv = netdev_priv(dev);
+	struct hpsb_host *host = priv->host;
+	/* GUID is the two quadlets at bus_info_data[3] of the config ROM */
+	u64 guid = *((u64*)&(host->csr.rom->bus_info_data[3]));
+	u16 maxpayload = 1 << (host->csr.max_rec + 1);
+	int max_speed = IEEE1394_SPEED_MAX;
+
+	spin_lock_irqsave (&priv->lock, flags);
+
+	/* Forget cached unit directories; they are re-learned on RX */
+	memset(priv->ud_list, 0, sizeof(struct node_entry*) * ALL_NODES);
+	priv->bc_maxpayload = 512;
+
+	/* Determine speed limit: minimum over the speed_map row for our
+	 * node (indexed as a 64-wide matrix -- presumably 64 nodes/bus) */
+	for (i = 0; i < host->node_count; i++)
+		if (max_speed > host->speed_map[NODEID_TO_NODE(host->node_id) *
+						64 + i])
+			max_speed = host->speed_map[NODEID_TO_NODE(host->node_id) *
+						    64 + i];
+	priv->bc_sspd = max_speed;
+
+	/* We'll use our maxpayload as the default mtu */
+	if (set_mtu) {
+		dev->mtu = min(ETH1394_DATA_LEN,
+			       (int)(maxpayload -
+				     (sizeof(union eth1394_hdr) +
+				      ETHER1394_GASP_OVERHEAD)));
+
+		/* Set our hardware address while we're at it */
+		*(u64*)dev->dev_addr = guid;
+		*(u64*)dev->broadcast = ~0x0ULL;
+	}
+
+	spin_unlock_irqrestore (&priv->lock, flags);
+}
+
+/* This function is called right before register_netdev */
+static void ether1394_init_dev (struct net_device *dev)
+{
+	/* Our functions */
+	dev->open		= ether1394_open;
+	dev->stop		= ether1394_stop;
+	dev->hard_start_xmit	= ether1394_tx;
+	dev->get_stats		= ether1394_stats;
+	dev->tx_timeout		= ether1394_tx_timeout;
+	dev->change_mtu		= ether1394_change_mtu;
+
+	dev->hard_header	= ether1394_header;
+	dev->rebuild_header	= ether1394_rebuild_header;
+	dev->hard_header_cache	= ether1394_header_cache;
+	dev->header_cache_update= ether1394_header_cache_update;
+	dev->hard_header_parse	= ether1394_header_parse;
+	dev->set_mac_address	= ether1394_mac_addr;
+	SET_ETHTOOL_OPS(dev, &ethtool_ops);
+
+	/* Some constants */
+	dev->watchdog_timeo	= ETHER1394_TIMEOUT;
+	dev->flags		= IFF_BROADCAST | IFF_MULTICAST;
+	dev->features		= NETIF_F_HIGHDMA;
+	dev->addr_len		= ETH1394_ALEN;
+	dev->hard_header_len 	= ETH1394_HLEN;
+	dev->type		= ARPHRD_IEEE1394;
+
+	ether1394_reset_priv (dev, 1);
+}
+
+/*
+ * This function is called every time a card is found. It is generally called
+ * when the module is installed. This is where we add all of our ethernet
+ * devices. One for each host.
+ */
+/*
+ * This function is called every time a card is found. It is generally called
+ * when the module is installed. This is where we add all of our ethernet
+ * devices. One for each host.
+ */
+static void ether1394_add_host (struct hpsb_host *host)
+{
+	struct eth1394_host_info *hi = NULL;
+	struct net_device *dev = NULL;
+	struct eth1394_priv *priv;
+	static int version_printed = 0;
+	u64 fifo_addr = ~0ULL;	/* sentinel: no address space allocated */
+
+	if (!(host->config_roms & HPSB_CONFIG_ROM_ENTRY_IP1394))
+		return;
+
+	/* Claim an address region for incoming unicast datagrams */
+	fifo_addr = hpsb_allocate_and_register_addrspace(&eth1394_highlevel,
+							 host,
+							 &addr_ops,
+							 ETHER1394_REGION_ADDR_LEN,
+							 ETHER1394_REGION_ADDR_LEN,
+							 -1, -1);
+	if (fifo_addr == ~0ULL)
+		goto out;
+
+	if (version_printed++ == 0)
+		ETH1394_PRINT_G (KERN_INFO, "%s\n", version);
+
+	/* We should really have our own alloc_hpsbdev() function in
+	 * net_init.c instead of calling the one for ethernet then hijacking
+	 * it for ourselves.  That way we'd be a real networking device. */
+	dev = alloc_etherdev(sizeof (struct eth1394_priv));
+
+	if (dev == NULL) {
+		ETH1394_PRINT_G (KERN_ERR, "Out of memory trying to allocate "
+				 "etherdevice for IEEE 1394 device %s-%d\n",
+				 host->driver->name, host->id);
+		goto out;
+	}
+
+	SET_MODULE_OWNER(dev);
+	SET_NETDEV_DEV(dev, &host->device);
+
+	priv = netdev_priv(dev);
+
+	INIT_LIST_HEAD(&priv->ip_node_list);
+
+	spin_lock_init(&priv->lock);
+	priv->host = host;
+	priv->local_fifo = fifo_addr;
+
+	hi = hpsb_create_hostinfo(&eth1394_highlevel, host, sizeof(*hi));
+
+	if (hi == NULL) {
+		ETH1394_PRINT_G (KERN_ERR, "Out of memory trying to create "
+				 "hostinfo for IEEE 1394 device %s-%d\n",
+				 host->driver->name, host->id);
+		goto out;
+	}
+
+	ether1394_init_dev(dev);
+
+	if (register_netdev (dev)) {
+		ETH1394_PRINT (KERN_ERR, dev->name, "Error registering network driver\n");
+		goto out;
+	}
+
+	ETH1394_PRINT (KERN_INFO, dev->name, "IEEE-1394 IPv4 over 1394 Ethernet (fw-host%d)\n",
+		       host->id);
+
+	hi->host = host;
+	hi->dev = dev;
+
+	/* Ignore validity in hopes that it will be set in the future.  It'll
+	 * be checked when the eth device is opened. */
+	priv->broadcast_channel = host->csr.broadcast_channel & 0x3f;
+
+	priv->iso = hpsb_iso_recv_init(host, (ETHER1394_GASP_BUFFERS * 2 *
+					      (1 << (host->csr.max_rec + 1))),
+				       ETHER1394_GASP_BUFFERS,
+				       priv->broadcast_channel,
+				       HPSB_ISO_DMA_PACKET_PER_BUFFER,
+				       1, ether1394_iso);
+	if (priv->iso == NULL) {
+		ETH1394_PRINT(KERN_ERR, dev->name,
+			      "Could not allocate isochronous receive context "
+			      "for the broadcast channel\n");
+		priv->bc_state = ETHER1394_BC_ERROR;
+	} else {
+		if (hpsb_iso_recv_start(priv->iso, -1, (1 << 3), -1) < 0)
+			priv->bc_state = ETHER1394_BC_STOPPED;
+		else
+			priv->bc_state = ETHER1394_BC_RUNNING;
+	}
+
+	return;
+
+out:
+	/* Unwind whatever was set up before the failure */
+	if (dev != NULL)
+		free_netdev(dev);
+	if (hi)
+		hpsb_destroy_hostinfo(&eth1394_highlevel, host);
+	/* Previously the registered address space leaked on every error
+	 * path; release it if the allocation above succeeded. */
+	if (fifo_addr != ~0ULL)
+		hpsb_unregister_addrspace(&eth1394_highlevel, host, fifo_addr);
+
+	return;
+}
+
+/* Remove a card from our list */
+static void ether1394_remove_host (struct hpsb_host *host)
+{
+	struct eth1394_host_info *hi;
+
+	hi = hpsb_get_hostinfo(&eth1394_highlevel, host);
+	if (hi != NULL) {
+		struct eth1394_priv *priv = netdev_priv(hi->dev);
+
+		hpsb_unregister_addrspace(&eth1394_highlevel, host,
+					  priv->local_fifo);
+
+		if (priv->iso != NULL)
+			hpsb_iso_shutdown(priv->iso);
+
+		if (hi->dev) {
+			unregister_netdev (hi->dev);
+			free_netdev(hi->dev);
+		}
+	}
+
+	return;
+}
+
+/* A reset has just arisen */
+static void ether1394_host_reset (struct hpsb_host *host)
+{
+	struct eth1394_host_info *hi;
+	struct eth1394_priv *priv;
+	struct net_device *dev;
+	struct list_head *lh, *n;
+	struct eth1394_node_ref *node;
+	struct eth1394_node_info *node_info;
+	unsigned long flags;
+
+	hi = hpsb_get_hostinfo(&eth1394_highlevel, host);
+
+	/* This can happen for hosts that we don't use */
+	if (hi == NULL)
+		return;
+
+	dev = hi->dev;
+	priv = netdev_priv(dev);
+
+	/* Reset our private host data, but not our mtu */
+	netif_stop_queue (dev);
+	ether1394_reset_priv (dev, 0);
+
+	list_for_each_entry(node, &priv->ip_node_list, list) {
+		node_info = (struct eth1394_node_info*)node->ud->device.driver_data;
+
+		spin_lock_irqsave(&node_info->pdg.lock, flags);
+
+		list_for_each_safe(lh, n, &node_info->pdg.list) {
+			purge_partial_datagram(lh);
+		}
+
+		INIT_LIST_HEAD(&(node_info->pdg.list));
+		node_info->pdg.sz = 0;
+
+		spin_unlock_irqrestore(&node_info->pdg.lock, flags);
+	}
+
+	netif_wake_queue (dev);
+}
+
+/******************************************
+ * HW Header net device functions
+ ******************************************/
+/* These functions have been adapted from net/ethernet/eth.c */
+
+
+/* Create a fake MAC header for an arbitrary protocol layer.
+ * saddr=NULL means use device source address
+ * daddr=NULL means leave destination address (eg unresolved arp). */
+static int ether1394_header(struct sk_buff *skb, struct net_device *dev,
+			    unsigned short type, void *daddr, void *saddr,
+			    unsigned len)
+{
+	struct eth1394hdr *eth = (struct eth1394hdr *)skb_push(skb, ETH1394_HLEN);
+
+	eth->h_proto = htons(type);
+
+	if (dev->flags & (IFF_LOOPBACK|IFF_NOARP)) {
+		memset(eth->h_dest, 0, dev->addr_len);
+		return(dev->hard_header_len);
+	}
+
+	if (daddr) {
+		memcpy(eth->h_dest,daddr,dev->addr_len);
+		return dev->hard_header_len;
+	}
+
+	return -dev->hard_header_len;
+
+}
+
+
+/* Rebuild the faked MAC header. This is called after an ARP
+ * (or in future other address resolution) has completed on this
+ * sk_buff. We now let ARP fill in the other fields.
+ *
+ * This routine CANNOT use cached dst->neigh!
+ * Really, it is used only when dst->neigh is wrong.
+ */
+static int ether1394_rebuild_header(struct sk_buff *skb)
+{
+	struct eth1394hdr *eth = (struct eth1394hdr *)skb->data;
+	struct net_device *dev = skb->dev;
+
+	switch (eth->h_proto) {
+
+#ifdef CONFIG_INET
+	case __constant_htons(ETH_P_IP):
+ 		return arp_find((unsigned char*)&eth->h_dest, skb);
+#endif
+	default:
+		ETH1394_PRINT(KERN_DEBUG, dev->name,
+			      "unable to resolve type %04x addresses.\n",
+			      eth->h_proto);
+		break;
+	}
+
+	return 0;
+}
+
+/* Report our own hardware address as the "source" of a received frame
+ * (the 1394 encapsulation carries no per-frame source address). */
+static int ether1394_header_parse(struct sk_buff *skb, unsigned char *haddr)
+{
+	memcpy(haddr, skb->dev->dev_addr, ETH1394_ALEN);
+	return ETH1394_ALEN;
+}
+
+
+/* Fill a neighbour's hh_cache with a prebuilt header.  The header is
+ * placed at the tail of hh_data (offset 16 - ETH1394_HLEN), matching the
+ * layout ether1394_header_cache_update() below writes to. */
+static int ether1394_header_cache(struct neighbour *neigh, struct hh_cache *hh)
+{
+	unsigned short type = hh->hh_type;
+	struct eth1394hdr *eth = (struct eth1394hdr*)(((u8*)hh->hh_data) +
+						      (16 - ETH1394_HLEN));
+	struct net_device *dev = neigh->dev;
+
+	/* Raw 802.3 frames cannot be cached this way */
+	if (type == __constant_htons(ETH_P_802_3)) {
+		return -1;
+	}
+
+	eth->h_proto = type;
+	memcpy(eth->h_dest, neigh->ha, dev->addr_len);
+
+	hh->hh_len = ETH1394_HLEN;
+	return 0;
+}
+
+/* Called by Address Resolution module to notify changes in address. */
+static void ether1394_header_cache_update(struct hh_cache *hh,
+					  struct net_device *dev,
+					  unsigned char * haddr)
+{
+	memcpy(((u8*)hh->hh_data) + (16 - ETH1394_HLEN), haddr, dev->addr_len);
+}
+
+/* set_mac_address hook.  The address is derived from the adapter's GUID
+ * and cannot be overridden, so this always refuses. */
+static int ether1394_mac_addr(struct net_device *dev, void *p)
+{
+	if (netif_running(dev))
+		return -EBUSY;
+
+	/* Not going to allow setting the MAC address, we really need to use
+	 * the real one supplied by the hardware */
+	return -EINVAL;
+}
+
+
+
+/******************************************
+ * Datagram reception code
+ ******************************************/
+
+/* Copied from net/ethernet/eth.c */
+static inline u16 ether1394_type_trans(struct sk_buff *skb,
+				       struct net_device *dev)
+{
+	struct eth1394hdr *eth;
+	unsigned char *rawp;
+
+	skb->mac.raw = skb->data;
+	skb_pull (skb, ETH1394_HLEN);
+	eth = eth1394_hdr(skb);
+
+	if (*eth->h_dest & 1) {
+		if (memcmp(eth->h_dest, dev->broadcast, dev->addr_len)==0)
+			skb->pkt_type = PACKET_BROADCAST;
+#if 0
+		else
+			skb->pkt_type = PACKET_MULTICAST;
+#endif
+	} else {
+		if (memcmp(eth->h_dest, dev->dev_addr, dev->addr_len))
+			skb->pkt_type = PACKET_OTHERHOST;
+        }
+
+	if (ntohs (eth->h_proto) >= 1536)
+		return eth->h_proto;
+
+	rawp = skb->data;
+
+        if (*(unsigned short *)rawp == 0xFFFF)
+		return htons (ETH_P_802_3);
+
+        return htons (ETH_P_802_2);
+}
+
+/* Parse an encapsulated IP1394 header into an ethernet frame packet.
+ * We also perform ARP translation here, if need be.  */
+/* Parse an encapsulated IP1394 header into an ethernet frame packet.
+ * We also perform ARP translation here, if need be.  Returns the protocol
+ * (network byte order) on success, 0 on failure. */
+static inline u16 ether1394_parse_encap(struct sk_buff *skb,
+					struct net_device *dev,
+					nodeid_t srcid, nodeid_t destid,
+					u16 ether_type)
+{
+	struct eth1394_priv *priv = netdev_priv(dev);
+	u64 dest_hw;
+	unsigned short ret = 0;
+
+	/* Setup our hw addresses. We use these to build the
+	 * ethernet header.  */
+	if (destid == (LOCAL_BUS | ALL_NODES))
+		dest_hw = ~0ULL;  /* broadcast */
+	else
+		dest_hw = cpu_to_be64((((u64)priv->host->csr.guid_hi) << 32) |
+				      priv->host->csr.guid_lo);
+
+	/* If this is an ARP packet, convert it. First, we want to make
+	 * use of some of the fields, since they tell us a little bit
+	 * about the sending machine.  */
+	if (ether_type == __constant_htons (ETH_P_ARP)) {
+		struct eth1394_arp *arp1394 = (struct eth1394_arp*)skb->data;
+		struct arphdr *arp = (struct arphdr *)skb->data;
+		unsigned char *arp_ptr = (unsigned char *)(arp + 1);
+		u64 fifo_addr = (u64)ntohs(arp1394->fifo_hi) << 32 |
+			ntohl(arp1394->fifo_lo);
+		u8 max_rec = min(priv->host->csr.max_rec,
+				 (u8)(arp1394->max_rec));
+		int sspd = arp1394->sspd;
+		u16 maxpayload;
+		struct eth1394_node_ref *node;
+		struct eth1394_node_info *node_info;
+
+		/* Sanity check. MacOSX seems to be sending us 131 in this
+		 * field (atleast on my Panther G5). Not sure why. */
+		if (sspd > 5 || sspd < 0)
+			sspd = 0;
+
+		maxpayload = min(eth1394_speedto_maxpayload[sspd], (u16)(1 << (max_rec + 1)));
+
+		node = eth1394_find_node_guid(&priv->ip_node_list,
+					      be64_to_cpu(arp1394->s_uniq_id));
+		if (!node) {
+			return 0;
+		}
+
+		node_info = (struct eth1394_node_info*)node->ud->device.driver_data;
+
+		/* Update our speed/payload/fifo_offset table */
+		node_info->maxpayload =	maxpayload;
+		node_info->sspd =	sspd;
+		node_info->fifo =	fifo_addr;
+
+		/* Now that we're done with the 1394 specific stuff, we'll
+		 * need to alter some of the data.  Believe it or not, all
+		 * that needs to be done is sender_IP_address needs to be
+		 * moved, the destination hardware address get stuffed
+		 * in and the hardware address length set to 8.
+		 *
+		 * IMPORTANT: The code below overwrites 1394 specific data
+		 * needed above so keep the munging of the data for the
+		 * higher level IP stack last. */
+
+		arp->ar_hln = 8;
+		arp_ptr += arp->ar_hln;		/* skip over sender unique id */
+		*(u32*)arp_ptr = arp1394->sip;	/* move sender IP addr */
+		arp_ptr += arp->ar_pln;		/* skip over sender IP addr */
+
+		/* ar_op is carried in network byte order; the previous
+		 * comparison against host-order 1 only matched on
+		 * big-endian machines. */
+		if (arp->ar_op == htons(ARPOP_REQUEST))
+			/* just set ARP req target unique ID to 0 */
+			*((u64*)arp_ptr) = 0;
+		else
+			*((u64*)arp_ptr) = *((u64*)dev->dev_addr);
+	}
+
+	/* Now add the ethernet header. */
+	if (dev->hard_header (skb, dev, __constant_ntohs (ether_type),
+			      &dest_hw, NULL, skb->len) >= 0)
+		ret = ether1394_type_trans(skb, dev);
+
+	return ret;
+}
+
+/* Nonzero iff [offset, offset+len) intersects any fragment in the list. */
+static inline int fragment_overlap(struct list_head *frag_list, int offset, int len)
+{
+	struct fragment_info *fi;
+	int end = offset + len;
+
+	list_for_each_entry(fi, frag_list, list) {
+		/* standard interval-intersection test */
+		if (offset < fi->offset + fi->len && fi->offset < end)
+			return 1;
+	}
+	return 0;
+}
+
+/* Find the partial datagram carrying label dgl; NULL if none in flight. */
+static inline struct list_head *find_partial_datagram(struct list_head *pdgl, int dgl)
+{
+	struct partial_datagram *cur;
+
+	list_for_each_entry(cur, pdgl, list)
+		if (cur->dgl == dgl)
+			return &cur->list;
+
+	return NULL;
+}
+
+/* Assumes that new fragment does not overlap any existing fragments */
+static inline int new_fragment(struct list_head *frag_info, int offset, int len)
+{
+	struct list_head *lh;
+	struct fragment_info *fi, *fi2, *new;
+
+	list_for_each(lh, frag_info) {
+		fi = list_entry(lh, struct fragment_info, list);
+		if ((fi->offset + fi->len) == offset) {
+			/* The new fragment can be tacked on to the end */
+			fi->len += len;
+			/* Did the new fragment plug a hole? */
+			fi2 = list_entry(lh->next, struct fragment_info, list);
+			if ((fi->offset + fi->len) == fi2->offset) {
+				/* glue fragments together */
+				fi->len += fi2->len;
+				list_del(lh->next);
+				kfree(fi2);
+			}
+			return 0;
+		} else if ((offset + len) == fi->offset) {
+			/* The new fragment can be tacked on to the beginning */
+			fi->offset = offset;
+			fi->len += len;
+			/* Did the new fragment plug a hole? */
+			fi2 = list_entry(lh->prev, struct fragment_info, list);
+			if ((fi2->offset + fi2->len) == fi->offset) {
+				/* glue fragments together */
+				fi2->len += fi->len;
+				list_del(lh);
+				kfree(fi);
+			}
+			return 0;
+		} else if (offset > (fi->offset + fi->len)) {
+			break;
+		} else if ((offset + len) < fi->offset) {
+			lh = lh->prev;
+			break;
+		}
+	}
+
+	new = kmalloc(sizeof(struct fragment_info), GFP_ATOMIC);
+	if (!new)
+		return -ENOMEM;
+
+	new->offset = offset;
+	new->len = len;
+
+	list_add(&new->list, lh);
+
+	return 0;
+}
+
+/* Start reassembly of a new fragmented datagram: allocate its tracking
+ * struct, record the first fragment, and reserve an skb big enough for
+ * the complete datagram.  Returns 0 or -ENOMEM (cleaning up on failure). */
+static inline int new_partial_datagram(struct net_device *dev,
+				       struct list_head *pdgl, int dgl,
+				       int dg_size, char *frag_buf,
+				       int frag_off, int frag_len)
+{
+	struct partial_datagram *new;
+
+	new = kmalloc(sizeof(struct partial_datagram), GFP_ATOMIC);
+	if (!new)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&new->frag_info);
+
+	if (new_fragment(&new->frag_info, frag_off, frag_len) < 0) {
+		kfree(new);
+		return -ENOMEM;
+	}
+
+	new->dgl = dgl;
+	new->dg_size = dg_size;
+
+	/* +15 allows 16-byte alignment of the payload via skb_reserve below */
+	new->skb = dev_alloc_skb(dg_size + dev->hard_header_len + 15);
+	if (!new->skb) {
+		/* free the single fragment_info added above */
+		struct fragment_info *fi = list_entry(new->frag_info.next,
+						      struct fragment_info,
+						      list);
+		kfree(fi);
+		kfree(new);
+		return -ENOMEM;
+	}
+
+	skb_reserve(new->skb, (dev->hard_header_len + 15) & ~15);
+	new->pbuf = skb_put(new->skb, dg_size);
+	memcpy(new->pbuf + frag_off, frag_buf, frag_len);
+
+	/* newest datagrams live at the head of the per-node list */
+	list_add(&new->list, pdgl);
+
+	return 0;
+}
+
+/* Merge another fragment into an existing partial datagram and copy its
+ * payload into place.  Returns 0 or -ENOMEM. */
+static inline int update_partial_datagram(struct list_head *pdgl, struct list_head *lh,
+					  char *frag_buf, int frag_off, int frag_len)
+{
+	struct partial_datagram *pd = list_entry(lh, struct partial_datagram, list);
+
+	if (new_fragment(&pd->frag_info, frag_off, frag_len) < 0)
+		return -ENOMEM;
+
+	memcpy(pd->pbuf + frag_off, frag_buf, frag_len);
+
+	/* Move list entry to beginning of list so that oldest partial
+	 * datagrams percolate to the end of the list */
+	list_move(lh, pdgl);
+
+	return 0;
+}
+
+/* A datagram is complete when its fragments have coalesced into a single
+ * entry spanning dg_size bytes; only the head entry is examined, relying
+ * on new_fragment() merging adjacent ranges as they arrive. */
+static inline int is_datagram_complete(struct list_head *lh, int dg_size)
+{
+	struct partial_datagram *pd = list_entry(lh, struct partial_datagram, list);
+	struct fragment_info *fi = list_entry(pd->frag_info.next,
+					      struct fragment_info, list);
+
+	return (fi->len == dg_size);
+}
+
+/* Packet reception. We convert the IP1394 encapsulation header to an
+ * ethernet header, and fill it with some of our other fields. This is
+ * an incoming packet from the 1394 bus.  */
+static int ether1394_data_handler(struct net_device *dev, int srcid, int destid,
+				  char *buf, int len)
+{
+	struct sk_buff *skb;
+	unsigned long flags;
+	struct eth1394_priv *priv = netdev_priv(dev);
+	union eth1394_hdr *hdr = (union eth1394_hdr *)buf;
+	u16 ether_type = 0;  /* initialized to clear warning */
+	int hdr_len;
+	struct unit_directory *ud = priv->ud_list[NODEID_TO_NODE(srcid)];
+	struct eth1394_node_info *node_info;
+
+	if (!ud) {
+		struct eth1394_node_ref *node;
+		node = eth1394_find_node_nodeid(&priv->ip_node_list, srcid);
+		if (!node) {
+			HPSB_PRINT(KERN_ERR, "ether1394 rx: sender nodeid "
+				   "lookup failure: " NODE_BUS_FMT,
+				   NODE_BUS_ARGS(priv->host, srcid));
+			priv->stats.rx_dropped++;
+			return -1;
+		}
+		ud = node->ud;
+
+		priv->ud_list[NODEID_TO_NODE(srcid)] = ud;
+	}
+
+	node_info = (struct eth1394_node_info*)ud->device.driver_data;
+
+	/* First, did we receive a fragmented or unfragmented datagram? */
+	hdr->words.word1 = ntohs(hdr->words.word1);
+
+	hdr_len = hdr_type_len[hdr->common.lf];
+
+	if (hdr->common.lf == ETH1394_HDR_LF_UF) {
+		/* An unfragmented datagram has been received by the ieee1394
+		 * bus. Build an skbuff around it so we can pass it to the
+		 * high level network layer. */
+
+		skb = dev_alloc_skb(len + dev->hard_header_len + 15);
+		if (!skb) {
+			HPSB_PRINT (KERN_ERR, "ether1394 rx: low on mem\n");
+			priv->stats.rx_dropped++;
+			return -1;
+		}
+		skb_reserve(skb, (dev->hard_header_len + 15) & ~15);
+		memcpy(skb_put(skb, len - hdr_len), buf + hdr_len, len - hdr_len);
+		ether_type = hdr->uf.ether_type;
+	} else {
+		/* A datagram fragment has been received, now the fun begins. */
+
+		struct list_head *pdgl, *lh;
+		struct partial_datagram *pd;
+		int fg_off;
+		int fg_len = len - hdr_len;
+		int dg_size;
+		int dgl;
+		int retval;
+		struct pdg_list *pdg = &(node_info->pdg);
+
+		hdr->words.word3 = ntohs(hdr->words.word3);
+		/* The 4th header word is reserved so no need to do ntohs() */
+
+		if (hdr->common.lf == ETH1394_HDR_LF_FF) {
+			ether_type = hdr->ff.ether_type;
+			dgl = hdr->ff.dgl;
+			dg_size = hdr->ff.dg_size + 1;
+			fg_off = 0;
+		} else {
+			hdr->words.word2 = ntohs(hdr->words.word2);
+			dgl = hdr->sf.dgl;
+			dg_size = hdr->sf.dg_size + 1;
+			fg_off = hdr->sf.fg_off;
+		}
+		spin_lock_irqsave(&pdg->lock, flags);
+
+		pdgl = &(pdg->list);
+		lh = find_partial_datagram(pdgl, dgl);
+
+		if (lh == NULL) {
+			while (pdg->sz >= max_partial_datagrams) {
+				/* remove the oldest */
+				purge_partial_datagram(pdgl->prev);
+				pdg->sz--;
+			}
+
+			retval = new_partial_datagram(dev, pdgl, dgl, dg_size,
+						      buf + hdr_len, fg_off,
+						      fg_len);
+			if (retval < 0) {
+				spin_unlock_irqrestore(&pdg->lock, flags);
+				goto bad_proto;
+			}
+			pdg->sz++;
+			lh = find_partial_datagram(pdgl, dgl);
+		} else {
+			struct partial_datagram *pd;
+
+			pd = list_entry(lh, struct partial_datagram, list);
+
+			if (fragment_overlap(&pd->frag_info, fg_off, fg_len)) {
+				/* Overlapping fragments, obliterate old
+				 * datagram and start new one. */
+				purge_partial_datagram(lh);
+				retval = new_partial_datagram(dev, pdgl, dgl,
+							      dg_size,
+							      buf + hdr_len,
+							      fg_off, fg_len);
+				if (retval < 0) {
+					pdg->sz--;
+					spin_unlock_irqrestore(&pdg->lock, flags);
+					goto bad_proto;
+				}
+			} else {
+				retval = update_partial_datagram(pdgl, lh,
+								 buf + hdr_len,
+								 fg_off, fg_len);
+				if (retval < 0) {
+					/* Couldn't save off fragment anyway
+					 * so might as well obliterate the
+					 * datagram now. */
+					purge_partial_datagram(lh);
+					pdg->sz--;
+					spin_unlock_irqrestore(&pdg->lock, flags);
+					goto bad_proto;
+				}
+			} /* fragment overlap */
+		} /* new datagram or add to existing one */
+
+		pd = list_entry(lh, struct partial_datagram, list);
+
+		if (hdr->common.lf == ETH1394_HDR_LF_FF) {
+			pd->ether_type = ether_type;
+		}
+
+		if (is_datagram_complete(lh, dg_size)) {
+			ether_type = pd->ether_type;
+			pdg->sz--;
+			skb = skb_get(pd->skb);
+			purge_partial_datagram(lh);
+			spin_unlock_irqrestore(&pdg->lock, flags);
+		} else {
+			/* Datagram is not complete, we're done for the
+			 * moment. */
+			spin_unlock_irqrestore(&pdg->lock, flags);
+			return 0;
+		}
+	} /* unframgented datagram or fragmented one */
+
+	/* Write metadata, and then pass to the receive level */
+	skb->dev = dev;
+	skb->ip_summed = CHECKSUM_UNNECESSARY;	/* don't check it */
+
+	/* Parse the encapsulation header. This actually does the job of
+	 * converting to an ethernet frame header, aswell as arp
+	 * conversion if needed. ARP conversion is easier in this
+	 * direction, since we are using ethernet as our backend.  */
+	skb->protocol = ether1394_parse_encap(skb, dev, srcid, destid,
+					      ether_type);
+
+
+	spin_lock_irqsave(&priv->lock, flags);
+	if (!skb->protocol) {
+		priv->stats.rx_errors++;
+		priv->stats.rx_dropped++;
+		dev_kfree_skb_any(skb);
+		goto bad_proto;
+	}
+
+	if (netif_rx(skb) == NET_RX_DROP) {
+		priv->stats.rx_errors++;
+		priv->stats.rx_dropped++;
+		goto bad_proto;
+	}
+
+	/* Statistics */
+	priv->stats.rx_packets++;
+	priv->stats.rx_bytes += skb->len;
+
+bad_proto:
+	if (netif_queue_stopped(dev))
+		netif_wake_queue(dev);
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	dev->last_rx = jiffies;
+
+	return 0;
+}
+
+static int ether1394_write(struct hpsb_host *host, int srcid, int destid,
+			   quadlet_t *data, u64 addr, size_t len, u16 flags)
+{
+	struct eth1394_host_info *hi;
+
+	hi = hpsb_get_hostinfo(&eth1394_highlevel, host);
+	if (hi == NULL) {
+		ETH1394_PRINT_G(KERN_ERR, "Could not find net device for host %s\n",
+				host->driver->name);
+		return RCODE_ADDRESS_ERROR;
+	}
+
+	if (ether1394_data_handler(hi->dev, srcid, destid, (char*)data, len))
+		return RCODE_ADDRESS_ERROR;
+	else
+		return RCODE_COMPLETE;
+}
+
+static void ether1394_iso(struct hpsb_iso *iso)
+{
+	quadlet_t *data;
+	char *buf;
+	struct eth1394_host_info *hi;
+	struct net_device *dev;
+	struct eth1394_priv *priv;
+	unsigned int len;
+	u32 specifier_id;
+	u16 source_id;
+	int i;
+	int nready;
+
+	hi = hpsb_get_hostinfo(&eth1394_highlevel, iso->host);
+	if (hi == NULL) {
+		ETH1394_PRINT_G(KERN_ERR, "Could not find net device for host %s\n",
+				iso->host->driver->name);
+		return;
+	}
+
+	dev = hi->dev;
+
+	nready = hpsb_iso_n_ready(iso);
+	for (i = 0; i < nready; i++) {
+		struct hpsb_iso_packet_info *info =
+			&iso->infos[(iso->first_packet + i) % iso->buf_packets];
+		data = (quadlet_t*) (iso->data_buf.kvirt + info->offset);
+
+		/* skip over GASP header */
+		buf = (char *)data + 8;
+		len = info->len - 8;
+
+		specifier_id = (((be32_to_cpu(data[0]) & 0xffff) << 8) |
+				((be32_to_cpu(data[1]) & 0xff000000) >> 24));
+		source_id = be32_to_cpu(data[0]) >> 16;
+
+		priv = netdev_priv(dev);
+
+		if (info->channel != (iso->host->csr.broadcast_channel & 0x3f) ||
+		   specifier_id != ETHER1394_GASP_SPECIFIER_ID) {
+			/* This packet is not for us */
+			continue;
+		}
+		ether1394_data_handler(dev, source_id, LOCAL_BUS | ALL_NODES,
+				       buf, len);
+	}
+
+	hpsb_iso_recv_release_packets(iso, i);
+
+	dev->last_rx = jiffies;
+}
+
+/******************************************
+ * Datagram transmission code
+ ******************************************/
+
+/* Convert a standard ARP packet to 1394 ARP. The first 8 bytes (the entire
+ * arphdr) is the same format as the ip1394 header, so they overlap.  The rest
+ * needs to be munged a bit.  The remainder of the arphdr is formatted based
+ * on hwaddr len and ipaddr len.  We know what they'll be, so it's easy to
+ * judge.
+ *
+ * Now that the EUI is used for the hardware address all we need to do to make
+ * this work for 1394 is to insert 2 quadlets that contain max_rec size,
+ * speed, and unicast FIFO address information between the sender_unique_id
+ * and the IP addresses.
+ */
+static inline void ether1394_arp_to_1394arp(struct sk_buff *skb,
+					    struct net_device *dev)
+{
+	struct eth1394_priv *priv = netdev_priv(dev);
+
+	struct arphdr *arp = (struct arphdr *)skb->data;
+	unsigned char *arp_ptr = (unsigned char *)(arp + 1);
+	struct eth1394_arp *arp1394 = (struct eth1394_arp *)skb->data;
+
+	/* Believe it or not, all that need to happen is sender IP get moved
+	 * and set hw_addr_len, max_rec, sspd, fifo_hi and fifo_lo.  */
+	arp1394->hw_addr_len	= 16;
+	arp1394->sip		= *(u32*)(arp_ptr + ETH1394_ALEN);
+	arp1394->max_rec	= priv->host->csr.max_rec;
+	arp1394->sspd		= priv->host->csr.lnk_spd;
+	arp1394->fifo_hi	= htons (priv->local_fifo >> 32);
+	arp1394->fifo_lo	= htonl (priv->local_fifo & ~0x0);
+
+	return;
+}
+
+/* We need to encapsulate the standard header with our own. We use the
+ * ethernet header's proto for our own. */
+static inline unsigned int ether1394_encapsulate_prep(unsigned int max_payload,
+						      int proto,
+						      union eth1394_hdr *hdr,
+						      u16 dg_size, u16 dgl)
+{
+	unsigned int adj_max_payload = max_payload - hdr_type_len[ETH1394_HDR_LF_UF];
+
+	/* Does it all fit in one packet? */
+	if (dg_size <= adj_max_payload) {
+		hdr->uf.lf = ETH1394_HDR_LF_UF;
+		hdr->uf.ether_type = proto;
+	} else {
+		hdr->ff.lf = ETH1394_HDR_LF_FF;
+		hdr->ff.ether_type = proto;
+		hdr->ff.dg_size = dg_size - 1;
+		hdr->ff.dgl = dgl;
+		adj_max_payload = max_payload - hdr_type_len[ETH1394_HDR_LF_FF];
+	}
+	return((dg_size + (adj_max_payload - 1)) / adj_max_payload);
+}
+
+static inline unsigned int ether1394_encapsulate(struct sk_buff *skb,
+						 unsigned int max_payload,
+						 union eth1394_hdr *hdr)
+{
+	union eth1394_hdr *bufhdr;
+	int ftype = hdr->common.lf;
+	int hdrsz = hdr_type_len[ftype];
+	unsigned int adj_max_payload = max_payload - hdrsz;
+
+	switch(ftype) {
+	case ETH1394_HDR_LF_UF:
+		bufhdr = (union eth1394_hdr *)skb_push(skb, hdrsz);
+		bufhdr->words.word1 = htons(hdr->words.word1);
+		bufhdr->words.word2 = hdr->words.word2;
+		break;
+
+	case ETH1394_HDR_LF_FF:
+		bufhdr = (union eth1394_hdr *)skb_push(skb, hdrsz);
+		bufhdr->words.word1 = htons(hdr->words.word1);
+		bufhdr->words.word2 = hdr->words.word2;
+		bufhdr->words.word3 = htons(hdr->words.word3);
+		bufhdr->words.word4 = 0;
+
+		/* Set frag type here for future interior fragments */
+		hdr->common.lf = ETH1394_HDR_LF_IF;
+		hdr->sf.fg_off = 0;
+		break;
+
+	default:
+		hdr->sf.fg_off += adj_max_payload;
+		bufhdr = (union eth1394_hdr *)skb_pull(skb, adj_max_payload);
+		if (max_payload >= skb->len)
+			hdr->common.lf = ETH1394_HDR_LF_LF;
+		bufhdr->words.word1 = htons(hdr->words.word1);
+		bufhdr->words.word2 = htons(hdr->words.word2);
+		bufhdr->words.word3 = htons(hdr->words.word3);
+		bufhdr->words.word4 = 0;
+	}
+
+	return min(max_payload, skb->len);
+}
+
+static inline struct hpsb_packet *ether1394_alloc_common_packet(struct hpsb_host *host)
+{
+	struct hpsb_packet *p;
+
+	p = hpsb_alloc_packet(0);
+	if (p) {
+		p->host = host;
+		p->generation = get_hpsb_generation(host);
+		p->type = hpsb_async;
+	}
+	return p;
+}
+
+static inline int ether1394_prep_write_packet(struct hpsb_packet *p,
+					      struct hpsb_host *host,
+					      nodeid_t node, u64 addr,
+					      void * data, int tx_len)
+{
+	p->node_id = node;
+	p->data = NULL;
+
+	p->tcode = TCODE_WRITEB;
+	p->header[1] = (host->node_id << 16) | (addr >> 32);
+	p->header[2] = addr & 0xffffffff;
+
+	p->header_size = 16;
+	p->expect_response = 1;
+
+	if (hpsb_get_tlabel(p)) {
+		ETH1394_PRINT_G(KERN_ERR, "No more tlabels left while sending "
+				"to node " NODE_BUS_FMT "\n", NODE_BUS_ARGS(host, node));
+		return -1;
+	}
+	p->header[0] = (p->node_id << 16) | (p->tlabel << 10)
+		| (1 << 8) | (TCODE_WRITEB << 4);
+
+	p->header[3] = tx_len << 16;
+	p->data_size = (tx_len + 3) & ~3;
+	p->data = (quadlet_t*)data;
+
+	return 0;
+}
+
+static inline void ether1394_prep_gasp_packet(struct hpsb_packet *p,
+					      struct eth1394_priv *priv,
+					      struct sk_buff *skb, int length)
+{
+	p->header_size = 4;
+	p->tcode = TCODE_STREAM_DATA;
+
+	p->header[0] = (length << 16) | (3 << 14)
+		| ((priv->broadcast_channel) << 8)
+		| (TCODE_STREAM_DATA << 4);
+	p->data_size = length;
+	p->data = ((quadlet_t*)skb->data) - 2;
+	p->data[0] = cpu_to_be32((priv->host->node_id << 16) |
+				 ETHER1394_GASP_SPECIFIER_ID_HI);
+	p->data[1] = __constant_cpu_to_be32((ETHER1394_GASP_SPECIFIER_ID_LO << 24) |
+					    ETHER1394_GASP_VERSION);
+
+	/* Setting the node id to ALL_NODES (not LOCAL_BUS | ALL_NODES)
+	 * prevents hpsb_send_packet() from setting the speed to an arbitrary
+	 * value based on packet->node_id if packet->node_id is not set. */
+	p->node_id = ALL_NODES;
+	p->speed_code = priv->bc_sspd;
+}
+
+static inline void ether1394_free_packet(struct hpsb_packet *packet)
+{
+	if (packet->tcode != TCODE_STREAM_DATA)
+		hpsb_free_tlabel(packet);
+	hpsb_free_packet(packet);
+}
+
+static void ether1394_complete_cb(void *__ptask);
+
+static int ether1394_send_packet(struct packet_task *ptask, unsigned int tx_len)
+{
+	struct eth1394_priv *priv = ptask->priv;
+	struct hpsb_packet *packet = NULL;
+
+	packet = ether1394_alloc_common_packet(priv->host);
+	if (!packet)
+		return -1;
+
+	if (ptask->tx_type == ETH1394_GASP) {
+		int length = tx_len + (2 * sizeof(quadlet_t));
+
+		ether1394_prep_gasp_packet(packet, priv, ptask->skb, length);
+	} else if (ether1394_prep_write_packet(packet, priv->host,
+					       ptask->dest_node,
+					       ptask->addr, ptask->skb->data,
+					       tx_len)) {
+		hpsb_free_packet(packet);
+		return -1;
+	}
+
+	ptask->packet = packet;
+	hpsb_set_packet_complete_task(ptask->packet, ether1394_complete_cb,
+				      ptask);
+
+	if (hpsb_send_packet(packet) < 0) {
+		ether1394_free_packet(packet);
+		return -1;
+	}
+
+	return 0;
+}
+
+
+/* Task function to be run when a datagram transmission is completed */
+static inline void ether1394_dg_complete(struct packet_task *ptask, int fail)
+{
+	struct sk_buff *skb = ptask->skb;
+	struct net_device *dev = skb->dev;
+	struct eth1394_priv *priv = netdev_priv(dev);
+	unsigned long flags;
+
+	/* Statistics */
+	spin_lock_irqsave(&priv->lock, flags);
+	if (fail) {
+		priv->stats.tx_dropped++;
+		priv->stats.tx_errors++;
+	} else {
+		priv->stats.tx_bytes += skb->len;
+		priv->stats.tx_packets++;
+	}
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	dev_kfree_skb_any(skb);
+	kmem_cache_free(packet_task_cache, ptask);
+}
+
+
+/* Callback for when a packet has been sent and the status of that packet is
+ * known */
+static void ether1394_complete_cb(void *__ptask)
+{
+	struct packet_task *ptask = (struct packet_task *)__ptask;
+	struct hpsb_packet *packet = ptask->packet;
+	int fail = 0;
+
+	if (packet->tcode != TCODE_STREAM_DATA)
+		fail = hpsb_packet_success(packet);
+
+	ether1394_free_packet(packet);
+
+	ptask->outstanding_pkts--;
+	if (ptask->outstanding_pkts > 0 && !fail) {
+		int tx_len;
+
+		/* Add the encapsulation header to the fragment */
+		tx_len = ether1394_encapsulate(ptask->skb, ptask->max_payload,
+					       &ptask->hdr);
+		if (ether1394_send_packet(ptask, tx_len))
+			ether1394_dg_complete(ptask, 1);
+	} else {
+		ether1394_dg_complete(ptask, fail);
+	}
+}
+
+
+
+/* Transmit a packet (called by kernel) */
+static int ether1394_tx (struct sk_buff *skb, struct net_device *dev)
+{
+	int kmflags = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
+	struct eth1394hdr *eth;
+	struct eth1394_priv *priv = netdev_priv(dev);
+	int proto;
+	unsigned long flags;
+	nodeid_t dest_node;
+	eth1394_tx_type tx_type;
+	int ret = 0;
+	unsigned int tx_len;
+	unsigned int max_payload;
+	u16 dg_size;
+	u16 dgl;
+	struct packet_task *ptask;
+	struct eth1394_node_ref *node;
+	struct eth1394_node_info *node_info = NULL;
+
+	ptask = kmem_cache_alloc(packet_task_cache, kmflags);
+	if (ptask == NULL) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	/* XXX Ignore this for now. Noticed that when MacOSX is the IRM,
+	 * it does not set our validity bit. We need to compensate for
+	 * that somewhere else, but not in eth1394. */
+#if 0
+	if ((priv->host->csr.broadcast_channel & 0xc0000000) != 0xc0000000) {
+		ret = -EAGAIN;
+		goto fail;
+	}
+#endif
+
+	if ((skb = skb_share_check (skb, kmflags)) == NULL) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	/* Get rid of the fake eth1394 header, but save a pointer */
+	eth = (struct eth1394hdr*)skb->data;
+	skb_pull(skb, ETH1394_HLEN);
+
+	proto = eth->h_proto;
+	dg_size = skb->len;
+
+	/* Set the transmission type for the packet.  ARP packets and IP
+	 * broadcast packets are sent via GASP. */
+	if (memcmp(eth->h_dest, dev->broadcast, ETH1394_ALEN) == 0 ||
+	    proto == __constant_htons(ETH_P_ARP) ||
+	    (proto == __constant_htons(ETH_P_IP) &&
+	     IN_MULTICAST(__constant_ntohl(skb->nh.iph->daddr)))) {
+		tx_type = ETH1394_GASP;
+		dest_node = LOCAL_BUS | ALL_NODES;
+		max_payload = priv->bc_maxpayload - ETHER1394_GASP_OVERHEAD;
+		BUG_ON(max_payload < (512 - ETHER1394_GASP_OVERHEAD));
+		dgl = priv->bc_dgl;
+		if (max_payload < dg_size + hdr_type_len[ETH1394_HDR_LF_UF])
+			priv->bc_dgl++;
+	} else {
+		node = eth1394_find_node_guid(&priv->ip_node_list,
+					      be64_to_cpu(*(u64*)eth->h_dest));
+		if (!node) {
+			ret = -EAGAIN;
+			goto fail;
+		}
+		node_info = (struct eth1394_node_info*)node->ud->device.driver_data;
+		if (node_info->fifo == ETHER1394_INVALID_ADDR) {
+			ret = -EAGAIN;
+			goto fail;
+		}
+
+		dest_node = node->ud->ne->nodeid;
+		max_payload = node_info->maxpayload;
+		BUG_ON(max_payload < (512 - ETHER1394_GASP_OVERHEAD));
+
+		dgl = node_info->dgl;
+		if (max_payload < dg_size + hdr_type_len[ETH1394_HDR_LF_UF])
+			node_info->dgl++;
+		tx_type = ETH1394_WRREQ;
+	}
+
+	/* If this is an ARP packet, convert it */
+	if (proto == __constant_htons (ETH_P_ARP))
+		ether1394_arp_to_1394arp (skb, dev);
+
+	ptask->hdr.words.word1 = 0;
+	ptask->hdr.words.word2 = 0;
+	ptask->hdr.words.word3 = 0;
+	ptask->hdr.words.word4 = 0;
+	ptask->skb = skb;
+	ptask->priv = priv;
+	ptask->tx_type = tx_type;
+
+	if (tx_type != ETH1394_GASP) {
+		u64 addr;
+
+		spin_lock_irqsave(&priv->lock, flags);
+		addr = node_info->fifo;
+		spin_unlock_irqrestore(&priv->lock, flags);
+
+		ptask->addr = addr;
+		ptask->dest_node = dest_node;
+	}
+
+	ptask->tx_type = tx_type;
+	ptask->max_payload = max_payload;
+        ptask->outstanding_pkts = ether1394_encapsulate_prep(max_payload, proto,
+							     &ptask->hdr, dg_size,
+							     dgl);
+
+	/* Add the encapsulation header to the fragment */
+	tx_len = ether1394_encapsulate(skb, max_payload, &ptask->hdr);
+	dev->trans_start = jiffies;
+	if (ether1394_send_packet(ptask, tx_len))
+		goto fail;
+
+	netif_wake_queue(dev);
+	return 0;
+fail:
+	if (ptask)
+		kmem_cache_free(packet_task_cache, ptask);
+
+	if (skb != NULL)
+		dev_kfree_skb(skb);
+
+	spin_lock_irqsave (&priv->lock, flags);
+	priv->stats.tx_dropped++;
+	priv->stats.tx_errors++;
+	spin_unlock_irqrestore (&priv->lock, flags);
+
+	if (netif_queue_stopped(dev))
+		netif_wake_queue(dev);
+
+	return 0;  /* returning non-zero causes serious problems */
+}
+
+static void ether1394_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+	strcpy (info->driver, driver_name);
+	strcpy (info->version, "$Rev: 1247 $");
+	/* FIXME XXX provide sane businfo */
+	strcpy (info->bus_info, "ieee1394");
+}
+
+static struct ethtool_ops ethtool_ops = {
+	.get_drvinfo = ether1394_get_drvinfo
+};
+
+static int __init ether1394_init_module (void)
+{
+	packet_task_cache = kmem_cache_create("packet_task", sizeof(struct packet_task),
+					      0, 0, NULL, NULL);
+
+	/* Register ourselves as a highlevel driver */
+	hpsb_register_highlevel(&eth1394_highlevel);
+
+	return hpsb_register_protocol(&eth1394_proto_driver);
+}
+
+static void __exit ether1394_exit_module (void)
+{
+	hpsb_unregister_protocol(&eth1394_proto_driver);
+	hpsb_unregister_highlevel(&eth1394_highlevel);
+	kmem_cache_destroy(packet_task_cache);
+}
+
+module_init(ether1394_init_module);
+module_exit(ether1394_exit_module);
diff --git a/drivers/ieee1394/eth1394.h b/drivers/ieee1394/eth1394.h
new file mode 100644
index 000000000000..ed8f1c4b7fd8
--- /dev/null
+++ b/drivers/ieee1394/eth1394.h
@@ -0,0 +1,236 @@
+/*
+ * eth1394.h -- Ethernet driver for Linux IEEE-1394 Subsystem
+ *
+ * Copyright (C) 2000 Bonin Franck <boninf@free.fr>
+ *           (C) 2001 Ben Collins <bcollins@debian.org>
+ *
+ * Mainly based on work by Emanuel Pirker and Andreas E. Bombe
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __ETH1394_H
+#define __ETH1394_H
+
+#include <linux/netdevice.h>
+
+#include "ieee1394.h"
+
+/* Register for incoming packets. This is 4096 bytes, which supports up to
+ * S3200 (per Table 16-3 of IEEE 1394b-2002). */
+#define ETHER1394_REGION_ADDR_LEN	4096
+
+#define ETHER1394_INVALID_ADDR		~0ULL
+
+/* GASP identifier numbers for IPv4 over IEEE 1394 */
+#define ETHER1394_GASP_SPECIFIER_ID	0x00005E
+#define ETHER1394_GASP_SPECIFIER_ID_HI	((ETHER1394_GASP_SPECIFIER_ID >> 8) & 0xffff)
+#define ETHER1394_GASP_SPECIFIER_ID_LO	(ETHER1394_GASP_SPECIFIER_ID & 0xff)
+#define ETHER1394_GASP_VERSION		1
+
+#define ETHER1394_GASP_OVERHEAD (2 * sizeof(quadlet_t))  /* GASP header overhead */
+
+#define ETHER1394_GASP_BUFFERS 16
+
+/* Node set == 64 */
+#define NODE_SET			(ALL_NODES + 1)
+
+enum eth1394_bc_states { ETHER1394_BC_ERROR,
+			 ETHER1394_BC_RUNNING,
+			 ETHER1394_BC_STOPPED };
+
+
+/* Private structure for our ethernet driver */
+struct eth1394_priv {
+	struct net_device_stats stats;	/* Device stats			 */
+	struct hpsb_host *host;		/* The card for this dev	 */
+	u16 bc_maxpayload;		/* Max broadcast payload	 */
+	u8 bc_sspd;			/* Max broadcast speed		 */
+	u64 local_fifo;			/* Local FIFO Address		 */
+	spinlock_t lock;		/* Private lock			 */
+	int broadcast_channel;		/* Async stream Broadcast Channel */
+	enum eth1394_bc_states bc_state; /* broadcast channel state	 */
+	struct hpsb_iso *iso;		/* Async stream recv handle	 */
+	int bc_dgl;			/* Outgoing broadcast datagram label */
+	struct list_head ip_node_list;	/* List of IP capable nodes	 */
+	struct unit_directory *ud_list[ALL_NODES]; /* Cached unit dir list */
+};
+
+
+/* Define a fake hardware header format for the networking core.  Note that
+ * header size cannot exceed 16 bytes as that is the size of the header cache.
+ * Also, we do not need the source address in the header so we omit it and
+ * keep the header to under 16 bytes */
+#define ETH1394_ALEN (8)
+#define ETH1394_HLEN (10)
+
+struct eth1394hdr {
+	unsigned char	h_dest[ETH1394_ALEN];	/* destination eth1394 addr	*/
+	unsigned short	h_proto;		/* packet type ID field	*/
+}  __attribute__((packed));
+
+#ifdef __KERNEL__
+#include <linux/skbuff.h>
+
+static inline struct eth1394hdr *eth1394_hdr(const struct sk_buff *skb)
+{
+	return (struct eth1394hdr *)skb->mac.raw;
+}
+#endif
+
+typedef enum {ETH1394_GASP, ETH1394_WRREQ} eth1394_tx_type;
+
+/* IP1394 headers */
+#include <asm/byteorder.h>
+
+/* Unfragmented */
+#if defined __BIG_ENDIAN_BITFIELD
+struct eth1394_uf_hdr {
+	u16 lf:2;
+	u16 res:14;
+	u16 ether_type;		/* Ethernet packet type */
+} __attribute__((packed));
+#elif defined __LITTLE_ENDIAN_BITFIELD
+struct eth1394_uf_hdr {
+	u16 res:14;
+	u16 lf:2;
+	u16 ether_type;
+} __attribute__((packed));
+#else
+#error Unknown bit field type
+#endif
+
+/* First fragment */
+#if defined __BIG_ENDIAN_BITFIELD
+struct eth1394_ff_hdr {
+	u16 lf:2;
+	u16 res1:2;
+	u16 dg_size:12;		/* Datagram size */
+	u16 ether_type;		/* Ethernet packet type */
+	u16 dgl;		/* Datagram label */
+	u16 res2;
+} __attribute__((packed));
+#elif defined __LITTLE_ENDIAN_BITFIELD
+struct eth1394_ff_hdr {
+	u16 dg_size:12;
+	u16 res1:2;
+	u16 lf:2;
+	u16 ether_type;
+	u16 dgl;
+	u16 res2;
+} __attribute__((packed));
+#else
+#error Unknown bit field type
+#endif
+
+/* XXX: Subsequent fragments, including last */
+#if defined __BIG_ENDIAN_BITFIELD
+struct eth1394_sf_hdr {
+	u16 lf:2;
+	u16 res1:2;
+	u16 dg_size:12;		/* Datagram size */
+	u16 res2:4;
+	u16 fg_off:12;		/* Fragment offset */
+	u16 dgl;		/* Datagram label */
+	u16 res3;
+} __attribute__((packed));
+#elif defined __LITTLE_ENDIAN_BITFIELD
+struct eth1394_sf_hdr {
+	u16 dg_size:12;
+	u16 res1:2;
+	u16 lf:2;
+	u16 fg_off:12;
+	u16 res2:4;
+	u16 dgl;
+	u16 res3;
+} __attribute__((packed));
+#else
+#error Unknown bit field type
+#endif
+
+#if defined __BIG_ENDIAN_BITFIELD
+struct eth1394_common_hdr {
+	u16 lf:2;
+	u16 pad1:14;
+} __attribute__((packed));
+#elif defined __LITTLE_ENDIAN_BITFIELD
+struct eth1394_common_hdr {
+	u16 pad1:14;
+	u16 lf:2;
+} __attribute__((packed));
+#else
+#error Unknown bit field type
+#endif
+
+struct eth1394_hdr_words {
+	u16 word1;
+	u16 word2;
+	u16 word3;
+	u16 word4;
+};
+
+union eth1394_hdr {
+	struct eth1394_common_hdr common;
+	struct eth1394_uf_hdr uf;
+	struct eth1394_ff_hdr ff;
+	struct eth1394_sf_hdr sf;
+	struct eth1394_hdr_words words;
+};
+
+/* End of IP1394 headers */
+
+/* Fragment types */
+#define ETH1394_HDR_LF_UF	0	/* unfragmented		*/
+#define ETH1394_HDR_LF_FF	1	/* first fragment	*/
+#define ETH1394_HDR_LF_LF	2	/* last fragment	*/
+#define ETH1394_HDR_LF_IF	3	/* interior fragment	*/
+
+#define IP1394_HW_ADDR_LEN	16	/* As per RFC		*/
+
+/* Our arp packet (ARPHRD_IEEE1394) */
+struct eth1394_arp {
+	u16 hw_type;		/* 0x0018	*/
+	u16 proto_type;		/* 0x0806	*/
+	u8 hw_addr_len;		/* 16 		*/
+	u8 ip_addr_len;		/* 4		*/
+	u16 opcode;		/* ARP Opcode	*/
+	/* Above is exactly the same format as struct arphdr */
+
+	u64 s_uniq_id;		/* Sender's 64bit EUI			*/
+	u8 max_rec;		/* Sender's max packet size		*/
+	u8 sspd;		/* Sender's max speed			*/
+	u16 fifo_hi;		/* hi 16bits of sender's FIFO addr	*/
+	u32 fifo_lo;		/* lo 32bits of sender's FIFO addr	*/
+	u32 sip;		/* Sender's IP Address			*/
+	u32 tip;		/* IP Address of requested hw addr	*/
+};
+
+/* Network timeout */
+#define ETHER1394_TIMEOUT	100000
+
+/* This is our task struct. It's used for the packet complete callback.  */
+struct packet_task {
+	struct sk_buff *skb;
+	int outstanding_pkts;
+	eth1394_tx_type tx_type;
+	int max_payload;
+	struct hpsb_packet *packet;
+	struct eth1394_priv *priv;
+	union eth1394_hdr hdr;
+	u64 addr;
+	u16 dest_node;
+};
+
+#endif /* __ETH1394_H */
diff --git a/drivers/ieee1394/highlevel.c b/drivers/ieee1394/highlevel.c
new file mode 100644
index 000000000000..997e1bf6297f
--- /dev/null
+++ b/drivers/ieee1394/highlevel.c
@@ -0,0 +1,704 @@
+/*
+ * IEEE 1394 for Linux
+ *
+ * Copyright (C) 1999 Andreas E. Bombe
+ *
+ * This code is licensed under the GPL.  See the file COPYING in the root
+ * directory of the kernel sources for details.
+ *
+ *
+ * Contributions:
+ *
+ * Christian Toegel <christian.toegel@gmx.at>
+ *        unregister address space
+ *
+ * Manfred Weihs <weihs@ict.tuwien.ac.at>
+ *        unregister address space
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/bitops.h>
+
+#include "ieee1394.h"
+#include "ieee1394_types.h"
+#include "hosts.h"
+#include "ieee1394_core.h"
+#include "highlevel.h"
+#include "nodemgr.h"
+
+
+struct hl_host_info {
+	struct list_head list;
+	struct hpsb_host *host;
+	size_t size;
+	unsigned long key;
+	void *data;
+};
+
+
+static LIST_HEAD(hl_drivers);
+static DECLARE_RWSEM(hl_drivers_sem);
+
+static LIST_HEAD(hl_irqs);
+static DEFINE_RWLOCK(hl_irqs_lock);
+
+static DEFINE_RWLOCK(addr_space_lock);
+
+/* addr_space list will have zero and max already included as bounds */
+static struct hpsb_address_ops dummy_ops = { NULL, NULL, NULL, NULL };
+static struct hpsb_address_serve dummy_zero_addr, dummy_max_addr;
+
+
+static struct hl_host_info *hl_get_hostinfo(struct hpsb_highlevel *hl,
+					      struct hpsb_host *host)
+{
+	struct hl_host_info *hi = NULL;
+
+	if (!hl || !host)
+		return NULL;
+
+	read_lock(&hl->host_info_lock);
+	list_for_each_entry(hi, &hl->host_info_list, list) {
+		if (hi->host == host) {
+			read_unlock(&hl->host_info_lock);
+			return hi;
+		}
+	}
+	read_unlock(&hl->host_info_lock);
+
+	return NULL;
+}
+
+
+/* Returns a per host/driver data structure that was previously stored by
+ * hpsb_create_hostinfo. */
+void *hpsb_get_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host)
+{
+	struct hl_host_info *hi = hl_get_hostinfo(hl, host);
+
+	if (hi)
+		return hi->data;
+
+	return NULL;
+}
+
+
+/* If size is zero, then the return here is only valid for error checking */
+void *hpsb_create_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host,
+			   size_t data_size)
+{
+	struct hl_host_info *hi;
+	void *data;
+	unsigned long flags;
+
+	hi = hl_get_hostinfo(hl, host);
+	if (hi) {
+		HPSB_ERR("%s called hpsb_create_hostinfo when hostinfo already exists",
+			 hl->name);
+		return NULL;
+	}
+
+	hi = kmalloc(sizeof(*hi) + data_size, GFP_ATOMIC);
+	if (!hi)
+		return NULL;
+
+	memset(hi, 0, sizeof(*hi) + data_size);
+
+	if (data_size) {
+		data = hi->data = hi + 1;
+		hi->size = data_size;
+	} else
+		data = hi;
+
+	hi->host = host;
+
+	write_lock_irqsave(&hl->host_info_lock, flags);
+	list_add_tail(&hi->list, &hl->host_info_list);
+	write_unlock_irqrestore(&hl->host_info_lock, flags);
+
+	return data;
+}
+
+
+int hpsb_set_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host,
+		      void *data)
+{
+	struct hl_host_info *hi;
+
+	hi = hl_get_hostinfo(hl, host);
+	if (hi) {
+		if (!hi->size && !hi->data) {
+			hi->data = data;
+			return 0;
+		} else
+			HPSB_ERR("%s called hpsb_set_hostinfo when hostinfo already has data",
+				 hl->name);
+	} else
+		HPSB_ERR("%s called hpsb_set_hostinfo when no hostinfo exists",
+			 hl->name);
+
+	return -EINVAL;
+}
+
+
+void hpsb_destroy_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host)
+{
+	struct hl_host_info *hi;
+
+	hi = hl_get_hostinfo(hl, host);
+	if (hi) {
+		unsigned long flags;
+		write_lock_irqsave(&hl->host_info_lock, flags);
+		list_del(&hi->list);
+		write_unlock_irqrestore(&hl->host_info_lock, flags);
+		kfree(hi);
+	}
+
+	return;
+}
+
+
+void hpsb_set_hostinfo_key(struct hpsb_highlevel *hl, struct hpsb_host *host, unsigned long key)
+{
+	struct hl_host_info *hi;
+
+	hi = hl_get_hostinfo(hl, host);
+	if (hi)
+		hi->key = key;
+
+	return;
+}
+
+
+void *hpsb_get_hostinfo_bykey(struct hpsb_highlevel *hl, unsigned long key)
+{
+	struct hl_host_info *hi;
+	void *data = NULL;
+
+	if (!hl)
+		return NULL;
+
+	read_lock(&hl->host_info_lock);
+	list_for_each_entry(hi, &hl->host_info_list, list) {
+		if (hi->key == key) {
+			data = hi->data;
+			break;
+		}
+	}
+	read_unlock(&hl->host_info_lock);
+
+	return data;
+}
+
+
+static int highlevel_for_each_host_reg(struct hpsb_host *host, void *__data)
+{
+	struct hpsb_highlevel *hl = __data;
+
+	hl->add_host(host);
+
+        if (host->update_config_rom) {
+		if (hpsb_update_config_rom_image(host) < 0) {
+			HPSB_ERR("Failed to generate Configuration ROM image for host "
+				 "%s-%d", hl->name, host->id);
+		}
+	}
+
+	return 0;
+}
+
+void hpsb_register_highlevel(struct hpsb_highlevel *hl)
+{
+        INIT_LIST_HEAD(&hl->addr_list);
+	INIT_LIST_HEAD(&hl->host_info_list);
+
+	rwlock_init(&hl->host_info_lock);
+
+	down_write(&hl_drivers_sem);
+        list_add_tail(&hl->hl_list, &hl_drivers);
+	up_write(&hl_drivers_sem);
+
+	write_lock(&hl_irqs_lock);
+	list_add_tail(&hl->irq_list, &hl_irqs);
+	write_unlock(&hl_irqs_lock);
+
+	if (hl->add_host)
+		nodemgr_for_each_host(hl, highlevel_for_each_host_reg);
+
+        return;
+}
+
+static void __delete_addr(struct hpsb_address_serve *as)
+{
+	list_del(&as->host_list);
+	list_del(&as->hl_list);
+	kfree(as);
+}
+
+/* Detach one highlevel driver from one host: call the driver's remove_host
+ * hook, drop all address ranges the driver registered on this host,
+ * optionally regenerate the config ROM (update_cr), and finally destroy the
+ * hostinfo shared between the two. */
+static void __unregister_host(struct hpsb_highlevel *hl, struct hpsb_host *host, int update_cr)
+{
+	unsigned long flags;
+	struct list_head *lh, *next;
+	struct hpsb_address_serve *as;
+
+	/* First, let the highlevel driver unreg */
+	if (hl->remove_host)
+		hl->remove_host(host);
+
+	/* Remove any addresses that are matched for this highlevel driver
+	 * and this particular host. */
+	write_lock_irqsave(&addr_space_lock, flags);
+	list_for_each_safe (lh, next, &hl->addr_list) {
+		as = list_entry(lh, struct hpsb_address_serve, hl_list);
+
+		if (as->host == host)
+			__delete_addr(as);
+	}
+	write_unlock_irqrestore(&addr_space_lock, flags);
+
+	/* Now update the config-rom to reflect anything removed by the
+	 * highlevel driver. */
+	if (update_cr && host->update_config_rom) {
+		if (hpsb_update_config_rom_image(host) < 0) {
+			HPSB_ERR("Failed to generate Configuration ROM image for host "
+				 "%s-%d", hl->name, host->id);
+		}
+	}
+
+	/* And finally, remove all the host info associated between these
+	 * two. */
+	hpsb_destroy_hostinfo(hl, host);
+}
+
+/* nodemgr_for_each_host() callback used at driver unregistration time:
+ * detaches the driver from each host, regenerating the config ROM (third
+ * argument 1).  Always returns 0 so iteration continues over all hosts. */
+static int highlevel_for_each_host_unreg(struct hpsb_host *host, void *__data)
+{
+	struct hpsb_highlevel *hl = __data;
+
+	__unregister_host(hl, host, 1);
+
+	return 0;
+}
+
+/* Unregister a highlevel driver: remove it from the irq and driver lists,
+ * then detach it from every known host via __unregister_host(). */
+void hpsb_unregister_highlevel(struct hpsb_highlevel *hl)
+{
+	write_lock(&hl_irqs_lock);
+	list_del(&hl->irq_list);
+	write_unlock(&hl_irqs_lock);
+
+	down_write(&hl_drivers_sem);
+        list_del(&hl->hl_list);
+	up_write(&hl_drivers_sem);
+
+	nodemgr_for_each_host(hl, highlevel_for_each_host_unreg);
+}
+
+/*
+ * Allocate and register an address region of @size bytes for @hl on @host,
+ * aligned to @alignment, placed inside [@start, @end).  @alignment must be
+ * a quadlet-aligned power of two no larger than 0x800000000000ULL.  Passing
+ * start == end == ~0ULL selects the default high region (ohci1394 limit).
+ *
+ * Returns the base address of the new region, or ~0ULL on failure (invalid
+ * parameters, out of memory, or no suitable gap found).
+ */
+u64 hpsb_allocate_and_register_addrspace(struct hpsb_highlevel *hl,
+					 struct hpsb_host *host,
+					 struct hpsb_address_ops *ops,
+					 u64 size, u64 alignment,
+					 u64 start, u64 end)
+{
+	struct hpsb_address_serve *as, *a1, *a2;
+	struct list_head *entry;
+	u64 retval = ~0ULL;
+	unsigned long flags;
+	u64 align_mask = ~(alignment - 1);
+
+	/* Alignment must be a single power of two (popcount over both
+	 * halves == 1), quadlet aligned and within the 48 bit space. */
+	if ((alignment & 3) || (alignment > 0x800000000000ULL) ||
+	    ((hweight32(alignment >> 32) +
+	      hweight32(alignment & 0xffffffff) != 1))) {
+		/* Was "0x%048llx": a 48-character zero-padded field width,
+		 * clearly a typo for printing the full 64 bit value. */
+		HPSB_ERR("%s called with invalid alignment: 0x%016llx",
+			 __FUNCTION__, (unsigned long long)alignment);
+		return retval;
+	}
+
+	if (start == ~0ULL && end == ~0ULL) {
+		start = CSR1212_ALL_SPACE_BASE + 0xffff00000000ULL;  /* ohci1394.c limit */
+		end = CSR1212_ALL_SPACE_END;
+	}
+
+	if (((start|end) & ~align_mask) || (start >= end) || (end > 0x1000000000000ULL)) {
+		HPSB_ERR("%s called with invalid addresses (start = %012Lx    end = %012Lx)",
+			 __FUNCTION__, (unsigned long long)start, (unsigned long long)end);
+		return retval;
+	}
+
+	/* No cast needed on kmalloc in C; sizeof(*as) tracks the type. */
+	as = kmalloc(sizeof(*as), GFP_KERNEL);
+	if (as == NULL)
+		return retval;
+
+	INIT_LIST_HEAD(&as->host_list);
+	INIT_LIST_HEAD(&as->hl_list);
+	as->op = ops;
+	as->host = host;
+
+	write_lock_irqsave(&addr_space_lock, flags);
+
+	/* Walk adjacent pairs of registered regions and take the first
+	 * aligned gap that can hold @size bytes at or after @start.  The
+	 * dummy zero/max entries installed by init_hpsb_highlevel() ensure
+	 * every entry has a successor. */
+	list_for_each(entry, &host->addr_space) {
+		u64 a1sa, a1ea;
+		u64 a2sa, a2ea;
+
+		a1 = list_entry(entry, struct hpsb_address_serve, host_list);
+		a2 = list_entry(entry->next, struct hpsb_address_serve, host_list);
+
+		a1sa = a1->start & align_mask;
+		a1ea = (a1->end + alignment - 1) & align_mask;
+		a2sa = a2->start & align_mask;
+		a2ea = (a2->end + alignment - 1) & align_mask;
+
+		if ((a2sa - a1ea >= size) && (a2sa - start >= size) && (a2sa > start)) {
+			as->start = max(start, a1ea);
+			as->end = as->start + size;
+			list_add(&as->host_list, entry);
+			list_add_tail(&as->hl_list, &hl->addr_list);
+			retval = as->start;
+			break;
+		}
+	}
+
+	write_unlock_irqrestore(&addr_space_lock, flags);
+
+	if (retval == ~0ULL)
+		kfree(as);
+
+	return retval;
+}
+
+/* Register a fixed address range [start, end) for @hl on @host.  Addresses
+ * must be quadlet aligned, within the 48 bit space, and must not overlap any
+ * previously registered range.  Returns 1 on success, 0 on failure.  The
+ * list walk relies on the dummy zero/max entries so every node has a
+ * successor. */
+int hpsb_register_addrspace(struct hpsb_highlevel *hl, struct hpsb_host *host,
+                            struct hpsb_address_ops *ops, u64 start, u64 end)
+{
+        struct hpsb_address_serve *as;
+	struct list_head *lh;
+        int retval = 0;
+        unsigned long flags;
+
+        if (((start|end) & 3) || (start >= end) || (end > 0x1000000000000ULL)) {
+                HPSB_ERR("%s called with invalid addresses", __FUNCTION__);
+                return 0;
+        }
+
+        as = (struct hpsb_address_serve *)
+                kmalloc(sizeof(struct hpsb_address_serve), GFP_ATOMIC);
+        if (as == NULL) {
+                return 0;
+        }
+
+        INIT_LIST_HEAD(&as->host_list);
+        INIT_LIST_HEAD(&as->hl_list);
+        as->op = ops;
+        as->start = start;
+        as->end = end;
+	as->host = host;
+
+	write_lock_irqsave(&addr_space_lock, flags);
+
+	/* The host list is kept sorted by start address; insert the new
+	 * range into the first gap wide enough, or bail out on overlap. */
+	list_for_each(lh, &host->addr_space) {
+		struct hpsb_address_serve *as_this =
+			list_entry(lh, struct hpsb_address_serve, host_list);
+		struct hpsb_address_serve *as_next =
+			list_entry(lh->next, struct hpsb_address_serve, host_list);
+
+		if (as_this->end > as->start)
+			break;
+
+		if (as_next->start >= as->end) {
+			list_add(&as->host_list, lh);
+			list_add_tail(&as->hl_list, &hl->addr_list);
+			retval = 1;
+			break;
+		}
+	}
+	write_unlock_irqrestore(&addr_space_lock, flags);
+
+	if (retval == 0)
+		kfree(as);
+
+        return retval;
+}
+
+/* Remove the address range of @hl on @host that begins exactly at @start.
+ * Returns 1 if a matching range was found and deleted, 0 otherwise. */
+int hpsb_unregister_addrspace(struct hpsb_highlevel *hl, struct hpsb_host *host,
+                              u64 start)
+{
+        int retval = 0;
+        struct hpsb_address_serve *as;
+        struct list_head *lh, *next;
+        unsigned long flags;
+
+        write_lock_irqsave(&addr_space_lock, flags);
+
+	list_for_each_safe (lh, next, &hl->addr_list) {
+                as = list_entry(lh, struct hpsb_address_serve, hl_list);
+                if (as->start == start && as->host == host) {
+			__delete_addr(as);
+                        retval = 1;
+                        break;
+                }
+        }
+
+        write_unlock_irqrestore(&addr_space_lock, flags);
+
+        return retval;
+}
+
+/* Enable reception of isochronous @channel (0..63) on @host.  A per-channel
+ * reference count ensures the hardware is only told to listen on the first
+ * request.  Returns 0 or the driver's devctl() result; -EINVAL for a bad
+ * channel number. */
+int hpsb_listen_channel(struct hpsb_highlevel *hl, struct hpsb_host *host,
+                         unsigned int channel)
+{
+        if (channel > 63) {
+                HPSB_ERR("%s called with invalid channel", __FUNCTION__);
+                return -EINVAL;
+        }
+
+        if (host->iso_listen_count[channel]++ == 0) {
+                return host->driver->devctl(host, ISO_LISTEN_CHANNEL, channel);
+        }
+
+	return 0;
+}
+
+/* Drop one reference on isochronous @channel; the hardware is told to stop
+ * listening only when the count reaches zero.  Counterpart of
+ * hpsb_listen_channel(). */
+void hpsb_unlisten_channel(struct hpsb_highlevel *hl, struct hpsb_host *host,
+                           unsigned int channel)
+{
+        if (channel > 63) {
+                HPSB_ERR("%s called with invalid channel", __FUNCTION__);
+                return;
+        }
+
+        if (--host->iso_listen_count[channel] == 0) {
+                host->driver->devctl(host, ISO_UNLISTEN_CHANNEL, channel);
+        }
+}
+
+/* Seed a new host's address space list with two dummy sentinel entries at
+ * address 0 and at the 48 bit limit, so the gap-search loops in the
+ * addrspace registration functions always have valid neighbours. */
+static void init_hpsb_highlevel(struct hpsb_host *host)
+{
+	INIT_LIST_HEAD(&dummy_zero_addr.host_list);
+	INIT_LIST_HEAD(&dummy_zero_addr.hl_list);
+	INIT_LIST_HEAD(&dummy_max_addr.host_list);
+	INIT_LIST_HEAD(&dummy_max_addr.hl_list);
+
+	dummy_zero_addr.op = dummy_max_addr.op = &dummy_ops;
+
+	dummy_zero_addr.start = dummy_zero_addr.end = 0;
+	dummy_max_addr.start = dummy_max_addr.end = ((u64) 1) << 48;
+
+	list_add_tail(&dummy_zero_addr.host_list, &host->addr_space);
+	list_add_tail(&dummy_max_addr.host_list, &host->addr_space);
+}
+
+/* Announce a newly added host to every registered highlevel driver, then
+ * regenerate its config ROM image if any driver modified it. */
+void highlevel_add_host(struct hpsb_host *host)
+{
+        struct hpsb_highlevel *hl;
+
+	init_hpsb_highlevel(host);
+
+	down_read(&hl_drivers_sem);
+        list_for_each_entry(hl, &hl_drivers, hl_list) {
+		if (hl->add_host)
+			hl->add_host(host);
+        }
+	up_read(&hl_drivers_sem);
+	/* After list_for_each_entry() terminates normally, hl points at the
+	 * container of the list head, not at a real hpsb_highlevel, so
+	 * hl->name must not be dereferenced here (the old message did and
+	 * read through an invalid pointer).  Identify the host by id only. */
+	if (host->update_config_rom) {
+		if (hpsb_update_config_rom_image(host) < 0)
+			HPSB_ERR("Failed to generate Configuration ROM image for "
+				 "host fw-host%d", host->id);
+	}
+}
+
+/* Detach every registered highlevel driver from a host that is going away.
+ * Config ROM regeneration is suppressed (update_cr == 0) since the host is
+ * being removed anyway. */
+void highlevel_remove_host(struct hpsb_host *host)
+{
+        struct hpsb_highlevel *hl;
+
+	down_read(&hl_drivers_sem);
+	list_for_each_entry(hl, &hl_drivers, hl_list)
+		__unregister_host(hl, host, 0);
+	up_read(&hl_drivers_sem);
+}
+
+/* Notify all drivers on the irq list that @host saw a bus reset.  Runs under
+ * the irq read lock; per highlevel.h this may be called from interrupt or
+ * bottom-half context. */
+void highlevel_host_reset(struct hpsb_host *host)
+{
+        struct hpsb_highlevel *hl;
+
+	read_lock(&hl_irqs_lock);
+	list_for_each_entry(hl, &hl_irqs, irq_list) {
+                if (hl->host_reset)
+                        hl->host_reset(host);
+        }
+	read_unlock(&hl_irqs_lock);
+}
+
+/* Dispatch a received isochronous packet to every driver providing an
+ * iso_receive hook.  The channel number is extracted from bits 13..8 of the
+ * first packet quadlet. */
+void highlevel_iso_receive(struct hpsb_host *host, void *data, size_t length)
+{
+        struct hpsb_highlevel *hl;
+        int channel = (((quadlet_t *)data)[0] >> 8) & 0x3f;
+
+        read_lock(&hl_irqs_lock);
+	list_for_each_entry(hl, &hl_irqs, irq_list) {
+                if (hl->iso_receive)
+                        hl->iso_receive(host, channel, data, length);
+        }
+        read_unlock(&hl_irqs_lock);
+}
+
+/* Dispatch an FCP command/response write to every driver providing an
+ * fcp_request hook.  The cts field is taken from the top bits of the first
+ * data quadlet; direction distinguishes FCP_COMMAND from FCP_RESPONSE. */
+void highlevel_fcp_request(struct hpsb_host *host, int nodeid, int direction,
+			   void *data, size_t length)
+{
+        struct hpsb_highlevel *hl;
+        int cts = ((quadlet_t *)data)[0] >> 4;
+
+        read_lock(&hl_irqs_lock);
+	list_for_each_entry(hl, &hl_irqs, irq_list) {
+                if (hl->fcp_request)
+                        hl->fcp_request(host, nodeid, direction, cts, data,
+					length);
+        }
+        read_unlock(&hl_irqs_lock);
+}
+
+/* Route an incoming read request to the registered address handlers.  The
+ * per-host range list is sorted by start address, so the walk stops at the
+ * first range beginning past @addr.  A read spanning several adjacent
+ * ranges is split across their handlers; the request fails with
+ * RCODE_ADDRESS_ERROR if any part is unclaimed, and RCODE_TYPE_ERROR if a
+ * matching range has no read op. */
+int highlevel_read(struct hpsb_host *host, int nodeid, void *data,
+                   u64 addr, unsigned int length, u16 flags)
+{
+        struct hpsb_address_serve *as;
+        unsigned int partlength;
+        int rcode = RCODE_ADDRESS_ERROR;
+
+        read_lock(&addr_space_lock);
+
+	list_for_each_entry(as, &host->addr_space, host_list) {
+		if (as->start > addr)
+			break;
+
+                if (as->end > addr) {
+                        partlength = min(as->end - addr, (u64) length);
+
+                        if (as->op->read) {
+                                rcode = as->op->read(host, nodeid, data,
+						     addr, partlength, flags);
+                        } else {
+                                rcode = RCODE_TYPE_ERROR;
+                        }
+
+			data += partlength;
+                        length -= partlength;
+                        addr += partlength;
+
+                        if ((rcode != RCODE_COMPLETE) || !length) {
+                                break;
+                        }
+                }
+        }
+
+        read_unlock(&addr_space_lock);
+
+        /* Leftover length means part of the request hit no handler. */
+        if (length && (rcode == RCODE_COMPLETE)) {
+                rcode = RCODE_ADDRESS_ERROR;
+        }
+
+        return rcode;
+}
+
+/* Route an incoming write request to the registered address handlers.
+ * Mirrors highlevel_read(): writes spanning adjacent ranges are split, an
+ * unclaimed portion yields RCODE_ADDRESS_ERROR, and a range without a write
+ * op yields RCODE_TYPE_ERROR. */
+int highlevel_write(struct hpsb_host *host, int nodeid, int destid,
+		    void *data, u64 addr, unsigned int length, u16 flags)
+{
+        struct hpsb_address_serve *as;
+        unsigned int partlength;
+        int rcode = RCODE_ADDRESS_ERROR;
+
+        read_lock(&addr_space_lock);
+
+	list_for_each_entry(as, &host->addr_space, host_list) {
+		if (as->start > addr)
+			break;
+
+                if (as->end > addr) {
+                        partlength = min(as->end - addr, (u64) length);
+
+                        if (as->op->write) {
+                                rcode = as->op->write(host, nodeid, destid,
+						      data, addr, partlength, flags);
+                        } else {
+                                rcode = RCODE_TYPE_ERROR;
+                        }
+
+			data += partlength;
+                        length -= partlength;
+                        addr += partlength;
+
+                        if ((rcode != RCODE_COMPLETE) || !length) {
+                                break;
+                        }
+                }
+        }
+
+        read_unlock(&addr_space_lock);
+
+        /* Leftover length means part of the request hit no handler. */
+        if (length && (rcode == RCODE_COMPLETE)) {
+                rcode = RCODE_ADDRESS_ERROR;
+        }
+
+        return rcode;
+}
+
+
+/* Route an incoming 32 bit lock request to the single address range covering
+ * @addr.  Unlike reads/writes, lock transactions are never split across
+ * ranges.  RCODE_TYPE_ERROR if the range has no lock op,
+ * RCODE_ADDRESS_ERROR if no range matches. */
+int highlevel_lock(struct hpsb_host *host, int nodeid, quadlet_t *store,
+                   u64 addr, quadlet_t data, quadlet_t arg, int ext_tcode, u16 flags)
+{
+        struct hpsb_address_serve *as;
+        int rcode = RCODE_ADDRESS_ERROR;
+
+        read_lock(&addr_space_lock);
+
+	list_for_each_entry(as, &host->addr_space, host_list) {
+		if (as->start > addr)
+			break;
+
+                if (as->end > addr) {
+                        if (as->op->lock) {
+                                rcode = as->op->lock(host, nodeid, store, addr,
+                                                     data, arg, ext_tcode, flags);
+                        } else {
+                                rcode = RCODE_TYPE_ERROR;
+                        }
+
+                        break;
+                }
+        }
+
+        read_unlock(&addr_space_lock);
+
+        return rcode;
+}
+
+/* 64 bit variant of highlevel_lock(): routes a lock64 request to the single
+ * matching address range.  RCODE_TYPE_ERROR if the range has no lock64 op,
+ * RCODE_ADDRESS_ERROR if no range matches. */
+int highlevel_lock64(struct hpsb_host *host, int nodeid, octlet_t *store,
+                     u64 addr, octlet_t data, octlet_t arg, int ext_tcode, u16 flags)
+{
+        struct hpsb_address_serve *as;
+        int rcode = RCODE_ADDRESS_ERROR;
+
+        read_lock(&addr_space_lock);
+
+	list_for_each_entry(as, &host->addr_space, host_list) {
+		if (as->start > addr)
+			break;
+
+                if (as->end > addr) {
+                        if (as->op->lock64) {
+                                rcode = as->op->lock64(host, nodeid, store,
+                                                       addr, data, arg,
+                                                       ext_tcode, flags);
+                        } else {
+                                rcode = RCODE_TYPE_ERROR;
+                        }
+
+                        break;
+                }
+        }
+
+        read_unlock(&addr_space_lock);
+
+        return rcode;
+}
diff --git a/drivers/ieee1394/highlevel.h b/drivers/ieee1394/highlevel.h
new file mode 100644
index 000000000000..e119fb87e5b5
--- /dev/null
+++ b/drivers/ieee1394/highlevel.h
@@ -0,0 +1,190 @@
+
+#ifndef IEEE1394_HIGHLEVEL_H
+#define IEEE1394_HIGHLEVEL_H
+
+
+/* One registered address range: linked into both the owning host's sorted
+ * range list and the owning highlevel driver's range list. */
+struct hpsb_address_serve {
+        struct list_head host_list; /* per host list */
+
+        struct list_head hl_list; /* hpsb_highlevel list */
+
+        struct hpsb_address_ops *op; /* handlers invoked for this range */
+
+	struct hpsb_host *host; /* host this range was registered on */
+
+        /* first address handled and first address behind, quadlet aligned */
+        u64 start, end;
+};
+
+
+/*
+ * The above structs are internal to highlevel driver handling.  Only the
+ * following structures are of interest to actual highlevel drivers.
+ */
+
+struct hpsb_highlevel {
+	struct module *owner;
+	const char *name;
+
+        /* Any of the following pointers can legally be NULL, except for
+         * iso_receive which can only be NULL when you don't request
+         * channels. */
+
+        /* New host initialized.  Will also be called during
+         * hpsb_register_highlevel for all hosts already installed. */
+        void (*add_host) (struct hpsb_host *host);
+
+        /* Host about to be removed.  Will also be called during
+         * hpsb_unregister_highlevel once for each host. */
+        void (*remove_host) (struct hpsb_host *host);
+
+        /* Host experienced bus reset with possible configuration changes.
+	 * Note that this one may occur during interrupt/bottom half handling.
+	 * You can not expect to be able to do stock hpsb_reads. */
+        void (*host_reset) (struct hpsb_host *host);
+
+        /* An isochronous packet was received.  Channel contains the channel
+         * number for your convenience, it is also contained in the included
+         * packet header (first quadlet, CRCs are missing).  You may get called
+         * for channel/host combinations you did not request. */
+        void (*iso_receive) (struct hpsb_host *host, int channel,
+                             quadlet_t *data, size_t length);
+
+        /* A write request was received on either the FCP_COMMAND (direction =
+         * 0) or the FCP_RESPONSE (direction = 1) register.  The cts arg
+         * contains the cts field (first byte of data). */
+        void (*fcp_request) (struct hpsb_host *host, int nodeid, int direction,
+                             int cts, u8 *data, size_t length);
+
+	/* These are initialized by the subsystem when the
+	 * hpsb_highlevel is registered. */
+	struct list_head hl_list;   /* on the global hl_drivers list */
+	struct list_head irq_list;  /* on the global hl_irqs list */
+	struct list_head addr_list; /* address ranges owned by this driver */
+
+	struct list_head host_info_list; /* per-host private data entries */
+	rwlock_t host_info_lock;         /* protects host_info_list */
+};
+
+struct hpsb_address_ops {
+        /*
+         * Null function pointers will make the respective operation complete
+         * with RCODE_TYPE_ERROR.  Makes for easy to implement read-only
+         * registers (just leave everything but read NULL).
+         *
+         * All functions shall return appropriate IEEE 1394 rcodes.
+         */
+
+        /* These functions have to implement block reads for themselves. */
+        /* These functions either return a response code
+           or a negative number. In the first case a response will be generated; in the
+           latter case, no response will be sent and the driver that handled the request
+           will send the response itself.
+        */
+        int (*read) (struct hpsb_host *host, int nodeid, quadlet_t *buffer,
+                     u64 addr, size_t length, u16 flags);
+        int (*write) (struct hpsb_host *host, int nodeid, int destid,
+		      quadlet_t *data, u64 addr, size_t length, u16 flags);
+
+        /* Lock transactions: write results of ext_tcode operation into
+         * *store. */
+        int (*lock) (struct hpsb_host *host, int nodeid, quadlet_t *store,
+                     u64 addr, quadlet_t data, quadlet_t arg, int ext_tcode, u16 flags);
+        int (*lock64) (struct hpsb_host *host, int nodeid, octlet_t *store,
+                       u64 addr, octlet_t data, octlet_t arg, int ext_tcode, u16 flags);
+};
+
+
+void highlevel_add_host(struct hpsb_host *host);
+void highlevel_remove_host(struct hpsb_host *host);
+void highlevel_host_reset(struct hpsb_host *host);
+
+
+/* these functions are called to handle transactions. They are called, when
+   a packet arrives. The flags argument contains the second word of the first header
+   quadlet of the incoming packet (containing transaction label, retry code,
+   transaction code and priority). These functions either return a response code
+   or a negative number. In the first case a response will be generated; in the
+   latter case, no response will be sent and the driver that handled the request
+   will send the response itself.
+*/
+int highlevel_read(struct hpsb_host *host, int nodeid, void *data,
+                   u64 addr, unsigned int length, u16 flags);
+int highlevel_write(struct hpsb_host *host, int nodeid, int destid,
+		    void *data, u64 addr, unsigned int length, u16 flags);
+int highlevel_lock(struct hpsb_host *host, int nodeid, quadlet_t *store,
+                   u64 addr, quadlet_t data, quadlet_t arg, int ext_tcode, u16 flags);
+int highlevel_lock64(struct hpsb_host *host, int nodeid, octlet_t *store,
+                     u64 addr, octlet_t data, octlet_t arg, int ext_tcode, u16 flags);
+
+void highlevel_iso_receive(struct hpsb_host *host, void *data,
+                           size_t length);
+void highlevel_fcp_request(struct hpsb_host *host, int nodeid, int direction,
+                           void *data, size_t length);
+
+
+/*
+ * Register highlevel driver.  The name pointer has to stay valid at all times
+ * because the string is not copied.
+ */
+void hpsb_register_highlevel(struct hpsb_highlevel *hl);
+void hpsb_unregister_highlevel(struct hpsb_highlevel *hl);
+
+/*
+ * Register handlers for host address spaces.  Start and end are 48 bit pointers
+ * and have to be quadlet aligned (end points to the first address behind the
+ * handled addresses.  This function can be called multiple times for a single
+ * hpsb_highlevel to implement sparse register sets.  The requested region must
+ * not overlap any previously allocated region, otherwise registering will fail.
+ *
+ * It returns true for successful allocation.  There is no unregister function,
+ * all address spaces are deallocated together with the hpsb_highlevel.
+ */
+u64 hpsb_allocate_and_register_addrspace(struct hpsb_highlevel *hl,
+					 struct hpsb_host *host,
+					 struct hpsb_address_ops *ops,
+					 u64 size, u64 alignment,
+					 u64 start, u64 end);
+int hpsb_register_addrspace(struct hpsb_highlevel *hl, struct hpsb_host *host,
+                            struct hpsb_address_ops *ops, u64 start, u64 end);
+
+int hpsb_unregister_addrspace(struct hpsb_highlevel *hl, struct hpsb_host *host,
+                              u64 start);
+
+/*
+ * Enable or disable receiving a certain isochronous channel through the
+ * iso_receive op.
+ */
+int hpsb_listen_channel(struct hpsb_highlevel *hl, struct hpsb_host *host,
+                         unsigned int channel);
+void hpsb_unlisten_channel(struct hpsb_highlevel *hl, struct hpsb_host *host,
+                           unsigned int channel);
+
+
+/* Retrieve a hostinfo pointer bound to this driver/host */
+void *hpsb_get_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host);
+
+/* Allocate a hostinfo pointer of data_size bound to this driver/host */
+void *hpsb_create_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host,
+			   size_t data_size);
+
+/* Free and remove the hostinfo pointer bound to this driver/host */
+void hpsb_destroy_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host);
+
+/* Set an alternate lookup key for the hostinfo bound to this driver/host */
+void hpsb_set_hostinfo_key(struct hpsb_highlevel *hl, struct hpsb_host *host, unsigned long key);
+
+/* Retrieve the alternate lookup key for the hostinfo bound to this driver/host */
+unsigned long hpsb_get_hostinfo_key(struct hpsb_highlevel *hl, struct hpsb_host *host);
+
+/* Retrieve a hostinfo pointer bound to this driver using its alternate key */
+void *hpsb_get_hostinfo_bykey(struct hpsb_highlevel *hl, unsigned long key);
+
+/* Set the hostinfo pointer to something useful. Usually follows a call to
+ * hpsb_create_hostinfo, where the size is 0. */
+int hpsb_set_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host, void *data);
+
+/* Retrieve hpsb_host using a highlevel handle and a key */
+struct hpsb_host *hpsb_get_host_bykey(struct hpsb_highlevel *hl, unsigned long key);
+
+#endif /* IEEE1394_HIGHLEVEL_H */
diff --git a/drivers/ieee1394/hosts.c b/drivers/ieee1394/hosts.c
new file mode 100644
index 000000000000..c502c6e9c440
--- /dev/null
+++ b/drivers/ieee1394/hosts.c
@@ -0,0 +1,233 @@
+/*
+ * IEEE 1394 for Linux
+ *
+ * Low level (host adapter) management.
+ *
+ * Copyright (C) 1999 Andreas E. Bombe
+ * Copyright (C) 1999 Emanuel Pirker
+ *
+ * This code is licensed under the GPL.  See the file COPYING in the root
+ * directory of the kernel sources for details.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/timer.h>
+
+#include "csr1212.h"
+#include "ieee1394.h"
+#include "ieee1394_types.h"
+#include "hosts.h"
+#include "ieee1394_core.h"
+#include "highlevel.h"
+#include "nodemgr.h"
+#include "csr.h"
+#include "config_roms.h"
+
+
+/* Deferred-work handler that bumps the config ROM generation, regenerates
+ * the CSR image and issues a short bus reset.  Scheduled from
+ * hpsb_update_config_rom_image(). */
+static void delayed_reset_bus(void * __reset_info)
+{
+	struct hpsb_host *host = (struct hpsb_host*)__reset_info;
+	int generation = host->csr.generation + 1;
+
+	/* The generation field rolls over to 2 rather than 0 per IEEE
+	 * 1394a-2000. */
+	if (generation > 0xf || generation < 2)
+		generation = 2;
+
+	CSR_SET_BUS_INFO_GENERATION(host->csr.rom, generation);
+	if (csr1212_generate_csr_image(host->csr.rom) != CSR1212_SUCCESS) {
+		/* CSR image creation failed, reset generation field and do not
+		 * issue a bus reset. */
+		CSR_SET_BUS_INFO_GENERATION(host->csr.rom, host->csr.generation);
+		return;
+	}
+
+	host->csr.generation = generation;
+
+	host->update_config_rom = 0;
+	if (host->driver->set_hw_config_rom)
+		host->driver->set_hw_config_rom(host, host->csr.rom->bus_info_data);
+
+	/* Remember when this generation number was used (60 s rule). */
+	host->csr.gen_timestamp[host->csr.generation] = jiffies;
+	hpsb_reset_bus(host, SHORT_RESET);
+}
+
+/* Stub transmit op for the dummy driver installed on host shutdown;
+ * silently discards the packet. */
+static int dummy_transmit_packet(struct hpsb_host *h, struct hpsb_packet *p)
+{
+        return 0;
+}
+
+/* Stub devctl op for the dummy driver; fails every command. */
+static int dummy_devctl(struct hpsb_host *h, enum devctl_cmd c, int arg)
+{
+        return -1;
+}
+
+/* Stub isoctl op for the dummy driver; fails every command. */
+static int dummy_isoctl(struct hpsb_iso *iso, enum isoctl_cmd command, unsigned long arg)
+{
+	return -1;
+}
+
+/* Placeholder driver swapped in by hpsb_remove_host() so late callers hit
+ * harmless stubs instead of the departing hardware driver. */
+static struct hpsb_host_driver dummy_driver = {
+        .transmit_packet = dummy_transmit_packet,
+        .devctl =          dummy_devctl,
+	.isoctl =          dummy_isoctl
+};
+
+/* nodemgr_for_each_host() callback: returns 1 (stop iteration) when some
+ * host already uses the candidate id in *__data, 0 otherwise.  Used to find
+ * the lowest free host number in hpsb_alloc_host(). */
+static int alloc_hostnum_cb(struct hpsb_host *host, void *__data)
+{
+	int *hostnum = __data;
+
+	if (host->id == *hostnum)
+		return 1;
+
+	return 0;
+}
+
+/**
+ * hpsb_alloc_host - allocate a new host controller.
+ * @drv: the driver that will manage the host controller
+ * @extra: number of extra bytes to allocate for the driver
+ *
+ * Allocate a &hpsb_host and initialize the general subsystem specific
+ * fields.  If the driver needs to store per host data, as drivers
+ * usually do, the amount of memory required can be specified by the
+ * @extra parameter.  Once allocated, the driver should initialize the
+ * driver specific parts, enable the controller and make it available
+ * to the general subsystem using hpsb_add_host().
+ *
+ * Return Value: a pointer to the &hpsb_host if successful, %NULL if
+ * no memory was available.
+ */
+/* Serializes host-number allocation and device registration below. */
+static DECLARE_MUTEX(host_num_alloc);
+
+struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra,
+				  struct device *dev)
+{
+        struct hpsb_host *h;
+	int i;
+	int hostnum = 0;
+
+        h = kmalloc(sizeof(struct hpsb_host) + extra, SLAB_KERNEL);
+        if (!h) return NULL;
+        memset(h, 0, sizeof(struct hpsb_host) + extra);
+
+	h->csr.rom = csr1212_create_csr(&csr_bus_ops, CSR_BUS_INFO_SIZE, h);
+	if (!h->csr.rom) {
+		kfree(h);
+		return NULL;
+	}
+
+	/* Driver-private area lives directly behind the hpsb_host struct. */
+	h->hostdata = h + 1;
+        h->driver = drv;
+
+	skb_queue_head_init(&h->pending_packet_queue);
+	INIT_LIST_HEAD(&h->addr_space);
+
+	/* Pretend generations 2..15 were last used over a minute ago so the
+	 * 60 second reuse rule does not delay the first config ROM update. */
+	for (i = 2; i < 16; i++)
+		h->csr.gen_timestamp[i] = jiffies - 60 * HZ;
+
+	for (i = 0; i < ARRAY_SIZE(h->tpool); i++)
+		HPSB_TPOOL_INIT(&h->tpool[i]);
+
+	atomic_set(&h->generation, 0);
+
+	INIT_WORK(&h->delayed_reset, delayed_reset_bus, h);
+	
+	init_timer(&h->timeout);
+	h->timeout.data = (unsigned long) h;
+	h->timeout.function = abort_timedouts;
+	h->timeout_interval = HZ / 20; // 50ms by default
+
+        h->topology_map = h->csr.topology_map + 3;
+        h->speed_map = (u8 *)(h->csr.speed_map + 2);
+
+	down(&host_num_alloc);
+
+	/* Find the lowest host number not taken by an existing host. */
+	while (nodemgr_for_each_host(&hostnum, alloc_hostnum_cb))
+		hostnum++;
+
+	h->id = hostnum;
+
+	memcpy(&h->device, &nodemgr_dev_template_host, sizeof(h->device));
+	h->device.parent = dev;
+	snprintf(h->device.bus_id, BUS_ID_SIZE, "fw-host%d", h->id);
+
+	h->class_dev.dev = &h->device;
+	h->class_dev.class = &hpsb_host_class;
+	snprintf(h->class_dev.class_id, BUS_ID_SIZE, "fw-host%d", h->id);
+
+	/* NOTE(review): device_register()/class_device_register() return
+	 * values are ignored here — failure would leave a half-registered
+	 * host; worth confirming/fixing upstream. */
+	device_register(&h->device);
+	class_device_register(&h->class_dev);
+	get_device(&h->device);
+
+	up(&host_num_alloc);
+
+	return h;
+}
+
+/* Make an allocated host visible to the subsystem: install the default
+ * config ROM entries, add extra config ROM entries, and announce the host
+ * to all highlevel drivers.  Returns 0 on success, -ENOMEM otherwise. */
+int hpsb_add_host(struct hpsb_host *host)
+{
+	if (hpsb_default_host_entry(host))
+		return -ENOMEM;
+
+	hpsb_add_extra_config_roms(host);
+
+	highlevel_add_host(host);
+
+	return 0;
+}
+
+/* Tear a host down: mark it shut down, flush pending delayed work, swap in
+ * the dummy driver so stragglers hit harmless stubs, detach highlevel
+ * drivers and unregister the device objects. */
+void hpsb_remove_host(struct hpsb_host *host)
+{
+        host->is_shutdown = 1;
+
+	cancel_delayed_work(&host->delayed_reset);
+	flush_scheduled_work();
+
+        host->driver = &dummy_driver;
+
+        highlevel_remove_host(host);
+
+	hpsb_remove_extra_config_roms(host);
+
+	class_device_unregister(&host->class_dev);
+	device_unregister(&host->device);
+}
+
+/* Schedule regeneration of the host's config ROM image followed by a bus
+ * reset, honouring the IEEE 1394a-2000 rule that a generation number may
+ * not be reused within 60 seconds.  Returns -EINVAL when no update is
+ * pending, 0 when the delayed work was queued. */
+int hpsb_update_config_rom_image(struct hpsb_host *host)
+{
+	unsigned long reset_delay;
+	int next_gen = host->csr.generation + 1;
+
+	if (!host->update_config_rom)
+		return -EINVAL;
+
+	/* Generation wraps from 15 back to 2 (see delayed_reset_bus). */
+	if (next_gen > 0xf)
+		next_gen = 2;
+
+	/* Stop the delayed interrupt, we're about to change the config rom and
+	 * it would be a waste to do a bus reset twice. */
+	cancel_delayed_work(&host->delayed_reset);
+
+	/* IEEE 1394a-2000 prohibits using the same generation number
+	 * twice in a 60 second period. */
+	if (jiffies - host->csr.gen_timestamp[next_gen] < 60 * HZ)
+		/* Wait 60 seconds from the last time this generation number was
+		 * used. */
+		reset_delay = (60 * HZ) + host->csr.gen_timestamp[next_gen] - jiffies;
+	else
+		/* Wait 1 second in case some other code wants to change the
+		 * Config ROM in the near future. */
+		reset_delay = HZ;
+
+	PREPARE_WORK(&host->delayed_reset, delayed_reset_bus, host);
+	schedule_delayed_work(&host->delayed_reset, reset_delay);
+
+	return 0;
+}
diff --git a/drivers/ieee1394/hosts.h b/drivers/ieee1394/hosts.h
new file mode 100644
index 000000000000..739e76840d51
--- /dev/null
+++ b/drivers/ieee1394/hosts.h
@@ -0,0 +1,215 @@
+#ifndef _IEEE1394_HOSTS_H
+#define _IEEE1394_HOSTS_H
+
+#include <linux/device.h>
+#include <linux/wait.h>
+#include <linux/list.h>
+#include <linux/timer.h>
+#include <linux/skbuff.h>
+
+#include <asm/semaphore.h>
+
+#include "ieee1394_types.h"
+#include "csr.h"
+
+
+struct hpsb_packet;
+struct hpsb_iso;
+
+/* State of one IEEE 1394 host adapter as seen by the core subsystem. */
+struct hpsb_host {
+        struct list_head host_list;
+
+        void *hostdata; /* driver-private area allocated behind the struct */
+
+        atomic_t generation; /* bus generation, bumped on each bus reset */
+
+	struct sk_buff_head pending_packet_queue;
+
+	struct timer_list timeout; /* runs abort_timedouts */
+	unsigned long timeout_interval;
+
+        unsigned char iso_listen_count[64]; /* per-channel listen refcounts */
+
+        int node_count; /* number of identified nodes on this bus */
+        int selfid_count; /* total number of SelfIDs received */
+	int nodes_active; /* number of nodes that are actually active */
+
+        nodeid_t node_id; /* node ID of this host */
+        nodeid_t irm_id; /* ID of this bus' isochronous resource manager */
+        nodeid_t busmgr_id; /* ID of this bus' bus manager */
+
+        /* this nodes state */
+        unsigned in_bus_reset:1;
+        unsigned is_shutdown:1;
+
+        /* this nodes' duties on the bus */
+        unsigned is_root:1;
+        unsigned is_cycmst:1;
+        unsigned is_irm:1;
+        unsigned is_busmgr:1;
+
+        int reset_retries;
+        quadlet_t *topology_map;
+        u8 *speed_map;
+        struct csr_control csr;
+
+	/* Per node tlabel pool allocation */
+	struct hpsb_tlabel_pool tpool[64];
+
+        struct hpsb_host_driver *driver;
+
+	struct pci_dev *pdev;
+
+	int id; /* subsystem-wide host number ("fw-host%d") */
+
+	struct device device;
+	struct class_device class_dev;
+
+	int update_config_rom; /* set when the ROM image needs regenerating */
+	struct work_struct delayed_reset; /* runs delayed_reset_bus */
+
+	unsigned int config_roms;
+
+	struct list_head addr_space; /* sorted hpsb_address_serve ranges */
+};
+
+
+
+/* Commands passed to a host driver's devctl() op. */
+enum devctl_cmd {
+        /* Host is requested to reset its bus and cancel all outstanding async
+         * requests.  If arg == 1, it shall also attempt to become root on the
+         * bus.  Return void. */
+        RESET_BUS,
+
+        /* Arg is void, return value is the hardware cycle counter value. */
+        GET_CYCLE_COUNTER,
+
+        /* Set the hardware cycle counter to the value in arg, return void.
+         * FIXME - setting is probably not required. */
+        SET_CYCLE_COUNTER,
+
+        /* Configure hardware for new bus ID in arg, return void. */
+        SET_BUS_ID,
+
+        /* If arg true, start sending cycle start packets, stop if arg == 0.
+         * Return void. */
+        ACT_CYCLE_MASTER,
+
+        /* Cancel all outstanding async requests without resetting the bus.
+         * Return void. */
+        CANCEL_REQUESTS,
+
+        /* Start or stop receiving isochronous channel in arg.  Return void.
+         * This acts as an optimization hint, hosts are not required not to
+         * listen on unrequested channels. */
+        ISO_LISTEN_CHANNEL,
+        ISO_UNLISTEN_CHANNEL
+};
+
+/* Commands passed to a host driver's isoctl() op (rawiso API). */
+enum isoctl_cmd {
+	/* rawiso API - see iso.h for the meanings of these commands
+	   (they correspond exactly to the hpsb_iso_* API functions)
+	 * INIT = allocate resources
+	 * START = begin transmission/reception
+	 * STOP = halt transmission/reception
+	 * QUEUE/RELEASE = produce/consume packets
+	 * SHUTDOWN = deallocate resources
+	 */
+
+	XMIT_INIT,
+	XMIT_START,
+	XMIT_STOP,
+	XMIT_QUEUE,
+	XMIT_SHUTDOWN,
+
+	RECV_INIT,
+	RECV_LISTEN_CHANNEL,   /* multi-channel only */
+	RECV_UNLISTEN_CHANNEL, /* multi-channel only */
+	RECV_SET_CHANNEL_MASK, /* multi-channel only; arg is a *u64 */
+	RECV_START,
+	RECV_STOP,
+	RECV_RELEASE,
+	RECV_SHUTDOWN,
+	RECV_FLUSH
+};
+
+/* Bus reset flavours accepted by hpsb_reset_bus(). */
+enum reset_types {
+        /* 166 microsecond reset -- only type of reset available on
+           non-1394a capable IEEE 1394 controllers */
+        LONG_RESET,
+
+        /* Short (arbitrated) reset -- only available on 1394a capable
+           IEEE 1394 capable controllers */
+        SHORT_RESET,
+
+	/* Variants that set force_root before issuing the bus reset */
+	LONG_RESET_FORCE_ROOT, SHORT_RESET_FORCE_ROOT,
+
+	/* Variants that clear force_root before issuing the bus reset */
+	LONG_RESET_NO_FORCE_ROOT, SHORT_RESET_NO_FORCE_ROOT
+};
+
+/* Operations a low-level (hardware) host driver provides to the core. */
+struct hpsb_host_driver {
+	struct module *owner;
+	const char *name;
+
+	/* The hardware driver may optionally support a function that is used
+	 * to set the hardware ConfigROM if the hardware supports handling
+	 * reads to the ConfigROM on its own. */
+	void (*set_hw_config_rom) (struct hpsb_host *host, quadlet_t *config_rom);
+
+        /* This function shall implement packet transmission based on
+         * packet->type.  It shall CRC both parts of the packet (unless
+         * packet->type == raw) and do byte-swapping as necessary or instruct
+         * the hardware to do so.  It can return immediately after the packet
+         * was queued for sending.  After sending, hpsb_sent_packet() has to be
+         * called.  Return 0 on success, negative errno on failure.
+         * NOTE: The function must be callable in interrupt context.
+         */
+        int (*transmit_packet) (struct hpsb_host *host,
+                                struct hpsb_packet *packet);
+
+        /* This function requests miscellaneous services from the driver, see
+         * above for command codes and expected actions.  Return -1 for unknown
+         * command, though that should never happen.
+         */
+        int (*devctl) (struct hpsb_host *host, enum devctl_cmd command, int arg);
+
+	 /* ISO transmission/reception functions. Return 0 on success, -1
+	  * (or -EXXX errno code) on failure. If the low-level driver does not
+	  * support the new ISO API, set isoctl to NULL.
+	  */
+	int (*isoctl) (struct hpsb_iso *iso, enum isoctl_cmd command, unsigned long arg);
+
+        /* This function is mainly to redirect local CSR reads/locks to the iso
+         * management registers (bus manager id, bandwidth available, channels
+         * available) to the hardware registers in OHCI.  reg is 0,1,2,3 for bus
+         * mgr, bwdth avail, ch avail hi, ch avail lo respectively (the same ids
+         * as OHCI uses).  data and compare are the new data and expected data
+         * respectively, return value is the old value.
+         */
+        quadlet_t (*hw_csr_reg) (struct hpsb_host *host, int reg,
+                                 quadlet_t data, quadlet_t compare);
+};
+
+
+struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra,
+				  struct device *dev);
+int hpsb_add_host(struct hpsb_host *host);
+void hpsb_remove_host(struct hpsb_host *h);
+
+/* The following 2 functions are deprecated and will be removed when the
+ * raw1394/libraw1394 update is complete. */
+int hpsb_update_config_rom(struct hpsb_host *host,
+      const quadlet_t *new_rom, size_t size, unsigned char rom_version);
+int hpsb_get_config_rom(struct hpsb_host *host, quadlet_t *buffer,
+      size_t buffersize, size_t *rom_size, unsigned char *rom_version);
+
+/* Updates the configuration rom image of a host.  rom_version must be the
+ * current version, otherwise it will fail with return value -1. If this
+ * host does not support config-rom-update, it will return -EINVAL.
+ * Return value 0 indicates success.
+ */
+int hpsb_update_config_rom_image(struct hpsb_host *host);
+
+#endif /* _IEEE1394_HOSTS_H */
diff --git a/drivers/ieee1394/ieee1394-ioctl.h b/drivers/ieee1394/ieee1394-ioctl.h
new file mode 100644
index 000000000000..f92b566363d5
--- /dev/null
+++ b/drivers/ieee1394/ieee1394-ioctl.h
@@ -0,0 +1,111 @@
+/* Base file for all ieee1394 ioctl's. Linux-1394 has allocated base '#'
+ * with a range of 0x00-0x3f. */
+
+#ifndef __IEEE1394_IOCTL_H
+#define __IEEE1394_IOCTL_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+
+/* AMDTP Gets 6 */
+#define AMDTP_IOC_CHANNEL	_IOW('#', 0x00, struct amdtp_ioctl)
+#define AMDTP_IOC_PLUG		_IOW('#', 0x01, struct amdtp_ioctl)
+#define AMDTP_IOC_PING		_IOW('#', 0x02, struct amdtp_ioctl)
+#define AMDTP_IOC_ZAP		_IO ('#', 0x03)
+
+
+/* DV1394 Gets 10 */
+
+/* Get the driver ready to transmit video.  pass a struct dv1394_init* as
+ * the parameter (see below), or NULL to get default parameters */
+#define DV1394_IOC_INIT			_IOW('#', 0x06, struct dv1394_init)
+
+/* Stop transmitting video and free the ringbuffer */
+#define DV1394_IOC_SHUTDOWN		_IO ('#', 0x07)
+
+/* Submit N new frames to be transmitted, where the index of the first new
+ * frame is first_clear_buffer, and the index of the last new frame is
+ * (first_clear_buffer + N) % n_frames */
+#define DV1394_IOC_SUBMIT_FRAMES	_IO ('#', 0x08)
+
+/* Block until N buffers are clear (pass N as the parameter) Because we
+ * re-transmit the last frame on underrun, there will at most be n_frames
+ * - 1 clear frames at any time */
+#define DV1394_IOC_WAIT_FRAMES		_IO ('#', 0x09)
+
+/* Capture new frames that have been received, where the index of the
+ * first new frame is first_clear_buffer, and the index of the last new
+ * frame is (first_clear_buffer + N) % n_frames */
+#define DV1394_IOC_RECEIVE_FRAMES	_IO ('#', 0x0a)
+
+/* Tell card to start receiving DMA */
+#define DV1394_IOC_START_RECEIVE	_IO ('#', 0x0b)
+
+/* Pass a struct dv1394_status* as the parameter */
+#define DV1394_IOC_GET_STATUS		_IOR('#', 0x0c, struct dv1394_status)
+
+
+/* Video1394 Gets 10 */
+
+#define VIDEO1394_IOC_LISTEN_CHANNEL		\
+	_IOWR('#', 0x10, struct video1394_mmap)
+#define VIDEO1394_IOC_UNLISTEN_CHANNEL		\
+	_IOW ('#', 0x11, int)
+#define VIDEO1394_IOC_LISTEN_QUEUE_BUFFER	\
+	_IOW ('#', 0x12, struct video1394_wait)
+#define VIDEO1394_IOC_LISTEN_WAIT_BUFFER	\
+	_IOWR('#', 0x13, struct video1394_wait)
+#define VIDEO1394_IOC_TALK_CHANNEL		\
+	_IOWR('#', 0x14, struct video1394_mmap)
+#define VIDEO1394_IOC_UNTALK_CHANNEL		\
+	_IOW ('#', 0x15, int)
+/*
+ * This one is broken: it really wanted
+ * "sizeof (struct video1394_wait) + sizeof (struct video1394_queue_variable)"
+ * but got just a "size_t"
+ */
+#define VIDEO1394_IOC_TALK_QUEUE_BUFFER 	\
+	_IOW ('#', 0x16, size_t)
+#define VIDEO1394_IOC_TALK_WAIT_BUFFER		\
+	_IOW ('#', 0x17, struct video1394_wait)
+#define VIDEO1394_IOC_LISTEN_POLL_BUFFER	\
+	_IOWR('#', 0x18, struct video1394_wait)
+
+
+/* Raw1394's ISO interface */
+#define RAW1394_IOC_ISO_XMIT_INIT		\
+	_IOW ('#', 0x1a, struct raw1394_iso_status)
+#define RAW1394_IOC_ISO_RECV_INIT		\
+	_IOWR('#', 0x1b, struct raw1394_iso_status)
+#define RAW1394_IOC_ISO_RECV_START		\
+	_IOC (_IOC_WRITE, '#', 0x1c, sizeof(int) * 3)
+#define RAW1394_IOC_ISO_XMIT_START		\
+	_IOC (_IOC_WRITE, '#', 0x1d, sizeof(int) * 2)
+#define RAW1394_IOC_ISO_XMIT_RECV_STOP		\
+	_IO  ('#', 0x1e)
+#define RAW1394_IOC_ISO_GET_STATUS		\
+	_IOR ('#', 0x1f, struct raw1394_iso_status)
+#define RAW1394_IOC_ISO_SHUTDOWN		\
+	_IO  ('#', 0x20)
+#define RAW1394_IOC_ISO_QUEUE_ACTIVITY		\
+	_IO  ('#', 0x21)
+#define RAW1394_IOC_ISO_RECV_LISTEN_CHANNEL	\
+	_IOW ('#', 0x22, unsigned char)
+#define RAW1394_IOC_ISO_RECV_UNLISTEN_CHANNEL	\
+	_IOW ('#', 0x23, unsigned char)
+#define RAW1394_IOC_ISO_RECV_SET_CHANNEL_MASK	\
+	_IOW ('#', 0x24, __u64)
+#define RAW1394_IOC_ISO_RECV_PACKETS		\
+	_IOW ('#', 0x25, struct raw1394_iso_packets)
+#define RAW1394_IOC_ISO_RECV_RELEASE_PACKETS	\
+	_IOW ('#', 0x26, unsigned int)
+#define RAW1394_IOC_ISO_XMIT_PACKETS		\
+	_IOW ('#', 0x27, struct raw1394_iso_packets)
+#define RAW1394_IOC_ISO_XMIT_SYNC		\
+	_IO  ('#', 0x28)
+#define RAW1394_IOC_ISO_RECV_FLUSH              \
+	_IO  ('#', 0x29)
+
+
+#endif /* __IEEE1394_IOCTL_H */
diff --git a/drivers/ieee1394/ieee1394.h b/drivers/ieee1394/ieee1394.h
new file mode 100644
index 000000000000..b634a9bb365c
--- /dev/null
+++ b/drivers/ieee1394/ieee1394.h
@@ -0,0 +1,202 @@
+/*
+ * Generic IEEE 1394 definitions
+ */
+
+#ifndef _IEEE1394_IEEE1394_H
+#define _IEEE1394_IEEE1394_H
+
+#define TCODE_WRITEQ             0x0
+#define TCODE_WRITEB             0x1
+#define TCODE_WRITE_RESPONSE     0x2
+#define TCODE_READQ              0x4
+#define TCODE_READB              0x5
+#define TCODE_READQ_RESPONSE     0x6
+#define TCODE_READB_RESPONSE     0x7
+#define TCODE_CYCLE_START        0x8
+#define TCODE_LOCK_REQUEST       0x9
+#define TCODE_ISO_DATA           0xa
+#define TCODE_STREAM_DATA        0xa
+#define TCODE_LOCK_RESPONSE      0xb
+
+#define RCODE_COMPLETE           0x0
+#define RCODE_CONFLICT_ERROR     0x4
+#define RCODE_DATA_ERROR         0x5
+#define RCODE_TYPE_ERROR         0x6
+#define RCODE_ADDRESS_ERROR      0x7
+
+#define EXTCODE_MASK_SWAP        0x1
+#define EXTCODE_COMPARE_SWAP     0x2
+#define EXTCODE_FETCH_ADD        0x3
+#define EXTCODE_LITTLE_ADD       0x4
+#define EXTCODE_BOUNDED_ADD      0x5
+#define EXTCODE_WRAP_ADD         0x6
+
+#define ACK_COMPLETE             0x1
+#define ACK_PENDING              0x2
+#define ACK_BUSY_X               0x4
+#define ACK_BUSY_A               0x5
+#define ACK_BUSY_B               0x6
+#define ACK_TARDY                0xb
+#define ACK_CONFLICT_ERROR       0xc
+#define ACK_DATA_ERROR           0xd
+#define ACK_TYPE_ERROR           0xe
+#define ACK_ADDRESS_ERROR        0xf
+
+/* Non-standard "ACK codes" for internal use */
+#define ACKX_NONE                (-1)
+#define ACKX_SEND_ERROR          (-2)
+#define ACKX_ABORTED             (-3)
+#define ACKX_TIMEOUT             (-4)
+
+
+#define IEEE1394_SPEED_100		0x00
+#define IEEE1394_SPEED_200		0x01
+#define IEEE1394_SPEED_400		0x02
+#define IEEE1394_SPEED_800		0x03
+#define IEEE1394_SPEED_1600		0x04
+#define IEEE1394_SPEED_3200		0x05
+/* The current highest tested speed supported by the subsystem */
+#define IEEE1394_SPEED_MAX		IEEE1394_SPEED_800
+
+/* Maps speed values above to a string representation */
+extern const char *hpsb_speedto_str[];
+
+
+#define SELFID_PWRCL_NO_POWER    0x0
+#define SELFID_PWRCL_PROVIDE_15W 0x1
+#define SELFID_PWRCL_PROVIDE_30W 0x2
+#define SELFID_PWRCL_PROVIDE_45W 0x3
+#define SELFID_PWRCL_USE_1W      0x4
+#define SELFID_PWRCL_USE_3W      0x5
+#define SELFID_PWRCL_USE_6W      0x6
+#define SELFID_PWRCL_USE_10W     0x7
+
+#define SELFID_PORT_CHILD        0x3
+#define SELFID_PORT_PARENT       0x2
+#define SELFID_PORT_NCONN        0x1
+#define SELFID_PORT_NONE         0x0
+
+
+/* 1394a PHY bitmasks */
+#define PHY_00_PHYSICAL_ID       0xFC
+#define PHY_00_R                 0x02 /* Root */
+#define PHY_00_PS                0x01 /* Power Status*/
+#define PHY_01_RHB               0x80 /* Root Hold-Off */
+#define PHY_01_IBR               0x80 /* Initiate Bus Reset */
+#define PHY_01_GAP_COUNT         0x3F
+#define PHY_02_EXTENDED          0xE0 /* 0x7 for 1394a-compliant PHY */
+#define PHY_02_TOTAL_PORTS       0x1F
+#define PHY_03_MAX_SPEED         0xE0
+#define PHY_03_DELAY             0x0F
+#define PHY_04_LCTRL             0x80 /* Link Active Report Control */
+#define PHY_04_CONTENDER         0x40
+#define PHY_04_JITTER            0x38
+#define PHY_04_PWR_CLASS         0x07 /* Power Class */
+#define PHY_05_WATCHDOG          0x80
+#define PHY_05_ISBR              0x40 /* Initiate Short Bus Reset */
+#define PHY_05_LOOP              0x20 /* Loop Detect */
+#define PHY_05_PWR_FAIL          0x10 /* Cable Power Failure Detect */
+#define PHY_05_TIMEOUT           0x08 /* Arbitration State Machine Timeout */
+#define PHY_05_PORT_EVENT        0x04 /* Port Event Detect */
+#define PHY_05_ENAB_ACCEL        0x02 /* Enable Arbitration Acceleration */
+#define PHY_05_ENAB_MULTI        0x01 /* Ena. Multispeed Packet Concatenation */
+
+#include <asm/byteorder.h>
+
+#ifdef __BIG_ENDIAN_BITFIELD
+
+/* On-the-wire layout of a 1394 SelfID packet, accessed as bitfields.
+ * Port fields use the SELFID_PORT_* values defined above. */
+struct selfid {
+        u32 packet_identifier:2; /* always binary 10 */
+        u32 phy_id:6;
+        /* byte */
+        u32 extended:1; /* if true is struct ext_selfid */
+        u32 link_active:1;
+        u32 gap_count:6;
+        /* byte */
+        u32 speed:2;
+        u32 phy_delay:2;
+        u32 contender:1;
+        u32 power_class:3;
+        /* byte */
+        u32 port0:2;
+        u32 port1:2;
+        u32 port2:2;
+        u32 initiated_reset:1;
+        u32 more_packets:1;
+} __attribute__((packed));
+
+/* Continuation SelfID packet for PHYs with more than three ports;
+ * seq_nr orders multiple extended packets from the same node. */
+struct ext_selfid {
+        u32 packet_identifier:2; /* always binary 10 */
+        u32 phy_id:6;
+        /* byte */
+        u32 extended:1; /* if false is struct selfid */
+        u32 seq_nr:3;
+        u32 reserved:2;
+        u32 porta:2;
+        /* byte */
+        u32 portb:2;
+        u32 portc:2;
+        u32 portd:2;
+        u32 porte:2;
+        /* byte */
+        u32 portf:2;
+        u32 portg:2;
+        u32 porth:2;
+        u32 reserved2:1;
+        u32 more_packets:1;
+} __attribute__((packed));
+
+#elif defined __LITTLE_ENDIAN_BITFIELD /* __BIG_ENDIAN_BITFIELD */
+
+/*
+ * Note: these mean to be bit fields of a big endian SelfID as seen on a little
+ * endian machine.  Without swapping.
+ */
+
+struct selfid {
+        u32 phy_id:6;
+        u32 packet_identifier:2; /* always binary 10 */
+        /* byte */
+        u32 gap_count:6;
+        u32 link_active:1;
+        u32 extended:1; /* if true is struct ext_selfid */
+        /* byte */
+        u32 power_class:3;
+        u32 contender:1;
+        u32 phy_delay:2;
+        u32 speed:2;
+        /* byte */
+        u32 more_packets:1;
+        u32 initiated_reset:1;
+        u32 port2:2;
+        u32 port1:2;
+        u32 port0:2;
+} __attribute__((packed));
+
+struct ext_selfid {
+        u32 phy_id:6;
+        u32 packet_identifier:2; /* always binary 10 */
+        /* byte */
+        u32 porta:2;
+        u32 reserved:2;
+        u32 seq_nr:3;
+        u32 extended:1; /* if false is struct selfid */
+        /* byte */
+        u32 porte:2;
+        u32 portd:2;
+        u32 portc:2;
+        u32 portb:2;
+        /* byte */
+        u32 more_packets:1;
+        u32 reserved2:1;
+        u32 porth:2;
+        u32 portg:2;
+        u32 portf:2;
+} __attribute__((packed));
+
+#else
+#error What? PDP endian?
+#endif /* __BIG_ENDIAN_BITFIELD */
+
+
+#endif /* _IEEE1394_IEEE1394_H */
diff --git a/drivers/ieee1394/ieee1394_core.c b/drivers/ieee1394/ieee1394_core.c
new file mode 100644
index 000000000000..1c5845f7e4ab
--- /dev/null
+++ b/drivers/ieee1394/ieee1394_core.c
@@ -0,0 +1,1330 @@
+/*
+ * IEEE 1394 for Linux
+ *
+ * Core support: hpsb_packet management, packet handling and forwarding to
+ *               highlevel or lowlevel code
+ *
+ * Copyright (C) 1999, 2000 Andreas E. Bombe
+ *                     2002 Manfred Weihs <weihs@ict.tuwien.ac.at>
+ *
+ * This code is licensed under the GPL.  See the file COPYING in the root
+ * directory of the kernel sources for details.
+ *
+ *
+ * Contributions:
+ *
+ * Manfred Weihs <weihs@ict.tuwien.ac.at>
+ *        loopback functionality in hpsb_send_packet
+ *        allow highlevel drivers to disable automatic response generation
+ *              and to generate responses themselves (deferred)
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/bitops.h>
+#include <linux/kdev_t.h>
+#include <linux/skbuff.h>
+#include <linux/suspend.h>
+
+#include <asm/byteorder.h>
+#include <asm/semaphore.h>
+
+#include "ieee1394_types.h"
+#include "ieee1394.h"
+#include "hosts.h"
+#include "ieee1394_core.h"
+#include "highlevel.h"
+#include "ieee1394_transactions.h"
+#include "csr.h"
+#include "nodemgr.h"
+#include "dma.h"
+#include "iso.h"
+#include "config_roms.h"
+
+/*
+ * Disable the nodemgr detection and config rom reading functionality.
+ */
+static int disable_nodemgr = 0;
+module_param(disable_nodemgr, int, 0444);
+MODULE_PARM_DESC(disable_nodemgr, "Disable nodemgr functionality.");
+
+/* Disable Isochronous Resource Manager functionality */
+int hpsb_disable_irm = 0;
+module_param_named(disable_irm, hpsb_disable_irm, bool, 0);
+MODULE_PARM_DESC(disable_irm,
+		 "Disable Isochronous Resource Manager functionality.");
+
+/* We are GPL, so treat us special */
+MODULE_LICENSE("GPL");
+
+/* Some globals used */
+const char *hpsb_speedto_str[] = { "S100", "S200", "S400", "S800", "S1600", "S3200" };
+struct class_simple *hpsb_protocol_class;
+
+#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
+/* Log up to the first four quadlets of a packet, prefixed with @text.
+ * @size is in bytes and is rounded down to whole quadlets. */
+static void dump_packet(const char *text, quadlet_t *data, int size)
+{
+	int i;
+
+	size /= 4;
+	size = (size > 4 ? 4 : size);
+
+	printk(KERN_DEBUG "ieee1394: %s", text);
+	for (i = 0; i < size; i++)
+		printk(" %08x", data[i]);
+	printk("\n");
+}
+#else
+/* Compiled away unless verbose debugging is configured in. */
+#define dump_packet(x,y,z)
+#endif
+
+static void abort_requests(struct hpsb_host *host);
+static void queue_packet_complete(struct hpsb_packet *packet);
+
+
+/**
+ * hpsb_set_packet_complete_task - set the task that runs when a packet
+ * completes. You cannot call this more than once on a single packet
+ * before it is sent.
+ *
+ * @packet: the packet whose completion we want the task added to
+ * @routine: function to call
+ * @data: data (if any) to pass to the above function
+ */
+void hpsb_set_packet_complete_task(struct hpsb_packet *packet,
+				   void (*routine)(void *), void *data)
+{
+	/* Only one completion task per packet; catch double registration. */
+	WARN_ON(packet->complete_routine != NULL);
+	packet->complete_routine = routine;
+	packet->complete_data = data;
+	return;
+}
+
+/**
+ * hpsb_alloc_packet - allocate new packet structure
+ * @data_size: size of the data block to be allocated
+ *
+ * This function allocates, initializes and returns a new &struct hpsb_packet.
+ * It can be used in interrupt context.  A header block is always included, its
+ * size is big enough to contain all possible 1394 headers.  The data block is
+ * only allocated when @data_size is not zero.
+ *
+ * For packets for which responses will be received the @data_size has to be big
+ * enough to contain the response's data block since no further allocation
+ * occurs at response matching time.
+ *
+ * The packet's generation value will be set to the current generation number
+ * for ease of use.  Remember to overwrite it with your own recorded generation
+ * number if you can not be sure that your code will not race with a bus reset.
+ *
+ * Return value: A pointer to a &struct hpsb_packet or NULL on allocation
+ * failure.
+ */
+struct hpsb_packet *hpsb_alloc_packet(size_t data_size)
+{
+	struct hpsb_packet *packet = NULL;
+	struct sk_buff *skb;
+
+	/* Round the data block up to a whole number of quadlets. */
+	data_size = ((data_size + 3) & ~3);
+
+	/* The packet struct and its data block share one skb, so both are
+	 * allocated and freed together (see hpsb_free_packet). */
+	skb = alloc_skb(data_size + sizeof(*packet), GFP_ATOMIC);
+	if (skb == NULL)
+		return NULL;
+
+	memset(skb->data, 0, data_size + sizeof(*packet));
+
+	packet = (struct hpsb_packet *)skb->data;
+	packet->skb = skb;
+
+	packet->header = packet->embedded_header;
+	packet->state = hpsb_unused;
+	packet->generation = -1;
+	INIT_LIST_HEAD(&packet->driver_list);
+	atomic_set(&packet->refcnt, 1);
+
+	if (data_size) {
+		/* Data block lives immediately after the struct in the skb. */
+		packet->data = (quadlet_t *)(skb->data + sizeof(*packet));
+		packet->data_size = data_size;
+	}
+
+	return packet;
+}
+
+
+/**
+ * hpsb_free_packet - free packet and data associated with it
+ * @packet: packet to free (is NULL safe)
+ *
+ * This function will free packet->data and finally the packet itself.
+ */
+void hpsb_free_packet(struct hpsb_packet *packet)
+{
+	/* Drop one reference; the skb (struct + data) goes away with the
+	 * last one.  NULL is silently ignored. */
+	if (packet && atomic_dec_and_test(&packet->refcnt)) {
+		BUG_ON(!list_empty(&packet->driver_list));
+		kfree_skb(packet->skb);
+	}
+}
+
+
+/* Ask the low-level driver to initiate a bus reset of the given @type.
+ * Returns 0 if the reset was requested, 1 if one is already in progress. */
+int hpsb_reset_bus(struct hpsb_host *host, int type)
+{
+        if (!host->in_bus_reset) {
+                host->driver->devctl(host, RESET_BUS, type);
+                return 0;
+        } else {
+                return 1;
+        }
+}
+
+
+/* Entered when a bus reset occurs: abort all pending requests and clear
+ * per-bus state (IRM/busmgr/cycle-master roles, node and SelfID counts)
+ * ahead of the SelfID phase.  Returns 1 and does nothing if a reset is
+ * already being processed, 0 otherwise. */
+int hpsb_bus_reset(struct hpsb_host *host)
+{
+        if (host->in_bus_reset) {
+                HPSB_NOTICE("%s called while bus reset already in progress",
+			    __FUNCTION__);
+                return 1;
+        }
+
+        abort_requests(host);
+        host->in_bus_reset = 1;
+        host->irm_id = -1;
+	host->is_irm = 0;
+        host->busmgr_id = -1;
+	host->is_busmgr = 0;
+	host->is_cycmst = 0;
+        host->node_count = 0;
+        host->selfid_count = 0;
+
+        return 0;
+}
+
+
+/*
+ * Verify num_of_selfids SelfIDs and return number of nodes.  Return zero in
+ * case verification failed.
+ */
+static int check_selfids(struct hpsb_host *host)
+{
+        int nodeid = -1;
+        int rest_of_selfids = host->selfid_count;
+        struct selfid *sid = (struct selfid *)host->topology_map;
+        struct ext_selfid *esid;
+        int esid_seq = 23;  /* out of range, so a leading extended SelfID fails */
+
+	host->nodes_active = 0;
+
+	/* Pass 1: phy_ids must count up monotonically, and extended SelfIDs
+	 * must carry consecutive sequence numbers for the same node.  Also
+	 * track active links and the highest-numbered IRM contender. */
+        while (rest_of_selfids--) {
+                if (!sid->extended) {
+                        nodeid++;
+                        esid_seq = 0;
+
+                        if (sid->phy_id != nodeid) {
+                                HPSB_INFO("SelfIDs failed monotony check with "
+                                          "%d", sid->phy_id);
+                                return 0;
+                        }
+
+			if (sid->link_active) {
+				host->nodes_active++;
+				if (sid->contender)
+					host->irm_id = LOCAL_BUS | sid->phy_id;
+			}
+                } else {
+                        esid = (struct ext_selfid *)sid;
+
+                        if ((esid->phy_id != nodeid)
+                            || (esid->seq_nr != esid_seq)) {
+                                HPSB_INFO("SelfIDs failed monotony check with "
+                                          "%d/%d", esid->phy_id, esid->seq_nr);
+                                return 0;
+                        }
+                        esid_seq++;
+                }
+                sid++;
+        }
+
+	/* Pass 2: the last node is the root, so none of its ports may be
+	 * connected to a parent (port value 0x2).  Walk its SelfID packets
+	 * backwards: extended ones first, then the base packet. */
+        esid = (struct ext_selfid *)(sid - 1);
+        while (esid->extended) {
+                if ((esid->porta == 0x2) || (esid->portb == 0x2)
+                    || (esid->portc == 0x2) || (esid->portd == 0x2)
+                    || (esid->porte == 0x2) || (esid->portf == 0x2)
+                    || (esid->portg == 0x2) || (esid->porth == 0x2)) {
+			HPSB_INFO("SelfIDs failed root check on "
+				  "extended SelfID");
+			return 0;
+                }
+                esid--;
+        }
+
+        sid = (struct selfid *)esid;
+        if ((sid->port0 == 0x2) || (sid->port1 == 0x2) || (sid->port2 == 0x2)) {
+		HPSB_INFO("SelfIDs failed root check");
+		return 0;
+        }
+
+	host->node_count = nodeid + 1;
+        return 1;
+}
+
+/* Derive the pairwise transfer speed matrix from the verified SelfIDs.
+ * host->speed_map is laid out as a 64x64 matrix indexed [a*64 + b]; each
+ * entry is the lowest speed capability on the path between nodes a and b. */
+static void build_speed_map(struct hpsb_host *host, int nodecount)
+{
+	/* NOTE(review): variable-length arrays; presumably safe because a
+	 * 1394 bus carries at most 63 nodes -- confirm against callers. */
+	u8 speedcap[nodecount];
+	u8 cldcnt[nodecount];
+        u8 *map = host->speed_map;
+        struct selfid *sid;
+        struct ext_selfid *esid;
+        int i, j, n;
+
+        for (i = 0; i < (nodecount * 64); i += 64) {
+                for (j = 0; j < nodecount; j++) {
+                        map[i+j] = IEEE1394_SPEED_MAX;
+                }
+        }
+
+        for (i = 0; i < nodecount; i++) {
+                cldcnt[i] = 0;
+        }
+
+        /* find direct children count and speed */
+	/* SelfIDs are stored in ascending phy_id order, so walking the
+	 * topology map backwards visits nodes from highest to lowest;
+	 * port value 0x3 means "connected to child". */
+        for (sid = (struct selfid *)&host->topology_map[host->selfid_count-1],
+                     n = nodecount - 1;
+             (void *)sid >= (void *)host->topology_map; sid--) {
+                if (sid->extended) {
+                        esid = (struct ext_selfid *)sid;
+
+                        if (esid->porta == 0x3) cldcnt[n]++;
+                        if (esid->portb == 0x3) cldcnt[n]++;
+                        if (esid->portc == 0x3) cldcnt[n]++;
+                        if (esid->portd == 0x3) cldcnt[n]++;
+                        if (esid->porte == 0x3) cldcnt[n]++;
+                        if (esid->portf == 0x3) cldcnt[n]++;
+                        if (esid->portg == 0x3) cldcnt[n]++;
+                        if (esid->porth == 0x3) cldcnt[n]++;
+                } else {
+                        if (sid->port0 == 0x3) cldcnt[n]++;
+                        if (sid->port1 == 0x3) cldcnt[n]++;
+                        if (sid->port2 == 0x3) cldcnt[n]++;
+
+                        speedcap[n] = sid->speed;
+                        n--;
+                }
+        }
+
+        /* set self mapping */
+        for (i = 0; i < nodecount; i++) {
+                map[64*i + i] = speedcap[i];
+        }
+
+        /* fix up direct children count to total children count;
+         * also fix up speedcaps for sibling and parent communication */
+        for (i = 1; i < nodecount; i++) {
+                for (j = cldcnt[i], n = i - 1; j > 0; j--) {
+                        cldcnt[i] += cldcnt[n];
+                        speedcap[n] = min(speedcap[n], speedcap[i]);
+                        n -= cldcnt[n] + 1;
+                }
+        }
+
+	/* Finally clamp every pair that communicates through node n to n's
+	 * (already path-minimized) speed capability. */
+        for (n = 0; n < nodecount; n++) {
+                for (i = n - cldcnt[n]; i <= n; i++) {
+                        for (j = 0; j < (n - cldcnt[n]); j++) {
+                                map[j*64 + i] = map[i*64 + j] =
+                                        min(map[i*64 + j], speedcap[n]);
+                        }
+                        for (j = n + 1; j < nodecount; j++) {
+                                map[j*64 + i] = map[i*64 + j] =
+                                        min(map[i*64 + j], speedcap[n]);
+                        }
+                }
+        }
+}
+
+
+/* Store one raw SelfID quadlet delivered by the low-level driver during
+ * the bus reset phase; SelfIDs outside a reset are logged and dropped. */
+void hpsb_selfid_received(struct hpsb_host *host, quadlet_t sid)
+{
+        if (host->in_bus_reset) {
+                HPSB_VERBOSE("Including SelfID 0x%x", sid);
+                host->topology_map[host->selfid_count++] = sid;
+        } else {
+                HPSB_NOTICE("Spurious SelfID packet (0x%08x) received from bus %d",
+			    sid, NODEID_TO_BUS(host->node_id));
+        }
+}
+
+/* Called when the SelfID phase ends: validate the collected SelfIDs,
+ * rebuild the speed map, settle IRM/cycle-master roles, bump the bus
+ * generation and notify the highlevel layer.  On invalid SelfIDs the bus
+ * is reset again, up to 20 attempts. */
+void hpsb_selfid_complete(struct hpsb_host *host, int phyid, int isroot)
+{
+	if (!host->in_bus_reset)
+		HPSB_NOTICE("SelfID completion called outside of bus reset!");
+
+        host->node_id = LOCAL_BUS | phyid;
+        host->is_root = isroot;
+
+        if (!check_selfids(host)) {
+                if (host->reset_retries++ < 20) {
+                        /* selfid stage did not complete without error */
+                        HPSB_NOTICE("Error in SelfID stage, resetting");
+			host->in_bus_reset = 0;
+			/* this should work from ohci1394 now... */
+                        hpsb_reset_bus(host, LONG_RESET);
+                        return;
+                } else {
+                        HPSB_NOTICE("Stopping out-of-control reset loop");
+                        HPSB_NOTICE("Warning - topology map and speed map will not be valid");
+			host->reset_retries = 0;
+                }
+        } else {
+		host->reset_retries = 0;
+                build_speed_map(host, host->node_count);
+        }
+
+	HPSB_VERBOSE("selfid_complete called with successful SelfID stage "
+		     "... irm_id: 0x%X node_id: 0x%X",host->irm_id,host->node_id);
+
+        /* irm_id is kept up to date by check_selfids() */
+        if (host->irm_id == host->node_id) {
+                host->is_irm = 1;
+        } else {
+                host->is_busmgr = 0;
+                host->is_irm = 0;
+        }
+
+        if (isroot) {
+		host->driver->devctl(host, ACT_CYCLE_MASTER, 1);
+		host->is_cycmst = 1;
+	}
+	/* New generation marks all pre-reset packets/handles stale. */
+	atomic_inc(&host->generation);
+	host->in_bus_reset = 0;
+        highlevel_host_reset(host);
+}
+
+
+/* Called by the low-level driver (or the local loopback path in
+ * hpsb_send_packet) once transmission of @packet finished with @ackcode.
+ * Completes the packet immediately unless an ack_pending response is still
+ * expected, in which case the response timeout timer is armed. */
+void hpsb_packet_sent(struct hpsb_host *host, struct hpsb_packet *packet,
+                      int ackcode)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&host->pending_packet_queue.lock, flags);
+
+	packet->ack_code = ackcode;
+
+	if (packet->no_waiter || packet->state == hpsb_complete) {
+		/* if packet->no_waiter, must not have a tlabel allocated */
+		spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags);
+		hpsb_free_packet(packet);
+		return;
+	}
+
+	atomic_dec(&packet->refcnt);	/* drop HC's reference */
+	/* here the packet must be on the host->pending_packet_queue */
+
+	if (ackcode != ACK_PENDING || !packet->expect_response) {
+		/* Done: either no response will come, or none is wanted. */
+		packet->state = hpsb_complete;
+		__skb_unlink(packet->skb, &host->pending_packet_queue);
+		spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags);
+		queue_packet_complete(packet);
+		return;
+	}
+
+	/* Response outstanding; leave the packet queued and start the
+	 * timeout clock from now. */
+	packet->state = hpsb_pending;
+	packet->sendtime = jiffies;
+
+	spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags);
+
+	mod_timer(&host->timeout, jiffies + host->timeout_interval);
+}
+
+/**
+ * hpsb_send_phy_config - transmit a PHY configuration packet on the bus
+ * @host: host that PHY config packet gets sent through
+ * @rootid: root whose force_root bit should get set (-1 = don't set force_root)
+ * @gapcnt: gap count value to set (-1 = don't set gap count)
+ *
+ * This function sends a PHY config packet on the bus through the specified host.
+ *
+ * Return value: 0 for success or error number otherwise.
+ */
+int hpsb_send_phy_config(struct hpsb_host *host, int rootid, int gapcnt)
+{
+	struct hpsb_packet *packet;
+	int retval = 0;
+
+	/* rootid must fit in 6 bits, gapcnt in 6 bits, and at least one of
+	 * the two must be given (-1 means "leave unchanged"). */
+	if (rootid >= ALL_NODES || rootid < -1 || gapcnt > 0x3f || gapcnt < -1 ||
+	   (rootid == -1 && gapcnt == -1)) {
+		HPSB_DEBUG("Invalid Parameter: rootid = %d   gapcnt = %d",
+			   rootid, gapcnt);
+		return -EINVAL;
+	}
+
+	packet = hpsb_alloc_packet(0);
+	if (!packet)
+		return -ENOMEM;
+
+	packet->host = host;
+	packet->header_size = 8;
+	packet->data_size = 0;
+	packet->expect_response = 0;
+	packet->no_waiter = 0;
+	packet->type = hpsb_raw;
+	/* PHY config quadlet: root_ID in bits 24-29 with the R flag (bit 23),
+	 * gap_count in bits 16-21 with the T flag (bit 22). */
+	packet->header[0] = 0;
+	if (rootid != -1)
+		packet->header[0] |= rootid << 24 | 1 << 23;
+	if (gapcnt != -1)
+		packet->header[0] |= gapcnt << 16 | 1 << 22;
+
+	/* Second quadlet is the logical inverse of the first, per spec. */
+	packet->header[1] = ~packet->header[0];
+
+	packet->generation = get_hpsb_generation(host);
+
+	retval = hpsb_send_packet_and_wait(packet);
+	hpsb_free_packet(packet);
+
+	return retval;
+}
+
+/**
+ * hpsb_send_packet - transmit a packet on the bus
+ * @packet: packet to send
+ *
+ * The packet is sent through the host specified in the packet->host field.
+ * Before sending, the packet's transmit speed is automatically determined
+ * using the local speed map when it is an async, non-broadcast packet.
+ *
+ * Possibilities for failure are that host is either not initialized, in bus
+ * reset, the packet's generation number doesn't match the current generation
+ * number or the host reports a transmit error.
+ *
+ * Return value: 0 on success, negative errno on failure.
+ */
+int hpsb_send_packet(struct hpsb_packet *packet)
+{
+	struct hpsb_host *host = packet->host;
+
+        if (host->is_shutdown)
+		return -EINVAL;
+	/* Reject packets from a previous bus generation. */
+	if (host->in_bus_reset ||
+	    (packet->generation != get_hpsb_generation(host)))
+                return -EAGAIN;
+
+        packet->state = hpsb_queued;
+
+	/* This just seems silly to me */
+	WARN_ON(packet->no_waiter && packet->expect_response);
+
+	if (!packet->no_waiter || packet->expect_response) {
+		/* Extra reference for the pending queue; dropped again in
+		 * hpsb_packet_sent(). */
+		atomic_inc(&packet->refcnt);
+		packet->sendtime = jiffies;
+		skb_queue_tail(&host->pending_packet_queue, packet->skb);
+	}
+
+        if (packet->node_id == host->node_id) {
+		/* it is a local request, so handle it locally */
+
+                quadlet_t *data;
+                size_t size = packet->data_size + packet->header_size;
+
+                data = kmalloc(size, GFP_ATOMIC);
+                if (!data) {
+                        HPSB_ERR("unable to allocate memory for concatenating header and data");
+                        return -ENOMEM;
+                }
+
+                memcpy(data, packet->header, packet->header_size);
+
+                if (packet->data_size)
+			memcpy(((u8*)data) + packet->header_size, packet->data, packet->data_size);
+
+                dump_packet("send packet local:", packet->header,
+                            packet->header_size);
+
+		/* Loop the packet back through the receive path as if it
+		 * had arrived from the bus. */
+                hpsb_packet_sent(host, packet, packet->expect_response ? ACK_PENDING : ACK_COMPLETE);
+                hpsb_packet_received(host, data, size, 0);
+
+                kfree(data);
+
+                return 0;
+        }
+
+	/* Pick the transmit speed for async unicasts from the speed map. */
+        if (packet->type == hpsb_async && packet->node_id != ALL_NODES) {
+                packet->speed_code =
+                        host->speed_map[NODEID_TO_NODE(host->node_id) * 64
+                                       + NODEID_TO_NODE(packet->node_id)];
+        }
+
+#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
+        switch (packet->speed_code) {
+        case 2:
+                dump_packet("send packet 400:", packet->header,
+                            packet->header_size);
+                break;
+        case 1:
+                dump_packet("send packet 200:", packet->header,
+                            packet->header_size);
+                break;
+        default:
+                dump_packet("send packet 100:", packet->header,
+                            packet->header_size);
+        }
+#endif
+
+        return host->driver->transmit_packet(host, packet);
+}
+
+/* We could just use complete() directly as the packet complete
+ * callback, but this is more typesafe, in the sense that we get a
+ * compiler error if the prototype for complete() changes. */
+
+/* Packet-completion callback adapter; see the comment above. */
+static void complete_packet(void *data)
+{
+	complete((struct completion *) data);
+}
+
+/* Synchronous send: transmit @packet and sleep until its completion task
+ * fires.  Returns the hpsb_send_packet() result; on failure no wait is
+ * done.  Must not be called from atomic context (it may sleep). */
+int hpsb_send_packet_and_wait(struct hpsb_packet *packet)
+{
+	struct completion done;
+	int retval;
+
+	init_completion(&done);
+	hpsb_set_packet_complete_task(packet, complete_packet, &done);
+	retval = hpsb_send_packet(packet);
+	if (retval == 0)
+		wait_for_completion(&done);
+
+	return retval;
+}
+
+/* Fire-and-forget send: on transmit failure the packet is freed here,
+ * since nobody will wait for its completion. */
+static void send_packet_nocare(struct hpsb_packet *packet)
+{
+        if (hpsb_send_packet(packet) < 0) {
+                hpsb_free_packet(packet);
+        }
+}
+
+
+/* Match an incoming response packet against the pending request queue by
+ * transaction label and source node, verify the request/response tcode
+ * pairing, copy the response header (and data, if any) into the request
+ * packet, and complete it.  Unmatched responses are logged and dropped. */
+static void handle_packet_response(struct hpsb_host *host, int tcode,
+				   quadlet_t *data, size_t size)
+{
+        struct hpsb_packet *packet = NULL;
+	struct sk_buff *skb;
+        int tcode_match = 0;
+        int tlabel;
+        unsigned long flags;
+
+	/* tlabel sits in bits 10-15 of the first header quadlet. */
+        tlabel = (data[0] >> 10) & 0x3f;
+
+	spin_lock_irqsave(&host->pending_packet_queue.lock, flags);
+
+	skb_queue_walk(&host->pending_packet_queue, skb) {
+		packet = (struct hpsb_packet *)skb->data;
+                if ((packet->tlabel == tlabel)
+                    && (packet->node_id == (data[1] >> 16))){
+                        break;
+                }
+
+		packet = NULL;
+        }
+
+	if (packet == NULL) {
+                HPSB_DEBUG("unsolicited response packet received - no tlabel match");
+                dump_packet("contents:", data, 16);
+		spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags);
+                return;
+        }
+
+	/* Each request tcode has exactly one legal response tcode; copy the
+	 * response header (12 or 16 bytes) and any payload on a match. */
+        switch (packet->tcode) {
+        case TCODE_WRITEQ:
+        case TCODE_WRITEB:
+                if (tcode != TCODE_WRITE_RESPONSE)
+			break;
+		tcode_match = 1;
+		memcpy(packet->header, data, 12);
+                break;
+        case TCODE_READQ:
+                if (tcode != TCODE_READQ_RESPONSE)
+			break;
+		tcode_match = 1;
+		memcpy(packet->header, data, 16);
+                break;
+        case TCODE_READB:
+                if (tcode != TCODE_READB_RESPONSE)
+			break;
+		tcode_match = 1;
+		/* The requester must have allocated room for the response
+		 * data (see hpsb_alloc_packet). */
+		BUG_ON(packet->skb->len - sizeof(*packet) < size - 16);
+		memcpy(packet->header, data, 16);
+		memcpy(packet->data, data + 4, size - 16);
+                break;
+        case TCODE_LOCK_REQUEST:
+                if (tcode != TCODE_LOCK_RESPONSE)
+			break;
+		tcode_match = 1;
+		size = min((size - 16), (size_t)8);
+		BUG_ON(packet->skb->len - sizeof(*packet) < size);
+		memcpy(packet->header, data, 16);
+		memcpy(packet->data, data + 4, size);
+                break;
+        }
+
+        if (!tcode_match) {
+		spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags);
+                HPSB_INFO("unsolicited response packet received - tcode mismatch");
+                dump_packet("contents:", data, 16);
+                return;
+        }
+
+	__skb_unlink(skb, skb->list);
+
+	/* Response may arrive before the ack callback; fake the ack. */
+	if (packet->state == hpsb_queued) {
+		packet->sendtime = jiffies;
+		packet->ack_code = ACK_PENDING;
+	}
+
+	packet->state = hpsb_complete;
+	spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags);
+
+	queue_packet_complete(packet);
+}
+
+
+/* Allocate and pre-initialize a response packet addressed back to the
+ * node that sent the request in @data.  @dsize is the payload size in
+ * bytes; a trailing partial quadlet, if any, is zero-padded.  Returns
+ * NULL on allocation failure (no data_error response is sent yet, see
+ * the FIXME below). */
+static struct hpsb_packet *create_reply_packet(struct hpsb_host *host,
+					       quadlet_t *data, size_t dsize)
+{
+        struct hpsb_packet *p;
+
+        p = hpsb_alloc_packet(dsize);
+        if (unlikely(p == NULL)) {
+                /* FIXME - send data_error response */
+                return NULL;
+        }
+
+        p->type = hpsb_async;
+        p->state = hpsb_unused;
+        p->host = host;
+        /* reply destination = source node ID of the request */
+        p->node_id = data[1] >> 16;
+        /* echo the requester's transaction label */
+        p->tlabel = (data[0] >> 10) & 0x3f;
+        /* nobody waits on our own responses; freed on completion */
+        p->no_waiter = 1;
+
+	p->generation = get_hpsb_generation(host);
+
+	/* zero the padding quadlet when dsize is not quadlet-aligned */
+	if (dsize % 4)
+		p->data[dsize / 4] = 0;
+
+        return p;
+}
+
+/* Fill the common part of a response header: tcode and the requester's
+ * node/tlabel go into quadlet 0, our source node ID and the rcode into
+ * quadlet 1.  Note: 'rcode' is taken from the caller's scope.  The
+ * (1 << 8) sets a bit in the retry-code field of quadlet 0. */
+#define PREP_ASYNC_HEAD_RCODE(tc) \
+	packet->tcode = tc; \
+	packet->header[0] = (packet->node_id << 16) | (packet->tlabel << 10) \
+		| (1 << 8) | (tc << 4); \
+	packet->header[1] = (packet->host->node_id << 16) | (rcode << 12); \
+	packet->header[2] = 0
+
+/* Build a quadlet read response carrying @data; header-only packet
+ * (the quadlet travels in header[3], there is no data block). */
+static void fill_async_readquad_resp(struct hpsb_packet *packet, int rcode,
+                              quadlet_t data)
+{
+	PREP_ASYNC_HEAD_RCODE(TCODE_READQ_RESPONSE);
+	packet->header[3] = data;
+	packet->header_size = 16;
+	packet->data_size = 0;
+}
+
+/* Build a block read response of @length bytes.  On any rcode other
+ * than RCODE_COMPLETE no data is returned; data_size is rounded up to
+ * a quadlet multiple. */
+static void fill_async_readblock_resp(struct hpsb_packet *packet, int rcode,
+                               int length)
+{
+	if (rcode != RCODE_COMPLETE)
+		length = 0;
+
+	PREP_ASYNC_HEAD_RCODE(TCODE_READB_RESPONSE);
+	packet->header[3] = length << 16;
+	packet->header_size = 16;
+	packet->data_size = length + (length % 4 ? 4 - (length % 4) : 0);
+}
+
+/* Build a write response: 12-byte (3 quadlet) header, no payload.
+ * The header[2] = 0 is redundant with PREP_ASYNC_HEAD_RCODE but kept
+ * for clarity. */
+static void fill_async_write_resp(struct hpsb_packet *packet, int rcode)
+{
+	PREP_ASYNC_HEAD_RCODE(TCODE_WRITE_RESPONSE);
+	packet->header[2] = 0;
+	packet->header_size = 12;
+	packet->data_size = 0;
+}
+
+/* Build a lock response carrying @length bytes of old value and the
+ * extended tcode in header[3].  On error no data is returned. */
+static void fill_async_lock_resp(struct hpsb_packet *packet, int rcode, int extcode,
+                          int length)
+{
+	if (rcode != RCODE_COMPLETE)
+		length = 0;
+
+	PREP_ASYNC_HEAD_RCODE(TCODE_LOCK_RESPONSE);
+	packet->header[3] = (length << 16) | extcode;
+	packet->header_size = 16;
+	packet->data_size = length;
+}
+
+/* Allocate a reply for the request in 'data' into 'packet'; on
+ * allocation failure break out of the enclosing switch case, silently
+ * dropping the request. */
+#define PREP_REPLY_PACKET(length) \
+                packet = create_reply_packet(host, data, length); \
+                if (packet == NULL) break
+
+/* Dispatch an incoming asynchronous request to the highlevel layer and,
+ * where required, build and send the matching response.  @data is the
+ * raw request (header in machine byte order), @size its total length.
+ * @write_acked tells whether the hardware already sent ack_complete for
+ * a write, in which case no write response is generated.  A negative
+ * rcode from the highlevel layer suppresses the response. */
+static void handle_incoming_packet(struct hpsb_host *host, int tcode,
+				   quadlet_t *data, size_t size, int write_acked)
+{
+        struct hpsb_packet *packet;
+        int length, rcode, extcode;
+        quadlet_t buffer;
+        nodeid_t source = data[1] >> 16;
+        nodeid_t dest = data[0] >> 16;
+        u16 flags = (u16) data[0];
+        u64 addr;
+
+        /* big FIXME - no error checking is done for an out of bounds length */
+
+        switch (tcode) {
+        case TCODE_WRITEQ:
+                /* 48-bit offset: high 16 bits in data[1], low 32 in data[2] */
+                addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
+                rcode = highlevel_write(host, source, dest, data+3,
+					addr, 4, flags);
+
+                if (!write_acked
+                    && (NODEID_TO_NODE(data[0] >> 16) != NODE_MASK)
+                    && (rcode >= 0)) {
+                        /* not a broadcast write, reply */
+                        PREP_REPLY_PACKET(0);
+                        fill_async_write_resp(packet, rcode);
+                        send_packet_nocare(packet);
+                }
+                break;
+
+        case TCODE_WRITEB:
+                addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
+                /* data_length is the upper 16 bits of data[3] */
+                rcode = highlevel_write(host, source, dest, data+4,
+					addr, data[3]>>16, flags);
+
+                if (!write_acked
+                    && (NODEID_TO_NODE(data[0] >> 16) != NODE_MASK)
+                    && (rcode >= 0)) {
+                        /* not a broadcast write, reply */
+                        PREP_REPLY_PACKET(0);
+                        fill_async_write_resp(packet, rcode);
+                        send_packet_nocare(packet);
+                }
+                break;
+
+        case TCODE_READQ:
+                addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
+                rcode = highlevel_read(host, source, &buffer, addr, 4, flags);
+
+                if (rcode >= 0) {
+                        PREP_REPLY_PACKET(0);
+                        fill_async_readquad_resp(packet, rcode, buffer);
+                        send_packet_nocare(packet);
+                }
+                break;
+
+        case TCODE_READB:
+                length = data[3] >> 16;
+                /* allocate the reply first so highlevel_read() can fill
+                 * packet->data in place */
+                PREP_REPLY_PACKET(length);
+
+                addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
+                rcode = highlevel_read(host, source, packet->data, addr,
+                                       length, flags);
+
+                if (rcode >= 0) {
+                        fill_async_readblock_resp(packet, rcode, length);
+                        send_packet_nocare(packet);
+                } else {
+                        hpsb_free_packet(packet);
+                }
+                break;
+
+        case TCODE_LOCK_REQUEST:
+                length = data[3] >> 16;
+                extcode = data[3] & 0xffff;
+                addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
+
+                /* lock responses carry at most 8 bytes of old value */
+                PREP_REPLY_PACKET(8);
+
+                if ((extcode == 0) || (extcode >= 7)) {
+                        /* let switch default handle error */
+                        length = 0;
+                }
+
+                switch (length) {
+                case 4:
+                        /* single-operand 32-bit lock */
+                        rcode = highlevel_lock(host, source, packet->data, addr,
+                                               data[4], 0, extcode,flags);
+                        fill_async_lock_resp(packet, rcode, extcode, 4);
+                        break;
+                case 8:
+                        if ((extcode != EXTCODE_FETCH_ADD)
+                            && (extcode != EXTCODE_LITTLE_ADD)) {
+                                /* two 32-bit operands (arg + data) */
+                                rcode = highlevel_lock(host, source,
+                                                       packet->data, addr,
+                                                       data[5], data[4],
+                                                       extcode, flags);
+                                fill_async_lock_resp(packet, rcode, extcode, 4);
+                        } else {
+                                /* fetch_add/little_add: one 64-bit operand */
+                                rcode = highlevel_lock64(host, source,
+                                             (octlet_t *)packet->data, addr,
+                                             *(octlet_t *)(data + 4), 0ULL,
+                                             extcode, flags);
+                                fill_async_lock_resp(packet, rcode, extcode, 8);
+                        }
+                        break;
+                case 16:
+                        /* two 64-bit operands (arg + data) */
+                        rcode = highlevel_lock64(host, source,
+                                                 (octlet_t *)packet->data, addr,
+                                                 *(octlet_t *)(data + 6),
+                                                 *(octlet_t *)(data + 4),
+                                                 extcode, flags);
+                        fill_async_lock_resp(packet, rcode, extcode, 8);
+                        break;
+                default:
+                        /* bad extcode or operand length */
+                        rcode = RCODE_TYPE_ERROR;
+                        fill_async_lock_resp(packet, rcode,
+                                             extcode, 0);
+                }
+
+                if (rcode >= 0) {
+                        send_packet_nocare(packet);
+                } else {
+                        hpsb_free_packet(packet);
+                }
+                break;
+        }
+
+}
+#undef PREP_REPLY_PACKET
+
+
+/* Entry point for received packets handed up by host drivers; see the
+ * calling contract documented in ieee1394_core.h.  Dispatches on the
+ * transaction code extracted from the first header quadlet. */
+void hpsb_packet_received(struct hpsb_host *host, quadlet_t *data, size_t size,
+                          int write_acked)
+{
+        int tcode;
+
+        /* packets arriving during a bus reset are dropped */
+        if (host->in_bus_reset) {
+                HPSB_INFO("received packet during reset; ignoring");
+                return;
+        }
+
+        dump_packet("received packet:", data, size);
+
+        tcode = (data[0] >> 4) & 0xf;
+
+        switch (tcode) {
+        /* responses match against our pending request queue */
+        case TCODE_WRITE_RESPONSE:
+        case TCODE_READQ_RESPONSE:
+        case TCODE_READB_RESPONSE:
+        case TCODE_LOCK_RESPONSE:
+                handle_packet_response(host, tcode, data, size);
+                break;
+
+        /* requests go to the highlevel layer (and get a response) */
+        case TCODE_WRITEQ:
+        case TCODE_WRITEB:
+        case TCODE_READQ:
+        case TCODE_READB:
+        case TCODE_LOCK_REQUEST:
+                handle_incoming_packet(host, tcode, data, size, write_acked);
+                break;
+
+
+        case TCODE_ISO_DATA:
+                highlevel_iso_receive(host, data, size);
+                break;
+
+        case TCODE_CYCLE_START:
+                /* simply ignore this packet if it is passed on */
+                break;
+
+        default:
+                HPSB_NOTICE("received packet with bogus transaction code %d",
+                            tcode);
+                break;
+        }
+}
+
+
+/* Cancel all outstanding requests: tell the host driver to stop its
+ * transmit queues, then complete every pending packet with
+ * ACKX_ABORTED so waiters are woken. */
+static void abort_requests(struct hpsb_host *host)
+{
+	struct hpsb_packet *packet;
+	struct sk_buff *skb;
+
+	host->driver->devctl(host, CANCEL_REQUESTS, 0);
+
+	while ((skb = skb_dequeue(&host->pending_packet_queue)) != NULL) {
+		packet = (struct hpsb_packet *)skb->data;
+
+		packet->state = hpsb_complete;
+		packet->ack_code = ACKX_ABORTED;
+		queue_packet_complete(packet);
+	}
+}
+
+/* Timer callback (host->timeout): complete pending packets whose
+ * split-transaction timeout (csr.expire jiffies after sendtime) has
+ * passed, with ACKX_TIMEOUT, and re-arm the timer while packets
+ * remain queued.  __opaque is the struct hpsb_host pointer. */
+void abort_timedouts(unsigned long __opaque)
+{
+	struct hpsb_host *host = (struct hpsb_host *)__opaque;
+	unsigned long flags;
+	struct hpsb_packet *packet;
+	struct sk_buff *skb;
+	unsigned long expire;
+
+	/* snapshot the CSR SPLIT_TIMEOUT-derived expiry under its lock */
+	spin_lock_irqsave(&host->csr.lock, flags);
+	expire = host->csr.expire;
+	spin_unlock_irqrestore(&host->csr.lock, flags);
+
+	/* Hold the lock around this, since we aren't dequeuing all
+	 * packets, just ones we need. */
+	spin_lock_irqsave(&host->pending_packet_queue.lock, flags);
+
+	while (!skb_queue_empty(&host->pending_packet_queue)) {
+		skb = skb_peek(&host->pending_packet_queue);
+
+		packet = (struct hpsb_packet *)skb->data;
+
+		if (time_before(packet->sendtime + expire, jiffies)) {
+			__skb_unlink(skb, skb->list);
+			packet->state = hpsb_complete;
+			packet->ack_code = ACKX_TIMEOUT;
+			queue_packet_complete(packet);
+		} else {
+			/* Since packets are added to the tail, the oldest
+			 * ones are first, always. When we get to one that
+			 * isn't timed out, the rest aren't either. */
+			break;
+		}
+	}
+
+	if (!skb_queue_empty(&host->pending_packet_queue))
+		mod_timer(&host->timeout, jiffies + host->timeout_interval);
+
+	spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags);
+}
+
+
+/* Kernel thread and vars, which handles packets that are completed. Only
+ * packets that have a "complete" function are sent here. This way, the
+ * completion is run out of kernel context, and doesn't block the rest of
+ * the stack. */
+/* pid of khpsbpkt (-1 when not running); khpsbpkt_kill requests exit */
+static int khpsbpkt_pid = -1, khpsbpkt_kill;
+static DECLARE_COMPLETION(khpsbpkt_complete);
+/* skbs of packets whose complete_routine is waiting to be run */
+static struct sk_buff_head hpsbpkt_queue;
+/* semaphore the thread sleeps on; up()'d once per queued packet */
+static DECLARE_MUTEX_LOCKED(khpsbpkt_sig);
+
+
+/* Hand a finished packet to the khpsbpkt thread so its completion
+ * routine runs in process context.  no_waiter packets are freed
+ * immediately; packets without a complete_routine are left for their
+ * owner to clean up. */
+static void queue_packet_complete(struct hpsb_packet *packet)
+{
+	if (packet->no_waiter) {
+		hpsb_free_packet(packet);
+		return;
+	}
+	if (packet->complete_routine != NULL) {
+		skb_queue_tail(&hpsbpkt_queue, packet->skb);
+
+		/* Signal the kernel thread to handle this */
+		up(&khpsbpkt_sig);
+	}
+	return;
+}
+
+/* Kernel thread main loop: sleep on khpsbpkt_sig, then drain
+ * hpsbpkt_queue, invoking each packet's completion routine.  Exits
+ * when khpsbpkt_kill is set (see ieee1394_cleanup()). */
+static int hpsbpkt_thread(void *__hi)
+{
+	struct sk_buff *skb;
+	struct hpsb_packet *packet;
+	void (*complete_routine)(void*);
+	void *complete_data;
+
+	daemonize("khpsbpkt");
+
+	while (1) {
+		if (down_interruptible(&khpsbpkt_sig)) {
+			/* interrupted by a signal: cooperate with the
+			 * suspend/resume freezer, bail out otherwise */
+			if (current->flags & PF_FREEZE) {
+				refrigerator(0);
+				continue;
+			}
+			printk("khpsbpkt: received unexpected signal?!\n" );
+			break;
+		}
+
+		if (khpsbpkt_kill)
+			break;
+
+		while ((skb = skb_dequeue(&hpsbpkt_queue)) != NULL) {
+			packet = (struct hpsb_packet *)skb->data;
+
+			complete_routine = packet->complete_routine;
+			complete_data = packet->complete_data;
+
+			/* clear before calling: the routine may free or
+			 * requeue the packet */
+			packet->complete_routine = packet->complete_data = NULL;
+
+			complete_routine(complete_data);
+		}
+	}
+
+	complete_and_exit(&khpsbpkt_complete, 0);
+}
+
+/* Module init: start the packet-completion thread, register the
+ * ieee1394 character device region, devfs dir, sysfs bus and classes,
+ * CSR space and (unless disabled) the node manager.  Each failure
+ * path unwinds everything set up before it, in reverse order. */
+static int __init ieee1394_init(void)
+{
+	int i, ret;
+
+	skb_queue_head_init(&hpsbpkt_queue);
+
+	/* non-fatal error */
+	if (hpsb_init_config_roms()) {
+		HPSB_ERR("Failed to initialize some config rom entries.\n");
+		HPSB_ERR("Some features may not be available\n");
+	}
+
+	khpsbpkt_pid = kernel_thread(hpsbpkt_thread, NULL, CLONE_KERNEL);
+	if (khpsbpkt_pid < 0) {
+		HPSB_ERR("Failed to start hpsbpkt thread!\n");
+		ret = -ENOMEM;
+		goto exit_cleanup_config_roms;
+	}
+
+	/* claim all 256 minors of major 171 (see ieee1394_core.h) */
+	if (register_chrdev_region(IEEE1394_CORE_DEV, 256, "ieee1394")) {
+		HPSB_ERR("unable to register character device major %d!\n", IEEE1394_MAJOR);
+		ret = -ENODEV;
+		goto exit_release_kernel_thread;
+	}
+
+	/* actually this is a non-fatal error */
+	ret = devfs_mk_dir("ieee1394");
+	if (ret < 0) {
+		HPSB_ERR("unable to make devfs dir for device major %d!\n", IEEE1394_MAJOR);
+		goto release_chrdev;
+	}
+
+	ret = bus_register(&ieee1394_bus_type);
+	if (ret < 0) {
+		HPSB_INFO("bus register failed");
+		goto release_devfs;
+	}
+
+	for (i = 0; fw_bus_attrs[i]; i++) {
+		ret = bus_create_file(&ieee1394_bus_type, fw_bus_attrs[i]);
+		if (ret < 0) {
+			/* remove the attrs created so far, then the bus */
+			while (i >= 0) {
+				bus_remove_file(&ieee1394_bus_type,
+						fw_bus_attrs[i--]);
+			}
+			bus_unregister(&ieee1394_bus_type);
+			goto release_devfs;
+		}
+	}
+
+	ret = class_register(&hpsb_host_class);
+	if (ret < 0)
+		goto release_all_bus;
+
+	hpsb_protocol_class = class_simple_create(THIS_MODULE, "ieee1394_protocol");
+	if (IS_ERR(hpsb_protocol_class)) {
+		ret = PTR_ERR(hpsb_protocol_class);
+		goto release_class_host;
+	}
+
+	ret = init_csr();
+	if (ret) {
+		HPSB_INFO("init csr failed");
+		ret = -ENOMEM;
+		goto release_class_protocol;
+	}
+
+	if (disable_nodemgr) {
+		HPSB_INFO("nodemgr and IRM functionality disabled");
+		/* We shouldn't contend for IRM with nodemgr disabled, since
+		   nodemgr implements functionality required of ieee1394a-2000
+		   IRMs */
+		hpsb_disable_irm = 1;
+                      
+		return 0;
+	}
+
+	if (hpsb_disable_irm) {
+		HPSB_INFO("IRM functionality disabled");
+	}
+
+	ret = init_ieee1394_nodemgr();
+	if (ret < 0) {
+		HPSB_INFO("init nodemgr failed");
+		goto cleanup_csr;
+	}
+
+	return 0;
+
+cleanup_csr:
+	cleanup_csr();
+release_class_protocol:
+	class_simple_destroy(hpsb_protocol_class);
+release_class_host:
+	class_unregister(&hpsb_host_class);
+release_all_bus:
+	for (i = 0; fw_bus_attrs[i]; i++)
+		bus_remove_file(&ieee1394_bus_type, fw_bus_attrs[i]);
+	bus_unregister(&ieee1394_bus_type);
+release_devfs:
+	devfs_remove("ieee1394");
+release_chrdev:
+	unregister_chrdev_region(IEEE1394_CORE_DEV, 256);
+exit_release_kernel_thread:
+	if (khpsbpkt_pid >= 0) {
+		kill_proc(khpsbpkt_pid, SIGTERM, 1);
+		wait_for_completion(&khpsbpkt_complete);
+	}
+exit_cleanup_config_roms:
+	hpsb_cleanup_config_roms();
+	return ret;
+}
+
+/* Module exit: tear everything down in (roughly) the reverse order of
+ * ieee1394_init(). */
+static void __exit ieee1394_cleanup(void)
+{
+	int i;
+
+	if (!disable_nodemgr)
+		cleanup_ieee1394_nodemgr();
+
+	cleanup_csr();
+
+	class_simple_destroy(hpsb_protocol_class);
+	class_unregister(&hpsb_host_class);
+	for (i = 0; fw_bus_attrs[i]; i++)
+		bus_remove_file(&ieee1394_bus_type, fw_bus_attrs[i]);
+	bus_unregister(&ieee1394_bus_type);
+
+	if (khpsbpkt_pid >= 0) {
+		/* set the kill flag, make it visible, then wake the
+		 * thread and wait for it to exit */
+		khpsbpkt_kill = 1;
+		mb();
+		up(&khpsbpkt_sig);
+		wait_for_completion(&khpsbpkt_complete);
+	}
+
+	hpsb_cleanup_config_roms();
+
+	unregister_chrdev_region(IEEE1394_CORE_DEV, 256);
+	devfs_remove("ieee1394");
+}
+
+module_init(ieee1394_init);
+module_exit(ieee1394_cleanup);
+
+/* Exported symbols */
+
+/** hosts.c **/
+EXPORT_SYMBOL(hpsb_alloc_host);
+EXPORT_SYMBOL(hpsb_add_host);
+EXPORT_SYMBOL(hpsb_remove_host);
+EXPORT_SYMBOL(hpsb_update_config_rom_image);
+
+/** ieee1394_core.c **/
+EXPORT_SYMBOL(hpsb_speedto_str);
+EXPORT_SYMBOL(hpsb_protocol_class);
+EXPORT_SYMBOL(hpsb_set_packet_complete_task);
+EXPORT_SYMBOL(hpsb_alloc_packet);
+EXPORT_SYMBOL(hpsb_free_packet);
+EXPORT_SYMBOL(hpsb_send_phy_config);
+EXPORT_SYMBOL(hpsb_send_packet);
+EXPORT_SYMBOL(hpsb_send_packet_and_wait);
+EXPORT_SYMBOL(hpsb_reset_bus);
+EXPORT_SYMBOL(hpsb_bus_reset);
+EXPORT_SYMBOL(hpsb_selfid_received);
+EXPORT_SYMBOL(hpsb_selfid_complete);
+EXPORT_SYMBOL(hpsb_packet_sent);
+EXPORT_SYMBOL(hpsb_packet_received);
+EXPORT_SYMBOL_GPL(hpsb_disable_irm);
+
+/** ieee1394_transactions.c **/
+EXPORT_SYMBOL(hpsb_get_tlabel);
+EXPORT_SYMBOL(hpsb_free_tlabel);
+EXPORT_SYMBOL(hpsb_make_readpacket);
+EXPORT_SYMBOL(hpsb_make_writepacket);
+EXPORT_SYMBOL(hpsb_make_streampacket);
+EXPORT_SYMBOL(hpsb_make_lockpacket);
+EXPORT_SYMBOL(hpsb_make_lock64packet);
+EXPORT_SYMBOL(hpsb_make_phypacket);
+EXPORT_SYMBOL(hpsb_make_isopacket);
+EXPORT_SYMBOL(hpsb_read);
+EXPORT_SYMBOL(hpsb_write);
+EXPORT_SYMBOL(hpsb_lock);
+EXPORT_SYMBOL(hpsb_packet_success);
+
+/** highlevel.c **/
+EXPORT_SYMBOL(hpsb_register_highlevel);
+EXPORT_SYMBOL(hpsb_unregister_highlevel);
+EXPORT_SYMBOL(hpsb_register_addrspace);
+EXPORT_SYMBOL(hpsb_unregister_addrspace);
+EXPORT_SYMBOL(hpsb_allocate_and_register_addrspace);
+EXPORT_SYMBOL(hpsb_listen_channel);
+EXPORT_SYMBOL(hpsb_unlisten_channel);
+EXPORT_SYMBOL(hpsb_get_hostinfo);
+EXPORT_SYMBOL(hpsb_create_hostinfo);
+EXPORT_SYMBOL(hpsb_destroy_hostinfo);
+EXPORT_SYMBOL(hpsb_set_hostinfo_key);
+EXPORT_SYMBOL(hpsb_get_hostinfo_bykey);
+EXPORT_SYMBOL(hpsb_set_hostinfo);
+EXPORT_SYMBOL(highlevel_add_host);
+EXPORT_SYMBOL(highlevel_remove_host);
+EXPORT_SYMBOL(highlevel_host_reset);
+
+/** nodemgr.c **/
+EXPORT_SYMBOL(hpsb_node_fill_packet);
+EXPORT_SYMBOL(hpsb_node_write);
+EXPORT_SYMBOL(hpsb_register_protocol);
+EXPORT_SYMBOL(hpsb_unregister_protocol);
+EXPORT_SYMBOL(ieee1394_bus_type);
+EXPORT_SYMBOL(nodemgr_for_each_host);
+
+/** csr.c **/
+EXPORT_SYMBOL(hpsb_update_config_rom);
+
+/** dma.c **/
+EXPORT_SYMBOL(dma_prog_region_init);
+EXPORT_SYMBOL(dma_prog_region_alloc);
+EXPORT_SYMBOL(dma_prog_region_free);
+EXPORT_SYMBOL(dma_region_init);
+EXPORT_SYMBOL(dma_region_alloc);
+EXPORT_SYMBOL(dma_region_free);
+EXPORT_SYMBOL(dma_region_sync_for_cpu);
+EXPORT_SYMBOL(dma_region_sync_for_device);
+EXPORT_SYMBOL(dma_region_mmap);
+EXPORT_SYMBOL(dma_region_offset_to_bus);
+
+/** iso.c **/
+EXPORT_SYMBOL(hpsb_iso_xmit_init);
+EXPORT_SYMBOL(hpsb_iso_recv_init);
+EXPORT_SYMBOL(hpsb_iso_xmit_start);
+EXPORT_SYMBOL(hpsb_iso_recv_start);
+EXPORT_SYMBOL(hpsb_iso_recv_listen_channel);
+EXPORT_SYMBOL(hpsb_iso_recv_unlisten_channel);
+EXPORT_SYMBOL(hpsb_iso_recv_set_channel_mask);
+EXPORT_SYMBOL(hpsb_iso_stop);
+EXPORT_SYMBOL(hpsb_iso_shutdown);
+EXPORT_SYMBOL(hpsb_iso_xmit_queue_packet);
+EXPORT_SYMBOL(hpsb_iso_xmit_sync);
+EXPORT_SYMBOL(hpsb_iso_recv_release_packets);
+EXPORT_SYMBOL(hpsb_iso_n_ready);
+EXPORT_SYMBOL(hpsb_iso_packet_sent);
+EXPORT_SYMBOL(hpsb_iso_packet_received);
+EXPORT_SYMBOL(hpsb_iso_wake);
+EXPORT_SYMBOL(hpsb_iso_recv_flush);
+
+/** csr1212.c **/
+EXPORT_SYMBOL(csr1212_create_csr);
+EXPORT_SYMBOL(csr1212_init_local_csr);
+EXPORT_SYMBOL(csr1212_new_immediate);
+EXPORT_SYMBOL(csr1212_new_directory);
+EXPORT_SYMBOL(csr1212_associate_keyval);
+EXPORT_SYMBOL(csr1212_attach_keyval_to_directory);
+EXPORT_SYMBOL(csr1212_new_string_descriptor_leaf);
+EXPORT_SYMBOL(csr1212_detach_keyval_from_directory);
+EXPORT_SYMBOL(csr1212_release_keyval);
+EXPORT_SYMBOL(csr1212_destroy_csr);
+EXPORT_SYMBOL(csr1212_read);
+EXPORT_SYMBOL(csr1212_generate_csr_image);
+EXPORT_SYMBOL(csr1212_parse_keyval);
+EXPORT_SYMBOL(csr1212_parse_csr);
+EXPORT_SYMBOL(_csr1212_read_keyval);
+EXPORT_SYMBOL(_csr1212_destroy_keyval);
diff --git a/drivers/ieee1394/ieee1394_core.h b/drivers/ieee1394/ieee1394_core.h
new file mode 100644
index 000000000000..c4b4408e2e05
--- /dev/null
+++ b/drivers/ieee1394/ieee1394_core.h
@@ -0,0 +1,228 @@
+
+#ifndef _IEEE1394_CORE_H
+#define _IEEE1394_CORE_H
+
+#include <linux/slab.h>
+#include <linux/devfs_fs_kernel.h>
+#include <asm/atomic.h>
+#include <asm/semaphore.h>
+#include "hosts.h"
+
+
+struct hpsb_packet {
+        /* This struct is basically read-only for hosts with the exception of
+         * the data buffer contents and xnext - see below. */
+
+	/* This can be used for host driver internal linking.
+	 *
+	 * NOTE: This must be left in init state when the driver is done
+	 * with it (e.g. by using list_del_init()), since the core does
+	 * some sanity checks to make sure the packet is not on a
+	 * driver_list when free'ing it. */
+	struct list_head driver_list;
+
+        nodeid_t node_id;
+
+        /* Async and Iso types should be clear, raw means send-as-is, do not
+         * CRC!  Byte swapping shall still be done in this case. */
+        enum { hpsb_async, hpsb_iso, hpsb_raw } __attribute__((packed)) type;
+
+        /* Okay, this is core internal and a no care for hosts.
+         * queued   = queued for sending
+         * pending  = sent, waiting for response
+         * complete = processing completed, successful or not
+         */
+        enum {
+                hpsb_unused, hpsb_queued, hpsb_pending, hpsb_complete
+        } __attribute__((packed)) state;
+
+        /* These are core internal. */
+        signed char tlabel;
+        char ack_code;
+        char tcode;
+
+        /* set on requests that will get a response packet */
+        unsigned expect_response:1;
+        /* set when nobody waits for completion; core frees it when done */
+        unsigned no_waiter:1;
+
+        /* Speed to transmit with: 0 = 100Mbps, 1 = 200Mbps, 2 = 400Mbps */
+        unsigned speed_code:2;
+
+        /*
+         * *header and *data are guaranteed to be 32-bit DMAable and may be
+         * overwritten to allow in-place byte swapping.  Neither of these is
+         * CRCed (the sizes also don't include CRC), but contain space for at
+         * least one additional quadlet to allow in-place CRCing.  The memory is
+         * also guaranteed to be DMA mappable.
+         */
+        quadlet_t *header;
+        quadlet_t *data;
+        size_t header_size;
+        size_t data_size;
+
+
+        struct hpsb_host *host;
+        /* subsystem generation the packet was created for, from
+         * get_hpsb_generation() */
+        unsigned int generation;
+
+	/* reference count */
+	atomic_t refcnt;
+
+	/* Function (and possible data to pass to it) to call when this
+	 * packet is completed.  */
+	void (*complete_routine)(void *);
+	void *complete_data;
+
+	/* XXX This is just a hack at the moment */
+	struct sk_buff *skb;
+
+        /* Store jiffies for implementing bus timeouts. */
+        unsigned long sendtime;
+
+        /* NOTE(review): presumably backing storage for *header for
+         * standard-sized headers - confirm against hpsb_alloc_packet() */
+        quadlet_t embedded_header[5];
+};
+
+/* Set a task for when a packet completes */
+void hpsb_set_packet_complete_task(struct hpsb_packet *packet,
+		void (*routine)(void *), void *data);
+
+/* Return the hpsb_packet that embeds the given driver_list node. */
+static inline struct hpsb_packet *driver_packet(struct list_head *l)
+{
+	return list_entry(l, struct hpsb_packet, driver_list);
+}
+
+void abort_timedouts(unsigned long __opaque);
+
+struct hpsb_packet *hpsb_alloc_packet(size_t data_size);
+void hpsb_free_packet(struct hpsb_packet *packet);
+
+
+/*
+ * Generation counter for the complete 1394 subsystem.  Generation gets
+ * incremented on every change in the subsystem (e.g. bus reset).
+ *
+ * Use the functions, not the variable.
+ */
+static inline unsigned int get_hpsb_generation(struct hpsb_host *host)
+{
+        /* atomic snapshot of the per-host generation counter */
+        return atomic_read(&host->generation);
+}
+
+/*
+ * Send a PHY configuration packet, return 0 on success, negative
+ * errno on failure.
+ */
+int hpsb_send_phy_config(struct hpsb_host *host, int rootid, int gapcnt);
+
+/*
+ * Queue packet for transmitting, return 0 on success, negative errno
+ * on failure.
+ */
+int hpsb_send_packet(struct hpsb_packet *packet);
+
+/*
+ * Queue packet for transmitting, and block until the transaction
+ * completes. Return 0 on success, negative errno on failure.
+ */
+int hpsb_send_packet_and_wait(struct hpsb_packet *packet);
+
+/* Initiate bus reset on the given host.  Returns 1 if bus reset already in
+ * progress, 0 otherwise. */
+int hpsb_reset_bus(struct hpsb_host *host, int type);
+
+/*
+ * The following functions are exported for host driver module usage.  All of
+ * them are safe to use in interrupt contexts, although some are quite
+ * complicated so you may want to run them in bottom halves instead of calling
+ * them directly.
+ */
+
+/* Notify a bus reset to the core.  Returns 1 if bus reset already in progress,
+ * 0 otherwise. */
+int hpsb_bus_reset(struct hpsb_host *host);
+
+/*
+ * Hand over received selfid packet to the core.  Complement check (second
+ * quadlet is complement of first) is expected to be done and succesful.
+ */
+void hpsb_selfid_received(struct hpsb_host *host, quadlet_t sid);
+
+/*
+ * Notify completion of SelfID stage to the core and report new physical ID
+ * and whether host is root now.
+ */
+void hpsb_selfid_complete(struct hpsb_host *host, int phyid, int isroot);
+
+/*
+ * Notify core of sending a packet.  Ackcode is the ack code returned for async
+ * transmits or ACKX_SEND_ERROR if the transmission failed completely; ACKX_NONE
+ * for other cases (internal errors that don't justify a panic).  Safe to call
+ * from within a transmit packet routine.
+ */
+void hpsb_packet_sent(struct hpsb_host *host, struct hpsb_packet *packet,
+                      int ackcode);
+
+/*
+ * Hand over received packet to the core.  The contents of data are expected to
+ * be the full packet but with the CRCs left out (data block follows header
+ * immediately), with the header (i.e. the first four quadlets) in machine byte
+ * order and the data block in big endian.  *data can be safely overwritten
+ * after this call.
+ *
+ * If the packet is a write request, write_acked is to be set to true if it was
+ * ack_complete'd already, false otherwise.  This arg is ignored for any other
+ * packet type.
+ */
+void hpsb_packet_received(struct hpsb_host *host, quadlet_t *data, size_t size,
+                          int write_acked);
+
+
+/*
+ * CHARACTER DEVICE DISPATCHING
+ *
+ * All ieee1394 character device drivers share the same major number
+ * (major 171).  The 256 minor numbers are allocated to the various
+ * task-specific interfaces (raw1394, video1394, dv1394, etc) in
+ * blocks of 16.
+ *
+ * The core ieee1394.o module allocates the device number region
+ * 171:0-255, the various drivers must then cdev_add() their cdev
+ * objects to handle their respective sub-regions.
+ *
+ * Minor device number block allocations:
+ *
+ * Block 0  (  0- 15)  raw1394
+ * Block 1  ( 16- 31)  video1394
+ * Block 2  ( 32- 47)  dv1394
+ *
+ * Blocks 3-14 free for future allocation
+ *
+ * Block 15 (240-255)  reserved for drivers under development, etc.
+ */
+
+#define IEEE1394_MAJOR               171
+
+#define IEEE1394_MINOR_BLOCK_RAW1394       0
+#define IEEE1394_MINOR_BLOCK_VIDEO1394     1
+#define IEEE1394_MINOR_BLOCK_DV1394        2
+#define IEEE1394_MINOR_BLOCK_AMDTP         3
+#define IEEE1394_MINOR_BLOCK_EXPERIMENTAL 15
+
+#define IEEE1394_CORE_DEV		MKDEV(IEEE1394_MAJOR, 0)
+#define IEEE1394_RAW1394_DEV		MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_RAW1394 * 16)
+#define IEEE1394_VIDEO1394_DEV		MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_VIDEO1394 * 16)
+#define IEEE1394_DV1394_DEV		MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_DV1394 * 16)
+#define IEEE1394_AMDTP_DEV		MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_AMDTP * 16)
+#define IEEE1394_EXPERIMENTAL_DEV	MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_EXPERIMENTAL * 16)
+
+/* return the index (within a minor number block) of a file */
+/* Return the index of @file within its 16-minor device block, as
+ * stored in the inode's container index (i_cindex). */
+static inline unsigned char ieee1394_file_to_instance(struct file *file)
+{
+	return file->f_dentry->d_inode->i_cindex;
+}
+
+extern int hpsb_disable_irm;
+
+/* Our sysfs bus entry */
+extern struct bus_type ieee1394_bus_type;
+extern struct class hpsb_host_class;
+extern struct class_simple *hpsb_protocol_class;
+
+#endif /* _IEEE1394_CORE_H */
diff --git a/drivers/ieee1394/ieee1394_hotplug.h b/drivers/ieee1394/ieee1394_hotplug.h
new file mode 100644
index 000000000000..5be70d31b007
--- /dev/null
+++ b/drivers/ieee1394/ieee1394_hotplug.h
@@ -0,0 +1,33 @@
+#ifndef _IEEE1394_HOTPLUG_H
+#define _IEEE1394_HOTPLUG_H
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/mod_devicetable.h>
+
+/* Unit spec id and sw version entry for some protocols */
+#define AVC_UNIT_SPEC_ID_ENTRY		0x0000A02D
+#define AVC_SW_VERSION_ENTRY		0x00010001
+#define CAMERA_UNIT_SPEC_ID_ENTRY	0x0000A02D
+#define CAMERA_SW_VERSION_ENTRY		0x00000100
+
+/* Check to make sure this all isn't already defined */
+#ifndef IEEE1394_MATCH_VENDOR_ID
+
+#define IEEE1394_MATCH_VENDOR_ID	0x0001
+#define IEEE1394_MATCH_MODEL_ID		0x0002
+#define IEEE1394_MATCH_SPECIFIER_ID	0x0004
+#define IEEE1394_MATCH_VERSION		0x0008
+
+/* Device match table entry for IEEE 1394 unit directories. */
+struct ieee1394_device_id {
+	u32 match_flags;	/* OR of the IEEE1394_MATCH_* bits above */
+	u32 vendor_id;
+	u32 model_id;
+	u32 specifier_id;
+	u32 version;
+	void *driver_data;	/* opaque data for the matching driver */
+};
+
+#endif
+
+#endif /* _IEEE1394_HOTPLUG_H */
diff --git a/drivers/ieee1394/ieee1394_transactions.c b/drivers/ieee1394/ieee1394_transactions.c
new file mode 100644
index 000000000000..09908b9564d8
--- /dev/null
+++ b/drivers/ieee1394/ieee1394_transactions.c
@@ -0,0 +1,601 @@
+/*
+ * IEEE 1394 for Linux
+ *
+ * Transaction support.
+ *
+ * Copyright (C) 1999 Andreas E. Bombe
+ *
+ * This code is licensed under the GPL.  See the file COPYING in the root
+ * directory of the kernel sources for details.
+ */
+
+#include <linux/sched.h>
+#include <linux/bitops.h>
+#include <linux/smp_lock.h>
+#include <linux/interrupt.h>
+
+#include <asm/errno.h>
+
+#include "ieee1394.h"
+#include "ieee1394_types.h"
+#include "hosts.h"
+#include "ieee1394_core.h"
+#include "highlevel.h"
+#include "nodemgr.h"
+
+
+#define PREP_ASYNC_HEAD_ADDRESS(tc) \
+        packet->tcode = tc; \
+        packet->header[0] = (packet->node_id << 16) | (packet->tlabel << 10) \
+                | (1 << 8) | (tc << 4); \
+        packet->header[1] = (packet->host->node_id << 16) | (addr >> 32); \
+        packet->header[2] = addr & 0xffffffff
+
+
+/* Prepare @packet as a quadlet read request for 48-bit offset @addr
+ * (the node part of the address comes from packet->node_id). */
+static void fill_async_readquad(struct hpsb_packet *packet, u64 addr)
+{
+        PREP_ASYNC_HEAD_ADDRESS(TCODE_READQ);
+        packet->header_size = 12;
+        packet->data_size = 0;
+        packet->expect_response = 1;
+}
+
+/* Prepare @packet as a block read request of @length bytes; the length
+ * occupies the upper 16 bits of the fourth header quadlet.  No payload
+ * is sent with a read request, hence data_size = 0. */
+static void fill_async_readblock(struct hpsb_packet *packet, u64 addr, int length)
+{
+        PREP_ASYNC_HEAD_ADDRESS(TCODE_READB);
+        packet->header[3] = length << 16;
+        packet->header_size = 16;
+        packet->data_size = 0;
+        packet->expect_response = 1;
+}
+
+/* Prepare @packet as a quadlet write request; the datum travels in the
+ * fourth header quadlet rather than in a separate payload. */
+static void fill_async_writequad(struct hpsb_packet *packet, u64 addr, quadlet_t data)
+{
+        PREP_ASYNC_HEAD_ADDRESS(TCODE_WRITEQ);
+        packet->header[3] = data;
+        packet->header_size = 16;
+        packet->data_size = 0;
+        packet->expect_response = 1;
+}
+
+/* Prepare @packet as a block write request of @length bytes.  The
+ * payload size is rounded up to the next quadlet boundary; the caller
+ * is responsible for zeroing any padding bytes. */
+static void fill_async_writeblock(struct hpsb_packet *packet, u64 addr, int length)
+{
+        PREP_ASYNC_HEAD_ADDRESS(TCODE_WRITEB);
+        packet->header[3] = length << 16;
+        packet->header_size = 16;
+        packet->expect_response = 1;
+        /* round payload up to a multiple of 4 bytes */
+        packet->data_size = length + (length % 4 ? 4 - (length % 4) : 0);
+}
+
+/* Prepare @packet as a lock request; @extcode selects the lock
+ * operation (compare-swap, fetch-add, ...) and shares the fourth
+ * header quadlet with the payload length. */
+static void fill_async_lock(struct hpsb_packet *packet, u64 addr, int extcode,
+                     int length)
+{
+        PREP_ASYNC_HEAD_ADDRESS(TCODE_LOCK_REQUEST);
+        packet->header[3] = (length << 16) | extcode;
+        packet->header_size = 16;
+        packet->data_size = length;
+        packet->expect_response = 1;
+}
+
+/* Prepare @packet as an isochronous data packet: single-quadlet header
+ * carrying length, tag, channel, tcode and sync fields; no response is
+ * expected for iso traffic. */
+static void fill_iso_packet(struct hpsb_packet *packet, int length, int channel,
+                     int tag, int sync)
+{
+        packet->header[0] = (length << 16) | (tag << 14) | (channel << 8)
+                | (TCODE_ISO_DATA << 4) | sync;
+
+        packet->header_size = 4;
+        packet->data_size = length;
+        packet->type = hpsb_iso;
+        packet->tcode = TCODE_ISO_DATA;
+}
+
+/* Prepare @packet as a PHY configuration packet: the second quadlet is
+ * the bitwise inverse of the first (PHY packet format), no CRC is
+ * appended, and transmission is forced to the base 100Mbps rate. */
+static void fill_phy_packet(struct hpsb_packet *packet, quadlet_t data)
+{
+        packet->header[0] = data;
+        packet->header[1] = ~data;
+        packet->header_size = 8;
+        packet->data_size = 0;
+        packet->expect_response = 0;
+        packet->type = hpsb_raw;             /* No CRC added */
+        packet->speed_code = IEEE1394_SPEED_100; /* Force speed to be 100Mbps */
+}
+
+/* Prepare @packet as an asynchronous stream packet.  The header layout
+ * matches an iso packet, but the packet is queued on the async path
+ * (packet->type = hpsb_async). */
+static void fill_async_stream_packet(struct hpsb_packet *packet, int length,
+				     int channel, int tag, int sync)
+{
+	packet->header[0] = (length << 16) | (tag << 14) | (channel << 8)
+	                  | (TCODE_STREAM_DATA << 4) | sync;
+
+	packet->header_size = 4;
+	packet->data_size = length;
+	packet->type = hpsb_async;
+	packet->tcode = TCODE_ISO_DATA;
+}
+
+/**
+ * hpsb_get_tlabel - allocate a transaction label
+ * @packet: the packet whose tlabel/tpool we set
+ *
+ * Every asynchronous transaction on the 1394 bus needs a transaction
+ * label to match the response to the request.  This label has to be
+ * different from any other transaction label in an outstanding request to
+ * the same node to make matching possible without ambiguity.
+ *
+ * There are 64 different tlabels, so an allocated tlabel has to be freed
+ * with hpsb_free_tlabel() after the transaction is complete (unless it's
+ * reused again for the same target node).
+ *
+ * Return value: Zero on success, otherwise non-zero. A non-zero return
+ * generally means there are no available tlabels. If this is called out
+ * of interrupt or atomic context, then it will sleep until can return a
+ * tlabel.
+ */
+int hpsb_get_tlabel(struct hpsb_packet *packet)
+{
+	unsigned long flags;
+	struct hpsb_tlabel_pool *tp;
+
+	/* one pool per target node on this host */
+	tp = &packet->host->tpool[packet->node_id & NODE_MASK];
+
+	/* the semaphore counts free labels; in atomic context we may not
+	 * sleep, so fail immediately when none are available */
+	if (irqs_disabled() || in_atomic()) {
+		if (down_trylock(&tp->count))
+			return 1;
+	} else {
+		down(&tp->count);
+	}
+
+	spin_lock_irqsave(&tp->lock, flags);
+
+	/* scan for a free label starting at the rotating cursor, wrapping
+	 * to the beginning of the bitmap if necessary */
+	packet->tlabel = find_next_zero_bit(tp->pool, 64, tp->next);
+	if (packet->tlabel > 63)
+		packet->tlabel = find_first_zero_bit(tp->pool, 64);
+	tp->next = (packet->tlabel + 1) % 64;
+	/* Should _never_ happen */
+	BUG_ON(test_and_set_bit(packet->tlabel, tp->pool));
+	tp->allocations++;
+	spin_unlock_irqrestore(&tp->lock, flags);
+
+	return 0;
+}
+
+/**
+ * hpsb_free_tlabel - free an allocated transaction label
+ * @packet: packet whose tlabel/tpool needs to be cleared
+ *
+ * Frees the transaction label allocated with hpsb_get_tlabel().  The
+ * tlabel has to be freed after the transaction is complete (i.e. response
+ * was received for a split transaction or packet was sent for a unified
+ * transaction).
+ *
+ * A tlabel must not be freed twice.
+ */
+void hpsb_free_tlabel(struct hpsb_packet *packet)
+{
+        unsigned long flags;
+	struct hpsb_tlabel_pool *tp;
+
+	tp = &packet->host->tpool[packet->node_id & NODE_MASK];
+
+	BUG_ON(packet->tlabel > 63 || packet->tlabel < 0);
+
+        spin_lock_irqsave(&tp->lock, flags);
+	/* double free would trip this BUG_ON */
+	BUG_ON(!test_and_clear_bit(packet->tlabel, tp->pool));
+        spin_unlock_irqrestore(&tp->lock, flags);
+
+	/* release one count so a sleeping hpsb_get_tlabel() can proceed */
+	up(&tp->count);
+}
+
+
+
+/* Translate the ack code (and, for ACK_PENDING, the response rcode in
+ * header[1]) of a completed packet into a conventional errno value.
+ * Returns 0 on success; see ieee1394_transactions.h for the mapping. */
+int hpsb_packet_success(struct hpsb_packet *packet)
+{
+        switch (packet->ack_code) {
+        case ACK_PENDING:
+                /* split transaction: inspect the rcode of the response */
+                switch ((packet->header[1] >> 12) & 0xf) {
+                case RCODE_COMPLETE:
+                        return 0;
+                case RCODE_CONFLICT_ERROR:
+                        return -EAGAIN;
+                case RCODE_DATA_ERROR:
+                        return -EREMOTEIO;
+                case RCODE_TYPE_ERROR:
+                        return -EACCES;
+                case RCODE_ADDRESS_ERROR:
+                        return -EINVAL;
+                default:
+                        HPSB_ERR("received reserved rcode %d from node %d",
+                                 (packet->header[1] >> 12) & 0xf,
+                                 packet->node_id);
+                        return -EAGAIN;
+                }
+                HPSB_PANIC("reached unreachable code 1 in %s", __FUNCTION__);
+
+        case ACK_BUSY_X:
+        case ACK_BUSY_A:
+        case ACK_BUSY_B:
+                return -EBUSY;
+
+        case ACK_TYPE_ERROR:
+                return -EACCES;
+
+        case ACK_COMPLETE:
+                /* unified transaction; only valid for write requests */
+                if (packet->tcode == TCODE_WRITEQ
+                    || packet->tcode == TCODE_WRITEB) {
+                        return 0;
+                } else {
+                        HPSB_ERR("impossible ack_complete from node %d "
+                                 "(tcode %d)", packet->node_id, packet->tcode);
+                        return -EAGAIN;
+                }
+
+
+        case ACK_DATA_ERROR:
+                /* only meaningful for requests that carried a payload */
+                if (packet->tcode == TCODE_WRITEB
+                    || packet->tcode == TCODE_LOCK_REQUEST) {
+                        return -EAGAIN;
+                } else {
+                        HPSB_ERR("impossible ack_data_error from node %d "
+                                 "(tcode %d)", packet->node_id, packet->tcode);
+                        return -EAGAIN;
+                }
+
+        case ACK_ADDRESS_ERROR:
+                return -EINVAL;
+
+        case ACK_TARDY:
+        case ACK_CONFLICT_ERROR:
+        case ACKX_NONE:
+        case ACKX_SEND_ERROR:
+        case ACKX_ABORTED:
+        case ACKX_TIMEOUT:
+                /* error while sending */
+                return -EAGAIN;
+
+        default:
+                HPSB_ERR("got invalid ack %d from node %d (tcode %d)",
+                         packet->ack_code, packet->node_id, packet->tcode);
+                return -EAGAIN;
+        }
+
+        HPSB_PANIC("reached unreachable code 2 in %s", __FUNCTION__);
+}
+
+/* Allocate and initialize a read request packet for @length bytes at
+ * @addr on @node.  A quadlet read is used when length == 4, a block
+ * read otherwise.  Returns NULL on allocation or tlabel exhaustion;
+ * on success the caller owns the packet (and its tlabel). */
+struct hpsb_packet *hpsb_make_readpacket(struct hpsb_host *host, nodeid_t node,
+					 u64 addr, size_t length)
+{
+        struct hpsb_packet *packet;
+
+	if (length == 0)
+		return NULL;
+
+	packet = hpsb_alloc_packet(length);
+	if (!packet)
+		return NULL;
+
+	packet->host = host;
+	packet->node_id = node;
+
+	if (hpsb_get_tlabel(packet)) {
+		hpsb_free_packet(packet);
+		return NULL;
+	}
+
+	if (length == 4)
+		fill_async_readquad(packet, addr);
+	else
+		fill_async_readblock(packet, addr, length);
+
+	return packet;
+}
+
+/* Allocate and initialize a write request packet.  @buffer may be NULL,
+ * in which case the payload is left for the caller to fill (a quadlet
+ * write then carries 0).  Returns NULL on failure. */
+struct hpsb_packet *hpsb_make_writepacket (struct hpsb_host *host, nodeid_t node,
+					   u64 addr, quadlet_t *buffer, size_t length)
+{
+	struct hpsb_packet *packet;
+
+	if (length == 0)
+		return NULL;
+
+	packet = hpsb_alloc_packet(length);
+	if (!packet)
+		return NULL;
+
+	if (length % 4) { /* zero padding bytes */
+		packet->data[length >> 2] = 0;
+	}
+	packet->host = host;
+	packet->node_id = node;
+
+	if (hpsb_get_tlabel(packet)) {
+		hpsb_free_packet(packet);
+		return NULL;
+	}
+
+	if (length == 4) {
+		fill_async_writequad(packet, addr, buffer ? *buffer : 0);
+	} else {
+		fill_async_writeblock(packet, addr, length);
+		if (buffer)
+			memcpy(packet->data, buffer, length);
+	}
+
+	return packet;
+}
+
+/* Allocate and initialize an async stream packet on @channel with the
+ * given tag/sync fields.  @buffer may be NULL to leave the payload for
+ * the caller to fill.  Returns NULL on failure. */
+struct hpsb_packet *hpsb_make_streampacket(struct hpsb_host *host, u8 *buffer, int length,
+                                           int channel, int tag, int sync)
+{
+	struct hpsb_packet *packet;
+
+	if (length == 0)
+		return NULL;
+
+	packet = hpsb_alloc_packet(length);
+	if (!packet)
+		return NULL;
+
+	if (length % 4) { /* zero padding bytes */
+		packet->data[length >> 2] = 0;
+	}
+	packet->host = host;
+
+	if (hpsb_get_tlabel(packet)) {
+		hpsb_free_packet(packet);
+		return NULL;
+	}
+
+	fill_async_stream_packet(packet, length, channel, tag, sync);
+	if (buffer)
+		memcpy(packet->data, buffer, length);
+
+	return packet;
+}
+
+/* Allocate and initialize a 32-bit lock request.  For fetch-add style
+ * extcodes only one operand (*data) is sent; all other extcodes send
+ * both @arg and *data.  @data may be NULL to leave the payload unset.
+ * Returns NULL on failure. */
+struct hpsb_packet *hpsb_make_lockpacket(struct hpsb_host *host, nodeid_t node,
+                                         u64 addr, int extcode, quadlet_t *data,
+					 quadlet_t arg)
+{
+	struct hpsb_packet *p;
+	u32 length;
+
+	/* 8 bytes covers the largest (two-operand) payload */
+	p = hpsb_alloc_packet(8);
+	if (!p) return NULL;
+
+	p->host = host;
+	p->node_id = node;
+	if (hpsb_get_tlabel(p)) {
+		hpsb_free_packet(p);
+		return NULL;
+	}
+
+	switch (extcode) {
+	case EXTCODE_FETCH_ADD:
+	case EXTCODE_LITTLE_ADD:
+		length = 4;
+		if (data)
+			p->data[0] = *data;
+		break;
+	default:
+		length = 8;
+		if (data) {
+			p->data[0] = arg;
+			p->data[1] = *data;
+		}
+		break;
+	}
+	fill_async_lock(p, addr, extcode, length);
+
+	return p;
+}
+
+/* Allocate and initialize a 64-bit lock request; each octlet operand is
+ * split into two big-endian quadlets (high word first).  Same operand
+ * rules as hpsb_make_lockpacket().  Returns NULL on failure. */
+struct hpsb_packet *hpsb_make_lock64packet(struct hpsb_host *host, nodeid_t node,
+                                           u64 addr, int extcode, octlet_t *data,
+					   octlet_t arg)
+{
+	struct hpsb_packet *p;
+	u32 length;
+
+	/* 16 bytes covers the largest (two-operand) payload */
+	p = hpsb_alloc_packet(16);
+	if (!p) return NULL;
+
+	p->host = host;
+	p->node_id = node;
+	if (hpsb_get_tlabel(p)) {
+		hpsb_free_packet(p);
+		return NULL;
+	}
+
+	switch (extcode) {
+	case EXTCODE_FETCH_ADD:
+	case EXTCODE_LITTLE_ADD:
+		length = 8;
+		if (data) {
+			p->data[0] = *data >> 32;
+			p->data[1] = *data & 0xffffffff;
+		}
+		break;
+	default:
+		length = 16;
+		if (data) {
+			p->data[0] = arg >> 32;
+			p->data[1] = arg & 0xffffffff;
+			p->data[2] = *data >> 32;
+			p->data[3] = *data & 0xffffffff;
+		}
+		break;
+	}
+	fill_async_lock(p, addr, extcode, length);
+
+	return p;
+}
+
+/* Allocate and initialize a PHY packet carrying @data (no payload, no
+ * tlabel needed since no response is expected).  Returns NULL on OOM. */
+struct hpsb_packet *hpsb_make_phypacket(struct hpsb_host *host,
+                                        quadlet_t data)
+{
+        struct hpsb_packet *p;
+
+        p = hpsb_alloc_packet(0);
+        if (!p) return NULL;
+
+        p->host = host;
+        fill_phy_packet(p, data);
+
+        return p;
+}
+
+/* Allocate and initialize an isochronous packet, stamping it with the
+ * host's current bus generation.  Returns NULL on OOM. */
+struct hpsb_packet *hpsb_make_isopacket(struct hpsb_host *host,
+					int length, int channel,
+					int tag, int sync)
+{
+	struct hpsb_packet *p;
+
+	p = hpsb_alloc_packet(length);
+	if (!p) return NULL;
+
+	p->host = host;
+	fill_iso_packet(p, length, channel, tag, sync);
+
+	p->generation = get_hpsb_generation(host);
+
+	return p;
+}
+
+/*
+ * FIXME - these functions should probably read from / write to user space to
+ * avoid in kernel buffers for user space callers
+ */
+
+/* Synchronous read of @length bytes from @addr on @node into @buffer.
+ * Sleeps until the transaction completes; must not be called from
+ * interrupt context.  Returns 0 on success or a negative errno as
+ * mapped by hpsb_packet_success(). */
+int hpsb_read(struct hpsb_host *host, nodeid_t node, unsigned int generation,
+	      u64 addr, quadlet_t *buffer, size_t length)
+{
+        struct hpsb_packet *packet;
+        int retval = 0;
+
+        if (length == 0)
+                return -EINVAL;
+
+	BUG_ON(in_interrupt()); // We can't be called in an interrupt, yet
+
+	packet = hpsb_make_readpacket(host, node, addr, length);
+
+        if (!packet) {
+                return -ENOMEM;
+        }
+
+	packet->generation = generation;
+        retval = hpsb_send_packet_and_wait(packet);
+	if (retval < 0)
+		goto hpsb_read_fail;
+
+        retval = hpsb_packet_success(packet);
+
+        if (retval == 0) {
+                /* quadlet read responses carry the datum in the header */
+                if (length == 4) {
+                        *buffer = packet->header[3];
+                } else {
+                        memcpy(buffer, packet->data, length);
+                }
+        }
+
+hpsb_read_fail:
+        hpsb_free_tlabel(packet);
+        hpsb_free_packet(packet);
+
+        return retval;
+}
+
+
+/* Synchronous write of @length bytes from @buffer to @addr on @node.
+ * Sleeps until the transaction completes; must not be called from
+ * interrupt context.  Returns 0 on success or a negative errno. */
+int hpsb_write(struct hpsb_host *host, nodeid_t node, unsigned int generation,
+	       u64 addr, quadlet_t *buffer, size_t length)
+{
+	struct hpsb_packet *packet;
+	int retval;
+
+	if (length == 0)
+		return -EINVAL;
+
+	BUG_ON(in_interrupt()); // We can't be called in an interrupt, yet
+
+	packet = hpsb_make_writepacket (host, node, addr, buffer, length);
+
+	if (!packet)
+		return -ENOMEM;
+
+	packet->generation = generation;
+        retval = hpsb_send_packet_and_wait(packet);
+	if (retval < 0)
+		goto hpsb_write_fail;
+
+        retval = hpsb_packet_success(packet);
+
+hpsb_write_fail:
+        hpsb_free_tlabel(packet);
+        hpsb_free_packet(packet);
+
+        return retval;
+}
+
+
+/* Synchronous 32-bit lock transaction at @addr on @node.  On success
+ * the old value returned by the target is stored back into *data.
+ * Must not be called from interrupt context. */
+int hpsb_lock(struct hpsb_host *host, nodeid_t node, unsigned int generation,
+	      u64 addr, int extcode, quadlet_t *data, quadlet_t arg)
+{
+        struct hpsb_packet *packet;
+        int retval = 0;
+
+	BUG_ON(in_interrupt()); // We can't be called in an interrupt, yet
+
+	packet = hpsb_make_lockpacket(host, node, addr, extcode, data, arg);
+        if (!packet)
+                return -ENOMEM;
+
+	packet->generation = generation;
+        retval = hpsb_send_packet_and_wait(packet);
+	if (retval < 0)
+		goto hpsb_lock_fail;
+
+        retval = hpsb_packet_success(packet);
+
+        if (retval == 0) {
+                *data = packet->data[0];
+        }
+
+hpsb_lock_fail:
+        hpsb_free_tlabel(packet);
+        hpsb_free_packet(packet);
+
+        return retval;
+}
+
+
+/* Send a GASP (Global Asynchronous Stream Packet) on @channel.  An
+ * 8-byte GASP header (source node ID, 24-bit specifier_id, 24-bit
+ * version) is prepended before @buffer's @length bytes.  The packet is
+ * sent asynchronously (no_waiter); returns the hpsb_send_packet result. */
+int hpsb_send_gasp(struct hpsb_host *host, int channel, unsigned int generation,
+		   quadlet_t *buffer, size_t length, u32 specifier_id,
+		   unsigned int version)
+{
+	struct hpsb_packet *packet;
+	int retval = 0;
+	u16 specifier_id_hi = (specifier_id & 0x00ffff00) >> 8;
+	u8 specifier_id_lo = specifier_id & 0xff;
+
+	HPSB_VERBOSE("Send GASP: channel = %d, length = %Zd", channel, length);
+
+	/* account for the GASP header prepended below */
+	length += 8;
+
+	packet = hpsb_make_streampacket(host, NULL, length, channel, 3, 0);
+	if (!packet)
+		return -ENOMEM;
+
+	packet->data[0] = cpu_to_be32((host->node_id << 16) | specifier_id_hi);
+	packet->data[1] = cpu_to_be32((specifier_id_lo << 24) | (version & 0x00ffffff));
+
+	memcpy(&(packet->data[2]), buffer, length - 8);
+
+	packet->generation = generation;
+
+	packet->no_waiter = 1;
+
+	retval = hpsb_send_packet(packet);
+	if (retval < 0)
+		hpsb_free_packet(packet);
+
+	return retval;
+}
diff --git a/drivers/ieee1394/ieee1394_transactions.h b/drivers/ieee1394/ieee1394_transactions.h
new file mode 100644
index 000000000000..526a43ceb496
--- /dev/null
+++ b/drivers/ieee1394/ieee1394_transactions.h
@@ -0,0 +1,64 @@
+#ifndef _IEEE1394_TRANSACTIONS_H
+#define _IEEE1394_TRANSACTIONS_H
+
+#include "ieee1394_core.h"
+
+
+/*
+ * Get and free transaction labels.
+ */
+int hpsb_get_tlabel(struct hpsb_packet *packet);
+void hpsb_free_tlabel(struct hpsb_packet *packet);
+
+struct hpsb_packet *hpsb_make_readpacket(struct hpsb_host *host, nodeid_t node,
+					 u64 addr, size_t length);
+struct hpsb_packet *hpsb_make_lockpacket(struct hpsb_host *host, nodeid_t node,
+                                         u64 addr, int extcode, quadlet_t *data,
+					 quadlet_t arg);
+struct hpsb_packet *hpsb_make_lock64packet(struct hpsb_host *host, nodeid_t node,
+                                          u64 addr, int extcode, octlet_t *data,
+					  octlet_t arg);
+struct hpsb_packet *hpsb_make_phypacket(struct hpsb_host *host,
+                                        quadlet_t data) ;
+struct hpsb_packet *hpsb_make_isopacket(struct hpsb_host *host,
+					int length, int channel,
+					int tag, int sync);
+struct hpsb_packet *hpsb_make_writepacket (struct hpsb_host *host, nodeid_t node,
+					   u64 addr, quadlet_t *buffer, size_t length);
+struct hpsb_packet *hpsb_make_streampacket(struct hpsb_host *host, u8 *buffer,
+                                           int length, int channel, int tag, int sync);
+
+/*
+ * hpsb_packet_success - Make sense of the ack and reply codes and
+ * return more convenient error codes:
+ * 0           success
+ * -EBUSY      node is busy, try again
+ * -EAGAIN     error which can probably resolved by retry
+ * -EREMOTEIO  node suffers from an internal error
+ * -EACCES     this transaction is not allowed on requested address
+ * -EINVAL     invalid address at node
+ */
+int hpsb_packet_success(struct hpsb_packet *packet);
+
+
+/*
+ * The generic read, write and lock functions.  All recognize the local node ID
+ * and act accordingly.  Read and write automatically use quadlet commands if
+ * length == 4 and block commands otherwise (however, they do not yet
+ * support lengths that are not a multiple of 4).  You must explicitly specify
+ * the generation for which the node ID is valid, to avoid sending packets to
+ * the wrong nodes when we race with a bus reset.
+ */
+int hpsb_read(struct hpsb_host *host, nodeid_t node, unsigned int generation,
+	      u64 addr, quadlet_t *buffer, size_t length);
+int hpsb_write(struct hpsb_host *host, nodeid_t node, unsigned int generation,
+	       u64 addr, quadlet_t *buffer, size_t length);
+int hpsb_lock(struct hpsb_host *host, nodeid_t node, unsigned int generation,
+	      u64 addr, int extcode, quadlet_t *data, quadlet_t arg);
+int hpsb_lock64(struct hpsb_host *host, nodeid_t node, unsigned int generation,
+		u64 addr, int extcode, octlet_t *data, octlet_t arg);
+int hpsb_send_gasp(struct hpsb_host *host, int channel, unsigned int generation,
+                   quadlet_t *buffer, size_t length, u32 specifier_id,
+                   unsigned int version);
+
+#endif /* _IEEE1394_TRANSACTIONS_H */
diff --git a/drivers/ieee1394/ieee1394_types.h b/drivers/ieee1394/ieee1394_types.h
new file mode 100644
index 000000000000..3165609ec1ec
--- /dev/null
+++ b/drivers/ieee1394/ieee1394_types.h
@@ -0,0 +1,101 @@
+
+#ifndef _IEEE1394_TYPES_H
+#define _IEEE1394_TYPES_H
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+
+#include <asm/semaphore.h>
+#include <asm/byteorder.h>
+
+
+/* Transaction Label handling: per-target-node pool of the 64 possible
+ * transaction labels. */
+struct hpsb_tlabel_pool {
+	DECLARE_BITMAP(pool, 64);	/* set bit = label in use */
+	spinlock_t lock;		/* protects pool/next/allocations */
+	u8 next;			/* rotating search cursor */
+	u32 allocations;		/* statistics counter */
+	struct semaphore count;		/* counts free labels */
+};
+
+/* Initialize a tlabel pool: all labels free, semaphore at 63 (one label
+ * is kept in reserve — presumably intentional; verify against users). */
+#define HPSB_TPOOL_INIT(_tp)			\
+do {						\
+	bitmap_zero((_tp)->pool, 64);		\
+	spin_lock_init(&(_tp)->lock);		\
+	(_tp)->next = 0;			\
+	(_tp)->allocations = 0;			\
+	sema_init(&(_tp)->count, 63);		\
+} while (0)
+
+
+typedef u32 quadlet_t;
+typedef u64 octlet_t;
+typedef u16 nodeid_t;
+
+typedef u8  byte_t;
+typedef u64 nodeaddr_t;
+typedef u16 arm_length_t;
+
+#define BUS_MASK  0xffc0
+#define BUS_SHIFT 6
+#define NODE_MASK 0x003f
+#define LOCAL_BUS 0xffc0
+#define ALL_NODES 0x003f
+
+#define NODEID_TO_BUS(nodeid)	((nodeid & BUS_MASK) >> BUS_SHIFT)
+#define NODEID_TO_NODE(nodeid)	(nodeid & NODE_MASK)
+
+/* Can be used to consistently print a node/bus ID. */
+#define NODE_BUS_FMT		"%d-%02d:%04d"
+#define NODE_BUS_ARGS(__host, __nodeid)	\
+	__host->id, NODEID_TO_NODE(__nodeid), NODEID_TO_BUS(__nodeid)
+
+#define HPSB_PRINT(level, fmt, args...) printk(level "ieee1394: " fmt "\n" , ## args)
+
+#define HPSB_DEBUG(fmt, args...) HPSB_PRINT(KERN_DEBUG, fmt , ## args)
+#define HPSB_INFO(fmt, args...) HPSB_PRINT(KERN_INFO, fmt , ## args)
+#define HPSB_NOTICE(fmt, args...) HPSB_PRINT(KERN_NOTICE, fmt , ## args)
+#define HPSB_WARN(fmt, args...) HPSB_PRINT(KERN_WARNING, fmt , ## args)
+#define HPSB_ERR(fmt, args...) HPSB_PRINT(KERN_ERR, fmt , ## args)
+
+#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
+#define HPSB_VERBOSE(fmt, args...) HPSB_PRINT(KERN_DEBUG, fmt , ## args)
+#else
+#define HPSB_VERBOSE(fmt, args...)
+#endif
+
+#define HPSB_PANIC(fmt, args...) panic("ieee1394: " fmt "\n" , ## args)
+
+#define HPSB_TRACE() HPSB_PRINT(KERN_INFO, "TRACE - %s, %s(), line %d", __FILE__, __FUNCTION__, __LINE__)
+
+
+#ifdef __BIG_ENDIAN
+
+/* Copy @count bytes of little-endian quadlets into @dest, byte-swapping
+ * each quadlet (big-endian build).  @count is truncated to a multiple
+ * of 4.  Returns @dest like memcpy. */
+static __inline__ void *memcpy_le32(u32 *dest, const u32 *__src, size_t count)
+{
+        void *tmp = dest;
+	u32 *src = (u32 *)__src;	/* cast drops const for swab32p */
+
+        count /= 4;
+
+        while (count--) {
+                *dest++ = swab32p(src++);
+        }
+
+        return tmp;
+}
+
+#else
+
+/* Little-endian build: quadlets are already in CPU order, plain copy. */
+static __inline__ void *memcpy_le32(u32 *dest, const u32 *src, size_t count)
+{
+        return memcpy(dest, src, count);
+}
+
+#endif /* __BIG_ENDIAN */
+
+#endif /* _IEEE1394_TYPES_H */
diff --git a/drivers/ieee1394/iso.c b/drivers/ieee1394/iso.c
new file mode 100644
index 000000000000..f05759107f7e
--- /dev/null
+++ b/drivers/ieee1394/iso.c
@@ -0,0 +1,451 @@
+/*
+ * IEEE 1394 for Linux
+ *
+ * kernel ISO transmission/reception
+ *
+ * Copyright (C) 2002 Maas Digital LLC
+ *
+ * This code is licensed under the GPL.  See the file COPYING in the root
+ * directory of the kernel sources for details.
+ */
+
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include "iso.h"
+
+/* Stop a running iso stream via the host driver; no-op if the driver
+ * was never started. */
+void hpsb_iso_stop(struct hpsb_iso *iso)
+{
+	if (!(iso->flags & HPSB_ISO_DRIVER_STARTED))
+		return;
+
+	iso->host->driver->isoctl(iso, iso->type == HPSB_ISO_XMIT ?
+				  XMIT_STOP : RECV_STOP, 0);
+	iso->flags &= ~HPSB_ISO_DRIVER_STARTED;
+}
+
+/* Tear down an iso stream: stop it, let the driver release its state,
+ * free the DMA buffer and the hpsb_iso itself.  Safe to call on a
+ * partially-initialized iso (used as the error path of *_init). */
+void hpsb_iso_shutdown(struct hpsb_iso *iso)
+{
+	if (iso->flags & HPSB_ISO_DRIVER_INIT) {
+		hpsb_iso_stop(iso);
+		iso->host->driver->isoctl(iso, iso->type == HPSB_ISO_XMIT ?
+					  XMIT_SHUTDOWN : RECV_SHUTDOWN, 0);
+		iso->flags &= ~HPSB_ISO_DRIVER_INIT;
+	}
+
+	dma_region_free(&iso->data_buf);
+	kfree(iso);
+}
+
+/* Common allocation/initialization for both xmit and recv iso streams:
+ * sanitizes parameters, allocates the hpsb_iso plus its ring of packet
+ * descriptors, and allocates the page-aligned DMA data buffer.
+ * Returns NULL on any failure. */
+static struct hpsb_iso* hpsb_iso_common_init(struct hpsb_host *host, enum hpsb_iso_type type,
+					     unsigned int data_buf_size,
+					     unsigned int buf_packets,
+					     int channel,
+					     int dma_mode,
+					     int irq_interval,
+					     void (*callback)(struct hpsb_iso*))
+{
+	struct hpsb_iso *iso;
+	int dma_direction;
+
+	/* make sure driver supports the ISO API */
+	if (!host->driver->isoctl) {
+		printk(KERN_INFO "ieee1394: host driver '%s' does not support the rawiso API\n",
+		       host->driver->name);
+		return NULL;
+	}
+
+	/* sanitize parameters */
+
+	if (buf_packets < 2)
+		buf_packets = 2;
+
+	if ((dma_mode < HPSB_ISO_DMA_DEFAULT) || (dma_mode > HPSB_ISO_DMA_PACKET_PER_BUFFER))
+		dma_mode=HPSB_ISO_DMA_DEFAULT;
+
+	if (irq_interval == 0)     /* really interrupt for each packet */
+		irq_interval = 1;
+	else if ((irq_interval < 0) || (irq_interval > buf_packets / 4))
+ 		irq_interval = buf_packets / 4;
+
+	if (channel < -1 || channel >= 64)
+		return NULL;
+
+	/* channel = -1 is OK for multi-channel recv but not for xmit */
+	if (type == HPSB_ISO_XMIT && channel < 0)
+		return NULL;
+
+	/* allocate and write the struct hpsb_iso; the packet-info ring
+	 * lives in the same allocation, directly after the struct */
+
+	iso = kmalloc(sizeof(*iso) + buf_packets * sizeof(struct hpsb_iso_packet_info), GFP_KERNEL);
+	if (!iso)
+		return NULL;
+
+	iso->infos = (struct hpsb_iso_packet_info *)(iso + 1);
+
+	iso->type = type;
+	iso->host = host;
+	iso->hostdata = NULL;
+	iso->callback = callback;
+	init_waitqueue_head(&iso->waitq);
+	iso->channel = channel;
+	iso->irq_interval = irq_interval;
+	iso->dma_mode = dma_mode;
+	dma_region_init(&iso->data_buf);
+	iso->buf_size = PAGE_ALIGN(data_buf_size);
+	iso->buf_packets = buf_packets;
+	iso->pkt_dma = 0;
+	iso->first_packet = 0;
+	spin_lock_init(&iso->lock);
+
+	/* xmit starts with the whole ring ready to be filled; recv empty */
+	if (iso->type == HPSB_ISO_XMIT) {
+		iso->n_ready_packets = iso->buf_packets;
+		dma_direction = PCI_DMA_TODEVICE;
+	} else {
+		iso->n_ready_packets = 0;
+		dma_direction = PCI_DMA_FROMDEVICE;
+	}
+
+	atomic_set(&iso->overflows, 0);
+	iso->flags = 0;
+	iso->prebuffer = 0;
+
+	/* allocate the packet buffer */
+	if (dma_region_alloc(&iso->data_buf, iso->buf_size, host->pdev, dma_direction))
+		goto err;
+
+	return iso;
+
+err:
+	hpsb_iso_shutdown(iso);
+	return NULL;
+}
+
+/* Return a consistent snapshot of the number of ready packets
+ * (free slots for xmit, received packets for recv). */
+int hpsb_iso_n_ready(struct hpsb_iso* iso)
+{
+	unsigned long flags;
+	int val;
+
+	spin_lock_irqsave(&iso->lock, flags);
+	val = iso->n_ready_packets;
+	spin_unlock_irqrestore(&iso->lock, flags);
+
+	return val;
+}
+
+
+/* Create an iso transmit stream: common init, record the requested
+ * speed, then let the host driver set up its xmit context.  Returns
+ * NULL on failure (everything is torn down on the error path). */
+struct hpsb_iso* hpsb_iso_xmit_init(struct hpsb_host *host,
+				    unsigned int data_buf_size,
+				    unsigned int buf_packets,
+				    int channel,
+				    int speed,
+				    int irq_interval,
+				    void (*callback)(struct hpsb_iso*))
+{
+	struct hpsb_iso *iso = hpsb_iso_common_init(host, HPSB_ISO_XMIT,
+						    data_buf_size, buf_packets,
+						    channel, HPSB_ISO_DMA_DEFAULT, irq_interval, callback);
+	if (!iso)
+		return NULL;
+
+	iso->speed = speed;
+
+	/* tell the driver to start working */
+	if (host->driver->isoctl(iso, XMIT_INIT, 0))
+		goto err;
+
+	iso->flags |= HPSB_ISO_DRIVER_INIT;
+	return iso;
+
+err:
+	hpsb_iso_shutdown(iso);
+	return NULL;
+}
+
+/* Create an iso receive stream: common init, then let the host driver
+ * set up its recv context.  Returns NULL on failure. */
+struct hpsb_iso* hpsb_iso_recv_init(struct hpsb_host *host,
+				    unsigned int data_buf_size,
+				    unsigned int buf_packets,
+				    int channel,
+				    int dma_mode,
+				    int irq_interval,
+				    void (*callback)(struct hpsb_iso*))
+{
+	struct hpsb_iso *iso = hpsb_iso_common_init(host, HPSB_ISO_RECV,
+						    data_buf_size, buf_packets,
+						    channel, dma_mode, irq_interval, callback);
+	if (!iso)
+		return NULL;
+
+	/* tell the driver to start working */
+	if (host->driver->isoctl(iso, RECV_INIT, 0))
+		goto err;
+
+	iso->flags |= HPSB_ISO_DRIVER_INIT;
+	return iso;
+
+err:
+	hpsb_iso_shutdown(iso);
+	return NULL;
+}
+
+/* Start listening on @channel; only valid for a multi-channel
+ * (channel == -1) receive stream. */
+int hpsb_iso_recv_listen_channel(struct hpsb_iso *iso, unsigned char channel)
+{
+	if (iso->type != HPSB_ISO_RECV || iso->channel != -1 || channel >= 64)
+		return -EINVAL;
+	return iso->host->driver->isoctl(iso, RECV_LISTEN_CHANNEL, channel);
+}
+
+/* Stop listening on @channel; only valid for a multi-channel
+ * (channel == -1) receive stream. */
+int hpsb_iso_recv_unlisten_channel(struct hpsb_iso *iso, unsigned char channel)
+{
+       if (iso->type != HPSB_ISO_RECV || iso->channel != -1 || channel >= 64)
+               return -EINVAL;
+       return iso->host->driver->isoctl(iso, RECV_UNLISTEN_CHANNEL, channel);
+}
+
+/* Replace the whole 64-bit listen mask at once; only valid for a
+ * multi-channel receive stream.  The mask is passed by address. */
+int hpsb_iso_recv_set_channel_mask(struct hpsb_iso *iso, u64 mask)
+{
+	if (iso->type != HPSB_ISO_RECV || iso->channel != -1)
+		return -EINVAL;
+	return iso->host->driver->isoctl(iso, RECV_SET_CHANNEL_MASK, (unsigned long) &mask);
+}
+
+/* Ask the driver to flush any buffered received packets to the caller. */
+int hpsb_iso_recv_flush(struct hpsb_iso *iso)
+{
+	if (iso->type != HPSB_ISO_RECV)
+		return -EINVAL;
+	return iso->host->driver->isoctl(iso, RECV_FLUSH, 0);
+}
+
+/* Kick off transmit DMA at @cycle and mark the stream started.
+ * Called once enough packets have been prebuffered. */
+static int do_iso_xmit_start(struct hpsb_iso *iso, int cycle)
+{
+	int retval = iso->host->driver->isoctl(iso, XMIT_START, cycle);
+	if (retval)
+		return retval;
+
+	iso->flags |= HPSB_ISO_DRIVER_STARTED;
+	return retval;
+}
+
+/* Arm a transmit stream.  DMA does not start here: it commences from
+ * hpsb_iso_xmit_queue_packet() once @prebuffer packets are queued.
+ * @cycle is clamped to [-1, 7999] (-1 = start ASAP); @prebuffer < 0
+ * means "fill the whole ring first", 0 is bumped to 1. */
+int hpsb_iso_xmit_start(struct hpsb_iso *iso, int cycle, int prebuffer)
+{
+	if (iso->type != HPSB_ISO_XMIT)
+		return -1;
+
+	if (iso->flags & HPSB_ISO_DRIVER_STARTED)
+		return 0;
+
+	if (cycle < -1)
+		cycle = -1;
+	else if (cycle >= 8000)
+		cycle %= 8000;
+
+	iso->xmit_cycle = cycle;
+
+	if (prebuffer < 0)
+		prebuffer = iso->buf_packets;
+	else if (prebuffer == 0)
+		prebuffer = 1;
+
+	if (prebuffer > iso->buf_packets)
+		prebuffer = iso->buf_packets;
+
+	iso->prebuffer = prebuffer;
+
+	/* remember the starting cycle; DMA will commence from xmit_queue_packets()
+	   once enough packets have been buffered */
+	iso->start_cycle = cycle;
+
+	return 0;
+}
+
+/* Start a receive stream.  @cycle is clamped to [-1, 7999] (-1 = start
+ * ASAP); a negative @tag_mask matches all four tag values.  The three
+ * parameters are marshalled into an array passed to the driver. */
+int hpsb_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync)
+{
+	int retval = 0;
+	int isoctl_args[3];
+
+	if (iso->type != HPSB_ISO_RECV)
+		return -1;
+
+	if (iso->flags & HPSB_ISO_DRIVER_STARTED)
+		return 0;
+
+	if (cycle < -1)
+		cycle = -1;
+	else if (cycle >= 8000)
+		cycle %= 8000;
+
+	isoctl_args[0] = cycle;
+
+	if (tag_mask < 0)
+		/* match all tags */
+		tag_mask = 0xF;
+	isoctl_args[1] = tag_mask;
+
+	isoctl_args[2] = sync;
+
+	retval = iso->host->driver->isoctl(iso, RECV_START, (unsigned long) &isoctl_args[0]);
+	if (retval)
+		return retval;
+
+	iso->flags |= HPSB_ISO_DRIVER_STARTED;
+	return retval;
+}
+
+/* check to make sure the user has not supplied bogus values of offset/len
+   that would cause the kernel to access memory outside the buffer */
+
+/* check to make sure the user has not supplied bogus values of offset/len
+   that would cause the kernel to access memory outside the buffer */
+
+static int hpsb_iso_check_offset_len(struct hpsb_iso *iso,
+				     unsigned int offset, unsigned short len,
+				     unsigned int *out_offset, unsigned short *out_len)
+{
+	if (offset >= iso->buf_size)
+		return -EFAULT;
+
+	/* make sure the packet does not go beyond the end of the buffer */
+	if (offset + len > iso->buf_size)
+		return -EFAULT;
+
+	/* check for wrap-around (unsigned overflow of offset + len) */
+	if (offset + len < offset)
+		return -EFAULT;
+
+	/* now we can trust 'offset' and 'length' */
+	*out_offset = offset;
+	*out_len = len;
+
+	return 0;
+}
+
+
+/* Queue one packet for transmission at buffer offset @offset / length
+ * @len with the given tag/sy fields.  Returns -EBUSY when the ring is
+ * full.  Once the prebuffer count drains to zero, this also triggers
+ * the actual DMA start at the remembered start cycle. */
+int hpsb_iso_xmit_queue_packet(struct hpsb_iso *iso, u32 offset, u16 len, u8 tag, u8 sy)
+{
+	struct hpsb_iso_packet_info *info;
+	unsigned long flags;
+	int rv;
+
+	if (iso->type != HPSB_ISO_XMIT)
+		return -EINVAL;
+
+	/* is there space in the buffer? */
+	if (iso->n_ready_packets <= 0) {
+		return -EBUSY;
+	}
+
+	info = &iso->infos[iso->first_packet];
+
+	/* check for bogus offset/length */
+	if (hpsb_iso_check_offset_len(iso, offset, len, &info->offset, &info->len))
+		return -EFAULT;
+
+	info->tag = tag;
+	info->sy = sy;
+
+	spin_lock_irqsave(&iso->lock, flags);
+
+	rv = iso->host->driver->isoctl(iso, XMIT_QUEUE, (unsigned long) info);
+	if (rv)
+		goto out;
+
+	/* increment cursors */
+	iso->first_packet = (iso->first_packet+1) % iso->buf_packets;
+	iso->xmit_cycle = (iso->xmit_cycle+1) % 8000;
+	iso->n_ready_packets--;
+
+	/* start DMA once the requested number of packets is prebuffered */
+	if (iso->prebuffer != 0) {
+		iso->prebuffer--;
+		if (iso->prebuffer <= 0) {
+			iso->prebuffer = 0;
+			rv = do_iso_xmit_start(iso, iso->start_cycle);
+		}
+	}
+
+out:
+	spin_unlock_irqrestore(&iso->lock, flags);
+	return rv;
+}
+
+/* Sleep (interruptibly) until every queued packet has been sent, i.e.
+ * the whole ring is ready again.  Returns -ERESTARTSYS on signal. */
+int hpsb_iso_xmit_sync(struct hpsb_iso *iso)
+{
+	if (iso->type != HPSB_ISO_XMIT)
+		return -EINVAL;
+
+	return wait_event_interruptible(iso->waitq, hpsb_iso_n_ready(iso) == iso->buf_packets);
+}
+
+/* Driver callback: one packet went out on @cycle (or failed if @error).
+ * Frees that ring slot and re-predicts the cycle of the next packet to
+ * be queued.  Ring-empty (underrun) is counted in the overflows stat. */
+void hpsb_iso_packet_sent(struct hpsb_iso *iso, int cycle, int error)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&iso->lock, flags);
+
+	/* predict the cycle of the next packet to be queued */
+
+	/* jump ahead by the number of packets that are already buffered */
+	cycle += iso->buf_packets - iso->n_ready_packets;
+	cycle %= 8000;
+
+	iso->xmit_cycle = cycle;
+	iso->n_ready_packets++;
+	iso->pkt_dma = (iso->pkt_dma + 1) % iso->buf_packets;
+
+	if (iso->n_ready_packets == iso->buf_packets || error != 0) {
+		/* the buffer has run empty! */
+		atomic_inc(&iso->overflows);
+	}
+
+	spin_unlock_irqrestore(&iso->lock, flags);
+}
+
+/* Driver callback: a packet arrived.  Records its descriptor at the
+ * DMA cursor, or counts an overflow when the ring is already full
+ * (the packet's data is then dropped). */
+void hpsb_iso_packet_received(struct hpsb_iso *iso, u32 offset, u16 len,
+			      u16 cycle, u8 channel, u8 tag, u8 sy)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&iso->lock, flags);
+
+	if (iso->n_ready_packets == iso->buf_packets) {
+		/* overflow! */
+		atomic_inc(&iso->overflows);
+	} else {
+		struct hpsb_iso_packet_info *info = &iso->infos[iso->pkt_dma];
+		info->offset = offset;
+		info->len = len;
+		info->cycle = cycle;
+		info->channel = channel;
+		info->tag = tag;
+		info->sy = sy;
+
+		iso->pkt_dma = (iso->pkt_dma+1) % iso->buf_packets;
+		iso->n_ready_packets++;
+	}
+
+	spin_unlock_irqrestore(&iso->lock, flags);
+}
+
+/* Return @n_packets consumed receive slots to the driver, advancing
+ * the consumer cursor.  Stops early and returns the driver's error if
+ * a RECV_RELEASE isoctl fails. */
+int hpsb_iso_recv_release_packets(struct hpsb_iso *iso, unsigned int n_packets)
+{
+	unsigned long flags;
+	unsigned int i;
+	int rv = 0;
+
+	if (iso->type != HPSB_ISO_RECV)
+		return -1;
+
+	spin_lock_irqsave(&iso->lock, flags);
+	for (i = 0; i < n_packets; i++) {
+		rv = iso->host->driver->isoctl(iso, RECV_RELEASE,
+					       (unsigned long) &iso->infos[iso->first_packet]);
+		if (rv)
+			break;
+
+		iso->first_packet = (iso->first_packet+1) % iso->buf_packets;
+		iso->n_ready_packets--;
+	}
+	spin_unlock_irqrestore(&iso->lock, flags);
+	return rv;
+}
+
+/* Wake any sleeper on the stream's waitqueue (e.g. hpsb_iso_xmit_sync)
+ * and invoke the user's completion callback if one was registered. */
+void hpsb_iso_wake(struct hpsb_iso *iso)
+{
+	wake_up_interruptible(&iso->waitq);
+
+	if (iso->callback)
+		iso->callback(iso);
+}
diff --git a/drivers/ieee1394/iso.h b/drivers/ieee1394/iso.h
new file mode 100644
index 000000000000..fb654d9639a7
--- /dev/null
+++ b/drivers/ieee1394/iso.h
@@ -0,0 +1,201 @@
+/*
+ * IEEE 1394 for Linux
+ *
+ * kernel ISO transmission/reception
+ *
+ * Copyright (C) 2002 Maas Digital LLC
+ *
+ * This code is licensed under the GPL.  See the file COPYING in the root
+ * directory of the kernel sources for details.
+ */
+
+#ifndef IEEE1394_ISO_H
+#define IEEE1394_ISO_H
+
+#include "hosts.h"
+#include "dma.h"
+
+/* high-level ISO interface */
+
+/* This API sends and receives isochronous packets on a large,
+   virtually-contiguous kernel memory buffer. The buffer may be mapped
+   into a user-space process for zero-copy transmission and reception.
+
+   There are no explicit boundaries between packets in the buffer. A
+   packet may be transmitted or received at any location. However,
+   low-level drivers may impose certain restrictions on alignment or
+   size of packets. (e.g. in OHCI no packet may cross a page boundary,
+   and packets should be quadlet-aligned)
+*/
+
+/* Packet descriptor - the API maintains a ring buffer of these packet
+   descriptors in kernel memory (hpsb_iso.infos[]).  */
+
+struct hpsb_iso_packet_info {
+	/* offset of data payload relative to the first byte of the buffer */
+	__u32 offset;
+
+	/* length of the data payload, in bytes (not including the isochronous header) */
+	__u16 len;
+
+	/* (recv only) the cycle number (mod 8000) on which the packet was received */
+	__u16 cycle;
+
+	/* (recv only) channel on which the packet was received */
+	__u8 channel;
+
+	/* 2-bit 'tag' and 4-bit 'sy' fields of the isochronous header */
+	__u8 tag;
+	__u8 sy;
+};
+
+enum hpsb_iso_type { HPSB_ISO_RECV = 0, HPSB_ISO_XMIT = 1 };
+
+/* The mode of the dma when receiving iso data. Must be supported by chip */
+enum raw1394_iso_dma_recv_mode {
+	HPSB_ISO_DMA_DEFAULT = -1,
+	HPSB_ISO_DMA_OLD_ABI = 0,
+	HPSB_ISO_DMA_BUFFERFILL = 1,
+	HPSB_ISO_DMA_PACKET_PER_BUFFER = 2
+};
+
+struct hpsb_iso {
+	enum hpsb_iso_type type;
+
+	/* pointer to low-level driver and its private data */
+	struct hpsb_host *host;
+	void *hostdata;
+
+	/* a function to be called (from interrupt context) after
+           outgoing packets have been sent, or incoming packets have
+           arrived */
+	void (*callback)(struct hpsb_iso*);
+
+	/* wait for buffer space */
+	wait_queue_head_t waitq;
+
+	int speed; /* IEEE1394_SPEED_100, 200, or 400 */
+	int channel; /* -1 if multichannel */
+	int dma_mode; /* dma receive mode */
+
+
+	/* greatest # of packets between interrupts - controls
+	   the maximum latency of the buffer */
+	int irq_interval;
+
+	/* the buffer for packet data payloads */
+	struct dma_region data_buf;
+
+	/* size of data_buf, in bytes (always a multiple of PAGE_SIZE) */
+	unsigned int buf_size;
+
+	/* # of packets in the ringbuffer */
+	unsigned int buf_packets;
+
+	/* protects packet cursors */
+	spinlock_t lock;
+
+	/* the index of the next packet that will be produced
+	   or consumed by the user */
+	int first_packet;
+
+	/* the index of the next packet that will be transmitted
+	   or received by the 1394 hardware */
+	int pkt_dma;
+
+	/* how many packets, starting at first_packet:
+	   (transmit) are ready to be filled with data
+	   (receive)  contain received data */
+	int n_ready_packets;
+
+	/* how many times the buffer has overflowed or underflowed */
+	atomic_t overflows;
+
+	/* private flags to track initialization progress */
+#define HPSB_ISO_DRIVER_INIT     (1<<0)
+#define HPSB_ISO_DRIVER_STARTED  (1<<1)
+	unsigned int flags;
+
+	/* # of packets left to prebuffer (xmit only) */
+	int prebuffer;
+
+	/* starting cycle for DMA (xmit only) */
+	int start_cycle;
+
+	/* cycle at which next packet will be transmitted,
+	   -1 if not known */
+	int xmit_cycle;
+
+	/* ringbuffer of packet descriptors in regular kernel memory
+	 * XXX Keep this last, since we use over-allocated memory from
+	 * this entry to fill this field. */
+	struct hpsb_iso_packet_info *infos;
+};
+
+/* functions available to high-level drivers (e.g. raw1394) */
+
+/* allocate the buffer and DMA context */
+
+struct hpsb_iso* hpsb_iso_xmit_init(struct hpsb_host *host,
+				    unsigned int data_buf_size,
+				    unsigned int buf_packets,
+				    int channel,
+				    int speed,
+				    int irq_interval,
+				    void (*callback)(struct hpsb_iso*));
+
+/* note: if channel = -1, multi-channel receive is enabled */
+struct hpsb_iso* hpsb_iso_recv_init(struct hpsb_host *host,
+				    unsigned int data_buf_size,
+				    unsigned int buf_packets,
+				    int channel,
+				    int dma_mode,
+				    int irq_interval,
+				    void (*callback)(struct hpsb_iso*));
+
+/* multi-channel only */
+int hpsb_iso_recv_listen_channel(struct hpsb_iso *iso, unsigned char channel);
+int hpsb_iso_recv_unlisten_channel(struct hpsb_iso *iso, unsigned char channel);
+int hpsb_iso_recv_set_channel_mask(struct hpsb_iso *iso, u64 mask);
+
+/* start/stop DMA */
+int hpsb_iso_xmit_start(struct hpsb_iso *iso, int start_on_cycle, int prebuffer);
+int hpsb_iso_recv_start(struct hpsb_iso *iso, int start_on_cycle, int tag_mask, int sync);
+void hpsb_iso_stop(struct hpsb_iso *iso);
+
+/* deallocate buffer and DMA context */
+void hpsb_iso_shutdown(struct hpsb_iso *iso);
+
+/* queue a packet for transmission. 'offset' is relative to the beginning of the
+   DMA buffer, where the packet's data payload should already have been placed */
+int hpsb_iso_xmit_queue_packet(struct hpsb_iso *iso, u32 offset, u16 len, u8 tag, u8 sy);
+
+/* wait until all queued packets have been transmitted to the bus */
+int hpsb_iso_xmit_sync(struct hpsb_iso *iso);
+
+/* N packets have been read out of the buffer, re-use the buffer space */
+int  hpsb_iso_recv_release_packets(struct hpsb_iso *recv, unsigned int n_packets);
+
+/* check for arrival of new packets immediately (even if irq_interval
+   has not yet been reached) */
+int hpsb_iso_recv_flush(struct hpsb_iso *iso);
+
+/* returns # of packets ready to send or receive */
+int hpsb_iso_n_ready(struct hpsb_iso *iso);
+
+/* the following are callbacks available to low-level drivers */
+
+/* call after a packet has been transmitted to the bus (interrupt context is OK)
+   'cycle' is the _exact_ cycle the packet was sent on
+   'error' should be non-zero if some sort of error occurred when sending the packet
+*/
+void hpsb_iso_packet_sent(struct hpsb_iso *iso, int cycle, int error);
+
+/* call after a packet has been received (interrupt context OK) */
+void hpsb_iso_packet_received(struct hpsb_iso *iso, u32 offset, u16 len,
+			      u16 cycle, u8 channel, u8 tag, u8 sy);
+
+/* call to wake waiting processes after buffer space has opened up. */
+void hpsb_iso_wake(struct hpsb_iso *iso);
+
+#endif /* IEEE1394_ISO_H */
diff --git a/drivers/ieee1394/nodemgr.c b/drivers/ieee1394/nodemgr.c
new file mode 100644
index 000000000000..a1e30a66297b
--- /dev/null
+++ b/drivers/ieee1394/nodemgr.c
@@ -0,0 +1,1732 @@
+/*
+ * Node information (ConfigROM) collection and management.
+ *
+ * Copyright (C) 2000		Andreas E. Bombe
+ *               2001-2003	Ben Collins <bcollins@debian.net>
+ *
+ * This code is licensed under the GPL.  See the file COPYING in the root
+ * directory of the kernel sources for details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/config.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/smp_lock.h>
+#include <linux/interrupt.h>
+#include <linux/kmod.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/moduleparam.h>
+#include <asm/atomic.h>
+
+#include "ieee1394_types.h"
+#include "ieee1394.h"
+#include "ieee1394_core.h"
+#include "hosts.h"
+#include "ieee1394_transactions.h"
+#include "highlevel.h"
+#include "csr.h"
+#include "nodemgr.h"
+
+static int ignore_drivers = 0;
+module_param(ignore_drivers, int, 0444);
+MODULE_PARM_DESC(ignore_drivers, "Disable automatic probing for drivers.");
+
+struct nodemgr_csr_info {
+	struct hpsb_host *host;
+	nodeid_t nodeid;
+	unsigned int generation;
+};
+
+
+/* Map a 24-bit OUI to its registered vendor-name string using the
+ * optional compiled-in OUI database.  Returns NULL when the database
+ * is not built in (CONFIG_IEEE1394_OUI_DB unset) or the OUI is not
+ * listed. */
+static char *nodemgr_find_oui_name(int oui)
+{
+#ifdef CONFIG_IEEE1394_OUI_DB
+	extern struct oui_list_struct {
+		int oui;
+		char *name;
+	} oui_list[];
+	int i;
+
+	/* the table is terminated by an entry with a NULL name */
+	for (i = 0; oui_list[i].name; i++)
+		if (oui_list[i].oui == oui)
+			return oui_list[i].name;
+#endif
+	return NULL;
+}
+
+
+/* csr1212 bus_read callback: read 'length' bytes of a node's config
+ * ROM at 'addr' via hpsb_read().  Retries up to three times with a
+ * ~334 ms interruptible sleep between attempts.  Returns 0 on
+ * success, -EINTR if the sleep was interrupted by a signal, or the
+ * last hpsb_read() error code. */
+static int nodemgr_bus_read(struct csr1212_csr *csr, u64 addr, u16 length,
+                            void *buffer, void *__ci)
+{
+	struct nodemgr_csr_info *ci = (struct nodemgr_csr_info*)__ci;
+	int i, ret = 0;
+
+	for (i = 0; i < 3; i++) {
+		ret = hpsb_read(ci->host, ci->nodeid, ci->generation, addr,
+				buffer, length);
+		if (!ret)
+			break;
+
+		/* back off before retrying; bail out on a signal */
+		if (msleep_interruptible(334))
+			return -EINTR;
+	}
+
+	return ret;
+}
+
+/* csr1212 get_max_rom callback: extract the 2-bit max_rom field from
+ * the third quadlet of the bus_info_block (bits 9:8). */
+static int nodemgr_get_max_rom(quadlet_t *bus_info_data, void *__ci)
+{
+	return (CSR1212_BE32_TO_CPU(bus_info_data[2]) >> 8) & 0x3;
+}
+
+static struct csr1212_bus_ops nodemgr_csr_ops = {
+	.bus_read =	nodemgr_bus_read,
+	.get_max_rom =	nodemgr_get_max_rom
+};
+
+
+/*
+ * Basically what we do here is start off retrieving the bus_info block.
+ * From there will fill in some info about the node, verify it is of IEEE
+ * 1394 type, and that the crc checks out ok. After that we start off with
+ * the root directory, and subdirectories. To do this, we retrieve the
+ * quadlet header for a directory, find out the length, and retrieve the
+ * complete directory entry (be it a leaf or a directory). We then process
+ * it and add the info to our structure for that particular node.
+ *
+ * We verify CRC's along the way for each directory/block/leaf. The entire
+ * node structure is generic, and simply stores the information in a way
+ * that's easy to parse by the protocol interface.
+ */
+
+/*
+ * The nodemgr relies heavily on the Driver Model for device callbacks and
+ * driver/device mappings. The old nodemgr used to handle all this itself,
+ * but now we are much simpler because of the LDM.
+ */
+
+static DECLARE_MUTEX(nodemgr_serialize);
+
+struct host_info {
+	struct hpsb_host *host;
+	struct list_head list;
+	struct completion exited;
+	struct semaphore reset_sem;
+	int pid;
+	char daemon_name[15];
+	int kill_me;
+};
+
+static int nodemgr_bus_match(struct device * dev, struct device_driver * drv);
+static int nodemgr_hotplug(struct class_device *cdev, char **envp, int num_envp,
+			   char *buffer, int buffer_size);
+static void nodemgr_resume_ne(struct node_entry *ne);
+static void nodemgr_remove_ne(struct node_entry *ne);
+static struct node_entry *find_entry_by_guid(u64 guid);
+
+struct bus_type ieee1394_bus_type = {
+	.name		= "ieee1394",
+	.match		= nodemgr_bus_match,
+};
+
+static void host_cls_release(struct class_device *class_dev)
+{
+	put_device(&container_of((class_dev), struct hpsb_host, class_dev)->device);
+}
+
+struct class hpsb_host_class = {
+	.name		= "ieee1394_host",
+	.release	= host_cls_release,
+};
+
+static void ne_cls_release(struct class_device *class_dev)
+{
+	put_device(&container_of((class_dev), struct node_entry, class_dev)->device);
+}
+
+static struct class nodemgr_ne_class = {
+	.name		= "ieee1394_node",
+	.release	= ne_cls_release,
+};
+
+static void ud_cls_release(struct class_device *class_dev)
+{
+	put_device(&container_of((class_dev), struct unit_directory, class_dev)->device);
+}
+
+/* The name here is only so that unit directory hotplug works with old
+ * style hotplug, which only ever did unit directories anyway. */
+static struct class nodemgr_ud_class = {
+	.name		= "ieee1394",
+	.release	= ud_cls_release,
+	.hotplug	= nodemgr_hotplug,
+};
+
+static struct hpsb_highlevel nodemgr_highlevel;
+
+
+static void nodemgr_release_ud(struct device *dev)
+{
+	struct unit_directory *ud = container_of(dev, struct unit_directory, device);
+
+	if (ud->vendor_name_kv)
+		csr1212_release_keyval(ud->vendor_name_kv);
+	if (ud->model_name_kv)
+		csr1212_release_keyval(ud->model_name_kv);
+
+	kfree(ud);
+}
+
+static void nodemgr_release_ne(struct device *dev)
+{
+	struct node_entry *ne = container_of(dev, struct node_entry, device);
+
+	if (ne->vendor_name_kv)
+		csr1212_release_keyval(ne->vendor_name_kv);
+
+	kfree(ne);
+}
+
+
+static void nodemgr_release_host(struct device *dev)
+{
+	struct hpsb_host *host = container_of(dev, struct hpsb_host, device);
+
+	csr1212_destroy_csr(host->csr.rom);
+
+	kfree(host);
+}
+
+static int nodemgr_ud_platform_data;
+
+static struct device nodemgr_dev_template_ud = {
+	.bus		= &ieee1394_bus_type,
+	.release	= nodemgr_release_ud,
+	.platform_data	= &nodemgr_ud_platform_data,
+};
+
+static struct device nodemgr_dev_template_ne = {
+	.bus		= &ieee1394_bus_type,
+	.release	= nodemgr_release_ne,
+};
+
+struct device nodemgr_dev_template_host = {
+	.bus		= &ieee1394_bus_type,
+	.release	= nodemgr_release_host,
+};
+
+
+#define fw_attr(class, class_type, field, type, format_string)		\
+static ssize_t fw_show_##class##_##field (struct device *dev, char *buf)\
+{									\
+	class_type *class;						\
+	class = container_of(dev, class_type, device);			\
+	return sprintf(buf, format_string, (type)class->field);		\
+}									\
+static struct device_attribute dev_attr_##class##_##field = {		\
+	.attr = {.name = __stringify(field), .mode = S_IRUGO },		\
+	.show   = fw_show_##class##_##field,				\
+};
+
+/* Generate a sysfs show function plus device_attribute for a CSR1212
+ * textual descriptor leaf (vendor/model name strings).  The leaf data
+ * is copied into the sysfs buffer, trailing NUL padding from the
+ * quadlet-aligned leaf is stripped, and a newline is appended. */
+#define fw_attr_td(class, class_type, td_kv)				\
+static ssize_t fw_show_##class##_##td_kv (struct device *dev, char *buf)\
+{									\
+	int len;							\
+	class_type *class = container_of(dev, class_type, device);	\
+	len = (class->td_kv->value.leaf.len - 2) * sizeof(quadlet_t);	\
+	memcpy(buf,							\
+	       CSR1212_TEXTUAL_DESCRIPTOR_LEAF_DATA(class->td_kv),	\
+	       len);							\
+	/* strip trailing NUL padding; the old test compared the	\
+	 * pointer (buf + len - 1) against '\0' and never fired */	\
+	while (len > 0 && buf[len - 1] == '\0')				\
+		len--;							\
+	buf[len++] = '\n';						\
+	buf[len] = '\0';						\
+	return len;							\
+}									\
+static struct device_attribute dev_attr_##class##_##td_kv = {		\
+	.attr = {.name = __stringify(td_kv), .mode = S_IRUGO },		\
+	.show   = fw_show_##class##_##td_kv,				\
+};
+
+
+#define fw_drv_attr(field, type, format_string)			\
+static ssize_t fw_drv_show_##field (struct device_driver *drv, char *buf) \
+{								\
+	struct hpsb_protocol_driver *driver;			\
+	driver = container_of(drv, struct hpsb_protocol_driver, driver); \
+	return sprintf(buf, format_string, (type)driver->field);\
+}								\
+static struct driver_attribute driver_attr_drv_##field = {	\
+        .attr = {.name = __stringify(field), .mode = S_IRUGO },	\
+        .show   = fw_drv_show_##field,				\
+};
+
+
+static ssize_t fw_show_ne_bus_options(struct device *dev, char *buf)
+{
+	struct node_entry *ne = container_of(dev, struct node_entry, device);
+
+	return sprintf(buf, "IRMC(%d) CMC(%d) ISC(%d) BMC(%d) PMC(%d) GEN(%d) "
+		       "LSPD(%d) MAX_REC(%d) MAX_ROM(%d) CYC_CLK_ACC(%d)\n",
+		       ne->busopt.irmc,
+		       ne->busopt.cmc, ne->busopt.isc, ne->busopt.bmc,
+		       ne->busopt.pmc, ne->busopt.generation, ne->busopt.lnkspd,
+		       ne->busopt.max_rec,
+		       ne->busopt.max_rom,
+		       ne->busopt.cyc_clk_acc);
+}
+static DEVICE_ATTR(bus_options,S_IRUGO,fw_show_ne_bus_options,NULL);
+
+
+static ssize_t fw_show_ne_tlabels_free(struct device *dev, char *buf)
+{
+	struct node_entry *ne = container_of(dev, struct node_entry, device);
+	return sprintf(buf, "%d\n", atomic_read(&ne->tpool->count.count) + 1);
+}
+static DEVICE_ATTR(tlabels_free,S_IRUGO,fw_show_ne_tlabels_free,NULL);
+
+
+static ssize_t fw_show_ne_tlabels_allocations(struct device *dev, char *buf)
+{
+	struct node_entry *ne = container_of(dev, struct node_entry, device);
+	return sprintf(buf, "%u\n", ne->tpool->allocations);
+}
+static DEVICE_ATTR(tlabels_allocations,S_IRUGO,fw_show_ne_tlabels_allocations,NULL);
+
+
+static ssize_t fw_show_ne_tlabels_mask(struct device *dev, char *buf)
+{
+	struct node_entry *ne = container_of(dev, struct node_entry, device);
+#if (BITS_PER_LONG <= 32)
+	return sprintf(buf, "0x%08lx%08lx\n", ne->tpool->pool[0], ne->tpool->pool[1]);
+#else
+	return sprintf(buf, "0x%016lx\n", ne->tpool->pool[0]);
+#endif
+}
+static DEVICE_ATTR(tlabels_mask, S_IRUGO, fw_show_ne_tlabels_mask, NULL);
+
+
+/* sysfs store for per-unit-directory 'ignore_driver': writing 1
+ * detaches the bound driver (under the bus rwsem) and blocks future
+ * matching; writing 0 re-enables matching.  Other values are ignored. */
+static ssize_t fw_set_ignore_driver(struct device *dev, const char *buf, size_t count)
+{
+	struct unit_directory *ud = container_of(dev, struct unit_directory, device);
+	int state = simple_strtoul(buf, NULL, 10);
+
+	if (state == 1) {
+		down_write(&dev->bus->subsys.rwsem);
+		device_release_driver(dev);
+		ud->ignore_driver = 1;
+		up_write(&dev->bus->subsys.rwsem);
+	} else if (!state)
+		ud->ignore_driver = 0;
+
+	return count;
+}
+/* sysfs show for 'ignore_driver': current flag as "0" or "1". */
+static ssize_t fw_get_ignore_driver(struct device *dev, char *buf)
+{
+	struct unit_directory *ud = container_of(dev, struct unit_directory, device);
+
+	return sprintf(buf, "%d\n", ud->ignore_driver);
+}
+static DEVICE_ATTR(ignore_driver, S_IWUSR | S_IRUGO, fw_get_ignore_driver, fw_set_ignore_driver);
+
+
+static ssize_t fw_set_destroy_node(struct bus_type *bus, const char *buf, size_t count)
+{
+	struct node_entry *ne;
+	u64 guid = (u64)simple_strtoull(buf, NULL, 16);
+
+	ne = find_entry_by_guid(guid);
+
+	if (ne == NULL || !ne->in_limbo)
+		return -EINVAL;
+
+	nodemgr_remove_ne(ne);
+
+	return count;
+}
+static ssize_t fw_get_destroy_node(struct bus_type *bus, char *buf)
+{
+	return sprintf(buf, "You can destroy in_limbo nodes by writing their GUID to this file\n");
+}
+static BUS_ATTR(destroy_node, S_IWUSR | S_IRUGO, fw_get_destroy_node, fw_set_destroy_node);
+
+/* Kernel-thread body spawned by the 'rescan' bus attribute: re-runs
+ * driver matching for every device on the ieee1394 bus, then exits. */
+static int nodemgr_rescan_bus_thread(void *__unused)
+{
+	/* No userlevel access needed */
+	daemonize("kfwrescan");
+
+	bus_rescan_devices(&ieee1394_bus_type);
+
+	return 0;
+}
+
+static ssize_t fw_set_rescan(struct bus_type *bus, const char *buf, size_t count)
+{
+	int state = simple_strtoul(buf, NULL, 10);
+
+	/* Don't wait for this, or care about errors. Root could do
+	 * something stupid and spawn this a lot of times, but that's
+	 * root's fault. */
+	if (state == 1)
+		kernel_thread(nodemgr_rescan_bus_thread, NULL, CLONE_KERNEL);
+
+	return count;
+}
+static ssize_t fw_get_rescan(struct bus_type *bus, char *buf)
+{
+	return sprintf(buf, "You can force a rescan of the bus for "
+			"drivers by writing a 1 to this file\n");
+}
+static BUS_ATTR(rescan, S_IWUSR | S_IRUGO, fw_get_rescan, fw_set_rescan);
+
+
+static ssize_t fw_set_ignore_drivers(struct bus_type *bus, const char *buf, size_t count)
+{
+	int state = simple_strtoul(buf, NULL, 10);
+
+	if (state == 1)
+		ignore_drivers = 1;
+	else if (!state)
+		ignore_drivers = 0;
+
+	return count;
+}
+static ssize_t fw_get_ignore_drivers(struct bus_type *bus, char *buf)
+{
+	return sprintf(buf, "%d\n", ignore_drivers);
+}
+static BUS_ATTR(ignore_drivers, S_IWUSR | S_IRUGO, fw_get_ignore_drivers, fw_set_ignore_drivers);
+
+
+struct bus_attribute *const fw_bus_attrs[] = {
+	&bus_attr_destroy_node,
+	&bus_attr_rescan,
+	&bus_attr_ignore_drivers,
+	NULL
+};
+
+
+fw_attr(ne, struct node_entry, capabilities, unsigned int, "0x%06x\n")
+fw_attr(ne, struct node_entry, nodeid, unsigned int, "0x%04x\n")
+
+fw_attr(ne, struct node_entry, vendor_id, unsigned int, "0x%06x\n")
+fw_attr_td(ne, struct node_entry, vendor_name_kv)
+fw_attr(ne, struct node_entry, vendor_oui, const char *, "%s\n")
+
+fw_attr(ne, struct node_entry, guid, unsigned long long, "0x%016Lx\n")
+fw_attr(ne, struct node_entry, guid_vendor_id, unsigned int, "0x%06x\n")
+fw_attr(ne, struct node_entry, guid_vendor_oui, const char *, "%s\n")
+fw_attr(ne, struct node_entry, in_limbo, int, "%d\n");
+
+static struct device_attribute *const fw_ne_attrs[] = {
+	&dev_attr_ne_guid,
+	&dev_attr_ne_guid_vendor_id,
+	&dev_attr_ne_capabilities,
+	&dev_attr_ne_vendor_id,
+	&dev_attr_ne_nodeid,
+	&dev_attr_bus_options,
+	&dev_attr_tlabels_free,
+	&dev_attr_tlabels_allocations,
+	&dev_attr_tlabels_mask,
+};
+
+
+
+fw_attr(ud, struct unit_directory, address, unsigned long long, "0x%016Lx\n")
+fw_attr(ud, struct unit_directory, length, int, "%d\n")
+/* These are all dependent on the value being provided */
+fw_attr(ud, struct unit_directory, vendor_id, unsigned int, "0x%06x\n")
+fw_attr(ud, struct unit_directory, model_id, unsigned int, "0x%06x\n")
+fw_attr(ud, struct unit_directory, specifier_id, unsigned int, "0x%06x\n")
+fw_attr(ud, struct unit_directory, version, unsigned int, "0x%06x\n")
+fw_attr_td(ud, struct unit_directory, vendor_name_kv)
+fw_attr(ud, struct unit_directory, vendor_oui, const char *, "%s\n")
+fw_attr_td(ud, struct unit_directory, model_name_kv)
+
+static struct device_attribute *const fw_ud_attrs[] = {
+	&dev_attr_ud_address,
+	&dev_attr_ud_length,
+	&dev_attr_ignore_driver,
+};
+
+
+fw_attr(host, struct hpsb_host, node_count, int, "%d\n")
+fw_attr(host, struct hpsb_host, selfid_count, int, "%d\n")
+fw_attr(host, struct hpsb_host, nodes_active, int, "%d\n")
+fw_attr(host, struct hpsb_host, in_bus_reset, int, "%d\n")
+fw_attr(host, struct hpsb_host, is_root, int, "%d\n")
+fw_attr(host, struct hpsb_host, is_cycmst, int, "%d\n")
+fw_attr(host, struct hpsb_host, is_irm, int, "%d\n")
+fw_attr(host, struct hpsb_host, is_busmgr, int, "%d\n")
+
+static struct device_attribute *const fw_host_attrs[] = {
+	&dev_attr_host_node_count,
+	&dev_attr_host_selfid_count,
+	&dev_attr_host_nodes_active,
+	&dev_attr_host_in_bus_reset,
+	&dev_attr_host_is_root,
+	&dev_attr_host_is_cycmst,
+	&dev_attr_host_is_irm,
+	&dev_attr_host_is_busmgr,
+};
+
+
+/* sysfs show for a protocol driver's 'device_ids': one line per
+ * id_table entry, listing only the fields selected by the entry's
+ * match_flags as comma-separated key=value pairs. */
+static ssize_t fw_show_drv_device_ids(struct device_driver *drv, char *buf)
+{
+	struct hpsb_protocol_driver *driver;
+	struct ieee1394_device_id *id;
+	int length = 0;
+	char *scratch = buf;
+
+        driver = container_of(drv, struct hpsb_protocol_driver, driver);
+
+	/* the id table is terminated by an entry with match_flags == 0 */
+	for (id = driver->id_table; id->match_flags != 0; id++) {
+		/* tracks whether a comma is needed before the next field */
+		int need_coma = 0;
+
+		if (id->match_flags & IEEE1394_MATCH_VENDOR_ID) {
+			length += sprintf(scratch, "vendor_id=0x%06x", id->vendor_id);
+			scratch = buf + length;
+			need_coma++;
+		}
+
+		if (id->match_flags & IEEE1394_MATCH_MODEL_ID) {
+			length += sprintf(scratch, "%smodel_id=0x%06x",
+					  need_coma++ ? "," : "",
+					  id->model_id);
+			scratch = buf + length;
+		}
+
+		if (id->match_flags & IEEE1394_MATCH_SPECIFIER_ID) {
+			length += sprintf(scratch, "%sspecifier_id=0x%06x",
+					  need_coma++ ? "," : "",
+					  id->specifier_id);
+			scratch = buf + length;
+		}
+
+		if (id->match_flags & IEEE1394_MATCH_VERSION) {
+			length += sprintf(scratch, "%sversion=0x%06x",
+					  need_coma++ ? "," : "",
+					  id->version);
+			scratch = buf + length;
+		}
+
+		/* terminate the line only if the entry printed anything */
+		if (need_coma) {
+			*scratch++ = '\n';
+			length++;
+		}
+	}
+
+	return length;
+}
+static DRIVER_ATTR(device_ids,S_IRUGO,fw_show_drv_device_ids,NULL);
+
+
+fw_drv_attr(name, const char *, "%s\n")
+
+static struct driver_attribute *const fw_drv_attrs[] = {
+	&driver_attr_drv_name,
+	&driver_attr_device_ids,
+};
+
+
+static void nodemgr_create_drv_files(struct hpsb_protocol_driver *driver)
+{
+	struct device_driver *drv = &driver->driver;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(fw_drv_attrs); i++)
+		driver_create_file(drv, fw_drv_attrs[i]);
+}
+
+
+static void nodemgr_remove_drv_files(struct hpsb_protocol_driver *driver)
+{
+	struct device_driver *drv = &driver->driver;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(fw_drv_attrs); i++)
+		driver_remove_file(drv, fw_drv_attrs[i]);
+}
+
+
+static void nodemgr_create_ne_dev_files(struct node_entry *ne)
+{
+	struct device *dev = &ne->device;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(fw_ne_attrs); i++)
+		device_create_file(dev, fw_ne_attrs[i]);
+}
+
+
+static void nodemgr_create_host_dev_files(struct hpsb_host *host)
+{
+	struct device *dev = &host->device;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(fw_host_attrs); i++)
+		device_create_file(dev, fw_host_attrs[i]);
+}
+
+
+static struct node_entry *find_entry_by_nodeid(struct hpsb_host *host, nodeid_t nodeid);
+
+static void nodemgr_update_host_dev_links(struct hpsb_host *host)
+{
+	struct device *dev = &host->device;
+	struct node_entry *ne;
+
+	sysfs_remove_link(&dev->kobj, "irm_id");
+	sysfs_remove_link(&dev->kobj, "busmgr_id");
+	sysfs_remove_link(&dev->kobj, "host_id");
+
+	if ((ne = find_entry_by_nodeid(host, host->irm_id)))
+		sysfs_create_link(&dev->kobj, &ne->device.kobj, "irm_id");
+	if ((ne = find_entry_by_nodeid(host, host->busmgr_id)))
+		sysfs_create_link(&dev->kobj, &ne->device.kobj, "busmgr_id");
+	if ((ne = find_entry_by_nodeid(host, host->node_id)))
+		sysfs_create_link(&dev->kobj, &ne->device.kobj, "host_id");
+}
+
+static void nodemgr_create_ud_dev_files(struct unit_directory *ud)
+{
+	struct device *dev = &ud->device;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(fw_ud_attrs); i++)
+		device_create_file(dev, fw_ud_attrs[i]);
+
+	if (ud->flags & UNIT_DIRECTORY_SPECIFIER_ID)
+		device_create_file(dev, &dev_attr_ud_specifier_id);
+
+	if (ud->flags & UNIT_DIRECTORY_VERSION)
+		device_create_file(dev, &dev_attr_ud_version);
+
+	if (ud->flags & UNIT_DIRECTORY_VENDOR_ID) {
+		device_create_file(dev, &dev_attr_ud_vendor_id);
+		if (ud->vendor_name_kv)
+			device_create_file(dev, &dev_attr_ud_vendor_name_kv);
+	}
+
+	if (ud->flags & UNIT_DIRECTORY_MODEL_ID) {
+		device_create_file(dev, &dev_attr_ud_model_id);
+		if (ud->model_name_kv)
+			device_create_file(dev, &dev_attr_ud_model_name_kv);
+	}
+}
+
+
+/* Driver-model match callback for the ieee1394 bus: decide whether
+ * 'drv' can handle the unit directory behind 'dev'.  A driver matches
+ * when any id_table entry agrees with the ud on every field selected
+ * by that entry's match_flags.  Returns 1 on match, 0 otherwise. */
+static int nodemgr_bus_match(struct device * dev, struct device_driver * drv)
+{
+        struct hpsb_protocol_driver *driver;
+        struct unit_directory *ud;
+	struct ieee1394_device_id *id;
+
+	/* We only match unit directories */
+	if (dev->platform_data != &nodemgr_ud_platform_data)
+		return 0;
+
+	ud = container_of(dev, struct unit_directory, device);
+	driver = container_of(drv, struct hpsb_protocol_driver, driver);
+
+	/* skip nodes gone from the bus and user-disabled directories */
+	if (ud->ne->in_limbo || ud->ignore_driver)
+		return 0;
+
+        for (id = driver->id_table; id->match_flags != 0; id++) {
+                if ((id->match_flags & IEEE1394_MATCH_VENDOR_ID) &&
+                    id->vendor_id != ud->vendor_id)
+                        continue;
+
+                if ((id->match_flags & IEEE1394_MATCH_MODEL_ID) &&
+                    id->model_id != ud->model_id)
+                        continue;
+
+                if ((id->match_flags & IEEE1394_MATCH_SPECIFIER_ID) &&
+                    id->specifier_id != ud->specifier_id)
+                        continue;
+
+                if ((id->match_flags & IEEE1394_MATCH_VERSION) &&
+                    id->version != ud->version)
+                        continue;
+
+		return 1;
+        }
+
+	return 0;
+}
+
+
+/* Unregister every unit directory belonging to node 'ne'.  Walks the
+ * global ud class list with the _safe iterator since each matching
+ * entry is removed from the list as we go. */
+static void nodemgr_remove_uds(struct node_entry *ne)
+{
+	struct class_device *cdev, *next;
+	struct unit_directory *ud;
+
+	list_for_each_entry_safe(cdev, next, &nodemgr_ud_class.children, node) {
+		ud = container_of(cdev, struct unit_directory, class_dev);
+
+		if (ud->ne != ne)
+			continue;
+
+		class_device_unregister(&ud->class_dev);
+		device_unregister(&ud->device);
+	}
+}
+
+
+/* Tear down a node_entry: unregister its unit directories, its class
+ * device and its struct device.  A temporary reference is taken with
+ * get_device() so 'dev' stays valid across device_unregister(), and
+ * dropped with put_device() at the end. */
+static void nodemgr_remove_ne(struct node_entry *ne)
+{
+	/* the old code initialized dev to &ne->device and immediately
+	 * overwrote it; a single get_device() initialization suffices */
+	struct device *dev = get_device(&ne->device);
+
+	if (!dev)
+		return;
+
+	HPSB_DEBUG("Node removed: ID:BUS[" NODE_BUS_FMT "]  GUID[%016Lx]",
+		   NODE_BUS_ARGS(ne->host, ne->nodeid), (unsigned long long)ne->guid);
+
+	nodemgr_remove_uds(ne);
+
+	class_device_unregister(&ne->class_dev);
+	device_unregister(dev);
+
+	put_device(dev);
+}
+
+
+/* Remove every node_entry child of a host device, then drop the
+ * host's irm_id/busmgr_id/host_id sysfs symlinks.  Uses the _safe
+ * iterator since children are unlinked during the walk. */
+static void nodemgr_remove_host_dev(struct device *dev)
+{
+	struct device *ne_dev, *next;
+
+	list_for_each_entry_safe(ne_dev, next, &dev->children, node)
+		nodemgr_remove_ne(container_of(ne_dev, struct node_entry, device));
+
+	sysfs_remove_link(&dev->kobj, "irm_id");
+	sysfs_remove_link(&dev->kobj, "busmgr_id");
+	sysfs_remove_link(&dev->kobj, "host_id");
+}
+
+
+/* Decode the bus_options quadlet (third quadlet of the node's
+ * bus_info_block) into the individual fields of ne->busopt, and log
+ * them when verbose debugging is enabled. */
+static void nodemgr_update_bus_options(struct node_entry *ne)
+{
+#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
+	/* max_rom field value -> readable ROM block size for the log */
+	static const u16 mr[] = { 4, 64, 1024, 0};
+#endif
+	quadlet_t busoptions = be32_to_cpu(ne->csr->bus_info_data[2]);
+
+	ne->busopt.irmc         = (busoptions >> 31) & 1;
+	ne->busopt.cmc          = (busoptions >> 30) & 1;
+	ne->busopt.isc          = (busoptions >> 29) & 1;
+	ne->busopt.bmc          = (busoptions >> 28) & 1;
+	ne->busopt.pmc          = (busoptions >> 27) & 1;
+	ne->busopt.cyc_clk_acc  = (busoptions >> 16) & 0xff;
+	/* max_rec is encoded as a power of two: 2^(field+1) bytes */
+	ne->busopt.max_rec      = 1 << (((busoptions >> 12) & 0xf) + 1);
+	ne->busopt.max_rom	= (busoptions >> 8) & 0x3;
+	ne->busopt.generation   = (busoptions >> 4) & 0xf;
+	ne->busopt.lnkspd       = busoptions & 0x7;
+
+	HPSB_VERBOSE("NodeMgr: raw=0x%08x irmc=%d cmc=%d isc=%d bmc=%d pmc=%d "
+		     "cyc_clk_acc=%d max_rec=%d max_rom=%d gen=%d lspd=%d",
+		     busoptions, ne->busopt.irmc, ne->busopt.cmc,
+		     ne->busopt.isc, ne->busopt.bmc, ne->busopt.pmc,
+		     ne->busopt.cyc_clk_acc, ne->busopt.max_rec,
+		     mr[ne->busopt.max_rom],
+		     ne->busopt.generation, ne->busopt.lnkspd);
+}
+
+
+/* Allocate and register a node_entry for a newly seen node: fill in
+ * ids/GUID/csr, register its struct device (named by GUID) and class
+ * device, create its sysfs attribute files and decode its bus
+ * options.  Returns the new entry, or NULL on allocation failure.
+ * NOTE(review): device_register()/class_device_register() return
+ * values are not checked here — presumably accepted for this era of
+ * the driver model. */
+static struct node_entry *nodemgr_create_node(octlet_t guid, struct csr1212_csr *csr,
+					      struct host_info *hi, nodeid_t nodeid,
+					      unsigned int generation)
+{
+	struct hpsb_host *host = hi->host;
+        struct node_entry *ne;
+
+	ne = kmalloc(sizeof(struct node_entry), GFP_KERNEL);
+        if (!ne) return NULL;
+
+	memset(ne, 0, sizeof(struct node_entry));
+
+	/* per-node transaction-label pool lives in the host structure */
+	ne->tpool = &host->tpool[nodeid & NODE_MASK];
+
+        ne->host = host;
+        ne->nodeid = nodeid;
+	ne->generation = generation;
+	ne->needs_probe = 1;
+
+        ne->guid = guid;
+	/* the top 24 bits of the GUID are the vendor OUI */
+	ne->guid_vendor_id = (guid >> 40) & 0xffffff;
+	ne->guid_vendor_oui = nodemgr_find_oui_name(ne->guid_vendor_id);
+	ne->csr = csr;
+
+	memcpy(&ne->device, &nodemgr_dev_template_ne,
+	       sizeof(ne->device));
+	ne->device.parent = &host->device;
+	snprintf(ne->device.bus_id, BUS_ID_SIZE, "%016Lx",
+		 (unsigned long long)(ne->guid));
+
+	ne->class_dev.dev = &ne->device;
+	ne->class_dev.class = &nodemgr_ne_class;
+	snprintf(ne->class_dev.class_id, BUS_ID_SIZE, "%016Lx",
+		 (unsigned long long)(ne->guid));
+
+	device_register(&ne->device);
+	class_device_register(&ne->class_dev);
+	/* hold a reference for the nodemgr's own use of the entry */
+	get_device(&ne->device);
+
+	if (ne->guid_vendor_oui)
+		device_create_file(&ne->device, &dev_attr_ne_guid_vendor_oui);
+	nodemgr_create_ne_dev_files(ne);
+
+	nodemgr_update_bus_options(ne);
+
+	HPSB_DEBUG("%s added: ID:BUS[" NODE_BUS_FMT "]  GUID[%016Lx]",
+		   (host->node_id == nodeid) ? "Host" : "Node",
+		   NODE_BUS_ARGS(host, nodeid), (unsigned long long)guid);
+
+        return ne;
+}
+
+
+/* Look up a node_entry by GUID across all hosts.  Walks the node
+ * class's children under its subsystem read lock; returns the entry
+ * or NULL if no node with that GUID is known. */
+static struct node_entry *find_entry_by_guid(u64 guid)
+{
+	struct class *class = &nodemgr_ne_class;
+	struct class_device *cdev;
+	struct node_entry *ne, *ret_ne = NULL;
+
+	down_read(&class->subsys.rwsem);
+	list_for_each_entry(cdev, &class->children, node) {
+		ne = container_of(cdev, struct node_entry, class_dev);
+
+		if (ne->guid == guid) {
+			ret_ne = ne;
+			break;
+		}
+	}
+	up_read(&class->subsys.rwsem);
+
+        return ret_ne;
+}
+
+
+/* Look up a node_entry by (host, node id) pair — node ids are only
+ * unique per bus, so the host must match too.  Walks the node class's
+ * children under its subsystem read lock; returns NULL if absent. */
+static struct node_entry *find_entry_by_nodeid(struct hpsb_host *host, nodeid_t nodeid)
+{
+	struct class *class = &nodemgr_ne_class;
+	struct class_device *cdev;
+	struct node_entry *ne, *ret_ne = NULL;
+
+	down_read(&class->subsys.rwsem);
+	list_for_each_entry(cdev, &class->children, node) {
+		ne = container_of(cdev, struct node_entry, class_dev);
+
+		if (ne->host == host && ne->nodeid == nodeid) {
+			ret_ne = ne;
+			break;
+		}
+	}
+	up_read(&class->subsys.rwsem);
+
+	return ret_ne;
+}
+
+
+/* Register a unit directory with the driver model: copy the ud device
+ * template, name both the device and its class device after the owning
+ * node's bus id plus the ud's index, register them, take a nodemgr
+ * reference, and create the ud's sysfs files. */
+static void nodemgr_register_device(struct node_entry *ne, 
+	struct unit_directory *ud, struct device *parent)
+{
+	memcpy(&ud->device, &nodemgr_dev_template_ud,
+	       sizeof(ud->device));
+
+	ud->device.parent = parent;
+
+	snprintf(ud->device.bus_id, BUS_ID_SIZE, "%s-%u",
+		 ne->device.bus_id, ud->id);
+
+	ud->class_dev.dev = &ud->device;
+	ud->class_dev.class = &nodemgr_ud_class;
+	snprintf(ud->class_dev.class_id, BUS_ID_SIZE, "%s-%u",
+		 ne->device.bus_id, ud->id);
+
+	device_register(&ud->device);
+	class_device_register(&ud->class_dev);
+	/* hold a reference for the nodemgr's own use of the ud */
+	get_device(&ud->device);
+
+	if (ud->vendor_oui)
+		device_create_file(&ud->device, &dev_attr_ud_vendor_oui);
+	nodemgr_create_ud_dev_files(ud);
+}	
+
+
+/* This implementation currently only scans the config rom and its
+ * immediate unit directories looking for software_id and
+ * software_version entries, in order to get driver autoloading working.
+ *
+ * Parses one unit directory (@ud_kv) of node @ne into a newly allocated
+ * struct unit_directory and registers it with the driver model.  SBP2
+ * style logical-unit entries and sub-directories cause additional unit
+ * devices to be registered.  *@id is a per-node running counter used to
+ * build unique bus IDs.  Returns the unit directory, or NULL when an
+ * allocation fails. */
+static struct unit_directory *nodemgr_process_unit_directory
+	(struct host_info *hi, struct node_entry *ne, struct csr1212_keyval *ud_kv,
+	 unsigned int *id, struct unit_directory *parent)
+{
+	struct unit_directory *ud;
+	struct unit_directory *ud_child = NULL;
+	struct csr1212_dentry *dentry;
+	struct csr1212_keyval *kv;
+	u8 last_key_id = 0;
+
+	ud = kmalloc(sizeof(struct unit_directory), GFP_KERNEL);
+	if (!ud)
+		goto unit_directory_error;
+
+	memset (ud, 0, sizeof(struct unit_directory));
+
+	ud->ne = ne;
+	ud->ignore_driver = ignore_drivers;
+	ud->address = ud_kv->offset + CSR1212_CONFIG_ROM_SPACE_BASE;
+	ud->ud_kv = ud_kv;
+	ud->id = (*id)++;
+
+	csr1212_for_each_dir_entry(ne->csr, kv, ud_kv, dentry) {
+		switch (kv->key.id) {
+		case CSR1212_KV_ID_VENDOR:
+			if (kv->key.type == CSR1212_KV_TYPE_IMMEDIATE) {
+				ud->vendor_id = kv->value.immediate;
+				ud->flags |= UNIT_DIRECTORY_VENDOR_ID;
+
+				if (ud->vendor_id)
+					ud->vendor_oui = nodemgr_find_oui_name(ud->vendor_id);
+			}
+			break;
+
+		case CSR1212_KV_ID_MODEL:
+			ud->model_id = kv->value.immediate;
+			ud->flags |= UNIT_DIRECTORY_MODEL_ID;
+			break;
+
+		case CSR1212_KV_ID_SPECIFIER_ID:
+			ud->specifier_id = kv->value.immediate;
+			ud->flags |= UNIT_DIRECTORY_SPECIFIER_ID;
+			break;
+
+		case CSR1212_KV_ID_VERSION:
+			ud->version = kv->value.immediate;
+			ud->flags |= UNIT_DIRECTORY_VERSION;
+			break;
+
+		case CSR1212_KV_ID_DESCRIPTOR:
+			/* Accept only the minimal textual descriptor leaf
+			 * (type, specifier, width, charset and language all
+			 * zero) and attribute it to the immediately
+			 * preceding vendor or model entry. */
+			if (kv->key.type == CSR1212_KV_TYPE_LEAF &&
+			    CSR1212_DESCRIPTOR_LEAF_TYPE(kv) == 0 &&
+			    CSR1212_DESCRIPTOR_LEAF_SPECIFIER_ID(kv) == 0 &&
+			    CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH(kv) == 0 &&
+			    CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET(kv) == 0 &&
+			    CSR1212_TEXTUAL_DESCRIPTOR_LEAF_LANGUAGE(kv) == 0) {
+				switch (last_key_id) {
+				case CSR1212_KV_ID_VENDOR:
+					ud->vendor_name_kv = kv;
+					csr1212_keep_keyval(kv);
+					break;
+
+				case CSR1212_KV_ID_MODEL:
+					ud->model_name_kv = kv;
+					csr1212_keep_keyval(kv);
+					break;
+
+				}
+			} /* else if (kv->key.type == CSR1212_KV_TYPE_DIRECTORY) ... */
+			break;
+
+		case CSR1212_KV_ID_DEPENDENT_INFO:
+			/* Logical Unit Number */
+			if (kv->key.type == CSR1212_KV_TYPE_IMMEDIATE) {
+				/* A second LUN entry: register the unit seen
+				 * so far as its own device and continue
+				 * filling ud for the new LUN. */
+				if (ud->flags & UNIT_DIRECTORY_HAS_LUN) {
+					ud_child = kmalloc(sizeof(struct unit_directory), GFP_KERNEL);
+					if (!ud_child)
+						goto unit_directory_error;
+					memcpy(ud_child, ud, sizeof(struct unit_directory));
+					nodemgr_register_device(ne, ud_child, &ne->device);
+					ud_child = NULL;
+
+					ud->id = (*id)++;
+				}
+				ud->lun = kv->value.immediate;
+				ud->flags |= UNIT_DIRECTORY_HAS_LUN;
+
+			/* Logical Unit Directory */
+			} else if (kv->key.type == CSR1212_KV_TYPE_DIRECTORY) {
+				/* This should really be done in SBP2 as this is
+				 * doing SBP2 specific parsing.
+				 */
+
+				/* first register the parent unit */
+				ud->flags |= UNIT_DIRECTORY_HAS_LUN_DIRECTORY;
+				if (ud->device.bus != &ieee1394_bus_type)
+					nodemgr_register_device(ne, ud, &ne->device);
+
+				/* process the child unit */
+				ud_child = nodemgr_process_unit_directory(hi, ne, kv, id, ud);
+
+				if (ud_child == NULL)
+					break;
+
+				/* inherit unspecified values so hotplug picks it up */
+				if ((ud->flags & UNIT_DIRECTORY_MODEL_ID) &&
+				    !(ud_child->flags & UNIT_DIRECTORY_MODEL_ID))
+				{
+					ud_child->flags |=  UNIT_DIRECTORY_MODEL_ID;
+					ud_child->model_id = ud->model_id;
+				}
+				if ((ud->flags & UNIT_DIRECTORY_SPECIFIER_ID) &&
+				    !(ud_child->flags & UNIT_DIRECTORY_SPECIFIER_ID))
+				{
+					ud_child->flags |=  UNIT_DIRECTORY_SPECIFIER_ID;
+					ud_child->specifier_id = ud->specifier_id;
+				}
+				if ((ud->flags & UNIT_DIRECTORY_VERSION) &&
+				    !(ud_child->flags & UNIT_DIRECTORY_VERSION))
+				{
+					ud_child->flags |=  UNIT_DIRECTORY_VERSION;
+					ud_child->version = ud->version;
+				}
+
+				/* register the child unit */
+				ud_child->flags |= UNIT_DIRECTORY_LUN_DIRECTORY;
+				nodemgr_register_device(ne, ud_child, &ud->device);
+			}
+
+			break;
+
+		default:
+			break;
+		}
+		last_key_id = kv->key.id;
+	}
+
+	/* do not process child units here and only if not already registered */
+	if (!parent && ud->device.bus != &ieee1394_bus_type)
+		nodemgr_register_device(ne, ud, &ne->device);
+
+	return ud;
+
+unit_directory_error:
+	/* kfree(NULL) is a no-op, so no NULL test is needed here.
+	 * NOTE(review): if the ud_child allocation fails after ud has
+	 * already been registered via nodemgr_register_device(), freeing
+	 * ud directly looks unsafe (the device model still references
+	 * it) -- confirm. */
+	kfree(ud);
+	return NULL;
+}
+
+
+/* Scan the root directory of @ne's config ROM: record the vendor id
+ * (and resolved OUI name), node capabilities and the vendor-name
+ * descriptor leaf, and hand every unit entry to
+ * nodemgr_process_unit_directory().  Clears needs_probe and creates
+ * the node's vendor sysfs attribute files. */
+static void nodemgr_process_root_directory(struct host_info *hi, struct node_entry *ne)
+{
+	unsigned int ud_id = 0;
+	struct csr1212_dentry *dentry;
+	struct csr1212_keyval *kv;
+	u8 last_key_id = 0;
+
+	ne->needs_probe = 0;
+
+	csr1212_for_each_dir_entry(ne->csr, kv, ne->csr->root_kv, dentry) {
+		switch (kv->key.id) {
+		case CSR1212_KV_ID_VENDOR:
+			ne->vendor_id = kv->value.immediate;
+
+			if (ne->vendor_id)
+				ne->vendor_oui = nodemgr_find_oui_name(ne->vendor_id);
+			break;
+
+		case CSR1212_KV_ID_NODE_CAPABILITIES:
+			ne->capabilities = kv->value.immediate;
+			break;
+
+		case CSR1212_KV_ID_UNIT:
+			nodemgr_process_unit_directory(hi, ne, kv, &ud_id, NULL);
+			break;
+
+		case CSR1212_KV_ID_DESCRIPTOR:
+			/* Only a minimal textual descriptor leaf (type,
+			 * specifier, width, charset and language all zero)
+			 * directly following the vendor entry is taken as
+			 * the vendor name. */
+			if (last_key_id == CSR1212_KV_ID_VENDOR) {
+				if (kv->key.type == CSR1212_KV_TYPE_LEAF &&
+				    CSR1212_DESCRIPTOR_LEAF_TYPE(kv) == 0 &&
+				    CSR1212_DESCRIPTOR_LEAF_SPECIFIER_ID(kv) == 0 &&
+				    CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH(kv) == 0 &&
+				    CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET(kv) == 0 &&
+				    CSR1212_TEXTUAL_DESCRIPTOR_LEAF_LANGUAGE(kv) == 0) {
+					ne->vendor_name_kv = kv;
+					csr1212_keep_keyval(kv);
+				}
+			}
+			break;
+		}
+		last_key_id = kv->key.id;
+	}
+
+	if (ne->vendor_oui)
+		device_create_file(&ne->device, &dev_attr_ne_vendor_oui);
+	if (ne->vendor_name_kv)
+		device_create_file(&ne->device, &dev_attr_ne_vendor_name_kv);
+}
+
+#ifdef CONFIG_HOTPLUG
+
+/* Build the hotplug environment for a unit directory's class device:
+ * vendor/model/specifier/version IDs plus the owning node's GUID.
+ * Returns -ENODEV for absent, in-limbo or driver-ignored units, and
+ * -ENOMEM if either the string buffer or the envp array runs out. */
+static int nodemgr_hotplug(struct class_device *cdev, char **envp, int num_envp,
+			   char *buffer, int buffer_size)
+{
+	struct unit_directory *ud;
+	int i = 0;
+	int length = 0;
+
+	if (!cdev)
+		return -ENODEV;
+
+	ud = container_of(cdev, struct unit_directory, class_dev);
+
+	if (ud->ne->in_limbo || ud->ignore_driver)
+		return -ENODEV;
+
+/* Append one "KEY=value" string into buffer and record it in envp.
+ * Advances buffer/length past the NUL; bails out with -ENOMEM when the
+ * buffer or envp slots are exhausted (the caller then discards the
+ * partially built environment). */
+#define PUT_ENVP(fmt,val) 					\
+do {								\
+    	int printed;						\
+	envp[i++] = buffer;					\
+	printed = snprintf(buffer, buffer_size - length,	\
+			   fmt, val);				\
+	if ((buffer_size - (length+printed) <= 0) || (i >= num_envp))	\
+		return -ENOMEM;					\
+	length += printed+1;					\
+	buffer += printed+1;					\
+} while (0)
+
+	PUT_ENVP("VENDOR_ID=%06x", ud->vendor_id);
+	PUT_ENVP("MODEL_ID=%06x", ud->model_id);
+	PUT_ENVP("GUID=%016Lx", (unsigned long long)ud->ne->guid);
+	PUT_ENVP("SPECIFIER_ID=%06x", ud->specifier_id);
+	PUT_ENVP("VERSION=%06x", ud->version);
+
+#undef PUT_ENVP
+
+	/* envp must be NULL-terminated for the hotplug core. */
+	envp[i] = NULL;
+
+	return 0;
+}
+
+#else
+
+/* Stub when hotplug support is compiled out. */
+static int nodemgr_hotplug(struct class_device *cdev, char **envp, int num_envp,
+			   char *buffer, int buffer_size)
+{
+	return -ENODEV;
+}
+
+#endif /* CONFIG_HOTPLUG */
+
+
+/* Register a 1394 protocol driver with the driver core.  A successful
+ * driver_register() triggers probing of matching devices; the driver's
+ * sysfs attribute files are then created.  Returns 0 or a negative
+ * errno from driver_register(). */
+int hpsb_register_protocol(struct hpsb_protocol_driver *driver)
+{
+	int error;
+
+	/* This will cause a probe for devices */
+	error = driver_register(&driver->driver);
+	if (error != 0)
+		return error;
+
+	nodemgr_create_drv_files(driver);
+	return error;
+}
+
+/* Unregister a 1394 protocol driver.  The sysfs attribute files are
+ * removed first; driver_unregister() then detaches the driver from
+ * every device it is currently bound to. */
+void hpsb_unregister_protocol(struct hpsb_protocol_driver *driver)
+{
+	nodemgr_remove_drv_files(driver);
+	driver_unregister(&driver->driver);
+}
+
+
+/*
+ * This function updates nodes that were present on the bus before the
+ * reset and still are after the reset.  The nodeid and the config rom
+ * may have changed, and the drivers managing this device must be
+ * informed that this device just went through a bus reset, to allow
+ * them to take whatever actions are required.
+ */
+static void nodemgr_update_node(struct node_entry *ne, struct csr1212_csr *csr,
+				struct host_info *hi, nodeid_t nodeid,
+				unsigned int generation)
+{
+	if (ne->nodeid != nodeid) {
+		HPSB_DEBUG("Node changed: " NODE_BUS_FMT " -> " NODE_BUS_FMT,
+			   NODE_BUS_ARGS(ne->host, ne->nodeid),
+			   NODE_BUS_ARGS(ne->host, nodeid));
+		ne->nodeid = nodeid;
+	}
+
+	/* Compare the cached config-ROM generation against the one in
+	 * the freshly read bus-info block (bits 7..4 of quadlet 2). */
+	if (ne->busopt.generation != ((be32_to_cpu(csr->bus_info_data[2]) >> 4) & 0xf)) {
+		/* ROM changed: drop the old cache (and its private
+		 * nodemgr_csr_info) and adopt the new csr. */
+		kfree(ne->csr->private);
+		csr1212_destroy_csr(ne->csr);
+		ne->csr = csr;
+
+		/* If the node's configrom generation has changed, we
+		 * unregister all the unit directories. */
+		nodemgr_remove_uds(ne);
+
+		nodemgr_update_bus_options(ne);
+
+		/* Mark the node as new, so it gets re-probed */
+		ne->needs_probe = 1;
+	} else {
+		/* old cache is valid, so update its generation */
+		struct nodemgr_csr_info *ci = ne->csr->private;
+		ci->generation = generation;
+		/* free the partially filled now unneeded new cache */
+		kfree(csr->private);
+		csr1212_destroy_csr(csr);
+	}
+
+	if (ne->in_limbo)
+		nodemgr_resume_ne(ne);
+
+	/* Mark the node current */
+	ne->generation = generation;
+}
+
+
+
+/* Read and parse the config ROM of one node (@nodeid) found during a
+ * bus scan.  On success, either creates a new node entry (unknown
+ * GUID) or updates the existing one.  On read/parse failure the
+ * partially built csr and its private info are freed and the node is
+ * silently skipped. */
+static void nodemgr_node_scan_one(struct host_info *hi,
+				  nodeid_t nodeid, int generation)
+{
+	struct hpsb_host *host = hi->host;
+	struct node_entry *ne;
+	octlet_t guid;
+	struct csr1212_csr *csr;
+	struct nodemgr_csr_info *ci;
+
+	ci = kmalloc(sizeof(struct nodemgr_csr_info), GFP_KERNEL);
+	if (!ci)
+		return;
+
+	ci->host = host;
+	ci->nodeid = nodeid;
+	ci->generation = generation;
+
+	/* We need to detect when the ConfigROM's generation has changed,
+	 * so we only update the node's info when it needs to be.  */
+
+	csr = csr1212_create_csr(&nodemgr_csr_ops, 5 * sizeof(quadlet_t), ci);
+	if (!csr || csr1212_parse_csr(csr) != CSR1212_SUCCESS) {
+		HPSB_ERR("Error parsing configrom for node " NODE_BUS_FMT,
+			 NODE_BUS_ARGS(host, nodeid));
+		if (csr)
+			csr1212_destroy_csr(csr);
+		kfree(ci);
+		return;
+	}
+
+	if (csr->bus_info_data[1] != IEEE1394_BUSID_MAGIC) {
+		/* This isn't a 1394 device, but we let it slide. There
+		 * was a report of a device with broken firmware which
+		 * reported '2394' instead of '1394', which is obviously a
+		 * mistake. One would hope that a non-1394 device never
+		 * gets connected to Firewire bus. If someone does, we
+		 * shouldn't be held responsible, so we'll allow it with a
+		 * warning.  */
+		HPSB_WARN("Node " NODE_BUS_FMT " has invalid busID magic [0x%08x]",
+			  NODE_BUS_ARGS(host, nodeid), csr->bus_info_data[1]);
+	}
+
+	/* GUID is quadlets 3 and 4 of the bus info block. */
+	guid = ((u64)be32_to_cpu(csr->bus_info_data[3]) << 32) | be32_to_cpu(csr->bus_info_data[4]);
+	ne = find_entry_by_guid(guid);
+
+	if (ne && ne->host != host && ne->in_limbo) {
+		/* Must have moved this device from one host to another */
+		nodemgr_remove_ne(ne);
+		ne = NULL;
+	}
+
+	/* Ownership of csr (and ci via csr->private) passes to the node
+	 * entry in both branches below. */
+	if (!ne)
+		nodemgr_create_node(guid, csr, hi, nodeid, generation);
+	else
+		nodemgr_update_node(ne, csr, hi, nodeid, generation);
+
+	return;
+}
+
+
+/* Walk the topology map's self-ID packets and probe every node whose
+ * link layer is active.  Extended self-IDs are skipped outright;
+ * nodes with an inactive link still consume a node number. */
+static void nodemgr_node_scan(struct host_info *hi, int generation)
+{
+	struct hpsb_host *host = hi->host;
+	struct selfid *sid = (struct selfid *)host->topology_map;
+	nodeid_t nodeid = LOCAL_BUS;
+	int remaining;
+
+	for (remaining = host->selfid_count; remaining; remaining--, sid++) {
+		if (sid->extended)
+			continue;
+
+		if (!sid->link_active) {
+			nodeid++;
+			continue;
+		}
+
+		nodemgr_node_scan_one(hi, nodeid++, generation);
+	}
+}
+
+
+/* Put node entry @ne "in limbo": the node disappeared from the bus but
+ * its entry is kept around in case it comes back.  Each of its unit
+ * directories is suspended; drivers that refuse (no suspend op, or a
+ * failing one) are released from the device. */
+static void nodemgr_suspend_ne(struct node_entry *ne)
+{
+	struct class_device *cdev;
+	struct unit_directory *ud;
+
+	HPSB_DEBUG("Node suspended: ID:BUS[" NODE_BUS_FMT "]  GUID[%016Lx]",
+		   NODE_BUS_ARGS(ne->host, ne->nodeid), (unsigned long long)ne->guid);
+
+	ne->in_limbo = 1;
+	device_create_file(&ne->device, &dev_attr_ne_in_limbo);
+
+	/* Write-lock the bus subsystem while detaching drivers. */
+	down_write(&ne->device.bus->subsys.rwsem);
+	list_for_each_entry(cdev, &nodemgr_ud_class.children, node) {
+		ud = container_of(cdev, struct unit_directory, class_dev);
+
+		/* Only units belonging to this node. */
+		if (ud->ne != ne)
+			continue;
+
+		if (ud->device.driver &&
+		    (!ud->device.driver->suspend ||
+		      ud->device.driver->suspend(&ud->device, PMSG_SUSPEND, 0)))
+			device_release_driver(&ud->device);
+	}
+	up_write(&ne->device.bus->subsys.rwsem);
+}
+
+
+/* Take node entry @ne out of limbo after it reappeared on the bus:
+ * remove the in_limbo attribute and call every bound driver's resume
+ * op on the node's unit directories. */
+static void nodemgr_resume_ne(struct node_entry *ne)
+{
+	struct class_device *cdev;
+	struct unit_directory *ud;
+
+	ne->in_limbo = 0;
+	device_remove_file(&ne->device, &dev_attr_ne_in_limbo);
+
+	/* Read lock suffices: drivers are only notified, not detached. */
+	down_read(&ne->device.bus->subsys.rwsem);
+	list_for_each_entry(cdev, &nodemgr_ud_class.children, node) {
+		ud = container_of(cdev, struct unit_directory, class_dev);
+
+		if (ud->ne != ne)
+			continue;
+
+		if (ud->device.driver && ud->device.driver->resume)
+			ud->device.driver->resume(&ud->device, 0);
+	}
+	up_read(&ne->device.bus->subsys.rwsem);
+
+	HPSB_DEBUG("Node resumed: ID:BUS[" NODE_BUS_FMT "]  GUID[%016Lx]",
+		   NODE_BUS_ARGS(ne->host, ne->nodeid), (unsigned long long)ne->guid);
+}
+
+
+/* Notify the protocol driver of every bound unit of @ne that the node
+ * survived a bus reset, via the driver's update() op.  A non-zero
+ * return from update() unbinds the driver from that unit (taking the
+ * bus rwsem for writing around the release). */
+static void nodemgr_update_pdrv(struct node_entry *ne)
+{
+	struct unit_directory *ud;
+	struct hpsb_protocol_driver *pdrv;
+	struct class *class = &nodemgr_ud_class;
+	struct class_device *cdev;
+
+	down_read(&class->subsys.rwsem);
+	list_for_each_entry(cdev, &class->children, node) {
+		ud = container_of(cdev, struct unit_directory, class_dev);
+		if (ud->ne != ne || !ud->device.driver)
+			continue;
+
+		pdrv = container_of(ud->device.driver, struct hpsb_protocol_driver, driver);
+
+		if (pdrv->update && pdrv->update(ud)) {
+			down_write(&ud->device.bus->subsys.rwsem);
+			device_release_driver(&ud->device);
+			up_write(&ud->device.bus->subsys.rwsem);
+		}
+	}
+	up_read(&class->subsys.rwsem);
+}
+
+
+/* Post-scan handling for one node entry.  New or changed nodes get a
+ * full config-ROM probe; unchanged nodes that match the current
+ * generation get driver update callbacks; nodes that vanished are
+ * suspended.  A device reference is held across the work. */
+static void nodemgr_probe_ne(struct host_info *hi, struct node_entry *ne, int generation)
+{
+	struct device *ref;
+
+	if (ne->host != hi->host || ne->in_limbo)
+		return;
+
+	ref = get_device(&ne->device);
+	if (!ref)
+		return;
+
+	if (ne->needs_probe)
+		nodemgr_process_root_directory(hi, ne);	/* full rescan */
+	else if (ne->generation == generation)
+		nodemgr_update_pdrv(ne);	/* still present: notify drivers */
+	else
+		nodemgr_suspend_ne(ne);		/* gone: put into limbo */
+
+	put_device(ref);
+}
+
+
+/* Run the per-node post-scan logic (probe / driver update / suspend)
+ * over every known node entry, then ask the driver core to rescan the
+ * bus so newly appeared devices can be bound to drivers. */
+static void nodemgr_node_probe(struct host_info *hi, int generation)
+{
+	struct hpsb_host *host = hi->host;
+	struct class *class = &nodemgr_ne_class;
+	struct class_device *cdev;
+
+	/* Do some processing of the nodes we've probed. This pulls them
+	 * into the sysfs layer if needed, and can result in processing of
+	 * unit-directories, or just updating the node and its
+	 * unit-directories. */
+	down_read(&class->subsys.rwsem);
+	list_for_each_entry(cdev, &class->children, node)
+		nodemgr_probe_ne(hi, container_of(cdev, struct node_entry, class_dev), generation);
+	up_read(&class->subsys.rwsem);
+
+	/* If we had a bus reset while we were scanning the bus, it is
+	 * possible that we did not probe all nodes.  In that case, we
+	 * skip the clean up for now, since we could remove nodes that
+	 * were still on the bus.  The bus reset increased hi->reset_sem,
+	 * so there's a bus scan pending which will do the clean up
+	 * eventually.
+	 *
+	 * Now let's tell the bus to rescan our devices. This may seem
+	 * like overhead, but the driver-model core will only scan a
+	 * device for a driver when either the device is added, or when a
+	 * new driver is added. A bus reset is a good reason to rescan
+	 * devices that were there before.  For example, an sbp2 device
+	 * may become available for login, if the host that held it was
+	 * just removed.  */
+
+	if (generation == get_hpsb_generation(host))
+		bus_rescan_devices(&ieee1394_bus_type);
+}
+
+/* Because we are a 1394a-2000 compliant IRM, we need to inform all the other
+ * nodes of the broadcast channel.  (Really we're only setting the validity
+ * bit). Other IRM responsibilities go in here as well.
+ *
+ * Returns 1 when the bus can be left as it is, 0 when a bus reset was
+ * issued (the caller then retries; @cycles counts those retries and
+ * caps them at 5 to avoid reset loops). */
+static int nodemgr_do_irm_duties(struct hpsb_host *host, int cycles)
+{
+	quadlet_t bc;
+
+	/* if irm_id == -1 then there is no IRM on this bus */
+	if (!host->is_irm || host->irm_id == (nodeid_t)-1)
+		return 1;
+
+	host->csr.broadcast_channel |= 0x40000000;  /* set validity bit */
+
+	bc = cpu_to_be32(host->csr.broadcast_channel);
+
+	/* Broadcast the updated BROADCAST_CHANNEL register to all nodes. */
+	hpsb_write(host, LOCAL_BUS | ALL_NODES, get_hpsb_generation(host),
+		   (CSR_REGISTER_BASE | CSR_BROADCAST_CHANNEL),
+		   &bc, sizeof(quadlet_t));
+
+	/* If there is no bus manager then we should set the root node's
+	 * force_root bit to promote bus stability per the 1394
+	 * spec. (8.4.2.6) */
+	if (host->busmgr_id == 0xffff && host->node_count > 1)
+	{
+		/* The root node always has the highest node number. */
+		u16 root_node = host->node_count - 1;
+		struct node_entry *ne = find_entry_by_nodeid(host, root_node | LOCAL_BUS);
+
+		if (ne && ne->busopt.cmc)
+			hpsb_send_phy_config(host, root_node, -1);
+		else {
+			HPSB_DEBUG("The root node is not cycle master capable; "
+				   "selecting a new root node and resetting...");
+
+			if (cycles >= 5) {
+				/* Oh screw it! Just leave the bus as it is */
+				HPSB_DEBUG("Stopping reset loop for IRM sanity");
+				return 1;
+			}
+
+			/* Make ourselves root and force a reset. */
+			hpsb_send_phy_config(host, NODEID_TO_NODE(host->node_id), -1);
+			hpsb_reset_bus(host, LONG_RESET_FORCE_ROOT);
+
+			return 0;
+		}
+	}
+
+	return 1;
+}
+
+/* We need to ensure that if we are not the IRM, that the IRM node is capable of
+ * everything we can do, otherwise issue a bus reset and try to become the IRM
+ * ourselves.
+ *
+ * Returns 1 when the current IRM is acceptable (or checking is
+ * disabled/moot), 0 when a bus reset was issued to take over; @cycles
+ * caps the retries at 5. */
+static int nodemgr_check_irm_capability(struct hpsb_host *host, int cycles)
+{
+	quadlet_t bc;
+	int status;
+
+	if (hpsb_disable_irm || host->is_irm)
+		return 1;
+
+	/* Probe the remote IRM's BROADCAST_CHANNEL register. */
+	status = hpsb_read(host, LOCAL_BUS | (host->irm_id),
+			   get_hpsb_generation(host),
+			   (CSR_REGISTER_BASE | CSR_BROADCAST_CHANNEL),
+			   &bc, sizeof(quadlet_t));
+
+	if (status < 0 || !(be32_to_cpu(bc) & 0x80000000)) {
+		/* The current irm node does not have a valid BROADCAST_CHANNEL
+		 * register and we do, so reset the bus with force_root set */
+		HPSB_DEBUG("Current remote IRM is not 1394a-2000 compliant, resetting...");
+
+		if (cycles >= 5) {
+			/* Oh screw it! Just leave the bus as it is */
+			HPSB_DEBUG("Stopping reset loop for IRM sanity");
+			return 1;
+		}
+
+		hpsb_send_phy_config(host, NODEID_TO_NODE(host->node_id), -1);
+		hpsb_reset_bus(host, LONG_RESET_FORCE_ROOT);
+
+		return 0;
+	}
+
+	return 1;
+}
+
+/* Per-host kernel thread (knodemgrd_N).  Sleeps on hi->reset_sem and,
+ * on every bus reset, waits for the bus to settle, performs IRM
+ * checks, scans the nodes and runs the probe/update/suspend pass.
+ * Exits when hi->kill_me is set (via nodemgr_remove_host) or on an
+ * unexpected signal. */
+static int nodemgr_host_thread(void *__hi)
+{
+	struct host_info *hi = (struct host_info *)__hi;
+	struct hpsb_host *host = hi->host;
+	int reset_cycles = 0;
+
+	/* No userlevel access needed */
+	daemonize(hi->daemon_name);
+
+	/* Setup our device-model entries */
+	nodemgr_create_host_dev_files(host);
+
+	/* Sit and wait for a signal to probe the nodes on the bus. This
+	 * happens when we get a bus reset. */
+	while (1) {
+		unsigned int generation = 0;
+		int i;
+
+		if (down_interruptible(&hi->reset_sem) ||
+		    down_interruptible(&nodemgr_serialize)) {
+			if (try_to_freeze(PF_FREEZE))
+				continue;
+			printk("NodeMgr: received unexpected signal?!\n" );
+			break;
+		}
+
+		if (hi->kill_me) {
+			up(&nodemgr_serialize);
+			break;
+		}
+
+		/* Pause for 1/4 second in 1/16 second intervals,
+		 * to make sure things settle down. */
+		for (i = 0; i < 4 ; i++) {
+			/* NOTE(review): setting TASK_INTERRUPTIBLE here looks
+			 * redundant -- msleep_interruptible() sets the task
+			 * state itself; confirm before removing. */
+			set_current_state(TASK_INTERRUPTIBLE);
+			if (msleep_interruptible(63)) {
+				up(&nodemgr_serialize);
+				goto caught_signal;
+			}
+
+			/* Now get the generation in which the node ID's we collect
+			 * are valid.  During the bus scan we will use this generation
+			 * for the read transactions, so that if another reset occurs
+			 * during the scan the transactions will fail instead of
+			 * returning bogus data. */
+			generation = get_hpsb_generation(host);
+
+			/* If we get a reset before we are done waiting, then
+			 * start the waiting over again */
+			while (!down_trylock(&hi->reset_sem))
+				i = 0;
+
+			/* Check the kill_me again */
+			if (hi->kill_me) {
+				up(&nodemgr_serialize);
+				goto caught_signal;
+			}
+		}
+
+		if (!nodemgr_check_irm_capability(host, reset_cycles)) {
+			reset_cycles++;
+			up(&nodemgr_serialize);
+			continue;
+		}
+
+		/* Scan our nodes to get the bus options and create node
+		 * entries. This does not do the sysfs stuff, since that
+		 * would trigger hotplug callbacks and such, which is a
+		 * bad idea at this point. */
+		nodemgr_node_scan(hi, generation);
+		if (!nodemgr_do_irm_duties(host, reset_cycles)) {
+			reset_cycles++;
+			up(&nodemgr_serialize);
+			continue;
+		}
+
+		reset_cycles = 0;
+
+		/* This actually does the full probe, with sysfs
+		 * registration. */
+		nodemgr_node_probe(hi, generation);
+
+		/* Update some of our sysfs symlinks */
+		nodemgr_update_host_dev_links(host);
+
+		up(&nodemgr_serialize);
+	}
+
+caught_signal:
+	HPSB_VERBOSE("NodeMgr: Exiting thread");
+
+	complete_and_exit(&hi->exited, 0);
+}
+
+/* Invoke @cb(host, @__data) for every registered hpsb host, under the
+ * host class rwsem.  Iteration stops at the first callback returning
+ * non-zero; that value is returned (0 when all callbacks succeed). */
+int nodemgr_for_each_host(void *__data, int (*cb)(struct hpsb_host *, void *))
+{
+	struct class *class = &hpsb_host_class;
+	struct class_device *cdev;
+	struct hpsb_host *host;
+	int rc = 0;
+
+	down_read(&class->subsys.rwsem);
+	list_for_each_entry(cdev, &class->children, node) {
+		host = container_of(cdev, struct hpsb_host, class_dev);
+		rc = cb(host, __data);
+		if (rc)
+			break;
+	}
+	up_read(&class->subsys.rwsem);
+
+	return rc;
+}
+
+/* The following four convenience functions use a struct node_entry
+ * for addressing a node on the bus.  They are intended for use by any
+ * process context, not just the nodemgr thread, so we need to be a
+ * little careful when reading out the node ID and generation.  The
+ * thing that can go wrong is that we get the node ID, then a bus
+ * reset occurs, and then we read the generation.  The node ID is
+ * possibly invalid, but the generation is current, and we end up
+ * sending a packet to the wrong node.
+ *
+ * The solution is to make sure we read the generation first, so that
+ * if a reset occurs in the process, we end up with a stale generation
+ * and the transactions will fail instead of silently using wrong node
+ * ID's.
+ */
+
+/* Fill @pkt's routing fields from @ne.  The generation is read before
+ * the node ID -- the barrier() keeps the compiler from reordering the
+ * two reads -- so a bus reset in between leaves a stale generation and
+ * the send fails instead of addressing the wrong node. */
+void hpsb_node_fill_packet(struct node_entry *ne, struct hpsb_packet *pkt)
+{
+        pkt->host = ne->host;
+        pkt->generation = ne->generation;
+	barrier();
+        pkt->node_id = ne->nodeid;
+}
+
+/* Block write of @length bytes from @buffer to @addr on node @ne.
+ * The generation is sampled before the node ID (barrier() prevents
+ * reordering) so that a bus reset in between makes the transaction
+ * fail rather than hit the wrong node. */
+int hpsb_node_write(struct node_entry *ne, u64 addr,
+		    quadlet_t *buffer, size_t length)
+{
+	unsigned int generation = ne->generation;
+
+	barrier();
+	return hpsb_write(ne->host, ne->nodeid, generation,
+			  addr, buffer, length);
+}
+
+/* Highlevel callback: a new host appeared.  Allocate the per-host
+ * state and spawn the knodemgrd kernel thread that services its bus
+ * resets.  On failure the host simply gets no node manager. */
+static void nodemgr_add_host(struct hpsb_host *host)
+{
+	struct host_info *hi;
+
+	hi = hpsb_create_hostinfo(&nodemgr_highlevel, host, sizeof(*hi));
+	if (!hi) {
+		HPSB_ERR ("NodeMgr: out of memory in add host");
+		return;
+	}
+
+	hi->host = host;
+	init_completion(&hi->exited);
+	sema_init(&hi->reset_sem, 0);
+
+	sprintf(hi->daemon_name, "knodemgrd_%d", host->id);
+
+	hi->pid = kernel_thread(nodemgr_host_thread, hi, CLONE_KERNEL);
+	if (hi->pid < 0) {
+		HPSB_ERR ("NodeMgr: failed to start %s thread for %s",
+			  hi->daemon_name, host->driver->name);
+		hpsb_destroy_hostinfo(&nodemgr_highlevel, host);
+	}
+}
+
+/* Highlevel callback: a bus reset occurred on @host.  Wake the host's
+ * nodemgr thread by raising its reset semaphore. */
+static void nodemgr_host_reset(struct hpsb_host *host)
+{
+	struct host_info *hi = hpsb_get_hostinfo(&nodemgr_highlevel, host);
+
+	if (!hi) {
+		HPSB_ERR ("NodeMgr: could not process reset of unused host");
+		return;
+	}
+
+	HPSB_VERBOSE("NodeMgr: Processing host reset for %s", hi->daemon_name);
+	up(&hi->reset_sem);
+}
+
+/* Highlevel callback: @host is going away.  Ask its nodemgr thread to
+ * exit, wait for it, then tear down the host's device entries. */
+static void nodemgr_remove_host(struct hpsb_host *host)
+{
+	struct host_info *hi = hpsb_get_hostinfo(&nodemgr_highlevel, host);
+
+	if (!hi) {
+		HPSB_ERR("NodeMgr: host %s does not exist, cannot remove",
+			 host->driver->name);
+		return;
+	}
+
+	if (hi->pid >= 0) {
+		hi->kill_me = 1;
+		mb();	/* make kill_me visible before waking the thread */
+		up(&hi->reset_sem);
+		wait_for_completion(&hi->exited);
+		nodemgr_remove_host_dev(&host->device);
+	}
+}
+
+/* Hooks into the ieee1394 highlevel core: called on host creation, on
+ * every bus reset, and on host removal. */
+static struct hpsb_highlevel nodemgr_highlevel = {
+	.name =		"Node manager",
+	.add_host =	nodemgr_add_host,
+	.host_reset =	nodemgr_host_reset,
+	.remove_host =	nodemgr_remove_host,
+};
+
+/* Module init: register the node-entry and unit-directory classes,
+ * then hook the node manager into the hpsb highlevel layer.  Returns
+ * 0 or the failing class_register() error. */
+int init_ieee1394_nodemgr(void)
+{
+	int error;
+
+	error = class_register(&nodemgr_ne_class);
+	if (error < 0)
+		return error;
+
+	error = class_register(&nodemgr_ud_class);
+	if (error < 0)
+		goto fail_ud;
+
+	hpsb_register_highlevel(&nodemgr_highlevel);
+	return 0;
+
+fail_ud:
+	class_unregister(&nodemgr_ne_class);
+	return error;
+}
+
+/* Module exit: unhook from the highlevel layer first, then drop the
+ * classes in reverse registration order. */
+void cleanup_ieee1394_nodemgr(void)
+{
+	hpsb_unregister_highlevel(&nodemgr_highlevel);
+
+	class_unregister(&nodemgr_ud_class);
+	class_unregister(&nodemgr_ne_class);
+}
diff --git a/drivers/ieee1394/nodemgr.h b/drivers/ieee1394/nodemgr.h
new file mode 100644
index 000000000000..3a2f0c02fd08
--- /dev/null
+++ b/drivers/ieee1394/nodemgr.h
@@ -0,0 +1,207 @@
+/*
+ * Copyright (C) 2000	Andreas E. Bombe
+ *               2001	Ben Collins <bcollins@debian.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef _IEEE1394_NODEMGR_H
+#define _IEEE1394_NODEMGR_H
+
+#include <linux/device.h>
+#include "csr1212.h"
+#include "ieee1394_core.h"
+#include "ieee1394_hotplug.h"
+
+/* '1' '3' '9' '4' in ASCII */
+#define IEEE1394_BUSID_MAGIC	__constant_cpu_to_be32(0x31333934)
+
+/* This is the start of a Node entry structure. It should be a stable API
+ * for which to gather info from the Node Manager about devices attached
+ * to the bus.  */
+/* Per-node values decoded from the bus-info block of the config ROM. */
+struct bus_options {
+	u8	irmc;		/* Iso Resource Manager Capable */
+	u8	cmc;		/* Cycle Master Capable */
+	u8	isc;		/* Iso Capable */
+	u8	bmc;		/* Bus Master Capable */
+	u8	pmc;		/* Power Manager Capable (PNP spec) */
+	u8	cyc_clk_acc;	/* Cycle clock accuracy */
+	u8	max_rom;	/* Maximum block read supported in the CSR */
+	u8	generation;	/* Incremented when configrom changes */
+	u8	lnkspd;		/* Link speed */
+	u16	max_rec;	/* Maximum packet size node can receive */
+};
+
+
+/* Bits for unit_directory.flags: record which entries were actually
+ * present in the unit directory of the config ROM. */
+#define UNIT_DIRECTORY_VENDOR_ID		0x01
+#define UNIT_DIRECTORY_MODEL_ID			0x02
+#define UNIT_DIRECTORY_SPECIFIER_ID		0x04
+#define UNIT_DIRECTORY_VERSION			0x08
+#define UNIT_DIRECTORY_HAS_LUN_DIRECTORY	0x10
+#define UNIT_DIRECTORY_LUN_DIRECTORY		0x20
+#define UNIT_DIRECTORY_HAS_LUN			0x40
+
+/*
+ * A unit directory corresponds to a protocol supported by the
+ * node. If a node supports eg. IP/1394 and AV/C, its config rom has a
+ * unit directory for each of these protocols.
+ */
+struct unit_directory {
+	struct node_entry *ne;  /* The node which this directory belongs to */
+	octlet_t address;       /* Address of the unit directory on the node */
+	u8 flags;		/* Indicates which entries were read */
+
+	quadlet_t vendor_id;	/* immediate vendor entry, if flags say so */
+	struct csr1212_keyval *vendor_name_kv;	/* textual vendor descriptor leaf */
+	const char *vendor_oui;	/* OUI name resolved from vendor_id */
+
+	quadlet_t model_id;	/* immediate model entry, if flags say so */
+	struct csr1212_keyval *model_name_kv;	/* textual model descriptor leaf */
+	quadlet_t specifier_id;	/* protocol specifier (valid per flags) */
+	quadlet_t version;	/* protocol version (valid per flags) */
+
+	unsigned int id;	/* per-node counter; used to build bus IDs */
+
+	int ignore_driver;	/* non-zero: hotplug/driver matching skips this unit */
+
+	int length;		/* Number of quadlets */
+
+	struct device device;	/* driver-model device, child of the node */
+
+	struct class_device class_dev;	/* sysfs class entry */
+
+	struct csr1212_keyval *ud_kv;	/* backing config-ROM keyval */
+	u32 lun;                /* logical unit number immediate value */
+};
+
+struct node_entry {
+	u64 guid;			/* GUID of this node */
+	u32 guid_vendor_id;		/* Top 24bits of guid */
+	const char *guid_vendor_oui;	/* OUI name of guid vendor id */
+
+	struct hpsb_host *host;		/* Host this node is attached to */
+	nodeid_t nodeid;		/* NodeID */
+	struct bus_options busopt;	/* Bus Options */
+	int needs_probe;		/* set when the config ROM must be (re)scanned */
+	unsigned int generation;	/* Synced with hpsb generation */
+
+	/* The following is read from the config rom */
+	u32 vendor_id;
+	struct csr1212_keyval *vendor_name_kv;	/* textual vendor descriptor leaf */
+	const char *vendor_oui;		/* OUI name resolved from vendor_id */
+
+	u32 capabilities;
+	struct hpsb_tlabel_pool *tpool;
+
+	struct device device;		/* driver-model device for the node */
+
+	struct class_device class_dev;	/* sysfs class entry */
+
+	/* Means this node is not attached anymore */
+	int in_limbo;
+
+	struct csr1212_csr *csr;	/* parsed config ROM cache */
+};
+
+struct hpsb_protocol_driver {
+	/* The name of the driver, e.g. SBP2 or IP1394 */
+	const char *name;
+
+	/*
+	 * The device id table describing the protocols and/or devices
+	 * supported by this driver.  This is used by the nodemgr to
+	 * decide if a driver could support a given node, but the
+	 * probe function below can implement further protocol
+	 * dependent or vendor dependent checking.
+	 */
+	struct ieee1394_device_id *id_table;
+
+	/*
+	 * The update function is called when the node has just
+	 * survived a bus reset, i.e. it is still present on the bus.
+	 * However, it may be necessary to reestablish the connection
+	 * or login into the node again, depending on the protocol. If
+	 * update fails (returns non-zero), the driver is unbound from
+	 * this device.
+	 */
+	int (*update)(struct unit_directory *ud);
+
+	/* Our LDM structure */
+	struct device_driver driver;
+};
+
+int hpsb_register_protocol(struct hpsb_protocol_driver *driver);
+void hpsb_unregister_protocol(struct hpsb_protocol_driver *driver);
+
+/* A node entry is valid iff its cached generation matches the host's
+ * current bus generation, i.e. no bus reset happened since the entry
+ * was last updated. */
+static inline int hpsb_node_entry_valid(struct node_entry *ne)
+{
+	return ne->generation == get_hpsb_generation(ne->host);
+}
+
+/*
+ * Returns a node entry (which has its reference count incremented) or NULL if
+ * the GUID in question is not known.  Getting a valid entry does not mean that
+ * the node with this GUID is currently accessible (might be powered down).
+ */
+struct node_entry *hpsb_guid_get_entry(u64 guid);
+
+/* Same as above, but use the nodeid to get an node entry. This is not
+ * fool-proof by itself, since the nodeid can change.  */
+struct node_entry *hpsb_nodeid_get_entry(struct hpsb_host *host, nodeid_t nodeid);
+
+/*
+ * If the entry refers to a local host, this function will return the pointer
+ * to the hpsb_host structure.  It will return NULL otherwise.  Once you have
+ * established it is a local host, you can use that knowledge from then on (the
+ * GUID won't wander to an external node).  */
+struct hpsb_host *hpsb_get_host_by_ne(struct node_entry *ne);
+
+/*
+ * This will fill in the given, pre-initialised hpsb_packet with the current
+ * information from the node entry (host, node ID, generation number).
+ * (The function returns void and always fills in the packet; validity
+ * of the node should be checked with hpsb_node_entry_valid() instead.)
+ *
+ * Note that packet sending may still fail in hpsb_send_packet if a bus reset
+ * happens while you are trying to set up the packet (due to obsolete generation
+ * number).  It will at least reliably fail so that you don't accidentally and
+ * unknowingly send your packet to the wrong node.
+ */
+void hpsb_node_fill_packet(struct node_entry *ne, struct hpsb_packet *pkt);
+
+int hpsb_node_read(struct node_entry *ne, u64 addr,
+		   quadlet_t *buffer, size_t length);
+int hpsb_node_write(struct node_entry *ne, u64 addr,
+		    quadlet_t *buffer, size_t length);
+int hpsb_node_lock(struct node_entry *ne, u64 addr,
+		   int extcode, quadlet_t *data, quadlet_t arg);
+
+
+/* Iterate the hosts, calling a given function with supplied data for each
+ * host. */
+int nodemgr_for_each_host(void *__data, int (*cb)(struct hpsb_host *, void *));
+
+
+int init_ieee1394_nodemgr(void);
+void cleanup_ieee1394_nodemgr(void);
+
+
+/* The template for a host device */
+extern struct device nodemgr_dev_template_host;
+
+/* Bus attributes we export */
+extern struct bus_attribute *const fw_bus_attrs[];
+
+#endif /* _IEEE1394_NODEMGR_H */
diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c
new file mode 100644
index 000000000000..97ff364c0434
--- /dev/null
+++ b/drivers/ieee1394/ohci1394.c
@@ -0,0 +1,3705 @@
+/*
+ * ohci1394.c - driver for OHCI 1394 boards
+ * Copyright (C)1999,2000 Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>
+ *                        Gord Peters <GordPeters@smarttech.com>
+ *              2001      Ben Collins <bcollins@debian.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/*
+ * Things known to be working:
+ * . Async Request Transmit
+ * . Async Response Receive
+ * . Async Request Receive
+ * . Async Response Transmit
+ * . Iso Receive
+ * . DMA mmap for iso receive
+ * . Config ROM generation
+ *
+ * Things implemented, but still in test phase:
+ * . Iso Transmit
+ * . Async Stream Packets Transmit (Receive done via Iso interface)
+ *
+ * Things not implemented:
+ * . DMA error recovery
+ *
+ * Known bugs:
+ * . devctl BUS_RESET arg confusion (reset type or root holdoff?)
+ *   added LONG_RESET_ROOT and SHORT_RESET_ROOT for root holdoff --kk
+ */
+
+/*
+ * Acknowledgments:
+ *
+ * Adam J Richter <adam@yggdrasil.com>
+ *  . Use of pci_class to find device
+ *
+ * Emilie Chung	<emilie.chung@axis.com>
+ *  . Tip on Async Request Filter
+ *
+ * Pascal Drolet <pascal.drolet@informission.ca>
+ *  . Various tips for optimization and functionnalities
+ *
+ * Robert Ficklin <rficklin@westengineering.com>
+ *  . Loop in irq_handler
+ *
+ * James Goodwin <jamesg@Filanet.com>
+ *  . Various tips on initialization, self-id reception, etc.
+ *
+ * Albrecht Dress <ad@mpifr-bonn.mpg.de>
+ *  . Apple PowerBook detection
+ *
+ * Daniel Kobras <daniel.kobras@student.uni-tuebingen.de>
+ *  . Reset the board properly before leaving + misc cleanups
+ *
+ * Leon van Stuivenberg <leonvs@iae.nl>
+ *  . Bug fixes
+ *
+ * Ben Collins <bcollins@debian.org>
+ *  . Working big-endian support
+ *  . Updated to 2.4.x module scheme (PCI aswell)
+ *  . Config ROM generation
+ *
+ * Manfred Weihs <weihs@ict.tuwien.ac.at>
+ *  . Reworked code for initiating bus resets
+ *    (long, short, with or without hold-off)
+ *
+ * Nandu Santhi <contactnandu@users.sourceforge.net>
+ *  . Added support for nVidia nForce2 onboard Firewire chipset
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/wait.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pci.h>
+#include <linux/fs.h>
+#include <linux/poll.h>
+#include <asm/byteorder.h>
+#include <asm/atomic.h>
+#include <asm/uaccess.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+
+#include <asm/pgtable.h>
+#include <asm/page.h>
+#include <asm/irq.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+#include <linux/init.h>
+
+#ifdef CONFIG_PPC_PMAC
+#include <asm/machdep.h>
+#include <asm/pmac_feature.h>
+#include <asm/prom.h>
+#include <asm/pci-bridge.h>
+#endif
+
+#include "csr1212.h"
+#include "ieee1394.h"
+#include "ieee1394_types.h"
+#include "hosts.h"
+#include "dma.h"
+#include "iso.h"
+#include "ieee1394_core.h"
+#include "highlevel.h"
+#include "ohci1394.h"
+
/* Map the subsystem-wide verbose-debug Kconfig option onto this driver's
 * local debug switch. */
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
#define OHCI1394_DEBUG
#endif

#ifdef DBGMSG
#undef DBGMSG
#endif

/* Per-card debug message; expands ohci->host->id, so a local `ohci`
 * variable must be in scope at every call site. */
#ifdef OHCI1394_DEBUG
#define DBGMSG(fmt, args...) \
printk(KERN_INFO "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
#else
#define DBGMSG(fmt, args...)
#endif

/* DMA mapping bookkeeping: with CONFIG_IEEE1394_OHCI_DMA_DEBUG set, each
 * alloc/free is logged along with a running count of outstanding DMA
 * mappings (global_outstanding_dmas), which helps spot mapping leaks. */
#ifdef CONFIG_IEEE1394_OHCI_DMA_DEBUG
#define OHCI_DMA_ALLOC(fmt, args...) \
	HPSB_ERR("%s(%s)alloc(%d): "fmt, OHCI1394_DRIVER_NAME, __FUNCTION__, \
		++global_outstanding_dmas, ## args)
#define OHCI_DMA_FREE(fmt, args...) \
	HPSB_ERR("%s(%s)free(%d): "fmt, OHCI1394_DRIVER_NAME, __FUNCTION__, \
		--global_outstanding_dmas, ## args)
static int global_outstanding_dmas = 0;
#else
#define OHCI_DMA_ALLOC(fmt, args...)
#define OHCI_DMA_FREE(fmt, args...)
#endif

/* print general (card independent) information */
#define PRINT_G(level, fmt, args...) \
printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args)

/* print card specific information (needs a local `ohci` in scope) */
#define PRINT(level, fmt, args...) \
printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)

static char version[] __devinitdata =
	"$Rev: 1250 $ Ben Collins <bcollins@debian.org>";

/* Module Parameters */
/* phys_dma: enable physical DMA (see MODULE_PARM_DESC); mode 0644 makes
 * it runtime-writable via sysfs. */
static int phys_dma = 1;
module_param(phys_dma, int, 0644);
MODULE_PARM_DESC(phys_dma, "Enable physical dma (default = 1).");
+
+static void dma_trm_tasklet(unsigned long data);
+static void dma_trm_reset(struct dma_trm_ctx *d);
+
+static int alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
+			     enum context_type type, int ctx, int num_desc,
+			     int buf_size, int split_buf_size, int context_base);
+static void stop_dma_rcv_ctx(struct dma_rcv_ctx *d);
+static void free_dma_rcv_ctx(struct dma_rcv_ctx *d);
+
+static int alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
+			     enum context_type type, int ctx, int num_desc,
+			     int context_base);
+
+static void ohci1394_pci_remove(struct pci_dev *pdev);
+
+#ifndef __LITTLE_ENDIAN
+static unsigned hdr_sizes[] =
+{
+	3,	/* TCODE_WRITEQ */
+	4,	/* TCODE_WRITEB */
+	3,	/* TCODE_WRITE_RESPONSE */
+	0,	/* ??? */
+	3,	/* TCODE_READQ */
+	4,	/* TCODE_READB */
+	3,	/* TCODE_READQ_RESPONSE */
+	4,	/* TCODE_READB_RESPONSE */
+	1,	/* TCODE_CYCLE_START (???) */
+	4,	/* TCODE_LOCK_REQUEST */
+	2,	/* TCODE_ISO_DATA */
+	4,	/* TCODE_LOCK_RESPONSE */
+};
+
+/* Swap headers */
+static inline void packet_swab(quadlet_t *data, int tcode)
+{
+	size_t size = hdr_sizes[tcode];
+
+	if (tcode > TCODE_LOCK_RESPONSE || hdr_sizes[tcode] == 0)
+		return;
+
+	while (size--)
+		data[size] = swab32(data[size]);
+}
+#else
+/* Don't waste cycles on same sex byte swaps */
+#define packet_swab(w,x)
+#endif /* !LITTLE_ENDIAN */
+
+/***********************************
+ * IEEE-1394 functionality section *
+ ***********************************/
+
/*
 * Read PHY register `addr` through OHCI1394_PhyControl.  Issues a read
 * request and busy-waits up to OHCI_LOOP_COUNT ms for the done bit;
 * returns the data byte from bits 16-23.  Serialized against
 * set_phy_reg() by ohci->phy_reg_lock.
 */
static u8 get_phy_reg(struct ti_ohci *ohci, u8 addr)
{
	int i;
	unsigned long flags;
	quadlet_t r;

	spin_lock_irqsave (&ohci->phy_reg_lock, flags);

	/* 0x00008000 = rdReg request for PHY register `addr` */
	reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | 0x00008000);

	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
		/* bit 31 set => read completed, data valid */
		if (reg_read(ohci, OHCI1394_PhyControl) & 0x80000000)
			break;

		mdelay(1);
	}

	r = reg_read(ohci, OHCI1394_PhyControl);

	if (i >= OHCI_LOOP_COUNT)
		PRINT (KERN_ERR, "Get PHY Reg timeout [0x%08x/0x%08x/%d]",
		       r, r & 0x80000000, i);

	spin_unlock_irqrestore (&ohci->phy_reg_lock, flags);

	/* rdData occupies bits 16-23 */
	return (r & 0x00ff0000) >> 16;
}
+
+static void set_phy_reg(struct ti_ohci *ohci, u8 addr, u8 data)
+{
+	int i;
+	unsigned long flags;
+	u32 r = 0;
+
+	spin_lock_irqsave (&ohci->phy_reg_lock, flags);
+
+	reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | data | 0x00004000);
+
+	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
+		r = reg_read(ohci, OHCI1394_PhyControl);
+		if (!(r & 0x00004000))
+			break;
+
+		mdelay(1);
+	}
+
+	if (i == OHCI_LOOP_COUNT)
+		PRINT (KERN_ERR, "Set PHY Reg timeout [0x%08x/0x%08x/%d]",
+		       r, r & 0x00004000, i);
+
+	spin_unlock_irqrestore (&ohci->phy_reg_lock, flags);
+
+	return;
+}
+
+/* Or's our value into the current value */
+static void set_phy_reg_mask(struct ti_ohci *ohci, u8 addr, u8 data)
+{
+	u8 old;
+
+	old = get_phy_reg (ohci, addr);
+	old |= data;
+	set_phy_reg (ohci, addr, old);
+
+	return;
+}
+
/*
 * Parse the self-ID buffer DMA'd in by the controller after a bus reset
 * and hand every valid self-ID packet to the core via
 * hpsb_selfid_received().  On a corrupt buffer, force another bus reset
 * (up to OHCI1394_MAX_SELF_ID_ERRORS attempts) by setting the IBR bit
 * in PHY register 1.
 */
static void handle_selfid(struct ti_ohci *ohci, struct hpsb_host *host,
				int phyid, int isroot)
{
	quadlet_t *q = ohci->selfid_buf_cpu;
	quadlet_t self_id_count=reg_read(ohci, OHCI1394_SelfIDCount);
	size_t size;
	quadlet_t q0, q1;

	/* Check status of self-id reception */

	if (ohci->selfid_swap)
		q0 = le32_to_cpu(q[0]);
	else
		q0 = q[0];

	/* Bit 31 of SelfIDCount flags a reception error; also verify the
	 * generation field in the register matches the copy the hardware
	 * stored in the first buffer quadlet. */
	if ((self_id_count & 0x80000000) ||
	    ((self_id_count & 0x00FF0000) != (q0 & 0x00FF0000))) {
		PRINT(KERN_ERR,
		      "Error in reception of SelfID packets [0x%08x/0x%08x] (count: %d)",
		      self_id_count, q0, ohci->self_id_errors);

		/* Tip by James Goodwin <jamesg@Filanet.com>:
		 * We had an error, generate another bus reset in response.  */
		if (ohci->self_id_errors<OHCI1394_MAX_SELF_ID_ERRORS) {
			set_phy_reg_mask (ohci, 1, 0x40);
			ohci->self_id_errors++;
		} else {
			PRINT(KERN_ERR,
			      "Too many errors on SelfID error reception, giving up!");
		}
		return;
	}

	/* SelfID Ok, reset error counter. */
	ohci->self_id_errors = 0;

	/* Buffer length in quadlets (bits 2-12 of SelfIDCount), minus the
	 * leading count/generation quadlet which we skip with q++. */
	size = ((self_id_count & 0x00001FFC) >> 2) - 1;
	q++;

	while (size > 0) {
		if (ohci->selfid_swap) {
			q0 = le32_to_cpu(q[0]);
			q1 = le32_to_cpu(q[1]);
		} else {
			q0 = q[0];
			q1 = q[1];
		}

		/* Each self-ID packet is stored as a quadlet followed by its
		 * bitwise complement; a mismatch indicates corruption. */
		if (q0 == ~q1) {
			DBGMSG ("SelfID packet 0x%x received", q0);
			hpsb_selfid_received(host, cpu_to_be32(q0));
			/* bits 24-29 carry the sender's phy ID */
			if (((q0 & 0x3f000000) >> 24) == phyid)
				DBGMSG ("SelfID for this node is 0x%08x", q0);
		} else {
			PRINT(KERN_ERR,
			      "SelfID is inconsistent [0x%08x/0x%08x]", q0, q1);
		}
		q += 2;
		size -= 2;
	}

	DBGMSG("SelfID complete");

	return;
}
+
+static void ohci_soft_reset(struct ti_ohci *ohci) {
+	int i;
+
+	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);
+
+	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
+		if (!(reg_read(ohci, OHCI1394_HCControlSet) & OHCI1394_HCControl_softReset))
+			break;
+		mdelay(1);
+	}
+	DBGMSG ("Soft reset finished");
+}
+
+
/* Generate the dma receive prgs and start the context.
 *
 * Builds one INPUT_MORE descriptor per receive buffer, chained into a
 * circular list through branchAddress, then programs cmdPtr and runs the
 * context.  For iso (IR) contexts it additionally configures
 * buffer-fill mode and the channel/tag match registers. */
static void initialize_dma_rcv_ctx(struct dma_rcv_ctx *d, int generate_irq)
{
	struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
	int i;

	ohci1394_stop_context(ohci, d->ctrlClear, NULL);

	for (i=0; i<d->num_desc; i++) {
		u32 c;

		c = DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE | DMA_CTL_BRANCH;
		if (generate_irq)
			c |= DMA_CTL_IRQ;

		d->prg_cpu[i]->control = cpu_to_le32(c | d->buf_size);

		/* End of descriptor list? */
		if (i + 1 < d->num_desc) {
			/* low nibble = Z value (1 descriptor at the target) */
			d->prg_cpu[i]->branchAddress =
				cpu_to_le32((d->prg_bus[i+1] & 0xfffffff0) | 0x1);
		} else {
			/* last descriptor points back to the first, with Z=0 */
			d->prg_cpu[i]->branchAddress =
				cpu_to_le32((d->prg_bus[0] & 0xfffffff0));
		}

		d->prg_cpu[i]->address = cpu_to_le32(d->buf_bus[i]);
		d->prg_cpu[i]->status = cpu_to_le32(d->buf_size);
	}

        d->buf_ind = 0;
        d->buf_offset = 0;

	if (d->type == DMA_CTX_ISO) {
		/* Clear contextControl */
		reg_write(ohci, d->ctrlClear, 0xffffffff);

		/* Set bufferFill, isochHeader, multichannel for IR context */
		reg_write(ohci, d->ctrlSet, 0xd0000000);

		/* Set the context match register to match on all tags */
		reg_write(ohci, d->ctxtMatch, 0xf0000000);

		/* Clear the multi channel mask high and low registers */
		reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, 0xffffffff);
		reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, 0xffffffff);

		/* Set up isoRecvIntMask to generate interrupts */
		reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << d->ctx);
	}

	/* Tell the controller where the first AR program is */
	reg_write(ohci, d->cmdPtr, d->prg_bus[0] | 0x1);

	/* Run context */
	reg_write(ohci, d->ctrlSet, 0x00008000);

	DBGMSG("Receive DMA ctx=%d initialized", d->ctx);
}
+
/* Initialize the dma transmit context.
 *
 * Stops the context and resets all software bookkeeping; transmit
 * descriptors themselves are generated on demand by insert_packet(). */
static void initialize_dma_trm_ctx(struct dma_trm_ctx *d)
{
	struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);

	/* Stop the context */
	ohci1394_stop_context(ohci, d->ctrlClear, NULL);

	/* Reset ring indices, free-descriptor count and packet queues. */
        d->prg_ind = 0;
	d->sent_ind = 0;
	d->free_prgs = d->num_desc;
        d->branchAddrPtr = NULL;
	INIT_LIST_HEAD(&d->fifo_list);
	INIT_LIST_HEAD(&d->pending_list);

	if (d->type == DMA_CTX_ISO) {
		/* enable interrupts */
		reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << d->ctx);
	}

	DBGMSG("Transmit DMA ctx=%d initialized", d->ctx);
}
+
+/* Count the number of available iso contexts */
+static int get_nb_iso_ctx(struct ti_ohci *ohci, int reg)
+{
+	int i,ctx=0;
+	u32 tmp;
+
+	reg_write(ohci, reg, 0xffffffff);
+	tmp = reg_read(ohci, reg);
+
+	DBGMSG("Iso contexts reg: %08x implemented: %08x", reg, tmp);
+
+	/* Count the number of contexts */
+	for (i=0; i<32; i++) {
+	    	if (tmp & 1) ctx++;
+		tmp >>= 1;
+	}
+	return ctx;
+}
+
/* Global initialization.
 *
 * Brings the controller up from scratch: programs BusOptions and the
 * node/bus ID, configures the link and PHY, points the hardware at the
 * self-ID and config ROM buffers, initializes all AR/AT/IR DMA
 * contexts, unmasks interrupts and finally enables the link.  Ends with
 * a sanity check of the EEPROM-derived max packet size. */
static void ohci_initialize(struct ti_ohci *ohci)
{
	char irq_buf[16];
	quadlet_t buf;
	int num_ports, i;

	spin_lock_init(&ohci->phy_reg_lock);
	spin_lock_init(&ohci->event_lock);

	/* Put some defaults to these undefined bus options */
	buf = reg_read(ohci, OHCI1394_BusOptions);
	buf |=  0x60000000; /* Enable CMC and ISC */
	if (!hpsb_disable_irm)
		buf |=  0x80000000; /* Enable IRMC */
	buf &= ~0x00ff0000; /* XXX: Set cyc_clk_acc to zero for now */
	buf &= ~0x18000000; /* Disable PMC and BMC */
	reg_write(ohci, OHCI1394_BusOptions, buf);

	/* Set the bus number */
	reg_write(ohci, OHCI1394_NodeID, 0x0000ffc0);

	/* Enable posted writes */
	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_postedWriteEnable);

	/* Clear link control register */
	reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);

	/* Enable cycle timer and cycle master and set the IRM
	 * contender bit in our self ID packets if appropriate. */
	reg_write(ohci, OHCI1394_LinkControlSet,
		  OHCI1394_LinkControl_CycleTimerEnable |
		  OHCI1394_LinkControl_CycleMaster);
	set_phy_reg_mask(ohci, 4, PHY_04_LCTRL |
			 (hpsb_disable_irm ? 0 : PHY_04_CONTENDER));

	/* Set up self-id dma buffer */
	reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->selfid_buf_bus);

	/* enable self-id and phys */
	reg_write(ohci, OHCI1394_LinkControlSet, OHCI1394_LinkControl_RcvSelfID |
		  OHCI1394_LinkControl_RcvPhyPkt);

	/* Set the Config ROM mapping register */
	reg_write(ohci, OHCI1394_ConfigROMmap, ohci->csr_config_rom_bus);

	/* Now get our max packet size: 2^(max_rec field + 1) from the
	 * max_rec field (bits 12-15) of BusOptions. */
	ohci->max_packet_size =
		1<<(((reg_read(ohci, OHCI1394_BusOptions)>>12)&0xf)+1);

	/* Don't accept phy packets into AR request context */
	reg_write(ohci, OHCI1394_LinkControlClear, 0x00000400);

	/* Clear the interrupt mask */
	reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);

	/* Clear the interrupt mask */
	reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);

	/* Initialize AR dma */
	initialize_dma_rcv_ctx(&ohci->ar_req_context, 0);
	initialize_dma_rcv_ctx(&ohci->ar_resp_context, 0);

	/* Initialize AT dma */
	initialize_dma_trm_ctx(&ohci->at_req_context);
	initialize_dma_trm_ctx(&ohci->at_resp_context);

	/* Initialize IR Legacy DMA */
	ohci->ir_legacy_channels = 0;
	initialize_dma_rcv_ctx(&ohci->ir_legacy_context, 1);
	DBGMSG("ISO receive legacy context activated");

	/*
	 * Accept AT requests from all nodes. This probably
	 * will have to be controlled from the subsystem
	 * on a per node basis.
	 */
	reg_write(ohci,OHCI1394_AsReqFilterHiSet, 0x80000000);

	/* Specify AT retries */
	reg_write(ohci, OHCI1394_ATRetries,
		  OHCI1394_MAX_AT_REQ_RETRIES |
		  (OHCI1394_MAX_AT_RESP_RETRIES<<4) |
		  (OHCI1394_MAX_PHYS_RESP_RETRIES<<8));

	/* We don't want hardware swapping */
	reg_write(ohci, OHCI1394_HCControlClear, OHCI1394_HCControl_noByteSwap);

	/* Enable interrupts */
	reg_write(ohci, OHCI1394_IntMaskSet,
		  OHCI1394_unrecoverableError |
		  OHCI1394_masterIntEnable |
		  OHCI1394_busReset |
		  OHCI1394_selfIDComplete |
		  OHCI1394_RSPkt |
		  OHCI1394_RQPkt |
		  OHCI1394_respTxComplete |
		  OHCI1394_reqTxComplete |
		  OHCI1394_isochRx |
		  OHCI1394_isochTx |
		  OHCI1394_cycleInconsistent);

	/* Enable link */
	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_linkEnable);

	buf = reg_read(ohci, OHCI1394_Version);
#ifndef __sparc__
	sprintf (irq_buf, "%d", ohci->dev->irq);
#else
	sprintf (irq_buf, "%s", __irq_itoa(ohci->dev->irq));
#endif
	PRINT(KERN_INFO, "OHCI-1394 %d.%d (PCI): IRQ=[%s]  "
	      "MMIO=[%lx-%lx]  Max Packet=[%d]",
	      ((((buf) >> 16) & 0xf) + (((buf) >> 20) & 0xf) * 10),
	      ((((buf) >> 4) & 0xf) + ((buf) & 0xf) * 10), irq_buf,
	      pci_resource_start(ohci->dev, 0),
	      pci_resource_start(ohci->dev, 0) + OHCI1394_REGISTER_SIZE - 1,
	      ohci->max_packet_size);

	/* Check all of our ports to make sure that if anything is
	 * connected, we enable that port.
	 * NOTE(review): uses PHY page-select reg 7 / port-status reg 8;
	 * bit 5 presumably means "connected", bit 0 "disabled" — confirm
	 * against the PHY datasheet. */
	num_ports = get_phy_reg(ohci, 2) & 0xf;
	for (i = 0; i < num_ports; i++) {
		unsigned int status;

		set_phy_reg(ohci, 7, i);
		status = get_phy_reg(ohci, 8);

		if (status & 0x20)
			set_phy_reg(ohci, 8, status & ~1);
	}

        /* Serial EEPROM Sanity check. */
        if ((ohci->max_packet_size < 512) ||
	    (ohci->max_packet_size > 4096)) {
		/* Serial EEPROM contents are suspect, set a sane max packet
		 * size and print the raw contents for bug reports if verbose
		 * debug is enabled. */
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
		int i;
#endif

		PRINT(KERN_DEBUG, "Serial EEPROM has suspicious values, "
                      "attempting to setting max_packet_size to 512 bytes");
		reg_write(ohci, OHCI1394_BusOptions,
			  (reg_read(ohci, OHCI1394_BusOptions) & 0xf007) | 0x8002);
		ohci->max_packet_size = 512;
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
		PRINT(KERN_DEBUG, "    EEPROM Present: %d",
		      (reg_read(ohci, OHCI1394_Version) >> 24) & 0x1);
		reg_write(ohci, OHCI1394_GUID_ROM, 0x80000000);

		for (i = 0;
		     ((i < 1000) &&
		      (reg_read(ohci, OHCI1394_GUID_ROM) & 0x80000000)); i++)
			udelay(10);

		for (i = 0; i < 0x20; i++) {
			reg_write(ohci, OHCI1394_GUID_ROM, 0x02000000);
			PRINT(KERN_DEBUG, "    EEPROM %02x: %02x", i,
			      (reg_read(ohci, OHCI1394_GUID_ROM) >> 16) & 0xff);
		}
#endif
	}
}
+
/*
 * Insert a packet in the DMA fifo and generate the DMA prg
 * FIXME: rewrite the program in order to accept packets crossing
 *        page boundaries.
 *        check also that a single dma descriptor doesn't cross a
 *        page boundary.
 *
 * Builds an OUTPUT_MORE/OUTPUT_LAST descriptor pair (or a single
 * OUTPUT_LAST for quadlet-only packets) at ring slot d->prg_ind, maps
 * any payload for DMA, links the new program onto the previous one via
 * *branchAddrPtr, and queues the packet on d->fifo_list.
 * Caller must hold d->lock (see dma_trm_flush()).
 */
static void insert_packet(struct ti_ohci *ohci,
			  struct dma_trm_ctx *d, struct hpsb_packet *packet)
{
	u32 cycleTimer;
	int idx = d->prg_ind;

	DBGMSG("Inserting packet for node " NODE_BUS_FMT
	       ", tlabel=%d, tcode=0x%x, speed=%d",
	       NODE_BUS_ARGS(ohci->host, packet->node_id), packet->tlabel,
	       packet->tcode, packet->speed_code);

	d->prg_cpu[idx]->begin.address = 0;
	d->prg_cpu[idx]->begin.branchAddress = 0;

	if (d->type == DMA_CTX_ASYNC_RESP) {
		/*
		 * For response packets, we need to put a timeout value in
		 * the 16 lower bits of the status... let's try 1 sec timeout
		 */
		cycleTimer = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
		d->prg_cpu[idx]->begin.status = cpu_to_le32(
			(((((cycleTimer>>25)&0x7)+1)&0x7)<<13) |
			((cycleTimer&0x01fff000)>>12));

		DBGMSG("cycleTimer: %08x timeStamp: %08x",
		       cycleTimer, d->prg_cpu[idx]->begin.status);
	} else 
		d->prg_cpu[idx]->begin.status = 0;

        if ( (packet->type == hpsb_async) || (packet->type == hpsb_raw) ) {

                if (packet->type == hpsb_raw) {
			/* raw (PHY) packet: immediate data is the PHY tcode
			 * plus the two raw header quadlets */
			d->prg_cpu[idx]->data[0] = cpu_to_le32(OHCI1394_TCODE_PHY<<4);
                        d->prg_cpu[idx]->data[1] = cpu_to_le32(packet->header[0]);
                        d->prg_cpu[idx]->data[2] = cpu_to_le32(packet->header[1]);
                } else {
			/* first immediate quadlet: speed code + low half of
			 * header[0] (tcode etc.) in OHCI AT format */
                        d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
                                (packet->header[0] & 0xFFFF);

			if (packet->tcode == TCODE_ISO_DATA) {
				/* Sending an async stream packet */
				d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
			} else {
				/* Sending a normal async request or response */
				d->prg_cpu[idx]->data[1] =
					(packet->header[1] & 0xFFFF) |
					(packet->header[0] & 0xFFFF0000);
				d->prg_cpu[idx]->data[2] = packet->header[2];
				d->prg_cpu[idx]->data[3] = packet->header[3];
			}
			packet_swab(d->prg_cpu[idx]->data, packet->tcode);
                }

                if (packet->data_size) { /* block transmit */
			/* OUTPUT_MORE with immediate header (8 bytes for
			 * stream packets, 16 for normal async) ... */
			if (packet->tcode == TCODE_STREAM_DATA){
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_MORE |
						    DMA_CTL_IMMEDIATE | 0x8);
			} else {
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_MORE |
						    DMA_CTL_IMMEDIATE | 0x10);
			}
			/* ... followed by OUTPUT_LAST for the payload */
                        d->prg_cpu[idx]->end.control =
                                cpu_to_le32(DMA_CTL_OUTPUT_LAST |
					    DMA_CTL_IRQ |
					    DMA_CTL_BRANCH |
					    packet->data_size);
                        /*
                         * Check that the packet data buffer
                         * does not cross a page boundary.
			 *
			 * XXX Fix this some day. eth1394 seems to trigger
			 * it, but ignoring it doesn't seem to cause a
			 * problem.
                         */
#if 0
                        if (cross_bound((unsigned long)packet->data,
                                        packet->data_size)>0) {
                                /* FIXME: do something about it */
                                PRINT(KERN_ERR,
                                      "%s: packet data addr: %p size %Zd bytes "
                                      "cross page boundary", __FUNCTION__,
                                      packet->data, packet->data_size);
                        }
#endif
                        d->prg_cpu[idx]->end.address = cpu_to_le32(
                                pci_map_single(ohci->dev, packet->data,
                                               packet->data_size,
                                               PCI_DMA_TODEVICE));
			OHCI_DMA_ALLOC("single, block transmit packet");

                        d->prg_cpu[idx]->end.branchAddress = 0;
                        d->prg_cpu[idx]->end.status = 0;
			/* link the previous program's branch to this one
			 * (low bits = Z value: 3 descriptors) */
                        if (d->branchAddrPtr)
                                *(d->branchAddrPtr) =
					cpu_to_le32(d->prg_bus[idx] | 0x3);
                        d->branchAddrPtr =
                                &(d->prg_cpu[idx]->end.branchAddress);
                } else { /* quadlet transmit */
                        if (packet->type == hpsb_raw)
                                d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_LAST |
						    DMA_CTL_IMMEDIATE |
						    DMA_CTL_IRQ |
						    DMA_CTL_BRANCH |
						    (packet->header_size + 4));
                        else
                                d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_LAST |
						    DMA_CTL_IMMEDIATE |
						    DMA_CTL_IRQ |
						    DMA_CTL_BRANCH |
						    packet->header_size);

			/* single-descriptor program: Z value is 2 */
                        if (d->branchAddrPtr)
                                *(d->branchAddrPtr) =
					cpu_to_le32(d->prg_bus[idx] | 0x2);
                        d->branchAddrPtr =
                                &(d->prg_cpu[idx]->begin.branchAddress);
                }

        } else { /* iso packet */
                d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
                        (packet->header[0] & 0xFFFF);
                d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
		packet_swab(d->prg_cpu[idx]->data, packet->tcode);

                d->prg_cpu[idx]->begin.control =
			cpu_to_le32(DMA_CTL_OUTPUT_MORE |
				    DMA_CTL_IMMEDIATE | 0x8);
                d->prg_cpu[idx]->end.control =
			cpu_to_le32(DMA_CTL_OUTPUT_LAST |
				    DMA_CTL_UPDATE |
				    DMA_CTL_IRQ |
				    DMA_CTL_BRANCH |
				    packet->data_size);
                d->prg_cpu[idx]->end.address = cpu_to_le32(
				pci_map_single(ohci->dev, packet->data,
				packet->data_size, PCI_DMA_TODEVICE));
		OHCI_DMA_ALLOC("single, iso transmit packet");

                d->prg_cpu[idx]->end.branchAddress = 0;
                d->prg_cpu[idx]->end.status = 0;
                DBGMSG("Iso xmit context info: header[%08x %08x]\n"
                       "                       begin=%08x %08x %08x %08x\n"
                       "                             %08x %08x %08x %08x\n"
                       "                       end  =%08x %08x %08x %08x",
                       d->prg_cpu[idx]->data[0], d->prg_cpu[idx]->data[1],
                       d->prg_cpu[idx]->begin.control,
                       d->prg_cpu[idx]->begin.address,
                       d->prg_cpu[idx]->begin.branchAddress,
                       d->prg_cpu[idx]->begin.status,
                       d->prg_cpu[idx]->data[0],
                       d->prg_cpu[idx]->data[1],
                       d->prg_cpu[idx]->data[2],
                       d->prg_cpu[idx]->data[3],
                       d->prg_cpu[idx]->end.control,
                       d->prg_cpu[idx]->end.address,
                       d->prg_cpu[idx]->end.branchAddress,
                       d->prg_cpu[idx]->end.status);
                if (d->branchAddrPtr)
  		        *(d->branchAddrPtr) = cpu_to_le32(d->prg_bus[idx] | 0x3);
                d->branchAddrPtr = &(d->prg_cpu[idx]->end.branchAddress);
        }
	d->free_prgs--;

	/* queue the packet in the appropriate context queue */
	list_add_tail(&packet->driver_list, &d->fifo_list);
	d->prg_ind = (d->prg_ind + 1) % d->num_desc;
}
+
/*
 * This function fills the FIFO with the (eventual) pending packets
 * and runs or wakes up the DMA prg if necessary.
 *
 * The function MUST be called with the d->lock held.
 */
static void dma_trm_flush(struct ti_ohci *ohci, struct dma_trm_ctx *d)
{
	struct hpsb_packet *packet, *ptmp;
	int idx = d->prg_ind;
	int z = 0;

	/* insert the packets into the dma fifo */
	list_for_each_entry_safe(packet, ptmp, &d->pending_list, driver_list) {
		if (!d->free_prgs)
			break;

		/* For the first packet only: remember the Z value (number of
		 * descriptors) to pass to the controller with cmdPtr —
		 * 3 for packets with payload, 2 for header-only packets. */
		if (!z)
			z = (packet->data_size) ? 3 : 2;

		/* Insert the packet */
		list_del_init(&packet->driver_list);
		insert_packet(ohci, d, packet);
	}

	/* Nothing must have been done, either no free_prgs or no packets */
	if (z == 0)
		return;

	/* Is the context running ? (should be unless it is
	   the first packet to be sent in this context) */
	if (!(reg_read(ohci, d->ctrlSet) & 0x8000)) {
		u32 nodeId = reg_read(ohci, OHCI1394_NodeID);

		DBGMSG("Starting transmit DMA ctx=%d",d->ctx);
		reg_write(ohci, d->cmdPtr, d->prg_bus[idx] | z);

		/* Check that the node id is valid, and not 63 */
		if (!(nodeId & 0x80000000) || (nodeId & 0x3f) == 63)
			PRINT(KERN_ERR, "Running dma failed because Node ID is not valid");
		else
			/* 0x8000 = run bit of contextControl */
			reg_write(ohci, d->ctrlSet, 0x8000);
	} else {
		/* Wake up the dma context if necessary */
		if (!(reg_read(ohci, d->ctrlSet) & 0x400))
			DBGMSG("Waking transmit DMA ctx=%d",d->ctx);

		/* do this always, to avoid race condition */
		reg_write(ohci, d->ctrlSet, 0x1000);
	}

	return;
}
+
/* Transmission of an async or iso packet.
 *
 * Picks the DMA transmit context matching the packet type (AT request,
 * AT response, or the lazily-allocated legacy IT context), queues the
 * packet on the context's pending list and flushes it into the DMA
 * FIFO.  Returns 0 on success or a negative errno. */
static int ohci_transmit(struct hpsb_host *host, struct hpsb_packet *packet)
{
	struct ti_ohci *ohci = host->hostdata;
	struct dma_trm_ctx *d;
	unsigned long flags;

	if (packet->data_size > ohci->max_packet_size) {
		PRINT(KERN_ERR,
		      "Transmit packet size %Zd is too big",
		      packet->data_size);
		return -EOVERFLOW;
	}

	/* Decide whether we have an iso, a request, or a response packet */
	if (packet->type == hpsb_raw)
		d = &ohci->at_req_context;
	else if ((packet->tcode == TCODE_ISO_DATA) && (packet->type == hpsb_iso)) {
		/* The legacy IT DMA context is initialized on first
		 * use.  However, the alloc cannot be run from
		 * interrupt context, so we bail out if that is the
		 * case. I don't see anyone sending ISO packets from
		 * interrupt context anyway... */

		if (ohci->it_legacy_context.ohci == NULL) {
			if (in_interrupt()) {
				PRINT(KERN_ERR,
				      "legacy IT context cannot be initialized during interrupt");
				return -EINVAL;
			}

			if (alloc_dma_trm_ctx(ohci, &ohci->it_legacy_context,
					      DMA_CTX_ISO, 0, IT_NUM_DESC,
					      OHCI1394_IsoXmitContextBase) < 0) {
				PRINT(KERN_ERR,
				      "error initializing legacy IT context");
				return -ENOMEM;
			}

			initialize_dma_trm_ctx(&ohci->it_legacy_context);
		}

		d = &ohci->it_legacy_context;
	} else if ((packet->tcode & 0x02) && (packet->tcode != TCODE_ISO_DATA))
		/* tcodes with bit 1 set are responses (except iso data) */
		d = &ohci->at_resp_context;
	else
		d = &ohci->at_req_context;

	spin_lock_irqsave(&d->lock,flags);

	list_add_tail(&packet->driver_list, &d->pending_list);

	dma_trm_flush(ohci, d);

	spin_unlock_irqrestore(&d->lock,flags);

	return 0;
}
+
/*
 * Host-driver control entry point (hpsb_host_driver::devctl).
 * Dispatches bus resets (via PHY registers 1/5), cycle counter and
 * cycle master control, AT cancellation, and legacy per-channel iso
 * listen masks.  Returns 0 / a positive value (GET_CYCLE_COUNTER) on
 * success, or a negative value on error.
 */
static int ohci_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
{
	struct ti_ohci *ohci = host->hostdata;
	int retval = 0;
	unsigned long flags;
	int phy_reg;

	switch (cmd) {
	case RESET_BUS:
		/* PHY reg 1: bit 6 = IBR (long reset), bit 7 = RHB (root
		 * hold-off); PHY reg 5: bit 6 = ISBR (short/arbitrated
		 * reset). */
		switch (arg) {
		case SHORT_RESET:
			phy_reg = get_phy_reg(ohci, 5);
			phy_reg |= 0x40;
			set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
			break;
		case LONG_RESET:
			phy_reg = get_phy_reg(ohci, 1);
			phy_reg |= 0x40;
			set_phy_reg(ohci, 1, phy_reg); /* set IBR */
			break;
		case SHORT_RESET_NO_FORCE_ROOT:
			phy_reg = get_phy_reg(ohci, 1);
			if (phy_reg & 0x80) {
				phy_reg &= ~0x80;
				set_phy_reg(ohci, 1, phy_reg); /* clear RHB */
			}

			phy_reg = get_phy_reg(ohci, 5);
			phy_reg |= 0x40;
			set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
			break;
		case LONG_RESET_NO_FORCE_ROOT:
			phy_reg = get_phy_reg(ohci, 1);
			phy_reg &= ~0x80;
			phy_reg |= 0x40;
			set_phy_reg(ohci, 1, phy_reg); /* clear RHB, set IBR */
			break;
		case SHORT_RESET_FORCE_ROOT:
			phy_reg = get_phy_reg(ohci, 1);
			if (!(phy_reg & 0x80)) {
				phy_reg |= 0x80;
				set_phy_reg(ohci, 1, phy_reg); /* set RHB */
			}

			phy_reg = get_phy_reg(ohci, 5);
			phy_reg |= 0x40;
			set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
			break;
		case LONG_RESET_FORCE_ROOT:
			phy_reg = get_phy_reg(ohci, 1);
			phy_reg |= 0xc0;
			set_phy_reg(ohci, 1, phy_reg); /* set RHB and IBR */
			break;
		default:
			retval = -1;
		}
		break;

	case GET_CYCLE_COUNTER:
		retval = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
		break;

	case SET_CYCLE_COUNTER:
		reg_write(ohci, OHCI1394_IsochronousCycleTimer, arg);
		break;

	case SET_BUS_ID:
		PRINT(KERN_ERR, "devctl command SET_BUS_ID err");
		break;

	case ACT_CYCLE_MASTER:
		if (arg) {
			/* check if we are root and other nodes are present */
			u32 nodeId = reg_read(ohci, OHCI1394_NodeID);
			if ((nodeId & (1<<30)) && (nodeId & 0x3f)) {
				/*
				 * enable cycleTimer, cycleMaster
				 */
				DBGMSG("Cycle master enabled");
				reg_write(ohci, OHCI1394_LinkControlSet,
					  OHCI1394_LinkControl_CycleTimerEnable |
					  OHCI1394_LinkControl_CycleMaster);
			}
		} else {
			/* disable cycleTimer, cycleMaster, cycleSource */
			reg_write(ohci, OHCI1394_LinkControlClear,
				  OHCI1394_LinkControl_CycleTimerEnable |
				  OHCI1394_LinkControl_CycleMaster |
				  OHCI1394_LinkControl_CycleSource);
		}
		break;

	case CANCEL_REQUESTS:
		DBGMSG("Cancel request received");
		dma_trm_reset(&ohci->at_req_context);
		dma_trm_reset(&ohci->at_resp_context);
		break;

	/* note: the "IS0" in the messages below is a long-standing typo
	 * for "ISO"; left untouched since it is runtime output */
	case ISO_LISTEN_CHANNEL:
        {
		u64 mask;

		if (arg<0 || arg>63) {
			PRINT(KERN_ERR,
			      "%s: IS0 listen channel %d is out of range",
			      __FUNCTION__, arg);
			return -EFAULT;
		}

		mask = (u64)0x1<<arg;

                spin_lock_irqsave(&ohci->IR_channel_lock, flags);

		if (ohci->ISO_channel_usage & mask) {
			PRINT(KERN_ERR,
			      "%s: IS0 listen channel %d is already used",
			      __FUNCTION__, arg);
			spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
			return -EFAULT;
		}

		ohci->ISO_channel_usage |= mask;
		ohci->ir_legacy_channels |= mask;

		/* channels 0-31 live in the Lo mask register, 32-63 in Hi */
		if (arg>31)
			reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet,
				  1<<(arg-32));
		else
			reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet,
				  1<<arg);

                spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
                DBGMSG("Listening enabled on channel %d", arg);
                break;
        }
	case ISO_UNLISTEN_CHANNEL:
        {
		u64 mask;

		if (arg<0 || arg>63) {
			PRINT(KERN_ERR,
			      "%s: IS0 unlisten channel %d is out of range",
			      __FUNCTION__, arg);
			return -EFAULT;
		}

		mask = (u64)0x1<<arg;

                spin_lock_irqsave(&ohci->IR_channel_lock, flags);

		if (!(ohci->ISO_channel_usage & mask)) {
			PRINT(KERN_ERR,
			      "%s: IS0 unlisten channel %d is not used",
			      __FUNCTION__, arg);
			spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
			return -EFAULT;
		}

		ohci->ISO_channel_usage &= ~mask;
		ohci->ir_legacy_channels &= ~mask;

		if (arg>31)
			reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear,
				  1<<(arg-32));
		else
			reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear,
				  1<<arg);

                spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
                DBGMSG("Listening disabled on channel %d", arg);
                break;
        }
	default:
		PRINT_G(KERN_ERR, "ohci_devctl cmd %d not implemented yet",
			cmd);
		break;
	}
	return retval;
}
+
+/***********************************
+ * rawiso ISO reception            *
+ ***********************************/
+
+/*
+  We use either buffer-fill or packet-per-buffer DMA mode. The DMA
+  buffer is split into "blocks" (regions described by one DMA
+  descriptor). Each block must be one page or less in size, and
+  must not cross a page boundary.
+
+  There is one little wrinkle with buffer-fill mode: a packet that
+  starts in the final block may wrap around into the first block. But
+  the user API expects all packets to be contiguous. Our solution is
+  to keep the very last page of the DMA buffer in reserve - if a
+  packet spans the gap, we copy its tail into this page.
+*/
+
+/* Per-context state for rawiso reception on one OHCI IR DMA context. */
+struct ohci_iso_recv {
+	struct ti_ohci *ohci;
+
+	/* bottom half that retires completed descriptors */
+	struct ohci1394_iso_tasklet task;
+	/* nonzero once the tasklet is registered (used by shutdown) */
+	int task_active;
+
+	enum { BUFFER_FILL_MODE = 0,
+	       PACKET_PER_BUFFER_MODE = 1 } dma_mode;
+
+	/* memory and PCI mapping for the DMA descriptors */
+	struct dma_prog_region prog;
+	struct dma_cmd *block; /* = (struct dma_cmd*) prog.virt */
+
+	/* how many DMA blocks fit in the buffer */
+	unsigned int nblocks;
+
+	/* stride of DMA blocks */
+	unsigned int buf_stride;
+
+	/* number of blocks to batch between interrupts */
+	int block_irq_interval;
+
+	/* block that DMA will finish next */
+	int block_dma;
+
+	/* (buffer-fill only) block that the reader will release next */
+	int block_reader;
+
+	/* (buffer-fill only) bytes of buffer the reader has released,
+	   less than one block */
+	int released_bytes;
+
+	/* (buffer-fill only) buffer offset at which the next packet will appear */
+	int dma_offset;
+
+	/* OHCI DMA context control registers (byte offsets for this context) */
+	u32 ContextControlSet;
+	u32 ContextControlClear;
+	u32 CommandPtr;
+	u32 ContextMatch;
+};
+
+static void ohci_iso_recv_task(unsigned long data);
+static void ohci_iso_recv_stop(struct hpsb_iso *iso);
+static void ohci_iso_recv_shutdown(struct hpsb_iso *iso);
+static int  ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync);
+static void ohci_iso_recv_program(struct hpsb_iso *iso);
+
+/* Allocate and initialize per-context state for an IR DMA context.
+ * Chooses buffer-fill vs. packet-per-buffer mode, sizes the block ring,
+ * allocates the DMA program, registers the reception tasklet, and writes
+ * the initial DMA program.  Returns 0 on success or -ENOMEM on failure;
+ * partially-built state is torn down via ohci_iso_recv_shutdown(). */
+static int ohci_iso_recv_init(struct hpsb_iso *iso)
+{
+	struct ti_ohci *ohci = iso->host->hostdata;
+	struct ohci_iso_recv *recv;
+	int ctx;
+	int ret = -ENOMEM;
+
+	recv = kmalloc(sizeof(*recv), SLAB_KERNEL);
+	if (!recv)
+		return -ENOMEM;
+
+	iso->hostdata = recv;
+	recv->ohci = ohci;
+	recv->task_active = 0;
+	dma_prog_region_init(&recv->prog);
+	recv->block = NULL;
+
+	/* use buffer-fill mode, unless irq_interval is 1
+	   (note: multichannel requires buffer-fill) */
+
+	if (((iso->irq_interval == 1 && iso->dma_mode == HPSB_ISO_DMA_OLD_ABI) ||
+	     iso->dma_mode == HPSB_ISO_DMA_PACKET_PER_BUFFER) && iso->channel != -1) {
+		recv->dma_mode = PACKET_PER_BUFFER_MODE;
+	} else {
+		recv->dma_mode = BUFFER_FILL_MODE;
+	}
+
+	/* set nblocks, buf_stride, block_irq_interval */
+
+	if (recv->dma_mode == BUFFER_FILL_MODE) {
+		recv->buf_stride = PAGE_SIZE;
+
+		/* one block per page of data in the DMA buffer, minus the final guard page */
+		recv->nblocks = iso->buf_size/PAGE_SIZE - 1;
+		if (recv->nblocks < 3) {
+			DBGMSG("ohci_iso_recv_init: DMA buffer too small");
+			goto err;
+		}
+
+		/* iso->irq_interval is in packets - translate that to blocks */
+		if (iso->irq_interval == 1)
+			recv->block_irq_interval = 1;
+		else
+			recv->block_irq_interval = iso->irq_interval *
+							((recv->nblocks+1)/iso->buf_packets);
+		/* clamp the interval to [1, nblocks/4] */
+		if (recv->block_irq_interval*4 > recv->nblocks)
+			recv->block_irq_interval = recv->nblocks/4;
+		if (recv->block_irq_interval < 1)
+			recv->block_irq_interval = 1;
+
+	} else {
+		int max_packet_size;
+
+		recv->nblocks = iso->buf_packets;
+		recv->block_irq_interval = iso->irq_interval;
+		if (recv->block_irq_interval * 4 > iso->buf_packets)
+			recv->block_irq_interval = iso->buf_packets / 4;
+		if (recv->block_irq_interval < 1)
+			recv->block_irq_interval = 1;
+
+		/* choose a buffer stride */
+		/* must be a power of 2, and <= PAGE_SIZE */
+
+		max_packet_size = iso->buf_size / iso->buf_packets;
+
+		for (recv->buf_stride = 8; recv->buf_stride < max_packet_size;
+		    recv->buf_stride *= 2);
+
+		if (recv->buf_stride*iso->buf_packets > iso->buf_size ||
+		   recv->buf_stride > PAGE_SIZE) {
+			/* this shouldn't happen, but anyway... */
+			DBGMSG("ohci_iso_recv_init: problem choosing a buffer stride");
+			goto err;
+		}
+	}
+
+	recv->block_reader = 0;
+	recv->released_bytes = 0;
+	recv->block_dma = 0;
+	recv->dma_offset = 0;
+
+	/* size of DMA program = one descriptor per block */
+	if (dma_prog_region_alloc(&recv->prog,
+				 sizeof(struct dma_cmd) * recv->nblocks,
+				 recv->ohci->dev))
+		goto err;
+
+	recv->block = (struct dma_cmd*) recv->prog.kvirt;
+
+	ohci1394_init_iso_tasklet(&recv->task,
+				  iso->channel == -1 ? OHCI_ISO_MULTICHANNEL_RECEIVE :
+				                       OHCI_ISO_RECEIVE,
+				  ohci_iso_recv_task, (unsigned long) iso);
+
+	if (ohci1394_register_iso_tasklet(recv->ohci, &recv->task) < 0)
+		goto err;
+
+	recv->task_active = 1;
+
+	/* recv context registers are spaced 32 bytes apart */
+	ctx = recv->task.context;
+	recv->ContextControlSet = OHCI1394_IsoRcvContextControlSet + 32 * ctx;
+	recv->ContextControlClear = OHCI1394_IsoRcvContextControlClear + 32 * ctx;
+	recv->CommandPtr = OHCI1394_IsoRcvCommandPtr + 32 * ctx;
+	recv->ContextMatch = OHCI1394_IsoRcvContextMatch + 32 * ctx;
+
+	if (iso->channel == -1) {
+		/* clear multi-channel selection mask */
+		reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, 0xFFFFFFFF);
+		reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, 0xFFFFFFFF);
+	}
+
+	/* write the DMA program */
+	ohci_iso_recv_program(iso);
+
+	DBGMSG("ohci_iso_recv_init: %s mode, DMA buffer is %lu pages"
+	       " (%u bytes), using %u blocks, buf_stride %u, block_irq_interval %d",
+	       recv->dma_mode == BUFFER_FILL_MODE ?
+	       "buffer-fill" : "packet-per-buffer",
+	       iso->buf_size/PAGE_SIZE, iso->buf_size,
+	       recv->nblocks, recv->buf_stride, recv->block_irq_interval);
+
+	return 0;
+
+err:
+	ohci_iso_recv_shutdown(iso);
+	return ret;
+}
+
+/* Stop reception on this context: mask its interrupt, then halt its DMA. */
+static void ohci_iso_recv_stop(struct hpsb_iso *iso)
+{
+	struct ohci_iso_recv *recv = iso->hostdata;
+
+	/* disable interrupts */
+	reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskClear, 1 << recv->task.context);
+
+	/* halt DMA */
+	ohci1394_stop_context(recv->ohci, recv->ContextControlClear, NULL);
+}
+
+/* Tear down state built by ohci_iso_recv_init.  Also serves as init's
+ * error path, so it must be safe on a partially-initialized context
+ * (task_active gates the stop/unregister steps). */
+static void ohci_iso_recv_shutdown(struct hpsb_iso *iso)
+{
+	struct ohci_iso_recv *recv = iso->hostdata;
+
+	if (recv->task_active) {
+		ohci_iso_recv_stop(iso);
+		ohci1394_unregister_iso_tasklet(recv->ohci, &recv->task);
+		recv->task_active = 0;
+	}
+
+	dma_prog_region_free(&recv->prog);
+	kfree(recv);
+	iso->hostdata = NULL;
+}
+
+/* set up a "gapped" ring buffer DMA program: one descriptor per block,
+   each branch-linked to the next; the final descriptor's branch is left 0
+   so DMA stalls at the end of the ring instead of overwriting unread data */
+static void ohci_iso_recv_program(struct hpsb_iso *iso)
+{
+	struct ohci_iso_recv *recv = iso->hostdata;
+	int blk;
+
+	/* address of 'branch' field in previous DMA descriptor */
+	u32 *prev_branch = NULL;
+
+	for (blk = 0; blk < recv->nblocks; blk++) {
+		u32 control;
+
+		/* the DMA descriptor */
+		struct dma_cmd *cmd = &recv->block[blk];
+
+		/* offset of the DMA descriptor relative to the DMA prog buffer */
+		unsigned long prog_offset = blk * sizeof(struct dma_cmd);
+
+		/* offset of this packet's data within the DMA buffer */
+		unsigned long buf_offset = blk * recv->buf_stride;
+
+		if (recv->dma_mode == BUFFER_FILL_MODE) {
+			control = 2 << 28; /* INPUT_MORE */
+		} else {
+			control = 3 << 28; /* INPUT_LAST */
+		}
+
+		control |= 8 << 24; /* s = 1, update xferStatus and resCount */
+
+		/* interrupt on last block, and at intervals */
+		if (blk == recv->nblocks-1 || (blk % recv->block_irq_interval) == 0) {
+			control |= 3 << 20; /* want interrupt */
+		}
+
+		control |= 3 << 18; /* enable branch to address */
+		control |= recv->buf_stride; /* reqCount = bytes this block can hold */
+
+		cmd->control = cpu_to_le32(control);
+		cmd->address = cpu_to_le32(dma_region_offset_to_bus(&iso->data_buf, buf_offset));
+		cmd->branchAddress = 0; /* filled in on next loop */
+		cmd->status = cpu_to_le32(recv->buf_stride);
+
+		/* link the previous descriptor to this one */
+		if (prev_branch) {
+			*prev_branch = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog, prog_offset) | 1);
+		}
+
+		prev_branch = &cmd->branchAddress;
+	}
+
+	/* the final descriptor's branch address and Z should be left at 0 */
+}
+
+/* listen or unlisten to a specific channel (multi-channel mode only);
+   channel is 0-63, selecting a bit in the Lo (0-31) or Hi (32-63)
+   multichannel mask register */
+static void ohci_iso_recv_change_channel(struct hpsb_iso *iso, unsigned char channel, int listen)
+{
+	struct ohci_iso_recv *recv = iso->hostdata;
+	int reg, i;
+
+	if (channel < 32) {
+		reg = listen ? OHCI1394_IRMultiChanMaskLoSet : OHCI1394_IRMultiChanMaskLoClear;
+		i = channel;
+	} else {
+		reg = listen ? OHCI1394_IRMultiChanMaskHiSet : OHCI1394_IRMultiChanMaskHiClear;
+		i = channel - 32;
+	}
+
+	reg_write(recv->ohci, reg, (1 << i));
+
+	/* issue a dummy read to force all PCI writes to be posted immediately */
+	mb();
+	reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
+}
+
+/* Program the full 64-bit multichannel receive mask: for every bit set in
+ * 'mask' write the corresponding Set register, for every clear bit the
+ * corresponding Clear register (multi-channel mode only). */
+static void ohci_iso_recv_set_channel_mask(struct hpsb_iso *iso, u64 mask)
+{
+	struct ohci_iso_recv *recv = iso->hostdata;
+	int i;
+
+	for (i = 0; i < 64; i++) {
+		if (mask & (1ULL << i)) {
+			if (i < 32)
+				reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoSet, (1 << i));
+			else
+				reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiSet, (1 << (i-32)));
+		} else {
+			if (i < 32)
+				reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, (1 << i));
+			else
+				reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, (1 << (i-32)));
+		}
+	}
+
+	/* issue a dummy read to force all PCI writes to be posted immediately */
+	mb();
+	reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
+}
+
+/* Program and start the IR context.
+ * 'cycle'  : bus cycle to begin reception on, or -1 for "start now";
+ * 'tag_mask': iso tag values to accept, written to ContextMatch[31:28];
+ * 'sync'   : sync field to wait for on the first descriptor, or -1 for none.
+ * Returns 0 on success, -1 if the RUN bit failed to stick. */
+static int ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync)
+{
+	struct ohci_iso_recv *recv = iso->hostdata;
+	struct ti_ohci *ohci = recv->ohci;
+	u32 command, contextMatch;
+
+	/* start from a clean ContextControl before setting mode bits */
+	reg_write(recv->ohci, recv->ContextControlClear, 0xFFFFFFFF);
+	wmb();
+
+	/* always keep ISO headers */
+	command = (1 << 30);
+
+	if (recv->dma_mode == BUFFER_FILL_MODE)
+		command |= (1 << 31);
+
+	reg_write(recv->ohci, recv->ContextControlSet, command);
+
+	/* match on specified tags */
+	contextMatch = tag_mask << 28;
+
+	if (iso->channel == -1) {
+		/* enable multichannel reception */
+		reg_write(recv->ohci, recv->ContextControlSet, (1 << 28));
+	} else {
+		/* listen on channel */
+		contextMatch |= iso->channel;
+	}
+
+	if (cycle != -1) {
+		u32 seconds;
+
+		/* enable cycleMatch */
+		reg_write(recv->ohci, recv->ContextControlSet, (1 << 29));
+
+		/* set starting cycle */
+		cycle &= 0x1FFF;
+
+		/* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
+		   just snarf them from the current time */
+		seconds = reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer) >> 25;
+
+		/* advance one second to give some extra time for DMA to start */
+		seconds += 1;
+
+		cycle |= (seconds & 3) << 13;
+
+		contextMatch |= cycle << 12;
+	}
+
+	if (sync != -1) {
+		/* set sync flag on first DMA descriptor */
+		struct dma_cmd *cmd = &recv->block[recv->block_dma];
+		cmd->control |= cpu_to_le32(DMA_CTL_WAIT);
+
+		/* match sync field */
+		contextMatch |= (sync&0xf)<<8;
+	}
+
+	reg_write(recv->ohci, recv->ContextMatch, contextMatch);
+
+	/* address of first descriptor block */
+	command = dma_prog_region_offset_to_bus(&recv->prog,
+						recv->block_dma * sizeof(struct dma_cmd));
+	command |= 1; /* Z=1 */
+
+	reg_write(recv->ohci, recv->CommandPtr, command);
+
+	/* enable interrupts */
+	reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskSet, 1 << recv->task.context);
+
+	wmb();
+
+	/* run */
+	reg_write(recv->ohci, recv->ContextControlSet, 0x8000);
+
+	/* issue a dummy read of the cycle timer register to force
+	   all PCI writes to be posted immediately */
+	mb();
+	reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
+
+	/* check RUN */
+	if (!(reg_read(recv->ohci, recv->ContextControlSet) & 0x8000)) {
+		PRINT(KERN_ERR,
+		      "Error starting IR DMA (ContextControl 0x%08x)\n",
+		      reg_read(recv->ohci, recv->ContextControlSet));
+		return -1;
+	}
+
+	return 0;
+}
+
+/* Return one DMA block to the hardware by making it the new tail of the
+ * descriptor ring: clear its branch, link the previous descriptor to it,
+ * and nudge the context's WAKE bit in case DMA had stalled at the gap. */
+static void ohci_iso_recv_release_block(struct ohci_iso_recv *recv, int block)
+{
+	/* re-use the DMA descriptor for the block */
+	/* by linking the previous descriptor to it */
+
+	int next_i = block;
+	int prev_i = (next_i == 0) ? (recv->nblocks - 1) : (next_i - 1);
+
+	struct dma_cmd *next = &recv->block[next_i];
+	struct dma_cmd *prev = &recv->block[prev_i];
+
+	/* 'next' becomes the new end of the DMA chain,
+	   so disable branch and enable interrupt */
+	next->branchAddress = 0;
+	next->control |= cpu_to_le32(3 << 20);
+	next->status = cpu_to_le32(recv->buf_stride);
+
+	/* link prev to next */
+	prev->branchAddress = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog,
+									sizeof(struct dma_cmd) * next_i)
+					  | 1); /* Z=1 */
+
+	/* disable interrupt on previous DMA descriptor, except at intervals */
+	if ((prev_i % recv->block_irq_interval) == 0) {
+		prev->control |= cpu_to_le32(3 << 20); /* enable interrupt */
+	} else {
+		prev->control &= cpu_to_le32(~(3<<20)); /* disable interrupt */
+	}
+	wmb();
+
+	/* wake up DMA in case it fell asleep */
+	reg_write(recv->ohci, recv->ContextControlSet, (1 << 12));
+}
+
+/* Account for buffer space the reader has consumed (buffer-fill mode);
+ * once at least a full block's worth has accumulated, hand whole blocks
+ * back to the DMA ring. */
+static void ohci_iso_recv_bufferfill_release(struct ohci_iso_recv *recv,
+					     struct hpsb_iso_packet_info *info)
+{
+	int len;
+
+	/* release the memory where the packet was */
+	len = info->len;
+
+	/* add the wasted space for padding to 4 bytes */
+	if (len % 4)
+		len += 4 - (len % 4);
+
+	/* add 8 bytes for the OHCI DMA data format overhead */
+	len += 8;
+
+	recv->released_bytes += len;
+
+	/* have we released enough memory for one block? */
+	while (recv->released_bytes > recv->buf_stride) {
+		ohci_iso_recv_release_block(recv, recv->block_reader);
+		recv->block_reader = (recv->block_reader + 1) % recv->nblocks;
+		recv->released_bytes -= recv->buf_stride;
+	}
+}
+
+/* Hand a consumed packet's buffer space back to the DMA engine, using
+ * whichever accounting scheme matches the context's DMA mode. */
+static inline void ohci_iso_recv_release(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
+{
+	struct ohci_iso_recv *recv = iso->hostdata;
+
+	if (recv->dma_mode != BUFFER_FILL_MODE)
+		ohci_iso_recv_release_block(recv, info - iso->infos);
+	else
+		ohci_iso_recv_bufferfill_release(recv, info);
+}
+
+/* parse all packets from blocks that have been fully received
+   (buffer-fill mode): walk forward from recv->dma_offset, splitting the
+   contiguous DMA stream into packets and delivering each one upstream,
+   stopping at the block the controller is still writing */
+static void ohci_iso_recv_bufferfill_parse(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
+{
+	int wake = 0;
+	int runaway = 0;
+	struct ti_ohci *ohci = recv->ohci;
+
+	while (1) {
+		/* we expect the next parsable packet to begin at recv->dma_offset */
+		/* note: packet layout is as shown in section 10.6.1.1 of the OHCI spec */
+
+		unsigned int offset;
+		unsigned short len, cycle;
+		unsigned char channel, tag, sy;
+
+		unsigned char *p = iso->data_buf.kvirt;
+
+		unsigned int this_block = recv->dma_offset/recv->buf_stride;
+
+		/* don't loop indefinitely */
+		if (runaway++ > 100000) {
+			atomic_inc(&iso->overflows);
+			PRINT(KERN_ERR,
+			      "IR DMA error - Runaway during buffer parsing!\n");
+			break;
+		}
+
+		/* stop parsing once we arrive at block_dma (i.e. don't get ahead of DMA) */
+		if (this_block == recv->block_dma)
+			break;
+
+		wake = 1;
+
+		/* parse data length, tag, channel, and sy */
+
+		/* note: we keep our own local copies of 'len' and 'offset'
+		   so the user can't mess with them by poking in the mmap area */
+
+		len = p[recv->dma_offset+2] | (p[recv->dma_offset+3] << 8);
+
+		/* NOTE(review): a bogus length is only logged - parsing then
+		   continues using it, relying on the runaway counter above to
+		   bound the damage */
+		if (len > 4096) {
+			PRINT(KERN_ERR,
+			      "IR DMA error - bogus 'len' value %u\n", len);
+		}
+
+		channel = p[recv->dma_offset+1] & 0x3F;
+		tag = p[recv->dma_offset+1] >> 6;
+		sy = p[recv->dma_offset+0] & 0xF;
+
+		/* advance to data payload */
+		recv->dma_offset += 4;
+
+		/* check for wrap-around */
+		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
+			recv->dma_offset -= recv->buf_stride*recv->nblocks;
+		}
+
+		/* dma_offset now points to the first byte of the data payload */
+		offset = recv->dma_offset;
+
+		/* advance to xferStatus/timeStamp */
+		recv->dma_offset += len;
+
+		/* payload is padded to 4 bytes */
+		if (len % 4) {
+			recv->dma_offset += 4 - (len%4);
+		}
+
+		/* check for wrap-around */
+		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
+			/* uh oh, the packet data wraps from the last
+                           to the first DMA block - make the packet
+                           contiguous by copying its "tail" into the
+                           guard page */
+
+			int guard_off = recv->buf_stride*recv->nblocks;
+			int tail_len = len - (guard_off - offset);
+
+			if (tail_len > 0  && tail_len < recv->buf_stride) {
+				memcpy(iso->data_buf.kvirt + guard_off,
+				       iso->data_buf.kvirt,
+				       tail_len);
+			}
+
+			recv->dma_offset -= recv->buf_stride*recv->nblocks;
+		}
+
+		/* parse timestamp */
+		cycle = p[recv->dma_offset+0] | (p[recv->dma_offset+1]<<8);
+		cycle &= 0x1FFF;
+
+		/* advance to next packet */
+		recv->dma_offset += 4;
+
+		/* check for wrap-around */
+		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
+			recv->dma_offset -= recv->buf_stride*recv->nblocks;
+		}
+
+		hpsb_iso_packet_received(iso, offset, len, cycle, channel, tag, sy);
+	}
+
+	if (wake)
+		hpsb_iso_wake(iso);
+}
+
+/* Buffer-fill mode bottom half: scan DMA blocks in order, retire those the
+ * controller has completely filled (event set, resCount 0), then parse the
+ * packets they contain. */
+static void ohci_iso_recv_bufferfill_task(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
+{
+	int loop;
+	struct ti_ohci *ohci = recv->ohci;
+
+	/* loop over all blocks */
+	for (loop = 0; loop < recv->nblocks; loop++) {
+
+		/* check block_dma to see if it's done */
+		struct dma_cmd *im = &recv->block[recv->block_dma];
+
+		/* check the DMA descriptor for new writes to xferStatus */
+		u16 xferstatus = le32_to_cpu(im->status) >> 16;
+
+		/* rescount is the number of bytes *remaining to be written* in the block */
+		u16 rescount = le32_to_cpu(im->status) & 0xFFFF;
+
+		unsigned char event = xferstatus & 0x1F;
+
+		if (!event) {
+			/* nothing has happened to this block yet */
+			break;
+		}
+
+		if (event != 0x11) {
+			atomic_inc(&iso->overflows);
+			PRINT(KERN_ERR,
+			      "IR DMA error - OHCI error code 0x%02x\n", event);
+		}
+
+		if (rescount != 0) {
+			/* the card is still writing to this block;
+			   we can't touch it until it's done */
+			break;
+		}
+
+		/* OK, the block is finished... */
+
+		/* sync our view of the block */
+		dma_region_sync_for_cpu(&iso->data_buf, recv->block_dma*recv->buf_stride, recv->buf_stride);
+
+		/* reset the DMA descriptor; the status field is little-endian
+		 * on the bus (it is read via le32_to_cpu above), so the reset
+		 * value must be converted with cpu_to_le32 */
+		im->status = cpu_to_le32(recv->buf_stride);
+
+		/* advance block_dma */
+		recv->block_dma = (recv->block_dma + 1) % recv->nblocks;
+
+		if ((recv->block_dma+1) % recv->nblocks == recv->block_reader) {
+			atomic_inc(&iso->overflows);
+			DBGMSG("ISO reception overflow - "
+			       "ran out of DMA blocks");
+		}
+	}
+
+	/* parse any packets that have arrived */
+	ohci_iso_recv_bufferfill_parse(iso, recv);
+}
+
+/* Packet-per-buffer mode bottom half: retire completed per-packet
+ * descriptors starting at iso->pkt_dma and deliver each received packet
+ * upstream. */
+static void ohci_iso_recv_packetperbuf_task(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
+{
+	int count;
+	int wake = 0;
+	struct ti_ohci *ohci = recv->ohci;
+
+	/* loop over the entire buffer */
+	for (count = 0; count < recv->nblocks; count++) {
+		u32 packet_len = 0;
+
+		/* pointer to the DMA descriptor */
+		struct dma_cmd *il = ((struct dma_cmd*) recv->prog.kvirt) + iso->pkt_dma;
+
+		/* check the DMA descriptor for new writes to xferStatus */
+		u16 xferstatus = le32_to_cpu(il->status) >> 16;
+		u16 rescount = le32_to_cpu(il->status) & 0xFFFF;
+
+		unsigned char event = xferstatus & 0x1F;
+
+		if (!event) {
+			/* this packet hasn't come in yet; we are done for now */
+			goto out;
+		}
+
+		if (event == 0x11) {
+			/* packet received successfully! */
+
+			/* rescount is the number of bytes *remaining* in the packet buffer,
+			   after the packet was written */
+			packet_len = recv->buf_stride - rescount;
+
+		} else if (event == 0x02) {
+			PRINT(KERN_ERR, "IR DMA error - packet too long for buffer\n");
+		} else if (event) {
+			PRINT(KERN_ERR, "IR DMA error - OHCI error code 0x%02x\n", event);
+		}
+
+		/* sync our view of the buffer */
+		dma_region_sync_for_cpu(&iso->data_buf, iso->pkt_dma * recv->buf_stride, recv->buf_stride);
+
+		/* record the per-packet info */
+		{
+			/* iso header is 8 bytes ahead of the data payload */
+			unsigned char *hdr;
+
+			unsigned int offset;
+			unsigned short cycle;
+			unsigned char channel, tag, sy;
+
+			offset = iso->pkt_dma * recv->buf_stride;
+			hdr = iso->data_buf.kvirt + offset;
+
+			/* skip iso header */
+			offset += 8;
+			packet_len -= 8;
+
+			cycle = (hdr[0] | (hdr[1] << 8)) & 0x1FFF;
+			channel = hdr[5] & 0x3F;
+			tag = hdr[5] >> 6;
+			sy = hdr[4] & 0xF;
+
+			hpsb_iso_packet_received(iso, offset, packet_len, cycle, channel, tag, sy);
+		}
+
+		/* reset the DMA descriptor; the status field is little-endian
+		 * on the bus (read via le32_to_cpu above), so the reset value
+		 * must be converted with cpu_to_le32 */
+		il->status = cpu_to_le32(recv->buf_stride);
+
+		wake = 1;
+		recv->block_dma = iso->pkt_dma;
+	}
+
+out:
+	if (wake)
+		hpsb_iso_wake(iso);
+}
+
+/* IR tasklet body: dispatch to the bottom half that matches the DMA mode
+ * this context was initialized with. */
+static void ohci_iso_recv_task(unsigned long data)
+{
+	struct hpsb_iso *iso = (struct hpsb_iso*) data;
+	struct ohci_iso_recv *recv = iso->hostdata;
+
+	if (recv->dma_mode != BUFFER_FILL_MODE)
+		ohci_iso_recv_packetperbuf_task(iso, recv);
+	else
+		ohci_iso_recv_bufferfill_task(iso, recv);
+}
+
+/***********************************
+ * rawiso ISO transmission         *
+ ***********************************/
+
+/* Per-context state for rawiso transmission on one OHCI IT DMA context. */
+struct ohci_iso_xmit {
+	struct ti_ohci *ohci;
+	/* memory and PCI mapping for the DMA program (iso_xmit_cmd array) */
+	struct dma_prog_region prog;
+	struct ohci1394_iso_tasklet task;
+	/* nonzero once the tasklet is registered (used by shutdown) */
+	int task_active;
+
+	/* OHCI IT DMA context control registers (byte offsets for this context) */
+	u32 ContextControlSet;
+	u32 ContextControlClear;
+	u32 CommandPtr;
+};
+
+/* transmission DMA program:
+   one OUTPUT_MORE_IMMEDIATE for the IT header
+   one OUTPUT_LAST for the buffer data */
+
+struct iso_xmit_cmd {
+	struct dma_cmd output_more_immediate;
+	/* 8-byte iso packet header carried as immediate data */
+	u8 iso_hdr[8];
+	/* unused remainder of the immediate data area */
+	u32 unused[2];
+	struct dma_cmd output_last;
+};
+
+static int ohci_iso_xmit_init(struct hpsb_iso *iso);
+static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle);
+static void ohci_iso_xmit_shutdown(struct hpsb_iso *iso);
+static void ohci_iso_xmit_task(unsigned long data);
+
+/* Allocate and initialize per-context state for an IT DMA context:
+ * one iso_xmit_cmd descriptor pair per packet slot, plus the transmit
+ * tasklet.  Returns 0 on success or -ENOMEM; partial state is torn down
+ * via ohci_iso_xmit_shutdown. */
+static int ohci_iso_xmit_init(struct hpsb_iso *iso)
+{
+	struct ohci_iso_xmit *xmit;
+	unsigned int prog_size;
+	int ctx;
+	int ret = -ENOMEM;
+
+	xmit = kmalloc(sizeof(*xmit), SLAB_KERNEL);
+	if (!xmit)
+		return -ENOMEM;
+
+	iso->hostdata = xmit;
+	xmit->ohci = iso->host->hostdata;
+	xmit->task_active = 0;
+
+	dma_prog_region_init(&xmit->prog);
+
+	prog_size = sizeof(struct iso_xmit_cmd) * iso->buf_packets;
+
+	if (dma_prog_region_alloc(&xmit->prog, prog_size, xmit->ohci->dev))
+		goto err;
+
+	ohci1394_init_iso_tasklet(&xmit->task, OHCI_ISO_TRANSMIT,
+				  ohci_iso_xmit_task, (unsigned long) iso);
+
+	if (ohci1394_register_iso_tasklet(xmit->ohci, &xmit->task) < 0)
+		goto err;
+
+	xmit->task_active = 1;
+
+	/* xmit context registers are spaced 16 bytes apart */
+	ctx = xmit->task.context;
+	xmit->ContextControlSet = OHCI1394_IsoXmitContextControlSet + 16 * ctx;
+	xmit->ContextControlClear = OHCI1394_IsoXmitContextControlClear + 16 * ctx;
+	xmit->CommandPtr = OHCI1394_IsoXmitCommandPtr + 16 * ctx;
+
+	return 0;
+
+err:
+	ohci_iso_xmit_shutdown(iso);
+	return ret;
+}
+
+/* Stop transmission on this context: mask its interrupt, then halt its
+ * DMA, warning if the context refused to stop. */
+static void ohci_iso_xmit_stop(struct hpsb_iso *iso)
+{
+	struct ohci_iso_xmit *xmit = iso->hostdata;
+	struct ti_ohci *ohci = xmit->ohci;
+
+	/* disable interrupts */
+	reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskClear, 1 << xmit->task.context);
+
+	/* halt DMA */
+	if (ohci1394_stop_context(xmit->ohci, xmit->ContextControlClear, NULL)) {
+		/* XXX the DMA context will lock up if you try to send too much data! */
+		PRINT(KERN_ERR,
+		      "you probably exceeded the OHCI card's bandwidth limit - "
+		      "reload the module and reduce xmit bandwidth");
+	}
+}
+
+/* Tear down state built by ohci_iso_xmit_init.  Also serves as init's
+ * error path, so it must be safe on a partially-initialized context. */
+static void ohci_iso_xmit_shutdown(struct hpsb_iso *iso)
+{
+	struct ohci_iso_xmit *xmit = iso->hostdata;
+
+	if (xmit->task_active) {
+		ohci_iso_xmit_stop(iso);
+		ohci1394_unregister_iso_tasklet(xmit->ohci, &xmit->task);
+		xmit->task_active = 0;
+	}
+
+	dma_prog_region_free(&xmit->prog);
+	kfree(xmit);
+	iso->hostdata = NULL;
+}
+
+/* IT tasklet body: retire descriptors for packets the controller has
+ * transmitted, report each completion upstream, and wake any writer.
+ * NOTE(review): iso->pkt_dma is presumably advanced inside
+ * hpsb_iso_packet_sent() - confirm against the hpsb_iso layer. */
+static void ohci_iso_xmit_task(unsigned long data)
+{
+	struct hpsb_iso *iso = (struct hpsb_iso*) data;
+	struct ohci_iso_xmit *xmit = iso->hostdata;
+	struct ti_ohci *ohci = xmit->ohci;
+	int wake = 0;
+	int count;
+
+	/* check the whole buffer if necessary, starting at pkt_dma */
+	for (count = 0; count < iso->buf_packets; count++) {
+		int cycle;
+
+		/* DMA descriptor */
+		struct iso_xmit_cmd *cmd = dma_region_i(&xmit->prog, struct iso_xmit_cmd, iso->pkt_dma);
+
+		/* check for new writes to xferStatus */
+		u16 xferstatus = le32_to_cpu(cmd->output_last.status) >> 16;
+		u8  event = xferstatus & 0x1F;
+
+		if (!event) {
+			/* packet hasn't been sent yet; we are done for now */
+			break;
+		}
+
+		if (event != 0x11)
+			PRINT(KERN_ERR,
+			      "IT DMA error - OHCI error code 0x%02x\n", event);
+
+		/* at least one packet went out, so wake up the writer */
+		wake = 1;
+
+		/* parse cycle */
+		cycle = le32_to_cpu(cmd->output_last.status) & 0x1FFF;
+
+		/* tell the subsystem the packet has gone out */
+		hpsb_iso_packet_sent(iso, cycle, event != 0x11);
+
+		/* reset the DMA descriptor for next time */
+		cmd->output_last.status = 0;
+	}
+
+	if (wake)
+		hpsb_iso_wake(iso);
+}
+
+/* Queue one packet for transmission: build its OUTPUT_MORE_IMMEDIATE (iso
+ * header) + OUTPUT_LAST (payload) descriptor pair at iso->first_packet and
+ * splice it onto the end of the DMA chain.  Returns 0 on success or
+ * -EINVAL if the payload crosses a page boundary. */
+static int ohci_iso_xmit_queue(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
+{
+	struct ohci_iso_xmit *xmit = iso->hostdata;
+	struct ti_ohci *ohci = xmit->ohci;
+
+	int next_i, prev_i;
+	struct iso_xmit_cmd *next, *prev;
+
+	unsigned int offset;
+	unsigned short len;
+	unsigned char tag, sy;
+
+	/* check that the packet doesn't cross a page boundary
+	   (we could allow this if we added OUTPUT_MORE descriptor support) */
+	if (cross_bound(info->offset, info->len)) {
+		PRINT(KERN_ERR,
+		      "rawiso xmit: packet %u crosses a page boundary",
+		      iso->first_packet);
+		return -EINVAL;
+	}
+
+	offset = info->offset;
+	len = info->len;
+	tag = info->tag;
+	sy = info->sy;
+
+	/* sync up the card's view of the buffer */
+	dma_region_sync_for_device(&iso->data_buf, offset, len);
+
+	/* append first_packet to the DMA chain */
+	/* by linking the previous descriptor to it */
+	/* (next will become the new end of the DMA chain) */
+
+	next_i = iso->first_packet;
+	prev_i = (next_i == 0) ? (iso->buf_packets - 1) : (next_i - 1);
+
+	next = dma_region_i(&xmit->prog, struct iso_xmit_cmd, next_i);
+	prev = dma_region_i(&xmit->prog, struct iso_xmit_cmd, prev_i);
+
+	/* set up the OUTPUT_MORE_IMMEDIATE descriptor */
+	memset(next, 0, sizeof(struct iso_xmit_cmd));
+	next->output_more_immediate.control = cpu_to_le32(0x02000008);
+
+	/* ISO packet header is embedded in the OUTPUT_MORE_IMMEDIATE */
+
+	/* tcode = 0xA, and sy */
+	next->iso_hdr[0] = 0xA0 | (sy & 0xF);
+
+	/* tag and channel number */
+	next->iso_hdr[1] = (tag << 6) | (iso->channel & 0x3F);
+
+	/* transmission speed */
+	next->iso_hdr[2] = iso->speed & 0x7;
+
+	/* payload size */
+	next->iso_hdr[6] = len & 0xFF;
+	next->iso_hdr[7] = len >> 8;
+
+	/* set up the OUTPUT_LAST */
+	next->output_last.control = cpu_to_le32(1 << 28);
+	next->output_last.control |= cpu_to_le32(1 << 27); /* update timeStamp */
+	next->output_last.control |= cpu_to_le32(3 << 20); /* want interrupt */
+	next->output_last.control |= cpu_to_le32(3 << 18); /* enable branch */
+	next->output_last.control |= cpu_to_le32(len);
+
+	/* payload bus address */
+	next->output_last.address = cpu_to_le32(dma_region_offset_to_bus(&iso->data_buf, offset));
+
+	/* leave branchAddress at zero for now */
+
+	/* re-write the previous DMA descriptor to chain to this one */
+
+	/* set prev branch address to point to next (Z=3) */
+	prev->output_last.branchAddress = cpu_to_le32(
+		dma_prog_region_offset_to_bus(&xmit->prog, sizeof(struct iso_xmit_cmd) * next_i) | 3);
+
+	/* disable interrupt, unless required by the IRQ interval */
+	if (prev_i % iso->irq_interval) {
+		prev->output_last.control &= cpu_to_le32(~(3 << 20)); /* no interrupt */
+	} else {
+		prev->output_last.control |= cpu_to_le32(3 << 20); /* enable interrupt */
+	}
+
+	wmb();
+
+	/* wake DMA in case it is sleeping */
+	reg_write(xmit->ohci, xmit->ContextControlSet, 1 << 12);
+
+	/* issue a dummy read of the cycle timer to force all PCI
+	   writes to be posted immediately */
+	mb();
+	reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer);
+
+	return 0;
+}
+
+/* Start the IT context at the descriptor for iso->pkt_dma.
+ * 'cycle' (-1 = start immediately) selects a start cycle via cycleMatch.
+ * Returns 0 on success, -1 if the RUN bit failed to stick. */
+static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle)
+{
+	struct ohci_iso_xmit *xmit = iso->hostdata;
+	struct ti_ohci *ohci = xmit->ohci;
+
+	/* clear out the control register */
+	reg_write(xmit->ohci, xmit->ContextControlClear, 0xFFFFFFFF);
+	wmb();
+
+	/* address and length of first descriptor block (Z=3) */
+	reg_write(xmit->ohci, xmit->CommandPtr,
+		  dma_prog_region_offset_to_bus(&xmit->prog, iso->pkt_dma * sizeof(struct iso_xmit_cmd)) | 3);
+
+	/* cycle match */
+	if (cycle != -1) {
+		u32 start = cycle & 0x1FFF;
+
+		/* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
+		   just snarf them from the current time */
+		u32 seconds = reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer) >> 25;
+
+		/* advance one second to give some extra time for DMA to start */
+		seconds += 1;
+
+		start |= (seconds & 3) << 13;
+
+		reg_write(xmit->ohci, xmit->ContextControlSet, 0x80000000 | (start << 16));
+	}
+
+	/* enable interrupts */
+	reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskSet, 1 << xmit->task.context);
+
+	/* run */
+	reg_write(xmit->ohci, xmit->ContextControlSet, 0x8000);
+	mb();
+
+	/* wait 100 usec to give the card time to go active */
+	udelay(100);
+
+	/* check the RUN bit */
+	if (!(reg_read(xmit->ohci, xmit->ContextControlSet) & 0x8000)) {
+		PRINT(KERN_ERR, "Error starting IT DMA (ContextControl 0x%08x)\n",
+		      reg_read(xmit->ohci, xmit->ContextControlSet));
+		return -1;
+	}
+
+	return 0;
+}
+
+/* Rawiso control dispatcher for this host driver: map each isoctl command
+ * onto the corresponding recv/xmit operation above.  'arg' is
+ * command-specific (an int, a pointer cast to unsigned long, etc.).
+ * Returns the operation's result, or -EINVAL for unknown commands. */
+static int ohci_isoctl(struct hpsb_iso *iso, enum isoctl_cmd cmd, unsigned long arg)
+{
+
+	switch(cmd) {
+	case XMIT_INIT:
+		return ohci_iso_xmit_init(iso);
+	case XMIT_START:
+		return ohci_iso_xmit_start(iso, arg);
+	case XMIT_STOP:
+		ohci_iso_xmit_stop(iso);
+		return 0;
+	case XMIT_QUEUE:
+		return ohci_iso_xmit_queue(iso, (struct hpsb_iso_packet_info*) arg);
+	case XMIT_SHUTDOWN:
+		ohci_iso_xmit_shutdown(iso);
+		return 0;
+
+	case RECV_INIT:
+		return ohci_iso_recv_init(iso);
+	case RECV_START: {
+		/* arg points to three ints: cycle, tag_mask, sync */
+		int *args = (int*) arg;
+		return ohci_iso_recv_start(iso, args[0], args[1], args[2]);
+	}
+	case RECV_STOP:
+		ohci_iso_recv_stop(iso);
+		return 0;
+	case RECV_RELEASE:
+		ohci_iso_recv_release(iso, (struct hpsb_iso_packet_info*) arg);
+		return 0;
+	case RECV_FLUSH:
+		/* run the bottom half synchronously to drain completed packets */
+		ohci_iso_recv_task((unsigned long) iso);
+		return 0;
+	case RECV_SHUTDOWN:
+		ohci_iso_recv_shutdown(iso);
+		return 0;
+	case RECV_LISTEN_CHANNEL:
+		ohci_iso_recv_change_channel(iso, arg, 1);
+		return 0;
+	case RECV_UNLISTEN_CHANNEL:
+		ohci_iso_recv_change_channel(iso, arg, 0);
+		return 0;
+	case RECV_SET_CHANNEL_MASK:
+		ohci_iso_recv_set_channel_mask(iso, *((u64*) arg));
+		return 0;
+
+	default:
+		PRINT_G(KERN_ERR, "ohci_isoctl cmd %d not implemented yet",
+			cmd);
+		break;
+	}
+	return -EINVAL;
+}
+
+/***************************************
+ * IEEE-1394 functionality section END *
+ ***************************************/
+
+
+/********************************************************
+ * Global stuff (interrupt handler, init/shutdown code) *
+ ********************************************************/
+
+/* Abort an async transmit context: stop its DMA, reclaim every queued
+ * packet (both in-flight fifo_list and not-yet-programmed pending_list)
+ * under d->lock, reset the descriptor ring bookkeeping, then complete
+ * each reclaimed packet with ACKX_ABORTED after dropping the lock
+ * (the completion callbacks must not run under d->lock). */
+static void dma_trm_reset(struct dma_trm_ctx *d)
+{
+	unsigned long flags;
+	LIST_HEAD(packet_list);
+	struct ti_ohci *ohci = d->ohci;
+	struct hpsb_packet *packet, *ptmp;
+
+	ohci1394_stop_context(ohci, d->ctrlClear, NULL);
+
+	/* Lock the context, reset it and release it. Move the packets
+	 * that were pending in the context to packet_list and free
+	 * them after releasing the lock. */
+
+	spin_lock_irqsave(&d->lock, flags);
+
+	list_splice(&d->fifo_list, &packet_list);
+	list_splice(&d->pending_list, &packet_list);
+	INIT_LIST_HEAD(&d->fifo_list);
+	INIT_LIST_HEAD(&d->pending_list);
+
+	d->branchAddrPtr = NULL;
+	d->sent_ind = d->prg_ind;
+	d->free_prgs = d->num_desc;
+
+	spin_unlock_irqrestore(&d->lock, flags);
+
+	if (list_empty(&packet_list))
+		return;
+
+	PRINT(KERN_INFO, "AT dma reset ctx=%d, aborting transmission", d->ctx);
+
+	/* Now process subsystem callbacks for the packets from this
+	 * context. */
+	list_for_each_entry_safe(packet, ptmp, &packet_list, driver_list) {
+		list_del_init(&packet->driver_list);
+		hpsb_packet_sent(ohci->host, packet, ACKX_ABORTED);
+	}
+}
+
+/* Schedule every registered iso tasklet whose context bit is set in the
+ * pending rx/tx event masks.  Callers pass exactly one non-zero mask.
+ * NOTE(review): plain spin_lock (no irqsave) looks safe only because
+ * this is called from hard-irq context with interrupts disabled --
+ * confirm all call sites. */
+static void ohci_schedule_iso_tasklets(struct ti_ohci *ohci,
+				       quadlet_t rx_event,
+				       quadlet_t tx_event)
+{
+	struct ohci1394_iso_tasklet *t;
+	unsigned long mask;
+
+	spin_lock(&ohci->iso_tasklet_list_lock);
+
+	list_for_each_entry(t, &ohci->iso_tasklet_list, link) {
+		mask = 1 << t->context;
+
+		if (t->type == OHCI_ISO_TRANSMIT && tx_event & mask)
+			tasklet_schedule(&t->tasklet);
+		else if (rx_event & mask)
+			tasklet_schedule(&t->tasklet);
+	}
+
+	spin_unlock(&ohci->iso_tasklet_list_lock);
+
+}
+
+/* Top-level interrupt handler.  Reads and acknowledges IntEvent (all
+ * bits except busReset, which can only be cleared once selfIDComplete
+ * arrives), then dispatches each pending bit: unrecoverable errors are
+ * logged per context, async tx/rx contexts get their tasklets
+ * scheduled (or are stopped if dead), iso events are fanned out via
+ * ohci_schedule_iso_tasklets(), and selfIDComplete finishes the bus
+ * reset sequence and re-opens the request filters. */
+static irqreturn_t ohci_irq_handler(int irq, void *dev_id,
+                             struct pt_regs *regs_are_unused)
+{
+	quadlet_t event, node_id;
+	struct ti_ohci *ohci = (struct ti_ohci *)dev_id;
+	struct hpsb_host *host = ohci->host;
+	int phyid = -1, isroot = 0;
+	unsigned long flags;
+
+	/* Read and clear the interrupt event register.  Don't clear
+	 * the busReset event, though. This is done when we get the
+	 * selfIDComplete interrupt. */
+	spin_lock_irqsave(&ohci->event_lock, flags);
+	event = reg_read(ohci, OHCI1394_IntEventClear);
+	reg_write(ohci, OHCI1394_IntEventClear, event & ~OHCI1394_busReset);
+	spin_unlock_irqrestore(&ohci->event_lock, flags);
+
+	if (!event)
+		return IRQ_NONE;
+
+	/* If event is ~(u32)0 cardbus card was ejected.  In this case
+	 * we just return, and clean up in the ohci1394_pci_remove
+	 * function. */
+	if (event == ~(u32) 0) {
+		DBGMSG("Device removed.");
+		return IRQ_NONE;
+	}
+
+	DBGMSG("IntEvent: %08x", event);
+
+	if (event & OHCI1394_unrecoverableError) {
+		int ctx;
+		PRINT(KERN_ERR, "Unrecoverable error!");
+
+		/* bit 0x800 in a ContextControl register is the 'active'
+		 * flag; a still-active context here is the one that died */
+		if (reg_read(ohci, OHCI1394_AsReqTrContextControlSet) & 0x800)
+			PRINT(KERN_ERR, "Async Req Tx Context died: "
+				"ctrl[%08x] cmdptr[%08x]",
+				reg_read(ohci, OHCI1394_AsReqTrContextControlSet),
+				reg_read(ohci, OHCI1394_AsReqTrCommandPtr));
+
+		if (reg_read(ohci, OHCI1394_AsRspTrContextControlSet) & 0x800)
+			PRINT(KERN_ERR, "Async Rsp Tx Context died: "
+				"ctrl[%08x] cmdptr[%08x]",
+				reg_read(ohci, OHCI1394_AsRspTrContextControlSet),
+				reg_read(ohci, OHCI1394_AsRspTrCommandPtr));
+
+		if (reg_read(ohci, OHCI1394_AsReqRcvContextControlSet) & 0x800)
+			PRINT(KERN_ERR, "Async Req Rcv Context died: "
+				"ctrl[%08x] cmdptr[%08x]",
+				reg_read(ohci, OHCI1394_AsReqRcvContextControlSet),
+				reg_read(ohci, OHCI1394_AsReqRcvCommandPtr));
+
+		if (reg_read(ohci, OHCI1394_AsRspRcvContextControlSet) & 0x800)
+			PRINT(KERN_ERR, "Async Rsp Rcv Context died: "
+				"ctrl[%08x] cmdptr[%08x]",
+				reg_read(ohci, OHCI1394_AsRspRcvContextControlSet),
+				reg_read(ohci, OHCI1394_AsRspRcvCommandPtr));
+
+		/* iso xmit register banks are 16 bytes apart, rcv banks 32 */
+		for (ctx = 0; ctx < ohci->nb_iso_xmit_ctx; ctx++) {
+			if (reg_read(ohci, OHCI1394_IsoXmitContextControlSet + (16 * ctx)) & 0x800)
+				PRINT(KERN_ERR, "Iso Xmit %d Context died: "
+					"ctrl[%08x] cmdptr[%08x]", ctx,
+					reg_read(ohci, OHCI1394_IsoXmitContextControlSet + (16 * ctx)),
+					reg_read(ohci, OHCI1394_IsoXmitCommandPtr + (16 * ctx)));
+		}
+
+		for (ctx = 0; ctx < ohci->nb_iso_rcv_ctx; ctx++) {
+			if (reg_read(ohci, OHCI1394_IsoRcvContextControlSet + (32 * ctx)) & 0x800)
+				PRINT(KERN_ERR, "Iso Recv %d Context died: "
+					"ctrl[%08x] cmdptr[%08x] match[%08x]", ctx,
+					reg_read(ohci, OHCI1394_IsoRcvContextControlSet + (32 * ctx)),
+					reg_read(ohci, OHCI1394_IsoRcvCommandPtr + (32 * ctx)),
+					reg_read(ohci, OHCI1394_IsoRcvContextMatch + (32 * ctx)));
+		}
+
+		event &= ~OHCI1394_unrecoverableError;
+	}
+
+	if (event & OHCI1394_cycleInconsistent) {
+		/* We subscribe to the cycleInconsistent event only to
+		 * clear the corresponding event bit... otherwise,
+		 * isochronous cycleMatch DMA won't work. */
+		DBGMSG("OHCI1394_cycleInconsistent");
+		event &= ~OHCI1394_cycleInconsistent;
+	}
+
+	if (event & OHCI1394_busReset) {
+		/* The busReset event bit can't be cleared during the
+		 * selfID phase, so we disable busReset interrupts, to
+		 * avoid burying the cpu in interrupt requests. */
+		spin_lock_irqsave(&ohci->event_lock, flags);
+		reg_write(ohci, OHCI1394_IntMaskClear, OHCI1394_busReset);
+
+		if (ohci->check_busreset) {
+			int loop_count = 0;
+
+			udelay(10);
+
+			while (reg_read(ohci, OHCI1394_IntEventSet) & OHCI1394_busReset) {
+				reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
+
+				/* drop the lock across the delay so other
+				 * event_lock users are not starved */
+				spin_unlock_irqrestore(&ohci->event_lock, flags);
+				udelay(10);
+				spin_lock_irqsave(&ohci->event_lock, flags);
+
+				/* The loop counter check is to prevent the driver
+				 * from remaining in this state forever. For the
+				 * initial bus reset, the loop continues for ever
+				 * and the system hangs, until some device is plugged-in
+				 * or out manually into a port! The forced reset seems
+				 * to solve this problem. This mainly effects nForce2. */
+				if (loop_count > 10000) {
+					ohci_devctl(host, RESET_BUS, LONG_RESET);
+					DBGMSG("Detected bus-reset loop. Forced a bus reset!");
+					loop_count = 0;
+				}
+
+				loop_count++;
+			}
+		}
+		spin_unlock_irqrestore(&ohci->event_lock, flags);
+		if (!host->in_bus_reset) {
+			DBGMSG("irq_handler: Bus reset requested");
+
+			/* Subsystem call */
+			hpsb_bus_reset(ohci->host);
+		}
+		event &= ~OHCI1394_busReset;
+	}
+
+	if (event & OHCI1394_reqTxComplete) {
+		struct dma_trm_ctx *d = &ohci->at_req_context;
+		DBGMSG("Got reqTxComplete interrupt "
+		       "status=0x%08X", reg_read(ohci, d->ctrlSet));
+		if (reg_read(ohci, d->ctrlSet) & 0x800)
+			ohci1394_stop_context(ohci, d->ctrlClear,
+					      "reqTxComplete");
+		else
+			dma_trm_tasklet((unsigned long)d);
+			//tasklet_schedule(&d->task);
+		event &= ~OHCI1394_reqTxComplete;
+	}
+	if (event & OHCI1394_respTxComplete) {
+		struct dma_trm_ctx *d = &ohci->at_resp_context;
+		DBGMSG("Got respTxComplete interrupt "
+		       "status=0x%08X", reg_read(ohci, d->ctrlSet));
+		if (reg_read(ohci, d->ctrlSet) & 0x800)
+			ohci1394_stop_context(ohci, d->ctrlClear,
+					      "respTxComplete");
+		else
+			tasklet_schedule(&d->task);
+		event &= ~OHCI1394_respTxComplete;
+	}
+	if (event & OHCI1394_RQPkt) {
+		struct dma_rcv_ctx *d = &ohci->ar_req_context;
+		DBGMSG("Got RQPkt interrupt status=0x%08X",
+		       reg_read(ohci, d->ctrlSet));
+		if (reg_read(ohci, d->ctrlSet) & 0x800)
+			ohci1394_stop_context(ohci, d->ctrlClear, "RQPkt");
+		else
+			tasklet_schedule(&d->task);
+		event &= ~OHCI1394_RQPkt;
+	}
+	if (event & OHCI1394_RSPkt) {
+		struct dma_rcv_ctx *d = &ohci->ar_resp_context;
+		DBGMSG("Got RSPkt interrupt status=0x%08X",
+		       reg_read(ohci, d->ctrlSet));
+		if (reg_read(ohci, d->ctrlSet) & 0x800)
+			ohci1394_stop_context(ohci, d->ctrlClear, "RSPkt");
+		else
+			tasklet_schedule(&d->task);
+		event &= ~OHCI1394_RSPkt;
+	}
+	if (event & OHCI1394_isochRx) {
+		quadlet_t rx_event;
+
+		rx_event = reg_read(ohci, OHCI1394_IsoRecvIntEventSet);
+		reg_write(ohci, OHCI1394_IsoRecvIntEventClear, rx_event);
+		ohci_schedule_iso_tasklets(ohci, rx_event, 0);
+		event &= ~OHCI1394_isochRx;
+	}
+	if (event & OHCI1394_isochTx) {
+		quadlet_t tx_event;
+
+		tx_event = reg_read(ohci, OHCI1394_IsoXmitIntEventSet);
+		reg_write(ohci, OHCI1394_IsoXmitIntEventClear, tx_event);
+		ohci_schedule_iso_tasklets(ohci, 0, tx_event);
+		event &= ~OHCI1394_isochTx;
+	}
+	if (event & OHCI1394_selfIDComplete) {
+		if (host->in_bus_reset) {
+			node_id = reg_read(ohci, OHCI1394_NodeID);
+
+			/* bit 31 of NodeID = idValid */
+			if (!(node_id & 0x80000000)) {
+				PRINT(KERN_ERR,
+				      "SelfID received, but NodeID invalid "
+				      "(probably new bus reset occurred): %08X",
+				      node_id);
+				goto selfid_not_valid;
+			}
+
+			phyid =  node_id & 0x0000003f;
+			isroot = (node_id & 0x40000000) != 0;
+
+			DBGMSG("SelfID interrupt received "
+			      "(phyid %d, %s)", phyid,
+			      (isroot ? "root" : "not root"));
+
+			handle_selfid(ohci, host, phyid, isroot);
+
+			/* Clear the bus reset event and re-enable the
+			 * busReset interrupt.  */
+			spin_lock_irqsave(&ohci->event_lock, flags);
+			reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
+			reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
+			spin_unlock_irqrestore(&ohci->event_lock, flags);
+
+			/* Accept Physical requests from all nodes. */
+			reg_write(ohci,OHCI1394_AsReqFilterHiSet, 0xffffffff);
+			reg_write(ohci,OHCI1394_AsReqFilterLoSet, 0xffffffff);
+
+			/* Turn on phys dma reception.
+			 *
+			 * TODO: Enable some sort of filtering management.
+			 */
+			if (phys_dma) {
+				reg_write(ohci,OHCI1394_PhyReqFilterHiSet, 0xffffffff);
+				reg_write(ohci,OHCI1394_PhyReqFilterLoSet, 0xffffffff);
+				reg_write(ohci,OHCI1394_PhyUpperBound, 0xffff0000);
+			} else {
+				reg_write(ohci,OHCI1394_PhyReqFilterHiSet, 0x00000000);
+				reg_write(ohci,OHCI1394_PhyReqFilterLoSet, 0x00000000);
+			}
+
+			DBGMSG("PhyReqFilter=%08x%08x",
+			       reg_read(ohci,OHCI1394_PhyReqFilterHiSet),
+			       reg_read(ohci,OHCI1394_PhyReqFilterLoSet));
+
+			hpsb_selfid_complete(host, phyid, isroot);
+		} else
+			PRINT(KERN_ERR,
+			      "SelfID received outside of bus reset sequence");
+
+selfid_not_valid:
+		event &= ~OHCI1394_selfIDComplete;
+	}
+
+	/* Make sure we handle everything, just in case we accidentally
+	 * enabled an interrupt that we didn't write a handler for.  */
+	if (event)
+		PRINT(KERN_ERR, "Unhandled interrupt(s) 0x%08x",
+		      event);
+
+	return IRQ_HANDLED;
+}
+
+/* Put a consumed receive buffer back into the dma context: reset its
+ * residual count, clear its branch bits, then point the previous
+ * descriptor at it (Z=1) and nudge the context's WAKE bit so the
+ * controller re-reads the ring. */
+static void insert_dma_buffer(struct dma_rcv_ctx *d, int idx)
+{
+	struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
+	DBGMSG("Inserting dma buf ctx=%d idx=%d", d->ctx, idx);
+
+	/* The descriptor fields are little-endian, so CPU constants
+	 * stored into them must go through cpu_to_le32().  (The old
+	 * code used le32_to_cpu(), which performs the identical byte
+	 * swap on every architecture but is the wrong direction
+	 * semantically.) */
+	d->prg_cpu[idx]->status = cpu_to_le32(d->buf_size);
+	d->prg_cpu[idx]->branchAddress &= cpu_to_le32(0xfffffff0);
+	idx = (idx + d->num_desc - 1 ) % d->num_desc;
+	d->prg_cpu[idx]->branchAddress |= cpu_to_le32(0x00000001);
+
+	/* To avoid a race, ensure 1394 interface hardware sees the inserted
+	 * context program descriptors before it sees the wakeup bit set. */
+	wmb();
+
+	/* wake up the dma context if necessary */
+	if (!(reg_read(ohci, d->ctrlSet) & 0x400)) {
+		PRINT(KERN_INFO,
+		      "Waking dma ctx=%d ... processing is probably too slow",
+		      d->ctx);
+	}
+
+	/* do this always, to avoid race condition */
+	reg_write(ohci, d->ctrlSet, 0x1000);
+}
+
+/* Conditionally byte-swap a little-endian quadlet: controllers flagged
+ * no_swap_incoming deliver data already in host order.  Arguments and
+ * the whole expansion are parenthesized so the macro is safe in any
+ * expression context. */
+#define cond_le32_to_cpu(data, noswap) \
+	((noswap) ? (data) : le32_to_cpu(data))
+
+/* Per-tcode packet size used by packet_length(): a positive value is
+ * the fixed on-the-wire size including trailer, 0 means the size is
+ * variable and must be read from the packet header's data_length
+ * field, and -1 marks tcodes this receive path does not handle. */
+static const int TCODE_SIZE[16] = {20, 0, 16, -1, 16, 20, 20, 0,
+			    -1, 0, -1, 0, -1, -1, 16, -1};
+
+/*
+ * Determine the length of a packet in the buffer
+ * Optimization suggested by Pascal Drolet <pascal.drolet@informission.ca>
+ *
+ * Returns the quadlet-aligned total length (header + payload + status
+ * trailer), or a negative/zero value for tcodes the context cannot
+ * handle -- callers treat any result < 4 as an error.
+ */
+static __inline__ int packet_length(struct dma_rcv_ctx *d, int idx, quadlet_t *buf_ptr,
+			 int offset, unsigned char tcode, int noswap)
+{
+	int length = -1;
+
+	if (d->type == DMA_CTX_ASYNC_REQ || d->type == DMA_CTX_ASYNC_RESP) {
+		length = TCODE_SIZE[tcode];
+		if (length == 0) {
+			/* variable-size packet: data_length lives in the
+			 * upper half of header quadlet 3, which may have
+			 * wrapped into the next ring buffer */
+			if (offset + 12 >= d->buf_size) {
+				length = (cond_le32_to_cpu(d->buf_cpu[(idx + 1) % d->num_desc]
+						[3 - ((d->buf_size - offset) >> 2)], noswap) >> 16);
+			} else {
+				length = (cond_le32_to_cpu(buf_ptr[3], noswap) >> 16);
+			}
+			/* add the fixed 16-byte header + 4-byte trailer */
+			length += 20;
+		}
+	} else if (d->type == DMA_CTX_ISO) {
+		/* Assumption: buffer fill mode with header/trailer */
+		length = (cond_le32_to_cpu(buf_ptr[0], noswap) >> 16) + 8;
+	}
+
+	/* round up to quadlet alignment */
+	if (length > 0 && length % 4)
+		length += 4 - (length % 4);
+
+	return length;
+}
+
+/* Tasklet that processes dma receive buffers.  Walks the buffer ring
+ * from d->buf_ind/d->buf_offset, reassembling packets that straddle
+ * buffer boundaries into the d->spb bounce buffer, and hands each
+ * complete packet to hpsb_packet_received().  PHY packets (one arrives
+ * per bus reset) are dropped.  On malformed input the whole context is
+ * stopped. */
+static void dma_rcv_tasklet (unsigned long data)
+{
+	struct dma_rcv_ctx *d = (struct dma_rcv_ctx*)data;
+	struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
+	unsigned int split_left, idx, offset, rescount;
+	unsigned char tcode;
+	int length, bytes_left, ack;
+	unsigned long flags;
+	quadlet_t *buf_ptr;
+	char *split_ptr;
+	char msg[256];
+
+	spin_lock_irqsave(&d->lock, flags);
+
+	idx = d->buf_ind;
+	offset = d->buf_offset;
+	buf_ptr = d->buf_cpu[idx] + offset/4;
+
+	/* the descriptor's resCount field tells how much of this buffer
+	 * the controller has NOT yet filled */
+	rescount = le32_to_cpu(d->prg_cpu[idx]->status) & 0xffff;
+	bytes_left = d->buf_size - rescount - offset;
+
+	while (bytes_left > 0) {
+		tcode = (cond_le32_to_cpu(buf_ptr[0], ohci->no_swap_incoming) >> 4) & 0xf;
+
+		/* packet_length() will return < 4 for an error */
+		length = packet_length(d, idx, buf_ptr, offset, tcode, ohci->no_swap_incoming);
+
+		if (length < 4) { /* something is wrong */
+			sprintf(msg,"Unexpected tcode 0x%x(0x%08x) in AR ctx=%d, length=%d",
+				tcode, cond_le32_to_cpu(buf_ptr[0], ohci->no_swap_incoming),
+				d->ctx, length);
+			ohci1394_stop_context(ohci, d->ctrlClear, msg);
+			spin_unlock_irqrestore(&d->lock, flags);
+			return;
+		}
+
+		/* The first case is where we have a packet that crosses
+		 * over more than one descriptor. The next case is where
+		 * it's all in the first descriptor.  */
+		if ((offset + length) > d->buf_size) {
+			DBGMSG("Split packet rcv'd");
+			if (length > d->split_buf_size) {
+				ohci1394_stop_context(ohci, d->ctrlClear,
+					     "Split packet size exceeded");
+				d->buf_ind = idx;
+				d->buf_offset = offset;
+				spin_unlock_irqrestore(&d->lock, flags);
+				return;
+			}
+
+			if (le32_to_cpu(d->prg_cpu[(idx+1)%d->num_desc]->status)
+			    == d->buf_size) {
+				/* Other part of packet not written yet.
+				 * this should never happen I think
+				 * anyway we'll get it on the next call.  */
+				PRINT(KERN_INFO,
+				      "Got only half a packet!");
+				d->buf_ind = idx;
+				d->buf_offset = offset;
+				spin_unlock_irqrestore(&d->lock, flags);
+				return;
+			}
+
+			/* copy the tail of this buffer plus as many whole
+			 * following buffers as needed into the bounce buffer,
+			 * recycling each exhausted buffer as we go */
+			split_left = length;
+			split_ptr = (char *)d->spb;
+			memcpy(split_ptr,buf_ptr,d->buf_size-offset);
+			split_left -= d->buf_size-offset;
+			split_ptr += d->buf_size-offset;
+			insert_dma_buffer(d, idx);
+			idx = (idx+1) % d->num_desc;
+			buf_ptr = d->buf_cpu[idx];
+			offset=0;
+
+			while (split_left >= d->buf_size) {
+				memcpy(split_ptr,buf_ptr,d->buf_size);
+				split_ptr += d->buf_size;
+				split_left -= d->buf_size;
+				insert_dma_buffer(d, idx);
+				idx = (idx+1) % d->num_desc;
+				buf_ptr = d->buf_cpu[idx];
+			}
+
+			if (split_left > 0) {
+				memcpy(split_ptr, buf_ptr, split_left);
+				offset = split_left;
+				buf_ptr += offset/4;
+			}
+		} else {
+			DBGMSG("Single packet rcv'd");
+			memcpy(d->spb, buf_ptr, length);
+			offset += length;
+			buf_ptr += length/4;
+			if (offset==d->buf_size) {
+				insert_dma_buffer(d, idx);
+				idx = (idx+1) % d->num_desc;
+				buf_ptr = d->buf_cpu[idx];
+				offset=0;
+			}
+		}
+
+		/* We get one phy packet to the async descriptor for each
+		 * bus reset. We always ignore it.  */
+		if (tcode != OHCI1394_TCODE_PHY) {
+			if (!ohci->no_swap_incoming)
+				packet_swab(d->spb, tcode);
+			DBGMSG("Packet received from node"
+				" %d ack=0x%02X spd=%d tcode=0x%X"
+				" length=%d ctx=%d tlabel=%d",
+				(d->spb[1]>>16)&0x3f,
+				(cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f,
+				(cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>21)&0x3,
+				tcode, length, d->ctx,
+				(cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>10)&0x3f);
+
+			/* NOTE(review): 0x11 appears to be ack_complete in
+			 * the trailer's xferStatus field -- confirm against
+			 * the OHCI 1394 specification */
+			ack = (((cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f)
+				== 0x11) ? 1 : 0;
+
+			/* length-4: strip the status trailer quadlet */
+			hpsb_packet_received(ohci->host, d->spb,
+					     length-4, ack);
+		}
+#ifdef OHCI1394_DEBUG
+		else
+			PRINT (KERN_DEBUG, "Got phy packet ctx=%d ... discarded",
+			       d->ctx);
+#endif
+
+	       	rescount = le32_to_cpu(d->prg_cpu[idx]->status) & 0xffff;
+
+		bytes_left = d->buf_size - rescount - offset;
+
+	}
+
+	d->buf_ind = idx;
+	d->buf_offset = offset;
+
+	spin_unlock_irqrestore(&d->lock, flags);
+}
+
+/* Bottom half that processes sent packets.  Walks the fifo list in
+ * submission order, reads each descriptor's completion status, maps
+ * the OHCI evt_*/ack code to an hpsb ACK(X) value, completes the
+ * packet and unmaps its payload, then flushes any packets that were
+ * waiting for free descriptors. */
+static void dma_trm_tasklet (unsigned long data)
+{
+	struct dma_trm_ctx *d = (struct dma_trm_ctx*)data;
+	struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
+	struct hpsb_packet *packet, *ptmp;
+	unsigned long flags;
+	u32 status, ack;
+        size_t datasize;
+
+	spin_lock_irqsave(&d->lock, flags);
+
+	list_for_each_entry_safe(packet, ptmp, &d->fifo_list, driver_list) {
+                datasize = packet->data_size;
+		/* packets with payload use a two-descriptor program, so
+		 * the status lives in the 'end' descriptor */
+		if (datasize && packet->type != hpsb_raw)
+			status = le32_to_cpu(
+				d->prg_cpu[d->sent_ind]->end.status) >> 16;
+		else
+			status = le32_to_cpu(
+				d->prg_cpu[d->sent_ind]->begin.status) >> 16;
+
+		if (status == 0)
+			/* this packet hasn't been sent yet*/
+			break;
+
+#ifdef OHCI1394_DEBUG
+		if (datasize)
+			if (((le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf) == 0xa)
+				DBGMSG("Stream packet sent to channel %d tcode=0x%X "
+				       "ack=0x%X spd=%d dataLength=%d ctx=%d",
+				       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>8)&0x3f,
+				       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf,
+				       status&0x1f, (status>>5)&0x3,
+				       le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])>>16,
+				       d->ctx);
+			else
+				DBGMSG("Packet sent to node %d tcode=0x%X tLabel="
+				       "0x%02X ack=0x%X spd=%d dataLength=%d ctx=%d",
+				       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])>>16)&0x3f,
+				       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf,
+				       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>10)&0x3f,
+				       status&0x1f, (status>>5)&0x3,
+				       le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3])>>16,
+				       d->ctx);
+		else
+			DBGMSG("Packet sent to node %d tcode=0x%X tLabel="
+			       "0x%02X ack=0x%X spd=%d data=0x%08X ctx=%d",
+                                (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])
+                                        >>16)&0x3f,
+                                (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
+                                        >>4)&0xf,
+                                (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
+                                        >>10)&0x3f,
+                                status&0x1f, (status>>5)&0x3,
+                                le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3]),
+                                d->ctx);
+#endif
+
+		/* bit 4 set = a real 1394 ack code is in the low nibble;
+		 * otherwise the low 5 bits are an OHCI evt_* error code */
+		if (status & 0x10) {
+			ack = status & 0xf;
+		} else {
+			switch (status & 0x1f) {
+			case EVT_NO_STATUS: /* that should never happen */
+			case EVT_RESERVED_A: /* that should never happen */
+			case EVT_LONG_PACKET: /* that should never happen */
+				PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
+				ack = ACKX_SEND_ERROR;
+				break;
+			case EVT_MISSING_ACK:
+				ack = ACKX_TIMEOUT;
+				break;
+			case EVT_UNDERRUN:
+				ack = ACKX_SEND_ERROR;
+				break;
+			case EVT_OVERRUN: /* that should never happen */
+				PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
+				ack = ACKX_SEND_ERROR;
+				break;
+			case EVT_DESCRIPTOR_READ:
+			case EVT_DATA_READ:
+			case EVT_DATA_WRITE:
+				ack = ACKX_SEND_ERROR;
+				break;
+			case EVT_BUS_RESET: /* that should never happen */
+				PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
+				ack = ACKX_SEND_ERROR;
+				break;
+			case EVT_TIMEOUT:
+				ack = ACKX_TIMEOUT;
+				break;
+			case EVT_TCODE_ERR:
+				ack = ACKX_SEND_ERROR;
+				break;
+			case EVT_RESERVED_B: /* that should never happen */
+			case EVT_RESERVED_C: /* that should never happen */
+				PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
+				ack = ACKX_SEND_ERROR;
+				break;
+			case EVT_UNKNOWN:
+			case EVT_FLUSHED:
+				ack = ACKX_SEND_ERROR;
+				break;
+			default:
+				PRINT(KERN_ERR, "Unhandled OHCI evt_* error 0x%x", status & 0x1f);
+				ack = ACKX_SEND_ERROR;
+				BUG();
+			}
+		}
+
+		list_del_init(&packet->driver_list);
+		hpsb_packet_sent(ohci->host, packet, ack);
+
+		if (datasize) {
+			/* NOTE(review): end.address is a little-endian
+			 * descriptor field, so this should conceptually be
+			 * le32_to_cpu(); cpu_to_le32() performs the same
+			 * byte swap, so the value is unaffected. */
+			pci_unmap_single(ohci->dev,
+					 cpu_to_le32(d->prg_cpu[d->sent_ind]->end.address),
+					 datasize, PCI_DMA_TODEVICE);
+			OHCI_DMA_FREE("single Xmit data packet");
+		}
+
+		d->sent_ind = (d->sent_ind+1)%d->num_desc;
+		d->free_prgs++;
+	}
+
+	dma_trm_flush(ohci, d);
+
+	spin_unlock_irqrestore(&d->lock, flags);
+}
+
+/* Stop a receive context and quiesce its bottom half.  A zero
+ * ctrlClear means the context was never brought up, so there is
+ * nothing to stop.  The legacy iso context additionally has its IR
+ * interrupt masked and its iso tasklet unregistered; async contexts
+ * just get their tasklet killed. */
+static void stop_dma_rcv_ctx(struct dma_rcv_ctx *d)
+{
+	if (d->ctrlClear) {
+		ohci1394_stop_context(d->ohci, d->ctrlClear, NULL);
+
+		if (d->type == DMA_CTX_ISO) {
+			/* disable interrupts */
+			reg_write(d->ohci, OHCI1394_IsoRecvIntMaskClear, 1 << d->ctx);
+			ohci1394_unregister_iso_tasklet(d->ohci, &d->ohci->ir_legacy_tasklet);
+		} else {
+			tasklet_kill(&d->task);
+		}
+	}
+}
+
+
+/* Tear down a receive DMA context: data buffers, descriptor pool and
+ * split-packet bounce buffer.  Safe on a partially constructed context
+ * (the alloc_dma_rcv_ctx error paths call it): every pointer is
+ * checked before use, and a NULL ->ohci marks the context as already
+ * freed, making the call a no-op. */
+static void free_dma_rcv_ctx(struct dma_rcv_ctx *d)
+{
+	int i;
+	struct ti_ohci *ohci = d->ohci;
+
+	if (ohci == NULL)
+		return;
+
+	DBGMSG("Freeing dma_rcv_ctx %d", d->ctx);
+
+	if (d->buf_cpu) {
+		for (i=0; i<d->num_desc; i++)
+			if (d->buf_cpu[i] && d->buf_bus[i]) {
+				pci_free_consistent(
+					ohci->dev, d->buf_size,
+					d->buf_cpu[i], d->buf_bus[i]);
+				OHCI_DMA_FREE("consistent dma_rcv buf[%d]", i);
+			}
+		kfree(d->buf_cpu);
+		kfree(d->buf_bus);
+	}
+	if (d->prg_cpu) {
+		for (i=0; i<d->num_desc; i++)
+			if (d->prg_cpu[i] && d->prg_bus[i]) {
+				pci_pool_free(d->prg_pool, d->prg_cpu[i], d->prg_bus[i]);
+				OHCI_DMA_FREE("consistent dma_rcv prg[%d]", i);
+			}
+		pci_pool_destroy(d->prg_pool);
+		OHCI_DMA_FREE("dma_rcv prg pool");
+		kfree(d->prg_cpu);
+		kfree(d->prg_bus);
+	}
+	/* kfree(NULL) is a no-op, so no NULL check is needed */
+	kfree(d->spb);
+
+	/* Mark this context as freed. */
+	d->ohci = NULL;
+}
+
+/*
+ * Allocate and initialize a receive DMA context: per-descriptor data
+ * buffers, a descriptor ('prg') ring from a dedicated pci pool, and
+ * the split-packet bounce buffer.  On any failure everything allocated
+ * so far is released through free_dma_rcv_ctx() and -ENOMEM/-EBUSY is
+ * returned.  For DMA_CTX_ISO the hardware context number is assigned
+ * by ohci1394_register_iso_tasklet(), so the register offsets are only
+ * computed afterwards.
+ * NOTE(review): pool_name/num_allocs are function-static with no
+ * locking, so concurrent device probes would race -- presumably probing
+ * is serialized here; confirm.
+ */
+static int
+alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
+		  enum context_type type, int ctx, int num_desc,
+		  int buf_size, int split_buf_size, int context_base)
+{
+	int i, len;
+	static int num_allocs;
+	static char pool_name[20];
+
+	d->ohci = ohci;
+	d->type = type;
+	d->ctx = ctx;
+
+	d->num_desc = num_desc;
+	d->buf_size = buf_size;
+	d->split_buf_size = split_buf_size;
+
+	d->ctrlSet = 0;
+	d->ctrlClear = 0;
+	d->cmdPtr = 0;
+
+	d->buf_cpu = kmalloc(d->num_desc * sizeof(quadlet_t*), GFP_ATOMIC);
+	d->buf_bus = kmalloc(d->num_desc * sizeof(dma_addr_t), GFP_ATOMIC);
+
+	if (d->buf_cpu == NULL || d->buf_bus == NULL) {
+		PRINT(KERN_ERR, "Failed to allocate dma buffer");
+		free_dma_rcv_ctx(d);
+		return -ENOMEM;
+	}
+	memset(d->buf_cpu, 0, d->num_desc * sizeof(quadlet_t*));
+	memset(d->buf_bus, 0, d->num_desc * sizeof(dma_addr_t));
+
+	d->prg_cpu = kmalloc(d->num_desc * sizeof(struct dma_cmd*),
+				GFP_ATOMIC);
+	d->prg_bus = kmalloc(d->num_desc * sizeof(dma_addr_t), GFP_ATOMIC);
+
+	if (d->prg_cpu == NULL || d->prg_bus == NULL) {
+		PRINT(KERN_ERR, "Failed to allocate dma prg");
+		free_dma_rcv_ctx(d);
+		return -ENOMEM;
+	}
+	memset(d->prg_cpu, 0, d->num_desc * sizeof(struct dma_cmd*));
+	memset(d->prg_bus, 0, d->num_desc * sizeof(dma_addr_t));
+
+	d->spb = kmalloc(d->split_buf_size, GFP_ATOMIC);
+
+	if (d->spb == NULL) {
+		PRINT(KERN_ERR, "Failed to allocate split buffer");
+		free_dma_rcv_ctx(d);
+		return -ENOMEM;
+	}
+	
+	/* each allocation gets a uniquely named pool for diagnostics */
+	len = sprintf(pool_name, "ohci1394_rcv_prg");
+	sprintf(pool_name+len, "%d", num_allocs);
+	d->prg_pool = pci_pool_create(pool_name, ohci->dev,
+				sizeof(struct dma_cmd), 4, 0);
+	if(d->prg_pool == NULL)
+	{
+		PRINT(KERN_ERR, "pci_pool_create failed for %s", pool_name);
+		free_dma_rcv_ctx(d);
+		return -ENOMEM;
+	}
+	num_allocs++;
+
+	OHCI_DMA_ALLOC("dma_rcv prg pool");
+
+	for (i=0; i<d->num_desc; i++) {
+		d->buf_cpu[i] = pci_alloc_consistent(ohci->dev,
+						     d->buf_size,
+						     d->buf_bus+i);
+		OHCI_DMA_ALLOC("consistent dma_rcv buf[%d]", i);
+
+		if (d->buf_cpu[i] != NULL) {
+			memset(d->buf_cpu[i], 0, d->buf_size);
+		} else {
+			PRINT(KERN_ERR,
+			      "Failed to allocate dma buffer");
+			free_dma_rcv_ctx(d);
+			return -ENOMEM;
+		}
+
+		d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, SLAB_KERNEL, d->prg_bus+i);
+		OHCI_DMA_ALLOC("pool dma_rcv prg[%d]", i);
+
+                if (d->prg_cpu[i] != NULL) {
+                        memset(d->prg_cpu[i], 0, sizeof(struct dma_cmd));
+		} else {
+			PRINT(KERN_ERR,
+			      "Failed to allocate dma prg");
+			free_dma_rcv_ctx(d);
+			return -ENOMEM;
+		}
+	}
+
+        spin_lock_init(&d->lock);
+
+	if (type == DMA_CTX_ISO) {
+		ohci1394_init_iso_tasklet(&ohci->ir_legacy_tasklet,
+					  OHCI_ISO_MULTICHANNEL_RECEIVE,
+					  dma_rcv_tasklet, (unsigned long) d);
+		if (ohci1394_register_iso_tasklet(ohci,
+						  &ohci->ir_legacy_tasklet) < 0) {
+			PRINT(KERN_ERR, "No IR DMA context available");
+			free_dma_rcv_ctx(d);
+			return -EBUSY;
+		}
+
+		/* the IR context can be assigned to any DMA context
+		 * by ohci1394_register_iso_tasklet */
+		d->ctx = ohci->ir_legacy_tasklet.context;
+		d->ctrlSet = OHCI1394_IsoRcvContextControlSet + 32*d->ctx;
+		d->ctrlClear = OHCI1394_IsoRcvContextControlClear + 32*d->ctx;
+		d->cmdPtr = OHCI1394_IsoRcvCommandPtr + 32*d->ctx;
+		d->ctxtMatch = OHCI1394_IsoRcvContextMatch + 32*d->ctx;
+	} else {
+		d->ctrlSet = context_base + OHCI1394_ContextControlSet;
+		d->ctrlClear = context_base + OHCI1394_ContextControlClear;
+		d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
+
+		tasklet_init (&d->task, dma_rcv_tasklet, (unsigned long) d);
+	}
+
+	return 0;
+}
+
+/* Release a transmit DMA context's descriptor ring and its pci pool.
+ * Tolerates partially constructed contexts (alloc error paths) and is
+ * a no-op once the context has already been freed. */
+static void free_dma_trm_ctx(struct dma_trm_ctx *d)
+{
+	struct ti_ohci *ohci = d->ohci;
+	int idx;
+
+	if (!ohci)
+		return;
+
+	DBGMSG("Freeing dma_trm_ctx %d", d->ctx);
+
+	if (d->prg_cpu) {
+		for (idx = 0; idx < d->num_desc; idx++) {
+			if (!d->prg_cpu[idx] || !d->prg_bus[idx])
+				continue;
+			pci_pool_free(d->prg_pool, d->prg_cpu[idx], d->prg_bus[idx]);
+			OHCI_DMA_FREE("pool dma_trm prg[%d]", idx);
+		}
+		pci_pool_destroy(d->prg_pool);
+		OHCI_DMA_FREE("dma_trm prg pool");
+		kfree(d->prg_cpu);
+		kfree(d->prg_bus);
+	}
+
+	/* A NULL ->ohci marks the context as freed. */
+	d->ohci = NULL;
+}
+
+/*
+ * Allocate and initialize a transmit DMA context: a descriptor ('prg')
+ * ring backed by a dedicated, uniquely named pci pool.  On failure
+ * everything allocated so far is released via free_dma_trm_ctx() and
+ * -ENOMEM/-EBUSY is returned.  For DMA_CTX_ISO the hardware context
+ * number is assigned by ohci1394_register_iso_tasklet(), so register
+ * offsets are computed afterwards.
+ */
+static int
+alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
+		  enum context_type type, int ctx, int num_desc,
+		  int context_base)
+{
+	int i, len;
+	static char pool_name[20];
+	static int num_allocs=0;
+
+	d->ohci = ohci;
+	d->type = type;
+	d->ctx = ctx;
+	d->num_desc = num_desc;
+	d->ctrlSet = 0;
+	d->ctrlClear = 0;
+	d->cmdPtr = 0;
+
+	d->prg_cpu = kmalloc(d->num_desc * sizeof(struct at_dma_prg*),
+			     GFP_KERNEL);
+	d->prg_bus = kmalloc(d->num_desc * sizeof(dma_addr_t), GFP_KERNEL);
+
+	if (d->prg_cpu == NULL || d->prg_bus == NULL) {
+		PRINT(KERN_ERR, "Failed to allocate at dma prg");
+		free_dma_trm_ctx(d);
+		return -ENOMEM;
+	}
+	memset(d->prg_cpu, 0, d->num_desc * sizeof(struct at_dma_prg*));
+	memset(d->prg_bus, 0, d->num_desc * sizeof(dma_addr_t));
+
+	len = sprintf(pool_name, "ohci1394_trm_prg");
+	sprintf(pool_name+len, "%d", num_allocs);
+	d->prg_pool = pci_pool_create(pool_name, ohci->dev,
+				sizeof(struct at_dma_prg), 4, 0);
+	if (d->prg_pool == NULL) {
+		PRINT(KERN_ERR, "pci_pool_create failed for %s", pool_name);
+		free_dma_trm_ctx(d);
+		return -ENOMEM;
+	}
+	num_allocs++;
+
+	/* fix copy-paste from the rcv allocator: this is the trm pool,
+	 * matching OHCI_DMA_FREE("dma_trm prg pool") in free_dma_trm_ctx */
+	OHCI_DMA_ALLOC("dma_trm prg pool");
+
+	for (i = 0; i < d->num_desc; i++) {
+		d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, SLAB_KERNEL, d->prg_bus+i);
+		OHCI_DMA_ALLOC("pool dma_trm prg[%d]", i);
+
+		if (d->prg_cpu[i] != NULL) {
+			memset(d->prg_cpu[i], 0, sizeof(struct at_dma_prg));
+		} else {
+			PRINT(KERN_ERR,
+			      "Failed to allocate at dma prg");
+			free_dma_trm_ctx(d);
+			return -ENOMEM;
+		}
+	}
+
+	spin_lock_init(&d->lock);
+
+	/* initialize tasklet */
+	if (type == DMA_CTX_ISO) {
+		ohci1394_init_iso_tasklet(&ohci->it_legacy_tasklet, OHCI_ISO_TRANSMIT,
+					  dma_trm_tasklet, (unsigned long) d);
+		if (ohci1394_register_iso_tasklet(ohci,
+						  &ohci->it_legacy_tasklet) < 0) {
+			PRINT(KERN_ERR, "No IT DMA context available");
+			free_dma_trm_ctx(d);
+			return -EBUSY;
+		}
+
+		/* IT can be assigned to any context by register_iso_tasklet */
+		d->ctx = ohci->it_legacy_tasklet.context;
+		d->ctrlSet = OHCI1394_IsoXmitContextControlSet + 16 * d->ctx;
+		d->ctrlClear = OHCI1394_IsoXmitContextControlClear + 16 * d->ctx;
+		d->cmdPtr = OHCI1394_IsoXmitCommandPtr + 16 * d->ctx;
+	} else {
+		d->ctrlSet = context_base + OHCI1394_ContextControlSet;
+		d->ctrlClear = context_base + OHCI1394_ContextControlClear;
+		d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
+		tasklet_init (&d->task, dma_trm_tasklet, (unsigned long)d);
+	}
+
+	return 0;
+}
+
+/* Load a new config ROM image: the ROM header and bus-options quadlets
+ * go straight to their registers, and the full image is copied into
+ * the DMA-visible buffer the controller serves ROM reads from. */
+static void ohci_set_hw_config_rom(struct hpsb_host *host, quadlet_t *config_rom)
+{
+	struct ti_ohci *ohci = host->hostdata;
+
+	reg_write(ohci, OHCI1394_ConfigROMhdr, be32_to_cpu(config_rom[0]));
+	reg_write(ohci, OHCI1394_BusOptions, be32_to_cpu(config_rom[2]));
+
+	memcpy(ohci->csr_config_rom_cpu, config_rom, OHCI_CONFIG_ROM_LEN);
+}
+
+
+/* Perform a hardware CSR compare-swap on one of the controller's four
+ * serial-bus CSR registers (selected by reg & 0x3), polling up to
+ * OHCI_LOOP_COUNT ms for the csrDone bit, and return the value read
+ * back from CSRData (the old register contents). */
+static quadlet_t ohci_hw_csr_reg(struct hpsb_host *host, int reg,
+                                 quadlet_t data, quadlet_t compare)
+{
+	struct ti_ohci *ohci = host->hostdata;
+	int i;
+
+	reg_write(ohci, OHCI1394_CSRData, data);
+	reg_write(ohci, OHCI1394_CSRCompareData, compare);
+	reg_write(ohci, OHCI1394_CSRControl, reg & 0x3);
+
+	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
+		/* bit 31 of CSRControl signals completion */
+		if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000)
+			break;
+
+		mdelay(1);
+	}
+
+	return reg_read(ohci, OHCI1394_CSRData);
+}
+
+/* Host-driver operations this OHCI backend exports to the ieee1394
+ * core. */
+static struct hpsb_host_driver ohci1394_driver = {
+	.owner =		THIS_MODULE,
+	.name =			OHCI1394_DRIVER_NAME,
+	.set_hw_config_rom =	ohci_set_hw_config_rom,
+	.transmit_packet =	ohci_transmit,
+	.devctl =		ohci_devctl,
+	.isoctl =               ohci_isoctl,
+	.hw_csr_reg =		ohci_hw_csr_reg,
+};
+
+
+
+/***********************************
+ * PCI Driver Interface functions  *
+ ***********************************/
+
+/* Probe-time bail-out helper: log the error, undo everything done so
+ * far via ohci1394_pci_remove() (driven by ohci->init_state), and
+ * return 'err' from the enclosing probe function.  Note the hidden
+ * 'return' -- only usable directly inside ohci1394_pci_probe(). */
+#define FAIL(err, fmt, args...)			\
+do {						\
+	PRINT_G(KERN_ERR, fmt , ## args);	\
+        ohci1394_pci_remove(dev);               \
+	return err;				\
+} while (0)
+
+/* Probe one OHCI controller: enable the PCI device, map its register
+ * window, allocate the config ROM / self-ID / async DMA context
+ * buffers, install the shared IRQ handler, and register the host with
+ * the ieee1394 core.  Each completed step advances ohci->init_state so
+ * that FAIL() -> ohci1394_pci_remove() can unwind exactly the steps
+ * that succeeded. */
+static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
+					const struct pci_device_id *ent)
+{
+	static int version_printed = 0;
+
+	struct hpsb_host *host;
+	struct ti_ohci *ohci;	/* shortcut to currently handled device */
+	unsigned long ohci_base;
+
+	if (version_printed++ == 0)
+		PRINT_G(KERN_INFO, "%s", version);
+
+        if (pci_enable_device(dev))
+		FAIL(-ENXIO, "Failed to enable OHCI hardware");
+        pci_set_master(dev);
+
+	host = hpsb_alloc_host(&ohci1394_driver, sizeof(struct ti_ohci), &dev->dev);
+	if (!host) FAIL(-ENOMEM, "Failed to allocate host structure");
+
+	ohci = host->hostdata;
+	ohci->dev = dev;
+	ohci->host = host;
+	ohci->init_state = OHCI_INIT_ALLOC_HOST;
+	host->pdev = dev;
+	pci_set_drvdata(dev, ohci);
+
+	/* We don't want hardware swapping */
+	pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);
+
+	/* Some oddball Apple controllers do not order the selfid
+	 * properly, so we make up for it here.  */
+#ifndef __LITTLE_ENDIAN
+	/* XXX: Need a better way to check this. I'm wondering if we can
+	 * read the values of the OHCI1394_PCI_HCI_Control and the
+	 * noByteSwapData registers to see if they were not cleared to
+	 * zero. Should this work? Obviously it's not defined what these
+	 * registers will read when they aren't supported. Bleh! */
+	if (dev->vendor == PCI_VENDOR_ID_APPLE &&
+	    dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW) {
+		ohci->no_swap_incoming = 1;
+		ohci->selfid_swap = 0;
+	} else
+		ohci->selfid_swap = 1;
+#endif
+
+
+#ifndef PCI_DEVICE_ID_NVIDIA_NFORCE2_FW
+#define PCI_DEVICE_ID_NVIDIA_NFORCE2_FW 0x006e
+#endif
+
+	/* These chipsets require a bit of extra care when checking after
+	 * a busreset.  */
+	if ((dev->vendor == PCI_VENDOR_ID_APPLE &&
+	     dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW) ||
+	    (dev->vendor ==  PCI_VENDOR_ID_NVIDIA &&
+	     dev->device == PCI_DEVICE_ID_NVIDIA_NFORCE2_FW))
+		ohci->check_busreset = 1;
+
+	/* We hardwire the MMIO length, since some CardBus adaptors
+	 * fail to report the right length.  Anyway, the ohci spec
+	 * clearly says it's 2kb, so this shouldn't be a problem. */
+	ohci_base = pci_resource_start(dev, 0);
+	if (pci_resource_len(dev, 0) != OHCI1394_REGISTER_SIZE)
+		PRINT(KERN_WARNING, "Unexpected PCI resource length of %lx!",
+		      pci_resource_len(dev, 0));
+
+	/* Seems PCMCIA handles this internally. Not sure why. Seems
+	 * pretty bogus to force a driver to special case this.  */
+#ifndef PCMCIA
+	if (!request_mem_region (ohci_base, OHCI1394_REGISTER_SIZE, OHCI1394_DRIVER_NAME))
+		FAIL(-ENOMEM, "MMIO resource (0x%lx - 0x%lx) unavailable",
+		     ohci_base, ohci_base + OHCI1394_REGISTER_SIZE);
+#endif
+	ohci->init_state = OHCI_INIT_HAVE_MEM_REGION;
+
+	ohci->registers = ioremap(ohci_base, OHCI1394_REGISTER_SIZE);
+	if (ohci->registers == NULL)
+		FAIL(-ENXIO, "Failed to remap registers - card not accessible");
+	ohci->init_state = OHCI_INIT_HAVE_IOMAPPING;
+	DBGMSG("Remapped memory spaces reg 0x%p", ohci->registers);
+
+	/* csr_config rom allocation */
+	ohci->csr_config_rom_cpu =
+		pci_alloc_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
+				     &ohci->csr_config_rom_bus);
+	OHCI_DMA_ALLOC("consistent csr_config_rom");
+	if (ohci->csr_config_rom_cpu == NULL)
+		FAIL(-ENOMEM, "Failed to allocate buffer config rom");
+	ohci->init_state = OHCI_INIT_HAVE_CONFIG_ROM_BUFFER;
+
+	/* self-id dma buffer allocation */
+	ohci->selfid_buf_cpu =
+		pci_alloc_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
+                      &ohci->selfid_buf_bus);
+	OHCI_DMA_ALLOC("consistent selfid_buf");
+
+	if (ohci->selfid_buf_cpu == NULL)
+		FAIL(-ENOMEM, "Failed to allocate DMA buffer for self-id packets");
+	ohci->init_state = OHCI_INIT_HAVE_SELFID_BUFFER;
+
+	if ((unsigned long)ohci->selfid_buf_cpu & 0x1fff)
+		PRINT(KERN_INFO, "SelfID buffer %p is not aligned on "
+		      "8Kb boundary... may cause problems on some CXD3222 chip",
+		      ohci->selfid_buf_cpu);
+
+	/* No self-id errors at startup */
+	ohci->self_id_errors = 0;
+
+	ohci->init_state = OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE;
+	/* AR DMA request context allocation */
+	if (alloc_dma_rcv_ctx(ohci, &ohci->ar_req_context,
+			      DMA_CTX_ASYNC_REQ, 0, AR_REQ_NUM_DESC,
+			      AR_REQ_BUF_SIZE, AR_REQ_SPLIT_BUF_SIZE,
+			      OHCI1394_AsReqRcvContextBase) < 0)
+		FAIL(-ENOMEM, "Failed to allocate AR Req context");
+
+	/* AR DMA response context allocation */
+	if (alloc_dma_rcv_ctx(ohci, &ohci->ar_resp_context,
+			      DMA_CTX_ASYNC_RESP, 0, AR_RESP_NUM_DESC,
+			      AR_RESP_BUF_SIZE, AR_RESP_SPLIT_BUF_SIZE,
+			      OHCI1394_AsRspRcvContextBase) < 0)
+		FAIL(-ENOMEM, "Failed to allocate AR Resp context");
+
+	/* AT DMA request context */
+	if (alloc_dma_trm_ctx(ohci, &ohci->at_req_context,
+			      DMA_CTX_ASYNC_REQ, 0, AT_REQ_NUM_DESC,
+			      OHCI1394_AsReqTrContextBase) < 0)
+		FAIL(-ENOMEM, "Failed to allocate AT Req context");
+
+	/* AT DMA response context */
+	if (alloc_dma_trm_ctx(ohci, &ohci->at_resp_context,
+			      DMA_CTX_ASYNC_RESP, 1, AT_RESP_NUM_DESC,
+			      OHCI1394_AsRspTrContextBase) < 0)
+		FAIL(-ENOMEM, "Failed to allocate AT Resp context");
+
+	/* Start off with a soft reset, to clear everything to a sane
+	 * state. */
+	ohci_soft_reset(ohci);
+
+	/* Now enable LPS, which we need in order to start accessing
+	 * most of the registers.  In fact, on some cards (ALI M5251),
+	 * accessing registers in the SClk domain without LPS enabled
+	 * will lock up the machine.  Wait 50msec to make sure we have
+	 * full link enabled.  */
+	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_LPS);
+
+	/* Disable and clear interrupts */
+	reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
+	reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
+
+	mdelay(50);
+
+	/* Determine the number of available IR and IT contexts. */
+	ohci->nb_iso_rcv_ctx =
+		get_nb_iso_ctx(ohci, OHCI1394_IsoRecvIntMaskSet);
+	DBGMSG("%d iso receive contexts available",
+	       ohci->nb_iso_rcv_ctx);
+
+	ohci->nb_iso_xmit_ctx =
+		get_nb_iso_ctx(ohci, OHCI1394_IsoXmitIntMaskSet);
+	DBGMSG("%d iso transmit contexts available",
+	       ohci->nb_iso_xmit_ctx);
+
+	/* Set the usage bits for non-existent contexts so they can't
+	 * be allocated */
+	ohci->ir_ctx_usage = ~0 << ohci->nb_iso_rcv_ctx;
+	ohci->it_ctx_usage = ~0 << ohci->nb_iso_xmit_ctx;
+
+	INIT_LIST_HEAD(&ohci->iso_tasklet_list);
+	spin_lock_init(&ohci->iso_tasklet_list_lock);
+	ohci->ISO_channel_usage = 0;
+        spin_lock_init(&ohci->IR_channel_lock);
+
+	/* Allocate the IR DMA context right here so we don't have
+	 * to do it in interrupt path - note that this doesn't
+	 * waste much memory and avoids the jugglery required to
+	 * allocate it in IRQ path. */
+	if (alloc_dma_rcv_ctx(ohci, &ohci->ir_legacy_context,
+			      DMA_CTX_ISO, 0, IR_NUM_DESC,
+			      IR_BUF_SIZE, IR_SPLIT_BUF_SIZE,
+			      OHCI1394_IsoRcvContextBase) < 0) {
+		FAIL(-ENOMEM, "Cannot allocate IR Legacy DMA context");
+	}
+
+	/* We hopefully don't have to pre-allocate IT DMA like we did
+	 * for IR DMA above. Allocate it on-demand and mark inactive. */
+	ohci->it_legacy_context.ohci = NULL;
+
+	if (request_irq(dev->irq, ohci_irq_handler, SA_SHIRQ,
+			 OHCI1394_DRIVER_NAME, ohci))
+		FAIL(-ENOMEM, "Failed to allocate shared interrupt %d", dev->irq);
+
+	ohci->init_state = OHCI_INIT_HAVE_IRQ;
+	ohci_initialize(ohci);
+
+	/* Set certain csr values */
+	host->csr.guid_hi = reg_read(ohci, OHCI1394_GUIDHi);
+	host->csr.guid_lo = reg_read(ohci, OHCI1394_GUIDLo);
+	host->csr.cyc_clk_acc = 100;  /* how do we determine clk accuracy? */
+	host->csr.max_rec = (reg_read(ohci, OHCI1394_BusOptions) >> 12) & 0xf;
+	host->csr.lnk_spd = reg_read(ohci, OHCI1394_BusOptions) & 0x7;
+
+	/* Tell the highlevel this host is ready */
+	if (hpsb_add_host(host))
+		FAIL(-ENOMEM, "Failed to register host with highlevel");
+
+	ohci->init_state = OHCI_INIT_DONE;
+
+	return 0;
+#undef FAIL
+}
+
+/* Tear down a (possibly partially initialized) controller.  Uses
+ * ohci->init_state to fall through the switch and undo exactly the
+ * steps that ohci1394_pci_probe() completed; it is also invoked from
+ * the probe failure path via the FAIL() macro, so every case must be
+ * safe for a half-constructed device. */
+static void ohci1394_pci_remove(struct pci_dev *pdev)
+{
+	struct ti_ohci *ohci;
+	struct device *dev;
+
+	ohci = pci_get_drvdata(pdev);
+	if (!ohci)
+		return;
+
+	dev = get_device(&ohci->host->device);
+
+	switch (ohci->init_state) {
+	case OHCI_INIT_DONE:
+		stop_dma_rcv_ctx(&ohci->ir_legacy_context);
+		hpsb_remove_host(ohci->host);
+
+		/* Clear out BUS Options */
+		reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
+		reg_write(ohci, OHCI1394_BusOptions,
+			  (reg_read(ohci, OHCI1394_BusOptions) & 0x0000f007) |
+			  0x00ff0000);
+		memset(ohci->csr_config_rom_cpu, 0, OHCI_CONFIG_ROM_LEN);
+		/* fall through */
+
+	case OHCI_INIT_HAVE_IRQ:
+		/* Clear interrupt registers */
+		reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
+		reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
+		reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
+		reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);
+		reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
+		reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);
+
+		/* Disable IRM Contender */
+		set_phy_reg(ohci, 4, ~0xc0 & get_phy_reg(ohci, 4));
+
+		/* Clear link control register */
+		reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);
+
+		/* Let all other nodes know to ignore us */
+		ohci_devctl(ohci->host, RESET_BUS, LONG_RESET_NO_FORCE_ROOT);
+
+		/* Soft reset before we start - this disables
+		 * interrupts and clears linkEnable and LPS. */
+		ohci_soft_reset(ohci);
+		free_irq(ohci->dev->irq, ohci);
+		/* fall through */
+
+	case OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE:
+		/* The ohci_soft_reset() stops all DMA contexts, so we
+		 * dont need to do this.  */
+		/* Free AR dma */
+		free_dma_rcv_ctx(&ohci->ar_req_context);
+		free_dma_rcv_ctx(&ohci->ar_resp_context);
+
+		/* Free AT dma */
+		free_dma_trm_ctx(&ohci->at_req_context);
+		free_dma_trm_ctx(&ohci->at_resp_context);
+
+		/* Free IT dma */
+		free_dma_trm_ctx(&ohci->it_legacy_context);
+
+		/* Free IR legacy dma.  (This used to be freed twice —
+		 * once under a mislabeled "Free IR dma" comment — which
+		 * was a redundant double release; free it exactly once.) */
+		free_dma_rcv_ctx(&ohci->ir_legacy_context);
+		/* fall through */
+
+	case OHCI_INIT_HAVE_SELFID_BUFFER:
+		pci_free_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
+				    ohci->selfid_buf_cpu,
+				    ohci->selfid_buf_bus);
+		OHCI_DMA_FREE("consistent selfid_buf");
+		/* fall through */
+
+	case OHCI_INIT_HAVE_CONFIG_ROM_BUFFER:
+		pci_free_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
+				    ohci->csr_config_rom_cpu,
+				    ohci->csr_config_rom_bus);
+		OHCI_DMA_FREE("consistent csr_config_rom");
+		/* fall through */
+
+	case OHCI_INIT_HAVE_IOMAPPING:
+		iounmap(ohci->registers);
+		/* fall through */
+
+	case OHCI_INIT_HAVE_MEM_REGION:
+#ifndef PCMCIA
+		release_mem_region(pci_resource_start(ohci->dev, 0),
+				   OHCI1394_REGISTER_SIZE);
+#endif
+
+#ifdef CONFIG_PPC_PMAC
+	/* On UniNorth, power down the cable and turn off the chip
+	 * clock when the module is removed to save power on
+	 * laptops. Turning it back ON is done by the arch code when
+	 * pci_enable_device() is called */
+	{
+		struct device_node* of_node;
+
+		of_node = pci_device_to_OF_node(ohci->dev);
+		if (of_node) {
+			pmac_call_feature(PMAC_FTR_1394_ENABLE, of_node, 0, 0);
+			pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, of_node, 0, 0);
+		}
+	}
+#endif /* CONFIG_PPC_PMAC */
+		/* fall through */
+
+	case OHCI_INIT_ALLOC_HOST:
+		pci_set_drvdata(ohci->dev, NULL);
+	}
+
+	if (dev)
+		put_device(dev);
+}
+
+
+/* PCI resume hook: re-enable the FireWire cell on PowerMacs, then
+ * re-enable the PCI device itself.  Returns the pci_enable_device()
+ * status — previously the return value was silently discarded, so a
+ * failed resume was reported as success. */
+static int ohci1394_pci_resume (struct pci_dev *pdev)
+{
+#ifdef CONFIG_PMAC_PBOOK
+	{
+		struct device_node *of_node;
+
+		/* Re-enable 1394 */
+		of_node = pci_device_to_OF_node (pdev);
+		if (of_node)
+			pmac_call_feature (PMAC_FTR_1394_ENABLE, of_node, 0, 1);
+	}
+#endif
+
+	/* Propagate any failure to the PM core instead of ignoring it. */
+	return pci_enable_device(pdev);
+}
+
+
+/* PCI suspend hook.  Only powers down the FireWire cell on PowerBooks;
+ * the 'state' argument is unused.  NOTE(review): no pci_save_state()
+ * or controller quiescing is done here — presumably relying on the
+ * platform/arch code; verify against PM requirements. */
+static int ohci1394_pci_suspend (struct pci_dev *pdev, pm_message_t state)
+{
+#ifdef CONFIG_PMAC_PBOOK
+	{
+		struct device_node *of_node;
+
+		/* Disable 1394 */
+		of_node = pci_device_to_OF_node (pdev);
+		if (of_node)
+			pmac_call_feature(PMAC_FTR_1394_ENABLE, of_node, 0, 0);
+	}
+#endif
+
+	return 0;
+}
+
+
+/* PCI class code for OHCI-programmed FireWire controllers
+ * (serial-bus/FireWire base class with programming interface 0x10). */
+#define PCI_CLASS_FIREWIRE_OHCI     ((PCI_CLASS_SERIAL_FIREWIRE << 8) | 0x10)
+
+/* Match every PCI device advertising the OHCI FireWire class code,
+ * regardless of vendor/device/subsystem IDs. */
+static struct pci_device_id ohci1394_pci_tbl[] = {
+	{
+		.class = 	PCI_CLASS_FIREWIRE_OHCI,
+		.class_mask = 	PCI_ANY_ID,
+		.vendor =	PCI_ANY_ID,
+		.device =	PCI_ANY_ID,
+		.subvendor =	PCI_ANY_ID,
+		.subdevice =	PCI_ANY_ID,
+	},
+	{ 0, },			/* terminator */
+};
+
+MODULE_DEVICE_TABLE(pci, ohci1394_pci_tbl);
+
+/* PCI driver glue: probe/remove plus minimal power-management hooks. */
+static struct pci_driver ohci1394_pci_driver = {
+	.name =		OHCI1394_DRIVER_NAME,
+	.id_table =	ohci1394_pci_tbl,
+	.probe =	ohci1394_pci_probe,
+	.remove =	ohci1394_pci_remove,
+	.resume =	ohci1394_pci_resume,
+	.suspend =	ohci1394_pci_suspend,
+};
+
+
+
+/***********************************
+ * OHCI1394 Video Interface        *
+ ***********************************/
+
+/* essentially the only purpose of this code is to allow another
+   module to hook into ohci's interrupt handler */
+
+/* Stop a DMA context: write the run bit (0x8000) to the context's
+ * ContextControlClear register ('reg'), then poll until the hardware
+ * deasserts the active bit (0x400).  Gives up after 5000 * 10us
+ * (~50 ms).  Returns 0 on success, 1 if the context refused to stop.
+ * 'msg' is an optional tag for the log output. */
+int ohci1394_stop_context(struct ti_ohci *ohci, int reg, char *msg)
+{
+	int i=0;
+
+	/* stop the channel program if it's still running */
+	reg_write(ohci, reg, 0x8000);
+
+	/* Wait until it effectively stops */
+	while (reg_read(ohci, reg) & 0x400) {
+		i++;
+		if (i>5000) {
+			PRINT(KERN_ERR,
+			      "Runaway loop while stopping context: %s...", msg ? msg : "");
+			return 1;
+		}
+
+		mb();
+		udelay(10);
+	}
+	if (msg) PRINT(KERN_ERR, "%s: dma prg stopped", msg);
+	return 0;
+}
+
+/* Prepare an iso tasklet for later registration with a controller.
+ * The embedded list head is initialized up front so that list_del()
+ * in the unregister path is always safe, whether or not the tasklet
+ * was ever added to a list. */
+void ohci1394_init_iso_tasklet(struct ohci1394_iso_tasklet *tasklet, int type,
+			       void (*func)(unsigned long), unsigned long data)
+{
+	tasklet->type = type;
+	tasklet_init(&tasklet->tasklet, func, data);
+	INIT_LIST_HEAD(&tasklet->link);
+}
+
+/* Claim a free hardware ISO context for 'tasklet' and put it on the
+ * controller's tasklet list.  Returns 0 on success, -EBUSY when no
+ * context of the requested kind is available. */
+int ohci1394_register_iso_tasklet(struct ti_ohci *ohci,
+				  struct ohci1394_iso_tasklet *tasklet)
+{
+	unsigned long flags;
+	unsigned long *ctx_usage;
+	int nb_ctx, ctx, ret = -EBUSY;
+
+	if (tasklet->type == OHCI_ISO_TRANSMIT) {
+		nb_ctx = ohci->nb_iso_xmit_ctx;
+		ctx_usage = &ohci->it_ctx_usage;
+	} else {
+		/* only one receive context can be multichannel (OHCI sec 10.4.1) */
+		if (tasklet->type == OHCI_ISO_MULTICHANNEL_RECEIVE &&
+		    test_and_set_bit(0, &ohci->ir_multichannel_used))
+			return ret;
+
+		nb_ctx = ohci->nb_iso_rcv_ctx;
+		ctx_usage = &ohci->ir_ctx_usage;
+	}
+
+	spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
+
+	for (ctx = 0; ctx < nb_ctx; ctx++) {
+		if (test_and_set_bit(ctx, ctx_usage))
+			continue;
+		tasklet->context = ctx;
+		list_add_tail(&tasklet->link, &ohci->iso_tasklet_list);
+		ret = 0;
+		break;
+	}
+
+	spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
+
+	return ret;
+}
+
+/* Release the hardware ISO context owned by 'tasklet' and unlink it
+ * from the controller's tasklet list. */
+void ohci1394_unregister_iso_tasklet(struct ti_ohci *ohci,
+				     struct ohci1394_iso_tasklet *tasklet)
+{
+	unsigned long flags;
+
+	/* Make sure the handler is not running before releasing the slot. */
+	tasklet_kill(&tasklet->tasklet);
+
+	spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
+
+	switch (tasklet->type) {
+	case OHCI_ISO_TRANSMIT:
+		clear_bit(tasklet->context, &ohci->it_ctx_usage);
+		break;
+	case OHCI_ISO_MULTICHANNEL_RECEIVE:
+		clear_bit(0, &ohci->ir_multichannel_used);
+		/* fall through */
+	case OHCI_ISO_RECEIVE:
+		clear_bit(tasklet->context, &ohci->ir_ctx_usage);
+		break;
+	}
+
+	list_del(&tasklet->link);
+
+	spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
+}
+
+/* Exported for the iso client drivers (video1394, dv1394, amdtp) that
+ * hook into our ISO contexts and interrupt handler. */
+EXPORT_SYMBOL(ohci1394_stop_context);
+EXPORT_SYMBOL(ohci1394_init_iso_tasklet);
+EXPORT_SYMBOL(ohci1394_register_iso_tasklet);
+EXPORT_SYMBOL(ohci1394_unregister_iso_tasklet);
+
+
+/***********************************
+ * General module initialization   *
+ ***********************************/
+
+MODULE_AUTHOR("Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>");
+MODULE_DESCRIPTION("Driver for PCI OHCI IEEE-1394 controllers");
+MODULE_LICENSE("GPL");
+
+/* Module exit: unregister the PCI driver. */
+static void __exit ohci1394_cleanup (void)
+{
+	pci_unregister_driver(&ohci1394_pci_driver);
+}
+
+/* Module entry point: register the driver with the PCI core. */
+static int __init ohci1394_init(void)
+{
+	return pci_register_driver(&ohci1394_pci_driver);
+}
+
+module_init(ohci1394_init);
+module_exit(ohci1394_cleanup);
diff --git a/drivers/ieee1394/ohci1394.h b/drivers/ieee1394/ohci1394.h
new file mode 100644
index 000000000000..d1758d409610
--- /dev/null
+++ b/drivers/ieee1394/ohci1394.h
@@ -0,0 +1,456 @@
+/*
+ * ohci1394.h - driver for OHCI 1394 boards
+ * Copyright (C)1999,2000 Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>
+ *                        Gord Peters <GordPeters@smarttech.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef _OHCI1394_H
+#define _OHCI1394_H
+
+#include "ieee1394_types.h"
+#include <asm/io.h>
+
+#define OHCI1394_DRIVER_NAME      "ohci1394"
+
+#define OHCI1394_MAX_AT_REQ_RETRIES	0x2
+#define OHCI1394_MAX_AT_RESP_RETRIES	0x2
+#define OHCI1394_MAX_PHYS_RESP_RETRIES	0x8
+#define OHCI1394_MAX_SELF_ID_ERRORS	16
+
+#define AR_REQ_NUM_DESC		4		/* number of AR req descriptors */
+#define AR_REQ_BUF_SIZE		PAGE_SIZE	/* size of AR req buffers */
+#define AR_REQ_SPLIT_BUF_SIZE	PAGE_SIZE	/* split packet buffer */
+
+#define AR_RESP_NUM_DESC	4		/* number of AR resp descriptors */
+#define AR_RESP_BUF_SIZE	PAGE_SIZE	/* size of AR resp buffers */
+#define AR_RESP_SPLIT_BUF_SIZE	PAGE_SIZE	/* split packet buffer */
+
+#define IR_NUM_DESC		16		/* number of IR descriptors */
+#define IR_BUF_SIZE		PAGE_SIZE	/* 4096 bytes/buffer */
+#define IR_SPLIT_BUF_SIZE	PAGE_SIZE	/* split packet buffer */
+
+#define IT_NUM_DESC		16	/* number of IT descriptors */
+
+#define AT_REQ_NUM_DESC		32	/* number of AT req descriptors */
+#define AT_RESP_NUM_DESC	32	/* number of AT resp descriptors */
+
+#define OHCI_LOOP_COUNT		100	/* Number of loops for reg read waits */
+
+#define OHCI_CONFIG_ROM_LEN	1024	/* Length of the mapped configrom space */
+
+#define OHCI1394_SI_DMA_BUF_SIZE	8192 /* length of the selfid buffer */
+
+/* PCI configuration space addresses */
+#define OHCI1394_PCI_HCI_Control 0x40
+
+/* One hardware DMA descriptor as consumed by the controller; the
+ * four-quadlet layout is dictated by the OHCI specification. */
+struct dma_cmd {
+        u32 control;
+        u32 address;
+        u32 branchAddress;
+        u32 status;
+};
+
+/*
+ * FIXME:
+ * It is important that a single at_dma_prg does not cross a page boundary
+ * The proper way to do it would be to do the check dynamically as the
+ * programs are inserted into the AT fifo.
+ */
+/* One async-transmit DMA program: two descriptors with up to four
+ * immediate quadlets between them.  Must not cross a page boundary
+ * (see the FIXME comment above this struct). */
+struct at_dma_prg {
+	struct dma_cmd begin;
+	quadlet_t data[4];	/* immediate data quadlets for 'begin' — presumably the packet header; confirm against the AT code */
+	struct dma_cmd end;
+	quadlet_t pad[4]; /* FIXME: quick hack for memory alignment */
+};
+
+/* identify whether a DMA context is asynchronous or isochronous */
+enum context_type { DMA_CTX_ASYNC_REQ, DMA_CTX_ASYNC_RESP, DMA_CTX_ISO };
+
+/* DMA receive context */
+struct dma_rcv_ctx {
+	struct ti_ohci *ohci;		/* owning controller */
+	enum context_type type;
+	int ctx;			/* hardware context number */
+	unsigned int num_desc;		/* number of descriptors/buffers */
+
+	unsigned int buf_size;
+	unsigned int split_buf_size;	/* staging buffer for packets spanning buffers */
+
+	/* dma block descriptors */
+        struct dma_cmd **prg_cpu;
+        dma_addr_t *prg_bus;
+	struct pci_pool *prg_pool;
+
+	/* dma buffers */
+        quadlet_t **buf_cpu;
+        dma_addr_t *buf_bus;
+
+        unsigned int buf_ind;
+        unsigned int buf_offset;
+        quadlet_t *spb;
+        spinlock_t lock;
+        struct tasklet_struct task;
+	int ctrlClear;			/* MMIO offset of ContextControlClear */
+	int ctrlSet;			/* MMIO offset of ContextControlSet */
+	int cmdPtr;			/* MMIO offset of CommandPtr */
+	int ctxtMatch;			/* MMIO offset of ContextMatch (iso receive) */
+};
+
+/* DMA transmit context */
+struct dma_trm_ctx {
+	struct ti_ohci *ohci;		/* owning controller */
+	enum context_type type;
+	int ctx;			/* hardware context number */
+	unsigned int num_desc;		/* number of AT programs in the ring */
+
+	/* dma block descriptors */
+        struct at_dma_prg **prg_cpu;
+	dma_addr_t *prg_bus;
+	struct pci_pool *prg_pool;
+
+        unsigned int prg_ind;
+        unsigned int sent_ind;
+	int free_prgs;
+        quadlet_t *branchAddrPtr;	/* branch field of the last queued program */
+
+	/* list of packets inserted in the AT FIFO */
+	struct list_head fifo_list;
+
+	/* list of pending packets to be inserted in the AT FIFO */
+	struct list_head pending_list;
+
+        spinlock_t lock;
+        struct tasklet_struct task;
+	int ctrlClear;			/* MMIO offset of ContextControlClear */
+	int ctrlSet;			/* MMIO offset of ContextControlSet */
+	int cmdPtr;			/* MMIO offset of CommandPtr */
+};
+
+/* A tasklet bound to one hardware ISO context.  Client modules
+ * register these so the interrupt handler can schedule them. */
+struct ohci1394_iso_tasklet {
+	struct tasklet_struct tasklet;
+	struct list_head link;		/* entry in ohci->iso_tasklet_list */
+	int context;			/* hardware context number, assigned at registration */
+	enum { OHCI_ISO_TRANSMIT, OHCI_ISO_RECEIVE,
+	       OHCI_ISO_MULTICHANNEL_RECEIVE } type;
+};
+
+/* Per-controller device state.  init_state records how far probe got,
+ * so the teardown path can unwind exactly the completed steps. */
+struct ti_ohci {
+        struct pci_dev *dev;
+
+	/* Probe progress marker; order matters — teardown falls through
+	 * from the current state downwards. */
+	enum {
+		OHCI_INIT_ALLOC_HOST,
+		OHCI_INIT_HAVE_MEM_REGION,
+		OHCI_INIT_HAVE_IOMAPPING,
+		OHCI_INIT_HAVE_CONFIG_ROM_BUFFER,
+		OHCI_INIT_HAVE_SELFID_BUFFER,
+		OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE,
+		OHCI_INIT_HAVE_IRQ,
+		OHCI_INIT_DONE,
+	} init_state;
+
+        /* remapped memory spaces */
+        void __iomem *registers;
+
+	/* dma buffer for self-id packets */
+        quadlet_t *selfid_buf_cpu;
+        dma_addr_t selfid_buf_bus;
+
+	/* buffer for csr config rom */
+        quadlet_t *csr_config_rom_cpu;
+        dma_addr_t csr_config_rom_bus;
+	int csr_config_rom_length;
+
+	unsigned int max_packet_size;
+
+        /* async receive */
+	struct dma_rcv_ctx ar_resp_context;
+	struct dma_rcv_ctx ar_req_context;
+
+	/* async transmit */
+	struct dma_trm_ctx at_resp_context;
+	struct dma_trm_ctx at_req_context;
+
+        /* iso receive */
+	int nb_iso_rcv_ctx;
+	unsigned long ir_ctx_usage; /* use test_and_set_bit() for atomicity */
+	unsigned long ir_multichannel_used; /* ditto */
+        spinlock_t IR_channel_lock;
+
+	/* iso receive (legacy API) */
+	u64 ir_legacy_channels; /* note: this differs from ISO_channel_usage;
+				   it only accounts for channels listened to
+				   by the legacy API, so that we can know when
+				   it is safe to free the legacy API context */
+
+	struct dma_rcv_ctx ir_legacy_context;
+	struct ohci1394_iso_tasklet ir_legacy_tasklet;
+
+        /* iso transmit */
+	int nb_iso_xmit_ctx;
+	unsigned long it_ctx_usage; /* use test_and_set_bit() for atomicity */
+
+	/* iso transmit (legacy API) */
+	struct dma_trm_ctx it_legacy_context;
+	struct ohci1394_iso_tasklet it_legacy_tasklet;
+
+        u64 ISO_channel_usage;
+
+        /* IEEE-1394 part follows */
+        struct hpsb_host *host;
+
+        int phyid, isroot;
+
+        spinlock_t phy_reg_lock;
+	spinlock_t event_lock;
+
+	int self_id_errors;
+
+	/* Tasklets for iso receive and transmit, used by video1394,
+	 * amdtp and dv1394 */
+
+	struct list_head iso_tasklet_list;
+	spinlock_t iso_tasklet_list_lock;
+
+	/* Swap the selfid buffer? */
+	unsigned int selfid_swap:1;
+	/* Some Apple chipset seem to swap incoming headers for us */
+	unsigned int no_swap_incoming:1;
+
+	/* Force extra paranoia checking on bus-reset handling */
+	unsigned int check_busreset:1;
+};
+
+/* Nonzero if the byte range [addr, addr + size) does not fit entirely
+ * inside a single page. */
+static inline int cross_bound(unsigned long addr, unsigned int size)
+{
+	unsigned long first_page = addr >> PAGE_SHIFT;
+	unsigned long last_page  = (addr + size - 1) >> PAGE_SHIFT;
+
+	return size > PAGE_SIZE || first_page != last_page;
+}
+
+/*
+ * Register read and write helper functions.
+ */
+/* 32-bit MMIO register write at 'offset' into the mapped window. */
+static inline void reg_write(const struct ti_ohci *ohci, int offset, u32 data)
+{
+        writel(data, ohci->registers + offset);
+}
+
+/* 32-bit MMIO register read at 'offset' into the mapped window. */
+static inline u32 reg_read(const struct ti_ohci *ohci, int offset)
+{
+        return readl(ohci->registers + offset);
+}
+
+
+/* 2 KiloBytes of register space */
+#define OHCI1394_REGISTER_SIZE                0x800
+
+/* Offsets relative to context bases defined below */
+
+#define OHCI1394_ContextControlSet            0x000
+#define OHCI1394_ContextControlClear          0x004
+#define OHCI1394_ContextCommandPtr            0x00C
+
+/* register map */
+#define OHCI1394_Version                      0x000
+#define OHCI1394_GUID_ROM                     0x004
+#define OHCI1394_ATRetries                    0x008
+#define OHCI1394_CSRData                      0x00C
+#define OHCI1394_CSRCompareData               0x010
+#define OHCI1394_CSRControl                   0x014
+#define OHCI1394_ConfigROMhdr                 0x018
+#define OHCI1394_BusID                        0x01C
+#define OHCI1394_BusOptions                   0x020
+#define OHCI1394_GUIDHi                       0x024
+#define OHCI1394_GUIDLo                       0x028
+#define OHCI1394_ConfigROMmap                 0x034
+#define OHCI1394_PostedWriteAddressLo         0x038
+#define OHCI1394_PostedWriteAddressHi         0x03C
+#define OHCI1394_VendorID                     0x040
+#define OHCI1394_HCControlSet                 0x050
+#define OHCI1394_HCControlClear               0x054
+#define  OHCI1394_HCControl_noByteSwap		0x40000000
+#define  OHCI1394_HCControl_programPhyEnable	0x00800000
+#define  OHCI1394_HCControl_aPhyEnhanceEnable	0x00400000
+#define  OHCI1394_HCControl_LPS			0x00080000
+#define  OHCI1394_HCControl_postedWriteEnable	0x00040000
+#define  OHCI1394_HCControl_linkEnable		0x00020000
+#define  OHCI1394_HCControl_softReset		0x00010000
+#define OHCI1394_SelfIDBuffer                 0x064
+#define OHCI1394_SelfIDCount                  0x068
+#define OHCI1394_IRMultiChanMaskHiSet         0x070
+#define OHCI1394_IRMultiChanMaskHiClear       0x074
+#define OHCI1394_IRMultiChanMaskLoSet         0x078
+#define OHCI1394_IRMultiChanMaskLoClear       0x07C
+#define OHCI1394_IntEventSet                  0x080
+#define OHCI1394_IntEventClear                0x084
+#define OHCI1394_IntMaskSet                   0x088
+#define OHCI1394_IntMaskClear                 0x08C
+#define OHCI1394_IsoXmitIntEventSet           0x090
+#define OHCI1394_IsoXmitIntEventClear         0x094
+#define OHCI1394_IsoXmitIntMaskSet            0x098
+#define OHCI1394_IsoXmitIntMaskClear          0x09C
+#define OHCI1394_IsoRecvIntEventSet           0x0A0
+#define OHCI1394_IsoRecvIntEventClear         0x0A4
+#define OHCI1394_IsoRecvIntMaskSet            0x0A8
+#define OHCI1394_IsoRecvIntMaskClear          0x0AC
+#define OHCI1394_InitialBandwidthAvailable    0x0B0
+#define OHCI1394_InitialChannelsAvailableHi   0x0B4
+#define OHCI1394_InitialChannelsAvailableLo   0x0B8
+#define OHCI1394_FairnessControl              0x0DC
+#define OHCI1394_LinkControlSet               0x0E0
+#define OHCI1394_LinkControlClear             0x0E4
+#define  OHCI1394_LinkControl_RcvSelfID		0x00000200
+#define  OHCI1394_LinkControl_RcvPhyPkt		0x00000400
+#define  OHCI1394_LinkControl_CycleTimerEnable	0x00100000
+#define  OHCI1394_LinkControl_CycleMaster	0x00200000
+#define  OHCI1394_LinkControl_CycleSource	0x00400000
+#define OHCI1394_NodeID                       0x0E8
+#define OHCI1394_PhyControl                   0x0EC
+#define OHCI1394_IsochronousCycleTimer        0x0F0
+#define OHCI1394_AsReqFilterHiSet             0x100
+#define OHCI1394_AsReqFilterHiClear           0x104
+#define OHCI1394_AsReqFilterLoSet             0x108
+#define OHCI1394_AsReqFilterLoClear           0x10C
+#define OHCI1394_PhyReqFilterHiSet            0x110
+#define OHCI1394_PhyReqFilterHiClear          0x114
+#define OHCI1394_PhyReqFilterLoSet            0x118
+#define OHCI1394_PhyReqFilterLoClear          0x11C
+#define OHCI1394_PhyUpperBound                0x120
+
+#define OHCI1394_AsReqTrContextBase           0x180
+#define OHCI1394_AsReqTrContextControlSet     0x180
+#define OHCI1394_AsReqTrContextControlClear   0x184
+#define OHCI1394_AsReqTrCommandPtr            0x18C
+
+#define OHCI1394_AsRspTrContextBase           0x1A0
+#define OHCI1394_AsRspTrContextControlSet     0x1A0
+#define OHCI1394_AsRspTrContextControlClear   0x1A4
+#define OHCI1394_AsRspTrCommandPtr            0x1AC
+
+#define OHCI1394_AsReqRcvContextBase          0x1C0
+#define OHCI1394_AsReqRcvContextControlSet    0x1C0
+#define OHCI1394_AsReqRcvContextControlClear  0x1C4
+#define OHCI1394_AsReqRcvCommandPtr           0x1CC
+
+#define OHCI1394_AsRspRcvContextBase          0x1E0
+#define OHCI1394_AsRspRcvContextControlSet    0x1E0
+#define OHCI1394_AsRspRcvContextControlClear  0x1E4
+#define OHCI1394_AsRspRcvCommandPtr           0x1EC
+
+/* Isochronous transmit registers */
+/* Add (16 * n) for context n */
+#define OHCI1394_IsoXmitContextBase           0x200
+#define OHCI1394_IsoXmitContextControlSet     0x200
+#define OHCI1394_IsoXmitContextControlClear   0x204
+#define OHCI1394_IsoXmitCommandPtr            0x20C
+
+/* Isochronous receive registers */
+/* Add (32 * n) for context n */
+#define OHCI1394_IsoRcvContextBase            0x400
+#define OHCI1394_IsoRcvContextControlSet      0x400
+#define OHCI1394_IsoRcvContextControlClear    0x404
+#define OHCI1394_IsoRcvCommandPtr             0x40C
+#define OHCI1394_IsoRcvContextMatch           0x410
+
+/* Interrupts Mask/Events */
+
+#define OHCI1394_reqTxComplete           0x00000001
+#define OHCI1394_respTxComplete          0x00000002
+#define OHCI1394_ARRQ                    0x00000004
+#define OHCI1394_ARRS                    0x00000008
+#define OHCI1394_RQPkt                   0x00000010
+#define OHCI1394_RSPkt                   0x00000020
+#define OHCI1394_isochTx                 0x00000040
+#define OHCI1394_isochRx                 0x00000080
+#define OHCI1394_postedWriteErr          0x00000100
+#define OHCI1394_lockRespErr             0x00000200
+#define OHCI1394_selfIDComplete          0x00010000
+#define OHCI1394_busReset                0x00020000
+#define OHCI1394_phy                     0x00080000
+#define OHCI1394_cycleSynch              0x00100000
+#define OHCI1394_cycle64Seconds          0x00200000
+#define OHCI1394_cycleLost               0x00400000
+#define OHCI1394_cycleInconsistent       0x00800000
+#define OHCI1394_unrecoverableError      0x01000000
+#define OHCI1394_cycleTooLong            0x02000000
+#define OHCI1394_phyRegRcvd              0x04000000
+#define OHCI1394_masterIntEnable         0x80000000
+
+/* DMA Control flags */
+#define DMA_CTL_OUTPUT_MORE              0x00000000
+#define DMA_CTL_OUTPUT_LAST              0x10000000
+#define DMA_CTL_INPUT_MORE               0x20000000
+#define DMA_CTL_INPUT_LAST               0x30000000
+#define DMA_CTL_UPDATE                   0x08000000
+#define DMA_CTL_IMMEDIATE                0x02000000
+#define DMA_CTL_IRQ                      0x00300000
+#define DMA_CTL_BRANCH                   0x000c0000
+#define DMA_CTL_WAIT                     0x00030000
+
+/* OHCI evt_* error types, table 3-2 of the OHCI 1.1 spec. */
+#define EVT_NO_STATUS		0x0	/* No event status */
+#define EVT_RESERVED_A		0x1	/* Reserved, not used !!! */
+#define EVT_LONG_PACKET		0x2	/* The revc data was longer than the buf */
+#define EVT_MISSING_ACK		0x3	/* A subaction gap was detected before an ack
+					   arrived, or recv'd ack had a parity error */
+#define EVT_UNDERRUN		0x4	/* Underrun on corresponding FIFO, packet
+					   truncated */
+#define EVT_OVERRUN		0x5	/* A recv FIFO overflowed on reception of ISO
+					   packet */
+#define EVT_DESCRIPTOR_READ	0x6	/* An unrecoverable error occurred while host was
+					   reading a descriptor block */
+#define EVT_DATA_READ		0x7	/* An error occurred while host controller was
+					   attempting to read from host memory in the data
+					   stage of descriptor processing */
+#define EVT_DATA_WRITE		0x8	/* An error occurred while host controller was
+					   attempting to write either during the data stage
+					   of descriptor processing, or when processing a single
+					   16-bit host memory write */
+#define EVT_BUS_RESET		0x9	/* Identifies a PHY packet in the recv buffer as
+					   being a synthesized bus reset packet */
+#define EVT_TIMEOUT		0xa	/* Indicates that the asynchronous transmit response
+					   packet expired and was not transmitted, or that an
+					   IT DMA context experienced a skip processing overflow */
+#define EVT_TCODE_ERR		0xb	/* A bad tCode is associated with this packet.
+					   The packet was flushed */
+#define EVT_RESERVED_B		0xc	/* Reserved, not used !!! */
+#define EVT_RESERVED_C		0xd	/* Reserved, not used !!! */
+#define EVT_UNKNOWN		0xe	/* An error condition has occurred that cannot be
+					   represented by any other event codes defined herein. */
+#define EVT_FLUSHED		0xf	/* Send by the link side of output FIFO when asynchronous
+					   packets are being flushed due to a bus reset. */
+
+#define OHCI1394_TCODE_PHY               0xE
+
+void ohci1394_init_iso_tasklet(struct ohci1394_iso_tasklet *tasklet,
+			       int type,
+			       void (*func)(unsigned long),
+			       unsigned long data);
+int ohci1394_register_iso_tasklet(struct ti_ohci *ohci,
+				  struct ohci1394_iso_tasklet *tasklet);
+void ohci1394_unregister_iso_tasklet(struct ti_ohci *ohci,
+				     struct ohci1394_iso_tasklet *tasklet);
+
+/* returns zero if successful, one if DMA context is locked up */
+int ohci1394_stop_context      (struct ti_ohci *ohci, int reg, char *msg);
+struct ti_ohci *ohci1394_get_struct(int card_num);
+
+#endif
diff --git a/drivers/ieee1394/oui.db b/drivers/ieee1394/oui.db
new file mode 100644
index 000000000000..592c8a60d01e
--- /dev/null
+++ b/drivers/ieee1394/oui.db
@@ -0,0 +1,7048 @@
+000000 XEROX CORPORATION
+000001 XEROX CORPORATION
+000002 XEROX CORPORATION
+000003 XEROX CORPORATION
+000004 XEROX CORPORATION
+000005 XEROX CORPORATION
+000006 XEROX CORPORATION
+000007 XEROX CORPORATION
+000008 XEROX CORPORATION
+000009 XEROX CORPORATION
+00000A OMRON TATEISI ELECTRONICS CO.
+00000B MATRIX CORPORATION
+00000C CISCO SYSTEMS, INC.
+00000D FIBRONICS LTD.
+00000E FUJITSU LIMITED
+00000F NEXT, INC.
+000010 SYTEK INC.
+000011 NORMEREL SYSTEMES
+000012 INFORMATION TECHNOLOGY LIMITED
+000013 CAMEX
+000014 NETRONIX
+000015 DATAPOINT CORPORATION
+000016 DU PONT PIXEL SYSTEMS     .
+000017 TEKELEC
+000018 WEBSTER COMPUTER CORPORATION
+000019 APPLIED DYNAMICS INTERNATIONAL
+00001A ADVANCED MICRO DEVICES
+00001B NOVELL INC.
+00001C BELL TECHNOLOGIES
+00001D CABLETRON SYSTEMS, INC.
+00001E TELSIST INDUSTRIA ELECTRONICA
+00001F Telco Systems, Inc.
+000020 DATAINDUSTRIER DIAB AB
+000021 SUREMAN COMP. & COMMUN. CORP.
+000022 VISUAL TECHNOLOGY INC.
+000023 ABB INDUSTRIAL SYSTEMS AB
+000024 CONNECT AS
+000025 RAMTEK CORP.
+000026 SHA-KEN CO., LTD.
+000027 JAPAN RADIO COMPANY
+000028 PRODIGY SYSTEMS CORPORATION
+000029 IMC NETWORKS CORP.
+00002A TRW - SEDD/INP
+00002B CRISP AUTOMATION, INC
+00002C AUTOTOTE LIMITED
+00002D CHROMATICS INC
+00002E SOCIETE EVIRA
+00002F TIMEPLEX INC.
+000030 VG LABORATORY SYSTEMS LTD
+000031 QPSX COMMUNICATIONS PTY LTD
+000032 Marconi plc
+000033 EGAN MACHINERY COMPANY
+000034 NETWORK RESOURCES CORPORATION
+000035 SPECTRAGRAPHICS CORPORATION
+000036 ATARI CORPORATION
+000037 OXFORD METRICS LIMITED
+000038 CSS LABS
+000039 TOSHIBA CORPORATION
+00003A CHYRON CORPORATION
+00003B i Controls, Inc.
+00003C AUSPEX SYSTEMS INC.
+00003D UNISYS
+00003E SIMPACT
+00003F SYNTREX, INC.
+000040 APPLICON, INC.
+000041 ICE CORPORATION
+000042 METIER MANAGEMENT SYSTEMS LTD.
+000043 MICRO TECHNOLOGY
+000044 CASTELLE CORPORATION
+000045 FORD AEROSPACE & COMM. CORP.
+000046 OLIVETTI NORTH AMERICA
+000047 NICOLET INSTRUMENTS CORP.
+000048 SEIKO EPSON CORPORATION
+000049 APRICOT COMPUTERS, LTD
+00004A ADC CODENOLL TECHNOLOGY CORP.
+00004B ICL DATA OY
+00004C NEC CORPORATION
+00004D DCI CORPORATION
+00004E AMPEX CORPORATION
+00004F LOGICRAFT, INC.
+000050 RADISYS CORPORATION
+000051 HOB ELECTRONIC GMBH & CO. KG
+000052 Intrusion.com, Inc.
+000053 COMPUCORP
+000054 MODICON, INC.
+000055 COMMISSARIAT A L`ENERGIE ATOM.
+000056 DR. B. STRUCK
+000057 SCITEX CORPORATION LTD.
+000058 RACORE COMPUTER PRODUCTS INC.
+000059 HELLIGE GMBH
+00005A SysKonnect GmbH
+00005B ELTEC ELEKTRONIK AG
+00005C TELEMATICS INTERNATIONAL INC.
+00005D CS TELECOM
+00005E USC INFORMATION SCIENCES INST
+00005F SUMITOMO ELECTRIC IND., LTD.
+000060 KONTRON ELEKTRONIK GMBH
+000061 GATEWAY COMMUNICATIONS
+000062 BULL HN INFORMATION SYSTEMS
+000063 DR.ING.SEUFERT GMBH
+000064 YOKOGAWA DIGITAL COMPUTER CORP
+000065 NETWORK ASSOCIATES, INC.
+000066 TALARIS SYSTEMS, INC.
+000067 SOFT * RITE, INC.
+000068 ROSEMOUNT CONTROLS
+000069 CONCORD COMMUNICATIONS INC
+00006A COMPUTER CONSOLES INC.
+00006B SILICON GRAPHICS INC./MIPS
+00006D CRAY COMMUNICATIONS, LTD.
+00006E ARTISOFT, INC.
+00006F Madge Ltd.
+000070 HCL LIMITED
+000071 ADRA SYSTEMS INC.
+000072 MINIWARE TECHNOLOGY
+000073 SIECOR CORPORATION
+000074 RICOH COMPANY LTD.
+000075 Nortel Networks
+000076 ABEKAS VIDEO SYSTEM
+000077 INTERPHASE CORPORATION
+000078 LABTAM LIMITED
+000079 NETWORTH INCORPORATED
+00007A DANA COMPUTER INC.
+00007B RESEARCH MACHINES
+00007C AMPERE INCORPORATED
+00007D SUN MICROSYSTEMS, INC.
+00007E CLUSTRIX CORPORATION
+00007F LINOTYPE-HELL AG
+000080 CRAY COMMUNICATIONS A/S
+000081 BAY NETWORKS
+000082 LECTRA SYSTEMES SA
+000083 TADPOLE TECHNOLOGY PLC
+000084 SUPERNET
+000085 CANON INC.
+000086 MEGAHERTZ CORPORATION
+000087 HITACHI, LTD.
+000088 COMPUTER NETWORK TECH. CORP.
+000089 CAYMAN SYSTEMS INC.
+00008A DATAHOUSE INFORMATION SYSTEMS
+00008B INFOTRON
+00008C Alloy Computer Products (Australia) Pty Ltd
+00008D VERDIX CORPORATION
+00008E SOLBOURNE COMPUTER, INC.
+00008F RAYTHEON COMPANY
+000090 MICROCOM
+000091 ANRITSU CORPORATION
+000092 COGENT DATA TECHNOLOGIES
+000093 PROTEON INC.
+000094 ASANTE TECHNOLOGIES
+000095 SONY TEKTRONIX CORP.
+000096 MARCONI ELECTRONICS LTD.
+000097 EPOCH SYSTEMS
+000098 CROSSCOMM CORPORATION
+000099 MTX, INC.
+00009A RC COMPUTER A/S
+00009B INFORMATION INTERNATIONAL, INC
+00009C ROLM MIL-SPEC COMPUTERS
+00009D LOCUS COMPUTING CORPORATION
+00009E MARLI S.A.
+00009F AMERISTAR TECHNOLOGIES INC.
+0000A0 TOKYO SANYO ELECTRIC CO. LTD.
+0000A1 MARQUETTE ELECTRIC CO.
+0000A2 BAY NETWORKS
+0000A3 NETWORK APPLICATION TECHNOLOGY
+0000A4 ACORN COMPUTERS LIMITED
+0000A5 COMPATIBLE SYSTEMS CORP.
+0000A6 NETWORK GENERAL CORPORATION
+0000A7 NETWORK COMPUTING DEVICES INC.
+0000A8 STRATUS COMPUTER INC.
+0000A9 NETWORK SYSTEMS CORP.
+0000AA XEROX CORPORATION
+0000AB LOGIC MODELING CORPORATION
+0000AC CONWARE COMPUTER CONSULTING
+0000AD BRUKER INSTRUMENTS INC.
+0000AE DASSAULT ELECTRONIQUE
+0000AF NUCLEAR DATA INSTRUMENTATION
+0000B0 RND-RAD NETWORK DEVICES
+0000B1 ALPHA MICROSYSTEMS INC.
+0000B2 TELEVIDEO SYSTEMS, INC.
+0000B3 CIMLINC INCORPORATED
+0000B4 EDIMAX COMPUTER COMPANY
+0000B5 DATABILITY SOFTWARE SYS. INC.
+0000B6 MICRO-MATIC RESEARCH
+0000B7 DOVE COMPUTER CORPORATION
+0000B8 SEIKOSHA CO., LTD.
+0000B9 MCDONNELL DOUGLAS COMPUTER SYS
+0000BA SIIG, INC.
+0000BB TRI-DATA
+0000BC ALLEN-BRADLEY CO. INC.
+0000BD MITSUBISHI CABLE COMPANY
+0000BE THE NTI GROUP
+0000BF SYMMETRIC COMPUTER SYSTEMS
+0000C0 WESTERN DIGITAL CORPORATION
+0000C1 Madge Ltd.
+0000C2 INFORMATION PRESENTATION TECH.
+0000C3 HARRIS CORP COMPUTER SYS DIV
+0000C4 WATERS DIV. OF MILLIPORE
+0000C5 FARALLON COMPUTING/NETOPIA
+0000C6 EON SYSTEMS
+0000C7 ARIX CORPORATION
+0000C8 ALTOS COMPUTER SYSTEMS
+0000C9 EMULEX CORPORATION
+0000CA APPLITEK
+0000CB COMPU-SHACK ELECTRONIC GMBH
+0000CC DENSAN CO., LTD.
+0000CD Allied Telesyn Research Ltd.
+0000CE MEGADATA CORP.
+0000CF HAYES MICROCOMPUTER PRODUCTS
+0000D0 DEVELCON ELECTRONICS LTD.
+0000D1 ADAPTEC INCORPORATED
+0000D2 SBE, INC.
+0000D3 WANG LABORATORIES INC.
+0000D4 PURE DATA LTD.
+0000D5 MICROGNOSIS INTERNATIONAL
+0000D6 PUNCH LINE HOLDING
+0000D7 DARTMOUTH COLLEGE
+0000D8 NOVELL, INC.
+0000D9 NIPPON TELEGRAPH & TELEPHONE
+0000DA ATEX
+0000DB BRITISH TELECOMMUNICATIONS PLC
+0000DC HAYES MICROCOMPUTER PRODUCTS
+0000DD TCL INCORPORATED
+0000DE CETIA
+0000DF BELL & HOWELL PUB SYS DIV
+0000E0 QUADRAM CORP.
+0000E1 GRID SYSTEMS
+0000E2 ACER TECHNOLOGIES CORP.
+0000E3 INTEGRATED MICRO PRODUCTS LTD
+0000E4 IN2 GROUPE INTERTECHNIQUE
+0000E5 SIGMEX LTD.
+0000E6 APTOR PRODUITS DE COMM INDUST
+0000E7 STAR GATE TECHNOLOGIES
+0000E8 ACCTON TECHNOLOGY CORP.
+0000E9 ISICAD, INC.
+0000EA UPNOD AB
+0000EB MATSUSHITA COMM. IND. CO. LTD.
+0000EC MICROPROCESS
+0000ED APRIL
+0000EE NETWORK DESIGNERS, LTD.
+0000EF KTI
+0000F0 SAMSUNG ELECTRONICS CO., LTD.
+0000F1 MAGNA COMPUTER CORPORATION
+0000F2 SPIDER COMMUNICATIONS
+0000F3 GANDALF DATA LIMITED
+0000F4 ALLIED TELESYN INTERNATIONAL
+0000F5 DIAMOND SALES LIMITED
+0000F6 APPLIED MICROSYSTEMS CORP.
+0000F7 YOUTH KEEP ENTERPRISE CO LTD
+0000F8 DIGITAL EQUIPMENT CORPORATION
+0000F9 QUOTRON SYSTEMS INC.
+0000FA MICROSAGE COMPUTER SYSTEMS INC
+0000FB RECHNER ZUR KOMMUNIKATION
+0000FC MEIKO
+0000FD HIGH LEVEL HARDWARE
+0000FE ANNAPOLIS MICRO SYSTEMS
+0000FF CAMTEC ELECTRONICS LTD.
+000100 EQUIP'TRANS
+000102 3COM CORPORATION
+000103 3COM CORPORATION
+000104 DVICO Co., Ltd.
+000105 BECKHOFF GmbH
+000106 Tews Datentechnik GmbH
+000107 Leiser GmbH
+000108 AVLAB Technology, Inc.
+000109 Nagano Japan Radio Co., Ltd.
+00010A CIS TECHNOLOGY INC.
+00010B Space CyberLink, Inc.
+00010C System Talks Inc.
+00010D CORECO, INC.
+00010E Bri-Link Technologies Co., Ltd
+00010F Nishan Systems, Inc.
+000110 Gotham Networks
+000111 iDigm Inc.
+000112 Shark Multimedia Inc.
+000113 OLYMPUS CORPORATION
+000114 KANDA TSUSHIN KOGYO CO., LTD.
+000115 EXTRATECH CORPORATION
+000116 Netspect Technologies, Inc.
+000117 CANAL +
+000118 EZ Digital Co., Ltd.
+000119 Action Controls Pty. Ltd.
+00011A EEH DataLink GmbH
+00011B Unizone Technologies, Inc.
+00011C Universal Talkware Corporation
+00011D Centillium Communications
+00011E Precidia Technologies, Inc.
+00011F RC Networks, Inc.
+000120 OSCILLOQUARTZ S.A.
+000121 RapidStream Inc.
+000122 Trend Communications, Ltd.
+000123 DIGITAL ELECTRONICS CORP.
+000124 Acer Incorporated
+000125 YAESU MUSEN CO., LTD.
+000126 PAC Labs
+000127 The OPEN Group Limited
+000128 EnjoyWeb, Inc.
+000129 DFI Inc.
+00012A Telematica Sistems Inteligente
+00012B TELENET Co., Ltd.
+00012C Aravox Technologies, Inc.
+00012D Komodo Technology
+00012E PC Partner Ltd.
+00012F Twinhead International Corp
+000130 Extreme Networks
+000131 Detection Systems, Inc.
+000132 Dranetz - BMI
+000133 KYOWA Electronic Instruments C
+000134 SIG Positec Systems AG
+000135 KDC Corp.
+000136 CyberTAN Technology, Inc.
+000137 IT Farm Corporation
+000138 XAVi Technologies Corp.
+000139 Point Multimedia Systems
+00013A SHELCAD COMMUNICATIONS, LTD.
+00013B BNA SYSTEMS
+00013C TIW SYSTEMS
+00013D RiscStation Ltd.
+00013E Ascom Tateco AB
+00013F Neighbor World Co., Ltd.
+000140 Sendtek Corporation
+000141 CABLE PRINT
+000142 Cisco Systems, Inc.
+000143 Cisco Systems, Inc.
+000144 Cereva Networks, Inc.
+000145 WINSYSTEMS, INC.
+000146 Tesco Controls, Inc.
+000147 Zhone Technologies
+000148 X-traWeb Inc.
+000149 T.D.T. Transfer Data Test GmbH
+00014A SONY COMPUTER SCIENCE LABS., I
+00014B Ennovate Networks, Inc.
+00014C Berkeley Process Control
+00014D Shin Kin Enterprises Co., Ltd
+00014E WIN Enterprises, Inc.
+00014F LUMINOUS Networks, Inc.
+000150 GILAT COMMUNICATIONS, LTD.
+000151 Ensemble Communications
+000152 CHROMATEK INC.
+000153 ARCHTEK TELECOM CORPORATION
+000154 G3M Corporation
+000155 Promise Technology, Inc.
+000156 FIREWIREDIRECT.COM, INC.
+000157 SYSWAVE CO., LTD
+000158 Electro Industries/Gauge Tech
+000159 S1 Corporation
+00015A Digital Video Broadcasting
+00015B ITALTEL S.p.A/RF-UP-I
+00015C CADANT INC.
+00015D Sun Microsystems, Inc
+00015E BEST TECHNOLOGY CO., LTD.
+00015F DIGITAL DESIGN GmbH
+000160 ELMEX Co., LTD.
+000161 Meta Machine Technology
+000162 Cygnet Technologies, Inc.
+000163 Cisco Systems, Inc.
+000164 Cisco Systems, Inc.
+000165 AirSwitch Corporation
+000166 TC GROUP A/S
+000167 HIOKI E.E. CORPORATION
+000168 VITANA CORPORATION
+000169 Celestix Networks Pte Ltd.
+00016A ALITEC
+00016B LightChip, Inc.
+00016C FOXCONN
+00016D CarrierComm Inc.
+00016E Conklin Corporation
+00016F HAITAI ELECTRONICS CO., LTD.
+000170 ESE Embedded System Engineer'g
+000171 Allied Data Technologies
+000172 TechnoLand Co., LTD.
+000173 JNI Corporation
+000174 CyberOptics Corporation
+000175 Radiant Communications Corp.
+000176 Orient Silver Enterprises
+000177 EDSL
+000178 MARGI Systems, Inc.
+000179 WIRELESS TECHNOLOGY, INC.
+00017A Chengdu Maipu Electric Industrial Co., Ltd.
+00017B Heidelberger Druckmaschinen AG
+00017C AG-E GmbH
+00017D ThermoQuest
+00017E ADTEK System Science Co., Ltd.
+00017F Experience Music Project
+000180 AOpen, Inc.
+000181 Nortel Networks
+000182 DICA TECHNOLOGIES AG
+000183 ANITE TELECOMS
+000184 SIEB & MEYER AG
+000185 Aloka Co., Ltd.
+000186 DISCH GmbH
+000187 i2SE GmbH
+000188 LXCO Technologies ag
+000189 Refraction Technology, Inc.
+00018A ROI COMPUTER AG
+00018B NetLinks Co., Ltd.
+00018C Mega Vision
+00018D AudeSi Technologies
+00018E Logitec Corporation
+00018F Kenetec, Inc.
+000190 SMK-M
+000191 SYRED Data Systems
+000192 Texas Digital Systems
+000193 Hanbyul Telecom Co., Ltd.
+000194 Capital Equipment Corporation
+000195 Sena Technologies, Inc.
+000196 Cisco Systems, Inc.
+000197 Cisco Systems, Inc.
+000198 Darim Vision
+000199 HeiSei Electronics
+00019A LEUNIG GmbH
+00019B Kyoto Microcomputer Co., Ltd.
+00019C JDS Uniphase Inc.
+00019D E-Control Systems, Inc.
+00019E ESS Technology, Inc.
+00019F Phonex Broadband
+0001A0 Infinilink Corporation
+0001A1 Mag-Tek, Inc.
+0001A2 Logical Co., Ltd.
+0001A3 GENESYS LOGIC, INC.
+0001A4 Microlink Corporation
+0001A5 Nextcomm, Inc.
+0001A6 Scientific-Atlanta Arcodan A/S
+0001A7 UNEX TECHNOLOGY CORPORATION
+0001A8 Welltech Computer Co., Ltd.
+0001A9 BMW AG
+0001AA Airspan Communications, Ltd.
+0001AB Main Street Networks
+0001AC Sitara Networks, Inc.
+0001AD Coach Master International  d.b.a. CMI Worldwide, Inc.
+0001AE Trex Enterprises
+0001AF Motorola Computer Group
+0001B0 Fulltek Technology Co., Ltd.
+0001B1 General Bandwidth
+0001B2 Digital Processing Systems, Inc.
+0001B3 Precision Electronic Manufacturing
+0001B4 Wayport, Inc.
+0001B5 Turin Networks, Inc.
+0001B6 SAEJIN T&M Co., Ltd.
+0001B7 Centos, Inc.
+0001B8 Netsensity, Inc.
+0001B9 SKF Condition Monitoring
+0001BA IC-Net, Inc.
+0001BB Frequentis
+0001BC Brains Corporation
+0001BD Peterson Electro-Musical Products, Inc.
+0001BE Gigalink Co., Ltd.
+0001BF Teleforce Co., Ltd.
+0001C0 CompuLab, Ltd.
+0001C1 Vitesse Semiconductor Corporation
+0001C2 ARK Research Corp.
+0001C3 Acromag, Inc.
+0001C4 NeoWave, Inc.
+0001C5 Simpler Networks
+0001C6 Quarry Technologies
+0001C7 Cisco Systems, Inc.
+0001C8 THOMAS CONRAD CORP.
+0001C8 CONRAD CORP.
+0001C9 Cisco Systems, Inc.
+0001CA Geocast Network Systems, Inc.
+0001CB NetGame, Ltd.
+0001CC Japan Total Design Communication Co., Ltd.
+0001CD ARtem
+0001CE Custom Micro Products, Ltd.
+0001CF Alpha Data Parallel Systems, Ltd.
+0001D0 VitalPoint, Inc.
+0001D1 CoNet Communications, Inc.
+0001D2 MacPower Peripherals, Ltd.
+0001D3 PAXCOMM, Inc.
+0001D4 Leisure Time, Inc.
+0001D5 HAEDONG INFO & COMM CO., LTD
+0001D6 MAN Roland Druckmaschinen AG
+0001D7 F5 Networks, Inc.
+0001D8 Teltronics, Inc.
+0001D9 Sigma, Inc.
+0001DA WINCOMM Corporation
+0001DB Freecom Technologies GmbH
+0001DC Activetelco
+0001DD Avail Networks
+0001DE Trango Systems, Inc.
+0001DF ISDN Communications, Ltd.
+0001E0 Fast Systems, Inc.
+0001E1 Kinpo Electronics, Inc.
+0001E2 Ando Electric Corporation
+0001E3 Siemens AG
+0001E4 Sitera, Inc.
+0001E5 Supernet, Inc.
+0001E6 Hewlett-Packard Company
+0001E7 Hewlett-Packard Company
+0001E8 Force10 Networks, Inc.
+0001E9 Litton Marine Systems B.V.
+0001EA Cirilium Corp.
+0001EB C-COM Corporation
+0001EC Ericsson Group
+0001ED SETA Corp.
+0001EE Comtrol Europe, Ltd.
+0001EF Camtel Technology Corp.
+0001F0 Tridium, Inc.
+0001F1 Innovative Concepts, Inc.
+0001F2 Mark of the Unicorn, Inc.
+0001F3 QPS, Inc.
+0001F4 Enterasys Networks
+0001F5 ERIM S.A.
+0001F6 Association of Musical Electronics Industry
+0001F7 Image Display Systems, Inc.
+0001F8 Adherent Systems, Ltd.
+0001F9 TeraGlobal Communications Corp.
+0001FA HOROSCAS
+0001FB DoTop Technology, Inc.
+0001FC Keyence Corporation
+0001FD Digital Voice Systems, Inc.
+0001FE DIGITAL EQUIPMENT CORPORATION
+0001FF Data Direct Networks, Inc.
+000200 Net & Sys Co., Ltd.
+000201 IFM Electronic gmbh
+000202 Amino Communications, Ltd.
+000203 Woonsang Telecom, Inc.
+000204 Bodmann Industries Elektronik GmbH
+000205 Hitachi Denshi, Ltd.
+000206 Telital R&D Denmark A/S
+000207 VisionGlobal Network Corp.
+000208 Unify Networks, Inc.
+000209 Shenzhen SED Information Technology Co., Ltd.
+00020A Gefran Spa
+00020B Native Networks, Inc.
+00020C Metro-Optix
+00020D Micronpc.com
+00020E Laurel Networks, Inc.
+00020F AATR
+000210 Fenecom
+000211 Nature Worldwide Technology Corp.
+000212 SierraCom
+000213 S.D.E.L.
+000214 DTVRO
+000215 Cotas Computer Technology A/B
+000216 Cisco Systems, Inc.
+000217 Cisco Systems, Inc.
+000218 Advanced Scientific Corp
+000219 Paralon Technologies
+00021A Zuma Networks
+00021B Kollmorgen-Servotronix
+00021C Network Elements, Inc.
+00021D Data General Communication Ltd.
+00021E SIMTEL S.R.L.
+00021F Aculab PLC
+000220 Canon Aptex, Inc.
+000221 DSP Application, Ltd.
+000222 Chromisys, Inc.
+000223 ClickTV
+000224 Lantern Communications, Inc.
+000225 Certus Technology, Inc.
+000226 XESystems, Inc.
+000227 ESD GmbH
+000228 Necsom, Ltd.
+000229 Adtec Corporation
+00022A Asound Electronic
+00022B Tamura Electric Works, Ltd.
+00022C ABB Bomem, Inc.
+00022D Agere Systems
+00022E TEAC Corp. R& D
+00022F P-Cube, Ltd.
+000230 Intersoft Electronics
+000231 Ingersoll-Rand
+000232 Avision, Inc.
+000233 Mantra Communications, Inc.
+000234 Imperial Technology, Inc.
+000235 Paragon Networks International
+000236 INIT GmbH
+000237 Cosmo Research Corp.
+000238 Serome Technology, Inc.
+000239 Visicom
+00023A ZSK Stickmaschinen GmbH
+00023B Redback Networks
+00023C Creative Technology, Ltd.
+00023D NuSpeed, Inc.
+00023E Selta Telematica S.p.a
+00023F Compal Electronics, Inc.
+000240 Seedek Co., Ltd.
+000241 Amer.com
+000242 Videoframe Systems
+000243 Raysis Co., Ltd.
+000244 SURECOM Technology Co.
+000245 Lampus Co, Ltd.
+000246 All-Win Tech Co., Ltd.
+000247 Great Dragon Information Technology (Group) Co., Ltd.
+000248 Pilz GmbH & Co.
+000249 Aviv Infocom Co, Ltd.
+00024A Cisco Systems, Inc.
+00024B Cisco Systems, Inc.
+00024C SiByte, Inc.
+00024D Mannesman Dematic Colby Pty. Ltd.
+00024E Datacard Group
+00024F IPM Datacom S.R.L.
+000250 Geyser Networks, Inc.
+000251 Soma Networks
+000252 Carrier Corporation
+000253 Televideo, Inc.
+000254 WorldGate
+000255 IBM Corporation
+000256 Alpha Processor, Inc.
+000257 Microcom Corp.
+000258 Flying Packets Communications
+000259 Tsann Kuen China (Shanghai)Enterprise Co., Ltd. IT Group
+00025A Catena Networks
+00025B Cambridge Silicon Radio
+00025C SCI Systems (Kunshan) Co., Ltd.
+00025D Calix Networks
+00025E High Technology Ltd
+00025F Nortel Networks
+000260 Accordion Networks, Inc.
+000261 i3 Micro Technology AB
+000262 Soyo Group Soyo Com Tech Co., Ltd
+000263 UPS Manufacturing SRL
+000264 AudioRamp.com
+000265 Virditech Co. Ltd.
+000266 Thermalogic Corporation
+000267 NODE RUNNER, INC.
+000268 Harris Government Communications
+000269 Nadatel Co., Ltd
+00026A Cocess Telecom Co., Ltd.
+00026B BCM Computers Co., Ltd.
+00026C Philips CFT
+00026D Adept Telecom
+00026E NeGeN Access, Inc.
+00026F Senao International Co., Ltd.
+000270 Crewave Co., Ltd.
+000271 Vpacket Communications
+000272 CC&C Technologies, Inc.
+000273 Coriolis Networks
+000274 Tommy Technologies Corp.
+000275 SMART Technologies, Inc.
+000276 Primax Electronics Ltd.
+000277 Cash Systemes Industrie
+000278 Samsung Electro-Mechanics Co., Ltd.
+000279 Control Applications, Ltd.
+00027A IOI Technology Corporation
+00027B Amplify Net, Inc.
+00027C Trilithic, Inc.
+00027D Cisco Systems, Inc.
+00027E Cisco Systems, Inc.
+00027F ask-technologies.com
+000280 Mu Net, Inc.
+000281 Madge Ltd.
+000282 ViaClix, Inc.
+000283 Spectrum Controls, Inc.
+000284 Alstom T&D P&C
+000285 Riverstone Networks
+000286 Occam Networks
+000287 Adapcom
+000288 GLOBAL VILLAGE COMMUNICATION
+000289 DNE Technologies
+00028A Ambit Microsystems Corporation
+00028B VDSL Systems OY
+00028C Micrel-Synergy Semiconductor
+00028D Movita Technologies, Inc.
+00028E Rapid 5 Networks, Inc.
+00028F Globetek, Inc.
+000290 Woorigisool, Inc.
+000291 Open Network Co., Ltd.
+000292 Logic Innovations, Inc.
+000293 Solid Data Systems
+000294 Tokyo Sokushin Co., Ltd.
+000295 IP.Access Limited
+000296 Lectron Co,. Ltd.
+000297 C-COR.net
+000298 Broadframe Corporation
+000299 Apex, Inc.
+00029A Storage Apps
+00029B Kreatel Communications AB
+00029C 3COM
+00029D Merix Corp.
+00029E Information Equipment Co., Ltd.
+00029F L-3 Communication Aviation Recorders
+0002A0 Flatstack Ltd.
+0002A1 World Wide Packets
+0002A2 Hilscher GmbH
+0002A3 ABB Power Automation
+0002A4 AddPac Technology Co., Ltd.
+0002A5 Compaq Computer Corporation
+0002A6 Effinet Systems Co., Ltd.
+0002A7 Vivace Networks
+0002A8 Air Link Technology
+0002A9 RACOM, s.r.o.
+0002AA PLcom Co., Ltd.
+0002AB CTC Union Technologies Co., Ltd.
+0002AC 3PAR data
+0002AD Pentax Corpotation
+0002AE Scannex Electronics Ltd.
+0002AF TeleCruz Technology, Inc.
+0002B0 Hokubu Communication & Industrial Co., Ltd.
+0002B1 Anritsu, Ltd.
+0002B2 Cablevision
+0002B3 Intel Corporation
+0002B4 DAPHNE
+0002B5 Avnet, Inc.
+0002B6 Acrosser Technology Co., Ltd.
+0002B7 Watanabe Electric Industry Co., Ltd.
+0002B8 WHI KONSULT AB
+0002B9 Cisco Systems, Inc.
+0002BA Cisco Systems, Inc.
+0002BB Continuous Computing
+0002BC LVL 7 Systems, Inc.
+0002BD Bionet Co., Ltd.
+0002BE Totsu Engineering, Inc.
+0002BF dotRocket, Inc.
+0002C0 Bencent Tzeng Industry Co., Ltd.
+0002C1 Innovative Electronic Designs, Inc.
+0002C2 Net Vision Telecom
+0002C3 Arelnet Ltd.
+0002C4 Vector International BUBA
+0002C5 Evertz Microsystems Ltd.
+0002C6 Data Track Technology PLC
+0002C7 ALPS ELECTRIC Co., Ltd.
+0002C8 Technocom Communications Technology (pte) Ltd
+0002C9 Mellanox Technologies
+0002CA EndPoints, Inc.
+0002CB TriState Ltd.
+0002CC M.C.C.I
+0002CD TeleDream, Inc.
+0002CE FoxJet, Inc.
+0002CF ZyGate Communications, Inc.
+0002D0 Comdial Corporation
+0002D1 Vivotek, Inc.
+0002D2 Workstation AG
+0002D3 NetBotz, Inc.
+0002D4 PDA Peripherals, Inc.
+0002D5 ACR
+0002D6 NICE Systems
+0002D7 EMPEG Ltd
+0002D8 BRECIS Communications Corporation
+0002D9 Reliable Controls
+0002DA ExiO Communications, Inc.
+0002DB NETSEC
+0002DC Fujitsu General Limited
+0002DD Bromax Communications, Ltd.
+0002DE Astrodesign, Inc.
+0002DF Net Com Systems, Inc.
+0002E0 ETAS GmbH
+0002E1 Integrated Network Corporation
+0002E2 NDC Infared Engineering
+0002E3 LITE-ON Communications, Inc.
+0002E4 JC HYUN Systems, Inc.
+0002E5 Timeware Ltd.
+0002E6 Gould Instrument Systems, Inc.
+0002E7 CAB GmbH & Co KG
+0002E8 E.D.&A.
+0002E9 CS Systemes De Securite - C3S
+0002EA Videonics, Inc.
+0002EB Pico Communications
+0002EC Maschoff Design Engineering
+0002ED DXO Telecom Co., Ltd.
+0002EE Nokia Danmark A/S
+0002EF CCC Network Systems Group Ltd.
+0002F0 AME Optimedia Technology Co., Ltd.
+0002F1 Pinetron Co., Ltd.
+0002F2 eDevice, Inc.
+0002F3 Media Serve Co., Ltd.
+0002F4 PCTEL, Inc.
+0002F5 VIVE Synergies, Inc.
+0002F6 Equipe Communications
+0002F7 ARM
+0002F8 SEAKR Engineering, Inc.
+0002F9 Mimos Semiconductor SDN BHD
+0002FA DX Antenna Co., Ltd.
+0002FB Baumuller Aulugen-Systemtechnik GmbH
+0002FC Cisco Systems, Inc.
+0002FD Cisco Systems, Inc.
+0002FE Viditec, Inc.
+0002FF Handan BroadInfoCom
+000300 NetContinuum, Inc.
+000301 Avantas Networks Corporation
+000302 Oasys Telecom, Inc.
+000303 JAMA Electronics Co., Ltd.
+000304 Pacific Broadband Communications
+000305 Smart Network Devices GmbH
+000306 Fusion In Tech Co., Ltd.
+000307 Secure Works, Inc.
+000308 AM Communications, Inc.
+000309 Texcel Technology PLC
+00030A Argus Technologies
+00030B Hunter Technology, Inc.
+00030C Telesoft Technologies Ltd.
+00030D Uniwill Computer Corp.
+00030E Core Communications Co., Ltd.
+00030F Digital China (Shanghai) Networks Ltd.
+000310 Link Evolution Corp.
+000311 Micro Technology Co., Ltd.
+000312 TR-Systemtechnik GmbH
+000313 Access Media SPA
+000314 Teleware Network Systems
+000315 Cidco Incorporated
+000316 Nobell Communications, Inc.
+000317 Merlin Systems, Inc.
+000318 Cyras Systems, Inc.
+000319 Infineon AG
+00031A Beijing Broad Telecom Ltd., China
+00031B Cellvision Systems, Inc.
+00031C Svenska Hardvarufabriken AB
+00031D Taiwan Commate Computer, Inc.
+00031E Optranet, Inc.
+00031F Condev Ltd.
+000320 Xpeed, Inc.
+000321 Reco Research Co., Ltd.
+000322 IDIS Co., Ltd.
+000323 Cornet Technology, Inc.
+000324 SANYO Multimedia Tottori Co., Ltd.
+000325 Arima Computer Corp.
+000326 Iwasaki Information Systems Co., Ltd.
+000327 ACT'L
+000328 Mace Group, Inc.
+000329 F3, Inc.
+00032A UniData Communication Systems, Inc.
+00032B GAI Datenfunksysteme GmbH
+00032C ABB Industrie AG
+00032D IBASE Technology, Inc.
+00032E Scope Information Management, Ltd.
+00032F Global Sun Technology, Inc.
+000330 Imagenics, Co., Ltd.
+000331 Cisco Systems, Inc.
+000332 Cisco Systems, Inc.
+000333 Digitel Co., Ltd.
+000334 Newport Electronics
+000335 Mirae Technology
+000336 Zetes Technologies
+000337 Vaone, Inc.
+000338 Oak Technology
+000339 Eurologic Systems, Ltd.
+00033A Silicon Wave, Inc.
+00033B TAMI Tech Co., Ltd.
+00033C Daiden Co., Ltd.
+00033D ILSHin Lab
+00033E Tateyama System Laboratory Co., Ltd.
+00033F BigBand Networks, Ltd.
+000340 Floware Wireless Systems, Ltd.
+000341 Axon Digital Design
+000342 Nortel Networks
+000343 Martin Professional A/S
+000344 Tietech.Co., Ltd.
+000345 Routrek Networks Corporation
+000346 Hitachi Kokusai Electric, Inc.
+000347 Intel Corporation
+000348 Norscan Instruments, Ltd.
+000349 Vidicode Datacommunicatie B.V.
+00034A RIAS Corporation
+00034B Nortel Networks
+00034C Shanghai DigiVision Technology Co., Ltd.
+00034D Chiaro Networks, Ltd.
+00034E Pos Data Company, Ltd.
+00034F Sur-Gard Security
+000350 BTICINO SPA
+000351 Diebold, Inc.
+000352 Colubris Networks
+000353 Mitac, Inc.
+000354 Fiber Logic Communications
+000355 TeraBeam Internet Systems
+000356 Wincor Nixdorf GmbH & Co KG
+000357 Intervoice-Brite, Inc.
+000358 iCable System Co., Ltd.
+000359 DigitalSis
+00035A Photron Limited
+00035B BridgeWave Communications
+00035C Saint Song Corp.
+00035D Bosung Hi-Net Co., Ltd.
+00035E Metropolitan Area Networks, Inc.
+00035F Prueftechnik Condition Monitoring GmbH & Co. KG
+000360 PAC Interactive Technology, Inc.
+000361 Widcomm, Inc.
+000362 Vodtel Communications, Inc.
+000363 Miraesys Co., Ltd.
+000364 Scenix Semiconductor, Inc.
+000365 Kira Information & Communications, Ltd.
+000366 ASM Pacific Technology
+000367 Jasmine Networks, Inc.
+000368 Embedone Co., Ltd.
+000369 Nippon Antenna Co., Ltd.
+00036A Mainnet, Ltd.
+00036B Cisco Systems, Inc.
+00036C Cisco Systems, Inc.
+00036D Runtop, Inc.
+00036E Nicon Systems (Pty) Limited
+00036F Telsey SPA
+000370 NXTV, Inc.
+000371 Acomz Networks Corp.
+000372 ULAN
+000373 Aselsan A.S
+000374 Hunter Watertech
+000375 NetMedia, Inc.
+000376 Graphtec Technology, Inc.
+000377 Gigabit Wireless
+000378 HUMAX Co., Ltd.
+000379 Proscend Communications, Inc.
+00037A Taiyo Yuden Co., Ltd.
+00037B IDEC IZUMI Corporation
+00037C Coax Media
+00037D Stellcom
+00037E PORTech Communications, Inc.
+00037F Atheros Communications, Inc.
+000380 SSH Communications Security Corp.
+000381 Ingenico International
+000382 A-One Co., Ltd.
+000383 Metera Networks, Inc.
+000384 AETA
+000385 Actelis Networks, Inc.
+000386 Ho Net, Inc.
+000387 Blaze Network Products
+000388 Fastfame Technology Co., Ltd.
+000389 Plantronics
+00038A America Online, Inc.
+00038B PLUS-ONE I&T, Inc.
+00038C Total Impact
+00038D PCS Revenue Control Systems, Inc.
+00038E Atoga Systems, Inc.
+00038F Weinschel Corporation
+000390 Digital Video Communications, Inc.
+000392 Hyundai Teletek Co., Ltd.
+000393 Apple Computer, Inc.
+000394 Connect One
+000395 California Amplifier
+000396 EZ Cast Co., Ltd.
+000397 Watchfront Electronics
+000398 WISI
+000399 Dongju Informations & Communications Co., Ltd.
+00039A nSine, Ltd.
+00039B NetChip Technology, Inc.
+00039C OptiMight Communications, Inc.
+00039D BENQ CORPORATION
+00039E Tera System Co., Ltd.
+00039F Cisco Systems, Inc.
+0003A0 Cisco Systems, Inc.
+0003A1 HIPER Information & Communication, Inc.
+0003A2 Catapult Communications
+0003A3 MAVIX, Ltd.
+0003A4 Data Storage and Information Management
+0003A5 Medea Corporation
+0003A7 Unixtar Technology, Inc.
+0003A8 IDOT Computers, Inc.
+0003A9 AXCENT Media AG
+0003AA Watlow
+0003AB Bridge Information Systems
+0003AC Fronius Schweissmaschinen
+0003AD Emerson Energy Systems AB
+0003AE Allied Advanced Manufacturing Pte, Ltd.
+0003AF Paragea Communications
+0003B0 Xsense Technology Corp.
+0003B1 Abbott Laboratories HPD
+0003B2 Radware
+0003B3 IA Link Systems Co., Ltd.
+0003B4 Macrotek International Corp.
+0003B5 Entra Technology Co.
+0003B6 QSI Corporation
+0003B7 ZACCESS Systems
+0003B8 NetKit Solutions, LLC
+0003B9 Hualong Telecom Co., Ltd.
+0003BA Sun Microsystems
+0003BB Signal Communications Limited
+0003BC COT GmbH
+0003BD OmniCluster Technologies, Inc.
+0003BE Netility
+0003BF Centerpoint Broadband Technologies, Inc.
+0003C0 RFTNC Co., Ltd.
+0003C1 Packet Dynamics Ltd
+0003C2 Solphone K.K.
+0003C3 Micronik Multimedia
+0003C4 Tomra Systems ASA
+0003C5 Mobotix AG
+0003C6 ICUE Systems, Inc.
+0003C7 hopf Elektronik GmbH
+0003C8 CML Emergency Services
+0003C9 TECOM Co., Ltd.
+0003CA MTS Systems Corp.
+0003CB Nippon Systems Development Co., Ltd.
+0003CC Momentum Computer, Inc.
+0003CD Clovertech, Inc.
+0003CE ETEN Technologies, Inc.
+0003CF Muxcom, Inc.
+0003D0 KOANKEISO Co., Ltd.
+0003D1 Takaya Corporation
+0003D2 Crossbeam Systems, Inc.
+0003D3 Internet Energy Systems, Inc.
+0003D4 Alloptic, Inc.
+0003D5 Advanced Communications Co., Ltd.
+0003D6 RADVision, Ltd.
+0003D7 NextNet Wireless, Inc.
+0003D8 iMPath Networks, Inc.
+0003D9 Secheron SA
+0003DA Takamisawa Cybernetics Co., Ltd.
+0003DB Apogee Electronics Corp.
+0003DC Lexar Media, Inc.
+0003DD Comark Corp.
+0003DE OTC Wireless
+0003DF Desana Systems
+0003E0 RadioFrame Networks, Inc.
+0003E1 Winmate Communication, Inc.
+0003E2 Comspace Corporation
+0003E3 Cisco Systems, Inc.
+0003E4 Cisco Systems, Inc.
+0003E5 Hermstedt SG
+0003E6 Entone Technologies, Inc.
+0003E7 Logostek Co. Ltd.
+0003E8 Wavelength Digital Limited
+0003E9 Akara Canada, Inc.
+0003EA Mega System Technologies, Inc.
+0003EB Atrica
+0003EC ICG Research, Inc.
+0003ED Shinkawa Electric Co., Ltd.
+0003EE MKNet Corporation
+0003EF Oneline AG
+0003F0 Redfern Broadband Networks
+0003F1 Cicada Semiconductor, Inc.
+0003F2 Seneca Networks
+0003F3 Dazzle Multimedia, Inc.
+0003F4 NetBurner
+0003F5 Chip2Chip
+0003F6 Allegro Networks, Inc.
+0003F7 Plast-Control GmbH
+0003F8 SanCastle Technologies, Inc.
+0003F9 Pleiades Communications, Inc.
+0003FA TiMetra Networks
+0003FB Toko Seiki Company, Ltd.
+0003FC Intertex Data AB
+0003FD Cisco Systems, Inc.
+0003FE Cisco Systems, Inc.
+0003FF Connectix
+000400 LEXMARK INTERNATIONAL, INC.
+000401 Osaki Electric Co., Ltd.
+000402 Nexsan Technologies, Ltd.
+000403 Nexsi Corporation
+000404 Makino Milling Machine Co., Ltd.
+000405 ACN Technologies
+000406 Fa. Metabox AG
+000407 Topcon Positioning Systems, Inc.
+000408 Sanko Electronics Co., Ltd.
+000409 Cratos Networks
+00040A Sage Systems
+00040B 3com Europe Ltd.
+00040C KANNO Work's Ltd.
+00040D Avaya, Inc.
+00040E AVM GmbH
+00040F Asus Network Technologies, Inc.
+000410 Spinnaker Networks, Inc.
+000411 Inkra Networks, Inc.
+000412 WaveSmith Networks, Inc.
+000413 SNOM Technology AG
+000414 Umezawa Musen Denki Co., Ltd.
+000415 Rasteme Systems Co., Ltd.
+000416 Parks S/A Comunicacoes Digitais
+000417 ELAU AG
+000418 Teltronic S.A.U.
+000419 Fibercycle Networks, Inc.
+00041A ines GmbH
+00041B Digital Interfaces Ltd.
+00041C ipDialog, Inc.
+00041D Corega of America
+00041E Shikoku Instrumentation Co., Ltd.
+00041F Sony Computer Entertainment, Inc.
+000420 Slim Devices, Inc.
+000421 Ocular Networks
+000422 Gordon Kapes, Inc.
+000423 Intel Corporation
+000424 TMC s.r.l.
+000425 Atmel Corporation
+000426 Autosys
+000427 Cisco Systems, Inc.
+000428 Cisco Systems, Inc.
+000429 Pixord Corporation
+00042A Wireless Networks, Inc.
+00042B IT Access Co., Ltd.
+00042C Minet, Inc.
+00042D Sarian Systems, Ltd.
+00042E Netous Technologies, Ltd.
+00042F International Communications Products, Inc.
+000430 Netgem
+000431 GlobalStreams, Inc.
+000432 Voyetra Turtle Beach, Inc.
+000433 Cyberboard A/S
+000434 Accelent Systems, Inc.
+000435 Comptek International, Inc.
+000436 ELANsat Technologies, Inc.
+000437 Powin Information Technology, Inc.
+000438 Nortel Networks
+000439 Rosco Entertainment Technology, Inc.
+00043A Intelligent Telecommunications, Inc.
+00043B Lava Computer Mfg., Inc.
+00043C SONOS Co., Ltd.
+00043D INDEL AG
+00043E Telencomm
+00043F Electronic Systems Technology, Inc.
+000440 cyberPIXIE, Inc.
+000441 Half Dome Systems, Inc.
+000442 NACT
+000443 Agilent Technologies, Inc.
+000444 Western Multiplex Corporation
+000445 LMS Skalar Instruments GmbH
+000446 CYZENTECH Co., Ltd.
+000447 Acrowave Systems Co., Ltd.
+000448 Polaroid Professional Imaging
+000449 Mapletree Networks
+00044A iPolicy Networks, Inc.
+00044B NVIDIA
+00044C JENOPTIK
+00044D Cisco Systems, Inc.
+00044E Cisco Systems, Inc.
+00044F Leukhardt Systemelektronik GmbH
+000450 DMD Computers SRL
+000451 Medrad, Inc.
+000452 RocketLogix, Inc.
+000453 YottaYotta, Inc.
+000454 Quadriga UK
+000455 ANTARA.net
+000456 PipingHot Networks
+000457 Universal Access Technology, Inc.
+000458 Fusion X Co., Ltd.
+000459 Veristar Corporation
+00045A The Linksys Group, Inc.
+00045B Techsan Electronics Co., Ltd.
+00045C Mobiwave Pte Ltd
+00045D BEKA Elektronik
+00045E PolyTrax Information Technology AG
+00045F Evalue Technology, Inc.
+000460 Knilink Technology, Inc.
+000461 EPOX Computer Co., Ltd.
+000462 DAKOS Data & Communication Co., Ltd.
+000463 Bosch Security Systems
+000464 Fantasma Networks, Inc.
+000465 i.s.t isdn-support technik GmbH
+000466 ARMITEL Co.
+000467 Wuhan Research Institute of MII
+000468 Vivity, Inc.
+000469 Innocom, Inc.
+00046A Navini Networks
+00046B Palm Wireless, Inc.
+00046C Cyber Technology Co., Ltd.
+00046D Cisco Systems, Inc.
+00046E Cisco Systems, Inc.
+00046F Digitel S/A Industria Eletronica
+000470 ipUnplugged AB
+000471 IPrad
+000472 Telelynx, Inc.
+000473 Photonex Corporation
+000474 LEGRAND
+000475 3 Com Corporation
+000476 3 Com Corporation
+000477 Scalant Systems, Inc.
+000478 G. Star Technology Corporation
+000479 Radius Co., Ltd.
+00047A AXXESSIT ASA
+00047B Schlumberger
+00047C Skidata AG
+00047D Pelco
+00047E NKF Electronics
+00047F Chr. Mayr GmbH & Co. KG
+000480 Foundry Networks, Inc.
+000481 Econolite Control Products, Inc.
+000482 Medialogic Corp.
+000483 Deltron Technology, Inc.
+000484 Amann GmbH
+000485 PicoLight
+000486 ITTC, University of Kansas
+000487 Cogency Semiconductor, Inc.
+000488 Eurotherm Action Incorporated.
+000489 YAFO Networks, Inc.
+00048A Temia Vertriebs GmbH
+00048B Poscon Corporation
+00048C Nayna Networks, Inc.
+00048D Tone Commander Systems, Inc.
+00048E Ohm Tech Labs, Inc.
+00048F TD Systems Corp.
+000490 Optical Access
+000491 Technovision, Inc.
+000492 Hive Internet, Ltd.
+000493 Tsinghua Unisplendour Co., Ltd.
+000494 Breezecom, Ltd.
+000495 Tejas Networks
+000496 Extreme Networks
+000497 MacroSystem Digital Video AG
+000499 Chino Corporation
+00049A Cisco Systems, Inc.
+00049B Cisco Systems, Inc.
+00049C Surgient Networks, Inc.
+00049D Ipanema Technologies
+00049E Wirelink Co., Ltd.
+00049F Metrowerks
+0004A0 Verity Instruments, Inc.
+0004A1 Pathway Connectivity
+0004A2 L.S.I. Japan Co., Ltd.
+0004A3 Microchip Technology, Inc.
+0004A4 NetEnabled, Inc.
+0004A5 Barco Projection Systems NV
+0004A6 SAF Tehnika Ltd.
+0004A7 FabiaTech Corporation
+0004A8 Broadmax Technologies, Inc.
+0004A9 SandStream Technologies, Inc.
+0004AA Jetstream Communications
+0004AB Comverse Network Systems, Inc.
+0004AC IBM CORP.
+0004AD Malibu Networks
+0004AE Liquid Metronics
+0004AF Digital Fountain, Inc.
+0004B0 ELESIGN Co., Ltd.
+0004B1 Signal Technology, Inc.
+0004B2 ESSEGI SRL
+0004B3 Videotek, Inc.
+0004B4 CIAC
+0004B5 Equitrac Corporation
+0004B6 Stratex Networks, Inc.
+0004B7 AMB i.t. Holding
+0004B8 Kumahira Co., Ltd.
+0004B9 S.I. Soubou, Inc.
+0004BA KDD Media Will Corporation
+0004BB Bardac Corporation
+0004BC Giantec, Inc.
+0004BD Motorola BCS
+0004BE OptXCon, Inc.
+0004BF VersaLogic Corp.
+0004C0 Cisco Systems, Inc.
+0004C1 Cisco Systems, Inc.
+0004C2 Magnipix, Inc.
+0004C3 CASTOR Informatique
+0004C4 Allen & Heath Limited
+0004C5 ASE Technologies, USA
+0004C6 Yamaha Motor Co., Ltd.
+0004C7 NetMount
+0004C8 LIBA Maschinenfabrik GmbH
+0004C9 Micro Electron Co., Ltd.
+0004CA FreeMs Corp.
+0004CB Tdsoft Communication, Ltd.
+0004CC Peek Traffic B.V.
+0004CD Informedia Research Group
+0004CE Patria Ailon
+0004CF Seagate Technology
+0004D0 Softlink s.r.o.
+0004D1 Drew Technologies, Inc.
+0004D2 Adcon Telemetry AG
+0004D3 Toyokeiki Co., Ltd.
+0004D4 Proview Electronics Co., Ltd.
+0004D5 Hitachi Communication Systems, Inc.
+0004D6 Takagi Industrial Co., Ltd.
+0004D7 Omitec Instrumentation Ltd.
+0004D8 IPWireless, Inc.
+0004D9 Titan Electronics, Inc.
+0004DA Relax Technology, Inc.
+0004DB Tellus Group Corp.
+0004DC Nortel Networks
+0004DD Cisco Systems, Inc.
+0004DE Cisco Systems, Inc.
+0004DF Teracom Telematica Ltda.
+0004E0 Procket Networks
+0004E1 Infinior Microsystems
+0004E2 SMC Networks, Inc.
+0004E3 Accton Technology Corp.
+0004E4 Daeryung Ind., Inc.
+0004E5 Glonet Systems, Inc.
+0004E6 Banyan Network Private Limited
+0004E7 Lightpointe Communications, Inc
+0004E8 IER, Inc.
+0004E9 Infiniswitch Corporation
+0004EA Hewlett-Packard Company
+0004EB Paxonet Communications, Inc.
+0004EC Memobox SA
+0004ED Billion Electric Co., Ltd.
+0004EE Lincoln Electric Company
+0004EF Polestar Corp.
+0004F0 International Computers, Ltd
+0004F1 WhereNet
+0004F2 Circa Communications, Ltd.
+0004F3 FS FORTH-SYSTEME GmbH
+0004F4 Infinite Electronics Inc.
+0004F5 SnowShore Networks, Inc.
+0004F6 Amphus
+0004F7 Omega Band, Inc.
+0004F8 QUALICABLE TV Industria E Com., Ltda
+0004F9 Xtera Communications, Inc.
+0004FA MIST Inc.
+0004FB Commtech, Inc.
+0004FC Stratus Computer (DE), Inc.
+0004FD Japan Control Engineering Co., Ltd.
+0004FE Pelago Networks
+0004FF Acronet Co., Ltd.
+000500 Cisco Systems, Inc.
+000501 Cisco Systems, Inc.
+000502 APPLE COMPUTER
+000503 ICONAG
+000504 Naray Information & Communication Enterprise
+000505 Systems Integration Solutions, Inc.
+000506 Reddo Networks AB
+000507 Fine Appliance Corp.
+000508 Inetcam, Inc.
+000509 AVOC Nishimura Ltd.
+00050A ICS Spa
+00050B SICOM Systems, Inc.
+00050C Network Photonics, Inc.
+00050D Midstream Technologies, Inc.
+00050E 3ware, Inc.
+00050F Tanaka S/S Ltd.
+000510 Infinite Shanghai Communication Terminals Ltd.
+000511 Complementary Technologies Ltd
+000512 MeshNetworks, Inc.
+000513 VTLinx Multimedia Systems, Inc.
+000514 KDT Systems Co., Ltd.
+000515 Nuark Co., Ltd.
+000516 SMART Modular Technologies
+000517 Shellcomm, Inc.
+000518 Jupiters Technology
+000519 Siemens Building Technologies AG,
+00051A 3Com Europe Ltd.
+00051B Magic Control Technology Corporation
+00051C Xnet Technology Corp.
+00051D Airocon, Inc.
+00051E Brocade Communications Systems, Inc.
+00051F Taijin Media Co., Ltd.
+000520 Smartronix, Inc.
+000521 Control Microsystems
+000522 LEA*D Corporation, Inc.
+000523 AVL List GmbH
+000524 BTL System (HK) Limited
+000525 Puretek Industrial Co., Ltd.
+000526 IPAS GmbH
+000527 SJ Tek Co. Ltd
+000528 New Focus, Inc.
+000529 Shanghai Broadan Communication Technology Co., Ltd
+00052A Ikegami Tsushinki Co., Ltd.
+00052B HORIBA, Ltd.
+00052C Supreme Magic Corporation
+00052D Zoltrix International Limited
+00052E Cinta Networks
+00052F Leviton Voice and Data
+000530 Andiamo Systems, Inc.
+000531 Cisco Systems, Inc.
+000532 Cisco Systems, Inc.
+000533 Sanera Systems, Inc.
+000534 Northstar Engineering Ltd.
+000535 Chip PC Ltd.
+000536 Danam Communications, Inc.
+000537 Nets Technology Co., Ltd.
+000538 Merilus, Inc.
+000539 A Brand New World in Sweden AB
+00053A Willowglen Services Pte Ltd
+00053B Harbour Networks Ltd., Co. Beijing
+00053C Xircom
+00053D Agere Systems
+00053E KID Systeme GmbH
+00053F VisionTek, Inc.
+000540 FAST Corporation
+000541 Advanced Systems Co., Ltd.
+000542 Otari, Inc.
+000543 IQ Wireless GmbH
+000544 Valley Technologies, Inc.
+000545 Internet Photonics
+000546 K-Solutions Inc.
+000547 Starent Networks
+000548 Disco Corporation
+000549 Salira Optical Network Systems
+00054A Ario Data Networks, Inc.
+00054B Micro Innovation AG
+00054C RF Innovations Pty Ltd
+00054D Brans Technologies, Inc.
+00054E Philips Components
+000550 Digi-Tech Communications Limited
+000551 F & S Elektronik Systeme GmbH
+000552 Xycotec Computer GmbH
+000553 DVC Company, Inc.
+000554 Rangestar Wireless
+000555 Japan Cash Machine Co., Ltd.
+000556 360 Systems
+000557 Agile TV Corporation
+000558 Synchronous, Inc.
+000559 Intracom S.A.
+00055A Power Dsine Ltd.
+00055B Charles Industries, Ltd.
+00055C Kowa Company, Ltd.
+00055D D-Link Systems, Inc.
+00055E Cisco Systems, Inc.
+00055F Cisco Systems, Inc.
+000560 LEADER COMM.CO., LTD
+000561 nac Image Technology, Inc.
+000562 Digital View Limited
+000563 J-Works, Inc.
+000564 Tsinghua Bitway Co., Ltd.
+000565 Tailyn Communication Company Ltd.
+000566 Secui.com Corporation
+000567 Etymonic Design, Inc.
+000568 Piltofish Networks AB
+000569 VMWARE, Inc.
+00056A Heuft Systemtechnik GmbH
+00056B C.P. Technology Co., Ltd.
+00056C Hung Chang Co., Ltd.
+00056D Pacific Corporation
+00056E National Enhance Technology, Inc.
+00056F Innomedia Technologies Pvt. Ltd.
+000570 Baydel Ltd.
+000571 Seiwa Electronics Co.
+000572 Deonet Co., Ltd.
+000573 Cisco Systems, Inc.
+000574 Cisco Systems, Inc.
+000575 CDS-Electronics BV
+000576 NSM Technology Ltd.
+000577 SM Information & Communication
+000579 Universal Control Solution Corp.
+00057A Hatteras Networks
+00057B Chung Nam Electronic Co., Ltd.
+00057C RCO Security AB
+00057D Sun Communications, Inc.
+00057E Eckelmann Steuerungstechnik GmbH
+00057F Acqis Technology
+000580 Fibrolan Ltd.
+000581 Snell & Wilcox Ltd.
+000582 ClearCube Technology
+000583 ImageCom Limited
+000584 AbsoluteValue Systems, Inc.
+000585 Juniper Networks, Inc.
+000586 Lucent Technologies
+000587 Locus, Incorporated
+000588 Sensoria Corp.
+000589 National Datacomputer
+00058A Netcom Co., Ltd.
+00058B IPmental, Inc.
+00058C Opentech Inc.
+00058D Lynx Photonic Networks, Inc.
+00058E Flextronics International GmbH & Co. Nfg. KG
+00058F CLCsoft co.
+000590 Swissvoice Ltd.
+000591 Active Silicon Ltd.
+000592 Pultek Corp.
+000593 Grammar Engine Inc.
+000594 IXXAT Automation GmbH
+000595 Alesis Corporation
+000596 Genotech Co., Ltd.
+000597 Eagle Traffic Control Systems
+000598 CRONOS S.r.l.
+000599 DRS Test and Energy Management or DRS-TEM
+00059A Cisco Systems, Inc.
+00059B Cisco Systems, Inc.
+00059C Kleinknecht GmbH, Ing. Buero
+00059D Daniel Computing Systems, Inc.
+00059E Zinwell Corporation
+00059F Yotta Networks, Inc.
+0005A0 MOBILINE Kft.
+0005A1 Zenocom
+0005A2 CELOX Networks
+0005A3 QEI, Inc.
+0005A4 Lucid Voice Ltd.
+0005A5 KOTT
+0005A6 Extron Electronics
+0005A7 Hyperchip, Inc.
+0005A8 WYLE ELECTRONICS
+0005A9 Princeton Networks, Inc.
+0005AA Moore Industries International Inc.
+0005AB Cyber Fone, Inc.
+0005AC Northern Digital, Inc.
+0005AD Topspin Communications, Inc.
+0005AE Mediaport USA
+0005AF InnoScan Computing A/S
+0005B0 Korea Computer Technology Co., Ltd.
+0005B1 ASB Technology BV
+0005B2 Medison Co., Ltd.
+0005B3 Asahi-Engineering Co., Ltd.
+0005B4 Aceex Corporation
+0005B5 Broadcom Technologies
+0005B6 INSYS Microelectronics GmbH
+0005B7 Arbor Technology Corp.
+0005B8 Electronic Design Associates, Inc.
+0005B9 Airvana, Inc.
+0005BA Area Netwoeks, Inc.
+0005BB Myspace AB
+0005BC Resorsys Ltd.
+0005BD ROAX BV
+0005BE Kongsberg Seatex AS
+0005BF JustEzy Technology, Inc.
+0005C0 Digital Network Alacarte Co., Ltd.
+0005C1 A-Kyung Motion, Inc.
+0005C2 Soronti, Inc.
+0005C3 Pacific Instruments, Inc.
+0005C4 Telect, Inc.
+0005C5 Flaga HF
+0005C6 Triz Communications
+0005C7 I/F-COM A/S
+0005C8 VERYTECH
+0005C9 LG Innotek
+0005CA Hitron Technology, Inc.
+0005CB ROIS Technologies, Inc.
+0005CC Sumtel Communications, Inc.
+0005CD Denon, Ltd.
+0005CE Prolink Microsystems Corporation
+0005CF Thunder River Technologies, Inc.
+0005D0 Solinet Systems
+0005D1 Metavector Technologies
+0005D2 DAP Technologies
+0005D3 eProduction Solutions, Inc.
+0005D4 FutureSmart Networks, Inc.
+0005D5 Speedcom Wireless
+0005D6 Titan Wireless
+0005D7 Vista Imaging, Inc.
+0005D8 Arescom, Inc.
+0005D9 Techno Valley, Inc.
+0005DA Apex Automationstechnik
+0005DB Nentec GmbH
+0005DC Cisco Systems, Inc.
+0005DD Cisco Systems, Inc.
+0005DE Gi Fone Korea, Inc.
+0005DF Electronic Innovation, Inc.
+0005E0 Empirix Corp.
+0005E1 Trellis Photonics, Ltd.
+0005E2 Creativ Network Technologies
+0005E3 LightSand Communications, Inc.
+0005E4 Red Lion Controls L.P.
+0005E5 Renishaw PLC
+0005E6 Egenera, Inc.
+0005E7 Netrake Corp.
+0005E8 TurboWave, Inc.
+0005E9 Unicess Network, Inc.
+0005EA Rednix
+0005EB Blue Ridge Networks, Inc.
+0005EC Mosaic Systems Inc.
+0005ED Technikum Joanneum GmbH
+0005EE BEWATOR Group
+0005EF ADOIR Digital Technology
+0005F0 SATEC
+0005F1 Vrcom, Inc.
+0005F2 Power R, Inc.
+0005F3 Weboyn
+0005F4 System Base Co., Ltd.
+0005F5 OYO Geospace Corp.
+0005F6 Young Chang Co. Ltd.
+0005F7 Analog Devices, Inc.
+0005F8 Real Time Access, Inc.
+0005F9 TOA Corporation
+0005FA IPOptical, Inc.
+0005FB ShareGate, Inc.
+0005FC Schenck Pegasus Corp.
+0005FD PacketLight Networks Ltd.
+0005FE Traficon N.V.
+0005FF SNS Solutions, Inc.
+000600 Tokyo Electronic Industry Co., Ltd.
+000601 Otanikeiki Co., Ltd.
+000602 Cirkitech Electronics Co.
+000603 Baker Hughes Inc.
+000604 @Track Communications, Inc.
+000605 Inncom International, Inc.
+000606 RapidWAN, Inc.
+000607 Omni Directional Control Technology Inc.
+000608 At-Sky SAS
+000609 Crossport Systems
+00060A Blue2space
+00060B Paceline Systems Corporation
+00060C Melco Industries, Inc.
+00060D Wave7 Optics
+00060E IGYS Systems, Inc.
+00060F Narad Networks Inc
+000610 Abeona Networks Inc
+000611 Zeus Wireless, Inc.
+000612 Accusys, Inc.
+000613 Kawasaki Microelectronics Incorporated
+000614 Prism Holdings
+000615 Kimoto Electric Co., Ltd.
+000616 Tel Net Co., Ltd.
+000617 Redswitch Inc.
+000618 DigiPower Manufacturing Inc.
+000619 Connection Technology Systems
+00061A Zetari Inc.
+00061B Portable Systems, IBM Japan Co, Ltd
+00061C Hoshino Metal Industries, Ltd.
+00061D MIP Telecom, Inc.
+00061E Maxan Systems
+00061F Vision Components GmbH
+000620 Serial System Ltd.
+000621 Hinox, Co., Ltd.
+000622 Chung Fu Chen Yeh Enterprise Corp.
+000623 MGE UPS Systems France
+000624 Gentner Communications Corp.
+000625 The Linksys Group, Inc.
+000626 MWE GmbH
+000627 Uniwide Technologies, Inc.
+000628 Cisco Systems, Inc.
+000629 IBM CORPORATION
+00062A Cisco Systems, Inc.
+00062B INTRASERVER TECHNOLOGY
+00062C Network Robots, Inc.
+00062D TouchStar Technologies, L.L.C.
+00062E Aristos Logic Corp.
+00062F Pivotech Systems Inc.
+000630 Adtranz Sweden
+000631 Optical Solutions, Inc.
+000632 Mesco Engineering GmbH
+000633 Heimann Biometric Systems GmbH
+000634 GTE Airfone Inc.
+000635 PacketAir Networks, Inc.
+000636 Jedai Broadband Networks
+000637 Toptrend-Meta Information (ShenZhen) Inc.
+000638 Sungjin C&C Co., Ltd.
+000639 Newtec
+00063A Dura Micro, Inc.
+00063B Arcturus Networks, Inc.
+00063C NMI Electronics Ltd
+00063D Microwave Data Systems Inc.
+00063E Opthos Inc.
+00063F Everex Communications Inc.
+000640 White Rock Networks
+000641 ITCN
+000642 Genetel Systems Inc.
+000643 SONO Computer Co., Ltd.
+000644 NEIX Inc.
+000645 Meisei Electric Co. Ltd.
+000646 ShenZhen XunBao Network Technology Co Ltd
+000647 Etrali S.A.
+000648 Seedsware, Inc.
+000649 Quante
+00064A Honeywell Co., Ltd. (KOREA)
+00064B Alexon Co., Ltd.
+00064C Invicta Networks, Inc.
+00064D Sencore
+00064E Broad Net Technology Inc.
+00064F PRO-NETS Technology Corporation
+000650 Tiburon Networks, Inc.
+000651 Aspen Networks Inc.
+000652 Cisco Systems, Inc.
+000653 Cisco Systems, Inc.
+000654 Maxxio Technologies
+000655 Yipee, Inc.
+000656 Tactel AB
+000657 Market Central, Inc.
+000658 Helmut Fischer GmbH & Co. KG
+000659 EAL (Apeldoorn) B.V.
+00065A Strix Systems
+00065B Dell Computer Corp.
+00065C Malachite Technologies, Inc.
+00065D Heidelberg Web Systems
+00065E Photuris, Inc.
+00065F ECI Telecom - NGTS Ltd.
+000660 NADEX Co., Ltd.
+000661 NIA Home Technologies Corp.
+000662 MBM Technology Ltd.
+000663 Human Technology Co., Ltd.
+000664 Fostex Corporation
+000665 Sunny Giken, Inc.
+000666 Roving Networks
+000667 Tripp Lite
+000668 Vicon Industries Inc.
+000669 Datasound Laboratories Ltd
+00066A InfiniCon Systems, Inc.
+00066B Sysmex Corporation
+00066C Robinson Corporation
+00066D Compuprint S.P.A.
+00066E Delta Electronics, Inc.
+00066F Korea Data Systems
+000670 Upponetti Oy
+000671 Softing AG
+000672 Netezza
+000673 Optelecom, Inc.
+000674 Spectrum Control, Inc.
+000675 Banderacom, Inc.
+000676 Novra Technologies Inc.
+000677 SICK AG
+000678 Marantz Japan, Inc.
+000679 Konami Corporation
+00067A JMP Systems
+00067B Toplink C&C Corporation
+00067C CISCO SYSTEMS, INC.
+00067D Takasago Ltd.
+00067E WinCom Systems, Inc.
+00067F Rearden Steel Technologies
+000680 Card Access, Inc.
+000681 Goepel Electronic GmbH
+000682 Convedia
+000683 Bravara Communications, Inc.
+000684 Biacore AB
+000685 NetNearU Corporation
+000686 ZARDCOM Co., Ltd.
+000687 Omnitron Systems Technology, Inc.
+000688 Telways Communication Co., Ltd.
+000689 yLez Technologies Pte Ltd
+00068A NeuronNet Co. Ltd. R&D Center
+00068B AirRunner Technologies, Inc.
+00068C 3Com Corporation
+00068D SANgate Systems
+00068E HID Corporation
+00068F Telemonitor, Inc.
+000690 Euracom Communication GmbH
+000691 PT Inovacao
+000692 Intruvert Networks, Inc.
+000693 Flexus Computer Technology, Inc.
+000694 Mobillian Corporation
+000695 Ensure Technologies, Inc.
+000696 Advent Networks
+000697 R & D Center
+000698 egnite Software GmbH
+000699 Vida Design Co.
+00069A e & Tel
+00069B AVT Audio Video Technologies GmbH
+00069C Transmode Systems AB
+00069D Petards Mobile Intelligence
+00069E UNIQA, Inc.
+00069F Kuokoa Networks
+0006A0 Mx Imaging
+0006A1 Celsian Technologies, Inc.
+0006A2 Microtune, Inc.
+0006A3 Bitran Corporation
+0006A4 INNOWELL Corp.
+0006A5 PINON Corp.
+0006A6 Artistic Licence (UK) Ltd
+0006A7 Primarion
+0006A8 KC Technology, Inc.
+0006A9 Universal Instruments Corp.
+0006AA Miltope Corporation
+0006AB W-Link Systems, Inc.
+0006AC Intersoft Co.
+0006AD KB Electronics Ltd.
+0006AE Himachal Futuristic Communications Ltd
+0006B0 Comtech EF Data Corp.
+0006B1 Sonicwall
+0006B2 Linxtek Co.
+0006B3 Diagraph Corporation
+0006B4 Vorne Industries, Inc.
+0006B5 Luminent, Inc.
+0006B6 Nir-Or Israel Ltd.
+0006B7 TELEM GmbH
+0006B8 Bandspeed Pty Ltd
+0006B9 A5TEK Corp.
+0006BA Westwave Communications
+0006BB ATI Technologies Inc.
+0006BC Macrolink, Inc.
+0006BD BNTECHNOLOGY Co., Ltd.
+0006BE Baumer Optronic GmbH
+0006BF Accella Technologies Co., Ltd.
+0006C0 United Internetworks, Inc.
+0006C1 CISCO SYSTEMS, INC.
+0006C2 Smartmatic Corporation
+0006C3 Schindler Elevators Ltd.
+0006C4 Piolink Inc.
+0006C5 INNOVI Technologies Limited
+0006C6 lesswire AG
+0006C7 RFNET Technologies Pte Ltd (S)
+0006C8 Sumitomo Metal Micro Devices, Inc.
+0006C9 Technical Marketing Research, Inc.
+0006CA American Computer & Digital Components, Inc. (ACDC)
+0006CB Jotron Electronics A/S
+0006CC JMI Electronics Co., Ltd.
+0006CD CreoScitex Corporation Ltd.
+0006CE DATENO
+0006CF Thales Avionics In-Flight Systems, LLC
+0006D0 Elgar Electronics Corp.
+0006D1 Tahoe Networks, Inc.
+0006D2 Tundra Semiconductor Corp.
+0006D3 Alpha Telecom, Inc. U.S.A.
+0006D4 Interactive Objects, Inc.
+0006D5 Diamond Systems Corp.
+0006D6 Cisco Systems, Inc.
+0006D7 Cisco Systems, Inc.
+0006D8 Maple Optical Systems
+0006D9 IPM-Net S.p.A.
+0006DA ITRAN Communications Ltd.
+0006DB ICHIPS Co., Ltd.
+0006DC Syabas Technology (Amquest)
+0006DD AT & T Laboratories - Cambridge Ltd
+0006DE Flash Technology
+0006DF AIDONIC Corporation
+0006E0 MAT Co., Ltd.
+0006E1 Techno Trade s.a
+0006E2 Ceemax Technology Co., Ltd.
+0006E3 Quantitative Imaging Corporation
+0006E4 Citel Technologies Ltd.
+0006E5 Fujian Newland Computer Ltd. Co.
+0006E6 DongYang Telecom Co., Ltd.
+0006E7 Bit Blitz Communications Inc.
+0006E8 Optical Network Testing, Inc.
+0006E9 Intime Corp.
+0006EA ELZET80 Mikrocomputer GmbH&Co. KG
+0006EB Global Data
+0006EC M/A COM Private Radio System Inc.
+0006ED Inara Networks
+0006EE Shenyang Neu-era Information & Technology Stock Co., Ltd
+0006EF Maxxan Systems, Inc.
+0006F0 Digeo, Inc.
+0006F1 Optillion
+0006F2 Platys Communications
+0006F3 AcceLight Networks
+0006F4 Prime Electronics & Satellitics Inc.
+0006F9 Mitsui Zosen Systems Research Inc.
+0006FA IP SQUARE Co, Ltd.
+0006FB Hitachi Printing Solutions, Ltd.
+0006FC Fnet Co., Ltd.
+0006FD Comjet Information Systems Corp.
+0006FE Celion Networks, Inc.
+0006FF Sheba Systems Co., Ltd.
+000700 Zettamedia Korea
+000701 RACAL-DATACOM
+000702 Varian Medical Systems
+000703 CSEE Transport
+000705 Endress & Hauser GmbH & Co
+000706 Sanritz Corporation
+000707 Interalia Inc.
+000708 Bitrage Inc.
+000709 Westerstrand Urfabrik AB
+00070A Unicom Automation Co., Ltd.
+00070B Octal, SA
+00070C SVA-Intrusion.com Co. Ltd.
+00070D Cisco Systems Inc.
+00070E Cisco Systems Inc.
+00070F Fujant, Inc.
+000710 Adax, Inc.
+000711 Acterna
+000712 JAL Information Technology
+000713 IP One, Inc.
+000714 Brightcom
+000715 General Research of Electronics, Inc.
+000716 J & S Marine Ltd.
+000717 Wieland Electric GmbH
+000718 iCanTek Co., Ltd.
+000719 Mobiis Co., Ltd.
+00071A Finedigital Inc.
+00071B Position Technology Inc.
+00071C AT&T Fixed Wireless Services
+00071D Satelsa Sistemas Y Aplicaciones De Telecomunicaciones, S.A.
+00071E Tri-M Engineering / Nupak Dev. Corp.
+00071F European Systems Integration
+000720 Trutzschler GmbH & Co. KG
+000721 Formac Elektronik GmbH
+000722 Nielsen Media Research
+000723 ELCON Systemtechnik GmbH
+000724 Telemax Co., Ltd.
+000725 Bematech International Corp.
+000727 Zi Corporation (HK) Ltd.
+000728 Neo Telecom
+000729 Kistler Instrumente AG
+00072A Innovance Networks
+00072B Jung Myung Telecom Co., Ltd.
+00072C Fabricom
+00072D CNSystems
+00072E North Node AB
+00072F Instransa, Inc.
+000730 Hutchison OPTEL Telecom Technology Co., Ltd.
+000731 Spiricon, Inc.
+000732 AAEON Technology Inc.
+000733 DANCONTROL Engineering
+000734 ONStor, Inc.
+000735 Flarion Technologies, Inc.
+000736 Data Video Technologies Co., Ltd.
+000737 Soriya Co. Ltd.
+000738 Young Technology Co., Ltd.
+000739 Motion Media Technology Ltd.
+00073A Inventel Systemes
+00073B Tenovis GmbH & Co KG
+00073C Telecom Design
+00073D Nanjing Postel Telecommunications Co., Ltd.
+00073E China Great-Wall Computer Shenzhen Co., Ltd.
+00073F Woojyun Systec Co., Ltd.
+000740 Melco Inc.
+000741 Sierra Automated Systems
+000742 Current Technologies
+000743 Chelsio Communications
+000744 Unico, Inc.
+000745 Radlan Computer Communications Ltd.
+000746 Interlink BT, LLC
+000747 Mecalc
+000748 The Imaging Source Europe
+000749 CENiX Inc.
+00074A Carl Valentin GmbH
+00074B Daihen Corporation
+00074C Beicom Inc.
+00074D Zebra Technologies Corp.
+00074E Naughty boy co., Ltd.
+00074F Cisco Systems, Inc.
+000750 Cisco Systems, Inc.
+000751 m.u.t. - GmbH
+000752 Rhythm Watch Co., Ltd.
+000753 Beijing Qxcomm Technology Co., Ltd.
+000754 Xyterra Computing, Inc.
+000755 Lafon SA
+000756 Juyoung Telecom
+000757 Topcall International AG
+000758 Dragonwave
+000759 Boris Manufacturing Corp.
+00075A Air Products and Chemicals, Inc.
+00075B Gibson Guitars
+00075C ENCAD, Inc.
+00075D Celleritas Inc.
+00075E Pulsar Technologies, Inc.
+00075F VCS Video Communication Systems AG
+000760 TOMIS Information & Telecom Corp.
+000761 Logitech SA
+000762 Group Sense Limited
+000763 Sunniwell Cyber Tech. Co., Ltd.
+000764 YoungWoo Telecom Co. Ltd.
+000765 Jade Quantum Technologies, Inc.
+000766 Chou Chin Industrial Co., Ltd.
+000767 Yuxing Electronics Company Limited
+000768 Danfoss A/S
+000769 Italiana Macchi SpA
+00076A NEXTEYE Co., Ltd.
+00076B Stralfors AB
+00076C Daehanet, Inc.
+00076D Flexlight Networks
+00076E Sinetica Corporation Ltd.
+00076F Synoptics Limited
+000770 Locusnetworks Corporation
+000771 Embedded System Corporation
+000772 Alcatel Shanghai Bell Co., Ltd.
+000773 Ascom Powerline Communications Ltd.
+000774 GuangZhou Thinker Technology Co. Ltd.
+000775 Valence Semiconductor, Inc.
+000776 Federal APD
+000777 Motah Ltd.
+000778 GERSTEL GmbH & Co. KG
+000779 Sungil Telecom Co., Ltd.
+00077A Infoware System Co., Ltd.
+00077B Millimetrix Broadband Networks
+00077C OnTime Networks
+00077E Elrest GmbH
+00077F J Communications Co., Ltd.
+000780 Bluegiga Technologies OY
+000781 Itron Inc.
+000782 Nauticus Networks, Inc.
+000783 SynCom Network, Inc.
+000784 Cisco Systems Inc.
+000785 Cisco Systems Inc.
+000786 Wireless Networks Inc.
+000787 Idea System Co., Ltd.
+000788 Clipcomm, Inc.
+000789 Eastel Systems Corporation
+00078A Mentor Data System Inc.
+00078B Wegener Communications, Inc.
+00078C Elektronikspecialisten i Borlange AB
+00078D NetEngines Ltd.
+00078E Garz & Friche GmbH
+00078F Emkay Innovative Products
+000790 Tri-M Technologies (s) Limited
+000791 International Data Communications, Inc.
+000792 Suetron Electronic GmbH
+000794 Simple Devices, Inc.
+000795 Elitegroup Computer System Co. (ECS)
+000796 LSI Systems, Inc.
+000797 Netpower Co., Ltd.
+000798 Selea SRL
+000799 Tipping Point Technologies, Inc.
+00079A SmartSight Networks Inc.
+00079B Aurora Networks
+00079C Golden Electronics Technology Co., Ltd.
+00079D Musashi Co., Ltd.
+00079E Ilinx Co., Ltd.
+00079F Action Digital Inc.
+0007A0 e-Watch Inc.
+0007A1 VIASYS Healthcare GmbH
+0007A2 Opteon Corporation
+0007A3 Ositis Software, Inc.
+0007A4 GN Netcom Ltd.
+0007A5 Y.D.K Co. Ltd.
+0007A6 Home Automation, Inc.
+0007A7 A-Z Inc.
+0007A8 Haier Group Technologies Ltd.
+0007A9 Novasonics
+0007AA Quantum Data Inc.
+0007AC Eolring
+0007AD Pentacon GmbH Foto-und Feinwerktechnik
+0007AE Layer N Networks
+0007AF N-Tron Corp.
+0007B0 Office Details, Inc.
+0007B1 Equator Technologies
+0007B2 Transaccess S.A.
+0007B3 Cisco Systems Inc.
+0007B4 Cisco Systems Inc.
+0007B5 Any One Wireless Ltd.
+0007B6 Telecom Technology Ltd.
+0007B7 Samurai Ind. Prods Eletronicos Ltda
+0007B8 American Predator Corp.
+0007B9 Ginganet Corporation
+0007BA Xebeo Communications, Inc.
+0007BB Candera Inc.
+0007BC Identix Inc.
+0007BD Radionet Ltd.
+0007BE DataLogic SpA
+0007BF Armillaire Technologies, Inc.
+0007C0 NetZerver Inc.
+0007C1 Overture Networks, Inc.
+0007C2 Netsys Telecom
+0007C3 Cirpack
+0007C4 JEAN Co. Ltd.
+0007C5 Gcom, Inc.
+0007C6 VDS Vosskuhler GmbH
+0007C7 Synectics Systems Limited
+0007C8 Brain21, Inc.
+0007C9 Technol Seven Co., Ltd.
+0007CA Creatix Polymedia Ges Fur Kommunikaitonssysteme
+0007CB Freebox SA
+0007CC Kaba Benzing GmbH
+0007CD NMTEL Co., Ltd.
+0007CE Cabletime Limited
+0007CF Anoto AB
+0007D0 Automat Engenharia de Automaoa Ltda.
+0007D1 Spectrum Signal Processing Inc.
+0007D2 Logopak Systeme
+0007D3 Stork Digital Imaging B.V.
+0007D4 Zhejiang Yutong Network Communication Co Ltd.
+0007D5 3e Technologies Int;., Inc.
+0007D6 Commil Ltd.
+0007D7 Caporis Networks AG
+0007D8 Hitron Systems Inc.
+0007D9 Splicecom
+0007DA Neuro Telecom Co., Ltd.
+0007DB Kirana Networks, Inc.
+0007DC Atek Co, Ltd.
+0007DD Cradle Technologies
+0007DE eCopilt AB
+0007DF Vbrick Systems Inc.
+0007E0 Palm Inc.
+0007E1 WIS Communications Co. Ltd.
+0007E2 Bitworks, Inc.
+0007E3 Navcom Technology, Inc.
+0007E4 SoftRadio Co., Ltd.
+0007E5 Coup Corporation
+0007E6 edgeflow Canada Inc.
+0007E7 FreeWave Technologies
+0007E8 St. Bernard Software
+0007E9 Intel Corporation
+0007EA Massana, Inc.
+0007EB Cisco Systems Inc.
+0007EC Cisco Systems Inc.
+0007ED Altera Corporation
+0007EE telco Informationssysteme GmbH
+0007EF Lockheed Martin Tactical Systems
+0007F0 LogiSync Corporation
+0007F1 TeraBurst Networks Inc.
+0007F2 IOA Corporation
+0007F3 Think Engine Networks
+0007F4 Eletex Co., Ltd.
+0007F5 Bridgeco Co AG
+0007F6 Qqest Software Systems
+0007F7 Galtronics
+0007F8 ITDevices, Inc.
+0007F9 Phonetics, Inc.
+0007FA ITT Co., Ltd.
+0007FB Giga Stream UMTS Technologies GmbH
+0007FC Adept Systems Inc.
+0007FD LANergy Ltd.
+0007FE Rigaku Corporation
+0007FF Gluon Networks
+000800 MULTITECH SYSTEMS, INC.
+000801 HighSpeed Surfing Inc.
+000802 Compaq Computer Corporation
+000803 Cos Tron
+000804 ICA Inc.
+000805 Techno-Holon Corporation
+000806 Raonet Systems, Inc.
+000807 Access Devices Limited
+000808 PPT Vision, Inc.
+000809 Systemonic AG
+00080A Espera-Werke GmbH
+00080B Birka BPA Informationssystem AB
+00080C VDA elettronica SrL
+00080D Toshiba
+00080E Motorola, BCS
+00080F Proximion Fiber Optics AB
+000810 Key Technology, Inc.
+000811 VOIX Corporation
+000812 GM-2 Corporation
+000813 Diskbank, Inc.
+000814 TIL Technologies
+000815 CATS Co., Ltd.
+000816 Bluetags A/S
+000817 EmergeCore Networks LLC
+000818 Pixelworks, Inc.
+000819 Banksys
+00081A Sanrad Intelligence Storage Communications (2000) Ltd.
+00081B Windigo Systems
+00081C @pos.com
+00081D Ipsil, Incorporated
+00081E Repeatit AB
+00081F Pou Yuen Tech Corp. Ltd.
+000820 Cisco Systems Inc.
+000821 Cisco Systems Inc.
+000822 InPro Comm
+000823 Texa Corp.
+000824 Promatek Industries Ltd.
+000825 Acme Packet
+000826 Colorado Med Tech
+000827 Pirelli Cables & Systems
+000828 Koei Engineering Ltd.
+000829 Aval Nagasaki Corporation
+00082A Powerwallz Network Security
+00082B Wooksung Electronics, Inc.
+00082C Homag AG
+00082D Indus Teqsite Private Limited
+00082E Multitone Electronics PLC
+00084E DivergeNet, Inc.
+00084F Qualstar Corporation
+000850 Arizona Instrument Corp.
+000851 Canadian Bank Note Company, Ltd.
+000852 Davolink Co. Inc.
+000853 Schleicher GmbH & Co. Relaiswerke KG
+000854 Netronix, Inc.
+000855 NASA-Goddard Space Flight Center
+000856 Gamatronic Electronic Industries Ltd.
+000857 Polaris Networks, Inc.
+000858 Novatechnology Inc.
+000859 ShenZhen Unitone Electronics Co., Ltd.
+00085A IntiGate Inc.
+00085B Hanbit Electronics Co., Ltd.
+00085C Shanghai Dare Technologies Co. Ltd.
+00085D Aastra
+00085E PCO AG
+00085F Picanol N.V.
+000860 LodgeNet Entertainment Corp.
+000861 SoftEnergy Co., Ltd.
+000862 NEC Eluminant Technologies, Inc.
+000863 Entrisphere Inc.
+000864 Fasy S.p.A.
+000865 JASCOM CO., LTD
+000866 DSX Access Systems, Inc.
+000867 Uptime Devices
+000868 PurOptix
+000869 Command-e Technology Co.,Ltd.
+00086A Industrie Technik IPS GmbH
+00086B MIPSYS
+00086C Plasmon LMS
+00086D Missouri FreeNet
+00086E Hyglo AB
+00086F Resources Computer Network Ltd.
+000870 Rasvia Systems, Inc.
+000871 NORTHDATA Co., Ltd.
+000872 Sorenson Technologies, Inc.
+000873 DAP Design B.V.
+000874 Dell Computer Corp.
+000875 Acorp Electronics Corp.
+000876 SDSystem
+000877 Liebert HIROSS S.p.A.
+000878 Benchmark Storage Innovations
+000879 CEM Corporation
+00087A Wipotec GmbH
+00087B RTX Telecom A/S
+00087C Cisco Systems, Inc.
+00087D Cisco Systems Inc.
+00087E Bon Electro-Telecom Inc.
+00087F SPAUN electronic GmbH & Co. KG
+000880 BroadTel Canada Communications inc.
+000881 DIGITAL HANDS CO.,LTD.
+000882 SIGMA CORPORATION
+000883 Hewlett-Packard Company
+000884 Index Braille AB
+000885 EMS Dr. Thomas Wuensche
+000886 Hansung Teliann, Inc.
+000887 Maschinenfabrik Reinhausen GmbH
+000888 OULLIM Information Technology Inc,.
+000889 Echostar Technologies Corp
+00088A Minds@Work
+00088B Tropic Networks Inc.
+00088C Quanta Network Systems Inc.
+00088D Sigma-Links Inc.
+00088E Nihon Computer Co., Ltd.
+00088F ADVANCED DIGITAL TECHNOLOGY
+000890 AVILINKS SA
+000891 Lyan Inc.
+000892 EM Solutions
+000894 InnoVISION Multimedia Ltd.
+000895 DIRC Technologie GmbH & Co.KG
+000896 Printronix, Inc.
+000897 Quake Technologies
+000898 Gigabit Optics Corporation
+000899 Netbind, Inc.
+00089A Alcatel Microelectronics
+00089B ICP Electronics Inc.
+00089C Elecs Industry Co., Ltd.
+00089D UHD-Elektronik
+00089E Beijing Enter-Net co.LTD
+00089F EFM Networks
+0008A0 Stotz Feinmesstechnik GmbH
+0008A1 CNet Technology Inc.
+0008A2 ADI Engineering, Inc.
+0008A3 Cisco Systems
+0008A4 Cisco Systems
+0008A5 Peninsula Systems Inc.
+0008A6 Multiware & Image Co., Ltd.
+0008A7 iLogic Inc.
+0008A8 Systec Co., Ltd.
+0008A9 SangSang Technology, Inc.
+0008AA KARAM
+0008AB EnerLinx.com, Inc.
+0008AD Toyo-Linx Co., Ltd.
+0008AE Packetfront
+0008AF Novatec Corporation
+0008B0 BKtel communications GmbH
+0008B1 ProQuent Systems
+0008B2 SHENZHEN COMPASS TECHNOLOGY DEVELOPMENT CO.,LTD
+0008B3 Fastwel
+0008B4 SYSPOL
+0008B5 TAI GUEN ENTERPRISE CO., LTD
+0008B6 RouteFree, Inc.
+0008B7 HIT Incorporated
+0008B8 E.F. Johnson
+0008B9 KAON MEDIA Co., Ltd.
+0008BA Erskine Systems Ltd
+0008BB NetExcell
+0008BC Ilevo AB
+0008BD TEPG-US
+0008BE XENPAK MSA Group
+0008BF Aptus Elektronik AB
+0008C0 ASA SYSTEMS
+0008C1 Avistar Communications Corporation
+0008C2 Cisco Systems
+0008C3 Contex A/S
+0008C4 Hikari Co.,Ltd.
+0008C5 Liontech Co., Ltd.
+0008C6 Philips Consumer Communications
+0008C7 COMPAQ COMPUTER CORPORATION
+0008C8 Soneticom, Inc.
+0008C9 TechniSat Digital GmbH
+0008CA TwinHan Technology Co.,Ltd
+0008CB Zeta Broadband Inc.
+0008CC Remotec, Inc.
+0008CD With-Net Inc
+0008CF Nippon Koei Power Systems Co., Ltd.
+0008D0 Musashi Engineering Co., LTD.
+0008D1 KAREL INC.
+0008D2 ZOOM Networks Inc.
+0008D3 Hercules Technologies S.A.
+0008D4 IneoQuest Technologies, Inc
+0008D5 Vanguard Managed Solutions
+0008D6 HASSNET Inc.
+0008D7 HOW CORPORATION
+0008D8 Dowkey Microwave
+0008D9 Mitadenshi Co.,LTD
+0008DA SofaWare Technologies Ltd.
+0008DB Corrigent Systems
+0008DC Wiznet
+0008DD Telena Communications, Inc.
+0008DE 3UP Systems
+0008DF Alistel Inc.
+0008E0 ATO Technology Ltd.
+0008E1 Barix AG
+0008E2 Cisco Systems
+0008E3 Cisco Systems
+0008E4 Envenergy Inc
+0008E5 IDK Corporation
+0008E6 Littlefeet
+0008E7 SHI ControlSystems,Ltd.
+0008E8 Excel Master Ltd.
+0008E9 NextGig
+0008EA Motion Control Engineering, Inc
+0008EB ROMWin Co.,Ltd.
+0008EC Zonu, Inc.
+0008ED ST&T Instrument Corp.
+0008EE Logic Product Development
+0008EF DIBAL,S.A.
+0008F0 Next Generation Systems, Inc.
+0008F1 Voltaire
+0008F2 C&S Technology
+0008F3 WANY
+0008F4 Bluetake Technology Co., Ltd.
+0008F5 YESTECHNOLOGY Co.,Ltd.
+0008F6 SUMITOMO ELECTRIC HIGHTECHS.co.,ltd.
+0008F7 Hitachi Ltd, Semiconductor & Integrated Circuits Gr
+0008F8 Guardall Ltd
+0008F9 Padcom, Inc.
+0008FA Karl E.Brinkmann GmbH
+0008FB SonoSite, Inc.
+0008FC Gigaphoton Inc.
+0008FD BlueKorea Co., Ltd.
+0008FE UNIK C&C Co.,Ltd.
+0008FF Trilogy Broadcast (Holdings) Ltd
+000900 TMT
+000901 Shenzhen Shixuntong Information & Technoligy Co
+000902 Redline Communications Inc.
+000903 Panasas, Inc
+000904 MONDIAL electronic
+000905 iTEC Technologies Ltd.
+000906 Esteem Networks
+000907 Chrysalis Development
+000908 VTech Technology Corp.
+000909 Telenor Connect A/S
+00090A SnedFar Technology Co., Ltd.
+00090B MTL  Instruments PLC
+00090C Mayekawa Mfg. Co. Ltd.
+00090D LEADER ELECTRONICS CORP.
+00090E Helix Technology Inc.
+00090F Fortinet Inc.
+000910 Simple Access Inc.
+000911 Cisco Systems
+000912 Cisco Systems
+000914 COMPUTROLS INC.
+000915 CAS Corp.
+000916 Listman Home Technologies, Inc.
+000917 WEM Technology Inc
+000918 SAMSUNG TECHWIN CO.,LTD
+000919 MDS Gateways
+00091A Macat Optics & Electronics Co., Ltd.
+00091B Digital Generation Inc.
+00091C CacheVision, Inc
+00091D Proteam Computer Corporation
+00091E Firstech Technology Corp.
+00091F A&D Co., Ltd.
+000920 EpoX COMPUTER CO.,LTD.
+000921 Planmeca Oy
+000922 Touchless Sensor Technology AG
+000923 Heaman System Co., Ltd
+000924 Telebau GmbH
+000925 VSN Systemen BV
+000926 YODA COMMUNICATIONS, INC.
+000927 TOYOKEIKI CO.,LTD.
+000928 Telecore Inc
+000929 Sanyo Industries (UK) Limited
+00092A MYTECS Co.,Ltd.
+00092B iQstor Networks, Inc.
+00092C Hitpoint Inc.
+00092D High Tech Computer, Corp.
+00092E B&Tech System Inc.
+00092F Akom Technology Corporation
+000930 AeroConcierge Inc.
+000931 Future Internet, Inc.
+000932 Omnilux
+000933 OPTOVALLEY Co. Ltd.
+000934 Dream-Multimedia-Tv GmbH
+000935 Sandvine Incorporated
+000936 Ipetronik GmbH & Co.KG
+000937 Inventec Appliance Corp
+000938 Allot Communications
+000939 ShibaSoku Co.,Ltd.
+00093A Molex Fiber Optics
+00093B HYUNDAI NETWORKS INC.
+00093C Jacques Technologies P/L
+00093D Newisys,Inc.
+00093E C&I Technologies
+00093F Double-Win Enterpirse CO., LTD
+000940 AGFEO GmbH & Co. KG
+000941 Allied Telesis K.K.
+000942 CRESCO, LTD.
+000943 Cisco Systems
+000944 Cisco Systems
+000945 Palmmicro Communications Inc
+000946 Cluster Labs GmbH
+000947 Aztek, Inc.
+000948 Vista Control Systems, Corp.
+000949 Glyph Technologies Inc.
+00094A Homenet Communications
+00094B FillFactory NV
+00094C Communication Weaver Co.,Ltd.
+00094D Braintree Communications Pty Ltd
+00094E BARTECH SYSTEMS INTERNATIONAL, INC
+00094F elmegt GmbH & Co. KG
+000950 Independent Storage Corporation
+000951 Apogee Instruments, Inc
+000952 Auerswald GmbH & Co. KG
+000953 Linkage System Integration Co.Ltd.
+000954 AMiT spol. s. r. o.
+000955 Young Generation International Corp.
+000956 Network Systems Group, Ltd. (NSG)
+000957 Supercaller, Inc.
+000958 INTELNET S.A.
+000959 Sitecsoft
+00095A RACEWOOD TECHNOLOGY
+00095B Netgear, Inc.
+00095C Philips Medical Systems - Cardiac and Monitoring Systems (CM
+00095D Dialogue Technology Corp.
+00095E Masstech Group Inc.
+00095F Telebyte, Inc.
+000960 YOZAN Inc.
+000961 Switchgear and Instrumentation Ltd
+000962 Filetrac AS
+000963 Dominion Lasercom Inc.
+000964 Hi-Techniques
+000966 Thales Navigation
+000967 Tachyon, Inc
+000968 TECHNOVENTURE, INC.
+000969 Meret Optical Communications
+00096A Cloverleaf Communications Inc.
+00096B IBM Corporation
+00096C Imedia Semiconductor Corp.
+00096D Powernet Technologies Corp.
+00096E GIANT ELECTRONICS LTD.
+00096F Beijing Zhongqing Elegant Tech. Corp.,Limited
+000970 Vibration Research Corporation
+000971 Time Management, Inc.
+000972 Securebase,Inc
+000973 Lenten Technology Co., Ltd.
+000974 Innopia Technologies, Inc.
+000975 fSONA Communications Corporation
+000976 Datasoft ISDN Systems GmbH
+000977 Brunner Elektronik AG
+000978 AIJI System Co., Ltd.
+000979 Advanced Television Systems Committee, Inc.
+00097A Louis Design Labs.
+00097B Cisco Systems
+00097C Cisco Systems
+00097D SecWell Networks Oy
+00097E IMI TECHNOLOGY CO., LTD
+00097F Vsecure 2000 LTD.
+000980 Power Zenith Inc.
+000981 Newport Networks
+000982 Loewe Opta GmbH
+000983 Gvision Incorporated
+000984 MyCasa Network Inc.
+000985 Auto Telecom Company
+000986 Metalink LTD.
+000987 NISHI NIPPON ELECTRIC WIRE & CABLE CO.,LTD.
+000988 Nudian Electron Co., Ltd.
+000989 VividLogic Inc.
+00098A EqualLogic Inc
+00098B Entropic Communications, Inc.
+00098C Possio AB
+00098D DCT Ltd (Digital Communication Technologies Ltd)
+00098E ipcas GmbH
+00098F Cetacean Networks
+000990 ACKSYS Communications & systems
+000991 GE Fanuc Automation Manufacturing, Inc.
+000992 InterEpoch Technology,INC.
+000993 Visteon Corporation
+000994 Cronyx Engineering
+000995 Castle Technology Ltd
+000996 RDI
+000997 Nortel Networks
+000998 Capinfo Company Limited
+000999 CP GEORGES RENAULT
+00099A ELMO COMPANY, LIMITED
+00099B Western Telematic Inc.
+00099C Naval Research Laboratory
+00099D Haliplex Communications
+00099E Testech, Inc.
+00099F VIDEX INC.
+0009A0 Microtechno Corporation
+0009A1 Telewise Communications, Inc.
+0009A2 Interface Co., Ltd.
+0009A3 Leadfly Techologies Corp. Ltd.
+0009A4 HARTEC Corporation
+0009A5 HANSUNG ELETRONIC INDUSTRIES DEVELOPMENT CO., LTD
+0009A6 Ignis Optics, Inc.
+0009A7 Bang & Olufsen A/S
+0009A8 Eastmode Pte Ltd
+0009A9 Ikanos Communications
+0009AA Data Comm for Business, Inc.
+0009AB Netcontrol Oy
+0009AC LANVOICE
+0009AD HYUNDAI SYSCOMM, INC.
+0009AE OKANO ELECTRIC CO.,LTD
+0009AF e-generis
+0009B0 Onkyo Corporation
+0009B1 Kanematsu Electronics, Ltd.
+0009B2 L&F Inc.
+0009B3 MCM Systems Ltd
+0009B4 KISAN TELECOM CO., LTD.
+0009B5 3J Tech. Co., Ltd.
+0009B6 Cisco Systems
+0009B7 Cisco Systems
+0009B8 Entise Systems
+0009B9 Action Imaging Solutions
+0009BA MAKU Informationstechik GmbH
+0009BB MathStar, Inc.
+0009BC Digital Safety Technologies Inc.
+0009BD Epygi Technologies, Ltd.
+0009BE Mamiya-OP Co.,Ltd.
+0009BF Nintendo Co.,Ltd.
+0009C0 6WIND
+0009C1 PROCES-DATA A/S
+0009C3 NETAS
+0009C4 Medicore Co., Ltd
+0009C5 KINGENE Technology Corporation
+0009C6 Visionics Corporation
+0009C7 Movistec
+0009C8 SINAGAWA TSUSHIN KEISOU SERVICE
+0009C9 BlueWINC Co., Ltd.
+0009CA iMaxNetworks(Shenzhen)Limited.
+0009CB HBrain
+0009CC Moog GmbH
+0009CD HUDSON SOFT CO.,LTD.
+0009CE SpaceBridge Semiconductor Corp.
+0009CF iAd GmbH
+0009D0 Versatel Networks
+0009D1 SERANOA NETWORKS INC
+0009D2 Mai Logic Inc.
+0009D3 Western DataCom Co., Inc.
+0009D4 Transtech Networks
+0009D5 Signal Communication, Inc.
+0009D6 KNC One GmbH
+0009D7 DC Security Products
+0009D9 Neoscale Systems, Inc
+0009DA Control Module Inc.
+0009DB eSpace
+0009DC Galaxis Technology AG
+0009DD Mavin Technology Inc.
+0009DE Samjin Information & Communications Co., Ltd.
+0009DF Vestel Komunikasyon Sanayi ve Ticaret A.S.
+0009E0 XEMICS S.A.
+0009E1 Gemtek Technology Co., Ltd.
+0009E2 Sinbon Electronics Co., Ltd.
+0009E3 Angel Iglesias S.A.
+0009E4 K Tech Infosystem Inc.
+0009E5 Hottinger Baldwin Messtechnik GmbH
+0009E6 Cyber Switching Inc.
+0009E7 ADC Techonology
+0009E8 Cisco Systems
+0009E9 Cisco Systems
+0009EA YEM Inc.
+0009EB HuMANDATA LTD.
+0009EC Daktronics, Inc.
+0009ED CipherOptics
+0009EE MEIKYO ELECTRIC CO.,LTD
+0009EF Vocera Communications
+0009F0 Shimizu Technology Inc.
+0009F1 Yamaki Electric Corporation
+0009F2 Cohu, Inc., Electronics Division
+0009F3 WELL Communication Corp.
+0009F4 Alcon Laboratories, Inc.
+0009F5 Emerson Network Power Co.,Ltd
+0009F6 Shenzhen Eastern Digital Tech Ltd.
+0009F7 SED, a division of Calian
+0009F8 UNIMO TECHNOLOGY CO., LTD.
+0009F9 ART JAPAN CO., LTD.
+0009FB Philips Medizinsysteme Boeblingen GmbH
+0009FC IPFLEX Inc.
+0009FD Ubinetics Limited
+0009FE Daisy Technologies, Inc.
+0009FF X.net 2000 GmbH
+000A00 Mediatek Corp.
+000A01 SOHOware, Inc.
+000A02 ANNSO CO., LTD.
+000A03 ENDESA SERVICIOS, S.L.
+000A04 3Com Europe Ltd
+000A05 Widax Corp.
+000A06 Teledex LLC
+000A07 WebWayOne Ltd
+000A08 ALPINE ELECTRONICS, INC.
+000A09 TaraCom Integrated Products, Inc.
+000A0A SUNIX Co., Ltd.
+000A0B Sealevel Systems, Inc.
+000A0C Scientific Research Corporation
+000A0D MergeOptics GmbH
+000A0E Invivo Research Inc.
+000A0F Ilryung Telesys, Inc
+000A10 FAST media integrations AG
+000A11 ExPet Technologies, Inc
+000A12 Azylex Technology, Inc
+000A13 Silent Witness
+000A14 TECO a.s.
+000A15 Silicon Data, Inc
+000A16 Lassen Research
+000A17 NESTAR COMMUNICATIONS, INC
+000A18 Vichel Inc.
+000A19 Valere Power, Inc.
+000A1A Imerge Ltd
+000A1B Stream Labs
+000A1C Bridge Information Co., Ltd.
+000A1D Optical Communications Products Inc.
+000A1E Red-M (Communications) Limited
+000A1F ART WARE Telecommunication Co., Ltd.
+000A20 SVA Networks, Inc.
+000A21 Integra Telecom Co. Ltd
+000A22 Amperion Inc
+000A23 Parama Networks Inc
+000A24 Octave Communications
+000A25 CERAGON NETWORKS
+000A26 CEIA S.p.A.
+000A27 Apple Computer, Inc.
+000A28 Motorola
+000A29 Pan Dacom Networking AG
+000A2A QSI Systems Inc.
+000A2B Etherstuff
+000A2C Active Tchnology Corporation
+000A2E MAPLE NETWORKS CO., LTD
+000A2F Artnix Inc.
+000A30 Johnson Controls-ASG
+000A31 HCV Wireless
+000A32 Xsido Corporation
+000A33 Sierra Logic, Inc.
+000A34 Identicard Systems Incorporated
+000A35 Xilinx
+000A36 Synelec Telecom Multimedia
+000A37 Procera Networks, Inc.
+000A38 Netlock Technologies, Inc.
+000A39 LoPA Information Technology
+000A3A J-THREE INTERNATIONAL Holding Co., Ltd.
+000A3B GCT Semiconductor, Inc
+000A3C Enerpoint Ltd.
+000A3D Elo Sistemas Eletronicos S.A.
+000A3E EADS Telecom
+000A3F Data East Corporation
+000A40 Crown Audio
+000A41 Cisco Systems
+000A42 Cisco Systems
+000A43 Chunghwa Telecom Co., Ltd.
+000A44 Avery Dennison Deutschland GmbH
+000A45 Audio-Technica Corp.
+000A46 ARO Controls SAS
+000A47 Allied Vision Technologies
+000A48 Albatron Technology
+000A49 Acopia Networks
+000A4A Targa Systems Ltd.
+000A4B DataPower Technology, Inc.
+000A4C Molecular Devices Corporation
+000A4D Noritz Corporation
+000A4E UNITEK Electronics INC.
+000A4F Brain Boxes Limited
+000A50 REMOTEK CORPORATION
+000A51 GyroSignal Technology Co., Ltd.
+000A52 Venitek Co. Ltd.
+000A53 Intronics, Incorporated
+000A54 Laguna Hills, Inc.
+000A55 MARKEM Corporation
+000A56 HITACHI Maxell Ltd.
+000A57 Hewlett-Packard Company - Standards
+000A58 Ingenieur-Buero Freyer & Siegel
+000A59 HW server
+000A5A GreenNET Technologies Co.,Ltd.
+000A5B Power-One as
+000A5C Carel s.p.a.
+000A5D PUC Founder (MSC) Berhad
+000A5E 3COM Corporation
+000A5F almedio inc.
+000A60 Autostar Technology Pte Ltd
+000A61 Cellinx Systems Inc.
+000A62 Crinis Networks, Inc.
+000A63 DHD GmbH
+000A64 Eracom Technologies
+000A65 GentechMedia.co.,ltd.
+000A66 MITSUBISHI ELECTRIC SYSTEM & SERVICE CO.,LTD.
+000A67 OngCorp
+000A68 SolarFlare Communications, Inc.
+000A69 SUNNY bell Technology Co., Ltd.
+000A6A SVM Microwaves s.r.o.
+000A6B Tadiran Telecom Business Systems LTD
+000A6C Walchem Corporation
+000A6D EKS Elektronikservice GmbH
+000A6E Broadcast Technology Limited
+000A6F ZyTera Technologies Inc.
+000A70 MPLS Forum
+000A71 Avrio Technologies, Inc
+000A72 SimpleTech, Inc.
+000A73 Scientific Atlanta
+000A74 Manticom Networks Inc.
+000A75 Cat Electronics
+000A76 Beida Jade Bird Huaguang Technology Co.,Ltd
+000A77 Bluewire Technologies LLC
+000A78 OLITEC
+000A79 corega K.K.
+000A7A Kyoritsu Electric Co., Ltd.
+000A7B Cornelius Consult
+000A7C Tecton Ltd
+000A7D Valo, Inc.
+000A7E The Advantage Group
+000A7F Teradon Industries, Inc
+000A80 Telkonet Inc.
+000A81 TEIMA Audiotex S.L.
+000A82 TATSUTA SYSTEM ELECTRONICS CO.,LTD.
+000A83 SALTO SYSTEMS S.L.
+000A84 Rainsun Enterprise Co., Ltd.
+000A85 PLAT'C2,Inc
+000A86 Lenze
+000A87 Integrated Micromachines Inc.
+000A88 InCypher S.A.
+000A89 Creval Systems, Inc.
+000A8A Cisco Systems
+000A8B Cisco Systems
+000A8C Guardware Systems Ltd.
+000A8D EUROTHERM LIMITED
+000A8E Invacom Ltd
+000A8F Aska International Inc.
+000A90 Bayside Interactive, Inc.
+000A91 HemoCue AB
+000A92 Presonus Corporation
+000A93 W2 Networks, Inc.
+000A94 ShangHai cellink CO., LTD
+000A95 Apple Computer, Inc.
+000A96 MEWTEL TECHNOLOGY INC.
+000A97 SONICblue, Inc.
+000A98 M+F Gwinner GmbH & Co
+000A99 Dataradio Inc.
+000A9A Aiptek International Inc
+000A9B Towa Meccs Corporation
+000A9C Server Technology, Inc.
+000A9D King Young Technology Co. Ltd.
+000A9E BroadWeb Corportation
+000A9F Pannaway Technologies, Inc.
+000AA0 Cedar Point Communications
+000AA1 V V S Limited
+000AA2 SYSTEK INC.
+000AA3 SHIMAFUJI ELECTRIC CO.,LTD.
+000AA4 SHANGHAI SURVEILLANCE TECHNOLOGY CO,LTD
+000AA5 MAXLINK INDUSTRIES LIMITED
+000AA6 Hochiki Corporation
+000AA7 FEI Company
+000AA8 ePipe Pty. Ltd.
+000AA9 Brooks Automation GmbH
+000AAA AltiGen Communications Inc.
+000AAB TOYOTA MACS, INC.
+000AAC TerraTec Electronic GmbH
+000AAD Stargames Corporation
+000AAE Rosemount Process Analytical
+000AAF Pipal Systems
+000AB0 LOYTEC electronics GmbH
+000AB1 GENETEC Corporation
+000AB2 Fresnel Wireless Systems
+000AB3 Fa. GIRA
+000AB4 ETIC Telecommunications
+000AB5 Digital Electronic Network
+000AB6 COMPUNETIX, INC
+000AB7 Cisco Systems
+000AB8 Cisco Systems
+000AB9 Astera Technologies Corp.
+000ABA Arcon Technology Limited
+000ABB Taiwan Secom Co,. Ltd
+000ABC Seabridge Ltd.
+000ABD Rupprecht & Patashnick Co.
+000ABE OPNET Technologies CO., LTD.
+000ABF HIROTA SS
+000AC0 Fuyoh Video Industry CO., LTD.
+000AC1 Futuretel
+000AC2 FiberHome Telecommunication Technologies CO.,LTD
+000AC3 eM Technics Co., Ltd.
+000AC4 Daewoo Teletech Co., Ltd
+000AC5 Color Kinetics
+000AC7 Unication Group
+000AC8 ZPSYS CO.,LTD. (Planning&Management)
+000AC9 Zambeel Inc
+000ACA YOKOYAMA SHOKAI CO.,Ltd.
+000ACB XPAK MSA Group
+000ACC Winnow Networks, Inc.
+000ACD Sunrich Technology Limited
+000ACE RADIANTECH, INC.
+000ACF PROVIDEO Multimedia Co. Ltd.
+000AD0 Niigata Develoment Center,  F.I.T. Co., Ltd.
+000AD1 MWS
+000AD2 JEPICO Corporation
+000AD3 INITECH Co., Ltd
+000AD4 CoreBell Systems Inc.
+000AD5 Brainchild Electronic Co., Ltd.
+000AD6 BeamReach Networks
+000AD8 IPCserv Technology Corp.
+000AD9 Sony Ericsson Mobile Communications AB
+000ADB SkyPilot Network, Inc
+000ADC RuggedCom Inc.
+000ADD InSciTek Microsystems, Inc.
+000ADE Happy Communication Co., Ltd.
+000ADF Gennum Corporation
+000AE0 Fujitsu Softek
+000AE1 EG Technology
+000AE2 Binatone Electronics International, Ltd
+000AE3 YANG MEI TECHNOLOGY CO., LTD
+000AE4 Wistron Corp.
+000AE5 ScottCare Corporation
+000AE6 Elitegroup Computer System Co. (ECS)
+000AE7 ELIOP S.A.
+000AE8 Cathay Roxus Information Technology Co. LTD
+000AE9 AirVast Technology Inc.
+000AEA ADAM ELEKTRONIK LTD.STI.
+000AEB Shenzhen Tp-link Technology Co; Ltd.
+000AEC Koatsu Gas Kogyo Co., Ltd.
+000AED HARTING Vending G.m.b.H. & CO KG
+000AEE GCD Hard- & Software GmbH
+000AEF OTRUM ASA
+000AF0 SHIN-OH ELECTRONICS CO., LTD. R&D
+000AF1 Clarity Design, Inc.
+000AF2 NeoAxiom Corp.
+000AF3 Cisco Systems
+000AF4 Cisco Systems
+000AF5 Airgo Networks, Inc.
+000AF6 Computer Process Controls
+000AF7 Broadcom Corp.
+000AF8 American Telecare Inc.
+000AFA Traverse Technologies Australia
+000AFB Ambri Limited
+000AFC Core Tec Communications, LLC
+000AFD Viking Electronic Services
+000AFE NovaPal Ltd
+000AFF Kilchherr Elektronik AG
+000B00 FUJIAN START COMPUTER EQUIPMENT CO.,LTD
+000B01 DAIICHI ELECTRONICS CO., LTD.
+000B02 Dallmeier electronic
+000B03 Taekwang Industrial Co., Ltd
+000B04 Volktek Corporation
+000B05 Pacific Broadband Networks
+000B06 Motorola BCS
+000B07 Voxpath Networks
+000B08 Pillar Data Systems
+000B09 Ifoundry Systems Singapore
+000B0A dBm Optics
+000B0B Corrent Corporation
+000B0C Agile Systems Inc.
+000B0D Air2U, Inc.
+000B0E Trapeze Networks
+000B0F Nyquist Industrial Control BV
+000B10 11wave Technonlogy Co.,Ltd
+000B11 HIMEJI ABC TRADING CO.,LTD.
+000B13 ZETRON INC
+000B14 ViewSonic Corporation
+000B15 Platypus Technology
+000B16 Communication Machinery Corporation
+000B17 MKS Instruments
+000B19 Vernier Networks, Inc.
+000B1A Teltone Corporation
+000B1B Systronix, Inc.
+000B1D LayerZero Power Systems, Inc.
+000B1E KAPPA opto-electronics GmbH
+000B1F I CON Computer Co.
+000B20 Hirata corporation
+000B21 G-Star Communications Inc.
+000B22 Environmental Systems and Services
+000B23 Efficient Networks, Inc.
+000B24 AirLogic
+000B25 Aeluros
+000B26 Wetek Corporation
+000B27 Scion Corporation
+000B28 Quatech Inc.
+000B29 LG Industrial Systems Co.,Ltd.
+000B2A HOWTEL Co., Ltd.
+000B2B HOSTNET CORPORATION
+000B2C Eiki Industrial Co. Ltd.
+000B2D Danfoss Inc.
+000B2E Cal-Comp Electronics (Thailand) Public Company Limited Taipe
+000B2F bplan GmbH
+000B30 Beijing Gongye Science & Technology Co.,Ltd
+000B31 Yantai ZhiYang Scientific and technology industry CO., LTD
+000B32 VORMETRIC, INC.
+000B33 Vivato
+000B34 ShangHai Broadband Technologies CO.LTD
+000B35 Quad Bit System co., Ltd.
+000B36 Productivity Systems, Inc.
+000B37 MANUFACTURE DES MONTRES ROLEX SA
+000B38 Knuerr AG
+000B39 Keisoku Giken Co.,Ltd.
+000B3A Fortel DTV, Inc.
+000B3B devolo AG
+000B3C Cygnal Integrated Products, Inc.
+000B3D CONTAL OK Ltd.
+000B3E BittWare, Inc
+000B3F Anthology Solutions Inc.
+000B40 OpNext Inc.
+000B41 Ing. Buero Dr. Beutlhauser
+000B42 commax Co., Ltd.
+000B43 Microscan Systems, Inc.
+000B44 Concord IDea Corp.
+000B45 Cisco
+000B46 Cisco
+000B47 Advanced Energy
+000B48 sofrel
+000B49 RF-Link System Inc.
+000B4A Visimetrics (UK) Ltd
+000B4B VISIOWAVE SA
+000B4C Clarion (M) Sdn Bhd
+000B4D Emuzed
+000B4E VertexRSI Antenna Products Division
+000B4F Verifone, INC.
+000B50 Oxygnet
+000B51 Micetek International Inc.
+000B52 JOYMAX ELECTRONICS CORP.
+000B53 INITIUM Co., Ltd.
+000B54 BiTMICRO Networks, Inc.
+000B55 ADInstruments
+000B56 Cybernetics
+000B57 Silicon Laboratories
+000B58 Astronautics C.A  LTD
+000B59 ScriptPro, LLC
+000B5A HyperEdge
+000B5B Rincon Research Corporation
+000B5C Newtech Co.,Ltd
+000B5D FUJITSU LIMITED
+000B5E ATMAVA Ltd
+000B5F Cisco Systems
+000B60 Cisco Systems
+000B61 Friedrich Lütze GmbH &Co.
+000B62 Ingenieurbüro Ingo Mohnen
+000B64 Kieback & Peter GmbH & Co KG
+000B65 Sy.A.C. srl
+000B66 Teralink Communications
+000B67 Topview Technology Corporation
+000B68 Addvalue Communications Pte Ltd
+000B69 Franke Finland Oy
+000B6A Asiarock Incorporation
+000B6B Wistron Neweb Corp.
+000B6C Sychip Inc.
+000B6D SOLECTRON JAPAN NAKANIIDA
+000B6E Neff Instrument Corp.
+000B6F Media Streaming Networks Inc
+000B70 Load Technology, Inc.
+000B71 Litchfield Communications Inc.
+000B72 Lawo AG
+000B73 Kodeos Communications
+000B74 Kingwave Technology Co., Ltd.
+000B75 Iosoft Ltd.
+000B76 ET&T Co. Ltd.
+000B77 Cogent Systems, Inc.
+000B78 TAIFATECH INC.
+000B79 X-COM, Inc.
+000B7B Test-Um Inc.
+000B7C Telex Communications
+000B7D SOLOMON EXTREME INTERNATIONAL LTD.
+000B7E SAGINOMIYA Seisakusho Inc.
+000B7F OmniWerks
+000B81 Kaparel Corporation
+000B82 Grandstream Networks, Inc.
+000B83 DATAWATT B.V.
+000B84 BODET
+000B85 Airespace, Inc.
+000B86 Aruba Networks
+000B87 American Reliance Inc.
+000B88 Vidisco ltd.
+000B89 Top Global Technology, Ltd.
+000B8A MITEQ Inc.
+000B8B KERAJET, S.A.
+000B8C flextronics israel
+000B8D Avvio Networks
+000B8E Ascent Corporation
+000B8F AKITA ELECTRONICS SYSTEMS CO.,LTD.
+000B90 Covaro Networks, Inc.
+000B91 Aglaia Gesellschaft für Bildverarbeitung und Kommunikation m
+000B92 Ascom Danmark A/S
+000B93 Barmag Electronic
+000B94 Digital Monitoring Products, Inc.
+000B95 eBet Gaming Systems Pty Ltd
+000B96 Innotrac Diagnostics Oy
+000B97 Matsushita Electric Industrial Co.,Ltd.
+000B98 NiceTechVision
+000B99 SensAble Technologies, Inc.
+000B9A Shanghai Ulink Telecom Equipment Co. Ltd.
+000B9B Sirius System Co, Ltd.
+000B9C TriBeam Technologies, Inc.
+000B9D TwinMOS Technologies Inc.
+000B9E Yasing Technology Corp.
+000B9F Neue ELSA GmbH
+000BA0 T&L Information Inc.
+000BA1 SYSCOM Ltd.
+000BA2 Sumitomo Electric Networks, Inc
+000BA3 Siemens AG, I&S
+000BA4 Shiron Satellite Communications Ltd. (1996)
+000BA5 Quasar Cipta Mandiri, PT
+000BA6 Miyakawa Electric Works Ltd.
+000BA7 Maranti Networks
+000BA8 HANBACK ELECTRONICS CO., LTD.
+000BAA Aiphone co.,Ltd
+000BAB Advantech Technology (CHINA) Co., Ltd.
+000BAC 3Com Europe Ltd.
+000BAD PC-PoS Inc.
+000BAE Vitals System Inc.
+000BB0 Sysnet Telematica srl
+000BB1 Super Star Technology Co., Ltd.
+000BB2 SMALLBIG TECHNOLOGY
+000BB3 RiT technologies Ltd.
+000BB4 RDC Semiconductor Inc.,
+000BB5 nStor Technologies, Inc.
+000BB6 Mototech Inc.
+000BB7 Micro Systems Co.,Ltd.
+000BB8 Kihoku Electronic Co.
+000BB9 Imsys AB
+000BBA Harmonic Broadband Access Networks
+000BBB Etin Systems Co., Ltd
+000BBC En Garde Systems, Inc.
+000BBD Connexionz Limited
+000BBE Cisco Systems
+000BBF Cisco Systems
+000BC0 China IWNComm Co., Ltd.
+000BC1 Bay Microsystems, Inc.
+000BC2 Corinex Communication Corp.
+000BC3 Multiplex, Inc.
+000BC4 BIOTRONIK GmbH & Co
+000BC5 SMC Networks, Inc.
+000BC6 ISAC, Inc.
+000BC7 ICET S.p.A.
+000BC8 AirFlow Networks
+000BC9 Electroline Equipment
+000BCA DATAVAN International Corporation
+000BCB Fagor Automation , S. Coop
+000BCC JUSAN, S.A.
+000BCD Compaq (HP)
+000BCE Free2move AB
+000BCF AGFA NDT INC.
+000BD0 XiMeta Technology Americas Inc.
+000BD1 Aeronix, Inc.
+000BD2 Remopro Technology Inc.
+000BD3 cd3o
+000BD4 Beijing Wise Technology & Science Development Co.Ltd
+000BD5 Nvergence, Inc.
+000BD6 Paxton Access Ltd
+000BD7 MBB Gelma GmbH
+000BD8 Industrial Scientific Corp.
+000BD9 General Hydrogen
+000BDA EyeCross Co.,Inc.
+000BDB Dell ESG PCBA Test
+000BDC AKCP
+000BDD TOHOKU RICOH Co., LTD.
+000BDF Shenzhen RouterD Networks Limited
+000BE0 SercoNet Ltd.
+000BE2 Lumenera Corporation
+000BE3 Key Stream Co., Ltd.
+000BE4 Hosiden Corporation
+000BE5 HIMS Korea Co., Ltd.
+000BE6 Datel Electronics
+000BE7 COMFLUX TECHNOLOGY INC.
+000BE8 AOIP
+000BEA Zultys Technologies
+000BEB Systegra AG
+000BEC NIPPON ELECTRIC INSTRUMENT, INC.
+000BED ELM Inc.
+000BEE inc.jet, Incorporated
+000BEF Code Corporation
+000BF0 MoTEX Products Co., Ltd.
+000BF1 LAP Laser Applikations
+000BF2 Chih-Kan Technology Co., Ltd.
+000BF3 BAE SYSTEMS
+000BF5 Shanghai Sibo Telecom Technology Co.,Ltd
+000BF6 Nitgen Co., Ltd
+000BF7 NIDEK CO.,LTD
+000BF8 Infinera
+000BF9 Gemstone communications, Inc.
+000BFB D-NET International Corporation
+000BFC Cisco Systems
+000BFD Cisco Systems
+000BFE CASTEL Broadband Limited
+000BFF Berkeley Camera Engineering
+000C00 BEB Industrie-Elektronik AG
+000C01 Abatron AG
+000C02 ABB Oy
+000C03 HDMI Licensing, LLC
+000C04 Tecnova
+000C05 RPA Reserch Co., Ltd.
+000C06 Nixvue Systems  Pte Ltd
+000C07 Iftest AG
+000C08 HUMEX Technologies Corp.
+000C09 Hitachi IE Systems Co., Ltd
+000C0A Guangdong Province Electronic Technology Research Institute
+000C0B Broadbus Technologies
+000C0C APPRO TECHNOLOGY INC.
+000C0D Communications & Power Industries / Satcom Division
+000C0E XtremeSpectrum, Inc.
+000C0F Techno-One Co., Ltd
+000C10 PNI Corporation
+000C11 NIPPON DEMPA CO.,LTD.
+000C12 Micro-Optronic-Messtechnik GmbH
+000C13 MediaQ
+000C14 Diagnostic Instruments, Inc.
+000C15 CyberPower Systems, Inc.
+000C16 Concorde Microsystems Inc.
+000C17 AJA Video Systems Inc
+000C18 Zenisu Keisoku Inc.
+000C19 Telio Communications GmbH
+000C1A Quest Technical Solutions Inc.
+000C1B ORACOM Co, Ltd.
+000C1C MicroWeb Co., Ltd.
+000C1D Mettler & Fuchs AG
+000C1E Global Cache
+000C1F Glimmerglass Networks
+000C20 Fi WIn, Inc.
+000C21 Faculty of Science and Technology, Keio University
+000C22 Double D Electronics Ltd
+000C23 Beijing Lanchuan Tech. Co., Ltd.
+000C25 Allied Telesyn Networks
+000C26 Weintek Labs. Inc.
+000C27 Sammy Corporation
+000C28 RIFATRON
+000C29 VMware, Inc.
+000C2A OCTTEL Communication Co., Ltd.
+000C2B ELIAS Technology, Inc.
+000C2C Enwiser Inc.
+000C2D FullWave Technology Co., Ltd.
+000C2E Openet information technology(shenzhen) Co., Ltd.
+000C2F SeorimTechnology Co.,Ltd.
+000C30 Cisco
+000C31 Cisco
+000C32 Avionic Design Development GmbH
+000C33 Compucase Enterprise Co. Ltd.
+000C34 Vixen Co., Ltd.
+000C35 KaVo Dental GmbH & Co. KG
+000C36 SHARP TAKAYA ELECTRONICS INDUSTRY CO.,LTD.
+000C37 Geomation, Inc.
+000C38 TelcoBridges Inc.
+000C39 Sentinel Wireless Inc.
+000C3A Oxance
+000C3B Orion Electric Co., Ltd.
+000C3C MediaChorus, Inc.
+000C3D Glsystech Co., Ltd.
+000C3E Crest Audio
+000C3F Cogent Defence & Security Networks,
+000C40 Altech Controls
+000C41 The Linksys Group, Inc.
+000C42 Routerboard.com
+000C43 Ralink Technology, Corp.
+000C44 Automated Interfaces, Inc.
+000C45 Animation Technologies Inc.
+000C46 Allied Telesyn Inc.
+000C47 SK Teletech(R&D Planning Team)
+000C48 QoStek Corporation
+000C49 Dangaard Telecom RTC Division A/S
+000C4A Cygnus Microsystems Private Limited
+000C4B Cheops Elektronik
+000C4C Arcor AG&Co.
+000C4D ACRA CONTROL
+000C4E Winbest Technology CO,LT
+000C4F UDTech Japan Corporation
+000C50 Seagate Technology
+000C51 Scientific Technologies Inc.
+000C52 Roll Systems Inc.
+000C54 Pedestal Networks, Inc
+000C55 Microlink Communications Inc.
+000C56 Megatel Computer (1986) Corp.
+000C57 MACKIE Engineering Services Belgium BVBA
+000C58 M&S Systems
+000C59 Indyme Electronics, Inc.
+000C5A IBSmm Industrieelektronik Multimedia
+000C5B HANWANG TECHNOLOGY CO.,LTD
+000C5C GTN Systems B.V.
+000C5D CHIC TECHNOLOGY (CHINA) CORP.
+000C5F Avtec, Inc.
+000C60 ACM Systems
+000C61 AC Tech corporation DBA Advanced Digital
+000C62 ABB Automation Technology Products AB, Control
+000C63 Zenith Electronics Corporation
+000C64 X2 MSA Group
+000C65 Sunin Telecom
+000C66 Pronto Networks Inc
+000C67 OYO ELECTRIC CO.,LTD
+000C68 Oasis Semiconductor, Inc.
+000C69 National Radio Astronomy Observatory
+000C6A MBARI
+000C6B Kurz Industrie-Elektronik GmbH
+000C6C Elgato Systems LLC
+000C6D BOC Edwards
+000C6E ASUSTEK COMPUTER INC.
+000C6F Amtek system co.,LTD.
+000C70 ACC GmbH
+000C71 Wybron, Inc
+000C72 Tempearl Industrial Co., Ltd.
+000C73 TELSON ELECTRONICS CO., LTD
+000C74 RIVERTEC CORPORATION
+000C75 Oriental integrated electronics. LTD
+000C76 MICRO-STAR INTERNATIONAL CO., LTD.
+000C77 Life Racing Ltd
+000C78 In-Tech Electronics Limited
+000C79 Extel Communications P/L
+000C7A DaTARIUS Technologies GmbH
+000C7B ALPHA PROJECT Co.,Ltd.
+000C7C Internet Information Image Inc.
+000C7D TEIKOKU ELECTRIC MFG. CO., LTD
+000C7E Tellium Incorporated
+000C7F synertronixx GmbH
+000C80 Opelcomm Inc.
+000C81 Nulec Industries Pty Ltd
+000C82 NETWORK TECHNOLOGIES INC
+000C83 Logical Solutions
+000C84 Eazix, Inc.
+000C85 Cisco Systems
+000C86 Cisco Systems
+000C87 ATI
+000C88 Apache Micro Peripherals, Inc.
+000C89 AC Electric Vehicles, Ltd.
+000C8A Bose Corporation
+000C8B Connect Tech Inc
+000C8C KODICOM CO.,LTD.
+000C8D MATRIX VISION GmbH
+000C8E Mentor Engineering Inc
+000C8F Nergal s.r.l.
+000C90 Octasic Inc.
+000C91 Riverhead Networks Inc.
+000C92 WolfVision Gmbh
+000C93 Xeline Co., Ltd.
+000C94 United Electronic Industries, Inc.
+000C95 PrimeNet
+000C96 OQO, Inc.
+000C97 NV ADB TTV Technologies SA
+000C98 LETEK Communications Inc.
+000C99 HITEL LINK Co.,Ltd
+000C9A Hitech Electronics Corp.
+000C9B EE Solutions, Inc
+000C9C Chongho information & communications
+000C9D AirWalk Communications, Inc.
+000C9E MemoryLink Corp.
+000C9F NKE Corporation
+000CA0 StorCase Technology, Inc.
+000CA1 SIGMACOM Co., LTD.
+000CA2 Scopus Network Technologies Ltd
+000CA3 Rancho Technology, Inc.
+000CA4 Prompttec Product Management GmbH
+000CA6 Mintera Corporation
+000CA7 Metro (Suzhou) Technologies Co., Ltd.
+000CA8 Garuda Networks Corporation
+000CA9 Ebtron Inc.
+000CAA Cubic Transportation Systems Inc
+000CAB COMMEND International
+000CAC Citizen Watch Co., Ltd.
+000CAD BTU International
+000CAE Ailocom Oy
+000CAF TRI TERM CO.,LTD.
+000CB0 Star Semiconductor Corporation
+000CB1 Salland Engineering (Europe) BV
+000CB2 safei Co., Ltd.
+000CB3 ROUND Co.,Ltd.
+000CB4 Propagate Networks, Inc
+000CB5 Premier Technolgies, Inc
+000CB6 NANJING SEU MOBILE & INTERNET TECHNOLOGY CO.,LTD
+000CB7 Nanjing Huazhuo Electronics Co., Ltd.
+000CB8 MEDION AG
+000CB9 LEA
+000CBA Jamex
+000CBB ISKRAEMECO
+000CBC Iscutum
+000CBD Interface Masters, Inc
+000CBF Holy Stone Ent. Co., Ltd.
+000CC0 Genera Oy
+000CC1 Cooper Industries Inc.
+000CC3 BeWAN systems
+000CC4 Tiptel AG
+000CC5 Nextlink Co., Ltd.
+000CC6 Ka-Ro electronics GmbH
+000CC7 Intelligent Computer Solutions Inc.
+000CC8 Integrated Digital Systems, Inc.
+000CC9 ILWOO DATA & TECHNOLOGY CO.,LTD
+000CCA Hitachi Global Storage Technologies
+000CCB Design Combus Ltd
+000CCC Bluesoft Ltd.
+000CCD IEC - TC57
+000CCE Cisco Systems
+000CCF Cisco Systems
+000CD0 Symetrix
+000CD1 SFOM Technology Corp.
+000CD2 Schaffner EMV AG
+000CD3 Prettl Elektronik Radeberg GmbH
+000CD4 Positron Public Safety Systems inc.
+000CD5 Passave Inc.
+000CD6 PARTNER TECH
+000CD7 Nallatech Ltd
+000CD8 M. K. Juchheim GmbH & Co
+000CD9 Itcare Co., Ltd
+000CDA FreeHand Systems, Inc.
+000CDB Foundry Networks
+000CDC BECS Technology, Inc
+000CDD AOS Technologies AG
+000CDE ABB STOTZ-KONTAKT GmbH
+000CDF PULNiX America, Inc
+000CE0 Trek Diagnostics Inc.
+000CE1 The Open Group
+000CE2 Rolls-Royce
+000CE3 Option International N.V.
+000CE4 NeuroCom International, Inc.
+000CE5 Motorola BCS
+000CE6 Meru Networks Inc
+000CE7 MediaTek Inc.
+000CE8 GuangZhou AnJuBao Co., Ltd
+000CE9 BLOOMBERG L.P.
+000CEA aphona Kommunikationssysteme
+000CEB CNMP Networks, Inc.
+000CEC Spectracom Corp.
+000CED Real Digital Media
+000CEE Q-Networks
+000CEF Open Networks Engineering Ltd
+000CF0 M & N GmbH
+000CF1 Intel Corporation
+000CF2 GAMESA EÓLICA
+000CF3 CALL IMAGE SA
+000CF4 AKATSUKI ELECTRIC MFG.CO.,LTD.
+000CF5 InfoExpress
+000CF6 Sitecom Europe BV
+000CF7 Nortel Networks
+000CF8 Nortel Networks
+000CF9 ITT Flygt AB
+000CFA Digital Systems Corp
+000CFB Korea Network Systems
+000CFC S2io Technologies Corp
+000CFE Grand Electronic Co., Ltd
+000CFF MRO-TEK LIMITED
+000D00 Seaway Networks Inc.
+000D01 P&E Microcomputer Systems, Inc.
+000D02 NEC Access Technica,Ltd
+000D03 Matrics, Inc.
+000D04 Foxboro Eckardt Development GmbH
+000D05 cybernet manufacturing inc.
+000D06 Compulogic Limited
+000D07 Calrec Audio Ltd
+000D08 AboveCable, Inc.
+000D09 Yuehua(Zhuhai) Electronic CO. LTD
+000D0A Projectiondesign as
+000D0B Melco Inc.
+000D0C MDI Security Systems
+000D0D ITSupported, LLC
+000D0E Inqnet Systems, Inc.
+000D0F Finlux Ltd
+000D10 Embedtronics Oy
+000D11 DENTSPLY - Gendex
+000D12 AXELL Corporation
+000D13 Wilhelm Rutenbeck GmbH&Co.
+000D14 Vtech Innovation LP dba Advanced American Telephones
+000D15 Voipac s.r.o.
+000D16 UHS Systems Pty Ltd
+000D17 Turbo Networks Co.Ltd
+000D18 Sunitec Enterprise Co., Ltd.
+000D19 ROBE Show lighting
+000D1A Mustek System Inc.
+000D1B Kyoto Electronics Manufacturing Co., Ltd.
+000D1C I2E TELECOM
+000D1D HIGH-TEK HARNESS ENT. CO., LTD.
+000D1E Control Techniques
+000D1F AV Digital
+000D20 ASAHIKASEI TECHNOSYSTEM CO.,LTD.
+000D21 WISCORE Inc.
+000D22 Unitronics
+000D23 Smart Solution, Inc
+000D24 SENTEC E&E CO., LTD.
+000D25 SANDEN CORPORATION
+000D26 Primagraphics Limited
+000D27 MICROPLEX Printware AG
+000D28 Cisco
+000D29 Cisco
+000D2A Scanmatic AS
+000D2B Racal Instruments
+000D2C Patapsco Designs Ltd
+000D2D NCT Deutschland GmbH
+000D2E Matsushita Avionics Systems Corporation
+000D2F AIN Comm.Tech.Co., LTD
+000D30 IceFyre Semiconductor
+000D31 Compellent Technologies, Inc.
+000D32 DispenseSource, Inc.
+000D33 Prediwave Corp.
+000D34 Shell International Exploration and Production, Inc.
+000D35 PAC International Ltd
+000D36 Wu Han Routon Electronic Co., Ltd
+000D37 WIPLUG
+000D38 NISSIN INC.
+000D39 Network Electronics
+000D3A Microsoft Corp.
+000D3B Microelectronics Technology Inc.
+000D3C i.Tech Dynamic Ltd
+000D3E APLUX Communications Ltd.
+000D3F VXI Technology
+000D40 Verint Loronix Video Solutions
+000D41 Siemens AG ICM MP UC RD IT KLF1
+000D42 Newbest Development Limited
+000D43 DRS Tactical Systems Inc.
+000D45 Tottori SANYO Electric Co., Ltd.
+000D46 Eurotherm Drives, Ltd.
+000D47 Collex
+000D48 AEWIN Technologies Co., Ltd.
+000D49 Triton Systems of Delaware, Inc.
+000D4A Steag ETA-Optik
+000D4B Roku, LLC
+000D4C Outline Electronics Ltd.
+000D4D Ninelanes
+000D4E NDR Co.,LTD.
+000D4F Kenwood Corporation
+000D50 Galazar Networks
+000D51 DIVR Systems, Inc.
+000D52 Comart system
+000D53 Beijing 5w Communication Corp.
+000D54 3Com Europe Ltd
+000D55 SANYCOM Technology Co.,Ltd
+000D56 Dell PCBA Test
+000D57 Fujitsu I-Network Systems Limited.
+000D59 Amity Systems, Inc.
+000D5A Tiesse SpA
+000D5B Smart Empire Investments Limited
+000D5C Robert Bosch GmbH, VT-ATMO
+000D5D Raritan Computer, Inc
+000D5E NEC CustomTechnica, Ltd.
+000D5F Minds Inc
+000D60 IBM Corporation
+000D61 Giga-Byte Technology Co., Ltd.
+000D62 Funkwerk Dabendorf GmbH
+000D63 DENT Instruments, Inc.
+000D64 COMAG Handels AG
+000D65 Cisco Systems
+000D66 Cisco Systems
+000D67 BelAir Networks Inc.
+000D68 Vinci Systems, Inc.
+000D69 TMT&D Corporation
+000D6A Redwood Technologies LTD
+000D6B Mita-Teknik A/S
+000D6C M-Audio
+000D6D K-Tech Devices Corp.
+000D6E K-Patents Oy
+000D6F Ember Corporation
+000D70 Datamax Corporation
+000D71 boca systems
+000D72 2Wire, Inc
+000D73 Technical Support, Inc.
+000D74 Sand Network Systems, Inc.
+000D75 Kobian Pte Ltd - Taiwan Branch
+000D76 Hokuto Denshi Co,. Ltd.
+000D77 FalconStor Software
+000D78 Engineering & Security
+000D79 Dynamic Solutions Co,.Ltd.
+000D7A DiGATTO Asia Pacific Pte Ltd
+000D7B Consensys Computers Inc.
+000D7C Codian Ltd
+000D7D Afco Systems
+000D7E Axiowave Networks, Inc.
+000D7F MIDAS  COMMUNICATION TECHNOLOGIES PTE LTD ( Foreign Branch)
+000D80 Online Development Inc
+000D81 Pepperl+Fuchs GmbH
+000D82 PHS srl
+000D83 Sanmina-SCI Hungary  Ltd.
+000D84 Seodu Inchip, Inc.
+000D85 Tapwave, Inc.
+000D86 Huber + Suhner AG
+000D87 Elitegroup Computer System Co. (ECS)
+000D88 D-Link Corporation
+000D89 Bils Technology Inc
+000D8A Winners Electronics Co., Ltd.
+000D8B T&D Corporation
+000D8C Shanghai Wedone Digital Ltd. CO.
+000D8D ProLinx Communication Gateways, Inc.
+000D8E Koden Electronics Co., Ltd.
+000D8F King Tsushin Kogyo Co., LTD.
+000D90 Factum Electronics AB
+000D91 Eclipse (HQ Espana) S.L.
+000D92 Arima Communication Corporation
+000D93 Apple Computer
+000D94 AFAR Communications,Inc
+000D96 Vtera Technology Inc.
+000D97 Tropos Networks, Inc.
+000D98 S.W.A.C. Schmitt-Walter Automation Consult GmbH
+000D99 Orbital Sciences Corp.; Launch Systems Group
+000D9A INFOTEC LTD
+000D9C Elan GmbH & Co KG
+000D9D Hewlett Packard
+000D9E TOKUDEN OHIZUMI SEISAKUSYO Co.,Ltd.
+000D9F RF Micro Devices
+000DA0 NEDAP N.V.
+000DA1 MIRAE ITS Co.,LTD.
+000DA2 Infrant Technologies, Inc.
+000DA3 Emerging Technologies Limited
+000DA4 DOSCH & AMAND SYSTEMS AG
+000DA5 Fabric7 Systems, Inc
+000DA6 Universal Switching Corporation
+000DA8 Teletronics Technology Corporation
+000DA9 T.E.A.M. S.L.
+000DAA S.A.Tehnology co.,Ltd.
+000DAB Parker Hannifin GmbH Electromechanical Division Europe
+000DAC Japan CBM Corporation
+000DAD Dataprobe Inc
+000DAE SAMSUNG HEAVY INDUSTRIES CO., LTD.
+000DAF Plexus Corp (UK) Ltd
+000DB0 Olym-tech Co.,Ltd.
+000DB1 Japan Network Service Co., Ltd.
+000DB2 Ammasso, Inc.
+000DB3 SDO Communication Corperation
+000DB4 NETASQ
+000DB5 GLOBALSAT TECHNOLOGY CORPORATION
+000DB6 Teknovus, Inc.
+000DB7 SANKO ELECTRIC CO,.LTD
+000DB8 SCHILLER AG
+000DB9 PC Engines GmbH
+000DBA Océ Document Technologies GmbH
+000DBB Nippon Dentsu Co.,Ltd.
+000DBC Cisco Systems
+000DBD Cisco Systems
+000DBE Bel Fuse Europe Ltd.,UK
+000DBF TekTone Sound & Signal Mfg., Inc.
+000DC0 Spagat AS
+000DC1 SafeWeb Inc
+000DC3 First Communication, Inc.
+000DC4 Emcore Corporation
+000DC5 EchoStar International Corporation
+000DC6 DigiRose Technology Co., Ltd.
+000DC7 COSMIC ENGINEERING INC.
+000DC8 AirMagnet, Inc
+000DC9 THALES Elektronik Systeme GmbH
+000DCA Tait Electronics
+000DCB Petcomkorea Co., Ltd.
+000DCC NEOSMART Corp.
+000DCD GROUPE TXCOM
+000DCE Dynavac Technology Pte Ltd
+000DCF Cidra Corp.
+000DD0 TetraTec Instruments GmbH
+000DD1 Stryker Corporation
+000DD2 Simrad Optronics ASA
+000DD3 SAMWOO Telecommunication Co.,Ltd.
+000DD4 Revivio Inc.
+000DD5 O'RITE TECHNOLOGY CO.,LTD
+000DD7 Bright
+000DD8 BBN
+000DD9 Anton Paar GmbH
+000DDA ALLIED TELESIS K.K.
+000DDB AIRWAVE TECHNOLOGIES INC.
+000DDC VAC
+000DDD PROFÝLO TELRA ELEKTRONÝK SANAYÝ VE TÝCARET A.Þ.
+000DDE Joyteck Co., Ltd.
+000DDF Japan Image & Network Inc.
+000DE0 ICPDAS Co.,LTD
+000DE1 Control Products, Inc.
+000DE2 CMZ Sistemi Elettronici
+000DE3 AT Sweden AB
+000DE4 DIGINICS, Inc.
+000DE5 Samsung Thales
+000DE6 YOUNGBO ENGINEERING CO.,LTD
+000DE7 Snap-on OEM Group
+000DE8 Nasaco Electronics Pte. Ltd
+000DE9 Napatech Aps
+000DEA Kingtel Telecommunication Corp.
+000DEB CompXs Limited
+000DEC Cisco Systems
+000DED Cisco Systems
+000DEF Soc. Coop. Bilanciai
+000DF0 QCOM TECHNOLOGY INC.
+000DF1 IONIX INC.
+000DF3 Asmax Solutions
+000DF4 Watertek Co.
+000DF5 Teletronics International Inc.
+000DF6 Technology Thesaurus Corp.
+000DF7 Space Dynamics Lab
+000DF8 ORGA Kartensysteme GmbH
+000DF9 NDS Limited
+000DFA Micro Control Systems Ltd.
+000DFB Komax AG
+000DFC ITFOR Inc. resarch and development
+000DFD Huges Hi-Tech Inc.,
+000DFE Hauppauge Computer Works, Inc.
+000DFF CHENMING MOLD INDUSTRY CORP.
+000E01 ASIP Technologies Inc.
+000E02 Advantech AMT Inc.
+000E03 Aarohi Communications, Inc.
+000E05 WIRELESS MATRIX CORP.
+000E06 Team Simoco Ltd
+000E07 Sony Ericsson Mobile Communications AB
+000E08 Sipura Technology, Inc.
+000E09 Shenzhen Coship Software Co.,LTD.
+000E0B Netac Technology Co., Ltd.
+000E0C Intel Corporation
+000E0D HESCH Schröder GmbH
+000E0E ESA elettronica S.P.A.
+000E0F ERMME
+000E11 BDT Büro- und Datentechnik GmbH & Co. KG
+000E12 Adaptive Micro Systems Inc.
+000E13 Accu-Sort Systems inc.
+000E14 Visionary Solutions, Inc.
+000E15 Tadlys LTD
+000E16 SouthWing
+000E18 MyA Technology
+000E19 LogicaCMG Pty Ltd
+000E1B IAV GmbH
+000E1C Hach Company
+000E1F TCL Networks Equipment Co., Ltd.
+000E20 PalmSource, Inc.
+000E21 MTU Friedrichshafen GmbH
+000E23 Incipient, Inc.
+000E25 Hannae Technology Co., Ltd
+000E26 Gincom Technology Corp.
+000E27 Crere Networks, Inc.
+000E28 Dynamic Ratings P/L
+000E29 Shester Communications Inc
+000E2B Safari Technologies
+000E2C Netcodec co.
+000E2D Hyundai Digital Technology Co.,Ltd.
+000E2E Edimax Technology Co., Ltd.
+000E2F Disetronic Medical Systems AG
+000E30 AERAS Networks, Inc.
+000E31 Olympus BioSystems GmbH
+000E32 Kontron Medical
+000E33 Shuko Electronics Co.,Ltd
+000E34 NexxGenCity
+000E35 Intel Corp
+000E36 HEINESYS, Inc.
+000E37 Harms & Wende GmbH & Co.KG
+000E38 Cisco Systems
+000E39 Cisco Systems
+000E3A Cirrus Logic
+000E3B Hawking Technologies, Inc.
+000E3C TransAct Technoloiges Inc.
+000E3D Televic N.V.
+000E3E Sun Optronics Inc
+000E3F Soronti, Inc.
+000E40 Nortel Networks
+000E41 NIHON MECHATRONICS CO.,LTD.
+000E42 Motic Incoporation Ltd.
+000E43 G-Tek Electronics Sdn. Bhd.
+000E44 Digital 5, Inc.
+000E45 Beijing Newtry Electronic Technology Ltd
+000E46 Niigata Seimitsu Co.,Ltd.
+000E47 NCI System Co.,Ltd.
+000E48 Lipman TransAction Solutions
+000E49 Forsway Scandinavia AB
+000E4A Changchun Huayu WEBPAD Co.,LTD
+000E4B atrium c and i
+000E4C Bermai Inc.
+000E4D Numesa Inc.
+000E4E Waveplus Technology Co., Ltd.
+000E4F Trajet GmbH
+000E50 Thomson Multi Media
+000E51 tecna elettronica srl
+000E52 Optium Corporation
+000E53 AV TECH CORPORATION
+000E54 AlphaCell Wireless Ltd.
+000E55 AUVITRAN
+000E56 4G Systems GmbH
+000E57 Iworld Networking, Inc.
+000E58 Rincon Networks
+000E5A TELEFIELD inc.
+000E5B ParkerVision - Direct2Data
+000E5C Motorola BCS
+000E5D Com-X Networks
+000E5E Beijing Raisecom Science & Technology Development Co.,Ltd
+000E5F activ-net GmbH & Co. KG
+000E60 360SUN Digital Broadband Corporation
+000E61 MICROTROL LIMITED
+000E62 Nortel Networks
+000E63 Lemke Diagnostics GmbH
+000E64 Elphel, Inc
+000E65 TransCore
+000E66 Hitachi Advanced Digital, Inc.
+000E67 Eltis Microelectronics Ltd.
+000E68 E-TOP Network Technology Inc.
+000E69 China Electric Power Research Institute
+000E6A 3COM EUROPE LTD
+000E6B Janitza electronics GmbH
+000E6C Device Drivers Limited
+000E6D Murata Manufacturing Co., Ltd.
+000E6E MICRELEC  ELECTRONICS S.A
+000E6F IRIS Corporation Berhad
+000E70 in2 Networks
+000E71 Gemstar Technology Development Ltd.
+000E72 CTS electronics
+000E73 Tpack A/S
+000E74 Solar Telecom. Tech
+000E75 New York Air Brake Corp.
+000E76 GEMSOC INNOVISION INC.
+000E77 Decru, Inc.
+000E78 Amtelco
+000E79 Ample Communications Inc.
+000E7B Toshiba
+000E7D Electronics Line 3000 Ltd.
+000E7E Comprog Oy
+000E7F Hewlett Packard
+000E81 Instant802 Networks Inc.
+000E82 Commtech Wireless
+000E83 Cisco Systems
+000E84 Cisco Systems
+000E85 Catalyst Enterprises, Inc.
+000E86 Alcatel North America
+000E87 adp Gauselmann GmbH
+000E88 VIDEOTRON CORP.
+000E89 CLEMATIC
+000E8A Avara Technologies Pty. Ltd.
+000E8B Astarte Technology Co, Ltd.
+000E8C Siemens AG A&D ET
+000E8D Systems in Progress Holding GmbH
+000E8E SparkLAN Communications, Inc.
+000E8F Sercomm Corp.
+000E90 PONICO CORP.
+000E92 Millinet Co., Ltd.
+000E93 Milénio 3 Sistemas Electrónicos, Lda.
+000E94 Maas International BV
+000E95 Fujiya Denki Seisakusho Co.,Ltd.
+000E96 Cubic Defense Applications, Inc.
+000E97 Ultracker Technology CO., Inc
+000E98 Vitec CC, INC.
+000E99 Spectrum Digital, Inc
+000E9A BOE TECHNOLOGY GROUP CO.,LTD
+000E9C Pemstar
+000E9D Video Networks Ltd
+000E9E Topfield Co., Ltd
+000E9F TEMIC SDS GmbH
+000EA0 NetKlass Technology Inc.
+000EA1 Formosa Teletek Corporation
+000EA2 CyberGuard Corporation
+000EA3 CNCR-IT CO.,LTD,HangZhou P.R.CHINA
+000EA4 Certance Inc.
+000EA5 BLIP Systems
+000EA6 ASUSTEK COMPUTER INC.
+000EA7 Endace Inc Ltd.
+000EA8 United Technologists Europe Limited
+000EA9 Shanghai Xun Shi Communications Equipment Ltd. Co.
+000EAC MINTRON ENTERPRISE CO., LTD.
+000EAD Metanoia Technologies, Inc.
+000EAE GAWELL TECHNOLOGIES CORP.
+000EAF CASTEL
+000EB0 Solutions Radio BV
+000EB1 Newcotech,Ltd
+000EB2 Micro-Research Finland Oy
+000EB3 LeftHand Networks
+000EB4 GUANGZHOU GAOKE COMMUNICATIONS TECHNOLOGY CO.LTD.
+000EB5 Ecastle Electronics Co., Ltd.
+000EB6 Riverbed Technology, Inc.
+000EB7 Knovative, Inc.
+000EB8 Iiga co.,Ltd
+000EB9 HASHIMOTO Electronics Industry Co.,Ltd.
+000EBA HANMI SEMICONDUCTOR CO., LTD.
+000EBB Everbee Networks
+000EBC Cullmann GmbH
+000EBD Burdick, a Quinton Compny
+000EBE B&B Electronics Manufacturing Co.
+000EC0 Nortel Networks
+000EC1 MYNAH Technologies
+000EC2 Lowrance Electronics, Inc.
+000EC3 Logic Controls, Inc.
+000EC4 Iskra Transmission d.d.
+000EC6 ASIX ELECTRONICS CORP.
+000EC7 Appeal Telecom Co.,Ltd.
+000EC8 Zoran Corporation
+000EC9 YOKO Technology Corp.
+000ECB VineSys Technology
+000ECC Tableau
+000ECD SKOV A/S
+000ECE S.I.T.T.I. S.p.A.
+000ECF PROFIBUS Nutzerorganisation e.V.
+000ED0 Privaris, Inc.
+000ED1 Osaka Micro Computer.
+000ED2 Filtronic plc
+000ED3 Epicenter, Inc.
+000ED4 CRESITT INDUSTRIE
+000ED5 COPAN Systems Inc.
+000ED6 Cisco Systems
+000ED7 Cisco Systems
+000ED8 Aktino, Inc.
+000ED9 Aksys, Ltd.
+000EDA C-TECH UNITED CORP.
+000EDB XiNCOM Corp.
+000EDC Tellion INC.
+000EDD SHURE INCORPORATED
+000EDE REMEC, Inc.
+000EDF PLX Technology
+000EE0 Mcharge
+000EE1 ExtremeSpeed Inc.
+000EE2 Custom Engineering S.p.A.
+000EE3 Chiyu Technology Co.,Ltd
+000EE5 bitWallet, Inc.
+000EE6 Adimos Systems LTD
+000EE7 AAC ELECTRONICS CORP.
+000EE8 zioncom
+000EE9 WayTech Development, Inc.
+000EEA Shadong Luneng Jicheng Electronics,Co.,Ltd
+000EEB Sandmartin(zhong shan)Electronics Co.,Ltd
+000EEC Orban
+000EED Nokia Danmark A/S
+000EEE Muco Industrie BV
+000EF0 Festo AG & Co. KG
+000EF1 EZQUEST INC.
+000EF3 Smarthome
+000EF4 Shenzhen Kasda Digital Technology Co.,Ltd
+000EF5 iPAC Technology Co., Ltd.
+000EF6 E-TEN Information Systems Co., Ltd.
+000EF7 Vulcan Portals Inc
+000EF8 SBC ASI
+000EF9 REA Elektronik GmbH
+000EFA Optoway Technology Incorporation
+000EFB Macey Enterprises
+000EFC JTAG Technologies B.V.
+000EFD FUJI PHOTO OPTICAL CO., LTD.
+000EFE EndRun Technologies LLC
+000EFF Megasolution,Inc.
+000F00 Legra Systems, Inc.
+000F01 DIGITALKS INC
+000F02 Digicube Technology Co., Ltd
+000F03 COM&C CO., LTD
+000F04 cim-usa inc
+000F05 3B SYSTEM INC.
+000F06 Nortel Networks
+000F07 Mangrove Systems, Inc.
+000F08 Indagon Oy
+000F0B Kentima Technologies AB
+000F0C SYNCHRONIC ENGINEERING
+000F0D Hunt Electronic Co., Ltd.
+000F0E WaveSplitter Technologies, Inc.
+000F0F Real ID Technology Co., Ltd.
+000F10 RDM Corporation
+000F11 Prodrive B.V.
+000F12 Panasonic AVC Networks Germany GmbH
+000F13 Nisca corporation
+000F14 Mindray Co., Ltd.
+000F15 Kjaerulff1 A/S
+000F16 JAY HOW TECHNOLOGY CO.,
+000F17 Insta Elektro GmbH
+000F18 Industrial Control Systems
+000F19 Guidant Corporation
+000F1A Gaming Support B.V.
+000F1B Ego Systems Inc.
+000F1C DigitAll World Co., Ltd
+000F1D Cosmo Techs Co., Ltd.
+000F1E Chengdu KT Electric Co.of High & New Technology
+000F1F WW PCBA Test
+000F20 WW Ops
+000F21 Scientific Atlanta, Inc
+000F22 Helius, Inc.
+000F23 Cisco Systems
+000F24 Cisco Systems
+000F25 AimValley B.V.
+000F26 WorldAccxx  LLC
+000F27 TEAL Electronics, Inc.
+000F28 Itronix Corporation
+000F29 Augmentix Corporation
+000F2A Cableware Electronics
+000F2B GREENBELL SYSTEMS
+000F2C Uplogix, Inc.
+001000 CABLE TELEVISION LABORATORIES, INC.
+001001 MCK COMMUNICATIONS
+001002 ACTIA
+001003 IMATRON, INC.
+001004 THE BRANTLEY COILE COMPANY,INC
+001005 UEC COMMERCIAL
+001006 Thales Contact Solutions Ltd.
+001007 CISCO SYSTEMS, INC.
+001008 VIENNA SYSTEMS CORPORATION
+001009 HORO QUARTZ
+00100A WILLIAMS COMMUNICATIONS GROUP
+00100B CISCO SYSTEMS, INC.
+00100C ITO CO., LTD.
+00100D CISCO SYSTEMS, INC.
+00100E MICRO LINEAR COPORATION
+00100F INDUSTRIAL CPU SYSTEMS
+001010 INITIO CORPORATION
+001011 CISCO SYSTEMS, INC.
+001012 PROCESSOR SYSTEMS (I) PVT LTD
+001013 INDUSTRIAL COMPUTER SOURCE
+001014 CISCO SYSTEMS, INC.
+001015 OOmon Inc.
+001016 T.SQWARE
+001017 MICOS GmbH
+001018 BROADCOM CORPORATION
+001019 SIRONA DENTAL SYSTEMS GmbH & Co. KG
+00101A PictureTel Corp.
+00101B CORNET TECHNOLOGY, INC.
+00101C OHM TECHNOLOGIES INTL, LLC
+00101D WINBOND ELECTRONICS CORP.
+00101E MATSUSHITA ELECTRONIC INSTRUMENTS CORP.
+00101F CISCO SYSTEMS, INC.
+001020 WELCH ALLYN, DATA COLLECTION
+001021 ENCANTO NETWORKS, INC.
+001022 SatCom Media Corporation
+001023 FLOWWISE NETWORKS, INC.
+001024 NAGOYA ELECTRIC WORKS CO., LTD
+001025 GRAYHILL INC.
+001026 ACCELERATED NETWORKS, INC.
+001027 L-3 COMMUNICATIONS EAST
+001028 COMPUTER TECHNICA, INC.
+001029 CISCO SYSTEMS, INC.
+00102A ZF MICROSYSTEMS, INC.
+00102B UMAX DATA SYSTEMS, INC.
+00102C Lasat Networks A/S
+00102D HITACHI SOFTWARE ENGINEERING
+00102E NETWORK SYSTEMS & TECHNOLOGIES PVT. LTD.
+00102F CISCO SYSTEMS, INC.
+001030 Wi-LAN, Inc.
+001031 OBJECTIVE COMMUNICATIONS, INC.
+001032 ALTA TECHNOLOGY
+001033 ACCESSLAN COMMUNICATIONS, INC.
+001034 GNP Computers
+001035 ELITEGROUP COMPUTER SYSTEMS CO., LTD
+001036 INTER-TEL INTEGRATED SYSTEMS
+001037 CYQ've Technology Co., Ltd.
+001038 MICRO RESEARCH INSTITUTE, INC.
+001039 Vectron Systems AG
+00103A DIAMOND NETWORK TECH
+00103B HIPPI NETWORKING FORUM
+00103C IC ENSEMBLE, INC.
+00103D PHASECOM, LTD.
+00103E NETSCHOOLS CORPORATION
+00103F TOLLGRADE COMMUNICATIONS, INC.
+001040 INTERMEC CORPORATION
+001041 BRISTOL BABCOCK, INC.
+001042 AlacriTech
+001043 A2 CORPORATION
+001044 InnoLabs Corporation
+001045 Nortel Networks
+001046 ALCORN MCBRIDE INC.
+001047 ECHO ELETRIC CO. LTD.
+001048 HTRC AUTOMATION, INC.
+001049 SHORELINE TELEWORKS, INC.
+00104A THE PARVUC CORPORATION
+00104B 3COM CORPORATION
+00104C COMPUTER ACCESS TECHNOLOGY
+00104D SURTEC INDUSTRIES, INC.
+00104E CEOLOGIC
+00104F STORAGE TECHNOLOGY CORPORATION
+001050 RION CO., LTD.
+001051 CMICRO CORPORATION
+001052 METTLER-TOLEDO (ALBSTADT) GMBH
+001053 COMPUTER TECHNOLOGY CORP.
+001054 CISCO SYSTEMS, INC.
+001055 FUJITSU MICROELECTRONICS, INC.
+001056 SODICK CO., LTD.
+001057 Rebel.com, Inc.
+001058 ArrowPoint Communications
+001059 DIABLO RESEARCH CO. LLC
+00105A 3COM CORPORATION
+00105B NET INSIGHT AB
+00105C QUANTUM DESIGNS (H.K.) LTD.
+00105D Draeger Medical
+00105E HEKIMIAN LABORATORIES, INC.
+00105F IN-SNEC
+001060 BILLIONTON SYSTEMS, INC.
+001061 HOSTLINK CORP.
+001062 NX SERVER, ILNC.
+001063 STARGUIDE DIGITAL NETWORKS
+001064 DIGITAL EQUIPMENT CORP.
+001065 RADYNE CORPORATION
+001066 ADVANCED CONTROL SYSTEMS, INC.
+001067 REDBACK NETWORKS, INC.
+001068 COMOS TELECOM
+001069 HELIOSS COMMUNICATIONS, INC.
+00106A DIGITAL MICROWAVE CORPORATION
+00106B SONUS NETWORKS, INC.
+00106C INFRATEC PLUS GmbH
+00106D INTEGRITY COMMUNICATIONS, INC.
+00106E TADIRAN COM. LTD.
+00106F TRENTON TECHNOLOGY INC.
+001070 CARADON TREND LTD.
+001071 ADVANET INC.
+001072 GVN TECHNOLOGIES, INC.
+001073 TECHNOBOX, INC.
+001074 ATEN INTERNATIONAL CO., LTD.
+001075 Maxtor Corporation
+001076 EUREM GmbH
+001077 SAF DRIVE SYSTEMS, LTD.
+001078 NUERA COMMUNICATIONS, INC.
+001079 CISCO SYSTEMS, INC.
+00107A AmbiCom, Inc.
+00107B CISCO SYSTEMS, INC.
+00107C P-COM, INC.
+00107D AURORA COMMUNICATIONS, LTD.
+00107E BACHMANN ELECTRONIC GmbH
+00107F CRESTRON ELECTRONICS, INC.
+001080 METAWAVE COMMUNICATIONS
+001081 DPS, INC.
+001082 JNA TELECOMMUNICATIONS LIMITED
+001083 HEWLETT-PACKARD COMPANY
+001084 K-BOT COMMUNICATIONS
+001085 POLARIS COMMUNICATIONS, INC.
+001086 ATTO TECHNOLOGY, INC.
+001087 Xstreamis PLC
+001088 AMERICAN NETWORKS INC.
+001089 WebSonic
+00108A TeraLogic, Inc.
+00108B LASERANIMATION SOLLINGER GmbH
+00108C FUJITSU TELECOMMUNICATIONS EUROPE, LTD.
+00108D JOHNSON CONTROLS, INC.
+00108E HUGH SYMONS CONCEPT Technologies Ltd.
+00108F RAPTOR SYSTEMS
+001090 CIMETRICS, INC.
+001091 NO WIRES NEEDED BV
+001092 NETCORE INC.
+001093 CMS COMPUTERS, LTD.
+001094 Performance Analysis Broadband, Spirent plc
+001095 Thomson Multimedia, Inc.
+001096 TRACEWELL SYSTEMS, INC.
+001097 WinNet Metropolitan Communications Systems, Inc.
+001098 STARNET TECHNOLOGIES, INC.
+001099 InnoMedia, Inc.
+00109A NETLINE
+00109B VIXEL CORPORATION
+00109C M-SYSTEM CO., LTD.
+00109D CLARINET SYSTEMS, INC.
+00109E AWARE, INC.
+00109F PAVO, INC.
+0010A0 INNOVEX TECHNOLOGIES, INC.
+0010A1 KENDIN SEMICONDUCTOR, INC.
+0010A2 TNS
+0010A3 OMNITRONIX, INC.
+0010A4 XIRCOM
+0010A5 OXFORD INSTRUMENTS
+0010A6 CISCO SYSTEMS, INC.
+0010A7 UNEX TECHNOLOGY CORPORATION
+0010A8 RELIANCE COMPUTER CORP.
+0010A9 ADHOC TECHNOLOGIES
+0010AA MEDIA4, INC.
+0010AB KOITO INDUSTRIES, LTD.
+0010AC IMCI TECHNOLOGIES
+0010AD SOFTRONICS USB, INC.
+0010AE SHINKO ELECTRIC INDUSTRIES CO.
+0010AF TAC SYSTEMS, INC.
+0010B0 MERIDIAN TECHNOLOGY CORP.
+0010B1 FOR-A CO., LTD.
+0010B2 COACTIVE AESTHETICS
+0010B3 NOKIA MULTIMEDIA TERMINALS
+0010B4 ATMOSPHERE NETWORKS
+0010B5 ACCTON TECHNOLOGY CORPORATION
+0010B6 ENTRATA COMMUNICATIONS CORP.
+0010B7 COYOTE TECHNOLOGIES, LLC
+0010B8 ISHIGAKI COMPUTER SYSTEM CO.
+0010B9 MAXTOR CORP.
+0010BA MARTINHO-DAVIS SYSTEMS, INC.
+0010BB DATA & INFORMATION TECHNOLOGY
+0010BC Aastra Telecom
+0010BD THE TELECOMMUNICATION TECHNOLOGY COMMITTEE
+0010BE TELEXIS CORP.
+0010BF InterAir Wireless
+0010C0 ARMA, INC.
+0010C1 OI ELECTRIC CO., LTD.
+0010C2 WILLNET, INC.
+0010C3 CSI-CONTROL SYSTEMS
+0010C4 MEDIA LINKS CO., LTD.
+0010C5 PROTOCOL TECHNOLOGIES, INC.
+0010C6 USI
+0010C7 DATA TRANSMISSION NETWORK
+0010C8 COMMUNICATIONS ELECTRONICS SECURITY GROUP
+0010C9 MITSUBISHI ELECTRONICS LOGISTIC SUPPORT CO.
+0010CA INTEGRAL ACCESS
+0010CB FACIT K.K.
+0010CC CLP COMPUTER LOGISTIK PLANUNG GmbH
+0010CD INTERFACE CONCEPT
+0010CE VOLAMP, LTD.
+0010CF FIBERLANE COMMUNICATIONS
+0010D0 WITCOM, LTD.
+0010D1 Top Layer Networks, Inc.
+0010D2 NITTO TSUSHINKI CO., LTD
+0010D3 GRIPS ELECTRONIC GMBH
+0010D4 STORAGE COMPUTER CORPORATION
+0010D5 IMASDE CANARIAS, S.A.
+0010D6 ITT - A/CD
+0010D7 ARGOSY RESEARCH INC.
+0010D8 CALISTA
+0010D9 IBM JAPAN, FUJISAWA MT+D
+0010DA MOTION ENGINEERING, INC.
+0010DB NetScreen Technologies, Inc.
+0010DC MICRO-STAR INTERNATIONAL CO., LTD.
+0010DD ENABLE SEMICONDUCTOR, INC.
+0010DE INTERNATIONAL DATACASTING CORPORATION
+0010DF RISE COMPUTER INC.
+0010E0 COBALT MICROSERVER, INC.
+0010E1 S.I. TECH, INC.
+0010E2 ArrayComm, Inc.
+0010E3 COMPAQ COMPUTER CORPORATION
+0010E4 NSI CORPORATION
+0010E5 SOLECTRON TEXAS
+0010E6 APPLIED INTELLIGENT SYSTEMS, INC.
+0010E7 BreezeCom
+0010E8 TELOCITY, INCORPORATED
+0010E9 RAIDTEC LTD.
+0010EA ADEPT TECHNOLOGY
+0010EB SELSIUS SYSTEMS, INC.
+0010EC RPCG, LLC
+0010ED SUNDANCE TECHNOLOGY, INC.
+0010EE CTI PRODUCTS, INC.
+0010EF DBTEL INCORPORATED
+0010F1 I-O CORPORATION
+0010F2 ANTEC
+0010F3 Nexcom International Co., Ltd.
+0010F4 VERTICAL NETWORKS, INC.
+0010F5 AMHERST SYSTEMS, INC.
+0010F6 CISCO SYSTEMS, INC.
+0010F7 IRIICHI TECHNOLOGIES Inc.
+0010F8 KENWOOD TMI CORPORATION
+0010F9 UNIQUE SYSTEMS, INC.
+0010FA ZAYANTE, INC.
+0010FB ZIDA TECHNOLOGIES LIMITED
+0010FC BROADBAND NETWORKS, INC.
+0010FD COCOM A/S
+0010FE DIGITAL EQUIPMENT CORPORATION
+0010FF CISCO SYSTEMS, INC.
+001C7C PERQ SYSTEMS CORPORATION
+002000 LEXMARK INTERNATIONAL, INC.
+002001 DSP SOLUTIONS, INC.
+002002 SERITECH ENTERPRISE CO., LTD.
+002003 PIXEL POWER LTD.
+002004 YAMATAKE-HONEYWELL CO., LTD.
+002005 SIMPLE TECHNOLOGY
+002006 GARRETT COMMUNICATIONS, INC.
+002007 SFA, INC.
+002008 CABLE & COMPUTER TECHNOLOGY
+002009 PACKARD BELL ELEC., INC.
+00200A SOURCE-COMM CORP.
+00200B OCTAGON SYSTEMS CORP.
+00200C ADASTRA SYSTEMS CORP.
+00200D CARL ZEISS
+00200E SATELLITE TECHNOLOGY MGMT, INC
+00200F TANBAC CO., LTD.
+002010 JEOL SYSTEM TECHNOLOGY CO. LTD
+002011 CANOPUS CO., LTD.
+002012 CAMTRONICS MEDICAL SYSTEMS
+002013 DIVERSIFIED TECHNOLOGY, INC.
+002014 GLOBAL VIEW CO., LTD.
+002015 ACTIS COMPUTER SA
+002016 SHOWA ELECTRIC WIRE & CABLE CO
+002017 ORBOTECH
+002018 CIS TECHNOLOGY INC.
+002019 OHLER GmbH
+00201A N-BASE SWITCH COMMUNICATIONS
+00201B NORTHERN TELECOM/NETWORK
+00201C EXCEL, INC.
+00201D KATANA PRODUCTS
+00201E NETQUEST CORPORATION
+00201F BEST POWER TECHNOLOGY, INC.
+002020 MEGATRON COMPUTER INDUSTRIES PTY, LTD.
+002021 ALGORITHMS SOFTWARE PVT. LTD.
+002022 TEKNIQUE, INC.
+002023 T.C. TECHNOLOGIES PTY. LTD
+002024 PACIFIC COMMUNICATION SCIENCES
+002025 CONTROL TECHNOLOGY, INC.
+002026 AMKLY SYSTEMS, INC.
+002027 MING FORTUNE INDUSTRY CO., LTD
+002028 WEST EGG SYSTEMS, INC.
+002029 TELEPROCESSING PRODUCTS, INC.
+00202A N.V. DZINE
+00202B ADVANCED TELECOMMUNICATIONS MODULES, LTD.
+00202C WELLTRONIX CO., LTD.
+00202D TAIYO CORPORATION
+00202E DAYSTAR DIGITAL
+00202F ZETA COMMUNICATIONS, LTD.
+002030 ANALOG & DIGITAL SYSTEMS
+002031 ERTEC GmbH
+002032 ALCATEL TAISEL
+002033 SYNAPSE TECHNOLOGIES, INC.
+002034 ROTEC INDUSTRIEAUTOMATION GMBH
+002035 IBM CORPORATION
+002036 BMC SOFTWARE
+002037 SEAGATE TECHNOLOGY
+002038 VME MICROSYSTEMS INTERNATIONAL CORPORATION
+002039 SCINETS
+00203A DIGITAL BI0METRICS INC.
+00203B WISDM LTD.
+00203C EUROTIME AB
+00203D NOVAR ELECTRONICS CORPORATION
+00203E LogiCan Technologies, Inc.
+00203F JUKI CORPORATION
+002040 Motorola Broadband Communications Sector
+002041 DATA NET
+002042 DATAMETRICS CORP.
+002043 NEURON COMPANY LIMITED
+002044 GENITECH PTY LTD
+002045 ION Networks, Inc.
+002046 CIPRICO, INC.
+002047 STEINBRECHER CORP.
+002048 Marconi Communications
+002049 COMTRON, INC.
+00204A PRONET GMBH
+00204B AUTOCOMPUTER CO., LTD.
+00204C MITRON COMPUTER PTE LTD.
+00204D INOVIS GMBH
+00204E NETWORK SECURITY SYSTEMS, INC.
+00204F DEUTSCHE AEROSPACE AG
+002050 KOREA COMPUTER INC.
+002051 Verilink Corporation
+002052 RAGULA SYSTEMS
+002053 HUNTSVILLE MICROSYSTEMS, INC.
+002054 EASTERN RESEARCH, INC.
+002055 ALTECH CO., LTD.
+002056 NEOPRODUCTS
+002057 TITZE DATENTECHNIK GmbH
+002058 ALLIED SIGNAL INC.
+002059 MIRO COMPUTER PRODUCTS AG
+00205A COMPUTER IDENTICS
+00205B SKYLINE TECHNOLOGY
+00205C InterNet Systems of Florida, Inc.
+00205D NANOMATIC OY
+00205E CASTLE ROCK, INC.
+00205F GAMMADATA COMPUTER GMBH
+002060 ALCATEL ITALIA S.p.A.
+002061 DYNATECH COMMUNICATIONS, INC.
+002062 SCORPION LOGIC, LTD.
+002063 WIPRO INFOTECH LTD.
+002064 PROTEC MICROSYSTEMS, INC.
+002065 SUPERNET NETWORKING INC.
+002066 GENERAL MAGIC, INC.
+002068 ISDYNE
+002069 ISDN SYSTEMS CORPORATION
+00206A OSAKA COMPUTER CORP.
+00206B KONICA MINOLTA HOLDINGS, INC.
+00206C EVERGREEN TECHNOLOGY CORP.
+00206D DATA RACE, INC.
+00206E XACT, INC.
+00206F FLOWPOINT CORPORATION
+002070 HYNET, LTD.
+002071 IBR GMBH
+002072 WORKLINK INNOVATIONS
+002073 FUSION SYSTEMS CORPORATION
+002074 SUNGWOON SYSTEMS
+002075 MOTOROLA COMMUNICATION ISRAEL
+002076 REUDO CORPORATION
+002077 KARDIOS SYSTEMS CORP.
+002078 RUNTOP, INC.
+002079 MIKRON GMBH
+00207A WiSE Communications, Inc.
+00207B Intel Corporation
+00207C AUTEC GmbH
+00207D ADVANCED COMPUTER APPLICATIONS
+00207E FINECOM Co., Ltd.
+00207F KYOEI SANGYO CO., LTD.
+002080 SYNERGY (UK) LTD.
+002081 TITAN ELECTRONICS
+002082 ONEAC CORPORATION
+002083 PRESTICOM INCORPORATED
+002084 OCE PRINTING SYSTEMS, GMBH
+002085 EXIDE ELECTRONICS
+002086 MICROTECH ELECTRONICS LIMITED
+002087 MEMOTEC COMMUNICATIONS CORP.
+002088 GLOBAL VILLAGE COMMUNICATION
+002089 T3PLUS NETWORKING, INC.
+00208A SONIX COMMUNICATIONS, LTD.
+00208B LAPIS TECHNOLOGIES, INC.
+00208C GALAXY NETWORKS, INC.
+00208D CMD TECHNOLOGY
+00208E CHEVIN SOFTWARE ENG. LTD.
+00208F ECI TELECOM LTD.
+002090 ADVANCED COMPRESSION TECHNOLOGY, INC.
+002091 J125, NATIONAL SECURITY AGENCY
+002092 CHESS ENGINEERING B.V.
+002093 LANDINGS TECHNOLOGY CORP.
+002094 CUBIX CORPORATION
+002095 RIVA ELECTRONICS
+002096 Invensys
+002097 APPLIED SIGNAL TECHNOLOGY
+002098 HECTRONIC AB
+002099 BON ELECTRIC CO., LTD.
+00209A THE 3DO COMPANY
+00209B ERSAT ELECTRONIC GMBH
+00209C PRIMARY ACCESS CORP.
+00209D LIPPERT AUTOMATIONSTECHNIK
+00209E BROWN'S OPERATING SYSTEM SERVICES, LTD.
+00209F MERCURY COMPUTER SYSTEMS, INC.
+0020A0 OA LABORATORY CO., LTD.
+0020A1 DOVATRON
+0020A2 GALCOM NETWORKING LTD.
+0020A3 DIVICOM INC.
+0020A4 MULTIPOINT NETWORKS
+0020A5 API ENGINEERING
+0020A6 PROXIM, INC.
+0020A7 PAIRGAIN TECHNOLOGIES, INC.
+0020A8 SAST TECHNOLOGY CORP.
+0020A9 WHITE HORSE INDUSTRIAL
+0020AA DIGIMEDIA VISION LTD.
+0020AB MICRO INDUSTRIES CORP.
+0020AC INTERFLEX DATENSYSTEME GMBH
+0020AD LINQ SYSTEMS
+0020AE ORNET DATA COMMUNICATION TECH.
+0020AF 3COM CORPORATION
+0020B0 GATEWAY DEVICES, INC.
+0020B1 COMTECH RESEARCH INC.
+0020B2 GKD Gesellschaft Fur Kommunikation Und Datentechnik
+0020B3 SCLTEC COMMUNICATIONS SYSTEMS
+0020B4 TERMA ELEKTRONIK AS
+0020B5 YASKAWA ELECTRIC CORPORATION
+0020B6 AGILE NETWORKS, INC.
+0020B7 NAMAQUA COMPUTERWARE
+0020B8 PRIME OPTION, INC.
+0020B9 METRICOM, INC.
+0020BA CENTER FOR HIGH PERFORMANCE
+0020BB ZAX CORPORATION
+0020BC JTEC PTY LTD.
+0020BD NIOBRARA R & D CORPORATION
+0020BE LAN ACCESS CORP.
+0020BF AEHR TEST SYSTEMS
+0020C0 PULSE ELECTRONICS, INC.
+0020C1 TAIKO ELECTRIC WORKS, LTD.
+0020C2 TEXAS MEMORY SYSTEMS, INC.
+0020C3 COUNTER SOLUTIONS LTD.
+0020C4 INET,INC.
+0020C5 EAGLE TECHNOLOGY
+0020C6 NECTEC
+0020C7 AKAI Professional M.I. Corp.
+0020C8 LARSCOM INCORPORATED
+0020C9 VICTRON BV
+0020CA DIGITAL OCEAN
+0020CB PRETEC ELECTRONICS CORP.
+0020CC DIGITAL SERVICES, LTD.
+0020CD HYBRID NETWORKS, INC.
+0020CE LOGICAL DESIGN GROUP, INC.
+0020CF TEST & MEASUREMENT SYSTEMS INC
+0020D0 VERSALYNX CORPORATION
+0020D1 MICROCOMPUTER SYSTEMS (M) SDN.
+0020D2 RAD DATA COMMUNICATIONS, LTD.
+0020D3 OST (OUEST STANDARD TELEMATIQU
+0020D4 CABLETRON - ZEITTNET INC.
+0020D5 VIPA GMBH
+0020D6 BREEZECOM
+0020D7 JAPAN MINICOMPUTER SYSTEMS CO., Ltd.
+0020D8 Nortel Networks
+0020D9 PANASONIC TECHNOLOGIES, INC./MIECO-US
+0020DA XYLAN CORPORATION
+0020DB XNET TECHNOLOGY, INC.
+0020DC DENSITRON TAIWAN LTD.
+0020DD Cybertec Pty Ltd
+0020DE JAPAN DIGITAL LABORAT'Y CO.LTD
+0020DF KYOSAN ELECTRIC MFG. CO., LTD.
+0020E0 PREMAX ELECTRONICS, INC.
+0020E1 ALAMAR ELECTRONICS
+0020E2 INFORMATION RESOURCE ENGINEERING
+0020E3 MCD KENCOM CORPORATION
+0020E4 HSING TECH ENTERPRISE CO., LTD
+0020E5 APEX DATA, INC.
+0020E6 LIDKOPING MACHINE TOOLS AB
+0020E7 B&W NUCLEAR SERVICE COMPANY
+0020E8 DATATREK CORPORATION
+0020E9 DANTEL
+0020EA EFFICIENT NETWORKS, INC.
+0020EB CINCINNATI MICROWAVE, INC.
+0020EC TECHWARE SYSTEMS CORP.
+0020ED GIGA-BYTE TECHNOLOGY CO., LTD.
+0020EE GTECH CORPORATION
+0020EF USC CORPORATION
+0020F0 UNIVERSAL MICROELECTRONICS CO.
+0020F1 ALTOS INDIA LIMITED
+0020F2 SUN MICROSYSTEMS, INC.
+0020F3 RAYNET CORPORATION
+0020F4 SPECTRIX CORPORATION
+0020F5 PANDATEL AG
+0020F6 NET TEK  AND KARLNET, INC.
+0020F7 CYBERDATA
+0020F8 CARRERA COMPUTERS, INC.
+0020F9 PARALINK NETWORKS, INC.
+0020FA GDE SYSTEMS, INC.
+0020FB OCTEL COMMUNICATIONS CORP.
+0020FC MATROX
+0020FD ITV TECHNOLOGIES, INC.
+0020FE TOPWARE INC. / GRAND COMPUTER
+0020FF SYMMETRICAL TECHNOLOGIES
+003000 ALLWELL TECHNOLOGY CORP.
+003001 SMP
+003002 Expand Networks
+003003 Phasys Ltd.
+003004 LEADTEK RESEARCH INC.
+003005 Fujitsu Siemens Computers
+003006 SUPERPOWER COMPUTER
+003007 OPTI, INC.
+003008 AVIO DIGITAL, INC.
+003009 Tachion Networks, Inc.
+00300A AZTECH SYSTEMS LTD.
+00300B mPHASE Technologies, Inc.
+00300C CONGRUENCY, LTD.
+00300D MMC Technology, Inc.
+00300E Klotz Digital AG
+00300F IMT - Information Management T
+003010 VISIONETICS INTERNATIONAL
+003011 HMS FIELDBUS SYSTEMS AB
+003012 DIGITAL ENGINEERING LTD.
+003013 NEC Corporation
+003014 DIVIO, INC.
+003015 CP CLARE CORP.
+003016 ISHIDA CO., LTD.
+003017 TERASTACK LTD.
+003018 Jetway Information Co., Ltd.
+003019 CISCO SYSTEMS, INC.
+00301A SMARTBRIDGES PTE. LTD.
+00301B SHUTTLE, INC.
+00301C ALTVATER AIRDATA SYSTEMS
+00301D SKYSTREAM, INC.
+00301E 3COM Europe Ltd.
+00301F OPTICAL NETWORKS, INC.
+003020 TSI, Inc..
+003021 HSING TECH. ENTERPRISE CO.,LTD
+003022 Fong Kai Industrial Co., Ltd.
+003023 COGENT COMPUTER SYSTEMS, INC.
+003024 CISCO SYSTEMS, INC.
+003025 CHECKOUT COMPUTER SYSTEMS, LTD
+003026 HEITEL
+003027 KERBANGO, INC.
+003028 FASE Saldatura srl
+003029 OPICOM
+00302A SOUTHERN INFORMATION
+00302B INALP NETWORKS, INC.
+00302C SYLANTRO SYSTEMS CORPORATION
+00302D QUANTUM BRIDGE COMMUNICATIONS
+00302E Hoft & Wessel AG
+00302F Smiths Industries
+003030 HARMONIX CORPORATION
+003031 LIGHTWAVE COMMUNICATIONS, INC.
+003032 MagicRam, Inc.
+003033 ORIENT TELECOM CO., LTD.
+003036 RMP ELEKTRONIKSYSTEME GMBH
+003037 Packard Bell Nec Services
+003038 XCP, INC.
+003039 SOFTBOOK PRESS
+00303A MAATEL
+00303B PowerCom Technology
+00303C ONNTO CORP.
+00303D IVA CORPORATION
+00303E Radcom Ltd.
+00303F TurboComm Tech Inc.
+003040 CISCO SYSTEMS, INC.
+003041 SAEJIN T & M CO., LTD.
+003042 DeTeWe-Deutsche Telephonwerke
+003043 IDREAM TECHNOLOGIES, PTE. LTD.
+003044 Portsmith LLC
+003045 Village Networks, Inc. (VNI)
+003046 Controlled Electronic Manageme
+003047 NISSEI ELECTRIC CO., LTD.
+003048 Supermicro Computer, Inc.
+003049 BRYANT TECHNOLOGY, LTD.
+00304A FRAUNHOFER INSTITUTE IMS
+00304B ORBACOM SYSTEMS, INC.
+00304C APPIAN COMMUNICATIONS, INC.
+00304D ESI
+00304E BUSTEC PRODUCTION LTD.
+00304F PLANET Technology Corporation
+003050 Versa Technology
+003051 ORBIT AVIONIC & COMMUNICATION
+003052 ELASTIC NETWORKS
+003053 Basler AG
+003054 CASTLENET TECHNOLOGY, INC.
+003055 Hitachi Semiconductor America,
+003056 Beck IPC GmbH
+003057 E-Tel Corporation
+003058 API MOTION
+003059 DIGITAL-LOGIC AG
+00305A TELGEN CORPORATION
+00305B MODULE DEPARTMENT
+00305C SMAR Laboratories Corp.
+00305D DIGITRA SYSTEMS, INC.
+00305E Abelko Innovation
+00305F IMACON APS
+003060 STARMATIX, INC.
+003061 MobyTEL
+003062 PATH 1 NETWORK TECHNOL'S INC.
+003063 SANTERA SYSTEMS, INC.
+003064 ADLINK TECHNOLOGY, INC.
+003065 APPLE COMPUTER, INC.
+003066 DIGITAL WIRELESS CORPORATION
+003067 BIOSTAR MICROTECH INT'L CORP.
+003068 CYBERNETICS TECH. CO., LTD.
+003069 IMPACCT TECHNOLOGY CORP.
+00306A PENTA MEDIA CO., LTD.
+00306B CMOS SYSTEMS, INC.
+00306C Hitex Holding GmbH
+00306D LUCENT TECHNOLOGIES
+00306E HEWLETT PACKARD
+00306F SEYEON TECH. CO., LTD.
+003070 1Net Corporation
+003071 Cisco Systems, Inc.
+003072 INTELLIBYTE INC.
+003073 International Microsystems, In
+003074 EQUIINET LTD.
+003075 ADTECH
+003076 Akamba Corporation
+003077 ONPREM NETWORKS
+003078 Cisco Systems, Inc.
+003079 CQOS, INC.
+00307A Advanced Technology & Systems
+00307B Cisco Systems, Inc.
+00307C ADID SA
+00307D GRE AMERICA, INC.
+00307E Redflex Communication Systems
+00307F IRLAN LTD.
+003080 CISCO SYSTEMS, INC.
+003081 ALTOS C&C
+003082 TAIHAN ELECTRIC WIRE CO., LTD.
+003083 Ivron Systems
+003084 ALLIED TELESYN INTERNAIONAL
+003085 CISCO SYSTEMS, INC.
+003086 Transistor Devices, Inc.
+003087 VEGA GRIESHABER KG
+003088 Siara Systems, Inc.
+003089 Spectrapoint Wireless, LLC
+00308A NICOTRA SISTEMI S.P.A
+00308B Brix Networks
+00308C ADVANCED DIGITAL INFORMATION
+00308D PINNACLE SYSTEMS, INC.
+00308E CROSS MATCH TECHNOLOGIES, INC.
+00308F MICRILOR, Inc.
+003090 CYRA TECHNOLOGIES, INC.
+003091 TAIWAN FIRST LINE ELEC. CORP.
+003092 ModuNORM GmbH
+003093 SONNET TECHNOLOGIES, INC.
+003094 Cisco Systems, Inc.
+003095 Procomp Informatics, Ltd.
+003096 CISCO SYSTEMS, INC.
+003097 EXOMATIC AB
+003098 Global Converging Technologies
+003099 BOENIG UND KALLENBACH OHG
+00309A ASTRO TERRA CORP.
+00309B Smartware
+00309C Timing Applications, Inc.
+00309D Nimble Microsystems, Inc.
+00309E WORKBIT CORPORATION.
+00309F AMBER NETWORKS
+0030A0 TYCO SUBMARINE SYSTEMS, LTD.
+0030A1 WEBGATE Inc.
+0030A2 Lightner Engineering
+0030A3 CISCO SYSTEMS, INC.
+0030A4 Woodwind Communications System
+0030A5 ACTIVE POWER
+0030A6 VIANET TECHNOLOGIES, LTD.
+0030A7 SCHWEITZER ENGINEERING
+0030A8 OL'E COMMUNICATIONS, INC.
+0030A9 Netiverse, Inc.
+0030AA AXUS MICROSYSTEMS, INC.
+0030AB DELTA NETWORKS, INC.
+0030AC Systeme Lauer GmbH & Co., Ltd.
+0030AD SHANGHAI COMMUNICATION
+0030AE Times N System, Inc.
+0030AF Honeywell GmbH
+0030B0 Convergenet Technologies
+0030B1 GOC GESELLSCHAFT FUR OPTISCHE
+0030B2 WESCAM - HEALDSBURG
+0030B3 San Valley Systems, Inc.
+0030B4 INTERSIL CORP.
+0030B5 Tadiran Microwave Networks
+0030B6 CISCO SYSTEMS, INC.
+0030B7 Teletrol Systems, Inc.
+0030B8 RiverDelta Networks
+0030B9 ECTEL
+0030BA AC&T SYSTEM CO., LTD.
+0030BB CacheFlow, Inc.
+0030BC Optronic AG
+0030BD BELKIN COMPONENTS
+0030BE City-Net Technology, Inc.
+0030BF MULTIDATA GMBH
+0030C0 Lara Technology, Inc.
+0030C1 HEWLETT-PACKARD
+0030C2 COMONE
+0030C3 FLUECKIGER ELEKTRONIK AG
+0030C4 Niigata Canotec Co., Inc.
+0030C5 CADENCE DESIGN SYSTEMS
+0030C6 CONTROL SOLUTIONS, INC.
+0030C7 MACROMATE CORP.
+0030C8 GAD LINE, LTD.
+0030C9 LuxN, N
+0030CA Discovery Com
+0030CB OMNI FLOW COMPUTERS, INC.
+0030CC Tenor Networks, Inc.
+0030CD CONEXANT SYSTEMS, INC.
+0030CE Zaffire
+0030CF TWO TECHNOLOGIES, INC.
+0030D1 INOVA CORPORATION
+0030D2 WIN TECHNOLOGIES, CO., LTD.
+0030D3 Agilent Technologies
+0030D4 COMTIER
+0030D5 DResearch GmbH
+0030D6 MSC VERTRIEBS GMBH
+0030D7 Innovative Systems, L.L.C.
+0030D8 SITEK
+0030D9 DATACORE SOFTWARE CORP.
+0030DA COMTREND CO.
+0030DB Mindready Solutions, Inc.
+0030DC RIGHTECH CORPORATION
+0030DD INDIGITA CORPORATION
+0030DE WAGO Kontakttechnik GmbH
+0030DF KB/TEL TELECOMUNICACIONES
+0030E0 OXFORD SEMICONDUCTOR LTD.
+0030E1 ACROTRON SYSTEMS, INC.
+0030E2 GARNET SYSTEMS CO., LTD.
+0030E3 SEDONA NETWORKS CORP.
+0030E4 CHIYODA SYSTEM RIKEN
+0030E5 Amper Datos S.A.
+0030E6 SIEMENS MEDICAL SYSTEMS
+0030E7 CNF MOBILE SOLUTIONS, INC.
+0030E8 ENSIM CORP.
+0030E9 GMA COMMUNICATION MANUFACT'G
+0030EA TeraForce Technology Corporation
+0030EB TURBONET COMMUNICATIONS, INC.
+0030EC BORGARDT
+0030ED Expert Magnetics Corp.
+0030EE DSG Technology, Inc.
+0030EF NEON TECHNOLOGY, INC.
+0030F0 Uniform Industrial Corp.
+0030F1 Accton Technology Corp.
+0030F2 CISCO SYSTEMS, INC.
+0030F3 At Work Computers
+0030F4 STARDOT TECHNOLOGIES
+0030F5 Wild Lab. Ltd.
+0030F6 SECURELOGIX CORPORATION
+0030F7 RAMIX INC.
+0030F8 Dynapro Systems, Inc.
+0030F9 Sollae Systems Co., Ltd.
+0030FA TELICA, INC.
+0030FB AZS Technology AG
+0030FC Terawave Communications, Inc.
+0030FD INTEGRATED SYSTEMS DESIGN
+0030FE DSA GmbH
+0030FF DATAFAB SYSTEMS, INC.
+004000 PCI COMPONENTES DA AMZONIA LTD
+004001 ZYXEL COMMUNICATIONS, INC.
+004002 PERLE SYSTEMS LIMITED
+004003 WESTINGHOUSE PROCESS CONTROL
+004004 ICM CO. LTD.
+004005 ANI COMMUNICATIONS INC.
+004006 SAMPO TECHNOLOGY CORPORATION
+004007 TELMAT INFORMATIQUE
+004008 A PLUS INFO CORPORATION
+004009 TACHIBANA TECTRON CO., LTD.
+00400A PIVOTAL TECHNOLOGIES, INC.
+00400B CISCO SYSTEMS, INC.
+00400C GENERAL MICRO SYSTEMS, INC.
+00400D LANNET DATA COMMUNICATIONS,LTD
+00400E MEMOTEC COMMUNICATIONS, INC.
+00400F DATACOM TECHNOLOGIES
+004010 SONIC SYSTEMS, INC.
+004011 ANDOVER CONTROLS CORPORATION
+004012 WINDATA, INC.
+004013 NTT DATA COMM. SYSTEMS CORP.
+004014 COMSOFT GMBH
+004015 ASCOM INFRASYS AG
+004016 HADAX ELECTRONICS, INC.
+004017 XCD INC.
+004018 ADOBE SYSTEMS, INC.
+004019 AEON SYSTEMS, INC.
+00401A FUJI ELECTRIC CO., LTD.
+00401B PRINTER SYSTEMS CORP.
+00401C AST RESEARCH, INC.
+00401D INVISIBLE SOFTWARE, INC.
+00401E ICC
+00401F COLORGRAPH LTD
+004020 PINACL COMMUNICATION
+004021 RASTER GRAPHICS
+004022 KLEVER COMPUTERS, INC.
+004023 LOGIC CORPORATION
+004024 COMPAC INC.
+004025 MOLECULAR DYNAMICS
+004026 MELCO, INC.
+004027 SMC MASSACHUSETTS, INC.
+004028 NETCOMM LIMITED
+004029 COMPEX
+00402A CANOGA-PERKINS
+00402B TRIGEM COMPUTER, INC.
+00402C ISIS DISTRIBUTED SYSTEMS, INC.
+00402D HARRIS ADACOM CORPORATION
+00402E PRECISION SOFTWARE, INC.
+00402F XLNT DESIGNS INC.
+004030 GK COMPUTER
+004031 KOKUSAI ELECTRIC CO., LTD
+004032 DIGITAL COMMUNICATIONS
+004033 ADDTRON TECHNOLOGY CO., LTD.
+004034 BUSTEK CORPORATION
+004035 OPCOM
+004036 TRIBE COMPUTER WORKS, INC.
+004037 SEA-ILAN, INC.
+004038 TALENT ELECTRIC INCORPORATED
+004039 OPTEC DAIICHI DENKO CO., LTD.
+00403A IMPACT TECHNOLOGIES
+00403B SYNERJET INTERNATIONAL CORP.
+00403C FORKS, INC.
+00403D TERADATA
+00403E RASTER OPS CORPORATION
+00403F SSANGYONG COMPUTER SYSTEMS
+004040 RING ACCESS, INC.
+004041 FUJIKURA LTD.
+004042 N.A.T. GMBH
+004043 NOKIA TELECOMMUNICATIONS
+004044 QNIX COMPUTER CO., LTD.
+004045 TWINHEAD CORPORATION
+004046 UDC RESEARCH LIMITED
+004047 WIND RIVER SYSTEMS
+004048 SMD INFORMATICA S.A.
+004049 TEGIMENTA AG
+00404A WEST AUSTRALIAN DEPARTMENT
+00404B MAPLE COMPUTER SYSTEMS
+00404C HYPERTEC PTY LTD.
+00404D TELECOMMUNICATIONS TECHNIQUES
+00404E FLUENT, INC.
+00404F SPACE & NAVAL WARFARE SYSTEMS
+004050 IRONICS, INCORPORATED
+004051 GRACILIS, INC.
+004052 STAR TECHNOLOGIES, INC.
+004053 AMPRO COMPUTERS
+004054 CONNECTION MACHINES SERVICES
+004055 METRONIX GMBH
+004056 MCM JAPAN LTD.
+004057 LOCKHEED - SANDERS
+004058 KRONOS, INC.
+004059 YOSHIDA KOGYO K. K.
+00405A GOLDSTAR INFORMATION & COMM.
+00405B FUNASSET LIMITED
+00405C FUTURE SYSTEMS, INC.
+00405D STAR-TEK, INC.
+00405E NORTH HILLS ISRAEL
+00405F AFE COMPUTERS LTD.
+004060 COMENDEC LTD
+004061 DATATECH ENTERPRISES CO., LTD.
+004062 E-SYSTEMS, INC./GARLAND DIV.
+004063 VIA TECHNOLOGIES, INC.
+004064 KLA INSTRUMENTS CORPORATION
+004065 GTE SPACENET
+004066 HITACHI CABLE, LTD.
+004067 OMNIBYTE CORPORATION
+004068 EXTENDED SYSTEMS
+004069 LEMCOM SYSTEMS, INC.
+00406A KENTEK INFORMATION SYSTEMS,INC
+00406B SYSGEN
+00406C COPERNIQUE
+00406D LANCO, INC.
+00406E COROLLARY, INC.
+00406F SYNC RESEARCH INC.
+004070 INTERWARE CO., LTD.
+004071 ATM COMPUTER GMBH
+004072 Applied Innovation Inc.
+004073 BASS ASSOCIATES
+004074 CABLE AND WIRELESS
+004075 M-TRADE (UK) LTD
+004076 Sun Conversion Technologies
+004077 MAXTON TECHNOLOGY CORPORATION
+004078 WEARNES AUTOMATION PTE LTD
+004079 JUKO MANUFACTURE COMPANY, LTD.
+00407A SOCIETE D'EXPLOITATION DU CNIT
+00407B SCIENTIFIC ATLANTA
+00407C QUME CORPORATION
+00407D EXTENSION TECHNOLOGY CORP.
+00407E EVERGREEN SYSTEMS, INC.
+00407F FLIR Systems
+004080 ATHENIX CORPORATION
+004081 MANNESMANN SCANGRAPHIC GMBH
+004082 LABORATORY EQUIPMENT CORP.
+004083 TDA INDUSTRIA DE PRODUTOS
+004084 HONEYWELL INC.
+004085 SAAB INSTRUMENTS AB
+004086 MICHELS & KLEBERHOFF COMPUTER
+004087 UBITREX CORPORATION
+004088 MOBIUS TECHNOLOGIES, INC.
+004089 MEIDENSHA CORPORATION
+00408A TPS TELEPROCESSING SYS. GMBH
+00408B RAYLAN CORPORATION
+00408C AXIS COMMUNICATIONS AB
+00408D THE GOODYEAR TIRE & RUBBER CO.
+00408E DIGILOG, INC.
+00408F WM-DATA MINFO AB
+004090 ANSEL COMMUNICATIONS
+004091 PROCOMP INDUSTRIA ELETRONICA
+004092 ASP COMPUTER PRODUCTS, INC.
+004093 PAXDATA NETWORKS LTD.
+004094 SHOGRAPHICS, INC.
+004095 R.P.T. INTERGROUPS INT'L LTD.
+004096 Aironet Wireless Communication
+004097 DATEX DIVISION OF
+004098 DRESSLER GMBH & CO.
+004099 NEWGEN SYSTEMS CORP.
+00409A NETWORK EXPRESS, INC.
+00409B HAL COMPUTER SYSTEMS INC.
+00409C TRANSWARE
+00409D DIGIBOARD, INC.
+00409E CONCURRENT TECHNOLOGIES  LTD.
+00409F LANCAST/CASAT TECHNOLOGY, INC.
+0040A0 GOLDSTAR CO., LTD.
+0040A1 ERGO COMPUTING
+0040A2 KINGSTAR TECHNOLOGY INC.
+0040A3 MICROUNITY SYSTEMS ENGINEERING
+0040A4 ROSE ELECTRONICS
+0040A5 CLINICOMP INTL.
+0040A6 Cray, Inc.
+0040A7 ITAUTEC PHILCO S.A.
+0040A8 IMF INTERNATIONAL LTD.
+0040A9 DATACOM INC.
+0040AA VALMET AUTOMATION INC.
+0040AB ROLAND DG CORPORATION
+0040AC SUPER WORKSTATION, INC.
+0040AD SMA REGELSYSTEME GMBH
+0040AE DELTA CONTROLS, INC.
+0040AF DIGITAL PRODUCTS, INC.
+0040B0 BYTEX CORPORATION, ENGINEERING
+0040B1 CODONICS INC.
+0040B2 SYSTEMFORSCHUNG
+0040B3 PAR MICROSYSTEMS CORPORATION
+0040B4 NEXTCOM K.K.
+0040B5 VIDEO TECHNOLOGY COMPUTERS LTD
+0040B6 COMPUTERM  CORPORATION
+0040B7 STEALTH COMPUTER SYSTEMS
+0040B8 IDEA ASSOCIATES
+0040B9 MACQ ELECTRONIQUE SA
+0040BA ALLIANT COMPUTER SYSTEMS CORP.
+0040BB GOLDSTAR CABLE CO., LTD.
+0040BC ALGORITHMICS LTD.
+0040BD STARLIGHT NETWORKS, INC.
+0040BE BOEING DEFENSE & SPACE
+0040BF CHANNEL SYSTEMS INTERN'L INC.
+0040C0 VISTA CONTROLS CORPORATION
+0040C1 BIZERBA-WERKE WILHEIM KRAUT
+0040C2 APPLIED COMPUTING DEVICES
+0040C3 FISCHER AND PORTER CO.
+0040C4 KINKEI SYSTEM CORPORATION
+0040C5 MICOM COMMUNICATIONS INC.
+0040C6 FIBERNET RESEARCH, INC.
+0040C7 RUBY TECH CORPORATION
+0040C8 MILAN TECHNOLOGY CORPORATION
+0040C9 NCUBE
+0040CA FIRST INTERNAT'L COMPUTER, INC
+0040CB LANWAN TECHNOLOGIES
+0040CC SILCOM MANUF'G TECHNOLOGY INC.
+0040CD TERA MICROSYSTEMS, INC.
+0040CE NET-SOURCE, INC.
+0040CF STRAWBERRY TREE, INC.
+0040D0 MITAC INTERNATIONAL CORP.
+0040D1 FUKUDA DENSHI CO., LTD.
+0040D2 PAGINE CORPORATION
+0040D3 KIMPSION INTERNATIONAL CORP.
+0040D4 GAGE TALKER CORP.
+0040D5 SARTORIUS AG
+0040D6 LOCAMATION B.V.
+0040D7 STUDIO GEN INC.
+0040D8 OCEAN OFFICE AUTOMATION LTD.
+0040D9 AMERICAN MEGATRENDS INC.
+0040DA TELSPEC LTD
+0040DB ADVANCED TECHNICAL SOLUTIONS
+0040DC TRITEC ELECTRONIC GMBH
+0040DD HONG TECHNOLOGIES
+0040DE ELETTRONICA SAN GIORGIO
+0040DF DIGALOG SYSTEMS, INC.
+0040E0 ATOMWIDE LTD.
+0040E1 MARNER INTERNATIONAL, INC.
+0040E2 MESA RIDGE TECHNOLOGIES, INC.
+0040E3 QUIN SYSTEMS LTD
+0040E4 E-M TECHNOLOGY, INC.
+0040E5 SYBUS CORPORATION
+0040E6 C.A.E.N.
+0040E7 ARNOS INSTRUMENTS & COMPUTER
+0040E8 CHARLES RIVER DATA SYSTEMS,INC
+0040E9 ACCORD SYSTEMS, INC.
+0040EA PLAIN TREE SYSTEMS INC
+0040EB MARTIN MARIETTA CORPORATION
+0040EC MIKASA SYSTEM ENGINEERING
+0040ED NETWORK CONTROLS INT'NATL INC.
+0040EE OPTIMEM
+0040EF HYPERCOM, INC.
+0040F0 MICRO SYSTEMS, INC.
+0040F1 CHUO ELECTRONICS CO., LTD.
+0040F2 JANICH & KLASS COMPUTERTECHNIK
+0040F3 NETCOR
+0040F4 CAMEO COMMUNICATIONS, INC.
+0040F5 OEM ENGINES
+0040F6 KATRON COMPUTERS INC.
+0040F7 POLAROID MEDICAL IMAGING SYS.
+0040F8 SYSTEMHAUS DISCOM
+0040F9 COMBINET
+0040FA MICROBOARDS, INC.
+0040FB CASCADE COMMUNICATIONS CORP.
+0040FC IBR COMPUTER TECHNIK GMBH
+0040FD LXE
+0040FE SYMPLEX COMMUNICATIONS
+0040FF TELEBIT CORPORATION
+004252 RLX Technologies
+005000 NEXO COMMUNICATIONS, INC.
+005001 YAMASHITA SYSTEMS CORP.
+005002 OMNISEC AG
+005003 GRETAG MACBETH AG
+005004 3COM CORPORATION
+005006 TAC AB
+005007 SIEMENS TELECOMMUNICATION SYSTEMS LIMITED
+005008 TIVA MICROCOMPUTER CORP. (TMC)
+005009 PHILIPS BROADBAND NETWORKS
+00500A IRIS TECHNOLOGIES, INC.
+00500B CISCO SYSTEMS, INC.
+00500C e-Tek Labs, Inc.
+00500D SATORI ELECTORIC CO., LTD.
+00500E CHROMATIS NETWORKS, INC.
+00500F CISCO SYSTEMS, INC.
+005010 NovaNET Learning, Inc.
+005012 CBL - GMBH
+005013 Chaparral Network Storage
+005014 CISCO SYSTEMS, INC.
+005015 BRIGHT STAR ENGINEERING
+005016 SST/WOODHEAD INDUSTRIES
+005017 RSR S.R.L.
+005018 ADVANCED MULTIMEDIA INTERNET TECHNOLOGY INC.
+005019 SPRING TIDE NETWORKS, INC.
+00501A UISIQN
+00501B ABL CANADA, INC.
+00501C JATOM SYSTEMS, INC.
+00501E Miranda Technologies, Inc.
+00501F MRG SYSTEMS, LTD.
+005020 MEDIASTAR CO., LTD.
+005021 EIS INTERNATIONAL, INC.
+005022 ZONET TECHNOLOGY, INC.
+005023 PG DESIGN ELECTRONICS, INC.
+005024 NAVIC SYSTEMS, INC.
+005026 COSYSTEMS, INC.
+005027 GENICOM CORPORATION
+005028 AVAL COMMUNICATIONS
+005029 1394 PRINTER WORKING GROUP
+00502A CISCO SYSTEMS, INC.
+00502B GENRAD LTD.
+00502C SOYO COMPUTER, INC.
+00502D ACCEL, INC.
+00502E CAMBEX CORPORATION
+00502F TollBridge Technologies, Inc.
+005030 FUTURE PLUS SYSTEMS
+005031 AEROFLEX LABORATORIES, INC.
+005032 PICAZO COMMUNICATIONS, INC.
+005033 MAYAN NETWORKS
+005036 NETCAM, LTD.
+005037 KOGA ELECTRONICS CO.
+005038 DAIN TELECOM CO., LTD.
+005039 MARINER NETWORKS
+00503A DATONG ELECTRONICS LTD.
+00503B MEDIAFIRE CORPORATION
+00503C TSINGHUA NOVEL ELECTRONICS
+00503E CISCO SYSTEMS, INC.
+00503F ANCHOR GAMES
+005040 EMWARE, INC.
+005041 CTX OPTO ELECTRONIC CORP.
+005042 SCI MANUFACTURING SINGAPORE PTE, LTD.
+005043 MARVELL SEMICONDUCTOR, INC.
+005044 ASACA CORPORATION
+005045 RIOWORKS SOLUTIONS, INC.
+005046 MENICX INTERNATIONAL CO., LTD.
+005048 INFOLIBRIA
+005049 ELLACOYA NETWORKS, INC.
+00504A ELTECO A.S.
+00504B BARCONET N.V.
+00504C GALIL MOTION CONTROL, INC.
+00504D TOKYO ELECTRON DEVICE LTD.
+00504E SIERRA MONITOR CORP.
+00504F OLENCOM ELECTRONICS
+005050 CISCO SYSTEMS, INC.
+005051 IWATSU ELECTRIC CO., LTD.
+005052 TIARA NETWORKS, INC.
+005053 CISCO SYSTEMS, INC.
+005054 CISCO SYSTEMS, INC.
+005055 DOMS A/S
+005056 VMWare, Inc.
+005057 BROADBAND ACCESS SYSTEMS
+005058 VEGASTREAM LIMITED
+005059 SUITE TECHNOLOGY SYSTEMS NETWORK
+00505A NETWORK ALCHEMY, INC.
+00505B KAWASAKI LSI U.S.A., INC.
+00505C TUNDO CORPORATION
+00505E DIGITEK MICROLOGIC S.A.
+00505F BRAND INNOVATORS
+005060 TANDBERG TELECOM AS
+005062 KOUWELL ELECTRONICS CORP.  **
+005063 OY COMSEL SYSTEM AB
+005064 CAE ELECTRONICS
+005065 DENSEI-LAMBAD Co., Ltd.
+005066 AtecoM GmbH advanced telecomunication modules
+005067 AEROCOMM, INC.
+005068 ELECTRONIC INDUSTRIES ASSOCIATION
+005069 PixStream Incorporated
+00506A EDEVA, INC.
+00506B SPX-ATEG
+00506C G & L BEIJER ELECTRONICS AB
+00506D VIDEOJET SYSTEMS
+00506E CORDER ENGINEERING CORPORATION
+00506F G-CONNECT
+005070 CHAINTECH COMPUTER CO., LTD.
+005071 AIWA CO., LTD.
+005072 CORVIS CORPORATION
+005073 CISCO SYSTEMS, INC.
+005074 ADVANCED HI-TECH CORP.
+005075 KESTREL SOLUTIONS
+005076 IBM
+005077 PROLIFIC TECHNOLOGY, INC.
+005078 MEGATON HOUSE, LTD.
+00507A XPEED, INC.
+00507B MERLOT COMMUNICATIONS
+00507C VIDEOCON AG
+00507D IFP
+00507E NEWER TECHNOLOGY
+00507F DrayTek Corp.
+005080 CISCO SYSTEMS, INC.
+005081 MURATA MACHINERY, LTD.
+005082 FORESSON CORPORATION
+005083 GILBARCO, INC.
+005084 ATL PRODUCTS
+005086 TELKOM SA, LTD.
+005087 TERASAKI ELECTRIC CO., LTD.
+005088 AMANO CORPORATION
+005089 SAFETY MANAGEMENT SYSTEMS
+00508B COMPAQ COMPUTER CORPORATION
+00508C RSI SYSTEMS
+00508D ABIT COMPUTER CORPORATION
+00508E OPTIMATION, INC.
+00508F ASITA TECHNOLOGIES INT'L LTD.
+005090 DCTRI
+005091 NETACCESS, INC.
+005092 RIGAKU INDUSTRIAL CORPORATION
+005093 BOEING
+005094 PACE MICRO TECHNOLOGY PLC
+005095 PERACOM NETWORKS
+005096 SALIX TECHNOLOGIES, INC.
+005097 MMC-EMBEDDED COMPUTERTECHNIK GmbH
+005098 GLOBALOOP, LTD.
+005099 3COM EUROPE, LTD.
+00509A TAG ELECTRONIC SYSTEMS
+00509B SWITCHCORE AB
+00509C BETA RESEARCH
+00509D THE INDUSTREE B.V.
+00509E Les Technologies SoftAcoustik Inc.
+00509F HORIZON COMPUTER
+0050A0 DELTA COMPUTER SYSTEMS, INC.
+0050A1 CARLO GAVAZZI, INC.
+0050A2 CISCO SYSTEMS, INC.
+0050A3 TransMedia Communications, Inc.
+0050A4 IO TECH, INC.
+0050A5 CAPITOL BUSINESS SYSTEMS, LTD.
+0050A6 OPTRONICS
+0050A7 CISCO SYSTEMS, INC.
+0050A8 OpenCon Systems, Inc.
+0050A9 MOLDAT WIRELESS TECHNOLGIES
+0050AA KONICA MINOLTA HOLDINGS, INC.
+0050AB NALTEC, INC.
+0050AC MAPLE COMPUTER CORPORATION
+0050AD CommUnique Wireless Corp.
+0050AE IWAKI ELECTRONICS CO., LTD.
+0050AF INTERGON, INC.
+0050B0 TECHNOLOGY ATLANTA CORPORATION
+0050B1 GIDDINGS & LEWIS
+0050B2 BRODEL AUTOMATION
+0050B3 VOICEBOARD CORPORATION
+0050B4 SATCHWELL CONTROL SYSTEMS, LTD
+0050B5 FICHET-BAUCHE
+0050B6 GOOD WAY IND. CO., LTD.
+0050B7 BOSER TECHNOLOGY CO., LTD.
+0050B8 INOVA COMPUTERS GMBH & CO. KG
+0050B9 XITRON TECHNOLOGIES, INC.
+0050BA D-LINK
+0050BB CMS TECHNOLOGIES
+0050BC HAMMER STORAGE SOLUTIONS
+0050BD CISCO SYSTEMS, INC.
+0050BE FAST MULTIMEDIA AG
+0050BF MOTOTECH INC.
+0050C0 GATAN, INC.
+0050C1 GEMFLEX NETWORKS, LTD.
+0050C2 IEEE REGISTRATION AUTHORITY
+0050C4 IMD
+0050C5 ADS TECHNOLOGIES, INC.
+0050C6 LOOP TELECOMMUNICATION INTERNATIONAL, INC.
+0050C8 ADDONICS COMMUNICATIONS, INC.
+0050C9 MASPRO DENKOH CORP.
+0050CA NET TO NET TECHNOLOGIES
+0050CB JETTER
+0050CC XYRATEX
+0050CD DIGIANSWER A/S
+0050CE LG INTERNATIONAL CORP.
+0050CF VANLINK COMMUNICATION TECHNOLOGY RESEARCH INSTITUTE
+0050D0 MINERVA SYSTEMS
+0050D1 CISCO SYSTEMS, INC.
+0050D2 BAE Systems Canada, Inc.
+0050D3 DIGITAL AUDIO PROCESSING PTY. LTD.
+0050D4 JOOHONG INFORMATION &
+0050D5 AD SYSTEMS CORP.
+0050D6 ATLAS COPCO TOOLS AB
+0050D7 TELSTRAT
+0050D8 UNICORN COMPUTER CORP.
+0050D9 ENGETRON-ENGENHARIA ELETRONICA IND. e COM. LTDA
+0050DA 3COM CORPORATION
+0050DB CONTEMPORARY CONTROL
+0050DC TAS TELEFONBAU A. SCHWABE GMBH & CO. KG
+0050DD SERRA SOLDADURA, S.A.
+0050DE SIGNUM SYSTEMS CORP.
+0050DF AirFiber, Inc.
+0050E1 NS TECH ELECTRONICS SDN BHD
+0050E2 CISCO SYSTEMS, INC.
+0050E3 Terayon Communications Systems
+0050E4 APPLE COMPUTER, INC.
+0050E6 HAKUSAN CORPORATION
+0050E7 PARADISE INNOVATIONS (ASIA)
+0050E8 NOMADIX INC.
+0050EA XEL COMMUNICATIONS, INC.
+0050EB ALPHA-TOP CORPORATION
+0050EC OLICOM A/S
+0050ED ANDA NETWORKS
+0050EE TEK DIGITEL CORPORATION
+0050EF SPE Systemhaus GmbH
+0050F0 CISCO SYSTEMS, INC.
+0050F1 LIBIT SIGNAL PROCESSING, LTD.
+0050F2 MICROSOFT CORP.
+0050F3 GLOBAL NET INFORMATION CO., Ltd.
+0050F4 SIGMATEK GMBH & CO. KG
+0050F6 PAN-INTERNATIONAL INDUSTRIAL CORP.
+0050F7 VENTURE MANUFACTURING (SINGAPORE) LTD.
+0050F8 ENTREGA TECHNOLOGIES, INC.
+0050FA OXTEL, LTD.
+0050FB VSK ELECTRONICS
+0050FC EDIMAX TECHNOLOGY CO., LTD.
+0050FD VISIONCOMM CO., LTD.
+0050FE PCTVnet ASA
+0050FF HAKKO ELECTRONICS CO., LTD.
+006000 XYCOM INC.
+006001 InnoSys, Inc.
+006002 SCREEN SUBTITLING SYSTEMS, LTD
+006003 TERAOKA WEIGH SYSTEM PTE, LTD.
+006004 COMPUTADORES MODULARES SA
+006005 FEEDBACK DATA LTD.
+006006 SOTEC CO., LTD
+006007 ACRES GAMING, INC.
+006008 3COM CORPORATION
+006009 CISCO SYSTEMS, INC.
+00600A SORD COMPUTER CORPORATION
+00600B LOGWARE GmbH
+00600C APPLIED DATA SYSTEMS, INC.
+00600D Digital Logic GmbH
+00600E WAVENET INTERNATIONAL, INC.
+00600F WESTELL, INC.
+006010 NETWORK MACHINES, INC.
+006011 CRYSTAL SEMICONDUCTOR CORP.
+006012 POWER COMPUTING CORPORATION
+006013 NETSTAL MASCHINEN AG
+006014 EDEC CO., LTD.
+006015 NET2NET CORPORATION
+006016 CLARIION
+006017 TOKIMEC INC.
+006018 STELLAR ONE CORPORATION
+006019 Roche Diagnostics
+00601A KEITHLEY INSTRUMENTS
+00601B MESA ELECTRONICS
+00601C TELXON CORPORATION
+00601D LUCENT TECHNOLOGIES
+00601E SOFTLAB, INC.
+00601F STALLION TECHNOLOGIES
+006020 PIVOTAL NETWORKING, INC.
+006021 DSC CORPORATION
+006022 VICOM SYSTEMS, INC.
+006023 PERICOM SEMICONDUCTOR CORP.
+006024 GRADIENT TECHNOLOGIES, INC.
+006025 ACTIVE IMAGING PLC
+006026 VIKING COMPONENTS, INC.
+006027 Superior Modular Products
+006028 MACROVISION CORPORATION
+006029 CARY PERIPHERALS INC.
+00602A SYMICRON COMPUTER COMMUNICATIONS, LTD.
+00602B PEAK AUDIO
+00602C LINX Data Terminals, Inc.
+00602D ALERTON TECHNOLOGIES, INC.
+00602E CYCLADES CORPORATION
+00602F CISCO SYSTEMS, INC.
+006030 VILLAGE TRONIC ENTWICKLUNG
+006031 HRK SYSTEMS
+006032 I-CUBE, INC.
+006033 ACUITY IMAGING, INC.
+006034 ROBERT BOSCH GmbH
+006035 DALLAS SEMICONDUCTOR, INC.
+006036 AUSTRIAN RESEARCH CENTER SEIBERSDORF
+006037 PHILIPS SEMICONDUCTORS
+006038 Nortel Networks
+006039 SanCom Technology, Inc.
+00603A QUICK CONTROLS LTD.
+00603B AMTEC spa
+00603C HAGIWARA SYS-COM CO., LTD.
+00603D 3CX
+00603E CISCO SYSTEMS, INC.
+00603F PATAPSCO DESIGNS
+006040 NETRO CORP.
+006041 Yokogawa Electric Corporation
+006042 TKS (USA), INC.
+006043 ComSoft Systems, Inc.
+006044 LITTON/POLY-SCIENTIFIC
+006045 PATHLIGHT TECHNOLOGIES
+006046 VMETRO, INC.
+006047 CISCO SYSTEMS, INC.
+006048 EMC CORPORATION
+006049 VINA TECHNOLOGIES
+00604A SAIC IDEAS GROUP
+00604B BIODATA GmbH
+00604C SAT
+00604D MMC NETWORKS, INC.
+00604E CYCLE COMPUTER CORPORATION, INC.
+00604F SUZUKI MFG. CO., LTD.
+006050 INTERNIX INC.
+006051 QUALITY SEMICONDUCTOR
+006052 PERIPHERALS ENTERPRISE CO., Ltd.
+006053 TOYODA MACHINE WORKS, LTD.
+006054 CONTROLWARE GMBH
+006055 CORNELL UNIVERSITY
+006056 NETWORK TOOLS, INC.
+006057 MURATA MANUFACTURING CO., LTD.
+006058 COPPER MOUNTAIN COMMUNICATIONS, INC.
+006059 TECHNICAL COMMUNICATIONS CORP.
+00605A CELCORE, INC.
+00605B IntraServer Technology, Inc.
+00605C CISCO SYSTEMS, INC.
+00605D SCANIVALVE CORP.
+00605E LIBERTY TECHNOLOGY NETWORKING
+00605F NIPPON UNISOFT CORPORATION
+006060 DAWNING TECHNOLOGIES, INC.
+006061 WHISTLE COMMUNICATIONS CORP.
+006062 TELESYNC, INC.
+006063 PSION DACOM PLC.
+006064 NETCOMM LIMITED
+006065 BERNECKER & RAINER INDUSTRIE-ELEKTRONIC GmbH
+006066 LACROIX TECHNOLGIE
+006067 ACER NETXUS INC.
+006068 EICON TECHNOLOGY CORPORATION
+006069 BROCADE COMMUNICATIONS SYSTEMS, Inc.
+00606A MITSUBISHI WIRELESS COMMUNICATIONS. INC.
+00606B Synclayer Inc.
+00606C ARESCOM
+00606D DIGITAL EQUIPMENT CORP.
+00606E DAVICOM SEMICONDUCTOR, INC.
+00606F CLARION CORPORATION OF AMERICA
+006070 CISCO SYSTEMS, INC.
+006071 MIDAS LAB, INC.
+006072 VXL INSTRUMENTS, LIMITED
+006073 REDCREEK COMMUNICATIONS, INC.
+006074 QSC AUDIO PRODUCTS
+006075 PENTEK, INC.
+006076 SCHLUMBERGER TECHNOLOGIES RETAIL PETROLEUM SYSTEMS
+006077 PRISA NETWORKS
+006078 POWER MEASUREMENT LTD.
+006079 Mainstream Data, Inc.
+00607A DVS GmbH
+00607B FORE SYSTEMS, INC.
+00607C WaveAccess, Ltd.
+00607D SENTIENT NETWORKS INC.
+00607E GIGALABS, INC.
+00607F AURORA TECHNOLOGIES, INC.
+006080 MICROTRONIX DATACOM LTD.
+006081 TV/COM INTERNATIONAL
+006082 NOVALINK TECHNOLOGIES, INC.
+006083 CISCO SYSTEMS, INC.
+006084 DIGITAL VIDEO
+006085 Storage Concepts
+006086 LOGIC REPLACEMENT TECH. LTD.
+006087 KANSAI ELECTRIC CO., LTD.
+006088 WHITE MOUNTAIN DSP, INC.
+006089 XATA
+00608A CITADEL COMPUTER
+00608B ConferTech International
+00608C 3COM CORPORATION
+00608D UNIPULSE CORP.
+00608E HE ELECTRONICS, TECHNOLOGIE & SYSTEMTECHNIK GmbH
+00608F TEKRAM TECHNOLOGY CO., LTD.
+006090 ABLE COMMUNICATIONS, INC.
+006091 FIRST PACIFIC NETWORKS, INC.
+006092 MICRO/SYS, INC.
+006093 VARIAN
+006094 IBM CORP.
+006095 ACCU-TIME SYSTEMS, INC.
+006096 T.S. MICROTECH INC.
+006097 3COM CORPORATION
+006098 HT COMMUNICATIONS
+006099 LAN MEDIA CORPORATION
+00609A NJK TECHNO CO.
+00609B ASTRO-MED, INC.
+00609C Perkin-Elmer Incorporated
+00609D PMI FOOD EQUIPMENT GROUP
+00609E ASC X3 - INFORMATION TECHNOLOGY STANDARDS SECRETARIATS
+00609F PHAST CORPORATION
+0060A0 SWITCHED NETWORK TECHNOLOGIES, INC.
+0060A1 VPNet, Inc.
+0060A2 NIHON UNISYS LIMITED CO.
+0060A3 CONTINUUM TECHNOLOGY CORP.
+0060A4 GRINAKER SYSTEM TECHNOLOGIES
+0060A5 PERFORMANCE TELECOM CORP.
+0060A6 PARTICLE MEASURING SYSTEMS
+0060A7 MICROSENS GmbH & CO. KG
+0060A8 TIDOMAT AB
+0060A9 GESYTEC MbH
+0060AA INTELLIGENT DEVICES INC. (IDI)
+0060AB LARSCOM INCORPORATED
+0060AC RESILIENCE CORPORATION
+0060AD MegaChips Corporation
+0060AE TRIO INFORMATION SYSTEMS AB
+0060AF PACIFIC MICRO DATA, INC.
+0060B0 HEWLETT-PACKARD CO.
+0060B1 INPUT/OUTPUT, INC.
+0060B2 PROCESS CONTROL CORP.
+0060B3 Z-COM, INC.
+0060B4 GLENAYRE R&D INC.
+0060B5 KEBA GmbH
+0060B6 LAND COMPUTER CO., LTD.
+0060B7 CHANNELMATIC, INC.
+0060B8 CORELIS INC.
+0060B9 NITSUKO CORPORATION
+0060BA SAHARA NETWORKS, INC.
+0060BB CABLETRON - NETLINK, INC.
+0060BC KeunYoung Electronics & Communication Co., Ltd.
+0060BD HUBBELL-PULSECOM
+0060BE WEBTRONICS
+0060BF MACRAIGOR SYSTEMS, INC.
+0060C0 NERA AS
+0060C1 WaveSpan Corporation
+0060C2 MPL AG
+0060C3 NETVISION CORPORATION
+0060C4 SOLITON SYSTEMS K.K.
+0060C5 ANCOT CORP.
+0060C6 DCS AG
+0060C7 AMATI COMMUNICATIONS CORP.
+0060C8 KUKA WELDING SYSTEMS & ROBOTS
+0060C9 ControlNet, Inc.
+0060CA HARMONIC SYSTEMS INCORPORATED
+0060CB HITACHI ZOSEN CORPORATION
+0060CC EMTRAK, INCORPORATED
+0060CD VideoServer, Inc.
+0060CE ACCLAIM COMMUNICATIONS
+0060CF ALTEON NETWORKS, INC.
+0060D0 SNMP RESEARCH INCORPORATED
+0060D1 CASCADE COMMUNICATIONS
+0060D2 LUCENT TECHNOLOGIES TAIWAN TELECOMMUNICATIONS CO., LTD.
+0060D3 AT&T
+0060D4 ELDAT COMMUNICATION LTD.
+0060D5 MIYACHI TECHNOS CORP.
+0060D6 NovAtel Wireless Technologies Ltd.
+0060D7 ECOLE POLYTECHNIQUE FEDERALE DE LAUSANNE (EPFL)
+0060D8 ELMIC SYSTEMS, INC.
+0060D9 TRANSYS NETWORKS INC.
+0060DA JBM ELECTRONICS CO.
+0060DB NTP ELEKTRONIK A/S
+0060DC TOYO COMMUNICATION EQUIPMENT Co., Ltd.
+0060DD MYRICOM, INC.
+0060DE KAYSER-THREDE GmbH
+0060DF CNT Corporation
+0060E0 AXIOM TECHNOLOGY CO., LTD.
+0060E1 ORCKIT COMMUNICATIONS LTD.
+0060E2 QUEST ENGINEERING & DEVELOPMENT
+0060E3 ARBIN INSTRUMENTS
+0060E4 COMPUSERVE, INC.
+0060E5 FUJI AUTOMATION CO., LTD.
+0060E6 SHOMITI SYSTEMS INCORPORATED
+0060E7 RANDATA
+0060E8 HITACHI COMPUTER PRODUCTS (AMERICA), INC.
+0060E9 ATOP TECHNOLOGIES, INC.
+0060EA StreamLogic
+0060EB FOURTHTRACK SYSTEMS
+0060EC HERMARY OPTO ELECTRONICS INC.
+0060ED RICARDO TEST AUTOMATION LTD.
+0060EE APOLLO
+0060EF FLYTECH TECHNOLOGY CO., LTD.
+0060F0 JOHNSON & JOHNSON MEDICAL, INC
+0060F1 EXP COMPUTER, INC.
+0060F2 LASERGRAPHICS, INC.
+0060F3 Performance Analysis Broadband, Spirent plc
+0060F4 ADVANCED COMPUTER SOLUTIONS, Inc.
+0060F5 ICON WEST, INC.
+0060F6 NEXTEST COMMUNICATIONS PRODUCTS, INC.
+0060F7 DATAFUSION SYSTEMS
+0060F8 Loran International Technologies Inc.
+0060F9 DIAMOND LANE COMMUNICATIONS
+0060FA EDUCATIONAL TECHNOLOGY RESOURCES, INC.
+0060FB PACKETEER, INC.
+0060FC CONSERVATION THROUGH INNOVATION LTD.
+0060FD NetICs, Inc.
+0060FE LYNX SYSTEM DEVELOPERS, INC.
+0060FF QuVis, Inc.
+0070B0 M/A-COM INC. COMPANIES
+0070B3 DATA RECALL LTD.
+008000 MULTITECH SYSTEMS, INC.
+008001 PERIPHONICS CORPORATION
+008002 SATELCOM (UK) LTD
+008003 HYTEC ELECTRONICS LTD.
+008004 ANTLOW COMMUNICATIONS, LTD.
+008005 CACTUS COMPUTER INC.
+008006 COMPUADD CORPORATION
+008007 DLOG NC-SYSTEME
+008008 DYNATECH COMPUTER SYSTEMS
+008009 JUPITER SYSTEMS, INC.
+00800A JAPAN COMPUTER CORP.
+00800B CSK CORPORATION
+00800C VIDECOM LIMITED
+00800D VOSSWINKEL F.U.
+00800E ATLANTIX CORPORATION
+00800F STANDARD MICROSYSTEMS
+008010 COMMODORE INTERNATIONAL
+008011 DIGITAL SYSTEMS INT'L. INC.
+008012 INTEGRATED MEASUREMENT SYSTEMS
+008013 THOMAS-CONRAD CORPORATION
+008014 ESPRIT SYSTEMS
+008015 SEIKO SYSTEMS, INC.
+008016 WANDEL AND GOLTERMANN
+008017 PFU LIMITED
+008018 KOBE STEEL, LTD.
+008019 DAYNA COMMUNICATIONS, INC.
+00801A BELL ATLANTIC
+00801B KODIAK TECHNOLOGY
+00801C NEWPORT SYSTEMS SOLUTIONS
+00801D INTEGRATED INFERENCE MACHINES
+00801E XINETRON, INC.
+00801F KRUPP ATLAS ELECTRONIK GMBH
+008020 NETWORK PRODUCTS
+008021 Alcatel Canada Inc.
+008022 SCAN-OPTICS
+008023 INTEGRATED BUSINESS NETWORKS
+008024 KALPANA, INC.
+008025 STOLLMANN GMBH
+008026 NETWORK PRODUCTS CORPORATION
+008027 ADAPTIVE SYSTEMS, INC.
+008028 TRADPOST (HK) LTD
+008029 EAGLE TECHNOLOGY, INC.
+00802A TEST SYSTEMS & SIMULATIONS INC
+00802B INTEGRATED MARKETING CO
+00802C THE SAGE GROUP PLC
+00802D XYLOGICS INC
+00802E CASTLE ROCK COMPUTING
+00802F NATIONAL INSTRUMENTS CORP.
+008030 NEXUS ELECTRONICS
+008031 BASYS, CORP.
+008032 ACCESS CO., LTD.
+008033 FORMATION, INC.
+008034 SMT GOUPIL
+008035 TECHNOLOGY WORKS, INC.
+008036 REFLEX MANUFACTURING SYSTEMS
+008037 Ericsson Group
+008038 DATA RESEARCH & APPLICATIONS
+008039 ALCATEL STC AUSTRALIA
+00803A VARITYPER, INC.
+00803B APT COMMUNICATIONS, INC.
+00803C TVS ELECTRONICS LTD
+00803D SURIGIKEN CO.,  LTD.
+00803E SYNERNETICS
+00803F TATUNG COMPANY
+008040 JOHN FLUKE MANUFACTURING CO.
+008041 VEB KOMBINAT ROBOTRON
+008042 FORCE COMPUTERS
+008043 NETWORLD, INC.
+008044 SYSTECH COMPUTER CORP.
+008045 MATSUSHITA ELECTRIC IND. CO
+008046 UNIVERSITY OF TORONTO
+008047 IN-NET CORP.
+008048 COMPEX INCORPORATED
+008049 NISSIN ELECTRIC CO., LTD.
+00804A PRO-LOG
+00804B EAGLE TECHNOLOGIES PTY.LTD.
+00804C CONTEC CO., LTD.
+00804D CYCLONE MICROSYSTEMS, INC.
+00804E APEX COMPUTER COMPANY
+00804F DAIKIN INDUSTRIES, LTD.
+008050 ZIATECH CORPORATION
+008051 FIBERMUX
+008052 TECHNICALLY ELITE CONCEPTS
+008053 INTELLICOM, INC.
+008054 FRONTIER TECHNOLOGIES CORP.
+008055 FERMILAB
+008056 SPHINX ELEKTRONIK GMBH
+008057 ADSOFT, LTD.
+008058 PRINTER SYSTEMS CORPORATION
+008059 STANLEY ELECTRIC CO., LTD
+00805A TULIP COMPUTERS INTERNAT'L B.V
+00805B CONDOR SYSTEMS, INC.
+00805C AGILIS CORPORATION
+00805D CANSTAR
+00805E LSI LOGIC CORPORATION
+00805F COMPAQ COMPUTER CORPORATION
+008060 NETWORK INTERFACE CORPORATION
+008061 LITTON SYSTEMS, INC.
+008062 INTERFACE  CO.
+008063 RICHARD HIRSCHMANN GMBH & CO.
+008064 WYSE TECHNOLOGY
+008065 CYBERGRAPHIC SYSTEMS PTY LTD.
+008066 ARCOM CONTROL SYSTEMS, LTD.
+008067 SQUARE D COMPANY
+008068 YAMATECH SCIENTIFIC LTD.
+008069 COMPUTONE SYSTEMS
+00806A ERI (EMPAC RESEARCH INC.)
+00806B SCHMID TELECOMMUNICATION
+00806C CEGELEC PROJECTS LTD
+00806D CENTURY SYSTEMS CORP.
+00806E NIPPON STEEL CORPORATION
+00806F ONELAN LTD.
+008070 COMPUTADORAS MICRON
+008071 SAI TECHNOLOGY
+008072 MICROPLEX SYSTEMS LTD.
+008073 DWB ASSOCIATES
+008074 FISHER CONTROLS
+008075 PARSYTEC GMBH
+008076 MCNC
+008077 BROTHER INDUSTRIES, LTD.
+008078 PRACTICAL PERIPHERALS, INC.
+008079 MICROBUS DESIGNS LTD.
+00807A AITECH SYSTEMS LTD.
+00807B ARTEL COMMUNICATIONS CORP.
+00807C FIBERCOM, INC.
+00807D EQUINOX SYSTEMS INC.
+00807E SOUTHERN PACIFIC LTD.
+00807F DY-4 INCORPORATED
+008080 DATAMEDIA CORPORATION
+008081 KENDALL SQUARE RESEARCH CORP.
+008082 PEP MODULAR COMPUTERS GMBH
+008083 AMDAHL
+008084 THE CLOUD INC.
+008085 H-THREE SYSTEMS CORPORATION
+008086 COMPUTER GENERATION INC.
+008087 OKI ELECTRIC INDUSTRY CO., LTD
+008088 VICTOR COMPANY OF JAPAN, LTD.
+008089 TECNETICS (PTY) LTD.
+00808A SUMMIT MICROSYSTEMS CORP.
+00808B DACOLL LIMITED
+00808C NetScout Systems, Inc.
+00808D WESTCOAST TECHNOLOGY B.V.
+00808E RADSTONE TECHNOLOGY
+00808F C. ITOH ELECTRONICS, INC.
+008090 MICROTEK INTERNATIONAL, INC.
+008091 TOKYO ELECTRIC CO.,LTD
+008092 JAPAN COMPUTER INDUSTRY, INC.
+008093 XYRON CORPORATION
+008094 ALFA LAVAL AUTOMATION AB
+008095 BASIC MERTON HANDELSGES.M.B.H.
+008096 HUMAN DESIGNED SYSTEMS, INC.
+008097 CENTRALP AUTOMATISMES
+008098 TDK CORPORATION
+008099 KLOCKNER MOELLER IPC
+00809A NOVUS NETWORKS LTD
+00809B JUSTSYSTEM CORPORATION
+00809C LUXCOM, INC.
+00809D Commscraft Ltd.
+00809E DATUS GMBH
+00809F ALCATEL BUSINESS SYSTEMS
+0080A0 EDISA HEWLETT PACKARD S/A
+0080A1 MICROTEST, INC.
+0080A2 CREATIVE ELECTRONIC SYSTEMS
+0080A3 LANTRONIX
+0080A4 LIBERTY ELECTRONICS
+0080A5 SPEED INTERNATIONAL
+0080A6 REPUBLIC TECHNOLOGY, INC.
+0080A7 MEASUREX CORP.
+0080A8 VITACOM CORPORATION
+0080A9 CLEARPOINT RESEARCH
+0080AA MAXPEED
+0080AB DUKANE NETWORK INTEGRATION
+0080AC IMLOGIX, DIVISION OF GENESYS
+0080AD CNET TECHNOLOGY, INC.
+0080AE HUGHES NETWORK SYSTEMS
+0080AF ALLUMER CO., LTD.
+0080B0 ADVANCED INFORMATION
+0080B1 SOFTCOM A/S
+0080B2 NETWORK EQUIPMENT TECHNOLOGIES
+0080B3 AVAL DATA CORPORATION
+0080B4 SOPHIA SYSTEMS
+0080B5 UNITED NETWORKS INC.
+0080B6 THEMIS COMPUTER
+0080B7 STELLAR COMPUTER
+0080B8 BUG, INCORPORATED
+0080B9 ARCHE TECHNOLIGIES INC.
+0080BA SPECIALIX (ASIA) PTE, LTD
+0080BB HUGHES LAN SYSTEMS
+0080BC HITACHI ENGINEERING CO., LTD
+0080BD THE FURUKAWA ELECTRIC CO., LTD
+0080BE ARIES RESEARCH
+0080BF TAKAOKA ELECTRIC MFG. CO. LTD.
+0080C0 PENRIL DATACOMM
+0080C1 LANEX CORPORATION
+0080C2 IEEE 802.1 COMMITTEE
+0080C3 BICC INFORMATION SYSTEMS & SVC
+0080C4 DOCUMENT TECHNOLOGIES, INC.
+0080C5 NOVELLCO DE MEXICO
+0080C6 NATIONAL DATACOMM CORPORATION
+0080C7 XIRCOM
+0080C8 D-LINK SYSTEMS, INC.
+0080C9 ALBERTA MICROELECTRONIC CENTRE
+0080CA NETCOM RESEARCH INCORPORATED
+0080CB FALCO DATA PRODUCTS
+0080CC MICROWAVE BYPASS SYSTEMS
+0080CD MICRONICS COMPUTER, INC.
+0080CE BROADCAST TELEVISION SYSTEMS
+0080CF EMBEDDED PERFORMANCE INC.
+0080D0 COMPUTER PERIPHERALS, INC.
+0080D1 KIMTRON CORPORATION
+0080D2 SHINNIHONDENKO CO., LTD.
+0080D3 SHIVA CORP.
+0080D4 CHASE RESEARCH LTD.
+0080D5 CADRE TECHNOLOGIES
+0080D6 NUVOTECH, INC.
+0080D7 Fantum Engineering
+0080D8 NETWORK PERIPHERALS INC.
+0080D9 EMK ELEKTRONIK
+0080DA BRUEL & KJAER
+0080DB GRAPHON CORPORATION
+0080DC PICKER INTERNATIONAL
+0080DD GMX INC/GIMIX
+0080DE GIPSI S.A.
+0080DF ADC CODENOLL TECHNOLOGY CORP.
+0080E0 XTP SYSTEMS, INC.
+0080E1 STMICROELECTRONICS
+0080E2 T.D.I. CO., LTD.
+0080E3 CORAL NETWORK CORPORATION
+0080E4 NORTHWEST DIGITAL SYSTEMS, INC
+0080E5 MYLEX CORPORATION
+0080E6 PEER NETWORKS, INC.
+0080E7 LYNWOOD SCIENTIFIC DEV. LTD.
+0080E8 CUMULUS CORPORATIION
+0080E9 Madge Ltd.
+0080EA ADVA Optical Networking Ltd.
+0080EB COMPCONTROL B.V.
+0080EC SUPERCOMPUTING SOLUTIONS, INC.
+0080ED IQ TECHNOLOGIES, INC.
+0080EE THOMSON CSF
+0080EF RATIONAL
+0080F0 Panasonic Communications Co., Ltd.
+0080F1 OPUS SYSTEMS
+0080F2 RAYCOM SYSTEMS INC
+0080F3 SUN ELECTRONICS CORP.
+0080F4 TELEMECANIQUE ELECTRIQUE
+0080F5 QUANTEL LTD
+0080F6 SYNERGY MICROSYSTEMS
+0080F7 ZENITH ELECTRONICS
+0080F8 MIZAR, INC.
+0080F9 HEURIKON CORPORATION
+0080FA RWT GMBH
+0080FB BVM LIMITED
+0080FC AVATAR CORPORATION
+0080FD EXSCEED CORPRATION
+0080FE AZURE TECHNOLOGIES, INC.
+0080FF SOC. DE TELEINFORMATIQUE RTC
+009000 DIAMOND MULTIMEDIA
+009001 NISHIMU ELECTRONICS INDUSTRIES CO., LTD.
+009002 ALLGON AB
+009003 APLIO
+009004 3COM EUROPE LTD.
+009005 PROTECH SYSTEMS CO., LTD.
+009006 HAMAMATSU PHOTONICS K.K.
+009007 DOMEX TECHNOLOGY CORP.
+009008 HanA Systems Inc.
+009009 i Controls, Inc.
+00900A PROTON ELECTRONIC INDUSTRIAL CO., LTD.
+00900B LANNER ELECTRONICS, INC.
+00900C CISCO SYSTEMS, INC.
+00900D OVERLAND DATA INC.
+00900E HANDLINK TECHNOLOGIES, INC.
+00900F KAWASAKI HEAVY INDUSTRIES, LTD
+009010 SIMULATION LABORATORIES, INC.
+009011 WAVTrace, Inc.
+009012 GLOBESPAN SEMICONDUCTOR, INC.
+009013 SAMSAN CORP.
+009014 ROTORK INSTRUMENTS, LTD.
+009015 CENTIGRAM COMMUNICATIONS CORP.
+009016 ZAC
+009017 ZYPCOM, INC.
+009018 ITO ELECTRIC INDUSTRY CO, LTD.
+009019 HERMES ELECTRONICS CO., LTD.
+00901A UNISPHERE SOLUTIONS
+00901B DIGITAL CONTROLS
+00901C mps Software Gmbh
+00901D PEC (NZ) LTD.
+00901E SELESTA INGEGNE RIA S.P.A.
+00901F ADTEC PRODUCTIONS, INC.
+009020 PHILIPS ANALYTICAL X-RAY B.V.
+009021 CISCO SYSTEMS, INC.
+009022 IVEX
+009023 ZILOG INC.
+009024 PIPELINKS, INC.
+009025 VISION SYSTEMS LTD. PTY
+009026 ADVANCED SWITCHING COMMUNICATIONS, INC.
+009027 INTEL CORPORATION
+009028 NIPPON SIGNAL CO., LTD.
+009029 CRYPTO AG
+00902A COMMUNICATION DEVICES, INC.
+00902B CISCO SYSTEMS, INC.
+00902C DATA & CONTROL EQUIPMENT LTD.
+00902D DATA ELECTRONICS (AUST.) PTY, LTD.
+00902E NAMCO LIMITED
+00902F NETCORE SYSTEMS, INC.
+009030 HONEYWELL-DATING
+009031 MYSTICOM, LTD.
+009032 PELCOMBE GROUP LTD.
+009033 INNOVAPHONE GmbH
+009034 IMAGIC, INC.
+009035 ALPHA TELECOM, INC.
+009036 ens, inc.
+009037 ACUCOMM, INC.
+009038 FOUNTAIN TECHNOLOGIES, INC.
+009039 SHASTA NETWORKS
+00903A NIHON MEDIA TOOL INC.
+00903B TriEMS Research Lab, Inc.
+00903C ATLANTIC NETWORK SYSTEMS
+00903D BIOPAC SYSTEMS, INC.
+00903E N.V. PHILIPS INDUSTRIAL ACTIVITIES
+00903F AZTEC RADIOMEDIA
+009040 Siemens Network Convergence LLC
+009041 APPLIED DIGITAL ACCESS
+009042 ECCS, Inc.
+009043 NICHIBEI DENSHI CO., LTD.
+009044 ASSURED DIGITAL, INC.
+009045 Marconi Communications
+009046 DEXDYNE, LTD.
+009047 GIGA FAST E. LTD.
+009048 ZEAL CORPORATION
+009049 ENTRIDIA CORPORATION
+00904A CONCUR SYSTEM TECHNOLOGIES
+00904B GemTek Technology Co., Ltd.
+00904C EPIGRAM, INC.
+00904D SPEC S.A.
+00904E DELEM BV
+00904F ABB POWER T&D COMPANY, INC.
+009050 TELESTE OY
+009051 ULTIMATE TECHNOLOGY CORP.
+009052 SELCOM ELETTRONICA S.R.L.
+009053 DAEWOO ELECTRONICS CO., LTD.
+009054 INNOVATIVE SEMICONDUCTORS, INC
+009055 PARKER HANNIFIN CORPORATION COMPUMOTOR DIVISION
+009056 TELESTREAM, INC.
+009057 AANetcom, Inc.
+009058 Ultra Electronics Ltd., Command and Control Systems
+009059 TELECOM DEVICE K.K.
+00905A DEARBORN GROUP, INC.
+00905B RAYMOND AND LAE ENGINEERING
+00905C EDMI
+00905D NETCOM SICHERHEITSTECHNIK GmbH
+00905E RAULAND-BORG CORPORATION
+00905F CISCO SYSTEMS, INC.
+009060 SYSTEM CREATE CORP.
+009061 PACIFIC RESEARCH & ENGINEERING CORPORATION
+009062 ICP VORTEX COMPUTERSYSTEME GmbH
+009063 COHERENT COMMUNICATIONS SYSTEMS CORPORATION
+009064 THOMSON BROADCAST SYSTEMS
+009065 FINISAR CORPORATION
+009066 Troika Networks, Inc.
+009067 WalkAbout Computers, Inc.
+009068 DVT CORP.
+009069 JUNIPER NETWORKS, INC.
+00906A TURNSTONE SYSTEMS, INC.
+00906B APPLIED RESOURCES, INC.
+00906C GWT GLOBAL WEIGHING TECHNOLOGIES GmbH
+00906D CISCO SYSTEMS, INC.
+00906E PRAXON, INC.
+00906F CISCO SYSTEMS, INC.
+009070 NEO NETWORKS, INC.
+009071 Applied Innovation Inc.
+009072 SIMRAD AS
+009073 GAIO TECHNOLOGY
+009074 ARGON NETWORKS, INC.
+009075 NEC DO BRASIL S.A.
+009076 FMT AIRCRAFT GATE SUPPORT SYSTEMS AB
+009077 ADVANCED FIBRE COMMUNICATIONS
+009078 MER TELEMANAGEMENT SOLUTIONS, LTD.
+009079 ClearOne, Inc.
+00907A SPECTRALINK CORP.
+00907B E-TECH, INC.
+00907C DIGITALCAST, INC.
+00907D Lake Communications
+00907E VETRONIX CORP.
+00907F WatchGuard Technologies, Inc.
+009080 NOT LIMITED, INC.
+009081 ALOHA NETWORKS, INC.
+009082 FORCE INSTITUTE
+009083 TURBO COMMUNICATION, INC.
+009084 ATECH SYSTEM
+009085 GOLDEN ENTERPRISES, INC.
+009086 CISCO SYSTEMS, INC.
+009087 ITIS
+009088 BAXALL SECURITY LTD.
+009089 SOFTCOM MICROSYSTEMS, INC.
+00908A BAYLY COMMUNICATIONS, INC.
+00908B CELL COMPUTING, INC.
+00908C ETREND ELECTRONICS, INC.
+00908D VICKERS ELECTRONICS SYSTEMS
+00908E Nortel Networks Broadband Access
+00908F AUDIO CODES LTD.
+009090 I-BUS
+009091 DigitalScape, Inc.
+009092 CISCO SYSTEMS, INC.
+009093 NANAO CORPORATION
+009094 OSPREY TECHNOLOGIES, INC.
+009095 UNIVERSAL AVIONICS
+009096 ASKEY COMPUTER CORP.
+009097 SYCAMORE NETWORKS
+009098 SBC DESIGNS, INC.
+009099 ALLIED TELESIS, K.K.
+00909A ONE WORLD SYSTEMS, INC.
+00909B MARKPOINT AB
+00909C Terayon Communications Systems
+00909D GSE SYSTEMS, INC.
+00909E Critical IO, LLC
+00909F DIGI-DATA CORPORATION
+0090A0 8X8 INC.
+0090A1 FLYING PIG SYSTEMS, LTD.
+0090A2 CYBERTAN TECHNOLOGY, INC.
+0090A3 Corecess Inc.
+0090A4 ALTIGA NETWORKS
+0090A5 SPECTRA LOGIC
+0090A6 CISCO SYSTEMS, INC.
+0090A7 CLIENTEC CORPORATION
+0090A8 NineTiles Networks, Ltd.
+0090A9 WESTERN DIGITAL
+0090AA INDIGO ACTIVE VISION SYSTEMS LIMITED
+0090AB CISCO SYSTEMS, INC.
+0090AC OPTIVISION, INC.
+0090AD ASPECT ELECTRONICS, INC.
+0090AE ITALTEL S.p.A.
+0090AF J. MORITA MFG. CORP.
+0090B0 VADEM
+0090B1 CISCO SYSTEMS, INC.
+0090B2 AVICI SYSTEMS INC.
+0090B3 AGRANAT SYSTEMS
+0090B4 WILLOWBROOK TECHNOLOGIES
+0090B5 NIKON CORPORATION
+0090B6 FIBEX SYSTEMS
+0090B7 DIGITAL LIGHTWAVE, INC.
+0090B8 ROHDE & SCHWARZ GMBH & CO. KG
+0090B9 BERAN INSTRUMENTS LTD.
+0090BA VALID NETWORKS, INC.
+0090BB TAINET COMMUNICATION SYSTEM Corp.
+0090BC TELEMANN CO., LTD.
+0090BD OMNIA COMMUNICATIONS, INC.
+0090BE IBC/INTEGRATED BUSINESS COMPUTERS
+0090BF CISCO SYSTEMS, INC.
+0090C0 K.J. LAW ENGINEERS, INC.
+0090C1 Peco II, Inc.
+0090C2 JK microsystems, Inc.
+0090C3 TOPIC SEMICONDUCTOR CORP.
+0090C4 JAVELIN SYSTEMS, INC.
+0090C5 INTERNET MAGIC, INC.
+0090C6 OPTIM SYSTEMS, INC.
+0090C7 ICOM INC.
+0090C8 WAVERIDER COMMUNICATIONS (CANADA) INC.
+0090C9 DPAC Technologies
+0090CA ACCORD VIDEO TELECOMMUNICATIONS, LTD.
+0090CB Wireless OnLine, Inc.
+0090CC PLANET COMMUNICATIONS, INC.
+0090CD ENT-EMPRESA NACIONAL DE TELECOMMUNICACOES, S.A.
+0090CE TETRA GmbH
+0090CF NORTEL
+0090D0 Thomson Belgium
+0090D1 LEICHU ENTERPRISE CO., LTD.
+0090D2 ARTEL VIDEO SYSTEMS
+0090D3 GIESECKE & DEVRIENT GmbH
+0090D4 BindView Development Corp.
+0090D5 EUPHONIX, INC.
+0090D6 CRYSTAL GROUP
+0090D7 NetBoost Corp.
+0090D8 WHITECROSS SYSTEMS
+0090D9 CISCO SYSTEMS, INC.
+0090DA DYNARC, INC.
+0090DB NEXT LEVEL COMMUNICATIONS
+0090DC TECO INFORMATION SYSTEMS
+0090DD THE MIHARU COMMUNICATIONS CO., LTD.
+0090DE CARDKEY SYSTEMS, INC.
+0090DF MITSUBISHI CHEMICAL AMERICA, INC.
+0090E0 SYSTRAN CORP.
+0090E1 TELENA S.P.A.
+0090E2 DISTRIBUTED PROCESSING TECHNOLOGY
+0090E3 AVEX ELECTRONICS INC.
+0090E4 NEC AMERICA, INC.
+0090E5 TEKNEMA, INC.
+0090E6 ACER LABORATORIES, INC.
+0090E7 HORSCH ELEKTRONIK AG
+0090E8 MOXA TECHNOLOGIES CORP., LTD.
+0090E9 JANZ COMPUTER AG
+0090EA ALPHA TECHNOLOGIES, INC.
+0090EB SENTRY TELECOM SYSTEMS
+0090EC PYRESCOM
+0090ED CENTRAL SYSTEM RESEARCH CO., LTD.
+0090EE PERSONAL COMMUNICATIONS TECHNOLOGIES
+0090EF INTEGRIX, INC.
+0090F0 HARMONIC LIGHTWAVES, LTD.
+0090F1 DOT HILL SYSTEMS CORPORATION
+0090F2 CISCO SYSTEMS, INC.
+0090F3 ASPECT COMMUNICATIONS
+0090F4 LIGHTNING INSTRUMENTATION
+0090F5 CLEVO CO.
+0090F6 ESCALATE NETWORKS, INC.
+0090F7 NBASE COMMUNICATIONS LTD.
+0090F8 MEDIATRIX TELECOM
+0090F9 LEITCH
+0090FA GigaNet, Inc.
+0090FB PORTWELL, INC.
+0090FC NETWORK COMPUTING DEVICES
+0090FD CopperCom, Inc.
+0090FE ELECOM CO., LTD.  (LANEED DIV.)
+0090FF TELLUS TECHNOLOGY INC.
+0091D6 Crystal Group, Inc.
+009D8E CARDIAC RECORDERS, INC.
+00A000 CENTILLION NETWORKS, INC.
+00A001 WATKINS-JOHNSON COMPANY
+00A002 LEEDS & NORTHRUP AUSTRALIA PTY LTD
+00A003 STAEFA CONTROL SYSTEM
+00A004 NETPOWER, INC.
+00A005 DANIEL INSTRUMENTS, LTD.
+00A006 IMAGE DATA PROCESSING SYSTEM GROUP
+00A007 APEXX TECHNOLOGY, INC.
+00A008 NETCORP
+00A009 WHITETREE NETWORK
+00A00A R.D.C. COMMUNICATION
+00A00B COMPUTEX CO., LTD.
+00A00C KINGMAX TECHNOLOGY, INC.
+00A00D THE PANDA PROJECT
+00A00E VISUAL NETWORKS, INC.
+00A00F Broadband Technologies
+00A010 SYSLOGIC DATENTECHNIK AG
+00A011 MUTOH INDUSTRIES LTD.
+00A012 B.A.T.M. ADVANCED TECHNOLOGIES
+00A013 TELTREND LTD.
+00A014 CSIR
+00A015 WYLE
+00A016 MICROPOLIS CORP.
+00A017 J B M CORPORATION
+00A018 CREATIVE CONTROLLERS, INC.
+00A019 NEBULA CONSULTANTS, INC.
+00A01A BINAR ELEKTRONIK AB
+00A01B PREMISYS COMMUNICATIONS, INC.
+00A01C NASCENT NETWORKS CORPORATION
+00A01D SIXNET
+00A01E EST CORPORATION
+00A01F TRICORD SYSTEMS, INC.
+00A020 CITICORP/TTI
+00A021 GENERAL DYNAMICS-
+00A022 CENTRE FOR DEVELOPMENT OF ADVANCED COMPUTING
+00A023 APPLIED CREATIVE TECHNOLOGY, INC.
+00A024 3COM CORPORATION
+00A025 REDCOM LABS INC.
+00A026 TELDAT, S.A.
+00A027 FIREPOWER SYSTEMS, INC.
+00A028 CONNER PERIPHERALS
+00A029 COULTER CORPORATION
+00A02A TRANCELL SYSTEMS
+00A02B TRANSITIONS RESEARCH CORP.
+00A02C interWAVE Communications
+00A02D 1394 Trade Association
+00A02E BRAND COMMUNICATIONS, LTD.
+00A02F PIRELLI CAVI
+00A030 CAPTOR NV/SA
+00A031 HAZELTINE CORPORATION, MS 1-17
+00A032 GES SINGAPORE PTE. LTD.
+00A033 imc MeBsysteme GmbH
+00A034 AXEL
+00A035 CYLINK CORPORATION
+00A036 APPLIED NETWORK TECHNOLOGY
+00A037 DATASCOPE CORPORATION
+00A038 EMAIL ELECTRONICS
+00A039 ROSS TECHNOLOGY, INC.
+00A03A KUBOTEK CORPORATION
+00A03B TOSHIN ELECTRIC CO., LTD.
+00A03C EG&G NUCLEAR INSTRUMENTS
+00A03D OPTO-22
+00A03E ATM FORUM
+00A03F COMPUTER SOCIETY MICROPROCESSOR & MICROPROCESSOR STANDARDS C
+00A040 APPLE COMPUTER
+00A041 LEYBOLD-INFICON
+00A042 SPUR PRODUCTS CORP.
+00A043 AMERICAN TECHNOLOGY LABS, INC.
+00A044 NTT IT CO., LTD.
+00A045 PHOENIX CONTACT GMBH & CO.
+00A046 SCITEX CORP. LTD.
+00A047 INTEGRATED FITNESS CORP.
+00A048 QUESTECH, LTD.
+00A049 DIGITECH INDUSTRIES, INC.
+00A04A NISSHIN ELECTRIC CO., LTD.
+00A04B TFL LAN INC.
+00A04C INNOVATIVE SYSTEMS & TECHNOLOGIES, INC.
+00A04D EDA INSTRUMENTS, INC.
+00A04E VOELKER TECHNOLOGIES, INC.
+00A04F AMERITEC CORP.
+00A050 CYPRESS SEMICONDUCTOR
+00A051 ANGIA COMMUNICATIONS. INC.
+00A052 STANILITE ELECTRONICS PTY. LTD
+00A053 COMPACT DEVICES, INC.
+00A055 Data Device Corporation
+00A056 MICROPROSS
+00A057 LANCOM Systems GmbH
+00A058 GLORY, LTD.
+00A059 HAMILTON HALLMARK
+00A05A KOFAX IMAGE PRODUCTS
+00A05B MARQUIP, INC.
+00A05C INVENTORY CONVERSION, INC./
+00A05D CS COMPUTER SYSTEME GmbH
+00A05E MYRIAD LOGIC INC.
+00A05F BTG ENGINEERING BV
+00A060 ACER PERIPHERALS, INC.
+00A061 PURITAN BENNETT
+00A062 AES PRODATA
+00A063 JRL SYSTEMS, INC.
+00A064 KVB/ANALECT
+00A065 NEXLAND, INC.
+00A066 ISA CO., LTD.
+00A067 NETWORK SERVICES GROUP
+00A068 BHP LIMITED
+00A069 Symmetricom, Inc.
+00A06A Verilink Corporation
+00A06B DMS DORSCH MIKROSYSTEM GMBH
+00A06C SHINDENGEN ELECTRIC MFG. CO., LTD.
+00A06D MANNESMANN TALLY CORPORATION
+00A06E AUSTRON, INC.
+00A06F THE APPCON GROUP, INC.
+00A070 COASTCOM
+00A071 VIDEO LOTTERY TECHNOLOGIES,INC
+00A072 OVATION SYSTEMS LTD.
+00A073 COM21, INC.
+00A074 PERCEPTION TECHNOLOGY
+00A075 MICRON TECHNOLOGY, INC.
+00A076 CARDWARE LAB, INC.
+00A077 FUJITSU NEXION, INC.
+00A078 Marconi Communications
+00A079 ALPS ELECTRIC (USA), INC.
+00A07A ADVANCED PERIPHERALS TECHNOLOGIES, INC.
+00A07B DAWN COMPUTER INCORPORATION
+00A07C TONYANG NYLON CO., LTD.
+00A07D SEEQ TECHNOLOGY, INC.
+00A07E AVID TECHNOLOGY, INC.
+00A07F GSM-SYNTEL, LTD.
+00A080 ANTARES MICROSYSTEMS
+00A081 ALCATEL DATA NETWORKS
+00A082 NKT ELEKTRONIK A/S
+00A083 ASIMMPHONY TURKEY
+00A084 DATAPLEX PTY. LTD.
+00A086 AMBER WAVE SYSTEMS, INC.
+00A087 Zarlink Semiconductor Ltd.
+00A088 ESSENTIAL COMMUNICATIONS
+00A089 XPOINT TECHNOLOGIES, INC.
+00A08A BROOKTROUT TECHNOLOGY, INC.
+00A08B ASTON ELECTRONIC DESIGNS LTD.
+00A08C MultiMedia LANs, Inc.
+00A08D JACOMO CORPORATION
+00A08E Nokia Internet Communications
+00A08F DESKNET SYSTEMS, INC.
+00A090 TimeStep Corporation
+00A091 APPLICOM INTERNATIONAL
+00A092 H. BOLLMANN MANUFACTURERS, LTD
+00A093 B/E AEROSPACE, Inc.
+00A094 COMSAT CORPORATION
+00A095 ACACIA NETWORKS, INC.
+00A096 MITUMI ELECTRIC CO., LTD.
+00A097 JC INFORMATION SYSTEMS
+00A098 NETWORK APPLIANCE CORP.
+00A099 K-NET LTD.
+00A09A NIHON KOHDEN AMERICA
+00A09B QPSX COMMUNICATIONS, LTD.
+00A09C Xyplex, Inc.
+00A09D JOHNATHON FREEMAN TECHNOLOGIES
+00A09E ICTV
+00A09F COMMVISION CORP.
+00A0A0 COMPACT DATA, LTD.
+00A0A1 EPIC DATA INC.
+00A0A2 DIGICOM S.P.A.
+00A0A3 RELIABLE POWER METERS
+00A0A4 MICROS SYSTEMS, INC.
+00A0A5 TEKNOR MICROSYSTEME, INC.
+00A0A6 M.I. SYSTEMS, K.K.
+00A0A7 VORAX CORPORATION
+00A0A8 RENEX CORPORATION
+00A0A9 GN NETTEST (CANADA) NAVTEL DIVISION
+00A0AA SPACELABS MEDICAL
+00A0AB NETCS INFORMATIONSTECHNIK GMBH
+00A0AC GILAT SATELLITE NETWORKS, LTD.
+00A0AD MARCONI SPA
+00A0AE NUCOM SYSTEMS, INC.
+00A0AF WMS INDUSTRIES
+00A0B0 I-O DATA DEVICE, INC.
+00A0B1 FIRST VIRTUAL CORPORATION
+00A0B2 SHIMA SEIKI
+00A0B3 ZYKRONIX
+00A0B4 TEXAS MICROSYSTEMS, INC.
+00A0B5 3H TECHNOLOGY
+00A0B6 SANRITZ AUTOMATION CO., LTD.
+00A0B7 CORDANT, INC.
+00A0B8 SYMBIOS LOGIC INC.
+00A0B9 EAGLE TECHNOLOGY, INC.
+00A0BA PATTON ELECTRONICS CO.
+00A0BB HILAN GMBH
+00A0BC VIASAT, INCORPORATED
+00A0BD I-TECH CORP.
+00A0BE INTEGRATED CIRCUIT SYSTEMS, INC. COMMUNICATIONS GROUP
+00A0BF WIRELESS DATA GROUP MOTOROLA
+00A0C0 DIGITAL LINK CORP.
+00A0C1 ORTIVUS MEDICAL AB
+00A0C2 R.A. SYSTEMS CO., LTD.
+00A0C3 UNICOMPUTER GMBH
+00A0C4 CRISTIE ELECTRONICS LTD.
+00A0C5 ZYXEL COMMUNICATION
+00A0C6 QUALCOMM INCORPORATED
+00A0C7 TADIRAN TELECOMMUNICATIONS
+00A0C8 ADTRAN INC.
+00A0C9 INTEL CORPORATION - HF1-06
+00A0CA FUJITSU DENSO LTD.
+00A0CB ARK TELECOMMUNICATIONS, INC.
+00A0CC LITE-ON COMMUNICATIONS, INC.
+00A0CD DR. JOHANNES HEIDENHAIN GmbH
+00A0CE ASTROCOM CORPORATION
+00A0CF SOTAS, INC.
+00A0D0 TEN X TECHNOLOGY, INC.
+00A0D1 INVENTEC CORPORATION
+00A0D2 ALLIED TELESIS INTERNATIONAL CORPORATION
+00A0D3 INSTEM COMPUTER SYSTEMS, LTD.
+00A0D4 RADIOLAN,  INC.
+00A0D5 SIERRA WIRELESS INC.
+00A0D6 SBE, INC.
+00A0D7 KASTEN CHASE APPLIED RESEARCH
+00A0D8 SPECTRA - TEK
+00A0D9 CONVEX COMPUTER CORPORATION
+00A0DA INTEGRATED SYSTEMS Technology, Inc.
+00A0DB FISHER & PAYKEL PRODUCTION
+00A0DC O.N. ELECTRONIC CO., LTD.
+00A0DD AZONIX CORPORATION
+00A0DE YAMAHA CORPORATION
+00A0DF STS TECHNOLOGIES, INC.
+00A0E0 TENNYSON TECHNOLOGIES PTY LTD
+00A0E1 WESTPORT RESEARCH ASSOCIATES, INC.
+00A0E2 KEISOKU GIKEN CORP.
+00A0E3 XKL SYSTEMS CORP.
+00A0E4 OPTIQUEST
+00A0E5 NHC COMMUNICATIONS
+00A0E6 DIALOGIC CORPORATION
+00A0E7 CENTRAL DATA CORPORATION
+00A0E8 REUTERS HOLDINGS PLC
+00A0E9 ELECTRONIC RETAILING SYSTEMS INTERNATIONAL
+00A0EA ETHERCOM CORP.
+00A0EB Encore Networks
+00A0EC TRANSMITTON LTD.
+00A0ED PRI AUTOMATION
+00A0EE NASHOBA NETWORKS
+00A0EF LUCIDATA LTD.
+00A0F0 TORONTO MICROELECTRONICS INC.
+00A0F1 MTI
+00A0F2 INFOTEK COMMUNICATIONS, INC.
+00A0F3 STAUBLI
+00A0F4 GE
+00A0F5 RADGUARD LTD.
+00A0F6 AutoGas Systems Inc.
+00A0F7 V.I COMPUTER CORP.
+00A0F8 SYMBOL TECHNOLOGIES, INC.
+00A0F9 BINTEC COMMUNICATIONS GMBH
+00A0FA Marconi Communication GmbH
+00A0FB TORAY ENGINEERING CO., LTD.
+00A0FC IMAGE SCIENCES, INC.
+00A0FD SCITEX DIGITAL PRINTING, INC.
+00A0FE BOSTON TECHNOLOGY, INC.
+00A0FF TELLABS OPERATIONS, INC.
+00AA00 INTEL CORPORATION
+00AA01 INTEL CORPORATION
+00AA02 INTEL CORPORATION
+00AA3C OLIVETTI TELECOM SPA (OLTECO)
+00B009 Grass Valley Group
+00B017 InfoGear Technology Corp.
+00B019 Casi-Rusco
+00B01C Westport Technologies
+00B01E Rantic Labs, Inc.
+00B02A ORSYS GmbH
+00B02D ViaGate Technologies, Inc.
+00B03B HiQ Networks
+00B048 Marconi Communications Inc.
+00B04A Cisco Systems, Inc.
+00B052 Intellon Corporation
+00B064 Cisco Systems, Inc.
+00B069 Honewell Oy
+00B06D Jones Futurex Inc.
+00B080 Mannesmann Ipulsys B.V.
+00B086 LocSoft Limited
+00B08E Cisco Systems, Inc.
+00B091 Transmeta Corp.
+00B094 Alaris, Inc.
+00B09A Morrow Technologies Corp.
+00B09D Point Grey Research Inc.
+00B0AC SIAE-Microelettronica S.p.A.
+00B0AE Symmetricom
+00B0B3 Xstreamis PLC
+00B0C2 Cisco Systems, Inc.
+00B0C7 Tellabs Operations, Inc.
+00B0CE TECHNOLOGY RESCUE
+00B0D0 Dell Computer Corp.
+00B0DB Nextcell, Inc.
+00B0DF Reliable Data Technology, Inc.
+00B0E7 British Federal Ltd.
+00B0EC EACEM
+00B0EE Ajile Systems, Inc.
+00B0F0 CALY NETWORKS
+00B0F5 NetWorth Technologies, Inc.
+00BB01 OCTOTHORPE CORP.
+00BBF0 UNGERMANN-BASS INC.
+00C000 LANOPTICS, LTD.
+00C001 DIATEK PATIENT MANAGMENT
+00C002 SERCOMM CORPORATION
+00C003 GLOBALNET COMMUNICATIONS
+00C004 JAPAN BUSINESS COMPUTER CO.LTD
+00C005 LIVINGSTON ENTERPRISES, INC.
+00C006 NIPPON AVIONICS CO., LTD.
+00C007 PINNACLE DATA SYSTEMS, INC.
+00C008 SECO SRL
+00C009 KT TECHNOLOGY (S) PTE LTD
+00C00A MICRO CRAFT
+00C00B NORCONTROL A.S.
+00C00C RELIA TECHNOLGIES
+00C00D ADVANCED LOGIC RESEARCH, INC.
+00C00E PSITECH, INC.
+00C00F QUANTUM SOFTWARE SYSTEMS LTD.
+00C010 HIRAKAWA HEWTECH CORP.
+00C011 INTERACTIVE COMPUTING DEVICES
+00C012 NETSPAN CORPORATION
+00C013 NETRIX
+00C014 TELEMATICS CALABASAS INT'L,INC
+00C015 NEW MEDIA CORPORATION
+00C016 ELECTRONIC THEATRE CONTROLS
+00C017 FORTE NETWORKS
+00C018 LANART CORPORATION
+00C019 LEAP TECHNOLOGY, INC.
+00C01A COROMETRICS MEDICAL SYSTEMS
+00C01B SOCKET COMMUNICATIONS, INC.
+00C01C INTERLINK COMMUNICATIONS LTD.
+00C01D GRAND JUNCTION NETWORKS, INC.
+00C01E LA FRANCAISE DES JEUX
+00C01F S.E.R.C.E.L.
+00C020 ARCO ELECTRONIC, CONTROL LTD.
+00C021 NETEXPRESS
+00C022 LASERMASTER TECHNOLOGIES, INC.
+00C023 TUTANKHAMON ELECTRONICS
+00C024 EDEN SISTEMAS DE COMPUTACAO SA
+00C025 DATAPRODUCTS CORPORATION
+00C026 LANS TECHNOLOGY CO., LTD.
+00C027 CIPHER SYSTEMS, INC.
+00C028 JASCO CORPORATION
+00C029 Nexans Deutschland AG - ANS
+00C02A OHKURA ELECTRIC CO., LTD.
+00C02B GERLOFF GESELLSCHAFT FUR
+00C02C CENTRUM COMMUNICATIONS, INC.
+00C02D FUJI PHOTO FILM CO., LTD.
+00C02E NETWIZ
+00C02F OKUMA CORPORATION
+00C030 INTEGRATED ENGINEERING B. V.
+00C031 DESIGN RESEARCH SYSTEMS, INC.
+00C032 I-CUBED LIMITED
+00C033 TELEBIT COMMUNICATIONS APS
+00C034 TRANSACTION NETWORK
+00C035 QUINTAR COMPANY
+00C036 RAYTECH ELECTRONIC CORP.
+00C037 DYNATEM
+00C038 RASTER IMAGE PROCESSING SYSTEM
+00C039 TDK SEMICONDUCTOR CORPORATION
+00C03A MEN-MIKRO ELEKTRONIK GMBH
+00C03B MULTIACCESS COMPUTING CORP.
+00C03C TOWER TECH S.R.L.
+00C03D WIESEMANN & THEIS GMBH
+00C03E FA. GEBR. HELLER GMBH
+00C03F STORES AUTOMATED SYSTEMS, INC.
+00C040 ECCI
+00C041 DIGITAL TRANSMISSION SYSTEMS
+00C042 DATALUX CORP.
+00C043 STRATACOM
+00C044 EMCOM CORPORATION
+00C045 ISOLATION SYSTEMS, LTD.
+00C046 KEMITRON LTD.
+00C047 UNIMICRO SYSTEMS, INC.
+00C048 BAY TECHNICAL ASSOCIATES
+00C049 U.S. ROBOTICS, INC.
+00C04A GROUP 2000 AG
+00C04B CREATIVE MICROSYSTEMS
+00C04C DEPARTMENT OF FOREIGN AFFAIRS
+00C04D MITEC, INC.
+00C04E COMTROL CORPORATION
+00C04F DELL COMPUTER CORPORATION
+00C050 TOYO DENKI SEIZO K.K.
+00C051 ADVANCED INTEGRATION RESEARCH
+00C052 BURR-BROWN
+00C053 DAVOX CORPORATION
+00C054 NETWORK PERIPHERALS, LTD.
+00C055 MODULAR COMPUTING TECHNOLOGIES
+00C056 SOMELEC
+00C057 MYCO ELECTRONICS
+00C058 DATAEXPERT CORP.
+00C059 NIPPON DENSO CO., LTD.
+00C05A SEMAPHORE COMMUNICATIONS CORP.
+00C05B NETWORKS NORTHWEST, INC.
+00C05C ELONEX PLC
+00C05D L&N TECHNOLOGIES
+00C05E VARI-LITE, INC.
+00C05F FINE-PAL COMPANY LIMITED
+00C060 ID SCANDINAVIA AS
+00C061 SOLECTEK CORPORATION
+00C062 IMPULSE TECHNOLOGY
+00C063 MORNING STAR TECHNOLOGIES, INC
+00C064 GENERAL DATACOMM IND. INC.
+00C065 SCOPE COMMUNICATIONS, INC.
+00C066 DOCUPOINT, INC.
+00C067 UNITED BARCODE INDUSTRIES
+00C068 PHILIP DRAKE ELECTRONICS LTD.
+00C069 Axxcelera Broadband Wireless
+00C06A ZAHNER-ELEKTRIK GMBH & CO. KG
+00C06B OSI PLUS CORPORATION
+00C06C SVEC COMPUTER CORP.
+00C06D BOCA RESEARCH, INC.
+00C06E HAFT TECHNOLOGY, INC.
+00C06F KOMATSU LTD.
+00C070 SECTRA SECURE-TRANSMISSION AB
+00C071 AREANEX COMMUNICATIONS, INC.
+00C072 KNX LTD.
+00C073 XEDIA CORPORATION
+00C074 TOYODA AUTOMATIC LOOM
+00C075 XANTE CORPORATION
+00C076 I-DATA INTERNATIONAL A-S
+00C077 DAEWOO TELECOM LTD.
+00C078 COMPUTER SYSTEMS ENGINEERING
+00C079 FONSYS CO.,LTD.
+00C07A PRIVA B.V.
+00C07B ASCEND COMMUNICATIONS, INC.
+00C07C HIGHTECH INFORMATION
+00C07D RISC DEVELOPMENTS LTD.
+00C07E KUBOTA CORPORATION ELECTRONIC
+00C07F NUPON COMPUTING CORP.
+00C080 NETSTAR, INC.
+00C081 METRODATA LTD.
+00C082 MOORE PRODUCTS CO.
+00C083 TRACE MOUNTAIN PRODUCTS, INC.
+00C084 DATA LINK CORP. LTD.
+00C085 ELECTRONICS FOR IMAGING, INC.
+00C086 THE LYNK CORPORATION
+00C087 UUNET TECHNOLOGIES, INC.
+00C088 EKF ELEKTRONIK GMBH
+00C089 TELINDUS DISTRIBUTION
+00C08A LAUTERBACH DATENTECHNIK GMBH
+00C08B RISQ MODULAR SYSTEMS, INC.
+00C08C PERFORMANCE TECHNOLOGIES, INC.
+00C08D TRONIX PRODUCT DEVELOPMENT
+00C08E NETWORK INFORMATION TECHNOLOGY
+00C08F MATSUSHITA ELECTRIC WORKS, LTD
+00C090 PRAIM S.R.L.
+00C091 JABIL CIRCUIT, INC.
+00C092 MENNEN MEDICAL INC.
+00C093 ALTA RESEARCH CORP.
+00C094 VMX INC.
+00C095 ZNYX
+00C096 TAMURA CORPORATION
+00C097 ARCHIPEL SA
+00C098 CHUNTEX ELECTRONIC CO., LTD.
+00C099 YOSHIKI INDUSTRIAL CO.,LTD.
+00C09A PHOTONICS CORPORATION
+00C09B RELIANCE COMM/TEC, R-TEC
+00C09C TOA ELECTRONIC LTD.
+00C09D DISTRIBUTED SYSTEMS INT'L, INC
+00C09E CACHE COMPUTERS, INC.
+00C09F QUANTA COMPUTER, INC.
+00C0A0 ADVANCE MICRO RESEARCH, INC.
+00C0A1 TOKYO DENSHI SEKEI CO.
+00C0A2 INTERMEDIUM A/S
+00C0A3 DUAL ENTERPRISES CORPORATION
+00C0A4 UNIGRAF OY
+00C0A5 DICKENS DATA SYSTEMS
+00C0A6 EXICOM AUSTRALIA PTY. LTD
+00C0A7 SEEL LTD.
+00C0A8 GVC CORPORATION
+00C0A9 BARRON MCCANN LTD.
+00C0AA SILICON VALLEY COMPUTER
+00C0AB Telco Systems, Inc.
+00C0AC GAMBIT COMPUTER COMMUNICATIONS
+00C0AD MARBEN COMMUNICATION SYSTEMS
+00C0AE TOWERCOM CO. INC. DBA PC HOUSE
+00C0AF TEKLOGIX INC.
+00C0B0 GCC TECHNOLOGIES,INC.
+00C0B1 GENIUS NET CO.
+00C0B2 NORAND CORPORATION
+00C0B3 COMSTAT DATACOMM CORPORATION
+00C0B4 MYSON TECHNOLOGY, INC.
+00C0B5 CORPORATE NETWORK SYSTEMS,INC.
+00C0B6 Snap Appliance, Inc.
+00C0B7 AMERICAN POWER CONVERSION CORP
+00C0B8 FRASER'S HILL LTD.
+00C0B9 FUNK SOFTWARE, INC.
+00C0BA NETVANTAGE
+00C0BB FORVAL CREATIVE, INC.
+00C0BC TELECOM AUSTRALIA/CSSC
+00C0BD INEX TECHNOLOGIES, INC.
+00C0BE ALCATEL - SEL
+00C0BF TECHNOLOGY CONCEPTS, LTD.
+00C0C0 SHORE MICROSYSTEMS, INC.
+00C0C1 QUAD/GRAPHICS, INC.
+00C0C2 INFINITE NETWORKS LTD.
+00C0C3 ACUSON COMPUTED SONOGRAPHY
+00C0C4 COMPUTER OPERATIONAL
+00C0C5 SID INFORMATICA
+00C0C6 PERSONAL MEDIA CORP.
+00C0C7 SPARKTRUM MICROSYSTEMS, INC.
+00C0C8 MICRO BYTE PTY. LTD.
+00C0C9 ELSAG BAILEY PROCESS
+00C0CA ALFA, INC.
+00C0CB CONTROL TECHNOLOGY CORPORATION
+00C0CC TELESCIENCES CO SYSTEMS, INC.
+00C0CD COMELTA, S.A.
+00C0CE CEI SYSTEMS & ENGINEERING PTE
+00C0CF IMATRAN VOIMA OY
+00C0D0 RATOC SYSTEM INC.
+00C0D1 COMTREE TECHNOLOGY CORPORATION
+00C0D2 SYNTELLECT, INC.
+00C0D3 OLYMPUS IMAGE SYSTEMS, INC.
+00C0D4 AXON NETWORKS, INC.
+00C0D5 QUANCOM ELECTRONIC GMBH
+00C0D6 J1 SYSTEMS, INC.
+00C0D7 TAIWAN TRADING CENTER DBA
+00C0D8 UNIVERSAL DATA SYSTEMS
+00C0D9 QUINTE NETWORK CONFIDENTIALITY
+00C0DA NICE SYSTEMS LTD.
+00C0DB IPC CORPORATION (PTE) LTD.
+00C0DC EOS TECHNOLOGIES, INC.
+00C0DD QLogic Corporation
+00C0DE ZCOMM, INC.
+00C0DF KYE Systems Corp.
+00C0E0 DSC COMMUNICATION CORP.
+00C0E1 SONIC SOLUTIONS
+00C0E2 CALCOMP, INC.
+00C0E3 OSITECH COMMUNICATIONS, INC.
+00C0E4 SIEMENS BUILDING
+00C0E5 GESPAC, S.A.
+00C0E6 Verilink Corporation
+00C0E7 FIBERDATA AB
+00C0E8 PLEXCOM, INC.
+00C0E9 OAK SOLUTIONS, LTD.
+00C0EA ARRAY TECHNOLOGY LTD.
+00C0EB SEH COMPUTERTECHNIK GMBH
+00C0EC DAUPHIN TECHNOLOGY
+00C0ED US ARMY ELECTRONIC
+00C0EE KYOCERA CORPORATION
+00C0EF ABIT CORPORATION
+00C0F0 KINGSTON TECHNOLOGY CORP.
+00C0F1 SHINKO ELECTRIC CO., LTD.
+00C0F2 TRANSITION NETWORKS
+00C0F3 NETWORK COMMUNICATIONS CORP.
+00C0F4 INTERLINK SYSTEM CO., LTD.
+00C0F5 METACOMP, INC.
+00C0F6 CELAN TECHNOLOGY INC.
+00C0F7 ENGAGE COMMUNICATION, INC.
+00C0F8 ABOUT COMPUTING INC.
+00C0F9 HARRIS AND JEFFRIES, INC.
+00C0FA CANARY COMMUNICATIONS, INC.
+00C0FB ADVANCED TECHNOLOGY LABS
+00C0FC ELASTIC REALITY, INC.
+00C0FD PROSUM
+00C0FE APTEC COMPUTER SYSTEMS, INC.
+00C0FF DOT HILL SYSTEMS CORPORATION
+00CBBD Cambridge Broadband Ltd.
+00CF1C COMMUNICATION MACHINERY CORP.
+00D000 FERRAN SCIENTIFIC, INC.
+00D001 VST TECHNOLOGIES, INC.
+00D002 DITECH CORPORATION
+00D003 COMDA ENTERPRISES CORP.
+00D004 PENTACOM LTD.
+00D005 ZHS ZEITMANAGEMENTSYSTEME
+00D006 CISCO SYSTEMS, INC.
+00D007 MIC ASSOCIATES, INC.
+00D008 MACTELL CORPORATION
+00D009 HSING TECH. ENTERPRISE CO. LTD
+00D00A LANACCESS TELECOM S.A.
+00D00B RHK TECHNOLOGY, INC.
+00D00C SNIJDER MICRO SYSTEMS
+00D00D MICROMERITICS INSTRUMENT
+00D00E PLURIS, INC.
+00D00F SPEECH DESIGN GMBH
+00D010 CONVERGENT NETWORKS, INC.
+00D011 PRISM VIDEO, INC.
+00D012 GATEWORKS CORP.
+00D013 PRIMEX AEROSPACE COMPANY
+00D014 ROOT, INC.
+00D015 UNIVEX MICROTECHNOLOGY CORP.
+00D016 SCM MICROSYSTEMS, INC.
+00D017 SYNTECH INFORMATION CO., LTD.
+00D018 QWES. COM, INC.
+00D019 DAINIPPON SCREEN CORPORATE
+00D01A URMET SUD S.P.A.
+00D01B MIMAKI ENGINEERING CO., LTD.
+00D01C SBS TECHNOLOGIES,
+00D01D FURUNO ELECTRIC CO., LTD.
+00D01E PINGTEL CORP.
+00D01F CTAM PTY. LTD.
+00D020 AIM SYSTEM, INC.
+00D021 REGENT ELECTRONICS CORP.
+00D022 INCREDIBLE TECHNOLOGIES, INC.
+00D023 INFORTREND TECHNOLOGY, INC.
+00D024 Cognex Corporation
+00D025 XROSSTECH, INC.
+00D026 HIRSCHMANN AUSTRIA GMBH
+00D027 APPLIED AUTOMATION, INC.
+00D028 OMNEON VIDEO NETWORKS
+00D029 WAKEFERN FOOD CORPORATION
+00D02A Voxent Systems Ltd.
+00D02B JETCELL, INC.
+00D02C CAMPBELL SCIENTIFIC, INC.
+00D02D ADEMCO
+00D02E COMMUNICATION AUTOMATION CORP.
+00D02F VLSI TECHNOLOGY INC.
+00D030 SAFETRAN SYSTEMS CORP.
+00D031 INDUSTRIAL LOGIC CORPORATION
+00D032 YANO ELECTRIC CO., LTD.
+00D033 DALIAN DAXIAN NETWORK
+00D034 ORMEC SYSTEMS CORP.
+00D035 BEHAVIOR TECH. COMPUTER CORP.
+00D036 TECHNOLOGY ATLANTA CORP.
+00D037 PHILIPS-DVS-LO BDR
+00D038 FIVEMERE, LTD.
+00D039 UTILICOM, INC.
+00D03A ZONEWORX, INC.
+00D03B VISION PRODUCTS PTY. LTD.
+00D03C Vieo, Inc.
+00D03D GALILEO TECHNOLOGY, LTD.
+00D03E ROCKETCHIPS, INC.
+00D03F AMERICAN COMMUNICATION
+00D040 SYSMATE CO., LTD.
+00D041 AMIGO TECHNOLOGY CO., LTD.
+00D042 MAHLO GMBH & CO. UG
+00D043 ZONAL RETAIL DATA SYSTEMS
+00D044 ALIDIAN NETWORKS, INC.
+00D045 KVASER AB
+00D046 DOLBY LABORATORIES, INC.
+00D047 XN TECHNOLOGIES
+00D048 ECTON, INC.
+00D049 IMPRESSTEK CO., LTD.
+00D04A PRESENCE TECHNOLOGY GMBH
+00D04B LA CIE GROUP S.A.
+00D04C EUROTEL TELECOM LTD.
+00D04D DIV OF RESEARCH & STATISTICS
+00D04E LOGIBAG
+00D04F BITRONICS, INC.
+00D050 ISKRATEL
+00D051 O2 MICRO, INC.
+00D052 ASCEND COMMUNICATIONS, INC.
+00D053 CONNECTED SYSTEMS
+00D054 SAS INSTITUTE INC.
+00D055 KATHREIN-WERKE KG
+00D056 SOMAT CORPORATION
+00D057 ULTRAK, INC.
+00D058 CISCO SYSTEMS, INC.
+00D059 AMBIT MICROSYSTEMS CORP.
+00D05A SYMBIONICS, LTD.
+00D05B ACROLOOP MOTION CONTROL
+00D05C TECHNOTREND SYSTEMTECHNIK GMBH
+00D05D INTELLIWORXX, INC.
+00D05E STRATABEAM TECHNOLOGY, INC.
+00D05F VALCOM, INC.
+00D060 PANASONIC EUROPEAN
+00D061 TREMON ENTERPRISES CO., LTD.
+00D062 DIGIGRAM
+00D063 CISCO SYSTEMS, INC.
+00D064 MULTITEL
+00D065 TOKO ELECTRIC
+00D066 WINTRISS ENGINEERING CORP.
+00D067 CAMPIO COMMUNICATIONS
+00D068 IWILL CORPORATION
+00D069 TECHNOLOGIC SYSTEMS
+00D06A LINKUP SYSTEMS CORPORATION
+00D06B SR TELECOM INC.
+00D06C SHAREWAVE, INC.
+00D06D ACRISON, INC.
+00D06E TRENDVIEW RECORDERS LTD.
+00D06F KMC CONTROLS
+00D070 LONG WELL ELECTRONICS CORP.
+00D071 ECHELON CORP.
+00D072 BROADLOGIC
+00D073 ACN ADVANCED COMMUNICATIONS
+00D074 TAQUA SYSTEMS, INC.
+00D075 ALARIS MEDICAL SYSTEMS, INC.
+00D076 MERRILL LYNCH & CO., INC.
+00D077 LUCENT TECHNOLOGIES
+00D078 ELTEX OF SWEDEN AB
+00D079 CISCO SYSTEMS, INC.
+00D07A AMAQUEST COMPUTER CORP.
+00D07B COMCAM INTERNATIONAL LTD.
+00D07C KOYO ELECTRONICS INC. CO.,LTD.
+00D07D COSINE COMMUNICATIONS
+00D07E KEYCORP LTD.
+00D07F STRATEGY & TECHNOLOGY, LIMITED
+00D080 EXABYTE CORPORATION
+00D081 REAL TIME DEVICES USA, INC.
+00D082 IOWAVE INC.
+00D083 INVERTEX, INC.
+00D084 NEXCOMM SYSTEMS, INC.
+00D085 OTIS ELEVATOR COMPANY
+00D086 FOVEON, INC.
+00D087 MICROFIRST INC.
+00D088 Terayon Communications Systems
+00D089 DYNACOLOR, INC.
+00D08A PHOTRON USA
+00D08B ADVA Limited
+00D08C GENOA TECHNOLOGY, INC.
+00D08D PHOENIX GROUP, INC.
+00D08E NVISION INC.
+00D08F ARDENT TECHNOLOGIES, INC.
+00D090 CISCO SYSTEMS, INC.
+00D091 SMARTSAN SYSTEMS, INC.
+00D092 GLENAYRE WESTERN MULTIPLEX
+00D093 TQ - COMPONENTS GMBH
+00D094 TIMELINE VISTA, INC.
+00D095 XYLAN CORPORATION
+00D096 3COM EUROPE LTD.
+00D097 CISCO SYSTEMS, INC.
+00D098 Photon Dynamics Canada Inc.
+00D099 ELCARD OY
+00D09A FILANET CORPORATION
+00D09B SPECTEL LTD.
+00D09C KAPADIA COMMUNICATIONS
+00D09D VERIS INDUSTRIES
+00D09E 2WIRE, INC.
+00D09F NOVTEK TEST SYSTEMS
+00D0A0 MIPS DENMARK
+00D0A1 OSKAR VIERLING GMBH + CO. KG
+00D0A2 INTEGRATED DEVICE
+00D0A3 VOCAL DATA, INC.
+00D0A4 ALANTRO COMMUNICATIONS
+00D0A5 AMERICAN ARIUM
+00D0A6 LANBIRD TECHNOLOGY CO., LTD.
+00D0A7 TOKYO SOKKI KENKYUJO CO., LTD.
+00D0A8 NETWORK ENGINES, INC.
+00D0A9 SHINANO KENSHI CO., LTD.
+00D0AA CHASE COMMUNICATIONS
+00D0AB DELTAKABEL TELECOM CV
+00D0AC GRAYSON WIRELESS
+00D0AD TL INDUSTRIES
+00D0AE ORESIS COMMUNICATIONS, INC.
+00D0AF CUTLER-HAMMER, INC.
+00D0B0 BITSWITCH LTD.
+00D0B1 OMEGA ELECTRONICS SA
+00D0B2 XIOTECH CORPORATION
+00D0B3 DRS FLIGHT SAFETY AND
+00D0B4 KATSUJIMA CO., LTD.
+00D0B5 IPricot formerly DotCom
+00D0B6 CRESCENT NETWORKS, INC.
+00D0B7 INTEL CORPORATION
+00D0B8 IOMEGA CORP.
+00D0B9 MICROTEK INTERNATIONAL, INC.
+00D0BA CISCO SYSTEMS, INC.
+00D0BB CISCO SYSTEMS, INC.
+00D0BC CISCO SYSTEMS, INC.
+00D0BD SICAN GMBH
+00D0BE EMUTEC INC.
+00D0BF PIVOTAL TECHNOLOGIES
+00D0C0 CISCO SYSTEMS, INC.
+00D0C1 HARMONIC DATA SYSTEMS, LTD.
+00D0C2 BALTHAZAR TECHNOLOGY AB
+00D0C3 VIVID TECHNOLOGY PTE, LTD.
+00D0C4 TERATECH CORPORATION
+00D0C5 COMPUTATIONAL SYSTEMS, INC.
+00D0C6 THOMAS & BETTS CORP.
+00D0C7 PATHWAY, INC.
+00D0C8 I/O CONSULTING A/S
+00D0C9 ADVANTECH CO., LTD.
+00D0CA INTRINSYC SOFTWARE INC.
+00D0CB DASAN CO., LTD.
+00D0CC TECHNOLOGIES LYRE INC.
+00D0CD ATAN TECHNOLOGY INC.
+00D0CE ASYST ELECTRONIC
+00D0CF MORETON BAY
+00D0D0 ZHONGXING TELECOM LTD.
+00D0D1 SIROCCO SYSTEMS, INC.
+00D0D2 EPILOG CORPORATION
+00D0D3 CISCO SYSTEMS, INC.
+00D0D4 V-BITS, INC.
+00D0D5 GRUNDIG AG
+00D0D6 AETHRA TELECOMUNICAZIONI
+00D0D7 B2C2, INC.
+00D0D8 3Com Corporation
+00D0D9 DEDICATED MICROCOMPUTERS
+00D0DA TAICOM DATA SYSTEMS CO., LTD.
+00D0DB MCQUAY INTERNATIONAL
+00D0DC MODULAR MINING SYSTEMS, INC.
+00D0DD SUNRISE TELECOM, INC.
+00D0DE PHILIPS MULTIMEDIA NETWORK
+00D0DF KUZUMI ELECTRONICS, INC.
+00D0E0 DOOIN ELECTRONICS CO.
+00D0E1 AVIONITEK ISRAEL INC.
+00D0E2 MRT MICRO, INC.
+00D0E3 ELE-CHEM ENGINEERING CO., LTD.
+00D0E4 CISCO SYSTEMS, INC.
+00D0E5 SOLIDUM SYSTEMS CORP.
+00D0E6 IBOND INC.
+00D0E7 VCON TELECOMMUNICATION LTD.
+00D0E8 MAC SYSTEM CO., LTD.
+00D0E9 ADVANTAGE CENTURY
+00D0EA NEXTONE COMMUNICATIONS, INC.
+00D0EB LIGHTERA NETWORKS, INC.
+00D0EC NAKAYO TELECOMMUNICATIONS, INC
+00D0ED XIOX
+00D0EE DICTAPHONE CORPORATION
+00D0EF IGT
+00D0F0 CONVISION TECHNOLOGY GMBH
+00D0F1 SEGA ENTERPRISES, LTD.
+00D0F2 MONTEREY NETWORKS
+00D0F3 SOLARI DI UDINE SPA
+00D0F4 CARINTHIAN TECH INSTITUTE
+00D0F5 ORANGE MICRO, INC.
+00D0F6 Alcatel Canada
+00D0F7 NEXT NETS CORPORATION
+00D0F8 FUJIAN STAR TERMINAL
+00D0F9 ACUTE COMMUNICATIONS CORP.
+00D0FA RACAL GUARDATA
+00D0FB TEK MICROSYSTEMS, INCORPORATED
+00D0FC GRANITE MICROSYSTEMS
+00D0FD OPTIMA TELE.COM, INC.
+00D0FE ASTRAL POINT
+00D0FF CISCO SYSTEMS, INC.
+00DD00 UNGERMANN-BASS INC.
+00DD01 UNGERMANN-BASS INC.
+00DD02 UNGERMANN-BASS INC.
+00DD03 UNGERMANN-BASS INC.
+00DD04 UNGERMANN-BASS INC.
+00DD05 UNGERMANN-BASS INC.
+00DD06 UNGERMANN-BASS INC.
+00DD07 UNGERMANN-BASS INC.
+00DD08 UNGERMANN-BASS INC.
+00DD09 UNGERMANN-BASS INC.
+00DD0A UNGERMANN-BASS INC.
+00DD0B UNGERMANN-BASS INC.
+00DD0C UNGERMANN-BASS INC.
+00DD0D UNGERMANN-BASS INC.
+00DD0E UNGERMANN-BASS INC.
+00DD0F UNGERMANN-BASS INC.
+00E000 FUJITSU, LTD
+00E001 STRAND LIGHTING LIMITED
+00E002 CROSSROADS SYSTEMS, INC.
+00E003 NOKIA WIRELESS BUSINESS COMMUN
+00E004 PMC-SIERRA, INC.
+00E005 TECHNICAL CORP.
+00E006 SILICON INTEGRATED SYS. CORP.
+00E007 NETWORK ALCHEMY LTD.
+00E008 AMAZING CONTROLS! INC.
+00E009 MARATHON TECHNOLOGIES CORP.
+00E00A DIBA, INC.
+00E00B ROOFTOP COMMUNICATIONS CORP.
+00E00C MOTOROLA
+00E00D RADIANT SYSTEMS
+00E00E AVALON IMAGING SYSTEMS, INC.
+00E00F SHANGHAI BAUD DATA
+00E010 HESS SB-AUTOMATENBAU GmbH
+00E011 UNIDEN SAN DIEGO R&D CENTER, INC.
+00E012 PLUTO TECHNOLOGIES INTERNATIONAL INC.
+00E013 EASTERN ELECTRONIC CO., LTD.
+00E014 CISCO SYSTEMS, INC.
+00E015 HEIWA CORPORATION
+00E016 RAPID CITY COMMUNICATIONS
+00E017 EXXACT GmbH
+00E018 ASUSTEK COMPUTER INC.
+00E019 ING. GIORDANO ELETTRONICA
+00E01A COMTEC SYSTEMS. CO., LTD.
+00E01B SPHERE COMMUNICATIONS, INC.
+00E01C MOBILITY ELECTRONICSY
+00E01D WebTV NETWORKS, INC.
+00E01E CISCO SYSTEMS, INC.
+00E01F AVIDIA Systems, Inc.
+00E020 TECNOMEN OY
+00E021 FREEGATE CORP.
+00E022 MediaLight, Inc.
+00E023 TELRAD
+00E024 GADZOOX NETWORKS
+00E025 dit CO., LTD.
+00E026 EASTMAN KODAK CO.
+00E027 DUX, INC.
+00E028 APTIX CORPORATION
+00E029 STANDARD MICROSYSTEMS CORP.
+00E02A TANDBERG TELEVISION AS
+00E02B EXTREME NETWORKS
+00E02C AST COMPUTER
+00E02D InnoMediaLogic, Inc.
+00E02E SPC ELECTRONICS CORPORATION
+00E02F MCNS HOLDINGS, L.P.
+00E030 MELITA INTERNATIONAL CORP.
+00E031 HAGIWARA ELECTRIC CO., LTD.
+00E032 MISYS FINANCIAL SYSTEMS, LTD.
+00E033 E.E.P.D. GmbH
+00E034 CISCO SYSTEMS, INC.
+00E035 LOUGHBOROUGH SOUND IMAGES, PLC
+00E036 PIONEER CORPORATION
+00E037 CENTURY CORPORATION
+00E038 PROXIMA CORPORATION
+00E039 PARADYNE CORP.
+00E03A CABLETRON SYSTEMS, INC.
+00E03B PROMINET CORPORATION
+00E03C AdvanSys
+00E03D FOCON ELECTRONIC SYSTEMS A/S
+00E03E ALFATECH, INC.
+00E03F JATON CORPORATION
+00E040 DeskStation Technology, Inc.
+00E041 CSPI
+00E042 Pacom Systems Ltd.
+00E043 VitalCom
+00E044 LSICS CORPORATION
+00E045 TOUCHWAVE, INC.
+00E046 BENTLY NEVADA CORP.
+00E047 INFOCUS SYSTEMS
+00E048 SDL COMMUNICATIONS, INC.
+00E049 MICROWI ELECTRONIC GmbH
+00E04A ENHANCED MESSAGING SYSTEMS, INC
+00E04B JUMP INDUSTRIELLE COMPUTERTECHNIK GmbH
+00E04C REALTEK SEMICONDUCTOR CORP.
+00E04D INTERNET INITIATIVE JAPAN, INC
+00E04E SANYO DENKI CO., LTD.
+00E04F CISCO SYSTEMS, INC.
+00E050 EXECUTONE INFORMATION SYSTEMS, INC.
+00E051 TALX CORPORATION
+00E052 FOUNDRY NETWORKS, INC.
+00E053 CELLPORT LABS, INC.
+00E054 KODAI HITEC CO., LTD.
+00E055 INGENIERIA ELECTRONICA COMERCIAL INELCOM S.A.
+00E056 HOLONTECH CORPORATION
+00E057 HAN MICROTELECOM. CO., LTD.
+00E058 PHASE ONE DENMARK A/S
+00E059 CONTROLLED ENVIRONMENTS, LTD.
+00E05A GALEA NETWORK SECURITY
+00E05B WEST END SYSTEMS CORP.
+00E05C MATSUSHITA KOTOBUKI ELECTRONICS INDUSTRIES, LTD.
+00E05D UNITEC CO., LTD.
+00E05E JAPAN AVIATION ELECTRONICS INDUSTRY, LTD.
+00E05F e-Net, Inc.
+00E060 SHERWOOD
+00E061 EdgePoint Networks, Inc.
+00E062 HOST ENGINEERING
+00E063 CABLETRON - YAGO SYSTEMS, INC.
+00E064 SAMSUNG ELECTRONICS
+00E065 OPTICAL ACCESS INTERNATIONAL
+00E066 ProMax Systems, Inc.
+00E067 eac AUTOMATION-CONSULTING GmbH
+00E068 MERRIMAC SYSTEMS INC.
+00E069 JAYCOR
+00E06A KAPSCH AG
+00E06B W&G SPECIAL PRODUCTS
+00E06C AEP Systems International Ltd
+00E06D COMPUWARE CORPORATION
+00E06E FAR SYSTEMS S.p.A.
+00E06F Terayon Communications Systems
+00E070 DH TECHNOLOGY
+00E071 EPIS MICROCOMPUTER
+00E072 LYNK
+00E073 NATIONAL AMUSEMENT NETWORK, INC.
+00E074 TIERNAN COMMUNICATIONS, INC.
+00E075 Verilink Corporation
+00E076 DEVELOPMENT CONCEPTS, INC.
+00E077 WEBGEAR, INC.
+00E078 BERKELEY NETWORKS
+00E079 A.T.N.R.
+00E07A MIKRODIDAKT AB
+00E07B BAY NETWORKS
+00E07C METTLER-TOLEDO, INC.
+00E07D NETRONIX, INC.
+00E07E WALT DISNEY IMAGINEERING
+00E07F LOGISTISTEM s.r.l.
+00E080 CONTROL RESOURCES CORPORATION
+00E081 TYAN COMPUTER CORP.
+00E082 ANERMA
+00E083 JATO TECHNOLOGIES, INC.
+00E084 COMPULITE R&D
+00E085 GLOBAL MAINTECH, INC.
+00E086 CYBEX COMPUTER PRODUCTS
+00E087 LeCroy - Networking Productions Division
+00E088 LTX CORPORATION
+00E089 ION Networks, Inc.
+00E08A GEC AVERY, LTD.
+00E08B QLogic Corp.
+00E08C NEOPARADIGM LABS, INC.
+00E08D PRESSURE SYSTEMS, INC.
+00E08E UTSTARCOM
+00E08F CISCO SYSTEMS, INC.
+00E090 BECKMAN LAB. AUTOMATION DIV.
+00E091 LG ELECTRONICS, INC.
+00E092 ADMTEK INCORPORATED
+00E093 ACKFIN NETWORKS
+00E094 OSAI SRL
+00E095 ADVANCED-VISION TECHNOLGIES CORP.
+00E096 SHIMADZU CORPORATION
+00E097 CARRIER ACCESS CORPORATION
+00E098 AboCom Systems, Inc.
+00E099 SAMSON AG
+00E09A POSITRON INDUSTRIES, INC.
+00E09B ENGAGE NETWORKS, INC.
+00E09C MII
+00E09D SARNOFF CORPORATION
+00E09E QUANTUM CORPORATION
+00E09F PIXEL VISION
+00E0A0 WILTRON CO.
+00E0A1 HIMA PAUL HILDEBRANDT GmbH Co. KG
+00E0A2 MICROSLATE INC.
+00E0A3 CISCO SYSTEMS, INC.
+00E0A4 ESAOTE S.p.A.
+00E0A5 ComCore Semiconductor, Inc.
+00E0A6 TELOGY NETWORKS, INC.
+00E0A7 IPC INFORMATION SYSTEMS, INC.
+00E0A8 SAT GmbH & Co.
+00E0A9 FUNAI ELECTRIC CO., LTD.
+00E0AA ELECTROSONIC LTD.
+00E0AB DIMAT S.A.
+00E0AC MIDSCO, INC.
+00E0AD EES TECHNOLOGY, LTD.
+00E0AE XAQTI CORPORATION
+00E0AF GENERAL DYNAMICS INFORMATION SYSTEMS
+00E0B0 CISCO SYSTEMS, INC.
+00E0B1 PACKET ENGINES, INC.
+00E0B2 TELMAX COMMUNICATIONS CORP.
+00E0B3 EtherWAN Systems, Inc.
+00E0B4 TECHNO SCOPE CO., LTD.
+00E0B5 ARDENT COMMUNICATIONS CORP.
+00E0B6 Entrada Networks
+00E0B7 PI GROUP, LTD.
+00E0B8 GATEWAY 2000
+00E0B9 BYAS SYSTEMS
+00E0BA BERGHOF AUTOMATIONSTECHNIK GmbH
+00E0BB NBX CORPORATION
+00E0BC SYMON COMMUNICATIONS, INC.
+00E0BD INTERFACE SYSTEMS, INC.
+00E0BE GENROCO INTERNATIONAL, INC.
+00E0BF TORRENT NETWORKING TECHNOLOGIES CORP.
+00E0C0 SEIWA ELECTRIC MFG. CO., LTD.
+00E0C1 MEMOREX TELEX JAPAN, LTD.
+00E0C2 NECSY S.p.A.
+00E0C3 SAKAI SYSTEM DEVELOPMENT CORP.
+00E0C4 HORNER ELECTRIC, INC.
+00E0C5 BCOM ELECTRONICS INC.
+00E0C6 LINK2IT, L.L.C.
+00E0C7 EUROTECH SRL
+00E0C8 VIRTUAL ACCESS, LTD.
+00E0C9 AutomatedLogic Corporation
+00E0CA BEST DATA PRODUCTS
+00E0CB RESON, INC.
+00E0CC HERO SYSTEMS, LTD.
+00E0CD SENSIS CORPORATION
+00E0CE ARN
+00E0CF INTEGRATED DEVICE TECHNOLOGY, INC.
+00E0D0 NETSPEED, INC.
+00E0D1 TELSIS LIMITED
+00E0D2 VERSANET COMMUNICATIONS, INC.
+00E0D3 DATENTECHNIK GmbH
+00E0D4 EXCELLENT COMPUTER
+00E0D5 ARCXEL TECHNOLOGIES, INC.
+00E0D6 COMPUTER & COMMUNICATION RESEARCH LAB.
+00E0D7 SUNSHINE ELECTRONICS, INC.
+00E0D8 LANBit Computer, Inc.
+00E0D9 TAZMO CO., LTD.
+00E0DA ASSURED ACCESS TECHNOLOGY, INC.
+00E0DB ViaVideo Communications, Inc.
+00E0DC NEXWARE CORP.
+00E0DD ZENITH ELECTRONICS CORPORATION
+00E0DE DATAX NV
+00E0DF KE KOMMUNIKATIONS-ELECTRONIK
+00E0E0 SI ELECTRONICS, LTD.
+00E0E1 G2 NETWORKS, INC.
+00E0E2 INNOVA CORP.
+00E0E3 SK-ELEKTRONIK GmbH
+00E0E4 FANUC ROBOTICS NORTH AMERICA, Inc.
+00E0E5 CINCO NETWORKS, INC.
+00E0E6 INCAA DATACOM B.V.
+00E0E7 RAYTHEON E-SYSTEMS, INC.
+00E0E8 GRETACODER Data Systems AG
+00E0E9 DATA LABS, INC.
+00E0EA INNOVAT COMMUNICATIONS, INC.
+00E0EB DIGICOM SYSTEMS, INCORPORATED
+00E0EC CELESTICA INC.
+00E0ED SILICOM, LTD.
+00E0EE MAREL HF
+00E0EF DIONEX
+00E0F0 ABLER TECHNOLOGY, INC.
+00E0F1 THAT CORPORATION
+00E0F2 ARLOTTO COMNET, INC.
+00E0F3 WebSprint Communications, Inc.
+00E0F4 INSIDE Technology A/S
+00E0F5 TELES AG
+00E0F6 DECISION EUROPE
+00E0F7 CISCO SYSTEMS, INC.
+00E0F8 DICNA CONTROL AB
+00E0F9 CISCO SYSTEMS, INC.
+00E0FA TRL TECHNOLOGY, LTD.
+00E0FB LEIGHTRONIX, INC.
+00E0FC HUAWEI TECHNOLOGIES CO., LTD.
+00E0FD A-TREND TECHNOLOGY CO., LTD.
+00E0FE CISCO SYSTEMS, INC.
+00E0FF SECURITY DYNAMICS TECHNOLOGIES, Inc.
+00E6D3 NIXDORF COMPUTER CORP.
+020701 RACAL-DATACOM
+021C7C PERQ SYSTEMS CORPORATION
+026086 LOGIC REPLACEMENT TECH. LTD.
+02608C 3COM CORPORATION
+027001 RACAL-DATACOM
+0270B0 M/A-COM INC. COMPANIES
+0270B3 DATA RECALL LTD
+029D8E CARDIAC RECORDERS INC.
+02AA3C OLIVETTI TELECOMM SPA (OLTECO)
+02BB01 OCTOTHORPE CORP.
+02C08C 3COM CORPORATION
+02CF1C COMMUNICATION MACHINERY CORP.
+02E6D3 NIXDORF COMPUTER CORPORATION
+040AE0 XMIT AG COMPUTER NETWORKS
+04E0C4 TRIUMPH-ADLER AG
+080001 COMPUTERVISION CORPORATION
+080002 BRIDGE COMMUNICATIONS INC.
+080003 ADVANCED COMPUTER COMM.
+080004 CROMEMCO INCORPORATED
+080005 SYMBOLICS INC.
+080006 SIEMENS AG
+080007 APPLE COMPUTER INC.
+080008 BOLT BERANEK AND NEWMAN INC.
+080009 HEWLETT PACKARD
+08000A NESTAR SYSTEMS INCORPORATED
+08000B UNISYS CORPORATION
+08000C MIKLYN DEVELOPMENT CO.
+08000D INTERNATIONAL COMPUTERS LTD.
+08000E NCR CORPORATION
+08000F MITEL CORPORATION
+080011 TEKTRONIX INC.
+080012 BELL ATLANTIC INTEGRATED SYST.
+080013 EXXON
+080014 EXCELAN
+080015 STC BUSINESS SYSTEMS
+080016 BARRISTER INFO SYS CORP
+080017 NATIONAL SEMICONDUCTOR
+080018 PIRELLI FOCOM NETWORKS
+080019 GENERAL ELECTRIC CORPORATION
+08001A TIARA/ 10NET
+08001B DATA GENERAL
+08001C KDD-KOKUSAI DEBNSIN DENWA CO.
+08001D ABLE COMMUNICATIONS INC.
+08001E APOLLO COMPUTER INC.
+08001F SHARP CORPORATION
+080020 SUN MICROSYSTEMS INC.
+080021 3M COMPANY
+080022 NBI INC.
+080023 Panasonic Communications Co., Ltd.
+080024 10NET COMMUNICATIONS/DCA
+080025 CONTROL DATA
+080026 NORSK DATA A.S.
+080027 CADMUS COMPUTER SYSTEMS
+080028 Texas Instruments
+080029 MEGATEK CORPORATION
+08002A MOSAIC TECHNOLOGIES INC.
+08002B DIGITAL EQUIPMENT CORPORATION
+08002C BRITTON LEE INC.
+08002D LAN-TEC INC.
+08002E METAPHOR COMPUTER SYSTEMS
+08002F PRIME COMPUTER INC.
+080030 NETWORK RESEARCH CORPORATION
+080030 CERN
+080030 ROYAL MELBOURNE INST OF TECH
+080031 LITTLE MACHINES INC.
+080032 TIGAN INCORPORATED
+080033 BAUSCH & LOMB
+080034 FILENET CORPORATION
+080035 MICROFIVE CORPORATION
+080036 INTERGRAPH CORPORATION
+080037 FUJI-XEROX CO. LTD.
+080038 CII HONEYWELL BULL
+080039 SPIDER SYSTEMS LIMITED
+08003A ORCATECH INC.
+08003B TORUS SYSTEMS LIMITED
+08003C SCHLUMBERGER WELL SERVICES
+08003D CADNETIX CORPORATIONS
+08003E CODEX CORPORATION
+08003F FRED KOSCHARA ENTERPRISES
+080040 FERRANTI COMPUTER SYS. LIMITED
+080041 RACAL-MILGO INFORMATION SYS..
+080042 JAPAN MACNICS CORP.
+080043 PIXEL COMPUTER INC.
+080044 DAVID SYSTEMS INC.
+080045 CONCURRENT COMPUTER CORP.
+080046 SONY CORPORATION LTD.
+080047 SEQUENT COMPUTER SYSTEMS INC.
+080048 EUROTHERM GAUGING SYSTEMS
+080049 UNIVATION
+08004A BANYAN SYSTEMS INC.
+08004B PLANNING RESEARCH CORP.
+08004C HYDRA COMPUTER SYSTEMS INC.
+08004D CORVUS SYSTEMS INC.
+08004E 3COM EUROPE LTD.
+08004F CYGNET SYSTEMS
+080050 DAISY SYSTEMS CORP.
+080051 EXPERDATA
+080052 INSYSTEC
+080053 MIDDLE EAST TECH. UNIVERSITY
+080055 STANFORD TELECOMM. INC.
+080056 STANFORD LINEAR ACCEL. CENTER
+080057 EVANS & SUTHERLAND
+080058 SYSTEMS CONCEPTS
+080059 A/S MYCRON
+08005A IBM CORPORATION
+08005B VTA TECHNOLOGIES INC.
+08005C FOUR PHASE SYSTEMS
+08005D GOULD INC.
+08005E COUNTERPOINT COMPUTER INC.
+08005F SABER TECHNOLOGY CORP.
+080060 INDUSTRIAL NETWORKING INC.
+080061 JAROGATE LTD.
+080062 GENERAL DYNAMICS
+080063 PLESSEY
+080064 AUTOPHON AG
+080065 GENRAD INC.
+080066 AGFA CORPORATION
+080067 COMDESIGN
+080068 RIDGE COMPUTERS
+080069 SILICON GRAPHICS INC.
+08006A ATT BELL LABORATORIES
+08006B ACCEL TECHNOLOGIES INC.
+08006C SUNTEK TECHNOLOGY INT'L
+08006D WHITECHAPEL COMPUTER WORKS
+08006E MASSCOMP
+08006F PHILIPS APELDOORN B.V.
+080070 MITSUBISHI ELECTRIC CORP.
+080071 MATRA (DSIE)
+080072 XEROX CORP UNIV GRANT PROGRAM
+080073 TECMAR INC.
+080074 CASIO COMPUTER CO. LTD.
+080075 DANSK DATA ELECTRONIK
+080076 PC LAN TECHNOLOGIES
+080077 TSL COMMUNICATIONS LTD.
+080078 ACCELL CORPORATION
+080079 THE DROID WORKS
+08007A INDATA
+08007B SANYO ELECTRIC CO. LTD.
+08007C VITALINK COMMUNICATIONS CORP.
+08007E AMALGAMATED WIRELESS(AUS) LTD
+08007F CARNEGIE-MELLON UNIVERSITY
+080080 AES DATA INC.
+080081 ,ASTECH INC.
+080082 VERITAS SOFTWARE
+080083 Seiko Instruments Inc.
+080084 TOMEN ELECTRONICS CORP.
+080085 ELXSI
+080086 KONICA MINOLTA HOLDINGS, INC.
+080087 XYPLEX
+080088 MCDATA CORPORATION
+080089 KINETICS
+08008A PERFORMANCE TECHNOLOGY
+08008B PYRAMID TECHNOLOGY CORP.
+08008C NETWORK RESEARCH CORPORATION
+08008D XYVISION INC.
+08008E TANDEM COMPUTERS
+08008F CHIPCOM CORPORATION
+080090 SONOMA SYSTEMS
+081443 UNIBRAIN S.A.
+08BBCC AK-NORD EDV VERTRIEBSGES. mbH
+10005A IBM CORPORATION
+1000E8 NATIONAL SEMICONDUCTOR
+800010 ATT BELL LABORATORIES
+A06A00 Verilink Corporation
+AA0000 DIGITAL EQUIPMENT CORPORATION
+AA0001 DIGITAL EQUIPMENT CORPORATION
+AA0002 DIGITAL EQUIPMENT CORPORATION
+AA0003 DIGITAL EQUIPMENT CORPORATION
+AA0004 DIGITAL EQUIPMENT CORPORATION
diff --git a/drivers/ieee1394/oui2c.sh b/drivers/ieee1394/oui2c.sh
new file mode 100644
index 000000000000..d50dc7a2d087
--- /dev/null
+++ b/drivers/ieee1394/oui2c.sh
@@ -0,0 +1,23 @@
+#!/bin/sh
+
+# Generate a C source fragment from an OUI database read on stdin.
+# Each input line is "<6-hex-digit OUI> <company name>"; the output is
+# an oui_list[] table, compiled in only when CONFIG_IEEE1394_OUI_DB is set.
+
+cat <<EOF
+/* Generated file for OUI database */
+
+#include <linux/config.h>
+
+#ifdef CONFIG_IEEE1394_OUI_DB
+struct oui_list_struct {
+	int oui;
+	char *name;
+} oui_list[] = {
+EOF
+
+# One initializer per input line.
+# NOTE(review): $name is emitted unescaped; a name containing '"' or '\'
+# would break the generated C -- confirm the database never contains those.
+while read oui name; do
+	echo "	{ 0x$oui, \"$name\" },"
+done
+
+# Close the array and the config guard.
+cat <<EOF
+};
+
+#endif /* CONFIG_IEEE1394_OUI_DB */
+EOF
diff --git a/drivers/ieee1394/pcilynx.c b/drivers/ieee1394/pcilynx.c
new file mode 100644
index 000000000000..a261d2b0e5ac
--- /dev/null
+++ b/drivers/ieee1394/pcilynx.c
@@ -0,0 +1,1982 @@
+/*
+ * pcilynx.c - Texas Instruments PCILynx driver
+ * Copyright (C) 1999,2000 Andreas Bombe <andreas.bombe@munich.netsurf.de>,
+ *                         Stephan Linz <linz@mazet.de>
+ *                         Manfred Weihs <weihs@ict.tuwien.ac.at>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/*
+ * Contributions:
+ *
+ * Manfred Weihs <weihs@ict.tuwien.ac.at>
+ *        reading bus info block (containing GUID) from serial
+ *            eeprom via i2c and storing it in config ROM
+ *        Reworked code for initiating bus resets
+ *            (long, short, with or without hold-off)
+ *        Enhancements in async and iso send code
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/wait.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/fs.h>
+#include <linux/poll.h>
+#include <linux/kdev_t.h>
+#include <asm/byteorder.h>
+#include <asm/atomic.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm/irq.h>
+
+#include "csr1212.h"
+#include "ieee1394.h"
+#include "ieee1394_types.h"
+#include "hosts.h"
+#include "ieee1394_core.h"
+#include "highlevel.h"
+#include "pcilynx.h"
+
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+
+/* print general (card independent) information */
+#define PRINT_G(level, fmt, args...) printk(level "pcilynx: " fmt "\n" , ## args)
+/* print card specific information */
+#define PRINT(level, card, fmt, args...) printk(level "pcilynx%d: " fmt "\n" , card , ## args)
+
+/* Verbose-debug variants: compile to nothing unless VERBOSEDEBUG is set. */
+#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
+#define PRINT_GD(level, fmt, args...) printk(level "pcilynx: " fmt "\n" , ## args)
+#define PRINTD(level, card, fmt, args...) printk(level "pcilynx%d: " fmt "\n" , card , ## args)
+#else
+#define PRINT_GD(level, fmt, args...) do {} while (0)
+#define PRINTD(level, card, fmt, args...) do {} while (0)
+#endif
+
+
+/* Module Parameters */
+static int skip_eeprom = 0;
+module_param(skip_eeprom, int, 0444);
+MODULE_PARM_DESC(skip_eeprom, "Use generic bus info block instead of serial eeprom (default = 0).");
+
+
+/* Driver description registered with the ieee1394 core. */
+static struct hpsb_host_driver lynx_driver;
+static unsigned int card_id;	/* presumably the next card number to assign -- TODO confirm at probe site */
+
+
+
+/*
+ * I2C stuff
+ */
+
+/* the i2c stuff was inspired by i2c-philips-par.c */
+
+/* i2c-algo-bit callback: drive the SCL line (bit 6, 0x40, of the
+ * SERIAL_EEPROM_CONTROL register).  The last driven value is cached in
+ * i2c_driven_state so SCL and SDA can be updated independently. */
+static void bit_setscl(void *data, int state)
+{
+	if (state) {
+		  ((struct ti_lynx *) data)->i2c_driven_state |= 0x00000040;
+	} else {
+		  ((struct ti_lynx *) data)->i2c_driven_state &= ~0x00000040;
+	}
+	reg_write((struct ti_lynx *) data, SERIAL_EEPROM_CONTROL, ((struct ti_lynx *) data)->i2c_driven_state);
+}
+
+/* i2c-algo-bit callback: drive the SDA line (bit 4, 0x10, of the
+ * SERIAL_EEPROM_CONTROL register), mirroring bit_setscl() above. */
+static void bit_setsda(void *data, int state)
+{
+	if (state) {
+		  ((struct ti_lynx *) data)->i2c_driven_state |= 0x00000010;
+	} else {
+		  ((struct ti_lynx *) data)->i2c_driven_state &= ~0x00000010;
+	}
+	reg_write((struct ti_lynx *) data, SERIAL_EEPROM_CONTROL, ((struct ti_lynx *) data)->i2c_driven_state);
+}
+
+/* i2c-algo-bit callback: sample SCL (bit 6 of SERIAL_EEPROM_CONTROL).
+ * Returns nonzero when the line reads high. */
+static int bit_getscl(void *data)
+{
+	return reg_read((struct ti_lynx *) data, SERIAL_EEPROM_CONTROL) & 0x00000040;
+}
+
+/* i2c-algo-bit callback: sample SDA (bit 4 of SERIAL_EEPROM_CONTROL). */
+static int bit_getsda(void *data)
+{
+	return reg_read((struct ti_lynx *) data, SERIAL_EEPROM_CONTROL) & 0x00000010;
+}
+
+/* No-op client registration hook required by struct i2c_adapter. */
+static int bit_reg(struct i2c_client *client)
+{
+	return 0;
+}
+
+/* No-op client unregistration hook required by struct i2c_adapter. */
+static int bit_unreg(struct i2c_client *client)
+{
+	return 0;
+}
+
+/* Bit-banging timing and line callbacks for the serial EEPROM i2c bus. */
+static struct i2c_algo_bit_data bit_data = {
+	.setsda			= bit_setsda,
+	.setscl			= bit_setscl,
+	.getsda			= bit_getsda,
+	.getscl			= bit_getscl,
+	.udelay			= 5,
+	.mdelay			= 5,
+	.timeout		= 100,
+};
+
+/* The i2c adapter itself; the id is made up (see FIXME). */
+static struct i2c_adapter bit_ops = {
+	.id 			= 0xAA, //FIXME: probably we should get an id in i2c-id.h
+	.client_register	= bit_reg,
+	.client_unregister	= bit_unreg,
+	.name			= "PCILynx I2C",
+};
+
+
+
+/*
+ * PCL handling functions.
+ */
+
+/* Allocate one PCL (packet control list) slot.  pcl_bmap is a bitmap
+ * with 8 PCLs per byte, scanned under lynx->lock for the first clear
+ * bit.  Returns the PCL index, or -1 if all slots are in use. */
+static pcl_t alloc_pcl(struct ti_lynx *lynx)
+{
+        u8 m;
+        int i, j;
+
+        spin_lock(&lynx->lock);
+        /* FIXME - use ffz() to make this readable */
+        for (i = 0; i < (LOCALRAM_SIZE / 1024); i++) {
+                m = lynx->pcl_bmap[i];
+                for (j = 0; j < 8; j++) {
+                        if (m & 1<<j) {
+                                continue;
+                        }
+                        m |= 1<<j;
+                        lynx->pcl_bmap[i] = m;
+                        spin_unlock(&lynx->lock);
+                        return 8 * i + j;
+                }
+        }
+        spin_unlock(&lynx->lock);
+
+        return -1;
+}
+
+
+#if 0
+/* Currently unused helpers, compiled out but kept for debugging. */
+
+/* Return PCL @pclid to the bitmap; complains if it was not allocated.
+ * NOTE(review): off/bit are computed before the pclid < 0 guard --
+ * harmless (they are not used before the return) but reads oddly. */
+static void free_pcl(struct ti_lynx *lynx, pcl_t pclid)
+{
+        int off, bit;
+
+        off = pclid / 8;
+        bit = pclid % 8;
+
+        if (pclid < 0) {
+                return;
+        }
+
+        spin_lock(&lynx->lock);
+        if (lynx->pcl_bmap[off] & 1<<bit) {
+                lynx->pcl_bmap[off] &= ~(1<<bit);
+        } else {
+                PRINT(KERN_ERR, lynx->id,
+                      "attempted to free unallocated PCL %d", pclid);
+        }
+        spin_unlock(&lynx->lock);
+}
+
+/* functions useful for debugging */
+
+/* Dump a PCL's control fields and its 13 buffer descriptors to the log. */
+static void pretty_print_pcl(const struct ti_pcl *pcl)
+{
+        int i;
+
+        printk("PCL next %08x, userdata %08x, status %08x, remtrans %08x, nextbuf %08x\n",
+               pcl->next, pcl->user_data, pcl->pcl_status,
+               pcl->remaining_transfer_count, pcl->next_data_buffer);
+
+        printk("PCL");
+        for (i=0; i<13; i++) {
+                printk(" c%x:%08x d%x:%08x",
+                       i, pcl->buffer[i].control, i, pcl->buffer[i].pointer);
+                if (!(i & 0x3) && (i != 12)) printk("\nPCL");
+        }
+        printk("\n");
+}
+
+/* Fetch PCL @pclid from card memory and dump it. */
+static void print_pcl(const struct ti_lynx *lynx, pcl_t pclid)
+{
+        struct ti_pcl pcl;
+
+        get_pcl(lynx, pclid, &pcl);
+        pretty_print_pcl(&pcl);
+}
+#endif
+
+
+
+/***********************************
+ * IEEE-1394 functionality section *
+ ***********************************/
+
+
+/* Read PHY register @addr (0..15) through the LINK_PHY register.
+ * Busy-waits (up to 10000 polls) under phy_reg_lock until the address
+ * field of the read-back matches the request.
+ * Returns the 8-bit register value, or -1 on bad address or timeout. */
+static int get_phy_reg(struct ti_lynx *lynx, int addr)
+{
+        int retval;
+        int i = 0;
+
+        unsigned long flags;
+
+        if (addr > 15) {
+                PRINT(KERN_ERR, lynx->id,
+                      "%s: PHY register address %d out of range",
+		      __FUNCTION__, addr);
+                return -1;
+        }
+
+        spin_lock_irqsave(&lynx->phy_reg_lock, flags);
+
+        reg_write(lynx, LINK_PHY, LINK_PHY_READ | LINK_PHY_ADDR(addr));
+        do {
+                retval = reg_read(lynx, LINK_PHY);
+
+                if (i > 10000) {
+                        PRINT(KERN_ERR, lynx->id, "%s: runaway loop, aborting",
+			      __FUNCTION__);
+                        retval = -1;
+                        break;
+                }
+                i++;
+        } while ((retval & 0xf00) != LINK_PHY_RADDR(addr));
+
+        /* ack the PHY-register-received interrupt */
+        reg_write(lynx, LINK_INT_STATUS, LINK_INT_PHY_REG_RCVD);
+        spin_unlock_irqrestore(&lynx->phy_reg_lock, flags);
+
+        if (retval != -1) {
+                return retval & 0xff;
+        } else {
+                return -1;
+        }
+}
+
+/* Write the 8-bit value @val to PHY register @addr (0..15) via the
+ * LINK_PHY register.  The write is fire-and-forget: completion is not
+ * polled.  Returns 0 on success, -1 if addr or val is out of range. */
+static int set_phy_reg(struct ti_lynx *lynx, int addr, int val)
+{
+        unsigned long flags;
+
+        if (addr > 15) {
+                PRINT(KERN_ERR, lynx->id,
+                      "%s: PHY register address %d out of range", __FUNCTION__, addr);
+                return -1;
+        }
+
+        if (val > 0xff) {
+                PRINT(KERN_ERR, lynx->id,
+                      "%s: PHY register value %d out of range", __FUNCTION__, val);
+                return -1;
+        }
+
+        spin_lock_irqsave(&lynx->phy_reg_lock, flags);
+
+        reg_write(lynx, LINK_PHY, LINK_PHY_WRITE | LINK_PHY_ADDR(addr)
+                  | LINK_PHY_WDATA(val));
+
+        spin_unlock_irqrestore(&lynx->phy_reg_lock, flags);
+
+        return 0;
+}
+
+/* Select PHY register page @page (0..7) by rewriting bits 7..5 of PHY
+ * register 7 while preserving its low five bits.
+ * Returns 0 on success, -1 on bad page or failed register read. */
+static int sel_phy_reg_page(struct ti_lynx *lynx, int page)
+{
+        int reg;
+
+        if (page > 7) {
+                PRINT(KERN_ERR, lynx->id,
+                      "%s: PHY page %d out of range", __FUNCTION__, page);
+                return -1;
+        }
+
+        reg = get_phy_reg(lynx, 7);
+        if (reg != -1) {
+                reg &= 0x1f;
+                reg |= (page << 5);
+                set_phy_reg(lynx, 7, reg);
+                return 0;
+        } else {
+                return -1;
+        }
+}
+
+#if 0 /* not needed at this time */
+/* Select PHY register port @port (0..15) via the low nibble of PHY
+ * register 7, preserving the page bits.  Compiled out; counterpart to
+ * sel_phy_reg_page() above. */
+static int sel_phy_reg_port(struct ti_lynx *lynx, int port)
+{
+        int reg;
+
+        if (port > 15) {
+                PRINT(KERN_ERR, lynx->id,
+                      "%s: PHY port %d out of range", __FUNCTION__, port);
+                return -1;
+        }
+
+        reg = get_phy_reg(lynx, 7);
+        if (reg != -1) {
+                reg &= 0xf0;
+                reg |= port;
+                set_phy_reg(lynx, 7, reg);
+                return 0;
+        } else {
+                return -1;
+        }
+}
+#endif
+
+/* Read the 24-bit PHY vendor ID from page 1, registers 10..12, and log
+ * it.  Errors from get_phy_reg() (-1) are not checked here. */
+static u32 get_phy_vendorid(struct ti_lynx *lynx)
+{
+        u32 pvid = 0;
+        sel_phy_reg_page(lynx, 1);
+        pvid |= (get_phy_reg(lynx, 10) << 16);
+        pvid |= (get_phy_reg(lynx, 11) << 8);
+        pvid |= get_phy_reg(lynx, 12);
+        PRINT(KERN_INFO, lynx->id, "PHY vendor id 0x%06x", pvid);
+        return pvid;
+}
+
+/* Read the 24-bit PHY product ID from page 1, registers 13..15, and
+ * log it.  Same unchecked-error caveat as get_phy_vendorid(). */
+static u32 get_phy_productid(struct ti_lynx *lynx)
+{
+        u32 id = 0;
+        sel_phy_reg_page(lynx, 1);
+        id |= (get_phy_reg(lynx, 13) << 16);
+        id |= (get_phy_reg(lynx, 14) << 8);
+        id |= get_phy_reg(lynx, 15);
+        PRINT(KERN_INFO, lynx->id, "PHY product id 0x%06x", id);
+        return id;
+}
+
+/* Synthesize the local node's SelfID quadlet from the cached PHY
+ * register 0 plus registers 1-6.  Used by handle_selfid() when the PHY
+ * is not 1394a compliant (phyic.reg_1394a clear).  Returns the quadlet
+ * already converted to big-endian bus order. */
+static quadlet_t generate_own_selfid(struct ti_lynx *lynx,
+                                     struct hpsb_host *host)
+{
+        quadlet_t lsid;
+        char phyreg[7];
+        int i;
+
+        phyreg[0] = lynx->phy_reg0;
+        for (i = 1; i < 7; i++) {
+                phyreg[i] = get_phy_reg(lynx, i);
+        }
+
+        /* FIXME? We assume a TSB21LV03A phy here.  This code doesn't support
+           more than 3 ports on the PHY anyway. */
+
+        lsid = 0x80400000 | ((phyreg[0] & 0xfc) << 22);
+        lsid |= (phyreg[1] & 0x3f) << 16; /* gap count */
+        lsid |= (phyreg[2] & 0xc0) << 8; /* max speed */
+	if (!hpsb_disable_irm)
+		lsid |= (phyreg[6] & 0x01) << 11; /* contender (phy dependent) */
+        /* lsid |= 1 << 11; *//* set contender (hack) */
+        lsid |= (phyreg[6] & 0x10) >> 3; /* initiated reset */
+
+        /* encode the connection/peer state of each reported port */
+        for (i = 0; i < (phyreg[2] & 0xf); i++) { /* ports */
+                if (phyreg[3 + i] & 0x4) {
+                        lsid |= (((phyreg[3 + i] & 0x8) | 0x10) >> 3)
+                                << (6 - i*2);
+                } else {
+                        lsid |= 1 << (6 - i*2);
+                }
+        }
+
+        cpu_to_be32s(&lsid);
+        PRINT(KERN_DEBUG, lynx->id, "generated own selfid 0x%x", lsid);
+        return lsid;
+}
+
+/* Process the SelfID data in lynx->rcv_page after a bus reset:
+ * byte-swap it, forward each consistent quadlet to the ieee1394 core
+ * (inserting the locally generated SelfID at the right position for
+ * non-1394a PHYs), program LINK_ID with our phy id, then re-enable
+ * async rx/tx (and cycle master if we are root). */
+static void handle_selfid(struct ti_lynx *lynx, struct hpsb_host *host)
+{
+        quadlet_t *q = lynx->rcv_page;
+        int phyid, isroot, size;
+        quadlet_t lsid = 0;
+        int i;
+
+        /* both are reset to -1 when a bus reset is initiated elsewhere */
+        if (lynx->phy_reg0 == -1 || lynx->selfid_size == -1) return;
+
+        size = lynx->selfid_size;
+        phyid = lynx->phy_reg0;
+
+        /* NOTE(review): only the first min(size, 16) bytes are
+         * byte-swapped here although the loop below walks all of
+         * `size` -- confirm larger SelfID sets are swapped elsewhere. */
+        i = (size > 16 ? 16 : size) / 4 - 1;
+        while (i >= 0) {
+                cpu_to_be32s(&q[i]);
+                i--;
+        }
+
+        /* old PHYs don't report their own SelfID, so fabricate one */
+        if (!lynx->phyic.reg_1394a) {
+                lsid = generate_own_selfid(lynx, host);
+        }
+
+        isroot = (phyid & 2) != 0;
+        phyid >>= 2;
+        PRINT(KERN_INFO, lynx->id, "SelfID process finished (phyid %d, %s)",
+              phyid, (isroot ? "root" : "not root"));
+        reg_write(lynx, LINK_ID, (0xffc0 | phyid) << 16);
+
+        if (!lynx->phyic.reg_1394a && !size) {
+                hpsb_selfid_received(host, lsid);
+        }
+
+        /* each received SelfID is a quadlet followed by its complement */
+        while (size > 0) {
+                struct selfid *sid = (struct selfid *)q;
+
+                /* slot our own SelfID in before the next-higher phy id */
+                if (!lynx->phyic.reg_1394a && !sid->extended
+                    && (sid->phy_id == (phyid + 1))) {
+                        hpsb_selfid_received(host, lsid);
+                }
+
+                if (q[0] == ~q[1]) {
+                        PRINT(KERN_DEBUG, lynx->id, "SelfID packet 0x%x rcvd",
+                              q[0]);
+                        hpsb_selfid_received(host, q[0]);
+                } else {
+                        PRINT(KERN_INFO, lynx->id,
+                              "inconsistent selfid 0x%x/0x%x", q[0], q[1]);
+                }
+                q += 2;
+                size -= 8;
+        }
+
+        /* if we are root, our SelfID comes last */
+        if (!lynx->phyic.reg_1394a && isroot && phyid != 0) {
+                hpsb_selfid_received(host, lsid);
+        }
+
+        hpsb_selfid_complete(host, phyid, isroot);
+
+        if (host->in_bus_reset) return; /* in bus reset again */
+
+        if (isroot) reg_set_bits(lynx, LINK_CONTROL, LINK_CONTROL_CYCMASTER); //FIXME: I do not think, we need this here
+        reg_set_bits(lynx, LINK_CONTROL,
+                     LINK_CONTROL_RCV_CMP_VALID | LINK_CONTROL_TX_ASYNC_EN
+                     | LINK_CONTROL_RX_ASYNC_EN | LINK_CONTROL_CYCTIMEREN);
+}
+
+
+
+/* This must be called with the respective queue_lock held. */
+/* Dequeue the head packet of the async or iso-send queue (@what picks
+ * which), build a two-buffer PCL for it -- buffer 0 carries the header,
+ * buffer 1 the optional payload -- and start the DMA channel.  Only one
+ * packet may be in flight per queue; pcl_queue holds it until the
+ * completion interrupt. */
+static void send_next(struct ti_lynx *lynx, int what)
+{
+        struct ti_pcl pcl;
+        struct lynx_send_data *d;
+        struct hpsb_packet *packet;
+
+        d = (what == hpsb_iso ? &lynx->iso_send : &lynx->async);
+        if (!list_empty(&d->pcl_queue)) {
+                PRINT(KERN_ERR, lynx->id, "trying to queue a new packet in nonempty fifo");
+                BUG();
+        }
+
+        packet = driver_packet(d->queue.next);
+	list_move_tail(&packet->driver_list, &d->pcl_queue);
+
+        /* map header (and payload, if any) for device access */
+        d->header_dma = pci_map_single(lynx->dev, packet->header,
+                                       packet->header_size, PCI_DMA_TODEVICE);
+        if (packet->data_size) {
+                d->data_dma = pci_map_single(lynx->dev, packet->data,
+                                             packet->data_size,
+                                             PCI_DMA_TODEVICE);
+        } else {
+                d->data_dma = 0;
+        }
+
+        pcl.next = PCL_NEXT_INVALID;
+        pcl.async_error_next = PCL_NEXT_INVALID;
+        pcl.pcl_status = 0;
+        pcl.buffer[0].control = packet->speed_code << 14 | packet->header_size;
+#ifndef __BIG_ENDIAN
+        pcl.buffer[0].control |= PCL_BIGENDIAN;
+#endif
+        pcl.buffer[0].pointer = d->header_dma;
+        pcl.buffer[1].control = PCL_LAST_BUFF | packet->data_size;
+        pcl.buffer[1].pointer = d->data_dma;
+
+        /* select the transmit command for this packet type */
+        switch (packet->type) {
+        case hpsb_async:
+                pcl.buffer[0].control |= PCL_CMD_XMT;
+                break;
+        case hpsb_iso:
+                pcl.buffer[0].control |= PCL_CMD_XMT | PCL_ISOMODE;
+                break;
+        case hpsb_raw:
+                pcl.buffer[0].control |= PCL_CMD_UNFXMT;
+                break;
+        }
+
+        put_pcl(lynx, d->pcl, &pcl);
+        run_pcl(lynx, d->pcl_start, d->channel);
+}
+
+
+/* called from subsystem core */
+/* hpsb_host_driver transmit hook: validate @packet, put it on the
+ * matching send queue and, if that queue was idle, kick off
+ * transmission via send_next().  Returns 0 on success, -EOVERFLOW for
+ * payloads >= 4096 bytes, -EINVAL for unknown packet types. */
+static int lynx_transmit(struct hpsb_host *host, struct hpsb_packet *packet)
+{
+        struct ti_lynx *lynx = host->hostdata;
+        struct lynx_send_data *d;
+        unsigned long flags;
+
+        if (packet->data_size >= 4096) {
+                PRINT(KERN_ERR, lynx->id, "transmit packet data too big (%Zd)",
+                      packet->data_size);
+                return -EOVERFLOW;
+        }
+
+        switch (packet->type) {
+        case hpsb_async:
+        case hpsb_raw:
+                d = &lynx->async;
+                break;
+        case hpsb_iso:
+                d = &lynx->iso_send;
+                break;
+        default:
+                PRINT(KERN_ERR, lynx->id, "invalid packet type %d",
+                      packet->type);
+                return -EINVAL;
+        }
+
+        /* header[3] holds the data quadlet for these tcodes; convert it
+         * to big-endian bus order before DMA */
+        if (packet->tcode == TCODE_WRITEQ
+            || packet->tcode == TCODE_READQ_RESPONSE) {
+                cpu_to_be32s(&packet->header[3]);
+        }
+
+        spin_lock_irqsave(&d->queue_lock, flags);
+
+	list_add_tail(&packet->driver_list, &d->queue);
+	if (list_empty(&d->pcl_queue))
+                send_next(lynx, packet->type);
+
+        spin_unlock_irqrestore(&d->queue_lock, flags);
+
+        return 0;
+}
+
+
+/* called from subsystem core */
+/* Host controller control operations.  Bus resets are performed by
+ * writing PHY registers directly; the SHORT_* variants require a 1394a
+ * phy and deliberately fall through to the corresponding long reset on
+ * older phys.  Returns 0 (or a register value for GET_CYCLE_COUNTER) on
+ * success, -1 on failure. */
+static int lynx_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
+{
+        struct ti_lynx *lynx = host->hostdata;
+        int retval = 0;
+        struct hpsb_packet *packet;
+	LIST_HEAD(packet_list);
+        unsigned long flags;
+	int phy_reg;
+
+        switch (cmd) {
+        case RESET_BUS:
+                /* a bus reset is already in progress - nothing to do */
+                if (reg_read(lynx, LINK_INT_STATUS) & LINK_INT_PHY_BUSRESET) {
+                        retval = 0;
+                        break;
+                }
+
+		switch (arg) {
+		case SHORT_RESET:
+			if (lynx->phyic.reg_1394a) {
+				phy_reg = get_phy_reg(lynx, 5);
+				if (phy_reg == -1) {
+					PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
+					retval = -1;
+					break;
+				}
+				phy_reg |= 0x40;
+
+				PRINT(KERN_INFO, lynx->id, "resetting bus (short bus reset) on request");
+
+				lynx->selfid_size = -1;
+				lynx->phy_reg0 = -1;
+				set_phy_reg(lynx, 5, phy_reg); /* set ISBR */
+				break;
+			} else {
+				PRINT(KERN_INFO, lynx->id, "cannot do short bus reset, because of old phy");
+				/* fall through to long bus reset */
+			}
+		case LONG_RESET:
+			phy_reg = get_phy_reg(lynx, 1);
+			if (phy_reg == -1) {
+				PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
+				retval = -1;
+				break;
+			}
+			phy_reg |= 0x40;
+
+			PRINT(KERN_INFO, lynx->id, "resetting bus (long bus reset) on request");
+
+			lynx->selfid_size = -1;
+			lynx->phy_reg0 = -1;
+			set_phy_reg(lynx, 1, phy_reg); /* clear RHB, set IBR */
+			break;
+		case SHORT_RESET_NO_FORCE_ROOT:
+			if (lynx->phyic.reg_1394a) {
+				phy_reg = get_phy_reg(lynx, 1);
+				if (phy_reg == -1) {
+					PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
+					retval = -1;
+					break;
+				}
+				if (phy_reg & 0x80) {
+					phy_reg &= ~0x80;
+					set_phy_reg(lynx, 1, phy_reg); /* clear RHB */
+				}
+
+				phy_reg = get_phy_reg(lynx, 5);
+				if (phy_reg == -1) {
+					PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
+					retval = -1;
+					break;
+				}
+				phy_reg |= 0x40;
+
+				PRINT(KERN_INFO, lynx->id, "resetting bus (short bus reset, no force_root) on request");
+
+				lynx->selfid_size = -1;
+				lynx->phy_reg0 = -1;
+				set_phy_reg(lynx, 5, phy_reg); /* set ISBR */
+				break;
+			} else {
+				PRINT(KERN_INFO, lynx->id, "cannot do short bus reset, because of old phy");
+				/* fall through to long bus reset */
+			}
+		case LONG_RESET_NO_FORCE_ROOT:
+			phy_reg = get_phy_reg(lynx, 1);
+			if (phy_reg == -1) {
+				PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
+				retval = -1;
+				break;
+			}
+			phy_reg &= ~0x80;
+			phy_reg |= 0x40;
+
+			PRINT(KERN_INFO, lynx->id, "resetting bus (long bus reset, no force_root) on request");
+
+			lynx->selfid_size = -1;
+			lynx->phy_reg0 = -1;
+			set_phy_reg(lynx, 1, phy_reg); /* clear RHB, set IBR */
+			break;
+		case SHORT_RESET_FORCE_ROOT:
+			if (lynx->phyic.reg_1394a) {
+				phy_reg = get_phy_reg(lynx, 1);
+				if (phy_reg == -1) {
+					PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
+					retval = -1;
+					break;
+				}
+				if (!(phy_reg & 0x80)) {
+					phy_reg |= 0x80;
+					set_phy_reg(lynx, 1, phy_reg); /* set RHB */
+				}
+
+				phy_reg = get_phy_reg(lynx, 5);
+				if (phy_reg == -1) {
+					PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
+					retval = -1;
+					break;
+				}
+				phy_reg |= 0x40;
+
+				PRINT(KERN_INFO, lynx->id, "resetting bus (short bus reset, force_root set) on request");
+
+				lynx->selfid_size = -1;
+				lynx->phy_reg0 = -1;
+				set_phy_reg(lynx, 5, phy_reg); /* set ISBR */
+				break;
+			} else {
+				PRINT(KERN_INFO, lynx->id, "cannot do short bus reset, because of old phy");
+				/* fall through to long bus reset */
+			}
+		case LONG_RESET_FORCE_ROOT:
+			phy_reg = get_phy_reg(lynx, 1);
+			if (phy_reg == -1) {
+				PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
+				retval = -1;
+				break;
+			}
+			phy_reg |= 0xc0;
+
+			PRINT(KERN_INFO, lynx->id, "resetting bus (long bus reset, force_root set) on request");
+
+			lynx->selfid_size = -1;
+			lynx->phy_reg0 = -1;
+			set_phy_reg(lynx, 1, phy_reg); /* set IBR and RHB */
+			break;
+		default:
+			PRINT(KERN_ERR, lynx->id, "unknown argument for reset_bus command %d", arg);
+			retval = -1;
+		}
+
+                break;
+
+        case GET_CYCLE_COUNTER:
+                retval = reg_read(lynx, CYCLE_TIMER);
+                break;
+
+        case SET_CYCLE_COUNTER:
+                reg_write(lynx, CYCLE_TIMER, arg);
+                break;
+
+        case SET_BUS_ID:
+                reg_write(lynx, LINK_ID,
+                          (arg << 22) | (reg_read(lynx, LINK_ID) & 0x003f0000));
+                break;
+
+        case ACT_CYCLE_MASTER:
+                if (arg) {
+                        reg_set_bits(lynx, LINK_CONTROL,
+                                     LINK_CONTROL_CYCMASTER);
+                } else {
+                        reg_clear_bits(lynx, LINK_CONTROL,
+                                       LINK_CONTROL_CYCMASTER);
+                }
+                break;
+
+        case CANCEL_REQUESTS:
+                spin_lock_irqsave(&lynx->async.queue_lock, flags);
+
+                /* stop the channel, then move the whole pending queue to
+                 * a private list so it can be aborted outside the lock */
+                reg_write(lynx, DMA_CHAN_CTRL(CHANNEL_ASYNC_SEND), 0);
+		list_splice(&lynx->async.queue, &packet_list);
+		INIT_LIST_HEAD(&lynx->async.queue);
+
+                if (list_empty(&lynx->async.pcl_queue)) {
+                        spin_unlock_irqrestore(&lynx->async.queue_lock, flags);
+                        PRINTD(KERN_DEBUG, lynx->id, "no async packet in PCL to cancel");
+                } else {
+                        struct ti_pcl pcl;
+                        u32 ack;
+                        struct hpsb_packet *packet;
+
+                        PRINT(KERN_INFO, lynx->id, "cancelling async packet, that was already in PCL");
+
+                        get_pcl(lynx, lynx->async.pcl, &pcl);
+
+                        packet = driver_packet(lynx->async.pcl_queue.next);
+			list_del_init(&packet->driver_list);
+
+                        pci_unmap_single(lynx->dev, lynx->async.header_dma,
+                                         packet->header_size, PCI_DMA_TODEVICE);
+                        if (packet->data_size) {
+                                pci_unmap_single(lynx->dev, lynx->async.data_dma,
+                                                 packet->data_size, PCI_DMA_TODEVICE);
+                        }
+
+                        spin_unlock_irqrestore(&lynx->async.queue_lock, flags);
+
+                        /* extract the ack code the link layer recorded in
+                         * the PCL status, if the packet completed at all */
+                        if (pcl.pcl_status & DMA_CHAN_STAT_PKTCMPL) {
+                                if (pcl.pcl_status & DMA_CHAN_STAT_SPECIALACK) {
+                                        ack = (pcl.pcl_status >> 15) & 0xf;
+                                        PRINTD(KERN_INFO, lynx->id, "special ack %d", ack);
+                                        ack = (ack == 1 ? ACKX_TIMEOUT : ACKX_SEND_ERROR);
+                                } else {
+                                        ack = (pcl.pcl_status >> 15) & 0xf;
+                                }
+                        } else {
+                                PRINT(KERN_INFO, lynx->id, "async packet was not completed");
+                                ack = ACKX_ABORTED;
+                        }
+                        hpsb_packet_sent(host, packet, ack);
+                }
+
+		/* report every packet that never reached the hardware as
+		 * aborted */
+		while (!list_empty(&packet_list)) {
+			packet = driver_packet(packet_list.next);
+			list_del_init(&packet->driver_list);
+			hpsb_packet_sent(host, packet, ACKX_ABORTED);
+		}
+
+                break;
+
+        case ISO_LISTEN_CHANNEL:
+                spin_lock_irqsave(&lynx->iso_rcv.lock, flags);
+
+                /* enable iso receive DMA when the first channel is opened */
+                if (lynx->iso_rcv.chan_count++ == 0) {
+                        reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV),
+                                  DMA_WORD1_CMP_ENABLE_MASTER);
+                }
+
+                spin_unlock_irqrestore(&lynx->iso_rcv.lock, flags);
+                break;
+
+        case ISO_UNLISTEN_CHANNEL:
+                spin_lock_irqsave(&lynx->iso_rcv.lock, flags);
+
+                /* disable iso receive DMA when the last channel closes */
+                if (--lynx->iso_rcv.chan_count == 0) {
+                        reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV),
+                                  0);
+                }
+
+                spin_unlock_irqrestore(&lynx->iso_rcv.lock, flags);
+                break;
+
+        default:
+                PRINT(KERN_ERR, lynx->id, "unknown devctl command %d", cmd);
+                retval = -1;
+        }
+
+        return retval;
+}
+
+
+/***************************************
+ * IEEE-1394 functionality section END *
+ ***************************************/
+
+#ifdef CONFIG_IEEE1394_PCILYNX_PORTS
+/* VFS functions for local bus / aux device access.  Access to those
+ * is implemented as a character device instead of block devices
+ * because buffers are not wanted for this.  Therefore llseek (from
+ * VFS) can be used for these char devices with obvious effects.
+ */
+static int mem_open(struct inode*, struct file*);
+static int mem_release(struct inode*, struct file*);
+static unsigned int aux_poll(struct file*, struct poll_table_struct*);
+static loff_t mem_llseek(struct file*, loff_t, int);
+static ssize_t mem_read (struct file*, char*, size_t, loff_t*);
+static ssize_t mem_write(struct file*, const char*, size_t, loff_t*);
+
+
+/* file operations shared by the aux/ROM/RAM char devices; only the aux
+ * device supports poll (for interrupt notification) */
+static struct file_operations aux_ops = {
+	.owner =	THIS_MODULE,
+        .read =         mem_read,
+        .write =        mem_write,
+        .poll =         aux_poll,
+        .llseek =       mem_llseek,
+        .open =         mem_open,
+        .release =      mem_release,
+};
+
+
+/* Initialize the local-bus DMA PCL; mem_dmaread() fills in the buffer
+ * descriptors before each transfer. */
+static void aux_setup_pcls(struct ti_lynx *lynx)
+{
+        struct ti_pcl pcl;
+
+        pcl.next = PCL_NEXT_INVALID;
+        /* stash the PCL's own bus address in user_data */
+        pcl.user_data = pcl_bus(lynx, lynx->dmem_pcl);
+        put_pcl(lynx, lynx->dmem_pcl, &pcl);
+}
+
+/* Open one of the aux/ROM/RAM char devices.  The minor number encodes
+ * both the device type (by range) and the card index (by offset within
+ * the range); a memdata cookie describing both is stored in
+ * file->private_data. */
+static int mem_open(struct inode *inode, struct file *file)
+{
+        int cid = iminor(inode);
+        enum { t_rom, t_aux, t_ram } type;
+        struct memdata *md;
+
+        if (cid < PCILYNX_MINOR_AUX_START) {
+                /* just for completeness */
+                return -ENXIO;
+        } else if (cid < PCILYNX_MINOR_ROM_START) {
+                cid -= PCILYNX_MINOR_AUX_START;
+                if (cid >= num_of_cards || !cards[cid].aux_port)
+                        return -ENXIO;
+                type = t_aux;
+        } else if (cid < PCILYNX_MINOR_RAM_START) {
+                cid -= PCILYNX_MINOR_ROM_START;
+                if (cid >= num_of_cards || !cards[cid].local_rom)
+                        return -ENXIO;
+                type = t_rom;
+        } else {
+                /* WARNING: Know what you are doing when opening RAM.
+                 * It is currently used inside the driver! */
+                cid -= PCILYNX_MINOR_RAM_START;
+                if (cid >= num_of_cards || !cards[cid].local_ram)
+                        return -ENXIO;
+                type = t_ram;
+        }
+
+        md = (struct memdata *)kmalloc(sizeof(struct memdata), SLAB_KERNEL);
+        if (md == NULL)
+                return -ENOMEM;
+
+        md->lynx = &cards[cid];
+        md->cid = cid;
+
+        switch (type) {
+        case t_rom:
+                md->type = rom;
+                break;
+        case t_ram:
+                md->type = ram;
+                break;
+        case t_aux:
+                /* snapshot the interrupt counter so poll() can detect
+                 * aux interrupts that arrive after the open */
+                atomic_set(&md->aux_intr_last_seen,
+                           atomic_read(&cards[cid].aux_intr_seen));
+                md->type = aux;
+                break;
+        }
+
+        file->private_data = md;
+
+        return 0;
+}
+
+/* Release the memdata cookie allocated in mem_open(). */
+static int mem_release(struct inode *inode, struct file *file)
+{
+        kfree(file->private_data);
+        return 0;
+}
+
+/* poll() for the memory char devices.  Read/write never block, so those
+ * bits are always set; for the aux device POLLPRI additionally signals
+ * that an aux interrupt occurred since the last poll. */
+static unsigned int aux_poll(struct file *file, poll_table *pt)
+{
+        struct memdata *md = (struct memdata *)file->private_data;
+        int cid = md->cid;
+        unsigned int mask;
+
+        /* reading and writing is always allowed */
+        mask = POLLIN | POLLRDNORM | POLLOUT | POLLWRNORM;
+
+        if (md->type == aux) {
+                poll_wait(file, &cards[cid].aux_intr_wait, pt);
+
+                /* one POLLPRI per interrupt: advance our snapshot so the
+                 * same event is not reported twice */
+                if (atomic_read(&md->aux_intr_last_seen)
+                    != atomic_read(&cards[cid].aux_intr_seen)) {
+                        mask |= POLLPRI;
+                        atomic_inc(&md->aux_intr_last_seen);
+                }
+        }
+
+        return mask;
+}
+
+/* llseek over the flat (PCILYNX_MAX_MEMORY + 1)-byte address space of
+ * the rom/ram/aux windows.  Declared static to match the forward
+ * declaration above (the definition previously omitted the storage
+ * class, giving the symbol inconsistent linkage). */
+static loff_t mem_llseek(struct file *file, loff_t offs, int orig)
+{
+        loff_t newoffs;
+
+        switch (orig) {
+        case 0: /* SEEK_SET */
+                newoffs = offs;
+                break;
+        case 1: /* SEEK_CUR */
+                newoffs = offs + file->f_pos;
+                break;
+        case 2: /* SEEK_END */
+                newoffs = PCILYNX_MAX_MEMORY + 1 + offs;
+                break;
+        default:
+                return -EINVAL;
+        }
+
+        /* seeking to one byte past the window (EOF position) is valid */
+        if (newoffs < 0 || newoffs > PCILYNX_MAX_MEMORY + 1) return -EINVAL;
+
+        file->f_pos = newoffs;
+        return newoffs;
+}
+
+/*
+ * do not DMA if count is too small because this will have a serious impact
+ * on performance - the value 2400 was found by experiment and may not work
+ * everywhere as good as here - use mem_mindma option for modules to change
+ */
+static short mem_mindma = 2400;
+module_param(mem_mindma, short, 0444);
+MODULE_PARM_DESC(mem_mindma, "Minimum amount of data required to use DMA");
+
+/* DMA up to 53196 bytes (13 PCL buffers of 4092 bytes each) from the
+ * local bus window at @offset into the bounce buffer at bus address
+ * @physbuf.  Sleeps interruptibly until the channel goes idle.  Returns
+ * the number of bytes transferred (count rounded down to a multiple of
+ * 4) or -EINTR.  Caller holds mem_dma_mutex. */
+static ssize_t mem_dmaread(struct memdata *md, u32 physbuf, ssize_t count,
+                           int offset)
+{
+        pcltmp_t pcltmp;
+        struct ti_pcl *pcl;
+        size_t retval;
+        int i;
+        DECLARE_WAITQUEUE(wait, current);
+
+        count &= ~3;
+        count = min(count, 53196);
+        retval = count;
+
+        if (reg_read(md->lynx, DMA_CHAN_CTRL(CHANNEL_LOCALBUS))
+            & DMA_CHAN_CTRL_BUSY) {
+                PRINT(KERN_WARNING, md->lynx->id, "DMA ALREADY ACTIVE!");
+        }
+
+        reg_write(md->lynx, LBUS_ADDR, md->type | offset);
+
+        /* chop the transfer into 4092-byte PCL buffer descriptors */
+        pcl = edit_pcl(md->lynx, md->lynx->dmem_pcl, &pcltmp);
+        pcl->buffer[0].control = PCL_CMD_LBUS_TO_PCI | min(count, 4092);
+        pcl->buffer[0].pointer = physbuf;
+        count -= 4092;
+
+        i = 0;
+        while (count > 0) {
+                i++;
+                pcl->buffer[i].control = min(count, 4092);
+                pcl->buffer[i].pointer = physbuf + i * 4092;
+                count -= 4092;
+        }
+        pcl->buffer[i].control |= PCL_LAST_BUFF;
+        commit_pcl(md->lynx, md->lynx->dmem_pcl, &pcltmp);
+
+        /* register on the wait queue before starting the DMA so the
+         * completion interrupt cannot be missed */
+        set_current_state(TASK_INTERRUPTIBLE);
+        add_wait_queue(&md->lynx->mem_dma_intr_wait, &wait);
+        run_sub_pcl(md->lynx, md->lynx->dmem_pcl, 2, CHANNEL_LOCALBUS);
+
+        schedule();
+        while (reg_read(md->lynx, DMA_CHAN_CTRL(CHANNEL_LOCALBUS))
+               & DMA_CHAN_CTRL_BUSY) {
+                if (signal_pending(current)) {
+                        retval = -EINTR;
+                        break;
+                }
+                schedule();
+        }
+
+        reg_write(md->lynx, DMA_CHAN_CTRL(CHANNEL_LOCALBUS), 0);
+        remove_wait_queue(&md->lynx->mem_dma_intr_wait, &wait);
+
+        if (reg_read(md->lynx, DMA_CHAN_CTRL(CHANNEL_LOCALBUS))
+            & DMA_CHAN_CTRL_BUSY) {
+                PRINT(KERN_ERR, md->lynx->id, "DMA STILL ACTIVE!");
+            }
+
+        return retval;
+}
+
+/* Read from the rom/ram/aux window into userspace via the per-card
+ * bounce buffer.  Small reads (< mem_mindma) use memcpy_fromio; larger
+ * reads use local-bus DMA, with unaligned head and sub-quadlet tail
+ * bytes copied by hand since the DMA works in whole quadlets. */
+static ssize_t mem_read(struct file *file, char *buffer, size_t count,
+                        loff_t *offset)
+{
+        struct memdata *md = (struct memdata *)file->private_data;
+        ssize_t bcount;
+        size_t alignfix;
+	loff_t off = *offset; /* avoid useless 64bit-arithmetic */
+        ssize_t retval;
+        void *membase;
+
+        /* clamp the request to the end of the memory window */
+        if ((off + count) > PCILYNX_MAX_MEMORY+1) {
+                count = PCILYNX_MAX_MEMORY+1 - off;
+        }
+        if (count == 0 || off > PCILYNX_MAX_MEMORY) {
+                return -ENOSPC;
+        }
+
+        switch (md->type) {
+        case rom:
+                membase = md->lynx->local_rom;
+                break;
+        case ram:
+                membase = md->lynx->local_ram;
+                break;
+        case aux:
+                membase = md->lynx->aux_port;
+                break;
+        default:
+                panic("pcilynx%d: unsupported md->type %d in %s",
+                      md->lynx->id, md->type, __FUNCTION__);
+        }
+
+        /* serialize use of the shared DMA bounce buffer */
+        down(&md->lynx->mem_dma_mutex);
+
+        if (count < mem_mindma) {
+                memcpy_fromio(md->lynx->mem_dma_buffer, membase+off, count);
+                goto out;
+        }
+
+        /* copy unaligned leading bytes by hand to quadlet-align off */
+        bcount = count;
+        alignfix = 4 - (off % 4);
+        if (alignfix != 4) {
+                if (bcount < alignfix) {
+                        alignfix = bcount;
+                }
+                memcpy_fromio(md->lynx->mem_dma_buffer, membase+off,
+                              alignfix);
+                if (bcount == alignfix) {
+                        goto out;
+                }
+                bcount -= alignfix;
+                off += alignfix;
+        }
+
+        /* DMA whole quadlets; mem_dmaread() may transfer less than
+         * asked, so loop until fewer than 4 bytes remain */
+        while (bcount >= 4) {
+                retval = mem_dmaread(md, md->lynx->mem_dma_buffer_dma
+                                     + count - bcount, bcount, off);
+                if (retval < 0) return retval;
+
+                bcount -= retval;
+                off += retval;
+        }
+
+        /* trailing sub-quadlet bytes */
+        if (bcount) {
+                memcpy_fromio(md->lynx->mem_dma_buffer + count - bcount,
+                              membase+off, bcount);
+        }
+
+ out:
+        retval = copy_to_user(buffer, md->lynx->mem_dma_buffer, count);
+        up(&md->lynx->mem_dma_mutex);
+
+	if (retval) return -EFAULT;
+        *offset += count;
+        return count;
+}
+
+
+/* Write to the rom/ram/aux memory window by copying through the mapped
+ * PCI addresses (see FIXME below).  On success the position is advanced
+ * through *offset - the same pointer-based update mem_read() uses -
+ * rather than poking file->f_pos behind the VFS's back, which would be
+ * wrong for pwrite-style callers. */
+static ssize_t mem_write(struct file *file, const char *buffer, size_t count,
+                         loff_t *offset)
+{
+        struct memdata *md = (struct memdata *)file->private_data;
+
+        /* clamp the request to the end of the memory window */
+        if (((*offset) + count) > PCILYNX_MAX_MEMORY+1) {
+                count = PCILYNX_MAX_MEMORY+1 - *offset;
+        }
+        if (count == 0 || *offset > PCILYNX_MAX_MEMORY) {
+                return -ENOSPC;
+        }
+
+        /* FIXME: dereferencing pointers to PCI mem doesn't work everywhere */
+        switch (md->type) {
+        case aux:
+		if (copy_from_user(md->lynx->aux_port+(*offset), buffer, count))
+			return -EFAULT;
+                break;
+        case ram:
+		if (copy_from_user(md->lynx->local_ram+(*offset), buffer, count))
+			return -EFAULT;
+                break;
+        case rom:
+                /* the ROM may be writeable */
+		if (copy_from_user(md->lynx->local_rom+(*offset), buffer, count))
+			return -EFAULT;
+                break;
+        }
+
+        *offset += count;
+        return count;
+}
+#endif /* CONFIG_IEEE1394_PCILYNX_PORTS */
+
+
+/********************************************************
+ * Global stuff (interrupt handler, init/shutdown code) *
+ ********************************************************/
+
+
+/* Top-half interrupt handler.  Acknowledges link and PCI interrupt
+ * status, wakes aux/local-bus DMA waiters, handles bus resets and
+ * self-ID reception, completes async/iso transmit PCLs (chaining the
+ * next queued packet), restarts the async receive DMA, and defers iso
+ * receive processing to the iso_rcv tasklet. */
+static irqreturn_t lynx_irq_handler(int irq, void *dev_id,
+                             struct pt_regs *regs_are_unused)
+{
+        struct ti_lynx *lynx = (struct ti_lynx *)dev_id;
+        struct hpsb_host *host = lynx->host;
+        u32 intmask;
+        u32 linkint;
+
+        linkint = reg_read(lynx, LINK_INT_STATUS);
+        intmask = reg_read(lynx, PCI_INT_STATUS);
+
+        /* shared interrupt line - not ours */
+        if (!(intmask & PCI_INT_INT_PEND))
+		return IRQ_NONE;
+
+        PRINTD(KERN_DEBUG, lynx->id, "interrupt: 0x%08x / 0x%08x", intmask,
+               linkint);
+
+        /* acknowledge all pending interrupts at once */
+        reg_write(lynx, LINK_INT_STATUS, linkint);
+        reg_write(lynx, PCI_INT_STATUS, intmask);
+
+#ifdef CONFIG_IEEE1394_PCILYNX_PORTS
+        if (intmask & PCI_INT_AUX_INT) {
+                atomic_inc(&lynx->aux_intr_seen);
+                wake_up_interruptible(&lynx->aux_intr_wait);
+        }
+
+        if (intmask & PCI_INT_DMA_HLT(CHANNEL_LOCALBUS)) {
+                wake_up_interruptible(&lynx->mem_dma_intr_wait);
+        }
+#endif
+
+
+        if (intmask & PCI_INT_1394) {
+                if (linkint & LINK_INT_PHY_TIMEOUT) {
+                        PRINT(KERN_INFO, lynx->id, "PHY timeout occurred");
+                }
+                if (linkint & LINK_INT_PHY_BUSRESET) {
+                        PRINT(KERN_INFO, lynx->id, "bus reset interrupt");
+                        lynx->selfid_size = -1;
+                        lynx->phy_reg0 = -1;
+                        if (!host->in_bus_reset)
+                                hpsb_bus_reset(host);
+                }
+                if (linkint & LINK_INT_PHY_REG_RCVD) {
+                        u32 reg;
+
+                        spin_lock(&lynx->phy_reg_lock);
+                        reg = reg_read(lynx, LINK_PHY);
+                        spin_unlock(&lynx->phy_reg_lock);
+
+                        if (!host->in_bus_reset) {
+                                PRINT(KERN_INFO, lynx->id,
+                                      "phy reg received without reset");
+                        } else if (reg & 0xf00) {
+                                PRINT(KERN_INFO, lynx->id,
+                                      "unsolicited phy reg %d received",
+                                      (reg >> 8) & 0xf);
+                        } else {
+                                /* phy register 0 arriving during a bus
+                                 * reset completes self-ID handling */
+                                lynx->phy_reg0 = reg & 0xff;
+                                handle_selfid(lynx, host);
+                        }
+                }
+                if (linkint & LINK_INT_ISO_STUCK) {
+                        PRINT(KERN_INFO, lynx->id, "isochronous transmitter stuck");
+                }
+                if (linkint & LINK_INT_ASYNC_STUCK) {
+                        PRINT(KERN_INFO, lynx->id, "asynchronous transmitter stuck");
+                }
+                if (linkint & LINK_INT_SENT_REJECT) {
+                        PRINT(KERN_INFO, lynx->id, "sent reject");
+                }
+                if (linkint & LINK_INT_TX_INVALID_TC) {
+                        PRINT(KERN_INFO, lynx->id, "invalid transaction code");
+                }
+                if (linkint & LINK_INT_GRF_OVERFLOW) {
+                        /* flush FIFO if overflow happens during reset */
+                        if (host->in_bus_reset)
+                                reg_write(lynx, FIFO_CONTROL,
+                                          FIFO_CONTROL_GRF_FLUSH);
+                        PRINT(KERN_INFO, lynx->id, "GRF overflow");
+                }
+                if (linkint & LINK_INT_ITF_UNDERFLOW) {
+                        PRINT(KERN_INFO, lynx->id, "ITF underflow");
+                }
+                if (linkint & LINK_INT_ATF_UNDERFLOW) {
+                        PRINT(KERN_INFO, lynx->id, "ATF underflow");
+                }
+        }
+
+        if (intmask & PCI_INT_DMA_HLT(CHANNEL_ISO_RCV)) {
+                PRINTD(KERN_DEBUG, lynx->id, "iso receive");
+
+                spin_lock(&lynx->iso_rcv.lock);
+
+                /* record channel status for the tasklet and advance the
+                 * ring of receive PCLs */
+                lynx->iso_rcv.stat[lynx->iso_rcv.next] =
+                        reg_read(lynx, DMA_CHAN_STAT(CHANNEL_ISO_RCV));
+
+                lynx->iso_rcv.used++;
+                lynx->iso_rcv.next = (lynx->iso_rcv.next + 1) % NUM_ISORCV_PCL;
+
+                /* stop reception when the ring is full or nobody listens */
+                if ((lynx->iso_rcv.next == lynx->iso_rcv.last)
+                    || !lynx->iso_rcv.chan_count) {
+                        PRINTD(KERN_DEBUG, lynx->id, "stopped");
+                        reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV), 0);
+                }
+
+                run_sub_pcl(lynx, lynx->iso_rcv.pcl_start, lynx->iso_rcv.next,
+                            CHANNEL_ISO_RCV);
+
+                spin_unlock(&lynx->iso_rcv.lock);
+
+		tasklet_schedule(&lynx->iso_rcv.tq);
+        }
+
+        if (intmask & PCI_INT_DMA_HLT(CHANNEL_ASYNC_SEND)) {
+                PRINTD(KERN_DEBUG, lynx->id, "async sent");
+                spin_lock(&lynx->async.queue_lock);
+
+                if (list_empty(&lynx->async.pcl_queue)) {
+                        spin_unlock(&lynx->async.queue_lock);
+                        PRINT(KERN_WARNING, lynx->id, "async dma halted, but no queued packet (maybe it was cancelled)");
+                } else {
+                        struct ti_pcl pcl;
+                        u32 ack;
+                        struct hpsb_packet *packet;
+
+                        get_pcl(lynx, lynx->async.pcl, &pcl);
+
+                        packet = driver_packet(lynx->async.pcl_queue.next);
+                        list_del_init(&packet->driver_list);
+
+                        pci_unmap_single(lynx->dev, lynx->async.header_dma,
+                                         packet->header_size, PCI_DMA_TODEVICE);
+                        if (packet->data_size) {
+                                pci_unmap_single(lynx->dev, lynx->async.data_dma,
+                                                 packet->data_size, PCI_DMA_TODEVICE);
+                        }
+
+                        /* chain the next queued packet while still locked */
+                        if (!list_empty(&lynx->async.queue)) {
+                                send_next(lynx, hpsb_async);
+                        }
+
+                        spin_unlock(&lynx->async.queue_lock);
+
+                        /* translate the PCL completion status to an ack code */
+                        if (pcl.pcl_status & DMA_CHAN_STAT_PKTCMPL) {
+                                if (pcl.pcl_status & DMA_CHAN_STAT_SPECIALACK) {
+                                        ack = (pcl.pcl_status >> 15) & 0xf;
+                                        PRINTD(KERN_INFO, lynx->id, "special ack %d", ack);
+                                        ack = (ack == 1 ? ACKX_TIMEOUT : ACKX_SEND_ERROR);
+                                } else {
+                                        ack = (pcl.pcl_status >> 15) & 0xf;
+                                }
+                        } else {
+                                PRINT(KERN_INFO, lynx->id, "async packet was not completed");
+                                ack = ACKX_SEND_ERROR;
+                        }
+                        hpsb_packet_sent(host, packet, ack);
+                }
+        }
+
+        if (intmask & PCI_INT_DMA_HLT(CHANNEL_ISO_SEND)) {
+                PRINTD(KERN_DEBUG, lynx->id, "iso sent");
+                spin_lock(&lynx->iso_send.queue_lock);
+
+                if (list_empty(&lynx->iso_send.pcl_queue)) {
+                        spin_unlock(&lynx->iso_send.queue_lock);
+                        PRINT(KERN_ERR, lynx->id, "iso send dma halted, but no queued packet");
+                } else {
+                        struct ti_pcl pcl;
+                        u32 ack;
+                        struct hpsb_packet *packet;
+
+                        get_pcl(lynx, lynx->iso_send.pcl, &pcl);
+
+                        packet = driver_packet(lynx->iso_send.pcl_queue.next);
+                        list_del_init(&packet->driver_list);
+
+                        pci_unmap_single(lynx->dev, lynx->iso_send.header_dma,
+                                         packet->header_size, PCI_DMA_TODEVICE);
+                        if (packet->data_size) {
+                                pci_unmap_single(lynx->dev, lynx->iso_send.data_dma,
+                                                 packet->data_size, PCI_DMA_TODEVICE);
+                        }
+
+                        /* chain the next queued packet while still locked */
+                        if (!list_empty(&lynx->iso_send.queue)) {
+                                send_next(lynx, hpsb_iso);
+                        }
+
+                        spin_unlock(&lynx->iso_send.queue_lock);
+
+                        if (pcl.pcl_status & DMA_CHAN_STAT_PKTCMPL) {
+                                if (pcl.pcl_status & DMA_CHAN_STAT_SPECIALACK) {
+                                        ack = (pcl.pcl_status >> 15) & 0xf;
+                                        PRINTD(KERN_INFO, lynx->id, "special ack %d", ack);
+                                        ack = (ack == 1 ? ACKX_TIMEOUT : ACKX_SEND_ERROR);
+                                } else {
+                                        ack = (pcl.pcl_status >> 15) & 0xf;
+                                }
+                        } else {
+                                PRINT(KERN_INFO, lynx->id, "iso send packet was not completed");
+                                ack = ACKX_SEND_ERROR;
+                        }
+
+                        hpsb_packet_sent(host, packet, ack); //FIXME: maybe we should just use ACK_COMPLETE and ACKX_SEND_ERROR
+                }
+        }
+
+        if (intmask & PCI_INT_DMA_HLT(CHANNEL_ASYNC_RCV)) {
+                /* general receive DMA completed */
+                int stat = reg_read(lynx, DMA_CHAN_STAT(CHANNEL_ASYNC_RCV));
+
+                PRINTD(KERN_DEBUG, lynx->id, "received packet size %d",
+                       stat & 0x1fff);
+
+                if (stat & DMA_CHAN_STAT_SELFID) {
+                        lynx->selfid_size = stat & 0x1fff;
+                        handle_selfid(lynx, host);
+                } else {
+                        quadlet_t *q_data = lynx->rcv_page;
+                        /* convert quadlet payloads back to CPU byte order */
+                        if ((*q_data >> 4 & 0xf) == TCODE_READQ_RESPONSE
+                            || (*q_data >> 4 & 0xf) == TCODE_WRITEQ) {
+                                cpu_to_be32s(q_data + 3);
+                        }
+                        hpsb_packet_received(host, q_data, stat & 0x1fff, 0);
+                }
+
+                /* re-arm the receive DMA for the next packet */
+                run_pcl(lynx, lynx->rcv_pcl_start, CHANNEL_ASYNC_RCV);
+        }
+
+	return IRQ_HANDLED;
+}
+
+
+/* Tasklet bottom half for iso receive: drain the ring of completed
+ * receive PCLs, hand each good packet to the subsystem, and re-enable
+ * reception if any channel is still being listened to.  The lock is
+ * dropped while processing each buffer so the interrupt handler can
+ * keep filling the ring. */
+static void iso_rcv_bh(struct ti_lynx *lynx)
+{
+        unsigned int idx;
+        quadlet_t *data;
+        unsigned long flags;
+
+        spin_lock_irqsave(&lynx->iso_rcv.lock, flags);
+
+        while (lynx->iso_rcv.used) {
+                idx = lynx->iso_rcv.last;
+                spin_unlock_irqrestore(&lynx->iso_rcv.lock, flags);
+
+                data = lynx->iso_rcv.page[idx / ISORCV_PER_PAGE]
+                        + (idx % ISORCV_PER_PAGE) * MAX_ISORCV_SIZE;
+
+                /* sanity check: header length + header quadlet should
+                 * match what the DMA engine reports */
+                if ((*data >> 16) + 4 != (lynx->iso_rcv.stat[idx] & 0x1fff)) {
+                        PRINT(KERN_ERR, lynx->id,
+                              "iso length mismatch 0x%08x/0x%08x", *data,
+                              lynx->iso_rcv.stat[idx]);
+                }
+
+                if (lynx->iso_rcv.stat[idx]
+                    & (DMA_CHAN_STAT_PCIERR | DMA_CHAN_STAT_PKTERR)) {
+                        PRINT(KERN_INFO, lynx->id,
+                              "iso receive error on %d to 0x%p", idx, data);
+                } else {
+                        hpsb_packet_received(lynx->host, data,
+                                             lynx->iso_rcv.stat[idx] & 0x1fff,
+                                             0);
+                }
+
+                spin_lock_irqsave(&lynx->iso_rcv.lock, flags);
+                lynx->iso_rcv.last = (idx + 1) % NUM_ISORCV_PCL;
+                lynx->iso_rcv.used--;
+        }
+
+        /* re-enable reception (the irq handler may have stopped it when
+         * the ring filled up) if anyone is still listening */
+        if (lynx->iso_rcv.chan_count) {
+                reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV),
+                          DMA_WORD1_CMP_ENABLE_MASTER);
+        }
+        spin_unlock_irqrestore(&lynx->iso_rcv.lock, flags);
+}
+
+
+/* Tear down a card.  Also used as the error-unwind path by add_card's
+ * FAIL() macro: the switch on lynx->state intentionally falls through
+ * from the most-initialized state to the least, undoing setup steps in
+ * reverse order.  Only the steps at or below the reached state run. */
+static void remove_card(struct pci_dev *dev)
+{
+        struct ti_lynx *lynx;
+	struct device *lynx_dev;
+        int i;
+
+        lynx = pci_get_drvdata(dev);
+        if (!lynx) return;
+        pci_set_drvdata(dev, NULL);
+
+	lynx_dev = get_device(&lynx->host->device);
+
+        switch (lynx->state) {
+        case is_host:
+                reg_write(lynx, PCI_INT_ENABLE, 0);
+                hpsb_remove_host(lynx->host);
+                /* fall through */
+        case have_intr:
+                reg_write(lynx, PCI_INT_ENABLE, 0);
+                free_irq(lynx->dev->irq, lynx);
+
+		/* Disable IRM Contender and LCtrl */
+		if (lynx->phyic.reg_1394a)
+			set_phy_reg(lynx, 4, ~0xc0 & get_phy_reg(lynx, 4));
+
+		/* Let all other nodes know to ignore us */
+		lynx_devctl(lynx->host, RESET_BUS, LONG_RESET_NO_FORCE_ROOT);
+
+                /* fall through */
+        case have_iomappings:
+                reg_set_bits(lynx, MISC_CONTROL, MISC_CONTROL_SWRESET);
+                /* Fix buggy cards with autoboot pin not tied low: */
+                reg_write(lynx, DMA0_CHAN_CTRL, 0);
+                iounmap(lynx->registers);
+                iounmap(lynx->local_rom);
+                iounmap(lynx->local_ram);
+                iounmap(lynx->aux_port);
+                /* fall through */
+        case have_1394_buffers:
+                for (i = 0; i < ISORCV_PAGES; i++) {
+                        if (lynx->iso_rcv.page[i]) {
+                                pci_free_consistent(lynx->dev, PAGE_SIZE,
+                                                    lynx->iso_rcv.page[i],
+                                                    lynx->iso_rcv.page_dma[i]);
+                        }
+                }
+                pci_free_consistent(lynx->dev, PAGE_SIZE, lynx->rcv_page,
+                                    lynx->rcv_page_dma);
+                /* fall through */
+        case have_aux_buf:
+#ifdef CONFIG_IEEE1394_PCILYNX_PORTS
+                pci_free_consistent(lynx->dev, 65536, lynx->mem_dma_buffer,
+                                    lynx->mem_dma_buffer_dma);
+#endif
+                /* fall through */
+        case have_pcl_mem:
+#ifndef CONFIG_IEEE1394_PCILYNX_LOCALRAM
+                pci_free_consistent(lynx->dev, LOCALRAM_SIZE, lynx->pcl_mem,
+                                    lynx->pcl_mem_dma);
+#endif
+                /* fall through */
+        case clear:
+                /* do nothing - already freed */
+                ;
+        }
+
+	tasklet_kill(&lynx->iso_rcv.tq);
+
+	if (lynx_dev)
+		put_device(lynx_dev);
+}
+
+
+/* Probe and initialize one PCILynx card.  Allocates DMA buffers and
+ * PCLs, maps the register/ROM/RAM/aux BARs, programs the DMA compare
+ * units and link, reads the bus info block from the serial EEPROM over
+ * bit-banged i2c (unless skip_eeprom), and registers the host with the
+ * ieee1394 core.  On any failure FAIL() unwinds via remove_card(),
+ * which tears down exactly what lynx->state says was set up.
+ *
+ * Fix: clearing the contender bit used logical NOT (!PHY_04_CONTENDER
+ * == 0), which wiped every PHY register 4 bit except possibly bit 0;
+ * bitwise NOT (~) clears only the contender bit as intended. */
+static int __devinit add_card(struct pci_dev *dev,
+                              const struct pci_device_id *devid_is_unused)
+{
+#define FAIL(fmt, args...) do { \
+        PRINT_G(KERN_ERR, fmt , ## args); \
+        remove_card(dev); \
+        return error; \
+        } while (0)
+
+	char irq_buf[16];
+	struct hpsb_host *host;
+        struct ti_lynx *lynx; /* shortcut to currently handled device */
+        struct ti_pcl pcl;
+        u32 *pcli;
+        int i;
+        int error;
+
+        error = -ENXIO;
+
+        if (pci_set_dma_mask(dev, 0xffffffff))
+                FAIL("DMA address limits not supported for PCILynx hardware");
+        if (pci_enable_device(dev))
+                FAIL("failed to enable PCILynx hardware");
+        pci_set_master(dev);
+
+        error = -ENOMEM;
+
+	host = hpsb_alloc_host(&lynx_driver, sizeof(struct ti_lynx), &dev->dev);
+        if (!host) FAIL("failed to allocate control structure memory");
+
+        lynx = host->hostdata;
+	lynx->id = card_id++;
+        lynx->dev = dev;
+        lynx->state = clear;
+	lynx->host = host;
+        host->pdev = dev;
+        pci_set_drvdata(dev, lynx);
+
+        spin_lock_init(&lynx->lock);
+        spin_lock_init(&lynx->phy_reg_lock);
+
+#ifndef CONFIG_IEEE1394_PCILYNX_LOCALRAM
+        lynx->pcl_mem = pci_alloc_consistent(dev, LOCALRAM_SIZE,
+                                             &lynx->pcl_mem_dma);
+
+        if (lynx->pcl_mem != NULL) {
+                lynx->state = have_pcl_mem;
+                PRINT(KERN_INFO, lynx->id,
+                      "allocated PCL memory %d Bytes @ 0x%p", LOCALRAM_SIZE,
+                      lynx->pcl_mem);
+        } else {
+                FAIL("failed to allocate PCL memory area");
+        }
+#endif
+
+#ifdef CONFIG_IEEE1394_PCILYNX_PORTS
+        lynx->mem_dma_buffer = pci_alloc_consistent(dev, 65536,
+                                                    &lynx->mem_dma_buffer_dma);
+        if (lynx->mem_dma_buffer == NULL) {
+                FAIL("failed to allocate DMA buffer for aux");
+        }
+        lynx->state = have_aux_buf;
+#endif
+
+        lynx->rcv_page = pci_alloc_consistent(dev, PAGE_SIZE,
+                                              &lynx->rcv_page_dma);
+        if (lynx->rcv_page == NULL) {
+                FAIL("failed to allocate receive buffer");
+        }
+        lynx->state = have_1394_buffers;
+
+        for (i = 0; i < ISORCV_PAGES; i++) {
+                lynx->iso_rcv.page[i] =
+                        pci_alloc_consistent(dev, PAGE_SIZE,
+                                             &lynx->iso_rcv.page_dma[i]);
+                if (lynx->iso_rcv.page[i] == NULL) {
+                        FAIL("failed to allocate iso receive buffers");
+                }
+        }
+
+        lynx->registers = ioremap_nocache(pci_resource_start(dev,0),
+                                          PCILYNX_MAX_REGISTER);
+        lynx->local_ram = ioremap(pci_resource_start(dev,1), PCILYNX_MAX_MEMORY);
+        lynx->aux_port  = ioremap(pci_resource_start(dev,2), PCILYNX_MAX_MEMORY);
+        lynx->local_rom = ioremap(pci_resource_start(dev,PCI_ROM_RESOURCE),
+                                  PCILYNX_MAX_MEMORY);
+        lynx->state = have_iomappings;
+
+        if (lynx->registers == NULL) {
+                FAIL("failed to remap registers - card not accessible");
+        }
+
+#ifdef CONFIG_IEEE1394_PCILYNX_LOCALRAM
+        if (lynx->local_ram == NULL) {
+                FAIL("failed to remap local RAM which is required for "
+                     "operation");
+        }
+#endif
+
+        reg_set_bits(lynx, MISC_CONTROL, MISC_CONTROL_SWRESET);
+        /* Fix buggy cards with autoboot pin not tied low: */
+        reg_write(lynx, DMA0_CHAN_CTRL, 0);
+
+#ifndef __sparc__
+	sprintf (irq_buf, "%d", dev->irq);
+#else
+	sprintf (irq_buf, "%s", __irq_itoa(dev->irq));
+#endif
+
+        if (!request_irq(dev->irq, lynx_irq_handler, SA_SHIRQ,
+                         PCILYNX_DRIVER_NAME, lynx)) {
+                PRINT(KERN_INFO, lynx->id, "allocated interrupt %s", irq_buf);
+                lynx->state = have_intr;
+        } else {
+                FAIL("failed to allocate shared interrupt %s", irq_buf);
+        }
+
+        /* alloc_pcl return values are not checked, it is expected that the
+         * provided PCL space is sufficient for the initial allocations */
+#ifdef CONFIG_IEEE1394_PCILYNX_PORTS
+        if (lynx->aux_port != NULL) {
+                lynx->dmem_pcl = alloc_pcl(lynx);
+                aux_setup_pcls(lynx);
+                sema_init(&lynx->mem_dma_mutex, 1);
+        }
+#endif
+        lynx->rcv_pcl = alloc_pcl(lynx);
+        lynx->rcv_pcl_start = alloc_pcl(lynx);
+        lynx->async.pcl = alloc_pcl(lynx);
+        lynx->async.pcl_start = alloc_pcl(lynx);
+        lynx->iso_send.pcl = alloc_pcl(lynx);
+        lynx->iso_send.pcl_start = alloc_pcl(lynx);
+
+        for (i = 0; i < NUM_ISORCV_PCL; i++) {
+                lynx->iso_rcv.pcl[i] = alloc_pcl(lynx);
+        }
+        lynx->iso_rcv.pcl_start = alloc_pcl(lynx);
+
+        /* all allocations successful - simple init stuff follows */
+
+        reg_write(lynx, PCI_INT_ENABLE, PCI_INT_DMA_ALL);
+
+#ifdef CONFIG_IEEE1394_PCILYNX_PORTS
+        reg_set_bits(lynx, PCI_INT_ENABLE, PCI_INT_AUX_INT);
+        init_waitqueue_head(&lynx->mem_dma_intr_wait);
+        init_waitqueue_head(&lynx->aux_intr_wait);
+#endif
+
+	tasklet_init(&lynx->iso_rcv.tq, (void (*)(unsigned long))iso_rcv_bh,
+		     (unsigned long)lynx);
+
+        spin_lock_init(&lynx->iso_rcv.lock);
+
+        spin_lock_init(&lynx->async.queue_lock);
+        lynx->async.channel = CHANNEL_ASYNC_SEND;
+        spin_lock_init(&lynx->iso_send.queue_lock);
+        lynx->iso_send.channel = CHANNEL_ISO_SEND;
+
+        PRINT(KERN_INFO, lynx->id, "remapped memory spaces reg 0x%p, rom 0x%p, "
+              "ram 0x%p, aux 0x%p", lynx->registers, lynx->local_rom,
+              lynx->local_ram, lynx->aux_port);
+
+        /* now, looking for PHY register set */
+        if ((get_phy_reg(lynx, 2) & 0xe0) == 0xe0) {
+                lynx->phyic.reg_1394a = 1;
+                PRINT(KERN_INFO, lynx->id,
+                      "found 1394a conform PHY (using extended register set)");
+                lynx->phyic.vendor = get_phy_vendorid(lynx);
+                lynx->phyic.product = get_phy_productid(lynx);
+        } else {
+                lynx->phyic.reg_1394a = 0;
+                PRINT(KERN_INFO, lynx->id, "found old 1394 PHY");
+        }
+
+        lynx->selfid_size = -1;
+        lynx->phy_reg0 = -1;
+
+	INIT_LIST_HEAD(&lynx->async.queue);
+	INIT_LIST_HEAD(&lynx->async.pcl_queue);
+	INIT_LIST_HEAD(&lynx->iso_send.queue);
+	INIT_LIST_HEAD(&lynx->iso_send.pcl_queue);
+
+        pcl.next = pcl_bus(lynx, lynx->rcv_pcl);
+        put_pcl(lynx, lynx->rcv_pcl_start, &pcl);
+
+        pcl.next = PCL_NEXT_INVALID;
+        pcl.async_error_next = PCL_NEXT_INVALID;
+
+        pcl.buffer[0].control = PCL_CMD_RCV | 16;
+#ifndef __BIG_ENDIAN
+	pcl.buffer[0].control |= PCL_BIGENDIAN;
+#endif
+	pcl.buffer[1].control = PCL_LAST_BUFF | 4080;
+
+        pcl.buffer[0].pointer = lynx->rcv_page_dma;
+        pcl.buffer[1].pointer = lynx->rcv_page_dma + 16;
+        put_pcl(lynx, lynx->rcv_pcl, &pcl);
+
+        pcl.next = pcl_bus(lynx, lynx->async.pcl);
+        pcl.async_error_next = pcl_bus(lynx, lynx->async.pcl);
+        put_pcl(lynx, lynx->async.pcl_start, &pcl);
+
+        pcl.next = pcl_bus(lynx, lynx->iso_send.pcl);
+        pcl.async_error_next = PCL_NEXT_INVALID;
+        put_pcl(lynx, lynx->iso_send.pcl_start, &pcl);
+
+        pcl.next = PCL_NEXT_INVALID;
+        pcl.async_error_next = PCL_NEXT_INVALID;
+        pcl.buffer[0].control = PCL_CMD_RCV | 4;
+#ifndef __BIG_ENDIAN
+        pcl.buffer[0].control |= PCL_BIGENDIAN;
+#endif
+        pcl.buffer[1].control = PCL_LAST_BUFF | 2044;
+
+        for (i = 0; i < NUM_ISORCV_PCL; i++) {
+                int page = i / ISORCV_PER_PAGE;
+                int sec = i % ISORCV_PER_PAGE;
+
+                pcl.buffer[0].pointer = lynx->iso_rcv.page_dma[page]
+                        + sec * MAX_ISORCV_SIZE;
+                pcl.buffer[1].pointer = pcl.buffer[0].pointer + 4;
+                put_pcl(lynx, lynx->iso_rcv.pcl[i], &pcl);
+        }
+
+        pcli = (u32 *)&pcl;
+        for (i = 0; i < NUM_ISORCV_PCL; i++) {
+                pcli[i] = pcl_bus(lynx, lynx->iso_rcv.pcl[i]);
+        }
+        put_pcl(lynx, lynx->iso_rcv.pcl_start, &pcl);
+
+        /* FIFO sizes from left to right: ITF=48 ATF=48 GRF=160 */
+        reg_write(lynx, FIFO_SIZES, 0x003030a0);
+        /* 20 byte threshold before triggering PCI transfer */
+        reg_write(lynx, DMA_GLOBAL_REGISTER, 0x2<<24);
+        /* threshold on both send FIFOs before transmitting:
+           FIFO size - cache line size - 1 */
+        i = reg_read(lynx, PCI_LATENCY_CACHELINE) & 0xff;
+        i = 0x30 - i - 1;
+        reg_write(lynx, FIFO_XMIT_THRESHOLD, (i << 8) | i);
+
+        reg_set_bits(lynx, PCI_INT_ENABLE, PCI_INT_1394);
+
+        reg_write(lynx, LINK_INT_ENABLE, LINK_INT_PHY_TIMEOUT
+                  | LINK_INT_PHY_REG_RCVD  | LINK_INT_PHY_BUSRESET
+                  | LINK_INT_ISO_STUCK     | LINK_INT_ASYNC_STUCK
+                  | LINK_INT_SENT_REJECT   | LINK_INT_TX_INVALID_TC
+                  | LINK_INT_GRF_OVERFLOW  | LINK_INT_ITF_UNDERFLOW
+                  | LINK_INT_ATF_UNDERFLOW);
+
+        reg_write(lynx, DMA_WORD0_CMP_VALUE(CHANNEL_ASYNC_RCV), 0);
+        reg_write(lynx, DMA_WORD0_CMP_ENABLE(CHANNEL_ASYNC_RCV), 0xa<<4);
+        reg_write(lynx, DMA_WORD1_CMP_VALUE(CHANNEL_ASYNC_RCV), 0);
+        reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ASYNC_RCV),
+                  DMA_WORD1_CMP_MATCH_LOCAL_NODE | DMA_WORD1_CMP_MATCH_BROADCAST
+                  | DMA_WORD1_CMP_MATCH_EXACT    | DMA_WORD1_CMP_MATCH_BUS_BCAST
+                  | DMA_WORD1_CMP_ENABLE_SELF_ID | DMA_WORD1_CMP_ENABLE_MASTER);
+
+        run_pcl(lynx, lynx->rcv_pcl_start, CHANNEL_ASYNC_RCV);
+
+        reg_write(lynx, DMA_WORD0_CMP_VALUE(CHANNEL_ISO_RCV), 0);
+        reg_write(lynx, DMA_WORD0_CMP_ENABLE(CHANNEL_ISO_RCV), 0x9<<4);
+        reg_write(lynx, DMA_WORD1_CMP_VALUE(CHANNEL_ISO_RCV), 0);
+        reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV), 0);
+
+        run_sub_pcl(lynx, lynx->iso_rcv.pcl_start, 0, CHANNEL_ISO_RCV);
+
+        reg_write(lynx, LINK_CONTROL, LINK_CONTROL_RCV_CMP_VALID
+                  | LINK_CONTROL_TX_ISO_EN   | LINK_CONTROL_RX_ISO_EN
+                  | LINK_CONTROL_TX_ASYNC_EN | LINK_CONTROL_RX_ASYNC_EN
+                  | LINK_CONTROL_RESET_TX    | LINK_CONTROL_RESET_RX);
+
+	if (!lynx->phyic.reg_1394a) {
+		if (!hpsb_disable_irm) {
+			/* attempt to enable contender bit -FIXME- would this
+			 * work elsewhere? */
+			reg_set_bits(lynx, GPIO_CTRL_A, 0x1);
+			reg_write(lynx, GPIO_DATA_BASE + 0x3c, 0x1);
+		}
+	} else {
+		/* set the contender (if appropriate) and LCtrl bit in the
+		 * extended PHY register set. (Should check that PHY_02_EXTENDED
+		 * is set in register 2?)
+		 */
+		i = get_phy_reg(lynx, 4);
+		i |= PHY_04_LCTRL;
+		if (hpsb_disable_irm)
+			/* bitwise NOT: clear only the contender bit, keep
+			 * the remaining register 4 bits intact (was '!',
+			 * which zeroed the whole register) */
+			i &= ~PHY_04_CONTENDER;
+		else
+			i |= PHY_04_CONTENDER;
+		if (i != -1) set_phy_reg(lynx, 4, i);
+	}
+	
+        if (!skip_eeprom)
+        {
+        	/* needed for i2c communication with serial eeprom */
+        	struct i2c_adapter *i2c_ad;
+        	struct i2c_algo_bit_data i2c_adapter_data;
+
+        	error = -ENOMEM;
+		i2c_ad = kmalloc(sizeof(struct i2c_adapter), SLAB_KERNEL);
+        	if (!i2c_ad) FAIL("failed to allocate I2C adapter memory");
+
+		memcpy(i2c_ad, &bit_ops, sizeof(struct i2c_adapter));
+                i2c_adapter_data = bit_data;
+                i2c_ad->algo_data = &i2c_adapter_data;
+                i2c_adapter_data.data = lynx;
+
+		PRINTD(KERN_DEBUG, lynx->id,"original eeprom control: %d",
+		       reg_read(lynx, SERIAL_EEPROM_CONTROL));
+
+        	/* reset hardware to sane state */
+        	lynx->i2c_driven_state = 0x00000070;
+        	reg_write(lynx, SERIAL_EEPROM_CONTROL, lynx->i2c_driven_state);
+
+        	if (i2c_bit_add_bus(i2c_ad) < 0)
+        	{
+			kfree(i2c_ad);
+			error = -ENXIO;
+			FAIL("unable to register i2c");
+        	}
+        	else
+        	{
+                        /* do i2c stuff */
+                        unsigned char i2c_cmd = 0x10;
+                        struct i2c_msg msg[2] = { { 0x50, 0, 1, &i2c_cmd },
+                                                  { 0x50, I2C_M_RD, 20, (unsigned char*) lynx->bus_info_block }
+                                                };
+
+
+#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
+                        union i2c_smbus_data data;
+
+                        if (i2c_smbus_xfer(i2c_ad, 80, 0, I2C_SMBUS_WRITE, 0, I2C_SMBUS_BYTE,NULL))
+                                PRINT(KERN_ERR, lynx->id,"eeprom read start has failed");
+                        else
+                        {
+                                u16 addr;
+                                for (addr=0x00; addr < 0x100; addr++) {
+                                        if (i2c_smbus_xfer(i2c_ad, 80, 0, I2C_SMBUS_READ, 0, I2C_SMBUS_BYTE,& data)) {
+                                                PRINT(KERN_ERR, lynx->id, "unable to read i2c %x", addr);
+                                                break;
+                                        }
+                                        else
+                                                PRINT(KERN_DEBUG, lynx->id,"got serial eeprom data at %x: %x",addr, data.byte);
+                                }
+                        }
+#endif
+
+                        /* we use i2c_transfer, because i2c_smbus_read_block_data does not work properly and we
+                           do it more efficiently in one transaction rather then using several reads */
+                        if (i2c_transfer(i2c_ad, msg, 2) < 0) {
+                                PRINT(KERN_ERR, lynx->id, "unable to read bus info block from i2c");
+                        } else {
+                                int i;
+
+                                PRINT(KERN_INFO, lynx->id, "got bus info block from serial eeprom");
+				/* FIXME: probably we shoud rewrite the max_rec, max_ROM(1394a),
+				 * generation(1394a) and link_spd(1394a) field and recalculate
+				 * the CRC */
+
+                                for (i = 0; i < 5 ; i++)
+                                        PRINTD(KERN_DEBUG, lynx->id, "Businfo block quadlet %i: %08x",
+					       i, be32_to_cpu(lynx->bus_info_block[i]));
+
+                                /* info_length, crc_length and 1394 magic number to check, if it is really a bus info block */
+				if (((be32_to_cpu(lynx->bus_info_block[0]) & 0xffff0000) == 0x04040000) &&
+				    (lynx->bus_info_block[1] == __constant_cpu_to_be32(0x31333934)))
+                                {
+                                        PRINT(KERN_DEBUG, lynx->id, "read a valid bus info block from");
+                                } else {
+					kfree(i2c_ad);
+					error = -ENXIO;
+					FAIL("read something from serial eeprom, but it does not seem to be a valid bus info block");
+                                }
+
+                        }
+
+                        i2c_bit_del_bus(i2c_ad);
+			kfree(i2c_ad);
+                }
+        }
+
+	host->csr.guid_hi = be32_to_cpu(lynx->bus_info_block[3]);
+	host->csr.guid_lo = be32_to_cpu(lynx->bus_info_block[4]);
+	host->csr.cyc_clk_acc = (be32_to_cpu(lynx->bus_info_block[2]) >> 16) & 0xff;
+	host->csr.max_rec = (be32_to_cpu(lynx->bus_info_block[2]) >> 12) & 0xf;
+	if (!lynx->phyic.reg_1394a)
+		host->csr.lnk_spd = (get_phy_reg(lynx, 2) & 0xc0) >> 6;
+	else
+		host->csr.lnk_spd = be32_to_cpu(lynx->bus_info_block[2]) & 0x7;
+
+	if (hpsb_add_host(host)) {
+		error = -ENOMEM;
+		FAIL("Failed to register host with highlevel");
+	}
+
+	lynx->state = is_host;
+
+	return 0;
+#undef FAIL
+}
+
+
+/* PCI IDs this driver binds to: any TI PCILynx, regardless of subsystem */
+static struct pci_device_id pci_table[] = {
+	{
+                .vendor =    PCI_VENDOR_ID_TI,
+                .device =    PCI_DEVICE_ID_TI_PCILYNX,
+                .subvendor = PCI_ANY_ID,
+                .subdevice = PCI_ANY_ID,
+	},
+	{ }			/* Terminating entry */
+};
+
+/* PCI driver glue: probe/remove entry points for the core PCI layer */
+static struct pci_driver lynx_pci_driver = {
+        .name =     PCILYNX_DRIVER_NAME,
+        .id_table = pci_table,
+        .probe =    add_card,
+        .remove =   remove_card,
+};
+
+/* ieee1394 core host driver ops; no hardware config ROM or isoctl support */
+static struct hpsb_host_driver lynx_driver = {
+	.owner =	   THIS_MODULE,
+	.name =		   PCILYNX_DRIVER_NAME,
+	.set_hw_config_rom = NULL,
+        .transmit_packet = lynx_transmit,
+        .devctl =          lynx_devctl,
+	.isoctl =          NULL,
+};
+
+MODULE_AUTHOR("Andreas E. Bombe <andreas.bombe@munich.netsurf.de>");
+MODULE_DESCRIPTION("driver for Texas Instruments PCI Lynx IEEE-1394 controller");
+MODULE_LICENSE("GPL");
+MODULE_SUPPORTED_DEVICE("pcilynx");
+MODULE_DEVICE_TABLE(pci, pci_table);
+
+/* Module init: optionally claim the aux char-device major, then register
+ * the PCI driver.  If PCI registration fails, the char device (when
+ * configured) is released again before the error is propagated. */
+static int __init pcilynx_init(void)
+{
+        int err;
+
+#ifdef CONFIG_IEEE1394_PCILYNX_PORTS
+        if (register_chrdev(PCILYNX_MAJOR, PCILYNX_DRIVER_NAME, &aux_ops)) {
+                PRINT_G(KERN_ERR, "allocation of char major number %d failed",
+                        PCILYNX_MAJOR);
+                return -EBUSY;
+        }
+#endif
+
+        err = pci_register_driver(&lynx_pci_driver);
+        if (err >= 0)
+                return 0;
+
+        /* unwind: undo the char-device registration, if any */
+        PRINT_G(KERN_ERR, "PCI module init failed");
+#ifdef CONFIG_IEEE1394_PCILYNX_PORTS
+        unregister_chrdev(PCILYNX_MAJOR, PCILYNX_DRIVER_NAME);
+#endif
+        return err;
+}
+
+/* Module exit: unregister the PCI driver (which removes all cards),
+ * then release the aux char-device major if it was registered. */
+static void __exit pcilynx_cleanup(void)
+{
+        pci_unregister_driver(&lynx_pci_driver);
+
+#ifdef CONFIG_IEEE1394_PCILYNX_PORTS
+        unregister_chrdev(PCILYNX_MAJOR, PCILYNX_DRIVER_NAME);
+#endif
+}
+
+
+module_init(pcilynx_init);
+module_exit(pcilynx_cleanup);
diff --git a/drivers/ieee1394/pcilynx.h b/drivers/ieee1394/pcilynx.h
new file mode 100644
index 000000000000..644ec55d3d46
--- /dev/null
+++ b/drivers/ieee1394/pcilynx.h
@@ -0,0 +1,516 @@
+#ifndef __PCILYNX_H__
+#define __PCILYNX_H__
+
+#include <linux/config.h>
+
+#define PCILYNX_DRIVER_NAME      "pcilynx"
+#define PCILYNX_MAJOR            177
+
+#define PCILYNX_MINOR_AUX_START  0
+#define PCILYNX_MINOR_ROM_START  16
+#define PCILYNX_MINOR_RAM_START  32
+
+#define PCILYNX_MAX_REGISTER     0xfff
+#define PCILYNX_MAX_MEMORY       0xffff
+
+#define PCI_DEVICE_ID_TI_PCILYNX 0x8000
+#define MAX_PCILYNX_CARDS        4
+#define LOCALRAM_SIZE            4096
+
+#define NUM_ISORCV_PCL           4
+#define MAX_ISORCV_SIZE          2048
+#define ISORCV_PER_PAGE          (PAGE_SIZE / MAX_ISORCV_SIZE)
+#define ISORCV_PAGES             (NUM_ISORCV_PCL / ISORCV_PER_PAGE)
+
+#define CHANNEL_LOCALBUS         0
+#define CHANNEL_ASYNC_RCV        1
+#define CHANNEL_ISO_RCV          2
+#define CHANNEL_ASYNC_SEND       3
+#define CHANNEL_ISO_SEND         4
+
+#define PCILYNX_CONFIG_ROM_LENGTH   1024
+
+typedef int pcl_t;
+
+/* Per-adapter driver state for one PCILynx card. */
+struct ti_lynx {
+        int id; /* sequential card number */
+
+        spinlock_t lock;
+
+        struct pci_dev *dev;
+
+        /* PHY identification, probed in add_card from PHY register 2 */
+        struct {
+                unsigned reg_1394a:1; /* 1 = 1394a extended register set */
+                u32 vendor;
+                u32 product;
+        } phyic;
+
+        /* initialization progress; remove_card() unwinds based on this */
+        enum { clear, have_intr, have_aux_buf, have_pcl_mem,
+               have_1394_buffers, have_iomappings, is_host } state;
+
+        /* remapped memory spaces */
+        void __iomem *registers;
+        void __iomem *local_rom;
+        void __iomem *local_ram;
+        void __iomem *aux_port;
+	quadlet_t bus_info_block[5]; /* read from serial EEPROM via i2c */
+
+#ifdef CONFIG_IEEE1394_PCILYNX_PORTS
+        atomic_t aux_intr_seen;
+        wait_queue_head_t aux_intr_wait;
+
+        void *mem_dma_buffer;
+        dma_addr_t mem_dma_buffer_dma;
+        struct semaphore mem_dma_mutex;
+        wait_queue_head_t mem_dma_intr_wait;
+#endif
+
+        /*
+         * use local RAM of LOCALRAM_SIZE bytes for PCLs, which allows for
+         * LOCALRAM_SIZE * 8 PCLs (each sized 128 bytes);
+         * the following is an allocation bitmap
+         */
+        u8 pcl_bmap[LOCALRAM_SIZE / 1024];
+
+#ifndef CONFIG_IEEE1394_PCILYNX_LOCALRAM
+	/* point to PCLs memory area if needed */
+	void *pcl_mem;
+        dma_addr_t pcl_mem_dma;
+#endif
+
+        /* PCLs for local mem / aux transfers */
+        pcl_t dmem_pcl;
+
+        /* IEEE-1394 part follows */
+        struct hpsb_host *host;
+
+        int phyid, isroot;
+        int selfid_size;
+        int phy_reg0;
+
+        spinlock_t phy_reg_lock;
+
+        /* async receive: one PCL chain feeding a single DMA page */
+        pcl_t rcv_pcl_start, rcv_pcl;
+        void *rcv_page;
+        dma_addr_t rcv_page_dma;
+        int rcv_active;
+
+        /* shared state for the async and iso transmit queues */
+        struct lynx_send_data {
+                pcl_t pcl_start, pcl;
+                struct list_head queue;
+                struct list_head pcl_queue; /* this queue contains at most one packet */
+                spinlock_t queue_lock;
+                dma_addr_t header_dma, data_dma;
+                int channel;
+        } async, iso_send;
+
+        /* iso receive ring: NUM_ISORCV_PCL buffers, drained by iso_rcv_bh */
+        struct {
+                pcl_t pcl[NUM_ISORCV_PCL];
+                u32 stat[NUM_ISORCV_PCL]; /* DMA status saved per buffer */
+                void *page[ISORCV_PAGES];
+                dma_addr_t page_dma[ISORCV_PAGES];
+                pcl_t pcl_start;
+                int chan_count;
+                int next, last, used, running;
+                struct tasklet_struct tq;
+                spinlock_t lock;
+        } iso_rcv;
+
+	u32 i2c_driven_state; /* the state we currently drive the Serial EEPROM Control register */
+};
+
+/* the per-file data structure for mem space access */
+struct memdata {
+        struct ti_lynx *lynx;
+        int cid;
+        atomic_t aux_intr_last_seen;
+	/* enum values are the same as LBUS_ADDR_SEL_* values below */
+        enum { rom = 0x10000, aux = 0x20000, ram = 0 } type;
+};
+
+
+
+/*
+ * Register read and write helper functions.
+ */
+/* Write a 32-bit value to a PCILynx MMIO register at byte offset. */
+static inline void reg_write(const struct ti_lynx *lynx, int offset, u32 data)
+{
+        writel(data, lynx->registers + offset);
+}
+
+/* Read a 32-bit value from a PCILynx MMIO register at byte offset. */
+static inline u32 reg_read(const struct ti_lynx *lynx, int offset)
+{
+        return readl(lynx->registers + offset);
+}
+
+/* Read-modify-write: set the bits in mask.  Not atomic vs. other
+ * writers of the same register; callers must serialize if needed. */
+static inline void reg_set_bits(const struct ti_lynx *lynx, int offset,
+                                u32 mask)
+{
+        reg_write(lynx, offset, (reg_read(lynx, offset) | mask));
+}
+
+/* Read-modify-write: clear the bits in mask.  Not atomic vs. other
+ * writers of the same register; callers must serialize if needed. */
+static inline void reg_clear_bits(const struct ti_lynx *lynx, int offset,
+                                  u32 mask)
+{
+        reg_write(lynx, offset, (reg_read(lynx, offset) & ~mask));
+}
+
+
+
+/* chip register definitions follow */
+
+#define PCI_LATENCY_CACHELINE             0x0c
+
+#define MISC_CONTROL                      0x40
+#define MISC_CONTROL_SWRESET              (1<<0)
+
+#define SERIAL_EEPROM_CONTROL             0x44
+
+#define PCI_INT_STATUS                    0x48
+#define PCI_INT_ENABLE                    0x4c
+/* status and enable have identical bit numbers */
+#define PCI_INT_INT_PEND                  (1<<31)
+#define PCI_INT_FORCED_INT                (1<<30)
+#define PCI_INT_SLV_ADR_PERR              (1<<28)
+#define PCI_INT_SLV_DAT_PERR              (1<<27)
+#define PCI_INT_MST_DAT_PERR              (1<<26)
+#define PCI_INT_MST_DEV_TIMEOUT           (1<<25)
+#define PCI_INT_INTERNAL_SLV_TIMEOUT      (1<<23)
+#define PCI_INT_AUX_TIMEOUT               (1<<18)
+#define PCI_INT_AUX_INT                   (1<<17)
+#define PCI_INT_1394                      (1<<16)
+#define PCI_INT_DMA4_PCL                  (1<<9)
+#define PCI_INT_DMA4_HLT                  (1<<8)
+#define PCI_INT_DMA3_PCL                  (1<<7)
+#define PCI_INT_DMA3_HLT                  (1<<6)
+#define PCI_INT_DMA2_PCL                  (1<<5)
+#define PCI_INT_DMA2_HLT                  (1<<4)
+#define PCI_INT_DMA1_PCL                  (1<<3)
+#define PCI_INT_DMA1_HLT                  (1<<2)
+#define PCI_INT_DMA0_PCL                  (1<<1)
+#define PCI_INT_DMA0_HLT                  (1<<0)
+/* all DMA interrupts combined: */
+#define PCI_INT_DMA_ALL                   0x3ff
+
+#define PCI_INT_DMA_HLT(chan)             (1 << (chan * 2))
+#define PCI_INT_DMA_PCL(chan)             (1 << (chan * 2 + 1))
+
+#define LBUS_ADDR                         0xb4
+#define LBUS_ADDR_SEL_RAM                 (0x0<<16)
+#define LBUS_ADDR_SEL_ROM                 (0x1<<16)
+#define LBUS_ADDR_SEL_AUX                 (0x2<<16)
+#define LBUS_ADDR_SEL_ZV                  (0x3<<16)
+
+#define GPIO_CTRL_A                       0xb8
+#define GPIO_CTRL_B                       0xbc
+#define GPIO_DATA_BASE                    0xc0
+
+#define DMA_BREG(base, chan)              (base + chan * 0x20)
+#define DMA_SREG(base, chan)              (base + chan * 0x10)
+
+#define DMA0_PREV_PCL                     0x100
+#define DMA1_PREV_PCL                     0x120
+#define DMA2_PREV_PCL                     0x140
+#define DMA3_PREV_PCL                     0x160
+#define DMA4_PREV_PCL                     0x180
+#define DMA_PREV_PCL(chan)                (DMA_BREG(DMA0_PREV_PCL, chan))
+
+#define DMA0_CURRENT_PCL                  0x104
+#define DMA1_CURRENT_PCL                  0x124
+#define DMA2_CURRENT_PCL                  0x144
+#define DMA3_CURRENT_PCL                  0x164
+#define DMA4_CURRENT_PCL                  0x184
+#define DMA_CURRENT_PCL(chan)             (DMA_BREG(DMA0_CURRENT_PCL, chan))
+
+#define DMA0_CHAN_STAT                    0x10c
+#define DMA1_CHAN_STAT                    0x12c
+#define DMA2_CHAN_STAT                    0x14c
+#define DMA3_CHAN_STAT                    0x16c
+#define DMA4_CHAN_STAT                    0x18c
+#define DMA_CHAN_STAT(chan)               (DMA_BREG(DMA0_CHAN_STAT, chan))
+/* CHAN_STATUS registers share bits */
+#define DMA_CHAN_STAT_SELFID              (1<<31)
+#define DMA_CHAN_STAT_ISOPKT              (1<<30)
+#define DMA_CHAN_STAT_PCIERR              (1<<29)
+#define DMA_CHAN_STAT_PKTERR              (1<<28)
+#define DMA_CHAN_STAT_PKTCMPL             (1<<27)
+#define DMA_CHAN_STAT_SPECIALACK          (1<<14)
+
+
+#define DMA0_CHAN_CTRL                    0x110
+#define DMA1_CHAN_CTRL                    0x130
+#define DMA2_CHAN_CTRL                    0x150
+#define DMA3_CHAN_CTRL                    0x170
+#define DMA4_CHAN_CTRL                    0x190
+#define DMA_CHAN_CTRL(chan)               (DMA_BREG(DMA0_CHAN_CTRL, chan))
+/* CHAN_CTRL registers share bits */
+#define DMA_CHAN_CTRL_ENABLE              (1<<31)
+#define DMA_CHAN_CTRL_BUSY                (1<<30)
+#define DMA_CHAN_CTRL_LINK                (1<<29)
+
+#define DMA0_READY                        0x114
+#define DMA1_READY                        0x134
+#define DMA2_READY                        0x154
+#define DMA3_READY                        0x174
+#define DMA4_READY                        0x194
+#define DMA_READY(chan)                   (DMA_BREG(DMA0_READY, chan))
+
+#define DMA_GLOBAL_REGISTER               0x908
+
+#define FIFO_SIZES                        0xa00
+
+#define FIFO_CONTROL                      0xa10
+#define FIFO_CONTROL_GRF_FLUSH            (1<<4)
+#define FIFO_CONTROL_ITF_FLUSH            (1<<3)
+#define FIFO_CONTROL_ATF_FLUSH            (1<<2)
+
+#define FIFO_XMIT_THRESHOLD               0xa14
+
+#define DMA0_WORD0_CMP_VALUE              0xb00
+#define DMA1_WORD0_CMP_VALUE              0xb10
+#define DMA2_WORD0_CMP_VALUE              0xb20
+#define DMA3_WORD0_CMP_VALUE              0xb30
+#define DMA4_WORD0_CMP_VALUE              0xb40
+#define DMA_WORD0_CMP_VALUE(chan)         (DMA_SREG(DMA0_WORD0_CMP_VALUE, chan))
+
+#define DMA0_WORD0_CMP_ENABLE             0xb04
+#define DMA1_WORD0_CMP_ENABLE             0xb14
+#define DMA2_WORD0_CMP_ENABLE             0xb24
+#define DMA3_WORD0_CMP_ENABLE             0xb34
+#define DMA4_WORD0_CMP_ENABLE             0xb44
+#define DMA_WORD0_CMP_ENABLE(chan)        (DMA_SREG(DMA0_WORD0_CMP_ENABLE,chan))
+
+#define DMA0_WORD1_CMP_VALUE              0xb08
+#define DMA1_WORD1_CMP_VALUE              0xb18
+#define DMA2_WORD1_CMP_VALUE              0xb28
+#define DMA3_WORD1_CMP_VALUE              0xb38
+#define DMA4_WORD1_CMP_VALUE              0xb48
+#define DMA_WORD1_CMP_VALUE(chan)         (DMA_SREG(DMA0_WORD1_CMP_VALUE, chan))
+
+#define DMA0_WORD1_CMP_ENABLE             0xb0c
+#define DMA1_WORD1_CMP_ENABLE             0xb1c
+#define DMA2_WORD1_CMP_ENABLE             0xb2c
+#define DMA3_WORD1_CMP_ENABLE             0xb3c
+#define DMA4_WORD1_CMP_ENABLE             0xb4c
+#define DMA_WORD1_CMP_ENABLE(chan)        (DMA_SREG(DMA0_WORD1_CMP_ENABLE,chan))
+/* word 1 compare enable flags */
+#define DMA_WORD1_CMP_MATCH_OTHERBUS      (1<<15)
+#define DMA_WORD1_CMP_MATCH_BROADCAST     (1<<14)
+#define DMA_WORD1_CMP_MATCH_BUS_BCAST     (1<<13)
+#define DMA_WORD1_CMP_MATCH_LOCAL_NODE    (1<<12)
+#define DMA_WORD1_CMP_MATCH_EXACT         (1<<11)
+#define DMA_WORD1_CMP_ENABLE_SELF_ID      (1<<10)
+#define DMA_WORD1_CMP_ENABLE_MASTER       (1<<8)
+
+#define LINK_ID                           0xf00
+#define LINK_ID_BUS(id)                   (id<<22)
+#define LINK_ID_NODE(id)                  (id<<16)
+
+#define LINK_CONTROL                      0xf04
+#define LINK_CONTROL_BUSY                 (1<<29)
+#define LINK_CONTROL_TX_ISO_EN            (1<<26)
+#define LINK_CONTROL_RX_ISO_EN            (1<<25)
+#define LINK_CONTROL_TX_ASYNC_EN          (1<<24)
+#define LINK_CONTROL_RX_ASYNC_EN          (1<<23)
+#define LINK_CONTROL_RESET_TX             (1<<21)
+#define LINK_CONTROL_RESET_RX             (1<<20)
+#define LINK_CONTROL_CYCMASTER            (1<<11)
+#define LINK_CONTROL_CYCSOURCE            (1<<10)
+#define LINK_CONTROL_CYCTIMEREN           (1<<9)
+#define LINK_CONTROL_RCV_CMP_VALID        (1<<7)
+#define LINK_CONTROL_SNOOP_ENABLE         (1<<6)
+
+#define CYCLE_TIMER                       0xf08
+
+#define LINK_PHY                          0xf0c
+#define LINK_PHY_READ                     (1<<31)
+#define LINK_PHY_WRITE                    (1<<30)
+#define LINK_PHY_ADDR(addr)               (addr<<24)
+#define LINK_PHY_WDATA(data)              (data<<16)
+#define LINK_PHY_RADDR(addr)              (addr<<8)
+
+
+#define LINK_INT_STATUS                   0xf14
+#define LINK_INT_ENABLE                   0xf18
+/* status and enable have identical bit numbers */
+#define LINK_INT_LINK_INT                 (1<<31)
+#define LINK_INT_PHY_TIMEOUT              (1<<30)
+#define LINK_INT_PHY_REG_RCVD             (1<<29)
+#define LINK_INT_PHY_BUSRESET             (1<<28)
+#define LINK_INT_TX_RDY                   (1<<26)
+#define LINK_INT_RX_DATA_RDY              (1<<25)
+#define LINK_INT_ISO_STUCK                (1<<20)
+#define LINK_INT_ASYNC_STUCK              (1<<19)
+#define LINK_INT_SENT_REJECT              (1<<17)
+#define LINK_INT_HDR_ERR                  (1<<16)
+#define LINK_INT_TX_INVALID_TC            (1<<15)
+#define LINK_INT_CYC_SECOND               (1<<11)
+#define LINK_INT_CYC_START                (1<<10)
+#define LINK_INT_CYC_DONE                 (1<<9)
+#define LINK_INT_CYC_PENDING              (1<<8)
+#define LINK_INT_CYC_LOST                 (1<<7)
+#define LINK_INT_CYC_ARB_FAILED           (1<<6)
+#define LINK_INT_GRF_OVERFLOW             (1<<5)
+#define LINK_INT_ITF_UNDERFLOW            (1<<4)
+#define LINK_INT_ATF_UNDERFLOW            (1<<3)
+#define LINK_INT_ISOARB_FAILED            (1<<0)
+
+/* PHY specifics */
+#define PHY_VENDORID_TI                 0x800028
+#define PHY_PRODUCTID_TSB41LV03         0x000000
+
+
+/* this is the physical layout of a PCL, its size is 128 bytes */
+struct ti_pcl {
+        u32 next;                       /* link to the next PCL (bus address); presumably PCL_NEXT_INVALID terminates — TODO confirm vs. TI manual */
+        u32 async_error_next;           /* alternate link taken on async error — per hardware PCL layout */
+        u32 user_data;                  /* software-defined word, not interpreted by the chip */
+        u32 pcl_status;                 /* completion/status word written back by the DMA engine */
+        u32 remaining_transfer_count;
+        u32 next_data_buffer;
+        /* 13 scatter/gather descriptors; 6 header words + 13*2 = 32 words
+         * total = 128 bytes, matching the 32-word copies in put_pcl/get_pcl. */
+        struct {
+                u32 control;            /* PCL_CMD_* | flags | byte count */
+                u32 pointer;            /* bus address of the data buffer */
+        } buffer[13] __attribute__ ((packed));
+} __attribute__ ((packed));
+
+#include <linux/stddef.h>
+#define pcloffs(MEMBER) (offsetof(struct ti_pcl, MEMBER))
+
+
+#ifdef CONFIG_IEEE1394_PCILYNX_LOCALRAM
+
+/* Copy one 128-byte PCL (32 little words) from host memory into the card's
+ * local RAM slot for @pclid, using writel() for MMIO-safe 32-bit stores. */
+static inline void put_pcl(const struct ti_lynx *lynx, pcl_t pclid,
+                           const struct ti_pcl *pcl)
+{
+        int i;
+        u32 *in = (u32 *)pcl;
+        u32 *out = (u32 *)(lynx->local_ram + pclid * sizeof(struct ti_pcl));
+
+        for (i = 0; i < 32; i++, out++, in++) {
+                writel(*in, out);
+        }
+}
+
+/* Read one PCL back from the card's local RAM into host memory @pcl,
+ * word by word via readl() (mirror of put_pcl). */
+static inline void get_pcl(const struct ti_lynx *lynx, pcl_t pclid,
+                           struct ti_pcl *pcl)
+{
+        int i;
+        u32 *out = (u32 *)pcl;
+        u32 *in = (u32 *)(lynx->local_ram + pclid * sizeof(struct ti_pcl));
+
+        for (i = 0; i < 32; i++, out++, in++) {
+                *out = readl(in);
+        }
+}
+
+/* Bus address of PCL @pclid as seen by the DMA engine: offset into the
+ * local-RAM aperture exposed through PCI BAR 1. */
+static inline u32 pcl_bus(const struct ti_lynx *lynx, pcl_t pclid)
+{
+        return pci_resource_start(lynx->dev, 1) + pclid * sizeof(struct ti_pcl);
+}
+
+#else /* CONFIG_IEEE1394_PCILYNX_LOCALRAM */
+
+/* Non-local-RAM variant: PCLs live in PCI-consistent host memory
+ * (lynx->pcl_mem); memcpy_le32 presumably handles byte-swapping on
+ * big-endian hosts — helper defined elsewhere in the driver. */
+static inline void put_pcl(const struct ti_lynx *lynx, pcl_t pclid,
+                           const struct ti_pcl *pcl)
+{
+        memcpy_le32((u32 *)(lynx->pcl_mem + pclid * sizeof(struct ti_pcl)),
+                    (u32 *)pcl, sizeof(struct ti_pcl));
+}
+
+/* Non-local-RAM variant: copy PCL @pclid out of the consistent DMA buffer
+ * into host memory @pcl (mirror of put_pcl above). */
+static inline void get_pcl(const struct ti_lynx *lynx, pcl_t pclid,
+                           struct ti_pcl *pcl)
+{
+        memcpy_le32((u32 *)pcl,
+                    (u32 *)(lynx->pcl_mem + pclid * sizeof(struct ti_pcl)),
+                    sizeof(struct ti_pcl));
+}
+
+/* Bus address of PCL @pclid: base DMA handle of the consistent PCL buffer
+ * plus the slot offset. */
+static inline u32 pcl_bus(const struct ti_lynx *lynx, pcl_t pclid)
+{
+        return lynx->pcl_mem_dma + pclid * sizeof(struct ti_pcl);
+}
+
+#endif /* CONFIG_IEEE1394_PCILYNX_LOCALRAM */
+
+
+#if defined (CONFIG_IEEE1394_PCILYNX_LOCALRAM) || defined (__BIG_ENDIAN)
+typedef struct ti_pcl pcltmp_t;
+
+/* Begin a read-modify-write of a PCL: copy it into the caller-supplied
+ * temporary and hand back a pointer to that copy.  Needed when the PCL
+ * store is not directly byte-addressable in host order (local RAM, or
+ * big-endian host with consistent memory). */
+static inline struct ti_pcl *edit_pcl(const struct ti_lynx *lynx, pcl_t pclid,
+                                      pcltmp_t *tmp)
+{
+        get_pcl(lynx, pclid, tmp);
+        return tmp;
+}
+
+/* Finish a read-modify-write started by edit_pcl(): write the edited
+ * temporary back into the PCL store. */
+static inline void commit_pcl(const struct ti_lynx *lynx, pcl_t pclid,
+                              pcltmp_t *tmp)
+{
+        put_pcl(lynx, pclid, tmp);
+}
+
+#else
+typedef int pcltmp_t; /* just a dummy */
+
+/* Fast-path variant (little-endian host, PCLs in consistent memory):
+ * the PCL can be edited in place, so just return a pointer into the
+ * shared buffer; @tmp is unused. */
+static inline struct ti_pcl *edit_pcl(const struct ti_lynx *lynx, pcl_t pclid,
+                                      pcltmp_t *tmp)
+{
+        return lynx->pcl_mem + pclid * sizeof(struct ti_pcl);
+}
+
+/* Fast-path variant: edits made via edit_pcl() were in place, so there
+ * is nothing to write back. */
+static inline void commit_pcl(const struct ti_lynx *lynx, pcl_t pclid,
+                              pcltmp_t *tmp)
+{
+}
+#endif
+
+
+/* Kick DMA channel @dmachan: point its CURRENT_PCL register at word @idx
+ * of PCL @pclid (idx*4 byte offset) and enable the channel with link
+ * fetching.  Per-channel registers are spaced 0x20 apart. */
+static inline void run_sub_pcl(const struct ti_lynx *lynx, pcl_t pclid, int idx,
+                               int dmachan)
+{
+        reg_write(lynx, DMA0_CURRENT_PCL + dmachan * 0x20,
+                  pcl_bus(lynx, pclid) + idx * 4);
+        reg_write(lynx, DMA0_CHAN_CTRL + dmachan * 0x20,
+                  DMA_CHAN_CTRL_ENABLE | DMA_CHAN_CTRL_LINK);
+}
+
+/* Convenience wrapper: start PCL @pclid from its first word. */
+static inline void run_pcl(const struct ti_lynx *lynx, pcl_t pclid, int dmachan)
+{
+        run_sub_pcl(lynx, pclid, 0, dmachan);
+}
+
+#define PCL_NEXT_INVALID (1<<0)
+
+/* transfer commands */
+#define PCL_CMD_RCV            (0x1<<24)
+#define PCL_CMD_RCV_AND_UPDATE (0xa<<24)
+#define PCL_CMD_XMT            (0x2<<24)
+#define PCL_CMD_UNFXMT         (0xc<<24)
+#define PCL_CMD_PCI_TO_LBUS    (0x8<<24)
+#define PCL_CMD_LBUS_TO_PCI    (0x9<<24)
+
+/* aux commands */
+#define PCL_CMD_NOP            (0x0<<24)
+#define PCL_CMD_LOAD           (0x3<<24)
+#define PCL_CMD_STOREQ         (0x4<<24)
+#define PCL_CMD_STORED         (0xb<<24)
+#define PCL_CMD_STORE0         (0x5<<24)
+#define PCL_CMD_STORE1         (0x6<<24)
+#define PCL_CMD_COMPARE        (0xe<<24)
+#define PCL_CMD_SWAP_COMPARE   (0xf<<24)
+#define PCL_CMD_ADD            (0xd<<24)
+#define PCL_CMD_BRANCH         (0x7<<24)
+
+/* BRANCH condition codes */
+#define PCL_COND_DMARDY_SET    (0x1<<20)
+#define PCL_COND_DMARDY_CLEAR  (0x2<<20)
+
+#define PCL_GEN_INTR           (1<<19)
+#define PCL_LAST_BUFF          (1<<18)
+#define PCL_LAST_CMD           (PCL_LAST_BUFF)
+#define PCL_WAITSTAT           (1<<17)
+#define PCL_BIGENDIAN          (1<<16)
+#define PCL_ISOMODE            (1<<12)
+
+#endif
diff --git a/drivers/ieee1394/raw1394-private.h b/drivers/ieee1394/raw1394-private.h
new file mode 100644
index 000000000000..c93587be9cab
--- /dev/null
+++ b/drivers/ieee1394/raw1394-private.h
@@ -0,0 +1,86 @@
+#ifndef IEEE1394_RAW1394_PRIVATE_H
+#define IEEE1394_RAW1394_PRIVATE_H
+
+/* header for definitions that are private to the raw1394 driver
+   and not visible to user-space */
+
+#define RAW1394_DEVICE_MAJOR      171
+#define RAW1394_DEVICE_NAME       "raw1394"
+
+#define RAW1394_MAX_USER_CSR_DIRS	16
+
+/* Refcounted payload buffer shared by every file that receives the same
+ * iso/FCP packet; freed when the last pending_request drops its reference
+ * (see free_pending_request()). */
+struct iso_block_store {
+        atomic_t refcount;
+        size_t data_size;       /* bytes in data[]; accounted in iso_buffer_size */
+        quadlet_t data[0];      /* payload, allocated inline after the struct */
+};
+
+enum raw1394_iso_state { RAW1394_ISO_INACTIVE = 0,
+			 RAW1394_ISO_RECV = 1,
+			 RAW1394_ISO_XMIT = 2 };
+
+/* Per-open-file state of /dev/raw1394.  Lives on a host_info's
+ * file_info_list once the fd is bound to a card (state == connected). */
+struct file_info {
+        struct list_head list;          /* link in host_info->file_info_list */
+
+        /* client protocol handshake progress: opened -> initialized -> connected */
+        enum { opened, initialized, connected } state;
+        unsigned int protocol_version;  /* userspace API version from RAW1394_REQ_INITIALIZE */
+
+        struct hpsb_host *host;         /* card selected via RAW1394_REQ_SET_CARD */
+
+        struct list_head req_pending;   /* requests sent, awaiting completion */
+        struct list_head req_complete;  /* completed requests, waiting for read() */
+        struct semaphore complete_sem;  /* counts entries on req_complete */
+        spinlock_t reqlists_lock;       /* protects both request lists */
+        wait_queue_head_t poll_wait_complete;
+
+        struct list_head addr_list;     /* ARM address ranges (struct arm_addr) */
+
+        u8 __user *fcp_buffer;          /* userspace sink for FCP requests, or NULL */
+
+	/* old ISO API */
+        u64 listen_channels;            /* bitmask of iso channels listened on */
+        quadlet_t __user *iso_buffer;   /* userspace sink for iso packets */
+        size_t iso_buffer_length;
+
+        u8 notification; /* (busreset-notification) RAW1394_NOTIFY_OFF/ON */
+
+	/* new rawiso API */
+	enum raw1394_iso_state iso_state;
+	struct hpsb_iso *iso_handle;
+
+	/* User space's CSR1212 dynamic ConfigROM directories */
+	struct csr1212_keyval *csr1212_dirs[RAW1394_MAX_USER_CSR_DIRS];
+
+	/* Legacy ConfigROM update flag */
+	u8 cfgrom_upd;
+};
+
+/* One userspace-registered address range (Address Range Mapping); remote
+ * read/write/lock requests landing in [start,end] are served from
+ * addr_space_buffer and/or forwarded to userspace. */
+struct arm_addr {
+        struct list_head addr_list; /* file_info list */
+        u64    start, end;          /* 1394 address range covered */
+        u64    arm_tag;             /* opaque userspace cookie */
+        u8     access_rights;       /* ARM_READ/WRITE/LOCK bits the kernel serves */
+        u8     notification_options;   /* which accesses get reported to userspace */
+        u8     client_transactions;    /* which accesses userspace handles itself */
+        u64    recvb;               /* userspace receive buffer (as u64 handle) */
+        u16    rec_length;          /* size of that buffer */
+        u8     *addr_space_buffer; /* accessed by read/write/lock */
+};
+
+/* One in-flight or completed userspace request; queued on file_info's
+ * req_pending/req_complete lists and handed back through read(). */
+struct pending_request {
+        struct list_head list;
+        struct file_info *file_info;
+        struct hpsb_packet *packet;    /* async packet, if any (freed on release) */
+        struct iso_block_store *ibs;   /* shared iso/FCP payload, if any */
+        quadlet_t *data;               /* payload copied to req.recvb on read() */
+        int free_data;                 /* nonzero: kfree(data) when released */
+        struct raw1394_request req;    /* the userspace-visible request record */
+};
+
+/* Per-adapter bookkeeping: links the host into host_info_list and holds
+ * the files bound to it. */
+struct host_info {
+        struct list_head list;          /* link in global host_info_list */
+        struct hpsb_host *host;
+        struct list_head file_info_list;
+};
+
+#endif  /* IEEE1394_RAW1394_PRIVATE_H */
diff --git a/drivers/ieee1394/raw1394.c b/drivers/ieee1394/raw1394.c
new file mode 100644
index 000000000000..6a08a8982ea8
--- /dev/null
+++ b/drivers/ieee1394/raw1394.c
@@ -0,0 +1,2958 @@
+/*
+ * IEEE 1394 for Linux
+ *
+ * Raw interface to the bus
+ *
+ * Copyright (C) 1999, 2000 Andreas E. Bombe
+ *               2001, 2002 Manfred Weihs <weihs@ict.tuwien.ac.at>
+ *                     2002 Christian Toegel <christian.toegel@gmx.at>
+ *
+ * This code is licensed under the GPL.  See the file COPYING in the root
+ * directory of the kernel sources for details.
+ *
+ *
+ * Contributions:
+ *
+ * Manfred Weihs <weihs@ict.tuwien.ac.at>
+ *        configuration ROM manipulation
+ *        address range mapping
+ *        adaptation for new (transparent) loopback mechanism
+ *        sending of arbitrary async packets
+ * Christian Toegel <christian.toegel@gmx.at>
+ *        address range mapping
+ *        lock64 request
+ *        transmit physical packet
+ *        busreset notification control (switch on/off)
+ *        busreset with selection of type (short/long)
+ *        request_reply
+ */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/poll.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/smp_lock.h>
+#include <linux/interrupt.h>
+#include <linux/vmalloc.h>
+#include <linux/cdev.h>
+#include <asm/uaccess.h>
+#include <asm/atomic.h>
+#include <linux/devfs_fs_kernel.h>
+
+#include "csr1212.h"
+#include "ieee1394.h"
+#include "ieee1394_types.h"
+#include "ieee1394_core.h"
+#include "nodemgr.h"
+#include "hosts.h"
+#include "highlevel.h"
+#include "iso.h"
+#include "ieee1394_transactions.h"
+#include "raw1394.h"
+#include "raw1394-private.h"
+
+#define int2ptr(x) ((void __user *)(unsigned long)x)
+#define ptr2int(x) ((u64)(unsigned long)(void __user *)x)
+
+#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
+#define RAW1394_DEBUG
+#endif
+
+#ifdef RAW1394_DEBUG
+#define DBGMSG(fmt, args...) \
+printk(KERN_INFO "raw1394:" fmt "\n" , ## args)
+#else
+#define DBGMSG(fmt, args...)
+#endif
+
+static LIST_HEAD(host_info_list);
+static int host_count;
+static DEFINE_SPINLOCK(host_info_lock);
+static atomic_t internal_generation = ATOMIC_INIT(0);
+
+static atomic_t iso_buffer_size;
+static const int iso_buffer_max = 4 * 1024 * 1024;	/* 4 MB */
+
+static struct hpsb_highlevel raw1394_highlevel;
+
+static int arm_read(struct hpsb_host *host, int nodeid, quadlet_t * buffer,
+		    u64 addr, size_t length, u16 flags);
+static int arm_write(struct hpsb_host *host, int nodeid, int destid,
+		     quadlet_t * data, u64 addr, size_t length, u16 flags);
+static int arm_lock(struct hpsb_host *host, int nodeid, quadlet_t * store,
+		    u64 addr, quadlet_t data, quadlet_t arg, int ext_tcode,
+		    u16 flags);
+static int arm_lock64(struct hpsb_host *host, int nodeid, octlet_t * store,
+		      u64 addr, octlet_t data, octlet_t arg, int ext_tcode,
+		      u16 flags);
+static struct hpsb_address_ops arm_ops = {
+	.read = arm_read,
+	.write = arm_write,
+	.lock = arm_lock,
+	.lock64 = arm_lock64,
+};
+
+static void queue_complete_cb(struct pending_request *req);
+
+/* Allocate and zero a pending_request with the given allocation @flags
+ * (SLAB_KERNEL or SLAB_ATOMIC); returns NULL on allocation failure. */
+static struct pending_request *__alloc_pending_request(int flags)
+{
+	struct pending_request *req;
+
+	req = (struct pending_request *)kmalloc(sizeof(struct pending_request),
+						flags);
+	if (req != NULL) {
+		memset(req, 0, sizeof(struct pending_request));
+		INIT_LIST_HEAD(&req->list);
+	}
+
+	return req;
+}
+
+/* Sleeping-context wrapper around __alloc_pending_request(). */
+static inline struct pending_request *alloc_pending_request(void)
+{
+	return __alloc_pending_request(SLAB_KERNEL);
+}
+
+/* Release a pending_request: drop the shared iso-block reference (freeing
+ * it and crediting iso_buffer_size when the last ref goes), or free a
+ * privately owned data buffer, then the packet and the request itself. */
+static void free_pending_request(struct pending_request *req)
+{
+	if (req->ibs) {
+		if (atomic_dec_and_test(&req->ibs->refcount)) {
+			atomic_sub(req->ibs->data_size, &iso_buffer_size);
+			kfree(req->ibs);
+		}
+	} else if (req->free_data) {
+		kfree(req->data);
+	}
+	hpsb_free_packet(req->packet);
+	kfree(req);
+}
+
+/* fi->reqlists_lock must be taken */
+/* Move @req from its current list onto the owner's req_complete list,
+ * bump the completion semaphore (one up() per completed request, matched
+ * by a down() in raw1394_read) and wake pollers. */
+static void __queue_complete_req(struct pending_request *req)
+{
+	struct file_info *fi = req->file_info;
+	list_del(&req->list);
+	list_add_tail(&req->list, &fi->req_complete);
+
+	up(&fi->complete_sem);
+	wake_up_interruptible(&fi->poll_wait_complete);
+}
+
+/* Locked wrapper around __queue_complete_req(); safe to call from any
+ * context (irqsave). */
+static void queue_complete_req(struct pending_request *req)
+{
+	unsigned long flags;
+	struct file_info *fi = req->file_info;
+
+	spin_lock_irqsave(&fi->reqlists_lock, flags);
+	__queue_complete_req(req);
+	spin_unlock_irqrestore(&fi->reqlists_lock, flags);
+}
+
+/* Packet completion callback: translate the hpsb ack code (and the rcode
+ * from header word 1 for response packets) into a raw1394 error value,
+ * release the transaction label for transaction-type requests, and queue
+ * the request for delivery to userspace. */
+static void queue_complete_cb(struct pending_request *req)
+{
+	struct hpsb_packet *packet = req->packet;
+	int rcode = (packet->header[1] >> 12) & 0xf;
+
+	switch (packet->ack_code) {
+	case ACKX_NONE:
+	case ACKX_SEND_ERROR:
+		req->req.error = RAW1394_ERROR_SEND_ERROR;
+		break;
+	case ACKX_ABORTED:
+		req->req.error = RAW1394_ERROR_ABORTED;
+		break;
+	case ACKX_TIMEOUT:
+		req->req.error = RAW1394_ERROR_TIMEOUT;
+		break;
+	default:
+		/* pack ack code and rcode into one userspace-visible value */
+		req->req.error = (packet->ack_code << 16) | rcode;
+		break;
+	}
+
+	/* no response payload unless the transaction fully succeeded */
+	if (!((packet->ack_code == ACK_PENDING) && (rcode == RCODE_COMPLETE))) {
+		req->req.length = 0;
+	}
+
+	if ((req->req.type == RAW1394_REQ_ASYNC_READ) ||
+	    (req->req.type == RAW1394_REQ_ASYNC_WRITE) ||
+	    (req->req.type == RAW1394_REQ_ASYNC_STREAM) ||
+	    (req->req.type == RAW1394_REQ_LOCK) ||
+	    (req->req.type == RAW1394_REQ_LOCK64))
+		hpsb_free_tlabel(packet);
+
+	queue_complete_req(req);
+}
+
+/* Highlevel callback: a new 1394 adapter appeared.  Register it on the
+ * global host list and bump internal_generation so clients re-enumerate
+ * cards.  Allocation failure is silently tolerated (host just not listed). */
+static void add_host(struct hpsb_host *host)
+{
+	struct host_info *hi;
+	unsigned long flags;
+
+	hi = (struct host_info *)kmalloc(sizeof(struct host_info), GFP_KERNEL);
+
+	if (hi != NULL) {
+		INIT_LIST_HEAD(&hi->list);
+		hi->host = host;
+		INIT_LIST_HEAD(&hi->file_info_list);
+
+		spin_lock_irqsave(&host_info_lock, flags);
+		list_add_tail(&hi->list, &host_info_list);
+		host_count++;
+		spin_unlock_irqrestore(&host_info_lock, flags);
+	}
+
+	atomic_inc(&internal_generation);
+}
+
+/* Look up the host_info for @host on the global list, or NULL.
+ * NOTE(review): callers hold host_info_lock — this walks the list unlocked. */
+static struct host_info *find_host_info(struct hpsb_host *host)
+{
+	struct host_info *hi;
+
+	list_for_each_entry(hi, &host_info_list, list)
+	    if (hi->host == host)
+		return hi;
+
+	return NULL;
+}
+
+/* Highlevel callback: an adapter went away.  Unlink and free its
+ * host_info and bump internal_generation.  Complains (but returns
+ * cleanly) if the host was never registered. */
+static void remove_host(struct hpsb_host *host)
+{
+	struct host_info *hi;
+	unsigned long flags;
+
+	spin_lock_irqsave(&host_info_lock, flags);
+	hi = find_host_info(host);
+
+	if (hi != NULL) {
+		list_del(&hi->list);
+		host_count--;
+		/*
+		   FIXME: address ranges should be removed
+		   and fileinfo states should be initialized
+		   (including setting generation to
+		   internal-generation ...)
+		 */
+	}
+	spin_unlock_irqrestore(&host_info_lock, flags);
+
+	if (hi == NULL) {
+		printk(KERN_ERR "raw1394: attempt to remove unknown host "
+		       "0x%p\n", host);
+		return;
+	}
+
+	/* freed outside the lock; hi is already off the list */
+	kfree(hi);
+
+	atomic_inc(&internal_generation);
+}
+
+/* Highlevel callback on bus reset: for every file bound to @host that has
+ * reset notification enabled, queue a RAW1394_REQ_BUS_RESET event carrying
+ * the new generation, local node id and node count (plus the IRM node for
+ * protocol versions > 3).  Runs in atomic context (SLAB_ATOMIC alloc). */
+static void host_reset(struct hpsb_host *host)
+{
+	unsigned long flags;
+	struct host_info *hi;
+	struct file_info *fi;
+	struct pending_request *req;
+
+	spin_lock_irqsave(&host_info_lock, flags);
+	hi = find_host_info(host);
+
+	if (hi != NULL) {
+		list_for_each_entry(fi, &hi->file_info_list, list) {
+			if (fi->notification == RAW1394_NOTIFY_ON) {
+				req = __alloc_pending_request(SLAB_ATOMIC);
+
+				if (req != NULL) {
+					req->file_info = fi;
+					req->req.type = RAW1394_REQ_BUS_RESET;
+					req->req.generation =
+					    get_hpsb_generation(host);
+					/* misc = node_id:16 | node_count */
+					req->req.misc = (host->node_id << 16)
+					    | host->node_count;
+					if (fi->protocol_version > 3) {
+						req->req.misc |=
+						    (NODEID_TO_NODE
+						     (host->irm_id)
+						     << 8);
+					}
+
+					queue_complete_req(req);
+				}
+			}
+		}
+	}
+	spin_unlock_irqrestore(&host_info_lock, flags);
+}
+
+/* Highlevel callback: an isochronous packet arrived on @channel (old iso
+ * API).  The payload is copied once into a refcounted iso_block_store
+ * shared by every file listening on the channel; one pending_request per
+ * listener is queued.  Drops the packet when the global 4 MB buffer cap
+ * would be exceeded.  Completions are queued outside host_info_lock via a
+ * local list to avoid lock nesting. */
+static void iso_receive(struct hpsb_host *host, int channel, quadlet_t * data,
+			size_t length)
+{
+	unsigned long flags;
+	struct host_info *hi;
+	struct file_info *fi;
+	struct pending_request *req, *req_next;
+	struct iso_block_store *ibs = NULL;
+	LIST_HEAD(reqs);
+
+	if ((atomic_read(&iso_buffer_size) + length) > iso_buffer_max) {
+		HPSB_INFO("dropped iso packet");
+		return;
+	}
+
+	spin_lock_irqsave(&host_info_lock, flags);
+	hi = find_host_info(host);
+
+	if (hi != NULL) {
+		list_for_each_entry(fi, &hi->file_info_list, list) {
+			if (!(fi->listen_channels & (1ULL << channel)))
+				continue;
+
+			req = __alloc_pending_request(SLAB_ATOMIC);
+			if (!req)
+				break;
+
+			/* allocate the shared payload block lazily, on the
+			 * first interested listener */
+			if (!ibs) {
+				ibs = kmalloc(sizeof(struct iso_block_store)
+					      + length, SLAB_ATOMIC);
+				if (!ibs) {
+					kfree(req);
+					break;
+				}
+
+				atomic_add(length, &iso_buffer_size);
+				atomic_set(&ibs->refcount, 0);
+				ibs->data_size = length;
+				memcpy(ibs->data, data, length);
+			}
+
+			/* one reference per pending_request */
+			atomic_inc(&ibs->refcount);
+
+			req->file_info = fi;
+			req->ibs = ibs;
+			req->data = ibs->data;
+			req->req.type = RAW1394_REQ_ISO_RECEIVE;
+			req->req.generation = get_hpsb_generation(host);
+			req->req.misc = 0;
+			req->req.recvb = ptr2int(fi->iso_buffer);
+			req->req.length = min(length, fi->iso_buffer_length);
+
+			list_add_tail(&req->list, &reqs);
+		}
+	}
+	spin_unlock_irqrestore(&host_info_lock, flags);
+
+	list_for_each_entry_safe(req, req_next, &reqs, list)
+	    queue_complete_req(req);
+}
+
+/* Highlevel callback: an FCP command/response frame arrived.  Structure
+ * mirrors iso_receive(): one shared refcounted payload block, one queued
+ * pending_request per file with an FCP buffer registered; same 4 MB
+ * global cap and same deferred-completion pattern. */
+static void fcp_request(struct hpsb_host *host, int nodeid, int direction,
+			int cts, u8 * data, size_t length)
+{
+	unsigned long flags;
+	struct host_info *hi;
+	struct file_info *fi;
+	struct pending_request *req, *req_next;
+	struct iso_block_store *ibs = NULL;
+	LIST_HEAD(reqs);
+
+	if ((atomic_read(&iso_buffer_size) + length) > iso_buffer_max) {
+		HPSB_INFO("dropped fcp request");
+		return;
+	}
+
+	spin_lock_irqsave(&host_info_lock, flags);
+	hi = find_host_info(host);
+
+	if (hi != NULL) {
+		list_for_each_entry(fi, &hi->file_info_list, list) {
+			if (!fi->fcp_buffer)
+				continue;
+
+			req = __alloc_pending_request(SLAB_ATOMIC);
+			if (!req)
+				break;
+
+			if (!ibs) {
+				ibs = kmalloc(sizeof(struct iso_block_store)
+					      + length, SLAB_ATOMIC);
+				if (!ibs) {
+					kfree(req);
+					break;
+				}
+
+				atomic_add(length, &iso_buffer_size);
+				atomic_set(&ibs->refcount, 0);
+				ibs->data_size = length;
+				memcpy(ibs->data, data, length);
+			}
+
+			atomic_inc(&ibs->refcount);
+
+			req->file_info = fi;
+			req->ibs = ibs;
+			req->data = ibs->data;
+			req->req.type = RAW1394_REQ_FCP_REQUEST;
+			/* misc = direction:16 | sender node id */
+			req->req.misc = nodeid | (direction << 16);
+			req->req.generation = get_hpsb_generation(host);
+			req->req.recvb = ptr2int(fi->fcp_buffer);
+			req->req.length = length;
+
+			list_add_tail(&req->list, &reqs);
+		}
+	}
+	spin_unlock_irqrestore(&host_info_lock, flags);
+
+	list_for_each_entry_safe(req, req_next, &reqs, list)
+	    queue_complete_req(req);
+}
+
+/* read(2): deliver exactly one completed raw1394_request to userspace.
+ * Blocks on complete_sem unless O_NONBLOCK; the payload (if any) is
+ * copied to the request's recvb pointer first, then the request record
+ * itself to @buffer.  The request is always freed before returning. */
+static ssize_t raw1394_read(struct file *file, char __user * buffer,
+			    size_t count, loff_t * offset_is_ignored)
+{
+	struct file_info *fi = (struct file_info *)file->private_data;
+	struct list_head *lh;
+	struct pending_request *req;
+	ssize_t ret;
+
+	/* only whole request records can be read */
+	if (count != sizeof(struct raw1394_request)) {
+		return -EINVAL;
+	}
+
+	if (!access_ok(VERIFY_WRITE, buffer, count)) {
+		return -EFAULT;
+	}
+
+	if (file->f_flags & O_NONBLOCK) {
+		if (down_trylock(&fi->complete_sem)) {
+			return -EAGAIN;
+		}
+	} else {
+		if (down_interruptible(&fi->complete_sem)) {
+			return -ERESTARTSYS;
+		}
+	}
+
+	/* semaphore guarantees req_complete is non-empty here */
+	spin_lock_irq(&fi->reqlists_lock);
+	lh = fi->req_complete.next;
+	list_del(lh);
+	spin_unlock_irq(&fi->reqlists_lock);
+
+	req = list_entry(lh, struct pending_request, list);
+
+	if (req->req.length) {
+		/* payload copy failure is reported in-band, not as -EFAULT */
+		if (copy_to_user(int2ptr(req->req.recvb), req->data,
+				 req->req.length)) {
+			req->req.error = RAW1394_ERROR_MEMFAULT;
+		}
+	}
+	if (copy_to_user(buffer, &req->req, sizeof(req->req))) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	ret = (ssize_t) sizeof(struct raw1394_request);
+      out:
+	free_pending_request(req);
+	return ret;
+}
+
+/* Handle a write() while the fd is freshly opened: only
+ * RAW1394_REQ_INITIALIZE is valid.  Accepts the current kernel API
+ * version or legacy version 3, records it, and advances the fd to the
+ * 'initialized' state; anything else is answered with an error event. */
+static int state_opened(struct file_info *fi, struct pending_request *req)
+{
+	if (req->req.type == RAW1394_REQ_INITIALIZE) {
+		switch (req->req.misc) {
+		case RAW1394_KERNELAPI_VERSION:
+		case 3:
+			fi->state = initialized;
+			fi->protocol_version = req->req.misc;
+			req->req.error = RAW1394_ERROR_NONE;
+			req->req.generation = atomic_read(&internal_generation);
+			break;
+
+		default:
+			/* report the version we support back to userspace */
+			req->req.error = RAW1394_ERROR_COMPAT;
+			req->req.misc = RAW1394_KERNELAPI_VERSION;
+		}
+	} else {
+		req->req.error = RAW1394_ERROR_STATE_ORDER;
+	}
+
+	req->req.length = 0;
+	queue_complete_req(req);
+	return sizeof(struct raw1394_request);
+}
+
+/* Handle a write() after INITIALIZE but before a card is selected:
+ * RAW1394_REQ_LIST_CARDS returns an array of raw1394_khost_list entries,
+ * RAW1394_REQ_SET_CARD binds the fd to the misc-th host and advances it
+ * to 'connected'.  A stale internal generation is rejected first so the
+ * client re-enumerates after hotplug. */
+static int state_initialized(struct file_info *fi, struct pending_request *req)
+{
+	struct host_info *hi;
+	struct raw1394_khost_list *khl;
+
+	if (req->req.generation != atomic_read(&internal_generation)) {
+		req->req.error = RAW1394_ERROR_GENERATION;
+		req->req.generation = atomic_read(&internal_generation);
+		req->req.length = 0;
+		queue_complete_req(req);
+		return sizeof(struct raw1394_request);
+	}
+
+	switch (req->req.type) {
+	case RAW1394_REQ_LIST_CARDS:
+		spin_lock_irq(&host_info_lock);
+		khl = kmalloc(sizeof(struct raw1394_khost_list) * host_count,
+			      SLAB_ATOMIC);
+
+		if (khl != NULL) {
+			req->req.misc = host_count;
+			req->data = (quadlet_t *) khl;
+
+			/* fill one entry per registered host */
+			list_for_each_entry(hi, &host_info_list, list) {
+				khl->nodes = hi->host->node_count;
+				strcpy(khl->name, hi->host->driver->name);
+				khl++;
+			}
+		}
+		spin_unlock_irq(&host_info_lock);
+
+		if (khl != NULL) {
+			req->req.error = RAW1394_ERROR_NONE;
+			req->req.length = min(req->req.length,
+					      (u32) (sizeof
+						     (struct raw1394_khost_list)
+						     * req->req.misc));
+			/* khl is owned by the request; freed on release */
+			req->free_data = 1;
+		} else {
+			return -ENOMEM;
+		}
+		break;
+
+	case RAW1394_REQ_SET_CARD:
+		spin_lock_irq(&host_info_lock);
+		if (req->req.misc < host_count) {
+			/* walk to the misc-th host on the list */
+			list_for_each_entry(hi, &host_info_list, list) {
+				if (!req->req.misc--)
+					break;
+			}
+			get_device(&hi->host->device);	// XXX Need to handle failure case
+			list_add_tail(&fi->list, &hi->file_info_list);
+			fi->host = hi->host;
+			fi->state = connected;
+
+			req->req.error = RAW1394_ERROR_NONE;
+			req->req.generation = get_hpsb_generation(fi->host);
+			/* misc = node_id:16 | node_count (IRM node at :8 for v>3) */
+			req->req.misc = (fi->host->node_id << 16)
+			    | fi->host->node_count;
+			if (fi->protocol_version > 3) {
+				req->req.misc |=
+				    NODEID_TO_NODE(fi->host->irm_id) << 8;
+			}
+		} else {
+			req->req.error = RAW1394_ERROR_INVALID_ARG;
+		}
+		spin_unlock_irq(&host_info_lock);
+
+		req->req.length = 0;
+		break;
+
+	default:
+		req->req.error = RAW1394_ERROR_STATE_ORDER;
+		req->req.length = 0;
+		break;
+	}
+
+	queue_complete_req(req);
+	return sizeof(struct raw1394_request);
+}
+
+/* Old-iso-API listen control: a non-negative req.misc subscribes to that
+ * channel (registering the receive buffer from recvb/length), a negative
+ * value is the one's complement of the channel to unsubscribe.  The
+ * channel bitmask lives in fi->listen_channels. */
+static void handle_iso_listen(struct file_info *fi, struct pending_request *req)
+{
+	int channel = req->req.misc;
+
+	spin_lock_irq(&host_info_lock);
+	if ((channel > 63) || (channel < -64)) {
+		req->req.error = RAW1394_ERROR_INVALID_ARG;
+	} else if (channel >= 0) {
+		/* allocate channel req.misc */
+		if (fi->listen_channels & (1ULL << channel)) {
+			req->req.error = RAW1394_ERROR_ALREADY;
+		} else {
+			if (hpsb_listen_channel
+			    (&raw1394_highlevel, fi->host, channel)) {
+				req->req.error = RAW1394_ERROR_ALREADY;
+			} else {
+				fi->listen_channels |= 1ULL << channel;
+				fi->iso_buffer = int2ptr(req->req.recvb);
+				fi->iso_buffer_length = req->req.length;
+			}
+		}
+	} else {
+		/* deallocate channel (one's complement neg) req.misc */
+		channel = ~channel;
+
+		if (fi->listen_channels & (1ULL << channel)) {
+			hpsb_unlisten_channel(&raw1394_highlevel, fi->host,
+					      channel);
+			fi->listen_channels &= ~(1ULL << channel);
+		} else {
+			req->req.error = RAW1394_ERROR_INVALID_ARG;
+		}
+	}
+
+	req->req.length = 0;
+	queue_complete_req(req);
+	spin_unlock_irq(&host_info_lock);
+}
+
+/* FCP listen control: nonzero req.misc registers the userspace buffer at
+ * recvb as the FCP sink, zero unregisters it.  Registering twice (or
+ * unregistering with none set) yields RAW1394_ERROR_ALREADY. */
+static void handle_fcp_listen(struct file_info *fi, struct pending_request *req)
+{
+	if (req->req.misc) {
+		if (fi->fcp_buffer) {
+			req->req.error = RAW1394_ERROR_ALREADY;
+		} else {
+			fi->fcp_buffer = int2ptr(req->req.recvb);
+		}
+	} else {
+		if (!fi->fcp_buffer) {
+			req->req.error = RAW1394_ERROR_ALREADY;
+		} else {
+			fi->fcp_buffer = NULL;
+		}
+	}
+
+	req->req.length = 0;
+	queue_complete_req(req);
+}
+
+/* Build and send one asynchronous transaction to @node on behalf of
+ * userspace: read, write, stream, lock or lock64 according to req.type.
+ * The 1394 destination offset is the low 48 bits of req.address.  On any
+ * in-band error the request completes immediately; otherwise it is put
+ * on req_pending and completes from queue_complete_cb().  Returns the
+ * number of bytes consumed from write(), or -ENOMEM. */
+static int handle_async_request(struct file_info *fi,
+				struct pending_request *req, int node)
+{
+	struct hpsb_packet *packet = NULL;
+	u64 addr = req->req.address & 0xffffffffffffULL;
+
+	switch (req->req.type) {
+	case RAW1394_REQ_ASYNC_READ:
+		DBGMSG("read_request called");
+		packet =
+		    hpsb_make_readpacket(fi->host, node, addr, req->req.length);
+
+		if (!packet)
+			return -ENOMEM;
+
+		/* quadlet reads return their data in header word 3 */
+		if (req->req.length == 4)
+			req->data = &packet->header[3];
+		else
+			req->data = packet->data;
+
+		break;
+
+	case RAW1394_REQ_ASYNC_WRITE:
+		DBGMSG("write_request called");
+
+		packet = hpsb_make_writepacket(fi->host, node, addr, NULL,
+					       req->req.length);
+		if (!packet)
+			return -ENOMEM;
+
+		/* quadlet writes carry their payload in header word 3 */
+		if (req->req.length == 4) {
+			if (copy_from_user
+			    (&packet->header[3], int2ptr(req->req.sendb),
+			     req->req.length))
+				req->req.error = RAW1394_ERROR_MEMFAULT;
+		} else {
+			if (copy_from_user
+			    (packet->data, int2ptr(req->req.sendb),
+			     req->req.length))
+				req->req.error = RAW1394_ERROR_MEMFAULT;
+		}
+
+		req->req.length = 0;
+		break;
+
+	case RAW1394_REQ_ASYNC_STREAM:
+		DBGMSG("stream_request called");
+
+		/* node carries the channel; misc packs tag and sync fields */
+		packet =
+		    hpsb_make_streampacket(fi->host, NULL, req->req.length,
+					   node & 0x3f /*channel */ ,
+					   (req->req.misc >> 16) & 0x3,
+					   req->req.misc & 0xf);
+		if (!packet)
+			return -ENOMEM;
+
+		if (copy_from_user(packet->data, int2ptr(req->req.sendb),
+				   req->req.length))
+			req->req.error = RAW1394_ERROR_MEMFAULT;
+
+		req->req.length = 0;
+		break;
+
+	case RAW1394_REQ_LOCK:
+		DBGMSG("lock_request called");
+		/* fetch_add/little_add take one operand, others take two */
+		if ((req->req.misc == EXTCODE_FETCH_ADD)
+		    || (req->req.misc == EXTCODE_LITTLE_ADD)) {
+			if (req->req.length != 4) {
+				req->req.error = RAW1394_ERROR_INVALID_ARG;
+				break;
+			}
+		} else {
+			if (req->req.length != 8) {
+				req->req.error = RAW1394_ERROR_INVALID_ARG;
+				break;
+			}
+		}
+
+		packet = hpsb_make_lockpacket(fi->host, node, addr,
+					      req->req.misc, NULL, 0);
+		if (!packet)
+			return -ENOMEM;
+
+		if (copy_from_user(packet->data, int2ptr(req->req.sendb),
+				   req->req.length)) {
+			req->req.error = RAW1394_ERROR_MEMFAULT;
+			break;
+		}
+
+		req->data = packet->data;
+		req->req.length = 4;
+		break;
+
+	case RAW1394_REQ_LOCK64:
+		DBGMSG("lock64_request called");
+		if ((req->req.misc == EXTCODE_FETCH_ADD)
+		    || (req->req.misc == EXTCODE_LITTLE_ADD)) {
+			if (req->req.length != 8) {
+				req->req.error = RAW1394_ERROR_INVALID_ARG;
+				break;
+			}
+		} else {
+			if (req->req.length != 16) {
+				req->req.error = RAW1394_ERROR_INVALID_ARG;
+				break;
+			}
+		}
+		packet = hpsb_make_lock64packet(fi->host, node, addr,
+						req->req.misc, NULL, 0);
+		if (!packet)
+			return -ENOMEM;
+
+		if (copy_from_user(packet->data, int2ptr(req->req.sendb),
+				   req->req.length)) {
+			req->req.error = RAW1394_ERROR_MEMFAULT;
+			break;
+		}
+
+		req->data = packet->data;
+		req->req.length = 8;
+		break;
+
+	default:
+		req->req.error = RAW1394_ERROR_STATE_ORDER;
+	}
+
+	req->packet = packet;
+
+	/* early completion path for any error set above */
+	if (req->req.error) {
+		req->req.length = 0;
+		queue_complete_req(req);
+		return sizeof(struct raw1394_request);
+	}
+
+	hpsb_set_packet_complete_task(packet,
+				      (void (*)(void *))queue_complete_cb, req);
+
+	spin_lock_irq(&fi->reqlists_lock);
+	list_add_tail(&req->list, &fi->req_pending);
+	spin_unlock_irq(&fi->reqlists_lock);
+
+	packet->generation = req->req.generation;
+
+	if (hpsb_send_packet(packet) < 0) {
+		req->req.error = RAW1394_ERROR_SEND_ERROR;
+		req->req.length = 0;
+		hpsb_free_tlabel(packet);
+		queue_complete_req(req);
+	}
+	return sizeof(struct raw1394_request);
+}
+
+/* Transmit one isochronous packet on @channel (old iso API): tag/sync
+ * come from req.misc, the speed code from the low bits of req.address,
+ * the payload from req.sendb.  The request completes from the packet's
+ * completion task (queue_complete_req directly — no ack translation). */
+static int handle_iso_send(struct file_info *fi, struct pending_request *req,
+			   int channel)
+{
+	struct hpsb_packet *packet;
+
+	packet = hpsb_make_isopacket(fi->host, req->req.length, channel & 0x3f,
+				     (req->req.misc >> 16) & 0x3,
+				     req->req.misc & 0xf);
+	if (!packet)
+		return -ENOMEM;
+
+	packet->speed_code = req->req.address & 0x3;
+
+	req->packet = packet;
+
+	if (copy_from_user(packet->data, int2ptr(req->req.sendb),
+			   req->req.length)) {
+		req->req.error = RAW1394_ERROR_MEMFAULT;
+		req->req.length = 0;
+		queue_complete_req(req);
+		return sizeof(struct raw1394_request);
+	}
+
+	req->req.length = 0;
+	hpsb_set_packet_complete_task(packet,
+				      (void (*)(void *))queue_complete_req,
+				      req);
+
+	spin_lock_irq(&fi->reqlists_lock);
+	list_add_tail(&req->list, &fi->req_pending);
+	spin_unlock_irq(&fi->reqlists_lock);
+
+	/* Update the generation of the packet just before sending. */
+	packet->generation = req->req.generation;
+
+	if (hpsb_send_packet(packet) < 0) {
+		req->req.error = RAW1394_ERROR_SEND_ERROR;
+		queue_complete_req(req);
+	}
+
+	return sizeof(struct raw1394_request);
+}
+
+/* Send an arbitrary async packet assembled entirely by userspace:
+ * req.misc packs header_length (low 16 bits) and an expect_response flag
+ * (high 16).  The raw header and trailing payload are both read from
+ * req.sendb; node id, tcode and tlabel are decoded out of header word 0.
+ * Minimum header is 12 bytes and it must fit inside req.length. */
+static int handle_async_send(struct file_info *fi, struct pending_request *req)
+{
+	struct hpsb_packet *packet;
+	int header_length = req->req.misc & 0xffff;
+	int expect_response = req->req.misc >> 16;
+
+	if ((header_length > req->req.length) || (header_length < 12)) {
+		req->req.error = RAW1394_ERROR_INVALID_ARG;
+		req->req.length = 0;
+		queue_complete_req(req);
+		return sizeof(struct raw1394_request);
+	}
+
+	packet = hpsb_alloc_packet(req->req.length - header_length);
+	req->packet = packet;
+	if (!packet)
+		return -ENOMEM;
+
+	if (copy_from_user(packet->header, int2ptr(req->req.sendb),
+			   header_length)) {
+		req->req.error = RAW1394_ERROR_MEMFAULT;
+		req->req.length = 0;
+		queue_complete_req(req);
+		return sizeof(struct raw1394_request);
+	}
+
+	/* payload follows the header in the userspace buffer */
+	if (copy_from_user
+	    (packet->data, int2ptr(req->req.sendb) + header_length,
+	     packet->data_size)) {
+		req->req.error = RAW1394_ERROR_MEMFAULT;
+		req->req.length = 0;
+		queue_complete_req(req);
+		return sizeof(struct raw1394_request);
+	}
+
+	/* decode addressing/transaction fields from the raw header */
+	packet->type = hpsb_async;
+	packet->node_id = packet->header[0] >> 16;
+	packet->tcode = (packet->header[0] >> 4) & 0xf;
+	packet->tlabel = (packet->header[0] >> 10) & 0x3f;
+	packet->host = fi->host;
+	packet->expect_response = expect_response;
+	packet->header_size = header_length;
+	packet->data_size = req->req.length - header_length;
+
+	req->req.length = 0;
+	hpsb_set_packet_complete_task(packet,
+				      (void (*)(void *))queue_complete_cb, req);
+
+	spin_lock_irq(&fi->reqlists_lock);
+	list_add_tail(&req->list, &fi->req_pending);
+	spin_unlock_irq(&fi->reqlists_lock);
+
+	/* Update the generation of the packet just before sending. */
+	packet->generation = req->req.generation;
+
+	if (hpsb_send_packet(packet) < 0) {
+		req->req.error = RAW1394_ERROR_SEND_ERROR;
+		queue_complete_req(req);
+	}
+
+	return sizeof(struct raw1394_request);
+}
+
+/*
+ * ARM (address range mapping) read handler.  Invoked by the hpsb core when
+ * another node issues a read request falling into an address range a
+ * raw1394 client has registered.  Serves the read out of the client's
+ * backing buffer (when access rights allow) and, when notification is
+ * enabled for reads, queues an arm_request_response record to the owning
+ * client.  Returns an IEEE 1394 response code (RCODE_*).
+ */
+static int arm_read(struct hpsb_host *host, int nodeid, quadlet_t * buffer,
+		    u64 addr, size_t length, u16 flags)
+{
+	struct pending_request *req;
+	struct host_info *hi;
+	struct file_info *fi = NULL;
+	struct list_head *entry;
+	struct arm_addr *arm_addr = NULL;
+	struct arm_request *arm_req = NULL;
+	struct arm_response *arm_resp = NULL;
+	int found = 0, size = 0, rcode = -1;
+	struct arm_request_response *arm_req_resp = NULL;
+
+	DBGMSG("arm_read  called by node: %X"
+	       "addr: %4.4x %8.8x length: %Zu", nodeid,
+	       (u16) ((addr >> 32) & 0xFFFF), (u32) (addr & 0xFFFFFFFF),
+	       length);
+	/* host_info_lock is held for the whole body: it protects the
+	 * host_info/file_info/addr lists walked below. */
+	spin_lock(&host_info_lock);
+	hi = find_host_info(host);	/* search address-entry */
+	if (hi != NULL) {
+		list_for_each_entry(fi, &hi->file_info_list, list) {
+			entry = fi->addr_list.next;
+			while (entry != &(fi->addr_list)) {
+				arm_addr =
+				    list_entry(entry, struct arm_addr,
+					       addr_list);
+				/* the registered range must fully contain
+				 * [addr, addr + length) */
+				if (((arm_addr->start) <= (addr))
+				    && ((arm_addr->end) >= (addr + length))) {
+					found = 1;
+					break;
+				}
+				entry = entry->next;
+			}
+			if (found) {
+				break;
+			}
+		}
+	}
+	rcode = -1;
+	if (!found) {
+		printk(KERN_ERR "raw1394: arm_read FAILED addr_entry not found"
+		       " -> rcode_address_error\n");
+		spin_unlock(&host_info_lock);
+		return (RCODE_ADDRESS_ERROR);
+	} else {
+		DBGMSG("arm_read addr_entry FOUND");
+	}
+	/* request larger than the client's advertised max block length */
+	if (arm_addr->rec_length < length) {
+		DBGMSG("arm_read blocklength too big -> rcode_data_error");
+		rcode = RCODE_DATA_ERROR;	/* hardware error, data is unavailable */
+	}
+	if (rcode == -1) {
+		/* serve the read from the backing buffer unless the client
+		 * reserved read transactions for itself
+		 * (client_transactions) */
+		if (arm_addr->access_rights & ARM_READ) {
+			if (!(arm_addr->client_transactions & ARM_READ)) {
+				memcpy(buffer,
+				       (arm_addr->addr_space_buffer) + (addr -
+									(arm_addr->
+									 start)),
+				       length);
+				DBGMSG("arm_read -> (rcode_complete)");
+				rcode = RCODE_COMPLETE;
+			}
+		} else {
+			rcode = RCODE_TYPE_ERROR;	/* function not allowed */
+			DBGMSG("arm_read -> rcode_type_error (access denied)");
+		}
+	}
+	if (arm_addr->notification_options & ARM_READ) {
+		DBGMSG("arm_read -> entering notification-section");
+		req = __alloc_pending_request(SLAB_ATOMIC);
+		if (!req) {
+			DBGMSG("arm_read -> rcode_conflict_error");
+			spin_unlock(&host_info_lock);
+			return (RCODE_CONFLICT_ERROR);	/* A resource conflict was detected.
+							   The request may be retried */
+		}
+		/* on success the returned payload is appended after the
+		 * fixed request/response records */
+		if (rcode == RCODE_COMPLETE) {
+			size =
+			    sizeof(struct arm_request) +
+			    sizeof(struct arm_response) +
+			    length * sizeof(byte_t) +
+			    sizeof(struct arm_request_response);
+		} else {
+			size =
+			    sizeof(struct arm_request) +
+			    sizeof(struct arm_response) +
+			    sizeof(struct arm_request_response);
+		}
+		req->data = kmalloc(size, SLAB_ATOMIC);
+		if (!(req->data)) {
+			free_pending_request(req);
+			DBGMSG("arm_read -> rcode_conflict_error");
+			spin_unlock(&host_info_lock);
+			return (RCODE_CONFLICT_ERROR);	/* A resource conflict was detected.
+							   The request may be retried */
+		}
+		req->free_data = 1;
+		req->file_info = fi;
+		req->req.type = RAW1394_REQ_ARM;
+		req->req.generation = get_hpsb_generation(host);
+		/* misc: length in the high 16 bits, transaction type in the
+		 * low byte */
+		req->req.misc =
+		    (((length << 16) & (0xFFFF0000)) | (ARM_READ & 0xFF));
+		req->req.tag = arm_addr->arm_tag;
+		req->req.recvb = arm_addr->recvb;
+		req->req.length = size;
+		/* req->data layout:
+		 * arm_request_response | arm_request | arm_response | data */
+		arm_req_resp = (struct arm_request_response *)(req->data);
+		arm_req = (struct arm_request *)((byte_t *) (req->data) +
+						 (sizeof
+						  (struct
+						   arm_request_response)));
+		arm_resp =
+		    (struct arm_response *)((byte_t *) (arm_req) +
+					    (sizeof(struct arm_request)));
+		arm_req->buffer = NULL;
+		arm_resp->buffer = NULL;
+		if (rcode == RCODE_COMPLETE) {
+			byte_t *buf =
+			    (byte_t *) arm_resp + sizeof(struct arm_response);
+			memcpy(buf,
+			       (arm_addr->addr_space_buffer) + (addr -
+								(arm_addr->
+								 start)),
+			       length);
+			/* buffer pointers are rewritten as offsets into the
+			 * client's receive buffer (recvb), matching the
+			 * layout above */
+			arm_resp->buffer =
+			    int2ptr((arm_addr->recvb) +
+				    sizeof(struct arm_request_response) +
+				    sizeof(struct arm_request) +
+				    sizeof(struct arm_response));
+		}
+		arm_resp->buffer_length =
+		    (rcode == RCODE_COMPLETE) ? length : 0;
+		arm_resp->response_code = rcode;
+		arm_req->buffer_length = 0;
+		arm_req->generation = req->req.generation;
+		arm_req->extended_transaction_code = 0;
+		arm_req->destination_offset = addr;
+		arm_req->source_nodeid = nodeid;
+		arm_req->destination_nodeid = host->node_id;
+		/* tlabel/tcode were packed into flags by the caller */
+		arm_req->tlabel = (flags >> 10) & 0x3f;
+		arm_req->tcode = (flags >> 4) & 0x0f;
+		arm_req_resp->request = int2ptr((arm_addr->recvb) +
+						sizeof(struct
+						       arm_request_response));
+		arm_req_resp->response =
+		    int2ptr((arm_addr->recvb) +
+			    sizeof(struct arm_request_response) +
+			    sizeof(struct arm_request));
+		queue_complete_req(req);
+	}
+	spin_unlock(&host_info_lock);
+	return (rcode);
+}
+
+/*
+ * ARM write handler.  Invoked by the hpsb core when another node writes
+ * into a registered address range.  Stores the data into the client's
+ * backing buffer (when access rights allow) and, when notification is
+ * enabled for writes, queues an arm_request_response record carrying a
+ * copy of the written data.  Returns an IEEE 1394 response code.
+ */
+static int arm_write(struct hpsb_host *host, int nodeid, int destid,
+		     quadlet_t * data, u64 addr, size_t length, u16 flags)
+{
+	struct pending_request *req;
+	struct host_info *hi;
+	struct file_info *fi = NULL;
+	struct list_head *entry;
+	struct arm_addr *arm_addr = NULL;
+	struct arm_request *arm_req = NULL;
+	struct arm_response *arm_resp = NULL;
+	int found = 0, size = 0, rcode = -1, length_conflict = 0;
+	struct arm_request_response *arm_req_resp = NULL;
+
+	DBGMSG("arm_write called by node: %X"
+	       "addr: %4.4x %8.8x length: %Zu", nodeid,
+	       (u16) ((addr >> 32) & 0xFFFF), (u32) (addr & 0xFFFFFFFF),
+	       length);
+	/* host_info_lock protects the list walk and stays held until the
+	 * notification has been queued */
+	spin_lock(&host_info_lock);
+	hi = find_host_info(host);	/* search address-entry */
+	if (hi != NULL) {
+		list_for_each_entry(fi, &hi->file_info_list, list) {
+			entry = fi->addr_list.next;
+			while (entry != &(fi->addr_list)) {
+				arm_addr =
+				    list_entry(entry, struct arm_addr,
+					       addr_list);
+				/* range must fully contain the write */
+				if (((arm_addr->start) <= (addr))
+				    && ((arm_addr->end) >= (addr + length))) {
+					found = 1;
+					break;
+				}
+				entry = entry->next;
+			}
+			if (found) {
+				break;
+			}
+		}
+	}
+	rcode = -1;
+	if (!found) {
+		printk(KERN_ERR "raw1394: arm_write FAILED addr_entry not found"
+		       " -> rcode_address_error\n");
+		spin_unlock(&host_info_lock);
+		return (RCODE_ADDRESS_ERROR);
+	} else {
+		DBGMSG("arm_write addr_entry FOUND");
+	}
+	/* write larger than the client's advertised max block length */
+	if (arm_addr->rec_length < length) {
+		DBGMSG("arm_write blocklength too big -> rcode_data_error");
+		length_conflict = 1;
+		rcode = RCODE_DATA_ERROR;	/* hardware error, data is unavailable */
+	}
+	if (rcode == -1) {
+		/* commit to the backing buffer unless the client reserved
+		 * write transactions for itself */
+		if (arm_addr->access_rights & ARM_WRITE) {
+			if (!(arm_addr->client_transactions & ARM_WRITE)) {
+				memcpy((arm_addr->addr_space_buffer) +
+				       (addr - (arm_addr->start)), data,
+				       length);
+				DBGMSG("arm_write -> (rcode_complete)");
+				rcode = RCODE_COMPLETE;
+			}
+		} else {
+			rcode = RCODE_TYPE_ERROR;	/* function not allowed */
+			DBGMSG("arm_write -> rcode_type_error (access denied)");
+		}
+	}
+	if (arm_addr->notification_options & ARM_WRITE) {
+		DBGMSG("arm_write -> entering notification-section");
+		req = __alloc_pending_request(SLAB_ATOMIC);
+		if (!req) {
+			DBGMSG("arm_write -> rcode_conflict_error");
+			spin_unlock(&host_info_lock);
+			return (RCODE_CONFLICT_ERROR);	/* A resource conflict was detected.
+							   The request my be retried */
+		}
+		/* the written data is always appended after the fixed
+		 * request/response records */
+		size =
+		    sizeof(struct arm_request) + sizeof(struct arm_response) +
+		    (length) * sizeof(byte_t) +
+		    sizeof(struct arm_request_response);
+		req->data = kmalloc(size, SLAB_ATOMIC);
+		if (!(req->data)) {
+			free_pending_request(req);
+			DBGMSG("arm_write -> rcode_conflict_error");
+			spin_unlock(&host_info_lock);
+			return (RCODE_CONFLICT_ERROR);	/* A resource conflict was detected.
+							   The request may be retried */
+		}
+		req->free_data = 1;
+		req->file_info = fi;
+		req->req.type = RAW1394_REQ_ARM;
+		req->req.generation = get_hpsb_generation(host);
+		/* misc: length in the high 16 bits, transaction type in the
+		 * low byte */
+		req->req.misc =
+		    (((length << 16) & (0xFFFF0000)) | (ARM_WRITE & 0xFF));
+		req->req.tag = arm_addr->arm_tag;
+		req->req.recvb = arm_addr->recvb;
+		req->req.length = size;
+		/* req->data layout:
+		 * arm_request_response | arm_request | arm_response | data */
+		arm_req_resp = (struct arm_request_response *)(req->data);
+		arm_req = (struct arm_request *)((byte_t *) (req->data) +
+						 (sizeof
+						  (struct
+						   arm_request_response)));
+		arm_resp =
+		    (struct arm_response *)((byte_t *) (arm_req) +
+					    (sizeof(struct arm_request)));
+		arm_resp->buffer = NULL;
+		memcpy((byte_t *) arm_resp + sizeof(struct arm_response),
+		       data, length);
+		/* buffer pointer rewritten as an offset into the client's
+		 * receive buffer (recvb) */
+		arm_req->buffer = int2ptr((arm_addr->recvb) +
+					  sizeof(struct arm_request_response) +
+					  sizeof(struct arm_request) +
+					  sizeof(struct arm_response));
+		arm_req->buffer_length = length;
+		arm_req->generation = req->req.generation;
+		arm_req->extended_transaction_code = 0;
+		arm_req->destination_offset = addr;
+		arm_req->source_nodeid = nodeid;
+		arm_req->destination_nodeid = destid;
+		/* tlabel/tcode were packed into flags by the caller */
+		arm_req->tlabel = (flags >> 10) & 0x3f;
+		arm_req->tcode = (flags >> 4) & 0x0f;
+		arm_resp->buffer_length = 0;
+		arm_resp->response_code = rcode;
+		arm_req_resp->request = int2ptr((arm_addr->recvb) +
+						sizeof(struct
+						       arm_request_response));
+		arm_req_resp->response =
+		    int2ptr((arm_addr->recvb) +
+			    sizeof(struct arm_request_response) +
+			    sizeof(struct arm_request));
+		queue_complete_req(req);
+	}
+	spin_unlock(&host_info_lock);
+	return (rcode);
+}
+
+/*
+ * ARM lock handler (32-bit).  Invoked by the hpsb core for a quadlet lock
+ * transaction (mask_swap, compare_swap, fetch_add, little_add, bounded_add,
+ * wrap_add) targeting a registered address range.  On success the old value
+ * is returned through *store and the new value is committed to the backing
+ * buffer.  Optionally queues a notification.  Returns an IEEE 1394 rcode.
+ */
+static int arm_lock(struct hpsb_host *host, int nodeid, quadlet_t * store,
+		    u64 addr, quadlet_t data, quadlet_t arg, int ext_tcode,
+		    u16 flags)
+{
+	struct pending_request *req;
+	struct host_info *hi;
+	struct file_info *fi = NULL;
+	struct list_head *entry;
+	struct arm_addr *arm_addr = NULL;
+	struct arm_request *arm_req = NULL;
+	struct arm_response *arm_resp = NULL;
+	int found = 0, size = 0, rcode = -1;
+	quadlet_t old, new;
+	struct arm_request_response *arm_req_resp = NULL;
+
+	/* fetch_add/little_add take no arg operand, hence the two debug
+	 * formats */
+	if (((ext_tcode & 0xFF) == EXTCODE_FETCH_ADD) ||
+	    ((ext_tcode & 0xFF) == EXTCODE_LITTLE_ADD)) {
+		DBGMSG("arm_lock  called by node: %X "
+		       "addr: %4.4x %8.8x extcode: %2.2X data: %8.8X",
+		       nodeid, (u16) ((addr >> 32) & 0xFFFF),
+		       (u32) (addr & 0xFFFFFFFF), ext_tcode & 0xFF,
+		       be32_to_cpu(data));
+	} else {
+		DBGMSG("arm_lock  called by node: %X "
+		       "addr: %4.4x %8.8x extcode: %2.2X data: %8.8X arg: %8.8X",
+		       nodeid, (u16) ((addr >> 32) & 0xFFFF),
+		       (u32) (addr & 0xFFFFFFFF), ext_tcode & 0xFF,
+		       be32_to_cpu(data), be32_to_cpu(arg));
+	}
+	spin_lock(&host_info_lock);
+	hi = find_host_info(host);	/* search address-entry */
+	if (hi != NULL) {
+		list_for_each_entry(fi, &hi->file_info_list, list) {
+			entry = fi->addr_list.next;
+			while (entry != &(fi->addr_list)) {
+				arm_addr =
+				    list_entry(entry, struct arm_addr,
+					       addr_list);
+				/* a lock touches exactly one quadlet */
+				if (((arm_addr->start) <= (addr))
+				    && ((arm_addr->end) >=
+					(addr + sizeof(*store)))) {
+					found = 1;
+					break;
+				}
+				entry = entry->next;
+			}
+			if (found) {
+				break;
+			}
+		}
+	}
+	rcode = -1;
+	if (!found) {
+		printk(KERN_ERR "raw1394: arm_lock FAILED addr_entry not found"
+		       " -> rcode_address_error\n");
+		spin_unlock(&host_info_lock);
+		return (RCODE_ADDRESS_ERROR);
+	} else {
+		DBGMSG("arm_lock addr_entry FOUND");
+	}
+	if (rcode == -1) {
+		if (arm_addr->access_rights & ARM_LOCK) {
+			if (!(arm_addr->client_transactions & ARM_LOCK)) {
+				memcpy(&old,
+				       (arm_addr->addr_space_buffer) + (addr -
+									(arm_addr->
+									 start)),
+				       sizeof(old));
+				/* compute the new quadlet according to the
+				 * extended tcode; data/arg/old keep their
+				 * bus byte order, hence the explicit
+				 * be32/le32 conversions for the adds */
+				switch (ext_tcode) {
+				case (EXTCODE_MASK_SWAP):
+					new = data | (old & ~arg);
+					break;
+				case (EXTCODE_COMPARE_SWAP):
+					if (old == arg) {
+						new = data;
+					} else {
+						new = old;
+					}
+					break;
+				case (EXTCODE_FETCH_ADD):
+					new =
+					    cpu_to_be32(be32_to_cpu(data) +
+							be32_to_cpu(old));
+					break;
+				case (EXTCODE_LITTLE_ADD):
+					new =
+					    cpu_to_le32(le32_to_cpu(data) +
+							le32_to_cpu(old));
+					break;
+				case (EXTCODE_BOUNDED_ADD):
+					if (old != arg) {
+						new =
+						    cpu_to_be32(be32_to_cpu
+								(data) +
+								be32_to_cpu
+								(old));
+					} else {
+						new = old;
+					}
+					break;
+				case (EXTCODE_WRAP_ADD):
+					if (old != arg) {
+						new =
+						    cpu_to_be32(be32_to_cpu
+								(data) +
+								be32_to_cpu
+								(old));
+					} else {
+						new = data;
+					}
+					break;
+				default:
+					rcode = RCODE_TYPE_ERROR;	/* function not allowed */
+					printk(KERN_ERR
+					       "raw1394: arm_lock FAILED "
+					       "ext_tcode not allowed -> rcode_type_error\n");
+					break;
+				}	/*switch */
+				if (rcode == -1) {
+					/* valid tcode: return old value and
+					 * commit the new one */
+					DBGMSG("arm_lock -> (rcode_complete)");
+					rcode = RCODE_COMPLETE;
+					memcpy(store, &old, sizeof(*store));
+					memcpy((arm_addr->addr_space_buffer) +
+					       (addr - (arm_addr->start)),
+					       &new, sizeof(*store));
+				}
+			}
+		} else {
+			rcode = RCODE_TYPE_ERROR;	/* function not allowed */
+			DBGMSG("arm_lock -> rcode_type_error (access denied)");
+		}
+	}
+	if (arm_addr->notification_options & ARM_LOCK) {
+		byte_t *buf1, *buf2;
+		DBGMSG("arm_lock -> entering notification-section");
+		req = __alloc_pending_request(SLAB_ATOMIC);
+		if (!req) {
+			DBGMSG("arm_lock -> rcode_conflict_error");
+			spin_unlock(&host_info_lock);
+			return (RCODE_CONFLICT_ERROR);	/* A resource conflict was detected.
+							   The request may be retried */
+		}
+		size = sizeof(struct arm_request) + sizeof(struct arm_response) + 3 * sizeof(*store) + sizeof(struct arm_request_response);	/* maximum */
+		req->data = kmalloc(size, SLAB_ATOMIC);
+		if (!(req->data)) {
+			free_pending_request(req);
+			DBGMSG("arm_lock -> rcode_conflict_error");
+			spin_unlock(&host_info_lock);
+			return (RCODE_CONFLICT_ERROR);	/* A resource conflict was detected.
+							   The request may be retried */
+		}
+		req->free_data = 1;
+		/* req->data layout: arm_request_response | arm_request |
+		 * arm_response | buf1 (operands, up to 2 quadlets) |
+		 * buf2 (old value) */
+		arm_req_resp = (struct arm_request_response *)(req->data);
+		arm_req = (struct arm_request *)((byte_t *) (req->data) +
+						 (sizeof
+						  (struct
+						   arm_request_response)));
+		arm_resp =
+		    (struct arm_response *)((byte_t *) (arm_req) +
+					    (sizeof(struct arm_request)));
+		buf1 = (byte_t *) arm_resp + sizeof(struct arm_response);
+		buf2 = buf1 + 2 * sizeof(*store);
+		if ((ext_tcode == EXTCODE_FETCH_ADD) ||
+		    (ext_tcode == EXTCODE_LITTLE_ADD)) {
+			/* single-operand tcodes carry only data */
+			arm_req->buffer_length = sizeof(*store);
+			memcpy(buf1, &data, sizeof(*store));
+
+		} else {
+			/* two-operand tcodes carry arg then data */
+			arm_req->buffer_length = 2 * sizeof(*store);
+			memcpy(buf1, &arg, sizeof(*store));
+			memcpy(buf1 + sizeof(*store), &data, sizeof(*store));
+		}
+		if (rcode == RCODE_COMPLETE) {
+			arm_resp->buffer_length = sizeof(*store);
+			memcpy(buf2, &old, sizeof(*store));
+		} else {
+			arm_resp->buffer_length = 0;
+		}
+		req->file_info = fi;
+		req->req.type = RAW1394_REQ_ARM;
+		req->req.generation = get_hpsb_generation(host);
+		req->req.misc = ((((sizeof(*store)) << 16) & (0xFFFF0000)) |
+				 (ARM_LOCK & 0xFF));
+		req->req.tag = arm_addr->arm_tag;
+		req->req.recvb = arm_addr->recvb;
+		req->req.length = size;
+		arm_req->generation = req->req.generation;
+		arm_req->extended_transaction_code = ext_tcode;
+		arm_req->destination_offset = addr;
+		arm_req->source_nodeid = nodeid;
+		arm_req->destination_nodeid = host->node_id;
+		arm_req->tlabel = (flags >> 10) & 0x3f;
+		arm_req->tcode = (flags >> 4) & 0x0f;
+		arm_resp->response_code = rcode;
+		/* pointers are rewritten as offsets into the client's
+		 * receive buffer (recvb), mirroring the layout above */
+		arm_req_resp->request = int2ptr((arm_addr->recvb) +
+						sizeof(struct
+						       arm_request_response));
+		arm_req_resp->response =
+		    int2ptr((arm_addr->recvb) +
+			    sizeof(struct arm_request_response) +
+			    sizeof(struct arm_request));
+		arm_req->buffer =
+		    int2ptr((arm_addr->recvb) +
+			    sizeof(struct arm_request_response) +
+			    sizeof(struct arm_request) +
+			    sizeof(struct arm_response));
+		arm_resp->buffer =
+		    int2ptr((arm_addr->recvb) +
+			    sizeof(struct arm_request_response) +
+			    sizeof(struct arm_request) +
+			    sizeof(struct arm_response) + 2 * sizeof(*store));
+		queue_complete_req(req);
+	}
+	spin_unlock(&host_info_lock);
+	return (rcode);
+}
+
+/*
+ * ARM lock handler (64-bit).  Octlet counterpart of arm_lock(): performs a
+ * 64-bit lock transaction on a registered address range, returns the old
+ * value through *store, commits the new value to the backing buffer, and
+ * optionally queues a notification.  Returns an IEEE 1394 rcode.
+ */
+static int arm_lock64(struct hpsb_host *host, int nodeid, octlet_t * store,
+		      u64 addr, octlet_t data, octlet_t arg, int ext_tcode,
+		      u16 flags)
+{
+	struct pending_request *req;
+	struct host_info *hi;
+	struct file_info *fi = NULL;
+	struct list_head *entry;
+	struct arm_addr *arm_addr = NULL;
+	struct arm_request *arm_req = NULL;
+	struct arm_response *arm_resp = NULL;
+	int found = 0, size = 0, rcode = -1;
+	octlet_t old, new;
+	struct arm_request_response *arm_req_resp = NULL;
+
+	/* fetch_add/little_add take no arg operand, hence the two debug
+	 * formats */
+	if (((ext_tcode & 0xFF) == EXTCODE_FETCH_ADD) ||
+	    ((ext_tcode & 0xFF) == EXTCODE_LITTLE_ADD)) {
+		DBGMSG("arm_lock64 called by node: %X "
+		       "addr: %4.4x %8.8x extcode: %2.2X data: %8.8X %8.8X ",
+		       nodeid, (u16) ((addr >> 32) & 0xFFFF),
+		       (u32) (addr & 0xFFFFFFFF),
+		       ext_tcode & 0xFF,
+		       (u32) ((be64_to_cpu(data) >> 32) & 0xFFFFFFFF),
+		       (u32) (be64_to_cpu(data) & 0xFFFFFFFF));
+	} else {
+		DBGMSG("arm_lock64 called by node: %X "
+		       "addr: %4.4x %8.8x extcode: %2.2X data: %8.8X %8.8X arg: "
+		       "%8.8X %8.8X ",
+		       nodeid, (u16) ((addr >> 32) & 0xFFFF),
+		       (u32) (addr & 0xFFFFFFFF),
+		       ext_tcode & 0xFF,
+		       (u32) ((be64_to_cpu(data) >> 32) & 0xFFFFFFFF),
+		       (u32) (be64_to_cpu(data) & 0xFFFFFFFF),
+		       (u32) ((be64_to_cpu(arg) >> 32) & 0xFFFFFFFF),
+		       (u32) (be64_to_cpu(arg) & 0xFFFFFFFF));
+	}
+	spin_lock(&host_info_lock);
+	hi = find_host_info(host);	/* search addressentry in file_info's for host */
+	if (hi != NULL) {
+		list_for_each_entry(fi, &hi->file_info_list, list) {
+			entry = fi->addr_list.next;
+			while (entry != &(fi->addr_list)) {
+				arm_addr =
+				    list_entry(entry, struct arm_addr,
+					       addr_list);
+				/* a lock64 touches exactly one octlet */
+				if (((arm_addr->start) <= (addr))
+				    && ((arm_addr->end) >=
+					(addr + sizeof(*store)))) {
+					found = 1;
+					break;
+				}
+				entry = entry->next;
+			}
+			if (found) {
+				break;
+			}
+		}
+	}
+	rcode = -1;
+	if (!found) {
+		printk(KERN_ERR
+		       "raw1394: arm_lock64 FAILED addr_entry not found"
+		       " -> rcode_address_error\n");
+		spin_unlock(&host_info_lock);
+		return (RCODE_ADDRESS_ERROR);
+	} else {
+		DBGMSG("arm_lock64 addr_entry FOUND");
+	}
+	if (rcode == -1) {
+		if (arm_addr->access_rights & ARM_LOCK) {
+			if (!(arm_addr->client_transactions & ARM_LOCK)) {
+				memcpy(&old,
+				       (arm_addr->addr_space_buffer) + (addr -
+									(arm_addr->
+									 start)),
+				       sizeof(old));
+				/* compute the new octlet according to the
+				 * extended tcode; values keep their bus
+				 * byte order, hence the be64/le64
+				 * conversions for the adds */
+				switch (ext_tcode) {
+				case (EXTCODE_MASK_SWAP):
+					new = data | (old & ~arg);
+					break;
+				case (EXTCODE_COMPARE_SWAP):
+					if (old == arg) {
+						new = data;
+					} else {
+						new = old;
+					}
+					break;
+				case (EXTCODE_FETCH_ADD):
+					new =
+					    cpu_to_be64(be64_to_cpu(data) +
+							be64_to_cpu(old));
+					break;
+				case (EXTCODE_LITTLE_ADD):
+					new =
+					    cpu_to_le64(le64_to_cpu(data) +
+							le64_to_cpu(old));
+					break;
+				case (EXTCODE_BOUNDED_ADD):
+					if (old != arg) {
+						new =
+						    cpu_to_be64(be64_to_cpu
+								(data) +
+								be64_to_cpu
+								(old));
+					} else {
+						new = old;
+					}
+					break;
+				case (EXTCODE_WRAP_ADD):
+					if (old != arg) {
+						new =
+						    cpu_to_be64(be64_to_cpu
+								(data) +
+								be64_to_cpu
+								(old));
+					} else {
+						new = data;
+					}
+					break;
+				default:
+					printk(KERN_ERR
+					       "raw1394: arm_lock64 FAILED "
+					       "ext_tcode not allowed -> rcode_type_error\n");
+					rcode = RCODE_TYPE_ERROR;	/* function not allowed */
+					break;
+				}	/*switch */
+				if (rcode == -1) {
+					/* valid tcode: return old value and
+					 * commit the new one */
+					DBGMSG
+					    ("arm_lock64 -> (rcode_complete)");
+					rcode = RCODE_COMPLETE;
+					memcpy(store, &old, sizeof(*store));
+					memcpy((arm_addr->addr_space_buffer) +
+					       (addr - (arm_addr->start)),
+					       &new, sizeof(*store));
+				}
+			}
+		} else {
+			rcode = RCODE_TYPE_ERROR;	/* function not allowed */
+			DBGMSG
+			    ("arm_lock64 -> rcode_type_error (access denied)");
+		}
+	}
+	if (arm_addr->notification_options & ARM_LOCK) {
+		byte_t *buf1, *buf2;
+		DBGMSG("arm_lock64 -> entering notification-section");
+		req = __alloc_pending_request(SLAB_ATOMIC);
+		if (!req) {
+			spin_unlock(&host_info_lock);
+			DBGMSG("arm_lock64 -> rcode_conflict_error");
+			return (RCODE_CONFLICT_ERROR);	/* A resource conflict was detected.
+							   The request may be retried */
+		}
+		size = sizeof(struct arm_request) + sizeof(struct arm_response) + 3 * sizeof(*store) + sizeof(struct arm_request_response);	/* maximum */
+		req->data = kmalloc(size, SLAB_ATOMIC);
+		if (!(req->data)) {
+			free_pending_request(req);
+			spin_unlock(&host_info_lock);
+			DBGMSG("arm_lock64 -> rcode_conflict_error");
+			return (RCODE_CONFLICT_ERROR);	/* A resource conflict was detected.
+							   The request may be retried */
+		}
+		req->free_data = 1;
+		/* req->data layout: arm_request_response | arm_request |
+		 * arm_response | buf1 (operands, up to 2 octlets) |
+		 * buf2 (old value) */
+		arm_req_resp = (struct arm_request_response *)(req->data);
+		arm_req = (struct arm_request *)((byte_t *) (req->data) +
+						 (sizeof
+						  (struct
+						   arm_request_response)));
+		arm_resp =
+		    (struct arm_response *)((byte_t *) (arm_req) +
+					    (sizeof(struct arm_request)));
+		buf1 = (byte_t *) arm_resp + sizeof(struct arm_response);
+		buf2 = buf1 + 2 * sizeof(*store);
+		if ((ext_tcode == EXTCODE_FETCH_ADD) ||
+		    (ext_tcode == EXTCODE_LITTLE_ADD)) {
+			/* single-operand tcodes carry only data */
+			arm_req->buffer_length = sizeof(*store);
+			memcpy(buf1, &data, sizeof(*store));
+
+		} else {
+			/* two-operand tcodes carry arg then data */
+			arm_req->buffer_length = 2 * sizeof(*store);
+			memcpy(buf1, &arg, sizeof(*store));
+			memcpy(buf1 + sizeof(*store), &data, sizeof(*store));
+		}
+		if (rcode == RCODE_COMPLETE) {
+			arm_resp->buffer_length = sizeof(*store);
+			memcpy(buf2, &old, sizeof(*store));
+		} else {
+			arm_resp->buffer_length = 0;
+		}
+		req->file_info = fi;
+		req->req.type = RAW1394_REQ_ARM;
+		req->req.generation = get_hpsb_generation(host);
+		req->req.misc = ((((sizeof(*store)) << 16) & (0xFFFF0000)) |
+				 (ARM_LOCK & 0xFF));
+		req->req.tag = arm_addr->arm_tag;
+		req->req.recvb = arm_addr->recvb;
+		req->req.length = size;
+		arm_req->generation = req->req.generation;
+		arm_req->extended_transaction_code = ext_tcode;
+		arm_req->destination_offset = addr;
+		arm_req->source_nodeid = nodeid;
+		arm_req->destination_nodeid = host->node_id;
+		arm_req->tlabel = (flags >> 10) & 0x3f;
+		arm_req->tcode = (flags >> 4) & 0x0f;
+		arm_resp->response_code = rcode;
+		/* pointers are rewritten as offsets into the client's
+		 * receive buffer (recvb), mirroring the layout above */
+		arm_req_resp->request = int2ptr((arm_addr->recvb) +
+						sizeof(struct
+						       arm_request_response));
+		arm_req_resp->response =
+		    int2ptr((arm_addr->recvb) +
+			    sizeof(struct arm_request_response) +
+			    sizeof(struct arm_request));
+		arm_req->buffer =
+		    int2ptr((arm_addr->recvb) +
+			    sizeof(struct arm_request_response) +
+			    sizeof(struct arm_request) +
+			    sizeof(struct arm_response));
+		arm_resp->buffer =
+		    int2ptr((arm_addr->recvb) +
+			    sizeof(struct arm_request_response) +
+			    sizeof(struct arm_request) +
+			    sizeof(struct arm_response) + 2 * sizeof(*store));
+		queue_complete_req(req);
+	}
+	spin_unlock(&host_info_lock);
+	return (rcode);
+}
+
+/*
+ * Register an address range mapping (ARM) for a raw1394 client.  Allocates
+ * a backing buffer, optionally seeds it from user space, rejects ranges
+ * the same client already owns, and — unless another host already has the
+ * range registered — installs the range with the hpsb core.  Returns
+ * sizeof(struct raw1394_request) on success or a negative errno.
+ */
+static int arm_register(struct file_info *fi, struct pending_request *req)
+{
+	int retval;
+	struct arm_addr *addr;
+	struct host_info *hi;
+	struct file_info *fi_hlp = NULL;
+	struct list_head *entry;
+	struct arm_addr *arm_addr = NULL;
+	int same_host, another_host;
+	unsigned long flags;
+
+	DBGMSG("arm_register called "
+	       "addr(Offset): %8.8x %8.8x length: %u "
+	       "rights: %2.2X notify: %2.2X "
+	       "max_blk_len: %4.4X",
+	       (u32) ((req->req.address >> 32) & 0xFFFF),
+	       (u32) (req->req.address & 0xFFFFFFFF),
+	       req->req.length, ((req->req.misc >> 8) & 0xFF),
+	       (req->req.misc & 0xFF), ((req->req.misc >> 16) & 0xFFFF));
+	/* check addressrange */
+	/* both ends must lie inside the 48-bit 1394 address space */
+	if ((((req->req.address) & ~(0xFFFFFFFFFFFFULL)) != 0) ||
+	    (((req->req.address + req->req.length) & ~(0xFFFFFFFFFFFFULL)) !=
+	     0)) {
+		req->req.length = 0;
+		return (-EINVAL);
+	}
+	/* addr-list-entry for fileinfo */
+	addr = (struct arm_addr *)kmalloc(sizeof(struct arm_addr), SLAB_KERNEL);
+	if (!addr) {
+		req->req.length = 0;
+		return (-ENOMEM);
+	}
+	/* allocation of addr_space_buffer */
+	addr->addr_space_buffer = (u8 *) vmalloc(req->req.length);
+	if (!(addr->addr_space_buffer)) {
+		kfree(addr);
+		req->req.length = 0;
+		return (-ENOMEM);
+	}
+	/* initialization of addr_space_buffer */
+	if ((req->req.sendb) == (unsigned long)NULL) {
+		/* init: set 0 */
+		memset(addr->addr_space_buffer, 0, req->req.length);
+	} else {
+		/* init: user -> kernel */
+		if (copy_from_user
+		    (addr->addr_space_buffer, int2ptr(req->req.sendb),
+		     req->req.length)) {
+			vfree(addr->addr_space_buffer);
+			kfree(addr);
+			return (-EFAULT);
+		}
+	}
+	INIT_LIST_HEAD(&addr->addr_list);
+	/* unpack access/notification/client-handled masks from misc;
+	 * client_transactions implies both access and notification */
+	addr->arm_tag = req->req.tag;
+	addr->start = req->req.address;
+	addr->end = req->req.address + req->req.length;
+	addr->access_rights = (u8) (req->req.misc & 0x0F);
+	addr->notification_options = (u8) ((req->req.misc >> 4) & 0x0F);
+	addr->client_transactions = (u8) ((req->req.misc >> 8) & 0x0F);
+	addr->access_rights |= addr->client_transactions;
+	addr->notification_options |= addr->client_transactions;
+	addr->recvb = req->req.recvb;
+	addr->rec_length = (u16) ((req->req.misc >> 16) & 0xFFFF);
+	spin_lock_irqsave(&host_info_lock, flags);
+	hi = find_host_info(fi->host);
+	same_host = 0;
+	another_host = 0;
+	/* same host with address-entry containing same addressrange ? */
+	list_for_each_entry(fi_hlp, &hi->file_info_list, list) {
+		entry = fi_hlp->addr_list.next;
+		while (entry != &(fi_hlp->addr_list)) {
+			arm_addr =
+			    list_entry(entry, struct arm_addr, addr_list);
+			if ((arm_addr->start == addr->start)
+			    && (arm_addr->end == addr->end)) {
+				DBGMSG("same host ownes same "
+				       "addressrange -> EALREADY");
+				same_host = 1;
+				break;
+			}
+			entry = entry->next;
+		}
+		if (same_host) {
+			break;
+		}
+	}
+	if (same_host) {
+		/* addressrange occupied by same host */
+		vfree(addr->addr_space_buffer);
+		kfree(addr);
+		spin_unlock_irqrestore(&host_info_lock, flags);
+		return (-EALREADY);
+	}
+	/* another host with valid address-entry containing same addressrange */
+	list_for_each_entry(hi, &host_info_list, list) {
+		if (hi->host != fi->host) {
+			list_for_each_entry(fi_hlp, &hi->file_info_list, list) {
+				entry = fi_hlp->addr_list.next;
+				while (entry != &(fi_hlp->addr_list)) {
+					arm_addr =
+					    list_entry(entry, struct arm_addr,
+						       addr_list);
+					if ((arm_addr->start == addr->start)
+					    && (arm_addr->end == addr->end)) {
+						DBGMSG
+						    ("another host ownes same "
+						     "addressrange");
+						another_host = 1;
+						break;
+					}
+					entry = entry->next;
+				}
+				if (another_host) {
+					break;
+				}
+			}
+		}
+	}
+	if (another_host) {
+		/* the hpsb address space is already claimed by another
+		 * host's entry; only record our own list entry and report
+		 * the range start back to the client */
+		DBGMSG("another hosts entry is valid -> SUCCESS");
+		if (copy_to_user(int2ptr(req->req.recvb),
+				 &addr->start, sizeof(u64))) {
+			printk(KERN_ERR "raw1394: arm_register failed "
+			       " address-range-entry is invalid -> EFAULT !!!\n");
+			vfree(addr->addr_space_buffer);
+			kfree(addr);
+			spin_unlock_irqrestore(&host_info_lock, flags);
+			return (-EFAULT);
+		}
+		free_pending_request(req);	/* immediate success or fail */
+		/* INSERT ENTRY */
+		list_add_tail(&addr->addr_list, &fi->addr_list);
+		spin_unlock_irqrestore(&host_info_lock, flags);
+		return sizeof(struct raw1394_request);
+	}
+	/* NOTE(review): a nonzero return from hpsb_register_addrspace is
+	 * treated as success here — confirm against the highlevel.c API,
+	 * which differs from the usual 0-on-success kernel convention */
+	retval =
+	    hpsb_register_addrspace(&raw1394_highlevel, fi->host, &arm_ops,
+				    req->req.address,
+				    req->req.address + req->req.length);
+	if (retval) {
+		/* INSERT ENTRY */
+		list_add_tail(&addr->addr_list, &fi->addr_list);
+	} else {
+		DBGMSG("arm_register failed errno: %d \n", retval);
+		vfree(addr->addr_space_buffer);
+		kfree(addr);
+		spin_unlock_irqrestore(&host_info_lock, flags);
+		return (-EALREADY);
+	}
+	spin_unlock_irqrestore(&host_info_lock, flags);
+	free_pending_request(req);	/* immediate success or fail */
+	return sizeof(struct raw1394_request);
+}
+
+/*
+ * Unregister an address range mapping previously created by arm_register.
+ * Removes the client's list entry and frees its backing buffer; the hpsb
+ * address space itself is only released when no other host still has an
+ * entry for the same range.  Returns sizeof(struct raw1394_request) on
+ * success or a negative errno.
+ */
+static int arm_unregister(struct file_info *fi, struct pending_request *req)
+{
+	int found = 0;
+	int retval = 0;
+	struct list_head *entry;
+	struct arm_addr *addr = NULL;
+	struct host_info *hi;
+	struct file_info *fi_hlp = NULL;
+	struct arm_addr *arm_addr = NULL;
+	int another_host;
+	unsigned long flags;
+
+	DBGMSG("arm_Unregister called addr(Offset): "
+	       "%8.8x %8.8x",
+	       (u32) ((req->req.address >> 32) & 0xFFFF),
+	       (u32) (req->req.address & 0xFFFFFFFF));
+	spin_lock_irqsave(&host_info_lock, flags);
+	/* get addr */
+	/* find this client's entry whose range starts at the given
+	 * address */
+	entry = fi->addr_list.next;
+	while (entry != &(fi->addr_list)) {
+		addr = list_entry(entry, struct arm_addr, addr_list);
+		if (addr->start == req->req.address) {
+			found = 1;
+			break;
+		}
+		entry = entry->next;
+	}
+	if (!found) {
+		DBGMSG("arm_Unregister addr not found");
+		spin_unlock_irqrestore(&host_info_lock, flags);
+		return (-EINVAL);
+	}
+	DBGMSG("arm_Unregister addr found");
+	another_host = 0;
+	/* another host with valid address-entry containing
+	   same addressrange */
+	list_for_each_entry(hi, &host_info_list, list) {
+		if (hi->host != fi->host) {
+			list_for_each_entry(fi_hlp, &hi->file_info_list, list) {
+				entry = fi_hlp->addr_list.next;
+				while (entry != &(fi_hlp->addr_list)) {
+					arm_addr = list_entry(entry,
+							      struct arm_addr,
+							      addr_list);
+					if (arm_addr->start == addr->start) {
+						DBGMSG("another host ownes "
+						       "same addressrange");
+						another_host = 1;
+						break;
+					}
+					entry = entry->next;
+				}
+				if (another_host) {
+					break;
+				}
+			}
+		}
+	}
+	if (another_host) {
+		/* another host still uses the range: drop only our entry,
+		 * keep the hpsb address space registered */
+		DBGMSG("delete entry from list -> success");
+		list_del(&addr->addr_list);
+		vfree(addr->addr_space_buffer);
+		kfree(addr);
+		free_pending_request(req);	/* immediate success or fail */
+		spin_unlock_irqrestore(&host_info_lock, flags);
+		return sizeof(struct raw1394_request);
+	}
+	/* NOTE(review): a zero return from hpsb_unregister_addrspace is
+	 * treated as failure here — confirm against the highlevel.c API */
+	retval =
+	    hpsb_unregister_addrspace(&raw1394_highlevel, fi->host,
+				      addr->start);
+	if (!retval) {
+		printk(KERN_ERR "raw1394: arm_Unregister failed -> EINVAL\n");
+		spin_unlock_irqrestore(&host_info_lock, flags);
+		return (-EINVAL);
+	}
+	DBGMSG("delete entry from list -> success");
+	list_del(&addr->addr_list);
+	spin_unlock_irqrestore(&host_info_lock, flags);
+	vfree(addr->addr_space_buffer);
+	kfree(addr);
+	free_pending_request(req);	/* immediate success or fail */
+	return sizeof(struct raw1394_request);
+}
+
+/* Copy data from ARM buffer(s) to user buffer. */
+static int arm_get_buf(struct file_info *fi, struct pending_request *req)
+{
+	/* Locate the client's mapping containing req->req.address and copy
+	 * req->req.length bytes of its backing buffer out to user space. */
+	struct arm_addr *range;
+	unsigned long flags;
+	unsigned long offset;
+
+	DBGMSG("arm_get_buf "
+	       "addr(Offset): %04X %08X length: %u",
+	       (u32) ((req->req.address >> 32) & 0xFFFF),
+	       (u32) (req->req.address & 0xFFFFFFFF), (u32) req->req.length);
+
+	spin_lock_irqsave(&host_info_lock, flags);
+	list_for_each_entry(range, &fi->addr_list, addr_list) {
+		/* skip mappings that do not contain the start address */
+		if ((range->start > req->req.address) ||
+		    (range->end <= req->req.address))
+			continue;
+		/* the whole requested span must stay inside the mapping */
+		if (req->req.address + req->req.length > range->end) {
+			DBGMSG("arm_get_buf request exceeded mapping");
+			spin_unlock_irqrestore(&host_info_lock, flags);
+			return (-EINVAL);
+		}
+		offset = req->req.address - range->start;
+
+		DBGMSG
+		    ("arm_get_buf copy_to_user( %08X, %p, %u )",
+		     (u32) req->req.recvb,
+		     range->addr_space_buffer + offset,
+		     (u32) req->req.length);
+
+		if (copy_to_user(int2ptr(req->req.recvb),
+				 range->addr_space_buffer + offset,
+				 req->req.length)) {
+			spin_unlock_irqrestore(&host_info_lock, flags);
+			return (-EFAULT);
+		}
+
+		spin_unlock_irqrestore(&host_info_lock, flags);
+		/* We have to free the request, because we queue no
+		 * response, and therefore nobody will free it. */
+		free_pending_request(req);
+		return sizeof(struct raw1394_request);
+	}
+	spin_unlock_irqrestore(&host_info_lock, flags);
+	return (-EINVAL);
+}
+
+/* Copy data from user buffer to ARM buffer(s). */
+static int arm_set_buf(struct file_info *fi, struct pending_request *req)
+{
+	struct arm_addr *arm_addr = NULL;
+	unsigned long flags;
+	unsigned long offset;
+
+	struct list_head *entry;
+
+	DBGMSG("arm_set_buf "
+	       "addr(Offset): %04X %08X length: %u",
+	       (u32) ((req->req.address >> 32) & 0xFFFF),
+	       (u32) (req->req.address & 0xFFFFFFFF), (u32) req->req.length);
+
+	/* Find the client's ARM range containing req->req.address and fill
+	 * it from user memory; mirror image of arm_get_buf(). */
+	spin_lock_irqsave(&host_info_lock, flags);
+	entry = fi->addr_list.next;
+	while (entry != &(fi->addr_list)) {
+		arm_addr = list_entry(entry, struct arm_addr, addr_list);
+		if ((arm_addr->start <= req->req.address) &&
+		    (arm_addr->end > req->req.address)) {
+			/* request must lie entirely within one range */
+			if (req->req.address + req->req.length <= arm_addr->end) {
+				offset = req->req.address - arm_addr->start;
+
+				DBGMSG
+				    ("arm_set_buf copy_from_user( %p, %08X, %u )",
+				     arm_addr->addr_space_buffer + offset,
+				     (u32) req->req.sendb,
+				     (u32) req->req.length);
+
+				/* NOTE(review): copy_from_user may fault and
+				 * sleep while host_info_lock is held with
+				 * IRQs disabled - looks unsafe; confirm. */
+				if (copy_from_user
+				    (arm_addr->addr_space_buffer + offset,
+				     int2ptr(req->req.sendb),
+				     req->req.length)) {
+					spin_unlock_irqrestore(&host_info_lock,
+							       flags);
+					return (-EFAULT);
+				}
+
+				spin_unlock_irqrestore(&host_info_lock, flags);
+				free_pending_request(req);	/* we have to free the request, because we queue no response, and therefore nobody will free it */
+				return sizeof(struct raw1394_request);
+			} else {
+				DBGMSG("arm_set_buf request exceeded mapping");
+				spin_unlock_irqrestore(&host_info_lock, flags);
+				return (-EINVAL);
+			}
+		}
+		entry = entry->next;
+	}
+	/* no registered range contains the requested address */
+	spin_unlock_irqrestore(&host_info_lock, flags);
+	return (-EINVAL);
+}
+
+/* Toggle bus-reset notification for this client.  req->req.misc must be
+ * RAW1394_NOTIFY_ON or RAW1394_NOTIFY_OFF; anything else is rejected. */
+static int reset_notification(struct file_info *fi, struct pending_request *req)
+{
+	DBGMSG("reset_notification called - switch %s ",
+	       (req->req.misc == RAW1394_NOTIFY_OFF) ? "OFF" : "ON");
+
+	/* reject anything that is not one of the two known switch values */
+	if ((req->req.misc != RAW1394_NOTIFY_OFF) &&
+	    (req->req.misc != RAW1394_NOTIFY_ON))
+		return (-EINVAL);
+
+	fi->notification = (u8) req->req.misc;
+
+	/* no response is queued, so the request must be freed here */
+	free_pending_request(req);
+	return sizeof(struct raw1394_request);
+}
+
+static int write_phypacket(struct file_info *fi, struct pending_request *req)
+{
+	struct hpsb_packet *packet = NULL;
+	int retval = 0;
+	quadlet_t data;
+
+	/* the raw PHY quadlet travels in the sendb field itself */
+	data = be32_to_cpu((u32) req->req.sendb);
+	DBGMSG("write_phypacket called - quadlet 0x%8.8x ", data);
+	packet = hpsb_make_phypacket(fi->host, data);
+	if (!packet)
+		return -ENOMEM;
+	req->req.length = 0;
+	req->packet = packet;
+	hpsb_set_packet_complete_task(packet,
+				      (void (*)(void *))queue_complete_cb, req);
+	/* queue the request as pending BEFORE sending, so the completion
+	 * callback always finds it on the list */
+	spin_lock_irq(&fi->reqlists_lock);
+	list_add_tail(&req->list, &fi->req_pending);
+	spin_unlock_irq(&fi->reqlists_lock);
+	packet->generation = req->req.generation;
+	retval = hpsb_send_packet(packet);
+	DBGMSG("write_phypacket send_packet called => retval: %d ", retval);
+	if (retval < 0) {
+		/* send failed immediately: complete the request with an
+		 * error instead of waiting for a callback */
+		req->req.error = RAW1394_ERROR_SEND_ERROR;
+		req->req.length = 0;
+		queue_complete_req(req);
+	}
+	return sizeof(struct raw1394_request);
+}
+
+/* Copy the host's Configuration ROM image plus its length, generation and
+ * the csr1212 read status out to the four user pointers carried in the
+ * request (recvb, tag, address, sendb respectively). */
+static int get_config_rom(struct file_info *fi, struct pending_request *req)
+{
+	int ret = sizeof(struct raw1394_request);
+	quadlet_t *data = kmalloc(req->req.length, SLAB_KERNEL);
+	int status;
+
+	if (!data)
+		return -ENOMEM;
+
+	status =
+	    csr1212_read(fi->host->csr.rom, CSR1212_CONFIG_ROM_SPACE_OFFSET,
+			 data, req->req.length);
+	/* each copy failure downgrades ret but the remaining copies are
+	 * still attempted */
+	if (copy_to_user(int2ptr(req->req.recvb), data, req->req.length))
+		ret = -EFAULT;
+	if (copy_to_user
+	    (int2ptr(req->req.tag), &fi->host->csr.rom->cache_head->len,
+	     sizeof(fi->host->csr.rom->cache_head->len)))
+		ret = -EFAULT;
+	if (copy_to_user(int2ptr(req->req.address), &fi->host->csr.generation,
+			 sizeof(fi->host->csr.generation)))
+		ret = -EFAULT;
+	if (copy_to_user(int2ptr(req->req.sendb), &status, sizeof(status)))
+		ret = -EFAULT;
+	kfree(data);
+	if (ret >= 0) {
+		free_pending_request(req);	/* we have to free the request, because we queue no response, and therefore nobody will free it */
+	}
+	return ret;
+}
+
+/* Replace the host's Configuration ROM with an image supplied by user
+ * space (sendb, req->req.length bytes); the hpsb_update_config_rom()
+ * status is written back to recvb.  Returns the request size on success,
+ * -ENOMEM on allocation failure, -EFAULT on a failed user copy. */
+static int update_config_rom(struct file_info *fi, struct pending_request *req)
+{
+	int ret = sizeof(struct raw1394_request);
+	quadlet_t *data = kmalloc(req->req.length, SLAB_KERNEL);
+	if (!data)
+		return -ENOMEM;
+	if (copy_from_user(data, int2ptr(req->req.sendb), req->req.length)) {
+		ret = -EFAULT;
+	} else {
+		int status = hpsb_update_config_rom(fi->host,
+						    data, req->req.length,
+						    (unsigned char)req->req.
+						    misc);
+		/* a failed copy_to_user is a fault, not an allocation
+		 * failure (was incorrectly -ENOMEM) */
+		if (copy_to_user
+		    (int2ptr(req->req.recvb), &status, sizeof(status)))
+			ret = -EFAULT;
+	}
+	kfree(data);
+	if (ret >= 0) {
+		free_pending_request(req);	/* we have to free the request, because we queue no response, and therefore nobody will free it */
+		/* remember that user space changed the ROM so release()
+		 * regenerates the image */
+		fi->cfgrom_upd = 1;
+	}
+	return ret;
+}
+
+/* Add, replace or delete one user-owned CSR1212 vendor directory in the
+ * host's Configuration ROM.  misc == ~0 allocates a new directory slot;
+ * otherwise misc selects an existing slot, which is emptied and - if
+ * length == 0 - deleted.  On any failure the slot is torn down. */
+static int modify_config_rom(struct file_info *fi, struct pending_request *req)
+{
+	struct csr1212_keyval *kv;
+	struct csr1212_csr_rom_cache *cache;
+	struct csr1212_dentry *dentry;
+	u32 dr;
+	int ret = 0;
+
+	if (req->req.misc == ~0) {
+		if (req->req.length == 0)
+			return -EINVAL;
+
+		/* Find an unused slot */
+		for (dr = 0;
+		     dr < RAW1394_MAX_USER_CSR_DIRS && fi->csr1212_dirs[dr];
+		     dr++) ;
+
+		if (dr == RAW1394_MAX_USER_CSR_DIRS)
+			return -ENOMEM;
+
+		fi->csr1212_dirs[dr] =
+		    csr1212_new_directory(CSR1212_KV_ID_VENDOR);
+		if (!fi->csr1212_dirs[dr])
+			return -ENOMEM;
+	} else {
+		dr = req->req.misc;
+		if (!fi->csr1212_dirs[dr])
+			return -EINVAL;
+
+		/* Delete old stuff */
+		for (dentry =
+		     fi->csr1212_dirs[dr]->value.directory.dentries_head;
+		     dentry; dentry = dentry->next) {
+			csr1212_detach_keyval_from_directory(fi->host->csr.rom->
+							     root_kv,
+							     dentry->kv);
+		}
+
+		/* length == 0 means: remove the directory entirely */
+		if (req->req.length == 0) {
+			csr1212_release_keyval(fi->csr1212_dirs[dr]);
+			fi->csr1212_dirs[dr] = NULL;
+
+			hpsb_update_config_rom_image(fi->host);
+			free_pending_request(req);
+			return sizeof(struct raw1394_request);
+		}
+	}
+
+	/* stage the user-supplied directory image in a scratch ROM cache */
+	cache = csr1212_rom_cache_malloc(0, req->req.length);
+	if (!cache) {
+		csr1212_release_keyval(fi->csr1212_dirs[dr]);
+		fi->csr1212_dirs[dr] = NULL;
+		return -ENOMEM;
+	}
+
+	cache->filled_head =
+	    kmalloc(sizeof(struct csr1212_cache_region), GFP_KERNEL);
+	if (!cache->filled_head) {
+		csr1212_release_keyval(fi->csr1212_dirs[dr]);
+		fi->csr1212_dirs[dr] = NULL;
+		CSR1212_FREE(cache);
+		return -ENOMEM;
+	}
+	cache->filled_tail = cache->filled_head;
+
+	if (copy_from_user(cache->data, int2ptr(req->req.sendb),
+			   req->req.length)) {
+		csr1212_release_keyval(fi->csr1212_dirs[dr]);
+		fi->csr1212_dirs[dr] = NULL;
+		CSR1212_FREE(cache);
+		ret = -EFAULT;
+	} else {
+		cache->len = req->req.length;
+		cache->filled_head->offset_start = 0;
+		cache->filled_head->offset_end = cache->size - 1;
+
+		cache->layout_head = cache->layout_tail = fi->csr1212_dirs[dr];
+
+		ret = CSR1212_SUCCESS;
+		/* parse all the items */
+		for (kv = cache->layout_head; ret == CSR1212_SUCCESS && kv;
+		     kv = kv->next) {
+			ret = csr1212_parse_keyval(kv, cache);
+		}
+
+		/* attach top level items to the root directory */
+		for (dentry =
+		     fi->csr1212_dirs[dr]->value.directory.dentries_head;
+		     ret == CSR1212_SUCCESS && dentry; dentry = dentry->next) {
+			ret =
+			    csr1212_attach_keyval_to_directory(fi->host->csr.
+							       rom->root_kv,
+							       dentry->kv);
+		}
+
+		if (ret == CSR1212_SUCCESS) {
+			ret = hpsb_update_config_rom_image(fi->host);
+
+			/* NOTE(review): -ENOMEM for a failed copy_to_user
+			 * looks wrong; -EFAULT expected - confirm */
+			if (ret >= 0 && copy_to_user(int2ptr(req->req.recvb),
+						     &dr, sizeof(dr))) {
+				ret = -ENOMEM;
+			}
+		}
+	}
+	kfree(cache->filled_head);
+	kfree(cache);
+
+	if (ret >= 0) {
+		/* we have to free the request, because we queue no response,
+		 * and therefore nobody will free it */
+		free_pending_request(req);
+		return sizeof(struct raw1394_request);
+	} else {
+		/* failure: detach whatever got attached and drop the slot */
+		for (dentry =
+		     fi->csr1212_dirs[dr]->value.directory.dentries_head;
+		     dentry; dentry = dentry->next) {
+			csr1212_detach_keyval_from_directory(fi->host->csr.rom->
+							     root_kv,
+							     dentry->kv);
+		}
+		csr1212_release_keyval(fi->csr1212_dirs[dr]);
+		fi->csr1212_dirs[dr] = NULL;
+		return ret;
+	}
+}
+
+/* Dispatch one user request while the client is in the 'connected' state.
+ * Requests that need no valid bus generation are handled first; the rest
+ * are rejected with RAW1394_ERROR_GENERATION if the client is stale. */
+static int state_connected(struct file_info *fi, struct pending_request *req)
+{
+	/* destination node id lives in the top 16 bits of the address */
+	int node = req->req.address >> 48;
+
+	req->req.error = RAW1394_ERROR_NONE;
+
+	/* generation-independent requests */
+	switch (req->req.type) {
+
+	case RAW1394_REQ_ECHO:
+		queue_complete_req(req);
+		return sizeof(struct raw1394_request);
+
+	case RAW1394_REQ_ISO_SEND:
+		return handle_iso_send(fi, req, node);
+
+	case RAW1394_REQ_ARM_REGISTER:
+		return arm_register(fi, req);
+
+	case RAW1394_REQ_ARM_UNREGISTER:
+		return arm_unregister(fi, req);
+
+	case RAW1394_REQ_ARM_SET_BUF:
+		return arm_set_buf(fi, req);
+
+	case RAW1394_REQ_ARM_GET_BUF:
+		return arm_get_buf(fi, req);
+
+	case RAW1394_REQ_RESET_NOTIFY:
+		return reset_notification(fi, req);
+
+	case RAW1394_REQ_ISO_LISTEN:
+		handle_iso_listen(fi, req);
+		return sizeof(struct raw1394_request);
+
+	case RAW1394_REQ_FCP_LISTEN:
+		handle_fcp_listen(fi, req);
+		return sizeof(struct raw1394_request);
+
+	case RAW1394_REQ_RESET_BUS:
+		if (req->req.misc == RAW1394_LONG_RESET) {
+			DBGMSG("busreset called (type: LONG)");
+			hpsb_reset_bus(fi->host, LONG_RESET);
+			free_pending_request(req);	/* we have to free the request, because we queue no response, and therefore nobody will free it */
+			return sizeof(struct raw1394_request);
+		}
+		if (req->req.misc == RAW1394_SHORT_RESET) {
+			DBGMSG("busreset called (type: SHORT)");
+			hpsb_reset_bus(fi->host, SHORT_RESET);
+			free_pending_request(req);	/* we have to free the request, because we queue no response, and therefore nobody will free it */
+			return sizeof(struct raw1394_request);
+		}
+		/* error EINVAL (22) invalid argument */
+		return (-EINVAL);
+	case RAW1394_REQ_GET_ROM:
+		return get_config_rom(fi, req);
+
+	case RAW1394_REQ_UPDATE_ROM:
+		return update_config_rom(fi, req);
+
+	case RAW1394_REQ_MODIFY_ROM:
+		return modify_config_rom(fi, req);
+	}
+
+	/* everything below requires an up-to-date bus generation */
+	if (req->req.generation != get_hpsb_generation(fi->host)) {
+		req->req.error = RAW1394_ERROR_GENERATION;
+		req->req.generation = get_hpsb_generation(fi->host);
+		req->req.length = 0;
+		queue_complete_req(req);
+		return sizeof(struct raw1394_request);
+	}
+
+	switch (req->req.type) {
+	case RAW1394_REQ_PHYPACKET:
+		return write_phypacket(fi, req);
+	case RAW1394_REQ_ASYNC_SEND:
+		return handle_async_send(fi, req);
+	}
+
+	/* remaining async request types need a non-zero payload length */
+	if (req->req.length == 0) {
+		req->req.error = RAW1394_ERROR_INVALID_ARG;
+		queue_complete_req(req);
+		return sizeof(struct raw1394_request);
+	}
+
+	return handle_async_request(fi, req, node);
+}
+
+/* write() entry point: user space submits exactly one raw1394_request per
+ * write; it is dispatched according to the client's protocol state. */
+static ssize_t raw1394_write(struct file *file, const char __user * buffer,
+			     size_t count, loff_t * offset_is_ignored)
+{
+	struct file_info *fi = (struct file_info *)file->private_data;
+	struct pending_request *req;
+	ssize_t retval = 0;
+
+	/* only whole requests are accepted */
+	if (count != sizeof(struct raw1394_request)) {
+		return -EINVAL;
+	}
+
+	req = alloc_pending_request();
+	if (req == NULL) {
+		return -ENOMEM;
+	}
+	req->file_info = fi;
+
+	if (copy_from_user(&req->req, buffer, sizeof(struct raw1394_request))) {
+		free_pending_request(req);
+		return -EFAULT;
+	}
+
+	switch (fi->state) {
+	case opened:
+		retval = state_opened(fi, req);
+		break;
+
+	case initialized:
+		retval = state_initialized(fi, req);
+		break;
+
+	case connected:
+		retval = state_connected(fi, req);
+		break;
+	}
+
+	/* on error the state handlers do not consume the request */
+	if (retval < 0) {
+		free_pending_request(req);
+	}
+
+	return retval;
+}
+
+/* rawiso operations */
+
+/* check if any RAW1394_REQ_RAWISO_ACTIVITY event is already in the
+ * completion queue (reqlists_lock must be taken) */
+static inline int __rawiso_event_in_queue(struct file_info *fi)
+{
+	struct list_head *lh;
+
+	/* scan the completion queue for an already-queued ISO activity
+	 * event; caller holds reqlists_lock */
+	for (lh = fi->req_complete.next; lh != &fi->req_complete;
+	     lh = lh->next) {
+		struct pending_request *req =
+		    list_entry(lh, struct pending_request, list);
+		if (req->req.type == RAW1394_REQ_RAWISO_ACTIVITY)
+			return 1;
+	}
+
+	return 0;
+}
+
+/* put a RAWISO_ACTIVITY event in the queue, if one isn't there already */
+/* put a RAWISO_ACTIVITY event in the queue, if one isn't there already */
+static void queue_rawiso_event(struct file_info *fi)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&fi->reqlists_lock, flags);
+
+	/* only one ISO activity event may be in the queue */
+	if (!__rawiso_event_in_queue(fi)) {
+		/* atomic allocation: may run from interrupt context via
+		 * rawiso_activity_cb */
+		struct pending_request *req =
+		    __alloc_pending_request(SLAB_ATOMIC);
+
+		if (req) {
+			req->file_info = fi;
+			req->req.type = RAW1394_REQ_RAWISO_ACTIVITY;
+			req->req.generation = get_hpsb_generation(fi->host);
+			__queue_complete_req(req);
+		} else {
+			/* on allocation failure, signal an overflow */
+			if (fi->iso_handle) {
+				atomic_inc(&fi->iso_handle->overflows);
+			}
+		}
+	}
+	spin_unlock_irqrestore(&fi->reqlists_lock, flags);
+}
+
+/* hpsb_iso activity callback: notify every client on this host that owns
+ * the iso context. */
+static void rawiso_activity_cb(struct hpsb_iso *iso)
+{
+	unsigned long flags;
+	struct host_info *hi;
+	struct file_info *fi;
+
+	spin_lock_irqsave(&host_info_lock, flags);
+	hi = find_host_info(iso->host);
+
+	if (hi != NULL) {
+		list_for_each_entry(fi, &hi->file_info_list, list) {
+			/* only the owner of this iso handle gets the event */
+			if (fi->iso_handle == iso)
+				queue_rawiso_event(fi);
+		}
+	}
+
+	spin_unlock_irqrestore(&host_info_lock, flags);
+}
+
+/* helper function - gather all the kernel iso status bits for returning to user-space */
+/* Snapshot the kernel-side iso context state into the user-visible
+ * raw1394_iso_status structure. */
+static void raw1394_iso_fill_status(struct hpsb_iso *iso,
+				    struct raw1394_iso_status *stat)
+{
+	/* static configuration of the context */
+	stat->config.channel = iso->channel;
+	stat->config.speed = iso->speed;
+	stat->config.buf_packets = iso->buf_packets;
+	stat->config.data_buf_size = iso->buf_size;
+	stat->config.irq_interval = iso->irq_interval;
+
+	/* dynamic state */
+	stat->xmit_cycle = iso->xmit_cycle;
+	stat->overflows = atomic_read(&iso->overflows);
+	stat->n_packets = hpsb_iso_n_ready(iso);
+}
+
+/* ioctl helper: create an iso transmit context from a user-supplied
+ * raw1394_iso_status and write the realized configuration back. */
+static int raw1394_iso_xmit_init(struct file_info *fi, void __user * uaddr)
+{
+	struct raw1394_iso_status stat;
+
+	if (!fi->host)
+		return -EINVAL;
+
+	if (copy_from_user(&stat, uaddr, sizeof(stat)))
+		return -EFAULT;
+
+	fi->iso_handle = hpsb_iso_xmit_init(fi->host,
+					    stat.config.data_buf_size,
+					    stat.config.buf_packets,
+					    stat.config.channel,
+					    stat.config.speed,
+					    stat.config.irq_interval,
+					    rawiso_activity_cb);
+	if (!fi->iso_handle)
+		return -ENOMEM;
+
+	fi->iso_state = RAW1394_ISO_XMIT;
+
+	/* report back the (possibly adjusted) parameters */
+	raw1394_iso_fill_status(fi->iso_handle, &stat);
+	if (copy_to_user(uaddr, &stat, sizeof(stat)))
+		return -EFAULT;
+
+	/* queue an event to get things started */
+	rawiso_activity_cb(fi->iso_handle);
+
+	return 0;
+}
+
+/* ioctl helper: create an iso receive context from a user-supplied
+ * raw1394_iso_status and write the realized configuration back. */
+static int raw1394_iso_recv_init(struct file_info *fi, void __user * uaddr)
+{
+	struct raw1394_iso_status stat;
+
+	if (!fi->host)
+		return -EINVAL;
+
+	if (copy_from_user(&stat, uaddr, sizeof(stat)))
+		return -EFAULT;
+
+	fi->iso_handle = hpsb_iso_recv_init(fi->host,
+					    stat.config.data_buf_size,
+					    stat.config.buf_packets,
+					    stat.config.channel,
+					    stat.config.dma_mode,
+					    stat.config.irq_interval,
+					    rawiso_activity_cb);
+	if (!fi->iso_handle)
+		return -ENOMEM;
+
+	fi->iso_state = RAW1394_ISO_RECV;
+
+	/* report back the (possibly adjusted) parameters */
+	raw1394_iso_fill_status(fi->iso_handle, &stat);
+	if (copy_to_user(uaddr, &stat, sizeof(stat)))
+		return -EFAULT;
+	return 0;
+}
+
+/* ioctl helper: copy the current iso context status to user space and
+ * clear the overflow counter as a side effect of reading it. */
+static int raw1394_iso_get_status(struct file_info *fi, void __user * uaddr)
+{
+	struct hpsb_iso *iso = fi->iso_handle;
+	struct raw1394_iso_status stat;
+
+	raw1394_iso_fill_status(iso, &stat);
+	if (copy_to_user(uaddr, &stat, sizeof(stat)))
+		return -EFAULT;
+
+	/* reset overflow counter */
+	atomic_set(&iso->overflows, 0);
+
+	return 0;
+}
+
+/* copy N packet_infos out of the ringbuffer into user-supplied array */
+/* copy N packet_infos out of the ringbuffer into user-supplied array */
+static int raw1394_iso_recv_packets(struct file_info *fi, void __user * uaddr)
+{
+	struct raw1394_iso_packets upackets;
+	/* start at the oldest ready packet in the ring */
+	unsigned int packet = fi->iso_handle->first_packet;
+	int i;
+
+	if (copy_from_user(&upackets, uaddr, sizeof(upackets)))
+		return -EFAULT;
+
+	/* cannot hand out more packets than are ready */
+	if (upackets.n_packets > hpsb_iso_n_ready(fi->iso_handle))
+		return -EINVAL;
+
+	/* ensure user-supplied buffer is accessible and big enough */
+	if (!access_ok(VERIFY_WRITE, upackets.infos,
+			upackets.n_packets *
+			sizeof(struct raw1394_iso_packet_info)))
+		return -EFAULT;
+
+	/* copy the packet_infos out */
+	for (i = 0; i < upackets.n_packets; i++) {
+		if (__copy_to_user(&upackets.infos[i],
+				   &fi->iso_handle->infos[packet],
+				   sizeof(struct raw1394_iso_packet_info)))
+			return -EFAULT;
+
+		/* ring buffer wrap-around */
+		packet = (packet + 1) % fi->iso_handle->buf_packets;
+	}
+
+	return 0;
+}
+
+/* copy N packet_infos from user to ringbuffer, and queue them for transmission */
+/* copy N packet_infos from user to ringbuffer, and queue them for transmission */
+static int raw1394_iso_send_packets(struct file_info *fi, void __user * uaddr)
+{
+	struct raw1394_iso_packets upackets;
+	int i, rv;
+
+	if (copy_from_user(&upackets, uaddr, sizeof(upackets)))
+		return -EFAULT;
+
+	/* cannot queue more packets than there are free slots */
+	if (upackets.n_packets > hpsb_iso_n_ready(fi->iso_handle))
+		return -EINVAL;
+
+	/* ensure user-supplied buffer is accessible and big enough */
+	if (!access_ok(VERIFY_READ, upackets.infos,
+			upackets.n_packets *
+			sizeof(struct raw1394_iso_packet_info)))
+		return -EFAULT;
+
+	/* copy the infos structs in and queue the packets */
+	for (i = 0; i < upackets.n_packets; i++) {
+		struct raw1394_iso_packet_info info;
+
+		if (__copy_from_user(&info, &upackets.infos[i],
+				     sizeof(struct raw1394_iso_packet_info)))
+			return -EFAULT;
+
+		/* a failed queue aborts the batch; earlier packets stay
+		 * queued */
+		rv = hpsb_iso_xmit_queue_packet(fi->iso_handle, info.offset,
+						info.len, info.tag, info.sy);
+		if (rv)
+			return rv;
+	}
+
+	return 0;
+}
+
+/* Tear down this client's iso context (if any) and mark it inactive. */
+static void raw1394_iso_shutdown(struct file_info *fi)
+{
+	struct hpsb_iso *iso = fi->iso_handle;
+
+	if (iso)
+		hpsb_iso_shutdown(iso);
+
+	fi->iso_handle = NULL;
+	fi->iso_state = RAW1394_ISO_INACTIVE;
+}
+
+/* mmap the rawiso xmit/recv buffer */
+static int raw1394_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	struct file_info *fi = file->private_data;
+
+	if (fi->iso_state == RAW1394_ISO_INACTIVE)
+		return -EINVAL;
+
+	return dma_region_mmap(&fi->iso_handle->data_buf, file, vma);
+}
+
+/* ioctl is only used for rawiso operations */
+static int raw1394_ioctl(struct inode *inode, struct file *file,
+			 unsigned int cmd, unsigned long arg)
+{
+	struct file_info *fi = file->private_data;
+	void __user *argp = (void __user *)arg;
+
+	switch (fi->iso_state) {
+	case RAW1394_ISO_INACTIVE:
+		switch (cmd) {
+		case RAW1394_IOC_ISO_XMIT_INIT:
+			return raw1394_iso_xmit_init(fi, argp);
+		case RAW1394_IOC_ISO_RECV_INIT:
+			return raw1394_iso_recv_init(fi, argp);
+		default:
+			break;
+		}
+		break;
+	case RAW1394_ISO_RECV:
+		switch (cmd) {
+		case RAW1394_IOC_ISO_RECV_START:{
+				/* copy args from user-space */
+				int args[3];
+				if (copy_from_user
+				    (&args[0], argp, sizeof(args)))
+					return -EFAULT;
+				return hpsb_iso_recv_start(fi->iso_handle,
+							   args[0], args[1],
+							   args[2]);
+			}
+		case RAW1394_IOC_ISO_XMIT_RECV_STOP:
+			hpsb_iso_stop(fi->iso_handle);
+			return 0;
+		case RAW1394_IOC_ISO_RECV_LISTEN_CHANNEL:
+			return hpsb_iso_recv_listen_channel(fi->iso_handle,
+							    arg);
+		case RAW1394_IOC_ISO_RECV_UNLISTEN_CHANNEL:
+			return hpsb_iso_recv_unlisten_channel(fi->iso_handle,
+							      arg);
+		case RAW1394_IOC_ISO_RECV_SET_CHANNEL_MASK:{
+				/* copy the u64 from user-space */
+				u64 mask;
+				if (copy_from_user(&mask, argp, sizeof(mask)))
+					return -EFAULT;
+				return hpsb_iso_recv_set_channel_mask(fi->
+								      iso_handle,
+								      mask);
+			}
+		case RAW1394_IOC_ISO_GET_STATUS:
+			return raw1394_iso_get_status(fi, argp);
+		case RAW1394_IOC_ISO_RECV_PACKETS:
+			return raw1394_iso_recv_packets(fi, argp);
+		case RAW1394_IOC_ISO_RECV_RELEASE_PACKETS:
+			return hpsb_iso_recv_release_packets(fi->iso_handle,
+							     arg);
+		case RAW1394_IOC_ISO_RECV_FLUSH:
+			return hpsb_iso_recv_flush(fi->iso_handle);
+		case RAW1394_IOC_ISO_SHUTDOWN:
+			raw1394_iso_shutdown(fi);
+			return 0;
+		case RAW1394_IOC_ISO_QUEUE_ACTIVITY:
+			queue_rawiso_event(fi);
+			return 0;
+		}
+		break;
+	case RAW1394_ISO_XMIT:
+		switch (cmd) {
+		case RAW1394_IOC_ISO_XMIT_START:{
+				/* copy two ints from user-space */
+				int args[2];
+				if (copy_from_user
+				    (&args[0], argp, sizeof(args)))
+					return -EFAULT;
+				return hpsb_iso_xmit_start(fi->iso_handle,
+							   args[0], args[1]);
+			}
+		case RAW1394_IOC_ISO_XMIT_SYNC:
+			return hpsb_iso_xmit_sync(fi->iso_handle);
+		case RAW1394_IOC_ISO_XMIT_RECV_STOP:
+			hpsb_iso_stop(fi->iso_handle);
+			return 0;
+		case RAW1394_IOC_ISO_GET_STATUS:
+			return raw1394_iso_get_status(fi, argp);
+		case RAW1394_IOC_ISO_XMIT_PACKETS:
+			return raw1394_iso_send_packets(fi, argp);
+		case RAW1394_IOC_ISO_SHUTDOWN:
+			raw1394_iso_shutdown(fi);
+			return 0;
+		case RAW1394_IOC_ISO_QUEUE_ACTIVITY:
+			queue_rawiso_event(fi);
+			return 0;
+		}
+		break;
+	default:
+		break;
+	}
+
+	return -EINVAL;
+}
+
+/* poll() entry point: the device is always writable; it becomes readable
+ * once at least one completed request is queued. */
+static unsigned int raw1394_poll(struct file *file, poll_table * pt)
+{
+	struct file_info *fi = file->private_data;
+	unsigned int mask;
+
+	/* a request can always be submitted */
+	mask = POLLOUT | POLLWRNORM;
+
+	poll_wait(file, &fi->poll_wait_complete, pt);
+
+	spin_lock_irq(&fi->reqlists_lock);
+	if (!list_empty(&fi->req_complete))
+		mask |= POLLIN | POLLRDNORM;
+	spin_unlock_irq(&fi->reqlists_lock);
+
+	return mask;
+}
+
+/* open() entry point: allocate and initialize the per-client state; the
+ * client starts in the 'opened' protocol state with bus-reset
+ * notification enabled. */
+static int raw1394_open(struct inode *inode, struct file *file)
+{
+	struct file_info *fi;
+
+	fi = kmalloc(sizeof(struct file_info), SLAB_KERNEL);
+	if (fi == NULL)
+		return -ENOMEM;
+
+	memset(fi, 0, sizeof(struct file_info));
+	fi->notification = (u8) RAW1394_NOTIFY_ON;	/* busreset notification */
+
+	INIT_LIST_HEAD(&fi->list);
+	fi->state = opened;
+	INIT_LIST_HEAD(&fi->req_pending);
+	INIT_LIST_HEAD(&fi->req_complete);
+	/* counts completed requests for blocking readers */
+	sema_init(&fi->complete_sem, 0);
+	spin_lock_init(&fi->reqlists_lock);
+	init_waitqueue_head(&fi->poll_wait_complete);
+	INIT_LIST_HEAD(&fi->addr_list);
+
+	file->private_data = fi;
+
+	return 0;
+}
+
+/* release() entry point: tear down everything the client owns - iso
+ * context, channel listens, ARM address ranges (kept registered if
+ * another host still uses the same range), pending/completed requests
+ * and user CSR directories - then free the per-client state. */
+static int raw1394_release(struct inode *inode, struct file *file)
+{
+	struct file_info *fi = file->private_data;
+	struct list_head *lh;
+	struct pending_request *req;
+	int done = 0, i, fail = 0;
+	int retval = 0;
+	struct list_head *entry;
+	struct arm_addr *addr = NULL;
+	struct host_info *hi;
+	struct file_info *fi_hlp = NULL;
+	struct arm_addr *arm_addr = NULL;
+	int another_host;
+	int csr_mod = 0;
+
+	if (fi->iso_state != RAW1394_ISO_INACTIVE)
+		raw1394_iso_shutdown(fi);
+
+	/* drop every iso channel this client was listening on */
+	for (i = 0; i < 64; i++) {
+		if (fi->listen_channels & (1ULL << i)) {
+			hpsb_unlisten_channel(&raw1394_highlevel, fi->host, i);
+		}
+	}
+
+	spin_lock_irq(&host_info_lock);
+	fi->listen_channels = 0;
+	spin_unlock_irq(&host_info_lock);
+
+	fail = 0;
+	/* set address-entries invalid */
+	spin_lock_irq(&host_info_lock);
+
+	while (!list_empty(&fi->addr_list)) {
+		another_host = 0;
+		lh = fi->addr_list.next;
+		addr = list_entry(lh, struct arm_addr, addr_list);
+		/* another host with valid address-entry containing
+		   same addressrange? */
+		list_for_each_entry(hi, &host_info_list, list) {
+			if (hi->host != fi->host) {
+				list_for_each_entry(fi_hlp, &hi->file_info_list,
+						    list) {
+					entry = fi_hlp->addr_list.next;
+					while (entry != &(fi_hlp->addr_list)) {
+						arm_addr = list_entry(entry,
+								      struct
+								      arm_addr,
+								      addr_list);
+						if (arm_addr->start ==
+						    addr->start) {
+							DBGMSG
+							    ("raw1394_release: "
+							     "another host ownes "
+							     "same addressrange");
+							another_host = 1;
+							break;
+						}
+						entry = entry->next;
+					}
+					if (another_host) {
+						break;
+					}
+				}
+			}
+		}
+		/* only unregister the address space if no other host still
+		 * owns the same range */
+		if (!another_host) {
+			DBGMSG("raw1394_release: call hpsb_arm_unregister");
+			retval =
+			    hpsb_unregister_addrspace(&raw1394_highlevel,
+						      fi->host, addr->start);
+			if (!retval) {
+				++fail;
+				printk(KERN_ERR
+				       "raw1394_release arm_Unregister failed\n");
+			}
+		}
+		DBGMSG("raw1394_release: delete addr_entry from list");
+		list_del(&addr->addr_list);
+		vfree(addr->addr_space_buffer);
+		kfree(addr);
+	}			/* while */
+	spin_unlock_irq(&host_info_lock);
+	if (fail > 0) {
+		printk(KERN_ERR "raw1394: during addr_list-release "
+		       "error(s) occurred \n");
+	}
+
+	/* drain completed requests and wait until no request is pending */
+	while (!done) {
+		spin_lock_irq(&fi->reqlists_lock);
+
+		while (!list_empty(&fi->req_complete)) {
+			lh = fi->req_complete.next;
+			list_del(lh);
+
+			req = list_entry(lh, struct pending_request, list);
+
+			free_pending_request(req);
+		}
+
+		if (list_empty(&fi->req_pending))
+			done = 1;
+
+		spin_unlock_irq(&fi->reqlists_lock);
+
+		/* NOTE(review): the down_interruptible() return value is
+		 * ignored; if interrupted by a signal this loop can spin
+		 * until the pending requests complete - confirm intended */
+		if (!done)
+			down_interruptible(&fi->complete_sem);
+	}
+
+	/* Remove any sub-trees left by user space programs */
+	for (i = 0; i < RAW1394_MAX_USER_CSR_DIRS; i++) {
+		struct csr1212_dentry *dentry;
+		if (!fi->csr1212_dirs[i])
+			continue;
+		for (dentry =
+		     fi->csr1212_dirs[i]->value.directory.dentries_head; dentry;
+		     dentry = dentry->next) {
+			csr1212_detach_keyval_from_directory(fi->host->csr.rom->
+							     root_kv,
+							     dentry->kv);
+		}
+		csr1212_release_keyval(fi->csr1212_dirs[i]);
+		fi->csr1212_dirs[i] = NULL;
+		csr_mod = 1;
+	}
+
+	/* regenerate the ROM image if this client changed it */
+	if ((csr_mod || fi->cfgrom_upd)
+	    && hpsb_update_config_rom_image(fi->host) < 0)
+		HPSB_ERR
+		    ("Failed to generate Configuration ROM image for host %d",
+		     fi->host->id);
+
+	if (fi->state == connected) {
+		spin_lock_irq(&host_info_lock);
+		list_del(&fi->list);
+		spin_unlock_irq(&host_info_lock);
+
+		/* drop the host device reference taken on connect */
+		put_device(&fi->host->device);
+	}
+
+	kfree(fi);
+
+	return 0;
+}
+
+/*** HOTPLUG STUFF **********************************************************/
+/*
+ * Export information about protocols/devices supported by this driver.
+ */
+/* Unit directories this driver claims: AV/C devices and IIDC cameras
+ * (three successive camera software versions). */
+static struct ieee1394_device_id raw1394_id_table[] = {
+	{
+	 .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
+	 .specifier_id = AVC_UNIT_SPEC_ID_ENTRY & 0xffffff,
+	 .version = AVC_SW_VERSION_ENTRY & 0xffffff},
+	{
+	 .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
+	 .specifier_id = CAMERA_UNIT_SPEC_ID_ENTRY & 0xffffff,
+	 .version = CAMERA_SW_VERSION_ENTRY & 0xffffff},
+	{
+	 .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
+	 .specifier_id = CAMERA_UNIT_SPEC_ID_ENTRY & 0xffffff,
+	 .version = (CAMERA_SW_VERSION_ENTRY + 1) & 0xffffff},
+	{
+	 .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
+	 .specifier_id = CAMERA_UNIT_SPEC_ID_ENTRY & 0xffffff,
+	 .version = (CAMERA_SW_VERSION_ENTRY + 2) & 0xffffff},
+	{}
+};
+
+MODULE_DEVICE_TABLE(ieee1394, raw1394_id_table);
+
+/* Protocol driver registration: binds the id table above to the
+ * ieee1394 bus. */
+static struct hpsb_protocol_driver raw1394_driver = {
+	.name = "raw1394 Driver",
+	.id_table = raw1394_id_table,
+	.driver = {
+		   .name = "raw1394",
+		   .bus = &ieee1394_bus_type,
+		   },
+};
+
+/******************************************************************************/
+
+/* Highlevel driver hooks: host add/remove/reset plus iso and FCP
+ * reception callbacks. */
+static struct hpsb_highlevel raw1394_highlevel = {
+	.name = RAW1394_DEVICE_NAME,
+	.add_host = add_host,
+	.remove_host = remove_host,
+	.host_reset = host_reset,
+	.iso_receive = iso_receive,
+	.fcp_request = fcp_request,
+};
+
+static struct cdev raw1394_cdev;
+/* file operations for /dev/raw1394; ioctl is used only for rawiso */
+static struct file_operations raw1394_fops = {
+	.owner = THIS_MODULE,
+	.read = raw1394_read,
+	.write = raw1394_write,
+	.mmap = raw1394_mmap,
+	.ioctl = raw1394_ioctl,
+	.poll = raw1394_poll,
+	.open = raw1394_open,
+	.release = raw1394_release,
+};
+
+/* Module init: register the highlevel driver, create the class/devfs
+ * device nodes, hook up the char device and register the protocol
+ * driver; unwinds in reverse order on failure. */
+static int __init init_raw1394(void)
+{
+	int ret = 0;
+
+	hpsb_register_highlevel(&raw1394_highlevel);
+
+	/* NOTE(review): the class_simple_device_add error is collapsed to
+	 * -EFAULT instead of propagating PTR_ERR - confirm intended */
+	if (IS_ERR(class_simple_device_add(hpsb_protocol_class, MKDEV(
+		IEEE1394_MAJOR,	IEEE1394_MINOR_BLOCK_RAW1394 * 16), 
+		NULL, RAW1394_DEVICE_NAME))) {
+		ret = -EFAULT;
+		goto out_unreg;
+	}
+	
+	devfs_mk_cdev(MKDEV(
+		IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_RAW1394 * 16),
+		S_IFCHR | S_IRUSR | S_IWUSR, RAW1394_DEVICE_NAME);
+
+	/* NOTE(review): the device node is published above before cdev_add
+	 * succeeds here; an early open could race - confirm */
+	cdev_init(&raw1394_cdev, &raw1394_fops);
+	raw1394_cdev.owner = THIS_MODULE;
+	kobject_set_name(&raw1394_cdev.kobj, RAW1394_DEVICE_NAME);
+	ret = cdev_add(&raw1394_cdev, IEEE1394_RAW1394_DEV, 1);
+	if (ret) {
+		HPSB_ERR("raw1394 failed to register minor device block");
+		goto out_dev;
+	}
+
+	HPSB_INFO("raw1394: /dev/%s device initialized", RAW1394_DEVICE_NAME);
+
+	ret = hpsb_register_protocol(&raw1394_driver);
+	if (ret) {
+		HPSB_ERR("raw1394: failed to register protocol");
+		cdev_del(&raw1394_cdev);
+		goto out_dev;
+	}
+
+	goto out;
+
+out_dev:
+	devfs_remove(RAW1394_DEVICE_NAME);
+	class_simple_device_remove(MKDEV(
+		IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_RAW1394 * 16));
+out_unreg:
+	hpsb_unregister_highlevel(&raw1394_highlevel);
+out:
+	return ret;
+}
+
+/* Module exit: undo init_raw1394 registrations. */
+static void __exit cleanup_raw1394(void)
+{
+	class_simple_device_remove(MKDEV(
+		IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_RAW1394 * 16));
+	cdev_del(&raw1394_cdev);
+	devfs_remove(RAW1394_DEVICE_NAME);
+	hpsb_unregister_highlevel(&raw1394_highlevel);
+	hpsb_unregister_protocol(&raw1394_driver);
+}
+
+module_init(init_raw1394);
+module_exit(cleanup_raw1394);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_CHARDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_RAW1394 * 16);
diff --git a/drivers/ieee1394/raw1394.h b/drivers/ieee1394/raw1394.h
new file mode 100644
index 000000000000..35bfc38f013c
--- /dev/null
+++ b/drivers/ieee1394/raw1394.h
@@ -0,0 +1,181 @@
+#ifndef IEEE1394_RAW1394_H
+#define IEEE1394_RAW1394_H
+
+/* header for the raw1394 API that is exported to user-space */
+
+#define RAW1394_KERNELAPI_VERSION 4
+
+/* state: opened */
+#define RAW1394_REQ_INITIALIZE    1
+
+/* state: initialized */
+#define RAW1394_REQ_LIST_CARDS    2
+#define RAW1394_REQ_SET_CARD      3
+
+/* state: connected */
+#define RAW1394_REQ_ASYNC_READ      100
+#define RAW1394_REQ_ASYNC_WRITE     101
+#define RAW1394_REQ_LOCK            102
+#define RAW1394_REQ_LOCK64          103
+#define RAW1394_REQ_ISO_SEND        104
+#define RAW1394_REQ_ASYNC_SEND      105
+#define RAW1394_REQ_ASYNC_STREAM    106
+
+#define RAW1394_REQ_ISO_LISTEN      200
+#define RAW1394_REQ_FCP_LISTEN      201
+#define RAW1394_REQ_RESET_BUS       202
+#define RAW1394_REQ_GET_ROM         203
+#define RAW1394_REQ_UPDATE_ROM      204
+#define RAW1394_REQ_ECHO            205
+#define RAW1394_REQ_MODIFY_ROM      206
+
+#define RAW1394_REQ_ARM_REGISTER    300
+#define RAW1394_REQ_ARM_UNREGISTER  301
+#define RAW1394_REQ_ARM_SET_BUF     302
+#define RAW1394_REQ_ARM_GET_BUF     303
+
+#define RAW1394_REQ_RESET_NOTIFY    400
+
+#define RAW1394_REQ_PHYPACKET       500
+
+/* kernel to user */
+#define RAW1394_REQ_BUS_RESET        10000
+#define RAW1394_REQ_ISO_RECEIVE      10001
+#define RAW1394_REQ_FCP_REQUEST      10002
+#define RAW1394_REQ_ARM              10003
+#define RAW1394_REQ_RAWISO_ACTIVITY  10004
+
+/* error codes */
+#define RAW1394_ERROR_NONE        0
+#define RAW1394_ERROR_COMPAT      (-1001)
+#define RAW1394_ERROR_STATE_ORDER (-1002)
+#define RAW1394_ERROR_GENERATION  (-1003)
+#define RAW1394_ERROR_INVALID_ARG (-1004)
+#define RAW1394_ERROR_MEMFAULT    (-1005)
+#define RAW1394_ERROR_ALREADY     (-1006)
+
+#define RAW1394_ERROR_EXCESSIVE   (-1020)
+#define RAW1394_ERROR_UNTIDY_LEN  (-1021)
+
+#define RAW1394_ERROR_SEND_ERROR  (-1100)
+#define RAW1394_ERROR_ABORTED     (-1101)
+#define RAW1394_ERROR_TIMEOUT     (-1102)
+
+/* arm_codes */
+#define ARM_READ   1
+#define ARM_WRITE  2
+#define ARM_LOCK   4
+
+#define RAW1394_LONG_RESET  0
+#define RAW1394_SHORT_RESET 1
+
+/* busresetnotify ... */
+#define RAW1394_NOTIFY_OFF 0
+#define RAW1394_NOTIFY_ON  1
+
+#include <asm/types.h>
+
+struct raw1394_request {
+        __u32 type;       /* one of the RAW1394_REQ_* codes above */
+        __s32 error;      /* RAW1394_ERROR_* result code */
+        __u32 misc;       /* request-specific modifier field */
+
+        __u32 generation; /* bus generation number */
+        __u32 length;     /* length of the data, in bytes */
+
+        __u64 address;    /* target address on the 1394 bus */
+
+        __u64 tag;        /* presumably an opaque user cookie -- verify */
+
+        __u64 sendb;      /* user buffer sent to the kernel (ptr as u64) */
+        __u64 recvb;      /* user buffer filled by the kernel (ptr as u64) */
+};
+
+struct raw1394_khost_list {
+        __u32 nodes;    /* number of nodes on this host's bus */
+        __u8 name[32];  /* host adapter name */
+};
+
+typedef struct arm_request {
+        __u16           destination_nodeid;        /* node the request targets */
+        __u16           source_nodeid;             /* node that sent the request */
+        __u64           destination_offset;        /* 1394 address-space offset */
+        __u8            tlabel;                    /* transaction label */
+        __u8            tcode;                     /* transaction code */
+        __u8            extended_transaction_code; /* for lock transactions */
+        __u32           generation;                /* bus generation of the request */
+        __u16           buffer_length;             /* bytes valid in *buffer */
+        __u8            __user *buffer;            /* request payload (user space) */
+} *arm_request_t;
+
+typedef struct arm_response {
+        __s32           response_code;  /* response code for the requester */
+        __u16           buffer_length;  /* bytes valid in *buffer */
+        __u8            __user *buffer; /* response payload (user space) */
+} *arm_response_t;
+
+typedef struct arm_request_response {
+        struct arm_request  __user *request;  /* incoming ARM request */
+        struct arm_response __user *response; /* response to be sent back */
+} *arm_request_response_t;
+
+/* rawiso API */
+#include "ieee1394-ioctl.h"
+
+/* per-packet metadata embedded in the ringbuffer */
+/* must be identical to hpsb_iso_packet_info in iso.h! */
+struct raw1394_iso_packet_info {
+	__u32 offset;
+	__u16 len;
+	__u16 cycle;   /* recv only */
+	__u8  channel; /* recv only */
+	__u8  tag;
+	__u8  sy;
+};
+
+/* argument for RAW1394_ISO_RECV/XMIT_PACKETS ioctls */
+struct raw1394_iso_packets {
+	__u32 n_packets;
+	struct raw1394_iso_packet_info __user *infos;
+};
+
+struct raw1394_iso_config {
+	/* size of packet data buffer, in bytes (will be rounded up to PAGE_SIZE) */
+	__u32 data_buf_size;
+
+	/* # of packets to buffer */
+	__u32 buf_packets;
+
+	/* iso channel (set to -1 for multi-channel recv) */
+	__s32 channel;
+
+	/* xmit only - iso transmission speed */
+	__u8 speed;
+
+	/* The mode of the dma when receiving iso data. Must be supported by chip */
+	__u8 dma_mode;
+
+	/* max. latency of buffer, in packets (-1 if you don't care) */
+	__s32 irq_interval;
+};
+
+/* argument to RAW1394_ISO_XMIT/RECV_INIT and RAW1394_ISO_GET_STATUS */
+struct raw1394_iso_status {
+	/* current settings */
+	struct raw1394_iso_config config;
+
+	/* number of packets waiting to be filled with data (ISO transmission)
+	   or containing data received (ISO reception) */
+	__u32 n_packets;
+
+	/* approximate number of packets dropped due to overflow or
+	   underflow of the packet buffer (a value of zero guarantees
+	   that no packets have been dropped) */
+	__u32 overflows;
+
+	/* cycle number at which next packet will be transmitted;
+	   -1 if not known */
+	__s16 xmit_cycle;
+};
+
+#endif /* IEEE1394_RAW1394_H */
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
new file mode 100644
index 000000000000..00c7b958361a
--- /dev/null
+++ b/drivers/ieee1394/sbp2.c
@@ -0,0 +1,2864 @@
+/*
+ * sbp2.c - SBP-2 protocol driver for IEEE-1394
+ *
+ * Copyright (C) 2000 James Goodwin, Filanet Corporation (www.filanet.com)
+ * jamesg@filanet.com (JSG)
+ *
+ * Copyright (C) 2003 Ben Collins <bcollins@debian.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/*
+ * Brief Description:
+ *
+ * This driver implements the Serial Bus Protocol 2 (SBP-2) over IEEE-1394
+ * under Linux. The SBP-2 driver is implemented as an IEEE-1394 high-level
+ * driver. It also registers as a SCSI lower-level driver in order to accept
+ * SCSI commands for transport using SBP-2.
+ *
+ * You may access any attached SBP-2 storage devices as if they were SCSI
+ * devices (e.g. mount /dev/sda1, fdisk, mkfs, etc.).
+ *
+ * Current Issues:
+ *
+ *	- Error Handling: SCSI aborts and bus reset requests are handled somewhat
+ *	  but the code needs additional debugging.
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/fs.h>
+#include <linux/poll.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/blkdev.h>
+#include <linux/smp_lock.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+
+#include <asm/current.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/byteorder.h>
+#include <asm/atomic.h>
+#include <asm/system.h>
+#include <asm/scatterlist.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+
+#include "csr1212.h"
+#include "ieee1394.h"
+#include "ieee1394_types.h"
+#include "ieee1394_core.h"
+#include "nodemgr.h"
+#include "hosts.h"
+#include "highlevel.h"
+#include "ieee1394_transactions.h"
+#include "sbp2.h"
+
+static char version[] __devinitdata =
+	"$Rev: 1219 $ Ben Collins <bcollins@debian.org>";
+
+/*
+ * Module load parameter definitions
+ */
+
+/*
+ * Change max_speed on module load if you have a bad IEEE-1394
+ * controller that has trouble running 2KB packets at 400mb.
+ *
+ * NOTE: On certain OHCI parts I have seen short packets on async transmit
+ * (probably due to PCI latency/throughput issues with the part). You can
+ * bump down the speed if you are running into problems.
+ */
+static int max_speed = IEEE1394_SPEED_MAX;
+module_param(max_speed, int, 0644);
+MODULE_PARM_DESC(max_speed, "Force max speed (3 = 800mb, 2 = 400mb default, 1 = 200mb, 0 = 100mb)");
+
+/*
+ * Set serialize_io to 1 if you'd like only one scsi command sent
+ * down to us at a time (debugging). This might be necessary for very
+ * badly behaved sbp2 devices.
+ */
+static int serialize_io = 0;
+module_param(serialize_io, int, 0444);
+MODULE_PARM_DESC(serialize_io, "Serialize all I/O coming down from the scsi drivers (default = 0)");
+
+/*
+ * Bump up max_sectors if you'd like to support very large sized
+ * transfers. Please note that some older sbp2 bridge chips are broken for
+ * transfers greater or equal to 128KB.  Default is a value of 255
+ * sectors, or just under 128KB (at 512 byte sector size). I can note that
+ * the Oxsemi sbp2 chipsets have no problems supporting very large
+ * transfer sizes.
+ */
+static int max_sectors = SBP2_MAX_SECTORS;
+module_param(max_sectors, int, 0444);
+MODULE_PARM_DESC(max_sectors, "Change max sectors per I/O supported (default = 255)");
+
+/*
+ * Exclusive login to sbp2 device? In most cases, the sbp2 driver should
+ * do an exclusive login, as it's generally unsafe to have two hosts
+ * talking to a single sbp2 device at the same time (filesystem coherency,
+ * etc.). If you're running an sbp2 device that supports multiple logins,
+ * and you're either running read-only filesystems or some sort of special
+ * filesystem supporting multiple hosts (one such filesystem is OpenGFS,
+ * see opengfs.sourceforge.net for more info), then set exclusive_login
+ * to zero. Note: The Oxsemi OXFW911 sbp2 chipset supports up to four
+ * concurrent logins.
+ */
+static int exclusive_login = 1;
+module_param(exclusive_login, int, 0644);
+MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device (default = 1)");
+
+/*
+ * SCSI inquiry hack for really badly behaved sbp2 devices. Turn this on
+ * if your sbp2 device is not properly handling the SCSI inquiry command.
+ * This hack makes the inquiry look more like a typical MS Windows
+ * inquiry.
+ *
+ * If force_inquiry_hack=1 is required for your device to work,
+ * please submit the logged sbp2_firmware_revision value of this device to
+ * the linux1394-devel mailing list.
+ */
+static int force_inquiry_hack = 0;
+module_param(force_inquiry_hack, int, 0444);
+MODULE_PARM_DESC(force_inquiry_hack, "Force SCSI inquiry hack (default = 0)");
+
+
+/*
+ * Export information about protocols/devices supported by this driver.
+ */
+static struct ieee1394_device_id sbp2_id_table[] = {
+	{
+		.match_flags	= IEEE1394_MATCH_SPECIFIER_ID |
+				  IEEE1394_MATCH_VERSION,
+		.specifier_id	= SBP2_UNIT_SPEC_ID_ENTRY & 0xffffff,
+		.version	= SBP2_SW_VERSION_ENTRY & 0xffffff
+	},
+	{ }	/* terminating entry */
+};
+
+MODULE_DEVICE_TABLE(ieee1394, sbp2_id_table);
+
+/*
+ * Debug levels, configured via kernel config, or enable here.
+ */
+
+/* #define CONFIG_IEEE1394_SBP2_DEBUG_ORBS */
+/* #define CONFIG_IEEE1394_SBP2_DEBUG_DMA */
+/* #define CONFIG_IEEE1394_SBP2_DEBUG 1 */
+/* #define CONFIG_IEEE1394_SBP2_DEBUG 2 */
+/* #define CONFIG_IEEE1394_SBP2_PACKET_DUMP */
+
+#ifdef CONFIG_IEEE1394_SBP2_DEBUG_ORBS
+#define SBP2_ORB_DEBUG(fmt, args...)	HPSB_ERR("sbp2(%s): "fmt, __FUNCTION__, ## args)
+static u32 global_outstanding_command_orbs = 0;
+#define outstanding_orb_incr global_outstanding_command_orbs++
+#define outstanding_orb_decr global_outstanding_command_orbs--
+#else
+#define SBP2_ORB_DEBUG(fmt, args...)
+#define outstanding_orb_incr
+#define outstanding_orb_decr
+#endif
+
+#ifdef CONFIG_IEEE1394_SBP2_DEBUG_DMA
+#define SBP2_DMA_ALLOC(fmt, args...) \
+	HPSB_ERR("sbp2(%s)alloc(%d): "fmt, __FUNCTION__, \
+		 ++global_outstanding_dmas, ## args)
+#define SBP2_DMA_FREE(fmt, args...) \
+	HPSB_ERR("sbp2(%s)free(%d): "fmt, __FUNCTION__, \
+		 --global_outstanding_dmas, ## args)
+static u32 global_outstanding_dmas = 0;
+#else
+#define SBP2_DMA_ALLOC(fmt, args...)
+#define SBP2_DMA_FREE(fmt, args...)
+#endif
+
+#if CONFIG_IEEE1394_SBP2_DEBUG >= 2
+#define SBP2_DEBUG(fmt, args...)	HPSB_ERR("sbp2: "fmt, ## args)
+#define SBP2_INFO(fmt, args...)		HPSB_ERR("sbp2: "fmt, ## args)
+#define SBP2_NOTICE(fmt, args...)	HPSB_ERR("sbp2: "fmt, ## args)
+#define SBP2_WARN(fmt, args...)		HPSB_ERR("sbp2: "fmt, ## args)
+#elif CONFIG_IEEE1394_SBP2_DEBUG == 1
+#define SBP2_DEBUG(fmt, args...)	HPSB_DEBUG("sbp2: "fmt, ## args)
+#define SBP2_INFO(fmt, args...)		HPSB_INFO("sbp2: "fmt, ## args)
+#define SBP2_NOTICE(fmt, args...)	HPSB_NOTICE("sbp2: "fmt, ## args)
+#define SBP2_WARN(fmt, args...)		HPSB_WARN("sbp2: "fmt, ## args)
+#else
+#define SBP2_DEBUG(fmt, args...)
+#define SBP2_INFO(fmt, args...)		HPSB_INFO("sbp2: "fmt, ## args)
+#define SBP2_NOTICE(fmt, args...)       HPSB_NOTICE("sbp2: "fmt, ## args)
+#define SBP2_WARN(fmt, args...)         HPSB_WARN("sbp2: "fmt, ## args)
+#endif
+
+#define SBP2_ERR(fmt, args...)		HPSB_ERR("sbp2: "fmt, ## args)
+
+
+/*
+ * Globals
+ */
+
+static void sbp2scsi_complete_all_commands(struct scsi_id_instance_data *scsi_id,
+					   u32 status);
+
+static void sbp2scsi_complete_command(struct scsi_id_instance_data *scsi_id,
+				      u32 scsi_status, struct scsi_cmnd *SCpnt,
+				      void (*done)(struct scsi_cmnd *));
+
+static struct scsi_host_template scsi_driver_template;
+
+static const u8 sbp2_speedto_max_payload[] = { 0x7, 0x8, 0x9, 0xA, 0xB, 0xC };
+
+static void sbp2_host_reset(struct hpsb_host *host);
+
+static int sbp2_probe(struct device *dev);
+static int sbp2_remove(struct device *dev);
+static int sbp2_update(struct unit_directory *ud);
+
+static struct hpsb_highlevel sbp2_highlevel = {
+	.name =		SBP2_DEVICE_NAME,
+	.host_reset =	sbp2_host_reset,
+};
+
+static struct hpsb_address_ops sbp2_ops = {
+	.write = sbp2_handle_status_write
+};
+
+#ifdef CONFIG_IEEE1394_SBP2_PHYS_DMA
+static struct hpsb_address_ops sbp2_physdma_ops = {
+        .read = sbp2_handle_physdma_read,
+        .write = sbp2_handle_physdma_write,
+};
+#endif
+
+static struct hpsb_protocol_driver sbp2_driver = {
+	.name		= "SBP2 Driver",
+	.id_table	= sbp2_id_table,
+	.update		= sbp2_update,
+	.driver		= {
+		.name		= SBP2_DEVICE_NAME,
+		.bus		= &ieee1394_bus_type,
+		.probe		= sbp2_probe,
+		.remove		= sbp2_remove,
+	},
+};
+
+
+/* List of device firmware's that require a forced 36 byte inquiry.  */
+static u32 sbp2_broken_inquiry_list[] = {
+	0x00002800,	/* Stefan Richter <richtest@bauwesen.tu-cottbus.de> */
+			/* DViCO Momobay CX-1 */
+	0x00000200	/* Andreas Plesch <plesch@fas.harvard.edu> */
+			/* QPS Fire DVDBurner */
+};
+
+#define NUM_BROKEN_INQUIRY_DEVS \
+	(sizeof(sbp2_broken_inquiry_list)/sizeof(*sbp2_broken_inquiry_list))
+
+/**************************************
+ * General utility functions
+ **************************************/
+
+
+#ifndef __BIG_ENDIAN
+/*
+ * Converts a buffer from be32 to cpu byte ordering. Length is in bytes.
+ * Trailing bytes beyond a multiple of four are left untouched.
+ */
+static __inline__ void sbp2util_be32_to_cpu_buffer(void *buffer, int length)
+{
+	u32 *temp = buffer;
+
+	for (length = (length >> 2); length--; )
+		temp[length] = be32_to_cpu(temp[length]);
+
+	return;
+}
+
+/*
+ * Converts a buffer of quadlets from cpu to be32 byte ordering, in
+ * place.  Length is in bytes; trailing bytes beyond a multiple of
+ * four are left untouched.
+ */
+static __inline__ void sbp2util_cpu_to_be32_buffer(void *buffer, int length)
+{
+	u32 *q = buffer;
+	int i, n = length >> 2;
+
+	for (i = 0; i < n; i++)
+		q[i] = cpu_to_be32(q[i]);
+}
+#else /* BIG_ENDIAN */
+/* Why waste the cpu cycles? */
+#define sbp2util_be32_to_cpu_buffer(x,y)
+#define sbp2util_cpu_to_be32_buffer(x,y)
+#endif
+
+#ifdef CONFIG_IEEE1394_SBP2_PACKET_DUMP
+/*
+ * Debug packet dump routine. Length is in bytes.  Prints at most 0x40
+ * bytes, grouped four to a quadlet and sixteen to a line, followed by
+ * an ellipsis if the packet is longer.
+ */
+static void sbp2util_packet_dump(void *buffer, int length, char *dump_name, u32 dump_phys_addr)
+{
+	int i;
+	unsigned char *dump = buffer;
+
+	if (!dump || !length || !dump_name)
+		return;
+
+	if (dump_phys_addr)
+		printk("[%s, 0x%x]", dump_name, dump_phys_addr);
+	else
+		printk("[%s]", dump_name);
+	for (i = 0; i < length; i++) {
+		if (i > 0x3f) {
+			printk("\n   ...");
+			break;
+		}
+		if ((i & 0x3) == 0)
+			printk("  ");	/* gap between quadlet groups */
+		if ((i & 0xf) == 0)
+			printk("\n   ");	/* new line every 16 bytes */
+		printk("%02x ", (int) dump[i]);
+	}
+	printk("\n");
+
+	return;
+}
+#else
+#define sbp2util_packet_dump(w,x,y,z)
+#endif
+
+/*
+ * Polls the "done" atomic in 100ms steps until it becomes non-zero or
+ * the timeout (in jiffies) expires.  Returns 0 on completion, 1 on
+ * timeout or signal interruption.
+ */
+static int sbp2util_down_timeout(atomic_t *done, int timeout)
+{
+	int remaining = timeout;
+
+	while (remaining > 0 && atomic_read(done) == 0) {
+		if (msleep_interruptible(100))	/* interrupted by a signal */
+			return 1;
+		remaining -= HZ / 10;	/* 100ms worth of jiffies */
+	}
+	return (remaining > 0) ? 0 : 1;
+}
+
+/* Frees an allocated packet, releasing its transaction label first. */
+static void sbp2_free_packet(struct hpsb_packet *packet)
+{
+	hpsb_free_tlabel(packet);
+	hpsb_free_packet(packet);
+}
+
+/*
+ * Much like hpsb_node_write(), except it ignores the response
+ * subaction and returns immediately.  Can be used from interrupts.
+ * Returns 0 on success, -ENOMEM or -EIO on failure.
+ */
+static int sbp2util_node_write_no_wait(struct node_entry *ne, u64 addr,
+				quadlet_t *buffer, size_t length)
+{
+	struct hpsb_packet *packet;
+
+	packet = hpsb_make_writepacket(ne->host, ne->nodeid, addr, buffer,
+				       length);
+	if (!packet)
+		return -ENOMEM;
+
+	/* Free the packet (and its tlabel) as soon as transmission completes. */
+	hpsb_set_packet_complete_task(packet,
+				      (void (*)(void *))sbp2_free_packet,
+				      packet);
+
+	hpsb_node_fill_packet(ne, packet);
+
+	if (hpsb_send_packet(packet) < 0) {
+		sbp2_free_packet(packet);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+/*
+ * Creates the pool of command orbs used for command processing.  It is
+ * called when a new sbp2 device is detected.  Each entry gets its ORB
+ * and scatter/gather element list DMA-mapped up front and is put on
+ * the "completed" (free) list.  Returns 0 on success, -ENOMEM if an
+ * allocation fails (entries allocated so far stay on the list for the
+ * pool-removal path to clean up).
+ */
+static int sbp2util_create_command_orb_pool(struct scsi_id_instance_data *scsi_id)
+{
+	struct sbp2scsi_host_info *hi = scsi_id->hi;
+	int i;
+	unsigned long flags, orbs;
+	struct sbp2_command_info *command;
+
+	/* With serialized I/O only two orbs are ever needed. */
+	orbs = serialize_io ? 2 : SBP2_MAX_CMDS;
+
+	spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags);
+	for (i = 0; i < orbs; i++) {
+		/* GFP_ATOMIC: we hold a spinlock and must not sleep. */
+		command = (struct sbp2_command_info *)
+		    kmalloc(sizeof(struct sbp2_command_info), GFP_ATOMIC);
+		if (!command) {
+			spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
+			return(-ENOMEM);
+		}
+		memset(command, '\0', sizeof(struct sbp2_command_info));
+		command->command_orb_dma =
+			pci_map_single (hi->host->pdev, &command->command_orb,
+					sizeof(struct sbp2_command_orb),
+					PCI_DMA_BIDIRECTIONAL);
+		SBP2_DMA_ALLOC("single command orb DMA");
+		command->sge_dma =
+			pci_map_single (hi->host->pdev, &command->scatter_gather_element,
+					sizeof(command->scatter_gather_element),
+					PCI_DMA_BIDIRECTIONAL);
+		SBP2_DMA_ALLOC("scatter_gather_element");
+		INIT_LIST_HEAD(&command->list);
+		/* New entries start out on the free ("completed") list. */
+		list_add_tail(&command->list, &scsi_id->sbp2_command_orb_completed);
+	}
+	spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
+	return 0;
+}
+
+/*
+ * Deletes the pool of command orbs, unmapping each entry's DMA
+ * regions before freeing it.
+ */
+static void sbp2util_remove_command_orb_pool(struct scsi_id_instance_data *scsi_id)
+{
+	struct hpsb_host *host = scsi_id->hi->host;
+	struct list_head *lh, *next;
+	struct sbp2_command_info *command;
+	unsigned long flags;
+
+	spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags);
+	/* list_for_each_safe() handles an empty list and lets us free
+	 * entries while walking. */
+	list_for_each_safe(lh, next, &scsi_id->sbp2_command_orb_completed) {
+		command = list_entry(lh, struct sbp2_command_info, list);
+
+		/* Release the generic DMA mappings taken at pool creation. */
+		pci_unmap_single(host->pdev, command->command_orb_dma,
+				 sizeof(struct sbp2_command_orb),
+				 PCI_DMA_BIDIRECTIONAL);
+		SBP2_DMA_FREE("single command orb DMA");
+		pci_unmap_single(host->pdev, command->sge_dma,
+				 sizeof(command->scatter_gather_element),
+				 PCI_DMA_BIDIRECTIONAL);
+		SBP2_DMA_FREE("scatter_gather_element");
+
+		kfree(command);
+	}
+	spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
+}
+
+/*
+ * Finds the sbp2_command for a given outstanding command orb.  Only
+ * the inuse list is searched.  Returns NULL when there is no match.
+ */
+static struct sbp2_command_info *sbp2util_find_command_for_orb(
+		struct scsi_id_instance_data *scsi_id, dma_addr_t orb)
+{
+	struct sbp2_command_info *cmd;
+	unsigned long flags;
+
+	spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags);
+	list_for_each_entry(cmd, &scsi_id->sbp2_command_orb_inuse, list) {
+		if (cmd->command_orb_dma == orb) {
+			spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
+			return cmd;
+		}
+	}
+	spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
+
+	SBP2_ORB_DEBUG("could not match command orb %x", (unsigned int)orb);
+
+	return NULL;
+}
+
+/*
+ * Finds the sbp2_command for a given outstanding SCpnt.  Only the
+ * inuse list is searched.  Returns NULL when there is no match.
+ */
+static struct sbp2_command_info *sbp2util_find_command_for_SCpnt(struct scsi_id_instance_data *scsi_id, void *SCpnt)
+{
+	struct sbp2_command_info *cmd;
+	unsigned long flags;
+
+	spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags);
+	list_for_each_entry(cmd, &scsi_id->sbp2_command_orb_inuse, list) {
+		if (cmd->Current_SCpnt == SCpnt) {
+			spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
+			return cmd;
+		}
+	}
+	spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
+	return NULL;
+}
+
+/*
+ * Takes a free command orb off the completed list, records the SCSI
+ * command it will carry, and moves it to the inuse list.  Returns
+ * NULL (with an error logged) if the pool is exhausted.
+ */
+static struct sbp2_command_info *sbp2util_allocate_command_orb(
+		struct scsi_id_instance_data *scsi_id,
+		struct scsi_cmnd *Current_SCpnt,
+		void (*Current_done)(struct scsi_cmnd *))
+{
+	struct sbp2_command_info *cmd = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags);
+	if (list_empty(&scsi_id->sbp2_command_orb_completed)) {
+		SBP2_ERR("sbp2util_allocate_command_orb - No orbs available!");
+	} else {
+		cmd = list_entry(scsi_id->sbp2_command_orb_completed.next,
+				 struct sbp2_command_info, list);
+		list_del(&cmd->list);
+		cmd->Current_done = Current_done;
+		cmd->Current_SCpnt = Current_SCpnt;
+		list_add_tail(&cmd->list, &scsi_id->sbp2_command_orb_inuse);
+	}
+	spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
+	return cmd;
+}
+
+/*
+ * Releases the per-command DMA mappings (data buffer and scatter
+ * list) set up for one SCSI command.  The pool-lifetime mappings
+ * (command_orb_dma, sge_dma) are not touched here.
+ */
+static void sbp2util_free_command_dma(struct sbp2_command_info *command)
+{
+	struct scsi_id_instance_data *scsi_id =
+		(struct scsi_id_instance_data *)command->Current_SCpnt->device->host->hostdata[0];
+	struct hpsb_host *host;
+
+	if (!scsi_id) {
+		printk(KERN_ERR "%s: scsi_id == NULL\n", __FUNCTION__);
+		return;
+	}
+
+	host = scsi_id->ud->ne->host;
+
+	if (command->cmd_dma) {
+		if (command->dma_type == CMD_DMA_SINGLE) {
+			pci_unmap_single(host->pdev, command->cmd_dma,
+					 command->dma_size, command->dma_dir);
+			SBP2_DMA_FREE("single bulk");
+		} else if (command->dma_type == CMD_DMA_PAGE) {
+			pci_unmap_page(host->pdev, command->cmd_dma,
+				       command->dma_size, command->dma_dir);
+			SBP2_DMA_FREE("single page");
+		} /* XXX: Check for CMD_DMA_NONE bug */
+		command->dma_type = CMD_DMA_NONE;
+		command->cmd_dma = 0;
+	}
+
+	if (command->sge_buffer) {
+		pci_unmap_sg(host->pdev, command->sge_buffer,
+			     command->dma_size, command->dma_dir);
+		SBP2_DMA_FREE("scatter list");
+		command->sge_buffer = NULL;
+	}
+}
+
+/*
+ * Moves a command from the inuse list back to the completed (free)
+ * list, releasing its per-command DMA mappings on the way.
+ */
+static void sbp2util_mark_command_completed(struct scsi_id_instance_data *scsi_id, struct sbp2_command_info *command)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags);
+	list_del(&command->list);
+	sbp2util_free_command_dma(command);
+	list_add_tail(&command->list, &scsi_id->sbp2_command_orb_completed);
+	spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
+}
+
+
+
+/*********************************************
+ * IEEE-1394 core driver stack related section
+ *********************************************/
+static struct scsi_id_instance_data *sbp2_alloc_device(struct unit_directory *ud);
+
+/*
+ * Bus probe callback: allocate and start an sbp2 device for a unit
+ * directory.  Returns 0 on success or a negative errno.
+ */
+static int sbp2_probe(struct device *dev)
+{
+	struct unit_directory *ud;
+	struct scsi_id_instance_data *scsi_id;
+
+	SBP2_DEBUG("sbp2_probe");
+
+	ud = container_of(dev, struct unit_directory, device);
+
+	/* Don't probe UD's that have the LUN flag. We'll probe the LUN(s)
+	 * instead. */
+	if (ud->flags & UNIT_DIRECTORY_HAS_LUN_DIRECTORY)
+		return -ENODEV;
+
+	scsi_id = sbp2_alloc_device(ud);
+	if (!scsi_id)
+		return -ENOMEM;
+
+	sbp2_parse_unit_directory(scsi_id, ud);
+
+	return sbp2_start_device(scsi_id);
+}
+
+/* Bus remove callback: log out of the device and tear down our state. */
+static int sbp2_remove(struct device *dev)
+{
+	struct unit_directory *ud =
+		container_of(dev, struct unit_directory, device);
+	struct scsi_id_instance_data *scsi_id = ud->device.driver_data;
+
+	SBP2_DEBUG("sbp2_remove");
+
+	sbp2_logout_device(scsi_id);
+	sbp2_remove_device(scsi_id);
+
+	return 0;
+}
+
+/*
+ * The driver's .update callback (see sbp2_driver): re-establish the
+ * connection to a known device, likely after a bus reset.  Tries a
+ * reconnect first and falls back to a logout + fresh login, then
+ * restores device settings and releases any in-flight commands.
+ * Returns 0 on success, -EBUSY if the device could not be re-logged-in.
+ */
+static int sbp2_update(struct unit_directory *ud)
+{
+	struct scsi_id_instance_data *scsi_id = ud->device.driver_data;
+
+	SBP2_DEBUG("sbp2_update");
+
+	if (sbp2_reconnect_device(scsi_id)) {
+
+		/*
+		 * Ok, reconnect has failed. Perhaps we didn't
+		 * reconnect fast enough. Try doing a regular login, but
+		 * first do a logout just in case of any weirdness.
+		 */
+		sbp2_logout_device(scsi_id);
+
+		if (sbp2_login_device(scsi_id)) {
+			/* Login failed too, just fail, and the backend
+			 * will call our sbp2_remove for us */
+			SBP2_ERR("Failed to reconnect to sbp2 device!");
+			return -EBUSY;
+		}
+	}
+
+	/* Set max retries to something large on the device. */
+	sbp2_set_busy_timeout(scsi_id);
+
+	/* Do a SBP-2 fetch agent reset. */
+	sbp2_agent_reset(scsi_id, 1);
+
+	/* Get the max speed and packet size that we can use. */
+	sbp2_max_speed_and_size(scsi_id);
+
+	/* Complete any pending commands with busy (so they get
+	 * retried) and remove them from our queue
+	 */
+	sbp2scsi_complete_all_commands(scsi_id, DID_BUS_BUSY);
+
+	/* Make sure we unblock requests (since this is likely after a bus
+	 * reset). */
+	scsi_unblock_requests(scsi_id->scsi_host);
+
+	return 0;
+}
+
+/* This function is called by sbp2_probe, for each new device. We now
+ * allocate one scsi host for each scsi_id (unit directory).  The first
+ * device on a 1394 host also allocates the shared hostinfo and
+ * registers the status FIFO address space.  Returns the new scsi_id,
+ * or NULL on any allocation/registration failure. */
+static struct scsi_id_instance_data *sbp2_alloc_device(struct unit_directory *ud)
+{
+	struct sbp2scsi_host_info *hi;
+	struct Scsi_Host *scsi_host = NULL;
+	struct scsi_id_instance_data *scsi_id = NULL;
+
+	SBP2_DEBUG("sbp2_alloc_device");
+
+	scsi_id = kmalloc(sizeof(*scsi_id), GFP_KERNEL);
+	if (!scsi_id) {
+		SBP2_ERR("failed to create scsi_id");
+		goto failed_alloc;
+	}
+	memset(scsi_id, 0, sizeof(*scsi_id));
+
+	scsi_id->ne = ud->ne;
+	scsi_id->ud = ud;
+	/* Start conservatively; sbp2_max_speed_and_size() raises this later. */
+	scsi_id->speed_code = IEEE1394_SPEED_100;
+	scsi_id->max_payload_size = sbp2_speedto_max_payload[IEEE1394_SPEED_100];
+	atomic_set(&scsi_id->sbp2_login_complete, 0);
+	INIT_LIST_HEAD(&scsi_id->sbp2_command_orb_inuse);
+	INIT_LIST_HEAD(&scsi_id->sbp2_command_orb_completed);
+	INIT_LIST_HEAD(&scsi_id->scsi_list);
+	spin_lock_init(&scsi_id->sbp2_command_orb_lock);
+	scsi_id->sbp2_device_type_and_lun = SBP2_DEVICE_TYPE_LUN_UNINITIALIZED;
+
+	ud->device.driver_data = scsi_id;
+
+	/* Hostinfo is shared per 1394 host; create it on first use. */
+	hi = hpsb_get_hostinfo(&sbp2_highlevel, ud->ne->host);
+	if (!hi) {
+		hi = hpsb_create_hostinfo(&sbp2_highlevel, ud->ne->host, sizeof(*hi));
+		if (!hi) {
+			SBP2_ERR("failed to allocate hostinfo");
+			goto failed_alloc;
+		}
+		SBP2_DEBUG("sbp2_alloc_device: allocated hostinfo");
+		hi->host = ud->ne->host;
+		INIT_LIST_HEAD(&hi->scsi_ids);
+
+		/* Register our sbp2 status address space... */
+		hpsb_register_addrspace(&sbp2_highlevel, ud->ne->host, &sbp2_ops,
+					SBP2_STATUS_FIFO_ADDRESS,
+					SBP2_STATUS_FIFO_ADDRESS +
+					SBP2_STATUS_FIFO_ENTRY_TO_OFFSET(SBP2_MAX_UDS_PER_NODE+1));
+#ifdef CONFIG_IEEE1394_SBP2_PHYS_DMA
+		/* Handle data movement if physical dma is not
+		 * enabled/supported on host controller */
+		hpsb_register_addrspace(&sbp2_highlevel, ud->ne->host, &sbp2_physdma_ops,
+					0x0ULL, 0xfffffffcULL);
+#endif
+	}
+
+	scsi_id->hi = hi;
+
+	list_add_tail(&scsi_id->scsi_list, &hi->scsi_ids);
+
+	/* Register our host with the SCSI stack. */
+	scsi_host = scsi_host_alloc(&scsi_driver_template, 0);
+	if (!scsi_host) {
+		SBP2_ERR("failed to register scsi host");
+		goto failed_alloc;
+	}
+
+	scsi_host->hostdata[0] = (unsigned long)scsi_id;
+
+	/* scsi_add_host() returns 0 on success. */
+	if (!scsi_add_host(scsi_host, &ud->device)) {
+		scsi_id->scsi_host = scsi_host;
+		return scsi_id;
+	}
+
+	SBP2_ERR("failed to add scsi host");
+	scsi_host_put(scsi_host);
+
+failed_alloc:
+	/* NOTE(review): reached with scsi_id == NULL when the initial
+	 * kmalloc fails -- assumes sbp2_remove_device() tolerates NULL;
+	 * confirm in its definition. */
+	sbp2_remove_device(scsi_id);
+	return NULL;
+}
+
+
+/*
+ * Highlevel host_reset hook: block SCSI request processing on every
+ * scsi_id registered on this 1394 host.
+ */
+static void sbp2_host_reset(struct hpsb_host *host)
+{
+	struct sbp2scsi_host_info *hi;
+	struct scsi_id_instance_data *scsi_id;
+
+	hi = hpsb_get_hostinfo(&sbp2_highlevel, host);
+	if (!hi)
+		return;
+
+	list_for_each_entry(scsi_id, &hi->scsi_ids, scsi_list)
+		scsi_block_requests(scsi_id->scsi_host);
+}
+
+
+/*
+ * This function is where we first pull the node unique ids, and then
+ * allocate memory and register a SBP-2 device.
+ */
+static int sbp2_start_device(struct scsi_id_instance_data *scsi_id)
+{
+	struct sbp2scsi_host_info *hi = scsi_id->hi;
+	struct scsi_device *sdev;
+
+	SBP2_DEBUG("sbp2_start_device");
+
+	/* Login FIFO DMA */
+	scsi_id->login_response =
+		pci_alloc_consistent(hi->host->pdev, sizeof(struct sbp2_login_response),
+				     &scsi_id->login_response_dma);
+	if (!scsi_id->login_response)
+		goto alloc_fail;
+	SBP2_DMA_ALLOC("consistent DMA region for login FIFO");
+
+	/* Query logins ORB DMA */
+	scsi_id->query_logins_orb =
+		pci_alloc_consistent(hi->host->pdev, sizeof(struct sbp2_query_logins_orb),
+				     &scsi_id->query_logins_orb_dma);
+	if (!scsi_id->query_logins_orb)
+		goto alloc_fail;
+	SBP2_DMA_ALLOC("consistent DMA region for query logins ORB");
+
+	/* Query logins response DMA */
+	scsi_id->query_logins_response =
+		pci_alloc_consistent(hi->host->pdev, sizeof(struct sbp2_query_logins_response),
+				     &scsi_id->query_logins_response_dma);
+	if (!scsi_id->query_logins_response)
+		goto alloc_fail;
+	SBP2_DMA_ALLOC("consistent DMA region for query logins response");
+
+	/* Reconnect ORB DMA */
+	scsi_id->reconnect_orb =
+		pci_alloc_consistent(hi->host->pdev, sizeof(struct sbp2_reconnect_orb),
+				     &scsi_id->reconnect_orb_dma);
+	if (!scsi_id->reconnect_orb)
+		goto alloc_fail;
+	SBP2_DMA_ALLOC("consistent DMA region for reconnect ORB");
+
+	/* Logout ORB DMA */
+	scsi_id->logout_orb =
+		pci_alloc_consistent(hi->host->pdev, sizeof(struct sbp2_logout_orb),
+				     &scsi_id->logout_orb_dma);
+	if (!scsi_id->logout_orb)
+		goto alloc_fail;
+	SBP2_DMA_ALLOC("consistent DMA region for logout ORB");
+
+	/* Login ORB DMA */
+	scsi_id->login_orb =
+		pci_alloc_consistent(hi->host->pdev, sizeof(struct sbp2_login_orb),
+				     &scsi_id->login_orb_dma);
+	if (!scsi_id->login_orb) {
+alloc_fail:
+		if (scsi_id->query_logins_response) {
+			pci_free_consistent(hi->host->pdev,
+					    sizeof(struct sbp2_query_logins_response),
+					    scsi_id->query_logins_response,
+					    scsi_id->query_logins_response_dma);
+			SBP2_DMA_FREE("query logins response DMA");
+		}
+
+		if (scsi_id->query_logins_orb) {
+			pci_free_consistent(hi->host->pdev,
+					    sizeof(struct sbp2_query_logins_orb),
+					    scsi_id->query_logins_orb,
+					    scsi_id->query_logins_orb_dma);
+			SBP2_DMA_FREE("query logins ORB DMA");
+		}
+
+		if (scsi_id->logout_orb) {
+			pci_free_consistent(hi->host->pdev,
+					sizeof(struct sbp2_logout_orb),
+					scsi_id->logout_orb,
+					scsi_id->logout_orb_dma);
+			SBP2_DMA_FREE("logout ORB DMA");
+		}
+
+		if (scsi_id->reconnect_orb) {
+			pci_free_consistent(hi->host->pdev,
+					sizeof(struct sbp2_reconnect_orb),
+					scsi_id->reconnect_orb,
+					scsi_id->reconnect_orb_dma);
+			SBP2_DMA_FREE("reconnect ORB DMA");
+		}
+
+		if (scsi_id->login_response) {
+			pci_free_consistent(hi->host->pdev,
+					sizeof(struct sbp2_login_response),
+					scsi_id->login_response,
+					scsi_id->login_response_dma);
+			SBP2_DMA_FREE("login FIFO DMA");
+		}
+
+		list_del(&scsi_id->scsi_list);
+
+		kfree(scsi_id);
+
+		SBP2_ERR ("Could not allocate memory for scsi_id");
+
+		return -ENOMEM;
+	}
+	SBP2_DMA_ALLOC("consistent DMA region for login ORB");
+
+	SBP2_DEBUG("New SBP-2 device inserted, SCSI ID = %x", scsi_id->ud->id);
+
+	/*
+	 * Create our command orb pool
+	 */
+	if (sbp2util_create_command_orb_pool(scsi_id)) {
+		SBP2_ERR("sbp2util_create_command_orb_pool failed!");
+		sbp2_remove_device(scsi_id);
+		return -ENOMEM;
+	}
+
+	/* Schedule a timeout here. The reason is that we may be so close
+	 * to a bus reset, that the device is not available for logins.
+	 * This can happen when the bus reset is caused by the host
+	 * connected to the sbp2 device being removed. That host would
+	 * have a certain amount of time to relogin before the sbp2 device
+	 * allows someone else to login instead. One second makes sense. */
+	msleep_interruptible(1000);
+	if (signal_pending(current)) {
+		SBP2_WARN("aborting sbp2_start_device due to event");
+		sbp2_remove_device(scsi_id);
+		return -EINTR;
+	}
+	
+	/*
+	 * Login to the sbp-2 device
+	 */
+	if (sbp2_login_device(scsi_id)) {
+		/* Login failed, just remove the device. */
+		sbp2_remove_device(scsi_id);
+		return -EBUSY;
+	}
+
+	/*
+	 * Set max retries to something large on the device
+	 */
+	sbp2_set_busy_timeout(scsi_id);
+
+	/*
+	 * Do a SBP-2 fetch agent reset
+	 */
+	sbp2_agent_reset(scsi_id, 1);
+
+	/*
+	 * Get the max speed and packet size that we can use
+	 */
+	sbp2_max_speed_and_size(scsi_id);
+
+	/* Add this device to the scsi layer now */
+	sdev = scsi_add_device(scsi_id->scsi_host, 0, scsi_id->ud->id, 0);
+	if (IS_ERR(sdev)) {
+		SBP2_ERR("scsi_add_device failed");
+		return PTR_ERR(sdev);
+	}
+
+	return 0;
+}
+
+/*
+ * This function removes an sbp2 device from the sbp2scsi_host_info struct.
+ *
+ * Tears down the SCSI host first (which removes the SCSI device), then
+ * the command ORB pool, and finally frees every consistent DMA region
+ * that was allocated at start-up.  Each resource is checked before
+ * release, so a partially initialized scsi_id is handled safely.
+ */
+static void sbp2_remove_device(struct scsi_id_instance_data *scsi_id)
+{
+	struct sbp2scsi_host_info *hi;
+
+	SBP2_DEBUG("sbp2_remove_device");
+
+	/* Nothing to do if the device was never attached */
+	if (!scsi_id)
+		return;
+
+	hi = scsi_id->hi;
+
+	/* This will remove our scsi device as well */
+	if (scsi_id->scsi_host) {
+		scsi_remove_host(scsi_id->scsi_host);
+		scsi_host_put(scsi_id->scsi_host);
+	}
+
+	/* Release outstanding command ORBs before the login/management
+	 * DMA regions below are freed */
+	sbp2util_remove_command_orb_pool(scsi_id);
+
+	list_del(&scsi_id->scsi_list);
+
+	/* Free each consistent DMA region, guarding against allocations
+	 * that never happened (e.g. when device start-up failed early) */
+	if (scsi_id->login_response) {
+		pci_free_consistent(hi->host->pdev,
+				    sizeof(struct sbp2_login_response),
+				    scsi_id->login_response,
+				    scsi_id->login_response_dma);
+		SBP2_DMA_FREE("single login FIFO");
+	}
+
+	if (scsi_id->login_orb) {
+		pci_free_consistent(hi->host->pdev,
+				    sizeof(struct sbp2_login_orb),
+				    scsi_id->login_orb,
+				    scsi_id->login_orb_dma);
+		SBP2_DMA_FREE("single login ORB");
+	}
+
+	if (scsi_id->reconnect_orb) {
+		pci_free_consistent(hi->host->pdev,
+				    sizeof(struct sbp2_reconnect_orb),
+				    scsi_id->reconnect_orb,
+				    scsi_id->reconnect_orb_dma);
+		SBP2_DMA_FREE("single reconnect orb");
+	}
+
+	if (scsi_id->logout_orb) {
+		pci_free_consistent(hi->host->pdev,
+				    sizeof(struct sbp2_logout_orb),
+				    scsi_id->logout_orb,
+				    scsi_id->logout_orb_dma);
+		SBP2_DMA_FREE("single logout orb");
+	}
+
+	if (scsi_id->query_logins_orb) {
+		pci_free_consistent(hi->host->pdev,
+				    sizeof(struct sbp2_query_logins_orb),
+				    scsi_id->query_logins_orb,
+				    scsi_id->query_logins_orb_dma);
+		SBP2_DMA_FREE("single query logins orb");
+	}
+
+	if (scsi_id->query_logins_response) {
+		pci_free_consistent(hi->host->pdev,
+				    sizeof(struct sbp2_query_logins_response),
+				    scsi_id->query_logins_response,
+				    scsi_id->query_logins_response_dma);
+		SBP2_DMA_FREE("single query logins data");
+	}
+
+	/* Clear the back-pointer stored in the unit directory */
+	scsi_id->ud->device.driver_data = NULL;
+
+	SBP2_DEBUG("SBP-2 device removed, SCSI ID = %d", scsi_id->ud->id);
+
+	kfree(scsi_id);
+}
+
+#ifdef CONFIG_IEEE1394_SBP2_PHYS_DMA
+/*
+ * Software emulation of physical DMA writes, for host adapters that lack
+ * hardware support.  Intended mainly as a debugging aid.
+ */
+static int sbp2_handle_physdma_write(struct hpsb_host *host, int nodeid, int destid, quadlet_t *data,
+				     u64 addr, size_t length, u16 flags)
+{
+	/* Copy the incoming payload straight into host memory */
+	memcpy(bus_to_virt((u32)addr), data, length);
+	sbp2util_packet_dump(data, length, "sbp2 phys dma write by device", (u32)addr);
+	return RCODE_COMPLETE;
+}
+
+/*
+ * Software emulation of physical DMA reads, for host adapters that lack
+ * hardware support.  Intended mainly as a debugging aid.
+ */
+static int sbp2_handle_physdma_read(struct hpsb_host *host, int nodeid, quadlet_t *data,
+				    u64 addr, size_t length, u16 flags)
+{
+	/* Fetch the requested bytes from host memory for the read response */
+	memcpy(data, bus_to_virt((u32)addr), length);
+	sbp2util_packet_dump(data, length, "sbp2 phys dma read by device", (u32)addr);
+	return RCODE_COMPLETE;
+}
+#endif
+
+
+/**************************************
+ * SBP-2 protocol related section
+ **************************************/
+
+/*
+ * Decide whether SCSI command conversion applies to the given SBP-2
+ * device type: only disk, SDAD and ROM devices qualify.
+ */
+static __inline__ int sbp2_command_conversion_device_type(u8 device_type)
+{
+	switch (device_type) {
+	case TYPE_DISK:
+	case TYPE_SDAD:
+	case TYPE_ROM:
+		return 1;
+	default:
+		return 0;
+	}
+}
+
+/*
+ * This function queries the device for the maximum concurrent logins it
+ * supports.
+ *
+ * Builds a QUERY LOGINS management ORB, writes its address to the
+ * target's management agent register and waits up to two seconds for
+ * the status write-back.  Returns 0 when at least one more login slot
+ * is available; -EIO on timeout, on a status block that does not refer
+ * to our ORB, on an error status, or when the device is already at its
+ * login limit.
+ */
+static int sbp2_query_logins(struct scsi_id_instance_data *scsi_id)
+{
+	struct sbp2scsi_host_info *hi = scsi_id->hi;
+	quadlet_t data[2];
+	int max_logins;
+	int active_logins;
+
+	SBP2_DEBUG("sbp2_query_logins");
+
+	scsi_id->query_logins_orb->reserved1 = 0x0;
+	scsi_id->query_logins_orb->reserved2 = 0x0;
+
+	scsi_id->query_logins_orb->query_response_lo = scsi_id->query_logins_response_dma;
+	scsi_id->query_logins_orb->query_response_hi = ORB_SET_NODE_ID(hi->host->node_id);
+	SBP2_DEBUG("sbp2_query_logins: query_response_hi/lo initialized");
+
+	scsi_id->query_logins_orb->lun_misc = ORB_SET_FUNCTION(SBP2_QUERY_LOGINS_REQUEST);
+	scsi_id->query_logins_orb->lun_misc |= ORB_SET_NOTIFY(1);
+	/* Include the LUN only if the unit directory provided one */
+	if (scsi_id->sbp2_device_type_and_lun != SBP2_DEVICE_TYPE_LUN_UNINITIALIZED) {
+		scsi_id->query_logins_orb->lun_misc |= ORB_SET_LUN(scsi_id->sbp2_device_type_and_lun);
+		SBP2_DEBUG("sbp2_query_logins: set lun to %d",
+			   ORB_SET_LUN(scsi_id->sbp2_device_type_and_lun));
+	}
+	SBP2_DEBUG("sbp2_query_logins: lun_misc initialized");
+
+	scsi_id->query_logins_orb->reserved_resp_length =
+		ORB_SET_QUERY_LOGINS_RESP_LENGTH(sizeof(struct sbp2_query_logins_response));
+	SBP2_DEBUG("sbp2_query_logins: reserved_resp_length initialized");
+
+	/* Status write-back address is derived from our unit directory id */
+	scsi_id->query_logins_orb->status_FIFO_lo = SBP2_STATUS_FIFO_ADDRESS_LO +
+						    SBP2_STATUS_FIFO_ENTRY_TO_OFFSET(scsi_id->ud->id);
+	scsi_id->query_logins_orb->status_FIFO_hi = (ORB_SET_NODE_ID(hi->host->node_id) |
+						     SBP2_STATUS_FIFO_ADDRESS_HI);
+	SBP2_DEBUG("sbp2_query_logins: status FIFO initialized");
+
+	sbp2util_cpu_to_be32_buffer(scsi_id->query_logins_orb, sizeof(struct sbp2_query_logins_orb));
+
+	SBP2_DEBUG("sbp2_query_logins: orb byte-swapped");
+
+	sbp2util_packet_dump(scsi_id->query_logins_orb, sizeof(struct sbp2_query_logins_orb),
+			     "sbp2 query logins orb", scsi_id->query_logins_orb_dma);
+
+	memset(scsi_id->query_logins_response, 0, sizeof(struct sbp2_query_logins_response));
+	memset(&scsi_id->status_block, 0, sizeof(struct sbp2_status_block));
+
+	SBP2_DEBUG("sbp2_query_logins: query_logins_response/status FIFO memset");
+
+	/* Write the ORB address to the target's management agent register */
+	data[0] = ORB_SET_NODE_ID(hi->host->node_id);
+	data[1] = scsi_id->query_logins_orb_dma;
+	sbp2util_cpu_to_be32_buffer(data, 8);
+
+	atomic_set(&scsi_id->sbp2_login_complete, 0);
+
+	SBP2_DEBUG("sbp2_query_logins: prepared to write");
+	hpsb_node_write(scsi_id->ne, scsi_id->sbp2_management_agent_addr, data, 8);
+	SBP2_DEBUG("sbp2_query_logins: written");
+
+	if (sbp2util_down_timeout(&scsi_id->sbp2_login_complete, 2*HZ)) {
+		SBP2_INFO("Error querying logins to SBP-2 device - timed out");
+		return -EIO;
+	}
+
+	/* Sanity: the status block must refer to our query logins ORB.
+	 * (Was mislabelled "timed out"; this is a mismatch, not a timeout.) */
+	if (scsi_id->status_block.ORB_offset_lo != scsi_id->query_logins_orb_dma) {
+		SBP2_INFO("Error querying logins to SBP-2 device - status mismatch");
+		return -EIO;
+	}
+
+	/* Any resp/dead-bit/sbp-status failure means the query failed */
+	if (STATUS_GET_RESP(scsi_id->status_block.ORB_offset_hi_misc) ||
+	    STATUS_GET_DEAD_BIT(scsi_id->status_block.ORB_offset_hi_misc) ||
+	    STATUS_GET_SBP_STATUS(scsi_id->status_block.ORB_offset_hi_misc)) {
+		SBP2_INFO("Error querying logins to SBP-2 device - failed");
+		return -EIO;
+	}
+
+	sbp2util_cpu_to_be32_buffer(scsi_id->query_logins_response, sizeof(struct sbp2_query_logins_response));
+
+	SBP2_DEBUG("length_max_logins = %x",
+		   (unsigned int)scsi_id->query_logins_response->length_max_logins);
+
+	SBP2_DEBUG("Query logins to SBP-2 device successful");
+
+	max_logins = RESPONSE_GET_MAX_LOGINS(scsi_id->query_logins_response->length_max_logins);
+	SBP2_DEBUG("Maximum concurrent logins supported: %d", max_logins);
+
+	active_logins = RESPONSE_GET_ACTIVE_LOGINS(scsi_id->query_logins_response->length_max_logins);
+	SBP2_DEBUG("Number of active logins: %d", active_logins);
+
+	/* No free login slot left on the target */
+	if (active_logins >= max_logins)
+		return -EIO;
+
+	return 0;
+}
+
+/*
+ * This function is called in order to login to a particular SBP-2 device,
+ * after a bus reset.
+ *
+ * Builds a LOGIN management ORB (no password; exclusive only when the
+ * exclusive_login module parameter is set), writes its address to the
+ * target's management agent register and waits up to 20 seconds for the
+ * status write-back.  On success the command block agent address is
+ * extracted from the login response.  Returns 0 on success, -EIO on any
+ * failure.
+ */
+static int sbp2_login_device(struct scsi_id_instance_data *scsi_id)
+{
+	struct sbp2scsi_host_info *hi = scsi_id->hi;
+	quadlet_t data[2];
+
+	SBP2_DEBUG("sbp2_login_device");
+
+	if (!scsi_id->login_orb) {
+		SBP2_DEBUG("sbp2_login_device: login_orb not alloc'd!");
+		return -EIO;
+	}
+
+	/* For non-exclusive access, verify the target still has a free
+	 * login slot before attempting the login proper */
+	if (!exclusive_login) {
+		if (sbp2_query_logins(scsi_id)) {
+			SBP2_INFO("Device does not support any more concurrent logins");
+			return -EIO;
+		}
+	}
+
+	/* Set-up login ORB, assume no password */
+	scsi_id->login_orb->password_hi = 0;
+	scsi_id->login_orb->password_lo = 0;
+	SBP2_DEBUG("sbp2_login_device: password_hi/lo initialized");
+
+	scsi_id->login_orb->login_response_lo = scsi_id->login_response_dma;
+	scsi_id->login_orb->login_response_hi = ORB_SET_NODE_ID(hi->host->node_id);
+	SBP2_DEBUG("sbp2_login_device: login_response_hi/lo initialized");
+
+	scsi_id->login_orb->lun_misc = ORB_SET_FUNCTION(SBP2_LOGIN_REQUEST);
+	scsi_id->login_orb->lun_misc |= ORB_SET_RECONNECT(0);	/* One second reconnect time */
+	scsi_id->login_orb->lun_misc |= ORB_SET_EXCLUSIVE(exclusive_login);	/* Exclusive access to device */
+	scsi_id->login_orb->lun_misc |= ORB_SET_NOTIFY(1);	/* Notify us of login complete */
+	/* Set the lun if we were able to pull it from the device's unit directory */
+	if (scsi_id->sbp2_device_type_and_lun != SBP2_DEVICE_TYPE_LUN_UNINITIALIZED) {
+		scsi_id->login_orb->lun_misc |= ORB_SET_LUN(scsi_id->sbp2_device_type_and_lun);
+		/* (Debug label fixed: used to say "sbp2_query_logins") */
+		SBP2_DEBUG("sbp2_login_device: set lun to %d",
+			   ORB_SET_LUN(scsi_id->sbp2_device_type_and_lun));
+	}
+	SBP2_DEBUG("sbp2_login_device: lun_misc initialized");
+
+	scsi_id->login_orb->passwd_resp_lengths =
+		ORB_SET_LOGIN_RESP_LENGTH(sizeof(struct sbp2_login_response));
+	SBP2_DEBUG("sbp2_login_device: passwd_resp_lengths initialized");
+
+	scsi_id->login_orb->status_FIFO_lo = SBP2_STATUS_FIFO_ADDRESS_LO +
+					     SBP2_STATUS_FIFO_ENTRY_TO_OFFSET(scsi_id->ud->id);
+	scsi_id->login_orb->status_FIFO_hi = (ORB_SET_NODE_ID(hi->host->node_id) |
+					      SBP2_STATUS_FIFO_ADDRESS_HI);
+	SBP2_DEBUG("sbp2_login_device: status FIFO initialized");
+
+	/*
+	 * Byte swap ORB if necessary
+	 */
+	sbp2util_cpu_to_be32_buffer(scsi_id->login_orb, sizeof(struct sbp2_login_orb));
+
+	SBP2_DEBUG("sbp2_login_device: orb byte-swapped");
+
+	sbp2util_packet_dump(scsi_id->login_orb, sizeof(struct sbp2_login_orb),
+			     "sbp2 login orb", scsi_id->login_orb_dma);
+
+	/*
+	 * Initialize login response and status fifo
+	 */
+	memset(scsi_id->login_response, 0, sizeof(struct sbp2_login_response));
+	memset(&scsi_id->status_block, 0, sizeof(struct sbp2_status_block));
+
+	SBP2_DEBUG("sbp2_login_device: login_response/status FIFO memset");
+
+	/*
+	 * Ok, let's write to the target's management agent register
+	 */
+	data[0] = ORB_SET_NODE_ID(hi->host->node_id);
+	data[1] = scsi_id->login_orb_dma;
+	sbp2util_cpu_to_be32_buffer(data, 8);
+
+	atomic_set(&scsi_id->sbp2_login_complete, 0);
+
+	SBP2_DEBUG("sbp2_login_device: prepared to write to %08x",
+		   (unsigned int)scsi_id->sbp2_management_agent_addr);
+	hpsb_node_write(scsi_id->ne, scsi_id->sbp2_management_agent_addr, data, 8);
+	SBP2_DEBUG("sbp2_login_device: written");
+
+	/*
+	 * Wait for login status (up to 20 seconds)...
+	 */
+	if (sbp2util_down_timeout(&scsi_id->sbp2_login_complete, 20*HZ)) {
+		SBP2_ERR("Error logging into SBP-2 device - login timed-out");
+		return -EIO;
+	}
+
+	/*
+	 * Sanity. Make sure status returned matches login orb.
+	 * (Was mislabelled "timed-out"; this is a mismatch, not a timeout.)
+	 */
+	if (scsi_id->status_block.ORB_offset_lo != scsi_id->login_orb_dma) {
+		SBP2_ERR("Error logging into SBP-2 device - status mismatch");
+		return -EIO;
+	}
+
+	/*
+	 * Check status
+	 */
+	if (STATUS_GET_RESP(scsi_id->status_block.ORB_offset_hi_misc) ||
+	    STATUS_GET_DEAD_BIT(scsi_id->status_block.ORB_offset_hi_misc) ||
+	    STATUS_GET_SBP_STATUS(scsi_id->status_block.ORB_offset_hi_misc)) {
+		SBP2_ERR("Error logging into SBP-2 device - login failed");
+		return -EIO;
+	}
+
+	/*
+	 * Byte swap the login response, for use when reconnecting or
+	 * logging out.
+	 */
+	sbp2util_cpu_to_be32_buffer(scsi_id->login_response, sizeof(struct sbp2_login_response));
+
+	/*
+	 * Grab our command block agent address from the login response.
+	 */
+	SBP2_DEBUG("command_block_agent_hi = %x",
+		   (unsigned int)scsi_id->login_response->command_block_agent_hi);
+	SBP2_DEBUG("command_block_agent_lo = %x",
+		   (unsigned int)scsi_id->login_response->command_block_agent_lo);
+
+	scsi_id->sbp2_command_block_agent_addr =
+		((u64)scsi_id->login_response->command_block_agent_hi) << 32;
+	scsi_id->sbp2_command_block_agent_addr |= ((u64)scsi_id->login_response->command_block_agent_lo);
+	/* Mask down to the 48-bit offset within the node's address space */
+	scsi_id->sbp2_command_block_agent_addr &= 0x0000ffffffffffffULL;
+
+	SBP2_INFO("Logged into SBP-2 device");
+
+	return 0;
+}
+
+/*
+ * This function is called in order to logout from a particular SBP-2
+ * device, usually called during driver unload.
+ */
+static int sbp2_logout_device(struct scsi_id_instance_data *scsi_id)
+{
+	struct sbp2scsi_host_info *hi = scsi_id->hi;
+	struct sbp2_logout_orb *orb = scsi_id->logout_orb;
+	quadlet_t data[2];
+	int ret;
+
+	SBP2_DEBUG("sbp2_logout_device");
+
+	/* Build the logout ORB: function code, the login ID we obtained
+	 * at login time, and a request for completion notification. */
+	orb->reserved1 = 0x0;
+	orb->reserved2 = 0x0;
+	orb->reserved3 = 0x0;
+	orb->reserved4 = 0x0;
+	orb->reserved5 = 0x0;
+	orb->login_ID_misc = ORB_SET_FUNCTION(SBP2_LOGOUT_REQUEST) |
+		ORB_SET_LOGIN_ID(scsi_id->login_response->length_login_ID) |
+		ORB_SET_NOTIFY(1);
+
+	/* Status write-back address, derived from our unit directory id */
+	orb->status_FIFO_lo = SBP2_STATUS_FIFO_ADDRESS_LO +
+		SBP2_STATUS_FIFO_ENTRY_TO_OFFSET(scsi_id->ud->id);
+	orb->status_FIFO_hi = (ORB_SET_NODE_ID(hi->host->node_id) |
+			       SBP2_STATUS_FIFO_ADDRESS_HI);
+
+	/* Byte swap ORB if necessary */
+	sbp2util_cpu_to_be32_buffer(orb, sizeof(struct sbp2_logout_orb));
+
+	sbp2util_packet_dump(orb, sizeof(struct sbp2_logout_orb),
+			     "sbp2 logout orb", scsi_id->logout_orb_dma);
+
+	/* Hand the ORB address to the target's management agent register */
+	data[0] = ORB_SET_NODE_ID(hi->host->node_id);
+	data[1] = scsi_id->logout_orb_dma;
+	sbp2util_cpu_to_be32_buffer(data, 8);
+
+	atomic_set(&scsi_id->sbp2_login_complete, 0);
+
+	ret = hpsb_node_write(scsi_id->ne, scsi_id->sbp2_management_agent_addr,
+			      data, 8);
+	if (ret)
+		return ret;
+
+	/* Wait for device to logout...1 second. */
+	if (sbp2util_down_timeout(&scsi_id->sbp2_login_complete, HZ))
+		return -EIO;
+
+	SBP2_INFO("Logged out of SBP-2 device");
+
+	return 0;
+}
+
+/*
+ * This function is called in order to reconnect to a particular SBP-2
+ * device, after a bus reset.
+ *
+ * Builds a RECONNECT management ORB carrying the login ID from the
+ * earlier login response, writes its address to the target's management
+ * agent register, and waits up to one second for the status write-back.
+ * Returns 0 on success, -EIO on timeout/mismatch/failure status, or the
+ * hpsb_node_write() error if the register write itself fails.
+ */
+static int sbp2_reconnect_device(struct scsi_id_instance_data *scsi_id)
+{
+	struct sbp2scsi_host_info *hi = scsi_id->hi;
+	quadlet_t data[2];
+	int error;
+
+	SBP2_DEBUG("sbp2_reconnect_device");
+
+	/*
+	 * Set-up reconnect ORB
+	 */
+	scsi_id->reconnect_orb->reserved1 = 0x0;
+	scsi_id->reconnect_orb->reserved2 = 0x0;
+	scsi_id->reconnect_orb->reserved3 = 0x0;
+	scsi_id->reconnect_orb->reserved4 = 0x0;
+
+	scsi_id->reconnect_orb->login_ID_misc = ORB_SET_FUNCTION(SBP2_RECONNECT_REQUEST);
+	scsi_id->reconnect_orb->login_ID_misc |=
+		ORB_SET_LOGIN_ID(scsi_id->login_response->length_login_ID);
+
+	/* Notify us when complete */
+	scsi_id->reconnect_orb->login_ID_misc |= ORB_SET_NOTIFY(1);
+
+	scsi_id->reconnect_orb->reserved5 = 0x0;
+	scsi_id->reconnect_orb->status_FIFO_lo = SBP2_STATUS_FIFO_ADDRESS_LO +
+						 SBP2_STATUS_FIFO_ENTRY_TO_OFFSET(scsi_id->ud->id);
+	scsi_id->reconnect_orb->status_FIFO_hi =
+		(ORB_SET_NODE_ID(hi->host->node_id) | SBP2_STATUS_FIFO_ADDRESS_HI);
+
+	/*
+	 * Byte swap ORB if necessary
+	 */
+	sbp2util_cpu_to_be32_buffer(scsi_id->reconnect_orb, sizeof(struct sbp2_reconnect_orb));
+
+	sbp2util_packet_dump(scsi_id->reconnect_orb, sizeof(struct sbp2_reconnect_orb),
+			     "sbp2 reconnect orb", scsi_id->reconnect_orb_dma);
+
+	/*
+	 * Initialize status fifo
+	 */
+	memset(&scsi_id->status_block, 0, sizeof(struct sbp2_status_block));
+
+	/*
+	 * Ok, let's write to the target's management agent register
+	 */
+	data[0] = ORB_SET_NODE_ID(hi->host->node_id);
+	data[1] = scsi_id->reconnect_orb_dma;
+	sbp2util_cpu_to_be32_buffer(data, 8);
+
+	atomic_set(&scsi_id->sbp2_login_complete, 0);
+
+	error = hpsb_node_write(scsi_id->ne,
+	                            scsi_id->sbp2_management_agent_addr,
+	                            data, 8);
+	if (error)
+		return error;
+
+	/*
+	 * Wait for reconnect status (up to 1 second)...
+	 */
+	if (sbp2util_down_timeout(&scsi_id->sbp2_login_complete, HZ)) {
+		SBP2_ERR("Error reconnecting to SBP-2 device - reconnect timed-out");
+		return -EIO;
+	}
+
+	/*
+	 * Sanity. Make sure status returned matches reconnect orb.
+	 * (Was mislabelled "timed-out"; this is a mismatch, not a timeout.)
+	 */
+	if (scsi_id->status_block.ORB_offset_lo != scsi_id->reconnect_orb_dma) {
+		SBP2_ERR("Error reconnecting to SBP-2 device - status mismatch");
+		return -EIO;
+	}
+
+	/*
+	 * Check status
+	 */
+	if (STATUS_GET_RESP(scsi_id->status_block.ORB_offset_hi_misc) ||
+	    STATUS_GET_DEAD_BIT(scsi_id->status_block.ORB_offset_hi_misc) ||
+	    STATUS_GET_SBP_STATUS(scsi_id->status_block.ORB_offset_hi_misc)) {
+		SBP2_ERR("Error reconnecting to SBP-2 device - reconnect failed");
+		return -EIO;
+	}
+
+	HPSB_DEBUG("Reconnected to SBP-2 device");
+
+	return 0;
+}
+
+/*
+ * This function is called in order to set the busy timeout (number of
+ * retries to attempt) on the sbp2 device.
+ */
+static int sbp2_set_busy_timeout(struct scsi_id_instance_data *scsi_id)
+{
+	quadlet_t data = cpu_to_be32(SBP2_BUSY_TIMEOUT_VALUE);
+
+	SBP2_DEBUG("sbp2_set_busy_timeout");
+
+	/* Write the retry value into the target's BUSY_TIMEOUT register;
+	 * a failure is only logged, never propagated to the caller. */
+	if (hpsb_node_write(scsi_id->ne, SBP2_BUSY_TIMEOUT_ADDRESS, &data, 4))
+		SBP2_ERR("sbp2_set_busy_timeout error");
+
+	return 0;
+}
+
+
+/*
+ * This function is called to parse sbp2 device's config rom unit
+ * directory. Used to determine things like sbp2 management agent offset,
+ * and command set used (SCSI or RBC).
+ *
+ * Walks every entry of the unit directory, collecting the management
+ * agent CSR offset, command set/spec IDs, unit characteristics and
+ * firmware revision, then applies firmware-revision-based workarounds.
+ * For a logical unit directory entry the function recurses into the
+ * parent unit directory, where the authoritative values live.
+ */
+static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id,
+				      struct unit_directory *ud)
+{
+	struct csr1212_keyval *kv;
+	struct csr1212_dentry *dentry;
+	u64 management_agent_addr;
+	u32 command_set_spec_id, command_set, unit_characteristics,
+		firmware_revision, workarounds;
+	int i;
+
+	SBP2_DEBUG("sbp2_parse_unit_directory");
+
+	/* Defaults in case some keys are absent from the directory */
+	management_agent_addr = 0x0;
+	command_set_spec_id = 0x0;
+	command_set = 0x0;
+	unit_characteristics = 0x0;
+	firmware_revision = 0x0;
+
+	/* Handle different fields in the unit directory, based on keys */
+	csr1212_for_each_dir_entry(ud->ne->csr, kv, ud->ud_kv, dentry) {
+		switch (kv->key.id) {
+		case CSR1212_KV_ID_DEPENDENT_INFO:
+			if (kv->key.type == CSR1212_KV_TYPE_CSR_OFFSET) {
+				/* Save off the management agent address
+				 * (CSR offsets are in quadlets, hence << 2) */
+				management_agent_addr =
+					CSR1212_REGISTER_SPACE_BASE +
+					(kv->value.csr_offset << 2);
+
+				SBP2_DEBUG("sbp2_management_agent_addr = %x",
+					   (unsigned int) management_agent_addr);
+			} else if (kv->key.type == CSR1212_KV_TYPE_IMMEDIATE) {
+				/* Immediate dependent info carries the
+				 * device type and LUN value */
+				scsi_id->sbp2_device_type_and_lun = kv->value.immediate;
+			}
+			break;
+
+		case SBP2_COMMAND_SET_SPEC_ID_KEY:
+			/* Command spec organization */
+			command_set_spec_id = kv->value.immediate;
+			SBP2_DEBUG("sbp2_command_set_spec_id = %x",
+				   (unsigned int) command_set_spec_id);
+			break;
+
+		case SBP2_COMMAND_SET_KEY:
+			/* Command set used by sbp2 device */
+			command_set = kv->value.immediate;
+			SBP2_DEBUG("sbp2_command_set = %x",
+				   (unsigned int) command_set);
+			break;
+
+		case SBP2_UNIT_CHARACTERISTICS_KEY:
+			/*
+			 * Unit characteristics (orb related stuff
+			 * that I'm not yet paying attention to)
+			 */
+			unit_characteristics = kv->value.immediate;
+			SBP2_DEBUG("sbp2_unit_characteristics = %x",
+				   (unsigned int) unit_characteristics);
+			break;
+
+		case SBP2_FIRMWARE_REVISION_KEY:
+			/* Firmware revision; logged at INFO level when the
+			 * inquiry hack is forced so it shows up in dmesg */
+			firmware_revision = kv->value.immediate;
+			if (force_inquiry_hack)
+				SBP2_INFO("sbp2_firmware_revision = %x",
+				   (unsigned int) firmware_revision);
+			else	SBP2_DEBUG("sbp2_firmware_revision = %x",
+				   (unsigned int) firmware_revision);
+			break;
+
+		default:
+			break;
+		}
+	}
+
+	/* This is the start of our broken device checking. We try to hack
+	 * around oddities and known defects.  */
+	workarounds = 0x0;
+
+	/* If the vendor id is 0xa0b8 (Symbios vendor id), then we have a
+	 * bridge with 128KB max transfer size limitation. For sanity, we
+	 * only voice this when the current max_sectors setting
+	 * exceeds the 128k limit. By default, that is not the case.
+	 *
+	 * It would be really nice if we could detect this before the scsi
+	 * host gets initialized. That way we can down-force the
+	 * max_sectors to account for it. That is not currently
+	 * possible.  */
+	if ((firmware_revision & 0xffff00) ==
+			SBP2_128KB_BROKEN_FIRMWARE &&
+			(max_sectors * 512) > (128*1024)) {
+		SBP2_WARN("Node " NODE_BUS_FMT ": Bridge only supports 128KB max transfer size.",
+				NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid));
+		SBP2_WARN("WARNING: Current max_sectors setting is larger than 128KB (%d sectors)!",
+				max_sectors);
+		workarounds |= SBP2_BREAKAGE_128K_MAX_TRANSFER;
+	}
+
+	/* Check for a blacklisted set of devices that require us to force
+	 * a 36 byte host inquiry. This can be overridden as a module param
+	 * (to force all hosts).  */
+	for (i = 0; i < NUM_BROKEN_INQUIRY_DEVS; i++) {
+		if ((firmware_revision & 0xffff00) ==
+				sbp2_broken_inquiry_list[i]) {
+			SBP2_WARN("Node " NODE_BUS_FMT ": Using 36byte inquiry workaround",
+					NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid));
+			workarounds |= SBP2_BREAKAGE_INQUIRY_HACK;
+			break; /* No need to continue. */
+		}
+	}
+
+	/* If this is a logical unit directory entry, process the parent
+	 * to get the values. */
+	if (ud->flags & UNIT_DIRECTORY_LUN_DIRECTORY) {
+		struct unit_directory *parent_ud =
+			container_of(ud->device.parent, struct unit_directory, device);
+		sbp2_parse_unit_directory(scsi_id, parent_ud);
+	} else {
+		/* Top-level unit directory: commit the collected values */
+		scsi_id->sbp2_management_agent_addr = management_agent_addr;
+		scsi_id->sbp2_command_set_spec_id = command_set_spec_id;
+		scsi_id->sbp2_command_set = command_set;
+		scsi_id->sbp2_unit_characteristics = unit_characteristics;
+		scsi_id->sbp2_firmware_revision = firmware_revision;
+		scsi_id->workarounds = workarounds;
+		if (ud->flags & UNIT_DIRECTORY_HAS_LUN)
+			scsi_id->sbp2_device_type_and_lun = ud->lun;
+	}
+}
+
+/*
+ * Determine the maximum transfer speed and payload size usable in our
+ * ORBs.  Note that we (the driver and host) only initiate the
+ * transaction; the SBP-2 device itself moves the data by reading the
+ * DMA area we describe to it, so the device decides the actual maximum
+ * it can transfer.  We merely program the speed to use and the largest
+ * payload the host (max_rec) and the chosen speed allow.
+ */
+static int sbp2_max_speed_and_size(struct scsi_id_instance_data *scsi_id)
+{
+	struct sbp2scsi_host_info *hi = scsi_id->hi;
+
+	SBP2_DEBUG("sbp2_max_speed_and_size");
+
+	/* Seed the speed from the host's speed map for this node pair */
+	scsi_id->speed_code =
+		hi->host->speed_map[NODEID_TO_NODE(hi->host->node_id) * 64 +
+				    NODEID_TO_NODE(scsi_id->ne->nodeid)];
+
+	/* Clamp to the user-requested module parameter, if lower */
+	if (scsi_id->speed_code > max_speed) {
+		scsi_id->speed_code = max_speed;
+		SBP2_ERR("Forcing SBP-2 max speed down to %s",
+			 hpsb_speedto_str[scsi_id->speed_code]);
+	}
+
+	/* Payload is the lesser of what the speed allows and what the
+	 * host controller supports (max_rec). */
+	scsi_id->max_payload_size =
+		min(sbp2_speedto_max_payload[scsi_id->speed_code],
+		    (u8)(hi->host->csr.max_rec - 1));
+
+	HPSB_DEBUG("Node " NODE_BUS_FMT ": Max speed [%s] - Max payload [%u]",
+		   NODE_BUS_ARGS(hi->host, scsi_id->ne->nodeid),
+		   hpsb_speedto_str[scsi_id->speed_code],
+		   1 << ((u32)scsi_id->max_payload_size + 2));
+
+	return 0;
+}
+
+/*
+ * This function is called in order to perform a SBP-2 agent reset.
+ *
+ * Writes the reset value to the AGENT_RESET register of the target's
+ * command block agent.  With @wait non-zero the write completes
+ * synchronously via hpsb_node_write(); otherwise a fire-and-forget
+ * write is used.  Clears last_orb so the ORB pointer is re-written on
+ * the next command.  Returns 0 on success, -EIO if the write fails.
+ */
+static int sbp2_agent_reset(struct scsi_id_instance_data *scsi_id, int wait)
+{
+	quadlet_t data;
+	u64 addr;
+	int retval;
+
+	SBP2_DEBUG("sbp2_agent_reset");
+
+	/*
+	 * Ok, let's write to the target's management agent register
+	 */
+	data = ntohl(SBP2_AGENT_RESET_DATA);
+	addr = scsi_id->sbp2_command_block_agent_addr + SBP2_AGENT_RESET_OFFSET;
+
+	if (wait)
+		retval = hpsb_node_write(scsi_id->ne, addr, &data, 4);
+	else
+		retval = sbp2util_node_write_no_wait(scsi_id->ne, addr, &data, 4);
+
+	if (retval < 0) {
+		/* No trailing "\n": matches every other SBP2_ERR call in
+		 * this driver, which pass bare messages. */
+		SBP2_ERR("hpsb_node_write failed.");
+		return -EIO;
+	}
+
+	/*
+	 * Need to make sure orb pointer is written on next command
+	 */
+	scsi_id->last_orb = NULL;
+
+	return 0;
+}
+
+/*
+ * This function is called to create the actual command orb and s/g list
+ * out of the scsi command itself.
+ */
+static int sbp2_create_command_orb(struct scsi_id_instance_data *scsi_id,
+				   struct sbp2_command_info *command,
+				   unchar *scsi_cmd,
+				   unsigned int scsi_use_sg,
+				   unsigned int scsi_request_bufflen,
+				   void *scsi_request_buffer,
+				   enum dma_data_direction dma_dir)
+
+{
+	struct sbp2scsi_host_info *hi = scsi_id->hi;
+	struct scatterlist *sgpnt = (struct scatterlist *) scsi_request_buffer;
+	struct sbp2_command_orb *command_orb = &command->command_orb;
+	struct sbp2_unrestricted_page_table *scatter_gather_element =
+		&command->scatter_gather_element[0];
+	u32 sg_count, sg_len, orb_direction;
+	dma_addr_t sg_addr;
+	int i;
+
+	/*
+	 * Set-up our command ORB..
+	 *
+	 * NOTE: We're doing unrestricted page tables (s/g), as this is
+	 * best performance (at least with the devices I have). This means
+	 * that data_size becomes the number of s/g elements, and
+	 * page_size should be zero (for unrestricted).
+	 */
+	command_orb->next_ORB_hi = ORB_SET_NULL_PTR(1);
+	command_orb->next_ORB_lo = 0x0;
+	command_orb->misc = ORB_SET_MAX_PAYLOAD(scsi_id->max_payload_size);
+	command_orb->misc |= ORB_SET_SPEED(scsi_id->speed_code);
+	command_orb->misc |= ORB_SET_NOTIFY(1);		/* Notify us when complete */
+
+	/*
+	 * Get the direction of the transfer. If the direction is unknown, then use our
+	 * goofy table as a back-up.
+	 */
+	switch (dma_dir) {
+		case DMA_NONE:
+			orb_direction = ORB_DIRECTION_NO_DATA_TRANSFER;
+			break;
+		case DMA_TO_DEVICE:
+			orb_direction = ORB_DIRECTION_WRITE_TO_MEDIA;
+			break;
+		case DMA_FROM_DEVICE:
+			orb_direction = ORB_DIRECTION_READ_FROM_MEDIA;
+			break;
+		case DMA_BIDIRECTIONAL:
+		default:
+			SBP2_ERR("SCSI data transfer direction not specified. "
+				 "Update the SBP2 direction table in sbp2.h if "
+				 "necessary for your application");
+			__scsi_print_command(scsi_cmd);
+			orb_direction = sbp2scsi_direction_table[*scsi_cmd];
+			break;
+	}
+
+	/*
+	 * Set-up our pagetable stuff... unfortunately, this has become
+	 * messier than I'd like. Need to clean this up a bit.   ;-)
+	 */
+	if (orb_direction == ORB_DIRECTION_NO_DATA_TRANSFER) {
+
+		SBP2_DEBUG("No data transfer");
+
+		/*
+		 * Handle no data transfer
+		 */
+		command_orb->data_descriptor_hi = 0x0;
+		command_orb->data_descriptor_lo = 0x0;
+		command_orb->misc |= ORB_SET_DIRECTION(1);
+
+	} else if (scsi_use_sg) {
+
+		SBP2_DEBUG("Use scatter/gather");
+
+		/*
+		 * Special case if only one element (and less than 64KB in size)
+		 */
+		if ((scsi_use_sg == 1) && (sgpnt[0].length <= SBP2_MAX_SG_ELEMENT_LENGTH)) {
+
+			SBP2_DEBUG("Only one s/g element");
+			command->dma_dir = dma_dir;
+			command->dma_size = sgpnt[0].length;
+			command->dma_type = CMD_DMA_PAGE;
+			command->cmd_dma = pci_map_page(hi->host->pdev,
+							sgpnt[0].page,
+							sgpnt[0].offset,
+							command->dma_size,
+							command->dma_dir);
+			SBP2_DMA_ALLOC("single page scatter element");
+
+			command_orb->data_descriptor_hi = ORB_SET_NODE_ID(hi->host->node_id);
+			command_orb->data_descriptor_lo = command->cmd_dma;
+			command_orb->misc |= ORB_SET_DATA_SIZE(command->dma_size);
+			command_orb->misc |= ORB_SET_DIRECTION(orb_direction);
+
+		} else {
+			int count = pci_map_sg(hi->host->pdev, sgpnt, scsi_use_sg, dma_dir);
+			SBP2_DMA_ALLOC("scatter list");
+
+			command->dma_size = scsi_use_sg;
+			command->dma_dir = dma_dir;
+			command->sge_buffer = sgpnt;
+
+			/* use page tables (s/g) */
+			command_orb->misc |= ORB_SET_PAGE_TABLE_PRESENT(0x1);
+			command_orb->misc |= ORB_SET_DIRECTION(orb_direction);
+			command_orb->data_descriptor_hi = ORB_SET_NODE_ID(hi->host->node_id);
+			command_orb->data_descriptor_lo = command->sge_dma;
+
+			/*
+			 * Loop through and fill out our sbp-2 page tables
+			 * (and split up anything too large)
+			 */
+			for (i = 0, sg_count = 0 ; i < count; i++, sgpnt++) {
+				sg_len = sg_dma_len(sgpnt);
+				sg_addr = sg_dma_address(sgpnt);
+				while (sg_len) {
+					scatter_gather_element[sg_count].segment_base_lo = sg_addr;
+					if (sg_len > SBP2_MAX_SG_ELEMENT_LENGTH) {
+						scatter_gather_element[sg_count].length_segment_base_hi =
+							PAGE_TABLE_SET_SEGMENT_LENGTH(SBP2_MAX_SG_ELEMENT_LENGTH);
+						sg_addr += SBP2_MAX_SG_ELEMENT_LENGTH;
+						sg_len -= SBP2_MAX_SG_ELEMENT_LENGTH;
+					} else {
+						scatter_gather_element[sg_count].length_segment_base_hi =
+							PAGE_TABLE_SET_SEGMENT_LENGTH(sg_len);
+						sg_len = 0;
+					}
+					sg_count++;
+				}
+			}
+
+			/* Number of page table (s/g) elements */
+			command_orb->misc |= ORB_SET_DATA_SIZE(sg_count);
+
+			sbp2util_packet_dump(scatter_gather_element,
+					     (sizeof(struct sbp2_unrestricted_page_table)) * sg_count,
+					     "sbp2 s/g list", command->sge_dma);
+
+			/*
+			 * Byte swap page tables if necessary
+			 */
+			sbp2util_cpu_to_be32_buffer(scatter_gather_element,
+						    (sizeof(struct sbp2_unrestricted_page_table)) *
+						    sg_count);
+
+		}
+
+	} else {
+
+		SBP2_DEBUG("No scatter/gather");
+
+		command->dma_dir = dma_dir;
+		command->dma_size = scsi_request_bufflen;
+		command->dma_type = CMD_DMA_SINGLE;
+		command->cmd_dma = pci_map_single (hi->host->pdev, scsi_request_buffer,
+						   command->dma_size,
+						   command->dma_dir);
+		SBP2_DMA_ALLOC("single bulk");
+
+		/*
+		 * Handle case where we get a command w/o s/g enabled (but
+		 * check for transfers larger than 64K)
+		 */
+		if (scsi_request_bufflen <= SBP2_MAX_SG_ELEMENT_LENGTH) {
+
+			command_orb->data_descriptor_hi = ORB_SET_NODE_ID(hi->host->node_id);
+			command_orb->data_descriptor_lo = command->cmd_dma;
+			command_orb->misc |= ORB_SET_DATA_SIZE(scsi_request_bufflen);
+			command_orb->misc |= ORB_SET_DIRECTION(orb_direction);
+
+			/*
+			 * Sanity, in case our direction table is not
+			 * up-to-date
+			 */
+			if (!scsi_request_bufflen) {
+				command_orb->data_descriptor_hi = 0x0;
+				command_orb->data_descriptor_lo = 0x0;
+				command_orb->misc |= ORB_SET_DIRECTION(1);
+			}
+
+		} else {
+			/*
+			 * Need to turn this into page tables, since the
+			 * buffer is too large.
+			 */
+			command_orb->data_descriptor_hi = ORB_SET_NODE_ID(hi->host->node_id);
+			command_orb->data_descriptor_lo = command->sge_dma;
+
+			/* Use page tables (s/g) */
+			command_orb->misc |= ORB_SET_PAGE_TABLE_PRESENT(0x1);
+			command_orb->misc |= ORB_SET_DIRECTION(orb_direction);
+
+			/*
+			 * fill out our sbp-2 page tables (and split up
+			 * the large buffer)
+			 */
+			sg_count = 0;
+			sg_len = scsi_request_bufflen;
+			sg_addr = command->cmd_dma;
+			while (sg_len) {
+				scatter_gather_element[sg_count].segment_base_lo = sg_addr;
+				if (sg_len > SBP2_MAX_SG_ELEMENT_LENGTH) {
+					scatter_gather_element[sg_count].length_segment_base_hi =
+						PAGE_TABLE_SET_SEGMENT_LENGTH(SBP2_MAX_SG_ELEMENT_LENGTH);
+					sg_addr += SBP2_MAX_SG_ELEMENT_LENGTH;
+					sg_len -= SBP2_MAX_SG_ELEMENT_LENGTH;
+				} else {
+					scatter_gather_element[sg_count].length_segment_base_hi =
+						PAGE_TABLE_SET_SEGMENT_LENGTH(sg_len);
+					sg_len = 0;
+				}
+				sg_count++;
+			}
+
+			/* Number of page table (s/g) elements */
+			command_orb->misc |= ORB_SET_DATA_SIZE(sg_count);
+
+			sbp2util_packet_dump(scatter_gather_element,
+					     (sizeof(struct sbp2_unrestricted_page_table)) * sg_count,
+					     "sbp2 s/g list", command->sge_dma);
+
+			/*
+			 * Byte swap page tables if necessary
+			 */
+			sbp2util_cpu_to_be32_buffer(scatter_gather_element,
+						    (sizeof(struct sbp2_unrestricted_page_table)) *
+						     sg_count);
+
+		}
+
+	}
+
+	/*
+	 * Byte swap command ORB if necessary
+	 */
+	sbp2util_cpu_to_be32_buffer(command_orb, sizeof(struct sbp2_command_orb));
+
+	/*
+	 * Put our scsi command in the command ORB
+	 */
+	memset(command_orb->cdb, 0, 12);
+	memcpy(command_orb->cdb, scsi_cmd, COMMAND_SIZE(*scsi_cmd));
+
+	return(0);
+}
+
+/*
+ * Queues a built command ORB to the target's fetch agent.  If no ORB is
+ * currently outstanding, the agent's ORB_POINTER register is written
+ * directly; otherwise the new ORB is chained onto the previous one and
+ * the DOORBELL register is rung.  Returns 0 on success or -EIO if the
+ * 1394 write to the target failed.
+ */
+static int sbp2_link_orb_command(struct scsi_id_instance_data *scsi_id,
+				 struct sbp2_command_info *command)
+{
+	struct sbp2scsi_host_info *hi = scsi_id->hi;
+	struct sbp2_command_orb *command_orb = &command->command_orb;
+	struct node_entry *ne = scsi_id->ne;
+	u64 addr;
+
+	outstanding_orb_incr;
+	SBP2_ORB_DEBUG("sending command orb %p, total orbs = %x",
+			command_orb, global_outstanding_command_orbs);
+
+	/*
+	 * Flush the CPU-built ORB and s/g table to memory so the target
+	 * sees current data when it DMA-fetches them.
+	 */
+	pci_dma_sync_single_for_device(hi->host->pdev, command->command_orb_dma,
+				       sizeof(struct sbp2_command_orb),
+				       PCI_DMA_BIDIRECTIONAL);
+	pci_dma_sync_single_for_device(hi->host->pdev, command->sge_dma,
+				       sizeof(command->scatter_gather_element),
+				       PCI_DMA_BIDIRECTIONAL);
+	/*
+	 * Check to see if there are any previous orbs to use
+	 */
+	if (scsi_id->last_orb == NULL) {
+		quadlet_t data[2];
+
+		/*
+		 * No ORB outstanding: point the command block agent's
+		 * ORB_POINTER register straight at this ORB.
+		 */
+		addr = scsi_id->sbp2_command_block_agent_addr + SBP2_ORB_POINTER_OFFSET;
+		data[0] = ORB_SET_NODE_ID(hi->host->node_id);
+		data[1] = command->command_orb_dma;
+		sbp2util_cpu_to_be32_buffer(data, 8);
+
+		SBP2_ORB_DEBUG("write command agent, command orb %p", command_orb);
+
+		if (sbp2util_node_write_no_wait(ne, addr, data, 8) < 0) {
+			SBP2_ERR("sbp2util_node_write_no_wait failed.\n");
+			return -EIO;
+		}
+
+		SBP2_ORB_DEBUG("write command agent complete");
+
+		scsi_id->last_orb = command_orb;
+		scsi_id->last_orb_dma = command->command_orb_dma;
+
+	} else {
+		quadlet_t data;
+
+		/*
+		 * We have an orb already sent (maybe or maybe not
+		 * processed) that we can append this orb to. So do so,
+		 * and ring the doorbell. Have to be very careful
+		 * modifying these next orb pointers, as they are accessed
+		 * both by the sbp2 device and us.
+		 */
+		scsi_id->last_orb->next_ORB_lo =
+			cpu_to_be32(command->command_orb_dma);
+		/* Tells hardware that this pointer is valid */
+		scsi_id->last_orb->next_ORB_hi = 0x0;
+		/* Flush the updated next_ORB pointer before ringing */
+		pci_dma_sync_single_for_device(hi->host->pdev, scsi_id->last_orb_dma,
+					       sizeof(struct sbp2_command_orb),
+					       PCI_DMA_BIDIRECTIONAL);
+
+		/*
+		 * Ring the doorbell
+		 */
+		data = cpu_to_be32(command->command_orb_dma);
+		addr = scsi_id->sbp2_command_block_agent_addr + SBP2_DOORBELL_OFFSET;
+
+		SBP2_ORB_DEBUG("ring doorbell, command orb %p", command_orb);
+
+		if (sbp2util_node_write_no_wait(ne, addr, &data, 4) < 0) {
+			SBP2_ERR("sbp2util_node_write_no_wait failed");
+			return(-EIO);
+		}
+
+		scsi_id->last_orb = command_orb;
+		scsi_id->last_orb_dma = command->command_orb_dma;
+
+	}
+       	return(0);
+}
+
+/*
+ * This function is called in order to begin a regular SBP-2 command.
+ * It allocates an ORB, fills in the command ORB and s/g list, applies
+ * any cdb fixups, and links the ORB to the target's fetch agent.
+ */
+static int sbp2_send_command(struct scsi_id_instance_data *scsi_id,
+			     struct scsi_cmnd *SCpnt,
+			     void (*done)(struct scsi_cmnd *))
+{
+	unchar *cmd = (unchar *) SCpnt->cmnd;
+	unsigned int request_bufflen = SCpnt->request_bufflen;
+	struct sbp2_command_info *command;
+
+	SBP2_DEBUG("sbp2_send_command");
+#if (CONFIG_IEEE1394_SBP2_DEBUG >= 2) || defined(CONFIG_IEEE1394_SBP2_PACKET_DUMP)
+	printk("[scsi command]\n   ");
+	scsi_print_command(SCpnt);
+#endif
+	SBP2_DEBUG("SCSI transfer size = %x", request_bufflen);
+	SBP2_DEBUG("SCSI s/g elements = %x", (unsigned int)SCpnt->use_sg);
+
+	/*
+	 * Allocate a command orb and s/g structure
+	 */
+	command = sbp2util_allocate_command_orb(scsi_id, SCpnt, done);
+	if (!command) {
+		return(-EIO);
+	}
+
+	/*
+	 * The scsi stack sends down a request_bufflen which does not match the
+	 * length field in the scsi cdb. This causes some sbp2 devices to
+	 * reject this inquiry command. Fix the request_bufflen.
+	 */
+	if (*cmd == INQUIRY) {
+		if (force_inquiry_hack || scsi_id->workarounds & SBP2_BREAKAGE_INQUIRY_HACK)
+			request_bufflen = cmd[4] = 0x24;
+		else
+			request_bufflen = cmd[4];
+	}
+
+	/*
+	 * Now actually fill in the command orb and sbp2 s/g list
+	 */
+	sbp2_create_command_orb(scsi_id, command, cmd, SCpnt->use_sg,
+				request_bufflen, SCpnt->request_buffer,
+				SCpnt->sc_data_direction);
+	/*
+	 * Update our cdb if necessary (to handle sbp2 RBC command set
+	 * differences). This is where the command set hacks go!   =)
+	 */
+	sbp2_check_sbp2_command(scsi_id, command->command_orb.cdb);
+
+	sbp2util_packet_dump(&command->command_orb, sizeof(struct sbp2_command_orb),
+			     "sbp2 command orb", command->command_orb_dma);
+
+	/*
+	 * Initialize status fifo
+	 */
+	memset(&scsi_id->status_block, 0, sizeof(struct sbp2_status_block));
+
+	/*
+	 * Link up the orb, and ring the doorbell if needed
+	 */
+	sbp2_link_orb_command(scsi_id, command);
+
+	return(0);
+}
+
+
+/*
+ * This function deals with command set differences between Linux scsi
+ * command set and sbp2 RBC command set.  Six-byte READ/WRITE and
+ * MODE_SENSE cdbs are rewritten in place as their ten-byte equivalents
+ * for device types that require the conversion.
+ */
+static void sbp2_check_sbp2_command(struct scsi_id_instance_data *scsi_id, unchar *cmd)
+{
+	unchar converted[16];
+	u8 device_type = SBP2_DEVICE_TYPE (scsi_id->sbp2_device_type_and_lun);
+
+	SBP2_DEBUG("sbp2_check_sbp2_command");
+
+	switch (*cmd) {
+
+		case READ_6:
+		case WRITE_6:
+
+			if (sbp2_command_conversion_device_type(device_type)) {
+
+				if (*cmd == READ_6)
+					SBP2_DEBUG("Convert READ_6 to READ_10");
+				else
+					SBP2_DEBUG("Convert WRITE_6 to WRITE_10");
+
+				/*
+				 * Rebuild the six-byte cdb as the ten-byte
+				 * form of the same opcode: lba widened to
+				 * four bytes, length widened to two.
+				 */
+				converted[0] = (*cmd == READ_6) ? 0x28 : 0x2a;
+				converted[1] = (cmd[1] & 0xe0);
+				converted[2] = 0x0;
+				converted[3] = (cmd[1] & 0x1f);
+				converted[4] = cmd[2];
+				converted[5] = cmd[3];
+				converted[6] = 0x0;
+				converted[7] = 0x0;
+				converted[8] = cmd[4];
+				converted[9] = cmd[5];
+
+				memcpy(cmd, converted, 10);
+
+			}
+
+			break;
+
+		case MODE_SENSE:
+
+			if (sbp2_command_conversion_device_type(device_type)) {
+
+				SBP2_DEBUG("Convert MODE_SENSE_6 to MODE_SENSE_10");
+
+				/* Ten-byte mode sense with the same page
+				 * code and a two-byte allocation length. */
+				converted[0] = 0x5a;
+				converted[1] = cmd[1];
+				converted[2] = cmd[2];
+				converted[3] = 0x0;
+				converted[4] = 0x0;
+				converted[5] = 0x0;
+				converted[6] = 0x0;
+				converted[7] = 0x0;
+				converted[8] = cmd[4];
+				converted[9] = cmd[5];
+
+				memcpy(cmd, converted, 10);
+
+			}
+
+			break;
+
+		case MODE_SELECT:
+
+			/*
+			 * TODO. Probably need to change mode select to 10 byte version
+			 */
+
+		default:
+			break;
+	}
+
+	return;
+}
+
+/*
+ * Translates SBP-2 status into SCSI sense data for check conditions
+ *
+ * Builds a fixed-format (0x70) sense block in sense_data from the raw
+ * sbp2 status block and returns the scsi status byte it carries.
+ */
+static unsigned int sbp2_status_to_sense_data(unchar *sbp2_status, unchar *sense_data)
+{
+	/* Which sbp2 status byte feeds each sense byte (slots 2-6, 8-15;
+	 * slots 0, 1 and 7 are fixed values set below). */
+	static const unchar sense_src[16] = {
+		0, 0, 9, 12, 13, 14, 15, 0, 16, 17, 18, 19, 10, 11, 20, 21
+	};
+	int i;
+
+	SBP2_DEBUG("sbp2_status_to_sense_data");
+
+	sense_data[0] = 0x70;	/* fixed format, current error */
+	sense_data[1] = 0x0;
+	sense_data[7] = 10;	/* additional sense length */
+
+	for (i = 2; i < 16; i++) {
+		if (i != 7)
+			sense_data[i] = sbp2_status[sense_src[i]];
+	}
+
+	return(sbp2_status[8] & 0x3f);	/* return scsi status */
+}
+
+/*
+ * This function is called after a command is completed, in order to do any
+ * necessary SBP-2 response data translations for the SCSI stack.
+ *
+ * Fixes up INQUIRY data (learns the device type/lun field, enforces a
+ * minimum length, maps TYPE_SDAD to TYPE_DISK, fixes the ansi level) and
+ * rewrites ten-byte MODE_SENSE response headers to the six-byte layout.
+ */
+static void sbp2_check_sbp2_response(struct scsi_id_instance_data *scsi_id,
+				     struct scsi_cmnd *SCpnt)
+{
+	u8 *scsi_buf = SCpnt->request_buffer;
+	u8 device_type = SBP2_DEVICE_TYPE (scsi_id->sbp2_device_type_and_lun);
+
+	SBP2_DEBUG("sbp2_check_sbp2_response");
+
+	switch (SCpnt->cmnd[0]) {
+
+		case INQUIRY:
+
+			/*
+			 * If scsi_id->sbp2_device_type_and_lun is uninitialized, then fill
+			 * this information in from the inquiry response data. Lun is set to zero.
+			 */
+			if (scsi_id->sbp2_device_type_and_lun == SBP2_DEVICE_TYPE_LUN_UNINITIALIZED) {
+				SBP2_DEBUG("Creating sbp2_device_type_and_lun from scsi inquiry data");
+				scsi_id->sbp2_device_type_and_lun = (scsi_buf[0] & 0x1f) << 16;
+			}
+
+			/*
+			 * Make sure data length is ok. Minimum length is 36 bytes
+			 */
+			if (scsi_buf[4] == 0) {
+				scsi_buf[4] = 36 - 5;
+			}
+
+			/*
+			 * Check for Simple Direct Access Device and change it to TYPE_DISK
+			 */
+			if ((scsi_buf[0] & 0x1f) == TYPE_SDAD) {
+				SBP2_DEBUG("Changing TYPE_SDAD to TYPE_DISK");
+				scsi_buf[0] &= 0xe0;
+			}
+
+			/*
+			 * Fix ansi revision and response data format
+			 */
+			scsi_buf[2] |= 2;
+			scsi_buf[3] = (scsi_buf[3] & 0xf0) | 2;
+
+			break;
+
+		case MODE_SENSE:
+
+			if (sbp2_command_conversion_device_type(device_type)) {
+
+				SBP2_DEBUG("Modify mode sense response (10 byte version)");
+
+				scsi_buf[0] = scsi_buf[1];	/* Mode data length */
+				scsi_buf[1] = scsi_buf[2];	/* Medium type */
+				scsi_buf[2] = scsi_buf[3];	/* Device specific parameter */
+				scsi_buf[3] = scsi_buf[7];	/* Block descriptor length */
+				/*
+				 * Source and destination overlap whenever the
+				 * payload is longer than four bytes, so this
+				 * must be memmove(), not memcpy() (overlapping
+				 * memcpy is undefined behavior).
+				 */
+				memmove(scsi_buf + 4, scsi_buf + 8, scsi_buf[0]);
+			}
+
+			break;
+
+		case MODE_SELECT:
+
+			/*
+			 * TODO. Probably need to change mode select to 10 byte version
+			 */
+
+		default:
+			break;
+	}
+	return;
+}
+
+/*
+ * This function deals with status writes from the SBP-2 device.  It is
+ * the 1394 address-space handler for the driver's status FIFO: it
+ * matches the written status to an outstanding command ORB (or to a
+ * login/logout/reconnect ORB), translates any sense data, and completes
+ * the scsi command.  Returns an RCODE_* value for the 1394 transaction.
+ */
+static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, int destid,
+				    quadlet_t *data, u64 addr, size_t length, u16 fl)
+{
+	struct sbp2scsi_host_info *hi;
+	struct scsi_id_instance_data *scsi_id = NULL, *scsi_id_tmp;
+	u32 id;
+	struct scsi_cmnd *SCpnt = NULL;
+	u32 scsi_status = SBP2_SCSI_STATUS_GOOD;
+	struct sbp2_command_info *command;
+
+	SBP2_DEBUG("sbp2_handle_status_write");
+
+	sbp2util_packet_dump(data, length, "sbp2 status write by device", (u32)addr);
+
+	if (!host) {
+		SBP2_ERR("host is NULL - this is bad!");
+		return(RCODE_ADDRESS_ERROR);
+	}
+
+	hi = hpsb_get_hostinfo(&sbp2_highlevel, host);
+
+	if (!hi) {
+		SBP2_ERR("host info is NULL - this is bad!");
+		return(RCODE_ADDRESS_ERROR);
+	}
+
+	/*
+	 * Find our scsi_id structure by looking at the status fifo address written to by
+	 * the sbp2 device.
+	 */
+	id = SBP2_STATUS_FIFO_OFFSET_TO_ENTRY((u32)(addr - SBP2_STATUS_FIFO_ADDRESS));
+	list_for_each_entry(scsi_id_tmp, &hi->scsi_ids, scsi_list) {
+		if (scsi_id_tmp->ne->nodeid == nodeid && scsi_id_tmp->ud->id == id) {
+			scsi_id = scsi_id_tmp;
+			break;
+		}
+	}
+
+	if (!scsi_id) {
+		SBP2_ERR("scsi_id is NULL - device is gone?");
+		return(RCODE_ADDRESS_ERROR);
+	}
+
+	/*
+	 * Put response into scsi_id status fifo...
+	 * NOTE(review): length comes straight from the device's 1394 write
+	 * and is not capped against sizeof(scsi_id->status_block) here -
+	 * confirm the address-space registration bounds it.
+	 */
+	memcpy(&scsi_id->status_block, data, length);
+
+	/*
+	 * Byte swap first two quadlets (8 bytes) of status for processing
+	 */
+	sbp2util_be32_to_cpu_buffer(&scsi_id->status_block, 8);
+
+	/*
+	 * Handle command ORB status here if necessary. First, need to match status with command.
+	 */
+	command = sbp2util_find_command_for_orb(scsi_id, scsi_id->status_block.ORB_offset_lo);
+	if (command) {
+
+		SBP2_DEBUG("Found status for command ORB");
+		/* Pull the device-written ORB contents back to the CPU */
+		pci_dma_sync_single_for_cpu(hi->host->pdev, command->command_orb_dma,
+					    sizeof(struct sbp2_command_orb),
+					    PCI_DMA_BIDIRECTIONAL);
+		pci_dma_sync_single_for_cpu(hi->host->pdev, command->sge_dma,
+					    sizeof(command->scatter_gather_element),
+					    PCI_DMA_BIDIRECTIONAL);
+
+		SBP2_ORB_DEBUG("matched command orb %p", &command->command_orb);
+		outstanding_orb_decr;
+
+		/*
+		 * Matched status with command, now grab scsi command pointers and check status
+		 */
+		SCpnt = command->Current_SCpnt;
+		sbp2util_mark_command_completed(scsi_id, command);
+
+		if (SCpnt) {
+
+			/*
+			 * See if the target stored any scsi status information
+			 */
+			if (STATUS_GET_LENGTH(scsi_id->status_block.ORB_offset_hi_misc) > 1) {
+				/*
+				 * Translate SBP-2 status to SCSI sense data
+				 */
+				SBP2_DEBUG("CHECK CONDITION");
+				scsi_status = sbp2_status_to_sense_data((unchar *)&scsi_id->status_block, SCpnt->sense_buffer);
+			}
+
+			/*
+			 * Check to see if the dead bit is set. If so, we'll have to initiate
+			 * a fetch agent reset.
+			 */
+			if (STATUS_GET_DEAD_BIT(scsi_id->status_block.ORB_offset_hi_misc)) {
+
+				/*
+				 * Initiate a fetch agent reset.
+				 */
+				SBP2_DEBUG("Dead bit set - initiating fetch agent reset");
+                                sbp2_agent_reset(scsi_id, 0);
+			}
+
+			SBP2_ORB_DEBUG("completing command orb %p", &command->command_orb);
+		}
+
+		/*
+		 * Check here to see if there are no commands in-use. If there are none, we can
+		 * null out last orb so that next time around we write directly to the orb pointer...
+		 * Quick start saves one 1394 bus transaction.
+		 */
+		if (list_empty(&scsi_id->sbp2_command_orb_inuse)) {
+			scsi_id->last_orb = NULL;
+		}
+
+	} else {
+
+		/*
+		 * It's probably a login/logout/reconnect status.
+		 */
+		if ((scsi_id->login_orb_dma == scsi_id->status_block.ORB_offset_lo) ||
+		    (scsi_id->query_logins_orb_dma == scsi_id->status_block.ORB_offset_lo) ||
+		    (scsi_id->reconnect_orb_dma == scsi_id->status_block.ORB_offset_lo) ||
+		    (scsi_id->logout_orb_dma == scsi_id->status_block.ORB_offset_lo)) {
+			atomic_set(&scsi_id->sbp2_login_complete, 1);
+		}
+	}
+
+	/*
+	 * NOTE(review): command->Current_done is read here after
+	 * sbp2util_mark_command_completed() already returned the structure
+	 * to the free pool above - presumably safe while reuse is
+	 * serialized, but verify.
+	 */
+	if (SCpnt) {
+
+		/* Complete the SCSI command. */
+		SBP2_DEBUG("Completing SCSI command");
+		sbp2scsi_complete_command(scsi_id, scsi_status, SCpnt,
+					  command->Current_done);
+		SBP2_ORB_DEBUG("command orb completed");
+	}
+
+	return(RCODE_COMPLETE);
+}
+
+
+/**************************************
+ * SCSI interface related section
+ **************************************/
+
+/*
+ * This routine is the main request entry routine for doing I/O. It is
+ * called from the scsi stack directly.  Commands that cannot be sent
+ * (no device, bad lun, bus reset in progress) are completed immediately
+ * through done() with an appropriate DID_* result.
+ */
+static int sbp2scsi_queuecommand(struct scsi_cmnd *SCpnt,
+				 void (*done)(struct scsi_cmnd *))
+{
+	struct scsi_id_instance_data *scsi_id =
+		(struct scsi_id_instance_data *)SCpnt->device->host->hostdata[0];
+	struct sbp2scsi_host_info *hi;
+
+	SBP2_DEBUG("sbp2scsi_queuecommand");
+
+	/*
+	 * If scsi_id is null, it means there is no device in this slot,
+	 * so we should return selection timeout.
+	 */
+	if (!scsi_id) {
+		SCpnt->result = DID_NO_CONNECT << 16;
+		done (SCpnt);
+		return 0;
+	}
+
+	hi = scsi_id->hi;
+
+	if (!hi) {
+		SBP2_ERR("sbp2scsi_host_info is NULL - this is bad!");
+		SCpnt->result = DID_NO_CONNECT << 16;
+		done (SCpnt);
+		return(0);
+	}
+
+	/*
+	 * Until we handle multiple luns, just return selection time-out
+	 * to any IO directed at non-zero LUNs
+	 */
+	if (SCpnt->device->lun) {
+		SCpnt->result = DID_NO_CONNECT << 16;
+		done (SCpnt);
+		return(0);
+	}
+
+	/*
+	 * Check for request sense command, and handle it here
+	 * (autorequest sense)
+	 * NOTE(review): the memcpy is bounded by request_bufflen, which is
+	 * assumed not to exceed sizeof(SCpnt->sense_buffer) - confirm the
+	 * midlayer guarantees this for REQUEST_SENSE.
+	 */
+	if (SCpnt->cmnd[0] == REQUEST_SENSE) {
+		SBP2_DEBUG("REQUEST_SENSE");
+		memcpy(SCpnt->request_buffer, SCpnt->sense_buffer, SCpnt->request_bufflen);
+		memset(SCpnt->sense_buffer, 0, sizeof(SCpnt->sense_buffer));
+		sbp2scsi_complete_command(scsi_id, SBP2_SCSI_STATUS_GOOD, SCpnt, done);
+		return(0);
+	}
+
+	/*
+	 * Check to see if we are in the middle of a bus reset.
+	 */
+	if (!hpsb_node_entry_valid(scsi_id->ne)) {
+		SBP2_ERR("Bus reset in progress - rejecting command");
+		SCpnt->result = DID_BUS_BUSY << 16;
+		done (SCpnt);
+		return(0);
+	}
+
+	/*
+	 * Try and send our SCSI command
+	 */
+	if (sbp2_send_command(scsi_id, SCpnt, done)) {
+		SBP2_ERR("Error sending SCSI command");
+		sbp2scsi_complete_command(scsi_id, SBP2_SCSI_STATUS_SELECTION_TIMEOUT,
+					  SCpnt, done);
+	}
+
+	return(0);
+}
+
+/*
+ * This function is called in order to complete all outstanding SBP-2
+ * commands (in case of resets, etc.).  Each in-use command is returned
+ * to the free pool and its scsi command is completed with the given
+ * DID_* status (placed in the host byte, << 16).
+ */
+static void sbp2scsi_complete_all_commands(struct scsi_id_instance_data *scsi_id,
+					   u32 status)
+{
+	struct sbp2scsi_host_info *hi = scsi_id->hi;
+	struct list_head *lh;
+	struct sbp2_command_info *command;
+
+	SBP2_DEBUG("sbp2scsi_complete_all_commands");
+
+	while (!list_empty(&scsi_id->sbp2_command_orb_inuse)) {
+		SBP2_DEBUG("Found pending command to complete");
+		lh = scsi_id->sbp2_command_orb_inuse.next;
+		command = list_entry(lh, struct sbp2_command_info, list);
+		/* Sync the ORB and s/g table back to the CPU before reuse */
+		pci_dma_sync_single_for_cpu(hi->host->pdev, command->command_orb_dma,
+					    sizeof(struct sbp2_command_orb),
+					    PCI_DMA_BIDIRECTIONAL);
+		pci_dma_sync_single_for_cpu(hi->host->pdev, command->sge_dma,
+					    sizeof(command->scatter_gather_element),
+					    PCI_DMA_BIDIRECTIONAL);
+		sbp2util_mark_command_completed(scsi_id, command);
+		if (command->Current_SCpnt) {
+			command->Current_SCpnt->result = status << 16;
+			command->Current_done(command->Current_SCpnt);
+		}
+	}
+
+	return;
+}
+
+/*
+ * This function is called in order to complete a regular SBP-2 command.
+ * Maps the SBP2_SCSI_STATUS_* code to a scsi result, applies response
+ * fixups on success, and invokes done() under the host lock.
+ *
+ * This can be called in interrupt context.
+ */
+static void sbp2scsi_complete_command(struct scsi_id_instance_data *scsi_id,
+				      u32 scsi_status, struct scsi_cmnd *SCpnt,
+				      void (*done)(struct scsi_cmnd *))
+{
+	unsigned long flags;
+
+	SBP2_DEBUG("sbp2scsi_complete_command");
+
+	/*
+	 * Sanity
+	 */
+	if (!SCpnt) {
+		SBP2_ERR("SCpnt is NULL");
+		return;
+	}
+
+	/*
+	 * If a bus reset is in progress and there was an error, don't
+	 * complete the command, just let it get retried at the end of the
+	 * bus reset.
+	 */
+	if (!hpsb_node_entry_valid(scsi_id->ne) && (scsi_status != SBP2_SCSI_STATUS_GOOD)) {
+		SBP2_ERR("Bus reset in progress - retry command later");
+		return;
+	}
+ 
+	/*
+	 * Switch on scsi status
+	 */
+	switch (scsi_status) {
+		case SBP2_SCSI_STATUS_GOOD:
+			/* NOTE(review): other branches use DID_xxx << 16;
+			 * DID_OK is 0 so the missing shift is harmless, but
+			 * DID_OK << 16 would be consistent. */
+			SCpnt->result = DID_OK;
+			break;
+
+		case SBP2_SCSI_STATUS_BUSY:
+			SBP2_ERR("SBP2_SCSI_STATUS_BUSY");
+			SCpnt->result = DID_BUS_BUSY << 16;
+			break;
+
+		case SBP2_SCSI_STATUS_CHECK_CONDITION:
+			SBP2_DEBUG("SBP2_SCSI_STATUS_CHECK_CONDITION");
+			SCpnt->result = CHECK_CONDITION << 1;
+
+			/*
+			 * Debug stuff
+			 */
+#if CONFIG_IEEE1394_SBP2_DEBUG >= 1
+			scsi_print_command(SCpnt);
+			scsi_print_sense("bh", SCpnt);
+#endif
+
+			break;
+
+		case SBP2_SCSI_STATUS_SELECTION_TIMEOUT:
+			SBP2_ERR("SBP2_SCSI_STATUS_SELECTION_TIMEOUT");
+			SCpnt->result = DID_NO_CONNECT << 16;
+			scsi_print_command(SCpnt);
+			break;
+
+		case SBP2_SCSI_STATUS_CONDITION_MET:
+		case SBP2_SCSI_STATUS_RESERVATION_CONFLICT:
+		case SBP2_SCSI_STATUS_COMMAND_TERMINATED:
+			SBP2_ERR("Bad SCSI status = %x", scsi_status);
+			SCpnt->result = DID_ERROR << 16;
+			scsi_print_command(SCpnt);
+			break;
+
+		default:
+			SBP2_ERR("Unsupported SCSI status = %x", scsi_status);
+			SCpnt->result = DID_ERROR << 16;
+	}
+
+	/*
+	 * Take care of any sbp2 response data mucking here (RBC stuff, etc.)
+	 */
+	if (SCpnt->result == DID_OK) {
+		sbp2_check_sbp2_response(scsi_id, SCpnt);
+	}
+
+	/*
+	 * If a bus reset is in progress and there was an error, complete
+	 * the command as busy so that it will get retried.
+	 */
+	if (!hpsb_node_entry_valid(scsi_id->ne) && (scsi_status != SBP2_SCSI_STATUS_GOOD)) {
+		SBP2_ERR("Completing command with busy (bus reset)");
+		SCpnt->result = DID_BUS_BUSY << 16;
+	}
+
+	/*
+	 * If a unit attention occurs, return busy status so it gets
+	 * retried... it could have happened because of a 1394 bus reset
+	 * or hot-plug...
+	 */
+#if 0
+	if ((scsi_status == SBP2_SCSI_STATUS_CHECK_CONDITION) &&
+	    (SCpnt->sense_buffer[2] == UNIT_ATTENTION)) {
+		SBP2_DEBUG("UNIT ATTENTION - return busy");
+		SCpnt->result = DID_BUS_BUSY << 16;
+	}
+#endif
+
+	/*
+	 * Tell scsi stack that we're done with this command
+	 */
+	spin_lock_irqsave(scsi_id->scsi_host->host_lock,flags);
+	done (SCpnt);
+	spin_unlock_irqrestore(scsi_id->scsi_host->host_lock,flags);
+
+	return;
+}
+
+
+/*
+ * Per-device setup hook from the scsi midlayer: constrain request
+ * buffer alignment to 512-byte boundaries (mask of 511).
+ */
+static int sbp2scsi_slave_configure (struct scsi_device *sdev)
+{
+	blk_queue_dma_alignment(sdev->request_queue, (512 - 1));
+
+	return 0;
+}
+
+
+/*
+ * Called by scsi stack when something has really gone wrong.  Usually
+ * called when a command has timed-out for some reason.
+ *
+ * Returns any matching in-flight command structure for SCpnt to the
+ * free pool (completing it with DID_ABORT), resets the fetch agent,
+ * and flushes all remaining outstanding commands with DID_BUS_BUSY.
+ */
+static int sbp2scsi_abort(struct scsi_cmnd *SCpnt)
+{
+	struct scsi_id_instance_data *scsi_id =
+		(struct scsi_id_instance_data *)SCpnt->device->host->hostdata[0];
+	struct sbp2scsi_host_info *hi;
+	struct sbp2_command_info *command;
+
+	SBP2_ERR("aborting sbp2 command");
+	scsi_print_command(SCpnt);
+
+	if (scsi_id) {
+
+		/*
+		 * Only dereference scsi_id after the NULL check; reading
+		 * scsi_id->hi in the initializer above would oops when no
+		 * device is attached to this host slot.
+		 */
+		hi = scsi_id->hi;
+
+		/*
+		 * Right now, just return any matching command structures
+		 * to the free pool.
+		 */
+		command = sbp2util_find_command_for_SCpnt(scsi_id, SCpnt);
+		if (command) {
+			SBP2_DEBUG("Found command to abort");
+			pci_dma_sync_single_for_cpu(hi->host->pdev,
+						    command->command_orb_dma,
+						    sizeof(struct sbp2_command_orb),
+						    PCI_DMA_BIDIRECTIONAL);
+			pci_dma_sync_single_for_cpu(hi->host->pdev,
+						    command->sge_dma,
+						    sizeof(command->scatter_gather_element),
+						    PCI_DMA_BIDIRECTIONAL);
+			sbp2util_mark_command_completed(scsi_id, command);
+			if (command->Current_SCpnt) {
+				command->Current_SCpnt->result = DID_ABORT << 16;
+				command->Current_done(command->Current_SCpnt);
+			}
+		}
+
+		/*
+		 * Initiate a fetch agent reset.
+		 */
+		sbp2_agent_reset(scsi_id, 0);
+		sbp2scsi_complete_all_commands(scsi_id, DID_BUS_BUSY);
+	}
+
+	return(SUCCESS);
+}
+
+/*
+ * Called by scsi stack when something has really gone wrong.  Issues a
+ * fetch agent reset on the target (if one is attached) and reports
+ * SUCCESS either way.
+ */
+static int sbp2scsi_reset(struct scsi_cmnd *SCpnt)
+{
+	struct scsi_id_instance_data *scsi_id =
+		(struct scsi_id_instance_data *)SCpnt->device->host->hostdata[0];
+
+	SBP2_ERR("reset requested");
+
+	if (!scsi_id)
+		return(SUCCESS);
+
+	SBP2_ERR("Generating sbp2 fetch agent reset");
+	sbp2_agent_reset(scsi_id, 0);
+
+	return(SUCCESS);
+}
+
+/* Driver description string reported to the scsi stack (.info hook). */
+static const char *sbp2scsi_info (struct Scsi_Host *host)
+{
+        return "SCSI emulation for IEEE-1394 SBP-2 Devices";
+}
+
+/*
+ * sysfs "ieee1394_id" show routine: prints "<guid>:<unit dir id>:<lun>"
+ * for the scsi device this driver created; returns 0 if the device or
+ * its scsi_id is gone.
+ */
+static ssize_t sbp2_sysfs_ieee1394_id_show(struct device *dev, char *buf)
+{
+	struct scsi_device *sdev;
+	struct scsi_id_instance_data *scsi_id;
+	int lun;
+
+	if (!(sdev = to_scsi_device(dev)))
+		return 0;
+
+	if (!(scsi_id = (struct scsi_id_instance_data *)sdev->host->hostdata[0]))
+		return 0;
+
+	/* Report lun 0 until the device type/lun field has been learned */
+	if (scsi_id->sbp2_device_type_and_lun == SBP2_DEVICE_TYPE_LUN_UNINITIALIZED)
+		lun = 0;
+	else
+		lun = ORB_SET_LUN(scsi_id->sbp2_device_type_and_lun);
+
+	return sprintf(buf, "%016Lx:%d:%d\n", (unsigned long long)scsi_id->ne->guid,
+		       scsi_id->ud->id, lun);
+}
+/* Read-only sysfs attribute backed by sbp2_sysfs_ieee1394_id_show */
+static DEVICE_ATTR(ieee1394_id, S_IRUGO, sbp2_sysfs_ieee1394_id_show, NULL);
+
+/* NULL-terminated attribute list installed on each scsi device */
+static struct device_attribute *sbp2_sysfs_sdev_attrs[] = {
+	&dev_attr_ieee1394_id,
+	NULL
+};
+
+MODULE_AUTHOR("Ben Collins <bcollins@debian.org>");
+MODULE_DESCRIPTION("IEEE-1394 SBP-2 protocol driver");
+MODULE_SUPPORTED_DEVICE(SBP2_DEVICE_NAME);
+MODULE_LICENSE("GPL");
+
+/* SCSI host template */
+static struct scsi_host_template scsi_driver_template = {
+	.module =			THIS_MODULE,
+	.name =				"SBP-2 IEEE-1394",
+	.proc_name =			SBP2_DEVICE_NAME,
+	.info =				sbp2scsi_info,
+	.queuecommand =			sbp2scsi_queuecommand,
+	.eh_abort_handler =		sbp2scsi_abort,
+	.eh_device_reset_handler =	sbp2scsi_reset,
+	.eh_bus_reset_handler =		sbp2scsi_reset,
+	.eh_host_reset_handler =	sbp2scsi_reset,
+	.slave_configure =		sbp2scsi_slave_configure,
+	.this_id =			-1,
+	.sg_tablesize =			SG_ALL,
+	.use_clustering =		ENABLE_CLUSTERING,
+	/* cmd_per_lun/can_queue may be reduced to 1 by the serialize_io
+	 * module option in sbp2_module_init() */
+	.cmd_per_lun =			SBP2_MAX_CMDS,
+	.can_queue = 			SBP2_MAX_CMDS,
+	.emulated =			1,
+	.sdev_attrs =			sbp2_sysfs_sdev_attrs,
+};
+
+/*
+ * Module initialization: applies module-option overrides to the scsi
+ * host template, then registers the sbp2 highlevel and protocol drivers
+ * with the ieee1394 stack.  Returns 0 on success or the error from
+ * hpsb_register_protocol().  Marked __init (matching the __exit on
+ * sbp2_module_exit) so init-only code is discarded after load.
+ */
+static int __init sbp2_module_init(void)
+{
+	int ret;
+
+	SBP2_DEBUG("sbp2_module_init");
+
+	printk(KERN_INFO "sbp2: %s\n", version);
+
+	/* Module load debug option to force one command at a time (serializing I/O) */
+	if (serialize_io) {
+		SBP2_ERR("Driver forced to serialize I/O (serialize_io = 1)");
+		scsi_driver_template.can_queue = 1;
+		scsi_driver_template.cmd_per_lun = 1;
+	}
+
+	/* Set max sectors (module load option). Default is 255 sectors. */
+	scsi_driver_template.max_sectors = max_sectors;
+
+	/* Register our high level driver with 1394 stack */
+	hpsb_register_highlevel(&sbp2_highlevel);
+
+	ret = hpsb_register_protocol(&sbp2_driver);
+	if (ret) {
+		SBP2_ERR("Failed to register protocol");
+		hpsb_unregister_highlevel(&sbp2_highlevel);
+		return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * Module teardown: unregisters the protocol driver and then the
+ * highlevel driver from the ieee1394 stack (reverse of init order).
+ */
+static void __exit sbp2_module_exit(void)
+{
+	SBP2_DEBUG("sbp2_module_exit");
+
+	hpsb_unregister_protocol(&sbp2_driver);
+
+	hpsb_unregister_highlevel(&sbp2_highlevel);
+}
+
+module_init(sbp2_module_init);
+module_exit(sbp2_module_exit);
diff --git a/drivers/ieee1394/sbp2.h b/drivers/ieee1394/sbp2.h
new file mode 100644
index 000000000000..a84b039a05b9
--- /dev/null
+++ b/drivers/ieee1394/sbp2.h
@@ -0,0 +1,484 @@
+/*
+ * sbp2.h - Defines and prototypes for sbp2.c
+ *
+ * Copyright (C) 2000 James Goodwin, Filanet Corporation (www.filanet.com)
+ * jamesg@filanet.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef SBP2_H
+#define SBP2_H
+
+#define SBP2_DEVICE_NAME		"sbp2"
+
+/*
+ * SBP2 specific structures and defines
+ */
+
+#define ORB_DIRECTION_WRITE_TO_MEDIA    0x0
+#define ORB_DIRECTION_READ_FROM_MEDIA   0x1
+#define ORB_DIRECTION_NO_DATA_TRANSFER  0x2
+
+/* ORB field packing helpers.  Arguments are fully parenthesized so that
+ * callers may pass compound expressions (e.g. a | b) without operator
+ * precedence surprises. */
+#define ORB_SET_NULL_PTR(value)			(((value) & 0x1) << 31)
+#define ORB_SET_NOTIFY(value)                   (((value) & 0x1) << 31)
+#define ORB_SET_RQ_FMT(value)                   (((value) & 0x3) << 29)	/* unused ? */
+#define ORB_SET_NODE_ID(value)			(((value) & 0xffff) << 16)
+#define ORB_SET_DATA_SIZE(value)                ((value) & 0xffff)
+#define ORB_SET_PAGE_SIZE(value)                (((value) & 0x7) << 16)
+#define ORB_SET_PAGE_TABLE_PRESENT(value)       (((value) & 0x1) << 19)
+#define ORB_SET_MAX_PAYLOAD(value)              (((value) & 0xf) << 20)
+#define ORB_SET_SPEED(value)                    (((value) & 0x7) << 24)
+#define ORB_SET_DIRECTION(value)                (((value) & 0x1) << 27)
+
+/* SBP-2 command block ORB.  The next_ORB pointer pair is volatile,
+ * presumably because the target fetches the ORB list via DMA while the
+ * host rewrites the link pointers -- confirm against the ORB linking
+ * code in sbp2.c. */
+struct sbp2_command_orb {
+	volatile u32 next_ORB_hi;
+	volatile u32 next_ORB_lo;
+	u32 data_descriptor_hi;
+	u32 data_descriptor_lo;
+	u32 misc;		/* packed via the ORB_SET_* helpers above */
+	u8 cdb[12];		/* SCSI command descriptor block */
+};
+
+#define SBP2_LOGIN_REQUEST		0x0
+#define SBP2_QUERY_LOGINS_REQUEST	0x1
+#define SBP2_RECONNECT_REQUEST		0x3
+#define SBP2_SET_PASSWORD_REQUEST	0x4
+#define SBP2_LOGOUT_REQUEST		0x7
+#define SBP2_ABORT_TASK_REQUEST		0xb
+#define SBP2_ABORT_TASK_SET		0xc
+#define SBP2_LOGICAL_UNIT_RESET		0xe
+#define SBP2_TARGET_RESET_REQUEST	0xf
+
+/* Management ORB field packing helpers; arguments fully parenthesized
+ * to avoid precedence bugs with compound expressions. */
+#define ORB_SET_LUN(value)                      ((value) & 0xffff)
+#define ORB_SET_FUNCTION(value)                 (((value) & 0xf) << 16)
+#define ORB_SET_RECONNECT(value)                (((value) & 0xf) << 20)
+#define ORB_SET_EXCLUSIVE(value)                (((value) & 0x1) << 28)
+#define ORB_SET_LOGIN_RESP_LENGTH(value)        ((value) & 0xffff)
+#define ORB_SET_PASSWD_LENGTH(value)            (((value) & 0xffff) << 16)
+
+/* SBP-2 login management ORB (request the target to accept a login). */
+struct sbp2_login_orb {
+	u32 password_hi;
+	u32 password_lo;
+	u32 login_response_hi;		/* where the target writes the login response */
+	u32 login_response_lo;
+	u32 lun_misc;			/* LUN, function code, exclusive bit (ORB_SET_*) */
+	u32 passwd_resp_lengths;
+	u32 status_FIFO_hi;		/* our per-unit status FIFO address */
+	u32 status_FIFO_lo;
+};
+
+/* Login-response field extraction helpers; arguments parenthesized. */
+#define RESPONSE_GET_LOGIN_ID(value)            ((value) & 0xffff)
+#define RESPONSE_GET_LENGTH(value)              (((value) >> 16) & 0xffff)
+#define RESPONSE_GET_RECONNECT_HOLD(value)      ((value) & 0xffff)
+
+/* Login response block, written back by the target after a login ORB. */
+struct sbp2_login_response {
+	u32 length_login_ID;		/* decode with RESPONSE_GET_* above */
+	u32 command_block_agent_hi;
+	u32 command_block_agent_lo;
+	u32 reconnect_hold;
+};
+
+#define ORB_SET_LOGIN_ID(value)                 (value & 0xffff)
+
+#define ORB_SET_QUERY_LOGINS_RESP_LENGTH(value) (value & 0xffff)
+
+/* Query-logins management ORB (ask the target who is logged in). */
+struct sbp2_query_logins_orb {
+	u32 reserved1;
+	u32 reserved2;
+	u32 query_response_hi;
+	u32 query_response_lo;
+	u32 lun_misc;
+	u32 reserved_resp_length;
+	u32 status_FIFO_hi;
+	u32 status_FIFO_lo;
+};
+
+#define RESPONSE_GET_MAX_LOGINS(value)          (value & 0xffff)
+#define RESPONSE_GET_ACTIVE_LOGINS(value)       ((RESPONSE_GET_LENGTH(value) - 4) / 12)
+
+/* Response block for the query-logins ORB above. */
+struct sbp2_query_logins_response {
+	u32 length_max_logins;
+	u32 misc_IDs;
+	u32 initiator_misc_hi;
+	u32 initiator_misc_lo;
+};
+
+/* Reconnect management ORB (re-establish a login after bus reset). */
+struct sbp2_reconnect_orb {
+	u32 reserved1;
+	u32 reserved2;
+        u32 reserved3;
+        u32 reserved4;
+	u32 login_ID_misc;
+	u32 reserved5;
+	u32 status_FIFO_hi;
+	u32 status_FIFO_lo;
+};
+
+/* Logout management ORB; same layout as the reconnect ORB. */
+struct sbp2_logout_orb {
+	u32 reserved1;
+	u32 reserved2;
+        u32 reserved3;
+        u32 reserved4;
+	u32 login_ID_misc;
+	u32 reserved5;
+	u32 status_FIFO_hi;
+	u32 status_FIFO_lo;
+};
+
+#define PAGE_TABLE_SET_SEGMENT_BASE_HI(value)   (value & 0xffff)
+#define PAGE_TABLE_SET_SEGMENT_LENGTH(value)    ((value & 0xffff) << 16)
+
+/* One entry of an SBP-2 unrestricted page table: a segment length plus
+ * the 48-bit segment base split hi/lo (pack with PAGE_TABLE_SET_*). */
+struct sbp2_unrestricted_page_table {
+	u32 length_segment_base_hi;
+	u32 segment_base_lo;
+};
+
+#define RESP_STATUS_REQUEST_COMPLETE		0x0
+#define RESP_STATUS_TRANSPORT_FAILURE		0x1
+#define RESP_STATUS_ILLEGAL_REQUEST		0x2
+#define RESP_STATUS_VENDOR_DEPENDENT		0x3
+
+#define SBP2_STATUS_NO_ADDITIONAL_INFO		0x0
+#define SBP2_STATUS_REQ_TYPE_NOT_SUPPORTED	0x1
+#define SBP2_STATUS_SPEED_NOT_SUPPORTED		0x2
+#define SBP2_STATUS_PAGE_SIZE_NOT_SUPPORTED	0x3
+#define SBP2_STATUS_ACCESS_DENIED		0x4
+#define SBP2_STATUS_LU_NOT_SUPPORTED		0x5
+#define SBP2_STATUS_MAX_PAYLOAD_TOO_SMALL	0x6
+#define SBP2_STATUS_RESOURCES_UNAVAILABLE	0x8
+#define SBP2_STATUS_FUNCTION_REJECTED		0x9
+#define SBP2_STATUS_LOGIN_ID_NOT_RECOGNIZED	0xa
+#define SBP2_STATUS_DUMMY_ORB_COMPLETED		0xb
+#define SBP2_STATUS_REQUEST_ABORTED		0xc
+#define SBP2_STATUS_UNSPECIFIED_ERROR		0xff
+
+#define SFMT_CURRENT_ERROR			0x0
+#define SFMT_DEFERRED_ERROR			0x1
+#define SFMT_VENDOR_DEPENDENT_STATUS		0x3
+
+#define SBP2_SCSI_STATUS_GOOD			0x0
+#define SBP2_SCSI_STATUS_CHECK_CONDITION	0x2
+#define SBP2_SCSI_STATUS_CONDITION_MET		0x4
+#define SBP2_SCSI_STATUS_BUSY			0x8
+#define SBP2_SCSI_STATUS_RESERVATION_CONFLICT	0x18
+#define SBP2_SCSI_STATUS_COMMAND_TERMINATED	0x22
+
+#define SBP2_SCSI_STATUS_SELECTION_TIMEOUT	0xff
+
+/* Status block field extraction helpers; arguments parenthesized so
+ * compound expressions decode correctly. */
+#define STATUS_GET_ORB_OFFSET_HI(value)         ((value) & 0xffff)
+#define STATUS_GET_SBP_STATUS(value)            (((value) >> 16) & 0xff)
+#define STATUS_GET_LENGTH(value)                (((value) >> 24) & 0x7)
+#define STATUS_GET_DEAD_BIT(value)              (((value) >> 27) & 0x1)
+#define STATUS_GET_RESP(value)                  (((value) >> 28) & 0x3)
+#define STATUS_GET_SRC(value)                   (((value) >> 30) & 0x3)
+
+/* SBP-2 status block, as written by the target into our status FIFO.
+ * Decode the first word with the STATUS_GET_* helpers above. */
+struct sbp2_status_block {
+	u32 ORB_offset_hi_misc;
+	u32 ORB_offset_lo;
+        u8 command_set_dependent[24];
+};
+
+/*
+ * Miscellaneous SBP2 related config rom defines
+ */
+
+/* The status fifo address definition below is used as a base for each
+ * node, which a chunk seperately assigned to each unit directory in the
+ * node.  For example, 0xfffe00000000ULL is used for the first sbp2 device
+ * detected on node 0, 0xfffe00000020ULL for the next sbp2 device on node
+ * 0, and so on.
+ *
+ * Note: We could use a single status fifo address for all sbp2 devices,
+ * and figure out which sbp2 device the status belongs to by looking at
+ * the source node id of the status write... but, using separate addresses
+ * for each sbp2 unit directory allows for better code and the ability to
+ * support multiple luns within a single 1394 node.
+ *
+ * Also note that we choose the address range below as it is a region
+ * specified for write posting, where the ohci controller will
+ * automatically send an ack_complete when the status is written by the
+ * sbp2 device... saving a split transaction.   =)
+ */ 
+#define SBP2_STATUS_FIFO_ADDRESS				0xfffe00000000ULL
+#define SBP2_STATUS_FIFO_ADDRESS_HI                             0xfffe
+#define SBP2_STATUS_FIFO_ADDRESS_LO                             0x0
+
+#define SBP2_STATUS_FIFO_ENTRY_TO_OFFSET(entry)			((entry) << 5)
+#define SBP2_STATUS_FIFO_OFFSET_TO_ENTRY(offset)		((offset) >> 5)
+
+#define SBP2_UNIT_DIRECTORY_OFFSET_KEY				0xd1
+#define SBP2_CSR_OFFSET_KEY					0x54
+#define SBP2_UNIT_SPEC_ID_KEY					0x12
+#define SBP2_UNIT_SW_VERSION_KEY				0x13
+#define SBP2_COMMAND_SET_SPEC_ID_KEY				0x38
+#define SBP2_COMMAND_SET_KEY					0x39
+#define SBP2_UNIT_CHARACTERISTICS_KEY				0x3a
+#define SBP2_DEVICE_TYPE_AND_LUN_KEY				0x14
+#define SBP2_FIRMWARE_REVISION_KEY				0x3c
+
+#define SBP2_DEVICE_TYPE(q)					(((q) >> 16) & 0x1f)
+#define SBP2_DEVICE_LUN(q)					((q) & 0xffff)
+
+#define SBP2_AGENT_STATE_OFFSET					0x00ULL
+#define SBP2_AGENT_RESET_OFFSET					0x04ULL
+#define SBP2_ORB_POINTER_OFFSET					0x08ULL
+#define SBP2_DOORBELL_OFFSET					0x10ULL
+#define SBP2_UNSOLICITED_STATUS_ENABLE_OFFSET			0x14ULL
+#define SBP2_UNSOLICITED_STATUS_VALUE				0xf
+
+#define SBP2_BUSY_TIMEOUT_ADDRESS				0xfffff0000210ULL
+#define SBP2_BUSY_TIMEOUT_VALUE					0xf
+
+#define SBP2_AGENT_RESET_DATA					0xf
+
+/*
+ * Unit spec id and sw version entry for SBP-2 devices
+ */
+
+#define SBP2_UNIT_SPEC_ID_ENTRY					0x0000609e
+#define SBP2_SW_VERSION_ENTRY					0x00010483
+
+/*
+ * Other misc defines
+ */
+#define SBP2_128KB_BROKEN_FIRMWARE				0xa0b800
+
+#define SBP2_DEVICE_TYPE_LUN_UNINITIALIZED			0xffffffff
+
+/*
+ * SCSI specific stuff
+ */
+
+#define SBP2_MAX_SG_ELEMENT_LENGTH	0xf000
+#define SBP2_MAX_UDS_PER_NODE		16	/* Maximum scsi devices per node */
+#define SBP2_MAX_SECTORS		255	/* Max sectors supported */
+
+#ifndef TYPE_SDAD
+#define TYPE_SDAD			0x0e	/* simplified direct access device */
+#endif
+
+/*
+ * SCSI direction table...
+ * (now used as a back-up in case the direction passed down from above is "unknown")
+ *
+ * DIN = IN data direction
+ * DOU = OUT data direction
+ * DNO = No data transfer
+ * DUN = Unknown data direction
+ *
+ * Opcode 0xec (Teac specific "opc execute") possibly should be DNO,
+ * but we'll change it when somebody reports a problem with this.
+ */
+#define DIN				ORB_DIRECTION_READ_FROM_MEDIA
+#define DOU				ORB_DIRECTION_WRITE_TO_MEDIA
+#define DNO				ORB_DIRECTION_NO_DATA_TRANSFER
+#define DUN				DIN
+
+/* Fallback data-direction lookup, indexed by SCSI opcode (0x00-0xff);
+ * see the DIN/DOU/DNO/DUN legend above.  Only consulted when the
+ * direction handed down from the SCSI layer is unknown. */
+static unchar sbp2scsi_direction_table[0x100] = {
+	DNO,DNO,DIN,DIN,DOU,DIN,DIN,DOU,DIN,DUN,DOU,DOU,DUN,DUN,DUN,DIN,
+	DNO,DIN,DIN,DOU,DIN,DOU,DNO,DNO,DOU,DNO,DIN,DNO,DIN,DOU,DNO,DUN,
+	DIN,DUN,DIN,DIN,DOU,DIN,DUN,DUN,DIN,DIN,DOU,DNO,DUN,DIN,DOU,DOU,
+	DOU,DOU,DOU,DNO,DIN,DNO,DNO,DIN,DOU,DOU,DOU,DOU,DIN,DOU,DIN,DOU,
+	DOU,DOU,DIN,DIN,DIN,DNO,DIN,DNO,DNO,DNO,DUN,DNO,DOU,DIN,DNO,DUN,
+	DUN,DIN,DIN,DNO,DNO,DOU,DUN,DUN,DNO,DIN,DIN,DNO,DIN,DOU,DUN,DUN,
+	DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
+	DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
+	DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
+	DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
+	DUN,DNO,DOU,DOU,DIN,DNO,DNO,DNO,DIN,DNO,DOU,DUN,DNO,DIN,DOU,DOU,
+	DOU,DOU,DOU,DNO,DUN,DIN,DOU,DIN,DIN,DIN,DNO,DNO,DNO,DIN,DIN,DUN,
+	DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
+	DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
+	DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DOU,DUN,DUN,DUN,DUN,DUN,
+	DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN
+};
+
+/* This should be safe */
+#define SBP2_MAX_CMDS		8
+
+/* This is the two dma types we use for cmd_dma below */
+/* Records how cmd_dma (in sbp2_command_info) was mapped, so the
+ * completion/teardown path knows whether and how to unmap it. */
+enum cmd_dma_types {
+	CMD_DMA_NONE,		/* no extra DMA mapping */
+	CMD_DMA_PAGE,		/* mapped as a page */
+	CMD_DMA_SINGLE		/* mapped as a single buffer */
+};
+
+/*
+ * Encapsulates all the info necessary for an outstanding command.
+ */
+struct sbp2_command_info {
+
+	struct list_head list;		/* link in the per-id inuse/completed lists */
+	/* The ORB itself and its bus address; cacheline aligned since the
+	 * target reads it via DMA. */
+	struct sbp2_command_orb command_orb ____cacheline_aligned;
+	dma_addr_t command_orb_dma ____cacheline_aligned;
+	struct scsi_cmnd *Current_SCpnt;		/* SCSI command being serviced */
+	void (*Current_done)(struct scsi_cmnd *);	/* its completion callback */
+
+	/* Also need s/g structure for each sbp2 command */
+	struct sbp2_unrestricted_page_table scatter_gather_element[SG_ALL] ____cacheline_aligned;
+	dma_addr_t sge_dma ____cacheline_aligned;
+	void *sge_buffer;
+	dma_addr_t cmd_dma;
+	enum cmd_dma_types dma_type;	/* how cmd_dma was mapped (for unmap) */
+	unsigned long dma_size;
+	int dma_dir;
+
+};
+
+/* A list of flags for detected oddities and brokenness. */
+#define SBP2_BREAKAGE_128K_MAX_TRANSFER		0x1
+#define SBP2_BREAKAGE_INQUIRY_HACK		0x2
+
+
+struct sbp2scsi_host_info;
+
+
+/*
+ * Information needed on a per scsi id basis (one for each sbp2 device)
+ */
+struct scsi_id_instance_data {
+	/*
+	 * Various sbp2 specific structures -- each ORB/response buffer is
+	 * paired with the dma_addr_t of its coherent mapping.
+	 */
+	struct sbp2_command_orb *last_orb;
+	dma_addr_t last_orb_dma;
+	struct sbp2_login_orb *login_orb;
+	dma_addr_t login_orb_dma;
+	struct sbp2_login_response *login_response;
+	dma_addr_t login_response_dma;
+	struct sbp2_query_logins_orb *query_logins_orb;
+	dma_addr_t query_logins_orb_dma;
+	struct sbp2_query_logins_response *query_logins_response;
+	dma_addr_t query_logins_response_dma;
+	struct sbp2_reconnect_orb *reconnect_orb;
+	dma_addr_t reconnect_orb_dma;
+	struct sbp2_logout_orb *logout_orb;
+	dma_addr_t logout_orb_dma;
+	struct sbp2_status_block status_block;
+
+	/*
+	 * Stuff we need to know about the sbp2 device itself
+	 */
+	u64 sbp2_management_agent_addr;
+	u64 sbp2_command_block_agent_addr;
+	u32 speed_code;
+	u32 max_payload_size;
+
+	/*
+	 * Values pulled from the device's unit directory
+	 */
+	u32 sbp2_command_set_spec_id;
+	u32 sbp2_command_set;
+	u32 sbp2_unit_characteristics;
+	u32 sbp2_device_type_and_lun;
+	u32 sbp2_firmware_revision;
+
+	/*
+	 * Variable used for logins, reconnects, logouts, query logins
+	 */
+	atomic_t sbp2_login_complete;
+
+	/*
+	 * Pool of command orbs, so we can have more than one overlapped
+	 * command per id
+	 */
+	spinlock_t sbp2_command_orb_lock;
+	struct list_head sbp2_command_orb_inuse;
+	struct list_head sbp2_command_orb_completed;
+
+	struct list_head scsi_list;	/* link in sbp2scsi_host_info.scsi_ids */
+
+	/* Node entry, as retrieved from NodeMgr entries */
+	struct node_entry *ne;
+	struct unit_directory *ud;
+
+	/* A backlink to our host_info */
+	struct sbp2scsi_host_info *hi;
+
+	/* SCSI related pointers */
+	struct scsi_device *sdev;
+	struct Scsi_Host *scsi_host;
+
+	/* Device specific workarounds/brokenness (SBP2_BREAKAGE_* flags) */
+	u32 workarounds;
+};
+
+
+/* Sbp2 host data structure (one per IEEE1394 host) */
+struct sbp2scsi_host_info {
+	struct hpsb_host *host;		/* IEEE1394 host */
+	struct list_head scsi_ids;	/* List of scsi ids on this host */
+};
+
+/*
+ * Function prototypes
+ */
+
+/*
+ * Various utility prototypes
+ */
+static int sbp2util_create_command_orb_pool(struct scsi_id_instance_data *scsi_id);
+static void sbp2util_remove_command_orb_pool(struct scsi_id_instance_data *scsi_id);
+static struct sbp2_command_info *sbp2util_find_command_for_orb(struct scsi_id_instance_data *scsi_id, dma_addr_t orb);
+static struct sbp2_command_info *sbp2util_find_command_for_SCpnt(struct scsi_id_instance_data *scsi_id, void *SCpnt);
+static struct sbp2_command_info *sbp2util_allocate_command_orb(struct scsi_id_instance_data *scsi_id,
+							  struct scsi_cmnd *Current_SCpnt,
+							  void (*Current_done)(struct scsi_cmnd *));
+static void sbp2util_mark_command_completed(struct scsi_id_instance_data *scsi_id,
+		struct sbp2_command_info *command);
+
+
+static int sbp2_start_device(struct scsi_id_instance_data *scsi_id);
+static void sbp2_remove_device(struct scsi_id_instance_data *scsi_id);
+
+#ifdef CONFIG_IEEE1394_SBP2_PHYS_DMA
+static int sbp2_handle_physdma_write(struct hpsb_host *host, int nodeid, int destid, quadlet_t *data,
+                                     u64 addr, size_t length, u16 flags);
+static int sbp2_handle_physdma_read(struct hpsb_host *host, int nodeid, quadlet_t *data,
+                                    u64 addr, size_t length, u16 flags);
+#endif
+
+/*
+ * SBP-2 protocol related prototypes
+ */
+static int sbp2_query_logins(struct scsi_id_instance_data *scsi_id);
+static int sbp2_login_device(struct scsi_id_instance_data *scsi_id);
+static int sbp2_reconnect_device(struct scsi_id_instance_data *scsi_id);
+static int sbp2_logout_device(struct scsi_id_instance_data *scsi_id);
+static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, int destid,
+				    quadlet_t *data, u64 addr, size_t length, u16 flags);
+static int sbp2_agent_reset(struct scsi_id_instance_data *scsi_id, int wait);
+static int sbp2_create_command_orb(struct scsi_id_instance_data *scsi_id,
+				   struct sbp2_command_info *command,
+				   unchar *scsi_cmd,
+				   unsigned int scsi_use_sg,
+				   unsigned int scsi_request_bufflen,
+				   void *scsi_request_buffer,
+				   enum dma_data_direction dma_dir);
+static int sbp2_link_orb_command(struct scsi_id_instance_data *scsi_id,
+				 struct sbp2_command_info *command);
+static int sbp2_send_command(struct scsi_id_instance_data *scsi_id,
+			     struct scsi_cmnd *SCpnt,
+			     void (*done)(struct scsi_cmnd *));
+static unsigned int sbp2_status_to_sense_data(unchar *sbp2_status, unchar *sense_data);
+static void sbp2_check_sbp2_command(struct scsi_id_instance_data *scsi_id, unchar *cmd);
+static void sbp2_check_sbp2_response(struct scsi_id_instance_data *scsi_id,
+				     struct scsi_cmnd *SCpnt);
+static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id,
+				      struct unit_directory *ud);
+static int sbp2_set_busy_timeout(struct scsi_id_instance_data *scsi_id);
+static int sbp2_max_speed_and_size(struct scsi_id_instance_data *scsi_id);
+
+#endif /* SBP2_H */
diff --git a/drivers/ieee1394/video1394.c b/drivers/ieee1394/video1394.c
new file mode 100644
index 000000000000..4bedf7113f40
--- /dev/null
+++ b/drivers/ieee1394/video1394.c
@@ -0,0 +1,1527 @@
+/*
+ * video1394.c - video driver for OHCI 1394 boards
+ * Copyright (C)1999,2000 Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>
+ *                        Peter Schlaile <udbz@rz.uni-karlsruhe.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * NOTES:
+ *
+ * jds -- add private data to file to keep track of iso contexts associated
+ * with each open -- so release won't kill all iso transfers.
+ * 
+ * Damien Douxchamps: Fix failure when the number of DMA pages per frame is
+ * one.
+ * 
+ * ioctl return codes:
+ * EFAULT is only for invalid address for the argp
+ * EINVAL for out of range values
+ * EBUSY when trying to use an already used resource
+ * ESRCH when trying to free/stop a not used resource
+ * EAGAIN for resource allocation failure that could perhaps succeed later
+ * ENOTTY for unsupported ioctl request
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/wait.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/fs.h>
+#include <linux/poll.h>
+#include <linux/smp_lock.h>
+#include <linux/delay.h>
+#include <linux/devfs_fs_kernel.h>
+#include <linux/bitops.h>
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+#include <linux/timex.h>
+#include <linux/mm.h>
+#include <linux/ioctl32.h>
+#include <linux/compat.h>
+#include <linux/cdev.h>
+
+#include "ieee1394.h"
+#include "ieee1394_types.h"
+#include "hosts.h"
+#include "ieee1394_core.h"
+#include "highlevel.h"
+#include "video1394.h"
+#include "nodemgr.h"
+#include "dma.h"
+
+#include "ohci1394.h"
+
+#define ISO_CHANNELS 64
+
+#ifndef virt_to_page
+#define virt_to_page(x) MAP_NR(x)
+#endif
+
+#ifndef vmalloc_32
+#define vmalloc_32(x) vmalloc(x)
+#endif
+
+/* One iso-transmit DMA program entry: an OUTPUT_MORE begin command, the
+ * packet header data, and the closing command. */
+struct it_dma_prg {
+	struct dma_cmd begin;
+	quadlet_t data[4];
+	struct dma_cmd end;
+	quadlet_t pad[4]; /* FIXME: quick hack for memory alignment */
+};
+
+/* Per-context state for one isochronous DMA channel (receive or
+ * transmit), covering num_desc frame buffers of buf_size bytes each. */
+struct dma_iso_ctx {
+	struct ti_ohci *ohci;
+	int type; /* OHCI_ISO_TRANSMIT or OHCI_ISO_RECEIVE */
+	struct ohci1394_iso_tasklet iso_tasklet;
+	int channel;
+	int ctx;	/* OHCI context number assigned by the tasklet code */
+	int last_buffer;
+	int * next_buffer;  /* For ISO Transmit of video packets
+			       to write the correct SYT field
+			       into the next block */
+	unsigned int num_desc;
+	unsigned int buf_size;		/* frame_size rounded up to page multiple */
+	unsigned int frame_size;
+	unsigned int packet_size;
+	unsigned int left_size;		/* size of the final (partial) chunk */
+	unsigned int nb_cmd;		/* DMA commands per frame buffer */
+
+	struct dma_region dma;		/* one big region backing all buffers */
+
+	struct dma_prog_region *prg_reg;	/* num_desc program regions */
+
+        struct dma_cmd **ir_prg;	/* receive programs (NULL for transmit) */
+	struct it_dma_prg **it_prg;	/* transmit programs (NULL for receive) */
+
+	unsigned int *buffer_status;
+        struct timeval *buffer_time; /* time when the buffer was received */
+	unsigned int *last_used_cmd; /* For ISO Transmit with
+					variable sized packets only ! */
+	/* OHCI register offsets for this context */
+	int ctrlClear;
+	int ctrlSet;
+	int cmdPtr;
+	int ctxMatch;
+	wait_queue_head_t waitq;
+	spinlock_t lock;	/* guards buffer_status/buffer_time updates */
+	unsigned int syt_offset;
+	int flags;
+
+	struct list_head link;	/* link in the owning file_ctx context_list */
+};
+
+
+/* Per-open-file state: the iso contexts created through this fd, so
+ * release can tear down only its own transfers. */
+struct file_ctx {
+	struct ti_ohci *ohci;
+	struct list_head context_list;	/* dma_iso_ctx.link entries */
+	struct dma_iso_ctx *current_ctx;
+};
+
+#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
+#define VIDEO1394_DEBUG
+#endif
+
+#ifdef DBGMSG
+#undef DBGMSG
+#endif
+
+#ifdef VIDEO1394_DEBUG
+#define DBGMSG(card, fmt, args...) \
+printk(KERN_INFO "video1394_%d: " fmt "\n" , card , ## args)
+#else
+#define DBGMSG(card, fmt, args...)
+#endif
+
+/* print general (card independent) information */
+#define PRINT_G(level, fmt, args...) \
+printk(level "video1394: " fmt "\n" , ## args)
+
+/* print card specific information */
+#define PRINT(level, card, fmt, args...) \
+printk(level "video1394_%d: " fmt "\n" , card , ## args)
+
+static void wakeup_dma_ir_ctx(unsigned long l);
+static void wakeup_dma_it_ctx(unsigned long l);
+
+static struct hpsb_highlevel video1394_highlevel;
+
+/*
+ * Tear down a dma_iso_ctx: stop the OHCI context, unregister the iso
+ * tasklet if it was registered, release the DMA region and all program
+ * regions, free the bookkeeping arrays, unlink and free the context.
+ * Also used to unwind partially constructed contexts from the
+ * alloc_dma_iso_ctx() error paths, so every member may be NULL/empty.
+ */
+static int free_dma_iso_ctx(struct dma_iso_ctx *d)
+{
+	int i;
+
+	DBGMSG(d->ohci->host->id, "Freeing dma_iso_ctx %d", d->ctx);
+
+	ohci1394_stop_context(d->ohci, d->ctrlClear, NULL);
+	if (d->iso_tasklet.link.next != NULL)
+		ohci1394_unregister_iso_tasklet(d->ohci, &d->iso_tasklet);
+
+	dma_region_free(&d->dma);
+
+	if (d->prg_reg) {
+		for (i = 0; i < d->num_desc; i++)
+			dma_prog_region_free(&d->prg_reg[i]);
+		kfree(d->prg_reg);
+	}
+
+	/* kfree(NULL) is a no-op, so the pointers need no individual checks */
+	kfree(d->ir_prg);
+	kfree(d->it_prg);
+	kfree(d->buffer_status);
+	kfree(d->buffer_time);
+	kfree(d->last_used_cmd);
+	kfree(d->next_buffer);
+
+	list_del(&d->link);
+
+	kfree(d);
+
+	return 0;
+}
+
+/*
+ * Allocate and initialize a DMA iso context for `channel': the backing
+ * DMA region (num_desc buffers of buf_size bytes), the per-buffer OHCI
+ * DMA program regions, the iso tasklet and the bookkeeping arrays.
+ * Returns NULL on any failure; free_dma_iso_ctx() unwinds whatever was
+ * already set up, which is why every member is initialized (memset /
+ * dma_region_init / INIT_LIST_HEAD) before the first failable step.
+ */
+static struct dma_iso_ctx *
+alloc_dma_iso_ctx(struct ti_ohci *ohci, int type, int num_desc,
+		  int buf_size, int channel, unsigned int packet_size)
+{
+	struct dma_iso_ctx *d;
+	int i;
+
+	d = kmalloc(sizeof(struct dma_iso_ctx), GFP_KERNEL);
+	if (d == NULL) {
+		PRINT(KERN_ERR, ohci->host->id, "Failed to allocate dma_iso_ctx");
+		return NULL;
+	}
+
+	memset(d, 0, sizeof *d);
+
+	d->ohci = ohci;
+	d->type = type;
+	d->channel = channel;
+	d->num_desc = num_desc;
+	d->frame_size = buf_size;
+	d->buf_size = PAGE_ALIGN(buf_size);
+	d->last_buffer = -1;
+	INIT_LIST_HEAD(&d->link);
+	init_waitqueue_head(&d->waitq);
+
+	/* Init the regions for easy cleanup */
+	dma_region_init(&d->dma);
+
+	if (dma_region_alloc(&d->dma, d->num_desc * d->buf_size, ohci->dev,
+			     PCI_DMA_BIDIRECTIONAL)) {
+		PRINT(KERN_ERR, ohci->host->id, "Failed to allocate dma buffer");
+		free_dma_iso_ctx(d);
+		return NULL;
+	}
+
+	if (type == OHCI_ISO_RECEIVE)
+		ohci1394_init_iso_tasklet(&d->iso_tasklet, type,
+					  wakeup_dma_ir_ctx,
+					  (unsigned long) d);
+	else
+		ohci1394_init_iso_tasklet(&d->iso_tasklet, type,
+					  wakeup_dma_it_ctx,
+					  (unsigned long) d);
+
+	if (ohci1394_register_iso_tasklet(ohci, &d->iso_tasklet) < 0) {
+		PRINT(KERN_ERR, ohci->host->id, "no free iso %s contexts",
+		      type == OHCI_ISO_RECEIVE ? "receive" : "transmit");
+		free_dma_iso_ctx(d);
+		return NULL;
+	}
+	d->ctx = d->iso_tasklet.context;
+
+	d->prg_reg = kmalloc(d->num_desc * sizeof(struct dma_prog_region),
+			GFP_KERNEL);
+	if (d->prg_reg == NULL) {
+		PRINT(KERN_ERR, ohci->host->id, "Failed to allocate ir prg regs");
+		free_dma_iso_ctx(d);
+		return NULL;
+	}
+	/* Makes for easier cleanup */
+	for (i = 0; i < d->num_desc; i++)
+		dma_prog_region_init(&d->prg_reg[i]);
+
+	if (type == OHCI_ISO_RECEIVE) {
+		d->ctrlSet = OHCI1394_IsoRcvContextControlSet+32*d->ctx;
+		d->ctrlClear = OHCI1394_IsoRcvContextControlClear+32*d->ctx;
+		d->cmdPtr = OHCI1394_IsoRcvCommandPtr+32*d->ctx;
+		d->ctxMatch = OHCI1394_IsoRcvContextMatch+32*d->ctx;
+
+		d->ir_prg = kmalloc(d->num_desc * sizeof(struct dma_cmd *),
+				    GFP_KERNEL);
+
+		if (d->ir_prg == NULL) {
+			PRINT(KERN_ERR, ohci->host->id, "Failed to allocate dma ir prg");
+			free_dma_iso_ctx(d);
+			return NULL;
+		}
+		memset(d->ir_prg, 0, d->num_desc * sizeof(struct dma_cmd *));
+
+		/* +1 command: the first descriptor reads only the 4-byte
+		 * packet header (see initialize_dma_ir_prg()) */
+		d->nb_cmd = d->buf_size / PAGE_SIZE + 1;
+		d->left_size = (d->frame_size % PAGE_SIZE) ?
+			d->frame_size % PAGE_SIZE : PAGE_SIZE;
+
+		for (i = 0;i < d->num_desc; i++) {
+			if (dma_prog_region_alloc(&d->prg_reg[i], d->nb_cmd *
+						  sizeof(struct dma_cmd), ohci->dev)) {
+				PRINT(KERN_ERR, ohci->host->id, "Failed to allocate dma ir prg");
+				free_dma_iso_ctx(d);
+				return NULL;
+			}
+			d->ir_prg[i] = (struct dma_cmd *)d->prg_reg[i].kvirt;
+		}
+
+	} else {  /* OHCI_ISO_TRANSMIT */
+		d->ctrlSet = OHCI1394_IsoXmitContextControlSet+16*d->ctx;
+		d->ctrlClear = OHCI1394_IsoXmitContextControlClear+16*d->ctx;
+		d->cmdPtr = OHCI1394_IsoXmitCommandPtr+16*d->ctx;
+
+		d->it_prg = kmalloc(d->num_desc * sizeof(struct it_dma_prg *),
+				    GFP_KERNEL);
+
+		if (d->it_prg == NULL) {
+			PRINT(KERN_ERR, ohci->host->id,
+			      "Failed to allocate dma it prg");
+			free_dma_iso_ctx(d);
+			return NULL;
+		}
+		memset(d->it_prg, 0, d->num_desc*sizeof(struct it_dma_prg *));
+
+		d->packet_size = packet_size;
+
+		/* NOTE(review): packet_size == 0 would divide by zero here;
+		 * callers appear to guarantee a non-zero value -- confirm. */
+		if (PAGE_SIZE % packet_size || packet_size>4096) {
+			PRINT(KERN_ERR, ohci->host->id,
+			      "Packet size %d (page_size: %ld) "
+			      "not yet supported\n",
+			      packet_size, PAGE_SIZE);
+			free_dma_iso_ctx(d);
+			return NULL;
+		}
+
+		d->nb_cmd = d->frame_size / d->packet_size;
+		if (d->frame_size % d->packet_size) {
+			d->nb_cmd++;
+			d->left_size = d->frame_size % d->packet_size;
+		} else
+			d->left_size = d->packet_size;
+
+		for (i = 0; i < d->num_desc; i++) {
+			if (dma_prog_region_alloc(&d->prg_reg[i], d->nb_cmd *
+						sizeof(struct it_dma_prg), ohci->dev)) {
+				PRINT(KERN_ERR, ohci->host->id, "Failed to allocate dma it prg");
+				free_dma_iso_ctx(d);
+				return NULL;
+			}
+			d->it_prg[i] = (struct it_dma_prg *)d->prg_reg[i].kvirt;
+		}
+	}
+
+	d->buffer_status = kmalloc(d->num_desc * sizeof(unsigned int),
+				   GFP_KERNEL);
+	d->buffer_time = kmalloc(d->num_desc * sizeof(struct timeval),
+				   GFP_KERNEL);
+	d->last_used_cmd = kmalloc(d->num_desc * sizeof(unsigned int),
+				   GFP_KERNEL);
+	d->next_buffer = kmalloc(d->num_desc * sizeof(int),
+				 GFP_KERNEL);
+
+	if (d->buffer_status == NULL) {
+		PRINT(KERN_ERR, ohci->host->id, "Failed to allocate buffer_status");
+		free_dma_iso_ctx(d);
+		return NULL;
+	}
+	if (d->buffer_time == NULL) {
+		PRINT(KERN_ERR, ohci->host->id, "Failed to allocate buffer_time");
+		free_dma_iso_ctx(d);
+		return NULL;
+	}
+	if (d->last_used_cmd == NULL) {
+		PRINT(KERN_ERR, ohci->host->id, "Failed to allocate last_used_cmd");
+		free_dma_iso_ctx(d);
+		return NULL;
+	}
+	if (d->next_buffer == NULL) {
+		PRINT(KERN_ERR, ohci->host->id, "Failed to allocate next_buffer");
+		free_dma_iso_ctx(d);
+		return NULL;
+	}
+	memset(d->buffer_status, 0, d->num_desc * sizeof(unsigned int));
+	memset(d->buffer_time, 0, d->num_desc * sizeof(struct timeval));
+	memset(d->last_used_cmd, 0, d->num_desc * sizeof(unsigned int));
+	memset(d->next_buffer, -1, d->num_desc * sizeof(int));
+
+        spin_lock_init(&d->lock);
+
+	PRINT(KERN_INFO, ohci->host->id, "Iso %s DMA: %d buffers "
+	      "of size %d allocated for a frame size %d, each with %d prgs",
+	      (type == OHCI_ISO_RECEIVE) ? "receive" : "transmit",
+	      d->num_desc, d->buf_size, d->frame_size, d->nb_cmd);
+
+	return d;
+}
+
+/*
+ * Re-arm the status (residual count) fields of receive program `n' to
+ * the reqCounts set up in initialize_dma_ir_prg(): 4 bytes for the
+ * header descriptor, PAGE_SIZE-4 / PAGE_SIZE for middle pages, and
+ * left_size for the final descriptor.
+ *
+ * Fix: with a single DMA page per frame (d->nb_cmd == 2) the program
+ * has only two descriptors, so the old unconditional write to
+ * ir_prg[n][i] with i == 2 ran one entry past the allocated program,
+ * and descriptor 1's reqCount is left_size-4, not PAGE_SIZE-4 (matching
+ * the one-page branch of initialize_dma_ir_prg()).
+ */
+static void reset_ir_status(struct dma_iso_ctx *d, int n)
+{
+	int i;
+	d->ir_prg[n][0].status = cpu_to_le32(4);
+	if (d->nb_cmd > 2) {
+		d->ir_prg[n][1].status = cpu_to_le32(PAGE_SIZE-4);
+		for (i = 2; i < d->nb_cmd - 1; i++)
+			d->ir_prg[n][i].status = cpu_to_le32(PAGE_SIZE);
+		d->ir_prg[n][i].status = cpu_to_le32(d->left_size);
+	} else {
+		/* single-page frame: descriptor 1 is also the last one */
+		d->ir_prg[n][1].status = cpu_to_le32(d->left_size - 4);
+	}
+}
+
+/*
+ * Build the OHCI INPUT_MORE program for receive buffer `n': descriptor 0
+ * reads the 4-byte packet header, descriptor 1 the rest of the first
+ * page, middle descriptors a full page each, and the last descriptor
+ * reads left_size bytes and raises an interrupt.  Branch addresses are
+ * or'ed with 0x1 (the Z value telling the controller one descriptor
+ * follows at the branch target).
+ */
+static void initialize_dma_ir_prg(struct dma_iso_ctx *d, int n, int flags)
+{
+	struct dma_cmd *ir_prg = d->ir_prg[n];
+	struct dma_prog_region *ir_reg = &d->prg_reg[n];
+	unsigned long buf = (unsigned long)d->dma.kvirt + n * d->buf_size;
+	int i;
+
+	/* the first descriptor will read only 4 bytes */
+	ir_prg[0].control = cpu_to_le32(DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE |
+		DMA_CTL_BRANCH | 4);
+
+	/* set the sync flag */
+	if (flags & VIDEO1394_SYNC_FRAMES)
+		ir_prg[0].control |= cpu_to_le32(DMA_CTL_WAIT);
+
+	ir_prg[0].address = cpu_to_le32(dma_region_offset_to_bus(&d->dma, buf -
+				(unsigned long)d->dma.kvirt));
+	ir_prg[0].branchAddress = cpu_to_le32((dma_prog_region_offset_to_bus(ir_reg,
+					1 * sizeof(struct dma_cmd)) & 0xfffffff0) | 0x1);
+
+	/* If there is *not* only one DMA page per frame (hence, d->nb_cmd==2) */
+	if (d->nb_cmd > 2) {
+		/* The second descriptor will read PAGE_SIZE-4 bytes */
+		ir_prg[1].control = cpu_to_le32(DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE |
+						DMA_CTL_BRANCH | (PAGE_SIZE-4));
+		ir_prg[1].address = cpu_to_le32(dma_region_offset_to_bus(&d->dma, (buf + 4) -
+						(unsigned long)d->dma.kvirt));
+		ir_prg[1].branchAddress = cpu_to_le32((dma_prog_region_offset_to_bus(ir_reg,
+						      2 * sizeof(struct dma_cmd)) & 0xfffffff0) | 0x1);
+
+		for (i = 2; i < d->nb_cmd - 1; i++) {
+			ir_prg[i].control = cpu_to_le32(DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE |
+							DMA_CTL_BRANCH | PAGE_SIZE);
+			/* -1 page offset: the first page held the header + page remainder */
+			ir_prg[i].address = cpu_to_le32(dma_region_offset_to_bus(&d->dma,
+							(buf+(i-1)*PAGE_SIZE) -
+							(unsigned long)d->dma.kvirt));
+
+			ir_prg[i].branchAddress =
+				cpu_to_le32((dma_prog_region_offset_to_bus(ir_reg,
+					    (i + 1) * sizeof(struct dma_cmd)) & 0xfffffff0) | 0x1);
+		}
+
+		/* The last descriptor will generate an interrupt */
+		ir_prg[i].control = cpu_to_le32(DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE |
+						DMA_CTL_IRQ | DMA_CTL_BRANCH | d->left_size);
+		ir_prg[i].address = cpu_to_le32(dma_region_offset_to_bus(&d->dma,
+						(buf+(i-1)*PAGE_SIZE) -
+						(unsigned long)d->dma.kvirt));
+	} else {
+		/* Only one DMA page is used. Read d->left_size immediately and */
+		/* generate an interrupt as this is also the last page. */
+		ir_prg[1].control = cpu_to_le32(DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE |
+						DMA_CTL_IRQ | DMA_CTL_BRANCH | (d->left_size-4));
+		ir_prg[1].address = cpu_to_le32(dma_region_offset_to_bus(&d->dma,
+						(buf + 4) - (unsigned long)d->dma.kvirt));
+	}
+}
+
+/*
+ * (Re)program the OHCI receive context: stop it, rebuild and re-arm all
+ * buffer programs, then configure the control/context-match registers
+ * and enable the context's receive interrupt.  The register write order
+ * (clear before set) is deliberate.
+ */
+static void initialize_dma_ir_ctx(struct dma_iso_ctx *d, int tag, int flags)
+{
+	struct ti_ohci *ohci = (struct ti_ohci *)d->ohci;
+	int i;
+
+	d->flags = flags;
+
+	ohci1394_stop_context(ohci, d->ctrlClear, NULL);
+
+	for (i=0;i<d->num_desc;i++) {
+		initialize_dma_ir_prg(d, i, flags);
+		reset_ir_status(d, i);
+	}
+
+	/* reset the ctrl register */
+	reg_write(ohci, d->ctrlClear, 0xf0000000);
+
+	/* Set bufferFill */
+	reg_write(ohci, d->ctrlSet, 0x80000000);
+
+	/* Set isoch header */
+	if (flags & VIDEO1394_INCLUDE_ISO_HEADERS)
+		reg_write(ohci, d->ctrlSet, 0x40000000);
+
+	/* Set the context match register to match on all tags,
+	   sync for sync tag, and listen to d->channel */
+	reg_write(ohci, d->ctxMatch, 0xf0000000|((tag&0xf)<<8)|d->channel);
+
+	/* Set up isoRecvIntMask to generate interrupts */
+	reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1<<d->ctx);
+}
+
+/* find which context is listening to this channel */
+static struct dma_iso_ctx *
+find_ctx(struct list_head *list, int type, int channel)
+{
+	struct dma_iso_ctx *it;
+	struct dma_iso_ctx *found = NULL;
+
+	/* Walk the context list and return the first entry that matches
+	 * both the transfer type and the channel, or NULL if none does. */
+	list_for_each_entry(it, list, link) {
+		if (it->type != type || it->channel != channel)
+			continue;
+		found = it;
+		break;
+	}
+
+	return found;
+}
+
/*
 * Isochronous-receive completion handler; @l is the dma_iso_ctx cast to
 * unsigned long (presumably registered as a tasklet/bottom half -- the
 * registration is not visible in this part of the file).
 *
 * A non-zero upper halfword in the status of the last command of a buffer's
 * descriptor program means the hardware finished filling that buffer: its
 * status is reset for reuse, the buffer is marked READY and timestamped,
 * and any sleeper in the LISTEN_WAIT_BUFFER ioctl is woken.
 */
static void wakeup_dma_ir_ctx(unsigned long l)
{
	struct dma_iso_ctx *d = (struct dma_iso_ctx *) l;
	int i;

	spin_lock(&d->lock);

	for (i = 0; i < d->num_desc; i++) {
		if (d->ir_prg[i][d->nb_cmd-1].status & cpu_to_le32(0xFFFF0000)) {
			reset_ir_status(d, i);
			d->buffer_status[i] = VIDEO1394_BUFFER_READY;
			/* record the completion time for the reader */
			do_gettimeofday(&d->buffer_time[i]);
		}
	}

	spin_unlock(&d->lock);

	if (waitqueue_active(&d->waitq))
		wake_up_interruptible(&d->waitq);
}
+
+static inline void put_timestamp(struct ti_ohci *ohci, struct dma_iso_ctx * d,
+				 int n)
+{
+	unsigned char* buf = d->dma.kvirt + n * d->buf_size;
+	u32 cycleTimer;
+	u32 timeStamp;
+
+	if (n == -1) {
+	  return;
+	}
+
+	cycleTimer = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
+
+	timeStamp = ((cycleTimer & 0x0fff) + d->syt_offset); /* 11059 = 450 us */
+	timeStamp = (timeStamp % 3072 + ((timeStamp / 3072) << 12)
+		+ (cycleTimer & 0xf000)) & 0xffff;
+
+	buf[6] = timeStamp >> 8;
+	buf[7] = timeStamp & 0xff;
+
+    /* if first packet is empty packet, then put timestamp into the next full one too */
+    if ( (le32_to_cpu(d->it_prg[n][0].data[1]) >>16) == 0x008) {
+   	    buf += d->packet_size;
+    	buf[6] = timeStamp >> 8;
+	    buf[7] = timeStamp & 0xff;
+	}
+
+    /* do the next buffer frame too in case of irq latency */
+	n = d->next_buffer[n];
+	if (n == -1) {
+	  return;
+	}
+	buf = d->dma.kvirt + n * d->buf_size;
+
+	timeStamp += (d->last_used_cmd[n] << 12) & 0xffff;
+
+	buf[6] = timeStamp >> 8;
+	buf[7] = timeStamp & 0xff;
+
+    /* if first packet is empty packet, then put timestamp into the next full one too */
+    if ( (le32_to_cpu(d->it_prg[n][0].data[1]) >>16) == 0x008) {
+   	    buf += d->packet_size;
+    	buf[6] = timeStamp >> 8;
+	    buf[7] = timeStamp & 0xff;
+	}
+
+#if 0
+	printk("curr: %d, next: %d, cycleTimer: %08x timeStamp: %08x\n",
+	       curr, n, cycleTimer, timeStamp);
+#endif
+}
+
/*
 * Isochronous-transmit completion handler; @l is the dma_iso_ctx cast to
 * unsigned long (presumably registered as a tasklet/bottom half -- the
 * registration is not visible in this part of the file).
 *
 * A non-zero upper halfword in the end status of a buffer's last used
 * command means the hardware finished sending that buffer: the following
 * buffer (if any) gets its timestamp refreshed, the status word is cleared
 * and the buffer marked READY; any sleeper in TALK_WAIT_BUFFER is woken.
 */
static void wakeup_dma_it_ctx(unsigned long l)
{
	struct dma_iso_ctx *d = (struct dma_iso_ctx *) l;
	struct ti_ohci *ohci = d->ohci;
	int i;

	spin_lock(&d->lock);

	for (i = 0; i < d->num_desc; i++) {
		if (d->it_prg[i][d->last_used_cmd[i]].end.status &
		    cpu_to_le32(0xFFFF0000)) {
			int next = d->next_buffer[i];
			put_timestamp(ohci, d, next);
			d->it_prg[i][d->last_used_cmd[i]].end.status = 0;
			d->buffer_status[i] = VIDEO1394_BUFFER_READY;
		}
	}

	spin_unlock(&d->lock);

	if (waitqueue_active(&d->waitq))
		wake_up_interruptible(&d->waitq);
}
+
/*
 * Build the transmit descriptor program for buffer @n: one pair of
 * OUTPUT_MORE-immediate (8-byte iso packet header) + OUTPUT_LAST (payload)
 * descriptors per packet, chained via branchAddress.  The first packet
 * carries @sync_tag in its header; the last descriptor sends the remaining
 * d->left_size bytes, raises an interrupt and does not branch.
 */
static void initialize_dma_it_prg(struct dma_iso_ctx *d, int n, int sync_tag)
{
	struct it_dma_prg *it_prg = d->it_prg[n];
	struct dma_prog_region *it_reg = &d->prg_reg[n];
	unsigned long buf = (unsigned long)d->dma.kvirt + n * d->buf_size;
	int i;
	d->last_used_cmd[n] = d->nb_cmd - 1;
	for (i=0;i<d->nb_cmd;i++) {

		/* immediate descriptor: the 8-byte iso packet header follows
		   inline in data[] */
		it_prg[i].begin.control = cpu_to_le32(DMA_CTL_OUTPUT_MORE |
			DMA_CTL_IMMEDIATE | 8) ;
		it_prg[i].begin.address = 0;

		it_prg[i].begin.status = 0;

		/* iso header: speed, tag=1, channel, tcode */
		it_prg[i].data[0] = cpu_to_le32(
			(IEEE1394_SPEED_100 << 16)
			| (/* tag */ 1 << 14)
			| (d->channel << 8)
			| (TCODE_ISO_DATA << 4));
		if (i==0) it_prg[i].data[0] |= cpu_to_le32(sync_tag);
		it_prg[i].data[1] = cpu_to_le32(d->packet_size << 16);
		it_prg[i].data[2] = 0;
		it_prg[i].data[3] = 0;

		it_prg[i].end.control = cpu_to_le32(DMA_CTL_OUTPUT_LAST |
			    	    	     DMA_CTL_BRANCH);
		/* payload of packet i lives at offset i*packet_size in the
		   buffer */
		it_prg[i].end.address =
			cpu_to_le32(dma_region_offset_to_bus(&d->dma, (buf+i*d->packet_size) -
						(unsigned long)d->dma.kvirt));

		if (i<d->nb_cmd-1) {
			it_prg[i].end.control |= cpu_to_le32(d->packet_size);
			/* chain to the next descriptor pair; low bits 0x3 =
			   Z value (two descriptors per block) */
			it_prg[i].begin.branchAddress =
				cpu_to_le32((dma_prog_region_offset_to_bus(it_reg, (i + 1) *
					sizeof(struct it_dma_prg)) & 0xfffffff0) | 0x3);
			it_prg[i].end.branchAddress =
				cpu_to_le32((dma_prog_region_offset_to_bus(it_reg, (i + 1) *
					sizeof(struct it_dma_prg)) & 0xfffffff0) | 0x3);
		} else {
			/* the last prg generates an interrupt */
			it_prg[i].end.control |= cpu_to_le32(DMA_CTL_UPDATE |
				DMA_CTL_IRQ | d->left_size);
			/* the last prg doesn't branch */
			it_prg[i].begin.branchAddress = 0;
			it_prg[i].end.branchAddress = 0;
		}
		it_prg[i].end.status = 0;
	}
}
+
/*
 * Re-patch the transmit program of buffer @n for VARIABLE_PACKET_SIZE mode:
 * each packet i gets the caller-supplied packet_sizes[i] (clamped to
 * d->packet_size).  A zero-sized next packet, or the end of the command
 * list, terminates the chain: that descriptor raises the interrupt, does
 * not branch, and is remembered in last_used_cmd[n].
 */
static void initialize_dma_it_prg_var_packet_queue(
	struct dma_iso_ctx *d, int n, unsigned int * packet_sizes,
	struct ti_ohci *ohci)
{
	struct it_dma_prg *it_prg = d->it_prg[n];
	struct dma_prog_region *it_reg = &d->prg_reg[n];
	int i;

#if 0
	if (n != -1) {
		put_timestamp(ohci, d, n);
	}
#endif
	d->last_used_cmd[n] = d->nb_cmd - 1;

	for (i = 0; i < d->nb_cmd; i++) {
		unsigned int size;
		/* clamp the requested size to the context's maximum */
		if (packet_sizes[i] > d->packet_size) {
			size = d->packet_size;
		} else {
			size = packet_sizes[i];
		}
		it_prg[i].data[1] = cpu_to_le32(size << 16);
		it_prg[i].end.control = cpu_to_le32(DMA_CTL_OUTPUT_LAST | DMA_CTL_BRANCH);

		if (i < d->nb_cmd-1 && packet_sizes[i+1] != 0) {
			it_prg[i].end.control |= cpu_to_le32(size);
			it_prg[i].begin.branchAddress =
				cpu_to_le32((dma_prog_region_offset_to_bus(it_reg, (i + 1) *
					sizeof(struct it_dma_prg)) & 0xfffffff0) | 0x3);
			it_prg[i].end.branchAddress =
				cpu_to_le32((dma_prog_region_offset_to_bus(it_reg, (i + 1) *
					sizeof(struct it_dma_prg)) & 0xfffffff0) | 0x3);
		} else {
			/* the last prg generates an interrupt */
			it_prg[i].end.control |= cpu_to_le32(DMA_CTL_UPDATE |
				DMA_CTL_IRQ | size);
			/* the last prg doesn't branch */
			it_prg[i].begin.branchAddress = 0;
			it_prg[i].end.branchAddress = 0;
			d->last_used_cmd[n] = i;
			break;
		}
	}
}
+
/*
 * Program an isochronous-transmit DMA context: stop it, rebuild every
 * buffer's descriptor program with @sync_tag, record the SYT offset
 * (defaulting to 11000 when the caller passes 0) and unmask the context's
 * transmit interrupt.
 */
static void initialize_dma_it_ctx(struct dma_iso_ctx *d, int sync_tag,
				  unsigned int syt_offset, int flags)
{
	struct ti_ohci *ohci = (struct ti_ohci *)d->ohci;
	int i;

	d->flags = flags;
	d->syt_offset = (syt_offset == 0 ? 11000 : syt_offset);

	ohci1394_stop_context(ohci, d->ctrlClear, NULL);

	for (i=0;i<d->num_desc;i++)
		initialize_dma_it_prg(d, i, sync_tag);

	/* Set up isoXmitIntMask to generate interrupts */
	reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1<<d->ctx);
}
+
+static inline unsigned video1394_buffer_state(struct dma_iso_ctx *d,
+					      unsigned int buffer)
+{
+	unsigned long flags;
+	unsigned int ret;
+	spin_lock_irqsave(&d->lock, flags);
+	ret = d->buffer_status[buffer];
+	spin_unlock_irqrestore(&d->lock, flags);
+	return ret;
+}
+
+static int __video1394_ioctl(struct file *file,
+			     unsigned int cmd, unsigned long arg)
+{
+	struct file_ctx *ctx = (struct file_ctx *)file->private_data;
+	struct ti_ohci *ohci = ctx->ohci;
+	unsigned long flags;
+	void __user *argp = (void __user *)arg;
+
+	switch(cmd)
+	{
+	case VIDEO1394_IOC_LISTEN_CHANNEL:
+	case VIDEO1394_IOC_TALK_CHANNEL:
+	{
+		struct video1394_mmap v;
+		u64 mask;
+		struct dma_iso_ctx *d;
+		int i;
+
+		if (copy_from_user(&v, argp, sizeof(v)))
+			return -EFAULT;
+
+		/* if channel < 0, find lowest available one */
+		if (v.channel < 0) {
+		    mask = (u64)0x1;
+		    for (i=0; ; i++) {
+			if (i == ISO_CHANNELS) {
+			    PRINT(KERN_ERR, ohci->host->id, 
+				  "No free channel found");
+			    return EAGAIN;
+			}
+			if (!(ohci->ISO_channel_usage & mask)) {
+			    v.channel = i;
+			    PRINT(KERN_INFO, ohci->host->id, "Found free channel %d", i);
+			    break;
+			}
+			mask = mask << 1;
+		    }
+		} else if (v.channel >= ISO_CHANNELS) {
+			PRINT(KERN_ERR, ohci->host->id,
+			      "Iso channel %d out of bounds", v.channel);
+			return -EINVAL;
+		} else {
+			mask = (u64)0x1<<v.channel;
+		}
+		PRINT(KERN_INFO, ohci->host->id, "mask: %08X%08X usage: %08X%08X\n",
+			(u32)(mask>>32),(u32)(mask&0xffffffff),
+			(u32)(ohci->ISO_channel_usage>>32),
+			(u32)(ohci->ISO_channel_usage&0xffffffff));
+		if (ohci->ISO_channel_usage & mask) {
+			PRINT(KERN_ERR, ohci->host->id,
+			      "Channel %d is already taken", v.channel);
+			return -EBUSY;
+		}
+
+		if (v.buf_size == 0 || v.buf_size > VIDEO1394_MAX_SIZE) {
+			PRINT(KERN_ERR, ohci->host->id,
+			      "Invalid %d length buffer requested",v.buf_size);
+			return -EINVAL;
+		}
+
+		if (v.nb_buffers == 0 || v.nb_buffers > VIDEO1394_MAX_SIZE) {
+			PRINT(KERN_ERR, ohci->host->id,
+			      "Invalid %d buffers requested",v.nb_buffers);
+			return -EINVAL;
+		}
+
+		if (v.nb_buffers * v.buf_size > VIDEO1394_MAX_SIZE) {
+			PRINT(KERN_ERR, ohci->host->id,
+			      "%d buffers of size %d bytes is too big",
+			      v.nb_buffers, v.buf_size);
+			return -EINVAL;
+		}
+
+		if (cmd == VIDEO1394_IOC_LISTEN_CHANNEL) {
+			d = alloc_dma_iso_ctx(ohci, OHCI_ISO_RECEIVE,
+					      v.nb_buffers, v.buf_size,
+					      v.channel, 0);
+
+			if (d == NULL) {
+				PRINT(KERN_ERR, ohci->host->id,
+				      "Couldn't allocate ir context");
+				return -EAGAIN;
+			}
+			initialize_dma_ir_ctx(d, v.sync_tag, v.flags);
+
+			ctx->current_ctx = d;
+
+			v.buf_size = d->buf_size;
+			list_add_tail(&d->link, &ctx->context_list);
+
+			PRINT(KERN_INFO, ohci->host->id,
+			      "iso context %d listen on channel %d",
+			      d->ctx, v.channel);
+		}
+		else {
+			d = alloc_dma_iso_ctx(ohci, OHCI_ISO_TRANSMIT,
+					      v.nb_buffers, v.buf_size,
+					      v.channel, v.packet_size);
+
+			if (d == NULL) {
+				PRINT(KERN_ERR, ohci->host->id,
+				      "Couldn't allocate it context");
+				return -EAGAIN;
+			}
+			initialize_dma_it_ctx(d, v.sync_tag,
+					      v.syt_offset, v.flags);
+
+			ctx->current_ctx = d;
+
+			v.buf_size = d->buf_size;
+
+			list_add_tail(&d->link, &ctx->context_list);
+
+			PRINT(KERN_INFO, ohci->host->id,
+			      "Iso context %d talk on channel %d", d->ctx,
+			      v.channel);
+		}
+
+		if (copy_to_user((void *)arg, &v, sizeof(v))) {
+			/* FIXME : free allocated dma resources */
+			return -EFAULT;
+		}
+		
+		ohci->ISO_channel_usage |= mask;
+
+		return 0;
+	}
+	case VIDEO1394_IOC_UNLISTEN_CHANNEL:
+	case VIDEO1394_IOC_UNTALK_CHANNEL:
+	{
+		int channel;
+		u64 mask;
+		struct dma_iso_ctx *d;
+
+		if (copy_from_user(&channel, argp, sizeof(int)))
+			return -EFAULT;
+
+		if (channel < 0 || channel >= ISO_CHANNELS) {
+			PRINT(KERN_ERR, ohci->host->id,
+			      "Iso channel %d out of bound", channel);
+			return -EINVAL;
+		}
+		mask = (u64)0x1<<channel;
+		if (!(ohci->ISO_channel_usage & mask)) {
+			PRINT(KERN_ERR, ohci->host->id,
+			      "Channel %d is not being used", channel);
+			return -ESRCH;
+		}
+
+		/* Mark this channel as unused */
+		ohci->ISO_channel_usage &= ~mask;
+
+		if (cmd == VIDEO1394_IOC_UNLISTEN_CHANNEL)
+			d = find_ctx(&ctx->context_list, OHCI_ISO_RECEIVE, channel);
+		else
+			d = find_ctx(&ctx->context_list, OHCI_ISO_TRANSMIT, channel);
+
+		if (d == NULL) return -ESRCH;
+		PRINT(KERN_INFO, ohci->host->id, "Iso context %d "
+		      "stop talking on channel %d", d->ctx, channel);
+		free_dma_iso_ctx(d);
+
+		return 0;
+	}
+	case VIDEO1394_IOC_LISTEN_QUEUE_BUFFER:
+	{
+		struct video1394_wait v;
+		struct dma_iso_ctx *d;
+
+		if (copy_from_user(&v, argp, sizeof(v)))
+			return -EFAULT;
+
+		d = find_ctx(&ctx->context_list, OHCI_ISO_RECEIVE, v.channel);
+		if (d == NULL) return -EFAULT;
+
+		if ((v.buffer<0) || (v.buffer>d->num_desc)) {
+			PRINT(KERN_ERR, ohci->host->id,
+			      "Buffer %d out of range",v.buffer);
+			return -EINVAL;
+		}
+
+		spin_lock_irqsave(&d->lock,flags);
+
+		if (d->buffer_status[v.buffer]==VIDEO1394_BUFFER_QUEUED) {
+			PRINT(KERN_ERR, ohci->host->id,
+			      "Buffer %d is already used",v.buffer);
+			spin_unlock_irqrestore(&d->lock,flags);
+			return -EBUSY;
+		}
+
+		d->buffer_status[v.buffer]=VIDEO1394_BUFFER_QUEUED;
+
+		if (d->last_buffer>=0)
+			d->ir_prg[d->last_buffer][d->nb_cmd-1].branchAddress =
+				cpu_to_le32((dma_prog_region_offset_to_bus(&d->prg_reg[v.buffer], 0)
+					& 0xfffffff0) | 0x1);
+
+		d->last_buffer = v.buffer;
+
+		d->ir_prg[d->last_buffer][d->nb_cmd-1].branchAddress = 0;
+
+		spin_unlock_irqrestore(&d->lock,flags);
+
+		if (!(reg_read(ohci, d->ctrlSet) & 0x8000))
+		{
+			DBGMSG(ohci->host->id, "Starting iso DMA ctx=%d",d->ctx);
+
+			/* Tell the controller where the first program is */
+			reg_write(ohci, d->cmdPtr,
+				dma_prog_region_offset_to_bus(&d->prg_reg[v.buffer], 0) | 0x1);
+
+			/* Run IR context */
+			reg_write(ohci, d->ctrlSet, 0x8000);
+		}
+		else {
+			/* Wake up dma context if necessary */
+			if (!(reg_read(ohci, d->ctrlSet) & 0x400)) {
+				PRINT(KERN_INFO, ohci->host->id,
+				      "Waking up iso dma ctx=%d", d->ctx);
+				reg_write(ohci, d->ctrlSet, 0x1000);
+			}
+		}
+		return 0;
+
+	}
+	case VIDEO1394_IOC_LISTEN_WAIT_BUFFER:
+	case VIDEO1394_IOC_LISTEN_POLL_BUFFER:
+	{
+		struct video1394_wait v;
+		struct dma_iso_ctx *d;
+		int i;
+
+		if (copy_from_user(&v, argp, sizeof(v)))
+			return -EFAULT;
+
+		d = find_ctx(&ctx->context_list, OHCI_ISO_RECEIVE, v.channel);
+		if (d == NULL) return -EFAULT;
+
+		if ((v.buffer<0) || (v.buffer>d->num_desc)) {
+			PRINT(KERN_ERR, ohci->host->id,
+			      "Buffer %d out of range",v.buffer);
+			return -EINVAL;
+		}
+
+		/*
+		 * I change the way it works so that it returns
+		 * the last received frame.
+		 */
+		spin_lock_irqsave(&d->lock, flags);
+		switch(d->buffer_status[v.buffer]) {
+		case VIDEO1394_BUFFER_READY:
+			d->buffer_status[v.buffer]=VIDEO1394_BUFFER_FREE;
+			break;
+		case VIDEO1394_BUFFER_QUEUED:
+			if (cmd == VIDEO1394_IOC_LISTEN_POLL_BUFFER) {
+			    /* for polling, return error code EINTR */
+			    spin_unlock_irqrestore(&d->lock, flags);
+			    return -EINTR;
+			}
+
+			spin_unlock_irqrestore(&d->lock, flags);
+			wait_event_interruptible(d->waitq,
+					video1394_buffer_state(d, v.buffer) ==
+					 VIDEO1394_BUFFER_READY);
+			if (signal_pending(current))
+                                return -EINTR;
+			spin_lock_irqsave(&d->lock, flags);
+			d->buffer_status[v.buffer]=VIDEO1394_BUFFER_FREE;
+			break;
+		default:
+			PRINT(KERN_ERR, ohci->host->id,
+			      "Buffer %d is not queued",v.buffer);
+			spin_unlock_irqrestore(&d->lock, flags);
+			return -ESRCH;
+		}
+
+		/* set time of buffer */
+		v.filltime = d->buffer_time[v.buffer];
+//		printk("Buffer %d time %d\n", v.buffer, (d->buffer_time[v.buffer]).tv_usec);
+
+		/*
+		 * Look ahead to see how many more buffers have been received
+		 */
+		i=0;
+		while (d->buffer_status[(v.buffer+1)%d->num_desc]==
+		       VIDEO1394_BUFFER_READY) {
+			v.buffer=(v.buffer+1)%d->num_desc;
+			i++;
+		}
+		spin_unlock_irqrestore(&d->lock, flags);
+
+		v.buffer=i;
+		if (copy_to_user(argp, &v, sizeof(v)))
+			return -EFAULT;
+
+		return 0;
+	}
+	case VIDEO1394_IOC_TALK_QUEUE_BUFFER:
+	{
+		struct video1394_wait v;
+		unsigned int *psizes = NULL;
+		struct dma_iso_ctx *d;
+
+		if (copy_from_user(&v, argp, sizeof(v)))
+			return -EFAULT;
+
+		d = find_ctx(&ctx->context_list, OHCI_ISO_TRANSMIT, v.channel);
+		if (d == NULL) return -EFAULT;
+
+		if ((v.buffer<0) || (v.buffer>d->num_desc)) {
+			PRINT(KERN_ERR, ohci->host->id,
+			      "Buffer %d out of range",v.buffer);
+			return -EINVAL;
+		}
+
+		if (d->flags & VIDEO1394_VARIABLE_PACKET_SIZE) {
+			int buf_size = d->nb_cmd * sizeof(unsigned int);
+			struct video1394_queue_variable __user *p = argp;
+			unsigned int __user *qv;
+
+			if (get_user(qv, &p->packet_sizes))
+				return -EFAULT;
+
+			psizes = kmalloc(buf_size, GFP_KERNEL);
+			if (!psizes)
+				return -ENOMEM;
+
+			if (copy_from_user(psizes, qv, buf_size)) {
+				kfree(psizes);
+				return -EFAULT;
+			}
+		}
+
+		spin_lock_irqsave(&d->lock,flags);
+
+		if (d->buffer_status[v.buffer]!=VIDEO1394_BUFFER_FREE) {
+			PRINT(KERN_ERR, ohci->host->id,
+			      "Buffer %d is already used",v.buffer);
+			spin_unlock_irqrestore(&d->lock,flags);
+			if (psizes)
+				kfree(psizes);
+			return -EBUSY;
+		}
+
+		if (d->flags & VIDEO1394_VARIABLE_PACKET_SIZE) {
+			initialize_dma_it_prg_var_packet_queue(
+				d, v.buffer, psizes,
+				ohci);
+		}
+
+		d->buffer_status[v.buffer]=VIDEO1394_BUFFER_QUEUED;
+
+		if (d->last_buffer >= 0) {
+			d->it_prg[d->last_buffer]
+				[ d->last_used_cmd[d->last_buffer] ].end.branchAddress =
+					cpu_to_le32((dma_prog_region_offset_to_bus(&d->prg_reg[v.buffer],
+						0) & 0xfffffff0) | 0x3);
+
+			d->it_prg[d->last_buffer]
+				[ d->last_used_cmd[d->last_buffer] ].begin.branchAddress =
+					cpu_to_le32((dma_prog_region_offset_to_bus(&d->prg_reg[v.buffer],
+						0) & 0xfffffff0) | 0x3);
+			d->next_buffer[d->last_buffer] = v.buffer;
+		}
+		d->last_buffer = v.buffer;
+		d->next_buffer[d->last_buffer] = -1;
+
+		d->it_prg[d->last_buffer][d->last_used_cmd[d->last_buffer]].end.branchAddress = 0;
+
+		spin_unlock_irqrestore(&d->lock,flags);
+
+		if (!(reg_read(ohci, d->ctrlSet) & 0x8000))
+		{
+			DBGMSG(ohci->host->id, "Starting iso transmit DMA ctx=%d",
+			       d->ctx);
+			put_timestamp(ohci, d, d->last_buffer);
+
+			/* Tell the controller where the first program is */
+			reg_write(ohci, d->cmdPtr,
+				dma_prog_region_offset_to_bus(&d->prg_reg[v.buffer], 0) | 0x3);
+
+			/* Run IT context */
+			reg_write(ohci, d->ctrlSet, 0x8000);
+		}
+		else {
+			/* Wake up dma context if necessary */
+			if (!(reg_read(ohci, d->ctrlSet) & 0x400)) {
+				PRINT(KERN_INFO, ohci->host->id,
+				      "Waking up iso transmit dma ctx=%d",
+				      d->ctx);
+				put_timestamp(ohci, d, d->last_buffer);
+				reg_write(ohci, d->ctrlSet, 0x1000);
+			}
+		}
+
+		if (psizes)
+			kfree(psizes);
+
+		return 0;
+
+	}
+	case VIDEO1394_IOC_TALK_WAIT_BUFFER:
+	{
+		struct video1394_wait v;
+		struct dma_iso_ctx *d;
+
+		if (copy_from_user(&v, argp, sizeof(v)))
+			return -EFAULT;
+
+		d = find_ctx(&ctx->context_list, OHCI_ISO_TRANSMIT, v.channel);
+		if (d == NULL) return -EFAULT;
+
+		if ((v.buffer<0) || (v.buffer>d->num_desc)) {
+			PRINT(KERN_ERR, ohci->host->id,
+			      "Buffer %d out of range",v.buffer);
+			return -EINVAL;
+		}
+
+		switch(d->buffer_status[v.buffer]) {
+		case VIDEO1394_BUFFER_READY:
+			d->buffer_status[v.buffer]=VIDEO1394_BUFFER_FREE;
+			return 0;
+		case VIDEO1394_BUFFER_QUEUED:
+			wait_event_interruptible(d->waitq,
+					(d->buffer_status[v.buffer] == VIDEO1394_BUFFER_READY));
+			if (signal_pending(current))
+				return -EINTR;
+			d->buffer_status[v.buffer]=VIDEO1394_BUFFER_FREE;
+			return 0;
+		default:
+			PRINT(KERN_ERR, ohci->host->id,
+			      "Buffer %d is not queued",v.buffer);
+			return -ESRCH;
+		}
+	}
+	default:
+		return -ENOTTY;
+	}
+}
+
/* ioctl entry point: serialize all command handling through the BKL. */
static long video1394_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	long ret;

	lock_kernel();
	ret = __video1394_ioctl(file, cmd, arg);
	unlock_kernel();

	return ret;
}
+
+/*
+ *	This maps the vmalloced and reserved buffer to user space.
+ *
+ *  FIXME:
+ *  - PAGE_READONLY should suffice!?
+ *  - remap_pfn_range is kind of inefficient for page by page remapping.
+ *    But e.g. pte_alloc() does not work in modules ... :-(
+ */
+
+static int video1394_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	struct file_ctx *ctx = (struct file_ctx *)file->private_data;
+	int res = -EINVAL;
+
+	lock_kernel();
+	if (ctx->current_ctx == NULL) {
+		PRINT(KERN_ERR, ctx->ohci->host->id, "Current iso context not set");
+	} else
+		res = dma_region_mmap(&ctx->current_ctx->dma, file, vma);
+	unlock_kernel();
+
+	return res;
+}
+
+static int video1394_open(struct inode *inode, struct file *file)
+{
+	int i = ieee1394_file_to_instance(file);
+	struct ti_ohci *ohci;
+	struct file_ctx *ctx;
+
+	ohci = hpsb_get_hostinfo_bykey(&video1394_highlevel, i);
+        if (ohci == NULL)
+                return -EIO;
+
+	ctx = kmalloc(sizeof(struct file_ctx), GFP_KERNEL);
+	if (ctx == NULL)  {
+		PRINT(KERN_ERR, ohci->host->id, "Cannot malloc file_ctx");
+		return -ENOMEM;
+	}
+
+	memset(ctx, 0, sizeof(struct file_ctx));
+	ctx->ohci = ohci;
+	INIT_LIST_HEAD(&ctx->context_list);
+	ctx->current_ctx = NULL;
+	file->private_data = ctx;
+
+	return 0;
+}
+
/*
 * release(): free every iso context still owned by this file descriptor,
 * releasing each context's channel reservation on the host, then free the
 * per-file context itself.  Runs under the BKL.
 */
static int video1394_release(struct inode *inode, struct file *file)
{
	struct file_ctx *ctx = (struct file_ctx *)file->private_data;
	struct ti_ohci *ohci = ctx->ohci;
	struct list_head *lh, *next;
	u64 mask;

	lock_kernel();
	/* _safe variant: free_dma_iso_ctx() unlinks entries as we walk */
	list_for_each_safe(lh, next, &ctx->context_list) {
		struct dma_iso_ctx *d;
		d = list_entry(lh, struct dma_iso_ctx, link);
		mask = (u64) 1 << d->channel;

		if (!(ohci->ISO_channel_usage & mask))
			PRINT(KERN_ERR, ohci->host->id, "On release: Channel %d "
			      "is not being used", d->channel);
		else
			ohci->ISO_channel_usage &= ~mask;
		PRINT(KERN_INFO, ohci->host->id, "On release: Iso %s context "
		      "%d stop listening on channel %d",
		      d->type == OHCI_ISO_RECEIVE ? "receive" : "transmit",
		      d->ctx, d->channel);
		free_dma_iso_ctx(d);
	}

	kfree(ctx);
	file->private_data = NULL;

	unlock_kernel();
	return 0;
}
+
+#ifdef CONFIG_COMPAT
+static long video1394_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg);
+#endif
+
/* Character device registered for the video1394 minor block (see
   video1394_init_module). */
static struct cdev video1394_cdev;
/* File operations: ioctls use the unlocked entry point and serialize
   internally via the BKL (video1394_ioctl). */
static struct file_operations video1394_fops=
{
	.owner =	THIS_MODULE,
	.unlocked_ioctl = video1394_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = video1394_compat_ioctl,
#endif
	.mmap =		video1394_mmap,
	.open =		video1394_open,
	.release =	video1394_release
};
+
+/*** HOTPLUG STUFF **********************************************************/
+/*
+ * Export information about protocols/devices supported by this driver.
+ */
+static struct ieee1394_device_id video1394_id_table[] = {
+	{
+		.match_flags	= IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
+		.specifier_id	= CAMERA_UNIT_SPEC_ID_ENTRY & 0xffffff,
+		.version	= CAMERA_SW_VERSION_ENTRY & 0xffffff
+	},
+        {
+                .match_flags    = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
+                .specifier_id   = CAMERA_UNIT_SPEC_ID_ENTRY & 0xffffff,
+                .version        = (CAMERA_SW_VERSION_ENTRY + 1) & 0xffffff
+        },
+        {
+                .match_flags    = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
+                .specifier_id   = CAMERA_UNIT_SPEC_ID_ENTRY & 0xffffff,
+                .version        = (CAMERA_SW_VERSION_ENTRY + 2) & 0xffffff
+        },
+	{ }
+};
+
+MODULE_DEVICE_TABLE(ieee1394, video1394_id_table);
+
/* Protocol driver registered with the ieee1394 core; binds the id table
   above to the ieee1394 bus. */
static struct hpsb_protocol_driver video1394_driver = {
	.name		= "1394 Digital Camera Driver",
	.id_table	= video1394_id_table,
	.driver		= {
		.name	= VIDEO1394_DRIVER_NAME,
		.bus	= &ieee1394_bus_type,
	},
};
+
+
/*
 * Highlevel add_host callback: for each OHCI-1394 host, record its
 * hostinfo (keyed by host id) and create the class device and devfs node
 * for this host's video1394 minor.
 */
static void video1394_add_host (struct hpsb_host *host)
{
	struct ti_ohci *ohci;
	int minor;

	/* We only work with the OHCI-1394 driver */
	if (strcmp(host->driver->name, OHCI1394_DRIVER_NAME))
		return;

	ohci = (struct ti_ohci *)host->hostdata;

	if (!hpsb_create_hostinfo(&video1394_highlevel, host, 0)) {
		PRINT(KERN_ERR, ohci->host->id, "Cannot allocate hostinfo");
		return;
	}

	hpsb_set_hostinfo(&video1394_highlevel, host, ohci);
	hpsb_set_hostinfo_key(&video1394_highlevel, host, ohci->host->id);

	/* one minor per host inside the video1394 minor block */
	minor = IEEE1394_MINOR_BLOCK_VIDEO1394 * 16 + ohci->host->id;
	class_simple_device_add(hpsb_protocol_class, MKDEV(
		IEEE1394_MAJOR,	minor), 
		NULL, "%s-%d", VIDEO1394_DRIVER_NAME, ohci->host->id);
	devfs_mk_cdev(MKDEV(IEEE1394_MAJOR, minor),
		       S_IFCHR | S_IRUSR | S_IWUSR,
		       "%s/%d", VIDEO1394_DRIVER_NAME, ohci->host->id);
}
+
+
+static void video1394_remove_host (struct hpsb_host *host)
+{
+	struct ti_ohci *ohci = hpsb_get_hostinfo(&video1394_highlevel, host);
+
+	if (ohci) {
+		class_simple_device_remove(MKDEV(IEEE1394_MAJOR, 
+			IEEE1394_MINOR_BLOCK_VIDEO1394 * 16 + ohci->host->id));
+		devfs_remove("%s/%d", VIDEO1394_DRIVER_NAME, ohci->host->id);
+	}
+	
+	return;
+}
+
+
/* Highlevel driver hooks: called by the ieee1394 core when hosts come
   and go. */
static struct hpsb_highlevel video1394_highlevel = {
	.name =		VIDEO1394_DRIVER_NAME,
	.add_host =	video1394_add_host,
	.remove_host =	video1394_remove_host,
};
+
+MODULE_AUTHOR("Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>");
+MODULE_DESCRIPTION("driver for digital video on OHCI board");
+MODULE_SUPPORTED_DEVICE(VIDEO1394_DRIVER_NAME);
+MODULE_LICENSE("GPL");
+
+#ifdef CONFIG_COMPAT
+
+#define VIDEO1394_IOC32_LISTEN_QUEUE_BUFFER     \
+	_IOW ('#', 0x12, struct video1394_wait32)
+#define VIDEO1394_IOC32_LISTEN_WAIT_BUFFER      \
+	_IOWR('#', 0x13, struct video1394_wait32)
+#define VIDEO1394_IOC32_TALK_WAIT_BUFFER        \
+	_IOW ('#', 0x17, struct video1394_wait32)
+#define VIDEO1394_IOC32_LISTEN_POLL_BUFFER      \
+	_IOWR('#', 0x18, struct video1394_wait32)
+
/* 32-bit layout of struct video1394_wait, used by the compat_ioctl
   translation below (the native struct embeds a 64-bit struct timeval). */
struct video1394_wait32 {
	u32 channel;
	u32 buffer;
	struct compat_timeval filltime;	/* time of buffer full */
};
+
+static int video1394_wr_wait32(struct file *file, unsigned int cmd, unsigned long arg)
+{
+        struct video1394_wait32 __user *argp = (void __user *)arg;
+        struct video1394_wait32 wait32;
+        struct video1394_wait wait;
+        mm_segment_t old_fs;
+        int ret;
+
+        if (copy_from_user(&wait32, argp, sizeof(wait32)))
+                return -EFAULT;
+
+        wait.channel = wait32.channel;
+        wait.buffer = wait32.buffer;
+        wait.filltime.tv_sec = (time_t)wait32.filltime.tv_sec;
+        wait.filltime.tv_usec = (suseconds_t)wait32.filltime.tv_usec;
+
+        old_fs = get_fs();
+        set_fs(KERNEL_DS);
+        if (cmd == VIDEO1394_IOC32_LISTEN_WAIT_BUFFER)
+		ret = video1394_ioctl(file,
+				      VIDEO1394_IOC_LISTEN_WAIT_BUFFER,
+				      (unsigned long) &wait);
+        else
+		ret = video1394_ioctl(file,
+				      VIDEO1394_IOC_LISTEN_POLL_BUFFER,
+				      (unsigned long) &wait);
+        set_fs(old_fs);
+
+        if (!ret) {
+                wait32.channel = wait.channel;
+                wait32.buffer = wait.buffer;
+                wait32.filltime.tv_sec = (int)wait.filltime.tv_sec;
+                wait32.filltime.tv_usec = (int)wait.filltime.tv_usec;
+
+                if (copy_to_user(argp, &wait32, sizeof(wait32)))
+                        ret = -EFAULT;
+        }
+
+        return ret;
+}
+
+static int video1394_w_wait32(struct file *file, unsigned int cmd, unsigned long arg)
+{
+        struct video1394_wait32 wait32;
+        struct video1394_wait wait;
+        mm_segment_t old_fs;
+        int ret;
+
+        if (copy_from_user(&wait32, (void __user *)arg, sizeof(wait32)))
+                return -EFAULT;
+
+        wait.channel = wait32.channel;
+        wait.buffer = wait32.buffer;
+        wait.filltime.tv_sec = (time_t)wait32.filltime.tv_sec;
+        wait.filltime.tv_usec = (suseconds_t)wait32.filltime.tv_usec;
+
+        old_fs = get_fs();
+        set_fs(KERNEL_DS);
+        if (cmd == VIDEO1394_IOC32_LISTEN_QUEUE_BUFFER)
+		ret = video1394_ioctl(file,
+				      VIDEO1394_IOC_LISTEN_QUEUE_BUFFER,
+				      (unsigned long) &wait);
+        else
+		ret = video1394_ioctl(file,
+				      VIDEO1394_IOC_TALK_WAIT_BUFFER,
+				      (unsigned long) &wait);
+        set_fs(old_fs);
+
+        return ret;
+}
+
+static int video1394_queue_buf32(struct file *file, unsigned int cmd, unsigned long arg)
+{
+        return -EFAULT;   /* ??? was there before. */
+
+	return video1394_ioctl(file,
+				VIDEO1394_IOC_TALK_QUEUE_BUFFER, arg);
+}
+
/*
 * compat_ioctl entry point: commands whose argument layout is identical on
 * 32- and 64-bit ABIs are forwarded directly; the wait ioctls go through
 * the video1394_wait32 translators; TALK_QUEUE_BUFFER is unimplemented in
 * compat mode (see video1394_queue_buf32).
 */
static long video1394_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg)
{
	switch (cmd) {
	/* These requests have same format as long as 'int' has same size. */
	case VIDEO1394_IOC_LISTEN_CHANNEL:
	case VIDEO1394_IOC_UNLISTEN_CHANNEL:
	case VIDEO1394_IOC_TALK_CHANNEL:
	case VIDEO1394_IOC_UNTALK_CHANNEL:
		return video1394_ioctl(f, cmd, arg);

	case VIDEO1394_IOC32_LISTEN_QUEUE_BUFFER:
		return video1394_w_wait32(f, cmd, arg);
	case VIDEO1394_IOC32_LISTEN_WAIT_BUFFER:
		return video1394_wr_wait32(f, cmd, arg);
	case VIDEO1394_IOC_TALK_QUEUE_BUFFER:
		return video1394_queue_buf32(f, cmd, arg);
	case VIDEO1394_IOC32_TALK_WAIT_BUFFER:
		return video1394_w_wait32(f, cmd, arg);
	case VIDEO1394_IOC32_LISTEN_POLL_BUFFER:
		return video1394_wr_wait32(f, cmd, arg);
	default:
		return -ENOIOCTLCMD;
	}
}
+
+#endif /* CONFIG_COMPAT */
+
/*
 * Module unload: unregister from the ieee1394 core, then remove the devfs
 * directory and the character device (reverse of video1394_init_module).
 */
static void __exit video1394_exit_module (void)
{
	hpsb_unregister_protocol(&video1394_driver);

	hpsb_unregister_highlevel(&video1394_highlevel);

	devfs_remove(VIDEO1394_DRIVER_NAME);
	cdev_del(&video1394_cdev);

	PRINT_G(KERN_INFO, "Removed " VIDEO1394_DRIVER_NAME " module");
}
+
+static int __init video1394_init_module (void)
+{
+	int ret;
+
+	cdev_init(&video1394_cdev, &video1394_fops);
+	video1394_cdev.owner = THIS_MODULE;
+	kobject_set_name(&video1394_cdev.kobj, VIDEO1394_DRIVER_NAME);
+	ret = cdev_add(&video1394_cdev, IEEE1394_VIDEO1394_DEV, 16);
+	if (ret) {
+		PRINT_G(KERN_ERR, "video1394: unable to get minor device block");
+		return ret;
+        }
+
+	devfs_mk_dir(VIDEO1394_DRIVER_NAME);
+
+	hpsb_register_highlevel(&video1394_highlevel);
+
+	ret = hpsb_register_protocol(&video1394_driver);
+	if (ret) {
+		PRINT_G(KERN_ERR, "video1394: failed to register protocol");
+		hpsb_unregister_highlevel(&video1394_highlevel);
+		devfs_remove(VIDEO1394_DRIVER_NAME);
+		cdev_del(&video1394_cdev);
+		return ret;
+	}
+
+	PRINT_G(KERN_INFO, "Installed " VIDEO1394_DRIVER_NAME " module");
+	return 0;
+}
+
+
+module_init(video1394_init_module);
+module_exit(video1394_exit_module);
+MODULE_ALIAS_CHARDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_VIDEO1394 * 16);
diff --git a/drivers/ieee1394/video1394.h b/drivers/ieee1394/video1394.h
new file mode 100644
index 000000000000..9a89d9cc3c85
--- /dev/null
+++ b/drivers/ieee1394/video1394.h
@@ -0,0 +1,67 @@
+/*
+ * video1394.h - driver for OHCI 1394 boards
+ * Copyright (C)1999,2000 Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>
+ *                        Peter Schlaile <udbz@rz.uni-karlsruhe.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef _VIDEO_1394_H
+#define _VIDEO_1394_H
+
+#include "ieee1394-ioctl.h"
+
+#define VIDEO1394_DRIVER_NAME "video1394"
+
+#define VIDEO1394_MAX_SIZE 0x4000000
+
/* Life cycle of one DMA buffer. */
enum {
	VIDEO1394_BUFFER_FREE = 0,	/* available to be queued */
	VIDEO1394_BUFFER_QUEUED,	/* handed to the DMA engine */
	VIDEO1394_BUFFER_READY		/* DMA complete, ready for user space */
};
+
+#define VIDEO1394_SYNC_FRAMES          0x00000001
+#define VIDEO1394_INCLUDE_ISO_HEADERS  0x00000002
+#define VIDEO1394_VARIABLE_PACKET_SIZE 0x00000004
+
/* Argument of the LISTEN_CHANNEL / TALK_CHANNEL ioctls; buf_size is
   updated by the kernel with the size actually allocated. */
struct video1394_mmap {
	int channel;			/* -1 to find an open channel in LISTEN/TALK */
	unsigned int sync_tag;
	unsigned int nb_buffers;
	unsigned int buf_size;
	unsigned int packet_size; /* For VARIABLE_PACKET_SIZE:
				     Maximum packet size */
	unsigned int fps;
	unsigned int syt_offset;
	unsigned int flags;		/* VIDEO1394_* flags above */
};
+
+/* For TALK_QUEUE_BUFFER with VIDEO1394_VARIABLE_PACKET_SIZE use */
+struct video1394_queue_variable {
+	unsigned int channel;
+	unsigned int buffer;
+	unsigned int __user * packet_sizes; /* Buffer of size:
+				       buf_size / packet_size  */
+};
+
/* Argument of the QUEUE/WAIT/POLL buffer ioctls. */
struct video1394_wait {
	unsigned int channel;
	unsigned int buffer;
	struct timeval filltime;	/* time of buffer full */
};
+
+
+#endif