Diffstat (limited to 'drivers/scsi/aacraid')
-rw-r--r--  drivers/scsi/aacraid/Makefile       8
-rw-r--r--  drivers/scsi/aacraid/README        66
-rw-r--r--  drivers/scsi/aacraid/TODO           6
-rw-r--r--  drivers/scsi/aacraid/aachba.c    2037
-rw-r--r--  drivers/scsi/aacraid/aacraid.h   1623
-rw-r--r--  drivers/scsi/aacraid/commctrl.c   683
-rw-r--r--  drivers/scsi/aacraid/comminit.c   325
-rw-r--r--  drivers/scsi/aacraid/commsup.c    939
-rw-r--r--  drivers/scsi/aacraid/dpcsup.c     215
-rw-r--r--  drivers/scsi/aacraid/linit.c      749
-rw-r--r--  drivers/scsi/aacraid/rkt.c        440
-rw-r--r--  drivers/scsi/aacraid/rx.c         441
-rw-r--r--  drivers/scsi/aacraid/sa.c         374
13 files changed, 7906 insertions, 0 deletions
diff --git a/drivers/scsi/aacraid/Makefile b/drivers/scsi/aacraid/Makefile
new file mode 100644
index 000000000000..28d133a3094f
--- /dev/null
+++ b/drivers/scsi/aacraid/Makefile
@@ -0,0 +1,8 @@
+# Adaptec aacraid
+
+obj-$(CONFIG_SCSI_AACRAID) := aacraid.o
+
+aacraid-objs	:= linit.o aachba.o commctrl.o comminit.o commsup.o \
+		   dpcsup.o rx.o sa.o rkt.o
+
+EXTRA_CFLAGS	:= -Idrivers/scsi
diff --git a/drivers/scsi/aacraid/README b/drivers/scsi/aacraid/README
new file mode 100644
index 000000000000..fdb0f45f7336
--- /dev/null
+++ b/drivers/scsi/aacraid/README
@@ -0,0 +1,66 @@
+AACRAID Driver for Linux (take two)
+
+Introduction
+-------------------------
+The aacraid driver adds support for Adaptec (http://www.adaptec.com)
+RAID controllers. This is a major rewrite of the original
+Adaptec-supplied driver. It significantly cleans up both the code and
+the running binary size (the module is less than half the size of the
+original).
+
+Supported Cards/Chipsets
+-------------------------
+	Adaptec 2020S
+	Adaptec 2025S
+	Adaptec 2120S
+	Adaptec 2200S
+	Adaptec 2230S
+	Adaptec 2240S
+	Adaptec 2410SA
+	Adaptec 2610SA
+	Adaptec 2810SA
+	Adaptec 21610SA
+	Adaptec 3230S
+	Adaptec 3240S
+	Adaptec 4000SAS
+	Adaptec 4005SAS
+	Adaptec 4800SAS
+	Adaptec 4805SAS
+	Adaptec 5400S
+	Dell PERC 2 Quad Channel
+	Dell PERC 2/Si
+	Dell PERC 3/Si
+	Dell PERC 3/Di
+	Dell CERC 2
+	HP NetRAID-4M
+	Legend S220
+	Legend S230
+
+People
+-------------------------
+Alan Cox <alan@redhat.com>
+Christoph Hellwig <hch@infradead.org>	(updates for new-style PCI probing and SCSI host registration,
+					 small cleanups/fixes)
+Matt Domsch <matt_domsch@dell.com>	(revision ioctl, adapter messages)
+Deanna Bonds                            (non-DASD support, PAE fibs and 64 bit, added new adaptec controllers,
+					 added new ioctls, changed scsi interface to use new error handler,
+					 increased the number of fibs and outstanding commands to a container)
+
+					(fixed 64bit and 64G memory model, changed confusing naming convention
+					 where fibs that go to the hardware are consistently called hw_fibs and
+					 not just fibs like the name of the driver tracking structure)
+Mark Salyzyn <Mark_Salyzyn@adaptec.com> Fixed panic issues and added new product IDs for upcoming HBAs.
+
+Original Driver
+-------------------------
+Adaptec Unix OEM Product Group
+
+Mailing List
+-------------------------
+linux-scsi@vger.kernel.org (Interested parties troll here)
+Note that this driver is very different from Brian's original driver,
+so don't expect him to support it.
+Adaptec does support this driver.  Contact either tech support or Mark Salyzyn.
+
+Original by Brian Boerner February 2001
+Rewritten by Alan Cox, November 2001
diff --git a/drivers/scsi/aacraid/TODO b/drivers/scsi/aacraid/TODO
new file mode 100644
index 000000000000..25856a21d982
--- /dev/null
+++ b/drivers/scsi/aacraid/TODO
@@ -0,0 +1,6 @@
+o	Testing
+o	More testing
+o	Feature request: display the firmware/bios/etc revisions in the
+	/proc info
+o	Drop irq_mask, basically unused
+o	I/O size increase
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
new file mode 100644
index 000000000000..f3fc35386060
--- /dev/null
+++ b/drivers/scsi/aacraid/aachba.c
@@ -0,0 +1,2037 @@
+/*
+ *	Adaptec AAC series RAID controller driver
+ *	(c) Copyright 2001 Red Hat Inc.	<alan@redhat.com>
+ *
+ * based on the old aacraid driver that is..
+ * Adaptec aacraid device driver for Linux.
+ *
+ * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.  If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/completion.h>
+#include <linux/blkdev.h>
+#include <asm/semaphore.h>
+#include <asm/uaccess.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+
+#include "aacraid.h"
+
+/* values for inqd_pdt: Peripheral device type in plain English */
+#define	INQD_PDT_DA	0x00	/* Direct-access (DISK) device */
+#define	INQD_PDT_PROC	0x03	/* Processor device */
+#define	INQD_PDT_CHNGR	0x08	/* Changer (jukebox, scsi2) */
+#define	INQD_PDT_COMM	0x09	/* Communication device (scsi2) */
+#define	INQD_PDT_NOLUN2 0x1f	/* Unknown Device (scsi2) */
+#define	INQD_PDT_NOLUN	0x7f	/* Logical Unit Not Present */
+
+#define	INQD_PDT_DMASK	0x1F	/* Peripheral Device Type Mask */
+#define	INQD_PDT_QMASK	0xE0	/* Peripheral Device Qualifier Mask */
+
+#define MAX_FIB_DATA (sizeof(struct hw_fib) - sizeof(struct aac_fibhdr))
+
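+/*
+ * Requests go to the adapter as fixed-size FIBs, so a request's whole
+ * scatter-gather list has to fit in the FIB payload alongside the
+ * command header; that is what bounds the segment count (compare the
+ * BUG_ON() fibsize checks in aac_read()/aac_write() below).
+ */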
+#define MAX_DRIVER_SG_SEGMENT_COUNT 17
+
+/*
+ *	Sense codes
+ */
+ 
+#define SENCODE_NO_SENSE                        0x00
+#define SENCODE_END_OF_DATA                     0x00
+#define SENCODE_BECOMING_READY                  0x04
+#define SENCODE_INIT_CMD_REQUIRED               0x04
+#define SENCODE_PARAM_LIST_LENGTH_ERROR         0x1A
+#define SENCODE_INVALID_COMMAND                 0x20
+#define SENCODE_LBA_OUT_OF_RANGE                0x21
+#define SENCODE_INVALID_CDB_FIELD               0x24
+#define SENCODE_LUN_NOT_SUPPORTED               0x25
+#define SENCODE_INVALID_PARAM_FIELD             0x26
+#define SENCODE_PARAM_NOT_SUPPORTED             0x26
+#define SENCODE_PARAM_VALUE_INVALID             0x26
+#define SENCODE_RESET_OCCURRED                  0x29
+#define SENCODE_LUN_NOT_SELF_CONFIGURED_YET     0x3E
+#define SENCODE_INQUIRY_DATA_CHANGED            0x3F
+#define SENCODE_SAVING_PARAMS_NOT_SUPPORTED     0x39
+#define SENCODE_DIAGNOSTIC_FAILURE              0x40
+#define SENCODE_INTERNAL_TARGET_FAILURE         0x44
+#define SENCODE_INVALID_MESSAGE_ERROR           0x49
+#define SENCODE_LUN_FAILED_SELF_CONFIG          0x4c
+#define SENCODE_OVERLAPPED_COMMAND              0x4E
+
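+/*
+ * Each SENCODE_* value above is a SCSI additional sense code (ASC,
+ * sense byte 12); each ASENCODE_* value below is the matching
+ * additional sense code qualifier (ASCQ, sense byte 13).  set_sense()
+ * pairs them when building fixed-format sense data.
+ */
+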
+/*
+ *	Additional sense codes
+ */
+ 
+#define ASENCODE_NO_SENSE                       0x00
+#define ASENCODE_END_OF_DATA                    0x05
+#define ASENCODE_BECOMING_READY                 0x01
+#define ASENCODE_INIT_CMD_REQUIRED              0x02
+#define ASENCODE_PARAM_LIST_LENGTH_ERROR        0x00
+#define ASENCODE_INVALID_COMMAND                0x00
+#define ASENCODE_LBA_OUT_OF_RANGE               0x00
+#define ASENCODE_INVALID_CDB_FIELD              0x00
+#define ASENCODE_LUN_NOT_SUPPORTED              0x00
+#define ASENCODE_INVALID_PARAM_FIELD            0x00
+#define ASENCODE_PARAM_NOT_SUPPORTED            0x01
+#define ASENCODE_PARAM_VALUE_INVALID            0x02
+#define ASENCODE_RESET_OCCURRED                 0x00
+#define ASENCODE_LUN_NOT_SELF_CONFIGURED_YET    0x00
+#define ASENCODE_INQUIRY_DATA_CHANGED           0x03
+#define ASENCODE_SAVING_PARAMS_NOT_SUPPORTED    0x00
+#define ASENCODE_DIAGNOSTIC_FAILURE             0x80
+#define ASENCODE_INTERNAL_TARGET_FAILURE        0x00
+#define ASENCODE_INVALID_MESSAGE_ERROR          0x00
+#define ASENCODE_LUN_FAILED_SELF_CONFIG         0x00
+#define ASENCODE_OVERLAPPED_COMMAND             0x00
+
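+/*
+ * Byte-extraction helpers: set_sense() stores the 32-bit residue
+ * MSB-first (BYTE3 down to BYTE0) in the sense information field.
+ */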
+#define BYTE0(x) (unsigned char)(x)
+#define BYTE1(x) (unsigned char)((x) >> 8)
+#define BYTE2(x) (unsigned char)((x) >> 16)
+#define BYTE3(x) (unsigned char)((x) >> 24)
+
+/*------------------------------------------------------------------------------
+ *              S T R U C T S / T Y P E D E F S
+ *----------------------------------------------------------------------------*/
+/* SCSI inquiry data */
+struct inquiry_data {
+	u8 inqd_pdt;	/* Peripheral qualifier | Peripheral Device Type  */
+	u8 inqd_dtq;	/* RMB | Device Type Qualifier  */
+	u8 inqd_ver;	/* ISO version | ECMA version | ANSI-approved version */
+	u8 inqd_rdf;	/* AENC | TrmIOP | Response data format */
+	u8 inqd_len;	/* Additional length (n-4) */
+	u8 inqd_pad1[2];/* Reserved - must be zero */
+	u8 inqd_pad2;	/* RelAdr | WBus32 | WBus16 |  Sync  | Linked |Reserved| CmdQue | SftRe */
+	u8 inqd_vid[8];	/* Vendor ID */
+	u8 inqd_pid[16];/* Product ID */
+	u8 inqd_prl[4];	/* Product Revision Level */
+};
+
+/*
+ *              M O D U L E   G L O B A L S
+ */
+ 
+static unsigned long aac_build_sg(struct scsi_cmnd* scsicmd, struct sgmap* sgmap);
+static unsigned long aac_build_sg64(struct scsi_cmnd* scsicmd, struct sgmap64* psg);
+static int aac_send_srb_fib(struct scsi_cmnd* scsicmd);
+#ifdef AAC_DETAILED_STATUS_INFO
+static char *aac_get_status_string(u32 status);
+#endif
+
+/*
+ *	Non dasd selection is handled entirely in aachba now
+ */	
+ 
+static int nondasd = -1;
+static int dacmode = -1;
+
+static int commit = -1;
+
+module_param(nondasd, int, 0);
+MODULE_PARM_DESC(nondasd, "Control scanning of hba for nondasd devices. 0=off, 1=on");
+module_param(dacmode, int, 0);
+MODULE_PARM_DESC(dacmode, "Control whether dma addressing is using 64 bit DAC. 0=off, 1=on");
+module_param(commit, int, 0);
+MODULE_PARM_DESC(commit, "Control whether a COMMIT_CONFIG is issued to the adapter for foreign arrays.\nThis is typically needed in systems that do not have a BIOS. 0=off, 1=on");
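+
+/*
+ * All three parameters default to -1 (no override); they take effect
+ * at module load time, e.g.:  modprobe aacraid nondasd=1 commit=1
+ */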
+
+/**
+ *	aac_get_config_status	-	check the adapter configuration
+ *	@dev: adapter to query
+ *
+ *	Query config status, and commit the configuration if needed.
+ */
+int aac_get_config_status(struct aac_dev *dev)
+{
+	int status = 0;
+	struct fib * fibptr;
+
+	if (!(fibptr = fib_alloc(dev)))
+		return -ENOMEM;
+
+	fib_init(fibptr);
+	{
+		struct aac_get_config_status *dinfo;
+		dinfo = (struct aac_get_config_status *) fib_data(fibptr);
+
+		dinfo->command = cpu_to_le32(VM_ContainerConfig);
+		dinfo->type = cpu_to_le32(CT_GET_CONFIG_STATUS);
+		dinfo->count = cpu_to_le32(sizeof(((struct aac_get_config_status_resp *)NULL)->data));
+	}
+
+	status = fib_send(ContainerCommand,
+			    fibptr,
+			    sizeof (struct aac_get_config_status),
+			    FsaNormal,
+			    1, 1,
+			    NULL, NULL);
+	if (status < 0 ) {
+		printk(KERN_WARNING "aac_get_config_status: SendFIB failed.\n");
+	} else {
+		struct aac_get_config_status_resp *reply
+		  = (struct aac_get_config_status_resp *) fib_data(fibptr);
+		dprintk((KERN_WARNING
+		  "aac_get_config_status: response=%d status=%d action=%d\n",
+		  le32_to_cpu(reply->response),
+		  le32_to_cpu(reply->status),
+		  le32_to_cpu(reply->data.action)));
+		if ((le32_to_cpu(reply->response) != ST_OK) ||
+		     (le32_to_cpu(reply->status) != CT_OK) ||
+		     (le32_to_cpu(reply->data.action) > CFACT_PAUSE)) {
+			printk(KERN_WARNING "aac_get_config_status: Will not issue the Commit Configuration\n");
+			status = -EINVAL;
+		}
+	}
+	fib_complete(fibptr);
+	/* Send a CT_COMMIT_CONFIG to enable discovery of devices */
+	if (status >= 0) {
+		if (commit == 1) {
+			struct aac_commit_config * dinfo;
+			fib_init(fibptr);
+			dinfo = (struct aac_commit_config *) fib_data(fibptr);
+	
+			dinfo->command = cpu_to_le32(VM_ContainerConfig);
+			dinfo->type = cpu_to_le32(CT_COMMIT_CONFIG);
+	
+			status = fib_send(ContainerCommand,
+				    fibptr,
+				    sizeof (struct aac_commit_config),
+				    FsaNormal,
+				    1, 1,
+				    NULL, NULL);
+			fib_complete(fibptr);
+		} else if (commit == 0) {
+			printk(KERN_WARNING
+			  "aac_get_config_status: Foreign device configurations are being ignored\n");
+		}
+	}
+	fib_free(fibptr);
+	return status;
+}
+
+/**
+ *	aac_get_containers	-	list containers
+ *	@dev: adapter to probe
+ *
+ *	Make a list of all containers on this controller
+ */
+int aac_get_containers(struct aac_dev *dev)
+{
+	struct fsa_dev_info *fsa_dev_ptr;
+	u32 index; 
+	int status = 0;
+	struct fib * fibptr;
+	unsigned instance;
+	struct aac_get_container_count *dinfo;
+	struct aac_get_container_count_resp *dresp;
+	int maximum_num_containers = MAXIMUM_NUM_CONTAINERS;
+
+	instance = dev->scsi_host_ptr->unique_id;
+
+	if (!(fibptr = fib_alloc(dev)))
+		return -ENOMEM;
+
+	fib_init(fibptr);
+	dinfo = (struct aac_get_container_count *) fib_data(fibptr);
+	dinfo->command = cpu_to_le32(VM_ContainerConfig);
+	dinfo->type = cpu_to_le32(CT_GET_CONTAINER_COUNT);
+
+	status = fib_send(ContainerCommand,
+		    fibptr,
+		    sizeof (struct aac_get_container_count),
+		    FsaNormal,
+		    1, 1,
+		    NULL, NULL);
+	if (status >= 0) {
+		dresp = (struct aac_get_container_count_resp *)fib_data(fibptr);
+		maximum_num_containers = le32_to_cpu(dresp->ContainerSwitchEntries);
+		fib_complete(fibptr);
+	}
+
+	if (maximum_num_containers < MAXIMUM_NUM_CONTAINERS)
+		maximum_num_containers = MAXIMUM_NUM_CONTAINERS;
+
+	fsa_dev_ptr = (struct fsa_dev_info *) kmalloc(
+	  sizeof(*fsa_dev_ptr) * maximum_num_containers, GFP_KERNEL);
+	if (!fsa_dev_ptr) {
+		fib_free(fibptr);
+		return -ENOMEM;
+	}
+	memset(fsa_dev_ptr, 0, sizeof(*fsa_dev_ptr) * maximum_num_containers);
+
+	dev->fsa_dev = fsa_dev_ptr;
+	dev->maximum_num_containers = maximum_num_containers;
+
+	for (index = 0; index < dev->maximum_num_containers; index++) {
+		struct aac_query_mount *dinfo;
+		struct aac_mount *dresp;
+
+		fsa_dev_ptr[index].devname[0] = '\0';
+
+		fib_init(fibptr);
+		dinfo = (struct aac_query_mount *) fib_data(fibptr);
+
+		dinfo->command = cpu_to_le32(VM_NameServe);
+		dinfo->count = cpu_to_le32(index);
+		dinfo->type = cpu_to_le32(FT_FILESYS);
+
+		status = fib_send(ContainerCommand,
+				    fibptr,
+				    sizeof (struct aac_query_mount),
+				    FsaNormal,
+				    1, 1,
+				    NULL, NULL);
+		if (status < 0 ) {
+			printk(KERN_WARNING "aac_get_containers: SendFIB failed.\n");
+			break;
+		}
+		dresp = (struct aac_mount *)fib_data(fibptr);
+
+		dprintk ((KERN_DEBUG
+		  "VM_NameServe cid=%d status=%d vol=%d state=%d cap=%u\n",
+		  (int)index, (int)le32_to_cpu(dresp->status),
+		  (int)le32_to_cpu(dresp->mnt[0].vol),
+		  (int)le32_to_cpu(dresp->mnt[0].state),
+		  (unsigned)le32_to_cpu(dresp->mnt[0].capacity)));
+		if ((le32_to_cpu(dresp->status) == ST_OK) &&
+		    (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) &&
+		    (le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) {
+			fsa_dev_ptr[index].valid = 1;
+			fsa_dev_ptr[index].type = le32_to_cpu(dresp->mnt[0].vol);
+			fsa_dev_ptr[index].size = le32_to_cpu(dresp->mnt[0].capacity);
+			if (le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY)
+				    fsa_dev_ptr[index].ro = 1;
+		}
+		fib_complete(fibptr);
+		/*
+		 *	If there are no more containers, then stop asking.
+		 */
+		if ((index + 1) >= le32_to_cpu(dresp->count)){
+			break;
+		}
+	}
+	fib_free(fibptr);
+	return status;
+}
+
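+/*
+ * Completion helper: the FIB callbacks below run without the SCSI host
+ * lock, so take it around ->scsi_done() before handing the command
+ * back to the midlayer.
+ */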
+static void aac_io_done(struct scsi_cmnd * scsicmd)
+{
+	unsigned long cpu_flags;
+	struct Scsi_Host *host = scsicmd->device->host;
+	spin_lock_irqsave(host->host_lock, cpu_flags);
+	scsicmd->scsi_done(scsicmd);
+	spin_unlock_irqrestore(host->host_lock, cpu_flags);
+}
+
+static void get_container_name_callback(void *context, struct fib * fibptr)
+{
+	struct aac_get_name_resp * get_name_reply;
+	struct scsi_cmnd * scsicmd;
+
+	scsicmd = (struct scsi_cmnd *) context;
+
+	dprintk((KERN_DEBUG "get_container_name_callback[cpu %d]: t = %ld.\n", smp_processor_id(), jiffies));
+	if (fibptr == NULL)
+		BUG();
+
+	get_name_reply = (struct aac_get_name_resp *) fib_data(fibptr);
+	/* Failure is irrelevant, using default value instead */
+	if ((le32_to_cpu(get_name_reply->status) == CT_OK)
+	 && (get_name_reply->data[0] != '\0')) {
+		int    count;
+		char * dp;
+		char * sp = get_name_reply->data;
+		sp[sizeof(((struct aac_get_name_resp *)NULL)->data)-1] = '\0';
+		while (*sp == ' ')
+			++sp;
+		count = sizeof(((struct inquiry_data *)NULL)->inqd_pid);
+		dp = ((struct inquiry_data *)scsicmd->request_buffer)->inqd_pid;
+		if (*sp) do {
+			*dp++ = (*sp) ? *sp++ : ' ';
+		} while (--count > 0);
+	}
+	scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
+
+	fib_complete(fibptr);
+	fib_free(fibptr);
+	aac_io_done(scsicmd);
+}
+
+/**
+ *	aac_get_container_name	-	get container name, non-blocking
+ *	@scsicmd: SCSI command to complete with the container name
+ *	@cid: container identifier
+ */
+static int aac_get_container_name(struct scsi_cmnd * scsicmd, int cid)
+{
+	int status;
+	struct aac_get_name *dinfo;
+	struct fib * cmd_fibcontext;
+	struct aac_dev * dev;
+
+	dev = (struct aac_dev *)scsicmd->device->host->hostdata;
+
+	if (!(cmd_fibcontext = fib_alloc(dev)))
+		return -ENOMEM;
+
+	fib_init(cmd_fibcontext);
+	dinfo = (struct aac_get_name *) fib_data(cmd_fibcontext);
+
+	dinfo->command = cpu_to_le32(VM_ContainerConfig);
+	dinfo->type = cpu_to_le32(CT_READ_NAME);
+	dinfo->cid = cpu_to_le32(cid);
+	dinfo->count = cpu_to_le32(sizeof(((struct aac_get_name_resp *)NULL)->data));
+
+	status = fib_send(ContainerCommand, 
+		  cmd_fibcontext, 
+		  sizeof (struct aac_get_name),
+		  FsaNormal, 
+		  0, 1, 
+		  (fib_callback) get_container_name_callback, 
+		  (void *) scsicmd);
+	
+	/*
+	 *	Check that the command queued to the controller
+	 */
+	if (status == -EINPROGRESS) 
+		return 0;
+		
+	printk(KERN_WARNING "aac_get_container_name: fib_send failed with status: %d.\n", status);
+	fib_complete(cmd_fibcontext);
+	fib_free(cmd_fibcontext);
+	return -1;
+}
+
+/**
+ *	probe_container		-	query a logical volume
+ *	@dev: device to query
+ *	@cid: container identifier
+ *
+ *	Queries the controller about the given volume. The volume information
+ *	is updated in the struct fsa_dev_info structure rather than returned.
+ */
+ 
+static int probe_container(struct aac_dev *dev, int cid)
+{
+	struct fsa_dev_info *fsa_dev_ptr;
+	int status;
+	struct aac_query_mount *dinfo;
+	struct aac_mount *dresp;
+	struct fib * fibptr;
+	unsigned instance;
+
+	fsa_dev_ptr = dev->fsa_dev;
+	instance = dev->scsi_host_ptr->unique_id;
+
+	if (!(fibptr = fib_alloc(dev)))
+		return -ENOMEM;
+
+	fib_init(fibptr);
+
+	dinfo = (struct aac_query_mount *)fib_data(fibptr);
+
+	dinfo->command = cpu_to_le32(VM_NameServe);
+	dinfo->count = cpu_to_le32(cid);
+	dinfo->type = cpu_to_le32(FT_FILESYS);
+
+	status = fib_send(ContainerCommand,
+			    fibptr,
+			    sizeof(struct aac_query_mount),
+			    FsaNormal,
+			    1, 1,
+			    NULL, NULL);
+	if (status < 0) {
+		printk(KERN_WARNING "aacraid: probe_container query failed.\n");
+		goto error;
+	}
+
+	dresp = (struct aac_mount *) fib_data(fibptr);
+
+	if ((le32_to_cpu(dresp->status) == ST_OK) &&
+	    (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) &&
+	    (le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) {
+		fsa_dev_ptr[cid].valid = 1;
+		fsa_dev_ptr[cid].type = le32_to_cpu(dresp->mnt[0].vol);
+		fsa_dev_ptr[cid].size = le32_to_cpu(dresp->mnt[0].capacity);
+		if (le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY)
+			fsa_dev_ptr[cid].ro = 1;
+	}
+
+error:
+	fib_complete(fibptr);
+	fib_free(fibptr);
+
+	return status;
+}
+
+/* Local Structure to set SCSI inquiry data strings */
+struct scsi_inq {
+	char vid[8];         /* Vendor ID */
+	char pid[16];        /* Product ID */
+	char prl[4];         /* Product Revision Level */
+};
+
+/**
+ *	inqstrcpy	-	string merge
+ *	@a:	string to copy from
+ *	@b:	string to copy to
+ *
+ * 	Copy a string from one location to another
+ *	without copying the terminating NUL
+ */
+
+static void inqstrcpy(char *a, char *b)
+{
+
+	while(*a != (char)0) 
+		*b++ = *a++;
+}
+
+static char *container_types[] = {
+        "None",
+        "Volume",
+        "Mirror",
+        "Stripe",
+        "RAID5",
+        "SSRW",
+        "SSRO",
+        "Morph",
+        "Legacy",
+        "RAID4",
+        "RAID10",             
+        "RAID00",             
+        "V-MIRRORS",          
+        "PSEUDO R4",          
+	"RAID50",
+        "Unknown"
+};
+
+
+
+/* Function: setinqstr
+ *
+ * Arguments: [1] int devtype [2] pointer to data buffer [3] int container type index
+ *
+ * Purpose: Sets SCSI inquiry data strings for vendor, product
+ * and revision level. Allows strings to be set in platform dependent
+ * files instead of in OS dependent driver source.
+ */
+
+static void setinqstr(int devtype, void *data, int tindex)
+{
+	struct scsi_inq *str;
+	struct aac_driver_ident *mp;
+
+	mp = aac_get_driver_ident(devtype);
+   
+	str = (struct scsi_inq *)(data); /* cast data to scsi inq block */
+
+	inqstrcpy (mp->vname, str->vid); 
+	inqstrcpy (mp->model, str->pid); /* last six chars reserved for vol type */
+
+	if (tindex < (sizeof(container_types)/sizeof(char *))){
+		char *findit = str->pid;
+
+		for ( ; *findit != ' '; findit++); /* walk till we find a space */
+		/* RAID is superfluous in the context of a RAID device */
+		if (memcmp(findit-4, "RAID", 4) == 0)
+			*(findit -= 4) = ' ';
+		inqstrcpy (container_types[tindex], findit + 1);
+	}
+	inqstrcpy ("V1.0", str->prl);
+}
+
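+/**
+ *	set_sense	-	build fixed-format SCSI sense data
+ *	@sense_buf: buffer to fill in
+ *	@sense_key: SCSI sense key
+ *	@sense_code: additional sense code (ASC, stored in byte 12)
+ *	@a_sense_code: additional sense code qualifier (ASCQ, byte 13)
+ *	@incorrect_length: if set, set the ILI bit and store @residue
+ *		in the information field (bytes 3-6)
+ *	@bit_pointer: sense-key-specific bit pointer (ILLEGAL_REQUEST only)
+ *	@field_pointer: sense-key-specific field pointer (ILLEGAL_REQUEST only)
+ *	@residue: residual count reported when @incorrect_length is set
+ */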
+void set_sense(u8 *sense_buf, u8 sense_key, u8 sense_code,
+		    u8 a_sense_code, u8 incorrect_length,
+		    u8 bit_pointer, u16 field_pointer,
+		    u32 residue)
+{
+	sense_buf[0] = 0xF0;	/* Sense data valid, err code 70h (current error) */
+	sense_buf[1] = 0;	/* Segment number, always zero */
+
+	if (incorrect_length) {
+		sense_buf[2] = sense_key | 0x20;/* Set ILI bit | sense key */
+		sense_buf[3] = BYTE3(residue);
+		sense_buf[4] = BYTE2(residue);
+		sense_buf[5] = BYTE1(residue);
+		sense_buf[6] = BYTE0(residue);
+	} else
+		sense_buf[2] = sense_key;	/* Sense key */
+
+	if (sense_key == ILLEGAL_REQUEST)
+		sense_buf[7] = 10;	/* Additional sense length */
+	else
+		sense_buf[7] = 6;	/* Additional sense length */
+
+	sense_buf[12] = sense_code;	/* Additional sense code */
+	sense_buf[13] = a_sense_code;	/* Additional sense code qualifier */
+	if (sense_key == ILLEGAL_REQUEST) {
+		sense_buf[15] = 0;
+
+		if (sense_code == SENCODE_INVALID_PARAM_FIELD)
+			sense_buf[15] = 0x80;/* Std sense key specific field */
+		/* Illegal parameter is in the parameter block */
+
+		if (sense_code == SENCODE_INVALID_CDB_FIELD)
+			sense_buf[15] = 0xc0;/* Std sense key specific field */
+		/* Illegal parameter is in the CDB block */
+		sense_buf[15] |= bit_pointer;
+		sense_buf[16] = field_pointer >> 8;	/* MSB */
+		sense_buf[17] = field_pointer;		/* LSB */
+	}
+}
+
+int aac_get_adapter_info(struct aac_dev* dev)
+{
+	struct fib* fibptr;
+	struct aac_adapter_info* info;
+	int rcode;
+	u32 tmp;
+	if (!(fibptr = fib_alloc(dev)))
+		return -ENOMEM;
+
+	fib_init(fibptr);
+	info = (struct aac_adapter_info*) fib_data(fibptr);
+
+	memset(info,0,sizeof(struct aac_adapter_info));
+
+	rcode = fib_send(RequestAdapterInfo,
+			fibptr, 
+			sizeof(struct aac_adapter_info),
+			FsaNormal, 
+			1, 1, 
+			NULL, 
+			NULL);
+
+	memcpy(&dev->adapter_info, info, sizeof(struct aac_adapter_info));
+
+	tmp = le32_to_cpu(dev->adapter_info.kernelrev);
+	printk(KERN_INFO "%s%d: kernel %d.%d-%d[%d]\n", 
+			dev->name, 
+			dev->id,
+			tmp>>24,
+			(tmp>>16)&0xff,
+			tmp&0xff,
+			le32_to_cpu(dev->adapter_info.kernelbuild));
+	tmp = le32_to_cpu(dev->adapter_info.monitorrev);
+	printk(KERN_INFO "%s%d: monitor %d.%d-%d[%d]\n", 
+			dev->name, dev->id,
+			tmp>>24,(tmp>>16)&0xff,tmp&0xff,
+			le32_to_cpu(dev->adapter_info.monitorbuild));
+	tmp = le32_to_cpu(dev->adapter_info.biosrev);
+	printk(KERN_INFO "%s%d: bios %d.%d-%d[%d]\n", 
+			dev->name, dev->id,
+			tmp>>24,(tmp>>16)&0xff,tmp&0xff,
+			le32_to_cpu(dev->adapter_info.biosbuild));
+	if (le32_to_cpu(dev->adapter_info.serial[0]) != 0xBAD0)
+		printk(KERN_INFO "%s%d: serial %x\n",
+			dev->name, dev->id,
+			le32_to_cpu(dev->adapter_info.serial[0]));
+
+	dev->nondasd_support = 0;
+	dev->raid_scsi_mode = 0;
+	if(dev->adapter_info.options & AAC_OPT_NONDASD){
+		dev->nondasd_support = 1;
+	}
+
+	/*
+	 * If the firmware supports ROMB RAID/SCSI mode and we are currently
+	 * in RAID/SCSI mode, set the flag. For now if in this mode we will
+	 * force nondasd support on. If we decide to allow the non-dasd flag
+	 * additional changes will have to be made to support RAID/SCSI.
+	 * The function aac_scsi_cmd in this module will have to be changed
+	 * to support the new dev->raid_scsi_mode flag instead of leeching
+	 * off the dev->nondasd_support flag. Also in linit.c the function
+	 * aac_detect will have to be modified where it sets up the max
+	 * number of channels based on the aac->nondasd_support flag only.
+	 */
+	if ((dev->adapter_info.options & AAC_OPT_SCSI_MANAGED) &&
+	    (dev->adapter_info.options & AAC_OPT_RAID_SCSI_MODE)) {
+		dev->nondasd_support = 1;
+		dev->raid_scsi_mode = 1;
+	}
+	if (dev->raid_scsi_mode != 0)
+		printk(KERN_INFO "%s%d: ROMB RAID/SCSI mode enabled\n",
+				dev->name, dev->id);
+		
+	if(nondasd != -1) {  
+		dev->nondasd_support = (nondasd!=0);
+	}
+	if(dev->nondasd_support != 0){
+		printk(KERN_INFO "%s%d: Non-DASD support enabled.\n",dev->name, dev->id);
+	}
+
+	dev->dac_support = 0;
+	if( (sizeof(dma_addr_t) > 4) && (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64)){
+		printk(KERN_INFO "%s%d: 64bit support enabled.\n", dev->name, dev->id);
+		dev->dac_support = 1;
+	}
+
+	if(dacmode != -1) {
+		dev->dac_support = (dacmode!=0);
+	}
+	if(dev->dac_support != 0) {
+		if (!pci_set_dma_mask(dev->pdev, 0xFFFFFFFFFFFFFFFFULL) &&
+			!pci_set_consistent_dma_mask(dev->pdev, 0xFFFFFFFFFFFFFFFFULL)) {
+			printk(KERN_INFO"%s%d: 64 Bit DAC enabled\n",
+				dev->name, dev->id);
+		} else if (!pci_set_dma_mask(dev->pdev, 0xFFFFFFFFULL) &&
+			!pci_set_consistent_dma_mask(dev->pdev, 0xFFFFFFFFULL)) {
+			printk(KERN_INFO"%s%d: DMA mask set failed, 64 Bit DAC disabled\n",
+				dev->name, dev->id);
+			dev->dac_support = 0;
+		} else {
+			printk(KERN_WARNING"%s%d: No suitable DMA available.\n",
+				dev->name, dev->id);
+			rcode = -ENOMEM;
+		}
+	}
+
+	fib_complete(fibptr);
+	fib_free(fibptr);
+
+	return rcode;
+}
+
+
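+/*
+ * Read/write completion path: unmap the DMA buffers, translate the
+ * adapter status into a SCSI result (building sense data on failure),
+ * then release the FIB and complete the command.
+ */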
+static void read_callback(void *context, struct fib * fibptr)
+{
+	struct aac_dev *dev;
+	struct aac_read_reply *readreply;
+	struct scsi_cmnd *scsicmd;
+	u32 lba;
+	u32 cid;
+
+	scsicmd = (struct scsi_cmnd *) context;
+
+	dev = (struct aac_dev *)scsicmd->device->host->hostdata;
+	cid = ID_LUN_TO_CONTAINER(scsicmd->device->id, scsicmd->device->lun);
+
+	lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
+	dprintk((KERN_DEBUG "read_callback[cpu %d]: lba = %u, t = %ld.\n", smp_processor_id(), lba, jiffies));
+
+	if (fibptr == NULL)
+		BUG();
+		
+	if(scsicmd->use_sg)
+		pci_unmap_sg(dev->pdev, 
+			(struct scatterlist *)scsicmd->buffer,
+			scsicmd->use_sg,
+			scsicmd->sc_data_direction);
+	else if(scsicmd->request_bufflen)
+		pci_unmap_single(dev->pdev, scsicmd->SCp.dma_handle,
+				 scsicmd->request_bufflen,
+				 scsicmd->sc_data_direction);
+	readreply = (struct aac_read_reply *)fib_data(fibptr);
+	if (le32_to_cpu(readreply->status) == ST_OK)
+		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
+	else {
+		printk(KERN_WARNING "read_callback: read failed, status = %d\n",
+				le32_to_cpu(readreply->status));
+		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
+		set_sense((u8 *) &dev->fsa_dev[cid].sense_data,
+				    HARDWARE_ERROR,
+				    SENCODE_INTERNAL_TARGET_FAILURE,
+				    ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
+				    0, 0);
+		memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
+		  (sizeof(dev->fsa_dev[cid].sense_data) > sizeof(scsicmd->sense_buffer))
+		    ? sizeof(scsicmd->sense_buffer)
+		    : sizeof(dev->fsa_dev[cid].sense_data));
+	}
+	fib_complete(fibptr);
+	fib_free(fibptr);
+
+	aac_io_done(scsicmd);
+}
+
+static void write_callback(void *context, struct fib * fibptr)
+{
+	struct aac_dev *dev;
+	struct aac_write_reply *writereply;
+	struct scsi_cmnd *scsicmd;
+	u32 lba;
+	u32 cid;
+
+	scsicmd = (struct scsi_cmnd *) context;
+	dev = (struct aac_dev *)scsicmd->device->host->hostdata;
+	cid = ID_LUN_TO_CONTAINER(scsicmd->device->id, scsicmd->device->lun);
+
+	lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
+	dprintk((KERN_DEBUG "write_callback[cpu %d]: lba = %u, t = %ld.\n", smp_processor_id(), lba, jiffies));
+	if (fibptr == NULL)
+		BUG();
+
+	if(scsicmd->use_sg)
+		pci_unmap_sg(dev->pdev, 
+			(struct scatterlist *)scsicmd->buffer,
+			scsicmd->use_sg,
+			scsicmd->sc_data_direction);
+	else if(scsicmd->request_bufflen)
+		pci_unmap_single(dev->pdev, scsicmd->SCp.dma_handle,
+				 scsicmd->request_bufflen,
+				 scsicmd->sc_data_direction);
+
+	writereply = (struct aac_write_reply *) fib_data(fibptr);
+	if (le32_to_cpu(writereply->status) == ST_OK)
+		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
+	else {
+		printk(KERN_WARNING "write_callback: write failed, status = %d\n", le32_to_cpu(writereply->status));
+		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
+		set_sense((u8 *) &dev->fsa_dev[cid].sense_data,
+				    HARDWARE_ERROR,
+				    SENCODE_INTERNAL_TARGET_FAILURE,
+				    ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
+				    0, 0);
+		memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
+		  min(sizeof(dev->fsa_dev[cid].sense_data),
+			  sizeof(scsicmd->sense_buffer)));
+	}
+
+	fib_complete(fibptr);
+	fib_free(fibptr);
+	aac_io_done(scsicmd);
+}
+
+static int aac_read(struct scsi_cmnd * scsicmd, int cid)
+{
+	u32 lba;
+	u32 count;
+	int status;
+
+	u16 fibsize;
+	struct aac_dev *dev;
+	struct fib * cmd_fibcontext;
+
+	dev = (struct aac_dev *)scsicmd->device->host->hostdata;
+	/*
+	 *	Get block address and transfer length
+	 */
+	if (scsicmd->cmnd[0] == READ_6)	/* 6 byte command */
+	{
+		dprintk((KERN_DEBUG "aachba: received a read(6) command on id %d.\n", cid));
+
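+		/*
+		 * READ(6) CDB: the low 5 bits of byte 1 plus bytes 2-3
+		 * form a 21-bit LBA; byte 4 is the transfer length,
+		 * where 0 means 256 blocks.
+		 */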
+		lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
+		count = scsicmd->cmnd[4];
+
+		if (count == 0)
+			count = 256;
+	} else {
+		dprintk((KERN_DEBUG "aachba: received a read(10) command on id %d.\n", cid));
+
+		lba = (scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
+		count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
+	}
+	dprintk((KERN_DEBUG "aac_read[cpu %d]: lba = %u, t = %ld.\n", smp_processor_id(), lba, jiffies));
+	/*
+	 *	Allocate and initialize a Fib
+	 */
+	if (!(cmd_fibcontext = fib_alloc(dev))) {
+		return -1;
+	}
+
+	fib_init(cmd_fibcontext);
+
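+	/*
+	 * With 64-bit DAC addressing active, use the sgentry64 SG format
+	 * and the VM_CtHostRead64 command; otherwise use 32-bit
+	 * addressing with VM_CtBlockRead.
+	 */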
+	if(dev->dac_support == 1) {
+		struct aac_read64 *readcmd;
+		readcmd = (struct aac_read64 *) fib_data(cmd_fibcontext);
+		readcmd->command = cpu_to_le32(VM_CtHostRead64);
+		readcmd->cid = cpu_to_le16(cid);
+		readcmd->sector_count = cpu_to_le16(count);
+		readcmd->block = cpu_to_le32(lba);
+		readcmd->pad   = 0;
+		readcmd->flags = 0; 
+
+		aac_build_sg64(scsicmd, &readcmd->sg);
+		fibsize = sizeof(struct aac_read64) + 
+			((le32_to_cpu(readcmd->sg.count) - 1) * 
+			 sizeof (struct sgentry64));
+		BUG_ON (fibsize > (sizeof(struct hw_fib) - 
+					sizeof(struct aac_fibhdr)));
+		/*
+		 *	Now send the Fib to the adapter
+		 */
+		status = fib_send(ContainerCommand64, 
+			  cmd_fibcontext, 
+			  fibsize, 
+			  FsaNormal, 
+			  0, 1, 
+			  (fib_callback) read_callback, 
+			  (void *) scsicmd);
+	} else {
+		struct aac_read *readcmd;
+		readcmd = (struct aac_read *) fib_data(cmd_fibcontext);
+		readcmd->command = cpu_to_le32(VM_CtBlockRead);
+		readcmd->cid = cpu_to_le32(cid);
+		readcmd->block = cpu_to_le32(lba);
+		readcmd->count = cpu_to_le32(count * 512);
+
+		if (count * 512 > (64 * 1024))
+			BUG();
+
+		aac_build_sg(scsicmd, &readcmd->sg);
+		fibsize = sizeof(struct aac_read) + 
+			((le32_to_cpu(readcmd->sg.count) - 1) * 
+			 sizeof (struct sgentry));
+		BUG_ON (fibsize > (sizeof(struct hw_fib) - 
+					sizeof(struct aac_fibhdr)));
+		/*
+		 *	Now send the Fib to the adapter
+		 */
+		status = fib_send(ContainerCommand, 
+			  cmd_fibcontext, 
+			  fibsize, 
+			  FsaNormal, 
+			  0, 1, 
+			  (fib_callback) read_callback, 
+			  (void *) scsicmd);
+	}
+
+	/*
+	 *	Check that the command queued to the controller
+	 */
+	if (status == -EINPROGRESS) 
+		return 0;
+		
+	printk(KERN_WARNING "aac_read: fib_send failed with status: %d.\n", status);
+	/*
+	 *	For some reason, the Fib didn't queue, return QUEUE_FULL
+	 */
+	scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_TASK_SET_FULL;
+	aac_io_done(scsicmd);
+	fib_complete(cmd_fibcontext);
+	fib_free(cmd_fibcontext);
+	return 0;
+}
+
+static int aac_write(struct scsi_cmnd * scsicmd, int cid)
+{
+	u32 lba;
+	u32 count;
+	int status;
+	u16 fibsize;
+	struct aac_dev *dev;
+	struct fib * cmd_fibcontext;
+
+	dev = (struct aac_dev *)scsicmd->device->host->hostdata;
+	/*
+	 *	Get block address and transfer length
+	 */
+	if (scsicmd->cmnd[0] == WRITE_6)	/* 6 byte command */
+	{
+		lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
+		count = scsicmd->cmnd[4];
+		if (count == 0)
+			count = 256;
+	} else {
+		dprintk((KERN_DEBUG "aachba: received a write(10) command on id %d.\n", cid));
+		lba = (scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
+		count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
+	}
+	dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %u, t = %ld.\n",
+	  smp_processor_id(), lba, jiffies));
+	/*
+	 *	Allocate and initialize a Fib then setup a BlockWrite command
+	 */
+	if (!(cmd_fibcontext = fib_alloc(dev))) {
+		scsicmd->result = DID_ERROR << 16;
+		aac_io_done(scsicmd);
+		return 0;
+	}
+	fib_init(cmd_fibcontext);
+
+	if(dev->dac_support == 1) {
+		struct aac_write64 *writecmd;
+		writecmd = (struct aac_write64 *) fib_data(cmd_fibcontext);
+		writecmd->command = cpu_to_le32(VM_CtHostWrite64);
+		writecmd->cid = cpu_to_le16(cid);
+		writecmd->sector_count = cpu_to_le16(count); 
+		writecmd->block = cpu_to_le32(lba);
+		writecmd->pad	= 0;
+		writecmd->flags	= 0;
+
+		aac_build_sg64(scsicmd, &writecmd->sg);
+		fibsize = sizeof(struct aac_write64) + 
+			((le32_to_cpu(writecmd->sg.count) - 1) * 
+			 sizeof (struct sgentry64));
+		BUG_ON (fibsize > (sizeof(struct hw_fib) - 
+					sizeof(struct aac_fibhdr)));
+		/*
+		 *	Now send the Fib to the adapter
+		 */
+		status = fib_send(ContainerCommand64, 
+			  cmd_fibcontext, 
+			  fibsize, 
+			  FsaNormal, 
+			  0, 1, 
+			  (fib_callback) write_callback, 
+			  (void *) scsicmd);
+	} else {
+		struct aac_write *writecmd;
+		writecmd = (struct aac_write *) fib_data(cmd_fibcontext);
+		writecmd->command = cpu_to_le32(VM_CtBlockWrite);
+		writecmd->cid = cpu_to_le32(cid);
+		writecmd->block = cpu_to_le32(lba);
+		writecmd->count = cpu_to_le32(count * 512);
+		writecmd->sg.count = cpu_to_le32(1);
+		/* ->stable is not used - it formerly selected the type of write */
+
+		if (count * 512 > (64 * 1024)) {
+			BUG();
+		}
+
+		aac_build_sg(scsicmd, &writecmd->sg);
+		fibsize = sizeof(struct aac_write) + 
+			((le32_to_cpu(writecmd->sg.count) - 1) * 
+			 sizeof (struct sgentry));
+		BUG_ON (fibsize > (sizeof(struct hw_fib) - 
+					sizeof(struct aac_fibhdr)));
+		/*
+		 *	Now send the Fib to the adapter
+		 */
+		status = fib_send(ContainerCommand, 
+			  cmd_fibcontext, 
+			  fibsize, 
+			  FsaNormal, 
+			  0, 1, 
+			  (fib_callback) write_callback, 
+			  (void *) scsicmd);
+	}
+
+	/*
+	 *	Check that the command queued to the controller
+	 */
+	if (status == -EINPROGRESS)
+	{
+		dprintk((KERN_DEBUG "write queued.\n"));
+		return 0;
+	}
+
+	printk(KERN_WARNING "aac_write: fib_send failed with status: %d\n", status);
+	/*
+	 *	For some reason, the Fib didn't queue, return QUEUE_FULL
+	 */
+	scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_TASK_SET_FULL;
+	aac_io_done(scsicmd);
+
+	fib_complete(cmd_fibcontext);
+	fib_free(cmd_fibcontext);
+	return 0;
+}
+
+static void synchronize_callback(void *context, struct fib *fibptr)
+{
+	struct aac_synchronize_reply *synchronizereply;
+	struct scsi_cmnd *cmd;
+
+	cmd = context;
+
+	dprintk((KERN_DEBUG "synchronize_callback[cpu %d]: t = %ld.\n", 
+				smp_processor_id(), jiffies));
+	BUG_ON(fibptr == NULL);
+
+
+	synchronizereply = fib_data(fibptr);
+	if (le32_to_cpu(synchronizereply->status) == CT_OK)
+		cmd->result = DID_OK << 16 | 
+			COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
+	else {
+		struct scsi_device *sdev = cmd->device;
+		struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata;
+		u32 cid = ID_LUN_TO_CONTAINER(sdev->id, sdev->lun);
+		printk(KERN_WARNING 
+		     "synchronize_callback: synchronize failed, status = %d\n",
+		     le32_to_cpu(synchronizereply->status));
+		cmd->result = DID_OK << 16 | 
+			COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
+		set_sense((u8 *)&dev->fsa_dev[cid].sense_data,
+				    HARDWARE_ERROR,
+				    SENCODE_INTERNAL_TARGET_FAILURE,
+				    ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
+				    0, 0);
+		memcpy(cmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
+		  min(sizeof(dev->fsa_dev[cid].sense_data), 
+			  sizeof(cmd->sense_buffer)));
+	}
+
+	fib_complete(fibptr);
+	fib_free(fibptr);
+	aac_io_done(cmd);
+}
+
+static int aac_synchronize(struct scsi_cmnd *scsicmd, int cid)
+{
+	int status;
+	struct fib *cmd_fibcontext;
+	struct aac_synchronize *synchronizecmd;
+	struct scsi_cmnd *cmd;
+	struct scsi_device *sdev = scsicmd->device;
+	int active = 0;
+	unsigned long flags;
+
+	/*
+	 * Wait for all commands to complete to this specific
+	 * target (block).
+	 */
+	spin_lock_irqsave(&sdev->list_lock, flags);
+	list_for_each_entry(cmd, &sdev->cmd_list, list)
+		if (cmd != scsicmd && cmd->serial_number != 0) {
+			++active;
+			break;
+		}
+
+	spin_unlock_irqrestore(&sdev->list_lock, flags);
+
+	/*
+	 *	Yield the processor (requeue for later)
+	 */
+	if (active)
+		return SCSI_MLQUEUE_DEVICE_BUSY;
+
+	/*
+	 *	Allocate and initialize a Fib
+	 */
+	if (!(cmd_fibcontext = 
+	    fib_alloc((struct aac_dev *)scsicmd->device->host->hostdata))) 
+		return SCSI_MLQUEUE_HOST_BUSY;
+
+	fib_init(cmd_fibcontext);
+
+	synchronizecmd = fib_data(cmd_fibcontext);
+	synchronizecmd->command = cpu_to_le32(VM_ContainerConfig);
+	synchronizecmd->type = cpu_to_le32(CT_FLUSH_CACHE);
+	synchronizecmd->cid = cpu_to_le32(cid);
+	synchronizecmd->count = 
+	     cpu_to_le32(sizeof(((struct aac_synchronize_reply *)NULL)->data));
+
+	/*
+	 *	Now send the Fib to the adapter
+	 */
+	status = fib_send(ContainerCommand,
+		  cmd_fibcontext,
+		  sizeof(struct aac_synchronize),
+		  FsaNormal,
+		  0, 1,
+		  (fib_callback)synchronize_callback,
+		  (void *)scsicmd);
+
+	/*
+	 *	Check that the command queued to the controller
+	 */
+	if (status == -EINPROGRESS)
+		return 0;
+
+	printk(KERN_WARNING 
+		"aac_synchronize: fib_send failed with status: %d.\n", status);
+	fib_complete(cmd_fibcontext);
+	fib_free(cmd_fibcontext);
+	return SCSI_MLQUEUE_HOST_BUSY;
+}
+
+/**
+ *	aac_scsi_cmd()		-	Process SCSI command
+ *	@scsicmd:		SCSI command block
+ *
+ *	Emulate a SCSI command and queue the required request for the
+ *	aacraid firmware.
+ */
+ 
+int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
+{
+	u32 cid = 0;
+	struct Scsi_Host *host = scsicmd->device->host;
+	struct aac_dev *dev = (struct aac_dev *)host->hostdata;
+	struct fsa_dev_info *fsa_dev_ptr = dev->fsa_dev;
+	int cardtype = dev->cardtype;
+	int ret;
+	
+	/*
+	 *	If the bus, id or lun is out of range, return fail
+	 *	Test does not apply to ID 16, the pseudo id for the controller
+	 *	itself.
+	 */
+	if (scsicmd->device->id != host->this_id) {
+		if (scsicmd->device->channel == 0) {
+			if ((scsicmd->device->id >= dev->maximum_num_containers) ||
+			    (scsicmd->device->lun != 0)) {
+				scsicmd->result = DID_NO_CONNECT << 16;
+				scsicmd->scsi_done(scsicmd);
+				return 0;
+			}
+			cid = ID_LUN_TO_CONTAINER(scsicmd->device->id, scsicmd->device->lun);
+
+			/*
+			 *	If the target container doesn't exist, it may have
+			 *	been newly created
+			 */
+			if ((fsa_dev_ptr[cid].valid & 1) == 0) {
+				switch (scsicmd->cmnd[0]) {
+				case INQUIRY:
+				case READ_CAPACITY:
+				case TEST_UNIT_READY:
+					spin_unlock_irq(host->host_lock);
+					probe_container(dev, cid);
+					spin_lock_irq(host->host_lock);
+					if (fsa_dev_ptr[cid].valid == 0) {
+						scsicmd->result = DID_NO_CONNECT << 16;
+						scsicmd->scsi_done(scsicmd);
+						return 0;
+					}
+				default:
+					break;
+				}
+			}
+			/*
+			 *	If the target container still doesn't exist, 
+			 *	return failure
+			 */
+			if (fsa_dev_ptr[cid].valid == 0) {
+				scsicmd->result = DID_BAD_TARGET << 16;
+				scsicmd->scsi_done(scsicmd);
+				return 0;
+			}
+		} else {  /* check for physical non-dasd devices */
+			if(dev->nondasd_support == 1){
+				return aac_send_srb_fib(scsicmd);
+			} else {
+				scsicmd->result = DID_NO_CONNECT << 16;
+				scsicmd->scsi_done(scsicmd);
+				return 0;
+			}
+		}
+	}
+	/*
+	 * else Command for the controller itself
+	 */
+	else if ((scsicmd->cmnd[0] != INQUIRY) &&	/* only INQUIRY & TUR cmnd supported for controller */
+		(scsicmd->cmnd[0] != TEST_UNIT_READY)) 
+	{
+		dprintk((KERN_WARNING "Only INQUIRY & TUR command supported for controller, rcvd = 0x%x.\n", scsicmd->cmnd[0]));
+		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
+		set_sense((u8 *) &dev->fsa_dev[cid].sense_data,
+			    ILLEGAL_REQUEST,
+			    SENCODE_INVALID_COMMAND,
+			    ASENCODE_INVALID_COMMAND, 0, 0, 0, 0);
+		memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
+		  (sizeof(dev->fsa_dev[cid].sense_data) > sizeof(scsicmd->sense_buffer))
+		    ? sizeof(scsicmd->sense_buffer)
+		    : sizeof(dev->fsa_dev[cid].sense_data));
+		scsicmd->scsi_done(scsicmd);
+		return 0;
+	}
+
+
+	/* Handle commands here that don't really require going out to the adapter */
+	switch (scsicmd->cmnd[0]) {
+	case INQUIRY:
+	{
+		struct inquiry_data *inq_data_ptr;
+
+		dprintk((KERN_DEBUG "INQUIRY command, ID: %d.\n", scsicmd->device->id));
+		inq_data_ptr = (struct inquiry_data *)scsicmd->request_buffer;
+		memset(inq_data_ptr, 0, sizeof (struct inquiry_data));
+
+		inq_data_ptr->inqd_ver = 2;	/* claim compliance to SCSI-2 */
+		inq_data_ptr->inqd_dtq = 0x80;	/* set RMB bit to one indicating that the medium is removable */
+		inq_data_ptr->inqd_rdf = 2;	/* A response data format value of two indicates that the data shall be in the format specified in SCSI-2 */
+		inq_data_ptr->inqd_len = 31;
+		/*Format for "pad2" is  RelAdr | WBus32 | WBus16 |  Sync  | Linked |Reserved| CmdQue | SftRe */
+		inq_data_ptr->inqd_pad2= 0x32 ;	 /*WBus16|Sync|CmdQue */
+		/*
+		 *	Set the Vendor, Product, and Revision Level
+		 *	see: <vendor>.c i.e. aac.c
+		 */
+		if (scsicmd->device->id == host->this_id) {
+			setinqstr(cardtype, (void *) (inq_data_ptr->inqd_vid), (sizeof(container_types)/sizeof(char *)));
+			inq_data_ptr->inqd_pdt = INQD_PDT_PROC;	/* Processor device */
+			scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
+			scsicmd->scsi_done(scsicmd);
+			return 0;
+		}
+		setinqstr(cardtype, (void *) (inq_data_ptr->inqd_vid), fsa_dev_ptr[cid].type);
+		inq_data_ptr->inqd_pdt = INQD_PDT_DA;	/* Direct/random access device */
+		return aac_get_container_name(scsicmd, cid);
+	}
+	case READ_CAPACITY:
+	{
+		u32 capacity;
+		char *cp;
+
+		dprintk((KERN_DEBUG "READ CAPACITY command.\n"));
+		if (fsa_dev_ptr[cid].size <= 0x100000000LL)
+			capacity = fsa_dev_ptr[cid].size - 1;
+		else
+			capacity = (u32)-1;
+		cp = scsicmd->request_buffer;
+		/* bytes 0-3: last LBA, big-endian */
+		cp[0] = (capacity >> 24) & 0xff;
+		cp[1] = (capacity >> 16) & 0xff;
+		cp[2] = (capacity >> 8) & 0xff;
+		cp[3] = (capacity >> 0) & 0xff;
+		/* bytes 4-7: block length, 0x00000200 = 512 bytes */
+		cp[4] = 0;
+		cp[5] = 0;
+		cp[6] = 2;
+		cp[7] = 0;
+
+		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
+		scsicmd->scsi_done(scsicmd);
+
+		return 0;
+	}
+
+	case MODE_SENSE:
+	{
+		char *mode_buf;
+
+		dprintk((KERN_DEBUG "MODE SENSE command.\n"));
+		mode_buf = scsicmd->request_buffer;
+		mode_buf[0] = 3;	/* Mode data length */
+		mode_buf[1] = 0;	/* Medium type - default */
+		mode_buf[2] = 0;	/* Device-specific param, bit 8: 0/1 = write enabled/protected */
+		mode_buf[3] = 0;	/* Block descriptor length */
+
+		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
+		scsicmd->scsi_done(scsicmd);
+
+		return 0;
+	}
+	case MODE_SENSE_10:
+	{
+		char *mode_buf;
+
+		dprintk((KERN_DEBUG "MODE SENSE 10 byte command.\n"));
+		mode_buf = scsicmd->request_buffer;
+		mode_buf[0] = 0;	/* Mode data length (MSB) */
+		mode_buf[1] = 6;	/* Mode data length (LSB) */
+		mode_buf[2] = 0;	/* Medium type - default */
+		mode_buf[3] = 0;	/* Device-specific param, bit 8: 0/1 = write enabled/protected */
+		mode_buf[4] = 0;	/* reserved */
+		mode_buf[5] = 0;	/* reserved */
+		mode_buf[6] = 0;	/* Block descriptor length (MSB) */
+		mode_buf[7] = 0;	/* Block descriptor length (LSB) */
+
+		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
+		scsicmd->scsi_done(scsicmd);
+
+		return 0;
+	}
+	case REQUEST_SENSE:
+		dprintk((KERN_DEBUG "REQUEST SENSE command.\n"));
+		memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, sizeof (struct sense_data));
+		memset(&dev->fsa_dev[cid].sense_data, 0, sizeof (struct sense_data));
+		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
+		scsicmd->scsi_done(scsicmd);
+		return 0;
+
+	case ALLOW_MEDIUM_REMOVAL:
+		dprintk((KERN_DEBUG "LOCK command.\n"));
+		if (scsicmd->cmnd[4])
+			fsa_dev_ptr[cid].locked = 1;
+		else
+			fsa_dev_ptr[cid].locked = 0;
+
+		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
+		scsicmd->scsi_done(scsicmd);
+		return 0;
+	/*
+	 *	These commands are all No-Ops
+	 */
+	case TEST_UNIT_READY:
+	case RESERVE:
+	case RELEASE:
+	case REZERO_UNIT:
+	case REASSIGN_BLOCKS:
+	case SEEK_10:
+	case START_STOP:
+		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
+		scsicmd->scsi_done(scsicmd);
+		return 0;
+	}
+
+	switch (scsicmd->cmnd[0]) 
+	{
+		case READ_6:
+		case READ_10:
+			/*
+			 *	Hack to keep track of ordinal number of the device that
+			 *	corresponds to a container. Needed to convert
+			 *	containers to /dev/sd device names
+			 */
+			 
+			spin_unlock_irq(host->host_lock);
+			if  (scsicmd->request->rq_disk)
+				memcpy(fsa_dev_ptr[cid].devname,
+					scsicmd->request->rq_disk->disk_name,
+					8);
+
+			ret = aac_read(scsicmd, cid);
+			spin_lock_irq(host->host_lock);
+			return ret;
+
+		case WRITE_6:
+		case WRITE_10:
+			spin_unlock_irq(host->host_lock);
+			ret = aac_write(scsicmd, cid);
+			spin_lock_irq(host->host_lock);
+			return ret;
+
+		case SYNCHRONIZE_CACHE:
+			/* Issue FIB to tell Firmware to flush its cache */
+			return aac_synchronize(scsicmd, cid);
+			
+		default:
+			/*
+			 *	Unhandled commands
+			 */
+			printk(KERN_WARNING "Unhandled SCSI Command: 0x%x.\n", scsicmd->cmnd[0]);
+			scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
+			set_sense((u8 *) &dev->fsa_dev[cid].sense_data,
+				ILLEGAL_REQUEST, SENCODE_INVALID_COMMAND,
+				ASENCODE_INVALID_COMMAND, 0, 0, 0, 0);
+			memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
+			  (sizeof(dev->fsa_dev[cid].sense_data) > sizeof(scsicmd->sense_buffer))
+			    ? sizeof(scsicmd->sense_buffer)
+			    : sizeof(dev->fsa_dev[cid].sense_data));
+			scsicmd->scsi_done(scsicmd);
+			return 0;
+	}
+}
+
+static int query_disk(struct aac_dev *dev, void __user *arg)
+{
+	struct aac_query_disk qd;
+	struct fsa_dev_info *fsa_dev_ptr;
+
+	fsa_dev_ptr = dev->fsa_dev;
+	if (copy_from_user(&qd, arg, sizeof (struct aac_query_disk)))
+		return -EFAULT;
+	if (qd.cnum == -1)
+		qd.cnum = ID_LUN_TO_CONTAINER(qd.id, qd.lun);
+	else if ((qd.bus == -1) && (qd.id == -1) && (qd.lun == -1)) 
+	{
+		if (qd.cnum < 0 || qd.cnum >= dev->maximum_num_containers)
+			return -EINVAL;
+		qd.instance = dev->scsi_host_ptr->host_no;
+		qd.bus = 0;
+		qd.id = CONTAINER_TO_ID(qd.cnum);
+		qd.lun = CONTAINER_TO_LUN(qd.cnum);
+	}
+	else return -EINVAL;
+
+	qd.valid = fsa_dev_ptr[qd.cnum].valid;
+	qd.locked = fsa_dev_ptr[qd.cnum].locked;
+	qd.deleted = fsa_dev_ptr[qd.cnum].deleted;
+
+	if (fsa_dev_ptr[qd.cnum].devname[0] == '\0')
+		qd.unmapped = 1;
+	else
+		qd.unmapped = 0;
+
+	strlcpy(qd.name, fsa_dev_ptr[qd.cnum].devname,
+	  min(sizeof(qd.name), sizeof(fsa_dev_ptr[qd.cnum].devname) + 1));
+
+	if (copy_to_user(arg, &qd, sizeof (struct aac_query_disk)))
+		return -EFAULT;
+	return 0;
+}
+
+static int force_delete_disk(struct aac_dev *dev, void __user *arg)
+{
+	struct aac_delete_disk dd;
+	struct fsa_dev_info *fsa_dev_ptr;
+
+	fsa_dev_ptr = dev->fsa_dev;
+
+	if (copy_from_user(&dd, arg, sizeof (struct aac_delete_disk)))
+		return -EFAULT;
+
+	if (dd.cnum >= dev->maximum_num_containers)
+		return -EINVAL;
+	/*
+	 *	Mark this container as being deleted.
+	 */
+	fsa_dev_ptr[dd.cnum].deleted = 1;
+	/*
+	 *	Mark the container as no longer valid
+	 */
+	fsa_dev_ptr[dd.cnum].valid = 0;
+	return 0;
+}
+
+static int delete_disk(struct aac_dev *dev, void __user *arg)
+{
+	struct aac_delete_disk dd;
+	struct fsa_dev_info *fsa_dev_ptr;
+
+	fsa_dev_ptr = dev->fsa_dev;
+
+	if (copy_from_user(&dd, arg, sizeof (struct aac_delete_disk)))
+		return -EFAULT;
+
+	if (dd.cnum >= dev->maximum_num_containers)
+		return -EINVAL;
+	/*
+	 *	If the container is locked, it can not be deleted by the API.
+	 */
+	if (fsa_dev_ptr[dd.cnum].locked)
+		return -EBUSY;
+	else {
+		/*
+		 *	Mark the container as no longer being valid.
+		 */
+		fsa_dev_ptr[dd.cnum].valid = 0;
+		fsa_dev_ptr[dd.cnum].devname[0] = '\0';
+		return 0;
+	}
+}
+
+int aac_dev_ioctl(struct aac_dev *dev, int cmd, void __user *arg)
+{
+	switch (cmd) {
+	case FSACTL_QUERY_DISK:
+		return query_disk(dev, arg);
+	case FSACTL_DELETE_DISK:
+		return delete_disk(dev, arg);
+	case FSACTL_FORCE_DELETE_DISK:
+		return force_delete_disk(dev, arg);
+	case FSACTL_GET_CONTAINERS:
+		return aac_get_containers(dev);
+	default:
+		return -ENOTTY;
+	}
+}
+
+/**
+ *	aac_srb_callback
+ *	@context: the context set in the fib - here it is the scsi command
+ *	@fibptr: pointer to the fib
+ *
+ *	Handles the completion of a scsi command to a non-DASD device
+ */
+
+static void aac_srb_callback(void *context, struct fib * fibptr)
+{
+	struct aac_dev *dev;
+	struct aac_srb_reply *srbreply;
+	struct scsi_cmnd *scsicmd;
+
+	scsicmd = (struct scsi_cmnd *) context;
+	dev = (struct aac_dev *)scsicmd->device->host->hostdata;
+
+	if (fibptr == NULL)
+		BUG();
+
+	srbreply = (struct aac_srb_reply *) fib_data(fibptr);
+
+	scsicmd->sense_buffer[0] = '\0';  /* Initialize sense valid flag to false */
+	/*
+	 *	Calculate resid for sg 
+	 */
+	 
+	scsicmd->resid = scsicmd->request_bufflen - 
+		le32_to_cpu(srbreply->data_xfer_length);
+
+	if(scsicmd->use_sg)
+		pci_unmap_sg(dev->pdev, 
+			(struct scatterlist *)scsicmd->buffer,
+			scsicmd->use_sg,
+			scsicmd->sc_data_direction);
+	else if(scsicmd->request_bufflen)
+		pci_unmap_single(dev->pdev, scsicmd->SCp.dma_handle, scsicmd->request_bufflen,
+			scsicmd->sc_data_direction);
+
+	/*
+	 * First check the fib status
+	 */
+
+	if (le32_to_cpu(srbreply->status) != ST_OK){
+		int len;
+		printk(KERN_WARNING "aac_srb_callback: srb failed, status = %d\n", le32_to_cpu(srbreply->status));
+		len = (le32_to_cpu(srbreply->sense_data_size) > 
+				sizeof(scsicmd->sense_buffer)) ?
+				sizeof(scsicmd->sense_buffer) : 
+				le32_to_cpu(srbreply->sense_data_size);
+		scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
+		memcpy(scsicmd->sense_buffer, srbreply->sense_data, len);
+	}
+
+	/*
+	 * Next check the srb status: only the low six bits carry the
+	 * status code proper, the high bits are flag bits, hence the
+	 * 0x3f mask.
+	 */
+	switch ((le32_to_cpu(srbreply->srb_status)) & 0x3f) {
+	case SRB_STATUS_ERROR_RECOVERY:
+	case SRB_STATUS_PENDING:
+	case SRB_STATUS_SUCCESS:
+		if(scsicmd->cmnd[0] == INQUIRY ){
+			u8 b;
+			u8 b1;
+			/* We can't expose disk devices because we can't tell whether they
+			 * are the raw container drives or stand alone drives.  If they have
+			 * the removable bit set then we should expose them though.
+			 */
+			b = (*(u8*)scsicmd->buffer)&0x1f;
+			b1 = ((u8*)scsicmd->buffer)[1];
+			if( b==TYPE_TAPE || b==TYPE_WORM || b==TYPE_ROM || b==TYPE_MOD|| b==TYPE_MEDIUM_CHANGER 
+					|| (b==TYPE_DISK && (b1&0x80)) ){
+				scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
+			/*
+			 * We will allow disk devices if in RAID/SCSI mode and
+			 * the channel is 2
+			 */
+			} else if ((dev->raid_scsi_mode) &&
+					(scsicmd->device->channel == 2)) {
+				scsicmd->result = DID_OK << 16 | 
+						COMMAND_COMPLETE << 8;
+			} else {
+				scsicmd->result = DID_NO_CONNECT << 16 | 
+						COMMAND_COMPLETE << 8;
+			}
+		} else {
+			scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
+		}
+		break;
+	case SRB_STATUS_DATA_OVERRUN:
+		switch(scsicmd->cmnd[0]){
+		case  READ_6:
+		case  WRITE_6:
+		case  READ_10:
+		case  WRITE_10:
+		case  READ_12:
+		case  WRITE_12:
+			if(le32_to_cpu(srbreply->data_xfer_length) < scsicmd->underflow ) {
+				printk(KERN_WARNING"aacraid: SCSI CMD underflow\n");
+			} else {
+				printk(KERN_WARNING"aacraid: SCSI CMD Data Overrun\n");
+			}
+			scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
+			break;
+		case INQUIRY: {
+			u8 b;
+			u8 b1;
+			/* We can't expose disk devices because we can't tell whether they
+			* are the raw container drives or stand alone drives
+			*/
+			b = (*(u8*)scsicmd->buffer)&0x1f;
+			b1 = ((u8*)scsicmd->buffer)[1];
+			if( b==TYPE_TAPE || b==TYPE_WORM || b==TYPE_ROM || b==TYPE_MOD|| b==TYPE_MEDIUM_CHANGER
+					|| (b==TYPE_DISK && (b1&0x80)) ){
+				scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
+			/*
+			 * We will allow disk devices if in RAID/SCSI mode and
+			 * the channel is 2
+			 */
+			} else if ((dev->raid_scsi_mode) &&
+					(scsicmd->device->channel == 2)) {
+				scsicmd->result = DID_OK << 16 | 
+						COMMAND_COMPLETE << 8;
+			} else {
+				scsicmd->result = DID_NO_CONNECT << 16 | 
+						COMMAND_COMPLETE << 8;
+			}
+			break;
+		}
+		default:
+			scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
+			break;
+		}
+		break;
+	case SRB_STATUS_ABORTED:
+		scsicmd->result = DID_ABORT << 16 | ABORT << 8;
+		break;
+	case SRB_STATUS_ABORT_FAILED:
+		// Not sure about this one - but assuming the hba was trying to abort for some reason
+		scsicmd->result = DID_ERROR << 16 | ABORT << 8;
+		break;
+	case SRB_STATUS_PARITY_ERROR:
+		scsicmd->result = DID_PARITY << 16 | MSG_PARITY_ERROR << 8;
+		break;
+	case SRB_STATUS_NO_DEVICE:
+	case SRB_STATUS_INVALID_PATH_ID:
+	case SRB_STATUS_INVALID_TARGET_ID:
+	case SRB_STATUS_INVALID_LUN:
+	case SRB_STATUS_SELECTION_TIMEOUT:
+		scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
+		break;
+
+	case SRB_STATUS_COMMAND_TIMEOUT:
+	case SRB_STATUS_TIMEOUT:
+		scsicmd->result = DID_TIME_OUT << 16 | COMMAND_COMPLETE << 8;
+		break;
+
+	case SRB_STATUS_BUSY:
+		scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
+		break;
+
+	case SRB_STATUS_BUS_RESET:
+		scsicmd->result = DID_RESET << 16 | COMMAND_COMPLETE << 8;
+		break;
+
+	case SRB_STATUS_MESSAGE_REJECTED:
+		scsicmd->result = DID_ERROR << 16 | MESSAGE_REJECT << 8;
+		break;
+	case SRB_STATUS_REQUEST_FLUSHED:
+	case SRB_STATUS_ERROR:
+	case SRB_STATUS_INVALID_REQUEST:
+	case SRB_STATUS_REQUEST_SENSE_FAILED:
+	case SRB_STATUS_NO_HBA:
+	case SRB_STATUS_UNEXPECTED_BUS_FREE:
+	case SRB_STATUS_PHASE_SEQUENCE_FAILURE:
+	case SRB_STATUS_BAD_SRB_BLOCK_LENGTH:
+	case SRB_STATUS_DELAYED_RETRY:
+	case SRB_STATUS_BAD_FUNCTION:
+	case SRB_STATUS_NOT_STARTED:
+	case SRB_STATUS_NOT_IN_USE:
+	case SRB_STATUS_FORCE_ABORT:
+	case SRB_STATUS_DOMAIN_VALIDATION_FAIL:
+	default:
+#ifdef AAC_DETAILED_STATUS_INFO
+		printk("aacraid: SRB ERROR(%u) %s scsi cmd 0x%x - scsi status 0x%x\n",
+			le32_to_cpu(srbreply->srb_status) & 0x3F,
+			aac_get_status_string(
+				le32_to_cpu(srbreply->srb_status) & 0x3F), 
+			scsicmd->cmnd[0], 
+			le32_to_cpu(srbreply->scsi_status));
+#endif
+		scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
+		break;
+	}
+	if (le32_to_cpu(srbreply->scsi_status) == SAM_STAT_CHECK_CONDITION) { /* Check Condition */
+		int len;
+		scsicmd->result |= SAM_STAT_CHECK_CONDITION;
+		len = (le32_to_cpu(srbreply->sense_data_size) > 
+				sizeof(scsicmd->sense_buffer)) ?
+				sizeof(scsicmd->sense_buffer) :
+				le32_to_cpu(srbreply->sense_data_size);
+#ifdef AAC_DETAILED_STATUS_INFO
+		dprintk((KERN_WARNING "aac_srb_callback: check condition, status = %d len=%d\n", 
+					le32_to_cpu(srbreply->status), len));
+#endif
+		memcpy(scsicmd->sense_buffer, srbreply->sense_data, len);
+		
+	}
+	/*
+	 * OR in the scsi status (already shifted up a bit)
+	 */
+	scsicmd->result |= le32_to_cpu(srbreply->scsi_status);
+
+	fib_complete(fibptr);
+	fib_free(fibptr);
+	aac_io_done(scsicmd);
+}
+
+/**
+ *	aac_send_srb_fib	-	build and send an SRB FIB
+ *	@scsicmd: the scsi command block
+ *
+ *	This routine will form a FIB and fill in the aac_srb from the
+ *	scsicmd passed in.
+ */
+
+static int aac_send_srb_fib(struct scsi_cmnd* scsicmd)
+{
+	struct fib* cmd_fibcontext;
+	struct aac_dev* dev;
+	int status;
+	struct aac_srb *srbcmd;
+	u16 fibsize;
+	u32 flag;
+	u32 timeout;
+
+	if( scsicmd->device->id > 15 || scsicmd->device->lun > 7) {
+		scsicmd->result = DID_NO_CONNECT << 16;
+		scsicmd->scsi_done(scsicmd);
+		return 0;
+	}
+
+	dev = (struct aac_dev *)scsicmd->device->host->hostdata;
+	switch(scsicmd->sc_data_direction){
+	case DMA_TO_DEVICE:
+		flag = SRB_DataOut;
+		break;
+	case DMA_BIDIRECTIONAL:
+		flag = SRB_DataIn | SRB_DataOut;
+		break;
+	case DMA_FROM_DEVICE:
+		flag = SRB_DataIn;
+		break;
+	case DMA_NONE:
+	default:	/* shuts up some versions of gcc */
+		flag = SRB_NoDataXfer;
+		break;
+	}
+
+
+	/*
+	 *	Allocate and initialize a Fib then setup a BlockWrite command
+	 */
+	if (!(cmd_fibcontext = fib_alloc(dev))) {
+		return -1;
+	}
+	fib_init(cmd_fibcontext);
+
+	srbcmd = (struct aac_srb*) fib_data(cmd_fibcontext);
+	srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
+	srbcmd->channel  = cpu_to_le32(aac_logical_to_phys(scsicmd->device->channel));
+	srbcmd->id   = cpu_to_le32(scsicmd->device->id);
+	srbcmd->lun      = cpu_to_le32(scsicmd->device->lun);
+	srbcmd->flags    = cpu_to_le32(flag);
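+	/*
+	 * Convert the remaining Linux timeout (in jiffies) to whole
+	 * seconds for the adapter; a result of zero is clamped to one
+	 * second below.
+	 */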
+	timeout = (scsicmd->timeout-jiffies)/HZ;
+	if(timeout == 0){
+		timeout = 1;
+	}
+	srbcmd->timeout  = cpu_to_le32(timeout);  // timeout in seconds
+	srbcmd->retry_limit = 0; /* Obsolete parameter */
+	srbcmd->cdb_size = cpu_to_le32(scsicmd->cmd_len);
+	
+	if( dev->dac_support == 1 ) {
+		aac_build_sg64(scsicmd, (struct sgmap64*) &srbcmd->sg);
+		srbcmd->count = cpu_to_le32(scsicmd->request_bufflen);
+
+		memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
+		memcpy(srbcmd->cdb, scsicmd->cmnd, scsicmd->cmd_len);
+		/*
+		 *	Build Scatter/Gather list
+		 */
+		fibsize = sizeof (struct aac_srb) - sizeof (struct sgentry) +
+			((le32_to_cpu(srbcmd->sg.count) & 0xff) * 
+			 sizeof (struct sgentry64));
+		BUG_ON (fibsize > (sizeof(struct hw_fib) - 
+					sizeof(struct aac_fibhdr)));
+
+		/*
+		 *	Now send the Fib to the adapter
+		 */
+		status = fib_send(ScsiPortCommand64, cmd_fibcontext, 
+				fibsize, FsaNormal, 0, 1,
+				  (fib_callback) aac_srb_callback, 
+				  (void *) scsicmd);
+	} else {
+		aac_build_sg(scsicmd, (struct sgmap*)&srbcmd->sg);
+		srbcmd->count = cpu_to_le32(scsicmd->request_bufflen);
+
+		memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
+		memcpy(srbcmd->cdb, scsicmd->cmnd, scsicmd->cmd_len);
+		/*
+		 *	Build Scatter/Gather list
+		 */
+		fibsize = sizeof (struct aac_srb) + 
+			(((le32_to_cpu(srbcmd->sg.count) & 0xff) - 1) * 
+			 sizeof (struct sgentry));
+		BUG_ON (fibsize > (sizeof(struct hw_fib) - 
+					sizeof(struct aac_fibhdr)));
+
+		/*
+		 *	Now send the Fib to the adapter
+		 */
+		status = fib_send(ScsiPortCommand, cmd_fibcontext, fibsize, FsaNormal, 0, 1,
+				  (fib_callback) aac_srb_callback, (void *) scsicmd);
+	}
+	/*
+	 *	Check that the command queued to the controller
+	 */
+	if (status == -EINPROGRESS){
+		return 0;
+	}
+
+	printk(KERN_WARNING "aac_srb: fib_send failed with status: %d\n", status);
+	fib_complete(cmd_fibcontext);
+	fib_free(cmd_fibcontext);
+
+	return -1;
+}
+
+static unsigned long aac_build_sg(struct scsi_cmnd* scsicmd, struct sgmap* psg)
+{
+	struct aac_dev *dev;
+	unsigned long byte_count = 0;
+
+	dev = (struct aac_dev *)scsicmd->device->host->hostdata;
+	// Get rid of old data
+	psg->count = 0;
+	psg->sg[0].addr = 0;
+	psg->sg[0].count = 0;  
+	if (scsicmd->use_sg) {
+		struct scatterlist *sg;
+		int i;
+		int sg_count;
+		sg = (struct scatterlist *) scsicmd->request_buffer;
+
+		sg_count = pci_map_sg(dev->pdev, sg, scsicmd->use_sg,
+			scsicmd->sc_data_direction);
+		psg->count = cpu_to_le32(sg_count);
+
+		byte_count = 0;
+
+		for (i = 0; i < sg_count; i++) {
+			psg->sg[i].addr = cpu_to_le32(sg_dma_address(sg));
+			psg->sg[i].count = cpu_to_le32(sg_dma_len(sg));
+			byte_count += sg_dma_len(sg);
+			sg++;
+		}
+		/* hba wants the size to be exact */
+		if(byte_count > scsicmd->request_bufflen){
+			psg->sg[i-1].count -= (byte_count - scsicmd->request_bufflen);
+			byte_count = scsicmd->request_bufflen;
+		}
+		/* Check for command underflow */
+		if(scsicmd->underflow && (byte_count < scsicmd->underflow)){
+			printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n",
+					byte_count, scsicmd->underflow);
+		}
+	}
+	else if(scsicmd->request_bufflen) {
+		dma_addr_t addr; 
+		addr = pci_map_single(dev->pdev,
+				scsicmd->request_buffer,
+				scsicmd->request_bufflen,
+				scsicmd->sc_data_direction);
+		psg->count = cpu_to_le32(1);
+		psg->sg[0].addr = cpu_to_le32(addr);
+		psg->sg[0].count = cpu_to_le32(scsicmd->request_bufflen);  
+		scsicmd->SCp.dma_handle = addr;
+		byte_count = scsicmd->request_bufflen;
+	}
+	return byte_count;
+}
+
+
+static unsigned long aac_build_sg64(struct scsi_cmnd* scsicmd, struct sgmap64* psg)
+{
+	struct aac_dev *dev;
+	unsigned long byte_count = 0;
+	u64 le_addr;
+
+	dev = (struct aac_dev *)scsicmd->device->host->hostdata;
+	// Get rid of old data
+	psg->count = 0;
+	psg->sg[0].addr[0] = 0;
+	psg->sg[0].addr[1] = 0;
+	psg->sg[0].count = 0;
+	if (scsicmd->use_sg) {
+		struct scatterlist *sg;
+		int i;
+		int sg_count;
+		sg = (struct scatterlist *) scsicmd->request_buffer;
+
+		sg_count = pci_map_sg(dev->pdev, sg, scsicmd->use_sg,
+			scsicmd->sc_data_direction);
+		psg->count = cpu_to_le32(sg_count);
+
+		byte_count = 0;
+
+		for (i = 0; i < sg_count; i++) {
+			le_addr = cpu_to_le64(sg_dma_address(sg));
+			psg->sg[i].addr[1] = (u32)(le_addr>>32);
+			psg->sg[i].addr[0] = (u32)(le_addr & 0xffffffff);
+			psg->sg[i].count = cpu_to_le32(sg_dma_len(sg));
+			byte_count += sg_dma_len(sg);
+			sg++;
+		}
+		/* hba wants the size to be exact */
+		if(byte_count > scsicmd->request_bufflen){
+			psg->sg[i-1].count -= (byte_count - scsicmd->request_bufflen);
+			byte_count = scsicmd->request_bufflen;
+		}
+		/* Check for command underflow */
+		if(scsicmd->underflow && (byte_count < scsicmd->underflow)){
+			printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n",
+					byte_count, scsicmd->underflow);
+		}
+	}
+	else if(scsicmd->request_bufflen) {
+		dma_addr_t addr; 
+		addr = pci_map_single(dev->pdev,
+				scsicmd->request_buffer,
+				scsicmd->request_bufflen,
+				scsicmd->sc_data_direction);
+		psg->count = cpu_to_le32(1);
+		le_addr = cpu_to_le64(addr);
+		psg->sg[0].addr[1] = (u32)(le_addr>>32);
+		psg->sg[0].addr[0] = (u32)(le_addr & 0xffffffff);
+		psg->sg[0].count = cpu_to_le32(scsicmd->request_bufflen);  
+		scsicmd->SCp.dma_handle = addr;
+		byte_count = scsicmd->request_bufflen;
+	}
+	return byte_count;
+}
+
+#ifdef AAC_DETAILED_STATUS_INFO
+
+struct aac_srb_status_info {
+	u32	status;
+	char	*str;
+};
+
+
+static struct aac_srb_status_info srb_status_info[] = {
+	{ SRB_STATUS_PENDING,		"Pending Status"},
+	{ SRB_STATUS_SUCCESS,		"Success"},
+	{ SRB_STATUS_ABORTED,		"Aborted Command"},
+	{ SRB_STATUS_ABORT_FAILED,	"Abort Failed"},
+	{ SRB_STATUS_ERROR,		"Error Event"}, 
+	{ SRB_STATUS_BUSY,		"Device Busy"},
+	{ SRB_STATUS_INVALID_REQUEST,	"Invalid Request"},
+	{ SRB_STATUS_INVALID_PATH_ID,	"Invalid Path ID"},
+	{ SRB_STATUS_NO_DEVICE,		"No Device"},
+	{ SRB_STATUS_TIMEOUT,		"Timeout"},
+	{ SRB_STATUS_SELECTION_TIMEOUT,	"Selection Timeout"},
+	{ SRB_STATUS_COMMAND_TIMEOUT,	"Command Timeout"},
+	{ SRB_STATUS_MESSAGE_REJECTED,	"Message Rejected"},
+	{ SRB_STATUS_BUS_RESET,		"Bus Reset"},
+	{ SRB_STATUS_PARITY_ERROR,	"Parity Error"},
+	{ SRB_STATUS_REQUEST_SENSE_FAILED,"Request Sense Failed"},
+	{ SRB_STATUS_NO_HBA,		"No HBA"},
+	{ SRB_STATUS_DATA_OVERRUN,	"Data Overrun/Data Underrun"},
+	{ SRB_STATUS_UNEXPECTED_BUS_FREE,"Unexpected Bus Free"},
+	{ SRB_STATUS_PHASE_SEQUENCE_FAILURE,"Phase Error"},
+	{ SRB_STATUS_BAD_SRB_BLOCK_LENGTH,"Bad Srb Block Length"},
+	{ SRB_STATUS_REQUEST_FLUSHED,	"Request Flushed"},
+	{ SRB_STATUS_DELAYED_RETRY,	"Delayed Retry"},
+	{ SRB_STATUS_INVALID_LUN,	"Invalid LUN"}, 
+	{ SRB_STATUS_INVALID_TARGET_ID,	"Invalid TARGET ID"},
+	{ SRB_STATUS_BAD_FUNCTION,	"Bad Function"},
+	{ SRB_STATUS_ERROR_RECOVERY,	"Error Recovery"},
+	{ SRB_STATUS_NOT_STARTED,	"Not Started"},
+	{ SRB_STATUS_NOT_IN_USE,	"Not In Use"},
+	{ SRB_STATUS_FORCE_ABORT,	"Force Abort"},
+	{ SRB_STATUS_DOMAIN_VALIDATION_FAIL,"Domain Validation Failure"},
+	{ 0xff,				"Unknown Error"}
+};
+
+char *aac_get_status_string(u32 status)
+{
+	int i;
+
+	for(i=0; i < (sizeof(srb_status_info)/sizeof(struct aac_srb_status_info)); i++ ){
+		if(srb_status_info[i].status == status){
+			return srb_status_info[i].str;
+		}
+	}
+
+	return "Bad Status Code";
+}
+
+#endif
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
new file mode 100644
index 000000000000..700d90331c1c
--- /dev/null
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -0,0 +1,1623 @@
+#if (!defined(dprintk))
+# define dprintk(x)
+#endif
+
+/*------------------------------------------------------------------------------
+ *              D E F I N E S
+ *----------------------------------------------------------------------------*/
+
+#define MAXIMUM_NUM_CONTAINERS	32
+
+#define AAC_NUM_FIB		(256 + 64)
+#define AAC_NUM_IO_FIB		100
+
+#define AAC_MAX_LUN		(8)
+
+#define AAC_MAX_HOSTPHYSMEMPAGES (0xfffff)
+
+/*
+ * These macros convert from physical channels to virtual channels
+ */
+#define CONTAINER_CHANNEL		(0)
+#define ID_LUN_TO_CONTAINER(id, lun)	(id)
+#define CONTAINER_TO_CHANNEL(cont)	(CONTAINER_CHANNEL)
+#define CONTAINER_TO_ID(cont)		(cont)
+#define CONTAINER_TO_LUN(cont)		(0)
+
+#define aac_phys_to_logical(x)  ((x)+1)
+#define aac_logical_to_phys(x)  ((x) ? (x)-1 : 0)
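+
+/*
+ * Illustrative mapping (a sketch, not part of the interface): logical
+ * channel 0 is the container channel, so physical channels shift up
+ * by one:
+ *
+ *	aac_phys_to_logical(0) == 1	aac_logical_to_phys(1) == 0
+ *	aac_phys_to_logical(2) == 3	aac_logical_to_phys(0) == 0
+ */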
+
+/* #define AAC_DETAILED_STATUS_INFO */
+
+struct diskparm
+{
+	int heads;
+	int sectors;
+	int cylinders;
+};
+
+
+/*
+ *	DON'T CHANGE THE ORDER, this is set by the firmware
+ */
+ 
+#define		CT_NONE			0
+#define		CT_VOLUME		1
+#define		CT_MIRROR		2
+#define		CT_STRIPE		3
+#define		CT_RAID5		4
+#define		CT_SSRW			5
+#define		CT_SSRO			6
+#define		CT_MORPH		7
+#define		CT_PASSTHRU		8
+#define		CT_RAID4		9
+#define		CT_RAID10		10	/* stripe of mirror */
+#define		CT_RAID00		11	/* stripe of stripe */
+#define		CT_VOLUME_OF_MIRRORS	12	/* volume of mirror */
+#define		CT_PSEUDO_RAID		13	/* really raid4 */
+#define		CT_LAST_VOLUME_TYPE	14
+#define 	CT_OK        		218
+
+/*
+ *	Types of objects addressable in some fashion by the client.
+ *	This is a superset of those objects handled just by the filesystem
+ *	and includes "raw" objects that an administrator would use to
+ *	configure containers and filesystems.
+ */
+
+#define		FT_REG		1	/* regular file */
+#define		FT_DIR		2	/* directory */
+#define		FT_BLK		3	/* "block" device - reserved */
+#define		FT_CHR		4	/* "character special" device - reserved */
+#define		FT_LNK		5	/* symbolic link */
+#define		FT_SOCK		6	/* socket */
+#define		FT_FIFO		7	/* fifo */
+#define		FT_FILESYS	8	/* ADAPTEC's "FSA"(tm) filesystem */
+#define		FT_DRIVE	9	/* physical disk - addressable in scsi by bus/id/lun */
+#define		FT_SLICE	10	/* virtual disk - raw volume - slice */
+#define		FT_PARTITION	11	/* FSA partition - carved out of a slice - building block for containers */
+#define		FT_VOLUME	12	/* Container - Volume Set */
+#define		FT_STRIPE	13	/* Container - Stripe Set */
+#define		FT_MIRROR	14	/* Container - Mirror Set */
+#define		FT_RAID5	15	/* Container - Raid 5 Set */
+#define		FT_DATABASE	16	/* Storage object with "foreign" content manager */
+
+/*
+ *	Host side memory scatter gather list
+ *	Used by the adapter for read, write, and readdirplus operations
+ *	We have separate 32 and 64 bit version because even
+ *	on 64 bit systems not all cards support the 64 bit version
+ */
+struct sgentry {
+	u32	addr;	/* 32-bit address. */
+	u32	count;	/* Length. */
+};
+
+struct sgentry64 {
+	u32	addr[2];	/* 64-bit addr. 2 pieces for data alignment */
+	u32	count;	/* Length. */
+};
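+
+/*
+ * Illustrative example (hypothetical address, little-endian host): a
+ * 64-bit DMA address of 0x0000000123456789 is stored with the low half
+ * in addr[0] and the high half in addr[1]:
+ *
+ *	addr[0] = 0x23456789, addr[1] = 0x00000001
+ *
+ * (see aac_build_sg64 in aachba.c)
+ */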
+
+/*
+ *	SGMAP
+ *
+ *	This is the SGMAP structure for all commands that use
+ *	32-bit addressing.
+ */
+
+struct sgmap {
+	u32		count;
+	struct sgentry	sg[1]; 
+};
+
+struct sgmap64 {
+	u32		count;
+	struct sgentry64 sg[1];
+};
+
+struct creation_info
+{
+	u8 		buildnum;		/* e.g., 588 */
+	u8 		usec;			/* e.g., 588 */
+	u8	 	via;			/* e.g., 1 = FSU,
+						 * 	 2 = API
+						 */
+	u8	 	year;		 	/* e.g., 1997 = 97 */
+	u32		date;			/*
+						 * unsigned 	Month		:4;	// 1 - 12
+						 * unsigned 	Day		:6;	// 1 - 31
+						 * unsigned 	Hour		:6;	// 0 - 23
+						 * unsigned 	Minute		:6;	// 0 - 59
+						 * unsigned 	Second		:6;	// 0 - 59
+						 */
+	u32		serial[2];			/* e.g., 0x1DEADB0BFAFAF001 */
+};
+
+
+/*
+ *	Define all the constants needed for the communication interface
+ */
+
+/*
+ *	Define how many queue entries each queue will have and the total
+ *	number of entries for the entire communication interface. Also define
+ *	how many queues we support.
+ *
+ *	This has to match the controller
+ */
+
+#define NUMBER_OF_COMM_QUEUES  8   // 4 command; 4 response
+#define HOST_HIGH_CMD_ENTRIES  4
+#define HOST_NORM_CMD_ENTRIES  8
+#define ADAP_HIGH_CMD_ENTRIES  4
+#define ADAP_NORM_CMD_ENTRIES  512
+#define HOST_HIGH_RESP_ENTRIES 4
+#define HOST_NORM_RESP_ENTRIES 512
+#define ADAP_HIGH_RESP_ENTRIES 4
+#define ADAP_NORM_RESP_ENTRIES 8
+
+#define TOTAL_QUEUE_ENTRIES  \
+    (HOST_NORM_CMD_ENTRIES + HOST_HIGH_CMD_ENTRIES + ADAP_NORM_CMD_ENTRIES + ADAP_HIGH_CMD_ENTRIES + \
+	    HOST_NORM_RESP_ENTRIES + HOST_HIGH_RESP_ENTRIES + ADAP_NORM_RESP_ENTRIES + ADAP_HIGH_RESP_ENTRIES)
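+
+/*
+ * With the values above this works out to:
+ *	(8 + 4 + 512 + 4) command + (512 + 4 + 8 + 4) response
+ *	= 528 + 528 = 1056 total queue entries
+ */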
+
+
+/*
+ *	Set the queues on a 16 byte alignment
+ */
+ 
+#define QUEUE_ALIGNMENT		16
+
+/*
+ *	The queue headers define the Communication Region queues. These
+ *	are physically contiguous and accessible by both the adapter and the
+ *	host. Even though all queue headers are in the same contiguous block
+ *	they will be represented as individual units in the data structures.
+ */
+
+struct aac_entry {
+	u32 size;          /* Size in bytes of Fib which this QE points to */
+	u32 addr; /* Receiver address of the FIB */
+};
+
+/*
+ *	The adapter assumes the ProducerIndex and ConsumerIndex are grouped
+ *	adjacently and in that order.
+ */
+ 
+struct aac_qhdr {
+	u64 header_addr;		/* Address handed to the adapter to access this queue head */
+	u32 *producer;			/* The producer index for this queue (host address) */
+	u32 *consumer;			/* The consumer index for this queue (host address) */
+};
+
+/*
+ *	Define all the events which the adapter would like to notify
+ *	the host of.
+ */
+ 
+#define		HostNormCmdQue		1	/* Change in host normal priority command queue */
+#define		HostHighCmdQue		2	/* Change in host high priority command queue */
+#define		HostNormRespQue		3	/* Change in host normal priority response queue */
+#define		HostHighRespQue		4	/* Change in host high priority response queue */
+#define		AdapNormRespNotFull	5
+#define		AdapHighRespNotFull	6
+#define		AdapNormCmdNotFull	7
+#define		AdapHighCmdNotFull	8
+#define		SynchCommandComplete	9
+#define		AdapInternalError	0xfe    /* The adapter detected an internal error and is shutting down */
+
+/*
+ *	Define all the events the host wishes to notify the
+ *	adapter of. The first four values must match the Qid of the
+ *	corresponding queue.
+ */
+
+#define		AdapNormCmdQue		2
+#define		AdapHighCmdQue		3
+#define		AdapNormRespQue		6
+#define		AdapHighRespQue		7
+#define		HostShutdown		8
+#define		HostPowerFail		9
+#define		FatalCommError		10
+#define		HostNormRespNotFull	11
+#define		HostHighRespNotFull	12
+#define		HostNormCmdNotFull	13
+#define		HostHighCmdNotFull	14
+#define		FastIo			15
+#define		AdapPrintfDone		16
+
+/*
+ *	Define all the queues that the adapter and host use to communicate
+ *	Number them to match the physical queue layout.
+ */
+
+enum aac_queue_types {
+        HostNormCmdQueue = 0,	/* Adapter to host normal priority command traffic */
+        HostHighCmdQueue,	/* Adapter to host high priority command traffic */
+        AdapNormCmdQueue,	/* Host to adapter normal priority command traffic */
+        AdapHighCmdQueue,	/* Host to adapter high priority command traffic */
+        HostNormRespQueue,	/* Adapter to host normal priority response traffic */
+        HostHighRespQueue,	/* Adapter to host high priority response traffic */
+        AdapNormRespQueue,	/* Host to adapter normal priority response traffic */
+        AdapHighRespQueue	/* Host to adapter high priority response traffic */
+};
+
+/*
+ *	Assign type values to the FSA communication data structures
+ */
+
+#define		FIB_MAGIC	0x0001
+
+/*
+ *	Define the priority levels the FSA communication routines support.
+ */
+
+#define		FsaNormal	1
+#define		FsaHigh		2
+
+/*
+ * Define the FIB. The FIB is where all the requested data and
+ * command information is passed to the application on the FSA adapter.
+ */
+
+struct aac_fibhdr {
+	u32 XferState;			// Current transfer state for this CCB
+	u16 Command;			// Routing information for the destination
+	u8 StructType;			// Type FIB
+	u8 Flags;			// Flags for FIB
+	u16 Size;			// Size of this FIB in bytes
+	u16 SenderSize;			// Size of the FIB in the sender (for response sizing)
+	u32 SenderFibAddress;		// Host defined data in the FIB
+	u32 ReceiverFibAddress;		// Logical address of this FIB for the adapter
+	u32 SenderData;			// Place holder for the sender to store data
+	union {
+		struct {
+		    u32 _ReceiverTimeStart; 	// Timestamp for receipt of fib
+		    u32 _ReceiverTimeDone;	// Timestamp for completion of fib
+		} _s;
+	} _u;
+};
+
+#define FIB_DATA_SIZE_IN_BYTES (512 - sizeof(struct aac_fibhdr))
+
+
+struct hw_fib {
+	struct aac_fibhdr header;
+	u8 data[FIB_DATA_SIZE_IN_BYTES];		// Command specific data
+};
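+
+/*
+ * Size sketch (assuming the compiler inserts no padding into struct
+ * aac_fibhdr): the header is 4+2+1+1+2+2+4+4+4+8 = 32 bytes, so
+ * FIB_DATA_SIZE_IN_BYTES is 512 - 32 = 480 and a struct hw_fib is
+ * exactly 512 bytes.
+ */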
+
+/*
+ *	FIB commands
+ */
+
+#define 	TestCommandResponse		1
+#define		TestAdapterCommand		2
+/*
+ *	Lowlevel and comm commands
+ */
+#define		LastTestCommand			100
+#define		ReinitHostNormCommandQueue	101
+#define		ReinitHostHighCommandQueue	102
+#define		ReinitHostHighRespQueue		103
+#define		ReinitHostNormRespQueue		104
+#define		ReinitAdapNormCommandQueue	105
+#define		ReinitAdapHighCommandQueue	107
+#define		ReinitAdapHighRespQueue		108
+#define		ReinitAdapNormRespQueue		109
+#define		InterfaceShutdown		110
+#define		DmaCommandFib			120
+#define		StartProfile			121
+#define		TermProfile			122
+#define		SpeedTest			123
+#define		TakeABreakPt			124
+#define		RequestPerfData			125
+#define		SetInterruptDefTimer		126
+#define		SetInterruptDefCount		127
+#define		GetInterruptDefStatus		128
+#define		LastCommCommand			129
+/*
+ *	Filesystem commands
+ */
+#define		NuFileSystem			300
+#define		UFS				301
+#define		HostFileSystem			302
+#define		LastFileSystemCommand		303
+/*
+ *	Container Commands
+ */
+#define		ContainerCommand		500
+#define		ContainerCommand64		501
+/*
+ *	Cluster Commands
+ */
+#define		ClusterCommand	 		550
+/*
+ *	Scsi Port commands (scsi passthrough)
+ */
+#define		ScsiPortCommand			600
+#define		ScsiPortCommand64		601
+/*
+ *	Misc house keeping and generic adapter initiated commands
+ */
+#define		AifRequest			700
+#define		CheckRevision			701
+#define		FsaHostShutdown			702
+#define		RequestAdapterInfo		703
+#define		IsAdapterPaused			704
+#define		SendHostTime			705
+#define		LastMiscCommand			706
+
+//
+// Commands that will target the failover level on the FSA adapter
+//
+
+enum fib_xfer_state {
+	HostOwned 			= (1<<0),
+	AdapterOwned 			= (1<<1),
+	FibInitialized 			= (1<<2),
+	FibEmpty 			= (1<<3),
+	AllocatedFromPool 		= (1<<4),
+	SentFromHost 			= (1<<5),
+	SentFromAdapter 		= (1<<6),
+	ResponseExpected 		= (1<<7),
+	NoResponseExpected 		= (1<<8),
+	AdapterProcessed 		= (1<<9),
+	HostProcessed 			= (1<<10),
+	HighPriority 			= (1<<11),
+	NormalPriority 			= (1<<12),
+	Async				= (1<<13),
+	AsyncIo				= (1<<13),	// rpbfix: remove with new regime
+	PageFileIo			= (1<<14),	// rpbfix: remove with new regime
+	ShutdownRequest			= (1<<15),
+	LazyWrite			= (1<<16),	// rpbfix: remove with new regime
+	AdapterMicroFib			= (1<<17),
+	BIOSFibPath			= (1<<18),
+	FastResponseCapable		= (1<<19),
+	ApiFib				= (1<<20)	// Its an API Fib.
+};
+
+/*
+ *	The following define needs to be updated any time there is an
+ *	incompatible change made to the aac_init structure.
+ */
+
+#define ADAPTER_INIT_STRUCT_REVISION		3
+
+struct aac_init
+{
+	u32	InitStructRevision;
+	u32	MiniPortRevision;
+	u32	fsrev;
+	u32	CommHeaderAddress;
+	u32	FastIoCommAreaAddress;
+	u32	AdapterFibsPhysicalAddress;
+	u32	AdapterFibsVirtualAddress;
+	u32	AdapterFibsSize;
+	u32	AdapterFibAlign;
+	u32	printfbuf;
+	u32	printfbufsiz;
+	u32	HostPhysMemPages;		// number of 4k pages of host physical memory
+	u32	HostElapsedSeconds;		// number of seconds since 1970.
+};
+
+enum aac_log_level {
+	LOG_AAC_INIT			= 10,
+	LOG_AAC_INFORMATIONAL		= 20,
+	LOG_AAC_WARNING			= 30,
+	LOG_AAC_LOW_ERROR		= 40,
+	LOG_AAC_MEDIUM_ERROR		= 50,
+	LOG_AAC_HIGH_ERROR		= 60,
+	LOG_AAC_PANIC			= 70,
+	LOG_AAC_DEBUG			= 80,
+	LOG_AAC_WINDBG_PRINT		= 90
+};
+
+#define FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT	0x030b
+#define FSAFS_NTC_FIB_CONTEXT			0x030c
+
+struct aac_dev;
+
+struct adapter_ops
+{
+	void (*adapter_interrupt)(struct aac_dev *dev);
+	void (*adapter_notify)(struct aac_dev *dev, u32 event);
+	int  (*adapter_sync_cmd)(struct aac_dev *dev, u32 command, u32 p1, u32 *status);
+	int  (*adapter_check_health)(struct aac_dev *dev);
+};
+
+/*
+ *	Define which interrupt handler needs to be installed
+ */
+
+struct aac_driver_ident
+{
+	int 	(*init)(struct aac_dev *dev);
+	char *	name;
+	char *	vname;
+	char *	model;
+	u16	channels;
+	int	quirks;
+};
+/*
+ * Some adapter firmware needs communication memory
+ * below 2GiB. This tells the init function to set the
+ * DMA mask such that FIB memory will be allocated where the
+ * adapter firmware can get to it.
+ */
+#define AAC_QUIRK_31BIT	0x0001
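+
+/*
+ * Illustrative check (a sketch only, not the driver's actual code): an
+ * init path honouring this quirk might do something like
+ *
+ *	if (aac_get_driver_ident(dev->cardtype)->quirks & AAC_QUIRK_31BIT)
+ *		pci_set_dma_mask(dev->pdev, 0x7FFFFFFFULL);
+ */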
+
+/*
+ * Some adapter firmware, when the raid card's cache is turned off, cannot
+ * split up scatter-gather lists to deal with the limits of the
+ * underlying CHIM. This limit is 34 scatter-gather elements.
+ */
+#define AAC_QUIRK_34SG	0x0002
+
+/*
+ * This adapter is a slave (no Firmware)
+ */
+#define AAC_QUIRK_SLAVE 0x0004
+
+/*
+ * This adapter is a master.
+ */
+#define AAC_QUIRK_MASTER 0x0008
+
+/*
+ *	The adapter interface specifies that all queues be located in the same
+ *	physically contiguous block. The host structure that defines the
+ *	communication queues will assume they are each a separate physically
+ *	contiguous memory region that will support them all being one big
+ *	contiguous block.
+ *	There is a command and response queue for each level and direction of
+ *	communication. These regions are accessed by both the host and adapter.
+ */
+ 
+struct aac_queue {
+	u64		 	logical;	/*address we give the adapter */
+	struct aac_entry	*base;		/*system virtual address */
+	struct aac_qhdr 	headers;       	/*producer,consumer q headers*/
+	u32	 		entries;	/*Number of queue entries */
+	wait_queue_head_t	qfull;		/*Event to wait on if q full */
+	wait_queue_head_t	cmdready;	/*Cmd ready from the adapter */
+                  /* This is only valid for adapter to host command queues. */ 
+	spinlock_t	 	*lock;		/* Spinlock for this queue; must be taken before accessing the queue */
+	spinlock_t		lockdata;	/* Actual lock (only one side of a queue pair uses it) */
+	unsigned long		SavedIrql;     	/* Previous IRQL when the spin lock is taken */
+	u32			padding;	/* Padding - FIXME - can remove I believe */
+	struct list_head 	cmdq;	   	/* A queue of FIBs which need to be processed by the FS thread. This is */
+                                		/* only valid for command queues which receive entries from the adapter. */
+	struct list_head	pendingq;	/* A queue of outstanding fib's to the adapter. */
+	u32			numpending;	/* Number of entries on outstanding queue. */
+	struct aac_dev *	dev;		/* Back pointer to adapter structure */
+};
+
+/*
+ *	Message queues. The order here is important, see also the 
+ *	queue type ordering
+ */
+
+struct aac_queue_block
+{
+	struct aac_queue queue[8];
+};
+
+/*
+ *	SaP1 Message Unit Registers
+ */
+ 
+struct sa_drawbridge_CSR {
+				/*	Offset 	|  Name */
+	__le32	reserved[10];	/*	00h-27h |  Reserved */
+	u8	LUT_Offset;	/*	28h	|  Lookup Table Offset */
+	u8	reserved1[3];	/* 	29h-2bh	|  Reserved */
+	__le32	LUT_Data;	/*	2ch	|  Lookup Table Data */
+	__le32	reserved2[26];	/*	30h-97h	|  Reserved */
+	__le16	PRICLEARIRQ;	/*	98h	|  Primary Clear Irq */
+	__le16	SECCLEARIRQ;	/*	9ah	|  Secondary Clear Irq */
+	__le16	PRISETIRQ;	/*	9ch	|  Primary Set Irq */
+	__le16	SECSETIRQ;	/*	9eh	|  Secondary Set Irq */
+	__le16	PRICLEARIRQMASK;/*	a0h	|  Primary Clear Irq Mask */
+	__le16	SECCLEARIRQMASK;/*	a2h	|  Secondary Clear Irq Mask */
+	__le16	PRISETIRQMASK;	/*	a4h	|  Primary Set Irq Mask */
+	__le16	SECSETIRQMASK;	/*	a6h	|  Secondary Set Irq Mask */
+	__le32	MAILBOX0;	/*	a8h	|  Scratchpad 0 */
+	__le32	MAILBOX1;	/*	ach	|  Scratchpad 1 */
+	__le32	MAILBOX2;	/*	b0h	|  Scratchpad 2 */
+	__le32	MAILBOX3;	/*	b4h	|  Scratchpad 3 */
+	__le32	MAILBOX4;	/*	b8h	|  Scratchpad 4 */
+	__le32	MAILBOX5;	/*	bch	|  Scratchpad 5 */
+	__le32	MAILBOX6;	/*	c0h	|  Scratchpad 6 */
+	__le32	MAILBOX7;	/*	c4h	|  Scratchpad 7 */
+	__le32	ROM_Setup_Data;	/*	c8h 	|  Rom Setup and Data */
+	__le32	ROM_Control_Addr;/*	cch 	|  Rom Control and Address */
+	__le32	reserved3[12];	/*	d0h-ffh	|  reserved */
+	__le32	LUT[64];	/*    100h-1ffh	|  Lookup Table Entries */
+};
+
+#define Mailbox0	SaDbCSR.MAILBOX0
+#define Mailbox1	SaDbCSR.MAILBOX1
+#define Mailbox2	SaDbCSR.MAILBOX2
+#define Mailbox3	SaDbCSR.MAILBOX3
+#define Mailbox4	SaDbCSR.MAILBOX4
+#define Mailbox5	SaDbCSR.MAILBOX5
+#define Mailbox7	SaDbCSR.MAILBOX7
+	
+#define DoorbellReg_p SaDbCSR.PRISETIRQ
+#define DoorbellReg_s SaDbCSR.SECSETIRQ
+#define DoorbellClrReg_p SaDbCSR.PRICLEARIRQ
+
+
+#define	DOORBELL_0	0x0001
+#define DOORBELL_1	0x0002
+#define DOORBELL_2	0x0004
+#define DOORBELL_3	0x0008
+#define DOORBELL_4	0x0010
+#define DOORBELL_5	0x0020
+#define DOORBELL_6	0x0040
+
+	
+#define PrintfReady	DOORBELL_5
+#define PrintfDone	DOORBELL_5
+	
+struct sa_registers {
+	struct sa_drawbridge_CSR	SaDbCSR;			/* 98h - c4h */
+};
+	
+
+#define Sa_MINIPORT_REVISION			1
+
+#define sa_readw(AEP, CSR)		readl(&((AEP)->regs.sa->CSR))
+#define sa_readl(AEP,  CSR)		readl(&((AEP)->regs.sa->CSR))
+#define sa_writew(AEP, CSR, value)	writew(value, &((AEP)->regs.sa->CSR))
+#define sa_writel(AEP, CSR, value)	writel(value, &((AEP)->regs.sa->CSR))
+
+/*
+ *	Rx Message Unit Registers
+ */
+
+struct rx_mu_registers {
+			    /*	Local  | PCI*| Name */
+	__le32	ARSR;	    /*	1300h  | 00h | APIC Register Select Register */
+	__le32	reserved0;  /*	1304h  | 04h | Reserved */
+	__le32	AWR;	    /*	1308h  | 08h | APIC Window Register */
+	__le32	reserved1;  /*	130Ch  | 0Ch | Reserved */
+	__le32	IMRx[2];    /*	1310h  | 10h | Inbound Message Registers */
+	__le32	OMRx[2];    /*	1318h  | 18h | Outbound Message Registers */
+	__le32	IDR;	    /*	1320h  | 20h | Inbound Doorbell Register */
+	__le32	IISR;	    /*	1324h  | 24h | Inbound Interrupt 
+						Status Register */
+	__le32	IIMR;	    /*	1328h  | 28h | Inbound Interrupt 
+					 	Mask Register */
+	__le32	ODR;	    /*	132Ch  | 2Ch | Outbound Doorbell Register */
+	__le32	OISR;	    /*	1330h  | 30h | Outbound Interrupt 
+						Status Register */
+	__le32	OIMR;	    /*	1334h  | 34h | Outbound Interrupt 
+						Mask Register */
+			    /* * Must access through ATU Inbound 
+			     	 Translation Window */
+};
+
+struct rx_inbound {
+	__le32	Mailbox[8];
+};
+
+#define	InboundMailbox0		IndexRegs.Mailbox[0]
+#define	InboundMailbox1		IndexRegs.Mailbox[1]
+#define	InboundMailbox2		IndexRegs.Mailbox[2]
+#define	InboundMailbox3		IndexRegs.Mailbox[3]
+#define	InboundMailbox4		IndexRegs.Mailbox[4]
+#define	InboundMailbox5		IndexRegs.Mailbox[5]
+#define	InboundMailbox6		IndexRegs.Mailbox[6]
+
+#define	INBOUNDDOORBELL_0	0x00000001
+#define INBOUNDDOORBELL_1	0x00000002
+#define INBOUNDDOORBELL_2	0x00000004
+#define INBOUNDDOORBELL_3	0x00000008
+#define INBOUNDDOORBELL_4	0x00000010
+#define INBOUNDDOORBELL_5	0x00000020
+#define INBOUNDDOORBELL_6	0x00000040
+
+#define	OUTBOUNDDOORBELL_0	0x00000001
+#define OUTBOUNDDOORBELL_1	0x00000002
+#define OUTBOUNDDOORBELL_2	0x00000004
+#define OUTBOUNDDOORBELL_3	0x00000008
+#define OUTBOUNDDOORBELL_4	0x00000010
+
+#define InboundDoorbellReg	MUnit.IDR
+#define OutboundDoorbellReg	MUnit.ODR
+
+struct rx_registers {
+	struct rx_mu_registers		MUnit;		/* 1300h - 1334h */
+	__le32				reserved1[6];	/* 1338h - 134ch */
+	struct rx_inbound		IndexRegs;
+};
+
+#define rx_readb(AEP, CSR)		readb(&((AEP)->regs.rx->CSR))
+#define rx_readl(AEP, CSR)		readl(&((AEP)->regs.rx->CSR))
+#define rx_writeb(AEP, CSR, value)	writeb(value, &((AEP)->regs.rx->CSR))
+#define rx_writel(AEP, CSR, value)	writel(value, &((AEP)->regs.rx->CSR))
+
+/*
+ *	Rkt Message Unit Registers (same as Rx, except a larger reserve region)
+ */
+
+#define rkt_mu_registers rx_mu_registers
+#define rkt_inbound rx_inbound
+
+struct rkt_registers {
+	struct rkt_mu_registers		MUnit;		 /* 1300h - 1334h */
+	__le32				reserved1[1010]; /* 1338h - 22fch */
+	struct rkt_inbound		IndexRegs;	 /* 2300h - */
+};
+
+#define rkt_readb(AEP, CSR)		readb(&((AEP)->regs.rkt->CSR))
+#define rkt_readl(AEP, CSR)		readl(&((AEP)->regs.rkt->CSR))
+#define rkt_writeb(AEP, CSR, value)	writeb(value, &((AEP)->regs.rkt->CSR))
+#define rkt_writel(AEP, CSR, value)	writel(value, &((AEP)->regs.rkt->CSR))
+
+struct fib;
+
+typedef void (*fib_callback)(void *ctxt, struct fib *fibctx);
+
+struct aac_fib_context {
+	s16	 		type;		// used for verification of structure	
+	s16	 		size;
+	u32			unique;		// unique value representing this context
+	ulong			jiffies;	// used for cleanup - dmb changed to ulong
+	struct list_head	next;		// used to link contexts into a linked list
+	struct semaphore 	wait_sem;	// this is used to wait for the next fib to arrive.
+	int			wait;		// Set to true when thread is in WaitForSingleObject
+	unsigned long		count;		// total number of FIBs on FibList
+	struct list_head	fib_list;	// this holds fibs and their attached hw_fibs
+};
+
+struct sense_data {
+	u8 error_code;		/* 70h (current errors), 71h(deferred errors) */
+	u8 valid:1;		/* A valid bit of one indicates that the information  */
+				/* field contains valid information as defined in the
+				 * SCSI-2 Standard.
+				 */
+	u8 segment_number;	/* Only used for COPY, COMPARE, or COPY AND VERIFY Commands */
+	u8 sense_key:4;		/* Sense Key */
+	u8 reserved:1;
+	u8 ILI:1;		/* Incorrect Length Indicator */
+	u8 EOM:1;		/* End Of Medium - reserved for random access devices */
+	u8 filemark:1;		/* Filemark - reserved for random access devices */
+
+	u8 information[4];	/* for direct-access devices, contains the unsigned 
+				 * logical block address or residue associated with 
+				 * the sense key 
+				 */
+	u8 add_sense_len;	/* number of additional sense bytes to follow this field */
+	u8 cmnd_info[4];	/* not used */
+	u8 ASC;			/* Additional Sense Code */
+	u8 ASCQ;		/* Additional Sense Code Qualifier */
+	u8 FRUC;		/* Field Replaceable Unit Code - not used */
+	u8 bit_ptr:3;		/* indicates which byte of the CDB or parameter data
+				 * was in error
+				 */
+	u8 BPV:1;		/* bit pointer valid (BPV): 1- indicates that 
+				 * the bit_ptr field has valid value
+				 */
+	u8 reserved2:2;
+	u8 CD:1;		/* command data bit: 1- illegal parameter in CDB.
+				 * 0- illegal parameter in data.
+				 */
+	u8 SKSV:1;
+	u8 field_ptr[2];	/* byte of the CDB or parameter data in error */
+};
+
+struct fsa_dev_info {
+	u64		last;
+	u64		size;
+	u32		type;
+	u16		queue_depth;
+	u8		valid;
+	u8		ro;
+	u8		locked;
+	u8		deleted;
+	char		devname[8];
+	struct sense_data sense_data;
+};
+
+struct fib {
+	void			*next;	/* this is used by the allocator */
+	s16			type;
+	s16			size;
+	/*
+	 *	The Adapter that this I/O is destined for.
+	 */
+	struct aac_dev 		*dev;
+	/*
+	 *	This is the event the sendfib routine will wait on if the
+	 *	caller did not pass one and this is synch io.
+	 */
+	struct semaphore 	event_wait;
+	spinlock_t		event_lock;
+
+	u32			done;	/* gets set to 1 when fib is complete */
+	fib_callback 		callback;
+	void 			*callback_data;
+	u32			flags; // u32 dmb was ulong
+	/*
+	 *	The following is used to put this fib context onto the 
+	 *	Outstanding I/O queue.
+	 */
+	struct list_head	queue;
+	/*
+	 *	And for the internal issue/reply queues (we may be able
+	 *	to merge these two)
+	 */
+	struct list_head	fiblink;
+	void 			*data;
+	struct hw_fib		*hw_fib;		/* Actual shared object */
+	dma_addr_t		hw_fib_pa;		/* physical address of hw_fib*/
+};
+
+/*
+ *	Adapter Information Block
+ *
+ *	This is returned by the RequestAdapterInfo block
+ */
+ 
+struct aac_adapter_info
+{
+	u32	platform;
+	u32	cpu;
+	u32	subcpu;
+	u32	clock;
+	u32	execmem;
+	u32	buffermem;
+	u32	totalmem;
+	u32	kernelrev;
+	u32	kernelbuild;
+	u32	monitorrev;
+	u32	monitorbuild;
+	u32	hwrev;
+	u32	hwbuild;
+	u32	biosrev;
+	u32	biosbuild;
+	u32	cluster;
+	u32	clusterchannelmask; 
+	u32	serial[2];
+	u32	battery;
+	u32	options;
+	u32	OEM;
+};
+
+/*
+ * Battery platforms
+ */
+#define AAC_BAT_REQ_PRESENT	(1)
+#define AAC_BAT_REQ_NOTPRESENT	(2)
+#define AAC_BAT_OPT_PRESENT	(3)
+#define AAC_BAT_OPT_NOTPRESENT	(4)
+#define AAC_BAT_NOT_SUPPORTED	(5)
+/*
+ * cpu types
+ */
+#define AAC_CPU_SIMULATOR	(1)
+#define AAC_CPU_I960		(2)
+#define AAC_CPU_STRONGARM	(3)
+
+/*
+ * Supported Options
+ */
+#define AAC_OPT_SNAPSHOT		cpu_to_le32(1)
+#define AAC_OPT_CLUSTERS		cpu_to_le32(1<<1)
+#define AAC_OPT_WRITE_CACHE		cpu_to_le32(1<<2)
+#define AAC_OPT_64BIT_DATA		cpu_to_le32(1<<3)
+#define AAC_OPT_HOST_TIME_FIB		cpu_to_le32(1<<4)
+#define AAC_OPT_RAID50			cpu_to_le32(1<<5)
+#define AAC_OPT_4GB_WINDOW		cpu_to_le32(1<<6)
+#define AAC_OPT_SCSI_UPGRADEABLE 	cpu_to_le32(1<<7)
+#define AAC_OPT_SOFT_ERR_REPORT		cpu_to_le32(1<<8)
+#define AAC_OPT_SUPPORTED_RECONDITION 	cpu_to_le32(1<<9)
+#define AAC_OPT_SGMAP_HOST64		cpu_to_le32(1<<10)
+#define AAC_OPT_ALARM			cpu_to_le32(1<<11)
+#define AAC_OPT_NONDASD			cpu_to_le32(1<<12)
+#define AAC_OPT_SCSI_MANAGED    	cpu_to_le32(1<<13)
+#define AAC_OPT_RAID_SCSI_MODE		cpu_to_le32(1<<14)
+#define AAC_OPT_SUPPLEMENT_ADAPTER_INFO	cpu_to_le32(1<<16)
+#define AAC_OPT_NEW_COMM		cpu_to_le32(1<<17)
+#define AAC_OPT_NEW_COMM_64		cpu_to_le32(1<<18)
+
+struct aac_dev
+{
+	struct list_head	entry;
+	const char		*name;
+	int			id;
+
+	u16			irq_mask;
+	/*
+	 *	Map for 128 fib objects (64k)
+	 */	
+	dma_addr_t		hw_fib_pa;
+	struct hw_fib		*hw_fib_va;
+	struct hw_fib		*aif_base_va;
+	/*
+	 *	Fib Headers
+	 */
+	struct fib              *fibs;
+
+	struct fib		*free_fib;
+	struct fib		*timeout_fib;
+	spinlock_t		fib_lock;
+	
+	struct aac_queue_block *queues;
+	/*
+	 *	The user API will use an IOCTL to register itself to receive
+	 *	FIBs from the adapter.  The following list is used to keep
+	 *	track of all the threads that have requested these FIBs.  The
+	 *	mutex is used to synchronize access to all data associated 
+	 *	with the adapter fibs.
+	 */
+	struct list_head	fib_list;
+
+	struct adapter_ops	a_ops;
+	unsigned long		fsrev;		/* Main driver's revision number */
+	
+	struct aac_init		*init;		/* Holds initialization info to communicate with adapter */
+	dma_addr_t		init_pa; 	/* Holds physical address of the init struct */
+	
+	struct pci_dev		*pdev;		/* Our PCI interface */
+	void *			printfbuf;	/* pointer to buffer used for printf's from the adapter */
+	void *			comm_addr;	/* Base address of Comm area */
+	dma_addr_t		comm_phys;	/* Physical Address of Comm area */
+	size_t			comm_size;
+
+	struct Scsi_Host	*scsi_host_ptr;
+	int			maximum_num_containers;
+	struct fsa_dev_info	*fsa_dev;
+	pid_t			thread_pid;
+	int			cardtype;
+	
+	/*
+	 *	The following is the device specific extension.
+	 */
+	union
+	{
+		struct sa_registers __iomem *sa;
+		struct rx_registers __iomem *rx;
+		struct rkt_registers __iomem *rkt;
+	} regs;
+	u32			OIMR; /* Mask Register Cache */
+	/*
+	 *	AIF thread states
+	 */
+	u32			aif_thread;
+	struct completion	aif_completion;
+	struct aac_adapter_info adapter_info;
+	/* These are in adapter info but they are in the I/O path so
+	 * let's break them out so we don't have to do an AND to check them
+	 */
+	u8			nondasd_support; 
+	u8			dac_support;
+	u8			raid_scsi_mode;
+};
+
+#define aac_adapter_interrupt(dev) \
+	(dev)->a_ops.adapter_interrupt(dev)
+
+#define aac_adapter_notify(dev, event) \
+	(dev)->a_ops.adapter_notify(dev, event)
+
+
+#define aac_adapter_check_health(dev) \
+	(dev)->a_ops.adapter_check_health(dev)
+
+
+#define FIB_CONTEXT_FLAG_TIMED_OUT		(0x00000001)
+
+/*
+ *	Define the command values
+ */
+ 
+#define		Null			0
+#define 	GetAttributes		1
+#define 	SetAttributes		2
+#define 	Lookup			3
+#define 	ReadLink		4
+#define 	Read			5
+#define 	Write			6
+#define		Create			7
+#define		MakeDirectory		8
+#define		SymbolicLink		9
+#define		MakeNode		10
+#define		Removex			11
+#define		RemoveDirectoryx	12
+#define		Rename			13
+#define		Link			14
+#define		ReadDirectory		15
+#define		ReadDirectoryPlus	16
+#define		FileSystemStatus	17
+#define		FileSystemInfo		18
+#define		PathConfigure		19
+#define		Commit			20
+#define		Mount			21
+#define		UnMount			22
+#define		Newfs			23
+#define		FsCheck			24
+#define		FsSync			25
+#define		SimReadWrite		26
+#define		SetFileSystemStatus	27
+#define		BlockRead		28
+#define		BlockWrite		29
+#define		NvramIoctl		30
+#define		FsSyncWait		31
+#define		ClearArchiveBit		32
+#define		SetAcl			33
+#define		GetAcl			34
+#define		AssignAcl		35
+#define		FaultInsertion		36	/* Fault Insertion Command */
+#define		CrazyCache		37	/* Crazycache */
+
+#define		MAX_FSACOMMAND_NUM	38
+
+
+/*
+ *	Define the status returns. These are very Unix-like although
+ *	most are not in fact used
+ */
+
+#define		ST_OK		0
+#define		ST_PERM		1
+#define		ST_NOENT	2
+#define		ST_IO		5
+#define		ST_NXIO		6
+#define		ST_E2BIG	7
+#define		ST_ACCES	13
+#define		ST_EXIST	17
+#define		ST_XDEV		18
+#define		ST_NODEV	19
+#define		ST_NOTDIR	20
+#define		ST_ISDIR	21
+#define		ST_INVAL	22
+#define		ST_FBIG		27
+#define		ST_NOSPC	28
+#define		ST_ROFS		30
+#define		ST_MLINK	31
+#define		ST_WOULDBLOCK	35
+#define		ST_NAMETOOLONG	63
+#define		ST_NOTEMPTY	66
+#define		ST_DQUOT	69
+#define		ST_STALE	70
+#define		ST_REMOTE	71
+#define		ST_BADHANDLE	10001
+#define		ST_NOT_SYNC	10002
+#define		ST_BAD_COOKIE	10003
+#define		ST_NOTSUPP	10004
+#define		ST_TOOSMALL	10005
+#define		ST_SERVERFAULT	10006
+#define		ST_BADTYPE	10007
+#define		ST_JUKEBOX	10008
+#define		ST_NOTMOUNTED	10009
+#define		ST_MAINTMODE	10010
+#define		ST_STALEACL	10011
+
+/*
+ *	On writes how does the client want the data written.
+ */
+
+#define	CACHE_CSTABLE		1
+#define CACHE_UNSTABLE		2
+
+/*
+ *	Lets the client know at which level the data was committed on
+ *	a write request
+ */
+
+#define	CMFILE_SYNCH_NVRAM	1
+#define	CMDATA_SYNCH_NVRAM	2
+#define	CMFILE_SYNCH		3
+#define CMDATA_SYNCH		4
+#define CMUNSTABLE		5
+
+struct aac_read
+{
+	u32	 	command;
+	u32 		cid;
+	u32 		block;
+	u32 		count;
+	struct sgmap	sg;	// Must be last in struct because it is variable
+};
+
+struct aac_read64
+{
+	u32	 	command;
+	u16 		cid;
+	u16 		sector_count;
+	u32 		block;
+	u16		pad;
+	u16		flags;
+	struct sgmap64	sg;	// Must be last in struct because it is variable
+};
+
+struct aac_read_reply
+{
+	u32	 	status;
+	u32 		count;
+};
+
+struct aac_write
+{
+	u32		command;
+	u32 		cid;
+	u32 		block;
+	u32 		count;
+	u32	 	stable;	// Not used
+	struct sgmap	sg;	// Must be last in struct because it is variable
+};
+
+struct aac_write64
+{
+	u32	 	command;
+	u16 		cid;
+	u16 		sector_count;
+	u32 		block;
+	u16		pad;
+	u16		flags;
+	struct sgmap64	sg;	// Must be last in struct because it is variable
+};
+struct aac_write_reply
+{
+	u32		status;
+	u32 		count;
+	u32		committed;
+};
+
+#define CT_FLUSH_CACHE 129
+struct aac_synchronize {
+	u32		command;	/* VM_ContainerConfig */
+	u32		type;		/* CT_FLUSH_CACHE */
+	u32		cid;
+	u32		parm1;
+	u32		parm2;
+	u32		parm3;
+	u32		parm4;
+	u32		count;	/* sizeof(((struct aac_synchronize_reply *)NULL)->data) */
+};
+
+struct aac_synchronize_reply {
+	u32		dummy0;
+	u32		dummy1;
+	u32		status;	/* CT_OK */
+	u32		parm1;
+	u32		parm2;
+	u32		parm3;
+	u32		parm4;
+	u32		parm5;
+	u8		data[16];
+};
+
+struct aac_srb
+{
+	u32		function;
+	u32		channel;
+	u32		id;
+	u32		lun;
+	u32		timeout;
+	u32		flags;
+	u32		count;		// Data xfer size
+	u32		retry_limit;
+	u32		cdb_size;
+	u8		cdb[16];
+	struct	sgmap	sg;
+};
+
+
+
+#define		AAC_SENSE_BUFFERSIZE	 30
+
+struct aac_srb_reply
+{
+	u32		status;
+	u32		srb_status;
+	u32		scsi_status;
+	u32		data_xfer_length;
+	u32		sense_data_size;
+	u8		sense_data[AAC_SENSE_BUFFERSIZE]; // Could this be SCSI_SENSE_BUFFERSIZE?
+};
+/*
+ * SRB Flags
+ */
+#define		SRB_NoDataXfer		 0x0000
+#define		SRB_DisableDisconnect	 0x0004
+#define		SRB_DisableSynchTransfer 0x0008
+#define 	SRB_BypassFrozenQueue	 0x0010
+#define		SRB_DisableAutosense	 0x0020
+#define		SRB_DataIn		 0x0040
+#define 	SRB_DataOut		 0x0080
+
+/*
+ * SRB Functions - set in aac_srb->function
+ */
+#define	SRBF_ExecuteScsi	0x0000
+#define	SRBF_ClaimDevice	0x0001
+#define	SRBF_IO_Control		0x0002
+#define	SRBF_ReceiveEvent	0x0003
+#define	SRBF_ReleaseQueue	0x0004
+#define	SRBF_AttachDevice	0x0005
+#define	SRBF_ReleaseDevice	0x0006
+#define	SRBF_Shutdown		0x0007
+#define	SRBF_Flush		0x0008
+#define	SRBF_AbortCommand	0x0010
+#define	SRBF_ReleaseRecovery	0x0011
+#define	SRBF_ResetBus		0x0012
+#define	SRBF_ResetDevice	0x0013
+#define	SRBF_TerminateIO	0x0014
+#define	SRBF_FlushQueue		0x0015
+#define	SRBF_RemoveDevice	0x0016
+#define	SRBF_DomainValidation	0x0017
+
+/* 
+ * SRB status codes - returned in aac_srb_reply->srb_status
+ */
+#define SRB_STATUS_PENDING                  0x00
+#define SRB_STATUS_SUCCESS                  0x01
+#define SRB_STATUS_ABORTED                  0x02
+#define SRB_STATUS_ABORT_FAILED             0x03
+#define SRB_STATUS_ERROR                    0x04
+#define SRB_STATUS_BUSY                     0x05
+#define SRB_STATUS_INVALID_REQUEST          0x06
+#define SRB_STATUS_INVALID_PATH_ID          0x07
+#define SRB_STATUS_NO_DEVICE                0x08
+#define SRB_STATUS_TIMEOUT                  0x09
+#define SRB_STATUS_SELECTION_TIMEOUT        0x0A
+#define SRB_STATUS_COMMAND_TIMEOUT          0x0B
+#define SRB_STATUS_MESSAGE_REJECTED         0x0D
+#define SRB_STATUS_BUS_RESET                0x0E
+#define SRB_STATUS_PARITY_ERROR             0x0F
+#define SRB_STATUS_REQUEST_SENSE_FAILED     0x10
+#define SRB_STATUS_NO_HBA                   0x11
+#define SRB_STATUS_DATA_OVERRUN             0x12
+#define SRB_STATUS_UNEXPECTED_BUS_FREE      0x13
+#define SRB_STATUS_PHASE_SEQUENCE_FAILURE   0x14
+#define SRB_STATUS_BAD_SRB_BLOCK_LENGTH     0x15
+#define SRB_STATUS_REQUEST_FLUSHED          0x16
+#define SRB_STATUS_DELAYED_RETRY	    0x17
+#define SRB_STATUS_INVALID_LUN              0x20
+#define SRB_STATUS_INVALID_TARGET_ID        0x21
+#define SRB_STATUS_BAD_FUNCTION             0x22
+#define SRB_STATUS_ERROR_RECOVERY           0x23
+#define SRB_STATUS_NOT_STARTED		    0x24
+#define SRB_STATUS_NOT_IN_USE		    0x30
+#define SRB_STATUS_FORCE_ABORT		    0x31
+#define SRB_STATUS_DOMAIN_VALIDATION_FAIL   0x32
+
+/*
+ * Object-Server / Volume-Manager Dispatch Classes
+ */
+
+#define		VM_Null			0
+#define		VM_NameServe		1
+#define		VM_ContainerConfig	2
+#define		VM_Ioctl		3
+#define		VM_FilesystemIoctl	4
+#define		VM_CloseAll		5
+#define		VM_CtBlockRead		6
+#define		VM_CtBlockWrite		7
+#define		VM_SliceBlockRead	8	/* raw access to configured "storage objects" */
+#define		VM_SliceBlockWrite	9
+#define		VM_DriveBlockRead	10	/* raw access to physical devices */
+#define		VM_DriveBlockWrite	11
+#define		VM_EnclosureMgt		12	/* enclosure management */
+#define		VM_Unused		13	/* used to be diskset management */
+#define		VM_CtBlockVerify	14
+#define		VM_CtPerf		15	/* performance test */
+#define		VM_CtBlockRead64	16
+#define		VM_CtBlockWrite64	17
+#define		VM_CtBlockVerify64	18
+#define		VM_CtHostRead64		19
+#define		VM_CtHostWrite64	20
+
+#define		MAX_VMCOMMAND_NUM	21	/* used for sizing stats array - leave last */
+
+/*
+ *	Descriptive information (eg, vital stats)
+ *	that a content manager might report.  The
+ *	FileArray filesystem component is one example
+ *	of a content manager.  Raw mode might be
+ *	another.
+ */
+
+struct aac_fsinfo {
+	u32  fsTotalSize;	/* Consumed by fs, incl. metadata */
+	u32  fsBlockSize;
+	u32  fsFragSize;
+	u32  fsMaxExtendSize;
+	u32  fsSpaceUnits;
+	u32  fsMaxNumFiles;
+	u32  fsNumFreeFiles;
+	u32  fsInodeDensity;
+};	/* valid iff ObjType == FT_FILESYS && !(ContentState & FSCS_NOTCLEAN) */
+
+union aac_contentinfo {
+	struct aac_fsinfo filesys;	/* valid iff ObjType == FT_FILESYS && !(ContentState & FSCS_NOTCLEAN) */
+};
+
+/*
+ *	Query for Container Configuration Status
+ */
+
+#define CT_GET_CONFIG_STATUS 147
+struct aac_get_config_status {
+	u32		command;	/* VM_ContainerConfig */
+	u32		type;		/* CT_GET_CONFIG_STATUS */
+	u32		parm1;
+	u32		parm2;
+	u32		parm3;
+	u32		parm4;
+	u32		parm5;
+	u32		count;	/* sizeof(((struct aac_get_config_status_resp *)NULL)->data) */
+};
+
+#define CFACT_CONTINUE 0
+#define CFACT_PAUSE    1
+#define CFACT_ABORT    2
+struct aac_get_config_status_resp {
+	u32		response; /* ST_OK */
+	u32		dummy0;
+	u32		status;	/* CT_OK */
+	u32		parm1;
+	u32		parm2;
+	u32		parm3;
+	u32		parm4;
+	u32		parm5;
+	struct {
+		u32	action; /* CFACT_CONTINUE, CFACT_PAUSE or CFACT_ABORT */
+		u16	flags;
+		s16	count;
+	}		data;
+};
+
+/*
+ *	Accept the configuration as-is
+ */
+
+#define CT_COMMIT_CONFIG 152
+
+struct aac_commit_config {
+	u32		command;	/* VM_ContainerConfig */
+	u32		type;		/* CT_COMMIT_CONFIG */
+};
+
+/*
+ *	Query for Container Configuration Count
+ */
+
+#define CT_GET_CONTAINER_COUNT 4
+struct aac_get_container_count {
+	u32		command;	/* VM_ContainerConfig */
+	u32		type;		/* CT_GET_CONTAINER_COUNT */
+};
+
+struct aac_get_container_count_resp {
+	u32		response; /* ST_OK */
+	u32		dummy0;
+	u32		MaxContainers;
+	u32		ContainerSwitchEntries;
+	u32		MaxPartitions;
+};
+
+
+/*
+ *	Query for "mountable" objects, ie, objects that are typically
+ *	associated with a drive letter on the client (host) side.
+ */
+
+struct aac_mntent {
+	u32    			oid;
+	u8			name[16];	// if applicable
+	struct creation_info	create_info;	// if applicable
+	u32			capacity;
+	u32			vol;    	// substrate structure
+	u32			obj;	        // FT_FILESYS, FT_DATABASE, etc.
+	u32			state;		// unready for mounting, readonly, etc.
+	union aac_contentinfo	fileinfo;	// Info specific to content manager (eg, filesystem)
+	u32			altoid;		// != oid <==> snapshot or broken mirror exists
+};
+
+#define FSCS_NOTCLEAN	0x0001  /* fsck is necessary before mounting */
+#define FSCS_READONLY	0x0002	/* possible result of broken mirror */
+#define FSCS_HIDDEN	0x0004	/* should be ignored - set during a clear */
+
+struct aac_query_mount {
+	u32		command;
+	u32		type;
+	u32		count;
+};
+
+struct aac_mount {
+	u32		status;
+	u32	   	type;           /* should be same as that requested */
+	u32		count;
+	struct aac_mntent mnt[1];
+};
+
+#define CT_READ_NAME 130
+struct aac_get_name {
+	u32		command;	/* VM_ContainerConfig */
+	u32		type;		/* CT_READ_NAME */
+	u32		cid;
+	u32		parm1;
+	u32		parm2;
+	u32		parm3;
+	u32		parm4;
+	u32		count;	/* sizeof(((struct aac_get_name_resp *)NULL)->data) */
+};
+
+#define CT_OK        218
+struct aac_get_name_resp {
+	u32		dummy0;
+	u32		dummy1;
+	u32		status;	/* CT_OK */
+	u32		parm1;
+	u32		parm2;
+	u32		parm3;
+	u32		parm4;
+	u32		parm5;
+	u8		data[16];
+};
+
+/*
+ * The following command is sent to shut down each container.
+ */
+
+struct aac_close {
+	u32	command;
+	u32	cid;
+};
+
+struct aac_query_disk
+{
+	s32	cnum;
+	s32	bus;
+	s32	id;
+	s32	lun;
+	u32	valid;
+	u32	locked;
+	u32	deleted;
+	s32	instance;
+	s8	name[10];
+	u32	unmapped;
+};
+
+struct aac_delete_disk {
+	u32	disknum;
+	u32	cnum;
+};
+ 
+struct fib_ioctl
+{
+	u32	fibctx;
+	s32	wait;
+	char	__user *fib;
+};
+
+struct revision
+{
+	u32 compat;
+	u32 version;
+	u32 build;
+};
+	
+/*
+ * 	Ugly - non-Linux-like ioctl coding, kept for backwards compatibility.
+ */
+
+#define CTL_CODE(function, method) (                 \
+    (4<< 16) | ((function) << 2) | (method) \
+)
+
+/*
+ *	Define the method codes for how buffers are passed for I/O and FS 
+ *	controls
+ */
+
+#define METHOD_BUFFERED                 0
+#define METHOD_NEITHER                  3
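+
+/*
+ * Worked example: FSACTL_SENDFIB below expands to
+ *	(4 << 16) | (2050 << 2) | METHOD_BUFFERED = 0x40000 | 0x2008 | 0 = 0x42008
+ */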
+
+/*
+ *	Filesystem ioctls
+ */
+
+#define FSACTL_SENDFIB                  	CTL_CODE(2050, METHOD_BUFFERED)
+#define FSACTL_SEND_RAW_SRB               	CTL_CODE(2067, METHOD_BUFFERED)
+#define FSACTL_DELETE_DISK			0x163
+#define FSACTL_QUERY_DISK			0x173
+#define FSACTL_OPEN_GET_ADAPTER_FIB		CTL_CODE(2100, METHOD_BUFFERED)
+#define FSACTL_GET_NEXT_ADAPTER_FIB		CTL_CODE(2101, METHOD_BUFFERED)
+#define FSACTL_CLOSE_GET_ADAPTER_FIB		CTL_CODE(2102, METHOD_BUFFERED)
+#define FSACTL_MINIPORT_REV_CHECK               CTL_CODE(2107, METHOD_BUFFERED)
+#define FSACTL_GET_PCI_INFO               	CTL_CODE(2119, METHOD_BUFFERED)
+#define FSACTL_FORCE_DELETE_DISK		CTL_CODE(2120, METHOD_NEITHER)
+#define FSACTL_GET_CONTAINERS			2131
+
+
+struct aac_common
+{
+	/*
+	 *	If this value is set to 1 then interrupt moderation will occur
+	 *	in the base communication support.
+	 */
+	u32 irq_mod;
+	u32 peak_fibs;
+	u32 zero_fibs;
+	u32 fib_timeouts;
+	/*
+	 *	Statistical counters in debug mode
+	 */
+#ifdef DBG
+	u32 FibsSent;
+	u32 FibRecved;
+	u32 NoResponseSent;
+	u32 NoResponseRecved;
+	u32 AsyncSent;
+	u32 AsyncRecved;
+	u32 NormalSent;
+	u32 NormalRecved;
+#endif
+};
+
+extern struct aac_common aac_config;
+
+
+/*
+ *	The following macro is used when sending and receiving FIBs. It is
+ *	only used for debugging.
+ */
+ 
+#ifdef DBG
+#define	FIB_COUNTER_INCREMENT(counter)		(counter)++
+#else
+#define	FIB_COUNTER_INCREMENT(counter)		
+#endif
+
+/*
+ *	Adapter direct commands
+ *	Monitor/Kernel API
+ */
+
+#define	BREAKPOINT_REQUEST		0x00000004
+#define	INIT_STRUCT_BASE_ADDRESS	0x00000005
+#define READ_PERMANENT_PARAMETERS	0x0000000a
+#define WRITE_PERMANENT_PARAMETERS	0x0000000b
+#define HOST_CRASHING			0x0000000d
+#define	SEND_SYNCHRONOUS_FIB		0x0000000c
+#define COMMAND_POST_RESULTS		0x00000014
+#define GET_ADAPTER_PROPERTIES		0x00000019
+#define GET_DRIVER_BUFFER_PROPERTIES	0x00000023
+#define RCV_TEMP_READINGS		0x00000025
+#define GET_COMM_PREFERRED_SETTINGS	0x00000026
+#define IOP_RESET			0x00001000
+#define RE_INIT_ADAPTER			0x000000ee
+
+/*
+ *	Adapter Status Register
+ *
+ *  Phase Status mailbox is 32 bits:
+ *	<31:16> = Phase Status
+ *	<15:0>  = Phase
+ *
+ *	The adapter reports its present state through the phase.  Only
+ *	a single phase should ever be set.  Each phase can have multiple
+ *	phase status bits to provide more detailed information about the
+ *	state of the board.  Care should be taken to ensure that any phase
+ *	status bits that are set when changing the phase are also valid
+ *	for the new phase or are cleared out.  Adapter software (monitor,
+ *	iflash, kernel) is responsible for properly maintaining the phase
+ *	status mailbox when it is running.
+ *
+ *	MONKER_API Phases
+ *
+ *	Phases are bit oriented.  It is NOT valid to have multiple bits set
+ */
+
+#define	SELF_TEST_FAILED		0x00000004
+#define	MONITOR_PANIC			0x00000020
+#define	KERNEL_UP_AND_RUNNING		0x00000080
+#define	KERNEL_PANIC			0x00000100
+
+/*
+ *	Doorbell bit defines
+ */
+
+#define DoorBellSyncCmdAvailable	(1<<0)	/* Host -> Adapter */
+#define DoorBellPrintfDone		(1<<5)	/* Host -> Adapter */
+#define DoorBellAdapterNormCmdReady	(1<<1)	/* Adapter -> Host */
+#define DoorBellAdapterNormRespReady	(1<<2)	/* Adapter -> Host */
+#define DoorBellAdapterNormCmdNotFull	(1<<3)	/* Adapter -> Host */
+#define DoorBellAdapterNormRespNotFull	(1<<4)	/* Adapter -> Host */
+#define DoorBellPrintfReady		(1<<5)	/* Adapter -> Host */
+
+/*
+ *	For FIB communication, we need all of the following things
+ *	to send back to the user.
+ */
+ 
+#define 	AifCmdEventNotify	1	/* Notify of event */
+#define			AifEnConfigChange	3	/* Adapter configuration change */
+#define			AifEnContainerChange	4	/* Container configuration change */
+#define			AifEnDeviceFailure	5	/* SCSI device failed */
+#define			AifEnAddContainer	15	/* A new array was created */
+#define			AifEnDeleteContainer	16	/* A container was deleted */
+#define			AifEnExpEvent		23	/* Firmware Event Log */
+#define			AifExeFirmwarePanic	3	/* Firmware Event Panic */
+#define			AifHighPriority		3	/* Highest Priority Event */
+
+#define		AifCmdJobProgress	2	/* Progress report */
+#define			AifJobCtrZero	101	/* Array Zero progress */
+#define			AifJobStsSuccess 1	/* Job completes */
+#define		AifCmdAPIReport		3	/* Report from other user of API */
+#define		AifCmdDriverNotify	4	/* Notify host driver of event */
+#define			AifDenMorphComplete 200	/* A morph operation completed */
+#define			AifDenVolumeExtendComplete 201 /* A volume extend completed */
+#define		AifReqJobList		100	/* Gets back complete job list */
+#define		AifReqJobsForCtr	101	/* Gets back jobs for specific container */
+#define		AifReqJobsForScsi	102	/* Gets back jobs for specific SCSI device */ 
+#define		AifReqJobReport		103	/* Gets back a specific job report or list of them */ 
+#define		AifReqTerminateJob	104	/* Terminates job */
+#define		AifReqSuspendJob	105	/* Suspends a job */
+#define		AifReqResumeJob		106	/* Resumes a job */ 
+#define		AifReqSendAPIReport	107	/* API generic report requests */
+#define		AifReqAPIJobStart	108	/* Start a job from the API */
+#define		AifReqAPIJobUpdate	109	/* Update a job report from the API */
+#define		AifReqAPIJobFinish	110	/* Finish a job from the API */
+
+/*
+ *	Adapter Initiated FIB command structures. Start with the adapter
+ *	initiated FIBs that really come from the adapter, and get responded
+ *	to by the host.
+ */
+
+struct aac_aifcmd {
+	u32 command;		/* Tell host what type of notify this is */
+	u32 seqnum;		/* To allow ordering of reports (if necessary) */
+	u8 data[1];		/* Undefined length (from kernel viewpoint) */
+};
+
+/**
+ *	cap_to_cyls	-	convert capacity to cylinders
+ *	@capacity: capacity in sectors; may be a 64 bit value
+ *	@divisor: number of sectors per cylinder
+ */
+static inline u32 cap_to_cyls(sector_t capacity, u32 divisor)
+{
+	sector_div(capacity, divisor);
+	return (u32)capacity;
+}
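+
+/*
+ * Illustrative use (hypothetical geometry): for a BIOS geometry of
+ * 64 heads and 32 sectors per track the cylinder count would be
+ * cap_to_cyls(capacity, 64 * 32).
+ */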
+
+struct scsi_cmnd;
+
+const char *aac_driverinfo(struct Scsi_Host *);
+struct fib *fib_alloc(struct aac_dev *dev);
+int fib_setup(struct aac_dev *dev);
+void fib_map_free(struct aac_dev *dev);
+void fib_free(struct fib * context);
+void fib_init(struct fib * context);
+void fib_dealloc(struct fib * context);
+void aac_printf(struct aac_dev *dev, u32 val);
+int fib_send(u16 command, struct fib * context, unsigned long size, int priority, int wait, int reply, fib_callback callback, void *ctxt);
+int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry);
+void aac_consumer_free(struct aac_dev * dev, struct aac_queue * q, u32 qnum);
+int fib_complete(struct fib * context);
+#define fib_data(fibctx) ((void *)(fibctx)->hw_fib->data)
+struct aac_dev *aac_init_adapter(struct aac_dev *dev);
+int aac_get_config_status(struct aac_dev *dev);
+int aac_get_containers(struct aac_dev *dev);
+int aac_scsi_cmd(struct scsi_cmnd *cmd);
+int aac_dev_ioctl(struct aac_dev *dev, int cmd, void __user *arg);
+int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg);
+int aac_rx_init(struct aac_dev *dev);
+int aac_rkt_init(struct aac_dev *dev);
+int aac_sa_init(struct aac_dev *dev);
+unsigned int aac_response_normal(struct aac_queue * q);
+unsigned int aac_command_normal(struct aac_queue * q);
+int aac_command_thread(struct aac_dev * dev);
+int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context *fibctx);
+int fib_adapter_complete(struct fib * fibptr, unsigned short size);
+struct aac_driver_ident* aac_get_driver_ident(int devtype);
+int aac_get_adapter_info(struct aac_dev* dev);
+int aac_send_shutdown(struct aac_dev *dev);
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
new file mode 100644
index 000000000000..30dd1f7120f4
--- /dev/null
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -0,0 +1,683 @@
+/*
+ *	Adaptec AAC series RAID controller driver
+ *	(c) Copyright 2001 Red Hat Inc.	<alan@redhat.com>
+ *
+ * based on the old aacraid driver that is..
+ * Adaptec aacraid device driver for Linux.
+ *
+ * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.  If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Module Name:
+ *  commctrl.c
+ *
+ * Abstract: Contains all routines for control of the AFA comm layer
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/completion.h>
+#include <linux/dma-mapping.h>
+#include <linux/blkdev.h>
+#include <asm/semaphore.h>
+#include <asm/uaccess.h>
+
+#include "aacraid.h"
+
+/**
+ *	ioctl_send_fib	-	send a FIB from userspace
+ *	@dev:	adapter being processed
+ *	@arg:	arguments to the ioctl call
+ *	
+ *	This routine sends a fib to the adapter on behalf of a user level
+ *	program.
+ */
+ 
+static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
+{
+	struct hw_fib * kfib;
+	struct fib *fibptr;
+
+	fibptr = fib_alloc(dev);
+	if(fibptr == NULL)
+		return -ENOMEM;
+		
+	kfib = fibptr->hw_fib;
+	/*
+	 *	First copy in the header so that we can check the size field.
+	 */
+	if (copy_from_user((void *)kfib, arg, sizeof(struct aac_fibhdr))) {
+		fib_free(fibptr);
+		return -EFAULT;
+	}
+	/*
+	 *	Since we copy based on the fib header size, make sure that we
+	 *	will not overrun the buffer when we copy the memory. Return
+	 *	an error if we would.
+	 */
+	if (le16_to_cpu(kfib->header.Size) > 
+			sizeof(struct hw_fib) - sizeof(struct aac_fibhdr)) {
+		fib_free(fibptr);
+		return -EINVAL;
+	}
+
+	if (copy_from_user(kfib, arg, le16_to_cpu(kfib->header.Size) +
+				sizeof(struct aac_fibhdr))) {
+		fib_free(fibptr);
+		return -EFAULT;
+	}
+
+	if (kfib->header.Command == cpu_to_le32(TakeABreakPt)) {
+		aac_adapter_interrupt(dev);
+		/*
+		 * Since we didn't really send a fib, zero out the state to allow 
+		 * cleanup code not to assert.
+		 */
+		kfib->header.XferState = 0;
+	} else {
+		int retval = fib_send(kfib->header.Command, fibptr,
+				le16_to_cpu(kfib->header.Size) , FsaNormal,
+				1, 1, NULL, NULL);
+		if (retval) {
+			fib_free(fibptr);
+			return retval;
+		}
+		if (fib_complete(fibptr) != 0) {
+			fib_free(fibptr);
+			return -EINVAL;
+		}
+	}
+	/*
+	 *	Make sure that the size returned by the adapter (which includes
+	 *	the header) is less than or equal to the size of a fib, so we
+	 *	don't corrupt application data. Then copy that size to the user
+	 *	buffer. (Don't try to add the header information again, since it
+	 *	was already included by the adapter.)
+	 */
+
+	if (copy_to_user(arg, (void *)kfib, le16_to_cpu(kfib->header.Size))) {
+		fib_free(fibptr);
+		return -EFAULT;
+	}
+	fib_free(fibptr);
+	return 0;
+}
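+
+/*
+ * Hypothetical user-space sketch of this path (shown only to make the
+ * expected buffer layout clear; assumes the aacraid structures are
+ * visible to user space and fd is a handle on the adapter device):
+ *
+ *	struct hw_fib fib;
+ *	memset(&fib, 0, sizeof(fib));
+ *	fib.header.Command = ...;	// command to issue
+ *	fib.header.Size = ...;		// payload size, excluding the header
+ *	// fill in fib.data[] with the command payload
+ *	if (ioctl(fd, FSACTL_SENDFIB, &fib) == 0)
+ *		;			// fib now holds the adapter's response
+ *
+ * The kernel copies the header first precisely so Size can be validated
+ * before it is trusted for the full copy.
+ */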
+
+/**
+ *	open_getadapter_fib	-	open an AdapterFibContext
+ *	@dev: adapter to use
+ *	@arg: ioctl argument
+ *
+ *	This routine creates and registers a new AdapterFibContext on the
+ *	adapter and returns its unique handle to the user, through which
+ *	adapter-initiated FIBs can then be fetched.
+ */
+
+static int open_getadapter_fib(struct aac_dev * dev, void __user *arg)
+{
+	struct aac_fib_context * fibctx;
+	int status;
+
+	fibctx = kmalloc(sizeof(struct aac_fib_context), GFP_KERNEL);
+	if (fibctx == NULL) {
+		status = -ENOMEM;
+	} else {
+		unsigned long flags;
+		struct list_head * entry;
+		struct aac_fib_context * context;
+
+		fibctx->type = FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT;
+		fibctx->size = sizeof(struct aac_fib_context);
+ 		/*
+		 *	Yes yes, I know this could be an index, but we have a
+		 * better guarantee of uniqueness for the locked loop below.
+		 * Without the aid of a persistent history, this also helps
+		 * reduce the chance that the opaque context would be reused.
+		 */
+		fibctx->unique = (u32)((ulong)fibctx & 0xFFFFFFFF);
+		/*
+		 *	Initialize the mutex used to wait for the next AIF.
+		 */
+		init_MUTEX_LOCKED(&fibctx->wait_sem);
+		fibctx->wait = 0;
+		/*
+		 *	Initialize the fibs and set the count of fibs on
+		 *	the list to 0.
+		 */
+		fibctx->count = 0;
+		INIT_LIST_HEAD(&fibctx->fib_list);
+		fibctx->jiffies = jiffies/HZ;
+		/*
+		 *	Now add this context onto the adapter's 
+		 *	AdapterFibContext list.
+		 */
+		spin_lock_irqsave(&dev->fib_lock, flags);
+		/* Ensure that we have a unique identifier */
+		entry = dev->fib_list.next;
+		while (entry != &dev->fib_list) {
+			context = list_entry(entry, struct aac_fib_context, next);
+			if (context->unique == fibctx->unique) {
+				/* Not unique (32 bits) */
+				fibctx->unique++;
+				entry = dev->fib_list.next;
+			} else {
+				entry = entry->next;
+			}
+		}
+		list_add_tail(&fibctx->next, &dev->fib_list);
+		spin_unlock_irqrestore(&dev->fib_lock, flags);
+		if (copy_to_user(arg,  &fibctx->unique, 
+						sizeof(fibctx->unique))) {
+			status = -EFAULT;
+		} else {
+			status = 0;
+		}	
+	}
+	return status;
+}
+
+/**
+ *	next_getadapter_fib	-	get the next fib
+ *	@dev: adapter to use
+ *	@arg: ioctl argument
+ *	
+ * 	This routine will get the next Fib, if available, from the AdapterFibContext
+ *	passed in from the user.
+ */
+
+static int next_getadapter_fib(struct aac_dev * dev, void __user *arg)
+{
+	struct fib_ioctl f;
+	struct fib *fib;
+	struct aac_fib_context *fibctx;
+	int status;
+	struct list_head * entry;
+	unsigned long flags;
+	
+	if(copy_from_user((void *)&f, arg, sizeof(struct fib_ioctl)))
+		return -EFAULT;
+	/*
+	 *	Verify that the HANDLE passed in was a valid AdapterFibContext
+	 *
+	 *	Search the list of AdapterFibContext addresses on the adapter
+	 *	to be sure this is a valid address
+	 */
+	entry = dev->fib_list.next;
+	fibctx = NULL;
+
+	while (entry != &dev->fib_list) {
+		fibctx = list_entry(entry, struct aac_fib_context, next);
+		/*
+		 *	Extract the AdapterFibContext from the Input parameters.
+		 */
+		if (fibctx->unique == f.fibctx) {   /* We found a winner */
+			break;
+		}
+		entry = entry->next;
+		fibctx = NULL;
+	}
+	if (!fibctx) {
+		dprintk ((KERN_INFO "Fib Context not found\n"));
+		return -EINVAL;
+	}
+
+	if((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) ||
+		 (fibctx->size != sizeof(struct aac_fib_context))) {
+		dprintk ((KERN_INFO "Fib Context corrupt?\n"));
+		return -EINVAL;
+	}
+	status = 0;
+	spin_lock_irqsave(&dev->fib_lock, flags);
+	/*
+	 *	If there are no fibs to send back, then either wait or return
+	 *	-EAGAIN
+	 */
+return_fib:
+	if (!list_empty(&fibctx->fib_list)) {
+		struct list_head * entry;
+		/*
+		 *	Pull the next fib from the fibs
+		 */
+		entry = fibctx->fib_list.next;
+		list_del(entry);
+		
+		fib = list_entry(entry, struct fib, fiblink);
+		fibctx->count--;
+		spin_unlock_irqrestore(&dev->fib_lock, flags);
+		if (copy_to_user(f.fib, fib->hw_fib, sizeof(struct hw_fib))) {
+			kfree(fib->hw_fib);
+			kfree(fib);
+			return -EFAULT;
+		}	
+		/*
+		 *	Free the space occupied by this copy of the fib.
+		 */
+		kfree(fib->hw_fib);
+		kfree(fib);
+		status = 0;
+		fibctx->jiffies = jiffies/HZ;
+	} else {
+		spin_unlock_irqrestore(&dev->fib_lock, flags);
+		if (f.wait) {
+			if(down_interruptible(&fibctx->wait_sem) < 0) {
+				status = -EINTR;
+			} else {
+				/* Lock again and retry */
+				spin_lock_irqsave(&dev->fib_lock, flags);
+				goto return_fib;
+			}
+		} else {
+			status = -EAGAIN;
+		}	
+	}
+	return status;
+}
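+
+/*
+ * Hypothetical user-space consumer of the AIF stream, tying the three
+ * ioctls together (sketch only; field names are those used by
+ * next_getadapter_fib() above, and buf receives one adapter-initiated
+ * FIB per successful call):
+ *
+ *	u32 ctx;
+ *	struct fib_ioctl f;
+ *	struct hw_fib buf;
+ *
+ *	ioctl(fd, FSACTL_OPEN_GET_ADAPTER_FIB, &ctx);
+ *	f.fibctx = ctx;
+ *	f.wait = 1;	// block in down_interruptible() until an AIF arrives
+ *	f.fib = &buf;
+ *	while (ioctl(fd, FSACTL_GET_NEXT_ADAPTER_FIB, &f) == 0)
+ *		;	// process buf
+ *	ioctl(fd, FSACTL_CLOSE_GET_ADAPTER_FIB, (void *)(unsigned long)ctx);
+ */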
+
+int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context * fibctx)
+{
+	struct fib *fib;
+
+	/*
+	 *	First free any FIBs that have not been consumed.
+	 */
+	while (!list_empty(&fibctx->fib_list)) {
+		struct list_head * entry;
+		/*
+		 *	Pull the next fib from the fibs
+		 */
+		entry = fibctx->fib_list.next;
+		list_del(entry);
+		fib = list_entry(entry, struct fib, fiblink);
+		fibctx->count--;
+		/*
+		 *	Free the space occupied by this copy of the fib.
+		 */
+		kfree(fib->hw_fib);
+		kfree(fib);
+	}
+	/*
+	 *	Remove the Context from the AdapterFibContext List
+	 */
+	list_del(&fibctx->next);
+	/*
+	 *	Invalidate context
+	 */
+	fibctx->type = 0;
+	/*
+	 *	Free the space occupied by the Context
+	 */
+	kfree(fibctx);
+	return 0;
+}
+
+/**
+ *	close_getadapter_fib	-	close down user fib context
+ *	@dev: adapter
+ *	@arg: ioctl arguments
+ *
+ *	This routine will close down the fibctx passed in from the user.
+ */
+ 
+static int close_getadapter_fib(struct aac_dev * dev, void __user *arg)
+{
+	struct aac_fib_context *fibctx;
+	int status;
+	unsigned long flags;
+	struct list_head * entry;
+
+	/*
+	 *	Verify that the HANDLE passed in was a valid AdapterFibContext
+	 *
+	 *	Search the list of AdapterFibContext addresses on the adapter
+	 *	to be sure this is a valid address
+	 */
+
+	entry = dev->fib_list.next;
+	fibctx = NULL;
+
+	while(entry != &dev->fib_list) {
+		fibctx = list_entry(entry, struct aac_fib_context, next);
+		/*
+		 *	Extract the fibctx from the input parameters
+		 */
+		if (fibctx->unique == (u32)(unsigned long)arg) {   
+			/* We found a winner */
+			break;
+		}
+		entry = entry->next;
+		fibctx = NULL;
+	}
+
+	if (!fibctx)
+		return 0; /* Already gone */
+
+	if((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) ||
+		 (fibctx->size != sizeof(struct aac_fib_context)))
+		return -EINVAL;
+	spin_lock_irqsave(&dev->fib_lock, flags);
+	status = aac_close_fib_context(dev, fibctx);
+	spin_unlock_irqrestore(&dev->fib_lock, flags);
+	return status;
+}
+
+/**
+ *	check_revision	-	report adapter revision info
+ *	@dev: adapter
+ *	@arg: ioctl arguments
+ *
+ *	This routine returns the adapter's kernel revision and build to the
+ *	caller. Under Linux there have been no version incompatibilities, so
+ *	this is simple!
+ */
+
+static int check_revision(struct aac_dev *dev, void __user *arg)
+{
+	struct revision response;
+
+	response.compat = 1;
+	response.version = dev->adapter_info.kernelrev;
+	response.build = dev->adapter_info.kernelbuild;
+
+	if (copy_to_user(arg, &response, sizeof(response)))
+		return -EFAULT;
+	return 0;
+}
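+
+/*
+ * Hypothetical user-space usage (sketch only; struct revision is the
+ * same structure the management tools pass in):
+ *
+ *	struct revision rev;
+ *	if (ioctl(fd, FSACTL_MINIPORT_REV_CHECK, &rev) == 0)
+ *		printf("adapter kernel rev %u build %u\n",
+ *		       rev.version, rev.build);
+ */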
+
+/**
+ *	aac_send_raw_srb	-	send a raw SRB from userspace
+ *	@dev: adapter
+ *	@arg: ioctl arguments
+ *
+ *	This routine sends a raw SRB (SCSI Request Block) to the adapter on
+ *	behalf of a user level program, copying and mapping the scatter/gather
+ *	buffers in both directions as required.
+ */
+
+int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
+{
+	struct fib* srbfib;
+	int status;
+	struct aac_srb *srbcmd;
+	struct aac_srb __user *user_srb = arg;
+	struct aac_srb_reply __user *user_reply;
+	struct aac_srb_reply* reply;
+	u32 fibsize = 0;
+	u32 flags = 0;
+	s32 rcode = 0;
+	u32 data_dir;
+	void __user *sg_user[32];
+	void *sg_list[32];
+	u32   sg_indx = 0;
+	u32 byte_count = 0;
+	u32 actual_fibsize = 0;
+	int i;
+
+
+	if (!capable(CAP_SYS_ADMIN)){
+		printk(KERN_DEBUG"aacraid: No permission to send raw srb\n"); 
+		return -EPERM;
+	}
+	/*
+	 *	Allocate and initialize a Fib, then set up the SRB command
+	 */
+	if (!(srbfib = fib_alloc(dev))) {
+		return -ENOMEM;
+	}
+	fib_init(srbfib);
+
+	srbcmd = (struct aac_srb*) fib_data(srbfib);
+
+	if(copy_from_user(&fibsize, &user_srb->count,sizeof(u32))){
+		printk(KERN_DEBUG"aacraid: Could not copy data size from user\n"); 
+		rcode = -EFAULT;
+		goto cleanup;
+	}
+
+	if (fibsize > FIB_DATA_SIZE_IN_BYTES) {
+		rcode = -EINVAL;
+		goto cleanup;
+	}
+
+	if(copy_from_user(srbcmd, user_srb,fibsize)){
+		printk(KERN_DEBUG"aacraid: Could not copy srb from user\n"); 
+		rcode = -EFAULT;
+		goto cleanup;
+	}
+
+	user_reply = arg+fibsize;
+
+	flags = srbcmd->flags;
+	// Fix up srb for endian and force some values
+	srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);	// Force this
+	srbcmd->channel  = cpu_to_le32(srbcmd->channel);
+	srbcmd->id	 = cpu_to_le32(srbcmd->id);
+	srbcmd->lun      = cpu_to_le32(srbcmd->lun);
+	srbcmd->flags    = cpu_to_le32(srbcmd->flags);
+	srbcmd->timeout  = cpu_to_le32(srbcmd->timeout);
+	srbcmd->retry_limit =cpu_to_le32(0); // Obsolete parameter
+	srbcmd->cdb_size = cpu_to_le32(srbcmd->cdb_size);
+	
+	switch (srbcmd->flags & (SRB_DataIn | SRB_DataOut)) {
+	case SRB_DataOut:
+		data_dir = DMA_TO_DEVICE;
+		break;
+	case (SRB_DataIn | SRB_DataOut):
+		data_dir = DMA_BIDIRECTIONAL;
+		break;
+	case SRB_DataIn:
+		data_dir = DMA_FROM_DEVICE;
+		break;
+	default:
+		data_dir = DMA_NONE;
+	}
+	if (dev->dac_support == 1) {
+		struct sgmap64* psg = (struct sgmap64*)&srbcmd->sg;
+		byte_count = 0;
+
+		/*
+		 * This should also catch if user used the 32 bit sgmap
+		 */
+		actual_fibsize = sizeof(struct aac_srb) - 
+			sizeof(struct sgentry) + ((srbcmd->sg.count & 0xff) * 
+			 	sizeof(struct sgentry64));
+		if(actual_fibsize != fibsize){ // User made a mistake - should not continue
+			printk(KERN_DEBUG"aacraid: Bad Size specified in Raw SRB command\n");
+			rcode = -EINVAL;
+			goto cleanup;
+		}
+		if ((data_dir == DMA_NONE) && psg->count) { 
+			printk(KERN_DEBUG"aacraid: SG with no direction specified in Raw SRB command\n");
+			rcode = -EINVAL;
+			goto cleanup;
+		}
+
+		for (i = 0; i < psg->count; i++) {
+			dma_addr_t addr; 
+			u64 le_addr;
+			void* p;
+			p = kmalloc(psg->sg[i].count,GFP_KERNEL|__GFP_DMA);
+			if(p == 0) {
+				printk(KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
+				psg->sg[i].count,i,psg->count);
+				rcode = -ENOMEM;
+				goto cleanup;
+			}
+			sg_user[i] = (void __user *)psg->sg[i].addr;
+			sg_list[i] = p; // save so we can clean up later
+			sg_indx = i;
+
+			if( flags & SRB_DataOut ){
+				if(copy_from_user(p,sg_user[i],psg->sg[i].count)){
+					printk(KERN_DEBUG"aacraid: Could not copy sg data from user\n"); 
+					rcode = -EFAULT;
+					goto cleanup;
+				}
+			}
+			addr = pci_map_single(dev->pdev, p, psg->sg[i].count, data_dir);
+
+			le_addr = cpu_to_le64(addr);
+			psg->sg[i].addr[1] = (u32)(le_addr>>32);
+			psg->sg[i].addr[0] = (u32)(le_addr & 0xffffffff);
+			psg->sg[i].count = cpu_to_le32(psg->sg[i].count);  
+			byte_count += psg->sg[i].count;
+		}
+
+		srbcmd->count = cpu_to_le32(byte_count);
+		status = fib_send(ScsiPortCommand64, srbfib, actual_fibsize, FsaNormal, 1, 1,NULL,NULL);
+	} else {
+		struct sgmap* psg = &srbcmd->sg;
+		byte_count = 0;
+
+		actual_fibsize = sizeof (struct aac_srb) + 
+			(((le32_to_cpu(srbcmd->sg.count) & 0xff) - 1) * 
+			 sizeof (struct sgentry));
+		if(actual_fibsize != fibsize){ // User made a mistake - should not continue
+			printk(KERN_DEBUG"aacraid: Bad Size specified in Raw SRB command\n");
+			rcode = -EINVAL;
+			goto cleanup;
+		}
+		if ((data_dir == DMA_NONE) && psg->count) {
+			printk(KERN_DEBUG"aacraid: SG with no direction specified in Raw SRB command\n");
+			rcode = -EINVAL;
+			goto cleanup;
+		}
+		for (i = 0; i < psg->count; i++) {
+			dma_addr_t addr; 
+			void* p;
+			p = kmalloc(psg->sg[i].count,GFP_KERNEL);
+			if(p == 0) {
+				printk(KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
+				psg->sg[i].count,i,psg->count);
+				rcode = -ENOMEM;
+				goto cleanup;
+			}
+			sg_user[i] = (void __user *)(psg->sg[i].addr);
+			sg_list[i] = p; // save so we can clean up later
+			sg_indx = i;
+
+			if( flags & SRB_DataOut ){
+				if(copy_from_user(p,sg_user[i],psg->sg[i].count)){
+					printk(KERN_DEBUG"aacraid: Could not copy sg data from user\n"); 
+					rcode = -EFAULT;
+					goto cleanup;
+				}
+			}
+			addr = pci_map_single(dev->pdev, p, psg->sg[i].count, data_dir);
+
+			psg->sg[i].addr = cpu_to_le32(addr);
+			psg->sg[i].count = cpu_to_le32(psg->sg[i].count);  
+			byte_count += psg->sg[i].count;
+		}
+		srbcmd->count = cpu_to_le32(byte_count);
+		status = fib_send(ScsiPortCommand, srbfib, actual_fibsize, FsaNormal, 1, 1, NULL, NULL);
+	}
+
+	if (status != 0){
+		printk(KERN_DEBUG"aacraid: Could not send raw srb fib to hba\n"); 
+		rcode = -1;
+		goto cleanup;
+	}
+
+	if( flags & SRB_DataIn ) {
+		for(i = 0 ; i <= sg_indx; i++){
+			if(copy_to_user(sg_user[i],sg_list[i],le32_to_cpu(srbcmd->sg.sg[i].count))){
+				printk(KERN_DEBUG"aacraid: Could not copy sg data to user\n"); 
+				rcode = -EFAULT;
+				goto cleanup;
+
+			}
+		}
+	}
+
+	reply = (struct aac_srb_reply *) fib_data(srbfib);
+	if(copy_to_user(user_reply,reply,sizeof(struct aac_srb_reply))){
+		printk(KERN_DEBUG"aacraid: Could not copy reply to user\n"); 
+		rcode = -EFAULT;
+		goto cleanup;
+	}
+
+cleanup:
+	for(i=0; i <= sg_indx; i++){
+		kfree(sg_list[i]);
+	}
+	fib_complete(srbfib);
+	fib_free(srbfib);
+
+	return rcode;
+}
+
+
+struct aac_pci_info {
+	u32 bus;
+	u32 slot;
+};
+
+int aac_get_pci_info(struct aac_dev *dev, void __user *arg)
+{
+	struct aac_pci_info pci_info;
+
+	pci_info.bus = dev->pdev->bus->number;
+	pci_info.slot = PCI_SLOT(dev->pdev->devfn);
+
+	if (copy_to_user(arg, &pci_info, sizeof(struct aac_pci_info))) {
+		printk(KERN_DEBUG "aacraid: Could not copy pci info\n");
+		return -EFAULT;
+	}
+	return 0;
+}
+
+
+int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg)
+{
+	int status;
+	
+	/*
+	 *	HBA gets first crack
+	 */
+	 
+	status = aac_dev_ioctl(dev, cmd, arg);
+	if(status != -ENOTTY)
+		return status;
+
+	switch (cmd) {
+	case FSACTL_MINIPORT_REV_CHECK:
+		status = check_revision(dev, arg);
+		break;
+	case FSACTL_SENDFIB:
+		status = ioctl_send_fib(dev, arg);
+		break;
+	case FSACTL_OPEN_GET_ADAPTER_FIB:
+		status = open_getadapter_fib(dev, arg);
+		break;
+	case FSACTL_GET_NEXT_ADAPTER_FIB:
+		status = next_getadapter_fib(dev, arg);
+		break;
+	case FSACTL_CLOSE_GET_ADAPTER_FIB:
+		status = close_getadapter_fib(dev, arg);
+		break;
+	case FSACTL_SEND_RAW_SRB:
+		status = aac_send_raw_srb(dev,arg);
+		break;
+	case FSACTL_GET_PCI_INFO:
+		status = aac_get_pci_info(dev,arg);
+		break;
+	default:
+		status = -ENOTTY;
+	  	break;	
+	}
+	return status;
+}
+
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
new file mode 100644
index 000000000000..6832a55ca907
--- /dev/null
+++ b/drivers/scsi/aacraid/comminit.c
@@ -0,0 +1,325 @@
+/*
+ *	Adaptec AAC series RAID controller driver
+ *	(c) Copyright 2001 Red Hat Inc.	<alan@redhat.com>
+ *
+ * based on the old aacraid driver that is..
+ * Adaptec aacraid device driver for Linux.
+ *
+ * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.  If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Module Name:
+ *  comminit.c
+ *
+ * Abstract: This supports the initialization of the host adapter communication
+ *    interface. This is a platform dependent module for the pci cyclone board.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/blkdev.h>
+#include <linux/completion.h>
+#include <linux/mm.h>
+#include <asm/semaphore.h>
+
+#include "aacraid.h"
+
+struct aac_common aac_config;
+
+static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long commsize, unsigned long commalign)
+{
+	unsigned char *base;
+	unsigned long size, align;
+	unsigned long fibsize = 4096;
+	unsigned long printfbufsiz = 256;
+	struct aac_init *init;
+	dma_addr_t phys;
+
+	size = fibsize + sizeof(struct aac_init) + commsize + commalign + printfbufsiz;
+
+	base = pci_alloc_consistent(dev->pdev, size, &phys);
+
+	if (base == NULL) {
+		printk(KERN_ERR "aacraid: unable to create mapping.\n");
+		return 0;
+	}
+	dev->comm_addr = (void *)base;
+	dev->comm_phys = phys;
+	dev->comm_size = size;
+	
+	dev->init = (struct aac_init *)(base + fibsize);
+	dev->init_pa = phys + fibsize;
+
+	init = dev->init;
+
+	init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION);
+	init->MiniPortRevision = cpu_to_le32(Sa_MINIPORT_REVISION);
+	init->fsrev = cpu_to_le32(dev->fsrev);
+
+	/*
+	 *	Adapter Fibs are the first thing allocated so that they
+	 *	start page aligned
+	 */
+	dev->aif_base_va = (struct hw_fib *)base;
+	
+	init->AdapterFibsVirtualAddress = 0;
+	init->AdapterFibsPhysicalAddress = cpu_to_le32((u32)phys);
+	init->AdapterFibsSize = cpu_to_le32(fibsize);
+	init->AdapterFibAlign = cpu_to_le32(sizeof(struct hw_fib));
+	/*
+	 * Number of 4k pages of host physical memory. The aacraid firmware
+	 * needs this number to be less than 4gb worth of pages. num_physpages
+	 * is in system page units. New firmware doesn't have any issues with
+	 * the mapping system, but older firmware did, and had *troubles*
+	 * dealing with the math overflowing past 32 bits, thus we must limit
+	 * this field.
+	 *
+	 * This assumes the memory is mapped zero->n, which isn't
+	 * always true on real computers. It also has some slight problems
+	 * with the GART on x86-64. I've btw never tried DMA from PCI space
+	 * on this platform but don't be surprised if it's problematic.
+	 */
+#ifndef CONFIG_GART_IOMMU
+	if ((num_physpages << (PAGE_SHIFT - 12)) <= AAC_MAX_HOSTPHYSMEMPAGES) {
+		init->HostPhysMemPages = 
+			cpu_to_le32(num_physpages << (PAGE_SHIFT-12));
+	} else 
+#endif	
+	{
+		init->HostPhysMemPages = cpu_to_le32(AAC_MAX_HOSTPHYSMEMPAGES);
+	}
+
+
+	/*
+	 * Increment the base address by the amount already used
+	 */
+	base = base + fibsize + sizeof(struct aac_init);
+	phys = (dma_addr_t)((ulong)phys + fibsize + sizeof(struct aac_init));
+	/*
+	 *	Align the beginning of Headers to commalign
+	 */
+	align = (commalign - ((unsigned long)(base) & (commalign - 1)));
+	base = base + align;
+	phys = phys + align;
+	/*
+	 *	Fill in addresses of the Comm Area Headers and Queues
+	 */
+	*commaddr = base;
+	init->CommHeaderAddress = cpu_to_le32((u32)phys);
+	/*
+	 *	Increment the base address by the size of the CommArea
+	 */
+	base = base + commsize;
+	phys = phys + commsize;
+	/*
+	 *	 Place the Printf buffer area after the Fast I/O comm area.
+	 */
+	dev->printfbuf = (void *)base;
+	init->printfbuf = cpu_to_le32(phys);
+	init->printfbufsiz = cpu_to_le32(printfbufsiz);
+	memset(base, 0, printfbufsiz);
+	return 1;
+}
+    
+static void aac_queue_init(struct aac_dev * dev, struct aac_queue * q, u32 *mem, int qsize)
+{
+	q->numpending = 0;
+	q->dev = dev;
+	INIT_LIST_HEAD(&q->pendingq);
+	init_waitqueue_head(&q->cmdready);
+	INIT_LIST_HEAD(&q->cmdq);
+	init_waitqueue_head(&q->qfull);
+	spin_lock_init(&q->lockdata);
+	q->lock = &q->lockdata;
+	q->headers.producer = mem;
+	q->headers.consumer = mem+1;
+	*(q->headers.producer) = cpu_to_le32(qsize);
+	*(q->headers.consumer) = cpu_to_le32(qsize);
+	q->entries = qsize;
+}
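+
+/*
+ * Layout note (informational): each queue is handed a two-u32 slice of
+ * the shared comm area for its index headers:
+ *
+ *	mem[0]	producer index
+ *	mem[1]	consumer index
+ *
+ * Both start at qsize; equal indices mean the queue is empty. This is
+ * why aac_comm_init() below advances 'headers' by 2 for every queue it
+ * initialises.
+ */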
+
+/**
+ *	aac_send_shutdown		-	shutdown an adapter
+ *	@dev: Adapter to shutdown
+ *
+ *	This routine will send a VM_CloseAll (shutdown) request to the adapter.
+ */
+
+int aac_send_shutdown(struct aac_dev * dev)
+{
+	struct fib * fibctx;
+	struct aac_close *cmd;
+	int status;
+
+	fibctx = fib_alloc(dev);
+	fib_init(fibctx);
+
+	cmd = (struct aac_close *) fib_data(fibctx);
+
+	cmd->command = cpu_to_le32(VM_CloseAll);
+	cmd->cid = cpu_to_le32(0xffffffff);
+
+	status = fib_send(ContainerCommand,
+			  fibctx,
+			  sizeof(struct aac_close),
+			  FsaNormal,
+			  1, 1,
+			  NULL, NULL);
+
+	if (status == 0)
+		fib_complete(fibctx);
+	fib_free(fibctx);
+	return status;
+}
+
+/**
+ *	aac_comm_init	-	Initialise FSA data structures
+ *	@dev:	Adapter to initialise
+ *
+ *	Initializes the data structures that are required for the FSA
+ *	communication interface to operate.
+ *	Returns
+ *		0 - if we were able to init the communication interface.
+ *		-ENOMEM - if there were errors initializing. This is a fatal error.
+ */
+ 
+int aac_comm_init(struct aac_dev * dev)
+{
+	unsigned long hdrsize = (sizeof(u32) * NUMBER_OF_COMM_QUEUES) * 2;
+	unsigned long queuesize = sizeof(struct aac_entry) * TOTAL_QUEUE_ENTRIES;
+	u32 *headers;
+	struct aac_entry * queues;
+	unsigned long size;
+	struct aac_queue_block * comm = dev->queues;
+	/*
+	 *	Now allocate and initialize the zone structures used as our 
+	 *	pool of FIB context records.  The size of the zone is based
+	 *	on the system memory size.  We also initialize the mutex used
+	 *	to protect the zone.
+	 */
+	spin_lock_init(&dev->fib_lock);
+
+	/*
+	 *	Allocate the physically contiguous space for the communication
+	 *	queue headers.
+	 */
+
+	size = hdrsize + queuesize;
+
+	if (!aac_alloc_comm(dev, (void * *)&headers, size, QUEUE_ALIGNMENT))
+		return -ENOMEM;
+
+	queues = (struct aac_entry *)(((ulong)headers) + hdrsize);
+
+	/* Adapter to Host normal priority Command queue */ 
+	comm->queue[HostNormCmdQueue].base = queues;
+	aac_queue_init(dev, &comm->queue[HostNormCmdQueue], headers, HOST_NORM_CMD_ENTRIES);
+	queues += HOST_NORM_CMD_ENTRIES;
+	headers += 2;
+
+	/* Adapter to Host high priority command queue */
+	comm->queue[HostHighCmdQueue].base = queues;
+	aac_queue_init(dev, &comm->queue[HostHighCmdQueue], headers, HOST_HIGH_CMD_ENTRIES);
+    
+	queues += HOST_HIGH_CMD_ENTRIES;
+	headers +=2;
+
+	/* Host to adapter normal priority command queue */
+	comm->queue[AdapNormCmdQueue].base = queues;
+	aac_queue_init(dev, &comm->queue[AdapNormCmdQueue], headers, ADAP_NORM_CMD_ENTRIES);
+    
+	queues += ADAP_NORM_CMD_ENTRIES;
+	headers += 2;
+
+	/* host to adapter high priority command queue */
+	comm->queue[AdapHighCmdQueue].base = queues;
+	aac_queue_init(dev, &comm->queue[AdapHighCmdQueue], headers, ADAP_HIGH_CMD_ENTRIES);
+    
+	queues += ADAP_HIGH_CMD_ENTRIES;
+	headers += 2;
+
+	/* adapter to host normal priority response queue */
+	comm->queue[HostNormRespQueue].base = queues;
+	aac_queue_init(dev, &comm->queue[HostNormRespQueue], headers, HOST_NORM_RESP_ENTRIES);
+	queues += HOST_NORM_RESP_ENTRIES;
+	headers += 2;
+
+	/* adapter to host high priority response queue */
+	comm->queue[HostHighRespQueue].base = queues;
+	aac_queue_init(dev, &comm->queue[HostHighRespQueue], headers, HOST_HIGH_RESP_ENTRIES);
+   
+	queues += HOST_HIGH_RESP_ENTRIES;
+	headers += 2;
+
+	/* host to adapter normal priority response queue */
+	comm->queue[AdapNormRespQueue].base = queues;
+	aac_queue_init(dev, &comm->queue[AdapNormRespQueue], headers, ADAP_NORM_RESP_ENTRIES);
+
+	queues += ADAP_NORM_RESP_ENTRIES;
+	headers += 2;
+	
+	/* host to adapter high priority response queue */ 
+	comm->queue[AdapHighRespQueue].base = queues;
+	aac_queue_init(dev, &comm->queue[AdapHighRespQueue], headers, ADAP_HIGH_RESP_ENTRIES);
+
+	comm->queue[AdapNormCmdQueue].lock = comm->queue[HostNormRespQueue].lock;
+	comm->queue[AdapHighCmdQueue].lock = comm->queue[HostHighRespQueue].lock;
+	comm->queue[AdapNormRespQueue].lock = comm->queue[HostNormCmdQueue].lock;
+	comm->queue[AdapHighRespQueue].lock = comm->queue[HostHighCmdQueue].lock;
+
+	return 0;
+}
+
+struct aac_dev *aac_init_adapter(struct aac_dev *dev)
+{
+	/*
+	 *	Ok now init the communication subsystem
+	 */
+
+	dev->queues = (struct aac_queue_block *) kmalloc(sizeof(struct aac_queue_block), GFP_KERNEL);
+	if (dev->queues == NULL) {
+		printk(KERN_ERR "Error could not allocate comm region.\n");
+		return NULL;
+	}
+	memset(dev->queues, 0, sizeof(struct aac_queue_block));
+
+	if (aac_comm_init(dev)<0){
+		kfree(dev->queues);
+		return NULL;
+	}
+	/*
+	 *	Initialize the list of fibs
+	 */
+	if(fib_setup(dev)<0){
+		kfree(dev->queues);
+		return NULL;
+	}
+		
+	INIT_LIST_HEAD(&dev->fib_list);
+	init_completion(&dev->aif_completion);
+
+	return dev;
+}
+
+    
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
new file mode 100644
index 000000000000..3f36dbaa2bb3
--- /dev/null
+++ b/drivers/scsi/aacraid/commsup.c
@@ -0,0 +1,939 @@
+/*
+ *	Adaptec AAC series RAID controller driver
+ *	(c) Copyright 2001 Red Hat Inc.	<alan@redhat.com>
+ *
+ * based on the old aacraid driver that is..
+ * Adaptec aacraid device driver for Linux.
+ *
+ * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.  If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Module Name:
+ *  commsup.c
+ *
+ * Abstract: Contains all routines that are required for FSA host/adapter
+ *    communication.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/completion.h>
+#include <linux/blkdev.h>
+#include <asm/semaphore.h>
+
+#include "aacraid.h"
+
+/**
+ *	fib_map_alloc		-	allocate the fib objects
+ *	@dev: Adapter to allocate for
+ *
+ *	Allocate and map the shared PCI space for the FIB blocks used to
+ *	talk to the Adaptec firmware.
+ */
+ 
+static int fib_map_alloc(struct aac_dev *dev)
+{
+	if((dev->hw_fib_va = pci_alloc_consistent(dev->pdev, sizeof(struct hw_fib) * AAC_NUM_FIB, &dev->hw_fib_pa))==NULL)
+		return -ENOMEM;
+	return 0;
+}
+
+/**
+ *	fib_map_free		-	free the fib objects
+ *	@dev: Adapter to free
+ *
+ *	Free the PCI mappings and the memory allocated for FIB blocks
+ *	on this adapter.
+ */
+
+void fib_map_free(struct aac_dev *dev)
+{
+	pci_free_consistent(dev->pdev, sizeof(struct hw_fib) * AAC_NUM_FIB, dev->hw_fib_va, dev->hw_fib_pa);
+}
+
+/**
+ *	fib_setup	-	setup the fibs
+ *	@dev: Adapter to set up
+ *
+ *	Allocate the PCI space for the fibs, map it and then initialise the
+ *	fib area, the unmapped fib data and also the free list
+ */
+
+int fib_setup(struct aac_dev * dev)
+{
+	struct fib *fibptr;
+	struct hw_fib *hw_fib_va;
+	dma_addr_t hw_fib_pa;
+	int i;
+	
+	if(fib_map_alloc(dev)<0)
+		return -ENOMEM;
+		
+	hw_fib_va = dev->hw_fib_va;
+	hw_fib_pa = dev->hw_fib_pa;
+	memset(hw_fib_va, 0, sizeof(struct hw_fib) * AAC_NUM_FIB);
+	/*
+	 *	Initialise the fibs
+	 */
+	for (i = 0, fibptr = &dev->fibs[i]; i < AAC_NUM_FIB; i++, fibptr++) 
+	{
+		fibptr->dev = dev;
+		fibptr->hw_fib = hw_fib_va;
+		fibptr->data = (void *) fibptr->hw_fib->data;
+		fibptr->next = fibptr+1;	/* Forward chain the fibs */
+		init_MUTEX_LOCKED(&fibptr->event_wait);
+		spin_lock_init(&fibptr->event_lock);
+		hw_fib_va->header.XferState = 0xffffffff;
+		hw_fib_va->header.SenderSize = cpu_to_le16(sizeof(struct hw_fib));
+		fibptr->hw_fib_pa = hw_fib_pa;
+		hw_fib_va = (struct hw_fib *)((unsigned char *)hw_fib_va + sizeof(struct hw_fib));
+		hw_fib_pa = hw_fib_pa + sizeof(struct hw_fib); 
+	}
+	/*
+	 *	Add the fib chain to the free list
+	 */
+	dev->fibs[AAC_NUM_FIB-1].next = NULL;
+	/*
+	 *	Enable this to debug out of queue space
+	 */
+	dev->free_fib = &dev->fibs[0];
+	return 0;
+}
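+
+/*
+ * Pool layout note (informational): fib i in dev->fibs[] owns the i-th
+ * hw_fib in the DMA region, so a fib can be recovered from its pool
+ * index alone. fib_send() below publishes (fibptr - dev->fibs) << 1 as
+ * the SenderFibAddress; the response path in dpcsup.c shifts the index
+ * back down and uses bit 0 as the fast-response flag.
+ */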
+
+/**
+ *	fib_alloc	-	allocate a fib
+ *	@dev: Adapter to allocate the fib for
+ *
+ *	Allocate a fib from the adapter fib pool. If the pool is empty we
+ *	wait for fibs to become free.
+ */
+ 
+struct fib * fib_alloc(struct aac_dev *dev)
+{
+	struct fib * fibptr;
+	unsigned long flags;
+	spin_lock_irqsave(&dev->fib_lock, flags);
+	fibptr = dev->free_fib;	
+	/* Cannot sleep here or you get hangs. Instead we did the
+	   maths at compile time. */
+	if(!fibptr)
+		BUG();
+	dev->free_fib = fibptr->next;
+	spin_unlock_irqrestore(&dev->fib_lock, flags);
+	/*
+	 *	Set the proper node type code and node byte size
+	 */
+	fibptr->type = FSAFS_NTC_FIB_CONTEXT;
+	fibptr->size = sizeof(struct fib);
+	/*
+	 *	Null out fields that depend on being zero at the start of
+	 *	each I/O
+	 */
+	fibptr->hw_fib->header.XferState = 0;
+	fibptr->callback = NULL;
+	fibptr->callback_data = NULL;
+
+	return fibptr;
+}
+
+/**
+ *	fib_free	-	free a fib
+ *	@fibptr: fib to free up
+ *
+ *	Frees up a fib and places it on the appropriate queue
+ *	(either free or timed out)
+ */
+ 
+void fib_free(struct fib * fibptr)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&fibptr->dev->fib_lock, flags);
+	if (fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT) {
+		aac_config.fib_timeouts++;
+		fibptr->next = fibptr->dev->timeout_fib;
+		fibptr->dev->timeout_fib = fibptr;
+	} else {
+		if (fibptr->hw_fib->header.XferState != 0) {
+			printk(KERN_WARNING "fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n", 
+				 (void*)fibptr, 
+				 le32_to_cpu(fibptr->hw_fib->header.XferState));
+		}
+		fibptr->next = fibptr->dev->free_fib;
+		fibptr->dev->free_fib = fibptr;
+	}	
+	spin_unlock_irqrestore(&fibptr->dev->fib_lock, flags);
+}
+
+/**
+ *	fib_init	-	initialise a fib
+ *	@fibptr: The fib to initialize
+ *	
+ *	Set up the generic fib fields ready for use
+ */
+ 
+void fib_init(struct fib *fibptr)
+{
+	struct hw_fib *hw_fib = fibptr->hw_fib;
+
+	hw_fib->header.StructType = FIB_MAGIC;
+	hw_fib->header.Size = cpu_to_le16(sizeof(struct hw_fib));
+	hw_fib->header.XferState = cpu_to_le32(HostOwned | FibInitialized | FibEmpty | FastResponseCapable);
+	hw_fib->header.SenderFibAddress = cpu_to_le32(fibptr->hw_fib_pa);
+	hw_fib->header.ReceiverFibAddress = cpu_to_le32(fibptr->hw_fib_pa);
+	hw_fib->header.SenderSize = cpu_to_le16(sizeof(struct hw_fib));
+}
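+
+/*
+ * Typical lifecycle of a host-initiated fib, assembled from the helpers
+ * in this file (see aac_send_shutdown() in comminit.c for a concrete
+ * instance):
+ *
+ *	struct fib *fibptr = fib_alloc(dev);
+ *	fib_init(fibptr);
+ *	// build the command in fib_data(fibptr), then:
+ *	status = fib_send(command, fibptr, size, FsaNormal, 1, 1, NULL, NULL);
+ *	if (status == 0)
+ *		fib_complete(fibptr);
+ *	fib_free(fibptr);
+ */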
+
+/**
+ *	fib_deallocate		-	deallocate a fib
+ *	@fibptr: fib to deallocate
+ *
+ *	Will deallocate and return to the free pool the FIB pointed to by the
+ *	caller.
+ */
+ 
+void fib_dealloc(struct fib * fibptr)
+{
+	struct hw_fib *hw_fib = fibptr->hw_fib;
+	if(hw_fib->header.StructType != FIB_MAGIC) 
+		BUG();
+	hw_fib->header.XferState = 0;        
+}
+
+/*
+ *	Communication primitives define and support the queuing method we use
+ *	to support host to adapter communication. All queue accesses happen
+ *	through these routines, which are the only routines that have
+ *	knowledge of how these queues are implemented.
+ */
+ 
+/**
+ *	aac_get_entry		-	get a queue entry
+ *	@dev: Adapter
+ *	@qid: Queue Number
+ *	@entry: Entry return
+ *	@index: Index return
+ *	@nonotify: notification control
+ *
+ *	With a priority the routine returns a queue entry if the queue has
+ *	free entries. If the queue is full (no free entries) then no entry is
+ *	returned and the function returns 0; otherwise 1 is returned.
+ */
+ 
+static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entry, u32 * index, unsigned long *nonotify)
+{
+	struct aac_queue * q;
+
+	/*
+	 *	All of the queues wrap when they reach the end, so we check
+	 *	to see if they have reached the end and if they have we just
+	 *	set the index back to zero. This is a wrap. You could or off
+	 *	the high bits in all updates but this is a bit faster I think.
+	 */
+
+	q = &dev->queues->queue[qid];
+	
+	*index = le32_to_cpu(*(q->headers.producer));
+	if ((*index - 2) == le32_to_cpu(*(q->headers.consumer)))
+			*nonotify = 1; 
+
+	if (qid == AdapHighCmdQueue) {
+		if (*index >= ADAP_HIGH_CMD_ENTRIES)
+			*index = 0;
+	} else if (qid == AdapNormCmdQueue) {
+		if (*index >= ADAP_NORM_CMD_ENTRIES)
+			*index = 0; /* Wrap to front of the Producer Queue. */
+	} else if (qid == AdapHighRespQueue) {
+		if (*index >= ADAP_HIGH_RESP_ENTRIES)
+			*index = 0;
+	} else if (qid == AdapNormRespQueue) {
+		if (*index >= ADAP_NORM_RESP_ENTRIES)
+			*index = 0; /* Wrap to front of the Producer Queue. */
+	} else {
+		printk(KERN_ERR "aacraid: invalid qid\n");
+		BUG();
+	}
+
+	if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) { /* Queue is full */
+		printk(KERN_WARNING "Queue %d full, %d outstanding.\n",
+				qid, q->numpending);
+		return 0;
+	} else {
+		*entry = q->base + *index;
+		return 1;
+	}
+}
+
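+/*
+ * Worked example of the wrap and full checks above (informational): on
+ * a queue of 8 entries a producer index of 8 wraps back to 0, and the
+ * queue is reported full when (*index + 1) equals the consumer index,
+ * since producing there would overwrite an unread entry. The earlier
+ * "(*index - 2) == consumer" test appears intended to flag the
+ * near-full case so the caller can throttle adapter notifications.
+ */
+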
+/**
+ *	aac_queue_get		-	get the next free QE
+ *	@dev: Adapter
+ *	@index: Returned index
+ *	@priority: Priority of fib
+ *	@fib: Fib to associate with the queue entry
+ *	@wait: Wait if queue full
+ *	@fibptr: Driver fib object to go with fib
+ *	@nonotify: Don't notify the adapter
+ *
+ *	Gets the next free QE off the requested priority adapter command
+ *	queue and associates the Fib with the QE. The QE represented by
+ *	index is ready to insert on the queue when this routine returns
+ *	success.
+ */
+
+static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify)
+{
+	struct aac_entry * entry = NULL;
+	int map = 0;
+	struct aac_queue * q = &dev->queues->queue[qid];
+		
+	spin_lock_irqsave(q->lock, q->SavedIrql);
+	    
+	if (qid == AdapHighCmdQueue || qid == AdapNormCmdQueue) {
+		/* if no entries wait for some if caller wants to */
+		while (!aac_get_entry(dev, qid, &entry, index, nonotify)) {
+			printk(KERN_ERR "GetEntries failed\n");
+		}
+		/*
+		 *	Setup queue entry with a command, status and fib mapped
+		 */
+		entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
+		map = 1;
+	} else if (qid == AdapHighRespQueue || qid == AdapNormRespQueue) {
+		while (!aac_get_entry(dev, qid, &entry, index, nonotify)) {
+			/* if no entries wait for some if caller wants to */
+		}
+		/*
+		 *	Setup queue entry with command, status and fib mapped
+		 */
+		entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
+		entry->addr = hw_fib->header.SenderFibAddress;
+		/* Restore adapter's pointer to the FIB */
+		hw_fib->header.ReceiverFibAddress = hw_fib->header.SenderFibAddress;	/* Let the adapter know where to find its data */
+		map = 0;
+	}
+	/*
+	 *	If MapFib is true then we need to map the Fib and put pointers
+	 *	in the queue entry.
+	 */
+	if (map)
+		entry->addr = cpu_to_le32(fibptr->hw_fib_pa);
+	return 0;
+}
+
+
+/**
+ *	aac_insert_entry	-	insert a queue entry
+ *	@dev: Adapter
+ *	@index: Index of entry to insert
+ *	@qid: Queue number
+ *	@nonotify: Suppress adapter notification
+ *
+ *	Publishes the queue entry at @index by advancing the producer index
+ *	of the requested queue, then notifies the adapter that a new entry
+ *	is available, unless @nonotify suppresses the notification.
+ */
+ 
+static int aac_insert_entry(struct aac_dev * dev, u32 index, u32 qid, unsigned long nonotify) 
+{
+	struct aac_queue * q = &dev->queues->queue[qid];
+
+	if(q == NULL)
+		BUG();
+	*(q->headers.producer) = cpu_to_le32(index + 1);
+	spin_unlock_irqrestore(q->lock, q->SavedIrql);
+
+	if (qid == AdapHighCmdQueue ||
+	    qid == AdapNormCmdQueue ||
+	    qid == AdapHighRespQueue ||
+	    qid == AdapNormRespQueue) {
+		if (!nonotify)
+			aac_adapter_notify(dev, qid);
+	} else
+		printk(KERN_WARNING "Surprise insert!\n");
+	return 0;
+}
+
+/*
+ *	Define the highest level of host to adapter communication routines.
+ *	These routines will support host to adapter FS communication. These
+ *	routines have no knowledge of the communication method used. This level
+ *	sends and receives FIBs. This level has no knowledge of how these FIBs
+ *	get passed back and forth.
+ */
+
+/**
+ *	fib_send	-	send a fib to the adapter
+ *	@command: Command to send
+ *	@fibptr: The fib
+ *	@size: Size of fib data area
+ *	@priority: Priority of Fib
+ *	@wait: Async/sync select
+ *	@reply: True if a reply is wanted
+ *	@callback: Called with reply
+ *	@callback_data: Passed to callback
+ *
+ *	Sends the requested FIB to the adapter and optionally will wait for a
+ *	response FIB. If the caller does not wish to wait for a response then
+ *	a callback to invoke on completion must be supplied. The callback is
+ *	invoked when a response FIB is received from the adapter.
+ */
+ 
+int fib_send(u16 command, struct fib * fibptr, unsigned long size,  int priority, int wait, int reply, fib_callback callback, void * callback_data)
+{
+	u32 index;
+	u32 qid;
+	struct aac_dev * dev = fibptr->dev;
+	unsigned long nointr = 0;
+	struct hw_fib * hw_fib = fibptr->hw_fib;
+	struct aac_queue * q;
+	unsigned long flags = 0;
+	if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned)))
+		return -EBUSY;
+	/*
+	 *	There are 5 cases with the wait and response requested flags.
+	 *	The only invalid cases are if the caller requests to wait and
+	 *	does not request a response and if the caller does not want a
+	 *	response and the Fib is not allocated from pool. If a response
+	 *	is not requested the Fib will just be deallocated by the DPC
+	 *	routine when the response comes back from the adapter. No
+	 *	further processing will be done besides deleting the Fib. We
+	 *	will have a debug mode where the adapter can notify the host
+	 *	it had a problem and the host can log that fact.
+	 */
+	if (wait && !reply) {
+		return -EINVAL;
+	} else if (!wait && reply) {
+		hw_fib->header.XferState |= cpu_to_le32(Async | ResponseExpected);
+		FIB_COUNTER_INCREMENT(aac_config.AsyncSent);
+	} else if (!wait && !reply) {
+		hw_fib->header.XferState |= cpu_to_le32(NoResponseExpected);
+		FIB_COUNTER_INCREMENT(aac_config.NoResponseSent);
+	} else if (wait && reply) {
+		hw_fib->header.XferState |= cpu_to_le32(ResponseExpected);
+		FIB_COUNTER_INCREMENT(aac_config.NormalSent);
+	} 
+	/*
+	 *	Map the fib into 32bits by using the fib number
+	 */
+
+	hw_fib->header.SenderFibAddress = cpu_to_le32(((u32)(fibptr-dev->fibs)) << 1);
+	hw_fib->header.SenderData = (u32)(fibptr - dev->fibs);
+	/*
+	 *	Set FIB state to indicate where it came from and if we want a
+	 *	response from the adapter. Also load the command from the
+	 *	caller.
+	 *
+	 *	Map the hw fib pointer as a 32bit value
+	 */
+	hw_fib->header.Command = cpu_to_le16(command);
+	hw_fib->header.XferState |= cpu_to_le32(SentFromHost);
+	fibptr->hw_fib->header.Flags = 0;	/* 0 the flags field - internal only*/
+	/*
+	 *	Set the size of the Fib we want to send to the adapter
+	 */
+	hw_fib->header.Size = cpu_to_le16(sizeof(struct aac_fibhdr) + size);
+	if (le16_to_cpu(hw_fib->header.Size) > le16_to_cpu(hw_fib->header.SenderSize)) {
+		return -EMSGSIZE;
+	}                
+	/*
+	 *	Get a queue entry, connect the FIB to it, and notify
+	 *	the adapter that a command is ready.
+	 */
+	if (priority == FsaHigh) {
+		hw_fib->header.XferState |= cpu_to_le32(HighPriority);
+		qid = AdapHighCmdQueue;
+	} else {
+		hw_fib->header.XferState |= cpu_to_le32(NormalPriority);
+		qid = AdapNormCmdQueue;
+	}
+	q = &dev->queues->queue[qid];
+
+	if(wait)
+		spin_lock_irqsave(&fibptr->event_lock, flags);
+	if(aac_queue_get( dev, &index, qid, hw_fib, 1, fibptr, &nointr)<0)
+		return -EWOULDBLOCK;
+	dprintk((KERN_DEBUG "fib_send: inserting a queue entry at index %d.\n",index));
+	dprintk((KERN_DEBUG "Fib contents:.\n"));
+	dprintk((KERN_DEBUG "  Command =               %d.\n", hw_fib->header.Command));
+	dprintk((KERN_DEBUG "  XferState  =            %x.\n", hw_fib->header.XferState));
+	dprintk((KERN_DEBUG "  hw_fib va being sent=%p\n",fibptr->hw_fib));
+	dprintk((KERN_DEBUG "  hw_fib pa being sent=%lx\n",(ulong)fibptr->hw_fib_pa));
+	dprintk((KERN_DEBUG "  fib being sent=%p\n",fibptr));
+	/*
+	 *	Fill in the Callback and CallbackContext if we are not
+	 *	going to wait.
+	 */
+	if (!wait) {
+		fibptr->callback = callback;
+		fibptr->callback_data = callback_data;
+	}
+	FIB_COUNTER_INCREMENT(aac_config.FibsSent);
+	list_add_tail(&fibptr->queue, &q->pendingq);
+	q->numpending++;
+
+	fibptr->done = 0;
+	fibptr->flags = 0;
+
+	if(aac_insert_entry(dev, index, qid, (nointr & aac_config.irq_mod)) < 0)
+		return -EWOULDBLOCK;
+	/*
+	 *	If the caller wanted us to wait for response wait now. 
+	 */
+    
+	if (wait) {
+		spin_unlock_irqrestore(&fibptr->event_lock, flags);
+		down(&fibptr->event_wait);
+		if(fibptr->done == 0)
+			BUG();
+			
+		if((fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT)){
+			return -ETIMEDOUT;
+		} else {
+			return 0;
+		}
+	}
+	/*
+	 *	If the user does not want a response then return success;
+	 *	otherwise return pending
+	 */
+	if (reply)
+		return -EINPROGRESS;
+	else
+		return 0;
+}
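+
+/*
+ * Usage sketch for the two supported completion models (my_callback and
+ * my_context are placeholders for a fib_callback and its argument):
+ *
+ *	// synchronous: sleep on event_wait until the adapter responds
+ *	status = fib_send(command, fibptr, size, FsaNormal, 1, 1, NULL, NULL);
+ *
+ *	// asynchronous: returns -EINPROGRESS; the callback runs from the DPC
+ *	status = fib_send(command, fibptr, size, FsaNormal, 0, 1,
+ *			  my_callback, my_context);
+ */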
+
+/** 
+ *	aac_consumer_get	-	get the top of the queue
+ *	@dev: Adapter
+ *	@q: Queue
+ *	@entry: Return entry
+ *
+ *	Returns a pointer to the entry on the top of the requested queue
+ *	that we are a consumer of. It does not change the state of the
+ *	queue.
+ */
+
+int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry)
+{
+	u32 index;
+	int status;
+	if (le32_to_cpu(*q->headers.producer) == le32_to_cpu(*q->headers.consumer)) {
+		status = 0;
+	} else {
+		/*
+		 *	The consumer index must be wrapped if we have reached
+		 *	the end of the queue, else we just use the entry
+		 *	pointed to by the header index
+		 */
+		if (le32_to_cpu(*q->headers.consumer) >= q->entries)
+			index = 0;
+		else
+			index = le32_to_cpu(*q->headers.consumer);
+		*entry = q->base + index;
+		status = 1;
+	}
+	return status;
+}
+
+/**
+ *	aac_consumer_free	-	free consumer entry
+ *	@dev: Adapter
+ *	@q: Queue
+ *	@qid: Queue ident
+ *
+ *	Frees up the current top of the queue we are a consumer of. If the
+ *	queue was full notify the producer that the queue is no longer full.
+ */
+
+void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
+{
+	int wasfull = 0;
+	u32 notify;
+
+	if ((le32_to_cpu(*q->headers.producer)+1) == le32_to_cpu(*q->headers.consumer))
+		wasfull = 1;
+        
+	if (le32_to_cpu(*q->headers.consumer) >= q->entries)
+		*q->headers.consumer = cpu_to_le32(1);
+	else
+		*q->headers.consumer = cpu_to_le32(le32_to_cpu(*q->headers.consumer)+1);
+        
+	if (wasfull) {
+		switch (qid) {
+
+		case HostNormCmdQueue:
+			notify = HostNormCmdNotFull;
+			break;
+		case HostHighCmdQueue:
+			notify = HostHighCmdNotFull;
+			break;
+		case HostNormRespQueue:
+			notify = HostNormRespNotFull;
+			break;
+		case HostHighRespQueue:
+			notify = HostHighRespNotFull;
+			break;
+		default:
+			BUG();
+			return;
+		}
+		aac_adapter_notify(dev, notify);
+	}
+}        
+
+/**
+ *	fib_adapter_complete	-	complete adapter issued fib
+ *	@fibptr: fib to complete
+ *	@size: size of fib
+ *
+ *	Will do all necessary work to complete a FIB that was sent from
+ *	the adapter.
+ */
+
+int fib_adapter_complete(struct fib * fibptr, unsigned short size)
+{
+	struct hw_fib * hw_fib = fibptr->hw_fib;
+	struct aac_dev * dev = fibptr->dev;
+	unsigned long nointr = 0;
+	if (hw_fib->header.XferState == 0)
+		return 0;
+	/*
+	 *	If we plan to do anything check the structure type first.
+	 */
+	if (hw_fib->header.StructType != FIB_MAGIC)
+		return -EINVAL;
+	/*
+	 *	This block handles the case where the adapter had sent us a
+	 *	command and we have finished processing the command. We
+	 *	call completeFib when we are done processing the command 
+	 *	and want to send a response back to the adapter. This will 
+	 *	send the completed cdb to the adapter.
+	 */
+	if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) {
+		hw_fib->header.XferState |= cpu_to_le32(HostProcessed);
+		if (hw_fib->header.XferState & cpu_to_le32(HighPriority)) {
+			u32 index;
+			if (size) {
+				size += sizeof(struct aac_fibhdr);
+				if (size > le16_to_cpu(hw_fib->header.SenderSize))
+					return -EMSGSIZE;
+				hw_fib->header.Size = cpu_to_le16(size);
+			}
+			if (aac_queue_get(dev, &index, AdapHighRespQueue, hw_fib, 1, NULL, &nointr) < 0)
+				return -EWOULDBLOCK;
+			aac_insert_entry(dev, index, AdapHighRespQueue, (nointr & (int)aac_config.irq_mod));
+		}
+		else if (hw_fib->header.XferState & cpu_to_le32(NormalPriority)) {
+			u32 index;
+
+			if (size) {
+				size += sizeof(struct aac_fibhdr);
+				if (size > le16_to_cpu(hw_fib->header.SenderSize)) 
+					return -EMSGSIZE;
+				hw_fib->header.Size = cpu_to_le16(size);
+			}
+			if (aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr) < 0)
+				return -EWOULDBLOCK;
+			aac_insert_entry(dev, index, AdapNormRespQueue, (nointr & (int)aac_config.irq_mod));
+		}
+	} else {
+		printk(KERN_WARNING "fib_adapter_complete: Unknown xferstate detected.\n");
+		BUG();
+	}
+	return 0;
+}
+
+/**
+ *	fib_complete	-	fib completion handler
+ *	@fib: FIB to complete
+ *
+ *	Will do all necessary work to complete a FIB.
+ */
+ 
+int fib_complete(struct fib * fibptr)
+{
+	struct hw_fib * hw_fib = fibptr->hw_fib;
+
+	/*
+	 *	Check for a fib which has already been completed
+	 */
+
+	if (hw_fib->header.XferState == 0)
+		return 0;
+	/*
+	 *	If we plan to do anything check the structure type first.
+	 */ 
+
+	if (hw_fib->header.StructType != FIB_MAGIC)
+		return -EINVAL;
+	/*
+	 *	This block completes a cdb which originated on the host and we
+	 *	just need to deallocate the cdb or reinit it. At this point the
+	 *	command is complete that we had sent to the adapter and this
+	 *	cdb could be reused.
+	 */
+	if ((hw_fib->header.XferState & cpu_to_le32(SentFromHost)) &&
+	    (hw_fib->header.XferState & cpu_to_le32(AdapterProcessed))) {
+		fib_dealloc(fibptr);
+	} else if (hw_fib->header.XferState & cpu_to_le32(SentFromHost)) {
+		/*
+		 *	This handles the case when the host has aborted the I/O
+		 *	to the adapter because the adapter is not responding
+		 */
+		fib_dealloc(fibptr);
+	} else if (hw_fib->header.XferState & cpu_to_le32(HostOwned)) {
+		fib_dealloc(fibptr);
+	} else {
+		BUG();
+	}
+	return 0;
+}
+
+/**
+ *	aac_printf	-	handle printf from firmware
+ *	@dev: Adapter
+ *	@val: Message info
+ *
+ *	Print a message passed to us by the controller firmware on the
+ *	Adaptec board
+ */
+
+void aac_printf(struct aac_dev *dev, u32 val)
+{
+	int length = val & 0xffff;
+	int level = (val >> 16) & 0xffff;
+	char *cp = dev->printfbuf;
+	
+	/*
+	 *	The size of the printfbuf is set in comminit.c (printfbufsiz,
+	 *	currently 256 bytes); there is no shared define for it.
+	 */
+	if (length > 255)
+		length = 255;
+	if (cp[length] != 0)
+		cp[length] = 0;
+	if (level == LOG_AAC_HIGH_ERROR)
+		printk(KERN_WARNING "aacraid:%s", cp);
+	else
+		printk(KERN_INFO "aacraid:%s", cp);
+	memset(cp, 0,  256);
+}
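+
+/*
+ * Encoding example (informational): the adapter packs the message
+ * length into the low 16 bits of val and the log level into the high
+ * 16 bits, so val == (LOG_AAC_HIGH_ERROR << 16) | 32 announces a
+ * 32-byte high-severity message sitting in dev->printfbuf.
+ */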
+
+/**
+ *	aac_command_thread	-	command processing thread
+ *	@dev: Adapter to monitor
+ *
+ *	Waits on the commandready event in its queue. When the event gets set
+ *	it will pull FIBs off its queue. It will continue to pull FIBs off
+ *	until the queue is empty. When the queue is empty it will wait for
+ *	more FIBs.
+ */
+ 
+int aac_command_thread(struct aac_dev * dev)
+{
+	struct hw_fib *hw_fib, *hw_newfib;
+	struct fib *fib, *newfib;
+	struct aac_queue_block *queues = dev->queues;
+	struct aac_fib_context *fibctx;
+	unsigned long flags;
+	DECLARE_WAITQUEUE(wait, current);
+
+	/*
+	 *	We can only have one thread per adapter for AIF's.
+	 */
+	if (dev->aif_thread)
+		return -EINVAL;
+	/*
+	 *	Set up the name that will appear in 'ps'
+	 *	stored in  task_struct.comm[16].
+	 */
+	daemonize("aacraid");
+	allow_signal(SIGKILL);
+	/*
+	 *	Let the DPC know it has a place to send the AIF's to.
+	 */
+	dev->aif_thread = 1;
+	add_wait_queue(&queues->queue[HostNormCmdQueue].cmdready, &wait);
+	set_current_state(TASK_INTERRUPTIBLE);
+	while (1) {
+		spin_lock_irqsave(queues->queue[HostNormCmdQueue].lock, flags);
+		while(!list_empty(&(queues->queue[HostNormCmdQueue].cmdq))) {
+			struct list_head *entry;
+			struct aac_aifcmd * aifcmd;
+
+			set_current_state(TASK_RUNNING);
+		
+			entry = queues->queue[HostNormCmdQueue].cmdq.next;
+			list_del(entry);
+			
+			spin_unlock_irqrestore(queues->queue[HostNormCmdQueue].lock, flags);
+			fib = list_entry(entry, struct fib, fiblink);
+			/*
+			 *	We will process the FIB here or pass it to a
+			 *	worker thread that is TBD. We really can't
+			 *	do anything at this point since we don't have
+			 *	anything defined for this thread to do.
+			 */
+			hw_fib = fib->hw_fib;
+			memset(fib, 0, sizeof(struct fib));
+			fib->type = FSAFS_NTC_FIB_CONTEXT;
+			fib->size = sizeof( struct fib );
+			fib->hw_fib = hw_fib;
+			fib->data = hw_fib->data;
+			fib->dev = dev;
+			/*
+			 *	We only handle AifRequest fibs from the adapter.
+			 */
+			aifcmd = (struct aac_aifcmd *) hw_fib->data;
+			if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) {
+				/* Handle Driver Notify Events */
+				*(u32 *)hw_fib->data = cpu_to_le32(ST_OK);
+				fib_adapter_complete(fib, sizeof(u32));
+			} else {
+				struct list_head *entry;
+				/* The u32 here is important and intended. We are using
+				   32bit wrapping time to fit the adapter field */
+				   
+				u32 time_now, time_last;
+				unsigned long flagv;
+				
+				time_now = jiffies/HZ;
+
+				spin_lock_irqsave(&dev->fib_lock, flagv);
+				entry = dev->fib_list.next;
+				/*
+				 * For each Context that is on the 
+				 * fibctxList, make a copy of the
+				 * fib, and then set the event to wake up the
+				 * thread that is waiting for it.
+				 */
+				while (entry != &dev->fib_list) {
+					/*
+					 * Extract the fibctx
+					 */
+					fibctx = list_entry(entry, struct aac_fib_context, next);
+					/*
+					 * Check if the queue is getting
+					 * backlogged
+					 */
+					if (fibctx->count > 20)
+					{
+						/*
+						 * It's *not* jiffies folks,
+						 * but jiffies / HZ so do not
+						 * panic ...
+						 */
+						time_last = fibctx->jiffies;
+						/*
+						 * Has it been > 2 minutes 
+						 * since the last read off
+						 * the queue?
+						 */
+						if ((time_now - time_last) > 120) {
+							entry = entry->next;
+							aac_close_fib_context(dev, fibctx);
+							continue;
+						}
+					}
+					/*
+					 * Warning: no sleep allowed while
+					 * holding spinlock
+					 */
+					hw_newfib = kmalloc(sizeof(struct hw_fib), GFP_ATOMIC);
+					newfib = kmalloc(sizeof(struct fib), GFP_ATOMIC);
+					if (newfib && hw_newfib) {
+						/*
+						 * Make the copy of the FIB
+						 */
+						memcpy(hw_newfib, hw_fib, sizeof(struct hw_fib));
+						memcpy(newfib, fib, sizeof(struct fib));
+						newfib->hw_fib = hw_newfib;
+						/*
+						 * Put the FIB onto the
+						 * fibctx's fibs
+						 */
+						list_add_tail(&newfib->fiblink, &fibctx->fib_list);
+						fibctx->count++;
+						/* 
+						 * Set the event to wake up the
+						 * thread that is waiting.
+						 */
+						up(&fibctx->wait_sem);
+					} else {
+						printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
+						kfree(newfib);
+						kfree(hw_newfib);
+					}
+					entry = entry->next;
+				}
+				/*
+				 *	Set the status of this FIB
+				 */
+				*(u32 *)hw_fib->data = cpu_to_le32(ST_OK);
+				fib_adapter_complete(fib, sizeof(u32));
+				spin_unlock_irqrestore(&dev->fib_lock, flagv);
+			}
+			spin_lock_irqsave(queues->queue[HostNormCmdQueue].lock, flags);
+			kfree(fib);
+		}
+		/*
+		 *	There are no more AIF's
+		 */
+		spin_unlock_irqrestore(queues->queue[HostNormCmdQueue].lock, flags);
+		schedule();
+
+		if(signal_pending(current))
+			break;
+		set_current_state(TASK_INTERRUPTIBLE);
+	}
+	remove_wait_queue(&queues->queue[HostNormCmdQueue].cmdready, &wait);
+	dev->aif_thread = 0;
+	complete_and_exit(&dev->aif_completion, 0);
+}
diff --git a/drivers/scsi/aacraid/dpcsup.c b/drivers/scsi/aacraid/dpcsup.c
new file mode 100644
index 000000000000..8480b427a6d9
--- /dev/null
+++ b/drivers/scsi/aacraid/dpcsup.c
@@ -0,0 +1,215 @@
+/*
+ *	Adaptec AAC series RAID controller driver
+ *	(c) Copyright 2001 Red Hat Inc.	<alan@redhat.com>
+ *
+ * based on the old aacraid driver that is..
+ * Adaptec aacraid device driver for Linux.
+ *
+ * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.  If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Module Name:
+ *  dpcsup.c
+ *
+ * Abstract: All DPC processing routines for the cyclone board occur here.
+ *
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/completion.h>
+#include <linux/blkdev.h>
+#include <asm/semaphore.h>
+
+#include "aacraid.h"
+
+/**
+ *	aac_response_normal	-	Handle command replies
+ *	@q: Queue to read from
+ *
+ *	This DPC routine will be run when the adapter interrupts us to let us
+ *	know there is a response on our normal priority queue. We will pull off
+ *	all the QEs there are and wake up all the waiters before exiting. We will
+ *	take a spinlock out on the queue before operating on it.
+ */
+
+unsigned int aac_response_normal(struct aac_queue * q)
+{
+	struct aac_dev * dev = q->dev;
+	struct aac_entry *entry;
+	struct hw_fib * hwfib;
+	struct fib * fib;
+	int consumed = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(q->lock, flags);	
+	/*
+	 *	Keep pulling response QEs off the response queue and waking
+	 *	up the waiters until there are no more QEs. We then return
+	 *	back to the system. If no response was requested we just
+	 *	deallocate the Fib here and continue.
+	 */
+	while(aac_consumer_get(dev, q, &entry))
+	{
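+		/*
+		 * entry->addr encodes two things: bit 0 flags a "fast"
+		 * response whose status still gets doctored below, and
+		 * the remaining bits index the driver's fib table.
+		 */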
+		int fast;
+		u32 index = le32_to_cpu(entry->addr);
+		fast = index & 0x01;
+		fib = &dev->fibs[index >> 1];
+		hwfib = fib->hw_fib;
+		
+		aac_consumer_free(dev, q, HostNormRespQueue);
+		/*
+		 *	Remove this fib from the Outstanding I/O queue.
+		 *	But only if it has not already been timed out.
+		 *
+		 *	If the fib has been timed out already, then just 
+		 *	continue. The caller has already been notified that
+		 *	the fib timed out.
+		 */
+		if (!(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
+			list_del(&fib->queue);
+			dev->queues->queue[AdapNormCmdQueue].numpending--;
+		} else {
+			printk(KERN_WARNING "aacraid: FIB timeout (%x).\n", fib->flags);
+			printk(KERN_DEBUG "aacraid: hwfib=%p fib index=%i fib=%p\n", hwfib, hwfib->header.SenderData, fib);
+			continue;
+		}
+		spin_unlock_irqrestore(q->lock, flags);
+
+		if (fast) {
+			/*
+			 *	Doctor the fib
+			 */
+			*(u32 *)hwfib->data = cpu_to_le32(ST_OK);
+			hwfib->header.XferState |= cpu_to_le32(AdapterProcessed);
+		}
+
+		FIB_COUNTER_INCREMENT(aac_config.FibRecved);
+
+		if (hwfib->header.Command == cpu_to_le16(NuFileSystem))
+		{
+			u32 *pstatus = (u32 *)hwfib->data;
+			if (*pstatus & cpu_to_le32(0xffff0000))
+				*pstatus = cpu_to_le32(ST_OK);
+		}
+		if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected | Async)) 
+		{
+			if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected))
+				FIB_COUNTER_INCREMENT(aac_config.NoResponseRecved);
+			else 
+				FIB_COUNTER_INCREMENT(aac_config.AsyncRecved);
+			/*
+			 *	NOTE:  we cannot touch the fib after this
+			 *	    call, because it may have been deallocated.
+			 */
+			fib->callback(fib->callback_data, fib);
+		} else {
+			unsigned long flagv;
+			spin_lock_irqsave(&fib->event_lock, flagv);
+			fib->done = 1;
+			up(&fib->event_wait);
+			spin_unlock_irqrestore(&fib->event_lock, flagv);
+			FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
+		}
+		consumed++;
+		spin_lock_irqsave(q->lock, flags);
+	}
+
+	if (consumed > aac_config.peak_fibs)
+		aac_config.peak_fibs = consumed;
+	if (consumed == 0) 
+		aac_config.zero_fibs++;
+
+	spin_unlock_irqrestore(q->lock, flags);
+	return 0;
+}
+
+
+/**
+ *	aac_command_normal	-	handle commands
+ *	@q: queue to process
+ *
+ *	This DPC routine will be queued when the adapter interrupts us to 
+ *	let us know there is a command on our normal priority queue. We will 
+ *	pull off all the QEs there are and wake up all the waiters before exiting.
+ *	We will take a spinlock out on the queue before operating on it.
+ */
+ 
+unsigned int aac_command_normal(struct aac_queue *q)
+{
+	struct aac_dev * dev = q->dev;
+	struct aac_entry *entry;
+	unsigned long flags;
+
+	spin_lock_irqsave(q->lock, flags);
+
+	/*
+	 *	Keep pulling response QEs off the response queue and waking
+	 *	up the waiters until there are no more QEs. We then return
+	 *	back to the system.
+	 */
+	while(aac_consumer_get(dev, q, &entry))
+	{
+		struct fib fibctx;
+		struct hw_fib * hw_fib;
+		u32 index;
+		struct fib *fib = &fibctx;
+		
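+		/*
+		 * The queue entry carries a byte offset into the adapter's
+		 * AIF area, so scale it down to index the mapped hw_fib
+		 * array.
+		 */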
+		index = le32_to_cpu(entry->addr) / sizeof(struct hw_fib);
+		hw_fib = &dev->aif_base_va[index];
+		
+		/*
+		 *	Allocate a FIB at all costs. For non-queued stuff
+		 *	we can just use the stack so we are happy. We need
+		 *	a fib object in order to manage the linked lists.
+		 */
+		if (dev->aif_thread)
+			if((fib = kmalloc(sizeof(struct fib), GFP_ATOMIC)) == NULL)
+				fib = &fibctx;
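+		/*
+		 * When there is no aif thread, or the allocation above
+		 * failed, the stack fib is used and the command is simply
+		 * completed inline below instead of being queued.
+		 */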
+		
+		memset(fib, 0, sizeof(struct fib));
+		INIT_LIST_HEAD(&fib->fiblink);
+		fib->type = FSAFS_NTC_FIB_CONTEXT;
+		fib->size = sizeof(struct fib);
+		fib->hw_fib = hw_fib;
+		fib->data = hw_fib->data;
+		fib->dev = dev;
+
+		if (dev->aif_thread && fib != &fibctx) {
+			list_add_tail(&fib->fiblink, &q->cmdq);
+			aac_consumer_free(dev, q, HostNormCmdQueue);
+			wake_up_interruptible(&q->cmdready);
+		} else {
+			aac_consumer_free(dev, q, HostNormCmdQueue);
+			spin_unlock_irqrestore(q->lock, flags);
+			/*
+			 *	Set the status of this FIB
+			 */
+			*(u32 *)hw_fib->data = cpu_to_le32(ST_OK);
+			fib_adapter_complete(fib, sizeof(u32));
+			spin_lock_irqsave(q->lock, flags);
+		}
+	}
+	spin_unlock_irqrestore(q->lock, flags);
+	return 0;
+}
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
new file mode 100644
index 000000000000..c9b82687ba1a
--- /dev/null
+++ b/drivers/scsi/aacraid/linit.c
@@ -0,0 +1,749 @@
+/*
+ *	Adaptec AAC series RAID controller driver
+ *	(c) Copyright 2001 Red Hat Inc.	<alan@redhat.com>
+ *
+ * based on the old aacraid driver that is..
+ * Adaptec aacraid device driver for Linux.
+ *
+ * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.  If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Module Name:
+ *   linit.c
+ *
+ * Abstract: Linux Driver entry module for Adaptec RAID Array Controller
+ */
+
+#define AAC_DRIVER_VERSION		"1.1.2-lk2"
+#define AAC_DRIVER_BUILD_DATE		__DATE__
+#define AAC_DRIVERNAME			"aacraid"
+
+#include <linux/compat.h>
+#include <linux/blkdev.h>
+#include <linux/completion.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/syscalls.h>
+#include <linux/ioctl32.h>
+#include <linux/delay.h>
+#include <linux/smp_lock.h>
+#include <asm/semaphore.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsicam.h>
+#include <scsi/scsi_eh.h>
+
+#include "aacraid.h"
+
+
+MODULE_AUTHOR("Red Hat Inc and Adaptec");
+MODULE_DESCRIPTION("Dell PERC2, 2/Si, 3/Si, 3/Di, "
+		   "Adaptec Advanced Raid Products, "
+		   "and HP NetRAID-4M SCSI driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(AAC_DRIVER_VERSION);
+
+static LIST_HEAD(aac_devices);
+static int aac_cfg_major = -1;
+
+/*
+ * Because of the way Linux names scsi devices, the order in this table has
+ * become important.  Check for on-board Raid first, add-in cards second.
+ *
+ * Note: The last field is used to index into aac_drivers below.
+ */
+static struct pci_device_id aac_pci_tbl[] = {
+	{ 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
+	{ 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
+	{ 0x1028, 0x0003, 0x1028, 0x0003, 0, 0, 2 }, /* PERC 3/Si (SlimFast/PERC3Si) */
+	{ 0x1028, 0x0004, 0x1028, 0x00d0, 0, 0, 3 }, /* PERC 3/Di (Iguana FlipChip/PERC3DiF) */
+	{ 0x1028, 0x0002, 0x1028, 0x00d1, 0, 0, 4 }, /* PERC 3/Di (Viper/PERC3DiV) */
+	{ 0x1028, 0x0002, 0x1028, 0x00d9, 0, 0, 5 }, /* PERC 3/Di (Lexus/PERC3DiL) */
+	{ 0x1028, 0x000a, 0x1028, 0x0106, 0, 0, 6 }, /* PERC 3/Di (Jaguar/PERC3DiJ) */
+	{ 0x1028, 0x000a, 0x1028, 0x011b, 0, 0, 7 }, /* PERC 3/Di (Dagger/PERC3DiD) */
+	{ 0x1028, 0x000a, 0x1028, 0x0121, 0, 0, 8 }, /* PERC 3/Di (Boxster/PERC3DiB) */
+	{ 0x9005, 0x0283, 0x9005, 0x0283, 0, 0, 9 }, /* catapult */
+	{ 0x9005, 0x0284, 0x9005, 0x0284, 0, 0, 10 }, /* tomcat */
+	{ 0x9005, 0x0285, 0x9005, 0x0286, 0, 0, 11 }, /* Adaptec 2120S (Crusader) */
+	{ 0x9005, 0x0285, 0x9005, 0x0285, 0, 0, 12 }, /* Adaptec 2200S (Vulcan) */
+	{ 0x9005, 0x0285, 0x9005, 0x0287, 0, 0, 13 }, /* Adaptec 2200S (Vulcan-2m) */
+	{ 0x9005, 0x0285, 0x17aa, 0x0286, 0, 0, 14 }, /* Legend S220 (Legend Crusader) */
+	{ 0x9005, 0x0285, 0x17aa, 0x0287, 0, 0, 15 }, /* Legend S230 (Legend Vulcan) */
+
+	{ 0x9005, 0x0285, 0x9005, 0x0288, 0, 0, 16 }, /* Adaptec 3230S (Harrier) */
+	{ 0x9005, 0x0285, 0x9005, 0x0289, 0, 0, 17 }, /* Adaptec 3240S (Tornado) */
+	{ 0x9005, 0x0285, 0x9005, 0x028a, 0, 0, 18 }, /* ASR-2020ZCR SCSI PCI-X ZCR (Skyhawk) */
+	{ 0x9005, 0x0285, 0x9005, 0x028b, 0, 0, 19 }, /* ASR-2025ZCR SCSI SO-DIMM PCI-X ZCR (Terminator) */
+	{ 0x9005, 0x0286, 0x9005, 0x028c, 0, 0, 20 }, /* ASR-2230S + ASR-2230SLP PCI-X (Lancer) */
+	{ 0x9005, 0x0286, 0x9005, 0x028d, 0, 0, 21 }, /* ASR-2130S (Lancer) */
+	{ 0x9005, 0x0286, 0x9005, 0x029b, 0, 0, 22 }, /* AAR-2820SA (Intruder) */
+	{ 0x9005, 0x0286, 0x9005, 0x029c, 0, 0, 23 }, /* AAR-2620SA (Intruder) */
+	{ 0x9005, 0x0286, 0x9005, 0x029d, 0, 0, 24 }, /* AAR-2420SA (Intruder) */
+	{ 0x9005, 0x0286, 0x9005, 0x0800, 0, 0, 25 }, /* Callisto Jupiter Platform */
+	{ 0x9005, 0x0285, 0x9005, 0x028e, 0, 0, 26 }, /* ASR-2020SA SATA PCI-X ZCR (Skyhawk) */
+	{ 0x9005, 0x0285, 0x9005, 0x028f, 0, 0, 27 }, /* ASR-2025SA SATA SO-DIMM PCI-X ZCR (Terminator) */
+	{ 0x9005, 0x0285, 0x9005, 0x0290, 0, 0, 28 }, /* AAR-2410SA PCI SATA 4ch (Jaguar II) */
+	{ 0x9005, 0x0285, 0x1028, 0x0291, 0, 0, 29 }, /* CERC SATA RAID 2 PCI SATA 6ch (DellCorsair) */
+	{ 0x9005, 0x0285, 0x9005, 0x0292, 0, 0, 30 }, /* AAR-2810SA PCI SATA 8ch (Corsair-8) */
+	{ 0x9005, 0x0285, 0x9005, 0x0293, 0, 0, 31 }, /* AAR-21610SA PCI SATA 16ch (Corsair-16) */
+	{ 0x9005, 0x0285, 0x9005, 0x0294, 0, 0, 32 }, /* ESD SO-DIMM PCI-X SATA ZCR (Prowler) */
+	{ 0x9005, 0x0285, 0x103C, 0x3227, 0, 0, 33 }, /* AAR-2610SA PCI SATA 6ch */
+	{ 0x9005, 0x0285, 0x9005, 0x0296, 0, 0, 34 }, /* ASR-2240S (SabreExpress) */
+	{ 0x9005, 0x0285, 0x9005, 0x0297, 0, 0, 35 }, /* ASR-4005SAS */
+	{ 0x9005, 0x0285, 0x1014, 0x02F2, 0, 0, 36 }, /* IBM 8i (AvonPark) */
+	{ 0x9005, 0x0285, 0x9005, 0x0298, 0, 0, 37 }, /* ASR-4000SAS (BlackBird) */
+	{ 0x9005, 0x0285, 0x9005, 0x0299, 0, 0, 38 }, /* ASR-4800SAS (Marauder-X) */
+	{ 0x9005, 0x0285, 0x9005, 0x029A, 0, 0, 39 }, /* ASR-4805SAS (Marauder-E) */
+
+	{ 0x9005, 0x0285, 0x1028, 0x0287, 0, 0, 40 }, /* Perc 320/DC*/
+	{ 0x1011, 0x0046, 0x9005, 0x0365, 0, 0, 41 }, /* Adaptec 5400S (Mustang)*/
+	{ 0x1011, 0x0046, 0x9005, 0x0364, 0, 0, 42 }, /* Adaptec 5400S (Mustang)*/
+	{ 0x1011, 0x0046, 0x9005, 0x1364, 0, 0, 43 }, /* Dell PERC2/QC */
+	{ 0x1011, 0x0046, 0x103c, 0x10c2, 0, 0, 44 }, /* HP NetRAID-4M */
+
+	{ 0x9005, 0x0285, 0x1028, PCI_ANY_ID, 0, 0, 45 }, /* Dell Catchall */
+	{ 0x9005, 0x0285, 0x17aa, PCI_ANY_ID, 0, 0, 46 }, /* Legend Catchall */
+	{ 0x9005, 0x0285, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 47 }, /* Adaptec Catch All */
+	{ 0x9005, 0x0286, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 48 }, /* Adaptec Rocket Catch All */
+	{ 0,}
+};
+MODULE_DEVICE_TABLE(pci, aac_pci_tbl);
+
+/*
+ * dmb - For now we add the number of channels to this structure.  
+ * In the future we should add a fib that reports the number of channels
+ * for the card.  At that time we can remove the channels from here
+ */
+static struct aac_driver_ident aac_drivers[] = {
+	{ aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* PERC 2/Si (Iguana/PERC2Si) */
+	{ aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* PERC 3/Di (Opal/PERC3Di) */
+	{ aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* PERC 3/Si (SlimFast/PERC3Si) */
+	{ aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* PERC 3/Di (Iguana FlipChip/PERC3DiF) */
+	{ aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* PERC 3/Di (Viper/PERC3DiV) */
+	{ aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* PERC 3/Di (Lexus/PERC3DiL) */
+	{ aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* PERC 3/Di (Jaguar/PERC3DiJ) */
+	{ aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* PERC 3/Di (Dagger/PERC3DiD) */
+	{ aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* PERC 3/Di (Boxster/PERC3DiB) */
+	{ aac_rx_init, "aacraid",  "ADAPTEC ", "catapult        ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* catapult */
+	{ aac_rx_init, "aacraid",  "ADAPTEC ", "tomcat          ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* tomcat */
+	{ aac_rx_init, "aacraid",  "ADAPTEC ", "Adaptec 2120S   ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Adaptec 2120S (Crusader) */
+	{ aac_rx_init, "aacraid",  "ADAPTEC ", "Adaptec 2200S   ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Adaptec 2200S (Vulcan) */
+	{ aac_rx_init, "aacraid",  "ADAPTEC ", "Adaptec 2200S   ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Adaptec 2200S (Vulcan-2m) */
+	{ aac_rx_init, "aacraid",  "Legend  ", "Legend S220     ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Legend S220 (Legend Crusader) */
+	{ aac_rx_init, "aacraid",  "Legend  ", "Legend S230     ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Legend S230 (Legend Vulcan) */
+
+	{ aac_rx_init, "aacraid",  "ADAPTEC ", "Adaptec 3230S   ", 2 }, /* Adaptec 3230S (Harrier) */
+	{ aac_rx_init, "aacraid",  "ADAPTEC ", "Adaptec 3240S   ", 2 }, /* Adaptec 3240S (Tornado) */
+	{ aac_rx_init, "aacraid",  "ADAPTEC ", "ASR-2020ZCR     ", 2 }, /* ASR-2020ZCR SCSI PCI-X ZCR (Skyhawk) */
+	{ aac_rx_init, "aacraid",  "ADAPTEC ", "ASR-2025ZCR     ", 2 }, /* ASR-2025ZCR SCSI SO-DIMM PCI-X ZCR (Terminator) */
+	{ aac_rkt_init, "aacraid",  "ADAPTEC ", "ASR-2230S PCI-X ", 2 }, /* ASR-2230S + ASR-2230SLP PCI-X (Lancer) */
+	{ aac_rkt_init, "aacraid",  "ADAPTEC ", "ASR-2130S PCI-X ", 1 }, /* ASR-2130S (Lancer) */
+	{ aac_rkt_init, "aacraid",  "ADAPTEC ", "AAR-2820SA      ", 1 }, /* AAR-2820SA (Intruder) */
+	{ aac_rkt_init, "aacraid",  "ADAPTEC ", "AAR-2620SA      ", 1 }, /* AAR-2620SA (Intruder) */
+	{ aac_rkt_init, "aacraid",  "ADAPTEC ", "AAR-2420SA      ", 1 }, /* AAR-2420SA (Intruder) */
+	{ aac_rkt_init, "aacraid",  "ADAPTEC ", "Callisto        ", 2, AAC_QUIRK_MASTER }, /* Jupiter Platform */
+	{ aac_rx_init, "aacraid",  "ADAPTEC ", "ASR-2020SA       ", 1 }, /* ASR-2020SA SATA PCI-X ZCR (Skyhawk) */
+	{ aac_rx_init, "aacraid",  "ADAPTEC ", "ASR-2025SA       ", 1 }, /* ASR-2025SA SATA SO-DIMM PCI-X ZCR (Terminator) */
+	{ aac_rx_init, "aacraid",  "ADAPTEC ", "AAR-2410SA SATA ", 1 }, /* AAR-2410SA PCI SATA 4ch (Jaguar II) */
+	{ aac_rx_init, "aacraid",  "DELL    ", "CERC SR2        ", 1 }, /* CERC SATA RAID 2 PCI SATA 6ch (DellCorsair) */
+	{ aac_rx_init, "aacraid",  "ADAPTEC ", "AAR-2810SA SATA ", 1 }, /* AAR-2810SA PCI SATA 8ch (Corsair-8) */
+	{ aac_rx_init, "aacraid",  "ADAPTEC ", "AAR-21610SA SATA", 1 }, /* AAR-21610SA PCI SATA 16ch (Corsair-16) */
+	{ aac_rx_init, "aacraid",  "ADAPTEC ", "ASR-2026ZCR     ", 1 }, /* ESD SO-DIMM PCI-X SATA ZCR (Prowler) */
+	{ aac_rx_init, "aacraid",  "ADAPTEC ", "AAR-2610SA      ", 1 }, /* SATA 6Ch (Bearcat) */
+	{ aac_rx_init, "aacraid",  "ADAPTEC ", "ASR-2240S       ", 1 }, /* ASR-2240S (SabreExpress) */
+	{ aac_rx_init, "aacraid",  "ADAPTEC ", "ASR-4005SAS     ", 1 }, /* ASR-4005SAS */
+	{ aac_rx_init, "aacraid",  "IBM     ", "ServeRAID 8i    ", 1 }, /* IBM 8i (AvonPark) */
+	{ aac_rx_init, "aacraid",  "ADAPTEC ", "ASR-4000SAS     ", 1 }, /* ASR-4000SAS (BlackBird & AvonPark) */
+	{ aac_rx_init, "aacraid",  "ADAPTEC ", "ASR-4800SAS     ", 1 }, /* ASR-4800SAS (Marauder-X) */
+	{ aac_rx_init, "aacraid",  "ADAPTEC ", "ASR-4805SAS     ", 1 }, /* ASR-4805SAS (Marauder-E) */
+
+	{ aac_rx_init, "percraid", "DELL    ", "PERC 320/DC     ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Perc 320/DC*/
+	{ aac_sa_init, "aacraid",  "ADAPTEC ", "Adaptec 5400S   ", 4, AAC_QUIRK_34SG }, /* Adaptec 5400S (Mustang)*/
+	{ aac_sa_init, "aacraid",  "ADAPTEC ", "AAC-364         ", 4, AAC_QUIRK_34SG }, /* Adaptec 5400S (Mustang)*/
+	{ aac_sa_init, "percraid", "DELL    ", "PERCRAID        ", 4, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Dell PERC2/QC */
+	{ aac_sa_init, "hpnraid",  "HP      ", "NetRAID         ", 4, AAC_QUIRK_34SG }, /* HP NetRAID-4M */
+
+	{ aac_rx_init, "aacraid",  "DELL    ", "RAID            ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Dell Catchall */
+	{ aac_rx_init, "aacraid",  "Legend  ", "RAID            ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Legend Catchall */
+	{ aac_rx_init, "aacraid",  "ADAPTEC ", "RAID            ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Adaptec Catch All */
+	{ aac_rkt_init, "aacraid", "ADAPTEC ", "RAID            ", 2 } /* Adaptec Rocket Catch All */
+};
+
+/**
+ *	aac_queuecommand	-	queue a SCSI command
+ *	@cmd:		SCSI command to queue
+ *	@done:		Function to call on command completion
+ *
+ *	Queues a command for execution by the associated Host Adapter.
+ *
+ *	TODO: unify with aac_scsi_cmd().
+ */ 
+
+static int aac_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
+{
+	cmd->scsi_done = done;
+	return (aac_scsi_cmd(cmd) ? FAILED : 0);
+} 
+
+/**
+ *	aac_info		-	Returns the host adapter name
+ *	@shost:		Scsi host to report on
+ *
+ *	Returns a static string describing the device in question
+ */
+
+const char *aac_info(struct Scsi_Host *shost)
+{
+	struct aac_dev *dev = (struct aac_dev *)shost->hostdata;
+	return aac_drivers[dev->cardtype].name;
+}
+
+/**
+ *	aac_get_driver_ident
+ * 	@devtype: index into lookup table
+ *
+ * 	Returns a pointer to the entry in the driver lookup table.
+ */
+
+struct aac_driver_ident* aac_get_driver_ident(int devtype)
+{
+	return &aac_drivers[devtype];
+}
+
+/**
+ *	aac_biosparm	-	return BIOS parameters for disk
+ *	@sdev: The scsi device corresponding to the disk
+ *	@bdev: the block device corresponding to the disk
+ *	@capacity: the sector capacity of the disk
+ *	@geom: geometry block to fill in
+ *
+ *	Return the Heads/Sectors/Cylinders BIOS Disk Parameters for Disk.  
+ *	The default disk geometry is 64 heads, 32 sectors, and the appropriate 
+ *	number of cylinders so as not to exceed drive capacity.  In order for 
+ *	disks equal to or larger than 1 GB to be addressable by the BIOS
+ *	without exceeding the BIOS limitation of 1024 cylinders, Extended 
+ *	Translation should be enabled.   With Extended Translation enabled, 
+ *	drives between 1 GB inclusive and 2 GB exclusive are given a disk 
+ *	geometry of 128 heads and 32 sectors, and drives above 2 GB inclusive 
+ *	are given a disk geometry of 255 heads and 63 sectors.  However, if 
+ *	the BIOS detects that the Extended Translation setting does not match 
+ *	the geometry in the partition table, then the translation inferred 
+ *	from the partition table will be used by the BIOS, and a warning may 
+ *	be displayed.
+ */
+ 
+static int aac_biosparm(struct scsi_device *sdev, struct block_device *bdev,
+			sector_t capacity, int *geom)
+{
+	struct diskparm *param = (struct diskparm *)geom;
+	unsigned char *buf;
+
+	dprintk((KERN_DEBUG "aac_biosparm.\n"));
+
+	/*
+	 *	Assuming extended translation is enabled - #REVISIT#
+	 */
+	if (capacity >= 2 * 1024 * 1024) { /* 1 GB in 512 byte sectors */
+		if(capacity >= 4 * 1024 * 1024) { /* 2 GB in 512 byte sectors */
+			param->heads = 255;
+			param->sectors = 63;
+		} else {
+			param->heads = 128;
+			param->sectors = 32;
+		}
+	} else {
+		param->heads = 64;
+		param->sectors = 32;
+	}
+
+	param->cylinders = cap_to_cyls(capacity, param->heads * param->sectors);
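+	/*
+	 * Worked example (hypothetical disk): an 8 GB drive has 16777216
+	 * 512-byte sectors, so it gets 255 heads and 63 sectors, and
+	 * cap_to_cyls() yields 16777216 / (255 * 63) = 1044 cylinders.
+	 */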
+
+	/* 
+	 *	Read the first 1024 bytes from the disk device, if the boot
+	 *	sector partition table is valid, search for a partition table
+	 *	entry whose end_head matches one of the standard geometry 
+	 *	translations ( 64/32, 128/32, 255/63 ).
+	 */
+	buf = scsi_bios_ptable(bdev);
+	if (!buf)
+		return 0;
+	if (*(unsigned short *)(buf + 0x40) == cpu_to_le16(0xaa55)) {
+		struct partition *first = (struct partition * )buf;
+		struct partition *entry = first;
+		int saved_cylinders = param->cylinders;
+		int num;
+		unsigned char end_head, end_sec;
+
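+		/*
+		 * end_head in a partition entry is zero-based, so values of
+		 * 63, 127 and 254 correspond to translations of 64, 128 and
+		 * 255 heads respectively.
+		 */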
+		for(num = 0; num < 4; num++) {
+			end_head = entry->end_head;
+			end_sec = entry->end_sector & 0x3f;
+
+			if(end_head == 63) {
+				param->heads = 64;
+				param->sectors = 32;
+				break;
+			} else if(end_head == 127) {
+				param->heads = 128;
+				param->sectors = 32;
+				break;
+			} else if(end_head == 254) {
+				param->heads = 255;
+				param->sectors = 63;
+				break;
+			}
+			entry++;
+		}
+
+		if (num == 4) {
+			end_head = first->end_head;
+			end_sec = first->end_sector & 0x3f;
+		}
+
+		param->cylinders = cap_to_cyls(capacity, param->heads * param->sectors);
+		if (num < 4 && end_sec == param->sectors) {
+			if (param->cylinders != saved_cylinders)
+				dprintk((KERN_DEBUG "Adopting geometry: heads=%d, sectors=%d from partition table %d.\n",
+					param->heads, param->sectors, num));
+		} else if (end_head > 0 || end_sec > 0) {
+			dprintk((KERN_DEBUG "Strange geometry: heads=%d, sectors=%d in partition table %d.\n",
+				end_head + 1, end_sec, num));
+			dprintk((KERN_DEBUG "Using geometry: heads=%d, sectors=%d.\n",
+					param->heads, param->sectors));
+		}
+	}
+	kfree(buf);
+	return 0;
+}
+
+/**
+ *	aac_slave_configure		-	compute queue depths
+ *	@sdev:	SCSI device we are considering
+ *
+ *	Selects queue depths for each target device based on the host adapter's
+ *	total capacity and the queue depth supported by the target device.
+ *	A queue depth of one automatically disables tagged queueing.
+ */
+
+static int aac_slave_configure(struct scsi_device *sdev)
+{
+	if (sdev->tagged_supported)
+		scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, 128);
+	else
+		scsi_adjust_queue_depth(sdev, 0, 1);
+	return 0;
+}
+
+static int aac_ioctl(struct scsi_device *sdev, int cmd, void __user * arg)
+{
+	struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata;
+	return aac_do_ioctl(dev, cmd, arg);
+}
+
+/*
+ * XXX: does aac really need no error handling??
+ */
+static int aac_eh_abort(struct scsi_cmnd *cmd)
+{
+	return FAILED;
+}
+
+/*
+ *	aac_eh_reset	- Reset command handling
+ *	@scsi_cmd:	SCSI command block causing the reset
+ *
+ */
+static int aac_eh_reset(struct scsi_cmnd* cmd)
+{
+	struct scsi_device * dev = cmd->device;
+	struct Scsi_Host * host = dev->host;
+	struct scsi_cmnd * command;
+	int count;
+	struct aac_dev * aac;
+	unsigned long flags;
+
+	printk(KERN_ERR "%s: Host adapter reset request. SCSI hang?\n",
+					AAC_DRIVERNAME);
+
+
+	aac = (struct aac_dev *)host->hostdata;
+	if (aac_adapter_check_health(aac)) {
+		printk(KERN_ERR "%s: Host adapter appears dead\n", 
+				AAC_DRIVERNAME);
+		return -ENODEV;
+	}
+	/*
+	 * Wait for all commands to complete on this host
+	 * (block at most 60 seconds).
+	 */
+	for (count = 60; count; --count) {
+		int active = 0;
+		__shost_for_each_device(dev, host) {
+			spin_lock_irqsave(&dev->list_lock, flags);
+			list_for_each_entry(command, &dev->cmd_list, list) {
+				if (command->serial_number) {
+					active++;
+					break;
+				}
+			}
+			spin_unlock_irqrestore(&dev->list_lock, flags);
+			if (active)
+				break;
+
+		}
+		/*
+		 * We can exit if all the commands are complete
+		 */
+		if (active == 0)
+			return SUCCESS;
+		spin_unlock_irq(host->host_lock);
+		ssleep(1);
+		spin_lock_irq(host->host_lock);
+	}
+	printk(KERN_ERR "%s: SCSI bus appears hung\n", AAC_DRIVERNAME);
+	return -ETIMEDOUT;
+}
+
+/**
+ *	aac_cfg_open		-	open a configuration file
+ *	@inode: inode being opened
+ *	@file: file handle attached
+ *
+ *	Called when the configuration device is opened. Does the needed
+ *	setup on the handle and then returns.
+ *
+ *	Bugs: This needs extending to check a given adapter is present
+ *	so we can support hot plugging, and to ref count adapters.
+ */
+
+static int aac_cfg_open(struct inode *inode, struct file *file)
+{
+	struct aac_dev *aac;
+	unsigned minor = iminor(inode);
+	int err = -ENODEV;
+
+	list_for_each_entry(aac, &aac_devices, entry) {
+		if (aac->id == minor) {
+			file->private_data = aac;
+			err = 0;
+			break;
+		}
+	}
+
+	return err;
+}
+
+/**
+ *	aac_cfg_ioctl		-	AAC configuration request
+ *	@inode: inode of device
+ *	@file: file handle
+ *	@cmd: ioctl command code
+ *	@arg: argument
+ *
+ *	Handles a configuration ioctl. Currently this involves wrapping it
+ *	up and feeding it into the nasty windows-alike glue layer.
+ *
+ *	Bugs: Needs locking against parallel ioctls lower down
+ *	Bugs: Needs to handle hot plugging
+ */
+ 
+static int aac_cfg_ioctl(struct inode *inode,  struct file *file,
+		unsigned int cmd, unsigned long arg)
+{
+	return aac_do_ioctl(file->private_data, cmd, (void __user *)arg);
+}
+
+#ifdef CONFIG_COMPAT
+static long aac_compat_do_ioctl(struct aac_dev *dev, unsigned cmd, unsigned long arg)
+{
+	long ret;
+	lock_kernel();
+	switch (cmd) { 
+	case FSACTL_MINIPORT_REV_CHECK:
+	case FSACTL_SENDFIB:
+	case FSACTL_OPEN_GET_ADAPTER_FIB:
+	case FSACTL_CLOSE_GET_ADAPTER_FIB:
+	case FSACTL_SEND_RAW_SRB:
+	case FSACTL_GET_PCI_INFO:
+	case FSACTL_QUERY_DISK:
+	case FSACTL_DELETE_DISK:
+	case FSACTL_FORCE_DELETE_DISK:
+	case FSACTL_GET_CONTAINERS: 
+		ret = aac_do_ioctl(dev, cmd, (void __user *)arg);
+		break;
+
+	case FSACTL_GET_NEXT_ADAPTER_FIB: {
+		struct fib_ioctl __user *f;
+		
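+		/*
+		 * The 32-bit fib_ioctl layout is assumed to match the
+		 * 64-bit one minus a trailing u32 of padding, so the
+		 * user's struct is copied into a zeroed full-sized
+		 * buffer which is then fed to the normal ioctl path.
+		 */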
+		f = compat_alloc_user_space(sizeof(*f));
+		ret = 0;
+		if (clear_user(f, sizeof(*f)))
+			ret = -EFAULT;
+		if (copy_in_user(f, (void __user *)arg, sizeof(struct fib_ioctl) - sizeof(u32)))
+			ret = -EFAULT;
+		if (!ret)
+			ret = aac_do_ioctl(dev, cmd, f);
+		break;
+	}
+
+	default:
+		ret = -ENOIOCTLCMD; 
+		break;
+	} 
+	unlock_kernel();
+	return ret;
+}
+
+static int aac_compat_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
+{
+	struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata;
+	return aac_compat_do_ioctl(dev, cmd, (unsigned long)arg);
+}
+
+static long aac_compat_cfg_ioctl(struct file *file, unsigned cmd, unsigned long arg)
+{
+	return aac_compat_do_ioctl((struct aac_dev *)file->private_data, cmd, arg);
+}
+#endif
+
+static struct file_operations aac_cfg_fops = {
+	.owner		= THIS_MODULE,
+	.ioctl		= aac_cfg_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl   = aac_compat_cfg_ioctl,
+#endif
+	.open		= aac_cfg_open,
+};
+
+static struct scsi_host_template aac_driver_template = {
+	.module				= THIS_MODULE,
+	.name           		= "AAC",
+	.proc_name			= "aacraid",
+	.info           		= aac_info,
+	.ioctl          		= aac_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl			= aac_compat_ioctl,
+#endif
+	.queuecommand   		= aac_queuecommand,
+	.bios_param     		= aac_biosparm,	
+	.slave_configure		= aac_slave_configure,
+	.eh_abort_handler		= aac_eh_abort,
+	.eh_host_reset_handler		= aac_eh_reset,
+	.can_queue      		= AAC_NUM_IO_FIB,	
+	.this_id        		= 16,
+	.sg_tablesize   		= 16,
+	.max_sectors    		= 128,
+#if (AAC_NUM_IO_FIB > 256)
+	.cmd_per_lun			= 256,
+#else		
+	.cmd_per_lun    		= AAC_NUM_IO_FIB, 
+#endif	
+	.use_clustering			= ENABLE_CLUSTERING,
+};
+
+
+static int __devinit aac_probe_one(struct pci_dev *pdev,
+		const struct pci_device_id *id)
+{
+	unsigned index = id->driver_data;
+	struct Scsi_Host *shost;
+	struct aac_dev *aac;
+	struct list_head *insert = &aac_devices;
+	int error = -ENODEV;
+	int unique_id = 0;
+
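+	/*
+	 * Walk the id-ordered device list to find the lowest free id and
+	 * the insertion point that keeps the list sorted, so an id freed
+	 * by hot-unplug can be reused.
+	 */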
+	list_for_each_entry(aac, &aac_devices, entry) {
+		if (aac->id > unique_id)
+			break;
+		insert = &aac->entry;
+		unique_id++;
+	}
+
+	if (pci_enable_device(pdev))
+		goto out;
+
+	if (pci_set_dma_mask(pdev, 0xFFFFFFFFULL) || 
+			pci_set_consistent_dma_mask(pdev, 0xFFFFFFFFULL))
+		goto out;
+	/*
+	 * If the quirk31 bit is set, the adapter needs its adapter-to-driver
+	 * communication memory to be allocated below the 2 GB boundary.
+	 */
+	if (aac_drivers[index].quirks & AAC_QUIRK_31BIT) 
+		if (pci_set_dma_mask(pdev, 0x7FFFFFFFULL) ||
+				pci_set_consistent_dma_mask(pdev, 0x7FFFFFFFULL))
+			goto out;
+	
+	pci_set_master(pdev);
+
+	shost = scsi_host_alloc(&aac_driver_template, sizeof(struct aac_dev));
+	if (!shost)
+		goto out_disable_pdev;
+
+	shost->irq = pdev->irq;
+	shost->base = pci_resource_start(pdev, 0);
+	shost->unique_id = unique_id;
+
+	aac = (struct aac_dev *)shost->hostdata;
+	aac->scsi_host_ptr = shost;	
+	aac->pdev = pdev;
+	aac->name = aac_driver_template.name;
+	aac->id = shost->unique_id;
+	aac->cardtype =  index;
+	INIT_LIST_HEAD(&aac->entry);
+
+	aac->fibs = kmalloc(sizeof(struct fib) * AAC_NUM_FIB, GFP_KERNEL);
+	if (!aac->fibs)
+		goto out_free_host;
+	spin_lock_init(&aac->fib_lock);
+
+	if ((*aac_drivers[index].init)(aac))
+		goto out_free_fibs;
+
+	/*
+	 * If we had set a smaller DMA mask earlier, set it back to 4 GB
+	 * now, since the adapter can DMA data to at least a 4 GB
+	 * address space.
+	 */
+	if (aac_drivers[index].quirks & AAC_QUIRK_31BIT)
+		if (pci_set_dma_mask(pdev, 0xFFFFFFFFULL))
+			goto out_free_fibs;
+
+	aac_get_adapter_info(aac);
+
+	/*
+	 * The max channel count is the number of physical channels plus one
+	 * virtual channel.  All containers live on virtual channel 0;
+	 * physical channels are addressed by their actual number plus 1.
+	 */
+	if (aac->nondasd_support == 1)
+		shost->max_channel = aac_drivers[index].channels+1;
+	else
+		shost->max_channel = 1;
+
+	aac_get_config_status(aac);
+	aac_get_containers(aac);
+	list_add(&aac->entry, insert);
+
+	shost->max_id = aac->maximum_num_containers;
+	if (shost->max_id < MAXIMUM_NUM_CONTAINERS)
+		shost->max_id = MAXIMUM_NUM_CONTAINERS;
+	else
+		shost->this_id = shost->max_id;
+
+	/*
+	 * dmb - we may need to move the setting of these parms somewhere else once
+	 * we get a fib that can report the actual numbers
+	 */
+	shost->max_lun = AAC_MAX_LUN;
+
+	pci_set_drvdata(pdev, shost);
+
+	error = scsi_add_host(shost, &pdev->dev);
+	if (error)
+		goto out_deinit;
+	scsi_scan_host(shost);
+
+	return 0;
+
+out_deinit:
+	kill_proc(aac->thread_pid, SIGKILL, 0);
+	wait_for_completion(&aac->aif_completion);
+
+	aac_send_shutdown(aac);
+	fib_map_free(aac);
+	pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, aac->comm_phys);
+	kfree(aac->queues);
+	free_irq(pdev->irq, aac);
+	iounmap(aac->regs.sa);
+ out_free_fibs:
+	kfree(aac->fibs);
+	kfree(aac->fsa_dev);
+ out_free_host:
+	scsi_host_put(shost);
+ out_disable_pdev:
+	pci_disable_device(pdev);
+ out:
+	return error;
+}
+
+static void __devexit aac_remove_one(struct pci_dev *pdev)
+{
+	struct Scsi_Host *shost = pci_get_drvdata(pdev);
+	struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
+
+	scsi_remove_host(shost);
+
+	kill_proc(aac->thread_pid, SIGKILL, 0);
+	wait_for_completion(&aac->aif_completion);
+
+	aac_send_shutdown(aac);
+	fib_map_free(aac);
+	pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr,
+			aac->comm_phys);
+	kfree(aac->queues);
+
+	free_irq(pdev->irq, aac);
+	iounmap(aac->regs.sa);
+	
+	kfree(aac->fibs);
+	
+	list_del(&aac->entry);
+	scsi_host_put(shost);
+	pci_disable_device(pdev);
+}
+
+static struct pci_driver aac_pci_driver = {
+	.name		= AAC_DRIVERNAME,
+	.id_table	= aac_pci_tbl,
+	.probe		= aac_probe_one,
+	.remove		= __devexit_p(aac_remove_one),
+};
+
+static int __init aac_init(void)
+{
+	int error;
+	
+	printk(KERN_INFO "Red Hat/Adaptec aacraid driver (%s %s)\n",
+			AAC_DRIVER_VERSION, AAC_DRIVER_BUILD_DATE);
+
+	error = pci_module_init(&aac_pci_driver);
+	if (error)
+		return error;
+
+	aac_cfg_major = register_chrdev(0, "aac", &aac_cfg_fops);
+	if (aac_cfg_major < 0) {
+		printk(KERN_WARNING
+		       "aacraid: unable to register \"aac\" device.\n");
+	}
+	return 0;
+}
+
+static void __exit aac_exit(void)
+{
+	unregister_chrdev(aac_cfg_major, "aac");
+	pci_unregister_driver(&aac_pci_driver);
+}
+
+module_init(aac_init);
+module_exit(aac_exit);
diff --git a/drivers/scsi/aacraid/rkt.c b/drivers/scsi/aacraid/rkt.c
new file mode 100644
index 000000000000..1b8ed47cfe30
--- /dev/null
+++ b/drivers/scsi/aacraid/rkt.c
@@ -0,0 +1,440 @@
+/*
+ *	Adaptec AAC series RAID controller driver
+ *	(c) Copyright 2001 Red Hat Inc.	<alan@redhat.com>
+ *
+ * based on the old aacraid driver that is..
+ * Adaptec aacraid device driver for Linux.
+ *
+ * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.  If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Module Name:
+ *  rkt.c
+ *
+ * Abstract: Hardware miniport for Drawbridge specific hardware functions.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/completion.h>
+#include <linux/time.h>
+#include <linux/interrupt.h>
+#include <asm/semaphore.h>
+
+#include <scsi/scsi_host.h>
+
+#include "aacraid.h"
+
+static irqreturn_t aac_rkt_intr(int irq, void *dev_id, struct pt_regs *regs)
+{
+	struct aac_dev *dev = dev_id;
+	unsigned long bellbits;
+	u8 intstat, mask;
+	intstat = rkt_readb(dev, MUnit.OISR);
+	/*
+	 *	Read mask and invert because drawbridge is reversed.
+	 *	This allows us to only service interrupts that have 
+	 *	been enabled.
+	 */
+	mask = ~(dev->OIMR);
+	/* Check to see if this is our interrupt.  If it isn't just return */
+	if (intstat & mask) 
+	{
+		bellbits = rkt_readl(dev, OutboundDoorbellReg);
+		if (bellbits & DoorBellPrintfReady) {
+			aac_printf(dev, rkt_readl(dev, IndexRegs.Mailbox[5]));
+			rkt_writel(dev, MUnit.ODR,DoorBellPrintfReady);
+			rkt_writel(dev, InboundDoorbellReg,DoorBellPrintfDone);
+		}
+		else if (bellbits & DoorBellAdapterNormCmdReady) {
+			rkt_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdReady);
+			aac_command_normal(&dev->queues->queue[HostNormCmdQueue]);
+		}
+		else if (bellbits & DoorBellAdapterNormRespReady) {
+			aac_response_normal(&dev->queues->queue[HostNormRespQueue]);
+			rkt_writel(dev, MUnit.ODR,DoorBellAdapterNormRespReady);
+		}
+		else if (bellbits & DoorBellAdapterNormCmdNotFull) {
+			rkt_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
+		}
+		else if (bellbits & DoorBellAdapterNormRespNotFull) {
+			rkt_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
+			rkt_writel(dev, MUnit.ODR, DoorBellAdapterNormRespNotFull);
+		}
+		return IRQ_HANDLED;
+	}
+	return IRQ_NONE;
+}
+
+/**
+ *	rkt_sync_cmd	-	send a command and wait
+ *	@dev: Adapter
+ *	@command: Command to execute
+ *	@p1: first parameter
+ *	@ret: adapter status
+ *
+ *	This routine will send a synchronous command to the adapter and wait 
+ *	for its	completion.
+ */
+
+static int rkt_sync_cmd(struct aac_dev *dev, u32 command, u32 p1, u32 *status)
+{
+	unsigned long start;
+	int ok;
+	/*
+	 *	Write the command into Mailbox 0
+	 */
+	rkt_writel(dev, InboundMailbox0, command);
+	/*
+	 *	Write the parameters into Mailboxes 1 - 4
+	 */
+	rkt_writel(dev, InboundMailbox1, p1);
+	rkt_writel(dev, InboundMailbox2, 0);
+	rkt_writel(dev, InboundMailbox3, 0);
+	rkt_writel(dev, InboundMailbox4, 0);
+	/*
+	 *	Clear the synch command doorbell to start on a clean slate.
+	 */
+	rkt_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
+	/*
+	 *	Disable doorbell interrupts
+	 */
+	rkt_writeb(dev, MUnit.OIMR, dev->OIMR = 0xff);
+	/*
+	 *	Force the completion of the mask register write before issuing
+	 *	the interrupt.
+	 */
+	rkt_readb (dev, MUnit.OIMR);
+	/*
+	 *	Signal that there is a new synch command
+	 */
+	rkt_writel(dev, InboundDoorbellReg, INBOUNDDOORBELL_0);
+
+	ok = 0;
+	start = jiffies;
+
+	/*
+	 *	Wait up to 30 seconds
+	 */
+	while (time_before(jiffies, start+30*HZ)) 
+	{
+		udelay(5);	/* Delay 5 microseconds to let Mon960 get info. */
+		/*
+		 *	Mon960 will set doorbell0 bit when it has completed the command.
+		 */
+		if (rkt_readl(dev, OutboundDoorbellReg) & OUTBOUNDDOORBELL_0) {
+			/*
+			 *	Clear the doorbell.
+			 */
+			rkt_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
+			ok = 1;
+			break;
+		}
+		/*
+		 *	Yield the processor in case we are slow 
+		 */
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		schedule_timeout(1);
+	}
+	if (ok != 1) {
+		/*
+		 *	Restore interrupt mask even though we timed out
+		 */
+		rkt_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb);
+		return -ETIMEDOUT;
+	}
+	/*
+	 *	Pull the synch status from Mailbox 0.
+	 */
+	if (status)
+		*status = rkt_readl(dev, IndexRegs.Mailbox[0]);
+	/*
+	 *	Clear the synch command doorbell.
+	 */
+	rkt_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
+	/*
+	 *	Restore interrupt mask
+	 */
+	rkt_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb);
+	return 0;
+
+}
+
+/**
+ *	aac_rkt_interrupt_adapter	-	interrupt adapter
+ *	@dev: Adapter
+ *
+ *	Send an interrupt to the i960 and breakpoint it.
+ */
+
+static void aac_rkt_interrupt_adapter(struct aac_dev *dev)
+{
+	u32 ret;
+	rkt_sync_cmd(dev, BREAKPOINT_REQUEST, 0, &ret);
+}
+
+/**
+ *	aac_rkt_notify_adapter		-	send an event to the adapter
+ *	@dev: Adapter
+ *	@event: Event to send
+ *
+ *	Notify the i960 that something it probably cares about has
+ *	happened.
+ */
+
+static void aac_rkt_notify_adapter(struct aac_dev *dev, u32 event)
+{
+	switch (event) {
+
+	case AdapNormCmdQue:
+		rkt_writel(dev, MUnit.IDR,INBOUNDDOORBELL_1);
+		break;
+	case HostNormRespNotFull:
+		rkt_writel(dev, MUnit.IDR,INBOUNDDOORBELL_4);
+		break;
+	case AdapNormRespQue:
+		rkt_writel(dev, MUnit.IDR,INBOUNDDOORBELL_2);
+		break;
+	case HostNormCmdNotFull:
+		rkt_writel(dev, MUnit.IDR,INBOUNDDOORBELL_3);
+		break;
+	case HostShutdown:
+//		rkt_sync_cmd(dev, HOST_CRASHING, 0, 0, 0, 0, &ret);
+		break;
+	case FastIo:
+		rkt_writel(dev, MUnit.IDR,INBOUNDDOORBELL_6);
+		break;
+	case AdapPrintfDone:
+		rkt_writel(dev, MUnit.IDR,INBOUNDDOORBELL_5);
+		break;
+	default:
+		BUG();
+		break;
+	}
+}
+
+/**
+ *	aac_rkt_start_adapter		-	activate adapter
+ *	@dev:	Adapter
+ *
+ *	Start up processing on an i960 based AAC adapter
+ */
+
+static void aac_rkt_start_adapter(struct aac_dev *dev)
+{
+	u32 status;
+	struct aac_init *init;
+
+	init = dev->init;
+	init->HostElapsedSeconds = cpu_to_le32(get_seconds());
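+	/*
+	 * Seed the adapter with the host's current time in seconds so
+	 * both sides agree on elapsed time.
+	 */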
+	/*
+	 *	Tell the adapter we are back and up and running so it will scan
+	 *	its command queues and enable our interrupts
+	 */
+	dev->irq_mask = (DoorBellPrintfReady | OUTBOUNDDOORBELL_1 | OUTBOUNDDOORBELL_2 | OUTBOUNDDOORBELL_3 | OUTBOUNDDOORBELL_4);
+	/*
+	 *	First clear out all interrupts.  Then enable the ones that we
+	 *	can handle.
+	 */
+	rkt_writeb(dev, MUnit.OIMR, 0xff);
+	rkt_writel(dev, MUnit.ODR, 0xffffffff);
+//	rkt_writeb(dev, MUnit.OIMR, ~(u8)OUTBOUND_DOORBELL_INTERRUPT_MASK);
+	rkt_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb);
+
+	// We can only use a 32 bit address here
+	rkt_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, (u32)(ulong)dev->init_pa, &status);
+}
+
+/**
+ *	aac_rkt_check_health
+ *	@dev: device to check if healthy
+ *
+ *	Will attempt to determine if the specified adapter is alive and
+ *	capable of handling requests, returning 0 if alive.
+ */
+static int aac_rkt_check_health(struct aac_dev *dev)
+{
+	u32 status = rkt_readl(dev, MUnit.OMRx[0]);
+
+	/*
+	 *	Check to see if the board failed any self tests.
+	 */
+	if (status & SELF_TEST_FAILED)
+		return -1;
+	/*
+	 *	Check to see if the board panic'd.
+	 */
+	if (status & KERNEL_PANIC) {
+		char * buffer;
+		struct POSTSTATUS {
+			u32 Post_Command;
+			u32 Post_Address;
+		} * post;
+		dma_addr_t paddr, baddr;
+		int ret;
+
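+		/*
+		 * A status of the form 0xBCxxxxxx is taken to mean the
+		 * firmware already reports its panic code in the next
+		 * byte, so return that directly instead of querying the
+		 * POST results below (interpretation assumed from the
+		 * masking; the constant is otherwise undocumented here).
+		 */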
+		if ((status & 0xFF000000L) == 0xBC000000L)
+			return (status >> 16) & 0xFF;
+		buffer = pci_alloc_consistent(dev->pdev, 512, &baddr);
+		ret = -2;
+		if (buffer == NULL)
+			return ret;
+		post = pci_alloc_consistent(dev->pdev,
+		  sizeof(struct POSTSTATUS), &paddr);
+		if (post == NULL) {
+			pci_free_consistent(dev->pdev, 512, buffer, baddr);
+			return ret;
+		}
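+		/*
+		 * Ask the monitor to write its POST result, an ASCII
+		 * string such as "0x33", into the 512-byte buffer; the
+		 * two hex digits are converted to a numeric code below.
+		 */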
+		memset(buffer, 0, 512);
+		post->Post_Command = cpu_to_le32(COMMAND_POST_RESULTS);
+		post->Post_Address = cpu_to_le32(baddr);
+		rkt_writel(dev, MUnit.IMRx[0], paddr);
+		rkt_sync_cmd(dev, COMMAND_POST_RESULTS, baddr, &status);
+		pci_free_consistent(dev->pdev, sizeof(struct POSTSTATUS),
+		  post, paddr);
+		if ((buffer[0] == '0') && (buffer[1] == 'x')) {
+			ret = (buffer[2] <= '9') ? (buffer[2] - '0') : (buffer[2] - 'A' + 10);
+			ret <<= 4;
+			ret += (buffer[3] <= '9') ? (buffer[3] - '0') : (buffer[3] - 'A' + 10);
+		}
+		pci_free_consistent(dev->pdev, 512, buffer, baddr);
+		return ret;
+	}
+	/*
+	 *	Wait for the adapter to be up and running.
+	 */
+	if (!(status & KERNEL_UP_AND_RUNNING))
+		return -3;
+	/*
+	 *	Everything is OK
+	 */
+	return 0;
+}
+
+/**
+ *	aac_rkt_init	-	initialize an i960 based AAC card
+ *	@dev: device to configure
+ *
+ *	Allocate and set up resources for the i960 based AAC variants. The 
+ *	device_interface in the commregion will be allocated and linked 
+ *	to the comm region.
+ */
+
+int aac_rkt_init(struct aac_dev *dev)
+{
+	unsigned long start;
+	unsigned long status;
+	int instance;
+	const char * name;
+
+	instance = dev->id;
+	name     = dev->name;
+
+	/*
+	 *	Map in the registers from the adapter.
+	 */
+	if((dev->regs.rkt = ioremap((unsigned long)dev->scsi_host_ptr->base, 8192))==NULL)
+	{	
+		printk(KERN_WARNING "aacraid: unable to map i960.\n" );
+		return -1;
+	}
+	/*
+	 *	Check to see if the board failed any self tests.
+	 */
+	if (rkt_readl(dev, MUnit.OMRx[0]) & SELF_TEST_FAILED) {
+		printk(KERN_ERR "%s%d: adapter self-test failed.\n", dev->name, instance);
+		goto error_iounmap;
+	}
+	/*
+	 *	Check to see if the monitor panic'd while booting.
+	 */
+	if (rkt_readl(dev, MUnit.OMRx[0]) & MONITOR_PANIC) {
+		printk(KERN_ERR "%s%d: adapter monitor panic.\n", dev->name, instance);
+		goto error_iounmap;
+	}
+	/*
+	 *	Check to see if the board panic'd while booting.
+	 */
+	if (rkt_readl(dev, MUnit.OMRx[0]) & KERNEL_PANIC) {
+		printk(KERN_ERR "%s%d: adapter kernel panic'd.\n", dev->name, instance);
+		goto error_iounmap;
+	}
+	start = jiffies;
+	/*
+	 *	Wait for the adapter to be up and running. Wait up to 3 minutes
+	 */
+	while (!(rkt_readl(dev, MUnit.OMRx[0]) & KERNEL_UP_AND_RUNNING))
+	{
+		if(time_after(jiffies, start+180*HZ))
+		{
+			status = rkt_readl(dev, MUnit.OMRx[0]);
+			printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n", 
+					dev->name, instance, status);
+			goto error_iounmap;
+		}
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		schedule_timeout(1);
+	}
+	if (request_irq(dev->scsi_host_ptr->irq, aac_rkt_intr, SA_SHIRQ|SA_INTERRUPT, "aacraid", (void *)dev)<0) 
+	{
+		printk(KERN_ERR "%s%d: Interrupt unavailable.\n", name, instance);
+		goto error_iounmap;
+	}
+	/*
+	 *	Fill in the function dispatch table.
+	 */
+	dev->a_ops.adapter_interrupt = aac_rkt_interrupt_adapter;
+	dev->a_ops.adapter_notify = aac_rkt_notify_adapter;
+	dev->a_ops.adapter_sync_cmd = rkt_sync_cmd;
+	dev->a_ops.adapter_check_health = aac_rkt_check_health;
+
+	if (aac_init_adapter(dev) == NULL)
+		goto error_irq;
+	/*
+	 *	Start any kernel threads needed
+	 */
+	dev->thread_pid = kernel_thread((int (*)(void *))aac_command_thread, dev, 0);
+	if(dev->thread_pid < 0)
+	{
+		printk(KERN_ERR "aacraid: Unable to create rkt thread.\n");
+		goto error_kfree;
+	}	
+	/*
+	 *	Tell the adapter that all is configured, and it can start
+	 *	accepting requests
+	 */
+	aac_rkt_start_adapter(dev);
+	return 0;
+
+error_kfree:
+	kfree(dev->queues);
+
+error_irq:
+	free_irq(dev->scsi_host_ptr->irq, (void *)dev);
+
+error_iounmap:
+	iounmap(dev->regs.rkt);
+
+	return -1;
+}
diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c
new file mode 100644
index 000000000000..630b99e1fe83
--- /dev/null
+++ b/drivers/scsi/aacraid/rx.c
@@ -0,0 +1,441 @@
+/*
+ *	Adaptec AAC series RAID controller driver
+ *	(c) Copyright 2001 Red Hat Inc.	<alan@redhat.com>
+ *
+ * based on the old aacraid driver that is..
+ * Adaptec aacraid device driver for Linux.
+ *
+ * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.  If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Module Name:
+ *  rx.c
+ *
+ * Abstract: Hardware miniport for Drawbridge specific hardware functions.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/completion.h>
+#include <linux/time.h>
+#include <linux/interrupt.h>
+#include <asm/semaphore.h>
+
+#include <scsi/scsi_host.h>
+
+#include "aacraid.h"
+
+static irqreturn_t aac_rx_intr(int irq, void *dev_id, struct pt_regs *regs)
+{
+	struct aac_dev *dev = dev_id;
+	unsigned long bellbits;
+	u8 intstat, mask;
+	intstat = rx_readb(dev, MUnit.OISR);
+	/*
+	 *	Read mask and invert because drawbridge is reversed.
+	 *	This allows us to only service interrupts that have 
+	 *	been enabled.
+	 */
+	mask = ~(dev->OIMR);
+	/* Check to see if this is our interrupt.  If it isn't just return */
+	if (intstat & mask) 
+	{
+		bellbits = rx_readl(dev, OutboundDoorbellReg);
+		if (bellbits & DoorBellPrintfReady) {
+			aac_printf(dev, rx_readl(dev, IndexRegs.Mailbox[5]));
+			rx_writel(dev, MUnit.ODR,DoorBellPrintfReady);
+			rx_writel(dev, InboundDoorbellReg,DoorBellPrintfDone);
+		}
+		else if (bellbits & DoorBellAdapterNormCmdReady) {
+			rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdReady);
+			aac_command_normal(&dev->queues->queue[HostNormCmdQueue]);
+		}
+		else if (bellbits & DoorBellAdapterNormRespReady) {
+			aac_response_normal(&dev->queues->queue[HostNormRespQueue]);
+			rx_writel(dev, MUnit.ODR,DoorBellAdapterNormRespReady);
+		}
+		else if (bellbits & DoorBellAdapterNormCmdNotFull) {
+			rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
+		}
+		else if (bellbits & DoorBellAdapterNormRespNotFull) {
+			rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
+			rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespNotFull);
+		}
+		return IRQ_HANDLED;
+	}
+	return IRQ_NONE;
+}
+
+/**
+ *	rx_sync_cmd	-	send a command and wait
+ *	@dev: Adapter
+ *	@command: Command to execute
+ *	@p1: first parameter
+ *	@ret: adapter status
+ *
+ *	This routine will send a synchronous command to the adapter and wait 
+ *	for its	completion.
+ */
+
+static int rx_sync_cmd(struct aac_dev *dev, u32 command, u32 p1, u32 *status)
+{
+	unsigned long start;
+	int ok;
+	/*
+	 *	Write the command into Mailbox 0
+	 */
+	rx_writel(dev, InboundMailbox0, command);
+	/*
+	 *	Write the parameters into Mailboxes 1 - 4
+	 */
+	rx_writel(dev, InboundMailbox1, p1);
+	rx_writel(dev, InboundMailbox2, 0);
+	rx_writel(dev, InboundMailbox3, 0);
+	rx_writel(dev, InboundMailbox4, 0);
+	/*
+	 *	Clear the synch command doorbell to start on a clean slate.
+	 */
+	rx_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
+	/*
+	 *	Disable doorbell interrupts
+	 */
+	rx_writeb(dev, MUnit.OIMR, dev->OIMR |= 0x04);
+	/*
+	 *	Force the completion of the mask register write before issuing
+	 *	the interrupt.
+	 */
+	rx_readb (dev, MUnit.OIMR);
+	/*
+	 *	Signal that there is a new synch command
+	 */
+	rx_writel(dev, InboundDoorbellReg, INBOUNDDOORBELL_0);
+
+	ok = 0;
+	start = jiffies;
+
+	/*
+	 *	Wait up to 30 seconds
+	 */
+	while (time_before(jiffies, start+30*HZ)) 
+	{
+		udelay(5);	/* Delay 5 microseconds to let Mon960 get info. */
+		/*
+		 *	Mon960 will set doorbell0 bit when it has completed the command.
+		 */
+		if (rx_readl(dev, OutboundDoorbellReg) & OUTBOUNDDOORBELL_0) {
+			/*
+			 *	Clear the doorbell.
+			 */
+			rx_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
+			ok = 1;
+			break;
+		}
+		/*
+		 *	Yield the processor in case we are slow 
+		 */
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		schedule_timeout(1);
+	}
+	if (ok != 1) {
+		/*
+		 *	Restore interrupt mask even though we timed out
+		 */
+		rx_writeb(dev, MUnit.OIMR, dev->OIMR &= 0xfb);
+		return -ETIMEDOUT;
+	}
+	/*
+	 *	Pull the synch status from Mailbox 0.
+	 */
+	if (status)
+		*status = rx_readl(dev, IndexRegs.Mailbox[0]);
+	/*
+	 *	Clear the synch command doorbell.
+	 */
+	rx_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
+	/*
+	 *	Restore interrupt mask
+	 */
+	rx_writeb(dev, MUnit.OIMR, dev->OIMR &= 0xfb);
+	return 0;
+
+}
+
+/**
+ *	aac_rx_interrupt_adapter	-	interrupt adapter
+ *	@dev: Adapter
+ *
+ *	Send an interrupt to the i960 and breakpoint it.
+ */
+
+static void aac_rx_interrupt_adapter(struct aac_dev *dev)
+{
+	u32 ret;
+	rx_sync_cmd(dev, BREAKPOINT_REQUEST, 0, &ret);
+}
+
+/**
+ *	aac_rx_notify_adapter		-	send an event to the adapter
+ *	@dev: Adapter
+ *	@event: Event to send
+ *
+ *	Notify the i960 that something it probably cares about has
+ *	happened.
+ */
+
+static void aac_rx_notify_adapter(struct aac_dev *dev, u32 event)
+{
+	switch (event) {
+
+	case AdapNormCmdQue:
+		rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_1);
+		break;
+	case HostNormRespNotFull:
+		rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_4);
+		break;
+	case AdapNormRespQue:
+		rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_2);
+		break;
+	case HostNormCmdNotFull:
+		rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_3);
+		break;
+	case HostShutdown:
+//		rx_sync_cmd(dev, HOST_CRASHING, 0, 0, 0, 0, &ret);
+		break;
+	case FastIo:
+		rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_6);
+		break;
+	case AdapPrintfDone:
+		rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_5);
+		break;
+	default:
+		BUG();
+		break;
+	}
+}
+
+/**
+ *	aac_rx_start_adapter		-	activate adapter
+ *	@dev:	Adapter
+ *
+ *	Start up processing on an i960 based AAC adapter
+ */
+
+static void aac_rx_start_adapter(struct aac_dev *dev)
+{
+	u32 status;
+	struct aac_init *init;
+
+	init = dev->init;
+	init->HostElapsedSeconds = cpu_to_le32(get_seconds());
+	/*
+	 *	Tell the adapter we are back and up and running so it will scan
+	 *	its command queues and enable our interrupts
+	 */
+	dev->irq_mask = (DoorBellPrintfReady | OUTBOUNDDOORBELL_1 | OUTBOUNDDOORBELL_2 | OUTBOUNDDOORBELL_3 | OUTBOUNDDOORBELL_4);
+	/*
+	 *	First clear out all interrupts.  Then enable the ones that we
+	 *	can handle.
+	 */
+	rx_writeb(dev, MUnit.OIMR, 0xff);
+	rx_writel(dev, MUnit.ODR, 0xffffffff);
+//	rx_writeb(dev, MUnit.OIMR, ~(u8)OUTBOUND_DOORBELL_INTERRUPT_MASK);
+	rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb);
+
+	// We can only use a 32 bit address here
+	rx_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, (u32)(ulong)dev->init_pa, &status);
+}
+
+/**
+ *	aac_rx_check_health
+ *	@dev: device to check if healthy
+ *
+ *	Will attempt to determine if the specified adapter is alive and
+ *	capable of handling requests, returning 0 if alive.
+ */
+static int aac_rx_check_health(struct aac_dev *dev)
+{
+	u32 status = rx_readl(dev, MUnit.OMRx[0]);
+
+	/*
+	 *	Check to see if the board failed any self tests.
+	 */
+	if (status & SELF_TEST_FAILED)
+		return -1;
+	/*
+	 *	Check to see if the board panic'd.
+	 */
+	if (status & KERNEL_PANIC) {
+		char * buffer;
+		struct POSTSTATUS {
+			u32 Post_Command;
+			u32 Post_Address;
+		} * post;
+		dma_addr_t paddr, baddr;
+		int ret;
+
+		if ((status & 0xFF000000L) == 0xBC000000L)
+			return (status >> 16) & 0xFF;
+		buffer = pci_alloc_consistent(dev->pdev, 512, &baddr);
+		ret = -2;
+		if (buffer == NULL)
+			return ret;
+		post = pci_alloc_consistent(dev->pdev,
+		  sizeof(struct POSTSTATUS), &paddr);
+		if (post == NULL) {
+			pci_free_consistent(dev->pdev, 512, buffer, baddr);
+			return ret;
+		}
+		memset(buffer, 0, 512);
+		post->Post_Command = cpu_to_le32(COMMAND_POST_RESULTS);
+		post->Post_Address = cpu_to_le32(baddr);
+		rx_writel(dev, MUnit.IMRx[0], paddr);
+		rx_sync_cmd(dev, COMMAND_POST_RESULTS, baddr, &status);
+		pci_free_consistent(dev->pdev, sizeof(struct POSTSTATUS),
+		  post, paddr);
+		if ((buffer[0] == '0') && (buffer[1] == 'x')) {
+			ret = (buffer[2] <= '9') ? (buffer[2] - '0') : (buffer[2] - 'A' + 10);
+			ret <<= 4;
+			ret += (buffer[3] <= '9') ? (buffer[3] - '0') : (buffer[3] - 'A' + 10);
+		}
+		pci_free_consistent(dev->pdev, 512, buffer, baddr);
+		return ret;
+	}
+	/*
+	 *	Wait for the adapter to be up and running.
+	 */
+	if (!(status & KERNEL_UP_AND_RUNNING))
+		return -3;
+	/*
+	 *	Everything is OK
+	 */
+	return 0;
+}
+
+/**
+ *	aac_rx_init	-	initialize an i960 based AAC card
+ *	@dev: device to configure
+ *
+ *	Allocate and set up resources for the i960 based AAC variants. The 
+ *	device_interface in the commregion will be allocated and linked 
+ *	to the comm region.
+ */
+
+int aac_rx_init(struct aac_dev *dev)
+{
+	unsigned long start;
+	unsigned long status;
+	int instance;
+	const char * name;
+
+	instance = dev->id;
+	name     = dev->name;
+
+	/*
+	 *	Map in the registers from the adapter.
+	 */
+	if((dev->regs.rx = ioremap((unsigned long)dev->scsi_host_ptr->base, 8192))==NULL)
+	{	
+		printk(KERN_WARNING "aacraid: unable to map i960.\n" );
+		return -1;
+	}
+	/*
+	 *	Check to see if the board failed any self tests.
+	 */
+	if (rx_readl(dev, MUnit.OMRx[0]) & SELF_TEST_FAILED) {
+		printk(KERN_ERR "%s%d: adapter self-test failed.\n", dev->name, instance);
+		goto error_iounmap;
+	}
+	/*
+	 *	Check to see if the board panic'd while booting.
+	 */
+	if (rx_readl(dev, MUnit.OMRx[0]) & KERNEL_PANIC) {
+		printk(KERN_ERR "%s%d: adapter kernel panic.\n", dev->name, instance);
+		goto error_iounmap;
+	}
+	/*
+	 *	Check to see if the monitor panic'd while booting.
+	 */
+	if (rx_readl(dev, MUnit.OMRx[0]) & MONITOR_PANIC) {
+		printk(KERN_ERR "%s%d: adapter monitor panic.\n", dev->name, instance);
+		goto error_iounmap;
+	}
+	start = jiffies;
+	/*
+	 *	Wait for the adapter to be up and running. Wait up to 3 minutes
+	 */
+	while ((!(rx_readl(dev, IndexRegs.Mailbox[7]) & KERNEL_UP_AND_RUNNING))
+		|| (!(rx_readl(dev, MUnit.OMRx[0]) & KERNEL_UP_AND_RUNNING)))
+	{
+		if(time_after(jiffies, start+180*HZ))
+		{
+			status = rx_readl(dev, IndexRegs.Mailbox[7]);
+			printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n", 
+					dev->name, instance, status);
+			goto error_iounmap;
+		}
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		schedule_timeout(1);
+	}
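+	/*
+	 *	Hook the interrupt handler; the line may be shared, hence
+	 *	SA_SHIRQ.
+	 */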
+	if (request_irq(dev->scsi_host_ptr->irq, aac_rx_intr, SA_SHIRQ|SA_INTERRUPT, "aacraid", (void *)dev)<0) 
+	{
+		printk(KERN_ERR "%s%d: Interrupt unavailable.\n", name, instance);
+		goto error_iounmap;
+	}
+	/*
+	 *	Fill in the function dispatch table.
+	 */
+	dev->a_ops.adapter_interrupt = aac_rx_interrupt_adapter;
+	dev->a_ops.adapter_notify = aac_rx_notify_adapter;
+	dev->a_ops.adapter_sync_cmd = rx_sync_cmd;
+	dev->a_ops.adapter_check_health = aac_rx_check_health;
+
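+	/*
+	 *	aac_init_adapter sets up the communication region shared
+	 *	with the adapter firmware; NULL indicates that setup failed.
+	 */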
+	if (aac_init_adapter(dev) == NULL)
+		goto error_irq;
+	/*
+	 *	Start any kernel threads needed: aac_command_thread services
+	 *	commands and events originating from the adapter.
+	 */
+	dev->thread_pid = kernel_thread((int (*)(void *))aac_command_thread, dev, 0);
+	if(dev->thread_pid < 0)
+	{
+		printk(KERN_ERR "aacraid: Unable to create rx thread.\n");
+		goto error_kfree;
+	}
+	/*
+	 *	Tell the adapter that all is configured, and it can start
+	 *	accepting requests
+	 */
+	aac_rx_start_adapter(dev);
+	return 0;
+
+error_kfree:
+	kfree(dev->queues);
+
+error_irq:
+	free_irq(dev->scsi_host_ptr->irq, (void *)dev);
+
+error_iounmap:
+	iounmap(dev->regs.rx);
+
+	return -1;
+}
diff --git a/drivers/scsi/aacraid/sa.c b/drivers/scsi/aacraid/sa.c
new file mode 100644
index 000000000000..bd6c30723fba
--- /dev/null
+++ b/drivers/scsi/aacraid/sa.c
@@ -0,0 +1,374 @@
+/*
+ *	Adaptec AAC series RAID controller driver
+ *	(c) Copyright 2001 Red Hat Inc.	<alan@redhat.com>
+ *
+ * based on the old aacraid driver that is..
+ * Adaptec aacraid device driver for Linux.
+ *
+ * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.  If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Module Name:
+ *  sa.c
+ *
+ * Abstract: Drawbridge specific support functions
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/completion.h>
+#include <linux/time.h>
+#include <linux/interrupt.h>
+#include <asm/semaphore.h>
+
+#include <scsi/scsi_host.h>
+
+#include "aacraid.h"
+
+static irqreturn_t aac_sa_intr(int irq, void *dev_id, struct pt_regs *regs)
+{
+	struct aac_dev *dev = dev_id;
+	unsigned short intstat, mask;
+
+	intstat = sa_readw(dev, DoorbellReg_p);
+	/*
+	 *	Read mask and invert because drawbridge is reversed.
+	 *	This allows us to only service interrupts that have been enabled.
+	 */
+	mask = ~(sa_readw(dev, SaDbCSR.PRISETIRQMASK));
+
+	/* Check to see if this is our interrupt.  If it isn't, just return. */
+
+	if (intstat & mask) {
+		if (intstat & PrintfReady) {
+			aac_printf(dev, sa_readl(dev, Mailbox5));
+			sa_writew(dev, DoorbellClrReg_p, PrintfReady); /* clear PrintfReady */
+			sa_writew(dev, DoorbellReg_s, PrintfDone);
+		} else if (intstat & DOORBELL_1) {	// dev -> Host Normal Command Ready
+			aac_command_normal(&dev->queues->queue[HostNormCmdQueue]);
+			sa_writew(dev, DoorbellClrReg_p, DOORBELL_1);
+		} else if (intstat & DOORBELL_2) {	// dev -> Host Normal Response Ready
+			aac_response_normal(&dev->queues->queue[HostNormRespQueue]);
+			sa_writew(dev, DoorbellClrReg_p, DOORBELL_2);
+		} else if (intstat & DOORBELL_3) {	// dev -> Host Normal Command Not Full
+			sa_writew(dev, DoorbellClrReg_p, DOORBELL_3);
+		} else if (intstat & DOORBELL_4) {	// dev -> Host Normal Response Not Full
+			sa_writew(dev, DoorbellClrReg_p, DOORBELL_4);
+		}
+		return IRQ_HANDLED;
+	}
+	return IRQ_NONE;
+}
+
+/**
+ *	aac_sa_notify_adapter		-	handle adapter notification
+ *	@dev:	Adapter that notification is for
+ *	@event:	Event to notify
+ *
+ *	Notify the adapter of an event
+ */
+ 
+void aac_sa_notify_adapter(struct aac_dev *dev, u32 event)
+{
+	switch (event) {
+
+	case AdapNormCmdQue:
+		sa_writew(dev, DoorbellReg_s,DOORBELL_1);
+		break;
+	case HostNormRespNotFull:
+		sa_writew(dev, DoorbellReg_s,DOORBELL_4);
+		break;
+	case AdapNormRespQue:
+		sa_writew(dev, DoorbellReg_s,DOORBELL_2);
+		break;
+	case HostNormCmdNotFull:
+		sa_writew(dev, DoorbellReg_s,DOORBELL_3);
+		break;
+	case HostShutdown:
+		//sa_sync_cmd(dev, HOST_CRASHING, 0, &ret);
+		break;
+	case FastIo:
+		sa_writew(dev, DoorbellReg_s,DOORBELL_6);
+		break;
+	case AdapPrintfDone:
+		sa_writew(dev, DoorbellReg_s,DOORBELL_5);
+		break;
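+	/* any other event is a driver logic error */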
+	default:
+		BUG();
+		break;
+	}
+}
+
+
+/**
+ *	sa_sync_cmd	-	send a command and wait
+ *	@dev: Adapter
+ *	@command: Command to execute
+ *	@p1: first parameter
+ *	@ret: adapter status
+ *
+ *	This routine will send a synchronous command to the adapter and wait
+ *	for its completion.
+ */
+
+static int sa_sync_cmd(struct aac_dev *dev, u32 command, u32 p1, u32 *ret)
+{
+	unsigned long start;
+ 	int ok;
+	/*
+	 *	Write the Command into Mailbox 0
+	 */
+	sa_writel(dev, Mailbox0, command);
+	/*
+	 *	Write the parameters into Mailboxes 1 - 4
+	 */
+	sa_writel(dev, Mailbox1, p1);
+	sa_writel(dev, Mailbox2, 0);
+	sa_writel(dev, Mailbox3, 0);
+	sa_writel(dev, Mailbox4, 0);
+	/*
+	 *	Clear the synch command doorbell to start on a clean slate.
+	 */
+	sa_writew(dev, DoorbellClrReg_p, DOORBELL_0);
+	/*
+	 *	Signal that there is a new synch command
+	 */
+	sa_writew(dev, DoorbellReg_s, DOORBELL_0);
+
+	ok = 0;
+	start = jiffies;
+
+	while(time_before(jiffies, start+30*HZ))
+	{
+		/*
+		 *	Delay 5uS so that the monitor gets access
+		 */
+		udelay(5);
+		/*
+		 *	Mon110 will set doorbell0 bit when it has 
+		 *	completed the command.
+		 */
+		if(sa_readw(dev, DoorbellReg_p) & DOORBELL_0)  {
+			ok = 1;
+			break;
+		}
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		schedule_timeout(1);
+	}
+
+	if (ok != 1)
+		return -ETIMEDOUT;
+	/*
+	 *	Clear the synch command doorbell.
+	 */
+	sa_writew(dev, DoorbellClrReg_p, DOORBELL_0);
+	/*
+	 *	Pull the synch status from Mailbox 0.
+	 */
+	if (ret)
+		*ret = sa_readl(dev, Mailbox0);
+	return 0;
+}
+
+/**
+ *	aac_sa_interrupt_adapter	-	interrupt an adapter
+ *	@dev: Adapter to interrupt.
+ *
+ *	Breakpoint an adapter.
+ */
+ 
+static void aac_sa_interrupt_adapter (struct aac_dev *dev)
+{
+	u32 ret;
+	sa_sync_cmd(dev, BREAKPOINT_REQUEST, 0, &ret);
+}
+
+/**
+ *	aac_sa_start_adapter		-	activate adapter
+ *	@dev:	Adapter
+ *
+ *	Start up processing on an ARM based AAC adapter
+ */
+
+static void aac_sa_start_adapter(struct aac_dev *dev)
+{
+	u32 ret;
+	struct aac_init *init;
+	/*
+	 * Fill in the remaining pieces of the init.
+	 */
+	init = dev->init;
+	init->HostElapsedSeconds = cpu_to_le32(get_seconds());
+
+	/*
+	 * Tell the adapter we are back and up and running so it will scan its command
+	 * queues and enable our interrupts
+	 */
+	dev->irq_mask =	(PrintfReady | DOORBELL_1 | DOORBELL_2 | DOORBELL_3 | DOORBELL_4);
+	/*
+	 *	First clear out all interrupts.  Then enable the ones that
+	 *	we can handle.
+	 */
+	sa_writew(dev, SaDbCSR.PRISETIRQMASK, cpu_to_le16(0xffff));
+	sa_writew(dev, SaDbCSR.PRICLEARIRQMASK, (PrintfReady | DOORBELL_1 | DOORBELL_2 | DOORBELL_3 | DOORBELL_4));
+	/* We can only use a 32 bit address here */
+	sa_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, (u32)(ulong)dev->init_pa, &ret);
+}
+
+/**
+ *	aac_sa_check_health	-	check adapter health
+ *	@dev: device to check if healthy
+ *
+ *	Will attempt to determine if the specified adapter is alive and
+ *	capable of handling requests, returning 0 if alive or a negative
+ *	value on failure.
+ */
+static int aac_sa_check_health(struct aac_dev *dev)
+{
+	long status = sa_readl(dev, Mailbox7);
+
+	/*
+	 *	Check to see if the board failed any self tests.
+	 */
+	if (status & SELF_TEST_FAILED)
+		return -1;
+	/*
+	 *	Check to see if the board panic'd while booting.
+	 */
+	if (status & KERNEL_PANIC)
+		return -2;
+	/*
+	 *	Wait for the adapter to be up and running. Wait up to 3 minutes
+	 */
+	if (!(status & KERNEL_UP_AND_RUNNING))
+		return -3;
+	/*
+	 *	Everything is OK
+	 */
+	return 0;
+}
+
+/**
+ *	aac_sa_init	-	initialize an ARM based AAC card
+ *	@dev: device to configure
+ *
+ *	Allocate and set up resources for the ARM based AAC variants. The 
+ *	device_interface in the commregion will be allocated and linked 
+ *	to the comm region.
+ */
+
+int aac_sa_init(struct aac_dev *dev)
+{
+	unsigned long start;
+	unsigned long status;
+	int instance;
+	const char *name;
+
+	instance = dev->id;
+	name     = dev->name;
+
+	/*
+	 *	Map in the registers from the adapter.
+	 */
+
+	if((dev->regs.sa = ioremap((unsigned long)dev->scsi_host_ptr->base, 8192))==NULL)
+	{	
+		printk(KERN_WARNING "aacraid: unable to map ARM.\n" );
+		return -1;
+	}
+	/*
+	 *	Check to see if the board failed any self tests.
+	 */
+	if (sa_readl(dev, Mailbox7) & SELF_TEST_FAILED) {
+		printk(KERN_WARNING "%s%d: adapter self-test failed.\n", name, instance);
+		goto error_iounmap;
+	}
+	/*
+	 *	Check to see if the board panic'd while booting.
+	 */
+	if (sa_readl(dev, Mailbox7) & KERNEL_PANIC) {
+		printk(KERN_WARNING "%s%d: adapter kernel panic'd.\n", name, instance);
+		goto error_iounmap;
+	}
+	start = jiffies;
+	/*
+	 *	Wait for the adapter to be up and running. Wait up to 3 minutes.
+	 */
+	while (!(sa_readl(dev, Mailbox7) & KERNEL_UP_AND_RUNNING)) {
+		if (time_after(jiffies, start+180*HZ)) {
+			status = sa_readl(dev, Mailbox7);
+			printk(KERN_WARNING "%s%d: adapter kernel failed to start, init status = %lx.\n", 
+					name, instance, status);
+			goto error_iounmap;
+		}
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		schedule_timeout(1);
+	}
+
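+	/*
+	 *	Hook the interrupt handler; the line may be shared, hence
+	 *	SA_SHIRQ.
+	 */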
+	if (request_irq(dev->scsi_host_ptr->irq, aac_sa_intr, SA_SHIRQ|SA_INTERRUPT, "aacraid", (void *)dev ) < 0) {
+		printk(KERN_WARNING "%s%d: Interrupt unavailable.\n", name, instance);
+		goto error_iounmap;
+	}
+
+	/*
+	 *	Fill in the function dispatch table.
+	 */
+
+	dev->a_ops.adapter_interrupt = aac_sa_interrupt_adapter;
+	dev->a_ops.adapter_notify = aac_sa_notify_adapter;
+	dev->a_ops.adapter_sync_cmd = sa_sync_cmd;
+	dev->a_ops.adapter_check_health = aac_sa_check_health;
+
+
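+	/*
+	 *	aac_init_adapter sets up the communication region shared
+	 *	with the adapter firmware; NULL indicates that setup failed.
+	 */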
+	if(aac_init_adapter(dev) == NULL)
+		goto error_irq;
+
+	/*
+	 *	Start any kernel threads needed: aac_command_thread services
+	 *	commands and events originating from the adapter.
+	 */
+	dev->thread_pid = kernel_thread((int (*)(void *))aac_command_thread, dev, 0);
+	if (dev->thread_pid < 0) {
+		printk(KERN_ERR "aacraid: Unable to create command thread.\n");
+		goto error_kfree;
+	}
+
+	/*
+	 *	Tell the adapter that all is configured, and it can start
+	 *	accepting requests
+	 */
+	aac_sa_start_adapter(dev);
+	return 0;
+
+
+error_kfree:
+	kfree(dev->queues);
+
+error_irq:
+	free_irq(dev->scsi_host_ptr->irq, (void *)dev);
+
+error_iounmap:
+	iounmap(dev->regs.sa);
+
+	return -1;
+}
+