-rw-r--r--  drivers/infiniband/Kconfig | 1
-rw-r--r--  drivers/infiniband/Makefile | 1
-rw-r--r--  drivers/infiniband/core/core_priv.h | 4
-rw-r--r--  drivers/infiniband/core/device.c | 6
-rw-r--r--  drivers/infiniband/core/mad.c | 4
-rw-r--r--  drivers/infiniband/core/sysfs.c | 21
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_provider.c | 2
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_provider.c | 2
-rw-r--r--  drivers/infiniband/hw/cxgb4/provider.c | 2
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_main.c | 2
-rw-r--r--  drivers/infiniband/hw/ipath/Kconfig | 8
-rw-r--r--  drivers/infiniband/hw/ipath/Makefile | 6
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_driver.c | 28
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_iba6120.c | 1862
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_iba7220.c | 2631
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_kernel.h | 2
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_verbs.c | 2
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c | 2
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_provider.c | 2
-rw-r--r--  drivers/infiniband/hw/nes/nes_hw.c | 12
-rw-r--r--  drivers/infiniband/hw/nes/nes_nic.c | 72
-rw-r--r--  drivers/infiniband/hw/nes/nes_verbs.c | 2
-rw-r--r--  drivers/infiniband/hw/qib/Kconfig | 7
-rw-r--r--  drivers/infiniband/hw/qib/Makefile | 15
-rw-r--r--  drivers/infiniband/hw/qib/qib.h | 1439
-rw-r--r--  drivers/infiniband/hw/qib/qib_6120_regs.h | 977
-rw-r--r--  drivers/infiniband/hw/qib/qib_7220.h | 156
-rw-r--r--  drivers/infiniband/hw/qib/qib_7220_regs.h | 1496
-rw-r--r--  drivers/infiniband/hw/qib/qib_7322_regs.h | 3163
-rw-r--r--  drivers/infiniband/hw/qib/qib_common.h | 758
-rw-r--r--  drivers/infiniband/hw/qib/qib_cq.c | 484
-rw-r--r--  drivers/infiniband/hw/qib/qib_diag.c | 894
-rw-r--r--  drivers/infiniband/hw/qib/qib_dma.c | 182
-rw-r--r--  drivers/infiniband/hw/qib/qib_driver.c | 665
-rw-r--r--  drivers/infiniband/hw/qib/qib_eeprom.c | 451
-rw-r--r--  drivers/infiniband/hw/qib/qib_file_ops.c | 2317
-rw-r--r--  drivers/infiniband/hw/qib/qib_fs.c | 613
-rw-r--r--  drivers/infiniband/hw/qib/qib_iba6120.c | 3588
-rw-r--r--  drivers/infiniband/hw/qib/qib_iba7220.c | 4618
-rw-r--r--  drivers/infiniband/hw/qib/qib_iba7322.c | 8058
-rw-r--r--  drivers/infiniband/hw/qib/qib_init.c | 1580
-rw-r--r--  drivers/infiniband/hw/qib/qib_intr.c | 236
-rw-r--r--  drivers/infiniband/hw/qib/qib_keys.c | 328
-rw-r--r--  drivers/infiniband/hw/qib/qib_mad.c | 2173
-rw-r--r--  drivers/infiniband/hw/qib/qib_mad.h | 373
-rw-r--r--  drivers/infiniband/hw/qib/qib_mmap.c | 174
-rw-r--r--  drivers/infiniband/hw/qib/qib_mr.c | 503
-rw-r--r--  drivers/infiniband/hw/qib/qib_pcie.c | 738
-rw-r--r--  drivers/infiniband/hw/qib/qib_pio_copy.c | 64
-rw-r--r--  drivers/infiniband/hw/qib/qib_qp.c | 1255
-rw-r--r--  drivers/infiniband/hw/qib/qib_qsfp.c | 564
-rw-r--r--  drivers/infiniband/hw/qib/qib_qsfp.h | 184
-rw-r--r--  drivers/infiniband/hw/qib/qib_rc.c | 2288
-rw-r--r--  drivers/infiniband/hw/qib/qib_ruc.c | 817
-rw-r--r--  drivers/infiniband/hw/qib/qib_sd7220.c (renamed from drivers/infiniband/hw/ipath/ipath_sd7220.c) | 859
-rw-r--r--  drivers/infiniband/hw/qib/qib_sd7220_img.c (renamed from drivers/infiniband/hw/ipath/ipath_sd7220_img.c) | 19
-rw-r--r--  drivers/infiniband/hw/qib/qib_sdma.c | 973
-rw-r--r--  drivers/infiniband/hw/qib/qib_srq.c | 375
-rw-r--r--  drivers/infiniband/hw/qib/qib_sysfs.c | 691
-rw-r--r--  drivers/infiniband/hw/qib/qib_twsi.c | 498
-rw-r--r--  drivers/infiniband/hw/qib/qib_tx.c | 557
-rw-r--r--  drivers/infiniband/hw/qib/qib_uc.c | 555
-rw-r--r--  drivers/infiniband/hw/qib/qib_ud.c | 607
-rw-r--r--  drivers/infiniband/hw/qib/qib_user_pages.c | 157
-rw-r--r--  drivers/infiniband/hw/qib/qib_user_sdma.c | 897
-rw-r--r--  drivers/infiniband/hw/qib/qib_user_sdma.h | 52
-rw-r--r--  drivers/infiniband/hw/qib/qib_verbs.c | 2248
-rw-r--r--  drivers/infiniband/hw/qib/qib_verbs.h | 1100
-rw-r--r--  drivers/infiniband/hw/qib/qib_verbs_mcast.c | 368
-rw-r--r--  drivers/infiniband/hw/qib/qib_wc_ppc64.c (renamed from drivers/infiniband/hw/ipath/ipath_7220.h) | 49
-rw-r--r--  drivers/infiniband/hw/qib/qib_wc_x86_64.c | 171
-rw-r--r--  drivers/net/mlx4/icm.c | 36
-rw-r--r--  include/rdma/ib_verbs.h | 4
73 files changed, 50979 insertions(+), 5069 deletions(-)
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index 330d2a423362..89d70de5e235 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -43,6 +43,7 @@ config INFINIBAND_ADDR_TRANS
 
 source "drivers/infiniband/hw/mthca/Kconfig"
 source "drivers/infiniband/hw/ipath/Kconfig"
+source "drivers/infiniband/hw/qib/Kconfig"
 source "drivers/infiniband/hw/ehca/Kconfig"
 source "drivers/infiniband/hw/amso1100/Kconfig"
 source "drivers/infiniband/hw/cxgb3/Kconfig"
diff --git a/drivers/infiniband/Makefile b/drivers/infiniband/Makefile
index 0c4e589d746e..9cc7a47d3e67 100644
--- a/drivers/infiniband/Makefile
+++ b/drivers/infiniband/Makefile
@@ -1,6 +1,7 @@
 obj-$(CONFIG_INFINIBAND)		+= core/
 obj-$(CONFIG_INFINIBAND_MTHCA)		+= hw/mthca/
 obj-$(CONFIG_INFINIBAND_IPATH)		+= hw/ipath/
+obj-$(CONFIG_INFINIBAND_QIB)		+= hw/qib/
 obj-$(CONFIG_INFINIBAND_EHCA)		+= hw/ehca/
 obj-$(CONFIG_INFINIBAND_AMSO1100)	+= hw/amso1100/
 obj-$(CONFIG_INFINIBAND_CXGB3)		+= hw/cxgb3/
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index 05ac36e6acdb..a565af5c2d2e 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -38,7 +38,9 @@
 
 #include <rdma/ib_verbs.h>
 
-int  ib_device_register_sysfs(struct ib_device *device);
+int  ib_device_register_sysfs(struct ib_device *device,
+			      int (*port_callback)(struct ib_device *,
+						   u8, struct kobject *));
 void ib_device_unregister_sysfs(struct ib_device *device);
 
 int  ib_sysfs_setup(void);
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index d1fba4153332..a19effad0811 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -267,7 +267,9 @@ out:
  * callback for each device that is added. @device must be allocated
  * with ib_alloc_device().
  */
-int ib_register_device(struct ib_device *device)
+int ib_register_device(struct ib_device *device,
+		       int (*port_callback)(struct ib_device *,
+					    u8, struct kobject *))
 {
 	int ret;
 
@@ -296,7 +298,7 @@ int ib_register_device(struct ib_device *device)
 		goto out;
 	}
 
-	ret = ib_device_register_sysfs(device);
+	ret = ib_device_register_sysfs(device, port_callback);
 	if (ret) {
 		printk(KERN_WARNING "Couldn't register device %s with driver model\n",
 		       device->name);
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 6dc7b77d5d29..ef1304f151dc 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -47,8 +47,8 @@ MODULE_DESCRIPTION("kernel IB MAD API");
 MODULE_AUTHOR("Hal Rosenstock");
 MODULE_AUTHOR("Sean Hefty");
 
-int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
-int mad_recvq_size = IB_MAD_QP_RECV_SIZE;
+static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
+static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;
 
 module_param_named(send_queue_size, mad_sendq_size, int, 0444);
 MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
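
The mad.c hunk is a linkage cleanup: the tunables keep their user-visible
names because module_param_named() decouples the parameter name from the
(now file-local) C variable. A minimal sketch of the same pattern, using
hypothetical foo_* names:

	#include <linux/module.h>

	/* Sketch only: "queue_size" is the name modprobe and
	 * /sys/module/<mod>/parameters/ see; the C identifier can stay
	 * static.  Mode 0444 = readable but not writable at runtime. */
	static int foo_queue_size = 128;
	module_param_named(queue_size, foo_queue_size, int, 0444);
	MODULE_PARM_DESC(queue_size, "Size of the queue in work requests");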
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index f901957abc8b..3627300e2a10 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -475,7 +475,9 @@ err:
 	return NULL;
 }
 
-static int add_port(struct ib_device *device, int port_num)
+static int add_port(struct ib_device *device, int port_num,
+		    int (*port_callback)(struct ib_device *,
+					 u8, struct kobject *))
 {
 	struct ib_port *p;
 	struct ib_port_attr attr;
@@ -522,11 +524,20 @@ static int add_port(struct ib_device *device, int port_num)
 	if (ret)
 		goto err_free_pkey;
 
+	if (port_callback) {
+		ret = port_callback(device, port_num, &p->kobj);
+		if (ret)
+			goto err_remove_pkey;
+	}
+
 	list_add_tail(&p->kobj.entry, &device->port_list);
 
 	kobject_uevent(&p->kobj, KOBJ_ADD);
 	return 0;
 
+err_remove_pkey:
+	sysfs_remove_group(&p->kobj, &p->pkey_group);
+
 err_free_pkey:
 	for (i = 0; i < attr.pkey_tbl_len; ++i)
 		kfree(p->pkey_group.attrs[i]);
@@ -754,7 +765,9 @@ static struct attribute_group iw_stats_group = {
 	.attrs	= iw_proto_stats_attrs,
 };
 
-int ib_device_register_sysfs(struct ib_device *device)
+int ib_device_register_sysfs(struct ib_device *device,
+			     int (*port_callback)(struct ib_device *,
+						  u8, struct kobject *))
 {
 	struct device *class_dev = &device->dev;
 	int ret;
@@ -785,12 +798,12 @@ int ib_device_register_sysfs(struct ib_device *device)
 	}
 
 	if (device->node_type == RDMA_NODE_IB_SWITCH) {
-		ret = add_port(device, 0);
+		ret = add_port(device, 0, port_callback);
 		if (ret)
 			goto err_put;
 	} else {
 		for (i = 1; i <= device->phys_port_cnt; ++i) {
-			ret = add_port(device, i);
+			ret = add_port(device, i, port_callback);
 			if (ret)
 				goto err_put;
 		}
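
Taken together, the core_priv.h, device.c and sysfs.c hunks thread an
optional per-port callback from ib_register_device() down into add_port(),
so a driver (such as the new qib driver below) can create its own files
under /sys/class/infiniband/<device>/ports/<N>/, with add_port() unwinding
through the new err_remove_pkey label if the callback fails. A minimal
caller sketch, using hypothetical foo_* names:

	/* Sketch only: invoked once per port after the core has created
	 * the ports/<N>/ kobject; a nonzero return aborts add_port(). */
	static int foo_create_port_files(struct ib_device *ibdev, u8 port_num,
					 struct kobject *kobj)
	{
		/* add driver-private attributes/kobjects under @kobj */
		return 0;
	}

	...
	ret = ib_register_device(&dev->ibdev, foo_create_port_files);

Drivers with nothing to add per port simply pass NULL, which is all the
provider updates below do.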
diff --git a/drivers/infiniband/hw/amso1100/c2_provider.c b/drivers/infiniband/hw/amso1100/c2_provider.c
index c47f618d12e8..aeebc4d37e33 100644
--- a/drivers/infiniband/hw/amso1100/c2_provider.c
+++ b/drivers/infiniband/hw/amso1100/c2_provider.c
@@ -865,7 +865,7 @@ int c2_register_device(struct c2_dev *dev)
 	dev->ibdev.iwcm->create_listen = c2_service_create;
 	dev->ibdev.iwcm->destroy_listen = c2_service_destroy;
 
-	ret = ib_register_device(&dev->ibdev);
+	ret = ib_register_device(&dev->ibdev, NULL);
 	if (ret)
 		goto out_free_iwcm;
 
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 19b1c4a62a23..fca0b4b747e4 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -1428,7 +1428,7 @@ int iwch_register_device(struct iwch_dev *dev)
 	dev->ibdev.iwcm->rem_ref = iwch_qp_rem_ref;
 	dev->ibdev.iwcm->get_qp = iwch_get_qp;
 
-	ret = ib_register_device(&dev->ibdev);
+	ret = ib_register_device(&dev->ibdev, NULL);
 	if (ret)
 		goto bail1;
 
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 322134bb8b73..8f645c83a125 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -486,7 +486,7 @@ int c4iw_register_device(struct c4iw_dev *dev)
 	dev->ibdev.iwcm->rem_ref = c4iw_qp_rem_ref;
 	dev->ibdev.iwcm->get_qp = c4iw_get_qp;
 
-	ret = ib_register_device(&dev->ibdev);
+	ret = ib_register_device(&dev->ibdev, NULL);
 	if (ret)
 		goto bail1;
 
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index 129a6bebd6e3..d1a92785c9ee 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -798,7 +798,7 @@ static int __devinit ehca_probe(struct of_device *dev,
 		goto probe5;
 	}
 
-	ret = ib_register_device(&shca->ib_device);
+	ret = ib_register_device(&shca->ib_device, NULL);
 	if (ret) {
 		ehca_err(&shca->ib_device,
 			 "ib_register_device() failed ret=%i", ret);
diff --git a/drivers/infiniband/hw/ipath/Kconfig b/drivers/infiniband/hw/ipath/Kconfig
index 3c7968f25ec2..1d9bb115cbf6 100644
--- a/drivers/infiniband/hw/ipath/Kconfig
+++ b/drivers/infiniband/hw/ipath/Kconfig
@@ -1,9 +1,11 @@
 config INFINIBAND_IPATH
-	tristate "QLogic InfiniPath Driver"
-	depends on 64BIT && NET
+	tristate "QLogic HTX HCA support"
+	depends on 64BIT && NET && HT_IRQ
 	---help---
-	This is a driver for QLogic InfiniPath host channel adapters,
+	This is a driver for the obsolete QLogic Hyper-Transport
+	IB host channel adapter (model QHT7140),
 	including InfiniBand verbs support.  This driver allows these
 	devices to be used with both kernel upper level protocols such
 	as IP-over-InfiniBand as well as with userspace applications
 	(in conjunction with InfiniBand userspace access).
+	For QLogic PCIe QLE based cards, use the QIB driver instead.
diff --git a/drivers/infiniband/hw/ipath/Makefile b/drivers/infiniband/hw/ipath/Makefile
index bf9450061986..fa3df82681df 100644
--- a/drivers/infiniband/hw/ipath/Makefile
+++ b/drivers/infiniband/hw/ipath/Makefile
@@ -29,13 +29,9 @@ ib_ipath-y := \
 	ipath_user_pages.o \
 	ipath_user_sdma.o \
 	ipath_verbs_mcast.o \
-	ipath_verbs.o \
-	ipath_iba7220.o \
-	ipath_sd7220.o \
-	ipath_sd7220_img.o
+	ipath_verbs.o
 
 ib_ipath-$(CONFIG_HT_IRQ) += ipath_iba6110.o
-ib_ipath-$(CONFIG_PCI_MSI) += ipath_iba6120.o
 
 ib_ipath-$(CONFIG_X86_64) += ipath_wc_x86_64.o
 ib_ipath-$(CONFIG_PPC64) += ipath_wc_ppc64.o
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index 6302626d17f0..21337468c652 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -132,18 +132,13 @@ static int __devinit ipath_init_one(struct pci_dev *,
 
 /* Only needed for registration, nothing else needs this info */
 #define PCI_VENDOR_ID_PATHSCALE 0x1fc1
-#define PCI_VENDOR_ID_QLOGIC 0x1077
 #define PCI_DEVICE_ID_INFINIPATH_HT 0xd
-#define PCI_DEVICE_ID_INFINIPATH_PE800 0x10
-#define PCI_DEVICE_ID_INFINIPATH_7220 0x7220
 
 /* Number of seconds before our card status check...  */
 #define STATUS_TIMEOUT 60
 
 static const struct pci_device_id ipath_pci_tbl[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_HT) },
-	{ PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_PE800) },
-	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_INFINIPATH_7220) },
 	{ 0, }
 };
 
@@ -521,30 +516,9 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
 	/* setup the chip-specific functions, as early as possible. */
 	switch (ent->device) {
 	case PCI_DEVICE_ID_INFINIPATH_HT:
-#ifdef CONFIG_HT_IRQ
 		ipath_init_iba6110_funcs(dd);
 		break;
-#else
-		ipath_dev_err(dd, "QLogic HT device 0x%x cannot work if "
-			      "CONFIG_HT_IRQ is not enabled\n", ent->device);
-		return -ENODEV;
-#endif
-	case PCI_DEVICE_ID_INFINIPATH_PE800:
-#ifdef CONFIG_PCI_MSI
-		ipath_init_iba6120_funcs(dd);
-		break;
-#else
-		ipath_dev_err(dd, "QLogic PCIE device 0x%x cannot work if "
-			      "CONFIG_PCI_MSI is not enabled\n", ent->device);
-		return -ENODEV;
-#endif
-	case PCI_DEVICE_ID_INFINIPATH_7220:
-#ifndef CONFIG_PCI_MSI
-		ipath_dbg("CONFIG_PCI_MSI is not enabled, "
-			  "using INTx for unit %u\n", dd->ipath_unit);
-#endif
-		ipath_init_iba7220_funcs(dd);
-		break;
+
 	default:
 		ipath_dev_err(dd, "Found unknown QLogic deviceid 0x%x, "
 			      "failing\n", ent->device);
diff --git a/drivers/infiniband/hw/ipath/ipath_iba6120.c b/drivers/infiniband/hw/ipath/ipath_iba6120.c
deleted file mode 100644
index 4b4a30b0dabd..000000000000
--- a/drivers/infiniband/hw/ipath/ipath_iba6120.c
+++ /dev/null
@@ -1,1862 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-/*
- * This file contains all of the code that is specific to the
- * InfiniPath PCIe chip.
- */
-
-#include <linux/interrupt.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <rdma/ib_verbs.h>
-
-#include "ipath_kernel.h"
-#include "ipath_registers.h"
-
-static void ipath_setup_pe_setextled(struct ipath_devdata *, u64, u64);
-
-/*
- * This file contains all the chip-specific register information and
- * access functions for the QLogic InfiniPath PCI-Express chip.
- *
- * This lists the InfiniPath registers, in the actual chip layout.
- * This structure should never be directly accessed.
- */
-struct _infinipath_do_not_use_kernel_regs {
-	unsigned long long Revision;
-	unsigned long long Control;
-	unsigned long long PageAlign;
-	unsigned long long PortCnt;
-	unsigned long long DebugPortSelect;
-	unsigned long long Reserved0;
-	unsigned long long SendRegBase;
-	unsigned long long UserRegBase;
-	unsigned long long CounterRegBase;
-	unsigned long long Scratch;
-	unsigned long long Reserved1;
-	unsigned long long Reserved2;
-	unsigned long long IntBlocked;
-	unsigned long long IntMask;
-	unsigned long long IntStatus;
-	unsigned long long IntClear;
-	unsigned long long ErrorMask;
-	unsigned long long ErrorStatus;
-	unsigned long long ErrorClear;
-	unsigned long long HwErrMask;
-	unsigned long long HwErrStatus;
-	unsigned long long HwErrClear;
-	unsigned long long HwDiagCtrl;
-	unsigned long long MDIO;
-	unsigned long long IBCStatus;
-	unsigned long long IBCCtrl;
-	unsigned long long ExtStatus;
-	unsigned long long ExtCtrl;
-	unsigned long long GPIOOut;
-	unsigned long long GPIOMask;
-	unsigned long long GPIOStatus;
-	unsigned long long GPIOClear;
-	unsigned long long RcvCtrl;
-	unsigned long long RcvBTHQP;
-	unsigned long long RcvHdrSize;
-	unsigned long long RcvHdrCnt;
-	unsigned long long RcvHdrEntSize;
-	unsigned long long RcvTIDBase;
-	unsigned long long RcvTIDCnt;
-	unsigned long long RcvEgrBase;
-	unsigned long long RcvEgrCnt;
-	unsigned long long RcvBufBase;
-	unsigned long long RcvBufSize;
-	unsigned long long RxIntMemBase;
-	unsigned long long RxIntMemSize;
-	unsigned long long RcvPartitionKey;
-	unsigned long long Reserved3;
-	unsigned long long RcvPktLEDCnt;
-	unsigned long long Reserved4[8];
-	unsigned long long SendCtrl;
-	unsigned long long SendPIOBufBase;
-	unsigned long long SendPIOSize;
-	unsigned long long SendPIOBufCnt;
-	unsigned long long SendPIOAvailAddr;
-	unsigned long long TxIntMemBase;
-	unsigned long long TxIntMemSize;
-	unsigned long long Reserved5;
-	unsigned long long PCIeRBufTestReg0;
-	unsigned long long PCIeRBufTestReg1;
-	unsigned long long Reserved51[6];
-	unsigned long long SendBufferError;
-	unsigned long long SendBufferErrorCONT1;
-	unsigned long long Reserved6SBE[6];
-	unsigned long long RcvHdrAddr0;
-	unsigned long long RcvHdrAddr1;
-	unsigned long long RcvHdrAddr2;
-	unsigned long long RcvHdrAddr3;
-	unsigned long long RcvHdrAddr4;
-	unsigned long long Reserved7RHA[11];
-	unsigned long long RcvHdrTailAddr0;
-	unsigned long long RcvHdrTailAddr1;
-	unsigned long long RcvHdrTailAddr2;
-	unsigned long long RcvHdrTailAddr3;
-	unsigned long long RcvHdrTailAddr4;
-	unsigned long long Reserved8RHTA[11];
-	unsigned long long Reserved9SW[8];
-	unsigned long long SerdesConfig0;
-	unsigned long long SerdesConfig1;
-	unsigned long long SerdesStatus;
-	unsigned long long XGXSConfig;
-	unsigned long long IBPLLCfg;
-	unsigned long long Reserved10SW2[3];
-	unsigned long long PCIEQ0SerdesConfig0;
-	unsigned long long PCIEQ0SerdesConfig1;
-	unsigned long long PCIEQ0SerdesStatus;
-	unsigned long long Reserved11;
-	unsigned long long PCIEQ1SerdesConfig0;
-	unsigned long long PCIEQ1SerdesConfig1;
-	unsigned long long PCIEQ1SerdesStatus;
-	unsigned long long Reserved12;
-};
-
-struct _infinipath_do_not_use_counters {
-	__u64 LBIntCnt;
-	__u64 LBFlowStallCnt;
-	__u64 Reserved1;
-	__u64 TxUnsupVLErrCnt;
-	__u64 TxDataPktCnt;
-	__u64 TxFlowPktCnt;
-	__u64 TxDwordCnt;
-	__u64 TxLenErrCnt;
-	__u64 TxMaxMinLenErrCnt;
-	__u64 TxUnderrunCnt;
-	__u64 TxFlowStallCnt;
-	__u64 TxDroppedPktCnt;
-	__u64 RxDroppedPktCnt;
-	__u64 RxDataPktCnt;
-	__u64 RxFlowPktCnt;
-	__u64 RxDwordCnt;
-	__u64 RxLenErrCnt;
-	__u64 RxMaxMinLenErrCnt;
-	__u64 RxICRCErrCnt;
-	__u64 RxVCRCErrCnt;
-	__u64 RxFlowCtrlErrCnt;
-	__u64 RxBadFormatCnt;
-	__u64 RxLinkProblemCnt;
-	__u64 RxEBPCnt;
-	__u64 RxLPCRCErrCnt;
-	__u64 RxBufOvflCnt;
-	__u64 RxTIDFullErrCnt;
-	__u64 RxTIDValidErrCnt;
-	__u64 RxPKeyMismatchCnt;
-	__u64 RxP0HdrEgrOvflCnt;
-	__u64 RxP1HdrEgrOvflCnt;
-	__u64 RxP2HdrEgrOvflCnt;
-	__u64 RxP3HdrEgrOvflCnt;
-	__u64 RxP4HdrEgrOvflCnt;
-	__u64 RxP5HdrEgrOvflCnt;
-	__u64 RxP6HdrEgrOvflCnt;
-	__u64 RxP7HdrEgrOvflCnt;
-	__u64 RxP8HdrEgrOvflCnt;
-	__u64 Reserved6;
-	__u64 Reserved7;
-	__u64 IBStatusChangeCnt;
-	__u64 IBLinkErrRecoveryCnt;
-	__u64 IBLinkDownedCnt;
-	__u64 IBSymbolErrCnt;
-};
-
-#define IPATH_KREG_OFFSET(field) (offsetof( \
-	struct _infinipath_do_not_use_kernel_regs, field) / sizeof(u64))
-#define IPATH_CREG_OFFSET(field) (offsetof( \
-	struct _infinipath_do_not_use_counters, field) / sizeof(u64))
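/*
 * Worked example (editorial illustration, not part of the original file):
 * the macros above turn a field of the layout struct into a 64-bit
 * register index.  IntMask is the fourteenth u64 of the struct, at byte
 * offset 104, so IPATH_KREG_OFFSET(IntMask) == 104 / sizeof(u64) == 13;
 * kr_intmask is chip register number 13.
 */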
-
-static const struct ipath_kregs ipath_pe_kregs = {
-	.kr_control = IPATH_KREG_OFFSET(Control),
-	.kr_counterregbase = IPATH_KREG_OFFSET(CounterRegBase),
-	.kr_debugportselect = IPATH_KREG_OFFSET(DebugPortSelect),
-	.kr_errorclear = IPATH_KREG_OFFSET(ErrorClear),
-	.kr_errormask = IPATH_KREG_OFFSET(ErrorMask),
-	.kr_errorstatus = IPATH_KREG_OFFSET(ErrorStatus),
-	.kr_extctrl = IPATH_KREG_OFFSET(ExtCtrl),
-	.kr_extstatus = IPATH_KREG_OFFSET(ExtStatus),
-	.kr_gpio_clear = IPATH_KREG_OFFSET(GPIOClear),
-	.kr_gpio_mask = IPATH_KREG_OFFSET(GPIOMask),
-	.kr_gpio_out = IPATH_KREG_OFFSET(GPIOOut),
-	.kr_gpio_status = IPATH_KREG_OFFSET(GPIOStatus),
-	.kr_hwdiagctrl = IPATH_KREG_OFFSET(HwDiagCtrl),
-	.kr_hwerrclear = IPATH_KREG_OFFSET(HwErrClear),
-	.kr_hwerrmask = IPATH_KREG_OFFSET(HwErrMask),
-	.kr_hwerrstatus = IPATH_KREG_OFFSET(HwErrStatus),
-	.kr_ibcctrl = IPATH_KREG_OFFSET(IBCCtrl),
-	.kr_ibcstatus = IPATH_KREG_OFFSET(IBCStatus),
-	.kr_intblocked = IPATH_KREG_OFFSET(IntBlocked),
-	.kr_intclear = IPATH_KREG_OFFSET(IntClear),
-	.kr_intmask = IPATH_KREG_OFFSET(IntMask),
-	.kr_intstatus = IPATH_KREG_OFFSET(IntStatus),
-	.kr_mdio = IPATH_KREG_OFFSET(MDIO),
-	.kr_pagealign = IPATH_KREG_OFFSET(PageAlign),
-	.kr_partitionkey = IPATH_KREG_OFFSET(RcvPartitionKey),
-	.kr_portcnt = IPATH_KREG_OFFSET(PortCnt),
-	.kr_rcvbthqp = IPATH_KREG_OFFSET(RcvBTHQP),
-	.kr_rcvbufbase = IPATH_KREG_OFFSET(RcvBufBase),
-	.kr_rcvbufsize = IPATH_KREG_OFFSET(RcvBufSize),
-	.kr_rcvctrl = IPATH_KREG_OFFSET(RcvCtrl),
-	.kr_rcvegrbase = IPATH_KREG_OFFSET(RcvEgrBase),
-	.kr_rcvegrcnt = IPATH_KREG_OFFSET(RcvEgrCnt),
-	.kr_rcvhdrcnt = IPATH_KREG_OFFSET(RcvHdrCnt),
-	.kr_rcvhdrentsize = IPATH_KREG_OFFSET(RcvHdrEntSize),
-	.kr_rcvhdrsize = IPATH_KREG_OFFSET(RcvHdrSize),
-	.kr_rcvintmembase = IPATH_KREG_OFFSET(RxIntMemBase),
-	.kr_rcvintmemsize = IPATH_KREG_OFFSET(RxIntMemSize),
-	.kr_rcvtidbase = IPATH_KREG_OFFSET(RcvTIDBase),
-	.kr_rcvtidcnt = IPATH_KREG_OFFSET(RcvTIDCnt),
-	.kr_revision = IPATH_KREG_OFFSET(Revision),
-	.kr_scratch = IPATH_KREG_OFFSET(Scratch),
-	.kr_sendbuffererror = IPATH_KREG_OFFSET(SendBufferError),
-	.kr_sendctrl = IPATH_KREG_OFFSET(SendCtrl),
-	.kr_sendpioavailaddr = IPATH_KREG_OFFSET(SendPIOAvailAddr),
-	.kr_sendpiobufbase = IPATH_KREG_OFFSET(SendPIOBufBase),
-	.kr_sendpiobufcnt = IPATH_KREG_OFFSET(SendPIOBufCnt),
-	.kr_sendpiosize = IPATH_KREG_OFFSET(SendPIOSize),
-	.kr_sendregbase = IPATH_KREG_OFFSET(SendRegBase),
-	.kr_txintmembase = IPATH_KREG_OFFSET(TxIntMemBase),
-	.kr_txintmemsize = IPATH_KREG_OFFSET(TxIntMemSize),
-	.kr_userregbase = IPATH_KREG_OFFSET(UserRegBase),
-	.kr_serdesconfig0 = IPATH_KREG_OFFSET(SerdesConfig0),
-	.kr_serdesconfig1 = IPATH_KREG_OFFSET(SerdesConfig1),
-	.kr_serdesstatus = IPATH_KREG_OFFSET(SerdesStatus),
-	.kr_xgxsconfig = IPATH_KREG_OFFSET(XGXSConfig),
-	.kr_ibpllcfg = IPATH_KREG_OFFSET(IBPLLCfg),
-
-	/*
-	 * These should not be used directly via ipath_write_kreg64();
-	 * use them with ipath_write_kreg64_port().
-	 */
-	.kr_rcvhdraddr = IPATH_KREG_OFFSET(RcvHdrAddr0),
-	.kr_rcvhdrtailaddr = IPATH_KREG_OFFSET(RcvHdrTailAddr0),
-
-	/* The rcvpktled register controls one of the debug port signals, so
-	 * a packet activity LED can be connected to it. */
-	.kr_rcvpktledcnt = IPATH_KREG_OFFSET(RcvPktLEDCnt),
-	.kr_pcierbuftestreg0 = IPATH_KREG_OFFSET(PCIeRBufTestReg0),
-	.kr_pcierbuftestreg1 = IPATH_KREG_OFFSET(PCIeRBufTestReg1),
-	.kr_pcieq0serdesconfig0 = IPATH_KREG_OFFSET(PCIEQ0SerdesConfig0),
-	.kr_pcieq0serdesconfig1 = IPATH_KREG_OFFSET(PCIEQ0SerdesConfig1),
-	.kr_pcieq0serdesstatus = IPATH_KREG_OFFSET(PCIEQ0SerdesStatus),
-	.kr_pcieq1serdesconfig0 = IPATH_KREG_OFFSET(PCIEQ1SerdesConfig0),
-	.kr_pcieq1serdesconfig1 = IPATH_KREG_OFFSET(PCIEQ1SerdesConfig1),
-	.kr_pcieq1serdesstatus = IPATH_KREG_OFFSET(PCIEQ1SerdesStatus)
-};
-
-static const struct ipath_cregs ipath_pe_cregs = {
-	.cr_badformatcnt = IPATH_CREG_OFFSET(RxBadFormatCnt),
-	.cr_erricrccnt = IPATH_CREG_OFFSET(RxICRCErrCnt),
-	.cr_errlinkcnt = IPATH_CREG_OFFSET(RxLinkProblemCnt),
-	.cr_errlpcrccnt = IPATH_CREG_OFFSET(RxLPCRCErrCnt),
-	.cr_errpkey = IPATH_CREG_OFFSET(RxPKeyMismatchCnt),
-	.cr_errrcvflowctrlcnt = IPATH_CREG_OFFSET(RxFlowCtrlErrCnt),
-	.cr_err_rlencnt = IPATH_CREG_OFFSET(RxLenErrCnt),
-	.cr_errslencnt = IPATH_CREG_OFFSET(TxLenErrCnt),
-	.cr_errtidfull = IPATH_CREG_OFFSET(RxTIDFullErrCnt),
-	.cr_errtidvalid = IPATH_CREG_OFFSET(RxTIDValidErrCnt),
-	.cr_errvcrccnt = IPATH_CREG_OFFSET(RxVCRCErrCnt),
-	.cr_ibstatuschange = IPATH_CREG_OFFSET(IBStatusChangeCnt),
-	.cr_intcnt = IPATH_CREG_OFFSET(LBIntCnt),
-	.cr_invalidrlencnt = IPATH_CREG_OFFSET(RxMaxMinLenErrCnt),
-	.cr_invalidslencnt = IPATH_CREG_OFFSET(TxMaxMinLenErrCnt),
-	.cr_lbflowstallcnt = IPATH_CREG_OFFSET(LBFlowStallCnt),
-	.cr_pktrcvcnt = IPATH_CREG_OFFSET(RxDataPktCnt),
-	.cr_pktrcvflowctrlcnt = IPATH_CREG_OFFSET(RxFlowPktCnt),
-	.cr_pktsendcnt = IPATH_CREG_OFFSET(TxDataPktCnt),
-	.cr_pktsendflowcnt = IPATH_CREG_OFFSET(TxFlowPktCnt),
-	.cr_portovflcnt = IPATH_CREG_OFFSET(RxP0HdrEgrOvflCnt),
-	.cr_rcvebpcnt = IPATH_CREG_OFFSET(RxEBPCnt),
-	.cr_rcvovflcnt = IPATH_CREG_OFFSET(RxBufOvflCnt),
-	.cr_senddropped = IPATH_CREG_OFFSET(TxDroppedPktCnt),
-	.cr_sendstallcnt = IPATH_CREG_OFFSET(TxFlowStallCnt),
-	.cr_sendunderruncnt = IPATH_CREG_OFFSET(TxUnderrunCnt),
-	.cr_wordrcvcnt = IPATH_CREG_OFFSET(RxDwordCnt),
-	.cr_wordsendcnt = IPATH_CREG_OFFSET(TxDwordCnt),
-	.cr_unsupvlcnt = IPATH_CREG_OFFSET(TxUnsupVLErrCnt),
-	.cr_rxdroppktcnt = IPATH_CREG_OFFSET(RxDroppedPktCnt),
-	.cr_iblinkerrrecovcnt = IPATH_CREG_OFFSET(IBLinkErrRecoveryCnt),
-	.cr_iblinkdowncnt = IPATH_CREG_OFFSET(IBLinkDownedCnt),
-	.cr_ibsymbolerrcnt = IPATH_CREG_OFFSET(IBSymbolErrCnt)
-};
-
-/* kr_control bits */
-#define INFINIPATH_C_RESET 1U
-
-/* kr_intstatus, kr_intclear, kr_intmask bits */
-#define INFINIPATH_I_RCVURG_MASK ((1U<<5)-1)
-#define INFINIPATH_I_RCVURG_SHIFT 0
-#define INFINIPATH_I_RCVAVAIL_MASK ((1U<<5)-1)
-#define INFINIPATH_I_RCVAVAIL_SHIFT 12
-
-/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus bits */
-#define INFINIPATH_HWE_PCIEMEMPARITYERR_MASK  0x000000000000003fULL
-#define INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT 0
-#define INFINIPATH_HWE_PCIEPOISONEDTLP      0x0000000010000000ULL
-#define INFINIPATH_HWE_PCIECPLTIMEOUT       0x0000000020000000ULL
-#define INFINIPATH_HWE_PCIEBUSPARITYXTLH    0x0000000040000000ULL
-#define INFINIPATH_HWE_PCIEBUSPARITYXADM    0x0000000080000000ULL
-#define INFINIPATH_HWE_PCIEBUSPARITYRADM    0x0000000100000000ULL
-#define INFINIPATH_HWE_COREPLL_FBSLIP       0x0080000000000000ULL
-#define INFINIPATH_HWE_COREPLL_RFSLIP       0x0100000000000000ULL
-#define INFINIPATH_HWE_PCIE1PLLFAILED       0x0400000000000000ULL
-#define INFINIPATH_HWE_PCIE0PLLFAILED       0x0800000000000000ULL
-#define INFINIPATH_HWE_SERDESPLLFAILED      0x1000000000000000ULL
-
-#define IBA6120_IBCS_LINKTRAININGSTATE_MASK 0xf
-#define IBA6120_IBCS_LINKSTATE_SHIFT 4
-
-/* kr_extstatus bits */
-#define INFINIPATH_EXTS_FREQSEL 0x2
-#define INFINIPATH_EXTS_SERDESSEL 0x4
-#define INFINIPATH_EXTS_MEMBIST_ENDTEST     0x0000000000004000
-#define INFINIPATH_EXTS_MEMBIST_FOUND       0x0000000000008000
-
-/* kr_xgxsconfig bits */
-#define INFINIPATH_XGXS_RESET          0x5ULL
-
-#define _IPATH_GPIO_SDA_NUM 1
-#define _IPATH_GPIO_SCL_NUM 0
-
-#define IPATH_GPIO_SDA (1ULL << \
-	(_IPATH_GPIO_SDA_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT))
-#define IPATH_GPIO_SCL (1ULL << \
-	(_IPATH_GPIO_SCL_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT))
-
-#define INFINIPATH_RT_BUFSIZE_MASK 0xe0000000ULL
-#define INFINIPATH_RT_BUFSIZE_SHIFTVAL(tid) \
-	((((tid) & INFINIPATH_RT_BUFSIZE_MASK) >> 29) + 11 - 1)
-#define INFINIPATH_RT_BUFSIZE(tid) (1 << INFINIPATH_RT_BUFSIZE_SHIFTVAL(tid))
-#define INFINIPATH_RT_IS_VALID(tid) \
-	(((tid) & INFINIPATH_RT_BUFSIZE_MASK) && \
-	 ((((tid) & INFINIPATH_RT_BUFSIZE_MASK) != INFINIPATH_RT_BUFSIZE_MASK)))
-#define INFINIPATH_RT_ADDR_MASK 0x1FFFFFFFULL /* 29 bits valid */
-#define INFINIPATH_RT_ADDR_SHIFT 10
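/*
 * Worked example (editorial illustration, not part of the original file):
 * a TID word whose BUFSIZE field (bits 31:29) is 2 gives a shift value of
 * 2 + 11 - 1 = 12, so INFINIPATH_RT_BUFSIZE() reports a 1 << 12 =
 * 4096-byte buffer.  INFINIPATH_RT_IS_VALID() rejects all-zeros and
 * all-ones fields, which are reserved encodings.
 */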
-
-#define INFINIPATH_R_INTRAVAIL_SHIFT 16
-#define INFINIPATH_R_TAILUPD_SHIFT 31
-
-/* 6120 specific hardware errors... */
-static const struct ipath_hwerror_msgs ipath_6120_hwerror_msgs[] = {
-	INFINIPATH_HWE_MSG(PCIEPOISONEDTLP, "PCIe Poisoned TLP"),
-	INFINIPATH_HWE_MSG(PCIECPLTIMEOUT, "PCIe completion timeout"),
-	/*
-	 * In practice, it's unlikely that we'll see PCIe PLL, or bus
-	 * parity or memory parity error failures, because most likely we
-	 * won't be able to talk to the core of the chip.  Nonetheless, we
-	 * might see them, if they are in parts of the PCIe core that aren't
-	 * essential.
-	 */
-	INFINIPATH_HWE_MSG(PCIE1PLLFAILED, "PCIePLL1"),
-	INFINIPATH_HWE_MSG(PCIE0PLLFAILED, "PCIePLL0"),
-	INFINIPATH_HWE_MSG(PCIEBUSPARITYXTLH, "PCIe XTLH core parity"),
-	INFINIPATH_HWE_MSG(PCIEBUSPARITYXADM, "PCIe ADM TX core parity"),
-	INFINIPATH_HWE_MSG(PCIEBUSPARITYRADM, "PCIe ADM RX core parity"),
-	INFINIPATH_HWE_MSG(RXDSYNCMEMPARITYERR, "Rx Dsync"),
-	INFINIPATH_HWE_MSG(SERDESPLLFAILED, "SerDes PLL"),
-};
-
-#define TXE_PIO_PARITY ((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF | \
-		        INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC) \
-		        << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT)
-#define RXE_EAGER_PARITY (INFINIPATH_HWE_RXEMEMPARITYERR_EAGERTID \
-			  << INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT)
-
-static void ipath_pe_put_tid_2(struct ipath_devdata *, u64 __iomem *,
-			       u32, unsigned long);
-
-/*
- * On platforms using this chip, and not having ordered WC stores, we
- * can get TXE parity errors due to speculative reads to the PIO buffers,
- * and this, due to a chip bug can result in (many) false parity error
- * reports.  So it's a debug print on those, and an info print on systems
- * where the speculative reads don't occur.
- */
-static void ipath_pe_txe_recover(struct ipath_devdata *dd)
-{
-	if (ipath_unordered_wc())
-		ipath_dbg("Recovering from TXE PIO parity error\n");
-	else {
-		++ipath_stats.sps_txeparity;
-		dev_info(&dd->pcidev->dev,
-			"Recovering from TXE PIO parity error\n");
-	}
-}
-
-
-/**
- * ipath_pe_handle_hwerrors - display hardware errors.
- * @dd: the infinipath device
- * @msg: the output buffer
- * @msgl: the size of the output buffer
- *
- * Most hardware errors are catastrophic, but for right now, we'll
- * print them and continue.  We reuse the same message buffer as
- * ipath_handle_errors() to avoid excessive stack usage.
- */
-static void ipath_pe_handle_hwerrors(struct ipath_devdata *dd, char *msg,
-				     size_t msgl)
-{
-	ipath_err_t hwerrs;
-	u32 bits, ctrl;
-	int isfatal = 0;
-	char bitsmsg[64];
-	int log_idx;
-
-	hwerrs = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus);
-	if (!hwerrs) {
-		/*
-		 * better than printing confusing messages.
-		 * This seems to be related to clearing the crc error, or
-		 * the pll error during init.
-		 */
-		ipath_cdbg(VERBOSE, "Called but no hardware errors set\n");
-		return;
-	} else if (hwerrs == ~0ULL) {
-		ipath_dev_err(dd, "Read of hardware error status failed "
-			      "(all bits set); ignoring\n");
-		return;
-	}
-	ipath_stats.sps_hwerrs++;
-
-	/* Always clear the error status register, except MEMBISTFAIL,
-	 * regardless of whether we continue or stop using the chip.
-	 * We want that set so we know it failed, even across driver reload.
-	 * We'll still ignore it in the hwerrmask.  We do this partly for
-	 * diagnostics, but also for support */
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
-			 hwerrs&~INFINIPATH_HWE_MEMBISTFAILED);
-
-	hwerrs &= dd->ipath_hwerrmask;
-
-	/* We log some errors to EEPROM, check if we have any of those. */
-	for (log_idx = 0; log_idx < IPATH_EEP_LOG_CNT; ++log_idx)
-		if (hwerrs & dd->ipath_eep_st_masks[log_idx].hwerrs_to_log)
-			ipath_inc_eeprom_err(dd, log_idx, 1);
-
-	/*
-	 * make sure we get this much out, unless told to be quiet,
-	 * or it's occurred within the last 5 seconds
-	 */
-	if ((hwerrs & ~(dd->ipath_lasthwerror | TXE_PIO_PARITY |
-			RXE_EAGER_PARITY)) ||
-	    (ipath_debug & __IPATH_VERBDBG))
-		dev_info(&dd->pcidev->dev, "Hardware error: hwerr=0x%llx "
-			 "(cleared)\n", (unsigned long long) hwerrs);
-	dd->ipath_lasthwerror |= hwerrs;
-
-	if (hwerrs & ~dd->ipath_hwe_bitsextant)
-		ipath_dev_err(dd, "hwerror interrupt with unknown errors "
-			      "%llx set\n", (unsigned long long)
-			      (hwerrs & ~dd->ipath_hwe_bitsextant));
-
-	ctrl = ipath_read_kreg32(dd, dd->ipath_kregs->kr_control);
-	if ((ctrl & INFINIPATH_C_FREEZEMODE) && !ipath_diag_inuse) {
-		/*
-		 * parity errors in send memory are recoverable,
-		 * just cancel the send (if indicated in sendbuffererror),
-		 * count the occurrence, unfreeze (if no other handled
-		 * hardware error bits are set), and continue. They can
-		 * occur if a processor speculative read is done to the PIO
-		 * buffer while we are sending a packet, for example.
-		 */
-		if (hwerrs & TXE_PIO_PARITY) {
-			ipath_pe_txe_recover(dd);
-			hwerrs &= ~TXE_PIO_PARITY;
-		}
-		if (!hwerrs) {
-			static u32 freeze_cnt;
-
-			freeze_cnt++;
-			ipath_dbg("Clearing freezemode on ignored or recovered "
-				  "hardware error (%u)\n", freeze_cnt);
-			ipath_clear_freeze(dd);
-		}
-	}
-
-	*msg = '\0';
-
-	if (hwerrs & INFINIPATH_HWE_MEMBISTFAILED) {
-		strlcat(msg, "[Memory BIST test failed, InfiniPath hardware unusable]",
-			msgl);
-		/* ignore from now on, so disable until driver reloaded */
-		*dd->ipath_statusp |= IPATH_STATUS_HWERROR;
-		dd->ipath_hwerrmask &= ~INFINIPATH_HWE_MEMBISTFAILED;
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
-				 dd->ipath_hwerrmask);
-	}
-
-	ipath_format_hwerrors(hwerrs,
-			      ipath_6120_hwerror_msgs,
-			      sizeof(ipath_6120_hwerror_msgs)/
-			      sizeof(ipath_6120_hwerror_msgs[0]),
-			      msg, msgl);
-
-	if (hwerrs & (INFINIPATH_HWE_PCIEMEMPARITYERR_MASK
-		      << INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT)) {
-		bits = (u32) ((hwerrs >>
-			       INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT) &
-			      INFINIPATH_HWE_PCIEMEMPARITYERR_MASK);
-		snprintf(bitsmsg, sizeof bitsmsg,
-			 "[PCIe Mem Parity Errs %x] ", bits);
-		strlcat(msg, bitsmsg, msgl);
-	}
-
-#define _IPATH_PLL_FAIL (INFINIPATH_HWE_COREPLL_FBSLIP |	\
-			 INFINIPATH_HWE_COREPLL_RFSLIP )
-
-	if (hwerrs & _IPATH_PLL_FAIL) {
-		snprintf(bitsmsg, sizeof bitsmsg,
-			 "[PLL failed (%llx), InfiniPath hardware unusable]",
-			 (unsigned long long) hwerrs & _IPATH_PLL_FAIL);
-		strlcat(msg, bitsmsg, msgl);
-		/* ignore from now on, so disable until driver reloaded */
-		dd->ipath_hwerrmask &= ~(hwerrs & _IPATH_PLL_FAIL);
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
-				 dd->ipath_hwerrmask);
-	}
-
-	if (hwerrs & INFINIPATH_HWE_SERDESPLLFAILED) {
-		/*
-		 * If it occurs, it is left masked since the external
-		 * interface is unused
-		 */
-		dd->ipath_hwerrmask &= ~INFINIPATH_HWE_SERDESPLLFAILED;
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
-				 dd->ipath_hwerrmask);
-	}
-
-	if (hwerrs) {
-		/*
-		 * if any are set that we aren't ignoring; only
-		 * make the complaint once, in case it's stuck
-		 * or recurring, and we get here multiple
-		 * times.
-		 */
-		ipath_dev_err(dd, "%s hardware error\n", msg);
-		if (dd->ipath_flags & IPATH_INITTED) {
-			ipath_set_linkstate(dd, IPATH_IB_LINKDOWN);
-			ipath_setup_pe_setextled(dd,
-				INFINIPATH_IBCS_L_STATE_DOWN,
-				INFINIPATH_IBCS_LT_STATE_DISABLED);
-			ipath_dev_err(dd, "Fatal Hardware Error (freeze "
-					  "mode), no longer usable, SN %.16s\n",
-					  dd->ipath_serial);
-			isfatal = 1;
-		}
-		*dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
-		/* mark as having had error */
-		*dd->ipath_statusp |= IPATH_STATUS_HWERROR;
-		/*
-		 * mark as not usable, at a minimum until driver
-		 * is reloaded, probably until reboot, since no
-		 * other reset is possible.
-		 */
-		dd->ipath_flags &= ~IPATH_INITTED;
-	} else
-		*msg = 0; /* recovered from all of them */
-
-	if (isfatal && !ipath_diag_inuse && dd->ipath_freezemsg && msg) {
-		/*
-		 * for the /sys status file; if no trailing brace is copied,
-		 * we'll know it was truncated.
-		 */
-		snprintf(dd->ipath_freezemsg, dd->ipath_freezelen,
-			 "{%s}", msg);
-	}
-}
-
-/**
- * ipath_pe_boardname - fill in the board name
- * @dd: the infinipath device
- * @name: the output buffer
- * @namelen: the size of the output buffer
- *
- * info is based on the board revision register
- */
-static int ipath_pe_boardname(struct ipath_devdata *dd, char *name,
-			      size_t namelen)
-{
-	char *n = NULL;
-	u8 boardrev = dd->ipath_boardrev;
-	int ret;
-
-	switch (boardrev) {
-	case 0:
-		n = "InfiniPath_Emulation";
-		break;
-	case 1:
-		n = "InfiniPath_QLE7140-Bringup";
-		break;
-	case 2:
-		n = "InfiniPath_QLE7140";
-		break;
-	case 3:
-		n = "InfiniPath_QMI7140";
-		break;
-	case 4:
-		n = "InfiniPath_QEM7140";
-		break;
-	case 5:
-		n = "InfiniPath_QMH7140";
-		break;
-	case 6:
-		n = "InfiniPath_QLE7142";
-		break;
-	default:
-		ipath_dev_err(dd,
-			      "Don't yet know about board with ID %u\n",
-			      boardrev);
-		snprintf(name, namelen, "Unknown_InfiniPath_PCIe_%u",
-			 boardrev);
-		break;
-	}
-	if (n)
-		snprintf(name, namelen, "%s", n);
-
-	if (dd->ipath_majrev != 4 || !dd->ipath_minrev || dd->ipath_minrev > 2) {
-		ipath_dev_err(dd, "Unsupported InfiniPath hardware revision %u.%u!\n",
-			      dd->ipath_majrev, dd->ipath_minrev);
-		ret = 1;
-	} else {
-		ret = 0;
-		if (dd->ipath_minrev >= 2)
-			dd->ipath_f_put_tid = ipath_pe_put_tid_2;
-	}
-
-	/*
-	 * set here, not in ipath_init_*_funcs because we have to do
-	 * it after we can read chip registers.
-	 */
-	dd->ipath_ureg_align =
-		ipath_read_kreg32(dd, dd->ipath_kregs->kr_pagealign);
-
-	return ret;
-}
-
-/**
- * ipath_pe_init_hwerrors - enable hardware errors
- * @dd: the infinipath device
- *
- * now that we have finished initializing everything that might reasonably
- * cause a hardware error, and cleared those error bits as they occur,
- * we can enable hardware errors in the mask (potentially enabling
- * freeze mode), and enable hardware errors as errors (along with
- * everything else) in errormask
- */
-static void ipath_pe_init_hwerrors(struct ipath_devdata *dd)
-{
-	ipath_err_t val;
-	u64 extsval;
-
-	extsval = ipath_read_kreg64(dd, dd->ipath_kregs->kr_extstatus);
-
-	if (!(extsval & INFINIPATH_EXTS_MEMBIST_ENDTEST))
-		ipath_dev_err(dd, "MemBIST did not complete!\n");
-	if (extsval & INFINIPATH_EXTS_MEMBIST_FOUND)
-		ipath_dbg("MemBIST corrected\n");
-
-	val = ~0ULL;	/* barring bugs, all hwerrors become interrupts, */
-
-	if (!dd->ipath_boardrev)	/* no PLL for Emulator */
-		val &= ~INFINIPATH_HWE_SERDESPLLFAILED;
-
-	if (dd->ipath_minrev < 2) {
-		/* workaround bug 9460 in internal interface bus parity
-		 * checking. Fixed (HW bug 9490) in Rev2.
-		 */
-		val &= ~INFINIPATH_HWE_PCIEBUSPARITYRADM;
-	}
-	dd->ipath_hwerrmask = val;
-}
-
-/**
- * ipath_pe_bringup_serdes - bring up the serdes
- * @dd: the infinipath device
- */
-static int ipath_pe_bringup_serdes(struct ipath_devdata *dd)
-{
-	u64 val, config1, prev_val;
-	int ret = 0;
-
-	ipath_dbg("Trying to bringup serdes\n");
-
-	if (ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus) &
-	    INFINIPATH_HWE_SERDESPLLFAILED) {
-		ipath_dbg("At start, serdes PLL failed bit set "
-			  "in hwerrstatus, clearing and continuing\n");
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
-				 INFINIPATH_HWE_SERDESPLLFAILED);
-	}
-
-	dd->ibdeltainprog = 1;
-	dd->ibsymsnap =
-	     ipath_read_creg32(dd, dd->ipath_cregs->cr_ibsymbolerrcnt);
-	dd->iblnkerrsnap =
-	     ipath_read_creg32(dd, dd->ipath_cregs->cr_iblinkerrrecovcnt);
-
-	val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0);
-	config1 = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig1);
-
-	ipath_cdbg(VERBOSE, "SerDes status config0=%llx config1=%llx, "
-		   "xgxsconfig %llx\n", (unsigned long long) val,
-		   (unsigned long long) config1, (unsigned long long)
-		   ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig));
-
-	/*
-	 * Force reset on, also set rxdetect enable.  Must do before reading
-	 * serdesstatus at least for simulation, or some of the bits in
-	 * serdes status will come back as undefined and cause simulation
-	 * failures
-	 */
-	val |= INFINIPATH_SERDC0_RESET_PLL | INFINIPATH_SERDC0_RXDETECT_EN
-		| INFINIPATH_SERDC0_L1PWR_DN;
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val);
-	/* be sure chip saw it */
-	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-	udelay(5);		/* need pll reset set at least for a bit */
-	/*
-	 * after PLL is reset, set the per-lane Resets and TxIdle and
-	 * clear the PLL reset and rxdetect (to get falling edge).
-	 * Leave L1PWR bits set (permanently)
-	 */
-	val &= ~(INFINIPATH_SERDC0_RXDETECT_EN | INFINIPATH_SERDC0_RESET_PLL
-		 | INFINIPATH_SERDC0_L1PWR_DN);
-	val |= INFINIPATH_SERDC0_RESET_MASK | INFINIPATH_SERDC0_TXIDLE;
-	ipath_cdbg(VERBOSE, "Clearing pll reset and setting lane resets "
-		   "and txidle (%llx)\n", (unsigned long long) val);
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val);
-	/* be sure chip saw it */
-	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-	/* need PLL reset clear for at least 11 usec before lane
-	 * resets cleared; give it a few more to be sure */
-	udelay(15);
-	val &= ~(INFINIPATH_SERDC0_RESET_MASK | INFINIPATH_SERDC0_TXIDLE);
-
-	ipath_cdbg(VERBOSE, "Clearing lane resets and txidle "
-		   "(writing %llx)\n", (unsigned long long) val);
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val);
-	/* be sure chip saw it */
-	val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-
-	val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
-	prev_val = val;
-	if (val & INFINIPATH_XGXS_RESET)
-		val &= ~INFINIPATH_XGXS_RESET;
-	if (((val >> INFINIPATH_XGXS_RX_POL_SHIFT) &
-	     INFINIPATH_XGXS_RX_POL_MASK) != dd->ipath_rx_pol_inv ) {
-		/* need to compensate for Tx inversion in partner */
-		val &= ~(INFINIPATH_XGXS_RX_POL_MASK <<
-		         INFINIPATH_XGXS_RX_POL_SHIFT);
-		val |= dd->ipath_rx_pol_inv <<
-			INFINIPATH_XGXS_RX_POL_SHIFT;
-	}
-	if (val != prev_val)
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
-
-	val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0);
-
-	/* clear current and de-emphasis bits */
-	config1 &= ~0x0ffffffff00ULL;
-	/* set current to 20ma */
-	config1 |= 0x00000000000ULL;
-	/* set de-emphasis to -5.68dB */
-	config1 |= 0x0cccc000000ULL;
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig1, config1);
-
-	ipath_cdbg(VERBOSE, "done: SerDes status config0=%llx "
-		   "config1=%llx, sstatus=%llx xgxs=%llx\n",
-		   (unsigned long long) val, (unsigned long long) config1,
-		   (unsigned long long)
-		   ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesstatus),
-		   (unsigned long long)
-		   ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig));
-
-	return ret;
-}
-
-/**
- * ipath_pe_quiet_serdes - set serdes to txidle
- * @dd: the infinipath device
- * Called when driver is being unloaded
- */
-static void ipath_pe_quiet_serdes(struct ipath_devdata *dd)
-{
-	u64 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0);
-
-	if (dd->ibsymdelta || dd->iblnkerrdelta ||
-	    dd->ibdeltainprog) {
-		u64 diagc;
-		/* enable counter writes */
-		diagc = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwdiagctrl);
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_hwdiagctrl,
-				 diagc | INFINIPATH_DC_COUNTERWREN);
-
-		if (dd->ibsymdelta || dd->ibdeltainprog) {
-			val = ipath_read_creg32(dd,
-					dd->ipath_cregs->cr_ibsymbolerrcnt);
-			if (dd->ibdeltainprog)
-				val -= val - dd->ibsymsnap;
-			val -= dd->ibsymdelta;
-			ipath_write_creg(dd,
-				  dd->ipath_cregs->cr_ibsymbolerrcnt, val);
-		}
-		if (dd->iblnkerrdelta || dd->ibdeltainprog) {
-			val = ipath_read_creg32(dd,
-					dd->ipath_cregs->cr_iblinkerrrecovcnt);
-			if (dd->ibdeltainprog)
-				val -= val - dd->iblnkerrsnap;
-			val -= dd->iblnkerrdelta;
-			ipath_write_creg(dd,
-				   dd->ipath_cregs->cr_iblinkerrrecovcnt, val);
-		}
-
-		/* and disable counter writes */
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_hwdiagctrl, diagc);
-	}
-	val |= INFINIPATH_SERDC0_TXIDLE;
-	ipath_dbg("Setting TxIdleEn on serdes (config0 = %llx)\n",
-		  (unsigned long long) val);
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val);
-}
-
-static int ipath_pe_intconfig(struct ipath_devdata *dd)
-{
-	u32 chiprev;
-
-	/*
-	 * If the chip supports added error indication via GPIO pins,
-	 * enable interrupts on those bits so the interrupt routine
-	 * can count the events. Also set flag so interrupt routine
-	 * can know they are expected.
-	 */
-	chiprev = dd->ipath_revision >> INFINIPATH_R_CHIPREVMINOR_SHIFT;
-	if ((chiprev & INFINIPATH_R_CHIPREVMINOR_MASK) > 1) {
-		/* Rev2+ reports extra errors via internal GPIO pins */
-		dd->ipath_flags |= IPATH_GPIO_ERRINTRS;
-		dd->ipath_gpio_mask |= IPATH_GPIO_ERRINTR_MASK;
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
-				 dd->ipath_gpio_mask);
-	}
-	return 0;
-}
-
-/**
- * ipath_setup_pe_setextled - set the state of the two external LEDs
- * @dd: the infinipath device
- * @lst: the L state
- * @ltst: the LT state
- *
- * These LEDs indicate the physical and logical state of IB link.
- * For this chip (at least with recommended board pinouts), LED1
- * is Yellow (logical state) and LED2 is Green (physical state),
- *
- * Note:  We try to match the Mellanox HCA LED behavior as best
- * we can.  Green indicates physical link state is OK (something is
- * plugged in, and we can train).
- * Amber indicates the link is logically up (ACTIVE).
- * Mellanox further blinks the amber LED to indicate data packet
- * activity, but we have no hardware support for that, so it would
- * require waking up every 10-20 msecs and checking the counters
- * on the chip, and then turning the LED off if appropriate.  That's
- * visible overhead, so not something we will do.
- *
- */
-static void ipath_setup_pe_setextled(struct ipath_devdata *dd, u64 lst,
-				     u64 ltst)
-{
-	u64 extctl;
-	unsigned long flags = 0;
-
-	/* the diags use the LED to indicate diag info, so we leave
-	 * the external LED alone when the diags are running */
-	if (ipath_diag_inuse)
-		return;
-
-	/* Allow override of LED display, e.g. for locating the system in a rack */
-	if (dd->ipath_led_override) {
-		ltst = (dd->ipath_led_override & IPATH_LED_PHYS)
-			? INFINIPATH_IBCS_LT_STATE_LINKUP
-			: INFINIPATH_IBCS_LT_STATE_DISABLED;
-		lst = (dd->ipath_led_override & IPATH_LED_LOG)
-			? INFINIPATH_IBCS_L_STATE_ACTIVE
-			: INFINIPATH_IBCS_L_STATE_DOWN;
-	}
-
-	spin_lock_irqsave(&dd->ipath_gpio_lock, flags);
-	extctl = dd->ipath_extctrl & ~(INFINIPATH_EXTC_LED1PRIPORT_ON |
-				       INFINIPATH_EXTC_LED2PRIPORT_ON);
-
-	if (ltst == INFINIPATH_IBCS_LT_STATE_LINKUP)
-		extctl |= INFINIPATH_EXTC_LED2PRIPORT_ON;
-	if (lst == INFINIPATH_IBCS_L_STATE_ACTIVE)
-		extctl |= INFINIPATH_EXTC_LED1PRIPORT_ON;
-	dd->ipath_extctrl = extctl;
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_extctrl, extctl);
-	spin_unlock_irqrestore(&dd->ipath_gpio_lock, flags);
-}
-
-/**
- * ipath_setup_pe_cleanup - clean up any chip-specific stuff
- * @dd: the infinipath device
- *
- * This is called during driver unload.
- * We do the pci_disable_msi here, not in generic code, because it
- * isn't used for the HT chips. If we do end up needing pci_enable_msi
- * at some point in the future for HT, we'll move the call back
- * into the main init_one code.
- */
-static void ipath_setup_pe_cleanup(struct ipath_devdata *dd)
-{
-	dd->ipath_msi_lo = 0;	/* just in case unload fails */
-	pci_disable_msi(dd->pcidev);
-}
-
-static void ipath_6120_pcie_params(struct ipath_devdata *dd)
-{
-	u16 linkstat, speed;
-	int pos;
-
-	pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_EXP);
-	if (!pos) {
-		ipath_dev_err(dd, "Can't find PCI Express capability!\n");
-		goto bail;
-	}
-
-	pci_read_config_word(dd->pcidev, pos + PCI_EXP_LNKSTA,
-			     &linkstat);
-	/*
-	 * speed is bits 0-3, link width is bits 4-8;
-	 * no defines for them in headers
-	 */
-	speed = linkstat & 0xf;
-	linkstat >>= 4;
-	linkstat &= 0x1f;
-	dd->ipath_lbus_width = linkstat;
-
-	switch (speed) {
-	case 1:
-		dd->ipath_lbus_speed = 2500; /* Gen1, 2.5GHz */
-		break;
-	case 2:
-		dd->ipath_lbus_speed = 5000; /* Gen2, 5GHz */
-		break;
-	default: /* not defined, assume gen1 */
-		dd->ipath_lbus_speed = 2500;
-		break;
-	}
-
-	if (linkstat < 8)
-		ipath_dev_err(dd,
-			"PCIe width %u (x8 HCA), performance reduced\n",
-			linkstat);
-	else
-		ipath_cdbg(VERBOSE, "PCIe speed %u width %u (x8 HCA)\n",
-			dd->ipath_lbus_speed, linkstat);
-
-	if (speed != 1)
-		ipath_dev_err(dd,
-			"PCIe linkspeed %u is incorrect; "
-			"should be 1 (2500)!\n", speed);
-bail:
-	/* fill in string, even on errors */
-	snprintf(dd->ipath_lbus_info, sizeof(dd->ipath_lbus_info),
-		"PCIe,%uMHz,x%u\n",
-		dd->ipath_lbus_speed,
-		dd->ipath_lbus_width);
-
-	return;
-}
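/*
 * Editorial aside, not part of the original file: later kernels added
 * named masks for these Link Status fields to <uapi/linux/pci_regs.h>.
 * Assuming those defines, the open-coded extraction above amounts to:
 *
 *	speed = linkstat & PCI_EXP_LNKSTA_CLS;    1 = 2.5GT/s, 2 = 5GT/s
 *	width = (linkstat & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT;
 */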
-
-/**
- * ipath_setup_pe_config - setup PCIe config related stuff
- * @dd: the infinipath device
- * @pdev: the PCI device
- *
- * The pci_enable_msi() call will fail on systems with MSI quirks
- * such as those with AMD8131, even if the device of interest is not
- * attached to that device (in the 2.6.13 - 2.6.15 kernels, at least; fixed
- * late in 2.6.16).
- * All that can be done is to edit the kernel source to remove the quirk
- * check until that is fixed.
- * We do not need to call enable_msi() for our HyperTransport chip,
- * even though it uses MSI, and we want to avoid the quirk warning,
- * so we call enable_msi only for PCIe.  If we do end up needing
- * pci_enable_msi at some point in the future for HT, we'll move the
- * call back into the main init_one code.
- * We save the msi lo and hi values, so we can restore them after
- * chip reset (the kernel PCI infrastructure doesn't yet handle that
- * correctly).
- */
-static int ipath_setup_pe_config(struct ipath_devdata *dd,
-				 struct pci_dev *pdev)
-{
-	int pos, ret;
-
-	dd->ipath_msi_lo = 0;	/* used as a flag during reset processing */
-	ret = pci_enable_msi(dd->pcidev);
-	if (ret)
-		ipath_dev_err(dd, "pci_enable_msi failed: %d, "
-			      "interrupts may not work\n", ret);
-	/* continue even if it fails, we may still be OK... */
-	dd->ipath_irq = pdev->irq;
-
-	if ((pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI))) {
-		u16 control;
-		pci_read_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_LO,
-				      &dd->ipath_msi_lo);
-		pci_read_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_HI,
-				      &dd->ipath_msi_hi);
-		pci_read_config_word(dd->pcidev, pos + PCI_MSI_FLAGS,
-				     &control);
-		/* now save the data (vector) info */
-		pci_read_config_word(dd->pcidev,
-				     pos + ((control & PCI_MSI_FLAGS_64BIT)
-					    ? 12 : 8),
-				     &dd->ipath_msi_data);
-		ipath_cdbg(VERBOSE, "Read msi data 0x%x from config offset "
-			   "0x%x, control=0x%x\n", dd->ipath_msi_data,
-			   pos + ((control & PCI_MSI_FLAGS_64BIT) ? 12 : 8),
-			   control);
-		/* we save the cachelinesize also, although it doesn't
-		 * really matter */
-		pci_read_config_byte(dd->pcidev, PCI_CACHE_LINE_SIZE,
-				     &dd->ipath_pci_cacheline);
-	} else
-		ipath_dev_err(dd, "Can't find MSI capability, "
-			      "can't save MSI settings for reset\n");
-
-	ipath_6120_pcie_params(dd);
-
-	dd->ipath_link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
-	dd->ipath_link_speed_supported = IPATH_IB_SDR;
-	dd->ipath_link_width_enabled = IB_WIDTH_4X;
-	dd->ipath_link_speed_enabled = dd->ipath_link_speed_supported;
-	/* these can't change for this chip, so set once */
-	dd->ipath_link_width_active = dd->ipath_link_width_enabled;
-	dd->ipath_link_speed_active = dd->ipath_link_speed_enabled;
-	return 0;
-}
-
-static void ipath_init_pe_variables(struct ipath_devdata *dd)
-{
-	/*
-	 * setup the register offsets, since they are different for each
-	 * chip
-	 */
-	dd->ipath_kregs = &ipath_pe_kregs;
-	dd->ipath_cregs = &ipath_pe_cregs;
-
-	/*
-	 * bits for selecting i2c direction and values,
-	 * used for I2C serial flash
-	 */
-	dd->ipath_gpio_sda_num = _IPATH_GPIO_SDA_NUM;
-	dd->ipath_gpio_scl_num = _IPATH_GPIO_SCL_NUM;
-	dd->ipath_gpio_sda = IPATH_GPIO_SDA;
-	dd->ipath_gpio_scl = IPATH_GPIO_SCL;
-
-	/*
-	 * Fill in data for field-values that change in newer chips.
-	 * We dynamically specify only the mask for LINKTRAININGSTATE
-	 * and only the shift for LINKSTATE, as they are the only ones
-	 * that change.  Also precalculate the 3 link states of interest
-	 * and the combined mask.
-	 */
-	dd->ibcs_ls_shift = IBA6120_IBCS_LINKSTATE_SHIFT;
-	dd->ibcs_lts_mask = IBA6120_IBCS_LINKTRAININGSTATE_MASK;
-	dd->ibcs_mask = (INFINIPATH_IBCS_LINKSTATE_MASK <<
-		dd->ibcs_ls_shift) | dd->ibcs_lts_mask;
-	dd->ib_init = (INFINIPATH_IBCS_LT_STATE_LINKUP <<
-		INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) |
-		(INFINIPATH_IBCS_L_STATE_INIT << dd->ibcs_ls_shift);
-	dd->ib_arm = (INFINIPATH_IBCS_LT_STATE_LINKUP <<
-		INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) |
-		(INFINIPATH_IBCS_L_STATE_ARM << dd->ibcs_ls_shift);
-	dd->ib_active = (INFINIPATH_IBCS_LT_STATE_LINKUP <<
-		INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) |
-		(INFINIPATH_IBCS_L_STATE_ACTIVE << dd->ibcs_ls_shift);
-
-	/*
-	 * Fill in data for ibcc field-values that change in newer chips.
-	 * We dynamically specify only the mask for LINKINITCMD
-	 * and only the shift for LINKCMD and MAXPKTLEN, as they are
-	 * the only ones that change.
-	 */
-	dd->ibcc_lic_mask = INFINIPATH_IBCC_LINKINITCMD_MASK;
-	dd->ibcc_lc_shift = INFINIPATH_IBCC_LINKCMD_SHIFT;
-	dd->ibcc_mpl_shift = INFINIPATH_IBCC_MAXPKTLEN_SHIFT;
-
-	/* Fill in shifts for RcvCtrl. */
-	dd->ipath_r_portenable_shift = INFINIPATH_R_PORTENABLE_SHIFT;
-	dd->ipath_r_intravail_shift = INFINIPATH_R_INTRAVAIL_SHIFT;
-	dd->ipath_r_tailupd_shift = INFINIPATH_R_TAILUPD_SHIFT;
-	dd->ipath_r_portcfg_shift = 0; /* Not on IBA6120 */
-
-	/* variables for sanity checking interrupt and errors */
-	dd->ipath_hwe_bitsextant =
-		(INFINIPATH_HWE_RXEMEMPARITYERR_MASK <<
-		 INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT) |
-		(INFINIPATH_HWE_TXEMEMPARITYERR_MASK <<
-		 INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT) |
-		(INFINIPATH_HWE_PCIEMEMPARITYERR_MASK <<
-		 INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT) |
-		INFINIPATH_HWE_PCIE1PLLFAILED |
-		INFINIPATH_HWE_PCIE0PLLFAILED |
-		INFINIPATH_HWE_PCIEPOISONEDTLP |
-		INFINIPATH_HWE_PCIECPLTIMEOUT |
-		INFINIPATH_HWE_PCIEBUSPARITYXTLH |
-		INFINIPATH_HWE_PCIEBUSPARITYXADM |
-		INFINIPATH_HWE_PCIEBUSPARITYRADM |
-		INFINIPATH_HWE_MEMBISTFAILED |
-		INFINIPATH_HWE_COREPLL_FBSLIP |
-		INFINIPATH_HWE_COREPLL_RFSLIP |
-		INFINIPATH_HWE_SERDESPLLFAILED |
-		INFINIPATH_HWE_IBCBUSTOSPCPARITYERR |
-		INFINIPATH_HWE_IBCBUSFRSPCPARITYERR;
-	dd->ipath_i_bitsextant =
-		(INFINIPATH_I_RCVURG_MASK << INFINIPATH_I_RCVURG_SHIFT) |
-		(INFINIPATH_I_RCVAVAIL_MASK <<
-		 INFINIPATH_I_RCVAVAIL_SHIFT) |
-		INFINIPATH_I_ERROR | INFINIPATH_I_SPIOSENT |
-		INFINIPATH_I_SPIOBUFAVAIL | INFINIPATH_I_GPIO;
-	dd->ipath_e_bitsextant =
-		INFINIPATH_E_RFORMATERR | INFINIPATH_E_RVCRC |
-		INFINIPATH_E_RICRC | INFINIPATH_E_RMINPKTLEN |
-		INFINIPATH_E_RMAXPKTLEN | INFINIPATH_E_RLONGPKTLEN |
-		INFINIPATH_E_RSHORTPKTLEN | INFINIPATH_E_RUNEXPCHAR |
-		INFINIPATH_E_RUNSUPVL | INFINIPATH_E_REBP |
-		INFINIPATH_E_RIBFLOW | INFINIPATH_E_RBADVERSION |
-		INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
-		INFINIPATH_E_RBADTID | INFINIPATH_E_RHDRLEN |
-		INFINIPATH_E_RHDR | INFINIPATH_E_RIBLOSTLINK |
-		INFINIPATH_E_SMINPKTLEN | INFINIPATH_E_SMAXPKTLEN |
-		INFINIPATH_E_SUNDERRUN | INFINIPATH_E_SPKTLEN |
-		INFINIPATH_E_SDROPPEDSMPPKT | INFINIPATH_E_SDROPPEDDATAPKT |
-		INFINIPATH_E_SPIOARMLAUNCH | INFINIPATH_E_SUNEXPERRPKTNUM |
-		INFINIPATH_E_SUNSUPVL | INFINIPATH_E_IBSTATUSCHANGED |
-		INFINIPATH_E_INVALIDADDR | INFINIPATH_E_RESET |
-		INFINIPATH_E_HARDWARE;
-
-	dd->ipath_i_rcvavail_mask = INFINIPATH_I_RCVAVAIL_MASK;
-	dd->ipath_i_rcvurg_mask = INFINIPATH_I_RCVURG_MASK;
-	dd->ipath_i_rcvavail_shift = INFINIPATH_I_RCVAVAIL_SHIFT;
-	dd->ipath_i_rcvurg_shift = INFINIPATH_I_RCVURG_SHIFT;
-
-	/*
-	 * EEPROM error log 0 is TXE parity errors, 1 is RXE parity,
-	 * 2 is miscellaneous errors, and 3 is reserved for future use.
-	 */
-	dd->ipath_eep_st_masks[0].hwerrs_to_log =
-		INFINIPATH_HWE_TXEMEMPARITYERR_MASK <<
-		INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT;
-
-	/* Ignore errors in PIO/PBC on systems with unordered write-combining */
-	if (ipath_unordered_wc())
-		dd->ipath_eep_st_masks[0].hwerrs_to_log &= ~TXE_PIO_PARITY;
-
-	dd->ipath_eep_st_masks[1].hwerrs_to_log =
-		INFINIPATH_HWE_RXEMEMPARITYERR_MASK <<
-		INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT;
-
-	dd->ipath_eep_st_masks[2].errs_to_log = INFINIPATH_E_RESET;
-	dd->delay_mult = 2; /* SDR, 4X, can't change */
-}
-
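-/*
- * Illustrative use of the fields precomputed above (a sketch, not a
- * call site in this driver): the combined mask lets a caller check
- * link and training state with a single compare against the IBCStatus
- * value, e.g. (ibcs & dd->ibcs_mask) == dd->ib_active is true exactly
- * when the link is in the LINKUP/ACTIVE state.
- */
-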
-/* Set up MSI again after a reset.  We'd like to just call
- * pci_enable_msi() and request_irq() again, but when we do that,
- * the MSI enable bit doesn't get set in the command word, and
- * we switch to a different interrupt vector, which is confusing,
- * so instead we do it all inline.  Perhaps this can somehow be tied
- * into the PCIe hotplug support at some point.
- * Note: because it is all done here, we don't call pci_disable_msi()
- * or free_irq() at the start of ipath_setup_pe_reset().
- */
-static int ipath_reinit_msi(struct ipath_devdata *dd)
-{
-	int pos;
-	u16 control;
-	int ret;
-
-	if (!dd->ipath_msi_lo) {
-		dev_info(&dd->pcidev->dev, "Can't restore MSI config, "
-			 "initial setup failed?\n");
-		ret = 0;
-		goto bail;
-	}
-
-	if (!(pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI))) {
-		ipath_dev_err(dd, "Can't find MSI capability, "
-			      "can't restore MSI settings\n");
-		ret = 0;
-		goto bail;
-	}
-	ipath_cdbg(VERBOSE, "Writing msi_lo 0x%x to config offset 0x%x\n",
-		   dd->ipath_msi_lo, pos + PCI_MSI_ADDRESS_LO);
-	pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_LO,
-			       dd->ipath_msi_lo);
-	ipath_cdbg(VERBOSE, "Writing msi_hi 0x%x to config offset 0x%x\n",
-		   dd->ipath_msi_hi, pos + PCI_MSI_ADDRESS_HI);
-	pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_HI,
-			       dd->ipath_msi_hi);
-	pci_read_config_word(dd->pcidev, pos + PCI_MSI_FLAGS, &control);
-	if (!(control & PCI_MSI_FLAGS_ENABLE)) {
-		ipath_cdbg(VERBOSE, "MSI control at off %x was %x, "
-			   "setting MSI enable (%x)\n", pos + PCI_MSI_FLAGS,
-			   control, control | PCI_MSI_FLAGS_ENABLE);
-		control |= PCI_MSI_FLAGS_ENABLE;
-		pci_write_config_word(dd->pcidev, pos + PCI_MSI_FLAGS,
-				      control);
-	}
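-	/*
-	 * The MSI data (vector) register is at offset 8 from the
-	 * capability for 32-bit MSI and 12 for 64-bit, i.e.
-	 * PCI_MSI_DATA_32 / PCI_MSI_DATA_64 in <linux/pci_regs.h>.
-	 */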
-	/* now rewrite the data (vector) info */
-	pci_write_config_word(dd->pcidev, pos +
-			      ((control & PCI_MSI_FLAGS_64BIT) ? 12 : 8),
-			      dd->ipath_msi_data);
-	/* we restore the cachelinesize also, although it doesn't really
-	 * matter */
-	pci_write_config_byte(dd->pcidev, PCI_CACHE_LINE_SIZE,
-			      dd->ipath_pci_cacheline);
-	/* and now set the pci master bit again */
-	pci_set_master(dd->pcidev);
-	ret = 1;
-
-bail:
-	return ret;
-}
-
-/* This routine sleeps, so it can only be called from user context, not
- * from interrupt context.  If we need interrupt context, we can split
- * it into two routines.
- */
-static int ipath_setup_pe_reset(struct ipath_devdata *dd)
-{
-	u64 val;
-	int i;
-	int ret;
-	u16 cmdval;
-
-	pci_read_config_word(dd->pcidev, PCI_COMMAND, &cmdval);
-
-	/* Use ERROR so it shows up in logs, etc. */
-	ipath_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->ipath_unit);
-	/* keep chip from being accessed in a few places */
-	dd->ipath_flags &= ~(IPATH_INITTED|IPATH_PRESENT);
-	val = dd->ipath_control | INFINIPATH_C_RESET;
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_control, val);
-	mb();
-
-	for (i = 1; i <= 5; i++) {
-		int r;
-		/* allow MBIST, etc. to complete; longer on each retry.
-		 * We sometimes get machine checks from bus timeout if no
-		 * response, so for now, make it *really* long.
-		 */
-		msleep(1000 + (1 + i) * 2000);
-		if ((r =
-		     pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0,
-					    dd->ipath_pcibar0)))
-			ipath_dev_err(dd, "rewrite of BAR0 failed: %d\n",
-				      r);
-		if ((r =
-		     pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_1,
-					    dd->ipath_pcibar1)))
-			ipath_dev_err(dd, "rewrite of BAR1 failed: %d\n",
-				      r);
-		/* now re-enable memory access */
-		pci_write_config_word(dd->pcidev, PCI_COMMAND, cmdval);
-		if ((r = pci_enable_device(dd->pcidev)))
-			ipath_dev_err(dd, "pci_enable_device failed after "
-				      "reset: %d\n", r);
-		/*
-		 * whether it fully enabled or not, mark as present,
-		 * again (but not INITTED)
-		 */
-		dd->ipath_flags |= IPATH_PRESENT;
-		val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_revision);
-		if (val == dd->ipath_revision) {
-			ipath_cdbg(VERBOSE, "Got matching revision "
-				   "register %llx on try %d\n",
-				   (unsigned long long) val, i);
-			ret = ipath_reinit_msi(dd);
-			goto bail;
-		}
-		/* Probably getting -1 back */
-		ipath_dbg("Didn't get expected revision register, "
-			  "got %llx, try %d\n", (unsigned long long) val,
-			  i + 1);
-	}
-	ret = 0; /* failed */
-
-bail:
-	if (ret)
-		ipath_6120_pcie_params(dd);
-	return ret;
-}
-
-/**
- * ipath_pe_put_tid - write a TID in chip
- * @dd: the infinipath device
- * @tidptr: pointer to the expected TID (in chip) to update
- * @type: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0) for expected
- * @pa: physical address of in-memory buffer; ipath_tidinvalid if freeing
- *
- * This exists as a separate routine to allow for special locking etc.
- * It's used for both the full cleanup on exit, as well as the normal
- * setup and teardown.
- */
-static void ipath_pe_put_tid(struct ipath_devdata *dd, u64 __iomem *tidptr,
-			     u32 type, unsigned long pa)
-{
-	u32 __iomem *tidp32 = (u32 __iomem *)tidptr;
-	unsigned long flags = 0; /* keep gcc quiet */
-	int tidx;
-	spinlock_t *tidlockp;
-
-	if (!dd->ipath_kregbase)
-		return;
-
-	if (pa != dd->ipath_tidinvalid) {
-		if (pa & ((1U << 11) - 1)) {
-			dev_info(&dd->pcidev->dev, "BUG: physaddr %lx "
-				 "not 2KB aligned!\n", pa);
-			return;
-		}
-		pa >>= 11;
-		/* paranoia check */
-		if (pa & ~INFINIPATH_RT_ADDR_MASK)
-			ipath_dev_err(dd,
-				      "BUG: Physical page address 0x%lx "
-				      "has bits set in 31-29\n", pa);
-
-		if (type == RCVHQ_RCV_TYPE_EAGER)
-			pa |= dd->ipath_tidtemplate;
-		else /* for now, always full 4KB page */
-			pa |= 2 << 29;
-	}
-
-	/*
-	 * Workaround chip bug 9437 by writing the scratch register
-	 * before and after the TID, and with an io write barrier.
-	 * We use a spinlock around the writes, so they can't intermix
-	 * with other TID (eager or expected) writes (the chip bug
-	 * is triggered by back to back TID writes). Unfortunately, this
-	 * call can be done from interrupt level for the port 0 eager TIDs,
-	 * so we have to use irqsave locks.
-	 */
-	/*
-	 * Assumes tidptr always > ipath_egrtidbase
-	 * if type == RCVHQ_RCV_TYPE_EAGER.
-	 */
-	tidx = tidptr - dd->ipath_egrtidbase;
-
-	tidlockp = (type == RCVHQ_RCV_TYPE_EAGER && tidx < dd->ipath_rcvegrcnt)
-		? &dd->ipath_kernel_tid_lock : &dd->ipath_user_tid_lock;
-	spin_lock_irqsave(tidlockp, flags);
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_scratch, 0xfeeddeaf);
-	writel(pa, tidp32);
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_scratch, 0xdeadbeef);
-	mmiowb();
-	spin_unlock_irqrestore(tidlockp, flags);
-}
-
-/**
- * ipath_pe_put_tid_2 - write a TID in chip, Revision 2 or higher
- * @dd: the infinipath device
- * @tidptr: pointer to the expected TID (in chip) to update
- * @type: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0) for expected
- * @pa: physical address of in-memory buffer; ipath_tidinvalid if freeing
- *
- * This exists as a separate routine to allow for selection of the
- * appropriate "flavor". The static calls in cleanup just use the
- * revision-agnostic form, as they are not performance critical.
- */
-static void ipath_pe_put_tid_2(struct ipath_devdata *dd, u64 __iomem *tidptr,
-			     u32 type, unsigned long pa)
-{
-	u32 __iomem *tidp32 = (u32 __iomem *)tidptr;
-	u32 tidx;
-
-	if (!dd->ipath_kregbase)
-		return;
-
-	if (pa != dd->ipath_tidinvalid) {
-		if (pa & ((1U << 11) - 1)) {
-			dev_info(&dd->pcidev->dev, "BUG: physaddr %lx "
-				 "not 2KB aligned!\n", pa);
-			return;
-		}
-		pa >>= 11;
-		/* paranoia check */
-		if (pa & ~INFINIPATH_RT_ADDR_MASK)
-			ipath_dev_err(dd,
-				      "BUG: Physical page address 0x%lx "
-				      "has bits set in 31-29\n", pa);
-
-		if (type == RCVHQ_RCV_TYPE_EAGER)
-			pa |= dd->ipath_tidtemplate;
-		else /* for now, always full 4KB page */
-			pa |= 2 << 29;
-	}
-	tidx = tidptr - dd->ipath_egrtidbase;
-	writel(pa, tidp32);
-	mmiowb();
-}
-
-
-/**
- * ipath_pe_clear_tids - clear all TID entries for a port, expected and eager
- * @dd: the infinipath device
- * @port: the port
- *
- * clear all TID entries for a port, expected and eager.
- * Used from ipath_close().  On this chip, TIDs are only 32 bits,
- * not 64, but they are still on 64 bit boundaries, so tidbase
- * is declared as u64 * for the pointer math, even though we write 32 bits
- */
-static void ipath_pe_clear_tids(struct ipath_devdata *dd, unsigned port)
-{
-	u64 __iomem *tidbase;
-	unsigned long tidinv;
-	int i;
-
-	if (!dd->ipath_kregbase)
-		return;
-
-	ipath_cdbg(VERBOSE, "Invalidate TIDs for port %u\n", port);
-
-	tidinv = dd->ipath_tidinvalid;
-	tidbase = (u64 __iomem *)
-		((char __iomem *)(dd->ipath_kregbase) +
-		 dd->ipath_rcvtidbase +
-		 port * dd->ipath_rcvtidcnt * sizeof(*tidbase));
-
-	for (i = 0; i < dd->ipath_rcvtidcnt; i++)
-		dd->ipath_f_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
-				 tidinv);
-
-	tidbase = (u64 __iomem *)
-		((char __iomem *)(dd->ipath_kregbase) +
-		 dd->ipath_rcvegrbase +
-		 port * dd->ipath_rcvegrcnt * sizeof(*tidbase));
-
-	for (i = 0; i < dd->ipath_rcvegrcnt; i++)
-		dd->ipath_f_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
-				 tidinv);
-}
-
-/**
- * ipath_pe_tidtemplate - setup constants for TID updates
- * @dd: the infinipath device
- *
- * We set up values we use a lot, to avoid recalculating them each time
- */
-static void ipath_pe_tidtemplate(struct ipath_devdata *dd)
-{
-	u32 egrsize = dd->ipath_rcvegrbufsize;
-
-	/* For now, we always allocate 4KB buffers (at init) so we can
-	 * receive max size packets.  We may want a module parameter to
- * specify 2KB or 4KB and/or make it per port instead of per device
-	 * for those who want to reduce memory footprint.  Note that the
-	 * ipath_rcvhdrentsize size must be large enough to hold the largest
-	 * IB header (currently 96 bytes) that we expect to handle (plus of
-	 * course the 2 dwords of RHF).
-	 */
-	if (egrsize == 2048)
-		dd->ipath_tidtemplate = 1U << 29;
-	else if (egrsize == 4096)
-		dd->ipath_tidtemplate = 2U << 29;
-	else {
-		egrsize = 4096;
-		dev_info(&dd->pcidev->dev, "BUG: unsupported egrbufsize "
-			 "%u, using %u\n", dd->ipath_rcvegrbufsize,
-			 egrsize);
-		dd->ipath_tidtemplate = 2U << 29;
-	}
-	dd->ipath_tidinvalid = 0;
-}
-
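-/*
- * Putting the pieces together (derived from the put_tid routines
- * above, not a separate chip-spec quote): a 32-bit TID word is
- * (size_code << 29) | (phys_addr >> 11), where size_code is 1 for
- * 2KB buffers and 2 for 4KB, and phys_addr is 2KB aligned so the
- * shift loses nothing.
- */
-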
-static int ipath_pe_early_init(struct ipath_devdata *dd)
-{
-	dd->ipath_flags |= IPATH_4BYTE_TID;
-	if (ipath_unordered_wc())
-		dd->ipath_flags |= IPATH_PIO_FLUSH_WC;
-
-	/*
-	 * For openfabrics, we need to be able to handle an IB header of
-	 * 24 dwords.  HT chip has arbitrary sized receive buffers, so we
-	 * made them the same size as the PIO buffers.  This chip does not
-	 * handle arbitrary size buffers, so we need the header large enough
-	 * to handle largest IB header, but still have room for a 2KB MTU
-	 * standard IB packet.
-	 */
-	dd->ipath_rcvhdrentsize = 24;
-	dd->ipath_rcvhdrsize = IPATH_DFLT_RCVHDRSIZE;
-	dd->ipath_rhf_offset = 0;
-	dd->ipath_egrtidbase = (u64 __iomem *)
-		((char __iomem *) dd->ipath_kregbase + dd->ipath_rcvegrbase);
-
-	dd->ipath_rcvegrbufsize = ipath_mtu4096 ? 4096 : 2048;
-	/*
-	 * the min() check here is currently a nop, but it may not always
-	 * be, depending on just how we do ipath_rcvegrbufsize
-	 */
-	dd->ipath_ibmaxlen = min(ipath_mtu4096 ? dd->ipath_piosize4k :
-				 dd->ipath_piosize2k,
-				 dd->ipath_rcvegrbufsize +
-				 (dd->ipath_rcvhdrentsize << 2));
-	dd->ipath_init_ibmaxlen = dd->ipath_ibmaxlen;
-
-	/*
-	 * We can request a receive interrupt for 1 or
-	 * more packets from current offset.  For now, we set this
-	 * up for a single packet.
-	 */
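-	/* i.e. a packet count of 1, encoded in the upper 32 bits */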
-	dd->ipath_rhdrhead_intr_off = 1ULL<<32;
-
-	ipath_get_eeprom_info(dd);
-
-	return 0;
-}
-
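-/*
- * Weak default: architecture-specific code may override this to
- * report that write-combined stores can be reordered; the generic
- * answer is "ordered" (0).
- */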
-int __attribute__((weak)) ipath_unordered_wc(void)
-{
-	return 0;
-}
-
-/**
- * ipath_pe_get_base_info - set chip-specific flags for user code
- * @pd: the infinipath port
- * @kbase: ipath_base_info pointer
- *
- * We set the PCIE flag because the lower bandwidth on PCIe vs
- * HyperTransport can affect some user packet algorithms.
- */
-static int ipath_pe_get_base_info(struct ipath_portdata *pd, void *kbase)
-{
-	struct ipath_base_info *kinfo = kbase;
-	struct ipath_devdata *dd;
-
-	if (ipath_unordered_wc()) {
-		kinfo->spi_runtime_flags |= IPATH_RUNTIME_FORCE_WC_ORDER;
-		ipath_cdbg(PROC, "Intel processor, forcing WC order\n");
-	} else
-		ipath_cdbg(PROC, "Not Intel processor, WC ordered\n");
-
-	if (pd == NULL)
-		goto done;
-
-	dd = pd->port_dd;
-
-done:
-	kinfo->spi_runtime_flags |= IPATH_RUNTIME_PCIE |
-		IPATH_RUNTIME_FORCE_PIOAVAIL | IPATH_RUNTIME_PIO_REGSWAPPED;
-	return 0;
-}
-
-static void ipath_pe_free_irq(struct ipath_devdata *dd)
-{
-	free_irq(dd->ipath_irq, dd);
-	dd->ipath_irq = 0;
-}
-
-
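-/*
- * On this chip the 64-bit RHF (receive header flags) word leads each
- * rcvhdrq entry (ipath_rhf_offset is 0), so the message header starts
- * two dwords past rhf_addr; hence the sizeof(u64) / sizeof(u32) index.
- */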
-static struct ipath_message_header *
-ipath_pe_get_msgheader(struct ipath_devdata *dd, __le32 *rhf_addr)
-{
-	return (struct ipath_message_header *)
-		&rhf_addr[sizeof(u64) / sizeof(u32)];
-}
-
-static void ipath_pe_config_ports(struct ipath_devdata *dd, ushort cfgports)
-{
-	dd->ipath_portcnt =
-		ipath_read_kreg32(dd, dd->ipath_kregs->kr_portcnt);
-	dd->ipath_p0_rcvegrcnt =
-		ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvegrcnt);
-}
-
-static void ipath_pe_read_counters(struct ipath_devdata *dd,
-				   struct infinipath_counters *cntrs)
-{
-	cntrs->LBIntCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(LBIntCnt));
-	cntrs->LBFlowStallCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(LBFlowStallCnt));
-	cntrs->TxSDmaDescCnt = 0;
-	cntrs->TxUnsupVLErrCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxUnsupVLErrCnt));
-	cntrs->TxDataPktCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxDataPktCnt));
-	cntrs->TxFlowPktCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxFlowPktCnt));
-	cntrs->TxDwordCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxDwordCnt));
-	cntrs->TxLenErrCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxLenErrCnt));
-	cntrs->TxMaxMinLenErrCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxMaxMinLenErrCnt));
-	cntrs->TxUnderrunCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxUnderrunCnt));
-	cntrs->TxFlowStallCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxFlowStallCnt));
-	cntrs->TxDroppedPktCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxDroppedPktCnt));
-	cntrs->RxDroppedPktCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxDroppedPktCnt));
-	cntrs->RxDataPktCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxDataPktCnt));
-	cntrs->RxFlowPktCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxFlowPktCnt));
-	cntrs->RxDwordCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxDwordCnt));
-	cntrs->RxLenErrCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxLenErrCnt));
-	cntrs->RxMaxMinLenErrCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxMaxMinLenErrCnt));
-	cntrs->RxICRCErrCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxICRCErrCnt));
-	cntrs->RxVCRCErrCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxVCRCErrCnt));
-	cntrs->RxFlowCtrlErrCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxFlowCtrlErrCnt));
-	cntrs->RxBadFormatCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxBadFormatCnt));
-	cntrs->RxLinkProblemCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxLinkProblemCnt));
-	cntrs->RxEBPCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxEBPCnt));
-	cntrs->RxLPCRCErrCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxLPCRCErrCnt));
-	cntrs->RxBufOvflCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxBufOvflCnt));
-	cntrs->RxTIDFullErrCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxTIDFullErrCnt));
-	cntrs->RxTIDValidErrCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxTIDValidErrCnt));
-	cntrs->RxPKeyMismatchCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxPKeyMismatchCnt));
-	cntrs->RxP0HdrEgrOvflCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP0HdrEgrOvflCnt));
-	cntrs->RxP1HdrEgrOvflCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP1HdrEgrOvflCnt));
-	cntrs->RxP2HdrEgrOvflCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP2HdrEgrOvflCnt));
-	cntrs->RxP3HdrEgrOvflCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP3HdrEgrOvflCnt));
-	cntrs->RxP4HdrEgrOvflCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP4HdrEgrOvflCnt));
-	cntrs->RxP5HdrEgrOvflCnt = 0;
-	cntrs->RxP6HdrEgrOvflCnt = 0;
-	cntrs->RxP7HdrEgrOvflCnt = 0;
-	cntrs->RxP8HdrEgrOvflCnt = 0;
-	cntrs->RxP9HdrEgrOvflCnt = 0;
-	cntrs->RxP10HdrEgrOvflCnt = 0;
-	cntrs->RxP11HdrEgrOvflCnt = 0;
-	cntrs->RxP12HdrEgrOvflCnt = 0;
-	cntrs->RxP13HdrEgrOvflCnt = 0;
-	cntrs->RxP14HdrEgrOvflCnt = 0;
-	cntrs->RxP15HdrEgrOvflCnt = 0;
-	cntrs->RxP16HdrEgrOvflCnt = 0;
-	cntrs->IBStatusChangeCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBStatusChangeCnt));
-	cntrs->IBLinkErrRecoveryCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBLinkErrRecoveryCnt));
-	cntrs->IBLinkDownedCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBLinkDownedCnt));
-	cntrs->IBSymbolErrCnt =
-		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBSymbolErrCnt));
-	cntrs->RxVL15DroppedPktCnt = 0;
-	cntrs->RxOtherLocalPhyErrCnt = 0;
-	cntrs->PcieRetryBufDiagQwordCnt = 0;
-	cntrs->ExcessBufferOvflCnt = dd->ipath_overrun_thresh_errs;
-	cntrs->LocalLinkIntegrityErrCnt = dd->ipath_lli_errs;
-	cntrs->RxVlErrCnt = 0;
-	cntrs->RxDlidFltrCnt = 0;
-}
-
-
-/* no interrupt fallback for these chips */
-static int ipath_pe_nointr_fallback(struct ipath_devdata *dd)
-{
-	return 0;
-}
-
-
-/*
- * reset the XGXS (between serdes and IBC).  Slightly less intrusive
- * than resetting the IBC or external link state, and useful in some
- * cases to cause some retraining.  To do this right, we reset IBC
- * as well.
- */
-static void ipath_pe_xgxs_reset(struct ipath_devdata *dd)
-{
-	u64 val, prev_val;
-
-	prev_val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
-	val = prev_val | INFINIPATH_XGXS_RESET;
-	prev_val &= ~INFINIPATH_XGXS_RESET; /* be sure */
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
-			 dd->ipath_control & ~INFINIPATH_C_LINKENABLE);
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
-	ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, prev_val);
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
-			 dd->ipath_control);
-}
-
-
-static int ipath_pe_get_ib_cfg(struct ipath_devdata *dd, int which)
-{
-	int ret;
-
-	switch (which) {
-	case IPATH_IB_CFG_LWID:
-		ret = dd->ipath_link_width_active;
-		break;
-	case IPATH_IB_CFG_SPD:
-		ret = dd->ipath_link_speed_active;
-		break;
-	case IPATH_IB_CFG_LWID_ENB:
-		ret = dd->ipath_link_width_enabled;
-		break;
-	case IPATH_IB_CFG_SPD_ENB:
-		ret = dd->ipath_link_speed_enabled;
-		break;
-	default:
-		ret = -ENOTSUPP;
-		break;
-	}
-	return ret;
-}
-
-
-/* we assume range checking is already done, if needed */
-static int ipath_pe_set_ib_cfg(struct ipath_devdata *dd, int which, u32 val)
-{
-	int ret = 0;
-
-	if (which == IPATH_IB_CFG_LWID_ENB)
-		dd->ipath_link_width_enabled = val;
-	else if (which == IPATH_IB_CFG_SPD_ENB)
-		dd->ipath_link_speed_enabled = val;
-	else
-		ret = -ENOTSUPP;
-	return ret;
-}
-
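-/* this chip has no JInt (interrupt mitigation) registers; no-op stub */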
-static void ipath_pe_config_jint(struct ipath_devdata *dd, u16 a, u16 b)
-{
-}
-
-
-static int ipath_pe_ib_updown(struct ipath_devdata *dd, int ibup, u64 ibcs)
-{
-	if (ibup) {
-		if (dd->ibdeltainprog) {
-			dd->ibdeltainprog = 0;
-			dd->ibsymdelta +=
-				ipath_read_creg32(dd,
-				  dd->ipath_cregs->cr_ibsymbolerrcnt) -
-				dd->ibsymsnap;
-			dd->iblnkerrdelta +=
-				ipath_read_creg32(dd,
-				  dd->ipath_cregs->cr_iblinkerrrecovcnt) -
-				dd->iblnkerrsnap;
-		}
-	} else {
-		dd->ipath_lli_counter = 0;
-		if (!dd->ibdeltainprog) {
-			dd->ibdeltainprog = 1;
-			dd->ibsymsnap =
-				ipath_read_creg32(dd,
-				  dd->ipath_cregs->cr_ibsymbolerrcnt);
-			dd->iblnkerrsnap =
-				ipath_read_creg32(dd,
-				  dd->ipath_cregs->cr_iblinkerrrecovcnt);
-		}
-	}
-
-	ipath_setup_pe_setextled(dd, ipath_ib_linkstate(dd, ibcs),
-		ipath_ib_linktrstate(dd, ibcs));
-	return 0;
-}
-
-
-/**
- * ipath_init_iba6120_funcs - set up the chip-specific function pointers
- * @dd: the infinipath device
- *
- * This is global, and is called directly at init to set up the
- * chip-specific function pointers for later use.
- */
-void ipath_init_iba6120_funcs(struct ipath_devdata *dd)
-{
-	dd->ipath_f_intrsetup = ipath_pe_intconfig;
-	dd->ipath_f_bus = ipath_setup_pe_config;
-	dd->ipath_f_reset = ipath_setup_pe_reset;
-	dd->ipath_f_get_boardname = ipath_pe_boardname;
-	dd->ipath_f_init_hwerrors = ipath_pe_init_hwerrors;
-	dd->ipath_f_early_init = ipath_pe_early_init;
-	dd->ipath_f_handle_hwerrors = ipath_pe_handle_hwerrors;
-	dd->ipath_f_quiet_serdes = ipath_pe_quiet_serdes;
-	dd->ipath_f_bringup_serdes = ipath_pe_bringup_serdes;
-	dd->ipath_f_clear_tids = ipath_pe_clear_tids;
-	/*
-	 * _f_put_tid may get changed after we read the chip revision,
-	 * but we start with the safe version for all revs
-	 */
-	dd->ipath_f_put_tid = ipath_pe_put_tid;
-	dd->ipath_f_cleanup = ipath_setup_pe_cleanup;
-	dd->ipath_f_setextled = ipath_setup_pe_setextled;
-	dd->ipath_f_get_base_info = ipath_pe_get_base_info;
-	dd->ipath_f_free_irq = ipath_pe_free_irq;
-	dd->ipath_f_tidtemplate = ipath_pe_tidtemplate;
-	dd->ipath_f_intr_fallback = ipath_pe_nointr_fallback;
-	dd->ipath_f_xgxs_reset = ipath_pe_xgxs_reset;
-	dd->ipath_f_get_msgheader = ipath_pe_get_msgheader;
-	dd->ipath_f_config_ports = ipath_pe_config_ports;
-	dd->ipath_f_read_counters = ipath_pe_read_counters;
-	dd->ipath_f_get_ib_cfg = ipath_pe_get_ib_cfg;
-	dd->ipath_f_set_ib_cfg = ipath_pe_set_ib_cfg;
-	dd->ipath_f_config_jint = ipath_pe_config_jint;
-	dd->ipath_f_ib_updown = ipath_pe_ib_updown;
-
-
-	/* initialize chip-specific variables */
-	ipath_init_pe_variables(dd);
-}
-
diff --git a/drivers/infiniband/hw/ipath/ipath_iba7220.c b/drivers/infiniband/hw/ipath/ipath_iba7220.c
deleted file mode 100644
index 34b778ed97fc..000000000000
--- a/drivers/infiniband/hw/ipath/ipath_iba7220.c
+++ /dev/null
@@ -1,2631 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-/*
- * This file contains all of the code that is specific to the
- * InfiniPath 7220 chip (except that specific to the SerDes)
- */
-
-#include <linux/interrupt.h>
-#include <linux/pci.h>
-#include <linux/sched.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <rdma/ib_verbs.h>
-
-#include "ipath_kernel.h"
-#include "ipath_registers.h"
-#include "ipath_7220.h"
-
-static void ipath_setup_7220_setextled(struct ipath_devdata *, u64, u64);
-
-static unsigned ipath_compat_ddr_negotiate = 1;
-
-module_param_named(compat_ddr_negotiate, ipath_compat_ddr_negotiate, uint,
-			S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(compat_ddr_negotiate,
-		"Attempt pre-IBTA 1.2 DDR speed negotiation");
-
-static unsigned ipath_sdma_fetch_arb = 1;
-module_param_named(fetch_arb, ipath_sdma_fetch_arb, uint, S_IRUGO);
-MODULE_PARM_DESC(fetch_arb, "IBA7220: change SDMA descriptor arbitration");
-
-/*
- * This file contains almost all the chip-specific register information and
- * access functions for the QLogic InfiniPath 7220 PCI-Express chip, with the
- * exception of SerDes support, which is in ipath_sd7220.c.
- *
- * This lists the InfiniPath registers, in the actual chip layout.
- * This structure should never be directly accessed.
- */
-struct _infinipath_do_not_use_kernel_regs {
-	unsigned long long Revision;
-	unsigned long long Control;
-	unsigned long long PageAlign;
-	unsigned long long PortCnt;
-	unsigned long long DebugPortSelect;
-	unsigned long long DebugSigsIntSel; /* was Reserved0;*/
-	unsigned long long SendRegBase;
-	unsigned long long UserRegBase;
-	unsigned long long CounterRegBase;
-	unsigned long long Scratch;
-	unsigned long long EEPROMAddrCmd; /* was Reserved1; */
-	unsigned long long EEPROMData; /* was Reserved2; */
-	unsigned long long IntBlocked;
-	unsigned long long IntMask;
-	unsigned long long IntStatus;
-	unsigned long long IntClear;
-	unsigned long long ErrorMask;
-	unsigned long long ErrorStatus;
-	unsigned long long ErrorClear;
-	unsigned long long HwErrMask;
-	unsigned long long HwErrStatus;
-	unsigned long long HwErrClear;
-	unsigned long long HwDiagCtrl;
-	unsigned long long MDIO;
-	unsigned long long IBCStatus;
-	unsigned long long IBCCtrl;
-	unsigned long long ExtStatus;
-	unsigned long long ExtCtrl;
-	unsigned long long GPIOOut;
-	unsigned long long GPIOMask;
-	unsigned long long GPIOStatus;
-	unsigned long long GPIOClear;
-	unsigned long long RcvCtrl;
-	unsigned long long RcvBTHQP;
-	unsigned long long RcvHdrSize;
-	unsigned long long RcvHdrCnt;
-	unsigned long long RcvHdrEntSize;
-	unsigned long long RcvTIDBase;
-	unsigned long long RcvTIDCnt;
-	unsigned long long RcvEgrBase;
-	unsigned long long RcvEgrCnt;
-	unsigned long long RcvBufBase;
-	unsigned long long RcvBufSize;
-	unsigned long long RxIntMemBase;
-	unsigned long long RxIntMemSize;
-	unsigned long long RcvPartitionKey;
-	unsigned long long RcvQPMulticastPort;
-	unsigned long long RcvPktLEDCnt;
-	unsigned long long IBCDDRCtrl;
-	unsigned long long HRTBT_GUID;
-	unsigned long long IB_SDTEST_IF_TX;
-	unsigned long long IB_SDTEST_IF_RX;
-	unsigned long long IBCDDRCtrl2;
-	unsigned long long IBCDDRStatus;
-	unsigned long long JIntReload;
-	unsigned long long IBNCModeCtrl;
-	unsigned long long SendCtrl;
-	unsigned long long SendBufBase;
-	unsigned long long SendBufSize;
-	unsigned long long SendBufCnt;
-	unsigned long long SendAvailAddr;
-	unsigned long long TxIntMemBase;
-	unsigned long long TxIntMemSize;
-	unsigned long long SendDmaBase;
-	unsigned long long SendDmaLenGen;
-	unsigned long long SendDmaTail;
-	unsigned long long SendDmaHead;
-	unsigned long long SendDmaHeadAddr;
-	unsigned long long SendDmaBufMask0;
-	unsigned long long SendDmaBufMask1;
-	unsigned long long SendDmaBufMask2;
-	unsigned long long SendDmaStatus;
-	unsigned long long SendBufferError;
-	unsigned long long SendBufferErrorCONT1;
-	unsigned long long SendBufErr2; /* was Reserved6SBE[0/6] */
-	unsigned long long Reserved6L[2];
-	unsigned long long AvailUpdCount;
-	unsigned long long RcvHdrAddr0;
-	unsigned long long RcvHdrAddrs[16]; /* Why enumerate? */
-	unsigned long long Reserved7hdtl; /* Align next to 300 */
-	unsigned long long RcvHdrTailAddr0; /* 300, like others */
-	unsigned long long RcvHdrTailAddrs[16];
-	unsigned long long Reserved9SW[7]; /* was [8]; we have 17 ports */
-	unsigned long long IbsdEpbAccCtl; /* IB Serdes EPB access control */
-	unsigned long long IbsdEpbTransReg; /* IB Serdes EPB Transaction */
-	unsigned long long Reserved10sds; /* was SerdesStatus on */
-	unsigned long long XGXSConfig;
-	unsigned long long IBSerDesCtrl; /* Was IBPLLCfg on Monty */
-	unsigned long long EEPCtlStat; /* for "boot" EEPROM/FLASH */
-	unsigned long long EEPAddrCmd;
-	unsigned long long EEPData;
-	unsigned long long PcieEpbAccCtl;
-	unsigned long long PcieEpbTransCtl;
-	unsigned long long EfuseCtl; /* E-Fuse control */
-	unsigned long long EfuseData[4];
-	unsigned long long ProcMon;
-	/* this chip moves the following two from previous 200, 208 */
-	unsigned long long PCIeRBufTestReg0;
-	unsigned long long PCIeRBufTestReg1;
-	/* added for this chip */
-	unsigned long long PCIeRBufTestReg2;
-	unsigned long long PCIeRBufTestReg3;
-	/* added for this chip, debug only */
-	unsigned long long SPC_JTAG_ACCESS_REG;
-	unsigned long long LAControlReg;
-	unsigned long long GPIODebugSelReg;
-	unsigned long long DebugPortValueReg;
-	/* added for this chip, DMA */
-	unsigned long long SendDmaBufUsed[3];
-	unsigned long long SendDmaReqTagUsed;
-	/*
-	 * added for this chip, EFUSE: note that these program 64-bit
-	 * words 2 and 3
-	 */
-	unsigned long long efuse_pgm_data[2];
-	unsigned long long Reserved11LAalign[10]; /* Skip 4B0..4F8 */
-	/* we have 30 regs for DDS and RXEQ in IB SERDES */
-	unsigned long long SerDesDDSRXEQ[30];
-	unsigned long long Reserved12LAalign[2]; /* Skip 5F0, 5F8 */
-	/* added for LA debug support */
-	unsigned long long LAMemory[32];
-};
-
-struct _infinipath_do_not_use_counters {
-	__u64 LBIntCnt;
-	__u64 LBFlowStallCnt;
-	__u64 TxSDmaDescCnt;	/* was Reserved1 */
-	__u64 TxUnsupVLErrCnt;
-	__u64 TxDataPktCnt;
-	__u64 TxFlowPktCnt;
-	__u64 TxDwordCnt;
-	__u64 TxLenErrCnt;
-	__u64 TxMaxMinLenErrCnt;
-	__u64 TxUnderrunCnt;
-	__u64 TxFlowStallCnt;
-	__u64 TxDroppedPktCnt;
-	__u64 RxDroppedPktCnt;
-	__u64 RxDataPktCnt;
-	__u64 RxFlowPktCnt;
-	__u64 RxDwordCnt;
-	__u64 RxLenErrCnt;
-	__u64 RxMaxMinLenErrCnt;
-	__u64 RxICRCErrCnt;
-	__u64 RxVCRCErrCnt;
-	__u64 RxFlowCtrlErrCnt;
-	__u64 RxBadFormatCnt;
-	__u64 RxLinkProblemCnt;
-	__u64 RxEBPCnt;
-	__u64 RxLPCRCErrCnt;
-	__u64 RxBufOvflCnt;
-	__u64 RxTIDFullErrCnt;
-	__u64 RxTIDValidErrCnt;
-	__u64 RxPKeyMismatchCnt;
-	__u64 RxP0HdrEgrOvflCnt;
-	__u64 RxP1HdrEgrOvflCnt;
-	__u64 RxP2HdrEgrOvflCnt;
-	__u64 RxP3HdrEgrOvflCnt;
-	__u64 RxP4HdrEgrOvflCnt;
-	__u64 RxP5HdrEgrOvflCnt;
-	__u64 RxP6HdrEgrOvflCnt;
-	__u64 RxP7HdrEgrOvflCnt;
-	__u64 RxP8HdrEgrOvflCnt;
-	__u64 RxP9HdrEgrOvflCnt;	/* was Reserved6 */
-	__u64 RxP10HdrEgrOvflCnt;	/* was Reserved7 */
-	__u64 RxP11HdrEgrOvflCnt;	/* new for IBA7220 */
-	__u64 RxP12HdrEgrOvflCnt;	/* new for IBA7220 */
-	__u64 RxP13HdrEgrOvflCnt;	/* new for IBA7220 */
-	__u64 RxP14HdrEgrOvflCnt;	/* new for IBA7220 */
-	__u64 RxP15HdrEgrOvflCnt;	/* new for IBA7220 */
-	__u64 RxP16HdrEgrOvflCnt;	/* new for IBA7220 */
-	__u64 IBStatusChangeCnt;
-	__u64 IBLinkErrRecoveryCnt;
-	__u64 IBLinkDownedCnt;
-	__u64 IBSymbolErrCnt;
-	/* The following are new for IBA7220 */
-	__u64 RxVL15DroppedPktCnt;
-	__u64 RxOtherLocalPhyErrCnt;
-	__u64 PcieRetryBufDiagQwordCnt;
-	__u64 ExcessBufferOvflCnt;
-	__u64 LocalLinkIntegrityErrCnt;
-	__u64 RxVlErrCnt;
-	__u64 RxDlidFltrCnt;
-	__u64 Reserved8[7];
-	__u64 PSStat;
-	__u64 PSStart;
-	__u64 PSInterval;
-	__u64 PSRcvDataCount;
-	__u64 PSRcvPktsCount;
-	__u64 PSXmitDataCount;
-	__u64 PSXmitPktsCount;
-	__u64 PSXmitWaitCount;
-};
-
-#define IPATH_KREG_OFFSET(field) (offsetof( \
-	struct _infinipath_do_not_use_kernel_regs, field) / sizeof(u64))
-#define IPATH_CREG_OFFSET(field) (offsetof( \
-	struct _infinipath_do_not_use_counters, field) / sizeof(u64))
-
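-/*
- * For example, IPATH_KREG_OFFSET(Scratch) yields the Scratch
- * register's index in 64-bit words from the register base, which is
- * the unit that ipath_read_kreg64()/ipath_write_kreg() expect.
- */
-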
-static const struct ipath_kregs ipath_7220_kregs = {
-	.kr_control = IPATH_KREG_OFFSET(Control),
-	.kr_counterregbase = IPATH_KREG_OFFSET(CounterRegBase),
-	.kr_debugportselect = IPATH_KREG_OFFSET(DebugPortSelect),
-	.kr_errorclear = IPATH_KREG_OFFSET(ErrorClear),
-	.kr_errormask = IPATH_KREG_OFFSET(ErrorMask),
-	.kr_errorstatus = IPATH_KREG_OFFSET(ErrorStatus),
-	.kr_extctrl = IPATH_KREG_OFFSET(ExtCtrl),
-	.kr_extstatus = IPATH_KREG_OFFSET(ExtStatus),
-	.kr_gpio_clear = IPATH_KREG_OFFSET(GPIOClear),
-	.kr_gpio_mask = IPATH_KREG_OFFSET(GPIOMask),
-	.kr_gpio_out = IPATH_KREG_OFFSET(GPIOOut),
-	.kr_gpio_status = IPATH_KREG_OFFSET(GPIOStatus),
-	.kr_hwdiagctrl = IPATH_KREG_OFFSET(HwDiagCtrl),
-	.kr_hwerrclear = IPATH_KREG_OFFSET(HwErrClear),
-	.kr_hwerrmask = IPATH_KREG_OFFSET(HwErrMask),
-	.kr_hwerrstatus = IPATH_KREG_OFFSET(HwErrStatus),
-	.kr_ibcctrl = IPATH_KREG_OFFSET(IBCCtrl),
-	.kr_ibcstatus = IPATH_KREG_OFFSET(IBCStatus),
-	.kr_intblocked = IPATH_KREG_OFFSET(IntBlocked),
-	.kr_intclear = IPATH_KREG_OFFSET(IntClear),
-	.kr_intmask = IPATH_KREG_OFFSET(IntMask),
-	.kr_intstatus = IPATH_KREG_OFFSET(IntStatus),
-	.kr_mdio = IPATH_KREG_OFFSET(MDIO),
-	.kr_pagealign = IPATH_KREG_OFFSET(PageAlign),
-	.kr_partitionkey = IPATH_KREG_OFFSET(RcvPartitionKey),
-	.kr_portcnt = IPATH_KREG_OFFSET(PortCnt),
-	.kr_rcvbthqp = IPATH_KREG_OFFSET(RcvBTHQP),
-	.kr_rcvbufbase = IPATH_KREG_OFFSET(RcvBufBase),
-	.kr_rcvbufsize = IPATH_KREG_OFFSET(RcvBufSize),
-	.kr_rcvctrl = IPATH_KREG_OFFSET(RcvCtrl),
-	.kr_rcvegrbase = IPATH_KREG_OFFSET(RcvEgrBase),
-	.kr_rcvegrcnt = IPATH_KREG_OFFSET(RcvEgrCnt),
-	.kr_rcvhdrcnt = IPATH_KREG_OFFSET(RcvHdrCnt),
-	.kr_rcvhdrentsize = IPATH_KREG_OFFSET(RcvHdrEntSize),
-	.kr_rcvhdrsize = IPATH_KREG_OFFSET(RcvHdrSize),
-	.kr_rcvintmembase = IPATH_KREG_OFFSET(RxIntMemBase),
-	.kr_rcvintmemsize = IPATH_KREG_OFFSET(RxIntMemSize),
-	.kr_rcvtidbase = IPATH_KREG_OFFSET(RcvTIDBase),
-	.kr_rcvtidcnt = IPATH_KREG_OFFSET(RcvTIDCnt),
-	.kr_revision = IPATH_KREG_OFFSET(Revision),
-	.kr_scratch = IPATH_KREG_OFFSET(Scratch),
-	.kr_sendbuffererror = IPATH_KREG_OFFSET(SendBufferError),
-	.kr_sendctrl = IPATH_KREG_OFFSET(SendCtrl),
-	.kr_sendpioavailaddr = IPATH_KREG_OFFSET(SendAvailAddr),
-	.kr_sendpiobufbase = IPATH_KREG_OFFSET(SendBufBase),
-	.kr_sendpiobufcnt = IPATH_KREG_OFFSET(SendBufCnt),
-	.kr_sendpiosize = IPATH_KREG_OFFSET(SendBufSize),
-	.kr_sendregbase = IPATH_KREG_OFFSET(SendRegBase),
-	.kr_txintmembase = IPATH_KREG_OFFSET(TxIntMemBase),
-	.kr_txintmemsize = IPATH_KREG_OFFSET(TxIntMemSize),
-	.kr_userregbase = IPATH_KREG_OFFSET(UserRegBase),
-
-	.kr_xgxsconfig = IPATH_KREG_OFFSET(XGXSConfig),
-
-	/* send dma related regs */
-	.kr_senddmabase = IPATH_KREG_OFFSET(SendDmaBase),
-	.kr_senddmalengen = IPATH_KREG_OFFSET(SendDmaLenGen),
-	.kr_senddmatail = IPATH_KREG_OFFSET(SendDmaTail),
-	.kr_senddmahead = IPATH_KREG_OFFSET(SendDmaHead),
-	.kr_senddmaheadaddr = IPATH_KREG_OFFSET(SendDmaHeadAddr),
-	.kr_senddmabufmask0 = IPATH_KREG_OFFSET(SendDmaBufMask0),
-	.kr_senddmabufmask1 = IPATH_KREG_OFFSET(SendDmaBufMask1),
-	.kr_senddmabufmask2 = IPATH_KREG_OFFSET(SendDmaBufMask2),
-	.kr_senddmastatus = IPATH_KREG_OFFSET(SendDmaStatus),
-
-	/* SerDes related regs */
-	.kr_ibserdesctrl = IPATH_KREG_OFFSET(IBSerDesCtrl),
-	.kr_ib_epbacc = IPATH_KREG_OFFSET(IbsdEpbAccCtl),
-	.kr_ib_epbtrans = IPATH_KREG_OFFSET(IbsdEpbTransReg),
-	.kr_pcie_epbacc = IPATH_KREG_OFFSET(PcieEpbAccCtl),
-	.kr_pcie_epbtrans = IPATH_KREG_OFFSET(PcieEpbTransCtl),
-	.kr_ib_ddsrxeq = IPATH_KREG_OFFSET(SerDesDDSRXEQ),
-
-	/*
-	 * These should not be used directly via ipath_read_kreg64();
-	 * use them with ipath_read_kreg64_port()
-	 */
-	.kr_rcvhdraddr = IPATH_KREG_OFFSET(RcvHdrAddr0),
-	.kr_rcvhdrtailaddr = IPATH_KREG_OFFSET(RcvHdrTailAddr0),
-
-	/*
-	 * The rcvpktled register controls one of the debug port signals, so
-	 * a packet activity LED can be connected to it.
-	 */
-	.kr_rcvpktledcnt = IPATH_KREG_OFFSET(RcvPktLEDCnt),
-	.kr_pcierbuftestreg0 = IPATH_KREG_OFFSET(PCIeRBufTestReg0),
-	.kr_pcierbuftestreg1 = IPATH_KREG_OFFSET(PCIeRBufTestReg1),
-
-	.kr_hrtbt_guid = IPATH_KREG_OFFSET(HRTBT_GUID),
-	.kr_ibcddrctrl = IPATH_KREG_OFFSET(IBCDDRCtrl),
-	.kr_ibcddrstatus = IPATH_KREG_OFFSET(IBCDDRStatus),
-	.kr_jintreload = IPATH_KREG_OFFSET(JIntReload)
-};
-
-static const struct ipath_cregs ipath_7220_cregs = {
-	.cr_badformatcnt = IPATH_CREG_OFFSET(RxBadFormatCnt),
-	.cr_erricrccnt = IPATH_CREG_OFFSET(RxICRCErrCnt),
-	.cr_errlinkcnt = IPATH_CREG_OFFSET(RxLinkProblemCnt),
-	.cr_errlpcrccnt = IPATH_CREG_OFFSET(RxLPCRCErrCnt),
-	.cr_errpkey = IPATH_CREG_OFFSET(RxPKeyMismatchCnt),
-	.cr_errrcvflowctrlcnt = IPATH_CREG_OFFSET(RxFlowCtrlErrCnt),
-	.cr_err_rlencnt = IPATH_CREG_OFFSET(RxLenErrCnt),
-	.cr_errslencnt = IPATH_CREG_OFFSET(TxLenErrCnt),
-	.cr_errtidfull = IPATH_CREG_OFFSET(RxTIDFullErrCnt),
-	.cr_errtidvalid = IPATH_CREG_OFFSET(RxTIDValidErrCnt),
-	.cr_errvcrccnt = IPATH_CREG_OFFSET(RxVCRCErrCnt),
-	.cr_ibstatuschange = IPATH_CREG_OFFSET(IBStatusChangeCnt),
-	.cr_intcnt = IPATH_CREG_OFFSET(LBIntCnt),
-	.cr_invalidrlencnt = IPATH_CREG_OFFSET(RxMaxMinLenErrCnt),
-	.cr_invalidslencnt = IPATH_CREG_OFFSET(TxMaxMinLenErrCnt),
-	.cr_lbflowstallcnt = IPATH_CREG_OFFSET(LBFlowStallCnt),
-	.cr_pktrcvcnt = IPATH_CREG_OFFSET(RxDataPktCnt),
-	.cr_pktrcvflowctrlcnt = IPATH_CREG_OFFSET(RxFlowPktCnt),
-	.cr_pktsendcnt = IPATH_CREG_OFFSET(TxDataPktCnt),
-	.cr_pktsendflowcnt = IPATH_CREG_OFFSET(TxFlowPktCnt),
-	.cr_portovflcnt = IPATH_CREG_OFFSET(RxP0HdrEgrOvflCnt),
-	.cr_rcvebpcnt = IPATH_CREG_OFFSET(RxEBPCnt),
-	.cr_rcvovflcnt = IPATH_CREG_OFFSET(RxBufOvflCnt),
-	.cr_senddropped = IPATH_CREG_OFFSET(TxDroppedPktCnt),
-	.cr_sendstallcnt = IPATH_CREG_OFFSET(TxFlowStallCnt),
-	.cr_sendunderruncnt = IPATH_CREG_OFFSET(TxUnderrunCnt),
-	.cr_wordrcvcnt = IPATH_CREG_OFFSET(RxDwordCnt),
-	.cr_wordsendcnt = IPATH_CREG_OFFSET(TxDwordCnt),
-	.cr_unsupvlcnt = IPATH_CREG_OFFSET(TxUnsupVLErrCnt),
-	.cr_rxdroppktcnt = IPATH_CREG_OFFSET(RxDroppedPktCnt),
-	.cr_iblinkerrrecovcnt = IPATH_CREG_OFFSET(IBLinkErrRecoveryCnt),
-	.cr_iblinkdowncnt = IPATH_CREG_OFFSET(IBLinkDownedCnt),
-	.cr_ibsymbolerrcnt = IPATH_CREG_OFFSET(IBSymbolErrCnt),
-	.cr_vl15droppedpktcnt = IPATH_CREG_OFFSET(RxVL15DroppedPktCnt),
-	.cr_rxotherlocalphyerrcnt =
-		IPATH_CREG_OFFSET(RxOtherLocalPhyErrCnt),
-	.cr_excessbufferovflcnt = IPATH_CREG_OFFSET(ExcessBufferOvflCnt),
-	.cr_locallinkintegrityerrcnt =
-		IPATH_CREG_OFFSET(LocalLinkIntegrityErrCnt),
-	.cr_rxvlerrcnt = IPATH_CREG_OFFSET(RxVlErrCnt),
-	.cr_rxdlidfltrcnt = IPATH_CREG_OFFSET(RxDlidFltrCnt),
-	.cr_psstat = IPATH_CREG_OFFSET(PSStat),
-	.cr_psstart = IPATH_CREG_OFFSET(PSStart),
-	.cr_psinterval = IPATH_CREG_OFFSET(PSInterval),
-	.cr_psrcvdatacount = IPATH_CREG_OFFSET(PSRcvDataCount),
-	.cr_psrcvpktscount = IPATH_CREG_OFFSET(PSRcvPktsCount),
-	.cr_psxmitdatacount = IPATH_CREG_OFFSET(PSXmitDataCount),
-	.cr_psxmitpktscount = IPATH_CREG_OFFSET(PSXmitPktsCount),
-	.cr_psxmitwaitcount = IPATH_CREG_OFFSET(PSXmitWaitCount),
-};
-
-/* kr_control bits */
-#define INFINIPATH_C_RESET (1U<<7)
-
-/* kr_intstatus, kr_intclear, kr_intmask bits */
-#define INFINIPATH_I_RCVURG_MASK ((1ULL<<17)-1)
-#define INFINIPATH_I_RCVURG_SHIFT 32
-#define INFINIPATH_I_RCVAVAIL_MASK ((1ULL<<17)-1)
-#define INFINIPATH_I_RCVAVAIL_SHIFT 0
-#define INFINIPATH_I_SERDESTRIMDONE (1ULL<<27)
-
-/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */
-#define INFINIPATH_HWE_PCIEMEMPARITYERR_MASK  0x00000000000000ffULL
-#define INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT 0
-#define INFINIPATH_HWE_PCIEPOISONEDTLP      0x0000000010000000ULL
-#define INFINIPATH_HWE_PCIECPLTIMEOUT       0x0000000020000000ULL
-#define INFINIPATH_HWE_PCIEBUSPARITYXTLH    0x0000000040000000ULL
-#define INFINIPATH_HWE_PCIEBUSPARITYXADM    0x0000000080000000ULL
-#define INFINIPATH_HWE_PCIEBUSPARITYRADM    0x0000000100000000ULL
-#define INFINIPATH_HWE_COREPLL_FBSLIP       0x0080000000000000ULL
-#define INFINIPATH_HWE_COREPLL_RFSLIP       0x0100000000000000ULL
-#define INFINIPATH_HWE_PCIE1PLLFAILED       0x0400000000000000ULL
-#define INFINIPATH_HWE_PCIE0PLLFAILED       0x0800000000000000ULL
-#define INFINIPATH_HWE_SERDESPLLFAILED      0x1000000000000000ULL
-/* specific to this chip */
-#define INFINIPATH_HWE_PCIECPLDATAQUEUEERR         0x0000000000000040ULL
-#define INFINIPATH_HWE_PCIECPLHDRQUEUEERR          0x0000000000000080ULL
-#define INFINIPATH_HWE_SDMAMEMREADERR              0x0000000010000000ULL
-#define INFINIPATH_HWE_CLK_UC_PLLNOTLOCKED	   0x2000000000000000ULL
-#define INFINIPATH_HWE_PCIESERDESQ0PCLKNOTDETECT   0x0100000000000000ULL
-#define INFINIPATH_HWE_PCIESERDESQ1PCLKNOTDETECT   0x0200000000000000ULL
-#define INFINIPATH_HWE_PCIESERDESQ2PCLKNOTDETECT   0x0400000000000000ULL
-#define INFINIPATH_HWE_PCIESERDESQ3PCLKNOTDETECT   0x0800000000000000ULL
-#define INFINIPATH_HWE_DDSRXEQMEMORYPARITYERR	   0x0000008000000000ULL
-#define INFINIPATH_HWE_IB_UC_MEMORYPARITYERR	   0x0000004000000000ULL
-#define INFINIPATH_HWE_PCIE_UC_OCT0MEMORYPARITYERR 0x0000001000000000ULL
-#define INFINIPATH_HWE_PCIE_UC_OCT1MEMORYPARITYERR 0x0000002000000000ULL
-
-#define IBA7220_IBCS_LINKTRAININGSTATE_MASK 0x1F
-#define IBA7220_IBCS_LINKSTATE_SHIFT 5
-#define IBA7220_IBCS_LINKSPEED_SHIFT 8
-#define IBA7220_IBCS_LINKWIDTH_SHIFT 9
-
-#define IBA7220_IBCC_LINKINITCMD_MASK 0x7ULL
-#define IBA7220_IBCC_LINKCMD_SHIFT 19
-#define IBA7220_IBCC_MAXPKTLEN_SHIFT 21
-
-/* kr_ibcddrctrl bits */
-#define IBA7220_IBC_DLIDLMC_MASK	0xFFFFFFFFUL
-#define IBA7220_IBC_DLIDLMC_SHIFT	32
-#define IBA7220_IBC_HRTBT_MASK	3
-#define IBA7220_IBC_HRTBT_SHIFT	16
-#define IBA7220_IBC_HRTBT_ENB	0x10000UL
-#define IBA7220_IBC_LANE_REV_SUPPORTED (1<<8)
-#define IBA7220_IBC_LREV_MASK	1
-#define IBA7220_IBC_LREV_SHIFT	8
-#define IBA7220_IBC_RXPOL_MASK	1
-#define IBA7220_IBC_RXPOL_SHIFT	7
-#define IBA7220_IBC_WIDTH_SHIFT	5
-#define IBA7220_IBC_WIDTH_MASK	0x3
-#define IBA7220_IBC_WIDTH_1X_ONLY	(0<<IBA7220_IBC_WIDTH_SHIFT)
-#define IBA7220_IBC_WIDTH_4X_ONLY	(1<<IBA7220_IBC_WIDTH_SHIFT)
-#define IBA7220_IBC_WIDTH_AUTONEG	(2<<IBA7220_IBC_WIDTH_SHIFT)
-#define IBA7220_IBC_SPEED_AUTONEG	(1<<1)
-#define IBA7220_IBC_SPEED_SDR		(1<<2)
-#define IBA7220_IBC_SPEED_DDR		(1<<3)
-#define IBA7220_IBC_SPEED_AUTONEG_MASK  (0x7<<1)
-#define IBA7220_IBC_IBTA_1_2_MASK	(1)
-
-/* kr_ibcddrstatus */
-/* link latency shift is 0, don't bother defining */
-#define IBA7220_DDRSTAT_LINKLAT_MASK    0x3ffffff
-
-/* kr_extstatus bits */
-#define INFINIPATH_EXTS_FREQSEL 0x2
-#define INFINIPATH_EXTS_SERDESSEL 0x4
-#define INFINIPATH_EXTS_MEMBIST_ENDTEST     0x0000000000004000
-#define INFINIPATH_EXTS_MEMBIST_DISABLED    0x0000000000008000
-
-/* kr_xgxsconfig bits */
-#define INFINIPATH_XGXS_RESET          0x5ULL
-#define INFINIPATH_XGXS_FC_SAFE        (1ULL<<63)
-
-/* kr_rcvpktledcnt */
-#define IBA7220_LEDBLINK_ON_SHIFT 32 /* 4ns period on after packet */
-#define IBA7220_LEDBLINK_OFF_SHIFT 0 /* 4ns period off before next on */
-
-#define _IPATH_GPIO_SDA_NUM 1
-#define _IPATH_GPIO_SCL_NUM 0
-
-#define IPATH_GPIO_SDA (1ULL << \
-	(_IPATH_GPIO_SDA_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT))
-#define IPATH_GPIO_SCL (1ULL << \
-	(_IPATH_GPIO_SCL_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT))
-
-#define IBA7220_R_INTRAVAIL_SHIFT 17
-#define IBA7220_R_TAILUPD_SHIFT 35
-#define IBA7220_R_PORTCFG_SHIFT 36
-
-#define INFINIPATH_JINT_PACKETSHIFT 16
-#define INFINIPATH_JINT_DEFAULT_IDLE_TICKS  0
-#define INFINIPATH_JINT_DEFAULT_MAX_PACKETS 0
-
-#define IBA7220_HDRHEAD_PKTINT_SHIFT 32 /* interrupt cnt in upper 32 bits */
-
-/*
- * the size bits give us 2^N, in KB units.  0 marks the entry invalid,
- * and 7 is reserved.  We currently use only 2KB and 4KB.
- */
-#define IBA7220_TID_SZ_SHIFT 37 /* shift to 3bit size selector */
-#define IBA7220_TID_SZ_2K (1UL<<IBA7220_TID_SZ_SHIFT) /* 2KB */
-#define IBA7220_TID_SZ_4K (2UL<<IBA7220_TID_SZ_SHIFT) /* 4KB */
-#define IBA7220_TID_PA_SHIFT 11U /* TID addr in chip stored w/o low bits */
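-/* e.g. a size selector of 1 gives 2^1 KB = 2KB, 2 gives 4KB (bits 39:37) */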
-
-#define IPATH_AUTONEG_TRIES 5 /* sequential retries to negotiate DDR */
-
-static char int_type[16] = "auto";
-module_param_string(interrupt_type, int_type, sizeof(int_type), 0444);
-MODULE_PARM_DESC(interrupt_type, "interrupt_type=auto|force_msi|force_intx");
-
-/* packet rate matching delay; chip has support */
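-/*
- * Indexed as rate_to_delay[speed][width] (0 = SDR or 1X, 1 = DDR or
- * 4X); doubling the speed or quadrupling the width cuts the delay
- * proportionally, from 8 for SDR/1X down to 1 for DDR/4X.
- */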
-static u8 rate_to_delay[2][2] = {
-	/* 1x, 4x */
-	{   8, 2 }, /* SDR */
-	{   4, 1 }  /* DDR */
-};
-
-/* 7220 specific hardware errors... */
-static const struct ipath_hwerror_msgs ipath_7220_hwerror_msgs[] = {
-	INFINIPATH_HWE_MSG(PCIEPOISONEDTLP, "PCIe Poisoned TLP"),
-	INFINIPATH_HWE_MSG(PCIECPLTIMEOUT, "PCIe completion timeout"),
-	/*
- * In practice, it's unlikely that we'll see PCIe PLL, or bus
-	 * parity or memory parity error failures, because most likely we
-	 * won't be able to talk to the core of the chip.  Nonetheless, we
-	 * might see them, if they are in parts of the PCIe core that aren't
-	 * essential.
-	 */
-	INFINIPATH_HWE_MSG(PCIE1PLLFAILED, "PCIePLL1"),
-	INFINIPATH_HWE_MSG(PCIE0PLLFAILED, "PCIePLL0"),
-	INFINIPATH_HWE_MSG(PCIEBUSPARITYXTLH, "PCIe XTLH core parity"),
-	INFINIPATH_HWE_MSG(PCIEBUSPARITYXADM, "PCIe ADM TX core parity"),
-	INFINIPATH_HWE_MSG(PCIEBUSPARITYRADM, "PCIe ADM RX core parity"),
-	INFINIPATH_HWE_MSG(RXDSYNCMEMPARITYERR, "Rx Dsync"),
-	INFINIPATH_HWE_MSG(SERDESPLLFAILED, "SerDes PLL"),
-	INFINIPATH_HWE_MSG(PCIECPLDATAQUEUEERR, "PCIe cpl data queue"),
-	INFINIPATH_HWE_MSG(PCIECPLHDRQUEUEERR, "PCIe cpl header queue"),
-	INFINIPATH_HWE_MSG(SDMAMEMREADERR, "Send DMA memory read"),
-	INFINIPATH_HWE_MSG(CLK_UC_PLLNOTLOCKED, "uC PLL clock not locked"),
-	INFINIPATH_HWE_MSG(PCIESERDESQ0PCLKNOTDETECT,
-		"PCIe serdes Q0 no clock"),
-	INFINIPATH_HWE_MSG(PCIESERDESQ1PCLKNOTDETECT,
-		"PCIe serdes Q1 no clock"),
-	INFINIPATH_HWE_MSG(PCIESERDESQ2PCLKNOTDETECT,
-		"PCIe serdes Q2 no clock"),
-	INFINIPATH_HWE_MSG(PCIESERDESQ3PCLKNOTDETECT,
-		"PCIe serdes Q3 no clock"),
-	INFINIPATH_HWE_MSG(DDSRXEQMEMORYPARITYERR,
-		"DDS RXEQ memory parity"),
-	INFINIPATH_HWE_MSG(IB_UC_MEMORYPARITYERR, "IB uC memory parity"),
-	INFINIPATH_HWE_MSG(PCIE_UC_OCT0MEMORYPARITYERR,
-		"PCIe uC oct0 memory parity"),
-	INFINIPATH_HWE_MSG(PCIE_UC_OCT1MEMORYPARITYERR,
-		"PCIe uC oct1 memory parity"),
-};
-
-static void autoneg_work(struct work_struct *);
-
-/*
- * The offset is different for different configured port numbers, since
- * port0 is fixed in size, but others can vary.  Make it a function to
- * make the issue more obvious.
- */
-static inline u32 port_egrtid_idx(struct ipath_devdata *dd, unsigned port)
-{
-	 return port ? dd->ipath_p0_rcvegrcnt +
-		 (port-1) * dd->ipath_rcvegrcnt : 0;
-}
-
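-/*
- * Worked example (hypothetical counts): with ipath_p0_rcvegrcnt =
- * 2048 and ipath_rcvegrcnt = 512 per port, port 0's eager TIDs start
- * at index 0, port 1's at 2048, port 2's at 2560, and so on.
- */
-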
-static void ipath_7220_txe_recover(struct ipath_devdata *dd)
-{
-	++ipath_stats.sps_txeparity;
-
-	dev_info(&dd->pcidev->dev,
-		"Recovering from TXE PIO parity error\n");
-	ipath_disarm_senderrbufs(dd);
-}
-
-
-/**
- * ipath_7220_handle_hwerrors - display hardware errors.
- * @dd: the infinipath device
- * @msg: the output buffer
- * @msgl: the size of the output buffer
- *
- * Most hardware errors are catastrophic, but for right now we'll
- * print them and continue.  We reuse the same message buffer as
- * ipath_handle_errors() to avoid excessive stack usage.
- */
-static void ipath_7220_handle_hwerrors(struct ipath_devdata *dd, char *msg,
-				       size_t msgl)
-{
-	ipath_err_t hwerrs;
-	u32 bits, ctrl;
-	int isfatal = 0;
-	char bitsmsg[64];
-	int log_idx;
-
-	hwerrs = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus);
-	if (!hwerrs) {
-		/*
-		 * Better than printing confusing messages.
-		 * This seems to be related to clearing the crc error, or
-		 * the pll error during init.
-		 */
-		ipath_cdbg(VERBOSE, "Called but no hardware errors set\n");
-		goto bail;
-	} else if (hwerrs == ~0ULL) {
-		ipath_dev_err(dd, "Read of hardware error status failed "
-			      "(all bits set); ignoring\n");
-		goto bail;
-	}
-	ipath_stats.sps_hwerrs++;
-
-	/*
-	 * Always clear the error status register, except MEMBISTFAIL,
-	 * regardless of whether we continue or stop using the chip.
-	 * We want that set so we know it failed, even across driver reload.
-	 * We'll still ignore it in the hwerrmask.  We do this partly for
-	 * diagnostics, but also for support.
-	 */
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
-			 hwerrs&~INFINIPATH_HWE_MEMBISTFAILED);
-
-	hwerrs &= dd->ipath_hwerrmask;
-
-	/* We log some errors to EEPROM, check if we have any of those. */
-	for (log_idx = 0; log_idx < IPATH_EEP_LOG_CNT; ++log_idx)
-		if (hwerrs & dd->ipath_eep_st_masks[log_idx].hwerrs_to_log)
-			ipath_inc_eeprom_err(dd, log_idx, 1);
-	/*
-	 * Make sure we get this much out, unless told to be quiet,
-	 * or it's occurred within the last 5 seconds.
-	 */
-	if ((hwerrs & ~(dd->ipath_lasthwerror |
-			((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF |
-			  INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC)
-			 << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT))) ||
-	    (ipath_debug & __IPATH_VERBDBG))
-		dev_info(&dd->pcidev->dev, "Hardware error: hwerr=0x%llx "
-			 "(cleared)\n", (unsigned long long) hwerrs);
-	dd->ipath_lasthwerror |= hwerrs;
-
-	if (hwerrs & ~dd->ipath_hwe_bitsextant)
-		ipath_dev_err(dd, "hwerror interrupt with unknown errors "
-			      "%llx set\n", (unsigned long long)
-			      (hwerrs & ~dd->ipath_hwe_bitsextant));
-
-	if (hwerrs & INFINIPATH_HWE_IB_UC_MEMORYPARITYERR)
-		ipath_sd7220_clr_ibpar(dd);
-
-	ctrl = ipath_read_kreg32(dd, dd->ipath_kregs->kr_control);
-	if ((ctrl & INFINIPATH_C_FREEZEMODE) && !ipath_diag_inuse) {
-		/*
-		 * Parity errors in send memory are recoverable by h/w;
-		 * just do housekeeping, exit freeze mode, and continue.
-		 */
-		if (hwerrs & ((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF |
-			       INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC)
-			      << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT)) {
-			ipath_7220_txe_recover(dd);
-			hwerrs &= ~((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF |
-				     INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC)
-				    << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT);
-		}
-		if (hwerrs) {
-			/*
-			 * If any bits we aren't ignoring are set, only make
-			 * the complaint once, in case it's stuck or recurring
-			 * and we get here multiple times.  Force the link
-			 * down, so the switch knows, and the LEDs are
-			 * turned off.
-			 */
-			if (dd->ipath_flags & IPATH_INITTED) {
-				ipath_set_linkstate(dd, IPATH_IB_LINKDOWN);
-				ipath_setup_7220_setextled(dd,
-					INFINIPATH_IBCS_L_STATE_DOWN,
-					INFINIPATH_IBCS_LT_STATE_DISABLED);
-				ipath_dev_err(dd, "Fatal Hardware Error "
-					      "(freeze mode), no longer"
-					      " usable, SN %.16s\n",
-						  dd->ipath_serial);
-				isfatal = 1;
-			}
-			/*
-			 * Mark as having had an error for driver, and also
-			 * for /sys and status word mapped to user programs.
-			 * This marks unit as not usable, until reset.
-			 */
-			*dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
-			*dd->ipath_statusp |= IPATH_STATUS_HWERROR;
-			dd->ipath_flags &= ~IPATH_INITTED;
-		} else {
-			ipath_dbg("Clearing freezemode on ignored or "
-				"recovered hardware error\n");
-			ipath_clear_freeze(dd);
-		}
-	}
-
-	*msg = '\0';
-
-	if (hwerrs & INFINIPATH_HWE_MEMBISTFAILED) {
-		strlcat(msg, "[Memory BIST test failed, "
-			"InfiniPath hardware unusable]", msgl);
-		/* ignore from now on, so disable until driver reloaded */
-		*dd->ipath_statusp |= IPATH_STATUS_HWERROR;
-		dd->ipath_hwerrmask &= ~INFINIPATH_HWE_MEMBISTFAILED;
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
-				 dd->ipath_hwerrmask);
-	}
-
-	ipath_format_hwerrors(hwerrs,
-			      ipath_7220_hwerror_msgs,
-			      ARRAY_SIZE(ipath_7220_hwerror_msgs),
-			      msg, msgl);
-
-	if (hwerrs & (INFINIPATH_HWE_PCIEMEMPARITYERR_MASK
-		      << INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT)) {
-		bits = (u32) ((hwerrs >>
-			       INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT) &
-			      INFINIPATH_HWE_PCIEMEMPARITYERR_MASK);
-		snprintf(bitsmsg, sizeof bitsmsg,
-			 "[PCIe Mem Parity Errs %x] ", bits);
-		strlcat(msg, bitsmsg, msgl);
-	}
-
-#define _IPATH_PLL_FAIL (INFINIPATH_HWE_COREPLL_FBSLIP |	\
-			 INFINIPATH_HWE_COREPLL_RFSLIP)
-
-	if (hwerrs & _IPATH_PLL_FAIL) {
-		snprintf(bitsmsg, sizeof bitsmsg,
-			 "[PLL failed (%llx), InfiniPath hardware unusable]",
-			 (unsigned long long) hwerrs & _IPATH_PLL_FAIL);
-		strlcat(msg, bitsmsg, msgl);
-		/* ignore from now on, so disable until driver reloaded */
-		dd->ipath_hwerrmask &= ~(hwerrs & _IPATH_PLL_FAIL);
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
-				 dd->ipath_hwerrmask);
-	}
-
-	if (hwerrs & INFINIPATH_HWE_SERDESPLLFAILED) {
-		/*
-		 * If it occurs, it is left masked since the external
-		 * interface is unused.
-		 */
-		dd->ipath_hwerrmask &= ~INFINIPATH_HWE_SERDESPLLFAILED;
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
-				 dd->ipath_hwerrmask);
-	}
-
-	ipath_dev_err(dd, "%s hardware error\n", msg);
-	/*
-	 * For the /sys status file: if no trailing } is copied, we'll
-	 * know it was truncated.
-	 */
-	if (isfatal && !ipath_diag_inuse && dd->ipath_freezemsg)
-		snprintf(dd->ipath_freezemsg, dd->ipath_freezelen,
-			 "{%s}", msg);
-bail:;
-}
-
-/**
- * ipath_7220_boardname - fill in the board name
- * @dd: the infinipath device
- * @name: the output buffer
- * @namelen: the size of the output buffer
- *
- * info is based on the board revision register
- */
-static int ipath_7220_boardname(struct ipath_devdata *dd, char *name,
-	size_t namelen)
-{
-	char *n = NULL;
-	u8 boardrev = dd->ipath_boardrev;
-	int ret;
-
-	if (boardrev == 15) {
-		/*
-		 * Emulator sometimes comes up all-ones, rather than zero.
-		 */
-		boardrev = 0;
-		dd->ipath_boardrev = boardrev;
-	}
-	switch (boardrev) {
-	case 0:
-		n = "InfiniPath_7220_Emulation";
-		break;
-	case 1:
-		n = "InfiniPath_QLE7240";
-		break;
-	case 2:
-		n = "InfiniPath_QLE7280";
-		break;
-	case 3:
-		n = "InfiniPath_QLE7242";
-		break;
-	case 4:
-		n = "InfiniPath_QEM7240";
-		break;
-	case 5:
-		n = "InfiniPath_QMI7240";
-		break;
-	case 6:
-		n = "InfiniPath_QMI7264";
-		break;
-	case 7:
-		n = "InfiniPath_QMH7240";
-		break;
-	case 8:
-		n = "InfiniPath_QME7240";
-		break;
-	case 9:
-		n = "InfiniPath_QLE7250";
-		break;
-	case 10:
-		n = "InfiniPath_QLE7290";
-		break;
-	case 11:
-		n = "InfiniPath_QEM7250";
-		break;
-	case 12:
-		n = "InfiniPath_QLE-Bringup";
-		break;
-	default:
-		ipath_dev_err(dd,
-			      "Don't yet know about board with ID %u\n",
-			      boardrev);
-		snprintf(name, namelen, "Unknown_InfiniPath_PCIe_%u",
-			 boardrev);
-		break;
-	}
-	if (n)
-		snprintf(name, namelen, "%s", n);
-
-	if (dd->ipath_majrev != 5 || !dd->ipath_minrev ||
-		dd->ipath_minrev > 2) {
-		ipath_dev_err(dd, "Unsupported InfiniPath hardware "
-			      "revision %u.%u!\n",
-			      dd->ipath_majrev, dd->ipath_minrev);
-		ret = 1;
-	} else if (dd->ipath_minrev == 1 &&
-		!(dd->ipath_flags & IPATH_INITTED)) {
-		/* Rev1 chips are prototype. Complain at init, but allow use */
-		ipath_dev_err(dd, "Unsupported hardware "
-			      "revision %u.%u, Contact support@qlogic.com\n",
-			      dd->ipath_majrev, dd->ipath_minrev);
-		ret = 0;
-	} else
-		ret = 0;
-
-	/*
-	 * Set here, not in ipath_init_*_funcs, because we have to do
-	 * it after we can read chip registers.
-	 */
-	dd->ipath_ureg_align = 0x10000;  /* 64KB alignment */
-
-	return ret;
-}
-
-/**
- * ipath_7220_init_hwerrors - enable hardware errors
- * @dd: the infinipath device
- *
- * Now that we have finished initializing everything that might reasonably
- * cause a hardware error, and cleared those error bits as they occurred,
- * we can enable hardware errors in the mask (potentially enabling
- * freeze mode), and enable hardware errors as errors (along with
- * everything else) in errormask
- */
-static void ipath_7220_init_hwerrors(struct ipath_devdata *dd)
-{
-	ipath_err_t val;
-	u64 extsval;
-
-	extsval = ipath_read_kreg64(dd, dd->ipath_kregs->kr_extstatus);
-
-	if (!(extsval & (INFINIPATH_EXTS_MEMBIST_ENDTEST |
-			INFINIPATH_EXTS_MEMBIST_DISABLED)))
-		ipath_dev_err(dd, "MemBIST did not complete!\n");
-	if (extsval & INFINIPATH_EXTS_MEMBIST_DISABLED)
-		dev_info(&dd->pcidev->dev, "MemBIST is disabled.\n");
-
-	val = ~0ULL;	/* barring bugs, all hwerrors become interrupts, */
-
-	if (!dd->ipath_boardrev)	/* no PLL for Emulator */
-		val &= ~INFINIPATH_HWE_SERDESPLLFAILED;
-
-	if (dd->ipath_minrev == 1)
-		val &= ~(1ULL << 42); /* TXE LaunchFIFO Parity rev1 issue */
-
-	val &= ~INFINIPATH_HWE_IB_UC_MEMORYPARITYERR;
-	dd->ipath_hwerrmask = val;
-
-	/*
-	 * special trigger "error" is for debugging purposes. It
-	 * works around a processor/chipset problem.  The error
-	 * interrupt allows us to count occurrences, but we don't
-	 * want to pay the overhead for normal use.  Emulation only.
-	 */
-	if (!dd->ipath_boardrev)
-		dd->ipath_maskederrs = INFINIPATH_E_SENDSPECIALTRIGGER;
-}
-
-/*
- * All detailed interaction with the SerDes has been moved to ipath_sd7220.c
- *
- * The portion of IBA7220-specific bringup_serdes() that actually deals with
- * registers and memory within the SerDes itself is ipath_sd7220_init().
- */
-
-/**
- * ipath_7220_bringup_serdes - bring up the serdes
- * @dd: the infinipath device
- */
-static int ipath_7220_bringup_serdes(struct ipath_devdata *dd)
-{
-	int ret = 0;
-	u64 val, prev_val, guid;
-	int was_reset;		/* Note whether uC was reset */
-
-	ipath_dbg("Trying to bringup serdes\n");
-
-	if (ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus) &
-	    INFINIPATH_HWE_SERDESPLLFAILED) {
-		ipath_dbg("At start, serdes PLL failed bit set "
-			  "in hwerrstatus, clearing and continuing\n");
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
-				 INFINIPATH_HWE_SERDESPLLFAILED);
-	}
-
-	dd->ibdeltainprog = 1;
-	dd->ibsymsnap =
-	     ipath_read_creg32(dd, dd->ipath_cregs->cr_ibsymbolerrcnt);
-	dd->iblnkerrsnap =
-	     ipath_read_creg32(dd, dd->ipath_cregs->cr_iblinkerrrecovcnt);
-
-	if (!dd->ipath_ibcddrctrl) {
-		/* not on re-init after reset */
-		dd->ipath_ibcddrctrl =
-			ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcddrctrl);
-
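-		/*
-		 * If both SDR and DDR are enabled, request speed
-		 * autonegotiation (with IBTA 1.2 mode); otherwise
-		 * force the single enabled speed.
-		 */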
-		if (dd->ipath_link_speed_enabled ==
-			(IPATH_IB_SDR | IPATH_IB_DDR))
-			dd->ipath_ibcddrctrl |=
-				IBA7220_IBC_SPEED_AUTONEG_MASK |
-				IBA7220_IBC_IBTA_1_2_MASK;
-		else
-			dd->ipath_ibcddrctrl |=
-				dd->ipath_link_speed_enabled == IPATH_IB_DDR
-				?  IBA7220_IBC_SPEED_DDR :
-				IBA7220_IBC_SPEED_SDR;
-		if ((dd->ipath_link_width_enabled & (IB_WIDTH_1X |
-			IB_WIDTH_4X)) == (IB_WIDTH_1X | IB_WIDTH_4X))
-			dd->ipath_ibcddrctrl |= IBA7220_IBC_WIDTH_AUTONEG;
-		else
-			dd->ipath_ibcddrctrl |=
-				dd->ipath_link_width_enabled == IB_WIDTH_4X
-				? IBA7220_IBC_WIDTH_4X_ONLY :
-				IBA7220_IBC_WIDTH_1X_ONLY;
-
-		/* always enable these on driver reload, not sticky */
-		dd->ipath_ibcddrctrl |=
-			IBA7220_IBC_RXPOL_MASK << IBA7220_IBC_RXPOL_SHIFT;
-		dd->ipath_ibcddrctrl |=
-			IBA7220_IBC_HRTBT_MASK << IBA7220_IBC_HRTBT_SHIFT;
-		/*
-		 * automatic lane reversal detection for receive
-		 * doesn't work correctly in rev 1, so disable it
-		 * on that rev, otherwise enable (disabling not
-		 * sticky across reload for >rev1)
-		 */
-		if (dd->ipath_minrev == 1)
-			dd->ipath_ibcddrctrl &=
-			~IBA7220_IBC_LANE_REV_SUPPORTED;
-		else
-			dd->ipath_ibcddrctrl |=
-				IBA7220_IBC_LANE_REV_SUPPORTED;
-	}
-
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcddrctrl,
-			dd->ipath_ibcddrctrl);
-
-	ipath_write_kreg(dd, IPATH_KREG_OFFSET(IBNCModeCtrl), 0ULL);
-
-	/* IBA7220 has SERDES MPU reset in D0 of what _was_ IBPLLCfg */
-	val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibserdesctrl);
-	/* remember if uC was in Reset or not, for dactrim */
-	was_reset = (val & 1);
-	ipath_cdbg(VERBOSE, "IBReset %s xgxsconfig %llx\n",
-		   was_reset ? "Asserted" : "Negated", (unsigned long long)
-		   ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig));
-
-	if (dd->ipath_boardrev) {
-		/*
-		 * Hardware is not emulator, and may have been reset. Init it.
-		 * Below will release reset, but needs to know if chip was
-		 * originally in reset, to only trim DACs on first time
-		 * after chip reset or powercycle (not driver reload)
-		 */
-		ret = ipath_sd7220_init(dd, was_reset);
-	}
-
-	val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
-	prev_val = val;
-	val |= INFINIPATH_XGXS_FC_SAFE;
-	if (val != prev_val) {
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
-		ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
-	}
-	if (val & INFINIPATH_XGXS_RESET)
-		val &= ~INFINIPATH_XGXS_RESET;
-	if (val != prev_val)
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
-
-	ipath_cdbg(VERBOSE, "done: xgxs=%llx from %llx\n",
-		   (unsigned long long)
-		   ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig),
-		   (unsigned long long) prev_val);
-
-	guid = be64_to_cpu(dd->ipath_guid);
-
-	if (!guid) {
-		/* have to have something, so use likely unique tsc */
-		guid = get_cycles();
-		ipath_dbg("No GUID for heartbeat, faking %llx\n",
-			(unsigned long long)guid);
-	} else
-		ipath_cdbg(VERBOSE, "Wrote %llX to HRTBT_GUID\n",
-			(unsigned long long) guid);
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_hrtbt_guid, guid);
-	return ret;
-}
-
-static void ipath_7220_config_jint(struct ipath_devdata *dd,
-				   u16 idle_ticks, u16 max_packets)
-{
-
-	/*
-	 * We can request a receive interrupt for 1 or more packets
-	 * from current offset.
-	 */
-	if (idle_ticks == 0 || max_packets == 0)
-		/* interrupt after one packet if no mitigation */
-		dd->ipath_rhdrhead_intr_off =
-			1ULL << IBA7220_HDRHEAD_PKTINT_SHIFT;
-	else
-		/* Turn off RcvHdrHead interrupts if using mitigation */
-		dd->ipath_rhdrhead_intr_off = 0ULL;
-
-	/* refresh kernel RcvHdrHead registers... */
-	ipath_write_ureg(dd, ur_rcvhdrhead,
-			 dd->ipath_rhdrhead_intr_off |
-			 dd->ipath_pd[0]->port_head, 0);
-
-	dd->ipath_jint_max_packets = max_packets;
-	dd->ipath_jint_idle_ticks = idle_ticks;
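-	/* the reload register packs both thresholds: the packet count
-	 * in the upper field, the idle-tick count in the low bits */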
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_jintreload,
-			 ((u64) max_packets << INFINIPATH_JINT_PACKETSHIFT) |
-			 idle_ticks);
-}
-
-/**
- * ipath_7220_quiet_serdes - set serdes to txidle
- * @dd: the infinipath device
- * Called when driver is being unloaded
- */
-static void ipath_7220_quiet_serdes(struct ipath_devdata *dd)
-{
-	u64 val;
-	if (dd->ibsymdelta || dd->iblnkerrdelta ||
-	    dd->ibdeltainprog) {
-		u64 diagc;
-		/* enable counter writes */
-		diagc = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwdiagctrl);
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_hwdiagctrl,
-				 diagc | INFINIPATH_DC_COUNTERWREN);
-
-		if (dd->ibsymdelta || dd->ibdeltainprog) {
-			val = ipath_read_creg32(dd,
-					dd->ipath_cregs->cr_ibsymbolerrcnt);
-			if (dd->ibdeltainprog)
-				val -= val - dd->ibsymsnap;
-			val -= dd->ibsymdelta;
-			ipath_write_creg(dd,
-				  dd->ipath_cregs->cr_ibsymbolerrcnt, val);
-		}
-		if (dd->iblnkerrdelta || dd->ibdeltainprog) {
-			val = ipath_read_creg32(dd,
-					dd->ipath_cregs->cr_iblinkerrrecovcnt);
-			if (dd->ibdeltainprog)
-				val -= val - dd->iblnkerrsnap;
-			val -= dd->iblnkerrdelta;
-			ipath_write_creg(dd,
-				   dd->ipath_cregs->cr_iblinkerrrecovcnt, val);
-		}
-
-		/* and disable counter writes */
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_hwdiagctrl, diagc);
-	}
-
-	dd->ipath_flags &= ~IPATH_IB_AUTONEG_INPROG;
-	wake_up(&dd->ipath_autoneg_wait);
-	cancel_delayed_work(&dd->ipath_autoneg_work);
-	flush_scheduled_work();
-	ipath_shutdown_relock_poll(dd);
-	val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
-	val |= INFINIPATH_XGXS_RESET;
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
-}
-
-static int ipath_7220_intconfig(struct ipath_devdata *dd)
-{
-	ipath_7220_config_jint(dd, dd->ipath_jint_idle_ticks,
-			       dd->ipath_jint_max_packets);
-	return 0;
-}
-
-/**
- * ipath_setup_7220_setextled - set the state of the two external LEDs
- * @dd: the infinipath device
- * @lst: the L state
- * @ltst: the LT state
- *
- * These LEDs indicate the physical and logical state of IB link.
- * For this chip (at least with recommended board pinouts), LED1
- * is Yellow (logical state) and LED2 is Green (physical state),
- *
- * Note:  We try to match the Mellanox HCA LED behavior as best
- * we can.  Green indicates physical link state is OK (something is
- * plugged in, and we can train).
- * Amber indicates the link is logically up (ACTIVE).
- * Mellanox further blinks the amber LED to indicate data packet
- * activity, but we have no hardware support for that, so it would
- * require waking up every 10-20 msecs and checking the counters
- * on the chip, and then turning the LED off if appropriate.  That's
- * visible overhead, so not something we will do.
- *
- */
-static void ipath_setup_7220_setextled(struct ipath_devdata *dd, u64 lst,
-				       u64 ltst)
-{
-	u64 extctl, ledblink = 0;
-	unsigned long flags = 0;
-
-	/* the diags use the LED to indicate diag info, so we leave
-	 * the external LED alone when the diags are running */
-	if (ipath_diag_inuse)
-		return;
-
-	/* Allow override of LED display, e.g. for locating the system in a rack */
-	if (dd->ipath_led_override) {
-		ltst = (dd->ipath_led_override & IPATH_LED_PHYS)
-			? INFINIPATH_IBCS_LT_STATE_LINKUP
-			: INFINIPATH_IBCS_LT_STATE_DISABLED;
-		lst = (dd->ipath_led_override & IPATH_LED_LOG)
-			? INFINIPATH_IBCS_L_STATE_ACTIVE
-			: INFINIPATH_IBCS_L_STATE_DOWN;
-	}
-
-	spin_lock_irqsave(&dd->ipath_gpio_lock, flags);
-	extctl = dd->ipath_extctrl & ~(INFINIPATH_EXTC_LED1PRIPORT_ON |
-				       INFINIPATH_EXTC_LED2PRIPORT_ON);
-	if (ltst == INFINIPATH_IBCS_LT_STATE_LINKUP) {
-		extctl |= INFINIPATH_EXTC_LED1PRIPORT_ON;
-		/*
-		 * counts are in chip clock (4ns) periods.
-		 * This is 66.6 ms on,
-		 * 187.5 ms off, with packets rcvd.
-		 */
-		ledblink = ((66600*1000UL/4) << IBA7220_LEDBLINK_ON_SHIFT)
-			| ((187500*1000UL/4) << IBA7220_LEDBLINK_OFF_SHIFT);
-	}
-	if (lst == INFINIPATH_IBCS_L_STATE_ACTIVE)
-		extctl |= INFINIPATH_EXTC_LED2PRIPORT_ON;
-	dd->ipath_extctrl = extctl;
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_extctrl, extctl);
-	spin_unlock_irqrestore(&dd->ipath_gpio_lock, flags);
-
-	if (ledblink) /* blink the LED on packet receive */
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvpktledcnt,
-			ledblink);
-}
-
-/*
- * Similar to pci_intx(pdev, 1), except that we make sure
- * msi is off...
- */
-static void ipath_enable_intx(struct pci_dev *pdev)
-{
-	u16 cw, new;
-	int pos;
-
-	/* first, turn on INTx */
-	pci_read_config_word(pdev, PCI_COMMAND, &cw);
-	new = cw & ~PCI_COMMAND_INTX_DISABLE;
-	if (new != cw)
-		pci_write_config_word(pdev, PCI_COMMAND, new);
-
-	/* then turn off MSI */
-	pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
-	if (pos) {
-		pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &cw);
-		new = cw & ~PCI_MSI_FLAGS_ENABLE;
-		if (new != cw)
-			pci_write_config_word(pdev, pos + PCI_MSI_FLAGS, new);
-	}
-}
-
-static int ipath_msi_enabled(struct pci_dev *pdev)
-{
-	int pos, ret = 0;
-
-	pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
-	if (pos) {
-		u16 cw;
-
-		pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &cw);
-		ret = !!(cw & PCI_MSI_FLAGS_ENABLE);
-	}
-	return ret;
-}
-
-/*
- * disable msi interrupt if enabled, and clear the flag.
- * flag is used primarily for the fallback to INTx, but
- * is also used in reinit after reset as a flag.
- */
-static void ipath_7220_nomsi(struct ipath_devdata *dd)
-{
-	dd->ipath_msi_lo = 0;
-
-	if (ipath_msi_enabled(dd->pcidev)) {
-		/*
-		 * free, but don't zero; later kernels require that
-		 * it be freed before disable_msi, so the INTx
-		 * setup has to request it again.
-		 */
-		if (dd->ipath_irq)
-			free_irq(dd->ipath_irq, dd);
-		pci_disable_msi(dd->pcidev);
-	}
-}
-
-/*
- * ipath_setup_7220_cleanup - clean up any chip-specific stuff
- * @dd: the infinipath device
- *
- * Nothing but msi interrupt cleanup for now.
- *
- * This is called during driver unload.
- */
-static void ipath_setup_7220_cleanup(struct ipath_devdata *dd)
-{
-	ipath_7220_nomsi(dd);
-}
-
-
-static void ipath_7220_pcie_params(struct ipath_devdata *dd, u32 boardrev)
-{
-	u16 linkstat, minwidth, speed;
-	int pos;
-
-	pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_EXP);
-	if (!pos) {
-		ipath_dev_err(dd, "Can't find PCI Express capability!\n");
-		goto bail;
-	}
-
-	pci_read_config_word(dd->pcidev, pos + PCI_EXP_LNKSTA,
-			     &linkstat);
-	/*
-	 * link speed is in bits 0-3, link width in bits 4-8;
-	 * no defines for them in headers
-	 */
-	speed = linkstat & 0xf;
-	linkstat >>= 4;
-	linkstat &= 0x1f;
-	dd->ipath_lbus_width = linkstat;
-	switch (boardrev) {
-	case 0:
-	case 2:
-	case 10:
-	case 12:
-		minwidth = 16; /* x16 capable boards */
-		break;
-	default:
-		minwidth = 8; /* x8 capable boards */
-		break;
-	}
-
-	switch (speed) {
-	case 1:
-		dd->ipath_lbus_speed = 2500; /* Gen1, 2.5GHz */
-		break;
-	case 2:
-		dd->ipath_lbus_speed = 5000; /* Gen2, 5GHz */
-		break;
-	default: /* not defined, assume gen1 */
-		dd->ipath_lbus_speed = 2500;
-		break;
-	}
-
-	if (linkstat < minwidth)
-		ipath_dev_err(dd,
-			"PCIe width %u (x%u HCA), performance "
-			"reduced\n", linkstat, minwidth);
-	else
-		ipath_cdbg(VERBOSE, "PCIe speed %u width %u (x%u HCA)\n",
-			dd->ipath_lbus_speed, linkstat, minwidth);
-
-	if (speed != 1)
-		ipath_dev_err(dd,
-			"PCIe linkspeed %u is incorrect; "
-			"should be 1 (2500)!\n", speed);
-
-bail:
-	/* fill in string, even on errors */
-	snprintf(dd->ipath_lbus_info, sizeof(dd->ipath_lbus_info),
-		"PCIe,%uMHz,x%u\n",
-		dd->ipath_lbus_speed,
-		dd->ipath_lbus_width);
-	return;
-}
-
-
-/**
- * ipath_setup_7220_config - setup PCIe config related stuff
- * @dd: the infinipath device
- * @pdev: the PCI device
- *
- * The pci_enable_msi() call will fail on systems with MSI quirks
- * such as those with AMD8131, even if the device of interest is not
- * attached to that device (in the 2.6.13 - 2.6.15 kernels, at least; fixed
- * late in 2.6.16).
- * All that can be done is to edit the kernel source to remove the quirk
- * check until that is fixed.
- * We do not need to call enable_msi() for our HyperTransport chip,
- * even though it uses MSI, and we want to avoid the quirk warning, so
- * we call enable_msi only for PCIe.  If we do end up needing
- * pci_enable_msi at some point in the future for HT, we'll move the
- * call back into the main init_one code.
- * We save the msi lo and hi values, so we can restore them after
- * chip reset (the kernel PCI infrastructure doesn't yet handle that
- * correctly).
- */
-static int ipath_setup_7220_config(struct ipath_devdata *dd,
-				   struct pci_dev *pdev)
-{
-	int pos, ret = -1;
-	u32 boardrev;
-
-	dd->ipath_msi_lo = 0;	/* used as a flag during reset processing */
-
-	pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
-	if (!strcmp(int_type, "force_msi") || !strcmp(int_type, "auto"))
-		ret = pci_enable_msi(pdev);
-	if (ret) {
-		if (!strcmp(int_type, "force_msi")) {
-			ipath_dev_err(dd, "pci_enable_msi failed: %d, "
-				      "force_msi is on, so not continuing.\n",
-				      ret);
-			return ret;
-		}
-
-		ipath_enable_intx(pdev);
-		if (!strcmp(int_type, "auto"))
-			ipath_dev_err(dd, "pci_enable_msi failed: %d, "
-				      "falling back to INTx\n", ret);
-	} else if (pos) {
-		u16 control;
-		pci_read_config_dword(pdev, pos + PCI_MSI_ADDRESS_LO,
-				      &dd->ipath_msi_lo);
-		pci_read_config_dword(pdev, pos + PCI_MSI_ADDRESS_HI,
-				      &dd->ipath_msi_hi);
-		pci_read_config_word(pdev, pos + PCI_MSI_FLAGS,
-				     &control);
-		/* now save the data (vector) info */
-		pci_read_config_word(pdev,
-				     pos + ((control & PCI_MSI_FLAGS_64BIT)
-					    ? PCI_MSI_DATA_64 :
-					    PCI_MSI_DATA_32),
-				     &dd->ipath_msi_data);
-	} else
-		ipath_dev_err(dd, "Can't find MSI capability, "
-			      "can't save MSI settings for reset\n");
-
-	dd->ipath_irq = pdev->irq;
-
-	/*
-	 * We save the cachelinesize also, although it doesn't
-	 * really matter.
-	 */
-	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE,
-			     &dd->ipath_pci_cacheline);
-
-	/*
-	 * this function is called early, before ipath_boardrev is set; it's
-	 * too early in init to use ipath_read_kreg64(), so use readq()
-	 */
-	boardrev = (readq(&dd->ipath_kregbase[dd->ipath_kregs->kr_revision])
-		 >> INFINIPATH_R_BOARDID_SHIFT) & INFINIPATH_R_BOARDID_MASK;
-
-	ipath_7220_pcie_params(dd, boardrev);
-
-	dd->ipath_flags |= IPATH_NODMA_RTAIL | IPATH_HAS_SEND_DMA |
-		IPATH_HAS_PBC_CNT | IPATH_HAS_THRESH_UPDATE;
-	dd->ipath_pioupd_thresh = 4U; /* set default update threshold */
-	return 0;
-}
-
-static void ipath_init_7220_variables(struct ipath_devdata *dd)
-{
-	/*
-	 * setup the register offsets, since they are different for each
-	 * chip
-	 */
-	dd->ipath_kregs = &ipath_7220_kregs;
-	dd->ipath_cregs = &ipath_7220_cregs;
-
-	/*
-	 * bits for selecting i2c direction and values,
-	 * used for I2C serial flash
-	 */
-	dd->ipath_gpio_sda_num = _IPATH_GPIO_SDA_NUM;
-	dd->ipath_gpio_scl_num = _IPATH_GPIO_SCL_NUM;
-	dd->ipath_gpio_sda = IPATH_GPIO_SDA;
-	dd->ipath_gpio_scl = IPATH_GPIO_SCL;
-
-	/*
-	 * Fill in data for field-values that change in IBA7220.
-	 * We dynamically specify only the mask for LINKTRAININGSTATE
-	 * and only the shift for LINKSTATE, as they are the only ones
-	 * that change.  Also precalculate the 3 link states of interest
-	 * and the combined mask.
-	 */
-	dd->ibcs_ls_shift = IBA7220_IBCS_LINKSTATE_SHIFT;
-	dd->ibcs_lts_mask = IBA7220_IBCS_LINKTRAININGSTATE_MASK;
-	dd->ibcs_mask = (INFINIPATH_IBCS_LINKSTATE_MASK <<
-		dd->ibcs_ls_shift) | dd->ibcs_lts_mask;
-	dd->ib_init = (INFINIPATH_IBCS_LT_STATE_LINKUP <<
-		INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) |
-		(INFINIPATH_IBCS_L_STATE_INIT << dd->ibcs_ls_shift);
-	dd->ib_arm = (INFINIPATH_IBCS_LT_STATE_LINKUP <<
-		INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) |
-		(INFINIPATH_IBCS_L_STATE_ARM << dd->ibcs_ls_shift);
-	dd->ib_active = (INFINIPATH_IBCS_LT_STATE_LINKUP <<
-		INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) |
-		(INFINIPATH_IBCS_L_STATE_ACTIVE << dd->ibcs_ls_shift);
-
-	/*
-	 * Fill in data for ibcc field-values that change in IBA7220.
-	 * We dynamically specify only the mask for LINKINITCMD
-	 * and only the shift for LINKCMD and MAXPKTLEN, as they are
-	 * the only ones that change.
-	 */
-	dd->ibcc_lic_mask = IBA7220_IBCC_LINKINITCMD_MASK;
-	dd->ibcc_lc_shift = IBA7220_IBCC_LINKCMD_SHIFT;
-	dd->ibcc_mpl_shift = IBA7220_IBCC_MAXPKTLEN_SHIFT;
-
-	/* Fill in shifts for RcvCtrl. */
-	dd->ipath_r_portenable_shift = INFINIPATH_R_PORTENABLE_SHIFT;
-	dd->ipath_r_intravail_shift = IBA7220_R_INTRAVAIL_SHIFT;
-	dd->ipath_r_tailupd_shift = IBA7220_R_TAILUPD_SHIFT;
-	dd->ipath_r_portcfg_shift = IBA7220_R_PORTCFG_SHIFT;
-
-	/* variables for sanity checking interrupt and errors */
-	dd->ipath_hwe_bitsextant =
-		(INFINIPATH_HWE_RXEMEMPARITYERR_MASK <<
-		 INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT) |
-		(INFINIPATH_HWE_TXEMEMPARITYERR_MASK <<
-		 INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT) |
-		(INFINIPATH_HWE_PCIEMEMPARITYERR_MASK <<
-		 INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT) |
-		INFINIPATH_HWE_PCIE1PLLFAILED |
-		INFINIPATH_HWE_PCIE0PLLFAILED |
-		INFINIPATH_HWE_PCIEPOISONEDTLP |
-		INFINIPATH_HWE_PCIECPLTIMEOUT |
-		INFINIPATH_HWE_PCIEBUSPARITYXTLH |
-		INFINIPATH_HWE_PCIEBUSPARITYXADM |
-		INFINIPATH_HWE_PCIEBUSPARITYRADM |
-		INFINIPATH_HWE_MEMBISTFAILED |
-		INFINIPATH_HWE_COREPLL_FBSLIP |
-		INFINIPATH_HWE_COREPLL_RFSLIP |
-		INFINIPATH_HWE_SERDESPLLFAILED |
-		INFINIPATH_HWE_IBCBUSTOSPCPARITYERR |
-		INFINIPATH_HWE_IBCBUSFRSPCPARITYERR |
-		INFINIPATH_HWE_PCIECPLDATAQUEUEERR |
-		INFINIPATH_HWE_PCIECPLHDRQUEUEERR |
-		INFINIPATH_HWE_SDMAMEMREADERR |
-		INFINIPATH_HWE_CLK_UC_PLLNOTLOCKED |
-		INFINIPATH_HWE_PCIESERDESQ0PCLKNOTDETECT |
-		INFINIPATH_HWE_PCIESERDESQ1PCLKNOTDETECT |
-		INFINIPATH_HWE_PCIESERDESQ2PCLKNOTDETECT |
-		INFINIPATH_HWE_PCIESERDESQ3PCLKNOTDETECT |
-		INFINIPATH_HWE_DDSRXEQMEMORYPARITYERR |
-		INFINIPATH_HWE_IB_UC_MEMORYPARITYERR |
-		INFINIPATH_HWE_PCIE_UC_OCT0MEMORYPARITYERR |
-		INFINIPATH_HWE_PCIE_UC_OCT1MEMORYPARITYERR;
-	dd->ipath_i_bitsextant =
-		INFINIPATH_I_SDMAINT | INFINIPATH_I_SDMADISABLED |
-		(INFINIPATH_I_RCVURG_MASK << INFINIPATH_I_RCVURG_SHIFT) |
-		(INFINIPATH_I_RCVAVAIL_MASK <<
-		 INFINIPATH_I_RCVAVAIL_SHIFT) |
-		INFINIPATH_I_ERROR | INFINIPATH_I_SPIOSENT |
-		INFINIPATH_I_SPIOBUFAVAIL | INFINIPATH_I_GPIO |
-		INFINIPATH_I_JINT | INFINIPATH_I_SERDESTRIMDONE;
-	dd->ipath_e_bitsextant =
-		INFINIPATH_E_RFORMATERR | INFINIPATH_E_RVCRC |
-		INFINIPATH_E_RICRC | INFINIPATH_E_RMINPKTLEN |
-		INFINIPATH_E_RMAXPKTLEN | INFINIPATH_E_RLONGPKTLEN |
-		INFINIPATH_E_RSHORTPKTLEN | INFINIPATH_E_RUNEXPCHAR |
-		INFINIPATH_E_RUNSUPVL | INFINIPATH_E_REBP |
-		INFINIPATH_E_RIBFLOW | INFINIPATH_E_RBADVERSION |
-		INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
-		INFINIPATH_E_RBADTID | INFINIPATH_E_RHDRLEN |
-		INFINIPATH_E_RHDR | INFINIPATH_E_RIBLOSTLINK |
-		INFINIPATH_E_SENDSPECIALTRIGGER |
-		INFINIPATH_E_SDMADISABLED | INFINIPATH_E_SMINPKTLEN |
-		INFINIPATH_E_SMAXPKTLEN | INFINIPATH_E_SUNDERRUN |
-		INFINIPATH_E_SPKTLEN | INFINIPATH_E_SDROPPEDSMPPKT |
-		INFINIPATH_E_SDROPPEDDATAPKT |
-		INFINIPATH_E_SPIOARMLAUNCH | INFINIPATH_E_SUNEXPERRPKTNUM |
-		INFINIPATH_E_SUNSUPVL | INFINIPATH_E_SENDBUFMISUSE |
-		INFINIPATH_E_SDMAGENMISMATCH | INFINIPATH_E_SDMAOUTOFBOUND |
-		INFINIPATH_E_SDMATAILOUTOFBOUND | INFINIPATH_E_SDMABASE |
-		INFINIPATH_E_SDMA1STDESC | INFINIPATH_E_SDMARPYTAG |
-		INFINIPATH_E_SDMADWEN | INFINIPATH_E_SDMAMISSINGDW |
-		INFINIPATH_E_SDMAUNEXPDATA |
-		INFINIPATH_E_IBSTATUSCHANGED | INFINIPATH_E_INVALIDADDR |
-		INFINIPATH_E_RESET | INFINIPATH_E_HARDWARE |
-		INFINIPATH_E_SDMADESCADDRMISALIGN |
-		INFINIPATH_E_INVALIDEEPCMD;
-
-	dd->ipath_i_rcvavail_mask = INFINIPATH_I_RCVAVAIL_MASK;
-	dd->ipath_i_rcvurg_mask = INFINIPATH_I_RCVURG_MASK;
-	dd->ipath_i_rcvavail_shift = INFINIPATH_I_RCVAVAIL_SHIFT;
-	dd->ipath_i_rcvurg_shift = INFINIPATH_I_RCVURG_SHIFT;
-	dd->ipath_flags |= IPATH_INTREG_64 | IPATH_HAS_MULT_IB_SPEED
-		| IPATH_HAS_LINK_LATENCY;
-
-	/*
-	 * EEPROM error log 0 is TXE Parity errors. 1 is RXE Parity.
-	 * 2 is Some Misc, 3 is reserved for future.
-	 */
-	dd->ipath_eep_st_masks[0].hwerrs_to_log =
-		INFINIPATH_HWE_TXEMEMPARITYERR_MASK <<
-		INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT;
-
-	dd->ipath_eep_st_masks[1].hwerrs_to_log =
-		INFINIPATH_HWE_RXEMEMPARITYERR_MASK <<
-		INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT;
-
-	dd->ipath_eep_st_masks[2].errs_to_log = INFINIPATH_E_RESET;
-
-	ipath_linkrecovery = 0;
-
-	init_waitqueue_head(&dd->ipath_autoneg_wait);
-	INIT_DELAYED_WORK(&dd->ipath_autoneg_work,  autoneg_work);
-
-	dd->ipath_link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
-	dd->ipath_link_speed_supported = IPATH_IB_SDR | IPATH_IB_DDR;
-
-	dd->ipath_link_width_enabled = dd->ipath_link_width_supported;
-	dd->ipath_link_speed_enabled = dd->ipath_link_speed_supported;
-	/*
-	 * set the initial values to reasonable default, will be set
-	 * for real when link is up.
-	 */
-	dd->ipath_link_width_active = IB_WIDTH_4X;
-	dd->ipath_link_speed_active = IPATH_IB_SDR;
-	dd->delay_mult = rate_to_delay[0][1];
-}
-
-
-/*
- * Setup the MSI stuff again after a reset.  I'd like to just call
- * pci_enable_msi() and request_irq() again, but when I do that,
- * the MSI enable bit doesn't get set in the command word, and
- * we switch to a different interrupt vector, which is confusing,
- * so I instead just do it all inline.  Perhaps we can somehow tie
- * this into the PCIe hotplug support at some point.
- * Note, because I'm doing it all here, I don't call pci_disable_msi()
- * or free_irq() at the start of ipath_setup_7220_reset().
- */
-static int ipath_reinit_msi(struct ipath_devdata *dd)
-{
-	int ret = 0;
-
-	int pos;
-	u16 control;
-	if (!dd->ipath_msi_lo) /* Using INTx, or init problem */
-		goto bail;
-
-	pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI);
-	if (!pos) {
-		ipath_dev_err(dd, "Can't find MSI capability, "
-			      "can't restore MSI settings\n");
-		goto bail;
-	}
-	ipath_cdbg(VERBOSE, "Writing msi_lo 0x%x to config offset 0x%x\n",
-		   dd->ipath_msi_lo, pos + PCI_MSI_ADDRESS_LO);
-	pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_LO,
-			       dd->ipath_msi_lo);
-	ipath_cdbg(VERBOSE, "Writing msi_lo 0x%x to config offset 0x%x\n",
-		   dd->ipath_msi_hi, pos + PCI_MSI_ADDRESS_HI);
-	pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_HI,
-			       dd->ipath_msi_hi);
-	pci_read_config_word(dd->pcidev, pos + PCI_MSI_FLAGS, &control);
-	if (!(control & PCI_MSI_FLAGS_ENABLE)) {
-		ipath_cdbg(VERBOSE, "MSI control at off %x was %x, "
-			   "setting MSI enable (%x)\n", pos + PCI_MSI_FLAGS,
-			   control, control | PCI_MSI_FLAGS_ENABLE);
-		control |= PCI_MSI_FLAGS_ENABLE;
-		pci_write_config_word(dd->pcidev, pos + PCI_MSI_FLAGS,
-				      control);
-	}
-	/* now rewrite the data (vector) info */
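-	/* offsets 8 and 12 are PCI_MSI_DATA_32 and PCI_MSI_DATA_64 */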
-	pci_write_config_word(dd->pcidev, pos +
-			      ((control & PCI_MSI_FLAGS_64BIT) ? 12 : 8),
-			      dd->ipath_msi_data);
-	ret = 1;
-
-bail:
-	if (!ret) {
-		ipath_dbg("Using INTx, MSI disabled or not configured\n");
-		ipath_enable_intx(dd->pcidev);
-		ret = 1;
-	}
-	/*
-	 * We restore the cachelinesize also, although it doesn't really
-	 * matter.
-	 */
-	pci_write_config_byte(dd->pcidev, PCI_CACHE_LINE_SIZE,
-			      dd->ipath_pci_cacheline);
-	/* and now set the pci master bit again */
-	pci_set_master(dd->pcidev);
-
-	return ret;
-}
-
-/*
- * This routine sleeps, so it can only be called from user context, not
- * from interrupt context.  If we need interrupt context, we can split
- * it into two routines.
- */
-static int ipath_setup_7220_reset(struct ipath_devdata *dd)
-{
-	u64 val;
-	int i;
-	int ret;
-	u16 cmdval;
-
-	pci_read_config_word(dd->pcidev, PCI_COMMAND, &cmdval);
-
-	/* Use dev_err so it shows up in logs, etc. */
-	ipath_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->ipath_unit);
-
-	/* keep chip from being accessed in a few places */
-	dd->ipath_flags &= ~(IPATH_INITTED | IPATH_PRESENT);
-	val = dd->ipath_control | INFINIPATH_C_RESET;
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_control, val);
-	mb();
-
-	for (i = 1; i <= 5; i++) {
-		int r;
-
-		/*
-		 * Allow MBIST, etc. to complete; longer on each retry.
-		 * We sometimes get machine checks from bus timeout if no
-		 * response, so for now, make it *really* long.
-		 */
-		msleep(1000 + (1 + i) * 2000);
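-		/* i.e. 5, 7, 9, 11, then 13 seconds across the five tries */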
-		r = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0,
-					   dd->ipath_pcibar0);
-		if (r)
-			ipath_dev_err(dd, "rewrite of BAR0 failed: %d\n", r);
-		r = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_1,
-					   dd->ipath_pcibar1);
-		if (r)
-			ipath_dev_err(dd, "rewrite of BAR1 failed: %d\n", r);
-		/* now re-enable memory access */
-		pci_write_config_word(dd->pcidev, PCI_COMMAND, cmdval);
-		r = pci_enable_device(dd->pcidev);
-		if (r)
-			ipath_dev_err(dd, "pci_enable_device failed after "
-				      "reset: %d\n", r);
-		/*
-		 * whether it fully enabled or not, mark as present,
-		 * again (but not INITTED)
-		 */
-		dd->ipath_flags |= IPATH_PRESENT;
-		val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_revision);
-		if (val == dd->ipath_revision) {
-			ipath_cdbg(VERBOSE, "Got matching revision "
-				   "register %llx on try %d\n",
-				   (unsigned long long) val, i);
-			ret = ipath_reinit_msi(dd);
-			goto bail;
-		}
-		/* Probably getting -1 back */
-		ipath_dbg("Didn't get expected revision register, "
-			  "got %llx, try %d\n", (unsigned long long) val,
-			  i + 1);
-	}
-	ret = 0; /* failed */
-
-bail:
-	if (ret)
-		ipath_7220_pcie_params(dd, dd->ipath_boardrev);
-
-	return ret;
-}
-
-/**
- * ipath_7220_put_tid - write a TID to the chip
- * @dd: the infinipath device
- * @tidptr: pointer to the expected TID (in chip) to update
- * @type: 0 for eager, 1 for expected
- * @pa: physical address of in memory buffer; ipath_tidinvalid if freeing
- *
- * This exists as a separate routine to allow for selection of the
- * appropriate "flavor". The static calls in cleanup just use the
- * revision-agnostic form, as they are not performance critical.
- */
-static void ipath_7220_put_tid(struct ipath_devdata *dd, u64 __iomem *tidptr,
-			     u32 type, unsigned long pa)
-{
-	if (pa != dd->ipath_tidinvalid) {
-		u64 chippa = pa >> IBA7220_TID_PA_SHIFT;
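-		/* the chip stores the PA right-shifted, as a 2KB page number */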
-
-		/* paranoia checks */
-		if (pa != (chippa << IBA7220_TID_PA_SHIFT)) {
-			dev_info(&dd->pcidev->dev, "BUG: physaddr %lx "
-				 "not 2KB aligned!\n", pa);
-			return;
-		}
-		if (chippa >= (1UL << IBA7220_TID_SZ_SHIFT)) {
-			ipath_dev_err(dd,
-				      "BUG: Physical page address 0x%lx "
-				      "larger than supported\n", pa);
-			return;
-		}
-
-		if (type == RCVHQ_RCV_TYPE_EAGER)
-			chippa |= dd->ipath_tidtemplate;
-		else /* for now, always full 4KB page */
-			chippa |= IBA7220_TID_SZ_4K;
-		writeq(chippa, tidptr);
-	} else
-		writeq(pa, tidptr);
-	mmiowb();
-}
-
-/**
- * ipath_7220_clear_tids - clear all TID entries for a port, expected and eager
- * @dd: the infinipath device
- * @port: the port
- *
- * clear all TID entries for a port, expected and eager.
- * Used from ipath_close().  On this chip, TIDs are only 32 bits,
- * not 64, but they are still on 64 bit boundaries, so tidbase
- * is declared as u64 * for the pointer math, even though we write 32 bits
- */
-static void ipath_7220_clear_tids(struct ipath_devdata *dd, unsigned port)
-{
-	u64 __iomem *tidbase;
-	unsigned long tidinv;
-	int i;
-
-	if (!dd->ipath_kregbase)
-		return;
-
-	ipath_cdbg(VERBOSE, "Invalidate TIDs for port %u\n", port);
-
-	tidinv = dd->ipath_tidinvalid;
-	tidbase = (u64 __iomem *)
-		((char __iomem *)(dd->ipath_kregbase) +
-		 dd->ipath_rcvtidbase +
-		 port * dd->ipath_rcvtidcnt * sizeof(*tidbase));
-
-	for (i = 0; i < dd->ipath_rcvtidcnt; i++)
-		ipath_7220_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
-				   tidinv);
-
-	tidbase = (u64 __iomem *)
-		((char __iomem *)(dd->ipath_kregbase) +
-		 dd->ipath_rcvegrbase + port_egrtid_idx(dd, port)
-		 * sizeof(*tidbase));
-
-	for (i = port ? dd->ipath_rcvegrcnt : dd->ipath_p0_rcvegrcnt; i; i--)
-		ipath_7220_put_tid(dd, &tidbase[i-1], RCVHQ_RCV_TYPE_EAGER,
-			tidinv);
-}
-
-/**
- * ipath_7220_tidtemplate - setup constants for TID updates
- * @dd: the infinipath device
- *
- * We set up values that we use a lot, to avoid recalculating them each time
- */
-static void ipath_7220_tidtemplate(struct ipath_devdata *dd)
-{
-	/* For now, we always allocate 4KB buffers (at init) so we can
-	 * receive max size packets.  We may want a module parameter to
-	 * specify 2KB or 4KB and/or make it per-port instead of per-device
-	 * for those who want to reduce memory footprint.  Note that the
-	 * ipath_rcvhdrentsize size must be large enough to hold the largest
-	 * IB header (currently 96 bytes) that we expect to handle (plus of
-	 * course the 2 dwords of RHF).
-	 */
-	if (dd->ipath_rcvegrbufsize == 2048)
-		dd->ipath_tidtemplate = IBA7220_TID_SZ_2K;
-	else if (dd->ipath_rcvegrbufsize == 4096)
-		dd->ipath_tidtemplate = IBA7220_TID_SZ_4K;
-	else {
-		dev_info(&dd->pcidev->dev, "BUG: unsupported egrbufsize "
-			 "%u, using %u\n", dd->ipath_rcvegrbufsize,
-			 4096);
-		dd->ipath_tidtemplate = IBA7220_TID_SZ_4K;
-	}
-	dd->ipath_tidinvalid = 0;
-}
-
-static int ipath_7220_early_init(struct ipath_devdata *dd)
-{
-	u32 i, s;
-
-	if (strcmp(int_type, "auto") &&
-	    strcmp(int_type, "force_msi") &&
-	    strcmp(int_type, "force_intx")) {
-		ipath_dev_err(dd, "Invalid interrupt_type: '%s', expecting "
-			      "auto, force_msi or force_intx\n", int_type);
-		return -EINVAL;
-	}
-
-	/*
-	 * Control[4] has been added to change the arbitration within
-	 * the SDMA engine between favoring data fetches over descriptor
-	 * fetches.  ipath_sdma_fetch_arb==0 gives data fetches priority.
-	 */
-	if (ipath_sdma_fetch_arb && (dd->ipath_minrev > 1))
-		dd->ipath_control |= 1<<4;
-
-	dd->ipath_flags |= IPATH_4BYTE_TID;
-
-	/*
-	 * For openfabrics, we need to be able to handle an IB header of
-	 * 24 dwords.  HT chip has arbitrary sized receive buffers, so we
-	 * made them the same size as the PIO buffers.  This chip does not
-	 * handle arbitrary size buffers, so we need the header large enough
-	 * to handle largest IB header, but still have room for a 2KB MTU
-	 * standard IB packet.
-	 */
-	dd->ipath_rcvhdrentsize = 24;
-	dd->ipath_rcvhdrsize = IPATH_DFLT_RCVHDRSIZE;
-	dd->ipath_rhf_offset =
-		dd->ipath_rcvhdrentsize - sizeof(u64) / sizeof(u32);
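-	/* the RHF is one u64 (2 dwords) at the end of each entry */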
-
-	dd->ipath_rcvegrbufsize = ipath_mtu4096 ? 4096 : 2048;
-	/*
-	 * the min() check here is currently a nop, but it may not always
-	 * be, depending on just how we do ipath_rcvegrbufsize
-	 */
-	dd->ipath_ibmaxlen = min(ipath_mtu4096 ? dd->ipath_piosize4k :
-				 dd->ipath_piosize2k,
-				 dd->ipath_rcvegrbufsize +
-				 (dd->ipath_rcvhdrentsize << 2));
-	dd->ipath_init_ibmaxlen = dd->ipath_ibmaxlen;
-
-	ipath_7220_config_jint(dd, INFINIPATH_JINT_DEFAULT_IDLE_TICKS,
-			       INFINIPATH_JINT_DEFAULT_MAX_PACKETS);
-
-	if (dd->ipath_boardrev) /* no eeprom on emulator */
-		ipath_get_eeprom_info(dd);
-
-	/* start of code to check and print procmon */
-	s = ipath_read_kreg32(dd, IPATH_KREG_OFFSET(ProcMon));
-	s &= ~(1U<<31); /* clear done bit */
-	s |= 1U<<14; /* clear counter (write 1 to clear) */
-	ipath_write_kreg(dd, IPATH_KREG_OFFSET(ProcMon), s);
-	/* make sure clear_counter low long enough before start */
-	ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
-	ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
-
-	s &= ~(1U<<14); /* allow counter to count (before starting) */
-	ipath_write_kreg(dd, IPATH_KREG_OFFSET(ProcMon), s);
-	ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
-	ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
-	s = ipath_read_kreg32(dd, IPATH_KREG_OFFSET(ProcMon));
-
-	s |= 1U<<15; /* start the counter */
-	s &= ~(1U<<31); /* clear done bit */
-	s &= ~0x7ffU; /* clear frequency bits */
-	s |= 0xe29; /* set frequency bits, in case cleared */
-	ipath_write_kreg(dd, IPATH_KREG_OFFSET(ProcMon), s);
-
-	s = 0;
-	for (i = 500; i > 0 && !(s&(1ULL<<31)); i--) {
-		ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
-		s = ipath_read_kreg32(dd, IPATH_KREG_OFFSET(ProcMon));
-	}
-	if (!(s&(1U<<31)))
-		ipath_dev_err(dd, "ProcMon register not valid: 0x%x\n", s);
-	else
-		ipath_dbg("ProcMon=0x%x, count=0x%x\n", s, (s>>16)&0x1ff);
-
-	return 0;
-}
-
-/**
- * ipath_init_7220_get_base_info - set chip-specific flags for user code
- * @pd: the infinipath port
- * @kbase: ipath_base_info pointer
- *
- * We set the PCIE flag because the lower bandwidth on PCIe vs
- * HyperTransport can affect some user packet algorithms.
- */
-static int ipath_7220_get_base_info(struct ipath_portdata *pd, void *kbase)
-{
-	struct ipath_base_info *kinfo = kbase;
-
-	kinfo->spi_runtime_flags |=
-		IPATH_RUNTIME_PCIE | IPATH_RUNTIME_NODMA_RTAIL |
-		IPATH_RUNTIME_SDMA;
-
-	return 0;
-}
-
-static void ipath_7220_free_irq(struct ipath_devdata *dd)
-{
-	free_irq(dd->ipath_irq, dd);
-	dd->ipath_irq = 0;
-}
-
-static struct ipath_message_header *
-ipath_7220_get_msgheader(struct ipath_devdata *dd, __le32 *rhf_addr)
-{
-	u32 offset = ipath_hdrget_offset(rhf_addr);
-
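-	/*
-	 * rhf_addr points at the RHF within the entry; back up to the
-	 * start of the entry, then add the header's own offset.
-	 */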
-	return (struct ipath_message_header *)
-		(rhf_addr - dd->ipath_rhf_offset + offset);
-}
-
-static void ipath_7220_config_ports(struct ipath_devdata *dd, ushort cfgports)
-{
-	u32 nchipports;
-
-	nchipports = ipath_read_kreg32(dd, dd->ipath_kregs->kr_portcnt);
-	if (!cfgports) {
-		int ncpus = num_online_cpus();
-
-		if (ncpus <= 4)
-			dd->ipath_portcnt = 5;
-		else if (ncpus <= 8)
-			dd->ipath_portcnt = 9;
-		if (dd->ipath_portcnt)
-			ipath_dbg("Auto-configured for %u ports, %d cpus "
-				"online\n", dd->ipath_portcnt, ncpus);
-	} else if (cfgports <= nchipports)
-		dd->ipath_portcnt = cfgports;
-	if (!dd->ipath_portcnt) /* none of the above, set to max */
-		dd->ipath_portcnt = nchipports;
-	/*
-	 * chip can be configured for 5, 9, or 17 ports, and choice
-	 * affects number of eager TIDs per port (1K, 2K, 4K).
-	 */
-	if (dd->ipath_portcnt > 9)
-		dd->ipath_rcvctrl |= 2ULL << IBA7220_R_PORTCFG_SHIFT;
-	else if (dd->ipath_portcnt > 5)
-		dd->ipath_rcvctrl |= 1ULL << IBA7220_R_PORTCFG_SHIFT;
-	/* else configure for default 5 receive ports */
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
-			 dd->ipath_rcvctrl);
-	dd->ipath_p0_rcvegrcnt = 2048; /* always */
-	if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
-		dd->ipath_pioreserved = 3; /* kpiobufs used for PIO */
-}
-
-
-static int ipath_7220_get_ib_cfg(struct ipath_devdata *dd, int which)
-{
-	int lsb, ret = 0;
-	u64 maskr; /* right-justified mask */
-
-	switch (which) {
-	case IPATH_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */
-		lsb = IBA7220_IBC_HRTBT_SHIFT;
-		maskr = IBA7220_IBC_HRTBT_MASK;
-		break;
-
-	case IPATH_IB_CFG_LWID_ENB: /* Get allowed Link-width */
-		ret = dd->ipath_link_width_enabled;
-		goto done;
-
-	case IPATH_IB_CFG_LWID: /* Get currently active Link-width */
-		ret = dd->ipath_link_width_active;
-		goto done;
-
-	case IPATH_IB_CFG_SPD_ENB: /* Get allowed Link speeds */
-		ret = dd->ipath_link_speed_enabled;
-		goto done;
-
-	case IPATH_IB_CFG_SPD: /* Get current Link spd */
-		ret = dd->ipath_link_speed_active;
-		goto done;
-
-	case IPATH_IB_CFG_RXPOL_ENB: /* Get Auto-RX-polarity enable */
-		lsb = IBA7220_IBC_RXPOL_SHIFT;
-		maskr = IBA7220_IBC_RXPOL_MASK;
-		break;
-
-	case IPATH_IB_CFG_LREV_ENB: /* Get Auto-Lane-reversal enable */
-		lsb = IBA7220_IBC_LREV_SHIFT;
-		maskr = IBA7220_IBC_LREV_MASK;
-		break;
-
-	case IPATH_IB_CFG_LINKLATENCY:
-		ret = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcddrstatus)
-			& IBA7220_DDRSTAT_LINKLAT_MASK;
-		goto done;
-
-	default:
-		ret = -ENOTSUPP;
-		goto done;
-	}
-	ret = (int)((dd->ipath_ibcddrctrl >> lsb) & maskr);
-done:
-	return ret;
-}
-
-static int ipath_7220_set_ib_cfg(struct ipath_devdata *dd, int which, u32 val)
-{
-	int lsb, ret = 0, setforce = 0;
-	u64 maskr; /* right-justified mask */
-
-	switch (which) {
-	case IPATH_IB_CFG_LIDLMC:
-		/*
-		 * Set LID and LMC. Combined to avoid possible hazard
-		 * caller puts LMC in 16MSbits, DLID in 16LSbits of val
-		 */
-		lsb = IBA7220_IBC_DLIDLMC_SHIFT;
-		maskr = IBA7220_IBC_DLIDLMC_MASK;
-		break;
-
-	case IPATH_IB_CFG_HRTBT: /* set Heartbeat off/enable/auto */
-		if (val & IPATH_IB_HRTBT_ON &&
-			(dd->ipath_flags & IPATH_NO_HRTBT))
-			goto bail;
-		lsb = IBA7220_IBC_HRTBT_SHIFT;
-		maskr = IBA7220_IBC_HRTBT_MASK;
-		break;
-
-	case IPATH_IB_CFG_LWID_ENB: /* set allowed Link-width */
-		/*
-		 * As with speed, only write the actual register if
-		 * the link is currently down, otherwise takes effect
-		 * on next link change.
-		 */
-		dd->ipath_link_width_enabled = val;
-		if ((dd->ipath_flags & (IPATH_LINKDOWN|IPATH_LINKINIT)) !=
-			IPATH_LINKDOWN)
-			goto bail;
-		/*
-		 * We set the IPATH_IB_FORCE_NOTIFY bit so updown
-		 * will get called, because we want to update
-		 * link_width_active, and the change may not take
-		 * effect for some time (if we are in POLL), so this
-		 * flag will force the updown routine to be called
-		 * on the next ibstatuschange down interrupt, even
-		 * if it's not a down->up transition.
-		 */
-		val--; /* convert from IB to chip */
-		maskr = IBA7220_IBC_WIDTH_MASK;
-		lsb = IBA7220_IBC_WIDTH_SHIFT;
-		setforce = 1;
-		dd->ipath_flags |= IPATH_IB_FORCE_NOTIFY;
-		break;
-
-	case IPATH_IB_CFG_SPD_ENB: /* set allowed Link speeds */
-		/*
-		 * If we turn off IB1.2, need to preset SerDes defaults,
-		 * but not right now. Set a flag for the next time
-		 * we command the link down.  As with width, only write the
-		 * actual register if the link is currently down, otherwise
-		 * takes effect on next link change.  Since setting is being
-		 * explicitly requested (via MAD or sysfs), clear autoneg
-		 * failure status if speed autoneg is enabled.
-		 */
-		dd->ipath_link_speed_enabled = val;
-		if (dd->ipath_ibcddrctrl & IBA7220_IBC_IBTA_1_2_MASK &&
-		    !(val & (val - 1)))
-			dd->ipath_presets_needed = 1;
-		if ((dd->ipath_flags & (IPATH_LINKDOWN|IPATH_LINKINIT)) !=
-			IPATH_LINKDOWN)
-			goto bail;
-		/*
-		 * We set the IPATH_IB_FORCE_NOTIFY bit so updown
-		 * will get called, because we want to update
-		 * link_speed_active, and the change may not take
-		 * effect for some time (if we are in POLL), so this
-		 * flag will force the updown routine to be called
-		 * on the next ibstatuschange down interrupt, even
-		 * if it's not a down->up transition.  When setting
-		 * speed autoneg, clear AUTONEG_FAILED.
-		 */
-		if (val == (IPATH_IB_SDR | IPATH_IB_DDR)) {
-			val = IBA7220_IBC_SPEED_AUTONEG_MASK |
-				IBA7220_IBC_IBTA_1_2_MASK;
-			dd->ipath_flags &= ~IPATH_IB_AUTONEG_FAILED;
-		} else
-			val = val == IPATH_IB_DDR ?  IBA7220_IBC_SPEED_DDR
-				: IBA7220_IBC_SPEED_SDR;
-		maskr = IBA7220_IBC_SPEED_AUTONEG_MASK |
-			IBA7220_IBC_IBTA_1_2_MASK;
-		lsb = 0; /* speed bits are low bits */
-		setforce = 1;
-		break;
-
-	case IPATH_IB_CFG_RXPOL_ENB: /* set Auto-RX-polarity enable */
-		lsb = IBA7220_IBC_RXPOL_SHIFT;
-		maskr = IBA7220_IBC_RXPOL_MASK;
-		break;
-
-	case IPATH_IB_CFG_LREV_ENB: /* set Auto-Lane-reversal enable */
-		lsb = IBA7220_IBC_LREV_SHIFT;
-		maskr = IBA7220_IBC_LREV_MASK;
-		break;
-
-	default:
-		ret = -ENOTSUPP;
-		goto bail;
-	}
-	dd->ipath_ibcddrctrl &= ~(maskr << lsb);
-	dd->ipath_ibcddrctrl |= (((u64) val & maskr) << lsb);
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcddrctrl,
-			 dd->ipath_ibcddrctrl);
-	if (setforce)
-		dd->ipath_flags |= IPATH_IB_FORCE_NOTIFY;
-bail:
-	return ret;
-}
-
-static void ipath_7220_read_counters(struct ipath_devdata *dd,
-				     struct infinipath_counters *cntrs)
-{
-	u64 *counters = (u64 *) cntrs;
-	int i;
-
-	for (i = 0; i < sizeof(*cntrs) / sizeof(u64); i++)
-		counters[i] = ipath_snap_cntr(dd, i);
-}
-
-/* if we are using MSI, try to fall back to INTx */
-static int ipath_7220_intr_fallback(struct ipath_devdata *dd)
-{
-	if (dd->ipath_msi_lo) {
-		dev_info(&dd->pcidev->dev, "MSI interrupt not detected,"
-			" trying INTx interrupts\n");
-		ipath_7220_nomsi(dd);
-		ipath_enable_intx(dd->pcidev);
-		/*
-		 * some newer kernels require free_irq before disable_msi,
-		 * and the irq can change during disable_msi and INTx enable,
-		 * so we therefore need to use the pcidev->irq value,
-		 * not our saved MSI value.
-		 */
-		dd->ipath_irq = dd->pcidev->irq;
-		if (request_irq(dd->ipath_irq, ipath_intr, IRQF_SHARED,
-			IPATH_DRV_NAME, dd))
-			ipath_dev_err(dd,
-				"Could not re-request_irq for INTx\n");
-		return 1;
-	}
-	return 0;
-}
-
-/*
- * reset the XGXS (between serdes and IBC).  Slightly less intrusive
- * than resetting the IBC or external link state, and useful in some
- * cases to cause some retraining.  To do this right, we reset IBC
- * as well.
- */
-static void ipath_7220_xgxs_reset(struct ipath_devdata *dd)
-{
-	u64 val, prev_val;
-
-	prev_val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
-	val = prev_val | INFINIPATH_XGXS_RESET;
-	prev_val &= ~INFINIPATH_XGXS_RESET; /* be sure */
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
-			 dd->ipath_control & ~INFINIPATH_C_LINKENABLE);
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
-	ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, prev_val);
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
-			 dd->ipath_control);
-}
-
-
-/* Still needs cleanup, too much hardwired stuff */
-static void autoneg_send(struct ipath_devdata *dd,
-	u32 *hdr, u32 dcnt, u32 *data)
-{
-	int i;
-	u64 cnt;
-	u32 __iomem *piobuf;
-	u32 pnum;
-
-	i = 0;
-	cnt = 7 + dcnt + 1; /* 7 dword header, dcnt dwords of data, 1 dword icrc */
-	while (!(piobuf = ipath_getpiobuf(dd, cnt, &pnum))) {
-		if (i++ > 15) {
-			ipath_dbg("Couldn't get pio buffer for send\n");
-			return;
-		}
-		udelay(2);
-	}
-	if (dd->ipath_flags&IPATH_HAS_PBC_CNT)
-		cnt |= 0x80000000UL<<32; /* mark as VL15 */
-	writeq(cnt, piobuf);
-	ipath_flush_wc();
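-	/* PBC is dwords 0-1; the 7-dword header goes at dword 2, payload at 9 */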
-	__iowrite32_copy(piobuf + 2, hdr, 7);
-	__iowrite32_copy(piobuf + 9, data, dcnt);
-	ipath_flush_wc();
-}
-
-/*
- * _start packet gets sent twice at start, _done gets sent twice at end
- */
-static void ipath_autoneg_send(struct ipath_devdata *dd, int which)
-{
-	static u32 swapped;
-	u32 dw, i, hcnt, dcnt, *data;
-	static u32 hdr[7] = { 0xf002ffff, 0x48ffff, 0x6400abba };
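-	/* remaining 4 dwords are implicitly 0 */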
-	static u32 madpayload_start[0x40] = {
-		0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
-		0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
-		0x1, 0x1388, 0x15e, 0x1, /* rest 0's */
-		};
-	static u32 madpayload_done[0x40] = {
-		0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
-		0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
-		0x40000001, 0x1388, 0x15e, /* rest 0's */
-		};
-	dcnt = ARRAY_SIZE(madpayload_start);
-	hcnt = ARRAY_SIZE(hdr);
-	if (!swapped) {
-		/* for maintainability, do it at runtime */
-		for (i = 0; i < hcnt; i++) {
-			dw = (__force u32) cpu_to_be32(hdr[i]);
-			hdr[i] = dw;
-		}
-		for (i = 0; i < dcnt; i++) {
-			dw = (__force u32) cpu_to_be32(madpayload_start[i]);
-			madpayload_start[i] = dw;
-			dw = (__force u32) cpu_to_be32(madpayload_done[i]);
-			madpayload_done[i] = dw;
-		}
-		swapped = 1;
-	}
-
-	data = which ? madpayload_done : madpayload_start;
-	ipath_cdbg(PKT, "Sending %s special MADs\n", which?"done":"start");
-
-	autoneg_send(dd, hdr, dcnt, data);
-	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-	udelay(2);
-	autoneg_send(dd, hdr, dcnt, data);
-	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-	udelay(2);
-}
-
-
-
-/*
- * Do the absolute minimum to cause an IB speed change, and make it
- * ready, but don't actually trigger the change.   The caller will
- * do that when ready (if link is in Polling training state, it will
- * happen immediately, otherwise when link next goes down)
- *
- * This routine should only be used as part of the DDR autonegotiation
- * code for devices that are not compliant with IB 1.2 (or code that
- * fixes things up for same).
- *
- * When the link has gone down and autoneg is enabled, or autoneg has
- * failed and we give up until next time, we set both speeds, and
- * then we want IBTA negotiation enabled as well as "use max enabled speed".
- */
-static void set_speed_fast(struct ipath_devdata *dd, u32 speed)
-{
-	dd->ipath_ibcddrctrl &= ~(IBA7220_IBC_SPEED_AUTONEG_MASK |
-		IBA7220_IBC_IBTA_1_2_MASK |
-		(IBA7220_IBC_WIDTH_MASK << IBA7220_IBC_WIDTH_SHIFT));
-
-	if (speed == (IPATH_IB_SDR | IPATH_IB_DDR))
-		dd->ipath_ibcddrctrl |= IBA7220_IBC_SPEED_AUTONEG_MASK |
-			IBA7220_IBC_IBTA_1_2_MASK;
-	else
-		dd->ipath_ibcddrctrl |= speed == IPATH_IB_DDR ?
-			IBA7220_IBC_SPEED_DDR : IBA7220_IBC_SPEED_SDR;
-
-	/*
-	 * Convert from IB-style 1 = 1x, 2 = 4x, 3 = auto
-	 * to chip-centric       0 = 1x, 1 = 4x, 2 = auto
-	 */
-	dd->ipath_ibcddrctrl |= (u64)(dd->ipath_link_width_enabled - 1) <<
-		IBA7220_IBC_WIDTH_SHIFT;
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcddrctrl,
-			dd->ipath_ibcddrctrl);
-	ipath_cdbg(VERBOSE, "setup for IB speed (%x) done\n", speed);
-}
-
-
-/*
- * this routine is only used when we are not talking to another
- * IB 1.2-compliant device that we think can do DDR.
- * (This includes all existing switch chips as of Oct 2007.)
- * 1.2-compliant devices go directly to DDR prior to reaching INIT.
- */
-static void try_auto_neg(struct ipath_devdata *dd)
-{
-	/*
-	 * required for older non-IB1.2 DDR switches.  Newer
-	 * non-IB-compliant switches don't need it, but so far,
-	 * aren't bothered by it either.  "Magic constant"
-	 */
-	ipath_write_kreg(dd, IPATH_KREG_OFFSET(IBNCModeCtrl),
-		0x3b9dc07);
-	dd->ipath_flags |= IPATH_IB_AUTONEG_INPROG;
-	ipath_autoneg_send(dd, 0);
-	set_speed_fast(dd, IPATH_IB_DDR);
-	ipath_toggle_rclkrls(dd);
-	/* 2 msec is minimum length of a poll cycle */
-	schedule_delayed_work(&dd->ipath_autoneg_work,
-		msecs_to_jiffies(2));
-}
-
-
-static int ipath_7220_ib_updown(struct ipath_devdata *dd, int ibup, u64 ibcs)
-{
-	int ret = 0, symadj = 0;
-	u32 ltstate = ipath_ib_linkstate(dd, ibcs);
-
-	dd->ipath_link_width_active =
-		((ibcs >> IBA7220_IBCS_LINKWIDTH_SHIFT) & 1) ?
-		    IB_WIDTH_4X : IB_WIDTH_1X;
-	dd->ipath_link_speed_active =
-		((ibcs >> IBA7220_IBCS_LINKSPEED_SHIFT) & 1) ?
-		    IPATH_IB_DDR : IPATH_IB_SDR;
-
-	if (!ibup) {
-		/*
-		 * when the link goes down we don't want aeq running, so it
-		 * won't interfere with IBC training, etc., and we need
-		 * to go back to the static SerDes preset values
-		 */
-		if (dd->ipath_x1_fix_tries &&
-			 ltstate <= INFINIPATH_IBCS_LT_STATE_SLEEPQUIET &&
-			ltstate != INFINIPATH_IBCS_LT_STATE_LINKUP)
-			dd->ipath_x1_fix_tries = 0;
-		if (!(dd->ipath_flags & (IPATH_IB_AUTONEG_FAILED |
-			IPATH_IB_AUTONEG_INPROG)))
-			set_speed_fast(dd, dd->ipath_link_speed_enabled);
-		if (!(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG)) {
-			ipath_cdbg(VERBOSE, "Setting RXEQ defaults\n");
-			ipath_sd7220_presets(dd);
-		}
-		/* this might be better placed in ipath_sd7220_presets() */
-		ipath_set_relock_poll(dd, ibup);
-	} else {
-		if (ipath_compat_ddr_negotiate &&
-		    !(dd->ipath_flags & (IPATH_IB_AUTONEG_FAILED |
-			IPATH_IB_AUTONEG_INPROG)) &&
-			dd->ipath_link_speed_active == IPATH_IB_SDR &&
-			(dd->ipath_link_speed_enabled &
-			    (IPATH_IB_DDR | IPATH_IB_SDR)) ==
-			    (IPATH_IB_DDR | IPATH_IB_SDR) &&
-			dd->ipath_autoneg_tries < IPATH_AUTONEG_TRIES) {
-			/* we are SDR, and DDR auto-negotiation enabled */
-			++dd->ipath_autoneg_tries;
-			ipath_dbg("DDR negotiation try, %u/%u\n",
-				dd->ipath_autoneg_tries,
-				IPATH_AUTONEG_TRIES);
-			if (!dd->ibdeltainprog) {
-				dd->ibdeltainprog = 1;
-				dd->ibsymsnap = ipath_read_creg32(dd,
-					dd->ipath_cregs->cr_ibsymbolerrcnt);
-				dd->iblnkerrsnap = ipath_read_creg32(dd,
-					dd->ipath_cregs->cr_iblinkerrrecovcnt);
-			}
-			try_auto_neg(dd);
-			ret = 1; /* no other IB status change processing */
-		} else if ((dd->ipath_flags & IPATH_IB_AUTONEG_INPROG)
-			&& dd->ipath_link_speed_active == IPATH_IB_SDR) {
-			ipath_autoneg_send(dd, 1);
-			set_speed_fast(dd, IPATH_IB_DDR);
-			udelay(2);
-			ipath_toggle_rclkrls(dd);
-			ret = 1; /* no other IB status change processing */
-		} else {
-			if ((dd->ipath_flags & IPATH_IB_AUTONEG_INPROG) &&
-				(dd->ipath_link_speed_active & IPATH_IB_DDR)) {
-				ipath_dbg("Got to INIT with DDR autoneg\n");
-				dd->ipath_flags &= ~(IPATH_IB_AUTONEG_INPROG
-					| IPATH_IB_AUTONEG_FAILED);
-				dd->ipath_autoneg_tries = 0;
-				/* re-enable SDR, for next link down */
-				set_speed_fast(dd,
-					dd->ipath_link_speed_enabled);
-				wake_up(&dd->ipath_autoneg_wait);
-				symadj = 1;
-			} else if (dd->ipath_flags & IPATH_IB_AUTONEG_FAILED) {
-				/*
-				 * clear autoneg failure flag, and do setup
-				 * so we'll try next time link goes down and
-				 * back to INIT (possibly connected to different
-				 * device).
-				 */
-				ipath_dbg("INIT %sDR after autoneg failure\n",
-					(dd->ipath_link_speed_active &
-					  IPATH_IB_DDR) ? "D" : "S");
-				dd->ipath_flags &= ~IPATH_IB_AUTONEG_FAILED;
-				dd->ipath_ibcddrctrl |=
-					IBA7220_IBC_IBTA_1_2_MASK;
-				ipath_write_kreg(dd,
-					IPATH_KREG_OFFSET(IBNCModeCtrl), 0);
-				symadj = 1;
-			}
-		}
-		/*
-		 * if we are in 1X on rev1 only, and are in autoneg width,
-		 * it could be due to an xgxs problem, so if we haven't
-		 * already tried, try twice to get to 4X; if we
-		 * tried, and couldn't, report it, since it will
-		 * probably not be what is desired.
-		 */
-		if (dd->ipath_minrev == 1 &&
-		    (dd->ipath_link_width_enabled & (IB_WIDTH_1X |
-			IB_WIDTH_4X)) == (IB_WIDTH_1X | IB_WIDTH_4X)
-			&& dd->ipath_link_width_active == IB_WIDTH_1X
-			&& dd->ipath_x1_fix_tries < 3) {
-			if (++dd->ipath_x1_fix_tries == 3) {
-				dev_info(&dd->pcidev->dev,
-					"IB link is in 1X mode\n");
-				if (!(dd->ipath_flags &
-				      IPATH_IB_AUTONEG_INPROG))
-					symadj = 1;
-			} else {
-				ipath_cdbg(VERBOSE, "IB 1X in "
-					"auto-width, try %u to be "
-					"sure it's really 1X; "
-					"ltstate %u\n",
-					 dd->ipath_x1_fix_tries,
-					 ltstate);
-				dd->ipath_f_xgxs_reset(dd);
-				ret = 1; /* skip other processing */
-			}
-		} else if (!(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG))
-			symadj = 1;
-
-		if (!ret) {
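-			/*
-			 * rate_to_delay is indexed [speed][width],
-			 * where 0 = SDR/1X and 1 = DDR/4X
-			 */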
-			dd->delay_mult = rate_to_delay
-			    [(ibcs >> IBA7220_IBCS_LINKSPEED_SHIFT) & 1]
-			    [(ibcs >> IBA7220_IBCS_LINKWIDTH_SHIFT) & 1];
-
-			ipath_set_relock_poll(dd, ibup);
-		}
-	}
-
-	if (symadj) {
-		if (dd->ibdeltainprog) {
-			dd->ibdeltainprog = 0;
-			dd->ibsymdelta += ipath_read_creg32(dd,
-				dd->ipath_cregs->cr_ibsymbolerrcnt) -
-				dd->ibsymsnap;
-			dd->iblnkerrdelta += ipath_read_creg32(dd,
-				dd->ipath_cregs->cr_iblinkerrrecovcnt) -
-				dd->iblnkerrsnap;
-		}
-	} else if (!ibup && !dd->ibdeltainprog
-		   && !(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG)) {
-		dd->ibdeltainprog = 1;
-		dd->ibsymsnap =	ipath_read_creg32(dd,
-				     dd->ipath_cregs->cr_ibsymbolerrcnt);
-		dd->iblnkerrsnap = ipath_read_creg32(dd,
-				     dd->ipath_cregs->cr_iblinkerrrecovcnt);
-	}
-
-	if (!ret)
-		ipath_setup_7220_setextled(dd, ipath_ib_linkstate(dd, ibcs),
-			ltstate);
-	return ret;
-}
-
-
-/*
- * Handle the empirically determined mechanism for auto-negotiation
- * of DDR speed with switches.
- */
-static void autoneg_work(struct work_struct *work)
-{
-	struct ipath_devdata *dd;
-	u64 startms;
-	u32 lastlts, i;
-
-	dd = container_of(work, struct ipath_devdata,
-		ipath_autoneg_work.work);
-
-	startms = jiffies_to_msecs(jiffies);
-
-	/*
-	 * Busy-wait for this first part; it should take at most a
-	 * few hundred usec, since we scheduled ourselves for 2 msec.
-	 */
-	for (i = 0; i < 25; i++) {
-		lastlts = ipath_ib_linktrstate(dd, dd->ipath_lastibcstat);
-		if (lastlts == INFINIPATH_IBCS_LT_STATE_POLLQUIET) {
-			ipath_set_linkstate(dd, IPATH_IB_LINKDOWN_DISABLE);
-			break;
-		}
-		udelay(100);
-	}
-
-	if (!(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG))
-		goto done; /* we got there early or were told to stop */
-
-	/* we expect this to timeout */
-	if (wait_event_timeout(dd->ipath_autoneg_wait,
-		!(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG),
-		msecs_to_jiffies(90)))
-		goto done;
-
-	ipath_toggle_rclkrls(dd);
-
-	/* we expect this to timeout */
-	if (wait_event_timeout(dd->ipath_autoneg_wait,
-		!(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG),
-		msecs_to_jiffies(1700)))
-		goto done;
-
-	set_speed_fast(dd, IPATH_IB_SDR);
-	ipath_toggle_rclkrls(dd);
-
-	/*
-	 * wait up to 250 msec for link to train and get to INIT at DDR;
-	 * this should terminate early
-	 */
-	wait_event_timeout(dd->ipath_autoneg_wait,
-		!(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG),
-		msecs_to_jiffies(250));
-done:
-	if (dd->ipath_flags & IPATH_IB_AUTONEG_INPROG) {
-		ipath_dbg("Did not get to DDR INIT (%x) after %Lu msecs\n",
-			ipath_ib_state(dd, dd->ipath_lastibcstat),
-			(unsigned long long) (jiffies_to_msecs(jiffies) - startms));
-		dd->ipath_flags &= ~IPATH_IB_AUTONEG_INPROG;
-		if (dd->ipath_autoneg_tries == IPATH_AUTONEG_TRIES) {
-			dd->ipath_flags |= IPATH_IB_AUTONEG_FAILED;
-			ipath_dbg("Giving up on DDR until next IB "
-				"link Down\n");
-			dd->ipath_autoneg_tries = 0;
-		}
-		set_speed_fast(dd, dd->ipath_link_speed_enabled);
-	}
-}
-
-
-/**
- * ipath_init_iba7220_funcs - set up the chip-specific function pointers
- * @dd: the infinipath device
- *
- * This is global, and is called directly at init to set up the
- * chip-specific function pointers for later use.
- */
-void ipath_init_iba7220_funcs(struct ipath_devdata *dd)
-{
-	dd->ipath_f_intrsetup = ipath_7220_intconfig;
-	dd->ipath_f_bus = ipath_setup_7220_config;
-	dd->ipath_f_reset = ipath_setup_7220_reset;
-	dd->ipath_f_get_boardname = ipath_7220_boardname;
-	dd->ipath_f_init_hwerrors = ipath_7220_init_hwerrors;
-	dd->ipath_f_early_init = ipath_7220_early_init;
-	dd->ipath_f_handle_hwerrors = ipath_7220_handle_hwerrors;
-	dd->ipath_f_quiet_serdes = ipath_7220_quiet_serdes;
-	dd->ipath_f_bringup_serdes = ipath_7220_bringup_serdes;
-	dd->ipath_f_clear_tids = ipath_7220_clear_tids;
-	dd->ipath_f_put_tid = ipath_7220_put_tid;
-	dd->ipath_f_cleanup = ipath_setup_7220_cleanup;
-	dd->ipath_f_setextled = ipath_setup_7220_setextled;
-	dd->ipath_f_get_base_info = ipath_7220_get_base_info;
-	dd->ipath_f_free_irq = ipath_7220_free_irq;
-	dd->ipath_f_tidtemplate = ipath_7220_tidtemplate;
-	dd->ipath_f_intr_fallback = ipath_7220_intr_fallback;
-	dd->ipath_f_xgxs_reset = ipath_7220_xgxs_reset;
-	dd->ipath_f_get_ib_cfg = ipath_7220_get_ib_cfg;
-	dd->ipath_f_set_ib_cfg = ipath_7220_set_ib_cfg;
-	dd->ipath_f_config_jint = ipath_7220_config_jint;
-	dd->ipath_f_config_ports = ipath_7220_config_ports;
-	dd->ipath_f_read_counters = ipath_7220_read_counters;
-	dd->ipath_f_get_msgheader = ipath_7220_get_msgheader;
-	dd->ipath_f_ib_updown = ipath_7220_ib_updown;
-
-	/* initialize chip-specific variables */
-	ipath_init_7220_variables(dd);
-}
diff --git a/drivers/infiniband/hw/ipath/ipath_kernel.h b/drivers/infiniband/hw/ipath/ipath_kernel.h
index b3d7efcdf021..6559af60bffd 100644
--- a/drivers/infiniband/hw/ipath/ipath_kernel.h
+++ b/drivers/infiniband/hw/ipath/ipath_kernel.h
@@ -1030,8 +1030,6 @@ void ipath_free_data(struct ipath_portdata *dd);
 u32 __iomem *ipath_getpiobuf(struct ipath_devdata *, u32, u32 *);
 void ipath_chg_pioavailkernel(struct ipath_devdata *dd, unsigned start,
 				unsigned len, int avail);
-void ipath_init_iba7220_funcs(struct ipath_devdata *);
-void ipath_init_iba6120_funcs(struct ipath_devdata *);
 void ipath_init_iba6110_funcs(struct ipath_devdata *);
 void ipath_get_eeprom_info(struct ipath_devdata *);
 int ipath_update_eeprom_log(struct ipath_devdata *dd);
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index 559f39be0dcc..dd7f26d04d46 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -2182,7 +2182,7 @@ int ipath_register_ib_device(struct ipath_devdata *dd)
 	snprintf(dev->node_desc, sizeof(dev->node_desc),
 		 IPATH_IDSTR " %s", init_utsname()->nodename);
 
-	ret = ib_register_device(dev);
+	ret = ib_register_device(dev, NULL);
 	if (ret)
 		goto err_reg;
 
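Every ib_register_device() call site in this patch gains a second argument. Judging from qib_create_port_files() declared in qib.h below (which takes an ib_device, a port number, and a kobject), the new parameter appears to be an optional per-port sysfs callback invoked at registration time; drivers with no per-port files pass NULL. A hypothetical qib-style call site, with ibdev standing in for the driver's ib_device:

	ret = ib_register_device(ibdev, qib_create_port_files);
	if (ret)
		goto err_reg;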
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 39051417054c..4e94e360e43b 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -662,7 +662,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 	spin_lock_init(&ibdev->sm_lock);
 	mutex_init(&ibdev->cap_mask_mutex);
 
-	if (ib_register_device(&ibdev->ib_dev))
+	if (ib_register_device(&ibdev->ib_dev, NULL))
 		goto err_map;
 
 	if (mlx4_ib_mad_init(ibdev))
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index f080a784bc79..1e0b4b6074ad 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -1403,7 +1403,7 @@ int mthca_register_device(struct mthca_dev *dev)
 
 	mutex_init(&dev->cap_mask_mutex);
 
-	ret = ib_register_device(&dev->ib_dev);
+	ret = ib_register_device(&dev->ib_dev, NULL);
 	if (ret)
 		return ret;
 
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 86acb7d57064..57874a165083 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -2584,7 +2584,6 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
 				break;
 			}
 		}
-		spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
 
 		if (phy_data & 0x0004) {
 			if (wide_ppm_offset &&
@@ -2639,6 +2638,8 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
 		}
 	}
 
+	spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
+
 	nesadapter->mac_sw_state[mac_number] = NES_MAC_SW_IDLE;
 }
 
@@ -3422,6 +3423,7 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
 	struct nes_adapter *nesadapter = nesdev->nesadapter;
 	u32 aeq_info;
 	u32 next_iwarp_state = 0;
+	u32 aeqe_cq_id;
 	u16 async_event_id;
 	u8 tcp_state;
 	u8 iwarp_state;
@@ -3449,6 +3451,14 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
 			le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]), aeqe,
 			nes_tcp_state_str[tcp_state], nes_iwarp_state_str[iwarp_state]);
 
+	aeqe_cq_id = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]);
+	if (aeq_info & NES_AEQE_QP) {
+		if ((!nes_is_resource_allocated(nesadapter, nesadapter->allocated_qps,
+				aeqe_cq_id)) ||
+				(atomic_read(&nesqp->close_timer_started)))
+			return;
+	}
+
 	switch (async_event_id) {
 		case NES_AEQE_AEID_LLP_FIN_RECEIVED:
 			if (nesqp->term_flags)
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 9f4cadf9f851..242f42d8c1c6 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -1002,6 +1002,7 @@ static int nes_netdev_change_mtu(struct net_device *netdev, int new_mtu)
 	return ret;
 }
 
+
 static const char nes_ethtool_stringset[][ETH_GSTRING_LEN] = {
 	"Link Change Interrupts",
 	"Linearized SKBs",
@@ -1016,11 +1017,15 @@ static const char nes_ethtool_stringset[][ETH_GSTRING_LEN] = {
 	"Rx Jabber Errors",
 	"Rx Oversized Frames",
 	"Rx Short Frames",
+	"Rx Length Errors",
+	"Rx CRC Errors",
+	"Rx Port Discard",
 	"Endnode Rx Discards",
 	"Endnode Rx Octets",
 	"Endnode Rx Frames",
 	"Endnode Tx Octets",
 	"Endnode Tx Frames",
+	"Tx Errors",
 	"mh detected",
 	"mh pauses",
 	"Retransmission Count",
@@ -1049,19 +1054,13 @@ static const char nes_ethtool_stringset[][ETH_GSTRING_LEN] = {
 	"CM Nodes Destroyed",
 	"CM Accel Drops",
 	"CM Resets Received",
+	"Free 4Kpbls",
+	"Free 256pbls",
 	"Timer Inits",
-	"CQ Depth 1",
-	"CQ Depth 4",
-	"CQ Depth 16",
-	"CQ Depth 24",
-	"CQ Depth 32",
-	"CQ Depth 128",
-	"CQ Depth 256",
 	"LRO aggregated",
 	"LRO flushed",
 	"LRO no_desc",
 };
-
 #define NES_ETHTOOL_STAT_COUNT  ARRAY_SIZE(nes_ethtool_stringset)
 
 /**
@@ -1121,12 +1120,14 @@ static void nes_netdev_get_strings(struct net_device *netdev, u32 stringset,
 /**
  * nes_netdev_get_ethtool_stats
  */
+
 static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
 		struct ethtool_stats *target_ethtool_stats, u64 *target_stat_values)
 {
 	u64 u64temp;
 	struct nes_vnic *nesvnic = netdev_priv(netdev);
 	struct nes_device *nesdev = nesvnic->nesdev;
+	struct nes_adapter *nesadapter = nesdev->nesadapter;
 	u32 nic_count;
 	u32 u32temp;
 	u32 index = 0;
@@ -1155,6 +1156,46 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
 	nesvnic->nesdev->port_tx_discards += u32temp;
 	nesvnic->netstats.tx_dropped += u32temp;
 
+	u32temp = nes_read_indexed(nesdev,
+			NES_IDX_MAC_RX_SHORT_FRAMES + (nesvnic->nesdev->mac_index*0x200));
+	nesvnic->netstats.rx_dropped += u32temp;
+	nesvnic->nesdev->mac_rx_errors += u32temp;
+	nesvnic->nesdev->mac_rx_short_frames += u32temp;
+
+	u32temp = nes_read_indexed(nesdev,
+			NES_IDX_MAC_RX_OVERSIZED_FRAMES + (nesvnic->nesdev->mac_index*0x200));
+	nesvnic->netstats.rx_dropped += u32temp;
+	nesvnic->nesdev->mac_rx_errors += u32temp;
+	nesvnic->nesdev->mac_rx_oversized_frames += u32temp;
+
+	u32temp = nes_read_indexed(nesdev,
+			NES_IDX_MAC_RX_JABBER_FRAMES + (nesvnic->nesdev->mac_index*0x200));
+	nesvnic->netstats.rx_dropped += u32temp;
+	nesvnic->nesdev->mac_rx_errors += u32temp;
+	nesvnic->nesdev->mac_rx_jabber_frames += u32temp;
+
+	u32temp = nes_read_indexed(nesdev,
+			NES_IDX_MAC_RX_SYMBOL_ERR_FRAMES + (nesvnic->nesdev->mac_index*0x200));
+	nesvnic->netstats.rx_dropped += u32temp;
+	nesvnic->nesdev->mac_rx_errors += u32temp;
+	nesvnic->nesdev->mac_rx_symbol_err_frames += u32temp;
+
+	u32temp = nes_read_indexed(nesdev,
+			NES_IDX_MAC_RX_LENGTH_ERR_FRAMES + (nesvnic->nesdev->mac_index*0x200));
+	nesvnic->netstats.rx_length_errors += u32temp;
+	nesvnic->nesdev->mac_rx_errors += u32temp;
+
+	u32temp = nes_read_indexed(nesdev,
+			NES_IDX_MAC_RX_CRC_ERR_FRAMES + (nesvnic->nesdev->mac_index*0x200));
+	nesvnic->nesdev->mac_rx_errors += u32temp;
+	nesvnic->nesdev->mac_rx_crc_errors += u32temp;
+	nesvnic->netstats.rx_crc_errors += u32temp;
+
+	u32temp = nes_read_indexed(nesdev,
+			NES_IDX_MAC_TX_ERRORS + (nesvnic->nesdev->mac_index*0x200));
+	nesvnic->nesdev->mac_tx_errors += u32temp;
+	nesvnic->netstats.tx_errors += u32temp;
+
 	for (nic_count = 0; nic_count < NES_MAX_PORT_COUNT; nic_count++) {
 		if (nesvnic->qp_nic_index[nic_count] == 0xf)
 			break;
@@ -1219,11 +1260,15 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
 	target_stat_values[++index] = nesvnic->nesdev->mac_rx_jabber_frames;
 	target_stat_values[++index] = nesvnic->nesdev->mac_rx_oversized_frames;
 	target_stat_values[++index] = nesvnic->nesdev->mac_rx_short_frames;
+	target_stat_values[++index] = nesvnic->netstats.rx_length_errors;
+	target_stat_values[++index] = nesvnic->nesdev->mac_rx_crc_errors;
+	target_stat_values[++index] = nesvnic->nesdev->port_rx_discards;
 	target_stat_values[++index] = nesvnic->endnode_nstat_rx_discard;
 	target_stat_values[++index] = nesvnic->endnode_nstat_rx_octets;
 	target_stat_values[++index] = nesvnic->endnode_nstat_rx_frames;
 	target_stat_values[++index] = nesvnic->endnode_nstat_tx_octets;
 	target_stat_values[++index] = nesvnic->endnode_nstat_tx_frames;
+	target_stat_values[++index] = nesvnic->nesdev->mac_tx_errors;
 	target_stat_values[++index] = mh_detected;
 	target_stat_values[++index] = mh_pauses_sent;
 	target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
@@ -1252,21 +1297,14 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
 	target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
 	target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
 	target_stat_values[++index] = atomic_read(&cm_resets_recvd);
+	target_stat_values[++index] = nesadapter->free_4kpbl;
+	target_stat_values[++index] = nesadapter->free_256pbl;
 	target_stat_values[++index] = int_mod_timer_init;
-	target_stat_values[++index] = int_mod_cq_depth_1;
-	target_stat_values[++index] = int_mod_cq_depth_4;
-	target_stat_values[++index] = int_mod_cq_depth_16;
-	target_stat_values[++index] = int_mod_cq_depth_24;
-	target_stat_values[++index] = int_mod_cq_depth_32;
-	target_stat_values[++index] = int_mod_cq_depth_128;
-	target_stat_values[++index] = int_mod_cq_depth_256;
 	target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
 	target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
 	target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
-
 }
 
-
 /**
  * nes_netdev_get_drvinfo
  */
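The ethtool changes above edit two arrays that must stay in index lockstep: nes_ethtool_stringset[] and the target_stat_values[++index] assignments in nes_netdev_get_ethtool_stats(). NES_ETHTOOL_STAT_COUNT is derived from the string table with ARRAY_SIZE(), so a sketch of the count hook (assuming the usual ETH_SS_STATS convention; the helper name is illustrative) is:

	static int nes_sset_count_sketch(struct net_device *netdev, int sset)
	{
		return (sset == ETH_SS_STATS) ? NES_ETHTOOL_STAT_COUNT : -EOPNOTSUPP;
	}

Adding "Rx Length Errors" and friends to the strings without the matching value writes (or vice versa) would silently shift every later column.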
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 925e1f2d1d55..9bc2d744b2ea 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -3962,7 +3962,7 @@ int nes_register_ofa_device(struct nes_ib_device *nesibdev)
 	struct nes_adapter *nesadapter = nesdev->nesadapter;
 	int i, ret;
 
-	ret = ib_register_device(&nesvnic->nesibdev->ibdev);
+	ret = ib_register_device(&nesvnic->nesibdev->ibdev, NULL);
 	if (ret) {
 		return ret;
 	}
diff --git a/drivers/infiniband/hw/qib/Kconfig b/drivers/infiniband/hw/qib/Kconfig
new file mode 100644
index 000000000000..7c03a70c55a2
--- /dev/null
+++ b/drivers/infiniband/hw/qib/Kconfig
@@ -0,0 +1,7 @@
+config INFINIBAND_QIB
+	tristate "QLogic PCIe HCA support"
+	depends on 64BIT && NET
+	---help---
+	This is a low-level driver for QLogic PCIe QLE InfiniBand host
+	channel adapters.  This driver does not support the QLogic
+	HyperTransport card (model QHT7140).

diff --git a/drivers/infiniband/hw/qib/Makefile b/drivers/infiniband/hw/qib/Makefile
new file mode 100644
index 000000000000..c6515a1b9a6a
--- /dev/null
+++ b/drivers/infiniband/hw/qib/Makefile
@@ -0,0 +1,15 @@
+obj-$(CONFIG_INFINIBAND_QIB) += ib_qib.o
+
+ib_qib-y := qib_cq.o qib_diag.o qib_dma.o qib_driver.o qib_eeprom.o \
+	qib_file_ops.o qib_fs.o qib_init.o qib_intr.o qib_keys.o \
+	qib_mad.o qib_mmap.o qib_mr.o qib_pcie.o qib_pio_copy.o \
+	qib_qp.o qib_qsfp.o qib_rc.o qib_ruc.o qib_sdma.o qib_srq.o \
+	qib_sysfs.o qib_twsi.o qib_tx.o qib_uc.o qib_ud.o \
+	qib_user_pages.o qib_user_sdma.o qib_verbs_mcast.o qib_iba7220.o \
+	qib_sd7220.o qib_sd7220_img.o qib_iba7322.o qib_verbs.o
+
+# 6120 has no fallback if no MSI interrupts, others can do INTx
+ib_qib-$(CONFIG_PCI_MSI) += qib_iba6120.o
+
+ib_qib-$(CONFIG_X86_64) += qib_wc_x86_64.o
+ib_qib-$(CONFIG_PPC64) += qib_wc_ppc64.o
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
new file mode 100644
index 000000000000..32d9208efcff
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -0,0 +1,1439 @@
+#ifndef _QIB_KERNEL_H
+#define _QIB_KERNEL_H
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
+ * All rights reserved.
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * This header file is the base header file for qlogic_ib kernel code
+ * qib_user.h serves a similar purpose for user code.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/scatterlist.h>
+#include <linux/io.h>
+#include <linux/fs.h>
+#include <linux/completion.h>
+#include <linux/kref.h>
+#include <linux/sched.h>
+
+#include "qib_common.h"
+#include "qib_verbs.h"
+
+/* only s/w major version of QLogic_IB we can handle */
+#define QIB_CHIP_VERS_MAJ 2U
+
+/* don't care about this except printing */
+#define QIB_CHIP_VERS_MIN 0U
+
+/* The Organization Unique Identifier (Mfg code), and its position in GUID */
+#define QIB_OUI 0x001175
+#define QIB_OUI_LSB 40
+
+/*
+ * per driver stats: either not device- or port-specific, or
+ * summed over all of the devices and ports.
+ * They are described by name via the ipathfs filesystem, so layout
+ * and number of elements can change without breaking compatibility.
+ * If members are added or deleted qib_statnames[] in qib_fs.c must
+ * change to match.
+ */
+struct qlogic_ib_stats {
+	__u64 sps_ints; /* number of interrupts handled */
+	__u64 sps_errints; /* number of error interrupts */
+	__u64 sps_txerrs; /* tx-related packet errors */
+	__u64 sps_rcverrs; /* non-crc rcv packet errors */
+	__u64 sps_hwerrs; /* hardware errors reported (parity, etc.) */
+	__u64 sps_nopiobufs; /* no pio bufs avail from kernel */
+	__u64 sps_ctxts; /* number of contexts currently open */
+	__u64 sps_lenerrs; /* number of kernel packets where RHF != LRH len */
+	__u64 sps_buffull;
+	__u64 sps_hdrfull;
+};
+
+extern struct qlogic_ib_stats qib_stats;
+extern struct pci_error_handlers qib_pci_err_handler;
+extern struct pci_driver qib_driver;
+
+#define QIB_CHIP_SWVERSION QIB_CHIP_VERS_MAJ
+/*
+ * First-cut criterion for "device is active" is
+ * two thousand dwords combined Tx, Rx traffic per
+ * 5-second interval. SMA packets are 64 dwords,
+ * and occur "a few per second", presumably each way.
+ */
+#define QIB_TRAFFIC_ACTIVE_THRESHOLD (2000)
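Taking the comment above literally, the threshold might be applied from the 5-second stats timer roughly as below. The counter indices, f_portcntr() hook, ACTIVITY_TIMER, and active_time all come from later in this header; the helper itself is illustrative:

	static void qib_check_active_sketch(struct qib_pportdata *ppd, u64 *prev_wds)
	{
		u64 wds = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_WORDSEND) +
			  ppd->dd->f_portcntr(ppd, QIBPORTCNTR_WORDRCV);

		/* 2000+ combined dwords in the last 5 seconds counts as active */
		if (wds - *prev_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
			atomic_add(ACTIVITY_TIMER, &ppd->dd->active_time);
		*prev_wds = wds;
	}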
+
+/*
+ * Struct used to indicate which errors are logged in each of the
+ * error-counters that are logged to EEPROM. A counter is incremented
+ * _once_ (saturating at 255) for each event with any bits set in
+ * the error or hwerror register masks below.
+ */
+#define QIB_EEP_LOG_CNT (4)
+struct qib_eep_log_mask {
+	u64 errs_to_log;
+	u64 hwerrs_to_log;
+};
+
+/*
+ * Below contains all data related to a single context (formerly called port).
+ */
+struct qib_ctxtdata {
+	void **rcvegrbuf;
+	dma_addr_t *rcvegrbuf_phys;
+	/* rcvhdrq base, needs mmap before useful */
+	void *rcvhdrq;
+	/* kernel virtual address where hdrqtail is updated */
+	void *rcvhdrtail_kvaddr;
+	/*
+	 * temp buffer for expected send setup, allocated at open, instead
+	 * of each setup call
+	 */
+	void *tid_pg_list;
+	/*
+	 * Shared page for kernel to signal user processes that send buffers
+	 * need disarming.  The process should call QIB_CMD_DISARM_BUFS
+	 * or QIB_CMD_ACK_EVENT with IPATH_EVENT_DISARM_BUFS set.
+	 */
+	unsigned long *user_event_mask;
+	/* when waiting for rcv or pioavail */
+	wait_queue_head_t wait;
+	/*
+	 * rcvegr bufs base, physical; must fit in 44 bits so that
+	 * 32 bit programs using mmap64 (44 bit addresses) work
+	 */
+	dma_addr_t rcvegr_phys;
+	/* mmap of hdrq, must fit in 44 bits */
+	dma_addr_t rcvhdrq_phys;
+	dma_addr_t rcvhdrqtailaddr_phys;
+
+	/*
+	 * number of opens (including slave sub-contexts) on this instance
+	 * (ignoring forks, dup, etc. for now)
+	 */
+	int cnt;
+	/*
+	 * how much space to leave at start of eager TID entries for
+	 * protocol use, on each TID
+	 */
+	/* ctxt number, stored here instead of being recalculated */
+	unsigned ctxt;
+	/* non-zero if ctxt is being shared. */
+	u16 subctxt_cnt;
+	/* non-zero if ctxt is being shared. */
+	u16 subctxt_id;
+	/* number of eager TID entries. */
+	u16 rcvegrcnt;
+	/* index of first eager TID entry. */
+	u16 rcvegr_tid_base;
+	/* number of pio bufs for this ctxt (all procs, if shared) */
+	u32 piocnt;
+	/* first pio buffer for this ctxt */
+	u32 pio_base;
+	/* chip offset of PIO buffers for this ctxt */
+	u32 piobufs;
+	/* how many alloc_pages() chunks in rcvegrbuf_pages */
+	u32 rcvegrbuf_chunks;
+	/* how many egrbufs per chunk */
+	u32 rcvegrbufs_perchunk;
+	/* order for rcvegrbuf_pages */
+	size_t rcvegrbuf_size;
+	/* rcvhdrq size (for freeing) */
+	size_t rcvhdrq_size;
+	/* per-context flags for fileops/intr communication */
+	unsigned long flag;
+	/* next expected TID to check when looking for free */
+	u32 tidcursor;
+	/* WAIT_RCV that timed out, no interrupt */
+	u32 rcvwait_to;
+	/* WAIT_PIO that timed out, no interrupt */
+	u32 piowait_to;
+	/* WAIT_RCV already happened, no wait */
+	u32 rcvnowait;
+	/* WAIT_PIO already happened, no wait */
+	u32 pionowait;
+	/* total number of polled urgent packets */
+	u32 urgent;
+	/* saved total number of polled urgent packets for poll edge trigger */
+	u32 urgent_poll;
+	/* pid of process using this ctxt */
+	pid_t pid;
+	pid_t subpid[QLOGIC_IB_MAX_SUBCTXT];
+	/* same size as task_struct .comm[], command that opened context */
+	char comm[16];
+	/* pkeys set by this use of this ctxt */
+	u16 pkeys[4];
+	/* so file ops can get at unit */
+	struct qib_devdata *dd;
+	/* so funcs that need physical port can get it easily */
+	struct qib_pportdata *ppd;
+	/* A page of memory for rcvhdrhead, rcvegrhead, rcvegrtail * N */
+	void *subctxt_uregbase;
+	/* An array of pages for the eager receive buffers * N */
+	void *subctxt_rcvegrbuf;
+	/* An array of pages for the eager header queue entries * N */
+	void *subctxt_rcvhdr_base;
+	/* The version of the library which opened this ctxt */
+	u32 userversion;
+	/* Bitmask of active slaves */
+	u32 active_slaves;
+	/* Type of packets or conditions we want to poll for */
+	u16 poll_type;
+	/* receive packet sequence counter */
+	u8 seq_cnt;
+	u8 redirect_seq_cnt;
+	/* ctxt rcvhdrq head offset */
+	u32 head;
+	u32 pkt_count;
+	/* QPs waiting for context processing */
+	struct list_head qp_wait_list;
+};
+
+struct qib_sge_state;
+
+struct qib_sdma_txreq {
+	int                 flags;
+	int                 sg_count;
+	dma_addr_t          addr;
+	void              (*callback)(struct qib_sdma_txreq *, int);
+	u16                 start_idx;  /* sdma private */
+	u16                 next_descq_idx;  /* sdma private */
+	struct list_head    list;       /* sdma private */
+};
+
+struct qib_sdma_desc {
+	__le64 qw[2];
+};
+
+struct qib_verbs_txreq {
+	struct qib_sdma_txreq   txreq;
+	struct qib_qp           *qp;
+	struct qib_swqe         *wqe;
+	u32                     dwords;
+	u16                     hdr_dwords;
+	u16                     hdr_inx;
+	struct qib_pio_header	*align_buf;
+	struct qib_mregion	*mr;
+	struct qib_sge_state    *ss;
+};
+
+#define QIB_SDMA_TXREQ_F_USELARGEBUF  0x1
+#define QIB_SDMA_TXREQ_F_HEADTOHOST   0x2
+#define QIB_SDMA_TXREQ_F_INTREQ       0x4
+#define QIB_SDMA_TXREQ_F_FREEBUF      0x8
+#define QIB_SDMA_TXREQ_F_FREEDESC     0x10
+
+#define QIB_SDMA_TXREQ_S_OK        0
+#define QIB_SDMA_TXREQ_S_SENDERROR 1
+#define QIB_SDMA_TXREQ_S_ABORTED   2
+#define QIB_SDMA_TXREQ_S_SHUTDOWN  3
+
+/*
+ * Get/Set IB link-level config parameters for f_get/set_ib_cfg()
+ * Mostly for MADs that set or query link parameters, also ipath
+ * config interfaces
+ */
+#define QIB_IB_CFG_LIDLMC 0 /* LID (LS16b) and Mask (MS16b) */
+#define QIB_IB_CFG_LWID_ENB 2 /* allowed Link-width */
+#define QIB_IB_CFG_LWID 3 /* currently active Link-width */
+#define QIB_IB_CFG_SPD_ENB 4 /* allowed Link speeds */
+#define QIB_IB_CFG_SPD 5 /* current Link spd */
+#define QIB_IB_CFG_RXPOL_ENB 6 /* Auto-RX-polarity enable */
+#define QIB_IB_CFG_LREV_ENB 7 /* Auto-Lane-reversal enable */
+#define QIB_IB_CFG_LINKLATENCY 8 /* Link Latency (IB1.2 only) */
+#define QIB_IB_CFG_HRTBT 9 /* IB heartbeat off/enable/auto; DDR/QDR only */
+#define QIB_IB_CFG_OP_VLS 10 /* operational VLs */
+#define QIB_IB_CFG_VL_HIGH_CAP 11 /* num of VL high priority weights */
+#define QIB_IB_CFG_VL_LOW_CAP 12 /* num of VL low priority weights */
+#define QIB_IB_CFG_OVERRUN_THRESH 13 /* IB overrun threshold */
+#define QIB_IB_CFG_PHYERR_THRESH 14 /* IB PHY error threshold */
+#define QIB_IB_CFG_LINKDEFAULT 15 /* IB link default (sleep/poll) */
+#define QIB_IB_CFG_PKEYS 16 /* update partition keys */
+#define QIB_IB_CFG_MTU 17 /* update MTU in IBC */
+#define QIB_IB_CFG_LSTATE 18 /* update linkcmd and linkinitcmd in IBC */
+#define QIB_IB_CFG_VL_HIGH_LIMIT 19
+#define QIB_IB_CFG_PMA_TICKS 20 /* PMA sample tick resolution */
+#define QIB_IB_CFG_PORT 21 /* switch port we are connected to */
+
+/*
+ * for CFG_LSTATE: LINKCMD in upper 16 bits, LINKINITCMD in lower 16
+ * IB_LINKINITCMD_POLL and SLEEP are also used as set/get values for
+ * QIB_IB_CFG_LINKDEFAULT cmd
+ */
+#define   IB_LINKCMD_DOWN   (0 << 16)
+#define   IB_LINKCMD_ARMED  (1 << 16)
+#define   IB_LINKCMD_ACTIVE (2 << 16)
+#define   IB_LINKINITCMD_NOP     0
+#define   IB_LINKINITCMD_POLL    1
+#define   IB_LINKINITCMD_SLEEP   2
+#define   IB_LINKINITCMD_DISABLE 3
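For example, packing a QIB_IB_CFG_LSTATE argument per the layout above (f_set_ib_cfg() is the per-device hook defined further down in qib_devdata; the call itself is a sketch):

	u32 lstate = IB_LINKCMD_ARMED | IB_LINKINITCMD_NOP;	/* 0x00010000 */

	ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE, lstate);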
+
+/*
+ * valid states passed to qib_set_linkstate() user call
+ */
+#define QIB_IB_LINKDOWN         0
+#define QIB_IB_LINKARM          1
+#define QIB_IB_LINKACTIVE       2
+#define QIB_IB_LINKDOWN_ONLY    3
+#define QIB_IB_LINKDOWN_SLEEP   4
+#define QIB_IB_LINKDOWN_DISABLE 5
+
+/*
+ * These 7 values (SDR, DDR, and QDR may be ORed for auto-speed
+ * negotiation) are used for the 3rd argument to f_set_ib_cfg()
+ * with cmd QIB_IB_CFG_SPD_ENB, by direct calls or via sysfs.  They
+ * are also the possible values for qib_link_speed_enabled and active.
+ * The values were chosen to match values used within the IB spec.
+ */
+#define QIB_IB_SDR 1
+#define QIB_IB_DDR 2
+#define QIB_IB_QDR 4
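Since each speed is an individual bit, ORing all three (value 7) requests auto-speed negotiation, e.g.:

	/* sketch: allow any speed, letting the link negotiate */
	ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_SPD_ENB,
			      QIB_IB_SDR | QIB_IB_DDR | QIB_IB_QDR);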
+
+#define QIB_DEFAULT_MTU 4096
+
+/*
+ * Possible IB config parameters for f_get/set_ib_table()
+ */
+#define QIB_IB_TBL_VL_HIGH_ARB 1 /* Get/set VL high priority weights */
+#define QIB_IB_TBL_VL_LOW_ARB 2 /* Get/set VL low priority weights */
+
+/*
+ * Possible "operations" for f_rcvctrl(ppd, op, ctxt)
+ * these are bits so they can be combined, e.g.
+ * QIB_RCVCTRL_INTRAVAIL_ENB | QIB_RCVCTRL_CTXT_ENB
+ */
+#define QIB_RCVCTRL_TAILUPD_ENB 0x01
+#define QIB_RCVCTRL_TAILUPD_DIS 0x02
+#define QIB_RCVCTRL_CTXT_ENB 0x04
+#define QIB_RCVCTRL_CTXT_DIS 0x08
+#define QIB_RCVCTRL_INTRAVAIL_ENB 0x10
+#define QIB_RCVCTRL_INTRAVAIL_DIS 0x20
+#define QIB_RCVCTRL_PKEY_ENB 0x40  /* Note, default is enabled */
+#define QIB_RCVCTRL_PKEY_DIS 0x80
+#define QIB_RCVCTRL_BP_ENB 0x0100
+#define QIB_RCVCTRL_BP_DIS 0x0200
+#define QIB_RCVCTRL_TIDFLOW_ENB 0x0400
+#define QIB_RCVCTRL_TIDFLOW_DIS 0x0800
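For instance, enabling a context together with its rcvavail interrupt is a single f_rcvctrl() call (sketch; rcd->ctxt is the context number, per qib_ctxtdata above):

	dd->f_rcvctrl(ppd, QIB_RCVCTRL_CTXT_ENB | QIB_RCVCTRL_INTRAVAIL_ENB,
		      rcd->ctxt);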
+
+/*
+ * Possible "operations" for f_sendctrl(ppd, op, var)
+ * these are bits so they can be combined, e.g.
+ * QIB_SENDCTRL_BUFAVAIL_ENB | QIB_SENDCTRL_ENB
+ * Some operations (e.g. DISARM, ABORT) are known to
+ * be "one-shot", so do not modify shadow.
+ */
+#define QIB_SENDCTRL_DISARM       (0x1000)
+#define QIB_SENDCTRL_DISARM_BUF(bufn) ((bufn) | QIB_SENDCTRL_DISARM)
+	/* available (0x2000) */
+#define QIB_SENDCTRL_AVAIL_DIS    (0x4000)
+#define QIB_SENDCTRL_AVAIL_ENB    (0x8000)
+#define QIB_SENDCTRL_AVAIL_BLIP  (0x10000)
+#define QIB_SENDCTRL_SEND_DIS    (0x20000)
+#define QIB_SENDCTRL_SEND_ENB    (0x40000)
+#define QIB_SENDCTRL_FLUSH       (0x80000)
+#define QIB_SENDCTRL_CLEAR      (0x100000)
+#define QIB_SENDCTRL_DISARM_ALL (0x200000)
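A sketch of two typical calls, one one-shot and one that does update the shadow (bufn is a hypothetical buffer number):

	dd->f_sendctrl(ppd, QIB_SENDCTRL_DISARM_BUF(bufn));	/* one-shot */
	dd->f_sendctrl(ppd, QIB_SENDCTRL_AVAIL_ENB);		/* shadowed */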
+
+/*
+ * These are the generic indices for requesting per-port
+ * counter values via the f_portcntr function.  They
+ * are always returned as 64 bit values, although most
+ * are 32 bit counters.
+ */
+/* send-related counters */
+#define QIBPORTCNTR_PKTSEND         0U
+#define QIBPORTCNTR_WORDSEND        1U
+#define QIBPORTCNTR_PSXMITDATA      2U
+#define QIBPORTCNTR_PSXMITPKTS      3U
+#define QIBPORTCNTR_PSXMITWAIT      4U
+#define QIBPORTCNTR_SENDSTALL       5U
+/* receive-related counters */
+#define QIBPORTCNTR_PKTRCV          6U
+#define QIBPORTCNTR_PSRCVDATA       7U
+#define QIBPORTCNTR_PSRCVPKTS       8U
+#define QIBPORTCNTR_RCVEBP          9U
+#define QIBPORTCNTR_RCVOVFL         10U
+#define QIBPORTCNTR_WORDRCV         11U
+/* IB link related error counters */
+#define QIBPORTCNTR_RXLOCALPHYERR   12U
+#define QIBPORTCNTR_RXVLERR         13U
+#define QIBPORTCNTR_ERRICRC         14U
+#define QIBPORTCNTR_ERRVCRC         15U
+#define QIBPORTCNTR_ERRLPCRC        16U
+#define QIBPORTCNTR_BADFORMAT       17U
+#define QIBPORTCNTR_ERR_RLEN        18U
+#define QIBPORTCNTR_IBSYMBOLERR     19U
+#define QIBPORTCNTR_INVALIDRLEN     20U
+#define QIBPORTCNTR_UNSUPVL         21U
+#define QIBPORTCNTR_EXCESSBUFOVFL   22U
+#define QIBPORTCNTR_ERRLINK         23U
+#define QIBPORTCNTR_IBLINKDOWN      24U
+#define QIBPORTCNTR_IBLINKERRRECOV  25U
+#define QIBPORTCNTR_LLI             26U
+/* other error counters */
+#define QIBPORTCNTR_RXDROPPKT       27U
+#define QIBPORTCNTR_VL15PKTDROP     28U
+#define QIBPORTCNTR_ERRPKEY         29U
+#define QIBPORTCNTR_KHDROVFL        30U
+/* sampling counters (these are actually control registers) */
+#define QIBPORTCNTR_PSINTERVAL      31U
+#define QIBPORTCNTR_PSSTART         32U
+#define QIBPORTCNTR_PSSTAT          33U
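Reading any of these goes through the per-device f_portcntr() hook, which widens everything to 64 bits regardless of the underlying counter width (sketch):

	u64 rcv_pkts = dd->f_portcntr(ppd, QIBPORTCNTR_PKTRCV);
	u64 sym_errs = dd->f_portcntr(ppd, QIBPORTCNTR_IBSYMBOLERR);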
+
+/* how often we check for packet activity for "power on hours" (in seconds) */
+#define ACTIVITY_TIMER 5
+
+/* Below is an opaque struct. Each chip (device) can maintain
+ * private data needed for its operation, but not germane to the
+ * rest of the driver.  For convenience, we define another that
+ * is chip-specific, per-port
+ */
+struct qib_chip_specific;
+struct qib_chippport_specific;
+
+enum qib_sdma_states {
+	qib_sdma_state_s00_hw_down,
+	qib_sdma_state_s10_hw_start_up_wait,
+	qib_sdma_state_s20_idle,
+	qib_sdma_state_s30_sw_clean_up_wait,
+	qib_sdma_state_s40_hw_clean_up_wait,
+	qib_sdma_state_s50_hw_halt_wait,
+	qib_sdma_state_s99_running,
+};
+
+enum qib_sdma_events {
+	qib_sdma_event_e00_go_hw_down,
+	qib_sdma_event_e10_go_hw_start,
+	qib_sdma_event_e20_hw_started,
+	qib_sdma_event_e30_go_running,
+	qib_sdma_event_e40_sw_cleaned,
+	qib_sdma_event_e50_hw_cleaned,
+	qib_sdma_event_e60_hw_halted,
+	qib_sdma_event_e70_go_idle,
+	qib_sdma_event_e7220_err_halted,
+	qib_sdma_event_e7322_err_halted,
+	qib_sdma_event_e90_timer_tick,
+};
+
+extern char *qib_sdma_state_names[];
+extern char *qib_sdma_event_names[];
+
+struct sdma_set_state_action {
+	unsigned op_enable:1;
+	unsigned op_intenable:1;
+	unsigned op_halt:1;
+	unsigned op_drain:1;
+	unsigned go_s99_running_tofalse:1;
+	unsigned go_s99_running_totrue:1;
+};
+
+struct qib_sdma_state {
+	struct kref          kref;
+	struct completion    comp;
+	enum qib_sdma_states current_state;
+	struct sdma_set_state_action *set_state_action;
+	unsigned             current_op;
+	unsigned             go_s99_running;
+	unsigned             first_sendbuf;
+	unsigned             last_sendbuf; /* really last +1 */
+	/* debugging/devel */
+	enum qib_sdma_states previous_state;
+	unsigned             previous_op;
+	enum qib_sdma_events last_event;
+};
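Events are fed to this state machine through qib_sdma_process_event(), declared later in this header; the double-underscore variant assumes the caller already holds the port's sdma_lock. A sketch:

	unsigned long flags;

	spin_lock_irqsave(&ppd->sdma_lock, flags);
	__qib_sdma_process_event(ppd, qib_sdma_event_e30_go_running);
	spin_unlock_irqrestore(&ppd->sdma_lock, flags);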
+
+struct xmit_wait {
+	struct timer_list timer;
+	u64 counter;
+	u8 flags;
+	struct cache {
+		u64 psxmitdata;
+		u64 psrcvdata;
+		u64 psxmitpkts;
+		u64 psrcvpkts;
+		u64 psxmitwait;
+	} counter_cache;
+};
+
+/*
+ * The structure below encapsulates data relevant to a physical IB Port.
+ * Current chips support only one such port, but the separation
+ * clarifies things a bit. Note that to conform to IB conventions,
+ * port-numbers are one-based. The first or only port is port1.
+ */
+struct qib_pportdata {
+	struct qib_ibport ibport_data;
+
+	struct qib_devdata *dd;
+	struct qib_chippport_specific *cpspec; /* chip-specific per-port */
+	struct kobject pport_kobj;
+	struct kobject sl2vl_kobj;
+	struct kobject diagc_kobj;
+
+	/* GUID for this interface, in network order */
+	__be64 guid;
+
+	/* QIB_POLL, etc. link-state specific flags, per port */
+	u32 lflags;
+	/* qib_lflags driver is waiting for */
+	u32 state_wanted;
+	spinlock_t lflags_lock;
+	/* number of (port-specific) interrupts for this port -- saturates... */
+	u32 int_counter;
+
+	/* ref count for each pkey */
+	atomic_t pkeyrefs[4];
+
+	/*
+	 * this address is mapped readonly into user processes so they can
+	 * get status cheaply, whenever they want.  One qword of status per port
+	 */
+	u64 *statusp;
+
+	/* SendDMA related entries */
+	spinlock_t            sdma_lock;
+	struct qib_sdma_state sdma_state;
+	unsigned long         sdma_buf_jiffies;
+	struct qib_sdma_desc *sdma_descq;
+	u64                   sdma_descq_added;
+	u64                   sdma_descq_removed;
+	u16                   sdma_descq_cnt;
+	u16                   sdma_descq_tail;
+	u16                   sdma_descq_head;
+	u16                   sdma_next_intr;
+	u16                   sdma_reset_wait;
+	u8                    sdma_generation;
+	struct tasklet_struct sdma_sw_clean_up_task;
+	struct list_head      sdma_activelist;
+
+	dma_addr_t       sdma_descq_phys;
+	volatile __le64 *sdma_head_dma; /* DMA'ed by chip */
+	dma_addr_t       sdma_head_phys;
+
+	wait_queue_head_t state_wait; /* for state_wanted */
+
+	/* HoL blocking for SMP replies */
+	unsigned          hol_state;
+	struct timer_list hol_timer;
+
+	/*
+	 * Shadow copies of registers; size indicates read access size.
+	 * Most of them are readonly, but some are write-only registers,
+	 * where we manipulate the bits in the shadow copy, and then write
+	 * the shadow copy to qlogic_ib.
+	 *
+	 * We deliberately make most of these 32 bits, since they have
+	 * restricted range.  For any that we read, we want to generate 32
+	 * bit accesses, since Opteron will generate 2 separate 32 bit HT
+	 * transactions for a 64 bit read, and we want to avoid unnecessary
+	 * bus transactions.
+	 */
+
+	/* This is the 64 bit group */
+	/* last ibcstatus.  opaque outside chip-specific code */
+	u64 lastibcstat;
+
+	/* these are the "32 bit" regs */
+
+	/*
+	 * the following two are 32-bit bitmasks, but {test,clear,set}_bit
+	 * all expect bit fields to be "unsigned long"
+	 */
+	unsigned long p_rcvctrl; /* shadow per-port rcvctrl */
+	unsigned long p_sendctrl; /* shadow per-port sendctrl */
+
+	u32 ibmtu; /* The MTU programmed for this unit */
+	/*
+	 * Current max size IB packet (in bytes) including IB headers, that
+	 * we can send. Changes when ibmtu changes.
+	 */
+	u32 ibmaxlen;
+	/*
+	 * ibmaxlen at init time, limited by chip and by receive buffer
+	 * size.  Not changed after init.
+	 */
+	u32 init_ibmaxlen;
+	/* LID programmed for this instance */
+	u16 lid;
+	/* list of pkeys programmed; 0 if not set */
+	u16 pkeys[4];
+	/* LID mask control */
+	u8 lmc;
+	u8 link_width_supported;
+	u8 link_speed_supported;
+	u8 link_width_enabled;
+	u8 link_speed_enabled;
+	u8 link_width_active;
+	u8 link_speed_active;
+	u8 vls_supported;
+	u8 vls_operational;
+	/* Rx Polarity inversion (compensate for ~tx on partner) */
+	u8 rx_pol_inv;
+
+	u8 hw_pidx;     /* physical port index */
+	u8 port;        /* IB port number and index into dd->pports - 1 */
+
+	u8 delay_mult;
+
+	/* used to override LED behavior */
+	u8 led_override;  /* Substituted for normal value, if non-zero */
+	u16 led_override_timeoff; /* delta to next timer event */
+	u8 led_override_vals[2]; /* Alternates per blink-frame */
+	u8 led_override_phase; /* Just counts, LSB picks from vals[] */
+	atomic_t led_override_timer_active;
+	/* Used to flash LEDs in override mode */
+	struct timer_list led_override_timer;
+	struct xmit_wait cong_stats;
+	struct timer_list symerr_clear_timer;
+};
+
+/* Observers. Not to be taken lightly, possibly not to ship. */
+/*
+ * If a diag read or write is to (bottom <= offset <= top),
+ * the "hoook" is called, allowing, e.g. shadows to be
+ * updated in sync with the driver. struct diag_observer
+ * is the "visible" part.
+ */
+struct diag_observer;
+
+typedef int (*diag_hook) (struct qib_devdata *dd,
+	const struct diag_observer *op,
+	u32 offs, u64 *data, u64 mask, int only_32);
+
+struct diag_observer {
+	diag_hook hook;
+	u32 bottom;
+	u32 top;
+};
+
+extern int qib_register_observer(struct qib_devdata *dd,
+	const struct diag_observer *op);
+
+/* Only declared here, not defined. Private to diags */
+struct diag_observer_list_elt;
+
+/* device data struct now contains only "general per-device" info.
+ * Fields related to a physical IB port are in a qib_pportdata struct
+ * (described above), while fields only used by a particular chip-type
+ * are in a qib_chipdata struct, whose contents are opaque to this file.
+ */
+struct qib_devdata {
+	struct qib_ibdev verbs_dev;     /* must be first */
+	struct list_head list;
+	/* pointers to related structs for this device */
+	/* pci access data structure */
+	struct pci_dev *pcidev;
+	struct cdev *user_cdev;
+	struct cdev *diag_cdev;
+	struct device *user_device;
+	struct device *diag_device;
+
+	/* mem-mapped pointer to base of chip regs */
+	u64 __iomem *kregbase;
+	/* end of mem-mapped chip space excluding sendbuf and user regs */
+	u64 __iomem *kregend;
+	/* physical address of chip for io_remap, etc. */
+	resource_size_t physaddr;
+	/* qib_cfgctxts pointers */
+	struct qib_ctxtdata **rcd; /* Receive Context Data */
+
+	/* qib_pportdata, points to array of (physical) port-specific
+	 * data structs, indexed by pidx (0..n-1)
+	 */
+	struct qib_pportdata *pport;
+	struct qib_chip_specific *cspec; /* chip-specific */
+
+	/* kvirt address of 1st 2k pio buffer */
+	void __iomem *pio2kbase;
+	/* kvirt address of 1st 4k pio buffer */
+	void __iomem *pio4kbase;
+	/* mem-mapped pointer to base of PIO buffers (if using WC PAT) */
+	void __iomem *piobase;
+	/* mem-mapped pointer to base of user chip regs (if using WC PAT) */
+	u64 __iomem *userbase;
+	/*
+	 * points to area where PIOavail registers will be DMA'ed.
+	 * Has to be on a page of its own, because the page will be
+	 * mapped into user program space.  This copy is *ONLY* ever
+	 * written by DMA, not by the driver!  Need a copy per device
+	 * when we get to multiple devices
+	 */
+	volatile __le64 *pioavailregs_dma; /* DMA'ed by chip */
+	/* physical address where updates occur */
+	dma_addr_t pioavailregs_phys;
+
+	/* device-specific implementations of functions needed by
+	 * common code. Contrary to previous consensus, we can't
+	 * really just point to a device-specific table, because we
+	 * may need to "bend", e.g. *_f_put_tid
+	 */
+	/* fallback to alternate interrupt type if possible */
+	int (*f_intr_fallback)(struct qib_devdata *);
+	/* hard reset chip */
+	int (*f_reset)(struct qib_devdata *);
+	void (*f_quiet_serdes)(struct qib_pportdata *);
+	int (*f_bringup_serdes)(struct qib_pportdata *);
+	int (*f_early_init)(struct qib_devdata *);
+	void (*f_clear_tids)(struct qib_devdata *, struct qib_ctxtdata *);
+	void (*f_put_tid)(struct qib_devdata *, u64 __iomem*,
+				u32, unsigned long);
+	void (*f_cleanup)(struct qib_devdata *);
+	void (*f_setextled)(struct qib_pportdata *, u32);
+	/* fill out chip-specific fields */
+	int (*f_get_base_info)(struct qib_ctxtdata *, struct qib_base_info *);
+	/* free irq */
+	void (*f_free_irq)(struct qib_devdata *);
+	struct qib_message_header *(*f_get_msgheader)
+					(struct qib_devdata *, __le32 *);
+	void (*f_config_ctxts)(struct qib_devdata *);
+	int (*f_get_ib_cfg)(struct qib_pportdata *, int);
+	int (*f_set_ib_cfg)(struct qib_pportdata *, int, u32);
+	int (*f_set_ib_loopback)(struct qib_pportdata *, const char *);
+	int (*f_get_ib_table)(struct qib_pportdata *, int, void *);
+	int (*f_set_ib_table)(struct qib_pportdata *, int, void *);
+	u32 (*f_iblink_state)(u64);
+	u8 (*f_ibphys_portstate)(u64);
+	void (*f_xgxs_reset)(struct qib_pportdata *);
+	/* per chip actions needed for IB Link up/down changes */
+	int (*f_ib_updown)(struct qib_pportdata *, int, u64);
+	u32 __iomem *(*f_getsendbuf)(struct qib_pportdata *, u64, u32 *);
+	/* Read/modify/write of GPIO pins (potentially chip-specific) */
+	int (*f_gpio_mod)(struct qib_devdata *dd, u32 out, u32 dir,
+		u32 mask);
+	/* Enable writes to config EEPROM (if supported) */
+	int (*f_eeprom_wen)(struct qib_devdata *dd, int wen);
+	/*
+	 * modify rcvctrl shadow[s] and write to appropriate chip-regs.
+	 * see above QIB_RCVCTRL_xxx_ENB/DIS for operations.
+	 * (ctxt == -1) means "all contexts", only meaningful for
+	 * clearing. Could remove if chip_spec shutdown properly done.
+	 */
+	void (*f_rcvctrl)(struct qib_pportdata *, unsigned int op,
+		int ctxt);
+	/* Read/modify/write sendctrl appropriately for op and port. */
+	void (*f_sendctrl)(struct qib_pportdata *, u32 op);
+	void (*f_set_intr_state)(struct qib_devdata *, u32);
+	void (*f_set_armlaunch)(struct qib_devdata *, u32);
+	void (*f_wantpiobuf_intr)(struct qib_devdata *, u32);
+	int (*f_late_initreg)(struct qib_devdata *);
+	int (*f_init_sdma_regs)(struct qib_pportdata *);
+	u16 (*f_sdma_gethead)(struct qib_pportdata *);
+	int (*f_sdma_busy)(struct qib_pportdata *);
+	void (*f_sdma_update_tail)(struct qib_pportdata *, u16);
+	void (*f_sdma_set_desc_cnt)(struct qib_pportdata *, unsigned);
+	void (*f_sdma_sendctrl)(struct qib_pportdata *, unsigned);
+	void (*f_sdma_hw_clean_up)(struct qib_pportdata *);
+	void (*f_sdma_hw_start_up)(struct qib_pportdata *);
+	void (*f_sdma_init_early)(struct qib_pportdata *);
+	void (*f_set_cntr_sample)(struct qib_pportdata *, u32, u32);
+	void (*f_update_usrhead)(struct qib_ctxtdata *, u64, u32, u32);
+	u32 (*f_hdrqempty)(struct qib_ctxtdata *);
+	u64 (*f_portcntr)(struct qib_pportdata *, u32);
+	u32 (*f_read_cntrs)(struct qib_devdata *, loff_t, char **,
+		u64 **);
+	u32 (*f_read_portcntrs)(struct qib_devdata *, loff_t, u32,
+		char **, u64 **);
+	u32 (*f_setpbc_control)(struct qib_pportdata *, u32, u8, u8);
+	void (*f_initvl15_bufs)(struct qib_devdata *);
+	void (*f_init_ctxt)(struct qib_ctxtdata *);
+	void (*f_txchk_change)(struct qib_devdata *, u32, u32, u32,
+		struct qib_ctxtdata *);
+	void (*f_writescratch)(struct qib_devdata *, u32);
+	int (*f_tempsense_rd)(struct qib_devdata *, int regnum);
+
+	char *boardname; /* human readable board info */
+
+	/* template for writing TIDs  */
+	u64 tidtemplate;
+	/* value to write to free TIDs */
+	u64 tidinvalid;
+
+	/* number of registers used for pioavail */
+	u32 pioavregs;
+	/* device (not port) flags, basically device capabilities */
+	u32 flags;
+	/* last buffer for user use */
+	u32 lastctxt_piobuf;
+
+	/* saturating counter of (non-port-specific) device interrupts */
+	u32 int_counter;
+
+	/* pio bufs allocated per ctxt */
+	u32 pbufsctxt;
+	/* if remainder on bufs/ctxt, ctxts < extrabuf get 1 extra */
+	u32 ctxts_extrabuf;
+	/*
+	 * number of ctxts configured as max; zero is set to number chip
+	 * supports, less gives more pio bufs/ctxt, etc.
+	 */
+	u32 cfgctxts;
+
+	/*
+	 * hint that we should update pioavailshadow before
+	 * looking for a PIO buffer
+	 */
+	u32 upd_pio_shadow;
+
+	/* internal debugging stats */
+	u32 maxpkts_call;
+	u32 avgpkts_call;
+	u64 nopiobufs;
+
+	/* PCI Vendor ID (here for NodeInfo) */
+	u16 vendorid;
+	/* PCI Device ID (here for NodeInfo) */
+	u16 deviceid;
+	/* for write combining settings */
+	unsigned long wc_cookie;
+	unsigned long wc_base;
+	unsigned long wc_len;
+
+	/* shadow copy of struct page *'s for exp tid pages */
+	struct page **pageshadow;
+	/* shadow copy of dma handles for exp tid pages */
+	dma_addr_t *physshadow;
+	u64 __iomem *egrtidbase;
+	spinlock_t sendctrl_lock; /* protect changes to sendctrl shadow */
+	/* around rcd and (user ctxts) ctxt_cnt use (intr vs free) */
+	spinlock_t uctxt_lock; /* rcd and user context changes */
+	/*
+	 * per unit status, see also portdata statusp
+	 * mapped readonly into user processes so they can get unit and
+	 * IB link status cheaply
+	 */
+	u64 *devstatusp;
+	char *freezemsg; /* freeze msg if hw error put chip in freeze */
+	u32 freezelen; /* max length of freezemsg */
+	/* timer used to prevent stats overflow, error throttling, etc. */
+	struct timer_list stats_timer;
+
+	/* timer to verify interrupts work, and fallback if possible */
+	struct timer_list intrchk_timer;
+	unsigned long ureg_align; /* user register alignment */
+
+	/*
+	 * Protects pioavailshadow, pioavailkernel, pio_need_disarm, and
+	 * pio_writing.
+	 */
+	spinlock_t pioavail_lock;
+
+	/*
+	 * Shadow copies of registers; size indicates read access size.
+	 * Most of them are readonly, but some are write-only registers,
+	 * where we manipulate the bits in the shadow copy, and then write
+	 * the shadow copy to qlogic_ib.
+	 *
+	 * We deliberately make most of these 32 bits, since they have
+	 * restricted range.  For any that we read, we want to generate 32
+	 * bit accesses, since Opteron will generate 2 separate 32 bit HT
+	 * transactions for a 64 bit read, and we want to avoid unnecessary
+	 * bus transactions.
+	 */
+
+	/* This is the 64 bit group */
+
+	unsigned long pioavailshadow[6];
+	/* bitmap of send buffers available for the kernel to use with PIO. */
+	unsigned long pioavailkernel[6];
+	/* bitmap of send buffers which need to be disarmed. */
+	unsigned long pio_need_disarm[3];
+	/* bitmap of send buffers which are being written to. */
+	unsigned long pio_writing[3];
+	/* kr_revision shadow */
+	u64 revision;
+	/* Base GUID for device (from eeprom, network order) */
+	__be64 base_guid;
+
+	/*
+	 * kr_sendpiobufbase value (chip offset of pio buffers), and the
+	 * base of the 2KB buffers (user processes only use 2K)
+	 */
+	u64 piobufbase;
+	u32 pio2k_bufbase;
+
+	/* these are the "32 bit" regs */
+
+	/* number of GUIDs in the flash for this interface */
+	u32 nguid;
+	/*
+	 * the following two are 32-bit bitmasks, but {test,clear,set}_bit
+	 * all expect bit fields to be "unsigned long"
+	 */
+	unsigned long rcvctrl; /* shadow per device rcvctrl */
+	unsigned long sendctrl; /* shadow per device sendctrl */
+
+	/* value we put in kr_rcvhdrcnt */
+	u32 rcvhdrcnt;
+	/* value we put in kr_rcvhdrsize */
+	u32 rcvhdrsize;
+	/* value we put in kr_rcvhdrentsize */
+	u32 rcvhdrentsize;
+	/* kr_ctxtcnt value */
+	u32 ctxtcnt;
+	/* kr_pagealign value */
+	u32 palign;
+	/* number of "2KB" PIO buffers */
+	u32 piobcnt2k;
+	/* size in bytes of "2KB" PIO buffers */
+	u32 piosize2k;
+	/* max usable size in dwords of a "2KB" PIO buffer before going "4KB" */
+	u32 piosize2kmax_dwords;
+	/* number of "4KB" PIO buffers */
+	u32 piobcnt4k;
+	/* size in bytes of "4KB" PIO buffers */
+	u32 piosize4k;
+	/* kr_rcvegrbase value */
+	u32 rcvegrbase;
+	/* kr_rcvtidbase value */
+	u32 rcvtidbase;
+	/* kr_rcvtidcnt value */
+	u32 rcvtidcnt;
+	/* kr_userregbase */
+	u32 uregbase;
+	/* shadow the control register contents */
+	u32 control;
+
+	/* chip address space used by 4k pio buffers */
+	u32 align4k;
+	/* size of each rcvegrbuffer */
+	u32 rcvegrbufsize;
+	/* localbus width (1, 2, 4, 8, 16, 32) from config space */
+	u32 lbus_width;
+	/* localbus speed in MHz */
+	u32 lbus_speed;
+	int unit; /* unit # of this chip */
+
+	/* start of CHIP_SPEC move to chipspec, but need code changes */
+	/* low and high portions of MSI capability/vector */
+	u32 msi_lo;
+	/* saved after PCIe init for restore after reset */
+	u32 msi_hi;
+	/* MSI data (vector) saved for restore */
+	u16 msi_data;
+	/* so we can rewrite it after a chip reset */
+	u32 pcibar0;
+	/* so we can rewrite it after a chip reset */
+	u32 pcibar1;
+	u64 rhdrhead_intr_off;
+
+	/*
+	 * ASCII serial number, from flash, large enough for original
+	 * all digit strings, and longer QLogic serial number format
+	 */
+	u8 serial[16];
+	/* human readable board version */
+	u8 boardversion[96];
+	u8 lbus_info[32]; /* human readable localbus info */
+	/* chip major rev, from qib_revision */
+	u8 majrev;
+	/* chip minor rev, from qib_revision */
+	u8 minrev;
+
+	/* Misc small ints */
+	/* Number of physical ports available */
+	u8 num_pports;
+	/* Lowest context number which can be used by user processes */
+	u8 first_user_ctxt;
+	u8 n_krcv_queues;
+	u8 qpn_mask;
+	u8 skip_kctxt_mask;
+
+	u16 rhf_offset; /* offset of RHF within receive header entry */
+
+	/*
+	 * GPIO pins for twsi-connected devices, and device code for eeprom
+	 */
+	u8 gpio_sda_num;
+	u8 gpio_scl_num;
+	u8 twsi_eeprom_dev;
+	u8 board_atten;
+
+	/* Support (including locks) for EEPROM logging of errors and time */
+	/* control access to actual counters, timer */
+	spinlock_t eep_st_lock;
+	/* control high-level access to EEPROM */
+	struct mutex eep_lock;
+	uint64_t traffic_wds;
+	/* active time is kept in seconds, but logged in hours */
+	atomic_t active_time;
+	/* Below are nominal shadow of EEPROM, new since last EEPROM update */
+	uint8_t eep_st_errs[QIB_EEP_LOG_CNT];
+	uint8_t eep_st_new_errs[QIB_EEP_LOG_CNT];
+	uint16_t eep_hrs;
+	/*
+	 * masks saying which bits of errs and hwerrs cause
+	 * each of the counters to increment.
+	 */
+	struct qib_eep_log_mask eep_st_masks[QIB_EEP_LOG_CNT];
+	struct qib_diag_client *diag_client;
+	spinlock_t qib_diag_trans_lock; /* protect diag observer ops */
+	struct diag_observer_list_elt *diag_observer_list;
+
+	u8 psxmitwait_supported;
+	/* cycle length of PS* counters in HW (in picoseconds) */
+	u16 psxmitwait_check_rate;
+};
+
+/* hol_state values */
+#define QIB_HOL_UP       0
+#define QIB_HOL_INIT     1
+
+#define QIB_SDMA_SENDCTRL_OP_ENABLE    (1U << 0)
+#define QIB_SDMA_SENDCTRL_OP_INTENABLE (1U << 1)
+#define QIB_SDMA_SENDCTRL_OP_HALT      (1U << 2)
+#define QIB_SDMA_SENDCTRL_OP_CLEANUP   (1U << 3)
+#define QIB_SDMA_SENDCTRL_OP_DRAIN     (1U << 4)
+
+/* operation types for f_txchk_change() */
+#define TXCHK_CHG_TYPE_DIS1  3
+#define TXCHK_CHG_TYPE_ENAB1 2
+#define TXCHK_CHG_TYPE_KERN  1
+#define TXCHK_CHG_TYPE_USER  0
+
+#define QIB_CHASE_TIME msecs_to_jiffies(145)
+#define QIB_CHASE_DIS_TIME msecs_to_jiffies(160)
+
+/* Private data for file operations */
+struct qib_filedata {
+	struct qib_ctxtdata *rcd;
+	unsigned subctxt;
+	unsigned tidcursor;
+	struct qib_user_sdma_queue *pq;
+	int rec_cpu_num; /* for cpu affinity; -1 if none */
+};
+
+extern struct list_head qib_dev_list;
+extern spinlock_t qib_devs_lock;
+extern struct qib_devdata *qib_lookup(int unit);
+extern u32 qib_cpulist_count;
+extern unsigned long *qib_cpulist;
+
+extern unsigned qib_wc_pat;
+int qib_init(struct qib_devdata *, int);
+int init_chip_wc_pat(struct qib_devdata *dd, u32);
+int qib_enable_wc(struct qib_devdata *dd);
+void qib_disable_wc(struct qib_devdata *dd);
+int qib_count_units(int *npresentp, int *nupp);
+int qib_count_active_units(void);
+
+int qib_cdev_init(int minor, const char *name,
+		  const struct file_operations *fops,
+		  struct cdev **cdevp, struct device **devp);
+void qib_cdev_cleanup(struct cdev **cdevp, struct device **devp);
+int qib_dev_init(void);
+void qib_dev_cleanup(void);
+
+int qib_diag_add(struct qib_devdata *);
+void qib_diag_remove(struct qib_devdata *);
+void qib_handle_e_ibstatuschanged(struct qib_pportdata *, u64);
+void qib_sdma_update_tail(struct qib_pportdata *, u16); /* hold sdma_lock */
+
+int qib_decode_err(struct qib_devdata *dd, char *buf, size_t blen, u64 err);
+void qib_bad_intrstatus(struct qib_devdata *);
+void qib_handle_urcv(struct qib_devdata *, u64);
+
+/* clean up any per-chip chip-specific stuff */
+void qib_chip_cleanup(struct qib_devdata *);
+/* clean up any chip type-specific stuff */
+void qib_chip_done(void);
+
+/* check to see if we have to force ordering for write combining */
+int qib_unordered_wc(void);
+void qib_pio_copy(void __iomem *to, const void *from, size_t count);
+
+void qib_disarm_piobufs(struct qib_devdata *, unsigned, unsigned);
+int qib_disarm_piobufs_ifneeded(struct qib_ctxtdata *);
+void qib_disarm_piobufs_set(struct qib_devdata *, unsigned long *, unsigned);
+void qib_cancel_sends(struct qib_pportdata *);
+
+int qib_create_rcvhdrq(struct qib_devdata *, struct qib_ctxtdata *);
+int qib_setup_eagerbufs(struct qib_ctxtdata *);
+void qib_set_ctxtcnt(struct qib_devdata *);
+int qib_create_ctxts(struct qib_devdata *dd);
+struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *, u32);
+void qib_init_pportdata(struct qib_pportdata *, struct qib_devdata *, u8, u8);
+void qib_free_ctxtdata(struct qib_devdata *, struct qib_ctxtdata *);
+
+u32 qib_kreceive(struct qib_ctxtdata *, u32 *, u32 *);
+int qib_reset_device(int);
+int qib_wait_linkstate(struct qib_pportdata *, u32, int);
+int qib_set_linkstate(struct qib_pportdata *, u8);
+int qib_set_mtu(struct qib_pportdata *, u16);
+int qib_set_lid(struct qib_pportdata *, u32, u8);
+void qib_hol_down(struct qib_pportdata *);
+void qib_hol_init(struct qib_pportdata *);
+void qib_hol_up(struct qib_pportdata *);
+void qib_hol_event(unsigned long);
+void qib_disable_after_error(struct qib_devdata *);
+int qib_set_uevent_bits(struct qib_pportdata *, const int);
+
+/* for use in system calls, where we want to know device type, etc. */
+#define ctxt_fp(fp) \
+	(((struct qib_filedata *)(fp)->private_data)->rcd)
+#define subctxt_fp(fp) \
+	(((struct qib_filedata *)(fp)->private_data)->subctxt)
+#define tidcursor_fp(fp) \
+	(((struct qib_filedata *)(fp)->private_data)->tidcursor)
+#define user_sdma_queue_fp(fp) \
+	(((struct qib_filedata *)(fp)->private_data)->pq)
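So a file_operations handler can pull its per-open state straight from the struct file, e.g. (sketch):

	struct qib_ctxtdata *rcd = ctxt_fp(fp);
	unsigned subctxt = subctxt_fp(fp);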
+
+static inline struct qib_devdata *dd_from_ppd(struct qib_pportdata *ppd)
+{
+	return ppd->dd;
+}
+
+static inline struct qib_devdata *dd_from_dev(struct qib_ibdev *dev)
+{
+	return container_of(dev, struct qib_devdata, verbs_dev);
+}
+
+static inline struct qib_devdata *dd_from_ibdev(struct ib_device *ibdev)
+{
+	return dd_from_dev(to_idev(ibdev));
+}
+
+static inline struct qib_pportdata *ppd_from_ibp(struct qib_ibport *ibp)
+{
+	return container_of(ibp, struct qib_pportdata, ibport_data);
+}
+
+static inline struct qib_ibport *to_iport(struct ib_device *ibdev, u8 port)
+{
+	struct qib_devdata *dd = dd_from_ibdev(ibdev);
+	unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */
+
+	WARN_ON(pidx >= dd->num_pports);
+	return &dd->pport[pidx].ibport_data;
+}
+
+/*
+ * values for dd->flags (_device_ related flags)
+ */
+#define QIB_HAS_LINK_LATENCY  0x1 /* supports link latency (IB 1.2) */
+#define QIB_INITTED           0x2 /* chip and driver up and initted */
+#define QIB_DOING_RESET       0x4  /* in the middle of doing chip reset */
+#define QIB_PRESENT           0x8  /* chip accesses can be done */
+#define QIB_PIO_FLUSH_WC      0x10 /* Needs Write combining flush for PIO */
+#define QIB_HAS_THRESH_UPDATE 0x40
+#define QIB_HAS_SDMA_TIMEOUT  0x80
+#define QIB_USE_SPCL_TRIG     0x100 /* SpecialTrigger launch enabled */
+#define QIB_NODMA_RTAIL       0x200 /* rcvhdrtail register DMA disabled */
+#define QIB_HAS_INTX          0x800 /* Supports INTx interrupts */
+#define QIB_HAS_SEND_DMA      0x1000 /* Supports Send DMA */
+#define QIB_HAS_VLSUPP        0x2000 /* Supports multiple VLs; PBC different */
+#define QIB_HAS_HDRSUPP       0x4000 /* Supports header suppression */
+#define QIB_BADINTR           0x8000 /* severe interrupt problems */
+#define QIB_DCA_ENABLED       0x10000 /* Direct Cache Access enabled */
+#define QIB_HAS_QSFP          0x20000 /* device (card instance) has QSFP */
+
+/*
+ * values for ppd->lflags (_ib_port_ related flags)
+ */
+#define QIBL_LINKV             0x1 /* IB link state valid */
+#define QIBL_LINKDOWN          0x8 /* IB link is down */
+#define QIBL_LINKINIT          0x10 /* IB link level is up */
+#define QIBL_LINKARMED         0x20 /* IB link is ARMED */
+#define QIBL_LINKACTIVE        0x40 /* IB link is ACTIVE */
+/* leave a gap for more IB-link state */
+#define QIBL_IB_AUTONEG_INPROG 0x1000 /* non-IBTA DDR/QDR neg active */
+#define QIBL_IB_AUTONEG_FAILED 0x2000 /* non-IBTA DDR/QDR neg failed */
+#define QIBL_IB_LINK_DISABLED  0x4000 /* Linkdown-disable forced,
+				       * Do not try to bring up */
+#define QIBL_IB_FORCE_NOTIFY   0x8000 /* force notify on next ib change */
+
+/* IB dword length mask in PBC (lower 11 bits); same for all chips */
+#define QIB_PBC_LENGTH_MASK                     ((1 << 11) - 1)
+
+
+/* ctxt_flag bit offsets */
+		/* waiting for a packet to arrive */
+#define QIB_CTXT_WAITING_RCV   2
+		/* master has not finished initializing */
+#define QIB_CTXT_MASTER_UNINIT 4
+		/* waiting for an urgent packet to arrive */
+#define QIB_CTXT_WAITING_URG 5
+
+/* free up any allocated data at closes */
+void qib_free_data(struct qib_ctxtdata *dd);
+void qib_chg_pioavailkernel(struct qib_devdata *, unsigned, unsigned,
+			    u32, struct qib_ctxtdata *);
+struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *,
+					   const struct pci_device_id *);
+struct qib_devdata *qib_init_iba7220_funcs(struct pci_dev *,
+					   const struct pci_device_id *);
+struct qib_devdata *qib_init_iba6120_funcs(struct pci_dev *,
+					   const struct pci_device_id *);
+void qib_free_devdata(struct qib_devdata *);
+struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra);
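The three qib_init_iba*_funcs() entry points above each allocate the devdata and fill in the f_* hooks for one chip family; presumably the PCI probe routine (in qib_init.c, per the file list) picks one by device ID. An illustrative dispatch, using the PCI_DEVICE_ID_QLOGIC_IB_* values defined near the end of this header:

	switch (ent->device) {
	case PCI_DEVICE_ID_QLOGIC_IB_6120:
		dd = qib_init_iba6120_funcs(pdev, ent);
		break;
	case PCI_DEVICE_ID_QLOGIC_IB_7220:
		dd = qib_init_iba7220_funcs(pdev, ent);
		break;
	case PCI_DEVICE_ID_QLOGIC_IB_7322:
		dd = qib_init_iba7322_funcs(pdev, ent);
		break;
	}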
+
+#define QIB_TWSI_NO_DEV 0xFF
+/* Below qib_twsi_ functions must be called with eep_lock held */
+int qib_twsi_reset(struct qib_devdata *dd);
+int qib_twsi_blk_rd(struct qib_devdata *dd, int dev, int addr, void *buffer,
+		    int len);
+int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr,
+		    const void *buffer, int len);
+void qib_get_eeprom_info(struct qib_devdata *);
+int qib_update_eeprom_log(struct qib_devdata *dd);
+void qib_inc_eeprom_err(struct qib_devdata *dd, u32 eidx, u32 incr);
+void qib_dump_lookup_output_queue(struct qib_devdata *);
+void qib_force_pio_avail_update(struct qib_devdata *);
+void qib_clear_symerror_on_linkup(unsigned long opaque);
+
+/*
+ * Set LED override, only the two LSBs have "public" meaning, but
+ * any non-zero value substitutes them for the Link and LinkTrain
+ * LED states.
+ */
+#define QIB_LED_PHYS 1 /* Physical (linktraining) GREEN LED */
+#define QIB_LED_LOG 2  /* Logical (link) YELLOW LED */
+void qib_set_led_override(struct qib_pportdata *ppd, unsigned int val);
+
+/* send dma routines */
+int qib_setup_sdma(struct qib_pportdata *);
+void qib_teardown_sdma(struct qib_pportdata *);
+void __qib_sdma_intr(struct qib_pportdata *);
+void qib_sdma_intr(struct qib_pportdata *);
+int qib_sdma_verbs_send(struct qib_pportdata *, struct qib_sge_state *,
+			u32, struct qib_verbs_txreq *);
+/* ppd->sdma_lock should be locked before calling this. */
+int qib_sdma_make_progress(struct qib_pportdata *dd);
+
+/* must be called under qib_sdma_lock */
+static inline u16 qib_sdma_descq_freecnt(const struct qib_pportdata *ppd)
+{
+	return ppd->sdma_descq_cnt -
+		(ppd->sdma_descq_added - ppd->sdma_descq_removed) - 1;
+}
+
+static inline int __qib_sdma_running(struct qib_pportdata *ppd)
+{
+	return ppd->sdma_state.current_state == qib_sdma_state_s99_running;
+}
+int qib_sdma_running(struct qib_pportdata *);
+
+void __qib_sdma_process_event(struct qib_pportdata *, enum qib_sdma_events);
+void qib_sdma_process_event(struct qib_pportdata *, enum qib_sdma_events);
+
+/*
+ * number of words used for protocol header if not set by qib_userinit()
+ */
+#define QIB_DFLT_RCVHDRSIZE 9
+
+/*
+ * We need to be able to handle an IB header of at least 24 dwords.
+ * We need the rcvhdrq large enough to handle largest IB header, but
+ * still have room for a 2KB MTU standard IB packet.
+ * Additionally, some processor/memory controller combinations
+ * benefit quite strongly from having the DMA'ed data be cacheline
+ * aligned and a cacheline multiple, so we set the size to 32 dwords
+ * (2 64-byte primary cachelines for pretty much all processors of
+ * interest).  The alignment hurts nothing, other than using somewhat
+ * more memory.
+ */
+#define QIB_RCVHDR_ENTSIZE 32
+
+int qib_get_user_pages(unsigned long, size_t, struct page **);
+void qib_release_user_pages(struct page **, size_t);
+int qib_eeprom_read(struct qib_devdata *, u8, void *, int);
+int qib_eeprom_write(struct qib_devdata *, u8, const void *, int);
+u32 __iomem *qib_getsendbuf_range(struct qib_devdata *, u32 *, u32, u32);
+void qib_sendbuf_done(struct qib_devdata *, unsigned);
+
+static inline void qib_clear_rcvhdrtail(const struct qib_ctxtdata *rcd)
+{
+	*((u64 *) rcd->rcvhdrtail_kvaddr) = 0ULL;
+}
+
+static inline u32 qib_get_rcvhdrtail(const struct qib_ctxtdata *rcd)
+{
+	/*
+	 * Volatile because it's a DMA target from the chip, the routine
+	 * is inlined, and we don't want register caching or reordering.
+	 */
+	return (u32) le64_to_cpu(
+		*((volatile __le64 *)rcd->rcvhdrtail_kvaddr)); /* DMA'ed */
+}
+
+static inline u32 qib_get_hdrqtail(const struct qib_ctxtdata *rcd)
+{
+	const struct qib_devdata *dd = rcd->dd;
+	u32 hdrqtail;
+
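+	/*
+	 * Without DMA'ed tail updates, infer the tail from the sequence
+	 * number in the receive header flags at the current head.
+	 */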
+	if (dd->flags & QIB_NODMA_RTAIL) {
+		__le32 *rhf_addr;
+		u32 seq;
+
+		rhf_addr = (__le32 *) rcd->rcvhdrq +
+			rcd->head + dd->rhf_offset;
+		seq = qib_hdrget_seq(rhf_addr);
+		hdrqtail = rcd->head;
+		if (seq == rcd->seq_cnt)
+			hdrqtail++;
+	} else
+		hdrqtail = qib_get_rcvhdrtail(rcd);
+
+	return hdrqtail;
+}
+
+/*
+ * sysfs interface.
+ */
+
+extern const char ib_qib_version[];
+
+int qib_device_create(struct qib_devdata *);
+void qib_device_remove(struct qib_devdata *);
+
+int qib_create_port_files(struct ib_device *ibdev, u8 port_num,
+			  struct kobject *kobj);
+int qib_verbs_register_sysfs(struct qib_devdata *);
+void qib_verbs_unregister_sysfs(struct qib_devdata *);
+/* Hook for sysfs read of QSFP */
+extern int qib_qsfp_dump(struct qib_pportdata *ppd, char *buf, int len);
+
+int __init qib_init_qibfs(void);
+int __exit qib_exit_qibfs(void);
+
+int qibfs_add(struct qib_devdata *);
+int qibfs_remove(struct qib_devdata *);
+
+int qib_pcie_init(struct pci_dev *, const struct pci_device_id *);
+int qib_pcie_ddinit(struct qib_devdata *, struct pci_dev *,
+		    const struct pci_device_id *);
+void qib_pcie_ddcleanup(struct qib_devdata *);
+int qib_pcie_params(struct qib_devdata *, u32, u32 *, struct msix_entry *);
+int qib_reinit_intr(struct qib_devdata *);
+void qib_enable_intx(struct pci_dev *);
+void qib_nomsi(struct qib_devdata *);
+void qib_nomsix(struct qib_devdata *);
+void qib_pcie_getcmd(struct qib_devdata *, u16 *, u8 *, u8 *);
+void qib_pcie_reenable(struct qib_devdata *, u16, u8, u8);
+
+/*
+ * dma_addr wrappers - all 0's are invalid for hw
+ */
+dma_addr_t qib_map_page(struct pci_dev *, struct page *, unsigned long,
+			  size_t, int);
+const char *qib_get_unit_name(int unit);
+
+/*
+ * Flush write combining store buffers (if present) and perform a write
+ * barrier.
+ */
+#if defined(CONFIG_X86_64)
+#define qib_flush_wc() asm volatile("sfence" : : : "memory")
+#else
+#define qib_flush_wc() wmb() /* no reorder around wc flush */
+#endif
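+/*
+ * Typical (illustrative) use: after copying packet data into a
+ * write-combining mapped PIO buffer, flush so the chip observes the
+ * stores before the buffer is launched, e.g.:
+ *
+ *	qib_pio_copy(piobuf, hdr, hdrwords);
+ *	qib_flush_wc();
+ */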
+
+/* global module parameter variables */
+extern unsigned qib_ibmtu;
+extern ushort qib_cfgctxts;
+extern ushort qib_num_cfg_vls;
+extern ushort qib_mini_init; /* If set, do as few (ideally 0) chip writes as possible */
+extern unsigned qib_n_krcv_queues;
+extern unsigned qib_sdma_fetch_arb;
+extern unsigned qib_compat_ddr_negotiate;
+extern int qib_special_trigger;
+
+extern struct mutex qib_mutex;
+
+/* Number of seconds before our card status check...  */
+#define STATUS_TIMEOUT 60
+
+#define QIB_DRV_NAME            "ib_qib"
+#define QIB_USER_MINOR_BASE     0
+#define QIB_TRACE_MINOR         127
+#define QIB_DIAGPKT_MINOR       128
+#define QIB_DIAG_MINOR_BASE     129
+#define QIB_NMINORS             255
+
+#define PCI_VENDOR_ID_PATHSCALE 0x1fc1
+#define PCI_VENDOR_ID_QLOGIC 0x1077
+#define PCI_DEVICE_ID_QLOGIC_IB_6120 0x10
+#define PCI_DEVICE_ID_QLOGIC_IB_7220 0x7220
+#define PCI_DEVICE_ID_QLOGIC_IB_7322 0x7322
+
+/*
+ * qib_early_err is used (only!) to print early errors before devdata is
+ * allocated, or when dd->pcidev may not be valid, and at the tail end of
+ * cleanup when devdata may have been freed, etc.  qib_dev_porterr is
+ * the same as qib_dev_err, but is used when the message really needs
+ * the IB port# to be definitive as to what's happening.
+ * All of these go to the trace log, and the trace log entry is done
+ * first to avoid possible serial port delays from printk.
+ */
+#define qib_early_err(dev, fmt, ...) \
+	do { \
+		dev_info(dev, KERN_ERR QIB_DRV_NAME ": " fmt, ##__VA_ARGS__); \
+	} while (0)
+
+#define qib_dev_err(dd, fmt, ...) \
+	do { \
+		dev_err(&(dd)->pcidev->dev, "%s: " fmt, \
+			qib_get_unit_name((dd)->unit), ##__VA_ARGS__); \
+	} while (0)
+
+#define qib_dev_porterr(dd, port, fmt, ...) \
+	do { \
+		dev_err(&(dd)->pcidev->dev, "%s: IB%u:%u " fmt, \
+			qib_get_unit_name((dd)->unit), (dd)->unit, (port), \
+			##__VA_ARGS__); \
+	} while (0)
+
+#define qib_devinfo(pcidev, fmt, ...) \
+	do { \
+		dev_info(&(pcidev)->dev, fmt, ##__VA_ARGS__); \
+	} while (0)
+
+/*
+ * This is used for formatting hw error messages.
+ */
+struct qib_hwerror_msgs {
+	u64 mask;
+	const char *msg;
+};
+
+#define QLOGIC_IB_HWE_MSG(a, b) { .mask = a, .msg = b }
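+/*
+ * An error table built with this macro might look like the following
+ * (hypothetical mask value), and is then handed to
+ * qib_format_hwerrors() declared below:
+ *
+ *	static const struct qib_hwerror_msgs hwerrs[] = {
+ *		QLOGIC_IB_HWE_MSG(1ULL << 54, "Power-on BIST failed"),
+ *	};
+ */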
+
+/* in qib_intr.c... */
+void qib_format_hwerrors(u64 hwerrs,
+			 const struct qib_hwerror_msgs *hwerrmsgs,
+			 size_t nhwerrmsgs, char *msg, size_t lmsg);
+#endif                          /* _QIB_KERNEL_H */
diff --git a/drivers/infiniband/hw/qib/qib_6120_regs.h b/drivers/infiniband/hw/qib/qib_6120_regs.h
new file mode 100644
index 000000000000..e16cb6f7de2c
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_6120_regs.h
@@ -0,0 +1,977 @@
+/*
+ * Copyright (c) 2008, 2009, 2010 QLogic Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* This file is mechanically generated from RTL. Any hand-edits will be lost! */
+
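+/*
+ * Each field below is described by an _LSB/_RMASK pair: shift the
+ * 64-bit register value right by _LSB, then AND with _RMASK to
+ * extract the field.  Illustrative only, e.g.:
+ *
+ *	arch = (rev >> QIB_6120_Revision_R_Arch_LSB) &
+ *		QIB_6120_Revision_R_Arch_RMASK;
+ */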
+#define QIB_6120_Revision_OFFS 0x0
+#define QIB_6120_Revision_R_Simulator_LSB 0x3F
+#define QIB_6120_Revision_R_Simulator_RMASK 0x1
+#define QIB_6120_Revision_Reserved_LSB 0x28
+#define QIB_6120_Revision_Reserved_RMASK 0x7FFFFF
+#define QIB_6120_Revision_BoardID_LSB 0x20
+#define QIB_6120_Revision_BoardID_RMASK 0xFF
+#define QIB_6120_Revision_R_SW_LSB 0x18
+#define QIB_6120_Revision_R_SW_RMASK 0xFF
+#define QIB_6120_Revision_R_Arch_LSB 0x10
+#define QIB_6120_Revision_R_Arch_RMASK 0xFF
+#define QIB_6120_Revision_R_ChipRevMajor_LSB 0x8
+#define QIB_6120_Revision_R_ChipRevMajor_RMASK 0xFF
+#define QIB_6120_Revision_R_ChipRevMinor_LSB 0x0
+#define QIB_6120_Revision_R_ChipRevMinor_RMASK 0xFF
+
+#define QIB_6120_Control_OFFS 0x8
+#define QIB_6120_Control_TxLatency_LSB 0x4
+#define QIB_6120_Control_TxLatency_RMASK 0x1
+#define QIB_6120_Control_PCIERetryBufDiagEn_LSB 0x3
+#define QIB_6120_Control_PCIERetryBufDiagEn_RMASK 0x1
+#define QIB_6120_Control_LinkEn_LSB 0x2
+#define QIB_6120_Control_LinkEn_RMASK 0x1
+#define QIB_6120_Control_FreezeMode_LSB 0x1
+#define QIB_6120_Control_FreezeMode_RMASK 0x1
+#define QIB_6120_Control_SyncReset_LSB 0x0
+#define QIB_6120_Control_SyncReset_RMASK 0x1
+
+#define QIB_6120_PageAlign_OFFS 0x10
+
+#define QIB_6120_PortCnt_OFFS 0x18
+
+#define QIB_6120_SendRegBase_OFFS 0x30
+
+#define QIB_6120_UserRegBase_OFFS 0x38
+
+#define QIB_6120_CntrRegBase_OFFS 0x40
+
+#define QIB_6120_Scratch_OFFS 0x48
+#define QIB_6120_Scratch_TopHalf_LSB 0x20
+#define QIB_6120_Scratch_TopHalf_RMASK 0xFFFFFFFF
+#define QIB_6120_Scratch_BottomHalf_LSB 0x0
+#define QIB_6120_Scratch_BottomHalf_RMASK 0xFFFFFFFF
+
+#define QIB_6120_IntBlocked_OFFS 0x60
+#define QIB_6120_IntBlocked_ErrorIntBlocked_LSB 0x1F
+#define QIB_6120_IntBlocked_ErrorIntBlocked_RMASK 0x1
+#define QIB_6120_IntBlocked_PioSetIntBlocked_LSB 0x1E
+#define QIB_6120_IntBlocked_PioSetIntBlocked_RMASK 0x1
+#define QIB_6120_IntBlocked_PioBufAvailIntBlocked_LSB 0x1D
+#define QIB_6120_IntBlocked_PioBufAvailIntBlocked_RMASK 0x1
+#define QIB_6120_IntBlocked_assertGPIOIntBlocked_LSB 0x1C
+#define QIB_6120_IntBlocked_assertGPIOIntBlocked_RMASK 0x1
+#define QIB_6120_IntBlocked_Reserved_LSB 0xF
+#define QIB_6120_IntBlocked_Reserved_RMASK 0x1FFF
+#define QIB_6120_IntBlocked_RcvAvail4IntBlocked_LSB 0x10
+#define QIB_6120_IntBlocked_RcvAvail4IntBlocked_RMASK 0x1
+#define QIB_6120_IntBlocked_RcvAvail3IntBlocked_LSB 0xF
+#define QIB_6120_IntBlocked_RcvAvail3IntBlocked_RMASK 0x1
+#define QIB_6120_IntBlocked_RcvAvail2IntBlocked_LSB 0xE
+#define QIB_6120_IntBlocked_RcvAvail2IntBlocked_RMASK 0x1
+#define QIB_6120_IntBlocked_RcvAvail1IntBlocked_LSB 0xD
+#define QIB_6120_IntBlocked_RcvAvail1IntBlocked_RMASK 0x1
+#define QIB_6120_IntBlocked_RcvAvail0IntBlocked_LSB 0xC
+#define QIB_6120_IntBlocked_RcvAvail0IntBlocked_RMASK 0x1
+#define QIB_6120_IntBlocked_Reserved1_LSB 0x5
+#define QIB_6120_IntBlocked_Reserved1_RMASK 0x7F
+#define QIB_6120_IntBlocked_RcvUrg4IntBlocked_LSB 0x4
+#define QIB_6120_IntBlocked_RcvUrg4IntBlocked_RMASK 0x1
+#define QIB_6120_IntBlocked_RcvUrg3IntBlocked_LSB 0x3
+#define QIB_6120_IntBlocked_RcvUrg3IntBlocked_RMASK 0x1
+#define QIB_6120_IntBlocked_RcvUrg2IntBlocked_LSB 0x2
+#define QIB_6120_IntBlocked_RcvUrg2IntBlocked_RMASK 0x1
+#define QIB_6120_IntBlocked_RcvUrg1IntBlocked_LSB 0x1
+#define QIB_6120_IntBlocked_RcvUrg1IntBlocked_RMASK 0x1
+#define QIB_6120_IntBlocked_RcvUrg0IntBlocked_LSB 0x0
+#define QIB_6120_IntBlocked_RcvUrg0IntBlocked_RMASK 0x1
+
+#define QIB_6120_IntMask_OFFS 0x68
+#define QIB_6120_IntMask_ErrorIntMask_LSB 0x1F
+#define QIB_6120_IntMask_ErrorIntMask_RMASK 0x1
+#define QIB_6120_IntMask_PioSetIntMask_LSB 0x1E
+#define QIB_6120_IntMask_PioSetIntMask_RMASK 0x1
+#define QIB_6120_IntMask_PioBufAvailIntMask_LSB 0x1D
+#define QIB_6120_IntMask_PioBufAvailIntMask_RMASK 0x1
+#define QIB_6120_IntMask_assertGPIOIntMask_LSB 0x1C
+#define QIB_6120_IntMask_assertGPIOIntMask_RMASK 0x1
+#define QIB_6120_IntMask_Reserved_LSB 0x11
+#define QIB_6120_IntMask_Reserved_RMASK 0x7FF
+#define QIB_6120_IntMask_RcvAvail4IntMask_LSB 0x10
+#define QIB_6120_IntMask_RcvAvail4IntMask_RMASK 0x1
+#define QIB_6120_IntMask_RcvAvail3IntMask_LSB 0xF
+#define QIB_6120_IntMask_RcvAvail3IntMask_RMASK 0x1
+#define QIB_6120_IntMask_RcvAvail2IntMask_LSB 0xE
+#define QIB_6120_IntMask_RcvAvail2IntMask_RMASK 0x1
+#define QIB_6120_IntMask_RcvAvail1IntMask_LSB 0xD
+#define QIB_6120_IntMask_RcvAvail1IntMask_RMASK 0x1
+#define QIB_6120_IntMask_RcvAvail0IntMask_LSB 0xC
+#define QIB_6120_IntMask_RcvAvail0IntMask_RMASK 0x1
+#define QIB_6120_IntMask_Reserved1_LSB 0x5
+#define QIB_6120_IntMask_Reserved1_RMASK 0x7F
+#define QIB_6120_IntMask_RcvUrg4IntMask_LSB 0x4
+#define QIB_6120_IntMask_RcvUrg4IntMask_RMASK 0x1
+#define QIB_6120_IntMask_RcvUrg3IntMask_LSB 0x3
+#define QIB_6120_IntMask_RcvUrg3IntMask_RMASK 0x1
+#define QIB_6120_IntMask_RcvUrg2IntMask_LSB 0x2
+#define QIB_6120_IntMask_RcvUrg2IntMask_RMASK 0x1
+#define QIB_6120_IntMask_RcvUrg1IntMask_LSB 0x1
+#define QIB_6120_IntMask_RcvUrg1IntMask_RMASK 0x1
+#define QIB_6120_IntMask_RcvUrg0IntMask_LSB 0x0
+#define QIB_6120_IntMask_RcvUrg0IntMask_RMASK 0x1
+
+#define QIB_6120_IntStatus_OFFS 0x70
+#define QIB_6120_IntStatus_Error_LSB 0x1F
+#define QIB_6120_IntStatus_Error_RMASK 0x1
+#define QIB_6120_IntStatus_PioSent_LSB 0x1E
+#define QIB_6120_IntStatus_PioSent_RMASK 0x1
+#define QIB_6120_IntStatus_PioBufAvail_LSB 0x1D
+#define QIB_6120_IntStatus_PioBufAvail_RMASK 0x1
+#define QIB_6120_IntStatus_assertGPIO_LSB 0x1C
+#define QIB_6120_IntStatus_assertGPIO_RMASK 0x1
+#define QIB_6120_IntStatus_Reserved_LSB 0xF
+#define QIB_6120_IntStatus_Reserved_RMASK 0x1FFF
+#define QIB_6120_IntStatus_RcvAvail4_LSB 0x10
+#define QIB_6120_IntStatus_RcvAvail4_RMASK 0x1
+#define QIB_6120_IntStatus_RcvAvail3_LSB 0xF
+#define QIB_6120_IntStatus_RcvAvail3_RMASK 0x1
+#define QIB_6120_IntStatus_RcvAvail2_LSB 0xE
+#define QIB_6120_IntStatus_RcvAvail2_RMASK 0x1
+#define QIB_6120_IntStatus_RcvAvail1_LSB 0xD
+#define QIB_6120_IntStatus_RcvAvail1_RMASK 0x1
+#define QIB_6120_IntStatus_RcvAvail0_LSB 0xC
+#define QIB_6120_IntStatus_RcvAvail0_RMASK 0x1
+#define QIB_6120_IntStatus_Reserved1_LSB 0x5
+#define QIB_6120_IntStatus_Reserved1_RMASK 0x7F
+#define QIB_6120_IntStatus_RcvUrg4_LSB 0x4
+#define QIB_6120_IntStatus_RcvUrg4_RMASK 0x1
+#define QIB_6120_IntStatus_RcvUrg3_LSB 0x3
+#define QIB_6120_IntStatus_RcvUrg3_RMASK 0x1
+#define QIB_6120_IntStatus_RcvUrg2_LSB 0x2
+#define QIB_6120_IntStatus_RcvUrg2_RMASK 0x1
+#define QIB_6120_IntStatus_RcvUrg1_LSB 0x1
+#define QIB_6120_IntStatus_RcvUrg1_RMASK 0x1
+#define QIB_6120_IntStatus_RcvUrg0_LSB 0x0
+#define QIB_6120_IntStatus_RcvUrg0_RMASK 0x1
+
+#define QIB_6120_IntClear_OFFS 0x78
+#define QIB_6120_IntClear_ErrorIntClear_LSB 0x1F
+#define QIB_6120_IntClear_ErrorIntClear_RMASK 0x1
+#define QIB_6120_IntClear_PioSetIntClear_LSB 0x1E
+#define QIB_6120_IntClear_PioSetIntClear_RMASK 0x1
+#define QIB_6120_IntClear_PioBufAvailIntClear_LSB 0x1D
+#define QIB_6120_IntClear_PioBufAvailIntClear_RMASK 0x1
+#define QIB_6120_IntClear_assertGPIOIntClear_LSB 0x1C
+#define QIB_6120_IntClear_assertGPIOIntClear_RMASK 0x1
+#define QIB_6120_IntClear_Reserved_LSB 0xF
+#define QIB_6120_IntClear_Reserved_RMASK 0x1FFF
+#define QIB_6120_IntClear_RcvAvail4IntClear_LSB 0x10
+#define QIB_6120_IntClear_RcvAvail4IntClear_RMASK 0x1
+#define QIB_6120_IntClear_RcvAvail3IntClear_LSB 0xF
+#define QIB_6120_IntClear_RcvAvail3IntClear_RMASK 0x1
+#define QIB_6120_IntClear_RcvAvail2IntClear_LSB 0xE
+#define QIB_6120_IntClear_RcvAvail2IntClear_RMASK 0x1
+#define QIB_6120_IntClear_RcvAvail1IntClear_LSB 0xD
+#define QIB_6120_IntClear_RcvAvail1IntClear_RMASK 0x1
+#define QIB_6120_IntClear_RcvAvail0IntClear_LSB 0xC
+#define QIB_6120_IntClear_RcvAvail0IntClear_RMASK 0x1
+#define QIB_6120_IntClear_Reserved1_LSB 0x5
+#define QIB_6120_IntClear_Reserved1_RMASK 0x7F
+#define QIB_6120_IntClear_RcvUrg4IntClear_LSB 0x4
+#define QIB_6120_IntClear_RcvUrg4IntClear_RMASK 0x1
+#define QIB_6120_IntClear_RcvUrg3IntClear_LSB 0x3
+#define QIB_6120_IntClear_RcvUrg3IntClear_RMASK 0x1
+#define QIB_6120_IntClear_RcvUrg2IntClear_LSB 0x2
+#define QIB_6120_IntClear_RcvUrg2IntClear_RMASK 0x1
+#define QIB_6120_IntClear_RcvUrg1IntClear_LSB 0x1
+#define QIB_6120_IntClear_RcvUrg1IntClear_RMASK 0x1
+#define QIB_6120_IntClear_RcvUrg0IntClear_LSB 0x0
+#define QIB_6120_IntClear_RcvUrg0IntClear_RMASK 0x1
+
+#define QIB_6120_ErrMask_OFFS 0x80
+#define QIB_6120_ErrMask_Reserved_LSB 0x34
+#define QIB_6120_ErrMask_Reserved_RMASK 0xFFF
+#define QIB_6120_ErrMask_HardwareErrMask_LSB 0x33
+#define QIB_6120_ErrMask_HardwareErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_ResetNegatedMask_LSB 0x32
+#define QIB_6120_ErrMask_ResetNegatedMask_RMASK 0x1
+#define QIB_6120_ErrMask_InvalidAddrErrMask_LSB 0x31
+#define QIB_6120_ErrMask_InvalidAddrErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_IBStatusChangedMask_LSB 0x30
+#define QIB_6120_ErrMask_IBStatusChangedMask_RMASK 0x1
+#define QIB_6120_ErrMask_Reserved1_LSB 0x26
+#define QIB_6120_ErrMask_Reserved1_RMASK 0x3FF
+#define QIB_6120_ErrMask_SendUnsupportedVLErrMask_LSB 0x25
+#define QIB_6120_ErrMask_SendUnsupportedVLErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_SendUnexpectedPktNumErrMask_LSB 0x24
+#define QIB_6120_ErrMask_SendUnexpectedPktNumErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_SendPioArmLaunchErrMask_LSB 0x23
+#define QIB_6120_ErrMask_SendPioArmLaunchErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_SendDroppedDataPktErrMask_LSB 0x22
+#define QIB_6120_ErrMask_SendDroppedDataPktErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_SendDroppedSmpPktErrMask_LSB 0x21
+#define QIB_6120_ErrMask_SendDroppedSmpPktErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_SendPktLenErrMask_LSB 0x20
+#define QIB_6120_ErrMask_SendPktLenErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_SendUnderRunErrMask_LSB 0x1F
+#define QIB_6120_ErrMask_SendUnderRunErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_SendMaxPktLenErrMask_LSB 0x1E
+#define QIB_6120_ErrMask_SendMaxPktLenErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_SendMinPktLenErrMask_LSB 0x1D
+#define QIB_6120_ErrMask_SendMinPktLenErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_Reserved2_LSB 0x12
+#define QIB_6120_ErrMask_Reserved2_RMASK 0x7FF
+#define QIB_6120_ErrMask_RcvIBLostLinkErrMask_LSB 0x11
+#define QIB_6120_ErrMask_RcvIBLostLinkErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_RcvHdrErrMask_LSB 0x10
+#define QIB_6120_ErrMask_RcvHdrErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_RcvHdrLenErrMask_LSB 0xF
+#define QIB_6120_ErrMask_RcvHdrLenErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_RcvBadTidErrMask_LSB 0xE
+#define QIB_6120_ErrMask_RcvBadTidErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_RcvHdrFullErrMask_LSB 0xD
+#define QIB_6120_ErrMask_RcvHdrFullErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_RcvEgrFullErrMask_LSB 0xC
+#define QIB_6120_ErrMask_RcvEgrFullErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_RcvBadVersionErrMask_LSB 0xB
+#define QIB_6120_ErrMask_RcvBadVersionErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_RcvIBFlowErrMask_LSB 0xA
+#define QIB_6120_ErrMask_RcvIBFlowErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_RcvEBPErrMask_LSB 0x9
+#define QIB_6120_ErrMask_RcvEBPErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_RcvUnsupportedVLErrMask_LSB 0x8
+#define QIB_6120_ErrMask_RcvUnsupportedVLErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_RcvUnexpectedCharErrMask_LSB 0x7
+#define QIB_6120_ErrMask_RcvUnexpectedCharErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_RcvShortPktLenErrMask_LSB 0x6
+#define QIB_6120_ErrMask_RcvShortPktLenErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_RcvLongPktLenErrMask_LSB 0x5
+#define QIB_6120_ErrMask_RcvLongPktLenErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_RcvMaxPktLenErrMask_LSB 0x4
+#define QIB_6120_ErrMask_RcvMaxPktLenErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_RcvMinPktLenErrMask_LSB 0x3
+#define QIB_6120_ErrMask_RcvMinPktLenErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_RcvICRCErrMask_LSB 0x2
+#define QIB_6120_ErrMask_RcvICRCErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_RcvVCRCErrMask_LSB 0x1
+#define QIB_6120_ErrMask_RcvVCRCErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_RcvFormatErrMask_LSB 0x0
+#define QIB_6120_ErrMask_RcvFormatErrMask_RMASK 0x1
+
+#define QIB_6120_ErrStatus_OFFS 0x88
+#define QIB_6120_ErrStatus_Reserved_LSB 0x34
+#define QIB_6120_ErrStatus_Reserved_RMASK 0xFFF
+#define QIB_6120_ErrStatus_HardwareErr_LSB 0x33
+#define QIB_6120_ErrStatus_HardwareErr_RMASK 0x1
+#define QIB_6120_ErrStatus_ResetNegated_LSB 0x32
+#define QIB_6120_ErrStatus_ResetNegated_RMASK 0x1
+#define QIB_6120_ErrStatus_InvalidAddrErr_LSB 0x31
+#define QIB_6120_ErrStatus_InvalidAddrErr_RMASK 0x1
+#define QIB_6120_ErrStatus_IBStatusChanged_LSB 0x30
+#define QIB_6120_ErrStatus_IBStatusChanged_RMASK 0x1
+#define QIB_6120_ErrStatus_Reserved1_LSB 0x26
+#define QIB_6120_ErrStatus_Reserved1_RMASK 0x3FF
+#define QIB_6120_ErrStatus_SendUnsupportedVLErr_LSB 0x25
+#define QIB_6120_ErrStatus_SendUnsupportedVLErr_RMASK 0x1
+#define QIB_6120_ErrStatus_SendUnexpectedPktNumErr_LSB 0x24
+#define QIB_6120_ErrStatus_SendUnexpectedPktNumErr_RMASK 0x1
+#define QIB_6120_ErrStatus_SendPioArmLaunchErr_LSB 0x23
+#define QIB_6120_ErrStatus_SendPioArmLaunchErr_RMASK 0x1
+#define QIB_6120_ErrStatus_SendDroppedDataPktErr_LSB 0x22
+#define QIB_6120_ErrStatus_SendDroppedDataPktErr_RMASK 0x1
+#define QIB_6120_ErrStatus_SendDroppedSmpPktErr_LSB 0x21
+#define QIB_6120_ErrStatus_SendDroppedSmpPktErr_RMASK 0x1
+#define QIB_6120_ErrStatus_SendPktLenErr_LSB 0x20
+#define QIB_6120_ErrStatus_SendPktLenErr_RMASK 0x1
+#define QIB_6120_ErrStatus_SendUnderRunErr_LSB 0x1F
+#define QIB_6120_ErrStatus_SendUnderRunErr_RMASK 0x1
+#define QIB_6120_ErrStatus_SendMaxPktLenErr_LSB 0x1E
+#define QIB_6120_ErrStatus_SendMaxPktLenErr_RMASK 0x1
+#define QIB_6120_ErrStatus_SendMinPktLenErr_LSB 0x1D
+#define QIB_6120_ErrStatus_SendMinPktLenErr_RMASK 0x1
+#define QIB_6120_ErrStatus_Reserved2_LSB 0x12
+#define QIB_6120_ErrStatus_Reserved2_RMASK 0x7FF
+#define QIB_6120_ErrStatus_RcvIBLostLinkErr_LSB 0x11
+#define QIB_6120_ErrStatus_RcvIBLostLinkErr_RMASK 0x1
+#define QIB_6120_ErrStatus_RcvHdrErr_LSB 0x10
+#define QIB_6120_ErrStatus_RcvHdrErr_RMASK 0x1
+#define QIB_6120_ErrStatus_RcvHdrLenErr_LSB 0xF
+#define QIB_6120_ErrStatus_RcvHdrLenErr_RMASK 0x1
+#define QIB_6120_ErrStatus_RcvBadTidErr_LSB 0xE
+#define QIB_6120_ErrStatus_RcvBadTidErr_RMASK 0x1
+#define QIB_6120_ErrStatus_RcvHdrFullErr_LSB 0xD
+#define QIB_6120_ErrStatus_RcvHdrFullErr_RMASK 0x1
+#define QIB_6120_ErrStatus_RcvEgrFullErr_LSB 0xC
+#define QIB_6120_ErrStatus_RcvEgrFullErr_RMASK 0x1
+#define QIB_6120_ErrStatus_RcvBadVersionErr_LSB 0xB
+#define QIB_6120_ErrStatus_RcvBadVersionErr_RMASK 0x1
+#define QIB_6120_ErrStatus_RcvIBFlowErr_LSB 0xA
+#define QIB_6120_ErrStatus_RcvIBFlowErr_RMASK 0x1
+#define QIB_6120_ErrStatus_RcvEBPErr_LSB 0x9
+#define QIB_6120_ErrStatus_RcvEBPErr_RMASK 0x1
+#define QIB_6120_ErrStatus_RcvUnsupportedVLErr_LSB 0x8
+#define QIB_6120_ErrStatus_RcvUnsupportedVLErr_RMASK 0x1
+#define QIB_6120_ErrStatus_RcvUnexpectedCharErr_LSB 0x7
+#define QIB_6120_ErrStatus_RcvUnexpectedCharErr_RMASK 0x1
+#define QIB_6120_ErrStatus_RcvShortPktLenErr_LSB 0x6
+#define QIB_6120_ErrStatus_RcvShortPktLenErr_RMASK 0x1
+#define QIB_6120_ErrStatus_RcvLongPktLenErr_LSB 0x5
+#define QIB_6120_ErrStatus_RcvLongPktLenErr_RMASK 0x1
+#define QIB_6120_ErrStatus_RcvMaxPktLenErr_LSB 0x4
+#define QIB_6120_ErrStatus_RcvMaxPktLenErr_RMASK 0x1
+#define QIB_6120_ErrStatus_RcvMinPktLenErr_LSB 0x3
+#define QIB_6120_ErrStatus_RcvMinPktLenErr_RMASK 0x1
+#define QIB_6120_ErrStatus_RcvICRCErr_LSB 0x2
+#define QIB_6120_ErrStatus_RcvICRCErr_RMASK 0x1
+#define QIB_6120_ErrStatus_RcvVCRCErr_LSB 0x1
+#define QIB_6120_ErrStatus_RcvVCRCErr_RMASK 0x1
+#define QIB_6120_ErrStatus_RcvFormatErr_LSB 0x0
+#define QIB_6120_ErrStatus_RcvFormatErr_RMASK 0x1
+
+#define QIB_6120_ErrClear_OFFS 0x90
+#define QIB_6120_ErrClear_Reserved_LSB 0x34
+#define QIB_6120_ErrClear_Reserved_RMASK 0xFFF
+#define QIB_6120_ErrClear_HardwareErrClear_LSB 0x33
+#define QIB_6120_ErrClear_HardwareErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_ResetNegatedClear_LSB 0x32
+#define QIB_6120_ErrClear_ResetNegatedClear_RMASK 0x1
+#define QIB_6120_ErrClear_InvalidAddrErrClear_LSB 0x31
+#define QIB_6120_ErrClear_InvalidAddrErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_IBStatusChangedClear_LSB 0x30
+#define QIB_6120_ErrClear_IBStatusChangedClear_RMASK 0x1
+#define QIB_6120_ErrClear_Reserved1_LSB 0x26
+#define QIB_6120_ErrClear_Reserved1_RMASK 0x3FF
+#define QIB_6120_ErrClear_SendUnsupportedVLErrClear_LSB 0x25
+#define QIB_6120_ErrClear_SendUnsupportedVLErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_SendUnexpectedPktNumErrClear_LSB 0x24
+#define QIB_6120_ErrClear_SendUnexpectedPktNumErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_SendPioArmLaunchErrClear_LSB 0x23
+#define QIB_6120_ErrClear_SendPioArmLaunchErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_SendDroppedDataPktErrClear_LSB 0x22
+#define QIB_6120_ErrClear_SendDroppedDataPktErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_SendDroppedSmpPktErrClear_LSB 0x21
+#define QIB_6120_ErrClear_SendDroppedSmpPktErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_SendPktLenErrClear_LSB 0x20
+#define QIB_6120_ErrClear_SendPktLenErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_SendUnderRunErrClear_LSB 0x1F
+#define QIB_6120_ErrClear_SendUnderRunErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_SendMaxPktLenErrClear_LSB 0x1E
+#define QIB_6120_ErrClear_SendMaxPktLenErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_SendMinPktLenErrClear_LSB 0x1D
+#define QIB_6120_ErrClear_SendMinPktLenErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_Reserved2_LSB 0x12
+#define QIB_6120_ErrClear_Reserved2_RMASK 0x7FF
+#define QIB_6120_ErrClear_RcvIBLostLinkErrClear_LSB 0x11
+#define QIB_6120_ErrClear_RcvIBLostLinkErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_RcvHdrErrClear_LSB 0x10
+#define QIB_6120_ErrClear_RcvHdrErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_RcvHdrLenErrClear_LSB 0xF
+#define QIB_6120_ErrClear_RcvHdrLenErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_RcvBadTidErrClear_LSB 0xE
+#define QIB_6120_ErrClear_RcvBadTidErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_RcvHdrFullErrClear_LSB 0xD
+#define QIB_6120_ErrClear_RcvHdrFullErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_RcvEgrFullErrClear_LSB 0xC
+#define QIB_6120_ErrClear_RcvEgrFullErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_RcvBadVersionErrClear_LSB 0xB
+#define QIB_6120_ErrClear_RcvBadVersionErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_RcvIBFlowErrClear_LSB 0xA
+#define QIB_6120_ErrClear_RcvIBFlowErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_RcvEBPErrClear_LSB 0x9
+#define QIB_6120_ErrClear_RcvEBPErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_RcvUnsupportedVLErrClear_LSB 0x8
+#define QIB_6120_ErrClear_RcvUnsupportedVLErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_RcvUnexpectedCharErrClear_LSB 0x7
+#define QIB_6120_ErrClear_RcvUnexpectedCharErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_RcvShortPktLenErrClear_LSB 0x6
+#define QIB_6120_ErrClear_RcvShortPktLenErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_RcvLongPktLenErrClear_LSB 0x5
+#define QIB_6120_ErrClear_RcvLongPktLenErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_RcvMaxPktLenErrClear_LSB 0x4
+#define QIB_6120_ErrClear_RcvMaxPktLenErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_RcvMinPktLenErrClear_LSB 0x3
+#define QIB_6120_ErrClear_RcvMinPktLenErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_RcvICRCErrClear_LSB 0x2
+#define QIB_6120_ErrClear_RcvICRCErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_RcvVCRCErrClear_LSB 0x1
+#define QIB_6120_ErrClear_RcvVCRCErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_RcvFormatErrClear_LSB 0x0
+#define QIB_6120_ErrClear_RcvFormatErrClear_RMASK 0x1
+
+#define QIB_6120_HwErrMask_OFFS 0x98
+#define QIB_6120_HwErrMask_IBCBusFromSPCParityErrMask_LSB 0x3F
+#define QIB_6120_HwErrMask_IBCBusFromSPCParityErrMask_RMASK 0x1
+#define QIB_6120_HwErrMask_IBCBusToSPCParityErrMask_LSB 0x3E
+#define QIB_6120_HwErrMask_IBCBusToSPCParityErrMask_RMASK 0x1
+#define QIB_6120_HwErrMask_Reserved_LSB 0x3D
+#define QIB_6120_HwErrMask_Reserved_RMASK 0x1
+#define QIB_6120_HwErrMask_IBSerdesPClkNotDetectMask_LSB 0x3C
+#define QIB_6120_HwErrMask_IBSerdesPClkNotDetectMask_RMASK 0x1
+#define QIB_6120_HwErrMask_PCIESerdesQ0PClkNotDetectMask_LSB 0x3B
+#define QIB_6120_HwErrMask_PCIESerdesQ0PClkNotDetectMask_RMASK 0x1
+#define QIB_6120_HwErrMask_PCIESerdesQ1PClkNotDetectMask_LSB 0x3A
+#define QIB_6120_HwErrMask_PCIESerdesQ1PClkNotDetectMask_RMASK 0x1
+#define QIB_6120_HwErrMask_Reserved1_LSB 0x39
+#define QIB_6120_HwErrMask_Reserved1_RMASK 0x1
+#define QIB_6120_HwErrMask_IBPLLrfSlipMask_LSB 0x38
+#define QIB_6120_HwErrMask_IBPLLrfSlipMask_RMASK 0x1
+#define QIB_6120_HwErrMask_IBPLLfbSlipMask_LSB 0x37
+#define QIB_6120_HwErrMask_IBPLLfbSlipMask_RMASK 0x1
+#define QIB_6120_HwErrMask_PowerOnBISTFailedMask_LSB 0x36
+#define QIB_6120_HwErrMask_PowerOnBISTFailedMask_RMASK 0x1
+#define QIB_6120_HwErrMask_Reserved2_LSB 0x33
+#define QIB_6120_HwErrMask_Reserved2_RMASK 0x7
+#define QIB_6120_HwErrMask_RXEMemParityErrMask_LSB 0x2C
+#define QIB_6120_HwErrMask_RXEMemParityErrMask_RMASK 0x7F
+#define QIB_6120_HwErrMask_TXEMemParityErrMask_LSB 0x28
+#define QIB_6120_HwErrMask_TXEMemParityErrMask_RMASK 0xF
+#define QIB_6120_HwErrMask_Reserved3_LSB 0x22
+#define QIB_6120_HwErrMask_Reserved3_RMASK 0x3F
+#define QIB_6120_HwErrMask_PCIeBusParityErrMask_LSB 0x1F
+#define QIB_6120_HwErrMask_PCIeBusParityErrMask_RMASK 0x7
+#define QIB_6120_HwErrMask_PcieCplTimeoutMask_LSB 0x1E
+#define QIB_6120_HwErrMask_PcieCplTimeoutMask_RMASK 0x1
+#define QIB_6120_HwErrMask_PoisonedTLPMask_LSB 0x1D
+#define QIB_6120_HwErrMask_PoisonedTLPMask_RMASK 0x1
+#define QIB_6120_HwErrMask_Reserved4_LSB 0x6
+#define QIB_6120_HwErrMask_Reserved4_RMASK 0x7FFFFF
+#define QIB_6120_HwErrMask_PCIeMemParityErrMask_LSB 0x0
+#define QIB_6120_HwErrMask_PCIeMemParityErrMask_RMASK 0x3F
+
+#define QIB_6120_HwErrStatus_OFFS 0xA0
+#define QIB_6120_HwErrStatus_IBCBusFromSPCParityErr_LSB 0x3F
+#define QIB_6120_HwErrStatus_IBCBusFromSPCParityErr_RMASK 0x1
+#define QIB_6120_HwErrStatus_IBCBusToSPCParityErr_LSB 0x3E
+#define QIB_6120_HwErrStatus_IBCBusToSPCParityErr_RMASK 0x1
+#define QIB_6120_HwErrStatus_Reserved_LSB 0x3D
+#define QIB_6120_HwErrStatus_Reserved_RMASK 0x1
+#define QIB_6120_HwErrStatus_IBSerdesPClkNotDetect_LSB 0x3C
+#define QIB_6120_HwErrStatus_IBSerdesPClkNotDetect_RMASK 0x1
+#define QIB_6120_HwErrStatus_PCIESerdesQ0PClkNotDetect_LSB 0x3B
+#define QIB_6120_HwErrStatus_PCIESerdesQ0PClkNotDetect_RMASK 0x1
+#define QIB_6120_HwErrStatus_PCIESerdesQ1PClkNotDetect_LSB 0x3A
+#define QIB_6120_HwErrStatus_PCIESerdesQ1PClkNotDetect_RMASK 0x1
+#define QIB_6120_HwErrStatus_Reserved1_LSB 0x39
+#define QIB_6120_HwErrStatus_Reserved1_RMASK 0x1
+#define QIB_6120_HwErrStatus_IBPLLrfSlip_LSB 0x38
+#define QIB_6120_HwErrStatus_IBPLLrfSlip_RMASK 0x1
+#define QIB_6120_HwErrStatus_IBPLLfbSlip_LSB 0x37
+#define QIB_6120_HwErrStatus_IBPLLfbSlip_RMASK 0x1
+#define QIB_6120_HwErrStatus_PowerOnBISTFailed_LSB 0x36
+#define QIB_6120_HwErrStatus_PowerOnBISTFailed_RMASK 0x1
+#define QIB_6120_HwErrStatus_Reserved2_LSB 0x33
+#define QIB_6120_HwErrStatus_Reserved2_RMASK 0x7
+#define QIB_6120_HwErrStatus_RXEMemParity_LSB 0x2C
+#define QIB_6120_HwErrStatus_RXEMemParity_RMASK 0x7F
+#define QIB_6120_HwErrStatus_TXEMemParity_LSB 0x28
+#define QIB_6120_HwErrStatus_TXEMemParity_RMASK 0xF
+#define QIB_6120_HwErrStatus_Reserved3_LSB 0x22
+#define QIB_6120_HwErrStatus_Reserved3_RMASK 0x3F
+#define QIB_6120_HwErrStatus_PCIeBusParity_LSB 0x1F
+#define QIB_6120_HwErrStatus_PCIeBusParity_RMASK 0x7
+#define QIB_6120_HwErrStatus_PcieCplTimeout_LSB 0x1E
+#define QIB_6120_HwErrStatus_PcieCplTimeout_RMASK 0x1
+#define QIB_6120_HwErrStatus_PoisenedTLP_LSB 0x1D
+#define QIB_6120_HwErrStatus_PoisenedTLP_RMASK 0x1
+#define QIB_6120_HwErrStatus_Reserved4_LSB 0x6
+#define QIB_6120_HwErrStatus_Reserved4_RMASK 0x7FFFFF
+#define QIB_6120_HwErrStatus_PCIeMemParity_LSB 0x0
+#define QIB_6120_HwErrStatus_PCIeMemParity_RMASK 0x3F
+
+#define QIB_6120_HwErrClear_OFFS 0xA8
+#define QIB_6120_HwErrClear_IBCBusFromSPCParityErrClear_LSB 0x3F
+#define QIB_6120_HwErrClear_IBCBusFromSPCParityErrClear_RMASK 0x1
+#define QIB_6120_HwErrClear_IBCBusToSPCparityErrClear_LSB 0x3E
+#define QIB_6120_HwErrClear_IBCBusToSPCparityErrClear_RMASK 0x1
+#define QIB_6120_HwErrClear_Reserved_LSB 0x3D
+#define QIB_6120_HwErrClear_Reserved_RMASK 0x1
+#define QIB_6120_HwErrClear_IBSerdesPClkNotDetectClear_LSB 0x3C
+#define QIB_6120_HwErrClear_IBSerdesPClkNotDetectClear_RMASK 0x1
+#define QIB_6120_HwErrClear_PCIESerdesQ0PClkNotDetectClear_LSB 0x3B
+#define QIB_6120_HwErrClear_PCIESerdesQ0PClkNotDetectClear_RMASK 0x1
+#define QIB_6120_HwErrClear_PCIESerdesQ1PClkNotDetectClear_LSB 0x3A
+#define QIB_6120_HwErrClear_PCIESerdesQ1PClkNotDetectClear_RMASK 0x1
+#define QIB_6120_HwErrClear_Reserved1_LSB 0x39
+#define QIB_6120_HwErrClear_Reserved1_RMASK 0x1
+#define QIB_6120_HwErrClear_IBPLLrfSlipClear_LSB 0x38
+#define QIB_6120_HwErrClear_IBPLLrfSlipClear_RMASK 0x1
+#define QIB_6120_HwErrClear_IBPLLfbSlipClear_LSB 0x37
+#define QIB_6120_HwErrClear_IBPLLfbSlipClear_RMASK 0x1
+#define QIB_6120_HwErrClear_PowerOnBISTFailedClear_LSB 0x36
+#define QIB_6120_HwErrClear_PowerOnBISTFailedClear_RMASK 0x1
+#define QIB_6120_HwErrClear_Reserved2_LSB 0x33
+#define QIB_6120_HwErrClear_Reserved2_RMASK 0x7
+#define QIB_6120_HwErrClear_RXEMemParityClear_LSB 0x2C
+#define QIB_6120_HwErrClear_RXEMemParityClear_RMASK 0x7F
+#define QIB_6120_HwErrClear_TXEMemParityClear_LSB 0x28
+#define QIB_6120_HwErrClear_TXEMemParityClear_RMASK 0xF
+#define QIB_6120_HwErrClear_Reserved3_LSB 0x22
+#define QIB_6120_HwErrClear_Reserved3_RMASK 0x3F
+#define QIB_6120_HwErrClear_PCIeBusParityClr_LSB 0x1F
+#define QIB_6120_HwErrClear_PCIeBusParityClr_RMASK 0x7
+#define QIB_6120_HwErrClear_PcieCplTimeoutClear_LSB 0x1E
+#define QIB_6120_HwErrClear_PcieCplTimeoutClear_RMASK 0x1
+#define QIB_6120_HwErrClear_PoisonedTLPClear_LSB 0x1D
+#define QIB_6120_HwErrClear_PoisonedTLPClear_RMASK 0x1
+#define QIB_6120_HwErrClear_Reserved4_LSB 0x6
+#define QIB_6120_HwErrClear_Reserved4_RMASK 0x7FFFFF
+#define QIB_6120_HwErrClear_PCIeMemParityClr_LSB 0x0
+#define QIB_6120_HwErrClear_PCIeMemParityClr_RMASK 0x3F
+
+#define QIB_6120_HwDiagCtrl_OFFS 0xB0
+#define QIB_6120_HwDiagCtrl_ForceIBCBusFromSPCParityErr_LSB 0x3F
+#define QIB_6120_HwDiagCtrl_ForceIBCBusFromSPCParityErr_RMASK 0x1
+#define QIB_6120_HwDiagCtrl_ForceIBCBusToSPCParityErr_LSB 0x3E
+#define QIB_6120_HwDiagCtrl_ForceIBCBusToSPCParityErr_RMASK 0x1
+#define QIB_6120_HwDiagCtrl_CounterWrEnable_LSB 0x3D
+#define QIB_6120_HwDiagCtrl_CounterWrEnable_RMASK 0x1
+#define QIB_6120_HwDiagCtrl_CounterDisable_LSB 0x3C
+#define QIB_6120_HwDiagCtrl_CounterDisable_RMASK 0x1
+#define QIB_6120_HwDiagCtrl_Reserved_LSB 0x33
+#define QIB_6120_HwDiagCtrl_Reserved_RMASK 0x1FF
+#define QIB_6120_HwDiagCtrl_ForceRxMemParityErr_LSB 0x2C
+#define QIB_6120_HwDiagCtrl_ForceRxMemParityErr_RMASK 0x7F
+#define QIB_6120_HwDiagCtrl_ForceTxMemparityErr_LSB 0x28
+#define QIB_6120_HwDiagCtrl_ForceTxMemparityErr_RMASK 0xF
+#define QIB_6120_HwDiagCtrl_Reserved1_LSB 0x23
+#define QIB_6120_HwDiagCtrl_Reserved1_RMASK 0x1F
+#define QIB_6120_HwDiagCtrl_forcePCIeBusParity_LSB 0x1F
+#define QIB_6120_HwDiagCtrl_forcePCIeBusParity_RMASK 0xF
+#define QIB_6120_HwDiagCtrl_Reserved2_LSB 0x6
+#define QIB_6120_HwDiagCtrl_Reserved2_RMASK 0x1FFFFFF
+#define QIB_6120_HwDiagCtrl_forcePCIeMemParity_LSB 0x0
+#define QIB_6120_HwDiagCtrl_forcePCIeMemParity_RMASK 0x3F
+
+#define QIB_6120_IBCStatus_OFFS 0xC0
+#define QIB_6120_IBCStatus_TxCreditOk_LSB 0x1F
+#define QIB_6120_IBCStatus_TxCreditOk_RMASK 0x1
+#define QIB_6120_IBCStatus_TxReady_LSB 0x1E
+#define QIB_6120_IBCStatus_TxReady_RMASK 0x1
+#define QIB_6120_IBCStatus_Reserved_LSB 0x7
+#define QIB_6120_IBCStatus_Reserved_RMASK 0x7FFFFF
+#define QIB_6120_IBCStatus_LinkState_LSB 0x4
+#define QIB_6120_IBCStatus_LinkState_RMASK 0x7
+#define QIB_6120_IBCStatus_LinkTrainingState_LSB 0x0
+#define QIB_6120_IBCStatus_LinkTrainingState_RMASK 0xF
+
+#define QIB_6120_IBCCtrl_OFFS 0xC8
+#define QIB_6120_IBCCtrl_Loopback_LSB 0x3F
+#define QIB_6120_IBCCtrl_Loopback_RMASK 0x1
+#define QIB_6120_IBCCtrl_LinkDownDefaultState_LSB 0x3E
+#define QIB_6120_IBCCtrl_LinkDownDefaultState_RMASK 0x1
+#define QIB_6120_IBCCtrl_Reserved_LSB 0x2B
+#define QIB_6120_IBCCtrl_Reserved_RMASK 0x7FFFF
+#define QIB_6120_IBCCtrl_CreditScale_LSB 0x28
+#define QIB_6120_IBCCtrl_CreditScale_RMASK 0x7
+#define QIB_6120_IBCCtrl_OverrunThreshold_LSB 0x24
+#define QIB_6120_IBCCtrl_OverrunThreshold_RMASK 0xF
+#define QIB_6120_IBCCtrl_PhyerrThreshold_LSB 0x20
+#define QIB_6120_IBCCtrl_PhyerrThreshold_RMASK 0xF
+#define QIB_6120_IBCCtrl_Reserved1_LSB 0x1F
+#define QIB_6120_IBCCtrl_Reserved1_RMASK 0x1
+#define QIB_6120_IBCCtrl_MaxPktLen_LSB 0x14
+#define QIB_6120_IBCCtrl_MaxPktLen_RMASK 0x7FF
+#define QIB_6120_IBCCtrl_LinkCmd_LSB 0x12
+#define QIB_6120_IBCCtrl_LinkCmd_RMASK 0x3
+#define QIB_6120_IBCCtrl_LinkInitCmd_LSB 0x10
+#define QIB_6120_IBCCtrl_LinkInitCmd_RMASK 0x3
+#define QIB_6120_IBCCtrl_FlowCtrlWaterMark_LSB 0x8
+#define QIB_6120_IBCCtrl_FlowCtrlWaterMark_RMASK 0xFF
+#define QIB_6120_IBCCtrl_FlowCtrlPeriod_LSB 0x0
+#define QIB_6120_IBCCtrl_FlowCtrlPeriod_RMASK 0xFF
+
+#define QIB_6120_EXTStatus_OFFS 0xD0
+#define QIB_6120_EXTStatus_GPIOIn_LSB 0x30
+#define QIB_6120_EXTStatus_GPIOIn_RMASK 0xFFFF
+#define QIB_6120_EXTStatus_Reserved_LSB 0x20
+#define QIB_6120_EXTStatus_Reserved_RMASK 0xFFFF
+#define QIB_6120_EXTStatus_Reserved1_LSB 0x10
+#define QIB_6120_EXTStatus_Reserved1_RMASK 0xFFFF
+#define QIB_6120_EXTStatus_MemBISTFoundErr_LSB 0xF
+#define QIB_6120_EXTStatus_MemBISTFoundErr_RMASK 0x1
+#define QIB_6120_EXTStatus_MemBISTEndTest_LSB 0xE
+#define QIB_6120_EXTStatus_MemBISTEndTest_RMASK 0x1
+#define QIB_6120_EXTStatus_Reserved2_LSB 0x0
+#define QIB_6120_EXTStatus_Reserved2_RMASK 0x3FFF
+
+#define QIB_6120_EXTCtrl_OFFS 0xD8
+#define QIB_6120_EXTCtrl_GPIOOe_LSB 0x30
+#define QIB_6120_EXTCtrl_GPIOOe_RMASK 0xFFFF
+#define QIB_6120_EXTCtrl_GPIOInvert_LSB 0x20
+#define QIB_6120_EXTCtrl_GPIOInvert_RMASK 0xFFFF
+#define QIB_6120_EXTCtrl_Reserved_LSB 0x4
+#define QIB_6120_EXTCtrl_Reserved_RMASK 0xFFFFFFF
+#define QIB_6120_EXTCtrl_LEDPriPortGreenOn_LSB 0x3
+#define QIB_6120_EXTCtrl_LEDPriPortGreenOn_RMASK 0x1
+#define QIB_6120_EXTCtrl_LEDPriPortYellowOn_LSB 0x2
+#define QIB_6120_EXTCtrl_LEDPriPortYellowOn_RMASK 0x1
+#define QIB_6120_EXTCtrl_LEDGblOkGreenOn_LSB 0x1
+#define QIB_6120_EXTCtrl_LEDGblOkGreenOn_RMASK 0x1
+#define QIB_6120_EXTCtrl_LEDGblErrRedOff_LSB 0x0
+#define QIB_6120_EXTCtrl_LEDGblErrRedOff_RMASK 0x1
+
+#define QIB_6120_GPIOOut_OFFS 0xE0
+
+#define QIB_6120_GPIOMask_OFFS 0xE8
+
+#define QIB_6120_GPIOStatus_OFFS 0xF0
+
+#define QIB_6120_GPIOClear_OFFS 0xF8
+
+#define QIB_6120_RcvCtrl_OFFS 0x100
+#define QIB_6120_RcvCtrl_TailUpd_LSB 0x1F
+#define QIB_6120_RcvCtrl_TailUpd_RMASK 0x1
+#define QIB_6120_RcvCtrl_RcvPartitionKeyDisable_LSB 0x1E
+#define QIB_6120_RcvCtrl_RcvPartitionKeyDisable_RMASK 0x1
+#define QIB_6120_RcvCtrl_Reserved_LSB 0x15
+#define QIB_6120_RcvCtrl_Reserved_RMASK 0x1FF
+#define QIB_6120_RcvCtrl_IntrAvail_LSB 0x10
+#define QIB_6120_RcvCtrl_IntrAvail_RMASK 0x1F
+#define QIB_6120_RcvCtrl_Reserved1_LSB 0x9
+#define QIB_6120_RcvCtrl_Reserved1_RMASK 0x7F
+#define QIB_6120_RcvCtrl_Reserved2_LSB 0x5
+#define QIB_6120_RcvCtrl_Reserved2_RMASK 0xF
+#define QIB_6120_RcvCtrl_PortEnable_LSB 0x0
+#define QIB_6120_RcvCtrl_PortEnable_RMASK 0x1F
+
+#define QIB_6120_RcvBTHQP_OFFS 0x108
+#define QIB_6120_RcvBTHQP_BTHQP_Mask_LSB 0x1E
+#define QIB_6120_RcvBTHQP_BTHQP_Mask_RMASK 0x3
+#define QIB_6120_RcvBTHQP_Reserved_LSB 0x18
+#define QIB_6120_RcvBTHQP_Reserved_RMASK 0x3F
+#define QIB_6120_RcvBTHQP_RcvBTHQP_LSB 0x0
+#define QIB_6120_RcvBTHQP_RcvBTHQP_RMASK 0xFFFFFF
+
+#define QIB_6120_RcvHdrSize_OFFS 0x110
+
+#define QIB_6120_RcvHdrCnt_OFFS 0x118
+
+#define QIB_6120_RcvHdrEntSize_OFFS 0x120
+
+#define QIB_6120_RcvTIDBase_OFFS 0x128
+
+#define QIB_6120_RcvTIDCnt_OFFS 0x130
+
+#define QIB_6120_RcvEgrBase_OFFS 0x138
+
+#define QIB_6120_RcvEgrCnt_OFFS 0x140
+
+#define QIB_6120_RcvBufBase_OFFS 0x148
+
+#define QIB_6120_RcvBufSize_OFFS 0x150
+
+#define QIB_6120_RxIntMemBase_OFFS 0x158
+
+#define QIB_6120_RxIntMemSize_OFFS 0x160
+
+#define QIB_6120_RcvPartitionKey_OFFS 0x168
+
+#define QIB_6120_RcvPktLEDCnt_OFFS 0x178
+#define QIB_6120_RcvPktLEDCnt_ONperiod_LSB 0x20
+#define QIB_6120_RcvPktLEDCnt_ONperiod_RMASK 0xFFFFFFFF
+#define QIB_6120_RcvPktLEDCnt_OFFperiod_LSB 0x0
+#define QIB_6120_RcvPktLEDCnt_OFFperiod_RMASK 0xFFFFFFFF
+
+#define QIB_6120_SendCtrl_OFFS 0x1C0
+#define QIB_6120_SendCtrl_Disarm_LSB 0x1F
+#define QIB_6120_SendCtrl_Disarm_RMASK 0x1
+#define QIB_6120_SendCtrl_Reserved_LSB 0x17
+#define QIB_6120_SendCtrl_Reserved_RMASK 0xFF
+#define QIB_6120_SendCtrl_DisarmPIOBuf_LSB 0x10
+#define QIB_6120_SendCtrl_DisarmPIOBuf_RMASK 0x7F
+#define QIB_6120_SendCtrl_Reserved1_LSB 0x4
+#define QIB_6120_SendCtrl_Reserved1_RMASK 0xFFF
+#define QIB_6120_SendCtrl_PIOEnable_LSB 0x3
+#define QIB_6120_SendCtrl_PIOEnable_RMASK 0x1
+#define QIB_6120_SendCtrl_PIOBufAvailUpd_LSB 0x2
+#define QIB_6120_SendCtrl_PIOBufAvailUpd_RMASK 0x1
+#define QIB_6120_SendCtrl_PIOIntBufAvail_LSB 0x1
+#define QIB_6120_SendCtrl_PIOIntBufAvail_RMASK 0x1
+#define QIB_6120_SendCtrl_Abort_LSB 0x0
+#define QIB_6120_SendCtrl_Abort_RMASK 0x1
+
+#define QIB_6120_SendPIOBufBase_OFFS 0x1C8
+#define QIB_6120_SendPIOBufBase_Reserved_LSB 0x35
+#define QIB_6120_SendPIOBufBase_Reserved_RMASK 0x7FF
+#define QIB_6120_SendPIOBufBase_BaseAddr_LargePIO_LSB 0x20
+#define QIB_6120_SendPIOBufBase_BaseAddr_LargePIO_RMASK 0x1FFFFF
+#define QIB_6120_SendPIOBufBase_Reserved1_LSB 0x15
+#define QIB_6120_SendPIOBufBase_Reserved1_RMASK 0x7FF
+#define QIB_6120_SendPIOBufBase_BaseAddr_SmallPIO_LSB 0x0
+#define QIB_6120_SendPIOBufBase_BaseAddr_SmallPIO_RMASK 0x1FFFFF
+
+#define QIB_6120_SendPIOSize_OFFS 0x1D0
+#define QIB_6120_SendPIOSize_Reserved_LSB 0x2D
+#define QIB_6120_SendPIOSize_Reserved_RMASK 0xFFFFF
+#define QIB_6120_SendPIOSize_Size_LargePIO_LSB 0x20
+#define QIB_6120_SendPIOSize_Size_LargePIO_RMASK 0x1FFF
+#define QIB_6120_SendPIOSize_Reserved1_LSB 0xC
+#define QIB_6120_SendPIOSize_Reserved1_RMASK 0xFFFFF
+#define QIB_6120_SendPIOSize_Size_SmallPIO_LSB 0x0
+#define QIB_6120_SendPIOSize_Size_SmallPIO_RMASK 0xFFF
+
+#define QIB_6120_SendPIOBufCnt_OFFS 0x1D8
+#define QIB_6120_SendPIOBufCnt_Reserved_LSB 0x24
+#define QIB_6120_SendPIOBufCnt_Reserved_RMASK 0xFFFFFFF
+#define QIB_6120_SendPIOBufCnt_Num_LargePIO_LSB 0x20
+#define QIB_6120_SendPIOBufCnt_Num_LargePIO_RMASK 0xF
+#define QIB_6120_SendPIOBufCnt_Reserved1_LSB 0x9
+#define QIB_6120_SendPIOBufCnt_Reserved1_RMASK 0x7FFFFF
+#define QIB_6120_SendPIOBufCnt_Num_SmallPIO_LSB 0x0
+#define QIB_6120_SendPIOBufCnt_Num_SmallPIO_RMASK 0x1FF
+
+#define QIB_6120_SendPIOAvailAddr_OFFS 0x1E0
+#define QIB_6120_SendPIOAvailAddr_SendPIOAvailAddr_LSB 0x6
+#define QIB_6120_SendPIOAvailAddr_SendPIOAvailAddr_RMASK 0x3FFFFFFFF
+#define QIB_6120_SendPIOAvailAddr_Reserved_LSB 0x0
+#define QIB_6120_SendPIOAvailAddr_Reserved_RMASK 0x3F
+
+#define QIB_6120_SendBufErr0_OFFS 0x240
+#define QIB_6120_SendBufErr0_SendBufErrPIO_63_0_LSB 0x0
+#define QIB_6120_SendBufErr0_SendBufErrPIO_63_0_RMASK 0x0
+
+#define QIB_6120_RcvHdrAddr0_OFFS 0x280
+#define QIB_6120_RcvHdrAddr0_RcvHdrAddr0_LSB 0x2
+#define QIB_6120_RcvHdrAddr0_RcvHdrAddr0_RMASK 0x3FFFFFFFFF
+#define QIB_6120_RcvHdrAddr0_Reserved_LSB 0x0
+#define QIB_6120_RcvHdrAddr0_Reserved_RMASK 0x3
+
+#define QIB_6120_RcvHdrTailAddr0_OFFS 0x300
+#define QIB_6120_RcvHdrTailAddr0_RcvHdrTailAddr0_LSB 0x2
+#define QIB_6120_RcvHdrTailAddr0_RcvHdrTailAddr0_RMASK 0x3FFFFFFFFF
+#define QIB_6120_RcvHdrTailAddr0_Reserved_LSB 0x0
+#define QIB_6120_RcvHdrTailAddr0_Reserved_RMASK 0x3
+
+#define QIB_6120_SerdesCfg0_OFFS 0x3C0
+#define QIB_6120_SerdesCfg0_DisableIBTxIdleDetect_LSB 0x3F
+#define QIB_6120_SerdesCfg0_DisableIBTxIdleDetect_RMASK 0x1
+#define QIB_6120_SerdesCfg0_Reserved_LSB 0x38
+#define QIB_6120_SerdesCfg0_Reserved_RMASK 0x7F
+#define QIB_6120_SerdesCfg0_RxEqCtl_LSB 0x36
+#define QIB_6120_SerdesCfg0_RxEqCtl_RMASK 0x3
+#define QIB_6120_SerdesCfg0_TxTermAdj_LSB 0x34
+#define QIB_6120_SerdesCfg0_TxTermAdj_RMASK 0x3
+#define QIB_6120_SerdesCfg0_RxTermAdj_LSB 0x32
+#define QIB_6120_SerdesCfg0_RxTermAdj_RMASK 0x3
+#define QIB_6120_SerdesCfg0_TermAdj1_LSB 0x31
+#define QIB_6120_SerdesCfg0_TermAdj1_RMASK 0x1
+#define QIB_6120_SerdesCfg0_TermAdj0_LSB 0x30
+#define QIB_6120_SerdesCfg0_TermAdj0_RMASK 0x1
+#define QIB_6120_SerdesCfg0_LPBKA_LSB 0x2F
+#define QIB_6120_SerdesCfg0_LPBKA_RMASK 0x1
+#define QIB_6120_SerdesCfg0_LPBKB_LSB 0x2E
+#define QIB_6120_SerdesCfg0_LPBKB_RMASK 0x1
+#define QIB_6120_SerdesCfg0_LPBKC_LSB 0x2D
+#define QIB_6120_SerdesCfg0_LPBKC_RMASK 0x1
+#define QIB_6120_SerdesCfg0_LPBKD_LSB 0x2C
+#define QIB_6120_SerdesCfg0_LPBKD_RMASK 0x1
+#define QIB_6120_SerdesCfg0_PW_LSB 0x2B
+#define QIB_6120_SerdesCfg0_PW_RMASK 0x1
+#define QIB_6120_SerdesCfg0_RefSel_LSB 0x29
+#define QIB_6120_SerdesCfg0_RefSel_RMASK 0x3
+#define QIB_6120_SerdesCfg0_ParReset_LSB 0x28
+#define QIB_6120_SerdesCfg0_ParReset_RMASK 0x1
+#define QIB_6120_SerdesCfg0_ParLPBK_LSB 0x27
+#define QIB_6120_SerdesCfg0_ParLPBK_RMASK 0x1
+#define QIB_6120_SerdesCfg0_OffsetEn_LSB 0x26
+#define QIB_6120_SerdesCfg0_OffsetEn_RMASK 0x1
+#define QIB_6120_SerdesCfg0_Offset_LSB 0x1E
+#define QIB_6120_SerdesCfg0_Offset_RMASK 0xFF
+#define QIB_6120_SerdesCfg0_L2PwrDn_LSB 0x1D
+#define QIB_6120_SerdesCfg0_L2PwrDn_RMASK 0x1
+#define QIB_6120_SerdesCfg0_ResetPLL_LSB 0x1C
+#define QIB_6120_SerdesCfg0_ResetPLL_RMASK 0x1
+#define QIB_6120_SerdesCfg0_RxTermEnX_LSB 0x18
+#define QIB_6120_SerdesCfg0_RxTermEnX_RMASK 0xF
+#define QIB_6120_SerdesCfg0_BeaconTxEnX_LSB 0x14
+#define QIB_6120_SerdesCfg0_BeaconTxEnX_RMASK 0xF
+#define QIB_6120_SerdesCfg0_RxDetEnX_LSB 0x10
+#define QIB_6120_SerdesCfg0_RxDetEnX_RMASK 0xF
+#define QIB_6120_SerdesCfg0_TxIdeEnX_LSB 0xC
+#define QIB_6120_SerdesCfg0_TxIdeEnX_RMASK 0xF
+#define QIB_6120_SerdesCfg0_RxIdleEnX_LSB 0x8
+#define QIB_6120_SerdesCfg0_RxIdleEnX_RMASK 0xF
+#define QIB_6120_SerdesCfg0_L1PwrDnA_LSB 0x7
+#define QIB_6120_SerdesCfg0_L1PwrDnA_RMASK 0x1
+#define QIB_6120_SerdesCfg0_L1PwrDnB_LSB 0x6
+#define QIB_6120_SerdesCfg0_L1PwrDnB_RMASK 0x1
+#define QIB_6120_SerdesCfg0_L1PwrDnC_LSB 0x5
+#define QIB_6120_SerdesCfg0_L1PwrDnC_RMASK 0x1
+#define QIB_6120_SerdesCfg0_L1PwrDnD_LSB 0x4
+#define QIB_6120_SerdesCfg0_L1PwrDnD_RMASK 0x1
+#define QIB_6120_SerdesCfg0_ResetA_LSB 0x3
+#define QIB_6120_SerdesCfg0_ResetA_RMASK 0x1
+#define QIB_6120_SerdesCfg0_ResetB_LSB 0x2
+#define QIB_6120_SerdesCfg0_ResetB_RMASK 0x1
+#define QIB_6120_SerdesCfg0_ResetC_LSB 0x1
+#define QIB_6120_SerdesCfg0_ResetC_RMASK 0x1
+#define QIB_6120_SerdesCfg0_ResetD_LSB 0x0
+#define QIB_6120_SerdesCfg0_ResetD_RMASK 0x1
+
+#define QIB_6120_SerdesStat_OFFS 0x3D0
+#define QIB_6120_SerdesStat_Reserved_LSB 0xC
+#define QIB_6120_SerdesStat_Reserved_RMASK 0xFFFFFFFFFFFFF
+#define QIB_6120_SerdesStat_BeaconDetA_LSB 0xB
+#define QIB_6120_SerdesStat_BeaconDetA_RMASK 0x1
+#define QIB_6120_SerdesStat_BeaconDetB_LSB 0xA
+#define QIB_6120_SerdesStat_BeaconDetB_RMASK 0x1
+#define QIB_6120_SerdesStat_BeaconDetC_LSB 0x9
+#define QIB_6120_SerdesStat_BeaconDetC_RMASK 0x1
+#define QIB_6120_SerdesStat_BeaconDetD_LSB 0x8
+#define QIB_6120_SerdesStat_BeaconDetD_RMASK 0x1
+#define QIB_6120_SerdesStat_RxDetA_LSB 0x7
+#define QIB_6120_SerdesStat_RxDetA_RMASK 0x1
+#define QIB_6120_SerdesStat_RxDetB_LSB 0x6
+#define QIB_6120_SerdesStat_RxDetB_RMASK 0x1
+#define QIB_6120_SerdesStat_RxDetC_LSB 0x5
+#define QIB_6120_SerdesStat_RxDetC_RMASK 0x1
+#define QIB_6120_SerdesStat_RxDetD_LSB 0x4
+#define QIB_6120_SerdesStat_RxDetD_RMASK 0x1
+#define QIB_6120_SerdesStat_TxIdleDetA_LSB 0x3
+#define QIB_6120_SerdesStat_TxIdleDetA_RMASK 0x1
+#define QIB_6120_SerdesStat_TxIdleDetB_LSB 0x2
+#define QIB_6120_SerdesStat_TxIdleDetB_RMASK 0x1
+#define QIB_6120_SerdesStat_TxIdleDetC_LSB 0x1
+#define QIB_6120_SerdesStat_TxIdleDetC_RMASK 0x1
+#define QIB_6120_SerdesStat_TxIdleDetD_LSB 0x0
+#define QIB_6120_SerdesStat_TxIdleDetD_RMASK 0x1
+
+#define QIB_6120_XGXSCfg_OFFS 0x3D8
+#define QIB_6120_XGXSCfg_ArmLaunchErrorDisable_LSB 0x3F
+#define QIB_6120_XGXSCfg_ArmLaunchErrorDisable_RMASK 0x1
+#define QIB_6120_XGXSCfg_Reserved_LSB 0x17
+#define QIB_6120_XGXSCfg_Reserved_RMASK 0xFFFFFFFFFF
+#define QIB_6120_XGXSCfg_polarity_inv_LSB 0x13
+#define QIB_6120_XGXSCfg_polarity_inv_RMASK 0xF
+#define QIB_6120_XGXSCfg_link_sync_mask_LSB 0x9
+#define QIB_6120_XGXSCfg_link_sync_mask_RMASK 0x3FF
+#define QIB_6120_XGXSCfg_port_addr_LSB 0x4
+#define QIB_6120_XGXSCfg_port_addr_RMASK 0x1F
+#define QIB_6120_XGXSCfg_mdd_30_LSB 0x3
+#define QIB_6120_XGXSCfg_mdd_30_RMASK 0x1
+#define QIB_6120_XGXSCfg_xcv_resetn_LSB 0x2
+#define QIB_6120_XGXSCfg_xcv_resetn_RMASK 0x1
+#define QIB_6120_XGXSCfg_Reserved1_LSB 0x1
+#define QIB_6120_XGXSCfg_Reserved1_RMASK 0x1
+#define QIB_6120_XGXSCfg_tx_rx_resetn_LSB 0x0
+#define QIB_6120_XGXSCfg_tx_rx_resetn_RMASK 0x1
+
+#define QIB_6120_LBIntCnt_OFFS 0x12000
+
+#define QIB_6120_LBFlowStallCnt_OFFS 0x12008
+
+#define QIB_6120_TxUnsupVLErrCnt_OFFS 0x12018
+
+#define QIB_6120_TxDataPktCnt_OFFS 0x12020
+
+#define QIB_6120_TxFlowPktCnt_OFFS 0x12028
+
+#define QIB_6120_TxDwordCnt_OFFS 0x12030
+
+#define QIB_6120_TxLenErrCnt_OFFS 0x12038
+
+#define QIB_6120_TxMaxMinLenErrCnt_OFFS 0x12040
+
+#define QIB_6120_TxUnderrunCnt_OFFS 0x12048
+
+#define QIB_6120_TxFlowStallCnt_OFFS 0x12050
+
+#define QIB_6120_TxDroppedPktCnt_OFFS 0x12058
+
+#define QIB_6120_RxDroppedPktCnt_OFFS 0x12060
+
+#define QIB_6120_RxDataPktCnt_OFFS 0x12068
+
+#define QIB_6120_RxFlowPktCnt_OFFS 0x12070
+
+#define QIB_6120_RxDwordCnt_OFFS 0x12078
+
+#define QIB_6120_RxLenErrCnt_OFFS 0x12080
+
+#define QIB_6120_RxMaxMinLenErrCnt_OFFS 0x12088
+
+#define QIB_6120_RxICRCErrCnt_OFFS 0x12090
+
+#define QIB_6120_RxVCRCErrCnt_OFFS 0x12098
+
+#define QIB_6120_RxFlowCtrlErrCnt_OFFS 0x120A0
+
+#define QIB_6120_RxBadFormatCnt_OFFS 0x120A8
+
+#define QIB_6120_RxLinkProblemCnt_OFFS 0x120B0
+
+#define QIB_6120_RxEBPCnt_OFFS 0x120B8
+
+#define QIB_6120_RxLPCRCErrCnt_OFFS 0x120C0
+
+#define QIB_6120_RxBufOvflCnt_OFFS 0x120C8
+
+#define QIB_6120_RxTIDFullErrCnt_OFFS 0x120D0
+
+#define QIB_6120_RxTIDValidErrCnt_OFFS 0x120D8
+
+#define QIB_6120_RxPKeyMismatchCnt_OFFS 0x120E0
+
+#define QIB_6120_RxP0HdrEgrOvflCnt_OFFS 0x120E8
+
+#define QIB_6120_IBStatusChangeCnt_OFFS 0x12140
+
+#define QIB_6120_IBLinkErrRecoveryCnt_OFFS 0x12148
+
+#define QIB_6120_IBLinkDownedCnt_OFFS 0x12150
+
+#define QIB_6120_IBSymbolErrCnt_OFFS 0x12158
+
+#define QIB_6120_PcieRetryBufDiagQwordCnt_OFFS 0x12170
+
+#define QIB_6120_RcvEgrArray0_OFFS 0x14000
+
+#define QIB_6120_RcvTIDArray0_OFFS 0x54000
+
+#define QIB_6120_PIOLaunchFIFO_OFFS 0x64000
+
+#define QIB_6120_SendPIOpbcCache_OFFS 0x64800
+
+#define QIB_6120_RcvBuf1_OFFS 0x72000
+
+#define QIB_6120_RcvBuf2_OFFS 0x75000
+
+#define QIB_6120_RcvFlags_OFFS 0x77000
+
+#define QIB_6120_RcvLookupBuf1_OFFS 0x79000
+
+#define QIB_6120_RcvDMABuf_OFFS 0x7B000
+
+#define QIB_6120_MiscRXEIntMem_OFFS 0x7C000
+
+#define QIB_6120_PCIERcvBuf_OFFS 0x80000
+
+#define QIB_6120_PCIERetryBuf_OFFS 0x82000
+
+#define QIB_6120_PCIERcvBufRdToWrAddr_OFFS 0x84000
+
+#define QIB_6120_PIOBuf0_MA_OFFS 0x100000
diff --git a/drivers/infiniband/hw/qib/qib_7220.h b/drivers/infiniband/hw/qib/qib_7220.h
new file mode 100644
index 000000000000..ea0bfd896f92
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_7220.h
@@ -0,0 +1,156 @@
+#ifndef _QIB_7220_H
+#define _QIB_7220_H
+/*
+ * Copyright (c) 2007, 2009, 2010 QLogic Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* Pull in the register definitions auto-generated from the hardware RTL. */
+#include "qib_7220_regs.h"
+
+/* The number of eager receive TIDs for context zero. */
+#define IBA7220_KRCVEGRCNT      2048U
+
+#define IB_7220_LT_STATE_CFGRCVFCFG      0x09
+#define IB_7220_LT_STATE_CFGWAITRMT      0x0a
+#define IB_7220_LT_STATE_TXREVLANES      0x0d
+#define IB_7220_LT_STATE_CFGENH          0x10
+
+struct qib_chip_specific {
+	u64 __iomem *cregbase;
+	u64 *cntrs;
+	u64 *portcntrs;
+	spinlock_t sdepb_lock; /* serdes EPB bus */
+	spinlock_t rcvmod_lock; /* protect rcvctrl shadow changes */
+	spinlock_t gpio_lock; /* RMW of shadows/regs for ExtCtrl and GPIO */
+	u64 hwerrmask;
+	u64 errormask;
+	u64 gpio_out; /* shadow of kr_gpio_out, for rmw ops */
+	u64 gpio_mask; /* shadow the gpio mask register */
+	u64 extctrl; /* shadow the gpio output enable, etc... */
+	u32 ncntrs;
+	u32 nportcntrs;
+	u32 cntrnamelen;
+	u32 portcntrnamelen;
+	u32 numctxts;
+	u32 rcvegrcnt;
+	u32 autoneg_tries;
+	u32 serdes_first_init_done;
+	u32 sdmabufcnt;
+	u32 lastbuf_for_pio;
+	u32 updthresh; /* current AvailUpdThld */
+	u32 updthresh_dflt; /* default AvailUpdThld */
+	int irq;
+	u8 presets_needed;
+	u8 relock_timer_active;
+	char emsgbuf[128];
+	char sdmamsgbuf[192];
+	char bitsmsgbuf[64];
+	struct timer_list relock_timer;
+	unsigned int relock_interval; /* in jiffies */
+};
+
+struct qib_chippport_specific {
+	struct qib_pportdata pportdata;
+	wait_queue_head_t autoneg_wait;
+	struct delayed_work autoneg_work;
+	struct timer_list chase_timer;
+	/*
+	 * these 5 fields are used to establish deltas for IB symbol
+	 * errors and linkrecovery errors.  They can be reported on
+	 * some chips during link negotiation prior to INIT, and with
+	 * DDR when faking DDR negotiations with non-IBTA switches.
+	 * The chip counters are adjusted at driver unload if there is
+	 * a non-zero delta.
+	 */
+	u64 ibdeltainprog;
+	u64 ibsymdelta;
+	u64 ibsymsnap;
+	u64 iblnkerrdelta;
+	u64 iblnkerrsnap;
+	u64 ibcctrl; /* kr_ibcctrl shadow */
+	u64 ibcddrctrl; /* kr_ibcddrctrl shadow */
+	u64 chase_end;
+	u32 last_delay_mult;
+};
+
+/*
+ * This header file provides the declarations and common definitions
+ * for (mostly) manipulation of the SerDes blocks within the IBA7220.
+ * The functions declared should only be called from within other
+ * 7220-related files such as qib_iba7220.c or qib_sd7220.c.
+ */
+int qib_sd7220_presets(struct qib_devdata *dd);
+int qib_sd7220_init(struct qib_devdata *dd);
+int qib_sd7220_prog_ld(struct qib_devdata *dd, int sdnum, u8 *img,
+		       int len, int offset);
+int qib_sd7220_prog_vfy(struct qib_devdata *dd, int sdnum, const u8 *img,
+			int len, int offset);
+void qib_sd7220_clr_ibpar(struct qib_devdata *);
+/*
+ * The define below is used for the sdnum parameter, selecting one of
+ * the two SerDes sections used for PCIe, or the single SerDes used
+ * for IB; the IB SerDes is the only one currently used.
+ */
+#define IB_7220_SERDES 2
+
+int qib_sd7220_ib_load(struct qib_devdata *dd);
+int qib_sd7220_ib_vfy(struct qib_devdata *dd);
+
+static inline u32 qib_read_kreg32(const struct qib_devdata *dd,
+				  const u16 regno)
+{
+	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
+		return -1;
+	return readl((u32 __iomem *)&dd->kregbase[regno]);
+}
+
+static inline u64 qib_read_kreg64(const struct qib_devdata *dd,
+				  const u16 regno)
+{
+	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
+		return -1;
+
+	return readq(&dd->kregbase[regno]);
+}
+
+static inline void qib_write_kreg(const struct qib_devdata *dd,
+				  const u16 regno, u64 value)
+{
+	if (dd->kregbase)
+		writeq(value, &dd->kregbase[regno]);
+}
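+/*
+ * Illustrative read-modify-write using the accessors above (the
+ * register index "kr_control" is hypothetical here; the LinkEn field
+ * comes from qib_7220_regs.h):
+ *
+ *	u64 val = qib_read_kreg64(dd, kr_control);
+ *	val |= 1ULL << QIB_7220_Control_LinkEn_LSB;
+ *	qib_write_kreg(dd, kr_control, val);
+ */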
+
+void set_7220_relock_poll(struct qib_devdata *, int);
+void shutdown_7220_relock_poll(struct qib_devdata *);
+void toggle_7220_rclkrls(struct qib_devdata *);
+
+#endif /* _QIB_7220_H */
diff --git a/drivers/infiniband/hw/qib/qib_7220_regs.h b/drivers/infiniband/hw/qib/qib_7220_regs.h
new file mode 100644
index 000000000000..0da5bb750e52
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_7220_regs.h
@@ -0,0 +1,1496 @@
+/*
+ * Copyright (c) 2008, 2009, 2010 QLogic Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+/* This file is mechanically generated from RTL. Any hand-edits will be lost! */
+
+#define QIB_7220_Revision_OFFS 0x0
+#define QIB_7220_Revision_R_Simulator_LSB 0x3F
+#define QIB_7220_Revision_R_Simulator_RMASK 0x1
+#define QIB_7220_Revision_R_Emulation_LSB 0x3E
+#define QIB_7220_Revision_R_Emulation_RMASK 0x1
+#define QIB_7220_Revision_R_Emulation_Revcode_LSB 0x28
+#define QIB_7220_Revision_R_Emulation_Revcode_RMASK 0x3FFFFF
+#define QIB_7220_Revision_BoardID_LSB 0x20
+#define QIB_7220_Revision_BoardID_RMASK 0xFF
+#define QIB_7220_Revision_R_SW_LSB 0x18
+#define QIB_7220_Revision_R_SW_RMASK 0xFF
+#define QIB_7220_Revision_R_Arch_LSB 0x10
+#define QIB_7220_Revision_R_Arch_RMASK 0xFF
+#define QIB_7220_Revision_R_ChipRevMajor_LSB 0x8
+#define QIB_7220_Revision_R_ChipRevMajor_RMASK 0xFF
+#define QIB_7220_Revision_R_ChipRevMinor_LSB 0x0
+#define QIB_7220_Revision_R_ChipRevMinor_RMASK 0xFF
+
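+/*
+ * Illustrative note (not part of the generated output): every field in
+ * this file is described by an _LSB bit position and an _RMASK mask to
+ * apply after shifting, so a field is extracted as
+ * ((reg >> name_LSB) & name_RMASK).  For example, assuming the
+ * driver's convention of indexing registers by OFFS / sizeof(u64):
+ *
+ *	u64 rev = qib_read_kreg64(dd, QIB_7220_Revision_OFFS / sizeof(u64));
+ *	u32 major = (rev >> QIB_7220_Revision_R_ChipRevMajor_LSB) &
+ *		    QIB_7220_Revision_R_ChipRevMajor_RMASK;
+ */
+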
+#define QIB_7220_Control_OFFS 0x8
+#define QIB_7220_Control_SyncResetExceptPcieIRAMRST_LSB 0x7
+#define QIB_7220_Control_SyncResetExceptPcieIRAMRST_RMASK 0x1
+#define QIB_7220_Control_PCIECplQDiagEn_LSB 0x6
+#define QIB_7220_Control_PCIECplQDiagEn_RMASK 0x1
+#define QIB_7220_Control_Reserved_LSB 0x5
+#define QIB_7220_Control_Reserved_RMASK 0x1
+#define QIB_7220_Control_TxLatency_LSB 0x4
+#define QIB_7220_Control_TxLatency_RMASK 0x1
+#define QIB_7220_Control_PCIERetryBufDiagEn_LSB 0x3
+#define QIB_7220_Control_PCIERetryBufDiagEn_RMASK 0x1
+#define QIB_7220_Control_LinkEn_LSB 0x2
+#define QIB_7220_Control_LinkEn_RMASK 0x1
+#define QIB_7220_Control_FreezeMode_LSB 0x1
+#define QIB_7220_Control_FreezeMode_RMASK 0x1
+#define QIB_7220_Control_SyncReset_LSB 0x0
+#define QIB_7220_Control_SyncReset_RMASK 0x1
+
+#define QIB_7220_PageAlign_OFFS 0x10
+
+#define QIB_7220_PortCnt_OFFS 0x18
+
+#define QIB_7220_SendRegBase_OFFS 0x30
+
+#define QIB_7220_UserRegBase_OFFS 0x38
+
+#define QIB_7220_CntrRegBase_OFFS 0x40
+
+#define QIB_7220_Scratch_OFFS 0x48
+
+#define QIB_7220_IntMask_OFFS 0x68
+#define QIB_7220_IntMask_SDmaIntMask_LSB 0x3F
+#define QIB_7220_IntMask_SDmaIntMask_RMASK 0x1
+#define QIB_7220_IntMask_SDmaDisabledMasked_LSB 0x3E
+#define QIB_7220_IntMask_SDmaDisabledMasked_RMASK 0x1
+#define QIB_7220_IntMask_Reserved_LSB 0x31
+#define QIB_7220_IntMask_Reserved_RMASK 0x1FFF
+#define QIB_7220_IntMask_RcvUrg16IntMask_LSB 0x30
+#define QIB_7220_IntMask_RcvUrg16IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvUrg15IntMask_LSB 0x2F
+#define QIB_7220_IntMask_RcvUrg15IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvUrg14IntMask_LSB 0x2E
+#define QIB_7220_IntMask_RcvUrg14IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvUrg13IntMask_LSB 0x2D
+#define QIB_7220_IntMask_RcvUrg13IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvUrg12IntMask_LSB 0x2C
+#define QIB_7220_IntMask_RcvUrg12IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvUrg11IntMask_LSB 0x2B
+#define QIB_7220_IntMask_RcvUrg11IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvUrg10IntMask_LSB 0x2A
+#define QIB_7220_IntMask_RcvUrg10IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvUrg9IntMask_LSB 0x29
+#define QIB_7220_IntMask_RcvUrg9IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvUrg8IntMask_LSB 0x28
+#define QIB_7220_IntMask_RcvUrg8IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvUrg7IntMask_LSB 0x27
+#define QIB_7220_IntMask_RcvUrg7IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvUrg6IntMask_LSB 0x26
+#define QIB_7220_IntMask_RcvUrg6IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvUrg5IntMask_LSB 0x25
+#define QIB_7220_IntMask_RcvUrg5IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvUrg4IntMask_LSB 0x24
+#define QIB_7220_IntMask_RcvUrg4IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvUrg3IntMask_LSB 0x23
+#define QIB_7220_IntMask_RcvUrg3IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvUrg2IntMask_LSB 0x22
+#define QIB_7220_IntMask_RcvUrg2IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvUrg1IntMask_LSB 0x21
+#define QIB_7220_IntMask_RcvUrg1IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvUrg0IntMask_LSB 0x20
+#define QIB_7220_IntMask_RcvUrg0IntMask_RMASK 0x1
+#define QIB_7220_IntMask_ErrorIntMask_LSB 0x1F
+#define QIB_7220_IntMask_ErrorIntMask_RMASK 0x1
+#define QIB_7220_IntMask_PioSetIntMask_LSB 0x1E
+#define QIB_7220_IntMask_PioSetIntMask_RMASK 0x1
+#define QIB_7220_IntMask_PioBufAvailIntMask_LSB 0x1D
+#define QIB_7220_IntMask_PioBufAvailIntMask_RMASK 0x1
+#define QIB_7220_IntMask_assertGPIOIntMask_LSB 0x1C
+#define QIB_7220_IntMask_assertGPIOIntMask_RMASK 0x1
+#define QIB_7220_IntMask_IBSerdesTrimDoneIntMask_LSB 0x1B
+#define QIB_7220_IntMask_IBSerdesTrimDoneIntMask_RMASK 0x1
+#define QIB_7220_IntMask_JIntMask_LSB 0x1A
+#define QIB_7220_IntMask_JIntMask_RMASK 0x1
+#define QIB_7220_IntMask_Reserved1_LSB 0x11
+#define QIB_7220_IntMask_Reserved1_RMASK 0x1FF
+#define QIB_7220_IntMask_RcvAvail16IntMask_LSB 0x10
+#define QIB_7220_IntMask_RcvAvail16IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvAvail15IntMask_LSB 0xF
+#define QIB_7220_IntMask_RcvAvail15IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvAvail14IntMask_LSB 0xE
+#define QIB_7220_IntMask_RcvAvail14IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvAvail13IntMask_LSB 0xD
+#define QIB_7220_IntMask_RcvAvail13IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvAvail12IntMask_LSB 0xC
+#define QIB_7220_IntMask_RcvAvail12IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvAvail11IntMask_LSB 0xB
+#define QIB_7220_IntMask_RcvAvail11IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvAvail10IntMask_LSB 0xA
+#define QIB_7220_IntMask_RcvAvail10IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvAvail9IntMask_LSB 0x9
+#define QIB_7220_IntMask_RcvAvail9IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvAvail8IntMask_LSB 0x8
+#define QIB_7220_IntMask_RcvAvail8IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvAvail7IntMask_LSB 0x7
+#define QIB_7220_IntMask_RcvAvail7IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvAvail6IntMask_LSB 0x6
+#define QIB_7220_IntMask_RcvAvail6IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvAvail5IntMask_LSB 0x5
+#define QIB_7220_IntMask_RcvAvail5IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvAvail4IntMask_LSB 0x4
+#define QIB_7220_IntMask_RcvAvail4IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvAvail3IntMask_LSB 0x3
+#define QIB_7220_IntMask_RcvAvail3IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvAvail2IntMask_LSB 0x2
+#define QIB_7220_IntMask_RcvAvail2IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvAvail1IntMask_LSB 0x1
+#define QIB_7220_IntMask_RcvAvail1IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvAvail0IntMask_LSB 0x0
+#define QIB_7220_IntMask_RcvAvail0IntMask_RMASK 0x1
+
+#define QIB_7220_IntStatus_OFFS 0x70
+#define QIB_7220_IntStatus_SDmaInt_LSB 0x3F
+#define QIB_7220_IntStatus_SDmaInt_RMASK 0x1
+#define QIB_7220_IntStatus_SDmaDisabled_LSB 0x3E
+#define QIB_7220_IntStatus_SDmaDisabled_RMASK 0x1
+#define QIB_7220_IntStatus_Reserved_LSB 0x31
+#define QIB_7220_IntStatus_Reserved_RMASK 0x1FFF
+#define QIB_7220_IntStatus_RcvUrg16_LSB 0x30
+#define QIB_7220_IntStatus_RcvUrg16_RMASK 0x1
+#define QIB_7220_IntStatus_RcvUrg15_LSB 0x2F
+#define QIB_7220_IntStatus_RcvUrg15_RMASK 0x1
+#define QIB_7220_IntStatus_RcvUrg14_LSB 0x2E
+#define QIB_7220_IntStatus_RcvUrg14_RMASK 0x1
+#define QIB_7220_IntStatus_RcvUrg13_LSB 0x2D
+#define QIB_7220_IntStatus_RcvUrg13_RMASK 0x1
+#define QIB_7220_IntStatus_RcvUrg12_LSB 0x2C
+#define QIB_7220_IntStatus_RcvUrg12_RMASK 0x1
+#define QIB_7220_IntStatus_RcvUrg11_LSB 0x2B
+#define QIB_7220_IntStatus_RcvUrg11_RMASK 0x1
+#define QIB_7220_IntStatus_RcvUrg10_LSB 0x2A
+#define QIB_7220_IntStatus_RcvUrg10_RMASK 0x1
+#define QIB_7220_IntStatus_RcvUrg9_LSB 0x29
+#define QIB_7220_IntStatus_RcvUrg9_RMASK 0x1
+#define QIB_7220_IntStatus_RcvUrg8_LSB 0x28
+#define QIB_7220_IntStatus_RcvUrg8_RMASK 0x1
+#define QIB_7220_IntStatus_RcvUrg7_LSB 0x27
+#define QIB_7220_IntStatus_RcvUrg7_RMASK 0x1
+#define QIB_7220_IntStatus_RcvUrg6_LSB 0x26
+#define QIB_7220_IntStatus_RcvUrg6_RMASK 0x1
+#define QIB_7220_IntStatus_RcvUrg5_LSB 0x25
+#define QIB_7220_IntStatus_RcvUrg5_RMASK 0x1
+#define QIB_7220_IntStatus_RcvUrg4_LSB 0x24
+#define QIB_7220_IntStatus_RcvUrg4_RMASK 0x1
+#define QIB_7220_IntStatus_RcvUrg3_LSB 0x23
+#define QIB_7220_IntStatus_RcvUrg3_RMASK 0x1
+#define QIB_7220_IntStatus_RcvUrg2_LSB 0x22
+#define QIB_7220_IntStatus_RcvUrg2_RMASK 0x1
+#define QIB_7220_IntStatus_RcvUrg1_LSB 0x21
+#define QIB_7220_IntStatus_RcvUrg1_RMASK 0x1
+#define QIB_7220_IntStatus_RcvUrg0_LSB 0x20
+#define QIB_7220_IntStatus_RcvUrg0_RMASK 0x1
+#define QIB_7220_IntStatus_Error_LSB 0x1F
+#define QIB_7220_IntStatus_Error_RMASK 0x1
+#define QIB_7220_IntStatus_PioSent_LSB 0x1E
+#define QIB_7220_IntStatus_PioSent_RMASK 0x1
+#define QIB_7220_IntStatus_PioBufAvail_LSB 0x1D
+#define QIB_7220_IntStatus_PioBufAvail_RMASK 0x1
+#define QIB_7220_IntStatus_assertGPIO_LSB 0x1C
+#define QIB_7220_IntStatus_assertGPIO_RMASK 0x1
+#define QIB_7220_IntStatus_IBSerdesTrimDone_LSB 0x1B
+#define QIB_7220_IntStatus_IBSerdesTrimDone_RMASK 0x1
+#define QIB_7220_IntStatus_JInt_LSB 0x1A
+#define QIB_7220_IntStatus_JInt_RMASK 0x1
+#define QIB_7220_IntStatus_Reserved1_LSB 0x11
+#define QIB_7220_IntStatus_Reserved1_RMASK 0x1FF
+#define QIB_7220_IntStatus_RcvAvail16_LSB 0x10
+#define QIB_7220_IntStatus_RcvAvail16_RMASK 0x1
+#define QIB_7220_IntStatus_RcvAvail15_LSB 0xF
+#define QIB_7220_IntStatus_RcvAvail15_RMASK 0x1
+#define QIB_7220_IntStatus_RcvAvail14_LSB 0xE
+#define QIB_7220_IntStatus_RcvAvail14_RMASK 0x1
+#define QIB_7220_IntStatus_RcvAvail13_LSB 0xD
+#define QIB_7220_IntStatus_RcvAvail13_RMASK 0x1
+#define QIB_7220_IntStatus_RcvAvail12_LSB 0xC
+#define QIB_7220_IntStatus_RcvAvail12_RMASK 0x1
+#define QIB_7220_IntStatus_RcvAvail11_LSB 0xB
+#define QIB_7220_IntStatus_RcvAvail11_RMASK 0x1
+#define QIB_7220_IntStatus_RcvAvail10_LSB 0xA
+#define QIB_7220_IntStatus_RcvAvail10_RMASK 0x1
+#define QIB_7220_IntStatus_RcvAvail9_LSB 0x9
+#define QIB_7220_IntStatus_RcvAvail9_RMASK 0x1
+#define QIB_7220_IntStatus_RcvAvail8_LSB 0x8
+#define QIB_7220_IntStatus_RcvAvail8_RMASK 0x1
+#define QIB_7220_IntStatus_RcvAvail7_LSB 0x7
+#define QIB_7220_IntStatus_RcvAvail7_RMASK 0x1
+#define QIB_7220_IntStatus_RcvAvail6_LSB 0x6
+#define QIB_7220_IntStatus_RcvAvail6_RMASK 0x1
+#define QIB_7220_IntStatus_RcvAvail5_LSB 0x5
+#define QIB_7220_IntStatus_RcvAvail5_RMASK 0x1
+#define QIB_7220_IntStatus_RcvAvail4_LSB 0x4
+#define QIB_7220_IntStatus_RcvAvail4_RMASK 0x1
+#define QIB_7220_IntStatus_RcvAvail3_LSB 0x3
+#define QIB_7220_IntStatus_RcvAvail3_RMASK 0x1
+#define QIB_7220_IntStatus_RcvAvail2_LSB 0x2
+#define QIB_7220_IntStatus_RcvAvail2_RMASK 0x1
+#define QIB_7220_IntStatus_RcvAvail1_LSB 0x1
+#define QIB_7220_IntStatus_RcvAvail1_RMASK 0x1
+#define QIB_7220_IntStatus_RcvAvail0_LSB 0x0
+#define QIB_7220_IntStatus_RcvAvail0_RMASK 0x1
+
+#define QIB_7220_IntClear_OFFS 0x78
+#define QIB_7220_IntClear_SDmaIntClear_LSB 0x3F
+#define QIB_7220_IntClear_SDmaIntClear_RMASK 0x1
+#define QIB_7220_IntClear_SDmaDisabledClear_LSB 0x3E
+#define QIB_7220_IntClear_SDmaDisabledClear_RMASK 0x1
+#define QIB_7220_IntClear_Reserved_LSB 0x31
+#define QIB_7220_IntClear_Reserved_RMASK 0x1FFF
+#define QIB_7220_IntClear_RcvUrg16IntClear_LSB 0x30
+#define QIB_7220_IntClear_RcvUrg16IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvUrg15IntClear_LSB 0x2F
+#define QIB_7220_IntClear_RcvUrg15IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvUrg14IntClear_LSB 0x2E
+#define QIB_7220_IntClear_RcvUrg14IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvUrg13IntClear_LSB 0x2D
+#define QIB_7220_IntClear_RcvUrg13IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvUrg12IntClear_LSB 0x2C
+#define QIB_7220_IntClear_RcvUrg12IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvUrg11IntClear_LSB 0x2B
+#define QIB_7220_IntClear_RcvUrg11IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvUrg10IntClear_LSB 0x2A
+#define QIB_7220_IntClear_RcvUrg10IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvUrg9IntClear_LSB 0x29
+#define QIB_7220_IntClear_RcvUrg9IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvUrg8IntClear_LSB 0x28
+#define QIB_7220_IntClear_RcvUrg8IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvUrg7IntClear_LSB 0x27
+#define QIB_7220_IntClear_RcvUrg7IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvUrg6IntClear_LSB 0x26
+#define QIB_7220_IntClear_RcvUrg6IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvUrg5IntClear_LSB 0x25
+#define QIB_7220_IntClear_RcvUrg5IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvUrg4IntClear_LSB 0x24
+#define QIB_7220_IntClear_RcvUrg4IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvUrg3IntClear_LSB 0x23
+#define QIB_7220_IntClear_RcvUrg3IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvUrg2IntClear_LSB 0x22
+#define QIB_7220_IntClear_RcvUrg2IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvUrg1IntClear_LSB 0x21
+#define QIB_7220_IntClear_RcvUrg1IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvUrg0IntClear_LSB 0x20
+#define QIB_7220_IntClear_RcvUrg0IntClear_RMASK 0x1
+#define QIB_7220_IntClear_ErrorIntClear_LSB 0x1F
+#define QIB_7220_IntClear_ErrorIntClear_RMASK 0x1
+#define QIB_7220_IntClear_PioSetIntClear_LSB 0x1E
+#define QIB_7220_IntClear_PioSetIntClear_RMASK 0x1
+#define QIB_7220_IntClear_PioBufAvailIntClear_LSB 0x1D
+#define QIB_7220_IntClear_PioBufAvailIntClear_RMASK 0x1
+#define QIB_7220_IntClear_assertGPIOIntClear_LSB 0x1C
+#define QIB_7220_IntClear_assertGPIOIntClear_RMASK 0x1
+#define QIB_7220_IntClear_IBSerdesTrimDoneClear_LSB 0x1B
+#define QIB_7220_IntClear_IBSerdesTrimDoneClear_RMASK 0x1
+#define QIB_7220_IntClear_JIntClear_LSB 0x1A
+#define QIB_7220_IntClear_JIntClear_RMASK 0x1
+#define QIB_7220_IntClear_Reserved1_LSB 0x11
+#define QIB_7220_IntClear_Reserved1_RMASK 0x1FF
+#define QIB_7220_IntClear_RcvAvail16IntClear_LSB 0x10
+#define QIB_7220_IntClear_RcvAvail16IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvAvail15IntClear_LSB 0xF
+#define QIB_7220_IntClear_RcvAvail15IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvAvail14IntClear_LSB 0xE
+#define QIB_7220_IntClear_RcvAvail14IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvAvail13IntClear_LSB 0xD
+#define QIB_7220_IntClear_RcvAvail13IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvAvail12IntClear_LSB 0xC
+#define QIB_7220_IntClear_RcvAvail12IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvAvail11IntClear_LSB 0xB
+#define QIB_7220_IntClear_RcvAvail11IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvAvail10IntClear_LSB 0xA
+#define QIB_7220_IntClear_RcvAvail10IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvAvail9IntClear_LSB 0x9
+#define QIB_7220_IntClear_RcvAvail9IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvAvail8IntClear_LSB 0x8
+#define QIB_7220_IntClear_RcvAvail8IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvAvail7IntClear_LSB 0x7
+#define QIB_7220_IntClear_RcvAvail7IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvAvail6IntClear_LSB 0x6
+#define QIB_7220_IntClear_RcvAvail6IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvAvail5IntClear_LSB 0x5
+#define QIB_7220_IntClear_RcvAvail5IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvAvail4IntClear_LSB 0x4
+#define QIB_7220_IntClear_RcvAvail4IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvAvail3IntClear_LSB 0x3
+#define QIB_7220_IntClear_RcvAvail3IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvAvail2IntClear_LSB 0x2
+#define QIB_7220_IntClear_RcvAvail2IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvAvail1IntClear_LSB 0x1
+#define QIB_7220_IntClear_RcvAvail1IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvAvail0IntClear_LSB 0x0
+#define QIB_7220_IntClear_RcvAvail0IntClear_RMASK 0x1
+
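+/*
+ * Illustrative sketch (not part of the generated output): IntMask,
+ * IntStatus and IntClear form the usual mask/status/clear triple.  A
+ * handler would typically read the status, write the same bits back to
+ * the clear register, then service each cause, e.g. (again assuming
+ * the OFFS / sizeof(u64) register-index convention):
+ *
+ *	u64 istat = qib_read_kreg64(dd, QIB_7220_IntStatus_OFFS / sizeof(u64));
+ *
+ *	qib_write_kreg(dd, QIB_7220_IntClear_OFFS / sizeof(u64), istat);
+ *	if (istat & (1ULL << QIB_7220_IntStatus_SDmaInt_LSB))
+ *		(handle send DMA completion)
+ */
+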
+#define QIB_7220_ErrMask_OFFS 0x80
+#define QIB_7220_ErrMask_Reserved_LSB 0x36
+#define QIB_7220_ErrMask_Reserved_RMASK 0x3FF
+#define QIB_7220_ErrMask_InvalidEEPCmdMask_LSB 0x35
+#define QIB_7220_ErrMask_InvalidEEPCmdMask_RMASK 0x1
+#define QIB_7220_ErrMask_SDmaDescAddrMisalignErrMask_LSB 0x34
+#define QIB_7220_ErrMask_SDmaDescAddrMisalignErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_HardwareErrMask_LSB 0x33
+#define QIB_7220_ErrMask_HardwareErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_ResetNegatedMask_LSB 0x32
+#define QIB_7220_ErrMask_ResetNegatedMask_RMASK 0x1
+#define QIB_7220_ErrMask_InvalidAddrErrMask_LSB 0x31
+#define QIB_7220_ErrMask_InvalidAddrErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_IBStatusChangedMask_LSB 0x30
+#define QIB_7220_ErrMask_IBStatusChangedMask_RMASK 0x1
+#define QIB_7220_ErrMask_SDmaUnexpDataErrMask_LSB 0x2F
+#define QIB_7220_ErrMask_SDmaUnexpDataErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SDmaMissingDwErrMask_LSB 0x2E
+#define QIB_7220_ErrMask_SDmaMissingDwErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SDmaDwEnErrMask_LSB 0x2D
+#define QIB_7220_ErrMask_SDmaDwEnErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SDmaRpyTagErrMask_LSB 0x2C
+#define QIB_7220_ErrMask_SDmaRpyTagErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SDma1stDescErrMask_LSB 0x2B
+#define QIB_7220_ErrMask_SDma1stDescErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SDmaBaseErrMask_LSB 0x2A
+#define QIB_7220_ErrMask_SDmaBaseErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SDmaTailOutOfBoundErrMask_LSB 0x29
+#define QIB_7220_ErrMask_SDmaTailOutOfBoundErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SDmaOutOfBoundErrMask_LSB 0x28
+#define QIB_7220_ErrMask_SDmaOutOfBoundErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SDmaGenMismatchErrMask_LSB 0x27
+#define QIB_7220_ErrMask_SDmaGenMismatchErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SendBufMisuseErrMask_LSB 0x26
+#define QIB_7220_ErrMask_SendBufMisuseErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SendUnsupportedVLErrMask_LSB 0x25
+#define QIB_7220_ErrMask_SendUnsupportedVLErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SendUnexpectedPktNumErrMask_LSB 0x24
+#define QIB_7220_ErrMask_SendUnexpectedPktNumErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SendPioArmLaunchErrMask_LSB 0x23
+#define QIB_7220_ErrMask_SendPioArmLaunchErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SendDroppedDataPktErrMask_LSB 0x22
+#define QIB_7220_ErrMask_SendDroppedDataPktErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SendDroppedSmpPktErrMask_LSB 0x21
+#define QIB_7220_ErrMask_SendDroppedSmpPktErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SendPktLenErrMask_LSB 0x20
+#define QIB_7220_ErrMask_SendPktLenErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SendUnderRunErrMask_LSB 0x1F
+#define QIB_7220_ErrMask_SendUnderRunErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SendMaxPktLenErrMask_LSB 0x1E
+#define QIB_7220_ErrMask_SendMaxPktLenErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SendMinPktLenErrMask_LSB 0x1D
+#define QIB_7220_ErrMask_SendMinPktLenErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SDmaDisabledErrMask_LSB 0x1C
+#define QIB_7220_ErrMask_SDmaDisabledErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SendSpecialTriggerErrMask_LSB 0x1B
+#define QIB_7220_ErrMask_SendSpecialTriggerErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_Reserved1_LSB 0x12
+#define QIB_7220_ErrMask_Reserved1_RMASK 0x1FF
+#define QIB_7220_ErrMask_RcvIBLostLinkErrMask_LSB 0x11
+#define QIB_7220_ErrMask_RcvIBLostLinkErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_RcvHdrErrMask_LSB 0x10
+#define QIB_7220_ErrMask_RcvHdrErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_RcvHdrLenErrMask_LSB 0xF
+#define QIB_7220_ErrMask_RcvHdrLenErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_RcvBadTidErrMask_LSB 0xE
+#define QIB_7220_ErrMask_RcvBadTidErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_RcvHdrFullErrMask_LSB 0xD
+#define QIB_7220_ErrMask_RcvHdrFullErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_RcvEgrFullErrMask_LSB 0xC
+#define QIB_7220_ErrMask_RcvEgrFullErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_RcvBadVersionErrMask_LSB 0xB
+#define QIB_7220_ErrMask_RcvBadVersionErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_RcvIBFlowErrMask_LSB 0xA
+#define QIB_7220_ErrMask_RcvIBFlowErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_RcvEBPErrMask_LSB 0x9
+#define QIB_7220_ErrMask_RcvEBPErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_RcvUnsupportedVLErrMask_LSB 0x8
+#define QIB_7220_ErrMask_RcvUnsupportedVLErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_RcvUnexpectedCharErrMask_LSB 0x7
+#define QIB_7220_ErrMask_RcvUnexpectedCharErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_RcvShortPktLenErrMask_LSB 0x6
+#define QIB_7220_ErrMask_RcvShortPktLenErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_RcvLongPktLenErrMask_LSB 0x5
+#define QIB_7220_ErrMask_RcvLongPktLenErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_RcvMaxPktLenErrMask_LSB 0x4
+#define QIB_7220_ErrMask_RcvMaxPktLenErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_RcvMinPktLenErrMask_LSB 0x3
+#define QIB_7220_ErrMask_RcvMinPktLenErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_RcvICRCErrMask_LSB 0x2
+#define QIB_7220_ErrMask_RcvICRCErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_RcvVCRCErrMask_LSB 0x1
+#define QIB_7220_ErrMask_RcvVCRCErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_RcvFormatErrMask_LSB 0x0
+#define QIB_7220_ErrMask_RcvFormatErrMask_RMASK 0x1
+
+#define QIB_7220_ErrStatus_OFFS 0x88
+#define QIB_7220_ErrStatus_Reserved_LSB 0x36
+#define QIB_7220_ErrStatus_Reserved_RMASK 0x3FF
+#define QIB_7220_ErrStatus_InvalidEEPCmdErr_LSB 0x35
+#define QIB_7220_ErrStatus_InvalidEEPCmdErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SDmaDescAddrMisalignErr_LSB 0x34
+#define QIB_7220_ErrStatus_SDmaDescAddrMisalignErr_RMASK 0x1
+#define QIB_7220_ErrStatus_HardwareErr_LSB 0x33
+#define QIB_7220_ErrStatus_HardwareErr_RMASK 0x1
+#define QIB_7220_ErrStatus_ResetNegated_LSB 0x32
+#define QIB_7220_ErrStatus_ResetNegated_RMASK 0x1
+#define QIB_7220_ErrStatus_InvalidAddrErr_LSB 0x31
+#define QIB_7220_ErrStatus_InvalidAddrErr_RMASK 0x1
+#define QIB_7220_ErrStatus_IBStatusChanged_LSB 0x30
+#define QIB_7220_ErrStatus_IBStatusChanged_RMASK 0x1
+#define QIB_7220_ErrStatus_SDmaUnexpDataErr_LSB 0x2F
+#define QIB_7220_ErrStatus_SDmaUnexpDataErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SDmaMissingDwErr_LSB 0x2E
+#define QIB_7220_ErrStatus_SDmaMissingDwErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SDmaDwEnErr_LSB 0x2D
+#define QIB_7220_ErrStatus_SDmaDwEnErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SDmaRpyTagErr_LSB 0x2C
+#define QIB_7220_ErrStatus_SDmaRpyTagErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SDma1stDescErr_LSB 0x2B
+#define QIB_7220_ErrStatus_SDma1stDescErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SDmaBaseErr_LSB 0x2A
+#define QIB_7220_ErrStatus_SDmaBaseErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SDmaTailOutOfBoundErr_LSB 0x29
+#define QIB_7220_ErrStatus_SDmaTailOutOfBoundErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SDmaOutOfBoundErr_LSB 0x28
+#define QIB_7220_ErrStatus_SDmaOutOfBoundErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SDmaGenMismatchErr_LSB 0x27
+#define QIB_7220_ErrStatus_SDmaGenMismatchErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SendBufMisuseErr_LSB 0x26
+#define QIB_7220_ErrStatus_SendBufMisuseErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SendUnsupportedVLErr_LSB 0x25
+#define QIB_7220_ErrStatus_SendUnsupportedVLErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SendUnexpectedPktNumErr_LSB 0x24
+#define QIB_7220_ErrStatus_SendUnexpectedPktNumErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SendPioArmLaunchErr_LSB 0x23
+#define QIB_7220_ErrStatus_SendPioArmLaunchErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SendDroppedDataPktErr_LSB 0x22
+#define QIB_7220_ErrStatus_SendDroppedDataPktErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SendDroppedSmpPktErr_LSB 0x21
+#define QIB_7220_ErrStatus_SendDroppedSmpPktErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SendPktLenErr_LSB 0x20
+#define QIB_7220_ErrStatus_SendPktLenErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SendUnderRunErr_LSB 0x1F
+#define QIB_7220_ErrStatus_SendUnderRunErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SendMaxPktLenErr_LSB 0x1E
+#define QIB_7220_ErrStatus_SendMaxPktLenErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SendMinPktLenErr_LSB 0x1D
+#define QIB_7220_ErrStatus_SendMinPktLenErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SDmaDisabledErr_LSB 0x1C
+#define QIB_7220_ErrStatus_SDmaDisabledErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SendSpecialTriggerErr_LSB 0x1B
+#define QIB_7220_ErrStatus_SendSpecialTriggerErr_RMASK 0x1
+#define QIB_7220_ErrStatus_Reserved1_LSB 0x12
+#define QIB_7220_ErrStatus_Reserved1_RMASK 0x1FF
+#define QIB_7220_ErrStatus_RcvIBLostLinkErr_LSB 0x11
+#define QIB_7220_ErrStatus_RcvIBLostLinkErr_RMASK 0x1
+#define QIB_7220_ErrStatus_RcvHdrErr_LSB 0x10
+#define QIB_7220_ErrStatus_RcvHdrErr_RMASK 0x1
+#define QIB_7220_ErrStatus_RcvHdrLenErr_LSB 0xF
+#define QIB_7220_ErrStatus_RcvHdrLenErr_RMASK 0x1
+#define QIB_7220_ErrStatus_RcvBadTidErr_LSB 0xE
+#define QIB_7220_ErrStatus_RcvBadTidErr_RMASK 0x1
+#define QIB_7220_ErrStatus_RcvHdrFullErr_LSB 0xD
+#define QIB_7220_ErrStatus_RcvHdrFullErr_RMASK 0x1
+#define QIB_7220_ErrStatus_RcvEgrFullErr_LSB 0xC
+#define QIB_7220_ErrStatus_RcvEgrFullErr_RMASK 0x1
+#define QIB_7220_ErrStatus_RcvBadVersionErr_LSB 0xB
+#define QIB_7220_ErrStatus_RcvBadVersionErr_RMASK 0x1
+#define QIB_7220_ErrStatus_RcvIBFlowErr_LSB 0xA
+#define QIB_7220_ErrStatus_RcvIBFlowErr_RMASK 0x1
+#define QIB_7220_ErrStatus_RcvEBPErr_LSB 0x9
+#define QIB_7220_ErrStatus_RcvEBPErr_RMASK 0x1
+#define QIB_7220_ErrStatus_RcvUnsupportedVLErr_LSB 0x8
+#define QIB_7220_ErrStatus_RcvUnsupportedVLErr_RMASK 0x1
+#define QIB_7220_ErrStatus_RcvUnexpectedCharErr_LSB 0x7
+#define QIB_7220_ErrStatus_RcvUnexpectedCharErr_RMASK 0x1
+#define QIB_7220_ErrStatus_RcvShortPktLenErr_LSB 0x6
+#define QIB_7220_ErrStatus_RcvShortPktLenErr_RMASK 0x1
+#define QIB_7220_ErrStatus_RcvLongPktLenErr_LSB 0x5
+#define QIB_7220_ErrStatus_RcvLongPktLenErr_RMASK 0x1
+#define QIB_7220_ErrStatus_RcvMaxPktLenErr_LSB 0x4
+#define QIB_7220_ErrStatus_RcvMaxPktLenErr_RMASK 0x1
+#define QIB_7220_ErrStatus_RcvMinPktLenErr_LSB 0x3
+#define QIB_7220_ErrStatus_RcvMinPktLenErr_RMASK 0x1
+#define QIB_7220_ErrStatus_RcvICRCErr_LSB 0x2
+#define QIB_7220_ErrStatus_RcvICRCErr_RMASK 0x1
+#define QIB_7220_ErrStatus_RcvVCRCErr_LSB 0x1
+#define QIB_7220_ErrStatus_RcvVCRCErr_RMASK 0x1
+#define QIB_7220_ErrStatus_RcvFormatErr_LSB 0x0
+#define QIB_7220_ErrStatus_RcvFormatErr_RMASK 0x1
+
+#define QIB_7220_ErrClear_OFFS 0x90
+#define QIB_7220_ErrClear_Reserved_LSB 0x36
+#define QIB_7220_ErrClear_Reserved_RMASK 0x3FF
+#define QIB_7220_ErrClear_InvalidEEPCmdErrClear_LSB 0x35
+#define QIB_7220_ErrClear_InvalidEEPCmdErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SDmaDescAddrMisalignErrClear_LSB 0x34
+#define QIB_7220_ErrClear_SDmaDescAddrMisalignErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_HardwareErrClear_LSB 0x33
+#define QIB_7220_ErrClear_HardwareErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_ResetNegatedClear_LSB 0x32
+#define QIB_7220_ErrClear_ResetNegatedClear_RMASK 0x1
+#define QIB_7220_ErrClear_InvalidAddrErrClear_LSB 0x31
+#define QIB_7220_ErrClear_InvalidAddrErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_IBStatusChangedClear_LSB 0x30
+#define QIB_7220_ErrClear_IBStatusChangedClear_RMASK 0x1
+#define QIB_7220_ErrClear_SDmaUnexpDataErrClear_LSB 0x2F
+#define QIB_7220_ErrClear_SDmaUnexpDataErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SDmaMissingDwErrClear_LSB 0x2E
+#define QIB_7220_ErrClear_SDmaMissingDwErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SDmaDwEnErrClear_LSB 0x2D
+#define QIB_7220_ErrClear_SDmaDwEnErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SDmaRpyTagErrClear_LSB 0x2C
+#define QIB_7220_ErrClear_SDmaRpyTagErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SDma1stDescErrClear_LSB 0x2B
+#define QIB_7220_ErrClear_SDma1stDescErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SDmaBaseErrClear_LSB 0x2A
+#define QIB_7220_ErrClear_SDmaBaseErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SDmaTailOutOfBoundErrClear_LSB 0x29
+#define QIB_7220_ErrClear_SDmaTailOutOfBoundErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SDmaOutOfBoundErrClear_LSB 0x28
+#define QIB_7220_ErrClear_SDmaOutOfBoundErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SDmaGenMismatchErrClear_LSB 0x27
+#define QIB_7220_ErrClear_SDmaGenMismatchErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SendBufMisuseErrClear_LSB 0x26
+#define QIB_7220_ErrClear_SendBufMisuseErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SendUnsupportedVLErrClear_LSB 0x25
+#define QIB_7220_ErrClear_SendUnsupportedVLErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SendUnexpectedPktNumErrClear_LSB 0x24
+#define QIB_7220_ErrClear_SendUnexpectedPktNumErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SendPioArmLaunchErrClear_LSB 0x23
+#define QIB_7220_ErrClear_SendPioArmLaunchErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SendDroppedDataPktErrClear_LSB 0x22
+#define QIB_7220_ErrClear_SendDroppedDataPktErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SendDroppedSmpPktErrClear_LSB 0x21
+#define QIB_7220_ErrClear_SendDroppedSmpPktErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SendPktLenErrClear_LSB 0x20
+#define QIB_7220_ErrClear_SendPktLenErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SendUnderRunErrClear_LSB 0x1F
+#define QIB_7220_ErrClear_SendUnderRunErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SendMaxPktLenErrClear_LSB 0x1E
+#define QIB_7220_ErrClear_SendMaxPktLenErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SendMinPktLenErrClear_LSB 0x1D
+#define QIB_7220_ErrClear_SendMinPktLenErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SDmaDisabledErrClear_LSB 0x1C
+#define QIB_7220_ErrClear_SDmaDisabledErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SendSpecialTriggerErrClear_LSB 0x1B
+#define QIB_7220_ErrClear_SendSpecialTriggerErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_Reserved1_LSB 0x12
+#define QIB_7220_ErrClear_Reserved1_RMASK 0x1FF
+#define QIB_7220_ErrClear_RcvIBLostLinkErrClear_LSB 0x11
+#define QIB_7220_ErrClear_RcvIBLostLinkErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_RcvHdrErrClear_LSB 0x10
+#define QIB_7220_ErrClear_RcvHdrErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_RcvHdrLenErrClear_LSB 0xF
+#define QIB_7220_ErrClear_RcvHdrLenErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_RcvBadTidErrClear_LSB 0xE
+#define QIB_7220_ErrClear_RcvBadTidErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_RcvHdrFullErrClear_LSB 0xD
+#define QIB_7220_ErrClear_RcvHdrFullErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_RcvEgrFullErrClear_LSB 0xC
+#define QIB_7220_ErrClear_RcvEgrFullErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_RcvBadVersionErrClear_LSB 0xB
+#define QIB_7220_ErrClear_RcvBadVersionErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_RcvIBFlowErrClear_LSB 0xA
+#define QIB_7220_ErrClear_RcvIBFlowErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_RcvEBPErrClear_LSB 0x9
+#define QIB_7220_ErrClear_RcvEBPErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_RcvUnsupportedVLErrClear_LSB 0x8
+#define QIB_7220_ErrClear_RcvUnsupportedVLErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_RcvUnexpectedCharErrClear_LSB 0x7
+#define QIB_7220_ErrClear_RcvUnexpectedCharErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_RcvShortPktLenErrClear_LSB 0x6
+#define QIB_7220_ErrClear_RcvShortPktLenErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_RcvLongPktLenErrClear_LSB 0x5
+#define QIB_7220_ErrClear_RcvLongPktLenErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_RcvMaxPktLenErrClear_LSB 0x4
+#define QIB_7220_ErrClear_RcvMaxPktLenErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_RcvMinPktLenErrClear_LSB 0x3
+#define QIB_7220_ErrClear_RcvMinPktLenErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_RcvICRCErrClear_LSB 0x2
+#define QIB_7220_ErrClear_RcvICRCErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_RcvVCRCErrClear_LSB 0x1
+#define QIB_7220_ErrClear_RcvVCRCErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_RcvFormatErrClear_LSB 0x0
+#define QIB_7220_ErrClear_RcvFormatErrClear_RMASK 0x1
+
+#define QIB_7220_HwErrMask_OFFS 0x98
+#define QIB_7220_HwErrMask_IBCBusFromSPCParityErrMask_LSB 0x3F
+#define QIB_7220_HwErrMask_IBCBusFromSPCParityErrMask_RMASK 0x1
+#define QIB_7220_HwErrMask_IBCBusToSPCParityErrMask_LSB 0x3E
+#define QIB_7220_HwErrMask_IBCBusToSPCParityErrMask_RMASK 0x1
+#define QIB_7220_HwErrMask_Clk_uC_PLLNotLockedMask_LSB 0x3D
+#define QIB_7220_HwErrMask_Clk_uC_PLLNotLockedMask_RMASK 0x1
+#define QIB_7220_HwErrMask_IBSerdesPClkNotDetectMask_LSB 0x3C
+#define QIB_7220_HwErrMask_IBSerdesPClkNotDetectMask_RMASK 0x1
+#define QIB_7220_HwErrMask_PCIESerdesQ3PClkNotDetectMask_LSB 0x3B
+#define QIB_7220_HwErrMask_PCIESerdesQ3PClkNotDetectMask_RMASK 0x1
+#define QIB_7220_HwErrMask_PCIESerdesQ2PClkNotDetectMask_LSB 0x3A
+#define QIB_7220_HwErrMask_PCIESerdesQ2PClkNotDetectMask_RMASK 0x1
+#define QIB_7220_HwErrMask_PCIESerdesQ1PClkNotDetectMask_LSB 0x39
+#define QIB_7220_HwErrMask_PCIESerdesQ1PClkNotDetectMask_RMASK 0x1
+#define QIB_7220_HwErrMask_PCIESerdesQ0PClkNotDetectMask_LSB 0x38
+#define QIB_7220_HwErrMask_PCIESerdesQ0PClkNotDetectMask_RMASK 0x1
+#define QIB_7220_HwErrMask_Reserved_LSB 0x37
+#define QIB_7220_HwErrMask_Reserved_RMASK 0x1
+#define QIB_7220_HwErrMask_PowerOnBISTFailedMask_LSB 0x36
+#define QIB_7220_HwErrMask_PowerOnBISTFailedMask_RMASK 0x1
+#define QIB_7220_HwErrMask_Reserved1_LSB 0x33
+#define QIB_7220_HwErrMask_Reserved1_RMASK 0x7
+#define QIB_7220_HwErrMask_RXEMemParityErrMask_LSB 0x2C
+#define QIB_7220_HwErrMask_RXEMemParityErrMask_RMASK 0x7F
+#define QIB_7220_HwErrMask_TXEMemParityErrMask_LSB 0x28
+#define QIB_7220_HwErrMask_TXEMemParityErrMask_RMASK 0xF
+#define QIB_7220_HwErrMask_DDSRXEQMemoryParityErrMask_LSB 0x27
+#define QIB_7220_HwErrMask_DDSRXEQMemoryParityErrMask_RMASK 0x1
+#define QIB_7220_HwErrMask_IB_uC_MemoryParityErrMask_LSB 0x26
+#define QIB_7220_HwErrMask_IB_uC_MemoryParityErrMask_RMASK 0x1
+#define QIB_7220_HwErrMask_PCIEOct1_uC_MemoryParityErrMask_LSB 0x25
+#define QIB_7220_HwErrMask_PCIEOct1_uC_MemoryParityErrMask_RMASK 0x1
+#define QIB_7220_HwErrMask_PCIEOct0_uC_MemoryParityErrMask_LSB 0x24
+#define QIB_7220_HwErrMask_PCIEOct0_uC_MemoryParityErrMask_RMASK 0x1
+#define QIB_7220_HwErrMask_Reserved2_LSB 0x22
+#define QIB_7220_HwErrMask_Reserved2_RMASK 0x3
+#define QIB_7220_HwErrMask_PCIeBusParityErrMask_LSB 0x1F
+#define QIB_7220_HwErrMask_PCIeBusParityErrMask_RMASK 0x7
+#define QIB_7220_HwErrMask_PcieCplTimeoutMask_LSB 0x1E
+#define QIB_7220_HwErrMask_PcieCplTimeoutMask_RMASK 0x1
+#define QIB_7220_HwErrMask_PoisonedTLPMask_LSB 0x1D
+#define QIB_7220_HwErrMask_PoisonedTLPMask_RMASK 0x1
+#define QIB_7220_HwErrMask_SDmaMemReadErrMask_LSB 0x1C
+#define QIB_7220_HwErrMask_SDmaMemReadErrMask_RMASK 0x1
+#define QIB_7220_HwErrMask_Reserved3_LSB 0x8
+#define QIB_7220_HwErrMask_Reserved3_RMASK 0xFFFFF
+#define QIB_7220_HwErrMask_PCIeMemParityErrMask_LSB 0x0
+#define QIB_7220_HwErrMask_PCIeMemParityErrMask_RMASK 0xFF
+
+#define QIB_7220_HwErrStatus_OFFS 0xA0
+#define QIB_7220_HwErrStatus_IBCBusFromSPCParityErr_LSB 0x3F
+#define QIB_7220_HwErrStatus_IBCBusFromSPCParityErr_RMASK 0x1
+#define QIB_7220_HwErrStatus_IBCBusToSPCParityErr_LSB 0x3E
+#define QIB_7220_HwErrStatus_IBCBusToSPCParityErr_RMASK 0x1
+#define QIB_7220_HwErrStatus_Clk_uC_PLLNotLocked_LSB 0x3D
+#define QIB_7220_HwErrStatus_Clk_uC_PLLNotLocked_RMASK 0x1
+#define QIB_7220_HwErrStatus_IBSerdesPClkNotDetect_LSB 0x3C
+#define QIB_7220_HwErrStatus_IBSerdesPClkNotDetect_RMASK 0x1
+#define QIB_7220_HwErrStatus_PCIESerdesQ3PClkNotDetect_LSB 0x3B
+#define QIB_7220_HwErrStatus_PCIESerdesQ3PClkNotDetect_RMASK 0x1
+#define QIB_7220_HwErrStatus_PCIESerdesQ2PClkNotDetect_LSB 0x3A
+#define QIB_7220_HwErrStatus_PCIESerdesQ2PClkNotDetect_RMASK 0x1
+#define QIB_7220_HwErrStatus_PCIESerdesQ1PClkNotDetect_LSB 0x39
+#define QIB_7220_HwErrStatus_PCIESerdesQ1PClkNotDetect_RMASK 0x1
+#define QIB_7220_HwErrStatus_PCIESerdesQ0PClkNotDetect_LSB 0x38
+#define QIB_7220_HwErrStatus_PCIESerdesQ0PClkNotDetect_RMASK 0x1
+#define QIB_7220_HwErrStatus_Reserved_LSB 0x37
+#define QIB_7220_HwErrStatus_Reserved_RMASK 0x1
+#define QIB_7220_HwErrStatus_PowerOnBISTFailed_LSB 0x36
+#define QIB_7220_HwErrStatus_PowerOnBISTFailed_RMASK 0x1
+#define QIB_7220_HwErrStatus_Reserved1_LSB 0x33
+#define QIB_7220_HwErrStatus_Reserved1_RMASK 0x7
+#define QIB_7220_HwErrStatus_RXEMemParity_LSB 0x2C
+#define QIB_7220_HwErrStatus_RXEMemParity_RMASK 0x7F
+#define QIB_7220_HwErrStatus_TXEMemParity_LSB 0x28
+#define QIB_7220_HwErrStatus_TXEMemParity_RMASK 0xF
+#define QIB_7220_HwErrStatus_DDSRXEQMemoryParityErr_LSB 0x27
+#define QIB_7220_HwErrStatus_DDSRXEQMemoryParityErr_RMASK 0x1
+#define QIB_7220_HwErrStatus_IB_uC_MemoryParityErr_LSB 0x26
+#define QIB_7220_HwErrStatus_IB_uC_MemoryParityErr_RMASK 0x1
+#define QIB_7220_HwErrStatus_PCIE_uC_Oct1MemoryParityErr_LSB 0x25
+#define QIB_7220_HwErrStatus_PCIE_uC_Oct1MemoryParityErr_RMASK 0x1
+#define QIB_7220_HwErrStatus_PCIE_uC_Oct0MemoryParityErr_LSB 0x24
+#define QIB_7220_HwErrStatus_PCIE_uC_Oct0MemoryParityErr_RMASK 0x1
+#define QIB_7220_HwErrStatus_Reserved2_LSB 0x22
+#define QIB_7220_HwErrStatus_Reserved2_RMASK 0x3
+#define QIB_7220_HwErrStatus_PCIeBusParity_LSB 0x1F
+#define QIB_7220_HwErrStatus_PCIeBusParity_RMASK 0x7
+#define QIB_7220_HwErrStatus_PcieCplTimeout_LSB 0x1E
+#define QIB_7220_HwErrStatus_PcieCplTimeout_RMASK 0x1
+#define QIB_7220_HwErrStatus_PoisenedTLP_LSB 0x1D
+#define QIB_7220_HwErrStatus_PoisenedTLP_RMASK 0x1
+#define QIB_7220_HwErrStatus_SDmaMemReadErr_LSB 0x1C
+#define QIB_7220_HwErrStatus_SDmaMemReadErr_RMASK 0x1
+#define QIB_7220_HwErrStatus_Reserved3_LSB 0x8
+#define QIB_7220_HwErrStatus_Reserved3_RMASK 0xFFFFF
+#define QIB_7220_HwErrStatus_PCIeMemParity_LSB 0x0
+#define QIB_7220_HwErrStatus_PCIeMemParity_RMASK 0xFF
+
+#define QIB_7220_HwErrClear_OFFS 0xA8
+#define QIB_7220_HwErrClear_IBCBusFromSPCParityErrClear_LSB 0x3F
+#define QIB_7220_HwErrClear_IBCBusFromSPCParityErrClear_RMASK 0x1
+#define QIB_7220_HwErrClear_IBCBusToSPCparityErrClear_LSB 0x3E
+#define QIB_7220_HwErrClear_IBCBusToSPCparityErrClear_RMASK 0x1
+#define QIB_7220_HwErrClear_Clk_uC_PLLNotLockedClear_LSB 0x3D
+#define QIB_7220_HwErrClear_Clk_uC_PLLNotLockedClear_RMASK 0x1
+#define QIB_7220_HwErrClear_IBSerdesPClkNotDetectClear_LSB 0x3C
+#define QIB_7220_HwErrClear_IBSerdesPClkNotDetectClear_RMASK 0x1
+#define QIB_7220_HwErrClear_PCIESerdesQ3PClkNotDetectClear_LSB 0x3B
+#define QIB_7220_HwErrClear_PCIESerdesQ3PClkNotDetectClear_RMASK 0x1
+#define QIB_7220_HwErrClear_PCIESerdesQ2PClkNotDetectClear_LSB 0x3A
+#define QIB_7220_HwErrClear_PCIESerdesQ2PClkNotDetectClear_RMASK 0x1
+#define QIB_7220_HwErrClear_PCIESerdesQ1PClkNotDetectClear_LSB 0x39
+#define QIB_7220_HwErrClear_PCIESerdesQ1PClkNotDetectClear_RMASK 0x1
+#define QIB_7220_HwErrClear_PCIESerdesQ0PClkNotDetectClear_LSB 0x38
+#define QIB_7220_HwErrClear_PCIESerdesQ0PClkNotDetectClear_RMASK 0x1
+#define QIB_7220_HwErrClear_Reserved_LSB 0x37
+#define QIB_7220_HwErrClear_Reserved_RMASK 0x1
+#define QIB_7220_HwErrClear_PowerOnBISTFailedClear_LSB 0x36
+#define QIB_7220_HwErrClear_PowerOnBISTFailedClear_RMASK 0x1
+#define QIB_7220_HwErrClear_Reserved1_LSB 0x33
+#define QIB_7220_HwErrClear_Reserved1_RMASK 0x7
+#define QIB_7220_HwErrClear_RXEMemParityClear_LSB 0x2C
+#define QIB_7220_HwErrClear_RXEMemParityClear_RMASK 0x7F
+#define QIB_7220_HwErrClear_TXEMemParityClear_LSB 0x28
+#define QIB_7220_HwErrClear_TXEMemParityClear_RMASK 0xF
+#define QIB_7220_HwErrClear_DDSRXEQMemoryParityErrClear_LSB 0x27
+#define QIB_7220_HwErrClear_DDSRXEQMemoryParityErrClear_RMASK 0x1
+#define QIB_7220_HwErrClear_IB_uC_MemoryParityErrClear_LSB 0x26
+#define QIB_7220_HwErrClear_IB_uC_MemoryParityErrClear_RMASK 0x1
+#define QIB_7220_HwErrClear_PCIE_uC_Oct1MemoryParityErrClear_LSB 0x25
+#define QIB_7220_HwErrClear_PCIE_uC_Oct1MemoryParityErrClear_RMASK 0x1
+#define QIB_7220_HwErrClear_PCIE_uC_Oct0MemoryParityErrClear_LSB 0x24
+#define QIB_7220_HwErrClear_PCIE_uC_Oct0MemoryParityErrClear_RMASK 0x1
+#define QIB_7220_HwErrClear_Reserved2_LSB 0x22
+#define QIB_7220_HwErrClear_Reserved2_RMASK 0x3
+#define QIB_7220_HwErrClear_PCIeBusParityClr_LSB 0x1F
+#define QIB_7220_HwErrClear_PCIeBusParityClr_RMASK 0x7
+#define QIB_7220_HwErrClear_PcieCplTimeoutClear_LSB 0x1E
+#define QIB_7220_HwErrClear_PcieCplTimeoutClear_RMASK 0x1
+#define QIB_7220_HwErrClear_PoisonedTLPClear_LSB 0x1D
+#define QIB_7220_HwErrClear_PoisonedTLPClear_RMASK 0x1
+#define QIB_7220_HwErrClear_SDmaMemReadErrClear_LSB 0x1C
+#define QIB_7220_HwErrClear_SDmaMemReadErrClear_RMASK 0x1
+#define QIB_7220_HwErrClear_Reserved3_LSB 0x8
+#define QIB_7220_HwErrClear_Reserved3_RMASK 0xFFFFF
+#define QIB_7220_HwErrClear_PCIeMemParityClr_LSB 0x0
+#define QIB_7220_HwErrClear_PCIeMemParityClr_RMASK 0xFF
+
+#define QIB_7220_HwDiagCtrl_OFFS 0xB0
+#define QIB_7220_HwDiagCtrl_ForceIBCBusFromSPCParityErr_LSB 0x3F
+#define QIB_7220_HwDiagCtrl_ForceIBCBusFromSPCParityErr_RMASK 0x1
+#define QIB_7220_HwDiagCtrl_ForceIBCBusToSPCParityErr_LSB 0x3E
+#define QIB_7220_HwDiagCtrl_ForceIBCBusToSPCParityErr_RMASK 0x1
+#define QIB_7220_HwDiagCtrl_CounterWrEnable_LSB 0x3D
+#define QIB_7220_HwDiagCtrl_CounterWrEnable_RMASK 0x1
+#define QIB_7220_HwDiagCtrl_CounterDisable_LSB 0x3C
+#define QIB_7220_HwDiagCtrl_CounterDisable_RMASK 0x1
+#define QIB_7220_HwDiagCtrl_Reserved_LSB 0x33
+#define QIB_7220_HwDiagCtrl_Reserved_RMASK 0x1FF
+#define QIB_7220_HwDiagCtrl_ForceRxMemParityErr_LSB 0x2C
+#define QIB_7220_HwDiagCtrl_ForceRxMemParityErr_RMASK 0x7F
+#define QIB_7220_HwDiagCtrl_ForceTxMemparityErr_LSB 0x28
+#define QIB_7220_HwDiagCtrl_ForceTxMemparityErr_RMASK 0xF
+#define QIB_7220_HwDiagCtrl_ForceDDSRXEQMemoryParityErr_LSB 0x27
+#define QIB_7220_HwDiagCtrl_ForceDDSRXEQMemoryParityErr_RMASK 0x1
+#define QIB_7220_HwDiagCtrl_ForceIB_uC_MemoryParityErr_LSB 0x26
+#define QIB_7220_HwDiagCtrl_ForceIB_uC_MemoryParityErr_RMASK 0x1
+#define QIB_7220_HwDiagCtrl_ForcePCIE_uC_Oct1MemoryParityErr_LSB 0x25
+#define QIB_7220_HwDiagCtrl_ForcePCIE_uC_Oct1MemoryParityErr_RMASK 0x1
+#define QIB_7220_HwDiagCtrl_ForcePCIE_uC_Oct0MemoryParityErr_LSB 0x24
+#define QIB_7220_HwDiagCtrl_ForcePCIE_uC_Oct0MemoryParityErr_RMASK 0x1
+#define QIB_7220_HwDiagCtrl_Reserved1_LSB 0x23
+#define QIB_7220_HwDiagCtrl_Reserved1_RMASK 0x1
+#define QIB_7220_HwDiagCtrl_forcePCIeBusParity_LSB 0x1F
+#define QIB_7220_HwDiagCtrl_forcePCIeBusParity_RMASK 0xF
+#define QIB_7220_HwDiagCtrl_Reserved2_LSB 0x8
+#define QIB_7220_HwDiagCtrl_Reserved2_RMASK 0x7FFFFF
+#define QIB_7220_HwDiagCtrl_forcePCIeMemParity_LSB 0x0
+#define QIB_7220_HwDiagCtrl_forcePCIeMemParity_RMASK 0xFF
+
+#define QIB_7220_REG_0000B8_OFFS 0xB8
+
+#define QIB_7220_IBCStatus_OFFS 0xC0
+#define QIB_7220_IBCStatus_TxCreditOk_LSB 0x1F
+#define QIB_7220_IBCStatus_TxCreditOk_RMASK 0x1
+#define QIB_7220_IBCStatus_TxReady_LSB 0x1E
+#define QIB_7220_IBCStatus_TxReady_RMASK 0x1
+#define QIB_7220_IBCStatus_Reserved_LSB 0xE
+#define QIB_7220_IBCStatus_Reserved_RMASK 0xFFFF
+#define QIB_7220_IBCStatus_IBTxLaneReversed_LSB 0xD
+#define QIB_7220_IBCStatus_IBTxLaneReversed_RMASK 0x1
+#define QIB_7220_IBCStatus_IBRxLaneReversed_LSB 0xC
+#define QIB_7220_IBCStatus_IBRxLaneReversed_RMASK 0x1
+#define QIB_7220_IBCStatus_IB_SERDES_TRIM_DONE_LSB 0xB
+#define QIB_7220_IBCStatus_IB_SERDES_TRIM_DONE_RMASK 0x1
+#define QIB_7220_IBCStatus_DDS_RXEQ_FAIL_LSB 0xA
+#define QIB_7220_IBCStatus_DDS_RXEQ_FAIL_RMASK 0x1
+#define QIB_7220_IBCStatus_LinkWidthActive_LSB 0x9
+#define QIB_7220_IBCStatus_LinkWidthActive_RMASK 0x1
+#define QIB_7220_IBCStatus_LinkSpeedActive_LSB 0x8
+#define QIB_7220_IBCStatus_LinkSpeedActive_RMASK 0x1
+#define QIB_7220_IBCStatus_LinkState_LSB 0x5
+#define QIB_7220_IBCStatus_LinkState_RMASK 0x7
+#define QIB_7220_IBCStatus_LinkTrainingState_LSB 0x0
+#define QIB_7220_IBCStatus_LinkTrainingState_RMASK 0x1F
+
+#define QIB_7220_IBCCtrl_OFFS 0xC8
+#define QIB_7220_IBCCtrl_Loopback_LSB 0x3F
+#define QIB_7220_IBCCtrl_Loopback_RMASK 0x1
+#define QIB_7220_IBCCtrl_LinkDownDefaultState_LSB 0x3E
+#define QIB_7220_IBCCtrl_LinkDownDefaultState_RMASK 0x1
+#define QIB_7220_IBCCtrl_Reserved_LSB 0x2B
+#define QIB_7220_IBCCtrl_Reserved_RMASK 0x7FFFF
+#define QIB_7220_IBCCtrl_CreditScale_LSB 0x28
+#define QIB_7220_IBCCtrl_CreditScale_RMASK 0x7
+#define QIB_7220_IBCCtrl_OverrunThreshold_LSB 0x24
+#define QIB_7220_IBCCtrl_OverrunThreshold_RMASK 0xF
+#define QIB_7220_IBCCtrl_PhyerrThreshold_LSB 0x20
+#define QIB_7220_IBCCtrl_PhyerrThreshold_RMASK 0xF
+#define QIB_7220_IBCCtrl_MaxPktLen_LSB 0x15
+#define QIB_7220_IBCCtrl_MaxPktLen_RMASK 0x7FF
+#define QIB_7220_IBCCtrl_LinkCmd_LSB 0x13
+#define QIB_7220_IBCCtrl_LinkCmd_RMASK 0x3
+#define QIB_7220_IBCCtrl_LinkInitCmd_LSB 0x10
+#define QIB_7220_IBCCtrl_LinkInitCmd_RMASK 0x7
+#define QIB_7220_IBCCtrl_FlowCtrlWaterMark_LSB 0x8
+#define QIB_7220_IBCCtrl_FlowCtrlWaterMark_RMASK 0xFF
+#define QIB_7220_IBCCtrl_FlowCtrlPeriod_LSB 0x0
+#define QIB_7220_IBCCtrl_FlowCtrlPeriod_RMASK 0xFF
+
+#define QIB_7220_EXTStatus_OFFS 0xD0
+#define QIB_7220_EXTStatus_GPIOIn_LSB 0x30
+#define QIB_7220_EXTStatus_GPIOIn_RMASK 0xFFFF
+#define QIB_7220_EXTStatus_Reserved_LSB 0x20
+#define QIB_7220_EXTStatus_Reserved_RMASK 0xFFFF
+#define QIB_7220_EXTStatus_Reserved1_LSB 0x10
+#define QIB_7220_EXTStatus_Reserved1_RMASK 0xFFFF
+#define QIB_7220_EXTStatus_MemBISTDisabled_LSB 0xF
+#define QIB_7220_EXTStatus_MemBISTDisabled_RMASK 0x1
+#define QIB_7220_EXTStatus_MemBISTEndTest_LSB 0xE
+#define QIB_7220_EXTStatus_MemBISTEndTest_RMASK 0x1
+#define QIB_7220_EXTStatus_Reserved2_LSB 0x0
+#define QIB_7220_EXTStatus_Reserved2_RMASK 0x3FFF
+
+#define QIB_7220_EXTCtrl_OFFS 0xD8
+#define QIB_7220_EXTCtrl_GPIOOe_LSB 0x30
+#define QIB_7220_EXTCtrl_GPIOOe_RMASK 0xFFFF
+#define QIB_7220_EXTCtrl_GPIOInvert_LSB 0x20
+#define QIB_7220_EXTCtrl_GPIOInvert_RMASK 0xFFFF
+#define QIB_7220_EXTCtrl_Reserved_LSB 0x4
+#define QIB_7220_EXTCtrl_Reserved_RMASK 0xFFFFFFF
+#define QIB_7220_EXTCtrl_LEDPriPortGreenOn_LSB 0x3
+#define QIB_7220_EXTCtrl_LEDPriPortGreenOn_RMASK 0x1
+#define QIB_7220_EXTCtrl_LEDPriPortYellowOn_LSB 0x2
+#define QIB_7220_EXTCtrl_LEDPriPortYellowOn_RMASK 0x1
+#define QIB_7220_EXTCtrl_LEDGblOkGreenOn_LSB 0x1
+#define QIB_7220_EXTCtrl_LEDGblOkGreenOn_RMASK 0x1
+#define QIB_7220_EXTCtrl_LEDGblErrRedOff_LSB 0x0
+#define QIB_7220_EXTCtrl_LEDGblErrRedOff_RMASK 0x1
+
+#define QIB_7220_GPIOOut_OFFS 0xE0
+
+#define QIB_7220_GPIOMask_OFFS 0xE8
+
+#define QIB_7220_GPIOStatus_OFFS 0xF0
+
+#define QIB_7220_GPIOClear_OFFS 0xF8
+
+#define QIB_7220_RcvCtrl_OFFS 0x100
+#define QIB_7220_RcvCtrl_Reserved_LSB 0x27
+#define QIB_7220_RcvCtrl_Reserved_RMASK 0x1FFFFFF
+#define QIB_7220_RcvCtrl_RcvQPMapEnable_LSB 0x26
+#define QIB_7220_RcvCtrl_RcvQPMapEnable_RMASK 0x1
+#define QIB_7220_RcvCtrl_PortCfg_LSB 0x24
+#define QIB_7220_RcvCtrl_PortCfg_RMASK 0x3
+#define QIB_7220_RcvCtrl_TailUpd_LSB 0x23
+#define QIB_7220_RcvCtrl_TailUpd_RMASK 0x1
+#define QIB_7220_RcvCtrl_RcvPartitionKeyDisable_LSB 0x22
+#define QIB_7220_RcvCtrl_RcvPartitionKeyDisable_RMASK 0x1
+#define QIB_7220_RcvCtrl_IntrAvail_LSB 0x11
+#define QIB_7220_RcvCtrl_IntrAvail_RMASK 0x1FFFF
+#define QIB_7220_RcvCtrl_PortEnable_LSB 0x0
+#define QIB_7220_RcvCtrl_PortEnable_RMASK 0x1FFFF
+
+#define QIB_7220_RcvBTHQP_OFFS 0x108
+#define QIB_7220_RcvBTHQP_Reserved_LSB 0x18
+#define QIB_7220_RcvBTHQP_Reserved_RMASK 0xFF
+#define QIB_7220_RcvBTHQP_RcvBTHQP_LSB 0x0
+#define QIB_7220_RcvBTHQP_RcvBTHQP_RMASK 0xFFFFFF
+
+#define QIB_7220_RcvHdrSize_OFFS 0x110
+
+#define QIB_7220_RcvHdrCnt_OFFS 0x118
+
+#define QIB_7220_RcvHdrEntSize_OFFS 0x120
+
+#define QIB_7220_RcvTIDBase_OFFS 0x128
+
+#define QIB_7220_RcvTIDCnt_OFFS 0x130
+
+#define QIB_7220_RcvEgrBase_OFFS 0x138
+
+#define QIB_7220_RcvEgrCnt_OFFS 0x140
+
+#define QIB_7220_RcvBufBase_OFFS 0x148
+
+#define QIB_7220_RcvBufSize_OFFS 0x150
+
+#define QIB_7220_RxIntMemBase_OFFS 0x158
+
+#define QIB_7220_RxIntMemSize_OFFS 0x160
+
+#define QIB_7220_RcvPartitionKey_OFFS 0x168
+
+#define QIB_7220_RcvQPMulticastPort_OFFS 0x170
+#define QIB_7220_RcvQPMulticastPort_Reserved_LSB 0x5
+#define QIB_7220_RcvQPMulticastPort_Reserved_RMASK 0x7FFFFFFFFFFFFFF
+#define QIB_7220_RcvQPMulticastPort_RcvQpMcPort_LSB 0x0
+#define QIB_7220_RcvQPMulticastPort_RcvQpMcPort_RMASK 0x1F
+
+#define QIB_7220_RcvPktLEDCnt_OFFS 0x178
+#define QIB_7220_RcvPktLEDCnt_ONperiod_LSB 0x20
+#define QIB_7220_RcvPktLEDCnt_ONperiod_RMASK 0xFFFFFFFF
+#define QIB_7220_RcvPktLEDCnt_OFFperiod_LSB 0x0
+#define QIB_7220_RcvPktLEDCnt_OFFperiod_RMASK 0xFFFFFFFF
+
+#define QIB_7220_IBCDDRCtrl_OFFS 0x180
+#define QIB_7220_IBCDDRCtrl_IB_DLID_MASK_LSB 0x30
+#define QIB_7220_IBCDDRCtrl_IB_DLID_MASK_RMASK 0xFFFF
+#define QIB_7220_IBCDDRCtrl_IB_DLID_LSB 0x20
+#define QIB_7220_IBCDDRCtrl_IB_DLID_RMASK 0xFFFF
+#define QIB_7220_IBCDDRCtrl_Reserved_LSB 0x1B
+#define QIB_7220_IBCDDRCtrl_Reserved_RMASK 0x1F
+#define QIB_7220_IBCDDRCtrl_HRTBT_REQ_LSB 0x1A
+#define QIB_7220_IBCDDRCtrl_HRTBT_REQ_RMASK 0x1
+#define QIB_7220_IBCDDRCtrl_HRTBT_PORT_LSB 0x12
+#define QIB_7220_IBCDDRCtrl_HRTBT_PORT_RMASK 0xFF
+#define QIB_7220_IBCDDRCtrl_HRTBT_AUTO_LSB 0x11
+#define QIB_7220_IBCDDRCtrl_HRTBT_AUTO_RMASK 0x1
+#define QIB_7220_IBCDDRCtrl_HRTBT_ENB_LSB 0x10
+#define QIB_7220_IBCDDRCtrl_HRTBT_ENB_RMASK 0x1
+#define QIB_7220_IBCDDRCtrl_SD_DDS_LSB 0xC
+#define QIB_7220_IBCDDRCtrl_SD_DDS_RMASK 0xF
+#define QIB_7220_IBCDDRCtrl_SD_DDSV_LSB 0xB
+#define QIB_7220_IBCDDRCtrl_SD_DDSV_RMASK 0x1
+#define QIB_7220_IBCDDRCtrl_SD_ADD_ENB_LSB 0xA
+#define QIB_7220_IBCDDRCtrl_SD_ADD_ENB_RMASK 0x1
+#define QIB_7220_IBCDDRCtrl_SD_RX_EQUAL_ENABLE_LSB 0x9
+#define QIB_7220_IBCDDRCtrl_SD_RX_EQUAL_ENABLE_RMASK 0x1
+#define QIB_7220_IBCDDRCtrl_IB_LANE_REV_SUPPORTED_LSB 0x8
+#define QIB_7220_IBCDDRCtrl_IB_LANE_REV_SUPPORTED_RMASK 0x1
+#define QIB_7220_IBCDDRCtrl_IB_POLARITY_REV_SUPP_LSB 0x7
+#define QIB_7220_IBCDDRCtrl_IB_POLARITY_REV_SUPP_RMASK 0x1
+#define QIB_7220_IBCDDRCtrl_IB_NUM_CHANNELS_LSB 0x5
+#define QIB_7220_IBCDDRCtrl_IB_NUM_CHANNELS_RMASK 0x3
+#define QIB_7220_IBCDDRCtrl_SD_SPEED_QDR_LSB 0x4
+#define QIB_7220_IBCDDRCtrl_SD_SPEED_QDR_RMASK 0x1
+#define QIB_7220_IBCDDRCtrl_SD_SPEED_DDR_LSB 0x3
+#define QIB_7220_IBCDDRCtrl_SD_SPEED_DDR_RMASK 0x1
+#define QIB_7220_IBCDDRCtrl_SD_SPEED_SDR_LSB 0x2
+#define QIB_7220_IBCDDRCtrl_SD_SPEED_SDR_RMASK 0x1
+#define QIB_7220_IBCDDRCtrl_SD_SPEED_LSB 0x1
+#define QIB_7220_IBCDDRCtrl_SD_SPEED_RMASK 0x1
+#define QIB_7220_IBCDDRCtrl_IB_ENHANCED_MODE_LSB 0x0
+#define QIB_7220_IBCDDRCtrl_IB_ENHANCED_MODE_RMASK 0x1
+
+#define QIB_7220_HRTBT_GUID_OFFS 0x188
+
+#define QIB_7220_IBCDDRCtrl2_OFFS 0x1A0
+#define QIB_7220_IBCDDRCtrl2_IB_BACK_PORCH_LSB 0x5
+#define QIB_7220_IBCDDRCtrl2_IB_BACK_PORCH_RMASK 0x1F
+#define QIB_7220_IBCDDRCtrl2_IB_FRONT_PORCH_LSB 0x0
+#define QIB_7220_IBCDDRCtrl2_IB_FRONT_PORCH_RMASK 0x1F
+
+#define QIB_7220_IBCDDRStatus_OFFS 0x1A8
+#define QIB_7220_IBCDDRStatus_heartbeat_timed_out_LSB 0x24
+#define QIB_7220_IBCDDRStatus_heartbeat_timed_out_RMASK 0x1
+#define QIB_7220_IBCDDRStatus_heartbeat_crosstalk_LSB 0x20
+#define QIB_7220_IBCDDRStatus_heartbeat_crosstalk_RMASK 0xF
+#define QIB_7220_IBCDDRStatus_RxEqLocalDevice_LSB 0x1E
+#define QIB_7220_IBCDDRStatus_RxEqLocalDevice_RMASK 0x3
+#define QIB_7220_IBCDDRStatus_ReqDDSLocalFromRmt_LSB 0x1A
+#define QIB_7220_IBCDDRStatus_ReqDDSLocalFromRmt_RMASK 0xF
+#define QIB_7220_IBCDDRStatus_LinkRoundTripLatency_LSB 0x0
+#define QIB_7220_IBCDDRStatus_LinkRoundTripLatency_RMASK 0x3FFFFFF
+
+#define QIB_7220_JIntReload_OFFS 0x1B0
+#define QIB_7220_JIntReload_J_limit_reload_LSB 0x10
+#define QIB_7220_JIntReload_J_limit_reload_RMASK 0xFFFF
+#define QIB_7220_JIntReload_J_reload_LSB 0x0
+#define QIB_7220_JIntReload_J_reload_RMASK 0xFFFF
+
+#define QIB_7220_IBNCModeCtrl_OFFS 0x1B8
+#define QIB_7220_IBNCModeCtrl_Reserved_LSB 0x1A
+#define QIB_7220_IBNCModeCtrl_Reserved_RMASK 0x3FFFFFFFFF
+#define QIB_7220_IBNCModeCtrl_TSMCode_TS2_LSB 0x11
+#define QIB_7220_IBNCModeCtrl_TSMCode_TS2_RMASK 0x1FF
+#define QIB_7220_IBNCModeCtrl_TSMCode_TS1_LSB 0x8
+#define QIB_7220_IBNCModeCtrl_TSMCode_TS1_RMASK 0x1FF
+#define QIB_7220_IBNCModeCtrl_Reserved1_LSB 0x3
+#define QIB_7220_IBNCModeCtrl_Reserved1_RMASK 0x1F
+#define QIB_7220_IBNCModeCtrl_TSMEnable_ignore_TSM_on_rx_LSB 0x2
+#define QIB_7220_IBNCModeCtrl_TSMEnable_ignore_TSM_on_rx_RMASK 0x1
+#define QIB_7220_IBNCModeCtrl_TSMEnable_send_TS2_LSB 0x1
+#define QIB_7220_IBNCModeCtrl_TSMEnable_send_TS2_RMASK 0x1
+#define QIB_7220_IBNCModeCtrl_TSMEnable_send_TS1_LSB 0x0
+#define QIB_7220_IBNCModeCtrl_TSMEnable_send_TS1_RMASK 0x1
+
+#define QIB_7220_SendCtrl_OFFS 0x1C0
+#define QIB_7220_SendCtrl_Disarm_LSB 0x1F
+#define QIB_7220_SendCtrl_Disarm_RMASK 0x1
+#define QIB_7220_SendCtrl_Reserved_LSB 0x1D
+#define QIB_7220_SendCtrl_Reserved_RMASK 0x3
+#define QIB_7220_SendCtrl_AvailUpdThld_LSB 0x18
+#define QIB_7220_SendCtrl_AvailUpdThld_RMASK 0x1F
+#define QIB_7220_SendCtrl_DisarmPIOBuf_LSB 0x10
+#define QIB_7220_SendCtrl_DisarmPIOBuf_RMASK 0xFF
+#define QIB_7220_SendCtrl_Reserved1_LSB 0xD
+#define QIB_7220_SendCtrl_Reserved1_RMASK 0x7
+#define QIB_7220_SendCtrl_SDmaHalt_LSB 0xC
+#define QIB_7220_SendCtrl_SDmaHalt_RMASK 0x1
+#define QIB_7220_SendCtrl_SDmaEnable_LSB 0xB
+#define QIB_7220_SendCtrl_SDmaEnable_RMASK 0x1
+#define QIB_7220_SendCtrl_SDmaSingleDescriptor_LSB 0xA
+#define QIB_7220_SendCtrl_SDmaSingleDescriptor_RMASK 0x1
+#define QIB_7220_SendCtrl_SDmaIntEnable_LSB 0x9
+#define QIB_7220_SendCtrl_SDmaIntEnable_RMASK 0x1
+#define QIB_7220_SendCtrl_Reserved2_LSB 0x5
+#define QIB_7220_SendCtrl_Reserved2_RMASK 0xF
+#define QIB_7220_SendCtrl_SSpecialTriggerEn_LSB 0x4
+#define QIB_7220_SendCtrl_SSpecialTriggerEn_RMASK 0x1
+#define QIB_7220_SendCtrl_SPioEnable_LSB 0x3
+#define QIB_7220_SendCtrl_SPioEnable_RMASK 0x1
+#define QIB_7220_SendCtrl_SendBufAvailUpd_LSB 0x2
+#define QIB_7220_SendCtrl_SendBufAvailUpd_RMASK 0x1
+#define QIB_7220_SendCtrl_SendIntBufAvail_LSB 0x1
+#define QIB_7220_SendCtrl_SendIntBufAvail_RMASK 0x1
+#define QIB_7220_SendCtrl_Abort_LSB 0x0
+#define QIB_7220_SendCtrl_Abort_RMASK 0x1
+
+#define QIB_7220_SendBufBase_OFFS 0x1C8
+#define QIB_7220_SendBufBase_Reserved_LSB 0x35
+#define QIB_7220_SendBufBase_Reserved_RMASK 0x7FF
+#define QIB_7220_SendBufBase_BaseAddr_LargePIO_LSB 0x20
+#define QIB_7220_SendBufBase_BaseAddr_LargePIO_RMASK 0x1FFFFF
+#define QIB_7220_SendBufBase_Reserved1_LSB 0x15
+#define QIB_7220_SendBufBase_Reserved1_RMASK 0x7FF
+#define QIB_7220_SendBufBase_BaseAddr_SmallPIO_LSB 0x0
+#define QIB_7220_SendBufBase_BaseAddr_SmallPIO_RMASK 0x1FFFFF
+
+#define QIB_7220_SendBufSize_OFFS 0x1D0
+#define QIB_7220_SendBufSize_Reserved_LSB 0x2D
+#define QIB_7220_SendBufSize_Reserved_RMASK 0xFFFFF
+#define QIB_7220_SendBufSize_Size_LargePIO_LSB 0x20
+#define QIB_7220_SendBufSize_Size_LargePIO_RMASK 0x1FFF
+#define QIB_7220_SendBufSize_Reserved1_LSB 0xC
+#define QIB_7220_SendBufSize_Reserved1_RMASK 0xFFFFF
+#define QIB_7220_SendBufSize_Size_SmallPIO_LSB 0x0
+#define QIB_7220_SendBufSize_Size_SmallPIO_RMASK 0xFFF
+
+#define QIB_7220_SendBufCnt_OFFS 0x1D8
+#define QIB_7220_SendBufCnt_Reserved_LSB 0x24
+#define QIB_7220_SendBufCnt_Reserved_RMASK 0xFFFFFFF
+#define QIB_7220_SendBufCnt_Num_LargeBuffers_LSB 0x20
+#define QIB_7220_SendBufCnt_Num_LargeBuffers_RMASK 0xF
+#define QIB_7220_SendBufCnt_Reserved1_LSB 0x9
+#define QIB_7220_SendBufCnt_Reserved1_RMASK 0x7FFFFF
+#define QIB_7220_SendBufCnt_Num_SmallBuffers_LSB 0x0
+#define QIB_7220_SendBufCnt_Num_SmallBuffers_RMASK 0x1FF
+
+#define QIB_7220_SendBufAvailAddr_OFFS 0x1E0
+#define QIB_7220_SendBufAvailAddr_SendBufAvailAddr_LSB 0x6
+#define QIB_7220_SendBufAvailAddr_SendBufAvailAddr_RMASK 0x3FFFFFFFF
+#define QIB_7220_SendBufAvailAddr_Reserved_LSB 0x0
+#define QIB_7220_SendBufAvailAddr_Reserved_RMASK 0x3F
+
+#define QIB_7220_TxIntMemBase_OFFS 0x1E8
+
+#define QIB_7220_TxIntMemSize_OFFS 0x1F0
+
+#define QIB_7220_SendDmaBase_OFFS 0x1F8
+#define QIB_7220_SendDmaBase_Reserved_LSB 0x30
+#define QIB_7220_SendDmaBase_Reserved_RMASK 0xFFFF
+#define QIB_7220_SendDmaBase_SendDmaBase_LSB 0x0
+#define QIB_7220_SendDmaBase_SendDmaBase_RMASK 0xFFFFFFFFFFFF
+
+#define QIB_7220_SendDmaLenGen_OFFS 0x200
+#define QIB_7220_SendDmaLenGen_Reserved_LSB 0x13
+#define QIB_7220_SendDmaLenGen_Reserved_RMASK 0x1FFFFFFFFFFF
+#define QIB_7220_SendDmaLenGen_Generation_LSB 0x10
+#define QIB_7220_SendDmaLenGen_Generation_MSB 0x12
+#define QIB_7220_SendDmaLenGen_Generation_RMASK 0x7
+#define QIB_7220_SendDmaLenGen_Length_LSB 0x0
+#define QIB_7220_SendDmaLenGen_Length_RMASK 0xFFFF
+
+#define QIB_7220_SendDmaTail_OFFS 0x208
+#define QIB_7220_SendDmaTail_Reserved_LSB 0x10
+#define QIB_7220_SendDmaTail_Reserved_RMASK 0xFFFFFFFFFFFF
+#define QIB_7220_SendDmaTail_SendDmaTail_LSB 0x0
+#define QIB_7220_SendDmaTail_SendDmaTail_RMASK 0xFFFF
+
+#define QIB_7220_SendDmaHead_OFFS 0x210
+#define QIB_7220_SendDmaHead_Reserved_LSB 0x30
+#define QIB_7220_SendDmaHead_Reserved_RMASK 0xFFFF
+#define QIB_7220_SendDmaHead_InternalSendDmaHead_LSB 0x20
+#define QIB_7220_SendDmaHead_InternalSendDmaHead_RMASK 0xFFFF
+#define QIB_7220_SendDmaHead_Reserved1_LSB 0x10
+#define QIB_7220_SendDmaHead_Reserved1_RMASK 0xFFFF
+#define QIB_7220_SendDmaHead_SendDmaHead_LSB 0x0
+#define QIB_7220_SendDmaHead_SendDmaHead_RMASK 0xFFFF
+
+#define QIB_7220_SendDmaHeadAddr_OFFS 0x218
+#define QIB_7220_SendDmaHeadAddr_Reserved_LSB 0x30
+#define QIB_7220_SendDmaHeadAddr_Reserved_RMASK 0xFFFF
+#define QIB_7220_SendDmaHeadAddr_SendDmaHeadAddr_LSB 0x0
+#define QIB_7220_SendDmaHeadAddr_SendDmaHeadAddr_RMASK 0xFFFFFFFFFFFF
+
+#define QIB_7220_SendDmaBufMask0_OFFS 0x220
+#define QIB_7220_SendDmaBufMask0_BufMask_63_0_LSB 0x0
+#define QIB_7220_SendDmaBufMask0_BufMask_63_0_RMASK 0x0
+
+#define QIB_7220_SendDmaStatus_OFFS 0x238
+#define QIB_7220_SendDmaStatus_ScoreBoardDrainInProg_LSB 0x3F
+#define QIB_7220_SendDmaStatus_ScoreBoardDrainInProg_RMASK 0x1
+#define QIB_7220_SendDmaStatus_AbortInProg_LSB 0x3E
+#define QIB_7220_SendDmaStatus_AbortInProg_RMASK 0x1
+#define QIB_7220_SendDmaStatus_InternalSDmaEnable_LSB 0x3D
+#define QIB_7220_SendDmaStatus_InternalSDmaEnable_RMASK 0x1
+#define QIB_7220_SendDmaStatus_ScbDescIndex_13_0_LSB 0x2F
+#define QIB_7220_SendDmaStatus_ScbDescIndex_13_0_RMASK 0x3FFF
+#define QIB_7220_SendDmaStatus_RpyLowAddr_6_0_LSB 0x28
+#define QIB_7220_SendDmaStatus_RpyLowAddr_6_0_RMASK 0x7F
+#define QIB_7220_SendDmaStatus_RpyTag_7_0_LSB 0x20
+#define QIB_7220_SendDmaStatus_RpyTag_7_0_RMASK 0xFF
+#define QIB_7220_SendDmaStatus_ScbFull_LSB 0x1F
+#define QIB_7220_SendDmaStatus_ScbFull_RMASK 0x1
+#define QIB_7220_SendDmaStatus_ScbEmpty_LSB 0x1E
+#define QIB_7220_SendDmaStatus_ScbEmpty_RMASK 0x1
+#define QIB_7220_SendDmaStatus_ScbEntryValid_LSB 0x1D
+#define QIB_7220_SendDmaStatus_ScbEntryValid_RMASK 0x1
+#define QIB_7220_SendDmaStatus_ScbFetchDescFlag_LSB 0x1C
+#define QIB_7220_SendDmaStatus_ScbFetchDescFlag_RMASK 0x1
+#define QIB_7220_SendDmaStatus_SplFifoReadyToGo_LSB 0x1B
+#define QIB_7220_SendDmaStatus_SplFifoReadyToGo_RMASK 0x1
+#define QIB_7220_SendDmaStatus_SplFifoDisarmed_LSB 0x1A
+#define QIB_7220_SendDmaStatus_SplFifoDisarmed_RMASK 0x1
+#define QIB_7220_SendDmaStatus_SplFifoEmpty_LSB 0x19
+#define QIB_7220_SendDmaStatus_SplFifoEmpty_RMASK 0x1
+#define QIB_7220_SendDmaStatus_SplFifoFull_LSB 0x18
+#define QIB_7220_SendDmaStatus_SplFifoFull_RMASK 0x1
+#define QIB_7220_SendDmaStatus_SplFifoBufNum_LSB 0x10
+#define QIB_7220_SendDmaStatus_SplFifoBufNum_RMASK 0xFF
+#define QIB_7220_SendDmaStatus_SplFifoDescIndex_LSB 0x0
+#define QIB_7220_SendDmaStatus_SplFifoDescIndex_RMASK 0xFFFF
+
+#define QIB_7220_SendBufErr0_OFFS 0x240
+#define QIB_7220_SendBufErr0_SendBufErr_63_0_LSB 0x0
+#define QIB_7220_SendBufErr0_SendBufErr_63_0_RMASK 0x0
+
+#define QIB_7220_RcvHdrAddr0_OFFS 0x270
+#define QIB_7220_RcvHdrAddr0_RcvHdrAddr0_LSB 0x2
+#define QIB_7220_RcvHdrAddr0_RcvHdrAddr0_RMASK 0x3FFFFFFFFF
+#define QIB_7220_RcvHdrAddr0_Reserved_LSB 0x0
+#define QIB_7220_RcvHdrAddr0_Reserved_RMASK 0x3
+
+#define QIB_7220_RcvHdrTailAddr0_OFFS 0x300
+#define QIB_7220_RcvHdrTailAddr0_RcvHdrTailAddr0_LSB 0x2
+#define QIB_7220_RcvHdrTailAddr0_RcvHdrTailAddr0_RMASK 0x3FFFFFFFFF
+#define QIB_7220_RcvHdrTailAddr0_Reserved_LSB 0x0
+#define QIB_7220_RcvHdrTailAddr0_Reserved_RMASK 0x3
+
+#define QIB_7220_ibsd_epb_access_ctrl_OFFS 0x3C0
+#define QIB_7220_ibsd_epb_access_ctrl_sw_ib_epb_req_granted_LSB 0x8
+#define QIB_7220_ibsd_epb_access_ctrl_sw_ib_epb_req_granted_RMASK 0x1
+#define QIB_7220_ibsd_epb_access_ctrl_Reserved_LSB 0x1
+#define QIB_7220_ibsd_epb_access_ctrl_Reserved_RMASK 0x7F
+#define QIB_7220_ibsd_epb_access_ctrl_sw_ib_epb_req_LSB 0x0
+#define QIB_7220_ibsd_epb_access_ctrl_sw_ib_epb_req_RMASK 0x1
+
+#define QIB_7220_ibsd_epb_transaction_reg_OFFS 0x3C8
+#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_rdy_LSB 0x1F
+#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_rdy_RMASK 0x1
+#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_req_error_LSB 0x1E
+#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_req_error_RMASK 0x1
+#define QIB_7220_ibsd_epb_transaction_reg_Reserved_LSB 0x1D
+#define QIB_7220_ibsd_epb_transaction_reg_Reserved_RMASK 0x1
+#define QIB_7220_ibsd_epb_transaction_reg_mem_data_parity_LSB 0x1C
+#define QIB_7220_ibsd_epb_transaction_reg_mem_data_parity_RMASK 0x1
+#define QIB_7220_ibsd_epb_transaction_reg_Reserved1_LSB 0x1B
+#define QIB_7220_ibsd_epb_transaction_reg_Reserved1_RMASK 0x1
+#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_cs_LSB 0x19
+#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_cs_RMASK 0x3
+#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_read_write_LSB 0x18
+#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_read_write_RMASK 0x1
+#define QIB_7220_ibsd_epb_transaction_reg_Reserved2_LSB 0x17
+#define QIB_7220_ibsd_epb_transaction_reg_Reserved2_RMASK 0x1
+#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_address_LSB 0x8
+#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_address_RMASK 0x7FFF
+#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_data_LSB 0x0
+#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_data_RMASK 0xFF
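+
+/*
+ * Illustrative note (not generated output): the field names above
+ * suggest a request/grant handshake for the serdes EPB bus -- set
+ * sw_ib_epb_req in ibsd_epb_access_ctrl, poll sw_ib_epb_req_granted,
+ * then drive address/data/read_write via ibsd_epb_transaction_reg
+ * and poll ib_epb_rdy (checking ib_epb_req_error) for completion.
+ */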
+
+#define QIB_7220_XGXSCfg_OFFS 0x3D8
+#define QIB_7220_XGXSCfg_sel_link_down_for_fctrl_lane_sync_reset_LSB 0x3F
+#define QIB_7220_XGXSCfg_sel_link_down_for_fctrl_lane_sync_reset_RMASK 0x1
+#define QIB_7220_XGXSCfg_Reserved_LSB 0x13
+#define QIB_7220_XGXSCfg_Reserved_RMASK 0xFFFFFFFFFFF
+#define QIB_7220_XGXSCfg_link_sync_mask_LSB 0x9
+#define QIB_7220_XGXSCfg_link_sync_mask_RMASK 0x3FF
+#define QIB_7220_XGXSCfg_Reserved1_LSB 0x3
+#define QIB_7220_XGXSCfg_Reserved1_RMASK 0x3F
+#define QIB_7220_XGXSCfg_xcv_reset_LSB 0x2
+#define QIB_7220_XGXSCfg_xcv_reset_RMASK 0x1
+#define QIB_7220_XGXSCfg_Reserved2_LSB 0x1
+#define QIB_7220_XGXSCfg_Reserved2_RMASK 0x1
+#define QIB_7220_XGXSCfg_tx_rx_reset_LSB 0x0
+#define QIB_7220_XGXSCfg_tx_rx_reset_RMASK 0x1
+
+#define QIB_7220_IBSerDesCtrl_OFFS 0x3E0
+#define QIB_7220_IBSerDesCtrl_Reserved_LSB 0x2D
+#define QIB_7220_IBSerDesCtrl_Reserved_RMASK 0x7FFFF
+#define QIB_7220_IBSerDesCtrl_INT_uC_LSB 0x2C
+#define QIB_7220_IBSerDesCtrl_INT_uC_RMASK 0x1
+#define QIB_7220_IBSerDesCtrl_CKSEL_uC_LSB 0x2A
+#define QIB_7220_IBSerDesCtrl_CKSEL_uC_RMASK 0x3
+#define QIB_7220_IBSerDesCtrl_PLLN_LSB 0x28
+#define QIB_7220_IBSerDesCtrl_PLLN_RMASK 0x3
+#define QIB_7220_IBSerDesCtrl_PLLM_LSB 0x25
+#define QIB_7220_IBSerDesCtrl_PLLM_RMASK 0x7
+#define QIB_7220_IBSerDesCtrl_TXOBPD_LSB 0x24
+#define QIB_7220_IBSerDesCtrl_TXOBPD_RMASK 0x1
+#define QIB_7220_IBSerDesCtrl_TWC_LSB 0x23
+#define QIB_7220_IBSerDesCtrl_TWC_RMASK 0x1
+#define QIB_7220_IBSerDesCtrl_RXIDLE_LSB 0x22
+#define QIB_7220_IBSerDesCtrl_RXIDLE_RMASK 0x1
+#define QIB_7220_IBSerDesCtrl_RXINV_LSB 0x21
+#define QIB_7220_IBSerDesCtrl_RXINV_RMASK 0x1
+#define QIB_7220_IBSerDesCtrl_TXINV_LSB 0x20
+#define QIB_7220_IBSerDesCtrl_TXINV_RMASK 0x1
+#define QIB_7220_IBSerDesCtrl_Reserved1_LSB 0x12
+#define QIB_7220_IBSerDesCtrl_Reserved1_RMASK 0x3FFF
+#define QIB_7220_IBSerDesCtrl_NumSerDesRegsToWrForRXEQ_LSB 0xD
+#define QIB_7220_IBSerDesCtrl_NumSerDesRegsToWrForRXEQ_RMASK 0x1F
+#define QIB_7220_IBSerDesCtrl_NumSerDesRegsToWrForDDS_LSB 0x8
+#define QIB_7220_IBSerDesCtrl_NumSerDesRegsToWrForDDS_RMASK 0x1F
+#define QIB_7220_IBSerDesCtrl_Reserved2_LSB 0x1
+#define QIB_7220_IBSerDesCtrl_Reserved2_RMASK 0x7F
+#define QIB_7220_IBSerDesCtrl_ResetIB_uC_Core_LSB 0x0
+#define QIB_7220_IBSerDesCtrl_ResetIB_uC_Core_RMASK 0x1
+
+#define QIB_7220_pciesd_epb_access_ctrl_OFFS 0x400
+#define QIB_7220_pciesd_epb_access_ctrl_sw_pcie_epb_req_granted_LSB 0x8
+#define QIB_7220_pciesd_epb_access_ctrl_sw_pcie_epb_req_granted_RMASK 0x1
+#define QIB_7220_pciesd_epb_access_ctrl_Reserved_LSB 0x3
+#define QIB_7220_pciesd_epb_access_ctrl_Reserved_RMASK 0x1F
+#define QIB_7220_pciesd_epb_access_ctrl_sw_pcieepb_star_en_LSB 0x1
+#define QIB_7220_pciesd_epb_access_ctrl_sw_pcieepb_star_en_RMASK 0x3
+#define QIB_7220_pciesd_epb_access_ctrl_sw_pcie_epb_req_LSB 0x0
+#define QIB_7220_pciesd_epb_access_ctrl_sw_pcie_epb_req_RMASK 0x1
+
+#define QIB_7220_pciesd_epb_transaction_reg_OFFS 0x408
+#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_rdy_LSB 0x1F
+#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_rdy_RMASK 0x1
+#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_req_error_LSB 0x1E
+#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_req_error_RMASK 0x1
+#define QIB_7220_pciesd_epb_transaction_reg_Reserved_LSB 0x1D
+#define QIB_7220_pciesd_epb_transaction_reg_Reserved_RMASK 0x1
+#define QIB_7220_pciesd_epb_transaction_reg_mem_data_parity_LSB 0x1C
+#define QIB_7220_pciesd_epb_transaction_reg_mem_data_parity_RMASK 0x1
+#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_cs_LSB 0x19
+#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_cs_RMASK 0x7
+#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_read_write_LSB 0x18
+#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_read_write_RMASK 0x1
+#define QIB_7220_pciesd_epb_transaction_reg_Reserved1_LSB 0x17
+#define QIB_7220_pciesd_epb_transaction_reg_Reserved1_RMASK 0x1
+#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_address_LSB 0x8
+#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_address_RMASK 0x7FFF
+#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_data_LSB 0x0
+#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_data_RMASK 0xFF
+
+#define QIB_7220_SerDes_DDSRXEQ0_OFFS 0x500
+#define QIB_7220_SerDes_DDSRXEQ0_reg_addr_LSB 0x4
+#define QIB_7220_SerDes_DDSRXEQ0_reg_addr_RMASK 0x3F
+#define QIB_7220_SerDes_DDSRXEQ0_element_num_LSB 0x0
+#define QIB_7220_SerDes_DDSRXEQ0_element_num_RMASK 0xF
+
+#define QIB_7220_LBIntCnt_OFFS 0x13000
+
+#define QIB_7220_LBFlowStallCnt_OFFS 0x13008
+
+#define QIB_7220_TxSDmaDescCnt_OFFS 0x13010
+
+#define QIB_7220_TxUnsupVLErrCnt_OFFS 0x13018
+
+#define QIB_7220_TxDataPktCnt_OFFS 0x13020
+
+#define QIB_7220_TxFlowPktCnt_OFFS 0x13028
+
+#define QIB_7220_TxDwordCnt_OFFS 0x13030
+
+#define QIB_7220_TxLenErrCnt_OFFS 0x13038
+
+#define QIB_7220_TxMaxMinLenErrCnt_OFFS 0x13040
+
+#define QIB_7220_TxUnderrunCnt_OFFS 0x13048
+
+#define QIB_7220_TxFlowStallCnt_OFFS 0x13050
+
+#define QIB_7220_TxDroppedPktCnt_OFFS 0x13058
+
+#define QIB_7220_RxDroppedPktCnt_OFFS 0x13060
+
+#define QIB_7220_RxDataPktCnt_OFFS 0x13068
+
+#define QIB_7220_RxFlowPktCnt_OFFS 0x13070
+
+#define QIB_7220_RxDwordCnt_OFFS 0x13078
+
+#define QIB_7220_RxLenErrCnt_OFFS 0x13080
+
+#define QIB_7220_RxMaxMinLenErrCnt_OFFS 0x13088
+
+#define QIB_7220_RxICRCErrCnt_OFFS 0x13090
+
+#define QIB_7220_RxVCRCErrCnt_OFFS 0x13098
+
+#define QIB_7220_RxFlowCtrlViolCnt_OFFS 0x130A0
+
+#define QIB_7220_RxVersionErrCnt_OFFS 0x130A8
+
+#define QIB_7220_RxLinkMalformCnt_OFFS 0x130B0
+
+#define QIB_7220_RxEBPCnt_OFFS 0x130B8
+
+#define QIB_7220_RxLPCRCErrCnt_OFFS 0x130C0
+
+#define QIB_7220_RxBufOvflCnt_OFFS 0x130C8
+
+#define QIB_7220_RxTIDFullErrCnt_OFFS 0x130D0
+
+#define QIB_7220_RxTIDValidErrCnt_OFFS 0x130D8
+
+#define QIB_7220_RxPKeyMismatchCnt_OFFS 0x130E0
+
+#define QIB_7220_RxP0HdrEgrOvflCnt_OFFS 0x130E8
+
+#define QIB_7220_IBStatusChangeCnt_OFFS 0x13170
+
+#define QIB_7220_IBLinkErrRecoveryCnt_OFFS 0x13178
+
+#define QIB_7220_IBLinkDownedCnt_OFFS 0x13180
+
+#define QIB_7220_IBSymbolErrCnt_OFFS 0x13188
+
+#define QIB_7220_RxVL15DroppedPktCnt_OFFS 0x13190
+
+#define QIB_7220_RxOtherLocalPhyErrCnt_OFFS 0x13198
+
+#define QIB_7220_PcieRetryBufDiagQwordCnt_OFFS 0x131A0
+
+#define QIB_7220_ExcessBufferOvflCnt_OFFS 0x131A8
+
+#define QIB_7220_LocalLinkIntegrityErrCnt_OFFS 0x131B0
+
+#define QIB_7220_RxVlErrCnt_OFFS 0x131B8
+
+#define QIB_7220_RxDlidFltrCnt_OFFS 0x131C0
+
+#define QIB_7220_CNT_0131C8_OFFS 0x131C8
+
+#define QIB_7220_PSStat_OFFS 0x13200
+
+#define QIB_7220_PSStart_OFFS 0x13208
+
+#define QIB_7220_PSInterval_OFFS 0x13210
+
+#define QIB_7220_PSRcvDataCount_OFFS 0x13218
+
+#define QIB_7220_PSRcvPktsCount_OFFS 0x13220
+
+#define QIB_7220_PSXmitDataCount_OFFS 0x13228
+
+#define QIB_7220_PSXmitPktsCount_OFFS 0x13230
+
+#define QIB_7220_PSXmitWaitCount_OFFS 0x13238
+
+#define QIB_7220_CNT_013240_OFFS 0x13240
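+
+/*
+ * Illustrative note (not generated output): the registers above are
+ * plain 64-bit counters; each _OFFS is a byte offset from the mapped
+ * register base, so, assuming "regs" is the ioremap()ed register
+ * space, a counter is read roughly as
+ *	u64 n = readq(regs + QIB_7220_LBIntCnt_OFFS);
+ */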
+
+#define QIB_7220_RcvEgrArray_OFFS 0x14000
+
+#define QIB_7220_MEM_038000_OFFS 0x38000
+
+#define QIB_7220_RcvTIDArray0_OFFS 0x53000
+
+#define QIB_7220_PIOLaunchFIFO_OFFS 0x64000
+
+#define QIB_7220_MEM_064480_OFFS 0x64480
+
+#define QIB_7220_SendPIOpbcCache_OFFS 0x64800
+
+#define QIB_7220_MEM_064C80_OFFS 0x64C80
+
+#define QIB_7220_PreLaunchFIFO_OFFS 0x65000
+
+#define QIB_7220_MEM_065080_OFFS 0x65080
+
+#define QIB_7220_ScoreBoard_OFFS 0x65400
+
+#define QIB_7220_MEM_065440_OFFS 0x65440
+
+#define QIB_7220_DescriptorFIFO_OFFS 0x65800
+
+#define QIB_7220_MEM_065880_OFFS 0x65880
+
+#define QIB_7220_RcvBuf1_OFFS 0x72000
+
+#define QIB_7220_MEM_074800_OFFS 0x74800
+
+#define QIB_7220_RcvBuf2_OFFS 0x75000
+
+#define QIB_7220_MEM_076400_OFFS 0x76400
+
+#define QIB_7220_RcvFlags_OFFS 0x77000
+
+#define QIB_7220_MEM_078400_OFFS 0x78400
+
+#define QIB_7220_RcvLookupBuf1_OFFS 0x79000
+
+#define QIB_7220_MEM_07A400_OFFS 0x7A400
+
+#define QIB_7220_RcvDMADatBuf_OFFS 0x7B000
+
+#define QIB_7220_RcvDMAHdrBuf_OFFS 0x7B800
+
+#define QIB_7220_MiscRXEIntMem_OFFS 0x7C000
+
+#define QIB_7220_MEM_07D400_OFFS 0x7D400
+
+#define QIB_7220_PCIERcvBuf_OFFS 0x80000
+
+#define QIB_7220_PCIERetryBuf_OFFS 0x84000
+
+#define QIB_7220_PCIERcvBufRdToWrAddr_OFFS 0x88000
+
+#define QIB_7220_PCIECplBuf_OFFS 0x90000
+
+#define QIB_7220_IBSerDesMappTable_OFFS 0x94000
+
+#define QIB_7220_MEM_095000_OFFS 0x95000
+
+#define QIB_7220_SendBuf0_MA_OFFS 0x100000
+
+#define QIB_7220_MEM_1A0000_OFFS 0x1A0000
diff --git a/drivers/infiniband/hw/qib/qib_7322_regs.h b/drivers/infiniband/hw/qib/qib_7322_regs.h
new file mode 100644
index 000000000000..a97440ba924c
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_7322_regs.h
@@ -0,0 +1,3163 @@
+/*
+ * Copyright (c) 2008, 2009, 2010 QLogic Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* This file is mechanically generated from RTL. Any hand-edits will be lost! */
+
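+/*
+ * Field-extraction sketch (illustrative only, not generated output):
+ * each register below has an _OFFS byte offset and a _DEF reset
+ * value; each field has _LSB/_MSB bit positions plus an _RMASK giving
+ * the field mask right-justified to bit 0.  Assuming "regs" is the
+ * ioremap()ed base of the chip register space, a field is read
+ * roughly as:
+ *
+ *	u64 rev = readq(regs + QIB_7322_Revision_OFFS);
+ *	u32 major = (rev >> QIB_7322_Revision_R_ChipRevMajor_LSB) &
+ *		    QIB_7322_Revision_R_ChipRevMajor_RMASK;
+ */
+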
+#define QIB_7322_Revision_OFFS 0x0
+#define QIB_7322_Revision_DEF 0x0000000002010601
+#define QIB_7322_Revision_R_Simulator_LSB 0x3F
+#define QIB_7322_Revision_R_Simulator_MSB 0x3F
+#define QIB_7322_Revision_R_Simulator_RMASK 0x1
+#define QIB_7322_Revision_R_Emulation_LSB 0x3E
+#define QIB_7322_Revision_R_Emulation_MSB 0x3E
+#define QIB_7322_Revision_R_Emulation_RMASK 0x1
+#define QIB_7322_Revision_R_Emulation_Revcode_LSB 0x28
+#define QIB_7322_Revision_R_Emulation_Revcode_MSB 0x3D
+#define QIB_7322_Revision_R_Emulation_Revcode_RMASK 0x3FFFFF
+#define QIB_7322_Revision_BoardID_LSB 0x20
+#define QIB_7322_Revision_BoardID_MSB 0x27
+#define QIB_7322_Revision_BoardID_RMASK 0xFF
+#define QIB_7322_Revision_R_SW_LSB 0x18
+#define QIB_7322_Revision_R_SW_MSB 0x1F
+#define QIB_7322_Revision_R_SW_RMASK 0xFF
+#define QIB_7322_Revision_R_Arch_LSB 0x10
+#define QIB_7322_Revision_R_Arch_MSB 0x17
+#define QIB_7322_Revision_R_Arch_RMASK 0xFF
+#define QIB_7322_Revision_R_ChipRevMajor_LSB 0x8
+#define QIB_7322_Revision_R_ChipRevMajor_MSB 0xF
+#define QIB_7322_Revision_R_ChipRevMajor_RMASK 0xFF
+#define QIB_7322_Revision_R_ChipRevMinor_LSB 0x0
+#define QIB_7322_Revision_R_ChipRevMinor_MSB 0x7
+#define QIB_7322_Revision_R_ChipRevMinor_RMASK 0xFF
+
+#define QIB_7322_Control_OFFS 0x8
+#define QIB_7322_Control_DEF 0x0000000000000000
+#define QIB_7322_Control_PCIECplQDiagEn_LSB 0x6
+#define QIB_7322_Control_PCIECplQDiagEn_MSB 0x6
+#define QIB_7322_Control_PCIECplQDiagEn_RMASK 0x1
+#define QIB_7322_Control_PCIEPostQDiagEn_LSB 0x5
+#define QIB_7322_Control_PCIEPostQDiagEn_MSB 0x5
+#define QIB_7322_Control_PCIEPostQDiagEn_RMASK 0x1
+#define QIB_7322_Control_SDmaDescFetchPriorityEn_LSB 0x4
+#define QIB_7322_Control_SDmaDescFetchPriorityEn_MSB 0x4
+#define QIB_7322_Control_SDmaDescFetchPriorityEn_RMASK 0x1
+#define QIB_7322_Control_PCIERetryBufDiagEn_LSB 0x3
+#define QIB_7322_Control_PCIERetryBufDiagEn_MSB 0x3
+#define QIB_7322_Control_PCIERetryBufDiagEn_RMASK 0x1
+#define QIB_7322_Control_FreezeMode_LSB 0x1
+#define QIB_7322_Control_FreezeMode_MSB 0x1
+#define QIB_7322_Control_FreezeMode_RMASK 0x1
+#define QIB_7322_Control_SyncReset_LSB 0x0
+#define QIB_7322_Control_SyncReset_MSB 0x0
+#define QIB_7322_Control_SyncReset_RMASK 0x1
+
+#define QIB_7322_PageAlign_OFFS 0x10
+#define QIB_7322_PageAlign_DEF 0x0000000000001000
+
+#define QIB_7322_ContextCnt_OFFS 0x18
+#define QIB_7322_ContextCnt_DEF 0x0000000000000012
+
+#define QIB_7322_Scratch_OFFS 0x20
+#define QIB_7322_Scratch_DEF 0x0000000000000000
+
+#define QIB_7322_CntrRegBase_OFFS 0x28
+#define QIB_7322_CntrRegBase_DEF 0x0000000000011000
+
+#define QIB_7322_SendRegBase_OFFS 0x30
+#define QIB_7322_SendRegBase_DEF 0x0000000000003000
+
+#define QIB_7322_UserRegBase_OFFS 0x38
+#define QIB_7322_UserRegBase_DEF 0x0000000000200000
+
+#define QIB_7322_IntMask_OFFS 0x68
+#define QIB_7322_IntMask_DEF 0x0000000000000000
+#define QIB_7322_IntMask_SDmaIntMask_1_LSB 0x3F
+#define QIB_7322_IntMask_SDmaIntMask_1_MSB 0x3F
+#define QIB_7322_IntMask_SDmaIntMask_1_RMASK 0x1
+#define QIB_7322_IntMask_SDmaIntMask_0_LSB 0x3E
+#define QIB_7322_IntMask_SDmaIntMask_0_MSB 0x3E
+#define QIB_7322_IntMask_SDmaIntMask_0_RMASK 0x1
+#define QIB_7322_IntMask_SDmaProgressIntMask_1_LSB 0x3D
+#define QIB_7322_IntMask_SDmaProgressIntMask_1_MSB 0x3D
+#define QIB_7322_IntMask_SDmaProgressIntMask_1_RMASK 0x1
+#define QIB_7322_IntMask_SDmaProgressIntMask_0_LSB 0x3C
+#define QIB_7322_IntMask_SDmaProgressIntMask_0_MSB 0x3C
+#define QIB_7322_IntMask_SDmaProgressIntMask_0_RMASK 0x1
+#define QIB_7322_IntMask_SDmaIdleIntMask_1_LSB 0x3B
+#define QIB_7322_IntMask_SDmaIdleIntMask_1_MSB 0x3B
+#define QIB_7322_IntMask_SDmaIdleIntMask_1_RMASK 0x1
+#define QIB_7322_IntMask_SDmaIdleIntMask_0_LSB 0x3A
+#define QIB_7322_IntMask_SDmaIdleIntMask_0_MSB 0x3A
+#define QIB_7322_IntMask_SDmaIdleIntMask_0_RMASK 0x1
+#define QIB_7322_IntMask_SDmaCleanupDoneMask_1_LSB 0x39
+#define QIB_7322_IntMask_SDmaCleanupDoneMask_1_MSB 0x39
+#define QIB_7322_IntMask_SDmaCleanupDoneMask_1_RMASK 0x1
+#define QIB_7322_IntMask_SDmaCleanupDoneMask_0_LSB 0x38
+#define QIB_7322_IntMask_SDmaCleanupDoneMask_0_MSB 0x38
+#define QIB_7322_IntMask_SDmaCleanupDoneMask_0_RMASK 0x1
+#define QIB_7322_IntMask_RcvUrg17IntMask_LSB 0x31
+#define QIB_7322_IntMask_RcvUrg17IntMask_MSB 0x31
+#define QIB_7322_IntMask_RcvUrg17IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvUrg16IntMask_LSB 0x30
+#define QIB_7322_IntMask_RcvUrg16IntMask_MSB 0x30
+#define QIB_7322_IntMask_RcvUrg16IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvUrg15IntMask_LSB 0x2F
+#define QIB_7322_IntMask_RcvUrg15IntMask_MSB 0x2F
+#define QIB_7322_IntMask_RcvUrg15IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvUrg14IntMask_LSB 0x2E
+#define QIB_7322_IntMask_RcvUrg14IntMask_MSB 0x2E
+#define QIB_7322_IntMask_RcvUrg14IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvUrg13IntMask_LSB 0x2D
+#define QIB_7322_IntMask_RcvUrg13IntMask_MSB 0x2D
+#define QIB_7322_IntMask_RcvUrg13IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvUrg12IntMask_LSB 0x2C
+#define QIB_7322_IntMask_RcvUrg12IntMask_MSB 0x2C
+#define QIB_7322_IntMask_RcvUrg12IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvUrg11IntMask_LSB 0x2B
+#define QIB_7322_IntMask_RcvUrg11IntMask_MSB 0x2B
+#define QIB_7322_IntMask_RcvUrg11IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvUrg10IntMask_LSB 0x2A
+#define QIB_7322_IntMask_RcvUrg10IntMask_MSB 0x2A
+#define QIB_7322_IntMask_RcvUrg10IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvUrg9IntMask_LSB 0x29
+#define QIB_7322_IntMask_RcvUrg9IntMask_MSB 0x29
+#define QIB_7322_IntMask_RcvUrg9IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvUrg8IntMask_LSB 0x28
+#define QIB_7322_IntMask_RcvUrg8IntMask_MSB 0x28
+#define QIB_7322_IntMask_RcvUrg8IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvUrg7IntMask_LSB 0x27
+#define QIB_7322_IntMask_RcvUrg7IntMask_MSB 0x27
+#define QIB_7322_IntMask_RcvUrg7IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvUrg6IntMask_LSB 0x26
+#define QIB_7322_IntMask_RcvUrg6IntMask_MSB 0x26
+#define QIB_7322_IntMask_RcvUrg6IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvUrg5IntMask_LSB 0x25
+#define QIB_7322_IntMask_RcvUrg5IntMask_MSB 0x25
+#define QIB_7322_IntMask_RcvUrg5IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvUrg4IntMask_LSB 0x24
+#define QIB_7322_IntMask_RcvUrg4IntMask_MSB 0x24
+#define QIB_7322_IntMask_RcvUrg4IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvUrg3IntMask_LSB 0x23
+#define QIB_7322_IntMask_RcvUrg3IntMask_MSB 0x23
+#define QIB_7322_IntMask_RcvUrg3IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvUrg2IntMask_LSB 0x22
+#define QIB_7322_IntMask_RcvUrg2IntMask_MSB 0x22
+#define QIB_7322_IntMask_RcvUrg2IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvUrg1IntMask_LSB 0x21
+#define QIB_7322_IntMask_RcvUrg1IntMask_MSB 0x21
+#define QIB_7322_IntMask_RcvUrg1IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvUrg0IntMask_LSB 0x20
+#define QIB_7322_IntMask_RcvUrg0IntMask_MSB 0x20
+#define QIB_7322_IntMask_RcvUrg0IntMask_RMASK 0x1
+#define QIB_7322_IntMask_ErrIntMask_1_LSB 0x1F
+#define QIB_7322_IntMask_ErrIntMask_1_MSB 0x1F
+#define QIB_7322_IntMask_ErrIntMask_1_RMASK 0x1
+#define QIB_7322_IntMask_ErrIntMask_0_LSB 0x1E
+#define QIB_7322_IntMask_ErrIntMask_0_MSB 0x1E
+#define QIB_7322_IntMask_ErrIntMask_0_RMASK 0x1
+#define QIB_7322_IntMask_ErrIntMask_LSB 0x1D
+#define QIB_7322_IntMask_ErrIntMask_MSB 0x1D
+#define QIB_7322_IntMask_ErrIntMask_RMASK 0x1
+#define QIB_7322_IntMask_AssertGPIOIntMask_LSB 0x1C
+#define QIB_7322_IntMask_AssertGPIOIntMask_MSB 0x1C
+#define QIB_7322_IntMask_AssertGPIOIntMask_RMASK 0x1
+#define QIB_7322_IntMask_SendDoneIntMask_1_LSB 0x19
+#define QIB_7322_IntMask_SendDoneIntMask_1_MSB 0x19
+#define QIB_7322_IntMask_SendDoneIntMask_1_RMASK 0x1
+#define QIB_7322_IntMask_SendDoneIntMask_0_LSB 0x18
+#define QIB_7322_IntMask_SendDoneIntMask_0_MSB 0x18
+#define QIB_7322_IntMask_SendDoneIntMask_0_RMASK 0x1
+#define QIB_7322_IntMask_SendBufAvailIntMask_LSB 0x17
+#define QIB_7322_IntMask_SendBufAvailIntMask_MSB 0x17
+#define QIB_7322_IntMask_SendBufAvailIntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvAvail17IntMask_LSB 0x11
+#define QIB_7322_IntMask_RcvAvail17IntMask_MSB 0x11
+#define QIB_7322_IntMask_RcvAvail17IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvAvail16IntMask_LSB 0x10
+#define QIB_7322_IntMask_RcvAvail16IntMask_MSB 0x10
+#define QIB_7322_IntMask_RcvAvail16IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvAvail15IntMask_LSB 0xF
+#define QIB_7322_IntMask_RcvAvail15IntMask_MSB 0xF
+#define QIB_7322_IntMask_RcvAvail15IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvAvail14IntMask_LSB 0xE
+#define QIB_7322_IntMask_RcvAvail14IntMask_MSB 0xE
+#define QIB_7322_IntMask_RcvAvail14IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvAvail13IntMask_LSB 0xD
+#define QIB_7322_IntMask_RcvAvail13IntMask_MSB 0xD
+#define QIB_7322_IntMask_RcvAvail13IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvAvail12IntMask_LSB 0xC
+#define QIB_7322_IntMask_RcvAvail12IntMask_MSB 0xC
+#define QIB_7322_IntMask_RcvAvail12IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvAvail11IntMask_LSB 0xB
+#define QIB_7322_IntMask_RcvAvail11IntMask_MSB 0xB
+#define QIB_7322_IntMask_RcvAvail11IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvAvail10IntMask_LSB 0xA
+#define QIB_7322_IntMask_RcvAvail10IntMask_MSB 0xA
+#define QIB_7322_IntMask_RcvAvail10IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvAvail9IntMask_LSB 0x9
+#define QIB_7322_IntMask_RcvAvail9IntMask_MSB 0x9
+#define QIB_7322_IntMask_RcvAvail9IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvAvail8IntMask_LSB 0x8
+#define QIB_7322_IntMask_RcvAvail8IntMask_MSB 0x8
+#define QIB_7322_IntMask_RcvAvail8IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvAvail7IntMask_LSB 0x7
+#define QIB_7322_IntMask_RcvAvail7IntMask_MSB 0x7
+#define QIB_7322_IntMask_RcvAvail7IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvAvail6IntMask_LSB 0x6
+#define QIB_7322_IntMask_RcvAvail6IntMask_MSB 0x6
+#define QIB_7322_IntMask_RcvAvail6IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvAvail5IntMask_LSB 0x5
+#define QIB_7322_IntMask_RcvAvail5IntMask_MSB 0x5
+#define QIB_7322_IntMask_RcvAvail5IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvAvail4IntMask_LSB 0x4
+#define QIB_7322_IntMask_RcvAvail4IntMask_MSB 0x4
+#define QIB_7322_IntMask_RcvAvail4IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvAvail3IntMask_LSB 0x3
+#define QIB_7322_IntMask_RcvAvail3IntMask_MSB 0x3
+#define QIB_7322_IntMask_RcvAvail3IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvAvail2IntMask_LSB 0x2
+#define QIB_7322_IntMask_RcvAvail2IntMask_MSB 0x2
+#define QIB_7322_IntMask_RcvAvail2IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvAvail1IntMask_LSB 0x1
+#define QIB_7322_IntMask_RcvAvail1IntMask_MSB 0x1
+#define QIB_7322_IntMask_RcvAvail1IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvAvail0IntMask_LSB 0x0
+#define QIB_7322_IntMask_RcvAvail0IntMask_MSB 0x0
+#define QIB_7322_IntMask_RcvAvail0IntMask_RMASK 0x1
+
+#define QIB_7322_IntStatus_OFFS 0x70
+#define QIB_7322_IntStatus_DEF 0x0000000000000000
+#define QIB_7322_IntStatus_SDmaInt_1_LSB 0x3F
+#define QIB_7322_IntStatus_SDmaInt_1_MSB 0x3F
+#define QIB_7322_IntStatus_SDmaInt_1_RMASK 0x1
+#define QIB_7322_IntStatus_SDmaInt_0_LSB 0x3E
+#define QIB_7322_IntStatus_SDmaInt_0_MSB 0x3E
+#define QIB_7322_IntStatus_SDmaInt_0_RMASK 0x1
+#define QIB_7322_IntStatus_SDmaProgressInt_1_LSB 0x3D
+#define QIB_7322_IntStatus_SDmaProgressInt_1_MSB 0x3D
+#define QIB_7322_IntStatus_SDmaProgressInt_1_RMASK 0x1
+#define QIB_7322_IntStatus_SDmaProgressInt_0_LSB 0x3C
+#define QIB_7322_IntStatus_SDmaProgressInt_0_MSB 0x3C
+#define QIB_7322_IntStatus_SDmaProgressInt_0_RMASK 0x1
+#define QIB_7322_IntStatus_SDmaIdleInt_1_LSB 0x3B
+#define QIB_7322_IntStatus_SDmaIdleInt_1_MSB 0x3B
+#define QIB_7322_IntStatus_SDmaIdleInt_1_RMASK 0x1
+#define QIB_7322_IntStatus_SDmaIdleInt_0_LSB 0x3A
+#define QIB_7322_IntStatus_SDmaIdleInt_0_MSB 0x3A
+#define QIB_7322_IntStatus_SDmaIdleInt_0_RMASK 0x1
+#define QIB_7322_IntStatus_SDmaCleanupDone_1_LSB 0x39
+#define QIB_7322_IntStatus_SDmaCleanupDone_1_MSB 0x39
+#define QIB_7322_IntStatus_SDmaCleanupDone_1_RMASK 0x1
+#define QIB_7322_IntStatus_SDmaCleanupDone_0_LSB 0x38
+#define QIB_7322_IntStatus_SDmaCleanupDone_0_MSB 0x38
+#define QIB_7322_IntStatus_SDmaCleanupDone_0_RMASK 0x1
+#define QIB_7322_IntStatus_RcvUrg17_LSB 0x31
+#define QIB_7322_IntStatus_RcvUrg17_MSB 0x31
+#define QIB_7322_IntStatus_RcvUrg17_RMASK 0x1
+#define QIB_7322_IntStatus_RcvUrg16_LSB 0x30
+#define QIB_7322_IntStatus_RcvUrg16_MSB 0x30
+#define QIB_7322_IntStatus_RcvUrg16_RMASK 0x1
+#define QIB_7322_IntStatus_RcvUrg15_LSB 0x2F
+#define QIB_7322_IntStatus_RcvUrg15_MSB 0x2F
+#define QIB_7322_IntStatus_RcvUrg15_RMASK 0x1
+#define QIB_7322_IntStatus_RcvUrg14_LSB 0x2E
+#define QIB_7322_IntStatus_RcvUrg14_MSB 0x2E
+#define QIB_7322_IntStatus_RcvUrg14_RMASK 0x1
+#define QIB_7322_IntStatus_RcvUrg13_LSB 0x2D
+#define QIB_7322_IntStatus_RcvUrg13_MSB 0x2D
+#define QIB_7322_IntStatus_RcvUrg13_RMASK 0x1
+#define QIB_7322_IntStatus_RcvUrg12_LSB 0x2C
+#define QIB_7322_IntStatus_RcvUrg12_MSB 0x2C
+#define QIB_7322_IntStatus_RcvUrg12_RMASK 0x1
+#define QIB_7322_IntStatus_RcvUrg11_LSB 0x2B
+#define QIB_7322_IntStatus_RcvUrg11_MSB 0x2B
+#define QIB_7322_IntStatus_RcvUrg11_RMASK 0x1
+#define QIB_7322_IntStatus_RcvUrg10_LSB 0x2A
+#define QIB_7322_IntStatus_RcvUrg10_MSB 0x2A
+#define QIB_7322_IntStatus_RcvUrg10_RMASK 0x1
+#define QIB_7322_IntStatus_RcvUrg9_LSB 0x29
+#define QIB_7322_IntStatus_RcvUrg9_MSB 0x29
+#define QIB_7322_IntStatus_RcvUrg9_RMASK 0x1
+#define QIB_7322_IntStatus_RcvUrg8_LSB 0x28
+#define QIB_7322_IntStatus_RcvUrg8_MSB 0x28
+#define QIB_7322_IntStatus_RcvUrg8_RMASK 0x1
+#define QIB_7322_IntStatus_RcvUrg7_LSB 0x27
+#define QIB_7322_IntStatus_RcvUrg7_MSB 0x27
+#define QIB_7322_IntStatus_RcvUrg7_RMASK 0x1
+#define QIB_7322_IntStatus_RcvUrg6_LSB 0x26
+#define QIB_7322_IntStatus_RcvUrg6_MSB 0x26
+#define QIB_7322_IntStatus_RcvUrg6_RMASK 0x1
+#define QIB_7322_IntStatus_RcvUrg5_LSB 0x25
+#define QIB_7322_IntStatus_RcvUrg5_MSB 0x25
+#define QIB_7322_IntStatus_RcvUrg5_RMASK 0x1
+#define QIB_7322_IntStatus_RcvUrg4_LSB 0x24
+#define QIB_7322_IntStatus_RcvUrg4_MSB 0x24
+#define QIB_7322_IntStatus_RcvUrg4_RMASK 0x1
+#define QIB_7322_IntStatus_RcvUrg3_LSB 0x23
+#define QIB_7322_IntStatus_RcvUrg3_MSB 0x23
+#define QIB_7322_IntStatus_RcvUrg3_RMASK 0x1
+#define QIB_7322_IntStatus_RcvUrg2_LSB 0x22
+#define QIB_7322_IntStatus_RcvUrg2_MSB 0x22
+#define QIB_7322_IntStatus_RcvUrg2_RMASK 0x1
+#define QIB_7322_IntStatus_RcvUrg1_LSB 0x21
+#define QIB_7322_IntStatus_RcvUrg1_MSB 0x21
+#define QIB_7322_IntStatus_RcvUrg1_RMASK 0x1
+#define QIB_7322_IntStatus_RcvUrg0_LSB 0x20
+#define QIB_7322_IntStatus_RcvUrg0_MSB 0x20
+#define QIB_7322_IntStatus_RcvUrg0_RMASK 0x1
+#define QIB_7322_IntStatus_Err_1_LSB 0x1F
+#define QIB_7322_IntStatus_Err_1_MSB 0x1F
+#define QIB_7322_IntStatus_Err_1_RMASK 0x1
+#define QIB_7322_IntStatus_Err_0_LSB 0x1E
+#define QIB_7322_IntStatus_Err_0_MSB 0x1E
+#define QIB_7322_IntStatus_Err_0_RMASK 0x1
+#define QIB_7322_IntStatus_Err_LSB 0x1D
+#define QIB_7322_IntStatus_Err_MSB 0x1D
+#define QIB_7322_IntStatus_Err_RMASK 0x1
+#define QIB_7322_IntStatus_AssertGPIO_LSB 0x1C
+#define QIB_7322_IntStatus_AssertGPIO_MSB 0x1C
+#define QIB_7322_IntStatus_AssertGPIO_RMASK 0x1
+#define QIB_7322_IntStatus_SendDone_1_LSB 0x19
+#define QIB_7322_IntStatus_SendDone_1_MSB 0x19
+#define QIB_7322_IntStatus_SendDone_1_RMASK 0x1
+#define QIB_7322_IntStatus_SendDone_0_LSB 0x18
+#define QIB_7322_IntStatus_SendDone_0_MSB 0x18
+#define QIB_7322_IntStatus_SendDone_0_RMASK 0x1
+#define QIB_7322_IntStatus_SendBufAvail_LSB 0x17
+#define QIB_7322_IntStatus_SendBufAvail_MSB 0x17
+#define QIB_7322_IntStatus_SendBufAvail_RMASK 0x1
+#define QIB_7322_IntStatus_RcvAvail17_LSB 0x11
+#define QIB_7322_IntStatus_RcvAvail17_MSB 0x11
+#define QIB_7322_IntStatus_RcvAvail17_RMASK 0x1
+#define QIB_7322_IntStatus_RcvAvail16_LSB 0x10
+#define QIB_7322_IntStatus_RcvAvail16_MSB 0x10
+#define QIB_7322_IntStatus_RcvAvail16_RMASK 0x1
+#define QIB_7322_IntStatus_RcvAvail15_LSB 0xF
+#define QIB_7322_IntStatus_RcvAvail15_MSB 0xF
+#define QIB_7322_IntStatus_RcvAvail15_RMASK 0x1
+#define QIB_7322_IntStatus_RcvAvail14_LSB 0xE
+#define QIB_7322_IntStatus_RcvAvail14_MSB 0xE
+#define QIB_7322_IntStatus_RcvAvail14_RMASK 0x1
+#define QIB_7322_IntStatus_RcvAvail13_LSB 0xD
+#define QIB_7322_IntStatus_RcvAvail13_MSB 0xD
+#define QIB_7322_IntStatus_RcvAvail13_RMASK 0x1
+#define QIB_7322_IntStatus_RcvAvail12_LSB 0xC
+#define QIB_7322_IntStatus_RcvAvail12_MSB 0xC
+#define QIB_7322_IntStatus_RcvAvail12_RMASK 0x1
+#define QIB_7322_IntStatus_RcvAvail11_LSB 0xB
+#define QIB_7322_IntStatus_RcvAvail11_MSB 0xB
+#define QIB_7322_IntStatus_RcvAvail11_RMASK 0x1
+#define QIB_7322_IntStatus_RcvAvail10_LSB 0xA
+#define QIB_7322_IntStatus_RcvAvail10_MSB 0xA
+#define QIB_7322_IntStatus_RcvAvail10_RMASK 0x1
+#define QIB_7322_IntStatus_RcvAvail9_LSB 0x9
+#define QIB_7322_IntStatus_RcvAvail9_MSB 0x9
+#define QIB_7322_IntStatus_RcvAvail9_RMASK 0x1
+#define QIB_7322_IntStatus_RcvAvail8_LSB 0x8
+#define QIB_7322_IntStatus_RcvAvail8_MSB 0x8
+#define QIB_7322_IntStatus_RcvAvail8_RMASK 0x1
+#define QIB_7322_IntStatus_RcvAvail7_LSB 0x7
+#define QIB_7322_IntStatus_RcvAvail7_MSB 0x7
+#define QIB_7322_IntStatus_RcvAvail7_RMASK 0x1
+#define QIB_7322_IntStatus_RcvAvail6_LSB 0x6
+#define QIB_7322_IntStatus_RcvAvail6_MSB 0x6
+#define QIB_7322_IntStatus_RcvAvail6_RMASK 0x1
+#define QIB_7322_IntStatus_RcvAvail5_LSB 0x5
+#define QIB_7322_IntStatus_RcvAvail5_MSB 0x5
+#define QIB_7322_IntStatus_RcvAvail5_RMASK 0x1
+#define QIB_7322_IntStatus_RcvAvail4_LSB 0x4
+#define QIB_7322_IntStatus_RcvAvail4_MSB 0x4
+#define QIB_7322_IntStatus_RcvAvail4_RMASK 0x1
+#define QIB_7322_IntStatus_RcvAvail3_LSB 0x3
+#define QIB_7322_IntStatus_RcvAvail3_MSB 0x3
+#define QIB_7322_IntStatus_RcvAvail3_RMASK 0x1
+#define QIB_7322_IntStatus_RcvAvail2_LSB 0x2
+#define QIB_7322_IntStatus_RcvAvail2_MSB 0x2
+#define QIB_7322_IntStatus_RcvAvail2_RMASK 0x1
+#define QIB_7322_IntStatus_RcvAvail1_LSB 0x1
+#define QIB_7322_IntStatus_RcvAvail1_MSB 0x1
+#define QIB_7322_IntStatus_RcvAvail1_RMASK 0x1
+#define QIB_7322_IntStatus_RcvAvail0_LSB 0x0
+#define QIB_7322_IntStatus_RcvAvail0_MSB 0x0
+#define QIB_7322_IntStatus_RcvAvail0_RMASK 0x1
+
+#define QIB_7322_IntClear_OFFS 0x78
+#define QIB_7322_IntClear_DEF 0x0000000000000000
+#define QIB_7322_IntClear_SDmaIntClear_1_LSB 0x3F
+#define QIB_7322_IntClear_SDmaIntClear_1_MSB 0x3F
+#define QIB_7322_IntClear_SDmaIntClear_1_RMASK 0x1
+#define QIB_7322_IntClear_SDmaIntClear_0_LSB 0x3E
+#define QIB_7322_IntClear_SDmaIntClear_0_MSB 0x3E
+#define QIB_7322_IntClear_SDmaIntClear_0_RMASK 0x1
+#define QIB_7322_IntClear_SDmaProgressIntClear_1_LSB 0x3D
+#define QIB_7322_IntClear_SDmaProgressIntClear_1_MSB 0x3D
+#define QIB_7322_IntClear_SDmaProgressIntClear_1_RMASK 0x1
+#define QIB_7322_IntClear_SDmaProgressIntClear_0_LSB 0x3C
+#define QIB_7322_IntClear_SDmaProgressIntClear_0_MSB 0x3C
+#define QIB_7322_IntClear_SDmaProgressIntClear_0_RMASK 0x1
+#define QIB_7322_IntClear_SDmaIdleIntClear_1_LSB 0x3B
+#define QIB_7322_IntClear_SDmaIdleIntClear_1_MSB 0x3B
+#define QIB_7322_IntClear_SDmaIdleIntClear_1_RMASK 0x1
+#define QIB_7322_IntClear_SDmaIdleIntClear_0_LSB 0x3A
+#define QIB_7322_IntClear_SDmaIdleIntClear_0_MSB 0x3A
+#define QIB_7322_IntClear_SDmaIdleIntClear_0_RMASK 0x1
+#define QIB_7322_IntClear_SDmaCleanupDoneClear_1_LSB 0x39
+#define QIB_7322_IntClear_SDmaCleanupDoneClear_1_MSB 0x39
+#define QIB_7322_IntClear_SDmaCleanupDoneClear_1_RMASK 0x1
+#define QIB_7322_IntClear_SDmaCleanupDoneClear_0_LSB 0x38
+#define QIB_7322_IntClear_SDmaCleanupDoneClear_0_MSB 0x38
+#define QIB_7322_IntClear_SDmaCleanupDoneClear_0_RMASK 0x1
+#define QIB_7322_IntClear_RcvUrg17IntClear_LSB 0x31
+#define QIB_7322_IntClear_RcvUrg17IntClear_MSB 0x31
+#define QIB_7322_IntClear_RcvUrg17IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvUrg16IntClear_LSB 0x30
+#define QIB_7322_IntClear_RcvUrg16IntClear_MSB 0x30
+#define QIB_7322_IntClear_RcvUrg16IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvUrg15IntClear_LSB 0x2F
+#define QIB_7322_IntClear_RcvUrg15IntClear_MSB 0x2F
+#define QIB_7322_IntClear_RcvUrg15IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvUrg14IntClear_LSB 0x2E
+#define QIB_7322_IntClear_RcvUrg14IntClear_MSB 0x2E
+#define QIB_7322_IntClear_RcvUrg14IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvUrg13IntClear_LSB 0x2D
+#define QIB_7322_IntClear_RcvUrg13IntClear_MSB 0x2D
+#define QIB_7322_IntClear_RcvUrg13IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvUrg12IntClear_LSB 0x2C
+#define QIB_7322_IntClear_RcvUrg12IntClear_MSB 0x2C
+#define QIB_7322_IntClear_RcvUrg12IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvUrg11IntClear_LSB 0x2B
+#define QIB_7322_IntClear_RcvUrg11IntClear_MSB 0x2B
+#define QIB_7322_IntClear_RcvUrg11IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvUrg10IntClear_LSB 0x2A
+#define QIB_7322_IntClear_RcvUrg10IntClear_MSB 0x2A
+#define QIB_7322_IntClear_RcvUrg10IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvUrg9IntClear_LSB 0x29
+#define QIB_7322_IntClear_RcvUrg9IntClear_MSB 0x29
+#define QIB_7322_IntClear_RcvUrg9IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvUrg8IntClear_LSB 0x28
+#define QIB_7322_IntClear_RcvUrg8IntClear_MSB 0x28
+#define QIB_7322_IntClear_RcvUrg8IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvUrg7IntClear_LSB 0x27
+#define QIB_7322_IntClear_RcvUrg7IntClear_MSB 0x27
+#define QIB_7322_IntClear_RcvUrg7IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvUrg6IntClear_LSB 0x26
+#define QIB_7322_IntClear_RcvUrg6IntClear_MSB 0x26
+#define QIB_7322_IntClear_RcvUrg6IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvUrg5IntClear_LSB 0x25
+#define QIB_7322_IntClear_RcvUrg5IntClear_MSB 0x25
+#define QIB_7322_IntClear_RcvUrg5IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvUrg4IntClear_LSB 0x24
+#define QIB_7322_IntClear_RcvUrg4IntClear_MSB 0x24
+#define QIB_7322_IntClear_RcvUrg4IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvUrg3IntClear_LSB 0x23
+#define QIB_7322_IntClear_RcvUrg3IntClear_MSB 0x23
+#define QIB_7322_IntClear_RcvUrg3IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvUrg2IntClear_LSB 0x22
+#define QIB_7322_IntClear_RcvUrg2IntClear_MSB 0x22
+#define QIB_7322_IntClear_RcvUrg2IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvUrg1IntClear_LSB 0x21
+#define QIB_7322_IntClear_RcvUrg1IntClear_MSB 0x21
+#define QIB_7322_IntClear_RcvUrg1IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvUrg0IntClear_LSB 0x20
+#define QIB_7322_IntClear_RcvUrg0IntClear_MSB 0x20
+#define QIB_7322_IntClear_RcvUrg0IntClear_RMASK 0x1
+#define QIB_7322_IntClear_ErrIntClear_1_LSB 0x1F
+#define QIB_7322_IntClear_ErrIntClear_1_MSB 0x1F
+#define QIB_7322_IntClear_ErrIntClear_1_RMASK 0x1
+#define QIB_7322_IntClear_ErrIntClear_0_LSB 0x1E
+#define QIB_7322_IntClear_ErrIntClear_0_MSB 0x1E
+#define QIB_7322_IntClear_ErrIntClear_0_RMASK 0x1
+#define QIB_7322_IntClear_ErrIntClear_LSB 0x1D
+#define QIB_7322_IntClear_ErrIntClear_MSB 0x1D
+#define QIB_7322_IntClear_ErrIntClear_RMASK 0x1
+#define QIB_7322_IntClear_AssertGPIOIntClear_LSB 0x1C
+#define QIB_7322_IntClear_AssertGPIOIntClear_MSB 0x1C
+#define QIB_7322_IntClear_AssertGPIOIntClear_RMASK 0x1
+#define QIB_7322_IntClear_SendDoneIntClear_1_LSB 0x19
+#define QIB_7322_IntClear_SendDoneIntClear_1_MSB 0x19
+#define QIB_7322_IntClear_SendDoneIntClear_1_RMASK 0x1
+#define QIB_7322_IntClear_SendDoneIntClear_0_LSB 0x18
+#define QIB_7322_IntClear_SendDoneIntClear_0_MSB 0x18
+#define QIB_7322_IntClear_SendDoneIntClear_0_RMASK 0x1
+#define QIB_7322_IntClear_SendBufAvailIntClear_LSB 0x17
+#define QIB_7322_IntClear_SendBufAvailIntClear_MSB 0x17
+#define QIB_7322_IntClear_SendBufAvailIntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvAvail17IntClear_LSB 0x11
+#define QIB_7322_IntClear_RcvAvail17IntClear_MSB 0x11
+#define QIB_7322_IntClear_RcvAvail17IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvAvail16IntClear_LSB 0x10
+#define QIB_7322_IntClear_RcvAvail16IntClear_MSB 0x10
+#define QIB_7322_IntClear_RcvAvail16IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvAvail15IntClear_LSB 0xF
+#define QIB_7322_IntClear_RcvAvail15IntClear_MSB 0xF
+#define QIB_7322_IntClear_RcvAvail15IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvAvail14IntClear_LSB 0xE
+#define QIB_7322_IntClear_RcvAvail14IntClear_MSB 0xE
+#define QIB_7322_IntClear_RcvAvail14IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvAvail13IntClear_LSB 0xD
+#define QIB_7322_IntClear_RcvAvail13IntClear_MSB 0xD
+#define QIB_7322_IntClear_RcvAvail13IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvAvail12IntClear_LSB 0xC
+#define QIB_7322_IntClear_RcvAvail12IntClear_MSB 0xC
+#define QIB_7322_IntClear_RcvAvail12IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvAvail11IntClear_LSB 0xB
+#define QIB_7322_IntClear_RcvAvail11IntClear_MSB 0xB
+#define QIB_7322_IntClear_RcvAvail11IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvAvail10IntClear_LSB 0xA
+#define QIB_7322_IntClear_RcvAvail10IntClear_MSB 0xA
+#define QIB_7322_IntClear_RcvAvail10IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvAvail9IntClear_LSB 0x9
+#define QIB_7322_IntClear_RcvAvail9IntClear_MSB 0x9
+#define QIB_7322_IntClear_RcvAvail9IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvAvail8IntClear_LSB 0x8
+#define QIB_7322_IntClear_RcvAvail8IntClear_MSB 0x8
+#define QIB_7322_IntClear_RcvAvail8IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvAvail7IntClear_LSB 0x7
+#define QIB_7322_IntClear_RcvAvail7IntClear_MSB 0x7
+#define QIB_7322_IntClear_RcvAvail7IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvAvail6IntClear_LSB 0x6
+#define QIB_7322_IntClear_RcvAvail6IntClear_MSB 0x6
+#define QIB_7322_IntClear_RcvAvail6IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvAvail5IntClear_LSB 0x5
+#define QIB_7322_IntClear_RcvAvail5IntClear_MSB 0x5
+#define QIB_7322_IntClear_RcvAvail5IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvAvail4IntClear_LSB 0x4
+#define QIB_7322_IntClear_RcvAvail4IntClear_MSB 0x4
+#define QIB_7322_IntClear_RcvAvail4IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvAvail3IntClear_LSB 0x3
+#define QIB_7322_IntClear_RcvAvail3IntClear_MSB 0x3
+#define QIB_7322_IntClear_RcvAvail3IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvAvail2IntClear_LSB 0x2
+#define QIB_7322_IntClear_RcvAvail2IntClear_MSB 0x2
+#define QIB_7322_IntClear_RcvAvail2IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvAvail1IntClear_LSB 0x1
+#define QIB_7322_IntClear_RcvAvail1IntClear_MSB 0x1
+#define QIB_7322_IntClear_RcvAvail1IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvAvail0IntClear_LSB 0x0
+#define QIB_7322_IntClear_RcvAvail0IntClear_MSB 0x0
+#define QIB_7322_IntClear_RcvAvail0IntClear_RMASK 0x1
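+
+/*
+ * Illustrative note (not generated output): IntMask, IntStatus and
+ * IntClear share one bit layout, so the same bit positions serve all
+ * three.  Assuming write-1-to-clear semantics (implied by the
+ * naming), acknowledging a port-0 receive-available interrupt would
+ * look roughly like
+ *	writeq(1ULL << QIB_7322_IntClear_RcvAvail0IntClear_LSB,
+ *	       regs + QIB_7322_IntClear_OFFS);
+ */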
+
+#define QIB_7322_ErrMask_OFFS 0x80
+#define QIB_7322_ErrMask_DEF 0x0000000000000000
+#define QIB_7322_ErrMask_ResetNegatedMask_LSB 0x3F
+#define QIB_7322_ErrMask_ResetNegatedMask_MSB 0x3F
+#define QIB_7322_ErrMask_ResetNegatedMask_RMASK 0x1
+#define QIB_7322_ErrMask_HardwareErrMask_LSB 0x3E
+#define QIB_7322_ErrMask_HardwareErrMask_MSB 0x3E
+#define QIB_7322_ErrMask_HardwareErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_InvalidAddrErrMask_LSB 0x3D
+#define QIB_7322_ErrMask_InvalidAddrErrMask_MSB 0x3D
+#define QIB_7322_ErrMask_InvalidAddrErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_SDmaVL15ErrMask_LSB 0x38
+#define QIB_7322_ErrMask_SDmaVL15ErrMask_MSB 0x38
+#define QIB_7322_ErrMask_SDmaVL15ErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_SBufVL15MisUseErrMask_LSB 0x37
+#define QIB_7322_ErrMask_SBufVL15MisUseErrMask_MSB 0x37
+#define QIB_7322_ErrMask_SBufVL15MisUseErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_InvalidEEPCmdMask_LSB 0x35
+#define QIB_7322_ErrMask_InvalidEEPCmdMask_MSB 0x35
+#define QIB_7322_ErrMask_InvalidEEPCmdMask_RMASK 0x1
+#define QIB_7322_ErrMask_RcvContextShareErrMask_LSB 0x34
+#define QIB_7322_ErrMask_RcvContextShareErrMask_MSB 0x34
+#define QIB_7322_ErrMask_RcvContextShareErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_SendVLMismatchErrMask_LSB 0x24
+#define QIB_7322_ErrMask_SendVLMismatchErrMask_MSB 0x24
+#define QIB_7322_ErrMask_SendVLMismatchErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_SendArmLaunchErrMask_LSB 0x23
+#define QIB_7322_ErrMask_SendArmLaunchErrMask_MSB 0x23
+#define QIB_7322_ErrMask_SendArmLaunchErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_SendSpecialTriggerErrMask_LSB 0x1B
+#define QIB_7322_ErrMask_SendSpecialTriggerErrMask_MSB 0x1B
+#define QIB_7322_ErrMask_SendSpecialTriggerErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_SDmaWrongPortErrMask_LSB 0x1A
+#define QIB_7322_ErrMask_SDmaWrongPortErrMask_MSB 0x1A
+#define QIB_7322_ErrMask_SDmaWrongPortErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_SDmaBufMaskDuplicateErrMask_LSB 0x19
+#define QIB_7322_ErrMask_SDmaBufMaskDuplicateErrMask_MSB 0x19
+#define QIB_7322_ErrMask_SDmaBufMaskDuplicateErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_RcvHdrFullErrMask_LSB 0xD
+#define QIB_7322_ErrMask_RcvHdrFullErrMask_MSB 0xD
+#define QIB_7322_ErrMask_RcvHdrFullErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_RcvEgrFullErrMask_LSB 0xC
+#define QIB_7322_ErrMask_RcvEgrFullErrMask_MSB 0xC
+#define QIB_7322_ErrMask_RcvEgrFullErrMask_RMASK 0x1
+
+#define QIB_7322_ErrStatus_OFFS 0x88
+#define QIB_7322_ErrStatus_DEF 0x0000000000000000
+#define QIB_7322_ErrStatus_ResetNegated_LSB 0x3F
+#define QIB_7322_ErrStatus_ResetNegated_MSB 0x3F
+#define QIB_7322_ErrStatus_ResetNegated_RMASK 0x1
+#define QIB_7322_ErrStatus_HardwareErr_LSB 0x3E
+#define QIB_7322_ErrStatus_HardwareErr_MSB 0x3E
+#define QIB_7322_ErrStatus_HardwareErr_RMASK 0x1
+#define QIB_7322_ErrStatus_InvalidAddrErr_LSB 0x3D
+#define QIB_7322_ErrStatus_InvalidAddrErr_MSB 0x3D
+#define QIB_7322_ErrStatus_InvalidAddrErr_RMASK 0x1
+#define QIB_7322_ErrStatus_SDmaVL15Err_LSB 0x38
+#define QIB_7322_ErrStatus_SDmaVL15Err_MSB 0x38
+#define QIB_7322_ErrStatus_SDmaVL15Err_RMASK 0x1
+#define QIB_7322_ErrStatus_SBufVL15MisUseErr_LSB 0x37
+#define QIB_7322_ErrStatus_SBufVL15MisUseErr_MSB 0x37
+#define QIB_7322_ErrStatus_SBufVL15MisUseErr_RMASK 0x1
+#define QIB_7322_ErrStatus_InvalidEEPCmdErr_LSB 0x35
+#define QIB_7322_ErrStatus_InvalidEEPCmdErr_MSB 0x35
+#define QIB_7322_ErrStatus_InvalidEEPCmdErr_RMASK 0x1
+#define QIB_7322_ErrStatus_RcvContextShareErr_LSB 0x34
+#define QIB_7322_ErrStatus_RcvContextShareErr_MSB 0x34
+#define QIB_7322_ErrStatus_RcvContextShareErr_RMASK 0x1
+#define QIB_7322_ErrStatus_SendVLMismatchErr_LSB 0x24
+#define QIB_7322_ErrStatus_SendVLMismatchErr_MSB 0x24
+#define QIB_7322_ErrStatus_SendVLMismatchErr_RMASK 0x1
+#define QIB_7322_ErrStatus_SendArmLaunchErr_LSB 0x23
+#define QIB_7322_ErrStatus_SendArmLaunchErr_MSB 0x23
+#define QIB_7322_ErrStatus_SendArmLaunchErr_RMASK 0x1
+#define QIB_7322_ErrStatus_SendSpecialTriggerErr_LSB 0x1B
+#define QIB_7322_ErrStatus_SendSpecialTriggerErr_MSB 0x1B
+#define QIB_7322_ErrStatus_SendSpecialTriggerErr_RMASK 0x1
+#define QIB_7322_ErrStatus_SDmaWrongPortErr_LSB 0x1A
+#define QIB_7322_ErrStatus_SDmaWrongPortErr_MSB 0x1A
+#define QIB_7322_ErrStatus_SDmaWrongPortErr_RMASK 0x1
+#define QIB_7322_ErrStatus_SDmaBufMaskDuplicateErr_LSB 0x19
+#define QIB_7322_ErrStatus_SDmaBufMaskDuplicateErr_MSB 0x19
+#define QIB_7322_ErrStatus_SDmaBufMaskDuplicateErr_RMASK 0x1
+#define QIB_7322_ErrStatus_RcvHdrFullErr_LSB 0xD
+#define QIB_7322_ErrStatus_RcvHdrFullErr_MSB 0xD
+#define QIB_7322_ErrStatus_RcvHdrFullErr_RMASK 0x1
+#define QIB_7322_ErrStatus_RcvEgrFullErr_LSB 0xC
+#define QIB_7322_ErrStatus_RcvEgrFullErr_MSB 0xC
+#define QIB_7322_ErrStatus_RcvEgrFullErr_RMASK 0x1
+
+#define QIB_7322_ErrClear_OFFS 0x90
+#define QIB_7322_ErrClear_DEF 0x0000000000000000
+#define QIB_7322_ErrClear_ResetNegatedClear_LSB 0x3F
+#define QIB_7322_ErrClear_ResetNegatedClear_MSB 0x3F
+#define QIB_7322_ErrClear_ResetNegatedClear_RMASK 0x1
+#define QIB_7322_ErrClear_HardwareErrClear_LSB 0x3E
+#define QIB_7322_ErrClear_HardwareErrClear_MSB 0x3E
+#define QIB_7322_ErrClear_HardwareErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_InvalidAddrErrClear_LSB 0x3D
+#define QIB_7322_ErrClear_InvalidAddrErrClear_MSB 0x3D
+#define QIB_7322_ErrClear_InvalidAddrErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_SDmaVL15ErrClear_LSB 0x38
+#define QIB_7322_ErrClear_SDmaVL15ErrClear_MSB 0x38
+#define QIB_7322_ErrClear_SDmaVL15ErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_SBufVL15MisUseErrClear_LSB 0x37
+#define QIB_7322_ErrClear_SBufVL15MisUseErrClear_MSB 0x37
+#define QIB_7322_ErrClear_SBufVL15MisUseErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_InvalidEEPCmdErrClear_LSB 0x35
+#define QIB_7322_ErrClear_InvalidEEPCmdErrClear_MSB 0x35
+#define QIB_7322_ErrClear_InvalidEEPCmdErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_RcvContextShareErrClear_LSB 0x34
+#define QIB_7322_ErrClear_RcvContextShareErrClear_MSB 0x34
+#define QIB_7322_ErrClear_RcvContextShareErrClear_RMASK 0x1
+/* note: the generator kept the ErrMask-style suffix here; this is
+ * nonetheless the ErrClear bit for SendVLMismatchErr */
+#define QIB_7322_ErrClear_SendVLMismatchErrMask_LSB 0x24
+#define QIB_7322_ErrClear_SendVLMismatchErrMask_MSB 0x24
+#define QIB_7322_ErrClear_SendVLMismatchErrMask_RMASK 0x1
+#define QIB_7322_ErrClear_SendArmLaunchErrClear_LSB 0x23
+#define QIB_7322_ErrClear_SendArmLaunchErrClear_MSB 0x23
+#define QIB_7322_ErrClear_SendArmLaunchErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_SendSpecialTriggerErrClear_LSB 0x1B
+#define QIB_7322_ErrClear_SendSpecialTriggerErrClear_MSB 0x1B
+#define QIB_7322_ErrClear_SendSpecialTriggerErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_SDmaWrongPortErrClear_LSB 0x1A
+#define QIB_7322_ErrClear_SDmaWrongPortErrClear_MSB 0x1A
+#define QIB_7322_ErrClear_SDmaWrongPortErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_SDmaBufMaskDuplicateErrClear_LSB 0x19
+#define QIB_7322_ErrClear_SDmaBufMaskDuplicateErrClear_MSB 0x19
+#define QIB_7322_ErrClear_SDmaBufMaskDuplicateErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_RcvHdrFullErrClear_LSB 0xD
+#define QIB_7322_ErrClear_RcvHdrFullErrClear_MSB 0xD
+#define QIB_7322_ErrClear_RcvHdrFullErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_RcvEgrFullErrClear_LSB 0xC
+#define QIB_7322_ErrClear_RcvEgrFullErrClear_MSB 0xC
+#define QIB_7322_ErrClear_RcvEgrFullErrClear_RMASK 0x1
+
+#define QIB_7322_HwErrMask_OFFS 0x98
+#define QIB_7322_HwErrMask_DEF 0x0000000000000000
+#define QIB_7322_HwErrMask_IBSerdesPClkNotDetectMask_1_LSB 0x3F
+#define QIB_7322_HwErrMask_IBSerdesPClkNotDetectMask_1_MSB 0x3F
+#define QIB_7322_HwErrMask_IBSerdesPClkNotDetectMask_1_RMASK 0x1
+#define QIB_7322_HwErrMask_IBSerdesPClkNotDetectMask_0_LSB 0x3E
+#define QIB_7322_HwErrMask_IBSerdesPClkNotDetectMask_0_MSB 0x3E
+#define QIB_7322_HwErrMask_IBSerdesPClkNotDetectMask_0_RMASK 0x1
+#define QIB_7322_HwErrMask_PCIESerdesPClkNotDetectMask_LSB 0x37
+#define QIB_7322_HwErrMask_PCIESerdesPClkNotDetectMask_MSB 0x37
+#define QIB_7322_HwErrMask_PCIESerdesPClkNotDetectMask_RMASK 0x1
+#define QIB_7322_HwErrMask_PowerOnBISTFailedMask_LSB 0x36
+#define QIB_7322_HwErrMask_PowerOnBISTFailedMask_MSB 0x36
+#define QIB_7322_HwErrMask_PowerOnBISTFailedMask_RMASK 0x1
+#define QIB_7322_HwErrMask_TempsenseTholdReachedMask_LSB 0x35
+#define QIB_7322_HwErrMask_TempsenseTholdReachedMask_MSB 0x35
+#define QIB_7322_HwErrMask_TempsenseTholdReachedMask_RMASK 0x1
+#define QIB_7322_HwErrMask_MemoryErrMask_LSB 0x30
+#define QIB_7322_HwErrMask_MemoryErrMask_MSB 0x30
+#define QIB_7322_HwErrMask_MemoryErrMask_RMASK 0x1
+#define QIB_7322_HwErrMask_pcie_phy_txParityErr_LSB 0x22
+#define QIB_7322_HwErrMask_pcie_phy_txParityErr_MSB 0x22
+#define QIB_7322_HwErrMask_pcie_phy_txParityErr_RMASK 0x1
+#define QIB_7322_HwErrMask_PCIeBusParityErrMask_LSB 0x1F
+#define QIB_7322_HwErrMask_PCIeBusParityErrMask_MSB 0x21
+#define QIB_7322_HwErrMask_PCIeBusParityErrMask_RMASK 0x7
+#define QIB_7322_HwErrMask_PcieCplTimeoutMask_LSB 0x1E
+#define QIB_7322_HwErrMask_PcieCplTimeoutMask_MSB 0x1E
+#define QIB_7322_HwErrMask_PcieCplTimeoutMask_RMASK 0x1
+#define QIB_7322_HwErrMask_PciePoisonedTLPMask_LSB 0x1D
+#define QIB_7322_HwErrMask_PciePoisonedTLPMask_MSB 0x1D
+#define QIB_7322_HwErrMask_PciePoisonedTLPMask_RMASK 0x1
+#define QIB_7322_HwErrMask_SDmaMemReadErrMask_1_LSB 0x1C
+#define QIB_7322_HwErrMask_SDmaMemReadErrMask_1_MSB 0x1C
+#define QIB_7322_HwErrMask_SDmaMemReadErrMask_1_RMASK 0x1
+#define QIB_7322_HwErrMask_SDmaMemReadErrMask_0_LSB 0x1B
+#define QIB_7322_HwErrMask_SDmaMemReadErrMask_0_MSB 0x1B
+#define QIB_7322_HwErrMask_SDmaMemReadErrMask_0_RMASK 0x1
+#define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_1_LSB 0xF
+#define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_1_MSB 0xF
+#define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_1_RMASK 0x1
+#define QIB_7322_HwErrMask_statusValidNoEopMask_1_LSB 0xE
+#define QIB_7322_HwErrMask_statusValidNoEopMask_1_MSB 0xE
+#define QIB_7322_HwErrMask_statusValidNoEopMask_1_RMASK 0x1
+#define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_0_LSB 0xD
+#define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_0_MSB 0xD
+#define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_0_RMASK 0x1
+#define QIB_7322_HwErrMask_statusValidNoEopMask_0_LSB 0xC
+#define QIB_7322_HwErrMask_statusValidNoEopMask_0_MSB 0xC
+#define QIB_7322_HwErrMask_statusValidNoEopMask_0_RMASK 0x1
+#define QIB_7322_HwErrMask_LATriggeredMask_LSB 0xB
+#define QIB_7322_HwErrMask_LATriggeredMask_MSB 0xB
+#define QIB_7322_HwErrMask_LATriggeredMask_RMASK 0x1
+
+#define QIB_7322_HwErrStatus_OFFS 0xA0
+#define QIB_7322_HwErrStatus_DEF 0x0000000000000000
+#define QIB_7322_HwErrStatus_IBSerdesPClkNotDetect_1_LSB 0x3F
+#define QIB_7322_HwErrStatus_IBSerdesPClkNotDetect_1_MSB 0x3F
+#define QIB_7322_HwErrStatus_IBSerdesPClkNotDetect_1_RMASK 0x1
+#define QIB_7322_HwErrStatus_IBSerdesPClkNotDetect_0_LSB 0x3E
+#define QIB_7322_HwErrStatus_IBSerdesPClkNotDetect_0_MSB 0x3E
+#define QIB_7322_HwErrStatus_IBSerdesPClkNotDetect_0_RMASK 0x1
+#define QIB_7322_HwErrStatus_PCIESerdesPClkNotDetect_LSB 0x37
+#define QIB_7322_HwErrStatus_PCIESerdesPClkNotDetect_MSB 0x37
+#define QIB_7322_HwErrStatus_PCIESerdesPClkNotDetect_RMASK 0x1
+#define QIB_7322_HwErrStatus_PowerOnBISTFailed_LSB 0x36
+#define QIB_7322_HwErrStatus_PowerOnBISTFailed_MSB 0x36
+#define QIB_7322_HwErrStatus_PowerOnBISTFailed_RMASK 0x1
+#define QIB_7322_HwErrStatus_TempsenseTholdReached_LSB 0x35
+#define QIB_7322_HwErrStatus_TempsenseTholdReached_MSB 0x35
+#define QIB_7322_HwErrStatus_TempsenseTholdReached_RMASK 0x1
+#define QIB_7322_HwErrStatus_MemoryErr_LSB 0x30
+#define QIB_7322_HwErrStatus_MemoryErr_MSB 0x30
+#define QIB_7322_HwErrStatus_MemoryErr_RMASK 0x1
+#define QIB_7322_HwErrStatus_pcie_phy_txParityErr_LSB 0x22
+#define QIB_7322_HwErrStatus_pcie_phy_txParityErr_MSB 0x22
+#define QIB_7322_HwErrStatus_pcie_phy_txParityErr_RMASK 0x1
+#define QIB_7322_HwErrStatus_PCIeBusParity_LSB 0x1F
+#define QIB_7322_HwErrStatus_PCIeBusParity_MSB 0x21
+#define QIB_7322_HwErrStatus_PCIeBusParity_RMASK 0x7
+#define QIB_7322_HwErrStatus_PcieCplTimeout_LSB 0x1E
+#define QIB_7322_HwErrStatus_PcieCplTimeout_MSB 0x1E
+#define QIB_7322_HwErrStatus_PcieCplTimeout_RMASK 0x1
+#define QIB_7322_HwErrStatus_PciePoisonedTLP_LSB 0x1D
+#define QIB_7322_HwErrStatus_PciePoisonedTLP_MSB 0x1D
+#define QIB_7322_HwErrStatus_PciePoisonedTLP_RMASK 0x1
+#define QIB_7322_HwErrStatus_SDmaMemReadErr_1_LSB 0x1C
+#define QIB_7322_HwErrStatus_SDmaMemReadErr_1_MSB 0x1C
+#define QIB_7322_HwErrStatus_SDmaMemReadErr_1_RMASK 0x1
+#define QIB_7322_HwErrStatus_SDmaMemReadErr_0_LSB 0x1B
+#define QIB_7322_HwErrStatus_SDmaMemReadErr_0_MSB 0x1B
+#define QIB_7322_HwErrStatus_SDmaMemReadErr_0_RMASK 0x1
+#define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_1_LSB 0xF
+#define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_1_MSB 0xF
+#define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_1_RMASK 0x1
+#define QIB_7322_HwErrStatus_statusValidNoEop_1_LSB 0xE
+#define QIB_7322_HwErrStatus_statusValidNoEop_1_MSB 0xE
+#define QIB_7322_HwErrStatus_statusValidNoEop_1_RMASK 0x1
+#define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_0_LSB 0xD
+#define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_0_MSB 0xD
+#define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_0_RMASK 0x1
+#define QIB_7322_HwErrStatus_statusValidNoEop_0_LSB 0xC
+#define QIB_7322_HwErrStatus_statusValidNoEop_0_MSB 0xC
+#define QIB_7322_HwErrStatus_statusValidNoEop_0_RMASK 0x1
+#define QIB_7322_HwErrStatus_LATriggered_LSB 0xB
+#define QIB_7322_HwErrStatus_LATriggered_MSB 0xB
+#define QIB_7322_HwErrStatus_LATriggered_RMASK 0x1
+
+#define QIB_7322_HwErrClear_OFFS 0xA8
+#define QIB_7322_HwErrClear_DEF 0x0000000000000000
+#define QIB_7322_HwErrClear_IBSerdesPClkNotDetectClear_1_LSB 0x3F
+#define QIB_7322_HwErrClear_IBSerdesPClkNotDetectClear_1_MSB 0x3F
+#define QIB_7322_HwErrClear_IBSerdesPClkNotDetectClear_1_RMASK 0x1
+#define QIB_7322_HwErrClear_IBSerdesPClkNotDetectClear_0_LSB 0x3E
+#define QIB_7322_HwErrClear_IBSerdesPClkNotDetectClear_0_MSB 0x3E
+#define QIB_7322_HwErrClear_IBSerdesPClkNotDetectClear_0_RMASK 0x1
+#define QIB_7322_HwErrClear_PCIESerdesPClkNotDetectClear_LSB 0x37
+#define QIB_7322_HwErrClear_PCIESerdesPClkNotDetectClear_MSB 0x37
+#define QIB_7322_HwErrClear_PCIESerdesPClkNotDetectClear_RMASK 0x1
+#define QIB_7322_HwErrClear_PowerOnBISTFailedClear_LSB 0x36
+#define QIB_7322_HwErrClear_PowerOnBISTFailedClear_MSB 0x36
+#define QIB_7322_HwErrClear_PowerOnBISTFailedClear_RMASK 0x1
+#define QIB_7322_HwErrClear_TempsenseTholdReachedClear_LSB 0x35
+#define QIB_7322_HwErrClear_TempsenseTholdReachedClear_MSB 0x35
+#define QIB_7322_HwErrClear_TempsenseTholdReachedClear_RMASK 0x1
+#define QIB_7322_HwErrClear_MemoryErrClear_LSB 0x30
+#define QIB_7322_HwErrClear_MemoryErrClear_MSB 0x30
+#define QIB_7322_HwErrClear_MemoryErrClear_RMASK 0x1
+#define QIB_7322_HwErrClear_pcie_phy_txParityErr_LSB 0x22
+#define QIB_7322_HwErrClear_pcie_phy_txParityErr_MSB 0x22
+#define QIB_7322_HwErrClear_pcie_phy_txParityErr_RMASK 0x1
+#define QIB_7322_HwErrClear_PCIeBusParityClear_LSB 0x1F
+#define QIB_7322_HwErrClear_PCIeBusParityClear_MSB 0x21
+#define QIB_7322_HwErrClear_PCIeBusParityClear_RMASK 0x7
+#define QIB_7322_HwErrClear_PcieCplTimeoutClear_LSB 0x1E
+#define QIB_7322_HwErrClear_PcieCplTimeoutClear_MSB 0x1E
+#define QIB_7322_HwErrClear_PcieCplTimeoutClear_RMASK 0x1
+#define QIB_7322_HwErrClear_PciePoisonedTLPClear_LSB 0x1D
+#define QIB_7322_HwErrClear_PciePoisonedTLPClear_MSB 0x1D
+#define QIB_7322_HwErrClear_PciePoisonedTLPClear_RMASK 0x1
+#define QIB_7322_HwErrClear_SDmaMemReadErrClear_1_LSB 0x1C
+#define QIB_7322_HwErrClear_SDmaMemReadErrClear_1_MSB 0x1C
+#define QIB_7322_HwErrClear_SDmaMemReadErrClear_1_RMASK 0x1
+#define QIB_7322_HwErrClear_SDmaMemReadErrClear_0_LSB 0x1B
+#define QIB_7322_HwErrClear_SDmaMemReadErrClear_0_MSB 0x1B
+#define QIB_7322_HwErrClear_SDmaMemReadErrClear_0_RMASK 0x1
+#define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_1_LSB 0xF
+#define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_1_MSB 0xF
+#define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_1_RMASK 0x1
+#define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_1_LSB 0xE
+#define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_1_MSB 0xE
+#define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_1_RMASK 0x1
+#define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_0_LSB 0xD
+#define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_0_MSB 0xD
+#define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_0_RMASK 0x1
+#define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_0_LSB 0xC
+#define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_0_MSB 0xC
+#define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_0_RMASK 0x1
+#define QIB_7322_HwErrClear_LATriggeredClear_LSB 0xB
+#define QIB_7322_HwErrClear_LATriggeredClear_MSB 0xB
+#define QIB_7322_HwErrClear_LATriggeredClear_RMASK 0x1
+
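Every register in this generated file follows the same pattern: an _OFFS byte offset into the chip's register BAR, a _DEF power-on value, and _LSB/_MSB/_RMASK descriptors for each field. As a minimal sketch of how the HwErrClear descriptors above might be consumed (write-1-to-clear), assuming a kernel context with readq/writeq from <linux/io.h> and a hypothetical `kregbase` ioremap()ed pointer rather than the driver's own register accessors:

/* Sketch: acknowledge two latched PCIe hardware errors via HwErrClear.
 * kregbase is an assumed ioremap()ed pointer to the register BAR.
 */
static void clear_pcie_errs(void __iomem *kregbase)
{
	u64 clr = 0;

	clr |= 1ULL << QIB_7322_HwErrClear_PciePoisonedTLPClear_LSB;
	clr |= 1ULL << QIB_7322_HwErrClear_PcieCplTimeoutClear_LSB;
	writeq(clr, kregbase + QIB_7322_HwErrClear_OFFS);
}
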
+#define QIB_7322_HwDiagCtrl_OFFS 0xB0
+#define QIB_7322_HwDiagCtrl_DEF 0x0000000000000000
+#define QIB_7322_HwDiagCtrl_Diagnostic_LSB 0x3F
+#define QIB_7322_HwDiagCtrl_Diagnostic_MSB 0x3F
+#define QIB_7322_HwDiagCtrl_Diagnostic_RMASK 0x1
+#define QIB_7322_HwDiagCtrl_CounterWrEnable_LSB 0x3D
+#define QIB_7322_HwDiagCtrl_CounterWrEnable_MSB 0x3D
+#define QIB_7322_HwDiagCtrl_CounterWrEnable_RMASK 0x1
+#define QIB_7322_HwDiagCtrl_CounterDisable_LSB 0x3C
+#define QIB_7322_HwDiagCtrl_CounterDisable_MSB 0x3C
+#define QIB_7322_HwDiagCtrl_CounterDisable_RMASK 0x1
+#define QIB_7322_HwDiagCtrl_forcePCIeBusParity_LSB 0x1F
+#define QIB_7322_HwDiagCtrl_forcePCIeBusParity_MSB 0x22
+#define QIB_7322_HwDiagCtrl_forcePCIeBusParity_RMASK 0xF
+#define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_1_LSB 0xF
+#define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_1_MSB 0xF
+#define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_1_RMASK 0x1
+#define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_1_LSB 0xE
+#define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_1_MSB 0xE
+#define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_1_RMASK 0x1
+#define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_0_LSB 0xD
+#define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_0_MSB 0xD
+#define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_0_RMASK 0x1
+#define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_0_LSB 0xC
+#define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_0_MSB 0xC
+#define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_0_RMASK 0x1
+
+#define QIB_7322_EXTStatus_OFFS 0xC0
+#define QIB_7322_EXTStatus_DEF 0x000000000000X000
+#define QIB_7322_EXTStatus_GPIOIn_LSB 0x30
+#define QIB_7322_EXTStatus_GPIOIn_MSB 0x3F
+#define QIB_7322_EXTStatus_GPIOIn_RMASK 0xFFFF
+#define QIB_7322_EXTStatus_MemBISTDisabled_LSB 0xF
+#define QIB_7322_EXTStatus_MemBISTDisabled_MSB 0xF
+#define QIB_7322_EXTStatus_MemBISTDisabled_RMASK 0x1
+#define QIB_7322_EXTStatus_MemBISTEndTest_LSB 0xE
+#define QIB_7322_EXTStatus_MemBISTEndTest_MSB 0xE
+#define QIB_7322_EXTStatus_MemBISTEndTest_RMASK 0x1
+
+#define QIB_7322_EXTCtrl_OFFS 0xC8
+#define QIB_7322_EXTCtrl_DEF 0x0000000000000000
+#define QIB_7322_EXTCtrl_GPIOOe_LSB 0x30
+#define QIB_7322_EXTCtrl_GPIOOe_MSB 0x3F
+#define QIB_7322_EXTCtrl_GPIOOe_RMASK 0xFFFF
+#define QIB_7322_EXTCtrl_GPIOInvert_LSB 0x20
+#define QIB_7322_EXTCtrl_GPIOInvert_MSB 0x2F
+#define QIB_7322_EXTCtrl_GPIOInvert_RMASK 0xFFFF
+#define QIB_7322_EXTCtrl_LEDPort1GreenOn_LSB 0x3
+#define QIB_7322_EXTCtrl_LEDPort1GreenOn_MSB 0x3
+#define QIB_7322_EXTCtrl_LEDPort1GreenOn_RMASK 0x1
+#define QIB_7322_EXTCtrl_LEDPort1YellowOn_LSB 0x2
+#define QIB_7322_EXTCtrl_LEDPort1YellowOn_MSB 0x2
+#define QIB_7322_EXTCtrl_LEDPort1YellowOn_RMASK 0x1
+#define QIB_7322_EXTCtrl_LEDPort0GreenOn_LSB 0x1
+#define QIB_7322_EXTCtrl_LEDPort0GreenOn_MSB 0x1
+#define QIB_7322_EXTCtrl_LEDPort0GreenOn_RMASK 0x1
+#define QIB_7322_EXTCtrl_LEDPort0YellowOn_LSB 0x0
+#define QIB_7322_EXTCtrl_LEDPort0YellowOn_MSB 0x0
+#define QIB_7322_EXTCtrl_LEDPort0YellowOn_RMASK 0x1
+
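The low four EXTCtrl bits drive the per-port LED pins directly, while the upper fields control GPIO output enable and inversion. A hedged read-modify-write sketch, again using a raw assumed `kregbase` rather than the driver's helpers, so the GPIO fields are left untouched:

/* Sketch: light or extinguish port 0's green LED only. */
static void port0_green_led(void __iomem *kregbase, bool on)
{
	u64 v = readq(kregbase + QIB_7322_EXTCtrl_OFFS);

	if (on)
		v |= 1ULL << QIB_7322_EXTCtrl_LEDPort0GreenOn_LSB;
	else
		v &= ~(1ULL << QIB_7322_EXTCtrl_LEDPort0GreenOn_LSB);
	writeq(v, kregbase + QIB_7322_EXTCtrl_OFFS);
}
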
+#define QIB_7322_GPIOOut_OFFS 0xE0
+#define QIB_7322_GPIOOut_DEF 0x0000000000000000
+
+#define QIB_7322_GPIOMask_OFFS 0xE8
+#define QIB_7322_GPIOMask_DEF 0x0000000000000000
+
+#define QIB_7322_GPIOStatus_OFFS 0xF0
+#define QIB_7322_GPIOStatus_DEF 0x0000000000000000
+
+#define QIB_7322_GPIOClear_OFFS 0xF8
+#define QIB_7322_GPIOClear_DEF 0x0000000000000000
+
+#define QIB_7322_RcvCtrl_OFFS 0x100
+#define QIB_7322_RcvCtrl_DEF 0x0000000000000000
+#define QIB_7322_RcvCtrl_TidReDirect_LSB 0x30
+#define QIB_7322_RcvCtrl_TidReDirect_MSB 0x3F
+#define QIB_7322_RcvCtrl_TidReDirect_RMASK 0xFFFF
+#define QIB_7322_RcvCtrl_TailUpd_LSB 0x2F
+#define QIB_7322_RcvCtrl_TailUpd_MSB 0x2F
+#define QIB_7322_RcvCtrl_TailUpd_RMASK 0x1
+#define QIB_7322_RcvCtrl_XrcTypeCode_LSB 0x2C
+#define QIB_7322_RcvCtrl_XrcTypeCode_MSB 0x2E
+#define QIB_7322_RcvCtrl_XrcTypeCode_RMASK 0x7
+#define QIB_7322_RcvCtrl_TidFlowEnable_LSB 0x2B
+#define QIB_7322_RcvCtrl_TidFlowEnable_MSB 0x2B
+#define QIB_7322_RcvCtrl_TidFlowEnable_RMASK 0x1
+#define QIB_7322_RcvCtrl_ContextCfg_LSB 0x29
+#define QIB_7322_RcvCtrl_ContextCfg_MSB 0x2A
+#define QIB_7322_RcvCtrl_ContextCfg_RMASK 0x3
+#define QIB_7322_RcvCtrl_IntrAvail_LSB 0x14
+#define QIB_7322_RcvCtrl_IntrAvail_MSB 0x25
+#define QIB_7322_RcvCtrl_IntrAvail_RMASK 0x3FFFF
+#define QIB_7322_RcvCtrl_dontDropRHQFull_LSB 0x0
+#define QIB_7322_RcvCtrl_dontDropRHQFull_MSB 0x11
+#define QIB_7322_RcvCtrl_dontDropRHQFull_RMASK 0x3FFFF
+
+#define QIB_7322_RcvHdrSize_OFFS 0x110
+#define QIB_7322_RcvHdrSize_DEF 0x0000000000000000
+
+#define QIB_7322_RcvHdrCnt_OFFS 0x118
+#define QIB_7322_RcvHdrCnt_DEF 0x0000000000000000
+
+#define QIB_7322_RcvHdrEntSize_OFFS 0x120
+#define QIB_7322_RcvHdrEntSize_DEF 0x0000000000000000
+
+#define QIB_7322_RcvTIDBase_OFFS 0x128
+#define QIB_7322_RcvTIDBase_DEF 0x0000000000050000
+
+#define QIB_7322_RcvTIDCnt_OFFS 0x130
+#define QIB_7322_RcvTIDCnt_DEF 0x0000000000000200
+
+#define QIB_7322_RcvEgrBase_OFFS 0x138
+#define QIB_7322_RcvEgrBase_DEF 0x0000000000014000
+
+#define QIB_7322_RcvEgrCnt_OFFS 0x140
+#define QIB_7322_RcvEgrCnt_DEF 0x0000000000001000
+
+#define QIB_7322_RcvBufBase_OFFS 0x148
+#define QIB_7322_RcvBufBase_DEF 0x0000000000080000
+
+#define QIB_7322_RcvBufSize_OFFS 0x150
+#define QIB_7322_RcvBufSize_DEF 0x0000000000005000
+
+#define QIB_7322_RxIntMemBase_OFFS 0x158
+#define QIB_7322_RxIntMemBase_DEF 0x0000000000077000
+
+#define QIB_7322_RxIntMemSize_OFFS 0x160
+#define QIB_7322_RxIntMemSize_DEF 0x0000000000007000
+
+#define QIB_7322_feature_mask_OFFS 0x190
+#define QIB_7322_feature_mask_DEF 0x00000000000000XX
+
+#define QIB_7322_active_feature_mask_OFFS 0x198
+#define QIB_7322_active_feature_mask_DEF 0x00000000000000XX
+#define QIB_7322_active_feature_mask_Port1_QDR_Enabled_LSB 0x5
+#define QIB_7322_active_feature_mask_Port1_QDR_Enabled_MSB 0x5
+#define QIB_7322_active_feature_mask_Port1_QDR_Enabled_RMASK 0x1
+#define QIB_7322_active_feature_mask_Port1_DDR_Enabled_LSB 0x4
+#define QIB_7322_active_feature_mask_Port1_DDR_Enabled_MSB 0x4
+#define QIB_7322_active_feature_mask_Port1_DDR_Enabled_RMASK 0x1
+#define QIB_7322_active_feature_mask_Port1_SDR_Enabled_LSB 0x3
+#define QIB_7322_active_feature_mask_Port1_SDR_Enabled_MSB 0x3
+#define QIB_7322_active_feature_mask_Port1_SDR_Enabled_RMASK 0x1
+#define QIB_7322_active_feature_mask_Port0_QDR_Enabled_LSB 0x2
+#define QIB_7322_active_feature_mask_Port0_QDR_Enabled_MSB 0x2
+#define QIB_7322_active_feature_mask_Port0_QDR_Enabled_RMASK 0x1
+#define QIB_7322_active_feature_mask_Port0_DDR_Enabled_LSB 0x1
+#define QIB_7322_active_feature_mask_Port0_DDR_Enabled_MSB 0x1
+#define QIB_7322_active_feature_mask_Port0_DDR_Enabled_RMASK 0x1
+#define QIB_7322_active_feature_mask_Port0_SDR_Enabled_LSB 0x0
+#define QIB_7322_active_feature_mask_Port0_SDR_Enabled_MSB 0x0
+#define QIB_7322_active_feature_mask_Port0_SDR_Enabled_RMASK 0x1
+
+#define QIB_7322_SendCtrl_OFFS 0x1C0
+#define QIB_7322_SendCtrl_DEF 0x0000000000000000
+#define QIB_7322_SendCtrl_Disarm_LSB 0x1F
+#define QIB_7322_SendCtrl_Disarm_MSB 0x1F
+#define QIB_7322_SendCtrl_Disarm_RMASK 0x1
+#define QIB_7322_SendCtrl_SendBufAvailPad64Byte_LSB 0x1D
+#define QIB_7322_SendCtrl_SendBufAvailPad64Byte_MSB 0x1D
+#define QIB_7322_SendCtrl_SendBufAvailPad64Byte_RMASK 0x1
+#define QIB_7322_SendCtrl_AvailUpdThld_LSB 0x18
+#define QIB_7322_SendCtrl_AvailUpdThld_MSB 0x1C
+#define QIB_7322_SendCtrl_AvailUpdThld_RMASK 0x1F
+#define QIB_7322_SendCtrl_DisarmSendBuf_LSB 0x10
+#define QIB_7322_SendCtrl_DisarmSendBuf_MSB 0x17
+#define QIB_7322_SendCtrl_DisarmSendBuf_RMASK 0xFF
+#define QIB_7322_SendCtrl_SpecialTriggerEn_LSB 0x4
+#define QIB_7322_SendCtrl_SpecialTriggerEn_MSB 0x4
+#define QIB_7322_SendCtrl_SpecialTriggerEn_RMASK 0x1
+#define QIB_7322_SendCtrl_SendBufAvailUpd_LSB 0x2
+#define QIB_7322_SendCtrl_SendBufAvailUpd_MSB 0x2
+#define QIB_7322_SendCtrl_SendBufAvailUpd_RMASK 0x1
+#define QIB_7322_SendCtrl_SendIntBufAvail_LSB 0x1
+#define QIB_7322_SendCtrl_SendIntBufAvail_MSB 0x1
+#define QIB_7322_SendCtrl_SendIntBufAvail_RMASK 0x1
+
+#define QIB_7322_SendBufBase_OFFS 0x1C8
+#define QIB_7322_SendBufBase_DEF 0x0018000000100000
+#define QIB_7322_SendBufBase_BaseAddr_LargePIO_LSB 0x20
+#define QIB_7322_SendBufBase_BaseAddr_LargePIO_MSB 0x34
+#define QIB_7322_SendBufBase_BaseAddr_LargePIO_RMASK 0x1FFFFF
+#define QIB_7322_SendBufBase_BaseAddr_SmallPIO_LSB 0x0
+#define QIB_7322_SendBufBase_BaseAddr_SmallPIO_MSB 0x14
+#define QIB_7322_SendBufBase_BaseAddr_SmallPIO_RMASK 0x1FFFFF
+
+#define QIB_7322_SendBufSize_OFFS 0x1D0
+#define QIB_7322_SendBufSize_DEF 0x0000108000000880
+#define QIB_7322_SendBufSize_Size_LargePIO_LSB 0x20
+#define QIB_7322_SendBufSize_Size_LargePIO_MSB 0x2C
+#define QIB_7322_SendBufSize_Size_LargePIO_RMASK 0x1FFF
+#define QIB_7322_SendBufSize_Size_SmallPIO_LSB 0x0
+#define QIB_7322_SendBufSize_Size_SmallPIO_MSB 0xB
+#define QIB_7322_SendBufSize_Size_SmallPIO_RMASK 0xFFF
+
+#define QIB_7322_SendBufCnt_OFFS 0x1D8
+#define QIB_7322_SendBufCnt_DEF 0x0000002000000080
+#define QIB_7322_SendBufCnt_Num_LargeBuffers_LSB 0x20
+#define QIB_7322_SendBufCnt_Num_LargeBuffers_MSB 0x25
+#define QIB_7322_SendBufCnt_Num_LargeBuffers_RMASK 0x3F
+#define QIB_7322_SendBufCnt_Num_SmallBuffers_LSB 0x0
+#define QIB_7322_SendBufCnt_Num_SmallBuffers_MSB 0x8
+#define QIB_7322_SendBufCnt_Num_SmallBuffers_RMASK 0x1FF
+
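Read together, SendBufBase, SendBufSize and SendBufCnt describe the PIO send-buffer layout: one array of small buffers starting at BaseAddr_SmallPIO and one of large buffers at BaseAddr_LargePIO, each spaced by its Size field. A sketch of turning a buffer number into a chip offset under that reading; the FIELD_GET64 helper is hypothetical, built only from the generated macro names:

/* Hypothetical helper: extract a field using the generated macros. */
#define FIELD_GET64(val, reg, fld) \
	(((val) >> QIB_7322_##reg##_##fld##_LSB) & \
	 QIB_7322_##reg##_##fld##_RMASK)

/* Sketch: chip offset of small PIO buffer 'n' (no bounds checking).
 * bufbase/bufsize are prior readq() results of SendBufBase/SendBufSize.
 */
static u64 small_pio_offset(u64 bufbase, u64 bufsize, unsigned n)
{
	u64 base = FIELD_GET64(bufbase, SendBufBase, BaseAddr_SmallPIO);
	u64 size = FIELD_GET64(bufsize, SendBufSize, Size_SmallPIO);

	return base + (u64)n * size;
}
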
+#define QIB_7322_SendBufAvailAddr_OFFS 0x1E0
+#define QIB_7322_SendBufAvailAddr_DEF 0x0000000000000000
+#define QIB_7322_SendBufAvailAddr_SendBufAvailAddr_LSB 0x6
+#define QIB_7322_SendBufAvailAddr_SendBufAvailAddr_MSB 0x27
+#define QIB_7322_SendBufAvailAddr_SendBufAvailAddr_RMASK 0x3FFFFFFFF
+
+#define QIB_7322_SendBufErr0_OFFS 0x240
+#define QIB_7322_SendBufErr0_DEF 0x0000000000000000
+#define QIB_7322_SendBufErr0_SendBufErr_63_0_LSB 0x0
+#define QIB_7322_SendBufErr0_SendBufErr_63_0_MSB 0x3F
+#define QIB_7322_SendBufErr0_SendBufErr_63_0_RMASK 0x0
+
+#define QIB_7322_AvailUpdCount_OFFS 0x268
+#define QIB_7322_AvailUpdCount_DEF 0x0000000000000000
+#define QIB_7322_AvailUpdCount_AvailUpdCount_LSB 0x0
+#define QIB_7322_AvailUpdCount_AvailUpdCount_MSB 0x4
+#define QIB_7322_AvailUpdCount_AvailUpdCount_RMASK 0x1F
+
+#define QIB_7322_RcvHdrAddr0_OFFS 0x280
+#define QIB_7322_RcvHdrAddr0_DEF 0x0000000000000000
+#define QIB_7322_RcvHdrAddr0_RcvHdrAddr_LSB 0x2
+#define QIB_7322_RcvHdrAddr0_RcvHdrAddr_MSB 0x27
+#define QIB_7322_RcvHdrAddr0_RcvHdrAddr_RMASK 0x3FFFFFFFFF
+
+#define QIB_7322_RcvHdrTailAddr0_OFFS 0x340
+#define QIB_7322_RcvHdrTailAddr0_DEF 0x0000000000000000
+#define QIB_7322_RcvHdrTailAddr0_RcvHdrTailAddr_LSB 0x2
+#define QIB_7322_RcvHdrTailAddr0_RcvHdrTailAddr_MSB 0x27
+#define QIB_7322_RcvHdrTailAddr0_RcvHdrTailAddr_RMASK 0x3FFFFFFFFF
+
+#define QIB_7322_ahb_access_ctrl_OFFS 0x460
+#define QIB_7322_ahb_access_ctrl_DEF 0x0000000000000000
+#define QIB_7322_ahb_access_ctrl_sw_sel_ahb_trgt_LSB 0x1
+#define QIB_7322_ahb_access_ctrl_sw_sel_ahb_trgt_MSB 0x2
+#define QIB_7322_ahb_access_ctrl_sw_sel_ahb_trgt_RMASK 0x3
+#define QIB_7322_ahb_access_ctrl_sw_ahb_sel_LSB 0x0
+#define QIB_7322_ahb_access_ctrl_sw_ahb_sel_MSB 0x0
+#define QIB_7322_ahb_access_ctrl_sw_ahb_sel_RMASK 0x1
+
+#define QIB_7322_ahb_transaction_reg_OFFS 0x468
+#define QIB_7322_ahb_transaction_reg_DEF 0x0000000080000000
+#define QIB_7322_ahb_transaction_reg_ahb_data_LSB 0x20
+#define QIB_7322_ahb_transaction_reg_ahb_data_MSB 0x3F
+#define QIB_7322_ahb_transaction_reg_ahb_data_RMASK 0xFFFFFFFF
+#define QIB_7322_ahb_transaction_reg_ahb_rdy_LSB 0x1F
+#define QIB_7322_ahb_transaction_reg_ahb_rdy_MSB 0x1F
+#define QIB_7322_ahb_transaction_reg_ahb_rdy_RMASK 0x1
+#define QIB_7322_ahb_transaction_reg_ahb_req_err_LSB 0x1E
+#define QIB_7322_ahb_transaction_reg_ahb_req_err_MSB 0x1E
+#define QIB_7322_ahb_transaction_reg_ahb_req_err_RMASK 0x1
+#define QIB_7322_ahb_transaction_reg_write_not_read_LSB 0x1B
+#define QIB_7322_ahb_transaction_reg_write_not_read_MSB 0x1B
+#define QIB_7322_ahb_transaction_reg_write_not_read_RMASK 0x1
+#define QIB_7322_ahb_transaction_reg_ahb_address_LSB 0x10
+#define QIB_7322_ahb_transaction_reg_ahb_address_MSB 0x1A
+#define QIB_7322_ahb_transaction_reg_ahb_address_RMASK 0x7FF
+
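ahb_access_ctrl selects an AHB target and ahb_transaction_reg carries one request: an 11-bit address, a write/read flag, 32 data bits, and a ready bit the hardware raises on completion. The sketch below compresses that into a single illustrative write-then-poll; the bounded-spin timeout policy and raw accessors are assumptions, and the real driver additionally handles target selection and locking:

/* Sketch: issue one AHB write and poll (bounded) for ahb_rdy. */
static int ahb_write(void __iomem *kregbase, u32 addr, u32 data)
{
	u64 t = (1ULL << QIB_7322_ahb_transaction_reg_write_not_read_LSB) |
		((u64)(addr & QIB_7322_ahb_transaction_reg_ahb_address_RMASK)
		 << QIB_7322_ahb_transaction_reg_ahb_address_LSB) |
		((u64)data << QIB_7322_ahb_transaction_reg_ahb_data_LSB);
	int tries = 1000;	/* arbitrary bound for the sketch */

	writeq(t, kregbase + QIB_7322_ahb_transaction_reg_OFFS);
	while (tries--) {
		t = readq(kregbase + QIB_7322_ahb_transaction_reg_OFFS);
		if (t & (1ULL << QIB_7322_ahb_transaction_reg_ahb_rdy_LSB))
			return 0;
	}
	return -ETIMEDOUT;
}
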
+#define QIB_7322_SPC_JTAG_ACCESS_REG_OFFS 0x470
+#define QIB_7322_SPC_JTAG_ACCESS_REG_DEF 0x0000000000000001
+#define QIB_7322_SPC_JTAG_ACCESS_REG_SPC_JTAG_ACCESS_EN_LSB 0xA
+#define QIB_7322_SPC_JTAG_ACCESS_REG_SPC_JTAG_ACCESS_EN_MSB 0xA
+#define QIB_7322_SPC_JTAG_ACCESS_REG_SPC_JTAG_ACCESS_EN_RMASK 0x1
+#define QIB_7322_SPC_JTAG_ACCESS_REG_bist_en_LSB 0x5
+#define QIB_7322_SPC_JTAG_ACCESS_REG_bist_en_MSB 0x9
+#define QIB_7322_SPC_JTAG_ACCESS_REG_bist_en_RMASK 0x1F
+#define QIB_7322_SPC_JTAG_ACCESS_REG_opcode_LSB 0x3
+#define QIB_7322_SPC_JTAG_ACCESS_REG_opcode_MSB 0x4
+#define QIB_7322_SPC_JTAG_ACCESS_REG_opcode_RMASK 0x3
+#define QIB_7322_SPC_JTAG_ACCESS_REG_tdi_LSB 0x2
+#define QIB_7322_SPC_JTAG_ACCESS_REG_tdi_MSB 0x2
+#define QIB_7322_SPC_JTAG_ACCESS_REG_tdi_RMASK 0x1
+#define QIB_7322_SPC_JTAG_ACCESS_REG_tdo_LSB 0x1
+#define QIB_7322_SPC_JTAG_ACCESS_REG_tdo_MSB 0x1
+#define QIB_7322_SPC_JTAG_ACCESS_REG_tdo_RMASK 0x1
+#define QIB_7322_SPC_JTAG_ACCESS_REG_rdy_LSB 0x0
+#define QIB_7322_SPC_JTAG_ACCESS_REG_rdy_MSB 0x0
+#define QIB_7322_SPC_JTAG_ACCESS_REG_rdy_RMASK 0x1
+
+#define QIB_7322_SendCheckMask0_OFFS 0x4C0
+#define QIB_7322_SendCheckMask0_DEF 0x0000000000000000
+#define QIB_7322_SendCheckMask0_SendCheckMask_63_32_LSB 0x0
+#define QIB_7322_SendCheckMask0_SendCheckMask_63_32_MSB 0x3F
+#define QIB_7322_SendCheckMask0_SendCheckMask_63_32_RMASK 0x0
+
+#define QIB_7322_SendGRHCheckMask0_OFFS 0x4E0
+#define QIB_7322_SendGRHCheckMask0_DEF 0x0000000000000000
+#define QIB_7322_SendGRHCheckMask0_SendGRHCheckMask_63_32_LSB 0x0
+#define QIB_7322_SendGRHCheckMask0_SendGRHCheckMask_63_32_MSB 0x3F
+#define QIB_7322_SendGRHCheckMask0_SendGRHCheckMask_63_32_RMASK 0x0
+
+#define QIB_7322_SendIBPacketMask0_OFFS 0x500
+#define QIB_7322_SendIBPacketMask0_DEF 0x0000000000000000
+#define QIB_7322_SendIBPacketMask0_SendIBPacketMask_63_32_LSB 0x0
+#define QIB_7322_SendIBPacketMask0_SendIBPacketMask_63_32_MSB 0x3F
+#define QIB_7322_SendIBPacketMask0_SendIBPacketMask_63_32_RMASK 0x0
+
+#define QIB_7322_IntRedirect0_OFFS 0x540
+#define QIB_7322_IntRedirect0_DEF 0x0000000000000000
+#define QIB_7322_IntRedirect0_vec11_LSB 0x37
+#define QIB_7322_IntRedirect0_vec11_MSB 0x3B
+#define QIB_7322_IntRedirect0_vec11_RMASK 0x1F
+#define QIB_7322_IntRedirect0_vec10_LSB 0x32
+#define QIB_7322_IntRedirect0_vec10_MSB 0x36
+#define QIB_7322_IntRedirect0_vec10_RMASK 0x1F
+#define QIB_7322_IntRedirect0_vec9_LSB 0x2D
+#define QIB_7322_IntRedirect0_vec9_MSB 0x31
+#define QIB_7322_IntRedirect0_vec9_RMASK 0x1F
+#define QIB_7322_IntRedirect0_vec8_LSB 0x28
+#define QIB_7322_IntRedirect0_vec8_MSB 0x2C
+#define QIB_7322_IntRedirect0_vec8_RMASK 0x1F
+#define QIB_7322_IntRedirect0_vec7_LSB 0x23
+#define QIB_7322_IntRedirect0_vec7_MSB 0x27
+#define QIB_7322_IntRedirect0_vec7_RMASK 0x1F
+#define QIB_7322_IntRedirect0_vec6_LSB 0x1E
+#define QIB_7322_IntRedirect0_vec6_MSB 0x22
+#define QIB_7322_IntRedirect0_vec6_RMASK 0x1F
+#define QIB_7322_IntRedirect0_vec5_LSB 0x19
+#define QIB_7322_IntRedirect0_vec5_MSB 0x1D
+#define QIB_7322_IntRedirect0_vec5_RMASK 0x1F
+#define QIB_7322_IntRedirect0_vec4_LSB 0x14
+#define QIB_7322_IntRedirect0_vec4_MSB 0x18
+#define QIB_7322_IntRedirect0_vec4_RMASK 0x1F
+#define QIB_7322_IntRedirect0_vec3_LSB 0xF
+#define QIB_7322_IntRedirect0_vec3_MSB 0x13
+#define QIB_7322_IntRedirect0_vec3_RMASK 0x1F
+#define QIB_7322_IntRedirect0_vec2_LSB 0xA
+#define QIB_7322_IntRedirect0_vec2_MSB 0xE
+#define QIB_7322_IntRedirect0_vec2_RMASK 0x1F
+#define QIB_7322_IntRedirect0_vec1_LSB 0x5
+#define QIB_7322_IntRedirect0_vec1_MSB 0x9
+#define QIB_7322_IntRedirect0_vec1_RMASK 0x1F
+#define QIB_7322_IntRedirect0_vec0_LSB 0x0
+#define QIB_7322_IntRedirect0_vec0_MSB 0x4
+#define QIB_7322_IntRedirect0_vec0_RMASK 0x1F
+
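Each IntRedirect register packs twelve 5-bit MSI-X vector numbers, so from the LSBs above (0x0, 0x5, 0xA, ...), redirect entry i lives in word i/12 at bit (i%12)*5. A sketch of that index arithmetic; the 8-byte stride between successive IntRedirect words is an assumption inferred from the generated offsets:

/* Sketch: point redirect entry 'i' at MSI-X vector 'vec' (RMW). */
static void set_redirect(void __iomem *kregbase, unsigned i, u32 vec)
{
	u64 off = QIB_7322_IntRedirect0_OFFS + 8 * (i / 12);
	unsigned lsb = (i % 12) * 5;
	u64 v = readq(kregbase + off);

	v &= ~((u64)QIB_7322_IntRedirect0_vec0_RMASK << lsb);
	v |= (u64)(vec & QIB_7322_IntRedirect0_vec0_RMASK) << lsb;
	writeq(v, kregbase + off);
}
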
+#define QIB_7322_Int_Granted_OFFS 0x570
+#define QIB_7322_Int_Granted_DEF 0x0000000000000000
+
+#define QIB_7322_vec_clr_without_int_OFFS 0x578
+#define QIB_7322_vec_clr_without_int_DEF 0x0000000000000000
+
+#define QIB_7322_DCACtrlA_OFFS 0x580
+#define QIB_7322_DCACtrlA_DEF 0x0000000000000000
+#define QIB_7322_DCACtrlA_SendDMAHead1DCAEnable_LSB 0x4
+#define QIB_7322_DCACtrlA_SendDMAHead1DCAEnable_MSB 0x4
+#define QIB_7322_DCACtrlA_SendDMAHead1DCAEnable_RMASK 0x1
+#define QIB_7322_DCACtrlA_SendDMAHead0DCAEnable_LSB 0x3
+#define QIB_7322_DCACtrlA_SendDMAHead0DCAEnable_MSB 0x3
+#define QIB_7322_DCACtrlA_SendDMAHead0DCAEnable_RMASK 0x1
+#define QIB_7322_DCACtrlA_RcvTailUpdDCAEnable_LSB 0x2
+#define QIB_7322_DCACtrlA_RcvTailUpdDCAEnable_MSB 0x2
+#define QIB_7322_DCACtrlA_RcvTailUpdDCAEnable_RMASK 0x1
+#define QIB_7322_DCACtrlA_EagerDCAEnable_LSB 0x1
+#define QIB_7322_DCACtrlA_EagerDCAEnable_MSB 0x1
+#define QIB_7322_DCACtrlA_EagerDCAEnable_RMASK 0x1
+#define QIB_7322_DCACtrlA_RcvHdrqDCAEnable_LSB 0x0
+#define QIB_7322_DCACtrlA_RcvHdrqDCAEnable_MSB 0x0
+#define QIB_7322_DCACtrlA_RcvHdrqDCAEnable_RMASK 0x1
+
+#define QIB_7322_DCACtrlB_OFFS 0x588
+#define QIB_7322_DCACtrlB_DEF 0x0000000000000000
+#define QIB_7322_DCACtrlB_RcvHdrq3DCAXfrCnt_LSB 0x36
+#define QIB_7322_DCACtrlB_RcvHdrq3DCAXfrCnt_MSB 0x3B
+#define QIB_7322_DCACtrlB_RcvHdrq3DCAXfrCnt_RMASK 0x3F
+#define QIB_7322_DCACtrlB_RcvHdrq3DCAOPH_LSB 0x2E
+#define QIB_7322_DCACtrlB_RcvHdrq3DCAOPH_MSB 0x35
+#define QIB_7322_DCACtrlB_RcvHdrq3DCAOPH_RMASK 0xFF
+#define QIB_7322_DCACtrlB_RcvHdrq2DCAXfrCnt_LSB 0x28
+#define QIB_7322_DCACtrlB_RcvHdrq2DCAXfrCnt_MSB 0x2D
+#define QIB_7322_DCACtrlB_RcvHdrq2DCAXfrCnt_RMASK 0x3F
+#define QIB_7322_DCACtrlB_RcvHdrq2DCAOPH_LSB 0x20
+#define QIB_7322_DCACtrlB_RcvHdrq2DCAOPH_MSB 0x27
+#define QIB_7322_DCACtrlB_RcvHdrq2DCAOPH_RMASK 0xFF
+#define QIB_7322_DCACtrlB_RcvHdrq1DCAXfrCnt_LSB 0x16
+#define QIB_7322_DCACtrlB_RcvHdrq1DCAXfrCnt_MSB 0x1B
+#define QIB_7322_DCACtrlB_RcvHdrq1DCAXfrCnt_RMASK 0x3F
+#define QIB_7322_DCACtrlB_RcvHdrq1DCAOPH_LSB 0xE
+#define QIB_7322_DCACtrlB_RcvHdrq1DCAOPH_MSB 0x15
+#define QIB_7322_DCACtrlB_RcvHdrq1DCAOPH_RMASK 0xFF
+#define QIB_7322_DCACtrlB_RcvHdrq0DCAXfrCnt_LSB 0x8
+#define QIB_7322_DCACtrlB_RcvHdrq0DCAXfrCnt_MSB 0xD
+#define QIB_7322_DCACtrlB_RcvHdrq0DCAXfrCnt_RMASK 0x3F
+#define QIB_7322_DCACtrlB_RcvHdrq0DCAOPH_LSB 0x0
+#define QIB_7322_DCACtrlB_RcvHdrq0DCAOPH_MSB 0x7
+#define QIB_7322_DCACtrlB_RcvHdrq0DCAOPH_RMASK 0xFF
+
+#define QIB_7322_DCACtrlC_OFFS 0x590
+#define QIB_7322_DCACtrlC_DEF 0x0000000000000000
+#define QIB_7322_DCACtrlC_RcvHdrq7DCAXfrCnt_LSB 0x36
+#define QIB_7322_DCACtrlC_RcvHdrq7DCAXfrCnt_MSB 0x3B
+#define QIB_7322_DCACtrlC_RcvHdrq7DCAXfrCnt_RMASK 0x3F
+#define QIB_7322_DCACtrlC_RcvHdrq7DCAOPH_LSB 0x2E
+#define QIB_7322_DCACtrlC_RcvHdrq7DCAOPH_MSB 0x35
+#define QIB_7322_DCACtrlC_RcvHdrq7DCAOPH_RMASK 0xFF
+#define QIB_7322_DCACtrlC_RcvHdrq6DCAXfrCnt_LSB 0x28
+#define QIB_7322_DCACtrlC_RcvHdrq6DCAXfrCnt_MSB 0x2D
+#define QIB_7322_DCACtrlC_RcvHdrq6DCAXfrCnt_RMASK 0x3F
+#define QIB_7322_DCACtrlC_RcvHdrq6DCAOPH_LSB 0x20
+#define QIB_7322_DCACtrlC_RcvHdrq6DCAOPH_MSB 0x27
+#define QIB_7322_DCACtrlC_RcvHdrq6DCAOPH_RMASK 0xFF
+#define QIB_7322_DCACtrlC_RcvHdrq5DCAXfrCnt_LSB 0x16
+#define QIB_7322_DCACtrlC_RcvHdrq5DCAXfrCnt_MSB 0x1B
+#define QIB_7322_DCACtrlC_RcvHdrq5DCAXfrCnt_RMASK 0x3F
+#define QIB_7322_DCACtrlC_RcvHdrq5DCAOPH_LSB 0xE
+#define QIB_7322_DCACtrlC_RcvHdrq5DCAOPH_MSB 0x15
+#define QIB_7322_DCACtrlC_RcvHdrq5DCAOPH_RMASK 0xFF
+#define QIB_7322_DCACtrlC_RcvHdrq4DCAXfrCnt_LSB 0x8
+#define QIB_7322_DCACtrlC_RcvHdrq4DCAXfrCnt_MSB 0xD
+#define QIB_7322_DCACtrlC_RcvHdrq4DCAXfrCnt_RMASK 0x3F
+#define QIB_7322_DCACtrlC_RcvHdrq4DCAOPH_LSB 0x0
+#define QIB_7322_DCACtrlC_RcvHdrq4DCAOPH_MSB 0x7
+#define QIB_7322_DCACtrlC_RcvHdrq4DCAOPH_RMASK 0xFF
+
+#define QIB_7322_DCACtrlD_OFFS 0x598
+#define QIB_7322_DCACtrlD_DEF 0x0000000000000000
+#define QIB_7322_DCACtrlD_RcvHdrq11DCAXfrCnt_LSB 0x36
+#define QIB_7322_DCACtrlD_RcvHdrq11DCAXfrCnt_MSB 0x3B
+#define QIB_7322_DCACtrlD_RcvHdrq11DCAXfrCnt_RMASK 0x3F
+#define QIB_7322_DCACtrlD_RcvHdrq11DCAOPH_LSB 0x2E
+#define QIB_7322_DCACtrlD_RcvHdrq11DCAOPH_MSB 0x35
+#define QIB_7322_DCACtrlD_RcvHdrq11DCAOPH_RMASK 0xFF
+#define QIB_7322_DCACtrlD_RcvHdrq10DCAXfrCnt_LSB 0x28
+#define QIB_7322_DCACtrlD_RcvHdrq10DCAXfrCnt_MSB 0x2D
+#define QIB_7322_DCACtrlD_RcvHdrq10DCAXfrCnt_RMASK 0x3F
+#define QIB_7322_DCACtrlD_RcvHdrq10DCAOPH_LSB 0x20
+#define QIB_7322_DCACtrlD_RcvHdrq10DCAOPH_MSB 0x27
+#define QIB_7322_DCACtrlD_RcvHdrq10DCAOPH_RMASK 0xFF
+#define QIB_7322_DCACtrlD_RcvHdrq9DCAXfrCnt_LSB 0x16
+#define QIB_7322_DCACtrlD_RcvHdrq9DCAXfrCnt_MSB 0x1B
+#define QIB_7322_DCACtrlD_RcvHdrq9DCAXfrCnt_RMASK 0x3F
+#define QIB_7322_DCACtrlD_RcvHdrq9DCAOPH_LSB 0xE
+#define QIB_7322_DCACtrlD_RcvHdrq9DCAOPH_MSB 0x15
+#define QIB_7322_DCACtrlD_RcvHdrq9DCAOPH_RMASK 0xFF
+#define QIB_7322_DCACtrlD_RcvHdrq8DCAXfrCnt_LSB 0x8
+#define QIB_7322_DCACtrlD_RcvHdrq8DCAXfrCnt_MSB 0xD
+#define QIB_7322_DCACtrlD_RcvHdrq8DCAXfrCnt_RMASK 0x3F
+#define QIB_7322_DCACtrlD_RcvHdrq8DCAOPH_LSB 0x0
+#define QIB_7322_DCACtrlD_RcvHdrq8DCAOPH_MSB 0x7
+#define QIB_7322_DCACtrlD_RcvHdrq8DCAOPH_RMASK 0xFF
+
+#define QIB_7322_DCACtrlE_OFFS 0x5A0
+#define QIB_7322_DCACtrlE_DEF 0x0000000000000000
+#define QIB_7322_DCACtrlE_RcvHdrq15DCAXfrCnt_LSB 0x36
+#define QIB_7322_DCACtrlE_RcvHdrq15DCAXfrCnt_MSB 0x3B
+#define QIB_7322_DCACtrlE_RcvHdrq15DCAXfrCnt_RMASK 0x3F
+#define QIB_7322_DCACtrlE_RcvHdrq15DCAOPH_LSB 0x2E
+#define QIB_7322_DCACtrlE_RcvHdrq15DCAOPH_MSB 0x35
+#define QIB_7322_DCACtrlE_RcvHdrq15DCAOPH_RMASK 0xFF
+#define QIB_7322_DCACtrlE_RcvHdrq14DCAXfrCnt_LSB 0x28
+#define QIB_7322_DCACtrlE_RcvHdrq14DCAXfrCnt_MSB 0x2D
+#define QIB_7322_DCACtrlE_RcvHdrq14DCAXfrCnt_RMASK 0x3F
+#define QIB_7322_DCACtrlE_RcvHdrq14DCAOPH_LSB 0x20
+#define QIB_7322_DCACtrlE_RcvHdrq14DCAOPH_MSB 0x27
+#define QIB_7322_DCACtrlE_RcvHdrq14DCAOPH_RMASK 0xFF
+#define QIB_7322_DCACtrlE_RcvHdrq13DCAXfrCnt_LSB 0x16
+#define QIB_7322_DCACtrlE_RcvHdrq13DCAXfrCnt_MSB 0x1B
+#define QIB_7322_DCACtrlE_RcvHdrq13DCAXfrCnt_RMASK 0x3F
+#define QIB_7322_DCACtrlE_RcvHdrq13DCAOPH_LSB 0xE
+#define QIB_7322_DCACtrlE_RcvHdrq13DCAOPH_MSB 0x15
+#define QIB_7322_DCACtrlE_RcvHdrq13DCAOPH_RMASK 0xFF
+#define QIB_7322_DCACtrlE_RcvHdrq12DCAXfrCnt_LSB 0x8
+#define QIB_7322_DCACtrlE_RcvHdrq12DCAXfrCnt_MSB 0xD
+#define QIB_7322_DCACtrlE_RcvHdrq12DCAXfrCnt_RMASK 0x3F
+#define QIB_7322_DCACtrlE_RcvHdrq12DCAOPH_LSB 0x0
+#define QIB_7322_DCACtrlE_RcvHdrq12DCAOPH_MSB 0x7
+#define QIB_7322_DCACtrlE_RcvHdrq12DCAOPH_RMASK 0xFF
+
+#define QIB_7322_DCACtrlF_OFFS 0x5A8
+#define QIB_7322_DCACtrlF_DEF 0x0000000000000000
+#define QIB_7322_DCACtrlF_SendDma1DCAOPH_LSB 0x28
+#define QIB_7322_DCACtrlF_SendDma1DCAOPH_MSB 0x2F
+#define QIB_7322_DCACtrlF_SendDma1DCAOPH_RMASK 0xFF
+#define QIB_7322_DCACtrlF_SendDma0DCAOPH_LSB 0x20
+#define QIB_7322_DCACtrlF_SendDma0DCAOPH_MSB 0x27
+#define QIB_7322_DCACtrlF_SendDma0DCAOPH_RMASK 0xFF
+#define QIB_7322_DCACtrlF_RcvHdrq17DCAXfrCnt_LSB 0x16
+#define QIB_7322_DCACtrlF_RcvHdrq17DCAXfrCnt_MSB 0x1B
+#define QIB_7322_DCACtrlF_RcvHdrq17DCAXfrCnt_RMASK 0x3F
+#define QIB_7322_DCACtrlF_RcvHdrq17DCAOPH_LSB 0xE
+#define QIB_7322_DCACtrlF_RcvHdrq17DCAOPH_MSB 0x15
+#define QIB_7322_DCACtrlF_RcvHdrq17DCAOPH_RMASK 0xFF
+#define QIB_7322_DCACtrlF_RcvHdrq16DCAXfrCnt_LSB 0x8
+#define QIB_7322_DCACtrlF_RcvHdrq16DCAXfrCnt_MSB 0xD
+#define QIB_7322_DCACtrlF_RcvHdrq16DCAXfrCnt_RMASK 0x3F
+#define QIB_7322_DCACtrlF_RcvHdrq16DCAOPH_LSB 0x0
+#define QIB_7322_DCACtrlF_RcvHdrq16DCAOPH_MSB 0x7
+#define QIB_7322_DCACtrlF_RcvHdrq16DCAOPH_RMASK 0xFF
+
+#define QIB_7322_RcvAvailTimeOut0_OFFS 0xC00
+#define QIB_7322_RcvAvailTimeOut0_DEF 0x0000000000000000
+#define QIB_7322_RcvAvailTimeOut0_RcvAvailTOCount_LSB 0x10
+#define QIB_7322_RcvAvailTimeOut0_RcvAvailTOCount_MSB 0x1F
+#define QIB_7322_RcvAvailTimeOut0_RcvAvailTOCount_RMASK 0xFFFF
+#define QIB_7322_RcvAvailTimeOut0_RcvAvailTOReload_LSB 0x0
+#define QIB_7322_RcvAvailTimeOut0_RcvAvailTOReload_MSB 0xF
+#define QIB_7322_RcvAvailTimeOut0_RcvAvailTOReload_RMASK 0xFFFF
+
+#define QIB_7322_CntrRegBase_0_OFFS 0x1028
+#define QIB_7322_CntrRegBase_0_DEF 0x0000000000012000
+
+#define QIB_7322_ErrMask_0_OFFS 0x1080
+#define QIB_7322_ErrMask_0_DEF 0x0000000000000000
+#define QIB_7322_ErrMask_0_IBStatusChangedMask_LSB 0x3A
+#define QIB_7322_ErrMask_0_IBStatusChangedMask_MSB 0x3A
+#define QIB_7322_ErrMask_0_IBStatusChangedMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SHeadersErrMask_LSB 0x39
+#define QIB_7322_ErrMask_0_SHeadersErrMask_MSB 0x39
+#define QIB_7322_ErrMask_0_SHeadersErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_VL15BufMisuseErrMask_LSB 0x36
+#define QIB_7322_ErrMask_0_VL15BufMisuseErrMask_MSB 0x36
+#define QIB_7322_ErrMask_0_VL15BufMisuseErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SDmaHaltErrMask_LSB 0x31
+#define QIB_7322_ErrMask_0_SDmaHaltErrMask_MSB 0x31
+#define QIB_7322_ErrMask_0_SDmaHaltErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SDmaDescAddrMisalignErrMask_LSB 0x30
+#define QIB_7322_ErrMask_0_SDmaDescAddrMisalignErrMask_MSB 0x30
+#define QIB_7322_ErrMask_0_SDmaDescAddrMisalignErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SDmaUnexpDataErrMask_LSB 0x2F
+#define QIB_7322_ErrMask_0_SDmaUnexpDataErrMask_MSB 0x2F
+#define QIB_7322_ErrMask_0_SDmaUnexpDataErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SDmaMissingDwErrMask_LSB 0x2E
+#define QIB_7322_ErrMask_0_SDmaMissingDwErrMask_MSB 0x2E
+#define QIB_7322_ErrMask_0_SDmaMissingDwErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SDmaDwEnErrMask_LSB 0x2D
+#define QIB_7322_ErrMask_0_SDmaDwEnErrMask_MSB 0x2D
+#define QIB_7322_ErrMask_0_SDmaDwEnErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SDmaRpyTagErrMask_LSB 0x2C
+#define QIB_7322_ErrMask_0_SDmaRpyTagErrMask_MSB 0x2C
+#define QIB_7322_ErrMask_0_SDmaRpyTagErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SDma1stDescErrMask_LSB 0x2B
+#define QIB_7322_ErrMask_0_SDma1stDescErrMask_MSB 0x2B
+#define QIB_7322_ErrMask_0_SDma1stDescErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SDmaBaseErrMask_LSB 0x2A
+#define QIB_7322_ErrMask_0_SDmaBaseErrMask_MSB 0x2A
+#define QIB_7322_ErrMask_0_SDmaBaseErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SDmaTailOutOfBoundErrMask_LSB 0x29
+#define QIB_7322_ErrMask_0_SDmaTailOutOfBoundErrMask_MSB 0x29
+#define QIB_7322_ErrMask_0_SDmaTailOutOfBoundErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SDmaOutOfBoundErrMask_LSB 0x28
+#define QIB_7322_ErrMask_0_SDmaOutOfBoundErrMask_MSB 0x28
+#define QIB_7322_ErrMask_0_SDmaOutOfBoundErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SDmaGenMismatchErrMask_LSB 0x27
+#define QIB_7322_ErrMask_0_SDmaGenMismatchErrMask_MSB 0x27
+#define QIB_7322_ErrMask_0_SDmaGenMismatchErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SendBufMisuseErrMask_LSB 0x26
+#define QIB_7322_ErrMask_0_SendBufMisuseErrMask_MSB 0x26
+#define QIB_7322_ErrMask_0_SendBufMisuseErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SendUnsupportedVLErrMask_LSB 0x25
+#define QIB_7322_ErrMask_0_SendUnsupportedVLErrMask_MSB 0x25
+#define QIB_7322_ErrMask_0_SendUnsupportedVLErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SendUnexpectedPktNumErrMask_LSB 0x24
+#define QIB_7322_ErrMask_0_SendUnexpectedPktNumErrMask_MSB 0x24
+#define QIB_7322_ErrMask_0_SendUnexpectedPktNumErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SendDroppedDataPktErrMask_LSB 0x22
+#define QIB_7322_ErrMask_0_SendDroppedDataPktErrMask_MSB 0x22
+#define QIB_7322_ErrMask_0_SendDroppedDataPktErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SendDroppedSmpPktErrMask_LSB 0x21
+#define QIB_7322_ErrMask_0_SendDroppedSmpPktErrMask_MSB 0x21
+#define QIB_7322_ErrMask_0_SendDroppedSmpPktErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SendPktLenErrMask_LSB 0x20
+#define QIB_7322_ErrMask_0_SendPktLenErrMask_MSB 0x20
+#define QIB_7322_ErrMask_0_SendPktLenErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SendUnderRunErrMask_LSB 0x1F
+#define QIB_7322_ErrMask_0_SendUnderRunErrMask_MSB 0x1F
+#define QIB_7322_ErrMask_0_SendUnderRunErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SendMaxPktLenErrMask_LSB 0x1E
+#define QIB_7322_ErrMask_0_SendMaxPktLenErrMask_MSB 0x1E
+#define QIB_7322_ErrMask_0_SendMaxPktLenErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SendMinPktLenErrMask_LSB 0x1D
+#define QIB_7322_ErrMask_0_SendMinPktLenErrMask_MSB 0x1D
+#define QIB_7322_ErrMask_0_SendMinPktLenErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_RcvIBLostLinkErrMask_LSB 0x11
+#define QIB_7322_ErrMask_0_RcvIBLostLinkErrMask_MSB 0x11
+#define QIB_7322_ErrMask_0_RcvIBLostLinkErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_RcvHdrErrMask_LSB 0x10
+#define QIB_7322_ErrMask_0_RcvHdrErrMask_MSB 0x10
+#define QIB_7322_ErrMask_0_RcvHdrErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_RcvHdrLenErrMask_LSB 0xF
+#define QIB_7322_ErrMask_0_RcvHdrLenErrMask_MSB 0xF
+#define QIB_7322_ErrMask_0_RcvHdrLenErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_RcvBadTidErrMask_LSB 0xE
+#define QIB_7322_ErrMask_0_RcvBadTidErrMask_MSB 0xE
+#define QIB_7322_ErrMask_0_RcvBadTidErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_RcvBadVersionErrMask_LSB 0xB
+#define QIB_7322_ErrMask_0_RcvBadVersionErrMask_MSB 0xB
+#define QIB_7322_ErrMask_0_RcvBadVersionErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_RcvIBFlowErrMask_LSB 0xA
+#define QIB_7322_ErrMask_0_RcvIBFlowErrMask_MSB 0xA
+#define QIB_7322_ErrMask_0_RcvIBFlowErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_RcvEBPErrMask_LSB 0x9
+#define QIB_7322_ErrMask_0_RcvEBPErrMask_MSB 0x9
+#define QIB_7322_ErrMask_0_RcvEBPErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_RcvUnsupportedVLErrMask_LSB 0x8
+#define QIB_7322_ErrMask_0_RcvUnsupportedVLErrMask_MSB 0x8
+#define QIB_7322_ErrMask_0_RcvUnsupportedVLErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_RcvUnexpectedCharErrMask_LSB 0x7
+#define QIB_7322_ErrMask_0_RcvUnexpectedCharErrMask_MSB 0x7
+#define QIB_7322_ErrMask_0_RcvUnexpectedCharErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_RcvShortPktLenErrMask_LSB 0x6
+#define QIB_7322_ErrMask_0_RcvShortPktLenErrMask_MSB 0x6
+#define QIB_7322_ErrMask_0_RcvShortPktLenErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_RcvLongPktLenErrMask_LSB 0x5
+#define QIB_7322_ErrMask_0_RcvLongPktLenErrMask_MSB 0x5
+#define QIB_7322_ErrMask_0_RcvLongPktLenErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_RcvMaxPktLenErrMask_LSB 0x4
+#define QIB_7322_ErrMask_0_RcvMaxPktLenErrMask_MSB 0x4
+#define QIB_7322_ErrMask_0_RcvMaxPktLenErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_RcvMinPktLenErrMask_LSB 0x3
+#define QIB_7322_ErrMask_0_RcvMinPktLenErrMask_MSB 0x3
+#define QIB_7322_ErrMask_0_RcvMinPktLenErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_RcvICRCErrMask_LSB 0x2
+#define QIB_7322_ErrMask_0_RcvICRCErrMask_MSB 0x2
+#define QIB_7322_ErrMask_0_RcvICRCErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_RcvVCRCErrMask_LSB 0x1
+#define QIB_7322_ErrMask_0_RcvVCRCErrMask_MSB 0x1
+#define QIB_7322_ErrMask_0_RcvVCRCErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_RcvFormatErrMask_LSB 0x0
+#define QIB_7322_ErrMask_0_RcvFormatErrMask_MSB 0x0
+#define QIB_7322_ErrMask_0_RcvFormatErrMask_RMASK 0x1
+
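ErrMask_0 gates which per-port error conditions may raise an interrupt; the ErrStatus_0 and ErrClear_0 registers defined just below latch and acknowledge the same conditions, bit for bit. A sketch of unmasking only the CRC-related receive errors, with the same assumed raw accessors as above:

/* Sketch: unmask the two receive-CRC error interrupts for port 0. */
static void enable_crc_errs(void __iomem *kregbase)
{
	u64 mask = readq(kregbase + QIB_7322_ErrMask_0_OFFS);

	mask |= 1ULL << QIB_7322_ErrMask_0_RcvICRCErrMask_LSB;
	mask |= 1ULL << QIB_7322_ErrMask_0_RcvVCRCErrMask_LSB;
	writeq(mask, kregbase + QIB_7322_ErrMask_0_OFFS);
}
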
+#define QIB_7322_ErrStatus_0_OFFS 0x1088
+#define QIB_7322_ErrStatus_0_DEF 0x0000000000000000
+#define QIB_7322_ErrStatus_0_IBStatusChanged_LSB 0x3A
+#define QIB_7322_ErrStatus_0_IBStatusChanged_MSB 0x3A
+#define QIB_7322_ErrStatus_0_IBStatusChanged_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SHeadersErr_LSB 0x39
+#define QIB_7322_ErrStatus_0_SHeadersErr_MSB 0x39
+#define QIB_7322_ErrStatus_0_SHeadersErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_VL15BufMisuseErr_LSB 0x36
+#define QIB_7322_ErrStatus_0_VL15BufMisuseErr_MSB 0x36
+#define QIB_7322_ErrStatus_0_VL15BufMisuseErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SDmaHaltErr_LSB 0x31
+#define QIB_7322_ErrStatus_0_SDmaHaltErr_MSB 0x31
+#define QIB_7322_ErrStatus_0_SDmaHaltErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SDmaDescAddrMisalignErr_LSB 0x30
+#define QIB_7322_ErrStatus_0_SDmaDescAddrMisalignErr_MSB 0x30
+#define QIB_7322_ErrStatus_0_SDmaDescAddrMisalignErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SDmaUnexpDataErr_LSB 0x2F
+#define QIB_7322_ErrStatus_0_SDmaUnexpDataErr_MSB 0x2F
+#define QIB_7322_ErrStatus_0_SDmaUnexpDataErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SDmaMissingDwErr_LSB 0x2E
+#define QIB_7322_ErrStatus_0_SDmaMissingDwErr_MSB 0x2E
+#define QIB_7322_ErrStatus_0_SDmaMissingDwErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SDmaDwEnErr_LSB 0x2D
+#define QIB_7322_ErrStatus_0_SDmaDwEnErr_MSB 0x2D
+#define QIB_7322_ErrStatus_0_SDmaDwEnErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SDmaRpyTagErr_LSB 0x2C
+#define QIB_7322_ErrStatus_0_SDmaRpyTagErr_MSB 0x2C
+#define QIB_7322_ErrStatus_0_SDmaRpyTagErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SDma1stDescErr_LSB 0x2B
+#define QIB_7322_ErrStatus_0_SDma1stDescErr_MSB 0x2B
+#define QIB_7322_ErrStatus_0_SDma1stDescErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SDmaBaseErr_LSB 0x2A
+#define QIB_7322_ErrStatus_0_SDmaBaseErr_MSB 0x2A
+#define QIB_7322_ErrStatus_0_SDmaBaseErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SDmaTailOutOfBoundErr_LSB 0x29
+#define QIB_7322_ErrStatus_0_SDmaTailOutOfBoundErr_MSB 0x29
+#define QIB_7322_ErrStatus_0_SDmaTailOutOfBoundErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SDmaOutOfBoundErr_LSB 0x28
+#define QIB_7322_ErrStatus_0_SDmaOutOfBoundErr_MSB 0x28
+#define QIB_7322_ErrStatus_0_SDmaOutOfBoundErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SDmaGenMismatchErr_LSB 0x27
+#define QIB_7322_ErrStatus_0_SDmaGenMismatchErr_MSB 0x27
+#define QIB_7322_ErrStatus_0_SDmaGenMismatchErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SendBufMisuseErr_LSB 0x26
+#define QIB_7322_ErrStatus_0_SendBufMisuseErr_MSB 0x26
+#define QIB_7322_ErrStatus_0_SendBufMisuseErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SendUnsupportedVLErr_LSB 0x25
+#define QIB_7322_ErrStatus_0_SendUnsupportedVLErr_MSB 0x25
+#define QIB_7322_ErrStatus_0_SendUnsupportedVLErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SendUnexpectedPktNumErr_LSB 0x24
+#define QIB_7322_ErrStatus_0_SendUnexpectedPktNumErr_MSB 0x24
+#define QIB_7322_ErrStatus_0_SendUnexpectedPktNumErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SendDroppedDataPktErr_LSB 0x22
+#define QIB_7322_ErrStatus_0_SendDroppedDataPktErr_MSB 0x22
+#define QIB_7322_ErrStatus_0_SendDroppedDataPktErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SendDroppedSmpPktErr_LSB 0x21
+#define QIB_7322_ErrStatus_0_SendDroppedSmpPktErr_MSB 0x21
+#define QIB_7322_ErrStatus_0_SendDroppedSmpPktErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SendPktLenErr_LSB 0x20
+#define QIB_7322_ErrStatus_0_SendPktLenErr_MSB 0x20
+#define QIB_7322_ErrStatus_0_SendPktLenErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SendUnderRunErr_LSB 0x1F
+#define QIB_7322_ErrStatus_0_SendUnderRunErr_MSB 0x1F
+#define QIB_7322_ErrStatus_0_SendUnderRunErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SendMaxPktLenErr_LSB 0x1E
+#define QIB_7322_ErrStatus_0_SendMaxPktLenErr_MSB 0x1E
+#define QIB_7322_ErrStatus_0_SendMaxPktLenErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SendMinPktLenErr_LSB 0x1D
+#define QIB_7322_ErrStatus_0_SendMinPktLenErr_MSB 0x1D
+#define QIB_7322_ErrStatus_0_SendMinPktLenErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvIBLostLinkErr_LSB 0x11
+#define QIB_7322_ErrStatus_0_RcvIBLostLinkErr_MSB 0x11
+#define QIB_7322_ErrStatus_0_RcvIBLostLinkErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvHdrErr_LSB 0x10
+#define QIB_7322_ErrStatus_0_RcvHdrErr_MSB 0x10
+#define QIB_7322_ErrStatus_0_RcvHdrErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvHdrLenErr_LSB 0xF
+#define QIB_7322_ErrStatus_0_RcvHdrLenErr_MSB 0xF
+#define QIB_7322_ErrStatus_0_RcvHdrLenErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvBadTidErr_LSB 0xE
+#define QIB_7322_ErrStatus_0_RcvBadTidErr_MSB 0xE
+#define QIB_7322_ErrStatus_0_RcvBadTidErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvBadVersionErr_LSB 0xB
+#define QIB_7322_ErrStatus_0_RcvBadVersionErr_MSB 0xB
+#define QIB_7322_ErrStatus_0_RcvBadVersionErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvIBFlowErr_LSB 0xA
+#define QIB_7322_ErrStatus_0_RcvIBFlowErr_MSB 0xA
+#define QIB_7322_ErrStatus_0_RcvIBFlowErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvEBPErr_LSB 0x9
+#define QIB_7322_ErrStatus_0_RcvEBPErr_MSB 0x9
+#define QIB_7322_ErrStatus_0_RcvEBPErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvUnsupportedVLErr_LSB 0x8
+#define QIB_7322_ErrStatus_0_RcvUnsupportedVLErr_MSB 0x8
+#define QIB_7322_ErrStatus_0_RcvUnsupportedVLErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvUnexpectedCharErr_LSB 0x7
+#define QIB_7322_ErrStatus_0_RcvUnexpectedCharErr_MSB 0x7
+#define QIB_7322_ErrStatus_0_RcvUnexpectedCharErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvShortPktLenErr_LSB 0x6
+#define QIB_7322_ErrStatus_0_RcvShortPktLenErr_MSB 0x6
+#define QIB_7322_ErrStatus_0_RcvShortPktLenErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvLongPktLenErr_LSB 0x5
+#define QIB_7322_ErrStatus_0_RcvLongPktLenErr_MSB 0x5
+#define QIB_7322_ErrStatus_0_RcvLongPktLenErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvMaxPktLenErr_LSB 0x4
+#define QIB_7322_ErrStatus_0_RcvMaxPktLenErr_MSB 0x4
+#define QIB_7322_ErrStatus_0_RcvMaxPktLenErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvMinPktLenErr_LSB 0x3
+#define QIB_7322_ErrStatus_0_RcvMinPktLenErr_MSB 0x3
+#define QIB_7322_ErrStatus_0_RcvMinPktLenErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvICRCErr_LSB 0x2
+#define QIB_7322_ErrStatus_0_RcvICRCErr_MSB 0x2
+#define QIB_7322_ErrStatus_0_RcvICRCErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvVCRCErr_LSB 0x1
+#define QIB_7322_ErrStatus_0_RcvVCRCErr_MSB 0x1
+#define QIB_7322_ErrStatus_0_RcvVCRCErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvFormatErr_LSB 0x0
+#define QIB_7322_ErrStatus_0_RcvFormatErr_MSB 0x0
+#define QIB_7322_ErrStatus_0_RcvFormatErr_RMASK 0x1
+
+#define QIB_7322_ErrClear_0_OFFS 0x1090
+#define QIB_7322_ErrClear_0_DEF 0x0000000000000000
+#define QIB_7322_ErrClear_0_IBStatusChangedClear_LSB 0x3A
+#define QIB_7322_ErrClear_0_IBStatusChangedClear_MSB 0x3A
+#define QIB_7322_ErrClear_0_IBStatusChangedClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SHeadersErrClear_LSB 0x39
+#define QIB_7322_ErrClear_0_SHeadersErrClear_MSB 0x39
+#define QIB_7322_ErrClear_0_SHeadersErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_VL15BufMisuseErrClear_LSB 0x36
+#define QIB_7322_ErrClear_0_VL15BufMisuseErrClear_MSB 0x36
+#define QIB_7322_ErrClear_0_VL15BufMisuseErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SDmaHaltErrClear_LSB 0x31
+#define QIB_7322_ErrClear_0_SDmaHaltErrClear_MSB 0x31
+#define QIB_7322_ErrClear_0_SDmaHaltErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SDmaDescAddrMisalignErrClear_LSB 0x30
+#define QIB_7322_ErrClear_0_SDmaDescAddrMisalignErrClear_MSB 0x30
+#define QIB_7322_ErrClear_0_SDmaDescAddrMisalignErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SDmaUnexpDataErrClear_LSB 0x2F
+#define QIB_7322_ErrClear_0_SDmaUnexpDataErrClear_MSB 0x2F
+#define QIB_7322_ErrClear_0_SDmaUnexpDataErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SDmaMissingDwErrClear_LSB 0x2E
+#define QIB_7322_ErrClear_0_SDmaMissingDwErrClear_MSB 0x2E
+#define QIB_7322_ErrClear_0_SDmaMissingDwErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SDmaDwEnErrClear_LSB 0x2D
+#define QIB_7322_ErrClear_0_SDmaDwEnErrClear_MSB 0x2D
+#define QIB_7322_ErrClear_0_SDmaDwEnErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SDmaRpyTagErrClear_LSB 0x2C
+#define QIB_7322_ErrClear_0_SDmaRpyTagErrClear_MSB 0x2C
+#define QIB_7322_ErrClear_0_SDmaRpyTagErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SDma1stDescErrClear_LSB 0x2B
+#define QIB_7322_ErrClear_0_SDma1stDescErrClear_MSB 0x2B
+#define QIB_7322_ErrClear_0_SDma1stDescErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SDmaBaseErrClear_LSB 0x2A
+#define QIB_7322_ErrClear_0_SDmaBaseErrClear_MSB 0x2A
+#define QIB_7322_ErrClear_0_SDmaBaseErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SDmaTailOutOfBoundErrClear_LSB 0x29
+#define QIB_7322_ErrClear_0_SDmaTailOutOfBoundErrClear_MSB 0x29
+#define QIB_7322_ErrClear_0_SDmaTailOutOfBoundErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SDmaOutOfBoundErrClear_LSB 0x28
+#define QIB_7322_ErrClear_0_SDmaOutOfBoundErrClear_MSB 0x28
+#define QIB_7322_ErrClear_0_SDmaOutOfBoundErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SDmaGenMismatchErrClear_LSB 0x27
+#define QIB_7322_ErrClear_0_SDmaGenMismatchErrClear_MSB 0x27
+#define QIB_7322_ErrClear_0_SDmaGenMismatchErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SendBufMisuseErrClear_LSB 0x26
+#define QIB_7322_ErrClear_0_SendBufMisuseErrClear_MSB 0x26
+#define QIB_7322_ErrClear_0_SendBufMisuseErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SendUnsupportedVLErrClear_LSB 0x25
+#define QIB_7322_ErrClear_0_SendUnsupportedVLErrClear_MSB 0x25
+#define QIB_7322_ErrClear_0_SendUnsupportedVLErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SendUnexpectedPktNumErrClear_LSB 0x24
+#define QIB_7322_ErrClear_0_SendUnexpectedPktNumErrClear_MSB 0x24
+#define QIB_7322_ErrClear_0_SendUnexpectedPktNumErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SendDroppedDataPktErrClear_LSB 0x22
+#define QIB_7322_ErrClear_0_SendDroppedDataPktErrClear_MSB 0x22
+#define QIB_7322_ErrClear_0_SendDroppedDataPktErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SendDroppedSmpPktErrClear_LSB 0x21
+#define QIB_7322_ErrClear_0_SendDroppedSmpPktErrClear_MSB 0x21
+#define QIB_7322_ErrClear_0_SendDroppedSmpPktErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SendPktLenErrClear_LSB 0x20
+#define QIB_7322_ErrClear_0_SendPktLenErrClear_MSB 0x20
+#define QIB_7322_ErrClear_0_SendPktLenErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SendUnderRunErrClear_LSB 0x1F
+#define QIB_7322_ErrClear_0_SendUnderRunErrClear_MSB 0x1F
+#define QIB_7322_ErrClear_0_SendUnderRunErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SendMaxPktLenErrClear_LSB 0x1E
+#define QIB_7322_ErrClear_0_SendMaxPktLenErrClear_MSB 0x1E
+#define QIB_7322_ErrClear_0_SendMaxPktLenErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SendMinPktLenErrClear_LSB 0x1D
+#define QIB_7322_ErrClear_0_SendMinPktLenErrClear_MSB 0x1D
+#define QIB_7322_ErrClear_0_SendMinPktLenErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvIBLostLinkErrClear_LSB 0x11
+#define QIB_7322_ErrClear_0_RcvIBLostLinkErrClear_MSB 0x11
+#define QIB_7322_ErrClear_0_RcvIBLostLinkErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvHdrErrClear_LSB 0x10
+#define QIB_7322_ErrClear_0_RcvHdrErrClear_MSB 0x10
+#define QIB_7322_ErrClear_0_RcvHdrErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvHdrLenErrClear_LSB 0xF
+#define QIB_7322_ErrClear_0_RcvHdrLenErrClear_MSB 0xF
+#define QIB_7322_ErrClear_0_RcvHdrLenErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvBadTidErrClear_LSB 0xE
+#define QIB_7322_ErrClear_0_RcvBadTidErrClear_MSB 0xE
+#define QIB_7322_ErrClear_0_RcvBadTidErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvBadVersionErrClear_LSB 0xB
+#define QIB_7322_ErrClear_0_RcvBadVersionErrClear_MSB 0xB
+#define QIB_7322_ErrClear_0_RcvBadVersionErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvIBFlowErrClear_LSB 0xA
+#define QIB_7322_ErrClear_0_RcvIBFlowErrClear_MSB 0xA
+#define QIB_7322_ErrClear_0_RcvIBFlowErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvEBPErrClear_LSB 0x9
+#define QIB_7322_ErrClear_0_RcvEBPErrClear_MSB 0x9
+#define QIB_7322_ErrClear_0_RcvEBPErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvUnsupportedVLErrClear_LSB 0x8
+#define QIB_7322_ErrClear_0_RcvUnsupportedVLErrClear_MSB 0x8
+#define QIB_7322_ErrClear_0_RcvUnsupportedVLErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvUnexpectedCharErrClear_LSB 0x7
+#define QIB_7322_ErrClear_0_RcvUnexpectedCharErrClear_MSB 0x7
+#define QIB_7322_ErrClear_0_RcvUnexpectedCharErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvShortPktLenErrClear_LSB 0x6
+#define QIB_7322_ErrClear_0_RcvShortPktLenErrClear_MSB 0x6
+#define QIB_7322_ErrClear_0_RcvShortPktLenErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvLongPktLenErrClear_LSB 0x5
+#define QIB_7322_ErrClear_0_RcvLongPktLenErrClear_MSB 0x5
+#define QIB_7322_ErrClear_0_RcvLongPktLenErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvMaxPktLenErrClear_LSB 0x4
+#define QIB_7322_ErrClear_0_RcvMaxPktLenErrClear_MSB 0x4
+#define QIB_7322_ErrClear_0_RcvMaxPktLenErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvMinPktLenErrClear_LSB 0x3
+#define QIB_7322_ErrClear_0_RcvMinPktLenErrClear_MSB 0x3
+#define QIB_7322_ErrClear_0_RcvMinPktLenErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvICRCErrClear_LSB 0x2
+#define QIB_7322_ErrClear_0_RcvICRCErrClear_MSB 0x2
+#define QIB_7322_ErrClear_0_RcvICRCErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvVCRCErrClear_LSB 0x1
+#define QIB_7322_ErrClear_0_RcvVCRCErrClear_MSB 0x1
+#define QIB_7322_ErrClear_0_RcvVCRCErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvFormatErrClear_LSB 0x0
+#define QIB_7322_ErrClear_0_RcvFormatErrClear_MSB 0x0
+#define QIB_7322_ErrClear_0_RcvFormatErrClear_RMASK 0x1
+
+#define QIB_7322_TXEStatus_0_OFFS 0x10B8
+#define QIB_7322_TXEStatus_0_DEF 0x0000000XC00080FF
+#define QIB_7322_TXEStatus_0_TXE_IBC_Idle_LSB 0x1F
+#define QIB_7322_TXEStatus_0_TXE_IBC_Idle_MSB 0x1F
+#define QIB_7322_TXEStatus_0_TXE_IBC_Idle_RMASK 0x1
+#define QIB_7322_TXEStatus_0_RmFifoEmpty_LSB 0x1E
+#define QIB_7322_TXEStatus_0_RmFifoEmpty_MSB 0x1E
+#define QIB_7322_TXEStatus_0_RmFifoEmpty_RMASK 0x1
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL15_LSB 0xF
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL15_MSB 0xF
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL15_RMASK 0x1
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL7_LSB 0x7
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL7_MSB 0x7
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL7_RMASK 0x1
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL6_LSB 0x6
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL6_MSB 0x6
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL6_RMASK 0x1
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL5_LSB 0x5
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL5_MSB 0x5
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL5_RMASK 0x1
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL4_LSB 0x4
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL4_MSB 0x4
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL4_RMASK 0x1
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL3_LSB 0x3
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL3_MSB 0x3
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL3_RMASK 0x1
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL2_LSB 0x2
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL2_MSB 0x2
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL2_RMASK 0x1
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL1_LSB 0x1
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL1_MSB 0x1
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL1_RMASK 0x1
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL0_LSB 0x0
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL0_MSB 0x0
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL0_RMASK 0x1
+
+#define QIB_7322_RcvCtrl_0_OFFS 0x1100
+#define QIB_7322_RcvCtrl_0_DEF 0x0000000000000000
+#define QIB_7322_RcvCtrl_0_RcvResetCredit_LSB 0x2A
+#define QIB_7322_RcvCtrl_0_RcvResetCredit_MSB 0x2A
+#define QIB_7322_RcvCtrl_0_RcvResetCredit_RMASK 0x1
+#define QIB_7322_RcvCtrl_0_RcvPartitionKeyDisable_LSB 0x29
+#define QIB_7322_RcvCtrl_0_RcvPartitionKeyDisable_MSB 0x29
+#define QIB_7322_RcvCtrl_0_RcvPartitionKeyDisable_RMASK 0x1
+#define QIB_7322_RcvCtrl_0_RcvQPMapEnable_LSB 0x28
+#define QIB_7322_RcvCtrl_0_RcvQPMapEnable_MSB 0x28
+#define QIB_7322_RcvCtrl_0_RcvQPMapEnable_RMASK 0x1
+#define QIB_7322_RcvCtrl_0_RcvIBPortEnable_LSB 0x27
+#define QIB_7322_RcvCtrl_0_RcvIBPortEnable_MSB 0x27
+#define QIB_7322_RcvCtrl_0_RcvIBPortEnable_RMASK 0x1
+#define QIB_7322_RcvCtrl_0_ContextEnableUser_LSB 0x2
+#define QIB_7322_RcvCtrl_0_ContextEnableUser_MSB 0x11
+#define QIB_7322_RcvCtrl_0_ContextEnableUser_RMASK 0xFFFF
+#define QIB_7322_RcvCtrl_0_ContextEnableKernel_LSB 0x0
+#define QIB_7322_RcvCtrl_0_ContextEnableKernel_MSB 0x0
+#define QIB_7322_RcvCtrl_0_ContextEnableKernel_RMASK 0x1
+
+#define QIB_7322_RcvBTHQP_0_OFFS 0x1108
+#define QIB_7322_RcvBTHQP_0_DEF 0x0000000000000000
+#define QIB_7322_RcvBTHQP_0_RcvBTHQP_LSB 0x0
+#define QIB_7322_RcvBTHQP_0_RcvBTHQP_MSB 0x17
+#define QIB_7322_RcvBTHQP_0_RcvBTHQP_RMASK 0xFFFFFF
+
+#define QIB_7322_RcvQPMapTableA_0_OFFS 0x1110
+#define QIB_7322_RcvQPMapTableA_0_DEF 0x0000000000000000
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext5_LSB 0x19
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext5_MSB 0x1D
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext5_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext4_LSB 0x14
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext4_MSB 0x18
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext4_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext3_LSB 0xF
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext3_MSB 0x13
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext3_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext2_LSB 0xA
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext2_MSB 0xE
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext2_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext1_LSB 0x5
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext1_MSB 0x9
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext1_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext0_LSB 0x0
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext0_MSB 0x4
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext0_RMASK 0x1F
+
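The RcvQPMapTable registers spread thirty-two 5-bit receive-context numbers across six consecutive words (tables A through F, with F holding only entries 30 and 31): entry n sits in word n/6 at bit (n%6)*5. A sketch of the lookup arithmetic; the 8-byte stride between tables follows from the generated offsets (0x1110 through 0x1138):

/* Sketch: read back the receive context mapped for QP low-bits 'n'. */
static u32 qpmap_context(void __iomem *kregbase, unsigned n)
{
	u64 off = QIB_7322_RcvQPMapTableA_0_OFFS + 8 * (n / 6);
	unsigned lsb = (n % 6) * 5;

	return (readq(kregbase + off) >> lsb) &
	       QIB_7322_RcvQPMapTableA_0_RcvQPMapContext0_RMASK;
}
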
+#define QIB_7322_RcvQPMapTableB_0_OFFS 0x1118
+#define QIB_7322_RcvQPMapTableB_0_DEF 0x0000000000000000
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext11_LSB 0x19
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext11_MSB 0x1D
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext11_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext10_LSB 0x14
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext10_MSB 0x18
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext10_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext9_LSB 0xF
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext9_MSB 0x13
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext9_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext8_LSB 0xA
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext8_MSB 0xE
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext8_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext7_LSB 0x5
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext7_MSB 0x9
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext7_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext6_LSB 0x0
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext6_MSB 0x4
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext6_RMASK 0x1F
+
+#define QIB_7322_RcvQPMapTableC_0_OFFS 0x1120
+#define QIB_7322_RcvQPMapTableC_0_DEF 0x0000000000000000
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext17_LSB 0x19
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext17_MSB 0x1D
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext17_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext16_LSB 0x14
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext16_MSB 0x18
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext16_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext15_LSB 0xF
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext15_MSB 0x13
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext15_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext14_LSB 0xA
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext14_MSB 0xE
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext14_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext13_LSB 0x5
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext13_MSB 0x9
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext13_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext12_LSB 0x0
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext12_MSB 0x4
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext12_RMASK 0x1F
+
+#define QIB_7322_RcvQPMapTableD_0_OFFS 0x1128
+#define QIB_7322_RcvQPMapTableD_0_DEF 0x0000000000000000
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext23_LSB 0x19
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext23_MSB 0x1D
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext23_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext22_LSB 0x14
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext22_MSB 0x18
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext22_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext21_LSB 0xF
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext21_MSB 0x13
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext21_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext20_LSB 0xA
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext20_MSB 0xE
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext20_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext19_LSB 0x5
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext19_MSB 0x9
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext19_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext18_LSB 0x0
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext18_MSB 0x4
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext18_RMASK 0x1F
+
+#define QIB_7322_RcvQPMapTableE_0_OFFS 0x1130
+#define QIB_7322_RcvQPMapTableE_0_DEF 0x0000000000000000
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext29_LSB 0x19
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext29_MSB 0x1D
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext29_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext28_LSB 0x14
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext28_MSB 0x18
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext28_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext27_LSB 0xF
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext27_MSB 0x13
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext27_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext26_LSB 0xA
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext26_MSB 0xE
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext26_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext25_LSB 0x5
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext25_MSB 0x9
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext25_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext24_LSB 0x0
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext24_MSB 0x4
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext24_RMASK 0x1F
+
+#define QIB_7322_RcvQPMapTableF_0_OFFS 0x1138
+#define QIB_7322_RcvQPMapTableF_0_DEF 0x0000000000000000
+#define QIB_7322_RcvQPMapTableF_0_RcvQPMapContext31_LSB 0x5
+#define QIB_7322_RcvQPMapTableF_0_RcvQPMapContext31_MSB 0x9
+#define QIB_7322_RcvQPMapTableF_0_RcvQPMapContext31_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableF_0_RcvQPMapContext30_LSB 0x0
+#define QIB_7322_RcvQPMapTableF_0_RcvQPMapContext30_MSB 0x4
+#define QIB_7322_RcvQPMapTableF_0_RcvQPMapContext30_RMASK 0x1F
+
+#define QIB_7322_PSStat_0_OFFS 0x1140
+#define QIB_7322_PSStat_0_DEF 0x0000000000000000
+
+#define QIB_7322_PSStart_0_OFFS 0x1148
+#define QIB_7322_PSStart_0_DEF 0x0000000000000000
+
+#define QIB_7322_PSInterval_0_OFFS 0x1150
+#define QIB_7322_PSInterval_0_DEF 0x0000000000000000
+
+#define QIB_7322_RcvStatus_0_OFFS 0x1160
+#define QIB_7322_RcvStatus_0_DEF 0x0000000000000000
+#define QIB_7322_RcvStatus_0_DmaeqBlockingContext_LSB 0x1
+#define QIB_7322_RcvStatus_0_DmaeqBlockingContext_MSB 0x5
+#define QIB_7322_RcvStatus_0_DmaeqBlockingContext_RMASK 0x1F
+#define QIB_7322_RcvStatus_0_RxPktInProgress_LSB 0x0
+#define QIB_7322_RcvStatus_0_RxPktInProgress_MSB 0x0
+#define QIB_7322_RcvStatus_0_RxPktInProgress_RMASK 0x1
+
+#define QIB_7322_RcvPartitionKey_0_OFFS 0x1168
+#define QIB_7322_RcvPartitionKey_0_DEF 0x0000000000000000
+
+#define QIB_7322_RcvQPMulticastContext_0_OFFS 0x1170
+#define QIB_7322_RcvQPMulticastContext_0_DEF 0x0000000000000000
+#define QIB_7322_RcvQPMulticastContext_0_RcvQpMcContext_LSB 0x0
+#define QIB_7322_RcvQPMulticastContext_0_RcvQpMcContext_MSB 0x4
+#define QIB_7322_RcvQPMulticastContext_0_RcvQpMcContext_RMASK 0x1F
+
+#define QIB_7322_RcvPktLEDCnt_0_OFFS 0x1178
+#define QIB_7322_RcvPktLEDCnt_0_DEF 0x0000000000000000
+#define QIB_7322_RcvPktLEDCnt_0_ONperiod_LSB 0x20
+#define QIB_7322_RcvPktLEDCnt_0_ONperiod_MSB 0x3F
+#define QIB_7322_RcvPktLEDCnt_0_ONperiod_RMASK 0xFFFFFFFF
+#define QIB_7322_RcvPktLEDCnt_0_OFFperiod_LSB 0x0
+#define QIB_7322_RcvPktLEDCnt_0_OFFperiod_MSB 0x1F
+#define QIB_7322_RcvPktLEDCnt_0_OFFperiod_RMASK 0xFFFFFFFF
+
+#define QIB_7322_SendDmaIdleCnt_0_OFFS 0x1180
+#define QIB_7322_SendDmaIdleCnt_0_DEF 0x0000000000000000
+#define QIB_7322_SendDmaIdleCnt_0_SendDmaIdleCnt_LSB 0x0
+#define QIB_7322_SendDmaIdleCnt_0_SendDmaIdleCnt_MSB 0xF
+#define QIB_7322_SendDmaIdleCnt_0_SendDmaIdleCnt_RMASK 0xFFFF
+
+#define QIB_7322_SendDmaReloadCnt_0_OFFS 0x1188
+#define QIB_7322_SendDmaReloadCnt_0_DEF 0x0000000000000000
+#define QIB_7322_SendDmaReloadCnt_0_SendDmaReloadCnt_LSB 0x0
+#define QIB_7322_SendDmaReloadCnt_0_SendDmaReloadCnt_MSB 0xF
+#define QIB_7322_SendDmaReloadCnt_0_SendDmaReloadCnt_RMASK 0xFFFF
+
+#define QIB_7322_SendDmaDescCnt_0_OFFS 0x1190
+#define QIB_7322_SendDmaDescCnt_0_DEF 0x0000000000000000
+#define QIB_7322_SendDmaDescCnt_0_SendDmaDescCnt_LSB 0x0
+#define QIB_7322_SendDmaDescCnt_0_SendDmaDescCnt_MSB 0xF
+#define QIB_7322_SendDmaDescCnt_0_SendDmaDescCnt_RMASK 0xFFFF
+
+#define QIB_7322_SendCtrl_0_OFFS 0x11C0
+#define QIB_7322_SendCtrl_0_DEF 0x0000000000000000
+#define QIB_7322_SendCtrl_0_IBVLArbiterEn_LSB 0xF
+#define QIB_7322_SendCtrl_0_IBVLArbiterEn_MSB 0xF
+#define QIB_7322_SendCtrl_0_IBVLArbiterEn_RMASK 0x1
+#define QIB_7322_SendCtrl_0_TxeDrainRmFifo_LSB 0xE
+#define QIB_7322_SendCtrl_0_TxeDrainRmFifo_MSB 0xE
+#define QIB_7322_SendCtrl_0_TxeDrainRmFifo_RMASK 0x1
+#define QIB_7322_SendCtrl_0_TxeDrainLaFifo_LSB 0xD
+#define QIB_7322_SendCtrl_0_TxeDrainLaFifo_MSB 0xD
+#define QIB_7322_SendCtrl_0_TxeDrainLaFifo_RMASK 0x1
+#define QIB_7322_SendCtrl_0_SDmaHalt_LSB 0xC
+#define QIB_7322_SendCtrl_0_SDmaHalt_MSB 0xC
+#define QIB_7322_SendCtrl_0_SDmaHalt_RMASK 0x1
+#define QIB_7322_SendCtrl_0_SDmaEnable_LSB 0xB
+#define QIB_7322_SendCtrl_0_SDmaEnable_MSB 0xB
+#define QIB_7322_SendCtrl_0_SDmaEnable_RMASK 0x1
+#define QIB_7322_SendCtrl_0_SDmaSingleDescriptor_LSB 0xA
+#define QIB_7322_SendCtrl_0_SDmaSingleDescriptor_MSB 0xA
+#define QIB_7322_SendCtrl_0_SDmaSingleDescriptor_RMASK 0x1
+#define QIB_7322_SendCtrl_0_SDmaIntEnable_LSB 0x9
+#define QIB_7322_SendCtrl_0_SDmaIntEnable_MSB 0x9
+#define QIB_7322_SendCtrl_0_SDmaIntEnable_RMASK 0x1
+#define QIB_7322_SendCtrl_0_SDmaCleanup_LSB 0x8
+#define QIB_7322_SendCtrl_0_SDmaCleanup_MSB 0x8
+#define QIB_7322_SendCtrl_0_SDmaCleanup_RMASK 0x1
+#define QIB_7322_SendCtrl_0_ForceCreditUpToDate_LSB 0x7
+#define QIB_7322_SendCtrl_0_ForceCreditUpToDate_MSB 0x7
+#define QIB_7322_SendCtrl_0_ForceCreditUpToDate_RMASK 0x1
+#define QIB_7322_SendCtrl_0_SendEnable_LSB 0x3
+#define QIB_7322_SendCtrl_0_SendEnable_MSB 0x3
+#define QIB_7322_SendCtrl_0_SendEnable_RMASK 0x1
+#define QIB_7322_SendCtrl_0_TxeBypassIbc_LSB 0x1
+#define QIB_7322_SendCtrl_0_TxeBypassIbc_MSB 0x1
+#define QIB_7322_SendCtrl_0_TxeBypassIbc_RMASK 0x1
+#define QIB_7322_SendCtrl_0_TxeAbortIbc_LSB 0x0
+#define QIB_7322_SendCtrl_0_TxeAbortIbc_MSB 0x0
+#define QIB_7322_SendCtrl_0_TxeAbortIbc_RMASK 0x1
+
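+/*
+ * Single-bit control fields such as those in SendCtrl_0 are set by
+ * shifting 1 into position with the field's _LSB.  A hedged sketch of
+ * composing a SendCtrl_0 image that arms send DMA with its interrupt
+ * (function name illustrative; the actual MMIO write is not shown):
+ */
+static inline u64 qib_7322_sendctrl_sdma_arm(void)
+{
+	return (1ULL << QIB_7322_SendCtrl_0_SendEnable_LSB) |
+	       (1ULL << QIB_7322_SendCtrl_0_SDmaEnable_LSB) |
+	       (1ULL << QIB_7322_SendCtrl_0_SDmaIntEnable_LSB);
+}
+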
+#define QIB_7322_SendDmaBase_0_OFFS 0x11F8
+#define QIB_7322_SendDmaBase_0_DEF 0x0000000000000000
+#define QIB_7322_SendDmaBase_0_SendDmaBase_LSB 0x0
+#define QIB_7322_SendDmaBase_0_SendDmaBase_MSB 0x2F
+#define QIB_7322_SendDmaBase_0_SendDmaBase_RMASK 0xFFFFFFFFFFFF
+
+#define QIB_7322_SendDmaLenGen_0_OFFS 0x1200
+#define QIB_7322_SendDmaLenGen_0_DEF 0x0000000000000000
+#define QIB_7322_SendDmaLenGen_0_Generation_LSB 0x10
+#define QIB_7322_SendDmaLenGen_0_Generation_MSB 0x12
+#define QIB_7322_SendDmaLenGen_0_Generation_RMASK 0x7
+#define QIB_7322_SendDmaLenGen_0_Length_LSB 0x0
+#define QIB_7322_SendDmaLenGen_0_Length_MSB 0xF
+#define QIB_7322_SendDmaLenGen_0_Length_RMASK 0xFFFF
+
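+/*
+ * SendDmaLenGen packs the send-DMA descriptor ring size (Length, bits
+ * 15:0) together with a 3-bit Generation seed (bits 18:16).  A hedged
+ * sketch of composing the register value for a ring of 'cnt'
+ * descriptors (function name illustrative):
+ */
+static inline u64 qib_7322_sdma_lengen(u16 cnt, u8 gen)
+{
+	return ((u64)cnt & QIB_7322_SendDmaLenGen_0_Length_RMASK) |
+	       (((u64)gen & QIB_7322_SendDmaLenGen_0_Generation_RMASK)
+		<< QIB_7322_SendDmaLenGen_0_Generation_LSB);
+}
+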
+#define QIB_7322_SendDmaTail_0_OFFS 0x1208
+#define QIB_7322_SendDmaTail_0_DEF 0x0000000000000000
+#define QIB_7322_SendDmaTail_0_SendDmaTail_LSB 0x0
+#define QIB_7322_SendDmaTail_0_SendDmaTail_MSB 0xF
+#define QIB_7322_SendDmaTail_0_SendDmaTail_RMASK 0xFFFF
+
+#define QIB_7322_SendDmaHead_0_OFFS 0x1210
+#define QIB_7322_SendDmaHead_0_DEF 0x0000000000000000
+#define QIB_7322_SendDmaHead_0_InternalSendDmaHead_LSB 0x20
+#define QIB_7322_SendDmaHead_0_InternalSendDmaHead_MSB 0x2F
+#define QIB_7322_SendDmaHead_0_InternalSendDmaHead_RMASK 0xFFFF
+#define QIB_7322_SendDmaHead_0_SendDmaHead_LSB 0x0
+#define QIB_7322_SendDmaHead_0_SendDmaHead_MSB 0xF
+#define QIB_7322_SendDmaHead_0_SendDmaHead_RMASK 0xFFFF
+
+#define QIB_7322_SendDmaHeadAddr_0_OFFS 0x1218
+#define QIB_7322_SendDmaHeadAddr_0_DEF 0x0000000000000000
+#define QIB_7322_SendDmaHeadAddr_0_SendDmaHeadAddr_LSB 0x0
+#define QIB_7322_SendDmaHeadAddr_0_SendDmaHeadAddr_MSB 0x2F
+#define QIB_7322_SendDmaHeadAddr_0_SendDmaHeadAddr_RMASK 0xFFFFFFFFFFFF
+
+#define QIB_7322_SendDmaBufMask0_0_OFFS 0x1220
+#define QIB_7322_SendDmaBufMask0_0_DEF 0x0000000000000000
+#define QIB_7322_SendDmaBufMask0_0_BufMask_63_0_LSB 0x0
+#define QIB_7322_SendDmaBufMask0_0_BufMask_63_0_MSB 0x3F
+#define QIB_7322_SendDmaBufMask0_0_BufMask_63_0_RMASK 0x0
+
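+/*
+ * Note: BufMask_63_0 above spans the full 64 bits (LSB 0x0, MSB 0x3F)
+ * yet its _RMASK is 0x0; the same holds for the other full-width
+ * fields below (e.g. SendDmaBufUsed0, SendBufAvail0).  Presumably the
+ * generator's width-to-mask computation wrapped to zero for 64-bit
+ * fields, so such fields must be masked with ~0ULL rather than their
+ * _RMASK.
+ */
+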
+#define QIB_7322_SendDmaStatus_0_OFFS 0x1238
+#define QIB_7322_SendDmaStatus_0_DEF 0x0000000042000000
+#define QIB_7322_SendDmaStatus_0_ScoreBoardDrainInProg_LSB 0x3F
+#define QIB_7322_SendDmaStatus_0_ScoreBoardDrainInProg_MSB 0x3F
+#define QIB_7322_SendDmaStatus_0_ScoreBoardDrainInProg_RMASK 0x1
+#define QIB_7322_SendDmaStatus_0_HaltInProg_LSB 0x3E
+#define QIB_7322_SendDmaStatus_0_HaltInProg_MSB 0x3E
+#define QIB_7322_SendDmaStatus_0_HaltInProg_RMASK 0x1
+#define QIB_7322_SendDmaStatus_0_InternalSDmaHalt_LSB 0x3D
+#define QIB_7322_SendDmaStatus_0_InternalSDmaHalt_MSB 0x3D
+#define QIB_7322_SendDmaStatus_0_InternalSDmaHalt_RMASK 0x1
+#define QIB_7322_SendDmaStatus_0_ScbDescIndex_13_0_LSB 0x2F
+#define QIB_7322_SendDmaStatus_0_ScbDescIndex_13_0_MSB 0x3C
+#define QIB_7322_SendDmaStatus_0_ScbDescIndex_13_0_RMASK 0x3FFF
+#define QIB_7322_SendDmaStatus_0_RpyLowAddr_6_0_LSB 0x28
+#define QIB_7322_SendDmaStatus_0_RpyLowAddr_6_0_MSB 0x2E
+#define QIB_7322_SendDmaStatus_0_RpyLowAddr_6_0_RMASK 0x7F
+#define QIB_7322_SendDmaStatus_0_RpyTag_7_0_LSB 0x20
+#define QIB_7322_SendDmaStatus_0_RpyTag_7_0_MSB 0x27
+#define QIB_7322_SendDmaStatus_0_RpyTag_7_0_RMASK 0xFF
+#define QIB_7322_SendDmaStatus_0_ScbFull_LSB 0x1F
+#define QIB_7322_SendDmaStatus_0_ScbFull_MSB 0x1F
+#define QIB_7322_SendDmaStatus_0_ScbFull_RMASK 0x1
+#define QIB_7322_SendDmaStatus_0_ScbEmpty_LSB 0x1E
+#define QIB_7322_SendDmaStatus_0_ScbEmpty_MSB 0x1E
+#define QIB_7322_SendDmaStatus_0_ScbEmpty_RMASK 0x1
+#define QIB_7322_SendDmaStatus_0_ScbEntryValid_LSB 0x1D
+#define QIB_7322_SendDmaStatus_0_ScbEntryValid_MSB 0x1D
+#define QIB_7322_SendDmaStatus_0_ScbEntryValid_RMASK 0x1
+#define QIB_7322_SendDmaStatus_0_ScbFetchDescFlag_LSB 0x1C
+#define QIB_7322_SendDmaStatus_0_ScbFetchDescFlag_MSB 0x1C
+#define QIB_7322_SendDmaStatus_0_ScbFetchDescFlag_RMASK 0x1
+#define QIB_7322_SendDmaStatus_0_SplFifoReadyToGo_LSB 0x1B
+#define QIB_7322_SendDmaStatus_0_SplFifoReadyToGo_MSB 0x1B
+#define QIB_7322_SendDmaStatus_0_SplFifoReadyToGo_RMASK 0x1
+#define QIB_7322_SendDmaStatus_0_SplFifoDisarmed_LSB 0x1A
+#define QIB_7322_SendDmaStatus_0_SplFifoDisarmed_MSB 0x1A
+#define QIB_7322_SendDmaStatus_0_SplFifoDisarmed_RMASK 0x1
+#define QIB_7322_SendDmaStatus_0_SplFifoEmpty_LSB 0x19
+#define QIB_7322_SendDmaStatus_0_SplFifoEmpty_MSB 0x19
+#define QIB_7322_SendDmaStatus_0_SplFifoEmpty_RMASK 0x1
+#define QIB_7322_SendDmaStatus_0_SplFifoFull_LSB 0x18
+#define QIB_7322_SendDmaStatus_0_SplFifoFull_MSB 0x18
+#define QIB_7322_SendDmaStatus_0_SplFifoFull_RMASK 0x1
+#define QIB_7322_SendDmaStatus_0_SplFifoBufNum_LSB 0x10
+#define QIB_7322_SendDmaStatus_0_SplFifoBufNum_MSB 0x17
+#define QIB_7322_SendDmaStatus_0_SplFifoBufNum_RMASK 0xFF
+#define QIB_7322_SendDmaStatus_0_SplFifoDescIndex_LSB 0x0
+#define QIB_7322_SendDmaStatus_0_SplFifoDescIndex_MSB 0xF
+#define QIB_7322_SendDmaStatus_0_SplFifoDescIndex_RMASK 0xFFFF
+
+#define QIB_7322_SendDmaPriorityThld_0_OFFS 0x1258
+#define QIB_7322_SendDmaPriorityThld_0_DEF 0x0000000000000000
+#define QIB_7322_SendDmaPriorityThld_0_PriorityThreshold_LSB 0x0
+#define QIB_7322_SendDmaPriorityThld_0_PriorityThreshold_MSB 0x3
+#define QIB_7322_SendDmaPriorityThld_0_PriorityThreshold_RMASK 0xF
+
+#define QIB_7322_SendHdrErrSymptom_0_OFFS 0x1260
+#define QIB_7322_SendHdrErrSymptom_0_DEF 0x0000000000000000
+#define QIB_7322_SendHdrErrSymptom_0_NonKeyPacket_LSB 0x6
+#define QIB_7322_SendHdrErrSymptom_0_NonKeyPacket_MSB 0x6
+#define QIB_7322_SendHdrErrSymptom_0_NonKeyPacket_RMASK 0x1
+#define QIB_7322_SendHdrErrSymptom_0_GRHFail_LSB 0x5
+#define QIB_7322_SendHdrErrSymptom_0_GRHFail_MSB 0x5
+#define QIB_7322_SendHdrErrSymptom_0_GRHFail_RMASK 0x1
+#define QIB_7322_SendHdrErrSymptom_0_PkeyFail_LSB 0x4
+#define QIB_7322_SendHdrErrSymptom_0_PkeyFail_MSB 0x4
+#define QIB_7322_SendHdrErrSymptom_0_PkeyFail_RMASK 0x1
+#define QIB_7322_SendHdrErrSymptom_0_QPFail_LSB 0x3
+#define QIB_7322_SendHdrErrSymptom_0_QPFail_MSB 0x3
+#define QIB_7322_SendHdrErrSymptom_0_QPFail_RMASK 0x1
+#define QIB_7322_SendHdrErrSymptom_0_SLIDFail_LSB 0x2
+#define QIB_7322_SendHdrErrSymptom_0_SLIDFail_MSB 0x2
+#define QIB_7322_SendHdrErrSymptom_0_SLIDFail_RMASK 0x1
+#define QIB_7322_SendHdrErrSymptom_0_RawIPV6_LSB 0x1
+#define QIB_7322_SendHdrErrSymptom_0_RawIPV6_MSB 0x1
+#define QIB_7322_SendHdrErrSymptom_0_RawIPV6_RMASK 0x1
+#define QIB_7322_SendHdrErrSymptom_0_PacketTooSmall_LSB 0x0
+#define QIB_7322_SendHdrErrSymptom_0_PacketTooSmall_MSB 0x0
+#define QIB_7322_SendHdrErrSymptom_0_PacketTooSmall_RMASK 0x1
+
+#define QIB_7322_RxCreditVL0_0_OFFS 0x1280
+#define QIB_7322_RxCreditVL0_0_DEF 0x0000000000000000
+#define QIB_7322_RxCreditVL0_0_RxBufrConsumedVL_LSB 0x10
+#define QIB_7322_RxCreditVL0_0_RxBufrConsumedVL_MSB 0x1B
+#define QIB_7322_RxCreditVL0_0_RxBufrConsumedVL_RMASK 0xFFF
+#define QIB_7322_RxCreditVL0_0_RxMaxCreditVL_LSB 0x0
+#define QIB_7322_RxCreditVL0_0_RxMaxCreditVL_MSB 0xB
+#define QIB_7322_RxCreditVL0_0_RxMaxCreditVL_RMASK 0xFFF
+
+#define QIB_7322_SendDmaBufUsed0_0_OFFS 0x1480
+#define QIB_7322_SendDmaBufUsed0_0_DEF 0x0000000000000000
+#define QIB_7322_SendDmaBufUsed0_0_BufUsed_63_0_LSB 0x0
+#define QIB_7322_SendDmaBufUsed0_0_BufUsed_63_0_MSB 0x3F
+#define QIB_7322_SendDmaBufUsed0_0_BufUsed_63_0_RMASK 0x0
+
+#define QIB_7322_SendCheckControl_0_OFFS 0x14A8
+#define QIB_7322_SendCheckControl_0_DEF 0x0000000000000000
+#define QIB_7322_SendCheckControl_0_PKey_En_LSB 0x4
+#define QIB_7322_SendCheckControl_0_PKey_En_MSB 0x4
+#define QIB_7322_SendCheckControl_0_PKey_En_RMASK 0x1
+#define QIB_7322_SendCheckControl_0_BTHQP_En_LSB 0x3
+#define QIB_7322_SendCheckControl_0_BTHQP_En_MSB 0x3
+#define QIB_7322_SendCheckControl_0_BTHQP_En_RMASK 0x1
+#define QIB_7322_SendCheckControl_0_SLID_En_LSB 0x2
+#define QIB_7322_SendCheckControl_0_SLID_En_MSB 0x2
+#define QIB_7322_SendCheckControl_0_SLID_En_RMASK 0x1
+#define QIB_7322_SendCheckControl_0_RawIPV6_En_LSB 0x1
+#define QIB_7322_SendCheckControl_0_RawIPV6_En_MSB 0x1
+#define QIB_7322_SendCheckControl_0_RawIPV6_En_RMASK 0x1
+#define QIB_7322_SendCheckControl_0_PacketTooSmall_En_LSB 0x0
+#define QIB_7322_SendCheckControl_0_PacketTooSmall_En_MSB 0x0
+#define QIB_7322_SendCheckControl_0_PacketTooSmall_En_RMASK 0x1
+
+#define QIB_7322_SendIBSLIDMask_0_OFFS 0x14B0
+#define QIB_7322_SendIBSLIDMask_0_DEF 0x0000000000000000
+#define QIB_7322_SendIBSLIDMask_0_SendIBSLIDMask_15_0_LSB 0x0
+#define QIB_7322_SendIBSLIDMask_0_SendIBSLIDMask_15_0_MSB 0xF
+#define QIB_7322_SendIBSLIDMask_0_SendIBSLIDMask_15_0_RMASK 0xFFFF
+
+#define QIB_7322_SendIBSLIDAssign_0_OFFS 0x14B8
+#define QIB_7322_SendIBSLIDAssign_0_DEF 0x0000000000000000
+#define QIB_7322_SendIBSLIDAssign_0_SendIBSLIDAssign_15_0_LSB 0x0
+#define QIB_7322_SendIBSLIDAssign_0_SendIBSLIDAssign_15_0_MSB 0xF
+#define QIB_7322_SendIBSLIDAssign_0_SendIBSLIDAssign_15_0_RMASK 0xFFFF
+
+#define QIB_7322_IBCStatusA_0_OFFS 0x1540
+#define QIB_7322_IBCStatusA_0_DEF 0x0000000000000X02
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL7_LSB 0x27
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL7_MSB 0x27
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL7_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL6_LSB 0x26
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL6_MSB 0x26
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL6_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL5_LSB 0x25
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL5_MSB 0x25
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL5_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL4_LSB 0x24
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL4_MSB 0x24
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL4_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL3_LSB 0x23
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL3_MSB 0x23
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL3_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL2_LSB 0x22
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL2_MSB 0x22
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL2_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL1_LSB 0x21
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL1_MSB 0x21
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL1_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL0_LSB 0x20
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL0_MSB 0x20
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL0_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_TxReady_LSB 0x1E
+#define QIB_7322_IBCStatusA_0_TxReady_MSB 0x1E
+#define QIB_7322_IBCStatusA_0_TxReady_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_LinkSpeedQDR_LSB 0x1D
+#define QIB_7322_IBCStatusA_0_LinkSpeedQDR_MSB 0x1D
+#define QIB_7322_IBCStatusA_0_LinkSpeedQDR_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_ScrambleCapRemote_LSB 0xF
+#define QIB_7322_IBCStatusA_0_ScrambleCapRemote_MSB 0xF
+#define QIB_7322_IBCStatusA_0_ScrambleCapRemote_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_ScrambleEn_LSB 0xE
+#define QIB_7322_IBCStatusA_0_ScrambleEn_MSB 0xE
+#define QIB_7322_IBCStatusA_0_ScrambleEn_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_IBTxLaneReversed_LSB 0xD
+#define QIB_7322_IBCStatusA_0_IBTxLaneReversed_MSB 0xD
+#define QIB_7322_IBCStatusA_0_IBTxLaneReversed_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_IBRxLaneReversed_LSB 0xC
+#define QIB_7322_IBCStatusA_0_IBRxLaneReversed_MSB 0xC
+#define QIB_7322_IBCStatusA_0_IBRxLaneReversed_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_DDS_RXEQ_FAIL_LSB 0xA
+#define QIB_7322_IBCStatusA_0_DDS_RXEQ_FAIL_MSB 0xA
+#define QIB_7322_IBCStatusA_0_DDS_RXEQ_FAIL_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_LinkWidthActive_LSB 0x9
+#define QIB_7322_IBCStatusA_0_LinkWidthActive_MSB 0x9
+#define QIB_7322_IBCStatusA_0_LinkWidthActive_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_LinkSpeedActive_LSB 0x8
+#define QIB_7322_IBCStatusA_0_LinkSpeedActive_MSB 0x8
+#define QIB_7322_IBCStatusA_0_LinkSpeedActive_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_LinkState_LSB 0x5
+#define QIB_7322_IBCStatusA_0_LinkState_MSB 0x7
+#define QIB_7322_IBCStatusA_0_LinkState_RMASK 0x7
+#define QIB_7322_IBCStatusA_0_LinkTrainingState_LSB 0x0
+#define QIB_7322_IBCStatusA_0_LinkTrainingState_MSB 0x4
+#define QIB_7322_IBCStatusA_0_LinkTrainingState_RMASK 0x1F
+
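+/*
+ * Multi-bit status fields decode with the same shift-and-mask rule.
+ * A sketch of pulling the 3-bit LinkState and 5-bit LinkTrainingState
+ * out of an IBCStatusA_0 image (function names illustrative):
+ */
+static inline unsigned int qib_7322_ibcs_linkstate(u64 ibcs)
+{
+	return (ibcs >> QIB_7322_IBCStatusA_0_LinkState_LSB) &
+		QIB_7322_IBCStatusA_0_LinkState_RMASK;
+}
+static inline unsigned int qib_7322_ibcs_lts(u64 ibcs)
+{
+	return (ibcs >> QIB_7322_IBCStatusA_0_LinkTrainingState_LSB) &
+		QIB_7322_IBCStatusA_0_LinkTrainingState_RMASK;
+}
+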
+#define QIB_7322_IBCStatusB_0_OFFS 0x1548
+#define QIB_7322_IBCStatusB_0_DEF 0x00000000XXXXXXXX
+#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_debug_LSB 0x27
+#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_debug_MSB 0x27
+#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_debug_RMASK 0x1
+#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_reached_threshold_LSB 0x26
+#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_reached_threshold_MSB 0x26
+#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_reached_threshold_RMASK 0x1
+#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_started_LSB 0x25
+#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_started_MSB 0x25
+#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_started_RMASK 0x1
+#define QIB_7322_IBCStatusB_0_heartbeat_timed_out_LSB 0x24
+#define QIB_7322_IBCStatusB_0_heartbeat_timed_out_MSB 0x24
+#define QIB_7322_IBCStatusB_0_heartbeat_timed_out_RMASK 0x1
+#define QIB_7322_IBCStatusB_0_heartbeat_crosstalk_LSB 0x20
+#define QIB_7322_IBCStatusB_0_heartbeat_crosstalk_MSB 0x23
+#define QIB_7322_IBCStatusB_0_heartbeat_crosstalk_RMASK 0xF
+#define QIB_7322_IBCStatusB_0_RxEqLocalDevice_LSB 0x1E
+#define QIB_7322_IBCStatusB_0_RxEqLocalDevice_MSB 0x1F
+#define QIB_7322_IBCStatusB_0_RxEqLocalDevice_RMASK 0x3
+#define QIB_7322_IBCStatusB_0_ReqDDSLocalFromRmt_LSB 0x1A
+#define QIB_7322_IBCStatusB_0_ReqDDSLocalFromRmt_MSB 0x1D
+#define QIB_7322_IBCStatusB_0_ReqDDSLocalFromRmt_RMASK 0xF
+#define QIB_7322_IBCStatusB_0_LinkRoundTripLatency_LSB 0x0
+#define QIB_7322_IBCStatusB_0_LinkRoundTripLatency_MSB 0x19
+#define QIB_7322_IBCStatusB_0_LinkRoundTripLatency_RMASK 0x3FFFFFF
+
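+/*
+ * Note: the 'X' digits in the IBCStatusA_0_DEF and IBCStatusB_0_DEF
+ * values above are apparently generator placeholders for bits whose
+ * reset value is undefined.  They remain legal C provided the macros
+ * are never expanded, since the preprocessor only diagnoses a macro
+ * body at its point of use.
+ */
+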
+#define QIB_7322_IBCCtrlA_0_OFFS 0x1560
+#define QIB_7322_IBCCtrlA_0_DEF 0x0000000000000000
+#define QIB_7322_IBCCtrlA_0_Loopback_LSB 0x3F
+#define QIB_7322_IBCCtrlA_0_Loopback_MSB 0x3F
+#define QIB_7322_IBCCtrlA_0_Loopback_RMASK 0x1
+#define QIB_7322_IBCCtrlA_0_LinkDownDefaultState_LSB 0x3E
+#define QIB_7322_IBCCtrlA_0_LinkDownDefaultState_MSB 0x3E
+#define QIB_7322_IBCCtrlA_0_LinkDownDefaultState_RMASK 0x1
+#define QIB_7322_IBCCtrlA_0_IBLinkEn_LSB 0x3D
+#define QIB_7322_IBCCtrlA_0_IBLinkEn_MSB 0x3D
+#define QIB_7322_IBCCtrlA_0_IBLinkEn_RMASK 0x1
+#define QIB_7322_IBCCtrlA_0_IBStatIntReductionEn_LSB 0x3C
+#define QIB_7322_IBCCtrlA_0_IBStatIntReductionEn_MSB 0x3C
+#define QIB_7322_IBCCtrlA_0_IBStatIntReductionEn_RMASK 0x1
+#define QIB_7322_IBCCtrlA_0_NumVLane_LSB 0x30
+#define QIB_7322_IBCCtrlA_0_NumVLane_MSB 0x32
+#define QIB_7322_IBCCtrlA_0_NumVLane_RMASK 0x7
+#define QIB_7322_IBCCtrlA_0_OverrunThreshold_LSB 0x24
+#define QIB_7322_IBCCtrlA_0_OverrunThreshold_MSB 0x27
+#define QIB_7322_IBCCtrlA_0_OverrunThreshold_RMASK 0xF
+#define QIB_7322_IBCCtrlA_0_PhyerrThreshold_LSB 0x20
+#define QIB_7322_IBCCtrlA_0_PhyerrThreshold_MSB 0x23
+#define QIB_7322_IBCCtrlA_0_PhyerrThreshold_RMASK 0xF
+#define QIB_7322_IBCCtrlA_0_MaxPktLen_LSB 0x15
+#define QIB_7322_IBCCtrlA_0_MaxPktLen_MSB 0x1F
+#define QIB_7322_IBCCtrlA_0_MaxPktLen_RMASK 0x7FF
+#define QIB_7322_IBCCtrlA_0_LinkCmd_LSB 0x13
+#define QIB_7322_IBCCtrlA_0_LinkCmd_MSB 0x14
+#define QIB_7322_IBCCtrlA_0_LinkCmd_RMASK 0x3
+#define QIB_7322_IBCCtrlA_0_LinkInitCmd_LSB 0x10
+#define QIB_7322_IBCCtrlA_0_LinkInitCmd_MSB 0x12
+#define QIB_7322_IBCCtrlA_0_LinkInitCmd_RMASK 0x7
+#define QIB_7322_IBCCtrlA_0_FlowCtrlWaterMark_LSB 0x8
+#define QIB_7322_IBCCtrlA_0_FlowCtrlWaterMark_MSB 0xF
+#define QIB_7322_IBCCtrlA_0_FlowCtrlWaterMark_RMASK 0xFF
+#define QIB_7322_IBCCtrlA_0_FlowCtrlPeriod_LSB 0x0
+#define QIB_7322_IBCCtrlA_0_FlowCtrlPeriod_MSB 0x7
+#define QIB_7322_IBCCtrlA_0_FlowCtrlPeriod_RMASK 0xFF
+
+#define QIB_7322_IBCCtrlB_0_OFFS 0x1568
+#define QIB_7322_IBCCtrlB_0_DEF 0x00000000000305FF
+#define QIB_7322_IBCCtrlB_0_IB_DLID_MASK_LSB 0x30
+#define QIB_7322_IBCCtrlB_0_IB_DLID_MASK_MSB 0x3F
+#define QIB_7322_IBCCtrlB_0_IB_DLID_MASK_RMASK 0xFFFF
+#define QIB_7322_IBCCtrlB_0_IB_DLID_LSB 0x20
+#define QIB_7322_IBCCtrlB_0_IB_DLID_MSB 0x2F
+#define QIB_7322_IBCCtrlB_0_IB_DLID_RMASK 0xFFFF
+#define QIB_7322_IBCCtrlB_0_IB_ENABLE_FILT_DPKT_LSB 0x1B
+#define QIB_7322_IBCCtrlB_0_IB_ENABLE_FILT_DPKT_MSB 0x1B
+#define QIB_7322_IBCCtrlB_0_IB_ENABLE_FILT_DPKT_RMASK 0x1
+#define QIB_7322_IBCCtrlB_0_HRTBT_REQ_LSB 0x1A
+#define QIB_7322_IBCCtrlB_0_HRTBT_REQ_MSB 0x1A
+#define QIB_7322_IBCCtrlB_0_HRTBT_REQ_RMASK 0x1
+#define QIB_7322_IBCCtrlB_0_HRTBT_PORT_LSB 0x12
+#define QIB_7322_IBCCtrlB_0_HRTBT_PORT_MSB 0x19
+#define QIB_7322_IBCCtrlB_0_HRTBT_PORT_RMASK 0xFF
+#define QIB_7322_IBCCtrlB_0_HRTBT_AUTO_LSB 0x11
+#define QIB_7322_IBCCtrlB_0_HRTBT_AUTO_MSB 0x11
+#define QIB_7322_IBCCtrlB_0_HRTBT_AUTO_RMASK 0x1
+#define QIB_7322_IBCCtrlB_0_HRTBT_ENB_LSB 0x10
+#define QIB_7322_IBCCtrlB_0_HRTBT_ENB_MSB 0x10
+#define QIB_7322_IBCCtrlB_0_HRTBT_ENB_RMASK 0x1
+#define QIB_7322_IBCCtrlB_0_SD_DDS_LSB 0xC
+#define QIB_7322_IBCCtrlB_0_SD_DDS_MSB 0xF
+#define QIB_7322_IBCCtrlB_0_SD_DDS_RMASK 0xF
+#define QIB_7322_IBCCtrlB_0_SD_DDSV_LSB 0xB
+#define QIB_7322_IBCCtrlB_0_SD_DDSV_MSB 0xB
+#define QIB_7322_IBCCtrlB_0_SD_DDSV_RMASK 0x1
+#define QIB_7322_IBCCtrlB_0_SD_ADD_ENB_LSB 0xA
+#define QIB_7322_IBCCtrlB_0_SD_ADD_ENB_MSB 0xA
+#define QIB_7322_IBCCtrlB_0_SD_ADD_ENB_RMASK 0x1
+#define QIB_7322_IBCCtrlB_0_SD_RX_EQUAL_ENABLE_LSB 0x9
+#define QIB_7322_IBCCtrlB_0_SD_RX_EQUAL_ENABLE_MSB 0x9
+#define QIB_7322_IBCCtrlB_0_SD_RX_EQUAL_ENABLE_RMASK 0x1
+#define QIB_7322_IBCCtrlB_0_IB_LANE_REV_SUPPORTED_LSB 0x8
+#define QIB_7322_IBCCtrlB_0_IB_LANE_REV_SUPPORTED_MSB 0x8
+#define QIB_7322_IBCCtrlB_0_IB_LANE_REV_SUPPORTED_RMASK 0x1
+#define QIB_7322_IBCCtrlB_0_IB_POLARITY_REV_SUPP_LSB 0x7
+#define QIB_7322_IBCCtrlB_0_IB_POLARITY_REV_SUPP_MSB 0x7
+#define QIB_7322_IBCCtrlB_0_IB_POLARITY_REV_SUPP_RMASK 0x1
+#define QIB_7322_IBCCtrlB_0_IB_NUM_CHANNELS_LSB 0x5
+#define QIB_7322_IBCCtrlB_0_IB_NUM_CHANNELS_MSB 0x6
+#define QIB_7322_IBCCtrlB_0_IB_NUM_CHANNELS_RMASK 0x3
+#define QIB_7322_IBCCtrlB_0_SD_SPEED_QDR_LSB 0x4
+#define QIB_7322_IBCCtrlB_0_SD_SPEED_QDR_MSB 0x4
+#define QIB_7322_IBCCtrlB_0_SD_SPEED_QDR_RMASK 0x1
+#define QIB_7322_IBCCtrlB_0_SD_SPEED_DDR_LSB 0x3
+#define QIB_7322_IBCCtrlB_0_SD_SPEED_DDR_MSB 0x3
+#define QIB_7322_IBCCtrlB_0_SD_SPEED_DDR_RMASK 0x1
+#define QIB_7322_IBCCtrlB_0_SD_SPEED_SDR_LSB 0x2
+#define QIB_7322_IBCCtrlB_0_SD_SPEED_SDR_MSB 0x2
+#define QIB_7322_IBCCtrlB_0_SD_SPEED_SDR_RMASK 0x1
+#define QIB_7322_IBCCtrlB_0_SD_SPEED_LSB 0x1
+#define QIB_7322_IBCCtrlB_0_SD_SPEED_MSB 0x1
+#define QIB_7322_IBCCtrlB_0_SD_SPEED_RMASK 0x1
+#define QIB_7322_IBCCtrlB_0_IB_ENHANCED_MODE_LSB 0x0
+#define QIB_7322_IBCCtrlB_0_IB_ENHANCED_MODE_MSB 0x0
+#define QIB_7322_IBCCtrlB_0_IB_ENHANCED_MODE_RMASK 0x1
+
+#define QIB_7322_IBCCtrlC_0_OFFS 0x1570
+#define QIB_7322_IBCCtrlC_0_DEF 0x0000000000000301
+#define QIB_7322_IBCCtrlC_0_IB_BACK_PORCH_LSB 0x5
+#define QIB_7322_IBCCtrlC_0_IB_BACK_PORCH_MSB 0x9
+#define QIB_7322_IBCCtrlC_0_IB_BACK_PORCH_RMASK 0x1F
+#define QIB_7322_IBCCtrlC_0_IB_FRONT_PORCH_LSB 0x0
+#define QIB_7322_IBCCtrlC_0_IB_FRONT_PORCH_MSB 0x4
+#define QIB_7322_IBCCtrlC_0_IB_FRONT_PORCH_RMASK 0x1F
+
+#define QIB_7322_HRTBT_GUID_0_OFFS 0x1588
+#define QIB_7322_HRTBT_GUID_0_DEF 0x0000000000000000
+
+#define QIB_7322_IB_SDTEST_IF_TX_0_OFFS 0x1590
+#define QIB_7322_IB_SDTEST_IF_TX_0_DEF 0x0000000000000000
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_RX_CFG_LSB 0x30
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_RX_CFG_MSB 0x3F
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_RX_CFG_RMASK 0xFFFF
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_TX_CFG_LSB 0x20
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_TX_CFG_MSB 0x2F
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_TX_CFG_RMASK 0xFFFF
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_SPEED_LSB 0xD
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_SPEED_MSB 0xF
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_SPEED_RMASK 0x7
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_OPCODE_LSB 0xB
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_OPCODE_MSB 0xC
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_OPCODE_RMASK 0x3
+#define QIB_7322_IB_SDTEST_IF_TX_0_CREDIT_CHANGE_LSB 0x4
+#define QIB_7322_IB_SDTEST_IF_TX_0_CREDIT_CHANGE_MSB 0x4
+#define QIB_7322_IB_SDTEST_IF_TX_0_CREDIT_CHANGE_RMASK 0x1
+#define QIB_7322_IB_SDTEST_IF_TX_0_VL_CAP_LSB 0x2
+#define QIB_7322_IB_SDTEST_IF_TX_0_VL_CAP_MSB 0x3
+#define QIB_7322_IB_SDTEST_IF_TX_0_VL_CAP_RMASK 0x3
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_3_TX_VALID_LSB 0x1
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_3_TX_VALID_MSB 0x1
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_3_TX_VALID_RMASK 0x1
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_T_TX_VALID_LSB 0x0
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_T_TX_VALID_MSB 0x0
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_T_TX_VALID_RMASK 0x1
+
+#define QIB_7322_IB_SDTEST_IF_RX_0_OFFS 0x1598
+#define QIB_7322_IB_SDTEST_IF_RX_0_DEF 0x0000000000000000
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_RX_CFG_LSB 0x30
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_RX_CFG_MSB 0x3F
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_RX_CFG_RMASK 0xFFFF
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_TX_CFG_LSB 0x20
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_TX_CFG_MSB 0x2F
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_TX_CFG_RMASK 0xFFFF
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_B_LSB 0x18
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_B_MSB 0x1F
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_B_RMASK 0xFF
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_A_LSB 0x10
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_A_MSB 0x17
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_A_RMASK 0xFF
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_3_RX_VALID_LSB 0x1
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_3_RX_VALID_MSB 0x1
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_3_RX_VALID_RMASK 0x1
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_T_RX_VALID_LSB 0x0
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_T_RX_VALID_MSB 0x0
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_T_RX_VALID_RMASK 0x1
+
+#define QIB_7322_IBNCModeCtrl_0_OFFS 0x15B8
+#define QIB_7322_IBNCModeCtrl_0_DEF 0x0000000000000000
+#define QIB_7322_IBNCModeCtrl_0_ScrambleCapRemoteForce_LSB 0x22
+#define QIB_7322_IBNCModeCtrl_0_ScrambleCapRemoteForce_MSB 0x22
+#define QIB_7322_IBNCModeCtrl_0_ScrambleCapRemoteForce_RMASK 0x1
+#define QIB_7322_IBNCModeCtrl_0_ScrambleCapRemoteMask_LSB 0x21
+#define QIB_7322_IBNCModeCtrl_0_ScrambleCapRemoteMask_MSB 0x21
+#define QIB_7322_IBNCModeCtrl_0_ScrambleCapRemoteMask_RMASK 0x1
+#define QIB_7322_IBNCModeCtrl_0_ScrambleCapLocal_LSB 0x20
+#define QIB_7322_IBNCModeCtrl_0_ScrambleCapLocal_MSB 0x20
+#define QIB_7322_IBNCModeCtrl_0_ScrambleCapLocal_RMASK 0x1
+#define QIB_7322_IBNCModeCtrl_0_TSMCode_TS2_LSB 0x11
+#define QIB_7322_IBNCModeCtrl_0_TSMCode_TS2_MSB 0x19
+#define QIB_7322_IBNCModeCtrl_0_TSMCode_TS2_RMASK 0x1FF
+#define QIB_7322_IBNCModeCtrl_0_TSMCode_TS1_LSB 0x8
+#define QIB_7322_IBNCModeCtrl_0_TSMCode_TS1_MSB 0x10
+#define QIB_7322_IBNCModeCtrl_0_TSMCode_TS1_RMASK 0x1FF
+#define QIB_7322_IBNCModeCtrl_0_TSMEnable_ignore_TSM_on_rx_LSB 0x2
+#define QIB_7322_IBNCModeCtrl_0_TSMEnable_ignore_TSM_on_rx_MSB 0x2
+#define QIB_7322_IBNCModeCtrl_0_TSMEnable_ignore_TSM_on_rx_RMASK 0x1
+#define QIB_7322_IBNCModeCtrl_0_TSMEnable_send_TS2_LSB 0x1
+#define QIB_7322_IBNCModeCtrl_0_TSMEnable_send_TS2_MSB 0x1
+#define QIB_7322_IBNCModeCtrl_0_TSMEnable_send_TS2_RMASK 0x1
+#define QIB_7322_IBNCModeCtrl_0_TSMEnable_send_TS1_LSB 0x0
+#define QIB_7322_IBNCModeCtrl_0_TSMEnable_send_TS1_MSB 0x0
+#define QIB_7322_IBNCModeCtrl_0_TSMEnable_send_TS1_RMASK 0x1
+
+#define QIB_7322_IBSerdesStatus_0_OFFS 0x15D0
+#define QIB_7322_IBSerdesStatus_0_DEF 0x0000000000000000
+
+#define QIB_7322_IBPCSConfig_0_OFFS 0x15D8
+#define QIB_7322_IBPCSConfig_0_DEF 0x0000000000000007
+#define QIB_7322_IBPCSConfig_0_link_sync_mask_LSB 0x9
+#define QIB_7322_IBPCSConfig_0_link_sync_mask_MSB 0x12
+#define QIB_7322_IBPCSConfig_0_link_sync_mask_RMASK 0x3FF
+#define QIB_7322_IBPCSConfig_0_xcv_rreset_LSB 0x2
+#define QIB_7322_IBPCSConfig_0_xcv_rreset_MSB 0x2
+#define QIB_7322_IBPCSConfig_0_xcv_rreset_RMASK 0x1
+#define QIB_7322_IBPCSConfig_0_xcv_treset_LSB 0x1
+#define QIB_7322_IBPCSConfig_0_xcv_treset_MSB 0x1
+#define QIB_7322_IBPCSConfig_0_xcv_treset_RMASK 0x1
+#define QIB_7322_IBPCSConfig_0_tx_rx_reset_LSB 0x0
+#define QIB_7322_IBPCSConfig_0_tx_rx_reset_MSB 0x0
+#define QIB_7322_IBPCSConfig_0_tx_rx_reset_RMASK 0x1
+
+#define QIB_7322_IBSerdesCtrl_0_OFFS 0x15E0
+#define QIB_7322_IBSerdesCtrl_0_DEF 0x0000000000FFA00F
+#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_QDR_LSB 0x1A
+#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_QDR_MSB 0x1A
+#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_QDR_RMASK 0x1
+#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_DDR_LSB 0x19
+#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_DDR_MSB 0x19
+#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_DDR_RMASK 0x1
+#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_SDR_LSB 0x18
+#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_SDR_MSB 0x18
+#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_SDR_RMASK 0x1
+#define QIB_7322_IBSerdesCtrl_0_CHANNEL_RESET_N_LSB 0x14
+#define QIB_7322_IBSerdesCtrl_0_CHANNEL_RESET_N_MSB 0x17
+#define QIB_7322_IBSerdesCtrl_0_CHANNEL_RESET_N_RMASK 0xF
+#define QIB_7322_IBSerdesCtrl_0_CGMODE_LSB 0x10
+#define QIB_7322_IBSerdesCtrl_0_CGMODE_MSB 0x13
+#define QIB_7322_IBSerdesCtrl_0_CGMODE_RMASK 0xF
+#define QIB_7322_IBSerdesCtrl_0_IB_LAT_MODE_LSB 0xF
+#define QIB_7322_IBSerdesCtrl_0_IB_LAT_MODE_MSB 0xF
+#define QIB_7322_IBSerdesCtrl_0_IB_LAT_MODE_RMASK 0x1
+#define QIB_7322_IBSerdesCtrl_0_RXLOSEN_LSB 0xD
+#define QIB_7322_IBSerdesCtrl_0_RXLOSEN_MSB 0xD
+#define QIB_7322_IBSerdesCtrl_0_RXLOSEN_RMASK 0x1
+#define QIB_7322_IBSerdesCtrl_0_LPEN_LSB 0xC
+#define QIB_7322_IBSerdesCtrl_0_LPEN_MSB 0xC
+#define QIB_7322_IBSerdesCtrl_0_LPEN_RMASK 0x1
+#define QIB_7322_IBSerdesCtrl_0_PLLPD_LSB 0xB
+#define QIB_7322_IBSerdesCtrl_0_PLLPD_MSB 0xB
+#define QIB_7322_IBSerdesCtrl_0_PLLPD_RMASK 0x1
+#define QIB_7322_IBSerdesCtrl_0_TXPD_LSB 0xA
+#define QIB_7322_IBSerdesCtrl_0_TXPD_MSB 0xA
+#define QIB_7322_IBSerdesCtrl_0_TXPD_RMASK 0x1
+#define QIB_7322_IBSerdesCtrl_0_RXPD_LSB 0x9
+#define QIB_7322_IBSerdesCtrl_0_RXPD_MSB 0x9
+#define QIB_7322_IBSerdesCtrl_0_RXPD_RMASK 0x1
+#define QIB_7322_IBSerdesCtrl_0_TXIDLE_LSB 0x8
+#define QIB_7322_IBSerdesCtrl_0_TXIDLE_MSB 0x8
+#define QIB_7322_IBSerdesCtrl_0_TXIDLE_RMASK 0x1
+#define QIB_7322_IBSerdesCtrl_0_CMODE_LSB 0x0
+#define QIB_7322_IBSerdesCtrl_0_CMODE_MSB 0x6
+#define QIB_7322_IBSerdesCtrl_0_CMODE_RMASK 0x7F
+
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_OFFS 0x1600
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_DEF 0x0000000000000000
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_tx_override_deemphasis_select_LSB 0x1F
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_tx_override_deemphasis_select_MSB 0x1F
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_tx_override_deemphasis_select_RMASK 0x1
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_reset_tx_deemphasis_override_LSB 0x1E
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_reset_tx_deemphasis_override_MSB 0x1E
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_reset_tx_deemphasis_override_RMASK 0x1
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txampcntl_d2a_LSB 0xE
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txampcntl_d2a_MSB 0x11
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txampcntl_d2a_RMASK 0xF
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txc0_ena_LSB 0x9
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txc0_ena_MSB 0xD
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txc0_ena_RMASK 0x1F
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcp1_ena_LSB 0x5
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcp1_ena_MSB 0x8
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcp1_ena_RMASK 0xF
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcn1_xtra_emph0_LSB 0x3
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcn1_xtra_emph0_MSB 0x4
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcn1_xtra_emph0_RMASK 0x3
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcn1_ena_LSB 0x0
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcn1_ena_MSB 0x2
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcn1_ena_RMASK 0x7
+
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_OFFS 0x1640
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_DEF 0x0000000000000000
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch3_LSB 0x27
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch3_MSB 0x27
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch3_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch2_LSB 0x26
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch2_MSB 0x26
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch2_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch1_LSB 0x25
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch1_MSB 0x25
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch1_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch0_LSB 0x24
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch0_MSB 0x24
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch0_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch3_LSB 0x23
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch3_MSB 0x23
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch3_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch2_LSB 0x22
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch2_MSB 0x22
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch2_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch1_LSB 0x21
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch1_MSB 0x21
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch1_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch0_LSB 0x20
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch0_MSB 0x20
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch0_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch3_LSB 0x18
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch3_MSB 0x1F
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch3_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch2_LSB 0x10
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch2_MSB 0x17
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch2_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch1_LSB 0x8
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch1_MSB 0xF
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch1_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch0_LSB 0x0
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch0_MSB 0x7
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch0_RMASK 0xFF
+
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_OFFS 0x1648
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_DEF 0x0000000000000000
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch3_LSB 0x27
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch3_MSB 0x27
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch3_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch2_LSB 0x26
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch2_MSB 0x26
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch2_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch1_LSB 0x25
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch1_MSB 0x25
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch1_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch0_LSB 0x24
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch0_MSB 0x24
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch0_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch3_LSB 0x23
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch3_MSB 0x23
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch3_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch2_LSB 0x22
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch2_MSB 0x22
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch2_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch1_LSB 0x21
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch1_MSB 0x21
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch1_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch0_LSB 0x20
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch0_MSB 0x20
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch0_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch3_LSB 0x18
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch3_MSB 0x1F
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch3_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch2_LSB 0x10
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch2_MSB 0x17
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch2_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch1_LSB 0x8
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch1_MSB 0xF
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch1_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch0_LSB 0x0
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch0_MSB 0x7
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch0_RMASK 0xFF
+
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_OFFS 0x1650
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_DEF 0x0000000000000000
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch3_LSB 0x27
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch3_MSB 0x27
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch3_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch2_LSB 0x26
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch2_MSB 0x26
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch2_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch1_LSB 0x25
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch1_MSB 0x25
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch1_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch0_LSB 0x24
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch0_MSB 0x24
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch0_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch3_LSB 0x23
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch3_MSB 0x23
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch3_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch2_LSB 0x22
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch2_MSB 0x22
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch2_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch1_LSB 0x21
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch1_MSB 0x21
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch1_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch0_LSB 0x20
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch0_MSB 0x20
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch0_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch3_LSB 0x18
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch3_MSB 0x1F
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch3_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch2_LSB 0x10
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch2_MSB 0x17
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch2_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch1_LSB 0x8
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch1_MSB 0xF
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch1_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch0_LSB 0x0
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch0_MSB 0x7
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch0_RMASK 0xFF
+
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_OFFS 0x1658
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_DEF 0x0000000000000000
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch3_LSB 0x27
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch3_MSB 0x27
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch3_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch2_LSB 0x26
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch2_MSB 0x26
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch2_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch1_LSB 0x25
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch1_MSB 0x25
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch1_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch0_LSB 0x24
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch0_MSB 0x24
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch0_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch3_LSB 0x23
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch3_MSB 0x23
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch3_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch2_LSB 0x22
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch2_MSB 0x22
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch2_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch1_LSB 0x21
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch1_MSB 0x21
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch1_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch0_LSB 0x20
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch0_MSB 0x20
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch0_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch3_LSB 0x18
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch3_MSB 0x1F
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch3_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch2_LSB 0x10
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch2_MSB 0x17
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch2_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch1_LSB 0x8
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch1_MSB 0xF
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch1_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch0_LSB 0x0
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch0_MSB 0x7
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch0_RMASK 0xFF
+
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_OFFS 0x1660
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_DEF 0x0000000000000000
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch3_LSB 0x27
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch3_MSB 0x27
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch3_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch2_LSB 0x26
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch2_MSB 0x26
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch2_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch1_LSB 0x25
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch1_MSB 0x25
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch1_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch0_LSB 0x24
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch0_MSB 0x24
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch0_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch3_LSB 0x23
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch3_MSB 0x23
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch3_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch2_LSB 0x22
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch2_MSB 0x22
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch2_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch1_LSB 0x21
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch1_MSB 0x21
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch1_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch0_LSB 0x20
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch0_MSB 0x20
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch0_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch3_LSB 0x18
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch3_MSB 0x1F
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch3_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch2_LSB 0x10
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch2_MSB 0x17
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch2_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch1_LSB 0x8
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch1_MSB 0xF
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch1_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch0_LSB 0x0
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch0_MSB 0x7
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch0_RMASK 0xFF
+
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_OFFS 0x1668
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_DEF 0x0000000000000000
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch3_LSB 0x27
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch3_MSB 0x27
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch3_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch2_LSB 0x26
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch2_MSB 0x26
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch2_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch1_LSB 0x25
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch1_MSB 0x25
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch1_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch0_LSB 0x24
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch0_MSB 0x24
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch0_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch3_LSB 0x23
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch3_MSB 0x23
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch3_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch2_LSB 0x22
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch2_MSB 0x22
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch2_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch1_LSB 0x21
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch1_MSB 0x21
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch1_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch0_LSB 0x20
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch0_MSB 0x20
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch0_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch3_LSB 0x18
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch3_MSB 0x1F
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch3_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch2_LSB 0x10
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch2_MSB 0x17
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch2_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch1_LSB 0x8
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch1_MSB 0xF
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch1_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch0_LSB 0x0
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch0_MSB 0x7
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch0_RMASK 0xFF
+
+#define QIB_7322_ADAPT_DISABLE_TIMER_THRESHOLD_0_OFFS 0x1670
+#define QIB_7322_ADAPT_DISABLE_TIMER_THRESHOLD_0_DEF 0x0000000000000000
+
+#define QIB_7322_HighPriorityLimit_0_OFFS 0x1BC0
+#define QIB_7322_HighPriorityLimit_0_DEF 0x0000000000000000
+#define QIB_7322_HighPriorityLimit_0_Limit_LSB 0x0
+#define QIB_7322_HighPriorityLimit_0_Limit_MSB 0x7
+#define QIB_7322_HighPriorityLimit_0_Limit_RMASK 0xFF
+
+#define QIB_7322_LowPriority0_0_OFFS 0x1C00
+#define QIB_7322_LowPriority0_0_DEF 0x0000000000000000
+#define QIB_7322_LowPriority0_0_VirtualLane_LSB 0x10
+#define QIB_7322_LowPriority0_0_VirtualLane_MSB 0x12
+#define QIB_7322_LowPriority0_0_VirtualLane_RMASK 0x7
+#define QIB_7322_LowPriority0_0_Weight_LSB 0x0
+#define QIB_7322_LowPriority0_0_Weight_MSB 0x7
+#define QIB_7322_LowPriority0_0_Weight_RMASK 0xFF
+
+#define QIB_7322_HighPriority0_0_OFFS 0x1E00
+#define QIB_7322_HighPriority0_0_DEF 0x0000000000000000
+#define QIB_7322_HighPriority0_0_VirtualLane_LSB 0x10
+#define QIB_7322_HighPriority0_0_VirtualLane_MSB 0x12
+#define QIB_7322_HighPriority0_0_VirtualLane_RMASK 0x7
+#define QIB_7322_HighPriority0_0_Weight_LSB 0x0
+#define QIB_7322_HighPriority0_0_Weight_MSB 0x7
+#define QIB_7322_HighPriority0_0_Weight_RMASK 0xFF
+
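+/*
+ * The LowPriority0/HighPriority0 tables hold VL-arbitration entries:
+ * each slot packs a 3-bit VirtualLane (bits 18:16) with an 8-bit
+ * Weight (bits 7:0).  A sketch of encoding one entry (helper name
+ * illustrative):
+ */
+static inline u64 qib_7322_vlarb_entry(u8 vl, u8 weight)
+{
+	return (((u64)vl & QIB_7322_LowPriority0_0_VirtualLane_RMASK)
+		<< QIB_7322_LowPriority0_0_VirtualLane_LSB) |
+	       ((u64)weight & QIB_7322_LowPriority0_0_Weight_RMASK);
+}
+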
+#define QIB_7322_CntrRegBase_1_OFFS 0x2028
+#define QIB_7322_CntrRegBase_1_DEF 0x0000000000013000
+
+#define QIB_7322_RcvQPMulticastContext_1_OFFS 0x2170
+
+#define QIB_7322_SendCtrl_1_OFFS 0x21C0
+
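+/*
+ * Only a handful of port-1 offsets are named explicitly: the per-port
+ * register block simply repeats at +0x1000 (SendCtrl_0 at 0x11C0 vs
+ * SendCtrl_1 at 0x21C0, RcvQPMulticastContext at 0x1170 vs 0x2170),
+ * and likewise the per-port counters below (0x12000 for port 0,
+ * 0x13000 for port 1, matching CntrRegBase_1).  A sketch of the
+ * implied addressing rule (helper name illustrative):
+ */
+#define QIB_7322_PORT_REG_STRIDE 0x1000
+static inline u64 qib_7322_port_reg_offs(u64 port0_offs, unsigned int port)
+{
+	/* port is 0 or 1; each port's register block is 0x1000 apart */
+	return port0_offs + (u64)port * QIB_7322_PORT_REG_STRIDE;
+}
+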
+#define QIB_7322_SendBufAvail0_OFFS 0x3000
+#define QIB_7322_SendBufAvail0_DEF 0x0000000000000000
+#define QIB_7322_SendBufAvail0_SendBuf_31_0_LSB 0x0
+#define QIB_7322_SendBufAvail0_SendBuf_31_0_MSB 0x3F
+#define QIB_7322_SendBufAvail0_SendBuf_31_0_RMASK 0x0
+
+#define QIB_7322_MsixTable_OFFS 0x8000
+#define QIB_7322_MsixTable_DEF 0x0000000000000000
+
+#define QIB_7322_MsixPba_OFFS 0x9000
+#define QIB_7322_MsixPba_DEF 0x0000000000000000
+
+#define QIB_7322_LAMemory_OFFS 0xA000
+#define QIB_7322_LAMemory_DEF 0x0000000000000000
+
+#define QIB_7322_LBIntCnt_OFFS 0x11000
+#define QIB_7322_LBIntCnt_DEF 0x0000000000000000
+
+#define QIB_7322_LBFlowStallCnt_OFFS 0x11008
+#define QIB_7322_LBFlowStallCnt_DEF 0x0000000000000000
+
+#define QIB_7322_RxTIDFullErrCnt_OFFS 0x110D0
+#define QIB_7322_RxTIDFullErrCnt_DEF 0x0000000000000000
+
+#define QIB_7322_RxTIDValidErrCnt_OFFS 0x110D8
+#define QIB_7322_RxTIDValidErrCnt_DEF 0x0000000000000000
+
+#define QIB_7322_RxP0HdrEgrOvflCnt_OFFS 0x110E8
+#define QIB_7322_RxP0HdrEgrOvflCnt_DEF 0x0000000000000000
+
+#define QIB_7322_PcieRetryBufDiagQwordCnt_OFFS 0x111A0
+#define QIB_7322_PcieRetryBufDiagQwordCnt_DEF 0x0000000000000000
+
+#define QIB_7322_RxTidFlowDropCnt_OFFS 0x111E0
+#define QIB_7322_RxTidFlowDropCnt_DEF 0x0000000000000000
+
+#define QIB_7322_LBIntCnt_0_OFFS 0x12000
+#define QIB_7322_LBIntCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_TxCreditUpToDateTimeOut_0_OFFS 0x12008
+#define QIB_7322_TxCreditUpToDateTimeOut_0_DEF 0x0000000000000000
+
+#define QIB_7322_TxSDmaDescCnt_0_OFFS 0x12010
+#define QIB_7322_TxSDmaDescCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_TxUnsupVLErrCnt_0_OFFS 0x12018
+#define QIB_7322_TxUnsupVLErrCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_TxDataPktCnt_0_OFFS 0x12020
+#define QIB_7322_TxDataPktCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_TxFlowPktCnt_0_OFFS 0x12028
+#define QIB_7322_TxFlowPktCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_TxDwordCnt_0_OFFS 0x12030
+#define QIB_7322_TxDwordCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_TxLenErrCnt_0_OFFS 0x12038
+#define QIB_7322_TxLenErrCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_TxMaxMinLenErrCnt_0_OFFS 0x12040
+#define QIB_7322_TxMaxMinLenErrCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_TxUnderrunCnt_0_OFFS 0x12048
+#define QIB_7322_TxUnderrunCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_TxFlowStallCnt_0_OFFS 0x12050
+#define QIB_7322_TxFlowStallCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_TxDroppedPktCnt_0_OFFS 0x12058
+#define QIB_7322_TxDroppedPktCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxDroppedPktCnt_0_OFFS 0x12060
+#define QIB_7322_RxDroppedPktCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxDataPktCnt_0_OFFS 0x12068
+#define QIB_7322_RxDataPktCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxFlowPktCnt_0_OFFS 0x12070
+#define QIB_7322_RxFlowPktCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxDwordCnt_0_OFFS 0x12078
+#define QIB_7322_RxDwordCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxLenErrCnt_0_OFFS 0x12080
+#define QIB_7322_RxLenErrCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxMaxMinLenErrCnt_0_OFFS 0x12088
+#define QIB_7322_RxMaxMinLenErrCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxICRCErrCnt_0_OFFS 0x12090
+#define QIB_7322_RxICRCErrCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxVCRCErrCnt_0_OFFS 0x12098
+#define QIB_7322_RxVCRCErrCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxFlowCtrlViolCnt_0_OFFS 0x120A0
+#define QIB_7322_RxFlowCtrlViolCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxVersionErrCnt_0_OFFS 0x120A8
+#define QIB_7322_RxVersionErrCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxLinkMalformCnt_0_OFFS 0x120B0
+#define QIB_7322_RxLinkMalformCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxEBPCnt_0_OFFS 0x120B8
+#define QIB_7322_RxEBPCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxLPCRCErrCnt_0_OFFS 0x120C0
+#define QIB_7322_RxLPCRCErrCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxBufOvflCnt_0_OFFS 0x120C8
+#define QIB_7322_RxBufOvflCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxLenTruncateCnt_0_OFFS 0x120D0
+#define QIB_7322_RxLenTruncateCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxPKeyMismatchCnt_0_OFFS 0x120E0
+#define QIB_7322_RxPKeyMismatchCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_IBLinkDownedCnt_0_OFFS 0x12180
+#define QIB_7322_IBLinkDownedCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_IBSymbolErrCnt_0_OFFS 0x12188
+#define QIB_7322_IBSymbolErrCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_IBStatusChangeCnt_0_OFFS 0x12190
+#define QIB_7322_IBStatusChangeCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_IBLinkErrRecoveryCnt_0_OFFS 0x12198
+#define QIB_7322_IBLinkErrRecoveryCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_ExcessBufferOvflCnt_0_OFFS 0x121A8
+#define QIB_7322_ExcessBufferOvflCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_LocalLinkIntegrityErrCnt_0_OFFS 0x121B0
+#define QIB_7322_LocalLinkIntegrityErrCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxVlErrCnt_0_OFFS 0x121B8
+#define QIB_7322_RxVlErrCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxDlidFltrCnt_0_OFFS 0x121C0
+#define QIB_7322_RxDlidFltrCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxVL15DroppedPktCnt_0_OFFS 0x121C8
+#define QIB_7322_RxVL15DroppedPktCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxOtherLocalPhyErrCnt_0_OFFS 0x121D0
+#define QIB_7322_RxOtherLocalPhyErrCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxQPInvalidContextCnt_0_OFFS 0x121D8
+#define QIB_7322_RxQPInvalidContextCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_TxHeadersErrCnt_0_OFFS 0x121F8
+#define QIB_7322_TxHeadersErrCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_PSRcvDataCount_0_OFFS 0x12218
+#define QIB_7322_PSRcvDataCount_0_DEF 0x0000000000000000
+
+#define QIB_7322_PSRcvPktsCount_0_OFFS 0x12220
+#define QIB_7322_PSRcvPktsCount_0_DEF 0x0000000000000000
+
+#define QIB_7322_PSXmitDataCount_0_OFFS 0x12228
+#define QIB_7322_PSXmitDataCount_0_DEF 0x0000000000000000
+
+#define QIB_7322_PSXmitPktsCount_0_OFFS 0x12230
+#define QIB_7322_PSXmitPktsCount_0_DEF 0x0000000000000000
+
+#define QIB_7322_PSXmitWaitCount_0_OFFS 0x12238
+#define QIB_7322_PSXmitWaitCount_0_DEF 0x0000000000000000
+
+#define QIB_7322_LBIntCnt_1_OFFS 0x13000
+#define QIB_7322_LBIntCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_TxCreditUpToDateTimeOut_1_OFFS 0x13008
+#define QIB_7322_TxCreditUpToDateTimeOut_1_DEF 0x0000000000000000
+
+#define QIB_7322_TxSDmaDescCnt_1_OFFS 0x13010
+#define QIB_7322_TxSDmaDescCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_TxUnsupVLErrCnt_1_OFFS 0x13018
+#define QIB_7322_TxUnsupVLErrCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_TxDataPktCnt_1_OFFS 0x13020
+#define QIB_7322_TxDataPktCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_TxFlowPktCnt_1_OFFS 0x13028
+#define QIB_7322_TxFlowPktCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_TxDwordCnt_1_OFFS 0x13030
+#define QIB_7322_TxDwordCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_TxLenErrCnt_1_OFFS 0x13038
+#define QIB_7322_TxLenErrCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_TxMaxMinLenErrCnt_1_OFFS 0x13040
+#define QIB_7322_TxMaxMinLenErrCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_TxUnderrunCnt_1_OFFS 0x13048
+#define QIB_7322_TxUnderrunCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_TxFlowStallCnt_1_OFFS 0x13050
+#define QIB_7322_TxFlowStallCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_TxDroppedPktCnt_1_OFFS 0x13058
+#define QIB_7322_TxDroppedPktCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxDroppedPktCnt_1_OFFS 0x13060
+#define QIB_7322_RxDroppedPktCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxDataPktCnt_1_OFFS 0x13068
+#define QIB_7322_RxDataPktCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxFlowPktCnt_1_OFFS 0x13070
+#define QIB_7322_RxFlowPktCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxDwordCnt_1_OFFS 0x13078
+#define QIB_7322_RxDwordCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxLenErrCnt_1_OFFS 0x13080
+#define QIB_7322_RxLenErrCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxMaxMinLenErrCnt_1_OFFS 0x13088
+#define QIB_7322_RxMaxMinLenErrCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxICRCErrCnt_1_OFFS 0x13090
+#define QIB_7322_RxICRCErrCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxVCRCErrCnt_1_OFFS 0x13098
+#define QIB_7322_RxVCRCErrCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxFlowCtrlViolCnt_1_OFFS 0x130A0
+#define QIB_7322_RxFlowCtrlViolCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxVersionErrCnt_1_OFFS 0x130A8
+#define QIB_7322_RxVersionErrCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxLinkMalformCnt_1_OFFS 0x130B0
+#define QIB_7322_RxLinkMalformCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxEBPCnt_1_OFFS 0x130B8
+#define QIB_7322_RxEBPCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxLPCRCErrCnt_1_OFFS 0x130C0
+#define QIB_7322_RxLPCRCErrCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxBufOvflCnt_1_OFFS 0x130C8
+#define QIB_7322_RxBufOvflCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxLenTruncateCnt_1_OFFS 0x130D0
+#define QIB_7322_RxLenTruncateCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxPKeyMismatchCnt_1_OFFS 0x130E0
+#define QIB_7322_RxPKeyMismatchCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_IBLinkDownedCnt_1_OFFS 0x13180
+#define QIB_7322_IBLinkDownedCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_IBSymbolErrCnt_1_OFFS 0x13188
+#define QIB_7322_IBSymbolErrCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_IBStatusChangeCnt_1_OFFS 0x13190
+#define QIB_7322_IBStatusChangeCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_IBLinkErrRecoveryCnt_1_OFFS 0x13198
+#define QIB_7322_IBLinkErrRecoveryCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_ExcessBufferOvflCnt_1_OFFS 0x131A8
+#define QIB_7322_ExcessBufferOvflCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_LocalLinkIntegrityErrCnt_1_OFFS 0x131B0
+#define QIB_7322_LocalLinkIntegrityErrCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxVlErrCnt_1_OFFS 0x131B8
+#define QIB_7322_RxVlErrCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxDlidFltrCnt_1_OFFS 0x131C0
+#define QIB_7322_RxDlidFltrCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxVL15DroppedPktCnt_1_OFFS 0x131C8
+#define QIB_7322_RxVL15DroppedPktCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxOtherLocalPhyErrCnt_1_OFFS 0x131D0
+#define QIB_7322_RxOtherLocalPhyErrCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxQPInvalidContextCnt_1_OFFS 0x131D8
+#define QIB_7322_RxQPInvalidContextCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_TxHeadersErrCnt_1_OFFS 0x131F8
+#define QIB_7322_TxHeadersErrCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_PSRcvDataCount_1_OFFS 0x13218
+#define QIB_7322_PSRcvDataCount_1_DEF 0x0000000000000000
+
+#define QIB_7322_PSRcvPktsCount_1_OFFS 0x13220
+#define QIB_7322_PSRcvPktsCount_1_DEF 0x0000000000000000
+
+#define QIB_7322_PSXmitDataCount_1_OFFS 0x13228
+#define QIB_7322_PSXmitDataCount_1_DEF 0x0000000000000000
+
+#define QIB_7322_PSXmitPktsCount_1_OFFS 0x13230
+#define QIB_7322_PSXmitPktsCount_1_DEF 0x0000000000000000
+
+#define QIB_7322_PSXmitWaitCount_1_OFFS 0x13238
+#define QIB_7322_PSXmitWaitCount_1_DEF 0x0000000000000000
+
+#define QIB_7322_RcvEgrArray_OFFS 0x14000
+#define QIB_7322_RcvEgrArray_DEF 0x0000000000000000
+#define QIB_7322_RcvEgrArray_RT_BufSize_LSB 0x25
+#define QIB_7322_RcvEgrArray_RT_BufSize_MSB 0x27
+#define QIB_7322_RcvEgrArray_RT_BufSize_RMASK 0x7
+#define QIB_7322_RcvEgrArray_RT_Addr_LSB 0x0
+#define QIB_7322_RcvEgrArray_RT_Addr_MSB 0x24
+#define QIB_7322_RcvEgrArray_RT_Addr_RMASK 0x1FFFFFFFFF
+
+#define QIB_7322_RcvTIDArray0_OFFS 0x50000
+#define QIB_7322_RcvTIDArray0_DEF 0x0000000000000000
+#define QIB_7322_RcvTIDArray0_RT_BufSize_LSB 0x25
+#define QIB_7322_RcvTIDArray0_RT_BufSize_MSB 0x27
+#define QIB_7322_RcvTIDArray0_RT_BufSize_RMASK 0x7
+#define QIB_7322_RcvTIDArray0_RT_Addr_LSB 0x0
+#define QIB_7322_RcvTIDArray0_RT_Addr_MSB 0x24
+#define QIB_7322_RcvTIDArray0_RT_Addr_RMASK 0x1FFFFFFFFF
+
+#define QIB_7322_IBSD_DDS_MAP_TABLE_0_OFFS 0xD0000
+#define QIB_7322_IBSD_DDS_MAP_TABLE_0_DEF 0x0000000000000000
+
+#define QIB_7322_RcvHdrTail0_OFFS 0x200000
+#define QIB_7322_RcvHdrTail0_DEF 0x0000000000000000
+
+#define QIB_7322_RcvHdrHead0_OFFS 0x200008
+#define QIB_7322_RcvHdrHead0_DEF 0x0000000000000000
+#define QIB_7322_RcvHdrHead0_counter_LSB 0x20
+#define QIB_7322_RcvHdrHead0_counter_MSB 0x2F
+#define QIB_7322_RcvHdrHead0_counter_RMASK 0xFFFF
+#define QIB_7322_RcvHdrHead0_RcvHeadPointer_LSB 0x0
+#define QIB_7322_RcvHdrHead0_RcvHeadPointer_MSB 0x1F
+#define QIB_7322_RcvHdrHead0_RcvHeadPointer_RMASK 0xFFFFFFFF
+
+#define QIB_7322_RcvEgrIndexTail0_OFFS 0x200010
+#define QIB_7322_RcvEgrIndexTail0_DEF 0x0000000000000000
+
+#define QIB_7322_RcvEgrIndexHead0_OFFS 0x200018
+#define QIB_7322_RcvEgrIndexHead0_DEF 0x0000000000000000
+
+#define QIB_7322_RcvTIDFlowTable0_OFFS 0x201000
+#define QIB_7322_RcvTIDFlowTable0_DEF 0x0000000000000000
+#define QIB_7322_RcvTIDFlowTable0_GenMismatch_LSB 0x1C
+#define QIB_7322_RcvTIDFlowTable0_GenMismatch_MSB 0x1C
+#define QIB_7322_RcvTIDFlowTable0_GenMismatch_RMASK 0x1
+#define QIB_7322_RcvTIDFlowTable0_SeqMismatch_LSB 0x1B
+#define QIB_7322_RcvTIDFlowTable0_SeqMismatch_MSB 0x1B
+#define QIB_7322_RcvTIDFlowTable0_SeqMismatch_RMASK 0x1
+#define QIB_7322_RcvTIDFlowTable0_KeepOnGenErr_LSB 0x16
+#define QIB_7322_RcvTIDFlowTable0_KeepOnGenErr_MSB 0x16
+#define QIB_7322_RcvTIDFlowTable0_KeepOnGenErr_RMASK 0x1
+#define QIB_7322_RcvTIDFlowTable0_KeepAfterSeqErr_LSB 0x15
+#define QIB_7322_RcvTIDFlowTable0_KeepAfterSeqErr_MSB 0x15
+#define QIB_7322_RcvTIDFlowTable0_KeepAfterSeqErr_RMASK 0x1
+#define QIB_7322_RcvTIDFlowTable0_HdrSuppEnabled_LSB 0x14
+#define QIB_7322_RcvTIDFlowTable0_HdrSuppEnabled_MSB 0x14
+#define QIB_7322_RcvTIDFlowTable0_HdrSuppEnabled_RMASK 0x1
+#define QIB_7322_RcvTIDFlowTable0_FlowValid_LSB 0x13
+#define QIB_7322_RcvTIDFlowTable0_FlowValid_MSB 0x13
+#define QIB_7322_RcvTIDFlowTable0_FlowValid_RMASK 0x1
+#define QIB_7322_RcvTIDFlowTable0_GenVal_LSB 0xB
+#define QIB_7322_RcvTIDFlowTable0_GenVal_MSB 0x12
+#define QIB_7322_RcvTIDFlowTable0_GenVal_RMASK 0xFF
+#define QIB_7322_RcvTIDFlowTable0_SeqNum_LSB 0x0
+#define QIB_7322_RcvTIDFlowTable0_SeqNum_MSB 0xA
+#define QIB_7322_RcvTIDFlowTable0_SeqNum_RMASK 0x7FF
diff --git a/drivers/infiniband/hw/qib/qib_common.h b/drivers/infiniband/hw/qib/qib_common.h
new file mode 100644
index 000000000000..b3955ed8f794
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_common.h
@@ -0,0 +1,758 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
+ * All rights reserved.
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _QIB_COMMON_H
+#define _QIB_COMMON_H
+
+/*
+ * This file contains defines, structures, etc. that are used
+ * to communicate between kernel and user code.
+ */
+
+/* This is the IEEE-assigned OUI for QLogic Inc. QLogic_IB */
+#define QIB_SRC_OUI_1 0x00
+#define QIB_SRC_OUI_2 0x11
+#define QIB_SRC_OUI_3 0x75
+
+/* version of protocol header (known to chip also). In the long run,
+ * we should be able to generate and accept a range of version numbers;
+ * for now we only accept one, and it's compiled in.
+ */
+#define IPS_PROTO_VERSION 2
+
+/*
+ * These are compile time constants that you may want to enable or disable
+ * if you are trying to debug problems with code or performance.
+ * QIB_VERBOSE_TRACING define as 1 if you want additional tracing in
+ * fastpath code
+ * QIB_TRACE_REGWRITES define as 1 if you want register writes to be
+ * traced in fastpath code
+ * _QIB_TRACING define as 0 if you want to remove all tracing in a
+ * compilation unit
+ */
+
+/*
+ * The value in the BTH QP field that QLogic_IB uses to differentiate
+ * a qlogic_ib protocol IB packet from standard IB transport.
+ * It needs to be even (0x656b78), because the LSB is sometimes
+ * used for the MSB of context. The change may cause a problem
+ * interoperating with older software.
+ */
+#define QIB_KD_QP 0x656b78
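+/*
+ * Illustrative note: 0x656b78 has bit 0 clear, leaving the LSB free to
+ * carry the context MSB (assumed from QIB_RUNTIME_CTXT_MSB_IN_QP below
+ * and the comment above).
+ */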
+
+/*
+ * These are the status bits readable (in ascii form, 64bit value)
+ * from the "status" sysfs file.  For binary compatibility, values
+ * must remain as is; removed states can be reused for different
+ * purposes.
+ */
+#define QIB_STATUS_INITTED       0x1    /* basic initialization done */
+/* Chip has been found and initted */
+#define QIB_STATUS_CHIP_PRESENT 0x20
+/* IB link is at ACTIVE, usable for data traffic */
+#define QIB_STATUS_IB_READY     0x40
+/* link is configured, LID, MTU, etc. have been set */
+#define QIB_STATUS_IB_CONF      0x80
+/* A Fatal hardware error has occurred. */
+#define QIB_STATUS_HWERROR     0x200
+
+/*
+ * The list of usermode accessible registers.  Also see Reg_* later in file.
+ */
+enum qib_ureg {
+	/* (RO)  DMA RcvHdr to be used next. */
+	ur_rcvhdrtail = 0,
+	/* (RW)  RcvHdr entry to be processed next by host. */
+	ur_rcvhdrhead = 1,
+	/* (RO)  Index of next Eager index to use. */
+	ur_rcvegrindextail = 2,
+	/* (RW)  Eager TID to be processed next */
+	ur_rcvegrindexhead = 3,
+	/* For internal use only; max register number. */
+	_QIB_UregMax
+};
+
+/* bit values for spi_runtime_flags */
+#define QIB_RUNTIME_PCIE                0x0002
+#define QIB_RUNTIME_FORCE_WC_ORDER      0x0004
+#define QIB_RUNTIME_RCVHDR_COPY         0x0008
+#define QIB_RUNTIME_MASTER              0x0010
+#define QIB_RUNTIME_RCHK                0x0020
+#define QIB_RUNTIME_NODMA_RTAIL         0x0080
+#define QIB_RUNTIME_SPECIAL_TRIGGER     0x0100
+#define QIB_RUNTIME_SDMA                0x0200
+#define QIB_RUNTIME_FORCE_PIOAVAIL      0x0400
+#define QIB_RUNTIME_PIO_REGSWAPPED      0x0800
+#define QIB_RUNTIME_CTXT_MSB_IN_QP      0x1000
+#define QIB_RUNTIME_CTXT_REDIRECT       0x2000
+#define QIB_RUNTIME_HDRSUPP             0x4000
+
+/*
+ * This structure is returned by qib_userinit() immediately after
+ * open to get implementation-specific info, and info specific to this
+ * instance.
+ *
+ * This struct must have explicit pad fields where type sizes
+ * may result in different alignments between 32 and 64 bit
+ * programs, since the 64 bit kernel requires the user code
+ * to have matching offsets.
+ */
+struct qib_base_info {
+	/* version of hardware, for feature checking. */
+	__u32 spi_hw_version;
+	/* version of software, for feature checking. */
+	__u32 spi_sw_version;
+	/* QLogic_IB context assigned, goes into sent packets */
+	__u16 spi_ctxt;
+	__u16 spi_subctxt;
+	/*
+	 * IB MTU; a packet's IB data must be less than this.
+	 * The MTU is in bytes, and will be a multiple of 4 bytes.
+	 */
+	__u32 spi_mtu;
+	/*
+	 * Size of a PIO buffer.  Any given packet's total size must be less
+	 * than this (in words).  Included is the starting control word, so
+	 * if 513 is returned, then total pkt size is 512 words or less.
+	 */
+	__u32 spi_piosize;
+	/* size of the TID cache in qlogic_ib, in entries */
+	__u32 spi_tidcnt;
+	/* size of the TID Eager list in qlogic_ib, in entries */
+	__u32 spi_tidegrcnt;
+	/* size of a single receive header queue entry in words. */
+	__u32 spi_rcvhdrent_size;
+	/*
+	 * Count of receive header queue entries allocated.
+	 * This may be less than the spu_rcvhdrcnt passed in!
+	 */
+	__u32 spi_rcvhdr_cnt;
+
+	/* per-chip and other runtime features bitmap (QIB_RUNTIME_*) */
+	__u32 spi_runtime_flags;
+
+	/* address where hardware receive header queue is mapped */
+	__u64 spi_rcvhdr_base;
+
+	/* user program. */
+
+	/* base address of eager TID receive buffers used by hardware. */
+	__u64 spi_rcv_egrbufs;
+
+	/* Allocated by initialization code, not by protocol. */
+
+	/*
+	 * Size of each TID buffer in host memory, starting at
+	 * spi_rcv_egrbufs.  The buffers are virtually contiguous.
+	 */
+	__u32 spi_rcv_egrbufsize;
+	/*
+	 * The special QP (queue pair) value that identifies a qlogic_ib
+	 * protocol packet from standard IB packets.  More, probably much
+	 * more, to be added.
+	 */
+	__u32 spi_qpair;
+
+	/*
+	 * User register base for init code, not to be used directly by
+	 * protocol or applications.  Always points to chip registers,
+	 * for normal or shared context.
+	 */
+	__u64 spi_uregbase;
+	/*
+	 * Maximum buffer size in bytes that can be used in a single TID
+	 * entry (assuming the buffer is aligned to this boundary).  This is
+	 * the minimum of what the hardware and software support.
+	 * Guaranteed to be a power of 2.
+	 */
+	__u32 spi_tid_maxsize;
+	/*
+	 * alignment of each pio send buffer (byte count
+	 * to add to spi_piobufbase to get to second buffer)
+	 */
+	__u32 spi_pioalign;
+	/*
+	 * The index of the first pio buffer available to this process;
+	 * needed to do lookup in spi_pioavailaddr; not added to
+	 * spi_piobufbase.
+	 */
+	__u32 spi_pioindex;
+	 /* number of buffers mapped for this process */
+	__u32 spi_piocnt;
+
+	/*
+	 * Base address of writeonly pio buffers for this process.
+	 * Each buffer has spi_piosize words, and is aligned on spi_pioalign
+	 * boundaries.  spi_piocnt buffers are mapped from this address
+	 */
+	__u64 spi_piobufbase;
+
+	/*
+	 * Base address of readonly memory copy of the pioavail registers.
+	 * There are 2 bits for each buffer.
+	 */
+	__u64 spi_pioavailaddr;
+
+	/*
+	 * Address where driver updates a copy of the interface and driver
+	 * status (QIB_STATUS_*) as a 64 bit value.  It's followed by a
+	 * link status qword (formerly combined with driver status), then a
+	 * string indicating hardware error, if there was one.
+	 */
+	__u64 spi_status;
+
+	/* number of chip ctxts available to user processes */
+	__u32 spi_nctxts;
+	__u16 spi_unit; /* unit number of chip we are using */
+	__u16 spi_port; /* IB port number we are using */
+	/* num bufs in each contiguous set */
+	__u32 spi_rcv_egrperchunk;
+	/* size in bytes of each contiguous set */
+	__u32 spi_rcv_egrchunksize;
+	/* total size of mmap to cover full rcvegrbuffers */
+	__u32 spi_rcv_egrbuftotlen;
+	__u32 spi_rhf_offset; /* dword offset in hdrqent for rcvhdr flags */
+	/* address of readonly memory copy of the rcvhdrq tail register. */
+	__u64 spi_rcvhdr_tailaddr;
+
+	/*
+	 * shared memory pages for subctxts if ctxt is shared; these cover
+	 * all the processes in the group sharing a single context.
+	 * all have enough space for the num_subcontexts value on this job.
+	 */
+	__u64 spi_subctxt_uregbase;
+	__u64 spi_subctxt_rcvegrbuf;
+	__u64 spi_subctxt_rcvhdr_base;
+
+	/* shared memory page for send buffer disarm status */
+	__u64 spi_sendbuf_status;
+} __attribute__ ((aligned(8)));
+
+/*
+ * This version number is given to the driver by the user code during
+ * initialization in the spu_userversion field of qib_user_info, so
+ * the driver can check for compatibility with user code.
+ *
+ * The major version changes when data structures
+ * change in an incompatible way.  The driver must be the same or higher
+ * for initialization to succeed.  In some cases, a higher version
+ * driver will not interoperate with older software, and initialization
+ * will return an error.
+ */
+#define QIB_USER_SWMAJOR 1
+
+/*
+ * Minor version differences are always compatible
+ * within a major version; however, if the user software is newer
+ * than driver software, some new features and/or structure fields
+ * may not be implemented; the user code must deal with this if it
+ * cares, or it must abort after initialization reports the difference.
+ */
+#define QIB_USER_SWMINOR 10
+
+#define QIB_USER_SWVERSION ((QIB_USER_SWMAJOR << 16) | QIB_USER_SWMINOR)
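+/*
+ * With QIB_USER_SWMAJOR 1 and QIB_USER_SWMINOR 10 above, this evaluates
+ * to 0x0001000A.
+ */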
+
+#ifndef QIB_KERN_TYPE
+#define QIB_KERN_TYPE 0
+#define QIB_IDSTR "QLogic kernel.org driver"
+#endif
+
+/*
+ * Similarly, this is the kernel version going back to the user.  It's
+ * slightly different, in that we want to tell if the driver was built as
+ * part of a QLogic release, or from the driver from openfabrics.org,
+ * kernel.org, or a standard distribution, for support reasons.
+ * The high bit is 0 for non-QLogic and 1 for QLogic-built/supplied.
+ *
+ * It's returned by the driver to the user code during initialization in the
+ * spi_sw_version field of qib_base_info, so the user code can in turn
+ * check for compatibility with the kernel.
+*/
+#define QIB_KERN_SWVERSION ((QIB_KERN_TYPE << 31) | QIB_USER_SWVERSION)
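+/*
+ * For the default kernel.org build (QIB_KERN_TYPE 0), this equals
+ * QIB_USER_SWVERSION with bit 31 clear; a QLogic-supplied build would
+ * set bit 31.
+ */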
+
+/*
+ * This structure is passed to qib_userinit() to tell the driver where
+ * user code buffers are, sizes, etc.  The offsets and sizes of the
+ * fields must remain unchanged, for binary compatibility.  It can
+ * be extended if userversion is changed, so user code can tell, if needed.
+ */
+struct qib_user_info {
+	/*
+	 * version of user software, to detect compatibility issues.
+	 * Should be set to QIB_USER_SWVERSION.
+	 */
+	__u32 spu_userversion;
+
+	__u32 _spu_unused2;
+
+	/* size of struct base_info to write to */
+	__u32 spu_base_info_size;
+
+	__u32 _spu_unused3;
+
+	/*
+	 * If two or more processes wish to share a context, each process
+	 * must set the spu_subctxt_cnt and spu_subctxt_id to the same
+	 * values.  The only restriction on the spu_subctxt_id is that
+	 * it be unique for a given node.
+	 */
+	__u16 spu_subctxt_cnt;
+	__u16 spu_subctxt_id;
+
+	__u32 spu_port; /* IB port requested by user if > 0 */
+
+	/*
+	 * address of struct base_info to write to
+	 */
+	__u64 spu_base_info;
+
+} __attribute__ ((aligned(8)));
+
+/* User commands. */
+
+/* 16 available, was: old set up userspace (for old user code) */
+#define QIB_CMD_CTXT_INFO       17      /* find out what resources we got */
+#define QIB_CMD_RECV_CTRL       18      /* control receipt of packets */
+#define QIB_CMD_TID_UPDATE      19      /* update expected TID entries */
+#define QIB_CMD_TID_FREE        20      /* free expected TID entries */
+#define QIB_CMD_SET_PART_KEY    21      /* add partition key */
+/* 22 available, was: return info on slave processes (for old user code) */
+#define QIB_CMD_ASSIGN_CTXT     23      /* allocate HCA and ctxt */
+#define QIB_CMD_USER_INIT       24      /* set up userspace */
+#define QIB_CMD_UNUSED_1        25
+#define QIB_CMD_UNUSED_2        26
+#define QIB_CMD_PIOAVAILUPD     27      /* force an update of PIOAvail reg */
+#define QIB_CMD_POLL_TYPE       28      /* set the kind of polling we want */
+#define QIB_CMD_ARMLAUNCH_CTRL  29      /* armlaunch detection control */
+/* 30 is unused */
+#define QIB_CMD_SDMA_INFLIGHT   31      /* sdma inflight counter request */
+#define QIB_CMD_SDMA_COMPLETE   32      /* sdma completion counter request */
+/* 33 available, was a testing feature  */
+#define QIB_CMD_DISARM_BUFS     34      /* disarm send buffers w/ errors */
+#define QIB_CMD_ACK_EVENT       35      /* ack & clear bits */
+#define QIB_CMD_CPUS_LIST       36      /* list of cpus allocated, for pinned
+					 * processes: qib_cpus_list */
+
+/*
+ * QIB_CMD_ACK_EVENT obsoletes QIB_CMD_DISARM_BUFS, but we keep it for
+ * compatibility with libraries from previous release.   The ACK_EVENT
+ * will take appropriate driver action (if any, just DISARM for now),
+ * then clear the bits passed in as part of the mask.  These bits are
+ * in the first 64bit word at spi_sendbuf_status, and are passed to
+ * the driver in the event_mask union as well.
+ */
+#define _QIB_EVENT_DISARM_BUFS_BIT	0
+#define _QIB_EVENT_LINKDOWN_BIT		1
+#define _QIB_EVENT_LID_CHANGE_BIT	2
+#define _QIB_EVENT_LMC_CHANGE_BIT	3
+#define _QIB_EVENT_SL2VL_CHANGE_BIT	4
+#define _QIB_MAX_EVENT_BIT _QIB_EVENT_SL2VL_CHANGE_BIT
+
+#define QIB_EVENT_DISARM_BUFS_BIT	(1UL << _QIB_EVENT_DISARM_BUFS_BIT)
+#define QIB_EVENT_LINKDOWN_BIT		(1UL << _QIB_EVENT_LINKDOWN_BIT)
+#define QIB_EVENT_LID_CHANGE_BIT	(1UL << _QIB_EVENT_LID_CHANGE_BIT)
+#define QIB_EVENT_LMC_CHANGE_BIT	(1UL << _QIB_EVENT_LMC_CHANGE_BIT)
+#define QIB_EVENT_SL2VL_CHANGE_BIT	(1UL << _QIB_EVENT_SL2VL_CHANGE_BIT)
+
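+/*
+ * Illustrative only (hypothetical user code, not part of the ABI):
+ * a library that sees QIB_EVENT_DISARM_BUFS_BIT set in the status qword
+ * at spi_sendbuf_status could acknowledge it with something like:
+ *
+ *	struct qib_cmd cmd = { .type = QIB_CMD_ACK_EVENT };
+ *	cmd.cmd.event_mask = QIB_EVENT_DISARM_BUFS_BIT;
+ *	(void) write(device_fd, &cmd, sizeof(cmd));
+ */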
+
+/*
+ * Poll types
+ */
+#define QIB_POLL_TYPE_ANYRCV     0x0
+#define QIB_POLL_TYPE_URGENT     0x1
+
+struct qib_ctxt_info {
+	__u16 num_active;       /* number of active units */
+	__u16 unit;             /* unit (chip) assigned to caller */
+	__u16 port;             /* IB port assigned to caller (1-based) */
+	__u16 ctxt;             /* ctxt on unit assigned to caller */
+	__u16 subctxt;          /* subctxt on unit assigned to caller */
+	__u16 num_ctxts;        /* number of ctxts available on unit */
+	__u16 num_subctxts;     /* number of subctxts opened on ctxt */
+	__u16 rec_cpu;          /* cpu # for affinity (ffff if none) */
+};
+
+struct qib_tid_info {
+	__u32 tidcnt;
+	/* make structure same size in 32 and 64 bit */
+	__u32 tid__unused;
+	/* virtual address of first page in transfer */
+	__u64 tidvaddr;
+	/* pointer (same size 32/64 bit) to __u16 tid array */
+	__u64 tidlist;
+
+	/*
+	 * pointer (same size 32/64 bit) to bitmap of TIDs used
+	 * for this call; checked for being large enough at open
+	 */
+	__u64 tidmap;
+};
+
+struct qib_cmd {
+	__u32 type;                     /* command type */
+	union {
+		struct qib_tid_info tid_info;
+		struct qib_user_info user_info;
+
+		/*
+		 * address in userspace where we should put the sdma
+		 * inflight counter
+		 */
+		__u64 sdma_inflight;
+		/*
+		 * address in userspace where we should put the sdma
+		 * completion counter
+		 */
+		__u64 sdma_complete;
+		/*
+		 * address in userspace of struct qib_ctxt_info to
+		 * write result to
+		 */
+		__u64 ctxt_info;
+		/* enable/disable receipt of packets */
+		__u32 recv_ctrl;
+		/* enable/disable armlaunch errors (non-zero to enable) */
+		__u32 armlaunch_ctrl;
+		/* partition key to set */
+		__u16 part_key;
+		/* user address of __u32 bitmask of active slaves */
+		__u64 slave_mask_addr;
+		/* type of polling we want */
+		__u16 poll_type;
+		/* back pressure enable bit for one particular context */
+		__u8 ctxt_bp;
+		/* qib_user_event_ack(), QIB_EVENT_* bits */
+		__u64 event_mask;
+	} cmd;
+};
+
+struct qib_iovec {
+	/* Pointer to data, but same size 32 and 64 bit */
+	__u64 iov_base;
+
+	/*
+	 * Length of data; don't need 64 bits, but want
+	 * qib_sendpkt to remain same size as before 32 bit changes, so...
+	 */
+	__u64 iov_len;
+};
+
+/*
+ * Describes a single packet for send.  Each packet can have one or more
+ * buffers, but the total length (exclusive of IB headers) must be less
+ * than the MTU, and if using the PIO method, entire packet length,
+ * including IB headers, must be less than the qib_piosize value (words).
+ * Use of this necessitates including sys/uio.h
+ */
+struct __qib_sendpkt {
+	__u32 sps_flags;        /* flags for packet (TBD) */
+	__u32 sps_cnt;          /* number of entries to use in sps_iov */
+	/* array of iov's describing packet. TEMPORARY */
+	struct qib_iovec sps_iov[4];
+};
+
+/*
+ * Diagnostics can send a packet by "writing" the following
+ * structs to the diag data special file.  This allows a custom
+ * pbc (+ static rate) qword, so that special modes and deliberate
+ * changes to CRCs can be used. The elements were also re-ordered
+ * for better alignment and to avoid padding issues.
+ */
+#define _DIAG_XPKT_VERS 3
+struct qib_diag_xpkt {
+	__u16 version;
+	__u16 unit;
+	__u16 port;
+	__u16 len;
+	__u64 data;
+	__u64 pbc_wd;
+};
+
+/*
+ * Data layout in I2C flash (for GUID, etc.)
+ * All fields are little-endian binary unless otherwise stated
+ */
+#define QIB_FLASH_VERSION 2
+struct qib_flash {
+	/* flash layout version (QIB_FLASH_VERSION) */
+	__u8 if_fversion;
+	/* checksum protecting if_length bytes */
+	__u8 if_csum;
+	/*
+	 * valid length (in use, protected by if_csum), including
+	 * if_fversion and if_csum themselves
+	 */
+	__u8 if_length;
+	/* the GUID, in network order */
+	__u8 if_guid[8];
+	/* number of GUIDs to use, starting from if_guid */
+	__u8 if_numguid;
+	/* the (last 10 characters of) board serial number, in ASCII */
+	char if_serial[12];
+	/* board mfg date (YYYYMMDD ASCII) */
+	char if_mfgdate[8];
+	/* last board rework/test date (YYYYMMDD ASCII) */
+	char if_testdate[8];
+	/* logging of error counts, TBD */
+	__u8 if_errcntp[4];
+	/* powered on hours, updated at driver unload */
+	__u8 if_powerhour[2];
+	/* ASCII free-form comment field */
+	char if_comment[32];
+	/* Backwards compatible prefix for longer QLogic Serial Numbers */
+	char if_sprefix[4];
+	/* 82 bytes used, min flash size is 128 bytes */
+	__u8 if_future[46];
+};
+
+/*
+ * These are the counters implemented in the chip, and are listed in order.
+ * The InterCaps naming is taken straight from the chip spec.
+ */
+struct qlogic_ib_counters {
+	__u64 LBIntCnt;
+	__u64 LBFlowStallCnt;
+	__u64 TxSDmaDescCnt;    /* was Reserved1 */
+	__u64 TxUnsupVLErrCnt;
+	__u64 TxDataPktCnt;
+	__u64 TxFlowPktCnt;
+	__u64 TxDwordCnt;
+	__u64 TxLenErrCnt;
+	__u64 TxMaxMinLenErrCnt;
+	__u64 TxUnderrunCnt;
+	__u64 TxFlowStallCnt;
+	__u64 TxDroppedPktCnt;
+	__u64 RxDroppedPktCnt;
+	__u64 RxDataPktCnt;
+	__u64 RxFlowPktCnt;
+	__u64 RxDwordCnt;
+	__u64 RxLenErrCnt;
+	__u64 RxMaxMinLenErrCnt;
+	__u64 RxICRCErrCnt;
+	__u64 RxVCRCErrCnt;
+	__u64 RxFlowCtrlErrCnt;
+	__u64 RxBadFormatCnt;
+	__u64 RxLinkProblemCnt;
+	__u64 RxEBPCnt;
+	__u64 RxLPCRCErrCnt;
+	__u64 RxBufOvflCnt;
+	__u64 RxTIDFullErrCnt;
+	__u64 RxTIDValidErrCnt;
+	__u64 RxPKeyMismatchCnt;
+	__u64 RxP0HdrEgrOvflCnt;
+	__u64 RxP1HdrEgrOvflCnt;
+	__u64 RxP2HdrEgrOvflCnt;
+	__u64 RxP3HdrEgrOvflCnt;
+	__u64 RxP4HdrEgrOvflCnt;
+	__u64 RxP5HdrEgrOvflCnt;
+	__u64 RxP6HdrEgrOvflCnt;
+	__u64 RxP7HdrEgrOvflCnt;
+	__u64 RxP8HdrEgrOvflCnt;
+	__u64 RxP9HdrEgrOvflCnt;
+	__u64 RxP10HdrEgrOvflCnt;
+	__u64 RxP11HdrEgrOvflCnt;
+	__u64 RxP12HdrEgrOvflCnt;
+	__u64 RxP13HdrEgrOvflCnt;
+	__u64 RxP14HdrEgrOvflCnt;
+	__u64 RxP15HdrEgrOvflCnt;
+	__u64 RxP16HdrEgrOvflCnt;
+	__u64 IBStatusChangeCnt;
+	__u64 IBLinkErrRecoveryCnt;
+	__u64 IBLinkDownedCnt;
+	__u64 IBSymbolErrCnt;
+	__u64 RxVL15DroppedPktCnt;
+	__u64 RxOtherLocalPhyErrCnt;
+	__u64 PcieRetryBufDiagQwordCnt;
+	__u64 ExcessBufferOvflCnt;
+	__u64 LocalLinkIntegrityErrCnt;
+	__u64 RxVlErrCnt;
+	__u64 RxDlidFltrCnt;
+};
+
+/*
+ * The next set of defines are for packet headers, and chip register
+ * and memory bits that are visible to and/or used by user-mode software.
+ */
+
+/* RcvHdrFlags bits */
+#define QLOGIC_IB_RHF_LENGTH_MASK 0x7FF
+#define QLOGIC_IB_RHF_LENGTH_SHIFT 0
+#define QLOGIC_IB_RHF_RCVTYPE_MASK 0x7
+#define QLOGIC_IB_RHF_RCVTYPE_SHIFT 11
+#define QLOGIC_IB_RHF_EGRINDEX_MASK 0xFFF
+#define QLOGIC_IB_RHF_EGRINDEX_SHIFT 16
+#define QLOGIC_IB_RHF_SEQ_MASK 0xF
+#define QLOGIC_IB_RHF_SEQ_SHIFT 0
+#define QLOGIC_IB_RHF_HDRQ_OFFSET_MASK 0x7FF
+#define QLOGIC_IB_RHF_HDRQ_OFFSET_SHIFT 4
+#define QLOGIC_IB_RHF_H_ICRCERR   0x80000000
+#define QLOGIC_IB_RHF_H_VCRCERR   0x40000000
+#define QLOGIC_IB_RHF_H_PARITYERR 0x20000000
+#define QLOGIC_IB_RHF_H_LENERR    0x10000000
+#define QLOGIC_IB_RHF_H_MTUERR    0x08000000
+#define QLOGIC_IB_RHF_H_IHDRERR   0x04000000
+#define QLOGIC_IB_RHF_H_TIDERR    0x02000000
+#define QLOGIC_IB_RHF_H_MKERR     0x01000000
+#define QLOGIC_IB_RHF_H_IBERR     0x00800000
+#define QLOGIC_IB_RHF_H_ERR_MASK  0xFF800000
+#define QLOGIC_IB_RHF_L_USE_EGR   0x80000000
+#define QLOGIC_IB_RHF_L_SWA       0x00008000
+#define QLOGIC_IB_RHF_L_SWB       0x00004000
+
+/* qlogic_ib header fields */
+#define QLOGIC_IB_I_VERS_MASK 0xF
+#define QLOGIC_IB_I_VERS_SHIFT 28
+#define QLOGIC_IB_I_CTXT_MASK 0xF
+#define QLOGIC_IB_I_CTXT_SHIFT 24
+#define QLOGIC_IB_I_TID_MASK 0x7FF
+#define QLOGIC_IB_I_TID_SHIFT 13
+#define QLOGIC_IB_I_OFFSET_MASK 0x1FFF
+#define QLOGIC_IB_I_OFFSET_SHIFT 0
+
+/* K_PktFlags bits */
+#define QLOGIC_IB_KPF_INTR 0x1
+#define QLOGIC_IB_KPF_SUBCTXT_MASK 0x3
+#define QLOGIC_IB_KPF_SUBCTXT_SHIFT 1
+
+#define QLOGIC_IB_MAX_SUBCTXT   4
+
+/* SendPIO per-buffer control */
+#define QLOGIC_IB_SP_TEST    0x40
+#define QLOGIC_IB_SP_TESTEBP 0x20
+#define QLOGIC_IB_SP_TRIGGER_SHIFT  15
+
+/* SendPIOAvail bits */
+#define QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT 1
+#define QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT 0
+
+/* qlogic_ib header format */
+struct qib_header {
+	/*
+	 * Version - 4 bits, Context - 4 bits, TID - 10 bits and Offset -
+	 * 14 bits before ECO change ~28 Dec 03.  After that, Vers 4,
+	 * Context 4, TID 11, offset 13.
+	 */
+	__le32 ver_ctxt_tid_offset;
+	__le16 chksum;
+	__le16 pkt_flags;
+};
+
+/*
+ * qlogic_ib user message header format.
+ * This structure contains the first 4 fields common to all protocols
+ * that employ qlogic_ib.
+ */
+struct qib_message_header {
+	__be16 lrh[4];
+	__be32 bth[3];
+	/* fields below this point are in host byte order */
+	struct qib_header iph;
+	__u8 sub_opcode;
+};
+
+/* IB - LRH header consts */
+#define QIB_LRH_GRH 0x0003      /* 1. word of IB LRH - next header: GRH */
+#define QIB_LRH_BTH 0x0002      /* 1. word of IB LRH - next header: BTH */
+
+/* misc. */
+#define SIZE_OF_CRC 1
+
+#define QIB_DEFAULT_P_KEY 0xFFFF
+#define QIB_PERMISSIVE_LID 0xFFFF
+#define QIB_AETH_CREDIT_SHIFT 24
+#define QIB_AETH_CREDIT_MASK 0x1F
+#define QIB_AETH_CREDIT_INVAL 0x1F
+#define QIB_PSN_MASK 0xFFFFFF
+#define QIB_MSN_MASK 0xFFFFFF
+#define QIB_QPN_MASK 0xFFFFFF
+#define QIB_MULTICAST_LID_BASE 0xC000
+#define QIB_EAGER_TID_ID QLOGIC_IB_I_TID_MASK
+#define QIB_MULTICAST_QPN 0xFFFFFF
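+
+/*
+ * Illustrative: the credit field of a received AETH word would be
+ * extracted as (aeth >> QIB_AETH_CREDIT_SHIFT) & QIB_AETH_CREDIT_MASK,
+ * with QIB_AETH_CREDIT_INVAL presumably marking an invalid credit value.
+ */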
+
+/* Receive Header Queue: receive type (from qlogic_ib) */
+#define RCVHQ_RCV_TYPE_EXPECTED  0
+#define RCVHQ_RCV_TYPE_EAGER     1
+#define RCVHQ_RCV_TYPE_NON_KD    2
+#define RCVHQ_RCV_TYPE_ERROR     3
+
+#define QIB_HEADER_QUEUE_WORDS 9
+
+/* functions for extracting fields from rcvhdrq entries for the driver.
+ */
+static inline __u32 qib_hdrget_err_flags(const __le32 *rbuf)
+{
+	return __le32_to_cpu(rbuf[1]) & QLOGIC_IB_RHF_H_ERR_MASK;
+}
+
+static inline __u32 qib_hdrget_rcv_type(const __le32 *rbuf)
+{
+	return (__le32_to_cpu(rbuf[0]) >> QLOGIC_IB_RHF_RCVTYPE_SHIFT) &
+		QLOGIC_IB_RHF_RCVTYPE_MASK;
+}
+
+static inline __u32 qib_hdrget_length_in_bytes(const __le32 *rbuf)
+{
+	return ((__le32_to_cpu(rbuf[0]) >> QLOGIC_IB_RHF_LENGTH_SHIFT) &
+		QLOGIC_IB_RHF_LENGTH_MASK) << 2;
+}
+
+static inline __u32 qib_hdrget_index(const __le32 *rbuf)
+{
+	return (__le32_to_cpu(rbuf[0]) >> QLOGIC_IB_RHF_EGRINDEX_SHIFT) &
+		QLOGIC_IB_RHF_EGRINDEX_MASK;
+}
+
+static inline __u32 qib_hdrget_seq(const __le32 *rbuf)
+{
+	return (__le32_to_cpu(rbuf[1]) >> QLOGIC_IB_RHF_SEQ_SHIFT) &
+		QLOGIC_IB_RHF_SEQ_MASK;
+}
+
+static inline __u32 qib_hdrget_offset(const __le32 *rbuf)
+{
+	return (__le32_to_cpu(rbuf[1]) >> QLOGIC_IB_RHF_HDRQ_OFFSET_SHIFT) &
+		QLOGIC_IB_RHF_HDRQ_OFFSET_MASK;
+}
+
+static inline __u32 qib_hdrget_use_egr_buf(const __le32 *rbuf)
+{
+	return __le32_to_cpu(rbuf[0]) & QLOGIC_IB_RHF_L_USE_EGR;
+}
+
+static inline __u32 qib_hdrget_qib_ver(__le32 hdrword)
+{
+	return (__le32_to_cpu(hdrword) >> QLOGIC_IB_I_VERS_SHIFT) &
+		QLOGIC_IB_I_VERS_MASK;
+}
+
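+/*
+ * Illustrative sketch (hypothetical names, not part of this header):
+ * a receive loop would typically apply the helpers above to each
+ * rcvhdrq entry, for example:
+ *
+ *	__le32 *rhf = (__le32 *)rhdr + rhf_offset;
+ *	u32 etype = qib_hdrget_rcv_type(rhf);
+ *	u32 tlen = qib_hdrget_length_in_bytes(rhf);
+ *
+ *	if (etype == RCVHQ_RCV_TYPE_EAGER)
+ *		ebuf = egrbufs + egrbufsize * qib_hdrget_index(rhf);
+ *	else if (qib_hdrget_err_flags(rhf))
+ *		handle_errors(rhf, tlen);
+ */
+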
+#endif                          /* _QIB_COMMON_H */
diff --git a/drivers/infiniband/hw/qib/qib_cq.c b/drivers/infiniband/hw/qib/qib_cq.c
new file mode 100644
index 000000000000..a86cbf880f98
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_cq.c
@@ -0,0 +1,484 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2010 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include "qib_verbs.h"
+
+/**
+ * qib_cq_enter - add a new entry to the completion queue
+ * @cq: completion queue
+ * @entry: work completion entry to add
+ * @solicited: true if @entry is a solicited entry
+ *
+ * This may be called with qp->s_lock held.
+ */
+void qib_cq_enter(struct qib_cq *cq, struct ib_wc *entry, int solicited)
+{
+	struct qib_cq_wc *wc;
+	unsigned long flags;
+	u32 head;
+	u32 next;
+
+	spin_lock_irqsave(&cq->lock, flags);
+
+	/*
+	 * Note that the head pointer might be writable by user processes.
+	 * Take care to verify it is a sane value.
+	 */
+	wc = cq->queue;
+	head = wc->head;
+	if (head >= (unsigned) cq->ibcq.cqe) {
+		head = cq->ibcq.cqe;
+		next = 0;
+	} else
+		next = head + 1;
+	if (unlikely(next == wc->tail)) {
+		spin_unlock_irqrestore(&cq->lock, flags);
+		if (cq->ibcq.event_handler) {
+			struct ib_event ev;
+
+			ev.device = cq->ibcq.device;
+			ev.element.cq = &cq->ibcq;
+			ev.event = IB_EVENT_CQ_ERR;
+			cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
+		}
+		return;
+	}
+	if (cq->ip) {
+		wc->uqueue[head].wr_id = entry->wr_id;
+		wc->uqueue[head].status = entry->status;
+		wc->uqueue[head].opcode = entry->opcode;
+		wc->uqueue[head].vendor_err = entry->vendor_err;
+		wc->uqueue[head].byte_len = entry->byte_len;
+		wc->uqueue[head].ex.imm_data =
+			(__u32 __force)entry->ex.imm_data;
+		wc->uqueue[head].qp_num = entry->qp->qp_num;
+		wc->uqueue[head].src_qp = entry->src_qp;
+		wc->uqueue[head].wc_flags = entry->wc_flags;
+		wc->uqueue[head].pkey_index = entry->pkey_index;
+		wc->uqueue[head].slid = entry->slid;
+		wc->uqueue[head].sl = entry->sl;
+		wc->uqueue[head].dlid_path_bits = entry->dlid_path_bits;
+		wc->uqueue[head].port_num = entry->port_num;
+		/* Make sure entry is written before the head index. */
+		smp_wmb();
+	} else
+		wc->kqueue[head] = *entry;
+	wc->head = next;
+
+	if (cq->notify == IB_CQ_NEXT_COMP ||
+	    (cq->notify == IB_CQ_SOLICITED && solicited)) {
+		cq->notify = IB_CQ_NONE;
+		cq->triggered++;
+		/*
+		 * This will cause send_complete() to be called in
+		 * another thread.
+		 */
+		queue_work(qib_cq_wq, &cq->comptask);
+	}
+
+	spin_unlock_irqrestore(&cq->lock, flags);
+}
+
+/**
+ * qib_poll_cq - poll for work completion entries
+ * @ibcq: the completion queue to poll
+ * @num_entries: the maximum number of entries to return
+ * @entry: pointer to array where work completions are placed
+ *
+ * Returns the number of completion entries polled.
+ *
+ * This may be called from interrupt context.  Also called by ib_poll_cq()
+ * in the generic verbs code.
+ */
+int qib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
+{
+	struct qib_cq *cq = to_icq(ibcq);
+	struct qib_cq_wc *wc;
+	unsigned long flags;
+	int npolled;
+	u32 tail;
+
+	/* The kernel can only poll a kernel completion queue */
+	if (cq->ip) {
+		npolled = -EINVAL;
+		goto bail;
+	}
+
+	spin_lock_irqsave(&cq->lock, flags);
+
+	wc = cq->queue;
+	tail = wc->tail;
+	if (tail > (u32) cq->ibcq.cqe)
+		tail = (u32) cq->ibcq.cqe;
+	for (npolled = 0; npolled < num_entries; ++npolled, ++entry) {
+		if (tail == wc->head)
+			break;
+		/* The kernel doesn't need a RMB since it has the lock. */
+		*entry = wc->kqueue[tail];
+		if (tail >= cq->ibcq.cqe)
+			tail = 0;
+		else
+			tail++;
+	}
+	wc->tail = tail;
+
+	spin_unlock_irqrestore(&cq->lock, flags);
+
+bail:
+	return npolled;
+}
+
+static void send_complete(struct work_struct *work)
+{
+	struct qib_cq *cq = container_of(work, struct qib_cq, comptask);
+
+	/*
+	 * The completion handler will most likely rearm the notification
+	 * and poll for all pending entries.  If a new completion entry
+	 * is added while we are in this routine, queue_work()
+	 * won't call us again until we return so we check triggered to
+	 * see if we need to call the handler again.
+	 */
+	for (;;) {
+		u8 triggered = cq->triggered;
+
+		/*
+		 * IPoIB connected mode assumes the callback is from a
+		 * soft IRQ. We simulate this by blocking "bottom halves".
+		 * See the implementation for ipoib_cm_handle_tx_wc(),
+		 * netif_tx_lock_bh() and netif_tx_lock().
+		 */
+		local_bh_disable();
+		cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
+		local_bh_enable();
+
+		if (cq->triggered == triggered)
+			return;
+	}
+}
+
+/**
+ * qib_create_cq - create a completion queue
+ * @ibdev: the device this completion queue is attached to
+ * @entries: the minimum size of the completion queue
+ * @comp_vector: completion vector, unused by the QLogic_IB driver
+ * @context: unused by the QLogic_IB driver
+ * @udata: user data for libibverbs.so
+ *
+ * Returns a pointer to the completion queue or negative errno values
+ * for failure.
+ *
+ * Called by ib_create_cq() in the generic verbs code.
+ */
+struct ib_cq *qib_create_cq(struct ib_device *ibdev, int entries,
+			    int comp_vector, struct ib_ucontext *context,
+			    struct ib_udata *udata)
+{
+	struct qib_ibdev *dev = to_idev(ibdev);
+	struct qib_cq *cq;
+	struct qib_cq_wc *wc;
+	struct ib_cq *ret;
+	u32 sz;
+
+	if (entries < 1 || entries > ib_qib_max_cqes) {
+		ret = ERR_PTR(-EINVAL);
+		goto done;
+	}
+
+	/* Allocate the completion queue structure. */
+	cq = kmalloc(sizeof(*cq), GFP_KERNEL);
+	if (!cq) {
+		ret = ERR_PTR(-ENOMEM);
+		goto done;
+	}
+
+	/*
+	 * Allocate the completion queue entries and head/tail pointers.
+	 * This is allocated separately so that it can be resized and
+	 * also mapped into user space.
+	 * We need to use vmalloc() in order to support mmap and large
+	 * numbers of entries.
+	 */
+	sz = sizeof(*wc);
+	if (udata && udata->outlen >= sizeof(__u64))
+		sz += sizeof(struct ib_uverbs_wc) * (entries + 1);
+	else
+		sz += sizeof(struct ib_wc) * (entries + 1);
+	wc = vmalloc_user(sz);
+	if (!wc) {
+		ret = ERR_PTR(-ENOMEM);
+		goto bail_cq;
+	}
+
+	/*
+	 * Return the address of the WC as the offset to mmap.
+	 * See qib_mmap() for details.
+	 */
+	if (udata && udata->outlen >= sizeof(__u64)) {
+		int err;
+
+		cq->ip = qib_create_mmap_info(dev, sz, context, wc);
+		if (!cq->ip) {
+			ret = ERR_PTR(-ENOMEM);
+			goto bail_wc;
+		}
+
+		err = ib_copy_to_udata(udata, &cq->ip->offset,
+				       sizeof(cq->ip->offset));
+		if (err) {
+			ret = ERR_PTR(err);
+			goto bail_ip;
+		}
+	} else
+		cq->ip = NULL;
+
+	spin_lock(&dev->n_cqs_lock);
+	if (dev->n_cqs_allocated == ib_qib_max_cqs) {
+		spin_unlock(&dev->n_cqs_lock);
+		ret = ERR_PTR(-ENOMEM);
+		goto bail_ip;
+	}
+
+	dev->n_cqs_allocated++;
+	spin_unlock(&dev->n_cqs_lock);
+
+	if (cq->ip) {
+		spin_lock_irq(&dev->pending_lock);
+		list_add(&cq->ip->pending_mmaps, &dev->pending_mmaps);
+		spin_unlock_irq(&dev->pending_lock);
+	}
+
+	/*
+	 * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
+	 * The number of entries must be >= the number requested, or we
+	 * return an error.
+	 */
+	cq->ibcq.cqe = entries;
+	cq->notify = IB_CQ_NONE;
+	cq->triggered = 0;
+	spin_lock_init(&cq->lock);
+	INIT_WORK(&cq->comptask, send_complete);
+	wc->head = 0;
+	wc->tail = 0;
+	cq->queue = wc;
+
+	ret = &cq->ibcq;
+
+	goto done;
+
+bail_ip:
+	kfree(cq->ip);
+bail_wc:
+	vfree(wc);
+bail_cq:
+	kfree(cq);
+done:
+	return ret;
+}
+
+/**
+ * qib_destroy_cq - destroy a completion queue
+ * @ibcq: the completion queue to destroy.
+ *
+ * Returns 0 for success.
+ *
+ * Called by ib_destroy_cq() in the generic verbs code.
+ */
+int qib_destroy_cq(struct ib_cq *ibcq)
+{
+	struct qib_ibdev *dev = to_idev(ibcq->device);
+	struct qib_cq *cq = to_icq(ibcq);
+
+	flush_work(&cq->comptask);
+	spin_lock(&dev->n_cqs_lock);
+	dev->n_cqs_allocated--;
+	spin_unlock(&dev->n_cqs_lock);
+	if (cq->ip)
+		kref_put(&cq->ip->ref, qib_release_mmap_info);
+	else
+		vfree(cq->queue);
+	kfree(cq);
+
+	return 0;
+}
+
+/**
+ * qib_req_notify_cq - change the notification type for a completion queue
+ * @ibcq: the completion queue
+ * @notify_flags: the type of notification to request
+ *
+ * Returns 0 for success.
+ *
+ * This may be called from interrupt context.  Also called by
+ * ib_req_notify_cq() in the generic verbs code.
+ */
+int qib_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
+{
+	struct qib_cq *cq = to_icq(ibcq);
+	unsigned long flags;
+	int ret = 0;
+
+	spin_lock_irqsave(&cq->lock, flags);
+	/*
+	 * Don't change IB_CQ_NEXT_COMP to IB_CQ_SOLICITED but allow
+	 * any other transitions (see C11-31 and C11-32 in ch. 11.4.2.2).
+	 */
+	if (cq->notify != IB_CQ_NEXT_COMP)
+		cq->notify = notify_flags & IB_CQ_SOLICITED_MASK;
+
+	if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
+	    cq->queue->head != cq->queue->tail)
+		ret = 1;
+
+	spin_unlock_irqrestore(&cq->lock, flags);
+
+	return ret;
+}
+
+/**
+ * qib_resize_cq - change the size of the CQ
+ * @ibcq: the completion queue
+ * @cqe: the new minimum number of entries to support
+ * @udata: user data for libibverbs.so
+ *
+ * Returns 0 for success.
+ */
+int qib_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
+{
+	struct qib_cq *cq = to_icq(ibcq);
+	struct qib_cq_wc *old_wc;
+	struct qib_cq_wc *wc;
+	u32 head, tail, n;
+	int ret;
+	u32 sz;
+
+	if (cqe < 1 || cqe > ib_qib_max_cqes) {
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	/*
+	 * Need to use vmalloc() if we want to support large #s of entries.
+	 */
+	sz = sizeof(*wc);
+	if (udata && udata->outlen >= sizeof(__u64))
+		sz += sizeof(struct ib_uverbs_wc) * (cqe + 1);
+	else
+		sz += sizeof(struct ib_wc) * (cqe + 1);
+	wc = vmalloc_user(sz);
+	if (!wc) {
+		ret = -ENOMEM;
+		goto bail;
+	}
+
+	/* Check that we can write the offset to mmap. */
+	if (udata && udata->outlen >= sizeof(__u64)) {
+		__u64 offset = 0;
+
+		ret = ib_copy_to_udata(udata, &offset, sizeof(offset));
+		if (ret)
+			goto bail_free;
+	}
+
+	spin_lock_irq(&cq->lock);
+	/*
+	 * Make sure head and tail are sane since they
+	 * might be user writable.
+	 */
+	old_wc = cq->queue;
+	head = old_wc->head;
+	if (head > (u32) cq->ibcq.cqe)
+		head = (u32) cq->ibcq.cqe;
+	tail = old_wc->tail;
+	if (tail > (u32) cq->ibcq.cqe)
+		tail = (u32) cq->ibcq.cqe;
+	if (head < tail)
+		n = cq->ibcq.cqe + 1 + head - tail;
+	else
+		n = head - tail;
+	if (unlikely((u32)cqe < n)) {
+		ret = -EINVAL;
+		goto bail_unlock;
+	}
+	for (n = 0; tail != head; n++) {
+		if (cq->ip)
+			wc->uqueue[n] = old_wc->uqueue[tail];
+		else
+			wc->kqueue[n] = old_wc->kqueue[tail];
+		if (tail == (u32) cq->ibcq.cqe)
+			tail = 0;
+		else
+			tail++;
+	}
+	cq->ibcq.cqe = cqe;
+	wc->head = n;
+	wc->tail = 0;
+	cq->queue = wc;
+	spin_unlock_irq(&cq->lock);
+
+	vfree(old_wc);
+
+	if (cq->ip) {
+		struct qib_ibdev *dev = to_idev(ibcq->device);
+		struct qib_mmap_info *ip = cq->ip;
+
+		qib_update_mmap_info(dev, ip, sz, wc);
+
+		/*
+		 * Return the offset to mmap.
+		 * See qib_mmap() for details.
+		 */
+		if (udata && udata->outlen >= sizeof(__u64)) {
+			ret = ib_copy_to_udata(udata, &ip->offset,
+					       sizeof(ip->offset));
+			if (ret)
+				goto bail;
+		}
+
+		spin_lock_irq(&dev->pending_lock);
+		if (list_empty(&ip->pending_mmaps))
+			list_add(&ip->pending_mmaps, &dev->pending_mmaps);
+		spin_unlock_irq(&dev->pending_lock);
+	}
+
+	ret = 0;
+	goto bail;
+
+bail_unlock:
+	spin_unlock_irq(&cq->lock);
+bail_free:
+	vfree(wc);
+bail:
+	return ret;
+}
diff --git a/drivers/infiniband/hw/qib/qib_diag.c b/drivers/infiniband/hw/qib/qib_diag.c
new file mode 100644
index 000000000000..ca98dd523752
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_diag.c
@@ -0,0 +1,894 @@
+/*
+ * Copyright (c) 2010 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * This file contains support for diagnostic functions.  It is accessed by
+ * opening the qib_diag device, normally minor number 129.  Diagnostic use
+ * of the QLogic_IB chip may render the chip or board unusable until the
+ * driver is unloaded, or in some cases, until the system is rebooted.
+ *
+ * Accesses to the chip through this interface are not similar to going
+ * through the /sys/bus/pci resource mmap interface.
+ */
+
+#include <linux/io.h>
+#include <linux/pci.h>
+#include <linux/poll.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+
+#include "qib.h"
+#include "qib_common.h"
+
+/*
+ * Each client that opens the diag device must read then write
+ * offset 0, to prevent lossage from random cat or od. diag_state
+ * sequences this "handshake".
+ */
+enum diag_state { UNUSED = 0, OPENED, INIT, READY };
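+/*
+ * Presumed sequence (illustrative): a client's read at offset 0 advances
+ * OPENED -> INIT, a following write at offset 0 advances INIT -> READY,
+ * and only READY clients may access arbitrary offsets.
+ */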
+
+/* State for an individual client. PID so children cannot abuse handshake */
+static struct qib_diag_client {
+	struct qib_diag_client *next;
+	struct qib_devdata *dd;
+	pid_t pid;
+	enum diag_state state;
+} *client_pool;
+
+/*
+ * Get a client struct. Recycled if possible, else kmalloc.
+ * Must be called with qib_mutex held
+ */
+static struct qib_diag_client *get_client(struct qib_devdata *dd)
+{
+	struct qib_diag_client *dc;
+
+	dc = client_pool;
+	if (dc)
+		/* got one from the pool; remove it and use it */
+		client_pool = dc->next;
+	else
+		/* None in pool, alloc and init */
+		dc = kmalloc(sizeof(*dc), GFP_KERNEL);
+
+	if (dc) {
+		dc->next = NULL;
+		dc->dd = dd;
+		dc->pid = current->pid;
+		dc->state = OPENED;
+	}
+	return dc;
+}
+
+/*
+ * Return to pool. Must be called with qib_mutex held
+ */
+static void return_client(struct qib_diag_client *dc)
+{
+	struct qib_devdata *dd = dc->dd;
+	struct qib_diag_client *tdc, *rdc;
+
+	rdc = NULL;
+	if (dc == dd->diag_client) {
+		dd->diag_client = dc->next;
+		rdc = dc;
+	} else {
+		tdc = dc->dd->diag_client;
+		while (tdc) {
+			if (dc == tdc->next) {
+				tdc->next = dc->next;
+				rdc = dc;
+				break;
+			}
+			tdc = tdc->next;
+		}
+	}
+	if (rdc) {
+		rdc->state = UNUSED;
+		rdc->dd = NULL;
+		rdc->pid = 0;
+		rdc->next = client_pool;
+		client_pool = rdc;
+	}
+}
+
+static int qib_diag_open(struct inode *in, struct file *fp);
+static int qib_diag_release(struct inode *in, struct file *fp);
+static ssize_t qib_diag_read(struct file *fp, char __user *data,
+			     size_t count, loff_t *off);
+static ssize_t qib_diag_write(struct file *fp, const char __user *data,
+			      size_t count, loff_t *off);
+
+static const struct file_operations diag_file_ops = {
+	.owner = THIS_MODULE,
+	.write = qib_diag_write,
+	.read = qib_diag_read,
+	.open = qib_diag_open,
+	.release = qib_diag_release
+};
+
+static atomic_t diagpkt_count = ATOMIC_INIT(0);
+static struct cdev *diagpkt_cdev;
+static struct device *diagpkt_device;
+
+static ssize_t qib_diagpkt_write(struct file *fp, const char __user *data,
+				 size_t count, loff_t *off);
+
+static const struct file_operations diagpkt_file_ops = {
+	.owner = THIS_MODULE,
+	.write = qib_diagpkt_write,
+};
+
+int qib_diag_add(struct qib_devdata *dd)
+{
+	char name[16];
+	int ret = 0;
+
+	if (atomic_inc_return(&diagpkt_count) == 1) {
+		ret = qib_cdev_init(QIB_DIAGPKT_MINOR, "ipath_diagpkt",
+				    &diagpkt_file_ops, &diagpkt_cdev,
+				    &diagpkt_device);
+		if (ret)
+			goto done;
+	}
+
+	snprintf(name, sizeof(name), "ipath_diag%d", dd->unit);
+	ret = qib_cdev_init(QIB_DIAG_MINOR_BASE + dd->unit, name,
+			    &diag_file_ops, &dd->diag_cdev,
+			    &dd->diag_device);
+done:
+	return ret;
+}
+
+static void qib_unregister_observers(struct qib_devdata *dd);
+
+void qib_diag_remove(struct qib_devdata *dd)
+{
+	struct qib_diag_client *dc;
+
+	if (atomic_dec_and_test(&diagpkt_count))
+		qib_cdev_cleanup(&diagpkt_cdev, &diagpkt_device);
+
+	qib_cdev_cleanup(&dd->diag_cdev, &dd->diag_device);
+
+	/*
+	 * Return all diag_clients of this device. There should be none,
+	 * as we are "guaranteed" that no clients are still open
+	 */
+	while (dd->diag_client)
+		return_client(dd->diag_client);
+
+	/* Now clean up all unused client structs */
+	while (client_pool) {
+		dc = client_pool;
+		client_pool = dc->next;
+		kfree(dc);
+	}
+	/* Clean up observer list */
+	qib_unregister_observers(dd);
+}
+
+/* qib_remap_ioaddr32 - remap an offset into chip address space to __iomem *
+ *
+ * @dd: the qlogic_ib device
+ * @offs: the offset in chip-space
+ * @cntp: Pointer to max (byte) count for transfer starting at offset
+ * This returns a u32 __iomem * so it can be used for both 64 and 32-bit
+ * mapping. It is needed because with the use of PAT for control of
+ * write-combining, the logically contiguous address-space of the chip
+ * may be split into virtually non-contiguous spaces, with different
+ * attributes, which are then mapped to contiguous physical space
+ * starting from the first BAR.
+ *
+ * The code below makes the same assumptions as were made in
+ * init_chip_wc_pat() (qib_init.c), copied here:
+ * Assumes chip address space looks like:
+ *		- kregs + sregs + cregs + uregs (in any order)
+ *		- piobufs (2K and 4K bufs in either order)
+ *	or:
+ *		- kregs + sregs + cregs (in any order)
+ *		- piobufs (2K and 4K bufs in either order)
+ *		- uregs
+ *
+ * If cntp is non-NULL, it is set to how many bytes from offset can be
+ * accessed.  Returns NULL if the offset is not mapped.
+ */
+static u32 __iomem *qib_remap_ioaddr32(struct qib_devdata *dd, u32 offset,
+				       u32 *cntp)
+{
+	u32 kreglen;
+	u32 snd_bottom, snd_lim = 0;
+	u32 __iomem *krb32 = (u32 __iomem *)dd->kregbase;
+	u32 __iomem *map = NULL;
+	u32 cnt = 0;
+
+	/* First, simplest case, offset is within the first map. */
+	kreglen = (dd->kregend - dd->kregbase) * sizeof(u64);
+	if (offset < kreglen) {
+		map = krb32 + (offset / sizeof(u32));
+		cnt = kreglen - offset;
+		goto mapped;
+	}
+
+	/*
+	 * Next check for user regs, the next most common case,
+	 * and a cheap check because if they are not in the first map
+	 * they are last in chip.
+	 */
+	if (dd->userbase) {
+		/* If user regs mapped, they are after send, so set limit. */
+		u32 ulim = (dd->cfgctxts * dd->ureg_align) + dd->uregbase;
+		snd_lim = dd->uregbase;
+		krb32 = (u32 __iomem *)dd->userbase;
+		if (offset >= dd->uregbase && offset < ulim) {
+			map = krb32 + (offset - dd->uregbase) / sizeof(u32);
+			cnt = ulim - offset;
+			goto mapped;
+		}
+	}
+
+	/*
+	 * Lastly, check for offset within Send Buffers.
+	 * This is gnarly because struct devdata is deliberately vague
+	 * about things like 7322 VL15 buffers, and we are not in
+	 * chip-specific code here, so should not make many assumptions.
+	 * The one we _do_ make is that the only chip that has more sndbufs
+	 * than we admit is the 7322, and it has userregs above that, so
+	 * we know the snd_lim.
+	 */
+	/* Assume 2K buffers are first. */
+	snd_bottom = dd->pio2k_bufbase;
+	if (snd_lim == 0) {
+		u32 tot2k = dd->piobcnt2k * ALIGN(dd->piosize2k, dd->palign);
+		snd_lim = snd_bottom + tot2k;
+	}
+	/* If 4k buffers exist, account for them by bumping
+	 * appropriate limit.
+	 */
+	if (dd->piobcnt4k) {
+		u32 tot4k = dd->piobcnt4k * dd->align4k;
+		u32 offs4k = dd->piobufbase >> 32;
+		if (snd_bottom > offs4k)
+			snd_bottom = offs4k;
+		else {
+			/* 4k above 2k. Bump snd_lim, if needed*/
+			if (!dd->userbase)
+				snd_lim = offs4k + tot4k;
+		}
+	}
+	/*
+	 * Judgement call: can we ignore the space between SendBuffs and
+	 * UserRegs, where we would like to see vl15 buffs, but not more?
+	 */
+	if (offset >= snd_bottom && offset < snd_lim) {
+		offset -= snd_bottom;
+		map = (u32 __iomem *)dd->piobase + (offset / sizeof(u32));
+		cnt = snd_lim - offset;
+	}
+
+mapped:
+	if (cntp)
+		*cntp = cnt;
+	return map;
+}
+
+/**
+ * qib_read_umem64 - read a 64-bit quantity from the chip into user space
+ * @dd: the qlogic_ib device
+ * @uaddr: the location to store the data in user memory
+ * @regoffs: the offset from BAR0 (_NOT_ full pointer, anymore)
+ * @count: number of bytes to copy (multiple of 32 bits)
+ *
+ * This function also localizes all chip memory accesses.
+ * The copy should be written such that we read full cacheline packets
+ * from the chip.  This is usually used for a single qword.
+ *
+ * NOTE:  This assumes the chip address is 64-bit aligned.
+ */
+static int qib_read_umem64(struct qib_devdata *dd, void __user *uaddr,
+			   u32 regoffs, size_t count)
+{
+	const u64 __iomem *reg_addr;
+	const u64 __iomem *reg_end;
+	u32 limit;
+	int ret;
+
+	reg_addr = (const u64 __iomem *)qib_remap_ioaddr32(dd, regoffs, &limit);
+	if (reg_addr == NULL || limit == 0 || !(dd->flags & QIB_PRESENT)) {
+		ret = -EINVAL;
+		goto bail;
+	}
+	if (count >= limit)
+		count = limit;
+	reg_end = reg_addr + (count / sizeof(u64));
+
+	/* not very efficient, but it works for now */
+	while (reg_addr < reg_end) {
+		u64 data = readq(reg_addr);
+
+		if (copy_to_user(uaddr, &data, sizeof(u64))) {
+			ret = -EFAULT;
+			goto bail;
+		}
+		reg_addr++;
+		uaddr += sizeof(u64);
+	}
+	ret = 0;
+bail:
+	return ret;
+}
+
+/**
+ * qib_write_umem64 - write a 64-bit quantity to the chip from user space
+ * @dd: the qlogic_ib device
+ * @regoffs: the offset from BAR0 (_NOT_ full pointer, anymore)
+ * @uaddr: the source of the data in user memory
+ * @count: the number of bytes to copy (multiple of 32 bits)
+ *
+ * This is usually used for a single qword.
+ * NOTE:  This assumes the chip address is 64-bit aligned.
+ */
+static int qib_write_umem64(struct qib_devdata *dd, u32 regoffs,
+			    const void __user *uaddr, size_t count)
+{
+	u64 __iomem *reg_addr;
+	const u64 __iomem *reg_end;
+	u32 limit;
+	int ret;
+
+	reg_addr = (u64 __iomem *)qib_remap_ioaddr32(dd, regoffs, &limit);
+	if (reg_addr == NULL || limit == 0 || !(dd->flags & QIB_PRESENT)) {
+		ret = -EINVAL;
+		goto bail;
+	}
+	if (count >= limit)
+		count = limit;
+	reg_end = reg_addr + (count / sizeof(u64));
+
+	/* not very efficient, but it works for now */
+	while (reg_addr < reg_end) {
+		u64 data;
+		if (copy_from_user(&data, uaddr, sizeof(data))) {
+			ret = -EFAULT;
+			goto bail;
+		}
+		writeq(data, reg_addr);
+
+		reg_addr++;
+		uaddr += sizeof(u64);
+	}
+	ret = 0;
+bail:
+	return ret;
+}
+
+/**
+ * qib_read_umem32 - read a 32-bit quantity from the chip into user space
+ * @dd: the qlogic_ib device
+ * @uaddr: the location to store the data in user memory
+ * @regoffs: the offset from BAR0 (_NOT_ full pointer, anymore)
+ * @count: number of bytes to copy
+ *
+ * Read 32-bit values, not 64-bit, for memories that only
+ * support 32-bit reads; usually a single dword.
+ */
+static int qib_read_umem32(struct qib_devdata *dd, void __user *uaddr,
+			   u32 regoffs, size_t count)
+{
+	const u32 __iomem *reg_addr;
+	const u32 __iomem *reg_end;
+	u32 limit;
+	int ret;
+
+	reg_addr = qib_remap_ioaddr32(dd, regoffs, &limit);
+	if (reg_addr == NULL || limit == 0 || !(dd->flags & QIB_PRESENT)) {
+		ret = -EINVAL;
+		goto bail;
+	}
+	if (count >= limit)
+		count = limit;
+	reg_end = reg_addr + (count / sizeof(u32));
+
+	/* not very efficient, but it works for now */
+	while (reg_addr < reg_end) {
+		u32 data = readl(reg_addr);
+
+		if (copy_to_user(uaddr, &data, sizeof(data))) {
+			ret = -EFAULT;
+			goto bail;
+		}
+
+		reg_addr++;
+		uaddr += sizeof(u32);
+
+	}
+	ret = 0;
+bail:
+	return ret;
+}
+
+/**
+ * qib_write_umem32 - write a 32-bit quantity to the chip from user space
+ * @dd: the qlogic_ib device
+ * @regoffs: the offset from BAR0 (_NOT_ full pointer, anymore)
+ * @uaddr: the source of the data in user memory
+ * @count: number of bytes to copy
+ *
+ * Write 32-bit values, not 64-bit, for memories that only
+ * support 32-bit writes; usually a single dword.
+ */
+static int qib_write_umem32(struct qib_devdata *dd, u32 regoffs,
+			    const void __user *uaddr, size_t count)
+{
+	u32 __iomem *reg_addr;
+	const u32 __iomem *reg_end;
+	u32 limit;
+	int ret;
+
+	reg_addr = qib_remap_ioaddr32(dd, regoffs, &limit);
+	if (reg_addr == NULL || limit == 0 || !(dd->flags & QIB_PRESENT)) {
+		ret = -EINVAL;
+		goto bail;
+	}
+	if (count >= limit)
+		count = limit;
+	reg_end = reg_addr + (count / sizeof(u32));
+
+	while (reg_addr < reg_end) {
+		u32 data;
+
+		if (copy_from_user(&data, uaddr, sizeof(data))) {
+			ret = -EFAULT;
+			goto bail;
+		}
+		writel(data, reg_addr);
+
+		reg_addr++;
+		uaddr += sizeof(u32);
+	}
+	ret = 0;
+bail:
+	return ret;
+}
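+
+/*
+ * The four helpers above are selected by the diag read/write paths
+ * below purely on alignment: a 64-bit access is used only when both
+ * the byte count and the file offset are 8-byte aligned, e.g.
+ *
+ *	int use_32 = (count % 8) || (*off % 8);
+ *
+ *	ret = use_32 ?
+ *		qib_read_umem32(dd, data, (u32) *off, count) :
+ *		qib_read_umem64(dd, data, (u32) *off, count);
+ */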
+
+static int qib_diag_open(struct inode *in, struct file *fp)
+{
+	int unit = iminor(in) - QIB_DIAG_MINOR_BASE;
+	struct qib_devdata *dd;
+	struct qib_diag_client *dc;
+	int ret;
+
+	mutex_lock(&qib_mutex);
+
+	dd = qib_lookup(unit);
+
+	if (dd == NULL || !(dd->flags & QIB_PRESENT) ||
+	    !dd->kregbase) {
+		ret = -ENODEV;
+		goto bail;
+	}
+
+	dc = get_client(dd);
+	if (!dc) {
+		ret = -ENOMEM;
+		goto bail;
+	}
+	dc->next = dd->diag_client;
+	dd->diag_client = dc;
+	fp->private_data = dc;
+	ret = 0;
+bail:
+	mutex_unlock(&qib_mutex);
+
+	return ret;
+}
+
+/**
+ * qib_diagpkt_write - write an IB packet
+ * @fp: the diag data device file pointer
+ * @data: qib_diag_pkt structure saying where to get the packet
+ * @count: size of data to write
+ * @off: unused by this code
+ */
+static ssize_t qib_diagpkt_write(struct file *fp,
+				 const char __user *data,
+				 size_t count, loff_t *off)
+{
+	u32 __iomem *piobuf;
+	u32 plen, clen, pbufn;
+	struct qib_diag_xpkt dp;
+	u32 *tmpbuf = NULL;
+	struct qib_devdata *dd;
+	struct qib_pportdata *ppd;
+	ssize_t ret = 0;
+
+	if (count != sizeof(dp)) {
+		ret = -EINVAL;
+		goto bail;
+	}
+	if (copy_from_user(&dp, data, sizeof(dp))) {
+		ret = -EFAULT;
+		goto bail;
+	}
+
+	dd = qib_lookup(dp.unit);
+	if (!dd || !(dd->flags & QIB_PRESENT) || !dd->kregbase) {
+		ret = -ENODEV;
+		goto bail;
+	}
+	if (!(dd->flags & QIB_INITTED)) {
+		/* no hardware, freeze, etc. */
+		ret = -ENODEV;
+		goto bail;
+	}
+
+	if (dp.version != _DIAG_XPKT_VERS) {
+		qib_dev_err(dd, "Invalid version %u for diagpkt_write\n",
+			    dp.version);
+		ret = -EINVAL;
+		goto bail;
+	}
+	/* send count must be an exact number of dwords */
+	if (dp.len & 3) {
+		ret = -EINVAL;
+		goto bail;
+	}
+	if (!dp.port || dp.port > dd->num_pports) {
+		ret = -EINVAL;
+		goto bail;
+	}
+	ppd = &dd->pport[dp.port - 1];
+
+	/* need total length before first word written */
+	/* +1 word is for the qword padding */
+	plen = sizeof(u32) + dp.len;
+	clen = dp.len >> 2;
+
+	if ((plen + 4) > ppd->ibmaxlen) {
+		ret = -EINVAL;
+		goto bail;      /* before writing pbc */
+	}
+	tmpbuf = vmalloc(plen);
+	if (!tmpbuf) {
+		qib_devinfo(dd->pcidev, "Unable to allocate tmp buffer, "
+			 "failing\n");
+		ret = -ENOMEM;
+		goto bail;
+	}
+
+	if (copy_from_user(tmpbuf,
+			   (const void __user *) (unsigned long) dp.data,
+			   dp.len)) {
+		ret = -EFAULT;
+		goto bail;
+	}
+
+	plen >>= 2;             /* in dwords */
+
+	if (dp.pbc_wd == 0)
+		dp.pbc_wd = plen;
+
+	piobuf = dd->f_getsendbuf(ppd, dp.pbc_wd, &pbufn);
+	if (!piobuf) {
+		ret = -EBUSY;
+		goto bail;
+	}
+	/* disarm it just to be extra sure */
+	dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(pbufn));
+
+	/* disable header check on pbufn for this packet */
+	dd->f_txchk_change(dd, pbufn, 1, TXCHK_CHG_TYPE_DIS1, NULL);
+
+	writeq(dp.pbc_wd, piobuf);
+	/*
+	 * Copy all but the trigger word, then flush, so it's written
+	 * to chip before trigger word, then write trigger word, then
+	 * flush again, so packet is sent.
+	 */
+	if (dd->flags & QIB_PIO_FLUSH_WC) {
+		qib_flush_wc();
+		qib_pio_copy(piobuf + 2, tmpbuf, clen - 1);
+		qib_flush_wc();
+		__raw_writel(tmpbuf[clen - 1], piobuf + clen + 1);
+	} else
+		qib_pio_copy(piobuf + 2, tmpbuf, clen);
+
+	if (dd->flags & QIB_USE_SPCL_TRIG) {
+		u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;
+
+		qib_flush_wc();
+		__raw_writel(0xaebecede, piobuf + spcl_off);
+	}
+
+	/*
+	 * Ensure buffer is written to the chip, then re-enable
+	 * header checks (if supported by chip).  The txchk
+	 * code will ensure seen by chip before returning.
+	 */
+	qib_flush_wc();
+	qib_sendbuf_done(dd, pbufn);
+	dd->f_txchk_change(dd, pbufn, 1, TXCHK_CHG_TYPE_ENAB1, NULL);
+
+	ret = sizeof(dp);
+
+bail:
+	vfree(tmpbuf);
+	return ret;
+}
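+
+/*
+ * Illustrative user-space sketch (hypothetical, not shipped with the
+ * driver) of driving qib_diagpkt_write(): the write() payload is the
+ * qib_diag_xpkt descriptor itself, while dp.data points at the packet
+ * body, whose length must be a whole number of dwords.
+ *
+ *	struct qib_diag_xpkt dp = {
+ *		.version = _DIAG_XPKT_VERS,
+ *		.unit = 0,
+ *		.port = 1,
+ *		.len = pkt_dwords << 2,
+ *		.data = (u64) (unsigned long) pkt_buf,
+ *	};
+ *
+ *	if (write(fd, &dp, sizeof(dp)) != sizeof(dp))
+ *		(handle error)
+ */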
+
+static int qib_diag_release(struct inode *in, struct file *fp)
+{
+	mutex_lock(&qib_mutex);
+	return_client(fp->private_data);
+	fp->private_data = NULL;
+	mutex_unlock(&qib_mutex);
+	return 0;
+}
+
+/*
+ * Chip-specific code calls to register its interest in
+ * a specific range.
+ */
+struct diag_observer_list_elt {
+	struct diag_observer_list_elt *next;
+	const struct diag_observer *op;
+};
+
+int qib_register_observer(struct qib_devdata *dd,
+			  const struct diag_observer *op)
+{
+	struct diag_observer_list_elt *olp;
+	unsigned long flags;
+	int ret = -EINVAL;
+
+	if (!dd || !op)
+		goto bail;
+	ret = -ENOMEM;
+	olp = vmalloc(sizeof(*olp));
+	if (!olp) {
+		printk(KERN_ERR QIB_DRV_NAME ": vmalloc for observer failed\n");
+		goto bail;
+	}
+
+	spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
+	olp->op = op;
+	olp->next = dd->diag_observer_list;
+	dd->diag_observer_list = olp;
+	spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);
+	ret = 0;
+bail:
+	return ret;
+}
+
+/* Remove all registered observers when device is closed */
+static void qib_unregister_observers(struct qib_devdata *dd)
+{
+	struct diag_observer_list_elt *olp;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
+	olp = dd->diag_observer_list;
+	while (olp) {
+		/* Pop one observer, let go of lock */
+		dd->diag_observer_list = olp->next;
+		spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);
+		vfree(olp);
+		/* try again. */
+		spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
+		olp = dd->diag_observer_list;
+	}
+	spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);
+}
+
+/*
+ * Find the observer, if any, for the specified address. Initial implementation
+ * is simple stack of observers. This must be called with diag transaction
+ * lock held.
+ */
+static const struct diag_observer *diag_get_observer(struct qib_devdata *dd,
+						     u32 addr)
+{
+	struct diag_observer_list_elt *olp;
+	const struct diag_observer *op = NULL;
+
+	olp = dd->diag_observer_list;
+	while (olp) {
+		op = olp->op;
+		if (addr >= op->bottom && addr <= op->top)
+			break;
+		olp = olp->next;
+	}
+	if (!olp)
+		op = NULL;
+
+	return op;
+}
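+
+/*
+ * Sketch of how chip-specific code is expected to hook a register
+ * range (field names taken from the uses above; the hook function
+ * and offsets are hypothetical):
+ *
+ *	static const struct diag_observer my_observer = {
+ *		.hook = my_hook,        (called under qib_diag_trans_lock)
+ *		.bottom = MY_REG_OFFS,
+ *		.top = MY_REG_OFFS + MY_REG_LEN - 1,
+ *	};
+ *
+ *	ret = qib_register_observer(dd, &my_observer);
+ */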
+
+static ssize_t qib_diag_read(struct file *fp, char __user *data,
+			     size_t count, loff_t *off)
+{
+	struct qib_diag_client *dc = fp->private_data;
+	struct qib_devdata *dd = dc->dd;
+	void __iomem *kreg_base;
+	ssize_t ret;
+
+	if (dc->pid != current->pid) {
+		ret = -EPERM;
+		goto bail;
+	}
+
+	kreg_base = dd->kregbase;
+
+	if (count == 0)
+		ret = 0;
+	else if ((count % 4) || (*off % 4))
+		/* address or length is not 32-bit aligned, hence invalid */
+		ret = -EINVAL;
+	else if (dc->state < READY && (*off || count != 8))
+		ret = -EINVAL;  /* prevent cat /dev/qib_diag* */
+	else {
+		unsigned long flags;
+		u64 data64 = 0;
+		int use_32;
+		const struct diag_observer *op;
+
+		use_32 = (count % 8) || (*off % 8);
+		ret = -1;
+		spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
+		/*
+		 * Check for observer on this address range.
+		 * we only support a single 32 or 64-bit read
+		 * via observer, currently.
+		 */
+		op = diag_get_observer(dd, *off);
+		if (op) {
+			u32 offset = *off;
+			ret = op->hook(dd, op, offset, &data64, 0, use_32);
+		}
+		/*
+		 * We need to release lock before any copy_to_user(),
+		 * whether implicit in qib_read_umem* or explicit below.
+		 */
+		spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);
+		if (!op) {
+			if (use_32)
+				/*
+				 * Address or length is not 64-bit aligned;
+				 * do 32-bit rd
+				 */
+				ret = qib_read_umem32(dd, data, (u32) *off,
+						      count);
+			else
+				ret = qib_read_umem64(dd, data, (u32) *off,
+						      count);
+		} else if (ret == count) {
+			/* Below finishes case where observer existed */
+			ret = copy_to_user(data, &data64, use_32 ?
+					   sizeof(u32) : sizeof(u64));
+			if (ret)
+				ret = -EFAULT;
+		}
+	}
+
+	if (ret >= 0) {
+		*off += count;
+		ret = count;
+		if (dc->state == OPENED)
+			dc->state = INIT;
+	}
+bail:
+	return ret;
+}
+
+static ssize_t qib_diag_write(struct file *fp, const char __user *data,
+			      size_t count, loff_t *off)
+{
+	struct qib_diag_client *dc = fp->private_data;
+	struct qib_devdata *dd = dc->dd;
+	void __iomem *kreg_base;
+	ssize_t ret;
+
+	if (dc->pid != current->pid) {
+		ret = -EPERM;
+		goto bail;
+	}
+
+	kreg_base = dd->kregbase;
+
+	if (count == 0)
+		ret = 0;
+	else if ((count % 4) || (*off % 4))
+		/* address or length is not 32-bit aligned, hence invalid */
+		ret = -EINVAL;
+	else if (dc->state < READY &&
+		((*off || count != 8) || dc->state != INIT))
+		/* No writes except second-step of init seq */
+		ret = -EINVAL;  /* before any other write allowed */
+	else {
+		unsigned long flags;
+		const struct diag_observer *op = NULL;
+		int use_32 =  (count % 8) || (*off % 8);
+
+		/*
+		 * Check for observer on this address range.
+		 * We only support a single 32 or 64-bit write
+		 * via observer, currently. This helps, because
+		 * we would otherwise have to jump through hoops
+		 * to make "diag transaction" meaningful when we
+		 * cannot do a copy_from_user while holding the lock.
+		 */
+		if (count == 4 || count == 8) {
+			u64 data64;
+			u32 offset = *off;
+			ret = copy_from_user(&data64, data, count);
+			if (ret) {
+				ret = -EFAULT;
+				goto bail;
+			}
+			spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
+			op = diag_get_observer(dd, *off);
+			if (op)
+				ret = op->hook(dd, op, offset, &data64, ~0ULL,
+					       use_32);
+			spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);
+		}
+
+		if (!op) {
+			if (use_32)
+				/*
+				 * Address or length is not 64-bit aligned;
+				 * do 32-bit write
+				 */
+				ret = qib_write_umem32(dd, (u32) *off, data,
+						       count);
+			else
+				ret = qib_write_umem64(dd, (u32) *off, data,
+						       count);
+		}
+	}
+
+	if (ret >= 0) {
+		*off += count;
+		ret = count;
+		if (dc->state == INIT)
+			dc->state = READY; /* all read/write OK now */
+	}
+bail:
+	return ret;
+}
diff --git a/drivers/infiniband/hw/qib/qib_dma.c b/drivers/infiniband/hw/qib/qib_dma.c
new file mode 100644
index 000000000000..2920bb39a65b
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_dma.c
@@ -0,0 +1,182 @@
+/*
+ * Copyright (c) 2006, 2009, 2010 QLogic, Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/types.h>
+#include <linux/scatterlist.h>
+
+#include "qib_verbs.h"
+
+#define BAD_DMA_ADDRESS ((u64) 0)
+
+/*
+ * The following functions implement driver-specific replacements
+ * for the ib_dma_*() functions.
+ *
+ * These functions return kernel virtual addresses instead of
+ * device bus addresses since the driver uses the CPU to copy
+ * data instead of using hardware DMA.
+ */
+
+static int qib_mapping_error(struct ib_device *dev, u64 dma_addr)
+{
+	return dma_addr == BAD_DMA_ADDRESS;
+}
+
+static u64 qib_dma_map_single(struct ib_device *dev, void *cpu_addr,
+			      size_t size, enum dma_data_direction direction)
+{
+	BUG_ON(!valid_dma_direction(direction));
+	return (u64) cpu_addr;
+}
+
+static void qib_dma_unmap_single(struct ib_device *dev, u64 addr, size_t size,
+				 enum dma_data_direction direction)
+{
+	BUG_ON(!valid_dma_direction(direction));
+}
+
+static u64 qib_dma_map_page(struct ib_device *dev, struct page *page,
+			    unsigned long offset, size_t size,
+			    enum dma_data_direction direction)
+{
+	u64 addr;
+
+	BUG_ON(!valid_dma_direction(direction));
+
+	if (offset + size > PAGE_SIZE) {
+		addr = BAD_DMA_ADDRESS;
+		goto done;
+	}
+
+	addr = (u64) page_address(page);
+	if (addr)
+		addr += offset;
+	/* TODO: handle highmem pages */
+
+done:
+	return addr;
+}
+
+static void qib_dma_unmap_page(struct ib_device *dev, u64 addr, size_t size,
+			       enum dma_data_direction direction)
+{
+	BUG_ON(!valid_dma_direction(direction));
+}
+
+static int qib_map_sg(struct ib_device *dev, struct scatterlist *sgl,
+		      int nents, enum dma_data_direction direction)
+{
+	struct scatterlist *sg;
+	u64 addr;
+	int i;
+	int ret = nents;
+
+	BUG_ON(!valid_dma_direction(direction));
+
+	for_each_sg(sgl, sg, nents, i) {
+		addr = (u64) page_address(sg_page(sg));
+		/* TODO: handle highmem pages */
+		if (!addr) {
+			ret = 0;
+			break;
+		}
+	}
+	return ret;
+}
+
+static void qib_unmap_sg(struct ib_device *dev,
+			 struct scatterlist *sg, int nents,
+			 enum dma_data_direction direction)
+{
+	BUG_ON(!valid_dma_direction(direction));
+}
+
+static u64 qib_sg_dma_address(struct ib_device *dev, struct scatterlist *sg)
+{
+	u64 addr = (u64) page_address(sg_page(sg));
+
+	if (addr)
+		addr += sg->offset;
+	return addr;
+}
+
+static unsigned int qib_sg_dma_len(struct ib_device *dev,
+				   struct scatterlist *sg)
+{
+	return sg->length;
+}
+
+static void qib_sync_single_for_cpu(struct ib_device *dev, u64 addr,
+				    size_t size, enum dma_data_direction dir)
+{
+}
+
+static void qib_sync_single_for_device(struct ib_device *dev, u64 addr,
+				       size_t size,
+				       enum dma_data_direction dir)
+{
+}
+
+static void *qib_dma_alloc_coherent(struct ib_device *dev, size_t size,
+				    u64 *dma_handle, gfp_t flag)
+{
+	struct page *p;
+	void *addr = NULL;
+
+	p = alloc_pages(flag, get_order(size));
+	if (p)
+		addr = page_address(p);
+	if (dma_handle)
+		*dma_handle = (u64) addr;
+	return addr;
+}
+
+static void qib_dma_free_coherent(struct ib_device *dev, size_t size,
+				  void *cpu_addr, u64 dma_handle)
+{
+	free_pages((unsigned long) cpu_addr, get_order(size));
+}
+
+struct ib_dma_mapping_ops qib_dma_mapping_ops = {
+	.mapping_error = qib_mapping_error,
+	.map_single = qib_dma_map_single,
+	.unmap_single = qib_dma_unmap_single,
+	.map_page = qib_dma_map_page,
+	.unmap_page = qib_dma_unmap_page,
+	.map_sg = qib_map_sg,
+	.unmap_sg = qib_unmap_sg,
+	.dma_address = qib_sg_dma_address,
+	.dma_len = qib_sg_dma_len,
+	.sync_single_for_cpu = qib_sync_single_for_cpu,
+	.sync_single_for_device = qib_sync_single_for_device,
+	.alloc_coherent = qib_dma_alloc_coherent,
+	.free_coherent = qib_dma_free_coherent
+};
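+
+/*
+ * These ops are plugged into the ib_device by the verbs registration
+ * code (see qib_verbs.c), roughly:
+ *
+ *	ibdev->dma_ops = &qib_dma_mapping_ops;
+ *
+ * so that the core ib_dma_*() wrappers dispatch here rather than to
+ * the real DMA API.
+ */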
diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c
new file mode 100644
index 000000000000..f15ce076ac49
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_driver.c
@@ -0,0 +1,665 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/pci.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+
+#include "qib.h"
+
+/*
+ * The size has to be longer than this string, so we can append
+ * board/chip information to it in the init code.
+ */
+const char ib_qib_version[] = QIB_IDSTR "\n";
+
+DEFINE_SPINLOCK(qib_devs_lock);
+LIST_HEAD(qib_dev_list);
+DEFINE_MUTEX(qib_mutex);	/* general driver use */
+
+unsigned qib_ibmtu;
+module_param_named(ibmtu, qib_ibmtu, uint, S_IRUGO);
+MODULE_PARM_DESC(ibmtu, "Set max IB MTU (0=2KB, 1=256, 2=512, ... 5=4096)");
+
+unsigned qib_compat_ddr_negotiate = 1;
+module_param_named(compat_ddr_negotiate, qib_compat_ddr_negotiate, uint,
+		   S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(compat_ddr_negotiate,
+		 "Attempt pre-IBTA 1.2 DDR speed negotiation");
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("QLogic <support@qlogic.com>");
+MODULE_DESCRIPTION("QLogic IB driver");
+
+/*
+ * QIB_PIO_MAXIBHDR is the max IB header size allowed for in our
+ * PIO send buffers.  This is well beyond anything currently
+ * defined in the InfiniBand spec.
+ */
+#define QIB_PIO_MAXIBHDR 128
+
+struct qlogic_ib_stats qib_stats;
+
+const char *qib_get_unit_name(int unit)
+{
+	static char iname[16];
+
+	snprintf(iname, sizeof iname, "infinipath%u", unit);
+	return iname;
+}
+
+/*
+ * Return count of units with at least one port ACTIVE.
+ */
+int qib_count_active_units(void)
+{
+	struct qib_devdata *dd;
+	struct qib_pportdata *ppd;
+	unsigned long flags;
+	int pidx, nunits_active = 0;
+
+	spin_lock_irqsave(&qib_devs_lock, flags);
+	list_for_each_entry(dd, &qib_dev_list, list) {
+		if (!(dd->flags & QIB_PRESENT) || !dd->kregbase)
+			continue;
+		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+			ppd = dd->pport + pidx;
+			if (ppd->lid && (ppd->lflags & (QIBL_LINKINIT |
+					 QIBL_LINKARMED | QIBL_LINKACTIVE))) {
+				nunits_active++;
+				break;
+			}
+		}
+	}
+	spin_unlock_irqrestore(&qib_devs_lock, flags);
+	return nunits_active;
+}
+
+/*
+ * Return count of all units, optionally return in arguments
+ * the number of usable (present) units, and the number of
+ * ports that are up.
+ */
+int qib_count_units(int *npresentp, int *nupp)
+{
+	int nunits = 0, npresent = 0, nup = 0;
+	struct qib_devdata *dd;
+	unsigned long flags;
+	int pidx;
+	struct qib_pportdata *ppd;
+
+	spin_lock_irqsave(&qib_devs_lock, flags);
+
+	list_for_each_entry(dd, &qib_dev_list, list) {
+		nunits++;
+		if ((dd->flags & QIB_PRESENT) && dd->kregbase)
+			npresent++;
+		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+			ppd = dd->pport + pidx;
+			if (ppd->lid && (ppd->lflags & (QIBL_LINKINIT |
+					 QIBL_LINKARMED | QIBL_LINKACTIVE)))
+				nup++;
+		}
+	}
+
+	spin_unlock_irqrestore(&qib_devs_lock, flags);
+
+	if (npresentp)
+		*npresentp = npresent;
+	if (nupp)
+		*nupp = nup;
+
+	return nunits;
+}
+
+/**
+ * qib_wait_linkstate - wait for an IB link state change to occur
+ * @ppd: the qlogic_ib per-port data
+ * @state: the state to wait for
+ * @msecs: the number of milliseconds to wait
+ *
+ * Wait up to msecs milliseconds for an IB link state change to occur.
+ * For now, take the easy polling route.  Currently used only by
+ * qib_set_linkstate.  Returns 0 if the state is reached, otherwise
+ * -ETIMEDOUT.  state can have multiple state bits set, to match any
+ * of several transitions.
+ */
+int qib_wait_linkstate(struct qib_pportdata *ppd, u32 state, int msecs)
+{
+	int ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ppd->lflags_lock, flags);
+	if (ppd->state_wanted) {
+		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+		ret = -EBUSY;
+		goto bail;
+	}
+	ppd->state_wanted = state;
+	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+	wait_event_interruptible_timeout(ppd->state_wait,
+					 (ppd->lflags & state),
+					 msecs_to_jiffies(msecs));
+	spin_lock_irqsave(&ppd->lflags_lock, flags);
+	ppd->state_wanted = 0;
+	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+
+	if (!(ppd->lflags & state))
+		ret = -ETIMEDOUT;
+	else
+		ret = 0;
+bail:
+	return ret;
+}
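+
+/*
+ * Typical caller pattern (as used by qib_set_linkstate() below):
+ * wait briefly for any of the requested lflags bits, e.g.
+ *
+ *	ret = qib_wait_linkstate(ppd, QIBL_LINKACTIVE, 10);
+ *
+ * where 10 is the timeout in milliseconds.
+ */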
+
+int qib_set_linkstate(struct qib_pportdata *ppd, u8 newstate)
+{
+	u32 lstate;
+	int ret;
+	struct qib_devdata *dd = ppd->dd;
+	unsigned long flags;
+
+	switch (newstate) {
+	case QIB_IB_LINKDOWN_ONLY:
+		dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
+				 IB_LINKCMD_DOWN | IB_LINKINITCMD_NOP);
+		/* don't wait */
+		ret = 0;
+		goto bail;
+
+	case QIB_IB_LINKDOWN:
+		dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
+				 IB_LINKCMD_DOWN | IB_LINKINITCMD_POLL);
+		/* don't wait */
+		ret = 0;
+		goto bail;
+
+	case QIB_IB_LINKDOWN_SLEEP:
+		dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
+				 IB_LINKCMD_DOWN | IB_LINKINITCMD_SLEEP);
+		/* don't wait */
+		ret = 0;
+		goto bail;
+
+	case QIB_IB_LINKDOWN_DISABLE:
+		dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
+				 IB_LINKCMD_DOWN | IB_LINKINITCMD_DISABLE);
+		/* don't wait */
+		ret = 0;
+		goto bail;
+
+	case QIB_IB_LINKARM:
+		if (ppd->lflags & QIBL_LINKARMED) {
+			ret = 0;
+			goto bail;
+		}
+		if (!(ppd->lflags & (QIBL_LINKINIT | QIBL_LINKACTIVE))) {
+			ret = -EINVAL;
+			goto bail;
+		}
+		/*
+		 * Since the port can be ACTIVE when we ask for ARMED,
+		 * clear QIBL_LINKV so we can wait for a transition.
+		 * If the link isn't ARMED, then something else happened
+		 * and there is no point waiting for ARMED.
+		 */
+		spin_lock_irqsave(&ppd->lflags_lock, flags);
+		ppd->lflags &= ~QIBL_LINKV;
+		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+		dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
+				 IB_LINKCMD_ARMED | IB_LINKINITCMD_NOP);
+		lstate = QIBL_LINKV;
+		break;
+
+	case QIB_IB_LINKACTIVE:
+		if (ppd->lflags & QIBL_LINKACTIVE) {
+			ret = 0;
+			goto bail;
+		}
+		if (!(ppd->lflags & QIBL_LINKARMED)) {
+			ret = -EINVAL;
+			goto bail;
+		}
+		dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
+				 IB_LINKCMD_ACTIVE | IB_LINKINITCMD_NOP);
+		lstate = QIBL_LINKACTIVE;
+		break;
+
+	default:
+		ret = -EINVAL;
+		goto bail;
+	}
+	ret = qib_wait_linkstate(ppd, lstate, 10);
+
+bail:
+	return ret;
+}
+
+/*
+ * Get address of eager buffer from its index (allocated in chunks, not
+ * contiguous).
+ */
+static inline void *qib_get_egrbuf(const struct qib_ctxtdata *rcd, u32 etail)
+{
+	const u32 chunk = etail / rcd->rcvegrbufs_perchunk;
+	const u32 idx =  etail % rcd->rcvegrbufs_perchunk;
+
+	return rcd->rcvegrbuf[chunk] + idx * rcd->dd->rcvegrbufsize;
+}
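+
+/*
+ * Worked example of the chunk math above (the perchunk value is only
+ * illustrative): with rcvegrbufs_perchunk of 32 and etail of 40,
+ * chunk is 40 / 32 = 1 and idx is 40 % 32 = 8, i.e. the ninth entry
+ * of the second chunk.
+ */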
+
+/*
+ * Returns 1 if error was a CRC, else 0.
+ * Needed for some chip's synthesized error counters.
+ */
+static u32 qib_rcv_hdrerr(struct qib_pportdata *ppd, u32 ctxt,
+			  u32 eflags, u32 l, u32 etail, __le32 *rhf_addr,
+			  struct qib_message_header *hdr)
+{
+	u32 ret = 0;
+
+	if (eflags & (QLOGIC_IB_RHF_H_ICRCERR | QLOGIC_IB_RHF_H_VCRCERR))
+		ret = 1;
+	return ret;
+}
+
+/**
+ * qib_kreceive - receive a packet
+ * @rcd: the qlogic_ib context
+ * @llic: gets count of good packets needed to clear lli,
+ *          (used with chips that need to track crcs for lli)
+ * @npkts: returns number of packets consumed
+ *
+ * Called from interrupt handler for errors or receive interrupt.
+ * Returns number of CRC error packets, needed by some chips for
+ * local link integrity tracking.  crcs are adjusted down by following
+ * good packets, if any, and the count of good packets is also tracked.
+ */
+u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts)
+{
+	struct qib_devdata *dd = rcd->dd;
+	struct qib_pportdata *ppd = rcd->ppd;
+	__le32 *rhf_addr;
+	void *ebuf;
+	const u32 rsize = dd->rcvhdrentsize;        /* words */
+	const u32 maxcnt = dd->rcvhdrcnt * rsize;   /* words */
+	u32 etail = -1, l, hdrqtail;
+	struct qib_message_header *hdr;
+	u32 eflags, etype, tlen, i = 0, updegr = 0, crcs = 0;
+	int last;
+	u64 lval;
+	struct qib_qp *qp, *nqp;
+
+	l = rcd->head;
+	rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset;
+	if (dd->flags & QIB_NODMA_RTAIL) {
+		u32 seq = qib_hdrget_seq(rhf_addr);
+		if (seq != rcd->seq_cnt)
+			goto bail;
+		hdrqtail = 0;
+	} else {
+		hdrqtail = qib_get_rcvhdrtail(rcd);
+		if (l == hdrqtail)
+			goto bail;
+		smp_rmb();  /* prevent speculative reads of dma'ed hdrq */
+	}
+
+	for (last = 0, i = 1; !last; i += !last) {
+		hdr = dd->f_get_msgheader(dd, rhf_addr);
+		eflags = qib_hdrget_err_flags(rhf_addr);
+		etype = qib_hdrget_rcv_type(rhf_addr);
+		/* total length */
+		tlen = qib_hdrget_length_in_bytes(rhf_addr);
+		ebuf = NULL;
+		if ((dd->flags & QIB_NODMA_RTAIL) ?
+		    qib_hdrget_use_egr_buf(rhf_addr) :
+		    (etype != RCVHQ_RCV_TYPE_EXPECTED)) {
+			etail = qib_hdrget_index(rhf_addr);
+			updegr = 1;
+			if (tlen > sizeof(*hdr) ||
+			    etype >= RCVHQ_RCV_TYPE_NON_KD)
+				ebuf = qib_get_egrbuf(rcd, etail);
+		}
+		if (!eflags) {
+			u16 lrh_len = be16_to_cpu(hdr->lrh[2]) << 2;
+
+			if (lrh_len != tlen) {
+				qib_stats.sps_lenerrs++;
+				goto move_along;
+			}
+		}
+		if (etype == RCVHQ_RCV_TYPE_NON_KD && !eflags &&
+		    ebuf == NULL &&
+		    tlen > (dd->rcvhdrentsize - 2 + 1 -
+				qib_hdrget_offset(rhf_addr)) << 2) {
+			goto move_along;
+		}
+
+		/*
+		 * Both tiderr and qibhdrerr are set for all plain IB
+		 * packets; only qibhdrerr should be set.
+		 */
+		if (unlikely(eflags))
+			crcs += qib_rcv_hdrerr(ppd, rcd->ctxt, eflags, l,
+					       etail, rhf_addr, hdr);
+		else if (etype == RCVHQ_RCV_TYPE_NON_KD) {
+			qib_ib_rcv(rcd, hdr, ebuf, tlen);
+			if (crcs)
+				crcs--;
+			else if (llic && *llic)
+				--*llic;
+		}
+move_along:
+		l += rsize;
+		if (l >= maxcnt)
+			l = 0;
+		rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset;
+		if (dd->flags & QIB_NODMA_RTAIL) {
+			u32 seq = qib_hdrget_seq(rhf_addr);
+
+			if (++rcd->seq_cnt > 13)
+				rcd->seq_cnt = 1;
+			if (seq != rcd->seq_cnt)
+				last = 1;
+		} else if (l == hdrqtail)
+			last = 1;
+		/*
+		 * Update head regs etc., every 16 packets, if not last pkt,
+		 * to help prevent rcvhdrq overflows, when many packets
+		 * are processed and queue is nearly full.
+		 * Don't request an interrupt for intermediate updates.
+		 */
+		lval = l;
+		if (!last && !(i & 0xf)) {
+			dd->f_update_usrhead(rcd, lval, updegr, etail);
+			updegr = 0;
+		}
+	}
+
+	rcd->head = l;
+	rcd->pkt_count += i;
+
+	/*
+	 * Iterate over all QPs waiting to respond.
+	 * The list won't change since the IRQ is only run on one CPU.
+	 */
+	list_for_each_entry_safe(qp, nqp, &rcd->qp_wait_list, rspwait) {
+		list_del_init(&qp->rspwait);
+		if (qp->r_flags & QIB_R_RSP_NAK) {
+			qp->r_flags &= ~QIB_R_RSP_NAK;
+			qib_send_rc_ack(qp);
+		}
+		if (qp->r_flags & QIB_R_RSP_SEND) {
+			unsigned long flags;
+
+			qp->r_flags &= ~QIB_R_RSP_SEND;
+			spin_lock_irqsave(&qp->s_lock, flags);
+			if (ib_qib_state_ops[qp->state] &
+					QIB_PROCESS_OR_FLUSH_SEND)
+				qib_schedule_send(qp);
+			spin_unlock_irqrestore(&qp->s_lock, flags);
+		}
+		if (atomic_dec_and_test(&qp->refcount))
+			wake_up(&qp->wait);
+	}
+
+bail:
+	/* Report number of packets consumed */
+	if (npkts)
+		*npkts = i;
+
+	/*
+	 * Always write head at end, and setup rcv interrupt, even
+	 * if no packets were processed.
+	 */
+	lval = (u64)rcd->head | dd->rhdrhead_intr_off;
+	dd->f_update_usrhead(rcd, lval, updegr, etail);
+	return crcs;
+}
+
+/**
+ * qib_set_mtu - set the MTU
+ * @ppd: the per-port data
+ * @arg: the new MTU
+ *
+ * We can handle "any" incoming size; the issue here is whether we
+ * need to restrict our outgoing size.  For now, we don't do any
+ * sanity checking on this, and we don't deal with what happens to
+ * programs that are already running when the size changes.
+ * NOTE: changing the MTU will usually cause the IBC to go back to
+ * link INIT state...
+ */
+int qib_set_mtu(struct qib_pportdata *ppd, u16 arg)
+{
+	u32 piosize;
+	int ret, chk;
+
+	if (arg != 256 && arg != 512 && arg != 1024 && arg != 2048 &&
+	    arg != 4096) {
+		ret = -EINVAL;
+		goto bail;
+	}
+	chk = ib_mtu_enum_to_int(qib_ibmtu);
+	if (chk > 0 && arg > chk) {
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	piosize = ppd->ibmaxlen;
+	ppd->ibmtu = arg;
+
+	if (arg >= (piosize - QIB_PIO_MAXIBHDR)) {
+		/* Only if it's not the initial value (or reset to it) */
+		if (piosize != ppd->init_ibmaxlen) {
+			if (arg > piosize && arg <= ppd->init_ibmaxlen)
+				piosize = ppd->init_ibmaxlen - 2 * sizeof(u32);
+			ppd->ibmaxlen = piosize;
+		}
+	} else if ((arg + QIB_PIO_MAXIBHDR) != ppd->ibmaxlen) {
+		piosize = arg + QIB_PIO_MAXIBHDR - 2 * sizeof(u32);
+		ppd->ibmaxlen = piosize;
+	}
+
+	ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_MTU, 0);
+
+	ret = 0;
+
+bail:
+	return ret;
+}
+
+int qib_set_lid(struct qib_pportdata *ppd, u32 lid, u8 lmc)
+{
+	struct qib_devdata *dd = ppd->dd;
+
+	ppd->lid = lid;
+	ppd->lmc = lmc;
+
+	dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LIDLMC,
+			 lid | (~((1U << lmc) - 1)) << 16);
+
+	qib_devinfo(dd->pcidev, "IB%u:%u got a lid: 0x%x\n",
+		    dd->unit, ppd->port, lid);
+
+	return 0;
+}
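+
+/*
+ * Worked example of the LIDLMC encoding above: for lid 0x10 and
+ * lmc 2, the low 16 bits carry the LID (0x0010) and the high 16 bits
+ * carry the mask ~((1U << 2) - 1) = 0xFFFC, so the chip matches any
+ * of the four LIDs 0x10..0x13.
+ */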
+
+/*
+ * The following deals with the "obviously simple" task of overriding the
+ * state of the LEDs, which normally indicate link physical and logical
+ * status.  The complications arise in dealing with different hardware
+ * mappings and the board-dependent routine being called from interrupts.
+ * And then there's the requirement to _flash_ them.
+ */
+#define LED_OVER_FREQ_SHIFT 8
+#define LED_OVER_FREQ_MASK (0xFF<<LED_OVER_FREQ_SHIFT)
+/* Below is "non-zero" to force override, but both actual LEDs are off */
+#define LED_OVER_BOTH_OFF (8)
+
+static void qib_run_led_override(unsigned long opaque)
+{
+	struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
+	struct qib_devdata *dd = ppd->dd;
+	int timeoff;
+	int ph_idx;
+
+	if (!(dd->flags & QIB_INITTED))
+		return;
+
+	ph_idx = ppd->led_override_phase++ & 1;
+	ppd->led_override = ppd->led_override_vals[ph_idx];
+	timeoff = ppd->led_override_timeoff;
+
+	dd->f_setextled(ppd, 1);
+	/*
+	 * don't re-fire the timer if user asked for it to be off; we let
+	 * it fire one more time after they turn it off to simplify
+	 */
+	if (ppd->led_override_vals[0] || ppd->led_override_vals[1])
+		mod_timer(&ppd->led_override_timer, jiffies + timeoff);
+}
+
+void qib_set_led_override(struct qib_pportdata *ppd, unsigned int val)
+{
+	struct qib_devdata *dd = ppd->dd;
+	int timeoff, freq;
+
+	if (!(dd->flags & QIB_INITTED))
+		return;
+
+	/* First check if we are blinking. If not, use 1HZ polling */
+	timeoff = HZ;
+	freq = (val & LED_OVER_FREQ_MASK) >> LED_OVER_FREQ_SHIFT;
+
+	if (freq) {
+		/* For blink, set each phase from one nybble of val */
+		ppd->led_override_vals[0] = val & 0xF;
+		ppd->led_override_vals[1] = (val >> 4) & 0xF;
+		timeoff = (HZ << 4)/freq;
+	} else {
+		/* Non-blink set both phases the same. */
+		ppd->led_override_vals[0] = val & 0xF;
+		ppd->led_override_vals[1] = val & 0xF;
+	}
+	ppd->led_override_timeoff = timeoff;
+
+	/*
+	 * If the timer has not already been started, do so. Use a "quick"
+	 * timeout so the function will be called soon, to look at our request.
+	 */
+	if (atomic_inc_return(&ppd->led_override_timer_active) == 1) {
+		/* Need to start timer */
+		init_timer(&ppd->led_override_timer);
+		ppd->led_override_timer.function = qib_run_led_override;
+		ppd->led_override_timer.data = (unsigned long) ppd;
+		ppd->led_override_timer.expires = jiffies + 1;
+		add_timer(&ppd->led_override_timer);
+	} else {
+		if (ppd->led_override_vals[0] || ppd->led_override_vals[1])
+			mod_timer(&ppd->led_override_timer, jiffies + 1);
+		atomic_dec(&ppd->led_override_timer_active);
+	}
+}
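+
+/*
+ * Worked example of the override encoding handled above: bits 0-3 of
+ * val are the first blink phase, bits 4-7 the second, and bits 8-15
+ * the frequency.  With freq = 4, timeoff = (HZ << 4) / 4 = 4 * HZ,
+ * so each phase is held for four seconds before the timer flips to
+ * the other.
+ */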
+
+/**
+ * qib_reset_device - reset the chip if possible
+ * @unit: the device to reset
+ *
+ * Whether or not reset is successful, we attempt to re-initialize the chip
+ * (that is, much like a driver unload/reload).  We clear the INITTED flag
+ * so that the various entry points will fail until we reinitialize.  For
+ * now, we only allow this if no user contexts are open that use chip resources
+ */
+int qib_reset_device(int unit)
+{
+	int ret, i;
+	struct qib_devdata *dd = qib_lookup(unit);
+	struct qib_pportdata *ppd;
+	unsigned long flags;
+	int pidx;
+
+	if (!dd) {
+		ret = -ENODEV;
+		goto bail;
+	}
+
+	qib_devinfo(dd->pcidev, "Reset on unit %u requested\n", unit);
+
+	if (!dd->kregbase || !(dd->flags & QIB_PRESENT)) {
+		qib_devinfo(dd->pcidev, "Invalid unit number %u or "
+			    "not initialized or not present\n", unit);
+		ret = -ENXIO;
+		goto bail;
+	}
+
+	spin_lock_irqsave(&dd->uctxt_lock, flags);
+	if (dd->rcd)
+		for (i = dd->first_user_ctxt; i < dd->cfgctxts; i++) {
+			if (!dd->rcd[i] || !dd->rcd[i]->cnt)
+				continue;
+			spin_unlock_irqrestore(&dd->uctxt_lock, flags);
+			ret = -EBUSY;
+			goto bail;
+		}
+	spin_unlock_irqrestore(&dd->uctxt_lock, flags);
+
+	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+		ppd = dd->pport + pidx;
+		if (atomic_read(&ppd->led_override_timer_active)) {
+			/* Need to stop LED timer, _then_ shut off LEDs */
+			del_timer_sync(&ppd->led_override_timer);
+			atomic_set(&ppd->led_override_timer_active, 0);
+		}
+
+		/* Shut off LEDs after we are sure timer is not running */
+		ppd->led_override = LED_OVER_BOTH_OFF;
+		dd->f_setextled(ppd, 0);
+		if (dd->flags & QIB_HAS_SEND_DMA)
+			qib_teardown_sdma(ppd);
+	}
+
+	ret = dd->f_reset(dd);
+	if (ret == 1)
+		ret = qib_init(dd, 1);
+	else
+		ret = -EAGAIN;
+	if (ret)
+		qib_dev_err(dd, "Reinitialize unit %u after "
+			    "reset failed with %d\n", unit, ret);
+	else
+		qib_devinfo(dd->pcidev, "Reinitialized unit %u after "
+			    "resetting\n", unit);
+
+bail:
+	return ret;
+}
diff --git a/drivers/infiniband/hw/qib/qib_eeprom.c b/drivers/infiniband/hw/qib/qib_eeprom.c
new file mode 100644
index 000000000000..92d9cfe98a68
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_eeprom.c
@@ -0,0 +1,451 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/vmalloc.h>
+
+#include "qib.h"
+
+/*
+ * Functions specific to the serial EEPROM on cards handled by ib_qib.
+ * The actual serial interface code is in qib_twsi.c.  This file is a
+ * client of it.
+ */
+
+/**
+ * qib_eeprom_read - receives bytes from the eeprom via I2C
+ * @dd: the qlogic_ib device
+ * @eeprom_offset: address to read from
+ * @buff: where to store result
+ * @len: number of bytes to receive
+ */
+int qib_eeprom_read(struct qib_devdata *dd, u8 eeprom_offset,
+		    void *buff, int len)
+{
+	int ret;
+
+	ret = mutex_lock_interruptible(&dd->eep_lock);
+	if (!ret) {
+		ret = qib_twsi_reset(dd);
+		if (ret)
+			qib_dev_err(dd, "EEPROM Reset for read failed\n");
+		else
+			ret = qib_twsi_blk_rd(dd, dd->twsi_eeprom_dev,
+					      eeprom_offset, buff, len);
+		mutex_unlock(&dd->eep_lock);
+	}
+
+	return ret;
+}
+
+/*
+ * Actually update the eeprom, first doing write enable if
+ * needed, then restoring write enable state.
+ * Must be called with eep_lock held
+ */
+static int eeprom_write_with_enable(struct qib_devdata *dd, u8 offset,
+		     const void *buf, int len)
+{
+	int ret, pwen;
+
+	pwen = dd->f_eeprom_wen(dd, 1);
+	ret = qib_twsi_reset(dd);
+	if (ret)
+		qib_dev_err(dd, "EEPROM Reset for write failed\n");
+	else
+		ret = qib_twsi_blk_wr(dd, dd->twsi_eeprom_dev,
+				      offset, buf, len);
+	dd->f_eeprom_wen(dd, pwen);
+	return ret;
+}
+
+/**
+ * qib_eeprom_write - writes data to the eeprom via I2C
+ * @dd: the qlogic_ib device
+ * @eeprom_offset: where to place data
+ * @buff: data to write
+ * @len: number of bytes to write
+ */
+int qib_eeprom_write(struct qib_devdata *dd, u8 eeprom_offset,
+		     const void *buff, int len)
+{
+	int ret;
+
+	ret = mutex_lock_interruptible(&dd->eep_lock);
+	if (!ret) {
+		ret = eeprom_write_with_enable(dd, eeprom_offset, buff, len);
+		mutex_unlock(&dd->eep_lock);
+	}
+
+	return ret;
+}
+
+static u8 flash_csum(struct qib_flash *ifp, int adjust)
+{
+	u8 *ip = (u8 *) ifp;
+	u8 csum = 0, len;
+
+	/*
+	 * Limit length checksummed to max length of actual data.
+	 * Checksum of erased eeprom will still be bad, but we avoid
+	 * reading past the end of the buffer we were passed.
+	 */
+	len = ifp->if_length;
+	if (len > sizeof(struct qib_flash))
+		len = sizeof(struct qib_flash);
+	while (len--)
+		csum += *ip++;
+	csum -= ifp->if_csum;
+	csum = ~csum;
+	if (adjust)
+		ifp->if_csum = csum;
+
+	return csum;
+}
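+
+/*
+ * Checksum scheme, spelled out: the loop above sums every byte of the
+ * in-use part of the flash image, subtracts the stored if_csum byte so
+ * the checksum does not cover itself, and complements the result.  As
+ * noted above, an erased (all 0xFF) EEPROM still fails the comparison,
+ * which is the desired behavior.
+ */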
+
+/**
+ * qib_get_eeprom_info - get the GUID et al. from the TWSI EEPROM device
+ * @dd: the qlogic_ib device
+ *
+ * We have the capability to use the nguid field, and get
+ * the guid from the first chip's flash, to use for all of them.
+ */
+void qib_get_eeprom_info(struct qib_devdata *dd)
+{
+	void *buf;
+	struct qib_flash *ifp;
+	__be64 guid;
+	int len, eep_stat;
+	u8 csum, *bguid;
+	int t = dd->unit;
+	struct qib_devdata *dd0 = qib_lookup(0);
+
+	if (t && dd0->nguid > 1 && t <= dd0->nguid) {
+		u8 oguid;
+		dd->base_guid = dd0->base_guid;
+		bguid = (u8 *) &dd->base_guid;
+
+		oguid = bguid[7];
+		bguid[7] += t;
+		if (oguid > bguid[7]) {
+			if (bguid[6] == 0xff) {
+				if (bguid[5] == 0xff) {
+					qib_dev_err(dd, "Can't set %s GUID"
+						    " from base, wraps to"
+						    " OUI!\n",
+						    qib_get_unit_name(t));
+					dd->base_guid = 0;
+					goto bail;
+				}
+				bguid[5]++;
+			}
+			bguid[6]++;
+		}
+		dd->nguid = 1;
+		goto bail;
+	}
+
+	/*
+	 * Read full flash, not just currently used part, since it may have
+	 * been written with a newer definition.
+	 */
+	len = sizeof(struct qib_flash);
+	buf = vmalloc(len);
+	if (!buf) {
+		qib_dev_err(dd, "Couldn't allocate memory to read %u "
+			    "bytes from eeprom for GUID\n", len);
+		goto bail;
+	}
+
+	/*
+	 * Use "public" eeprom read function, which does locking and
+	 * figures out device. This will migrate to chip-specific.
+	 */
+	eep_stat = qib_eeprom_read(dd, 0, buf, len);
+
+	if (eep_stat) {
+		qib_dev_err(dd, "Failed reading GUID from eeprom\n");
+		goto done;
+	}
+	ifp = (struct qib_flash *)buf;
+
+	csum = flash_csum(ifp, 0);
+	if (csum != ifp->if_csum) {
+		qib_devinfo(dd->pcidev, "Bad I2C flash checksum: "
+			 "0x%x, not 0x%x\n", csum, ifp->if_csum);
+		goto done;
+	}
+	if (*(__be64 *) ifp->if_guid == cpu_to_be64(0) ||
+	    *(__be64 *) ifp->if_guid == ~cpu_to_be64(0)) {
+		qib_dev_err(dd, "Invalid GUID %llx from flash; ignoring\n",
+			    *(unsigned long long *) ifp->if_guid);
+		/* don't allow GUID if all 0 or all 1's */
+		goto done;
+	}
+
+	/* complain, but allow it */
+	if (*(u64 *) ifp->if_guid == 0x100007511000000ULL)
+		qib_devinfo(dd->pcidev, "Warning, GUID %llx is "
+			 "default, probably not correct!\n",
+			 *(unsigned long long *) ifp->if_guid);
+
+	bguid = ifp->if_guid;
+	if (!bguid[0] && !bguid[1] && !bguid[2]) {
+		/*
+		 * Original incorrect GUID format in flash; fix in
+		 * core copy, by shifting up 2 octets; don't need to
+		 * change top octet, since both it and shifted are 0.
+		 */
+		bguid[1] = bguid[3];
+		bguid[2] = bguid[4];
+		bguid[3] = 0;
+		bguid[4] = 0;
+		guid = *(__be64 *) ifp->if_guid;
+	} else
+		guid = *(__be64 *) ifp->if_guid;
+	dd->base_guid = guid;
+	dd->nguid = ifp->if_numguid;
+	/*
+	 * Things are slightly complicated by the desire to transparently
+	 * support both the Pathscale 10-digit serial number and the QLogic
+	 * 13-character version.
+	 */
+	if ((ifp->if_fversion > 1) && ifp->if_sprefix[0] &&
+	    ((u8 *) ifp->if_sprefix)[0] != 0xFF) {
+		char *snp = dd->serial;
+
+		/*
+		 * This board has a Serial-prefix, which is stored
+		 * elsewhere for backward-compatibility.
+		 */
+		memcpy(snp, ifp->if_sprefix, sizeof ifp->if_sprefix);
+		snp[sizeof ifp->if_sprefix] = '\0';
+		len = strlen(snp);
+		snp += len;
+		len = (sizeof dd->serial) - len;
+		if (len > sizeof ifp->if_serial)
+			len = sizeof ifp->if_serial;
+		memcpy(snp, ifp->if_serial, len);
+	} else
+		memcpy(dd->serial, ifp->if_serial,
+		       sizeof ifp->if_serial);
+	if (!strstr(ifp->if_comment, "Tested successfully"))
+		qib_dev_err(dd, "Board SN %s did not pass functional "
+			    "test: %s\n", dd->serial, ifp->if_comment);
+
+	memcpy(&dd->eep_st_errs, &ifp->if_errcntp, QIB_EEP_LOG_CNT);
+	/*
+	 * Power-on (actually "active") hours are kept as little-endian value
+	 * in EEPROM, but as seconds in a (possibly as small as 24-bit)
+	 * atomic_t while running.
+	 */
+	atomic_set(&dd->active_time, 0);
+	dd->eep_hrs = ifp->if_powerhour[0] | (ifp->if_powerhour[1] << 8);
+
+done:
+	vfree(buf);
+
+bail:;
+}
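+
+/*
+ * GUID-derivation example for the multi-chip path above: with a base
+ * GUID whose low octet is 0xfe and unit 3, bguid[7] wraps to 0x01, so
+ * a carry propagates into bguid[6] (and possibly bguid[5]); only if
+ * the carry would run on into the OUI octets is the derived GUID
+ * rejected and base_guid cleared.
+ */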
+
+/**
+ * qib_update_eeprom_log - copy active-time and error counters to eeprom
+ * @dd: the qlogic_ib device
+ *
+ * Although the time is kept as seconds in the qib_devdata struct, it is
+ * rounded to hours for re-write, as we have only 16 bits in EEPROM.
+ * First-cut code reads whole (expected) struct qib_flash, modifies,
+ * re-writes. Future direction: read/write only what we need, assuming
+ * that the EEPROM had to have been "good enough" for driver init, and
+ * if not, we aren't making it worse.
+ *
+ */
+int qib_update_eeprom_log(struct qib_devdata *dd)
+{
+	void *buf;
+	struct qib_flash *ifp;
+	int len, hi_water;
+	uint32_t new_time, new_hrs;
+	u8 csum;
+	int ret, idx;
+	unsigned long flags;
+
+	/* first, check if we actually need to do anything. */
+	ret = 0;
+	for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) {
+		if (dd->eep_st_new_errs[idx]) {
+			ret = 1;
+			break;
+		}
+	}
+	new_time = atomic_read(&dd->active_time);
+
+	if (ret == 0 && new_time < 3600)
+		goto bail;
+
+	/*
+	 * The quick-check above determined that there is something worthy
+	 * of logging, so get the current contents and take a more
+	 * detailed look.  Read the full flash, not just the currently
+	 * used part, since it may have been written with a newer
+	 * definition.
+	 */
+	len = sizeof(struct qib_flash);
+	buf = vmalloc(len);
+	ret = 1;
+	if (!buf) {
+		qib_dev_err(dd, "Couldn't allocate memory to read %u "
+			    "bytes from eeprom for logging\n", len);
+		goto bail;
+	}
+
+	/* Grab semaphore and read current EEPROM. If we get an
+	 * error, let go, but if not, keep it until we finish write.
+	 */
+	ret = mutex_lock_interruptible(&dd->eep_lock);
+	if (ret) {
+		qib_dev_err(dd, "Unable to acquire EEPROM for logging\n");
+		goto free_bail;
+	}
+	ret = qib_twsi_blk_rd(dd, dd->twsi_eeprom_dev, 0, buf, len);
+	if (ret) {
+		mutex_unlock(&dd->eep_lock);
+		qib_dev_err(dd, "Unable read EEPROM for logging\n");
+		goto free_bail;
+	}
+	ifp = (struct qib_flash *)buf;
+
+	csum = flash_csum(ifp, 0);
+	if (csum != ifp->if_csum) {
+		mutex_unlock(&dd->eep_lock);
+		qib_dev_err(dd, "EEPROM cks err (0x%02X, S/B 0x%02X)\n",
+			    csum, ifp->if_csum);
+		ret = 1;
+		goto free_bail;
+	}
+	hi_water = 0;
+	spin_lock_irqsave(&dd->eep_st_lock, flags);
+	for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) {
+		int new_val = dd->eep_st_new_errs[idx];
+		if (new_val) {
+			/*
+			 * If we have seen any errors, add to EEPROM values
+			 * We need to saturate at 0xFF (255) and we also
+			 * would need to adjust the checksum if we were
+			 * trying to minimize EEPROM traffic
+			 * Note that we add to actual current count in EEPROM,
+			 * in case it was altered while we were running.
+			 */
+			new_val += ifp->if_errcntp[idx];
+			if (new_val > 0xFF)
+				new_val = 0xFF;
+			if (ifp->if_errcntp[idx] != new_val) {
+				ifp->if_errcntp[idx] = new_val;
+				hi_water = offsetof(struct qib_flash,
+						    if_errcntp) + idx;
+			}
+			/*
+			 * update our shadow (used to minimize EEPROM
+			 * traffic), to match what we are about to write.
+			 */
+			dd->eep_st_errs[idx] = new_val;
+			dd->eep_st_new_errs[idx] = 0;
+		}
+	}
+	/*
+	 * Now update active-time. We would like to round to the nearest hour
+	 * but unless atomic_t are sure to be proper signed ints we cannot,
+	 * because we need to account for what we "transfer" to EEPROM and
+	 * if we log an hour at 31 minutes, then we would need to set
+	 * active_time to -29 to accurately count the _next_ hour.
+	 */
+	if (new_time >= 3600) {
+		new_hrs = new_time / 3600;
+		atomic_sub((new_hrs * 3600), &dd->active_time);
+		new_hrs += dd->eep_hrs;
+		if (new_hrs > 0xFFFF)
+			new_hrs = 0xFFFF;
+		dd->eep_hrs = new_hrs;
+		if ((new_hrs & 0xFF) != ifp->if_powerhour[0]) {
+			ifp->if_powerhour[0] = new_hrs & 0xFF;
+			hi_water = offsetof(struct qib_flash, if_powerhour);
+		}
+		if ((new_hrs >> 8) != ifp->if_powerhour[1]) {
+			ifp->if_powerhour[1] = new_hrs >> 8;
+			hi_water = offsetof(struct qib_flash, if_powerhour) + 1;
+		}
+	}
+	/*
+	 * There is a tiny possibility that we could somehow fail to write
+	 * the EEPROM after updating our shadows, but problems from holding
+	 * the spinlock too long are a much bigger issue.
+	 */
+	spin_unlock_irqrestore(&dd->eep_st_lock, flags);
+	if (hi_water) {
+		/* we made some change to the data, update cksum and write */
+		csum = flash_csum(ifp, 1);
+		ret = eeprom_write_with_enable(dd, 0, buf, hi_water + 1);
+	}
+	mutex_unlock(&dd->eep_lock);
+	if (ret)
+		qib_dev_err(dd, "Failed updating EEPROM\n");
+
+free_bail:
+	vfree(buf);
+bail:
+	return ret;
+}
+
+/**
+ * qib_inc_eeprom_err - increment one of the four error counters logged to EEPROM
+ * @dd: the qlogic_ib device
+ * @eidx: 0..3, the counter to increment
+ * @incr: how much to add
+ *
+ * Each counter is 8-bits, and saturates at 255 (0xFF). They
+ * are copied to the EEPROM (aka flash) whenever qib_update_eeprom_log()
+ * is called, but it can only be called in a context that allows sleep.
+ * This function can be called even at interrupt level.
+ */
+void qib_inc_eeprom_err(struct qib_devdata *dd, u32 eidx, u32 incr)
+{
+	uint new_val;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dd->eep_st_lock, flags);
+	new_val = dd->eep_st_new_errs[eidx] + incr;
+	if (new_val > 255)
+		new_val = 255;
+	dd->eep_st_new_errs[eidx] = new_val;
+	spin_unlock_irqrestore(&dd->eep_st_lock, flags);
+}
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
new file mode 100644
index 000000000000..a142a9eb5226
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -0,0 +1,2317 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
+ * All rights reserved.
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/pci.h>
+#include <linux/poll.h>
+#include <linux/cdev.h>
+#include <linux/swap.h>
+#include <linux/vmalloc.h>
+#include <linux/highmem.h>
+#include <linux/io.h>
+#include <linux/uio.h>
+#include <linux/jiffies.h>
+#include <asm/pgtable.h>
+#include <linux/delay.h>
+
+#include "qib.h"
+#include "qib_common.h"
+#include "qib_user_sdma.h"
+
+static int qib_open(struct inode *, struct file *);
+static int qib_close(struct inode *, struct file *);
+static ssize_t qib_write(struct file *, const char __user *, size_t, loff_t *);
+static ssize_t qib_aio_write(struct kiocb *, const struct iovec *,
+			     unsigned long, loff_t);
+static unsigned int qib_poll(struct file *, struct poll_table_struct *);
+static int qib_mmapf(struct file *, struct vm_area_struct *);
+
+static const struct file_operations qib_file_ops = {
+	.owner = THIS_MODULE,
+	.write = qib_write,
+	.aio_write = qib_aio_write,
+	.open = qib_open,
+	.release = qib_close,
+	.poll = qib_poll,
+	.mmap = qib_mmapf
+};
+
+/*
+ * Convert kernel virtual addresses to physical addresses so they don't
+ * potentially conflict with the chip addresses used as mmap offsets.
+ * It doesn't really matter what mmap offset we use as long as we can
+ * interpret it correctly.
+ */
+static u64 cvt_kvaddr(void *p)
+{
+	struct page *page;
+	u64 paddr = 0;
+
+	page = vmalloc_to_page(p);
+	if (page)
+		paddr = page_to_pfn(page) << PAGE_SHIFT;
+
+	return paddr;
+}
+
+static int qib_get_base_info(struct file *fp, void __user *ubase,
+			     size_t ubase_size)
+{
+	struct qib_ctxtdata *rcd = ctxt_fp(fp);
+	int ret = 0;
+	struct qib_base_info *kinfo = NULL;
+	struct qib_devdata *dd = rcd->dd;
+	struct qib_pportdata *ppd = rcd->ppd;
+	unsigned subctxt_cnt;
+	int shared, master;
+	size_t sz;
+
+	subctxt_cnt = rcd->subctxt_cnt;
+	if (!subctxt_cnt) {
+		shared = 0;
+		master = 0;
+		subctxt_cnt = 1;
+	} else {
+		shared = 1;
+		master = !subctxt_fp(fp);
+	}
+
+	sz = sizeof(*kinfo);
+	/* If context sharing is not requested, allow the old size structure */
+	if (!shared)
+		sz -= 7 * sizeof(u64);
+	if (ubase_size < sz) {
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	kinfo = kzalloc(sizeof(*kinfo), GFP_KERNEL);
+	if (kinfo == NULL) {
+		ret = -ENOMEM;
+		goto bail;
+	}
+
+	ret = dd->f_get_base_info(rcd, kinfo);
+	if (ret < 0)
+		goto bail;
+
+	kinfo->spi_rcvhdr_cnt = dd->rcvhdrcnt;
+	kinfo->spi_rcvhdrent_size = dd->rcvhdrentsize;
+	kinfo->spi_tidegrcnt = rcd->rcvegrcnt;
+	kinfo->spi_rcv_egrbufsize = dd->rcvegrbufsize;
+	/* userspace must mmap the whole eager buffer region at once */
+	kinfo->spi_rcv_egrbuftotlen =
+		rcd->rcvegrbuf_chunks * rcd->rcvegrbuf_size;
+	kinfo->spi_rcv_egrperchunk = rcd->rcvegrbufs_perchunk;
+	kinfo->spi_rcv_egrchunksize = kinfo->spi_rcv_egrbuftotlen /
+		rcd->rcvegrbuf_chunks;
+	kinfo->spi_tidcnt = dd->rcvtidcnt / subctxt_cnt;
+	if (master)
+		kinfo->spi_tidcnt += dd->rcvtidcnt % subctxt_cnt;
+	/*
+	 * for this use, may be cfgctxts summed over all chips that
+	 * are configured and present
+	 */
+	kinfo->spi_nctxts = dd->cfgctxts;
+	/* unit (chip/board) our context is on */
+	kinfo->spi_unit = dd->unit;
+	kinfo->spi_port = ppd->port;
+	/* for now, only a single page */
+	kinfo->spi_tid_maxsize = PAGE_SIZE;
+
+	/*
+	 * Doing this per context, and based on the skip value, etc.  This has
+	 * to be the actual buffer size, since the protocol code treats it
+	 * as an array.
+	 *
+	 * These have to be set to user addresses in the user code via mmap.
+	 * These values are used on return to user code for the mmap target
+	 * addresses only.  For 32 bit, same 44 bit address problem, so use
+	 * the physical address, not virtual.  Before 2.6.11, using the
+	 * page_address() macro worked, but in 2.6.11, even that returns the
+	 * full 64 bit address (upper bits all 1's).  So far, using the
+	 * physical addresses (or chip offsets, for chip mapping) works, but
+	 * no doubt some future kernel release will change that, and we'll be
+	 * on to yet another method of dealing with this.
+	 * Normally only one of rcvhdr_tailaddr or rhf_offset is useful
+	 * since the chips with non-zero rhf_offset don't normally
+	 * enable tail register updates to host memory, but for testing,
+	 * both can be enabled and used.
+	 */
+	kinfo->spi_rcvhdr_base = (u64) rcd->rcvhdrq_phys;
+	kinfo->spi_rcvhdr_tailaddr = (u64) rcd->rcvhdrqtailaddr_phys;
+	kinfo->spi_rhf_offset = dd->rhf_offset;
+	kinfo->spi_rcv_egrbufs = (u64) rcd->rcvegr_phys;
+	kinfo->spi_pioavailaddr = (u64) dd->pioavailregs_phys;
+	/* setup per-unit (not port) status area for user programs */
+	kinfo->spi_status = (u64) kinfo->spi_pioavailaddr +
+		(char *) ppd->statusp -
+		(char *) dd->pioavailregs_dma;
+	kinfo->spi_uregbase = (u64) dd->uregbase + dd->ureg_align * rcd->ctxt;
+	if (!shared) {
+		kinfo->spi_piocnt = rcd->piocnt;
+		kinfo->spi_piobufbase = (u64) rcd->piobufs;
+		kinfo->spi_sendbuf_status = cvt_kvaddr(rcd->user_event_mask);
+	} else if (master) {
+		kinfo->spi_piocnt = (rcd->piocnt / subctxt_cnt) +
+				    (rcd->piocnt % subctxt_cnt);
+		/* Master's PIO buffers are after all the slave's */
+		kinfo->spi_piobufbase = (u64) rcd->piobufs +
+			dd->palign *
+			(rcd->piocnt - kinfo->spi_piocnt);
+	} else {
+		unsigned slave = subctxt_fp(fp) - 1;
+
+		kinfo->spi_piocnt = rcd->piocnt / subctxt_cnt;
+		kinfo->spi_piobufbase = (u64) rcd->piobufs +
+			dd->palign * kinfo->spi_piocnt * slave;
+	}
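+	/*
+	 * Sketch with hypothetical numbers: for rcd->piocnt = 10 and
+	 * subctxt_cnt = 4, each of the three slaves gets 10/4 = 2 buffers
+	 * (at buffer offsets 0, 2 and 4) and the master gets 2 + 10%4 = 4
+	 * buffers starting at offset 6, i.e. after all the slaves'.
+	 */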
+
+	if (shared) {
+		kinfo->spi_sendbuf_status =
+			cvt_kvaddr(&rcd->user_event_mask[subctxt_fp(fp)]);
+		/* only spi_subctxt_* fields should be set in this block! */
+		kinfo->spi_subctxt_uregbase = cvt_kvaddr(rcd->subctxt_uregbase);
+
+		kinfo->spi_subctxt_rcvegrbuf =
+			cvt_kvaddr(rcd->subctxt_rcvegrbuf);
+		kinfo->spi_subctxt_rcvhdr_base =
+			cvt_kvaddr(rcd->subctxt_rcvhdr_base);
+	}
+
+	/*
+	 * All user buffers are 2KB buffers.  If we ever support
+	 * giving 4KB buffers to user processes, this will need some
+	 * work.  Can't use piobufbase directly, because it has
+	 * both 2K and 4K buffer base values.
+	 */
+	kinfo->spi_pioindex = (kinfo->spi_piobufbase - dd->pio2k_bufbase) /
+		dd->palign;
+	kinfo->spi_pioalign = dd->palign;
+	kinfo->spi_qpair = QIB_KD_QP;
+	/*
+	 * user mode PIO buffers are always 2KB, even when 4KB can
+	 * be received, and sent via the kernel; this is ibmaxlen
+	 * for 2K MTU.
+	 */
+	kinfo->spi_piosize = dd->piosize2k - 2 * sizeof(u32);
+	kinfo->spi_mtu = ppd->ibmaxlen; /* maxlen, not ibmtu */
+	kinfo->spi_ctxt = rcd->ctxt;
+	kinfo->spi_subctxt = subctxt_fp(fp);
+	kinfo->spi_sw_version = QIB_KERN_SWVERSION;
+	kinfo->spi_sw_version |= 1U << 31; /* QLogic-built, not kernel.org */
+	kinfo->spi_hw_version = dd->revision;
+
+	if (master)
+		kinfo->spi_runtime_flags |= QIB_RUNTIME_MASTER;
+
+	sz = (ubase_size < sizeof(*kinfo)) ? ubase_size : sizeof(*kinfo);
+	if (copy_to_user(ubase, kinfo, sz))
+		ret = -EFAULT;
+bail:
+	kfree(kinfo);
+	return ret;
+}
+
+/**
+ * qib_tid_update - update a context TID
+ * @rcd: the context
+ * @fp: the qib device file
+ * @ti: the TID information
+ *
+ * The new implementation as of Oct 2004 is that the driver assigns
+ * the tid and returns it to the caller.   To reduce search time, we
+ * keep a cursor for each context, walking the shadow tid array to find
+ * one that's not in use.
+ *
+ * For now, if we can't allocate the full list, we fail, although
+ * in the long run, we'll allocate as many as we can, and the
+ * caller will deal with that by trying the remaining pages later.
+ * That means that when we fail, we have to mark the tids as not in
+ * use again, in our shadow copy.
+ *
+ * It's up to the caller to free the tids when they are done.
+ * We'll unlock the pages as they free them.
+ *
+ * Also, right now we are locking one page at a time, but since
+ * the intended use of this routine is for a single group of
+ * virtually contiguous pages, that should change to improve
+ * performance.
+ */
+static int qib_tid_update(struct qib_ctxtdata *rcd, struct file *fp,
+			  const struct qib_tid_info *ti)
+{
+	int ret = 0, ntids;
+	u32 tid, ctxttid, cnt, i, tidcnt, tidoff;
+	u16 *tidlist;
+	struct qib_devdata *dd = rcd->dd;
+	u64 physaddr;
+	unsigned long vaddr;
+	u64 __iomem *tidbase;
+	unsigned long tidmap[8];
+	struct page **pagep = NULL;
+	unsigned subctxt = subctxt_fp(fp);
+
+	if (!dd->pageshadow) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	cnt = ti->tidcnt;
+	if (!cnt) {
+		ret = -EFAULT;
+		goto done;
+	}
+	ctxttid = rcd->ctxt * dd->rcvtidcnt;
+	if (!rcd->subctxt_cnt) {
+		tidcnt = dd->rcvtidcnt;
+		tid = rcd->tidcursor;
+		tidoff = 0;
+	} else if (!subctxt) {
+		tidcnt = (dd->rcvtidcnt / rcd->subctxt_cnt) +
+			 (dd->rcvtidcnt % rcd->subctxt_cnt);
+		tidoff = dd->rcvtidcnt - tidcnt;
+		ctxttid += tidoff;
+		tid = tidcursor_fp(fp);
+	} else {
+		tidcnt = dd->rcvtidcnt / rcd->subctxt_cnt;
+		tidoff = tidcnt * (subctxt - 1);
+		ctxttid += tidoff;
+		tid = tidcursor_fp(fp);
+	}
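+	/*
+	 * Worked example with hypothetical numbers: for dd->rcvtidcnt = 512
+	 * and subctxt_cnt = 4, slaves 1..3 each get 128 TIDs at offsets 0,
+	 * 128 and 256, and the master gets the remaining 128 (plus any
+	 * remainder) at offset 384, mirroring the PIO buffer split in
+	 * qib_get_base_info().
+	 */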
+	if (cnt > tidcnt) {
+		/* make sure it all fits in tid_pg_list */
+		qib_devinfo(dd->pcidev, "Process tried to allocate %u "
+			 "TIDs, only trying max (%u)\n", cnt, tidcnt);
+		cnt = tidcnt;
+	}
+	pagep = (struct page **) rcd->tid_pg_list;
+	tidlist = (u16 *) &pagep[dd->rcvtidcnt];
+	pagep += tidoff;
+	tidlist += tidoff;
+
+	memset(tidmap, 0, sizeof(tidmap));
+	/* before decrement; chip actual # */
+	ntids = tidcnt;
+	tidbase = (u64 __iomem *) (((char __iomem *) dd->kregbase) +
+				   dd->rcvtidbase +
+				   ctxttid * sizeof(*tidbase));
+
+	/* virtual address of first page in transfer */
+	vaddr = ti->tidvaddr;
+	if (!access_ok(VERIFY_WRITE, (void __user *) vaddr,
+		       cnt * PAGE_SIZE)) {
+		ret = -EFAULT;
+		goto done;
+	}
+	ret = qib_get_user_pages(vaddr, cnt, pagep);
+	if (ret) {
+		/*
+		 * if (ret == -EBUSY)
+		 * We can't continue because the pagep array won't be
+		 * initialized. This should never happen,
+		 * unless perhaps the user has mpin'ed the pages
+		 * themselves.
+		 */
+		qib_devinfo(dd->pcidev,
+			 "Failed to lock addr %p, %u pages: "
+			 "errno %d\n", (void *) vaddr, cnt, -ret);
+		goto done;
+	}
+	for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) {
+		for (; ntids--; tid++) {
+			if (tid == tidcnt)
+				tid = 0;
+			if (!dd->pageshadow[ctxttid + tid])
+				break;
+		}
+		if (ntids < 0) {
+			/*
+			 * Oops, wrapped all the way through their TIDs,
+			 * and didn't have enough free; see comments at
+			 * start of routine
+			 */
+			i--;    /* last tidlist[i] not filled in */
+			ret = -ENOMEM;
+			break;
+		}
+		tidlist[i] = tid + tidoff;
+		/* we "know" system pages and TID pages are same size */
+		dd->pageshadow[ctxttid + tid] = pagep[i];
+		dd->physshadow[ctxttid + tid] =
+			qib_map_page(dd->pcidev, pagep[i], 0, PAGE_SIZE,
+				     PCI_DMA_FROMDEVICE);
+		/* no atomic op needed here, so we skip its overhead */
+		__set_bit(tid, tidmap);
+		physaddr = dd->physshadow[ctxttid + tid];
+		/* PERFORMANCE: below should almost certainly be cached */
+		dd->f_put_tid(dd, &tidbase[tid],
+				  RCVHQ_RCV_TYPE_EXPECTED, physaddr);
+		/*
+		 * don't check this tid in qib_ctxtshadow, since we
+		 * just filled it in; start with the next one.
+		 */
+		tid++;
+	}
+
+	if (ret) {
+		u32 limit;
+cleanup:
+		/* jump here if copy out of updated info failed... */
+		/* same code that's in qib_free_tid() */
+		limit = sizeof(tidmap) * BITS_PER_BYTE;
+		if (limit > tidcnt)
+			/* just in case size changes in future */
+			limit = tidcnt;
+		tid = find_first_bit((const unsigned long *)tidmap, limit);
+		for (; tid < limit; tid++) {
+			if (!test_bit(tid, tidmap))
+				continue;
+			if (dd->pageshadow[ctxttid + tid]) {
+				dma_addr_t phys;
+
+				phys = dd->physshadow[ctxttid + tid];
+				dd->physshadow[ctxttid + tid] = dd->tidinvalid;
+				/* PERFORMANCE: below should almost certainly
+				 * be cached
+				 */
+				dd->f_put_tid(dd, &tidbase[tid],
+					      RCVHQ_RCV_TYPE_EXPECTED,
+					      dd->tidinvalid);
+				pci_unmap_page(dd->pcidev, phys, PAGE_SIZE,
+					       PCI_DMA_FROMDEVICE);
+				dd->pageshadow[ctxttid + tid] = NULL;
+			}
+		}
+		qib_release_user_pages(pagep, cnt);
+	} else {
+		/*
+		 * Copy the updated array, with qib_tid's filled in, back
+		 * to user.  Since we did the copy in already, this "should
+		 * never fail".  If it does, we have to clean up...
+		 */
+		if (copy_to_user((void __user *)
+				 (unsigned long) ti->tidlist,
+				 tidlist, cnt * sizeof(*tidlist))) {
+			ret = -EFAULT;
+			goto cleanup;
+		}
+		if (copy_to_user((void __user *) (unsigned long) ti->tidmap,
+				 tidmap, sizeof(tidmap))) {
+			ret = -EFAULT;
+			goto cleanup;
+		}
+		if (tid == tidcnt)
+			tid = 0;
+		if (!rcd->subctxt_cnt)
+			rcd->tidcursor = tid;
+		else
+			tidcursor_fp(fp) = tid;
+	}
+
+done:
+	return ret;
+}
+
+/**
+ * qib_tid_free - free a context TID
+ * @rcd: the context
+ * @subctxt: the subcontext
+ * @ti: the TID info
+ *
+ * right now we are unlocking one page at a time, but since
+ * the intended use of this routine is for a single group of
+ * virtually contiguous pages, that should change to improve
+ * performance.  We check that the TID is in range for this context
+ * but otherwise don't check validity; if user has an error and
+ * frees the wrong tid, it's only their own data that can thereby
+ * be corrupted.  We do check that the TID was in use, for sanity.
+ * We always use our idea of the saved address, not the address that
+ * they pass in to us.
+ */
+static int qib_tid_free(struct qib_ctxtdata *rcd, unsigned subctxt,
+			const struct qib_tid_info *ti)
+{
+	int ret = 0;
+	u32 tid, ctxttid, cnt, limit, tidcnt;
+	struct qib_devdata *dd = rcd->dd;
+	u64 __iomem *tidbase;
+	unsigned long tidmap[8];
+
+	if (!dd->pageshadow) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	if (copy_from_user(tidmap, (void __user *)(unsigned long)ti->tidmap,
+			   sizeof(tidmap))) {
+		ret = -EFAULT;
+		goto done;
+	}
+
+	ctxttid = rcd->ctxt * dd->rcvtidcnt;
+	if (!rcd->subctxt_cnt)
+		tidcnt = dd->rcvtidcnt;
+	else if (!subctxt) {
+		tidcnt = (dd->rcvtidcnt / rcd->subctxt_cnt) +
+			 (dd->rcvtidcnt % rcd->subctxt_cnt);
+		ctxttid += dd->rcvtidcnt - tidcnt;
+	} else {
+		tidcnt = dd->rcvtidcnt / rcd->subctxt_cnt;
+		ctxttid += tidcnt * (subctxt - 1);
+	}
+	tidbase = (u64 __iomem *) ((char __iomem *)(dd->kregbase) +
+				   dd->rcvtidbase +
+				   ctxttid * sizeof(*tidbase));
+
+	limit = sizeof(tidmap) * BITS_PER_BYTE;
+	if (limit > tidcnt)
+		/* just in case size changes in future */
+		limit = tidcnt;
+	tid = find_first_bit(tidmap, limit);
+	for (cnt = 0; tid < limit; tid++) {
+		/*
+		 * small optimization; if we detect a run of 3 or so without
+		 * any set, use find_first_bit again.  That's mainly to
+		 * accelerate the case where we wrapped, so we have some at
+		 * the beginning, and some at the end, and a big gap
+		 * in the middle.
+		 */
+		if (!test_bit(tid, tidmap))
+			continue;
+		cnt++;
+		if (dd->pageshadow[ctxttid + tid]) {
+			struct page *p;
+			dma_addr_t phys;
+
+			p = dd->pageshadow[ctxttid + tid];
+			dd->pageshadow[ctxttid + tid] = NULL;
+			phys = dd->physshadow[ctxttid + tid];
+			dd->physshadow[ctxttid + tid] = dd->tidinvalid;
+			/* PERFORMANCE: below should almost certainly be
+			 * cached
+			 */
+			dd->f_put_tid(dd, &tidbase[tid],
+				      RCVHQ_RCV_TYPE_EXPECTED, dd->tidinvalid);
+			pci_unmap_page(dd->pcidev, phys, PAGE_SIZE,
+				       PCI_DMA_FROMDEVICE);
+			qib_release_user_pages(&p, 1);
+		}
+	}
+done:
+	return ret;
+}
+
+/**
+ * qib_set_part_key - set a partition key
+ * @rcd: the context
+ * @key: the key
+ *
+ * We can have up to 4 active at a time (other than the default, which is
+ * always allowed).  This is somewhat tricky, since multiple contexts may set
+ * the same key, so we reference count them, and clean up at exit.  All 4
+ * partition keys are packed into a single qlogic_ib register.  It's an
+ * error for a process to set the same pkey multiple times.  We provide no
+ * mechanism to de-allocate a pkey at this time; we may eventually need to
+ * do that.  I've used the atomic operations, and no locking, and only make
+ * a single pass through what's available.  This should be more than
+ * adequate for some time. I'll think about spinlocks or the like if and as
+ * it's necessary.
+ */
+static int qib_set_part_key(struct qib_ctxtdata *rcd, u16 key)
+{
+	struct qib_pportdata *ppd = rcd->ppd;
+	int i, any = 0, pidx = -1;
+	u16 lkey = key & 0x7FFF;
+	int ret;
+
+	if (lkey == (QIB_DEFAULT_P_KEY & 0x7FFF)) {
+		/* nothing to do; this key always valid */
+		ret = 0;
+		goto bail;
+	}
+
+	if (!lkey) {
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	/*
+	 * Set the full membership bit, because it has to be
+	 * set in the register or the packet, and it seems
+	 * cleaner to set in the register than to force all
+	 * callers to set it.
+	 */
+	key |= 0x8000;
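+	/*
+	 * Per the InfiniBand spec, bit 15 of a P_Key is the membership
+	 * bit (1 = full member, 0 = limited); bits 0-14 are the key
+	 * value proper, hence the 0x7FFF masks above.
+	 */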
+
+	for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) {
+		if (!rcd->pkeys[i] && pidx == -1)
+			pidx = i;
+		if (rcd->pkeys[i] == key) {
+			ret = -EEXIST;
+			goto bail;
+		}
+	}
+	if (pidx == -1) {
+		ret = -EBUSY;
+		goto bail;
+	}
+	for (any = i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
+		if (!ppd->pkeys[i]) {
+			any++;
+			continue;
+		}
+		if (ppd->pkeys[i] == key) {
+			atomic_t *pkrefs = &ppd->pkeyrefs[i];
+
+			if (atomic_inc_return(pkrefs) > 1) {
+				rcd->pkeys[pidx] = key;
+				ret = 0;
+				goto bail;
+			} else {
+				/*
+				 * lost race, decrement count, catch below
+				 */
+				atomic_dec(pkrefs);
+				any++;
+			}
+		}
+		if ((ppd->pkeys[i] & 0x7FFF) == lkey) {
+			/*
+			 * It makes no sense to have both the limited and
+			 * full membership PKEY set at the same time since
+			 * the unlimited one will disable the limited one.
+			 */
+			ret = -EEXIST;
+			goto bail;
+		}
+	}
+	if (!any) {
+		ret = -EBUSY;
+		goto bail;
+	}
+	for (any = i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
+		if (!ppd->pkeys[i] &&
+		    atomic_inc_return(&ppd->pkeyrefs[i]) == 1) {
+			rcd->pkeys[pidx] = key;
+			ppd->pkeys[i] = key;
+			(void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);
+			ret = 0;
+			goto bail;
+		}
+	}
+	ret = -EBUSY;
+
+bail:
+	return ret;
+}
+
+/**
+ * qib_manage_rcvq - manage a context's receive queue
+ * @rcd: the context
+ * @subctxt: the subcontext
+ * @start_stop: action to carry out
+ *
+ * start_stop == 0 disables receive on the context, for use in queue
+ * overflow conditions.  start_stop==1 re-enables, to be used to
+ * re-init the software copy of the head register
+ */
+static int qib_manage_rcvq(struct qib_ctxtdata *rcd, unsigned subctxt,
+			   int start_stop)
+{
+	struct qib_devdata *dd = rcd->dd;
+	unsigned int rcvctrl_op;
+
+	if (subctxt)
+		goto bail;
+	/* atomically enable or disable receive for this ctxt */
+	if (start_stop) {
+		/*
+		 * On enable, force in-memory copy of the tail register to
+		 * 0, so that protocol code doesn't have to worry about
+		 * whether or not the chip has yet updated the in-memory
+		 * copy or not on return from the system call. The chip
+		 * always resets its tail register back to 0 on a
+		 * transition from disabled to enabled.
+		 */
+		if (rcd->rcvhdrtail_kvaddr)
+			qib_clear_rcvhdrtail(rcd);
+		rcvctrl_op = QIB_RCVCTRL_CTXT_ENB;
+	} else
+		rcvctrl_op = QIB_RCVCTRL_CTXT_DIS;
+	dd->f_rcvctrl(rcd->ppd, rcvctrl_op, rcd->ctxt);
+	/* always; new head should be equal to new tail; see above */
+bail:
+	return 0;
+}
+
+static void qib_clean_part_key(struct qib_ctxtdata *rcd,
+			       struct qib_devdata *dd)
+{
+	int i, j, pchanged = 0;
+	u64 oldpkey;
+	struct qib_pportdata *ppd = rcd->ppd;
+
+	/* for debugging only */
+	oldpkey = (u64) ppd->pkeys[0] |
+		((u64) ppd->pkeys[1] << 16) |
+		((u64) ppd->pkeys[2] << 32) |
+		((u64) ppd->pkeys[3] << 48);
+
+	for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) {
+		if (!rcd->pkeys[i])
+			continue;
+		for (j = 0; j < ARRAY_SIZE(ppd->pkeys); j++) {
+			/* check for match independent of the global bit */
+			if ((ppd->pkeys[j] & 0x7fff) !=
+			    (rcd->pkeys[i] & 0x7fff))
+				continue;
+			if (atomic_dec_and_test(&ppd->pkeyrefs[j])) {
+				ppd->pkeys[j] = 0;
+				pchanged++;
+			}
+			break;
+		}
+		rcd->pkeys[i] = 0;
+	}
+	if (pchanged)
+		(void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);
+}
+
+/* common code for the mappings on dma_alloc_coherent mem */
+static int qib_mmap_mem(struct vm_area_struct *vma, struct qib_ctxtdata *rcd,
+			unsigned len, void *kvaddr, u32 write_ok, char *what)
+{
+	struct qib_devdata *dd = rcd->dd;
+	unsigned long pfn;
+	int ret;
+
+	if ((vma->vm_end - vma->vm_start) > len) {
+		qib_devinfo(dd->pcidev,
+			 "FAIL on %s: len %lx > %x\n", what,
+			 vma->vm_end - vma->vm_start, len);
+		ret = -EFAULT;
+		goto bail;
+	}
+
+	/*
+	 * Shared context user code requires the rcvhdrq mapped r/w; all
+	 * other mappings are allowed read-only.
+	 */
+	if (!write_ok) {
+		if (vma->vm_flags & VM_WRITE) {
+			qib_devinfo(dd->pcidev,
+				 "%s must be mapped readonly\n", what);
+			ret = -EPERM;
+			goto bail;
+		}
+
+		/* don't allow them to later change with mprotect */
+		vma->vm_flags &= ~VM_MAYWRITE;
+	}
+
+	pfn = virt_to_phys(kvaddr) >> PAGE_SHIFT;
+	ret = remap_pfn_range(vma, vma->vm_start, pfn,
+			      len, vma->vm_page_prot);
+	if (ret)
+		qib_devinfo(dd->pcidev, "%s ctxt%u mmap of %lx, %x "
+			 "bytes failed: %d\n", what, rcd->ctxt,
+			 pfn, len, ret);
+bail:
+	return ret;
+}
+
+static int mmap_ureg(struct vm_area_struct *vma, struct qib_devdata *dd,
+		     u64 ureg)
+{
+	unsigned long phys;
+	unsigned long sz;
+	int ret;
+
+	/*
+	 * This is real hardware, so use io_remap.  This is the mechanism
+	 * for the user process to update the head registers for their ctxt
+	 * in the chip.
+	 */
+	sz = dd->flags & QIB_HAS_HDRSUPP ? 2 * PAGE_SIZE : PAGE_SIZE;
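+	/*
+	 * The second page on QIB_HAS_HDRSUPP chips presumably carries the
+	 * additional header-suppression user registers; the chip-specific
+	 * code owns the actual layout (an assumption, not verified here).
+	 */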
+	if ((vma->vm_end - vma->vm_start) > sz) {
+		qib_devinfo(dd->pcidev, "FAIL mmap userreg: reqlen "
+			 "%lx > PAGE\n", vma->vm_end - vma->vm_start);
+		ret = -EFAULT;
+	} else {
+		phys = dd->physaddr + ureg;
+		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+		vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
+		ret = io_remap_pfn_range(vma, vma->vm_start,
+					 phys >> PAGE_SHIFT,
+					 vma->vm_end - vma->vm_start,
+					 vma->vm_page_prot);
+	}
+	return ret;
+}
+
+static int mmap_piobufs(struct vm_area_struct *vma,
+			struct qib_devdata *dd,
+			struct qib_ctxtdata *rcd,
+			unsigned piobufs, unsigned piocnt)
+{
+	unsigned long phys;
+	int ret;
+
+	/*
+	 * When we map the PIO buffers in the chip, we want to map them as
+	 * writeonly, no read possible; unfortunately, x86 doesn't allow
+	 * for this in hardware, but we still prevent users from asking
+	 * for it.
+	 */
+	if ((vma->vm_end - vma->vm_start) > (piocnt * dd->palign)) {
+		qib_devinfo(dd->pcidev, "FAIL mmap piobufs: "
+			 "reqlen %lx > PAGE\n",
+			 vma->vm_end - vma->vm_start);
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	phys = dd->physaddr + piobufs;
+
+#if defined(__powerpc__)
+	/* There isn't a generic way to specify writethrough mappings */
+	pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
+	pgprot_val(vma->vm_page_prot) |= _PAGE_WRITETHRU;
+	pgprot_val(vma->vm_page_prot) &= ~_PAGE_GUARDED;
+#endif
+
+	/*
+	 * Don't allow the mapping to later be made readable with mprotect,
+	 * for the normal case where it was not mapped readable initially.
+	 */
+	vma->vm_flags &= ~VM_MAYREAD;
+	vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
+
+	if (qib_wc_pat)
+		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+	ret = io_remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT,
+				 vma->vm_end - vma->vm_start,
+				 vma->vm_page_prot);
+bail:
+	return ret;
+}
+
+static int mmap_rcvegrbufs(struct vm_area_struct *vma,
+			   struct qib_ctxtdata *rcd)
+{
+	struct qib_devdata *dd = rcd->dd;
+	unsigned long start, size;
+	size_t total_size, i;
+	unsigned long pfn;
+	int ret;
+
+	size = rcd->rcvegrbuf_size;
+	total_size = rcd->rcvegrbuf_chunks * size;
+	if ((vma->vm_end - vma->vm_start) > total_size) {
+		qib_devinfo(dd->pcidev, "FAIL on egr bufs: "
+			 "reqlen %lx > actual %lx\n",
+			 vma->vm_end - vma->vm_start,
+			 (unsigned long) total_size);
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	if (vma->vm_flags & VM_WRITE) {
+		qib_devinfo(dd->pcidev, "Can't map eager buffers as "
+			 "writable (flags=%lx)\n", vma->vm_flags);
+		ret = -EPERM;
+		goto bail;
+	}
+	/* don't allow them to later change to writeable with mprotect */
+	vma->vm_flags &= ~VM_MAYWRITE;
+
+	start = vma->vm_start;
+
+	for (i = 0; i < rcd->rcvegrbuf_chunks; i++, start += size) {
+		pfn = virt_to_phys(rcd->rcvegrbuf[i]) >> PAGE_SHIFT;
+		ret = remap_pfn_range(vma, start, pfn, size,
+				      vma->vm_page_prot);
+		if (ret < 0)
+			goto bail;
+	}
+	ret = 0;
+
+bail:
+	return ret;
+}
+
+/*
+ * qib_file_vma_fault - handle a VMA page fault.
+ */
+static int qib_file_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	struct page *page;
+
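+	/*
+	 * mmap_kvaddr() stashed the kernel virtual address of the shared
+	 * region in vm_pgoff (shifted down by PAGE_SHIFT); shift it back
+	 * up and hand out the backing vmalloc page.
+	 */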
+	page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT));
+	if (!page)
+		return VM_FAULT_SIGBUS;
+
+	get_page(page);
+	vmf->page = page;
+
+	return 0;
+}
+
+static struct vm_operations_struct qib_file_vm_ops = {
+	.fault = qib_file_vma_fault,
+};
+
+static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
+		       struct qib_ctxtdata *rcd, unsigned subctxt)
+{
+	struct qib_devdata *dd = rcd->dd;
+	unsigned subctxt_cnt;
+	unsigned long len;
+	void *addr;
+	size_t size;
+	int ret = 0;
+
+	subctxt_cnt = rcd->subctxt_cnt;
+	size = rcd->rcvegrbuf_chunks * rcd->rcvegrbuf_size;
+
+	/*
+	 * Each process has all the subctxt uregbase, rcvhdrq, and
+	 * rcvegrbufs mmapped - as an array for all the processes,
+	 * and also separately for this process.
+	 */
+	if (pgaddr == cvt_kvaddr(rcd->subctxt_uregbase)) {
+		addr = rcd->subctxt_uregbase;
+		size = PAGE_SIZE * subctxt_cnt;
+	} else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvhdr_base)) {
+		addr = rcd->subctxt_rcvhdr_base;
+		size = rcd->rcvhdrq_size * subctxt_cnt;
+	} else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvegrbuf)) {
+		addr = rcd->subctxt_rcvegrbuf;
+		size *= subctxt_cnt;
+	} else if (pgaddr == cvt_kvaddr(rcd->subctxt_uregbase +
+					PAGE_SIZE * subctxt)) {
+		addr = rcd->subctxt_uregbase + PAGE_SIZE * subctxt;
+		size = PAGE_SIZE;
+	} else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvhdr_base +
+					rcd->rcvhdrq_size * subctxt)) {
+		addr = rcd->subctxt_rcvhdr_base +
+			rcd->rcvhdrq_size * subctxt;
+		size = rcd->rcvhdrq_size;
+	} else if (pgaddr == cvt_kvaddr(&rcd->user_event_mask[subctxt])) {
+		addr = rcd->user_event_mask;
+		size = PAGE_SIZE;
+	} else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvegrbuf +
+					size * subctxt)) {
+		addr = rcd->subctxt_rcvegrbuf + size * subctxt;
+		/* rcvegrbufs are read-only on the slave */
+		if (vma->vm_flags & VM_WRITE) {
+			qib_devinfo(dd->pcidev,
+				 "Can't map eager buffers as "
+				 "writable (flags=%lx)\n", vma->vm_flags);
+			ret = -EPERM;
+			goto bail;
+		}
+		/*
+		 * Don't allow permission to later change to writeable
+		 * with mprotect.
+		 */
+		vma->vm_flags &= ~VM_MAYWRITE;
+	} else
+		goto bail;
+	len = vma->vm_end - vma->vm_start;
+	if (len > size) {
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	vma->vm_pgoff = (unsigned long) addr >> PAGE_SHIFT;
+	vma->vm_ops = &qib_file_vm_ops;
+	vma->vm_flags |= VM_RESERVED | VM_DONTEXPAND;
+	ret = 1;
+
+bail:
+	return ret;
+}
+
+/**
+ * qib_mmapf - mmap various structures into user space
+ * @fp: the file pointer
+ * @vma: the VM area
+ *
+ * We use this to have a shared buffer between the kernel and the user code
+ * for the rcvhdr queue, egr buffers, and the per-context user regs and pio
+ * buffers in the chip.  We have the open and close entries so we can bump
+ * the ref count and keep the driver from being unloaded while still mapped.
+ */
+static int qib_mmapf(struct file *fp, struct vm_area_struct *vma)
+{
+	struct qib_ctxtdata *rcd;
+	struct qib_devdata *dd;
+	u64 pgaddr, ureg;
+	unsigned piobufs, piocnt;
+	int ret, match = 1;
+
+	rcd = ctxt_fp(fp);
+	if (!rcd || !(vma->vm_flags & VM_SHARED)) {
+		ret = -EINVAL;
+		goto bail;
+	}
+	dd = rcd->dd;
+
+	/*
+	 * This backs the setup done in qib_do_user_init(), mapping the
+	 * shared buffers and per-context user registers into the user
+	 * process. The address
+	 * referred to by vm_pgoff is the file offset passed via mmap().
+	 * For shared contexts, this is the kernel vmalloc() address of the
+	 * pages to share with the master.
+	 * For non-shared or master ctxts, this is a physical address.
+	 * We only do one mmap for each space mapped.
+	 */
+	pgaddr = vma->vm_pgoff << PAGE_SHIFT;
+
+	/*
+	 * Check for 0 in case one of the allocations failed, but user
+	 * called mmap anyway.
+	 */
+	if (!pgaddr)  {
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	/*
+	 * Physical addresses must fit in 40 bits for our hardware.
+	 * Check for kernel virtual addresses first; anything else must
+	 * match a HW or memory address.
+	 */
+	ret = mmap_kvaddr(vma, pgaddr, rcd, subctxt_fp(fp));
+	if (ret) {
+		if (ret > 0)
+			ret = 0;
+		goto bail;
+	}
+
+	ureg = dd->uregbase + dd->ureg_align * rcd->ctxt;
+	if (!rcd->subctxt_cnt) {
+		/* ctxt is not shared */
+		piocnt = rcd->piocnt;
+		piobufs = rcd->piobufs;
+	} else if (!subctxt_fp(fp)) {
+		/* caller is the master */
+		piocnt = (rcd->piocnt / rcd->subctxt_cnt) +
+			 (rcd->piocnt % rcd->subctxt_cnt);
+		piobufs = rcd->piobufs +
+			dd->palign * (rcd->piocnt - piocnt);
+	} else {
+		unsigned slave = subctxt_fp(fp) - 1;
+
+		/* caller is a slave */
+		piocnt = rcd->piocnt / rcd->subctxt_cnt;
+		piobufs = rcd->piobufs + dd->palign * piocnt * slave;
+	}
+
+	if (pgaddr == ureg)
+		ret = mmap_ureg(vma, dd, ureg);
+	else if (pgaddr == piobufs)
+		ret = mmap_piobufs(vma, dd, rcd, piobufs, piocnt);
+	else if (pgaddr == dd->pioavailregs_phys)
+		/* in-memory copy of pioavail registers */
+		ret = qib_mmap_mem(vma, rcd, PAGE_SIZE,
+				   (void *) dd->pioavailregs_dma, 0,
+				   "pioavail registers");
+	else if (pgaddr == rcd->rcvegr_phys)
+		ret = mmap_rcvegrbufs(vma, rcd);
+	else if (pgaddr == (u64) rcd->rcvhdrq_phys)
+		/*
+		 * The rcvhdrq itself; multiple pages, contiguous
+		 * from an i/o perspective.  Shared contexts need
+		 * to map r/w, so we allow writing.
+		 */
+		ret = qib_mmap_mem(vma, rcd, rcd->rcvhdrq_size,
+				   rcd->rcvhdrq, 1, "rcvhdrq");
+	else if (pgaddr == (u64) rcd->rcvhdrqtailaddr_phys)
+		/* in-memory copy of rcvhdrq tail register */
+		ret = qib_mmap_mem(vma, rcd, PAGE_SIZE,
+				   rcd->rcvhdrtail_kvaddr, 0,
+				   "rcvhdrq tail");
+	else
+		match = 0;
+	if (!match)
+		ret = -EINVAL;
+
+	vma->vm_private_data = NULL;
+
+	if (ret < 0)
+		qib_devinfo(dd->pcidev,
+			 "mmap Failure %d: off %llx len %lx\n",
+			 -ret, (unsigned long long)pgaddr,
+			 vma->vm_end - vma->vm_start);
+bail:
+	return ret;
+}
+
+static unsigned int qib_poll_urgent(struct qib_ctxtdata *rcd,
+				    struct file *fp,
+				    struct poll_table_struct *pt)
+{
+	struct qib_devdata *dd = rcd->dd;
+	unsigned pollflag;
+
+	poll_wait(fp, &rcd->wait, pt);
+
+	spin_lock_irq(&dd->uctxt_lock);
+	if (rcd->urgent != rcd->urgent_poll) {
+		pollflag = POLLIN | POLLRDNORM;
+		rcd->urgent_poll = rcd->urgent;
+	} else {
+		pollflag = 0;
+		set_bit(QIB_CTXT_WAITING_URG, &rcd->flag);
+	}
+	spin_unlock_irq(&dd->uctxt_lock);
+
+	return pollflag;
+}
+
+static unsigned int qib_poll_next(struct qib_ctxtdata *rcd,
+				  struct file *fp,
+				  struct poll_table_struct *pt)
+{
+	struct qib_devdata *dd = rcd->dd;
+	unsigned pollflag;
+
+	poll_wait(fp, &rcd->wait, pt);
+
+	spin_lock_irq(&dd->uctxt_lock);
+	if (dd->f_hdrqempty(rcd)) {
+		set_bit(QIB_CTXT_WAITING_RCV, &rcd->flag);
+		dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_INTRAVAIL_ENB, rcd->ctxt);
+		pollflag = 0;
+	} else
+		pollflag = POLLIN | POLLRDNORM;
+	spin_unlock_irq(&dd->uctxt_lock);
+
+	return pollflag;
+}
+
+static unsigned int qib_poll(struct file *fp, struct poll_table_struct *pt)
+{
+	struct qib_ctxtdata *rcd;
+	unsigned pollflag;
+
+	rcd = ctxt_fp(fp);
+	if (!rcd)
+		pollflag = POLLERR;
+	else if (rcd->poll_type == QIB_POLL_TYPE_URGENT)
+		pollflag = qib_poll_urgent(rcd, fp, pt);
+	else  if (rcd->poll_type == QIB_POLL_TYPE_ANYRCV)
+		pollflag = qib_poll_next(rcd, fp, pt);
+	else /* invalid */
+		pollflag = POLLERR;
+
+	return pollflag;
+}
+
+/*
+ * Check that userland and driver are compatible for subcontexts.
+ */
+static int qib_compatible_subctxts(int user_swmajor, int user_swminor)
+{
+	/* this code is written long-hand for clarity */
+	if (QIB_USER_SWMAJOR != user_swmajor) {
+		/* no promise of compatibility if major mismatch */
+		return 0;
+	}
+	if (QIB_USER_SWMAJOR == 1) {
+		switch (QIB_USER_SWMINOR) {
+		case 0:
+		case 1:
+		case 2:
+			/* no subctxt implementation so cannot be compatible */
+			return 0;
+		case 3:
+			/* 3 is only compatible with itself */
+			return user_swminor == 3;
+		default:
+			/* >= 4 are compatible (or are expected to be) */
+			return user_swminor >= 4;
+		}
+	}
+	/* make no promises yet for future major versions */
+	return 0;
+}
+
+static int init_subctxts(struct qib_devdata *dd,
+			 struct qib_ctxtdata *rcd,
+			 const struct qib_user_info *uinfo)
+{
+	int ret = 0;
+	unsigned num_subctxts;
+	size_t size;
+
+	/*
+	 * If the user is requesting zero subctxts,
+	 * skip the subctxt allocation.
+	 */
+	if (uinfo->spu_subctxt_cnt <= 0)
+		goto bail;
+	num_subctxts = uinfo->spu_subctxt_cnt;
+
+	/* Check for subctxt compatibility */
+	if (!qib_compatible_subctxts(uinfo->spu_userversion >> 16,
+		uinfo->spu_userversion & 0xffff)) {
+		qib_devinfo(dd->pcidev,
+			 "Mismatched user version (%d.%d) and driver "
+			 "version (%d.%d) while context sharing. Ensure "
+			 "that driver and library are from the same "
+			 "release.\n",
+			 (int) (uinfo->spu_userversion >> 16),
+			 (int) (uinfo->spu_userversion & 0xffff),
+			 QIB_USER_SWMAJOR, QIB_USER_SWMINOR);
+		goto bail;
+	}
+	if (num_subctxts > QLOGIC_IB_MAX_SUBCTXT) {
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	rcd->subctxt_uregbase = vmalloc_user(PAGE_SIZE * num_subctxts);
+	if (!rcd->subctxt_uregbase) {
+		ret = -ENOMEM;
+		goto bail;
+	}
+	/* Note: rcd->rcvhdrq_size isn't initialized yet. */
+	size = ALIGN(dd->rcvhdrcnt * dd->rcvhdrentsize *
+		     sizeof(u32), PAGE_SIZE) * num_subctxts;
+	rcd->subctxt_rcvhdr_base = vmalloc_user(size);
+	if (!rcd->subctxt_rcvhdr_base) {
+		ret = -ENOMEM;
+		goto bail_ureg;
+	}
+
+	rcd->subctxt_rcvegrbuf = vmalloc_user(rcd->rcvegrbuf_chunks *
+					      rcd->rcvegrbuf_size *
+					      num_subctxts);
+	if (!rcd->subctxt_rcvegrbuf) {
+		ret = -ENOMEM;
+		goto bail_rhdr;
+	}
+
+	rcd->subctxt_cnt = uinfo->spu_subctxt_cnt;
+	rcd->subctxt_id = uinfo->spu_subctxt_id;
+	rcd->active_slaves = 1;
+	rcd->redirect_seq_cnt = 1;
+	set_bit(QIB_CTXT_MASTER_UNINIT, &rcd->flag);
+	goto bail;
+
+bail_rhdr:
+	vfree(rcd->subctxt_rcvhdr_base);
+bail_ureg:
+	vfree(rcd->subctxt_uregbase);
+	rcd->subctxt_uregbase = NULL;
+bail:
+	return ret;
+}
+
+static int setup_ctxt(struct qib_pportdata *ppd, int ctxt,
+		      struct file *fp, const struct qib_user_info *uinfo)
+{
+	struct qib_devdata *dd = ppd->dd;
+	struct qib_ctxtdata *rcd;
+	void *ptmp = NULL;
+	int ret;
+
+	rcd = qib_create_ctxtdata(ppd, ctxt);
+
+	/*
+	 * Allocate memory for use in qib_tid_update() at open to
+	 * reduce cost of expected send setup per message segment
+	 */
+	if (rcd)
+		ptmp = kmalloc(dd->rcvtidcnt * sizeof(u16) +
+			       dd->rcvtidcnt * sizeof(struct page **),
+			       GFP_KERNEL);
+
+	if (!rcd || !ptmp) {
+		qib_dev_err(dd, "Unable to allocate ctxtdata "
+			    "memory, failing open\n");
+		ret = -ENOMEM;
+		goto bailerr;
+	}
+	rcd->userversion = uinfo->spu_userversion;
+	ret = init_subctxts(dd, rcd, uinfo);
+	if (ret)
+		goto bailerr;
+	rcd->tid_pg_list = ptmp;
+	rcd->pid = current->pid;
+	init_waitqueue_head(&dd->rcd[ctxt]->wait);
+	strlcpy(rcd->comm, current->comm, sizeof(rcd->comm));
+	ctxt_fp(fp) = rcd;
+	qib_stats.sps_ctxts++;
+	ret = 0;
+	goto bail;
+
+bailerr:
+	dd->rcd[ctxt] = NULL;
+	kfree(rcd);
+	kfree(ptmp);
+bail:
+	return ret;
+}
+
+static inline int usable(struct qib_pportdata *ppd, int active_only)
+{
+	struct qib_devdata *dd = ppd->dd;
+	u32 linkok = active_only ? QIBL_LINKACTIVE :
+		 (QIBL_LINKINIT | QIBL_LINKARMED | QIBL_LINKACTIVE);
+
+	return dd && (dd->flags & QIB_PRESENT) && dd->kregbase && ppd->lid &&
+		(ppd->lflags & linkok);
+}
+
+static int find_free_ctxt(int unit, struct file *fp,
+			  const struct qib_user_info *uinfo)
+{
+	struct qib_devdata *dd = qib_lookup(unit);
+	struct qib_pportdata *ppd = NULL;
+	int ret;
+	u32 ctxt;
+
+	if (!dd || (uinfo->spu_port && uinfo->spu_port > dd->num_pports)) {
+		ret = -ENODEV;
+		goto bail;
+	}
+
+	/*
+	 * If the user requests a specific port, only try that one port;
+	 * else select the "best" port below, based on context.
+	 */
+	if (uinfo->spu_port) {
+		ppd = dd->pport + uinfo->spu_port - 1;
+		if (!usable(ppd, 0)) {
+			ret = -ENETDOWN;
+			goto bail;
+		}
+	}
+
+	for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
+		if (dd->rcd[ctxt])
+			continue;
+		/*
+		 * The setting and clearing of user context rcd[x] is
+		 * protected by the qib_mutex.
+		 */
+		if (!ppd) {
+			/* choose port based on ctxt, if up, else 1st up */
+			ppd = dd->pport + (ctxt % dd->num_pports);
+			if (!usable(ppd, 0)) {
+				int i;
+
+				for (i = 0; i < dd->num_pports; i++) {
+					ppd = dd->pport + i;
+					if (usable(ppd, 0))
+						break;
+				}
+				if (i == dd->num_pports) {
+					ret = -ENETDOWN;
+					goto bail;
+				}
+			}
+		}
+		ret = setup_ctxt(ppd, ctxt, fp, uinfo);
+		goto bail;
+	}
+	ret = -EBUSY;
+
+bail:
+	return ret;
+}
+
+static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo)
+{
+	struct qib_pportdata *ppd;
+	int ret = 0, devmax;
+	int npresent, nup;
+	int ndev;
+	u32 port = uinfo->spu_port, ctxt;
+
+	devmax = qib_count_units(&npresent, &nup);
+
+	for (ndev = 0; ndev < devmax; ndev++) {
+		struct qib_devdata *dd = qib_lookup(ndev);
+
+		/* device portion of usable() */
+		if (!(dd && (dd->flags & QIB_PRESENT) && dd->kregbase))
+			continue;
+		for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
+			if (dd->rcd[ctxt])
+				continue;
+			if (port) {
+				if (port > dd->num_pports)
+					continue;
+				ppd = dd->pport + port - 1;
+				if (!usable(ppd, 0))
+					continue;
+			} else {
+				/*
+				 * choose port based on ctxt, if up, else
+				 * first port that's up for multi-port HCA
+				 */
+				ppd = dd->pport + (ctxt % dd->num_pports);
+				if (!usable(ppd, 0)) {
+					int j;
+
+					ppd = NULL;
+					for (j = 0; j < dd->num_pports &&
+						!ppd; j++)
+						if (usable(dd->pport + j, 0))
+							ppd = dd->pport + j;
+					if (!ppd)
+						continue; /* to next unit */
+				}
+			}
+			ret = setup_ctxt(ppd, ctxt, fp, uinfo);
+			goto done;
+		}
+	}
+
+	if (npresent) {
+		if (nup == 0)
+			ret = -ENETDOWN;
+		else
+			ret = -EBUSY;
+	} else
+		ret = -ENXIO;
+
+done:
+	return ret;
+}
+
+static int find_shared_ctxt(struct file *fp,
+			    const struct qib_user_info *uinfo)
+{
+	int devmax, ndev, i;
+	int ret = 0;
+
+	devmax = qib_count_units(NULL, NULL);
+
+	for (ndev = 0; ndev < devmax; ndev++) {
+		struct qib_devdata *dd = qib_lookup(ndev);
+
+		/* device portion of usable() */
+		if (!(dd && (dd->flags & QIB_PRESENT) && dd->kregbase))
+			continue;
+		for (i = dd->first_user_ctxt; i < dd->cfgctxts; i++) {
+			struct qib_ctxtdata *rcd = dd->rcd[i];
+
+			/* Skip ctxts which are not yet open */
+			if (!rcd || !rcd->cnt)
+				continue;
+			/* Skip ctxt if it doesn't match the requested one */
+			if (rcd->subctxt_id != uinfo->spu_subctxt_id)
+				continue;
+			/* Verify the sharing process matches the master */
+			if (rcd->subctxt_cnt != uinfo->spu_subctxt_cnt ||
+			    rcd->userversion != uinfo->spu_userversion ||
+			    rcd->cnt >= rcd->subctxt_cnt) {
+				ret = -EINVAL;
+				goto done;
+			}
+			ctxt_fp(fp) = rcd;
+			subctxt_fp(fp) = rcd->cnt++;
+			rcd->subpid[subctxt_fp(fp)] = current->pid;
+			tidcursor_fp(fp) = 0;
+			rcd->active_slaves |= 1 << subctxt_fp(fp);
+			ret = 1;
+			goto done;
+		}
+	}
+
+done:
+	return ret;
+}
+
+static int qib_open(struct inode *in, struct file *fp)
+{
+	/* The real work is performed later in qib_assign_ctxt() */
+	fp->private_data = kzalloc(sizeof(struct qib_filedata), GFP_KERNEL);
+	if (fp->private_data) /* no cpu affinity by default */
+		((struct qib_filedata *)fp->private_data)->rec_cpu_num = -1;
+	return fp->private_data ? 0 : -ENOMEM;
+}
+
+/*
+ * Get ctxt early, so we can set affinity prior to memory allocation.
+ */
+static int qib_assign_ctxt(struct file *fp, const struct qib_user_info *uinfo)
+{
+	int ret;
+	int i_minor;
+	unsigned swmajor, swminor;
+
+	/* Check to be sure we haven't already initialized this file */
+	if (ctxt_fp(fp)) {
+		ret = -EINVAL;
+		goto done;
+	}
+
+	/* for now, if major version is different, bail */
+	swmajor = uinfo->spu_userversion >> 16;
+	if (swmajor != QIB_USER_SWMAJOR) {
+		ret = -ENODEV;
+		goto done;
+	}
+
+	swminor = uinfo->spu_userversion & 0xffff;
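+	/* e.g. spu_userversion 0x0001000b unpacks as version 1.11 */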
+
+	mutex_lock(&qib_mutex);
+
+	if (qib_compatible_subctxts(swmajor, swminor) &&
+	    uinfo->spu_subctxt_cnt) {
+		ret = find_shared_ctxt(fp, uinfo);
+		if (ret) {
+			if (ret > 0)
+				ret = 0;
+			goto done_chk_sdma;
+		}
+	}
+
+	i_minor = iminor(fp->f_dentry->d_inode) - QIB_USER_MINOR_BASE;
+	if (i_minor)
+		ret = find_free_ctxt(i_minor - 1, fp, uinfo);
+	else
+		ret = get_a_ctxt(fp, uinfo);
+
+done_chk_sdma:
+	if (!ret) {
+		struct qib_filedata *fd = fp->private_data;
+		const struct qib_ctxtdata *rcd = fd->rcd;
+		const struct qib_devdata *dd = rcd->dd;
+
+		if (dd->flags & QIB_HAS_SEND_DMA) {
+			fd->pq = qib_user_sdma_queue_create(&dd->pcidev->dev,
+							    dd->unit,
+							    rcd->ctxt,
+							    fd->subctxt);
+			if (!fd->pq)
+				ret = -ENOMEM;
+		}
+
+		/*
+		 * If process has NOT already set its affinity, select and
+		 * reserve a processor for it, as a rendezvous for all
+		 * users of the driver.  If they don't actually later
+		 * set affinity to this cpu, or set it to some other cpu,
+		 * it just means that sooner or later we don't recommend
+		 * a cpu, and let the scheduler do its best.
+		 */
+		if (!ret && cpus_weight(current->cpus_allowed) >=
+		    qib_cpulist_count) {
+			int cpu;
+
+			cpu = find_first_zero_bit(qib_cpulist,
+						  qib_cpulist_count);
+			if (cpu != qib_cpulist_count) {
+				__set_bit(cpu, qib_cpulist);
+				fd->rec_cpu_num = cpu;
+			}
+		} else if (cpus_weight(current->cpus_allowed) == 1 &&
+			test_bit(first_cpu(current->cpus_allowed),
+				 qib_cpulist))
+			qib_devinfo(dd->pcidev, "%s PID %u affinity "
+				    "set to cpu %d; already allocated\n",
+				    current->comm, current->pid,
+				    first_cpu(current->cpus_allowed));
+	}
+
+	mutex_unlock(&qib_mutex);
+
+done:
+	return ret;
+}
+
+
+static int qib_do_user_init(struct file *fp,
+			    const struct qib_user_info *uinfo)
+{
+	int ret;
+	struct qib_ctxtdata *rcd = ctxt_fp(fp);
+	struct qib_devdata *dd;
+	unsigned uctxt;
+
+	/* Subctxts don't need to initialize anything since master did it. */
+	if (subctxt_fp(fp)) {
+		ret = wait_event_interruptible(rcd->wait,
+			!test_bit(QIB_CTXT_MASTER_UNINIT, &rcd->flag));
+		goto bail;
+	}
+
+	dd = rcd->dd;
+
+	/* some ctxts may get extra buffers; calculate that here */
+	uctxt = rcd->ctxt - dd->first_user_ctxt;
+	if (uctxt < dd->ctxts_extrabuf) {
+		rcd->piocnt = dd->pbufsctxt + 1;
+		rcd->pio_base = rcd->piocnt * uctxt;
+	} else {
+		rcd->piocnt = dd->pbufsctxt;
+		rcd->pio_base = rcd->piocnt * uctxt +
+			dd->ctxts_extrabuf;
+	}
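+	/*
+	 * Worked example with hypothetical numbers: for dd->pbufsctxt = 16
+	 * and dd->ctxts_extrabuf = 3, user ctxts 0-2 get 17 buffers at
+	 * bases 0, 17 and 34, while ctxt 3 gets 16 buffers at base
+	 * 16*3 + 3 = 51, exactly where ctxt 2's allocation ends.
+	 */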
+
+	/*
+	 * All user buffers are 2KB buffers.  If we ever support
+	 * giving 4KB buffers to user processes, this will need some
+	 * work.  Can't use piobufbase directly, because it has
+	 * both 2K and 4K buffer base values.  So check and handle.
+	 */
+	if ((rcd->pio_base + rcd->piocnt) > dd->piobcnt2k) {
+		if (rcd->pio_base >= dd->piobcnt2k) {
+			qib_dev_err(dd,
+				    "%u:ctxt%u: no 2KB buffers available\n",
+				    dd->unit, rcd->ctxt);
+			ret = -ENOBUFS;
+			goto bail;
+		}
+		rcd->piocnt = dd->piobcnt2k - rcd->pio_base;
+		qib_dev_err(dd, "Ctxt%u: would use 4KB bufs, using %u\n",
+			    rcd->ctxt, rcd->piocnt);
+	}
+
+	rcd->piobufs = dd->pio2k_bufbase + rcd->pio_base * dd->palign;
+	qib_chg_pioavailkernel(dd, rcd->pio_base, rcd->piocnt,
+			       TXCHK_CHG_TYPE_USER, rcd);
+	/*
+	 * Try to ensure that processes start up with a consistent avail
+	 * update for their own range, at least.  If the system is very
+	 * quiet, the in-memory copy might be out of date at startup for
+	 * this range of buffers when a context gets re-used.  Do this
+	 * after the chg_pioavail and before the rest of setup, so it's
+	 * "almost certain" the dma will have occurred (we can't guarantee
+	 * it 100%, but with this ordering it should be many 9s worth),
+	 * given how much else happens after this.
+	 */
+	dd->f_sendctrl(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
+
+	/*
+	 * Now allocate the rcvhdr Q and eager TIDs; skip the TID
+	 * array for the time being.  If rcd->ctxt > chip-supported,
+	 * we will someday need extra logic here to handle the overflow
+	 * through ctxt 0.
+	 */
+	ret = qib_create_rcvhdrq(dd, rcd);
+	if (!ret)
+		ret = qib_setup_eagerbufs(rcd);
+	if (ret)
+		goto bail_pio;
+
+	rcd->tidcursor = 0; /* start at beginning after open */
+
+	/* initialize poll variables... */
+	rcd->urgent = 0;
+	rcd->urgent_poll = 0;
+
+	/*
+	 * Now enable the ctxt for receive.
+	 * Some chips DMA the tail register to memory when it changes
+	 * (and when the update bit transitions from 0 to 1), so for
+	 * those chips we turn it off and then back on.
+	 * This will (very briefly) affect any other open ctxts, but the
+	 * duration is very short, and therefore isn't an issue.  We
+	 * explicitly set the in-memory tail copy to 0 beforehand, so we
+	 * don't have to wait to be sure the DMA update has happened
+	 * (chip resets head/tail to 0 on transition to enable).
+	 */
+	if (rcd->rcvhdrtail_kvaddr)
+		qib_clear_rcvhdrtail(rcd);
+
+	dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_CTXT_ENB | QIB_RCVCTRL_TIDFLOW_ENB,
+		      rcd->ctxt);
+
+	/* Notify any waiting slaves */
+	if (rcd->subctxt_cnt) {
+		clear_bit(QIB_CTXT_MASTER_UNINIT, &rcd->flag);
+		wake_up(&rcd->wait);
+	}
+	return 0;
+
+bail_pio:
+	qib_chg_pioavailkernel(dd, rcd->pio_base, rcd->piocnt,
+			       TXCHK_CHG_TYPE_KERN, rcd);
+bail:
+	return ret;
+}
+
+/**
+ * unlock_expected_tids - unlock any expected TID entries context still had in use
+ * @rcd: ctxt
+ *
+ * We don't actually update the chip here, because we do a bulk update
+ * below, using f_clear_tids.
+ */
+static void unlock_expected_tids(struct qib_ctxtdata *rcd)
+{
+	struct qib_devdata *dd = rcd->dd;
+	int ctxt_tidbase = rcd->ctxt * dd->rcvtidcnt;
+	int i, cnt = 0, maxtid = ctxt_tidbase + dd->rcvtidcnt;
+
+	for (i = ctxt_tidbase; i < maxtid; i++) {
+		struct page *p = dd->pageshadow[i];
+		dma_addr_t phys;
+
+		if (!p)
+			continue;
+
+		phys = dd->physshadow[i];
+		dd->physshadow[i] = dd->tidinvalid;
+		dd->pageshadow[i] = NULL;
+		pci_unmap_page(dd->pcidev, phys, PAGE_SIZE,
+			       PCI_DMA_FROMDEVICE);
+		qib_release_user_pages(&p, 1);
+		cnt++;
+	}
+}
+
+static int qib_close(struct inode *in, struct file *fp)
+{
+	int ret = 0;
+	struct qib_filedata *fd;
+	struct qib_ctxtdata *rcd;
+	struct qib_devdata *dd;
+	unsigned long flags;
+	unsigned ctxt;
+	pid_t pid;
+
+	mutex_lock(&qib_mutex);
+
+	fd = (struct qib_filedata *) fp->private_data;
+	fp->private_data = NULL;
+	rcd = fd->rcd;
+	if (!rcd) {
+		mutex_unlock(&qib_mutex);
+		goto bail;
+	}
+
+	dd = rcd->dd;
+
+	/* ensure all pio buffer writes in progress are flushed */
+	qib_flush_wc();
+
+	/* drain user sdma queue */
+	if (fd->pq) {
+		qib_user_sdma_queue_drain(rcd->ppd, fd->pq);
+		qib_user_sdma_queue_destroy(fd->pq);
+	}
+
+	if (fd->rec_cpu_num != -1)
+		__clear_bit(fd->rec_cpu_num, qib_cpulist);
+
+	if (--rcd->cnt) {
+		/*
+		 * XXX If the master closes the context before the slave(s),
+		 * revoke the mmap for the eager receive queue so
+		 * the slave(s) don't wait for receive data forever.
+		 */
+		rcd->active_slaves &= ~(1 << fd->subctxt);
+		rcd->subpid[fd->subctxt] = 0;
+		mutex_unlock(&qib_mutex);
+		goto bail;
+	}
+
+	/* early; no interrupt users after this */
+	spin_lock_irqsave(&dd->uctxt_lock, flags);
+	ctxt = rcd->ctxt;
+	dd->rcd[ctxt] = NULL;
+	pid = rcd->pid;
+	rcd->pid = 0;
+	spin_unlock_irqrestore(&dd->uctxt_lock, flags);
+
+	if (rcd->rcvwait_to || rcd->piowait_to ||
+	    rcd->rcvnowait || rcd->pionowait) {
+		rcd->rcvwait_to = 0;
+		rcd->piowait_to = 0;
+		rcd->rcvnowait = 0;
+		rcd->pionowait = 0;
+	}
+	if (rcd->flag)
+		rcd->flag = 0;
+
+	if (dd->kregbase) {
+		/* atomically clear receive enable ctxt and intr avail. */
+		dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_CTXT_DIS |
+				  QIB_RCVCTRL_INTRAVAIL_DIS, ctxt);
+
+		/* clean up the pkeys for this ctxt user */
+		qib_clean_part_key(rcd, dd);
+		qib_disarm_piobufs(dd, rcd->pio_base, rcd->piocnt);
+		qib_chg_pioavailkernel(dd, rcd->pio_base,
+				       rcd->piocnt, TXCHK_CHG_TYPE_KERN, NULL);
+
+		dd->f_clear_tids(dd, rcd);
+
+		if (dd->pageshadow)
+			unlock_expected_tids(rcd);
+		qib_stats.sps_ctxts--;
+	}
+
+	mutex_unlock(&qib_mutex);
+	qib_free_ctxtdata(dd, rcd); /* after releasing the mutex */
+
+bail:
+	kfree(fd);
+	return ret;
+}
+
+static int qib_ctxt_info(struct file *fp, struct qib_ctxt_info __user *uinfo)
+{
+	struct qib_ctxt_info info;
+	int ret;
+	size_t sz;
+	struct qib_ctxtdata *rcd = ctxt_fp(fp);
+	struct qib_filedata *fd;
+
+	fd = (struct qib_filedata *) fp->private_data;
+
+	info.num_active = qib_count_active_units();
+	info.unit = rcd->dd->unit;
+	info.port = rcd->ppd->port;
+	info.ctxt = rcd->ctxt;
+	info.subctxt =  subctxt_fp(fp);
+	/* Number of user ctxts available for this device. */
+	info.num_ctxts = rcd->dd->cfgctxts - rcd->dd->first_user_ctxt;
+	info.num_subctxts = rcd->subctxt_cnt;
+	info.rec_cpu = fd->rec_cpu_num;
+	sz = sizeof(info);
+
+	if (copy_to_user(uinfo, &info, sz)) {
+		ret = -EFAULT;
+		goto bail;
+	}
+	ret = 0;
+
+bail:
+	return ret;
+}
+
+static int qib_sdma_get_inflight(struct qib_user_sdma_queue *pq,
+				 u32 __user *inflightp)
+{
+	const u32 val = qib_user_sdma_inflight_counter(pq);
+
+	if (put_user(val, inflightp))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int qib_sdma_get_complete(struct qib_pportdata *ppd,
+				 struct qib_user_sdma_queue *pq,
+				 u32 __user *completep)
+{
+	u32 val;
+	int err;
+
+	if (!pq)
+		return -EINVAL;
+
+	err = qib_user_sdma_make_progress(ppd, pq);
+	if (err < 0)
+		return err;
+
+	val = qib_user_sdma_complete_counter(pq);
+	if (put_user(val, completep))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int disarm_req_delay(struct qib_ctxtdata *rcd)
+{
+	int ret = 0;
+
+	if (!usable(rcd->ppd, 1)) {
+		int i;
+
+		/*
+		 * if link is down, or otherwise not usable, delay
+		 * the caller up to 30 seconds, so we don't thrash
+		 * in trying to get the chip back to ACTIVE, and
+		 * set flag so they make the call again.
+		 */
+		if (rcd->user_event_mask) {
+			/*
+			 * subctxt_cnt is 0 if not shared, so do base
+			 * separately, first, then remaining subctxt, if any
+			 */
+			set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
+				&rcd->user_event_mask[0]);
+			for (i = 1; i < rcd->subctxt_cnt; i++)
+				set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
+					&rcd->user_event_mask[i]);
+		}
+		for (i = 0; !usable(rcd->ppd, 1) && i < 300; i++)
+			msleep(100);
+		ret = -ENETDOWN;
+	}
+	return ret;
+}
+
+/*
+ * Find all user contexts in use, and set the specified bit in their
+ * event mask.
+ * See also find_ctxt() for a similar use, that is specific to send buffers.
+ */
+int qib_set_uevent_bits(struct qib_pportdata *ppd, const int evtbit)
+{
+	struct qib_ctxtdata *rcd;
+	unsigned ctxt;
+	int ret = 0;
+
+	spin_lock(&ppd->dd->uctxt_lock);
+	for (ctxt = ppd->dd->first_user_ctxt; ctxt < ppd->dd->cfgctxts;
+	     ctxt++) {
+		rcd = ppd->dd->rcd[ctxt];
+		if (!rcd)
+			continue;
+		if (rcd->user_event_mask) {
+			int i;
+
+			/*
+			 * subctxt_cnt is 0 if not shared, so do base
+			 * separately, first, then remaining subctxt, if any
+			 */
+			set_bit(evtbit, &rcd->user_event_mask[0]);
+			for (i = 1; i < rcd->subctxt_cnt; i++)
+				set_bit(evtbit, &rcd->user_event_mask[i]);
+			ret = 1;
+		}
+	}
+	spin_unlock(&ppd->dd->uctxt_lock);
+
+	return ret;
+}
+
+/*
+ * clear the event notifier events for this context.
+ * For the DISARM_BUFS case, we also take action (this obsoletes
+ * the older QIB_CMD_DISARM_BUFS, but we keep it for backwards
+ * compatibility).
+ * Other bits don't currently require actions, just atomically clear.
+ * User process then performs actions appropriate to bit having been
+ * set, if desired, and checks again in future.
+ */
+static int qib_user_event_ack(struct qib_ctxtdata *rcd, int subctxt,
+			      unsigned long events)
+{
+	int ret = 0, i;
+
+	for (i = 0; i <= _QIB_MAX_EVENT_BIT; i++) {
+		if (!test_bit(i, &events))
+			continue;
+		if (i == _QIB_EVENT_DISARM_BUFS_BIT) {
+			(void)qib_disarm_piobufs_ifneeded(rcd);
+			ret = disarm_req_delay(rcd);
+		} else
+			clear_bit(i, &rcd->user_event_mask[subctxt]);
+	}
+	return ret;
+}
+
+static ssize_t qib_write(struct file *fp, const char __user *data,
+			 size_t count, loff_t *off)
+{
+	const struct qib_cmd __user *ucmd;
+	struct qib_ctxtdata *rcd;
+	const void __user *src;
+	size_t consumed, copy = 0;
+	struct qib_cmd cmd;
+	ssize_t ret = 0;
+	void *dest;
+
+	if (count < sizeof(cmd.type)) {
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	ucmd = (const struct qib_cmd __user *) data;
+
+	if (copy_from_user(&cmd.type, &ucmd->type, sizeof(cmd.type))) {
+		ret = -EFAULT;
+		goto bail;
+	}
+
+	consumed = sizeof(cmd.type);
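+	/*
+	 * The write() payload is a struct qib_cmd: a type word followed by
+	 * the union member for that type.  Each case below records where
+	 * that member lives and how many more bytes must be copied in.
+	 */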
+
+	switch (cmd.type) {
+	case QIB_CMD_ASSIGN_CTXT:
+	case QIB_CMD_USER_INIT:
+		copy = sizeof(cmd.cmd.user_info);
+		dest = &cmd.cmd.user_info;
+		src = &ucmd->cmd.user_info;
+		break;
+
+	case QIB_CMD_RECV_CTRL:
+		copy = sizeof(cmd.cmd.recv_ctrl);
+		dest = &cmd.cmd.recv_ctrl;
+		src = &ucmd->cmd.recv_ctrl;
+		break;
+
+	case QIB_CMD_CTXT_INFO:
+		copy = sizeof(cmd.cmd.ctxt_info);
+		dest = &cmd.cmd.ctxt_info;
+		src = &ucmd->cmd.ctxt_info;
+		break;
+
+	case QIB_CMD_TID_UPDATE:
+	case QIB_CMD_TID_FREE:
+		copy = sizeof(cmd.cmd.tid_info);
+		dest = &cmd.cmd.tid_info;
+		src = &ucmd->cmd.tid_info;
+		break;
+
+	case QIB_CMD_SET_PART_KEY:
+		copy = sizeof(cmd.cmd.part_key);
+		dest = &cmd.cmd.part_key;
+		src = &ucmd->cmd.part_key;
+		break;
+
+	case QIB_CMD_DISARM_BUFS:
+	case QIB_CMD_PIOAVAILUPD: /* force an update of PIOAvail reg */
+		copy = 0;
+		src = NULL;
+		dest = NULL;
+		break;
+
+	case QIB_CMD_POLL_TYPE:
+		copy = sizeof(cmd.cmd.poll_type);
+		dest = &cmd.cmd.poll_type;
+		src = &ucmd->cmd.poll_type;
+		break;
+
+	case QIB_CMD_ARMLAUNCH_CTRL:
+		copy = sizeof(cmd.cmd.armlaunch_ctrl);
+		dest = &cmd.cmd.armlaunch_ctrl;
+		src = &ucmd->cmd.armlaunch_ctrl;
+		break;
+
+	case QIB_CMD_SDMA_INFLIGHT:
+		copy = sizeof(cmd.cmd.sdma_inflight);
+		dest = &cmd.cmd.sdma_inflight;
+		src = &ucmd->cmd.sdma_inflight;
+		break;
+
+	case QIB_CMD_SDMA_COMPLETE:
+		copy = sizeof(cmd.cmd.sdma_complete);
+		dest = &cmd.cmd.sdma_complete;
+		src = &ucmd->cmd.sdma_complete;
+		break;
+
+	case QIB_CMD_ACK_EVENT:
+		copy = sizeof(cmd.cmd.event_mask);
+		dest = &cmd.cmd.event_mask;
+		src = &ucmd->cmd.event_mask;
+		break;
+
+	default:
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	if (copy) {
+		if ((count - consumed) < copy) {
+			ret = -EINVAL;
+			goto bail;
+		}
+		if (copy_from_user(dest, src, copy)) {
+			ret = -EFAULT;
+			goto bail;
+		}
+		consumed += copy;
+	}
+
+	rcd = ctxt_fp(fp);
+	if (!rcd && cmd.type != QIB_CMD_ASSIGN_CTXT) {
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	switch (cmd.type) {
+	case QIB_CMD_ASSIGN_CTXT:
+		ret = qib_assign_ctxt(fp, &cmd.cmd.user_info);
+		if (ret)
+			goto bail;
+		break;
+
+	case QIB_CMD_USER_INIT:
+		ret = qib_do_user_init(fp, &cmd.cmd.user_info);
+		if (ret)
+			goto bail;
+		ret = qib_get_base_info(fp, (void __user *) (unsigned long)
+					cmd.cmd.user_info.spu_base_info,
+					cmd.cmd.user_info.spu_base_info_size);
+		break;
+
+	case QIB_CMD_RECV_CTRL:
+		ret = qib_manage_rcvq(rcd, subctxt_fp(fp), cmd.cmd.recv_ctrl);
+		break;
+
+	case QIB_CMD_CTXT_INFO:
+		ret = qib_ctxt_info(fp, (struct qib_ctxt_info __user *)
+				    (unsigned long) cmd.cmd.ctxt_info);
+		break;
+
+	case QIB_CMD_TID_UPDATE:
+		ret = qib_tid_update(rcd, fp, &cmd.cmd.tid_info);
+		break;
+
+	case QIB_CMD_TID_FREE:
+		ret = qib_tid_free(rcd, subctxt_fp(fp), &cmd.cmd.tid_info);
+		break;
+
+	case QIB_CMD_SET_PART_KEY:
+		ret = qib_set_part_key(rcd, cmd.cmd.part_key);
+		break;
+
+	case QIB_CMD_DISARM_BUFS:
+		(void)qib_disarm_piobufs_ifneeded(rcd);
+		ret = disarm_req_delay(rcd);
+		break;
+
+	case QIB_CMD_PIOAVAILUPD:
+		qib_force_pio_avail_update(rcd->dd);
+		break;
+
+	case QIB_CMD_POLL_TYPE:
+		rcd->poll_type = cmd.cmd.poll_type;
+		break;
+
+	case QIB_CMD_ARMLAUNCH_CTRL:
+		rcd->dd->f_set_armlaunch(rcd->dd, cmd.cmd.armlaunch_ctrl);
+		break;
+
+	case QIB_CMD_SDMA_INFLIGHT:
+		ret = qib_sdma_get_inflight(user_sdma_queue_fp(fp),
+					    (u32 __user *) (unsigned long)
+					    cmd.cmd.sdma_inflight);
+		break;
+
+	case QIB_CMD_SDMA_COMPLETE:
+		ret = qib_sdma_get_complete(rcd->ppd,
+					    user_sdma_queue_fp(fp),
+					    (u32 __user *) (unsigned long)
+					    cmd.cmd.sdma_complete);
+		break;
+
+	case QIB_CMD_ACK_EVENT:
+		ret = qib_user_event_ack(rcd, subctxt_fp(fp),
+					 cmd.cmd.event_mask);
+		break;
+	}
+
+	if (ret >= 0)
+		ret = consumed;
+
+bail:
+	return ret;
+}
+
+static ssize_t qib_aio_write(struct kiocb *iocb, const struct iovec *iov,
+			     unsigned long dim, loff_t off)
+{
+	struct qib_filedata *fp = iocb->ki_filp->private_data;
+	struct qib_ctxtdata *rcd = ctxt_fp(iocb->ki_filp);
+	struct qib_user_sdma_queue *pq = fp->pq;
+
+	if (!dim || !pq)
+		return -EINVAL;
+
+	return qib_user_sdma_writev(rcd, pq, iov, dim);
+}
+
+static struct class *qib_class;
+static dev_t qib_dev;
+
+int qib_cdev_init(int minor, const char *name,
+		  const struct file_operations *fops,
+		  struct cdev **cdevp, struct device **devp)
+{
+	const dev_t dev = MKDEV(MAJOR(qib_dev), minor);
+	struct cdev *cdev;
+	struct device *device = NULL;
+	int ret;
+
+	cdev = cdev_alloc();
+	if (!cdev) {
+		printk(KERN_ERR QIB_DRV_NAME
+		       ": Could not allocate cdev for minor %d, %s\n",
+		       minor, name);
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	cdev->owner = THIS_MODULE;
+	cdev->ops = fops;
+	kobject_set_name(&cdev->kobj, name);
+
+	ret = cdev_add(cdev, dev, 1);
+	if (ret < 0) {
+		printk(KERN_ERR QIB_DRV_NAME
+		       ": Could not add cdev for minor %d, %s (err %d)\n",
+		       minor, name, -ret);
+		goto err_cdev;
+	}
+
+	device = device_create(qib_class, NULL, dev, NULL, name);
+	if (!IS_ERR(device))
+		goto done;
+	ret = PTR_ERR(device);
+	device = NULL;
+	printk(KERN_ERR QIB_DRV_NAME ": Could not create "
+	       "device for minor %d, %s (err %d)\n",
+	       minor, name, -ret);
+err_cdev:
+	cdev_del(cdev);
+	cdev = NULL;
+done:
+	*cdevp = cdev;
+	*devp = device;
+	return ret;
+}
+
+void qib_cdev_cleanup(struct cdev **cdevp, struct device **devp)
+{
+	struct device *device = *devp;
+
+	if (device) {
+		device_unregister(device);
+		*devp = NULL;
+	}
+
+	if (*cdevp) {
+		cdev_del(*cdevp);
+		*cdevp = NULL;
+	}
+}
+
+static struct cdev *wildcard_cdev;
+static struct device *wildcard_device;
+
+int __init qib_dev_init(void)
+{
+	int ret;
+
+	ret = alloc_chrdev_region(&qib_dev, 0, QIB_NMINORS, QIB_DRV_NAME);
+	if (ret < 0) {
+		printk(KERN_ERR QIB_DRV_NAME ": Could not allocate "
+		       "chrdev region (err %d)\n", -ret);
+		goto done;
+	}
+
+	qib_class = class_create(THIS_MODULE, "ipath");
+	if (IS_ERR(qib_class)) {
+		ret = PTR_ERR(qib_class);
+		printk(KERN_ERR QIB_DRV_NAME ": Could not create "
+		       "device class (err %d)\n", -ret);
+		unregister_chrdev_region(qib_dev, QIB_NMINORS);
+		qib_class = NULL; /* don't leave an ERR_PTR for cleanup */
+	}
+
+done:
+	return ret;
+}
+
+void qib_dev_cleanup(void)
+{
+	if (qib_class) {
+		class_destroy(qib_class);
+		qib_class = NULL;
+	}
+
+	unregister_chrdev_region(qib_dev, QIB_NMINORS);
+}
+
+static atomic_t user_count = ATOMIC_INIT(0);
+
+static void qib_user_remove(struct qib_devdata *dd)
+{
+	if (atomic_dec_return(&user_count) == 0)
+		qib_cdev_cleanup(&wildcard_cdev, &wildcard_device);
+
+	qib_cdev_cleanup(&dd->user_cdev, &dd->user_device);
+}
+
+static int qib_user_add(struct qib_devdata *dd)
+{
+	char name[10];
+	int ret;
+
+	if (atomic_inc_return(&user_count) == 1) {
+		ret = qib_cdev_init(0, "ipath", &qib_file_ops,
+				    &wildcard_cdev, &wildcard_device);
+		if (ret)
+			goto done;
+	}
+
+	snprintf(name, sizeof(name), "ipath%d", dd->unit);
+	ret = qib_cdev_init(dd->unit + 1, name, &qib_file_ops,
+			    &dd->user_cdev, &dd->user_device);
+	if (ret)
+		qib_user_remove(dd);
+done:
+	return ret;
+}
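+
+/*
+ * With the class/device registration above, udev creates the nodes as
+ * /dev/ipath (the unit-wildcard device, minor 0) and /dev/ipathN (one
+ * per unit, minor N + 1), so unit 0 would typically be opened
+ * (illustratively) as:
+ *
+ *	int fd = open("/dev/ipath0", O_RDWR);
+ *
+ * The exact /dev paths depend on the distribution's udev rules.
+ */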
+
+/*
+ * Create per-unit files in /dev
+ */
+int qib_device_create(struct qib_devdata *dd)
+{
+	int r, ret;
+
+	r = qib_user_add(dd);
+	ret = qib_diag_add(dd);
+	if (r && !ret)
+		ret = r;
+	return ret;
+}
+
+/*
+ * Remove per-unit files in /dev.
+ * Returns void; the core kernel has no error reporting for this.
+ */
+void qib_device_remove(struct qib_devdata *dd)
+{
+	qib_user_remove(dd);
+	qib_diag_remove(dd);
+}
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
new file mode 100644
index 000000000000..755470440ef1
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_fs.c
@@ -0,0 +1,613 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/mount.h>
+#include <linux/pagemap.h>
+#include <linux/init.h>
+#include <linux/namei.h>
+
+#include "qib.h"
+
+#define QIBFS_MAGIC 0x726a77
+
+static struct super_block *qib_super;
+
+#define private2dd(file) ((file)->f_dentry->d_inode->i_private)
+
+static int qibfs_mknod(struct inode *dir, struct dentry *dentry,
+		       int mode, const struct file_operations *fops,
+		       void *data)
+{
+	int error;
+	struct inode *inode = new_inode(dir->i_sb);
+
+	if (!inode) {
+		error = -ENOMEM;
+		goto bail;
+	}
+
+	inode->i_mode = mode;
+	inode->i_uid = 0;
+	inode->i_gid = 0;
+	inode->i_blocks = 0;
+	inode->i_atime = CURRENT_TIME;
+	inode->i_mtime = inode->i_atime;
+	inode->i_ctime = inode->i_atime;
+	inode->i_private = data;
+	if ((mode & S_IFMT) == S_IFDIR) {
+		inode->i_op = &simple_dir_inode_operations;
+		inc_nlink(inode);
+		inc_nlink(dir);
+	}
+
+	inode->i_fop = fops;
+
+	d_instantiate(dentry, inode);
+	error = 0;
+
+bail:
+	return error;
+}
+
+static int create_file(const char *name, mode_t mode,
+		       struct dentry *parent, struct dentry **dentry,
+		       const struct file_operations *fops, void *data)
+{
+	int error;
+
+	*dentry = NULL;
+	mutex_lock(&parent->d_inode->i_mutex);
+	*dentry = lookup_one_len(name, parent, strlen(name));
+	if (!IS_ERR(*dentry))
+		error = qibfs_mknod(parent->d_inode, *dentry,
+				    mode, fops, data);
+	else
+		error = PTR_ERR(*dentry);
+	mutex_unlock(&parent->d_inode->i_mutex);
+
+	return error;
+}
+
+static ssize_t driver_stats_read(struct file *file, char __user *buf,
+				 size_t count, loff_t *ppos)
+{
+	return simple_read_from_buffer(buf, count, ppos, &qib_stats,
+				       sizeof qib_stats);
+}
+
+/*
+ * Driver stats field names, one line per stat, as a single string.  Used by
+ * programs like ipathstats to print the stats in a way that works across
+ * different driver versions without changing the program source.  If
+ * qlogic_ib_stats changes, this needs to change too.  Names must be
+ * 12 chars or less (w/o newline) for proper display by the ipathstats
+ * utility.
+ */
+static const char qib_statnames[] =
+	"KernIntr\n"
+	"ErrorIntr\n"
+	"Tx_Errs\n"
+	"Rcv_Errs\n"
+	"H/W_Errs\n"
+	"NoPIOBufs\n"
+	"CtxtsOpen\n"
+	"RcvLen_Errs\n"
+	"EgrBufFull\n"
+	"EgrHdrFull\n"
+	;
+
+static ssize_t driver_names_read(struct file *file, char __user *buf,
+				 size_t count, loff_t *ppos)
+{
+	return simple_read_from_buffer(buf, count, ppos, qib_statnames,
+		sizeof qib_statnames - 1); /* no null */
+}
+
+static const struct file_operations driver_ops[] = {
+	{ .read = driver_stats_read, },
+	{ .read = driver_names_read, },
+};
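+
+/*
+ * A consumer pairs the two files positionally: the Nth newline-terminated
+ * name in driver_stats_names labels the Nth u64 read from driver_stats.
+ * Illustrative userspace sketch (error handling omitted; names[] comes
+ * from splitting driver_stats_names on '\n'):
+ *
+ *	u64 vals[32];
+ *	ssize_t n = read(stats_fd, vals, sizeof(vals));
+ *
+ *	for (i = 0; (i + 1) * sizeof(u64) <= (size_t)n; i++)
+ *		printf("%-12s %llu\n", names[i], (unsigned long long)vals[i]);
+ */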
+
+/* read the per-device counters */
+static ssize_t dev_counters_read(struct file *file, char __user *buf,
+				 size_t count, loff_t *ppos)
+{
+	u64 *counters;
+	size_t avail;
+	struct qib_devdata *dd = private2dd(file);
+
+	/* set "counters" before it is passed to simple_read_from_buffer() */
+	avail = dd->f_read_cntrs(dd, *ppos, NULL, &counters);
+	return simple_read_from_buffer(buf, count, ppos, counters, avail);
+}
+
+/* read the per-device counter names */
+static ssize_t dev_names_read(struct file *file, char __user *buf,
+			      size_t count, loff_t *ppos)
+{
+	char *names;
+	size_t avail;
+	struct qib_devdata *dd = private2dd(file);
+
+	/* set "names" before it is passed to simple_read_from_buffer() */
+	avail = dd->f_read_cntrs(dd, *ppos, &names, NULL);
+	return simple_read_from_buffer(buf, count, ppos, names, avail);
+}
+
+static const struct file_operations cntr_ops[] = {
+	{ .read = dev_counters_read, },
+	{ .read = dev_names_read, },
+};
+
+/*
+ * Could use file->f_dentry->d_inode->i_ino to figure out which file,
+ * instead of a separate routine for each, but for now this works...
+ */
+
+/* read the per-port names (same for each port) */
+static ssize_t portnames_read(struct file *file, char __user *buf,
+			      size_t count, loff_t *ppos)
+{
+	char *names;
+	size_t avail;
+	struct qib_devdata *dd = private2dd(file);
+
+	/* set "names" before it is passed to simple_read_from_buffer() */
+	avail = dd->f_read_portcntrs(dd, *ppos, 0, &names, NULL);
+	return simple_read_from_buffer(buf, count, ppos, names, avail);
+}
+
+/* read the per-port counters for port 1 (pidx 0) */
+static ssize_t portcntrs_1_read(struct file *file, char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	u64 *counters;
+	size_t avail;
+	struct qib_devdata *dd = private2dd(file);
+
+	/* set "counters" before it is passed to simple_read_from_buffer() */
+	avail = dd->f_read_portcntrs(dd, *ppos, 0, NULL, &counters);
+	return simple_read_from_buffer(buf, count, ppos, counters, avail);
+}
+
+/* read the per-port counters for port 2 (pidx 1) */
+static ssize_t portcntrs_2_read(struct file *file, char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	u64 *counters;
+	size_t avail;
+	struct qib_devdata *dd = private2dd(file);
+
+	/* set "counters" before it is passed to simple_read_from_buffer() */
+	avail = dd->f_read_portcntrs(dd, *ppos, 1, NULL, &counters);
+	return simple_read_from_buffer(buf, count, ppos, counters, avail);
+}
+
+static const struct file_operations portcntr_ops[] = {
+	{ .read = portnames_read, },
+	{ .read = portcntrs_1_read, },
+	{ .read = portcntrs_2_read, },
+};
+
+/*
+ * read the per-port QSFP data for port 1 (pidx 0)
+ */
+static ssize_t qsfp_1_read(struct file *file, char __user *buf,
+			   size_t count, loff_t *ppos)
+{
+	struct qib_devdata *dd = private2dd(file);
+	char *tmp;
+	int ret;
+
+	tmp = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!tmp)
+		return -ENOMEM;
+
+	ret = qib_qsfp_dump(dd->pport, tmp, PAGE_SIZE);
+	if (ret > 0)
+		ret = simple_read_from_buffer(buf, count, ppos, tmp, ret);
+	kfree(tmp);
+	return ret;
+}
+
+/*
+ * read the per-port QSFP data for port 2 (pidx 1)
+ */
+static ssize_t qsfp_2_read(struct file *file, char __user *buf,
+			   size_t count, loff_t *ppos)
+{
+	struct qib_devdata *dd = private2dd(file);
+	char *tmp;
+	int ret;
+
+	if (dd->num_pports < 2)
+		return -ENODEV;
+
+	tmp = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!tmp)
+		return -ENOMEM;
+
+	ret = qib_qsfp_dump(dd->pport + 1, tmp, PAGE_SIZE);
+	if (ret > 0)
+		ret = simple_read_from_buffer(buf, count, ppos, tmp, ret);
+	kfree(tmp);
+	return ret;
+}
+
+static const struct file_operations qsfp_ops[] = {
+	{ .read = qsfp_1_read, },
+	{ .read = qsfp_2_read, },
+};
+
+static ssize_t flash_read(struct file *file, char __user *buf,
+			  size_t count, loff_t *ppos)
+{
+	struct qib_devdata *dd;
+	ssize_t ret;
+	loff_t pos;
+	char *tmp;
+
+	pos = *ppos;
+
+	if (pos < 0) {
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	if (pos >= sizeof(struct qib_flash)) {
+		ret = 0;
+		goto bail;
+	}
+
+	if (count > sizeof(struct qib_flash) - pos)
+		count = sizeof(struct qib_flash) - pos;
+
+	tmp = kmalloc(count, GFP_KERNEL);
+	if (!tmp) {
+		ret = -ENOMEM;
+		goto bail;
+	}
+
+	dd = private2dd(file);
+	if (qib_eeprom_read(dd, pos, tmp, count)) {
+		qib_dev_err(dd, "failed to read from flash\n");
+		ret = -ENXIO;
+		goto bail_tmp;
+	}
+
+	if (copy_to_user(buf, tmp, count)) {
+		ret = -EFAULT;
+		goto bail_tmp;
+	}
+
+	*ppos = pos + count;
+	ret = count;
+
+bail_tmp:
+	kfree(tmp);
+
+bail:
+	return ret;
+}
+
+static ssize_t flash_write(struct file *file, const char __user *buf,
+			   size_t count, loff_t *ppos)
+{
+	struct qib_devdata *dd;
+	ssize_t ret;
+	loff_t pos;
+	char *tmp;
+
+	pos = *ppos;
+
+	if (pos != 0) {
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	if (count != sizeof(struct qib_flash)) {
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	tmp = kmalloc(count, GFP_KERNEL);
+	if (!tmp) {
+		ret = -ENOMEM;
+		goto bail;
+	}
+
+	if (copy_from_user(tmp, buf, count)) {
+		ret = -EFAULT;
+		goto bail_tmp;
+	}
+
+	dd = private2dd(file);
+	if (qib_eeprom_write(dd, pos, tmp, count)) {
+		ret = -ENXIO;
+		qib_dev_err(dd, "failed to write to flash\n");
+		goto bail_tmp;
+	}
+
+	*ppos = pos + count;
+	ret = count;
+
+bail_tmp:
+	kfree(tmp);
+
+bail:
+	return ret;
+}
+
+static const struct file_operations flash_ops = {
+	.read = flash_read,
+	.write = flash_write,
+};
+
+static int add_cntr_files(struct super_block *sb, struct qib_devdata *dd)
+{
+	struct dentry *dir, *tmp;
+	char unit[10];
+	int ret, i;
+
+	/* create the per-unit directory */
+	snprintf(unit, sizeof unit, "%u", dd->unit);
+	ret = create_file(unit, S_IFDIR|S_IRUGO|S_IXUGO, sb->s_root, &dir,
+			  &simple_dir_operations, dd);
+	if (ret) {
+		printk(KERN_ERR "create_file(%s) failed: %d\n", unit, ret);
+		goto bail;
+	}
+
+	/* create the files in the new directory */
+	ret = create_file("counters", S_IFREG|S_IRUGO, dir, &tmp,
+			  &cntr_ops[0], dd);
+	if (ret) {
+		printk(KERN_ERR "create_file(%s/counters) failed: %d\n",
+		       unit, ret);
+		goto bail;
+	}
+	ret = create_file("counter_names", S_IFREG|S_IRUGO, dir, &tmp,
+			  &cntr_ops[1], dd);
+	if (ret) {
+		printk(KERN_ERR "create_file(%s/counter_names) failed: %d\n",
+		       unit, ret);
+		goto bail;
+	}
+	ret = create_file("portcounter_names", S_IFREG|S_IRUGO, dir, &tmp,
+			  &portcntr_ops[0], dd);
+	if (ret) {
+		printk(KERN_ERR "create_file(%s/%s) failed: %d\n",
+		       unit, "portcounter_names", ret);
+		goto bail;
+	}
+	for (i = 1; i <= dd->num_pports; i++) {
+		char fname[24];
+
+		sprintf(fname, "port%dcounters", i);
+		/* create the files in the new directory */
+		ret = create_file(fname, S_IFREG|S_IRUGO, dir, &tmp,
+				  &portcntr_ops[i], dd);
+		if (ret) {
+			printk(KERN_ERR "create_file(%s/%s) failed: %d\n",
+				unit, fname, ret);
+			goto bail;
+		}
+		if (!(dd->flags & QIB_HAS_QSFP))
+			continue;
+		sprintf(fname, "qsfp%d", i);
+		ret = create_file(fname, S_IFREG|S_IRUGO, dir, &tmp,
+				  &qsfp_ops[i - 1], dd);
+		if (ret) {
+			printk(KERN_ERR "create_file(%s/%s) failed: %d\n",
+				unit, fname, ret);
+			goto bail;
+		}
+	}
+
+	ret = create_file("flash", S_IFREG|S_IWUSR|S_IRUGO, dir, &tmp,
+			  &flash_ops, dd);
+	if (ret)
+		printk(KERN_ERR "create_file(%s/flash) failed: %d\n",
+			unit, ret);
+bail:
+	return ret;
+}
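+
+/*
+ * The files created above give the following layout under the
+ * filesystem root for, e.g., unit 0 of a dual-port chip with QSFP
+ * (paths relative to the mount point):
+ *
+ *	0/counters         0/counter_names    0/portcounter_names
+ *	0/port1counters    0/port2counters
+ *	0/qsfp1            0/qsfp2            0/flash
+ */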
+
+static int remove_file(struct dentry *parent, char *name)
+{
+	struct dentry *tmp;
+	int ret;
+
+	tmp = lookup_one_len(name, parent, strlen(name));
+
+	if (IS_ERR(tmp)) {
+		ret = PTR_ERR(tmp);
+		goto bail;
+	}
+
+	spin_lock(&dcache_lock);
+	spin_lock(&tmp->d_lock);
+	if (!(d_unhashed(tmp) && tmp->d_inode)) {
+		dget_locked(tmp);
+		__d_drop(tmp);
+		spin_unlock(&tmp->d_lock);
+		spin_unlock(&dcache_lock);
+		simple_unlink(parent->d_inode, tmp);
+	} else {
+		spin_unlock(&tmp->d_lock);
+		spin_unlock(&dcache_lock);
+	}
+
+	ret = 0;
+bail:
+	/*
+	 * We don't expect clients to care about the return value, but
+	 * it's there if they need it.
+	 */
+	return ret;
+}
+
+static int remove_device_files(struct super_block *sb,
+			       struct qib_devdata *dd)
+{
+	struct dentry *dir, *root;
+	char unit[10];
+	int ret, i;
+
+	root = dget(sb->s_root);
+	mutex_lock(&root->d_inode->i_mutex);
+	snprintf(unit, sizeof unit, "%u", dd->unit);
+	dir = lookup_one_len(unit, root, strlen(unit));
+
+	if (IS_ERR(dir)) {
+		ret = PTR_ERR(dir);
+		printk(KERN_ERR "Lookup of %s failed\n", unit);
+		goto bail;
+	}
+
+	remove_file(dir, "counters");
+	remove_file(dir, "counter_names");
+	remove_file(dir, "portcounter_names");
+	for (i = 0; i < dd->num_pports; i++) {
+		char fname[24];
+
+		sprintf(fname, "port%dcounters", i + 1);
+		remove_file(dir, fname);
+		if (dd->flags & QIB_HAS_QSFP) {
+			sprintf(fname, "qsfp%d", i + 1);
+			remove_file(dir, fname);
+		}
+	}
+	remove_file(dir, "flash");
+	d_delete(dir);
+	ret = simple_rmdir(root->d_inode, dir);
+
+bail:
+	mutex_unlock(&root->d_inode->i_mutex);
+	dput(root);
+	return ret;
+}
+
+/*
+ * This fills everything in when the fs is mounted, to handle umount/mount
+ * after device init.  The direct add_cntr_files() call handles adding
+ * them from the init code, when the fs is already mounted.
+ */
+static int qibfs_fill_super(struct super_block *sb, void *data, int silent)
+{
+	struct qib_devdata *dd, *tmp;
+	unsigned long flags;
+	int ret;
+
+	static struct tree_descr files[] = {
+		[2] = {"driver_stats", &driver_ops[0], S_IRUGO},
+		[3] = {"driver_stats_names", &driver_ops[1], S_IRUGO},
+		{""},
+	};
+
+	ret = simple_fill_super(sb, QIBFS_MAGIC, files);
+	if (ret) {
+		printk(KERN_ERR "simple_fill_super failed: %d\n", ret);
+		goto bail;
+	}
+
+	spin_lock_irqsave(&qib_devs_lock, flags);
+
+	list_for_each_entry_safe(dd, tmp, &qib_dev_list, list) {
+		spin_unlock_irqrestore(&qib_devs_lock, flags);
+		ret = add_cntr_files(sb, dd);
+		if (ret) {
+			deactivate_super(sb);
+			goto bail;
+		}
+		spin_lock_irqsave(&qib_devs_lock, flags);
+	}
+
+	spin_unlock_irqrestore(&qib_devs_lock, flags);
+
+bail:
+	return ret;
+}
+
+static int qibfs_get_sb(struct file_system_type *fs_type, int flags,
+			const char *dev_name, void *data, struct vfsmount *mnt)
+{
+	int ret = get_sb_single(fs_type, flags, data,
+				qibfs_fill_super, mnt);
+	if (ret >= 0)
+		qib_super = mnt->mnt_sb;
+	return ret;
+}
+
+static void qibfs_kill_super(struct super_block *s)
+{
+	kill_litter_super(s);
+	qib_super = NULL;
+}
+
+int qibfs_add(struct qib_devdata *dd)
+{
+	int ret;
+
+	/*
+	 * On the first unit initialized, qib_super will not yet exist
+	 * because nobody has tried to mount the filesystem yet, so we
+	 * can't consider that an error; an error during the mount itself
+	 * is reported there.  add_cntr_files() for all units is done at
+	 * mount time from qibfs_fill_super(), so one way or another,
+	 * everything works.
+	 */
+	if (qib_super == NULL)
+		ret = 0;
+	else
+		ret = add_cntr_files(qib_super, dd);
+	return ret;
+}
+
+int qibfs_remove(struct qib_devdata *dd)
+{
+	int ret = 0;
+
+	if (qib_super)
+		ret = remove_device_files(qib_super, dd);
+
+	return ret;
+}
+
+static struct file_system_type qibfs_fs_type = {
+	.owner =        THIS_MODULE,
+	.name =         "ipathfs",
+	.get_sb =       qibfs_get_sb,
+	.kill_sb =      qibfs_kill_super,
+};
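+
+/*
+ * The filesystem retains the "ipathfs" name used by the older ipath
+ * driver so that existing tools keep working.  A typical (illustrative)
+ * mount is:
+ *
+ *	mount -t ipathfs none /ipathfs
+ */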
+
+int __init qib_init_qibfs(void)
+{
+	return register_filesystem(&qibfs_fs_type);
+}
+
+int __exit qib_exit_qibfs(void)
+{
+	return unregister_filesystem(&qibfs_fs_type);
+}
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
new file mode 100644
index 000000000000..7b6549fd429b
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_iba6120.c
@@ -0,0 +1,3588 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
+ * All rights reserved.
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+/*
+ * This file contains all of the code that is specific to the
+ * QLogic_IB 6120 PCIe chip.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <rdma/ib_verbs.h>
+
+#include "qib.h"
+#include "qib_6120_regs.h"
+
+static void qib_6120_setup_setextled(struct qib_pportdata *, u32);
+static void sendctrl_6120_mod(struct qib_pportdata *ppd, u32 op);
+static u8 qib_6120_phys_portstate(u64);
+static u32 qib_6120_iblink_state(u64);
+
+/*
+ * This file contains all the chip-specific register information and
+ * access functions for the QLogic_IB PCI-Express chip.
+ */
+
+/* KREG_IDX uses machine-generated #defines */
+#define KREG_IDX(regname) (QIB_6120_##regname##_OFFS / sizeof(u64))
+
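+/*
+ * For example, KREG_IDX(Scratch) expands to
+ * (QIB_6120_Scratch_OFFS / sizeof(u64)), the u64 index of the Scratch
+ * register within the mapped register space, so that
+ *
+ *	qib_write_kreg(dd, kr_scratch, val);
+ *
+ * (defined below) stores to dd->kregbase[QIB_6120_Scratch_OFFS / sizeof(u64)].
+ */
+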
+/* Use defines to tie machine-generated names to lower-case names */
+#define kr_extctrl KREG_IDX(EXTCtrl)
+#define kr_extstatus KREG_IDX(EXTStatus)
+#define kr_gpio_clear KREG_IDX(GPIOClear)
+#define kr_gpio_mask KREG_IDX(GPIOMask)
+#define kr_gpio_out KREG_IDX(GPIOOut)
+#define kr_gpio_status KREG_IDX(GPIOStatus)
+#define kr_rcvctrl KREG_IDX(RcvCtrl)
+#define kr_sendctrl KREG_IDX(SendCtrl)
+#define kr_partitionkey KREG_IDX(RcvPartitionKey)
+#define kr_hwdiagctrl KREG_IDX(HwDiagCtrl)
+#define kr_ibcstatus KREG_IDX(IBCStatus)
+#define kr_ibcctrl KREG_IDX(IBCCtrl)
+#define kr_sendbuffererror KREG_IDX(SendBufErr0)
+#define kr_rcvbthqp KREG_IDX(RcvBTHQP)
+#define kr_counterregbase KREG_IDX(CntrRegBase)
+#define kr_palign KREG_IDX(PageAlign)
+#define kr_rcvegrbase KREG_IDX(RcvEgrBase)
+#define kr_rcvegrcnt KREG_IDX(RcvEgrCnt)
+#define kr_rcvhdrcnt KREG_IDX(RcvHdrCnt)
+#define kr_rcvhdrentsize KREG_IDX(RcvHdrEntSize)
+#define kr_rcvhdrsize KREG_IDX(RcvHdrSize)
+#define kr_rcvtidbase KREG_IDX(RcvTIDBase)
+#define kr_rcvtidcnt KREG_IDX(RcvTIDCnt)
+#define kr_scratch KREG_IDX(Scratch)
+#define kr_sendpioavailaddr KREG_IDX(SendPIOAvailAddr)
+#define kr_sendpiobufbase KREG_IDX(SendPIOBufBase)
+#define kr_sendpiobufcnt KREG_IDX(SendPIOBufCnt)
+#define kr_sendpiosize KREG_IDX(SendPIOSize)
+#define kr_sendregbase KREG_IDX(SendRegBase)
+#define kr_userregbase KREG_IDX(UserRegBase)
+#define kr_control KREG_IDX(Control)
+#define kr_intclear KREG_IDX(IntClear)
+#define kr_intmask KREG_IDX(IntMask)
+#define kr_intstatus KREG_IDX(IntStatus)
+#define kr_errclear KREG_IDX(ErrClear)
+#define kr_errmask KREG_IDX(ErrMask)
+#define kr_errstatus KREG_IDX(ErrStatus)
+#define kr_hwerrclear KREG_IDX(HwErrClear)
+#define kr_hwerrmask KREG_IDX(HwErrMask)
+#define kr_hwerrstatus KREG_IDX(HwErrStatus)
+#define kr_revision KREG_IDX(Revision)
+#define kr_portcnt KREG_IDX(PortCnt)
+#define kr_serdes_cfg0 KREG_IDX(SerdesCfg0)
+#define kr_serdes_cfg1 (kr_serdes_cfg0 + 1)
+#define kr_serdes_stat KREG_IDX(SerdesStat)
+#define kr_xgxs_cfg KREG_IDX(XGXSCfg)
+
+/* These must only be written via qib_write_kreg_ctxt() */
+#define kr_rcvhdraddr KREG_IDX(RcvHdrAddr0)
+#define kr_rcvhdrtailaddr KREG_IDX(RcvHdrTailAddr0)
+
+#define CREG_IDX(regname) ((QIB_6120_##regname##_OFFS - \
+			QIB_6120_LBIntCnt_OFFS) / sizeof(u64))
+
+#define cr_badformat CREG_IDX(RxBadFormatCnt)
+#define cr_erricrc CREG_IDX(RxICRCErrCnt)
+#define cr_errlink CREG_IDX(RxLinkProblemCnt)
+#define cr_errlpcrc CREG_IDX(RxLPCRCErrCnt)
+#define cr_errpkey CREG_IDX(RxPKeyMismatchCnt)
+#define cr_rcvflowctrl_err CREG_IDX(RxFlowCtrlErrCnt)
+#define cr_err_rlen CREG_IDX(RxLenErrCnt)
+#define cr_errslen CREG_IDX(TxLenErrCnt)
+#define cr_errtidfull CREG_IDX(RxTIDFullErrCnt)
+#define cr_errtidvalid CREG_IDX(RxTIDValidErrCnt)
+#define cr_errvcrc CREG_IDX(RxVCRCErrCnt)
+#define cr_ibstatuschange CREG_IDX(IBStatusChangeCnt)
+#define cr_lbint CREG_IDX(LBIntCnt)
+#define cr_invalidrlen CREG_IDX(RxMaxMinLenErrCnt)
+#define cr_invalidslen CREG_IDX(TxMaxMinLenErrCnt)
+#define cr_lbflowstall CREG_IDX(LBFlowStallCnt)
+#define cr_pktrcv CREG_IDX(RxDataPktCnt)
+#define cr_pktrcvflowctrl CREG_IDX(RxFlowPktCnt)
+#define cr_pktsend CREG_IDX(TxDataPktCnt)
+#define cr_pktsendflow CREG_IDX(TxFlowPktCnt)
+#define cr_portovfl CREG_IDX(RxP0HdrEgrOvflCnt)
+#define cr_rcvebp CREG_IDX(RxEBPCnt)
+#define cr_rcvovfl CREG_IDX(RxBufOvflCnt)
+#define cr_senddropped CREG_IDX(TxDroppedPktCnt)
+#define cr_sendstall CREG_IDX(TxFlowStallCnt)
+#define cr_sendunderrun CREG_IDX(TxUnderrunCnt)
+#define cr_wordrcv CREG_IDX(RxDwordCnt)
+#define cr_wordsend CREG_IDX(TxDwordCnt)
+#define cr_txunsupvl CREG_IDX(TxUnsupVLErrCnt)
+#define cr_rxdroppkt CREG_IDX(RxDroppedPktCnt)
+#define cr_iblinkerrrecov CREG_IDX(IBLinkErrRecoveryCnt)
+#define cr_iblinkdown CREG_IDX(IBLinkDownedCnt)
+#define cr_ibsymbolerr CREG_IDX(IBSymbolErrCnt)
+
+#define SYM_RMASK(regname, fldname) ((u64)              \
+	QIB_6120_##regname##_##fldname##_RMASK)
+#define SYM_MASK(regname, fldname) ((u64)               \
+	QIB_6120_##regname##_##fldname##_RMASK <<       \
+	 QIB_6120_##regname##_##fldname##_LSB)
+#define SYM_LSB(regname, fldname) (QIB_6120_##regname##_##fldname##_LSB)
+
+#define SYM_FIELD(value, regname, fldname) ((u64) \
+	(((value) >> SYM_LSB(regname, fldname)) & \
+	 SYM_RMASK(regname, fldname)))
+#define ERR_MASK(fldname) SYM_MASK(ErrMask, fldname##Mask)
+#define HWE_MASK(fldname) SYM_MASK(HwErrMask, fldname##Mask)
+
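+/*
+ * Worked example (the field name is illustrative, taken from the
+ * machine-generated register header):
+ *
+ *	SYM_FIELD(ibcs, IBCStatus, LinkTrainingState)
+ *
+ * expands to ((ibcs >> QIB_6120_IBCStatus_LinkTrainingState_LSB) &
+ * QIB_6120_IBCStatus_LinkTrainingState_RMASK), i.e. the right-justified
+ * contents of that field.
+ */
+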
+/* link training states, from IBC */
+#define IB_6120_LT_STATE_DISABLED        0x00
+#define IB_6120_LT_STATE_LINKUP          0x01
+#define IB_6120_LT_STATE_POLLACTIVE      0x02
+#define IB_6120_LT_STATE_POLLQUIET       0x03
+#define IB_6120_LT_STATE_SLEEPDELAY      0x04
+#define IB_6120_LT_STATE_SLEEPQUIET      0x05
+#define IB_6120_LT_STATE_CFGDEBOUNCE     0x08
+#define IB_6120_LT_STATE_CFGRCVFCFG      0x09
+#define IB_6120_LT_STATE_CFGWAITRMT      0x0a
+#define IB_6120_LT_STATE_CFGIDLE         0x0b
+#define IB_6120_LT_STATE_RECOVERRETRAIN  0x0c
+#define IB_6120_LT_STATE_RECOVERWAITRMT  0x0e
+#define IB_6120_LT_STATE_RECOVERIDLE     0x0f
+
+/* link state machine states from IBC */
+#define IB_6120_L_STATE_DOWN             0x0
+#define IB_6120_L_STATE_INIT             0x1
+#define IB_6120_L_STATE_ARM              0x2
+#define IB_6120_L_STATE_ACTIVE           0x3
+#define IB_6120_L_STATE_ACT_DEFER        0x4
+
+static const u8 qib_6120_physportstate[0x20] = {
+	[IB_6120_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
+	[IB_6120_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
+	[IB_6120_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
+	[IB_6120_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
+	[IB_6120_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
+	[IB_6120_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
+	[IB_6120_LT_STATE_CFGDEBOUNCE] =
+		IB_PHYSPORTSTATE_CFG_TRAIN,
+	[IB_6120_LT_STATE_CFGRCVFCFG] =
+		IB_PHYSPORTSTATE_CFG_TRAIN,
+	[IB_6120_LT_STATE_CFGWAITRMT] =
+		IB_PHYSPORTSTATE_CFG_TRAIN,
+	[IB_6120_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_TRAIN,
+	[IB_6120_LT_STATE_RECOVERRETRAIN] =
+		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
+	[IB_6120_LT_STATE_RECOVERWAITRMT] =
+		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
+	[IB_6120_LT_STATE_RECOVERIDLE] =
+		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
+	[0x10] = IB_PHYSPORTSTATE_CFG_TRAIN,
+	[0x11] = IB_PHYSPORTSTATE_CFG_TRAIN,
+	[0x12] = IB_PHYSPORTSTATE_CFG_TRAIN,
+	[0x13] = IB_PHYSPORTSTATE_CFG_TRAIN,
+	[0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
+	[0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
+	[0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
+	[0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
+};
+
+struct qib_chip_specific {
+	u64 __iomem *cregbase;
+	u64 *cntrs;
+	u64 *portcntrs;
+	void *dummy_hdrq;   /* used after ctxt close */
+	dma_addr_t dummy_hdrq_phys;
+	spinlock_t kernel_tid_lock; /* no back to back kernel TID writes */
+	spinlock_t user_tid_lock; /* no back to back user TID writes */
+	spinlock_t rcvmod_lock; /* protect rcvctrl shadow changes */
+	spinlock_t gpio_lock; /* RMW of shadows/regs for ExtCtrl and GPIO */
+	u64 hwerrmask;
+	u64 errormask;
+	u64 gpio_out; /* shadow of kr_gpio_out, for rmw ops */
+	u64 gpio_mask; /* shadow the gpio mask register */
+	u64 extctrl; /* shadow the gpio output enable, etc... */
+	/*
+	 * these 5 fields are used to establish deltas for IB symbol
+	 * errors and linkrecovery errors.  They can be reported on
+	 * some chips during link negotiation prior to INIT, and with
+	 * DDR when faking DDR negotiations with non-IBTA switches.
+	 * The chip counters are adjusted at driver unload if there is
+	 * a non-zero delta.
+	 */
+	u64 ibdeltainprog;
+	u64 ibsymdelta;
+	u64 ibsymsnap;
+	u64 iblnkerrdelta;
+	u64 iblnkerrsnap;
+	u64 ibcctrl; /* shadow for kr_ibcctrl */
+	u32 lastlinkrecov; /* link recovery issue */
+	int irq;
+	u32 cntrnamelen;
+	u32 portcntrnamelen;
+	u32 ncntrs;
+	u32 nportcntrs;
+	/* used with gpio interrupts to implement IB counters */
+	u32 rxfc_unsupvl_errs;
+	u32 overrun_thresh_errs;
+	/*
+	 * these count only cases where _successive_ LocalLinkIntegrity
+	 * errors were seen in the receive headers of IB standard packets
+	 */
+	u32 lli_errs;
+	u32 lli_counter;
+	u64 lli_thresh;
+	u64 sword; /* total dwords sent (sample result) */
+	u64 rword; /* total dwords received (sample result) */
+	u64 spkts; /* total packets sent (sample result) */
+	u64 rpkts; /* total packets received (sample result) */
+	u64 xmit_wait; /* # of ticks no data sent (sample result) */
+	struct timer_list pma_timer;
+	char emsgbuf[128];
+	char bitsmsgbuf[64];
+	u8 pma_sample_status;
+};
+
+/* ibcctrl bits */
+#define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1
+/* cycle through TS1/TS2 till OK */
+#define QLOGIC_IB_IBCC_LINKINITCMD_POLL 2
+/* wait for TS1, then go on */
+#define QLOGIC_IB_IBCC_LINKINITCMD_SLEEP 3
+#define QLOGIC_IB_IBCC_LINKINITCMD_SHIFT 16
+
+#define QLOGIC_IB_IBCC_LINKCMD_DOWN 1           /* move to 0x11 */
+#define QLOGIC_IB_IBCC_LINKCMD_ARMED 2          /* move to 0x21 */
+#define QLOGIC_IB_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */
+#define QLOGIC_IB_IBCC_LINKCMD_SHIFT 18
+
+/*
+ * We could have a single register get/put routine that takes a group type,
+ * but this is somewhat clearer and cleaner.  It also gives us some error
+ * checking.  64-bit register reads should always work, but are inefficient
+ * on Opteron (the northbridge always generates 2 separate HT 32-bit reads),
+ * so we use kreg32 wherever possible.  User register and counter register
+ * reads are always 32-bit reads, so there is only one form of those routines.
+ */
+
+/**
+ * qib_read_ureg32 - read 32-bit virtualized per-context register
+ * @dd: device
+ * @regno: register number
+ * @ctxt: context number
+ *
+ * Return the contents of a register that is virtualized to be per context.
+ * Returns 0 on errors (not distinguishable from valid contents at
+ * runtime; we may add a separate error variable at some point).
+ */
+static inline u32 qib_read_ureg32(const struct qib_devdata *dd,
+				  enum qib_ureg regno, int ctxt)
+{
+	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
+		return 0;
+
+	if (dd->userbase)
+		return readl(regno + (u64 __iomem *)
+			     ((char __iomem *)dd->userbase +
+			      dd->ureg_align * ctxt));
+	else
+		return readl(regno + (u64 __iomem *)
+			     (dd->uregbase +
+			      (char __iomem *)dd->kregbase +
+			      dd->ureg_align * ctxt));
+}
+
+/**
+ * qib_write_ureg - write virtualized per-context register
+ * @dd: device
+ * @regno: register number
+ * @value: value
+ * @ctxt: context
+ *
+ * Write the contents of a register that is virtualized to be per context.
+ */
+static inline void qib_write_ureg(const struct qib_devdata *dd,
+				  enum qib_ureg regno, u64 value, int ctxt)
+{
+	u64 __iomem *ubase;
+
+	if (dd->userbase)
+		ubase = (u64 __iomem *)
+			((char __iomem *) dd->userbase +
+			 dd->ureg_align * ctxt);
+	else
+		ubase = (u64 __iomem *)
+			(dd->uregbase +
+			 (char __iomem *) dd->kregbase +
+			 dd->ureg_align * ctxt);
+
+	if (dd->kregbase && (dd->flags & QIB_PRESENT))
+		writeq(value, &ubase[regno]);
+}
+
+static inline u32 qib_read_kreg32(const struct qib_devdata *dd,
+				  const u16 regno)
+{
+	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
+		return -1;
+	return readl((u32 __iomem *)&dd->kregbase[regno]);
+}
+
+static inline u64 qib_read_kreg64(const struct qib_devdata *dd,
+				  const u16 regno)
+{
+	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
+		return -1;
+
+	return readq(&dd->kregbase[regno]);
+}
+
+static inline void qib_write_kreg(const struct qib_devdata *dd,
+				  const u16 regno, u64 value)
+{
+	if (dd->kregbase && (dd->flags & QIB_PRESENT))
+		writeq(value, &dd->kregbase[regno]);
+}
+
+/**
+ * qib_write_kreg_ctxt - write a device's per-ctxt 64-bit kernel register
+ * @dd: the qlogic_ib device
+ * @regno: the register number to write
+ * @ctxt: the context containing the register
+ * @value: the value to write
+ */
+static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd,
+				       const u16 regno, unsigned ctxt,
+				       u64 value)
+{
+	qib_write_kreg(dd, regno + ctxt, value);
+}
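+
+/*
+ * The per-context kernel registers (RcvHdrAddr0, RcvHdrAddr1, ...) are
+ * consecutive 64-bit registers, so, for example,
+ *
+ *	qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt, addr);
+ *
+ * writes dd->kregbase[kr_rcvhdraddr + ctxt], i.e. RcvHdrAddr<ctxt>.
+ */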
+
+static inline void write_6120_creg(const struct qib_devdata *dd,
+				   u16 regno, u64 value)
+{
+	if (dd->cspec->cregbase && (dd->flags & QIB_PRESENT))
+		writeq(value, &dd->cspec->cregbase[regno]);
+}
+
+static inline u64 read_6120_creg(const struct qib_devdata *dd, u16 regno)
+{
+	if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
+		return 0;
+	return readq(&dd->cspec->cregbase[regno]);
+}
+
+static inline u32 read_6120_creg32(const struct qib_devdata *dd, u16 regno)
+{
+	if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
+		return 0;
+	return readl(&dd->cspec->cregbase[regno]);
+}
+
+/* kr_control bits */
+#define QLOGIC_IB_C_RESET 1U
+
+/* kr_intstatus, kr_intclear, kr_intmask bits */
+#define QLOGIC_IB_I_RCVURG_MASK ((1U << 5) - 1)
+#define QLOGIC_IB_I_RCVURG_SHIFT 0
+#define QLOGIC_IB_I_RCVAVAIL_MASK ((1U << 5) - 1)
+#define QLOGIC_IB_I_RCVAVAIL_SHIFT 12
+
+#define QLOGIC_IB_C_FREEZEMODE 0x00000002
+#define QLOGIC_IB_C_LINKENABLE 0x00000004
+#define QLOGIC_IB_I_ERROR               0x0000000080000000ULL
+#define QLOGIC_IB_I_SPIOSENT            0x0000000040000000ULL
+#define QLOGIC_IB_I_SPIOBUFAVAIL        0x0000000020000000ULL
+#define QLOGIC_IB_I_GPIO                0x0000000010000000ULL
+#define QLOGIC_IB_I_BITSEXTANT \
+		((QLOGIC_IB_I_RCVURG_MASK << QLOGIC_IB_I_RCVURG_SHIFT) | \
+		(QLOGIC_IB_I_RCVAVAIL_MASK << \
+		 QLOGIC_IB_I_RCVAVAIL_SHIFT) | \
+		QLOGIC_IB_I_ERROR | QLOGIC_IB_I_SPIOSENT | \
+		QLOGIC_IB_I_SPIOBUFAVAIL | QLOGIC_IB_I_GPIO)
+
+/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */
+#define QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK  0x000000000000003fULL
+#define QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT 0
+#define QLOGIC_IB_HWE_PCIEPOISONEDTLP      0x0000000010000000ULL
+#define QLOGIC_IB_HWE_PCIECPLTIMEOUT       0x0000000020000000ULL
+#define QLOGIC_IB_HWE_PCIEBUSPARITYXTLH    0x0000000040000000ULL
+#define QLOGIC_IB_HWE_PCIEBUSPARITYXADM    0x0000000080000000ULL
+#define QLOGIC_IB_HWE_PCIEBUSPARITYRADM    0x0000000100000000ULL
+#define QLOGIC_IB_HWE_COREPLL_FBSLIP       0x0080000000000000ULL
+#define QLOGIC_IB_HWE_COREPLL_RFSLIP       0x0100000000000000ULL
+#define QLOGIC_IB_HWE_PCIE1PLLFAILED       0x0400000000000000ULL
+#define QLOGIC_IB_HWE_PCIE0PLLFAILED       0x0800000000000000ULL
+#define QLOGIC_IB_HWE_SERDESPLLFAILED      0x1000000000000000ULL
+
+/* kr_extstatus bits */
+#define QLOGIC_IB_EXTS_FREQSEL 0x2
+#define QLOGIC_IB_EXTS_SERDESSEL 0x4
+#define QLOGIC_IB_EXTS_MEMBIST_ENDTEST     0x0000000000004000
+#define QLOGIC_IB_EXTS_MEMBIST_FOUND       0x0000000000008000
+
+/* kr_xgxsconfig bits */
+#define QLOGIC_IB_XGXS_RESET          0x5ULL
+
+#define _QIB_GPIO_SDA_NUM 1
+#define _QIB_GPIO_SCL_NUM 0
+
+/* Bits in GPIO for the added IB link interrupts */
+#define GPIO_RXUVL_BIT 3
+#define GPIO_OVRUN_BIT 4
+#define GPIO_LLI_BIT 5
+#define GPIO_ERRINTR_MASK 0x38
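+
+/*
+ * GPIO_ERRINTR_MASK covers exactly the three link-interrupt bits above:
+ * (1 << GPIO_RXUVL_BIT) | (1 << GPIO_OVRUN_BIT) | (1 << GPIO_LLI_BIT)
+ * == 0x08 | 0x10 | 0x20 == 0x38.
+ */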
+
+#define QLOGIC_IB_RT_BUFSIZE_MASK 0xe0000000ULL
+#define QLOGIC_IB_RT_BUFSIZE_SHIFTVAL(tid) \
+	((((tid) & QLOGIC_IB_RT_BUFSIZE_MASK) >> 29) + 11 - 1)
+#define QLOGIC_IB_RT_BUFSIZE(tid) (1 << QLOGIC_IB_RT_BUFSIZE_SHIFTVAL(tid))
+#define QLOGIC_IB_RT_IS_VALID(tid) \
+	(((tid) & QLOGIC_IB_RT_BUFSIZE_MASK) && \
+	 ((((tid) & QLOGIC_IB_RT_BUFSIZE_MASK) != QLOGIC_IB_RT_BUFSIZE_MASK)))
+#define QLOGIC_IB_RT_ADDR_MASK 0x1FFFFFFFULL /* 29 bits valid */
+#define QLOGIC_IB_RT_ADDR_SHIFT 10
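+
+/*
+ * Worked example of the TID-entry encoding above: if bits 31:29 of a
+ * TID entry hold the value 1, then
+ *
+ *	QLOGIC_IB_RT_BUFSIZE_SHIFTVAL(tid) == 1 + 11 - 1 == 11
+ *	QLOGIC_IB_RT_BUFSIZE(tid)          == 1 << 11    == 2048 bytes
+ *
+ * A size field of all zeros or all ones fails QLOGIC_IB_RT_IS_VALID().
+ */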
+
+#define QLOGIC_IB_R_INTRAVAIL_SHIFT 16
+#define QLOGIC_IB_R_TAILUPD_SHIFT 31
+#define IBA6120_R_PKEY_DIS_SHIFT 30
+
+#define PBC_6120_VL15_SEND_CTRL (1ULL << 31) /* pbc; VL15; link_buf only */
+
+#define IBCBUSFRSPCPARITYERR HWE_MASK(IBCBusFromSPCParityErr)
+#define IBCBUSTOSPCPARITYERR HWE_MASK(IBCBusToSPCParityErr)
+
+#define SYM_MASK_BIT(regname, fldname, bit) ((u64) \
+	((1ULL << (SYM_LSB(regname, fldname) + (bit)))))
+
+#define TXEMEMPARITYERR_PIOBUF \
+	SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 0)
+#define TXEMEMPARITYERR_PIOPBC \
+	SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 1)
+#define TXEMEMPARITYERR_PIOLAUNCHFIFO \
+	SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 2)
+
+#define RXEMEMPARITYERR_RCVBUF \
+	SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 0)
+#define RXEMEMPARITYERR_LOOKUPQ \
+	SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 1)
+#define RXEMEMPARITYERR_EXPTID \
+	SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 2)
+#define RXEMEMPARITYERR_EAGERTID \
+	SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 3)
+#define RXEMEMPARITYERR_FLAGBUF \
+	SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 4)
+#define RXEMEMPARITYERR_DATAINFO \
+	SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 5)
+#define RXEMEMPARITYERR_HDRINFO \
+	SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 6)
+
+/* 6120 specific hardware errors... */
+static const struct qib_hwerror_msgs qib_6120_hwerror_msgs[] = {
+	/* generic hardware errors */
+	QLOGIC_IB_HWE_MSG(IBCBUSFRSPCPARITYERR, "QIB2IB Parity"),
+	QLOGIC_IB_HWE_MSG(IBCBUSTOSPCPARITYERR, "IB2QIB Parity"),
+
+	QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOBUF,
+			  "TXE PIOBUF Memory Parity"),
+	QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOPBC,
+			  "TXE PIOPBC Memory Parity"),
+	QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOLAUNCHFIFO,
+			  "TXE PIOLAUNCHFIFO Memory Parity"),
+
+	QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_RCVBUF,
+			  "RXE RCVBUF Memory Parity"),
+	QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_LOOKUPQ,
+			  "RXE LOOKUPQ Memory Parity"),
+	QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_EAGERTID,
+			  "RXE EAGERTID Memory Parity"),
+	QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_EXPTID,
+			  "RXE EXPTID Memory Parity"),
+	QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_FLAGBUF,
+			  "RXE FLAGBUF Memory Parity"),
+	QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_DATAINFO,
+			  "RXE DATAINFO Memory Parity"),
+	QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_HDRINFO,
+			  "RXE HDRINFO Memory Parity"),
+
+	/* chip-specific hardware errors */
+	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEPOISONEDTLP,
+			  "PCIe Poisoned TLP"),
+	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIECPLTIMEOUT,
+			  "PCIe completion timeout"),
+	/*
+	 * In practice, it's unlikely that we'll see PCIe PLL, bus parity,
+	 * or memory parity error failures, because most likely we won't
+	 * be able to talk to the core of the chip at all.  Nonetheless, we
+	 * might see them, if they are in parts of the PCIe core that aren't
+	 * essential.
+	 */
+	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE1PLLFAILED,
+			  "PCIePLL1"),
+	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE0PLLFAILED,
+			  "PCIePLL0"),
+	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYXTLH,
+			  "PCIe XTLH core parity"),
+	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYXADM,
+			  "PCIe ADM TX core parity"),
+	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYRADM,
+			  "PCIe ADM RX core parity"),
+	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_SERDESPLLFAILED,
+			  "SerDes PLL"),
+};
+
+#define TXE_PIO_PARITY (TXEMEMPARITYERR_PIOBUF | TXEMEMPARITYERR_PIOPBC)
+#define _QIB_PLL_FAIL (QLOGIC_IB_HWE_COREPLL_FBSLIP |   \
+		QLOGIC_IB_HWE_COREPLL_RFSLIP)
+
+/* variables for sanity checking interrupts and errors */
+#define IB_HWE_BITSEXTANT \
+	(HWE_MASK(RXEMemParityErr) |					\
+	 HWE_MASK(TXEMemParityErr) |					\
+	 (QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK <<			\
+	  QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) |			\
+	 QLOGIC_IB_HWE_PCIE1PLLFAILED |					\
+	 QLOGIC_IB_HWE_PCIE0PLLFAILED |					\
+	 QLOGIC_IB_HWE_PCIEPOISONEDTLP |				\
+	 QLOGIC_IB_HWE_PCIECPLTIMEOUT |					\
+	 QLOGIC_IB_HWE_PCIEBUSPARITYXTLH |				\
+	 QLOGIC_IB_HWE_PCIEBUSPARITYXADM |				\
+	 QLOGIC_IB_HWE_PCIEBUSPARITYRADM |				\
+	 HWE_MASK(PowerOnBISTFailed) |					\
+	 QLOGIC_IB_HWE_COREPLL_FBSLIP |					\
+	 QLOGIC_IB_HWE_COREPLL_RFSLIP |					\
+	 QLOGIC_IB_HWE_SERDESPLLFAILED |				\
+	 HWE_MASK(IBCBusToSPCParityErr) |				\
+	 HWE_MASK(IBCBusFromSPCParityErr))
+
+#define IB_E_BITSEXTANT \
+	(ERR_MASK(RcvFormatErr) | ERR_MASK(RcvVCRCErr) |		\
+	 ERR_MASK(RcvICRCErr) | ERR_MASK(RcvMinPktLenErr) |		\
+	 ERR_MASK(RcvMaxPktLenErr) | ERR_MASK(RcvLongPktLenErr) |	\
+	 ERR_MASK(RcvShortPktLenErr) | ERR_MASK(RcvUnexpectedCharErr) | \
+	 ERR_MASK(RcvUnsupportedVLErr) | ERR_MASK(RcvEBPErr) |		\
+	 ERR_MASK(RcvIBFlowErr) | ERR_MASK(RcvBadVersionErr) |		\
+	 ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr) |		\
+	 ERR_MASK(RcvBadTidErr) | ERR_MASK(RcvHdrLenErr) |		\
+	 ERR_MASK(RcvHdrErr) | ERR_MASK(RcvIBLostLinkErr) |		\
+	 ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendMaxPktLenErr) |	\
+	 ERR_MASK(SendUnderRunErr) | ERR_MASK(SendPktLenErr) |		\
+	 ERR_MASK(SendDroppedSmpPktErr) |				\
+	 ERR_MASK(SendDroppedDataPktErr) |				\
+	 ERR_MASK(SendPioArmLaunchErr) |				\
+	 ERR_MASK(SendUnexpectedPktNumErr) |				\
+	 ERR_MASK(SendUnsupportedVLErr) | ERR_MASK(IBStatusChanged) |	\
+	 ERR_MASK(InvalidAddrErr) | ERR_MASK(ResetNegated) |		\
+	 ERR_MASK(HardwareErr))
+
+#define QLOGIC_IB_E_PKTERRS ( \
+		ERR_MASK(SendPktLenErr) |				\
+		ERR_MASK(SendDroppedDataPktErr) |			\
+		ERR_MASK(RcvVCRCErr) |					\
+		ERR_MASK(RcvICRCErr) |					\
+		ERR_MASK(RcvShortPktLenErr) |				\
+		ERR_MASK(RcvEBPErr))
+
+/* These are all rcv-related errors which we want to count for stats */
+#define E_SUM_PKTERRS						\
+	(ERR_MASK(RcvHdrLenErr) | ERR_MASK(RcvBadTidErr) |		\
+	 ERR_MASK(RcvBadVersionErr) | ERR_MASK(RcvHdrErr) |		\
+	 ERR_MASK(RcvLongPktLenErr) | ERR_MASK(RcvShortPktLenErr) |	\
+	 ERR_MASK(RcvMaxPktLenErr) | ERR_MASK(RcvMinPktLenErr) |	\
+	 ERR_MASK(RcvFormatErr) | ERR_MASK(RcvUnsupportedVLErr) |	\
+	 ERR_MASK(RcvUnexpectedCharErr) | ERR_MASK(RcvEBPErr))
+
+/* These are all send-related errors which we want to count for stats */
+#define E_SUM_ERRS							\
+	(ERR_MASK(SendPioArmLaunchErr) |				\
+	 ERR_MASK(SendUnexpectedPktNumErr) |				\
+	 ERR_MASK(SendDroppedDataPktErr) |				\
+	 ERR_MASK(SendDroppedSmpPktErr) |				\
+	 ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendUnsupportedVLErr) |	\
+	 ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendPktLenErr) |		\
+	 ERR_MASK(InvalidAddrErr))
+
+/*
+ * This is similar to E_SUM_ERRS, but we can't ignore armlaunch errors,
+ * nor errors unrelated to freeze and buffer cancellation.  Armlaunch
+ * can't be ignored because more can occur while we are still cleaning
+ * up, and those need to be cancelled as they happen.
+ */
+#define E_SPKT_ERRS_IGNORE \
+	(ERR_MASK(SendDroppedDataPktErr) |				\
+	 ERR_MASK(SendDroppedSmpPktErr) |				\
+	 ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendMinPktLenErr) |	\
+	 ERR_MASK(SendPktLenErr))
+
+/*
+ * These are errors that can occur when the link changes state while
+ * a packet is being sent or received.  This doesn't cover things
+ * like EBP or VCRC that can result from the sender's link changing
+ * state, so that we receive a "known bad" packet.
+ */
+#define E_SUM_LINK_PKTERRS		\
+	(ERR_MASK(SendDroppedDataPktErr) |				\
+	 ERR_MASK(SendDroppedSmpPktErr) |				\
+	 ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendPktLenErr) |		\
+	 ERR_MASK(RcvShortPktLenErr) | ERR_MASK(RcvMinPktLenErr) |	\
+	 ERR_MASK(RcvUnexpectedCharErr))
+
+static void qib_6120_put_tid_2(struct qib_devdata *, u64 __iomem *,
+			       u32, unsigned long);
+
+/*
+ * On platforms using this chip, and not having ordered WC stores, we
+ * can get TXE parity errors due to speculative reads to the PIO buffers,
+ * and this, due to a chip issue, can result in (many) false parity error
+ * reports.  So it's a debug print on those, and an info print on systems
+ * where the speculative reads don't occur.
+ */
+static void qib_6120_txe_recover(struct qib_devdata *dd)
+{
+	if (!qib_unordered_wc())
+		qib_devinfo(dd->pcidev,
+			    "Recovering from TXE PIO parity error\n");
+}
+
+/* enable/disable chip from delivering interrupts */
+static void qib_6120_set_intr_state(struct qib_devdata *dd, u32 enable)
+{
+	if (enable) {
+		if (dd->flags & QIB_BADINTR)
+			return;
+		qib_write_kreg(dd, kr_intmask, ~0ULL);
+		/* force re-interrupt of any pending interrupts. */
+		qib_write_kreg(dd, kr_intclear, 0ULL);
+	} else
+		qib_write_kreg(dd, kr_intmask, 0ULL);
+}
+
+/*
+ * Try to cleanup as much as possible for anything that might have gone
+ * wrong while in freeze mode, such as pio buffers being written by user
+ * processes (causing armlaunch), send errors due to going into freeze mode,
+ * etc., and try to avoid causing extra interrupts while doing so.
+ * Forcibly update the in-memory pioavail register copies after cleanup
+ * because the chip won't do it while in freeze mode (the register values
+ * themselves are kept correct).
+ * Make sure that we don't lose any important interrupts by using the chip
+ * feature that says that writing 0 to a bit in *clear that is set in
+ * *status will cause an interrupt to be generated again (if allowed by
+ * the *mask value).
+ * This is in chip-specific code because of all of the register accesses,
+ * even though the details are similar on most chips
+ */
+static void qib_6120_clear_freeze(struct qib_devdata *dd)
+{
+	/* disable error interrupts, to avoid confusion */
+	qib_write_kreg(dd, kr_errmask, 0ULL);
+
+	/* also disable interrupts; errormask is sometimes overwritten */
+	qib_6120_set_intr_state(dd, 0);
+
+	qib_cancel_sends(dd->pport);
+
+	/* clear the freeze, and be sure chip saw it */
+	qib_write_kreg(dd, kr_control, dd->control);
+	qib_read_kreg32(dd, kr_scratch);
+
+	/* force in-memory update now we are out of freeze */
+	qib_force_pio_avail_update(dd);
+
+	/*
+	 * force new interrupt if any hwerr, error or interrupt bits are
+	 * still set, and clear "safe" send packet errors related to freeze
+	 * and cancelling sends.  Re-enable error interrupts before possible
+	 * force of re-interrupt on pending interrupts.
+	 */
+	qib_write_kreg(dd, kr_hwerrclear, 0ULL);
+	qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE);
+	qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
+	qib_6120_set_intr_state(dd, 1);
+}
+
+/**
+ * qib_handle_6120_hwerrors - display hardware errors.
+ * @dd: the qlogic_ib device
+ * @msg: the output buffer
+ * @msgl: the size of the output buffer
+ *
+ * Most hardware errors are catastrophic, but for right now we'll print
+ * them and continue.  Reuse the same message buffer as
+ * handle_6120_errors() to avoid excessive stack usage.
+ */
+static void qib_handle_6120_hwerrors(struct qib_devdata *dd, char *msg,
+				     size_t msgl)
+{
+	u64 hwerrs;
+	u32 bits, ctrl;
+	int isfatal = 0;
+	char *bitsmsg;
+	int log_idx;
+
+	hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
+	if (!hwerrs)
+		return;
+	if (hwerrs == ~0ULL) {
+		qib_dev_err(dd, "Read of hardware error status failed "
+			    "(all bits set); ignoring\n");
+		return;
+	}
+	qib_stats.sps_hwerrs++;
+
+	/*
+	 * Always clear the error status register, except MEMBISTFAIL,
+	 * regardless of whether we continue or stop using the chip.
+	 * We want that set so we know it failed, even across driver reload.
+	 * We'll still ignore it in the hwerrmask.  We do this partly for
+	 * diagnostics, but also for support.
+	 */
+	qib_write_kreg(dd, kr_hwerrclear,
+		       hwerrs & ~HWE_MASK(PowerOnBISTFailed));
+
+	hwerrs &= dd->cspec->hwerrmask;
+
+	/* We log some errors to EEPROM, check if we have any of those. */
+	for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
+		if (hwerrs & dd->eep_st_masks[log_idx].hwerrs_to_log)
+			qib_inc_eeprom_err(dd, log_idx, 1);
+
+	/*
+	 * Make sure we get this much out, unless told to be quiet,
+	 * or it's occurred within the last 5 seconds.
+	 */
+	if (hwerrs & ~(TXE_PIO_PARITY | RXEMEMPARITYERR_EAGERTID))
+		qib_devinfo(dd->pcidev, "Hardware error: hwerr=0x%llx "
+			 "(cleared)\n", (unsigned long long) hwerrs);
+
+	if (hwerrs & ~IB_HWE_BITSEXTANT)
+		qib_dev_err(dd, "hwerror interrupt with unknown errors "
+			    "%llx set\n", (unsigned long long)
+			    (hwerrs & ~IB_HWE_BITSEXTANT));
+
+	ctrl = qib_read_kreg32(dd, kr_control);
+	if ((ctrl & QLOGIC_IB_C_FREEZEMODE) && !dd->diag_client) {
+		/*
+		 * Parity errors in send memory are recoverable,
+		 * just cancel the send (if indicated in sendbuffererror),
+		 * count the occurrence, unfreeze (if no other handled
+		 * hardware error bits are set), and continue. They can
+		 * occur if a processor speculative read is done to the PIO
+		 * buffer while we are sending a packet, for example.
+		 */
+		if (hwerrs & TXE_PIO_PARITY) {
+			qib_6120_txe_recover(dd);
+			hwerrs &= ~TXE_PIO_PARITY;
+		}
+
+		if (!hwerrs) {
+			static u32 freeze_cnt;
+
+			freeze_cnt++;
+			qib_6120_clear_freeze(dd);
+		} else
+			isfatal = 1;
+	}
+
+	*msg = '\0';
+
+	if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {
+		isfatal = 1;
+		strlcat(msg, "[Memory BIST test failed, InfiniPath hardware"
+			" unusable]", msgl);
+		/* ignore from now on, so disable until driver reloaded */
+		dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
+		qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
+	}
+
+	qib_format_hwerrors(hwerrs, qib_6120_hwerror_msgs,
+			    ARRAY_SIZE(qib_6120_hwerror_msgs), msg, msgl);
+
+	bitsmsg = dd->cspec->bitsmsgbuf;
+	if (hwerrs & (QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK <<
+		      QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT)) {
+		bits = (u32) ((hwerrs >>
+			       QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) &
+			      QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK);
+		snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf,
+			 "[PCIe Mem Parity Errs %x] ", bits);
+		strlcat(msg, bitsmsg, msgl);
+	}
+
+	if (hwerrs & _QIB_PLL_FAIL) {
+		isfatal = 1;
+		snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf,
+			 "[PLL failed (%llx), InfiniPath hardware unusable]",
+			 (unsigned long long) hwerrs & _QIB_PLL_FAIL);
+		strlcat(msg, bitsmsg, msgl);
+		/* ignore from now on, so disable until driver reloaded */
+		dd->cspec->hwerrmask &= ~(hwerrs & _QIB_PLL_FAIL);
+		qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
+	}
+
+	if (hwerrs & QLOGIC_IB_HWE_SERDESPLLFAILED) {
+		/*
+		 * If it occurs, it is left masked since the external
+		 * interface is unused
+		 */
+		dd->cspec->hwerrmask &= ~QLOGIC_IB_HWE_SERDESPLLFAILED;
+		qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
+	}
+
+	if (hwerrs)
+		/*
+		 * if any set that we aren't ignoring; only
+		 * make the complaint once, in case it's stuck
+		 * or recurring, and we get here multiple
+		 * times.
+		 */
+		qib_dev_err(dd, "%s hardware error\n", msg);
+	else
+		*msg = 0; /* recovered from all of them */
+
+	if (isfatal && !dd->diag_client) {
+		qib_dev_err(dd, "Fatal Hardware Error, no longer"
+			    " usable, SN %.16s\n", dd->serial);
+		/*
+		 * for /sys status file and user programs to print; if no
+		 * trailing brace is copied, we'll know it was truncated.
+		 */
+		if (dd->freezemsg)
+			snprintf(dd->freezemsg, dd->freezelen,
+				 "{%s}", msg);
+		qib_disable_after_error(dd);
+	}
+}
+
+/*
+ * Decode the error status into strings, deciding whether to always
+ * print it or not depending on "normal packet errors" vs everything
+ * else.  Return 1 for "real" errors, otherwise 0 if only packet
+ * errors, so the caller can decide what to print with the string.
+ */
+static int qib_decode_6120_err(struct qib_devdata *dd, char *buf, size_t blen,
+			       u64 err)
+{
+	int iserr = 1;
+
+	*buf = '\0';
+	if (err & QLOGIC_IB_E_PKTERRS) {
+		if (!(err & ~QLOGIC_IB_E_PKTERRS))
+			iserr = 0;
+		if ((err & ERR_MASK(RcvICRCErr)) &&
+		    !(err&(ERR_MASK(RcvVCRCErr)|ERR_MASK(RcvEBPErr))))
+			strlcat(buf, "CRC ", blen);
+		if (!iserr)
+			goto done;
+	}
+	if (err & ERR_MASK(RcvHdrLenErr))
+		strlcat(buf, "rhdrlen ", blen);
+	if (err & ERR_MASK(RcvBadTidErr))
+		strlcat(buf, "rbadtid ", blen);
+	if (err & ERR_MASK(RcvBadVersionErr))
+		strlcat(buf, "rbadversion ", blen);
+	if (err & ERR_MASK(RcvHdrErr))
+		strlcat(buf, "rhdr ", blen);
+	if (err & ERR_MASK(RcvLongPktLenErr))
+		strlcat(buf, "rlongpktlen ", blen);
+	if (err & ERR_MASK(RcvMaxPktLenErr))
+		strlcat(buf, "rmaxpktlen ", blen);
+	if (err & ERR_MASK(RcvMinPktLenErr))
+		strlcat(buf, "rminpktlen ", blen);
+	if (err & ERR_MASK(SendMinPktLenErr))
+		strlcat(buf, "sminpktlen ", blen);
+	if (err & ERR_MASK(RcvFormatErr))
+		strlcat(buf, "rformaterr ", blen);
+	if (err & ERR_MASK(RcvUnsupportedVLErr))
+		strlcat(buf, "runsupvl ", blen);
+	if (err & ERR_MASK(RcvUnexpectedCharErr))
+		strlcat(buf, "runexpchar ", blen);
+	if (err & ERR_MASK(RcvIBFlowErr))
+		strlcat(buf, "ribflow ", blen);
+	if (err & ERR_MASK(SendUnderRunErr))
+		strlcat(buf, "sunderrun ", blen);
+	if (err & ERR_MASK(SendPioArmLaunchErr))
+		strlcat(buf, "spioarmlaunch ", blen);
+	if (err & ERR_MASK(SendUnexpectedPktNumErr))
+		strlcat(buf, "sunexperrpktnum ", blen);
+	if (err & ERR_MASK(SendDroppedSmpPktErr))
+		strlcat(buf, "sdroppedsmppkt ", blen);
+	if (err & ERR_MASK(SendMaxPktLenErr))
+		strlcat(buf, "smaxpktlen ", blen);
+	if (err & ERR_MASK(SendUnsupportedVLErr))
+		strlcat(buf, "sunsupVL ", blen);
+	if (err & ERR_MASK(InvalidAddrErr))
+		strlcat(buf, "invalidaddr ", blen);
+	if (err & ERR_MASK(RcvEgrFullErr))
+		strlcat(buf, "rcvegrfull ", blen);
+	if (err & ERR_MASK(RcvHdrFullErr))
+		strlcat(buf, "rcvhdrfull ", blen);
+	if (err & ERR_MASK(IBStatusChanged))
+		strlcat(buf, "ibcstatuschg ", blen);
+	if (err & ERR_MASK(RcvIBLostLinkErr))
+		strlcat(buf, "riblostlink ", blen);
+	if (err & ERR_MASK(HardwareErr))
+		strlcat(buf, "hardware ", blen);
+	if (err & ERR_MASK(ResetNegated))
+		strlcat(buf, "reset ", blen);
+done:
+	return iserr;
+}
+
+/*
+ * Called when we might have an error that is specific to a particular
+ * PIO buffer, and may need to cancel that buffer, so it can be re-used.
+ */
+static void qib_disarm_6120_senderrbufs(struct qib_pportdata *ppd)
+{
+	unsigned long sbuf[2];
+	struct qib_devdata *dd = ppd->dd;
+
+	/*
+	 * It's possible that sendbuffererror could have bits set; might
+	 * have already done this as a result of hardware error handling.
+	 */
+	sbuf[0] = qib_read_kreg64(dd, kr_sendbuffererror);
+	sbuf[1] = qib_read_kreg64(dd, kr_sendbuffererror + 1);
+
+	if (sbuf[0] || sbuf[1])
+		qib_disarm_piobufs_set(dd, sbuf,
+				       dd->piobcnt2k + dd->piobcnt4k);
+}
+
+static int chk_6120_linkrecovery(struct qib_devdata *dd, u64 ibcs)
+{
+	int ret = 1;
+	u32 ibstate = qib_6120_iblink_state(ibcs);
+	u32 linkrecov = read_6120_creg32(dd, cr_iblinkerrrecov);
+
+	if (linkrecov != dd->cspec->lastlinkrecov) {
+		/* and no more until active again */
+		dd->cspec->lastlinkrecov = 0;
+		qib_set_linkstate(dd->pport, QIB_IB_LINKDOWN);
+		ret = 0;
+	}
+	if (ibstate == IB_PORT_ACTIVE)
+		dd->cspec->lastlinkrecov =
+			read_6120_creg32(dd, cr_iblinkerrrecov);
+	return ret;
+}
+
+static void handle_6120_errors(struct qib_devdata *dd, u64 errs)
+{
+	char *msg;
+	u64 ignore_this_time = 0;
+	u64 iserr = 0;
+	int log_idx;
+	struct qib_pportdata *ppd = dd->pport;
+	u64 mask;
+
+	/* don't report errors that are masked */
+	errs &= dd->cspec->errormask;
+	msg = dd->cspec->emsgbuf;
+
+	/* do these first, they are most important */
+	if (errs & ERR_MASK(HardwareErr))
+		qib_handle_6120_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf);
+	else
+		for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
+			if (errs & dd->eep_st_masks[log_idx].errs_to_log)
+				qib_inc_eeprom_err(dd, log_idx, 1);
+
+	if (errs & ~IB_E_BITSEXTANT)
+		qib_dev_err(dd, "error interrupt with unknown errors "
+			    "%llx set\n",
+			    (unsigned long long) (errs & ~IB_E_BITSEXTANT));
+
+	if (errs & E_SUM_ERRS) {
+		qib_disarm_6120_senderrbufs(ppd);
+		if ((errs & E_SUM_LINK_PKTERRS) &&
+		    !(ppd->lflags & QIBL_LINKACTIVE)) {
+			/*
+			 * This can happen when trying to bring the link
+			 * up, but the IB link changes state at the "wrong"
+			 * time. The IB logic then complains that the packet
+			 * isn't valid.  We don't want to confuse people, so
+			 * we just don't print them, except at debug
+			 */
+			ignore_this_time = errs & E_SUM_LINK_PKTERRS;
+		}
+	} else if ((errs & E_SUM_LINK_PKTERRS) &&
+		   !(ppd->lflags & QIBL_LINKACTIVE)) {
+		/*
+		 * This can happen when SMA is trying to bring the link
+		 * up, but the IB link changes state at the "wrong" time.
+		 * The IB logic then complains that the packet isn't
+		 * valid.  We don't want to confuse people, so we just
+		 * don't print them, except at debug
+		 */
+		ignore_this_time = errs & E_SUM_LINK_PKTERRS;
+	}
+
+	qib_write_kreg(dd, kr_errclear, errs);
+
+	errs &= ~ignore_this_time;
+	if (!errs)
+		goto done;
+
+	/*
+	 * The ones we mask off are handled specially below
+	 * or above.
+	 */
+	mask = ERR_MASK(IBStatusChanged) | ERR_MASK(RcvEgrFullErr) |
+		ERR_MASK(RcvHdrFullErr) | ERR_MASK(HardwareErr);
+	qib_decode_6120_err(dd, msg, sizeof dd->cspec->emsgbuf, errs & ~mask);
+
+	if (errs & E_SUM_PKTERRS)
+		qib_stats.sps_rcverrs++;
+	if (errs & E_SUM_ERRS)
+		qib_stats.sps_txerrs++;
+
+	iserr = errs & ~(E_SUM_PKTERRS | QLOGIC_IB_E_PKTERRS);
+
+	if (errs & ERR_MASK(IBStatusChanged)) {
+		u64 ibcs = qib_read_kreg64(dd, kr_ibcstatus);
+		u32 ibstate = qib_6120_iblink_state(ibcs);
+		int handle = 1;
+
+		if (ibstate != IB_PORT_INIT && dd->cspec->lastlinkrecov)
+			handle = chk_6120_linkrecovery(dd, ibcs);
+		/*
+		 * Since going into a recovery state causes the link state
+		 * to go down and since recovery is transitory, it is better
+		 * if we "miss" ever seeing the link training state go into
+		 * recovery (i.e., ignore this transition for link state
+		 * special handling purposes) without updating lastibcstat.
+		 */
+		if (handle && qib_6120_phys_portstate(ibcs) ==
+					    IB_PHYSPORTSTATE_LINK_ERR_RECOVER)
+			handle = 0;
+		if (handle)
+			qib_handle_e_ibstatuschanged(ppd, ibcs);
+	}
+
+	if (errs & ERR_MASK(ResetNegated)) {
+		qib_dev_err(dd, "Got reset, requires re-init "
+			      "(unload and reload driver)\n");
+		dd->flags &= ~QIB_INITTED;  /* needs re-init */
+		/* mark as having had error */
+		*dd->devstatusp |= QIB_STATUS_HWERROR;
+		*dd->pport->statusp &= ~QIB_STATUS_IB_CONF;
+	}
+
+	if (*msg && iserr)
+		qib_dev_porterr(dd, ppd->port, "%s error\n", msg);
+
+	if (ppd->state_wanted & ppd->lflags)
+		wake_up_interruptible(&ppd->state_wait);
+
+	/*
+	 * If there were hdrq or egrfull errors, wake up any processes
+	 * waiting in poll.  We used to try to check which contexts had
+	 * the overflow, but given the cost of that and the chip reads
+	 * to support it, it's better to just wake everybody up if we
+	 * get an overflow; waiters can poll again if it's not them.
+	 */
+	if (errs & (ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr))) {
+		qib_handle_urcv(dd, ~0U);
+		if (errs & ERR_MASK(RcvEgrFullErr))
+			qib_stats.sps_buffull++;
+		else
+			qib_stats.sps_hdrfull++;
+	}
+done:
+	return;
+}
+
+/**
+ * qib_6120_init_hwerrors - enable hardware errors
+ * @dd: the qlogic_ib device
+ *
+ * Now that we have finished initializing everything that might reasonably
+ * cause a hardware error, and cleared those error bits as they occur,
+ * we can enable hardware errors in the mask (potentially enabling
+ * freeze mode), and enable hardware errors as errors (along with
+ * everything else) in errormask.
+ */
+static void qib_6120_init_hwerrors(struct qib_devdata *dd)
+{
+	u64 val;
+	u64 extsval;
+
+	extsval = qib_read_kreg64(dd, kr_extstatus);
+
+	if (!(extsval & QLOGIC_IB_EXTS_MEMBIST_ENDTEST))
+		qib_dev_err(dd, "MemBIST did not complete!\n");
+
+	/* init so all hwerrors interrupt, and enter freeze; adjust below */
+	val = ~0ULL;
+	if (dd->minrev < 2) {
+		/*
+		 * Avoid problem with internal interface bus parity
+		 * checking. Fixed in Rev2.
+		 */
+		val &= ~QLOGIC_IB_HWE_PCIEBUSPARITYRADM;
+	}
+	/* avoid a speculative-read freeze-mode issue on some Intel CPUs */
+	val &= ~TXEMEMPARITYERR_PIOBUF;
+
+	dd->cspec->hwerrmask = val;
+
+	qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed));
+	qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
+
+	/* clear all */
+	qib_write_kreg(dd, kr_errclear, ~0ULL);
+	/* enable errors that are masked, at least this first time. */
+	qib_write_kreg(dd, kr_errmask, ~0ULL);
+	dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask);
+	/* clear any interrupts up to this point (ints still not enabled) */
+	qib_write_kreg(dd, kr_intclear, ~0ULL);
+
+	qib_write_kreg(dd, kr_rcvbthqp,
+		       dd->qpn_mask << (QIB_6120_RcvBTHQP_BTHQP_Mask_LSB - 1) |
+		       QIB_KD_QP);
+}
+
+/*
+ * Disable and enable the armlaunch error.  Used for PIO bandwidth testing
+ * on chips that are count-based, rather than trigger-based.  There is no
+ * reference counting, but that's also fine, given the intended use.
+ * This is chip-specific only because it is all register accesses.
+ */
+static void qib_set_6120_armlaunch(struct qib_devdata *dd, u32 enable)
+{
+	if (enable) {
+		qib_write_kreg(dd, kr_errclear,
+			       ERR_MASK(SendPioArmLaunchErr));
+		dd->cspec->errormask |= ERR_MASK(SendPioArmLaunchErr);
+	} else
+		dd->cspec->errormask &= ~ERR_MASK(SendPioArmLaunchErr);
+	qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
+}
+
+/*
+ * Formerly took parameter <which> in pre-shifted,
+ * pre-merged form with LinkCmd and LinkInitCmd
+ * together, and assumed zero was a NOP.
+ */
+static void qib_set_ib_6120_lstate(struct qib_pportdata *ppd, u16 linkcmd,
+				   u16 linitcmd)
+{
+	u64 mod_wd;
+	struct qib_devdata *dd = ppd->dd;
+	unsigned long flags;
+
+	if (linitcmd == QLOGIC_IB_IBCC_LINKINITCMD_DISABLE) {
+		/*
+		 * If we are told to disable, note that so link-recovery
+		 * code does not attempt to bring us back up.
+		 */
+		spin_lock_irqsave(&ppd->lflags_lock, flags);
+		ppd->lflags |= QIBL_IB_LINK_DISABLED;
+		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+	} else if (linitcmd || linkcmd == QLOGIC_IB_IBCC_LINKCMD_DOWN) {
+		/*
+		 * Any other linkinitcmd will lead to LINKDOWN and then
+		 * to INIT (if all is well), so clear flag to let
+		 * link-recovery code attempt to bring us back up.
+		 */
+		spin_lock_irqsave(&ppd->lflags_lock, flags);
+		ppd->lflags &= ~QIBL_IB_LINK_DISABLED;
+		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+	}
+
+	mod_wd = (linkcmd << QLOGIC_IB_IBCC_LINKCMD_SHIFT) |
+		(linitcmd << QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
+
+	qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl | mod_wd);
+	/* write to chip to prevent back-to-back writes of control reg */
+	qib_write_kreg(dd, kr_scratch, 0);
+}
+
+/**
+ * qib_6120_bringup_serdes - bring up the serdes
+ * @ppd: physical port of the qlogic_ib device
+ */
+static int qib_6120_bringup_serdes(struct qib_pportdata *ppd)
+{
+	struct qib_devdata *dd = ppd->dd;
+	u64 val, config1, prev_val, hwstat, ibc;
+
+	/* Put IBC in reset, sends disabled */
+	dd->control &= ~QLOGIC_IB_C_LINKENABLE;
+	qib_write_kreg(dd, kr_control, 0ULL);
+
+	dd->cspec->ibdeltainprog = 1;
+	dd->cspec->ibsymsnap = read_6120_creg32(dd, cr_ibsymbolerr);
+	dd->cspec->iblnkerrsnap = read_6120_creg32(dd, cr_iblinkerrrecov);
+
+	/* flowcontrolwatermark is in units of KBytes */
+	ibc = 0x5ULL << SYM_LSB(IBCCtrl, FlowCtrlWaterMark);
+	/*
+	 * How often flow control is sent.  More or less in usecs; balance
+	 * against the watermark value, so that in theory senders always get
+	 * a flow control update in time to not let the IB link go idle.
+	 */
+	ibc |= 0x3ULL << SYM_LSB(IBCCtrl, FlowCtrlPeriod);
+	/* max error tolerance */
+	dd->cspec->lli_thresh = 0xf;
+	ibc |= (u64) dd->cspec->lli_thresh << SYM_LSB(IBCCtrl, PhyerrThreshold);
+	/* use "real" buffer space for IB credit flow control */
+	ibc |= 4ULL << SYM_LSB(IBCCtrl, CreditScale);
+	ibc |= 0xfULL << SYM_LSB(IBCCtrl, OverrunThreshold);
+	/*
+	 * set initial max size pkt IBC will send, including ICRC; it's the
+	 * PIO buffer size in dwords, less 1; also see qib_set_mtu()
+	 */
+	ibc |= ((u64)(ppd->ibmaxlen >> 2) + 1) << SYM_LSB(IBCCtrl, MaxPktLen);
+	dd->cspec->ibcctrl = ibc; /* without linkcmd or linkinitcmd! */
+
+	/* initially come up waiting for TS1, without sending anything. */
+	val = dd->cspec->ibcctrl | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
+		QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
+	qib_write_kreg(dd, kr_ibcctrl, val);
+
+	val = qib_read_kreg64(dd, kr_serdes_cfg0);
+	config1 = qib_read_kreg64(dd, kr_serdes_cfg1);
+
+	/*
+	 * Force reset on, also set rxdetect enable.  Must do before reading
+	 * serdesstatus at least for simulation, or some of the bits in
+	 * serdes status will come back as undefined and cause simulation
+	 * failures
+	 */
+	val |= SYM_MASK(SerdesCfg0, ResetPLL) |
+		SYM_MASK(SerdesCfg0, RxDetEnX) |
+		(SYM_MASK(SerdesCfg0, L1PwrDnA) |
+		 SYM_MASK(SerdesCfg0, L1PwrDnB) |
+		 SYM_MASK(SerdesCfg0, L1PwrDnC) |
+		 SYM_MASK(SerdesCfg0, L1PwrDnD));
+	qib_write_kreg(dd, kr_serdes_cfg0, val);
+	/* be sure chip saw it */
+	qib_read_kreg64(dd, kr_scratch);
+	udelay(5);              /* need pll reset set at least for a bit */
+	/*
+	 * after PLL is reset, set the per-lane Resets and TxIdle and
+	 * clear the PLL reset and rxdetect (to get falling edge).
+	 * Leave L1PWR bits set (permanently)
+	 */
+	val &= ~(SYM_MASK(SerdesCfg0, RxDetEnX) |
+		 SYM_MASK(SerdesCfg0, ResetPLL) |
+		 (SYM_MASK(SerdesCfg0, L1PwrDnA) |
+		  SYM_MASK(SerdesCfg0, L1PwrDnB) |
+		  SYM_MASK(SerdesCfg0, L1PwrDnC) |
+		  SYM_MASK(SerdesCfg0, L1PwrDnD)));
+	val |= (SYM_MASK(SerdesCfg0, ResetA) |
+		SYM_MASK(SerdesCfg0, ResetB) |
+		SYM_MASK(SerdesCfg0, ResetC) |
+		SYM_MASK(SerdesCfg0, ResetD)) |
+		SYM_MASK(SerdesCfg0, TxIdeEnX);
+	qib_write_kreg(dd, kr_serdes_cfg0, val);
+	/* be sure chip saw it */
+	(void) qib_read_kreg64(dd, kr_scratch);
+	/* need PLL reset clear for at least 11 usec before lane
+	 * resets cleared; give it a few more to be sure */
+	udelay(15);
+	val &= ~((SYM_MASK(SerdesCfg0, ResetA) |
+		  SYM_MASK(SerdesCfg0, ResetB) |
+		  SYM_MASK(SerdesCfg0, ResetC) |
+		  SYM_MASK(SerdesCfg0, ResetD)) |
+		 SYM_MASK(SerdesCfg0, TxIdeEnX));
+
+	qib_write_kreg(dd, kr_serdes_cfg0, val);
+	/* be sure chip saw it */
+	(void) qib_read_kreg64(dd, kr_scratch);
+
+	val = qib_read_kreg64(dd, kr_xgxs_cfg);
+	prev_val = val;
+	if (val & QLOGIC_IB_XGXS_RESET)
+		val &= ~QLOGIC_IB_XGXS_RESET;
+	if (SYM_FIELD(val, XGXSCfg, polarity_inv) != ppd->rx_pol_inv) {
+		/* need to compensate for Tx inversion in partner */
+		val &= ~SYM_MASK(XGXSCfg, polarity_inv);
+		val |= (u64)ppd->rx_pol_inv << SYM_LSB(XGXSCfg, polarity_inv);
+	}
+	if (val != prev_val)
+		qib_write_kreg(dd, kr_xgxs_cfg, val);
+
+	val = qib_read_kreg64(dd, kr_serdes_cfg0);
+
+	/* clear current and de-emphasis bits */
+	config1 &= ~0x0ffffffff00ULL;
+	/* set current to 20ma */
+	config1 |= 0x00000000000ULL;
+	/* set de-emphasis to -5.68dB */
+	config1 |= 0x0cccc000000ULL;
+	qib_write_kreg(dd, kr_serdes_cfg1, config1);
+
+	/* base and port guid same for single port */
+	ppd->guid = dd->base_guid;
+
+	/*
+	 * The process of setting and un-resetting the serdes normally
+	 * causes a serdes PLL error, so check for that and clear it
+	 * here.  Also clear the hwerr bit in errstatus, but not others.
+	 */
+	hwstat = qib_read_kreg64(dd, kr_hwerrstatus);
+	if (hwstat) {
+		/* should just be the PLL error; clear all set, in any case */
+		if (hwstat & ~QLOGIC_IB_HWE_SERDESPLLFAILED)
+			qib_write_kreg(dd, kr_hwerrclear, hwstat);
+		qib_write_kreg(dd, kr_errclear, ERR_MASK(HardwareErr));
+	}
+
+	dd->control |= QLOGIC_IB_C_LINKENABLE;
+	dd->control &= ~QLOGIC_IB_C_FREEZEMODE;
+	qib_write_kreg(dd, kr_control, dd->control);
+
+	return 0;
+}
+
+/**
+ * qib_6120_quiet_serdes - set serdes to txidle
+ * @ppd: physical port of the qlogic_ib device
+ * Called when driver is being unloaded
+ */
+static void qib_6120_quiet_serdes(struct qib_pportdata *ppd)
+{
+	struct qib_devdata *dd = ppd->dd;
+	u64 val;
+
+	qib_set_ib_6120_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
+
+	/* disable IBC */
+	dd->control &= ~QLOGIC_IB_C_LINKENABLE;
+	qib_write_kreg(dd, kr_control,
+		       dd->control | QLOGIC_IB_C_FREEZEMODE);
+
+	if (dd->cspec->ibsymdelta || dd->cspec->iblnkerrdelta ||
+	    dd->cspec->ibdeltainprog) {
+		u64 diagc;
+
+		/* enable counter writes */
+		diagc = qib_read_kreg64(dd, kr_hwdiagctrl);
+		qib_write_kreg(dd, kr_hwdiagctrl,
+			       diagc | SYM_MASK(HwDiagCtrl, CounterWrEnable));
+
+		if (dd->cspec->ibsymdelta || dd->cspec->ibdeltainprog) {
+			val = read_6120_creg32(dd, cr_ibsymbolerr);
+			if (dd->cspec->ibdeltainprog)
+				val -= val - dd->cspec->ibsymsnap;
+			val -= dd->cspec->ibsymdelta;
+			write_6120_creg(dd, cr_ibsymbolerr, val);
+		}
+		if (dd->cspec->iblnkerrdelta || dd->cspec->ibdeltainprog) {
+			val = read_6120_creg32(dd, cr_iblinkerrrecov);
+			if (dd->cspec->ibdeltainprog)
+				val -= val - dd->cspec->iblnkerrsnap;
+			val -= dd->cspec->iblnkerrdelta;
+			write_6120_creg(dd, cr_iblinkerrrecov, val);
+		}
+
+		/* and disable counter writes */
+		qib_write_kreg(dd, kr_hwdiagctrl, diagc);
+	}
+
+	val = qib_read_kreg64(dd, kr_serdes_cfg0);
+	val |= SYM_MASK(SerdesCfg0, TxIdeEnX);
+	qib_write_kreg(dd, kr_serdes_cfg0, val);
+}
+
+/**
+ * qib_6120_setup_setextled - set the state of the two external LEDs
+ * @dd: the qlogic_ib device
+ * @on: whether the link is up or not
+ *
+ * The exact combo of LEDs, if on is true, is determined by looking
+ * at the ibcstatus.
+ *
+ * These LEDs indicate the physical and logical state of IB link.
+ * For this chip (at least with recommended board pinouts), LED1
+ * is Yellow (logical state) and LED2 is Green (physical state).
+ * Note:  We try to match the Mellanox HCA LED behavior as best
+ * we can.  Green indicates physical link state is OK (something is
+ * plugged in, and we can train).
+ * Amber indicates the link is logically up (ACTIVE).
+ * Mellanox further blinks the amber LED to indicate data packet
+ * activity, but we have no hardware support for that, so it would
+ * require waking up every 10-20 msecs and checking the counters
+ * on the chip, and then turning the LED off if appropriate.  That's
+ * visible overhead, so not something we will do.
+ *
+ */
+static void qib_6120_setup_setextled(struct qib_pportdata *ppd, u32 on)
+{
+	u64 extctl, val, lst, ltst;
+	unsigned long flags;
+	struct qib_devdata *dd = ppd->dd;
+
+	/*
+	 * The diags use the LED to indicate diag info, so we leave
+	 * the external LED alone when the diags are running.
+	 */
+	if (dd->diag_client)
+		return;
+
+	/* Allow override of LED display for, e.g. Locating system in rack */
+	if (ppd->led_override) {
+		ltst = (ppd->led_override & QIB_LED_PHYS) ?
+			IB_PHYSPORTSTATE_LINKUP : IB_PHYSPORTSTATE_DISABLED;
+		lst = (ppd->led_override & QIB_LED_LOG) ?
+			IB_PORT_ACTIVE : IB_PORT_DOWN;
+	} else if (on) {
+		val = qib_read_kreg64(dd, kr_ibcstatus);
+		ltst = qib_6120_phys_portstate(val);
+		lst = qib_6120_iblink_state(val);
+	} else {
+		ltst = 0;
+		lst = 0;
+	}
+
+	spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
+	extctl = dd->cspec->extctrl & ~(SYM_MASK(EXTCtrl, LEDPriPortGreenOn) |
+				 SYM_MASK(EXTCtrl, LEDPriPortYellowOn));
+
+	if (ltst == IB_PHYSPORTSTATE_LINKUP)
+		extctl |= SYM_MASK(EXTCtrl, LEDPriPortYellowOn);
+	if (lst == IB_PORT_ACTIVE)
+		extctl |= SYM_MASK(EXTCtrl, LEDPriPortGreenOn);
+	dd->cspec->extctrl = extctl;
+	qib_write_kreg(dd, kr_extctrl, extctl);
+	spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
+}
+
+static void qib_6120_free_irq(struct qib_devdata *dd)
+{
+	if (dd->cspec->irq) {
+		free_irq(dd->cspec->irq, dd);
+		dd->cspec->irq = 0;
+	}
+	qib_nomsi(dd);
+}
+
+/**
+ * qib_6120_setup_cleanup - clean up any chip-specific stuff
+ * @dd: the qlogic_ib device
+ *
+ * This is called during driver unload.
+ */
+static void qib_6120_setup_cleanup(struct qib_devdata *dd)
+{
+	qib_6120_free_irq(dd);
+	kfree(dd->cspec->cntrs);
+	kfree(dd->cspec->portcntrs);
+	if (dd->cspec->dummy_hdrq) {
+		dma_free_coherent(&dd->pcidev->dev,
+				  ALIGN(dd->rcvhdrcnt *
+					dd->rcvhdrentsize *
+					sizeof(u32), PAGE_SIZE),
+				  dd->cspec->dummy_hdrq,
+				  dd->cspec->dummy_hdrq_phys);
+		dd->cspec->dummy_hdrq = NULL;
+	}
+}
+
+static void qib_wantpiobuf_6120_intr(struct qib_devdata *dd, u32 needint)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dd->sendctrl_lock, flags);
+	if (needint)
+		dd->sendctrl |= SYM_MASK(SendCtrl, PIOIntBufAvail);
+	else
+		dd->sendctrl &= ~SYM_MASK(SendCtrl, PIOIntBufAvail);
+	qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
+	qib_write_kreg(dd, kr_scratch, 0ULL);
+	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
+}
+
+/*
+ * Handle errors and unusual events first, in a separate function
+ * to improve cache hits for the fast-path interrupt handling.
+ */
+static noinline void unlikely_6120_intr(struct qib_devdata *dd, u64 istat)
+{
+	if (unlikely(istat & ~QLOGIC_IB_I_BITSEXTANT))
+		qib_dev_err(dd, "interrupt with unknown interrupts %Lx set\n",
+			    istat & ~QLOGIC_IB_I_BITSEXTANT);
+
+	if (istat & QLOGIC_IB_I_ERROR) {
+		u64 estat = 0;
+
+		qib_stats.sps_errints++;
+		estat = qib_read_kreg64(dd, kr_errstatus);
+		if (!estat)
+			qib_devinfo(dd->pcidev, "error interrupt (%Lx), "
+				 "but no error bits set!\n", istat);
+		handle_6120_errors(dd, estat);
+	}
+
+	if (istat & QLOGIC_IB_I_GPIO) {
+		u32 gpiostatus;
+		u32 to_clear = 0;
+
+		/*
+		 * GPIO_3..5 on IBA6120 Rev2 chips indicate
+		 * errors that we need to count.
+		 */
+		gpiostatus = qib_read_kreg32(dd, kr_gpio_status);
+		/* First the error-counter case. */
+		if (gpiostatus & GPIO_ERRINTR_MASK) {
+			/* want to clear the bits we see asserted. */
+			to_clear |= (gpiostatus & GPIO_ERRINTR_MASK);
+
+			/*
+			 * Count appropriately, clear bits out of our copy,
+			 * as they have been "handled".
+			 */
+			if (gpiostatus & (1 << GPIO_RXUVL_BIT))
+				dd->cspec->rxfc_unsupvl_errs++;
+			if (gpiostatus & (1 << GPIO_OVRUN_BIT))
+				dd->cspec->overrun_thresh_errs++;
+			if (gpiostatus & (1 << GPIO_LLI_BIT))
+				dd->cspec->lli_errs++;
+			gpiostatus &= ~GPIO_ERRINTR_MASK;
+		}
+		if (gpiostatus) {
+			/*
+			 * Some unexpected bits remain. If they could have
+			 * caused the interrupt, complain and clear.
+			 * To avoid repetition of this condition, also clear
+			 * the mask. It is almost certainly due to error.
+			 */
+			const u32 mask = qib_read_kreg32(dd, kr_gpio_mask);
+
+			/*
+			 * Also check that the chip reflects our shadow,
+			 * and report issues.  If they caused the interrupt,
+			 * we suppress further occurrences by clearing those
+			 * bits from the mask (refreshing the chip from the
+			 * shadow).
+			 */
+			if (mask & gpiostatus) {
+				to_clear |= (gpiostatus & mask);
+				dd->cspec->gpio_mask &= ~(gpiostatus & mask);
+				qib_write_kreg(dd, kr_gpio_mask,
+					       dd->cspec->gpio_mask);
+			}
+		}
+		if (to_clear)
+			qib_write_kreg(dd, kr_gpio_clear, (u64) to_clear);
+	}
+}
+
+static irqreturn_t qib_6120intr(int irq, void *data)
+{
+	struct qib_devdata *dd = data;
+	irqreturn_t ret;
+	u32 istat, ctxtrbits, rmask, crcs = 0;
+	unsigned i;
+
+	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) {
+		/*
+		 * This return value is not great, but we do not want the
+		 * interrupt core code to remove our interrupt handler
+		 * because we don't appear to be handling an interrupt
+		 * during a chip reset.
+		 */
+		ret = IRQ_HANDLED;
+		goto bail;
+	}
+
+	istat = qib_read_kreg32(dd, kr_intstatus);
+
+	if (unlikely(!istat)) {
+		ret = IRQ_NONE; /* not our interrupt, or already handled */
+		goto bail;
+	}
+	if (unlikely(istat == -1)) {
+		qib_bad_intrstatus(dd);
+		/* don't know if it was our interrupt or not */
+		ret = IRQ_NONE;
+		goto bail;
+	}
+
+	qib_stats.sps_ints++;
+	if (dd->int_counter != (u32) -1)
+		dd->int_counter++;
+
+	if (unlikely(istat & (~QLOGIC_IB_I_BITSEXTANT |
+			      QLOGIC_IB_I_GPIO | QLOGIC_IB_I_ERROR)))
+		unlikely_6120_intr(dd, istat);
+
+	/*
+	 * Clear the interrupt bits we found set, relatively early, so we
+	 * "know" the chip will have seen this by the time we process
+	 * the queue, and will re-interrupt if necessary.  The processor
+	 * itself won't take the interrupt again until we return.
+	 */
+	qib_write_kreg(dd, kr_intclear, istat);
+
+	/*
+	 * Handle kernel receive queues before checking for pio buffers
+	 * available since receives can overflow; piobuf waiters can afford
+	 * a few extra cycles, since they were waiting anyway.
+	 */
+	ctxtrbits = istat &
+		((QLOGIC_IB_I_RCVAVAIL_MASK << QLOGIC_IB_I_RCVAVAIL_SHIFT) |
+		 (QLOGIC_IB_I_RCVURG_MASK << QLOGIC_IB_I_RCVURG_SHIFT));
+	if (ctxtrbits) {
+		rmask = (1U << QLOGIC_IB_I_RCVAVAIL_SHIFT) |
+			(1U << QLOGIC_IB_I_RCVURG_SHIFT);
+		for (i = 0; i < dd->first_user_ctxt; i++) {
+			if (ctxtrbits & rmask) {
+				ctxtrbits &= ~rmask;
+				crcs += qib_kreceive(dd->rcd[i],
+						     &dd->cspec->lli_counter,
+						     NULL);
+			}
+			rmask <<= 1;
+		}
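+		/*
+		 * crcs is the count of local link integrity errors seen by
+		 * qib_kreceive() this pass; they accumulate in lli_counter
+		 * until lli_thresh is exceeded, at which point one LLI
+		 * error is charged and the accumulator is reset.
+		 */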
+		if (crcs) {
+			u32 cntr = dd->cspec->lli_counter;
+			cntr += crcs;
+			if (cntr) {
+				if (cntr > dd->cspec->lli_thresh) {
+					dd->cspec->lli_counter = 0;
+					dd->cspec->lli_errs++;
+				} else
+					dd->cspec->lli_counter += cntr;
+			}
+		}
+
+		if (ctxtrbits) {
+			ctxtrbits =
+				(ctxtrbits >> QLOGIC_IB_I_RCVAVAIL_SHIFT) |
+				(ctxtrbits >> QLOGIC_IB_I_RCVURG_SHIFT);
+			qib_handle_urcv(dd, ctxtrbits);
+		}
+	}
+
+	if ((istat & QLOGIC_IB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED))
+		qib_ib_piobufavail(dd);
+
+	ret = IRQ_HANDLED;
+bail:
+	return ret;
+}
+
+/*
+ * Set up our chip-specific interrupt handler
+ * The interrupt type has already been setup, so
+ * we just need to do the registration and error checking.
+ */
+static void qib_setup_6120_interrupt(struct qib_devdata *dd)
+{
+	/*
+	 * If the chip supports added error indication via GPIO pins,
+	 * enable interrupts on those bits so the interrupt routine
+	 * can count the events. Also set flag so interrupt routine
+	 * can know they are expected.
+	 */
+	if (SYM_FIELD(dd->revision, Revision_R, ChipRevMinor) > 1) {
+		/* Rev2+ reports extra errors via internal GPIO pins */
+		dd->cspec->gpio_mask |= GPIO_ERRINTR_MASK;
+		qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
+	}
+
+	if (!dd->cspec->irq)
+		qib_dev_err(dd, "irq is 0, BIOS error?  Interrupts won't "
+			    "work\n");
+	else {
+		int ret;
+		ret = request_irq(dd->cspec->irq, qib_6120intr, 0,
+				  QIB_DRV_NAME, dd);
+		if (ret)
+			qib_dev_err(dd, "Couldn't setup interrupt "
+				    "(irq=%d): %d\n", dd->cspec->irq,
+				    ret);
+	}
+}
+
+/**
+ * pe_boardname - fill in the board name
+ * @dd: the qlogic_ib device
+ *
+ * info is based on the board revision register
+ */
+static void pe_boardname(struct qib_devdata *dd)
+{
+	char *n;
+	u32 boardid, namelen;
+
+	boardid = SYM_FIELD(dd->revision, Revision, BoardID);
+
+	switch (boardid) {
+	case 2:
+		n = "InfiniPath_QLE7140";
+		break;
+	default:
+		qib_dev_err(dd, "Unknown 6120 board with ID %u\n", boardid);
+		n = "Unknown_InfiniPath_6120";
+		break;
+	}
+	namelen = strlen(n) + 1;
+	dd->boardname = kmalloc(namelen, GFP_KERNEL);
+	if (!dd->boardname)
+		qib_dev_err(dd, "Failed allocation for board name: %s\n", n);
+	else
+		snprintf(dd->boardname, namelen, "%s", n);
+
+	if (dd->majrev != 4 || !dd->minrev || dd->minrev > 2)
+		qib_dev_err(dd, "Unsupported InfiniPath hardware revision "
+			    "%u.%u!\n", dd->majrev, dd->minrev);
+
+	snprintf(dd->boardversion, sizeof(dd->boardversion),
+		 "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n",
+		 QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname,
+		 (unsigned)SYM_FIELD(dd->revision, Revision_R, Arch),
+		 dd->majrev, dd->minrev,
+		 (unsigned)SYM_FIELD(dd->revision, Revision_R, SW));
+
+}
+
+/*
+ * This routine sleeps, so it can only be called from user context, not
+ * from interrupt context.  If we need interrupt context, we can split
+ * it into two routines.
+ */
+static int qib_6120_setup_reset(struct qib_devdata *dd)
+{
+	u64 val;
+	int i;
+	int ret;
+	u16 cmdval;
+	u8 int_line, clinesz;
+
+	qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz);
+
+	/* Use ERROR so it shows up in logs, etc. */
+	qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit);
+
+	/* no interrupts till re-initted */
+	qib_6120_set_intr_state(dd, 0);
+
+	dd->cspec->ibdeltainprog = 0;
+	dd->cspec->ibsymdelta = 0;
+	dd->cspec->iblnkerrdelta = 0;
+
+	/*
+	 * Keep chip from being accessed until we are ready.  Use
+	 * writeq() directly, to allow the write even though QIB_PRESENT
+	 * isn't set.
+	 */
+	dd->flags &= ~(QIB_INITTED | QIB_PRESENT);
+	dd->int_counter = 0; /* so we check interrupts work again */
+	val = dd->control | QLOGIC_IB_C_RESET;
+	writeq(val, &dd->kregbase[kr_control]);
+	mb(); /* prevent compiler re-ordering around actual reset */
+
+	for (i = 1; i <= 5; i++) {
+		/*
+		 * Allow MBIST, etc. to complete; longer on each retry.
+		 * We sometimes get machine checks from bus timeout if no
+		 * response, so for now, make it *really* long.
+		 */
+		msleep(1000 + (1 + i) * 2000);
+
+		qib_pcie_reenable(dd, cmdval, int_line, clinesz);
+
+		/*
+		 * Use readq directly, so we don't need to mark it as PRESENT
+		 * until we get a successful indication that all is well.
+		 */
+		val = readq(&dd->kregbase[kr_revision]);
+		if (val == dd->revision) {
+			dd->flags |= QIB_PRESENT; /* it's back */
+			ret = qib_reinit_intr(dd);
+			goto bail;
+		}
+	}
+	ret = 0; /* failed */
+
+bail:
+	if (ret) {
+		if (qib_pcie_params(dd, dd->lbus_width, NULL, NULL))
+			qib_dev_err(dd, "Reset failed to setup PCIe or "
+				    "interrupts; continuing anyway\n");
+		/* clear the reset error, init error/hwerror mask */
+		qib_6120_init_hwerrors(dd);
+		/* for Rev2 error interrupts; nop for rev 1 */
+		qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
+	}
+	return ret;
+}
+
+/**
+ * qib_6120_put_tid - write a TID in chip
+ * @dd: the qlogic_ib device
+ * @tidptr: pointer to the expected TID (in chip) to update
+ * @type: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0)
+ * for expected
+ * @pa: physical address of in memory buffer; tidinvalid if freeing
+ *
+ * This exists as a separate routine to allow for special locking etc.
+ * It's used for both the full cleanup on exit, as well as the normal
+ * setup and teardown.
+ */
+static void qib_6120_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
+			     u32 type, unsigned long pa)
+{
+	u32 __iomem *tidp32 = (u32 __iomem *)tidptr;
+	unsigned long flags;
+	int tidx;
+	spinlock_t *tidlockp; /* select appropriate spinlock */
+
+	if (!dd->kregbase)
+		return;
+
+	if (pa != dd->tidinvalid) {
+		if (pa & ((1U << 11) - 1)) {
+			qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
+				    pa);
+			return;
+		}
+		pa >>= 11;
+		if (pa & ~QLOGIC_IB_RT_ADDR_MASK) {
+			qib_dev_err(dd, "Physical page address 0x%lx "
+				    "larger than supported\n", pa);
+			return;
+		}
+
+		if (type == RCVHQ_RCV_TYPE_EAGER)
+			pa |= dd->tidtemplate;
+		else /* for now, always full 4KB page */
+			pa |= 2 << 29;
+	}
+
+	/*
+	 * Avoid chip issue by writing the scratch register
+	 * before and after the TID, and with an io write barrier.
+	 * We use a spinlock around the writes, so they can't intermix
+	 * with other TID (eager or expected) writes (the chip problem
+	 * is triggered by back to back TID writes). Unfortunately, this
+	 * call can be done from interrupt level for the ctxt 0 eager TIDs,
+	 * so we have to use irqsave locks.
+	 */
+	/*
+	 * Assumes tidptr always > egrtidbase
+	 * if type == RCVHQ_RCV_TYPE_EAGER.
+	 */
+	tidx = tidptr - dd->egrtidbase;
+
+	tidlockp = (type == RCVHQ_RCV_TYPE_EAGER && tidx < dd->rcvhdrcnt)
+		? &dd->cspec->kernel_tid_lock : &dd->cspec->user_tid_lock;
+	spin_lock_irqsave(tidlockp, flags);
+	qib_write_kreg(dd, kr_scratch, 0xfeeddeaf);
+	writel(pa, tidp32);
+	qib_write_kreg(dd, kr_scratch, 0xdeadbeef);
+	mmiowb();
+	spin_unlock_irqrestore(tidlockp, flags);
+}
+
+/**
+ * qib_6120_put_tid_2 - write a TID in chip, Revision 2 or higher
+ * @dd: the qlogic_ib device
+ * @tidptr: pointer to the expected TID (in chip) to update
+ * @type: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0)
+ * for expected
+ * @pa: physical address of in memory buffer; tidinvalid if freeing
+ *
+ * This exists as a separate routine to allow for selection of the
+ * appropriate "flavor". The static calls in cleanup just use the
+ * revision-agnostic form, as they are not performance critical.
+ */
+static void qib_6120_put_tid_2(struct qib_devdata *dd, u64 __iomem *tidptr,
+			       u32 type, unsigned long pa)
+{
+	u32 __iomem *tidp32 = (u32 __iomem *)tidptr;
+	u32 tidx;
+
+	if (!dd->kregbase)
+		return;
+
+	if (pa != dd->tidinvalid) {
+		if (pa & ((1U << 11) - 1)) {
+			qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
+				    pa);
+			return;
+		}
+		pa >>= 11;
+		if (pa & ~QLOGIC_IB_RT_ADDR_MASK) {
+			qib_dev_err(dd, "Physical page address 0x%lx "
+				    "larger than supported\n", pa);
+			return;
+		}
+
+		if (type == RCVHQ_RCV_TYPE_EAGER)
+			pa |= dd->tidtemplate;
+		else /* for now, always full 4KB page */
+			pa |= 2 << 29;
+	}
+	tidx = tidptr - dd->egrtidbase;
+	writel(pa, tidp32);
+	mmiowb();
+}
+
+/**
+ * qib_6120_clear_tids - clear all TID entries for a context, expected and eager
+ * @dd: the qlogic_ib device
+ * @ctxt: the context
+ *
+ * clear all TID entries for a context, expected and eager.
+ * Used from qib_close().  On this chip, TIDs are only 32 bits,
+ * not 64, but they are still on 64 bit boundaries, so tidbase
+ * is declared as u64 * for the pointer math, even though we write 32 bits
+ */
+static void qib_6120_clear_tids(struct qib_devdata *dd,
+				struct qib_ctxtdata *rcd)
+{
+	u64 __iomem *tidbase;
+	unsigned long tidinv;
+	u32 ctxt;
+	int i;
+
+	if (!dd->kregbase || !rcd)
+		return;
+
+	ctxt = rcd->ctxt;
+
+	tidinv = dd->tidinvalid;
+	tidbase = (u64 __iomem *)
+		((char __iomem *)(dd->kregbase) +
+		 dd->rcvtidbase +
+		 ctxt * dd->rcvtidcnt * sizeof(*tidbase));
+
+	for (i = 0; i < dd->rcvtidcnt; i++)
+		/* use the func pointer, because it could be one of two funcs */
+		dd->f_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
+				  tidinv);
+
+	tidbase = (u64 __iomem *)
+		((char __iomem *)(dd->kregbase) +
+		 dd->rcvegrbase +
+		 rcd->rcvegr_tid_base * sizeof(*tidbase));
+
+	for (i = 0; i < rcd->rcvegrcnt; i++)
+		/* use the func pointer, because it could be one of two funcs */
+		dd->f_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
+				  tidinv);
+}
+
+/**
+ * qib_6120_tidtemplate - setup constants for TID updates
+ * @dd: the qlogic_ib device
+ *
+ * We setup stuff that we use a lot, to avoid calculating each time
+ */
+static void qib_6120_tidtemplate(struct qib_devdata *dd)
+{
+	u32 egrsize = dd->rcvegrbufsize;
+
+	/*
+	 * For now, we always allocate 4KB buffers (at init) so we can
+	 * receive max size packets.  We may want a module parameter to
+	 * specify 2KB or 4KB, and/or make it per-ctxt instead of per-device,
+	 * for those who want to reduce memory footprint.  Note that
+	 * rcvhdrentsize must be large enough to hold the largest
+	 * IB header (currently 96 bytes) that we expect to handle (plus of
+	 * course the 2 dwords of RHF).
+	 */
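+	/* TID bits 30:29 encode the buffer size: 1 = 2KB, 2 = 4KB
+	 * (matches the encoding used in qib_6120_put_tid) */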
+	if (egrsize == 2048)
+		dd->tidtemplate = 1U << 29;
+	else if (egrsize == 4096)
+		dd->tidtemplate = 2U << 29;
+	dd->tidinvalid = 0;
+}
+
+int __attribute__((weak)) qib_unordered_wc(void)
+{
+	return 0;
+}
+
+/**
+ * qib_6120_get_base_info - set chip-specific flags for user code
+ * @rcd: the qlogic_ib ctxt
+ * @kinfo: qib_base_info pointer
+ *
+ * We set the PCIE flag because the lower bandwidth on PCIe vs
+ * HyperTransport can affect some user packet algorithms.
+ */
+static int qib_6120_get_base_info(struct qib_ctxtdata *rcd,
+				  struct qib_base_info *kinfo)
+{
+	if (qib_unordered_wc())
+		kinfo->spi_runtime_flags |= QIB_RUNTIME_FORCE_WC_ORDER;
+
+	kinfo->spi_runtime_flags |= QIB_RUNTIME_PCIE |
+		QIB_RUNTIME_FORCE_PIOAVAIL | QIB_RUNTIME_PIO_REGSWAPPED;
+	return 0;
+}
+
+
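+/*
+ * The message header follows the two-dword RHF, hence the
+ * sizeof(u64) / sizeof(u32) dword offset from rhf_addr.
+ */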
+static struct qib_message_header *
+qib_6120_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr)
+{
+	return (struct qib_message_header *)
+		&rhf_addr[sizeof(u64) / sizeof(u32)];
+}
+
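+/*
+ * Carve up the contexts: the first qib_n_krcv_queues (per port) are
+ * kernel receive contexts, the rest are available to user processes.
+ * The qpn_mask computed here apparently controls how QPN bits select
+ * among the kernel receive contexts.
+ */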
+static void qib_6120_config_ctxts(struct qib_devdata *dd)
+{
+	dd->ctxtcnt = qib_read_kreg32(dd, kr_portcnt);
+	if (qib_n_krcv_queues > 1) {
+		dd->first_user_ctxt = qib_n_krcv_queues * dd->num_pports;
+		if (dd->first_user_ctxt > dd->ctxtcnt)
+			dd->first_user_ctxt = dd->ctxtcnt;
+		dd->qpn_mask = dd->first_user_ctxt <= 2 ? 2 : 6;
+	} else
+		dd->first_user_ctxt = dd->num_pports;
+	dd->n_krcv_queues = dd->first_user_ctxt;
+}
+
+static void qib_update_6120_usrhead(struct qib_ctxtdata *rcd, u64 hd,
+				    u32 updegr, u32 egrhd)
+{
+	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
+	if (updegr)
+		qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
+}
+
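+/*
+ * The hdrq is empty when head == tail; use the DMA'd tail copy when
+ * available, otherwise read the tail register from the chip.
+ */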
+static u32 qib_6120_hdrqempty(struct qib_ctxtdata *rcd)
+{
+	u32 head, tail;
+
+	head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt);
+	if (rcd->rcvhdrtail_kvaddr)
+		tail = qib_get_rcvhdrtail(rcd);
+	else
+		tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt);
+	return head == tail;
+}
+
+/*
+ * Used when we close any ctxt, for DMA already in flight
+ * at close.  Can't be done until we know hdrq size, so not
+ * early in chip init.
+ */
+static void alloc_dummy_hdrq(struct qib_devdata *dd)
+{
+	dd->cspec->dummy_hdrq = dma_alloc_coherent(&dd->pcidev->dev,
+					dd->rcd[0]->rcvhdrq_size,
+					&dd->cspec->dummy_hdrq_phys,
+					GFP_KERNEL | __GFP_COMP);
+	if (!dd->cspec->dummy_hdrq) {
+		qib_devinfo(dd->pcidev, "Couldn't allocate dummy hdrq\n");
+		/* fallback to just 0'ing */
+		dd->cspec->dummy_hdrq_phys = 0UL;
+	}
+}
+
+/*
+ * Modify the RCVCTRL register in a chip-specific way.  This
+ * is a function because bit positions and (future) register
+ * location are chip-specific, but the needed operations are
+ * generic.  <op> is a bit-mask because we often want to
+ * do multiple modifications.
+ */
+static void rcvctrl_6120_mod(struct qib_pportdata *ppd, unsigned int op,
+			     int ctxt)
+{
+	struct qib_devdata *dd = ppd->dd;
+	u64 mask, val;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
+
+	if (op & QIB_RCVCTRL_TAILUPD_ENB)
+		dd->rcvctrl |= (1ULL << QLOGIC_IB_R_TAILUPD_SHIFT);
+	if (op & QIB_RCVCTRL_TAILUPD_DIS)
+		dd->rcvctrl &= ~(1ULL << QLOGIC_IB_R_TAILUPD_SHIFT);
+	if (op & QIB_RCVCTRL_PKEY_ENB)
+		dd->rcvctrl &= ~(1ULL << IBA6120_R_PKEY_DIS_SHIFT);
+	if (op & QIB_RCVCTRL_PKEY_DIS)
+		dd->rcvctrl |= (1ULL << IBA6120_R_PKEY_DIS_SHIFT);
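+	/* a negative ctxt selects all contexts */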
+	if (ctxt < 0)
+		mask = (1ULL << dd->ctxtcnt) - 1;
+	else
+		mask = (1ULL << ctxt);
+	if (op & QIB_RCVCTRL_CTXT_ENB) {
+		/* always done for specific ctxt */
+		dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, PortEnable));
+		if (!(dd->flags & QIB_NODMA_RTAIL))
+			dd->rcvctrl |= 1ULL << QLOGIC_IB_R_TAILUPD_SHIFT;
+		/* Write these registers before the context is enabled. */
+		qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt,
+			dd->rcd[ctxt]->rcvhdrqtailaddr_phys);
+		qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt,
+			dd->rcd[ctxt]->rcvhdrq_phys);
+
+		if (ctxt == 0 && !dd->cspec->dummy_hdrq)
+			alloc_dummy_hdrq(dd);
+	}
+	if (op & QIB_RCVCTRL_CTXT_DIS)
+		dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, PortEnable));
+	if (op & QIB_RCVCTRL_INTRAVAIL_ENB)
+		dd->rcvctrl |= (mask << QLOGIC_IB_R_INTRAVAIL_SHIFT);
+	if (op & QIB_RCVCTRL_INTRAVAIL_DIS)
+		dd->rcvctrl &= ~(mask << QLOGIC_IB_R_INTRAVAIL_SHIFT);
+	qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
+	if ((op & QIB_RCVCTRL_INTRAVAIL_ENB) && dd->rhdrhead_intr_off) {
+		/* arm rcv interrupt */
+		val = qib_read_ureg32(dd, ur_rcvhdrhead, ctxt) |
+			dd->rhdrhead_intr_off;
+		qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
+	}
+	if (op & QIB_RCVCTRL_CTXT_ENB) {
+		/*
+		 * Init the context registers also; if we were
+		 * disabled, tail and head should both be zero
+		 * already from the enable, but since we don't
+		 * know, we have to do it explicitly.
+		 */
+		val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt);
+		qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt);
+
+		val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt);
+		dd->rcd[ctxt]->head = val;
+		/* If kctxt, interrupt on next receive. */
+		if (ctxt < dd->first_user_ctxt)
+			val |= dd->rhdrhead_intr_off;
+		qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
+	}
+	if (op & QIB_RCVCTRL_CTXT_DIS) {
+		/*
+		 * Be paranoid, and never write 0's to these; just use an
+		 * unused page.  Of course, rcvhdraddr points to a large
+		 * chunk of memory, so this could still trash things, but
+		 * at least it won't trash page 0, and by disabling the
+		 * ctxt, it should stop "soon", even if a packet or two
+		 * is already in flight after we disabled the ctxt.  Only
+		 * 6120 has this issue.
+		 */
+		if (ctxt >= 0) {
+			qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt,
+					    dd->cspec->dummy_hdrq_phys);
+			qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt,
+					    dd->cspec->dummy_hdrq_phys);
+		} else {
+			unsigned i;
+
+			for (i = 0; i < dd->cfgctxts; i++) {
+				qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr,
+					    i, dd->cspec->dummy_hdrq_phys);
+				qib_write_kreg_ctxt(dd, kr_rcvhdraddr,
+					    i, dd->cspec->dummy_hdrq_phys);
+			}
+		}
+	}
+	spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
+}
+
+/*
+ * Modify the SENDCTRL register in a chip-specific way.  This is a
+ * function because there may be multiple such registers with
+ * slightly different layouts.  Only the operations actually used
+ * are implemented so far.
+ * The chip requires no back-to-back sendctrl writes, so write the
+ * scratch register after writing sendctrl.
+ */
+static void sendctrl_6120_mod(struct qib_pportdata *ppd, u32 op)
+{
+	struct qib_devdata *dd = ppd->dd;
+	u64 tmp_dd_sendctrl;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dd->sendctrl_lock, flags);
+
+	/* First the ones that are "sticky", saved in shadow */
+	if (op & QIB_SENDCTRL_CLEAR)
+		dd->sendctrl = 0;
+	if (op & QIB_SENDCTRL_SEND_DIS)
+		dd->sendctrl &= ~SYM_MASK(SendCtrl, PIOEnable);
+	else if (op & QIB_SENDCTRL_SEND_ENB)
+		dd->sendctrl |= SYM_MASK(SendCtrl, PIOEnable);
+	if (op & QIB_SENDCTRL_AVAIL_DIS)
+		dd->sendctrl &= ~SYM_MASK(SendCtrl, PIOBufAvailUpd);
+	else if (op & QIB_SENDCTRL_AVAIL_ENB)
+		dd->sendctrl |= SYM_MASK(SendCtrl, PIOBufAvailUpd);
+
+	if (op & QIB_SENDCTRL_DISARM_ALL) {
+		u32 i, last;
+
+		tmp_dd_sendctrl = dd->sendctrl;
+		/*
+		 * disarm any that are not yet launched, disabling sends
+		 * and updates until done.
+		 */
+		last = dd->piobcnt2k + dd->piobcnt4k;
+		tmp_dd_sendctrl &=
+			~(SYM_MASK(SendCtrl, PIOEnable) |
+			  SYM_MASK(SendCtrl, PIOBufAvailUpd));
+		for (i = 0; i < last; i++) {
+			qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl |
+				       SYM_MASK(SendCtrl, Disarm) | i);
+			qib_write_kreg(dd, kr_scratch, 0);
+		}
+	}
+
+	tmp_dd_sendctrl = dd->sendctrl;
+
+	if (op & QIB_SENDCTRL_FLUSH)
+		tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Abort);
+	if (op & QIB_SENDCTRL_DISARM)
+		tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Disarm) |
+			((op & QIB_6120_SendCtrl_DisarmPIOBuf_RMASK) <<
+			 SYM_LSB(SendCtrl, DisarmPIOBuf));
+	if (op & QIB_SENDCTRL_AVAIL_BLIP)
+		tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, PIOBufAvailUpd);
+
+	qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl);
+	qib_write_kreg(dd, kr_scratch, 0);
+
+	if (op & QIB_SENDCTRL_AVAIL_BLIP) {
+		qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
+		qib_write_kreg(dd, kr_scratch, 0);
+	}
+
+	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
+
+	if (op & QIB_SENDCTRL_FLUSH) {
+		u32 v;
+		/*
+		 * ensure writes have hit chip, then do a few
+		 * more reads, to allow DMA of pioavail registers
+		 * to occur, so in-memory copy is in sync with
+		 * the chip.  Not always safe to sleep.
+		 */
+		v = qib_read_kreg32(dd, kr_scratch);
+		qib_write_kreg(dd, kr_scratch, v);
+		v = qib_read_kreg32(dd, kr_scratch);
+		qib_write_kreg(dd, kr_scratch, v);
+		qib_read_kreg32(dd, kr_scratch);
+	}
+}
+
+/**
+ * qib_portcntr_6120 - read a per-port counter
+ * @ppd: the qlogic_ib port
+ * @reg: the counter to read (QIBPORTCNTR_* value)
+ */
+static u64 qib_portcntr_6120(struct qib_pportdata *ppd, u32 reg)
+{
+	u64 ret = 0ULL;
+	struct qib_devdata *dd = ppd->dd;
+	u16 creg;
+	/* 0xffff for unimplemented or synthesized counters */
+	static const u16 xlator[] = {
+		[QIBPORTCNTR_PKTSEND] = cr_pktsend,
+		[QIBPORTCNTR_WORDSEND] = cr_wordsend,
+		[QIBPORTCNTR_PSXMITDATA] = 0xffff,
+		[QIBPORTCNTR_PSXMITPKTS] = 0xffff,
+		[QIBPORTCNTR_PSXMITWAIT] = 0xffff,
+		[QIBPORTCNTR_SENDSTALL] = cr_sendstall,
+		[QIBPORTCNTR_PKTRCV] = cr_pktrcv,
+		[QIBPORTCNTR_PSRCVDATA] = 0xffff,
+		[QIBPORTCNTR_PSRCVPKTS] = 0xffff,
+		[QIBPORTCNTR_RCVEBP] = cr_rcvebp,
+		[QIBPORTCNTR_RCVOVFL] = cr_rcvovfl,
+		[QIBPORTCNTR_WORDRCV] = cr_wordrcv,
+		[QIBPORTCNTR_RXDROPPKT] = cr_rxdroppkt,
+		[QIBPORTCNTR_RXLOCALPHYERR] = 0xffff,
+		[QIBPORTCNTR_RXVLERR] = 0xffff,
+		[QIBPORTCNTR_ERRICRC] = cr_erricrc,
+		[QIBPORTCNTR_ERRVCRC] = cr_errvcrc,
+		[QIBPORTCNTR_ERRLPCRC] = cr_errlpcrc,
+		[QIBPORTCNTR_BADFORMAT] = cr_badformat,
+		[QIBPORTCNTR_ERR_RLEN] = cr_err_rlen,
+		[QIBPORTCNTR_IBSYMBOLERR] = cr_ibsymbolerr,
+		[QIBPORTCNTR_INVALIDRLEN] = cr_invalidrlen,
+		[QIBPORTCNTR_UNSUPVL] = cr_txunsupvl,
+		[QIBPORTCNTR_EXCESSBUFOVFL] = 0xffff,
+		[QIBPORTCNTR_ERRLINK] = cr_errlink,
+		[QIBPORTCNTR_IBLINKDOWN] = cr_iblinkdown,
+		[QIBPORTCNTR_IBLINKERRRECOV] = cr_iblinkerrrecov,
+		[QIBPORTCNTR_LLI] = 0xffff,
+		[QIBPORTCNTR_PSINTERVAL] = 0xffff,
+		[QIBPORTCNTR_PSSTART] = 0xffff,
+		[QIBPORTCNTR_PSSTAT] = 0xffff,
+		[QIBPORTCNTR_VL15PKTDROP] = 0xffff,
+		[QIBPORTCNTR_ERRPKEY] = cr_errpkey,
+		[QIBPORTCNTR_KHDROVFL] = 0xffff,
+	};
+
+	if (reg >= ARRAY_SIZE(xlator)) {
+		qib_devinfo(ppd->dd->pcidev,
+			 "Unimplemented portcounter %u\n", reg);
+		goto done;
+	}
+	creg = xlator[reg];
+
+	/* handle counter requests not implemented as chip counters */
+	if (reg == QIBPORTCNTR_LLI)
+		ret = dd->cspec->lli_errs;
+	else if (reg == QIBPORTCNTR_EXCESSBUFOVFL)
+		ret = dd->cspec->overrun_thresh_errs;
+	else if (reg == QIBPORTCNTR_KHDROVFL) {
+		int i;
+
+		/* sum over all kernel contexts */
+		for (i = 0; i < dd->first_user_ctxt; i++)
+			ret += read_6120_creg32(dd, cr_portovfl + i);
+	} else if (reg == QIBPORTCNTR_PSSTAT)
+		ret = dd->cspec->pma_sample_status;
+	if (creg == 0xffff)
+		goto done;
+
+	/*
+	 * Only fast-incrementing counters are 64 bits; use 32-bit reads
+	 * for the rest, to avoid two independent reads when on Opteron.
+	 */
+	if (creg == cr_wordsend || creg == cr_wordrcv ||
+	    creg == cr_pktsend || creg == cr_pktrcv)
+		ret = read_6120_creg(dd, creg);
+	else
+		ret = read_6120_creg32(dd, creg);
+	if (creg == cr_ibsymbolerr) {
+		if (dd->cspec->ibdeltainprog)
+			ret -= ret - dd->cspec->ibsymsnap;
+		ret -= dd->cspec->ibsymdelta;
+	} else if (creg == cr_iblinkerrrecov) {
+		if (dd->cspec->ibdeltainprog)
+			ret -= ret - dd->cspec->iblnkerrsnap;
+		ret -= dd->cspec->iblnkerrdelta;
+	}
+	if (reg == QIBPORTCNTR_RXDROPPKT) /* add special cased count */
+		ret += dd->cspec->rxfc_unsupvl_errs;
+
+done:
+	return ret;
+}
+
+/*
+ * Device counter names (not port-specific), one line per stat,
+ * single string.  Used by utilities like ipathstats to print the stats
+ * in a way which works for different versions of drivers, without changing
+ * the utility.  Names need to be 12 chars or less (w/o newline), for proper
+ * display by utility.
+ * Non-error counters are first.
+ * The start of the "error" counters is indicated by a leading "E " on
+ * the first "error" counter, which doesn't count in the label length.
+ * The EgrOvfl list needs to be last so we truncate them at the configured
+ * context count for the device.
+ * cntr6120indices contains the corresponding register indices.
+ */
+static const char cntr6120names[] =
+	"Interrupts\n"
+	"HostBusStall\n"
+	"E RxTIDFull\n"
+	"RxTIDInvalid\n"
+	"Ctxt0EgrOvfl\n"
+	"Ctxt1EgrOvfl\n"
+	"Ctxt2EgrOvfl\n"
+	"Ctxt3EgrOvfl\n"
+	"Ctxt4EgrOvfl\n";
+
+static const size_t cntr6120indices[] = {
+	cr_lbint,
+	cr_lbflowstall,
+	cr_errtidfull,
+	cr_errtidvalid,
+	cr_portovfl + 0,
+	cr_portovfl + 1,
+	cr_portovfl + 2,
+	cr_portovfl + 3,
+	cr_portovfl + 4,
+};
+
+/*
+ * same as cntr6120names and cntr6120indices, but for port-specific counters.
+ * portcntr6120indices is somewhat complicated by some registers needing
+ * adjustments of various kinds, and those are ORed with _PORT_VIRT_FLAG
+ */
+static const char portcntr6120names[] =
+	"TxPkt\n"
+	"TxFlowPkt\n"
+	"TxWords\n"
+	"RxPkt\n"
+	"RxFlowPkt\n"
+	"RxWords\n"
+	"TxFlowStall\n"
+	"E IBStatusChng\n"
+	"IBLinkDown\n"
+	"IBLnkRecov\n"
+	"IBRxLinkErr\n"
+	"IBSymbolErr\n"
+	"RxLLIErr\n"
+	"RxBadFormat\n"
+	"RxBadLen\n"
+	"RxBufOvrfl\n"
+	"RxEBP\n"
+	"RxFlowCtlErr\n"
+	"RxICRCerr\n"
+	"RxLPCRCerr\n"
+	"RxVCRCerr\n"
+	"RxInvalLen\n"
+	"RxInvalPKey\n"
+	"RxPktDropped\n"
+	"TxBadLength\n"
+	"TxDropped\n"
+	"TxInvalLen\n"
+	"TxUnderrun\n"
+	"TxUnsupVL\n"
+	;
+
+#define _PORT_VIRT_FLAG 0x8000 /* "virtual", need adjustments */
+static const size_t portcntr6120indices[] = {
+	QIBPORTCNTR_PKTSEND | _PORT_VIRT_FLAG,
+	cr_pktsendflow,
+	QIBPORTCNTR_WORDSEND | _PORT_VIRT_FLAG,
+	QIBPORTCNTR_PKTRCV | _PORT_VIRT_FLAG,
+	cr_pktrcvflowctrl,
+	QIBPORTCNTR_WORDRCV | _PORT_VIRT_FLAG,
+	QIBPORTCNTR_SENDSTALL | _PORT_VIRT_FLAG,
+	cr_ibstatuschange,
+	QIBPORTCNTR_IBLINKDOWN | _PORT_VIRT_FLAG,
+	QIBPORTCNTR_IBLINKERRRECOV | _PORT_VIRT_FLAG,
+	QIBPORTCNTR_ERRLINK | _PORT_VIRT_FLAG,
+	QIBPORTCNTR_IBSYMBOLERR | _PORT_VIRT_FLAG,
+	QIBPORTCNTR_LLI | _PORT_VIRT_FLAG,
+	QIBPORTCNTR_BADFORMAT | _PORT_VIRT_FLAG,
+	QIBPORTCNTR_ERR_RLEN | _PORT_VIRT_FLAG,
+	QIBPORTCNTR_RCVOVFL | _PORT_VIRT_FLAG,
+	QIBPORTCNTR_RCVEBP | _PORT_VIRT_FLAG,
+	cr_rcvflowctrl_err,
+	QIBPORTCNTR_ERRICRC | _PORT_VIRT_FLAG,
+	QIBPORTCNTR_ERRLPCRC | _PORT_VIRT_FLAG,
+	QIBPORTCNTR_ERRVCRC | _PORT_VIRT_FLAG,
+	QIBPORTCNTR_INVALIDRLEN | _PORT_VIRT_FLAG,
+	QIBPORTCNTR_ERRPKEY | _PORT_VIRT_FLAG,
+	QIBPORTCNTR_RXDROPPKT | _PORT_VIRT_FLAG,
+	cr_invalidslen,
+	cr_senddropped,
+	cr_errslen,
+	cr_sendunderrun,
+	cr_txunsupvl,
+};
+
+/* do all the setup to make the counter reads efficient later */
+static void init_6120_cntrnames(struct qib_devdata *dd)
+{
+	int i, j = 0;
+	char *s;
+
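+	/*
+	 * Count the device counters, walking the newline-separated name
+	 * list; once the first Ctxt*EgrOvfl entry is seen, also count
+	 * those so the list is cut off after dd->cfgctxts entries.
+	 */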
+	for (i = 0, s = (char *)cntr6120names; s && j <= dd->cfgctxts;
+	     i++) {
+		/* we always have at least one counter before the egrovfl */
+		if (!j && !strncmp("Ctxt0EgrOvfl", s + 1, 12))
+			j = 1;
+		s = strchr(s + 1, '\n');
+		if (s && j)
+			j++;
+	}
+	dd->cspec->ncntrs = i;
+	if (!s)
+		/* full list; size is without terminating null */
+		dd->cspec->cntrnamelen = sizeof(cntr6120names) - 1;
+	else
+		dd->cspec->cntrnamelen = 1 + s - cntr6120names;
+	dd->cspec->cntrs = kmalloc(dd->cspec->ncntrs
+		* sizeof(u64), GFP_KERNEL);
+	if (!dd->cspec->cntrs)
+		qib_dev_err(dd, "Failed allocation for counters\n");
+
+	for (i = 0, s = (char *)portcntr6120names; s; i++)
+		s = strchr(s + 1, '\n');
+	dd->cspec->nportcntrs = i - 1;
+	dd->cspec->portcntrnamelen = sizeof(portcntr6120names) - 1;
+	dd->cspec->portcntrs = kmalloc(dd->cspec->nportcntrs
+		* sizeof(u64), GFP_KERNEL);
+	if (!dd->cspec->portcntrs)
+		qib_dev_err(dd, "Failed allocation for portcounters\n");
+}
+
+static u32 qib_read_6120cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
+			      u64 **cntrp)
+{
+	u32 ret;
+
+	if (namep) {
+		ret = dd->cspec->cntrnamelen;
+		if (pos >= ret)
+			ret = 0; /* final read after getting everything */
+		else
+			*namep = (char *)cntr6120names;
+	} else {
+		u64 *cntr = dd->cspec->cntrs;
+		int i;
+
+		ret = dd->cspec->ncntrs * sizeof(u64);
+		if (!cntr || pos >= ret) {
+			/* everything read, or couldn't get memory */
+			ret = 0;
+			goto done;
+		}
+		*cntrp = cntr;
+		for (i = 0; i < dd->cspec->ncntrs; i++)
+			*cntr++ = read_6120_creg32(dd, cntr6120indices[i]);
+	}
+done:
+	return ret;
+}
+
+static u32 qib_read_6120portcntrs(struct qib_devdata *dd, loff_t pos, u32 port,
+				  char **namep, u64 **cntrp)
+{
+	u32 ret;
+
+	if (namep) {
+		ret = dd->cspec->portcntrnamelen;
+		if (pos >= ret)
+			ret = 0; /* final read after getting everything */
+		else
+			*namep = (char *)portcntr6120names;
+	} else {
+		u64 *cntr = dd->cspec->portcntrs;
+		struct qib_pportdata *ppd = &dd->pport[port];
+		int i;
+
+		ret = dd->cspec->nportcntrs * sizeof(u64);
+		if (!cntr || pos >= ret) {
+			/* everything read, or couldn't get memory */
+			ret = 0;
+			goto done;
+		}
+		*cntrp = cntr;
+		for (i = 0; i < dd->cspec->nportcntrs; i++) {
+			if (portcntr6120indices[i] & _PORT_VIRT_FLAG)
+				*cntr++ = qib_portcntr_6120(ppd,
+					portcntr6120indices[i] &
+					~_PORT_VIRT_FLAG);
+			else
+				*cntr++ = read_6120_creg32(dd,
+					   portcntr6120indices[i]);
+		}
+	}
+done:
+	return ret;
+}
+
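+/*
+ * Verify that the chip's errormask register still matches our shadow
+ * copy, and rewrite it (re-arming pending events) if it has changed;
+ * apparently a guard against the register being clobbered.  Called
+ * periodically from qib_get_6120_faststats().
+ */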
+static void qib_chk_6120_errormask(struct qib_devdata *dd)
+{
+	static u32 fixed;
+	u32 ctrl;
+	unsigned long errormask;
+	unsigned long hwerrs;
+
+	if (!dd->cspec->errormask || !(dd->flags & QIB_INITTED))
+		return;
+
+	errormask = qib_read_kreg64(dd, kr_errmask);
+
+	if (errormask == dd->cspec->errormask)
+		return;
+	fixed++;
+
+	hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
+	ctrl = qib_read_kreg32(dd, kr_control);
+
+	qib_write_kreg(dd, kr_errmask,
+		dd->cspec->errormask);
+
+	if ((hwerrs & dd->cspec->hwerrmask) ||
+	    (ctrl & QLOGIC_IB_C_FREEZEMODE)) {
+		qib_write_kreg(dd, kr_hwerrclear, 0ULL);
+		qib_write_kreg(dd, kr_errclear, 0ULL);
+		/* force re-interrupt of pending events, just in case */
+		qib_write_kreg(dd, kr_intclear, 0ULL);
+		qib_devinfo(dd->pcidev,
+			 "errormask fixed(%u) %lx->%lx, ctrl %x hwerr %lx\n",
+			 fixed, errormask, (unsigned long)dd->cspec->errormask,
+			 ctrl, hwerrs);
+	}
+}
+
+/**
+ * qib_get_6120_faststats - get word counters from chip before they overflow
+ * @opaque: contains a pointer to the qlogic_ib device qib_devdata
+ *
+ * This needs more work; in particular, a decision on whether we really
+ * need traffic_wds maintained the way it is.
+ * Called from add_timer.
+ */
+static void qib_get_6120_faststats(unsigned long opaque)
+{
+	struct qib_devdata *dd = (struct qib_devdata *) opaque;
+	struct qib_pportdata *ppd = dd->pport;
+	unsigned long flags;
+	u64 traffic_wds;
+
+	/*
+	 * don't access the chip while running diags, or memory diags can
+	 * fail
+	 */
+	if (!(dd->flags & QIB_INITTED) || dd->diag_client)
+		/* but re-arm the timer, for the diags case; won't hurt otherwise */
+		goto done;
+
+	/*
+	 * We now try to maintain an activity timer, based on traffic
+	 * exceeding a threshold, so we need to check the word-counts
+	 * even if they are 64-bit.
+	 */
+	traffic_wds = qib_portcntr_6120(ppd, cr_wordsend) +
+		qib_portcntr_6120(ppd, cr_wordrcv);
+	spin_lock_irqsave(&dd->eep_st_lock, flags);
+	traffic_wds -= dd->traffic_wds;
+	dd->traffic_wds += traffic_wds;
+	if (traffic_wds  >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
+		atomic_add(5, &dd->active_time); /* S/B #define */
+	spin_unlock_irqrestore(&dd->eep_st_lock, flags);
+
+	qib_chk_6120_errormask(dd);
+done:
+	mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
+}
+
+/* no interrupt fallback for these chips */
+static int qib_6120_nointr_fallback(struct qib_devdata *dd)
+{
+	return 0;
+}
+
+/*
+ * reset the XGXS (between serdes and IBC).  Slightly less intrusive
+ * than resetting the IBC or external link state, and useful in some
+ * cases to cause some retraining.  To do this right, we reset IBC
+ * as well.
+ */
+static void qib_6120_xgxs_reset(struct qib_pportdata *ppd)
+{
+	u64 val, prev_val;
+	struct qib_devdata *dd = ppd->dd;
+
+	prev_val = qib_read_kreg64(dd, kr_xgxs_cfg);
+	val = prev_val | QLOGIC_IB_XGXS_RESET;
+	prev_val &= ~QLOGIC_IB_XGXS_RESET; /* be sure */
+	qib_write_kreg(dd, kr_control,
+		       dd->control & ~QLOGIC_IB_C_LINKENABLE);
+	qib_write_kreg(dd, kr_xgxs_cfg, val);
+	qib_read_kreg32(dd, kr_scratch);
+	qib_write_kreg(dd, kr_xgxs_cfg, prev_val);
+	qib_write_kreg(dd, kr_control, dd->control);
+}
+
+static int qib_6120_get_ib_cfg(struct qib_pportdata *ppd, int which)
+{
+	int ret;
+
+	switch (which) {
+	case QIB_IB_CFG_LWID:
+		ret = ppd->link_width_active;
+		break;
+
+	case QIB_IB_CFG_SPD:
+		ret = ppd->link_speed_active;
+		break;
+
+	case QIB_IB_CFG_LWID_ENB:
+		ret = ppd->link_width_enabled;
+		break;
+
+	case QIB_IB_CFG_SPD_ENB:
+		ret = ppd->link_speed_enabled;
+		break;
+
+	case QIB_IB_CFG_OP_VLS:
+		ret = ppd->vls_operational;
+		break;
+
+	case QIB_IB_CFG_VL_HIGH_CAP:
+		ret = 0;
+		break;
+
+	case QIB_IB_CFG_VL_LOW_CAP:
+		ret = 0;
+		break;
+
+	case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
+		ret = SYM_FIELD(ppd->dd->cspec->ibcctrl, IBCCtrl,
+				OverrunThreshold);
+		break;
+
+	case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
+		ret = SYM_FIELD(ppd->dd->cspec->ibcctrl, IBCCtrl,
+				PhyerrThreshold);
+		break;
+
+	case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
+		/* will only take effect when the link state changes */
+		ret = (ppd->dd->cspec->ibcctrl &
+		       SYM_MASK(IBCCtrl, LinkDownDefaultState)) ?
+			IB_LINKINITCMD_SLEEP : IB_LINKINITCMD_POLL;
+		break;
+
+	case QIB_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */
+		ret = 0; /* no heartbeat on this chip */
+		break;
+
+	case QIB_IB_CFG_PMA_TICKS:
+		ret = 250; /* 1 usec. */
+		break;
+
+	default:
+		ret =  -EINVAL;
+		break;
+	}
+	return ret;
+}
+
+/*
+ * We assume range checking is already done, if needed.
+ */
+static int qib_6120_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
+{
+	struct qib_devdata *dd = ppd->dd;
+	int ret = 0;
+	u64 val64;
+	u16 lcmd, licmd;
+
+	switch (which) {
+	case QIB_IB_CFG_LWID_ENB:
+		ppd->link_width_enabled = val;
+		break;
+
+	case QIB_IB_CFG_SPD_ENB:
+		ppd->link_speed_enabled = val;
+		break;
+
+	case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
+		val64 = SYM_FIELD(dd->cspec->ibcctrl, IBCCtrl,
+				  OverrunThreshold);
+		if (val64 != val) {
+			dd->cspec->ibcctrl &=
+				~SYM_MASK(IBCCtrl, OverrunThreshold);
+			dd->cspec->ibcctrl |= (u64) val <<
+				SYM_LSB(IBCCtrl, OverrunThreshold);
+			qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl);
+			qib_write_kreg(dd, kr_scratch, 0);
+		}
+		break;
+
+	case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
+		val64 = SYM_FIELD(dd->cspec->ibcctrl, IBCCtrl,
+				  PhyerrThreshold);
+		if (val64 != val) {
+			dd->cspec->ibcctrl &=
+				~SYM_MASK(IBCCtrl, PhyerrThreshold);
+			dd->cspec->ibcctrl |= (u64) val <<
+				SYM_LSB(IBCCtrl, PhyerrThreshold);
+			qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl);
+			qib_write_kreg(dd, kr_scratch, 0);
+		}
+		break;
+
+	case QIB_IB_CFG_PKEYS: /* update pkeys */
+		val64 = (u64) ppd->pkeys[0] | ((u64) ppd->pkeys[1] << 16) |
+			((u64) ppd->pkeys[2] << 32) |
+			((u64) ppd->pkeys[3] << 48);
+		qib_write_kreg(dd, kr_partitionkey, val64);
+		break;
+
+	case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
+		/* will only take effect when the link state changes */
+		if (val == IB_LINKINITCMD_POLL)
+			dd->cspec->ibcctrl &=
+				~SYM_MASK(IBCCtrl, LinkDownDefaultState);
+		else /* SLEEP */
+			dd->cspec->ibcctrl |=
+				SYM_MASK(IBCCtrl, LinkDownDefaultState);
+		qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl);
+		qib_write_kreg(dd, kr_scratch, 0);
+		break;
+
+	case QIB_IB_CFG_MTU: /* update the MTU in IBC */
+		/*
+		 * Update our housekeeping variables, and set IBC max
+		 * size, same as init code; max IBC is max we allow in
+		 * buffer, less the qword pbc, plus 1 for ICRC, in dwords
+		 * Set even if it's unchanged, print debug message only
+		 * on changes.
+		 */
+		val = (ppd->ibmaxlen >> 2) + 1;
+		dd->cspec->ibcctrl &= ~SYM_MASK(IBCCtrl, MaxPktLen);
+		dd->cspec->ibcctrl |= (u64)val <<
+			SYM_LSB(IBCCtrl, MaxPktLen);
+		qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl);
+		qib_write_kreg(dd, kr_scratch, 0);
+		break;
+
+	case QIB_IB_CFG_LSTATE: /* set the IB link state */
+		switch (val & 0xffff0000) {
+		case IB_LINKCMD_DOWN:
+			lcmd = QLOGIC_IB_IBCC_LINKCMD_DOWN;
+			if (!dd->cspec->ibdeltainprog) {
+				dd->cspec->ibdeltainprog = 1;
+				dd->cspec->ibsymsnap =
+					read_6120_creg32(dd, cr_ibsymbolerr);
+				dd->cspec->iblnkerrsnap =
+					read_6120_creg32(dd, cr_iblinkerrrecov);
+			}
+			break;
+
+		case IB_LINKCMD_ARMED:
+			lcmd = QLOGIC_IB_IBCC_LINKCMD_ARMED;
+			break;
+
+		case IB_LINKCMD_ACTIVE:
+			lcmd = QLOGIC_IB_IBCC_LINKCMD_ACTIVE;
+			break;
+
+		default:
+			ret = -EINVAL;
+			qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16);
+			goto bail;
+		}
+		switch (val & 0xffff) {
+		case IB_LINKINITCMD_NOP:
+			licmd = 0;
+			break;
+
+		case IB_LINKINITCMD_POLL:
+			licmd = QLOGIC_IB_IBCC_LINKINITCMD_POLL;
+			break;
+
+		case IB_LINKINITCMD_SLEEP:
+			licmd = QLOGIC_IB_IBCC_LINKINITCMD_SLEEP;
+			break;
+
+		case IB_LINKINITCMD_DISABLE:
+			licmd = QLOGIC_IB_IBCC_LINKINITCMD_DISABLE;
+			break;
+
+		default:
+			ret = -EINVAL;
+			qib_dev_err(dd, "bad linkinitcmd req 0x%x\n",
+				    val & 0xffff);
+			goto bail;
+		}
+		qib_set_ib_6120_lstate(ppd, lcmd, licmd);
+		goto bail;
+
+	case QIB_IB_CFG_HRTBT:
+		ret = -EINVAL;
+		break;
+
+	default:
+		ret = -EINVAL;
+	}
+bail:
+	return ret;
+}
+
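+/*
+ * Loopback control: "ibc" enables loopback at the IB controller,
+ * "off" restores normal operation; no other modes are supported
+ * on this chip.
+ */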
+static int qib_6120_set_loopback(struct qib_pportdata *ppd, const char *what)
+{
+	int ret = 0;
+	if (!strncmp(what, "ibc", 3)) {
+		ppd->dd->cspec->ibcctrl |= SYM_MASK(IBCCtrl, Loopback);
+		qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
+			 ppd->dd->unit, ppd->port);
+	} else if (!strncmp(what, "off", 3)) {
+		ppd->dd->cspec->ibcctrl &= ~SYM_MASK(IBCCtrl, Loopback);
+		qib_devinfo(ppd->dd->pcidev, "Disabling IB%u:%u IBC loopback "
+			    "(normal)\n", ppd->dd->unit, ppd->port);
+	} else
+		ret = -EINVAL;
+	if (!ret) {
+		qib_write_kreg(ppd->dd, kr_ibcctrl, ppd->dd->cspec->ibcctrl);
+		qib_write_kreg(ppd->dd, kr_scratch, 0);
+	}
+	return ret;
+}
+
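+/*
+ * PMA sample timer: on the first expiry (STARTED -> RUNNING) snapshot
+ * the counters as a baseline; on the second (RUNNING -> DONE) convert
+ * the saved values into deltas over the sample interval.
+ */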
+static void pma_6120_timer(unsigned long data)
+{
+	struct qib_pportdata *ppd = (struct qib_pportdata *)data;
+	struct qib_chip_specific *cs = ppd->dd->cspec;
+	struct qib_ibport *ibp = &ppd->ibport_data;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ibp->lock, flags);
+	if (cs->pma_sample_status == IB_PMA_SAMPLE_STATUS_STARTED) {
+		cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_RUNNING;
+		qib_snapshot_counters(ppd, &cs->sword, &cs->rword,
+				      &cs->spkts, &cs->rpkts, &cs->xmit_wait);
+		mod_timer(&cs->pma_timer,
+			  jiffies + usecs_to_jiffies(ibp->pma_sample_interval));
+	} else if (cs->pma_sample_status == IB_PMA_SAMPLE_STATUS_RUNNING) {
+		u64 ta, tb, tc, td, te;
+
+		cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_DONE;
+		qib_snapshot_counters(ppd, &ta, &tb, &tc, &td, &te);
+
+		cs->sword = ta - cs->sword;
+		cs->rword = tb - cs->rword;
+		cs->spkts = tc - cs->spkts;
+		cs->rpkts = td - cs->rpkts;
+		cs->xmit_wait = te - cs->xmit_wait;
+	}
+	spin_unlock_irqrestore(&ibp->lock, flags);
+}
+
+/*
+ * Note that the caller has the ibp->lock held.
+ */
+static void qib_set_cntr_6120_sample(struct qib_pportdata *ppd, u32 intv,
+				     u32 start)
+{
+	struct qib_chip_specific *cs = ppd->dd->cspec;
+
+	if (start && intv) {
+		cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_STARTED;
+		mod_timer(&cs->pma_timer, jiffies + usecs_to_jiffies(start));
+	} else if (intv) {
+		cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_RUNNING;
+		qib_snapshot_counters(ppd, &cs->sword, &cs->rword,
+				      &cs->spkts, &cs->rpkts, &cs->xmit_wait);
+		mod_timer(&cs->pma_timer, jiffies + usecs_to_jiffies(intv));
+	} else {
+		cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_DONE;
+		cs->sword = 0;
+		cs->rword = 0;
+		cs->spkts = 0;
+		cs->rpkts = 0;
+		cs->xmit_wait = 0;
+	}
+}
+
+static u32 qib_6120_iblink_state(u64 ibcs)
+{
+	u32 state = (u32)SYM_FIELD(ibcs, IBCStatus, LinkState);
+
+	switch (state) {
+	case IB_6120_L_STATE_INIT:
+		state = IB_PORT_INIT;
+		break;
+	case IB_6120_L_STATE_ARM:
+		state = IB_PORT_ARMED;
+		break;
+	case IB_6120_L_STATE_ACTIVE:
+		/* fall through */
+	case IB_6120_L_STATE_ACT_DEFER:
+		state = IB_PORT_ACTIVE;
+		break;
+	default: /* fall through */
+	case IB_6120_L_STATE_DOWN:
+		state = IB_PORT_DOWN;
+		break;
+	}
+	return state;
+}
+
+/* returns the IBTA port state, rather than the IBC link training state */
+static u8 qib_6120_phys_portstate(u64 ibcs)
+{
+	u8 state = (u8)SYM_FIELD(ibcs, IBCStatus, LinkTrainingState);
+	return qib_6120_physportstate[state];
+}
+
+static int qib_6120_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ppd->lflags_lock, flags);
+	ppd->lflags &= ~QIBL_IB_FORCE_NOTIFY;
+	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+
+	if (ibup) {
+		if (ppd->dd->cspec->ibdeltainprog) {
+			ppd->dd->cspec->ibdeltainprog = 0;
+			ppd->dd->cspec->ibsymdelta +=
+				read_6120_creg32(ppd->dd, cr_ibsymbolerr) -
+					ppd->dd->cspec->ibsymsnap;
+			ppd->dd->cspec->iblnkerrdelta +=
+				read_6120_creg32(ppd->dd, cr_iblinkerrrecov) -
+					ppd->dd->cspec->iblnkerrsnap;
+		}
+		qib_hol_init(ppd);
+	} else {
+		ppd->dd->cspec->lli_counter = 0;
+		if (!ppd->dd->cspec->ibdeltainprog) {
+			ppd->dd->cspec->ibdeltainprog = 1;
+			ppd->dd->cspec->ibsymsnap =
+				read_6120_creg32(ppd->dd, cr_ibsymbolerr);
+			ppd->dd->cspec->iblnkerrsnap =
+				read_6120_creg32(ppd->dd, cr_iblinkerrrecov);
+		}
+		qib_hol_down(ppd);
+	}
+
+	qib_6120_setup_setextled(ppd, ibup);
+
+	return 0;
+}
+
+/* Does read/modify/write to appropriate registers to
+ * set output and direction bits selected by mask.
+ * These are in their canonical positions (e.g. lsb of
+ * dir will end up in D48 of extctrl on existing chips).
+ * Returns contents of GP Inputs.
+ */
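+/*
+ * Illustrative call: gpio_6120_mod(dd, 0x2, 0x3, 0x3) would make GPIO
+ * pins 0 and 1 outputs, drive pin 1 high and pin 0 low, and return
+ * the current GPIO input levels.
+ */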
+static int gpio_6120_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask)
+{
+	u64 read_val, new_out;
+	unsigned long flags;
+
+	if (mask) {
+		/* some bits being written, lock access to GPIO */
+		dir &= mask;
+		out &= mask;
+		spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
+		dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe));
+		dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe));
+		new_out = (dd->cspec->gpio_out & ~mask) | out;
+
+		qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
+		qib_write_kreg(dd, kr_gpio_out, new_out);
+		dd->cspec->gpio_out = new_out;
+		spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
+	}
+	/*
+	 * It is unlikely that a read at this time would get valid
+	 * data on a pin whose direction line was set in the same
+	 * call to this function. We include the read here because
+	 * that allows us to potentially combine a change on one pin with
+	 * a read on another, and because the old code did something like
+	 * this.
+	 */
+	read_val = qib_read_kreg64(dd, kr_extstatus);
+	return SYM_FIELD(read_val, EXTStatus, GPIOIn);
+}
+
+/*
+ * Read fundamental info we need to use the chip.  These are
+ * the registers that describe chip capabilities, and are
+ * saved in shadow registers.
+ */
+static void get_6120_chip_params(struct qib_devdata *dd)
+{
+	u64 val;
+	u32 piobufs;
+	int mtu;
+
+	dd->uregbase = qib_read_kreg32(dd, kr_userregbase);
+
+	dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt);
+	dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase);
+	dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase);
+	dd->palign = qib_read_kreg32(dd, kr_palign);
+	dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase);
+	dd->pio2k_bufbase = dd->piobufbase & 0xffffffff;
+
+	dd->rcvhdrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);
+
+	val = qib_read_kreg64(dd, kr_sendpiosize);
+	dd->piosize2k = val & ~0U;
+	dd->piosize4k = val >> 32;
+
+	mtu = ib_mtu_enum_to_int(qib_ibmtu);
+	if (mtu == -1)
+		mtu = QIB_DEFAULT_MTU;
+	dd->pport->ibmtu = (u32)mtu;
+
+	val = qib_read_kreg64(dd, kr_sendpiobufcnt);
+	dd->piobcnt2k = val & ~0U;
+	dd->piobcnt4k = val >> 32;
+	/* these may be adjusted in init_chip_wc_pat() */
+	dd->pio2kbase = (u32 __iomem *)
+		(((char __iomem *)dd->kregbase) + dd->pio2k_bufbase);
+	if (dd->piobcnt4k) {
+		dd->pio4kbase = (u32 __iomem *)
+			(((char __iomem *) dd->kregbase) +
+			 (dd->piobufbase >> 32));
+		/*
+		 * 4K buffers take 2 pages; we use roundup just to be
+		 * paranoid; we calculate it once here, rather than on
+		 * every buf allocate
+		 */
+		dd->align4k = ALIGN(dd->piosize4k, dd->palign);
+	}
+
+	piobufs = dd->piobcnt4k + dd->piobcnt2k;
+
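+	/*
+	 * Each 64-bit pioavail register holds 2 status bits per buffer,
+	 * so one register covers 32 buffers; e.g. 136 buffers would
+	 * round up to ALIGN(136, 32) / 32 = 5 registers.
+	 */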
+	dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) /
+		(sizeof(u64) * BITS_PER_BYTE / 2);
+}
+
+/*
+ * The chip base addresses in cspec and cpspec have to be set
+ * after possible init_chip_wc_pat(), rather than in
+ * get_6120_chip_params(), so this is split out as a separate function.
+ */
+static void set_6120_baseaddrs(struct qib_devdata *dd)
+{
+	u32 cregbase;
+
+	cregbase = qib_read_kreg32(dd, kr_counterregbase);
+	dd->cspec->cregbase = (u64 __iomem *)
+		((char __iomem *) dd->kregbase + cregbase);
+
+	dd->egrtidbase = (u64 __iomem *)
+		((char __iomem *) dd->kregbase + dd->rcvegrbase);
+}
+
+/*
+ * Write the final few registers that depend on some of the
+ * init setup.  Done late in init, just before bringing up
+ * the serdes.
+ */
+static int qib_late_6120_initreg(struct qib_devdata *dd)
+{
+	int ret = 0;
+	u64 val;
+
+	qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize);
+	qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize);
+	qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt);
+	qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys);
+	val = qib_read_kreg64(dd, kr_sendpioavailaddr);
+	if (val != dd->pioavailregs_phys) {
+		qib_dev_err(dd, "Catastrophic software error, "
+			    "SendPIOAvailAddr written as %lx, "
+			    "read back as %llx\n",
+			    (unsigned long) dd->pioavailregs_phys,
+			    (unsigned long long) val);
+		ret = -EINVAL;
+	}
+	return ret;
+}
+
+static int init_6120_variables(struct qib_devdata *dd)
+{
+	int ret = 0;
+	struct qib_pportdata *ppd;
+	u32 sbufs;
+
+	ppd = (struct qib_pportdata *)(dd + 1);
+	dd->pport = ppd;
+	dd->num_pports = 1;
+
+	dd->cspec = (struct qib_chip_specific *)(ppd + dd->num_pports);
+	ppd->cpspec = NULL; /* not used in this chip */
+
+	spin_lock_init(&dd->cspec->kernel_tid_lock);
+	spin_lock_init(&dd->cspec->user_tid_lock);
+	spin_lock_init(&dd->cspec->rcvmod_lock);
+	spin_lock_init(&dd->cspec->gpio_lock);
+
+	/* we haven't yet set QIB_PRESENT, so use read directly */
+	dd->revision = readq(&dd->kregbase[kr_revision]);
+
+	if ((dd->revision & 0xffffffffU) == 0xffffffffU) {
+		qib_dev_err(dd, "Revision register read failure, "
+			    "giving up initialization\n");
+		ret = -ENODEV;
+		goto bail;
+	}
+	dd->flags |= QIB_PRESENT;  /* now register routines work */
+
+	dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R,
+				    ChipRevMajor);
+	dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R,
+				    ChipRevMinor);
+
+	get_6120_chip_params(dd);
+	pe_boardname(dd); /* fill in boardname */
+
+	/*
+	 * GPIO bits for TWSI data and clock,
+	 * used for serial EEPROM.
+	 */
+	dd->gpio_sda_num = _QIB_GPIO_SDA_NUM;
+	dd->gpio_scl_num = _QIB_GPIO_SCL_NUM;
+	dd->twsi_eeprom_dev = QIB_TWSI_NO_DEV;
+
+	if (qib_unordered_wc())
+		dd->flags |= QIB_PIO_FLUSH_WC;
+
+	/*
+	 * EEPROM error log 0 is TXE Parity errors. 1 is RXE Parity.
+	 * 2 is Some Misc, 3 is reserved for future.
+	 */
+	dd->eep_st_masks[0].hwerrs_to_log = HWE_MASK(TXEMemParityErr);
+
+	/* Ignore errors in PIO/PBC on systems with unordered write-combining */
+	if (qib_unordered_wc())
+		dd->eep_st_masks[0].hwerrs_to_log &= ~TXE_PIO_PARITY;
+
+	dd->eep_st_masks[1].hwerrs_to_log = HWE_MASK(RXEMemParityErr);
+
+	dd->eep_st_masks[2].errs_to_log = ERR_MASK(ResetNegated);
+
+	qib_init_pportdata(ppd, dd, 0, 1);
+	ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
+	ppd->link_speed_supported = QIB_IB_SDR;
+	ppd->link_width_enabled = IB_WIDTH_4X;
+	ppd->link_speed_enabled = ppd->link_speed_supported;
+	/* these can't change for this chip, so set once */
+	ppd->link_width_active = ppd->link_width_enabled;
+	ppd->link_speed_active = ppd->link_speed_enabled;
+	ppd->vls_supported = IB_VL_VL0;
+	ppd->vls_operational = ppd->vls_supported;
+
+	dd->rcvhdrentsize = QIB_RCVHDR_ENTSIZE;
+	dd->rcvhdrsize = QIB_DFLT_RCVHDRSIZE;
+	dd->rhf_offset = 0;
+
+	/* we always allocate at least 2048 bytes for eager buffers */
+	ret = ib_mtu_enum_to_int(qib_ibmtu);
+	dd->rcvegrbufsize = ret != -1 ? max(ret, 2048) : QIB_DEFAULT_MTU;
+
+	qib_6120_tidtemplate(dd);
+
+	/*
+	 * We can request a receive interrupt for 1 or
+	 * more packets from current offset.  For now, we set this
+	 * up for a single packet.
+	 */
+	dd->rhdrhead_intr_off = 1ULL << 32;
+
+	/* setup the stats timer; the add_timer is done at end of init */
+	init_timer(&dd->stats_timer);
+	dd->stats_timer.function = qib_get_6120_faststats;
+	dd->stats_timer.data = (unsigned long) dd;
+
+	init_timer(&dd->cspec->pma_timer);
+	dd->cspec->pma_timer.function = pma_6120_timer;
+	dd->cspec->pma_timer.data = (unsigned long) ppd;
+
+	dd->ureg_align = qib_read_kreg32(dd, kr_palign);
+
+	dd->piosize2kmax_dwords = dd->piosize2k >> 2;
+	qib_6120_config_ctxts(dd);
+	qib_set_ctxtcnt(dd);
+
+	if (qib_wc_pat) {
+		ret = init_chip_wc_pat(dd, 0);
+		if (ret)
+			goto bail;
+	}
+	set_6120_baseaddrs(dd); /* set chip access pointers now */
+
+	ret = 0;
+	if (qib_mini_init)
+		goto bail;
+
+	qib_num_cfg_vls = 1; /* if any 6120s, only one VL */
+
+	ret = qib_create_ctxts(dd);
+	init_6120_cntrnames(dd);
+
+	/* use all of 4KB buffers for the kernel, otherwise 16 */
+	sbufs = dd->piobcnt4k ?  dd->piobcnt4k : 16;
+
+	dd->lastctxt_piobuf = dd->piobcnt2k + dd->piobcnt4k - sbufs;
+	dd->pbufsctxt = dd->lastctxt_piobuf /
+		(dd->cfgctxts - dd->first_user_ctxt);
+
+	if (ret)
+		goto bail;
+bail:
+	return ret;
+}
+
+/*
+ * For this chip, we want to use the same buffer every time
+ * when we are trying to bring the link up (they are always VL15
+ * packets).  At that link state the packet should always go out immediately
+ * (or at least be discarded at the tx interface if the link is down).
+ * If it doesn't, and the buffer isn't available, that means some other
+ * sender has gotten ahead of us, and is preventing our packet from going
+ * out.  In that case, we flush all packets, and try again.  If that still
+ * fails, we fail the request, and hope things work the next time around.
+ *
+ * We don't need very complicated heuristics on whether the packet had
+ * time to go out or not, since even at SDR 1X, it goes out in very short
+ * time periods, covered by the chip reads done here and as part of the
+ * flush.
+ */
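+/*
+ * Sketch of the sequence implemented below: blip the avail update and
+ * try the last buffer; if that is busy, disarm and flush all buffers,
+ * blip again, and retry once before giving up.
+ */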
+static u32 __iomem *get_6120_link_buf(struct qib_pportdata *ppd, u32 *bnum)
+{
+	u32 __iomem *buf;
+	u32 lbuf = ppd->dd->piobcnt2k + ppd->dd->piobcnt4k - 1;
+
+	/*
+	 * always blip to get avail list updated, since it's almost
+	 * always needed, and is fairly cheap.
+	 */
+	sendctrl_6120_mod(ppd->dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
+	qib_read_kreg64(ppd->dd, kr_scratch); /* extra chip flush */
+	buf = qib_getsendbuf_range(ppd->dd, bnum, lbuf, lbuf);
+	if (buf)
+		goto done;
+
+	sendctrl_6120_mod(ppd, QIB_SENDCTRL_DISARM_ALL | QIB_SENDCTRL_FLUSH |
+			  QIB_SENDCTRL_AVAIL_BLIP);
+	ppd->dd->upd_pio_shadow  = 1; /* update our idea of what's busy */
+	qib_read_kreg64(ppd->dd, kr_scratch); /* extra chip flush */
+	buf = qib_getsendbuf_range(ppd->dd, bnum, lbuf, lbuf);
+done:
+	return buf;
+}
+
+static u32 __iomem *qib_6120_getsendbuf(struct qib_pportdata *ppd, u64 pbc,
+					u32 *pbufnum)
+{
+	u32 first, last, plen = pbc & QIB_PBC_LENGTH_MASK;
+	struct qib_devdata *dd = ppd->dd;
+	u32 __iomem *buf;
+
+	if (((pbc >> 32) & PBC_6120_VL15_SEND_CTRL) &&
+		!(ppd->lflags & (QIBL_IB_AUTONEG_INPROG | QIBL_LINKACTIVE)))
+		buf = get_6120_link_buf(ppd, pbufnum);
+	else {
+
+		if ((plen + 1) > dd->piosize2kmax_dwords)
+			first = dd->piobcnt2k;
+		else
+			first = 0;
+		/* try 4k if all 2k busy, so same last for both sizes */
+		last = dd->piobcnt2k + dd->piobcnt4k - 1;
+		buf = qib_getsendbuf_range(dd, pbufnum, first, last);
+	}
+	return buf;
+}
+
+static int init_sdma_6120_regs(struct qib_pportdata *ppd)
+{
+	return -ENODEV;
+}
+
+static u16 qib_sdma_6120_gethead(struct qib_pportdata *ppd)
+{
+	return 0;
+}
+
+static int qib_sdma_6120_busy(struct qib_pportdata *ppd)
+{
+	return 0;
+}
+
+static void qib_sdma_update_6120_tail(struct qib_pportdata *ppd, u16 tail)
+{
+}
+
+static void qib_6120_sdma_sendctrl(struct qib_pportdata *ppd, unsigned op)
+{
+}
+
+static void qib_sdma_set_6120_desc_cnt(struct qib_pportdata *ppd, unsigned cnt)
+{
+}
+
+/*
+ * the pbc doesn't need a VL15 indicator, but we need it for link_buf.
+ * The chip ignores the bit if set.
+ */
+static u32 qib_6120_setpbc_control(struct qib_pportdata *ppd, u32 plen,
+				   u8 srate, u8 vl)
+{
+	return vl == 15 ? PBC_6120_VL15_SEND_CTRL : 0;
+}
+
+static void qib_6120_initvl15_bufs(struct qib_devdata *dd)
+{
+}
+
+static void qib_6120_init_ctxt(struct qib_ctxtdata *rcd)
+{
+	rcd->rcvegrcnt = rcd->dd->rcvhdrcnt;
+	rcd->rcvegr_tid_base = rcd->ctxt * rcd->rcvegrcnt;
+}
+
+static void qib_6120_txchk_change(struct qib_devdata *dd, u32 start,
+	u32 len, u32 avail, struct qib_ctxtdata *rcd)
+{
+}
+
+static void writescratch(struct qib_devdata *dd, u32 val)
+{
+	(void) qib_write_kreg(dd, kr_scratch, val);
+}
+
+static int qib_6120_tempsense_rd(struct qib_devdata *dd, int regnum)
+{
+	return -ENXIO;
+}
+
+/* Dummy function, as 6120 boards never disable EEPROM Write */
+static int qib_6120_eeprom_wen(struct qib_devdata *dd, int wen)
+{
+	return 1;
+}
+
+/**
+ * qib_init_iba6120_funcs - set up the chip-specific function pointers
+ * @pdev: pci_dev of the qlogic_ib device
+ * @ent: pci_device_id matching this chip
+ *
+ * This is global, and is called directly at init to set up the
+ * chip-specific function pointers for later use.
+ *
+ * It also allocates/partially-inits the qib_devdata struct for
+ * this device.
+ */
+struct qib_devdata *qib_init_iba6120_funcs(struct pci_dev *pdev,
+					   const struct pci_device_id *ent)
+{
+	struct qib_devdata *dd;
+	int ret;
+
+#ifndef CONFIG_PCI_MSI
+	qib_early_err(&pdev->dev, "QLogic PCIE device 0x%x cannot "
+	      "work if CONFIG_PCI_MSI is not enabled\n",
+	      ent->device);
+	dd = ERR_PTR(-ENODEV);
+	goto bail;
+#endif
+
+	dd = qib_alloc_devdata(pdev, sizeof(struct qib_pportdata) +
+			       sizeof(struct qib_chip_specific));
+	if (IS_ERR(dd))
+		goto bail;
+
+	dd->f_bringup_serdes    = qib_6120_bringup_serdes;
+	dd->f_cleanup           = qib_6120_setup_cleanup;
+	dd->f_clear_tids        = qib_6120_clear_tids;
+	dd->f_free_irq          = qib_6120_free_irq;
+	dd->f_get_base_info     = qib_6120_get_base_info;
+	dd->f_get_msgheader     = qib_6120_get_msgheader;
+	dd->f_getsendbuf        = qib_6120_getsendbuf;
+	dd->f_gpio_mod          = gpio_6120_mod;
+	dd->f_eeprom_wen	= qib_6120_eeprom_wen;
+	dd->f_hdrqempty         = qib_6120_hdrqempty;
+	dd->f_ib_updown         = qib_6120_ib_updown;
+	dd->f_init_ctxt         = qib_6120_init_ctxt;
+	dd->f_initvl15_bufs     = qib_6120_initvl15_bufs;
+	dd->f_intr_fallback     = qib_6120_nointr_fallback;
+	dd->f_late_initreg      = qib_late_6120_initreg;
+	dd->f_setpbc_control    = qib_6120_setpbc_control;
+	dd->f_portcntr          = qib_portcntr_6120;
+	dd->f_put_tid           = (dd->minrev >= 2) ?
+				      qib_6120_put_tid_2 :
+				      qib_6120_put_tid;
+	dd->f_quiet_serdes      = qib_6120_quiet_serdes;
+	dd->f_rcvctrl           = rcvctrl_6120_mod;
+	dd->f_read_cntrs        = qib_read_6120cntrs;
+	dd->f_read_portcntrs    = qib_read_6120portcntrs;
+	dd->f_reset             = qib_6120_setup_reset;
+	dd->f_init_sdma_regs    = init_sdma_6120_regs;
+	dd->f_sdma_busy         = qib_sdma_6120_busy;
+	dd->f_sdma_gethead      = qib_sdma_6120_gethead;
+	dd->f_sdma_sendctrl     = qib_6120_sdma_sendctrl;
+	dd->f_sdma_set_desc_cnt = qib_sdma_set_6120_desc_cnt;
+	dd->f_sdma_update_tail  = qib_sdma_update_6120_tail;
+	dd->f_sendctrl          = sendctrl_6120_mod;
+	dd->f_set_armlaunch     = qib_set_6120_armlaunch;
+	dd->f_set_cntr_sample   = qib_set_cntr_6120_sample;
+	dd->f_iblink_state      = qib_6120_iblink_state;
+	dd->f_ibphys_portstate  = qib_6120_phys_portstate;
+	dd->f_get_ib_cfg        = qib_6120_get_ib_cfg;
+	dd->f_set_ib_cfg        = qib_6120_set_ib_cfg;
+	dd->f_set_ib_loopback   = qib_6120_set_loopback;
+	dd->f_set_intr_state    = qib_6120_set_intr_state;
+	dd->f_setextled         = qib_6120_setup_setextled;
+	dd->f_txchk_change      = qib_6120_txchk_change;
+	dd->f_update_usrhead    = qib_update_6120_usrhead;
+	dd->f_wantpiobuf_intr   = qib_wantpiobuf_6120_intr;
+	dd->f_xgxs_reset        = qib_6120_xgxs_reset;
+	dd->f_writescratch      = writescratch;
+	dd->f_tempsense_rd	= qib_6120_tempsense_rd;
+	/*
+	 * Do remaining pcie setup and save pcie values in dd.
+	 * Any error printing is already done by the init code.
+	 * On return, we have the chip mapped and accessible,
+	 * but chip registers are not set up until start of
+	 * init_6120_variables.
+	 */
+	ret = qib_pcie_ddinit(dd, pdev, ent);
+	if (ret < 0)
+		goto bail_free;
+
+	/* initialize chip-specific variables */
+	ret = init_6120_variables(dd);
+	if (ret)
+		goto bail_cleanup;
+
+	if (qib_mini_init)
+		goto bail;
+
+#ifndef CONFIG_PCI_MSI
+	qib_dev_err(dd, "PCI_MSI not configured, NO interrupts\n");
+#endif
+
+	if (qib_pcie_params(dd, 8, NULL, NULL))
+		qib_dev_err(dd, "Failed to setup PCIe or interrupts; "
+			    "continuing anyway\n");
+	dd->cspec->irq = pdev->irq; /* save IRQ */
+
+	/* clear diagctrl register, in case diags were running and crashed */
+	qib_write_kreg(dd, kr_hwdiagctrl, 0);
+
+	if (qib_read_kreg64(dd, kr_hwerrstatus) &
+	    QLOGIC_IB_HWE_SERDESPLLFAILED)
+		qib_write_kreg(dd, kr_hwerrclear,
+			       QLOGIC_IB_HWE_SERDESPLLFAILED);
+
+	/* setup interrupt handler (interrupt type handled above) */
+	qib_setup_6120_interrupt(dd);
+	/* Note that qpn_mask is set by qib_6120_config_ctxts() first */
+	qib_6120_init_hwerrors(dd);
+
+	goto bail;
+
+bail_cleanup:
+	qib_pcie_ddcleanup(dd);
+bail_free:
+	qib_free_devdata(dd);
+	dd = ERR_PTR(ret);
+bail:
+	return dd;
+}
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
new file mode 100644
index 000000000000..6fd8d74e7392
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_iba7220.c
@@ -0,0 +1,4618 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
+ * All rights reserved.
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+/*
+ * This file contains all of the code that is specific to the
+ * QLogic_IB 7220 chip (except that specific to the SerDes)
+ */
+
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <rdma/ib_verbs.h>
+
+#include "qib.h"
+#include "qib_7220.h"
+
+static void qib_setup_7220_setextled(struct qib_pportdata *, u32);
+static void qib_7220_handle_hwerrors(struct qib_devdata *, char *, size_t);
+static void sendctrl_7220_mod(struct qib_pportdata *ppd, u32 op);
+static u32 qib_7220_iblink_state(u64);
+static u8 qib_7220_phys_portstate(u64);
+static void qib_sdma_update_7220_tail(struct qib_pportdata *, u16);
+static void qib_set_ib_7220_lstate(struct qib_pportdata *, u16, u16);
+
+/*
+ * This file contains almost all the chip-specific register information and
+ * access functions for the QLogic QLogic_IB 7220 PCI-Express chip, with the
+ * exception of SerDes support, which is in qib_sd7220.c.
+ */
+
+/* Below uses machine-generated qib_chipnum_regs.h file */
+#define KREG_IDX(regname) (QIB_7220_##regname##_OFFS / sizeof(u64))
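+/*
+ * E.g. KREG_IDX(Scratch) expands to QIB_7220_Scratch_OFFS /
+ * sizeof(u64), converting the byte offset from the generated header
+ * into a u64-word register index.
+ */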
+
+/* Use defines to tie machine-generated names to lower-case names */
+#define kr_control KREG_IDX(Control)
+#define kr_counterregbase KREG_IDX(CntrRegBase)
+#define kr_errclear KREG_IDX(ErrClear)
+#define kr_errmask KREG_IDX(ErrMask)
+#define kr_errstatus KREG_IDX(ErrStatus)
+#define kr_extctrl KREG_IDX(EXTCtrl)
+#define kr_extstatus KREG_IDX(EXTStatus)
+#define kr_gpio_clear KREG_IDX(GPIOClear)
+#define kr_gpio_mask KREG_IDX(GPIOMask)
+#define kr_gpio_out KREG_IDX(GPIOOut)
+#define kr_gpio_status KREG_IDX(GPIOStatus)
+#define kr_hrtbt_guid KREG_IDX(HRTBT_GUID)
+#define kr_hwdiagctrl KREG_IDX(HwDiagCtrl)
+#define kr_hwerrclear KREG_IDX(HwErrClear)
+#define kr_hwerrmask KREG_IDX(HwErrMask)
+#define kr_hwerrstatus KREG_IDX(HwErrStatus)
+#define kr_ibcctrl KREG_IDX(IBCCtrl)
+#define kr_ibcddrctrl KREG_IDX(IBCDDRCtrl)
+#define kr_ibcddrstatus KREG_IDX(IBCDDRStatus)
+#define kr_ibcstatus KREG_IDX(IBCStatus)
+#define kr_ibserdesctrl KREG_IDX(IBSerDesCtrl)
+#define kr_intclear KREG_IDX(IntClear)
+#define kr_intmask KREG_IDX(IntMask)
+#define kr_intstatus KREG_IDX(IntStatus)
+#define kr_ncmodectrl KREG_IDX(IBNCModeCtrl)
+#define kr_palign KREG_IDX(PageAlign)
+#define kr_partitionkey KREG_IDX(RcvPartitionKey)
+#define kr_portcnt KREG_IDX(PortCnt)
+#define kr_rcvbthqp KREG_IDX(RcvBTHQP)
+#define kr_rcvctrl KREG_IDX(RcvCtrl)
+#define kr_rcvegrbase KREG_IDX(RcvEgrBase)
+#define kr_rcvegrcnt KREG_IDX(RcvEgrCnt)
+#define kr_rcvhdrcnt KREG_IDX(RcvHdrCnt)
+#define kr_rcvhdrentsize KREG_IDX(RcvHdrEntSize)
+#define kr_rcvhdrsize KREG_IDX(RcvHdrSize)
+#define kr_rcvpktledcnt KREG_IDX(RcvPktLEDCnt)
+#define kr_rcvtidbase KREG_IDX(RcvTIDBase)
+#define kr_rcvtidcnt KREG_IDX(RcvTIDCnt)
+#define kr_revision KREG_IDX(Revision)
+#define kr_scratch KREG_IDX(Scratch)
+#define kr_sendbuffererror KREG_IDX(SendBufErr0)
+#define kr_sendctrl KREG_IDX(SendCtrl)
+#define kr_senddmabase KREG_IDX(SendDmaBase)
+#define kr_senddmabufmask0 KREG_IDX(SendDmaBufMask0)
+#define kr_senddmabufmask1 (KREG_IDX(SendDmaBufMask0) + 1)
+#define kr_senddmabufmask2 (KREG_IDX(SendDmaBufMask0) + 2)
+#define kr_senddmahead KREG_IDX(SendDmaHead)
+#define kr_senddmaheadaddr KREG_IDX(SendDmaHeadAddr)
+#define kr_senddmalengen KREG_IDX(SendDmaLenGen)
+#define kr_senddmastatus KREG_IDX(SendDmaStatus)
+#define kr_senddmatail KREG_IDX(SendDmaTail)
+#define kr_sendpioavailaddr KREG_IDX(SendBufAvailAddr)
+#define kr_sendpiobufbase KREG_IDX(SendBufBase)
+#define kr_sendpiobufcnt KREG_IDX(SendBufCnt)
+#define kr_sendpiosize KREG_IDX(SendBufSize)
+#define kr_sendregbase KREG_IDX(SendRegBase)
+#define kr_userregbase KREG_IDX(UserRegBase)
+#define kr_xgxs_cfg KREG_IDX(XGXSCfg)
+
+/* These must only be written via qib_write_kreg_ctxt() */
+#define kr_rcvhdraddr KREG_IDX(RcvHdrAddr0)
+#define kr_rcvhdrtailaddr KREG_IDX(RcvHdrTailAddr0)
+
+
+#define CREG_IDX(regname) ((QIB_7220_##regname##_OFFS - \
+			QIB_7220_LBIntCnt_OFFS) / sizeof(u64))
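+/*
+ * By construction, CREG_IDX(LBIntCnt) is 0: counter registers are
+ * indexed as 64-bit words relative to LBIntCnt, the first register in
+ * the counter block.
+ */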
+
+#define cr_badformat CREG_IDX(RxVersionErrCnt)
+#define cr_erricrc CREG_IDX(RxICRCErrCnt)
+#define cr_errlink CREG_IDX(RxLinkMalformCnt)
+#define cr_errlpcrc CREG_IDX(RxLPCRCErrCnt)
+#define cr_errpkey CREG_IDX(RxPKeyMismatchCnt)
+#define cr_rcvflowctrl_err CREG_IDX(RxFlowCtrlViolCnt)
+#define cr_err_rlen CREG_IDX(RxLenErrCnt)
+#define cr_errslen CREG_IDX(TxLenErrCnt)
+#define cr_errtidfull CREG_IDX(RxTIDFullErrCnt)
+#define cr_errtidvalid CREG_IDX(RxTIDValidErrCnt)
+#define cr_errvcrc CREG_IDX(RxVCRCErrCnt)
+#define cr_ibstatuschange CREG_IDX(IBStatusChangeCnt)
+#define cr_lbint CREG_IDX(LBIntCnt)
+#define cr_invalidrlen CREG_IDX(RxMaxMinLenErrCnt)
+#define cr_invalidslen CREG_IDX(TxMaxMinLenErrCnt)
+#define cr_lbflowstall CREG_IDX(LBFlowStallCnt)
+#define cr_pktrcv CREG_IDX(RxDataPktCnt)
+#define cr_pktrcvflowctrl CREG_IDX(RxFlowPktCnt)
+#define cr_pktsend CREG_IDX(TxDataPktCnt)
+#define cr_pktsendflow CREG_IDX(TxFlowPktCnt)
+#define cr_portovfl CREG_IDX(RxP0HdrEgrOvflCnt)
+#define cr_rcvebp CREG_IDX(RxEBPCnt)
+#define cr_rcvovfl CREG_IDX(RxBufOvflCnt)
+#define cr_senddropped CREG_IDX(TxDroppedPktCnt)
+#define cr_sendstall CREG_IDX(TxFlowStallCnt)
+#define cr_sendunderrun CREG_IDX(TxUnderrunCnt)
+#define cr_wordrcv CREG_IDX(RxDwordCnt)
+#define cr_wordsend CREG_IDX(TxDwordCnt)
+#define cr_txunsupvl CREG_IDX(TxUnsupVLErrCnt)
+#define cr_rxdroppkt CREG_IDX(RxDroppedPktCnt)
+#define cr_iblinkerrrecov CREG_IDX(IBLinkErrRecoveryCnt)
+#define cr_iblinkdown CREG_IDX(IBLinkDownedCnt)
+#define cr_ibsymbolerr CREG_IDX(IBSymbolErrCnt)
+#define cr_vl15droppedpkt CREG_IDX(RxVL15DroppedPktCnt)
+#define cr_rxotherlocalphyerr CREG_IDX(RxOtherLocalPhyErrCnt)
+#define cr_excessbufferovfl CREG_IDX(ExcessBufferOvflCnt)
+#define cr_locallinkintegrityerr CREG_IDX(LocalLinkIntegrityErrCnt)
+#define cr_rxvlerr CREG_IDX(RxVlErrCnt)
+#define cr_rxdlidfltr CREG_IDX(RxDlidFltrCnt)
+#define cr_psstat CREG_IDX(PSStat)
+#define cr_psstart CREG_IDX(PSStart)
+#define cr_psinterval CREG_IDX(PSInterval)
+#define cr_psrcvdatacount CREG_IDX(PSRcvDataCount)
+#define cr_psrcvpktscount CREG_IDX(PSRcvPktsCount)
+#define cr_psxmitdatacount CREG_IDX(PSXmitDataCount)
+#define cr_psxmitpktscount CREG_IDX(PSXmitPktsCount)
+#define cr_psxmitwaitcount CREG_IDX(PSXmitWaitCount)
+#define cr_txsdmadesc CREG_IDX(TxSDmaDescCnt)
+#define cr_pcieretrydiag CREG_IDX(PcieRetryBufDiagQwordCnt)
+
+#define SYM_RMASK(regname, fldname) ((u64)              \
+	QIB_7220_##regname##_##fldname##_RMASK)
+#define SYM_MASK(regname, fldname) ((u64)               \
+	QIB_7220_##regname##_##fldname##_RMASK <<       \
+	 QIB_7220_##regname##_##fldname##_LSB)
+#define SYM_LSB(regname, fldname) (QIB_7220_##regname##_##fldname##_LSB)
+#define SYM_FIELD(value, regname, fldname) ((u64) \
+	(((value) >> SYM_LSB(regname, fldname)) & \
+	 SYM_RMASK(regname, fldname)))
+#define ERR_MASK(fldname) SYM_MASK(ErrMask, fldname##Mask)
+#define HWE_MASK(fldname) SYM_MASK(HwErrMask, fldname##Mask)
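+/*
+ * E.g. SYM_FIELD(ibcs, IBCStatus, LinkState) shifts ibcs right by the
+ * field's LSB and masks with its RMASK, yielding the raw LinkState
+ * value that qib_7220_iblink_state() maps to an IB port state.
+ */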
+
+/* ibcctrl bits */
+#define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1
+/* cycle through TS1/TS2 till OK */
+#define QLOGIC_IB_IBCC_LINKINITCMD_POLL 2
+/* wait for TS1, then go on */
+#define QLOGIC_IB_IBCC_LINKINITCMD_SLEEP 3
+#define QLOGIC_IB_IBCC_LINKINITCMD_SHIFT 16
+
+#define QLOGIC_IB_IBCC_LINKCMD_DOWN 1           /* move to 0x11 */
+#define QLOGIC_IB_IBCC_LINKCMD_ARMED 2          /* move to 0x21 */
+#define QLOGIC_IB_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */
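+/*
+ * Illustrative composition (per the shift definitions here and
+ * below): a link state write combines
+ * (lcmd << IBA7220_IBCC_LINKCMD_SHIFT) with
+ * (licmd << QLOGIC_IB_IBCC_LINKINITCMD_SHIFT); e.g. LINKCMD_ARMED
+ * with LINKINITCMD_NOP requests the 0x21 "Armed" transition.
+ */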
+
+#define BLOB_7220_IBCHG 0x81
+
+/*
+ * We could have a single register get/put routine, that takes a group type,
+ * but this is somewhat clearer and cleaner.  It also gives us some error
+ * checking.  64 bit register reads should always work, but are inefficient
+ * on opteron (the northbridge always generates 2 separate HT 32 bit reads),
+ * so we use kreg32 wherever possible.  User register and counter register
+ * reads are always 32 bit reads, so only one form of those routines.
+ */
+
+/**
+ * qib_read_ureg32 - read 32-bit virtualized per-context register
+ * @dd: device
+ * @regno: register number
+ * @ctxt: context number
+ *
+ * Return the contents of a register that is virtualized to be per context.
+ * Returns 0 on errors such as the chip not being present (a value not
+ * distinguishable from valid contents at runtime; we may add a
+ * separate error variable at some point).
+ */
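+/*
+ * The per-context address is the user register base plus
+ * ctxt * dd->ureg_align, with regno indexing 64-bit words within that
+ * region; e.g. context 2's copy of a register lives 2 * ureg_align
+ * bytes past context 0's.
+ */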
+static inline u32 qib_read_ureg32(const struct qib_devdata *dd,
+				  enum qib_ureg regno, int ctxt)
+{
+	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
+		return 0;
+
+	if (dd->userbase)
+		return readl(regno + (u64 __iomem *)
+			     ((char __iomem *)dd->userbase +
+			      dd->ureg_align * ctxt));
+	else
+		return readl(regno + (u64 __iomem *)
+			     (dd->uregbase +
+			      (char __iomem *)dd->kregbase +
+			      dd->ureg_align * ctxt));
+}
+
+/**
+ * qib_write_ureg - write 32-bit virtualized per-context register
+ * @dd: device
+ * @regno: register number
+ * @value: value
+ * @ctxt: context
+ *
+ * Write the contents of a register that is virtualized to be per context.
+ */
+static inline void qib_write_ureg(const struct qib_devdata *dd,
+				  enum qib_ureg regno, u64 value, int ctxt)
+{
+	u64 __iomem *ubase;
+
+	if (dd->userbase)
+		ubase = (u64 __iomem *)
+			((char __iomem *) dd->userbase +
+			 dd->ureg_align * ctxt);
+	else
+		ubase = (u64 __iomem *)
+			(dd->uregbase +
+			 (char __iomem *) dd->kregbase +
+			 dd->ureg_align * ctxt);
+
+	if (dd->kregbase && (dd->flags & QIB_PRESENT))
+		writeq(value, &ubase[regno]);
+}
+
+/**
+ * qib_write_kreg_ctxt - write a device's per-ctxt 64-bit kernel register
+ * @dd: the qlogic_ib device
+ * @regno: the register number to write
+ * @ctxt: the context containing the register
+ * @value: the value to write
+ */
+static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd,
+				       const u16 regno, unsigned ctxt,
+				       u64 value)
+{
+	qib_write_kreg(dd, regno + ctxt, value);
+}
+
+static inline void write_7220_creg(const struct qib_devdata *dd,
+				   u16 regno, u64 value)
+{
+	if (dd->cspec->cregbase && (dd->flags & QIB_PRESENT))
+		writeq(value, &dd->cspec->cregbase[regno]);
+}
+
+static inline u64 read_7220_creg(const struct qib_devdata *dd, u16 regno)
+{
+	if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
+		return 0;
+	return readq(&dd->cspec->cregbase[regno]);
+}
+
+static inline u32 read_7220_creg32(const struct qib_devdata *dd, u16 regno)
+{
+	if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
+		return 0;
+	return readl(&dd->cspec->cregbase[regno]);
+}
+
+/* kr_revision bits */
+#define QLOGIC_IB_R_EMULATORREV_MASK ((1ULL << 22) - 1)
+#define QLOGIC_IB_R_EMULATORREV_SHIFT 40
+
+/* kr_control bits */
+#define QLOGIC_IB_C_RESET (1U << 7)
+
+/* kr_intstatus, kr_intclear, kr_intmask bits */
+#define QLOGIC_IB_I_RCVURG_MASK ((1ULL << 17) - 1)
+#define QLOGIC_IB_I_RCVURG_SHIFT 32
+#define QLOGIC_IB_I_RCVAVAIL_MASK ((1ULL << 17) - 1)
+#define QLOGIC_IB_I_RCVAVAIL_SHIFT 0
+#define QLOGIC_IB_I_SERDESTRIMDONE (1ULL << 27)
+
+#define QLOGIC_IB_C_FREEZEMODE 0x00000002
+#define QLOGIC_IB_C_LINKENABLE 0x00000004
+
+#define QLOGIC_IB_I_SDMAINT             0x8000000000000000ULL
+#define QLOGIC_IB_I_SDMADISABLED        0x4000000000000000ULL
+#define QLOGIC_IB_I_ERROR               0x0000000080000000ULL
+#define QLOGIC_IB_I_SPIOSENT            0x0000000040000000ULL
+#define QLOGIC_IB_I_SPIOBUFAVAIL        0x0000000020000000ULL
+#define QLOGIC_IB_I_GPIO                0x0000000010000000ULL
+
+/* masks for sanity checking interrupts and errors */
+#define QLOGIC_IB_I_BITSEXTANT \
+		(QLOGIC_IB_I_SDMAINT | QLOGIC_IB_I_SDMADISABLED | \
+		(QLOGIC_IB_I_RCVURG_MASK << QLOGIC_IB_I_RCVURG_SHIFT) | \
+		(QLOGIC_IB_I_RCVAVAIL_MASK << \
+		 QLOGIC_IB_I_RCVAVAIL_SHIFT) | \
+		QLOGIC_IB_I_ERROR | QLOGIC_IB_I_SPIOSENT | \
+		QLOGIC_IB_I_SPIOBUFAVAIL | QLOGIC_IB_I_GPIO | \
+		QLOGIC_IB_I_SERDESTRIMDONE)
+
+#define IB_HWE_BITSEXTANT \
+	       (HWE_MASK(RXEMemParityErr) | \
+		HWE_MASK(TXEMemParityErr) | \
+		(QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK <<	 \
+		 QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) | \
+		QLOGIC_IB_HWE_PCIE1PLLFAILED | \
+		QLOGIC_IB_HWE_PCIE0PLLFAILED | \
+		QLOGIC_IB_HWE_PCIEPOISONEDTLP | \
+		QLOGIC_IB_HWE_PCIECPLTIMEOUT | \
+		QLOGIC_IB_HWE_PCIEBUSPARITYXTLH | \
+		QLOGIC_IB_HWE_PCIEBUSPARITYXADM | \
+		QLOGIC_IB_HWE_PCIEBUSPARITYRADM | \
+		HWE_MASK(PowerOnBISTFailed) |	  \
+		QLOGIC_IB_HWE_COREPLL_FBSLIP | \
+		QLOGIC_IB_HWE_COREPLL_RFSLIP | \
+		QLOGIC_IB_HWE_SERDESPLLFAILED | \
+		HWE_MASK(IBCBusToSPCParityErr) | \
+		HWE_MASK(IBCBusFromSPCParityErr) | \
+		QLOGIC_IB_HWE_PCIECPLDATAQUEUEERR | \
+		QLOGIC_IB_HWE_PCIECPLHDRQUEUEERR | \
+		QLOGIC_IB_HWE_SDMAMEMREADERR | \
+		QLOGIC_IB_HWE_CLK_UC_PLLNOTLOCKED | \
+		QLOGIC_IB_HWE_PCIESERDESQ0PCLKNOTDETECT | \
+		QLOGIC_IB_HWE_PCIESERDESQ1PCLKNOTDETECT | \
+		QLOGIC_IB_HWE_PCIESERDESQ2PCLKNOTDETECT | \
+		QLOGIC_IB_HWE_PCIESERDESQ3PCLKNOTDETECT | \
+		QLOGIC_IB_HWE_DDSRXEQMEMORYPARITYERR | \
+		QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR | \
+		QLOGIC_IB_HWE_PCIE_UC_OCT0MEMORYPARITYERR | \
+		QLOGIC_IB_HWE_PCIE_UC_OCT1MEMORYPARITYERR)
+
+#define IB_E_BITSEXTANT							\
+	(ERR_MASK(RcvFormatErr) | ERR_MASK(RcvVCRCErr) |		\
+	 ERR_MASK(RcvICRCErr) | ERR_MASK(RcvMinPktLenErr) |		\
+	 ERR_MASK(RcvMaxPktLenErr) | ERR_MASK(RcvLongPktLenErr) |	\
+	 ERR_MASK(RcvShortPktLenErr) | ERR_MASK(RcvUnexpectedCharErr) | \
+	 ERR_MASK(RcvUnsupportedVLErr) | ERR_MASK(RcvEBPErr) |		\
+	 ERR_MASK(RcvIBFlowErr) | ERR_MASK(RcvBadVersionErr) |		\
+	 ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr) |		\
+	 ERR_MASK(RcvBadTidErr) | ERR_MASK(RcvHdrLenErr) |		\
+	 ERR_MASK(RcvHdrErr) | ERR_MASK(RcvIBLostLinkErr) |		\
+	 ERR_MASK(SendSpecialTriggerErr) |				\
+	 ERR_MASK(SDmaDisabledErr) | ERR_MASK(SendMinPktLenErr) |	\
+	 ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendUnderRunErr) |	\
+	 ERR_MASK(SendPktLenErr) | ERR_MASK(SendDroppedSmpPktErr) |	\
+	 ERR_MASK(SendDroppedDataPktErr) |				\
+	 ERR_MASK(SendPioArmLaunchErr) |				\
+	 ERR_MASK(SendUnexpectedPktNumErr) |				\
+	 ERR_MASK(SendUnsupportedVLErr) | ERR_MASK(SendBufMisuseErr) |	\
+	 ERR_MASK(SDmaGenMismatchErr) | ERR_MASK(SDmaOutOfBoundErr) |	\
+	 ERR_MASK(SDmaTailOutOfBoundErr) | ERR_MASK(SDmaBaseErr) |	\
+	 ERR_MASK(SDma1stDescErr) | ERR_MASK(SDmaRpyTagErr) |		\
+	 ERR_MASK(SDmaDwEnErr) | ERR_MASK(SDmaMissingDwErr) |		\
+	 ERR_MASK(SDmaUnexpDataErr) |					\
+	 ERR_MASK(IBStatusChanged) | ERR_MASK(InvalidAddrErr) |		\
+	 ERR_MASK(ResetNegated) | ERR_MASK(HardwareErr) |		\
+	 ERR_MASK(SDmaDescAddrMisalignErr) |				\
+	 ERR_MASK(InvalidEEPCmd))
+
+/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */
+#define QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK  0x00000000000000ffULL
+#define QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT 0
+#define QLOGIC_IB_HWE_PCIEPOISONEDTLP      0x0000000010000000ULL
+#define QLOGIC_IB_HWE_PCIECPLTIMEOUT       0x0000000020000000ULL
+#define QLOGIC_IB_HWE_PCIEBUSPARITYXTLH    0x0000000040000000ULL
+#define QLOGIC_IB_HWE_PCIEBUSPARITYXADM    0x0000000080000000ULL
+#define QLOGIC_IB_HWE_PCIEBUSPARITYRADM    0x0000000100000000ULL
+#define QLOGIC_IB_HWE_COREPLL_FBSLIP       0x0080000000000000ULL
+#define QLOGIC_IB_HWE_COREPLL_RFSLIP       0x0100000000000000ULL
+#define QLOGIC_IB_HWE_PCIE1PLLFAILED       0x0400000000000000ULL
+#define QLOGIC_IB_HWE_PCIE0PLLFAILED       0x0800000000000000ULL
+#define QLOGIC_IB_HWE_SERDESPLLFAILED      0x1000000000000000ULL
+/* specific to this chip */
+#define QLOGIC_IB_HWE_PCIECPLDATAQUEUEERR         0x0000000000000040ULL
+#define QLOGIC_IB_HWE_PCIECPLHDRQUEUEERR          0x0000000000000080ULL
+#define QLOGIC_IB_HWE_SDMAMEMREADERR              0x0000000010000000ULL
+#define QLOGIC_IB_HWE_CLK_UC_PLLNOTLOCKED          0x2000000000000000ULL
+#define QLOGIC_IB_HWE_PCIESERDESQ0PCLKNOTDETECT   0x0100000000000000ULL
+#define QLOGIC_IB_HWE_PCIESERDESQ1PCLKNOTDETECT   0x0200000000000000ULL
+#define QLOGIC_IB_HWE_PCIESERDESQ2PCLKNOTDETECT   0x0400000000000000ULL
+#define QLOGIC_IB_HWE_PCIESERDESQ3PCLKNOTDETECT   0x0800000000000000ULL
+#define QLOGIC_IB_HWE_DDSRXEQMEMORYPARITYERR       0x0000008000000000ULL
+#define QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR        0x0000004000000000ULL
+#define QLOGIC_IB_HWE_PCIE_UC_OCT0MEMORYPARITYERR 0x0000001000000000ULL
+#define QLOGIC_IB_HWE_PCIE_UC_OCT1MEMORYPARITYERR 0x0000002000000000ULL
+
+#define IBA7220_IBCC_LINKCMD_SHIFT 19
+
+/* kr_ibcddrctrl bits */
+#define IBA7220_IBC_DLIDLMC_MASK        0xFFFFFFFFUL
+#define IBA7220_IBC_DLIDLMC_SHIFT       32
+
+#define IBA7220_IBC_HRTBT_MASK  (SYM_RMASK(IBCDDRCtrl, HRTBT_AUTO) | \
+				 SYM_RMASK(IBCDDRCtrl, HRTBT_ENB))
+#define IBA7220_IBC_HRTBT_SHIFT SYM_LSB(IBCDDRCtrl, HRTBT_ENB)
+
+#define IBA7220_IBC_LANE_REV_SUPPORTED (1<<8)
+#define IBA7220_IBC_LREV_MASK   1
+#define IBA7220_IBC_LREV_SHIFT  8
+#define IBA7220_IBC_RXPOL_MASK  1
+#define IBA7220_IBC_RXPOL_SHIFT 7
+#define IBA7220_IBC_WIDTH_SHIFT 5
+#define IBA7220_IBC_WIDTH_MASK  0x3
+#define IBA7220_IBC_WIDTH_1X_ONLY       (0 << IBA7220_IBC_WIDTH_SHIFT)
+#define IBA7220_IBC_WIDTH_4X_ONLY       (1 << IBA7220_IBC_WIDTH_SHIFT)
+#define IBA7220_IBC_WIDTH_AUTONEG       (2 << IBA7220_IBC_WIDTH_SHIFT)
+#define IBA7220_IBC_SPEED_AUTONEG       (1 << 1)
+#define IBA7220_IBC_SPEED_SDR           (1 << 2)
+#define IBA7220_IBC_SPEED_DDR           (1 << 3)
+#define IBA7220_IBC_SPEED_AUTONEG_MASK  (0x7 << 1)
+#define IBA7220_IBC_IBTA_1_2_MASK       (1)
+
+/* kr_ibcddrstatus */
+/* link latency shift is 0, don't bother defining */
+#define IBA7220_DDRSTAT_LINKLAT_MASK    0x3ffffff
+
+/* kr_extstatus bits */
+#define QLOGIC_IB_EXTS_FREQSEL 0x2
+#define QLOGIC_IB_EXTS_SERDESSEL 0x4
+#define QLOGIC_IB_EXTS_MEMBIST_ENDTEST     0x0000000000004000
+#define QLOGIC_IB_EXTS_MEMBIST_DISABLED    0x0000000000008000
+
+/* kr_xgxsconfig bits */
+#define QLOGIC_IB_XGXS_RESET          0x5ULL
+#define QLOGIC_IB_XGXS_FC_SAFE        (1ULL << 63)
+
+/* kr_rcvpktledcnt */
+#define IBA7220_LEDBLINK_ON_SHIFT 32 /* 4ns period on after packet */
+#define IBA7220_LEDBLINK_OFF_SHIFT 0 /* 4ns period off before next on */
+
+#define _QIB_GPIO_SDA_NUM 1
+#define _QIB_GPIO_SCL_NUM 0
+#define QIB_TWSI_EEPROM_DEV 0xA2 /* All Production 7220 cards. */
+#define QIB_TWSI_TEMP_DEV 0x98
+
+/* HW counter clock is at 4nsec */
+#define QIB_7220_PSXMITWAIT_CHECK_RATE 4000
+
+#define IBA7220_R_INTRAVAIL_SHIFT 17
+#define IBA7220_R_PKEY_DIS_SHIFT 34
+#define IBA7220_R_TAILUPD_SHIFT 35
+#define IBA7220_R_CTXTCFG_SHIFT 36
+
+#define IBA7220_HDRHEAD_PKTINT_SHIFT 32 /* interrupt cnt in upper 32 bits */
+
+/*
+ * the size bits give us 2^N, in KB units.  0 marks as invalid,
+ * and 7 is reserved.  We currently use only 2KB and 4KB
+ */
+#define IBA7220_TID_SZ_SHIFT 37 /* shift to 3bit size selector */
+#define IBA7220_TID_SZ_2K (1UL << IBA7220_TID_SZ_SHIFT) /* 2KB */
+#define IBA7220_TID_SZ_4K (2UL << IBA7220_TID_SZ_SHIFT) /* 4KB */
+#define IBA7220_TID_PA_SHIFT 11U /* TID addr in chip stored w/o low bits */
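+/*
+ * Illustrative TID composition: a 4KB expected-TID entry would be
+ * built as IBA7220_TID_SZ_4K | (physaddr >> IBA7220_TID_PA_SHIFT),
+ * i.e. size selector 2 (2^2 KB) with the 2KB-aligned physical address
+ * stored without its low 11 bits.
+ */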
+#define PBC_7220_VL15_SEND (1ULL << 63) /* pbc; VL15, no credit check */
+#define PBC_7220_VL15_SEND_CTRL (1ULL << 31) /* control version of same */
+
+#define AUTONEG_TRIES 5 /* sequential retries to negotiate DDR */
+
+/* packet rate matching delay multiplier */
+static u8 rate_to_delay[2][2] = {
+	/* 1x, 4x */
+	{   8, 2 }, /* SDR */
+	{   4, 1 }  /* DDR */
+};
+
+static u8 ib_rate_to_delay[IB_RATE_120_GBPS + 1] = {
+	[IB_RATE_2_5_GBPS] = 8,
+	[IB_RATE_5_GBPS] = 4,
+	[IB_RATE_10_GBPS] = 2,
+	[IB_RATE_20_GBPS] = 1
+};
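+/*
+ * E.g. a 1X SDR link (IB_RATE_2_5_GBPS) gets the largest delay
+ * multiplier, 8, while 4X DDR (IB_RATE_20_GBPS) needs none (1); both
+ * tables encode the same ratios, indexed by speed/width or by IB rate.
+ */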
+
+#define IBA7220_LINKSPEED_SHIFT SYM_LSB(IBCStatus, LinkSpeedActive)
+#define IBA7220_LINKWIDTH_SHIFT SYM_LSB(IBCStatus, LinkWidthActive)
+
+/* link training states, from IBC */
+#define IB_7220_LT_STATE_DISABLED        0x00
+#define IB_7220_LT_STATE_LINKUP          0x01
+#define IB_7220_LT_STATE_POLLACTIVE      0x02
+#define IB_7220_LT_STATE_POLLQUIET       0x03
+#define IB_7220_LT_STATE_SLEEPDELAY      0x04
+#define IB_7220_LT_STATE_SLEEPQUIET      0x05
+#define IB_7220_LT_STATE_CFGDEBOUNCE     0x08
+#define IB_7220_LT_STATE_CFGRCVFCFG      0x09
+#define IB_7220_LT_STATE_CFGWAITRMT      0x0a
+#define IB_7220_LT_STATE_CFGIDLE 0x0b
+#define IB_7220_LT_STATE_RECOVERRETRAIN  0x0c
+#define IB_7220_LT_STATE_RECOVERWAITRMT  0x0e
+#define IB_7220_LT_STATE_RECOVERIDLE     0x0f
+
+/* link state machine states from IBC */
+#define IB_7220_L_STATE_DOWN             0x0
+#define IB_7220_L_STATE_INIT             0x1
+#define IB_7220_L_STATE_ARM              0x2
+#define IB_7220_L_STATE_ACTIVE           0x3
+#define IB_7220_L_STATE_ACT_DEFER        0x4
+
+static const u8 qib_7220_physportstate[0x20] = {
+	[IB_7220_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
+	[IB_7220_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
+	[IB_7220_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
+	[IB_7220_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
+	[IB_7220_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
+	[IB_7220_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
+	[IB_7220_LT_STATE_CFGDEBOUNCE] =
+		IB_PHYSPORTSTATE_CFG_TRAIN,
+	[IB_7220_LT_STATE_CFGRCVFCFG] =
+		IB_PHYSPORTSTATE_CFG_TRAIN,
+	[IB_7220_LT_STATE_CFGWAITRMT] =
+		IB_PHYSPORTSTATE_CFG_TRAIN,
+	[IB_7220_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_TRAIN,
+	[IB_7220_LT_STATE_RECOVERRETRAIN] =
+		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
+	[IB_7220_LT_STATE_RECOVERWAITRMT] =
+		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
+	[IB_7220_LT_STATE_RECOVERIDLE] =
+		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
+	[0x10] = IB_PHYSPORTSTATE_CFG_TRAIN,
+	[0x11] = IB_PHYSPORTSTATE_CFG_TRAIN,
+	[0x12] = IB_PHYSPORTSTATE_CFG_TRAIN,
+	[0x13] = IB_PHYSPORTSTATE_CFG_TRAIN,
+	[0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
+	[0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
+	[0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
+	[0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
+};
+
+int qib_special_trigger;
+module_param_named(special_trigger, qib_special_trigger, int, S_IRUGO);
+MODULE_PARM_DESC(special_trigger, "Enable SpecialTrigger arm/launch");
+
+#define IBCBUSFRSPCPARITYERR HWE_MASK(IBCBusFromSPCParityErr)
+#define IBCBUSTOSPCPARITYERR HWE_MASK(IBCBusToSPCParityErr)
+
+#define SYM_MASK_BIT(regname, fldname, bit) ((u64) \
+	(1ULL << (SYM_LSB(regname, fldname) + (bit))))
+
+#define TXEMEMPARITYERR_PIOBUF \
+	SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 0)
+#define TXEMEMPARITYERR_PIOPBC \
+	SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 1)
+#define TXEMEMPARITYERR_PIOLAUNCHFIFO \
+	SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 2)
+
+#define RXEMEMPARITYERR_RCVBUF \
+	SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 0)
+#define RXEMEMPARITYERR_LOOKUPQ \
+	SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 1)
+#define RXEMEMPARITYERR_EXPTID \
+	SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 2)
+#define RXEMEMPARITYERR_EAGERTID \
+	SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 3)
+#define RXEMEMPARITYERR_FLAGBUF \
+	SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 4)
+#define RXEMEMPARITYERR_DATAINFO \
+	SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 5)
+#define RXEMEMPARITYERR_HDRINFO \
+	SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 6)
+
+/* 7220 specific hardware errors... */
+static const struct qib_hwerror_msgs qib_7220_hwerror_msgs[] = {
+	/* generic hardware errors */
+	QLOGIC_IB_HWE_MSG(IBCBUSFRSPCPARITYERR, "QIB2IB Parity"),
+	QLOGIC_IB_HWE_MSG(IBCBUSTOSPCPARITYERR, "IB2QIB Parity"),
+
+	QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOBUF,
+			  "TXE PIOBUF Memory Parity"),
+	QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOPBC,
+			  "TXE PIOPBC Memory Parity"),
+	QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOLAUNCHFIFO,
+			  "TXE PIOLAUNCHFIFO Memory Parity"),
+
+	QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_RCVBUF,
+			  "RXE RCVBUF Memory Parity"),
+	QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_LOOKUPQ,
+			  "RXE LOOKUPQ Memory Parity"),
+	QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_EAGERTID,
+			  "RXE EAGERTID Memory Parity"),
+	QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_EXPTID,
+			  "RXE EXPTID Memory Parity"),
+	QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_FLAGBUF,
+			  "RXE FLAGBUF Memory Parity"),
+	QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_DATAINFO,
+			  "RXE DATAINFO Memory Parity"),
+	QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_HDRINFO,
+			  "RXE HDRINFO Memory Parity"),
+
+	/* chip-specific hardware errors */
+	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEPOISONEDTLP,
+			  "PCIe Poisoned TLP"),
+	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIECPLTIMEOUT,
+			  "PCIe completion timeout"),
+	/*
+	 * In practice, it's unlikely that we'll see PCIe PLL, bus
+	 * parity, or memory parity error failures, because most likely we
+	 * won't be able to talk to the core of the chip.  Nonetheless, we
+	 * might see them, if they are in parts of the PCIe core that aren't
+	 * essential.
+	 */
+	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE1PLLFAILED,
+			  "PCIePLL1"),
+	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE0PLLFAILED,
+			  "PCIePLL0"),
+	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYXTLH,
+			  "PCIe XTLH core parity"),
+	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYXADM,
+			  "PCIe ADM TX core parity"),
+	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYRADM,
+			  "PCIe ADM RX core parity"),
+	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_SERDESPLLFAILED,
+			  "SerDes PLL"),
+	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIECPLDATAQUEUEERR,
+			  "PCIe cpl data queue"),
+	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIECPLHDRQUEUEERR,
+			  "PCIe cpl header queue"),
+	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_SDMAMEMREADERR,
+			  "Send DMA memory read"),
+	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_CLK_UC_PLLNOTLOCKED,
+			  "uC PLL clock not locked"),
+	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIESERDESQ0PCLKNOTDETECT,
+			  "PCIe serdes Q0 no clock"),
+	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIESERDESQ1PCLKNOTDETECT,
+			  "PCIe serdes Q1 no clock"),
+	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIESERDESQ2PCLKNOTDETECT,
+			  "PCIe serdes Q2 no clock"),
+	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIESERDESQ3PCLKNOTDETECT,
+			  "PCIe serdes Q3 no clock"),
+	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_DDSRXEQMEMORYPARITYERR,
+			  "DDS RXEQ memory parity"),
+	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR,
+			  "IB uC memory parity"),
+	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE_UC_OCT0MEMORYPARITYERR,
+			  "PCIe uC oct0 memory parity"),
+	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE_UC_OCT1MEMORYPARITYERR,
+			  "PCIe uC oct1 memory parity"),
+};
+
+#define RXE_PARITY (RXEMEMPARITYERR_EAGERTID|RXEMEMPARITYERR_EXPTID)
+
+#define QLOGIC_IB_E_PKTERRS (\
+		ERR_MASK(SendPktLenErr) |				\
+		ERR_MASK(SendDroppedDataPktErr) |			\
+		ERR_MASK(RcvVCRCErr) |					\
+		ERR_MASK(RcvICRCErr) |					\
+		ERR_MASK(RcvShortPktLenErr) |				\
+		ERR_MASK(RcvEBPErr))
+
+/* Convenience for decoding Send DMA errors */
+#define QLOGIC_IB_E_SDMAERRS ( \
+		ERR_MASK(SDmaGenMismatchErr) |				\
+		ERR_MASK(SDmaOutOfBoundErr) |				\
+		ERR_MASK(SDmaTailOutOfBoundErr) | ERR_MASK(SDmaBaseErr) | \
+		ERR_MASK(SDma1stDescErr) | ERR_MASK(SDmaRpyTagErr) |	\
+		ERR_MASK(SDmaDwEnErr) | ERR_MASK(SDmaMissingDwErr) |	\
+		ERR_MASK(SDmaUnexpDataErr) |				\
+		ERR_MASK(SDmaDescAddrMisalignErr) |			\
+		ERR_MASK(SDmaDisabledErr) |				\
+		ERR_MASK(SendBufMisuseErr))
+
+/* These are all rcv-related errors which we want to count for stats */
+#define E_SUM_PKTERRS \
+	(ERR_MASK(RcvHdrLenErr) | ERR_MASK(RcvBadTidErr) |		\
+	 ERR_MASK(RcvBadVersionErr) | ERR_MASK(RcvHdrErr) |		\
+	 ERR_MASK(RcvLongPktLenErr) | ERR_MASK(RcvShortPktLenErr) |	\
+	 ERR_MASK(RcvMaxPktLenErr) | ERR_MASK(RcvMinPktLenErr) |	\
+	 ERR_MASK(RcvFormatErr) | ERR_MASK(RcvUnsupportedVLErr) |	\
+	 ERR_MASK(RcvUnexpectedCharErr) | ERR_MASK(RcvEBPErr))
+
+/* These are all send-related errors which we want to count for stats */
+#define E_SUM_ERRS \
+	(ERR_MASK(SendPioArmLaunchErr) | ERR_MASK(SendUnexpectedPktNumErr) | \
+	 ERR_MASK(SendDroppedDataPktErr) | ERR_MASK(SendDroppedSmpPktErr) | \
+	 ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendUnsupportedVLErr) |	\
+	 ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendPktLenErr) |		\
+	 ERR_MASK(InvalidAddrErr))
+
+/*
+ * This is similar to E_SUM_ERRS, but we can't ignore armlaunch, and
+ * we don't ignore errors that aren't related to freeze and cancelling
+ * buffers.  Armlaunch can't be ignored, because more could occur while
+ * we are still cleaning up, and those need to be cancelled as they
+ * happen.
+ */
+#define E_SPKT_ERRS_IGNORE \
+	(ERR_MASK(SendDroppedDataPktErr) | ERR_MASK(SendDroppedSmpPktErr) | \
+	 ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendMinPktLenErr) |	\
+	 ERR_MASK(SendPktLenErr))
+
+/*
+ * These are errors that can occur when the link changes state while
+ * a packet is being sent or received.  This doesn't cover things
+ * like EBP or VCRC that can be the result of the sender having the
+ * link change state, so we receive a "known bad" packet.
+ */
+#define E_SUM_LINK_PKTERRS \
+	(ERR_MASK(SendDroppedDataPktErr) | ERR_MASK(SendDroppedSmpPktErr) | \
+	 ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendPktLenErr) |		\
+	 ERR_MASK(RcvShortPktLenErr) | ERR_MASK(RcvMinPktLenErr) |	\
+	 ERR_MASK(RcvUnexpectedCharErr))
+
+static void autoneg_7220_work(struct work_struct *);
+static u32 __iomem *qib_7220_getsendbuf(struct qib_pportdata *, u64, u32 *);
+
+/*
+ * Called when we might have an error that is specific to a particular
+ * PIO buffer, and may need to cancel that buffer, so it can be
+ * re-used; we don't need to force an update of pioavail.
+ */
+static void qib_disarm_7220_senderrbufs(struct qib_pportdata *ppd)
+{
+	unsigned long sbuf[3];
+	struct qib_devdata *dd = ppd->dd;
+
+	/*
+	 * It's possible that sendbuffererror could have bits set; might
+	 * have already done this as a result of hardware error handling.
+	 */
+	/* read these before writing errorclear */
+	sbuf[0] = qib_read_kreg64(dd, kr_sendbuffererror);
+	sbuf[1] = qib_read_kreg64(dd, kr_sendbuffererror + 1);
+	sbuf[2] = qib_read_kreg64(dd, kr_sendbuffererror + 2);
+
+	if (sbuf[0] || sbuf[1] || sbuf[2])
+		qib_disarm_piobufs_set(dd, sbuf,
+				       dd->piobcnt2k + dd->piobcnt4k);
+}
+
+static void qib_7220_txe_recover(struct qib_devdata *dd)
+{
+	qib_devinfo(dd->pcidev, "Recovering from TXE PIO parity error\n");
+	qib_disarm_7220_senderrbufs(dd->pport);
+}
+
+/*
+ * This is called with interrupts disabled and sdma_lock held.
+ */
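+/*
+ * E.g. op == (QIB_SDMA_SENDCTRL_OP_ENABLE |
+ * QIB_SDMA_SENDCTRL_OP_INTENABLE) sets SDmaEnable and SDmaIntEnable
+ * while clearing SDmaHalt in the shadow sendctrl before writing it to
+ * the chip.
+ */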
+static void qib_7220_sdma_sendctrl(struct qib_pportdata *ppd, unsigned op)
+{
+	struct qib_devdata *dd = ppd->dd;
+	u64 set_sendctrl = 0;
+	u64 clr_sendctrl = 0;
+
+	if (op & QIB_SDMA_SENDCTRL_OP_ENABLE)
+		set_sendctrl |= SYM_MASK(SendCtrl, SDmaEnable);
+	else
+		clr_sendctrl |= SYM_MASK(SendCtrl, SDmaEnable);
+
+	if (op & QIB_SDMA_SENDCTRL_OP_INTENABLE)
+		set_sendctrl |= SYM_MASK(SendCtrl, SDmaIntEnable);
+	else
+		clr_sendctrl |= SYM_MASK(SendCtrl, SDmaIntEnable);
+
+	if (op & QIB_SDMA_SENDCTRL_OP_HALT)
+		set_sendctrl |= SYM_MASK(SendCtrl, SDmaHalt);
+	else
+		clr_sendctrl |= SYM_MASK(SendCtrl, SDmaHalt);
+
+	spin_lock(&dd->sendctrl_lock);
+
+	dd->sendctrl |= set_sendctrl;
+	dd->sendctrl &= ~clr_sendctrl;
+
+	qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
+	qib_write_kreg(dd, kr_scratch, 0);
+
+	spin_unlock(&dd->sendctrl_lock);
+}
+
+static void qib_decode_7220_sdma_errs(struct qib_pportdata *ppd,
+				      u64 err, char *buf, size_t blen)
+{
+	static const struct {
+		u64 err;
+		const char *msg;
+	} errs[] = {
+		{ ERR_MASK(SDmaGenMismatchErr),
+		  "SDmaGenMismatch" },
+		{ ERR_MASK(SDmaOutOfBoundErr),
+		  "SDmaOutOfBound" },
+		{ ERR_MASK(SDmaTailOutOfBoundErr),
+		  "SDmaTailOutOfBound" },
+		{ ERR_MASK(SDmaBaseErr),
+		  "SDmaBase" },
+		{ ERR_MASK(SDma1stDescErr),
+		  "SDma1stDesc" },
+		{ ERR_MASK(SDmaRpyTagErr),
+		  "SDmaRpyTag" },
+		{ ERR_MASK(SDmaDwEnErr),
+		  "SDmaDwEn" },
+		{ ERR_MASK(SDmaMissingDwErr),
+		  "SDmaMissingDw" },
+		{ ERR_MASK(SDmaUnexpDataErr),
+		  "SDmaUnexpData" },
+		{ ERR_MASK(SDmaDescAddrMisalignErr),
+		  "SDmaDescAddrMisalign" },
+		{ ERR_MASK(SendBufMisuseErr),
+		  "SendBufMisuse" },
+		{ ERR_MASK(SDmaDisabledErr),
+		  "SDmaDisabled" },
+	};
+	int i;
+	size_t bidx = 0;
+
+	for (i = 0; i < ARRAY_SIZE(errs); i++) {
+		if (err & errs[i].err)
+			bidx += scnprintf(buf + bidx, blen - bidx,
+					 "%s ", errs[i].msg);
+	}
+}
+
+/*
+ * This is called as part of link down clean up so disarm and flush
+ * all send buffers so that SMP packets can be sent.
+ */
+static void qib_7220_sdma_hw_clean_up(struct qib_pportdata *ppd)
+{
+	/* This will trigger the Abort interrupt */
+	sendctrl_7220_mod(ppd, QIB_SENDCTRL_DISARM_ALL | QIB_SENDCTRL_FLUSH |
+			  QIB_SENDCTRL_AVAIL_BLIP);
+	ppd->dd->upd_pio_shadow  = 1; /* update our idea of what's busy */
+}
+
+static void qib_sdma_7220_setlengen(struct qib_pportdata *ppd)
+{
+	/*
+	 * Set SendDmaLenGen: write it first with the MSB of the
+	 * generation count clear, then with it set, to enable generation
+	 * checking and load the internal generation counter.
+	 */
+	qib_write_kreg(ppd->dd, kr_senddmalengen, ppd->sdma_descq_cnt);
+	qib_write_kreg(ppd->dd, kr_senddmalengen,
+		       ppd->sdma_descq_cnt |
+		       (1ULL << QIB_7220_SendDmaLenGen_Generation_MSB));
+}
+
+static void qib_7220_sdma_hw_start_up(struct qib_pportdata *ppd)
+{
+	qib_sdma_7220_setlengen(ppd);
+	qib_sdma_update_7220_tail(ppd, 0); /* Set SendDmaTail */
+	ppd->sdma_head_dma[0] = 0;
+}
+
+#define DISABLES_SDMA (							\
+		ERR_MASK(SDmaDisabledErr) |				\
+		ERR_MASK(SDmaBaseErr) |					\
+		ERR_MASK(SDmaTailOutOfBoundErr) |			\
+		ERR_MASK(SDmaOutOfBoundErr) |				\
+		ERR_MASK(SDma1stDescErr) |				\
+		ERR_MASK(SDmaRpyTagErr) |				\
+		ERR_MASK(SDmaGenMismatchErr) |				\
+		ERR_MASK(SDmaDescAddrMisalignErr) |			\
+		ERR_MASK(SDmaMissingDwErr) |				\
+		ERR_MASK(SDmaDwEnErr))
+
+static void sdma_7220_errors(struct qib_pportdata *ppd, u64 errs)
+{
+	unsigned long flags;
+	struct qib_devdata *dd = ppd->dd;
+	char *msg;
+
+	errs &= QLOGIC_IB_E_SDMAERRS;
+
+	msg = dd->cspec->sdmamsgbuf;
+	qib_decode_7220_sdma_errs(ppd, errs, msg, sizeof dd->cspec->sdmamsgbuf);
+	spin_lock_irqsave(&ppd->sdma_lock, flags);
+
+	if (errs & ERR_MASK(SendBufMisuseErr)) {
+		unsigned long sbuf[3];
+
+		sbuf[0] = qib_read_kreg64(dd, kr_sendbuffererror);
+		sbuf[1] = qib_read_kreg64(dd, kr_sendbuffererror + 1);
+		sbuf[2] = qib_read_kreg64(dd, kr_sendbuffererror + 2);
+
+		qib_dev_err(ppd->dd,
+			    "IB%u:%u SendBufMisuse: %04lx %016lx %016lx\n",
+			    ppd->dd->unit, ppd->port, sbuf[2], sbuf[1],
+			    sbuf[0]);
+	}
+
+	if (errs & ERR_MASK(SDmaUnexpDataErr))
+		qib_dev_err(dd, "IB%u:%u SDmaUnexpData\n", ppd->dd->unit,
+			    ppd->port);
+
+	switch (ppd->sdma_state.current_state) {
+	case qib_sdma_state_s00_hw_down:
+		/* not expecting any interrupts */
+		break;
+
+	case qib_sdma_state_s10_hw_start_up_wait:
+		/* handled in intr path */
+		break;
+
+	case qib_sdma_state_s20_idle:
+		/* not expecting any interrupts */
+		break;
+
+	case qib_sdma_state_s30_sw_clean_up_wait:
+		/* not expecting any interrupts */
+		break;
+
+	case qib_sdma_state_s40_hw_clean_up_wait:
+		if (errs & ERR_MASK(SDmaDisabledErr))
+			__qib_sdma_process_event(ppd,
+				qib_sdma_event_e50_hw_cleaned);
+		break;
+
+	case qib_sdma_state_s50_hw_halt_wait:
+		/* handled in intr path */
+		break;
+
+	case qib_sdma_state_s99_running:
+		if (errs & DISABLES_SDMA)
+			__qib_sdma_process_event(ppd,
+				qib_sdma_event_e7220_err_halted);
+		break;
+	}
+
+	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+}
+
+/*
+ * Decode the error status into strings, deciding whether to always
+ * print it or not depending on "normal packet errors" vs everything
+ * else.  Return 1 if "real" errors, otherwise 0 if only packet
+ * errors, so the caller can decide what to print with the string.
+ */
+static int qib_decode_7220_err(struct qib_devdata *dd, char *buf, size_t blen,
+			       u64 err)
+{
+	int iserr = 1;
+
+	*buf = '\0';
+	if (err & QLOGIC_IB_E_PKTERRS) {
+		if (!(err & ~QLOGIC_IB_E_PKTERRS))
+			iserr = 0;
+		if ((err & ERR_MASK(RcvICRCErr)) &&
+		    !(err & (ERR_MASK(RcvVCRCErr) | ERR_MASK(RcvEBPErr))))
+			strlcat(buf, "CRC ", blen);
+		if (!iserr)
+			goto done;
+	}
+	if (err & ERR_MASK(RcvHdrLenErr))
+		strlcat(buf, "rhdrlen ", blen);
+	if (err & ERR_MASK(RcvBadTidErr))
+		strlcat(buf, "rbadtid ", blen);
+	if (err & ERR_MASK(RcvBadVersionErr))
+		strlcat(buf, "rbadversion ", blen);
+	if (err & ERR_MASK(RcvHdrErr))
+		strlcat(buf, "rhdr ", blen);
+	if (err & ERR_MASK(SendSpecialTriggerErr))
+		strlcat(buf, "sendspecialtrigger ", blen);
+	if (err & ERR_MASK(RcvLongPktLenErr))
+		strlcat(buf, "rlongpktlen ", blen);
+	if (err & ERR_MASK(RcvMaxPktLenErr))
+		strlcat(buf, "rmaxpktlen ", blen);
+	if (err & ERR_MASK(RcvMinPktLenErr))
+		strlcat(buf, "rminpktlen ", blen);
+	if (err & ERR_MASK(SendMinPktLenErr))
+		strlcat(buf, "sminpktlen ", blen);
+	if (err & ERR_MASK(RcvFormatErr))
+		strlcat(buf, "rformaterr ", blen);
+	if (err & ERR_MASK(RcvUnsupportedVLErr))
+		strlcat(buf, "runsupvl ", blen);
+	if (err & ERR_MASK(RcvUnexpectedCharErr))
+		strlcat(buf, "runexpchar ", blen);
+	if (err & ERR_MASK(RcvIBFlowErr))
+		strlcat(buf, "ribflow ", blen);
+	if (err & ERR_MASK(SendUnderRunErr))
+		strlcat(buf, "sunderrun ", blen);
+	if (err & ERR_MASK(SendPioArmLaunchErr))
+		strlcat(buf, "spioarmlaunch ", blen);
+	if (err & ERR_MASK(SendUnexpectedPktNumErr))
+		strlcat(buf, "sunexperrpktnum ", blen);
+	if (err & ERR_MASK(SendDroppedSmpPktErr))
+		strlcat(buf, "sdroppedsmppkt ", blen);
+	if (err & ERR_MASK(SendMaxPktLenErr))
+		strlcat(buf, "smaxpktlen ", blen);
+	if (err & ERR_MASK(SendUnsupportedVLErr))
+		strlcat(buf, "sunsupVL ", blen);
+	if (err & ERR_MASK(InvalidAddrErr))
+		strlcat(buf, "invalidaddr ", blen);
+	if (err & ERR_MASK(RcvEgrFullErr))
+		strlcat(buf, "rcvegrfull ", blen);
+	if (err & ERR_MASK(RcvHdrFullErr))
+		strlcat(buf, "rcvhdrfull ", blen);
+	if (err & ERR_MASK(IBStatusChanged))
+		strlcat(buf, "ibcstatuschg ", blen);
+	if (err & ERR_MASK(RcvIBLostLinkErr))
+		strlcat(buf, "riblostlink ", blen);
+	if (err & ERR_MASK(HardwareErr))
+		strlcat(buf, "hardware ", blen);
+	if (err & ERR_MASK(ResetNegated))
+		strlcat(buf, "reset ", blen);
+	if (err & QLOGIC_IB_E_SDMAERRS)
+		qib_decode_7220_sdma_errs(dd->pport, err, buf, blen);
+	if (err & ERR_MASK(InvalidEEPCmd))
+		strlcat(buf, "invalideepromcmd ", blen);
+done:
+	return iserr;
+}
+
+static void reenable_7220_chase(unsigned long opaque)
+{
+	struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
+	ppd->cpspec->chase_timer.expires = 0;
+	qib_set_ib_7220_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
+		QLOGIC_IB_IBCC_LINKINITCMD_POLL);
+}
+
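+/*
+ * Heuristic for the link-training "chase": if the link sits in one of
+ * the intermediate training states for more than QIB_CHASE_TIME, force
+ * it DISABLED, and let reenable_7220_chase() (the timer callback above)
+ * put it back to POLL after QIB_CHASE_DIS_TIME.
+ */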
+static void handle_7220_chase(struct qib_pportdata *ppd, u64 ibcst)
+{
+	u8 ibclt;
+	u64 tnow;
+
+	ibclt = (u8)SYM_FIELD(ibcst, IBCStatus, LinkTrainingState);
+
+	/*
+	 * Detect and handle the state chase issue, where we can
+	 * get stuck if we are unlucky on timing on both sides of
+	 * the link.   If we are, we disable, set a timer, and
+	 * then re-enable.
+	 */
+	switch (ibclt) {
+	case IB_7220_LT_STATE_CFGRCVFCFG:
+	case IB_7220_LT_STATE_CFGWAITRMT:
+	case IB_7220_LT_STATE_TXREVLANES:
+	case IB_7220_LT_STATE_CFGENH:
+		tnow = get_jiffies_64();
+		if (ppd->cpspec->chase_end &&
+		    time_after64(tnow, ppd->cpspec->chase_end)) {
+			ppd->cpspec->chase_end = 0;
+			qib_set_ib_7220_lstate(ppd,
+				QLOGIC_IB_IBCC_LINKCMD_DOWN,
+				QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
+			ppd->cpspec->chase_timer.expires = jiffies +
+				QIB_CHASE_DIS_TIME;
+			add_timer(&ppd->cpspec->chase_timer);
+		} else if (!ppd->cpspec->chase_end)
+			ppd->cpspec->chase_end = tnow + QIB_CHASE_TIME;
+		break;
+
+	default:
+		ppd->cpspec->chase_end = 0;
+		break;
+	}
+}
+
+static void handle_7220_errors(struct qib_devdata *dd, u64 errs)
+{
+	char *msg;
+	u64 ignore_this_time = 0;
+	u64 iserr = 0;
+	int log_idx;
+	struct qib_pportdata *ppd = dd->pport;
+	u64 mask;
+
+	/* don't report errors that are masked */
+	errs &= dd->cspec->errormask;
+	msg = dd->cspec->emsgbuf;
+
+	/* do these first, they are most important */
+	if (errs & ERR_MASK(HardwareErr))
+		qib_7220_handle_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf);
+	else
+		for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
+			if (errs & dd->eep_st_masks[log_idx].errs_to_log)
+				qib_inc_eeprom_err(dd, log_idx, 1);
+
+	if (errs & QLOGIC_IB_E_SDMAERRS)
+		sdma_7220_errors(ppd, errs);
+
+	if (errs & ~IB_E_BITSEXTANT)
+		qib_dev_err(dd, "error interrupt with unknown errors "
+			    "%llx set\n", (unsigned long long)
+			    (errs & ~IB_E_BITSEXTANT));
+
+	if (errs & E_SUM_ERRS) {
+		qib_disarm_7220_senderrbufs(ppd);
+		if ((errs & E_SUM_LINK_PKTERRS) &&
+		    !(ppd->lflags & QIBL_LINKACTIVE)) {
+			/*
+			 * This can happen when trying to bring the link
+			 * up, but the IB link changes state at the "wrong"
+			 * time. The IB logic then complains that the packet
+			 * isn't valid.  We don't want to confuse people, so
+			 * we just don't print them, except at debug
+			 */
+			ignore_this_time = errs & E_SUM_LINK_PKTERRS;
+		}
+	} else if ((errs & E_SUM_LINK_PKTERRS) &&
+		   !(ppd->lflags & QIBL_LINKACTIVE)) {
+		/*
+		 * This can happen when SMA is trying to bring the link
+		 * up, but the IB link changes state at the "wrong" time.
+		 * The IB logic then complains that the packet isn't
+		 * valid.  We don't want to confuse people, so we just
+		 * don't print them, except at debug
+		 */
+		ignore_this_time = errs & E_SUM_LINK_PKTERRS;
+	}
+
+	qib_write_kreg(dd, kr_errclear, errs);
+
+	errs &= ~ignore_this_time;
+	if (!errs)
+		goto done;
+
+	/*
+	 * The ones we mask off are handled specially below
+	 * or above.  Also mask SDMADISABLED by default as it
+	 * is too chatty.
+	 */
+	mask = ERR_MASK(IBStatusChanged) |
+		ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr) |
+		ERR_MASK(HardwareErr) | ERR_MASK(SDmaDisabledErr);
+
+	qib_decode_7220_err(dd, msg, sizeof dd->cspec->emsgbuf, errs & ~mask);
+
+	if (errs & E_SUM_PKTERRS)
+		qib_stats.sps_rcverrs++;
+	if (errs & E_SUM_ERRS)
+		qib_stats.sps_txerrs++;
+	iserr = errs & ~(E_SUM_PKTERRS | QLOGIC_IB_E_PKTERRS |
+			 ERR_MASK(SDmaDisabledErr));
+
+	if (errs & ERR_MASK(IBStatusChanged)) {
+		u64 ibcs;
+
+		ibcs = qib_read_kreg64(dd, kr_ibcstatus);
+		if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
+			handle_7220_chase(ppd, ibcs);
+
+		/* Update our picture of width and speed from chip */
+		ppd->link_width_active =
+			((ibcs >> IBA7220_LINKWIDTH_SHIFT) & 1) ?
+			    IB_WIDTH_4X : IB_WIDTH_1X;
+		ppd->link_speed_active =
+			((ibcs >> IBA7220_LINKSPEED_SHIFT) & 1) ?
+			    QIB_IB_DDR : QIB_IB_SDR;
+
+		/*
+		 * Since going into a recovery state causes the link state
+		 * to go down and since recovery is transitory, it is better
+		 * if we "miss" ever seeing the link training state go into
+		 * recovery (i.e., ignore this transition for link state
+		 * special handling purposes) without updating lastibcstat.
+		 */
+		if (qib_7220_phys_portstate(ibcs) !=
+					    IB_PHYSPORTSTATE_LINK_ERR_RECOVER)
+			qib_handle_e_ibstatuschanged(ppd, ibcs);
+	}
+
+	if (errs & ERR_MASK(ResetNegated)) {
+		qib_dev_err(dd, "Got reset, requires re-init "
+			    "(unload and reload driver)\n");
+		dd->flags &= ~QIB_INITTED;  /* needs re-init */
+		/* mark as having had error */
+		*dd->devstatusp |= QIB_STATUS_HWERROR;
+		*dd->pport->statusp &= ~QIB_STATUS_IB_CONF;
+	}
+
+	if (*msg && iserr)
+		qib_dev_porterr(dd, ppd->port, "%s error\n", msg);
+
+	if (ppd->state_wanted & ppd->lflags)
+		wake_up_interruptible(&ppd->state_wait);
+
+	/*
+	 * If there were hdrq or egrfull errors, wake up any processes
+	 * waiting in poll.  We used to try to check which contexts had
+	 * the overflow, but given the cost of that and the chip reads
+	 * to support it, it's better to just wake everybody up if we
+	 * get an overflow; waiters can poll again if it's not them.
+	 */
+	if (errs & (ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr))) {
+		qib_handle_urcv(dd, ~0U);
+		if (errs & ERR_MASK(RcvEgrFullErr))
+			qib_stats.sps_buffull++;
+		else
+			qib_stats.sps_hdrfull++;
+	}
+done:
+	return;
+}
+
+/* enable/disable chip from delivering interrupts */
+static void qib_7220_set_intr_state(struct qib_devdata *dd, u32 enable)
+{
+	if (enable) {
+		if (dd->flags & QIB_BADINTR)
+			return;
+		qib_write_kreg(dd, kr_intmask, ~0ULL);
+		/* force re-interrupt of any pending interrupts. */
+		qib_write_kreg(dd, kr_intclear, 0ULL);
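+		/*
+		 * (Per the chip behavior described at
+		 * qib_7220_clear_freeze() below: writing 0 to a *clear
+		 * bit whose *status bit is set re-raises the interrupt,
+		 * so anything that arrived while the mask was 0 is
+		 * replayed rather than lost.)
+		 */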
+	} else
+		qib_write_kreg(dd, kr_intmask, 0ULL);
+}
+
+/*
+ * Try to cleanup as much as possible for anything that might have gone
+ * wrong while in freeze mode, such as pio buffers being written by user
+ * processes (causing armlaunch), send errors due to going into freeze mode,
+ * etc., and try to avoid causing extra interrupts while doing so.
+ * Forcibly update the in-memory pioavail register copies after cleanup
+ * because the chip won't do it while in freeze mode (the register values
+ * themselves are kept correct).
+ * Make sure that we don't lose any important interrupts by using the chip
+ * feature that says that writing 0 to a bit in *clear that is set in
+ * *status will cause an interrupt to be generated again (if allowed by
+ * the *mask value).
+ * This is in chip-specific code because of all of the register accesses,
+ * even though the details are similar on most chips.
+ */
+static void qib_7220_clear_freeze(struct qib_devdata *dd)
+{
+	/* disable error interrupts, to avoid confusion */
+	qib_write_kreg(dd, kr_errmask, 0ULL);
+
+	/* also disable interrupts; errormask is sometimes overwritten */
+	qib_7220_set_intr_state(dd, 0);
+
+	qib_cancel_sends(dd->pport);
+
+	/* clear the freeze, and be sure chip saw it */
+	qib_write_kreg(dd, kr_control, dd->control);
+	qib_read_kreg32(dd, kr_scratch);
+
+	/* force in-memory update now we are out of freeze */
+	qib_force_pio_avail_update(dd);
+
+	/*
+	 * force new interrupt if any hwerr, error or interrupt bits are
+	 * still set, and clear "safe" send packet errors related to freeze
+	 * and cancelling sends.  Re-enable error interrupts before possible
+	 * force of re-interrupt on pending interrupts.
+	 */
+	qib_write_kreg(dd, kr_hwerrclear, 0ULL);
+	qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE);
+	qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
+	qib_7220_set_intr_state(dd, 1);
+}
+
+/**
+ * qib_7220_handle_hwerrors - display hardware errors.
+ * @dd: the qlogic_ib device
+ * @msg: the output buffer
+ * @msgl: the size of the output buffer
+ *
+ * Most hardware errors are catastrophic, but for right now we'll
+ * print them and continue.  We reuse the same message buffer as
+ * handle_7220_errors() to avoid excessive stack usage.
+ */
+static void qib_7220_handle_hwerrors(struct qib_devdata *dd, char *msg,
+				     size_t msgl)
+{
+	u64 hwerrs;
+	u32 bits, ctrl;
+	int isfatal = 0;
+	char *bitsmsg;
+	int log_idx;
+
+	hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
+	if (!hwerrs)
+		goto bail;
+	if (hwerrs == ~0ULL) {
+		qib_dev_err(dd, "Read of hardware error status failed "
+			    "(all bits set); ignoring\n");
+		goto bail;
+	}
+	qib_stats.sps_hwerrs++;
+
+	/*
+	 * Always clear the error status register, except the memory BIST
+	 * failure bit (PowerOnBISTFailed), regardless of whether we
+	 * continue or stop using the chip.
+	 * We want that set so we know it failed, even across driver reload.
+	 * We'll still ignore it in the hwerrmask.  We do this partly for
+	 * diagnostics, but also for support.
+	 */
+	qib_write_kreg(dd, kr_hwerrclear,
+		       hwerrs & ~HWE_MASK(PowerOnBISTFailed));
+
+	hwerrs &= dd->cspec->hwerrmask;
+
+	/* We log some errors to EEPROM, check if we have any of those. */
+	for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
+		if (hwerrs & dd->eep_st_masks[log_idx].hwerrs_to_log)
+			qib_inc_eeprom_err(dd, log_idx, 1);
+	if (hwerrs & ~(TXEMEMPARITYERR_PIOBUF | TXEMEMPARITYERR_PIOPBC |
+		       RXE_PARITY))
+		qib_devinfo(dd->pcidev, "Hardware error: hwerr=0x%llx "
+			 "(cleared)\n", (unsigned long long) hwerrs);
+
+	if (hwerrs & ~IB_HWE_BITSEXTANT)
+		qib_dev_err(dd, "hwerror interrupt with unknown errors "
+			    "%llx set\n", (unsigned long long)
+			    (hwerrs & ~IB_HWE_BITSEXTANT));
+
+	if (hwerrs & QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR)
+		qib_sd7220_clr_ibpar(dd);
+
+	ctrl = qib_read_kreg32(dd, kr_control);
+	if ((ctrl & QLOGIC_IB_C_FREEZEMODE) && !dd->diag_client) {
+		/*
+		 * Parity errors in send memory are recoverable by h/w;
+		 * just do housekeeping, exit freeze mode and continue.
+		 */
+		if (hwerrs & (TXEMEMPARITYERR_PIOBUF |
+			      TXEMEMPARITYERR_PIOPBC)) {
+			qib_7220_txe_recover(dd);
+			hwerrs &= ~(TXEMEMPARITYERR_PIOBUF |
+				    TXEMEMPARITYERR_PIOPBC);
+		}
+		if (hwerrs)
+			isfatal = 1;
+		else
+			qib_7220_clear_freeze(dd);
+	}
+
+	*msg = '\0';
+
+	if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {
+		isfatal = 1;
+		strlcat(msg, "[Memory BIST test failed, "
+			"InfiniPath hardware unusable]", msgl);
+		/* ignore from now on, so disable until driver reloaded */
+		dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
+		qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
+	}
+
+	qib_format_hwerrors(hwerrs, qib_7220_hwerror_msgs,
+			    ARRAY_SIZE(qib_7220_hwerror_msgs), msg, msgl);
+
+	bitsmsg = dd->cspec->bitsmsgbuf;
+	if (hwerrs & (QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK <<
+		      QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT)) {
+		bits = (u32) ((hwerrs >>
+			       QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) &
+			      QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK);
+		snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf,
+			 "[PCIe Mem Parity Errs %x] ", bits);
+		strlcat(msg, bitsmsg, msgl);
+	}
+
+#define _QIB_PLL_FAIL (QLOGIC_IB_HWE_COREPLL_FBSLIP |   \
+			 QLOGIC_IB_HWE_COREPLL_RFSLIP)
+
+	if (hwerrs & _QIB_PLL_FAIL) {
+		isfatal = 1;
+		snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf,
+			 "[PLL failed (%llx), InfiniPath hardware unusable]",
+			 (unsigned long long) hwerrs & _QIB_PLL_FAIL);
+		strlcat(msg, bitsmsg, msgl);
+		/* ignore from now on, so disable until driver reloaded */
+		dd->cspec->hwerrmask &= ~(hwerrs & _QIB_PLL_FAIL);
+		qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
+	}
+
+	if (hwerrs & QLOGIC_IB_HWE_SERDESPLLFAILED) {
+		/*
+		 * If it occurs, it is left masked since the external
+		 * interface is unused.
+		 */
+		dd->cspec->hwerrmask &= ~QLOGIC_IB_HWE_SERDESPLLFAILED;
+		qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
+	}
+
+	qib_dev_err(dd, "%s hardware error\n", msg);
+
+	if (isfatal && !dd->diag_client) {
+		qib_dev_err(dd, "Fatal Hardware Error, no longer"
+			    " usable, SN %.16s\n", dd->serial);
+		/*
+		 * For /sys status file and user programs to print; if no
+		 * trailing brace is copied, we'll know it was truncated.
+		 */
+		if (dd->freezemsg)
+			snprintf(dd->freezemsg, dd->freezelen,
+				 "{%s}", msg);
+		qib_disable_after_error(dd);
+	}
+bail:;
+}
+
+/**
+ * qib_7220_init_hwerrors - enable hardware errors
+ * @dd: the qlogic_ib device
+ *
+ * Now that we have finished initializing everything that might reasonably
+ * cause a hardware error, and cleared those error bits as they occur,
+ * we can enable hardware errors in the mask (potentially enabling
+ * freeze mode), and enable hardware errors as errors (along with
+ * everything else) in errormask.
+ */
+static void qib_7220_init_hwerrors(struct qib_devdata *dd)
+{
+	u64 val;
+	u64 extsval;
+
+	extsval = qib_read_kreg64(dd, kr_extstatus);
+
+	if (!(extsval & (QLOGIC_IB_EXTS_MEMBIST_ENDTEST |
+			 QLOGIC_IB_EXTS_MEMBIST_DISABLED)))
+		qib_dev_err(dd, "MemBIST did not complete!\n");
+	if (extsval & QLOGIC_IB_EXTS_MEMBIST_DISABLED)
+		qib_devinfo(dd->pcidev, "MemBIST is disabled.\n");
+
+	val = ~0ULL;    /* default to all hwerrors becoming interrupts */
+
+	val &= ~QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR;
+	dd->cspec->hwerrmask = val;
+
+	qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed));
+	qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
+
+	/* clear all */
+	qib_write_kreg(dd, kr_errclear, ~0ULL);
+	/* enable errors that are masked, at least this first time. */
+	qib_write_kreg(dd, kr_errmask, ~0ULL);
+	dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask);
+	/* clear any interrupts up to this point (ints still not enabled) */
+	qib_write_kreg(dd, kr_intclear, ~0ULL);
+}
+
+/*
+ * Disable and enable the armlaunch error.  Used for PIO bandwidth testing
+ * on chips that are count-based, rather than trigger-based.  There is no
+ * reference counting, but that's also fine, given the intended use.
+ * Only chip-specific because it's all register accesses.
+ */
+static void qib_set_7220_armlaunch(struct qib_devdata *dd, u32 enable)
+{
+	if (enable) {
+		qib_write_kreg(dd, kr_errclear, ERR_MASK(SendPioArmLaunchErr));
+		dd->cspec->errormask |= ERR_MASK(SendPioArmLaunchErr);
+	} else
+		dd->cspec->errormask &= ~ERR_MASK(SendPioArmLaunchErr);
+	qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
+}
+
+/*
+ * Formerly took parameter <which> in pre-shifted,
+ * pre-merged form with LinkCmd and LinkInitCmd
+ * together, and assumed that zero was a NOP.
+ */
+static void qib_set_ib_7220_lstate(struct qib_pportdata *ppd, u16 linkcmd,
+				   u16 linitcmd)
+{
+	u64 mod_wd;
+	struct qib_devdata *dd = ppd->dd;
+	unsigned long flags;
+
+	if (linitcmd == QLOGIC_IB_IBCC_LINKINITCMD_DISABLE) {
+		/*
+		 * If we are told to disable, note that so link-recovery
+		 * code does not attempt to bring us back up.
+		 */
+		spin_lock_irqsave(&ppd->lflags_lock, flags);
+		ppd->lflags |= QIBL_IB_LINK_DISABLED;
+		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+	} else if (linitcmd || linkcmd == QLOGIC_IB_IBCC_LINKCMD_DOWN) {
+		/*
+		 * Any other linkinitcmd will lead to LINKDOWN and then
+		 * to INIT (if all is well), so clear flag to let
+		 * link-recovery code attempt to bring us back up.
+		 */
+		spin_lock_irqsave(&ppd->lflags_lock, flags);
+		ppd->lflags &= ~QIBL_IB_LINK_DISABLED;
+		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+	}
+
+	mod_wd = (linkcmd << IBA7220_IBCC_LINKCMD_SHIFT) |
+		(linitcmd << QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
+
+	qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl | mod_wd);
+	/* write to chip to prevent back-to-back writes of ibc reg */
+	qib_write_kreg(dd, kr_scratch, 0);
+}
+
+/*
+ * All detailed interaction with the SerDes has been moved to qib_sd7220.c
+ *
+ * The portion of IBA7220-specific bringup_serdes() that actually deals with
+ * registers and memory within the SerDes itself is qib_sd7220_init().
+ */
+
+/**
+ * qib_7220_bringup_serdes - bring up the serdes
+ * @ppd: physical port on the qlogic_ib device
+ */
+static int qib_7220_bringup_serdes(struct qib_pportdata *ppd)
+{
+	struct qib_devdata *dd = ppd->dd;
+	u64 val, prev_val, guid, ibc;
+	int ret = 0;
+
+	/* Put IBC in reset, sends disabled */
+	dd->control &= ~QLOGIC_IB_C_LINKENABLE;
+	qib_write_kreg(dd, kr_control, 0ULL);
+
+	if (qib_compat_ddr_negotiate) {
+		ppd->cpspec->ibdeltainprog = 1;
+		ppd->cpspec->ibsymsnap = read_7220_creg32(dd, cr_ibsymbolerr);
+		ppd->cpspec->iblnkerrsnap =
+			read_7220_creg32(dd, cr_iblinkerrrecov);
+	}
+
+	/* flowcontrolwatermark is in units of KBytes */
+	ibc = 0x5ULL << SYM_LSB(IBCCtrl, FlowCtrlWaterMark);
+	/*
+	 * How often flowctrl is sent.  More or less in usecs; balance against
+	 * watermark value, so that in theory senders always get a flow
+	 * control update in time to not let the IB link go idle.
+	 */
+	ibc |= 0x3ULL << SYM_LSB(IBCCtrl, FlowCtrlPeriod);
+	/* max error tolerance */
+	ibc |= 0xfULL << SYM_LSB(IBCCtrl, PhyerrThreshold);
+	/* use "real" buffer space for */
+	ibc |= 4ULL << SYM_LSB(IBCCtrl, CreditScale);
+	/* IB credit flow control. */
+	ibc |= 0xfULL << SYM_LSB(IBCCtrl, OverrunThreshold);
+	/*
+	 * set initial max size pkt IBC will send, including ICRC; it's the
+	 * PIO buffer size in dwords, less 1; also see qib_set_mtu()
+	 */
+	ibc |= ((u64)(ppd->ibmaxlen >> 2) + 1) << SYM_LSB(IBCCtrl, MaxPktLen);
+	ppd->cpspec->ibcctrl = ibc; /* without linkcmd or linkinitcmd! */
+
+	/* initially come up waiting for TS1, without sending anything. */
+	val = ppd->cpspec->ibcctrl | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
+		QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
+	qib_write_kreg(dd, kr_ibcctrl, val);
+
+	if (!ppd->cpspec->ibcddrctrl) {
+		/* not on re-init after reset */
+		ppd->cpspec->ibcddrctrl = qib_read_kreg64(dd, kr_ibcddrctrl);
+
+		if (ppd->link_speed_enabled == (QIB_IB_SDR | QIB_IB_DDR))
+			ppd->cpspec->ibcddrctrl |=
+				IBA7220_IBC_SPEED_AUTONEG_MASK |
+				IBA7220_IBC_IBTA_1_2_MASK;
+		else
+			ppd->cpspec->ibcddrctrl |=
+				ppd->link_speed_enabled == QIB_IB_DDR ?
+				IBA7220_IBC_SPEED_DDR : IBA7220_IBC_SPEED_SDR;
+		if ((ppd->link_width_enabled & (IB_WIDTH_1X | IB_WIDTH_4X)) ==
+		    (IB_WIDTH_1X | IB_WIDTH_4X))
+			ppd->cpspec->ibcddrctrl |= IBA7220_IBC_WIDTH_AUTONEG;
+		else
+			ppd->cpspec->ibcddrctrl |=
+				ppd->link_width_enabled == IB_WIDTH_4X ?
+				IBA7220_IBC_WIDTH_4X_ONLY :
+				IBA7220_IBC_WIDTH_1X_ONLY;
+
+		/* always enable these on driver reload, not sticky */
+		ppd->cpspec->ibcddrctrl |=
+			IBA7220_IBC_RXPOL_MASK << IBA7220_IBC_RXPOL_SHIFT;
+		ppd->cpspec->ibcddrctrl |=
+			IBA7220_IBC_HRTBT_MASK << IBA7220_IBC_HRTBT_SHIFT;
+
+		/* enable automatic lane reversal detection for receive */
+		ppd->cpspec->ibcddrctrl |= IBA7220_IBC_LANE_REV_SUPPORTED;
+	} else
+		/* write to chip to prevent back-to-back writes of ibc reg */
+		qib_write_kreg(dd, kr_scratch, 0);
+
+	qib_write_kreg(dd, kr_ibcddrctrl, ppd->cpspec->ibcddrctrl);
+	qib_write_kreg(dd, kr_scratch, 0);
+
+	qib_write_kreg(dd, kr_ncmodectrl, 0ULL);
+	qib_write_kreg(dd, kr_scratch, 0);
+
+	ret = qib_sd7220_init(dd);
+
+	val = qib_read_kreg64(dd, kr_xgxs_cfg);
+	prev_val = val;
+	val |= QLOGIC_IB_XGXS_FC_SAFE;
+	if (val != prev_val) {
+		qib_write_kreg(dd, kr_xgxs_cfg, val);
+		qib_read_kreg32(dd, kr_scratch);
+	}
+	if (val & QLOGIC_IB_XGXS_RESET)
+		val &= ~QLOGIC_IB_XGXS_RESET;
+	if (val != prev_val)
+		qib_write_kreg(dd, kr_xgxs_cfg, val);
+
+	/* first time through, set port guid */
+	if (!ppd->guid)
+		ppd->guid = dd->base_guid;
+	guid = be64_to_cpu(ppd->guid);
+
+	qib_write_kreg(dd, kr_hrtbt_guid, guid);
+	if (!ret) {
+		dd->control |= QLOGIC_IB_C_LINKENABLE;
+		qib_write_kreg(dd, kr_control, dd->control);
+	} else
+		/* write to chip to prevent back-to-back writes of ibc reg */
+		qib_write_kreg(dd, kr_scratch, 0);
+	return ret;
+}
+
+/**
+ * qib_7220_quiet_serdes - set serdes to txidle
+ * @ppd: physical port of the qlogic_ib device
+ * Called when driver is being unloaded
+ */
+static void qib_7220_quiet_serdes(struct qib_pportdata *ppd)
+{
+	u64 val;
+	struct qib_devdata *dd = ppd->dd;
+	unsigned long flags;
+
+	/* disable IBC */
+	dd->control &= ~QLOGIC_IB_C_LINKENABLE;
+	qib_write_kreg(dd, kr_control,
+		       dd->control | QLOGIC_IB_C_FREEZEMODE);
+
+	ppd->cpspec->chase_end = 0;
+	if (ppd->cpspec->chase_timer.data) /* if initted */
+		del_timer_sync(&ppd->cpspec->chase_timer);
+
+	if (ppd->cpspec->ibsymdelta || ppd->cpspec->iblnkerrdelta ||
+	    ppd->cpspec->ibdeltainprog) {
+		u64 diagc;
+
+		/* enable counter writes */
+		diagc = qib_read_kreg64(dd, kr_hwdiagctrl);
+		qib_write_kreg(dd, kr_hwdiagctrl,
+			       diagc | SYM_MASK(HwDiagCtrl, CounterWrEnable));
+
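+		/*
+		 * The "val -= val - snap" form below simply restores the
+		 * counter to the snapshot taken at bringup; subtracting
+		 * the accumulated delta as well presumably hides errors
+		 * generated by the driver's own link negotiation.
+		 */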
+		if (ppd->cpspec->ibsymdelta || ppd->cpspec->ibdeltainprog) {
+			val = read_7220_creg32(dd, cr_ibsymbolerr);
+			if (ppd->cpspec->ibdeltainprog)
+				val -= val - ppd->cpspec->ibsymsnap;
+			val -= ppd->cpspec->ibsymdelta;
+			write_7220_creg(dd, cr_ibsymbolerr, val);
+		}
+		if (ppd->cpspec->iblnkerrdelta || ppd->cpspec->ibdeltainprog) {
+			val = read_7220_creg32(dd, cr_iblinkerrrecov);
+			if (ppd->cpspec->ibdeltainprog)
+				val -= val - ppd->cpspec->iblnkerrsnap;
+			val -= ppd->cpspec->iblnkerrdelta;
+			write_7220_creg(dd, cr_iblinkerrrecov, val);
+		}
+
+		/* and disable counter writes */
+		qib_write_kreg(dd, kr_hwdiagctrl, diagc);
+	}
+	qib_set_ib_7220_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
+
+	spin_lock_irqsave(&ppd->lflags_lock, flags);
+	ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
+	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+	wake_up(&ppd->cpspec->autoneg_wait);
+	cancel_delayed_work(&ppd->cpspec->autoneg_work);
+	flush_scheduled_work();
+
+	shutdown_7220_relock_poll(ppd->dd);
+	val = qib_read_kreg64(ppd->dd, kr_xgxs_cfg);
+	val |= QLOGIC_IB_XGXS_RESET;
+	qib_write_kreg(ppd->dd, kr_xgxs_cfg, val);
+}
+
+/**
+ * qib_setup_7220_setextled - set the state of the two external LEDs
+ * @ppd: the qlogic_ib port
+ * @on: whether the link is up or not
+ *
+ * The exact combo of LEDs if @on is true is determined by looking
+ * at the ibcstatus.
+ *
+ * These LEDs indicate the physical and logical state of the IB link.
+ * For this chip (at least with recommended board pinouts), LED1
+ * is Yellow (logical state) and LED2 is Green (physical state).
+ *
+ * Note:  We try to match the Mellanox HCA LED behavior as best
+ * we can.  Green indicates physical link state is OK (something is
+ * plugged in, and we can train).
+ * Amber indicates the link is logically up (ACTIVE).
+ * Mellanox further blinks the amber LED to indicate data packet
+ * activity, but we have no hardware support for that, so it would
+ * require waking up every 10-20 msecs and checking the counters
+ * on the chip, and then turning the LED off if appropriate.  That's
+ * visible overhead, so not something we will do.
+ */
+static void qib_setup_7220_setextled(struct qib_pportdata *ppd, u32 on)
+{
+	struct qib_devdata *dd = ppd->dd;
+	u64 extctl, ledblink = 0, val, lst, ltst;
+	unsigned long flags;
+
+	/*
+	 * The diags use the LED to indicate diag info, so we leave
+	 * the external LED alone when the diags are running.
+	 */
+	if (dd->diag_client)
+		return;
+
+	if (ppd->led_override) {
+		ltst = (ppd->led_override & QIB_LED_PHYS) ?
+			IB_PHYSPORTSTATE_LINKUP : IB_PHYSPORTSTATE_DISABLED;
+		lst = (ppd->led_override & QIB_LED_LOG) ?
+			IB_PORT_ACTIVE : IB_PORT_DOWN;
+	} else if (on) {
+		val = qib_read_kreg64(dd, kr_ibcstatus);
+		ltst = qib_7220_phys_portstate(val);
+		lst = qib_7220_iblink_state(val);
+	} else {
+		ltst = 0;
+		lst = 0;
+	}
+
+	spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
+	extctl = dd->cspec->extctrl & ~(SYM_MASK(EXTCtrl, LEDPriPortGreenOn) |
+				 SYM_MASK(EXTCtrl, LEDPriPortYellowOn));
+	if (ltst == IB_PHYSPORTSTATE_LINKUP) {
+		extctl |= SYM_MASK(EXTCtrl, LEDPriPortGreenOn);
+		/*
+		 * counts are in chip clock (4ns) periods.
+		 * This is 1/16 sec (66.6ms) on,
+		 * 3/16 sec (187.5 ms) off, with packets rcvd
+		 */
+		ledblink = ((66600 * 1000UL / 4) << IBA7220_LEDBLINK_ON_SHIFT)
+			| ((187500 * 1000UL / 4) << IBA7220_LEDBLINK_OFF_SHIFT);
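+		/*
+		 * Arithmetic check: 66600 * 1000 ns / 4 ns = 16,650,000
+		 * on cycles, and 187500 * 1000 ns / 4 ns = 46,875,000
+		 * off cycles, at the 250 MHz (4 ns) chip clock.
+		 */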
+	}
+	if (lst == IB_PORT_ACTIVE)
+		extctl |= SYM_MASK(EXTCtrl, LEDPriPortYellowOn);
+	dd->cspec->extctrl = extctl;
+	qib_write_kreg(dd, kr_extctrl, extctl);
+	spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
+
+	if (ledblink) /* blink the LED on packet receive */
+		qib_write_kreg(dd, kr_rcvpktledcnt, ledblink);
+}
+
+static void qib_7220_free_irq(struct qib_devdata *dd)
+{
+	if (dd->cspec->irq) {
+		free_irq(dd->cspec->irq, dd);
+		dd->cspec->irq = 0;
+	}
+	qib_nomsi(dd);
+}
+
+/*
+ * qib_setup_7220_cleanup - clean up any chip-specific stuff
+ * @dd: the qlogic_ib device
+ *
+ * This is called during driver unload.
+ */
+static void qib_setup_7220_cleanup(struct qib_devdata *dd)
+{
+	qib_7220_free_irq(dd);
+	kfree(dd->cspec->cntrs);
+	kfree(dd->cspec->portcntrs);
+}
+
+/*
+ * This is only called for SDmaInt.
+ * SDmaDisabled is handled on the error path.
+ */
+static void sdma_7220_intr(struct qib_pportdata *ppd, u64 istat)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ppd->sdma_lock, flags);
+
+	switch (ppd->sdma_state.current_state) {
+	case qib_sdma_state_s00_hw_down:
+		break;
+
+	case qib_sdma_state_s10_hw_start_up_wait:
+		__qib_sdma_process_event(ppd, qib_sdma_event_e20_hw_started);
+		break;
+
+	case qib_sdma_state_s20_idle:
+		break;
+
+	case qib_sdma_state_s30_sw_clean_up_wait:
+		break;
+
+	case qib_sdma_state_s40_hw_clean_up_wait:
+		break;
+
+	case qib_sdma_state_s50_hw_halt_wait:
+		__qib_sdma_process_event(ppd, qib_sdma_event_e60_hw_halted);
+		break;
+
+	case qib_sdma_state_s99_running:
+		/* too chatty to print here */
+		__qib_sdma_intr(ppd);
+		break;
+	}
+	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+}
+
+static void qib_wantpiobuf_7220_intr(struct qib_devdata *dd, u32 needint)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dd->sendctrl_lock, flags);
+	if (needint) {
+		if (!(dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd)))
+			goto done;
+		/*
+		 * blip the availupd off, next write will be on, so
+		 * we ensure an avail update, regardless of threshold or
+		 * buffers becoming free, whenever we want an interrupt
+		 */
+		qib_write_kreg(dd, kr_sendctrl, dd->sendctrl &
+			~SYM_MASK(SendCtrl, SendBufAvailUpd));
+		qib_write_kreg(dd, kr_scratch, 0ULL);
+		dd->sendctrl |= SYM_MASK(SendCtrl, SendIntBufAvail);
+	} else
+		dd->sendctrl &= ~SYM_MASK(SendCtrl, SendIntBufAvail);
+	qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
+	qib_write_kreg(dd, kr_scratch, 0ULL);
+done:
+	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
+}
+
+/*
+ * Handle errors and unusual events first, separate function
+ * to improve cache hits for fast path interrupt handling.
+ */
+static noinline void unlikely_7220_intr(struct qib_devdata *dd, u64 istat)
+{
+	if (unlikely(istat & ~QLOGIC_IB_I_BITSEXTANT))
+		qib_dev_err(dd,
+			    "interrupt with unknown interrupts %Lx set\n",
+			    istat & ~QLOGIC_IB_I_BITSEXTANT);
+
+	if (istat & QLOGIC_IB_I_GPIO) {
+		u32 gpiostatus;
+
+		/*
+		 * Boards for this chip currently don't use GPIO interrupts,
+		 * so clear by writing GPIOstatus to GPIOclear, and complain
+		 * to alert the developer. To avoid endless repeats, clear
+		 * the bits in the mask, since there is some kind of
+		 * programming error or chip problem.
+		 */
+		gpiostatus = qib_read_kreg32(dd, kr_gpio_status);
+		/*
+		 * In theory, writing GPIOstatus to GPIOclear could
+		 * have a bad side-effect on some diagnostic that wanted
+		 * to poll for a status-change, but the various shadows
+		 * make that problematic at best. Diags will just suppress
+		 * all GPIO interrupts during such tests.
+		 */
+		qib_write_kreg(dd, kr_gpio_clear, gpiostatus);
+
+		if (gpiostatus) {
+			const u32 mask = qib_read_kreg32(dd, kr_gpio_mask);
+			u32 gpio_irq = mask & gpiostatus;
+
+			/*
+			 * A bit set in both status and the (chip) Mask
+			 * register would cause an interrupt. Since we are
+			 * not expecting any, report it. Also check that
+			 * the chip reflects our shadow, report issues,
+			 * and refresh from the shadow.  Then clear any
+			 * troublemakers, and update the chip from the
+			 * shadow.
+			 */
+			dd->cspec->gpio_mask &= ~gpio_irq;
+			qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
+		}
+	}
+
+	if (istat & QLOGIC_IB_I_ERROR) {
+		u64 estat;
+
+		qib_stats.sps_errints++;
+		estat = qib_read_kreg64(dd, kr_errstatus);
+		if (!estat)
+			qib_devinfo(dd->pcidev, "error interrupt (%Lx), "
+				 "but no error bits set!\n", istat);
+		else
+			handle_7220_errors(dd, estat);
+	}
+}
+
+static irqreturn_t qib_7220intr(int irq, void *data)
+{
+	struct qib_devdata *dd = data;
+	irqreturn_t ret;
+	u64 istat;
+	u64 ctxtrbits;
+	u64 rmask;
+	unsigned i;
+
+	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) {
+		/*
+		 * This return value is not great, but we do not want the
+		 * interrupt core code to remove our interrupt handler
+		 * because we don't appear to be handling an interrupt
+		 * during a chip reset.
+		 */
+		ret = IRQ_HANDLED;
+		goto bail;
+	}
+
+	istat = qib_read_kreg64(dd, kr_intstatus);
+
+	if (unlikely(!istat)) {
+		ret = IRQ_NONE; /* not our interrupt, or already handled */
+		goto bail;
+	}
+	if (unlikely(istat == -1)) {
+		qib_bad_intrstatus(dd);
+		/* don't know if it was our interrupt or not */
+		ret = IRQ_NONE;
+		goto bail;
+	}
+
+	qib_stats.sps_ints++;
+	if (dd->int_counter != (u32) -1)
+		dd->int_counter++;
+
+	if (unlikely(istat & (~QLOGIC_IB_I_BITSEXTANT |
+			      QLOGIC_IB_I_GPIO | QLOGIC_IB_I_ERROR)))
+		unlikely_7220_intr(dd, istat);
+
+	/*
+	 * Clear the interrupt bits we found set, relatively early, so we
+	 * "know" know the chip will have seen this by the time we process
+	 * the queue, and will re-interrupt if necessary.  The processor
+	 * itself won't take the interrupt again until we return.
+	 */
+	qib_write_kreg(dd, kr_intclear, istat);
+
+	/*
+	 * Handle kernel receive queues before checking for pio buffers
+	 * available since receives can overflow; piobuf waiters can afford
+	 * a few extra cycles, since they were waiting anyway.
+	 */
+	ctxtrbits = istat &
+		((QLOGIC_IB_I_RCVAVAIL_MASK << QLOGIC_IB_I_RCVAVAIL_SHIFT) |
+		 (QLOGIC_IB_I_RCVURG_MASK << QLOGIC_IB_I_RCVURG_SHIFT));
+	if (ctxtrbits) {
+		rmask = (1ULL << QLOGIC_IB_I_RCVAVAIL_SHIFT) |
+			(1ULL << QLOGIC_IB_I_RCVURG_SHIFT);
+		for (i = 0; i < dd->first_user_ctxt; i++) {
+			if (ctxtrbits & rmask) {
+				ctxtrbits &= ~rmask;
+				qib_kreceive(dd->rcd[i], NULL, NULL);
+			}
+			rmask <<= 1;
+		}
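+		/*
+		 * Anything still set belongs to user contexts: fold the
+		 * avail and urg fields down so each context is a single
+		 * bit, and hand the resulting bitmask to qib_handle_urcv().
+		 */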
+		if (ctxtrbits) {
+			ctxtrbits =
+				(ctxtrbits >> QLOGIC_IB_I_RCVAVAIL_SHIFT) |
+				(ctxtrbits >> QLOGIC_IB_I_RCVURG_SHIFT);
+			qib_handle_urcv(dd, ctxtrbits);
+		}
+	}
+
+	/* only call for SDmaInt */
+	if (istat & QLOGIC_IB_I_SDMAINT)
+		sdma_7220_intr(dd->pport, istat);
+
+	if ((istat & QLOGIC_IB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED))
+		qib_ib_piobufavail(dd);
+
+	ret = IRQ_HANDLED;
+bail:
+	return ret;
+}
+
+/*
+ * Set up our chip-specific interrupt handler.
+ * The interrupt type has already been setup, so
+ * we just need to do the registration and error checking.
+ * If we are using MSI interrupts, we may fall back to
+ * INTx later, if the interrupt handler doesn't get called
+ * within 1/2 second (see verify_interrupt()).
+ */
+static void qib_setup_7220_interrupt(struct qib_devdata *dd)
+{
+	if (!dd->cspec->irq)
+		qib_dev_err(dd, "irq is 0, BIOS error?  Interrupts won't "
+			    "work\n");
+	else {
+		int ret = request_irq(dd->cspec->irq, qib_7220intr,
+			dd->msi_lo ? 0 : IRQF_SHARED,
+			QIB_DRV_NAME, dd);
+
+		if (ret)
+			qib_dev_err(dd, "Couldn't setup %s interrupt "
+				    "(irq=%d): %d\n", dd->msi_lo ?
+				    "MSI" : "INTx", dd->cspec->irq, ret);
+	}
+}
+
+/**
+ * qib_7220_boardname - fill in the board name
+ * @dd: the qlogic_ib device
+ *
+ * info is based on the board revision register
+ */
+static void qib_7220_boardname(struct qib_devdata *dd)
+{
+	char *n;
+	u32 boardid, namelen;
+
+	boardid = SYM_FIELD(dd->revision, Revision, BoardID);
+
+	switch (boardid) {
+	case 1:
+		n = "InfiniPath_QLE7240";
+		break;
+	case 2:
+		n = "InfiniPath_QLE7280";
+		break;
+	default:
+		qib_dev_err(dd, "Unknown 7220 board with ID %u\n", boardid);
+		n = "Unknown_InfiniPath_7220";
+		break;
+	}
+
+	namelen = strlen(n) + 1;
+	dd->boardname = kmalloc(namelen, GFP_KERNEL);
+	if (!dd->boardname)
+		qib_dev_err(dd, "Failed allocation for board name: %s\n", n);
+	else
+		snprintf(dd->boardname, namelen, "%s", n);
+
+	if (dd->majrev != 5 || !dd->minrev || dd->minrev > 2)
+		qib_dev_err(dd, "Unsupported InfiniPath hardware "
+			    "revision %u.%u!\n",
+			    dd->majrev, dd->minrev);
+
+	snprintf(dd->boardversion, sizeof(dd->boardversion),
+		 "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n",
+		 QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname,
+		 (unsigned)SYM_FIELD(dd->revision, Revision_R, Arch),
+		 dd->majrev, dd->minrev,
+		 (unsigned)SYM_FIELD(dd->revision, Revision_R, SW));
+}
+
+/*
+ * This routine sleeps, so it can only be called from user context, not
+ * from interrupt context.
+ */
+static int qib_setup_7220_reset(struct qib_devdata *dd)
+{
+	u64 val;
+	int i;
+	int ret;
+	u16 cmdval;
+	u8 int_line, clinesz;
+	unsigned long flags;
+
+	qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz);
+
+	/* Use dev_err so it shows up in logs, etc. */
+	qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit);
+
+	/* no interrupts till re-initted */
+	qib_7220_set_intr_state(dd, 0);
+
+	dd->pport->cpspec->ibdeltainprog = 0;
+	dd->pport->cpspec->ibsymdelta = 0;
+	dd->pport->cpspec->iblnkerrdelta = 0;
+
+	/*
+	 * Keep chip from being accessed until we are ready.  Use
+	 * writeq() directly, to allow the write even though QIB_PRESENT
+	 * isn't set.
+	 */
+	dd->flags &= ~(QIB_INITTED | QIB_PRESENT);
+	dd->int_counter = 0; /* so we check interrupts work again */
+	val = dd->control | QLOGIC_IB_C_RESET;
+	writeq(val, &dd->kregbase[kr_control]);
+	mb(); /* prevent compiler reordering around actual reset */
+
+	for (i = 1; i <= 5; i++) {
+		/*
+		 * Allow MBIST, etc. to complete; longer on each retry.
+		 * We sometimes get machine checks from bus timeout if no
+		 * response, so for now, make it *really* long.
+		 */
+		msleep(1000 + (1 + i) * 2000);
+
+		qib_pcie_reenable(dd, cmdval, int_line, clinesz);
+
+		/*
+		 * Use readq directly, so we don't need to mark it as PRESENT
+		 * until we get a successful indication that all is well.
+		 */
+		val = readq(&dd->kregbase[kr_revision]);
+		if (val == dd->revision) {
+			dd->flags |= QIB_PRESENT; /* it's back */
+			ret = qib_reinit_intr(dd);
+			goto bail;
+		}
+	}
+	ret = 0; /* failed */
+
+bail:
+	if (ret) {
+		if (qib_pcie_params(dd, dd->lbus_width, NULL, NULL))
+			qib_dev_err(dd, "Reset failed to setup PCIe or "
+				    "interrupts; continuing anyway\n");
+
+		/* hold IBC in reset, no sends, etc till later */
+		qib_write_kreg(dd, kr_control, 0ULL);
+
+		/* clear the reset error, init error/hwerror mask */
+		qib_7220_init_hwerrors(dd);
+
+		/* do setup similar to speed or link-width changes */
+		if (dd->pport->cpspec->ibcddrctrl & IBA7220_IBC_IBTA_1_2_MASK)
+			dd->cspec->presets_needed = 1;
+		spin_lock_irqsave(&dd->pport->lflags_lock, flags);
+		dd->pport->lflags |= QIBL_IB_FORCE_NOTIFY;
+		dd->pport->lflags &= ~QIBL_IB_AUTONEG_FAILED;
+		spin_unlock_irqrestore(&dd->pport->lflags_lock, flags);
+	}
+
+	return ret;
+}
+
+/**
+ * qib_7220_put_tid - write a TID to the chip
+ * @dd: the qlogic_ib device
+ * @tidptr: pointer to the expected TID (in chip) to update
+ * @type: RCVHQ_RCV_TYPE_EAGER or RCVHQ_RCV_TYPE_EXPECTED
+ * @pa: physical address of in memory buffer; tidinvalid if freeing
+ */
+static void qib_7220_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
+			     u32 type, unsigned long pa)
+{
+	if (pa != dd->tidinvalid) {
+		u64 chippa = pa >> IBA7220_TID_PA_SHIFT;
+
+		/* paranoia checks */
+		if (pa != (chippa << IBA7220_TID_PA_SHIFT)) {
+			qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
+				    pa);
+			return;
+		}
+		if (chippa >= (1UL << IBA7220_TID_SZ_SHIFT)) {
+			qib_dev_err(dd, "Physical page address 0x%lx "
+				"larger than supported\n", pa);
+			return;
+		}
+
+		if (type == RCVHQ_RCV_TYPE_EAGER)
+			chippa |= dd->tidtemplate;
+		else /* for now, always full 4KB page */
+			chippa |= IBA7220_TID_SZ_4K;
+		pa = chippa;
+	}
+	writeq(pa, tidptr);
+	mmiowb();
+}
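+
+/*
+ * For illustration, assuming IBA7220_TID_PA_SHIFT is 11 (as the "2KB
+ * aligned" check above suggests): pa = 0x1234800 gives
+ * chippa = 0x1234800 >> 11 = 0x2469, which is then OR'd with the
+ * size/template bits before being written to the chip.
+ */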
+
+/**
+ * qib_7220_clear_tids - clear all TID entries for a ctxt, expected and eager
+ * @dd: the qlogic_ib device
+ * @rcd: the ctxt data
+ *
+ * clear all TID entries for a ctxt, expected and eager.
+ * Used from qib_close().  On this chip, TIDs are only 32 bits,
+ * not 64, but they are still on 64 bit boundaries, so tidbase
+ * is declared as u64 * for the pointer math, even though we write 32 bits.
+ */
+static void qib_7220_clear_tids(struct qib_devdata *dd,
+				struct qib_ctxtdata *rcd)
+{
+	u64 __iomem *tidbase;
+	unsigned long tidinv;
+	u32 ctxt;
+	int i;
+
+	if (!dd->kregbase || !rcd)
+		return;
+
+	ctxt = rcd->ctxt;
+
+	tidinv = dd->tidinvalid;
+	tidbase = (u64 __iomem *)
+		((char __iomem *)(dd->kregbase) +
+		 dd->rcvtidbase +
+		 ctxt * dd->rcvtidcnt * sizeof(*tidbase));
+
+	for (i = 0; i < dd->rcvtidcnt; i++)
+		qib_7220_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
+				 tidinv);
+
+	tidbase = (u64 __iomem *)
+		((char __iomem *)(dd->kregbase) +
+		 dd->rcvegrbase +
+		 rcd->rcvegr_tid_base * sizeof(*tidbase));
+
+	for (i = 0; i < rcd->rcvegrcnt; i++)
+		qib_7220_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
+				 tidinv);
+}
+
+/**
+ * qib_7220_tidtemplate - setup constants for TID updates
+ * @dd: the qlogic_ib device
+ *
+ * We set up values that we use a lot, to avoid recalculating each time.
+ */
+static void qib_7220_tidtemplate(struct qib_devdata *dd)
+{
+	if (dd->rcvegrbufsize == 2048)
+		dd->tidtemplate = IBA7220_TID_SZ_2K;
+	else if (dd->rcvegrbufsize == 4096)
+		dd->tidtemplate = IBA7220_TID_SZ_4K;
+	dd->tidinvalid = 0;
+}
+
+/**
+ * qib_7220_get_base_info - set chip-specific flags for user code
+ * @rcd: the qlogic_ib ctxt
+ * @kinfo: qib_base_info pointer
+ *
+ * We set the PCIE flag because the lower bandwidth on PCIe vs
+ * HyperTransport can affect some user packet algorithms.
+ */
+static int qib_7220_get_base_info(struct qib_ctxtdata *rcd,
+				  struct qib_base_info *kinfo)
+{
+	kinfo->spi_runtime_flags |= QIB_RUNTIME_PCIE |
+		QIB_RUNTIME_NODMA_RTAIL | QIB_RUNTIME_SDMA;
+
+	if (rcd->dd->flags & QIB_USE_SPCL_TRIG)
+		kinfo->spi_runtime_flags |= QIB_RUNTIME_SPECIAL_TRIGGER;
+
+	return 0;
+}
+
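+/*
+ * rhf_addr points at the receive-header-flags word of an entry;
+ * backing up by dd->rhf_offset reaches the start of the entry, and
+ * adding the offset encoded in the RHF locates the message header
+ * within it (all in units of 32-bit words).
+ */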
+static struct qib_message_header *
+qib_7220_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr)
+{
+	u32 offset = qib_hdrget_offset(rhf_addr);
+
+	return (struct qib_message_header *)
+		(rhf_addr - dd->rhf_offset + offset);
+}
+
+static void qib_7220_config_ctxts(struct qib_devdata *dd)
+{
+	unsigned long flags;
+	u32 nchipctxts;
+
+	nchipctxts = qib_read_kreg32(dd, kr_portcnt);
+	dd->cspec->numctxts = nchipctxts;
+	if (qib_n_krcv_queues > 1) {
+		dd->qpn_mask = 0x3f;
+		dd->first_user_ctxt = qib_n_krcv_queues * dd->num_pports;
+		if (dd->first_user_ctxt > nchipctxts)
+			dd->first_user_ctxt = nchipctxts;
+	} else
+		dd->first_user_ctxt = dd->num_pports;
+	dd->n_krcv_queues = dd->first_user_ctxt;
+
+	if (!qib_cfgctxts) {
+		int nctxts = dd->first_user_ctxt + num_online_cpus();
+
+		if (nctxts <= 5)
+			dd->ctxtcnt = 5;
+		else if (nctxts <= 9)
+			dd->ctxtcnt = 9;
+		else if (nctxts <= nchipctxts)
+			dd->ctxtcnt = nchipctxts;
+	} else if (qib_cfgctxts <= nchipctxts)
+		dd->ctxtcnt = qib_cfgctxts;
+	if (!dd->ctxtcnt) /* none of the above, set to max */
+		dd->ctxtcnt = nchipctxts;
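+	/*
+	 * For illustration: with one port, qib_n_krcv_queues = 4 and 8
+	 * online CPUs, first_user_ctxt is 4 and nctxts is 12; if the
+	 * chip reports 17 contexts, the chain above picks ctxtcnt = 17.
+	 */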
+
+	/*
+	 * Chip can be configured for 5, 9, or 17 ctxts, and choice
+	 * affects number of eager TIDs per ctxt (1K, 2K, 4K).
+	 * Lock to be paranoid about later motion, etc.
+	 */
+	spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
+	if (dd->ctxtcnt > 9)
+		dd->rcvctrl |= 2ULL << IBA7220_R_CTXTCFG_SHIFT;
+	else if (dd->ctxtcnt > 5)
+		dd->rcvctrl |= 1ULL << IBA7220_R_CTXTCFG_SHIFT;
+	/* else configure for default 5 receive ctxts */
+	if (dd->qpn_mask)
+		dd->rcvctrl |= 1ULL << QIB_7220_RcvCtrl_RcvQPMapEnable_LSB;
+	qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
+	spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
+
+	/* kr_rcvegrcnt changes based on the number of contexts enabled */
+	dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);
+	dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, IBA7220_KRCVEGRCNT);
+}
+
+static int qib_7220_get_ib_cfg(struct qib_pportdata *ppd, int which)
+{
+	int lsb, ret = 0;
+	u64 maskr; /* right-justified mask */
+
+	switch (which) {
+	case QIB_IB_CFG_LWID_ENB: /* Get allowed Link-width */
+		ret = ppd->link_width_enabled;
+		goto done;
+
+	case QIB_IB_CFG_LWID: /* Get currently active Link-width */
+		ret = ppd->link_width_active;
+		goto done;
+
+	case QIB_IB_CFG_SPD_ENB: /* Get allowed Link speeds */
+		ret = ppd->link_speed_enabled;
+		goto done;
+
+	case QIB_IB_CFG_SPD: /* Get current Link spd */
+		ret = ppd->link_speed_active;
+		goto done;
+
+	case QIB_IB_CFG_RXPOL_ENB: /* Get Auto-RX-polarity enable */
+		lsb = IBA7220_IBC_RXPOL_SHIFT;
+		maskr = IBA7220_IBC_RXPOL_MASK;
+		break;
+
+	case QIB_IB_CFG_LREV_ENB: /* Get Auto-Lane-reversal enable */
+		lsb = IBA7220_IBC_LREV_SHIFT;
+		maskr = IBA7220_IBC_LREV_MASK;
+		break;
+
+	case QIB_IB_CFG_LINKLATENCY:
+		ret = qib_read_kreg64(ppd->dd, kr_ibcddrstatus)
+			& IBA7220_DDRSTAT_LINKLAT_MASK;
+		goto done;
+
+	case QIB_IB_CFG_OP_VLS:
+		ret = ppd->vls_operational;
+		goto done;
+
+	case QIB_IB_CFG_VL_HIGH_CAP:
+		ret = 0;
+		goto done;
+
+	case QIB_IB_CFG_VL_LOW_CAP:
+		ret = 0;
+		goto done;
+
+	case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
+		ret = SYM_FIELD(ppd->cpspec->ibcctrl, IBCCtrl,
+				OverrunThreshold);
+		goto done;
+
+	case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
+		ret = SYM_FIELD(ppd->cpspec->ibcctrl, IBCCtrl,
+				PhyerrThreshold);
+		goto done;
+
+	case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
+		/* will only take effect when the link state changes */
+		ret = (ppd->cpspec->ibcctrl &
+		       SYM_MASK(IBCCtrl, LinkDownDefaultState)) ?
+			IB_LINKINITCMD_SLEEP : IB_LINKINITCMD_POLL;
+		goto done;
+
+	case QIB_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */
+		lsb = IBA7220_IBC_HRTBT_SHIFT;
+		maskr = IBA7220_IBC_HRTBT_MASK;
+		break;
+
+	case QIB_IB_CFG_PMA_TICKS:
+		/*
+		 * 0x00 = 10x link transfer rate, i.e. 4 nsec for 2.5Gbs.
+		 * Since the clock is always 250MHz, the value is 1 (DDR)
+		 * or 0 (SDR).
+		 */
+		ret = (ppd->link_speed_active == QIB_IB_DDR);
+		goto done;
+
+	default:
+		ret = -EINVAL;
+		goto done;
+	}
+	ret = (int)((ppd->cpspec->ibcddrctrl >> lsb) & maskr);
+done:
+	return ret;
+}
+
+static int qib_7220_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
+{
+	struct qib_devdata *dd = ppd->dd;
+	u64 maskr; /* right-justified mask */
+	int lsb, ret = 0, setforce = 0;
+	u16 lcmd, licmd;
+	unsigned long flags;
+
+	switch (which) {
+	case QIB_IB_CFG_LIDLMC:
+		/*
+		 * Set LID and LMC. Combined to avoid possible hazard;
+		 * caller puts LMC in 16MSbits, DLID in 16LSbits of val.
+		 */
+		lsb = IBA7220_IBC_DLIDLMC_SHIFT;
+		maskr = IBA7220_IBC_DLIDLMC_MASK;
+		break;
+
+	case QIB_IB_CFG_LWID_ENB: /* set allowed Link-width */
+		/*
+		 * As with speed, only write the actual register if
+		 * the link is currently down, otherwise takes effect
+		 * on next link change.
+		 */
+		ppd->link_width_enabled = val;
+		if (!(ppd->lflags & QIBL_LINKDOWN))
+			goto bail;
+		/*
+		 * We set the QIBL_IB_FORCE_NOTIFY bit so updown
+		 * will get called because we want to update
+		 * link_width_active, and the change may not take
+		 * effect for some time (if we are in POLL), so this
+		 * flag will force the updown routine to be called
+		 * on the next ibstatuschange down interrupt, even
+		 * if it's not a down->up transition.
+		 */
+		val--; /* convert from IB to chip */
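+		/*
+		 * IB_WIDTH_1X = 1, IB_WIDTH_4X = 2, so 1X|4X = 3;
+		 * subtracting 1 presumably yields the chip's
+		 * IBA7220_IBC_WIDTH_* encoding (0/1/2).
+		 */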
+		maskr = IBA7220_IBC_WIDTH_MASK;
+		lsb = IBA7220_IBC_WIDTH_SHIFT;
+		setforce = 1;
+		spin_lock_irqsave(&ppd->lflags_lock, flags);
+		ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
+		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+		break;
+
+	case QIB_IB_CFG_SPD_ENB: /* set allowed Link speeds */
+		/*
+		 * If we turn off IB1.2, need to preset SerDes defaults,
+		 * but not right now. Set a flag for the next time
+		 * we command the link down.  As with width, only write the
+		 * actual register if the link is currently down, otherwise
+		 * takes effect on next link change.  Since setting is being
+		 * explicitly requested (via MAD or sysfs), clear autoneg
+		 * failure status if speed autoneg is enabled.
+		 */
+		ppd->link_speed_enabled = val;
+		if ((ppd->cpspec->ibcddrctrl & IBA7220_IBC_IBTA_1_2_MASK) &&
+		    !(val & (val - 1)))
+			dd->cspec->presets_needed = 1;
+		if (!(ppd->lflags & QIBL_LINKDOWN))
+			goto bail;
+		/*
+		 * We set the QIBL_IB_FORCE_NOTIFY bit so updown
+		 * will get called because we want to update
+		 * link_speed_active, and the change may not take
+		 * effect for some time (if we are in POLL), so this
+		 * flag will force the updown routine to be called
+		 * on the next ibstatuschange down interrupt, even
+		 * if it's not a down->up transition.
+		 */
+		if (val == (QIB_IB_SDR | QIB_IB_DDR)) {
+			val = IBA7220_IBC_SPEED_AUTONEG_MASK |
+				IBA7220_IBC_IBTA_1_2_MASK;
+			spin_lock_irqsave(&ppd->lflags_lock, flags);
+			ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
+			spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+		} else
+			val = val == QIB_IB_DDR ?
+				IBA7220_IBC_SPEED_DDR : IBA7220_IBC_SPEED_SDR;
+		maskr = IBA7220_IBC_SPEED_AUTONEG_MASK |
+			IBA7220_IBC_IBTA_1_2_MASK;
+		/* IBTA 1.2 mode + speed bits are contiguous */
+		lsb = SYM_LSB(IBCDDRCtrl, IB_ENHANCED_MODE);
+		setforce = 1;
+		break;
+
+	case QIB_IB_CFG_RXPOL_ENB: /* set Auto-RX-polarity enable */
+		lsb = IBA7220_IBC_RXPOL_SHIFT;
+		maskr = IBA7220_IBC_RXPOL_MASK;
+		break;
+
+	case QIB_IB_CFG_LREV_ENB: /* set Auto-Lane-reversal enable */
+		lsb = IBA7220_IBC_LREV_SHIFT;
+		maskr = IBA7220_IBC_LREV_MASK;
+		break;
+
+	case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
+		maskr = SYM_FIELD(ppd->cpspec->ibcctrl, IBCCtrl,
+				  OverrunThreshold);
+		if (maskr != val) {
+			ppd->cpspec->ibcctrl &=
+				~SYM_MASK(IBCCtrl, OverrunThreshold);
+			ppd->cpspec->ibcctrl |= (u64) val <<
+				SYM_LSB(IBCCtrl, OverrunThreshold);
+			qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl);
+			qib_write_kreg(dd, kr_scratch, 0);
+		}
+		goto bail;
+
+	case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
+		maskr = SYM_FIELD(ppd->cpspec->ibcctrl, IBCCtrl,
+				  PhyerrThreshold);
+		if (maskr != val) {
+			ppd->cpspec->ibcctrl &=
+				~SYM_MASK(IBCCtrl, PhyerrThreshold);
+			ppd->cpspec->ibcctrl |= (u64) val <<
+				SYM_LSB(IBCCtrl, PhyerrThreshold);
+			qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl);
+			qib_write_kreg(dd, kr_scratch, 0);
+		}
+		goto bail;
+
+	case QIB_IB_CFG_PKEYS: /* update pkeys */
+		maskr = (u64) ppd->pkeys[0] | ((u64) ppd->pkeys[1] << 16) |
+			((u64) ppd->pkeys[2] << 32) |
+			((u64) ppd->pkeys[3] << 48);
+		qib_write_kreg(dd, kr_partitionkey, maskr);
+		goto bail;
+
+	case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
+		/* will only take effect when the link state changes */
+		if (val == IB_LINKINITCMD_POLL)
+			ppd->cpspec->ibcctrl &=
+				~SYM_MASK(IBCCtrl, LinkDownDefaultState);
+		else /* SLEEP */
+			ppd->cpspec->ibcctrl |=
+				SYM_MASK(IBCCtrl, LinkDownDefaultState);
+		qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl);
+		qib_write_kreg(dd, kr_scratch, 0);
+		goto bail;
+
+	case QIB_IB_CFG_MTU: /* update the MTU in IBC */
+		/*
+		 * Update our housekeeping variables, and set IBC max
+		 * size, same as init code; max IBC is max we allow in
+		 * buffer, less the qword pbc, plus 1 for ICRC, in dwords.
+		 * Set even if it's unchanged, print debug message only
+		 * on changes.
+		 */
+		val = (ppd->ibmaxlen >> 2) + 1;
+		ppd->cpspec->ibcctrl &= ~SYM_MASK(IBCCtrl, MaxPktLen);
+		ppd->cpspec->ibcctrl |= (u64)val << SYM_LSB(IBCCtrl, MaxPktLen);
+		qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl);
+		qib_write_kreg(dd, kr_scratch, 0);
+		goto bail;
+
+	case QIB_IB_CFG_LSTATE: /* set the IB link state */
+		switch (val & 0xffff0000) {
+		case IB_LINKCMD_DOWN:
+			lcmd = QLOGIC_IB_IBCC_LINKCMD_DOWN;
+			if (!ppd->cpspec->ibdeltainprog &&
+			    qib_compat_ddr_negotiate) {
+				ppd->cpspec->ibdeltainprog = 1;
+				ppd->cpspec->ibsymsnap =
+					read_7220_creg32(dd, cr_ibsymbolerr);
+				ppd->cpspec->iblnkerrsnap =
+					read_7220_creg32(dd, cr_iblinkerrrecov);
+			}
+			break;
+
+		case IB_LINKCMD_ARMED:
+			lcmd = QLOGIC_IB_IBCC_LINKCMD_ARMED;
+			break;
+
+		case IB_LINKCMD_ACTIVE:
+			lcmd = QLOGIC_IB_IBCC_LINKCMD_ACTIVE;
+			break;
+
+		default:
+			ret = -EINVAL;
+			qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16);
+			goto bail;
+		}
+		switch (val & 0xffff) {
+		case IB_LINKINITCMD_NOP:
+			licmd = 0;
+			break;
+
+		case IB_LINKINITCMD_POLL:
+			licmd = QLOGIC_IB_IBCC_LINKINITCMD_POLL;
+			break;
+
+		case IB_LINKINITCMD_SLEEP:
+			licmd = QLOGIC_IB_IBCC_LINKINITCMD_SLEEP;
+			break;
+
+		case IB_LINKINITCMD_DISABLE:
+			licmd = QLOGIC_IB_IBCC_LINKINITCMD_DISABLE;
+			ppd->cpspec->chase_end = 0;
+			/*
+			 * Stop state chase counter and timer, if running;
+			 * wait for a pending timer, but don't clear .data (ppd)!
+			 */
+			if (ppd->cpspec->chase_timer.expires) {
+				del_timer_sync(&ppd->cpspec->chase_timer);
+				ppd->cpspec->chase_timer.expires = 0;
+			}
+			break;
+
+		default:
+			ret = -EINVAL;
+			qib_dev_err(dd, "bad linkinitcmd req 0x%x\n",
+				    val & 0xffff);
+			goto bail;
+		}
+		qib_set_ib_7220_lstate(ppd, lcmd, licmd);
+		goto bail;
+
+	case QIB_IB_CFG_HRTBT: /* set Heartbeat off/enable/auto */
+		if (val > IBA7220_IBC_HRTBT_MASK) {
+			ret = -EINVAL;
+			goto bail;
+		}
+		lsb = IBA7220_IBC_HRTBT_SHIFT;
+		maskr = IBA7220_IBC_HRTBT_MASK;
+		break;
+
+	default:
+		ret = -EINVAL;
+		goto bail;
+	}
+	ppd->cpspec->ibcddrctrl &= ~(maskr << lsb);
+	ppd->cpspec->ibcddrctrl |= (((u64) val & maskr) << lsb);
+	qib_write_kreg(dd, kr_ibcddrctrl, ppd->cpspec->ibcddrctrl);
+	qib_write_kreg(dd, kr_scratch, 0);
+	if (setforce) {
+		spin_lock_irqsave(&ppd->lflags_lock, flags);
+		ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
+		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+	}
+bail:
+	return ret;
+}
+
+static int qib_7220_set_loopback(struct qib_pportdata *ppd, const char *what)
+{
+	int ret = 0;
+	u64 val, ddr;
+
+	if (!strncmp(what, "ibc", 3)) {
+		ppd->cpspec->ibcctrl |= SYM_MASK(IBCCtrl, Loopback);
+		val = 0; /* disable heart beat, so link will come up */
+		qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
+			 ppd->dd->unit, ppd->port);
+	} else if (!strncmp(what, "off", 3)) {
+		ppd->cpspec->ibcctrl &= ~SYM_MASK(IBCCtrl, Loopback);
+		/* enable heart beat again */
+		val = IBA7220_IBC_HRTBT_MASK << IBA7220_IBC_HRTBT_SHIFT;
+		qib_devinfo(ppd->dd->pcidev, "Disabling IB%u:%u IBC loopback "
+			    "(normal)\n", ppd->dd->unit, ppd->port);
+	} else
+		ret = -EINVAL;
+	if (!ret) {
+		qib_write_kreg(ppd->dd, kr_ibcctrl, ppd->cpspec->ibcctrl);
+		ddr = ppd->cpspec->ibcddrctrl & ~(IBA7220_IBC_HRTBT_MASK
+					     << IBA7220_IBC_HRTBT_SHIFT);
+		ppd->cpspec->ibcddrctrl = ddr | val;
+		qib_write_kreg(ppd->dd, kr_ibcddrctrl,
+			       ppd->cpspec->ibcddrctrl);
+		qib_write_kreg(ppd->dd, kr_scratch, 0);
+	}
+	return ret;
+}
+
+static void qib_update_7220_usrhead(struct qib_ctxtdata *rcd, u64 hd,
+				    u32 updegr, u32 egrhd)
+{
+	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
+	if (updegr)
+		qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
+}
+
+static u32 qib_7220_hdrqempty(struct qib_ctxtdata *rcd)
+{
+	u32 head, tail;
+
+	head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt);
+	if (rcd->rcvhdrtail_kvaddr)
+		tail = qib_get_rcvhdrtail(rcd);
+	else
+		tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt);
+	return head == tail;
+}
+
+/*
+ * Modify the RCVCTRL register in a chip-specific way. This
+ * is a function because bit positions and (future) register
+ * location are chip-specific, but the needed operations are
+ * generic. <op> is a bit-mask because we often want to
+ * do multiple modifications.
+ */
+static void rcvctrl_7220_mod(struct qib_pportdata *ppd, unsigned int op,
+			     int ctxt)
+{
+	struct qib_devdata *dd = ppd->dd;
+	u64 mask, val;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
+	if (op & QIB_RCVCTRL_TAILUPD_ENB)
+		dd->rcvctrl |= (1ULL << IBA7220_R_TAILUPD_SHIFT);
+	if (op & QIB_RCVCTRL_TAILUPD_DIS)
+		dd->rcvctrl &= ~(1ULL << IBA7220_R_TAILUPD_SHIFT);
+	if (op & QIB_RCVCTRL_PKEY_ENB)
+		dd->rcvctrl &= ~(1ULL << IBA7220_R_PKEY_DIS_SHIFT);
+	if (op & QIB_RCVCTRL_PKEY_DIS)
+		dd->rcvctrl |= (1ULL << IBA7220_R_PKEY_DIS_SHIFT);
+	if (ctxt < 0)
+		mask = (1ULL << dd->ctxtcnt) - 1;
+	else
+		mask = (1ULL << ctxt);
+	if (op & QIB_RCVCTRL_CTXT_ENB) {
+		/* always done for specific ctxt */
+		dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, PortEnable));
+		if (!(dd->flags & QIB_NODMA_RTAIL))
+			dd->rcvctrl |= 1ULL << IBA7220_R_TAILUPD_SHIFT;
+		/* Write these registers before the context is enabled. */
+		qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt,
+			dd->rcd[ctxt]->rcvhdrqtailaddr_phys);
+		qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt,
+			dd->rcd[ctxt]->rcvhdrq_phys);
+		dd->rcd[ctxt]->seq_cnt = 1;
+	}
+	if (op & QIB_RCVCTRL_CTXT_DIS)
+		dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, PortEnable));
+	if (op & QIB_RCVCTRL_INTRAVAIL_ENB)
+		dd->rcvctrl |= (mask << IBA7220_R_INTRAVAIL_SHIFT);
+	if (op & QIB_RCVCTRL_INTRAVAIL_DIS)
+		dd->rcvctrl &= ~(mask << IBA7220_R_INTRAVAIL_SHIFT);
+	qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
+	if ((op & QIB_RCVCTRL_INTRAVAIL_ENB) && dd->rhdrhead_intr_off) {
+		/* arm rcv interrupt */
+		val = qib_read_ureg32(dd, ur_rcvhdrhead, ctxt) |
+			dd->rhdrhead_intr_off;
+		qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
+	}
+	if (op & QIB_RCVCTRL_CTXT_ENB) {
+		/*
+		 * Init the context registers also; if we were
+		 * disabled, tail and head should both be zero
+		 * already from the enable, but since we don't
+		 * know, we have to do it explicitly.
+		 */
+		val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt);
+		qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt);
+
+		val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt);
+		dd->rcd[ctxt]->head = val;
+		/* If kctxt, interrupt on next receive. */
+		if (ctxt < dd->first_user_ctxt)
+			val |= dd->rhdrhead_intr_off;
+		qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
+	}
+	if (op & QIB_RCVCTRL_CTXT_DIS) {
+		if (ctxt >= 0) {
+			qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt, 0);
+			qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt, 0);
+		} else {
+			unsigned i;
+
+			for (i = 0; i < dd->cfgctxts; i++) {
+				qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr,
+						    i, 0);
+				qib_write_kreg_ctxt(dd, kr_rcvhdraddr, i, 0);
+			}
+		}
+	}
+	spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
+}
+
+/*
+ * Modify the SENDCTRL register in a chip-specific way. This
+ * is a function because there may be multiple such registers with
+ * slightly different layouts. To start, we assume the
+ * "canonical" register layout of the first chips.
+ * The chip requires no back-to-back sendctrl writes, so we write
+ * the scratch register after writing sendctrl.
+ */
+static void sendctrl_7220_mod(struct qib_pportdata *ppd, u32 op)
+{
+	struct qib_devdata *dd = ppd->dd;
+	u64 tmp_dd_sendctrl;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dd->sendctrl_lock, flags);
+
+	/* First the ones that are "sticky", saved in shadow */
+	if (op & QIB_SENDCTRL_CLEAR)
+		dd->sendctrl = 0;
+	if (op & QIB_SENDCTRL_SEND_DIS)
+		dd->sendctrl &= ~SYM_MASK(SendCtrl, SPioEnable);
+	else if (op & QIB_SENDCTRL_SEND_ENB) {
+		dd->sendctrl |= SYM_MASK(SendCtrl, SPioEnable);
+		if (dd->flags & QIB_USE_SPCL_TRIG)
+			dd->sendctrl |= SYM_MASK(SendCtrl,
+						 SSpecialTriggerEn);
+	}
+	if (op & QIB_SENDCTRL_AVAIL_DIS)
+		dd->sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
+	else if (op & QIB_SENDCTRL_AVAIL_ENB)
+		dd->sendctrl |= SYM_MASK(SendCtrl, SendBufAvailUpd);
+
+	if (op & QIB_SENDCTRL_DISARM_ALL) {
+		u32 i, last;
+
+		tmp_dd_sendctrl = dd->sendctrl;
+		/*
+		 * disarm any that are not yet launched, disabling sends
+		 * and updates until done.
+		 */
+		last = dd->piobcnt2k + dd->piobcnt4k;
+		tmp_dd_sendctrl &=
+			~(SYM_MASK(SendCtrl, SPioEnable) |
+			  SYM_MASK(SendCtrl, SendBufAvailUpd));
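+		/*
+		 * Disarm the buffers one at a time; the buffer number is
+		 * carried in the low bits (DisarmPIOBuf) along with the
+		 * Disarm bit itself.
+		 */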
+		for (i = 0; i < last; i++) {
+			qib_write_kreg(dd, kr_sendctrl,
+				       tmp_dd_sendctrl |
+				       SYM_MASK(SendCtrl, Disarm) | i);
+			qib_write_kreg(dd, kr_scratch, 0);
+		}
+	}
+
+	tmp_dd_sendctrl = dd->sendctrl;
+
+	if (op & QIB_SENDCTRL_FLUSH)
+		tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Abort);
+	if (op & QIB_SENDCTRL_DISARM)
+		tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Disarm) |
+			((op & QIB_7220_SendCtrl_DisarmPIOBuf_RMASK) <<
+			 SYM_LSB(SendCtrl, DisarmPIOBuf));
+	if ((op & QIB_SENDCTRL_AVAIL_BLIP) &&
+	    (dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd)))
+		tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
+
+	qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl);
+	qib_write_kreg(dd, kr_scratch, 0);
+
+	if (op & QIB_SENDCTRL_AVAIL_BLIP) {
+		qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
+		qib_write_kreg(dd, kr_scratch, 0);
+	}
+
+	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
+
+	if (op & QIB_SENDCTRL_FLUSH) {
+		u32 v;
+		/*
+		 * ensure writes have hit chip, then do a few
+		 * more reads, to allow DMA of pioavail registers
+		 * to occur, so in-memory copy is in sync with
+		 * the chip.  Not always safe to sleep.
+		 */
+		v = qib_read_kreg32(dd, kr_scratch);
+		qib_write_kreg(dd, kr_scratch, v);
+		v = qib_read_kreg32(dd, kr_scratch);
+		qib_write_kreg(dd, kr_scratch, v);
+		qib_read_kreg32(dd, kr_scratch);
+	}
+}
+
+/**
+ * qib_portcntr_7220 - read a per-port counter
+ * @ppd: the qlogic_ib port
+ * @reg: the counter to snapshot
+ */
+static u64 qib_portcntr_7220(struct qib_pportdata *ppd, u32 reg)
+{
+	u64 ret = 0ULL;
+	struct qib_devdata *dd = ppd->dd;
+	u16 creg;
+	/* 0xffff for unimplemented or synthesized counters */
+	static const u16 xlator[] = {
+		[QIBPORTCNTR_PKTSEND] = cr_pktsend,
+		[QIBPORTCNTR_WORDSEND] = cr_wordsend,
+		[QIBPORTCNTR_PSXMITDATA] = cr_psxmitdatacount,
+		[QIBPORTCNTR_PSXMITPKTS] = cr_psxmitpktscount,
+		[QIBPORTCNTR_PSXMITWAIT] = cr_psxmitwaitcount,
+		[QIBPORTCNTR_SENDSTALL] = cr_sendstall,
+		[QIBPORTCNTR_PKTRCV] = cr_pktrcv,
+		[QIBPORTCNTR_PSRCVDATA] = cr_psrcvdatacount,
+		[QIBPORTCNTR_PSRCVPKTS] = cr_psrcvpktscount,
+		[QIBPORTCNTR_RCVEBP] = cr_rcvebp,
+		[QIBPORTCNTR_RCVOVFL] = cr_rcvovfl,
+		[QIBPORTCNTR_WORDRCV] = cr_wordrcv,
+		[QIBPORTCNTR_RXDROPPKT] = cr_rxdroppkt,
+		[QIBPORTCNTR_RXLOCALPHYERR] = cr_rxotherlocalphyerr,
+		[QIBPORTCNTR_RXVLERR] = cr_rxvlerr,
+		[QIBPORTCNTR_ERRICRC] = cr_erricrc,
+		[QIBPORTCNTR_ERRVCRC] = cr_errvcrc,
+		[QIBPORTCNTR_ERRLPCRC] = cr_errlpcrc,
+		[QIBPORTCNTR_BADFORMAT] = cr_badformat,
+		[QIBPORTCNTR_ERR_RLEN] = cr_err_rlen,
+		[QIBPORTCNTR_IBSYMBOLERR] = cr_ibsymbolerr,
+		[QIBPORTCNTR_INVALIDRLEN] = cr_invalidrlen,
+		[QIBPORTCNTR_UNSUPVL] = cr_txunsupvl,
+		[QIBPORTCNTR_EXCESSBUFOVFL] = cr_excessbufferovfl,
+		[QIBPORTCNTR_ERRLINK] = cr_errlink,
+		[QIBPORTCNTR_IBLINKDOWN] = cr_iblinkdown,
+		[QIBPORTCNTR_IBLINKERRRECOV] = cr_iblinkerrrecov,
+		[QIBPORTCNTR_LLI] = cr_locallinkintegrityerr,
+		[QIBPORTCNTR_PSINTERVAL] = cr_psinterval,
+		[QIBPORTCNTR_PSSTART] = cr_psstart,
+		[QIBPORTCNTR_PSSTAT] = cr_psstat,
+		[QIBPORTCNTR_VL15PKTDROP] = cr_vl15droppedpkt,
+		[QIBPORTCNTR_ERRPKEY] = cr_errpkey,
+		[QIBPORTCNTR_KHDROVFL] = 0xffff,
+	};
+
+	if (reg >= ARRAY_SIZE(xlator)) {
+		qib_devinfo(ppd->dd->pcidev,
+			 "Unimplemented portcounter %u\n", reg);
+		goto done;
+	}
+	creg = xlator[reg];
+
+	if (reg == QIBPORTCNTR_KHDROVFL) {
+		int i;
+
+		/* sum over all kernel contexts */
+		for (i = 0; i < dd->first_user_ctxt; i++)
+			ret += read_7220_creg32(dd, cr_portovfl + i);
+	}
+	if (creg == 0xffff)
+		goto done;
+
+	/*
+	 * Only fast-incrementing counters are 64-bit; use 32-bit reads to
+	 * avoid two independent reads on Opteron.
+	 */
+	if ((creg == cr_wordsend || creg == cr_wordrcv ||
+	     creg == cr_pktsend || creg == cr_pktrcv))
+		ret = read_7220_creg(dd, creg);
+	else
+		ret = read_7220_creg32(dd, creg);
+	if (creg == cr_ibsymbolerr) {
+		if (dd->pport->cpspec->ibdeltainprog)
+			ret -= ret - ppd->cpspec->ibsymsnap;
+		ret -= dd->pport->cpspec->ibsymdelta;
+	} else if (creg == cr_iblinkerrrecov) {
+		if (dd->pport->cpspec->ibdeltainprog)
+			ret -= ret - ppd->cpspec->iblnkerrsnap;
+		ret -= dd->pport->cpspec->iblnkerrdelta;
+	}
+done:
+	return ret;
+}
+
+/*
+ * Device counter names (not port-specific), one line per stat,
+ * single string.  Used by utilities like ipathstats to print the stats
+ * in a way which works for different versions of drivers, without changing
+ * the utility.  Names need to be 12 chars or less (w/o newline), for proper
+ * display by the utility.
+ * Non-error counters are first.
+ * Start of "error" counters is indicated by a leading "E " on the first
+ * "error" counter, and doesn't count in label length.
+ * The EgrOvfl list needs to be last so we truncate them at the configured
+ * context count for the device.
+ * cntr7220indices contains the corresponding register indices.
+ */
+static const char cntr7220names[] =
+	"Interrupts\n"
+	"HostBusStall\n"
+	"E RxTIDFull\n"
+	"RxTIDInvalid\n"
+	"Ctxt0EgrOvfl\n"
+	"Ctxt1EgrOvfl\n"
+	"Ctxt2EgrOvfl\n"
+	"Ctxt3EgrOvfl\n"
+	"Ctxt4EgrOvfl\n"
+	"Ctxt5EgrOvfl\n"
+	"Ctxt6EgrOvfl\n"
+	"Ctxt7EgrOvfl\n"
+	"Ctxt8EgrOvfl\n"
+	"Ctxt9EgrOvfl\n"
+	"Ctx10EgrOvfl\n"
+	"Ctx11EgrOvfl\n"
+	"Ctx12EgrOvfl\n"
+	"Ctx13EgrOvfl\n"
+	"Ctx14EgrOvfl\n"
+	"Ctx15EgrOvfl\n"
+	"Ctx16EgrOvfl\n";
+
+static const size_t cntr7220indices[] = {
+	cr_lbint,
+	cr_lbflowstall,
+	cr_errtidfull,
+	cr_errtidvalid,
+	cr_portovfl + 0,
+	cr_portovfl + 1,
+	cr_portovfl + 2,
+	cr_portovfl + 3,
+	cr_portovfl + 4,
+	cr_portovfl + 5,
+	cr_portovfl + 6,
+	cr_portovfl + 7,
+	cr_portovfl + 8,
+	cr_portovfl + 9,
+	cr_portovfl + 10,
+	cr_portovfl + 11,
+	cr_portovfl + 12,
+	cr_portovfl + 13,
+	cr_portovfl + 14,
+	cr_portovfl + 15,
+	cr_portovfl + 16,
+};
+
+/*
+ * Same as cntr7220names and cntr7220indices, but for port-specific counters.
+ * portcntr7220indices is somewhat complicated by some registers needing
+ * adjustments of various kinds, and those are ORed with _PORT_VIRT_FLAG.
+ */
+static const char portcntr7220names[] =
+	"TxPkt\n"
+	"TxFlowPkt\n"
+	"TxWords\n"
+	"RxPkt\n"
+	"RxFlowPkt\n"
+	"RxWords\n"
+	"TxFlowStall\n"
+	"TxDmaDesc\n"  /* 7220 and 7322-only */
+	"E RxDlidFltr\n"  /* 7220 and 7322-only */
+	"IBStatusChng\n"
+	"IBLinkDown\n"
+	"IBLnkRecov\n"
+	"IBRxLinkErr\n"
+	"IBSymbolErr\n"
+	"RxLLIErr\n"
+	"RxBadFormat\n"
+	"RxBadLen\n"
+	"RxBufOvrfl\n"
+	"RxEBP\n"
+	"RxFlowCtlErr\n"
+	"RxICRCerr\n"
+	"RxLPCRCerr\n"
+	"RxVCRCerr\n"
+	"RxInvalLen\n"
+	"RxInvalPKey\n"
+	"RxPktDropped\n"
+	"TxBadLength\n"
+	"TxDropped\n"
+	"TxInvalLen\n"
+	"TxUnderrun\n"
+	"TxUnsupVL\n"
+	"RxLclPhyErr\n" /* 7220 and 7322-only */
+	"RxVL15Drop\n" /* 7220 and 7322-only */
+	"RxVlErr\n" /* 7220 and 7322-only */
+	"XcessBufOvfl\n" /* 7220 and 7322-only */
+	;
+
+#define _PORT_VIRT_FLAG 0x8000 /* "virtual", need adjustments */
+static const size_t portcntr7220indices[] = {
+	QIBPORTCNTR_PKTSEND | _PORT_VIRT_FLAG,
+	cr_pktsendflow,
+	QIBPORTCNTR_WORDSEND | _PORT_VIRT_FLAG,
+	QIBPORTCNTR_PKTRCV | _PORT_VIRT_FLAG,
+	cr_pktrcvflowctrl,
+	QIBPORTCNTR_WORDRCV | _PORT_VIRT_FLAG,
+	QIBPORTCNTR_SENDSTALL | _PORT_VIRT_FLAG,
+	cr_txsdmadesc,
+	cr_rxdlidfltr,
+	cr_ibstatuschange,
+	QIBPORTCNTR_IBLINKDOWN | _PORT_VIRT_FLAG,
+	QIBPORTCNTR_IBLINKERRRECOV | _PORT_VIRT_FLAG,
+	QIBPORTCNTR_ERRLINK | _PORT_VIRT_FLAG,
+	QIBPORTCNTR_IBSYMBOLERR | _PORT_VIRT_FLAG,
+	QIBPORTCNTR_LLI | _PORT_VIRT_FLAG,
+	QIBPORTCNTR_BADFORMAT | _PORT_VIRT_FLAG,
+	QIBPORTCNTR_ERR_RLEN | _PORT_VIRT_FLAG,
+	QIBPORTCNTR_RCVOVFL | _PORT_VIRT_FLAG,
+	QIBPORTCNTR_RCVEBP | _PORT_VIRT_FLAG,
+	cr_rcvflowctrl_err,
+	QIBPORTCNTR_ERRICRC | _PORT_VIRT_FLAG,
+	QIBPORTCNTR_ERRLPCRC | _PORT_VIRT_FLAG,
+	QIBPORTCNTR_ERRVCRC | _PORT_VIRT_FLAG,
+	QIBPORTCNTR_INVALIDRLEN | _PORT_VIRT_FLAG,
+	QIBPORTCNTR_ERRPKEY | _PORT_VIRT_FLAG,
+	QIBPORTCNTR_RXDROPPKT | _PORT_VIRT_FLAG,
+	cr_invalidslen,
+	cr_senddropped,
+	cr_errslen,
+	cr_sendunderrun,
+	cr_txunsupvl,
+	QIBPORTCNTR_RXLOCALPHYERR | _PORT_VIRT_FLAG,
+	QIBPORTCNTR_VL15PKTDROP | _PORT_VIRT_FLAG,
+	QIBPORTCNTR_RXVLERR | _PORT_VIRT_FLAG,
+	QIBPORTCNTR_EXCESSBUFOVFL | _PORT_VIRT_FLAG,
+};
+
+/* do all the setup to make the counter reads efficient later */
+static void init_7220_cntrnames(struct qib_devdata *dd)
+{
+	int i, j = 0;
+	char *s;
+
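+	/*
+	 * Walk the newline-separated name list; i counts the names kept.
+	 * Counting in j starts at the Ctxt0EgrOvfl entry, so the per-context
+	 * EgrOvfl names beyond the configured context count are truncated.
+	 */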
+	for (i = 0, s = (char *)cntr7220names; s && j <= dd->cfgctxts;
+	     i++) {
+		/* we always have at least one counter before the egrovfl */
+		if (!j && !strncmp("Ctxt0EgrOvfl", s + 1, 12))
+			j = 1;
+		s = strchr(s + 1, '\n');
+		if (s && j)
+			j++;
+	}
+	dd->cspec->ncntrs = i;
+	if (!s)
+		/* full list; size is without terminating null */
+		dd->cspec->cntrnamelen = sizeof(cntr7220names) - 1;
+	else
+		dd->cspec->cntrnamelen = 1 + s - cntr7220names;
+	dd->cspec->cntrs = kmalloc(dd->cspec->ncntrs
+		* sizeof(u64), GFP_KERNEL);
+	if (!dd->cspec->cntrs)
+		qib_dev_err(dd, "Failed allocation for counters\n");
+
+	for (i = 0, s = (char *)portcntr7220names; s; i++)
+		s = strchr(s + 1, '\n');
+	dd->cspec->nportcntrs = i - 1;
+	dd->cspec->portcntrnamelen = sizeof(portcntr7220names) - 1;
+	dd->cspec->portcntrs = kmalloc(dd->cspec->nportcntrs
+		* sizeof(u64), GFP_KERNEL);
+	if (!dd->cspec->portcntrs)
+		qib_dev_err(dd, "Failed allocation for portcounters\n");
+}
+
+static u32 qib_read_7220cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
+			      u64 **cntrp)
+{
+	u32 ret;
+
+	if (!dd->cspec->cntrs) {
+		ret = 0;
+		goto done;
+	}
+
+	if (namep) {
+		*namep = (char *)cntr7220names;
+		ret = dd->cspec->cntrnamelen;
+		if (pos >= ret)
+			ret = 0; /* final read after getting everything */
+	} else {
+		u64 *cntr = dd->cspec->cntrs;
+		int i;
+
+		ret = dd->cspec->ncntrs * sizeof(u64);
+		if (!cntr || pos >= ret) {
+			/* everything read, or couldn't get memory */
+			ret = 0;
+			goto done;
+		}
+
+		*cntrp = cntr;
+		for (i = 0; i < dd->cspec->ncntrs; i++)
+			*cntr++ = read_7220_creg32(dd, cntr7220indices[i]);
+	}
+done:
+	return ret;
+}
+
+static u32 qib_read_7220portcntrs(struct qib_devdata *dd, loff_t pos, u32 port,
+				  char **namep, u64 **cntrp)
+{
+	u32 ret;
+
+	if (!dd->cspec->portcntrs) {
+		ret = 0;
+		goto done;
+	}
+	if (namep) {
+		*namep = (char *)portcntr7220names;
+		ret = dd->cspec->portcntrnamelen;
+		if (pos >= ret)
+			ret = 0; /* final read after getting everything */
+	} else {
+		u64 *cntr = dd->cspec->portcntrs;
+		struct qib_pportdata *ppd = &dd->pport[port];
+		int i;
+
+		ret = dd->cspec->nportcntrs * sizeof(u64);
+		if (!cntr || pos >= ret) {
+			/* everything read, or couldn't get memory */
+			ret = 0;
+			goto done;
+		}
+		*cntrp = cntr;
+		for (i = 0; i < dd->cspec->nportcntrs; i++) {
+			if (portcntr7220indices[i] & _PORT_VIRT_FLAG)
+				*cntr++ = qib_portcntr_7220(ppd,
+					portcntr7220indices[i] &
+					~_PORT_VIRT_FLAG);
+			else
+				*cntr++ = read_7220_creg32(dd,
+					   portcntr7220indices[i]);
+		}
+	}
+done:
+	return ret;
+}
+
+/**
+ * qib_get_7220_faststats - get word counters from chip before they overflow
+ * @opaque: contains a pointer to the qlogic_ib device qib_devdata
+ *
+ * This needs more work; in particular, a decision on whether we really
+ * need traffic_wds maintained the way it is.
+ * Called from add_timer.
+ */
+static void qib_get_7220_faststats(unsigned long opaque)
+{
+	struct qib_devdata *dd = (struct qib_devdata *) opaque;
+	struct qib_pportdata *ppd = dd->pport;
+	unsigned long flags;
+	u64 traffic_wds;
+
+	/*
+	 * don't access the chip while running diags, or memory diags can
+	 * fail
+	 */
+	if (!(dd->flags & QIB_INITTED) || dd->diag_client)
+		/* but re-arm the timer, for the diags case; won't hurt other cases */
+		goto done;
+
+	/*
+	 * We now try to maintain an activity timer, based on traffic
+	 * exceeding a threshold, so we need to check the word-counts
+	 * even if they are 64-bit.
+	 */
+	traffic_wds = qib_portcntr_7220(ppd, cr_wordsend) +
+		qib_portcntr_7220(ppd, cr_wordrcv);
+	spin_lock_irqsave(&dd->eep_st_lock, flags);
+	traffic_wds -= dd->traffic_wds;
+	dd->traffic_wds += traffic_wds;
+	if (traffic_wds  >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
+		atomic_add(5, &dd->active_time); /* should be a #define */
+	spin_unlock_irqrestore(&dd->eep_st_lock, flags);
+done:
+	mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
+}
+
+/*
+ * If we are using MSI, try to fall back to INTx.
+ */
+static int qib_7220_intr_fallback(struct qib_devdata *dd)
+{
+	if (!dd->msi_lo)
+		return 0;
+
+	qib_devinfo(dd->pcidev, "MSI interrupt not detected,"
+		 " trying INTx interrupts\n");
+	qib_7220_free_irq(dd);
+	qib_enable_intx(dd->pcidev);
+	/*
+	 * Some newer kernels require free_irq before disable_msi,
+	 * and irq can be changed during disable and INTx enable
+	 * and we need to therefore use the pcidev->irq value,
+	 * not our saved MSI value.
+	 */
+	dd->cspec->irq = dd->pcidev->irq;
+	qib_setup_7220_interrupt(dd);
+	return 1;
+}
+
+/*
+ * Reset the XGXS (between serdes and IBC).  Slightly less intrusive
+ * than resetting the IBC or external link state, and useful in some
+ * cases to cause some retraining.  To do this right, we reset IBC
+ * as well.
+ */
+static void qib_7220_xgxs_reset(struct qib_pportdata *ppd)
+{
+	u64 val, prev_val;
+	struct qib_devdata *dd = ppd->dd;
+
+	prev_val = qib_read_kreg64(dd, kr_xgxs_cfg);
+	val = prev_val | QLOGIC_IB_XGXS_RESET;
+	prev_val &= ~QLOGIC_IB_XGXS_RESET; /* be sure */
+	qib_write_kreg(dd, kr_control,
+		       dd->control & ~QLOGIC_IB_C_LINKENABLE);
+	qib_write_kreg(dd, kr_xgxs_cfg, val);
+	qib_read_kreg32(dd, kr_scratch);
+	qib_write_kreg(dd, kr_xgxs_cfg, prev_val);
+	qib_write_kreg(dd, kr_control, dd->control);
+}
+
+/*
+ * For this chip, we want to use the same buffer every time
+ * when we are trying to bring the link up (they are always VL15
+ * packets).  At that link state the packet should always go out immediately
+ * (or at least be discarded at the tx interface if the link is down).
+ * If it doesn't, and the buffer isn't available, that means some other
+ * sender has gotten ahead of us, and is preventing our packet from going
+ * out.  In that case, we flush all packets, and try again.  If that still
+ * fails, we fail the request, and hope things work the next time around.
+ *
+ * We don't need very complicated heuristics on whether the packet had
+ * time to go out or not, since even at SDR 1X, it goes out in very short
+ * time periods, covered by the chip reads done here and as part of the
+ * flush.
+ */
+static u32 __iomem *get_7220_link_buf(struct qib_pportdata *ppd, u32 *bnum)
+{
+	u32 __iomem *buf;
+	u32 lbuf = ppd->dd->cspec->lastbuf_for_pio;
+	int do_cleanup;
+	unsigned long flags;
+
+	/*
+	 * always blip to get avail list updated, since it's almost
+	 * always needed, and is fairly cheap.
+	 */
+	sendctrl_7220_mod(ppd->dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
+	qib_read_kreg64(ppd->dd, kr_scratch); /* extra chip flush */
+	buf = qib_getsendbuf_range(ppd->dd, bnum, lbuf, lbuf);
+	if (buf)
+		goto done;
+
+	spin_lock_irqsave(&ppd->sdma_lock, flags);
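+	/*
+	 * If the SDMA engine is idle, push it to hw_down via the state
+	 * machine and give up on the buffer for now; otherwise do a
+	 * hardware clean-up and retry the buffer once more below.
+	 */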
+	if (ppd->sdma_state.current_state == qib_sdma_state_s20_idle &&
+	    ppd->sdma_state.current_state != qib_sdma_state_s00_hw_down) {
+		__qib_sdma_process_event(ppd, qib_sdma_event_e00_go_hw_down);
+		do_cleanup = 0;
+	} else {
+		do_cleanup = 1;
+		qib_7220_sdma_hw_clean_up(ppd);
+	}
+	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+
+	if (do_cleanup) {
+		qib_read_kreg64(ppd->dd, kr_scratch); /* extra chip flush */
+		buf = qib_getsendbuf_range(ppd->dd, bnum, lbuf, lbuf);
+	}
+done:
+	return buf;
+}
+
+/*
+ * This code for non-IBTA-compliant IB speed negotiation is only known to
+ * work for the SDR to DDR transition, and only between an HCA and a switch
+ * with recent firmware.  It is based on observed heuristics, rather than
+ * actual knowledge of the non-compliant speed negotiation.
+ * It has a number of hard-coded fields, since the hope is to rewrite this
+ * when a spec is available on how the negotiation is intended to work.
+ */
+static void autoneg_7220_sendpkt(struct qib_pportdata *ppd, u32 *hdr,
+				 u32 dcnt, u32 *data)
+{
+	int i;
+	u64 pbc;
+	u32 __iomem *piobuf;
+	u32 pnum;
+	struct qib_devdata *dd = ppd->dd;
+
+	i = 0;
+	pbc = 7 + dcnt + 1; /* 7 dword header, dword data, icrc */
+	pbc |= PBC_7220_VL15_SEND;
+	while (!(piobuf = get_7220_link_buf(ppd, &pnum))) {
+		if (i++ > 5)
+			return;
+		udelay(2);
+	}
+	sendctrl_7220_mod(dd->pport, QIB_SENDCTRL_DISARM_BUF(pnum));
+	writeq(pbc, piobuf);
+	qib_flush_wc();
+	qib_pio_copy(piobuf + 2, hdr, 7);
+	qib_pio_copy(piobuf + 9, data, dcnt);
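+	/*
+	 * Boards using the special send trigger launch the packet by
+	 * writing a magic cookie at the trigger offset for this buffer
+	 * size, after the copy has been flushed to the chip.
+	 */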
+	if (dd->flags & QIB_USE_SPCL_TRIG) {
+		u32 spcl_off = (pnum >= dd->piobcnt2k) ? 2047 : 1023;
+
+		qib_flush_wc();
+		__raw_writel(0xaebecede, piobuf + spcl_off);
+	}
+	qib_flush_wc();
+	qib_sendbuf_done(dd, pnum);
+}
+
+/*
+ * _start packet gets sent twice at start, _done gets sent twice at end
+ */
+static void autoneg_7220_send(struct qib_pportdata *ppd, int which)
+{
+	struct qib_devdata *dd = ppd->dd;
+	static u32 swapped;
+	u32 dw, i, hcnt, dcnt, *data;
+	static u32 hdr[7] = { 0xf002ffff, 0x48ffff, 0x6400abba };
+	static u32 madpayload_start[0x40] = {
+		0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
+		0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x1, 0x1388, 0x15e, 0x1, /* rest 0's */
+		};
+	static u32 madpayload_done[0x40] = {
+		0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
+		0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x40000001, 0x1388, 0x15e, /* rest 0's */
+		};
+
+	dcnt = ARRAY_SIZE(madpayload_start);
+	hcnt = ARRAY_SIZE(hdr);
+	if (!swapped) {
+		/* for maintainability, do it at runtime */
+		for (i = 0; i < hcnt; i++) {
+			dw = (__force u32) cpu_to_be32(hdr[i]);
+			hdr[i] = dw;
+		}
+		for (i = 0; i < dcnt; i++) {
+			dw = (__force u32) cpu_to_be32(madpayload_start[i]);
+			madpayload_start[i] = dw;
+			dw = (__force u32) cpu_to_be32(madpayload_done[i]);
+			madpayload_done[i] = dw;
+		}
+		swapped = 1;
+	}
+
+	data = which ? madpayload_done : madpayload_start;
+
+	autoneg_7220_sendpkt(ppd, hdr, dcnt, data);
+	qib_read_kreg64(dd, kr_scratch);
+	udelay(2);
+	autoneg_7220_sendpkt(ppd, hdr, dcnt, data);
+	qib_read_kreg64(dd, kr_scratch);
+	udelay(2);
+}
+
+/*
+ * Do the absolute minimum to cause an IB speed change, and make it
+ * ready, but don't actually trigger the change.  The caller will
+ * do that when ready (if the link is in the Polling training state,
+ * it will happen immediately, otherwise when the link next goes down).
+ *
+ * This routine should only be used as part of the DDR autonegotiation
+ * code for devices that are not compliant with IB 1.2 (or code that
+ * fixes things up for same).
+ *
+ * When the link has gone down and autoneg is enabled, or autoneg has
+ * failed and we give up until next time, we set both speeds, and
+ * then we want IBTA enabled as well as "use max enabled speed".
+ */
+static void set_7220_ibspeed_fast(struct qib_pportdata *ppd, u32 speed)
+{
+	ppd->cpspec->ibcddrctrl &= ~(IBA7220_IBC_SPEED_AUTONEG_MASK |
+		IBA7220_IBC_IBTA_1_2_MASK);
+
+	if (speed == (QIB_IB_SDR | QIB_IB_DDR))
+		ppd->cpspec->ibcddrctrl |= IBA7220_IBC_SPEED_AUTONEG_MASK |
+			IBA7220_IBC_IBTA_1_2_MASK;
+	else
+		ppd->cpspec->ibcddrctrl |= speed == QIB_IB_DDR ?
+			IBA7220_IBC_SPEED_DDR : IBA7220_IBC_SPEED_SDR;
+
+	qib_write_kreg(ppd->dd, kr_ibcddrctrl, ppd->cpspec->ibcddrctrl);
+	qib_write_kreg(ppd->dd, kr_scratch, 0);
+}
+
+/*
+ * This routine is only used when we are not talking to another
+ * IB 1.2-compliant device that we think can do DDR.
+ * (This includes all existing switch chips as of Oct 2007.)
+ * 1.2-compliant devices go directly to DDR prior to reaching INIT
+ */
+static void try_7220_autoneg(struct qib_pportdata *ppd)
+{
+	unsigned long flags;
+
+	/*
+	 * Required for older non-IB1.2 DDR switches.  Newer
+	 * non-IB-compliant switches don't need it, but so far,
+	 * aren't bothered by it either.  "Magic constant"
+	 */
+	qib_write_kreg(ppd->dd, kr_ncmodectrl, 0x3b9dc07);
+
+	spin_lock_irqsave(&ppd->lflags_lock, flags);
+	ppd->lflags |= QIBL_IB_AUTONEG_INPROG;
+	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+	autoneg_7220_send(ppd, 0);
+	set_7220_ibspeed_fast(ppd, QIB_IB_DDR);
+
+	toggle_7220_rclkrls(ppd->dd);
+	/* 2 msec is minimum length of a poll cycle */
+	schedule_delayed_work(&ppd->cpspec->autoneg_work,
+			      msecs_to_jiffies(2));
+}
+
+/*
+ * Handle the empirically determined mechanism for auto-negotiation
+ * of DDR speed with switches.
+ */
+static void autoneg_7220_work(struct work_struct *work)
+{
+	struct qib_pportdata *ppd;
+	struct qib_devdata *dd;
+	u64 startms;
+	u32 i;
+	unsigned long flags;
+
+	ppd = &container_of(work, struct qib_chippport_specific,
+			    autoneg_work.work)->pportdata;
+	dd = ppd->dd;
+
+	startms = jiffies_to_msecs(jiffies);
+
+	/*
+	 * Busy wait for this first part, it should be at most a
+	 * few hundred usec, since we scheduled ourselves for 2msec.
+	 */
+	for (i = 0; i < 25; i++) {
+		if (SYM_FIELD(ppd->lastibcstat, IBCStatus, LinkTrainingState)
+		     == IB_7220_LT_STATE_POLLQUIET) {
+			qib_set_linkstate(ppd, QIB_IB_LINKDOWN_DISABLE);
+			break;
+		}
+		udelay(100);
+	}
+
+	if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
+		goto done; /* we got there early or told to stop */
+
+	/* we expect this to timeout */
+	if (wait_event_timeout(ppd->cpspec->autoneg_wait,
+			       !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
+			       msecs_to_jiffies(90)))
+		goto done;
+
+	toggle_7220_rclkrls(dd);
+
+	/* we expect this to timeout */
+	if (wait_event_timeout(ppd->cpspec->autoneg_wait,
+			       !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
+			       msecs_to_jiffies(1700)))
+		goto done;
+
+	set_7220_ibspeed_fast(ppd, QIB_IB_SDR);
+	toggle_7220_rclkrls(dd);
+
+	/*
+	 * Wait up to 250 msec for link to train and get to INIT at DDR;
+	 * this should terminate early.
+	 */
+	wait_event_timeout(ppd->cpspec->autoneg_wait,
+		!(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
+		msecs_to_jiffies(250));
+done:
+	if (ppd->lflags & QIBL_IB_AUTONEG_INPROG) {
+		spin_lock_irqsave(&ppd->lflags_lock, flags);
+		ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
+		if (dd->cspec->autoneg_tries == AUTONEG_TRIES) {
+			ppd->lflags |= QIBL_IB_AUTONEG_FAILED;
+			dd->cspec->autoneg_tries = 0;
+		}
+		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+		set_7220_ibspeed_fast(ppd, ppd->link_speed_enabled);
+	}
+}
+
+static u32 qib_7220_iblink_state(u64 ibcs)
+{
+	u32 state = (u32)SYM_FIELD(ibcs, IBCStatus, LinkState);
+
+	switch (state) {
+	case IB_7220_L_STATE_INIT:
+		state = IB_PORT_INIT;
+		break;
+	case IB_7220_L_STATE_ARM:
+		state = IB_PORT_ARMED;
+		break;
+	case IB_7220_L_STATE_ACTIVE:
+		/* fall through */
+	case IB_7220_L_STATE_ACT_DEFER:
+		state = IB_PORT_ACTIVE;
+		break;
+	default: /* fall through */
+	case IB_7220_L_STATE_DOWN:
+		state = IB_PORT_DOWN;
+		break;
+	}
+	return state;
+}
+
+/* returns the IBTA physical port state, rather than the IBC link training state */
+static u8 qib_7220_phys_portstate(u64 ibcs)
+{
+	u8 state = (u8)SYM_FIELD(ibcs, IBCStatus, LinkTrainingState);
+	return qib_7220_physportstate[state];
+}
+
+static int qib_7220_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
+{
+	int ret = 0, symadj = 0;
+	struct qib_devdata *dd = ppd->dd;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ppd->lflags_lock, flags);
+	ppd->lflags &= ~QIBL_IB_FORCE_NOTIFY;
+	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+
+	if (!ibup) {
+		/*
+		 * When the link goes down we don't want AEQ running, so it
+		 * won't interfere with IBC training, etc., and we need
+		 * to go back to the static SerDes preset values.
+		 */
+		if (!(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
+				     QIBL_IB_AUTONEG_INPROG)))
+			set_7220_ibspeed_fast(ppd, ppd->link_speed_enabled);
+		if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
+			qib_sd7220_presets(dd);
+			qib_cancel_sends(ppd); /* initial disarm, etc. */
+			spin_lock_irqsave(&ppd->sdma_lock, flags);
+			if (__qib_sdma_running(ppd))
+				__qib_sdma_process_event(ppd,
+					qib_sdma_event_e70_go_idle);
+			spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+		}
+		/* this might be better placed in qib_sd7220_presets() */
+		set_7220_relock_poll(dd, ibup);
+	} else {
+		if (qib_compat_ddr_negotiate &&
+		    !(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
+				     QIBL_IB_AUTONEG_INPROG)) &&
+		    ppd->link_speed_active == QIB_IB_SDR &&
+		    (ppd->link_speed_enabled & (QIB_IB_DDR | QIB_IB_SDR)) ==
+		    (QIB_IB_DDR | QIB_IB_SDR) &&
+		    dd->cspec->autoneg_tries < AUTONEG_TRIES) {
+			/* we are SDR, and DDR auto-negotiation enabled */
+			++dd->cspec->autoneg_tries;
+			if (!ppd->cpspec->ibdeltainprog) {
+				ppd->cpspec->ibdeltainprog = 1;
+				ppd->cpspec->ibsymsnap = read_7220_creg32(dd,
+					cr_ibsymbolerr);
+				ppd->cpspec->iblnkerrsnap = read_7220_creg32(dd,
+					cr_iblinkerrrecov);
+			}
+			try_7220_autoneg(ppd);
+			ret = 1; /* no other IB status change processing */
+		} else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
+			   ppd->link_speed_active == QIB_IB_SDR) {
+			autoneg_7220_send(ppd, 1);
+			set_7220_ibspeed_fast(ppd, QIB_IB_DDR);
+			udelay(2);
+			toggle_7220_rclkrls(dd);
+			ret = 1; /* no other IB status change processing */
+		} else {
+			if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
+			    (ppd->link_speed_active & QIB_IB_DDR)) {
+				spin_lock_irqsave(&ppd->lflags_lock, flags);
+				ppd->lflags &= ~(QIBL_IB_AUTONEG_INPROG |
+						 QIBL_IB_AUTONEG_FAILED);
+				spin_unlock_irqrestore(&ppd->lflags_lock,
+						       flags);
+				dd->cspec->autoneg_tries = 0;
+				/* re-enable SDR, for next link down */
+				set_7220_ibspeed_fast(ppd,
+						      ppd->link_speed_enabled);
+				wake_up(&ppd->cpspec->autoneg_wait);
+				symadj = 1;
+			} else if (ppd->lflags & QIBL_IB_AUTONEG_FAILED) {
+				/*
+				 * Clear autoneg failure flag, and do setup
+				 * so we'll try next time link goes down and
+				 * back to INIT (possibly connected to a
+				 * different device).
+				 */
+				spin_lock_irqsave(&ppd->lflags_lock, flags);
+				ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
+				spin_unlock_irqrestore(&ppd->lflags_lock,
+						       flags);
+				ppd->cpspec->ibcddrctrl |=
+					IBA7220_IBC_IBTA_1_2_MASK;
+				qib_write_kreg(dd, kr_ncmodectrl, 0);
+				symadj = 1;
+			}
+		}
+
+		if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
+			symadj = 1;
+
+		if (!ret) {
+			ppd->delay_mult = rate_to_delay
+			    [(ibcs >> IBA7220_LINKSPEED_SHIFT) & 1]
+			    [(ibcs >> IBA7220_LINKWIDTH_SHIFT) & 1];
+
+			set_7220_relock_poll(dd, ibup);
+			spin_lock_irqsave(&ppd->sdma_lock, flags);
+			/*
+			 * Unlike the 7322, the 7220 needs this, due to the
+			 * lack of an interrupt in some cases when SDMA is
+			 * active while the link goes down.
+			 */
+			if (ppd->sdma_state.current_state !=
+			    qib_sdma_state_s20_idle)
+				__qib_sdma_process_event(ppd,
+					qib_sdma_event_e00_go_hw_down);
+			spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+		}
+	}
+
+	if (symadj) {
+		if (ppd->cpspec->ibdeltainprog) {
+			ppd->cpspec->ibdeltainprog = 0;
+			ppd->cpspec->ibsymdelta += read_7220_creg32(ppd->dd,
+				cr_ibsymbolerr) - ppd->cpspec->ibsymsnap;
+			ppd->cpspec->iblnkerrdelta += read_7220_creg32(ppd->dd,
+				cr_iblinkerrrecov) - ppd->cpspec->iblnkerrsnap;
+		}
+	} else if (!ibup && qib_compat_ddr_negotiate &&
+		   !ppd->cpspec->ibdeltainprog &&
+			!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
+		ppd->cpspec->ibdeltainprog = 1;
+		ppd->cpspec->ibsymsnap = read_7220_creg32(ppd->dd,
+							  cr_ibsymbolerr);
+		ppd->cpspec->iblnkerrsnap = read_7220_creg32(ppd->dd,
+						     cr_iblinkerrrecov);
+	}
+
+	if (!ret)
+		qib_setup_7220_setextled(ppd, ibup);
+	return ret;
+}
+
+/*
+ * Does a read/modify/write to the appropriate registers to
+ * set output and direction bits selected by mask.
+ * These are in their canonical positions (e.g. the lsb of
+ * dir will end up in D48 of extctrl on existing chips).
+ * Returns the contents of GP Inputs.
+ */
+static int gpio_7220_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask)
+{
+	u64 read_val, new_out;
+	unsigned long flags;
+
+	if (mask) {
+		/* some bits being written, lock access to GPIO */
+		dir &= mask;
+		out &= mask;
+		spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
+		dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe));
+		dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe));
+		new_out = (dd->cspec->gpio_out & ~mask) | out;
+
+		qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
+		qib_write_kreg(dd, kr_gpio_out, new_out);
+		dd->cspec->gpio_out = new_out;
+		spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
+	}
+	/*
+	 * It is unlikely that a read at this time would get valid
+	 * data on a pin whose direction line was set in the same
+	 * call to this function. We include the read here because
+	 * that allows us to potentially combine a change on one pin with
+	 * a read on another, and because the old code did something like
+	 * this.
+	 */
+	read_val = qib_read_kreg64(dd, kr_extstatus);
+	return SYM_FIELD(read_val, EXTStatus, GPIOIn);
+}
+
+/*
+ * Read fundamental info we need to use the chip.  These are
+ * the registers that describe chip capabilities, and are
+ * saved in shadow registers.
+ */
+static void get_7220_chip_params(struct qib_devdata *dd)
+{
+	u64 val;
+	u32 piobufs;
+	int mtu;
+
+	dd->uregbase = qib_read_kreg32(dd, kr_userregbase);
+
+	dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt);
+	dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase);
+	dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase);
+	dd->palign = qib_read_kreg32(dd, kr_palign);
+	dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase);
+	dd->pio2k_bufbase = dd->piobufbase & 0xffffffff;
+
+	val = qib_read_kreg64(dd, kr_sendpiosize);
+	dd->piosize2k = val & ~0U;
+	dd->piosize4k = val >> 32;
+
+	mtu = ib_mtu_enum_to_int(qib_ibmtu);
+	if (mtu == -1)
+		mtu = QIB_DEFAULT_MTU;
+	dd->pport->ibmtu = (u32)mtu;
+
+	val = qib_read_kreg64(dd, kr_sendpiobufcnt);
+	dd->piobcnt2k = val & ~0U;
+	dd->piobcnt4k = val >> 32;
+	/* these may be adjusted in init_chip_wc_pat() */
+	dd->pio2kbase = (u32 __iomem *)
+		((char __iomem *) dd->kregbase + dd->pio2k_bufbase);
+	if (dd->piobcnt4k) {
+		dd->pio4kbase = (u32 __iomem *)
+			((char __iomem *) dd->kregbase +
+			 (dd->piobufbase >> 32));
+		/*
+		 * 4K buffers take 2 pages; we use roundup just to be
+		 * paranoid; we calculate it once here, rather than on
+		 * every buf allocate.
+		 */
+		dd->align4k = ALIGN(dd->piosize4k, dd->palign);
+	}
+
+	piobufs = dd->piobcnt4k + dd->piobcnt2k;
+
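+	/*
+	 * The pioavail shadow uses 2 bits per buffer, so each 64-bit
+	 * register covers 32 buffers; round up to the number of u64
+	 * registers needed.
+	 */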
+	dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) /
+		(sizeof(u64) * BITS_PER_BYTE / 2);
+}
+
+/*
+ * The chip base addresses in cspec and cpspec have to be set
+ * after possible init_chip_wc_pat(), rather than in
+ * get_7220_chip_params(), so split out as a separate function.
+ */
+static void set_7220_baseaddrs(struct qib_devdata *dd)
+{
+	u32 cregbase;
+	/* init after possible re-map in init_chip_wc_pat() */
+	cregbase = qib_read_kreg32(dd, kr_counterregbase);
+	dd->cspec->cregbase = (u64 __iomem *)
+		((char __iomem *) dd->kregbase + cregbase);
+
+	dd->egrtidbase = (u64 __iomem *)
+		((char __iomem *) dd->kregbase + dd->rcvegrbase);
+}
+
+
+#define SENDCTRL_SHADOWED (SYM_MASK(SendCtrl, SendIntBufAvail) |	\
+			   SYM_MASK(SendCtrl, SPioEnable) |		\
+			   SYM_MASK(SendCtrl, SSpecialTriggerEn) |	\
+			   SYM_MASK(SendCtrl, SendBufAvailUpd) |	\
+			   SYM_MASK(SendCtrl, AvailUpdThld) |		\
+			   SYM_MASK(SendCtrl, SDmaEnable) |		\
+			   SYM_MASK(SendCtrl, SDmaIntEnable) |		\
+			   SYM_MASK(SendCtrl, SDmaHalt) |		\
+			   SYM_MASK(SendCtrl, SDmaSingleDescriptor))
+
+static int sendctrl_hook(struct qib_devdata *dd,
+			 const struct diag_observer *op,
+			 u32 offs, u64 *data, u64 mask, int only_32)
+{
+	unsigned long flags;
+	unsigned idx = offs / sizeof(u64);
+	u64 local_data, all_bits;
+
+	if (idx != kr_sendctrl) {
+		qib_dev_err(dd, "SendCtrl Hook called with offs %X, %s-bit\n",
+			    offs, only_32 ? "32" : "64");
+		return 0;
+	}
+
+	all_bits = ~0ULL;
+	if (only_32)
+		all_bits >>= 32;
+	spin_lock_irqsave(&dd->sendctrl_lock, flags);
+	if ((mask & all_bits) != all_bits) {
+		/*
+		 * At least some mask bits are zero, so we need
+		 * to read. The judgment call is whether to read
+		 * from the register or the shadow. First cut: read the
+		 * register, and complain if any bits which should be
+		 * shadowed differ from their shadowed value.
+		 */
+		if (only_32)
+			local_data = (u64)qib_read_kreg32(dd, idx);
+		else
+			local_data = qib_read_kreg64(dd, idx);
+		qib_dev_err(dd, "Sendctrl -> %X, Shad -> %X\n",
+			    (u32)local_data, (u32)dd->sendctrl);
+		if ((local_data & SENDCTRL_SHADOWED) !=
+		    (dd->sendctrl & SENDCTRL_SHADOWED))
+			qib_dev_err(dd, "Sendctrl read: %X shadow is %X\n",
+				(u32)local_data, (u32) dd->sendctrl);
+		*data = (local_data & ~mask) | (*data & mask);
+	}
+	if (mask) {
+		/*
+		 * At least some mask bits are one, so we need
+		 * to write, but only shadow some bits.
+		 */
+		u64 sval, tval; /* Shadowed, transient */
+
+		/*
+		 * New shadow val is bits we don't want to touch,
+		 * ORed with bits we do, that are intended for shadow.
+		 */
+		sval = (dd->sendctrl & ~mask);
+		sval |= *data & SENDCTRL_SHADOWED & mask;
+		dd->sendctrl = sval;
+		tval = sval | (*data & ~SENDCTRL_SHADOWED & mask);
+		qib_dev_err(dd, "Sendctrl <- %X, Shad <- %X\n",
+			    (u32)tval, (u32)sval);
+		qib_write_kreg(dd, kr_sendctrl, tval);
+		qib_write_kreg(dd, kr_scratch, 0ULL);
+	}
+	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
+
+	return only_32 ? 4 : 8;
+}
+
+static const struct diag_observer sendctrl_observer = {
+	sendctrl_hook, kr_sendctrl * sizeof(u64),
+	kr_sendctrl * sizeof(u64)
+};
+
+/*
+ * write the final few registers that depend on some of the
+ * init setup.  Done late in init, just before bringing up
+ * the serdes.
+ */
+static int qib_late_7220_initreg(struct qib_devdata *dd)
+{
+	int ret = 0;
+	u64 val;
+
+	qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize);
+	qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize);
+	qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt);
+	qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys);
+	val = qib_read_kreg64(dd, kr_sendpioavailaddr);
+	if (val != dd->pioavailregs_phys) {
+		qib_dev_err(dd, "Catastrophic software error, "
+			    "SendPIOAvailAddr written as %lx, "
+			    "read back as %llx\n",
+			    (unsigned long) dd->pioavailregs_phys,
+			    (unsigned long long) val);
+		ret = -EINVAL;
+	}
+	qib_register_observer(dd, &sendctrl_observer);
+	return ret;
+}
+
+static int qib_init_7220_variables(struct qib_devdata *dd)
+{
+	struct qib_chippport_specific *cpspec;
+	struct qib_pportdata *ppd;
+	int ret = 0;
+	u32 sbufs, updthresh;
+
+	cpspec = (struct qib_chippport_specific *)(dd + 1);
+	ppd = &cpspec->pportdata;
+	dd->pport = ppd;
+	dd->num_pports = 1;
+
+	dd->cspec = (struct qib_chip_specific *)(cpspec + dd->num_pports);
+	ppd->cpspec = cpspec;
+
+	spin_lock_init(&dd->cspec->sdepb_lock);
+	spin_lock_init(&dd->cspec->rcvmod_lock);
+	spin_lock_init(&dd->cspec->gpio_lock);
+
+	/* we haven't yet set QIB_PRESENT, so use read directly */
+	dd->revision = readq(&dd->kregbase[kr_revision]);
+
+	if ((dd->revision & 0xffffffffU) == 0xffffffffU) {
+		qib_dev_err(dd, "Revision register read failure, "
+			    "giving up initialization\n");
+		ret = -ENODEV;
+		goto bail;
+	}
+	dd->flags |= QIB_PRESENT;  /* now register routines work */
+
+	dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R,
+				    ChipRevMajor);
+	dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R,
+				    ChipRevMinor);
+
+	get_7220_chip_params(dd);
+	qib_7220_boardname(dd);
+
+	/*
+	 * GPIO bits for TWSI data and clock,
+	 * used for serial EEPROM.
+	 */
+	dd->gpio_sda_num = _QIB_GPIO_SDA_NUM;
+	dd->gpio_scl_num = _QIB_GPIO_SCL_NUM;
+	dd->twsi_eeprom_dev = QIB_TWSI_EEPROM_DEV;
+
+	dd->flags |= QIB_HAS_INTX | QIB_HAS_LINK_LATENCY |
+		QIB_NODMA_RTAIL | QIB_HAS_THRESH_UPDATE;
+	dd->flags |= qib_special_trigger ?
+		QIB_USE_SPCL_TRIG : QIB_HAS_SEND_DMA;
+
+	/*
+	 * EEPROM error log 0 is TXE Parity errors. 1 is RXE Parity.
+	 * 2 is Some Misc, 3 is reserved for future.
+	 */
+	dd->eep_st_masks[0].hwerrs_to_log = HWE_MASK(TXEMemParityErr);
+
+	dd->eep_st_masks[1].hwerrs_to_log = HWE_MASK(RXEMemParityErr);
+
+	dd->eep_st_masks[2].errs_to_log = ERR_MASK(ResetNegated);
+
+	init_waitqueue_head(&cpspec->autoneg_wait);
+	INIT_DELAYED_WORK(&cpspec->autoneg_work, autoneg_7220_work);
+
+	qib_init_pportdata(ppd, dd, 0, 1);
+	ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
+	ppd->link_speed_supported = QIB_IB_SDR | QIB_IB_DDR;
+
+	ppd->link_width_enabled = ppd->link_width_supported;
+	ppd->link_speed_enabled = ppd->link_speed_supported;
+	/*
+	 * Set the initial values to reasonable default, will be set
+	 * for real when link is up.
+	 */
+	ppd->link_width_active = IB_WIDTH_4X;
+	ppd->link_speed_active = QIB_IB_SDR;
+	ppd->delay_mult = rate_to_delay[0][1];
+	ppd->vls_supported = IB_VL_VL0;
+	ppd->vls_operational = ppd->vls_supported;
+
+	if (!qib_mini_init)
+		qib_write_kreg(dd, kr_rcvbthqp, QIB_KD_QP);
+
+	init_timer(&ppd->cpspec->chase_timer);
+	ppd->cpspec->chase_timer.function = reenable_7220_chase;
+	ppd->cpspec->chase_timer.data = (unsigned long)ppd;
+
+	qib_num_cfg_vls = 1; /* if any 7220's, only one VL */
+
+	dd->rcvhdrentsize = QIB_RCVHDR_ENTSIZE;
+	dd->rcvhdrsize = QIB_DFLT_RCVHDRSIZE;
+	dd->rhf_offset =
+		dd->rcvhdrentsize - sizeof(u64) / sizeof(u32);
+
+	/* we always allocate at least 2048 bytes for eager buffers */
+	ret = ib_mtu_enum_to_int(qib_ibmtu);
+	dd->rcvegrbufsize = ret != -1 ? max(ret, 2048) : QIB_DEFAULT_MTU;
+
+	qib_7220_tidtemplate(dd);
+
+	/*
+	 * We can request a receive interrupt for 1 or
+	 * more packets from current offset.  For now, we set this
+	 * up for a single packet.
+	 */
+	dd->rhdrhead_intr_off = 1ULL << 32;
+
+	/* setup the stats timer; the add_timer is done at end of init */
+	init_timer(&dd->stats_timer);
+	dd->stats_timer.function = qib_get_7220_faststats;
+	dd->stats_timer.data = (unsigned long) dd;
+	dd->stats_timer.expires = jiffies + ACTIVITY_TIMER * HZ;
+
+	/*
+	 * Control[4] has been added to change the arbitration within
+	 * the SDMA engine between favoring data fetches over descriptor
+	 * fetches.  qib_sdma_fetch_arb==0 gives data fetches priority.
+	 */
+	if (qib_sdma_fetch_arb)
+		dd->control |= 1 << 4;
+
+	dd->ureg_align = 0x10000;  /* 64KB alignment */
+
+	dd->piosize2kmax_dwords = (dd->piosize2k >> 2)-1;
+	qib_7220_config_ctxts(dd);
+	qib_set_ctxtcnt(dd);  /* needed for PAT setup */
+
+	if (qib_wc_pat) {
+		ret = init_chip_wc_pat(dd, 0);
+		if (ret)
+			goto bail;
+	}
+	set_7220_baseaddrs(dd); /* set chip access pointers now */
+
+	ret = 0;
+	if (qib_mini_init)
+		goto bail;
+
+	ret = qib_create_ctxts(dd);
+	init_7220_cntrnames(dd);
+
+	/*
+	 * Use all the 4KB buffers for kernel SDMA, or zero of them if
+	 * SDMA is not enabled.  Reserve for other kernel use (sending
+	 * SMI, MAD, and ACKs) either the update threshold amount or 3
+	 * buffers, whichever is greater; if we aren't enabling SDMA,
+	 * the kernel instead keeps all the 4k bufs.
+	 * If the reserve were less than the update threshold, we could
+	 * wait a long time for an update.  Coded this way because we
+	 * sometimes change the update threshold for various reasons,
+	 * and we want this to remain robust.
+	 */
+	updthresh = 8U; /* update threshold */
+	if (dd->flags & QIB_HAS_SEND_DMA) {
+		dd->cspec->sdmabufcnt =  dd->piobcnt4k;
+		sbufs = updthresh > 3 ? updthresh : 3;
+	} else {
+		dd->cspec->sdmabufcnt = 0;
+		sbufs = dd->piobcnt4k;
+	}
+
+	dd->cspec->lastbuf_for_pio = dd->piobcnt2k + dd->piobcnt4k -
+		dd->cspec->sdmabufcnt;
+	dd->lastctxt_piobuf = dd->cspec->lastbuf_for_pio - sbufs;
+	dd->cspec->lastbuf_for_pio--; /* range is <= , not < */
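+	/* what's left after the reservations is shared among user contexts */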
+	dd->pbufsctxt = dd->lastctxt_piobuf /
+		(dd->cfgctxts - dd->first_user_ctxt);
+
+	/*
+	 * If we are at 16 user contexts, we will have only 7 sbufs
+	 * per context, so drop the update threshold to match.  We
+	 * want to update before we actually run out, at low pbufs/ctxt,
+	 * so give ourselves some margin.
+	 */
+	if ((dd->pbufsctxt - 2) < updthresh)
+		updthresh = dd->pbufsctxt - 2;
+
+	dd->cspec->updthresh_dflt = updthresh;
+	dd->cspec->updthresh = updthresh;
+
+	/* before full enable, no interrupts, no locking needed */
+	dd->sendctrl |= (updthresh & SYM_RMASK(SendCtrl, AvailUpdThld))
+			     << SYM_LSB(SendCtrl, AvailUpdThld);
+
+	dd->psxmitwait_supported = 1;
+	dd->psxmitwait_check_rate = QIB_7220_PSXMITWAIT_CHECK_RATE;
+bail:
+	return ret;
+}
+
+static u32 __iomem *qib_7220_getsendbuf(struct qib_pportdata *ppd, u64 pbc,
+					u32 *pbufnum)
+{
+	u32 first, last, plen = pbc & QIB_PBC_LENGTH_MASK;
+	struct qib_devdata *dd = ppd->dd;
+	u32 __iomem *buf;
+
+	if (((pbc >> 32) & PBC_7220_VL15_SEND_CTRL) &&
+		!(ppd->lflags & (QIBL_IB_AUTONEG_INPROG | QIBL_LINKACTIVE)))
+		buf = get_7220_link_buf(ppd, pbufnum);
+	else {
+		if ((plen + 1) > dd->piosize2kmax_dwords)
+			first = dd->piobcnt2k;
+		else
+			first = 0;
+		/* try 4k if all 2k busy, so same last for both sizes */
+		last = dd->cspec->lastbuf_for_pio;
+		buf = qib_getsendbuf_range(dd, pbufnum, first, last);
+	}
+	return buf;
+}
+
+/* these 2 "counters" are really control registers, and are always RW */
+static void qib_set_cntr_7220_sample(struct qib_pportdata *ppd, u32 intv,
+				     u32 start)
+{
+	write_7220_creg(ppd->dd, cr_psinterval, intv);
+	write_7220_creg(ppd->dd, cr_psstart, start);
+}
+
+/*
+ * NOTE: no real attempt is made to generalize the SDMA stuff.
+ * At some point "soon" we will have a new, more generalized
+ * SDMA interface, and then we'll clean this up.
+ */
+
+/* Must be called with sdma_lock held, or before init finished */
+static void qib_sdma_update_7220_tail(struct qib_pportdata *ppd, u16 tail)
+{
+	/* Commit writes to memory and advance the tail on the chip */
+	wmb();
+	ppd->sdma_descq_tail = tail;
+	qib_write_kreg(ppd->dd, kr_senddmatail, tail);
+}
+
+static void qib_sdma_set_7220_desc_cnt(struct qib_pportdata *ppd, unsigned cnt)
+{
+}
+
+static struct sdma_set_state_action sdma_7220_action_table[] = {
+	[qib_sdma_state_s00_hw_down] = {
+		.op_enable = 0,
+		.op_intenable = 0,
+		.op_halt = 0,
+		.go_s99_running_tofalse = 1,
+	},
+	[qib_sdma_state_s10_hw_start_up_wait] = {
+		.op_enable = 1,
+		.op_intenable = 1,
+		.op_halt = 1,
+	},
+	[qib_sdma_state_s20_idle] = {
+		.op_enable = 1,
+		.op_intenable = 1,
+		.op_halt = 1,
+	},
+	[qib_sdma_state_s30_sw_clean_up_wait] = {
+		.op_enable = 0,
+		.op_intenable = 1,
+		.op_halt = 0,
+	},
+	[qib_sdma_state_s40_hw_clean_up_wait] = {
+		.op_enable = 1,
+		.op_intenable = 1,
+		.op_halt = 1,
+	},
+	[qib_sdma_state_s50_hw_halt_wait] = {
+		.op_enable = 1,
+		.op_intenable = 1,
+		.op_halt = 1,
+	},
+	[qib_sdma_state_s99_running] = {
+		.op_enable = 1,
+		.op_intenable = 1,
+		.op_halt = 0,
+		.go_s99_running_totrue = 1,
+	},
+};
+
+static void qib_7220_sdma_init_early(struct qib_pportdata *ppd)
+{
+	ppd->sdma_state.set_state_action = sdma_7220_action_table;
+}
+
+static int init_sdma_7220_regs(struct qib_pportdata *ppd)
+{
+	struct qib_devdata *dd = ppd->dd;
+	unsigned i, n;
+	u64 senddmabufmask[3] = { 0 };
+
+	/* Set SendDmaBase */
+	qib_write_kreg(dd, kr_senddmabase, ppd->sdma_descq_phys);
+	qib_sdma_7220_setlengen(ppd);
+	qib_sdma_update_7220_tail(ppd, 0); /* Set SendDmaTail */
+	/* Set SendDmaHeadAddr */
+	qib_write_kreg(dd, kr_senddmaheadaddr, ppd->sdma_head_phys);
+
+	/*
+	 * Reserve all the former "kernel" piobufs, using the high number
+	 * range so we get as many 4K buffers as possible.
+	 */
+	n = dd->piobcnt2k + dd->piobcnt4k;
+	i = n - dd->cspec->sdmabufcnt;
+
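+	/* buffer i maps to bit (i % 64) of senddmabufmask[i / 64] */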
+	for (; i < n; ++i) {
+		unsigned word = i / 64;
+		unsigned bit = i & 63;
+
+		BUG_ON(word >= 3);
+		senddmabufmask[word] |= 1ULL << bit;
+	}
+	qib_write_kreg(dd, kr_senddmabufmask0, senddmabufmask[0]);
+	qib_write_kreg(dd, kr_senddmabufmask1, senddmabufmask[1]);
+	qib_write_kreg(dd, kr_senddmabufmask2, senddmabufmask[2]);
+
+	ppd->sdma_state.first_sendbuf = i;
+	ppd->sdma_state.last_sendbuf = n;
+
+	return 0;
+}
+
+/* sdma_lock must be held */
+static u16 qib_sdma_7220_gethead(struct qib_pportdata *ppd)
+{
+	struct qib_devdata *dd = ppd->dd;
+	int sane;
+	int use_dmahead;
+	u16 swhead;
+	u16 swtail;
+	u16 cnt;
+	u16 hwhead;
+
+	use_dmahead = __qib_sdma_running(ppd) &&
+		(dd->flags & QIB_HAS_SDMA_TIMEOUT);
+retry:
+	hwhead = use_dmahead ?
+		(u16)le64_to_cpu(*ppd->sdma_head_dma) :
+		(u16)qib_read_kreg32(dd, kr_senddmahead);
+
+	swhead = ppd->sdma_descq_head;
+	swtail = ppd->sdma_descq_tail;
+	cnt = ppd->sdma_descq_cnt;
+
+	if (swhead < swtail) {
+		/* not wrapped */
+		sane = (hwhead >= swhead) & (hwhead <= swtail);
+	} else if (swhead > swtail) {
+		/* wrapped around */
+		sane = ((hwhead >= swhead) && (hwhead < cnt)) ||
+			(hwhead <= swtail);
+	} else {
+		/* empty */
+		sane = (hwhead == swhead);
+	}
+
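+	/*
+	 * Example: swhead 5, swtail 10 (no wrap) means any hwhead in
+	 * [5, 10] is credible; swhead 10, swtail 5 (wrapped) means
+	 * hwhead must lie in [10, cnt) or [0, 5].
+	 */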
+	if (unlikely(!sane)) {
+		if (use_dmahead) {
+			/* try one more time, directly from the register */
+			use_dmahead = 0;
+			goto retry;
+		}
+		/* assume no progress */
+		hwhead = swhead;
+	}
+
+	return hwhead;
+}
+
+static int qib_sdma_7220_busy(struct qib_pportdata *ppd)
+{
+	u64 hwstatus = qib_read_kreg64(ppd->dd, kr_senddmastatus);
+
+	return (hwstatus & SYM_MASK(SendDmaStatus, ScoreBoardDrainInProg)) ||
+	       (hwstatus & SYM_MASK(SendDmaStatus, AbortInProg)) ||
+	       (hwstatus & SYM_MASK(SendDmaStatus, InternalSDmaEnable)) ||
+	       !(hwstatus & SYM_MASK(SendDmaStatus, ScbEmpty));
+}
+
+/*
+ * Compute the amount of delay before sending the next packet if the
+ * port's send rate differs from the static rate set for the QP.
+ * Since the delay affects this packet but the amount of the delay is
+ * based on the length of the previous packet, use the last delay computed
+ * and save the delay count for this packet to be used next time
+ * we get here.
+ */
+static u32 qib_7220_setpbc_control(struct qib_pportdata *ppd, u32 plen,
+				   u8 srate, u8 vl)
+{
+	u8 snd_mult = ppd->delay_mult;
+	u8 rcv_mult = ib_rate_to_delay[srate];
+	u32 ret = ppd->cpspec->last_delay_mult;
+
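+	/*
+	 * The new delay is half the dword length scaled by the rate
+	 * mismatch (rcv_mult - snd_mult), rounded up; it is zero when
+	 * the QP's static rate is no slower than the port's.
+	 */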
+	ppd->cpspec->last_delay_mult = (rcv_mult > snd_mult) ?
+		(plen * (rcv_mult - snd_mult) + 1) >> 1 : 0;
+
+	/* Indicate VL15, if necessary */
+	if (vl == 15)
+		ret |= PBC_7220_VL15_SEND_CTRL;
+	return ret;
+}
+
+static void qib_7220_initvl15_bufs(struct qib_devdata *dd)
+{
+}
+
+static void qib_7220_init_ctxt(struct qib_ctxtdata *rcd)
+{
+	if (!rcd->ctxt) {
+		rcd->rcvegrcnt = IBA7220_KRCVEGRCNT;
+		rcd->rcvegr_tid_base = 0;
+	} else {
+		rcd->rcvegrcnt = rcd->dd->cspec->rcvegrcnt;
+		rcd->rcvegr_tid_base = IBA7220_KRCVEGRCNT +
+			(rcd->ctxt - 1) * rcd->rcvegrcnt;
+	}
+}
+
+static void qib_7220_txchk_change(struct qib_devdata *dd, u32 start,
+				  u32 len, u32 which, struct qib_ctxtdata *rcd)
+{
+	int i;
+	unsigned long flags;
+
+	switch (which) {
+	case TXCHK_CHG_TYPE_KERN:
+		/* see if we need to raise avail update threshold */
+		spin_lock_irqsave(&dd->uctxt_lock, flags);
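+		/*
+		 * If any shared user context still has fewer bufs per
+		 * subcontext than the default threshold, it still needs
+		 * the lowered value; only when none does (the loop runs
+		 * to cfgctxts) do we restore the default below.
+		 */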
+		for (i = dd->first_user_ctxt;
+		     dd->cspec->updthresh != dd->cspec->updthresh_dflt
+		     && i < dd->cfgctxts; i++)
+			if (dd->rcd[i] && dd->rcd[i]->subctxt_cnt &&
+			   ((dd->rcd[i]->piocnt / dd->rcd[i]->subctxt_cnt) - 1)
+			   < dd->cspec->updthresh_dflt)
+				break;
+		spin_unlock_irqrestore(&dd->uctxt_lock, flags);
+		if (i == dd->cfgctxts) {
+			spin_lock_irqsave(&dd->sendctrl_lock, flags);
+			dd->cspec->updthresh = dd->cspec->updthresh_dflt;
+			dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
+			dd->sendctrl |= (dd->cspec->updthresh &
+					 SYM_RMASK(SendCtrl, AvailUpdThld)) <<
+					   SYM_LSB(SendCtrl, AvailUpdThld);
+			spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
+			sendctrl_7220_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
+		}
+		break;
+	case TXCHK_CHG_TYPE_USER:
+		spin_lock_irqsave(&dd->sendctrl_lock, flags);
+		if (rcd && rcd->subctxt_cnt && ((rcd->piocnt
+			/ rcd->subctxt_cnt) - 1) < dd->cspec->updthresh) {
+			dd->cspec->updthresh = (rcd->piocnt /
+						rcd->subctxt_cnt) - 1;
+			dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
+			dd->sendctrl |= (dd->cspec->updthresh &
+					SYM_RMASK(SendCtrl, AvailUpdThld))
+					<< SYM_LSB(SendCtrl, AvailUpdThld);
+			spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
+			sendctrl_7220_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
+		} else
+			spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
+		break;
+	}
+}
+
+static void writescratch(struct qib_devdata *dd, u32 val)
+{
+	qib_write_kreg(dd, kr_scratch, val);
+}
+
+#define VALID_TS_RD_REG_MASK 0xBF
+/**
+ * qib_7220_tempsense_rd - read register of temp sensor via TWSI
+ * @dd: the qlogic_ib device
+ * @regnum: register to read from
+ *
+ * Returns reg contents (0..255) or < 0 for error.
+ */
+static int qib_7220_tempsense_rd(struct qib_devdata *dd, int regnum)
+{
+	int ret;
+	u8 rdata;
+
+	if (regnum > 7) {
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	/* return a bogus value for (the one) register we do not have */
+	if (!((1 << regnum) & VALID_TS_RD_REG_MASK)) {
+		ret = 0;
+		goto bail;
+	}
+
+	ret = mutex_lock_interruptible(&dd->eep_lock);
+	if (ret)
+		goto bail;
+
+	ret = qib_twsi_blk_rd(dd, QIB_TWSI_TEMP_DEV, regnum, &rdata, 1);
+	if (!ret)
+		ret = rdata;
+
+	mutex_unlock(&dd->eep_lock);
+
+	/*
+	 * There are three possibilities here:
+	 * ret is actual value (0..255)
+	 * ret is -ENXIO or -EINVAL from twsi code or this file
+	 * ret is -EINTR from mutex_lock_interruptible.
+	 */
+bail:
+	return ret;
+}
+
+/* Dummy function, as 7220 boards never disable EEPROM Write */
+static int qib_7220_eeprom_wen(struct qib_devdata *dd, int wen)
+{
+	return 1;
+}
+
+/**
+ * qib_init_iba7220_funcs - set up the chip-specific function pointers
+ * @pdev: the pci_dev for the qlogic_ib device
+ * @ent: pci_device_id struct for this dev
+ *
+ * This is global, and is called directly at init to set up the
+ * chip-specific function pointers for later use.
+ */
+struct qib_devdata *qib_init_iba7220_funcs(struct pci_dev *pdev,
+					   const struct pci_device_id *ent)
+{
+	struct qib_devdata *dd;
+	int ret;
+	u32 boardid, minwidth;
+
+	dd = qib_alloc_devdata(pdev, sizeof(struct qib_chip_specific) +
+		sizeof(struct qib_chippport_specific));
+	if (IS_ERR(dd))
+		goto bail;
+
+	dd->f_bringup_serdes    = qib_7220_bringup_serdes;
+	dd->f_cleanup           = qib_setup_7220_cleanup;
+	dd->f_clear_tids        = qib_7220_clear_tids;
+	dd->f_free_irq          = qib_7220_free_irq;
+	dd->f_get_base_info     = qib_7220_get_base_info;
+	dd->f_get_msgheader     = qib_7220_get_msgheader;
+	dd->f_getsendbuf        = qib_7220_getsendbuf;
+	dd->f_gpio_mod          = gpio_7220_mod;
+	dd->f_eeprom_wen        = qib_7220_eeprom_wen;
+	dd->f_hdrqempty         = qib_7220_hdrqempty;
+	dd->f_ib_updown         = qib_7220_ib_updown;
+	dd->f_init_ctxt         = qib_7220_init_ctxt;
+	dd->f_initvl15_bufs     = qib_7220_initvl15_bufs;
+	dd->f_intr_fallback     = qib_7220_intr_fallback;
+	dd->f_late_initreg      = qib_late_7220_initreg;
+	dd->f_setpbc_control    = qib_7220_setpbc_control;
+	dd->f_portcntr          = qib_portcntr_7220;
+	dd->f_put_tid           = qib_7220_put_tid;
+	dd->f_quiet_serdes      = qib_7220_quiet_serdes;
+	dd->f_rcvctrl           = rcvctrl_7220_mod;
+	dd->f_read_cntrs        = qib_read_7220cntrs;
+	dd->f_read_portcntrs    = qib_read_7220portcntrs;
+	dd->f_reset             = qib_setup_7220_reset;
+	dd->f_init_sdma_regs    = init_sdma_7220_regs;
+	dd->f_sdma_busy         = qib_sdma_7220_busy;
+	dd->f_sdma_gethead      = qib_sdma_7220_gethead;
+	dd->f_sdma_sendctrl     = qib_7220_sdma_sendctrl;
+	dd->f_sdma_set_desc_cnt = qib_sdma_set_7220_desc_cnt;
+	dd->f_sdma_update_tail  = qib_sdma_update_7220_tail;
+	dd->f_sdma_hw_clean_up  = qib_7220_sdma_hw_clean_up;
+	dd->f_sdma_hw_start_up  = qib_7220_sdma_hw_start_up;
+	dd->f_sdma_init_early   = qib_7220_sdma_init_early;
+	dd->f_sendctrl          = sendctrl_7220_mod;
+	dd->f_set_armlaunch     = qib_set_7220_armlaunch;
+	dd->f_set_cntr_sample   = qib_set_cntr_7220_sample;
+	dd->f_iblink_state      = qib_7220_iblink_state;
+	dd->f_ibphys_portstate  = qib_7220_phys_portstate;
+	dd->f_get_ib_cfg        = qib_7220_get_ib_cfg;
+	dd->f_set_ib_cfg        = qib_7220_set_ib_cfg;
+	dd->f_set_ib_loopback   = qib_7220_set_loopback;
+	dd->f_set_intr_state    = qib_7220_set_intr_state;
+	dd->f_setextled         = qib_setup_7220_setextled;
+	dd->f_txchk_change      = qib_7220_txchk_change;
+	dd->f_update_usrhead    = qib_update_7220_usrhead;
+	dd->f_wantpiobuf_intr   = qib_wantpiobuf_7220_intr;
+	dd->f_xgxs_reset        = qib_7220_xgxs_reset;
+	dd->f_writescratch      = writescratch;
+	dd->f_tempsense_rd	= qib_7220_tempsense_rd;
+	/*
+	 * Do remaining pcie setup and save pcie values in dd.
+	 * Any error printing is already done by the init code.
+	 * On return, we have the chip mapped, but chip registers
+	 * are not set up until start of qib_init_7220_variables.
+	 */
+	ret = qib_pcie_ddinit(dd, pdev, ent);
+	if (ret < 0)
+		goto bail_free;
+
+	/* initialize chip-specific variables */
+	ret = qib_init_7220_variables(dd);
+	if (ret)
+		goto bail_cleanup;
+
+	if (qib_mini_init)
+		goto bail;
+
+	boardid = SYM_FIELD(dd->revision, Revision,
+			    BoardID);
+	switch (boardid) {
+	case 0:
+	case 2:
+	case 10:
+	case 12:
+		minwidth = 16; /* x16 capable boards */
+		break;
+	default:
+		minwidth = 8; /* x8 capable boards */
+		break;
+	}
+	if (qib_pcie_params(dd, minwidth, NULL, NULL))
+		qib_dev_err(dd, "Failed to setup PCIe or interrupts; "
+			    "continuing anyway\n");
+
+	/* save IRQ for possible later use */
+	dd->cspec->irq = pdev->irq;
+
+	if (qib_read_kreg64(dd, kr_hwerrstatus) &
+	    QLOGIC_IB_HWE_SERDESPLLFAILED)
+		qib_write_kreg(dd, kr_hwerrclear,
+			       QLOGIC_IB_HWE_SERDESPLLFAILED);
+
+	/* setup interrupt handler (interrupt type handled above) */
+	qib_setup_7220_interrupt(dd);
+	qib_7220_init_hwerrors(dd);
+
+	/* clear diagctrl register, in case diags were running and crashed */
+	qib_write_kreg(dd, kr_hwdiagctrl, 0);
+
+	goto bail;
+
+bail_cleanup:
+	qib_pcie_ddcleanup(dd);
+bail_free:
+	qib_free_devdata(dd);
+	dd = ERR_PTR(ret);
+bail:
+	return dd;
+}
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
new file mode 100644
index 000000000000..2c24eab35b54
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -0,0 +1,8058 @@
+/*
+ * Copyright (c) 2008, 2009, 2010 QLogic Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * This file contains all of the code that is specific to the
+ * InfiniPath 7322 chip
+ */
+
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/jiffies.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_smi.h>
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+#include <linux/dca.h>
+#endif
+
+#include "qib.h"
+#include "qib_7322_regs.h"
+#include "qib_qsfp.h"
+
+#include "qib_mad.h"
+
+static void qib_setup_7322_setextled(struct qib_pportdata *, u32);
+static void qib_7322_handle_hwerrors(struct qib_devdata *, char *, size_t);
+static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op);
+static irqreturn_t qib_7322intr(int irq, void *data);
+static irqreturn_t qib_7322bufavail(int irq, void *data);
+static irqreturn_t sdma_intr(int irq, void *data);
+static irqreturn_t sdma_idle_intr(int irq, void *data);
+static irqreturn_t sdma_progress_intr(int irq, void *data);
+static irqreturn_t sdma_cleanup_intr(int irq, void *data);
+static void qib_7322_txchk_change(struct qib_devdata *, u32, u32, u32,
+				  struct qib_ctxtdata *rcd);
+static u8 qib_7322_phys_portstate(u64);
+static u32 qib_7322_iblink_state(u64);
+static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
+				   u16 linitcmd);
+static void force_h1(struct qib_pportdata *);
+static void adj_tx_serdes(struct qib_pportdata *);
+static u32 qib_7322_setpbc_control(struct qib_pportdata *, u32, u8, u8);
+static void qib_7322_mini_pcs_reset(struct qib_pportdata *);
+
+static u32 ahb_mod(struct qib_devdata *, int, int, int, u32, u32);
+static void ibsd_wr_allchans(struct qib_pportdata *, int, unsigned, unsigned);
+
+#define BMASK(msb, lsb) (((1 << ((msb) + 1 - (lsb))) - 1) << (lsb))
+
+/* LE2 serdes values for different cases */
+#define LE2_DEFAULT 5
+#define LE2_5m 4
+#define LE2_QME 0
+
+/* Below is special-purpose, so only really works for the IB SerDes blocks. */
+#define IBSD(hw_pidx) (hw_pidx + 2)
+
+/* these are variables for documentation and experimentation purposes */
+static const unsigned rcv_int_timeout = 375;
+static const unsigned rcv_int_count = 16;
+static const unsigned sdma_idle_cnt = 64;
+
+/* Time to stop altering Rx Equalization parameters, after link up. */
+#define RXEQ_DISABLE_MSECS 2500
+
+/*
+ * Number of VLs we are configured to use (to allow for more
+ * credits per vl, etc.)
+ */
+ushort qib_num_cfg_vls = 2;
+module_param_named(num_vls, qib_num_cfg_vls, ushort, S_IRUGO);
+MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
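+/*
+ * Example (assuming the driver is built as the ib_qib module):
+ *	modprobe ib_qib num_vls=4
+ */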
+
+static ushort qib_chase = 1;
+module_param_named(chase, qib_chase, ushort, S_IRUGO);
+MODULE_PARM_DESC(chase, "Enable state chase handling");
+
+static ushort qib_long_atten = 10; /* 10 dB ~= 5m length */
+module_param_named(long_attenuation, qib_long_atten, ushort, S_IRUGO);
+MODULE_PARM_DESC(long_attenuation, \
+		 "attenuation cutoff (dB) for long copper cable setup");
+
+static ushort qib_singleport;
+module_param_named(singleport, qib_singleport, ushort, S_IRUGO);
+MODULE_PARM_DESC(singleport, "Use only IB port 1; more per-port buffer space");
+
+
+/*
+ * Setup QMH7342 receive and transmit parameters, necessary because
+ * each bay, Mez connector, and IB port need different tuning, beyond
+ * what the switch and HCA can do automatically.
+ * It's expected to be set by writing to the module's parameter file in
+ * sysfs, rather than on the module command line.
+ * It's a "write-only" file, returns 0 when read back.
+ * The unit, port, bay (if given), and values MUST be done as a single write.
+ * The unit, port, and bay must precede the values to be effective.
+ */
+static int setup_qmh_params(const char *, struct kernel_param *);
+static unsigned dummy_qmh_params;
+module_param_call(qmh_serdes_setup, setup_qmh_params, param_get_uint,
+		  &dummy_qmh_params, S_IWUSR | S_IRUGO);
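+/*
+ * Illustrative only (values are board-specific, and this assumes the
+ * module is loaded as ib_qib):
+ *	echo "<unit> <port> <bay> <values...>" > \
+ *		/sys/module/ib_qib/parameters/qmh_serdes_setup
+ */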
+
+/* similarly for QME7342, but it's simpler */
+static int setup_qme_params(const char *, struct kernel_param *);
+static unsigned dummy_qme_params;
+module_param_call(qme_serdes_setup, setup_qme_params, param_get_uint,
+		  &dummy_qme_params, S_IWUSR | S_IRUGO);
+
+#define MAX_ATTEN_LEN 64 /* plenty for any real system */
+/* for read back, default index is ~5m copper cable */
+static char cable_atten_list[MAX_ATTEN_LEN] = "10";
+static struct kparam_string kp_cable_atten = {
+	.string = cable_atten_list,
+	.maxlen = MAX_ATTEN_LEN
+};
+static int  setup_cable_atten(const char *, struct kernel_param *);
+module_param_call(cable_atten, setup_cable_atten, param_get_string,
+		  &kp_cable_atten, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(cable_atten, \
+		 "cable attenuation indices for cables with invalid EEPROM");
+
+#define BOARD_QME7342 5
+#define BOARD_QMH7342 6
+#define IS_QMH(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
+		    BOARD_QMH7342)
+#define IS_QME(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
+		    BOARD_QME7342)
+
+#define KREG_IDX(regname)     (QIB_7322_##regname##_OFFS / sizeof(u64))
+
+#define KREG_IBPORT_IDX(regname) ((QIB_7322_##regname##_0_OFFS / sizeof(u64)))
+
+#define MASK_ACROSS(lsb, msb) \
+	(((1ULL << ((msb) + 1 - (lsb))) - 1) << (lsb))
+
+#define SYM_RMASK(regname, fldname) ((u64)              \
+	QIB_7322_##regname##_##fldname##_RMASK)
+
+#define SYM_MASK(regname, fldname) ((u64)               \
+	QIB_7322_##regname##_##fldname##_RMASK <<       \
+	 QIB_7322_##regname##_##fldname##_LSB)
+
+#define SYM_FIELD(value, regname, fldname) ((u64)	\
+	(((value) >> SYM_LSB(regname, fldname)) &	\
+	 SYM_RMASK(regname, fldname)))
+
+/* useful for things like LaFifoEmpty_0...7, TxCreditOK_0...7, etc. */
+#define SYM_FIELD_ACROSS(value, regname, fldname, nbits) \
+	(((value) >> SYM_LSB(regname, fldname)) & MASK_ACROSS(0, nbits))
+
+#define HWE_MASK(fldname) SYM_MASK(HwErrMask, fldname##Mask)
+#define ERR_MASK(fldname) SYM_MASK(ErrMask, fldname##Mask)
+#define ERR_MASK_N(fldname) SYM_MASK(ErrMask_0, fldname##Mask)
+#define INT_MASK(fldname) SYM_MASK(IntMask, fldname##IntMask)
+#define INT_MASK_P(fldname, port) SYM_MASK(IntMask, fldname##IntMask##_##port)
+/* Below because most, but not all, fields of IntMask have that full suffix */
+#define INT_MASK_PM(fldname, port) SYM_MASK(IntMask, fldname##Mask##_##port)
+
+
+#define SYM_LSB(regname, fldname) (QIB_7322_##regname##_##fldname##_LSB)
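+/*
+ * For illustration, SYM_FIELD(dd->revision, Revision, BoardID) expands to
+ *	((dd->revision >> QIB_7322_Revision_BoardID_LSB) &
+ *	 QIB_7322_Revision_BoardID_RMASK)
+ * extracting the BoardID field from the shadowed Revision register.
+ */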
+
+/*
+ * the size bits give us 2^N, in KB units.  0 marks as invalid,
+ * and 7 is reserved.  We currently use only 2KB and 4KB
+ */
+#define IBA7322_TID_SZ_SHIFT QIB_7322_RcvTIDArray0_RT_BufSize_LSB
+#define IBA7322_TID_SZ_2K (1UL<<IBA7322_TID_SZ_SHIFT) /* 2KB */
+#define IBA7322_TID_SZ_4K (2UL<<IBA7322_TID_SZ_SHIFT) /* 4KB */
+#define IBA7322_TID_PA_SHIFT 11U /* TID addr in chip stored w/o low bits */
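+/*
+ * Sketch only: a 4KB expected-receive buffer at physical address pa
+ * would be programmed into a TID entry roughly as
+ *	(pa >> IBA7322_TID_PA_SHIFT) | IBA7322_TID_SZ_4K
+ * i.e. the low 11 address bits are dropped and the size code OR'd in.
+ */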
+
+#define SendIBSLIDAssignMask \
+	QIB_7322_SendIBSLIDAssign_0_SendIBSLIDAssign_15_0_RMASK
+#define SendIBSLMCMask \
+	QIB_7322_SendIBSLIDMask_0_SendIBSLIDMask_15_0_RMASK
+
+#define ExtLED_IB1_YEL SYM_MASK(EXTCtrl, LEDPort0YellowOn)
+#define ExtLED_IB1_GRN SYM_MASK(EXTCtrl, LEDPort0GreenOn)
+#define ExtLED_IB2_YEL SYM_MASK(EXTCtrl, LEDPort1YellowOn)
+#define ExtLED_IB2_GRN SYM_MASK(EXTCtrl, LEDPort1GreenOn)
+#define ExtLED_IB1_MASK (ExtLED_IB1_YEL | ExtLED_IB1_GRN)
+#define ExtLED_IB2_MASK (ExtLED_IB2_YEL | ExtLED_IB2_GRN)
+
+#define _QIB_GPIO_SDA_NUM 1
+#define _QIB_GPIO_SCL_NUM 0
+#define QIB_EEPROM_WEN_NUM 14
+#define QIB_TWSI_EEPROM_DEV 0xA2 /* All Production 7322 cards. */
+
+/* HW counter clock is at 4nsec */
+#define QIB_7322_PSXMITWAIT_CHECK_RATE 4000
+
+/* full speed IB port 1 only */
+#define PORT_SPD_CAP (QIB_IB_SDR | QIB_IB_DDR | QIB_IB_QDR)
+#define PORT_SPD_CAP_SHIFT 3
+
+/* full speed featuremask, both ports */
+#define DUAL_PORT_CAP (PORT_SPD_CAP | (PORT_SPD_CAP << PORT_SPD_CAP_SHIFT))
+
+/*
+ * This file contains almost all the chip-specific register information and
+ * access functions for the QLogic InfiniPath 7322 PCI-Express chip.
+ */
+
+/* Use defines to tie machine-generated names to lower-case names */
+#define kr_contextcnt KREG_IDX(ContextCnt)
+#define kr_control KREG_IDX(Control)
+#define kr_counterregbase KREG_IDX(CntrRegBase)
+#define kr_errclear KREG_IDX(ErrClear)
+#define kr_errmask KREG_IDX(ErrMask)
+#define kr_errstatus KREG_IDX(ErrStatus)
+#define kr_extctrl KREG_IDX(EXTCtrl)
+#define kr_extstatus KREG_IDX(EXTStatus)
+#define kr_gpio_clear KREG_IDX(GPIOClear)
+#define kr_gpio_mask KREG_IDX(GPIOMask)
+#define kr_gpio_out KREG_IDX(GPIOOut)
+#define kr_gpio_status KREG_IDX(GPIOStatus)
+#define kr_hwdiagctrl KREG_IDX(HwDiagCtrl)
+#define kr_debugportval KREG_IDX(DebugPortValueReg)
+#define kr_fmask KREG_IDX(feature_mask)
+#define kr_act_fmask KREG_IDX(active_feature_mask)
+#define kr_hwerrclear KREG_IDX(HwErrClear)
+#define kr_hwerrmask KREG_IDX(HwErrMask)
+#define kr_hwerrstatus KREG_IDX(HwErrStatus)
+#define kr_intclear KREG_IDX(IntClear)
+#define kr_intmask KREG_IDX(IntMask)
+#define kr_intredirect KREG_IDX(IntRedirect0)
+#define kr_intstatus KREG_IDX(IntStatus)
+#define kr_pagealign KREG_IDX(PageAlign)
+#define kr_rcvavailtimeout KREG_IDX(RcvAvailTimeOut0)
+#define kr_rcvctrl KREG_IDX(RcvCtrl) /* Common, but chip also has per-port */
+#define kr_rcvegrbase KREG_IDX(RcvEgrBase)
+#define kr_rcvegrcnt KREG_IDX(RcvEgrCnt)
+#define kr_rcvhdrcnt KREG_IDX(RcvHdrCnt)
+#define kr_rcvhdrentsize KREG_IDX(RcvHdrEntSize)
+#define kr_rcvhdrsize KREG_IDX(RcvHdrSize)
+#define kr_rcvtidbase KREG_IDX(RcvTIDBase)
+#define kr_rcvtidcnt KREG_IDX(RcvTIDCnt)
+#define kr_revision KREG_IDX(Revision)
+#define kr_scratch KREG_IDX(Scratch)
+#define kr_sendbuffererror KREG_IDX(SendBufErr0) /* and base for 1 and 2 */
+#define kr_sendcheckmask KREG_IDX(SendCheckMask0) /* and 1, 2 */
+#define kr_sendctrl KREG_IDX(SendCtrl)
+#define kr_sendgrhcheckmask KREG_IDX(SendGRHCheckMask0) /* and 1, 2 */
+#define kr_sendibpktmask KREG_IDX(SendIBPacketMask0) /* and 1, 2 */
+#define kr_sendpioavailaddr KREG_IDX(SendBufAvailAddr)
+#define kr_sendpiobufbase KREG_IDX(SendBufBase)
+#define kr_sendpiobufcnt KREG_IDX(SendBufCnt)
+#define kr_sendpiosize KREG_IDX(SendBufSize)
+#define kr_sendregbase KREG_IDX(SendRegBase)
+#define kr_sendbufavail0 KREG_IDX(SendBufAvail0)
+#define kr_userregbase KREG_IDX(UserRegBase)
+#define kr_intgranted KREG_IDX(Int_Granted)
+#define kr_vecclr_wo_int KREG_IDX(vec_clr_without_int)
+#define kr_intblocked KREG_IDX(IntBlocked)
+#define kr_r_access KREG_IDX(SPC_JTAG_ACCESS_REG)
+
+/*
+ * per-port kernel registers.  Access only with qib_read_kreg_port()
+ * or qib_write_kreg_port()
+ */
+#define krp_errclear KREG_IBPORT_IDX(ErrClear)
+#define krp_errmask KREG_IBPORT_IDX(ErrMask)
+#define krp_errstatus KREG_IBPORT_IDX(ErrStatus)
+#define krp_highprio_0 KREG_IBPORT_IDX(HighPriority0)
+#define krp_highprio_limit KREG_IBPORT_IDX(HighPriorityLimit)
+#define krp_hrtbt_guid KREG_IBPORT_IDX(HRTBT_GUID)
+#define krp_ib_pcsconfig KREG_IBPORT_IDX(IBPCSConfig)
+#define krp_ibcctrl_a KREG_IBPORT_IDX(IBCCtrlA)
+#define krp_ibcctrl_b KREG_IBPORT_IDX(IBCCtrlB)
+#define krp_ibcctrl_c KREG_IBPORT_IDX(IBCCtrlC)
+#define krp_ibcstatus_a KREG_IBPORT_IDX(IBCStatusA)
+#define krp_ibcstatus_b KREG_IBPORT_IDX(IBCStatusB)
+#define krp_txestatus KREG_IBPORT_IDX(TXEStatus)
+#define krp_lowprio_0 KREG_IBPORT_IDX(LowPriority0)
+#define krp_ncmodectrl KREG_IBPORT_IDX(IBNCModeCtrl)
+#define krp_partitionkey KREG_IBPORT_IDX(RcvPartitionKey)
+#define krp_psinterval KREG_IBPORT_IDX(PSInterval)
+#define krp_psstart KREG_IBPORT_IDX(PSStart)
+#define krp_psstat KREG_IBPORT_IDX(PSStat)
+#define krp_rcvbthqp KREG_IBPORT_IDX(RcvBTHQP)
+#define krp_rcvctrl KREG_IBPORT_IDX(RcvCtrl)
+#define krp_rcvpktledcnt KREG_IBPORT_IDX(RcvPktLEDCnt)
+#define krp_rcvqpmaptable KREG_IBPORT_IDX(RcvQPMapTableA)
+#define krp_rxcreditvl0 KREG_IBPORT_IDX(RxCreditVL0)
+#define krp_rxcreditvl15 (KREG_IBPORT_IDX(RxCreditVL0)+15)
+#define krp_sendcheckcontrol KREG_IBPORT_IDX(SendCheckControl)
+#define krp_sendctrl KREG_IBPORT_IDX(SendCtrl)
+#define krp_senddmabase KREG_IBPORT_IDX(SendDmaBase)
+#define krp_senddmabufmask0 KREG_IBPORT_IDX(SendDmaBufMask0)
+#define krp_senddmabufmask1 (KREG_IBPORT_IDX(SendDmaBufMask0) + 1)
+#define krp_senddmabufmask2 (KREG_IBPORT_IDX(SendDmaBufMask0) + 2)
+#define krp_senddmabuf_use0 KREG_IBPORT_IDX(SendDmaBufUsed0)
+#define krp_senddmabuf_use1 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 1)
+#define krp_senddmabuf_use2 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 2)
+#define krp_senddmadesccnt KREG_IBPORT_IDX(SendDmaDescCnt)
+#define krp_senddmahead KREG_IBPORT_IDX(SendDmaHead)
+#define krp_senddmaheadaddr KREG_IBPORT_IDX(SendDmaHeadAddr)
+#define krp_senddmaidlecnt KREG_IBPORT_IDX(SendDmaIdleCnt)
+#define krp_senddmalengen KREG_IBPORT_IDX(SendDmaLenGen)
+#define krp_senddmaprioritythld KREG_IBPORT_IDX(SendDmaPriorityThld)
+#define krp_senddmareloadcnt KREG_IBPORT_IDX(SendDmaReloadCnt)
+#define krp_senddmastatus KREG_IBPORT_IDX(SendDmaStatus)
+#define krp_senddmatail KREG_IBPORT_IDX(SendDmaTail)
+#define krp_sendhdrsymptom KREG_IBPORT_IDX(SendHdrErrSymptom)
+#define krp_sendslid KREG_IBPORT_IDX(SendIBSLIDAssign)
+#define krp_sendslidmask KREG_IBPORT_IDX(SendIBSLIDMask)
+#define krp_ibsdtestiftx KREG_IBPORT_IDX(IB_SDTEST_IF_TX)
+#define krp_adapt_dis_timer KREG_IBPORT_IDX(ADAPT_DISABLE_TIMER_THRESHOLD)
+#define krp_tx_deemph_override KREG_IBPORT_IDX(IBSD_TX_DEEMPHASIS_OVERRIDE)
+#define krp_serdesctrl KREG_IBPORT_IDX(IBSerdesCtrl)
+
+/*
+ * Per-context kernel registers.  Access only with qib_read_kreg_ctxt()
+ * or qib_write_kreg_ctxt()
+ */
+#define krc_rcvhdraddr KREG_IDX(RcvHdrAddr0)
+#define krc_rcvhdrtailaddr KREG_IDX(RcvHdrTailAddr0)
+
+/*
+ * TID Flow table, per context.  Reduces
+ * number of hdrq updates to one per flow (or on errors).
+ * Contexts 0 and 1 share the same memory, but have distinct
+ * addresses.  Since for now, we never use expected sends
+ * on kernel contexts, we don't worry about that (we initialize
+ * those entries for ctxt 0/1 on driver load twice, for example).
+ */
+#define NUM_TIDFLOWS_CTXT 0x20 /* 0x20 per context; have to hardcode */
+#define ur_rcvflowtable (KREG_IDX(RcvTIDFlowTable0) - KREG_IDX(RcvHdrTail0))
+
+/* these are the error bits in the tid flows, and are W1C */
+#define TIDFLOW_ERRBITS  ( \
+	(SYM_MASK(RcvTIDFlowTable0, GenMismatch) << \
+	SYM_LSB(RcvTIDFlowTable0, GenMismatch)) | \
+	(SYM_MASK(RcvTIDFlowTable0, SeqMismatch) << \
+	SYM_LSB(RcvTIDFlowTable0, SeqMismatch)))
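+/*
+ * Being W1C, a flow's error state is cleared by writing the error bits
+ * back, e.g. (sketch only):
+ *	qib_write_ureg(dd, ur_rcvflowtable + flow, TIDFLOW_ERRBITS, ctxt);
+ */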
+
+/* Most (not all) Counters are per-IBport.
+ * Requires LBIntCnt is at offset 0 in the group
+ */
+#define CREG_IDX(regname) \
+((QIB_7322_##regname##_0_OFFS - QIB_7322_LBIntCnt_OFFS) / sizeof(u64))
+
+#define crp_badformat CREG_IDX(RxVersionErrCnt)
+#define crp_err_rlen CREG_IDX(RxLenErrCnt)
+#define crp_erricrc CREG_IDX(RxICRCErrCnt)
+#define crp_errlink CREG_IDX(RxLinkMalformCnt)
+#define crp_errlpcrc CREG_IDX(RxLPCRCErrCnt)
+#define crp_errpkey CREG_IDX(RxPKeyMismatchCnt)
+#define crp_errvcrc CREG_IDX(RxVCRCErrCnt)
+#define crp_excessbufferovfl CREG_IDX(ExcessBufferOvflCnt)
+#define crp_iblinkdown CREG_IDX(IBLinkDownedCnt)
+#define crp_iblinkerrrecov CREG_IDX(IBLinkErrRecoveryCnt)
+#define crp_ibstatuschange CREG_IDX(IBStatusChangeCnt)
+#define crp_ibsymbolerr CREG_IDX(IBSymbolErrCnt)
+#define crp_invalidrlen CREG_IDX(RxMaxMinLenErrCnt)
+#define crp_locallinkintegrityerr CREG_IDX(LocalLinkIntegrityErrCnt)
+#define crp_pktrcv CREG_IDX(RxDataPktCnt)
+#define crp_pktrcvflowctrl CREG_IDX(RxFlowPktCnt)
+#define crp_pktsend CREG_IDX(TxDataPktCnt)
+#define crp_pktsendflow CREG_IDX(TxFlowPktCnt)
+#define crp_psrcvdatacount CREG_IDX(PSRcvDataCount)
+#define crp_psrcvpktscount CREG_IDX(PSRcvPktsCount)
+#define crp_psxmitdatacount CREG_IDX(PSXmitDataCount)
+#define crp_psxmitpktscount CREG_IDX(PSXmitPktsCount)
+#define crp_psxmitwaitcount CREG_IDX(PSXmitWaitCount)
+#define crp_rcvebp CREG_IDX(RxEBPCnt)
+#define crp_rcvflowctrlviol CREG_IDX(RxFlowCtrlViolCnt)
+#define crp_rcvovfl CREG_IDX(RxBufOvflCnt)
+#define crp_rxdlidfltr CREG_IDX(RxDlidFltrCnt)
+#define crp_rxdroppkt CREG_IDX(RxDroppedPktCnt)
+#define crp_rxotherlocalphyerr CREG_IDX(RxOtherLocalPhyErrCnt)
+#define crp_rxqpinvalidctxt CREG_IDX(RxQPInvalidContextCnt)
+#define crp_rxvlerr CREG_IDX(RxVlErrCnt)
+#define crp_sendstall CREG_IDX(TxFlowStallCnt)
+#define crp_txdroppedpkt CREG_IDX(TxDroppedPktCnt)
+#define crp_txhdrerr CREG_IDX(TxHeadersErrCnt)
+#define crp_txlenerr CREG_IDX(TxLenErrCnt)
+#define crp_txminmaxlenerr CREG_IDX(TxMaxMinLenErrCnt)
+#define crp_txsdmadesc CREG_IDX(TxSDmaDescCnt)
+#define crp_txunderrun CREG_IDX(TxUnderrunCnt)
+#define crp_txunsupvl CREG_IDX(TxUnsupVLErrCnt)
+#define crp_vl15droppedpkt CREG_IDX(RxVL15DroppedPktCnt)
+#define crp_wordrcv CREG_IDX(RxDwordCnt)
+#define crp_wordsend CREG_IDX(TxDwordCnt)
+#define crp_tx_creditstalls CREG_IDX(TxCreditUpToDateTimeOut)
+
+/* these are the (few) counters that are not port-specific */
+#define CREG_DEVIDX(regname) ((QIB_7322_##regname##_OFFS - \
+			QIB_7322_LBIntCnt_OFFS) / sizeof(u64))
+#define cr_base_egrovfl CREG_DEVIDX(RxP0HdrEgrOvflCnt)
+#define cr_lbint CREG_DEVIDX(LBIntCnt)
+#define cr_lbstall CREG_DEVIDX(LBFlowStallCnt)
+#define cr_pcieretrydiag CREG_DEVIDX(PcieRetryBufDiagQwordCnt)
+#define cr_rxtidflowdrop CREG_DEVIDX(RxTidFlowDropCnt)
+#define cr_tidfull CREG_DEVIDX(RxTIDFullErrCnt)
+#define cr_tidinvalid CREG_DEVIDX(RxTIDValidErrCnt)
+
+/* no chip register for # of IB ports supported, so define */
+#define NUM_IB_PORTS 2
+
+/* 1 VL15 buffer per hardware IB port, no register for this, so define */
+#define NUM_VL15_BUFS NUM_IB_PORTS
+
+/*
+ * Contexts 0 and 1 are special, and there is no chip register that
+ * defines this value, so we have to define it here.
+ * These are all allocated to either 0 or 1 for single port
+ * hardware configuration, otherwise each gets half
+ */
+#define KCTXT0_EGRCNT 2048
+
+/* values for vl and port fields in PBC, 7322-specific */
+#define PBC_PORT_SEL_LSB 26
+#define PBC_PORT_SEL_RMASK 1
+#define PBC_VL_NUM_LSB 27
+#define PBC_VL_NUM_RMASK 7
+#define PBC_7322_VL15_SEND (1ULL << 63) /* pbc; VL15, no credit check */
+#define PBC_7322_VL15_SEND_CTRL (1ULL << 31) /* control version of same */
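+/*
+ * Illustrative PBC assembly (flush_fifo() below does this for real):
+ * a VL15 send of len dwords on port hw_pidx is built as
+ *	pbc = PBC_7322_VL15_SEND |
+ *	      ((u64)hw_pidx << (PBC_PORT_SEL_LSB + 32)) | len;
+ */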
+
+static u8 ib_rate_to_delay[IB_RATE_120_GBPS + 1] = {
+	[IB_RATE_2_5_GBPS] = 16,
+	[IB_RATE_5_GBPS] = 8,
+	[IB_RATE_10_GBPS] = 4,
+	[IB_RATE_20_GBPS] = 2,
+	[IB_RATE_30_GBPS] = 2,
+	[IB_RATE_40_GBPS] = 1
+};
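+/*
+ * The delay values above appear to be static-rate multipliers relative
+ * to the 40 Gb/s line rate: 40/2.5 = 16, 40/5 = 8, 40/10 = 4,
+ * 40/20 = 2 and 40/40 = 1, with 30 Gb/s rounded up to 2.
+ */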
+
+#define IBA7322_LINKSPEED_SHIFT SYM_LSB(IBCStatusA_0, LinkSpeedActive)
+#define IBA7322_LINKWIDTH_SHIFT SYM_LSB(IBCStatusA_0, LinkWidthActive)
+
+/* link training states, from IBC */
+#define IB_7322_LT_STATE_DISABLED        0x00
+#define IB_7322_LT_STATE_LINKUP          0x01
+#define IB_7322_LT_STATE_POLLACTIVE      0x02
+#define IB_7322_LT_STATE_POLLQUIET       0x03
+#define IB_7322_LT_STATE_SLEEPDELAY      0x04
+#define IB_7322_LT_STATE_SLEEPQUIET      0x05
+#define IB_7322_LT_STATE_CFGDEBOUNCE     0x08
+#define IB_7322_LT_STATE_CFGRCVFCFG      0x09
+#define IB_7322_LT_STATE_CFGWAITRMT      0x0a
+#define IB_7322_LT_STATE_CFGIDLE         0x0b
+#define IB_7322_LT_STATE_RECOVERRETRAIN  0x0c
+#define IB_7322_LT_STATE_TXREVLANES      0x0d
+#define IB_7322_LT_STATE_RECOVERWAITRMT  0x0e
+#define IB_7322_LT_STATE_RECOVERIDLE     0x0f
+#define IB_7322_LT_STATE_CFGENH          0x10
+#define IB_7322_LT_STATE_CFGTEST         0x11
+
+/* link state machine states from IBC */
+#define IB_7322_L_STATE_DOWN             0x0
+#define IB_7322_L_STATE_INIT             0x1
+#define IB_7322_L_STATE_ARM              0x2
+#define IB_7322_L_STATE_ACTIVE           0x3
+#define IB_7322_L_STATE_ACT_DEFER        0x4
+
+static const u8 qib_7322_physportstate[0x20] = {
+	[IB_7322_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
+	[IB_7322_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
+	[IB_7322_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
+	[IB_7322_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
+	[IB_7322_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
+	[IB_7322_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
+	[IB_7322_LT_STATE_CFGDEBOUNCE] = IB_PHYSPORTSTATE_CFG_TRAIN,
+	[IB_7322_LT_STATE_CFGRCVFCFG] =
+		IB_PHYSPORTSTATE_CFG_TRAIN,
+	[IB_7322_LT_STATE_CFGWAITRMT] =
+		IB_PHYSPORTSTATE_CFG_TRAIN,
+	[IB_7322_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_IDLE,
+	[IB_7322_LT_STATE_RECOVERRETRAIN] =
+		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
+	[IB_7322_LT_STATE_RECOVERWAITRMT] =
+		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
+	[IB_7322_LT_STATE_RECOVERIDLE] =
+		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
+	[IB_7322_LT_STATE_CFGENH] = IB_PHYSPORTSTATE_CFG_ENH,
+	[IB_7322_LT_STATE_CFGTEST] = IB_PHYSPORTSTATE_CFG_TRAIN,
+	[0x12] = IB_PHYSPORTSTATE_CFG_TRAIN,
+	[0x13] = IB_PHYSPORTSTATE_CFG_WAIT_ENH,
+	[0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
+	[0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
+	[0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
+	[0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
+};
+
+struct qib_chip_specific {
+	u64 __iomem *cregbase;
+	u64 *cntrs;
+	spinlock_t rcvmod_lock; /* protect rcvctrl shadow changes */
+	spinlock_t gpio_lock; /* RMW of shadows/regs for ExtCtrl and GPIO */
+	u64 main_int_mask;      /* clear bits which have dedicated handlers */
+	u64 int_enable_mask;  /* for per port interrupts in single port mode */
+	u64 errormask;
+	u64 hwerrmask;
+	u64 gpio_out; /* shadow of kr_gpio_out, for rmw ops */
+	u64 gpio_mask; /* shadow the gpio mask register */
+	u64 extctrl; /* shadow the gpio output enable, etc... */
+	u32 ncntrs;
+	u32 nportcntrs;
+	u32 cntrnamelen;
+	u32 portcntrnamelen;
+	u32 numctxts;
+	u32 rcvegrcnt;
+	u32 updthresh; /* current AvailUpdThld */
+	u32 updthresh_dflt; /* default AvailUpdThld */
+	u32 r1;
+	int irq;
+	u32 num_msix_entries;
+	u32 sdmabufcnt;
+	u32 lastbuf_for_pio;
+	u32 stay_in_freeze;
+	u32 recovery_ports_initted;
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+	u32 dca_ctrl;
+	int rhdr_cpu[18];
+	int sdma_cpu[2];
+	u64 dca_rcvhdr_ctrl[5]; /* B, C, D, E, F */
+#endif
+	struct msix_entry *msix_entries;
+	void  **msix_arg;
+	unsigned long *sendchkenable;
+	unsigned long *sendgrhchk;
+	unsigned long *sendibchk;
+	u32 rcvavail_timeout[18];
+	char emsgbuf[128]; /* for device error interrupt msg buffer */
+};
+
+/* Table of Tx Emphasis entries, in "human readable" form. */
+struct txdds_ent {
+	u8 amp;
+	u8 pre;
+	u8 main;
+	u8 post;
+};
+
+struct vendor_txdds_ent {
+	u8 oui[QSFP_VOUI_LEN];
+	u8 *partnum;
+	struct txdds_ent sdr;
+	struct txdds_ent ddr;
+	struct txdds_ent qdr;
+};
+
+static void write_tx_serdes_param(struct qib_pportdata *, struct txdds_ent *);
+
+#define TXDDS_TABLE_SZ 16 /* number of entries per speed in onchip table */
+#define SERDES_CHANS 4 /* yes, it's obvious, but one less magic number */
+
+#define H1_FORCE_VAL 8
+#define H1_FORCE_QME 1 /*  may be overridden via setup_qme_params() */
+#define H1_FORCE_QMH 7 /*  may be overridden via setup_qmh_params() */
+
+/* The static and dynamic registers are paired, and the pairs indexed by spd */
+#define krp_static_adapt_dis(spd) (KREG_IBPORT_IDX(ADAPT_DISABLE_STATIC_SDR) \
+	+ ((spd) * 2))
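+/*
+ * E.g. (assuming spd 0/1/2 = SDR/DDR/QDR), krp_static_adapt_dis(2)
+ * resolves to KREG_IBPORT_IDX(ADAPT_DISABLE_STATIC_SDR) + 4, the QDR
+ * entry of the static half of each pair; presumably what the
+ * QDR_STATIC_ADAPT_* values below are written to.
+ */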
+
+#define QDR_DFE_DISABLE_DELAY 4000 /* msec after LINKUP */
+#define QDR_STATIC_ADAPT_DOWN 0xf0f0f0f0ULL /* link down, H1-H4 QDR adapts */
+#define QDR_STATIC_ADAPT_DOWN_R1 0ULL /* r1 link down, H1-H4 QDR adapts */
+#define QDR_STATIC_ADAPT_INIT 0xffffffffffULL /* up, disable H0,H1-8, LE */
+#define QDR_STATIC_ADAPT_INIT_R1 0xf0ffffffffULL /* r1 up, disable H0,H1-8 */
+
+static const struct txdds_ent qmh_sdr_txdds =  { 11, 0,  5,  6 };
+static const struct txdds_ent qmh_ddr_txdds =  {  7, 0,  2,  8 };
+static const struct txdds_ent qmh_qdr_txdds =  {  0, 1,  3, 10 };
+
+/* this is used for unknown mez cards also */
+static const struct txdds_ent qme_sdr_txdds =  { 11, 0,  4,  4 };
+static const struct txdds_ent qme_ddr_txdds =  {  7, 0,  2,  7 };
+static const struct txdds_ent qme_qdr_txdds =  {  0, 1, 12, 11 };
+
+struct qib_chippport_specific {
+	u64 __iomem *kpregbase;
+	u64 __iomem *cpregbase;
+	u64 *portcntrs;
+	struct qib_pportdata *ppd;
+	wait_queue_head_t autoneg_wait;
+	struct delayed_work autoneg_work;
+	struct delayed_work ipg_work;
+	struct timer_list chase_timer;
+	/*
+	 * these 5 fields are used to establish deltas for IB symbol
+	 * errors and linkrecovery errors.  They can be reported on
+	 * some chips during link negotiation prior to INIT, and with
+	 * DDR when faking DDR negotiations with non-IBTA switches.
+	 * The chip counters are adjusted at driver unload if there is
+	 * a non-zero delta.
+	 */
+	u64 ibdeltainprog;
+	u64 ibsymdelta;
+	u64 ibsymsnap;
+	u64 iblnkerrdelta;
+	u64 iblnkerrsnap;
+	u64 iblnkdownsnap;
+	u64 iblnkdowndelta;
+	u64 ibmalfdelta;
+	u64 ibmalfsnap;
+	u64 ibcctrl_a; /* krp_ibcctrl_a shadow */
+	u64 ibcctrl_b; /* krp_ibcctrl_b shadow */
+	u64 qdr_dfe_time;
+	u64 chase_end;
+	u32 autoneg_tries;
+	u32 recovery_init;
+	u32 qdr_dfe_on;
+	u32 qdr_reforce;
+	/*
+	 * Per-bay per-channel rcv QMH H1 values and Tx values for QDR.
+	 * entry zero is unused, to simplify indexing
+	 */
+	u16 h1_val;
+	u8 amp[SERDES_CHANS];
+	u8 pre[SERDES_CHANS];
+	u8 mainv[SERDES_CHANS];
+	u8 post[SERDES_CHANS];
+	u8 no_eep;  /* attenuation index to use if no qsfp info */
+	u8 ipg_tries;
+	u8 ibmalfusesnap;
+	struct qib_qsfp_data qsfp_data;
+	char epmsgbuf[192]; /* for port error interrupt msg buffer */
+};
+
+static struct {
+	const char *name;
+	irq_handler_t handler;
+	int lsb;
+	int port; /* 0 if not port-specific, else port # */
+} irq_table[] = {
+	{ QIB_DRV_NAME, qib_7322intr, -1, 0 },
+	{ QIB_DRV_NAME " (buf avail)", qib_7322bufavail,
+		SYM_LSB(IntStatus, SendBufAvail), 0 },
+	{ QIB_DRV_NAME " (sdma 0)", sdma_intr,
+		SYM_LSB(IntStatus, SDmaInt_0), 1 },
+	{ QIB_DRV_NAME " (sdma 1)", sdma_intr,
+		SYM_LSB(IntStatus, SDmaInt_1), 2 },
+	{ QIB_DRV_NAME " (sdmaI 0)", sdma_idle_intr,
+		SYM_LSB(IntStatus, SDmaIdleInt_0), 1 },
+	{ QIB_DRV_NAME " (sdmaI 1)", sdma_idle_intr,
+		SYM_LSB(IntStatus, SDmaIdleInt_1), 2 },
+	{ QIB_DRV_NAME " (sdmaP 0)", sdma_progress_intr,
+		SYM_LSB(IntStatus, SDmaProgressInt_0), 1 },
+	{ QIB_DRV_NAME " (sdmaP 1)", sdma_progress_intr,
+		SYM_LSB(IntStatus, SDmaProgressInt_1), 2 },
+	{ QIB_DRV_NAME " (sdmaC 0)", sdma_cleanup_intr,
+		SYM_LSB(IntStatus, SDmaCleanupDone_0), 1 },
+	{ QIB_DRV_NAME " (sdmaC 1)", sdma_cleanup_intr,
+		SYM_LSB(IntStatus, SDmaCleanupDone_1), 2 },
+};
+
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+static const struct dca_reg_map {
+	int     shadow_inx;
+	int     lsb;
+	u64     mask;
+	u16     regno;
+} dca_rcvhdr_reg_map[] = {
+	{ 0, SYM_LSB(DCACtrlB, RcvHdrq0DCAOPH),
+	   ~SYM_MASK(DCACtrlB, RcvHdrq0DCAOPH) , KREG_IDX(DCACtrlB) },
+	{ 0, SYM_LSB(DCACtrlB, RcvHdrq1DCAOPH),
+	   ~SYM_MASK(DCACtrlB, RcvHdrq1DCAOPH) , KREG_IDX(DCACtrlB) },
+	{ 0, SYM_LSB(DCACtrlB, RcvHdrq2DCAOPH),
+	   ~SYM_MASK(DCACtrlB, RcvHdrq2DCAOPH) , KREG_IDX(DCACtrlB) },
+	{ 0, SYM_LSB(DCACtrlB, RcvHdrq3DCAOPH),
+	   ~SYM_MASK(DCACtrlB, RcvHdrq3DCAOPH) , KREG_IDX(DCACtrlB) },
+	{ 1, SYM_LSB(DCACtrlC, RcvHdrq4DCAOPH),
+	   ~SYM_MASK(DCACtrlC, RcvHdrq4DCAOPH) , KREG_IDX(DCACtrlC) },
+	{ 1, SYM_LSB(DCACtrlC, RcvHdrq5DCAOPH),
+	   ~SYM_MASK(DCACtrlC, RcvHdrq5DCAOPH) , KREG_IDX(DCACtrlC) },
+	{ 1, SYM_LSB(DCACtrlC, RcvHdrq6DCAOPH),
+	   ~SYM_MASK(DCACtrlC, RcvHdrq6DCAOPH) , KREG_IDX(DCACtrlC) },
+	{ 1, SYM_LSB(DCACtrlC, RcvHdrq7DCAOPH),
+	   ~SYM_MASK(DCACtrlC, RcvHdrq7DCAOPH) , KREG_IDX(DCACtrlC) },
+	{ 2, SYM_LSB(DCACtrlD, RcvHdrq8DCAOPH),
+	   ~SYM_MASK(DCACtrlD, RcvHdrq8DCAOPH) , KREG_IDX(DCACtrlD) },
+	{ 2, SYM_LSB(DCACtrlD, RcvHdrq9DCAOPH),
+	   ~SYM_MASK(DCACtrlD, RcvHdrq9DCAOPH) , KREG_IDX(DCACtrlD) },
+	{ 2, SYM_LSB(DCACtrlD, RcvHdrq10DCAOPH),
+	   ~SYM_MASK(DCACtrlD, RcvHdrq10DCAOPH) , KREG_IDX(DCACtrlD) },
+	{ 2, SYM_LSB(DCACtrlD, RcvHdrq11DCAOPH),
+	   ~SYM_MASK(DCACtrlD, RcvHdrq11DCAOPH) , KREG_IDX(DCACtrlD) },
+	{ 3, SYM_LSB(DCACtrlE, RcvHdrq12DCAOPH),
+	   ~SYM_MASK(DCACtrlE, RcvHdrq12DCAOPH) , KREG_IDX(DCACtrlE) },
+	{ 3, SYM_LSB(DCACtrlE, RcvHdrq13DCAOPH),
+	   ~SYM_MASK(DCACtrlE, RcvHdrq13DCAOPH) , KREG_IDX(DCACtrlE) },
+	{ 3, SYM_LSB(DCACtrlE, RcvHdrq14DCAOPH),
+	   ~SYM_MASK(DCACtrlE, RcvHdrq14DCAOPH) , KREG_IDX(DCACtrlE) },
+	{ 3, SYM_LSB(DCACtrlE, RcvHdrq15DCAOPH),
+	   ~SYM_MASK(DCACtrlE, RcvHdrq15DCAOPH) , KREG_IDX(DCACtrlE) },
+	{ 4, SYM_LSB(DCACtrlF, RcvHdrq16DCAOPH),
+	   ~SYM_MASK(DCACtrlF, RcvHdrq16DCAOPH) , KREG_IDX(DCACtrlF) },
+	{ 4, SYM_LSB(DCACtrlF, RcvHdrq17DCAOPH),
+	   ~SYM_MASK(DCACtrlF, RcvHdrq17DCAOPH) , KREG_IDX(DCACtrlF) },
+};
+#endif
+
+/* ibcctrl bits */
+#define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1
+/* cycle through TS1/TS2 till OK */
+#define QLOGIC_IB_IBCC_LINKINITCMD_POLL 2
+/* wait for TS1, then go on */
+#define QLOGIC_IB_IBCC_LINKINITCMD_SLEEP 3
+#define QLOGIC_IB_IBCC_LINKINITCMD_SHIFT 16
+
+#define QLOGIC_IB_IBCC_LINKCMD_DOWN 1           /* move to 0x11 */
+#define QLOGIC_IB_IBCC_LINKCMD_ARMED 2          /* move to 0x21 */
+#define QLOGIC_IB_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */
+
+#define BLOB_7322_IBCHG 0x101
+
+static inline void qib_write_kreg(const struct qib_devdata *dd,
+				  const u32 regno, u64 value);
+static inline u32 qib_read_kreg32(const struct qib_devdata *, const u32);
+static void write_7322_initregs(struct qib_devdata *);
+static void write_7322_init_portregs(struct qib_pportdata *);
+static void setup_7322_link_recovery(struct qib_pportdata *, u32);
+static void check_7322_rxe_status(struct qib_pportdata *);
+static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *, u64, u32 *);
+
+/**
+ * qib_read_ureg32 - read 32-bit virtualized per-context register
+ * @dd: device
+ * @regno: register number
+ * @ctxt: context number
+ *
+ * Return the contents of a register that is virtualized to be per context.
+ * Returns 0 if the chip is absent or unmapped (not distinguishable
+ * from valid contents at runtime; we may add a separate error
+ * variable at some point).
+ */
+static inline u32 qib_read_ureg32(const struct qib_devdata *dd,
+				  enum qib_ureg regno, int ctxt)
+{
+	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
+		return 0;
+	return readl(regno + (u64 __iomem *)(
+		(dd->ureg_align * ctxt) + (dd->userbase ?
+		 (char __iomem *)dd->userbase :
+		 (char __iomem *)dd->kregbase + dd->uregbase)));
+}
+
+/**
+ * qib_read_ureg - read virtualized per-context register
+ * @dd: device
+ * @regno: register number
+ * @ctxt: context number
+ *
+ * Return the contents of a register that is virtualized to be per context.
+ * Returns 0 if the chip is absent or unmapped (not distinguishable
+ * from valid contents at runtime; we may add a separate error
+ * variable at some point).
+ */
+static inline u64 qib_read_ureg(const struct qib_devdata *dd,
+				enum qib_ureg regno, int ctxt)
+{
+	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
+		return 0;
+	return readq(regno + (u64 __iomem *)(
+		(dd->ureg_align * ctxt) + (dd->userbase ?
+		 (char __iomem *)dd->userbase :
+		 (char __iomem *)dd->kregbase + dd->uregbase)));
+}
+
+/**
+ * qib_write_ureg - write virtualized per-context register
+ * @dd: device
+ * @regno: register number
+ * @value: value
+ * @ctxt: context
+ *
+ * Write the contents of a register that is virtualized to be per context.
+ */
+static inline void qib_write_ureg(const struct qib_devdata *dd,
+				  enum qib_ureg regno, u64 value, int ctxt)
+{
+	u64 __iomem *ubase;
+	if (dd->userbase)
+		ubase = (u64 __iomem *)
+			((char __iomem *) dd->userbase +
+			 dd->ureg_align * ctxt);
+	else
+		ubase = (u64 __iomem *)
+			(dd->uregbase +
+			 (char __iomem *) dd->kregbase +
+			 dd->ureg_align * ctxt);
+
+	if (dd->kregbase && (dd->flags & QIB_PRESENT))
+		writeq(value, &ubase[regno]);
+}
+
+static inline u32 qib_read_kreg32(const struct qib_devdata *dd,
+				  const u32 regno)
+{
+	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
+		return -1;
+	return readl((u32 __iomem *) &dd->kregbase[regno]);
+}
+
+static inline u64 qib_read_kreg64(const struct qib_devdata *dd,
+				  const u32 regno)
+{
+	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
+		return -1;
+	return readq(&dd->kregbase[regno]);
+}
+
+static inline void qib_write_kreg(const struct qib_devdata *dd,
+				  const u32 regno, u64 value)
+{
+	if (dd->kregbase && (dd->flags & QIB_PRESENT))
+		writeq(value, &dd->kregbase[regno]);
+}
+
+/*
+ * not many sanity checks for the port-specific kernel register routines,
+ * since they are only used when it's known to be safe.
+ */
+static inline u64 qib_read_kreg_port(const struct qib_pportdata *ppd,
+				     const u16 regno)
+{
+	if (!ppd->cpspec->kpregbase || !(ppd->dd->flags & QIB_PRESENT))
+		return 0ULL;
+	return readq(&ppd->cpspec->kpregbase[regno]);
+}
+
+static inline void qib_write_kreg_port(const struct qib_pportdata *ppd,
+				       const u16 regno, u64 value)
+{
+	if (ppd->cpspec && ppd->dd && ppd->cpspec->kpregbase &&
+	    (ppd->dd->flags & QIB_PRESENT))
+		writeq(value, &ppd->cpspec->kpregbase[regno]);
+}
+
+/**
+ * qib_write_kreg_ctxt - write a device's per-ctxt 64-bit kernel register
+ * @dd: the qlogic_ib device
+ * @regno: the register number to write
+ * @ctxt: the context containing the register
+ * @value: the value to write
+ */
+static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd,
+				       const u16 regno, unsigned ctxt,
+				       u64 value)
+{
+	qib_write_kreg(dd, regno + ctxt, value);
+}
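+/*
+ * For example, qib_write_kreg_ctxt(dd, krc_rcvhdraddr, 3, addr) writes
+ * RcvHdrAddr3, since the per-context registers are laid out as
+ * consecutive 64-bit registers (hence the regno + ctxt above).
+ */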
+
+static inline u64 read_7322_creg(const struct qib_devdata *dd, u16 regno)
+{
+	if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
+		return 0;
+	return readq(&dd->cspec->cregbase[regno]);
+}
+
+static inline u32 read_7322_creg32(const struct qib_devdata *dd, u16 regno)
+{
+	if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
+		return 0;
+	return readl(&dd->cspec->cregbase[regno]);
+}
+
+static inline void write_7322_creg_port(const struct qib_pportdata *ppd,
+					u16 regno, u64 value)
+{
+	if (ppd->cpspec && ppd->cpspec->cpregbase &&
+	    (ppd->dd->flags & QIB_PRESENT))
+		writeq(value, &ppd->cpspec->cpregbase[regno]);
+}
+
+static inline u64 read_7322_creg_port(const struct qib_pportdata *ppd,
+				      u16 regno)
+{
+	if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
+	    !(ppd->dd->flags & QIB_PRESENT))
+		return 0;
+	return readq(&ppd->cpspec->cpregbase[regno]);
+}
+
+static inline u32 read_7322_creg32_port(const struct qib_pportdata *ppd,
+					u16 regno)
+{
+	if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
+	    !(ppd->dd->flags & QIB_PRESENT))
+		return 0;
+	return readl(&ppd->cpspec->cpregbase[regno]);
+}
+
+/* bits in Control register */
+#define QLOGIC_IB_C_RESET SYM_MASK(Control, SyncReset)
+#define QLOGIC_IB_C_SDMAFETCHPRIOEN SYM_MASK(Control, SDmaDescFetchPriorityEn)
+
+/* bits in general interrupt regs */
+#define QIB_I_RCVURG_LSB SYM_LSB(IntMask, RcvUrg0IntMask)
+#define QIB_I_RCVURG_RMASK MASK_ACROSS(0, 17)
+#define QIB_I_RCVURG_MASK (QIB_I_RCVURG_RMASK << QIB_I_RCVURG_LSB)
+#define QIB_I_RCVAVAIL_LSB SYM_LSB(IntMask, RcvAvail0IntMask)
+#define QIB_I_RCVAVAIL_RMASK MASK_ACROSS(0, 17)
+#define QIB_I_RCVAVAIL_MASK (QIB_I_RCVAVAIL_RMASK << QIB_I_RCVAVAIL_LSB)
+#define QIB_I_C_ERROR INT_MASK(Err)
+
+#define QIB_I_SPIOSENT (INT_MASK_P(SendDone, 0) | INT_MASK_P(SendDone, 1))
+#define QIB_I_SPIOBUFAVAIL INT_MASK(SendBufAvail)
+#define QIB_I_GPIO INT_MASK(AssertGPIO)
+#define QIB_I_P_SDMAINT(pidx) \
+	(INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
+	 INT_MASK_P(SDmaProgress, pidx) | \
+	 INT_MASK_PM(SDmaCleanupDone, pidx))
+
+/* Interrupt bits that are "per port" */
+#define QIB_I_P_BITSEXTANT(pidx) \
+	(INT_MASK_P(Err, pidx) | INT_MASK_P(SendDone, pidx) | \
+	INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
+	INT_MASK_P(SDmaProgress, pidx) | \
+	INT_MASK_PM(SDmaCleanupDone, pidx))
+
+/* Interrupt bits that are common to a device */
+/* currently unused: QIB_I_SPIOSENT */
+#define QIB_I_C_BITSEXTANT \
+	(QIB_I_RCVURG_MASK | QIB_I_RCVAVAIL_MASK | \
+	QIB_I_SPIOSENT | \
+	QIB_I_C_ERROR | QIB_I_SPIOBUFAVAIL | QIB_I_GPIO)
+
+#define QIB_I_BITSEXTANT (QIB_I_C_BITSEXTANT | \
+	QIB_I_P_BITSEXTANT(0) | QIB_I_P_BITSEXTANT(1))
+
+/*
+ * Error bits that are "per port".
+ */
+#define QIB_E_P_IBSTATUSCHANGED ERR_MASK_N(IBStatusChanged)
+#define QIB_E_P_SHDR ERR_MASK_N(SHeadersErr)
+#define QIB_E_P_VL15_BUF_MISUSE ERR_MASK_N(VL15BufMisuseErr)
+#define QIB_E_P_SND_BUF_MISUSE ERR_MASK_N(SendBufMisuseErr)
+#define QIB_E_P_SUNSUPVL ERR_MASK_N(SendUnsupportedVLErr)
+#define QIB_E_P_SUNEXP_PKTNUM ERR_MASK_N(SendUnexpectedPktNumErr)
+#define QIB_E_P_SDROP_DATA ERR_MASK_N(SendDroppedDataPktErr)
+#define QIB_E_P_SDROP_SMP ERR_MASK_N(SendDroppedSmpPktErr)
+#define QIB_E_P_SPKTLEN ERR_MASK_N(SendPktLenErr)
+#define QIB_E_P_SUNDERRUN ERR_MASK_N(SendUnderRunErr)
+#define QIB_E_P_SMAXPKTLEN ERR_MASK_N(SendMaxPktLenErr)
+#define QIB_E_P_SMINPKTLEN ERR_MASK_N(SendMinPktLenErr)
+#define QIB_E_P_RIBLOSTLINK ERR_MASK_N(RcvIBLostLinkErr)
+#define QIB_E_P_RHDR ERR_MASK_N(RcvHdrErr)
+#define QIB_E_P_RHDRLEN ERR_MASK_N(RcvHdrLenErr)
+#define QIB_E_P_RBADTID ERR_MASK_N(RcvBadTidErr)
+#define QIB_E_P_RBADVERSION ERR_MASK_N(RcvBadVersionErr)
+#define QIB_E_P_RIBFLOW ERR_MASK_N(RcvIBFlowErr)
+#define QIB_E_P_REBP ERR_MASK_N(RcvEBPErr)
+#define QIB_E_P_RUNSUPVL ERR_MASK_N(RcvUnsupportedVLErr)
+#define QIB_E_P_RUNEXPCHAR ERR_MASK_N(RcvUnexpectedCharErr)
+#define QIB_E_P_RSHORTPKTLEN ERR_MASK_N(RcvShortPktLenErr)
+#define QIB_E_P_RLONGPKTLEN ERR_MASK_N(RcvLongPktLenErr)
+#define QIB_E_P_RMAXPKTLEN ERR_MASK_N(RcvMaxPktLenErr)
+#define QIB_E_P_RMINPKTLEN ERR_MASK_N(RcvMinPktLenErr)
+#define QIB_E_P_RICRC ERR_MASK_N(RcvICRCErr)
+#define QIB_E_P_RVCRC ERR_MASK_N(RcvVCRCErr)
+#define QIB_E_P_RFORMATERR ERR_MASK_N(RcvFormatErr)
+
+#define QIB_E_P_SDMA1STDESC ERR_MASK_N(SDma1stDescErr)
+#define QIB_E_P_SDMABASE ERR_MASK_N(SDmaBaseErr)
+#define QIB_E_P_SDMADESCADDRMISALIGN ERR_MASK_N(SDmaDescAddrMisalignErr)
+#define QIB_E_P_SDMADWEN ERR_MASK_N(SDmaDwEnErr)
+#define QIB_E_P_SDMAGENMISMATCH ERR_MASK_N(SDmaGenMismatchErr)
+#define QIB_E_P_SDMAHALT ERR_MASK_N(SDmaHaltErr)
+#define QIB_E_P_SDMAMISSINGDW ERR_MASK_N(SDmaMissingDwErr)
+#define QIB_E_P_SDMAOUTOFBOUND ERR_MASK_N(SDmaOutOfBoundErr)
+#define QIB_E_P_SDMARPYTAG ERR_MASK_N(SDmaRpyTagErr)
+#define QIB_E_P_SDMATAILOUTOFBOUND ERR_MASK_N(SDmaTailOutOfBoundErr)
+#define QIB_E_P_SDMAUNEXPDATA ERR_MASK_N(SDmaUnexpDataErr)
+
+/* Error bits that are common to a device */
+#define QIB_E_RESET ERR_MASK(ResetNegated)
+#define QIB_E_HARDWARE ERR_MASK(HardwareErr)
+#define QIB_E_INVALIDADDR ERR_MASK(InvalidAddrErr)
+
+
+/*
+ * Per chip (rather than per-port) errors.  Most either do
+ * nothing but trigger a print (because they self-recover, or
+ * always occur in tandem with other errors that handle the
+ * issue), or indicate errors with no recovery, where we still
+ * want to know that they happened.
+ */
+#define QIB_E_SBUF_VL15_MISUSE ERR_MASK(SBufVL15MisUseErr)
+#define QIB_E_BADEEP ERR_MASK(InvalidEEPCmd)
+#define QIB_E_VLMISMATCH ERR_MASK(SendVLMismatchErr)
+#define QIB_E_ARMLAUNCH ERR_MASK(SendArmLaunchErr)
+#define QIB_E_SPCLTRIG ERR_MASK(SendSpecialTriggerErr)
+#define QIB_E_RRCVHDRFULL ERR_MASK(RcvHdrFullErr)
+#define QIB_E_RRCVEGRFULL ERR_MASK(RcvEgrFullErr)
+#define QIB_E_RCVCTXTSHARE ERR_MASK(RcvContextShareErr)
+
+/* SDMA chip errors (not per port)
+ * QIB_E_SDMA_BUF_DUP needs no special handling, because we will also get
+ * the SDMAHALT error immediately, so we just print the dup error via the
+ * E_AUTO mechanism.  This is true of most of the per-port fatal errors
+ * as well, but since this is port-independent, by definition, it's
+ * handled a bit differently.  SDMA_VL15 and SDMA_WRONG_PORT are per
+ * packet send errors, and so are handled in the same manner as other
+ * per-packet errors.
+ */
+#define QIB_E_SDMA_VL15 ERR_MASK(SDmaVL15Err)
+#define QIB_E_SDMA_WRONG_PORT ERR_MASK(SDmaWrongPortErr)
+#define QIB_E_SDMA_BUF_DUP ERR_MASK(SDmaBufMaskDuplicateErr)
+
+/*
+ * The below is functionally equivalent to the legacy QLOGIC_IB_E_PKTERRS;
+ * it is used to print "common" packet errors.
+ */
+#define QIB_E_P_PKTERRS (QIB_E_P_SPKTLEN |\
+	QIB_E_P_SDROP_DATA | QIB_E_P_RVCRC |\
+	QIB_E_P_RICRC | QIB_E_P_RSHORTPKTLEN |\
+	QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
+	QIB_E_P_REBP)
+
+/* Error bits that are packet-related (Receive, per-port) */
+#define QIB_E_P_RPKTERRS (\
+	QIB_E_P_RHDRLEN | QIB_E_P_RBADTID | \
+	QIB_E_P_RBADVERSION | QIB_E_P_RHDR | \
+	QIB_E_P_RLONGPKTLEN | QIB_E_P_RSHORTPKTLEN |\
+	QIB_E_P_RMAXPKTLEN | QIB_E_P_RMINPKTLEN | \
+	QIB_E_P_RFORMATERR | QIB_E_P_RUNSUPVL | \
+	QIB_E_P_RUNEXPCHAR | QIB_E_P_RIBFLOW | QIB_E_P_REBP)
+
+/*
+ * Error bits that are Send-related (per port)
+ * (ARMLAUNCH excluded from E_SPKTERRS because it gets special handling).
+ * All of these potentially need to have a buffer disarmed
+ */
+#define QIB_E_P_SPKTERRS (\
+	QIB_E_P_SUNEXP_PKTNUM |\
+	QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
+	QIB_E_P_SMAXPKTLEN |\
+	QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
+	QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN | \
+	QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNSUPVL)
+
+#define QIB_E_SPKTERRS ( \
+		QIB_E_SBUF_VL15_MISUSE | QIB_E_VLMISMATCH | \
+		ERR_MASK_N(SendUnsupportedVLErr) |			\
+		QIB_E_SPCLTRIG | QIB_E_SDMA_VL15 | QIB_E_SDMA_WRONG_PORT)
+
+#define QIB_E_P_SDMAERRS ( \
+	QIB_E_P_SDMAHALT | \
+	QIB_E_P_SDMADESCADDRMISALIGN | \
+	QIB_E_P_SDMAUNEXPDATA | \
+	QIB_E_P_SDMAMISSINGDW | \
+	QIB_E_P_SDMADWEN | \
+	QIB_E_P_SDMARPYTAG | \
+	QIB_E_P_SDMA1STDESC | \
+	QIB_E_P_SDMABASE | \
+	QIB_E_P_SDMATAILOUTOFBOUND | \
+	QIB_E_P_SDMAOUTOFBOUND | \
+	QIB_E_P_SDMAGENMISMATCH)
+
+/*
+ * This sets some bits more than once, but makes it more obvious which
+ * bits are not handled under other categories, and the repeat definition
+ * is not a problem.
+ */
+#define QIB_E_P_BITSEXTANT ( \
+	QIB_E_P_SPKTERRS | QIB_E_P_PKTERRS | QIB_E_P_RPKTERRS | \
+	QIB_E_P_RIBLOSTLINK | QIB_E_P_IBSTATUSCHANGED | \
+	QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNDERRUN | \
+	QIB_E_P_SHDR | QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SDMAERRS \
+	)
+
+/*
+ * These are errors that can occur when the link
+ * changes state while a packet is being sent or received.  This doesn't
+ * cover things like EBP or VCRC that can result from the sender's link
+ * changing state, leaving us with a "known bad" packet.
+ * All of these are "per port", so renamed:
+ */
+#define QIB_E_P_LINK_PKTERRS (\
+	QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
+	QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN |\
+	QIB_E_P_RSHORTPKTLEN | QIB_E_P_RMINPKTLEN |\
+	QIB_E_P_RUNEXPCHAR)
+
+/*
+ * This sets some bits more than once, but makes it more obvious which
+ * bits are not handled under other categories (such as QIB_E_SPKTERRS),
+ * and the repeat definition is not a problem.
+ */
+#define QIB_E_C_BITSEXTANT (\
+	QIB_E_HARDWARE | QIB_E_INVALIDADDR | QIB_E_BADEEP |\
+	QIB_E_ARMLAUNCH | QIB_E_VLMISMATCH | QIB_E_RRCVHDRFULL |\
+	QIB_E_RRCVEGRFULL | QIB_E_RESET | QIB_E_SBUF_VL15_MISUSE)
+
+/* Likewise Neuter E_SPKT_ERRS_IGNORE */
+#define E_SPKT_ERRS_IGNORE 0
+
+#define QIB_EXTS_MEMBIST_DISABLED \
+	SYM_MASK(EXTStatus, MemBISTDisabled)
+#define QIB_EXTS_MEMBIST_ENDTEST \
+	SYM_MASK(EXTStatus, MemBISTEndTest)
+
+#define QIB_E_SPIOARMLAUNCH \
+	ERR_MASK(SendArmLaunchErr)
+
+#define IBA7322_IBCC_LINKINITCMD_MASK SYM_RMASK(IBCCtrlA_0, LinkInitCmd)
+#define IBA7322_IBCC_LINKCMD_SHIFT SYM_LSB(IBCCtrlA_0, LinkCmd)
+
+/*
+ * IBTA_1_2 is set when multiple speeds are enabled (normal),
+ * and also if forced QDR (only QDR enabled).  It's enabled for the
+ * forced QDR case so that scrambling will be enabled by the TS3
+ * exchange, when supported by both sides of the link.
+ */
+#define IBA7322_IBC_IBTA_1_2_MASK SYM_MASK(IBCCtrlB_0, IB_ENHANCED_MODE)
+#define IBA7322_IBC_MAX_SPEED_MASK SYM_MASK(IBCCtrlB_0, SD_SPEED)
+#define IBA7322_IBC_SPEED_QDR SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR)
+#define IBA7322_IBC_SPEED_DDR SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR)
+#define IBA7322_IBC_SPEED_SDR SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR)
+#define IBA7322_IBC_SPEED_MASK (SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR) | \
+	SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR) | SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR))
+#define IBA7322_IBC_SPEED_LSB SYM_LSB(IBCCtrlB_0, SD_SPEED_SDR)
+
+#define IBA7322_LEDBLINK_OFF_SHIFT SYM_LSB(RcvPktLEDCnt_0, OFFperiod)
+#define IBA7322_LEDBLINK_ON_SHIFT SYM_LSB(RcvPktLEDCnt_0, ONperiod)
+
+#define IBA7322_IBC_WIDTH_AUTONEG SYM_MASK(IBCCtrlB_0, IB_NUM_CHANNELS)
+#define IBA7322_IBC_WIDTH_4X_ONLY (1<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))
+#define IBA7322_IBC_WIDTH_1X_ONLY (0<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))
+
+#define IBA7322_IBC_RXPOL_MASK SYM_MASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
+#define IBA7322_IBC_RXPOL_LSB SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
+#define IBA7322_IBC_HRTBT_MASK (SYM_MASK(IBCCtrlB_0, HRTBT_AUTO) | \
+	SYM_MASK(IBCCtrlB_0, HRTBT_ENB))
+#define IBA7322_IBC_HRTBT_RMASK (IBA7322_IBC_HRTBT_MASK >> \
+	SYM_LSB(IBCCtrlB_0, HRTBT_ENB))
+#define IBA7322_IBC_HRTBT_LSB SYM_LSB(IBCCtrlB_0, HRTBT_ENB)
+
+#define IBA7322_REDIRECT_VEC_PER_REG 12
+
+#define IBA7322_SENDCHK_PKEY SYM_MASK(SendCheckControl_0, PKey_En)
+#define IBA7322_SENDCHK_BTHQP SYM_MASK(SendCheckControl_0, BTHQP_En)
+#define IBA7322_SENDCHK_SLID SYM_MASK(SendCheckControl_0, SLID_En)
+#define IBA7322_SENDCHK_RAW_IPV6 SYM_MASK(SendCheckControl_0, RawIPV6_En)
+#define IBA7322_SENDCHK_MINSZ SYM_MASK(SendCheckControl_0, PacketTooSmall_En)
+
+#define AUTONEG_TRIES 3 /* sequential retries to negotiate DDR */
+
+#define HWE_AUTO(fldname) { .mask = SYM_MASK(HwErrMask, fldname##Mask), \
+	.msg = #fldname }
+#define HWE_AUTO_P(fldname, port) { .mask = SYM_MASK(HwErrMask, \
+	fldname##Mask##_##port), .msg = #fldname }
+static const struct qib_hwerror_msgs qib_7322_hwerror_msgs[] = {
+	HWE_AUTO_P(IBSerdesPClkNotDetect, 1),
+	HWE_AUTO_P(IBSerdesPClkNotDetect, 0),
+	HWE_AUTO(PCIESerdesPClkNotDetect),
+	HWE_AUTO(PowerOnBISTFailed),
+	HWE_AUTO(TempsenseTholdReached),
+	HWE_AUTO(MemoryErr),
+	HWE_AUTO(PCIeBusParityErr),
+	HWE_AUTO(PcieCplTimeout),
+	HWE_AUTO(PciePoisonedTLP),
+	HWE_AUTO_P(SDmaMemReadErr, 1),
+	HWE_AUTO_P(SDmaMemReadErr, 0),
+	HWE_AUTO_P(IBCBusFromSPCParityErr, 1),
+	HWE_AUTO_P(IBCBusFromSPCParityErr, 0),
+	HWE_AUTO_P(statusValidNoEop, 1),
+	HWE_AUTO_P(statusValidNoEop, 0),
+	HWE_AUTO(LATriggered),
+	{ .mask = 0 }
+};
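+/*
+ * For illustration, HWE_AUTO(MemoryErr) above expands to
+ *	{ .mask = SYM_MASK(HwErrMask, MemoryErrMask), .msg = "MemoryErr" }
+ */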
+
+#define E_AUTO(fldname) { .mask = SYM_MASK(ErrMask, fldname##Mask), \
+	.msg = #fldname }
+#define E_P_AUTO(fldname) { .mask = SYM_MASK(ErrMask_0, fldname##Mask), \
+	.msg = #fldname }
+static const struct qib_hwerror_msgs qib_7322error_msgs[] = {
+	E_AUTO(ResetNegated),
+	E_AUTO(HardwareErr),
+	E_AUTO(InvalidAddrErr),
+	E_AUTO(SDmaVL15Err),
+	E_AUTO(SBufVL15MisUseErr),
+	E_AUTO(InvalidEEPCmd),
+	E_AUTO(RcvContextShareErr),
+	E_AUTO(SendVLMismatchErr),
+	E_AUTO(SendArmLaunchErr),
+	E_AUTO(SendSpecialTriggerErr),
+	E_AUTO(SDmaWrongPortErr),
+	E_AUTO(SDmaBufMaskDuplicateErr),
+	E_AUTO(RcvHdrFullErr),
+	E_AUTO(RcvEgrFullErr),
+	{ .mask = 0 }
+};
+
+static const struct  qib_hwerror_msgs qib_7322p_error_msgs[] = {
+	E_P_AUTO(IBStatusChanged),
+	E_P_AUTO(SHeadersErr),
+	E_P_AUTO(VL15BufMisuseErr),
+	/*
	 * SDmaHaltErr is not really an error, so make that clearer.
+	 */
+	{.mask = SYM_MASK(ErrMask_0, SDmaHaltErrMask), .msg = "SDmaHalted"},
+	E_P_AUTO(SDmaDescAddrMisalignErr),
+	E_P_AUTO(SDmaUnexpDataErr),
+	E_P_AUTO(SDmaMissingDwErr),
+	E_P_AUTO(SDmaDwEnErr),
+	E_P_AUTO(SDmaRpyTagErr),
+	E_P_AUTO(SDma1stDescErr),
+	E_P_AUTO(SDmaBaseErr),
+	E_P_AUTO(SDmaTailOutOfBoundErr),
+	E_P_AUTO(SDmaOutOfBoundErr),
+	E_P_AUTO(SDmaGenMismatchErr),
+	E_P_AUTO(SendBufMisuseErr),
+	E_P_AUTO(SendUnsupportedVLErr),
+	E_P_AUTO(SendUnexpectedPktNumErr),
+	E_P_AUTO(SendDroppedDataPktErr),
+	E_P_AUTO(SendDroppedSmpPktErr),
+	E_P_AUTO(SendPktLenErr),
+	E_P_AUTO(SendUnderRunErr),
+	E_P_AUTO(SendMaxPktLenErr),
+	E_P_AUTO(SendMinPktLenErr),
+	E_P_AUTO(RcvIBLostLinkErr),
+	E_P_AUTO(RcvHdrErr),
+	E_P_AUTO(RcvHdrLenErr),
+	E_P_AUTO(RcvBadTidErr),
+	E_P_AUTO(RcvBadVersionErr),
+	E_P_AUTO(RcvIBFlowErr),
+	E_P_AUTO(RcvEBPErr),
+	E_P_AUTO(RcvUnsupportedVLErr),
+	E_P_AUTO(RcvUnexpectedCharErr),
+	E_P_AUTO(RcvShortPktLenErr),
+	E_P_AUTO(RcvLongPktLenErr),
+	E_P_AUTO(RcvMaxPktLenErr),
+	E_P_AUTO(RcvMinPktLenErr),
+	E_P_AUTO(RcvICRCErr),
+	E_P_AUTO(RcvVCRCErr),
+	E_P_AUTO(RcvFormatErr),
+	{ .mask = 0 }
+};
+
+/*
+ * Below generates "auto-message" for interrupts not specific to any port or
+ * context
+ */
+#define INTR_AUTO(fldname) { .mask = SYM_MASK(IntMask, fldname##Mask), \
+	.msg = #fldname }
+/* Below generates "auto-message" for interrupts specific to a port */
+#define INTR_AUTO_P(fldname) { .mask = MASK_ACROSS(\
+	SYM_LSB(IntMask, fldname##Mask##_0), \
+	SYM_LSB(IntMask, fldname##Mask##_1)), \
+	.msg = #fldname "_P" }
+/* For some reason, the SerDesTrimDone bits are reversed */
+#define INTR_AUTO_PI(fldname) { .mask = MASK_ACROSS(\
+	SYM_LSB(IntMask, fldname##Mask##_1), \
+	SYM_LSB(IntMask, fldname##Mask##_0)), \
+	.msg = #fldname "_P" }
+/*
+ * Below generates "auto-message" for interrupts specific to a context,
+ * with ctxt-number appended
+ */
+#define INTR_AUTO_C(fldname) { .mask = MASK_ACROSS(\
+	SYM_LSB(IntMask, fldname##0IntMask), \
+	SYM_LSB(IntMask, fldname##17IntMask)), \
+	.msg = #fldname "_C"}
+
+static const struct  qib_hwerror_msgs qib_7322_intr_msgs[] = {
+	INTR_AUTO_P(SDmaInt),
+	INTR_AUTO_P(SDmaProgressInt),
+	INTR_AUTO_P(SDmaIdleInt),
+	INTR_AUTO_P(SDmaCleanupDone),
+	INTR_AUTO_C(RcvUrg),
+	INTR_AUTO_P(ErrInt),
+	INTR_AUTO(ErrInt),      /* non-port-specific errs */
+	INTR_AUTO(AssertGPIOInt),
+	INTR_AUTO_P(SendDoneInt),
+	INTR_AUTO(SendBufAvailInt),
+	INTR_AUTO_C(RcvAvail),
+	{ .mask = 0 }
+};
+
+#define TXSYMPTOM_AUTO_P(fldname) \
+	{ .mask = SYM_MASK(SendHdrErrSymptom_0, fldname), .msg = #fldname }
+static const struct  qib_hwerror_msgs hdrchk_msgs[] = {
+	TXSYMPTOM_AUTO_P(NonKeyPacket),
+	TXSYMPTOM_AUTO_P(GRHFail),
+	TXSYMPTOM_AUTO_P(PkeyFail),
+	TXSYMPTOM_AUTO_P(QPFail),
+	TXSYMPTOM_AUTO_P(SLIDFail),
+	TXSYMPTOM_AUTO_P(RawIPV6),
+	TXSYMPTOM_AUTO_P(PacketTooSmall),
+	{ .mask = 0 }
+};
+
+#define IBA7322_HDRHEAD_PKTINT_SHIFT 32 /* interrupt cnt in upper 32 bits */
+
+/*
+ * Called when we might have an error that is specific to a particular
+ * PIO buffer, and may need to cancel that buffer so it can be re-used;
+ * we don't need to force an update of pioavail here.
+ */
+static void qib_disarm_7322_senderrbufs(struct qib_pportdata *ppd)
+{
+	struct qib_devdata *dd = ppd->dd;
+	u32 i;
+	int any;
+	u32 piobcnt = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
+	u32 regcnt = (piobcnt + BITS_PER_LONG - 1) / BITS_PER_LONG;
+	unsigned long sbuf[4];
+
+	/*
+	 * It's possible that sendbuffererror could have bits set; might
+	 * have already done this as a result of hardware error handling.
+	 */
+	any = 0;
+	for (i = 0; i < regcnt; ++i) {
+		sbuf[i] = qib_read_kreg64(dd, kr_sendbuffererror + i);
+		if (sbuf[i]) {
+			any = 1;
+			qib_write_kreg(dd, kr_sendbuffererror + i, sbuf[i]);
+		}
+	}
+
+	if (any)
+		qib_disarm_piobufs_set(dd, sbuf, piobcnt);
+}
+
+/* No txe_recover yet, if ever */
+
+/* No decode_errors yet */
+static void err_decode(char *msg, size_t len, u64 errs,
+		       const struct qib_hwerror_msgs *msp)
+{
+	u64 these, lmask;
+	int took, multi, n = 0;
+
+	while (msp && msp->mask) {
+		multi = (msp->mask & (msp->mask - 1));
+		while (errs & msp->mask) {
+			these = (errs & msp->mask);
+			lmask = (these & (these - 1)) ^ these;
+			if (len) {
+				if (n++) {
+					/* separate the strings */
+					*msg++ = ',';
+					len--;
+				}
+				took = scnprintf(msg, len, "%s", msp->msg);
+				len -= took;
+				msg += took;
+			}
+			errs &= ~lmask;
+			if (len && multi) {
+				/* More than one bit this mask */
+				int idx = -1;
+
+				while (lmask & msp->mask) {
+					++idx;
+					lmask >>= 1;
+				}
+				took = scnprintf(msg, len, "_%d", idx);
+				len -= took;
+				msg += took;
+			}
+		}
+		++msp;
+	}
+	/* If some bits are left, show in hex. */
+	if (len && errs)
+		snprintf(msg, len, "%sMORE:%llX", n ? "," : "",
+			(unsigned long long) errs);
+}
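+/*
+ * Example output (illustrative): for a multi-bit mask such as
+ * INTR_AUTO_P(SDmaInt), the bit's offset within the mask is appended,
+ * e.g. "SDmaInt_P_1" for port 1; bits matching no table entry are
+ * appended as ",MORE:<hex>".
+ */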
+
+/* only called if r1 set */
+static void flush_fifo(struct qib_pportdata *ppd)
+{
+	struct qib_devdata *dd = ppd->dd;
+	u32 __iomem *piobuf;
+	u32 bufn;
+	u32 *hdr;
+	u64 pbc;
+	const unsigned hdrwords = 7;
+	static struct qib_ib_header ibhdr = {
+		.lrh[0] = cpu_to_be16(0xF000 | QIB_LRH_BTH),
+		.lrh[1] = IB_LID_PERMISSIVE,
+		.lrh[2] = cpu_to_be16(hdrwords + SIZE_OF_CRC),
+		.lrh[3] = IB_LID_PERMISSIVE,
+		.u.oth.bth[0] = cpu_to_be32(
+			(IB_OPCODE_UD_SEND_ONLY << 24) | QIB_DEFAULT_P_KEY),
+		.u.oth.bth[1] = cpu_to_be32(0),
+		.u.oth.bth[2] = cpu_to_be32(0),
+		.u.oth.u.ud.deth[0] = cpu_to_be32(0),
+		.u.oth.u.ud.deth[1] = cpu_to_be32(0),
+	};
+
+	/*
+	 * Send a dummy VL15 packet to flush the launch FIFO.
+	 * This will not actually be sent since the TxeBypassIbc bit is set.
+	 */
+	pbc = PBC_7322_VL15_SEND |
+		(((u64)ppd->hw_pidx) << (PBC_PORT_SEL_LSB + 32)) |
+		(hdrwords + SIZE_OF_CRC);
+	piobuf = qib_7322_getsendbuf(ppd, pbc, &bufn);
+	if (!piobuf)
+		return;
+	writeq(pbc, piobuf);
+	hdr = (u32 *) &ibhdr;
+	if (dd->flags & QIB_PIO_FLUSH_WC) {
+		qib_flush_wc();
+		qib_pio_copy(piobuf + 2, hdr, hdrwords - 1);
+		qib_flush_wc();
+		__raw_writel(hdr[hdrwords - 1], piobuf + hdrwords + 1);
+		qib_flush_wc();
+	} else
+		qib_pio_copy(piobuf + 2, hdr, hdrwords);
+	qib_sendbuf_done(dd, bufn);
+}
+
+/*
+ * This is called with interrupts disabled and sdma_lock held.
+ */
+static void qib_7322_sdma_sendctrl(struct qib_pportdata *ppd, unsigned op)
+{
+	struct qib_devdata *dd = ppd->dd;
+	u64 set_sendctrl = 0;
+	u64 clr_sendctrl = 0;
+
+	if (op & QIB_SDMA_SENDCTRL_OP_ENABLE)
+		set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable);
+	else
+		clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable);
+
+	if (op & QIB_SDMA_SENDCTRL_OP_INTENABLE)
+		set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable);
+	else
+		clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable);
+
+	if (op & QIB_SDMA_SENDCTRL_OP_HALT)
+		set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt);
+	else
+		clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt);
+
+	if (op & QIB_SDMA_SENDCTRL_OP_DRAIN)
+		set_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) |
+				SYM_MASK(SendCtrl_0, TxeAbortIbc) |
+				SYM_MASK(SendCtrl_0, TxeDrainRmFifo);
+	else
+		clr_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) |
+				SYM_MASK(SendCtrl_0, TxeAbortIbc) |
+				SYM_MASK(SendCtrl_0, TxeDrainRmFifo);
+
+	spin_lock(&dd->sendctrl_lock);
+
+	/* If we are draining everything, block sends first */
+	if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
+		ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
+		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
+		qib_write_kreg(dd, kr_scratch, 0);
+	}
+
+	ppd->p_sendctrl |= set_sendctrl;
+	ppd->p_sendctrl &= ~clr_sendctrl;
+
+	if (op & QIB_SDMA_SENDCTRL_OP_CLEANUP)
+		qib_write_kreg_port(ppd, krp_sendctrl,
+				    ppd->p_sendctrl |
+				    SYM_MASK(SendCtrl_0, SDmaCleanup));
+	else
+		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
+	qib_write_kreg(dd, kr_scratch, 0);
+
+	if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
+		ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);
+		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
+		qib_write_kreg(dd, kr_scratch, 0);
+	}
+
+	spin_unlock(&dd->sendctrl_lock);
+
+	if ((op & QIB_SDMA_SENDCTRL_OP_DRAIN) && ppd->dd->cspec->r1)
+		flush_fifo(ppd);
+}
+
+static void qib_7322_sdma_hw_clean_up(struct qib_pportdata *ppd)
+{
+	__qib_sdma_process_event(ppd, qib_sdma_event_e50_hw_cleaned);
+}
+
+static void qib_sdma_7322_setlengen(struct qib_pportdata *ppd)
+{
+	/*
+	 * Set SendDmaLenGen and clear and set
+	 * the MSB of the generation count to enable generation checking
+	 * and load the internal generation counter.
+	 */
+	qib_write_kreg_port(ppd, krp_senddmalengen, ppd->sdma_descq_cnt);
+	qib_write_kreg_port(ppd, krp_senddmalengen,
+			    ppd->sdma_descq_cnt |
+			    (1ULL << QIB_7322_SendDmaLenGen_0_Generation_MSB));
+}
+
+/*
+ * Must be called with sdma_lock held, or before init finished.
+ */
+static void qib_sdma_update_7322_tail(struct qib_pportdata *ppd, u16 tail)
+{
+	/* Commit writes to memory and advance the tail on the chip */
+	wmb();
+	ppd->sdma_descq_tail = tail;
+	qib_write_kreg_port(ppd, krp_senddmatail, tail);
+}
+
+/*
+ * This is called with interrupts disabled and sdma_lock held.
+ */
+static void qib_7322_sdma_hw_start_up(struct qib_pportdata *ppd)
+{
+	/*
+	 * Drain all FIFOs.
+	 * The hardware doesn't require this but we do it so that verbs
+	 * and user applications don't wait for link active to send stale
+	 * data.
+	 */
+	sendctrl_7322_mod(ppd, QIB_SENDCTRL_FLUSH);
+
+	qib_sdma_7322_setlengen(ppd);
+	qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
+	ppd->sdma_head_dma[0] = 0;
+	qib_7322_sdma_sendctrl(ppd,
+		ppd->sdma_state.current_op | QIB_SDMA_SENDCTRL_OP_CLEANUP);
+}
+
+#define DISABLES_SDMA ( \
+	QIB_E_P_SDMAHALT | \
+	QIB_E_P_SDMADESCADDRMISALIGN | \
+	QIB_E_P_SDMAMISSINGDW | \
+	QIB_E_P_SDMADWEN | \
+	QIB_E_P_SDMARPYTAG | \
+	QIB_E_P_SDMA1STDESC | \
+	QIB_E_P_SDMABASE | \
+	QIB_E_P_SDMATAILOUTOFBOUND | \
+	QIB_E_P_SDMAOUTOFBOUND | \
+	QIB_E_P_SDMAGENMISMATCH)
+
+static void sdma_7322_p_errors(struct qib_pportdata *ppd, u64 errs)
+{
+	unsigned long flags;
+	struct qib_devdata *dd = ppd->dd;
+
+	errs &= QIB_E_P_SDMAERRS;
+
+	if (errs & QIB_E_P_SDMAUNEXPDATA)
+		qib_dev_err(dd, "IB%u:%u SDmaUnexpData\n", dd->unit,
+			    ppd->port);
+
+	spin_lock_irqsave(&ppd->sdma_lock, flags);
+
+	switch (ppd->sdma_state.current_state) {
+	case qib_sdma_state_s00_hw_down:
+		break;
+
+	case qib_sdma_state_s10_hw_start_up_wait:
+		if (errs & QIB_E_P_SDMAHALT)
+			__qib_sdma_process_event(ppd,
+				qib_sdma_event_e20_hw_started);
+		break;
+
+	case qib_sdma_state_s20_idle:
+		break;
+
+	case qib_sdma_state_s30_sw_clean_up_wait:
+		break;
+
+	case qib_sdma_state_s40_hw_clean_up_wait:
+		if (errs & QIB_E_P_SDMAHALT)
+			__qib_sdma_process_event(ppd,
+				qib_sdma_event_e50_hw_cleaned);
+		break;
+
+	case qib_sdma_state_s50_hw_halt_wait:
+		if (errs & QIB_E_P_SDMAHALT)
+			__qib_sdma_process_event(ppd,
+				qib_sdma_event_e60_hw_halted);
+		break;
+
+	case qib_sdma_state_s99_running:
+		__qib_sdma_process_event(ppd, qib_sdma_event_e7322_err_halted);
+		__qib_sdma_process_event(ppd, qib_sdma_event_e60_hw_halted);
+		break;
+	}
+
+	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+}
+
+/*
+ * handle per-device errors (not per-port errors)
+ */
+static noinline void handle_7322_errors(struct qib_devdata *dd)
+{
+	char *msg;
+	u64 iserr = 0;
+	u64 errs;
+	u64 mask;
+	int log_idx;
+
+	qib_stats.sps_errints++;
+	errs = qib_read_kreg64(dd, kr_errstatus);
+	if (!errs) {
+		qib_devinfo(dd->pcidev, "device error interrupt, "
+			 "but no error bits set!\n");
+		goto done;
+	}
+
+	/* don't report errors that are masked */
+	errs &= dd->cspec->errormask;
+	msg = dd->cspec->emsgbuf;
+
+	/* do these first, they are most important */
+	if (errs & QIB_E_HARDWARE) {
+		*msg = '\0';
+		qib_7322_handle_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf);
+	} else
+		for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
+			if (errs & dd->eep_st_masks[log_idx].errs_to_log)
+				qib_inc_eeprom_err(dd, log_idx, 1);
+
+	if (errs & QIB_E_SPKTERRS) {
+		qib_disarm_7322_senderrbufs(dd->pport);
+		qib_stats.sps_txerrs++;
+	} else if (errs & QIB_E_INVALIDADDR)
+		qib_stats.sps_txerrs++;
+	else if (errs & QIB_E_ARMLAUNCH) {
+		qib_stats.sps_txerrs++;
+		qib_disarm_7322_senderrbufs(dd->pport);
+	}
+	qib_write_kreg(dd, kr_errclear, errs);
+
+	/*
+	 * The ones we mask off are handled specially below
+	 * or above.  Also mask SDMADISABLED by default as it
+	 * is too chatty.
+	 */
+	mask = QIB_E_HARDWARE;
+	*msg = '\0';
+
+	err_decode(msg, sizeof dd->cspec->emsgbuf, errs & ~mask,
+		   qib_7322error_msgs);
+
+	/*
+	 * Getting reset is a tragedy for all ports. Mark the device
+	 * _and_ the ports as "offline" in a way meaningful to each.
+	 */
+	if (errs & QIB_E_RESET) {
+		int pidx;
+
+		qib_dev_err(dd, "Got reset, requires re-init "
+			    "(unload and reload driver)\n");
+		dd->flags &= ~QIB_INITTED;  /* needs re-init */
+		/* mark as having had error */
+		*dd->devstatusp |= QIB_STATUS_HWERROR;
+		for (pidx = 0; pidx < dd->num_pports; ++pidx)
+			if (dd->pport[pidx].link_speed_supported)
+				*dd->pport[pidx].statusp &= ~QIB_STATUS_IB_CONF;
+	}
+
+	if (*msg && iserr)
+		qib_dev_err(dd, "%s error\n", msg);
+
+	/*
+	 * If there were hdrq or egrfull errors, wake up any processes
+	 * waiting in poll.  We used to try to check which contexts had
+	 * the overflow, but given the cost of that and the chip reads
+	 * to support it, it's better to just wake everybody up if we
+	 * get an overflow; waiters can poll again if it's not them.
+	 */
+	if (errs & (ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr))) {
+		qib_handle_urcv(dd, ~0U);
+		if (errs & ERR_MASK(RcvEgrFullErr))
+			qib_stats.sps_buffull++;
+		else
+			qib_stats.sps_hdrfull++;
+	}
+
+done:
+	return;
+}
+
+static void reenable_chase(unsigned long opaque)
+{
+	struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
+
+	ppd->cpspec->chase_timer.expires = 0;
+	qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
+		QLOGIC_IB_IBCC_LINKINITCMD_POLL);
+}
+
+static void disable_chase(struct qib_pportdata *ppd, u64 tnow, u8 ibclt)
+{
+	ppd->cpspec->chase_end = 0;
+
+	if (!qib_chase)
+		return;
+
+	qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
+		QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
+	ppd->cpspec->chase_timer.expires = jiffies + QIB_CHASE_DIS_TIME;
+	add_timer(&ppd->cpspec->chase_timer);
+}
+
+static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst)
+{
+	u8 ibclt;
+	u64 tnow;
+
+	ibclt = (u8)SYM_FIELD(ibcst, IBCStatusA_0, LinkTrainingState);
+
+	/*
+	 * Detect and handle the state chase issue, where we can
+	 * get stuck if we are unlucky on timing on both sides of
+	 * the link.  If we are, we disable, set a timer, and
+	 * then re-enable.
+	 */
+	switch (ibclt) {
+	case IB_7322_LT_STATE_CFGRCVFCFG:
+	case IB_7322_LT_STATE_CFGWAITRMT:
+	case IB_7322_LT_STATE_TXREVLANES:
+	case IB_7322_LT_STATE_CFGENH:
+		tnow = get_jiffies_64();
+		if (ppd->cpspec->chase_end &&
+		     time_after64(tnow, ppd->cpspec->chase_end))
+			disable_chase(ppd, tnow, ibclt);
+		else if (!ppd->cpspec->chase_end)
+			ppd->cpspec->chase_end = tnow + QIB_CHASE_TIME;
+		break;
+	default:
+		ppd->cpspec->chase_end = 0;
+		break;
+	}
+
+	if (ibclt == IB_7322_LT_STATE_CFGTEST &&
+	    (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR))) {
+		force_h1(ppd);
+		ppd->cpspec->qdr_reforce = 1;
+	} else if (ppd->cpspec->qdr_reforce &&
+		   (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) &&
+		   (ibclt == IB_7322_LT_STATE_CFGENH ||
+		    ibclt == IB_7322_LT_STATE_CFGIDLE ||
+		    ibclt == IB_7322_LT_STATE_LINKUP))
+		force_h1(ppd);
+
+	if ((IS_QMH(ppd->dd) || IS_QME(ppd->dd)) &&
+	    ppd->link_speed_enabled == QIB_IB_QDR &&
+	    (ibclt == IB_7322_LT_STATE_CFGTEST ||
+	     ibclt == IB_7322_LT_STATE_CFGENH ||
+	     (ibclt >= IB_7322_LT_STATE_POLLACTIVE &&
+	      ibclt <= IB_7322_LT_STATE_SLEEPQUIET)))
+		adj_tx_serdes(ppd);
+
+	if (!ppd->cpspec->qdr_dfe_on && ibclt != IB_7322_LT_STATE_LINKUP &&
+	    ibclt <= IB_7322_LT_STATE_SLEEPQUIET) {
+		ppd->cpspec->qdr_dfe_on = 1;
+		ppd->cpspec->qdr_dfe_time = 0;
+		/* On link down, reenable QDR adaptation */
+		qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
+			ppd->dd->cspec->r1 ?
+				    QDR_STATIC_ADAPT_DOWN_R1 :
+				    QDR_STATIC_ADAPT_DOWN);
+	}
+}
+
+/*
+ * This is per-pport error handling.
+ * It will likely get its own MSIx interrupt (one for each port,
+ * although with just a single handler).
+ */
+static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
+{
+	char *msg;
+	u64 ignore_this_time = 0, iserr = 0, errs, fmask;
+	struct qib_devdata *dd = ppd->dd;
+
+	/* do this as soon as possible */
+	fmask = qib_read_kreg64(dd, kr_act_fmask);
+	if (!fmask)
+		check_7322_rxe_status(ppd);
+
+	errs = qib_read_kreg_port(ppd, krp_errstatus);
+	if (!errs)
+		qib_devinfo(dd->pcidev,
+			 "Port%d error interrupt, but no error bits set!\n",
+			 ppd->port);
+	if (!fmask)
+		errs &= ~QIB_E_P_IBSTATUSCHANGED;
+	if (!errs)
+		goto done;
+
+	msg = ppd->cpspec->epmsgbuf;
+	*msg = '\0';
+
+	if (errs & ~QIB_E_P_BITSEXTANT) {
+		err_decode(msg, sizeof ppd->cpspec->epmsgbuf,
+			   errs & ~QIB_E_P_BITSEXTANT, qib_7322p_error_msgs);
+		if (!*msg)
+			snprintf(msg, sizeof ppd->cpspec->epmsgbuf,
+				 "no others");
+		qib_dev_porterr(dd, ppd->port, "error interrupt with unknown"
+				" errors 0x%016Lx set (and %s)\n",
+				(errs & ~QIB_E_P_BITSEXTANT), msg);
+		*msg = '\0';
+	}
+
+	if (errs & QIB_E_P_SHDR) {
+		u64 symptom;
+
+		/* determine cause, then write to clear */
+		symptom = qib_read_kreg_port(ppd, krp_sendhdrsymptom);
+		qib_write_kreg_port(ppd, krp_sendhdrsymptom, 0);
+		err_decode(msg, sizeof ppd->cpspec->epmsgbuf, symptom,
+			   hdrchk_msgs);
+		*msg = '\0';
+		/* senderrbuf cleared in SPKTERRS below */
+	}
+
+	if (errs & QIB_E_P_SPKTERRS) {
+		if ((errs & QIB_E_P_LINK_PKTERRS) &&
+		    !(ppd->lflags & QIBL_LINKACTIVE)) {
+			/*
+			 * This can happen when trying to bring the link
+			 * up, but the IB link changes state at the "wrong"
+			 * time. The IB logic then complains that the packet
+			 * isn't valid.  We don't want to confuse people, so
+			 * we just don't print them, except at debug.
+			 */
+			err_decode(msg, sizeof ppd->cpspec->epmsgbuf,
+				   (errs & QIB_E_P_LINK_PKTERRS),
+				   qib_7322p_error_msgs);
+			*msg = '\0';
+			ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
+		}
+		qib_disarm_7322_senderrbufs(ppd);
+	} else if ((errs & QIB_E_P_LINK_PKTERRS) &&
+		   !(ppd->lflags & QIBL_LINKACTIVE)) {
+		/*
+		 * This can happen when SMA is trying to bring the link
+		 * up, but the IB link changes state at the "wrong" time.
+		 * The IB logic then complains that the packet isn't
+		 * valid.  We don't want to confuse people, so we just
+		 * don't print them, except at debug.
+		 */
+		err_decode(msg, sizeof ppd->cpspec->epmsgbuf, errs,
+			   qib_7322p_error_msgs);
+		ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
+		*msg = '\0';
+	}
+
+	qib_write_kreg_port(ppd, krp_errclear, errs);
+
+	errs &= ~ignore_this_time;
+	if (!errs)
+		goto done;
+
+	if (errs & QIB_E_P_RPKTERRS)
+		qib_stats.sps_rcverrs++;
+	if (errs & QIB_E_P_SPKTERRS)
+		qib_stats.sps_txerrs++;
+
+	iserr = errs & ~(QIB_E_P_RPKTERRS | QIB_E_P_PKTERRS);
+
+	if (errs & QIB_E_P_SDMAERRS)
+		sdma_7322_p_errors(ppd, errs);
+
+	if (errs & QIB_E_P_IBSTATUSCHANGED) {
+		u64 ibcs;
+		u8 ltstate;
+
+		ibcs = qib_read_kreg_port(ppd, krp_ibcstatus_a);
+		ltstate = qib_7322_phys_portstate(ibcs);
+
+		if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
+			handle_serdes_issues(ppd, ibcs);
+		if (!(ppd->cpspec->ibcctrl_a &
+		      SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn))) {
+			/*
+			 * We got our interrupt, so init code should be
+			 * happy and not try alternatives. Now squelch
+			 * other "chatter" from link-negotiation (pre Init)
+			 */
+			ppd->cpspec->ibcctrl_a |=
+				SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
+			qib_write_kreg_port(ppd, krp_ibcctrl_a,
+					    ppd->cpspec->ibcctrl_a);
+		}
+
+		/* Update our picture of width and speed from chip */
+		ppd->link_width_active =
+			(ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) ?
+			    IB_WIDTH_4X : IB_WIDTH_1X;
+		ppd->link_speed_active = (ibcs & SYM_MASK(IBCStatusA_0,
+			LinkSpeedQDR)) ? QIB_IB_QDR : (ibcs &
+			  SYM_MASK(IBCStatusA_0, LinkSpeedActive)) ?
+				   QIB_IB_DDR : QIB_IB_SDR;
+
+		if ((ppd->lflags & QIBL_IB_LINK_DISABLED) && ltstate !=
+		    IB_PHYSPORTSTATE_DISABLED)
+			qib_set_ib_7322_lstate(ppd, 0,
+			       QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
+		else
+			/*
+			 * Since going into a recovery state causes the link
+			 * state to go down and since recovery is transitory,
+			 * it is better if we "miss" ever seeing the link
+			 * training state go into recovery (i.e., ignore this
+			 * transition for link state special handling purposes)
+			 * without updating lastibcstat.
+			 */
+			if (ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
+			    ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
+			    ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
+			    ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
+				qib_handle_e_ibstatuschanged(ppd, ibcs);
+	}
+	if (*msg && iserr)
+		qib_dev_porterr(dd, ppd->port, "%s error\n", msg);
+
+	if (ppd->state_wanted & ppd->lflags)
+		wake_up_interruptible(&ppd->state_wait);
+done:
+	return;
+}
+
+/* enable/disable chip from delivering interrupts */
+static void qib_7322_set_intr_state(struct qib_devdata *dd, u32 enable)
+{
+	if (enable) {
+		if (dd->flags & QIB_BADINTR)
+			return;
+		qib_write_kreg(dd, kr_intmask, dd->cspec->int_enable_mask);
+		/* cause any pending enabled interrupts to be re-delivered */
+		qib_write_kreg(dd, kr_intclear, 0ULL);
+		if (dd->cspec->num_msix_entries) {
+			/* and same for MSIx */
+			u64 val = qib_read_kreg64(dd, kr_intgranted);
+			if (val)
+				qib_write_kreg(dd, kr_intgranted, val);
+		}
+	} else
+		qib_write_kreg(dd, kr_intmask, 0ULL);
+}
+
+/*
+ * Try to cleanup as much as possible for anything that might have gone
+ * wrong while in freeze mode, such as pio buffers being written by user
+ * processes (causing armlaunch), send errors due to going into freeze mode,
+ * etc., and try to avoid causing extra interrupts while doing so.
+ * Forcibly update the in-memory pioavail register copies after cleanup
+ * because the chip won't do it while in freeze mode (the register values
+ * themselves are kept correct).
+ * Make sure that we don't lose any important interrupts by using the chip
+ * feature that says that writing 0 to a bit in *clear that is set in
+ * *status will cause an interrupt to be generated again (if allowed by
+ * the *mask value).
+ * This is in chip-specific code because of all of the register accesses,
+ * even though the details are similar on most chips.
+ */
+static void qib_7322_clear_freeze(struct qib_devdata *dd)
+{
+	int pidx;
+
+	/* disable error interrupts, to avoid confusion */
+	qib_write_kreg(dd, kr_errmask, 0ULL);
+
+	for (pidx = 0; pidx < dd->num_pports; ++pidx)
+		if (dd->pport[pidx].link_speed_supported)
+			qib_write_kreg_port(dd->pport + pidx, krp_errmask,
+					    0ULL);
+
+	/* also disable interrupts; errormask is sometimes overwritten */
+	qib_7322_set_intr_state(dd, 0);
+
+	/* clear the freeze, and be sure chip saw it */
+	qib_write_kreg(dd, kr_control, dd->control);
+	qib_read_kreg32(dd, kr_scratch);
+
+	/*
+	 * Force new interrupt if any hwerr, error or interrupt bits are
+	 * still set, and clear "safe" send packet errors related to freeze
+	 * and cancelling sends.  Re-enable error interrupts before possible
+	 * force of re-interrupt on pending interrupts.
+	 */
+	qib_write_kreg(dd, kr_hwerrclear, 0ULL);
+	qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE);
+	qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
+	/* We need to purge per-port errs and reset mask, too */
+	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+		if (!dd->pport[pidx].link_speed_supported)
+			continue;
+		qib_write_kreg_port(dd->pport + pidx, krp_errclear, ~0ULL);
+		qib_write_kreg_port(dd->pport + pidx, krp_errmask, ~0ULL);
+	}
+	qib_7322_set_intr_state(dd, 1);
+}
+
+/* no error handling to speak of */
+/**
+ * qib_7322_handle_hwerrors - display hardware errors.
+ * @dd: the qlogic_ib device
+ * @msg: the output buffer
+ * @msgl: the size of the output buffer
+ *
+ * Most hardware errors are catastrophic, but for right now,
+ * we'll print them and continue.  We reuse the same message buffer
+ * as qib_handle_errors() to avoid excessive stack usage.
+ */
+static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg,
+				     size_t msgl)
+{
+	u64 hwerrs;
+	u32 ctrl;
+	int isfatal = 0;
+
+	hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
+	if (!hwerrs)
+		goto bail;
+	if (hwerrs == ~0ULL) {
+		qib_dev_err(dd, "Read of hardware error status failed "
+			    "(all bits set); ignoring\n");
+		goto bail;
+	}
+	qib_stats.sps_hwerrs++;
+
+	/* Always clear the error status register, except BIST fail */
+	qib_write_kreg(dd, kr_hwerrclear, hwerrs &
+		       ~HWE_MASK(PowerOnBISTFailed));
+
+	hwerrs &= dd->cspec->hwerrmask;
+
+	/* no EEPROM logging, yet */
+
+	if (hwerrs)
+		qib_devinfo(dd->pcidev, "Hardware error: hwerr=0x%llx "
+			    "(cleared)\n", (unsigned long long) hwerrs);
+
+	ctrl = qib_read_kreg32(dd, kr_control);
+	if ((ctrl & SYM_MASK(Control, FreezeMode)) && !dd->diag_client) {
+		/*
+		 * No recovery yet...
+		 */
+		if ((hwerrs & ~HWE_MASK(LATriggered)) ||
+		    dd->cspec->stay_in_freeze) {
+			/*
+			 * If any are set that we aren't ignoring, only make
+			 * the complaint once, in case it's stuck or recurring
+			 * and we get here multiple times.  Force the link
+			 * down, so the switch knows, and LEDs are turned
+			 * off.
+			 */
+			if (dd->flags & QIB_INITTED)
+				isfatal = 1;
+		} else
+			qib_7322_clear_freeze(dd);
+	}
+
+	if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {
+		isfatal = 1;
+		strlcpy(msg, "[Memory BIST test failed, "
+			"InfiniPath hardware unusable]", msgl);
+		/* ignore from now on, so disable until driver reloaded */
+		dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
+		qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
+	}
+
+	err_decode(msg, msgl, hwerrs, qib_7322_hwerror_msgs);
+
+	/* Ignore esoteric PLL failures et al. */
+
+	qib_dev_err(dd, "%s hardware error\n", msg);
+
+	if (isfatal && !dd->diag_client) {
+		qib_dev_err(dd, "Fatal Hardware Error, no longer"
+			    " usable, SN %.16s\n", dd->serial);
+		/*
+		 * for /sys status file and user programs to print; if no
+		 * trailing brace is copied, we'll know it was truncated.
+		 */
+		if (dd->freezemsg)
+			snprintf(dd->freezemsg, dd->freezelen,
+				 "{%s}", msg);
+		qib_disable_after_error(dd);
+	}
+bail:;
+}
+
+/**
+ * qib_7322_init_hwerrors - enable hardware errors
+ * @dd: the qlogic_ib device
+ *
+ * Now that we have finished initializing everything that might reasonably
+ * cause a hardware error, and cleared those error bits as they occur,
+ * we can enable hardware errors in the mask (potentially enabling
+ * freeze mode), and enable hardware errors as errors (along with
+ * everything else) in errormask
+ */
+static void qib_7322_init_hwerrors(struct qib_devdata *dd)
+{
+	int pidx;
+	u64 extsval;
+
+	extsval = qib_read_kreg64(dd, kr_extstatus);
+	if (!(extsval & (QIB_EXTS_MEMBIST_DISABLED |
+			 QIB_EXTS_MEMBIST_ENDTEST)))
+		qib_dev_err(dd, "MemBIST did not complete!\n");
+
+	/* never clear BIST failure, so reported on each driver load */
+	qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed));
+	qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
+
+	/* clear all */
+	qib_write_kreg(dd, kr_errclear, ~0ULL);
+	/* enable errors that are masked, at least this first time. */
+	qib_write_kreg(dd, kr_errmask, ~0ULL);
+	dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask);
+	for (pidx = 0; pidx < dd->num_pports; ++pidx)
+		if (dd->pport[pidx].link_speed_supported)
+			qib_write_kreg_port(dd->pport + pidx, krp_errmask,
+					    ~0ULL);
+}
+
+/*
+ * Disable and enable the armlaunch error.  Used for PIO bandwidth testing
+ * on chips that are count-based, rather than trigger-based.  There is no
+ * reference counting, but that's also fine, given the intended use.
+ * Only chip-specific because it's all register accesses
+ */
+static void qib_set_7322_armlaunch(struct qib_devdata *dd, u32 enable)
+{
+	if (enable) {
+		qib_write_kreg(dd, kr_errclear, QIB_E_SPIOARMLAUNCH);
+		dd->cspec->errormask |= QIB_E_SPIOARMLAUNCH;
+	} else
+		dd->cspec->errormask &= ~QIB_E_SPIOARMLAUNCH;
+	qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
+}
+
+/*
+ * Formerly took parameter <which> in pre-shifted,
+ * pre-merged form with LinkCmd and LinkInitCmd
+ * together, and assumed that zero was a NOP.
+ */
+static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
+				   u16 linitcmd)
+{
+	u64 mod_wd;
+	struct qib_devdata *dd = ppd->dd;
+	unsigned long flags;
+
+	if (linitcmd == QLOGIC_IB_IBCC_LINKINITCMD_DISABLE) {
+		/*
+		 * If we are told to disable, note that so link-recovery
+		 * code does not attempt to bring us back up.
+		 * Also reset everything that we can, so we start
+		 * completely clean when re-enabled (before we
+		 * actually issue the disable to the IBC)
+		 */
+		qib_7322_mini_pcs_reset(ppd);
+		spin_lock_irqsave(&ppd->lflags_lock, flags);
+		ppd->lflags |= QIBL_IB_LINK_DISABLED;
+		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+	} else if (linitcmd || linkcmd == QLOGIC_IB_IBCC_LINKCMD_DOWN) {
+		/*
+		 * Any other linkinitcmd will lead to LINKDOWN and then
+		 * to INIT (if all is well), so clear flag to let
+		 * link-recovery code attempt to bring us back up.
+		 */
+		spin_lock_irqsave(&ppd->lflags_lock, flags);
+		ppd->lflags &= ~QIBL_IB_LINK_DISABLED;
+		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+		/*
+		 * Clear status change interrupt reduction so the
+		 * new state is seen.
+		 */
+		ppd->cpspec->ibcctrl_a &=
+			~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
+	}
+
+	mod_wd = (linkcmd << IBA7322_IBCC_LINKCMD_SHIFT) |
+		(linitcmd << QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
+
+	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a |
+			    mod_wd);
+	/* write to chip to prevent back-to-back writes of ibc reg */
+	qib_write_kreg(dd, kr_scratch, 0);
+
+}
+
+/*
+ * The total RCV buffer memory is 64KB, used for both ports, and is
+ * in units of 64 bytes (same as IB flow control credit unit).
+ * The consumedVL unit in the same registers is in 32 byte units!
+ * So, a VL15 packet needs 4.50 IB credits, and 9 rx buffer chunks,
+ * and we can therefore allocate just 9 IB credits for 2 VL15 packets
+ * in krp_rxcreditvl15, rather than 10.
+ */
+#define RCV_BUF_UNITSZ 64
+#define NUM_RCV_BUF_UNITS(dd) ((64 * 1024) / (RCV_BUF_UNITSZ * dd->num_pports))
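+/*
+ * Worked example for the dual-port case: 64KB / (64 * 2) = 512 units
+ * per port; VL15 then gets (2 * 288 + 63) / 64 = 9 units, leaving 503
+ * to divide among the operational VLs, with the remainder going to VL0.
+ */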
+
+static void set_vls(struct qib_pportdata *ppd)
+{
+	int i, numvls, totcred, cred_vl, vl0extra;
+	struct qib_devdata *dd = ppd->dd;
+	u64 val;
+
+	numvls = qib_num_vls(ppd->vls_operational);
+
+	/*
+	 * Set up per-VL credits. Below is a kluge based on these assumptions:
+	 * 1) port is disabled at the time early_init is called.
+	 * 2) give VL15 9 credits, for two max-plausible packets.
+	 * 3) Give VL0-N the rest, with any rounding excess used for VL0
+	 */
+	/* 2 VL15 packets @ 288 bytes each (including IB headers) */
+	totcred = NUM_RCV_BUF_UNITS(dd);
+	cred_vl = (2 * 288 + RCV_BUF_UNITSZ - 1) / RCV_BUF_UNITSZ;
+	totcred -= cred_vl;
+	qib_write_kreg_port(ppd, krp_rxcreditvl15, (u64) cred_vl);
+	cred_vl = totcred / numvls;
+	vl0extra = totcred - cred_vl * numvls;
+	qib_write_kreg_port(ppd, krp_rxcreditvl0, cred_vl + vl0extra);
+	for (i = 1; i < numvls; i++)
+		qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, cred_vl);
+	for (; i < 8; i++) /* no buffer space for other VLs */
+		qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);
+
+	/* Notify IBC that credits need to be recalculated */
+	val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
+	val |= SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
+	qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
+	qib_write_kreg(dd, kr_scratch, 0ULL);
+	val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
+	qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
+
+	for (i = 0; i < numvls; i++)
+		val = qib_read_kreg_port(ppd, krp_rxcreditvl0 + i);
+	val = qib_read_kreg_port(ppd, krp_rxcreditvl15);
+
+	/* Change the number of operational VLs */
+	ppd->cpspec->ibcctrl_a = (ppd->cpspec->ibcctrl_a &
+				~SYM_MASK(IBCCtrlA_0, NumVLane)) |
+		((u64)(numvls - 1) << SYM_LSB(IBCCtrlA_0, NumVLane));
+	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
+	qib_write_kreg(dd, kr_scratch, 0ULL);
+}
+
+/*
+ * The code that deals with actual SerDes is in serdes_7322_init().
+ * Compared to the code for iba7220, it is minimal.
+ */
+static int serdes_7322_init(struct qib_pportdata *ppd);
+
+/**
+ * qib_7322_bringup_serdes - bring up the serdes
+ * @ppd: physical port on the qlogic_ib device
+ */
+static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
+{
+	struct qib_devdata *dd = ppd->dd;
+	u64 val, guid, ibc;
+	unsigned long flags;
+	int ret = 0;
+
+	/*
+	 * SerDes model not in Pd, but still need to
+	 * set up much of IBCCtrl and IBCDDRCtrl; move elsewhere
+	 * eventually.
+	 */
+	/* Put IBC in reset, sends disabled (should be in reset already) */
+	ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
+	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
+	qib_write_kreg(dd, kr_scratch, 0ULL);
+
+	if (qib_compat_ddr_negotiate) {
+		ppd->cpspec->ibdeltainprog = 1;
+		ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
+						crp_ibsymbolerr);
+		ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
+						crp_iblinkerrrecov);
+	}
+
+	/* flowcontrolwatermark is in units of KBytes */
+	ibc = 0x5ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlWaterMark);
+	/*
+	 * Flow control is sent this often, even if no changes in
+	 * buffer space occur.  Units are 128ns for this chip.
+	 * Set to 3usec.
+	 */
+	ibc |= 24ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlPeriod);
+	/* max error tolerance */
+	ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
+	/* IB credit flow control. */
+	ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, OverrunThreshold);
+	/*
+	 * set initial max size pkt IBC will send, including ICRC; it's the
+	 * PIO buffer size in dwords, less 1; also see qib_set_mtu()
+	 */
+	ibc |= ((u64)(ppd->ibmaxlen >> 2) + 1) <<
+		SYM_LSB(IBCCtrlA_0, MaxPktLen);
+	ppd->cpspec->ibcctrl_a = ibc; /* without linkcmd or linkinitcmd! */
+
+	/* initially come up waiting for TS1, without sending anything. */
+	val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
+		QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
+
+	/*
+	 * Reset the PCS interface to the serdes (and also ibc, which is still
+	 * in reset from above).  Writes new value of ibcctrl_a as last step.
+	 */
+	qib_7322_mini_pcs_reset(ppd);
+	qib_write_kreg(dd, kr_scratch, 0ULL);
+
+	if (!ppd->cpspec->ibcctrl_b) {
+		unsigned lse = ppd->link_speed_enabled;
+
+		/*
+		 * Not on re-init after reset, establish shadow
+		 * and force initial config.
+		 */
+		ppd->cpspec->ibcctrl_b = qib_read_kreg_port(ppd,
+							     krp_ibcctrl_b);
+		ppd->cpspec->ibcctrl_b &= ~(IBA7322_IBC_SPEED_QDR |
+				IBA7322_IBC_SPEED_DDR |
+				IBA7322_IBC_SPEED_SDR |
+				IBA7322_IBC_WIDTH_AUTONEG |
+				SYM_MASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED));
+		if (lse & (lse - 1)) /* Multiple speeds enabled */
+			ppd->cpspec->ibcctrl_b |=
+				(lse << IBA7322_IBC_SPEED_LSB) |
+				IBA7322_IBC_IBTA_1_2_MASK |
+				IBA7322_IBC_MAX_SPEED_MASK;
+		else
+			ppd->cpspec->ibcctrl_b |= (lse == QIB_IB_QDR) ?
+				IBA7322_IBC_SPEED_QDR |
+				 IBA7322_IBC_IBTA_1_2_MASK :
+				(lse == QIB_IB_DDR) ?
+					IBA7322_IBC_SPEED_DDR :
+					IBA7322_IBC_SPEED_SDR;
+		if ((ppd->link_width_enabled & (IB_WIDTH_1X | IB_WIDTH_4X)) ==
+		    (IB_WIDTH_1X | IB_WIDTH_4X))
+			ppd->cpspec->ibcctrl_b |= IBA7322_IBC_WIDTH_AUTONEG;
+		else
+			ppd->cpspec->ibcctrl_b |=
+				ppd->link_width_enabled == IB_WIDTH_4X ?
+				IBA7322_IBC_WIDTH_4X_ONLY :
+				IBA7322_IBC_WIDTH_1X_ONLY;
+
+		/* always enable these on driver reload, not sticky */
+		ppd->cpspec->ibcctrl_b |= (IBA7322_IBC_RXPOL_MASK |
+			IBA7322_IBC_HRTBT_MASK);
+	}
+	qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
+
+	/* setup so we have more time at CFGTEST to change H1 */
+	val = qib_read_kreg_port(ppd, krp_ibcctrl_c);
+	val &= ~SYM_MASK(IBCCtrlC_0, IB_FRONT_PORCH);
+	val |= 0xfULL << SYM_LSB(IBCCtrlC_0, IB_FRONT_PORCH);
+	qib_write_kreg_port(ppd, krp_ibcctrl_c, val);
+
+	serdes_7322_init(ppd);
+
+	guid = be64_to_cpu(ppd->guid);
+	if (!guid) {
+		if (dd->base_guid)
+			guid = be64_to_cpu(dd->base_guid) + ppd->port - 1;
+		ppd->guid = cpu_to_be64(guid);
+	}
+
+	qib_write_kreg_port(ppd, krp_hrtbt_guid, guid);
+	/* write to chip to prevent back-to-back writes of ibc reg */
+	qib_write_kreg(dd, kr_scratch, 0);
+
+	/* Enable port */
+	ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0, IBLinkEn);
+	set_vls(ppd);
+
+	/* be paranoid against later code motion, etc. */
+	spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
+	ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvIBPortEnable);
+	qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
+	spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
+
+	/* Also enable IBSTATUSCHG interrupt.  */
+	val = qib_read_kreg_port(ppd, krp_errmask);
+	qib_write_kreg_port(ppd, krp_errmask,
+		val | ERR_MASK_N(IBStatusChanged));
+
+	/* Always zero until we start messing with SerDes for real */
+	return ret;
+}
+
+/**
+ * qib_7322_mini_quiet_serdes - set serdes to txidle
+ * @ppd: physical port on the qlogic_ib device
+ * Called when driver is being unloaded
+ */
+static void qib_7322_mini_quiet_serdes(struct qib_pportdata *ppd)
+{
+	u64 val;
+	unsigned long flags;
+
+	qib_set_ib_7322_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
+
+	spin_lock_irqsave(&ppd->lflags_lock, flags);
+	ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
+	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+	wake_up(&ppd->cpspec->autoneg_wait);
+	cancel_delayed_work(&ppd->cpspec->autoneg_work);
+	if (ppd->dd->cspec->r1)
+		cancel_delayed_work(&ppd->cpspec->ipg_work);
+	flush_scheduled_work();
+
+	ppd->cpspec->chase_end = 0;
+	if (ppd->cpspec->chase_timer.data) /* if initted */
+		del_timer_sync(&ppd->cpspec->chase_timer);
+
+	/*
+	 * Despite the name, actually disables IBC as well. Do it when
+	 * we are as sure as possible that no more packets can be
+	 * received, following the down and the PCS reset.
+	 * The actual disabling happens in qib_7322_mini_pcs_reset(),
+	 * along with the PCS being reset.
+	 */
+	ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
+	qib_7322_mini_pcs_reset(ppd);
+
+	/*
+	 * Update the adjusted counters so the adjustment persists
+	 * across driver reload.
+	 */
+	if (ppd->cpspec->ibsymdelta || ppd->cpspec->iblnkerrdelta ||
+	    ppd->cpspec->ibdeltainprog || ppd->cpspec->iblnkdowndelta) {
+		struct qib_devdata *dd = ppd->dd;
+		u64 diagc;
+
+		/* enable counter writes */
+		diagc = qib_read_kreg64(dd, kr_hwdiagctrl);
+		qib_write_kreg(dd, kr_hwdiagctrl,
+			       diagc | SYM_MASK(HwDiagCtrl, CounterWrEnable));
+
+		if (ppd->cpspec->ibsymdelta || ppd->cpspec->ibdeltainprog) {
+			val = read_7322_creg32_port(ppd, crp_ibsymbolerr);
+			if (ppd->cpspec->ibdeltainprog)
+				val -= val - ppd->cpspec->ibsymsnap;
+			val -= ppd->cpspec->ibsymdelta;
+			write_7322_creg_port(ppd, crp_ibsymbolerr, val);
+		}
+		if (ppd->cpspec->iblnkerrdelta || ppd->cpspec->ibdeltainprog) {
+			val = read_7322_creg32_port(ppd, crp_iblinkerrrecov);
+			if (ppd->cpspec->ibdeltainprog)
+				val -= val - ppd->cpspec->iblnkerrsnap;
+			val -= ppd->cpspec->iblnkerrdelta;
+			write_7322_creg_port(ppd, crp_iblinkerrrecov, val);
+		}
+		if (ppd->cpspec->iblnkdowndelta) {
+			val = read_7322_creg32_port(ppd, crp_iblinkdown);
+			val += ppd->cpspec->iblnkdowndelta;
+			write_7322_creg_port(ppd, crp_iblinkdown, val);
+		}
+		/*
+		 * No need to save ibmalfdelta since IB perfcounters
+		 * are cleared on driver reload.
+		 */
+
+		/* and disable counter writes */
+		qib_write_kreg(dd, kr_hwdiagctrl, diagc);
+	}
+}
+
+/**
+ * qib_setup_7322_setextled - set the state of the two external LEDs
+ * @ppd: physical port on the qlogic_ib device
+ * @on: whether the link is up or not
+ *
+ * The exact combo of LEDs if on is true is determined by looking
+ * at the ibcstatus.
+ *
+ * These LEDs indicate the physical and logical state of IB link.
+ * For this chip (at least with recommended board pinouts), LED1
+ * is Yellow (logical state) and LED2 is Green (physical state),
+ *
+ * Note:  We try to match the Mellanox HCA LED behavior as best
+ * we can.  Green indicates physical link state is OK (something is
+ * plugged in, and we can train).
+ * Amber indicates the link is logically up (ACTIVE).
+ * Mellanox further blinks the amber LED to indicate data packet
+ * activity, but we have no hardware support for that, so it would
+ * require waking up every 10-20 msecs and checking the counters
+ * on the chip, and then turning the LED off if appropriate.  That's
+ * visible overhead, so not something we will do.
+ */
+static void qib_setup_7322_setextled(struct qib_pportdata *ppd, u32 on)
+{
+	struct qib_devdata *dd = ppd->dd;
+	u64 extctl, ledblink = 0, val;
+	unsigned long flags;
+	int yel, grn;
+
+	/*
+	 * The diags use the LED to indicate diag info, so we leave
+	 * the external LED alone when the diags are running.
+	 */
+	if (dd->diag_client)
+		return;
+
+	/* Allow override of LED display, e.g. for locating the system in a rack */
+	if (ppd->led_override) {
+		grn = (ppd->led_override & QIB_LED_PHYS);
+		yel = (ppd->led_override & QIB_LED_LOG);
+	} else if (on) {
+		val = qib_read_kreg_port(ppd, krp_ibcstatus_a);
+		grn = qib_7322_phys_portstate(val) ==
+			IB_PHYSPORTSTATE_LINKUP;
+		yel = qib_7322_iblink_state(val) == IB_PORT_ACTIVE;
+	} else {
+		grn = 0;
+		yel = 0;
+	}
+
+	spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
+	extctl = dd->cspec->extctrl & (ppd->port == 1 ?
+		~ExtLED_IB1_MASK : ~ExtLED_IB2_MASK);
+	if (grn) {
+		extctl |= ppd->port == 1 ? ExtLED_IB1_GRN : ExtLED_IB2_GRN;
+		/*
+		 * Counts are in chip clock (4ns) periods.
+		 * This is 66.6 ms on and 187.5 ms (3/16 sec) off,
+		 * with packets rcvd.
+		 */
+		ledblink = ((66600 * 1000UL / 4) << IBA7322_LEDBLINK_ON_SHIFT) |
+			((187500 * 1000UL / 4) << IBA7322_LEDBLINK_OFF_SHIFT);
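+		/*
+		 * e.g. the on-time: 66600 us * 1000 / 4 ns per period
+		 * = 16.65M chip clocks loaded into the ON field.
+		 */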
+	}
+	if (yel)
+		extctl |= ppd->port == 1 ? ExtLED_IB1_YEL : ExtLED_IB2_YEL;
+	dd->cspec->extctrl = extctl;
+	qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
+	spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
+
+	if (ledblink) /* blink the LED on packet receive */
+		qib_write_kreg_port(ppd, krp_rcvpktledcnt, ledblink);
+}
+
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+static void qib_update_rhdrq_dca(struct qib_ctxtdata *rcd)
+{
+	struct qib_devdata *dd = rcd->dd;
+	struct qib_chip_specific *cspec = dd->cspec;
+	int cpu = get_cpu();
+
+	if (cspec->rhdr_cpu[rcd->ctxt] != cpu) {
+		const struct dca_reg_map *rmp;
+
+		cspec->rhdr_cpu[rcd->ctxt] = cpu;
+		rmp = &dca_rcvhdr_reg_map[rcd->ctxt];
+		cspec->dca_rcvhdr_ctrl[rmp->shadow_inx] &= rmp->mask;
+		cspec->dca_rcvhdr_ctrl[rmp->shadow_inx] |=
+			(u64) dca3_get_tag(&dd->pcidev->dev, cpu) << rmp->lsb;
+		qib_write_kreg(dd, rmp->regno,
+			       cspec->dca_rcvhdr_ctrl[rmp->shadow_inx]);
+		cspec->dca_ctrl |= SYM_MASK(DCACtrlA, RcvHdrqDCAEnable);
+		qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl);
+	}
+	put_cpu();
+}
+
+static void qib_update_sdma_dca(struct qib_pportdata *ppd)
+{
+	struct qib_devdata *dd = ppd->dd;
+	struct qib_chip_specific *cspec = dd->cspec;
+	int cpu = get_cpu();
+	unsigned pidx = ppd->port - 1;
+
+	if (cspec->sdma_cpu[pidx] != cpu) {
+		cspec->sdma_cpu[pidx] = cpu;
+		cspec->dca_rcvhdr_ctrl[4] &= ~(ppd->hw_pidx ?
+			SYM_MASK(DCACtrlF, SendDma1DCAOPH) :
+			SYM_MASK(DCACtrlF, SendDma0DCAOPH));
+		cspec->dca_rcvhdr_ctrl[4] |=
+			(u64) dca3_get_tag(&dd->pcidev->dev, cpu) <<
+				(ppd->hw_pidx ?
+					SYM_LSB(DCACtrlF, SendDma1DCAOPH) :
+					SYM_LSB(DCACtrlF, SendDma0DCAOPH));
+		qib_write_kreg(dd, KREG_IDX(DCACtrlF),
+			       cspec->dca_rcvhdr_ctrl[4]);
+		cspec->dca_ctrl |= ppd->hw_pidx ?
+			SYM_MASK(DCACtrlA, SendDMAHead1DCAEnable) :
+			SYM_MASK(DCACtrlA, SendDMAHead0DCAEnable);
+		qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl);
+	}
+	put_cpu();
+}
+
+static void qib_setup_dca(struct qib_devdata *dd)
+{
+	struct qib_chip_specific *cspec = dd->cspec;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(cspec->rhdr_cpu); i++)
+		cspec->rhdr_cpu[i] = -1;
+	for (i = 0; i < ARRAY_SIZE(cspec->sdma_cpu); i++)
+		cspec->sdma_cpu[i] = -1;
+	cspec->dca_rcvhdr_ctrl[0] =
+		(1ULL << SYM_LSB(DCACtrlB, RcvHdrq0DCAXfrCnt)) |
+		(1ULL << SYM_LSB(DCACtrlB, RcvHdrq1DCAXfrCnt)) |
+		(1ULL << SYM_LSB(DCACtrlB, RcvHdrq2DCAXfrCnt)) |
+		(1ULL << SYM_LSB(DCACtrlB, RcvHdrq3DCAXfrCnt));
+	cspec->dca_rcvhdr_ctrl[1] =
+		(1ULL << SYM_LSB(DCACtrlC, RcvHdrq4DCAXfrCnt)) |
+		(1ULL << SYM_LSB(DCACtrlC, RcvHdrq5DCAXfrCnt)) |
+		(1ULL << SYM_LSB(DCACtrlC, RcvHdrq6DCAXfrCnt)) |
+		(1ULL << SYM_LSB(DCACtrlC, RcvHdrq7DCAXfrCnt));
+	cspec->dca_rcvhdr_ctrl[2] =
+		(1ULL << SYM_LSB(DCACtrlD, RcvHdrq8DCAXfrCnt)) |
+		(1ULL << SYM_LSB(DCACtrlD, RcvHdrq9DCAXfrCnt)) |
+		(1ULL << SYM_LSB(DCACtrlD, RcvHdrq10DCAXfrCnt)) |
+		(1ULL << SYM_LSB(DCACtrlD, RcvHdrq11DCAXfrCnt));
+	cspec->dca_rcvhdr_ctrl[3] =
+		(1ULL << SYM_LSB(DCACtrlE, RcvHdrq12DCAXfrCnt)) |
+		(1ULL << SYM_LSB(DCACtrlE, RcvHdrq13DCAXfrCnt)) |
+		(1ULL << SYM_LSB(DCACtrlE, RcvHdrq14DCAXfrCnt)) |
+		(1ULL << SYM_LSB(DCACtrlE, RcvHdrq15DCAXfrCnt));
+	cspec->dca_rcvhdr_ctrl[4] =
+		(1ULL << SYM_LSB(DCACtrlF, RcvHdrq16DCAXfrCnt)) |
+		(1ULL << SYM_LSB(DCACtrlF, RcvHdrq17DCAXfrCnt));
+	for (i = 0; i < ARRAY_SIZE(cspec->sdma_cpu); i++)
+		qib_write_kreg(dd, KREG_IDX(DCACtrlB) + i,
+			       cspec->dca_rcvhdr_ctrl[i]);
+}
+
+#endif
+
+/*
+ * Disable MSIx interrupt if enabled, call generic MSIx code
+ * to cleanup, and clear pending MSIx interrupts.
+ * Used for fallback to INTx, after reset, and when MSIx setup fails.
+ */
+static void qib_7322_nomsix(struct qib_devdata *dd)
+{
+	u64 intgranted;
+	int n;
+
+	dd->cspec->main_int_mask = ~0ULL;
+	n = dd->cspec->num_msix_entries;
+	if (n) {
+		int i;
+
+		dd->cspec->num_msix_entries = 0;
+		for (i = 0; i < n; i++)
+			free_irq(dd->cspec->msix_entries[i].vector,
+				 dd->cspec->msix_arg[i]);
+		qib_nomsix(dd);
+	}
+	/* make sure no MSIx interrupts are left pending */
+	intgranted = qib_read_kreg64(dd, kr_intgranted);
+	if (intgranted)
+		qib_write_kreg(dd, kr_intgranted, intgranted);
+}
+
+static void qib_7322_free_irq(struct qib_devdata *dd)
+{
+	if (dd->cspec->irq) {
+		free_irq(dd->cspec->irq, dd);
+		dd->cspec->irq = 0;
+	}
+	qib_7322_nomsix(dd);
+}
+
+static void qib_setup_7322_cleanup(struct qib_devdata *dd)
+{
+	int i;
+
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+	if (dd->flags & QIB_DCA_ENABLED) {
+		dca_remove_requester(&dd->pcidev->dev);
+		dd->flags &= ~QIB_DCA_ENABLED;
+		dd->cspec->dca_ctrl = 0;
+		qib_write_kreg(dd, KREG_IDX(DCACtrlA), dd->cspec->dca_ctrl);
+	}
+#endif
+
+	qib_7322_free_irq(dd);
+	kfree(dd->cspec->cntrs);
+	kfree(dd->cspec->sendchkenable);
+	kfree(dd->cspec->sendgrhchk);
+	kfree(dd->cspec->sendibchk);
+	kfree(dd->cspec->msix_entries);
+	kfree(dd->cspec->msix_arg);
+	for (i = 0; i < dd->num_pports; i++) {
+		unsigned long flags;
+		u32 mask = QSFP_GPIO_MOD_PRS_N |
+			(QSFP_GPIO_MOD_PRS_N << QSFP_GPIO_PORT2_SHIFT);
+
+		kfree(dd->pport[i].cpspec->portcntrs);
+		if (dd->flags & QIB_HAS_QSFP) {
+			spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
+			dd->cspec->gpio_mask &= ~mask;
+			qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
+			spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
+			qib_qsfp_deinit(&dd->pport[i].cpspec->qsfp_data);
+		}
+		if (dd->pport[i].ibport_data.smi_ah)
+			ib_destroy_ah(&dd->pport[i].ibport_data.smi_ah->ibah);
+	}
+}
+
+/* handle SDMA interrupts */
+static void sdma_7322_intr(struct qib_devdata *dd, u64 istat)
+{
+	struct qib_pportdata *ppd0 = &dd->pport[0];
+	struct qib_pportdata *ppd1 = &dd->pport[1];
+	u64 intr0 = istat & (INT_MASK_P(SDma, 0) |
+		INT_MASK_P(SDmaIdle, 0) | INT_MASK_P(SDmaProgress, 0));
+	u64 intr1 = istat & (INT_MASK_P(SDma, 1) |
+		INT_MASK_P(SDmaIdle, 1) | INT_MASK_P(SDmaProgress, 1));
+
+	if (intr0)
+		qib_sdma_intr(ppd0);
+	if (intr1)
+		qib_sdma_intr(ppd1);
+
+	if (istat & INT_MASK_PM(SDmaCleanupDone, 0))
+		qib_sdma_process_event(ppd0, qib_sdma_event_e20_hw_started);
+	if (istat & INT_MASK_PM(SDmaCleanupDone, 1))
+		qib_sdma_process_event(ppd1, qib_sdma_event_e20_hw_started);
+}
+
+/*
+ * Set or clear the Send buffer available interrupt enable bit.
+ */
+static void qib_wantpiobuf_7322_intr(struct qib_devdata *dd, u32 needint)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dd->sendctrl_lock, flags);
+	if (needint)
+		dd->sendctrl |= SYM_MASK(SendCtrl, SendIntBufAvail);
+	else
+		dd->sendctrl &= ~SYM_MASK(SendCtrl, SendIntBufAvail);
+	qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
+	qib_write_kreg(dd, kr_scratch, 0ULL);
+	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
+}
+
+/*
+ * Somehow got an interrupt with reserved bits set in interrupt status.
+ * Print a message so we know it happened, then clear them.
+ * Kept out of line to keep the mainline interrupt handler cache-friendly.
+ */
+static noinline void unknown_7322_ibits(struct qib_devdata *dd, u64 istat)
+{
+	u64 kills;
+
+	kills = istat & ~QIB_I_BITSEXTANT;
+	qib_dev_err(dd, "Clearing reserved interrupt(s) 0x%016llx\n",
+		    (unsigned long long) kills);
+	qib_write_kreg(dd, kr_intmask, (dd->cspec->int_enable_mask & ~kills));
+}
+
+/* keep mainline interrupt handler cache-friendly */
+static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd)
+{
+	u32 gpiostatus;
+	int handled = 0;
+	int pidx;
+
+	/*
+	 * Boards for this chip currently don't use GPIO interrupts,
+	 * so clear by writing GPIOstatus to GPIOclear, and complain
+	 * to developer.  To avoid endless repeats, clear
+	 * the bits in the mask, since there is some kind of
+	 * programming error or chip problem.
+	 */
+	gpiostatus = qib_read_kreg32(dd, kr_gpio_status);
+	/*
+	 * In theory, writing GPIOstatus to GPIOclear could
+	 * have a bad side-effect on some diagnostic that wanted
+	 * to poll for a status-change, but the various shadows
+	 * make that problematic at best. Diags will just suppress
+	 * all GPIO interrupts during such tests.
+	 */
+	qib_write_kreg(dd, kr_gpio_clear, gpiostatus);
+	/*
+	 * Check for QSFP MOD_PRS changes
+	 * only works for single port if IB1 != pidx1
+	 */
+	for (pidx = 0; pidx < dd->num_pports && (dd->flags & QIB_HAS_QSFP);
+	     ++pidx) {
+		struct qib_pportdata *ppd;
+		struct qib_qsfp_data *qd;
+		u32 mask;
+		if (!dd->pport[pidx].link_speed_supported)
+			continue;
+		mask = QSFP_GPIO_MOD_PRS_N;
+		ppd = dd->pport + pidx;
+		mask <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
+		if (gpiostatus & dd->cspec->gpio_mask & mask) {
+			u64 pins;
+			qd = &ppd->cpspec->qsfp_data;
+			gpiostatus &= ~mask;
+			pins = qib_read_kreg64(dd, kr_extstatus);
+			pins >>= SYM_LSB(EXTStatus, GPIOIn);
+			if (!(pins & mask)) {
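+				/*
+				 * MOD_PRS_N is active-low: pin low means a
+				 * module is present, so record the insertion
+				 * time and kick the QSFP worker.
+				 */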
+				++handled;
+				qd->t_insert = get_jiffies_64();
+				schedule_work(&qd->work);
+			}
+		}
+	}
+
+	if (gpiostatus && !handled) {
+		const u32 mask = qib_read_kreg32(dd, kr_gpio_mask);
+		u32 gpio_irq = mask & gpiostatus;
+
+		/*
+		 * Clear any troublemakers, and update chip from shadow
+		 */
+		dd->cspec->gpio_mask &= ~gpio_irq;
+		qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
+	}
+}
+
+/*
+ * Handle errors and unusual events first, separate function
+ * to improve cache hits for fast path interrupt handling.
+ */
+static noinline void unlikely_7322_intr(struct qib_devdata *dd, u64 istat)
+{
+	if (istat & ~QIB_I_BITSEXTANT)
+		unknown_7322_ibits(dd, istat);
+	if (istat & QIB_I_GPIO)
+		unknown_7322_gpio_intr(dd);
+	if (istat & QIB_I_C_ERROR)
+		handle_7322_errors(dd);
+	if (istat & INT_MASK_P(Err, 0) && dd->rcd[0])
+		handle_7322_p_errors(dd->rcd[0]->ppd);
+	if (istat & INT_MASK_P(Err, 1) && dd->rcd[1])
+		handle_7322_p_errors(dd->rcd[1]->ppd);
+}
+
+/*
+ * Dynamically adjust the rcv int timeout for a context based on incoming
+ * packet rate.
+ */
+static void adjust_rcv_timeout(struct qib_ctxtdata *rcd, int npkts)
+{
+	struct qib_devdata *dd = rcd->dd;
+	u32 timeout = dd->cspec->rcvavail_timeout[rcd->ctxt];
+
+	/*
+	 * Dynamically adjust idle timeout on chip
+	 * based on number of packets processed.
+	 */
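+	/*
+	 * Below rcv_int_count packets per interrupt the timeout is
+	 * halved (floor of 2); at or above it, the timeout is doubled,
+	 * capped at rcv_int_timeout.
+	 */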
+	if (npkts < rcv_int_count && timeout > 2)
+		timeout >>= 1;
+	else if (npkts >= rcv_int_count && timeout < rcv_int_timeout)
+		timeout = min(timeout << 1, rcv_int_timeout);
+	else
+		return;
+
+	dd->cspec->rcvavail_timeout[rcd->ctxt] = timeout;
+	qib_write_kreg(dd, kr_rcvavailtimeout + rcd->ctxt, timeout);
+}
+
+/*
+ * This is the main interrupt handler.
+ * It will normally only be used for low frequency interrupts but may
+ * have to handle all interrupts if INTx is enabled or fewer than normal
+ * MSIx interrupts were allocated.
+ * This routine should ignore the interrupt bits for any of the
+ * dedicated MSIx handlers.
+ */
+static irqreturn_t qib_7322intr(int irq, void *data)
+{
+	struct qib_devdata *dd = data;
+	irqreturn_t ret;
+	u64 istat;
+	u64 ctxtrbits;
+	u64 rmask;
+	unsigned i;
+	u32 npkts;
+
+	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) {
+		/*
+		 * This return value is not great, but we do not want the
+		 * interrupt core code to remove our interrupt handler
+		 * because we don't appear to be handling an interrupt
+		 * during a chip reset.
+		 */
+		ret = IRQ_HANDLED;
+		goto bail;
+	}
+
+	istat = qib_read_kreg64(dd, kr_intstatus);
+
+	if (unlikely(istat == ~0ULL)) {
+		qib_bad_intrstatus(dd);
+		qib_dev_err(dd, "Interrupt status all f's, skipping\n");
+		/* don't know if it was our interrupt or not */
+		ret = IRQ_NONE;
+		goto bail;
+	}
+
+	istat &= dd->cspec->main_int_mask;
+	if (unlikely(!istat)) {
+		/* already handled, or shared and not us */
+		ret = IRQ_NONE;
+		goto bail;
+	}
+
+	qib_stats.sps_ints++;
+	if (dd->int_counter != (u32) -1)
+		dd->int_counter++;
+
+	/* handle "errors" of various kinds first, device ahead of port */
+	if (unlikely(istat & (~QIB_I_BITSEXTANT | QIB_I_GPIO |
+			      QIB_I_C_ERROR | INT_MASK_P(Err, 0) |
+			      INT_MASK_P(Err, 1))))
+		unlikely_7322_intr(dd, istat);
+
+	/*
+	 * Clear the interrupt bits we found set, relatively early, so we
+	 * "know" know the chip will have seen this by the time we process
+	 * the queue, and will re-interrupt if necessary.  The processor
+	 * itself won't take the interrupt again until we return.
+	 */
+	qib_write_kreg(dd, kr_intclear, istat);
+
+	/*
+	 * Handle kernel receive queues before checking for pio buffers
+	 * available since receives can overflow; piobuf waiters can afford
+	 * a few extra cycles, since they were waiting anyway.
+	 */
+	ctxtrbits = istat & (QIB_I_RCVAVAIL_MASK | QIB_I_RCVURG_MASK);
+	if (ctxtrbits) {
+		rmask = (1ULL << QIB_I_RCVAVAIL_LSB) |
+			(1ULL << QIB_I_RCVURG_LSB);
+		for (i = 0; i < dd->first_user_ctxt; i++) {
+			if (ctxtrbits & rmask) {
+				ctxtrbits &= ~rmask;
+				if (dd->rcd[i]) {
+					qib_kreceive(dd->rcd[i], NULL, &npkts);
+					adjust_rcv_timeout(dd->rcd[i], npkts);
+				}
+			}
+			rmask <<= 1;
+		}
+		if (ctxtrbits) {
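+			/*
+			 * Remaining bits belong to user contexts: fold the
+			 * RcvAvail and RcvUrg bit ranges down into a single
+			 * per-context bitmask for qib_handle_urcv().
+			 */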
+			ctxtrbits = (ctxtrbits >> QIB_I_RCVAVAIL_LSB) |
+				(ctxtrbits >> QIB_I_RCVURG_LSB);
+			qib_handle_urcv(dd, ctxtrbits);
+		}
+	}
+
+	if (istat & (QIB_I_P_SDMAINT(0) | QIB_I_P_SDMAINT(1)))
+		sdma_7322_intr(dd, istat);
+
+	if ((istat & QIB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED))
+		qib_ib_piobufavail(dd);
+
+	ret = IRQ_HANDLED;
+bail:
+	return ret;
+}
+
+/*
+ * Dedicated receive packet available interrupt handler.
+ */
+static irqreturn_t qib_7322pintr(int irq, void *data)
+{
+	struct qib_ctxtdata *rcd = data;
+	struct qib_devdata *dd = rcd->dd;
+	u32 npkts;
+
+	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
+		/*
+		 * This return value is not great, but we do not want the
+		 * interrupt core code to remove our interrupt handler
+		 * because we don't appear to be handling an interrupt
+		 * during a chip reset.
+		 */
+		return IRQ_HANDLED;
+
+	qib_stats.sps_ints++;
+	if (dd->int_counter != (u32) -1)
+		dd->int_counter++;
+
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+	if (dd->flags & QIB_DCA_ENABLED)
+		qib_update_rhdrq_dca(rcd);
+#endif
+
+	/* Clear the interrupt bit we expect to be set. */
+	qib_write_kreg(dd, kr_intclear, ((1ULL << QIB_I_RCVAVAIL_LSB) |
+		       (1ULL << QIB_I_RCVURG_LSB)) << rcd->ctxt);
+
+	qib_kreceive(rcd, NULL, &npkts);
+	adjust_rcv_timeout(rcd, npkts);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Dedicated Send buffer available interrupt handler.
+ */
+static irqreturn_t qib_7322bufavail(int irq, void *data)
+{
+	struct qib_devdata *dd = data;
+
+	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
+		/*
+		 * This return value is not great, but we do not want the
+		 * interrupt core code to remove our interrupt handler
+		 * because we don't appear to be handling an interrupt
+		 * during a chip reset.
+		 */
+		return IRQ_HANDLED;
+
+	qib_stats.sps_ints++;
+	if (dd->int_counter != (u32) -1)
+		dd->int_counter++;
+
+	/* Clear the interrupt bit we expect to be set. */
+	qib_write_kreg(dd, kr_intclear, QIB_I_SPIOBUFAVAIL);
+
+	/* qib_ib_piobufavail() will clear the want PIO interrupt if needed */
+	if (dd->flags & QIB_INITTED)
+		qib_ib_piobufavail(dd);
+	else
+		qib_wantpiobuf_7322_intr(dd, 0);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Dedicated Send DMA interrupt handler.
+ */
+static irqreturn_t sdma_intr(int irq, void *data)
+{
+	struct qib_pportdata *ppd = data;
+	struct qib_devdata *dd = ppd->dd;
+
+	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
+		/*
+		 * This return value is not great, but we do not want the
+		 * interrupt core code to remove our interrupt handler
+		 * because we don't appear to be handling an interrupt
+		 * during a chip reset.
+		 */
+		return IRQ_HANDLED;
+
+	qib_stats.sps_ints++;
+	if (dd->int_counter != (u32) -1)
+		dd->int_counter++;
+
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+	if (dd->flags & QIB_DCA_ENABLED)
+		qib_update_sdma_dca(ppd);
+#endif
+
+	/* Clear the interrupt bit we expect to be set. */
+	qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
+		       INT_MASK_P(SDma, 1) : INT_MASK_P(SDma, 0));
+	qib_sdma_intr(ppd);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Dedicated Send DMA idle interrupt handler.
+ */
+static irqreturn_t sdma_idle_intr(int irq, void *data)
+{
+	struct qib_pportdata *ppd = data;
+	struct qib_devdata *dd = ppd->dd;
+
+	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
+		/*
+		 * This return value is not great, but we do not want the
+		 * interrupt core code to remove our interrupt handler
+		 * because we don't appear to be handling an interrupt
+		 * during a chip reset.
+		 */
+		return IRQ_HANDLED;
+
+	qib_stats.sps_ints++;
+	if (dd->int_counter != (u32) -1)
+		dd->int_counter++;
+
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+	if (dd->flags & QIB_DCA_ENABLED)
+		qib_update_sdma_dca(ppd);
+#endif
+
+	/* Clear the interrupt bit we expect to be set. */
+	qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
+		       INT_MASK_P(SDmaIdle, 1) : INT_MASK_P(SDmaIdle, 0));
+	qib_sdma_intr(ppd);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Dedicated Send DMA progress interrupt handler.
+ */
+static irqreturn_t sdma_progress_intr(int irq, void *data)
+{
+	struct qib_pportdata *ppd = data;
+	struct qib_devdata *dd = ppd->dd;
+
+	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
+		/*
+		 * This return value is not great, but we do not want the
+		 * interrupt core code to remove our interrupt handler
+		 * because we don't appear to be handling an interrupt
+		 * during a chip reset.
+		 */
+		return IRQ_HANDLED;
+
+	qib_stats.sps_ints++;
+	if (dd->int_counter != (u32) -1)
+		dd->int_counter++;
+
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+	if (dd->flags & QIB_DCA_ENABLED)
+		qib_update_sdma_dca(ppd);
+#endif
+
+	/* Clear the interrupt bit we expect to be set. */
+	qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
+		       INT_MASK_P(SDmaProgress, 1) :
+		       INT_MASK_P(SDmaProgress, 0));
+	qib_sdma_intr(ppd);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Dedicated Send DMA cleanup interrupt handler.
+ */
+static irqreturn_t sdma_cleanup_intr(int irq, void *data)
+{
+	struct qib_pportdata *ppd = data;
+	struct qib_devdata *dd = ppd->dd;
+
+	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
+		/*
+		 * This return value is not great, but we do not want the
+		 * interrupt core code to remove our interrupt handler
+		 * because we don't appear to be handling an interrupt
+		 * during a chip reset.
+		 */
+		return IRQ_HANDLED;
+
+	qib_stats.sps_ints++;
+	if (dd->int_counter != (u32) -1)
+		dd->int_counter++;
+
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+	if (dd->flags & QIB_DCA_ENABLED)
+		qib_update_sdma_dca(ppd);
+#endif
+
+	/* Clear the interrupt bit we expect to be set. */
+	qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
+		       INT_MASK_PM(SDmaCleanupDone, 1) :
+		       INT_MASK_PM(SDmaCleanupDone, 0));
+	qib_sdma_process_event(ppd, qib_sdma_event_e20_hw_started);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Set up our chip-specific interrupt handler.
+ * The interrupt type has already been setup, so
+ * we just need to do the registration and error checking.
+ * If we are using MSIx interrupts, we may fall back to
+ * INTx later, if the interrupt handler doesn't get called
+ * within 1/2 second (see verify_interrupt()).
+ */
+static void qib_setup_7322_interrupt(struct qib_devdata *dd, int clearpend)
+{
+	int ret, i, msixnum;
+	u64 redirect[6];
+	u64 mask;
+
+	if (!dd->num_pports)
+		return;
+
+	if (clearpend) {
+		/*
+		 * if not switching interrupt types, be sure interrupts are
+		 * disabled, and then clear anything pending at this point,
+		 * because we are starting clean.
+		 */
+		qib_7322_set_intr_state(dd, 0);
+
+		/* clear the reset error, init error/hwerror mask */
+		qib_7322_init_hwerrors(dd);
+
+		/* clear any interrupt bits that might be set */
+		qib_write_kreg(dd, kr_intclear, ~0ULL);
+
+		/* make sure no pending MSIx intr, and clear diag reg */
+		qib_write_kreg(dd, kr_intgranted, ~0ULL);
+		qib_write_kreg(dd, kr_vecclr_wo_int, ~0ULL);
+	}
+
+	if (!dd->cspec->num_msix_entries) {
+		/* Try to get INTx interrupt */
+try_intx:
+		if (!dd->pcidev->irq) {
+			qib_dev_err(dd, "irq is 0, BIOS error?  "
+				    "Interrupts won't work\n");
+			goto bail;
+		}
+		ret = request_irq(dd->pcidev->irq, qib_7322intr,
+				  IRQF_SHARED, QIB_DRV_NAME, dd);
+		if (ret) {
+			qib_dev_err(dd, "Couldn't setup INTx "
+				    "interrupt (irq=%d): %d\n",
+				    dd->pcidev->irq, ret);
+			goto bail;
+		}
+		dd->cspec->irq = dd->pcidev->irq;
+		dd->cspec->main_int_mask = ~0ULL;
+		goto bail;
+	}
+
+	/* Try to get MSIx interrupts */
+	memset(redirect, 0, sizeof redirect);
+	mask = ~0ULL;
+	msixnum = 0;
+	for (i = 0; msixnum < dd->cspec->num_msix_entries; i++) {
+		irq_handler_t handler;
+		const char *name;
+		void *arg;
+		u64 val;
+		int lsb, reg, sh;
+
+		if (i < ARRAY_SIZE(irq_table)) {
+			if (irq_table[i].port) {
+				/* skip if for a non-configured port */
+				if (irq_table[i].port > dd->num_pports)
+					continue;
+				arg = dd->pport + irq_table[i].port - 1;
+			} else
+				arg = dd;
+			lsb = irq_table[i].lsb;
+			handler = irq_table[i].handler;
+			name = irq_table[i].name;
+		} else {
+			unsigned ctxt;
+
+			ctxt = i - ARRAY_SIZE(irq_table);
+			/* per krcvq context receive interrupt */
+			arg = dd->rcd[ctxt];
+			if (!arg)
+				continue;
+			lsb = QIB_I_RCVAVAIL_LSB + ctxt;
+			handler = qib_7322pintr;
+			name = QIB_DRV_NAME " (kctx)";
+		}
+		ret = request_irq(dd->cspec->msix_entries[msixnum].vector,
+				  handler, 0, name, arg);
+		if (ret) {
+			/*
+			 * Shouldn't happen since the enable said we could
+			 * have as many as we are trying to setup here.
+			 */
+			qib_dev_err(dd, "Couldn't setup MSIx "
+				    "interrupt (vec=%d, irq=%d): %d\n", msixnum,
+				    dd->cspec->msix_entries[msixnum].vector,
+				    ret);
+			qib_7322_nomsix(dd);
+			goto try_intx;
+		}
+		dd->cspec->msix_arg[msixnum] = arg;
+		if (lsb >= 0) {
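+			/*
+			 * Steer this source to the vector just requested:
+			 * each IntRedirect register holds
+			 * IBA7322_REDIRECT_VEC_PER_REG fields, selected by
+			 * the source's lsb position in IntStatus.
+			 */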
+			reg = lsb / IBA7322_REDIRECT_VEC_PER_REG;
+			sh = (lsb % IBA7322_REDIRECT_VEC_PER_REG) *
+				SYM_LSB(IntRedirect0, vec1);
+			mask &= ~(1ULL << lsb);
+			redirect[reg] |= ((u64) msixnum) << sh;
+		}
+		val = qib_read_kreg64(dd, 2 * msixnum + 1 +
+			(QIB_7322_MsixTable_OFFS / sizeof(u64)));
+		msixnum++;
+	}
+	/* Initialize the vector mapping */
+	for (i = 0; i < ARRAY_SIZE(redirect); i++)
+		qib_write_kreg(dd, kr_intredirect + i, redirect[i]);
+	dd->cspec->main_int_mask = mask;
+bail:;
+}
+
+/**
+ * qib_7322_boardname - fill in the board name and note features
+ * @dd: the qlogic_ib device
+ *
+ * info will be based on the board revision register
+ */
+static unsigned qib_7322_boardname(struct qib_devdata *dd)
+{
+	/* Will need enumeration of board-types here */
+	char *n;
+	u32 boardid, namelen;
+	unsigned features = DUAL_PORT_CAP;
+
+	boardid = SYM_FIELD(dd->revision, Revision, BoardID);
+
+	switch (boardid) {
+	case 0:
+		n = "InfiniPath_QLE7342_Emulation";
+		break;
+	case 1:
+		n = "InfiniPath_QLE7340";
+		dd->flags |= QIB_HAS_QSFP;
+		features = PORT_SPD_CAP;
+		break;
+	case 2:
+		n = "InfiniPath_QLE7342";
+		dd->flags |= QIB_HAS_QSFP;
+		break;
+	case 3:
+		n = "InfiniPath_QMI7342";
+		break;
+	case 4:
+		n = "InfiniPath_Unsupported7342";
+		qib_dev_err(dd, "Unsupported version of QMH7342\n");
+		features = 0;
+		break;
+	case BOARD_QMH7342:
+		n = "InfiniPath_QMH7342";
+		features = 0x24;
+		break;
+	case BOARD_QME7342:
+		n = "InfiniPath_QME7342";
+		break;
+	case 15:
+		n = "InfiniPath_QLE7342_TEST";
+		dd->flags |= QIB_HAS_QSFP;
+		break;
+	default:
+		n = "InfiniPath_QLE73xy_UNKNOWN";
+		qib_dev_err(dd, "Unknown 7322 board type %u\n", boardid);
+		break;
+	}
+	dd->board_atten = 1; /* index into txdds_Xdr */
+
+	namelen = strlen(n) + 1;
+	dd->boardname = kmalloc(namelen, GFP_KERNEL);
+	if (!dd->boardname)
+		qib_dev_err(dd, "Failed allocation for board name: %s\n", n);
+	else
+		snprintf(dd->boardname, namelen, "%s", n);
+
+	snprintf(dd->boardversion, sizeof(dd->boardversion),
+		 "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n",
+		 QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname,
+		 (unsigned)SYM_FIELD(dd->revision, Revision_R, Arch),
+		 dd->majrev, dd->minrev,
+		 (unsigned)SYM_FIELD(dd->revision, Revision_R, SW));
+
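+	/* the qib_singleport module parameter strips dual-port capability */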
+	if (qib_singleport && (features >> PORT_SPD_CAP_SHIFT) & PORT_SPD_CAP) {
+		qib_devinfo(dd->pcidev, "IB%u: Forced to single port mode"
+			    " by module parameter\n", dd->unit);
+		features &= PORT_SPD_CAP;
+	}
+
+	return features;
+}
+
+/*
+ * This routine sleeps, so it can only be called from user context, not
+ * from interrupt context.
+ */
+static int qib_do_7322_reset(struct qib_devdata *dd)
+{
+	u64 val;
+	u64 *msix_vecsave;
+	int i, msix_entries, ret = 1;
+	u16 cmdval;
+	u8 int_line, clinesz;
+	unsigned long flags;
+
+	/* Use dev_err so it shows up in logs, etc. */
+	qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit);
+
+	qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz);
+
+	msix_entries = dd->cspec->num_msix_entries;
+
+	/* no interrupts till re-initted */
+	qib_7322_set_intr_state(dd, 0);
+
+	if (msix_entries) {
+		qib_7322_nomsix(dd);
+		/* can be up to 512 bytes, too big for stack */
+		msix_vecsave = kmalloc(2 * dd->cspec->num_msix_entries *
+			sizeof(u64), GFP_KERNEL);
+		if (!msix_vecsave)
+			qib_dev_err(dd, "No mem to save MSIx data\n");
+	} else
+		msix_vecsave = NULL;
+
+	/*
+	 * Core PCI (as of 2.6.18) doesn't save or rewrite the full vector
+	 * info that is set up by the BIOS, so we have to save and restore
+	 * it ourselves.  There is some risk that something could change it
+	 * after we save it, but since we have disabled MSIx, it
+	 * shouldn't be touched...
+	 */
+	for (i = 0; i < msix_entries; i++) {
+		u64 vecaddr, vecdata;
+		vecaddr = qib_read_kreg64(dd, 2 * i +
+				  (QIB_7322_MsixTable_OFFS / sizeof(u64)));
+		vecdata = qib_read_kreg64(dd, 1 + 2 * i +
+				  (QIB_7322_MsixTable_OFFS / sizeof(u64)));
+		if (msix_vecsave) {
+			msix_vecsave[2 * i] = vecaddr;
+			/* save it without the masked bit set */
+			msix_vecsave[1 + 2 * i] = vecdata & ~0x100000000ULL;
+		}
+	}
+
+	dd->pport->cpspec->ibdeltainprog = 0;
+	dd->pport->cpspec->ibsymdelta = 0;
+	dd->pport->cpspec->iblnkerrdelta = 0;
+	dd->pport->cpspec->ibmalfdelta = 0;
+	dd->int_counter = 0; /* so we check interrupts work again */
+
+	/*
+	 * Keep chip from being accessed until we are ready.  Use
+	 * writeq() directly, to allow the write even though QIB_PRESENT
+	 * isn't set.
+	 */
+	dd->flags &= ~(QIB_INITTED | QIB_PRESENT | QIB_BADINTR);
+	dd->flags |= QIB_DOING_RESET;
+	val = dd->control | QLOGIC_IB_C_RESET;
+	writeq(val, &dd->kregbase[kr_control]);
+
+	for (i = 1; i <= 5; i++) {
+		/*
+		 * Allow MBIST, etc. to complete; longer on each retry.
+		 * We sometimes get machine checks from bus timeout if no
+		 * response, so for now, make it *really* long.
+		 */
+		msleep(1000 + (1 + i) * 3000);
+
+		qib_pcie_reenable(dd, cmdval, int_line, clinesz);
+
+		/*
+		 * Use readq directly, so we don't need to mark it as PRESENT
+		 * until we get a successful indication that all is well.
+		 */
+		val = readq(&dd->kregbase[kr_revision]);
+		if (val == dd->revision)
+			break;
+		if (i == 5) {
+			qib_dev_err(dd, "Failed to initialize after reset, "
+				    "unusable\n");
+			ret = 0;
+			goto bail;
+		}
+	}
+
+	dd->flags |= QIB_PRESENT; /* it's back */
+
+	if (msix_entries) {
+		/* restore the MSIx vector address and data if saved above */
+		for (i = 0; i < msix_entries; i++) {
+			dd->cspec->msix_entries[i].entry = i;
+			if (!msix_vecsave || !msix_vecsave[2 * i])
+				continue;
+			qib_write_kreg(dd, 2 * i +
+				(QIB_7322_MsixTable_OFFS / sizeof(u64)),
+				msix_vecsave[2 * i]);
+			qib_write_kreg(dd, 1 + 2 * i +
+				(QIB_7322_MsixTable_OFFS / sizeof(u64)),
+				msix_vecsave[1 + 2 * i]);
+		}
+	}
+
+	/* initialize the remaining registers.  */
+	for (i = 0; i < dd->num_pports; ++i)
+		write_7322_init_portregs(&dd->pport[i]);
+	write_7322_initregs(dd);
+
+	if (qib_pcie_params(dd, dd->lbus_width,
+			    &dd->cspec->num_msix_entries,
+			    dd->cspec->msix_entries))
+		qib_dev_err(dd, "Reset failed to setup PCIe or interrupts; "
+				"continuing anyway\n");
+
+	qib_setup_7322_interrupt(dd, 1);
+
+	for (i = 0; i < dd->num_pports; ++i) {
+		struct qib_pportdata *ppd = &dd->pport[i];
+
+		spin_lock_irqsave(&ppd->lflags_lock, flags);
+		ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
+		ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
+		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+	}
+
+bail:
+	dd->flags &= ~QIB_DOING_RESET; /* OK or not, no longer resetting */
+	kfree(msix_vecsave);
+	return ret;
+}
+
+/**
+ * qib_7322_put_tid - write a TID to the chip
+ * @dd: the qlogic_ib device
+ * @tidptr: pointer to the expected TID (in chip) to update
+ * @type: RCVHQ_RCV_TYPE_EAGER (0) or RCVHQ_RCV_TYPE_EXPECTED (1)
+ * @pa: physical address of in memory buffer; tidinvalid if freeing
+ */
+static void qib_7322_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
+			     u32 type, unsigned long pa)
+{
+	if (!(dd->flags & QIB_PRESENT))
+		return;
+	if (pa != dd->tidinvalid) {
+		u64 chippa = pa >> IBA7322_TID_PA_SHIFT;
+
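+		/*
+		 * The TID word holds the physical address shifted down
+		 * (2KB alignment, per the check below) with the buffer-size
+		 * code OR'd into the high bits; e.g. a pa of 0x10000 would
+		 * yield a chippa of 0x20 before the size bits are added.
+		 */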
+		/* paranoia checks */
+		if (pa != (chippa << IBA7322_TID_PA_SHIFT)) {
+			qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
+				    pa);
+			return;
+		}
+		if (chippa >= (1UL << IBA7322_TID_SZ_SHIFT)) {
+			qib_dev_err(dd, "Physical page address 0x%lx "
+				"larger than supported\n", pa);
+			return;
+		}
+
+		if (type == RCVHQ_RCV_TYPE_EAGER)
+			chippa |= dd->tidtemplate;
+		else /* for now, always full 4KB page */
+			chippa |= IBA7322_TID_SZ_4K;
+		pa = chippa;
+	}
+	writeq(pa, tidptr);
+	mmiowb();
+}
+
+/**
+ * qib_7322_clear_tids - clear all TID entries for a ctxt, expected and eager
+ * @dd: the qlogic_ib device
+ * @rcd: the context data
+ *
+ * clear all TID entries for a ctxt, expected and eager.
+ * Used from qib_close().
+ */
+static void qib_7322_clear_tids(struct qib_devdata *dd,
+				struct qib_ctxtdata *rcd)
+{
+	u64 __iomem *tidbase;
+	unsigned long tidinv;
+	u32 ctxt;
+	int i;
+
+	if (!dd->kregbase || !rcd)
+		return;
+
+	ctxt = rcd->ctxt;
+
+	tidinv = dd->tidinvalid;
+	tidbase = (u64 __iomem *)
+		((char __iomem *) dd->kregbase +
+		 dd->rcvtidbase +
+		 ctxt * dd->rcvtidcnt * sizeof(*tidbase));
+
+	for (i = 0; i < dd->rcvtidcnt; i++)
+		qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
+				 tidinv);
+
+	tidbase = (u64 __iomem *)
+		((char __iomem *) dd->kregbase +
+		 dd->rcvegrbase +
+		 rcd->rcvegr_tid_base * sizeof(*tidbase));
+
+	for (i = 0; i < rcd->rcvegrcnt; i++)
+		qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
+				 tidinv);
+}
+
+/**
+ * qib_7322_tidtemplate - setup constants for TID updates
+ * @dd: the qlogic_ib device
+ *
+ * We set up values that are used often, to avoid recalculating them each time
+ */
+static void qib_7322_tidtemplate(struct qib_devdata *dd)
+{
+	/*
+	 * For now, we always allocate 4KB buffers (at init) so we can
+	 * receive max size packets.  We may want a module parameter to
+	 * specify 2KB or 4KB and/or make it per port instead of per device
+	 * for those who want to reduce memory footprint.  Note that the
+	 * rcvhdrentsize size must be large enough to hold the largest
+	 * IB header (currently 96 bytes) that we expect to handle (plus of
+	 * course the 2 dwords of RHF).
+	 */
+	if (dd->rcvegrbufsize == 2048)
+		dd->tidtemplate = IBA7322_TID_SZ_2K;
+	else if (dd->rcvegrbufsize == 4096)
+		dd->tidtemplate = IBA7322_TID_SZ_4K;
+	dd->tidinvalid = 0;
+}
+
+/**
+ * qib_7322_get_base_info - set chip-specific flags for user code
+ * @rcd: the qlogic_ib ctxt
+ * @kinfo: qib_base_info pointer
+ *
+ * We set the PCIE flag because the lower bandwidth on PCIe vs
+ * HyperTransport can affect some user packet algorithms.
+ */
+static int qib_7322_get_base_info(struct qib_ctxtdata *rcd,
+				  struct qib_base_info *kinfo)
+{
+	kinfo->spi_runtime_flags |= QIB_RUNTIME_CTXT_MSB_IN_QP |
+		QIB_RUNTIME_PCIE | QIB_RUNTIME_NODMA_RTAIL |
+		QIB_RUNTIME_HDRSUPP | QIB_RUNTIME_SDMA;
+	if (rcd->dd->cspec->r1)
+		kinfo->spi_runtime_flags |= QIB_RUNTIME_RCHK;
+	if (rcd->dd->flags & QIB_USE_SPCL_TRIG)
+		kinfo->spi_runtime_flags |= QIB_RUNTIME_SPECIAL_TRIGGER;
+
+	return 0;
+}
+
+static struct qib_message_header *
+qib_7322_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr)
+{
+	u32 offset = qib_hdrget_offset(rhf_addr);
+
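+	/*
+	 * rhf_addr points at the RHF within the rcvhdrq entry; back up
+	 * by rhf_offset to the start of the entry, then advance by the
+	 * offset encoded in the RHF to reach the message header.
+	 */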
+	return (struct qib_message_header *)
+		(rhf_addr - dd->rhf_offset + offset);
+}
+
+/*
+ * Configure number of contexts.
+ */
+static void qib_7322_config_ctxts(struct qib_devdata *dd)
+{
+	unsigned long flags;
+	u32 nchipctxts;
+
+	nchipctxts = qib_read_kreg32(dd, kr_contextcnt);
+	dd->cspec->numctxts = nchipctxts;
+	if (qib_n_krcv_queues > 1 && dd->num_pports) {
+		/*
+		 * Set the mask for which bits from the QPN are used
+		 * to select a context number.
+		 */
+		dd->qpn_mask = 0x3f;
+		dd->first_user_ctxt = NUM_IB_PORTS +
+			(qib_n_krcv_queues - 1) * dd->num_pports;
+		if (dd->first_user_ctxt > nchipctxts)
+			dd->first_user_ctxt = nchipctxts;
+		dd->n_krcv_queues = dd->first_user_ctxt / dd->num_pports;
+	} else {
+		dd->first_user_ctxt = NUM_IB_PORTS;
+		dd->n_krcv_queues = 1;
+	}
+
+	if (!qib_cfgctxts) {
+		int nctxts = dd->first_user_ctxt + num_online_cpus();
+
+		if (nctxts <= 6)
+			dd->ctxtcnt = 6;
+		else if (nctxts <= 10)
+			dd->ctxtcnt = 10;
+		else if (nctxts <= nchipctxts)
+			dd->ctxtcnt = nchipctxts;
+	} else if (qib_cfgctxts < dd->num_pports)
+		dd->ctxtcnt = dd->num_pports;
+	else if (qib_cfgctxts <= nchipctxts)
+		dd->ctxtcnt = qib_cfgctxts;
+	if (!dd->ctxtcnt) /* none of the above, set to max */
+		dd->ctxtcnt = nchipctxts;
+
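+	/*
+	 * Example: with qib_n_krcv_queues == 1, first_user_ctxt is
+	 * NUM_IB_PORTS (2 on this dual-port chip); with 8 online CPUs
+	 * and qib_cfgctxts unset, nctxts == 10, so 10 ctxts are used.
+	 */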
+	/*
+	 * Chip can be configured for 6, 10, or 18 ctxts, and choice
+	 * affects number of eager TIDs per ctxt (1K, 2K, 4K).
+	 * Lock to be paranoid about later motion, etc.
+	 */
+	spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
+	if (dd->ctxtcnt > 10)
+		dd->rcvctrl |= 2ULL << SYM_LSB(RcvCtrl, ContextCfg);
+	else if (dd->ctxtcnt > 6)
+		dd->rcvctrl |= 1ULL << SYM_LSB(RcvCtrl, ContextCfg);
+	/* else configure for default 6 receive ctxts */
+
+	/* The XRC opcode is 5. */
+	dd->rcvctrl |= 5ULL << SYM_LSB(RcvCtrl, XrcTypeCode);
+
+	/*
+	 * RcvCtrl *must* be written here so that the
+	 * chip understands how to change rcvegrcnt below.
+	 */
+	qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
+	spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
+
+	/* kr_rcvegrcnt changes based on the number of contexts enabled */
+	dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);
+	dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt,
+				dd->num_pports > 1 ? 1024U : 2048U);
+}
+
+static int qib_7322_get_ib_cfg(struct qib_pportdata *ppd, int which)
+{
+
+	int lsb, ret = 0;
+	u64 maskr; /* right-justified mask */
+
+	switch (which) {
+
+	case QIB_IB_CFG_LWID_ENB: /* Get allowed Link-width */
+		ret = ppd->link_width_enabled;
+		goto done;
+
+	case QIB_IB_CFG_LWID: /* Get currently active Link-width */
+		ret = ppd->link_width_active;
+		goto done;
+
+	case QIB_IB_CFG_SPD_ENB: /* Get allowed Link speeds */
+		ret = ppd->link_speed_enabled;
+		goto done;
+
+	case QIB_IB_CFG_SPD: /* Get current Link spd */
+		ret = ppd->link_speed_active;
+		goto done;
+
+	case QIB_IB_CFG_RXPOL_ENB: /* Get Auto-RX-polarity enable */
+		lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
+		maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
+		break;
+
+	case QIB_IB_CFG_LREV_ENB: /* Get Auto-Lane-reversal enable */
+		lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
+		maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
+		break;
+
+	case QIB_IB_CFG_LINKLATENCY:
+		ret = qib_read_kreg_port(ppd, krp_ibcstatus_b) &
+			SYM_MASK(IBCStatusB_0, LinkRoundTripLatency);
+		goto done;
+
+	case QIB_IB_CFG_OP_VLS:
+		ret = ppd->vls_operational;
+		goto done;
+
+	case QIB_IB_CFG_VL_HIGH_CAP:
+		ret = 16;
+		goto done;
+
+	case QIB_IB_CFG_VL_LOW_CAP:
+		ret = 16;
+		goto done;
+
+	case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
+		ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
+				OverrunThreshold);
+		goto done;
+
+	case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
+		ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
+				PhyerrThreshold);
+		goto done;
+
+	case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
+		/* will only take effect when the link state changes */
+		ret = (ppd->cpspec->ibcctrl_a &
+		       SYM_MASK(IBCCtrlA_0, LinkDownDefaultState)) ?
+			IB_LINKINITCMD_SLEEP : IB_LINKINITCMD_POLL;
+		goto done;
+
+	case QIB_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */
+		lsb = IBA7322_IBC_HRTBT_LSB;
+		maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */
+		break;
+
+	case QIB_IB_CFG_PMA_TICKS:
+		/*
+		 * 0x00 = 10x link transfer rate, i.e. 4 nsec for 2.5Gbs.
+		 * Since the clock is always 250MHz, the value is 3, 1 or 0.
+		 */
+		if (ppd->link_speed_active == QIB_IB_QDR)
+			ret = 3;
+		else if (ppd->link_speed_active == QIB_IB_DDR)
+			ret = 1;
+		else
+			ret = 0;
+		goto done;
+
+	default:
+		ret = -EINVAL;
+		goto done;
+	}
+	ret = (int)((ppd->cpspec->ibcctrl_b >> lsb) & maskr);
+done:
+	return ret;
+}
+
+/*
+ * The code below is again cribbed liberally from the older driver;
+ * do not lean heavily on it.
+ */
+#define IBA7322_IBC_DLIDLMC_SHIFT QIB_7322_IBCCtrlB_0_IB_DLID_LSB
+#define IBA7322_IBC_DLIDLMC_MASK (QIB_7322_IBCCtrlB_0_IB_DLID_RMASK \
+	| (QIB_7322_IBCCtrlB_0_IB_DLID_MASK_RMASK << 16))
+
+static int qib_7322_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
+{
+	struct qib_devdata *dd = ppd->dd;
+	u64 maskr; /* right-justified mask */
+	int lsb, ret = 0;
+	u16 lcmd, licmd;
+	unsigned long flags;
+
+	switch (which) {
+	case QIB_IB_CFG_LIDLMC:
+		/*
+		 * Set LID and LMC. Combined to avoid a possible hazard;
+		 * the caller puts LMC in the 16 MSbits and DLID in the
+		 * 16 LSbits of val.
+		 */
+		lsb = IBA7322_IBC_DLIDLMC_SHIFT;
+		maskr = IBA7322_IBC_DLIDLMC_MASK;
+		/*
+		 * For header-checking, the SLID in the packet will
+		 * be masked with SendIBSLMCMask, and compared
+		 * with SendIBSLIDAssignMask. Make sure we do not
+		 * set any bits not covered by the mask, or we get
+		 * false-positives.
+		 */
+		qib_write_kreg_port(ppd, krp_sendslid,
+				    val & (val >> 16) & SendIBSLIDAssignMask);
+		qib_write_kreg_port(ppd, krp_sendslidmask,
+				    (val >> 16) & SendIBSLMCMask);
+		break;
+
+	case QIB_IB_CFG_LWID_ENB: /* set allowed Link-width */
+		ppd->link_width_enabled = val;
+		/* convert IB value to chip register value */
+		if (val == IB_WIDTH_1X)
+			val = 0;
+		else if (val == IB_WIDTH_4X)
+			val = 1;
+		else
+			val = 3;
+		maskr = SYM_RMASK(IBCCtrlB_0, IB_NUM_CHANNELS);
+		lsb = SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS);
+		break;
+
+	case QIB_IB_CFG_SPD_ENB: /* set allowed Link speeds */
+		/*
+		 * As with width, only write the actual register if the
+		 * link is currently down, otherwise takes effect on next
+		 * link change.  Since the setting is being explicitly requested
+		 * (via MAD or sysfs), clear autoneg failure status if speed
+		 * autoneg is enabled.
+		 */
+		ppd->link_speed_enabled = val;
+		val <<= IBA7322_IBC_SPEED_LSB;
+		maskr = IBA7322_IBC_SPEED_MASK | IBA7322_IBC_IBTA_1_2_MASK |
+			IBA7322_IBC_MAX_SPEED_MASK;
+		if (val & (val - 1)) {
+			/* Multiple speeds enabled (more than one bit set) */
+			val |= IBA7322_IBC_IBTA_1_2_MASK |
+				IBA7322_IBC_MAX_SPEED_MASK;
+			spin_lock_irqsave(&ppd->lflags_lock, flags);
+			ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
+			spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+		} else if (val & IBA7322_IBC_SPEED_QDR)
+			val |= IBA7322_IBC_IBTA_1_2_MASK;
+		/* IBTA 1.2 mode + min/max + speed bits are contiguous */
+		lsb = SYM_LSB(IBCCtrlB_0, IB_ENHANCED_MODE);
+		break;
+
+	case QIB_IB_CFG_RXPOL_ENB: /* set Auto-RX-polarity enable */
+		lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
+		maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
+		break;
+
+	case QIB_IB_CFG_LREV_ENB: /* set Auto-Lane-reversal enable */
+		lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
+		maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
+		break;
+
+	case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
+		maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
+				  OverrunThreshold);
+		if (maskr != val) {
+			ppd->cpspec->ibcctrl_a &=
+				~SYM_MASK(IBCCtrlA_0, OverrunThreshold);
+			ppd->cpspec->ibcctrl_a |= (u64) val <<
+				SYM_LSB(IBCCtrlA_0, OverrunThreshold);
+			qib_write_kreg_port(ppd, krp_ibcctrl_a,
+					    ppd->cpspec->ibcctrl_a);
+			qib_write_kreg(dd, kr_scratch, 0ULL);
+		}
+		goto bail;
+
+	case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
+		maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
+				  PhyerrThreshold);
+		if (maskr != val) {
+			ppd->cpspec->ibcctrl_a &=
+				~SYM_MASK(IBCCtrlA_0, PhyerrThreshold);
+			ppd->cpspec->ibcctrl_a |= (u64) val <<
+				SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
+			qib_write_kreg_port(ppd, krp_ibcctrl_a,
+					    ppd->cpspec->ibcctrl_a);
+			qib_write_kreg(dd, kr_scratch, 0ULL);
+		}
+		goto bail;
+
+	case QIB_IB_CFG_PKEYS: /* update pkeys */
+		maskr = (u64) ppd->pkeys[0] | ((u64) ppd->pkeys[1] << 16) |
+			((u64) ppd->pkeys[2] << 32) |
+			((u64) ppd->pkeys[3] << 48);
+		qib_write_kreg_port(ppd, krp_partitionkey, maskr);
+		goto bail;
+
+	case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
+		/* will only take effect when the link state changes */
+		if (val == IB_LINKINITCMD_POLL)
+			ppd->cpspec->ibcctrl_a &=
+				~SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
+		else /* SLEEP */
+			ppd->cpspec->ibcctrl_a |=
+				SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
+		qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
+		qib_write_kreg(dd, kr_scratch, 0ULL);
+		goto bail;
+
+	case QIB_IB_CFG_MTU: /* update the MTU in IBC */
+		/*
+		 * Update our housekeeping variables, and set IBC max
+		 * size, same as init code; max IBC is max we allow in
+		 * buffer, less the qword pbc, plus 1 for ICRC, in dwords.
+		 * Set even if it's unchanged; print a debug message only
+		 * on changes.
+		 */
+		val = (ppd->ibmaxlen >> 2) + 1;
+		ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, MaxPktLen);
+		ppd->cpspec->ibcctrl_a |= (u64)val <<
+			SYM_LSB(IBCCtrlA_0, MaxPktLen);
+		qib_write_kreg_port(ppd, krp_ibcctrl_a,
+				    ppd->cpspec->ibcctrl_a);
+		qib_write_kreg(dd, kr_scratch, 0ULL);
+		goto bail;
+
+	case QIB_IB_CFG_LSTATE: /* set the IB link state */
+		switch (val & 0xffff0000) {
+		case IB_LINKCMD_DOWN:
+			lcmd = QLOGIC_IB_IBCC_LINKCMD_DOWN;
+			ppd->cpspec->ibmalfusesnap = 1;
+			ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
+				crp_errlink);
+			if (!ppd->cpspec->ibdeltainprog &&
+			    qib_compat_ddr_negotiate) {
+				ppd->cpspec->ibdeltainprog = 1;
+				ppd->cpspec->ibsymsnap =
+					read_7322_creg32_port(ppd,
+							      crp_ibsymbolerr);
+				ppd->cpspec->iblnkerrsnap =
+					read_7322_creg32_port(ppd,
+						      crp_iblinkerrrecov);
+			}
+			break;
+
+		case IB_LINKCMD_ARMED:
+			lcmd = QLOGIC_IB_IBCC_LINKCMD_ARMED;
+			if (ppd->cpspec->ibmalfusesnap) {
+				ppd->cpspec->ibmalfusesnap = 0;
+				ppd->cpspec->ibmalfdelta +=
+					read_7322_creg32_port(ppd,
+							      crp_errlink) -
+					ppd->cpspec->ibmalfsnap;
+			}
+			break;
+
+		case IB_LINKCMD_ACTIVE:
+			lcmd = QLOGIC_IB_IBCC_LINKCMD_ACTIVE;
+			break;
+
+		default:
+			ret = -EINVAL;
+			qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16);
+			goto bail;
+		}
+		switch (val & 0xffff) {
+		case IB_LINKINITCMD_NOP:
+			licmd = 0;
+			break;
+
+		case IB_LINKINITCMD_POLL:
+			licmd = QLOGIC_IB_IBCC_LINKINITCMD_POLL;
+			break;
+
+		case IB_LINKINITCMD_SLEEP:
+			licmd = QLOGIC_IB_IBCC_LINKINITCMD_SLEEP;
+			break;
+
+		case IB_LINKINITCMD_DISABLE:
+			licmd = QLOGIC_IB_IBCC_LINKINITCMD_DISABLE;
+			ppd->cpspec->chase_end = 0;
+			/*
+			 * Stop the state-chase counter and timer, if running;
+			 * wait for the pending timer, but don't clear .data (ppd)!
+			 */
+			if (ppd->cpspec->chase_timer.expires) {
+				del_timer_sync(&ppd->cpspec->chase_timer);
+				ppd->cpspec->chase_timer.expires = 0;
+			}
+			break;
+
+		default:
+			ret = -EINVAL;
+			qib_dev_err(dd, "bad linkinitcmd req 0x%x\n",
+				    val & 0xffff);
+			goto bail;
+		}
+		qib_set_ib_7322_lstate(ppd, lcmd, licmd);
+		goto bail;
+
+	case QIB_IB_CFG_OP_VLS:
+		if (ppd->vls_operational != val) {
+			ppd->vls_operational = val;
+			set_vls(ppd);
+		}
+		goto bail;
+
+	case QIB_IB_CFG_VL_HIGH_LIMIT:
+		qib_write_kreg_port(ppd, krp_highprio_limit, val);
+		goto bail;
+
+	case QIB_IB_CFG_HRTBT: /* set Heartbeat off/enable/auto */
+		if (val > 3) {
+			ret = -EINVAL;
+			goto bail;
+		}
+		lsb = IBA7322_IBC_HRTBT_LSB;
+		maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */
+		break;
+
+	case QIB_IB_CFG_PORT:
+		/* val is the port number of the switch we are connected to. */
+		if (ppd->dd->cspec->r1) {
+			cancel_delayed_work(&ppd->cpspec->ipg_work);
+			ppd->cpspec->ipg_tries = 0;
+		}
+		goto bail;
+
+	default:
+		ret = -EINVAL;
+		goto bail;
+	}
+	ppd->cpspec->ibcctrl_b &= ~(maskr << lsb);
+	ppd->cpspec->ibcctrl_b |= (((u64) val & maskr) << lsb);
+	qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
+	qib_write_kreg(dd, kr_scratch, 0);
+bail:
+	return ret;
+}
+
+static int qib_7322_set_loopback(struct qib_pportdata *ppd, const char *what)
+{
+	int ret = 0;
+	u64 val, ctrlb;
+
+	/* only IBC loopback, may add serdes and xgxs loopbacks later */
+	if (!strncmp(what, "ibc", 3)) {
+		ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0,
+						       Loopback);
+		val = 0; /* disable heart beat, so link will come up */
+		qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
+			 ppd->dd->unit, ppd->port);
+	} else if (!strncmp(what, "off", 3)) {
+		ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0,
+							Loopback);
+		/* enable heart beat again */
+		val = IBA7322_IBC_HRTBT_RMASK << IBA7322_IBC_HRTBT_LSB;
+		qib_devinfo(ppd->dd->pcidev, "Disabling IB%u:%u IBC loopback "
+			    "(normal)\n", ppd->dd->unit, ppd->port);
+	} else
+		ret = -EINVAL;
+	if (!ret) {
+		qib_write_kreg_port(ppd, krp_ibcctrl_a,
+				    ppd->cpspec->ibcctrl_a);
+		ctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_HRTBT_MASK
+					     << IBA7322_IBC_HRTBT_LSB);
+		ppd->cpspec->ibcctrl_b = ctrlb | val;
+		qib_write_kreg_port(ppd, krp_ibcctrl_b,
+				    ppd->cpspec->ibcctrl_b);
+		qib_write_kreg(ppd->dd, kr_scratch, 0);
+	}
+	return ret;
+}
+
+static void get_vl_weights(struct qib_pportdata *ppd, unsigned regno,
+			   struct ib_vl_weight_elem *vl)
+{
+	unsigned i;
+
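+	/* each of the 16 arbitration-table entries packs a VL and a weight */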
+	for (i = 0; i < 16; i++, regno++, vl++) {
+		u32 val = qib_read_kreg_port(ppd, regno);
+
+		vl->vl = (val >> SYM_LSB(LowPriority0_0, VirtualLane)) &
+			SYM_RMASK(LowPriority0_0, VirtualLane);
+		vl->weight = (val >> SYM_LSB(LowPriority0_0, Weight)) &
+			SYM_RMASK(LowPriority0_0, Weight);
+	}
+}
+
+static void set_vl_weights(struct qib_pportdata *ppd, unsigned regno,
+			   struct ib_vl_weight_elem *vl)
+{
+	unsigned i;
+
+	for (i = 0; i < 16; i++, regno++, vl++) {
+		u64 val;
+
+		val = ((vl->vl & SYM_RMASK(LowPriority0_0, VirtualLane)) <<
+			SYM_LSB(LowPriority0_0, VirtualLane)) |
+		      ((vl->weight & SYM_RMASK(LowPriority0_0, Weight)) <<
+			SYM_LSB(LowPriority0_0, Weight));
+		qib_write_kreg_port(ppd, regno, val);
+	}
+	if (!(ppd->p_sendctrl & SYM_MASK(SendCtrl_0, IBVLArbiterEn))) {
+		struct qib_devdata *dd = ppd->dd;
+		unsigned long flags;
+
+		spin_lock_irqsave(&dd->sendctrl_lock, flags);
+		ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, IBVLArbiterEn);
+		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
+		qib_write_kreg(dd, kr_scratch, 0);
+		spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
+	}
+}
+
+static int qib_7322_get_ib_table(struct qib_pportdata *ppd, int which, void *t)
+{
+	switch (which) {
+	case QIB_IB_TBL_VL_HIGH_ARB:
+		get_vl_weights(ppd, krp_highprio_0, t);
+		break;
+
+	case QIB_IB_TBL_VL_LOW_ARB:
+		get_vl_weights(ppd, krp_lowprio_0, t);
+		break;
+
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int qib_7322_set_ib_table(struct qib_pportdata *ppd, int which, void *t)
+{
+	switch (which) {
+	case QIB_IB_TBL_VL_HIGH_ARB:
+		set_vl_weights(ppd, krp_highprio_0, t);
+		break;
+
+	case QIB_IB_TBL_VL_LOW_ARB:
+		set_vl_weights(ppd, krp_lowprio_0, t);
+		break;
+
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void qib_update_7322_usrhead(struct qib_ctxtdata *rcd, u64 hd,
+				    u32 updegr, u32 egrhd)
+{
+	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
+	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
+	if (updegr)
+		qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
+}
+
+static u32 qib_7322_hdrqempty(struct qib_ctxtdata *rcd)
+{
+	u32 head, tail;
+
+	head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt);
+	if (rcd->rcvhdrtail_kvaddr)
+		tail = qib_get_rcvhdrtail(rcd);
+	else
+		tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt);
+	return head == tail;
+}
+
+#define RCVCTRL_COMMON_MODS (QIB_RCVCTRL_CTXT_ENB | \
+	QIB_RCVCTRL_CTXT_DIS | \
+	QIB_RCVCTRL_TIDFLOW_ENB | \
+	QIB_RCVCTRL_TIDFLOW_DIS | \
+	QIB_RCVCTRL_TAILUPD_ENB | \
+	QIB_RCVCTRL_TAILUPD_DIS | \
+	QIB_RCVCTRL_INTRAVAIL_ENB | \
+	QIB_RCVCTRL_INTRAVAIL_DIS | \
+	QIB_RCVCTRL_BP_ENB | \
+	QIB_RCVCTRL_BP_DIS)
+
+#define RCVCTRL_PORT_MODS (QIB_RCVCTRL_CTXT_ENB | \
+	QIB_RCVCTRL_CTXT_DIS | \
+	QIB_RCVCTRL_PKEY_DIS | \
+	QIB_RCVCTRL_PKEY_ENB)
+
+/*
+ * Modify the RCVCTRL register in chip-specific way. This
+ * is a function because bit positions and (future) register
+ * location are chip-specific, but the needed operations are
+ * generic. <op> is a bit-mask because we often want to
+ * do multiple modifications.
+ */
+static void rcvctrl_7322_mod(struct qib_pportdata *ppd, unsigned int op,
+			     int ctxt)
+{
+	struct qib_devdata *dd = ppd->dd;
+	struct qib_ctxtdata *rcd;
+	u64 mask, val;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
+
+	if (op & QIB_RCVCTRL_TIDFLOW_ENB)
+		dd->rcvctrl |= SYM_MASK(RcvCtrl, TidFlowEnable);
+	if (op & QIB_RCVCTRL_TIDFLOW_DIS)
+		dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TidFlowEnable);
+	if (op & QIB_RCVCTRL_TAILUPD_ENB)
+		dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd);
+	if (op & QIB_RCVCTRL_TAILUPD_DIS)
+		dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TailUpd);
+	if (op & QIB_RCVCTRL_PKEY_ENB)
+		ppd->p_rcvctrl &= ~SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
+	if (op & QIB_RCVCTRL_PKEY_DIS)
+		ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
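+	/* a negative ctxt means "all contexts": build the mask over every one */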
+	if (ctxt < 0) {
+		mask = (1ULL << dd->ctxtcnt) - 1;
+		rcd = NULL;
+	} else {
+		mask = (1ULL << ctxt);
+		rcd = dd->rcd[ctxt];
+	}
+	if ((op & QIB_RCVCTRL_CTXT_ENB) && rcd) {
+		ppd->p_rcvctrl |=
+			(mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel));
+		if (!(dd->flags & QIB_NODMA_RTAIL)) {
+			op |= QIB_RCVCTRL_TAILUPD_ENB; /* need reg write */
+			dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd);
+		}
+		/* Write these registers before the context is enabled. */
+		qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt,
+				    rcd->rcvhdrqtailaddr_phys);
+		qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt,
+				    rcd->rcvhdrq_phys);
+		rcd->seq_cnt = 1;
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+		if (dd->flags & QIB_DCA_ENABLED)
+			qib_update_rhdrq_dca(rcd);
+#endif
+	}
+	if (op & QIB_RCVCTRL_CTXT_DIS)
+		ppd->p_rcvctrl &=
+			~(mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel));
+	if (op & QIB_RCVCTRL_BP_ENB)
+		dd->rcvctrl |= mask << SYM_LSB(RcvCtrl, dontDropRHQFull);
+	if (op & QIB_RCVCTRL_BP_DIS)
+		dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, dontDropRHQFull));
+	if (op & QIB_RCVCTRL_INTRAVAIL_ENB)
+		dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, IntrAvail));
+	if (op & QIB_RCVCTRL_INTRAVAIL_DIS)
+		dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, IntrAvail));
+	/*
+	 * Decide which registers to write depending on the ops enabled.
+	 * Special case is "flush" (no bits set at all)
+	 * which needs to write both.
+	 */
+	if (op == 0 || (op & RCVCTRL_COMMON_MODS))
+		qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
+	if (op == 0 || (op & RCVCTRL_PORT_MODS))
+		qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
+	if ((op & QIB_RCVCTRL_CTXT_ENB) && dd->rcd[ctxt]) {
+		/*
+		 * Init the context registers also; if we were
+		 * disabled, tail and head should both be zero
+		 * already from the enable, but since we don't
+		 * know, we have to do it explicitly.
+		 */
+		val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt);
+		qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt);
+
+		/* be sure enabling write seen; hd/tl should be 0 */
+		(void) qib_read_kreg32(dd, kr_scratch);
+		val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt);
+		dd->rcd[ctxt]->head = val;
+		/* If kctxt, interrupt on next receive. */
+		if (ctxt < dd->first_user_ctxt)
+			val |= dd->rhdrhead_intr_off;
+		qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
+	} else if ((op & QIB_RCVCTRL_INTRAVAIL_ENB) &&
+		dd->rcd[ctxt] && dd->rhdrhead_intr_off) {
+		/* arm rcv interrupt */
+		val = dd->rcd[ctxt]->head | dd->rhdrhead_intr_off;
+		qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
+	}
+	if (op & QIB_RCVCTRL_CTXT_DIS) {
+		unsigned f;
+
+		/* Now that the context is disabled, clear these registers. */
+		if (ctxt >= 0) {
+			qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt, 0);
+			qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt, 0);
+			for (f = 0; f < NUM_TIDFLOWS_CTXT; f++)
+				qib_write_ureg(dd, ur_rcvflowtable + f,
+					       TIDFLOW_ERRBITS, ctxt);
+		} else {
+			unsigned i;
+
+			for (i = 0; i < dd->cfgctxts; i++) {
+				qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr,
+						    i, 0);
+				qib_write_kreg_ctxt(dd, krc_rcvhdraddr, i, 0);
+				for (f = 0; f < NUM_TIDFLOWS_CTXT; f++)
+					qib_write_ureg(dd, ur_rcvflowtable + f,
+						       TIDFLOW_ERRBITS, i);
+			}
+		}
+	}
+	spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
+}
+
+/*
+ * Modify the SENDCTRL register in chip-specific way. This
+ * is a function because there are multiple such registers with
+ * slightly different layouts.
+ * The chip doesn't allow back-to-back sendctrl writes, so write
+ * the scratch register after writing sendctrl.
+ *
+ * Which register is written depends on the operation.
+ * Most operate on the common register, while
+ * SEND_ENB and SEND_DIS operate on the per-port ones.
+ * SEND_ENB is included in common because it can change SPCL_TRIG
+ */
+#define SENDCTRL_COMMON_MODS (\
+	QIB_SENDCTRL_CLEAR | \
+	QIB_SENDCTRL_AVAIL_DIS | \
+	QIB_SENDCTRL_AVAIL_ENB | \
+	QIB_SENDCTRL_AVAIL_BLIP | \
+	QIB_SENDCTRL_DISARM | \
+	QIB_SENDCTRL_DISARM_ALL | \
+	QIB_SENDCTRL_SEND_ENB)
+
+#define SENDCTRL_PORT_MODS (\
+	QIB_SENDCTRL_CLEAR | \
+	QIB_SENDCTRL_SEND_ENB | \
+	QIB_SENDCTRL_SEND_DIS | \
+	QIB_SENDCTRL_FLUSH)
+
+static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op)
+{
+	struct qib_devdata *dd = ppd->dd;
+	u64 tmp_dd_sendctrl;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dd->sendctrl_lock, flags);
+
+	/* First the dd ones that are "sticky", saved in shadow */
+	if (op & QIB_SENDCTRL_CLEAR)
+		dd->sendctrl = 0;
+	if (op & QIB_SENDCTRL_AVAIL_DIS)
+		dd->sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
+	else if (op & QIB_SENDCTRL_AVAIL_ENB) {
+		dd->sendctrl |= SYM_MASK(SendCtrl, SendBufAvailUpd);
+		if (dd->flags & QIB_USE_SPCL_TRIG)
+			dd->sendctrl |= SYM_MASK(SendCtrl, SpecialTriggerEn);
+	}
+
+	/* Then the ppd ones that are "sticky", saved in shadow */
+	if (op & QIB_SENDCTRL_SEND_DIS)
+		ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
+	else if (op & QIB_SENDCTRL_SEND_ENB)
+		ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);
+
+	if (op & QIB_SENDCTRL_DISARM_ALL) {
+		u32 i, last;
+
+		tmp_dd_sendctrl = dd->sendctrl;
+		last = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
+		/*
+		 * Disarm any buffers that are not yet launched,
+		 * disabling updates until done.
+		 */
+		tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
+		for (i = 0; i < last; i++) {
+			qib_write_kreg(dd, kr_sendctrl,
+				       tmp_dd_sendctrl |
+				       SYM_MASK(SendCtrl, Disarm) | i);
+			qib_write_kreg(dd, kr_scratch, 0);
+		}
+	}
+
+	if (op & QIB_SENDCTRL_FLUSH) {
+		u64 tmp_ppd_sendctrl = ppd->p_sendctrl;
+
+		/*
+		 * Now drain all the fifos.  The Abort bit should never be
+		 * needed, so for now, at least, we don't use it.
+		 */
+		tmp_ppd_sendctrl |=
+			SYM_MASK(SendCtrl_0, TxeDrainRmFifo) |
+			SYM_MASK(SendCtrl_0, TxeDrainLaFifo) |
+			SYM_MASK(SendCtrl_0, TxeBypassIbc);
+		qib_write_kreg_port(ppd, krp_sendctrl, tmp_ppd_sendctrl);
+		qib_write_kreg(dd, kr_scratch, 0);
+	}
+
+	tmp_dd_sendctrl = dd->sendctrl;
+
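+	/* DISARM carries the target buffer number in the op word itself */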
+	if (op & QIB_SENDCTRL_DISARM)
+		tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Disarm) |
+			((op & QIB_7322_SendCtrl_DisarmSendBuf_RMASK) <<
+			 SYM_LSB(SendCtrl, DisarmSendBuf));
+	if ((op & QIB_SENDCTRL_AVAIL_BLIP) &&
+	    (dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd)))
+		tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
+
+	if (op == 0 || (op & SENDCTRL_COMMON_MODS)) {
+		qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl);
+		qib_write_kreg(dd, kr_scratch, 0);
+	}
+
+	if (op == 0 || (op & SENDCTRL_PORT_MODS)) {
+		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
+		qib_write_kreg(dd, kr_scratch, 0);
+	}
+
+	if (op & QIB_SENDCTRL_AVAIL_BLIP) {
+		qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
+		qib_write_kreg(dd, kr_scratch, 0);
+	}
+
+	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
+
+	if (op & QIB_SENDCTRL_FLUSH) {
+		u32 v;
+		/*
+		 * Ensure the writes have hit the chip, then do a few
+		 * more reads to allow DMA of the pioavail registers
+		 * to occur, so the in-memory copy is in sync with
+		 * the chip.  Not always safe to sleep.
+		 */
+		v = qib_read_kreg32(dd, kr_scratch);
+		qib_write_kreg(dd, kr_scratch, v);
+		v = qib_read_kreg32(dd, kr_scratch);
+		qib_write_kreg(dd, kr_scratch, v);
+		qib_read_kreg32(dd, kr_scratch);
+	}
+}
+
+#define _PORT_VIRT_FLAG 0x8000U /* "virtual", need adjustments */
+#define _PORT_64BIT_FLAG 0x10000U /* not "virtual", but 64bit */
+#define _PORT_CNTR_IDXMASK 0x7fffU /* mask off flags above */
+
+/**
+ * qib_portcntr_7322 - read a per-port chip counter
+ * @ppd: the qlogic_ib pport
+ * @reg: the counter to read (not a chip offset)
+ */
+static u64 qib_portcntr_7322(struct qib_pportdata *ppd, u32 reg)
+{
+	struct qib_devdata *dd = ppd->dd;
+	u64 ret = 0ULL;
+	u16 creg;
+	/* 0xffff for unimplemented or synthesized counters */
+	static const u32 xlator[] = {
+		[QIBPORTCNTR_PKTSEND] = crp_pktsend | _PORT_64BIT_FLAG,
+		[QIBPORTCNTR_WORDSEND] = crp_wordsend | _PORT_64BIT_FLAG,
+		[QIBPORTCNTR_PSXMITDATA] = crp_psxmitdatacount,
+		[QIBPORTCNTR_PSXMITPKTS] = crp_psxmitpktscount,
+		[QIBPORTCNTR_PSXMITWAIT] = crp_psxmitwaitcount,
+		[QIBPORTCNTR_SENDSTALL] = crp_sendstall,
+		[QIBPORTCNTR_PKTRCV] = crp_pktrcv | _PORT_64BIT_FLAG,
+		[QIBPORTCNTR_PSRCVDATA] = crp_psrcvdatacount,
+		[QIBPORTCNTR_PSRCVPKTS] = crp_psrcvpktscount,
+		[QIBPORTCNTR_RCVEBP] = crp_rcvebp,
+		[QIBPORTCNTR_RCVOVFL] = crp_rcvovfl,
+		[QIBPORTCNTR_WORDRCV] = crp_wordrcv | _PORT_64BIT_FLAG,
+		[QIBPORTCNTR_RXDROPPKT] = 0xffff, /* not needed for 7322 */
+		[QIBPORTCNTR_RXLOCALPHYERR] = crp_rxotherlocalphyerr,
+		[QIBPORTCNTR_RXVLERR] = crp_rxvlerr,
+		[QIBPORTCNTR_ERRICRC] = crp_erricrc,
+		[QIBPORTCNTR_ERRVCRC] = crp_errvcrc,
+		[QIBPORTCNTR_ERRLPCRC] = crp_errlpcrc,
+		[QIBPORTCNTR_BADFORMAT] = crp_badformat,
+		[QIBPORTCNTR_ERR_RLEN] = crp_err_rlen,
+		[QIBPORTCNTR_IBSYMBOLERR] = crp_ibsymbolerr,
+		[QIBPORTCNTR_INVALIDRLEN] = crp_invalidrlen,
+		[QIBPORTCNTR_UNSUPVL] = crp_txunsupvl,
+		[QIBPORTCNTR_EXCESSBUFOVFL] = crp_excessbufferovfl,
+		[QIBPORTCNTR_ERRLINK] = crp_errlink,
+		[QIBPORTCNTR_IBLINKDOWN] = crp_iblinkdown,
+		[QIBPORTCNTR_IBLINKERRRECOV] = crp_iblinkerrrecov,
+		[QIBPORTCNTR_LLI] = crp_locallinkintegrityerr,
+		[QIBPORTCNTR_VL15PKTDROP] = crp_vl15droppedpkt,
+		[QIBPORTCNTR_ERRPKEY] = crp_errpkey,
+		/*
+		 * the next 3 aren't really counters, but were implemented
+		 * as counters in older chips, so still get accessed as
+		 * though they were counters from this code.
+		 */
+		[QIBPORTCNTR_PSINTERVAL] = krp_psinterval,
+		[QIBPORTCNTR_PSSTART] = krp_psstart,
+		[QIBPORTCNTR_PSSTAT] = krp_psstat,
+		/* pseudo-counter, summed for all ports */
+		[QIBPORTCNTR_KHDROVFL] = 0xffff,
+	};
+
+	if (reg >= ARRAY_SIZE(xlator)) {
+		qib_devinfo(ppd->dd->pcidev,
+			 "Unimplemented portcounter %u\n", reg);
+		goto done;
+	}
+	creg = xlator[reg] & _PORT_CNTR_IDXMASK;
+
+	/* handle non-counters and special cases first */
+	if (reg == QIBPORTCNTR_KHDROVFL) {
+		int i;
+
+		/* sum over all kernel contexts (skip if mini_init) */
+		for (i = 0; dd->rcd && i < dd->first_user_ctxt; i++) {
+			struct qib_ctxtdata *rcd = dd->rcd[i];
+
+			if (!rcd || rcd->ppd != ppd)
+				continue;
+			ret += read_7322_creg32(dd, cr_base_egrovfl + i);
+		}
+		goto done;
+	} else if (reg == QIBPORTCNTR_RXDROPPKT) {
+		/*
+		 * Used as part of the synthesis of port_rcv_errors
+		 * in the verbs code for IBTA counters.  Not needed for 7322,
+		 * because all the errors are already counted by other cntrs.
+		 */
+		goto done;
+	} else if (reg == QIBPORTCNTR_PSINTERVAL ||
+		   reg == QIBPORTCNTR_PSSTART || reg == QIBPORTCNTR_PSSTAT) {
+		/* were counters in older chips, now per-port kernel regs */
+		ret = qib_read_kreg_port(ppd, creg);
+		goto done;
+	}
+
+	/*
+	 * Only fast increment counters are 64 bits; use 32 bit reads to
+	 * avoid two independent reads when on Opteron.
+	 */
+	if (xlator[reg] & _PORT_64BIT_FLAG)
+		ret = read_7322_creg_port(ppd, creg);
+	else
+		ret = read_7322_creg32_port(ppd, creg);
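+	/*
+	 * While a delta measurement is in progress, report the value
+	 * snapshotted at its start (ret - (ret - snap) == snap), then
+	 * subtract any accumulated delta so link transitions we caused
+	 * are not charged to the counter.
+	 */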
+	if (creg == crp_ibsymbolerr) {
+		if (ppd->cpspec->ibdeltainprog)
+			ret -= ret - ppd->cpspec->ibsymsnap;
+		ret -= ppd->cpspec->ibsymdelta;
+	} else if (creg == crp_iblinkerrrecov) {
+		if (ppd->cpspec->ibdeltainprog)
+			ret -= ret - ppd->cpspec->iblnkerrsnap;
+		ret -= ppd->cpspec->iblnkerrdelta;
+	} else if (creg == crp_errlink)
+		ret -= ppd->cpspec->ibmalfdelta;
+	else if (creg == crp_iblinkdown)
+		ret += ppd->cpspec->iblnkdowndelta;
+done:
+	return ret;
+}
+
+/*
+ * Device counter names (not port-specific), one line per stat,
+ * single string.  Used by utilities like ipathstats to print the stats
+ * in a way which works for different versions of drivers, without changing
+ * the utility.  Names need to be 12 chars or less (w/o newline), for proper
+ * display by utility.
+ * Non-error counters are first.
+ * Start of "error" conters is indicated by a leading "E " on the first
+ * "error" counter, and doesn't count in label length.
+ * The EgrOvfl list needs to be last so we truncate them at the configured
+ * context count for the device.
+ * cntr7322indices contains the corresponding register indices.
+ */
+static const char cntr7322names[] =
+	"Interrupts\n"
+	"HostBusStall\n"
+	"E RxTIDFull\n"
+	"RxTIDInvalid\n"
+	"RxTIDFloDrop\n" /* 7322 only */
+	"Ctxt0EgrOvfl\n"
+	"Ctxt1EgrOvfl\n"
+	"Ctxt2EgrOvfl\n"
+	"Ctxt3EgrOvfl\n"
+	"Ctxt4EgrOvfl\n"
+	"Ctxt5EgrOvfl\n"
+	"Ctxt6EgrOvfl\n"
+	"Ctxt7EgrOvfl\n"
+	"Ctxt8EgrOvfl\n"
+	"Ctxt9EgrOvfl\n"
+	"Ctx10EgrOvfl\n"
+	"Ctx11EgrOvfl\n"
+	"Ctx12EgrOvfl\n"
+	"Ctx13EgrOvfl\n"
+	"Ctx14EgrOvfl\n"
+	"Ctx15EgrOvfl\n"
+	"Ctx16EgrOvfl\n"
+	"Ctx17EgrOvfl\n"
+	;
+
+static const u32 cntr7322indices[] = {
+	cr_lbint | _PORT_64BIT_FLAG,
+	cr_lbstall | _PORT_64BIT_FLAG,
+	cr_tidfull,
+	cr_tidinvalid,
+	cr_rxtidflowdrop,
+	cr_base_egrovfl + 0,
+	cr_base_egrovfl + 1,
+	cr_base_egrovfl + 2,
+	cr_base_egrovfl + 3,
+	cr_base_egrovfl + 4,
+	cr_base_egrovfl + 5,
+	cr_base_egrovfl + 6,
+	cr_base_egrovfl + 7,
+	cr_base_egrovfl + 8,
+	cr_base_egrovfl + 9,
+	cr_base_egrovfl + 10,
+	cr_base_egrovfl + 11,
+	cr_base_egrovfl + 12,
+	cr_base_egrovfl + 13,
+	cr_base_egrovfl + 14,
+	cr_base_egrovfl + 15,
+	cr_base_egrovfl + 16,
+	cr_base_egrovfl + 17,
+};
+
+/*
+ * same as cntr7322names and cntr7322indices, but for port-specific counters.
+ * portcntr7322indices is somewhat complicated by some registers needing
+ * adjustments of various kinds, and those are ORed with _PORT_VIRT_FLAG
+ */
+static const char portcntr7322names[] =
+	"TxPkt\n"
+	"TxFlowPkt\n"
+	"TxWords\n"
+	"RxPkt\n"
+	"RxFlowPkt\n"
+	"RxWords\n"
+	"TxFlowStall\n"
+	"TxDmaDesc\n"  /* 7220 and 7322-only */
+	"E RxDlidFltr\n"  /* 7220 and 7322-only */
+	"IBStatusChng\n"
+	"IBLinkDown\n"
+	"IBLnkRecov\n"
+	"IBRxLinkErr\n"
+	"IBSymbolErr\n"
+	"RxLLIErr\n"
+	"RxBadFormat\n"
+	"RxBadLen\n"
+	"RxBufOvrfl\n"
+	"RxEBP\n"
+	"RxFlowCtlErr\n"
+	"RxICRCerr\n"
+	"RxLPCRCerr\n"
+	"RxVCRCerr\n"
+	"RxInvalLen\n"
+	"RxInvalPKey\n"
+	"RxPktDropped\n"
+	"TxBadLength\n"
+	"TxDropped\n"
+	"TxInvalLen\n"
+	"TxUnderrun\n"
+	"TxUnsupVL\n"
+	"RxLclPhyErr\n" /* 7220 and 7322-only from here down */
+	"RxVL15Drop\n"
+	"RxVlErr\n"
+	"XcessBufOvfl\n"
+	"RxQPBadCtxt\n" /* 7322-only from here down */
+	"TXBadHeader\n"
+	;
+
+static const u32 portcntr7322indices[] = {
+	QIBPORTCNTR_PKTSEND | _PORT_VIRT_FLAG,
+	crp_pktsendflow,
+	QIBPORTCNTR_WORDSEND | _PORT_VIRT_FLAG,
+	QIBPORTCNTR_PKTRCV | _PORT_VIRT_FLAG,
+	crp_pktrcvflowctrl,
+	QIBPORTCNTR_WORDRCV | _PORT_VIRT_FLAG,
+	QIBPORTCNTR_SENDSTALL | _PORT_VIRT_FLAG,
+	crp_txsdmadesc | _PORT_64BIT_FLAG,
+	crp_rxdlidfltr,
+	crp_ibstatuschange,
+	QIBPORTCNTR_IBLINKDOWN | _PORT_VIRT_FLAG,
+	QIBPORTCNTR_IBLINKERRRECOV | _PORT_VIRT_FLAG,
+	QIBPORTCNTR_ERRLINK | _PORT_VIRT_FLAG,
+	QIBPORTCNTR_IBSYMBOLERR | _PORT_VIRT_FLAG,
+	QIBPORTCNTR_LLI | _PORT_VIRT_FLAG,
+	QIBPORTCNTR_BADFORMAT | _PORT_VIRT_FLAG,
+	QIBPORTCNTR_ERR_RLEN | _PORT_VIRT_FLAG,
+	QIBPORTCNTR_RCVOVFL | _PORT_VIRT_FLAG,
+	QIBPORTCNTR_RCVEBP | _PORT_VIRT_FLAG,
+	crp_rcvflowctrlviol,
+	QIBPORTCNTR_ERRICRC | _PORT_VIRT_FLAG,
+	QIBPORTCNTR_ERRLPCRC | _PORT_VIRT_FLAG,
+	QIBPORTCNTR_ERRVCRC | _PORT_VIRT_FLAG,
+	QIBPORTCNTR_INVALIDRLEN | _PORT_VIRT_FLAG,
+	QIBPORTCNTR_ERRPKEY | _PORT_VIRT_FLAG,
+	QIBPORTCNTR_RXDROPPKT | _PORT_VIRT_FLAG,
+	crp_txminmaxlenerr,
+	crp_txdroppedpkt,
+	crp_txlenerr,
+	crp_txunderrun,
+	crp_txunsupvl,
+	QIBPORTCNTR_RXLOCALPHYERR | _PORT_VIRT_FLAG,
+	QIBPORTCNTR_VL15PKTDROP | _PORT_VIRT_FLAG,
+	QIBPORTCNTR_RXVLERR | _PORT_VIRT_FLAG,
+	QIBPORTCNTR_EXCESSBUFOVFL | _PORT_VIRT_FLAG,
+	crp_rxqpinvalidctxt,
+	crp_txhdrerr,
+};
+
+/* do all the setup to make the counter reads efficient later */
+static void init_7322_cntrnames(struct qib_devdata *dd)
+{
+	int i, j = 0;
+	char *s;
+
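+	/*
+	 * Count names by walking the newline-separated string, stopping
+	 * the EgrOvfl entries at cfgctxts so per-context counters beyond
+	 * the configured count are truncated from the list.
+	 */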
+	for (i = 0, s = (char *)cntr7322names; s && j <= dd->cfgctxts;
+	     i++) {
+		/* we always have at least one counter before the egrovfl */
+		if (!j && !strncmp("Ctxt0EgrOvfl", s + 1, 12))
+			j = 1;
+		s = strchr(s + 1, '\n');
+		if (s && j)
+			j++;
+	}
+	dd->cspec->ncntrs = i;
+	if (!s)
+		/* full list; size is without terminating null */
+		dd->cspec->cntrnamelen = sizeof(cntr7322names) - 1;
+	else
+		dd->cspec->cntrnamelen = 1 + s - cntr7322names;
+	dd->cspec->cntrs = kmalloc(dd->cspec->ncntrs
+		* sizeof(u64), GFP_KERNEL);
+	if (!dd->cspec->cntrs)
+		qib_dev_err(dd, "Failed allocation for counters\n");
+
+	for (i = 0, s = (char *)portcntr7322names; s; i++)
+		s = strchr(s + 1, '\n');
+	dd->cspec->nportcntrs = i - 1;
+	dd->cspec->portcntrnamelen = sizeof(portcntr7322names) - 1;
+	for (i = 0; i < dd->num_pports; ++i) {
+		dd->pport[i].cpspec->portcntrs = kmalloc(dd->cspec->nportcntrs
+			* sizeof(u64), GFP_KERNEL);
+		if (!dd->pport[i].cpspec->portcntrs)
+			qib_dev_err(dd, "Failed allocation for"
+				    " portcounters\n");
+	}
+}
+
+static u32 qib_read_7322cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
+			      u64 **cntrp)
+{
+	u32 ret;
+
+	if (namep) {
+		ret = dd->cspec->cntrnamelen;
+		if (pos >= ret)
+			ret = 0; /* final read after getting everything */
+		else
+			*namep = (char *) cntr7322names;
+	} else {
+		u64 *cntr = dd->cspec->cntrs;
+		int i;
+
+		ret = dd->cspec->ncntrs * sizeof(u64);
+		if (!cntr || pos >= ret) {
+			/* everything read, or couldn't get memory */
+			ret = 0;
+			goto done;
+		}
+		*cntrp = cntr;
+		for (i = 0; i < dd->cspec->ncntrs; i++)
+			if (cntr7322indices[i] & _PORT_64BIT_FLAG)
+				*cntr++ = read_7322_creg(dd,
+							 cntr7322indices[i] &
+							 _PORT_CNTR_IDXMASK);
+			else
+				*cntr++ = read_7322_creg32(dd,
+							   cntr7322indices[i]);
+	}
+done:
+	return ret;
+}
+
+static u32 qib_read_7322portcntrs(struct qib_devdata *dd, loff_t pos, u32 port,
+				  char **namep, u64 **cntrp)
+{
+	u32 ret;
+
+	if (namep) {
+		ret = dd->cspec->portcntrnamelen;
+		if (pos >= ret)
+			ret = 0; /* final read after getting everything */
+		else
+			*namep = (char *)portcntr7322names;
+	} else {
+		struct qib_pportdata *ppd = &dd->pport[port];
+		u64 *cntr = ppd->cpspec->portcntrs;
+		int i;
+
+		ret = dd->cspec->nportcntrs * sizeof(u64);
+		if (!cntr || pos >= ret) {
+			/* everything read, or couldn't get memory */
+			ret = 0;
+			goto done;
+		}
+		*cntrp = cntr;
+		for (i = 0; i < dd->cspec->nportcntrs; i++) {
+			if (portcntr7322indices[i] & _PORT_VIRT_FLAG)
+				*cntr++ = qib_portcntr_7322(ppd,
+					portcntr7322indices[i] &
+					_PORT_CNTR_IDXMASK);
+			else if (portcntr7322indices[i] & _PORT_64BIT_FLAG)
+				*cntr++ = read_7322_creg_port(ppd,
+					   portcntr7322indices[i] &
+					    _PORT_CNTR_IDXMASK);
+			else
+				*cntr++ = read_7322_creg32_port(ppd,
+					   portcntr7322indices[i]);
+		}
+	}
+done:
+	return ret;
+}
+
+/**
+ * qib_get_7322_faststats - get word counters from chip before they overflow
+ * @opaque: contains a pointer to the qlogic_ib device qib_devdata
+ *
+ * VESTIGIAL: the IBA7322 has no "small fast counters", so the only
+ * real purpose of this function is to maintain the notion of
+ * "active time", which in turn is only logged into the eeprom,
+ * which we don't have yet for 7322-based boards.
+ *
+ * called from add_timer
+ */
+static void qib_get_7322_faststats(unsigned long opaque)
+{
+	struct qib_devdata *dd = (struct qib_devdata *) opaque;
+	struct qib_pportdata *ppd;
+	unsigned long flags;
+	u64 traffic_wds;
+	int pidx;
+
+	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+		ppd = dd->pport + pidx;
+
+		/*
+		 * If the port isn't enabled or operational, or diags
+		 * is running (which can cause memory diags to fail),
+		 * skip this port this time.
+		 */
+		if (!ppd->link_speed_supported || !(dd->flags & QIB_INITTED)
+		    || dd->diag_client)
+			continue;
+
+		/*
+		 * Maintain an activity timer, based on traffic
+		 * exceeding a threshold, so we need to check the word-counts
+		 * even if they are 64-bit.
+		 */
+		traffic_wds = qib_portcntr_7322(ppd, QIBPORTCNTR_WORDRCV) +
+			qib_portcntr_7322(ppd, QIBPORTCNTR_WORDSEND);
+		spin_lock_irqsave(&ppd->dd->eep_st_lock, flags);
+		traffic_wds -= ppd->dd->traffic_wds;
+		ppd->dd->traffic_wds += traffic_wds;
+		if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
+			atomic_add(ACTIVITY_TIMER, &ppd->dd->active_time);
+		spin_unlock_irqrestore(&ppd->dd->eep_st_lock, flags);
+		if (ppd->cpspec->qdr_dfe_on && (ppd->link_speed_active &
+						QIB_IB_QDR) &&
+		    (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
+				    QIBL_LINKACTIVE)) &&
+		    ppd->cpspec->qdr_dfe_time &&
+		    time_after64(get_jiffies_64(), ppd->cpspec->qdr_dfe_time)) {
+			ppd->cpspec->qdr_dfe_on = 0;
+
+			qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
+					    ppd->dd->cspec->r1 ?
+					    QDR_STATIC_ADAPT_INIT_R1 :
+					    QDR_STATIC_ADAPT_INIT);
+			force_h1(ppd);
+		}
+	}
+	mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
+}
+
+/*
+ * If we were using MSIx, try to fall back to INTx.
+ */
+static int qib_7322_intr_fallback(struct qib_devdata *dd)
+{
+	if (!dd->cspec->num_msix_entries)
+		return 0; /* already using INTx */
+
+	qib_devinfo(dd->pcidev, "MSIx interrupt not detected,"
+		 " trying INTx interrupts\n");
+	qib_7322_nomsix(dd);
+	qib_enable_intx(dd->pcidev);
+	qib_setup_7322_interrupt(dd, 0);
+	return 1;
+}
+
+/*
+ * Reset the XGXS (between serdes and IBC).  Slightly less intrusive
+ * than resetting the IBC or external link state, and useful in some
+ * cases to cause some retraining.  To do this right, we reset IBC
+ * as well, then return to the previous state (which may still be in reset).
+ * NOTE: some callers of this "know" this writes the current value
+ * of cpspec->ibcctrl_a as part of its operation, so if that changes,
+ * check all callers.
+ */
+static void qib_7322_mini_pcs_reset(struct qib_pportdata *ppd)
+{
+	u64 val;
+	struct qib_devdata *dd = ppd->dd;
+	const u64 reset_bits = SYM_MASK(IBPCSConfig_0, xcv_rreset) |
+		SYM_MASK(IBPCSConfig_0, xcv_treset) |
+		SYM_MASK(IBPCSConfig_0, tx_rx_reset);
+
+	val = qib_read_kreg_port(ppd, krp_ib_pcsconfig);
+	qib_write_kreg_port(ppd, krp_ibcctrl_a,
+			    ppd->cpspec->ibcctrl_a &
+			    ~SYM_MASK(IBCCtrlA_0, IBLinkEn));
+
+	qib_write_kreg_port(ppd, krp_ib_pcsconfig, val | reset_bits);
+	qib_read_kreg32(dd, kr_scratch);
+	qib_write_kreg_port(ppd, krp_ib_pcsconfig, val & ~reset_bits);
+	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
+	qib_write_kreg(dd, kr_scratch, 0ULL);
+}
+
+/*
+ * This code for non-IBTA-compliant IB speed negotiation is only known to
+ * work for the SDR to DDR transition, and only between an HCA and a switch
+ * with recent firmware.  It is based on observed heuristics, rather than
+ * actual knowledge of the non-compliant speed negotiation.
+ * It has a number of hard-coded fields, since the hope is to rewrite this
+ * when a spec is available on how the negotiation is intended to work.
+ */
+static void autoneg_7322_sendpkt(struct qib_pportdata *ppd, u32 *hdr,
+				 u32 dcnt, u32 *data)
+{
+	int i;
+	u64 pbc;
+	u32 __iomem *piobuf;
+	u32 pnum, control, len;
+	struct qib_devdata *dd = ppd->dd;
+
+	i = 0;
+	len = 7 + dcnt + 1; /* 7 dword header, dword data, icrc */
+	control = qib_7322_setpbc_control(ppd, len, 0, 15);
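+	/*
+	 * PBC layout: control flags in the upper 32 bits, length in
+	 * dwords in the lower 32.
+	 */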
+	pbc = ((u64) control << 32) | len;
+	while (!(piobuf = qib_7322_getsendbuf(ppd, pbc, &pnum))) {
+		if (i++ > 15)
+			return;
+		udelay(2);
+	}
+	/* disable header check on this packet, since it can't be valid */
+	dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_DIS1, NULL);
+	writeq(pbc, piobuf);
+	qib_flush_wc();
+	qib_pio_copy(piobuf + 2, hdr, 7);
+	qib_pio_copy(piobuf + 9, data, dcnt);
+	if (dd->flags & QIB_USE_SPCL_TRIG) {
+		u32 spcl_off = (pnum >= dd->piobcnt2k) ? 2047 : 1023;
+
+		qib_flush_wc();
+		__raw_writel(0xaebecede, piobuf + spcl_off);
+	}
+	qib_flush_wc();
+	qib_sendbuf_done(dd, pnum);
+	/* and re-enable hdr check */
+	dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_ENAB1, NULL);
+}
+
+/*
+ * _start packet gets sent twice at start, _done gets sent twice at end
+ */
+static void qib_autoneg_7322_send(struct qib_pportdata *ppd, int which)
+{
+	struct qib_devdata *dd = ppd->dd;
+	static u32 swapped;
+	u32 dw, i, hcnt, dcnt, *data;
+	static u32 hdr[7] = { 0xf002ffff, 0x48ffff, 0x6400abba };
+	static u32 madpayload_start[0x40] = {
+		0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
+		0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x1, 0x1388, 0x15e, 0x1, /* rest 0's */
+		};
+	static u32 madpayload_done[0x40] = {
+		0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
+		0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x40000001, 0x1388, 0x15e, /* rest 0's */
+		};
+
+	dcnt = ARRAY_SIZE(madpayload_start);
+	hcnt = ARRAY_SIZE(hdr);
+	if (!swapped) {
+		/* for maintainability, do it at runtime */
+		for (i = 0; i < hcnt; i++) {
+			dw = (__force u32) cpu_to_be32(hdr[i]);
+			hdr[i] = dw;
+		}
+		for (i = 0; i < dcnt; i++) {
+			dw = (__force u32) cpu_to_be32(madpayload_start[i]);
+			madpayload_start[i] = dw;
+			dw = (__force u32) cpu_to_be32(madpayload_done[i]);
+			madpayload_done[i] = dw;
+		}
+		swapped = 1;
+	}
+
+	data = which ? madpayload_done : madpayload_start;
+
+	autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
+	qib_read_kreg64(dd, kr_scratch);
+	udelay(2);
+	autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
+	qib_read_kreg64(dd, kr_scratch);
+	udelay(2);
+}
+
+/*
+ * Do the absolute minimum to cause an IB speed change, and make it
+ * ready, but don't actually trigger the change.   The caller will
+ * do that when ready (if link is in Polling training state, it will
+ * happen immediately, otherwise when link next goes down)
+ *
+ * This routine should only be used as part of the DDR autonegotiation
+ * code for devices that are not compliant with IB 1.2 (or code that
+ * fixes things up for same).
+ *
+ * When the link has gone down and autoneg is enabled, or autoneg has
+ * failed and we give up until next time, we set both speeds, and
+ * then we want IBTA enabled as well as "use max enabled speed".
+ */
+static void set_7322_ibspeed_fast(struct qib_pportdata *ppd, u32 speed)
+{
+	u64 newctrlb;
+	newctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_SPEED_MASK |
+				    IBA7322_IBC_IBTA_1_2_MASK |
+				    IBA7322_IBC_MAX_SPEED_MASK);
+
+	if (speed & (speed - 1)) /* multiple speeds */
+		newctrlb |= (speed << IBA7322_IBC_SPEED_LSB) |
+				    IBA7322_IBC_IBTA_1_2_MASK |
+				    IBA7322_IBC_MAX_SPEED_MASK;
+	else
+		newctrlb |= speed == QIB_IB_QDR ?
+			IBA7322_IBC_SPEED_QDR | IBA7322_IBC_IBTA_1_2_MASK :
+			((speed == QIB_IB_DDR ?
+			  IBA7322_IBC_SPEED_DDR : IBA7322_IBC_SPEED_SDR));
+
+	if (newctrlb == ppd->cpspec->ibcctrl_b)
+		return;
+
+	ppd->cpspec->ibcctrl_b = newctrlb;
+	qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
+	qib_write_kreg(ppd->dd, kr_scratch, 0);
+}
+
+/*
+ * This routine is only used when we are not talking to another
+ * IB 1.2-compliant device that we think can do DDR.
+ * (This includes all existing switch chips as of Oct 2007.)
+ * 1.2-compliant devices go directly to DDR prior to reaching INIT
+ */
+static void try_7322_autoneg(struct qib_pportdata *ppd)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ppd->lflags_lock, flags);
+	ppd->lflags |= QIBL_IB_AUTONEG_INPROG;
+	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+	qib_autoneg_7322_send(ppd, 0);
+	set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
+	qib_7322_mini_pcs_reset(ppd);
+	/* 2 msec is minimum length of a poll cycle */
+	schedule_delayed_work(&ppd->cpspec->autoneg_work,
+			      msecs_to_jiffies(2));
+}
+
+/*
+ * Handle the empirically determined mechanism for auto-negotiation
+ * of DDR speed with switches.
+ */
+static void autoneg_7322_work(struct work_struct *work)
+{
+	struct qib_pportdata *ppd;
+	struct qib_devdata *dd;
+	u64 startms;
+	u32 i;
+	unsigned long flags;
+
+	ppd = container_of(work, struct qib_chippport_specific,
+			    autoneg_work.work)->ppd;
+	dd = ppd->dd;
+
+	startms = jiffies_to_msecs(jiffies);
+
+	/*
+	 * Busy-wait for this first part; it should take at most a
+	 * few hundred usec, since we scheduled ourselves for 2 msec.
+	 */
+	for (i = 0; i < 25; i++) {
+		if (SYM_FIELD(ppd->lastibcstat, IBCStatusA_0, LinkState)
+		     == IB_7322_LT_STATE_POLLQUIET) {
+			qib_set_linkstate(ppd, QIB_IB_LINKDOWN_DISABLE);
+			break;
+		}
+		udelay(100);
+	}
+
+	if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
+		goto done; /* we got there early or told to stop */
+
+	/* we expect this to timeout */
+	if (wait_event_timeout(ppd->cpspec->autoneg_wait,
+			       !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
+			       msecs_to_jiffies(90)))
+		goto done;
+	qib_7322_mini_pcs_reset(ppd);
+
+	/* we expect this to timeout */
+	if (wait_event_timeout(ppd->cpspec->autoneg_wait,
+			       !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
+			       msecs_to_jiffies(1700)))
+		goto done;
+	qib_7322_mini_pcs_reset(ppd);
+
+	set_7322_ibspeed_fast(ppd, QIB_IB_SDR);
+
+	/*
+	 * Wait up to 250 msec for link to train and get to INIT at DDR;
+	 * this should terminate early.
+	 */
+	wait_event_timeout(ppd->cpspec->autoneg_wait,
+		!(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
+		msecs_to_jiffies(250));
+done:
+	if (ppd->lflags & QIBL_IB_AUTONEG_INPROG) {
+		spin_lock_irqsave(&ppd->lflags_lock, flags);
+		ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
+		if (ppd->cpspec->autoneg_tries == AUTONEG_TRIES) {
+			ppd->lflags |= QIBL_IB_AUTONEG_FAILED;
+			ppd->cpspec->autoneg_tries = 0;
+		}
+		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+		set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
+	}
+}
+
+/*
+ * This routine is used to request IPG set in the QLogic switch.
+ * Only called if r1.
+ */
+static void try_7322_ipg(struct qib_pportdata *ppd)
+{
+	struct qib_ibport *ibp = &ppd->ibport_data;
+	struct ib_mad_send_buf *send_buf;
+	struct ib_mad_agent *agent;
+	struct ib_smp *smp;
+	unsigned delay;
+	int ret;
+
+	agent = ibp->send_agent;
+	if (!agent)
+		goto retry;
+
+	send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR,
+				      IB_MGMT_MAD_DATA, GFP_ATOMIC);
+	if (IS_ERR(send_buf))
+		goto retry;
+
+	if (!ibp->smi_ah) {
+		struct ib_ah_attr attr;
+		struct ib_ah *ah;
+
+		memset(&attr, 0, sizeof attr);
+		attr.dlid = be16_to_cpu(IB_LID_PERMISSIVE);
+		attr.port_num = ppd->port;
+		ah = ib_create_ah(ibp->qp0->ibqp.pd, &attr);
+		if (IS_ERR(ah))
+			ret = -EINVAL;
+		else {
+			send_buf->ah = ah;
+			ibp->smi_ah = to_iah(ah);
+			ret = 0;
+		}
+	} else {
+		send_buf->ah = &ibp->smi_ah->ibah;
+		ret = 0;
+	}
+
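+	/*
+	 * Build a directed-route SMP Send carrying the vendor IPG
+	 * attribute; hop_cnt of 1 reaches the switch at the far end of
+	 * this link via the permissive LID.
+	 */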
+	smp = send_buf->mad;
+	smp->base_version = IB_MGMT_BASE_VERSION;
+	smp->mgmt_class = IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE;
+	smp->class_version = 1;
+	smp->method = IB_MGMT_METHOD_SEND;
+	smp->hop_cnt = 1;
+	smp->attr_id = QIB_VENDOR_IPG;
+	smp->attr_mod = 0;
+
+	if (!ret)
+		ret = ib_post_send_mad(send_buf, NULL);
+	if (ret)
+		ib_free_send_mad(send_buf);
+retry:
+	delay = 2 << ppd->cpspec->ipg_tries;
+	schedule_delayed_work(&ppd->cpspec->ipg_work, msecs_to_jiffies(delay));
+}
+
+/*
+ * Timeout handler for setting IPG.
+ * Only called if r1.
+ */
+static void ipg_7322_work(struct work_struct *work)
+{
+	struct qib_pportdata *ppd;
+
+	ppd = container_of(work, struct qib_chippport_specific,
+			   ipg_work.work)->ppd;
+	if ((ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED | QIBL_LINKACTIVE))
+	    && ++ppd->cpspec->ipg_tries <= 10)
+		try_7322_ipg(ppd);
+}
+
+static u32 qib_7322_iblink_state(u64 ibcs)
+{
+	u32 state = (u32)SYM_FIELD(ibcs, IBCStatusA_0, LinkState);
+
+	switch (state) {
+	case IB_7322_L_STATE_INIT:
+		state = IB_PORT_INIT;
+		break;
+	case IB_7322_L_STATE_ARM:
+		state = IB_PORT_ARMED;
+		break;
+	case IB_7322_L_STATE_ACTIVE:
+		/* fall through */
+	case IB_7322_L_STATE_ACT_DEFER:
+		state = IB_PORT_ACTIVE;
+		break;
+	default: /* fall through */
+	case IB_7322_L_STATE_DOWN:
+		state = IB_PORT_DOWN;
+		break;
+	}
+	return state;
+}
+
+/* returns the IBTA physical port state, not the IBC link training state */
+static u8 qib_7322_phys_portstate(u64 ibcs)
+{
+	u8 state = (u8)SYM_FIELD(ibcs, IBCStatusA_0, LinkTrainingState);
+	return qib_7322_physportstate[state];
+}
+
+static int qib_7322_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
+{
+	int ret = 0, symadj = 0;
+	unsigned long flags;
+	int mult;
+
+	spin_lock_irqsave(&ppd->lflags_lock, flags);
+	ppd->lflags &= ~QIBL_IB_FORCE_NOTIFY;
+	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+
+	/* Update our picture of width and speed from chip */
+	if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) {
+		ppd->link_speed_active = QIB_IB_QDR;
+		mult = 4;
+	} else if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedActive)) {
+		ppd->link_speed_active = QIB_IB_DDR;
+		mult = 2;
+	} else {
+		ppd->link_speed_active = QIB_IB_SDR;
+		mult = 1;
+	}
+	if (ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) {
+		ppd->link_width_active = IB_WIDTH_4X;
+		mult *= 4;
+	} else
+		ppd->link_width_active = IB_WIDTH_1X;
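+	/* mult now encodes speed * width; map to an IB rate, then a delay */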
+	ppd->delay_mult = ib_rate_to_delay[mult_to_ib_rate(mult)];
+
+	if (!ibup) {
+		u64 clr;
+
+		/* Link went down. */
+		/* do IPG MAD again after linkdown, even if last time failed */
+		ppd->cpspec->ipg_tries = 0;
+		clr = qib_read_kreg_port(ppd, krp_ibcstatus_b) &
+			(SYM_MASK(IBCStatusB_0, heartbeat_timed_out) |
+			 SYM_MASK(IBCStatusB_0, heartbeat_crosstalk));
+		if (clr)
+			qib_write_kreg_port(ppd, krp_ibcstatus_b, clr);
+		if (!(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
+				     QIBL_IB_AUTONEG_INPROG)))
+			set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
+		if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
+			qib_cancel_sends(ppd);
+			spin_lock_irqsave(&ppd->sdma_lock, flags);
+			if (__qib_sdma_running(ppd))
+				__qib_sdma_process_event(ppd,
+					qib_sdma_event_e70_go_idle);
+			spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+		}
+		clr = read_7322_creg32_port(ppd, crp_iblinkdown);
+		if (clr == ppd->cpspec->iblnkdownsnap)
+			ppd->cpspec->iblnkdowndelta++;
+	} else {
+		if (qib_compat_ddr_negotiate &&
+		    !(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
+				     QIBL_IB_AUTONEG_INPROG)) &&
+		    ppd->link_speed_active == QIB_IB_SDR &&
+		    (ppd->link_speed_enabled & QIB_IB_DDR)
+		    && ppd->cpspec->autoneg_tries < AUTONEG_TRIES) {
+			/* we are SDR, and auto-negotiation enabled */
+			++ppd->cpspec->autoneg_tries;
+			if (!ppd->cpspec->ibdeltainprog) {
+				ppd->cpspec->ibdeltainprog = 1;
+				ppd->cpspec->ibsymdelta +=
+					read_7322_creg32_port(ppd,
+						crp_ibsymbolerr) -
+						ppd->cpspec->ibsymsnap;
+				ppd->cpspec->iblnkerrdelta +=
+					read_7322_creg32_port(ppd,
+						crp_iblinkerrrecov) -
+						ppd->cpspec->iblnkerrsnap;
+			}
+			try_7322_autoneg(ppd);
+			ret = 1; /* no other IB status change processing */
+		} else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
+			   ppd->link_speed_active == QIB_IB_SDR) {
+			qib_autoneg_7322_send(ppd, 1);
+			set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
+			qib_7322_mini_pcs_reset(ppd);
+			udelay(2);
+			ret = 1; /* no other IB status change processing */
+		} else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
+			   (ppd->link_speed_active & QIB_IB_DDR)) {
+			spin_lock_irqsave(&ppd->lflags_lock, flags);
+			ppd->lflags &= ~(QIBL_IB_AUTONEG_INPROG |
+					 QIBL_IB_AUTONEG_FAILED);
+			spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+			ppd->cpspec->autoneg_tries = 0;
+			/* re-enable SDR, for next link down */
+			set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
+			wake_up(&ppd->cpspec->autoneg_wait);
+			symadj = 1;
+		} else if (ppd->lflags & QIBL_IB_AUTONEG_FAILED) {
+			/*
+			 * Clear autoneg failure flag, and do setup
+			 * so we'll try next time link goes down and
+			 * back to INIT (possibly connected to a
+			 * different device).
+			 */
+			spin_lock_irqsave(&ppd->lflags_lock, flags);
+			ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
+			spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+			ppd->cpspec->ibcctrl_b |= IBA7322_IBC_IBTA_1_2_MASK;
+			symadj = 1;
+		}
+		if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
+			symadj = 1;
+			if (ppd->dd->cspec->r1 && ppd->cpspec->ipg_tries <= 10)
+				try_7322_ipg(ppd);
+			if (!ppd->cpspec->recovery_init)
+				setup_7322_link_recovery(ppd, 0);
+			ppd->cpspec->qdr_dfe_time = jiffies +
+				msecs_to_jiffies(QDR_DFE_DISABLE_DELAY);
+		}
+		ppd->cpspec->ibmalfusesnap = 0;
+		ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
+			crp_errlink);
+	}
+	if (symadj) {
+		ppd->cpspec->iblnkdownsnap =
+			read_7322_creg32_port(ppd, crp_iblinkdown);
+		if (ppd->cpspec->ibdeltainprog) {
+			ppd->cpspec->ibdeltainprog = 0;
+			ppd->cpspec->ibsymdelta += read_7322_creg32_port(ppd,
+				crp_ibsymbolerr) - ppd->cpspec->ibsymsnap;
+			ppd->cpspec->iblnkerrdelta += read_7322_creg32_port(ppd,
+				crp_iblinkerrrecov) - ppd->cpspec->iblnkerrsnap;
+		}
+	} else if (!ibup && qib_compat_ddr_negotiate &&
+		   !ppd->cpspec->ibdeltainprog &&
+			!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
+		ppd->cpspec->ibdeltainprog = 1;
+		ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
+			crp_ibsymbolerr);
+		ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
+			crp_iblinkerrrecov);
+	}
+
+	if (!ret)
+		qib_setup_7322_setextled(ppd, ibup);
+	return ret;
+}
+
+/*
+ * Does a read/modify/write to the appropriate registers to
+ * set the output and direction bits selected by mask.
+ * These are in their canonical positions (e.g. the lsb of
+ * dir will end up in D48 of extctrl on existing chips).
+ * Returns the contents of the GP Inputs.
+ */
+static int gpio_7322_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask)
+{
+	u64 read_val, new_out;
+	unsigned long flags;
+
+	if (mask) {
+		/* some bits being written, lock access to GPIO */
+		dir &= mask;
+		out &= mask;
+		spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
+		dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe));
+		dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe));
+		new_out = (dd->cspec->gpio_out & ~mask) | out;
+
+		qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
+		qib_write_kreg(dd, kr_gpio_out, new_out);
+		dd->cspec->gpio_out = new_out;
+		spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
+	}
+	/*
+	 * It is unlikely that a read at this time would get valid
+	 * data on a pin whose direction line was set in the same
+	 * call to this function. We include the read here because
+	 * that allows us to potentially combine a change on one pin with
+	 * a read on another, and because the old code did something like
+	 * this.
+	 */
+	read_val = qib_read_kreg64(dd, kr_extstatus);
+	return SYM_FIELD(read_val, EXTStatus, GPIOIn);
+}
+
+/* Enable writes to config EEPROM, if possible. Returns previous state */
+static int qib_7322_eeprom_wen(struct qib_devdata *dd, int wen)
+{
+	int prev_wen;
+	u32 mask;
+
+	mask = 1 << QIB_EEPROM_WEN_NUM;
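+	/*
+	 * The write-enable GPIO is active low: drive it low to enable
+	 * writes, and invert the read-back to report the previous state.
+	 */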
+	prev_wen = ~gpio_7322_mod(dd, 0, 0, 0) >> QIB_EEPROM_WEN_NUM;
+	gpio_7322_mod(dd, wen ? 0 : mask, mask, mask);
+
+	return prev_wen & 1;
+}
+
+/*
+ * Read fundamental info we need to use the chip.  These are
+ * the registers that describe chip capabilities, and are
+ * saved in shadow registers.
+ */
+static void get_7322_chip_params(struct qib_devdata *dd)
+{
+	u64 val;
+	u32 piobufs;
+	int mtu;
+
+	dd->palign = qib_read_kreg32(dd, kr_pagealign);
+
+	dd->uregbase = qib_read_kreg32(dd, kr_userregbase);
+
+	dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt);
+	dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase);
+	dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase);
+	dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase);
+	dd->pio2k_bufbase = dd->piobufbase & 0xffffffff;
+
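+	/* the count and size registers pack 2K info low, 4K info high */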
+	val = qib_read_kreg64(dd, kr_sendpiobufcnt);
+	dd->piobcnt2k = val & ~0U;
+	dd->piobcnt4k = val >> 32;
+	val = qib_read_kreg64(dd, kr_sendpiosize);
+	dd->piosize2k = val & ~0U;
+	dd->piosize4k = val >> 32;
+
+	mtu = ib_mtu_enum_to_int(qib_ibmtu);
+	if (mtu == -1)
+		mtu = QIB_DEFAULT_MTU;
+	dd->pport[0].ibmtu = (u32)mtu;
+	dd->pport[1].ibmtu = (u32)mtu;
+
+	/* these may be adjusted in init_chip_wc_pat() */
+	dd->pio2kbase = (u32 __iomem *)
+		((char __iomem *) dd->kregbase + dd->pio2k_bufbase);
+	dd->pio4kbase = (u32 __iomem *)
+		((char __iomem *) dd->kregbase +
+		 (dd->piobufbase >> 32));
+	/*
+	 * 4K buffers take 2 pages; we use roundup just to be
+	 * paranoid; we calculate it once here, rather than on
+	 * every buf allocation
+	 */
+	dd->align4k = ALIGN(dd->piosize4k, dd->palign);
+
+	piobufs = dd->piobcnt4k + dd->piobcnt2k + NUM_VL15_BUFS;
+
+	dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) /
+		(sizeof(u64) * BITS_PER_BYTE / 2);
+}
+
+/*
+ * The chip base addresses in cspec and cpspec have to be set
+ * after possible init_chip_wc_pat(), rather than in
+ * get_7322_chip_params(), so split out as separate function
+ */
+static void qib_7322_set_baseaddrs(struct qib_devdata *dd)
+{
+	u32 cregbase;
+
+	cregbase = qib_read_kreg32(dd, kr_counterregbase);
+
+	dd->cspec->cregbase = (u64 __iomem *)(cregbase +
+		(char __iomem *)dd->kregbase);
+
+	dd->egrtidbase = (u64 __iomem *)
+		((char __iomem *) dd->kregbase + dd->rcvegrbase);
+
+	/* port registers are defined as relative to base of chip */
+	dd->pport[0].cpspec->kpregbase =
+		(u64 __iomem *)((char __iomem *)dd->kregbase);
+	dd->pport[1].cpspec->kpregbase =
+		(u64 __iomem *)(dd->palign +
+		(char __iomem *)dd->kregbase);
+	dd->pport[0].cpspec->cpregbase =
+		(u64 __iomem *)(qib_read_kreg_port(&dd->pport[0],
+		kr_counterregbase) + (char __iomem *)dd->kregbase);
+	dd->pport[1].cpspec->cpregbase =
+		(u64 __iomem *)(qib_read_kreg_port(&dd->pport[1],
+		kr_counterregbase) + (char __iomem *)dd->kregbase);
+}
+
+/*
+ * This is a fairly special-purpose observer, so we only support
+ * the port-specific parts of SendCtrl
+ */
+
+#define SENDCTRL_SHADOWED (SYM_MASK(SendCtrl_0, SendEnable) |		\
+			   SYM_MASK(SendCtrl_0, SDmaEnable) |		\
+			   SYM_MASK(SendCtrl_0, SDmaIntEnable) |	\
+			   SYM_MASK(SendCtrl_0, SDmaSingleDescriptor) | \
+			   SYM_MASK(SendCtrl_0, SDmaHalt) |		\
+			   SYM_MASK(SendCtrl_0, IBVLArbiterEn) |	\
+			   SYM_MASK(SendCtrl_0, ForceCreditUpToDate))
+
+static int sendctrl_hook(struct qib_devdata *dd,
+			 const struct diag_observer *op, u32 offs,
+			 u64 *data, u64 mask, int only_32)
+{
+	unsigned long flags;
+	unsigned idx;
+	unsigned pidx;
+	struct qib_pportdata *ppd = NULL;
+	u64 local_data, all_bits;
+
+	/*
+	 * The fixed correspondence between physical ports and pports is
+	 * severed. We need to hunt for the ppd that corresponds
+	 * to the offset we got. And we have to do that without admitting
+	 * we know the stride, apparently.
+	 */
+	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+		u64 __iomem *psptr;
+		u32 psoffs;
+
+		ppd = dd->pport + pidx;
+		if (!ppd->cpspec->kpregbase)
+			continue;
+
+		psptr = ppd->cpspec->kpregbase + krp_sendctrl;
+		psoffs = (u32) (psptr - dd->kregbase) * sizeof(*psptr);
+		if (psoffs == offs)
+			break;
+	}
+
+	/* If the pport is not being managed by the driver, just avoid shadows. */
+	if (pidx >= dd->num_pports)
+		ppd = NULL;
+
+	/* In any case, "idx" is a flat index in kreg space */
+	idx = offs / sizeof(u64);
+
+	all_bits = ~0ULL;
+	if (only_32)
+		all_bits >>= 32;
+
+	spin_lock_irqsave(&dd->sendctrl_lock, flags);
+	if (!ppd || (mask & all_bits) != all_bits) {
+		/*
+		 * At least some mask bits are zero, so we need
+		 * to read. The judgment call is whether from
+		 * reg or shadow. First-cut: read reg, and complain
+		 * if any bits which should be shadowed are different
+		 * from their shadowed value.
+		 */
+		if (only_32)
+			local_data = (u64)qib_read_kreg32(dd, idx);
+		else
+			local_data = qib_read_kreg64(dd, idx);
+		*data = (local_data & ~mask) | (*data & mask);
+	}
+	if (mask) {
+		/*
+		 * At least some mask bits are one, so we need
+		 * to write, but only shadow some bits.
+		 */
+		u64 sval, tval; /* Shadowed, transient */
+
+		/*
+		 * The new shadow value is the bits we don't want to touch,
+		 * ORed with the bits we do that are intended for the shadow.
+		 */
+		if (ppd) {
+			sval = ppd->p_sendctrl & ~mask;
+			sval |= *data & SENDCTRL_SHADOWED & mask;
+			ppd->p_sendctrl = sval;
+		} else
+			sval = *data & SENDCTRL_SHADOWED & mask;
+		tval = sval | (*data & ~SENDCTRL_SHADOWED & mask);
+		qib_write_kreg(dd, idx, tval);
+		qib_write_kreg(dd, kr_scratch, 0ULL);
+	}
+	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
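+	/* report the number of bytes we handled */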
+	return only_32 ? 4 : 8;
+}
+
+static const struct diag_observer sendctrl_0_observer = {
+	sendctrl_hook, KREG_IDX(SendCtrl_0) * sizeof(u64),
+	KREG_IDX(SendCtrl_0) * sizeof(u64)
+};
+
+static const struct diag_observer sendctrl_1_observer = {
+	sendctrl_hook, KREG_IDX(SendCtrl_1) * sizeof(u64),
+	KREG_IDX(SendCtrl_1) * sizeof(u64)
+};
+
+static ushort sdma_fetch_prio = 8;
+module_param_named(sdma_fetch_prio, sdma_fetch_prio, ushort, S_IRUGO);
+MODULE_PARM_DESC(sdma_fetch_prio, "SDMA descriptor fetch priority");
+
+/* Besides logging QSFP events, we set appropriate TxDDS values */
+static void init_txdds_table(struct qib_pportdata *ppd, int override);
+
+static void qsfp_7322_event(struct work_struct *work)
+{
+	struct qib_qsfp_data *qd;
+	struct qib_pportdata *ppd;
+	u64 pwrup;
+	int ret;
+	u32 le2;
+
+	qd = container_of(work, struct qib_qsfp_data, work);
+	ppd = qd->ppd;
+	pwrup = qd->t_insert + msecs_to_jiffies(QSFP_PWR_LAG_MSEC);
+
+	/*
+	 * Some QSFPs not only fail to respond until their full power-up
+	 * time has elapsed, but may behave badly if we try earlier. So
+	 * hold off responding to insertion.
+	 */
+	while (1) {
+		u64 now = get_jiffies_64();
+		if (time_after64(now, pwrup))
+			break;
+		msleep(1);
+	}
+	ret = qib_refresh_qsfp_cache(ppd, &qd->cache);
+	/*
+	 * Need to change LE2 back to defaults if we couldn't
+	 * read the cable type (to handle cable swaps), so do this
+	 * even on failure to read cable information.  We don't
+	 * get here for QME, so IS_QME check not needed here.
+	 */
+	le2 = (!ret && qd->cache.atten[1] >= qib_long_atten &&
+	       !ppd->dd->cspec->r1 && QSFP_IS_CU(qd->cache.tech)) ?
+		LE2_5m : LE2_DEFAULT;
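+	/* LE2 occupies bits 9:7 of IB SerDes register 13 on all channels */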
+	ibsd_wr_allchans(ppd, 13, (le2 << 7), BMASK(9, 7));
+	init_txdds_table(ppd, 0);
+}
+
+/*
+ * There is little we can do but complain to the user if QSFP
+ * initialization fails.
+ */
+static void qib_init_7322_qsfp(struct qib_pportdata *ppd)
+{
+	unsigned long flags;
+	struct qib_qsfp_data *qd = &ppd->cpspec->qsfp_data;
+	struct qib_devdata *dd = ppd->dd;
+	u64 mod_prs_bit = QSFP_GPIO_MOD_PRS_N;
+
+	mod_prs_bit <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
+	qd->ppd = ppd;
+	qib_qsfp_init(qd, qsfp_7322_event);
+	spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
+	dd->cspec->extctrl |= (mod_prs_bit << SYM_LSB(EXTCtrl, GPIOInvert));
+	dd->cspec->gpio_mask |= mod_prs_bit;
+	qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
+	qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
+	spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
+}
+
+/*
+ * Called at device initialization time, and also if the cable_atten
+ * module parameter is changed.  This is used for cables that don't
+ * have valid QSFP EEPROMs (not present, or attenuation is zero).
+ * We initialize to the default, then if there is a specific
+ * unit,port match, we use that.
+ * String format is "default# unit#,port#=# ... u,p=#"; separators must
+ * be a SPACE character.  A newline terminates.
+ * The last specific match is used (actually, all are used, but the
+ * last one is the one that winds up set); if none match at all, fall
+ * back on the default.
+ */
+static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
+{
+	char *nxt, *str;
+	int pidx, unit, port, deflt;
+	unsigned long val;
+	int any = 0;
+
+	str = cable_atten_list;
+
+	/* default number is validated in setup_cable_atten() */
+	deflt = simple_strtoul(str, &nxt, 0);
+	for (pidx = 0; pidx < dd->num_pports; ++pidx)
+		dd->pport[pidx].cpspec->no_eep = deflt;
+
+	while (*nxt && nxt[1]) {
+		str = ++nxt;
+		unit = simple_strtoul(str, &nxt, 0);
+		if (nxt == str || !*nxt || *nxt != ',') {
+			while (*nxt && *nxt++ != ' ') /* skip to next, if any */
+				;
+			continue;
+		}
+		str = ++nxt;
+		port = simple_strtoul(str, &nxt, 0);
+		if (nxt == str || *nxt != '=') {
+			while (*nxt && *nxt++ != ' ') /* skip to next, if any */
+				;
+			continue;
+		}
+		str = ++nxt;
+		val = simple_strtoul(str, &nxt, 0);
+		if (nxt == str) {
+			while (*nxt && *nxt++ != ' ') /* skip to next, if any */
+				;
+			continue;
+		}
+		if (val >= TXDDS_TABLE_SZ)
+			continue;
+		for (pidx = 0; dd->unit == unit && pidx < dd->num_pports;
+		     ++pidx) {
+			if (dd->pport[pidx].port != port ||
+				!dd->pport[pidx].link_speed_supported)
+				continue;
+			dd->pport[pidx].cpspec->no_eep = val;
+			/* now change the IBC and serdes, overriding generic */
+			init_txdds_table(&dd->pport[pidx], 1);
+			any++;
+		}
+		if (*nxt == '\n')
+			break; /* done */
+	}
+	if (change && !any) {
+		/* no specific setting, use the default.
+		 * Change the IBC and serdes, but since it's
+		 * general, don't override specific settings.
+		 */
+		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+			if (!dd->pport[pidx].link_speed_supported)
+				continue;
+			init_txdds_table(&dd->pport[pidx], 0);
+		}
+	}
+}
+
+/* handle the cable_atten parameter changing */
+static int setup_cable_atten(const char *str, struct kernel_param *kp)
+{
+	struct qib_devdata *dd;
+	unsigned long val;
+	char *n;
+
+	if (strlen(str) >= MAX_ATTEN_LEN) {
+		printk(KERN_INFO QIB_DRV_NAME " cable_atten_values string "
+		       "too long\n");
+		return -ENOSPC;
+	}
+	val = simple_strtoul(str, &n, 0);
+	if (n == str || val >= TXDDS_TABLE_SZ) {
+		printk(KERN_INFO QIB_DRV_NAME
+		       "cable_atten_values must start with a number\n");
+		return -EINVAL;
+	}
+	strcpy(cable_atten_list, str);
+
+	list_for_each_entry(dd, &qib_dev_list, list)
+		set_no_qsfp_atten(dd, 1);
+	return 0;
+}
+
+/*
+ * Write the final few registers that depend on some of the
+ * init setup.  Done late in init, just before bringing up
+ * the serdes.
+ */
+static int qib_late_7322_initreg(struct qib_devdata *dd)
+{
+	int ret = 0, n;
+	u64 val;
+
+	qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize);
+	qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize);
+	qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt);
+	qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys);
+	val = qib_read_kreg64(dd, kr_sendpioavailaddr);
+	if (val != dd->pioavailregs_phys) {
+		qib_dev_err(dd, "Catastrophic software error, "
+			    "SendPIOAvailAddr written as %lx, "
+			    "read back as %llx\n",
+			    (unsigned long) dd->pioavailregs_phys,
+			    (unsigned long long) val);
+		ret = -EINVAL;
+	}
+
+	n = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
+	qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_KERN, NULL);
+	/* driver sends get pkey, lid, etc. checking also, to catch bugs */
+	qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_ENAB1, NULL);
+
+	qib_register_observer(dd, &sendctrl_0_observer);
+	qib_register_observer(dd, &sendctrl_1_observer);
+
+	dd->control &= ~QLOGIC_IB_C_SDMAFETCHPRIOEN;
+	qib_write_kreg(dd, kr_control, dd->control);
+	/*
+	 * Set SendDmaFetchPriority and init Tx params, including
+	 * QSFP handler on boards that have QSFP.
+	 * First set our default attenuation entry for cables that
+	 * don't have valid attenuation.
+	 */
+	set_no_qsfp_atten(dd, 0);
+	for (n = 0; n < dd->num_pports; ++n) {
+		struct qib_pportdata *ppd = dd->pport + n;
+
+		qib_write_kreg_port(ppd, krp_senddmaprioritythld,
+				    sdma_fetch_prio & 0xf);
+		/* Initialize qsfp if present on board. */
+		if (dd->flags & QIB_HAS_QSFP)
+			qib_init_7322_qsfp(ppd);
+	}
+	dd->control |= QLOGIC_IB_C_SDMAFETCHPRIOEN;
+	qib_write_kreg(dd, kr_control, dd->control);
+
+	return ret;
+}
+
+/* per-IB-port register bit masks (SendCtrl, RcvCtrl, and errors) */
+#define SENDCTRL_PIBP (MASK_ACROSS(0, 1) | MASK_ACROSS(3, 3) | \
+	MASK_ACROSS(8, 15))
+#define RCVCTRL_PIBP (MASK_ACROSS(0, 17) | MASK_ACROSS(39, 41))
+#define ERRS_PIBP (MASK_ACROSS(57, 58) | MASK_ACROSS(54, 54) | \
+	MASK_ACROSS(36, 49) | MASK_ACROSS(29, 34) | MASK_ACROSS(14, 17) | \
+	MASK_ACROSS(0, 11))
+
+/*
+ * Write the initialization per-port registers that need to be done at
+ * driver load and after reset completes (i.e., that aren't done as part
+ * of other init procedures called from qib_init.c).
+ * Some of these should be redundant on reset, but play safe.
+ */
+static void write_7322_init_portregs(struct qib_pportdata *ppd)
+{
+	u64 val;
+	int i;
+
+	if (!ppd->link_speed_supported) {
+		/* no buffer credits for this port */
+		for (i = 1; i < 8; i++)
+			qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);
+		qib_write_kreg_port(ppd, krp_ibcctrl_b, 0);
+		qib_write_kreg(ppd->dd, kr_scratch, 0);
+		return;
+	}
+
+	/*
+	 * Set the number of supported virtual lanes in IBC,
+	 * for flow control packet handling on unsupported VLs
+	 */
+	val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
+	val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, VL_CAP);
+	val |= (u64)(ppd->vls_supported - 1) <<
+		SYM_LSB(IB_SDTEST_IF_TX_0, VL_CAP);
+	qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
+
+	qib_write_kreg_port(ppd, krp_rcvbthqp, QIB_KD_QP);
+
+	/* enable tx header checking */
+	qib_write_kreg_port(ppd, krp_sendcheckcontrol, IBA7322_SENDCHK_PKEY |
+			    IBA7322_SENDCHK_BTHQP | IBA7322_SENDCHK_SLID |
+			    IBA7322_SENDCHK_RAW_IPV6 | IBA7322_SENDCHK_MINSZ);
+
+	qib_write_kreg_port(ppd, krp_ncmodectrl,
+		SYM_MASK(IBNCModeCtrl_0, ScrambleCapLocal));
+
+	/*
+	 * Unconditionally clear the bufmask bits.  If SDMA is
+	 * enabled, we'll set them appropriately later.
+	 */
+	qib_write_kreg_port(ppd, krp_senddmabufmask0, 0);
+	qib_write_kreg_port(ppd, krp_senddmabufmask1, 0);
+	qib_write_kreg_port(ppd, krp_senddmabufmask2, 0);
+	if (ppd->dd->cspec->r1)
+		ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, ForceCreditUpToDate);
+}
+
+/*
+ * Write the initialization per-device registers that need to be done at
+ * driver load and after reset completes (i.e., that aren't done as part
+ * of other init procedures called from qib_init.c).  Also write per-port
+ * registers that are affected by overall device config, such as QP mapping
+ * Some of these should be redundant on reset, but play safe.
+ */
+static void write_7322_initregs(struct qib_devdata *dd)
+{
+	struct qib_pportdata *ppd;
+	int i, pidx;
+	u64 val;
+
+	/* Set Multicast QPs received by port 2 to map to context one. */
+	qib_write_kreg(dd, KREG_IDX(RcvQPMulticastContext_1), 1);
+
+	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+		unsigned n, regno;
+		unsigned long flags;
+
+		if (!dd->qpn_mask || !dd->pport[pidx].link_speed_supported)
+			continue;
+
+		ppd = &dd->pport[pidx];
+
+		/* be paranoid against later code motion, etc. */
+		spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
+		ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvQPMapEnable);
+		spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
+
+		/* Initialize QP to context mapping */
+		regno = krp_rcvqpmaptable;
+		val = 0;
+		if (dd->num_pports > 1)
+			n = dd->first_user_ctxt / dd->num_pports;
+		else
+			n = dd->first_user_ctxt - 1;
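+		/* pack six 5-bit context numbers into each map register */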
+		for (i = 0; i < 32; ) {
+			unsigned ctxt;
+
+			if (dd->num_pports > 1)
+				ctxt = (i % n) * dd->num_pports + pidx;
+			else if (i % n)
+				ctxt = (i % n) + 1;
+			else
+				ctxt = ppd->hw_pidx;
+			val |= ctxt << (5 * (i % 6));
+			i++;
+			if (i % 6 == 0) {
+				qib_write_kreg_port(ppd, regno, val);
+				val = 0;
+				regno++;
+			}
+		}
+		qib_write_kreg_port(ppd, regno, val);
+	}
+
+	/*
+	 * Set up interrupt mitigation for kernel contexts, but
+	 * not user contexts (user contexts use interrupts when
+	 * stalled waiting for any packet, so we want those interrupts
+	 * right away).
+	 */
+	for (i = 0; i < dd->first_user_ctxt; i++) {
+		dd->cspec->rcvavail_timeout[i] = rcv_int_timeout;
+		qib_write_kreg(dd, kr_rcvavailtimeout + i, rcv_int_timeout);
+	}
+
+	/*
+	 * Initialize the (disabled) rcvflow tables.  Application code
+	 * will set up each flow as it uses the flow.
+	 * This doesn't clear any of the error bits that might be set.
+	 */
+	val = TIDFLOW_ERRBITS; /* these are W1C */
+	for (i = 0; i < dd->ctxtcnt; i++) {
+		int flow;
+		for (flow = 0; flow < NUM_TIDFLOWS_CTXT; flow++)
+			qib_write_ureg(dd, ur_rcvflowtable+flow, val, i);
+	}
+
+	/*
+	 * Dual-port cards init to dual-port recovery, single-port cards
+	 * to the one port.  Dual-port cards may later adjust to one port,
+	 * and then back to dual port if both ports are connected.
+	 */
+	if (dd->num_pports)
+		setup_7322_link_recovery(dd->pport, dd->num_pports > 1);
+}
+
+static int qib_init_7322_variables(struct qib_devdata *dd)
+{
+	struct qib_pportdata *ppd;
+	unsigned features, pidx, sbufcnt;
+	int ret, mtu;
+	u32 sbufs, updthresh;
+
+	/* pport structs are contiguous, allocated after devdata */
+	ppd = (struct qib_pportdata *)(dd + 1);
+	dd->pport = ppd;
+	ppd[0].dd = dd;
+	ppd[1].dd = dd;
+
+	dd->cspec = (struct qib_chip_specific *)(ppd + 2);
+
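+	/* the two per-port cpspec structs follow cspec in this allocation */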
+	ppd[0].cpspec = (struct qib_chippport_specific *)(dd->cspec + 1);
+	ppd[1].cpspec = &ppd[0].cpspec[1];
+	ppd[0].cpspec->ppd = &ppd[0]; /* for autoneg_7322_work() */
+	ppd[1].cpspec->ppd = &ppd[1]; /* for autoneg_7322_work() */
+
+	spin_lock_init(&dd->cspec->rcvmod_lock);
+	spin_lock_init(&dd->cspec->gpio_lock);
+
+	/* we haven't yet set QIB_PRESENT, so use read directly */
+	dd->revision = readq(&dd->kregbase[kr_revision]);
+
+	if ((dd->revision & 0xffffffffU) == 0xffffffffU) {
+		qib_dev_err(dd, "Revision register read failure, "
+			    "giving up initialization\n");
+		ret = -ENODEV;
+		goto bail;
+	}
+	dd->flags |= QIB_PRESENT;  /* now register routines work */
+
+	dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMajor);
+	dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMinor);
+	dd->cspec->r1 = dd->minrev == 1;
+
+	get_7322_chip_params(dd);
+	features = qib_7322_boardname(dd);
+
+	/* now that piobcnt2k and 4k set, we can allocate these */
+	sbufcnt = dd->piobcnt2k + dd->piobcnt4k +
+		NUM_VL15_BUFS + BITS_PER_LONG - 1;
+	sbufcnt /= BITS_PER_LONG;
+	dd->cspec->sendchkenable = kmalloc(sbufcnt *
+		sizeof(*dd->cspec->sendchkenable), GFP_KERNEL);
+	dd->cspec->sendgrhchk = kmalloc(sbufcnt *
+		sizeof(*dd->cspec->sendgrhchk), GFP_KERNEL);
+	dd->cspec->sendibchk = kmalloc(sbufcnt *
+		sizeof(*dd->cspec->sendibchk), GFP_KERNEL);
+	if (!dd->cspec->sendchkenable || !dd->cspec->sendgrhchk ||
+		!dd->cspec->sendibchk) {
+		qib_dev_err(dd, "Failed allocation for hdrchk bitmaps\n");
+		ret = -ENOMEM;
+		goto bail;
+	}
+
+	ppd = dd->pport;
+
+	/*
+	 * GPIO bits for TWSI data and clock,
+	 * used for serial EEPROM.
+	 */
+	dd->gpio_sda_num = _QIB_GPIO_SDA_NUM;
+	dd->gpio_scl_num = _QIB_GPIO_SCL_NUM;
+	dd->twsi_eeprom_dev = QIB_TWSI_EEPROM_DEV;
+
+	dd->flags |= QIB_HAS_INTX | QIB_HAS_LINK_LATENCY |
+		QIB_NODMA_RTAIL | QIB_HAS_VLSUPP | QIB_HAS_HDRSUPP |
+		QIB_HAS_THRESH_UPDATE |
+		(sdma_idle_cnt ? QIB_HAS_SDMA_TIMEOUT : 0);
+	dd->flags |= qib_special_trigger ?
+		QIB_USE_SPCL_TRIG : QIB_HAS_SEND_DMA;
+
+	/*
+	 * Setup initial values.  These may change when PAT is enabled, but
+	 * we need these to do initial chip register accesses.
+	 */
+	qib_7322_set_baseaddrs(dd);
+
+	mtu = ib_mtu_enum_to_int(qib_ibmtu);
+	if (mtu == -1)
+		mtu = QIB_DEFAULT_MTU;
+
+	dd->cspec->int_enable_mask = QIB_I_BITSEXTANT;
+	/* all hwerrors become interrupts, unless special purposed */
+	dd->cspec->hwerrmask = ~0ULL;
+	/*
+	 * link_recovery setup causes these errors, so ignore them,
+	 * other than clearing them when they occur.
+	 */
+	dd->cspec->hwerrmask &=
+		~(SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_0) |
+		  SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_1) |
+		  HWE_MASK(LATriggered));
+
+	for (pidx = 0; pidx < NUM_IB_PORTS; ++pidx) {
+		struct qib_chippport_specific *cp = ppd->cpspec;
+		ppd->link_speed_supported = features & PORT_SPD_CAP;
+		features >>=  PORT_SPD_CAP_SHIFT;
+		if (!ppd->link_speed_supported) {
+			/* single port mode (7340, or configured) */
+			dd->skip_kctxt_mask |= 1 << pidx;
+			if (pidx == 0) {
+				/* Make sure port is disabled. */
+				qib_write_kreg_port(ppd, krp_rcvctrl, 0);
+				qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
+				ppd[0] = ppd[1];
+				dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
+						  IBSerdesPClkNotDetectMask_0)
+						  | SYM_MASK(HwErrMask,
+						  SDmaMemReadErrMask_0));
+				dd->cspec->int_enable_mask &= ~(
+				     SYM_MASK(IntMask, SDmaCleanupDoneMask_0) |
+				     SYM_MASK(IntMask, SDmaIdleIntMask_0) |
+				     SYM_MASK(IntMask, SDmaProgressIntMask_0) |
+				     SYM_MASK(IntMask, SDmaIntMask_0) |
+				     SYM_MASK(IntMask, ErrIntMask_0) |
+				     SYM_MASK(IntMask, SendDoneIntMask_0));
+			} else {
+				/* Make sure port is disabled. */
+				qib_write_kreg_port(ppd, krp_rcvctrl, 0);
+				qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
+				dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
+						  IBSerdesPClkNotDetectMask_1)
+						  | SYM_MASK(HwErrMask,
+						  SDmaMemReadErrMask_1));
+				dd->cspec->int_enable_mask &= ~(
+				     SYM_MASK(IntMask, SDmaCleanupDoneMask_1) |
+				     SYM_MASK(IntMask, SDmaIdleIntMask_1) |
+				     SYM_MASK(IntMask, SDmaProgressIntMask_1) |
+				     SYM_MASK(IntMask, SDmaIntMask_1) |
+				     SYM_MASK(IntMask, ErrIntMask_1) |
+				     SYM_MASK(IntMask, SendDoneIntMask_1));
+			}
+			continue;
+		}
+
+		dd->num_pports++;
+		qib_init_pportdata(ppd, dd, pidx, dd->num_pports);
+
+		ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
+		ppd->link_width_enabled = IB_WIDTH_4X;
+		ppd->link_speed_enabled = ppd->link_speed_supported;
+		/*
+		 * Set the initial values to reasonable default, will be set
+		 * for real when link is up.
+		 */
+		ppd->link_width_active = IB_WIDTH_4X;
+		ppd->link_speed_active = QIB_IB_SDR;
+		ppd->delay_mult = ib_rate_to_delay[IB_RATE_10_GBPS];
+		switch (qib_num_cfg_vls) {
+		case 1:
+			ppd->vls_supported = IB_VL_VL0;
+			break;
+		case 2:
+			ppd->vls_supported = IB_VL_VL0_1;
+			break;
+		default:
+			qib_devinfo(dd->pcidev,
+				    "Invalid num_vls %u, using 4 VLs\n",
+				    qib_num_cfg_vls);
+			qib_num_cfg_vls = 4;
+			/* fall through */
+		case 4:
+			ppd->vls_supported = IB_VL_VL0_3;
+			break;
+		case 8:
+			if (mtu <= 2048)
+				ppd->vls_supported = IB_VL_VL0_7;
+			else {
+				qib_devinfo(dd->pcidev,
+					    "Invalid num_vls %u for MTU %d "
+					    ", using 4 VLs\n",
+					    qib_num_cfg_vls, mtu);
+				ppd->vls_supported = IB_VL_VL0_3;
+				qib_num_cfg_vls = 4;
+			}
+			break;
+		}
+		ppd->vls_operational = ppd->vls_supported;
+
+		init_waitqueue_head(&cp->autoneg_wait);
+		INIT_DELAYED_WORK(&cp->autoneg_work,
+				  autoneg_7322_work);
+		if (ppd->dd->cspec->r1)
+			INIT_DELAYED_WORK(&cp->ipg_work, ipg_7322_work);
+
+		/*
+		 * For Mez and similar cards there is no QSFP info, so do
+		 * the "cable info" setup here.  This can be overridden
+		 * in adapter-specific routines.
+		 */
+		if (!(ppd->dd->flags & QIB_HAS_QSFP)) {
+			int i;
+			const struct txdds_ent *txdds;
+
+			if (!IS_QMH(ppd->dd) && !IS_QME(ppd->dd))
+				qib_devinfo(ppd->dd->pcidev, "IB%u:%u: "
+					    "Unknown mezzanine card type\n",
+					    ppd->dd->unit, ppd->port);
+			txdds = IS_QMH(ppd->dd) ? &qmh_qdr_txdds :
+				&qme_qdr_txdds;
+
+			/*
+			 * set values in case link comes up
+			 * before table is written to driver.
+			 */
+			cp->h1_val = IS_QMH(ppd->dd) ? H1_FORCE_QMH :
+				H1_FORCE_QME;
+			for (i = 0; i < SERDES_CHANS; i++) {
+				cp->amp[i] = txdds->amp;
+				cp->pre[i] = txdds->pre;
+				cp->mainv[i] = txdds->main;
+				cp->post[i] = txdds->post;
+			}
+		} else
+			cp->h1_val = H1_FORCE_VAL;
+
+		/* Avoid writes to chip for mini_init */
+		if (!qib_mini_init)
+			write_7322_init_portregs(ppd);
+
+		init_timer(&cp->chase_timer);
+		cp->chase_timer.function = reenable_chase;
+		cp->chase_timer.data = (unsigned long)ppd;
+
+		ppd++;
+	}
+
+	dd->rcvhdrentsize = QIB_RCVHDR_ENTSIZE;
+	dd->rcvhdrsize = QIB_DFLT_RCVHDRSIZE;
+	dd->rhf_offset =
+		dd->rcvhdrentsize - sizeof(u64) / sizeof(u32);
+
+	/* we always allocate at least 2048 bytes for eager buffers */
+	dd->rcvegrbufsize = max(mtu, 2048);
+
+	qib_7322_tidtemplate(dd);
+
+	/*
+	 * We can request a receive interrupt for 1 or
+	 * more packets from the current offset.
+	 */
+	dd->rhdrhead_intr_off =
+		(u64) rcv_int_count << IBA7322_HDRHEAD_PKTINT_SHIFT;
+
+	/* setup the stats timer; the add_timer is done at end of init */
+	init_timer(&dd->stats_timer);
+	dd->stats_timer.function = qib_get_7322_faststats;
+	dd->stats_timer.data = (unsigned long) dd;
+
+	dd->ureg_align = 0x10000;  /* 64KB alignment */
+
+	dd->piosize2kmax_dwords = dd->piosize2k >> 2;
+
+	qib_7322_config_ctxts(dd);
+	qib_set_ctxtcnt(dd);
+
+	if (qib_wc_pat) {
+		ret = init_chip_wc_pat(dd, NUM_VL15_BUFS * dd->align4k);
+		if (ret)
+			goto bail;
+	}
+	qib_7322_set_baseaddrs(dd); /* set chip access pointers now */
+
+	ret = 0;
+	if (qib_mini_init)
+		goto bail;
+	if (!dd->num_pports) {
+		qib_dev_err(dd, "No ports enabled, giving up initialization\n");
+		goto bail; /* no error, so one can still figure out why */
+	}
+
+	write_7322_initregs(dd);
+	ret = qib_create_ctxts(dd);
+	init_7322_cntrnames(dd);
+
+	updthresh = 8U; /* update threshold */
+
+	/*
+	 * Use all of the 4KB buffers for kernel SDMA, or none if !SDMA.
+	 * Reserve the greater of the update threshold amount or 3 buffers
+	 * for other kernel use, such as sending SMI, MAD, and ACKs,
+	 * unless we aren't enabling SDMA, in which case we want to use
+	 * all the 4k bufs for the kernel.
+	 * If the reserve were less than the update threshold, we could
+	 * wait a long time for an update.  Coded this way because we
+	 * sometimes change the update threshold for various reasons,
+	 * and we want this to remain robust.
+	 */
+	if (dd->flags & QIB_HAS_SEND_DMA) {
+		dd->cspec->sdmabufcnt = dd->piobcnt4k;
+		sbufs = updthresh > 3 ? updthresh : 3;
+	} else {
+		dd->cspec->sdmabufcnt = 0;
+		sbufs = dd->piobcnt4k;
+	}
+	dd->cspec->lastbuf_for_pio = dd->piobcnt2k + dd->piobcnt4k -
+		dd->cspec->sdmabufcnt;
+	dd->lastctxt_piobuf = dd->cspec->lastbuf_for_pio - sbufs;
+	dd->cspec->lastbuf_for_pio--; /* range is <= , not < */
+	dd->pbufsctxt = (dd->cfgctxts > dd->first_user_ctxt) ?
+		dd->lastctxt_piobuf / (dd->cfgctxts - dd->first_user_ctxt) : 0;
+
+	/*
+	 * If we have 16 user contexts, we will have 7 sbufs
+	 * per context, so reduce the update threshold to match.  We
+	 * want to update before we actually run out at low pbufs/ctxt,
+	 * so give ourselves some margin.
+	 */
+	if (dd->pbufsctxt >= 2 && dd->pbufsctxt - 2 < updthresh)
+		updthresh = dd->pbufsctxt - 2;
+	dd->cspec->updthresh_dflt = updthresh;
+	dd->cspec->updthresh = updthresh;
+
+	/* before full enable, no interrupts, no locking needed */
+	dd->sendctrl |= ((updthresh & SYM_RMASK(SendCtrl, AvailUpdThld))
+			     << SYM_LSB(SendCtrl, AvailUpdThld)) |
+			SYM_MASK(SendCtrl, SendBufAvailPad64Byte);
+
+	dd->psxmitwait_supported = 1;
+	dd->psxmitwait_check_rate = QIB_7322_PSXMITWAIT_CHECK_RATE;
+bail:
+	if (!dd->ctxtcnt)
+		dd->ctxtcnt = 1; /* for other initialization code */
+
+	return ret;
+}
+
+static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *ppd, u64 pbc,
+					u32 *pbufnum)
+{
+	u32 first, last, plen = pbc & QIB_PBC_LENGTH_MASK;
+	struct qib_devdata *dd = ppd->dd;
+
+	/* last is same for 2k and 4k, because we use 4k if all 2k busy */
+	if (pbc & PBC_7322_VL15_SEND) {
+		first = dd->piobcnt2k + dd->piobcnt4k + ppd->hw_pidx;
+		last = first;
+	} else {
+		if ((plen + 1) > dd->piosize2kmax_dwords)
+			first = dd->piobcnt2k;
+		else
+			first = 0;
+		last = dd->cspec->lastbuf_for_pio;
+	}
+	return qib_getsendbuf_range(dd, pbufnum, first, last);
+}
+
+static void qib_set_cntr_7322_sample(struct qib_pportdata *ppd, u32 intv,
+				     u32 start)
+{
+	qib_write_kreg_port(ppd, krp_psinterval, intv);
+	qib_write_kreg_port(ppd, krp_psstart, start);
+}
+
+/*
+ * Must be called with sdma_lock held, or before init finished.
+ */
+static void qib_sdma_set_7322_desc_cnt(struct qib_pportdata *ppd, unsigned cnt)
+{
+	qib_write_kreg_port(ppd, krp_senddmadesccnt, cnt);
+}
+
+static struct sdma_set_state_action sdma_7322_action_table[] = {
+	[qib_sdma_state_s00_hw_down] = {
+		.go_s99_running_tofalse = 1,
+		.op_enable = 0,
+		.op_intenable = 0,
+		.op_halt = 0,
+		.op_drain = 0,
+	},
+	[qib_sdma_state_s10_hw_start_up_wait] = {
+		.op_enable = 0,
+		.op_intenable = 1,
+		.op_halt = 1,
+		.op_drain = 0,
+	},
+	[qib_sdma_state_s20_idle] = {
+		.op_enable = 1,
+		.op_intenable = 1,
+		.op_halt = 1,
+		.op_drain = 0,
+	},
+	[qib_sdma_state_s30_sw_clean_up_wait] = {
+		.op_enable = 0,
+		.op_intenable = 1,
+		.op_halt = 1,
+		.op_drain = 0,
+	},
+	[qib_sdma_state_s40_hw_clean_up_wait] = {
+		.op_enable = 1,
+		.op_intenable = 1,
+		.op_halt = 1,
+		.op_drain = 0,
+	},
+	[qib_sdma_state_s50_hw_halt_wait] = {
+		.op_enable = 1,
+		.op_intenable = 1,
+		.op_halt = 1,
+		.op_drain = 1,
+	},
+	[qib_sdma_state_s99_running] = {
+		.op_enable = 1,
+		.op_intenable = 1,
+		.op_halt = 0,
+		.op_drain = 0,
+		.go_s99_running_totrue = 1,
+	},
+};
+
+static void qib_7322_sdma_init_early(struct qib_pportdata *ppd)
+{
+	ppd->sdma_state.set_state_action = sdma_7322_action_table;
+}
+
+static int init_sdma_7322_regs(struct qib_pportdata *ppd)
+{
+	struct qib_devdata *dd = ppd->dd;
+	unsigned lastbuf, erstbuf;
+	u64 senddmabufmask[3] = { 0 };
+	int n, ret = 0;
+
+	qib_write_kreg_port(ppd, krp_senddmabase, ppd->sdma_descq_phys);
+	qib_sdma_7322_setlengen(ppd);
+	qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
+	qib_write_kreg_port(ppd, krp_senddmareloadcnt, sdma_idle_cnt);
+	qib_write_kreg_port(ppd, krp_senddmadesccnt, 0);
+	qib_write_kreg_port(ppd, krp_senddmaheadaddr, ppd->sdma_head_phys);
+
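+	/*
+	 * Split the 4KB SDMA buffers between the ports: port 1 takes the
+	 * lower part of the range, port 2 (or a single port) the upper.
+	 */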
+	if (dd->num_pports)
+		n = dd->cspec->sdmabufcnt / dd->num_pports; /* no remainder */
+	else
+		n = dd->cspec->sdmabufcnt; /* failsafe for init */
+	erstbuf = (dd->piobcnt2k + dd->piobcnt4k) -
+		((dd->num_pports == 1 || ppd->port == 2) ? n :
+		dd->cspec->sdmabufcnt);
+	lastbuf = erstbuf + n;
+
+	ppd->sdma_state.first_sendbuf = erstbuf;
+	ppd->sdma_state.last_sendbuf = lastbuf;
+	for (; erstbuf < lastbuf; ++erstbuf) {
+		unsigned word = erstbuf / BITS_PER_LONG;
+		unsigned bit = erstbuf & (BITS_PER_LONG - 1);
+
+		BUG_ON(word >= 3);
+		senddmabufmask[word] |= 1ULL << bit;
+	}
+	qib_write_kreg_port(ppd, krp_senddmabufmask0, senddmabufmask[0]);
+	qib_write_kreg_port(ppd, krp_senddmabufmask1, senddmabufmask[1]);
+	qib_write_kreg_port(ppd, krp_senddmabufmask2, senddmabufmask[2]);
+	return ret;
+}
+
+/* sdma_lock must be held */
+static u16 qib_sdma_7322_gethead(struct qib_pportdata *ppd)
+{
+	struct qib_devdata *dd = ppd->dd;
+	int sane;
+	int use_dmahead;
+	u16 swhead;
+	u16 swtail;
+	u16 cnt;
+	u16 hwhead;
+
+	use_dmahead = __qib_sdma_running(ppd) &&
+		(dd->flags & QIB_HAS_SDMA_TIMEOUT);
+retry:
+	hwhead = use_dmahead ?
+		(u16) le64_to_cpu(*ppd->sdma_head_dma) :
+		(u16) qib_read_kreg_port(ppd, krp_senddmahead);
+
+	swhead = ppd->sdma_descq_head;
+	swtail = ppd->sdma_descq_tail;
+	cnt = ppd->sdma_descq_cnt;
+
+	if (swhead < swtail)
+		/* not wrapped */
+		sane = (hwhead >= swhead) & (hwhead <= swtail);
+	else if (swhead > swtail)
+		/* wrapped around */
+		sane = ((hwhead >= swhead) && (hwhead < cnt)) ||
+			(hwhead <= swtail);
+	else
+		/* empty */
+		sane = (hwhead == swhead);
+
+	if (unlikely(!sane)) {
+		if (use_dmahead) {
+			/* try one more time, directly from the register */
+			use_dmahead = 0;
+			goto retry;
+		}
+		/* proceed as if no progress */
+		hwhead = swhead;
+	}
+
+	return hwhead;
+}
+
+static int qib_sdma_7322_busy(struct qib_pportdata *ppd)
+{
+	u64 hwstatus = qib_read_kreg_port(ppd, krp_senddmastatus);
+
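+	/*
+	 * Busy if a scoreboard drain or halt is in progress, if the
+	 * internal halt hasn't completed, or the scoreboard isn't empty.
+	 */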
+	return (hwstatus & SYM_MASK(SendDmaStatus_0, ScoreBoardDrainInProg)) ||
+	       (hwstatus & SYM_MASK(SendDmaStatus_0, HaltInProg)) ||
+	       !(hwstatus & SYM_MASK(SendDmaStatus_0, InternalSDmaHalt)) ||
+	       !(hwstatus & SYM_MASK(SendDmaStatus_0, ScbEmpty));
+}
+
+/*
+ * Compute the amount of delay before sending the next packet if the
+ * port's send rate differs from the static rate set for the QP.
+ * The delay affects the next packet, and the amount of the delay is
+ * based on the length of this packet.
+ */
+static u32 qib_7322_setpbc_control(struct qib_pportdata *ppd, u32 plen,
+				   u8 srate, u8 vl)
+{
+	u8 snd_mult = ppd->delay_mult;
+	u8 rcv_mult = ib_rate_to_delay[srate];
+	u32 ret;
+
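+	/*
+	 * Delay only if the QP's static rate is slower than the port's
+	 * (a larger delay multiplier), scaled by the packet length.
+	 */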
+	ret = rcv_mult > snd_mult ? ((plen + 1) >> 1) * snd_mult : 0;
+
+	/* Indicate VL15, else set the VL in the control word */
+	if (vl == 15)
+		ret |= PBC_7322_VL15_SEND_CTRL;
+	else
+		ret |= vl << PBC_VL_NUM_LSB;
+	ret |= ((u32)(ppd->hw_pidx)) << PBC_PORT_SEL_LSB;
+
+	return ret;
+}
+
+/*
+ * Enable the per-port VL15 send buffers for use.
+ * They follow the rest of the buffers, without a config parameter.
+ * This was in initregs, but that is done before the shadow
+ * is set up, and this has to be done after the shadow is
+ * set up.
+ */
+static void qib_7322_initvl15_bufs(struct qib_devdata *dd)
+{
+	unsigned vl15bufs;
+
+	vl15bufs = dd->piobcnt2k + dd->piobcnt4k;
+	qib_chg_pioavailkernel(dd, vl15bufs, NUM_VL15_BUFS,
+			       TXCHK_CHG_TYPE_KERN, NULL);
+}
+
+static void qib_7322_init_ctxt(struct qib_ctxtdata *rcd)
+{
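+	/*
+	 * The first NUM_IB_PORTS contexts are the kernel contexts; they
+	 * split KCTXT0_EGRCNT between them when both ports are in use.
+	 */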
+	if (rcd->ctxt < NUM_IB_PORTS) {
+		if (rcd->dd->num_pports > 1) {
+			rcd->rcvegrcnt = KCTXT0_EGRCNT / 2;
+			rcd->rcvegr_tid_base = rcd->ctxt ? rcd->rcvegrcnt : 0;
+		} else {
+			rcd->rcvegrcnt = KCTXT0_EGRCNT;
+			rcd->rcvegr_tid_base = 0;
+		}
+	} else {
+		rcd->rcvegrcnt = rcd->dd->cspec->rcvegrcnt;
+		rcd->rcvegr_tid_base = KCTXT0_EGRCNT +
+			(rcd->ctxt - NUM_IB_PORTS) * rcd->rcvegrcnt;
+	}
+}
+
+#define QTXSLEEPS 5000
+static void qib_7322_txchk_change(struct qib_devdata *dd, u32 start,
+				  u32 len, u32 which, struct qib_ctxtdata *rcd)
+{
+	int i;
+	const int last = start + len - 1;
+	const int lastr = last / BITS_PER_LONG;
+	u32 sleeps = 0;
+	int wait = rcd != NULL;
+	unsigned long flags;
+
+	while (wait) {
+		unsigned long shadow;
+		int cstart, previ = -1;
+
+		/*
+		 * When flipping from kernel to user, we can't change
+		 * the checking type if the buffer is allocated to the
+		 * driver.  It's OK the other direction, because it's
+		 * from close, and we have just disarmed all the
+		 * buffers.  All the kernel-to-kernel changes are also
+		 * OK.
+		 */
+		for (cstart = start; cstart <= last; cstart++) {
+			i = ((2 * cstart) + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT)
+				/ BITS_PER_LONG;
+			if (i != previ) {
+				shadow = (unsigned long)
+					le64_to_cpu(dd->pioavailregs_dma[i]);
+				previ = i;
+			}
+			if (test_bit(((2 * cstart) +
+				      QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT)
+				     % BITS_PER_LONG, &shadow))
+				break;
+		}
+
+		if (cstart > last)
+			break;
+
+		if (sleeps == QTXSLEEPS)
+			break;
+		/* make sure we see an updated copy next time around */
+		sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
+		sleeps++;
+		msleep(1);
+	}
+
+	switch (which) {
+	case TXCHK_CHG_TYPE_DIS1:
+		/*
+		 * disable checking on a range; used by diags; just
+		 * one buffer, but still written generically
+		 */
+		for (i = start; i <= last; i++)
+			clear_bit(i, dd->cspec->sendchkenable);
+		break;
+
+	case TXCHK_CHG_TYPE_ENAB1:
+		/*
+		 * (re)enable checking on a range; used by diags; just
+		 * one buffer, but still written generically; read
+		 * scratch to be sure buffer actually triggered, not
+		 * just flushed from processor.
+		 */
+		qib_read_kreg32(dd, kr_scratch);
+		for (i = start; i <= last; i++)
+			set_bit(i, dd->cspec->sendchkenable);
+		break;
+
+	case TXCHK_CHG_TYPE_KERN:
+		/* usable by kernel */
+		for (i = start; i <= last; i++) {
+			set_bit(i, dd->cspec->sendibchk);
+			clear_bit(i, dd->cspec->sendgrhchk);
+		}
+		spin_lock_irqsave(&dd->uctxt_lock, flags);
+		/* see if we need to raise avail update threshold */
+		for (i = dd->first_user_ctxt;
+		     dd->cspec->updthresh != dd->cspec->updthresh_dflt
+		     && i < dd->cfgctxts; i++)
+			if (dd->rcd[i] && dd->rcd[i]->subctxt_cnt &&
+			   ((dd->rcd[i]->piocnt / dd->rcd[i]->subctxt_cnt) - 1)
+			   < dd->cspec->updthresh_dflt)
+				break;
+		spin_unlock_irqrestore(&dd->uctxt_lock, flags);
+		if (i == dd->cfgctxts) {
+			spin_lock_irqsave(&dd->sendctrl_lock, flags);
+			dd->cspec->updthresh = dd->cspec->updthresh_dflt;
+			dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
+			dd->sendctrl |= (dd->cspec->updthresh &
+					 SYM_RMASK(SendCtrl, AvailUpdThld)) <<
+					   SYM_LSB(SendCtrl, AvailUpdThld);
+			spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
+			sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
+		}
+		break;
+
+	case TXCHK_CHG_TYPE_USER:
+		/* for user process */
+		for (i = start; i <= last; i++) {
+			clear_bit(i, dd->cspec->sendibchk);
+			set_bit(i, dd->cspec->sendgrhchk);
+		}
+		spin_lock_irqsave(&dd->sendctrl_lock, flags);
+		if (rcd && rcd->subctxt_cnt && ((rcd->piocnt
+			/ rcd->subctxt_cnt) - 1) < dd->cspec->updthresh) {
+			dd->cspec->updthresh = (rcd->piocnt /
+						rcd->subctxt_cnt) - 1;
+			dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
+			dd->sendctrl |= (dd->cspec->updthresh &
+					SYM_RMASK(SendCtrl, AvailUpdThld))
+					<< SYM_LSB(SendCtrl, AvailUpdThld);
+			spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
+			sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
+		} else
+			spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
+		break;
+
+	default:
+		break;
+	}
+
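+	/*
+	 * Push the updated shadows to the chip: which >= 2 covers the diag
+	 * enable/disable cases (sendchkenable), which < 2 the kernel/user
+	 * cases (the GRH and IB-packet check masks).
+	 */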
+	for (i = start / BITS_PER_LONG; which >= 2 && i <= lastr; ++i)
+		qib_write_kreg(dd, kr_sendcheckmask + i,
+			       dd->cspec->sendchkenable[i]);
+
+	for (i = start / BITS_PER_LONG; which < 2 && i <= lastr; ++i) {
+		qib_write_kreg(dd, kr_sendgrhcheckmask + i,
+			       dd->cspec->sendgrhchk[i]);
+		qib_write_kreg(dd, kr_sendibpktmask + i,
+			       dd->cspec->sendibchk[i]);
+	}
+
+	/*
+	 * Be sure whatever we did was seen by the chip and acted upon,
+	 * before we return.  Mostly important for which >= 2.
+	 */
+	qib_read_kreg32(dd, kr_scratch);
+}
+
+
+/* useful for trigger analyzers, etc. */
+static void writescratch(struct qib_devdata *dd, u32 val)
+{
+	qib_write_kreg(dd, kr_scratch, val);
+}
+
+/* Dummy for now, use chip regs soon */
+static int qib_7322_tempsense_rd(struct qib_devdata *dd, int regnum)
+{
+	return -ENXIO;
+}
+
+/**
+ * qib_init_iba7322_funcs - set up the chip-specific function pointers
+ * @pdev: the pci_dev for the qlogic_ib device
+ * @ent: pci_device_id struct for this dev
+ *
+ * Also allocates, inits, and returns the devdata struct for this
+ * device instance
+ *
+ * This is global, and is called directly at init to set up the
+ * chip-specific function pointers for later use.
+ */
+struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev,
+					   const struct pci_device_id *ent)
+{
+	struct qib_devdata *dd;
+	int ret, i;
+	u32 tabsize, actual_cnt = 0;
+
+	dd = qib_alloc_devdata(pdev,
+		NUM_IB_PORTS * sizeof(struct qib_pportdata) +
+		sizeof(struct qib_chip_specific) +
+		NUM_IB_PORTS * sizeof(struct qib_chippport_specific));
+	if (IS_ERR(dd))
+		goto bail;
+
+	dd->f_bringup_serdes    = qib_7322_bringup_serdes;
+	dd->f_cleanup           = qib_setup_7322_cleanup;
+	dd->f_clear_tids        = qib_7322_clear_tids;
+	dd->f_free_irq          = qib_7322_free_irq;
+	dd->f_get_base_info     = qib_7322_get_base_info;
+	dd->f_get_msgheader     = qib_7322_get_msgheader;
+	dd->f_getsendbuf        = qib_7322_getsendbuf;
+	dd->f_gpio_mod          = gpio_7322_mod;
+	dd->f_eeprom_wen        = qib_7322_eeprom_wen;
+	dd->f_hdrqempty         = qib_7322_hdrqempty;
+	dd->f_ib_updown         = qib_7322_ib_updown;
+	dd->f_init_ctxt         = qib_7322_init_ctxt;
+	dd->f_initvl15_bufs     = qib_7322_initvl15_bufs;
+	dd->f_intr_fallback     = qib_7322_intr_fallback;
+	dd->f_late_initreg      = qib_late_7322_initreg;
+	dd->f_setpbc_control    = qib_7322_setpbc_control;
+	dd->f_portcntr          = qib_portcntr_7322;
+	dd->f_put_tid           = qib_7322_put_tid;
+	dd->f_quiet_serdes      = qib_7322_mini_quiet_serdes;
+	dd->f_rcvctrl           = rcvctrl_7322_mod;
+	dd->f_read_cntrs        = qib_read_7322cntrs;
+	dd->f_read_portcntrs    = qib_read_7322portcntrs;
+	dd->f_reset             = qib_do_7322_reset;
+	dd->f_init_sdma_regs    = init_sdma_7322_regs;
+	dd->f_sdma_busy         = qib_sdma_7322_busy;
+	dd->f_sdma_gethead      = qib_sdma_7322_gethead;
+	dd->f_sdma_sendctrl     = qib_7322_sdma_sendctrl;
+	dd->f_sdma_set_desc_cnt = qib_sdma_set_7322_desc_cnt;
+	dd->f_sdma_update_tail  = qib_sdma_update_7322_tail;
+	dd->f_sendctrl          = sendctrl_7322_mod;
+	dd->f_set_armlaunch     = qib_set_7322_armlaunch;
+	dd->f_set_cntr_sample   = qib_set_cntr_7322_sample;
+	dd->f_iblink_state      = qib_7322_iblink_state;
+	dd->f_ibphys_portstate  = qib_7322_phys_portstate;
+	dd->f_get_ib_cfg        = qib_7322_get_ib_cfg;
+	dd->f_set_ib_cfg        = qib_7322_set_ib_cfg;
+	dd->f_set_ib_loopback   = qib_7322_set_loopback;
+	dd->f_get_ib_table      = qib_7322_get_ib_table;
+	dd->f_set_ib_table      = qib_7322_set_ib_table;
+	dd->f_set_intr_state    = qib_7322_set_intr_state;
+	dd->f_setextled         = qib_setup_7322_setextled;
+	dd->f_txchk_change      = qib_7322_txchk_change;
+	dd->f_update_usrhead    = qib_update_7322_usrhead;
+	dd->f_wantpiobuf_intr   = qib_wantpiobuf_7322_intr;
+	dd->f_xgxs_reset        = qib_7322_mini_pcs_reset;
+	dd->f_sdma_hw_clean_up  = qib_7322_sdma_hw_clean_up;
+	dd->f_sdma_hw_start_up  = qib_7322_sdma_hw_start_up;
+	dd->f_sdma_init_early   = qib_7322_sdma_init_early;
+	dd->f_writescratch      = writescratch;
+	dd->f_tempsense_rd	= qib_7322_tempsense_rd;
+	/*
+	 * Do remaining PCIe setup and save PCIe values in dd.
+	 * Any error printing is already done by the init code.
+	 * On return, we have the chip mapped, but chip registers
+	 * are not set up until start of qib_init_7322_variables.
+	 */
+	ret = qib_pcie_ddinit(dd, pdev, ent);
+	if (ret < 0)
+		goto bail_free;
+
+	/* initialize chip-specific variables */
+	ret = qib_init_7322_variables(dd);
+	if (ret)
+		goto bail_cleanup;
+
+	if (qib_mini_init || !dd->num_pports)
+		goto bail;
+
+	/*
+	 * Determine number of vectors we want; depends on port count
+	 * and number of configured kernel receive queues actually used.
+	 * Should also depend on whether sdma is enabled or not, but
+	 * that's such a rare testing case it's not worth worrying about.
+	 */
+	tabsize = dd->first_user_ctxt + ARRAY_SIZE(irq_table);
+	for (i = 0; i < tabsize; i++)
+		if ((i < ARRAY_SIZE(irq_table) &&
+		     irq_table[i].port <= dd->num_pports) ||
+		    (i >= ARRAY_SIZE(irq_table) &&
+		     dd->rcd[i - ARRAY_SIZE(irq_table)]))
+			actual_cnt++;
+	tabsize = actual_cnt;
+	dd->cspec->msix_entries = kmalloc(tabsize *
+			sizeof(struct msix_entry), GFP_KERNEL);
+	dd->cspec->msix_arg = kmalloc(tabsize *
+			sizeof(void *), GFP_KERNEL);
+	if (!dd->cspec->msix_entries || !dd->cspec->msix_arg) {
+		qib_dev_err(dd, "No memory for MSIx table\n");
+		tabsize = 0;
+	}
+	for (i = 0; i < tabsize; i++)
+		dd->cspec->msix_entries[i].entry = i;
+
+	if (qib_pcie_params(dd, 8, &tabsize, dd->cspec->msix_entries))
+		qib_dev_err(dd, "Failed to setup PCIe or interrupts; "
+			    "continuing anyway\n");
+	/* may be less than we wanted, if not enough available */
+	dd->cspec->num_msix_entries = tabsize;
+
+	/* setup interrupt handler */
+	qib_setup_7322_interrupt(dd, 1);
+
+	/* clear diagctrl register, in case diags were running and crashed */
+	qib_write_kreg(dd, kr_hwdiagctrl, 0);
+
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+	ret = dca_add_requester(&pdev->dev);
+	if (!ret) {
+		dd->flags |= QIB_DCA_ENABLED;
+		qib_setup_dca(dd);
+	}
+#endif
+	goto bail;
+
+bail_cleanup:
+	qib_pcie_ddcleanup(dd);
+bail_free:
+	qib_free_devdata(dd);
+	dd = ERR_PTR(ret);
+bail:
+	return dd;
+}
+
+/*
+ * Set the table entry at the specified index from the specified table.
+ * There are 3 * TXDDS_TABLE_SZ entries in all per port, with the first
+ * TXDDS_TABLE_SZ for SDR, the next for DDR, and the last for QDR.
+ * 'ridx' below addresses the correct entry, while its 4 LSBs select the
+ * corresponding entry (one of TXDDS_TABLE_SZ) from the selected table.
+ */
+#define DDS_ENT_AMP_LSB 14
+#define DDS_ENT_MAIN_LSB 9
+#define DDS_ENT_POST_LSB 5
+#define DDS_ENT_PRE_XTRA_LSB 3
+#define DDS_ENT_PRE_LSB 0
+
+/*
+ * Set one entry in the TxDDS table for the specified port.
+ * ridx picks one of the entries, while tp points
+ * to the appropriate table entry.
+ */
+static void set_txdds(struct qib_pportdata *ppd, int ridx,
+		      const struct txdds_ent *tp)
+{
+	struct qib_devdata *dd = ppd->dd;
+	u32 pack_ent;
+	int regidx;
+
+	/* Get correct offset in chip-space, and in source table */
+	regidx = KREG_IBPORT_IDX(IBSD_DDS_MAP_TABLE) + ridx;
+	/*
+	 * We do not use qib_write_kreg_port() because it was intended
+	 * only for registers in the lower "port specific" pages.
+	 * So do the index calculation by hand.
+	 */
+	if (ppd->hw_pidx)
+		regidx += (dd->palign / sizeof(u64));
+
+	pack_ent = tp->amp << DDS_ENT_AMP_LSB;
+	pack_ent |= tp->main << DDS_ENT_MAIN_LSB;
+	pack_ent |= tp->pre << DDS_ENT_PRE_LSB;
+	pack_ent |= tp->post << DDS_ENT_POST_LSB;
+	qib_write_kreg(dd, regidx, pack_ent);
+	/* Prevent back-to-back writes by hitting scratch */
+	qib_write_kreg(ppd->dd, kr_scratch, 0);
+}
+
+static const struct vendor_txdds_ent vendor_txdds[] = {
+	{ /* Amphenol 1m 30awg NoEq */
+		{ 0x41, 0x50, 0x48 }, "584470002       ",
+		{ 10,  0,  0,  5 }, { 10,  0,  0,  9 }, {  7,  1,  0, 13 },
+	},
+	{ /* Amphenol 3m 28awg NoEq */
+		{ 0x41, 0x50, 0x48 }, "584470004       ",
+		{  0,  0,  0,  8 }, {  0,  0,  0, 11 }, {  0,  1,  7, 15 },
+	},
+	{ /* Finisar 3m OM2 Optical */
+		{ 0x00, 0x90, 0x65 }, "FCBG410QB1C03-QL",
+		{  0,  0,  0,  3 }, {  0,  0,  0,  4 }, {  0,  0,  0, 13 },
+	},
+	{ /* Finisar 30m OM2 Optical */
+		{ 0x00, 0x90, 0x65 }, "FCBG410QB1C30-QL",
+		{  0,  0,  0,  1 }, {  0,  0,  0,  5 }, {  0,  0,  0, 11 },
+	},
+	{ /* Finisar Default OM2 Optical */
+		{ 0x00, 0x90, 0x65 }, NULL,
+		{  0,  0,  0,  2 }, {  0,  0,  0,  5 }, {  0,  0,  0, 12 },
+	},
+	{ /* Gore 1m 30awg NoEq */
+		{ 0x00, 0x21, 0x77 }, "QSN3300-1       ",
+		{  0,  0,  0,  6 }, {  0,  0,  0,  9 }, {  0,  1,  0, 15 },
+	},
+	{ /* Gore 2m 30awg NoEq */
+		{ 0x00, 0x21, 0x77 }, "QSN3300-2       ",
+		{  0,  0,  0,  8 }, {  0,  0,  0, 10 }, {  0,  1,  7, 15 },
+	},
+	{ /* Gore 1m 28awg NoEq */
+		{ 0x00, 0x21, 0x77 }, "QSN3800-1       ",
+		{  0,  0,  0,  6 }, {  0,  0,  0,  8 }, {  0,  1,  0, 15 },
+	},
+	{ /* Gore 3m 28awg NoEq */
+		{ 0x00, 0x21, 0x77 }, "QSN3800-3       ",
+		{  0,  0,  0,  9 }, {  0,  0,  0, 13 }, {  0,  1,  7, 15 },
+	},
+	{ /* Gore 5m 24awg Eq */
+		{ 0x00, 0x21, 0x77 }, "QSN7000-5       ",
+		{  0,  0,  0,  7 }, {  0,  0,  0,  9 }, {  0,  1,  3, 15 },
+	},
+	{ /* Gore 7m 24awg Eq */
+		{ 0x00, 0x21, 0x77 }, "QSN7000-7       ",
+		{  0,  0,  0,  9 }, {  0,  0,  0, 11 }, {  0,  2,  6, 15 },
+	},
+	{ /* Gore 5m 26awg Eq */
+		{ 0x00, 0x21, 0x77 }, "QSN7600-5       ",
+		{  0,  0,  0,  8 }, {  0,  0,  0, 11 }, {  0,  1,  9, 13 },
+	},
+	{ /* Gore 7m 26awg Eq */
+		{ 0x00, 0x21, 0x77 }, "QSN7600-7       ",
+		{  0,  0,  0,  8 }, {  0,  0,  0, 11 }, {  10,  1,  8, 15 },
+	},
+	{ /* Intersil 12m 24awg Active */
+		{ 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1224",
+		{  0,  0,  0,  2 }, {  0,  0,  0,  5 }, {  0,  3,  0,  9 },
+	},
+	{ /* Intersil 10m 28awg Active */
+		{ 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1028",
+		{  0,  0,  0,  6 }, {  0,  0,  0,  4 }, {  0,  2,  0,  2 },
+	},
+	{ /* Intersil 7m 30awg Active */
+		{ 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0730",
+		{  0,  0,  0,  6 }, {  0,  0,  0,  4 }, {  0,  1,  0,  3 },
+	},
+	{ /* Intersil 5m 32awg Active */
+		{ 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0532",
+		{  0,  0,  0,  6 }, {  0,  0,  0,  6 }, {  0,  2,  0,  8 },
+	},
+	{ /* Intersil Default Active */
+		{ 0x00, 0x30, 0xB4 }, NULL,
+		{  0,  0,  0,  6 }, {  0,  0,  0,  5 }, {  0,  2,  0,  5 },
+	},
+	{ /* Luxtera 20m Active Optical */
+		{ 0x00, 0x25, 0x63 }, NULL,
+		{  0,  0,  0,  5 }, {  0,  0,  0,  8 }, {  0,  2,  0,  12 },
+	},
+	{ /* Molex 1M Cu loopback */
+		{ 0x00, 0x09, 0x3A }, "74763-0025      ",
+		{  2,  2,  6, 15 }, {  2,  2,  6, 15 }, {  2,  2,  6, 15 },
+	},
+	{ /* Molex 2m 28awg NoEq */
+		{ 0x00, 0x09, 0x3A }, "74757-2201      ",
+		{  0,  0,  0,  6 }, {  0,  0,  0,  9 }, {  0,  1,  1, 15 },
+	},
+};
+
+static const struct txdds_ent txdds_sdr[TXDDS_TABLE_SZ] = {
+	/* amp, pre, main, post */
+	{  2, 2, 15,  6 },	/* Loopback */
+	{  0, 0,  0,  1 },	/*  2 dB */
+	{  0, 0,  0,  2 },	/*  3 dB */
+	{  0, 0,  0,  3 },	/*  4 dB */
+	{  0, 0,  0,  4 },	/*  5 dB */
+	{  0, 0,  0,  5 },	/*  6 dB */
+	{  0, 0,  0,  6 },	/*  7 dB */
+	{  0, 0,  0,  7 },	/*  8 dB */
+	{  0, 0,  0,  8 },	/*  9 dB */
+	{  0, 0,  0,  9 },	/* 10 dB */
+	{  0, 0,  0, 10 },	/* 11 dB */
+	{  0, 0,  0, 11 },	/* 12 dB */
+	{  0, 0,  0, 12 },	/* 13 dB */
+	{  0, 0,  0, 13 },	/* 14 dB */
+	{  0, 0,  0, 14 },	/* 15 dB */
+	{  0, 0,  0, 15 },	/* 16 dB */
+};
+
+static const struct txdds_ent txdds_ddr[TXDDS_TABLE_SZ] = {
+	/* amp, pre, main, post */
+	{  2, 2, 15,  6 },	/* Loopback */
+	{  0, 0,  0,  8 },	/*  2 dB */
+	{  0, 0,  0,  8 },	/*  3 dB */
+	{  0, 0,  0,  9 },	/*  4 dB */
+	{  0, 0,  0,  9 },	/*  5 dB */
+	{  0, 0,  0, 10 },	/*  6 dB */
+	{  0, 0,  0, 10 },	/*  7 dB */
+	{  0, 0,  0, 11 },	/*  8 dB */
+	{  0, 0,  0, 11 },	/*  9 dB */
+	{  0, 0,  0, 12 },	/* 10 dB */
+	{  0, 0,  0, 12 },	/* 11 dB */
+	{  0, 0,  0, 13 },	/* 12 dB */
+	{  0, 0,  0, 13 },	/* 13 dB */
+	{  0, 0,  0, 14 },	/* 14 dB */
+	{  0, 0,  0, 14 },	/* 15 dB */
+	{  0, 0,  0, 15 },	/* 16 dB */
+};
+
+static const struct txdds_ent txdds_qdr[TXDDS_TABLE_SZ] = {
+	/* amp, pre, main, post */
+	{  2, 2, 15,  6 },	/* Loopback */
+	{  0, 1,  0,  7 },	/*  2 dB */
+	{  0, 1,  0,  9 },	/*  3 dB */
+	{  0, 1,  0, 11 },	/*  4 dB */
+	{  0, 1,  0, 13 },	/*  5 dB */
+	{  0, 1,  0, 15 },	/*  6 dB */
+	{  0, 1,  3, 15 },	/*  7 dB */
+	{  0, 1,  7, 15 },	/*  8 dB */
+	{  0, 1,  7, 15 },	/*  9 dB */
+	{  0, 1,  8, 15 },	/* 10 dB */
+	{  0, 1,  9, 15 },	/* 11 dB */
+	{  0, 1, 10, 15 },	/* 12 dB */
+	{  0, 2,  6, 15 },	/* 13 dB */
+	{  0, 2,  7, 15 },	/* 14 dB */
+	{  0, 2,  8, 15 },	/* 15 dB */
+	{  0, 2,  9, 15 },	/* 16 dB */
+};
+
+static const struct txdds_ent *get_atten_table(const struct txdds_ent *txdds,
+					       unsigned atten)
+{
+	/*
+	 * The attenuation table starts at 2dB for entry 1,
+	 * with entry 0 being the loopback entry.
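+	 * For example, an attenuation of 5 dB selects entry 4; values of
+	 * 2 dB or less clamp to entry 1, and out-of-range values clamp to
+	 * the last entry.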
+	 */
+	if (atten <= 2)
+		atten = 1;
+	else if (atten > TXDDS_TABLE_SZ)
+		atten = TXDDS_TABLE_SZ - 1;
+	else
+		atten--;
+	return txdds + atten;
+}
+
+/*
+ * if override is set, the module parameter cable_atten has a value
+ * for this specific port, so use it, rather than our normal mechanism.
+ */
+static void find_best_ent(struct qib_pportdata *ppd,
+			  const struct txdds_ent **sdr_dds,
+			  const struct txdds_ent **ddr_dds,
+			  const struct txdds_ent **qdr_dds, int override)
+{
+	struct qib_qsfp_cache *qd = &ppd->cpspec->qsfp_data.cache;
+	int idx;
+
+	/* Search table of known cables */
+	for (idx = 0; !override && idx < ARRAY_SIZE(vendor_txdds); ++idx) {
+		const struct vendor_txdds_ent *v = vendor_txdds + idx;
+
+		if (!memcmp(v->oui, qd->oui, QSFP_VOUI_LEN) &&
+		    (!v->partnum ||
+		     !memcmp(v->partnum, qd->partnum, QSFP_PN_LEN))) {
+			*sdr_dds = &v->sdr;
+			*ddr_dds = &v->ddr;
+			*qdr_dds = &v->qdr;
+			return;
+		}
+	}
+
+	/* Lookup serdes setting by cable type and attenuation */
+	if (!override && QSFP_IS_ACTIVE(qd->tech)) {
+		*sdr_dds = txdds_sdr + ppd->dd->board_atten;
+		*ddr_dds = txdds_ddr + ppd->dd->board_atten;
+		*qdr_dds = txdds_qdr + ppd->dd->board_atten;
+		return;
+	}
+
+	if (!override && QSFP_HAS_ATTEN(qd->tech) && (qd->atten[0] ||
+						      qd->atten[1])) {
+		*sdr_dds = get_atten_table(txdds_sdr, qd->atten[0]);
+		*ddr_dds = get_atten_table(txdds_ddr, qd->atten[0]);
+		*qdr_dds = get_atten_table(txdds_qdr, qd->atten[1]);
+		return;
+	} else {
+		/*
+		 * If we have no (or incomplete) data from the cable
+		 * EEPROM, or no QSFP, use the module parameter value
+		 * to index into the attenuation table.
+		 */
+		*sdr_dds = &txdds_sdr[ppd->cpspec->no_eep];
+		*ddr_dds = &txdds_ddr[ppd->cpspec->no_eep];
+		*qdr_dds = &txdds_qdr[ppd->cpspec->no_eep];
+	}
+}
+
+static void init_txdds_table(struct qib_pportdata *ppd, int override)
+{
+	const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds;
+	struct txdds_ent *dds;
+	int idx;
+	int single_ent = 0;
+
+	if (IS_QMH(ppd->dd)) {
+		/* normally will be overridden, via setup_qmh() */
+		sdr_dds = &qmh_sdr_txdds;
+		ddr_dds = &qmh_ddr_txdds;
+		qdr_dds = &qmh_qdr_txdds;
+		single_ent = 1;
+	} else if (IS_QME(ppd->dd)) {
+		sdr_dds = &qme_sdr_txdds;
+		ddr_dds = &qme_ddr_txdds;
+		qdr_dds = &qme_qdr_txdds;
+		single_ent = 1;
+	} else
+		find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, override);
+
+	/* Fill in the first entry with the best entry found. */
+	set_txdds(ppd, 0, sdr_dds);
+	set_txdds(ppd, TXDDS_TABLE_SZ, ddr_dds);
+	set_txdds(ppd, 2 * TXDDS_TABLE_SZ, qdr_dds);
+
+	/*
+	 * for our current speed, also write that value into the
+	 * tx serdes registers.
+	 */
+	dds = (struct txdds_ent *)(ppd->link_speed_active == QIB_IB_QDR ?
+				   qdr_dds : (ppd->link_speed_active ==
+					      QIB_IB_DDR ? ddr_dds : sdr_dds));
+	write_tx_serdes_param(ppd, dds);
+
+	/* Fill in the remaining entries with the default table values. */
+	for (idx = 1; idx < ARRAY_SIZE(txdds_sdr); ++idx) {
+		set_txdds(ppd, idx, single_ent ? sdr_dds : txdds_sdr + idx);
+		set_txdds(ppd, idx + TXDDS_TABLE_SZ,
+			  single_ent ? ddr_dds : txdds_ddr + idx);
+		set_txdds(ppd, idx + 2 * TXDDS_TABLE_SZ,
+			  single_ent ? qdr_dds : txdds_qdr + idx);
+	}
+}
+
+#define KR_AHB_ACC KREG_IDX(ahb_access_ctrl)
+#define KR_AHB_TRANS KREG_IDX(ahb_transaction_reg)
+#define AHB_TRANS_RDY SYM_MASK(ahb_transaction_reg, ahb_rdy)
+#define AHB_ADDR_LSB SYM_LSB(ahb_transaction_reg, ahb_address)
+#define AHB_DATA_LSB SYM_LSB(ahb_transaction_reg, ahb_data)
+#define AHB_WR SYM_MASK(ahb_transaction_reg, write_not_read)
+#define AHB_TRANS_TRIES 10
+
+/*
+ * The chan argument is 0=chan0, 1=chan1, 2=pll, 3=chan2, 4=chan3,
+ * 5=subsystem, which is why most calls pass "chan + (chan >> 1)"
+ * for the channel argument.
+ */
+static u32 ahb_mod(struct qib_devdata *dd, int quad, int chan, int addr,
+		    u32 data, u32 mask)
+{
+	u32 rd_data, wr_data, sz_mask;
+	u64 trans, acc, prev_acc;
+	u32 ret = 0xBAD0BAD;
+	int tries;
+
+	prev_acc = qib_read_kreg64(dd, KR_AHB_ACC);
+	/* From this point on, make sure we return access */
+	acc = (quad << 1) | 1;
+	qib_write_kreg(dd, KR_AHB_ACC, acc);
+
+	for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
+		trans = qib_read_kreg64(dd, KR_AHB_TRANS);
+		if (trans & AHB_TRANS_RDY)
+			break;
+	}
+	if (tries >= AHB_TRANS_TRIES) {
+		qib_dev_err(dd, "No ahb_rdy in %d tries\n", AHB_TRANS_TRIES);
+		goto bail;
+	}
+
+	/* If mask is not all 1s, we need to read, but different SerDes
+	 * entities have different sizes
+	 */
+	sz_mask = (1UL << ((quad == 1) ? 32 : 16)) - 1;
+	wr_data = data & mask & sz_mask;
+	if ((~mask & sz_mask) != 0) {
+		trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1);
+		qib_write_kreg(dd, KR_AHB_TRANS, trans);
+
+		for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
+			trans = qib_read_kreg64(dd, KR_AHB_TRANS);
+			if (trans & AHB_TRANS_RDY)
+				break;
+		}
+		if (tries >= AHB_TRANS_TRIES) {
+			qib_dev_err(dd, "No Rd ahb_rdy in %d tries\n",
+				    AHB_TRANS_TRIES);
+			goto bail;
+		}
+		/* Re-read in case the host split the read and returned the data first */
+		trans = qib_read_kreg64(dd, KR_AHB_TRANS);
+		rd_data = (uint32_t)(trans >> AHB_DATA_LSB);
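+		/* keep the read-back bits outside the mask; masked bits come from data */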
+		wr_data |= (rd_data & ~mask & sz_mask);
+	}
+
+	/* If mask is not zero, we need to write. */
+	if (mask & sz_mask) {
+		trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1);
+		trans |= ((uint64_t)wr_data << AHB_DATA_LSB);
+		trans |= AHB_WR;
+		qib_write_kreg(dd, KR_AHB_TRANS, trans);
+
+		for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
+			trans = qib_read_kreg64(dd, KR_AHB_TRANS);
+			if (trans & AHB_TRANS_RDY)
+				break;
+		}
+		if (tries >= AHB_TRANS_TRIES) {
+			qib_dev_err(dd, "No Wr ahb_rdy in %d tries\n",
+				    AHB_TRANS_TRIES);
+			goto bail;
+		}
+	}
+	ret = wr_data;
+bail:
+	qib_write_kreg(dd, KR_AHB_ACC, prev_acc);
+	return ret;
+}
+
+static void ibsd_wr_allchans(struct qib_pportdata *ppd, int addr, unsigned data,
+			     unsigned mask)
+{
+	struct qib_devdata *dd = ppd->dd;
+	int chan;
+	u32 rbc;
+
+	for (chan = 0; chan < SERDES_CHANS; ++chan) {
+		ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), addr,
+			data, mask);
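+		/* read back (value unused), presumably to verify/settle the write */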
+		rbc = ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
+			      addr, 0, 0);
+	}
+}
+
+static int serdes_7322_init(struct qib_pportdata *ppd)
+{
+	u64 data;
+	u32 le_val;
+
+	/*
+	 * Initialize the Tx DDS tables.  Also done on every QSFP event,
+	 * for adapters with a QSFP module.
+	 */
+	init_txdds_table(ppd, 0);
+
+	/* Patch some SerDes defaults to "Better for IB" */
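+	/* BMASK(msb, lsb) presumably builds a mask of bits msb..lsb inclusive */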
+	/* Timing Loop Bandwidth: cdr_timing[11:9] = 0 */
+	ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));
+
+	/* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */
+	ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));
+	/* Enable LE2: rxle2en_r2a addr 13 bit [6] = 1 */
+	ibsd_wr_allchans(ppd, 13, (1 << 6), (1 << 6));
+
+	/* May be overridden in qsfp_7322_event */
+	le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
+	ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));
+
+	/* enable LE1 adaptation for all but QME, where it is disabled */
+	le_val = IS_QME(ppd->dd) ? 0 : 1;
+	ibsd_wr_allchans(ppd, 13, (le_val << 5), (1 << 5));
+
+	/* Clear cmode-override, may be set from older driver */
+	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);
+
+	/* Timing Recovery: rxtapsel addr 5 bits [9:8] = 0 */
+	ibsd_wr_allchans(ppd, 5, (0 << 8), BMASK(9, 8));
+
+	/* setup LoS params; these are subsystem, so chan == 5 */
+	/* LoS filter threshold_count on, ch 0-3, set to 8 */
+	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
+	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
+	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
+	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));
+
+	/* LoS filter threshold_count off, ch 0-3, set to 4 */
+	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
+	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
+	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
+	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));
+
+	/* LoS filter select enabled */
+	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);
+
+	/* LoS target data:  SDR=4, DDR=2, QDR=1 */
+	ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */
+	ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
+	ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
+
+	data = qib_read_kreg_port(ppd, krp_serdesctrl);
+	qib_write_kreg_port(ppd, krp_serdesctrl, data |
+		SYM_MASK(IBSerdesCtrl_0, RXLOSEN));
+
+	/* rxbistena: set to 0 to avoid side effects if it is switched later */
+	ibsd_wr_allchans(ppd, 9, 0 << 15, 1 << 15);
+
+	/* Configure 4 DFE taps, and only they adapt */
+	ibsd_wr_allchans(ppd, 16, 0 << 0, BMASK(1, 0));
+
+	/* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */
+	le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
+	ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);
+
+	/*
+	 * Set receive adaptation mode.  SDR and DDR adaptation are
+	 * always on, and QDR is initially enabled; later disabled.
+	 */
+	qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
+	qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
+	qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
+			    ppd->dd->cspec->r1 ?
+			    QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN);
+	ppd->cpspec->qdr_dfe_on = 1;
+
+	/* FLoop LOS gate: PPM filter enabled */
+	ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);
+
+	/* rx offset center enabled */
+	ibsd_wr_allchans(ppd, 12, 1 << 4, 1 << 4);
+
+	if (!ppd->dd->cspec->r1) {
+		ibsd_wr_allchans(ppd, 12, 1 << 12, 1 << 12);
+		ibsd_wr_allchans(ppd, 12, 2 << 8, 0x0f << 8);
+	}
+
+	/* Set the frequency loop bandwidth to 15 */
+	ibsd_wr_allchans(ppd, 2, 15 << 5, BMASK(8, 5));
+
+	return 0;
+}
+
+/* start adjust QMH serdes parameters */
+
+static void set_man_code(struct qib_pportdata *ppd, int chan, int code)
+{
+	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
+		9, code << 9, 0x3f << 9);
+}
+
+static void set_man_mode_h1(struct qib_pportdata *ppd, int chan,
+	int enable, u32 tapenable)
+{
+	if (enable)
+		ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
+			1, 3 << 10, 0x1f << 10);
+	else
+		ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
+			1, 0, 0x1f << 10);
+}
+
+/* Set clock to 1, 0, 1, 0 */
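+/* (pulsing bit 0x4000 high-low twice, which presumably clocks the
+ * manual code into the serdes) */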
+static void clock_man(struct qib_pportdata *ppd, int chan)
+{
+	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
+		4, 0x4000, 0x4000);
+	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
+		4, 0, 0x4000);
+	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
+		4, 0x4000, 0x4000);
+	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
+		4, 0, 0x4000);
+}
+
+/*
+ * Write the current Tx serdes pre, post, main and amp settings into the
+ * serdes.  The caller must pass settings appropriate for the current
+ * speed, or not care whether they match the current speed.
+ */
+static void write_tx_serdes_param(struct qib_pportdata *ppd,
+				  struct txdds_ent *txdds)
+{
+	u64 deemph;
+
+	deemph = qib_read_kreg_port(ppd, krp_tx_deemph_override);
+	/* field names for amp, main, post, pre, respectively */
+	deemph &= ~(SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txampcntl_d2a) |
+		    SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txc0_ena) |
+		    SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcp1_ena) |
+		    SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcn1_ena));
+	deemph |= 1ULL << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
+				  tx_override_deemphasis_select);
+	deemph |= txdds->amp << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
+				    txampcntl_d2a);
+	deemph |= txdds->main << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
+				      txc0_ena);
+	deemph |= txdds->post << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
+				     txcp1_ena);
+	deemph |= txdds->pre << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
+				    txcn1_ena);
+	qib_write_kreg_port(ppd, krp_tx_deemph_override, deemph);
+}
+
+/*
+ * Set per-bay, per-channel parameters.  For now we ignore do_tx and
+ * always set the tx parameters, using the same (channel 0) value for
+ * all channels.  We may switch to true per-channel settings in the
+ * future; that change only needs to be made once.
+ * Because this also writes the IBC txdds table with a single set
+ * of values, it should be called only for cases where we want to completely
+ * force a specific setting, typically only for mez cards.
+ */
+static void adj_tx_serdes(struct qib_pportdata *ppd)
+{
+	struct txdds_ent txdds;
+	int i;
+	u8 *amp, *pre, *mainv, *post;
+
+	/*
+	 * Because we use TX_DEEMPHASIS_OVERRIDE, we need to
+	 * always do tx side, just like H1, since it is cleared
+	 * by link down
+	 */
+	amp = ppd->cpspec->amp;
+	pre = ppd->cpspec->pre;
+	mainv = ppd->cpspec->mainv;
+	post = ppd->cpspec->post;
+
+	amp[0] &= SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
+			    txampcntl_d2a);
+	mainv[0] &= SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
+			      txc0_ena);
+	post[0] &= SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
+			     txcp1_ena);
+	pre[0] &= SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
+			    txcn1_ena);
+
+	/*
+	 * For now, use the channel-zero values for all channels.
+	 */
+	txdds.amp = amp[0];
+	txdds.pre = pre[0];
+	txdds.main = mainv[0];
+	txdds.post = post[0];
+
+	/* write the QDR table for IBC use, as backup for link down */
+	for (i = 0; i < ARRAY_SIZE(txdds_qdr); ++i)
+		set_txdds(ppd, i + 32, &txdds);
+
+	write_tx_serdes_param(ppd, &txdds);
+}
+
+/* set QDR forced value for H1, if needed */
+static void force_h1(struct qib_pportdata *ppd)
+{
+	int chan;
+
+	ppd->cpspec->qdr_reforce = 0;
+	if (!ppd->dd->cspec->r1)
+		return;
+
+	for (chan = 0; chan < SERDES_CHANS; chan++) {
+		set_man_mode_h1(ppd, chan, 1, 0);
+		set_man_code(ppd, chan, ppd->cpspec->h1_val);
+		clock_man(ppd, chan);
+		set_man_mode_h1(ppd, chan, 0, 0);
+	}
+}
+
+/*
+ * Parse the parameters for the QMH7342, to get rx and tx serdes
+ * settings for that Bay, for both possible mez connectors (PCIe bus)
+ * and IB link (one link on mez1, two possible on mez2).
+ *
+ * Data is comma or white space separated.
+ *
+ * A set of data has 7 groups, rx and tx groups have SERDES_CHANS values,
+ * one per IB lane (serdes channel).
+ * The groups are Bay, bus# H1 rcv, and amp, pre, post, main Tx values (QDR).
+ * The Bay # is used only for debugging currently.
+ * H1 values are set whenever the link goes down, or is at cfg_test or
+ * cfg_wait_enh.  Tx values are programmed once, when this routine is called
+ * (and with default values at chip initialization).  Values may be in any
+ * base, in strtoul style, and are separated by commas or any white space
+ * (space, tab, newline).
+ *
+ * An example set might look like this (white space vs
+ * comma used for ease of human reading):
+ * The ordering is a set of Bay# Bus# H1, amp, pre, post, and main for mez1 IB1,
+ * repeat for mez2 IB1, then mez2 IB2.
+ *
+ * B B H1:0       amp:0       pre:0        post: 0        main:0
+ * a u H1:  1     amp:  1     pre:  1      post:   1      main:  1
+ * y s H1:    2   amp:    2   pre:    2    post:      2   main:    2
+ *     H1:      4 amp:      3 pre:      3  post:        3 main:      3
+ * 1 3    8,6,5,6     0,0,0,0     1,1,1,1       10,10,10,10    3,3,3,3
+ * 1 6    7,6,6,7     0,0,0,0     1,1,1,1       10,10,10,10    3,3,3,3
+ * 1 6    9,7,7,8     0,0,0,0     1,1,1,1       10,10,10,10    3,3,3,3
+ */
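+/* 2 (bay, bus) + 5 groups (H1, amp, pre, post, main) * SERDES_CHANS (4) = 22 */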
+#define N_QMH_FIELDS 22
+static int setup_qmh_params(const char *str, struct kernel_param *kp)
+{
+	char *abuf, *v, *nv, *nvp;
+	struct qib_devdata *dd;
+	struct qib_pportdata *ppd;
+	u32 mez, vlen, nf, port, bay;
+	int ret = 0, found = 0;
+
+	vlen = strlen(str) + 1;
+	abuf = kmalloc(vlen, GFP_KERNEL);
+	if (!abuf) {
+		printk(KERN_INFO QIB_DRV_NAME
+		       " Unable to allocate QMH param buffer; ignoring\n");
+		return 0;
+	}
+	memcpy(abuf, str, vlen);
+	v = abuf;
+
+	/* these 3 are because gcc can't know they are set before being used */
+	port = 1;
+	mez = 1; /* used only for debugging */
+	bay = 0; /* used only for debugging */
+	ppd = NULL;
+	for (nf = 0; (nv = strsep(&v, ", \t\n\r")) &&
+	     nf < (N_QMH_FIELDS * 3);) {
+		u32 val;
+
+		if (!*nv)
+			/* allow for multiple separators */
+			continue;
+
+		val = simple_strtoul(nv, &nvp, 0);
+		if (nv == nvp) {
+			printk(KERN_INFO QIB_DRV_NAME
+			       " Bay%u, mez%u IB%u non-numeric value (%s) "
+			       "field #%u, ignoring rest\n", bay, mez,
+			       port, nv, nf % (N_QMH_FIELDS * 3));
+			ret = -EINVAL;
+			goto bail;
+		}
+		if (!(nf % N_QMH_FIELDS)) {
+			ppd = NULL;
+			bay = val;
+			if (!bay || bay > 16) {
+				printk(KERN_INFO QIB_DRV_NAME
+				       " Invalid bay # %u, field %u, "
+				       "ignoring rest\n", bay, nf);
+				ret = -EINVAL;
+				goto bail;
+			}
+		} else if ((nf % N_QMH_FIELDS) == 1) {
+			u32 bus = val;
+			if (nf == 1) {
+				mez = 1;
+				port = 1;
+			} else if (nf == (N_QMH_FIELDS + 1)) {
+				mez = 2;
+				port = 1;
+			} else {
+				mez = 2;
+				port = 2;
+			}
+			list_for_each_entry(dd, &qib_dev_list, list) {
+				if (dd->deviceid != PCI_DEVICE_ID_QLOGIC_IB_7322
+				    || !IS_QMH(dd))
+					continue; /* only for QMH cards */
+				if (dd->pcidev->bus->number == bus) {
+					found++;
+					ppd = &dd->pport[port - 1];
+				}
+			}
+		} else if (ppd) {
+			u32 parm = (nf % N_QMH_FIELDS) - 2;
+			if (parm < SERDES_CHANS && !(parm % SERDES_CHANS))
+				ppd->cpspec->h1_val = val;
+			else if (parm < (2 * SERDES_CHANS))
+				ppd->cpspec->amp[parm % SERDES_CHANS] = val;
+			else if (parm < (3 * SERDES_CHANS))
+				ppd->cpspec->pre[parm % SERDES_CHANS] = val;
+			else if (parm < (4 * SERDES_CHANS))
+				ppd->cpspec->post[parm % SERDES_CHANS] = val;
+			else {
+				ppd->cpspec->mainv[parm % SERDES_CHANS] = val;
+				/* At the end of a port, set params */
+				if (parm == ((5 * SERDES_CHANS) - 1))
+					adj_tx_serdes(ppd);
+			}
+		}
+		nf++;
+	}
+	if (!found) {
+		printk(KERN_ERR QIB_DRV_NAME
+		       ": No match found for qmh_serdes_setup parameter\n");
+		ret = -EINVAL;
+	}
+bail:
+	kfree(abuf);
+	return ret;
+}
+
+/*
+ * Similarly for QME7342, but the format is simpler, values are the
+ * same for all mez card positions in a blade (2 or 4 per blade), but
+ * are different for some blades vs others, and we don't need to
+ * specify different parameters for different serdes channels or different
+ * IB ports.
+ * Format is: h1 amp,pre,post,main
+ * Alternate format (so ports can be different): Pport# h1 amp,pre,post,main
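+ * Example (hypothetical values): "7 0,1,10,3", or "P2 7 0,1,10,3" to
+ * apply only to IB port 2.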
+ */
+#define N_QME_FIELDS 5
+static int setup_qme_params(const char *str, struct kernel_param *kp)
+{
+	char *abuf, *v, *nv, *nvp;
+	struct qib_devdata *dd;
+	u32 vlen, nf, port = 0;
+	u8 h1, tx[4]; /* amp, pre, post, main */
+	int ret =  -EINVAL;
+	char *seplist;
+
+	vlen = strlen(str) + 1;
+	abuf = kmalloc(vlen, GFP_KERNEL);
+	if (!abuf) {
+		printk(KERN_INFO QIB_DRV_NAME
+		       " Unable to allocate QME param buffer; ignoring\n");
+		return 0;
+	}
+	strncpy(abuf, str, vlen);
+
+	v = abuf;
+	seplist = " \t";
+	h1 = H1_FORCE_QME; /* gcc can't see that it's always set before use */
+
+	for (nf = 0; (nv = strsep(&v, seplist)); ) {
+		u32 val;
+
+		if (!*nv)
+			/* allow for multiple separators */
+			continue;
+
+		if (!nf && *nv == 'P') {
+			/* alternate format with port */
+			val = simple_strtoul(++nv, &nvp, 0);
+			if (nv == nvp || val > NUM_IB_PORTS) {
+				printk(KERN_INFO QIB_DRV_NAME
+				       " %s: non-numeric port value (%s) "
+				       "ignoring rest\n", __func__, nv);
+				goto done;
+			}
+			port = val;
+			continue; /* without incrementing nf */
+		}
+		val = simple_strtoul(nv, &nvp, 0);
+		if (nv == nvp) {
+			printk(KERN_INFO QIB_DRV_NAME
+			       " %s: non-numeric value (%s) "
+			       "field #%u, ignoring rest\n", __func__,
+			       nv, nf);
+			goto done;
+		}
+		if (!nf) {
+			h1 = val;
+			seplist = ",";
+		} else
+			tx[nf - 1] = val;
+		if (++nf == N_QME_FIELDS) {
+			list_for_each_entry(dd, &qib_dev_list, list) {
+				int pidx, i;
+				if (dd->deviceid != PCI_DEVICE_ID_QLOGIC_IB_7322
+				    || !IS_QME(dd))
+					continue; /* only for QME cards */
+				for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+					struct qib_pportdata *ppd;
+					ppd = &dd->pport[pidx];
+					if ((port && ppd->port != port) ||
+						!ppd->link_speed_supported)
+						continue;
+					ppd->cpspec->h1_val = h1;
+					for (i = 0; i < SERDES_CHANS; i++) {
+						ppd->cpspec->amp[i] = tx[0];
+						ppd->cpspec->pre[i] = tx[1];
+						ppd->cpspec->post[i] = tx[2];
+						ppd->cpspec->mainv[i] = tx[3];
+					}
+					adj_tx_serdes(ppd);
+				}
+			}
+			ret = 0;
+			goto done;
+		}
+	}
+	printk(KERN_INFO QIB_DRV_NAME
+	       " %s: Only %u of %u fields provided, skipping\n",
+	       __func__, nf, N_QME_FIELDS);
+done:
+	kfree(abuf);
+	return ret;
+}
+
+#define SJA_EN SYM_MASK(SPC_JTAG_ACCESS_REG, SPC_JTAG_ACCESS_EN)
+#define BISTEN_LSB SYM_LSB(SPC_JTAG_ACCESS_REG, bist_en)
+
+#define R_OPCODE_LSB 3
+#define R_OP_NOP 0
+#define R_OP_SHIFT 2
+#define R_OP_UPDATE 3
+#define R_TDI_LSB 2
+#define R_TDO_LSB 1
+#define R_RDY 1
+
+static int qib_r_grab(struct qib_devdata *dd)
+{
+	u64 val;
+	val = SJA_EN;
+	qib_write_kreg(dd, kr_r_access, val);
+	qib_read_kreg32(dd, kr_scratch);
+	return 0;
+}
+
+/* qib_r_wait_for_rdy() not only waits for the ready bit, it
+ * returns the current state of R_TDO
+ */
+static int qib_r_wait_for_rdy(struct qib_devdata *dd)
+{
+	u64 val;
+	int timeout;
+	for (timeout = 0; timeout < 100 ; ++timeout) {
+		val = qib_read_kreg32(dd, kr_r_access);
+		if (val & R_RDY)
+			return (val >> R_TDO_LSB) & 1;
+	}
+	return -1;
+}
+
+static int qib_r_shift(struct qib_devdata *dd, int bisten,
+		       int len, u8 *inp, u8 *outp)
+{
+	u64 valbase, val;
+	int ret, pos;
+
+	valbase = SJA_EN | (bisten << BISTEN_LSB) |
+		(R_OP_SHIFT << R_OPCODE_LSB);
+	ret = qib_r_wait_for_rdy(dd);
+	if (ret < 0)
+		goto bail;
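+	/*
+	 * Shift len bits LSB-first: drive TDI from inp, and store the TDO
+	 * bit returned by the previous ready-wait into outp.
+	 */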
+	for (pos = 0; pos < len; ++pos) {
+		val = valbase;
+		if (outp) {
+			outp[pos >> 3] &= ~(1 << (pos & 7));
+			outp[pos >> 3] |= (ret << (pos & 7));
+		}
+		if (inp) {
+			int tdi = inp[pos >> 3] >> (pos & 7);
+			val |= ((tdi & 1) << R_TDI_LSB);
+		}
+		qib_write_kreg(dd, kr_r_access, val);
+		qib_read_kreg32(dd, kr_scratch);
+		ret = qib_r_wait_for_rdy(dd);
+		if (ret < 0)
+			break;
+	}
+	/* Restore to NOP between operations. */
+	val =  SJA_EN | (bisten << BISTEN_LSB);
+	qib_write_kreg(dd, kr_r_access, val);
+	qib_read_kreg32(dd, kr_scratch);
+	ret = qib_r_wait_for_rdy(dd);
+
+	if (ret >= 0)
+		ret = pos;
+bail:
+	return ret;
+}
+
+static int qib_r_update(struct qib_devdata *dd, int bisten)
+{
+	u64 val;
+	int ret;
+
+	val = SJA_EN | (bisten << BISTEN_LSB) | (R_OP_UPDATE << R_OPCODE_LSB);
+	ret = qib_r_wait_for_rdy(dd);
+	if (ret >= 0) {
+		qib_write_kreg(dd, kr_r_access, val);
+		qib_read_kreg32(dd, kr_scratch);
+	}
+	return ret;
+}
+
+#define BISTEN_PORT_SEL 15
+#define LEN_PORT_SEL 625
+#define BISTEN_AT 17
+#define LEN_AT 156
+#define BISTEN_ETM 16
+#define LEN_ETM 632
+
+#define BIT2BYTE(x) (((x) +  BITS_PER_BYTE - 1) / BITS_PER_BYTE)
+
+/* these are common for all IB port use cases. */
+static u8 reset_at[BIT2BYTE(LEN_AT)] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
+};
+static u8 reset_atetm[BIT2BYTE(LEN_ETM)] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x80, 0xe3, 0x81, 0x73, 0x3c, 0x70, 0x8e,
+	0x07, 0xce, 0xf1, 0xc0, 0x39, 0x1e, 0x38, 0xc7, 0x03, 0xe7,
+	0x78, 0xe0, 0x1c, 0x0f, 0x9c, 0x7f, 0x80, 0x73, 0x0f, 0x70,
+	0xde, 0x01, 0xce, 0x39, 0xc0, 0xf9, 0x06, 0x38, 0xd7, 0x00,
+	0xe7, 0x19, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+};
+static u8 at[BIT2BYTE(LEN_AT)] = {
+	0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
+};
+
+/* used for IB1 or IB2, only one in use */
+static u8 atetm_1port[BIT2BYTE(LEN_ETM)] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x10, 0xf2, 0x80, 0x83, 0x1e, 0x38, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xc8, 0x03,
+	0x07, 0x7b, 0xa0, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x18, 0x00,
+	0x18, 0x00, 0x00, 0x00, 0x00, 0x4b, 0x00, 0x00, 0x00,
+};
+
+/* used when both IB1 and IB2 are in use */
+static u8 atetm_2port[BIT2BYTE(LEN_ETM)] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x79,
+	0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0xf8, 0x80, 0x83, 0x1e, 0x38, 0xe0, 0x03, 0x05,
+	0x7b, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
+	0xa2, 0x0f, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xd1, 0x07,
+	0x02, 0x7c, 0x80, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x3e, 0x00,
+	0x02, 0x00, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x00,
+};
+
+/* used when only IB1 is in use */
+static u8 portsel_port1[BIT2BYTE(LEN_PORT_SEL)] = {
+	0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13,
+	0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c,
+	0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
+	0x13, 0x78, 0x78, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
+	0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32,
+	0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
+	0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
+	0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+/* used when only IB2 is in use */
+static u8 portsel_port2[BIT2BYTE(LEN_PORT_SEL)] = {
+	0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x39, 0x39,
+	0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x73, 0x32, 0x32, 0x32,
+	0x32, 0x32, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
+	0x39, 0x78, 0x78, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
+	0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x74, 0x32,
+	0x32, 0x32, 0x32, 0x32, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a,
+	0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a,
+	0x3a, 0x3a, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x01,
+};
+
+/* used when both IB1 and IB2 are in use */
+static u8 portsel_2port[BIT2BYTE(LEN_PORT_SEL)] = {
+	0x32, 0xba, 0x54, 0x76, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13,
+	0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c,
+	0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
+	0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
+	0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32,
+	0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x3a,
+	0x3a, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
+	0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+/*
+ * Do setup to properly handle IB link recovery; if port is zero, we
+ * are initializing to cover both ports; otherwise we are initializing
+ * to cover a single port card, or the port has reached INIT and we may
+ * need to switch coverage types.
+ */
+static void setup_7322_link_recovery(struct qib_pportdata *ppd, u32 both)
+{
+	u8 *portsel, *etm;
+	struct qib_devdata *dd = ppd->dd;
+
+	if (!ppd->dd->cspec->r1)
+		return;
+	if (!both) {
+		dd->cspec->recovery_ports_initted++;
+		ppd->cpspec->recovery_init = 1;
+	}
+	if (!both && dd->cspec->recovery_ports_initted == 1) {
+		portsel = ppd->port == 1 ? portsel_port1 : portsel_port2;
+		etm = atetm_1port;
+	} else {
+		portsel = portsel_2port;
+		etm = atetm_2port;
+	}
+
+	if (qib_r_grab(dd) < 0 ||
+		qib_r_shift(dd, BISTEN_ETM, LEN_ETM, reset_atetm, NULL) < 0 ||
+		qib_r_update(dd, BISTEN_ETM) < 0 ||
+		qib_r_shift(dd, BISTEN_AT, LEN_AT, reset_at, NULL) < 0 ||
+		qib_r_update(dd, BISTEN_AT) < 0 ||
+		qib_r_shift(dd, BISTEN_PORT_SEL, LEN_PORT_SEL,
+			    portsel, NULL) < 0 ||
+		qib_r_update(dd, BISTEN_PORT_SEL) < 0 ||
+		qib_r_shift(dd, BISTEN_AT, LEN_AT, at, NULL) < 0 ||
+		qib_r_update(dd, BISTEN_AT) < 0 ||
+		qib_r_shift(dd, BISTEN_ETM, LEN_ETM, etm, NULL) < 0 ||
+		qib_r_update(dd, BISTEN_ETM) < 0)
+		qib_dev_err(dd, "Failed IB link recovery setup\n");
+}
+
+static void check_7322_rxe_status(struct qib_pportdata *ppd)
+{
+	struct qib_devdata *dd = ppd->dd;
+	u64 fmask;
+
+	if (dd->cspec->recovery_ports_initted != 1)
+		return; /* rest doesn't apply to dualport */
+	qib_write_kreg(dd, kr_control, dd->control |
+		       SYM_MASK(Control, FreezeMode));
+	(void)qib_read_kreg64(dd, kr_scratch);
+	udelay(3); /* ibcreset asserted 400ns, be sure that's over */
+	fmask = qib_read_kreg64(dd, kr_act_fmask);
+	if (!fmask) {
+		/*
+		 * require a powercycle before we'll work again, and make
+		 * sure we get no more interrupts, and don't turn off
+		 * freeze.
+		 */
+		ppd->dd->cspec->stay_in_freeze = 1;
+		qib_7322_set_intr_state(ppd->dd, 0);
+		qib_write_kreg(dd, kr_fmask, 0ULL);
+		qib_dev_err(dd, "HCA unusable until powercycled\n");
+		return; /* eventually reset */
+	}
+
+	qib_write_kreg(ppd->dd, kr_hwerrclear,
+	    SYM_MASK(HwErrClear, IBSerdesPClkNotDetectClear_1));
+
+	/* don't do the full clear_freeze(), not needed for this */
+	qib_write_kreg(dd, kr_control, dd->control);
+	qib_read_kreg32(dd, kr_scratch);
+	/* take IBC out of reset */
+	if (ppd->link_speed_supported) {
+		ppd->cpspec->ibcctrl_a &=
+			~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
+		qib_write_kreg_port(ppd, krp_ibcctrl_a,
+				    ppd->cpspec->ibcctrl_a);
+		qib_read_kreg32(dd, kr_scratch);
+		if (ppd->lflags & QIBL_IB_LINK_DISABLED)
+			qib_set_ib_7322_lstate(ppd, 0,
+				QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
+	}
+}
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
new file mode 100644
index 000000000000..c0139c07e97e
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -0,0 +1,1580 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
+ * All rights reserved.
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+#include <linux/delay.h>
+#include <linux/idr.h>
+
+#include "qib.h"
+#include "qib_common.h"
+
+/*
+ * min buffers we want to have per context, after those used by the driver
+ */
+#define QIB_MIN_USER_CTXT_BUFCNT 7
+
+#define QLOGIC_IB_R_SOFTWARE_MASK 0xFF
+#define QLOGIC_IB_R_SOFTWARE_SHIFT 24
+#define QLOGIC_IB_R_EMULATOR_MASK (1ULL<<62)
+
+/*
+ * Number of ctxts we are configured to use (to allow for more pio
+ * buffers per ctxt, etc.)  Zero means use chip value.
+ */
+ushort qib_cfgctxts;
+module_param_named(cfgctxts, qib_cfgctxts, ushort, S_IRUGO);
+MODULE_PARM_DESC(cfgctxts, "Set max number of contexts to use");
+
+/*
+ * If set, do not write to any regs if avoidable; a hack to allow
+ * checking for deranged default register values.
+ */
+ushort qib_mini_init;
+module_param_named(mini_init, qib_mini_init, ushort, S_IRUGO);
+MODULE_PARM_DESC(mini_init, "If set, do minimal diag init");
+
+unsigned qib_n_krcv_queues;
+module_param_named(krcvqs, qib_n_krcv_queues, uint, S_IRUGO);
+MODULE_PARM_DESC(krcvqs, "number of kernel receive queues per IB port");
+
+/*
+ * qib_wc_pat parameter:
+ *      0 is WC via MTRR
+ *      1 is WC via PAT
+ *      If PAT initialization fails, the code falls back to MTRR
+ */
+unsigned qib_wc_pat = 1; /* default (1) is to use PAT, not MTRR */
+module_param_named(wc_pat, qib_wc_pat, uint, S_IRUGO);
+MODULE_PARM_DESC(wc_pat, "enable write-combining via PAT mechanism");
+
+struct workqueue_struct *qib_wq;
+struct workqueue_struct *qib_cq_wq;
+
+static void verify_interrupt(unsigned long);
+
+static struct idr qib_unit_table;
+u32 qib_cpulist_count;
+unsigned long *qib_cpulist;
+
+/* set number of contexts we'll actually use */
+void qib_set_ctxtcnt(struct qib_devdata *dd)
+{
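+	/*
+	 * Use the chip's context count unless the cfgctxts module parameter
+	 * supplies a usable override (at least num_pports, at most the
+	 * chip's count).
+	 */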
+	if (!qib_cfgctxts)
+		dd->cfgctxts = dd->ctxtcnt;
+	else if (qib_cfgctxts < dd->num_pports)
+		dd->cfgctxts = dd->ctxtcnt;
+	else if (qib_cfgctxts <= dd->ctxtcnt)
+		dd->cfgctxts = qib_cfgctxts;
+	else
+		dd->cfgctxts = dd->ctxtcnt;
+}
+
+/*
+ * Common code for creating the receive context array.
+ */
+int qib_create_ctxts(struct qib_devdata *dd)
+{
+	unsigned i;
+	int ret;
+
+	/*
+	 * Allocate full ctxtcnt array, rather than just cfgctxts, because
+	 * cleanup iterates across all possible ctxts.
+	 */
+	dd->rcd = kzalloc(sizeof(*dd->rcd) * dd->ctxtcnt, GFP_KERNEL);
+	if (!dd->rcd) {
+		qib_dev_err(dd, "Unable to allocate ctxtdata array, "
+			    "failing\n");
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	/* create (one or more) kctxt */
+	for (i = 0; i < dd->first_user_ctxt; ++i) {
+		struct qib_pportdata *ppd;
+		struct qib_ctxtdata *rcd;
+
+		if (dd->skip_kctxt_mask & (1 << i))
+			continue;
+
+		ppd = dd->pport + (i % dd->num_pports);
+		rcd = qib_create_ctxtdata(ppd, i);
+		if (!rcd) {
+			qib_dev_err(dd, "Unable to allocate ctxtdata"
+				    " for Kernel ctxt, failing\n");
+			ret = -ENOMEM;
+			goto done;
+		}
+		rcd->pkeys[0] = QIB_DEFAULT_P_KEY;
+		rcd->seq_cnt = 1;
+	}
+	ret = 0;
+done:
+	return ret;
+}
+
+/*
+ * Common code for user and kernel context setup.
+ */
+struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *ppd, u32 ctxt)
+{
+	struct qib_devdata *dd = ppd->dd;
+	struct qib_ctxtdata *rcd;
+
+	rcd = kzalloc(sizeof(*rcd), GFP_KERNEL);
+	if (rcd) {
+		INIT_LIST_HEAD(&rcd->qp_wait_list);
+		rcd->ppd = ppd;
+		rcd->dd = dd;
+		rcd->cnt = 1;
+		rcd->ctxt = ctxt;
+		dd->rcd[ctxt] = rcd;
+
+		dd->f_init_ctxt(rcd);
+
+		/*
+		 * To avoid wasting a lot of memory, we allocate 32KB chunks
+		 * of physically contiguous memory, advance through it until
+		 * used up and then allocate more.  Of course, we need
+		 * memory to store those extra pointers, now.  32KB seems to
+		 * be the most that is "safe" under memory pressure
+		 * (creating large files and then copying them over
+		 * NFS while doing lots of MPI jobs).  The OOM killer can
+		 * get invoked, even though we say we can sleep, and this can
+		 * cause significant system problems....
+		 */
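+		/* e.g. (hypothetical) 4 KiB eager buffers give 8 per 32 KiB chunk */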
+		rcd->rcvegrbuf_size = 0x8000;
+		rcd->rcvegrbufs_perchunk =
+			rcd->rcvegrbuf_size / dd->rcvegrbufsize;
+		rcd->rcvegrbuf_chunks = (rcd->rcvegrcnt +
+			rcd->rcvegrbufs_perchunk - 1) /
+			rcd->rcvegrbufs_perchunk;
+	}
+	return rcd;
+}
+
+/*
+ * Common code for initializing the physical port structure.
+ */
+void qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd,
+			u8 hw_pidx, u8 port)
+{
+	ppd->dd = dd;
+	ppd->hw_pidx = hw_pidx;
+	ppd->port = port; /* IB port number, not index */
+
+	spin_lock_init(&ppd->sdma_lock);
+	spin_lock_init(&ppd->lflags_lock);
+	init_waitqueue_head(&ppd->state_wait);
+
+	init_timer(&ppd->symerr_clear_timer);
+	ppd->symerr_clear_timer.function = qib_clear_symerror_on_linkup;
+	ppd->symerr_clear_timer.data = (unsigned long)ppd;
+}
+
+static int init_pioavailregs(struct qib_devdata *dd)
+{
+	int ret, pidx;
+	u64 *status_page;
+
+	dd->pioavailregs_dma = dma_alloc_coherent(
+		&dd->pcidev->dev, PAGE_SIZE, &dd->pioavailregs_phys,
+		GFP_KERNEL);
+	if (!dd->pioavailregs_dma) {
+		qib_dev_err(dd, "failed to allocate PIOavail reg area "
+			    "in memory\n");
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	/*
+	 * We really want L2 cache aligned, but for current CPUs of
+	 * interest, they are the same.
+	 */
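+	/*
+	 * Layout of the DMA page: pioavail shadow registers first, then
+	 * (past roughly two cache lines of padding) the device status word,
+	 * the per-port status words, and finally the freeze-message buffer.
+	 */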
+	status_page = (u64 *)
+		((char *) dd->pioavailregs_dma +
+		 ((2 * L1_CACHE_BYTES +
+		   dd->pioavregs * sizeof(u64)) & ~L1_CACHE_BYTES));
+	/* device status comes first, for backwards compatibility */
+	dd->devstatusp = status_page;
+	*status_page++ = 0;
+	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+		dd->pport[pidx].statusp = status_page;
+		*status_page++ = 0;
+	}
+
+	/*
+	 * Setup buffer to hold freeze and other messages, accessible to
+	 * apps, following statusp.  This is per-unit, not per port.
+	 */
+	dd->freezemsg = (char *) status_page;
+	*dd->freezemsg = 0;
+	/* length of msg buffer is "whatever is left" */
+	ret = (char *) status_page - (char *) dd->pioavailregs_dma;
+	dd->freezelen = PAGE_SIZE - ret;
+
+	ret = 0;
+
+done:
+	return ret;
+}
+
+/**
+ * init_shadow_tids - allocate the shadow TID array
+ * @dd: the qlogic_ib device
+ *
+ * allocate the shadow TID array, so we can qib_munlock previous
+ * entries.  It may make more sense to move the pageshadow to the
+ * ctxt data structure, so we only allocate memory for ctxts actually
+ * in use, since we are now at 8k per ctxt.
+ * We don't want failures here to prevent use of the driver/chip,
+ * so no return value.
+ */
+static void init_shadow_tids(struct qib_devdata *dd)
+{
+	struct page **pages;
+	dma_addr_t *addrs;
+
+	pages = vmalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *));
+	if (!pages) {
+		qib_dev_err(dd, "failed to allocate shadow page * "
+			    "array, no expected sends!\n");
+		goto bail;
+	}
+
+	addrs = vmalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t));
+	if (!addrs) {
+		qib_dev_err(dd, "failed to allocate shadow dma handle "
+			    "array, no expected sends!\n");
+		goto bail_free;
+	}
+
+	memset(pages, 0, dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *));
+	memset(addrs, 0, dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t));
+
+	dd->pageshadow = pages;
+	dd->physshadow = addrs;
+	return;
+
+bail_free:
+	vfree(pages);
+bail:
+	dd->pageshadow = NULL;
+}
+
+/*
+ * Do initialization for device that is only needed on
+ * first detect, not on resets.
+ */
+static int loadtime_init(struct qib_devdata *dd)
+{
+	int ret = 0;
+
+	if (((dd->revision >> QLOGIC_IB_R_SOFTWARE_SHIFT) &
+	     QLOGIC_IB_R_SOFTWARE_MASK) != QIB_CHIP_SWVERSION) {
+		qib_dev_err(dd, "Driver only handles version %d, "
+			    "chip swversion is %d (%llx), failng\n",
+			    QIB_CHIP_SWVERSION,
+			    (int)(dd->revision >>
+				QLOGIC_IB_R_SOFTWARE_SHIFT) &
+			    QLOGIC_IB_R_SOFTWARE_MASK,
+			    (unsigned long long) dd->revision);
+		ret = -ENOSYS;
+		goto done;
+	}
+
+	if (dd->revision & QLOGIC_IB_R_EMULATOR_MASK)
+		qib_devinfo(dd->pcidev, "%s", dd->boardversion);
+
+	spin_lock_init(&dd->pioavail_lock);
+	spin_lock_init(&dd->sendctrl_lock);
+	spin_lock_init(&dd->uctxt_lock);
+	spin_lock_init(&dd->qib_diag_trans_lock);
+	spin_lock_init(&dd->eep_st_lock);
+	mutex_init(&dd->eep_lock);
+
+	if (qib_mini_init)
+		goto done;
+
+	ret = init_pioavailregs(dd);
+	init_shadow_tids(dd);
+
+	qib_get_eeprom_info(dd);
+
+	/* set up timer (don't start it yet) to verify we get interrupts */
+	init_timer(&dd->intrchk_timer);
+	dd->intrchk_timer.function = verify_interrupt;
+	dd->intrchk_timer.data = (unsigned long) dd;
+
+done:
+	return ret;
+}
+
+/**
+ * init_after_reset - re-initialize after a reset
+ * @dd: the qlogic_ib device
+ *
+ * sanity check at least some of the values after reset, and
+ * ensure no receive or transmit (explicitly, in case the reset
+ * failed).
+ */
+static int init_after_reset(struct qib_devdata *dd)
+{
+	int i;
+
+	/*
+	 * Ensure chip does no sends or receives, tail updates, or
+	 * pioavail updates while we re-initialize.  This is mostly
+	 * for the driver data structures, not chip registers.
+	 */
+	for (i = 0; i < dd->num_pports; ++i) {
+		/*
+		 * ctxt == -1 means "all contexts". Only really safe for
+		 * _dis_abling things, as here.
+		 */
+		dd->f_rcvctrl(dd->pport + i, QIB_RCVCTRL_CTXT_DIS |
+				  QIB_RCVCTRL_INTRAVAIL_DIS |
+				  QIB_RCVCTRL_TAILUPD_DIS, -1);
+		/* Redundant across ports for some, but no big deal.  */
+		dd->f_sendctrl(dd->pport + i, QIB_SENDCTRL_SEND_DIS |
+			QIB_SENDCTRL_AVAIL_DIS);
+	}
+
+	return 0;
+}
+
+static void enable_chip(struct qib_devdata *dd)
+{
+	u64 rcvmask;
+	int i;
+
+	/*
+	 * Enable PIO send, and update of PIOavail regs to memory.
+	 */
+	for (i = 0; i < dd->num_pports; ++i)
+		dd->f_sendctrl(dd->pport + i, QIB_SENDCTRL_SEND_ENB |
+			QIB_SENDCTRL_AVAIL_ENB);
+	/*
+	 * Enable kernel ctxts' receive and receive interrupt.
+	 * Other ctxts done as user opens and inits them.
+	 */
+	rcvmask = QIB_RCVCTRL_CTXT_ENB | QIB_RCVCTRL_INTRAVAIL_ENB;
+	rcvmask |= (dd->flags & QIB_NODMA_RTAIL) ?
+		  QIB_RCVCTRL_TAILUPD_DIS : QIB_RCVCTRL_TAILUPD_ENB;
+	for (i = 0; dd->rcd && i < dd->first_user_ctxt; ++i) {
+		struct qib_ctxtdata *rcd = dd->rcd[i];
+
+		if (rcd)
+			dd->f_rcvctrl(rcd->ppd, rcvmask, i);
+	}
+}
+
+static void verify_interrupt(unsigned long opaque)
+{
+	struct qib_devdata *dd = (struct qib_devdata *) opaque;
+
+	if (!dd)
+		return; /* being torn down */
+
+	/*
+	 * If we don't have any interrupts, let the user know and
+	 * don't bother checking again.
+	 */
+	if (dd->int_counter == 0) {
+		if (!dd->f_intr_fallback(dd))
+			dev_err(&dd->pcidev->dev, "No interrupts detected, "
+				"not usable.\n");
+		else /* re-arm the timer to see if fallback works */
+			mod_timer(&dd->intrchk_timer, jiffies + HZ/2);
+	}
+}
+
+static void init_piobuf_state(struct qib_devdata *dd)
+{
+	int i, pidx;
+	u32 uctxts;
+
+	/*
+	 * Ensure all buffers are free, and fifos empty.  Buffers
+	 * are common, so only do once for port 0.
+	 *
+	 * Done after enable and qib_chg_pioavailkernel() so we can safely
+	 * enable pioavail updates and PIOENABLE.  After this, packets
+	 * are ready and able to go out.
+	 */
+	dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_ALL);
+	for (pidx = 0; pidx < dd->num_pports; ++pidx)
+		dd->f_sendctrl(dd->pport + pidx, QIB_SENDCTRL_FLUSH);
+
+	/*
+	 * If not all sendbufs are used, add the one to each of the lower
+	 * numbered contexts.  pbufsctxt and lastctxt_piobuf are
+	 * calculated in chip-specific code because it may cause some
+	 * chip-specific adjustments to be made.
+	 */
+	uctxts = dd->cfgctxts - dd->first_user_ctxt;
+	dd->ctxts_extrabuf = dd->pbufsctxt ?
+		dd->lastctxt_piobuf - (dd->pbufsctxt * uctxts) : 0;
+
+	/*
+	 * Set up the shadow copies of the piobufavail registers,
+	 * which we compare against the chip registers for now, and
+	 * the in memory DMA'ed copies of the registers.
+	 * By now pioavail updates to memory should have occurred, so
+	 * copy them into our working/shadow registers; this is in
+	 * case something went wrong with abort, but mostly to get the
+	 * initial values of the generation bit correct.
+	 */
+	for (i = 0; i < dd->pioavregs; i++) {
+		__le64 tmp;
+
+		tmp = dd->pioavailregs_dma[i];
+		/*
+		 * Don't need to worry about pioavailkernel here
+		 * because we will call qib_chg_pioavailkernel() later
+		 * in initialization, to busy out buffers as needed.
+		 */
+		dd->pioavailshadow[i] = le64_to_cpu(tmp);
+	}
+	while (i < ARRAY_SIZE(dd->pioavailshadow))
+		dd->pioavailshadow[i++] = 0; /* for debugging sanity */
+
+	/* after pioavailshadow is setup */
+	qib_chg_pioavailkernel(dd, 0, dd->piobcnt2k + dd->piobcnt4k,
+			       TXCHK_CHG_TYPE_KERN, NULL);
+	dd->f_initvl15_bufs(dd);
+}
+
+/**
+ * qib_init - do the actual initialization sequence on the chip
+ * @dd: the qlogic_ib device
+ * @reinit: reinitializing, so don't allocate new memory
+ *
+ * Do the actual initialization sequence on the chip.  This is done
+ * both from the init routine called from the PCI infrastructure, and
+ * when we reset the chip, or detect that it was reset internally,
+ * or it's administratively re-enabled.
+ *
+ * Memory allocation here and in called routines is only done in
+ * the first case (reinit == 0).  We have to be careful, because even
+ * without memory allocation, we need to re-write all the chip registers,
+ * TIDs, etc. after the reset or enable has completed.
+ */
+int qib_init(struct qib_devdata *dd, int reinit)
+{
+	int ret = 0, pidx, lastfail = 0;
+	u32 portok = 0;
+	unsigned i;
+	struct qib_ctxtdata *rcd;
+	struct qib_pportdata *ppd;
+	unsigned long flags;
+
+	/* Set linkstate to unknown, so we can watch for a transition. */
+	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+		ppd = dd->pport + pidx;
+		spin_lock_irqsave(&ppd->lflags_lock, flags);
+		ppd->lflags &= ~(QIBL_LINKACTIVE | QIBL_LINKARMED |
+				 QIBL_LINKDOWN | QIBL_LINKINIT |
+				 QIBL_LINKV);
+		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+	}
+
+	if (reinit)
+		ret = init_after_reset(dd);
+	else
+		ret = loadtime_init(dd);
+	if (ret)
+		goto done;
+
+	/* Bypass most chip-init, to get to device creation */
+	if (qib_mini_init)
+		return 0;
+
+	ret = dd->f_late_initreg(dd);
+	if (ret)
+		goto done;
+
+	/* dd->rcd can be NULL if early init failed */
+	for (i = 0; dd->rcd && i < dd->first_user_ctxt; ++i) {
+		/*
+		 * Set up the (kernel) rcvhdr queue and egr TIDs.  If doing
+		 * re-init, the simplest way to handle this is to free
+		 * existing, and re-allocate.
+		 * Need to re-create rest of ctxt 0 ctxtdata as well.
+		 */
+		rcd = dd->rcd[i];
+		if (!rcd)
+			continue;
+
+		lastfail = qib_create_rcvhdrq(dd, rcd);
+		if (!lastfail)
+			lastfail = qib_setup_eagerbufs(rcd);
+		if (lastfail) {
+			qib_dev_err(dd, "failed to allocate kernel ctxt's "
+				    "rcvhdrq and/or egr bufs\n");
+			continue;
+		}
+	}
+
+	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+		int mtu;
+		if (lastfail)
+			ret = lastfail;
+		ppd = dd->pport + pidx;
+		mtu = ib_mtu_enum_to_int(qib_ibmtu);
+		if (mtu == -1) {
+			mtu = QIB_DEFAULT_MTU;
+			qib_ibmtu = 0; /* don't leave invalid value */
+		}
+		/* set max we can ever have for this driver load */
+		ppd->init_ibmaxlen = min(mtu > 2048 ?
+					 dd->piosize4k : dd->piosize2k,
+					 dd->rcvegrbufsize +
+					 (dd->rcvhdrentsize << 2));
+		/*
+		 * Have to initialize ibmaxlen, but this will normally
+		 * change immediately in qib_set_mtu().
+		 */
+		ppd->ibmaxlen = ppd->init_ibmaxlen;
+		qib_set_mtu(ppd, mtu);
+
+		spin_lock_irqsave(&ppd->lflags_lock, flags);
+		ppd->lflags |= QIBL_IB_LINK_DISABLED;
+		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+
+		lastfail = dd->f_bringup_serdes(ppd);
+		if (lastfail) {
+			qib_devinfo(dd->pcidev,
+				 "Failed to bringup IB port %u\n", ppd->port);
+			lastfail = -ENETDOWN;
+			continue;
+		}
+
+		/* let link come up, and enable IBC */
+		spin_lock_irqsave(&ppd->lflags_lock, flags);
+		ppd->lflags &= ~QIBL_IB_LINK_DISABLED;
+		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+		portok++;
+	}
+
+	if (!portok) {
+		/* none of the ports initialized */
+		if (!ret && lastfail)
+			ret = lastfail;
+		else if (!ret)
+			ret = -ENETDOWN;
+		/* but continue on, so we can debug cause */
+	}
+
+	enable_chip(dd);
+
+	init_piobuf_state(dd);
+
+done:
+	if (!ret) {
+		/* chip is OK for user apps; mark it as initialized */
+		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+			ppd = dd->pport + pidx;
+			/*
+			 * Set status even if port serdes is not initialized
+			 * so that diags will work.
+			 */
+			*ppd->statusp |= QIB_STATUS_CHIP_PRESENT |
+				QIB_STATUS_INITTED;
+			if (!ppd->link_speed_enabled)
+				continue;
+			if (dd->flags & QIB_HAS_SEND_DMA)
+				ret = qib_setup_sdma(ppd);
+			init_timer(&ppd->hol_timer);
+			ppd->hol_timer.function = qib_hol_event;
+			ppd->hol_timer.data = (unsigned long)ppd;
+			ppd->hol_state = QIB_HOL_UP;
+		}
+
+		/* now we can enable all interrupts from the chip */
+		dd->f_set_intr_state(dd, 1);
+
+		/*
+		 * Setup to verify we get an interrupt, and fallback
+		 * to an alternate if necessary and possible.
+		 */
+		mod_timer(&dd->intrchk_timer, jiffies + HZ/2);
+		/* start stats retrieval timer */
+		mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
+	}
+
+	/* if ret is non-zero, we probably should do some cleanup here... */
+	return ret;
+}
+
+/*
+ * These next two routines are placeholders in case we don't have per-arch
+ * code for controlling write combining.  If explicit control of write
+ * combining is not available, performance will probably be awful.
+ */
+
+int __attribute__((weak)) qib_enable_wc(struct qib_devdata *dd)
+{
+	return -EOPNOTSUPP;
+}
+
+void __attribute__((weak)) qib_disable_wc(struct qib_devdata *dd)
+{
+}
+
+static inline struct qib_devdata *__qib_lookup(int unit)
+{
+	return idr_find(&qib_unit_table, unit);
+}
+
+struct qib_devdata *qib_lookup(int unit)
+{
+	struct qib_devdata *dd;
+	unsigned long flags;
+
+	spin_lock_irqsave(&qib_devs_lock, flags);
+	dd = __qib_lookup(unit);
+	spin_unlock_irqrestore(&qib_devs_lock, flags);
+
+	return dd;
+}
+
+/*
+ * Stop the timers during unit shutdown, or after an error late
+ * in initialization.
+ */
+static void qib_stop_timers(struct qib_devdata *dd)
+{
+	struct qib_pportdata *ppd;
+	int pidx;
+
+	if (dd->stats_timer.data) {
+		del_timer_sync(&dd->stats_timer);
+		dd->stats_timer.data = 0;
+	}
+	if (dd->intrchk_timer.data) {
+		del_timer_sync(&dd->intrchk_timer);
+		dd->intrchk_timer.data = 0;
+	}
+	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+		ppd = dd->pport + pidx;
+		if (ppd->hol_timer.data)
+			del_timer_sync(&ppd->hol_timer);
+		if (ppd->led_override_timer.data) {
+			del_timer_sync(&ppd->led_override_timer);
+			atomic_set(&ppd->led_override_timer_active, 0);
+		}
+		if (ppd->symerr_clear_timer.data)
+			del_timer_sync(&ppd->symerr_clear_timer);
+	}
+}
+
+/**
+ * qib_shutdown_device - shut down a device
+ * @dd: the qlogic_ib device
+ *
+ * This is called to make the device quiet when we are about to
+ * unload the driver, and also when the device is administratively
+ * disabled.  It does not free any data structures.
+ * Everything it does has to be setup again by qib_init(dd, 1)
+ */
+static void qib_shutdown_device(struct qib_devdata *dd)
+{
+	struct qib_pportdata *ppd;
+	unsigned pidx;
+
+	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+		ppd = dd->pport + pidx;
+
+		spin_lock_irq(&ppd->lflags_lock);
+		ppd->lflags &= ~(QIBL_LINKDOWN | QIBL_LINKINIT |
+				 QIBL_LINKARMED | QIBL_LINKACTIVE |
+				 QIBL_LINKV);
+		spin_unlock_irq(&ppd->lflags_lock);
+		*ppd->statusp &= ~(QIB_STATUS_IB_CONF | QIB_STATUS_IB_READY);
+	}
+	dd->flags &= ~QIB_INITTED;
+
+	/* mask interrupts, but not errors */
+	dd->f_set_intr_state(dd, 0);
+
+	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+		ppd = dd->pport + pidx;
+		dd->f_rcvctrl(ppd, QIB_RCVCTRL_TAILUPD_DIS |
+				   QIB_RCVCTRL_CTXT_DIS |
+				   QIB_RCVCTRL_INTRAVAIL_DIS |
+				   QIB_RCVCTRL_PKEY_ENB, -1);
+		/*
+		 * Gracefully stop all sends allowing any in progress to
+		 * trickle out first.
+		 */
+		dd->f_sendctrl(ppd, QIB_SENDCTRL_CLEAR);
+	}
+
+	/*
+	 * Wait long enough for anything that's going to trickle out to
+	 * have actually done so.
+	 */
+	udelay(20);
+
+	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+		ppd = dd->pport + pidx;
+		dd->f_setextled(ppd, 0); /* make sure LEDs are off */
+
+		if (dd->flags & QIB_HAS_SEND_DMA)
+			qib_teardown_sdma(ppd);
+
+		dd->f_sendctrl(ppd, QIB_SENDCTRL_AVAIL_DIS |
+				    QIB_SENDCTRL_SEND_DIS);
+		/*
+		 * Clear SerdesEnable.
+		 * We can't count on interrupts since we are stopping.
+		 */
+		dd->f_quiet_serdes(ppd);
+	}
+
+	qib_update_eeprom_log(dd);
+}
+
+/**
+ * qib_free_ctxtdata - free a context's allocated data
+ * @dd: the qlogic_ib device
+ * @rcd: the ctxtdata structure
+ *
+ * free up any allocated data for a context
+ * This should not touch anything that would affect a simultaneous
+ * re-allocation of context data, because it is called after qib_mutex
+ * is released (and can be called from reinit as well).
+ * It should never change any chip state, or global driver state.
+ */
+void qib_free_ctxtdata(struct qib_devdata *dd, struct qib_ctxtdata *rcd)
+{
+	if (!rcd)
+		return;
+
+	if (rcd->rcvhdrq) {
+		dma_free_coherent(&dd->pcidev->dev, rcd->rcvhdrq_size,
+				  rcd->rcvhdrq, rcd->rcvhdrq_phys);
+		rcd->rcvhdrq = NULL;
+		if (rcd->rcvhdrtail_kvaddr) {
+			dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
+					  rcd->rcvhdrtail_kvaddr,
+					  rcd->rcvhdrqtailaddr_phys);
+			rcd->rcvhdrtail_kvaddr = NULL;
+		}
+	}
+	if (rcd->rcvegrbuf) {
+		unsigned e;
+
+		for (e = 0; e < rcd->rcvegrbuf_chunks; e++) {
+			void *base = rcd->rcvegrbuf[e];
+			size_t size = rcd->rcvegrbuf_size;
+
+			dma_free_coherent(&dd->pcidev->dev, size,
+					  base, rcd->rcvegrbuf_phys[e]);
+		}
+		kfree(rcd->rcvegrbuf);
+		rcd->rcvegrbuf = NULL;
+		kfree(rcd->rcvegrbuf_phys);
+		rcd->rcvegrbuf_phys = NULL;
+		rcd->rcvegrbuf_chunks = 0;
+	}
+
+	kfree(rcd->tid_pg_list);
+	vfree(rcd->user_event_mask);
+	vfree(rcd->subctxt_uregbase);
+	vfree(rcd->subctxt_rcvegrbuf);
+	vfree(rcd->subctxt_rcvhdr_base);
+	kfree(rcd);
+}
+
+/*
+ * Perform a PIO buffer bandwidth write test, to verify proper system
+ * configuration.  Even when all the setup calls work, occasionally
+ * BIOS or other issues can prevent write combining from working, or
+ * can cause other bandwidth problems to the chip.
+ *
+ * This test simply writes the same buffer over and over again, and
+ * measures close to the peak bandwidth to the chip (not testing
+ * data bandwidth to the wire).   On chips that use an address-based
+ * trigger to send packets to the wire, this is easy.  On chips that
+ * use a count to trigger, we want to make sure that the packet doesn't
+ * go out on the wire, or trigger flow control checks.
+ */
+static void qib_verify_pioperf(struct qib_devdata *dd)
+{
+	u32 pbnum, cnt, lcnt;
+	u32 __iomem *piobuf;
+	u32 *addr;
+	u64 msecs, emsecs;
+
+	piobuf = dd->f_getsendbuf(dd->pport, 0ULL, &pbnum);
+	if (!piobuf) {
+		qib_devinfo(dd->pcidev,
+			 "No PIObufs for checking perf, skipping\n");
+		return;
+	}
+
+	/*
+	 * Enough to give us a reasonable test, less than piobuf size, and
+	 * likely multiple of store buffer length.
+	 */
+	cnt = 1024;
+
+	addr = vmalloc(cnt);
+	if (!addr) {
+		qib_devinfo(dd->pcidev,
+			 "Couldn't get memory for checking PIO perf,"
+			 " skipping\n");
+		goto done;
+	}
+
+	preempt_disable();  /* we want reasonably accurate elapsed time */
+	msecs = 1 + jiffies_to_msecs(jiffies);
+	for (lcnt = 0; lcnt < 10000U; lcnt++) {
+		/* wait until we cross msec boundary */
+		if (jiffies_to_msecs(jiffies) >= msecs)
+			break;
+		udelay(1);
+	}
+
+	dd->f_set_armlaunch(dd, 0);
+
+	/*
+	 * length 0, no dwords actually sent
+	 */
+	writeq(0, piobuf);
+	qib_flush_wc();
+
+	/*
+	 * This is only roughly accurate, since even with preempt we
+	 * still take interrupts that could take a while.   Running for
+	 * >= 5 msec seems to get us "close enough" to accurate values.
+	 */
+	msecs = jiffies_to_msecs(jiffies);
+	for (emsecs = lcnt = 0; emsecs <= 5UL; lcnt++) {
+		qib_pio_copy(piobuf + 64, addr, cnt >> 2);
+		emsecs = jiffies_to_msecs(jiffies) - msecs;
+	}
+
+	/* 1 GiB/sec, slightly over IB SDR line rate */
+	if (lcnt < (emsecs * 1024U))
+		qib_dev_err(dd,
+			    "Performance problem: bandwidth to PIO buffers is "
+			    "only %u MiB/sec\n",
+			    lcnt / (u32) emsecs);
+
+	preempt_enable();
+
+	vfree(addr);
+
+done:
+	/* disarm piobuf, so it's available again */
+	dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(pbnum));
+	qib_sendbuf_done(dd, pbnum);
+	dd->f_set_armlaunch(dd, 1);
+}
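
The check above encodes a roughly 1 GiB/s floor: each qib_pio_copy() call moves cnt = 1024 bytes, so lcnt iterations in emsecs milliseconds is about lcnt/emsecs KiB/ms, i.e. about lcnt/emsecs MiB/s, and 1024 one-KiB copies per msec is roughly 1 GiB/s. A standalone userspace sketch of that arithmetic (mine, not part of the patch; the observed counts are made up):

	#include <stdio.h>

	int main(void)
	{
		unsigned cnt = 1024;     /* bytes per qib_pio_copy() call */
		unsigned lcnt = 4000;    /* hypothetical iterations completed */
		unsigned emsecs = 5;     /* elapsed milliseconds */
		/* ~MiB/s, same quantity the driver reports */
		unsigned mibs = (lcnt * (cnt / 1024)) / emsecs;

		/* the driver's floor: >= 1024 one-KiB copies/msec, ~1 GiB/s */
		if (lcnt < emsecs * 1024U)
			printf("Performance problem: only %u MiB/sec\n", mibs);
		else
			printf("ok: ~%u MiB/sec\n", mibs);
		return 0;
	}
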
+
+
+void qib_free_devdata(struct qib_devdata *dd)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&qib_devs_lock, flags);
+	idr_remove(&qib_unit_table, dd->unit);
+	list_del(&dd->list);
+	spin_unlock_irqrestore(&qib_devs_lock, flags);
+
+	ib_dealloc_device(&dd->verbs_dev.ibdev);
+}
+
+/*
+ * Allocate our primary per-unit data structure.  Must be done via verbs
+ * allocator, because the verbs cleanup process both does cleanup and
+ * free of the data structure.
+ * "extra" is for chip-specific data.
+ *
+ * Use the idr mechanism to get a unit number for this unit.
+ */
+struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra)
+{
+	unsigned long flags;
+	struct qib_devdata *dd;
+	int ret;
+
+	if (!idr_pre_get(&qib_unit_table, GFP_KERNEL)) {
+		dd = ERR_PTR(-ENOMEM);
+		goto bail;
+	}
+
+	dd = (struct qib_devdata *) ib_alloc_device(sizeof(*dd) + extra);
+	if (!dd) {
+		dd = ERR_PTR(-ENOMEM);
+		goto bail;
+	}
+
+	spin_lock_irqsave(&qib_devs_lock, flags);
+	ret = idr_get_new(&qib_unit_table, dd, &dd->unit);
+	if (ret >= 0)
+		list_add(&dd->list, &qib_dev_list);
+	spin_unlock_irqrestore(&qib_devs_lock, flags);
+
+	if (ret < 0) {
+		qib_early_err(&pdev->dev,
+			      "Could not allocate unit ID: error %d\n", -ret);
+		ib_dealloc_device(&dd->verbs_dev.ibdev);
+		dd = ERR_PTR(ret);
+		goto bail;
+	}
+
+	if (!qib_cpulist_count) {
+		u32 count = num_online_cpus();
+		qib_cpulist = kzalloc(BITS_TO_LONGS(count) *
+				      sizeof(long), GFP_KERNEL);
+		if (qib_cpulist)
+			qib_cpulist_count = count;
+		else
+			qib_early_err(&pdev->dev, "Could not alloc cpulist "
+				      "info, cpu affinity might be wrong\n");
+	}
+
+bail:
+	return dd;
+}
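
qib_alloc_devdata() uses the old two-step idr API: idr_pre_get() allocates memory while it is still safe to sleep, so that idr_get_new() cannot fail for lack of memory while qib_devs_lock is held. A toy userspace model of the same split (my sketch; the fixed-size table stands in for idr's growable tree):

	#include <stdio.h>
	#include <stdlib.h>

	#define TABLE_SIZE 8
	static void *table[TABLE_SIZE];
	static void *prealloc;          /* filled by fake_pre_get() */

	/* may sleep/allocate: call before taking any lock */
	static int fake_pre_get(void)
	{
		if (!prealloc)
			prealloc = malloc(64);
		return prealloc != NULL;
	}

	/* must not sleep: called under the lock, uses the preallocation */
	static int fake_get_new(void *ptr, int *id)
	{
		int i;

		for (i = 0; i < TABLE_SIZE; i++)
			if (!table[i]) {
				table[i] = ptr;
				*id = i;
				return 0;
			}
		return -1; /* real idr would grow using the prealloc'd node */
	}

	int main(void)
	{
		int unit = -1, dummy;

		if (fake_pre_get() && fake_get_new(&dummy, &unit) == 0)
			printf("allocated unit %d\n", unit);
		free(prealloc);
		return 0;
	}
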
+
+/*
+ * Called from freeze mode handlers, and from PCI error
+ * reporting code.  Should be paranoid about state of
+ * system and data structures.
+ */
+void qib_disable_after_error(struct qib_devdata *dd)
+{
+	if (dd->flags & QIB_INITTED) {
+		u32 pidx;
+
+		dd->flags &= ~QIB_INITTED;
+		if (dd->pport)
+			for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+				struct qib_pportdata *ppd;
+
+				ppd = dd->pport + pidx;
+				if (dd->flags & QIB_PRESENT) {
+					qib_set_linkstate(ppd,
+						QIB_IB_LINKDOWN_DISABLE);
+					dd->f_setextled(ppd, 0);
+				}
+				*ppd->statusp &= ~QIB_STATUS_IB_READY;
+			}
+	}
+
+	/*
+	 * Mark as having had an error for driver, and also
+	 * for /sys and status word mapped to user programs.
+	 * This marks unit as not usable, until reset.
+	 */
+	if (dd->devstatusp)
+		*dd->devstatusp |= QIB_STATUS_HWERROR;
+}
+
+static void __devexit qib_remove_one(struct pci_dev *);
+static int __devinit qib_init_one(struct pci_dev *,
+				  const struct pci_device_id *);
+
+#define DRIVER_LOAD_MSG "QLogic " QIB_DRV_NAME " loaded: "
+#define PFX QIB_DRV_NAME ": "
+
+static const struct pci_device_id qib_pci_tbl[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_QLOGIC_IB_6120) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_IB_7220) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_IB_7322) },
+	{ 0, }
+};
+
+MODULE_DEVICE_TABLE(pci, qib_pci_tbl);
+
+struct pci_driver qib_driver = {
+	.name = QIB_DRV_NAME,
+	.probe = qib_init_one,
+	.remove = __devexit_p(qib_remove_one),
+	.id_table = qib_pci_tbl,
+	.err_handler = &qib_pci_err_handler,
+};
+
+/*
+ * Do all the generic driver unit- and chip-independent memory
+ * allocation and initialization.
+ */
+static int __init qlogic_ib_init(void)
+{
+	int ret;
+
+	ret = qib_dev_init();
+	if (ret)
+		goto bail;
+
+	/*
+	 * We create our own workqueue mainly because we want to be
+	 * able to flush it when devices are being removed.  We can't
+	 * use schedule_work()/flush_scheduled_work() because both
+	 * unregister_netdev() and linkwatch_event take the rtnl lock,
+	 * so flush_scheduled_work() can deadlock during device
+	 * removal.
+	 */
+	qib_wq = create_workqueue("qib");
+	if (!qib_wq) {
+		ret = -ENOMEM;
+		goto bail_dev;
+	}
+
+	qib_cq_wq = create_workqueue("qib_cq");
+	if (!qib_cq_wq) {
+		ret = -ENOMEM;
+		goto bail_wq;
+	}
+
+	/*
+	 * These must be called before the driver is registered with
+	 * the PCI subsystem.
+	 */
+	idr_init(&qib_unit_table);
+	if (!idr_pre_get(&qib_unit_table, GFP_KERNEL)) {
+		printk(KERN_ERR QIB_DRV_NAME ": idr_pre_get() failed\n");
+		ret = -ENOMEM;
+		goto bail_cq_wq;
+	}
+
+	ret = pci_register_driver(&qib_driver);
+	if (ret < 0) {
+		printk(KERN_ERR QIB_DRV_NAME
+		       ": Unable to register driver: error %d\n", -ret);
+		goto bail_unit;
+	}
+
+	/* not fatal if it doesn't work */
+	if (qib_init_qibfs())
+		printk(KERN_ERR QIB_DRV_NAME ": Unable to register qibfs\n");
+	goto bail; /* all OK */
+
+bail_unit:
+	idr_destroy(&qib_unit_table);
+bail_cq_wq:
+	destroy_workqueue(qib_cq_wq);
+bail_wq:
+	destroy_workqueue(qib_wq);
+bail_dev:
+	qib_dev_cleanup();
+bail:
+	return ret;
+}
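
qlogic_ib_init() unwinds in the reverse of setup order through the bail_* label ladder, so a failure at any step releases exactly what the earlier steps acquired and nothing more. A minimal userspace sketch of the idiom (mine; malloc() stands in for qib_dev_init() and create_workqueue()):

	#include <stdio.h>
	#include <stdlib.h>

	static int init_all(void)
	{
		void *a = NULL, *b = NULL;
		int ret = -1;

		a = malloc(16);         /* step 1, like qib_dev_init() */
		if (!a)
			goto bail;
		b = malloc(16);         /* step 2, like create_workqueue() */
		if (!b)
			goto bail_a;

		return 0;               /* success: resources stay live */

	bail_a:
		free(a);                /* undo step 1 only */
	bail:
		return ret;
	}

	int main(void)
	{
		printf("init_all() = %d\n", init_all());
		return 0;
	}
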
+
+module_init(qlogic_ib_init);
+
+/*
+ * Do the non-unit driver cleanup, memory free, etc. at unload.
+ */
+static void __exit qlogic_ib_cleanup(void)
+{
+	int ret;
+
+	ret = qib_exit_qibfs();
+	if (ret)
+		printk(KERN_ERR QIB_DRV_NAME ": "
+			"Unable to cleanup counter filesystem: "
+			"error %d\n", -ret);
+
+	pci_unregister_driver(&qib_driver);
+
+	destroy_workqueue(qib_wq);
+	destroy_workqueue(qib_cq_wq);
+
+	qib_cpulist_count = 0;
+	kfree(qib_cpulist);
+
+	idr_destroy(&qib_unit_table);
+	qib_dev_cleanup();
+}
+
+module_exit(qlogic_ib_cleanup);
+
+/* this can only be called after a successful initialization */
+static void cleanup_device_data(struct qib_devdata *dd)
+{
+	int ctxt;
+	int pidx;
+	struct qib_ctxtdata **tmp;
+	unsigned long flags;
+
+	/* users can't do anything more with chip */
+	for (pidx = 0; pidx < dd->num_pports; ++pidx)
+		if (dd->pport[pidx].statusp)
+			*dd->pport[pidx].statusp &= ~QIB_STATUS_CHIP_PRESENT;
+
+	if (!qib_wc_pat)
+		qib_disable_wc(dd);
+
+	if (dd->pioavailregs_dma) {
+		dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
+				  (void *) dd->pioavailregs_dma,
+				  dd->pioavailregs_phys);
+		dd->pioavailregs_dma = NULL;
+	}
+
+	if (dd->pageshadow) {
+		struct page **tmpp = dd->pageshadow;
+		dma_addr_t *tmpd = dd->physshadow;
+		int i, cnt = 0;
+
+		for (ctxt = 0; ctxt < dd->cfgctxts; ctxt++) {
+			int ctxt_tidbase = ctxt * dd->rcvtidcnt;
+			int maxtid = ctxt_tidbase + dd->rcvtidcnt;
+
+			for (i = ctxt_tidbase; i < maxtid; i++) {
+				if (!tmpp[i])
+					continue;
+				pci_unmap_page(dd->pcidev, tmpd[i],
+					       PAGE_SIZE, PCI_DMA_FROMDEVICE);
+				qib_release_user_pages(&tmpp[i], 1);
+				tmpp[i] = NULL;
+				cnt++;
+			}
+		}
+
+		tmpp = dd->pageshadow;
+		dd->pageshadow = NULL;
+		vfree(tmpp);
+	}
+
+	/*
+	 * Free any resources still in use (usually just kernel contexts)
+	 * at unload; we iterate over ctxtcnt, because that's what we allocate.
+	 * We acquire the lock to be really paranoid that rcd isn't being
+	 * accessed from some interrupt-related code (that should not happen,
+	 * but best to be sure).
+	 */
+	spin_lock_irqsave(&dd->uctxt_lock, flags);
+	tmp = dd->rcd;
+	dd->rcd = NULL;
+	spin_unlock_irqrestore(&dd->uctxt_lock, flags);
+	for (ctxt = 0; tmp && ctxt < dd->ctxtcnt; ctxt++) {
+		struct qib_ctxtdata *rcd = tmp[ctxt];
+
+		tmp[ctxt] = NULL; /* debugging paranoia */
+		qib_free_ctxtdata(dd, rcd);
+	}
+	kfree(tmp);
+	kfree(dd->boardname);
+}
+
+/*
+ * Clean up on unit shutdown, or error during unit load after
+ * successful initialization.
+ */
+static void qib_postinit_cleanup(struct qib_devdata *dd)
+{
+	/*
+	 * Clean up chip-specific stuff.
+	 * We check for NULL here, because it's outside
+	 * the kregbase check, and we need to call it
+	 * after the free_irq.  Thus it's possible that
+	 * the function pointers were never initialized.
+	 */
+	if (dd->f_cleanup)
+		dd->f_cleanup(dd);
+
+	qib_pcie_ddcleanup(dd);
+
+	cleanup_device_data(dd);
+
+	qib_free_devdata(dd);
+}
+
+static int __devinit qib_init_one(struct pci_dev *pdev,
+				  const struct pci_device_id *ent)
+{
+	int ret, j, pidx, initfail;
+	struct qib_devdata *dd = NULL;
+
+	ret = qib_pcie_init(pdev, ent);
+	if (ret)
+		goto bail;
+
+	/*
+	 * Do device-specific initialization, function table setup, dd
+	 * allocation, etc.
+	 */
+	switch (ent->device) {
+	case PCI_DEVICE_ID_QLOGIC_IB_6120:
+		dd = qib_init_iba6120_funcs(pdev, ent);
+		break;
+
+	case PCI_DEVICE_ID_QLOGIC_IB_7220:
+		dd = qib_init_iba7220_funcs(pdev, ent);
+		break;
+
+	case PCI_DEVICE_ID_QLOGIC_IB_7322:
+		dd = qib_init_iba7322_funcs(pdev, ent);
+		break;
+
+	default:
+		qib_early_err(&pdev->dev, "Failing on unknown QLogic "
+			      "deviceid 0x%x\n", ent->device);
+		ret = -ENODEV;
+	}
+
+	if (IS_ERR(dd))
+		ret = PTR_ERR(dd);
+	if (ret)
+		goto bail; /* error already printed */
+
+	/* do the generic initialization */
+	initfail = qib_init(dd, 0);
+
+	ret = qib_register_ib_device(dd);
+
+	/*
+	 * Now ready for use.  This should be cleared whenever we
+	 * detect a reset, or initiate one.  If earlier failure,
+	 * we still create devices, so diags, etc. can be used
+	 * to determine cause of problem.
+	 */
+	if (!qib_mini_init && !initfail && !ret)
+		dd->flags |= QIB_INITTED;
+
+	j = qib_device_create(dd);
+	if (j)
+		qib_dev_err(dd, "Failed to create /dev devices: %d\n", -j);
+	j = qibfs_add(dd);
+	if (j)
+		qib_dev_err(dd, "Failed filesystem setup for counters: %d\n",
+			    -j);
+
+	if (qib_mini_init || initfail || ret) {
+		qib_stop_timers(dd);
+		for (pidx = 0; pidx < dd->num_pports; ++pidx)
+			dd->f_quiet_serdes(dd->pport + pidx);
+		if (initfail)
+			ret = initfail;
+		goto bail;
+	}
+
+	if (!qib_wc_pat) {
+		ret = qib_enable_wc(dd);
+		if (ret) {
+			qib_dev_err(dd, "Write combining not enabled "
+				    "(err %d): performance may be poor\n",
+				    -ret);
+			ret = 0;
+		}
+	}
+
+	qib_verify_pioperf(dd);
+bail:
+	return ret;
+}
+
+static void __devexit qib_remove_one(struct pci_dev *pdev)
+{
+	struct qib_devdata *dd = pci_get_drvdata(pdev);
+	int ret;
+
+	/* unregister from IB core */
+	qib_unregister_ib_device(dd);
+
+	/*
+	 * Disable the IB link, disable interrupts on the device,
+	 * clear dma engines, etc.
+	 */
+	if (!qib_mini_init)
+		qib_shutdown_device(dd);
+
+	qib_stop_timers(dd);
+
+	/* wait until all of our (qsfp) schedule_work() calls complete */
+	flush_scheduled_work();
+
+	ret = qibfs_remove(dd);
+	if (ret)
+		qib_dev_err(dd, "Failed counters filesystem cleanup: %d\n",
+			    -ret);
+
+	qib_device_remove(dd);
+
+	qib_postinit_cleanup(dd);
+}
+
+/**
+ * qib_create_rcvhdrq - create a receive header queue
+ * @dd: the qlogic_ib device
+ * @rcd: the context data
+ *
+ * This must be contiguous memory (from an I/O perspective), and must be
+ * DMA'able (which means for some systems, it will go through an IOMMU,
+ * or be forced into a low address range).
+ */
+int qib_create_rcvhdrq(struct qib_devdata *dd, struct qib_ctxtdata *rcd)
+{
+	unsigned amt;
+
+	if (!rcd->rcvhdrq) {
+		dma_addr_t phys_hdrqtail;
+		gfp_t gfp_flags;
+
+		amt = ALIGN(dd->rcvhdrcnt * dd->rcvhdrentsize *
+			    sizeof(u32), PAGE_SIZE);
+		gfp_flags = (rcd->ctxt >= dd->first_user_ctxt) ?
+			GFP_USER : GFP_KERNEL;
+		rcd->rcvhdrq = dma_alloc_coherent(
+			&dd->pcidev->dev, amt, &rcd->rcvhdrq_phys,
+			gfp_flags | __GFP_COMP);
+
+		if (!rcd->rcvhdrq) {
+			qib_dev_err(dd, "attempt to allocate %d bytes "
+				    "for ctxt %u rcvhdrq failed\n",
+				    amt, rcd->ctxt);
+			goto bail;
+		}
+
+		if (rcd->ctxt >= dd->first_user_ctxt) {
+			rcd->user_event_mask = vmalloc_user(PAGE_SIZE);
+			if (!rcd->user_event_mask)
+				goto bail_free_hdrq;
+		}
+
+		if (!(dd->flags & QIB_NODMA_RTAIL)) {
+			rcd->rcvhdrtail_kvaddr = dma_alloc_coherent(
+				&dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail,
+				gfp_flags);
+			if (!rcd->rcvhdrtail_kvaddr)
+				goto bail_free;
+			rcd->rcvhdrqtailaddr_phys = phys_hdrqtail;
+		}
+
+		rcd->rcvhdrq_size = amt;
+	}
+
+	/* clear for security and sanity on each use */
+	memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size);
+	if (rcd->rcvhdrtail_kvaddr)
+		memset(rcd->rcvhdrtail_kvaddr, 0, PAGE_SIZE);
+	return 0;
+
+bail_free:
+	qib_dev_err(dd, "attempt to allocate 1 page for ctxt %u "
+		    "rcvhdrqtailaddr failed\n", rcd->ctxt);
+	vfree(rcd->user_event_mask);
+	rcd->user_event_mask = NULL;
+bail_free_hdrq:
+	dma_free_coherent(&dd->pcidev->dev, amt, rcd->rcvhdrq,
+			  rcd->rcvhdrq_phys);
+	rcd->rcvhdrq = NULL;
+bail:
+	return -ENOMEM;
+}
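
The allocation size above is the header-queue entry count times the per-entry size in 32-bit words, rounded up to a whole page for dma_alloc_coherent(). A quick userspace check of that computation (my sketch; the counts are hypothetical, not real chip values):

	#include <stdio.h>

	#define PAGE_SIZE 4096u
	#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

	int main(void)
	{
		unsigned rcvhdrcnt = 512, rcvhdrentsize = 16; /* hypothetical */
		/* entries * entry size in u32 words * 4 bytes, page-aligned */
		unsigned amt = ALIGN(rcvhdrcnt * rcvhdrentsize * 4u, PAGE_SIZE);

		printf("rcvhdrq: %u bytes (%u pages)\n", amt, amt / PAGE_SIZE);
		return 0;
	}
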
+
+/**
+ * qib_setup_eagerbufs - allocate eager buffers, both kernel and user contexts.
+ * @rcd: the context we are setting up.
+ *
+ * Allocate the eager TID buffers and program them into the chip.
+ * They are no longer completely contiguous; we allocate in multiple
+ * calls so we don't get the OOM code involved by asking for too
+ * much per call, which has had disastrous results on some kernels.
+ */
+int qib_setup_eagerbufs(struct qib_ctxtdata *rcd)
+{
+	struct qib_devdata *dd = rcd->dd;
+	unsigned e, egrcnt, egrperchunk, chunk, egrsize, egroff;
+	size_t size;
+	gfp_t gfp_flags;
+
+	/*
+	 * GFP_USER, but without GFP_FS, so the buffer cache can be
+	 * coalesced (we hope); otherwise, even at order 4, heavy
+	 * filesystem activity makes these allocations fail.
+	 * __GFP_COMP lets us use compound pages.
+	 */
+	gfp_flags = __GFP_WAIT | __GFP_IO | __GFP_COMP;
+
+	egrcnt = rcd->rcvegrcnt;
+	egroff = rcd->rcvegr_tid_base;
+	egrsize = dd->rcvegrbufsize;
+
+	chunk = rcd->rcvegrbuf_chunks;
+	egrperchunk = rcd->rcvegrbufs_perchunk;
+	size = rcd->rcvegrbuf_size;
+	if (!rcd->rcvegrbuf) {
+		rcd->rcvegrbuf =
+			kzalloc(chunk * sizeof(rcd->rcvegrbuf[0]),
+				GFP_KERNEL);
+		if (!rcd->rcvegrbuf)
+			goto bail;
+	}
+	if (!rcd->rcvegrbuf_phys) {
+		rcd->rcvegrbuf_phys =
+			kmalloc(chunk * sizeof(rcd->rcvegrbuf_phys[0]),
+				GFP_KERNEL);
+		if (!rcd->rcvegrbuf_phys)
+			goto bail_rcvegrbuf;
+	}
+	for (e = 0; e < rcd->rcvegrbuf_chunks; e++) {
+		if (rcd->rcvegrbuf[e])
+			continue;
+		rcd->rcvegrbuf[e] =
+			dma_alloc_coherent(&dd->pcidev->dev, size,
+					   &rcd->rcvegrbuf_phys[e],
+					   gfp_flags);
+		if (!rcd->rcvegrbuf[e])
+			goto bail_rcvegrbuf_phys;
+	}
+
+	rcd->rcvegr_phys = rcd->rcvegrbuf_phys[0];
+
+	for (e = chunk = 0; chunk < rcd->rcvegrbuf_chunks; chunk++) {
+		dma_addr_t pa = rcd->rcvegrbuf_phys[chunk];
+		unsigned i;
+
+		for (i = 0; e < egrcnt && i < egrperchunk; e++, i++) {
+			dd->f_put_tid(dd, e + egroff +
+					  (u64 __iomem *)
+					  ((char __iomem *)
+					   dd->kregbase +
+					   dd->rcvegrbase),
+					  RCVHQ_RCV_TYPE_EAGER, pa);
+			pa += egrsize;
+		}
+		cond_resched(); /* don't hog the cpu */
+	}
+
+	return 0;
+
+bail_rcvegrbuf_phys:
+	for (e = 0; e < rcd->rcvegrbuf_chunks && rcd->rcvegrbuf[e]; e++)
+		dma_free_coherent(&dd->pcidev->dev, size,
+				  rcd->rcvegrbuf[e], rcd->rcvegrbuf_phys[e]);
+	kfree(rcd->rcvegrbuf_phys);
+	rcd->rcvegrbuf_phys = NULL;
+bail_rcvegrbuf:
+	kfree(rcd->rcvegrbuf);
+	rcd->rcvegrbuf = NULL;
+bail:
+	return -ENOMEM;
+}
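
Because the eager buffers are allocated as multiple chunks, eager TID e lives in chunk e / egrperchunk at byte offset (e % egrperchunk) * egrsize, which is exactly what the nested loop above walks while pa advances by egrsize. A standalone sketch of that mapping (mine; the chunk geometry is made up):

	#include <stdio.h>

	int main(void)
	{
		unsigned egrperchunk = 16, egrsize = 4096; /* hypothetical */
		unsigned e;

		for (e = 0; e < 40; e += 17)
			printf("TID %2u -> chunk %u, offset 0x%x\n",
			       e, e / egrperchunk,
			       (e % egrperchunk) * egrsize);
		return 0;
	}
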
+
+int init_chip_wc_pat(struct qib_devdata *dd, u32 vl15buflen)
+{
+	u64 __iomem *qib_kregbase = NULL;
+	void __iomem *qib_piobase = NULL;
+	u64 __iomem *qib_userbase = NULL;
+	u64 qib_kreglen;
+	u64 qib_pio2koffset = dd->piobufbase & 0xffffffff;
+	u64 qib_pio4koffset = dd->piobufbase >> 32;
+	u64 qib_pio2klen = dd->piobcnt2k * dd->palign;
+	u64 qib_pio4klen = dd->piobcnt4k * dd->align4k;
+	u64 qib_physaddr = dd->physaddr;
+	u64 qib_piolen;
+	u64 qib_userlen = 0;
+
+	/*
+	 * Free the old mapping because the kernel will try to reuse the
+	 * old mapping and not create a new mapping with the
+	 * write combining attribute.
+	 */
+	iounmap(dd->kregbase);
+	dd->kregbase = NULL;
+
+	/*
+	 * Assumes chip address space looks like:
+	 *	- kregs + sregs + cregs + uregs (in any order)
+	 *	- piobufs (2K and 4K bufs in either order)
+	 * or:
+	 *	- kregs + sregs + cregs (in any order)
+	 *	- piobufs (2K and 4K bufs in either order)
+	 *	- uregs
+	 */
+	if (dd->piobcnt4k == 0) {
+		qib_kreglen = qib_pio2koffset;
+		qib_piolen = qib_pio2klen;
+	} else if (qib_pio2koffset < qib_pio4koffset) {
+		qib_kreglen = qib_pio2koffset;
+		qib_piolen = qib_pio4koffset + qib_pio4klen - qib_kreglen;
+	} else {
+		qib_kreglen = qib_pio4koffset;
+		qib_piolen = qib_pio2koffset + qib_pio2klen - qib_kreglen;
+	}
+	qib_piolen += vl15buflen;
+	/* Map just the configured contexts (not all hw contexts) */
+	if (dd->uregbase > qib_kreglen)
+		qib_userlen = dd->ureg_align * dd->cfgctxts;
+
+	/* Sanity checks passed, now create the new mappings */
+	qib_kregbase = ioremap_nocache(qib_physaddr, qib_kreglen);
+	if (!qib_kregbase)
+		goto bail;
+
+	qib_piobase = ioremap_wc(qib_physaddr + qib_kreglen, qib_piolen);
+	if (!qib_piobase)
+		goto bail_kregbase;
+
+	if (qib_userlen) {
+		qib_userbase = ioremap_nocache(qib_physaddr + dd->uregbase,
+					       qib_userlen);
+		if (!qib_userbase)
+			goto bail_piobase;
+	}
+
+	dd->kregbase = qib_kregbase;
+	dd->kregend = (u64 __iomem *)
+		((char __iomem *) qib_kregbase + qib_kreglen);
+	dd->piobase = qib_piobase;
+	dd->pio2kbase = (void __iomem *)
+		(((char __iomem *) dd->piobase) +
+		 qib_pio2koffset - qib_kreglen);
+	if (dd->piobcnt4k)
+		dd->pio4kbase = (void __iomem *)
+			(((char __iomem *) dd->piobase) +
+			 qib_pio4koffset - qib_kreglen);
+	if (qib_userlen)
+		/* ureg will now be accessed relative to dd->userbase */
+		dd->userbase = qib_userbase;
+	return 0;
+
+bail_piobase:
+	iounmap(qib_piobase);
+bail_kregbase:
+	iounmap(qib_kregbase);
+bail:
+	return -ENOMEM;
+}
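
The layout logic above carves the single BAR into an uncached register region ending at the lowest PIO buffer offset, and one write-combined region covering both PIO buffer arrays. A userspace sketch of the same carving (my example; the offsets and lengths are invented, not real chip values):

	#include <stdio.h>

	int main(void)
	{
		unsigned long long pio2koff = 0x100000, pio2klen = 0x80000;
		unsigned long long pio4koff = 0x200000, pio4klen = 0x40000;
		unsigned long long kreglen, piolen;

		/* registers end where the first PIO buffer array begins */
		if (pio2koff < pio4koff) {
			kreglen = pio2koff;
			piolen = pio4koff + pio4klen - kreglen;
		} else {
			kreglen = pio4koff;
			piolen = pio2koff + pio2klen - kreglen;
		}
		printf("kregs [0, 0x%llx) uncached; PIO [0x%llx, 0x%llx) WC\n",
		       kreglen, kreglen, kreglen + piolen);
		return 0;
	}
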
diff --git a/drivers/infiniband/hw/qib/qib_intr.c b/drivers/infiniband/hw/qib/qib_intr.c
new file mode 100644
index 000000000000..54a40828a106
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_intr.c
@@ -0,0 +1,236 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
+ * All rights reserved.
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+
+#include "qib.h"
+#include "qib_common.h"
+
+/**
+ * qib_format_hwmsg - format a single hwerror message
+ * @msg: message buffer
+ * @msgl: length of message buffer
+ * @hwmsg: message to add to message buffer
+ */
+static void qib_format_hwmsg(char *msg, size_t msgl, const char *hwmsg)
+{
+	strlcat(msg, "[", msgl);
+	strlcat(msg, hwmsg, msgl);
+	strlcat(msg, "]", msgl);
+}
+
+/**
+ * qib_format_hwerrors - format hardware error messages for display
+ * @hwerrs: hardware errors bit vector
+ * @hwerrmsgs: hardware error descriptions
+ * @nhwerrmsgs: number of hwerrmsgs
+ * @msg: message buffer
+ * @msgl: message buffer length
+ */
+void qib_format_hwerrors(u64 hwerrs, const struct qib_hwerror_msgs *hwerrmsgs,
+			 size_t nhwerrmsgs, char *msg, size_t msgl)
+{
+	int i;
+
+	for (i = 0; i < nhwerrmsgs; i++)
+		if (hwerrs & hwerrmsgs[i].mask)
+			qib_format_hwmsg(msg, msgl, hwerrmsgs[i].msg);
+}
+
+static void signal_ib_event(struct qib_pportdata *ppd, enum ib_event_type ev)
+{
+	struct ib_event event;
+	struct qib_devdata *dd = ppd->dd;
+
+	event.device = &dd->verbs_dev.ibdev;
+	event.element.port_num = ppd->port;
+	event.event = ev;
+	ib_dispatch_event(&event);
+}
+
+void qib_handle_e_ibstatuschanged(struct qib_pportdata *ppd, u64 ibcs)
+{
+	struct qib_devdata *dd = ppd->dd;
+	unsigned long flags;
+	u32 lstate;
+	u8 ltstate;
+	enum ib_event_type ev = 0;
+
+	lstate = dd->f_iblink_state(ibcs); /* linkstate */
+	ltstate = dd->f_ibphys_portstate(ibcs);
+
+	/*
+	 * If linkstate transitions into INIT from any of the various down
+	 * states, or if it transitions from any of the up (INIT or better)
+	 * states into any of the down states (except link recovery), then
+	 * call the chip-specific code to take appropriate actions.
+	 */
+	if (lstate >= IB_PORT_INIT && (ppd->lflags & QIBL_LINKDOWN) &&
+	    ltstate == IB_PHYSPORTSTATE_LINKUP) {
+		/* transitioned to UP */
+		if (dd->f_ib_updown(ppd, 1, ibcs))
+			goto skip_ibchange; /* chip-code handled */
+	} else if (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
+		   QIBL_LINKACTIVE | QIBL_IB_FORCE_NOTIFY)) {
+		if (ltstate != IB_PHYSPORTSTATE_LINKUP &&
+		    ltstate <= IB_PHYSPORTSTATE_CFG_TRAIN &&
+		    dd->f_ib_updown(ppd, 0, ibcs))
+			goto skip_ibchange; /* chip-code handled */
+		qib_set_uevent_bits(ppd, _QIB_EVENT_LINKDOWN_BIT);
+	}
+
+	if (lstate != IB_PORT_DOWN) {
+		/* lstate is INIT, ARMED, or ACTIVE */
+		if (lstate != IB_PORT_ACTIVE) {
+			*ppd->statusp &= ~QIB_STATUS_IB_READY;
+			if (ppd->lflags & QIBL_LINKACTIVE)
+				ev = IB_EVENT_PORT_ERR;
+			spin_lock_irqsave(&ppd->lflags_lock, flags);
+			if (lstate == IB_PORT_ARMED) {
+				ppd->lflags |= QIBL_LINKARMED | QIBL_LINKV;
+				ppd->lflags &= ~(QIBL_LINKINIT |
+					QIBL_LINKDOWN | QIBL_LINKACTIVE);
+			} else {
+				ppd->lflags |= QIBL_LINKINIT | QIBL_LINKV;
+				ppd->lflags &= ~(QIBL_LINKARMED |
+					QIBL_LINKDOWN | QIBL_LINKACTIVE);
+			}
+			spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+			/* start a 75msec timer to clear symbol errors */
+			mod_timer(&ppd->symerr_clear_timer,
+				  msecs_to_jiffies(75));
+		} else if (ltstate == IB_PHYSPORTSTATE_LINKUP) {
+			/* active, but not active deferred */
+			qib_hol_up(ppd); /* useful only for 6120 now */
+			*ppd->statusp |=
+				QIB_STATUS_IB_READY | QIB_STATUS_IB_CONF;
+			qib_clear_symerror_on_linkup((unsigned long)ppd);
+			spin_lock_irqsave(&ppd->lflags_lock, flags);
+			ppd->lflags |= QIBL_LINKACTIVE | QIBL_LINKV;
+			ppd->lflags &= ~(QIBL_LINKINIT |
+				QIBL_LINKDOWN | QIBL_LINKARMED);
+			spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+			if (dd->flags & QIB_HAS_SEND_DMA)
+				qib_sdma_process_event(ppd,
+					qib_sdma_event_e30_go_running);
+			ev = IB_EVENT_PORT_ACTIVE;
+			dd->f_setextled(ppd, 1);
+		}
+	} else { /* down */
+		if (ppd->lflags & QIBL_LINKACTIVE)
+			ev = IB_EVENT_PORT_ERR;
+		spin_lock_irqsave(&ppd->lflags_lock, flags);
+		ppd->lflags |= QIBL_LINKDOWN | QIBL_LINKV;
+		ppd->lflags &= ~(QIBL_LINKINIT |
+				 QIBL_LINKACTIVE | QIBL_LINKARMED);
+		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+		*ppd->statusp &= ~QIB_STATUS_IB_READY;
+	}
+
+skip_ibchange:
+	ppd->lastibcstat = ibcs;
+	if (ev)
+		signal_ib_event(ppd, ev);
+	return;
+}
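
Every branch above updates ppd->lflags the same way: clear all four link-state bits, set exactly one, and set QIBL_LINKV to mark the state as valid. A toy sketch of that invariant (mine; the bit values are arbitrary stand-ins for the real QIBL_* flags):

	#include <stdio.h>

	#define LINKDOWN   0x01
	#define LINKINIT   0x02
	#define LINKARMED  0x04
	#define LINKACTIVE 0x08
	#define LINKV      0x10

	/* clear all four state bits, set one, and mark the state valid */
	static unsigned set_lstate(unsigned lflags, unsigned newstate)
	{
		lflags &= ~(LINKDOWN | LINKINIT | LINKARMED | LINKACTIVE);
		return lflags | newstate | LINKV;
	}

	int main(void)
	{
		unsigned lflags = LINKDOWN | LINKV;

		lflags = set_lstate(lflags, LINKINIT);
		lflags = set_lstate(lflags, LINKACTIVE);
		printf("lflags = 0x%02x (ACTIVE|V expected: 0x%02x)\n",
		       lflags, LINKACTIVE | LINKV);
		return 0;
	}
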
+
+void qib_clear_symerror_on_linkup(unsigned long opaque)
+{
+	struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
+
+	if (ppd->lflags & QIBL_LINKACTIVE)
+		return;
+
+	ppd->ibport_data.z_symbol_error_counter =
+		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBSYMBOLERR);
+}
+
+/*
+ * Handle receive interrupts for user ctxts; this means a user
+ * process was waiting for a packet to arrive, and didn't want
+ * to poll.
+ */
+void qib_handle_urcv(struct qib_devdata *dd, u64 ctxtr)
+{
+	struct qib_ctxtdata *rcd;
+	unsigned long flags;
+	int i;
+
+	spin_lock_irqsave(&dd->uctxt_lock, flags);
+	for (i = dd->first_user_ctxt; dd->rcd && i < dd->cfgctxts; i++) {
+		if (!(ctxtr & (1ULL << i)))
+			continue;
+		rcd = dd->rcd[i];
+		if (!rcd || !rcd->cnt)
+			continue;
+
+		if (test_and_clear_bit(QIB_CTXT_WAITING_RCV, &rcd->flag)) {
+			wake_up_interruptible(&rcd->wait);
+			dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_INTRAVAIL_DIS,
+				      rcd->ctxt);
+		} else if (test_and_clear_bit(QIB_CTXT_WAITING_URG,
+					      &rcd->flag)) {
+			rcd->urgent++;
+			wake_up_interruptible(&rcd->wait);
+		}
+	}
+	spin_unlock_irqrestore(&dd->uctxt_lock, flags);
+}
+
+void qib_bad_intrstatus(struct qib_devdata *dd)
+{
+	static int allbits;
+
+	/* separate routine, for better optimization of qib_intr() */
+
+	/*
+	 * We print the message and disable interrupts, in hope of
+	 * having a better chance of debugging the problem.
+	 */
+	qib_dev_err(dd, "Read of chip interrupt status failed"
+		    " disabling interrupts\n");
+	if (allbits++) {
+		/* disable interrupt delivery, something is very wrong */
+		if (allbits == 2)
+			dd->f_set_intr_state(dd, 0);
+		if (allbits == 3) {
+			qib_dev_err(dd, "2nd bad interrupt status, "
+				    "unregistering interrupts\n");
+			dd->flags |= QIB_BADINTR;
+			dd->flags &= ~QIB_INITTED;
+			dd->f_free_irq(dd);
+		}
+	}
+}
diff --git a/drivers/infiniband/hw/qib/qib_keys.c b/drivers/infiniband/hw/qib/qib_keys.c
new file mode 100644
index 000000000000..4b80eb153d57
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_keys.c
@@ -0,0 +1,328 @@
+/*
+ * Copyright (c) 2006, 2007, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "qib.h"
+
+/**
+ * qib_alloc_lkey - allocate an lkey
+ * @rkt: lkey table in which to allocate the lkey
+ * @mr: memory region that this lkey protects
+ *
+ * Returns 1 if successful, otherwise returns 0.
+ */
+int qib_alloc_lkey(struct qib_lkey_table *rkt, struct qib_mregion *mr)
+{
+	unsigned long flags;
+	u32 r;
+	u32 n;
+	int ret;
+
+	spin_lock_irqsave(&rkt->lock, flags);
+
+	/* Find the next available LKEY */
+	r = rkt->next;
+	n = r;
+	for (;;) {
+		if (rkt->table[r] == NULL)
+			break;
+		r = (r + 1) & (rkt->max - 1);
+		if (r == n) {
+			spin_unlock_irqrestore(&rkt->lock, flags);
+			ret = 0;
+			goto bail;
+		}
+	}
+	rkt->next = (r + 1) & (rkt->max - 1);
+	/*
+	 * Make sure lkey is never zero, which is reserved to indicate an
+	 * unrestricted LKEY.
+	 */
+	rkt->gen++;
+	mr->lkey = (r << (32 - ib_qib_lkey_table_size)) |
+		((((1 << (24 - ib_qib_lkey_table_size)) - 1) & rkt->gen)
+		 << 8);
+	if (mr->lkey == 0) {
+		mr->lkey |= 1 << 8;
+		rkt->gen++;
+	}
+	rkt->table[r] = mr;
+	spin_unlock_irqrestore(&rkt->lock, flags);
+
+	ret = 1;
+
+bail:
+	return ret;
+}
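
The lkey built above packs the table slot into the high bits and a wrapping generation counter above bit 8, bumping the generation so that a recycled slot yields a different key, with lkey 0 reserved for the unrestricted DMA key. A standalone sketch of the packing (mine; the table-size exponent 16 is an assumed value for the ib_qib_lkey_table_size module parameter):

	#include <stdio.h>

	int main(void)
	{
		unsigned lkey_table_size = 16; /* assumed module parameter */
		unsigned r = 5, gen = 3;       /* hypothetical slot and gen */
		unsigned lkey;

		lkey = (r << (32 - lkey_table_size)) |
		       ((((1u << (24 - lkey_table_size)) - 1) & gen) << 8);
		if (lkey == 0)
			lkey |= 1u << 8;       /* 0 is the reserved DMA key */

		printf("slot %u gen %u -> lkey 0x%08x\n", r, gen, lkey);
		return 0;
	}
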
+
+/**
+ * qib_free_lkey - free an lkey
+ * @dev: the verbs device that owns the lkey table
+ * @mr: memory region whose lkey is to be freed
+ */
+int qib_free_lkey(struct qib_ibdev *dev, struct qib_mregion *mr)
+{
+	unsigned long flags;
+	u32 lkey = mr->lkey;
+	u32 r;
+	int ret;
+
+	spin_lock_irqsave(&dev->lk_table.lock, flags);
+	if (lkey == 0) {
+		if (dev->dma_mr && dev->dma_mr == mr) {
+			ret = atomic_read(&dev->dma_mr->refcount);
+			if (!ret)
+				dev->dma_mr = NULL;
+		} else
+			ret = 0;
+	} else {
+		r = lkey >> (32 - ib_qib_lkey_table_size);
+		ret = atomic_read(&dev->lk_table.table[r]->refcount);
+		if (!ret)
+			dev->lk_table.table[r] = NULL;
+	}
+	spin_unlock_irqrestore(&dev->lk_table.lock, flags);
+
+	if (ret)
+		ret = -EBUSY;
+	return ret;
+}
+
+/**
+ * qib_lkey_ok - check IB SGE for validity and initialize
+ * @rkt: table containing lkey to check SGE against
+ * @pd: protection domain the SGE is being used with
+ * @isge: outgoing internal SGE
+ * @sge: SGE to check
+ * @acc: access flags
+ *
+ * Return 1 if valid and successful, otherwise returns 0.
+ *
+ * Check the IB SGE for validity and initialize our internal version
+ * of it.
+ */
+int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
+		struct qib_sge *isge, struct ib_sge *sge, int acc)
+{
+	struct qib_mregion *mr;
+	unsigned n, m;
+	size_t off;
+	int ret = 0;
+	unsigned long flags;
+
+	/*
+	 * We use LKEY == zero for kernel virtual addresses
+	 * (see qib_get_dma_mr and qib_dma.c).
+	 */
+	spin_lock_irqsave(&rkt->lock, flags);
+	if (sge->lkey == 0) {
+		struct qib_ibdev *dev = to_idev(pd->ibpd.device);
+
+		if (pd->user)
+			goto bail;
+		if (!dev->dma_mr)
+			goto bail;
+		atomic_inc(&dev->dma_mr->refcount);
+		isge->mr = dev->dma_mr;
+		isge->vaddr = (void *) sge->addr;
+		isge->length = sge->length;
+		isge->sge_length = sge->length;
+		isge->m = 0;
+		isge->n = 0;
+		goto ok;
+	}
+	mr = rkt->table[(sge->lkey >> (32 - ib_qib_lkey_table_size))];
+	if (unlikely(mr == NULL || mr->lkey != sge->lkey ||
+		     mr->pd != &pd->ibpd))
+		goto bail;
+
+	off = sge->addr - mr->user_base;
+	if (unlikely(sge->addr < mr->user_base ||
+		     off + sge->length > mr->length ||
+		     (mr->access_flags & acc) != acc))
+		goto bail;
+
+	off += mr->offset;
+	m = 0;
+	n = 0;
+	while (off >= mr->map[m]->segs[n].length) {
+		off -= mr->map[m]->segs[n].length;
+		n++;
+		if (n >= QIB_SEGSZ) {
+			m++;
+			n = 0;
+		}
+	}
+	atomic_inc(&mr->refcount);
+	isge->mr = mr;
+	isge->vaddr = mr->map[m]->segs[n].vaddr + off;
+	isge->length = mr->map[m]->segs[n].length - off;
+	isge->sge_length = sge->length;
+	isge->m = m;
+	isge->n = n;
+ok:
+	ret = 1;
+bail:
+	spin_unlock_irqrestore(&rkt->lock, flags);
+	return ret;
+}
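
The while loop above resolves a byte offset into an MR laid out as maps of QIB_SEGSZ segments each: subtract segment lengths until the offset fits, advancing (m, n) across map boundaries. A userspace sketch of the walk (mine; SEGSZ and the segment lengths are made up):

	#include <stdio.h>

	#define SEGSZ 4 /* small stand-in for QIB_SEGSZ */

	int main(void)
	{
		unsigned seg_len[2][SEGSZ] = { /* hypothetical lengths */
			{ 4096, 4096, 4096, 4096 },
			{ 4096, 4096, 4096, 4096 },
		};
		unsigned off = 19000, m = 0, n = 0;

		/* walk offset down to (map, segment, intra-seg remainder) */
		while (off >= seg_len[m][n]) {
			off -= seg_len[m][n];
			if (++n >= SEGSZ) {
				m++;
				n = 0;
			}
		}
		printf("offset 19000 -> map %u, seg %u, remainder %u\n",
		       m, n, off);
		return 0;
	}
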
+
+/**
+ * qib_rkey_ok - check the IB virtual address, length, and RKEY
+ * @qp: queue pair the RKEY is being used with
+ * @sge: SGE to initialize
+ * @len: length of data
+ * @vaddr: virtual address to place data
+ * @rkey: rkey to check
+ * @acc: access flags
+ *
+ * Return 1 if successful, otherwise 0.
+ */
+int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
+		u32 len, u64 vaddr, u32 rkey, int acc)
+{
+	struct qib_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
+	struct qib_mregion *mr;
+	unsigned n, m;
+	size_t off;
+	int ret = 0;
+	unsigned long flags;
+
+	/*
+	 * We use RKEY == zero for kernel virtual addresses
+	 * (see qib_get_dma_mr and qib_dma.c).
+	 */
+	spin_lock_irqsave(&rkt->lock, flags);
+	if (rkey == 0) {
+		struct qib_pd *pd = to_ipd(qp->ibqp.pd);
+		struct qib_ibdev *dev = to_idev(pd->ibpd.device);
+
+		if (pd->user)
+			goto bail;
+		if (!dev->dma_mr)
+			goto bail;
+		atomic_inc(&dev->dma_mr->refcount);
+		sge->mr = dev->dma_mr;
+		sge->vaddr = (void *) vaddr;
+		sge->length = len;
+		sge->sge_length = len;
+		sge->m = 0;
+		sge->n = 0;
+		goto ok;
+	}
+
+	mr = rkt->table[(rkey >> (32 - ib_qib_lkey_table_size))];
+	if (unlikely(mr == NULL || mr->lkey != rkey || qp->ibqp.pd != mr->pd))
+		goto bail;
+
+	off = vaddr - mr->iova;
+	if (unlikely(vaddr < mr->iova || off + len > mr->length ||
+		     (mr->access_flags & acc) == 0))
+		goto bail;
+
+	off += mr->offset;
+	m = 0;
+	n = 0;
+	while (off >= mr->map[m]->segs[n].length) {
+		off -= mr->map[m]->segs[n].length;
+		n++;
+		if (n >= QIB_SEGSZ) {
+			m++;
+			n = 0;
+		}
+	}
+	atomic_inc(&mr->refcount);
+	sge->mr = mr;
+	sge->vaddr = mr->map[m]->segs[n].vaddr + off;
+	sge->length = mr->map[m]->segs[n].length - off;
+	sge->sge_length = len;
+	sge->m = m;
+	sge->n = n;
+ok:
+	ret = 1;
+bail:
+	spin_unlock_irqrestore(&rkt->lock, flags);
+	return ret;
+}
+
+/*
+ * Initialize the memory region specified by the work request.
+ */
+int qib_fast_reg_mr(struct qib_qp *qp, struct ib_send_wr *wr)
+{
+	struct qib_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
+	struct qib_pd *pd = to_ipd(qp->ibqp.pd);
+	struct qib_mregion *mr;
+	u32 rkey = wr->wr.fast_reg.rkey;
+	unsigned i, n, m;
+	int ret = -EINVAL;
+	unsigned long flags;
+	u64 *page_list;
+	size_t ps;
+
+	spin_lock_irqsave(&rkt->lock, flags);
+	if (pd->user || rkey == 0)
+		goto bail;
+
+	mr = rkt->table[(rkey >> (32 - ib_qib_lkey_table_size))];
+	if (unlikely(mr == NULL || qp->ibqp.pd != mr->pd))
+		goto bail;
+
+	if (wr->wr.fast_reg.page_list_len > mr->max_segs)
+		goto bail;
+
+	ps = 1UL << wr->wr.fast_reg.page_shift;
+	if (wr->wr.fast_reg.length > ps * wr->wr.fast_reg.page_list_len)
+		goto bail;
+
+	mr->user_base = wr->wr.fast_reg.iova_start;
+	mr->iova = wr->wr.fast_reg.iova_start;
+	mr->lkey = rkey;
+	mr->length = wr->wr.fast_reg.length;
+	mr->access_flags = wr->wr.fast_reg.access_flags;
+	page_list = wr->wr.fast_reg.page_list->page_list;
+	m = 0;
+	n = 0;
+	for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
+		mr->map[m]->segs[n].vaddr = (void *) page_list[i];
+		mr->map[m]->segs[n].length = ps;
+		if (++n == QIB_SEGSZ) {
+			m++;
+			n = 0;
+		}
+	}
+
+	ret = 0;
+bail:
+	spin_unlock_irqrestore(&rkt->lock, flags);
+	return ret;
+}
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c
new file mode 100644
index 000000000000..94b0d1f3a8f0
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_mad.c
@@ -0,0 +1,2173 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
+ * All rights reserved.
+ * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <rdma/ib_smi.h>
+
+#include "qib.h"
+#include "qib_mad.h"
+
+static int reply(struct ib_smp *smp)
+{
+	/*
+	 * The verbs framework will handle the directed/LID route
+	 * packet changes.
+	 */
+	smp->method = IB_MGMT_METHOD_GET_RESP;
+	if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
+		smp->status |= IB_SMP_DIRECTION;
+	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
+}
+
+static void qib_send_trap(struct qib_ibport *ibp, void *data, unsigned len)
+{
+	struct ib_mad_send_buf *send_buf;
+	struct ib_mad_agent *agent;
+	struct ib_smp *smp;
+	int ret;
+	unsigned long flags;
+	unsigned long timeout;
+
+	agent = ibp->send_agent;
+	if (!agent)
+		return;
+
+	/* o14-3.2.1 */
+	if (!(ppd_from_ibp(ibp)->lflags & QIBL_LINKACTIVE))
+		return;
+
+	/* o14-2 */
+	if (ibp->trap_timeout && time_before(jiffies, ibp->trap_timeout))
+		return;
+
+	send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR,
+				      IB_MGMT_MAD_DATA, GFP_ATOMIC);
+	if (IS_ERR(send_buf))
+		return;
+
+	smp = send_buf->mad;
+	smp->base_version = IB_MGMT_BASE_VERSION;
+	smp->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
+	smp->class_version = 1;
+	smp->method = IB_MGMT_METHOD_TRAP;
+	ibp->tid++;
+	smp->tid = cpu_to_be64(ibp->tid);
+	smp->attr_id = IB_SMP_ATTR_NOTICE;
+	/* o14-1: smp->mkey = 0; */
+	memcpy(smp->data, data, len);
+
+	spin_lock_irqsave(&ibp->lock, flags);
+	if (!ibp->sm_ah) {
+		if (ibp->sm_lid != be16_to_cpu(IB_LID_PERMISSIVE)) {
+			struct ib_ah *ah;
+			struct ib_ah_attr attr;
+
+			memset(&attr, 0, sizeof attr);
+			attr.dlid = ibp->sm_lid;
+			attr.port_num = ppd_from_ibp(ibp)->port;
+			ah = ib_create_ah(ibp->qp0->ibqp.pd, &attr);
+			if (IS_ERR(ah))
+				ret = -EINVAL;
+			else {
+				send_buf->ah = ah;
+				ibp->sm_ah = to_iah(ah);
+				ret = 0;
+			}
+		} else
+			ret = -EINVAL;
+	} else {
+		send_buf->ah = &ibp->sm_ah->ibah;
+		ret = 0;
+	}
+	spin_unlock_irqrestore(&ibp->lock, flags);
+
+	if (!ret)
+		ret = ib_post_send_mad(send_buf, NULL);
+	if (!ret) {
+		/* 4.096 usec * 2^subnet_timeout, converted to usecs */
+		timeout = (4096 * (1UL << ibp->subnet_timeout)) / 1000;
+		ibp->trap_timeout = jiffies + usecs_to_jiffies(timeout);
+	} else {
+		ib_free_send_mad(send_buf);
+		ibp->trap_timeout = 0;
+	}
+}
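
The o14-2 pacing above uses the standard IB subnet-timeout encoding: the repeat interval is 4.096 usec * 2^subnet_timeout, computed in whole microseconds before usecs_to_jiffies(). A quick sketch of the resulting intervals (my example):

	#include <stdio.h>

	int main(void)
	{
		unsigned st;

		for (st = 0; st <= 15; st += 5) {
			/* 4096 ns * 2^st, divided down to microseconds */
			unsigned long timeout = (4096UL * (1UL << st)) / 1000;

			printf("subnet_timeout %2u -> %lu usec between traps\n",
			       st, timeout);
		}
		return 0;
	}
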
+
+/*
+ * Send a bad [PQ]_Key trap (ch. 14.3.8).
+ */
+void qib_bad_pqkey(struct qib_ibport *ibp, __be16 trap_num, u32 key, u32 sl,
+		   u32 qp1, u32 qp2, __be16 lid1, __be16 lid2)
+{
+	struct ib_mad_notice_attr data;
+
+	if (trap_num == IB_NOTICE_TRAP_BAD_PKEY)
+		ibp->pkey_violations++;
+	else
+		ibp->qkey_violations++;
+	ibp->n_pkt_drops++;
+
+	/* Send violation trap */
+	data.generic_type = IB_NOTICE_TYPE_SECURITY;
+	data.prod_type_msb = 0;
+	data.prod_type_lsb = IB_NOTICE_PROD_CA;
+	data.trap_num = trap_num;
+	data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
+	data.toggle_count = 0;
+	memset(&data.details, 0, sizeof data.details);
+	data.details.ntc_257_258.lid1 = lid1;
+	data.details.ntc_257_258.lid2 = lid2;
+	data.details.ntc_257_258.key = cpu_to_be32(key);
+	data.details.ntc_257_258.sl_qp1 = cpu_to_be32((sl << 28) | qp1);
+	data.details.ntc_257_258.qp2 = cpu_to_be32(qp2);
+
+	qib_send_trap(ibp, &data, sizeof data);
+}
+
+/*
+ * Send a bad M_Key trap (ch. 14.3.9).
+ */
+static void qib_bad_mkey(struct qib_ibport *ibp, struct ib_smp *smp)
+{
+	struct ib_mad_notice_attr data;
+
+	/* Send violation trap */
+	data.generic_type = IB_NOTICE_TYPE_SECURITY;
+	data.prod_type_msb = 0;
+	data.prod_type_lsb = IB_NOTICE_PROD_CA;
+	data.trap_num = IB_NOTICE_TRAP_BAD_MKEY;
+	data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
+	data.toggle_count = 0;
+	memset(&data.details, 0, sizeof data.details);
+	data.details.ntc_256.lid = data.issuer_lid;
+	data.details.ntc_256.method = smp->method;
+	data.details.ntc_256.attr_id = smp->attr_id;
+	data.details.ntc_256.attr_mod = smp->attr_mod;
+	data.details.ntc_256.mkey = smp->mkey;
+	if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
+		u8 hop_cnt;
+
+		data.details.ntc_256.dr_slid = smp->dr_slid;
+		data.details.ntc_256.dr_trunc_hop = IB_NOTICE_TRAP_DR_NOTICE;
+		hop_cnt = smp->hop_cnt;
+		if (hop_cnt > ARRAY_SIZE(data.details.ntc_256.dr_rtn_path)) {
+			data.details.ntc_256.dr_trunc_hop |=
+				IB_NOTICE_TRAP_DR_TRUNC;
+			hop_cnt = ARRAY_SIZE(data.details.ntc_256.dr_rtn_path);
+		}
+		data.details.ntc_256.dr_trunc_hop |= hop_cnt;
+		memcpy(data.details.ntc_256.dr_rtn_path, smp->return_path,
+		       hop_cnt);
+	}
+
+	qib_send_trap(ibp, &data, sizeof data);
+}
+
+/*
+ * Send a Port Capability Mask Changed trap (ch. 14.3.11).
+ */
+void qib_cap_mask_chg(struct qib_ibport *ibp)
+{
+	struct ib_mad_notice_attr data;
+
+	data.generic_type = IB_NOTICE_TYPE_INFO;
+	data.prod_type_msb = 0;
+	data.prod_type_lsb = IB_NOTICE_PROD_CA;
+	data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG;
+	data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
+	data.toggle_count = 0;
+	memset(&data.details, 0, sizeof data.details);
+	data.details.ntc_144.lid = data.issuer_lid;
+	data.details.ntc_144.new_cap_mask = cpu_to_be32(ibp->port_cap_flags);
+
+	qib_send_trap(ibp, &data, sizeof data);
+}
+
+/*
+ * Send a System Image GUID Changed trap (ch. 14.3.12).
+ */
+void qib_sys_guid_chg(struct qib_ibport *ibp)
+{
+	struct ib_mad_notice_attr data;
+
+	data.generic_type = IB_NOTICE_TYPE_INFO;
+	data.prod_type_msb = 0;
+	data.prod_type_lsb = IB_NOTICE_PROD_CA;
+	data.trap_num = IB_NOTICE_TRAP_SYS_GUID_CHG;
+	data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
+	data.toggle_count = 0;
+	memset(&data.details, 0, sizeof data.details);
+	data.details.ntc_145.lid = data.issuer_lid;
+	data.details.ntc_145.new_sys_guid = ib_qib_sys_image_guid;
+
+	qib_send_trap(ibp, &data, sizeof data);
+}
+
+/*
+ * Send a Node Description Changed trap (ch. 14.3.13).
+ */
+void qib_node_desc_chg(struct qib_ibport *ibp)
+{
+	struct ib_mad_notice_attr data;
+
+	data.generic_type = IB_NOTICE_TYPE_INFO;
+	data.prod_type_msb = 0;
+	data.prod_type_lsb = IB_NOTICE_PROD_CA;
+	data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG;
+	data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
+	data.toggle_count = 0;
+	memset(&data.details, 0, sizeof data.details);
+	data.details.ntc_144.lid = data.issuer_lid;
+	data.details.ntc_144.local_changes = 1;
+	data.details.ntc_144.change_flags = IB_NOTICE_TRAP_NODE_DESC_CHG;
+
+	qib_send_trap(ibp, &data, sizeof data);
+}
+
+static int subn_get_nodedescription(struct ib_smp *smp,
+				    struct ib_device *ibdev)
+{
+	if (smp->attr_mod)
+		smp->status |= IB_SMP_INVALID_FIELD;
+
+	memcpy(smp->data, ibdev->node_desc, sizeof(smp->data));
+
+	return reply(smp);
+}
+
+static int subn_get_nodeinfo(struct ib_smp *smp, struct ib_device *ibdev,
+			     u8 port)
+{
+	struct ib_node_info *nip = (struct ib_node_info *)&smp->data;
+	struct qib_devdata *dd = dd_from_ibdev(ibdev);
+	u32 vendor, majrev, minrev;
+	unsigned pidx = port - 1; /* IB numbers ports from 1, hdw from 0 */
+
+	/* GUID 0 is illegal */
+	if (smp->attr_mod || pidx >= dd->num_pports ||
+	    dd->pport[pidx].guid == 0)
+		smp->status |= IB_SMP_INVALID_FIELD;
+	else
+		nip->port_guid = dd->pport[pidx].guid;
+
+	nip->base_version = 1;
+	nip->class_version = 1;
+	nip->node_type = 1;     /* channel adapter */
+	nip->num_ports = ibdev->phys_port_cnt;
+	/* This is already in network order */
+	nip->sys_guid = ib_qib_sys_image_guid;
+	nip->node_guid = dd->pport->guid; /* Use first-port GUID as node */
+	nip->partition_cap = cpu_to_be16(qib_get_npkeys(dd));
+	nip->device_id = cpu_to_be16(dd->deviceid);
+	majrev = dd->majrev;
+	minrev = dd->minrev;
+	nip->revision = cpu_to_be32((majrev << 16) | minrev);
+	nip->local_port_num = port;
+	vendor = dd->vendorid;
+	nip->vendor_id[0] = QIB_SRC_OUI_1;
+	nip->vendor_id[1] = QIB_SRC_OUI_2;
+	nip->vendor_id[2] = QIB_SRC_OUI_3;
+
+	return reply(smp);
+}
+
+static int subn_get_guidinfo(struct ib_smp *smp, struct ib_device *ibdev,
+			     u8 port)
+{
+	struct qib_devdata *dd = dd_from_ibdev(ibdev);
+	u32 startgx = 8 * be32_to_cpu(smp->attr_mod);
+	__be64 *p = (__be64 *) smp->data;
+	unsigned pidx = port - 1; /* IB numbers ports from 1, hdw from 0 */
+
+	/* 32 blocks of 8 64-bit GUIDs per block */
+
+	memset(smp->data, 0, sizeof(smp->data));
+
+	if (startgx == 0 && pidx < dd->num_pports) {
+		struct qib_pportdata *ppd = dd->pport + pidx;
+		struct qib_ibport *ibp = &ppd->ibport_data;
+		__be64 g = ppd->guid;
+		unsigned i;
+
+		/* GUID 0 is illegal */
+		if (g == 0)
+			smp->status |= IB_SMP_INVALID_FIELD;
+		else {
+			/* The first is a copy of the read-only HW GUID. */
+			p[0] = g;
+			for (i = 1; i < QIB_GUIDS_PER_PORT; i++)
+				p[i] = ibp->guids[i - 1];
+		}
+	} else
+		smp->status |= IB_SMP_INVALID_FIELD;
+
+	return reply(smp);
+}
+
+static void set_link_width_enabled(struct qib_pportdata *ppd, u32 w)
+{
+	(void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LWID_ENB, w);
+}
+
+static void set_link_speed_enabled(struct qib_pportdata *ppd, u32 s)
+{
+	(void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_SPD_ENB, s);
+}
+
+static int get_overrunthreshold(struct qib_pportdata *ppd)
+{
+	return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_OVERRUN_THRESH);
+}
+
+/**
+ * set_overrunthreshold - set the overrun threshold
+ * @ppd: the physical port data
+ * @n: the new threshold
+ *
+ * Note that this will only take effect when the link state changes.
+ */
+static int set_overrunthreshold(struct qib_pportdata *ppd, unsigned n)
+{
+	(void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OVERRUN_THRESH,
+					 (u32)n);
+	return 0;
+}
+
+static int get_phyerrthreshold(struct qib_pportdata *ppd)
+{
+	return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_PHYERR_THRESH);
+}
+
+/**
+ * set_phyerrthreshold - set the physical error threshold
+ * @ppd: the physical port data
+ * @n: the new threshold
+ *
+ * Note that this will only take effect when the link state changes.
+ */
+static int set_phyerrthreshold(struct qib_pportdata *ppd, unsigned n)
+{
+	(void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PHYERR_THRESH,
+					 (u32)n);
+	return 0;
+}
+
+/**
+ * get_linkdowndefaultstate - get the default linkdown state
+ * @ppd: the physical port data
+ *
+ * Returns zero if the default is POLL, 1 if the default is SLEEP.
+ */
+static int get_linkdowndefaultstate(struct qib_pportdata *ppd)
+{
+	return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT) ==
+		IB_LINKINITCMD_SLEEP;
+}
+
+static int check_mkey(struct qib_ibport *ibp, struct ib_smp *smp, int mad_flags)
+{
+	int ret = 0;
+
+	/* Is the mkey in the process of expiring? */
+	if (ibp->mkey_lease_timeout &&
+	    time_after_eq(jiffies, ibp->mkey_lease_timeout)) {
+		/* Clear timeout and mkey protection field. */
+		ibp->mkey_lease_timeout = 0;
+		ibp->mkeyprot = 0;
+	}
+
+	/* M_Key checking depends on Portinfo:M_Key_protect_bits */
+	if ((mad_flags & IB_MAD_IGNORE_MKEY) == 0 && ibp->mkey != 0 &&
+	    ibp->mkey != smp->mkey &&
+	    (smp->method == IB_MGMT_METHOD_SET ||
+	     smp->method == IB_MGMT_METHOD_TRAP_REPRESS ||
+	     (smp->method == IB_MGMT_METHOD_GET && ibp->mkeyprot >= 2))) {
+		if (ibp->mkey_violations != 0xFFFF)
+			++ibp->mkey_violations;
+		if (!ibp->mkey_lease_timeout && ibp->mkey_lease_period)
+			ibp->mkey_lease_timeout = jiffies +
+				ibp->mkey_lease_period * HZ;
+		/* Generate a trap notice. */
+		qib_bad_mkey(ibp, smp);
+		ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
+	} else if (ibp->mkey_lease_timeout)
+		ibp->mkey_lease_timeout = 0;
+
+	return ret;
+}
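
The condition above implements Portinfo:M_Key_protect_bits: a mismatched, nonzero M_Key always blocks Set and TrapRepress, but blocks Get only when mkeyprot >= 2. A boiled-down userspace sketch of that policy (mine, with invented key values):

	#include <stdio.h>

	enum { GET, SET, TRAP_REPRESS };

	static int mkey_check_fails(int method, unsigned mkeyprot,
				    unsigned long long mkey,
				    unsigned long long pkt_mkey)
	{
		if (mkey == 0 || mkey == pkt_mkey)
			return 0; /* unset or matching key: always allowed */
		return method == SET || method == TRAP_REPRESS ||
		       (method == GET && mkeyprot >= 2);
	}

	int main(void)
	{
		printf("Get, prot 0, wrong key: %s\n",
		       mkey_check_fails(GET, 0, 1, 2) ? "trap" : "allow");
		printf("Get, prot 2, wrong key: %s\n",
		       mkey_check_fails(GET, 2, 1, 2) ? "trap" : "allow");
		printf("Set, prot 0, wrong key: %s\n",
		       mkey_check_fails(SET, 0, 1, 2) ? "trap" : "allow");
		return 0;
	}
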
+
+static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
+			     u8 port)
+{
+	struct qib_devdata *dd;
+	struct qib_pportdata *ppd;
+	struct qib_ibport *ibp;
+	struct ib_port_info *pip = (struct ib_port_info *)smp->data;
+	u16 lid;
+	u8 mtu;
+	int ret;
+	u32 state;
+	u32 port_num = be32_to_cpu(smp->attr_mod);
+
+	if (port_num == 0)
+		port_num = port;
+	else {
+		if (port_num > ibdev->phys_port_cnt) {
+			smp->status |= IB_SMP_INVALID_FIELD;
+			ret = reply(smp);
+			goto bail;
+		}
+		if (port_num != port) {
+			ibp = to_iport(ibdev, port_num);
+			ret = check_mkey(ibp, smp, 0);
+			if (ret)
+				goto bail;
+		}
+	}
+
+	dd = dd_from_ibdev(ibdev);
+	/* IB numbers ports from 1, hdw from 0 */
+	ppd = dd->pport + (port_num - 1);
+	ibp = &ppd->ibport_data;
+
+	/* Clear all fields.  Only set the non-zero fields. */
+	memset(smp->data, 0, sizeof(smp->data));
+
+	/* Only return the mkey if the protection field allows it. */
+	if (smp->method == IB_MGMT_METHOD_SET || ibp->mkey == smp->mkey ||
+	    ibp->mkeyprot == 0)
+		pip->mkey = ibp->mkey;
+	pip->gid_prefix = ibp->gid_prefix;
+	lid = ppd->lid;
+	pip->lid = lid ? cpu_to_be16(lid) : IB_LID_PERMISSIVE;
+	pip->sm_lid = cpu_to_be16(ibp->sm_lid);
+	pip->cap_mask = cpu_to_be32(ibp->port_cap_flags);
+	/* pip->diag_code; */
+	pip->mkey_lease_period = cpu_to_be16(ibp->mkey_lease_period);
+	pip->local_port_num = port;
+	pip->link_width_enabled = ppd->link_width_enabled;
+	pip->link_width_supported = ppd->link_width_supported;
+	pip->link_width_active = ppd->link_width_active;
+	state = dd->f_iblink_state(ppd->lastibcstat);
+	pip->linkspeed_portstate = ppd->link_speed_supported << 4 | state;
+
+	pip->portphysstate_linkdown =
+		(dd->f_ibphys_portstate(ppd->lastibcstat) << 4) |
+		(get_linkdowndefaultstate(ppd) ? 1 : 2);
+	pip->mkeyprot_resv_lmc = (ibp->mkeyprot << 6) | ppd->lmc;
+	pip->linkspeedactive_enabled = (ppd->link_speed_active << 4) |
+		ppd->link_speed_enabled;
+	switch (ppd->ibmtu) {
+	default: /* something is wrong; fall through */
+	case 4096:
+		mtu = IB_MTU_4096;
+		break;
+	case 2048:
+		mtu = IB_MTU_2048;
+		break;
+	case 1024:
+		mtu = IB_MTU_1024;
+		break;
+	case 512:
+		mtu = IB_MTU_512;
+		break;
+	case 256:
+		mtu = IB_MTU_256;
+		break;
+	}
+	pip->neighbormtu_mastersmsl = (mtu << 4) | ibp->sm_sl;
+	pip->vlcap_inittype = ppd->vls_supported << 4;  /* InitType = 0 */
+	pip->vl_high_limit = ibp->vl_high_limit;
+	pip->vl_arb_high_cap =
+		dd->f_get_ib_cfg(ppd, QIB_IB_CFG_VL_HIGH_CAP);
+	pip->vl_arb_low_cap =
+		dd->f_get_ib_cfg(ppd, QIB_IB_CFG_VL_LOW_CAP);
+	/* InitTypeReply = 0 */
+	pip->inittypereply_mtucap = qib_ibmtu ? qib_ibmtu : IB_MTU_4096;
+	/* HCAs ignore VLStallCount and HOQLife */
+	/* pip->vlstallcnt_hoqlife; */
+	pip->operationalvl_pei_peo_fpi_fpo =
+		dd->f_get_ib_cfg(ppd, QIB_IB_CFG_OP_VLS) << 4;
+	pip->mkey_violations = cpu_to_be16(ibp->mkey_violations);
+	/* P_KeyViolations are counted by hardware. */
+	pip->pkey_violations = cpu_to_be16(ibp->pkey_violations);
+	pip->qkey_violations = cpu_to_be16(ibp->qkey_violations);
+	/* Only the hardware GUID is supported for now */
+	pip->guid_cap = QIB_GUIDS_PER_PORT;
+	pip->clientrereg_resv_subnetto = ibp->subnet_timeout;
+	/* 32.768 usec. response time (guessing) */
+	pip->resv_resptimevalue = 3;
+	pip->localphyerrors_overrunerrors =
+		(get_phyerrthreshold(ppd) << 4) |
+		get_overrunthreshold(ppd);
+	/* pip->max_credit_hint; */
+	if (ibp->port_cap_flags & IB_PORT_LINK_LATENCY_SUP) {
+		u32 v;
+
+		v = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_LINKLATENCY);
+		pip->link_roundtrip_latency[0] = v >> 16;
+		pip->link_roundtrip_latency[1] = v >> 8;
+		pip->link_roundtrip_latency[2] = v;
+	}
+
+	ret = reply(smp);
+
+bail:
+	return ret;
+}
+
+/**
+ * get_pkeys - return the PKEY table
+ * @dd: the qlogic_ib device
+ * @port: the IB port number
+ * @pkeys: the pkey table is placed here
+ */
+static int get_pkeys(struct qib_devdata *dd, u8 port, u16 *pkeys)
+{
+	struct qib_pportdata *ppd = dd->pport + port - 1;
+	/*
+	 * always a kernel context, no locking needed.
+	 * If we get here with ppd setup, no need to check
+	 * that rcd is valid.
+	 */
+	struct qib_ctxtdata *rcd = dd->rcd[ppd->hw_pidx];
+
+	memcpy(pkeys, rcd->pkeys, sizeof(rcd->pkeys));
+
+	return 0;
+}
+
+static int subn_get_pkeytable(struct ib_smp *smp, struct ib_device *ibdev,
+			      u8 port)
+{
+	u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
+	u16 *p = (u16 *) smp->data;
+	__be16 *q = (__be16 *) smp->data;
+
+	/* 64 blocks of 32 16-bit P_Key entries */
+
+	memset(smp->data, 0, sizeof(smp->data));
+	if (startpx == 0) {
+		struct qib_devdata *dd = dd_from_ibdev(ibdev);
+		unsigned i, n = qib_get_npkeys(dd);
+
+		get_pkeys(dd, port, p);
+
+		for (i = 0; i < n; i++)
+			q[i] = cpu_to_be16(p[i]);
+	} else
+		smp->status |= IB_SMP_INVALID_FIELD;
+
+	return reply(smp);
+}
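
Note that p and q above alias the same smp->data buffer; the in-place conversion is safe because each 16-bit entry is read once and written once. A userspace sketch of the same aliased swap (mine; cpu_to_be16() here assumes a little-endian host):

	#include <stdio.h>
	#include <stdint.h>

	static uint16_t cpu_to_be16(uint16_t x)
	{
		return (uint16_t)((x << 8) | (x >> 8)); /* little-endian host */
	}

	int main(void)
	{
		uint16_t pkeys[4] = { 0xFFFF, 0x8001, 0x7FFF, 0 };
		uint16_t *p = pkeys, *q = pkeys; /* deliberate aliasing */
		unsigned i;

		/* each entry read once, written once: aliasing is safe */
		for (i = 0; i < 4; i++)
			q[i] = cpu_to_be16(p[i]);
		for (i = 0; i < 4; i++)
			printf("pkey[%u] = 0x%04x\n", i, pkeys[i]);
		return 0;
	}
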
+
+static int subn_set_guidinfo(struct ib_smp *smp, struct ib_device *ibdev,
+			     u8 port)
+{
+	struct qib_devdata *dd = dd_from_ibdev(ibdev);
+	u32 startgx = 8 * be32_to_cpu(smp->attr_mod);
+	__be64 *p = (__be64 *) smp->data;
+	unsigned pidx = port - 1; /* IB numbers ports from 1, hdw from 0 */
+
+	/* 32 blocks of 8 64-bit GUIDs per block */
+
+	if (startgx == 0 && pidx < dd->num_pports) {
+		struct qib_pportdata *ppd = dd->pport + pidx;
+		struct qib_ibport *ibp = &ppd->ibport_data;
+		unsigned i;
+
+		/* The first entry is read-only. */
+		for (i = 1; i < QIB_GUIDS_PER_PORT; i++)
+			ibp->guids[i - 1] = p[i];
+	} else
+		smp->status |= IB_SMP_INVALID_FIELD;
+
+	/* The only GUID we support is the first read-only entry. */
+	return subn_get_guidinfo(smp, ibdev, port);
+}
+
+/**
+ * subn_set_portinfo - set port information
+ * @smp: the incoming SM packet
+ * @ibdev: the infiniband device
+ * @port: the port on the device
+ *
+ * Set Portinfo (see ch. 14.2.5.6).
+ */
+static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
+			     u8 port)
+{
+	struct ib_port_info *pip = (struct ib_port_info *)smp->data;
+	struct ib_event event;
+	struct qib_devdata *dd;
+	struct qib_pportdata *ppd;
+	struct qib_ibport *ibp;
+	char clientrereg = 0;
+	unsigned long flags;
+	u16 lid, smlid;
+	u8 lwe;
+	u8 lse;
+	u8 state;
+	u8 vls;
+	u8 msl;
+	u16 lstate;
+	int ret, ore, mtu;
+	u32 port_num = be32_to_cpu(smp->attr_mod);
+
+	if (port_num == 0)
+		port_num = port;
+	else {
+		if (port_num > ibdev->phys_port_cnt)
+			goto err;
+		/* Port attributes can only be set on the receiving port */
+		if (port_num != port)
+			goto get_only;
+	}
+
+	dd = dd_from_ibdev(ibdev);
+	/* IB numbers ports from 1, hdw from 0 */
+	ppd = dd->pport + (port_num - 1);
+	ibp = &ppd->ibport_data;
+	event.device = ibdev;
+	event.element.port_num = port;
+
+	ibp->mkey = pip->mkey;
+	ibp->gid_prefix = pip->gid_prefix;
+	ibp->mkey_lease_period = be16_to_cpu(pip->mkey_lease_period);
+
+	lid = be16_to_cpu(pip->lid);
+	/* Must be a valid unicast LID address. */
+	if (lid == 0 || lid >= QIB_MULTICAST_LID_BASE)
+		goto err;
+	if (ppd->lid != lid || ppd->lmc != (pip->mkeyprot_resv_lmc & 7)) {
+		if (ppd->lid != lid)
+			qib_set_uevent_bits(ppd, _QIB_EVENT_LID_CHANGE_BIT);
+		if (ppd->lmc != (pip->mkeyprot_resv_lmc & 7))
+			qib_set_uevent_bits(ppd, _QIB_EVENT_LMC_CHANGE_BIT);
+		qib_set_lid(ppd, lid, pip->mkeyprot_resv_lmc & 7);
+		event.event = IB_EVENT_LID_CHANGE;
+		ib_dispatch_event(&event);
+	}
+
+	smlid = be16_to_cpu(pip->sm_lid);
+	msl = pip->neighbormtu_mastersmsl & 0xF;
+	/* Must be a valid unicast LID address. */
+	if (smlid == 0 || smlid >= QIB_MULTICAST_LID_BASE)
+		goto err;
+	if (smlid != ibp->sm_lid || msl != ibp->sm_sl) {
+		spin_lock_irqsave(&ibp->lock, flags);
+		if (ibp->sm_ah) {
+			if (smlid != ibp->sm_lid)
+				ibp->sm_ah->attr.dlid = smlid;
+			if (msl != ibp->sm_sl)
+				ibp->sm_ah->attr.sl = msl;
+		}
+		spin_unlock_irqrestore(&ibp->lock, flags);
+		if (smlid != ibp->sm_lid)
+			ibp->sm_lid = smlid;
+		if (msl != ibp->sm_sl)
+			ibp->sm_sl = msl;
+		event.event = IB_EVENT_SM_CHANGE;
+		ib_dispatch_event(&event);
+	}
+
+	/* Allow 1x or 4x to be set (see 14.2.6.6). */
+	lwe = pip->link_width_enabled;
+	if (lwe) {
+		if (lwe == 0xFF)
+			lwe = ppd->link_width_supported;
+		else if (lwe >= 16 || (lwe & ~ppd->link_width_supported))
+			goto err;
+		set_link_width_enabled(ppd, lwe);
+	}
+
+	lse = pip->linkspeedactive_enabled & 0xF;
+	if (lse) {
+		/*
+		 * The IB 1.2 spec. only allows link speed values
+		 * 1, 3, 5, 7, 15.  1.2.1 extended to allow specific
+		 * speeds.
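+		 * Bit 0 is 2.5 Gb/s (SDR), bit 1 is 5.0 Gb/s (DDR), and
+		 * bit 2 is 10.0 Gb/s (QDR); lse == 15 means "everything
+		 * the hardware supports".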
+		 */
+		if (lse == 15)
+			lse = ppd->link_speed_supported;
+		else if (lse >= 8 || (lse & ~ppd->link_speed_supported))
+			goto err;
+		set_link_speed_enabled(ppd, lse);
+	}
+
+	/* Set link down default state. */
+	switch (pip->portphysstate_linkdown & 0xF) {
+	case 0: /* NOP */
+		break;
+	case 1: /* SLEEP */
+		(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT,
+					IB_LINKINITCMD_SLEEP);
+		break;
+	case 2: /* POLL */
+		(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT,
+					IB_LINKINITCMD_POLL);
+		break;
+	default:
+		goto err;
+	}
+
+	ibp->mkeyprot = pip->mkeyprot_resv_lmc >> 6;
+	ibp->vl_high_limit = pip->vl_high_limit;
+	(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_VL_HIGH_LIMIT,
+				    ibp->vl_high_limit);
+
+	mtu = ib_mtu_enum_to_int((pip->neighbormtu_mastersmsl >> 4) & 0xF);
+	if (mtu == -1)
+		goto err;
+	qib_set_mtu(ppd, mtu);
+
+	/* Set operational VLs */
+	vls = (pip->operationalvl_pei_peo_fpi_fpo >> 4) & 0xF;
+	if (vls) {
+		if (vls > ppd->vls_supported)
+			goto err;
+		(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OP_VLS, vls);
+	}
+
+	if (pip->mkey_violations == 0)
+		ibp->mkey_violations = 0;
+
+	if (pip->pkey_violations == 0)
+		ibp->pkey_violations = 0;
+
+	if (pip->qkey_violations == 0)
+		ibp->qkey_violations = 0;
+
+	ore = pip->localphyerrors_overrunerrors;
+	if (set_phyerrthreshold(ppd, (ore >> 4) & 0xF))
+		goto err;
+
+	if (set_overrunthreshold(ppd, (ore & 0xF)))
+		goto err;
+
+	ibp->subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F;
+
+	if (pip->clientrereg_resv_subnetto & 0x80) {
+		clientrereg = 1;
+		event.event = IB_EVENT_CLIENT_REREGISTER;
+		ib_dispatch_event(&event);
+	}
+
+	/*
+	 * Do the port state change now that the other link parameters
+	 * have been set.
+	 * Changing the port physical state only makes sense if the link
+	 * is down or is being set to down.
+	 */
+	state = pip->linkspeed_portstate & 0xF;
+	lstate = (pip->portphysstate_linkdown >> 4) & 0xF;
+	if (lstate && !(state == IB_PORT_DOWN || state == IB_PORT_NOP))
+		goto err;
+
+	/*
+	 * Only state changes of DOWN, ARM, and ACTIVE are valid
+	 * and must be in the correct state to take effect (see 7.2.6).
+	 */
+	switch (state) {
+	case IB_PORT_NOP:
+		if (lstate == 0)
+			break;
+		/* FALLTHROUGH */
+	case IB_PORT_DOWN:
+		if (lstate == 0)
+			lstate = QIB_IB_LINKDOWN_ONLY;
+		else if (lstate == 1)
+			lstate = QIB_IB_LINKDOWN_SLEEP;
+		else if (lstate == 2)
+			lstate = QIB_IB_LINKDOWN;
+		else if (lstate == 3)
+			lstate = QIB_IB_LINKDOWN_DISABLE;
+		else
+			goto err;
+		spin_lock_irqsave(&ppd->lflags_lock, flags);
+		ppd->lflags &= ~QIBL_LINKV;
+		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+		qib_set_linkstate(ppd, lstate);
+		/*
+		 * Don't send a reply if the response would be sent
+		 * through the disabled port.
+		 */
+		if (lstate == QIB_IB_LINKDOWN_DISABLE && smp->hop_cnt) {
+			ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
+			goto done;
+		}
+		qib_wait_linkstate(ppd, QIBL_LINKV, 10);
+		break;
+	case IB_PORT_ARMED:
+		qib_set_linkstate(ppd, QIB_IB_LINKARM);
+		break;
+	case IB_PORT_ACTIVE:
+		qib_set_linkstate(ppd, QIB_IB_LINKACTIVE);
+		break;
+	default:
+		/* XXX We have already partially updated our state! */
+		goto err;
+	}
+
+	ret = subn_get_portinfo(smp, ibdev, port);
+
+	if (clientrereg)
+		pip->clientrereg_resv_subnetto |= 0x80;
+
+	goto done;
+
+err:
+	smp->status |= IB_SMP_INVALID_FIELD;
+get_only:
+	ret = subn_get_portinfo(smp, ibdev, port);
+done:
+	return ret;
+}
+
+/**
+ * rm_pkey - decrement the reference count for the given PKEY
+ * @ppd: the qlogic_ib port data
+ * @key: the PKEY value
+ *
+ * Return true if this was the last reference and the hardware table entry
+ * needs to be changed.
+ */
+static int rm_pkey(struct qib_pportdata *ppd, u16 key)
+{
+	int i;
+	int ret;
+
+	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
+		if (ppd->pkeys[i] != key)
+			continue;
+		if (atomic_dec_and_test(&ppd->pkeyrefs[i])) {
+			ppd->pkeys[i] = 0;
+			ret = 1;
+			goto bail;
+		}
+		break;
+	}
+
+	ret = 0;
+
+bail:
+	return ret;
+}
+
+/**
+ * add_pkey - add the given PKEY to the hardware table
+ * @ppd: the qlogic_ib port data
+ * @key: the PKEY
+ *
+ * Return an error code if unable to add the entry, zero if no change,
+ * or 1 if the hardware PKEY register needs to be updated.
+ */
+static int add_pkey(struct qib_pportdata *ppd, u16 key)
+{
+	int i;
+	u16 lkey = key & 0x7FFF;
+	int any = 0;
+	int ret;
+
+	if (lkey == 0x7FFF) {
+		ret = 0;
+		goto bail;
+	}
+
+	/* Look for an empty slot or a matching PKEY. */
+	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
+		if (!ppd->pkeys[i]) {
+			any++;
+			continue;
+		}
+		/* If it matches exactly, try to increment the ref count */
+		if (ppd->pkeys[i] == key) {
+			if (atomic_inc_return(&ppd->pkeyrefs[i]) > 1) {
+				ret = 0;
+				goto bail;
+			}
+			/* Lost the race. Look for an empty slot below. */
+			atomic_dec(&ppd->pkeyrefs[i]);
+			any++;
+		}
+		/*
+		 * It makes no sense to have both the limited and unlimited
+		 * PKEY set at the same time since the unlimited one will
+		 * disable the limited one.
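+		 * (Bit 15 of a PKEY marks full membership: 0x8001 is the
+		 * full-member form of the limited key 0x0001.)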
+		 */
+		if ((ppd->pkeys[i] & 0x7FFF) == lkey) {
+			ret = -EEXIST;
+			goto bail;
+		}
+	}
+	if (!any) {
+		ret = -EBUSY;
+		goto bail;
+	}
+	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
+		if (!ppd->pkeys[i] &&
+		    atomic_inc_return(&ppd->pkeyrefs[i]) == 1) {
+			/* for qibstats, etc. */
+			ppd->pkeys[i] = key;
+			ret = 1;
+			goto bail;
+		}
+	}
+	ret = -EBUSY;
+
+bail:
+	return ret;
+}
+
+/**
+ * set_pkeys - set the PKEY table for ctxt 0
+ * @dd: the qlogic_ib device
+ * @port: the IB port number
+ * @pkeys: the PKEY table
+ */
+static int set_pkeys(struct qib_devdata *dd, u8 port, u16 *pkeys)
+{
+	struct qib_pportdata *ppd;
+	struct qib_ctxtdata *rcd;
+	int i;
+	int changed = 0;
+
+	/*
+	 * IB ports one/two always map to contexts zero/one, which are
+	 * always kernel contexts, so no locking is needed.  If we get
+	 * here with ppd set up, there is no need to check that rcd is
+	 * valid.
+	 */
+	ppd = dd->pport + (port - 1);
+	rcd = dd->rcd[ppd->hw_pidx];
+
+	for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) {
+		u16 key = pkeys[i];
+		u16 okey = rcd->pkeys[i];
+
+		if (key == okey)
+			continue;
+		/*
+		 * The value of this PKEY table entry is changing.
+		 * Remove the old entry in the hardware's array of PKEYs.
+		 */
+		if (okey & 0x7FFF)
+			changed |= rm_pkey(ppd, okey);
+		if (key & 0x7FFF) {
+			int ret = add_pkey(ppd, key);
+
+			if (ret < 0)
+				key = 0;
+			else
+				changed |= ret;
+		}
+		rcd->pkeys[i] = key;
+	}
+	if (changed) {
+		struct ib_event event;
+
+		(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);
+
+		event.event = IB_EVENT_PKEY_CHANGE;
+		event.device = &dd->verbs_dev.ibdev;
+		event.element.port_num = port;
+		ib_dispatch_event(&event);
+	}
+	return 0;
+}
+
+static int subn_set_pkeytable(struct ib_smp *smp, struct ib_device *ibdev,
+			      u8 port)
+{
+	u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
+	__be16 *p = (__be16 *) smp->data;
+	u16 *q = (u16 *) smp->data;
+	struct qib_devdata *dd = dd_from_ibdev(ibdev);
+	unsigned i, n = qib_get_npkeys(dd);
+
+	for (i = 0; i < n; i++)
+		q[i] = be16_to_cpu(p[i]);
+
+	if (startpx != 0 || set_pkeys(dd, port, q) != 0)
+		smp->status |= IB_SMP_INVALID_FIELD;
+
+	return subn_get_pkeytable(smp, ibdev, port);
+}
+
+static int subn_get_sl_to_vl(struct ib_smp *smp, struct ib_device *ibdev,
+			     u8 port)
+{
+	struct qib_ibport *ibp = to_iport(ibdev, port);
+	u8 *p = (u8 *) smp->data;
+	unsigned i;
+
+	memset(smp->data, 0, sizeof(smp->data));
+
+	if (!(ibp->port_cap_flags & IB_PORT_SL_MAP_SUP))
+		smp->status |= IB_SMP_UNSUP_METHOD;
+	else
+		for (i = 0; i < ARRAY_SIZE(ibp->sl_to_vl); i += 2)
+			*p++ = (ibp->sl_to_vl[i] << 4) | ibp->sl_to_vl[i + 1];
+
+	return reply(smp);
+}
+
+static int subn_set_sl_to_vl(struct ib_smp *smp, struct ib_device *ibdev,
+			     u8 port)
+{
+	struct qib_ibport *ibp = to_iport(ibdev, port);
+	u8 *p = (u8 *) smp->data;
+	unsigned i;
+
+	if (!(ibp->port_cap_flags & IB_PORT_SL_MAP_SUP)) {
+		smp->status |= IB_SMP_UNSUP_METHOD;
+		return reply(smp);
+	}
+
+	for (i = 0; i < ARRAY_SIZE(ibp->sl_to_vl); i += 2, p++) {
+		ibp->sl_to_vl[i] = *p >> 4;
+		ibp->sl_to_vl[i + 1] = *p & 0xF;
+	}
+	qib_set_uevent_bits(ppd_from_ibp(ibp), _QIB_EVENT_SL2VL_CHANGE_BIT);
+
+	return subn_get_sl_to_vl(smp, ibdev, port);
+}
+
+static int subn_get_vl_arb(struct ib_smp *smp, struct ib_device *ibdev,
+			   u8 port)
+{
+	unsigned which = be32_to_cpu(smp->attr_mod) >> 16;
+	struct qib_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));
+
+	memset(smp->data, 0, sizeof(smp->data));
+
+	if (ppd->vls_supported == IB_VL_VL0)
+		smp->status |= IB_SMP_UNSUP_METHOD;
+	else if (which == IB_VLARB_LOWPRI_0_31)
+		(void) ppd->dd->f_get_ib_table(ppd, QIB_IB_TBL_VL_LOW_ARB,
+						   smp->data);
+	else if (which == IB_VLARB_HIGHPRI_0_31)
+		(void) ppd->dd->f_get_ib_table(ppd, QIB_IB_TBL_VL_HIGH_ARB,
+						   smp->data);
+	else
+		smp->status |= IB_SMP_INVALID_FIELD;
+
+	return reply(smp);
+}
+
+static int subn_set_vl_arb(struct ib_smp *smp, struct ib_device *ibdev,
+			   u8 port)
+{
+	unsigned which = be32_to_cpu(smp->attr_mod) >> 16;
+	struct qib_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));
+
+	if (ppd->vls_supported == IB_VL_VL0)
+		smp->status |= IB_SMP_UNSUP_METHOD;
+	else if (which == IB_VLARB_LOWPRI_0_31)
+		(void) ppd->dd->f_set_ib_table(ppd, QIB_IB_TBL_VL_LOW_ARB,
+						   smp->data);
+	else if (which == IB_VLARB_HIGHPRI_0_31)
+		(void) ppd->dd->f_set_ib_table(ppd, QIB_IB_TBL_VL_HIGH_ARB,
+						   smp->data);
+	else
+		smp->status |= IB_SMP_INVALID_FIELD;
+
+	return subn_get_vl_arb(smp, ibdev, port);
+}
+
+static int subn_trap_repress(struct ib_smp *smp, struct ib_device *ibdev,
+			     u8 port)
+{
+	/*
+	 * For now, we only send the trap once so no need to process this.
+	 * o13-6, o13-7, o14-3.a4: the SMA shall not send any message in
+	 * response to a valid SubnTrapRepress() message.
+	 */
+	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
+}
+
+static int pma_get_classportinfo(struct ib_perf *pmp,
+				 struct ib_device *ibdev)
+{
+	struct ib_pma_classportinfo *p =
+		(struct ib_pma_classportinfo *)pmp->data;
+	struct qib_devdata *dd = dd_from_ibdev(ibdev);
+
+	memset(pmp->data, 0, sizeof(pmp->data));
+
+	if (pmp->attr_mod != 0)
+		pmp->status |= IB_SMP_INVALID_FIELD;
+
+	/* Note that AllPortSelect is not valid */
+	p->base_version = 1;
+	p->class_version = 1;
+	p->cap_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
+	/*
+	 * Set the most significant bit of CM2 to indicate support for
+	 * congestion statistics
+	 */
+	p->reserved[0] = dd->psxmitwait_supported << 7;
+	/*
+	 * Expected response time is 4.096 usec. * 2^18 == 1.073741824 sec.
+	 */
+	p->resp_time_value = 18;
+
+	return reply((struct ib_smp *) pmp);
+}
+
+static int pma_get_portsamplescontrol(struct ib_perf *pmp,
+				      struct ib_device *ibdev, u8 port)
+{
+	struct ib_pma_portsamplescontrol *p =
+		(struct ib_pma_portsamplescontrol *)pmp->data;
+	struct qib_ibdev *dev = to_idev(ibdev);
+	struct qib_devdata *dd = dd_from_dev(dev);
+	struct qib_ibport *ibp = to_iport(ibdev, port);
+	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+	unsigned long flags;
+	u8 port_select = p->port_select;
+
+	memset(pmp->data, 0, sizeof(pmp->data));
+
+	p->port_select = port_select;
+	if (pmp->attr_mod != 0 || port_select != port) {
+		pmp->status |= IB_SMP_INVALID_FIELD;
+		goto bail;
+	}
+	spin_lock_irqsave(&ibp->lock, flags);
+	p->tick = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_PMA_TICKS);
+	p->sample_status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
+	p->counter_width = 4;   /* 32 bit counters */
+	p->counter_mask0_9 = COUNTER_MASK0_9;
+	p->sample_start = cpu_to_be32(ibp->pma_sample_start);
+	p->sample_interval = cpu_to_be32(ibp->pma_sample_interval);
+	p->tag = cpu_to_be16(ibp->pma_tag);
+	p->counter_select[0] = ibp->pma_counter_select[0];
+	p->counter_select[1] = ibp->pma_counter_select[1];
+	p->counter_select[2] = ibp->pma_counter_select[2];
+	p->counter_select[3] = ibp->pma_counter_select[3];
+	p->counter_select[4] = ibp->pma_counter_select[4];
+	spin_unlock_irqrestore(&ibp->lock, flags);
+
+bail:
+	return reply((struct ib_smp *) pmp);
+}
+
+static int pma_set_portsamplescontrol(struct ib_perf *pmp,
+				      struct ib_device *ibdev, u8 port)
+{
+	struct ib_pma_portsamplescontrol *p =
+		(struct ib_pma_portsamplescontrol *)pmp->data;
+	struct qib_ibdev *dev = to_idev(ibdev);
+	struct qib_devdata *dd = dd_from_dev(dev);
+	struct qib_ibport *ibp = to_iport(ibdev, port);
+	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+	unsigned long flags;
+	u8 status, xmit_flags;
+	int ret;
+
+	if (pmp->attr_mod != 0 || p->port_select != port) {
+		pmp->status |= IB_SMP_INVALID_FIELD;
+		ret = reply((struct ib_smp *) pmp);
+		goto bail;
+	}
+
+	spin_lock_irqsave(&ibp->lock, flags);
+
+	/* Port Sampling code owns the PS* HW counters */
+	xmit_flags = ppd->cong_stats.flags;
+	ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_SAMPLE;
+	status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
+	if (status == IB_PMA_SAMPLE_STATUS_DONE ||
+	    (status == IB_PMA_SAMPLE_STATUS_RUNNING &&
+	     xmit_flags == IB_PMA_CONG_HW_CONTROL_TIMER)) {
+		ibp->pma_sample_start = be32_to_cpu(p->sample_start);
+		ibp->pma_sample_interval = be32_to_cpu(p->sample_interval);
+		ibp->pma_tag = be16_to_cpu(p->tag);
+		ibp->pma_counter_select[0] = p->counter_select[0];
+		ibp->pma_counter_select[1] = p->counter_select[1];
+		ibp->pma_counter_select[2] = p->counter_select[2];
+		ibp->pma_counter_select[3] = p->counter_select[3];
+		ibp->pma_counter_select[4] = p->counter_select[4];
+		dd->f_set_cntr_sample(ppd, ibp->pma_sample_interval,
+				      ibp->pma_sample_start);
+	}
+	spin_unlock_irqrestore(&ibp->lock, flags);
+
+	ret = pma_get_portsamplescontrol(pmp, ibdev, port);
+
+bail:
+	return ret;
+}
+
+static u64 get_counter(struct qib_ibport *ibp, struct qib_pportdata *ppd,
+		       __be16 sel)
+{
+	u64 ret;
+
+	switch (sel) {
+	case IB_PMA_PORT_XMIT_DATA:
+		ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITDATA);
+		break;
+	case IB_PMA_PORT_RCV_DATA:
+		ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSRCVDATA);
+		break;
+	case IB_PMA_PORT_XMIT_PKTS:
+		ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITPKTS);
+		break;
+	case IB_PMA_PORT_RCV_PKTS:
+		ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSRCVPKTS);
+		break;
+	case IB_PMA_PORT_XMIT_WAIT:
+		ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITWAIT);
+		break;
+	default:
+		ret = 0;
+	}
+
+	return ret;
+}
+
+/* The caller must already hold ppd->ibport_data.lock */
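+/*
+ * The hardware PSXMITWAIT count restarts with each sampling interval,
+ * so the running total is kept in cong_stats.counter and the current
+ * hardware value is folded in as a delta.
+ */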
+static u64 xmit_wait_get_value_delta(struct qib_pportdata *ppd)
+{
+	u32 delta;
+
+	delta = get_counter(&ppd->ibport_data, ppd,
+			    IB_PMA_PORT_XMIT_WAIT);
+	return ppd->cong_stats.counter + delta;
+}
+
+static void cache_hw_sample_counters(struct qib_pportdata *ppd)
+{
+	struct qib_ibport *ibp = &ppd->ibport_data;
+
+	ppd->cong_stats.counter_cache.psxmitdata =
+		get_counter(ibp, ppd, IB_PMA_PORT_XMIT_DATA);
+	ppd->cong_stats.counter_cache.psrcvdata =
+		get_counter(ibp, ppd, IB_PMA_PORT_RCV_DATA);
+	ppd->cong_stats.counter_cache.psxmitpkts =
+		get_counter(ibp, ppd, IB_PMA_PORT_XMIT_PKTS);
+	ppd->cong_stats.counter_cache.psrcvpkts =
+		get_counter(ibp, ppd, IB_PMA_PORT_RCV_PKTS);
+	ppd->cong_stats.counter_cache.psxmitwait =
+		get_counter(ibp, ppd, IB_PMA_PORT_XMIT_WAIT);
+}
+
+static u64 get_cache_hw_sample_counters(struct qib_pportdata *ppd,
+					__be16 sel)
+{
+	u64 ret;
+
+	switch (sel) {
+	case IB_PMA_PORT_XMIT_DATA:
+		ret = ppd->cong_stats.counter_cache.psxmitdata;
+		break;
+	case IB_PMA_PORT_RCV_DATA:
+		ret = ppd->cong_stats.counter_cache.psrcvdata;
+		break;
+	case IB_PMA_PORT_XMIT_PKTS:
+		ret = ppd->cong_stats.counter_cache.psxmitpkts;
+		break;
+	case IB_PMA_PORT_RCV_PKTS:
+		ret = ppd->cong_stats.counter_cache.psrcvpkts;
+		break;
+	case IB_PMA_PORT_XMIT_WAIT:
+		ret = ppd->cong_stats.counter_cache.psxmitwait;
+		break;
+	default:
+		ret = 0;
+	}
+
+	return ret;
+}
+
+static int pma_get_portsamplesresult(struct ib_perf *pmp,
+				     struct ib_device *ibdev, u8 port)
+{
+	struct ib_pma_portsamplesresult *p =
+		(struct ib_pma_portsamplesresult *)pmp->data;
+	struct qib_ibdev *dev = to_idev(ibdev);
+	struct qib_devdata *dd = dd_from_dev(dev);
+	struct qib_ibport *ibp = to_iport(ibdev, port);
+	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+	unsigned long flags;
+	u8 status;
+	int i;
+
+	memset(pmp->data, 0, sizeof(pmp->data));
+	spin_lock_irqsave(&ibp->lock, flags);
+	p->tag = cpu_to_be16(ibp->pma_tag);
+	if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_TIMER)
+		p->sample_status = IB_PMA_SAMPLE_STATUS_DONE;
+	else {
+		status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
+		p->sample_status = cpu_to_be16(status);
+		if (status == IB_PMA_SAMPLE_STATUS_DONE) {
+			cache_hw_sample_counters(ppd);
+			ppd->cong_stats.counter =
+				xmit_wait_get_value_delta(ppd);
+			dd->f_set_cntr_sample(ppd,
+					      QIB_CONG_TIMER_PSINTERVAL, 0);
+			ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
+		}
+	}
+	for (i = 0; i < ARRAY_SIZE(ibp->pma_counter_select); i++)
+		p->counter[i] = cpu_to_be32(
+			get_cache_hw_sample_counters(
+				ppd, ibp->pma_counter_select[i]));
+	spin_unlock_irqrestore(&ibp->lock, flags);
+
+	return reply((struct ib_smp *) pmp);
+}
+
+static int pma_get_portsamplesresult_ext(struct ib_perf *pmp,
+					 struct ib_device *ibdev, u8 port)
+{
+	struct ib_pma_portsamplesresult_ext *p =
+		(struct ib_pma_portsamplesresult_ext *)pmp->data;
+	struct qib_ibdev *dev = to_idev(ibdev);
+	struct qib_devdata *dd = dd_from_dev(dev);
+	struct qib_ibport *ibp = to_iport(ibdev, port);
+	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+	unsigned long flags;
+	u8 status;
+	int i;
+
+	/* Port Sampling code owns the PS* HW counters */
+	memset(pmp->data, 0, sizeof(pmp->data));
+	spin_lock_irqsave(&ibp->lock, flags);
+	p->tag = cpu_to_be16(ibp->pma_tag);
+	if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_TIMER)
+		p->sample_status = IB_PMA_SAMPLE_STATUS_DONE;
+	else {
+		status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
+		p->sample_status = cpu_to_be16(status);
+		/* 64 bits */
+		p->extended_width = cpu_to_be32(0x80000000);
+		if (status == IB_PMA_SAMPLE_STATUS_DONE) {
+			cache_hw_sample_counters(ppd);
+			ppd->cong_stats.counter =
+				xmit_wait_get_value_delta(ppd);
+			dd->f_set_cntr_sample(ppd,
+					      QIB_CONG_TIMER_PSINTERVAL, 0);
+			ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
+		}
+	}
+	for (i = 0; i < ARRAY_SIZE(ibp->pma_counter_select); i++)
+		p->counter[i] = cpu_to_be64(
+			get_cache_hw_sample_counters(
+				ppd, ibp->pma_counter_select[i]));
+	spin_unlock_irqrestore(&ibp->lock, flags);
+
+	return reply((struct ib_smp *) pmp);
+}
+
+static int pma_get_portcounters(struct ib_perf *pmp,
+				struct ib_device *ibdev, u8 port)
+{
+	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
+		pmp->data;
+	struct qib_ibport *ibp = to_iport(ibdev, port);
+	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+	struct qib_verbs_counters cntrs;
+	u8 port_select = p->port_select;
+
+	qib_get_counters(ppd, &cntrs);
+
+	/* Adjust counters for any resets done. */
+	cntrs.symbol_error_counter -= ibp->z_symbol_error_counter;
+	cntrs.link_error_recovery_counter -=
+		ibp->z_link_error_recovery_counter;
+	cntrs.link_downed_counter -= ibp->z_link_downed_counter;
+	cntrs.port_rcv_errors -= ibp->z_port_rcv_errors;
+	cntrs.port_rcv_remphys_errors -= ibp->z_port_rcv_remphys_errors;
+	cntrs.port_xmit_discards -= ibp->z_port_xmit_discards;
+	cntrs.port_xmit_data -= ibp->z_port_xmit_data;
+	cntrs.port_rcv_data -= ibp->z_port_rcv_data;
+	cntrs.port_xmit_packets -= ibp->z_port_xmit_packets;
+	cntrs.port_rcv_packets -= ibp->z_port_rcv_packets;
+	cntrs.local_link_integrity_errors -=
+		ibp->z_local_link_integrity_errors;
+	cntrs.excessive_buffer_overrun_errors -=
+		ibp->z_excessive_buffer_overrun_errors;
+	cntrs.vl15_dropped -= ibp->z_vl15_dropped;
+	cntrs.vl15_dropped += ibp->n_vl15_dropped;
+
+	memset(pmp->data, 0, sizeof(pmp->data));
+
+	p->port_select = port_select;
+	if (pmp->attr_mod != 0 || port_select != port)
+		pmp->status |= IB_SMP_INVALID_FIELD;
+
+	if (cntrs.symbol_error_counter > 0xFFFFUL)
+		p->symbol_error_counter = cpu_to_be16(0xFFFF);
+	else
+		p->symbol_error_counter =
+			cpu_to_be16((u16)cntrs.symbol_error_counter);
+	if (cntrs.link_error_recovery_counter > 0xFFUL)
+		p->link_error_recovery_counter = 0xFF;
+	else
+		p->link_error_recovery_counter =
+			(u8)cntrs.link_error_recovery_counter;
+	if (cntrs.link_downed_counter > 0xFFUL)
+		p->link_downed_counter = 0xFF;
+	else
+		p->link_downed_counter = (u8)cntrs.link_downed_counter;
+	if (cntrs.port_rcv_errors > 0xFFFFUL)
+		p->port_rcv_errors = cpu_to_be16(0xFFFF);
+	else
+		p->port_rcv_errors =
+			cpu_to_be16((u16) cntrs.port_rcv_errors);
+	if (cntrs.port_rcv_remphys_errors > 0xFFFFUL)
+		p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
+	else
+		p->port_rcv_remphys_errors =
+			cpu_to_be16((u16)cntrs.port_rcv_remphys_errors);
+	if (cntrs.port_xmit_discards > 0xFFFFUL)
+		p->port_xmit_discards = cpu_to_be16(0xFFFF);
+	else
+		p->port_xmit_discards =
+			cpu_to_be16((u16)cntrs.port_xmit_discards);
+	if (cntrs.local_link_integrity_errors > 0xFUL)
+		cntrs.local_link_integrity_errors = 0xFUL;
+	if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
+		cntrs.excessive_buffer_overrun_errors = 0xFUL;
+	p->lli_ebor_errors = (cntrs.local_link_integrity_errors << 4) |
+		cntrs.excessive_buffer_overrun_errors;
+	if (cntrs.vl15_dropped > 0xFFFFUL)
+		p->vl15_dropped = cpu_to_be16(0xFFFF);
+	else
+		p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped);
+	if (cntrs.port_xmit_data > 0xFFFFFFFFUL)
+		p->port_xmit_data = cpu_to_be32(0xFFFFFFFF);
+	else
+		p->port_xmit_data = cpu_to_be32((u32)cntrs.port_xmit_data);
+	if (cntrs.port_rcv_data > 0xFFFFFFFFUL)
+		p->port_rcv_data = cpu_to_be32(0xFFFFFFFF);
+	else
+		p->port_rcv_data = cpu_to_be32((u32)cntrs.port_rcv_data);
+	if (cntrs.port_xmit_packets > 0xFFFFFFFFUL)
+		p->port_xmit_packets = cpu_to_be32(0xFFFFFFFF);
+	else
+		p->port_xmit_packets =
+			cpu_to_be32((u32)cntrs.port_xmit_packets);
+	if (cntrs.port_rcv_packets > 0xFFFFFFFFUL)
+		p->port_rcv_packets = cpu_to_be32(0xFFFFFFFF);
+	else
+		p->port_rcv_packets =
+			cpu_to_be32((u32) cntrs.port_rcv_packets);
+
+	return reply((struct ib_smp *) pmp);
+}
+
+static int pma_get_portcounters_cong(struct ib_perf *pmp,
+				     struct ib_device *ibdev, u8 port)
+{
+	/* Congestion PMA packets start at offset 24 not 64 */
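+	/*
+	 * (The fixed header fields of struct ib_perf total 24 bytes, so
+	 * the congestion attribute overlays reserved[40] rather than
+	 * data[192], which starts at offset 64.)
+	 */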
+	struct ib_pma_portcounters_cong *p =
+		(struct ib_pma_portcounters_cong *)pmp->reserved;
+	struct qib_verbs_counters cntrs;
+	struct qib_ibport *ibp = to_iport(ibdev, port);
+	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+	struct qib_devdata *dd = dd_from_ppd(ppd);
+	u32 port_select = be32_to_cpu(pmp->attr_mod) & 0xFF;
+	u64 xmit_wait_counter;
+	unsigned long flags;
+
+	/*
+	 * This check is performed only in the GET method because the
+	 * SET method ends up calling this anyway.
+	 */
+	if (!dd->psxmitwait_supported)
+		pmp->status |= IB_SMP_UNSUP_METH_ATTR;
+	if (port_select != port)
+		pmp->status |= IB_SMP_INVALID_FIELD;
+
+	qib_get_counters(ppd, &cntrs);
+	spin_lock_irqsave(&ppd->ibport_data.lock, flags);
+	xmit_wait_counter = xmit_wait_get_value_delta(ppd);
+	spin_unlock_irqrestore(&ppd->ibport_data.lock, flags);
+
+	/* Adjust counters for any resets done. */
+	cntrs.symbol_error_counter -= ibp->z_symbol_error_counter;
+	cntrs.link_error_recovery_counter -=
+		ibp->z_link_error_recovery_counter;
+	cntrs.link_downed_counter -= ibp->z_link_downed_counter;
+	cntrs.port_rcv_errors -= ibp->z_port_rcv_errors;
+	cntrs.port_rcv_remphys_errors -=
+		ibp->z_port_rcv_remphys_errors;
+	cntrs.port_xmit_discards -= ibp->z_port_xmit_discards;
+	cntrs.local_link_integrity_errors -=
+		ibp->z_local_link_integrity_errors;
+	cntrs.excessive_buffer_overrun_errors -=
+		ibp->z_excessive_buffer_overrun_errors;
+	cntrs.vl15_dropped -= ibp->z_vl15_dropped;
+	cntrs.vl15_dropped += ibp->n_vl15_dropped;
+	cntrs.port_xmit_data -= ibp->z_port_xmit_data;
+	cntrs.port_rcv_data -= ibp->z_port_rcv_data;
+	cntrs.port_xmit_packets -= ibp->z_port_xmit_packets;
+	cntrs.port_rcv_packets -= ibp->z_port_rcv_packets;
+
+	memset(pmp->reserved, 0, sizeof(pmp->reserved) +
+	       sizeof(pmp->data));
+
+	/*
+	 * The top 3 bits select the units of the check interval carried
+	 * in the remaining bits; QIB reports picoseconds.
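+	 * (QIB_XMIT_RATE_PICO lands in bits 15:13, the rate in 12:0.)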
+	 */
+	p->port_check_rate =
+		cpu_to_be16((QIB_XMIT_RATE_PICO << 13) |
+			    (dd->psxmitwait_check_rate &
+			     ~(QIB_XMIT_RATE_PICO << 13)));
+	p->port_adr_events = cpu_to_be64(0);
+	p->port_xmit_wait = cpu_to_be64(xmit_wait_counter);
+	p->port_xmit_data = cpu_to_be64(cntrs.port_xmit_data);
+	p->port_rcv_data = cpu_to_be64(cntrs.port_rcv_data);
+	p->port_xmit_packets =
+		cpu_to_be64(cntrs.port_xmit_packets);
+	p->port_rcv_packets =
+		cpu_to_be64(cntrs.port_rcv_packets);
+	if (cntrs.symbol_error_counter > 0xFFFFUL)
+		p->symbol_error_counter = cpu_to_be16(0xFFFF);
+	else
+		p->symbol_error_counter =
+			cpu_to_be16(
+				(u16)cntrs.symbol_error_counter);
+	if (cntrs.link_error_recovery_counter > 0xFFUL)
+		p->link_error_recovery_counter = 0xFF;
+	else
+		p->link_error_recovery_counter =
+			(u8)cntrs.link_error_recovery_counter;
+	if (cntrs.link_downed_counter > 0xFFUL)
+		p->link_downed_counter = 0xFF;
+	else
+		p->link_downed_counter =
+			(u8)cntrs.link_downed_counter;
+	if (cntrs.port_rcv_errors > 0xFFFFUL)
+		p->port_rcv_errors = cpu_to_be16(0xFFFF);
+	else
+		p->port_rcv_errors =
+			cpu_to_be16((u16) cntrs.port_rcv_errors);
+	if (cntrs.port_rcv_remphys_errors > 0xFFFFUL)
+		p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
+	else
+		p->port_rcv_remphys_errors =
+			cpu_to_be16(
+				(u16)cntrs.port_rcv_remphys_errors);
+	if (cntrs.port_xmit_discards > 0xFFFFUL)
+		p->port_xmit_discards = cpu_to_be16(0xFFFF);
+	else
+		p->port_xmit_discards =
+			cpu_to_be16((u16)cntrs.port_xmit_discards);
+	if (cntrs.local_link_integrity_errors > 0xFUL)
+		cntrs.local_link_integrity_errors = 0xFUL;
+	if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
+		cntrs.excessive_buffer_overrun_errors = 0xFUL;
+	p->lli_ebor_errors = (cntrs.local_link_integrity_errors << 4) |
+		cntrs.excessive_buffer_overrun_errors;
+	if (cntrs.vl15_dropped > 0xFFFFUL)
+		p->vl15_dropped = cpu_to_be16(0xFFFF);
+	else
+		p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped);
+
+	return reply((struct ib_smp *)pmp);
+}
+
+static int pma_get_portcounters_ext(struct ib_perf *pmp,
+				    struct ib_device *ibdev, u8 port)
+{
+	struct ib_pma_portcounters_ext *p =
+		(struct ib_pma_portcounters_ext *)pmp->data;
+	struct qib_ibport *ibp = to_iport(ibdev, port);
+	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+	u64 swords, rwords, spkts, rpkts, xwait;
+	u8 port_select = p->port_select;
+
+	memset(pmp->data, 0, sizeof(pmp->data));
+
+	p->port_select = port_select;
+	if (pmp->attr_mod != 0 || port_select != port) {
+		pmp->status |= IB_SMP_INVALID_FIELD;
+		goto bail;
+	}
+
+	qib_snapshot_counters(ppd, &swords, &rwords, &spkts, &rpkts, &xwait);
+
+	/* Adjust counters for any resets done. */
+	swords -= ibp->z_port_xmit_data;
+	rwords -= ibp->z_port_rcv_data;
+	spkts -= ibp->z_port_xmit_packets;
+	rpkts -= ibp->z_port_rcv_packets;
+
+	p->port_xmit_data = cpu_to_be64(swords);
+	p->port_rcv_data = cpu_to_be64(rwords);
+	p->port_xmit_packets = cpu_to_be64(spkts);
+	p->port_rcv_packets = cpu_to_be64(rpkts);
+	p->port_unicast_xmit_packets = cpu_to_be64(ibp->n_unicast_xmit);
+	p->port_unicast_rcv_packets = cpu_to_be64(ibp->n_unicast_rcv);
+	p->port_multicast_xmit_packets = cpu_to_be64(ibp->n_multicast_xmit);
+	p->port_multicast_rcv_packets = cpu_to_be64(ibp->n_multicast_rcv);
+
+bail:
+	return reply((struct ib_smp *) pmp);
+}
+
+static int pma_set_portcounters(struct ib_perf *pmp,
+				struct ib_device *ibdev, u8 port)
+{
+	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
+		pmp->data;
+	struct qib_ibport *ibp = to_iport(ibdev, port);
+	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+	struct qib_verbs_counters cntrs;
+
+	/*
+	 * Since the HW doesn't support clearing counters, we save the
+	 * current count and subtract it from future responses.
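+	 * E.g. a counter that read 100 when "cleared" and reads 150 now
+	 * is reported as 50.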
+	 */
+	qib_get_counters(ppd, &cntrs);
+
+	if (p->counter_select & IB_PMA_SEL_SYMBOL_ERROR)
+		ibp->z_symbol_error_counter = cntrs.symbol_error_counter;
+
+	if (p->counter_select & IB_PMA_SEL_LINK_ERROR_RECOVERY)
+		ibp->z_link_error_recovery_counter =
+			cntrs.link_error_recovery_counter;
+
+	if (p->counter_select & IB_PMA_SEL_LINK_DOWNED)
+		ibp->z_link_downed_counter = cntrs.link_downed_counter;
+
+	if (p->counter_select & IB_PMA_SEL_PORT_RCV_ERRORS)
+		ibp->z_port_rcv_errors = cntrs.port_rcv_errors;
+
+	if (p->counter_select & IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS)
+		ibp->z_port_rcv_remphys_errors =
+			cntrs.port_rcv_remphys_errors;
+
+	if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DISCARDS)
+		ibp->z_port_xmit_discards = cntrs.port_xmit_discards;
+
+	if (p->counter_select & IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS)
+		ibp->z_local_link_integrity_errors =
+			cntrs.local_link_integrity_errors;
+
+	if (p->counter_select & IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS)
+		ibp->z_excessive_buffer_overrun_errors =
+			cntrs.excessive_buffer_overrun_errors;
+
+	if (p->counter_select & IB_PMA_SEL_PORT_VL15_DROPPED) {
+		ibp->n_vl15_dropped = 0;
+		ibp->z_vl15_dropped = cntrs.vl15_dropped;
+	}
+
+	if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DATA)
+		ibp->z_port_xmit_data = cntrs.port_xmit_data;
+
+	if (p->counter_select & IB_PMA_SEL_PORT_RCV_DATA)
+		ibp->z_port_rcv_data = cntrs.port_rcv_data;
+
+	if (p->counter_select & IB_PMA_SEL_PORT_XMIT_PACKETS)
+		ibp->z_port_xmit_packets = cntrs.port_xmit_packets;
+
+	if (p->counter_select & IB_PMA_SEL_PORT_RCV_PACKETS)
+		ibp->z_port_rcv_packets = cntrs.port_rcv_packets;
+
+	return pma_get_portcounters(pmp, ibdev, port);
+}
+
+static int pma_set_portcounters_cong(struct ib_perf *pmp,
+				     struct ib_device *ibdev, u8 port)
+{
+	struct qib_ibport *ibp = to_iport(ibdev, port);
+	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+	struct qib_devdata *dd = dd_from_ppd(ppd);
+	struct qib_verbs_counters cntrs;
+	u32 counter_select = (be32_to_cpu(pmp->attr_mod) >> 24) & 0xFF;
+	int ret = 0;
+	unsigned long flags;
+
+	qib_get_counters(ppd, &cntrs);
+	/* Get counter values before we save them */
+	ret = pma_get_portcounters_cong(pmp, ibdev, port);
+
+	if (counter_select & IB_PMA_SEL_CONG_XMIT) {
+		spin_lock_irqsave(&ppd->ibport_data.lock, flags);
+		ppd->cong_stats.counter = 0;
+		dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL,
+				      0x0);
+		spin_unlock_irqrestore(&ppd->ibport_data.lock, flags);
+	}
+	if (counter_select & IB_PMA_SEL_CONG_PORT_DATA) {
+		ibp->z_port_xmit_data = cntrs.port_xmit_data;
+		ibp->z_port_rcv_data = cntrs.port_rcv_data;
+		ibp->z_port_xmit_packets = cntrs.port_xmit_packets;
+		ibp->z_port_rcv_packets = cntrs.port_rcv_packets;
+	}
+	if (counter_select & IB_PMA_SEL_CONG_ALL) {
+		ibp->z_symbol_error_counter =
+			cntrs.symbol_error_counter;
+		ibp->z_link_error_recovery_counter =
+			cntrs.link_error_recovery_counter;
+		ibp->z_link_downed_counter =
+			cntrs.link_downed_counter;
+		ibp->z_port_rcv_errors = cntrs.port_rcv_errors;
+		ibp->z_port_rcv_remphys_errors =
+			cntrs.port_rcv_remphys_errors;
+		ibp->z_port_xmit_discards =
+			cntrs.port_xmit_discards;
+		ibp->z_local_link_integrity_errors =
+			cntrs.local_link_integrity_errors;
+		ibp->z_excessive_buffer_overrun_errors =
+			cntrs.excessive_buffer_overrun_errors;
+		ibp->n_vl15_dropped = 0;
+		ibp->z_vl15_dropped = cntrs.vl15_dropped;
+	}
+
+	return ret;
+}
+
+static int pma_set_portcounters_ext(struct ib_perf *pmp,
+				    struct ib_device *ibdev, u8 port)
+{
+	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
+		pmp->data;
+	struct qib_ibport *ibp = to_iport(ibdev, port);
+	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+	u64 swords, rwords, spkts, rpkts, xwait;
+
+	qib_snapshot_counters(ppd, &swords, &rwords, &spkts, &rpkts, &xwait);
+
+	if (p->counter_select & IB_PMA_SELX_PORT_XMIT_DATA)
+		ibp->z_port_xmit_data = swords;
+
+	if (p->counter_select & IB_PMA_SELX_PORT_RCV_DATA)
+		ibp->z_port_rcv_data = rwords;
+
+	if (p->counter_select & IB_PMA_SELX_PORT_XMIT_PACKETS)
+		ibp->z_port_xmit_packets = spkts;
+
+	if (p->counter_select & IB_PMA_SELX_PORT_RCV_PACKETS)
+		ibp->z_port_rcv_packets = rpkts;
+
+	if (p->counter_select & IB_PMA_SELX_PORT_UNI_XMIT_PACKETS)
+		ibp->n_unicast_xmit = 0;
+
+	if (p->counter_select & IB_PMA_SELX_PORT_UNI_RCV_PACKETS)
+		ibp->n_unicast_rcv = 0;
+
+	if (p->counter_select & IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS)
+		ibp->n_multicast_xmit = 0;
+
+	if (p->counter_select & IB_PMA_SELX_PORT_MULTI_RCV_PACKETS)
+		ibp->n_multicast_rcv = 0;
+
+	return pma_get_portcounters_ext(pmp, ibdev, port);
+}
+
+static int process_subn(struct ib_device *ibdev, int mad_flags,
+			u8 port, struct ib_mad *in_mad,
+			struct ib_mad *out_mad)
+{
+	struct ib_smp *smp = (struct ib_smp *)out_mad;
+	struct qib_ibport *ibp = to_iport(ibdev, port);
+	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+	int ret;
+
+	*out_mad = *in_mad;
+	if (smp->class_version != 1) {
+		smp->status |= IB_SMP_UNSUP_VERSION;
+		ret = reply(smp);
+		goto bail;
+	}
+
+	ret = check_mkey(ibp, smp, mad_flags);
+	if (ret) {
+		u32 port_num = be32_to_cpu(smp->attr_mod);
+
+		/*
+		 * For a get/set PortInfo aimed at another port, the target
+		 * port's M_Key is normally checked once the receiving
+		 * port's M_Key passes.  Since the receiving port's check
+		 * failed here, run the target port's check explicitly so
+		 * the error counters are incremented when the M_Key fails
+		 * to match on *both* ports.
+		 */
+		if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
+		    (smp->method == IB_MGMT_METHOD_GET ||
+		     smp->method == IB_MGMT_METHOD_SET) &&
+		    port_num && port_num <= ibdev->phys_port_cnt &&
+		    port != port_num)
+			(void) check_mkey(to_iport(ibdev, port_num), smp, 0);
+		goto bail;
+	}
+
+	switch (smp->method) {
+	case IB_MGMT_METHOD_GET:
+		switch (smp->attr_id) {
+		case IB_SMP_ATTR_NODE_DESC:
+			ret = subn_get_nodedescription(smp, ibdev);
+			goto bail;
+		case IB_SMP_ATTR_NODE_INFO:
+			ret = subn_get_nodeinfo(smp, ibdev, port);
+			goto bail;
+		case IB_SMP_ATTR_GUID_INFO:
+			ret = subn_get_guidinfo(smp, ibdev, port);
+			goto bail;
+		case IB_SMP_ATTR_PORT_INFO:
+			ret = subn_get_portinfo(smp, ibdev, port);
+			goto bail;
+		case IB_SMP_ATTR_PKEY_TABLE:
+			ret = subn_get_pkeytable(smp, ibdev, port);
+			goto bail;
+		case IB_SMP_ATTR_SL_TO_VL_TABLE:
+			ret = subn_get_sl_to_vl(smp, ibdev, port);
+			goto bail;
+		case IB_SMP_ATTR_VL_ARB_TABLE:
+			ret = subn_get_vl_arb(smp, ibdev, port);
+			goto bail;
+		case IB_SMP_ATTR_SM_INFO:
+			if (ibp->port_cap_flags & IB_PORT_SM_DISABLED) {
+				ret = IB_MAD_RESULT_SUCCESS |
+					IB_MAD_RESULT_CONSUMED;
+				goto bail;
+			}
+			if (ibp->port_cap_flags & IB_PORT_SM) {
+				ret = IB_MAD_RESULT_SUCCESS;
+				goto bail;
+			}
+			/* FALLTHROUGH */
+		default:
+			smp->status |= IB_SMP_UNSUP_METH_ATTR;
+			ret = reply(smp);
+			goto bail;
+		}
+
+	case IB_MGMT_METHOD_SET:
+		switch (smp->attr_id) {
+		case IB_SMP_ATTR_GUID_INFO:
+			ret = subn_set_guidinfo(smp, ibdev, port);
+			goto bail;
+		case IB_SMP_ATTR_PORT_INFO:
+			ret = subn_set_portinfo(smp, ibdev, port);
+			goto bail;
+		case IB_SMP_ATTR_PKEY_TABLE:
+			ret = subn_set_pkeytable(smp, ibdev, port);
+			goto bail;
+		case IB_SMP_ATTR_SL_TO_VL_TABLE:
+			ret = subn_set_sl_to_vl(smp, ibdev, port);
+			goto bail;
+		case IB_SMP_ATTR_VL_ARB_TABLE:
+			ret = subn_set_vl_arb(smp, ibdev, port);
+			goto bail;
+		case IB_SMP_ATTR_SM_INFO:
+			if (ibp->port_cap_flags & IB_PORT_SM_DISABLED) {
+				ret = IB_MAD_RESULT_SUCCESS |
+					IB_MAD_RESULT_CONSUMED;
+				goto bail;
+			}
+			if (ibp->port_cap_flags & IB_PORT_SM) {
+				ret = IB_MAD_RESULT_SUCCESS;
+				goto bail;
+			}
+			/* FALLTHROUGH */
+		default:
+			smp->status |= IB_SMP_UNSUP_METH_ATTR;
+			ret = reply(smp);
+			goto bail;
+		}
+
+	case IB_MGMT_METHOD_TRAP_REPRESS:
+		if (smp->attr_id == IB_SMP_ATTR_NOTICE)
+			ret = subn_trap_repress(smp, ibdev, port);
+		else {
+			smp->status |= IB_SMP_UNSUP_METH_ATTR;
+			ret = reply(smp);
+		}
+		goto bail;
+
+	case IB_MGMT_METHOD_TRAP:
+	case IB_MGMT_METHOD_REPORT:
+	case IB_MGMT_METHOD_REPORT_RESP:
+	case IB_MGMT_METHOD_GET_RESP:
+		/*
+		 * The ib_mad module will call us to process responses
+		 * before checking for other consumers.
+		 * Just tell the caller to process it normally.
+		 */
+		ret = IB_MAD_RESULT_SUCCESS;
+		goto bail;
+
+	case IB_MGMT_METHOD_SEND:
+		if (ib_get_smp_direction(smp) &&
+		    smp->attr_id == QIB_VENDOR_IPG) {
+			ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PORT,
+					      smp->data[0]);
+			ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
+		} else
+			ret = IB_MAD_RESULT_SUCCESS;
+		goto bail;
+
+	default:
+		smp->status |= IB_SMP_UNSUP_METHOD;
+		ret = reply(smp);
+	}
+
+bail:
+	return ret;
+}
+
+static int process_perf(struct ib_device *ibdev, u8 port,
+			struct ib_mad *in_mad,
+			struct ib_mad *out_mad)
+{
+	struct ib_perf *pmp = (struct ib_perf *)out_mad;
+	int ret;
+
+	*out_mad = *in_mad;
+	if (pmp->class_version != 1) {
+		pmp->status |= IB_SMP_UNSUP_VERSION;
+		ret = reply((struct ib_smp *) pmp);
+		goto bail;
+	}
+
+	switch (pmp->method) {
+	case IB_MGMT_METHOD_GET:
+		switch (pmp->attr_id) {
+		case IB_PMA_CLASS_PORT_INFO:
+			ret = pma_get_classportinfo(pmp, ibdev);
+			goto bail;
+		case IB_PMA_PORT_SAMPLES_CONTROL:
+			ret = pma_get_portsamplescontrol(pmp, ibdev, port);
+			goto bail;
+		case IB_PMA_PORT_SAMPLES_RESULT:
+			ret = pma_get_portsamplesresult(pmp, ibdev, port);
+			goto bail;
+		case IB_PMA_PORT_SAMPLES_RESULT_EXT:
+			ret = pma_get_portsamplesresult_ext(pmp, ibdev, port);
+			goto bail;
+		case IB_PMA_PORT_COUNTERS:
+			ret = pma_get_portcounters(pmp, ibdev, port);
+			goto bail;
+		case IB_PMA_PORT_COUNTERS_EXT:
+			ret = pma_get_portcounters_ext(pmp, ibdev, port);
+			goto bail;
+		case IB_PMA_PORT_COUNTERS_CONG:
+			ret = pma_get_portcounters_cong(pmp, ibdev, port);
+			goto bail;
+		default:
+			pmp->status |= IB_SMP_UNSUP_METH_ATTR;
+			ret = reply((struct ib_smp *) pmp);
+			goto bail;
+		}
+
+	case IB_MGMT_METHOD_SET:
+		switch (pmp->attr_id) {
+		case IB_PMA_PORT_SAMPLES_CONTROL:
+			ret = pma_set_portsamplescontrol(pmp, ibdev, port);
+			goto bail;
+		case IB_PMA_PORT_COUNTERS:
+			ret = pma_set_portcounters(pmp, ibdev, port);
+			goto bail;
+		case IB_PMA_PORT_COUNTERS_EXT:
+			ret = pma_set_portcounters_ext(pmp, ibdev, port);
+			goto bail;
+		case IB_PMA_PORT_COUNTERS_CONG:
+			ret = pma_set_portcounters_cong(pmp, ibdev, port);
+			goto bail;
+		default:
+			pmp->status |= IB_SMP_UNSUP_METH_ATTR;
+			ret = reply((struct ib_smp *) pmp);
+			goto bail;
+		}
+
+	case IB_MGMT_METHOD_TRAP:
+	case IB_MGMT_METHOD_GET_RESP:
+		/*
+		 * The ib_mad module will call us to process responses
+		 * before checking for other consumers.
+		 * Just tell the caller to process it normally.
+		 */
+		ret = IB_MAD_RESULT_SUCCESS;
+		goto bail;
+
+	default:
+		pmp->status |= IB_SMP_UNSUP_METHOD;
+		ret = reply((struct ib_smp *) pmp);
+	}
+
+bail:
+	return ret;
+}
+
+/**
+ * qib_process_mad - process an incoming MAD packet
+ * @ibdev: the infiniband device this packet came in on
+ * @mad_flags: MAD flags
+ * @port: the port number this packet came in on
+ * @in_wc: the work completion entry for this packet
+ * @in_grh: the global route header for this packet
+ * @in_mad: the incoming MAD
+ * @out_mad: any outgoing MAD reply
+ *
+ * Returns IB_MAD_RESULT_SUCCESS alone (no REPLY or CONSUMED flags) if
+ * this is a MAD we are not interested in processing.
+ *
+ * Note that the verbs framework has already done the MAD sanity checks,
+ * and hop count/pointer updating for IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
+ * MADs.
+ *
+ * This is called by the ib_mad module.
+ */
+int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
+		    struct ib_wc *in_wc, struct ib_grh *in_grh,
+		    struct ib_mad *in_mad, struct ib_mad *out_mad)
+{
+	int ret;
+
+	switch (in_mad->mad_hdr.mgmt_class) {
+	case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
+	case IB_MGMT_CLASS_SUBN_LID_ROUTED:
+		ret = process_subn(ibdev, mad_flags, port, in_mad, out_mad);
+		goto bail;
+
+	case IB_MGMT_CLASS_PERF_MGMT:
+		ret = process_perf(ibdev, port, in_mad, out_mad);
+		goto bail;
+
+	default:
+		ret = IB_MAD_RESULT_SUCCESS;
+	}
+
+bail:
+	return ret;
+}
+
+static void send_handler(struct ib_mad_agent *agent,
+			 struct ib_mad_send_wc *mad_send_wc)
+{
+	ib_free_send_mad(mad_send_wc->send_buf);
+}
+
+static void xmit_wait_timer_func(unsigned long opaque)
+{
+	struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
+	struct qib_devdata *dd = dd_from_ppd(ppd);
+	unsigned long flags;
+	u8 status;
+
+	spin_lock_irqsave(&ppd->ibport_data.lock, flags);
+	if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_SAMPLE) {
+		status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
+		if (status == IB_PMA_SAMPLE_STATUS_DONE) {
+			/* save counter cache */
+			cache_hw_sample_counters(ppd);
+			ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
+		} else
+			goto done;
+	}
+	ppd->cong_stats.counter = xmit_wait_get_value_delta(ppd);
+	dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL, 0x0);
+done:
+	spin_unlock_irqrestore(&ppd->ibport_data.lock, flags);
+	mod_timer(&ppd->cong_stats.timer, jiffies + HZ);
+}
+
+int qib_create_agents(struct qib_ibdev *dev)
+{
+	struct qib_devdata *dd = dd_from_dev(dev);
+	struct ib_mad_agent *agent;
+	struct qib_ibport *ibp;
+	int p;
+	int ret;
+
+	for (p = 0; p < dd->num_pports; p++) {
+		ibp = &dd->pport[p].ibport_data;
+		agent = ib_register_mad_agent(&dev->ibdev, p + 1, IB_QPT_SMI,
+					      NULL, 0, send_handler,
+					      NULL, NULL);
+		if (IS_ERR(agent)) {
+			ret = PTR_ERR(agent);
+			goto err;
+		}
+
+		/* Initialize xmit_wait structure */
+		dd->pport[p].cong_stats.counter = 0;
+		init_timer(&dd->pport[p].cong_stats.timer);
+		dd->pport[p].cong_stats.timer.function = xmit_wait_timer_func;
+		dd->pport[p].cong_stats.timer.data =
+			(unsigned long)(&dd->pport[p]);
+		dd->pport[p].cong_stats.timer.expires = 0;
+		add_timer(&dd->pport[p].cong_stats.timer);
+
+		ibp->send_agent = agent;
+	}
+
+	return 0;
+
+err:
+	for (p = 0; p < dd->num_pports; p++) {
+		ibp = &dd->pport[p].ibport_data;
+		if (ibp->send_agent) {
+			agent = ibp->send_agent;
+			ibp->send_agent = NULL;
+			ib_unregister_mad_agent(agent);
+		}
+	}
+
+	return ret;
+}
+
+void qib_free_agents(struct qib_ibdev *dev)
+{
+	struct qib_devdata *dd = dd_from_dev(dev);
+	struct ib_mad_agent *agent;
+	struct qib_ibport *ibp;
+	int p;
+
+	for (p = 0; p < dd->num_pports; p++) {
+		ibp = &dd->pport[p].ibport_data;
+		if (ibp->send_agent) {
+			agent = ibp->send_agent;
+			ibp->send_agent = NULL;
+			ib_unregister_mad_agent(agent);
+		}
+		if (ibp->sm_ah) {
+			ib_destroy_ah(&ibp->sm_ah->ibah);
+			ibp->sm_ah = NULL;
+		}
+		if (dd->pport[p].cong_stats.timer.data)
+			del_timer_sync(&dd->pport[p].cong_stats.timer);
+	}
+}
diff --git a/drivers/infiniband/hw/qib/qib_mad.h b/drivers/infiniband/hw/qib/qib_mad.h
new file mode 100644
index 000000000000..147aff9117d7
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_mad.h
@@ -0,0 +1,373 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
+ * All rights reserved.
+ * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
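+/* MAD response status codes, carried in bits 4:2 of the status field */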
+#define IB_SMP_UNSUP_VERSION    cpu_to_be16(0x0004)
+#define IB_SMP_UNSUP_METHOD     cpu_to_be16(0x0008)
+#define IB_SMP_UNSUP_METH_ATTR  cpu_to_be16(0x000C)
+#define IB_SMP_INVALID_FIELD    cpu_to_be16(0x001C)
+
+struct ib_node_info {
+	u8 base_version;
+	u8 class_version;
+	u8 node_type;
+	u8 num_ports;
+	__be64 sys_guid;
+	__be64 node_guid;
+	__be64 port_guid;
+	__be16 partition_cap;
+	__be16 device_id;
+	__be32 revision;
+	u8 local_port_num;
+	u8 vendor_id[3];
+} __attribute__ ((packed));
+
+struct ib_mad_notice_attr {
+	u8 generic_type;
+	u8 prod_type_msb;
+	__be16 prod_type_lsb;
+	__be16 trap_num;
+	__be16 issuer_lid;
+	__be16 toggle_count;
+
+	union {
+		struct {
+			u8	details[54];
+		} raw_data;
+
+		struct {
+			__be16	reserved;
+			__be16	lid;		/* where violation happened */
+			u8	port_num;	/* where violation happened */
+		} __attribute__ ((packed)) ntc_129_131;
+
+		struct {
+			__be16	reserved;
+			__be16	lid;		/* LID where change occurred */
+			u8	reserved2;
+			u8	local_changes;	/* low bit - local changes */
+			__be32	new_cap_mask;	/* new capability mask */
+			u8	reserved3;
+			u8	change_flags;	/* low 3 bits only */
+		} __attribute__ ((packed)) ntc_144;
+
+		struct {
+			__be16	reserved;
+			__be16	lid;		/* lid where sys guid changed */
+			__be16	reserved2;
+			__be64	new_sys_guid;
+		} __attribute__ ((packed)) ntc_145;
+
+		struct {
+			__be16	reserved;
+			__be16	lid;
+			__be16	dr_slid;
+			u8	method;
+			u8	reserved2;
+			__be16	attr_id;
+			__be32	attr_mod;
+			__be64	mkey;
+			u8	reserved3;
+			u8	dr_trunc_hop;
+			u8	dr_rtn_path[30];
+		} __attribute__ ((packed)) ntc_256;
+
+		struct {
+			__be16		reserved;
+			__be16		lid1;
+			__be16		lid2;
+			__be32		key;
+			__be32		sl_qp1;	/* SL: high 4 bits */
+			__be32		qp2;	/* high 8 bits reserved */
+			union ib_gid	gid1;
+			union ib_gid	gid2;
+		} __attribute__ ((packed)) ntc_257_258;
+
+	} details;
+};
+
+/*
+ * Generic trap/notice types
+ */
+#define IB_NOTICE_TYPE_FATAL	0x80
+#define IB_NOTICE_TYPE_URGENT	0x81
+#define IB_NOTICE_TYPE_SECURITY	0x82
+#define IB_NOTICE_TYPE_SM	0x83
+#define IB_NOTICE_TYPE_INFO	0x84
+
+/*
+ * Generic trap/notice producers
+ */
+#define IB_NOTICE_PROD_CA		cpu_to_be16(1)
+#define IB_NOTICE_PROD_SWITCH		cpu_to_be16(2)
+#define IB_NOTICE_PROD_ROUTER		cpu_to_be16(3)
+#define IB_NOTICE_PROD_CLASS_MGR	cpu_to_be16(4)
+
+/*
+ * Generic trap/notice numbers
+ */
+#define IB_NOTICE_TRAP_LLI_THRESH	cpu_to_be16(129)
+#define IB_NOTICE_TRAP_EBO_THRESH	cpu_to_be16(130)
+#define IB_NOTICE_TRAP_FLOW_UPDATE	cpu_to_be16(131)
+#define IB_NOTICE_TRAP_CAP_MASK_CHG	cpu_to_be16(144)
+#define IB_NOTICE_TRAP_SYS_GUID_CHG	cpu_to_be16(145)
+#define IB_NOTICE_TRAP_BAD_MKEY		cpu_to_be16(256)
+#define IB_NOTICE_TRAP_BAD_PKEY		cpu_to_be16(257)
+#define IB_NOTICE_TRAP_BAD_QKEY		cpu_to_be16(258)
+
+/*
+ * Repress trap/notice flags
+ */
+#define IB_NOTICE_REPRESS_LLI_THRESH	(1 << 0)
+#define IB_NOTICE_REPRESS_EBO_THRESH	(1 << 1)
+#define IB_NOTICE_REPRESS_FLOW_UPDATE	(1 << 2)
+#define IB_NOTICE_REPRESS_CAP_MASK_CHG	(1 << 3)
+#define IB_NOTICE_REPRESS_SYS_GUID_CHG	(1 << 4)
+#define IB_NOTICE_REPRESS_BAD_MKEY	(1 << 5)
+#define IB_NOTICE_REPRESS_BAD_PKEY	(1 << 6)
+#define IB_NOTICE_REPRESS_BAD_QKEY	(1 << 7)
+
+/*
+ * Generic trap/notice other local changes flags (trap 144).
+ */
+#define IB_NOTICE_TRAP_LSE_CHG		0x04	/* Link Speed Enable changed */
+#define IB_NOTICE_TRAP_LWE_CHG		0x02	/* Link Width Enable changed */
+#define IB_NOTICE_TRAP_NODE_DESC_CHG	0x01
+
+/*
+ * Generic trap/notice M_Key violation flags in dr_trunc_hop (trap 256).
+ */
+#define IB_NOTICE_TRAP_DR_NOTICE	0x80
+#define IB_NOTICE_TRAP_DR_TRUNC		0x40
+
+struct ib_vl_weight_elem {
+	u8      vl;     /* Only low 4 bits, upper 4 bits reserved */
+	u8      weight;
+};
+
+#define IB_VLARB_LOWPRI_0_31    1
+#define IB_VLARB_LOWPRI_32_63   2
+#define IB_VLARB_HIGHPRI_0_31   3
+#define IB_VLARB_HIGHPRI_32_63  4
+
+/*
+ * PMA class portinfo capability mask bits
+ */
+#define IB_PMA_CLASS_CAP_ALLPORTSELECT  cpu_to_be16(1 << 8)
+#define IB_PMA_CLASS_CAP_EXT_WIDTH      cpu_to_be16(1 << 9)
+#define IB_PMA_CLASS_CAP_XMIT_WAIT      cpu_to_be16(1 << 12)
+
+#define IB_PMA_CLASS_PORT_INFO          cpu_to_be16(0x0001)
+#define IB_PMA_PORT_SAMPLES_CONTROL     cpu_to_be16(0x0010)
+#define IB_PMA_PORT_SAMPLES_RESULT      cpu_to_be16(0x0011)
+#define IB_PMA_PORT_COUNTERS            cpu_to_be16(0x0012)
+#define IB_PMA_PORT_COUNTERS_EXT        cpu_to_be16(0x001D)
+#define IB_PMA_PORT_SAMPLES_RESULT_EXT  cpu_to_be16(0x001E)
+#define IB_PMA_PORT_COUNTERS_CONG       cpu_to_be16(0xFF00)
+
+struct ib_perf {
+	u8 base_version;
+	u8 mgmt_class;
+	u8 class_version;
+	u8 method;
+	__be16 status;
+	__be16 unused;
+	__be64 tid;
+	__be16 attr_id;
+	__be16 resv;
+	__be32 attr_mod;
+	u8 reserved[40];
+	u8 data[192];
+} __attribute__ ((packed));
+
+struct ib_pma_classportinfo {
+	u8 base_version;
+	u8 class_version;
+	__be16 cap_mask;
+	u8 reserved[3];
+	u8 resp_time_value;     /* only lower 5 bits */
+	union ib_gid redirect_gid;
+	__be32 redirect_tc_sl_fl;       /* 8, 4, 20 bits respectively */
+	__be16 redirect_lid;
+	__be16 redirect_pkey;
+	__be32 redirect_qp;     /* only lower 24 bits */
+	__be32 redirect_qkey;
+	union ib_gid trap_gid;
+	__be32 trap_tc_sl_fl;   /* 8, 4, 20 bits respectively */
+	__be16 trap_lid;
+	__be16 trap_pkey;
+	__be32 trap_hl_qp;      /* 8, 24 bits respectively */
+	__be32 trap_qkey;
+} __attribute__ ((packed));
+
+struct ib_pma_portsamplescontrol {
+	u8 opcode;
+	u8 port_select;
+	u8 tick;
+	u8 counter_width;       /* only lower 3 bits */
+	__be32 counter_mask0_9; /* 2, 10 * 3, bits */
+	__be16 counter_mask10_14;       /* 1, 5 * 3, bits */
+	u8 sample_mechanisms;
+	u8 sample_status;       /* only lower 2 bits */
+	__be64 option_mask;
+	__be64 vendor_mask;
+	__be32 sample_start;
+	__be32 sample_interval;
+	__be16 tag;
+	__be16 counter_select[15];
+} __attribute__ ((packed));
+
+struct ib_pma_portsamplesresult {
+	__be16 tag;
+	__be16 sample_status;   /* only lower 2 bits */
+	__be32 counter[15];
+} __attribute__ ((packed));
+
+struct ib_pma_portsamplesresult_ext {
+	__be16 tag;
+	__be16 sample_status;   /* only lower 2 bits */
+	__be32 extended_width;  /* only upper 2 bits */
+	__be64 counter[15];
+} __attribute__ ((packed));
+
+struct ib_pma_portcounters {
+	u8 reserved;
+	u8 port_select;
+	__be16 counter_select;
+	__be16 symbol_error_counter;
+	u8 link_error_recovery_counter;
+	u8 link_downed_counter;
+	__be16 port_rcv_errors;
+	__be16 port_rcv_remphys_errors;
+	__be16 port_rcv_switch_relay_errors;
+	__be16 port_xmit_discards;
+	u8 port_xmit_constraint_errors;
+	u8 port_rcv_constraint_errors;
+	u8 reserved1;
+	u8 lli_ebor_errors;     /* 4, 4, bits */
+	__be16 reserved2;
+	__be16 vl15_dropped;
+	__be32 port_xmit_data;
+	__be32 port_rcv_data;
+	__be32 port_xmit_packets;
+	__be32 port_rcv_packets;
+} __attribute__ ((packed));
+
+struct ib_pma_portcounters_cong {
+	u8 reserved;
+	u8 reserved1;
+	__be16 port_check_rate;
+	__be16 symbol_error_counter;
+	u8 link_error_recovery_counter;
+	u8 link_downed_counter;
+	__be16 port_rcv_errors;
+	__be16 port_rcv_remphys_errors;
+	__be16 port_rcv_switch_relay_errors;
+	__be16 port_xmit_discards;
+	u8 port_xmit_constraint_errors;
+	u8 port_rcv_constraint_errors;
+	u8 reserved2;
+	u8 lli_ebor_errors;    /* 4, 4, bits */
+	__be16 reserved3;
+	__be16 vl15_dropped;
+	__be64 port_xmit_data;
+	__be64 port_rcv_data;
+	__be64 port_xmit_packets;
+	__be64 port_rcv_packets;
+	__be64 port_xmit_wait;
+	__be64 port_adr_events;
+} __attribute__ ((packed));
+
+#define IB_PMA_CONG_HW_CONTROL_TIMER            0x00
+#define IB_PMA_CONG_HW_CONTROL_SAMPLE           0x01
+
+#define QIB_XMIT_RATE_UNSUPPORTED               0x0
+#define QIB_XMIT_RATE_PICO                      0x7
+/* number of 4 nsec cycles equaling 2 secs */
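+/* (0x1DCD64EC == 499999980; 499999980 * 4 ns ~= 2.0 sec) */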
+#define QIB_CONG_TIMER_PSINTERVAL               0x1DCD64EC
+
+#define IB_PMA_SEL_SYMBOL_ERROR                 cpu_to_be16(0x0001)
+#define IB_PMA_SEL_LINK_ERROR_RECOVERY          cpu_to_be16(0x0002)
+#define IB_PMA_SEL_LINK_DOWNED                  cpu_to_be16(0x0004)
+#define IB_PMA_SEL_PORT_RCV_ERRORS              cpu_to_be16(0x0008)
+#define IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS      cpu_to_be16(0x0010)
+#define IB_PMA_SEL_PORT_XMIT_DISCARDS           cpu_to_be16(0x0040)
+#define IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS  cpu_to_be16(0x0200)
+#define IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS    cpu_to_be16(0x0400)
+#define IB_PMA_SEL_PORT_VL15_DROPPED            cpu_to_be16(0x0800)
+#define IB_PMA_SEL_PORT_XMIT_DATA               cpu_to_be16(0x1000)
+#define IB_PMA_SEL_PORT_RCV_DATA                cpu_to_be16(0x2000)
+#define IB_PMA_SEL_PORT_XMIT_PACKETS            cpu_to_be16(0x4000)
+#define IB_PMA_SEL_PORT_RCV_PACKETS             cpu_to_be16(0x8000)
+
+#define IB_PMA_SEL_CONG_ALL                     0x01
+#define IB_PMA_SEL_CONG_PORT_DATA               0x02
+#define IB_PMA_SEL_CONG_XMIT                    0x04
+#define IB_PMA_SEL_CONG_ROUTING                 0x08
+
+struct ib_pma_portcounters_ext {
+	u8 reserved;
+	u8 port_select;
+	__be16 counter_select;
+	__be32 reserved1;
+	__be64 port_xmit_data;
+	__be64 port_rcv_data;
+	__be64 port_xmit_packets;
+	__be64 port_rcv_packets;
+	__be64 port_unicast_xmit_packets;
+	__be64 port_unicast_rcv_packets;
+	__be64 port_multicast_xmit_packets;
+	__be64 port_multicast_rcv_packets;
+} __attribute__ ((packed));
+
+#define IB_PMA_SELX_PORT_XMIT_DATA              cpu_to_be16(0x0001)
+#define IB_PMA_SELX_PORT_RCV_DATA               cpu_to_be16(0x0002)
+#define IB_PMA_SELX_PORT_XMIT_PACKETS           cpu_to_be16(0x0004)
+#define IB_PMA_SELX_PORT_RCV_PACKETS            cpu_to_be16(0x0008)
+#define IB_PMA_SELX_PORT_UNI_XMIT_PACKETS       cpu_to_be16(0x0010)
+#define IB_PMA_SELX_PORT_UNI_RCV_PACKETS        cpu_to_be16(0x0020)
+#define IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS     cpu_to_be16(0x0040)
+#define IB_PMA_SELX_PORT_MULTI_RCV_PACKETS      cpu_to_be16(0x0080)
+
+/*
+ * The PortSamplesControl.CounterMasks field is an array of 3 bit fields
+ * which specify the N'th counter's capabilities. See ch. 16.1.3.2.
+ * We support 5 counters which only count the mandatory quantities.
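+ * E.g. COUNTER_MASK(1, 0) == 1 << 27 marks counter 0 as supported, so
+ * COUNTER_MASK0_9 advertises counters 0 through 4.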
+ */
+#define COUNTER_MASK(q, n) (q << ((9 - n) * 3))
+#define COUNTER_MASK0_9 \
+	cpu_to_be32(COUNTER_MASK(1, 0) | \
+		    COUNTER_MASK(1, 1) | \
+		    COUNTER_MASK(1, 2) | \
+		    COUNTER_MASK(1, 3) | \
+		    COUNTER_MASK(1, 4))
diff --git a/drivers/infiniband/hw/qib/qib_mmap.c b/drivers/infiniband/hw/qib/qib_mmap.c
new file mode 100644
index 000000000000..8b73a11d571c
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_mmap.c
@@ -0,0 +1,174 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+#include <linux/errno.h>
+#include <asm/pgtable.h>
+
+#include "qib_verbs.h"
+
+/**
+ * qib_release_mmap_info - free mmap info structure
+ * @ref: a pointer to the kref within struct qib_mmap_info
+ */
+void qib_release_mmap_info(struct kref *ref)
+{
+	struct qib_mmap_info *ip =
+		container_of(ref, struct qib_mmap_info, ref);
+	struct qib_ibdev *dev = to_idev(ip->context->device);
+
+	spin_lock_irq(&dev->pending_lock);
+	list_del(&ip->pending_mmaps);
+	spin_unlock_irq(&dev->pending_lock);
+
+	vfree(ip->obj);
+	kfree(ip);
+}
+
+/*
+ * qib_vma_open() and qib_vma_close() keep track of how many times the
+ * memory region is mapped, to avoid releasing it while a mapping is
+ * still live.
+ */
+static void qib_vma_open(struct vm_area_struct *vma)
+{
+	struct qib_mmap_info *ip = vma->vm_private_data;
+
+	kref_get(&ip->ref);
+}
+
+static void qib_vma_close(struct vm_area_struct *vma)
+{
+	struct qib_mmap_info *ip = vma->vm_private_data;
+
+	kref_put(&ip->ref, qib_release_mmap_info);
+}
+
+static struct vm_operations_struct qib_vm_ops = {
+	.open =     qib_vma_open,
+	.close =    qib_vma_close,
+};
+
+/**
+ * qib_mmap - create a new mmap region
+ * @context: the IB user context of the process making the mmap() call
+ * @vma: the VMA to be initialized
+ *
+ * Return zero if the mmap is OK. Otherwise, return an errno.
+ */
+int qib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
+{
+	struct qib_ibdev *dev = to_idev(context->device);
+	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+	unsigned long size = vma->vm_end - vma->vm_start;
+	struct qib_mmap_info *ip, *pp;
+	int ret = -EINVAL;
+
+	/*
+	 * Search the device's list of objects waiting for a mmap call.
+	 * Normally, this list is very short since a call to create a
+	 * CQ, QP, or SRQ is soon followed by a call to mmap().
+	 */
+	spin_lock_irq(&dev->pending_lock);
+	list_for_each_entry_safe(ip, pp, &dev->pending_mmaps,
+				 pending_mmaps) {
+		/* Only the creator is allowed to mmap the object */
+		if (context != ip->context || (__u64) offset != ip->offset)
+			continue;
+		/* Don't allow a mmap larger than the object. */
+		if (size > ip->size)
+			break;
+
+		list_del_init(&ip->pending_mmaps);
+		spin_unlock_irq(&dev->pending_lock);
+
+		ret = remap_vmalloc_range(vma, ip->obj, 0);
+		if (ret)
+			goto done;
+		vma->vm_ops = &qib_vm_ops;
+		vma->vm_private_data = ip;
+		qib_vma_open(vma);
+		goto done;
+	}
+	spin_unlock_irq(&dev->pending_lock);
+done:
+	return ret;
+}
+
+/*
+ * Allocate information for qib_mmap
+ */
+struct qib_mmap_info *qib_create_mmap_info(struct qib_ibdev *dev,
+					   u32 size,
+					   struct ib_ucontext *context,
+					   void *obj)
+{
+	struct qib_mmap_info *ip;
+
+	ip = kmalloc(sizeof *ip, GFP_KERNEL);
+	if (!ip)
+		goto bail;
+
+	size = PAGE_ALIGN(size);
+
+	spin_lock_irq(&dev->mmap_offset_lock);
+	if (dev->mmap_offset == 0)
+		dev->mmap_offset = PAGE_SIZE;
+	ip->offset = dev->mmap_offset;
+	dev->mmap_offset += size;
+	spin_unlock_irq(&dev->mmap_offset_lock);
+
+	INIT_LIST_HEAD(&ip->pending_mmaps);
+	ip->size = size;
+	ip->context = context;
+	ip->obj = obj;
+	kref_init(&ip->ref);
+
+bail:
+	return ip;
+}
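+
+/*
+ * A sketch of the intended calling pattern (the real callers are the
+ * CQ/QP/SRQ creation paths; the names here are illustrative only):
+ *
+ *	ip = qib_create_mmap_info(dev, sz, context, wc_buf);
+ *	if (ip && udata)
+ *		(void) ib_copy_to_udata(udata, &ip->offset,
+ *					sizeof(ip->offset));
+ *	spin_lock_irq(&dev->pending_lock);
+ *	list_add(&ip->pending_mmaps, &dev->pending_mmaps);
+ *	spin_unlock_irq(&dev->pending_lock);
+ *
+ * Userspace then calls mmap() with that offset, which qib_mmap()
+ * matches against the pending list.
+ */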
+
+void qib_update_mmap_info(struct qib_ibdev *dev, struct qib_mmap_info *ip,
+			  u32 size, void *obj)
+{
+	size = PAGE_ALIGN(size);
+
+	spin_lock_irq(&dev->mmap_offset_lock);
+	if (dev->mmap_offset == 0)
+		dev->mmap_offset = PAGE_SIZE;
+	ip->offset = dev->mmap_offset;
+	dev->mmap_offset += size;
+	spin_unlock_irq(&dev->mmap_offset_lock);
+
+	ip->size = size;
+	ip->obj = obj;
+}
diff --git a/drivers/infiniband/hw/qib/qib_mr.c b/drivers/infiniband/hw/qib/qib_mr.c
new file mode 100644
index 000000000000..5f95f0f6385d
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_mr.c
@@ -0,0 +1,503 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <rdma/ib_umem.h>
+#include <rdma/ib_smi.h>
+
+#include "qib.h"
+
+/* Fast memory region */
+struct qib_fmr {
+	struct ib_fmr ibfmr;
+	u8 page_shift;
+	struct qib_mregion mr;        /* must be last */
+};
+
+static inline struct qib_fmr *to_ifmr(struct ib_fmr *ibfmr)
+{
+	return container_of(ibfmr, struct qib_fmr, ibfmr);
+}
+
+/**
+ * qib_get_dma_mr - get a DMA memory region
+ * @pd: protection domain for this memory region
+ * @acc: access flags
+ *
+ * Returns the memory region on success, otherwise returns an errno.
+ * Note that all DMA addresses should be created via the
+ * struct ib_dma_mapping_ops functions (see qib_dma.c).
+ */
+struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc)
+{
+	struct qib_ibdev *dev = to_idev(pd->device);
+	struct qib_mr *mr;
+	struct ib_mr *ret;
+	unsigned long flags;
+
+	if (to_ipd(pd)->user) {
+		ret = ERR_PTR(-EPERM);
+		goto bail;
+	}
+
+	mr = kzalloc(sizeof *mr, GFP_KERNEL);
+	if (!mr) {
+		ret = ERR_PTR(-ENOMEM);
+		goto bail;
+	}
+
+	mr->mr.access_flags = acc;
+	atomic_set(&mr->mr.refcount, 0);
+
+	spin_lock_irqsave(&dev->lk_table.lock, flags);
+	if (!dev->dma_mr)
+		dev->dma_mr = &mr->mr;
+	spin_unlock_irqrestore(&dev->lk_table.lock, flags);
+
+	ret = &mr->ibmr;
+
+bail:
+	return ret;
+}
+
+static struct qib_mr *alloc_mr(int count, struct qib_lkey_table *lk_table)
+{
+	struct qib_mr *mr;
+	int m, i = 0;
+
+	/* Allocate struct plus pointers to first level page tables. */
+	m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ;
+	mr = kmalloc(sizeof *mr + m * sizeof mr->mr.map[0], GFP_KERNEL);
+	if (!mr)
+		goto done;
+
+	/* Allocate first level page tables. */
+	for (; i < m; i++) {
+		mr->mr.map[i] = kmalloc(sizeof *mr->mr.map[0], GFP_KERNEL);
+		if (!mr->mr.map[i])
+			goto bail;
+	}
+	mr->mr.mapsz = m;
+	mr->mr.max_segs = count;
+
+	/*
+	 * ib_reg_phys_mr() will initialize mr->ibmr except for
+	 * lkey and rkey.
+	 */
+	if (!qib_alloc_lkey(lk_table, &mr->mr))
+		goto bail;
+	mr->ibmr.lkey = mr->mr.lkey;
+	mr->ibmr.rkey = mr->mr.lkey;
+
+	atomic_set(&mr->mr.refcount, 0);
+	goto done;
+
+bail:
+	while (i)
+		kfree(mr->mr.map[--i]);
+	kfree(mr);
+	mr = NULL;
+
+done:
+	return mr;
+}
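+
+/*
+ * The map sizing in alloc_mr() is a ceiling division: e.g. with a
+ * (hypothetical) QIB_SEGSZ of 8, a request for 17 segments yields
+ * m == 3 first-level pages: two full pages plus one page for the
+ * remaining segment.
+ */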
+
+/**
+ * qib_reg_phys_mr - register a physical memory region
+ * @pd: protection domain for this memory region
+ * @buffer_list: pointer to the list of physical buffers to register
+ * @num_phys_buf: the number of physical buffers to register
+ * @acc: access flags for this memory region
+ * @iova_start: the starting address passed over IB which maps to this MR
+ *
+ * Returns the memory region on success, otherwise returns an errno.
+ */
+struct ib_mr *qib_reg_phys_mr(struct ib_pd *pd,
+			      struct ib_phys_buf *buffer_list,
+			      int num_phys_buf, int acc, u64 *iova_start)
+{
+	struct qib_mr *mr;
+	int n, m, i;
+	struct ib_mr *ret;
+
+	mr = alloc_mr(num_phys_buf, &to_idev(pd->device)->lk_table);
+	if (mr == NULL) {
+		ret = ERR_PTR(-ENOMEM);
+		goto bail;
+	}
+
+	mr->mr.pd = pd;
+	mr->mr.user_base = *iova_start;
+	mr->mr.iova = *iova_start;
+	mr->mr.length = 0;
+	mr->mr.offset = 0;
+	mr->mr.access_flags = acc;
+	mr->umem = NULL;
+
+	m = 0;
+	n = 0;
+	for (i = 0; i < num_phys_buf; i++) {
+		mr->mr.map[m]->segs[n].vaddr = (void *) buffer_list[i].addr;
+		mr->mr.map[m]->segs[n].length = buffer_list[i].size;
+		mr->mr.length += buffer_list[i].size;
+		n++;
+		if (n == QIB_SEGSZ) {
+			m++;
+			n = 0;
+		}
+	}
+
+	ret = &mr->ibmr;
+
+bail:
+	return ret;
+}
+
+/**
+ * qib_reg_user_mr - register a userspace memory region
+ * @pd: protection domain for this memory region
+ * @start: starting userspace address
+ * @length: length of region to register
+ * @virt_addr: virtual address to use (from HCA's point of view)
+ * @mr_access_flags: access flags for this memory region
+ * @udata: unused by the QLogic_IB driver
+ *
+ * Returns the memory region on success, otherwise returns an errno.
+ */
+struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+			      u64 virt_addr, int mr_access_flags,
+			      struct ib_udata *udata)
+{
+	struct qib_mr *mr;
+	struct ib_umem *umem;
+	struct ib_umem_chunk *chunk;
+	int n, m, i;
+	struct ib_mr *ret;
+
+	if (length == 0) {
+		ret = ERR_PTR(-EINVAL);
+		goto bail;
+	}
+
+	umem = ib_umem_get(pd->uobject->context, start, length,
+			   mr_access_flags, 0);
+	if (IS_ERR(umem))
+		return (void *) umem;
+
+	n = 0;
+	list_for_each_entry(chunk, &umem->chunk_list, list)
+		n += chunk->nents;
+
+	mr = alloc_mr(n, &to_idev(pd->device)->lk_table);
+	if (!mr) {
+		ret = ERR_PTR(-ENOMEM);
+		ib_umem_release(umem);
+		goto bail;
+	}
+
+	mr->mr.pd = pd;
+	mr->mr.user_base = start;
+	mr->mr.iova = virt_addr;
+	mr->mr.length = length;
+	mr->mr.offset = umem->offset;
+	mr->mr.access_flags = mr_access_flags;
+	mr->umem = umem;
+
+	m = 0;
+	n = 0;
+	list_for_each_entry(chunk, &umem->chunk_list, list) {
+		for (i = 0; i < chunk->nents; i++) {
+			void *vaddr;
+
+			vaddr = page_address(sg_page(&chunk->page_list[i]));
+			if (!vaddr) {
+				ret = ERR_PTR(-EINVAL);
+				goto bail;
+			}
+			mr->mr.map[m]->segs[n].vaddr = vaddr;
+			mr->mr.map[m]->segs[n].length = umem->page_size;
+			n++;
+			if (n == QIB_SEGSZ) {
+				m++;
+				n = 0;
+			}
+		}
+	}
+	ret = &mr->ibmr;
+
+bail:
+	return ret;
+}
+
+/**
+ * qib_dereg_mr - unregister and free a memory region
+ * @ibmr: the memory region to free
+ *
+ * Returns 0 on success.
+ *
+ * Note that this is called to free MRs created by qib_get_dma_mr()
+ * or qib_reg_user_mr().
+ */
+int qib_dereg_mr(struct ib_mr *ibmr)
+{
+	struct qib_mr *mr = to_imr(ibmr);
+	struct qib_ibdev *dev = to_idev(ibmr->device);
+	int ret;
+	int i;
+
+	ret = qib_free_lkey(dev, &mr->mr);
+	if (ret)
+		return ret;
+
+	i = mr->mr.mapsz;
+	while (i)
+		kfree(mr->mr.map[--i]);
+	if (mr->umem)
+		ib_umem_release(mr->umem);
+	kfree(mr);
+	return 0;
+}
+
+/*
+ * Allocate a memory region usable with the
+ * IB_WR_FAST_REG_MR send work request.
+ *
+ * Return the memory region on success, otherwise return an errno.
+ */
+struct ib_mr *qib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len)
+{
+	struct qib_mr *mr;
+
+	mr = alloc_mr(max_page_list_len, &to_idev(pd->device)->lk_table);
+	if (mr == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	mr->mr.pd = pd;
+	mr->mr.user_base = 0;
+	mr->mr.iova = 0;
+	mr->mr.length = 0;
+	mr->mr.offset = 0;
+	mr->mr.access_flags = 0;
+	mr->umem = NULL;
+
+	return &mr->ibmr;
+}
+
+struct ib_fast_reg_page_list *
+qib_alloc_fast_reg_page_list(struct ib_device *ibdev, int page_list_len)
+{
+	unsigned size = page_list_len * sizeof(u64);
+	struct ib_fast_reg_page_list *pl;
+
+	if (size > PAGE_SIZE)
+		return ERR_PTR(-EINVAL);
+
+	pl = kmalloc(sizeof *pl, GFP_KERNEL);
+	if (!pl)
+		return ERR_PTR(-ENOMEM);
+
+	pl->page_list = kmalloc(size, GFP_KERNEL);
+	if (!pl->page_list)
+		goto err_free;
+
+	return pl;
+
+err_free:
+	kfree(pl);
+	return ERR_PTR(-ENOMEM);
+}
+
+void qib_free_fast_reg_page_list(struct ib_fast_reg_page_list *pl)
+{
+	kfree(pl->page_list);
+	kfree(pl);
+}
+
+/**
+ * qib_alloc_fmr - allocate a fast memory region
+ * @pd: the protection domain for this memory region
+ * @mr_access_flags: access flags for this memory region
+ * @fmr_attr: fast memory region attributes
+ *
+ * Returns the memory region on success, otherwise returns an errno.
+ */
+struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
+			     struct ib_fmr_attr *fmr_attr)
+{
+	struct qib_fmr *fmr;
+	int m, i = 0;
+	struct ib_fmr *ret;
+
+	/* Allocate struct plus pointers to first level page tables. */
+	m = (fmr_attr->max_pages + QIB_SEGSZ - 1) / QIB_SEGSZ;
+	fmr = kmalloc(sizeof *fmr + m * sizeof fmr->mr.map[0], GFP_KERNEL);
+	if (!fmr)
+		goto bail;
+
+	/* Allocate first level page tables. */
+	for (; i < m; i++) {
+		fmr->mr.map[i] = kmalloc(sizeof *fmr->mr.map[0],
+					 GFP_KERNEL);
+		if (!fmr->mr.map[i])
+			goto bail;
+	}
+	fmr->mr.mapsz = m;
+
+	/*
+	 * ib_alloc_fmr() will initialize fmr->ibfmr except for lkey &
+	 * rkey.
+	 */
+	if (!qib_alloc_lkey(&to_idev(pd->device)->lk_table, &fmr->mr))
+		goto bail;
+	fmr->ibfmr.rkey = fmr->mr.lkey;
+	fmr->ibfmr.lkey = fmr->mr.lkey;
+	/*
+	 * Resources are allocated but no valid mapping (RKEY can't be
+	 * used).
+	 */
+	fmr->mr.pd = pd;
+	fmr->mr.user_base = 0;
+	fmr->mr.iova = 0;
+	fmr->mr.length = 0;
+	fmr->mr.offset = 0;
+	fmr->mr.access_flags = mr_access_flags;
+	fmr->mr.max_segs = fmr_attr->max_pages;
+	fmr->page_shift = fmr_attr->page_shift;
+
+	atomic_set(&fmr->mr.refcount, 0);
+	ret = &fmr->ibfmr;
+	goto done;
+
+bail:
+	while (i)
+		kfree(fmr->mr.map[--i]);
+	kfree(fmr);
+	ret = ERR_PTR(-ENOMEM);
+
+done:
+	return ret;
+}
+
+/**
+ * qib_map_phys_fmr - set up a fast memory region
+ * @ibfmr: the fast memory region to set up
+ * @page_list: the list of pages to associate with the fast memory region
+ * @list_len: the number of pages to associate with the fast memory region
+ * @iova: the virtual address of the start of the fast memory region
+ *
+ * This may be called from interrupt context.
+ */
+int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
+		     int list_len, u64 iova)
+{
+	struct qib_fmr *fmr = to_ifmr(ibfmr);
+	struct qib_lkey_table *rkt;
+	unsigned long flags;
+	int m, n, i;
+	u32 ps;
+	int ret;
+
+	if (atomic_read(&fmr->mr.refcount))
+		return -EBUSY;
+
+	if (list_len > fmr->mr.max_segs) {
+		ret = -EINVAL;
+		goto bail;
+	}
+	rkt = &to_idev(ibfmr->device)->lk_table;
+	spin_lock_irqsave(&rkt->lock, flags);
+	fmr->mr.user_base = iova;
+	fmr->mr.iova = iova;
+	ps = 1 << fmr->page_shift;
+	fmr->mr.length = list_len * ps;
+	m = 0;
+	n = 0;
+	for (i = 0; i < list_len; i++) {
+		fmr->mr.map[m]->segs[n].vaddr = (void *) page_list[i];
+		fmr->mr.map[m]->segs[n].length = ps;
+		if (++n == QIB_SEGSZ) {
+			m++;
+			n = 0;
+		}
+	}
+	spin_unlock_irqrestore(&rkt->lock, flags);
+	ret = 0;
+
+bail:
+	return ret;
+}
+
+/**
+ * qib_unmap_fmr - unmap fast memory regions
+ * @fmr_list: the list of fast memory regions to unmap
+ *
+ * Returns 0 on success.
+ */
+int qib_unmap_fmr(struct list_head *fmr_list)
+{
+	struct qib_fmr *fmr;
+	struct qib_lkey_table *rkt;
+	unsigned long flags;
+
+	list_for_each_entry(fmr, fmr_list, ibfmr.list) {
+		rkt = &to_idev(fmr->ibfmr.device)->lk_table;
+		spin_lock_irqsave(&rkt->lock, flags);
+		fmr->mr.user_base = 0;
+		fmr->mr.iova = 0;
+		fmr->mr.length = 0;
+		spin_unlock_irqrestore(&rkt->lock, flags);
+	}
+	return 0;
+}
+
+/**
+ * qib_dealloc_fmr - deallocate a fast memory region
+ * @ibfmr: the fast memory region to deallocate
+ *
+ * Returns 0 on success.
+ */
+int qib_dealloc_fmr(struct ib_fmr *ibfmr)
+{
+	struct qib_fmr *fmr = to_ifmr(ibfmr);
+	int ret;
+	int i;
+
+	ret = qib_free_lkey(to_idev(ibfmr->device), &fmr->mr);
+	if (ret)
+		return ret;
+
+	i = fmr->mr.mapsz;
+	while (i)
+		kfree(fmr->mr.map[--i]);
+	kfree(fmr);
+	return 0;
+}
diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c
new file mode 100644
index 000000000000..c926bf4541df
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_pcie.c
@@ -0,0 +1,738 @@
+/*
+ * Copyright (c) 2008, 2009 QLogic Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/pci.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/vmalloc.h>
+#include <linux/aer.h>
+
+#include "qib.h"
+
+/*
+ * This file contains PCIe utility routines that are common to the
+ * various QLogic InfiniPath adapters
+ */
+
+/*
+ * Code to adjust PCIe capabilities.
+ * To minimize the change footprint, we call it
+ * from qib_pcie_params, which every chip-specific
+ * file calls, even though this violates some
+ * expectations of harmlessness.
+ */
+static int qib_tune_pcie_caps(struct qib_devdata *);
+static int qib_tune_pcie_coalesce(struct qib_devdata *);
+
+/*
+ * Do all the common PCIe setup and initialization.
+ * devdata is not yet allocated, and is not allocated until after this
+ * routine returns success.  Therefore qib_dev_err() can't be used for error
+ * printing.
+ */
+int qib_pcie_init(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	int ret;
+
+	ret = pci_enable_device(pdev);
+	if (ret) {
+		/*
+		 * This can happen (in theory) only if:
+		 * We did a chip reset, and then failed to reprogram the
+		 * BAR, or the chip reset due to an internal error.  We then
+		 * unloaded the driver and reloaded it.
+		 *
+		 * Both reset cases set the BAR back to initial state.  For
+		 * the latter case, the AER sticky error bit at offset 0x718
+		 * should be set, but the Linux kernel doesn't yet know
+		 * about that, it appears.  If the original BAR was retained
+		 * in the kernel data structures, this may be OK.
+		 */
+		qib_early_err(&pdev->dev, "pci enable failed: error %d\n",
+			      -ret);
+		goto done;
+	}
+
+	ret = pci_request_regions(pdev, QIB_DRV_NAME);
+	if (ret) {
+		qib_devinfo(pdev, "pci_request_regions fails: err %d\n", -ret);
+		goto bail;
+	}
+
+	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+	if (ret) {
+		/*
+		 * If the 64-bit setup fails, try 32-bit.  Some systems
+		 * do not set up 64-bit maps when 2GB or less memory is
+		 * installed.
+		 */
+		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (ret) {
+			qib_devinfo(pdev, "Unable to set DMA mask: %d\n", ret);
+			goto bail;
+		}
+		ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+	} else
+		ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+	if (ret)
+		qib_early_err(&pdev->dev,
+			      "Unable to set DMA consistent mask: %d\n", ret);
+
+	pci_set_master(pdev);
+	ret = pci_enable_pcie_error_reporting(pdev);
+	if (ret)
+		qib_early_err(&pdev->dev,
+			      "Unable to enable pcie error reporting: %d\n",
+			      ret);
+	goto done;
+
+bail:
+	pci_disable_device(pdev);
+	pci_release_regions(pdev);
+done:
+	return ret;
+}
+
+/*
+ * Do remaining PCIe setup, once dd is allocated, and save away
+ * fields required to re-initialize after a chip reset, or for
+ * various other purposes
+ */
+int qib_pcie_ddinit(struct qib_devdata *dd, struct pci_dev *pdev,
+		    const struct pci_device_id *ent)
+{
+	unsigned long len;
+	resource_size_t addr;
+
+	dd->pcidev = pdev;
+	pci_set_drvdata(pdev, dd);
+
+	addr = pci_resource_start(pdev, 0);
+	len = pci_resource_len(pdev, 0);
+
+#if defined(__powerpc__)
+	/* There isn't a generic way to specify writethrough mappings */
+	dd->kregbase = __ioremap(addr, len, _PAGE_NO_CACHE | _PAGE_WRITETHRU);
+#else
+	dd->kregbase = ioremap_nocache(addr, len);
+#endif
+
+	if (!dd->kregbase)
+		return -ENOMEM;
+
+	dd->kregend = (u64 __iomem *)((void __iomem *) dd->kregbase + len);
+	dd->physaddr = addr;        /* used for io_remap, etc. */
+
+	/*
+	 * Save BARs to rewrite after device reset.  Save all 64 bits of
+	 * BAR, just in case.
+	 */
+	dd->pcibar0 = addr;
+	dd->pcibar1 = addr >> 32;
+	dd->deviceid = ent->device; /* save for later use */
+	dd->vendorid = ent->vendor;
+
+	return 0;
+}
+
+/*
+ * Do PCIe cleanup, after chip-specific cleanup, etc.  Just prior
+ * to releasing the dd memory.
+ * Void because the core pcie cleanup calls return nothing useful to check.
+ */
+void qib_pcie_ddcleanup(struct qib_devdata *dd)
+{
+	u64 __iomem *base = (void __iomem *) dd->kregbase;
+
+	dd->kregbase = NULL;
+	iounmap(base);
+	if (dd->piobase)
+		iounmap(dd->piobase);
+	if (dd->userbase)
+		iounmap(dd->userbase);
+
+	pci_disable_device(dd->pcidev);
+	pci_release_regions(dd->pcidev);
+
+	pci_set_drvdata(dd->pcidev, NULL);
+}
+
+static void qib_msix_setup(struct qib_devdata *dd, int pos, u32 *msixcnt,
+			   struct msix_entry *msix_entry)
+{
+	int ret;
+	u32 tabsize = 0;
+	u16 msix_flags;
+
+	pci_read_config_word(dd->pcidev, pos + PCI_MSIX_FLAGS, &msix_flags);
+	tabsize = 1 + (msix_flags & PCI_MSIX_FLAGS_QSIZE);
+	if (tabsize > *msixcnt)
+		tabsize = *msixcnt;
+	ret = pci_enable_msix(dd->pcidev, msix_entry, tabsize);
+	if (ret > 0) {
+		tabsize = ret;
+		ret = pci_enable_msix(dd->pcidev, msix_entry, tabsize);
+	}
+	if (ret) {
+		qib_dev_err(dd, "pci_enable_msix %d vectors failed: %d, "
+			    "falling back to INTx\n", tabsize, ret);
+		tabsize = 0;
+	}
+	*msixcnt = tabsize;
+
+	if (ret)
+		qib_enable_intx(dd->pcidev);
+}
+
+/*
+ * We save the msi lo and hi values, so we can restore them after
+ * chip reset (the kernel PCI infrastructure doesn't yet handle that
+ * correctly).
+ */
+static int qib_msi_setup(struct qib_devdata *dd, int pos)
+{
+	struct pci_dev *pdev = dd->pcidev;
+	u16 control;
+	int ret;
+
+	ret = pci_enable_msi(pdev);
+	if (ret)
+		qib_dev_err(dd, "pci_enable_msi failed: %d, "
+			    "interrupts may not work\n", ret);
+	/* continue even if it fails, we may still be OK... */
+
+	pci_read_config_dword(pdev, pos + PCI_MSI_ADDRESS_LO,
+			      &dd->msi_lo);
+	pci_read_config_dword(pdev, pos + PCI_MSI_ADDRESS_HI,
+			      &dd->msi_hi);
+	pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &control);
+	/* now save the data (vector) info */
+	pci_read_config_word(pdev, pos + ((control & PCI_MSI_FLAGS_64BIT)
+				    ? 12 : 8),
+			     &dd->msi_data);
+	return ret;
+}
+
+int qib_pcie_params(struct qib_devdata *dd, u32 minw, u32 *nent,
+		    struct msix_entry *entry)
+{
+	u16 linkstat, speed;
+	int pos = 0, pose, ret = 1;
+
+	pose = pci_find_capability(dd->pcidev, PCI_CAP_ID_EXP);
+	if (!pose) {
+		qib_dev_err(dd, "Can't find PCI Express capability!\n");
+		/* set up something... */
+		dd->lbus_width = 1;
+		dd->lbus_speed = 2500; /* Gen1, 2.5GHz */
+		goto bail;
+	}
+
+	pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSIX);
+	if (nent && *nent && pos) {
+		qib_msix_setup(dd, pos, nent, entry);
+		ret = 0; /* did it, either MSIx or INTx */
+	} else {
+		pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI);
+		if (pos)
+			ret = qib_msi_setup(dd, pos);
+		else
+			qib_dev_err(dd, "No PCI MSI or MSIx capability!\n");
+	}
+	if (!pos)
+		qib_enable_intx(dd->pcidev);
+
+	pci_read_config_word(dd->pcidev, pose + PCI_EXP_LNKSTA, &linkstat);
+	/*
+	 * speed is bits 0-3, linkwidth is bits 4-8
+	 * no defines for them in headers
+	 */
+	speed = linkstat & 0xf;
+	linkstat >>= 4;
+	linkstat &= 0x1f;
+	dd->lbus_width = linkstat;
+
+	switch (speed) {
+	case 1:
+		dd->lbus_speed = 2500; /* Gen1, 2.5GHz */
+		break;
+	case 2:
+		dd->lbus_speed = 5000; /* Gen2, 5GHz */
+		break;
+	default: /* not defined, assume gen1 */
+		dd->lbus_speed = 2500;
+		break;
+	}
+
+	/*
+	 * Check against expected pcie width and complain if "wrong"
+	 * on first initialization, not afterwards (i.e., reset).
+	 */
+	if (minw && linkstat < minw)
+		qib_dev_err(dd,
+			    "PCIe width %u (x%u HCA), performance reduced\n",
+			    linkstat, minw);
+
+	qib_tune_pcie_caps(dd);
+
+	qib_tune_pcie_coalesce(dd);
+
+bail:
+	/* fill in string, even on errors */
+	snprintf(dd->lbus_info, sizeof(dd->lbus_info),
+		 "PCIe,%uMHz,x%u\n", dd->lbus_speed, dd->lbus_width);
+	return ret;
+}
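+
+/*
+ * For example, a device that trained to Gen2 speed on an x8 link
+ * reports lbus_info as "PCIe,5000MHz,x8".
+ */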
+
+/*
+ * Setup pcie interrupt stuff again after a reset.  I'd like to just call
+ * pci_enable_msi() again for msi, but when I do that,
+ * the MSI enable bit doesn't get set in the command word, and
+ * we switch to a different interrupt vector, which is confusing,
+ * so I instead just do it all inline.  Perhaps this can somehow be
+ * tied into the PCIe hotplug support at some point.
+ */
+int qib_reinit_intr(struct qib_devdata *dd)
+{
+	int pos;
+	u16 control;
+	int ret = 0;
+
+	/* If we aren't using MSI, don't restore it */
+	if (!dd->msi_lo)
+		goto bail;
+
+	pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI);
+	if (!pos) {
+		qib_dev_err(dd, "Can't find MSI capability, "
+			    "can't restore MSI settings\n");
+		ret = 0;
+		/* nothing special for MSIx, just MSI */
+		goto bail;
+	}
+	pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_LO,
+			       dd->msi_lo);
+	pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_HI,
+			       dd->msi_hi);
+	pci_read_config_word(dd->pcidev, pos + PCI_MSI_FLAGS, &control);
+	if (!(control & PCI_MSI_FLAGS_ENABLE)) {
+		control |= PCI_MSI_FLAGS_ENABLE;
+		pci_write_config_word(dd->pcidev, pos + PCI_MSI_FLAGS,
+				      control);
+	}
+	/* now rewrite the data (vector) info */
+	pci_write_config_word(dd->pcidev, pos +
+			      ((control & PCI_MSI_FLAGS_64BIT) ? 12 : 8),
+			      dd->msi_data);
+	ret = 1;
+bail:
+	if (!ret && (dd->flags & QIB_HAS_INTX)) {
+		qib_enable_intx(dd->pcidev);
+		ret = 1;
+	}
+
+	/* and now set the pci master bit again */
+	pci_set_master(dd->pcidev);
+
+	return ret;
+}
+
+/*
+ * Disable msi interrupt if enabled, and clear msi_lo.
+ * This is used primarily for the fallback to INTx, but
+ * is also used in reinit after reset, and during cleanup.
+ */
+void qib_nomsi(struct qib_devdata *dd)
+{
+	dd->msi_lo = 0;
+	pci_disable_msi(dd->pcidev);
+}
+
+/*
+ * Same as qib_nomsi, but for MSIx.
+ */
+void qib_nomsix(struct qib_devdata *dd)
+{
+	pci_disable_msix(dd->pcidev);
+}
+
+/*
+ * Similar to pci_intx(pdev, 1), except that we make sure
+ * msi(x) is off.
+ */
+void qib_enable_intx(struct pci_dev *pdev)
+{
+	u16 cw, new;
+	int pos;
+
+	/* first, turn on INTx */
+	pci_read_config_word(pdev, PCI_COMMAND, &cw);
+	new = cw & ~PCI_COMMAND_INTX_DISABLE;
+	if (new != cw)
+		pci_write_config_word(pdev, PCI_COMMAND, new);
+
+	pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
+	if (pos) {
+		/* then turn off MSI */
+		pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &cw);
+		new = cw & ~PCI_MSI_FLAGS_ENABLE;
+		if (new != cw)
+			pci_write_config_word(pdev, pos + PCI_MSI_FLAGS, new);
+	}
+	pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
+	if (pos) {
+		/* then turn off MSIx */
+		pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS, &cw);
+		new = cw & ~PCI_MSIX_FLAGS_ENABLE;
+		if (new != cw)
+			pci_write_config_word(pdev, pos + PCI_MSIX_FLAGS, new);
+	}
+}
+
+/*
+ * These two routines are helper routines for the device reset code
+ * to move all the pcie code out of the chip-specific driver code.
+ */
+void qib_pcie_getcmd(struct qib_devdata *dd, u16 *cmd, u8 *iline, u8 *cline)
+{
+	pci_read_config_word(dd->pcidev, PCI_COMMAND, cmd);
+	pci_read_config_byte(dd->pcidev, PCI_INTERRUPT_LINE, iline);
+	pci_read_config_byte(dd->pcidev, PCI_CACHE_LINE_SIZE, cline);
+}
+
+void qib_pcie_reenable(struct qib_devdata *dd, u16 cmd, u8 iline, u8 cline)
+{
+	int r;
+	r = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0,
+				   dd->pcibar0);
+	if (r)
+		qib_dev_err(dd, "rewrite of BAR0 failed: %d\n", r);
+	r = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_1,
+				   dd->pcibar1);
+	if (r)
+		qib_dev_err(dd, "rewrite of BAR1 failed: %d\n", r);
+	/* now re-enable memory access, and restore cosmetic settings */
+	pci_write_config_word(dd->pcidev, PCI_COMMAND, cmd);
+	pci_write_config_byte(dd->pcidev, PCI_INTERRUPT_LINE, iline);
+	pci_write_config_byte(dd->pcidev, PCI_CACHE_LINE_SIZE, cline);
+	r = pci_enable_device(dd->pcidev);
+	if (r)
+		qib_dev_err(dd, "pci_enable_device failed after "
+			    "reset: %d\n", r);
+}
+
+/* code to adjust PCIe capabilities. */
+
+static int fld2val(int wd, int mask)
+{
+	int lsbmask;
+
+	if (!mask)
+		return 0;
+	wd &= mask;
+	lsbmask = mask ^ (mask & (mask - 1));
+	wd /= lsbmask;
+	return wd;
+}
+
+static int val2fld(int wd, int mask)
+{
+	int lsbmask;
+
+	if (!mask)
+		return 0;
+	lsbmask = mask ^ (mask & (mask - 1));
+	wd *= lsbmask;
+	return wd;
+}
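+
+/*
+ * Worked example: PCI_EXP_DEVCTL_PAYLOAD is 0x00e0, so its lsbmask is
+ * 0x0020.  A DEVCTL value of 0x2850 gives fld2val(0x2850, 0x00e0) == 2
+ * (a 512-byte max payload, since the encoding is 128 << field), and
+ * val2fld(2, 0x00e0) == 0x0040 shifts the value back into place.
+ */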
+
+static int qib_pcie_coalesce;
+module_param_named(pcie_coalesce, qib_pcie_coalesce, int, S_IRUGO);
+MODULE_PARM_DESC(pcie_coalesce, "tune PCIe coalescing on some Intel chipsets");
+
+/*
+ * Enable PCIe completion and data coalescing, on Intel 5x00 and 7300
+ * chipsets.   This is known to be unsafe for some revisions of some
+ * of these chipsets, with some BIOS settings, and enabling it on those
+ * systems may result in the system crashing, and/or data corruption.
+ */
+static int qib_tune_pcie_coalesce(struct qib_devdata *dd)
+{
+	int r;
+	struct pci_dev *parent;
+	int ppos;
+	u16 devid;
+	u32 mask, bits, val;
+
+	if (!qib_pcie_coalesce)
+		return 0;
+
+	/* Find out supported and configured values for parent (root) */
+	parent = dd->pcidev->bus->self;
+	if (parent->bus->parent) {
+		qib_devinfo(dd->pcidev, "Parent not root\n");
+		return 1;
+	}
+	ppos = pci_find_capability(parent, PCI_CAP_ID_EXP);
+	if (!ppos)
+		return 1;
+	if (parent->vendor != 0x8086)
+		return 1;
+
+	/*
+	 *  - bit 12: Max_rdcmp_Imt_EN: need to set to 1
+	 *  - bit 11: COALESCE_FORCE: need to set to 0
+	 *  - bit 10: COALESCE_EN: need to set to 1
+	 *  (but limitations on some chipsets)
+	 *
+	 *  On the Intel 5000, 5100, and 7300 chipsets, there is also:
+	 *  - bit 25:24: COALESCE_MODE, need to set to 0
+	 */
+	devid = parent->device;
+	if (devid >= 0x25e2 && devid <= 0x25fa) {
+		u8 rev;
+
+		/* 5000 P/V/X/Z */
+		pci_read_config_byte(parent, PCI_REVISION_ID, &rev);
+		if (rev <= 0xb2)
+			bits = 1U << 10;
+		else
+			bits = 7U << 10;
+		mask = (3U << 24) | (7U << 10);
+	} else if (devid >= 0x65e2 && devid <= 0x65fa) {
+		/* 5100 */
+		bits = 1U << 10;
+		mask = (3U << 24) | (7U << 10);
+	} else if (devid >= 0x4021 && devid <= 0x402e) {
+		/* 5400 */
+		bits = 7U << 10;
+		mask = 7U << 10;
+	} else if (devid >= 0x3604 && devid <= 0x360a) {
+		/* 7300 */
+		bits = 7U << 10;
+		mask = (3U << 24) | (7U << 10);
+	} else {
+		/* not one of the chipsets that we know about */
+		return 1;
+	}
+	pci_read_config_dword(parent, 0x48, &val);
+	val &= ~mask;
+	val |= bits;
+	r = pci_write_config_dword(parent, 0x48, val);
+	return 0;
+}
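+
+/*
+ * Coalescing is off by default; it is only attempted when the module
+ * is loaded with the parameter set, e.g. (assuming the usual module
+ * name):
+ *
+ *	modprobe ib_qib pcie_coalesce=1
+ */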
+
+/*
+ * BIOS may not set PCIe bus-utilization parameters for best performance.
+ * Check and optionally adjust them to maximize our throughput.
+ */
+static int qib_pcie_caps;
+module_param_named(pcie_caps, qib_pcie_caps, int, S_IRUGO);
+MODULE_PARM_DESC(pcie_caps, "Max PCIe tuning: Payload (4lsb), ReadReq (D4..7)");
+
+static int qib_tune_pcie_caps(struct qib_devdata *dd)
+{
+	int ret = 1; /* Assume the worst */
+	struct pci_dev *parent;
+	int ppos, epos;
+	u16 pcaps, pctl, ecaps, ectl;
+	int rc_sup, ep_sup;
+	int rc_cur, ep_cur;
+
+	/* Find out supported and configured values for parent (root) */
+	parent = dd->pcidev->bus->self;
+	if (parent->bus->parent) {
+		qib_devinfo(dd->pcidev, "Parent not root\n");
+		goto bail;
+	}
+	ppos = pci_find_capability(parent, PCI_CAP_ID_EXP);
+	if (ppos) {
+		pci_read_config_word(parent, ppos + PCI_EXP_DEVCAP, &pcaps);
+		pci_read_config_word(parent, ppos + PCI_EXP_DEVCTL, &pctl);
+	} else
+		goto bail;
+	/* Find out supported and configured values for endpoint (us) */
+	epos = pci_find_capability(dd->pcidev, PCI_CAP_ID_EXP);
+	if (epos) {
+		pci_read_config_word(dd->pcidev, epos + PCI_EXP_DEVCAP, &ecaps);
+		pci_read_config_word(dd->pcidev, epos + PCI_EXP_DEVCTL, &ectl);
+	} else
+		goto bail;
+	ret = 0;
+	/* Find max payload supported by root, endpoint */
+	rc_sup = fld2val(pcaps, PCI_EXP_DEVCAP_PAYLOAD);
+	ep_sup = fld2val(ecaps, PCI_EXP_DEVCAP_PAYLOAD);
+	if (rc_sup > ep_sup)
+		rc_sup = ep_sup;
+
+	rc_cur = fld2val(pctl, PCI_EXP_DEVCTL_PAYLOAD);
+	ep_cur = fld2val(ectl, PCI_EXP_DEVCTL_PAYLOAD);
+
+	/* If Supported greater than limit in module param, limit it */
+	if (rc_sup > (qib_pcie_caps & 7))
+		rc_sup = qib_pcie_caps & 7;
+	/* If less than (allowed, supported), bump root payload */
+	if (rc_sup > rc_cur) {
+		rc_cur = rc_sup;
+		pctl = (pctl & ~PCI_EXP_DEVCTL_PAYLOAD) |
+			val2fld(rc_cur, PCI_EXP_DEVCTL_PAYLOAD);
+		pci_write_config_word(parent, ppos + PCI_EXP_DEVCTL, pctl);
+	}
+	/* If less than (allowed, supported), bump endpoint payload */
+	if (rc_sup > ep_cur) {
+		ep_cur = rc_sup;
+		ectl = (ectl & ~PCI_EXP_DEVCTL_PAYLOAD) |
+			val2fld(ep_cur, PCI_EXP_DEVCTL_PAYLOAD);
+		pci_write_config_word(dd->pcidev, epos + PCI_EXP_DEVCTL, ectl);
+	}
+
+	/*
+	 * Now the Read Request size.
+	 * No field for max supported, but PCIe spec limits it to 4096,
+	 * which is code '5' (log2(4096) - 7)
+	 */
+	rc_sup = 5;
+	if (rc_sup > ((qib_pcie_caps >> 4) & 7))
+		rc_sup = (qib_pcie_caps >> 4) & 7;
+	rc_cur = fld2val(pctl, PCI_EXP_DEVCTL_READRQ);
+	ep_cur = fld2val(ectl, PCI_EXP_DEVCTL_READRQ);
+
+	if (rc_sup > rc_cur) {
+		rc_cur = rc_sup;
+		pctl = (pctl & ~PCI_EXP_DEVCTL_READRQ) |
+			val2fld(rc_cur, PCI_EXP_DEVCTL_READRQ);
+		pci_write_config_word(parent, ppos + PCI_EXP_DEVCTL, pctl);
+	}
+	if (rc_sup > ep_cur) {
+		ep_cur = rc_sup;
+		ectl = (ectl & ~PCI_EXP_DEVCTL_READRQ) |
+			val2fld(ep_cur, PCI_EXP_DEVCTL_READRQ);
+		pci_write_config_word(dd->pcidev, epos + PCI_EXP_DEVCTL, ectl);
+	}
+bail:
+	return ret;
+}
+/* End of PCIe capability tuning */
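+
+/*
+ * The pcie_caps module parameter packs both limits into one value:
+ * e.g. pcie_caps=0x51 caps the payload at code 1 (256 bytes, low 4
+ * bits) and the read request size at code 5 (4096 bytes, bits 4..7),
+ * using the usual PCIe encoding of 128 << code.
+ */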
+
+/*
+ * Everything from here through the qib_pci_err_handler definition is
+ * invoked via the PCI error-recovery infrastructure, registered with
+ * the PCI core.
+ */
+static pci_ers_result_t
+qib_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
+{
+	struct qib_devdata *dd = pci_get_drvdata(pdev);
+	pci_ers_result_t ret = PCI_ERS_RESULT_RECOVERED;
+
+	switch (state) {
+	case pci_channel_io_normal:
+		qib_devinfo(pdev, "State Normal, ignoring\n");
+		break;
+
+	case pci_channel_io_frozen:
+		qib_devinfo(pdev, "State Frozen, requesting reset\n");
+		pci_disable_device(pdev);
+		ret = PCI_ERS_RESULT_NEED_RESET;
+		break;
+
+	case pci_channel_io_perm_failure:
+		qib_devinfo(pdev, "State Permanent Failure, disabling\n");
+		if (dd) {
+			/* no more register accesses! */
+			dd->flags &= ~QIB_PRESENT;
+			qib_disable_after_error(dd);
+		}
+		 /* else early, or other problem */
+		ret =  PCI_ERS_RESULT_DISCONNECT;
+		break;
+
+	default: /* shouldn't happen */
+		qib_devinfo(pdev, "QIB PCI errors detected (state %d)\n",
+			state);
+		break;
+	}
+	return ret;
+}
+
+static pci_ers_result_t
+qib_pci_mmio_enabled(struct pci_dev *pdev)
+{
+	u64 words = 0U;
+	struct qib_devdata *dd = pci_get_drvdata(pdev);
+	pci_ers_result_t ret = PCI_ERS_RESULT_RECOVERED;
+
+	if (dd && dd->pport) {
+		words = dd->f_portcntr(dd->pport, QIBPORTCNTR_WORDRCV);
+		if (words == ~0ULL)
+			ret = PCI_ERS_RESULT_NEED_RESET;
+	}
+	qib_devinfo(pdev, "QIB mmio_enabled function called, "
+		 "read wordscntr %Lx, returning %d\n", words, ret);
+	return  ret;
+}
+
+static pci_ers_result_t
+qib_pci_slot_reset(struct pci_dev *pdev)
+{
+	qib_devinfo(pdev, "QIB slot_reset function called, ignored\n");
+	return PCI_ERS_RESULT_CAN_RECOVER;
+}
+
+static pci_ers_result_t
+qib_pci_link_reset(struct pci_dev *pdev)
+{
+	qib_devinfo(pdev, "QIB link_reset function called, ignored\n");
+	return PCI_ERS_RESULT_CAN_RECOVER;
+}
+
+static void
+qib_pci_resume(struct pci_dev *pdev)
+{
+	struct qib_devdata *dd = pci_get_drvdata(pdev);
+	qib_devinfo(pdev, "QIB resume function called\n");
+	pci_cleanup_aer_uncorrect_error_status(pdev);
+	/*
+	 * Running jobs will fail, since it's asynchronous
+	 * unlike sysfs-requested reset.   Better than
+	 * doing nothing.
+	 */
+	qib_init(dd, 1); /* same as re-init after reset */
+}
+
+struct pci_error_handlers qib_pci_err_handler = {
+	.error_detected = qib_pci_error_detected,
+	.mmio_enabled = qib_pci_mmio_enabled,
+	.link_reset = qib_pci_link_reset,
+	.slot_reset = qib_pci_slot_reset,
+	.resume = qib_pci_resume,
+};
diff --git a/drivers/infiniband/hw/qib/qib_pio_copy.c b/drivers/infiniband/hw/qib/qib_pio_copy.c
new file mode 100644
index 000000000000..10b8c444dd31
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_pio_copy.c
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2009 QLogic Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "qib.h"
+
+/**
+ * qib_pio_copy - copy data to MMIO space, in multiples of 32-bits
+ * @to: destination, in MMIO space (must be 64-bit aligned)
+ * @from: source (must be 64-bit aligned)
+ * @count: number of 32-bit quantities to copy
+ *
+ * Copy data from kernel space to MMIO space, in multiples of 32 bits at a
+ * time.  Order of access is not guaranteed, nor is a memory barrier
+ * performed afterwards.
+ */
+void qib_pio_copy(void __iomem *to, const void *from, size_t count)
+{
+#ifdef CONFIG_64BIT
+	u64 __iomem *dst = to;
+	const u64 *src = from;
+	const u64 *end = src + (count >> 1);
+
+	while (src < end)
+		__raw_writeq(*src++, dst++);
+	if (count & 1)
+		__raw_writel(*(const u32 *)src, dst);
+#else
+	u32 __iomem *dst = to;
+	const u32 *src = from;
+	const u32 *end = src + count;
+
+	while (src < end)
+		__raw_writel(*src++, dst++);
+#endif
+}
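+
+/*
+ * A typical (illustrative) use is pushing a packet header into a PIO
+ * send buffer; count is in 32-bit units and the caller supplies any
+ * ordering barrier it needs afterwards:
+ *
+ *	qib_pio_copy(piobuf, hdr, hdrwords);
+ *	wmb();
+ */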
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
new file mode 100644
index 000000000000..e0f65e39076b
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -0,0 +1,1255 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
+ * All rights reserved.
+ * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/err.h>
+#include <linux/vmalloc.h>
+
+#include "qib.h"
+
+#define BITS_PER_PAGE           (PAGE_SIZE*BITS_PER_BYTE)
+#define BITS_PER_PAGE_MASK      (BITS_PER_PAGE-1)
+
+static inline unsigned mk_qpn(struct qib_qpn_table *qpt,
+			      struct qpn_map *map, unsigned off)
+{
+	return (map - qpt->map) * BITS_PER_PAGE + off;
+}
+
+static inline unsigned find_next_offset(struct qib_qpn_table *qpt,
+					struct qpn_map *map, unsigned off,
+					unsigned r)
+{
+	if (qpt->mask) {
+		off++;
+		if ((off & qpt->mask) >> 1 != r)
+			off = ((off & qpt->mask) ?
+				(off | qpt->mask) + 1 : off) | (r << 1);
+	} else
+		off = find_next_zero_bit(map->page, BITS_PER_PAGE, off);
+	return off;
+}
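+
+/*
+ * Illustration of the striping above: with a (hypothetical) mask of
+ * 0x6 and r == 1, only offsets with (off & 0x6) >> 1 == 1 qualify,
+ * e.g. 2, 3, 10, 11, ..., so the resulting QPNs all hash to kernel
+ * receive queue 1.
+ */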
+
+/*
+ * Convert the AETH credit code into the number of credits.
+ */
+static u32 credit_table[31] = {
+	0,                      /* 0 */
+	1,                      /* 1 */
+	2,                      /* 2 */
+	3,                      /* 3 */
+	4,                      /* 4 */
+	6,                      /* 5 */
+	8,                      /* 6 */
+	12,                     /* 7 */
+	16,                     /* 8 */
+	24,                     /* 9 */
+	32,                     /* A */
+	48,                     /* B */
+	64,                     /* C */
+	96,                     /* D */
+	128,                    /* E */
+	192,                    /* F */
+	256,                    /* 10 */
+	384,                    /* 11 */
+	512,                    /* 12 */
+	768,                    /* 13 */
+	1024,                   /* 14 */
+	1536,                   /* 15 */
+	2048,                   /* 16 */
+	3072,                   /* 17 */
+	4096,                   /* 18 */
+	6144,                   /* 19 */
+	8192,                   /* 1A */
+	12288,                  /* 1B */
+	16384,                  /* 1C */
+	24576,                  /* 1D */
+	32768                   /* 1E */
+};
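+
+/*
+ * For example, an AETH credit code of 0xB grants 48 credits; the
+ * remaining code, 0x1F ("invalid"), is excluded, which is why the
+ * table has 31 entries.
+ */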
+
+static void get_map_page(struct qib_qpn_table *qpt, struct qpn_map *map)
+{
+	unsigned long page = get_zeroed_page(GFP_KERNEL);
+
+	/*
+	 * Free the page if someone raced with us installing it.
+	 */
+
+	spin_lock(&qpt->lock);
+	if (map->page)
+		free_page(page);
+	else
+		map->page = (void *)page;
+	spin_unlock(&qpt->lock);
+}
+
+/*
+ * Allocate the next available QPN or
+ * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
+ */
+static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
+		     enum ib_qp_type type, u8 port)
+{
+	u32 i, offset, max_scan, qpn;
+	struct qpn_map *map;
+	u32 ret;
+	int r;
+
+	if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
+		unsigned n;
+
+		ret = type == IB_QPT_GSI;
+		n = 1 << (ret + 2 * (port - 1));
+		spin_lock(&qpt->lock);
+		if (qpt->flags & n)
+			ret = -EINVAL;
+		else
+			qpt->flags |= n;
+		spin_unlock(&qpt->lock);
+		goto bail;
+	}
+
+	r = smp_processor_id();
+	if (r >= dd->n_krcv_queues)
+		r %= dd->n_krcv_queues;
+	qpn = qpt->last + 1;
+	if (qpn >= QPN_MAX)
+		qpn = 2;
+	if (qpt->mask && ((qpn & qpt->mask) >> 1) != r)
+		qpn = ((qpn & qpt->mask) ? (qpn | qpt->mask) + 1 : qpn) |
+			(r << 1);
+	offset = qpn & BITS_PER_PAGE_MASK;
+	map = &qpt->map[qpn / BITS_PER_PAGE];
+	max_scan = qpt->nmaps - !offset;
+	for (i = 0;;) {
+		if (unlikely(!map->page)) {
+			get_map_page(qpt, map);
+			if (unlikely(!map->page))
+				break;
+		}
+		do {
+			if (!test_and_set_bit(offset, map->page)) {
+				qpt->last = qpn;
+				ret = qpn;
+				goto bail;
+			}
+			offset = find_next_offset(qpt, map, offset, r);
+			qpn = mk_qpn(qpt, map, offset);
+			/*
+			 * This test differs from alloc_pidmap().
+			 * If find_next_offset() does find a zero
+			 * bit, we don't need to check for QPN
+			 * wrapping around past our starting QPN.
+			 * We just need to be sure we don't loop
+			 * forever.
+			 */
+		} while (offset < BITS_PER_PAGE && qpn < QPN_MAX);
+		/*
+		 * In order to keep the number of pages allocated to a
+		 * minimum, we scan all the existing pages before increasing
+		 * the size of the bitmap table.
+		 */
+		if (++i > max_scan) {
+			if (qpt->nmaps == QPNMAP_ENTRIES)
+				break;
+			map = &qpt->map[qpt->nmaps++];
+			offset = qpt->mask ? (r << 1) : 0;
+		} else if (map < &qpt->map[qpt->nmaps]) {
+			++map;
+			offset = qpt->mask ? (r << 1) : 0;
+		} else {
+			map = &qpt->map[0];
+			offset = qpt->mask ? (r << 1) : 2;
+		}
+		qpn = mk_qpn(qpt, map, offset);
+	}
+
+	ret = -ENOMEM;
+
+bail:
+	return ret;
+}
+
+static void free_qpn(struct qib_qpn_table *qpt, u32 qpn)
+{
+	struct qpn_map *map;
+
+	map = qpt->map + qpn / BITS_PER_PAGE;
+	if (map->page)
+		clear_bit(qpn & BITS_PER_PAGE_MASK, map->page);
+}
+
+/*
+ * Put the QP into the hash table.
+ * The hash table holds a reference to the QP.
+ */
+static void insert_qp(struct qib_ibdev *dev, struct qib_qp *qp)
+{
+	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
+	unsigned n = qp->ibqp.qp_num % dev->qp_table_size;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->qpt_lock, flags);
+
+	if (qp->ibqp.qp_num == 0)
+		ibp->qp0 = qp;
+	else if (qp->ibqp.qp_num == 1)
+		ibp->qp1 = qp;
+	else {
+		qp->next = dev->qp_table[n];
+		dev->qp_table[n] = qp;
+	}
+	atomic_inc(&qp->refcount);
+
+	spin_unlock_irqrestore(&dev->qpt_lock, flags);
+}
+
+/*
+ * Remove the QP from the table so it can't be found asynchronously by
+ * the receive interrupt routine.
+ */
+static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp)
+{
+	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
+	struct qib_qp *q, **qpp;
+	unsigned long flags;
+
+	qpp = &dev->qp_table[qp->ibqp.qp_num % dev->qp_table_size];
+
+	spin_lock_irqsave(&dev->qpt_lock, flags);
+
+	if (ibp->qp0 == qp) {
+		ibp->qp0 = NULL;
+		atomic_dec(&qp->refcount);
+	} else if (ibp->qp1 == qp) {
+		ibp->qp1 = NULL;
+		atomic_dec(&qp->refcount);
+	} else
+		for (; (q = *qpp) != NULL; qpp = &q->next)
+			if (q == qp) {
+				*qpp = qp->next;
+				qp->next = NULL;
+				atomic_dec(&qp->refcount);
+				break;
+			}
+
+	spin_unlock_irqrestore(&dev->qpt_lock, flags);
+}
+
+/**
+ * qib_free_all_qps - check for QPs still in use
+ * @dd: the device data
+ *
+ * There should not be any QPs still in use.
+ * Returns the number of QPs still in use.
+ */
+unsigned qib_free_all_qps(struct qib_devdata *dd)
+{
+	struct qib_ibdev *dev = &dd->verbs_dev;
+	unsigned long flags;
+	struct qib_qp *qp;
+	unsigned n, qp_inuse = 0;
+
+	for (n = 0; n < dd->num_pports; n++) {
+		struct qib_ibport *ibp = &dd->pport[n].ibport_data;
+
+		if (!qib_mcast_tree_empty(ibp))
+			qp_inuse++;
+		if (ibp->qp0)
+			qp_inuse++;
+		if (ibp->qp1)
+			qp_inuse++;
+	}
+
+	spin_lock_irqsave(&dev->qpt_lock, flags);
+	for (n = 0; n < dev->qp_table_size; n++) {
+		qp = dev->qp_table[n];
+		dev->qp_table[n] = NULL;
+
+		for (; qp; qp = qp->next)
+			qp_inuse++;
+	}
+	spin_unlock_irqrestore(&dev->qpt_lock, flags);
+
+	return qp_inuse;
+}
+
+/**
+ * qib_lookup_qpn - return the QP with the given QPN
+ * @ibp: the IB port
+ * @qpn: the QP number to look up
+ *
+ * The caller is responsible for decrementing the QP reference count
+ * when done.
+ */
+struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn)
+{
+	struct qib_ibdev *dev = &ppd_from_ibp(ibp)->dd->verbs_dev;
+	unsigned long flags;
+	struct qib_qp *qp;
+
+	spin_lock_irqsave(&dev->qpt_lock, flags);
+
+	if (qpn == 0)
+		qp = ibp->qp0;
+	else if (qpn == 1)
+		qp = ibp->qp1;
+	else
+		for (qp = dev->qp_table[qpn % dev->qp_table_size]; qp;
+		     qp = qp->next)
+			if (qp->ibqp.qp_num == qpn)
+				break;
+	if (qp)
+		atomic_inc(&qp->refcount);
+
+	spin_unlock_irqrestore(&dev->qpt_lock, flags);
+	return qp;
+}
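+
+/*
+ * A sketch of the matching caller pattern (illustrative only):
+ *
+ *	qp = qib_lookup_qpn(ibp, qpn);
+ *	if (qp) {
+ *		... process the packet ...
+ *		if (atomic_dec_and_test(&qp->refcount))
+ *			wake_up(&qp->wait);
+ *	}
+ *
+ * The wake_up() pairs with the wait_event() on qp->refcount used when
+ * resetting or destroying the QP.
+ */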
+
+/**
+ * qib_reset_qp - initialize the QP state to the reset state
+ * @qp: the QP to reset
+ * @type: the QP type
+ */
+static void qib_reset_qp(struct qib_qp *qp, enum ib_qp_type type)
+{
+	qp->remote_qpn = 0;
+	qp->qkey = 0;
+	qp->qp_access_flags = 0;
+	atomic_set(&qp->s_dma_busy, 0);
+	qp->s_flags &= QIB_S_SIGNAL_REQ_WR;
+	qp->s_hdrwords = 0;
+	qp->s_wqe = NULL;
+	qp->s_draining = 0;
+	qp->s_next_psn = 0;
+	qp->s_last_psn = 0;
+	qp->s_sending_psn = 0;
+	qp->s_sending_hpsn = 0;
+	qp->s_psn = 0;
+	qp->r_psn = 0;
+	qp->r_msn = 0;
+	if (type == IB_QPT_RC) {
+		qp->s_state = IB_OPCODE_RC_SEND_LAST;
+		qp->r_state = IB_OPCODE_RC_SEND_LAST;
+	} else {
+		qp->s_state = IB_OPCODE_UC_SEND_LAST;
+		qp->r_state = IB_OPCODE_UC_SEND_LAST;
+	}
+	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
+	qp->r_nak_state = 0;
+	qp->r_aflags = 0;
+	qp->r_flags = 0;
+	qp->s_head = 0;
+	qp->s_tail = 0;
+	qp->s_cur = 0;
+	qp->s_acked = 0;
+	qp->s_last = 0;
+	qp->s_ssn = 1;
+	qp->s_lsn = 0;
+	qp->s_mig_state = IB_MIG_MIGRATED;
+	memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
+	qp->r_head_ack_queue = 0;
+	qp->s_tail_ack_queue = 0;
+	qp->s_num_rd_atomic = 0;
+	if (qp->r_rq.wq) {
+		qp->r_rq.wq->head = 0;
+		qp->r_rq.wq->tail = 0;
+	}
+	qp->r_sge.num_sge = 0;
+}
+
+static void clear_mr_refs(struct qib_qp *qp, int clr_sends)
+{
+	unsigned n;
+
+	if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
+		while (qp->s_rdma_read_sge.num_sge) {
+			atomic_dec(&qp->s_rdma_read_sge.sge.mr->refcount);
+			if (--qp->s_rdma_read_sge.num_sge)
+				qp->s_rdma_read_sge.sge =
+					*qp->s_rdma_read_sge.sg_list++;
+		}
+
+	while (qp->r_sge.num_sge) {
+		atomic_dec(&qp->r_sge.sge.mr->refcount);
+		if (--qp->r_sge.num_sge)
+			qp->r_sge.sge = *qp->r_sge.sg_list++;
+	}
+
+	if (clr_sends) {
+		while (qp->s_last != qp->s_head) {
+			struct qib_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
+			unsigned i;
+
+			for (i = 0; i < wqe->wr.num_sge; i++) {
+				struct qib_sge *sge = &wqe->sg_list[i];
+
+				atomic_dec(&sge->mr->refcount);
+			}
+			if (qp->ibqp.qp_type == IB_QPT_UD ||
+			    qp->ibqp.qp_type == IB_QPT_SMI ||
+			    qp->ibqp.qp_type == IB_QPT_GSI)
+				atomic_dec(&to_iah(wqe->wr.wr.ud.ah)->refcount);
+			if (++qp->s_last >= qp->s_size)
+				qp->s_last = 0;
+		}
+		if (qp->s_rdma_mr) {
+			atomic_dec(&qp->s_rdma_mr->refcount);
+			qp->s_rdma_mr = NULL;
+		}
+	}
+
+	if (qp->ibqp.qp_type != IB_QPT_RC)
+		return;
+
+	for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) {
+		struct qib_ack_entry *e = &qp->s_ack_queue[n];
+
+		if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST &&
+		    e->rdma_sge.mr) {
+			atomic_dec(&e->rdma_sge.mr->refcount);
+			e->rdma_sge.mr = NULL;
+		}
+	}
+}
+
+/**
+ * qib_error_qp - put a QP into the error state
+ * @qp: the QP to put into the error state
+ * @err: the receive completion error to signal if a RWQE is active
+ *
+ * Flushes both send and receive work queues.
+ * Returns true if last WQE event should be generated.
+ * The QP s_lock should be held and interrupts disabled.
+ * If we are already in error state, just return.
+ */
+int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err)
+{
+	struct qib_ibdev *dev = to_idev(qp->ibqp.device);
+	struct ib_wc wc;
+	int ret = 0;
+
+	if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
+		goto bail;
+
+	qp->state = IB_QPS_ERR;
+
+	if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) {
+		qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR);
+		del_timer(&qp->s_timer);
+	}
+	spin_lock(&dev->pending_lock);
+	if (!list_empty(&qp->iowait) && !(qp->s_flags & QIB_S_BUSY)) {
+		qp->s_flags &= ~QIB_S_ANY_WAIT_IO;
+		list_del_init(&qp->iowait);
+	}
+	spin_unlock(&dev->pending_lock);
+
+	if (!(qp->s_flags & QIB_S_BUSY)) {
+		qp->s_hdrwords = 0;
+		if (qp->s_rdma_mr) {
+			atomic_dec(&qp->s_rdma_mr->refcount);
+			qp->s_rdma_mr = NULL;
+		}
+		if (qp->s_tx) {
+			qib_put_txreq(qp->s_tx);
+			qp->s_tx = NULL;
+		}
+	}
+
+	/* Schedule the sending tasklet to drain the send work queue. */
+	if (qp->s_last != qp->s_head)
+		qib_schedule_send(qp);
+
+	clear_mr_refs(qp, 0);
+
+	memset(&wc, 0, sizeof(wc));
+	wc.qp = &qp->ibqp;
+	wc.opcode = IB_WC_RECV;
+
+	if (test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) {
+		wc.wr_id = qp->r_wr_id;
+		wc.status = err;
+		qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
+	}
+	wc.status = IB_WC_WR_FLUSH_ERR;
+
+	if (qp->r_rq.wq) {
+		struct qib_rwq *wq;
+		u32 head;
+		u32 tail;
+
+		spin_lock(&qp->r_rq.lock);
+
+		/* sanity check pointers before trusting them */
+		wq = qp->r_rq.wq;
+		head = wq->head;
+		if (head >= qp->r_rq.size)
+			head = 0;
+		tail = wq->tail;
+		if (tail >= qp->r_rq.size)
+			tail = 0;
+		while (tail != head) {
+			wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
+			if (++tail >= qp->r_rq.size)
+				tail = 0;
+			qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
+		}
+		wq->tail = tail;
+
+		spin_unlock(&qp->r_rq.lock);
+	} else if (qp->ibqp.event_handler)
+		ret = 1;
+
+bail:
+	return ret;
+}
+
+/**
+ * qib_modify_qp - modify the attributes of a queue pair
+ * @ibqp: the queue pair whose attributes we're modifying
+ * @attr: the new attributes
+ * @attr_mask: the mask of attributes to modify
+ * @udata: user data for libibverbs.so
+ *
+ * Returns 0 on success, otherwise returns an errno.
+ */
+int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+		  int attr_mask, struct ib_udata *udata)
+{
+	struct qib_ibdev *dev = to_idev(ibqp->device);
+	struct qib_qp *qp = to_iqp(ibqp);
+	enum ib_qp_state cur_state, new_state;
+	struct ib_event ev;
+	int lastwqe = 0;
+	int mig = 0;
+	int ret;
+	u32 pmtu = 0; /* for gcc warning only */
+
+	spin_lock_irq(&qp->r_lock);
+	spin_lock(&qp->s_lock);
+
+	cur_state = attr_mask & IB_QP_CUR_STATE ?
+		attr->cur_qp_state : qp->state;
+	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
+
+	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
+				attr_mask))
+		goto inval;
+
+	if (attr_mask & IB_QP_AV) {
+		if (attr->ah_attr.dlid >= QIB_MULTICAST_LID_BASE)
+			goto inval;
+		if (qib_check_ah(qp->ibqp.device, &attr->ah_attr))
+			goto inval;
+	}
+
+	if (attr_mask & IB_QP_ALT_PATH) {
+		if (attr->alt_ah_attr.dlid >= QIB_MULTICAST_LID_BASE)
+			goto inval;
+		if (qib_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
+			goto inval;
+		if (attr->alt_pkey_index >= qib_get_npkeys(dd_from_dev(dev)))
+			goto inval;
+	}
+
+	if (attr_mask & IB_QP_PKEY_INDEX)
+		if (attr->pkey_index >= qib_get_npkeys(dd_from_dev(dev)))
+			goto inval;
+
+	if (attr_mask & IB_QP_MIN_RNR_TIMER)
+		if (attr->min_rnr_timer > 31)
+			goto inval;
+
+	if (attr_mask & IB_QP_PORT)
+		if (qp->ibqp.qp_type == IB_QPT_SMI ||
+		    qp->ibqp.qp_type == IB_QPT_GSI ||
+		    attr->port_num == 0 ||
+		    attr->port_num > ibqp->device->phys_port_cnt)
+			goto inval;
+
+	if (attr_mask & IB_QP_DEST_QPN)
+		if (attr->dest_qp_num > QIB_QPN_MASK)
+			goto inval;
+
+	if (attr_mask & IB_QP_RETRY_CNT)
+		if (attr->retry_cnt > 7)
+			goto inval;
+
+	if (attr_mask & IB_QP_RNR_RETRY)
+		if (attr->rnr_retry > 7)
+			goto inval;
+
+	/*
+	 * Don't allow invalid path_mtu values.  OK to set greater
+	 * than the active mtu (or even the max_cap, if we have tuned
+	 * that to a small mtu).  We'll set qp->path_mtu
+	 * to the lesser of the requested attribute mtu and the active
+	 * mtu, for packetizing messages.
+	 * Note that the QP port has to be set in INIT and MTU in RTR.
+	 */
+	if (attr_mask & IB_QP_PATH_MTU) {
+		struct qib_devdata *dd = dd_from_dev(dev);
+		int mtu, pidx = qp->port_num - 1;
+
+		mtu = ib_mtu_enum_to_int(attr->path_mtu);
+		if (mtu == -1)
+			goto inval;
+		if (mtu > dd->pport[pidx].ibmtu) {
+			switch (dd->pport[pidx].ibmtu) {
+			case 4096:
+				pmtu = IB_MTU_4096;
+				break;
+			case 2048:
+				pmtu = IB_MTU_2048;
+				break;
+			case 1024:
+				pmtu = IB_MTU_1024;
+				break;
+			case 512:
+				pmtu = IB_MTU_512;
+				break;
+			case 256:
+				pmtu = IB_MTU_256;
+				break;
+			default:
+				pmtu = IB_MTU_2048;
+			}
+		} else
+			pmtu = attr->path_mtu;
+	}
+
+	if (attr_mask & IB_QP_PATH_MIG_STATE) {
+		if (attr->path_mig_state == IB_MIG_REARM) {
+			if (qp->s_mig_state == IB_MIG_ARMED)
+				goto inval;
+			if (new_state != IB_QPS_RTS)
+				goto inval;
+		} else if (attr->path_mig_state == IB_MIG_MIGRATED) {
+			if (qp->s_mig_state == IB_MIG_REARM)
+				goto inval;
+			if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
+				goto inval;
+			if (qp->s_mig_state == IB_MIG_ARMED)
+				mig = 1;
+		} else
+			goto inval;
+	}
+
+	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
+		if (attr->max_dest_rd_atomic > QIB_MAX_RDMA_ATOMIC)
+			goto inval;
+
+	switch (new_state) {
+	case IB_QPS_RESET:
+		if (qp->state != IB_QPS_RESET) {
+			qp->state = IB_QPS_RESET;
+			spin_lock(&dev->pending_lock);
+			if (!list_empty(&qp->iowait))
+				list_del_init(&qp->iowait);
+			spin_unlock(&dev->pending_lock);
+			qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT);
+			spin_unlock(&qp->s_lock);
+			spin_unlock_irq(&qp->r_lock);
+			/* Stop the sending work queue and retry timer */
+			cancel_work_sync(&qp->s_work);
+			del_timer_sync(&qp->s_timer);
+			wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
+			if (qp->s_tx) {
+				qib_put_txreq(qp->s_tx);
+				qp->s_tx = NULL;
+			}
+			remove_qp(dev, qp);
+			wait_event(qp->wait, !atomic_read(&qp->refcount));
+			spin_lock_irq(&qp->r_lock);
+			spin_lock(&qp->s_lock);
+			clear_mr_refs(qp, 1);
+			qib_reset_qp(qp, ibqp->qp_type);
+		}
+		break;
+
+	case IB_QPS_RTR:
+		/* Allow event to retrigger if QP set to RTR more than once */
+		qp->r_flags &= ~QIB_R_COMM_EST;
+		qp->state = new_state;
+		break;
+
+	case IB_QPS_SQD:
+		qp->s_draining = qp->s_last != qp->s_cur;
+		qp->state = new_state;
+		break;
+
+	case IB_QPS_SQE:
+		if (qp->ibqp.qp_type == IB_QPT_RC)
+			goto inval;
+		qp->state = new_state;
+		break;
+
+	case IB_QPS_ERR:
+		lastwqe = qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
+		break;
+
+	default:
+		qp->state = new_state;
+		break;
+	}
+
+	if (attr_mask & IB_QP_PKEY_INDEX)
+		qp->s_pkey_index = attr->pkey_index;
+
+	if (attr_mask & IB_QP_PORT)
+		qp->port_num = attr->port_num;
+
+	if (attr_mask & IB_QP_DEST_QPN)
+		qp->remote_qpn = attr->dest_qp_num;
+
+	if (attr_mask & IB_QP_SQ_PSN) {
+		qp->s_next_psn = attr->sq_psn & QIB_PSN_MASK;
+		qp->s_psn = qp->s_next_psn;
+		qp->s_sending_psn = qp->s_next_psn;
+		qp->s_last_psn = qp->s_next_psn - 1;
+		qp->s_sending_hpsn = qp->s_last_psn;
+	}
+
+	if (attr_mask & IB_QP_RQ_PSN)
+		qp->r_psn = attr->rq_psn & QIB_PSN_MASK;
+
+	if (attr_mask & IB_QP_ACCESS_FLAGS)
+		qp->qp_access_flags = attr->qp_access_flags;
+
+	if (attr_mask & IB_QP_AV) {
+		qp->remote_ah_attr = attr->ah_attr;
+		qp->s_srate = attr->ah_attr.static_rate;
+	}
+
+	if (attr_mask & IB_QP_ALT_PATH) {
+		qp->alt_ah_attr = attr->alt_ah_attr;
+		qp->s_alt_pkey_index = attr->alt_pkey_index;
+	}
+
+	if (attr_mask & IB_QP_PATH_MIG_STATE) {
+		qp->s_mig_state = attr->path_mig_state;
+		if (mig) {
+			qp->remote_ah_attr = qp->alt_ah_attr;
+			qp->port_num = qp->alt_ah_attr.port_num;
+			qp->s_pkey_index = qp->s_alt_pkey_index;
+		}
+	}
+
+	if (attr_mask & IB_QP_PATH_MTU)
+		qp->path_mtu = pmtu;
+
+	if (attr_mask & IB_QP_RETRY_CNT) {
+		qp->s_retry_cnt = attr->retry_cnt;
+		qp->s_retry = attr->retry_cnt;
+	}
+
+	if (attr_mask & IB_QP_RNR_RETRY) {
+		qp->s_rnr_retry_cnt = attr->rnr_retry;
+		qp->s_rnr_retry = attr->rnr_retry;
+	}
+
+	if (attr_mask & IB_QP_MIN_RNR_TIMER)
+		qp->r_min_rnr_timer = attr->min_rnr_timer;
+
+	if (attr_mask & IB_QP_TIMEOUT)
+		qp->timeout = attr->timeout;
+
+	if (attr_mask & IB_QP_QKEY)
+		qp->qkey = attr->qkey;
+
+	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
+		qp->r_max_rd_atomic = attr->max_dest_rd_atomic;
+
+	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
+		qp->s_max_rd_atomic = attr->max_rd_atomic;
+
+	spin_unlock(&qp->s_lock);
+	spin_unlock_irq(&qp->r_lock);
+
+	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
+		insert_qp(dev, qp);
+
+	if (lastwqe) {
+		ev.device = qp->ibqp.device;
+		ev.element.qp = &qp->ibqp;
+		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
+		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
+	}
+	if (mig) {
+		ev.device = qp->ibqp.device;
+		ev.element.qp = &qp->ibqp;
+		ev.event = IB_EVENT_PATH_MIG;
+		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
+	}
+	ret = 0;
+	goto bail;
+
+inval:
+	spin_unlock(&qp->s_lock);
+	spin_unlock_irq(&qp->r_lock);
+	ret = -EINVAL;
+
+bail:
+	return ret;
+}
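For context, qib_modify_qp() is reached through the core ib_modify_qp() verb.
A hedged sketch (not taken from this driver) of the usual RC bring-up sequence
a kernel ULP would drive through it, following the standard IBTA
RESET->INIT->RTR->RTS transitions; the port number, pkey index, PSNs, dlid,
and timer values below are illustrative placeholders:

    #include <linux/string.h>
    #include <rdma/ib_verbs.h>

    /* Illustrative only: walk a freshly created RC QP up to RTS. */
    static int bring_qp_to_rts(struct ib_qp *qp, u16 dlid, u32 dest_qpn)
    {
            struct ib_qp_attr attr;
            int ret;

            memset(&attr, 0, sizeof(attr));
            attr.qp_state = IB_QPS_INIT;
            attr.pkey_index = 0;
            attr.port_num = 1;
            attr.qp_access_flags = IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ;
            ret = ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
                               IB_QP_PORT | IB_QP_ACCESS_FLAGS);
            if (ret)
                    return ret;

            memset(&attr, 0, sizeof(attr));
            attr.qp_state = IB_QPS_RTR;
            attr.path_mtu = IB_MTU_2048;
            attr.dest_qp_num = dest_qpn;
            attr.rq_psn = 0;
            attr.max_dest_rd_atomic = 1;
            attr.min_rnr_timer = 12;
            attr.ah_attr.dlid = dlid;
            attr.ah_attr.port_num = 1;
            ret = ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_AV |
                               IB_QP_PATH_MTU | IB_QP_DEST_QPN | IB_QP_RQ_PSN |
                               IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER);
            if (ret)
                    return ret;

            memset(&attr, 0, sizeof(attr));
            attr.qp_state = IB_QPS_RTS;
            attr.sq_psn = 0;
            attr.timeout = 14;      /* ~67 msec, see start_timer() in qib_rc.c */
            attr.retry_cnt = 7;
            attr.rnr_retry = 7;
            attr.max_rd_atomic = 1;
            return ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_SQ_PSN |
                                IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
                                IB_QP_RNR_RETRY | IB_QP_MAX_QP_RD_ATOMIC);
    }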
+
+int qib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+		 int attr_mask, struct ib_qp_init_attr *init_attr)
+{
+	struct qib_qp *qp = to_iqp(ibqp);
+
+	attr->qp_state = qp->state;
+	attr->cur_qp_state = attr->qp_state;
+	attr->path_mtu = qp->path_mtu;
+	attr->path_mig_state = qp->s_mig_state;
+	attr->qkey = qp->qkey;
+	attr->rq_psn = qp->r_psn & QIB_PSN_MASK;
+	attr->sq_psn = qp->s_next_psn & QIB_PSN_MASK;
+	attr->dest_qp_num = qp->remote_qpn;
+	attr->qp_access_flags = qp->qp_access_flags;
+	attr->cap.max_send_wr = qp->s_size - 1;
+	attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
+	attr->cap.max_send_sge = qp->s_max_sge;
+	attr->cap.max_recv_sge = qp->r_rq.max_sge;
+	attr->cap.max_inline_data = 0;
+	attr->ah_attr = qp->remote_ah_attr;
+	attr->alt_ah_attr = qp->alt_ah_attr;
+	attr->pkey_index = qp->s_pkey_index;
+	attr->alt_pkey_index = qp->s_alt_pkey_index;
+	attr->en_sqd_async_notify = 0;
+	attr->sq_draining = qp->s_draining;
+	attr->max_rd_atomic = qp->s_max_rd_atomic;
+	attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
+	attr->min_rnr_timer = qp->r_min_rnr_timer;
+	attr->port_num = qp->port_num;
+	attr->timeout = qp->timeout;
+	attr->retry_cnt = qp->s_retry_cnt;
+	attr->rnr_retry = qp->s_rnr_retry_cnt;
+	attr->alt_port_num = qp->alt_ah_attr.port_num;
+	attr->alt_timeout = qp->alt_timeout;
+
+	init_attr->event_handler = qp->ibqp.event_handler;
+	init_attr->qp_context = qp->ibqp.qp_context;
+	init_attr->send_cq = qp->ibqp.send_cq;
+	init_attr->recv_cq = qp->ibqp.recv_cq;
+	init_attr->srq = qp->ibqp.srq;
+	init_attr->cap = attr->cap;
+	if (qp->s_flags & QIB_S_SIGNAL_REQ_WR)
+		init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
+	else
+		init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
+	init_attr->qp_type = qp->ibqp.qp_type;
+	init_attr->port_num = qp->port_num;
+	return 0;
+}
+
+/**
+ * qib_compute_aeth - compute the AETH (syndrome + MSN)
+ * @qp: the queue pair to compute the AETH for
+ *
+ * Returns the AETH.
+ */
+__be32 qib_compute_aeth(struct qib_qp *qp)
+{
+	u32 aeth = qp->r_msn & QIB_MSN_MASK;
+
+	if (qp->ibqp.srq) {
+		/*
+		 * Shared receive queues don't generate credits.
+		 * Set the credit field to the invalid value.
+		 */
+		aeth |= QIB_AETH_CREDIT_INVAL << QIB_AETH_CREDIT_SHIFT;
+	} else {
+		u32 min, max, x;
+		u32 credits;
+		struct qib_rwq *wq = qp->r_rq.wq;
+		u32 head;
+		u32 tail;
+
+		/* sanity check pointers before trusting them */
+		head = wq->head;
+		if (head >= qp->r_rq.size)
+			head = 0;
+		tail = wq->tail;
+		if (tail >= qp->r_rq.size)
+			tail = 0;
+		/*
+		 * Compute the number of credits available (RWQEs).
+		 * XXX Not holding the r_rq.lock here so there is a small
+		 * chance that the pair of reads is not atomic.
+		 */
+		credits = head - tail;
+		if ((int)credits < 0)
+			credits += qp->r_rq.size;
+		/*
+		 * Binary search the credit table to find the code to
+		 * use.
+		 */
+		min = 0;
+		max = 31;
+		for (;;) {
+			x = (min + max) / 2;
+			if (credit_table[x] == credits)
+				break;
+			if (credit_table[x] > credits)
+				max = x;
+			else if (min == x)
+				break;
+			else
+				min = x;
+		}
+		aeth |= x << QIB_AETH_CREDIT_SHIFT;
+	}
+	return cpu_to_be32(aeth);
+}
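The loop above binary-searches the 32-entry monotonic credit_table[] (defined
earlier in qib_qp.c, outside this hunk) for the largest encodable credit count
that does not exceed the actual RWQE count. A stand-alone sketch of the same
lookup; the table contents here are illustrative assumptions, not the driver's
exact table:

    #include <stdio.h>

    /* Illustrative stand-in for the driver's credit_table[]: any
     * monotonically non-decreasing table indexed by the 5-bit code
     * behaves the same way. */
    static const unsigned credit_table[32] = {
            0, 1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192,
            256, 384, 512, 768, 1024, 1536, 2048, 3072, 4096, 6144,
            8192, 12288, 16384, 24576, 32768, 49152
    };

    /* Return the code x such that credit_table[x] is the largest entry
     * not exceeding 'credits' (exact matches break out early). */
    static unsigned credits_to_code(unsigned credits)
    {
            unsigned min = 0, max = 31, x;

            for (;;) {
                    x = (min + max) / 2;
                    if (credit_table[x] == credits)
                            break;
                    if (credit_table[x] > credits)
                            max = x;
                    else if (min == x)      /* interval closed: lower bound */
                            break;
                    else
                            min = x;
            }
            return x;
    }

    int main(void)
    {
            printf("%u\n", credits_to_code(100));   /* 13: code for 96 */
            return 0;
    }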
+
+/**
+ * qib_create_qp - create a queue pair for a device
+ * @ibpd: the protection domain whose device we create the queue pair for
+ * @init_attr: the attributes of the queue pair
+ * @udata: user data for libibverbs.so
+ *
+ * Returns the queue pair on success, otherwise returns an errno.
+ *
+ * Called by the ib_create_qp() core verbs function.
+ */
+struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
+			    struct ib_qp_init_attr *init_attr,
+			    struct ib_udata *udata)
+{
+	struct qib_qp *qp;
+	int err;
+	struct qib_swqe *swq = NULL;
+	struct qib_ibdev *dev;
+	struct qib_devdata *dd;
+	size_t sz;
+	size_t sg_list_sz;
+	struct ib_qp *ret;
+
+	if (init_attr->cap.max_send_sge > ib_qib_max_sges ||
+	    init_attr->cap.max_send_wr > ib_qib_max_qp_wrs) {
+		ret = ERR_PTR(-EINVAL);
+		goto bail;
+	}
+
+	/* Check receive queue parameters if no SRQ is specified. */
+	if (!init_attr->srq) {
+		if (init_attr->cap.max_recv_sge > ib_qib_max_sges ||
+		    init_attr->cap.max_recv_wr > ib_qib_max_qp_wrs) {
+			ret = ERR_PTR(-EINVAL);
+			goto bail;
+		}
+		if (init_attr->cap.max_send_sge +
+		    init_attr->cap.max_send_wr +
+		    init_attr->cap.max_recv_sge +
+		    init_attr->cap.max_recv_wr == 0) {
+			ret = ERR_PTR(-EINVAL);
+			goto bail;
+		}
+	}
+
+	switch (init_attr->qp_type) {
+	case IB_QPT_SMI:
+	case IB_QPT_GSI:
+		if (init_attr->port_num == 0 ||
+		    init_attr->port_num > ibpd->device->phys_port_cnt) {
+			ret = ERR_PTR(-EINVAL);
+			goto bail;
+		}
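+		/* FALLTHROUGH */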
+	case IB_QPT_UC:
+	case IB_QPT_RC:
+	case IB_QPT_UD:
+		sz = sizeof(struct qib_sge) *
+			init_attr->cap.max_send_sge +
+			sizeof(struct qib_swqe);
+		swq = vmalloc((init_attr->cap.max_send_wr + 1) * sz);
+		if (swq == NULL) {
+			ret = ERR_PTR(-ENOMEM);
+			goto bail;
+		}
+		sz = sizeof(*qp);
+		sg_list_sz = 0;
+		if (init_attr->srq) {
+			struct qib_srq *srq = to_isrq(init_attr->srq);
+
+			if (srq->rq.max_sge > 1)
+				sg_list_sz = sizeof(*qp->r_sg_list) *
+					(srq->rq.max_sge - 1);
+		} else if (init_attr->cap.max_recv_sge > 1)
+			sg_list_sz = sizeof(*qp->r_sg_list) *
+				(init_attr->cap.max_recv_sge - 1);
+		qp = kzalloc(sz + sg_list_sz, GFP_KERNEL);
+		if (!qp) {
+			ret = ERR_PTR(-ENOMEM);
+			goto bail_swq;
+		}
+		if (init_attr->srq)
+			sz = 0;
+		else {
+			qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
+			qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
+			sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
+				sizeof(struct qib_rwqe);
+			qp->r_rq.wq = vmalloc_user(sizeof(struct qib_rwq) +
+						   qp->r_rq.size * sz);
+			if (!qp->r_rq.wq) {
+				ret = ERR_PTR(-ENOMEM);
+				goto bail_qp;
+			}
+		}
+
+		/*
+		 * ib_create_qp() will initialize qp->ibqp
+		 * except for qp->ibqp.qp_num.
+		 */
+		spin_lock_init(&qp->r_lock);
+		spin_lock_init(&qp->s_lock);
+		spin_lock_init(&qp->r_rq.lock);
+		atomic_set(&qp->refcount, 0);
+		init_waitqueue_head(&qp->wait);
+		init_waitqueue_head(&qp->wait_dma);
+		init_timer(&qp->s_timer);
+		qp->s_timer.data = (unsigned long)qp;
+		INIT_WORK(&qp->s_work, qib_do_send);
+		INIT_LIST_HEAD(&qp->iowait);
+		INIT_LIST_HEAD(&qp->rspwait);
+		qp->state = IB_QPS_RESET;
+		qp->s_wq = swq;
+		qp->s_size = init_attr->cap.max_send_wr + 1;
+		qp->s_max_sge = init_attr->cap.max_send_sge;
+		if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
+			qp->s_flags = QIB_S_SIGNAL_REQ_WR;
+		dev = to_idev(ibpd->device);
+		dd = dd_from_dev(dev);
+		err = alloc_qpn(dd, &dev->qpn_table, init_attr->qp_type,
+				init_attr->port_num);
+		if (err < 0) {
+			ret = ERR_PTR(err);
+			vfree(qp->r_rq.wq);
+			goto bail_qp;
+		}
+		qp->ibqp.qp_num = err;
+		qp->port_num = init_attr->port_num;
+		qp->processor_id = smp_processor_id();
+		qib_reset_qp(qp, init_attr->qp_type);
+		break;
+
+	default:
+		/* Don't support raw QPs */
+		ret = ERR_PTR(-ENOSYS);
+		goto bail;
+	}
+
+	init_attr->cap.max_inline_data = 0;
+
+	/*
+	 * Return the address of the RWQ as the offset to mmap.
+	 * See qib_mmap() for details.
+	 */
+	if (udata && udata->outlen >= sizeof(__u64)) {
+		if (!qp->r_rq.wq) {
+			__u64 offset = 0;
+
+			err = ib_copy_to_udata(udata, &offset,
+					       sizeof(offset));
+			if (err) {
+				ret = ERR_PTR(err);
+				goto bail_ip;
+			}
+		} else {
+			u32 s = sizeof(struct qib_rwq) + qp->r_rq.size * sz;
+
+			qp->ip = qib_create_mmap_info(dev, s,
+						      ibpd->uobject->context,
+						      qp->r_rq.wq);
+			if (!qp->ip) {
+				ret = ERR_PTR(-ENOMEM);
+				goto bail_ip;
+			}
+
+			err = ib_copy_to_udata(udata, &(qp->ip->offset),
+					       sizeof(qp->ip->offset));
+			if (err) {
+				ret = ERR_PTR(err);
+				goto bail_ip;
+			}
+		}
+	}
+
+	spin_lock(&dev->n_qps_lock);
+	if (dev->n_qps_allocated == ib_qib_max_qps) {
+		spin_unlock(&dev->n_qps_lock);
+		ret = ERR_PTR(-ENOMEM);
+		goto bail_ip;
+	}
+
+	dev->n_qps_allocated++;
+	spin_unlock(&dev->n_qps_lock);
+
+	if (qp->ip) {
+		spin_lock_irq(&dev->pending_lock);
+		list_add(&qp->ip->pending_mmaps, &dev->pending_mmaps);
+		spin_unlock_irq(&dev->pending_lock);
+	}
+
+	ret = &qp->ibqp;
+	goto bail;
+
+bail_ip:
+	if (qp->ip)
+		kref_put(&qp->ip->ref, qib_release_mmap_info);
+	else
+		vfree(qp->r_rq.wq);
+	free_qpn(&dev->qpn_table, qp->ibqp.qp_num);
+bail_qp:
+	kfree(qp);
+bail_swq:
+	vfree(swq);
+bail:
+	return ret;
+}
+
+/**
+ * qib_destroy_qp - destroy a queue pair
+ * @ibqp: the queue pair to destroy
+ *
+ * Returns 0 on success.
+ *
+ * Note that this can be called while the QP is actively sending or
+ * receiving!
+ */
+int qib_destroy_qp(struct ib_qp *ibqp)
+{
+	struct qib_qp *qp = to_iqp(ibqp);
+	struct qib_ibdev *dev = to_idev(ibqp->device);
+
+	/* Make sure HW and driver activity is stopped. */
+	spin_lock_irq(&qp->s_lock);
+	if (qp->state != IB_QPS_RESET) {
+		qp->state = IB_QPS_RESET;
+		spin_lock(&dev->pending_lock);
+		if (!list_empty(&qp->iowait))
+			list_del_init(&qp->iowait);
+		spin_unlock(&dev->pending_lock);
+		qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT);
+		spin_unlock_irq(&qp->s_lock);
+		cancel_work_sync(&qp->s_work);
+		del_timer_sync(&qp->s_timer);
+		wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
+		if (qp->s_tx) {
+			qib_put_txreq(qp->s_tx);
+			qp->s_tx = NULL;
+		}
+		remove_qp(dev, qp);
+		wait_event(qp->wait, !atomic_read(&qp->refcount));
+		clear_mr_refs(qp, 1);
+	} else
+		spin_unlock_irq(&qp->s_lock);
+
+	/* all users cleaned up, mark it available */
+	free_qpn(&dev->qpn_table, qp->ibqp.qp_num);
+	spin_lock(&dev->n_qps_lock);
+	dev->n_qps_allocated--;
+	spin_unlock(&dev->n_qps_lock);
+
+	if (qp->ip)
+		kref_put(&qp->ip->ref, qib_release_mmap_info);
+	else
+		vfree(qp->r_rq.wq);
+	vfree(qp->s_wq);
+	kfree(qp);
+	return 0;
+}
+
+/**
+ * qib_init_qpn_table - initialize the QP number table for a device
+ * @dd: the qlogic_ib device
+ * @qpt: the QPN table
+ */
+void qib_init_qpn_table(struct qib_devdata *dd, struct qib_qpn_table *qpt)
+{
+	spin_lock_init(&qpt->lock);
+	qpt->last = 1;          /* start with QPN 2 */
+	qpt->nmaps = 1;
+	qpt->mask = dd->qpn_mask;
+}
+
+/**
+ * qib_free_qpn_table - free the QP number table for a device
+ * @qpt: the QPN table
+ */
+void qib_free_qpn_table(struct qib_qpn_table *qpt)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
+		if (qpt->map[i].page)
+			free_page((unsigned long) qpt->map[i].page);
+}
+
+/**
+ * qib_get_credit - handle a credit update from an incoming AETH
+ * @qp: the QP whose send credit state to update
+ * @aeth: the Acknowledge Extended Transport Header
+ *
+ * The QP s_lock should be held.
+ */
+void qib_get_credit(struct qib_qp *qp, u32 aeth)
+{
+	u32 credit = (aeth >> QIB_AETH_CREDIT_SHIFT) & QIB_AETH_CREDIT_MASK;
+
+	/*
+	 * If the credit is invalid, we can send
+	 * as many packets as we like.  Otherwise, we have to
+	 * honor the credit field.
+	 */
+	if (credit == QIB_AETH_CREDIT_INVAL) {
+		if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) {
+			qp->s_flags |= QIB_S_UNLIMITED_CREDIT;
+			if (qp->s_flags & QIB_S_WAIT_SSN_CREDIT) {
+				qp->s_flags &= ~QIB_S_WAIT_SSN_CREDIT;
+				qib_schedule_send(qp);
+			}
+		}
+	} else if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) {
+		/* Compute new LSN (i.e., MSN + credit) */
+		credit = (aeth + credit_table[credit]) & QIB_MSN_MASK;
+		if (qib_cmp24(credit, qp->s_lsn) > 0) {
+			qp->s_lsn = credit;
+			if (qp->s_flags & QIB_S_WAIT_SSN_CREDIT) {
+				qp->s_flags &= ~QIB_S_WAIT_SSN_CREDIT;
+				qib_schedule_send(qp);
+			}
+		}
+	}
+}
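A small stand-alone sketch of the AETH layout this function decodes: the
5-bit credit syndrome sits above the 24-bit MSN. The shift and mask values
are assumptions matching the QIB_AETH_* and QIB_MSN_MASK constants used
above, which are defined in the driver's headers:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed field layout: 5-bit credit code at bits 28:24, 24-bit MSN. */
    #define AETH_CREDIT_SHIFT 24
    #define AETH_CREDIT_MASK  0x1F
    #define MSN_MASK          0xFFFFFF

    int main(void)
    {
            uint32_t aeth = (13u << AETH_CREDIT_SHIFT) | 0x000123;

            printf("credit code %u, MSN 0x%06x\n",
                   (aeth >> AETH_CREDIT_SHIFT) & AETH_CREDIT_MASK,
                   aeth & MSN_MASK);
            return 0;
    }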
diff --git a/drivers/infiniband/hw/qib/qib_qsfp.c b/drivers/infiniband/hw/qib/qib_qsfp.c
new file mode 100644
index 000000000000..35b3604b691d
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_qsfp.c
@@ -0,0 +1,564 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/vmalloc.h>
+
+#include "qib.h"
+#include "qib_qsfp.h"
+
+/*
+ * QSFP support for ib_qib driver, using "Two Wire Serial Interface" driver
+ * in qib_twsi.c
+ */
+#define QSFP_MAX_RETRY 4
+
+static int qsfp_read(struct qib_pportdata *ppd, int addr, void *bp, int len)
+{
+	struct qib_devdata *dd = ppd->dd;
+	u32 out, mask;
+	int ret, cnt, pass = 0;
+	int stuck = 0;
+	u8 *buff = bp;
+
+	ret = mutex_lock_interruptible(&dd->eep_lock);
+	if (ret)
+		goto no_unlock;
+
+	if (dd->twsi_eeprom_dev == QIB_TWSI_NO_DEV) {
+		ret = -ENXIO;
+		goto bail;
+	}
+
+	/*
+	 * We presume, if we are called at all, that this board has
+	 * QSFP. This is on the same i2c chain as the legacy parts,
+	 * but only responds if the module is selected via GPIO pins.
+	 * Further, there are very long setup and hold requirements
+	 * on MODSEL.
+	 */
+	mask = QSFP_GPIO_MOD_SEL_N | QSFP_GPIO_MOD_RST_N | QSFP_GPIO_LP_MODE;
+	out = QSFP_GPIO_MOD_RST_N | QSFP_GPIO_LP_MODE;
+	if (ppd->hw_pidx) {
+		mask <<= QSFP_GPIO_PORT2_SHIFT;
+		out <<= QSFP_GPIO_PORT2_SHIFT;
+	}
+
+	dd->f_gpio_mod(dd, out, mask, mask);
+
+	/*
+	 * Module could take up to 2 msec to respond to MOD_SEL, and there
+	 * is no way to tell if it is ready, so we must wait.
+	 */
+	msleep(2);
+
+	/* Make sure TWSI bus is in sane state. */
+	ret = qib_twsi_reset(dd);
+	if (ret) {
+		qib_dev_porterr(dd, ppd->port,
+				"QSFP interface Reset for read failed\n");
+		ret = -EIO;
+		stuck = 1;
+		goto deselect;
+	}
+
+	/* All QSFP modules are at A0 */
+
+	cnt = 0;
+	while (cnt < len) {
+		unsigned in_page;
+		int wlen = len - cnt;
+		in_page = addr % QSFP_PAGESIZE;
+		if ((in_page + wlen) > QSFP_PAGESIZE)
+			wlen = QSFP_PAGESIZE - in_page;
+		ret = qib_twsi_blk_rd(dd, QSFP_DEV, addr, buff + cnt, wlen);
+		/* Some QSFPs fail the first try. Retry as a workaround */
+		if (ret && cnt == 0 && ++pass < QSFP_MAX_RETRY)
+			continue;
+		if (ret) {
+			/* qib_twsi_blk_rd() returns 1 on error, else 0 */
+			ret = -EIO;
+			goto deselect;
+		}
+		addr += wlen;
+		cnt += wlen;
+	}
+	ret = cnt;
+
+deselect:
+	/*
+	 * Module could take up to 10 usec after transfer before
+	 * ready to respond to MOD_SEL negation, and there is no way
+	 * to tell if it is ready, so we must wait.
+	 */
+	udelay(10);
+	/* set QSFP MODSEL, RST, LP all high */
+	dd->f_gpio_mod(dd, mask, mask, mask);
+
+	/*
+	 * Module could take up to 2 msec to respond to MOD_SEL
+	 * going away, and there is no way to tell if it is ready,
+	 * so we must wait.
+	 */
+	if (stuck)
+		qib_dev_err(dd, "QSFP interface bus stuck non-idle\n");
+
+	if (pass >= QSFP_MAX_RETRY && ret)
+		qib_dev_porterr(dd, ppd->port, "QSFP failed even retrying\n");
+	else if (pass)
+		qib_dev_porterr(dd, ppd->port, "QSFP retries: %d\n", pass);
+
+	msleep(2);
+
+bail:
+	mutex_unlock(&dd->eep_lock);
+
+no_unlock:
+	return ret;
+}
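The read loop above never lets a single TWSI transfer cross a QSFP_PAGESIZE
(128-byte) boundary. A stand-alone sketch of just that clamping arithmetic:

    #include <stdio.h>

    #define QSFP_PAGESIZE 128

    /* Largest transfer starting at 'addr' that stays within one page. */
    static int clamp_to_page(int addr, int want)
    {
            int in_page = addr % QSFP_PAGESIZE;

            if (in_page + want > QSFP_PAGESIZE)
                    want = QSFP_PAGESIZE - in_page;
            return want;
    }

    int main(void)
    {
            printf("%d\n", clamp_to_page(120, 32)); /* 8: stop at byte 127 */
            printf("%d\n", clamp_to_page(0, 32));   /* 32: fits in the page */
            return 0;
    }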
+
+/*
+ * qib_qsfp_write
+ * We do not ordinarily write the QSFP, but this is needed to select
+ * the page on non-flat QSFPs, and possibly for later unusual cases.
+ */
+static int qib_qsfp_write(struct qib_pportdata *ppd, int addr, void *bp,
+			  int len)
+{
+	struct qib_devdata *dd = ppd->dd;
+	u32 out, mask;
+	int ret, cnt;
+	u8 *buff = bp;
+
+	ret = mutex_lock_interruptible(&dd->eep_lock);
+	if (ret)
+		goto no_unlock;
+
+	if (dd->twsi_eeprom_dev == QIB_TWSI_NO_DEV) {
+		ret = -ENXIO;
+		goto bail;
+	}
+
+	/*
+	 * We presume, if we are called at all, that this board has
+	 * QSFP. This is on the same i2c chain as the legacy parts,
+	 * but only responds if the module is selected via GPIO pins.
+	 * Further, there are very long setup and hold requirements
+	 * on MODSEL.
+	 */
+	mask = QSFP_GPIO_MOD_SEL_N | QSFP_GPIO_MOD_RST_N | QSFP_GPIO_LP_MODE;
+	out = QSFP_GPIO_MOD_RST_N | QSFP_GPIO_LP_MODE;
+	if (ppd->hw_pidx) {
+		mask <<= QSFP_GPIO_PORT2_SHIFT;
+		out <<= QSFP_GPIO_PORT2_SHIFT;
+	}
+	dd->f_gpio_mod(dd, out, mask, mask);
+
+	/*
+	 * Module could take up to 2 msec to respond to MOD_SEL,
+	 * and there is no way to tell if it is ready, so we must wait.
+	 */
+	msleep(2);
+
+	/* Make sure TWSI bus is in sane state. */
+	ret = qib_twsi_reset(dd);
+	if (ret) {
+		qib_dev_porterr(dd, ppd->port,
+				"QSFP interface Reset for write failed\n");
+		ret = -EIO;
+		goto deselect;
+	}
+
+	/* All QSFP modules are at A0 */
+
+	cnt = 0;
+	while (cnt < len) {
+		unsigned in_page;
+		int wlen = len - cnt;
+		in_page = addr % QSFP_PAGESIZE;
+		if ((in_page + wlen) > QSFP_PAGESIZE)
+			wlen = QSFP_PAGESIZE - in_page;
+		ret = qib_twsi_blk_wr(dd, QSFP_DEV, addr, buff + cnt, wlen);
+		if (ret) {
+			/* qib_twsi_blk_wr() returns 1 on error, else 0 */
+			ret = -EIO;
+			goto deselect;
+		}
+		addr += wlen;
+		cnt += wlen;
+	}
+	ret = cnt;
+
+deselect:
+	/*
+	 * Module could take up to 10 usec after transfer before
+	 * ready to respond to MOD_SEL negation, and there is no way
+	 * to tell if it is ready, so we must wait.
+	 */
+	udelay(10);
+	/* set QSFP MODSEL, RST, LP high */
+	dd->f_gpio_mod(dd, mask, mask, mask);
+	/*
+	 * Module could take up to 2 msec to respond to MOD_SEL
+	 * going away, and there is no way to tell if it is ready,
+	 * so we must wait.
+	 */
+	msleep(2);
+
+bail:
+	mutex_unlock(&dd->eep_lock);
+
+no_unlock:
+	return ret;
+}
+
+/*
+ * For validation, we want to check the checksums, even of the
+ * fields we do not otherwise use. This function reads the bytes from
+ * <first> to <next-1> and returns the 8 LSBs of the sum, or < 0 on error
+ */
+static int qsfp_cks(struct qib_pportdata *ppd, int first, int next)
+{
+	int ret;
+	u16 cks;
+	u8 bval;
+
+	cks = 0;
+	while (first < next) {
+		ret = qsfp_read(ppd, first, &bval, 1);
+		if (ret < 0)
+			goto bail;
+		cks += bval;
+		++first;
+	}
+	ret = cks & 0xFF;
+bail:
+	return ret;
+}
+
+int qib_refresh_qsfp_cache(struct qib_pportdata *ppd, struct qib_qsfp_cache *cp)
+{
+	int ret;
+	int idx;
+	u16 cks;
+	u32 mask;
+	u8 peek[4];
+
+	/* ensure sane contents on invalid reads, for cable swaps */
+	memset(cp, 0, sizeof(*cp));
+
+	mask = QSFP_GPIO_MOD_PRS_N;
+	if (ppd->hw_pidx)
+		mask <<= QSFP_GPIO_PORT2_SHIFT;
+
+	ret = ppd->dd->f_gpio_mod(ppd->dd, 0, 0, 0);
+	if (ret & mask) {
+		ret = -ENODEV;
+		goto bail;
+	}
+
+	ret = qsfp_read(ppd, 0, peek, 3);
+	if (ret < 0)
+		goto bail;
+	if ((peek[0] & 0xFE) != 0x0C)
+		qib_dev_porterr(ppd->dd, ppd->port,
+				"QSFP byte0 is 0x%02X, S/B 0x0C/D\n", peek[0]);
+
+	if ((peek[2] & 2) == 0) {
+		/*
+		 * If cable is paged, rather than "flat memory", we need to
+		 * set the page to zero, even if it already appears to be zero.
+		 */
+		u8 poke = 0;
+
+		ret = qib_qsfp_write(ppd, 127, &poke, 1);
+		udelay(50);
+		if (ret != 1) {
+			qib_dev_porterr(ppd->dd, ppd->port,
+					"Failed QSFP Page set\n");
+			goto bail;
+		}
+	}
+
+	ret = qsfp_read(ppd, QSFP_MOD_ID_OFFS, &cp->id, 1);
+	if (ret < 0)
+		goto bail;
+	if ((cp->id & 0xFE) != 0x0C)
+		qib_dev_porterr(ppd->dd, ppd->port,
+				"QSFP ID byte is 0x%02X, S/B 0x0C/D\n", cp->id);
+	cks = cp->id;
+
+	ret = qsfp_read(ppd, QSFP_MOD_PWR_OFFS, &cp->pwr, 1);
+	if (ret < 0)
+		goto bail;
+	cks += cp->pwr;
+
+	ret = qsfp_cks(ppd, QSFP_MOD_PWR_OFFS + 1, QSFP_MOD_LEN_OFFS);
+	if (ret < 0)
+		goto bail;
+	cks += ret;
+
+	ret = qsfp_read(ppd, QSFP_MOD_LEN_OFFS, &cp->len, 1);
+	if (ret < 0)
+		goto bail;
+	cks += cp->len;
+
+	ret = qsfp_read(ppd, QSFP_MOD_TECH_OFFS, &cp->tech, 1);
+	if (ret < 0)
+		goto bail;
+	cks += cp->tech;
+
+	ret = qsfp_read(ppd, QSFP_VEND_OFFS, &cp->vendor, QSFP_VEND_LEN);
+	if (ret < 0)
+		goto bail;
+	for (idx = 0; idx < QSFP_VEND_LEN; ++idx)
+		cks += cp->vendor[idx];
+
+	ret = qsfp_read(ppd, QSFP_IBXCV_OFFS, &cp->xt_xcv, 1);
+	if (ret < 0)
+		goto bail;
+	cks += cp->xt_xcv;
+
+	ret = qsfp_read(ppd, QSFP_VOUI_OFFS, &cp->oui, QSFP_VOUI_LEN);
+	if (ret < 0)
+		goto bail;
+	for (idx = 0; idx < QSFP_VOUI_LEN; ++idx)
+		cks += cp->oui[idx];
+
+	ret = qsfp_read(ppd, QSFP_PN_OFFS, &cp->partnum, QSFP_PN_LEN);
+	if (ret < 0)
+		goto bail;
+	for (idx = 0; idx < QSFP_PN_LEN; ++idx)
+		cks += cp->partnum[idx];
+
+	ret = qsfp_read(ppd, QSFP_REV_OFFS, &cp->rev, QSFP_REV_LEN);
+	if (ret < 0)
+		goto bail;
+	for (idx = 0; idx < QSFP_REV_LEN; ++idx)
+		cks += cp->rev[idx];
+
+	ret = qsfp_read(ppd, QSFP_ATTEN_OFFS, &cp->atten, QSFP_ATTEN_LEN);
+	if (ret < 0)
+		goto bail;
+	for (idx = 0; idx < QSFP_ATTEN_LEN; ++idx)
+		cks += cp->atten[idx];
+
+	ret = qsfp_cks(ppd, QSFP_ATTEN_OFFS + QSFP_ATTEN_LEN, QSFP_CC_OFFS);
+	if (ret < 0)
+		goto bail;
+	cks += ret;
+
+	cks &= 0xFF;
+	ret = qsfp_read(ppd, QSFP_CC_OFFS, &cp->cks1, 1);
+	if (ret < 0)
+		goto bail;
+	if (cks != cp->cks1)
+		qib_dev_porterr(ppd->dd, ppd->port,
+				"QSFP cks1 is %02X, computed %02X\n", cp->cks1,
+				cks);
+
+	/* Second checksum covers bytes 192..222 (options, serial, date, lot) */
+	ret = qsfp_cks(ppd, QSFP_CC_OFFS + 1, QSFP_SN_OFFS);
+	if (ret < 0)
+		goto bail;
+	cks = ret;
+
+	ret = qsfp_read(ppd, QSFP_SN_OFFS, &cp->serial, QSFP_SN_LEN);
+	if (ret < 0)
+		goto bail;
+	for (idx = 0; idx < QSFP_SN_LEN; ++idx)
+		cks += cp->serial[idx];
+
+	ret = qsfp_read(ppd, QSFP_DATE_OFFS, &cp->date, QSFP_DATE_LEN);
+	if (ret < 0)
+		goto bail;
+	for (idx = 0; idx < QSFP_DATE_LEN; ++idx)
+		cks += cp->date[idx];
+
+	ret = qsfp_read(ppd, QSFP_LOT_OFFS, &cp->lot, QSFP_LOT_LEN);
+	if (ret < 0)
+		goto bail;
+	for (idx = 0; idx < QSFP_LOT_LEN; ++idx)
+		cks += cp->lot[idx];
+
+	ret = qsfp_cks(ppd, QSFP_LOT_OFFS + QSFP_LOT_LEN, QSFP_CC_EXT_OFFS);
+	if (ret < 0)
+		goto bail;
+	cks += ret;
+
+	ret = qsfp_read(ppd, QSFP_CC_EXT_OFFS, &cp->cks2, 1);
+	if (ret < 0)
+		goto bail;
+	cks &= 0xFF;
+	if (cks != cp->cks2)
+		qib_dev_porterr(ppd->dd, ppd->port,
+				"QSFP cks2 is %02X, computed %02X\n", cp->cks2,
+				cks);
+	return 0;
+
+bail:
+	cp->id = 0;
+	return ret;
+}
+
+const char * const qib_qsfp_devtech[16] = {
+	"850nm VCSEL", "1310nm VCSEL", "1550nm VCSEL", "1310nm FP",
+	"1310nm DFB", "1550nm DFB", "1310nm EML", "1550nm EML",
+	"Cu Misc", "1490nm DFB", "Cu NoEq", "Cu Eq",
+	"Undef", "Cu Active BothEq", "Cu FarEq", "Cu NearEq"
+};
+
+#define QSFP_DUMP_CHUNK 16 /* Holds longest string */
+#define QSFP_DEFAULT_HDR_CNT 224
+
+static const char *pwr_codes = "1.5W2.0W2.5W3.5W";
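pwr_codes packs the four power-class labels at a fixed four-character stride;
qib_qsfp_dump() below indexes it as pwr_codes + QSFP_PWR(pwr) * 4 and prints
three characters, with the trailing 'W' supplied by the format string. A
stand-alone sketch using the definitions from this file:

    #include <stdio.h>

    static const char *pwr_codes = "1.5W2.0W2.5W3.5W";

    /* Power class lives in bits 7:6 of the extended identifier byte. */
    #define QSFP_PWR(pbyte) (((pbyte) >> 6) & 3)

    int main(void)
    {
            unsigned char ext_id = 0x80;    /* class 2: bits 7:6 == 0b10 */

            /* %.3s takes "2.5"; the 'W' in the format supplies the unit. */
            printf("PWR:%.3sW\n", pwr_codes + QSFP_PWR(ext_id) * 4);
            return 0;
    }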
+
+/*
+ * Initialize structures that control access to QSFP. Called once per port
+ * on cards that support QSFP.
+ */
+void qib_qsfp_init(struct qib_qsfp_data *qd,
+		   void (*fevent)(struct work_struct *))
+{
+	u32 mask, highs;
+	int pins;
+	struct qib_devdata *dd = qd->ppd->dd;
+
+	/* Initialize work struct for later QSFP events */
+	INIT_WORK(&qd->work, fevent);
+
+	/*
+	 * Later, we may want more validation. For now, just set up pins and
+	 * blip reset. If module is present, call qib_refresh_qsfp_cache(),
+	 * to do further init.
+	 */
+	mask = QSFP_GPIO_MOD_SEL_N | QSFP_GPIO_MOD_RST_N | QSFP_GPIO_LP_MODE;
+	highs = mask - QSFP_GPIO_MOD_RST_N;
+	if (qd->ppd->hw_pidx) {
+		mask <<= QSFP_GPIO_PORT2_SHIFT;
+		highs <<= QSFP_GPIO_PORT2_SHIFT;
+	}
+	dd->f_gpio_mod(dd, highs, mask, mask);
+	udelay(20); /* Generous RST dwell */
+
+	dd->f_gpio_mod(dd, mask, mask, mask);
+	/* Spec says module can take up to two seconds! */
+	mask = QSFP_GPIO_MOD_PRS_N;
+	if (qd->ppd->hw_pidx)
+		mask <<= QSFP_GPIO_PORT2_SHIFT;
+
+	/* Do not try to wait here. Better to let the event handler do it */
+	pins = dd->f_gpio_mod(dd, 0, 0, 0);
+	if (pins & mask)
+		goto bail;
+	/* We see a module, but it may be unwise to look yet. Just schedule */
+	qd->t_insert = get_jiffies_64();
+	schedule_work(&qd->work);
+bail:
+	return;
+}
+
+void qib_qsfp_deinit(struct qib_qsfp_data *qd)
+{
+	/*
+	 * There is nothing to do here for now.  Our
+	 * work is scheduled with schedule_work(), and
+	 * flush_scheduled_work() from remove_one will
+	 * block until all work set up with schedule_work()
+	 * completes.
+	 */
+}
+
+int qib_qsfp_dump(struct qib_pportdata *ppd, char *buf, int len)
+{
+	struct qib_qsfp_cache cd;
+	u8 bin_buff[QSFP_DUMP_CHUNK];
+	char lenstr[6];
+	int sofar, ret;
+	int bidx = 0;
+
+	sofar = 0;
+	ret = qib_refresh_qsfp_cache(ppd, &cd);
+	if (ret < 0)
+		goto bail;
+
+	lenstr[0] = ' ';
+	lenstr[1] = '\0';
+	if (QSFP_IS_CU(cd.tech))
+		sprintf(lenstr, "%dM ", cd.len);
+
+	sofar += scnprintf(buf + sofar, len - sofar, "PWR:%.3sW\n", pwr_codes +
+			   (QSFP_PWR(cd.pwr) * 4));
+
+	sofar += scnprintf(buf + sofar, len - sofar, "TECH:%s%s\n", lenstr,
+			   qib_qsfp_devtech[cd.tech >> 4]);
+
+	sofar += scnprintf(buf + sofar, len - sofar, "Vendor:%.*s\n",
+			   QSFP_VEND_LEN, cd.vendor);
+
+	sofar += scnprintf(buf + sofar, len - sofar, "OUI:%06X\n",
+			   QSFP_OUI(cd.oui));
+
+	sofar += scnprintf(buf + sofar, len - sofar, "Part#:%.*s\n",
+			   QSFP_PN_LEN, cd.partnum);
+	sofar += scnprintf(buf + sofar, len - sofar, "Rev:%.*s\n",
+			   QSFP_REV_LEN, cd.rev);
+	if (QSFP_IS_CU(cd.tech))
+		sofar += scnprintf(buf + sofar, len - sofar, "Atten:%d, %d\n",
+				   QSFP_ATTEN_SDR(cd.atten),
+				   QSFP_ATTEN_DDR(cd.atten));
+	sofar += scnprintf(buf + sofar, len - sofar, "Serial:%.*s\n",
+			   QSFP_SN_LEN, cd.serial);
+	sofar += scnprintf(buf + sofar, len - sofar, "Date:%.*s\n",
+			   QSFP_DATE_LEN, cd.date);
+	sofar += scnprintf(buf + sofar, len - sofar, "Lot:%.*s\n",
+			   QSFP_LOT_LEN, cd.lot);
+
+	while (bidx < QSFP_DEFAULT_HDR_CNT) {
+		int iidx;
+
+		ret = qsfp_read(ppd, bidx, bin_buff, QSFP_DUMP_CHUNK);
+		if (ret < 0)
+			goto bail;
+		for (iidx = 0; iidx < ret; ++iidx) {
+			sofar += scnprintf(buf + sofar, len - sofar, " %02X",
+				bin_buff[iidx]);
+		}
+		sofar += scnprintf(buf + sofar, len - sofar, "\n");
+		bidx += QSFP_DUMP_CHUNK;
+	}
+	ret = sofar;
+bail:
+	return ret;
+}
diff --git a/drivers/infiniband/hw/qib/qib_qsfp.h b/drivers/infiniband/hw/qib/qib_qsfp.h
new file mode 100644
index 000000000000..19b527bafd57
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_qsfp.h
@@ -0,0 +1,184 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+/* QSFP support common definitions, for ib_qib driver */
+
+#define QSFP_DEV 0xA0
+#define QSFP_PWR_LAG_MSEC 2000
+
+/*
+ * Below are masks for various QSFP signals, for Port 1.
+ * Port2 equivalents are shifted by QSFP_GPIO_PORT2_SHIFT.
+ * _N means asserted low
+ */
+#define QSFP_GPIO_MOD_SEL_N (4)
+#define QSFP_GPIO_MOD_PRS_N (8)
+#define QSFP_GPIO_INT_N (0x10)
+#define QSFP_GPIO_MOD_RST_N (0x20)
+#define QSFP_GPIO_LP_MODE (0x40)
+#define QSFP_GPIO_PORT2_SHIFT 5
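Port 1's QSFP signals occupy one block of GPIO lines and port 2's are the
same pattern shifted up by QSFP_GPIO_PORT2_SHIFT, which is why every user of
these masks in qib_qsfp.c above conditionally shifts on ppd->hw_pidx. A
stand-alone sketch of the shift:

    #include <stdio.h>

    #define QSFP_GPIO_MOD_SEL_N   (4)
    #define QSFP_GPIO_MOD_RST_N   (0x20)
    #define QSFP_GPIO_LP_MODE     (0x40)
    #define QSFP_GPIO_PORT2_SHIFT 5

    int main(void)
    {
            unsigned mask = QSFP_GPIO_MOD_SEL_N | QSFP_GPIO_MOD_RST_N |
                            QSFP_GPIO_LP_MODE;
            int hw_pidx = 1;        /* 0 for port 1, nonzero for port 2 */

            if (hw_pidx)
                    mask <<= QSFP_GPIO_PORT2_SHIFT;
            printf("mask 0x%x\n", mask);    /* 0x64 for port 1, 0xc80 for 2 */
            return 0;
    }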
+
+#define QSFP_PAGESIZE 128
+/* Defined fields that QLogic requires of qualified cables */
+/* Byte 0 is Identifier, not checked */
+/* Byte 1 is reserved "status MSB" */
+/* Byte 2 is "status LSB" We only care that D2 "Flat Mem" is set. */
+/*
+ * Rest of first 128 not used, although 127 is reserved for page select
+ * if module is not "Flat memory".
+ */
+/* Byte 128 is Identifier: must be 0x0c for QSFP, or 0x0d for QSFP+ */
+#define QSFP_MOD_ID_OFFS 128
+/*
+ * Byte 129 is "Extended Identifier". We only care about D7,D6: Power class
+ *  0:1.5W, 1:2.0W, 2:2.5W, 3:3.5W
+ */
+#define QSFP_MOD_PWR_OFFS 129
+/* Byte 130 is Connector type. Not QLogic req'd */
+/* Bytes 131..138 are Transceiver types, bit maps for various tech, none IB */
+/* Byte 139 is encoding. code 0x01 is 8b10b. Not QLogic req'd */
+/* byte 140 is nominal bit-rate, in units of 100Mbits/sec Not QLogic req'd */
+/* Byte 141 is Extended Rate Select. Not QLogic req'd */
+/* Bytes 142..145 are lengths for various fiber types. Not QLogic req'd */
+/* Byte 146 is length for Copper. Units of 1 meter */
+#define QSFP_MOD_LEN_OFFS 146
+/*
+ * Byte 147 is Device technology. D0..3 not QLogic req'd
+ * D4..7 select from 15 choices, translated by table:
+ */
+#define QSFP_MOD_TECH_OFFS 147
+extern const char *const qib_qsfp_devtech[16];
+/* Active Equalization includes fiber, copper full EQ, and copper near Eq */
+#define QSFP_IS_ACTIVE(tech) ((0xA2FF >> ((tech) >> 4)) & 1)
+/* Attenuation should be valid for copper other than full/near Eq */
+#define QSFP_HAS_ATTEN(tech) ((0x4D00 >> ((tech) >> 4)) & 1)
+/* Length is only valid if technology is "copper" */
+#define QSFP_IS_CU(tech) ((0xED00 >> ((tech) >> 4)) & 1)
+#define QSFP_TECH_1490 9
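Each of the QSFP_IS_*/QSFP_HAS_* macros above encodes a 16-entry boolean
table in a single constant: bit n of the constant answers the question for
technology code n (the upper nibble of byte 147, which also indexes
qib_qsfp_devtech[]). A stand-alone sketch of the decode:

    #include <stdio.h>

    /* 0xED00 has bits 8, 10, 11, 13, 14, and 15 set: exactly the "Cu"
     * entries of the qib_qsfp_devtech[] table. */
    #define QSFP_IS_CU(tech) ((0xED00 >> ((tech) >> 4)) & 1)

    int main(void)
    {
            printf("0xA0 (Cu NoEq) copper? %d\n", QSFP_IS_CU(0xA0));  /* 1 */
            printf("0x10 (1310nm VCSEL)?   %d\n", QSFP_IS_CU(0x10));  /* 0 */
            return 0;
    }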
+
+#define QSFP_OUI(oui) (((unsigned)oui[0] << 16) | ((unsigned)oui[1] << 8) | \
+			oui[2])
+#define QSFP_OUI_AMPHENOL 0x415048
+#define QSFP_OUI_FINISAR  0x009065
+#define QSFP_OUI_GORE     0x002177
+
+/* Bytes 148..163 are Vendor Name, Left-justified Blank-filled */
+#define QSFP_VEND_OFFS 148
+#define QSFP_VEND_LEN 16
+/* Byte 164 is IB Extended transceiver codes. Bits D0..3 are SDR,DDR,QDR,EDR */
+#define QSFP_IBXCV_OFFS 164
+/* Bytes 165..167 are Vendor OUI number */
+#define QSFP_VOUI_OFFS 165
+#define QSFP_VOUI_LEN 3
+/* Bytes 168..183 are Vendor Part Number, string */
+#define QSFP_PN_OFFS 168
+#define QSFP_PN_LEN 16
+/* Bytes 184,185 are Vendor Rev. Left Justified, Blank-filled */
+#define QSFP_REV_OFFS 184
+#define QSFP_REV_LEN 2
+/*
+ * Bytes 186,187 are Wavelength, if Optical. Not QLogic req'd
+ *  If copper, they are attenuation in dB:
+ * Byte 186 is at 2.5Gb/sec (SDR), Byte 187 at 5.0Gb/sec (DDR)
+ */
+#define QSFP_ATTEN_OFFS 186
+#define QSFP_ATTEN_LEN 2
+/* Bytes 188,189 are Wavelength tolerance, not QLogic req'd */
+/* Byte 190 is Max Case Temp. Not QLogic req'd */
+/* Byte 191 is LSB of sum of bytes 128..190. Not QLogic req'd */
+#define QSFP_CC_OFFS 191
+/* Bytes 192..195 are Options implemented in qsfp. Not QLogic req'd */
+/* Bytes 196..211 are Serial Number, String */
+#define QSFP_SN_OFFS 196
+#define QSFP_SN_LEN 16
+/* Bytes 212..219 are date-code YYMMDD (MM==1 for Jan) */
+#define QSFP_DATE_OFFS 212
+#define QSFP_DATE_LEN 6
+/* Bytes 218,219 are optional lot-code, string */
+#define QSFP_LOT_OFFS 218
+#define QSFP_LOT_LEN 2
+/* Bytes 220, 221 indicate monitoring options, Not QLogic req'd */
+/* Byte 223 is LSB of sum of bytes 192..222 */
+#define QSFP_CC_EXT_OFFS 223
+
+/*
+ * struct qib_qsfp_data encapsulates state of QSFP device for one port.
+ * It will be part of port-chip-specific data if a board supports QSFP.
+ *
+ * Since multiple board-types use QSFP, and their pport_data structs
+ * differ (in the chip-specific section), we need a pointer to its head.
+ *
+ * Avoiding premature optimization, we will have one work_struct per port,
+ * and let the (increasingly inaccurately named) eep_lock arbitrate
+ * access to common resources.
+ *
+ */
+/*
+ * Hold the parts of the onboard EEPROM that we care about, so we aren't
+ * constantly bit-banging.
+ */
+struct qib_qsfp_cache {
+	u8 id;	/* must be 0x0C or 0x0D; 0 indicates invalid EEPROM read */
+	u8 pwr; /* in D6,7 */
+	u8 len;	/* in meters, Cu only */
+	u8 tech;
+	char vendor[QSFP_VEND_LEN];
+	u8 xt_xcv; /* Ext. transceiver codes, 4 lsbs are IB speed supported */
+	u8 oui[QSFP_VOUI_LEN];
+	u8 partnum[QSFP_PN_LEN];
+	u8 rev[QSFP_REV_LEN];
+	u8 atten[QSFP_ATTEN_LEN];
+	u8 cks1;	/* Checksum of bytes 128..190 */
+	u8 serial[QSFP_SN_LEN];
+	u8 date[QSFP_DATE_LEN];
+	u8 lot[QSFP_LOT_LEN];
+	u8 cks2;	/* Checksum of bytes 192..222 */
+};
+
+#define QSFP_PWR(pbyte) (((pbyte) >> 6) & 3)
+#define QSFP_ATTEN_SDR(attenarray) (attenarray[0])
+#define QSFP_ATTEN_DDR(attenarray) (attenarray[1])
+
+struct qib_qsfp_data {
+	/* Helps to find our way */
+	struct qib_pportdata *ppd;
+	struct work_struct work;
+	struct qib_qsfp_cache cache;
+	u64 t_insert;
+};
+
+extern int qib_refresh_qsfp_cache(struct qib_pportdata *ppd,
+				  struct qib_qsfp_cache *cp);
+extern void qib_qsfp_init(struct qib_qsfp_data *qd,
+			  void (*fevent)(struct work_struct *));
+extern void qib_qsfp_deinit(struct qib_qsfp_data *qd);
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
new file mode 100644
index 000000000000..40c0a373719c
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -0,0 +1,2288 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/io.h>
+
+#include "qib.h"
+
+/* cut down ridiculously long IB macro names */
+#define OP(x) IB_OPCODE_RC_##x
+
+static void rc_timeout(unsigned long arg);
+
+static u32 restart_sge(struct qib_sge_state *ss, struct qib_swqe *wqe,
+		       u32 psn, u32 pmtu)
+{
+	u32 len;
+
+	len = ((psn - wqe->psn) & QIB_PSN_MASK) * pmtu;
+	ss->sge = wqe->sg_list[0];
+	ss->sg_list = wqe->sg_list + 1;
+	ss->num_sge = wqe->wr.num_sge;
+	ss->total_len = wqe->length;
+	qib_skip_sge(ss, len, 0);
+	return wqe->length - len;
+}
+
+static void start_timer(struct qib_qp *qp)
+{
+	qp->s_flags |= QIB_S_TIMER;
+	qp->s_timer.function = rc_timeout;
+	/* 4.096 usec * (1 << qp->timeout) */
+	qp->s_timer.expires = jiffies +
+		usecs_to_jiffies((4096UL * (1UL << qp->timeout)) / 1000UL);
+	add_timer(&qp->s_timer);
+}
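start_timer() implements the IBTA retry-timeout formula, 4.096 usec *
2^timeout; the 4096 in the expression above is that interval in nanoseconds,
divided by 1000 to get microseconds for usecs_to_jiffies(). A stand-alone
check of the arithmetic:

    #include <stdio.h>

    int main(void)
    {
            unsigned timeout;

            /* 4096 ns * 2^timeout, printed in whole microseconds;
             * e.g. timeout=14 gives ~67 msec. */
            for (timeout = 12; timeout <= 16; timeout++)
                    printf("timeout=%u -> %lu usec\n", timeout,
                           (4096UL * (1UL << timeout)) / 1000UL);
            return 0;
    }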
+
+/**
+ * qib_make_rc_ack - construct a response packet (ACK, NAK, or RDMA read)
+ * @dev: the device for this QP
+ * @qp: a pointer to the QP
+ * @ohdr: a pointer to the IB header being constructed
+ * @pmtu: the path MTU
+ *
+ * Return 1 if constructed; otherwise, return 0.
+ * Note that we are on the responder side of the QP context.
+ * Note the QP s_lock must be held.
+ */
+static int qib_make_rc_ack(struct qib_ibdev *dev, struct qib_qp *qp,
+			   struct qib_other_headers *ohdr, u32 pmtu)
+{
+	struct qib_ack_entry *e;
+	u32 hwords;
+	u32 len;
+	u32 bth0;
+	u32 bth2;
+
+	/* Don't send an ACK if we aren't supposed to. */
+	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
+		goto bail;
+
+	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
+	hwords = 5;
+
+	switch (qp->s_ack_state) {
+	case OP(RDMA_READ_RESPONSE_LAST):
+	case OP(RDMA_READ_RESPONSE_ONLY):
+		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
+		if (e->rdma_sge.mr) {
+			atomic_dec(&e->rdma_sge.mr->refcount);
+			e->rdma_sge.mr = NULL;
+		}
+		/* FALLTHROUGH */
+	case OP(ATOMIC_ACKNOWLEDGE):
+		/*
+		 * We can increment the tail pointer now that the last
+		 * response has been sent instead of only being
+		 * constructed.
+		 */
+		if (++qp->s_tail_ack_queue > QIB_MAX_RDMA_ATOMIC)
+			qp->s_tail_ack_queue = 0;
+		/* FALLTHROUGH */
+	case OP(SEND_ONLY):
+	case OP(ACKNOWLEDGE):
+		/* Check for no next entry in the queue. */
+		if (qp->r_head_ack_queue == qp->s_tail_ack_queue) {
+			if (qp->s_flags & QIB_S_ACK_PENDING)
+				goto normal;
+			goto bail;
+		}
+
+		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
+		if (e->opcode == OP(RDMA_READ_REQUEST)) {
+			/*
+			 * If an RDMA read response is being resent and
+			 * we haven't seen the duplicate request yet,
+			 * then stop sending the remaining responses the
+			 * responder has seen until the requester resends it.
+			 */
+			len = e->rdma_sge.sge_length;
+			if (len && !e->rdma_sge.mr) {
+				qp->s_tail_ack_queue = qp->r_head_ack_queue;
+				goto bail;
+			}
+			/* Copy SGE state in case we need to resend */
+			qp->s_rdma_mr = e->rdma_sge.mr;
+			if (qp->s_rdma_mr)
+				atomic_inc(&qp->s_rdma_mr->refcount);
+			qp->s_ack_rdma_sge.sge = e->rdma_sge;
+			qp->s_ack_rdma_sge.num_sge = 1;
+			qp->s_cur_sge = &qp->s_ack_rdma_sge;
+			if (len > pmtu) {
+				len = pmtu;
+				qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
+			} else {
+				qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
+				e->sent = 1;
+			}
+			ohdr->u.aeth = qib_compute_aeth(qp);
+			hwords++;
+			qp->s_ack_rdma_psn = e->psn;
+			bth2 = qp->s_ack_rdma_psn++ & QIB_PSN_MASK;
+		} else {
+			/* COMPARE_SWAP or FETCH_ADD */
+			qp->s_cur_sge = NULL;
+			len = 0;
+			qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
+			ohdr->u.at.aeth = qib_compute_aeth(qp);
+			ohdr->u.at.atomic_ack_eth[0] =
+				cpu_to_be32(e->atomic_data >> 32);
+			ohdr->u.at.atomic_ack_eth[1] =
+				cpu_to_be32(e->atomic_data);
+			hwords += sizeof(ohdr->u.at) / sizeof(u32);
+			bth2 = e->psn & QIB_PSN_MASK;
+			e->sent = 1;
+		}
+		bth0 = qp->s_ack_state << 24;
+		break;
+
+	case OP(RDMA_READ_RESPONSE_FIRST):
+		qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
+		/* FALLTHROUGH */
+	case OP(RDMA_READ_RESPONSE_MIDDLE):
+		qp->s_cur_sge = &qp->s_ack_rdma_sge;
+		qp->s_rdma_mr = qp->s_ack_rdma_sge.sge.mr;
+		if (qp->s_rdma_mr)
+			atomic_inc(&qp->s_rdma_mr->refcount);
+		len = qp->s_ack_rdma_sge.sge.sge_length;
+		if (len > pmtu)
+			len = pmtu;
+		else {
+			ohdr->u.aeth = qib_compute_aeth(qp);
+			hwords++;
+			qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
+			e = &qp->s_ack_queue[qp->s_tail_ack_queue];
+			e->sent = 1;
+		}
+		bth0 = qp->s_ack_state << 24;
+		bth2 = qp->s_ack_rdma_psn++ & QIB_PSN_MASK;
+		break;
+
+	default:
+normal:
+		/*
+		 * Send a regular ACK.
+		 * Set the s_ack_state so we wait until after sending
+		 * the ACK before setting s_ack_state to ACKNOWLEDGE
+		 * (see above).
+		 */
+		qp->s_ack_state = OP(SEND_ONLY);
+		qp->s_flags &= ~QIB_S_ACK_PENDING;
+		qp->s_cur_sge = NULL;
+		if (qp->s_nak_state)
+			ohdr->u.aeth =
+				cpu_to_be32((qp->r_msn & QIB_MSN_MASK) |
+					    (qp->s_nak_state <<
+					     QIB_AETH_CREDIT_SHIFT));
+		else
+			ohdr->u.aeth = qib_compute_aeth(qp);
+		hwords++;
+		len = 0;
+		bth0 = OP(ACKNOWLEDGE) << 24;
+		bth2 = qp->s_ack_psn & QIB_PSN_MASK;
+	}
+	qp->s_rdma_ack_cnt++;
+	qp->s_hdrwords = hwords;
+	qp->s_cur_size = len;
+	qib_make_ruc_header(qp, ohdr, bth0, bth2);
+	return 1;
+
+bail:
+	qp->s_ack_state = OP(ACKNOWLEDGE);
+	qp->s_flags &= ~(QIB_S_RESP_PENDING | QIB_S_ACK_PENDING);
+	return 0;
+}
+
+/**
+ * qib_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
+ * @qp: a pointer to the QP
+ *
+ * Return 1 if constructed; otherwise, return 0.
+ */
+int qib_make_rc_req(struct qib_qp *qp)
+{
+	struct qib_ibdev *dev = to_idev(qp->ibqp.device);
+	struct qib_other_headers *ohdr;
+	struct qib_sge_state *ss;
+	struct qib_swqe *wqe;
+	u32 hwords;
+	u32 len;
+	u32 bth0;
+	u32 bth2;
+	u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
+	char newreq;
+	unsigned long flags;
+	int ret = 0;
+	int delta;
+
+	ohdr = &qp->s_hdr.u.oth;
+	if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
+		ohdr = &qp->s_hdr.u.l.oth;
+
+	/*
+	 * The lock is needed to synchronize between the sending tasklet,
+	 * the receive interrupt handler, and timeout resends.
+	 */
+	spin_lock_irqsave(&qp->s_lock, flags);
+
+	/* Sending responses has higher priority than sending requests. */
+	if ((qp->s_flags & QIB_S_RESP_PENDING) &&
+	    qib_make_rc_ack(dev, qp, ohdr, pmtu))
+		goto done;
+
+	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_SEND_OK)) {
+		if (!(ib_qib_state_ops[qp->state] & QIB_FLUSH_SEND))
+			goto bail;
+		/* We are in the error state, flush the work request. */
+		if (qp->s_last == qp->s_head)
+			goto bail;
+		/* If DMAs are in progress, we can't flush immediately. */
+		if (atomic_read(&qp->s_dma_busy)) {
+			qp->s_flags |= QIB_S_WAIT_DMA;
+			goto bail;
+		}
+		wqe = get_swqe_ptr(qp, qp->s_last);
+		while (qp->s_last != qp->s_acked) {
+			qib_send_complete(qp, wqe, IB_WC_SUCCESS);
+			if (++qp->s_last >= qp->s_size)
+				qp->s_last = 0;
+			wqe = get_swqe_ptr(qp, qp->s_last);
+		}
+		qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
+		goto done;
+	}
+
+	if (qp->s_flags & (QIB_S_WAIT_RNR | QIB_S_WAIT_ACK))
+		goto bail;
+
+	if (qib_cmp24(qp->s_psn, qp->s_sending_hpsn) <= 0) {
+		if (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) {
+			qp->s_flags |= QIB_S_WAIT_PSN;
+			goto bail;
+		}
+		qp->s_sending_psn = qp->s_psn;
+		qp->s_sending_hpsn = qp->s_psn - 1;
+	}
+
+	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
+	hwords = 5;
+	bth0 = 0;
+
+	/* Send a request. */
+	wqe = get_swqe_ptr(qp, qp->s_cur);
+	switch (qp->s_state) {
+	default:
+		if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_NEXT_SEND_OK))
+			goto bail;
+		/*
+		 * Resend an old request or start a new one.
+		 *
+		 * We keep track of the current SWQE so that
+		 * we don't reset the "furthest progress" state
+		 * if we need to back up.
+		 */
+		newreq = 0;
+		if (qp->s_cur == qp->s_tail) {
+			/* Check if send work queue is empty. */
+			if (qp->s_tail == qp->s_head)
+				goto bail;
+			/*
+			 * If a fence is requested, wait for previous
+			 * RDMA read and atomic operations to finish.
+			 */
+			if ((wqe->wr.send_flags & IB_SEND_FENCE) &&
+			    qp->s_num_rd_atomic) {
+				qp->s_flags |= QIB_S_WAIT_FENCE;
+				goto bail;
+			}
+			wqe->psn = qp->s_next_psn;
+			newreq = 1;
+		}
+		/*
+		 * Note that we have to be careful not to modify the
+		 * original work request since we may need to resend
+		 * it.
+		 */
+		len = wqe->length;
+		ss = &qp->s_sge;
+		bth2 = qp->s_psn & QIB_PSN_MASK;
+		switch (wqe->wr.opcode) {
+		case IB_WR_SEND:
+		case IB_WR_SEND_WITH_IMM:
+			/* If no credit, return. */
+			if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT) &&
+			    qib_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) {
+				qp->s_flags |= QIB_S_WAIT_SSN_CREDIT;
+				goto bail;
+			}
+			wqe->lpsn = wqe->psn;
+			if (len > pmtu) {
+				wqe->lpsn += (len - 1) / pmtu;
+				qp->s_state = OP(SEND_FIRST);
+				len = pmtu;
+				break;
+			}
+			if (wqe->wr.opcode == IB_WR_SEND)
+				qp->s_state = OP(SEND_ONLY);
+			else {
+				qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
+				/* Immediate data comes after the BTH */
+				ohdr->u.imm_data = wqe->wr.ex.imm_data;
+				hwords += 1;
+			}
+			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
+				bth0 |= IB_BTH_SOLICITED;
+			bth2 |= IB_BTH_REQ_ACK;
+			if (++qp->s_cur == qp->s_size)
+				qp->s_cur = 0;
+			break;
+
+		case IB_WR_RDMA_WRITE:
+			if (newreq && !(qp->s_flags & QIB_S_UNLIMITED_CREDIT))
+				qp->s_lsn++;
+			/* FALLTHROUGH */
+		case IB_WR_RDMA_WRITE_WITH_IMM:
+			/* If no credit, return. */
+			if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT) &&
+			    qib_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) {
+				qp->s_flags |= QIB_S_WAIT_SSN_CREDIT;
+				goto bail;
+			}
+			ohdr->u.rc.reth.vaddr =
+				cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
+			ohdr->u.rc.reth.rkey =
+				cpu_to_be32(wqe->wr.wr.rdma.rkey);
+			ohdr->u.rc.reth.length = cpu_to_be32(len);
+			hwords += sizeof(struct ib_reth) / sizeof(u32);
+			wqe->lpsn = wqe->psn;
+			if (len > pmtu) {
+				wqe->lpsn += (len - 1) / pmtu;
+				qp->s_state = OP(RDMA_WRITE_FIRST);
+				len = pmtu;
+				break;
+			}
+			if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
+				qp->s_state = OP(RDMA_WRITE_ONLY);
+			else {
+				qp->s_state =
+					OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
+				/* Immediate data comes after RETH */
+				ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
+				hwords += 1;
+				if (wqe->wr.send_flags & IB_SEND_SOLICITED)
+					bth0 |= IB_BTH_SOLICITED;
+			}
+			bth2 |= IB_BTH_REQ_ACK;
+			if (++qp->s_cur == qp->s_size)
+				qp->s_cur = 0;
+			break;
+
+		case IB_WR_RDMA_READ:
+			/*
+			 * Don't allow more operations to be started
+			 * than the QP limits allow.
+			 */
+			if (newreq) {
+				if (qp->s_num_rd_atomic >=
+				    qp->s_max_rd_atomic) {
+					qp->s_flags |= QIB_S_WAIT_RDMAR;
+					goto bail;
+				}
+				qp->s_num_rd_atomic++;
+				if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT))
+					qp->s_lsn++;
+				/*
+				 * Adjust s_next_psn to count the
+				 * expected number of responses.
+				 */
+				if (len > pmtu)
+					qp->s_next_psn += (len - 1) / pmtu;
+				wqe->lpsn = qp->s_next_psn++;
+			}
+			ohdr->u.rc.reth.vaddr =
+				cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
+			ohdr->u.rc.reth.rkey =
+				cpu_to_be32(wqe->wr.wr.rdma.rkey);
+			ohdr->u.rc.reth.length = cpu_to_be32(len);
+			qp->s_state = OP(RDMA_READ_REQUEST);
+			hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
+			ss = NULL;
+			len = 0;
+			bth2 |= IB_BTH_REQ_ACK;
+			if (++qp->s_cur == qp->s_size)
+				qp->s_cur = 0;
+			break;
+
+		case IB_WR_ATOMIC_CMP_AND_SWP:
+		case IB_WR_ATOMIC_FETCH_AND_ADD:
+			/*
+			 * Don't allow more operations to be started
+			 * than the QP limits allow.
+			 */
+			if (newreq) {
+				if (qp->s_num_rd_atomic >=
+				    qp->s_max_rd_atomic) {
+					qp->s_flags |= QIB_S_WAIT_RDMAR;
+					goto bail;
+				}
+				qp->s_num_rd_atomic++;
+				if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT))
+					qp->s_lsn++;
+				wqe->lpsn = wqe->psn;
+			}
+			if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
+				qp->s_state = OP(COMPARE_SWAP);
+				ohdr->u.atomic_eth.swap_data = cpu_to_be64(
+					wqe->wr.wr.atomic.swap);
+				ohdr->u.atomic_eth.compare_data = cpu_to_be64(
+					wqe->wr.wr.atomic.compare_add);
+			} else {
+				qp->s_state = OP(FETCH_ADD);
+				ohdr->u.atomic_eth.swap_data = cpu_to_be64(
+					wqe->wr.wr.atomic.compare_add);
+				ohdr->u.atomic_eth.compare_data = 0;
+			}
+			ohdr->u.atomic_eth.vaddr[0] = cpu_to_be32(
+				wqe->wr.wr.atomic.remote_addr >> 32);
+			ohdr->u.atomic_eth.vaddr[1] = cpu_to_be32(
+				wqe->wr.wr.atomic.remote_addr);
+			ohdr->u.atomic_eth.rkey = cpu_to_be32(
+				wqe->wr.wr.atomic.rkey);
+			hwords += sizeof(struct ib_atomic_eth) / sizeof(u32);
+			ss = NULL;
+			len = 0;
+			bth2 |= IB_BTH_REQ_ACK;
+			if (++qp->s_cur == qp->s_size)
+				qp->s_cur = 0;
+			break;
+
+		default:
+			goto bail;
+		}
+		qp->s_sge.sge = wqe->sg_list[0];
+		qp->s_sge.sg_list = wqe->sg_list + 1;
+		qp->s_sge.num_sge = wqe->wr.num_sge;
+		qp->s_sge.total_len = wqe->length;
+		qp->s_len = wqe->length;
+		if (newreq) {
+			qp->s_tail++;
+			if (qp->s_tail >= qp->s_size)
+				qp->s_tail = 0;
+		}
+		if (wqe->wr.opcode == IB_WR_RDMA_READ)
+			qp->s_psn = wqe->lpsn + 1;
+		else {
+			qp->s_psn++;
+			if (qib_cmp24(qp->s_psn, qp->s_next_psn) > 0)
+				qp->s_next_psn = qp->s_psn;
+		}
+		break;
+
+	case OP(RDMA_READ_RESPONSE_FIRST):
+		/*
+		 * qp->s_state is normally set to the opcode of the
+		 * last packet constructed for new requests and therefore
+		 * is never set to RDMA read response.
+		 * RDMA_READ_RESPONSE_FIRST is used by the ACK processing
+		 * thread to indicate a SEND needs to be restarted from an
+		 * earlier PSN without interfering with the sending thread.
+		 * See qib_restart_rc().
+		 */
+		qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
+		/* FALLTHROUGH */
+	case OP(SEND_FIRST):
+		qp->s_state = OP(SEND_MIDDLE);
+		/* FALLTHROUGH */
+	case OP(SEND_MIDDLE):
+		bth2 = qp->s_psn++ & QIB_PSN_MASK;
+		if (qib_cmp24(qp->s_psn, qp->s_next_psn) > 0)
+			qp->s_next_psn = qp->s_psn;
+		ss = &qp->s_sge;
+		len = qp->s_len;
+		if (len > pmtu) {
+			len = pmtu;
+			break;
+		}
+		if (wqe->wr.opcode == IB_WR_SEND)
+			qp->s_state = OP(SEND_LAST);
+		else {
+			qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
+			/* Immediate data comes after the BTH */
+			ohdr->u.imm_data = wqe->wr.ex.imm_data;
+			hwords += 1;
+		}
+		if (wqe->wr.send_flags & IB_SEND_SOLICITED)
+			bth0 |= IB_BTH_SOLICITED;
+		bth2 |= IB_BTH_REQ_ACK;
+		qp->s_cur++;
+		if (qp->s_cur >= qp->s_size)
+			qp->s_cur = 0;
+		break;
+
+	case OP(RDMA_READ_RESPONSE_LAST):
+		/*
+		 * qp->s_state is normally set to the opcode of the
+		 * last packet constructed for new requests and therefore
+		 * is never set to RDMA read response.
+		 * RDMA_READ_RESPONSE_LAST is used by the ACK processing
+		 * thread to indicate an RDMA write needs to be restarted from
+		 * an earlier PSN without interfering with the sending thread.
+		 * See qib_restart_rc().
+		 */
+		qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
+		/* FALLTHROUGH */
+	case OP(RDMA_WRITE_FIRST):
+		qp->s_state = OP(RDMA_WRITE_MIDDLE);
+		/* FALLTHROUGH */
+	case OP(RDMA_WRITE_MIDDLE):
+		bth2 = qp->s_psn++ & QIB_PSN_MASK;
+		if (qib_cmp24(qp->s_psn, qp->s_next_psn) > 0)
+			qp->s_next_psn = qp->s_psn;
+		ss = &qp->s_sge;
+		len = qp->s_len;
+		if (len > pmtu) {
+			len = pmtu;
+			break;
+		}
+		if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
+			qp->s_state = OP(RDMA_WRITE_LAST);
+		else {
+			qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
+			/* Immediate data comes after the BTH */
+			ohdr->u.imm_data = wqe->wr.ex.imm_data;
+			hwords += 1;
+			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
+				bth0 |= IB_BTH_SOLICITED;
+		}
+		bth2 |= IB_BTH_REQ_ACK;
+		qp->s_cur++;
+		if (qp->s_cur >= qp->s_size)
+			qp->s_cur = 0;
+		break;
+
+	case OP(RDMA_READ_RESPONSE_MIDDLE):
+		/*
+		 * qp->s_state is normally set to the opcode of the
+		 * last packet constructed for new requests and therefore
+		 * is never set to RDMA read response.
+		 * RDMA_READ_RESPONSE_MIDDLE is used by the ACK processing
+		 * thread to indicate an RDMA read needs to be restarted from
+		 * an earlier PSN without interfering with the sending thread.
+		 * See qib_restart_rc().
+		 */
+		len = ((qp->s_psn - wqe->psn) & QIB_PSN_MASK) * pmtu;
+		ohdr->u.rc.reth.vaddr =
+			cpu_to_be64(wqe->wr.wr.rdma.remote_addr + len);
+		ohdr->u.rc.reth.rkey =
+			cpu_to_be32(wqe->wr.wr.rdma.rkey);
+		ohdr->u.rc.reth.length = cpu_to_be32(wqe->length - len);
+		qp->s_state = OP(RDMA_READ_REQUEST);
+		hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
+		bth2 = (qp->s_psn & QIB_PSN_MASK) | IB_BTH_REQ_ACK;
+		qp->s_psn = wqe->lpsn + 1;
+		ss = NULL;
+		len = 0;
+		qp->s_cur++;
+		if (qp->s_cur == qp->s_size)
+			qp->s_cur = 0;
+		break;
+	}
+	qp->s_sending_hpsn = bth2;
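+	/*
+	 * Sign-extend the 24-bit PSN difference; an ACK is requested
+	 * every QIB_PSN_CREDIT packets.
+	 */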
+	delta = (((int) bth2 - (int) wqe->psn) << 8) >> 8;
+	if (delta && delta % QIB_PSN_CREDIT == 0)
+		bth2 |= IB_BTH_REQ_ACK;
+	if (qp->s_flags & QIB_S_SEND_ONE) {
+		qp->s_flags &= ~QIB_S_SEND_ONE;
+		qp->s_flags |= QIB_S_WAIT_ACK;
+		bth2 |= IB_BTH_REQ_ACK;
+	}
+	qp->s_len -= len;
+	qp->s_hdrwords = hwords;
+	qp->s_cur_sge = ss;
+	qp->s_cur_size = len;
+	qib_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24), bth2);
+done:
+	ret = 1;
+	goto unlock;
+
+bail:
+	qp->s_flags &= ~QIB_S_BUSY;
+unlock:
+	spin_unlock_irqrestore(&qp->s_lock, flags);
+	return ret;
+}
+
+/**
+ * qib_send_rc_ack - Construct an ACK packet and send it
+ * @qp: a pointer to the QP
+ *
+ * This is called from qib_rc_rcv() and qib_kreceive().
+ * Note that RDMA reads and atomics are handled in the
+ * send side QP state and tasklet.
+ */
+void qib_send_rc_ack(struct qib_qp *qp)
+{
+	struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
+	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
+	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+	u64 pbc;
+	u16 lrh0;
+	u32 bth0;
+	u32 hwords;
+	u32 pbufn;
+	u32 __iomem *piobuf;
+	struct qib_ib_header hdr;
+	struct qib_other_headers *ohdr;
+	u32 control;
+	unsigned long flags;
+
+	spin_lock_irqsave(&qp->s_lock, flags);
+
+	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
+		goto unlock;
+
+	/* Don't send ACK or NAK if an RDMA read or atomic is pending. */
+	if ((qp->s_flags & QIB_S_RESP_PENDING) || qp->s_rdma_ack_cnt)
+		goto queue_ack;
+
+	/* Construct the header with s_lock held so APM doesn't change it. */
+	ohdr = &hdr.u.oth;
+	lrh0 = QIB_LRH_BTH;
+	/* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4. */
+	hwords = 6;
+	if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
+		hwords += qib_make_grh(ibp, &hdr.u.l.grh,
+				       &qp->remote_ah_attr.grh, hwords, 0);
+		ohdr = &hdr.u.l.oth;
+		lrh0 = QIB_LRH_GRH;
+	}
+	/* read pkey_index w/o lock (it's atomic) */
+	bth0 = qib_get_pkey(ibp, qp->s_pkey_index) | (OP(ACKNOWLEDGE) << 24);
+	if (qp->s_mig_state == IB_MIG_MIGRATED)
+		bth0 |= IB_BTH_MIG_REQ;
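+	/*
+	 * The AETH carries either the NAK code or, for a plain ACK,
+	 * the flow control credit, together with the MSN.
+	 */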
+	if (qp->r_nak_state)
+		ohdr->u.aeth = cpu_to_be32((qp->r_msn & QIB_MSN_MASK) |
+					    (qp->r_nak_state <<
+					     QIB_AETH_CREDIT_SHIFT));
+	else
+		ohdr->u.aeth = qib_compute_aeth(qp);
+	lrh0 |= ibp->sl_to_vl[qp->remote_ah_attr.sl] << 12 |
+		qp->remote_ah_attr.sl << 4;
+	hdr.lrh[0] = cpu_to_be16(lrh0);
+	hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
+	hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
+	hdr.lrh[3] = cpu_to_be16(ppd->lid | qp->remote_ah_attr.src_path_bits);
+	ohdr->bth[0] = cpu_to_be32(bth0);
+	ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
+	ohdr->bth[2] = cpu_to_be32(qp->r_ack_psn & QIB_PSN_MASK);
+
+	spin_unlock_irqrestore(&qp->s_lock, flags);
+
+	/* Don't try to send ACKs if the link isn't ACTIVE */
+	if (!(ppd->lflags & QIBL_LINKACTIVE))
+		goto done;
+
+	control = dd->f_setpbc_control(ppd, hwords + SIZE_OF_CRC,
+				       qp->s_srate, lrh0 >> 12);
+	/* length is + 1 for the control dword */
+	pbc = ((u64) control << 32) | (hwords + 1);
+
+	piobuf = dd->f_getsendbuf(ppd, pbc, &pbufn);
+	if (!piobuf) {
+		/*
+		 * We are out of PIO buffers at the moment.
+		 * Pass responsibility for sending the ACK to the
+		 * send tasklet so that when a PIO buffer becomes
+		 * available, the ACK is sent ahead of other outgoing
+		 * packets.
+		 */
+		spin_lock_irqsave(&qp->s_lock, flags);
+		goto queue_ack;
+	}
+
+	/*
+	 * Write the pbc.
+	 * We have to flush after the PBC for correctness on
+	 * some CPUs or the WC buffer can be written out of order.
+	 */
+	writeq(pbc, piobuf);
+
+	if (dd->flags & QIB_PIO_FLUSH_WC) {
+		u32 *hdrp = (u32 *) &hdr;
+
+		qib_flush_wc();
+		qib_pio_copy(piobuf + 2, hdrp, hwords - 1);
+		qib_flush_wc();
+		__raw_writel(hdrp[hwords - 1], piobuf + hwords + 1);
+	} else
+		qib_pio_copy(piobuf + 2, (u32 *) &hdr, hwords);
+
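+	/*
+	 * Some chips only start the send after a magic word is written
+	 * to a special trigger offset: the last dword of the 2K or 4K
+	 * buffer.
+	 */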
+	if (dd->flags & QIB_USE_SPCL_TRIG) {
+		u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;
+
+		qib_flush_wc();
+		__raw_writel(0xaebecede, piobuf + spcl_off);
+	}
+
+	qib_flush_wc();
+	qib_sendbuf_done(dd, pbufn);
+
+	ibp->n_unicast_xmit++;
+	goto done;
+
+queue_ack:
+	if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
+		ibp->n_rc_qacks++;
+		qp->s_flags |= QIB_S_ACK_PENDING | QIB_S_RESP_PENDING;
+		qp->s_nak_state = qp->r_nak_state;
+		qp->s_ack_psn = qp->r_ack_psn;
+
+		/* Schedule the send tasklet. */
+		qib_schedule_send(qp);
+	}
+unlock:
+	spin_unlock_irqrestore(&qp->s_lock, flags);
+done:
+	return;
+}
+
+/**
+ * reset_psn - reset the QP state to send starting from PSN
+ * @qp: the QP
+ * @psn: the packet sequence number to restart at
+ *
+ * This is called from do_rc_ack() and qib_restart_rc() to set the
+ * QP's send state so that it resumes at the given PSN.
+ * Called at interrupt level with the QP s_lock held.
+ */
+static void reset_psn(struct qib_qp *qp, u32 psn)
+{
+	u32 n = qp->s_acked;
+	struct qib_swqe *wqe = get_swqe_ptr(qp, n);
+	u32 opcode;
+
+	qp->s_cur = n;
+
+	/*
+	 * If we are starting the request from the beginning,
+	 * let the normal send code handle initialization.
+	 */
+	if (qib_cmp24(psn, wqe->psn) <= 0) {
+		qp->s_state = OP(SEND_LAST);
+		goto done;
+	}
+
+	/* Find the work request opcode corresponding to the given PSN. */
+	opcode = wqe->wr.opcode;
+	for (;;) {
+		int diff;
+
+		if (++n == qp->s_size)
+			n = 0;
+		if (n == qp->s_tail)
+			break;
+		wqe = get_swqe_ptr(qp, n);
+		diff = qib_cmp24(psn, wqe->psn);
+		if (diff < 0)
+			break;
+		qp->s_cur = n;
+		/*
+		 * If we are starting the request from the beginning,
+		 * let the normal send code handle initialization.
+		 */
+		if (diff == 0) {
+			qp->s_state = OP(SEND_LAST);
+			goto done;
+		}
+		opcode = wqe->wr.opcode;
+	}
+
+	/*
+	 * Set the state to restart in the middle of a request.
+	 * Don't change the s_sge, s_cur_sge, or s_cur_size.
+	 * See qib_make_rc_req().
+	 */
+	switch (opcode) {
+	case IB_WR_SEND:
+	case IB_WR_SEND_WITH_IMM:
+		qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
+		break;
+
+	case IB_WR_RDMA_WRITE:
+	case IB_WR_RDMA_WRITE_WITH_IMM:
+		qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
+		break;
+
+	case IB_WR_RDMA_READ:
+		qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
+		break;
+
+	default:
+		/*
+		 * This case shouldn't happen since these
+		 * requests use only one PSN each.
+		 */
+		qp->s_state = OP(SEND_LAST);
+	}
+done:
+	qp->s_psn = psn;
+	/*
+	 * Set QIB_S_WAIT_PSN as qib_rc_send_complete() may start the timer
+	 * asynchronously before the send tasklet can get scheduled.
+	 * Doing it in qib_make_rc_req() is too late.
+	 */
+	if ((qib_cmp24(qp->s_psn, qp->s_sending_hpsn) <= 0) &&
+	    (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0))
+		qp->s_flags |= QIB_S_WAIT_PSN;
+}
+
+/*
+ * Back up requester to resend the last un-ACKed request.
+ * The QP s_lock should be held and interrupts disabled.
+ */
+static void qib_restart_rc(struct qib_qp *qp, u32 psn, int wait)
+{
+	struct qib_swqe *wqe = get_swqe_ptr(qp, qp->s_acked);
+	struct qib_ibport *ibp;
+
+	if (qp->s_retry == 0) {
+		if (qp->s_mig_state == IB_MIG_ARMED) {
+			qib_migrate_qp(qp);
+			qp->s_retry = qp->s_retry_cnt;
+		} else if (qp->s_last == qp->s_acked) {
+			qib_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
+			qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
+			return;
+		} else /* XXX need to handle delayed completion */
+			return;
+	} else
+		qp->s_retry--;
+
+	ibp = to_iport(qp->ibqp.device, qp->port_num);
+	if (wqe->wr.opcode == IB_WR_RDMA_READ)
+		ibp->n_rc_resends++;
+	else
+		ibp->n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK;
+
+	qp->s_flags &= ~(QIB_S_WAIT_FENCE | QIB_S_WAIT_RDMAR |
+			 QIB_S_WAIT_SSN_CREDIT | QIB_S_WAIT_PSN |
+			 QIB_S_WAIT_ACK);
+	if (wait)
+		qp->s_flags |= QIB_S_SEND_ONE;
+	reset_psn(qp, psn);
+}
+
+/*
+ * This is called from s_timer for missing responses.
+ */
+static void rc_timeout(unsigned long arg)
+{
+	struct qib_qp *qp = (struct qib_qp *)arg;
+	struct qib_ibport *ibp;
+	unsigned long flags;
+
+	spin_lock_irqsave(&qp->s_lock, flags);
+	if (qp->s_flags & QIB_S_TIMER) {
+		ibp = to_iport(qp->ibqp.device, qp->port_num);
+		ibp->n_rc_timeouts++;
+		qp->s_flags &= ~QIB_S_TIMER;
+		del_timer(&qp->s_timer);
+		qib_restart_rc(qp, qp->s_last_psn + 1, 1);
+		qib_schedule_send(qp);
+	}
+	spin_unlock_irqrestore(&qp->s_lock, flags);
+}
+
+/*
+ * This is called from s_timer for RNR timeouts.
+ */
+void qib_rc_rnr_retry(unsigned long arg)
+{
+	struct qib_qp *qp = (struct qib_qp *)arg;
+	unsigned long flags;
+
+	spin_lock_irqsave(&qp->s_lock, flags);
+	if (qp->s_flags & QIB_S_WAIT_RNR) {
+		qp->s_flags &= ~QIB_S_WAIT_RNR;
+		del_timer(&qp->s_timer);
+		qib_schedule_send(qp);
+	}
+	spin_unlock_irqrestore(&qp->s_lock, flags);
+}
+
+/*
+ * Set qp->s_sending_psn to the next PSN after the given one.
+ * This would be psn+1 except when RDMA reads are present.
+ */
+static void reset_sending_psn(struct qib_qp *qp, u32 psn)
+{
+	struct qib_swqe *wqe;
+	u32 n = qp->s_last;
+
+	/* Find the work request corresponding to the given PSN. */
+	for (;;) {
+		wqe = get_swqe_ptr(qp, n);
+		if (qib_cmp24(psn, wqe->lpsn) <= 0) {
+			if (wqe->wr.opcode == IB_WR_RDMA_READ)
+				qp->s_sending_psn = wqe->lpsn + 1;
+			else
+				qp->s_sending_psn = psn + 1;
+			break;
+		}
+		if (++n == qp->s_size)
+			n = 0;
+		if (n == qp->s_tail)
+			break;
+	}
+}
+
+/*
+ * This should be called with the QP s_lock held and interrupts disabled.
+ */
+void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr)
+{
+	struct qib_other_headers *ohdr;
+	struct qib_swqe *wqe;
+	struct ib_wc wc;
+	unsigned i;
+	u32 opcode;
+	u32 psn;
+
+	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_OR_FLUSH_SEND))
+		return;
+
+	/* Find out where the BTH is */
+	if ((be16_to_cpu(hdr->lrh[0]) & 3) == QIB_LRH_BTH)
+		ohdr = &hdr->u.oth;
+	else
+		ohdr = &hdr->u.l.oth;
+
+	opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
+	if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
+	    opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
+		WARN_ON(!qp->s_rdma_ack_cnt);
+		qp->s_rdma_ack_cnt--;
+		return;
+	}
+
+	psn = be32_to_cpu(ohdr->bth[2]);
+	reset_sending_psn(qp, psn);
+
+	/*
+	 * Start timer after a packet requesting an ACK has been sent and
+	 * there are still requests that haven't been acked.
+	 */
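+	/* Bit 31 of the PSN word from bth[2] is the BTH ACK-request flag. */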
+	if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail &&
+	    !(qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR | QIB_S_WAIT_PSN)))
+		start_timer(qp);
+
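+	/*
+	 * Don't complete a WQE while its last packet is still within
+	 * the range of PSNs being transmitted.
+	 */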
+	while (qp->s_last != qp->s_acked) {
+		wqe = get_swqe_ptr(qp, qp->s_last);
+		if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) >= 0 &&
+		    qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
+			break;
+		for (i = 0; i < wqe->wr.num_sge; i++) {
+			struct qib_sge *sge = &wqe->sg_list[i];
+
+			atomic_dec(&sge->mr->refcount);
+		}
+		/* Post a send completion queue entry if requested. */
+		if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
+		    (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
+			memset(&wc, 0, sizeof wc);
+			wc.wr_id = wqe->wr.wr_id;
+			wc.status = IB_WC_SUCCESS;
+			wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
+			wc.byte_len = wqe->length;
+			wc.qp = &qp->ibqp;
+			qib_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
+		}
+		if (++qp->s_last >= qp->s_size)
+			qp->s_last = 0;
+	}
+	/*
+	 * If we were waiting for sends to complete before resending,
+	 * and they are now complete, restart sending.
+	 */
+	if (qp->s_flags & QIB_S_WAIT_PSN &&
+	    qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
+		qp->s_flags &= ~QIB_S_WAIT_PSN;
+		qp->s_sending_psn = qp->s_psn;
+		qp->s_sending_hpsn = qp->s_psn - 1;
+		qib_schedule_send(qp);
+	}
+}
+
+static inline void update_last_psn(struct qib_qp *qp, u32 psn)
+{
+	qp->s_last_psn = psn;
+}
+
+/*
+ * Generate a SWQE completion.
+ * This is similar to qib_send_complete() but also checks that the
+ * SGEs are not still referenced if the SWQE is being resent.
+ */
+static struct qib_swqe *do_rc_completion(struct qib_qp *qp,
+					 struct qib_swqe *wqe,
+					 struct qib_ibport *ibp)
+{
+	struct ib_wc wc;
+	unsigned i;
+
+	/*
+	 * Don't decrement refcount and don't generate a
+	 * completion if the SWQE is being resent until the send
+	 * is finished.
+	 */
+	if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) < 0 ||
+	    qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
+		for (i = 0; i < wqe->wr.num_sge; i++) {
+			struct qib_sge *sge = &wqe->sg_list[i];
+
+			atomic_dec(&sge->mr->refcount);
+		}
+		/* Post a send completion queue entry if requested. */
+		if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
+		    (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
+			memset(&wc, 0, sizeof wc);
+			wc.wr_id = wqe->wr.wr_id;
+			wc.status = IB_WC_SUCCESS;
+			wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
+			wc.byte_len = wqe->length;
+			wc.qp = &qp->ibqp;
+			qib_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
+		}
+		if (++qp->s_last >= qp->s_size)
+			qp->s_last = 0;
+	} else
+		ibp->n_rc_delayed_comp++;
+
+	qp->s_retry = qp->s_retry_cnt;
+	update_last_psn(qp, wqe->lpsn);
+
+	/*
+	 * If we are completing a request which is in the process of
+	 * being resent, we can stop resending it since we know the
+	 * responder has already seen it.
+	 */
+	if (qp->s_acked == qp->s_cur) {
+		if (++qp->s_cur >= qp->s_size)
+			qp->s_cur = 0;
+		qp->s_acked = qp->s_cur;
+		wqe = get_swqe_ptr(qp, qp->s_cur);
+		if (qp->s_acked != qp->s_tail) {
+			qp->s_state = OP(SEND_LAST);
+			qp->s_psn = wqe->psn;
+		}
+	} else {
+		if (++qp->s_acked >= qp->s_size)
+			qp->s_acked = 0;
+		if (qp->state == IB_QPS_SQD && qp->s_acked == qp->s_cur)
+			qp->s_draining = 0;
+		wqe = get_swqe_ptr(qp, qp->s_acked);
+	}
+	return wqe;
+}
+
+/**
+ * do_rc_ack - process an incoming RC ACK
+ * @qp: the QP the ACK came in on
+ * @aeth: the AETH word from the ACK packet
+ * @psn: the packet sequence number of the ACK
+ * @opcode: the opcode of the request that resulted in the ACK
+ * @val: the atomic result returned in the ACK, if any
+ * @rcd: the context pointer
+ *
+ * This is called from qib_rc_rcv_resp() to process an incoming RC ACK
+ * for the given QP.
+ * Called at interrupt level with the QP s_lock held.
+ * Returns 1 if OK, 0 if current operation should be aborted (NAK).
+ */
+static int do_rc_ack(struct qib_qp *qp, u32 aeth, u32 psn, int opcode,
+		     u64 val, struct qib_ctxtdata *rcd)
+{
+	struct qib_ibport *ibp;
+	enum ib_wc_status status;
+	struct qib_swqe *wqe;
+	int ret = 0;
+	u32 ack_psn;
+	int diff;
+
+	/* Remove QP from retry timer */
+	if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) {
+		qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR);
+		del_timer(&qp->s_timer);
+	}
+
+	/*
+	 * Note that NAKs implicitly ACK outstanding SEND and RDMA write
+	 * requests and implicitly NAK RDMA read and atomic requests issued
+	 * before the NAK'ed request.  The MSN won't include the NAK'ed
+	 * request but will include any ACK'ed requests.
+	 */
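+	/*
+	 * AETH bits 31:29 give the type: 0 = ACK, 1 = RNR NAK,
+	 * 3 = NAK (2 is reserved).
+	 */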
+	ack_psn = psn;
+	if (aeth >> 29)
+		ack_psn--;
+	wqe = get_swqe_ptr(qp, qp->s_acked);
+	ibp = to_iport(qp->ibqp.device, qp->port_num);
+
+	/*
+	 * The MSN might be for a later WQE than the PSN indicates so
+	 * only complete WQEs that the PSN finishes.
+	 */
+	while ((diff = qib_cmp24(ack_psn, wqe->lpsn)) >= 0) {
+		/*
+		 * RDMA_READ_RESPONSE_ONLY is a special case since
+		 * we want to generate completion events for everything
+		 * before the RDMA read, copy the data, then generate
+		 * the completion for the read.
+		 */
+		if (wqe->wr.opcode == IB_WR_RDMA_READ &&
+		    opcode == OP(RDMA_READ_RESPONSE_ONLY) &&
+		    diff == 0) {
+			ret = 1;
+			goto bail;
+		}
+		/*
+		 * If this request is an RDMA read or atomic, and the ACK is
+		 * for a later operation, this ACK NAKs the RDMA read or
+		 * atomic.  In other words, only an RDMA_READ_LAST or ONLY
+		 * can ACK an RDMA read, and likewise for atomic ops.  Note
+		 * that the NAK case can only happen if relaxed ordering is
+		 * used and requests are sent after an RDMA read or atomic
+		 * is sent but before the response is received.
+		 */
+		if ((wqe->wr.opcode == IB_WR_RDMA_READ &&
+		     (opcode != OP(RDMA_READ_RESPONSE_LAST) || diff != 0)) ||
+		    ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
+		      wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
+		     (opcode != OP(ATOMIC_ACKNOWLEDGE) || diff != 0))) {
+			/* Retry this request. */
+			if (!(qp->r_flags & QIB_R_RDMAR_SEQ)) {
+				qp->r_flags |= QIB_R_RDMAR_SEQ;
+				qib_restart_rc(qp, qp->s_last_psn + 1, 0);
+				if (list_empty(&qp->rspwait)) {
+					qp->r_flags |= QIB_R_RSP_SEND;
+					atomic_inc(&qp->refcount);
+					list_add_tail(&qp->rspwait,
+						      &rcd->qp_wait_list);
+				}
+			}
+			/*
+			 * No need to process the ACK/NAK since we are
+			 * restarting an earlier request.
+			 */
+			goto bail;
+		}
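+		/*
+		 * A completed atomic: store the value returned in the
+		 * ACK at the requester's original SGE.
+		 */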
+		if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
+		    wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
+			u64 *vaddr = wqe->sg_list[0].vaddr;
+			*vaddr = val;
+		}
+		if (qp->s_num_rd_atomic &&
+		    (wqe->wr.opcode == IB_WR_RDMA_READ ||
+		     wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
+		     wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) {
+			qp->s_num_rd_atomic--;
+			/* Restart sending task if fence is complete */
+			if ((qp->s_flags & QIB_S_WAIT_FENCE) &&
+			    !qp->s_num_rd_atomic) {
+				qp->s_flags &= ~(QIB_S_WAIT_FENCE |
+						 QIB_S_WAIT_ACK);
+				qib_schedule_send(qp);
+			} else if (qp->s_flags & QIB_S_WAIT_RDMAR) {
+				qp->s_flags &= ~(QIB_S_WAIT_RDMAR |
+						 QIB_S_WAIT_ACK);
+				qib_schedule_send(qp);
+			}
+		}
+		wqe = do_rc_completion(qp, wqe, ibp);
+		if (qp->s_acked == qp->s_tail)
+			break;
+	}
+
+	switch (aeth >> 29) {
+	case 0:         /* ACK */
+		ibp->n_rc_acks++;
+		if (qp->s_acked != qp->s_tail) {
+			/*
+			 * We are expecting more ACKs so
+			 * reset the retransmit timer.
+			 */
+			start_timer(qp);
+			/*
+			 * We can stop resending the earlier packets and
+			 * continue with the next packet the receiver wants.
+			 */
+			if (qib_cmp24(qp->s_psn, psn) <= 0)
+				reset_psn(qp, psn + 1);
+		} else if (qib_cmp24(qp->s_psn, psn) <= 0) {
+			qp->s_state = OP(SEND_LAST);
+			qp->s_psn = psn + 1;
+		}
+		if (qp->s_flags & QIB_S_WAIT_ACK) {
+			qp->s_flags &= ~QIB_S_WAIT_ACK;
+			qib_schedule_send(qp);
+		}
+		qib_get_credit(qp, aeth);
+		qp->s_rnr_retry = qp->s_rnr_retry_cnt;
+		qp->s_retry = qp->s_retry_cnt;
+		update_last_psn(qp, psn);
+		ret = 1;
+		goto bail;
+
+	case 1:         /* RNR NAK */
+		ibp->n_rnr_naks++;
+		if (qp->s_acked == qp->s_tail)
+			goto bail;
+		if (qp->s_flags & QIB_S_WAIT_RNR)
+			goto bail;
+		if (qp->s_rnr_retry == 0) {
+			status = IB_WC_RNR_RETRY_EXC_ERR;
+			goto class_b;
+		}
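+		/* An RNR retry count of 7 means retry indefinitely. */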
+		if (qp->s_rnr_retry_cnt < 7)
+			qp->s_rnr_retry--;
+
+		/* The last valid PSN is the previous PSN. */
+		update_last_psn(qp, psn - 1);
+
+		ibp->n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK;
+
+		reset_psn(qp, psn);
+
+		qp->s_flags &= ~(QIB_S_WAIT_SSN_CREDIT | QIB_S_WAIT_ACK);
+		qp->s_flags |= QIB_S_WAIT_RNR;
+		qp->s_timer.function = qib_rc_rnr_retry;
+		qp->s_timer.expires = jiffies + usecs_to_jiffies(
+			ib_qib_rnr_table[(aeth >> QIB_AETH_CREDIT_SHIFT) &
+					   QIB_AETH_CREDIT_MASK]);
+		add_timer(&qp->s_timer);
+		goto bail;
+
+	case 3:         /* NAK */
+		if (qp->s_acked == qp->s_tail)
+			goto bail;
+		/* The last valid PSN is the previous PSN. */
+		update_last_psn(qp, psn - 1);
+		switch ((aeth >> QIB_AETH_CREDIT_SHIFT) &
+			QIB_AETH_CREDIT_MASK) {
+		case 0: /* PSN sequence error */
+			ibp->n_seq_naks++;
+			/*
+			 * Back up to the responder's expected PSN.
+			 * Note that we might get a NAK in the middle of an
+			 * RDMA READ response which terminates the RDMA
+			 * READ.
+			 */
+			qib_restart_rc(qp, psn, 0);
+			qib_schedule_send(qp);
+			break;
+
+		case 1: /* Invalid Request */
+			status = IB_WC_REM_INV_REQ_ERR;
+			ibp->n_other_naks++;
+			goto class_b;
+
+		case 2: /* Remote Access Error */
+			status = IB_WC_REM_ACCESS_ERR;
+			ibp->n_other_naks++;
+			goto class_b;
+
+		case 3: /* Remote Operation Error */
+			status = IB_WC_REM_OP_ERR;
+			ibp->n_other_naks++;
+class_b:
+			if (qp->s_last == qp->s_acked) {
+				qib_send_complete(qp, wqe, status);
+				qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
+			}
+			break;
+
+		default:
+			/* Ignore other reserved NAK error codes */
+			goto reserved;
+		}
+		qp->s_retry = qp->s_retry_cnt;
+		qp->s_rnr_retry = qp->s_rnr_retry_cnt;
+		goto bail;
+
+	default:                /* 2: reserved */
+reserved:
+		/* Ignore reserved NAK codes. */
+		goto bail;
+	}
+
+bail:
+	return ret;
+}
+
+/*
+ * We have seen an out of sequence RDMA read middle or last packet.
+ * This ACKs SENDs and RDMA writes up to the first RDMA read or atomic SWQE.
+ */
+static void rdma_seq_err(struct qib_qp *qp, struct qib_ibport *ibp, u32 psn,
+			 struct qib_ctxtdata *rcd)
+{
+	struct qib_swqe *wqe;
+
+	/* Remove QP from retry timer */
+	if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) {
+		qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR);
+		del_timer(&qp->s_timer);
+	}
+
+	wqe = get_swqe_ptr(qp, qp->s_acked);
+
+	while (qib_cmp24(psn, wqe->lpsn) > 0) {
+		if (wqe->wr.opcode == IB_WR_RDMA_READ ||
+		    wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
+		    wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
+			break;
+		wqe = do_rc_completion(qp, wqe, ibp);
+	}
+
+	ibp->n_rdma_seq++;
+	qp->r_flags |= QIB_R_RDMAR_SEQ;
+	qib_restart_rc(qp, qp->s_last_psn + 1, 0);
+	if (list_empty(&qp->rspwait)) {
+		qp->r_flags |= QIB_R_RSP_SEND;
+		atomic_inc(&qp->refcount);
+		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
+	}
+}
+
+/**
+ * qib_rc_rcv_resp - process an incoming RC response packet
+ * @ibp: the port this packet came in on
+ * @ohdr: the other headers for this packet
+ * @data: the packet data
+ * @tlen: the packet length
+ * @qp: the QP for this packet
+ * @opcode: the opcode for this packet
+ * @psn: the packet sequence number for this packet
+ * @hdrsize: the header length
+ * @pmtu: the path MTU
+ * @rcd: the context pointer
+ *
+ * This is called from qib_rc_rcv() to process an incoming RC response
+ * packet for the given QP.
+ * Called at interrupt level.
+ */
+static void qib_rc_rcv_resp(struct qib_ibport *ibp,
+			    struct qib_other_headers *ohdr,
+			    void *data, u32 tlen,
+			    struct qib_qp *qp,
+			    u32 opcode,
+			    u32 psn, u32 hdrsize, u32 pmtu,
+			    struct qib_ctxtdata *rcd)
+{
+	struct qib_swqe *wqe;
+	enum ib_wc_status status;
+	unsigned long flags;
+	int diff;
+	u32 pad;
+	u32 aeth;
+	u64 val;
+
+	spin_lock_irqsave(&qp->s_lock, flags);
+
+	/* Double check we can process this now that we hold the s_lock. */
+	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
+		goto ack_done;
+
+	/* Ignore invalid responses. */
+	if (qib_cmp24(psn, qp->s_next_psn) >= 0)
+		goto ack_done;
+
+	/* Ignore duplicate responses. */
+	diff = qib_cmp24(psn, qp->s_last_psn);
+	if (unlikely(diff <= 0)) {
+		/* Update credits for "ghost" ACKs */
+		if (diff == 0 && opcode == OP(ACKNOWLEDGE)) {
+			aeth = be32_to_cpu(ohdr->u.aeth);
+			if ((aeth >> 29) == 0)
+				qib_get_credit(qp, aeth);
+		}
+		goto ack_done;
+	}
+
+	/*
+	 * Skip everything other than the PSN we expect, if we are waiting
+	 * for a reply to a restarted RDMA read or atomic op.
+	 */
+	if (qp->r_flags & QIB_R_RDMAR_SEQ) {
+		if (qib_cmp24(psn, qp->s_last_psn + 1) != 0)
+			goto ack_done;
+		qp->r_flags &= ~QIB_R_RDMAR_SEQ;
+	}
+
+	if (unlikely(qp->s_acked == qp->s_tail))
+		goto ack_done;
+	wqe = get_swqe_ptr(qp, qp->s_acked);
+	status = IB_WC_SUCCESS;
+
+	switch (opcode) {
+	case OP(ACKNOWLEDGE):
+	case OP(ATOMIC_ACKNOWLEDGE):
+	case OP(RDMA_READ_RESPONSE_FIRST):
+		aeth = be32_to_cpu(ohdr->u.aeth);
+		if (opcode == OP(ATOMIC_ACKNOWLEDGE)) {
+			__be32 *p = ohdr->u.at.atomic_ack_eth;
+
+			val = ((u64) be32_to_cpu(p[0]) << 32) |
+				be32_to_cpu(p[1]);
+		} else
+			val = 0;
+		if (!do_rc_ack(qp, aeth, psn, opcode, val, rcd) ||
+		    opcode != OP(RDMA_READ_RESPONSE_FIRST))
+			goto ack_done;
+		hdrsize += 4;
+		wqe = get_swqe_ptr(qp, qp->s_acked);
+		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
+			goto ack_op_err;
+		/*
+		 * If this is a response to a resent RDMA read, we
+		 * have to be careful to copy the data to the right
+		 * location.
+		 */
+		qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
+						  wqe, psn, pmtu);
+		goto read_middle;
+
+	case OP(RDMA_READ_RESPONSE_MIDDLE):
+		/* no AETH, no ACK */
+		if (unlikely(qib_cmp24(psn, qp->s_last_psn + 1)))
+			goto ack_seq_err;
+		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
+			goto ack_op_err;
+read_middle:
+		if (unlikely(tlen != (hdrsize + pmtu + 4)))
+			goto ack_len_err;
+		if (unlikely(pmtu >= qp->s_rdma_read_len))
+			goto ack_len_err;
+
+		/*
+		 * We got a response so update the timeout:
+		 * 4.096 usec * (1 << qp->timeout), computed in
+		 * nanoseconds (4096 << timeout) and converted to
+		 * microseconds for usecs_to_jiffies().
+		 */
+		qp->s_flags |= QIB_S_TIMER;
+		mod_timer(&qp->s_timer, jiffies +
+			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
+					 1000UL));
+		if (qp->s_flags & QIB_S_WAIT_ACK) {
+			qp->s_flags &= ~QIB_S_WAIT_ACK;
+			qib_schedule_send(qp);
+		}
+
+		if (opcode == OP(RDMA_READ_RESPONSE_MIDDLE))
+			qp->s_retry = qp->s_retry_cnt;
+
+		/*
+		 * Update the RDMA receive state but do the copy w/o
+		 * holding the locks and blocking interrupts.
+		 */
+		qp->s_rdma_read_len -= pmtu;
+		update_last_psn(qp, psn);
+		spin_unlock_irqrestore(&qp->s_lock, flags);
+		qib_copy_sge(&qp->s_rdma_read_sge, data, pmtu, 0);
+		goto bail;
+
+	case OP(RDMA_READ_RESPONSE_ONLY):
+		aeth = be32_to_cpu(ohdr->u.aeth);
+		if (!do_rc_ack(qp, aeth, psn, opcode, 0, rcd))
+			goto ack_done;
+		/* Get the number of bytes the message was padded by. */
+		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
+		/*
+		 * Check that the data size is >= 0 && <= pmtu.
+		 * Remember to account for the AETH header (4) and
+		 * ICRC (4).
+		 */
+		if (unlikely(tlen < (hdrsize + pad + 8)))
+			goto ack_len_err;
+		/*
+		 * If this is a response to a resent RDMA read, we
+		 * have to be careful to copy the data to the right
+		 * location.
+		 */
+		wqe = get_swqe_ptr(qp, qp->s_acked);
+		qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
+						  wqe, psn, pmtu);
+		goto read_last;
+
+	case OP(RDMA_READ_RESPONSE_LAST):
+		/* ACKs READ req. */
+		if (unlikely(qib_cmp24(psn, qp->s_last_psn + 1)))
+			goto ack_seq_err;
+		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
+			goto ack_op_err;
+		/* Get the number of bytes the message was padded by. */
+		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
+		/*
+		 * Check that the data size is >= 1 && <= pmtu.
+		 * Remember to account for the AETH header (4) and
+		 * ICRC (4).
+		 */
+		if (unlikely(tlen <= (hdrsize + pad + 8)))
+			goto ack_len_err;
+read_last:
+		tlen -= hdrsize + pad + 8;
+		if (unlikely(tlen != qp->s_rdma_read_len))
+			goto ack_len_err;
+		aeth = be32_to_cpu(ohdr->u.aeth);
+		qib_copy_sge(&qp->s_rdma_read_sge, data, tlen, 0);
+		WARN_ON(qp->s_rdma_read_sge.num_sge);
+		(void) do_rc_ack(qp, aeth, psn,
+				 OP(RDMA_READ_RESPONSE_LAST), 0, rcd);
+		goto ack_done;
+	}
+
+ack_op_err:
+	status = IB_WC_LOC_QP_OP_ERR;
+	goto ack_err;
+
+ack_seq_err:
+	rdma_seq_err(qp, ibp, psn, rcd);
+	goto ack_done;
+
+ack_len_err:
+	status = IB_WC_LOC_LEN_ERR;
+ack_err:
+	if (qp->s_last == qp->s_acked) {
+		qib_send_complete(qp, wqe, status);
+		qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
+	}
+ack_done:
+	spin_unlock_irqrestore(&qp->s_lock, flags);
+bail:
+	return;
+}
+
+/**
+ * qib_rc_rcv_error - process an incoming duplicate or error RC packet
+ * @ohdr: the other headers for this packet
+ * @data: the packet data
+ * @qp: the QP for this packet
+ * @opcode: the opcode for this packet
+ * @psn: the packet sequence number for this packet
+ * @diff: the difference between the PSN and the expected PSN
+ * @rcd: the context pointer
+ *
+ * This is called from qib_rc_rcv() to process an unexpected
+ * incoming RC packet for the given QP.
+ * Called at interrupt level.
+ * Return 1 if no more processing is needed; otherwise return 0 to
+ * schedule a response to be sent.
+ */
+static int qib_rc_rcv_error(struct qib_other_headers *ohdr,
+			    void *data,
+			    struct qib_qp *qp,
+			    u32 opcode,
+			    u32 psn,
+			    int diff,
+			    struct qib_ctxtdata *rcd)
+{
+	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
+	struct qib_ack_entry *e;
+	unsigned long flags;
+	u8 i, prev;
+	int old_req;
+
+	if (diff > 0) {
+		/*
+		 * Packet sequence error.
+		 * A NAK will ACK earlier sends and RDMA writes.
+		 * Don't queue the NAK if we already sent one.
+		 */
+		if (!qp->r_nak_state) {
+			ibp->n_rc_seqnak++;
+			qp->r_nak_state = IB_NAK_PSN_ERROR;
+			/* Use the expected PSN. */
+			qp->r_ack_psn = qp->r_psn;
+			/*
+			 * Wait to send the sequence NAK until all packets
+			 * in the receive queue have been processed.
+			 * Otherwise, we end up propagating congestion.
+			 */
+			if (list_empty(&qp->rspwait)) {
+				qp->r_flags |= QIB_R_RSP_NAK;
+				atomic_inc(&qp->refcount);
+				list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
+			}
+		}
+		goto done;
+	}
+
+	/*
+	 * Handle a duplicate request.  Don't re-execute SEND, RDMA
+	 * write or atomic op.  Don't NAK errors, just silently drop
+	 * the duplicate request.  Note that r_sge, r_len, and
+	 * r_rcv_len may be in use so don't modify them.
+	 *
+	 * We are supposed to ACK the earliest duplicate PSN but we
+	 * can coalesce an outstanding duplicate ACK.  We have to
+	 * send the earliest so that RDMA reads can be restarted at
+	 * the requester's expected PSN.
+	 *
+	 * First, find where this duplicate PSN falls within the
+	 * ACKs previously sent.
+	 * old_req is true if there is an older response that is scheduled
+	 * to be sent before sending this one.
+	 */
+	e = NULL;
+	old_req = 1;
+	ibp->n_rc_dupreq++;
+
+	spin_lock_irqsave(&qp->s_lock, flags);
+	/* Double check we can process this now that we hold the s_lock. */
+	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
+		goto unlock_done;
+
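+	/*
+	 * Scan the ack queue backward from the head; it has
+	 * QIB_MAX_RDMA_ATOMIC + 1 slots, so the index wraps there.
+	 */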
+	for (i = qp->r_head_ack_queue; ; i = prev) {
+		if (i == qp->s_tail_ack_queue)
+			old_req = 0;
+		if (i)
+			prev = i - 1;
+		else
+			prev = QIB_MAX_RDMA_ATOMIC;
+		if (prev == qp->r_head_ack_queue) {
+			e = NULL;
+			break;
+		}
+		e = &qp->s_ack_queue[prev];
+		if (!e->opcode) {
+			e = NULL;
+			break;
+		}
+		if (qib_cmp24(psn, e->psn) >= 0) {
+			if (prev == qp->s_tail_ack_queue &&
+			    qib_cmp24(psn, e->lpsn) <= 0)
+				old_req = 0;
+			break;
+		}
+	}
+	switch (opcode) {
+	case OP(RDMA_READ_REQUEST): {
+		struct ib_reth *reth;
+		u32 offset;
+		u32 len;
+
+		/*
+		 * If we didn't find the RDMA read request in the ack queue,
+		 * we can ignore this request.
+		 */
+		if (!e || e->opcode != OP(RDMA_READ_REQUEST))
+			goto unlock_done;
+		/* RETH comes after BTH */
+		reth = &ohdr->u.rc.reth;
+		/*
+		 * Address range must be a subset of the original
+		 * request and start on pmtu boundaries.
+		 * We reuse the old ack_queue slot since the requester
+		 * should not back up and request an earlier PSN for the
+		 * same request.
+		 */
+		offset = ((psn - e->psn) & QIB_PSN_MASK) *
+			ib_mtu_enum_to_int(qp->path_mtu);
+		len = be32_to_cpu(reth->length);
+		if (unlikely(offset + len != e->rdma_sge.sge_length))
+			goto unlock_done;
+		if (e->rdma_sge.mr) {
+			atomic_dec(&e->rdma_sge.mr->refcount);
+			e->rdma_sge.mr = NULL;
+		}
+		if (len != 0) {
+			u32 rkey = be32_to_cpu(reth->rkey);
+			u64 vaddr = be64_to_cpu(reth->vaddr);
+			int ok;
+
+			ok = qib_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey,
+					 IB_ACCESS_REMOTE_READ);
+			if (unlikely(!ok))
+				goto unlock_done;
+		} else {
+			e->rdma_sge.vaddr = NULL;
+			e->rdma_sge.length = 0;
+			e->rdma_sge.sge_length = 0;
+		}
+		e->psn = psn;
+		if (old_req)
+			goto unlock_done;
+		qp->s_tail_ack_queue = prev;
+		break;
+	}
+
+	case OP(COMPARE_SWAP):
+	case OP(FETCH_ADD): {
+		/*
+		 * If we didn't find the atomic request in the ack queue
+		 * or the send tasklet is already backed up to send an
+		 * earlier entry, we can ignore this request.
+		 */
+		if (!e || e->opcode != (u8) opcode || old_req)
+			goto unlock_done;
+		qp->s_tail_ack_queue = prev;
+		break;
+	}
+
+	default:
+		/*
+		 * Ignore this operation if it doesn't request an ACK
+		 * or an earlier RDMA read or atomic is going to be resent.
+		 */
+		if (!(psn & IB_BTH_REQ_ACK) || old_req)
+			goto unlock_done;
+		/*
+		 * Resend the most recent ACK if this request is
+		 * after all the previous RDMA reads and atomics.
+		 */
+		if (i == qp->r_head_ack_queue) {
+			spin_unlock_irqrestore(&qp->s_lock, flags);
+			qp->r_nak_state = 0;
+			qp->r_ack_psn = qp->r_psn - 1;
+			goto send_ack;
+		}
+		/*
+		 * Try to send a simple ACK to work around a Mellanox bug
+		 * which doesn't accept an RDMA read response or atomic
+		 * response as an ACK for earlier SENDs or RDMA writes.
+		 */
+		if (!(qp->s_flags & QIB_S_RESP_PENDING)) {
+			spin_unlock_irqrestore(&qp->s_lock, flags);
+			qp->r_nak_state = 0;
+			qp->r_ack_psn = qp->s_ack_queue[i].psn - 1;
+			goto send_ack;
+		}
+		/*
+		 * Resend the RDMA read or atomic op which
+		 * ACKs this duplicate request.
+		 */
+		qp->s_tail_ack_queue = i;
+		break;
+	}
+	qp->s_ack_state = OP(ACKNOWLEDGE);
+	qp->s_flags |= QIB_S_RESP_PENDING;
+	qp->r_nak_state = 0;
+	qib_schedule_send(qp);
+
+unlock_done:
+	spin_unlock_irqrestore(&qp->s_lock, flags);
+done:
+	return 1;
+
+send_ack:
+	return 0;
+}
+
+void qib_rc_error(struct qib_qp *qp, enum ib_wc_status err)
+{
+	unsigned long flags;
+	int lastwqe;
+
+	spin_lock_irqsave(&qp->s_lock, flags);
+	lastwqe = qib_error_qp(qp, err);
+	spin_unlock_irqrestore(&qp->s_lock, flags);
+
+	if (lastwqe) {
+		struct ib_event ev;
+
+		ev.device = qp->ibqp.device;
+		ev.element.qp = &qp->ibqp;
+		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
+		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
+	}
+}
+
+static inline void qib_update_ack_queue(struct qib_qp *qp, unsigned n)
+{
+	unsigned next;
+
+	next = n + 1;
+	if (next > QIB_MAX_RDMA_ATOMIC)
+		next = 0;
+	qp->s_tail_ack_queue = next;
+	qp->s_ack_state = OP(ACKNOWLEDGE);
+}
+
+/**
+ * qib_rc_rcv - process an incoming RC packet
+ * @rcd: the context pointer
+ * @hdr: the header of this packet
+ * @has_grh: true if the header has a GRH
+ * @data: the packet data
+ * @tlen: the packet length
+ * @qp: the QP for this packet
+ *
+ * This is called from qib_qp_rcv() to process an incoming RC packet
+ * for the given QP.
+ * Called at interrupt level.
+ */
+void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
+		int has_grh, void *data, u32 tlen, struct qib_qp *qp)
+{
+	struct qib_ibport *ibp = &rcd->ppd->ibport_data;
+	struct qib_other_headers *ohdr;
+	u32 opcode;
+	u32 hdrsize;
+	u32 psn;
+	u32 pad;
+	struct ib_wc wc;
+	u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
+	int diff;
+	struct ib_reth *reth;
+	unsigned long flags;
+	int ret;
+
+	/* Check for GRH */
+	if (!has_grh) {
+		ohdr = &hdr->u.oth;
+		hdrsize = 8 + 12;       /* LRH + BTH */
+	} else {
+		ohdr = &hdr->u.l.oth;
+		hdrsize = 8 + 40 + 12;  /* LRH + GRH + BTH */
+	}
+
+	opcode = be32_to_cpu(ohdr->bth[0]);
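+	/*
+	 * bth[0] still carries the P_Key in its low 16 bits, which
+	 * qib_ruc_check_hdr() checks, so extract the opcode byte after.
+	 */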
+	spin_lock_irqsave(&qp->s_lock, flags);
+	if (qib_ruc_check_hdr(ibp, hdr, has_grh, qp, opcode))
+		goto sunlock;
+	spin_unlock_irqrestore(&qp->s_lock, flags);
+
+	psn = be32_to_cpu(ohdr->bth[2]);
+	opcode >>= 24;
+
+	/* Prevent simultaneous processing after APM on different CPUs */
+	spin_lock(&qp->r_lock);
+
+	/*
+	 * Process responses (ACKs) before anything else.  Note that the
+	 * packet sequence number will be for something in the send work
+	 * queue rather than the expected receive packet sequence number.
+	 * In other words, this QP is the requester.
+	 */
+	if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
+	    opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
+		qib_rc_rcv_resp(ibp, ohdr, data, tlen, qp, opcode, psn,
+				hdrsize, pmtu, rcd);
+		goto runlock;
+	}
+
+	/* Compute 24 bits worth of difference. */
+	diff = qib_cmp24(psn, qp->r_psn);
+	if (unlikely(diff)) {
+		if (qib_rc_rcv_error(ohdr, data, qp, opcode, psn, diff, rcd))
+			goto runlock;
+		goto send_ack;
+	}
+
+	/* Check for opcode sequence errors. */
+	switch (qp->r_state) {
+	case OP(SEND_FIRST):
+	case OP(SEND_MIDDLE):
+		if (opcode == OP(SEND_MIDDLE) ||
+		    opcode == OP(SEND_LAST) ||
+		    opcode == OP(SEND_LAST_WITH_IMMEDIATE))
+			break;
+		goto nack_inv;
+
+	case OP(RDMA_WRITE_FIRST):
+	case OP(RDMA_WRITE_MIDDLE):
+		if (opcode == OP(RDMA_WRITE_MIDDLE) ||
+		    opcode == OP(RDMA_WRITE_LAST) ||
+		    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
+			break;
+		goto nack_inv;
+
+	default:
+		if (opcode == OP(SEND_MIDDLE) ||
+		    opcode == OP(SEND_LAST) ||
+		    opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
+		    opcode == OP(RDMA_WRITE_MIDDLE) ||
+		    opcode == OP(RDMA_WRITE_LAST) ||
+		    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
+			goto nack_inv;
+		/*
+		 * Note that it is up to the requester to not send a new
+		 * RDMA read or atomic operation before receiving an ACK
+		 * for the previous operation.
+		 */
+		break;
+	}
+
+	memset(&wc, 0, sizeof wc);
+
+	if (qp->state == IB_QPS_RTR && !(qp->r_flags & QIB_R_COMM_EST)) {
+		qp->r_flags |= QIB_R_COMM_EST;
+		if (qp->ibqp.event_handler) {
+			struct ib_event ev;
+
+			ev.device = qp->ibqp.device;
+			ev.element.qp = &qp->ibqp;
+			ev.event = IB_EVENT_COMM_EST;
+			qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
+		}
+	}
+
+	/* OK, process the packet. */
+	switch (opcode) {
+	case OP(SEND_FIRST):
+		ret = qib_get_rwqe(qp, 0);
+		if (ret < 0)
+			goto nack_op_err;
+		if (!ret)
+			goto rnr_nak;
+		qp->r_rcv_len = 0;
+		/* FALLTHROUGH */
+	case OP(SEND_MIDDLE):
+	case OP(RDMA_WRITE_MIDDLE):
+send_middle:
+		/*
+		 * Middle packets must carry exactly a PMTU of payload
+		 * and must not overrun the posted RWQE length.
+		 */
+		if (unlikely(tlen != (hdrsize + pmtu + 4)))
+			goto nack_inv;
+		qp->r_rcv_len += pmtu;
+		if (unlikely(qp->r_rcv_len > qp->r_len))
+			goto nack_inv;
+		qib_copy_sge(&qp->r_sge, data, pmtu, 1);
+		break;
+
+	case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
+		/* consume RWQE */
+		ret = qib_get_rwqe(qp, 1);
+		if (ret < 0)
+			goto nack_op_err;
+		if (!ret)
+			goto rnr_nak;
+		goto send_last_imm;
+
+	case OP(SEND_ONLY):
+	case OP(SEND_ONLY_WITH_IMMEDIATE):
+		ret = qib_get_rwqe(qp, 0);
+		if (ret < 0)
+			goto nack_op_err;
+		if (!ret)
+			goto rnr_nak;
+		qp->r_rcv_len = 0;
+		if (opcode == OP(SEND_ONLY))
+			goto send_last;
+		/* FALLTHROUGH */
+	case OP(SEND_LAST_WITH_IMMEDIATE):
+send_last_imm:
+		wc.ex.imm_data = ohdr->u.imm_data;
+		hdrsize += 4;
+		wc.wc_flags = IB_WC_WITH_IMM;
+		/* FALLTHROUGH */
+	case OP(SEND_LAST):
+	case OP(RDMA_WRITE_LAST):
+send_last:
+		/* Get the number of bytes the message was padded by. */
+		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
+		/* Check for invalid length. */
+		/* XXX LAST len should be >= 1 */
+		if (unlikely(tlen < (hdrsize + pad + 4)))
+			goto nack_inv;
+		/* Don't count the CRC. */
+		tlen -= (hdrsize + pad + 4);
+		wc.byte_len = tlen + qp->r_rcv_len;
+		if (unlikely(wc.byte_len > qp->r_len))
+			goto nack_inv;
+		qib_copy_sge(&qp->r_sge, data, tlen, 1);
+		while (qp->r_sge.num_sge) {
+			atomic_dec(&qp->r_sge.sge.mr->refcount);
+			if (--qp->r_sge.num_sge)
+				qp->r_sge.sge = *qp->r_sge.sg_list++;
+		}
+		qp->r_msn++;
+		if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
+			break;
+		wc.wr_id = qp->r_wr_id;
+		wc.status = IB_WC_SUCCESS;
+		if (opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE) ||
+		    opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
+			wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
+		else
+			wc.opcode = IB_WC_RECV;
+		wc.qp = &qp->ibqp;
+		wc.src_qp = qp->remote_qpn;
+		wc.slid = qp->remote_ah_attr.dlid;
+		wc.sl = qp->remote_ah_attr.sl;
+		/* Signal completion event if the solicited bit is set. */
+		qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
+			     (ohdr->bth[0] &
+			      cpu_to_be32(IB_BTH_SOLICITED)) != 0);
+		break;
+
+	case OP(RDMA_WRITE_FIRST):
+	case OP(RDMA_WRITE_ONLY):
+	case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
+		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
+			goto nack_inv;
+		/* consume RWQE */
+		reth = &ohdr->u.rc.reth;
+		hdrsize += sizeof(*reth);
+		qp->r_len = be32_to_cpu(reth->length);
+		qp->r_rcv_len = 0;
+		qp->r_sge.sg_list = NULL;
+		if (qp->r_len != 0) {
+			u32 rkey = be32_to_cpu(reth->rkey);
+			u64 vaddr = be64_to_cpu(reth->vaddr);
+			int ok;
+
+			/* Check rkey & NAK */
+			ok = qib_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr,
+					 rkey, IB_ACCESS_REMOTE_WRITE);
+			if (unlikely(!ok))
+				goto nack_acc;
+			qp->r_sge.num_sge = 1;
+		} else {
+			qp->r_sge.num_sge = 0;
+			qp->r_sge.sge.mr = NULL;
+			qp->r_sge.sge.vaddr = NULL;
+			qp->r_sge.sge.length = 0;
+			qp->r_sge.sge.sge_length = 0;
+		}
+		if (opcode == OP(RDMA_WRITE_FIRST))
+			goto send_middle;
+		else if (opcode == OP(RDMA_WRITE_ONLY))
+			goto send_last;
+		ret = qib_get_rwqe(qp, 1);
+		if (ret < 0)
+			goto nack_op_err;
+		if (!ret)
+			goto rnr_nak;
+		goto send_last_imm;
+
+	case OP(RDMA_READ_REQUEST): {
+		struct qib_ack_entry *e;
+		u32 len;
+		u8 next;
+
+		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
+			goto nack_inv;
+		next = qp->r_head_ack_queue + 1;
+		/* s_ack_queue is size QIB_MAX_RDMA_ATOMIC+1 so use > not >= */
+		if (next > QIB_MAX_RDMA_ATOMIC)
+			next = 0;
+		spin_lock_irqsave(&qp->s_lock, flags);
+		/* Double check we can process this while holding the s_lock. */
+		if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
+			goto srunlock;
+		if (unlikely(next == qp->s_tail_ack_queue)) {
+			if (!qp->s_ack_queue[next].sent)
+				goto nack_inv_unlck;
+			qib_update_ack_queue(qp, next);
+		}
+		e = &qp->s_ack_queue[qp->r_head_ack_queue];
+		if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
+			atomic_dec(&e->rdma_sge.mr->refcount);
+			e->rdma_sge.mr = NULL;
+		}
+		reth = &ohdr->u.rc.reth;
+		len = be32_to_cpu(reth->length);
+		if (len) {
+			u32 rkey = be32_to_cpu(reth->rkey);
+			u64 vaddr = be64_to_cpu(reth->vaddr);
+			int ok;
+
+			/* Check rkey & NAK */
+			ok = qib_rkey_ok(qp, &e->rdma_sge, len, vaddr,
+					 rkey, IB_ACCESS_REMOTE_READ);
+			if (unlikely(!ok))
+				goto nack_acc_unlck;
+			/*
+			 * Update the next expected PSN.  We add 1 later
+			 * below, so only add the remainder here.
+			 */
+			if (len > pmtu)
+				qp->r_psn += (len - 1) / pmtu;
+		} else {
+			e->rdma_sge.mr = NULL;
+			e->rdma_sge.vaddr = NULL;
+			e->rdma_sge.length = 0;
+			e->rdma_sge.sge_length = 0;
+		}
+		e->opcode = opcode;
+		e->sent = 0;
+		e->psn = psn;
+		e->lpsn = qp->r_psn;
+		/*
+		 * We need to increment the MSN here instead of when we
+		 * finish sending the result since a duplicate request would
+		 * increment it more than once.
+		 */
+		qp->r_msn++;
+		qp->r_psn++;
+		qp->r_state = opcode;
+		qp->r_nak_state = 0;
+		qp->r_head_ack_queue = next;
+
+		/* Schedule the send tasklet. */
+		qp->s_flags |= QIB_S_RESP_PENDING;
+		qib_schedule_send(qp);
+
+		goto srunlock;
+	}
+
+	case OP(COMPARE_SWAP):
+	case OP(FETCH_ADD): {
+		struct ib_atomic_eth *ateth;
+		struct qib_ack_entry *e;
+		u64 vaddr;
+		atomic64_t *maddr;
+		u64 sdata;
+		u32 rkey;
+		u8 next;
+
+		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
+			goto nack_inv;
+		next = qp->r_head_ack_queue + 1;
+		if (next > QIB_MAX_RDMA_ATOMIC)
+			next = 0;
+		spin_lock_irqsave(&qp->s_lock, flags);
+		/* Double check we can process this while holding the s_lock. */
+		if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
+			goto srunlock;
+		if (unlikely(next == qp->s_tail_ack_queue)) {
+			if (!qp->s_ack_queue[next].sent)
+				goto nack_inv_unlck;
+			qib_update_ack_queue(qp, next);
+		}
+		e = &qp->s_ack_queue[qp->r_head_ack_queue];
+		if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
+			atomic_dec(&e->rdma_sge.mr->refcount);
+			e->rdma_sge.mr = NULL;
+		}
+		ateth = &ohdr->u.atomic_eth;
+		vaddr = ((u64) be32_to_cpu(ateth->vaddr[0]) << 32) |
+			be32_to_cpu(ateth->vaddr[1]);
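+		/* Atomic ops must target a naturally aligned 64-bit address. */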
+		if (unlikely(vaddr & (sizeof(u64) - 1)))
+			goto nack_inv_unlck;
+		rkey = be32_to_cpu(ateth->rkey);
+		/* Check rkey & NAK */
+		if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
+					  vaddr, rkey,
+					  IB_ACCESS_REMOTE_ATOMIC)))
+			goto nack_acc_unlck;
+		/*
+		 * Perform the atomic op and save the old value as the
+		 * result: atomic64_add_return() yields the new value, so
+		 * subtract sdata to recover the old; cmpxchg() returns
+		 * the prior contents directly.
+		 */
+		maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
+		sdata = be64_to_cpu(ateth->swap_data);
+		e->atomic_data = (opcode == OP(FETCH_ADD)) ?
+			(u64) atomic64_add_return(sdata, maddr) - sdata :
+			(u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
+				      be64_to_cpu(ateth->compare_data),
+				      sdata);
+		atomic_dec(&qp->r_sge.sge.mr->refcount);
+		qp->r_sge.num_sge = 0;
+		e->opcode = opcode;
+		e->sent = 0;
+		e->psn = psn;
+		e->lpsn = psn;
+		qp->r_msn++;
+		qp->r_psn++;
+		qp->r_state = opcode;
+		qp->r_nak_state = 0;
+		qp->r_head_ack_queue = next;
+
+		/* Schedule the send tasklet. */
+		qp->s_flags |= QIB_S_RESP_PENDING;
+		qib_schedule_send(qp);
+
+		goto srunlock;
+	}
+
+	default:
+		/* NAK unknown opcodes. */
+		goto nack_inv;
+	}
+	qp->r_psn++;
+	qp->r_state = opcode;
+	qp->r_ack_psn = psn;
+	qp->r_nak_state = 0;
+	/*
+	 * Send an ACK if requested or required; bit 31 of the saved
+	 * bth[2] word is the BTH ACK-request flag.
+	 */
+	if (psn & (1 << 31))
+		goto send_ack;
+	goto runlock;
+
+rnr_nak:
+	qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer;
+	qp->r_ack_psn = qp->r_psn;
+	/* Queue RNR NAK for later */
+	if (list_empty(&qp->rspwait)) {
+		qp->r_flags |= QIB_R_RSP_NAK;
+		atomic_inc(&qp->refcount);
+		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
+	}
+	goto runlock;
+
+nack_op_err:
+	qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
+	qp->r_nak_state = IB_NAK_REMOTE_OPERATIONAL_ERROR;
+	qp->r_ack_psn = qp->r_psn;
+	/* Queue NAK for later */
+	if (list_empty(&qp->rspwait)) {
+		qp->r_flags |= QIB_R_RSP_NAK;
+		atomic_inc(&qp->refcount);
+		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
+	}
+	goto runlock;
+
+nack_inv_unlck:
+	spin_unlock_irqrestore(&qp->s_lock, flags);
+nack_inv:
+	qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
+	qp->r_nak_state = IB_NAK_INVALID_REQUEST;
+	qp->r_ack_psn = qp->r_psn;
+	/* Queue NAK for later */
+	if (list_empty(&qp->rspwait)) {
+		qp->r_flags |= QIB_R_RSP_NAK;
+		atomic_inc(&qp->refcount);
+		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
+	}
+	goto runlock;
+
+nack_acc_unlck:
+	spin_unlock_irqrestore(&qp->s_lock, flags);
+nack_acc:
+	qib_rc_error(qp, IB_WC_LOC_PROT_ERR);
+	qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
+	qp->r_ack_psn = qp->r_psn;
+send_ack:
+	qib_send_rc_ack(qp);
+runlock:
+	spin_unlock(&qp->r_lock);
+	return;
+
+srunlock:
+	spin_unlock_irqrestore(&qp->s_lock, flags);
+	spin_unlock(&qp->r_lock);
+	return;
+
+sunlock:
+	spin_unlock_irqrestore(&qp->s_lock, flags);
+}
diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c
new file mode 100644
index 000000000000..eb78d9367f06
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_ruc.c
@@ -0,0 +1,817 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/spinlock.h>
+
+#include "qib.h"
+#include "qib_mad.h"
+
+/*
+ * Convert the AETH RNR timeout code into the number of microseconds.
+ */
+const u32 ib_qib_rnr_table[32] = {
+	655360,	/* 00: 655.36 */
+	10,	/* 01:    .01 */
+	20,	/* 02:    .02 */
+	30,	/* 03:    .03 */
+	40,	/* 04:    .04 */
+	60,	/* 05:    .06 */
+	80,	/* 06:    .08 */
+	120,	/* 07:    .12 */
+	160,	/* 08:    .16 */
+	240,	/* 09:    .24 */
+	320,	/* 0A:    .32 */
+	480,	/* 0B:    .48 */
+	640,	/* 0C:    .64 */
+	960,	/* 0D:    .96 */
+	1280,	/* 0E:   1.28 */
+	1920,	/* 0F:   1.92 */
+	2560,	/* 10:   2.56 */
+	3840,	/* 11:   3.84 */
+	5120,	/* 12:   5.12 */
+	7680,	/* 13:   7.68 */
+	10240,	/* 14:  10.24 */
+	15360,	/* 15:  15.36 */
+	20480,	/* 16:  20.48 */
+	30720,	/* 17:  30.72 */
+	40960,	/* 18:  40.96 */
+	61440,	/* 19:  61.44 */
+	81920,	/* 1A:  81.92 */
+	122880,	/* 1B: 122.88 */
+	163840,	/* 1C: 163.84 */
+	245760,	/* 1D: 245.76 */
+	327680,	/* 1E: 327.68 */
+	491520	/* 1F: 491.52 */
+};
+
+/*
+ * Validate an RWQE and fill in the SGE state.
+ * Return 1 if OK.
+ */
+static int qib_init_sge(struct qib_qp *qp, struct qib_rwqe *wqe)
+{
+	int i, j, ret;
+	struct ib_wc wc;
+	struct qib_lkey_table *rkt;
+	struct qib_pd *pd;
+	struct qib_sge_state *ss;
+
+	rkt = &to_idev(qp->ibqp.device)->lk_table;
+	pd = to_ipd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
+	ss = &qp->r_sge;
+	ss->sg_list = qp->r_sg_list;
+	qp->r_len = 0;
+	for (i = j = 0; i < wqe->num_sge; i++) {
+		if (wqe->sg_list[i].length == 0)
+			continue;
+		/* Check LKEY */
+		if (!qib_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
+				 &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE))
+			goto bad_lkey;
+		qp->r_len += wqe->sg_list[i].length;
+		j++;
+	}
+	ss->num_sge = j;
+	ss->total_len = qp->r_len;
+	ret = 1;
+	goto bail;
+
+bad_lkey:
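+	/* Undo the MR references taken for the SGEs validated so far. */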
+	while (j) {
+		struct qib_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;
+
+		atomic_dec(&sge->mr->refcount);
+	}
+	ss->num_sge = 0;
+	memset(&wc, 0, sizeof(wc));
+	wc.wr_id = wqe->wr_id;
+	wc.status = IB_WC_LOC_PROT_ERR;
+	wc.opcode = IB_WC_RECV;
+	wc.qp = &qp->ibqp;
+	/* Signal solicited completion event. */
+	qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
+	ret = 0;
+bail:
+	return ret;
+}
+
+/**
+ * qib_get_rwqe - fetch the next RWQE and update the QP's receive state
+ * @qp: the QP
+ * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
+ *
+ * Return -1 if there is a local error, 0 if no RWQE is available,
+ * otherwise return 1.
+ *
+ * Can be called from interrupt level.
+ */
+int qib_get_rwqe(struct qib_qp *qp, int wr_id_only)
+{
+	unsigned long flags;
+	struct qib_rq *rq;
+	struct qib_rwq *wq;
+	struct qib_srq *srq;
+	struct qib_rwqe *wqe;
+	void (*handler)(struct ib_event *, void *);
+	u32 tail;
+	int ret;
+
+	if (qp->ibqp.srq) {
+		srq = to_isrq(qp->ibqp.srq);
+		handler = srq->ibsrq.event_handler;
+		rq = &srq->rq;
+	} else {
+		srq = NULL;
+		handler = NULL;
+		rq = &qp->r_rq;
+	}
+
+	spin_lock_irqsave(&rq->lock, flags);
+	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
+		ret = 0;
+		goto unlock;
+	}
+
+	wq = rq->wq;
+	tail = wq->tail;
+	/* Validate tail before using it since it is user writable. */
+	if (tail >= rq->size)
+		tail = 0;
+	if (unlikely(tail == wq->head)) {
+		ret = 0;
+		goto unlock;
+	}
+	/* Make sure entry is read after head index is read. */
+	smp_rmb();
+	wqe = get_rwqe_ptr(rq, tail);
+	/*
+	 * Even though we update the tail index in memory, the verbs
+	 * consumer is not supposed to post more entries until a
+	 * completion is generated.
+	 */
+	if (++tail >= rq->size)
+		tail = 0;
+	wq->tail = tail;
+	if (!wr_id_only && !qib_init_sge(qp, wqe)) {
+		ret = -1;
+		goto unlock;
+	}
+	qp->r_wr_id = wqe->wr_id;
+
+	ret = 1;
+	set_bit(QIB_R_WRID_VALID, &qp->r_aflags);
+	if (handler) {
+		u32 n;
+
+		/*
+		 * Validate head pointer value and compute
+		 * the number of remaining WQEs.
+		 */
+		n = wq->head;
+		if (n >= rq->size)
+			n = 0;
+		if (n < tail)
+			n += rq->size - tail;
+		else
+			n -= tail;
+		if (n < srq->limit) {
+			struct ib_event ev;
+
+			srq->limit = 0;
+			spin_unlock_irqrestore(&rq->lock, flags);
+			ev.device = qp->ibqp.device;
+			ev.element.srq = qp->ibqp.srq;
+			ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
+			handler(&ev, srq->ibsrq.srq_context);
+			goto bail;
+		}
+	}
+unlock:
+	spin_unlock_irqrestore(&rq->lock, flags);
+bail:
+	return ret;
+}
+
+/*
+ * Switch to alternate path.
+ * The QP s_lock should be held and interrupts disabled.
+ */
+void qib_migrate_qp(struct qib_qp *qp)
+{
+	struct ib_event ev;
+
+	qp->s_mig_state = IB_MIG_MIGRATED;
+	qp->remote_ah_attr = qp->alt_ah_attr;
+	qp->port_num = qp->alt_ah_attr.port_num;
+	qp->s_pkey_index = qp->s_alt_pkey_index;
+
+	ev.device = qp->ibqp.device;
+	ev.element.qp = &qp->ibqp;
+	ev.event = IB_EVENT_PATH_MIG;
+	qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
+}
+
+static __be64 get_sguid(struct qib_ibport *ibp, unsigned index)
+{
+	if (!index) {
+		struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+
+		return ppd->guid;
+	} else
+		return ibp->guids[index - 1];
+}
+
+static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id)
+{
+	return (gid->global.interface_id == id &&
+		(gid->global.subnet_prefix == gid_prefix ||
+		 gid->global.subnet_prefix == IB_DEFAULT_GID_PREFIX));
+}
+
+/*
+ * Validate an incoming packet's headers against the QP's address
+ * information, migrating to the alternate path when appropriate.
+ * This should be called with the QP s_lock held.
+ */
+int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr,
+		      int has_grh, struct qib_qp *qp, u32 bth0)
+{
+	__be64 guid;
+
+	if (qp->s_mig_state == IB_MIG_ARMED && (bth0 & IB_BTH_MIG_REQ)) {
+		if (!has_grh) {
+			if (qp->alt_ah_attr.ah_flags & IB_AH_GRH)
+				goto err;
+		} else {
+			if (!(qp->alt_ah_attr.ah_flags & IB_AH_GRH))
+				goto err;
+			guid = get_sguid(ibp, qp->alt_ah_attr.grh.sgid_index);
+			if (!gid_ok(&hdr->u.l.grh.dgid, ibp->gid_prefix, guid))
+				goto err;
+			if (!gid_ok(&hdr->u.l.grh.sgid,
+			    qp->alt_ah_attr.grh.dgid.global.subnet_prefix,
+			    qp->alt_ah_attr.grh.dgid.global.interface_id))
+				goto err;
+		}
+		if (!qib_pkey_ok((u16)bth0,
+				 qib_get_pkey(ibp, qp->s_alt_pkey_index))) {
+			qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY,
+				      (u16)bth0,
+				      (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
+				      0, qp->ibqp.qp_num,
+				      hdr->lrh[3], hdr->lrh[1]);
+			goto err;
+		}
+		/* Validate the SLID. See Ch. 9.6.1.5 and 17.2.8 */
+		if (be16_to_cpu(hdr->lrh[3]) != qp->alt_ah_attr.dlid ||
+		    ppd_from_ibp(ibp)->port != qp->alt_ah_attr.port_num)
+			goto err;
+		qib_migrate_qp(qp);
+	} else {
+		if (!has_grh) {
+			if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
+				goto err;
+		} else {
+			if (!(qp->remote_ah_attr.ah_flags & IB_AH_GRH))
+				goto err;
+			guid = get_sguid(ibp,
+					 qp->remote_ah_attr.grh.sgid_index);
+			if (!gid_ok(&hdr->u.l.grh.dgid, ibp->gid_prefix, guid))
+				goto err;
+			if (!gid_ok(&hdr->u.l.grh.sgid,
+			    qp->remote_ah_attr.grh.dgid.global.subnet_prefix,
+			    qp->remote_ah_attr.grh.dgid.global.interface_id))
+				goto err;
+		}
+		if (!qib_pkey_ok((u16)bth0,
+				 qib_get_pkey(ibp, qp->s_pkey_index))) {
+			qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY,
+				      (u16)bth0,
+				      (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
+				      0, qp->ibqp.qp_num,
+				      hdr->lrh[3], hdr->lrh[1]);
+			goto err;
+		}
+		/* Validate the SLID. See Ch. 9.6.1.5 */
+		if (be16_to_cpu(hdr->lrh[3]) != qp->remote_ah_attr.dlid ||
+		    ppd_from_ibp(ibp)->port != qp->port_num)
+			goto err;
+		if (qp->s_mig_state == IB_MIG_REARM &&
+		    !(bth0 & IB_BTH_MIG_REQ))
+			qp->s_mig_state = IB_MIG_ARMED;
+	}
+
+	return 0;
+
+err:
+	return 1;
+}
+
+/**
+ * qib_ruc_loopback - handle UC and RC loopback requests
+ * @sqp: the sending QP
+ *
+ * This is called from qib_do_send() to
+ * forward a WQE addressed to the same HCA.
+ * Note that although we are single threaded due to the tasklet, we still
+ * have to protect against post_send().  We don't have to worry about
+ * receive interrupts since this is a connected protocol and all packets
+ * will pass through here.
+ */
+static void qib_ruc_loopback(struct qib_qp *sqp)
+{
+	struct qib_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
+	struct qib_qp *qp;
+	struct qib_swqe *wqe;
+	struct qib_sge *sge;
+	unsigned long flags;
+	struct ib_wc wc;
+	u64 sdata;
+	atomic64_t *maddr;
+	enum ib_wc_status send_status;
+	int release;
+	int ret;
+
+	/*
+	 * Note that we check the responder QP state after
+	 * checking the requester's state.
+	 */
+	qp = qib_lookup_qpn(ibp, sqp->remote_qpn);
+
+	spin_lock_irqsave(&sqp->s_lock, flags);
+
+	/* Return if we are already busy processing a work request. */
+	if ((sqp->s_flags & (QIB_S_BUSY | QIB_S_ANY_WAIT)) ||
+	    !(ib_qib_state_ops[sqp->state] & QIB_PROCESS_OR_FLUSH_SEND))
+		goto unlock;
+
+	sqp->s_flags |= QIB_S_BUSY;
+
+again:
+	if (sqp->s_last == sqp->s_head)
+		goto clr_busy;
+	wqe = get_swqe_ptr(sqp, sqp->s_last);
+
+	/* Return if it is not OK to start a new work request. */
+	if (!(ib_qib_state_ops[sqp->state] & QIB_PROCESS_NEXT_SEND_OK)) {
+		if (!(ib_qib_state_ops[sqp->state] & QIB_FLUSH_SEND))
+			goto clr_busy;
+		/* We are in the error state, flush the work request. */
+		send_status = IB_WC_WR_FLUSH_ERR;
+		goto flush_send;
+	}
+
+	/*
+	 * We can rely on the entry not changing without the s_lock
+	 * being held until we update s_last.
+	 * We increment s_cur to indicate s_last is in progress.
+	 */
+	if (sqp->s_last == sqp->s_cur) {
+		if (++sqp->s_cur >= sqp->s_size)
+			sqp->s_cur = 0;
+	}
+	spin_unlock_irqrestore(&sqp->s_lock, flags);
+
+	if (!qp || !(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) ||
+	    qp->ibqp.qp_type != sqp->ibqp.qp_type) {
+		ibp->n_pkt_drops++;
+		/*
+		 * For RC, the requester would timeout and retry so
+		 * shortcut the timeouts and just signal too many retries.
+		 */
+		if (sqp->ibqp.qp_type == IB_QPT_RC)
+			send_status = IB_WC_RETRY_EXC_ERR;
+		else
+			send_status = IB_WC_SUCCESS;
+		goto serr;
+	}
+
+	memset(&wc, 0, sizeof wc);
+	send_status = IB_WC_SUCCESS;
+
+	release = 1;
+	sqp->s_sge.sge = wqe->sg_list[0];
+	sqp->s_sge.sg_list = wqe->sg_list + 1;
+	sqp->s_sge.num_sge = wqe->wr.num_sge;
+	sqp->s_len = wqe->length;
+	switch (wqe->wr.opcode) {
+	case IB_WR_SEND_WITH_IMM:
+		wc.wc_flags = IB_WC_WITH_IMM;
+		wc.ex.imm_data = wqe->wr.ex.imm_data;
+		/* FALLTHROUGH */
+	case IB_WR_SEND:
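+		/* qib_get_rwqe: <0 error, 0 no receive WQE posted (RNR), >0 ok */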
+		ret = qib_get_rwqe(qp, 0);
+		if (ret < 0)
+			goto op_err;
+		if (!ret)
+			goto rnr_nak;
+		break;
+
+	case IB_WR_RDMA_WRITE_WITH_IMM:
+		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
+			goto inv_err;
+		wc.wc_flags = IB_WC_WITH_IMM;
+		wc.ex.imm_data = wqe->wr.ex.imm_data;
+		ret = qib_get_rwqe(qp, 1);
+		if (ret < 0)
+			goto op_err;
+		if (!ret)
+			goto rnr_nak;
+		/* FALLTHROUGH */
+	case IB_WR_RDMA_WRITE:
+		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
+			goto inv_err;
+		if (wqe->length == 0)
+			break;
+		if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
+					  wqe->wr.wr.rdma.remote_addr,
+					  wqe->wr.wr.rdma.rkey,
+					  IB_ACCESS_REMOTE_WRITE)))
+			goto acc_err;
+		qp->r_sge.sg_list = NULL;
+		qp->r_sge.num_sge = 1;
+		qp->r_sge.total_len = wqe->length;
+		break;
+
+	case IB_WR_RDMA_READ:
+		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
+			goto inv_err;
+		if (unlikely(!qib_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
+					  wqe->wr.wr.rdma.remote_addr,
+					  wqe->wr.wr.rdma.rkey,
+					  IB_ACCESS_REMOTE_READ)))
+			goto acc_err;
+		release = 0;
+		sqp->s_sge.sg_list = NULL;
+		sqp->s_sge.num_sge = 1;
+		qp->r_sge.sge = wqe->sg_list[0];
+		qp->r_sge.sg_list = wqe->sg_list + 1;
+		qp->r_sge.num_sge = wqe->wr.num_sge;
+		qp->r_sge.total_len = wqe->length;
+		break;
+
+	case IB_WR_ATOMIC_CMP_AND_SWP:
+	case IB_WR_ATOMIC_FETCH_AND_ADD:
+		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
+			goto inv_err;
+		if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
+					  wqe->wr.wr.atomic.remote_addr,
+					  wqe->wr.wr.atomic.rkey,
+					  IB_ACCESS_REMOTE_ATOMIC)))
+			goto acc_err;
+		/* Perform atomic OP and save result. */
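+		/*
+		 * Both flavors store the pre-op value: fetch-and-add via
+		 * atomic64_add_return() minus the addend; compare-and-swap
+		 * via cmpxchg(), which returns the prior contents whether
+		 * or not the swap happened.
+		 */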
+		maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
+		sdata = wqe->wr.wr.atomic.compare_add;
+		*(u64 *) sqp->s_sge.sge.vaddr =
+			(wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
+			(u64) atomic64_add_return(sdata, maddr) - sdata :
+			(u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
+				      sdata, wqe->wr.wr.atomic.swap);
+		atomic_dec(&qp->r_sge.sge.mr->refcount);
+		qp->r_sge.num_sge = 0;
+		goto send_comp;
+
+	default:
+		send_status = IB_WC_LOC_QP_OP_ERR;
+		goto serr;
+	}
+
+	sge = &sqp->s_sge.sge;
+	while (sqp->s_len) {
+		u32 len = sqp->s_len;
+
+		if (len > sge->length)
+			len = sge->length;
+		if (len > sge->sge_length)
+			len = sge->sge_length;
+		BUG_ON(len == 0);
+		qib_copy_sge(&qp->r_sge, sge->vaddr, len, release);
+		sge->vaddr += len;
+		sge->length -= len;
+		sge->sge_length -= len;
+		if (sge->sge_length == 0) {
+			if (!release)
+				atomic_dec(&sge->mr->refcount);
+			if (--sqp->s_sge.num_sge)
+				*sge = *sqp->s_sge.sg_list++;
+		} else if (sge->length == 0 && sge->mr->lkey) {
+			if (++sge->n >= QIB_SEGSZ) {
+				if (++sge->m >= sge->mr->mapsz)
+					break;
+				sge->n = 0;
+			}
+			sge->vaddr =
+				sge->mr->map[sge->m]->segs[sge->n].vaddr;
+			sge->length =
+				sge->mr->map[sge->m]->segs[sge->n].length;
+		}
+		sqp->s_len -= len;
+	}
+	if (release)
+		while (qp->r_sge.num_sge) {
+			atomic_dec(&qp->r_sge.sge.mr->refcount);
+			if (--qp->r_sge.num_sge)
+				qp->r_sge.sge = *qp->r_sge.sg_list++;
+		}
+
+	if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
+		goto send_comp;
+
+	if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
+		wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
+	else
+		wc.opcode = IB_WC_RECV;
+	wc.wr_id = qp->r_wr_id;
+	wc.status = IB_WC_SUCCESS;
+	wc.byte_len = wqe->length;
+	wc.qp = &qp->ibqp;
+	wc.src_qp = qp->remote_qpn;
+	wc.slid = qp->remote_ah_attr.dlid;
+	wc.sl = qp->remote_ah_attr.sl;
+	wc.port_num = 1;
+	/* Signal completion event if the solicited bit is set. */
+	qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
+		       wqe->wr.send_flags & IB_SEND_SOLICITED);
+
+send_comp:
+	spin_lock_irqsave(&sqp->s_lock, flags);
+	ibp->n_loop_pkts++;
+flush_send:
+	sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
+	qib_send_complete(sqp, wqe, send_status);
+	goto again;
+
+rnr_nak:
+	/* Handle RNR NAK */
+	if (qp->ibqp.qp_type == IB_QPT_UC)
+		goto send_comp;
+	ibp->n_rnr_naks++;
+	/*
+	 * Note: we don't need the s_lock held since the BUSY flag
+	 * makes this single threaded.
+	 */
+	if (sqp->s_rnr_retry == 0) {
+		send_status = IB_WC_RNR_RETRY_EXC_ERR;
+		goto serr;
+	}
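+	/*
+	 * An RNR retry count of 7 means "retry forever" in IB, so only
+	 * decrement finite retry counts.
+	 */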
+	if (sqp->s_rnr_retry_cnt < 7)
+		sqp->s_rnr_retry--;
+	spin_lock_irqsave(&sqp->s_lock, flags);
+	if (!(ib_qib_state_ops[sqp->state] & QIB_PROCESS_RECV_OK))
+		goto clr_busy;
+	sqp->s_flags |= QIB_S_WAIT_RNR;
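+	/* ib_qib_rnr_table maps the encoded min RNR timer value to usecs */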
+	sqp->s_timer.function = qib_rc_rnr_retry;
+	sqp->s_timer.expires = jiffies +
+		usecs_to_jiffies(ib_qib_rnr_table[qp->r_min_rnr_timer]);
+	add_timer(&sqp->s_timer);
+	goto clr_busy;
+
+op_err:
+	send_status = IB_WC_REM_OP_ERR;
+	wc.status = IB_WC_LOC_QP_OP_ERR;
+	goto err;
+
+inv_err:
+	send_status = IB_WC_REM_INV_REQ_ERR;
+	wc.status = IB_WC_LOC_QP_OP_ERR;
+	goto err;
+
+acc_err:
+	send_status = IB_WC_REM_ACCESS_ERR;
+	wc.status = IB_WC_LOC_PROT_ERR;
+err:
+	/* responder goes to error state */
+	qib_rc_error(qp, wc.status);
+
+serr:
+	spin_lock_irqsave(&sqp->s_lock, flags);
+	qib_send_complete(sqp, wqe, send_status);
+	if (sqp->ibqp.qp_type == IB_QPT_RC) {
+		int lastwqe = qib_error_qp(sqp, IB_WC_WR_FLUSH_ERR);
+
+		sqp->s_flags &= ~QIB_S_BUSY;
+		spin_unlock_irqrestore(&sqp->s_lock, flags);
+		if (lastwqe) {
+			struct ib_event ev;
+
+			ev.device = sqp->ibqp.device;
+			ev.element.qp = &sqp->ibqp;
+			ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
+			sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
+		}
+		goto done;
+	}
+clr_busy:
+	sqp->s_flags &= ~QIB_S_BUSY;
+unlock:
+	spin_unlock_irqrestore(&sqp->s_lock, flags);
+done:
+	if (qp && atomic_dec_and_test(&qp->refcount))
+		wake_up(&qp->wait);
+}
+
+/**
+ * qib_make_grh - construct a GRH header
+ * @ibp: a pointer to the IB port
+ * @hdr: a pointer to the GRH header being constructed
+ * @grh: the global route address to send to
+ * @hwords: the number of 32 bit words of header being sent
+ * @nwords: the number of 32 bit words of data being sent
+ *
+ * Return the size of the header in 32 bit words.
+ */
+u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr,
+		 struct ib_global_route *grh, u32 hwords, u32 nwords)
+{
+	hdr->version_tclass_flow =
+		cpu_to_be32((IB_GRH_VERSION << IB_GRH_VERSION_SHIFT) |
+			    (grh->traffic_class << IB_GRH_TCLASS_SHIFT) |
+			    (grh->flow_label << IB_GRH_FLOW_SHIFT));
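+	/*
+	 * paylen covers everything after the GRH: the remaining header
+	 * words (hwords less the 2-word LRH), the payload, and the CRC,
+	 * in bytes.  E.g. hwords = 5 (LRH + BTH), nwords = 64 and a
+	 * 1-word CRC give (5 - 2 + 64 + 1) * 4 = 272 bytes.
+	 */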
+	hdr->paylen = cpu_to_be16((hwords - 2 + nwords + SIZE_OF_CRC) << 2);
+	/* next_hdr is defined by C8-7 in ch. 8.4.1 */
+	hdr->next_hdr = IB_GRH_NEXT_HDR;
+	hdr->hop_limit = grh->hop_limit;
+	/* The SGID is 32-bit aligned. */
+	hdr->sgid.global.subnet_prefix = ibp->gid_prefix;
+	hdr->sgid.global.interface_id = grh->sgid_index ?
+		ibp->guids[grh->sgid_index - 1] : ppd_from_ibp(ibp)->guid;
+	hdr->dgid = grh->dgid;
+
+	/* GRH header size in 32-bit words. */
+	return sizeof(struct ib_grh) / sizeof(u32);
+}
+
+void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr,
+			 u32 bth0, u32 bth2)
+{
+	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
+	u16 lrh0;
+	u32 nwords;
+	u32 extra_bytes;
+
+	/* Construct the header. */
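+	/* -x & 3 is the 0-3 byte pad that rounds x up to a dword, e.g. 5 -> 3 */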
+	extra_bytes = -qp->s_cur_size & 3;
+	nwords = (qp->s_cur_size + extra_bytes) >> 2;
+	lrh0 = QIB_LRH_BTH;
+	if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
+		qp->s_hdrwords += qib_make_grh(ibp, &qp->s_hdr.u.l.grh,
+					       &qp->remote_ah_attr.grh,
+					       qp->s_hdrwords, nwords);
+		lrh0 = QIB_LRH_GRH;
+	}
+	lrh0 |= ibp->sl_to_vl[qp->remote_ah_attr.sl] << 12 |
+		qp->remote_ah_attr.sl << 4;
+	qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
+	qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
+	qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
+	qp->s_hdr.lrh[3] = cpu_to_be16(ppd_from_ibp(ibp)->lid |
+				       qp->remote_ah_attr.src_path_bits);
+	bth0 |= qib_get_pkey(ibp, qp->s_pkey_index);
+	bth0 |= extra_bytes << 20;
+	if (qp->s_mig_state == IB_MIG_MIGRATED)
+		bth0 |= IB_BTH_MIG_REQ;
+	ohdr->bth[0] = cpu_to_be32(bth0);
+	ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
+	ohdr->bth[2] = cpu_to_be32(bth2);
+}
+
+/**
+ * qib_do_send - perform a send on a QP
+ * @work: contains a pointer to the QP
+ *
+ * Process entries in the send work queue until credit or queue is
+ * exhausted.  Only allow one CPU to send a packet per QP (tasklet).
+ * Otherwise, two threads could send packets out of order.
+ */
+void qib_do_send(struct work_struct *work)
+{
+	struct qib_qp *qp = container_of(work, struct qib_qp, s_work);
+	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
+	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+	int (*make_req)(struct qib_qp *qp);
+	unsigned long flags;
+
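+	/*
+	 * A DLID that equals our own LID once the low LMC bits are masked
+	 * off is addressed to this port, so take the loopback path.
+	 */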
+	if ((qp->ibqp.qp_type == IB_QPT_RC ||
+	     qp->ibqp.qp_type == IB_QPT_UC) &&
+	    (qp->remote_ah_attr.dlid & ~((1 << ppd->lmc) - 1)) == ppd->lid) {
+		qib_ruc_loopback(qp);
+		return;
+	}
+
+	if (qp->ibqp.qp_type == IB_QPT_RC)
+		make_req = qib_make_rc_req;
+	else if (qp->ibqp.qp_type == IB_QPT_UC)
+		make_req = qib_make_uc_req;
+	else
+		make_req = qib_make_ud_req;
+
+	spin_lock_irqsave(&qp->s_lock, flags);
+
+	/* Return if we are already busy processing a work request. */
+	if (!qib_send_ok(qp)) {
+		spin_unlock_irqrestore(&qp->s_lock, flags);
+		return;
+	}
+
+	qp->s_flags |= QIB_S_BUSY;
+
+	spin_unlock_irqrestore(&qp->s_lock, flags);
+
+	do {
+		/* Check for a constructed packet to be sent. */
+		if (qp->s_hdrwords != 0) {
+			/*
+			 * If the packet cannot be sent now, return and
+			 * the send tasklet will be woken up later.
+			 */
+			if (qib_verbs_send(qp, &qp->s_hdr, qp->s_hdrwords,
+					   qp->s_cur_sge, qp->s_cur_size))
+				break;
+			/* Record that s_hdr is empty. */
+			qp->s_hdrwords = 0;
+		}
+	} while (make_req(qp));
+}
+
+/*
+ * This should be called with s_lock held.
+ */
+void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe,
+		       enum ib_wc_status status)
+{
+	u32 old_last, last;
+	unsigned i;
+
+	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_OR_FLUSH_SEND))
+		return;
+
+	for (i = 0; i < wqe->wr.num_sge; i++) {
+		struct qib_sge *sge = &wqe->sg_list[i];
+
+		atomic_dec(&sge->mr->refcount);
+	}
+	if (qp->ibqp.qp_type == IB_QPT_UD ||
+	    qp->ibqp.qp_type == IB_QPT_SMI ||
+	    qp->ibqp.qp_type == IB_QPT_GSI)
+		atomic_dec(&to_iah(wqe->wr.wr.ud.ah)->refcount);
+
+	/* See ch. 11.2.4.1 and 10.7.3.1 */
+	if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
+	    (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
+	    status != IB_WC_SUCCESS) {
+		struct ib_wc wc;
+
+		memset(&wc, 0, sizeof wc);
+		wc.wr_id = wqe->wr.wr_id;
+		wc.status = status;
+		wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
+		wc.qp = &qp->ibqp;
+		if (status == IB_WC_SUCCESS)
+			wc.byte_len = wqe->length;
+		qib_cq_enter(to_icq(qp->ibqp.send_cq), &wc,
+			     status != IB_WC_SUCCESS);
+	}
+
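+	/*
+	 * Advance s_last past the completed entry and move any other ring
+	 * indices that were parked on it forward with it.
+	 */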
+	last = qp->s_last;
+	old_last = last;
+	if (++last >= qp->s_size)
+		last = 0;
+	qp->s_last = last;
+	if (qp->s_acked == old_last)
+		qp->s_acked = last;
+	if (qp->s_cur == old_last)
+		qp->s_cur = last;
+	if (qp->s_tail == old_last)
+		qp->s_tail = last;
+	if (qp->state == IB_QPS_SQD && last == qp->s_cur)
+		qp->s_draining = 0;
+}
diff --git a/drivers/infiniband/hw/ipath/ipath_sd7220.c b/drivers/infiniband/hw/qib/qib_sd7220.c
index 2a68d9f624dd..0aeed0e74cb6 100644
--- a/drivers/infiniband/hw/ipath/ipath_sd7220.c
+++ b/drivers/infiniband/hw/qib/qib_sd7220.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -32,22 +32,40 @@
  */
 /*
  * This file contains all of the code that is specific to the SerDes
- * on the InfiniPath 7220 chip.
+ * on the QLogic_IB 7220 chip.
  */
 
 #include <linux/pci.h>
 #include <linux/delay.h>
 
-#include "ipath_kernel.h"
-#include "ipath_registers.h"
-#include "ipath_7220.h"
+#include "qib.h"
+#include "qib_7220.h"
+
+/*
+ * Same as in qib_iba7220.c, but just the registers needed here.
+ * Could move whole set to qib_7220.h, but decided better to keep
+ * local.
+ */
+#define KREG_IDX(regname) (QIB_7220_##regname##_OFFS / sizeof(u64))
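+/*
+ * e.g. KREG_IDX(Scratch) turns the byte offset QIB_7220_Scratch_OFFS
+ * into an index into the u64-mapped register space.
+ */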
+#define kr_hwerrclear KREG_IDX(HwErrClear)
+#define kr_hwerrmask KREG_IDX(HwErrMask)
+#define kr_hwerrstatus KREG_IDX(HwErrStatus)
+#define kr_ibcstatus KREG_IDX(IBCStatus)
+#define kr_ibserdesctrl KREG_IDX(IBSerDesCtrl)
+#define kr_scratch KREG_IDX(Scratch)
+#define kr_xgxs_cfg KREG_IDX(XGXSCfg)
+/* these are used only here, not in qib_iba7220.c */
+#define kr_ibsd_epb_access_ctrl KREG_IDX(ibsd_epb_access_ctrl)
+#define kr_ibsd_epb_transaction_reg KREG_IDX(ibsd_epb_transaction_reg)
+#define kr_pciesd_epb_transaction_reg KREG_IDX(pciesd_epb_transaction_reg)
+#define kr_pciesd_epb_access_ctrl KREG_IDX(pciesd_epb_access_ctrl)
+#define kr_serdes_ddsrxeq0 KREG_IDX(SerDes_DDSRXEQ0)
 
 /*
  * The IBSerDesMappTable is a memory that holds values to be stored in
- * various SerDes registers by IBC. It is not part of the normal kregs
- * map and is used in exactly one place, hence the #define below.
+ * various SerDes registers by IBC.
  */
-#define KR_IBSerDesMappTable (0x94000 / (sizeof(uint64_t)))
+#define kr_serdes_maptable KREG_IDX(IBSerDesMappTable)
 
 /*
  * Below used for sdnum parameter, selecting one of the two sections
@@ -71,42 +89,37 @@
 #define EPB_GLOBAL_WR (1U << (EPB_ADDR_SHF + 8))
 
 /* Forward declarations. */
-static int ipath_sd7220_reg_mod(struct ipath_devdata *dd, int sdnum, u32 loc,
-				u32 data, u32 mask);
-static int ibsd_mod_allchnls(struct ipath_devdata *dd, int loc, int val,
+static int qib_sd7220_reg_mod(struct qib_devdata *dd, int sdnum, u32 loc,
+			      u32 data, u32 mask);
+static int ibsd_mod_allchnls(struct qib_devdata *dd, int loc, int val,
 			     int mask);
-static int ipath_sd_trimdone_poll(struct ipath_devdata *dd);
-static void ipath_sd_trimdone_monitor(struct ipath_devdata *dd,
-				      const char *where);
-static int ipath_sd_setvals(struct ipath_devdata *dd);
-static int ipath_sd_early(struct ipath_devdata *dd);
-static int ipath_sd_dactrim(struct ipath_devdata *dd);
-/* Set the registers that IBC may muck with to their default "preset" values */
-int ipath_sd7220_presets(struct ipath_devdata *dd);
-static int ipath_internal_presets(struct ipath_devdata *dd);
+static int qib_sd_trimdone_poll(struct qib_devdata *dd);
+static void qib_sd_trimdone_monitor(struct qib_devdata *dd, const char *where);
+static int qib_sd_setvals(struct qib_devdata *dd);
+static int qib_sd_early(struct qib_devdata *dd);
+static int qib_sd_dactrim(struct qib_devdata *dd);
+static int qib_internal_presets(struct qib_devdata *dd);
 /* Tweak the register (CMUCTRL5) that contains the TRIMSELF controls */
-static int ipath_sd_trimself(struct ipath_devdata *dd, int val);
-static int epb_access(struct ipath_devdata *dd, int sdnum, int claim);
-
-void ipath_set_relock_poll(struct ipath_devdata *dd, int ibup);
+static int qib_sd_trimself(struct qib_devdata *dd, int val);
+static int epb_access(struct qib_devdata *dd, int sdnum, int claim);
 
 /*
  * Below keeps track of whether the "once per power-on" initialization has
  * been done, because uC code Version 1.32.17 or higher allows the uC to
  * be reset at will, and Automatic Equalization may require it. So the
- * state of the reset "pin", as reflected in was_reset parameter to
- * ipath_sd7220_init() is no longer valid. Instead, we check for the
+ * state of the reset "pin" is no longer valid. Instead, we check for the
  * actual uC code having been loaded.
  */
-static int ipath_ibsd_ucode_loaded(struct ipath_devdata *dd)
+static int qib_ibsd_ucode_loaded(struct qib_pportdata *ppd)
 {
-	if (!dd->serdes_first_init_done && (ipath_sd7220_ib_vfy(dd) > 0))
-		dd->serdes_first_init_done = 1;
-	return dd->serdes_first_init_done;
+	struct qib_devdata *dd = ppd->dd;
+	if (!dd->cspec->serdes_first_init_done && (qib_sd7220_ib_vfy(dd) > 0))
+		dd->cspec->serdes_first_init_done = 1;
+	return dd->cspec->serdes_first_init_done;
 }
 
-/* repeat #define for local use. "Real" #define is in ipath_iba7220.c */
-#define INFINIPATH_HWE_IB_UC_MEMORYPARITYERR      0x0000004000000000ULL
+/* repeat #define for local use. "Real" #define is in qib_iba7220.c */
+#define QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR      0x0000004000000000ULL
 #define IB_MPREG5 (EPB_LOC(6, 0, 0xE) | (1L << EPB_IB_UC_CS_SHF))
 #define IB_MPREG6 (EPB_LOC(6, 0, 0xF) | (1U << EPB_IB_UC_CS_SHF))
 #define UC_PAR_CLR_D 8
@@ -114,25 +127,25 @@ static int ipath_ibsd_ucode_loaded(struct ipath_devdata *dd)
 #define IB_CTRL2(chn) (EPB_LOC(chn, 7, 3) | EPB_IB_QUAD0_CS)
 #define START_EQ1(chan) EPB_LOC(chan, 7, 0x27)
 
-void ipath_sd7220_clr_ibpar(struct ipath_devdata *dd)
+void qib_sd7220_clr_ibpar(struct qib_devdata *dd)
 {
 	int ret;
 
 	/* clear, then re-enable parity errs */
-	ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6,
+	ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6,
 		UC_PAR_CLR_D, UC_PAR_CLR_M);
 	if (ret < 0) {
-		ipath_dev_err(dd, "Failed clearing IBSerDes Parity err\n");
+		qib_dev_err(dd, "Failed clearing IBSerDes Parity err\n");
 		goto bail;
 	}
-	ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0,
+	ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0,
 		UC_PAR_CLR_M);
 
-	ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
+	qib_read_kreg32(dd, kr_scratch);
 	udelay(4);
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
-		INFINIPATH_HWE_IB_UC_MEMORYPARITYERR);
-	ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
+	qib_write_kreg(dd, kr_hwerrclear,
+		QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR);
+	qib_read_kreg32(dd, kr_scratch);
 bail:
 	return;
 }
@@ -146,7 +159,7 @@ bail:
 #define IB_PGUDP(chn) (EPB_LOC((chn), 2, 1) | EPB_IB_QUAD0_CS)
 #define IB_CMUDONE(chn) (EPB_LOC((chn), 7, 0xF) | EPB_IB_QUAD0_CS)
 
-static int ipath_resync_ibepb(struct ipath_devdata *dd)
+static int qib_resync_ibepb(struct qib_devdata *dd)
 {
 	int ret, pat, tries, chn;
 	u32 loc;
@@ -155,43 +168,42 @@ static int ipath_resync_ibepb(struct ipath_devdata *dd)
 	chn = 0;
 	for (tries = 0; tries < (4 * IBSD_RESYNC_TRIES); ++tries) {
 		loc = IB_PGUDP(chn);
-		ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0);
+		ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0);
 		if (ret < 0) {
-			ipath_dev_err(dd, "Failed read in resync\n");
+			qib_dev_err(dd, "Failed read in resync\n");
 			continue;
 		}
 		if (ret != 0xF0 && ret != 0x55 && tries == 0)
-			ipath_dev_err(dd, "unexpected pattern in resync\n");
+			qib_dev_err(dd, "unexpected pattern in resync\n");
 		pat = ret ^ 0xA5; /* alternate F0 and 55 */
-		ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, loc, pat, 0xFF);
+		ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, pat, 0xFF);
 		if (ret < 0) {
-			ipath_dev_err(dd, "Failed write in resync\n");
+			qib_dev_err(dd, "Failed write in resync\n");
 			continue;
 		}
-		ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0);
+		ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0);
 		if (ret < 0) {
-			ipath_dev_err(dd, "Failed re-read in resync\n");
+			qib_dev_err(dd, "Failed re-read in resync\n");
 			continue;
 		}
 		if (ret != pat) {
-			ipath_dev_err(dd, "Failed compare1 in resync\n");
+			qib_dev_err(dd, "Failed compare1 in resync\n");
 			continue;
 		}
 		loc = IB_CMUDONE(chn);
-		ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0);
+		ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0);
 		if (ret < 0) {
-			ipath_dev_err(dd, "Failed CMUDONE rd in resync\n");
+			qib_dev_err(dd, "Failed CMUDONE rd in resync\n");
 			continue;
 		}
 		if ((ret & 0x70) != ((chn << 4) | 0x40)) {
-			ipath_dev_err(dd, "Bad CMUDONE value %02X, chn %d\n",
-				ret, chn);
+			qib_dev_err(dd, "Bad CMUDONE value %02X, chn %d\n",
+				    ret, chn);
 			continue;
 		}
 		if (++chn == 4)
 			break;  /* Success */
 	}
-	ipath_cdbg(VERBOSE, "Resync in %d tries\n", tries);
 	return (ret > 0) ? 0 : ret;
 }
 
@@ -199,32 +211,32 @@ static int ipath_resync_ibepb(struct ipath_devdata *dd)
  * Localize the stuff that should be done to change IB uC reset
  * returns <0 for errors.
  */
-static int ipath_ibsd_reset(struct ipath_devdata *dd, int assert_rst)
+static int qib_ibsd_reset(struct qib_devdata *dd, int assert_rst)
 {
 	u64 rst_val;
 	int ret = 0;
 	unsigned long flags;
 
-	rst_val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibserdesctrl);
+	rst_val = qib_read_kreg64(dd, kr_ibserdesctrl);
 	if (assert_rst) {
 		/*
 		 * Vendor recommends "interrupting" uC before reset, to
 		 * minimize possible glitches.
 		 */
-		spin_lock_irqsave(&dd->ipath_sdepb_lock, flags);
+		spin_lock_irqsave(&dd->cspec->sdepb_lock, flags);
 		epb_access(dd, IB_7220_SERDES, 1);
 		rst_val |= 1ULL;
 		/* Squelch possible parity error from _asserting_ reset */
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
-			dd->ipath_hwerrmask &
-			~INFINIPATH_HWE_IB_UC_MEMORYPARITYERR);
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibserdesctrl, rst_val);
+		qib_write_kreg(dd, kr_hwerrmask,
+			       dd->cspec->hwerrmask &
+			       ~QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR);
+		qib_write_kreg(dd, kr_ibserdesctrl, rst_val);
 		/* flush write, delay to ensure it took effect */
-		ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
+		qib_read_kreg32(dd, kr_scratch);
 		udelay(2);
 		/* once it's reset, can remove interrupt */
 		epb_access(dd, IB_7220_SERDES, -1);
-		spin_unlock_irqrestore(&dd->ipath_sdepb_lock, flags);
+		spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags);
 	} else {
 		/*
 		 * Before we de-assert reset, we need to deal with
@@ -235,46 +247,46 @@ static int ipath_ibsd_reset(struct ipath_devdata *dd, int assert_rst)
 		 */
 		u64 val;
 		rst_val &= ~(1ULL);
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
-			dd->ipath_hwerrmask &
-			~INFINIPATH_HWE_IB_UC_MEMORYPARITYERR);
+		qib_write_kreg(dd, kr_hwerrmask,
+			       dd->cspec->hwerrmask &
+			       ~QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR);
 
-		ret = ipath_resync_ibepb(dd);
+		ret = qib_resync_ibepb(dd);
 		if (ret < 0)
-			ipath_dev_err(dd, "unable to re-sync IB EPB\n");
+			qib_dev_err(dd, "unable to re-sync IB EPB\n");
 
 		/* set uC control regs to suppress parity errs */
-		ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG5, 1, 1);
+		ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG5, 1, 1);
 		if (ret < 0)
 			goto bail;
 		/* IB uC code past Version 1.32.17 allow suppression of wdog */
-		ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0x80,
+		ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0x80,
 			0x80);
 		if (ret < 0) {
-			ipath_dev_err(dd, "Failed to set WDOG disable\n");
+			qib_dev_err(dd, "Failed to set WDOG disable\n");
 			goto bail;
 		}
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibserdesctrl, rst_val);
+		qib_write_kreg(dd, kr_ibserdesctrl, rst_val);
 		/* flush write, delay for startup */
-		ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
+		qib_read_kreg32(dd, kr_scratch);
 		udelay(1);
 		/* clear, then re-enable parity errs */
-		ipath_sd7220_clr_ibpar(dd);
-		val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus);
-		if (val & INFINIPATH_HWE_IB_UC_MEMORYPARITYERR) {
-			ipath_dev_err(dd, "IBUC Parity still set after RST\n");
-			dd->ipath_hwerrmask &=
-				~INFINIPATH_HWE_IB_UC_MEMORYPARITYERR;
+		qib_sd7220_clr_ibpar(dd);
+		val = qib_read_kreg64(dd, kr_hwerrstatus);
+		if (val & QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR) {
+			qib_dev_err(dd, "IBUC Parity still set after RST\n");
+			dd->cspec->hwerrmask &=
+				~QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR;
 		}
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
-			dd->ipath_hwerrmask);
+		qib_write_kreg(dd, kr_hwerrmask,
+			dd->cspec->hwerrmask);
 	}
 
 bail:
 	return ret;
 }
 
-static void ipath_sd_trimdone_monitor(struct ipath_devdata *dd,
+static void qib_sd_trimdone_monitor(struct qib_devdata *dd,
        const char *where)
 {
 	int ret, chn, baduns;
@@ -286,69 +298,71 @@ static void ipath_sd_trimdone_monitor(struct ipath_devdata *dd,
 	/* give time for reset to settle out in EPB */
 	udelay(2);
 
-	ret = ipath_resync_ibepb(dd);
+	ret = qib_resync_ibepb(dd);
 	if (ret < 0)
-		ipath_dev_err(dd, "not able to re-sync IB EPB (%s)\n", where);
+		qib_dev_err(dd, "not able to re-sync IB EPB (%s)\n", where);
 
 	/* Do "sacrificial read" to get EPB in sane state after reset */
-	ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, IB_CTRL2(0), 0, 0);
+	ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_CTRL2(0), 0, 0);
 	if (ret < 0)
-		ipath_dev_err(dd, "Failed TRIMDONE 1st read, (%s)\n", where);
+		qib_dev_err(dd, "Failed TRIMDONE 1st read, (%s)\n", where);
 
 	/* Check/show "summary" Trim-done bit in IBCStatus */
-	val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
-	if (val & (1ULL << 11))
-		ipath_cdbg(VERBOSE, "IBCS TRIMDONE set (%s)\n", where);
-	else
-		ipath_dev_err(dd, "IBCS TRIMDONE clear (%s)\n", where);
-
+	val = qib_read_kreg64(dd, kr_ibcstatus);
+	if (!(val & (1ULL << 11)))
+		qib_dev_err(dd, "IBCS TRIMDONE clear (%s)\n", where);
+	/*
+	 * Do "dummy read/mod/wr" to get EPB in sane state after reset
+	 * The default value for MPREG6 is 0.
+	 */
 	udelay(2);
 
-	ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0x80, 0x80);
+	ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0x80, 0x80);
 	if (ret < 0)
-		ipath_dev_err(dd, "Failed Dummy RMW, (%s)\n", where);
+		qib_dev_err(dd, "Failed Dummy RMW, (%s)\n", where);
 	udelay(10);
 
 	baduns = 0;
 
 	for (chn = 3; chn >= 0; --chn) {
 		/* Read CTRL reg for each channel to check TRIMDONE */
-		ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES,
+		ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
 			IB_CTRL2(chn), 0, 0);
 		if (ret < 0)
-			ipath_dev_err(dd, "Failed checking TRIMDONE, chn %d"
-				" (%s)\n", chn, where);
+			qib_dev_err(dd, "Failed checking TRIMDONE, chn %d"
+				    " (%s)\n", chn, where);
 
 		if (!(ret & 0x10)) {
 			int probe;
+
 			baduns |= (1 << chn);
-			ipath_dev_err(dd, "TRIMDONE cleared on chn %d (%02X)."
+			qib_dev_err(dd, "TRIMDONE cleared on chn %d (%02X)."
 				" (%s)\n", chn, ret, where);
-			probe = ipath_sd7220_reg_mod(dd, IB_7220_SERDES,
+			probe = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
 				IB_PGUDP(0), 0, 0);
-			ipath_dev_err(dd, "probe is %d (%02X)\n",
+			qib_dev_err(dd, "probe is %d (%02X)\n",
 				probe, probe);
-			probe = ipath_sd7220_reg_mod(dd, IB_7220_SERDES,
+			probe = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
 				IB_CTRL2(chn), 0, 0);
-			ipath_dev_err(dd, "re-read: %d (%02X)\n",
+			qib_dev_err(dd, "re-read: %d (%02X)\n",
 				probe, probe);
-			ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES,
+			ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
 				IB_CTRL2(chn), 0x10, 0x10);
 			if (ret < 0)
-				ipath_dev_err(dd,
+				qib_dev_err(dd,
 					"Err on TRIMDONE rewrite1\n");
 		}
 	}
 	for (chn = 3; chn >= 0; --chn) {
 		/* Read CTRL reg for each channel to check TRIMDONE */
 		if (baduns & (1 << chn)) {
-			ipath_dev_err(dd,
+			qib_dev_err(dd,
 				"Reseting TRIMDONE on chn %d (%s)\n",
 				chn, where);
-			ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES,
+			ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
 				IB_CTRL2(chn), 0x10, 0x10);
 			if (ret < 0)
-				ipath_dev_err(dd, "Failed re-setting "
+				qib_dev_err(dd, "Failed re-setting "
 					"TRIMDONE, chn %d (%s)\n",
 					chn, where);
 		}
@@ -361,96 +375,86 @@ static void ipath_sd_trimdone_monitor(struct ipath_devdata *dd,
  * Post IB uC code version 1.32.17, was_reset being 1 is not really
  * informative, so we double-check.
  */
-int ipath_sd7220_init(struct ipath_devdata *dd, int was_reset)
+int qib_sd7220_init(struct qib_devdata *dd)
 {
 	int ret = 1; /* default to failure */
-	int first_reset;
-	int val_stat;
+	int first_reset, was_reset;
 
+	/* SERDES MPU reset recorded in D0 */
+	was_reset = (qib_read_kreg64(dd, kr_ibserdesctrl) & 1);
 	if (!was_reset) {
 		/* entered with reset not asserted, we need to do it */
-		ipath_ibsd_reset(dd, 1);
-		ipath_sd_trimdone_monitor(dd, "Driver-reload");
+		qib_ibsd_reset(dd, 1);
+		qib_sd_trimdone_monitor(dd, "Driver-reload");
 	}
-
 	/* Substitute our deduced value for was_reset */
-	ret = ipath_ibsd_ucode_loaded(dd);
-	if (ret < 0) {
-		ret = 1;
-		goto done;
-	}
-	first_reset = !ret; /* First reset if IBSD uCode not yet loaded */
+	ret = qib_ibsd_ucode_loaded(dd->pport);
+	if (ret < 0)
+		goto bail;
 
+	first_reset = !ret; /* First reset if IBSD uCode not yet loaded */
 	/*
 	 * Alter some regs per vendor latest doc, reset-defaults
 	 * are not right for IB.
 	 */
-	ret = ipath_sd_early(dd);
+	ret = qib_sd_early(dd);
 	if (ret < 0) {
-		ipath_dev_err(dd, "Failed to set IB SERDES early defaults\n");
-		ret = 1;
-		goto done;
+		qib_dev_err(dd, "Failed to set IB SERDES early defaults\n");
+		goto bail;
 	}
-
 	/*
 	 * Set DAC manual trim IB.
 	 * We only do this once after chip has been reset (usually
 	 * same as once per system boot).
 	 */
 	if (first_reset) {
-		ret = ipath_sd_dactrim(dd);
+		ret = qib_sd_dactrim(dd);
 		if (ret < 0) {
-			ipath_dev_err(dd, "Failed IB SERDES DAC trim\n");
-			ret = 1;
-			goto done;
+			qib_dev_err(dd, "Failed IB SERDES DAC trim\n");
+			goto bail;
 		}
 	}
-
 	/*
 	 * Set various registers (DDS and RXEQ) that will be
 	 * controlled by IBC (in 1.2 mode) to reasonable preset values
 	 * Calling the "internal" version avoids the "check for needed"
 	 * and "trimdone monitor" that might be counter-productive.
 	 */
-	ret = ipath_internal_presets(dd);
+	ret = qib_internal_presets(dd);
 	if (ret < 0) {
-		ipath_dev_err(dd, "Failed to set IB SERDES presets\n");
-		ret = 1;
-		goto done;
+		qib_dev_err(dd, "Failed to set IB SERDES presets\n");
+		goto bail;
 	}
-	ret = ipath_sd_trimself(dd, 0x80);
+	ret = qib_sd_trimself(dd, 0x80);
 	if (ret < 0) {
-		ipath_dev_err(dd, "Failed to set IB SERDES TRIMSELF\n");
-		ret = 1;
-		goto done;
+		qib_dev_err(dd, "Failed to set IB SERDES TRIMSELF\n");
+		goto bail;
 	}
 
 	/* Load image, then try to verify */
-	ret = 0;	/* Assume success */
+	ret = 0;        /* Assume success */
 	if (first_reset) {
 		int vfy;
 		int trim_done;
-		ipath_dbg("SerDes uC was reset, reloading PRAM\n");
-		ret = ipath_sd7220_ib_load(dd);
+
+		ret = qib_sd7220_ib_load(dd);
 		if (ret < 0) {
-			ipath_dev_err(dd, "Failed to load IB SERDES image\n");
-			ret = 1;
-			goto done;
-		}
+			qib_dev_err(dd, "Failed to load IB SERDES image\n");
+			goto bail;
+		} else {
+			/* Loaded image, try to verify */
+			vfy = qib_sd7220_ib_vfy(dd);
+			if (vfy != ret) {
+				qib_dev_err(dd, "SERDES PRAM VFY failed\n");
+				goto bail;
+			} /* end if verified */
+		} /* end if loaded */
 
-		/* Loaded image, try to verify */
-		vfy = ipath_sd7220_ib_vfy(dd);
-		if (vfy != ret) {
-			ipath_dev_err(dd, "SERDES PRAM VFY failed\n");
-			ret = 1;
-			goto done;
-		}
 		/*
 		 * Loaded and verified. Almost good...
 		 * hold "success" in ret
 		 */
 		ret = 0;
-
 		/*
 		 * Prev steps all worked, continue bringup
 		 * De-assert RESET to uC, only in first reset, to allow
@@ -461,45 +465,47 @@ int ipath_sd7220_init(struct ipath_devdata *dd, int was_reset)
 		 */
 		ret = ibsd_mod_allchnls(dd, START_EQ1(0), 0, 0x38);
 		if (ret < 0) {
-			ipath_dev_err(dd, "Failed clearing START_EQ1\n");
-			ret = 1;
-			goto done;
+			qib_dev_err(dd, "Failed clearing START_EQ1\n");
+			goto bail;
 		}
 
-		ipath_ibsd_reset(dd, 0);
+		qib_ibsd_reset(dd, 0);
 		/*
 		 * If this is not the first reset, trimdone should be set
-		 * already.
+		 * already. We may need to revisit this.
 		 */
-		trim_done = ipath_sd_trimdone_poll(dd);
+		trim_done = qib_sd_trimdone_poll(dd);
 		/*
 		 * Whether or not trimdone succeeded, we need to put the
 		 * uC back into reset to avoid a possible fight with the
 		 * IBC state-machine.
 		 */
-		ipath_ibsd_reset(dd, 1);
+		qib_ibsd_reset(dd, 1);
 
 		if (!trim_done) {
-			ipath_dev_err(dd, "No TRIMDONE seen\n");
-			ret = 1;
-			goto done;
+			qib_dev_err(dd, "No TRIMDONE seen\n");
+			goto bail;
 		}
-
-		ipath_sd_trimdone_monitor(dd, "First-reset");
+		/*
+		 * DEBUG: check each time we reset if trimdone bits have
+		 * gotten cleared, and re-set them.
+		 */
+		qib_sd_trimdone_monitor(dd, "First-reset");
 		/* Remember so we do not re-do the load, dactrim, etc. */
-		dd->serdes_first_init_done = 1;
+		dd->cspec->serdes_first_init_done = 1;
 	}
 	/*
-	 * Setup for channel training and load values for
+	 * Setup for channel training and load values for
 	 * RxEq and DDS in tables used by IBC in IB1.2 mode
 	 */
-
-	val_stat = ipath_sd_setvals(dd);
-	if (val_stat < 0)
-		ret = 1;
+	ret = 0;
+	if (qib_sd_setvals(dd) >= 0)
+		goto done;
+bail:
+	ret = 1;
 done:
 	/* start relock timer regardless, but start at 1 second */
-	ipath_set_relock_poll(dd, -1);
+	set_7220_relock_poll(dd, -1);
 	return ret;
 }
 
@@ -517,7 +523,7 @@ done:
  * the "claim" parameter is >0 to claim, <0 to release, 0 to query.
  * Returns <0 for errors, >0 if we had ownership, else 0.
  */
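+/*
+ * Sketch of the claim/release pattern (mirrors the callers below, not
+ * new functionality):
+ *	if (epb_access(dd, IB_7220_SERDES, 1) < 0)
+ *		return -1;
+ *	...EPB transactions...
+ *	epb_access(dd, IB_7220_SERDES, -1);
+ */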
-static int epb_access(struct ipath_devdata *dd, int sdnum, int claim)
+static int epb_access(struct qib_devdata *dd, int sdnum, int claim)
 {
 	u16 acc;
 	u64 accval;
@@ -525,28 +531,30 @@ static int epb_access(struct ipath_devdata *dd, int sdnum, int claim)
 	u64 oct_sel = 0;
 
 	switch (sdnum) {
-	case IB_7220_SERDES :
+	case IB_7220_SERDES:
 		/*
 		 * The IB SERDES "ownership" is fairly simple. A single each
 		 * request/grant.
 		 */
-		acc = dd->ipath_kregs->kr_ib_epbacc;
+		acc = kr_ibsd_epb_access_ctrl;
 		break;
-	case PCIE_SERDES0 :
-	case PCIE_SERDES1 :
+
+	case PCIE_SERDES0:
+	case PCIE_SERDES1:
 		/* PCIe SERDES has two "octants", need to select which */
-		acc = dd->ipath_kregs->kr_pcie_epbacc;
+		acc = kr_pciesd_epb_access_ctrl;
 		oct_sel = (2 << (sdnum - PCIE_SERDES0));
 		break;
-	default :
+
+	default:
 		return 0;
 	}
 
 	/* Make sure any outstanding transaction was seen */
-	ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
+	qib_read_kreg32(dd, kr_scratch);
 	udelay(15);
 
-	accval = ipath_read_kreg32(dd, acc);
+	accval = qib_read_kreg32(dd, acc);
 
 	owned = !!(accval & EPB_ACC_GNT);
 	if (claim < 0) {
@@ -557,22 +565,22 @@ static int epb_access(struct ipath_devdata *dd, int sdnum, int claim)
 		 * Both should be clear
 		 */
 		u64 newval = 0;
-		ipath_write_kreg(dd, acc, newval);
+		qib_write_kreg(dd, acc, newval);
 		/* First read after write is not trustworthy */
-		pollval = ipath_read_kreg32(dd, acc);
+		pollval = qib_read_kreg32(dd, acc);
 		udelay(5);
-		pollval = ipath_read_kreg32(dd, acc);
+		pollval = qib_read_kreg32(dd, acc);
 		if (pollval & EPB_ACC_GNT)
 			owned = -1;
 	} else if (claim > 0) {
 		/* Need to claim */
 		u64 pollval;
 		u64 newval = EPB_ACC_REQ | oct_sel;
-		ipath_write_kreg(dd, acc, newval);
+		qib_write_kreg(dd, acc, newval);
 		/* First read after write is not trustworthy */
-		pollval = ipath_read_kreg32(dd, acc);
+		pollval = qib_read_kreg32(dd, acc);
 		udelay(5);
-		pollval = ipath_read_kreg32(dd, acc);
+		pollval = qib_read_kreg32(dd, acc);
 		if (!(pollval & EPB_ACC_GNT))
 			owned = -1;
 	}
@@ -582,18 +590,17 @@ static int epb_access(struct ipath_devdata *dd, int sdnum, int claim)
 /*
  * Lemma to deal with race condition of write..read to epb regs
  */
-static int epb_trans(struct ipath_devdata *dd, u16 reg, u64 i_val, u64 *o_vp)
+static int epb_trans(struct qib_devdata *dd, u16 reg, u64 i_val, u64 *o_vp)
 {
 	int tries;
 	u64 transval;
 
-
-	ipath_write_kreg(dd, reg, i_val);
+	qib_write_kreg(dd, reg, i_val);
 	/* Throw away first read, as RDY bit may be stale */
-	transval = ipath_read_kreg64(dd, reg);
+	transval = qib_read_kreg64(dd, reg);
 
 	for (tries = EPB_TRANS_TRIES; tries; --tries) {
-		transval = ipath_read_kreg32(dd, reg);
+		transval = qib_read_kreg32(dd, reg);
 		if (transval & EPB_TRANS_RDY)
 			break;
 		udelay(5);
@@ -606,21 +613,20 @@ static int epb_trans(struct ipath_devdata *dd, u16 reg, u64 i_val, u64 *o_vp)
 }
 
 /**
- *
- * ipath_sd7220_reg_mod - modify SERDES register
- * @dd: the infinipath device
+ * qib_sd7220_reg_mod - modify SERDES register
+ * @dd: the qlogic_ib device
  * @sdnum: which SERDES to access
  * @loc: location - channel, element, register, as packed by EPB_LOC() macro.
  * @wd: Write Data - value to set in register
  * @mask: ones where data should be spliced into reg.
  *
- * Basic register read/modify/write, with un-needed accesses elided. That is,
+ * Basic register read/modify/write, with un-needed accesses elided. That is,
  * a mask of zero will prevent write, while a mask of 0xFF will prevent read.
  * returns current (presumed, if a write was done) contents of selected
  * register, or <0 if errors.
  */
-static int ipath_sd7220_reg_mod(struct ipath_devdata *dd, int sdnum, u32 loc,
-				u32 wd, u32 mask)
+static int qib_sd7220_reg_mod(struct qib_devdata *dd, int sdnum, u32 loc,
+			      u32 wd, u32 mask)
 {
 	u16 trans;
 	u64 transval;
@@ -629,14 +635,16 @@ static int ipath_sd7220_reg_mod(struct ipath_devdata *dd, int sdnum, u32 loc,
 	unsigned long flags;
 
 	switch (sdnum) {
-	case IB_7220_SERDES :
-		trans = dd->ipath_kregs->kr_ib_epbtrans;
+	case IB_7220_SERDES:
+		trans = kr_ibsd_epb_transaction_reg;
 		break;
-	case PCIE_SERDES0 :
-	case PCIE_SERDES1 :
-		trans = dd->ipath_kregs->kr_pcie_epbtrans;
+
+	case PCIE_SERDES0:
+	case PCIE_SERDES1:
+		trans = kr_pciesd_epb_transaction_reg;
 		break;
-	default :
+
+	default:
 		return -1;
 	}
 
@@ -644,23 +652,23 @@ static int ipath_sd7220_reg_mod(struct ipath_devdata *dd, int sdnum, u32 loc,
 	 * All access is locked in software (vs other host threads) and
 	 * hardware (vs uC access).
 	 */
-	spin_lock_irqsave(&dd->ipath_sdepb_lock, flags);
+	spin_lock_irqsave(&dd->cspec->sdepb_lock, flags);
 
 	owned = epb_access(dd, sdnum, 1);
 	if (owned < 0) {
-		spin_unlock_irqrestore(&dd->ipath_sdepb_lock, flags);
+		spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags);
 		return -1;
 	}
 	ret = 0;
 	for (tries = EPB_TRANS_TRIES; tries; --tries) {
-		transval = ipath_read_kreg32(dd, trans);
+		transval = qib_read_kreg32(dd, trans);
 		if (transval & EPB_TRANS_RDY)
 			break;
 		udelay(5);
 	}
 
 	if (tries > 0) {
-		tries = 1;	/* to make read-skip work */
+		tries = 1;      /* to make read-skip work */
 		if (mask != 0xFF) {
 			/*
 			 * Not a pure write, so need to read.
@@ -688,7 +696,7 @@ static int ipath_sd7220_reg_mod(struct ipath_devdata *dd, int sdnum, u32 loc,
 	else
 		ret = transval & EPB_DATA_MASK;
 
-	spin_unlock_irqrestore(&dd->ipath_sdepb_lock, flags);
+	spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags);
 	if (tries <= 0)
 		ret = -1;
 	return ret;
@@ -707,7 +715,7 @@ static int ipath_sd7220_reg_mod(struct ipath_devdata *dd, int sdnum, u32 loc,
 #define EPB_RAMDATA EPB_LOC(6, 0, 5)
 
 /* Transfer data to/from uC Program RAM of IB or PCIe SerDes */
-static int ipath_sd7220_ram_xfer(struct ipath_devdata *dd, int sdnum, u32 loc,
+static int qib_sd7220_ram_xfer(struct qib_devdata *dd, int sdnum, u32 loc,
 			       u8 *buf, int cnt, int rd_notwr)
 {
 	u16 trans;
@@ -723,29 +731,28 @@ static int ipath_sd7220_ram_xfer(struct ipath_devdata *dd, int sdnum, u32 loc,
 
 	/* Pick appropriate transaction reg and "Chip select" for this serdes */
 	switch (sdnum) {
-	case IB_7220_SERDES :
+	case IB_7220_SERDES:
 		csbit = 1ULL << EPB_IB_UC_CS_SHF;
-		trans = dd->ipath_kregs->kr_ib_epbtrans;
+		trans = kr_ibsd_epb_transaction_reg;
 		break;
-	case PCIE_SERDES0 :
-	case PCIE_SERDES1 :
+
+	case PCIE_SERDES0:
+	case PCIE_SERDES1:
 		/* PCIe SERDES has uC "chip select" in different bit, too */
 		csbit = 1ULL << EPB_PCIE_UC_CS_SHF;
-		trans = dd->ipath_kregs->kr_pcie_epbtrans;
+		trans = kr_pciesd_epb_transaction_reg;
 		break;
-	default :
+
+	default:
 		return -1;
 	}
 
 	op = rd_notwr ? "Rd" : "Wr";
-	spin_lock_irqsave(&dd->ipath_sdepb_lock, flags);
+	spin_lock_irqsave(&dd->cspec->sdepb_lock, flags);
 
 	owned = epb_access(dd, sdnum, 1);
 	if (owned < 0) {
-		spin_unlock_irqrestore(&dd->ipath_sdepb_lock, flags);
-		ipath_dbg("Could not get %s access to %s EPB: %X, loc %X\n",
-			op, (sdnum == IB_7220_SERDES) ? "IB" : "PCIe",
-			owned, loc);
+		spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags);
 		return -1;
 	}
 
@@ -758,16 +765,14 @@ static int ipath_sd7220_ram_xfer(struct ipath_devdata *dd, int sdnum, u32 loc,
 	 */
 	addr = loc & 0x1FFF;
 	for (tries = EPB_TRANS_TRIES; tries; --tries) {
-		transval = ipath_read_kreg32(dd, trans);
+		transval = qib_read_kreg32(dd, trans);
 		if (transval & EPB_TRANS_RDY)
 			break;
 		udelay(5);
 	}
 
 	sofar = 0;
-	if (tries <= 0)
-		ipath_dbg("No initial RDY on EPB access request\n");
-	else {
+	if (tries > 0) {
 		/*
 		 * Every "memory" access is doubly-indirect.
 		 * We set two bytes of address, then read/write
@@ -778,8 +783,6 @@ static int ipath_sd7220_ram_xfer(struct ipath_devdata *dd, int sdnum, u32 loc,
 		transval = csbit | EPB_UC_CTL |
 			(rd_notwr ? EPB_ROM_R : EPB_ROM_W);
 		tries = epb_trans(dd, trans, transval, &transval);
-		if (tries <= 0)
-			ipath_dbg("No EPB response to uC %s cmd\n", op);
 		while (tries > 0 && sofar < cnt) {
 			if (!sofar) {
 				/* Only set address at start of chunk */
@@ -787,18 +790,14 @@ static int ipath_sd7220_ram_xfer(struct ipath_devdata *dd, int sdnum, u32 loc,
 				transval = csbit | EPB_MADDRH | addrbyte;
 				tries = epb_trans(dd, trans, transval,
 						  &transval);
-				if (tries <= 0) {
-					ipath_dbg("No EPB response ADDRH\n");
+				if (tries <= 0)
 					break;
-				}
 				addrbyte = (addr + sofar) & 0xFF;
 				transval = csbit | EPB_MADDRL | addrbyte;
 				tries = epb_trans(dd, trans, transval,
 						 &transval);
-				if (tries <= 0) {
-					ipath_dbg("No EPB response ADDRL\n");
+				if (tries <= 0)
 					break;
-				}
 			}
 
 			if (rd_notwr)
@@ -806,10 +805,8 @@ static int ipath_sd7220_ram_xfer(struct ipath_devdata *dd, int sdnum, u32 loc,
 			else
 				transval = csbit | EPB_ROMDATA | buf[sofar];
 			tries = epb_trans(dd, trans, transval, &transval);
-			if (tries <= 0) {
-				ipath_dbg("No EPB response DATA\n");
+			if (tries <= 0)
 				break;
-			}
 			if (rd_notwr)
 				buf[sofar] = transval & EPB_DATA_MASK;
 			++sofar;
@@ -817,8 +814,6 @@ static int ipath_sd7220_ram_xfer(struct ipath_devdata *dd, int sdnum, u32 loc,
 		/* Finally, clear control-bit for Read or Write */
 		transval = csbit | EPB_UC_CTL;
 		tries = epb_trans(dd, trans, transval, &transval);
-		if (tries <= 0)
-			ipath_dbg("No EPB response to drop of uC %s cmd\n", op);
 	}
 
 	ret = sofar;
@@ -826,18 +821,16 @@ static int ipath_sd7220_ram_xfer(struct ipath_devdata *dd, int sdnum, u32 loc,
 	if (epb_access(dd, sdnum, -1) < 0)
 		ret = -1;
 
-	spin_unlock_irqrestore(&dd->ipath_sdepb_lock, flags);
-	if (tries <= 0) {
-		ipath_dbg("SERDES PRAM %s failed after %d bytes\n", op, sofar);
+	spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags);
+	if (tries <= 0)
 		ret = -1;
-	}
 	return ret;
 }
 
 #define PROG_CHUNK 64
 
-int ipath_sd7220_prog_ld(struct ipath_devdata *dd, int sdnum,
-	u8 *img, int len, int offset)
+int qib_sd7220_prog_ld(struct qib_devdata *dd, int sdnum,
+		       u8 *img, int len, int offset)
 {
 	int cnt, sofar, req;
 
@@ -846,7 +839,7 @@ int ipath_sd7220_prog_ld(struct ipath_devdata *dd, int sdnum,
 		req = len - sofar;
 		if (req > PROG_CHUNK)
 			req = PROG_CHUNK;
-		cnt = ipath_sd7220_ram_xfer(dd, sdnum, offset + sofar,
+		cnt = qib_sd7220_ram_xfer(dd, sdnum, offset + sofar,
 					  img + sofar, req, 0);
 		if (cnt < req) {
 			sofar = -1;
@@ -860,8 +853,8 @@ int ipath_sd7220_prog_ld(struct ipath_devdata *dd, int sdnum,
 #define VFY_CHUNK 64
 #define SD_PRAM_ERROR_LIMIT 42
 
-int ipath_sd7220_prog_vfy(struct ipath_devdata *dd, int sdnum,
-	const u8 *img, int len, int offset)
+int qib_sd7220_prog_vfy(struct qib_devdata *dd, int sdnum,
+			const u8 *img, int len, int offset)
 {
 	int cnt, sofar, req, idx, errors;
 	unsigned char readback[VFY_CHUNK];
@@ -872,7 +865,7 @@ int ipath_sd7220_prog_vfy(struct ipath_devdata *dd, int sdnum,
 		req = len - sofar;
 		if (req > VFY_CHUNK)
 			req = VFY_CHUNK;
-		cnt = ipath_sd7220_ram_xfer(dd, sdnum, sofar + offset,
+		cnt = qib_sd7220_ram_xfer(dd, sdnum, sofar + offset,
 					  readback, req, 1);
 		if (cnt < req) {
 			/* failed in read itself */
@@ -888,11 +881,13 @@ int ipath_sd7220_prog_vfy(struct ipath_devdata *dd, int sdnum,
 	return errors ? -errors : sofar;
 }
 
-/* IRQ not set up at this point in init, so we poll. */
+/*
+ * IRQ not set up at this point in init, so we poll.
+ */
 #define IB_SERDES_TRIM_DONE (1ULL << 11)
 #define TRIM_TMO (30)
 
-static int ipath_sd_trimdone_poll(struct ipath_devdata *dd)
+static int qib_sd_trimdone_poll(struct qib_devdata *dd)
 {
 	int trim_tmo, ret;
 	uint64_t val;
@@ -903,16 +898,15 @@ static int ipath_sd_trimdone_poll(struct ipath_devdata *dd)
 	 */
 	ret = 0;
 	for (trim_tmo = 0; trim_tmo < TRIM_TMO; ++trim_tmo) {
-		val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
+		val = qib_read_kreg64(dd, kr_ibcstatus);
 		if (val & IB_SERDES_TRIM_DONE) {
-			ipath_cdbg(VERBOSE, "TRIMDONE after %d\n", trim_tmo);
 			ret = 1;
 			break;
 		}
 		msleep(10);
 	}
 	if (trim_tmo >= TRIM_TMO) {
-		ipath_dev_err(dd, "No TRIMDONE in %d tries\n", trim_tmo);
+		qib_dev_err(dd, "No TRIMDONE in %d tries\n", trim_tmo);
 		ret = 0;
 	}
 	return ret;
@@ -964,8 +958,7 @@ static struct dds_init {
 };
 
 /*
- * Next, values related to Receive Equalization.
- * In comments, FDR (Full) is IB DDR, HDR (Half) is IB SDR
+ * Now the RXEQ section of the table.
  */
 /* Hardware packs an element number and register address thus: */
 #define RXEQ_INIT_RDESC(elt, addr) (((elt) & 0xF) | ((addr) << 4))
@@ -981,23 +974,23 @@ static struct dds_init {
 #define RXEQ_SDR_ZCNT 23
 
 static struct rxeq_init {
-	u16 rdesc;	/* in form used in SerDesDDSRXEQ */
+	u16 rdesc;      /* in form used in SerDesDDSRXEQ */
 	u8  rdata[4];
 } rxeq_init_vals[] = {
 	/* Set Rcv Eq. to Preset node */
 	RXEQ_VAL_ALL(7, 0x27, 0x10),
 	/* Set DFELTHFDR/HDR thresholds */
-	RXEQ_VAL(7, 8,    0, 0, 0, 0), /* FDR */
+	RXEQ_VAL(7, 8,    0, 0, 0, 0), /* FDR, was 0, 1, 2, 3 */
 	RXEQ_VAL(7, 0x21, 0, 0, 0, 0), /* HDR */
-	/* Set TLTHFDR/HDR threshold */
-	RXEQ_VAL(7, 9,    2, 2, 2, 2), /* FDR */
-	RXEQ_VAL(7, 0x23, 2, 2, 2, 2), /* HDR */
+	/* Set TLTHFDR/HDR threshold */
+	RXEQ_VAL(7, 9,    2, 2, 2, 2), /* FDR, was 0, 2, 4, 6 */
+	RXEQ_VAL(7, 0x23, 2, 2, 2, 2), /* HDR, was  0, 1, 2, 3 */
 	/* Set Preamp setting 2 (ZFR/ZCNT) */
-	RXEQ_VAL(7, 0x1B, 12, 12, 12, 12), /* FDR */
-	RXEQ_VAL(7, 0x1C, 12, 12, 12, 12), /* HDR */
+	RXEQ_VAL(7, 0x1B, 12, 12, 12, 12), /* FDR, was 12, 16, 20, 24 */
+	RXEQ_VAL(7, 0x1C, 12, 12, 12, 12), /* HDR, was 12, 16, 20, 24 */
 	/* Set Preamp DC gain and Setting 1 (GFR/GHR) */
-	RXEQ_VAL(7, 0x1E, 0x10, 0x10, 0x10, 0x10), /* FDR */
-	RXEQ_VAL(7, 0x1F, 0x10, 0x10, 0x10, 0x10), /* HDR */
+	RXEQ_VAL(7, 0x1E, 16, 16, 16, 16), /* FDR, was 16, 17, 18, 20 */
+	RXEQ_VAL(7, 0x1F, 16, 16, 16, 16), /* HDR, was 16, 17, 18, 20 */
 	/* Toggle RELOCK (in VCDL_CTRL0) to lock to data */
 	RXEQ_VAL_ALL(6, 6, 0x20), /* Set D5 High */
 	RXEQ_VAL_ALL(6, 6, 0), /* Set D5 Low */
@@ -1007,27 +1000,27 @@ static struct rxeq_init {
 #define DDS_ROWS (16)
 #define RXEQ_ROWS ARRAY_SIZE(rxeq_init_vals)
 
-static int ipath_sd_setvals(struct ipath_devdata *dd)
+static int qib_sd_setvals(struct qib_devdata *dd)
 {
 	int idx, midx;
-	int min_idx;	 /* Minimum index for this portion of table */
+	int min_idx;     /* Minimum index for this portion of table */
 	uint32_t dds_reg_map;
 	u64 __iomem *taddr, *iaddr;
 	uint64_t data;
 	uint64_t sdctl;
 
-	taddr = dd->ipath_kregbase + KR_IBSerDesMappTable;
-	iaddr = dd->ipath_kregbase + dd->ipath_kregs->kr_ib_ddsrxeq;
+	taddr = dd->kregbase + kr_serdes_maptable;
+	iaddr = dd->kregbase + kr_serdes_ddsrxeq0;
 
 	/*
 	 * Init the DDS section of the table.
 	 * Each "row" of the table provokes NUM_DDS_REG writes, to the
 	 * registers indicated in DDS_REG_MAP.
 	 */
-	sdctl = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibserdesctrl);
+	sdctl = qib_read_kreg64(dd, kr_ibserdesctrl);
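+	/* the 5-bit fields at bits 8 and 13 carry NUM_DDS_REGS and RXEQ_ROWS */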
 	sdctl = (sdctl & ~(0x1f << 8)) | (NUM_DDS_REGS << 8);
 	sdctl = (sdctl & ~(0x1f << 13)) | (RXEQ_ROWS << 13);
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_ibserdesctrl, sdctl);
+	qib_write_kreg(dd, kr_ibserdesctrl, sdctl);
 
 	/*
 	 * Iterate down table within loop for each register to store.
@@ -1037,21 +1030,21 @@ static int ipath_sd_setvals(struct ipath_devdata *dd)
 		data = ((dds_reg_map & 0xF) << 4) | TX_FAST_ELT;
 		writeq(data, iaddr + idx);
 		mmiowb();
-		ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
+		qib_read_kreg32(dd, kr_scratch);
 		dds_reg_map >>= 4;
 		for (midx = 0; midx < DDS_ROWS; ++midx) {
 			u64 __iomem *daddr = taddr + ((midx << 4) + idx);
 			data = dds_init_vals[midx].reg_vals[idx];
 			writeq(data, daddr);
 			mmiowb();
-			ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
+			qib_read_kreg32(dd, kr_scratch);
 		} /* End inner for (vals for this reg, each row) */
 	} /* end outer for (regs to be stored) */
 
 	/*
-	 * Init the RXEQ section of the table. As explained above the table
-	 * rxeq_init_vals[], this runs in a different order, as the pattern
-	 * of register references is more complex, but there are only
+	 * Init the RXEQ section of the table.
+	 * This runs in a different order, as the pattern of
+	 * register references is more complex, but there are only
 	 * four "data" values per register.
 	 */
 	min_idx = idx; /* RXEQ indices pick up where DDS left off */
@@ -1066,13 +1059,13 @@ static int ipath_sd_setvals(struct ipath_devdata *dd)
 		/* Store the next RXEQ register address */
 		writeq(rxeq_init_vals[idx].rdesc, iaddr + didx);
 		mmiowb();
-		ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
+		qib_read_kreg32(dd, kr_scratch);
 		/* Iterate through RXEQ values */
 		for (vidx = 0; vidx < 4; vidx++) {
 			data = rxeq_init_vals[idx].rdata[vidx];
 			writeq(data, taddr + (vidx << 6) + idx);
 			mmiowb();
-			ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
+			qib_read_kreg32(dd, kr_scratch);
 		}
 	} /* end outer for (Reg-writes for RXEQ) */
 	return 0;
@@ -1085,33 +1078,18 @@ static int ipath_sd_setvals(struct ipath_devdata *dd)
 #define VCDL_CTRL2(chan) EPB_LOC(chan, 6, 8)
 #define START_EQ2(chan) EPB_LOC(chan, 7, 0x28)
 
-static int ibsd_sto_noisy(struct ipath_devdata *dd, int loc, int val, int mask)
-{
-	int ret = -1;
-	int sloc; /* shifted loc, for messages */
-
-	loc |= (1U << EPB_IB_QUAD0_CS_SHF);
-	sloc = loc >> EPB_ADDR_SHF;
-
-	ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, loc, val, mask);
-	if (ret < 0)
-		ipath_dev_err(dd, "Write failed: elt %d,"
-			" addr 0x%X, chnl %d, val 0x%02X, mask 0x%02X\n",
-			(sloc & 0xF), (sloc >> 9) & 0x3f, (sloc >> 4) & 7,
-			val & 0xFF, mask & 0xFF);
-	return ret;
-}
-
 /*
  * Repeat a "store" across all channels of the IB SerDes.
  * Although nominally it inherits the "read value" of the last
  * channel it modified, the only really useful return is <0 for
  * failure, >= 0 for success. The parameter 'loc' is assumed to
- * be the location for the channel-0 copy of the register to
- * be modified.
+ * be the location in some channel of the register to be modified.
+ * The caller can specify use of the "gang write" option of EPB,
+ * in which case we use the specified channel data for any fields
+ * not explicitly written.
  */
-static int ibsd_mod_allchnls(struct ipath_devdata *dd, int loc, int val,
-	int mask)
+static int ibsd_mod_allchnls(struct qib_devdata *dd, int loc, int val,
+			     int mask)
 {
 	int ret = -1;
 	int chnl;
@@ -1126,24 +1104,27 @@ static int ibsd_mod_allchnls(struct ipath_devdata *dd, int loc, int val,
 		loc |= (1U << EPB_IB_QUAD0_CS_SHF);
 		chnl = (loc >> (4 + EPB_ADDR_SHF)) & 7;
 		if (mask != 0xFF) {
-			ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES,
-				loc & ~EPB_GLOBAL_WR, 0, 0);
+			ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
+						 loc & ~EPB_GLOBAL_WR, 0, 0);
 			if (ret < 0) {
 				int sloc = loc >> EPB_ADDR_SHF;
-				ipath_dev_err(dd, "pre-read failed: elt %d,"
-					" addr 0x%X, chnl %d\n", (sloc & 0xF),
-					(sloc >> 9) & 0x3f, chnl);
+
+				qib_dev_err(dd, "pre-read failed: elt %d,"
+					    " addr 0x%X, chnl %d\n",
+					    (sloc & 0xF),
+					    (sloc >> 9) & 0x3f, chnl);
 				return ret;
 			}
 			val = (ret & ~mask) | (val & mask);
 		}
 		loc &=  ~(7 << (4+EPB_ADDR_SHF));
-		ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, loc, val, 0xFF);
+		ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, val, 0xFF);
 		if (ret < 0) {
 			int sloc = loc >> EPB_ADDR_SHF;
-			ipath_dev_err(dd, "Global WR failed: elt %d,"
-				" addr 0x%X, val %02X\n",
-				(sloc & 0xF), (sloc >> 9) & 0x3f, val);
+
+			qib_dev_err(dd, "Global WR failed: elt %d,"
+				    " addr 0x%X, val %02X\n",
+				    (sloc & 0xF), (sloc >> 9) & 0x3f, val);
 		}
 		return ret;
 	}
@@ -1151,16 +1132,17 @@ static int ibsd_mod_allchnls(struct ipath_devdata *dd, int loc, int val,
 	loc &=  ~(7 << (4+EPB_ADDR_SHF));
 	loc |= (1U << EPB_IB_QUAD0_CS_SHF);
 	for (chnl = 0; chnl < 4; ++chnl) {
-		int cloc;
-		cloc = loc | (chnl << (4+EPB_ADDR_SHF));
-		ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, cloc, val, mask);
+		int cloc = loc | (chnl << (4+EPB_ADDR_SHF));
+
+		ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, cloc, val, mask);
 		if (ret < 0) {
 			int sloc = loc >> EPB_ADDR_SHF;
-			ipath_dev_err(dd, "Write failed: elt %d,"
-				" addr 0x%X, chnl %d, val 0x%02X,"
-				" mask 0x%02X\n",
-				(sloc & 0xF), (sloc >> 9) & 0x3f, chnl,
-				val & 0xFF, mask & 0xFF);
+
+			qib_dev_err(dd, "Write failed: elt %d,"
+				    " addr 0x%X, chnl %d, val 0x%02X,"
+				    " mask 0x%02X\n",
+				    (sloc & 0xF), (sloc >> 9) & 0x3f, chnl,
+				    val & 0xFF, mask & 0xFF);
 			break;
 		}
 	}
@@ -1171,7 +1153,7 @@ static int ibsd_mod_allchnls(struct ipath_devdata *dd, int loc, int val,
  * Set the Tx values normally modified by IBC in IB1.2 mode to default
  * values, as gotten from first row of init table.
  */
-static int set_dds_vals(struct ipath_devdata *dd, struct dds_init *ddi)
+static int set_dds_vals(struct qib_devdata *dd, struct dds_init *ddi)
 {
 	int ret;
 	int idx, reg, data;
@@ -1194,7 +1176,7 @@ static int set_dds_vals(struct ipath_devdata *dd, struct dds_init *ddi)
  * Set the Rx values normally modified by IBC in IB1.2 mode to default
  * values, as taken from the selected column of the init table.
  */
-static int set_rxeq_vals(struct ipath_devdata *dd, int vsel)
+static int set_rxeq_vals(struct qib_devdata *dd, int vsel)
 {
 	int ret;
 	int ridx;
@@ -1202,6 +1184,7 @@ static int set_rxeq_vals(struct ipath_devdata *dd, int vsel)
 
 	for (ridx = 0; ridx < cnt; ++ridx) {
 		int elt, reg, val, loc;
+
 		elt = rxeq_init_vals[ridx].rdesc & 0xF;
 		reg = rxeq_init_vals[ridx].rdesc >> 4;
 		loc = EPB_LOC(0, elt, reg);
@@ -1217,83 +1200,66 @@ static int set_rxeq_vals(struct ipath_devdata *dd, int vsel)
 /*
  * Set the default values (row 0) for DDR Driver De-emphasis.
  * We do this initially and whenever we turn off IB-1.2.
+ *
  * The "default" values for Rx equalization are also stored to
  * SerDes registers. Formerly (and still default), we used set 2.
  * For experimenting with cables and link-partners, we allow changing
  * that via a module parameter.
  */
-static unsigned ipath_rxeq_set = 2;
-module_param_named(rxeq_default_set, ipath_rxeq_set, uint,
-	S_IWUSR | S_IRUGO);
+static unsigned qib_rxeq_set = 2;
+module_param_named(rxeq_default_set, qib_rxeq_set, uint,
+		   S_IWUSR | S_IRUGO);
 MODULE_PARM_DESC(rxeq_default_set,
-	"Which set [0..3] of Rx Equalization values is default");
+		 "Which set [0..3] of Rx Equalization values is default");
 
-static int ipath_internal_presets(struct ipath_devdata *dd)
+static int qib_internal_presets(struct qib_devdata *dd)
 {
 	int ret = 0;
 
 	ret = set_dds_vals(dd, dds_init_vals + DDS_3M);
 
 	if (ret < 0)
-		ipath_dev_err(dd, "Failed to set default DDS values\n");
-	ret = set_rxeq_vals(dd, ipath_rxeq_set & 3);
+		qib_dev_err(dd, "Failed to set default DDS values\n");
+	ret = set_rxeq_vals(dd, qib_rxeq_set & 3);
 	if (ret < 0)
-		ipath_dev_err(dd, "Failed to set default RXEQ values\n");
+		qib_dev_err(dd, "Failed to set default RXEQ values\n");
 	return ret;
 }
 
-int ipath_sd7220_presets(struct ipath_devdata *dd)
+int qib_sd7220_presets(struct qib_devdata *dd)
 {
 	int ret = 0;
 
-	if (!dd->ipath_presets_needed)
+	if (!dd->cspec->presets_needed)
 		return ret;
-	dd->ipath_presets_needed = 0;
+	dd->cspec->presets_needed = 0;
 	/* Assert uC reset, so we don't clash with it. */
-	ipath_ibsd_reset(dd, 1);
+	qib_ibsd_reset(dd, 1);
 	udelay(2);
-	ipath_sd_trimdone_monitor(dd, "link-down");
+	qib_sd_trimdone_monitor(dd, "link-down");
 
-	ret = ipath_internal_presets(dd);
-return ret;
+	ret = qib_internal_presets(dd);
+	return ret;
 }
 
-static int ipath_sd_trimself(struct ipath_devdata *dd, int val)
+static int qib_sd_trimself(struct qib_devdata *dd, int val)
 {
-	return ibsd_sto_noisy(dd, CMUCTRL5, val, 0xFF);
+	int loc = CMUCTRL5 | (1U << EPB_IB_QUAD0_CS_SHF);
+
+	return qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, val, 0xFF);
 }
 
-static int ipath_sd_early(struct ipath_devdata *dd)
+static int qib_sd_early(struct qib_devdata *dd)
 {
-	int ret = -1; /* Default failed */
-	int chnl;
+	int ret;
 
-	for (chnl = 0; chnl < 4; ++chnl) {
-		ret = ibsd_sto_noisy(dd, RXHSCTRL0(chnl), 0xD4, 0xFF);
-		if (ret < 0)
-			goto bail;
-	}
-	for (chnl = 0; chnl < 4; ++chnl) {
-		ret = ibsd_sto_noisy(dd, VCDL_DAC2(chnl), 0x2D, 0xFF);
-		if (ret < 0)
-			goto bail;
-	}
-	/* more fine-tuning of what will be default */
-	for (chnl = 0; chnl < 4; ++chnl) {
-		ret = ibsd_sto_noisy(dd, VCDL_CTRL2(chnl), 3, 0xF);
-		if (ret < 0)
-			goto bail;
-	}
-	for (chnl = 0; chnl < 4; ++chnl) {
-		ret = ibsd_sto_noisy(dd, START_EQ1(chnl), 0x10, 0xFF);
-		if (ret < 0)
-			goto bail;
-	}
-	for (chnl = 0; chnl < 4; ++chnl) {
-		ret = ibsd_sto_noisy(dd, START_EQ2(chnl), 0x30, 0xFF);
-		if (ret < 0)
-			goto bail;
-	}
+	ret = ibsd_mod_allchnls(dd, RXHSCTRL0(0) | EPB_GLOBAL_WR, 0xD4, 0xFF);
+	if (ret < 0)
+		goto bail;
+	ret = ibsd_mod_allchnls(dd, START_EQ1(0) | EPB_GLOBAL_WR, 0x10, 0xFF);
+	if (ret < 0)
+		goto bail;
+	ret = ibsd_mod_allchnls(dd, START_EQ2(0) | EPB_GLOBAL_WR, 0x30, 0xFF);
 bail:
 	return ret;
 }
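
As a side note on the pattern above: qib_sd_early() now issues one EPB
"gang write" per register instead of the old four-iteration channel loop.
A minimal sketch of that idiom, assuming only ibsd_mod_allchnls(),
EPB_GLOBAL_WR and the channel-0 register macros from this file (the
sd_gang_write() wrapper itself is hypothetical):

    /*
     * Hypothetical wrapper: a channel-0 location plus EPB_GLOBAL_WR
     * makes ibsd_mod_allchnls() hit all four channels in one store.
     */
    static int sd_gang_write(struct qib_devdata *dd, int loc0,
    			     int val, int mask)
    {
    	return ibsd_mod_allchnls(dd, loc0 | EPB_GLOBAL_WR, val, mask);
    }

    /* e.g. the first qib_sd_early() setting becomes: */
    /*	sd_gang_write(dd, RXHSCTRL0(0), 0xD4, 0xFF);	*/
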
@@ -1302,50 +1268,53 @@ bail:
 #define LDOUTCTRL1(chnl) EPB_LOC(chnl, 7, 6)
 #define RXHSSTATUS(chnl) EPB_LOC(chnl, 6, 0xF)
 
-static int ipath_sd_dactrim(struct ipath_devdata *dd)
+static int qib_sd_dactrim(struct qib_devdata *dd)
 {
-	int ret = -1; /* Default failed */
-	int chnl;
+	int ret;
+
+	ret = ibsd_mod_allchnls(dd, VCDL_DAC2(0) | EPB_GLOBAL_WR, 0x2D, 0xFF);
+	if (ret < 0)
+		goto bail;
+
+	/* more fine-tuning of what will be default */
+	ret = ibsd_mod_allchnls(dd, VCDL_CTRL2(0), 3, 0xF);
+	if (ret < 0)
+		goto bail;
+
+	ret = ibsd_mod_allchnls(dd, BACTRL(0) | EPB_GLOBAL_WR, 0x40, 0xFF);
+	if (ret < 0)
+		goto bail;
+
+	ret = ibsd_mod_allchnls(dd, LDOUTCTRL1(0) | EPB_GLOBAL_WR, 0x04, 0xFF);
+	if (ret < 0)
+		goto bail;
+
+	ret = ibsd_mod_allchnls(dd, RXHSSTATUS(0) | EPB_GLOBAL_WR, 0x04, 0xFF);
+	if (ret < 0)
+		goto bail;
 
-	for (chnl = 0; chnl < 4; ++chnl) {
-		ret = ibsd_sto_noisy(dd, BACTRL(chnl), 0x40, 0xFF);
-		if (ret < 0)
-			goto bail;
-	}
-	for (chnl = 0; chnl < 4; ++chnl) {
-		ret = ibsd_sto_noisy(dd, LDOUTCTRL1(chnl), 0x04, 0xFF);
-		if (ret < 0)
-			goto bail;
-	}
-	for (chnl = 0; chnl < 4; ++chnl) {
-		ret = ibsd_sto_noisy(dd, RXHSSTATUS(chnl), 0x04, 0xFF);
-		if (ret < 0)
-			goto bail;
-	}
 	/*
-	 * delay for max possible number of steps, with slop.
+	 * Delay for max possible number of steps, with slop.
 	 * Each step is about 4usec.
 	 */
 	udelay(415);
-	for (chnl = 0; chnl < 4; ++chnl) {
-		ret = ibsd_sto_noisy(dd, LDOUTCTRL1(chnl), 0x00, 0xFF);
-		if (ret < 0)
-			goto bail;
-	}
+
+	ret = ibsd_mod_allchnls(dd, LDOUTCTRL1(0) | EPB_GLOBAL_WR, 0x00, 0xFF);
+
 bail:
 	return ret;
 }
 
 #define RELOCK_FIRST_MS 3
 #define RXLSPPM(chan) EPB_LOC(chan, 0, 2)
-void ipath_toggle_rclkrls(struct ipath_devdata *dd)
+void toggle_7220_rclkrls(struct qib_devdata *dd)
 {
 	int loc = RXLSPPM(0) | EPB_GLOBAL_WR;
 	int ret;
 
 	ret = ibsd_mod_allchnls(dd, loc, 0, 0x80);
 	if (ret < 0)
-		ipath_dev_err(dd, "RCLKRLS failed to clear D7\n");
+		qib_dev_err(dd, "RCLKRLS failed to clear D7\n");
 	else {
 		udelay(1);
 		ibsd_mod_allchnls(dd, loc, 0x80, 0x80);
@@ -1354,109 +1323,91 @@ void ipath_toggle_rclkrls(struct ipath_devdata *dd)
 	udelay(1);
 	ret = ibsd_mod_allchnls(dd, loc, 0, 0x80);
 	if (ret < 0)
-		ipath_dev_err(dd, "RCLKRLS failed to clear D7\n");
+		qib_dev_err(dd, "RCLKRLS failed to clear D7\n");
 	else {
 		udelay(1);
 		ibsd_mod_allchnls(dd, loc, 0x80, 0x80);
 	}
 	/* Now reset xgxs and IBC to complete the recovery */
-	dd->ipath_f_xgxs_reset(dd);
+	dd->f_xgxs_reset(dd->pport);
 }
 
 /*
  * Shut down the timer that polls for relock occasions, if needed.
- * this is "hooked" from ipath_7220_quiet_serdes(), which is called
- * just before ipath_shutdown_device() in ipath_driver.c shuts down all
+ * This is "hooked" from qib_7220_quiet_serdes(), which is called
+ * just before qib_shutdown_device() in qib_driver.c shuts down all
  * the other timers.
  */
-void ipath_shutdown_relock_poll(struct ipath_devdata *dd)
+void shutdown_7220_relock_poll(struct qib_devdata *dd)
 {
-	struct ipath_relock *irp = &dd->ipath_relock_singleton;
-	if (atomic_read(&irp->ipath_relock_timer_active)) {
-		del_timer_sync(&irp->ipath_relock_timer);
-		atomic_set(&irp->ipath_relock_timer_active, 0);
-	}
+	if (dd->cspec->relock_timer_active)
+		del_timer_sync(&dd->cspec->relock_timer);
 }
 
-static unsigned ipath_relock_by_timer = 1;
-module_param_named(relock_by_timer, ipath_relock_by_timer, uint,
-	S_IWUSR | S_IRUGO);
+static unsigned qib_relock_by_timer = 1;
+module_param_named(relock_by_timer, qib_relock_by_timer, uint,
+		   S_IWUSR | S_IRUGO);
 MODULE_PARM_DESC(relock_by_timer, "Allow relock attempt if link not up");
 
-static void ipath_run_relock(unsigned long opaque)
+static void qib_run_relock(unsigned long opaque)
 {
-	struct ipath_devdata *dd = (struct ipath_devdata *)opaque;
-	struct ipath_relock *irp = &dd->ipath_relock_singleton;
-	u64 val, ltstate;
-
-	if (!(dd->ipath_flags & IPATH_INITTED)) {
-		/* Not yet up, just reenable the timer for later */
-		irp->ipath_relock_interval = HZ;
-		mod_timer(&irp->ipath_relock_timer, jiffies + HZ);
-		return;
-	}
+	struct qib_devdata *dd = (struct qib_devdata *)opaque;
+	struct qib_pportdata *ppd = dd->pport;
+	struct qib_chip_specific *cs = dd->cspec;
+	int timeoff;
 
 	/*
-	 * Check link-training state for "stuck" state.
+	 * Check link-training state for "stuck" state, when down.
 	 * If found, try relock and schedule another try at an
 	 * exponentially growing delay, maxed at one second.
 	 * If not stuck, our work is done.
 	 */
-	val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
-	ltstate = ipath_ib_linktrstate(dd, val);
-
-	if (ltstate <= INFINIPATH_IBCS_LT_STATE_CFGWAITRMT
-		&& ltstate != INFINIPATH_IBCS_LT_STATE_LINKUP) {
-		int timeoff;
-		/* Not up yet. Try again, if allowed by module-param */
-		if (ipath_relock_by_timer) {
-			if (dd->ipath_flags & IPATH_IB_AUTONEG_INPROG)
-				ipath_cdbg(VERBOSE, "Skip RELOCK in AUTONEG\n");
-			else if (!(dd->ipath_flags & IPATH_IB_LINK_DISABLED)) {
-				ipath_cdbg(VERBOSE, "RELOCK\n");
-				ipath_toggle_rclkrls(dd);
-			}
+	if ((dd->flags & QIB_INITTED) && !(ppd->lflags &
+	    (QIBL_IB_AUTONEG_INPROG | QIBL_LINKINIT | QIBL_LINKARMED |
+	     QIBL_LINKACTIVE))) {
+		if (qib_relock_by_timer) {
+			if (!(ppd->lflags & QIBL_IB_LINK_DISABLED))
+				toggle_7220_rclkrls(dd);
 		}
 		/* re-set timer for next check */
-		timeoff = irp->ipath_relock_interval << 1;
+		timeoff = cs->relock_interval << 1;
 		if (timeoff > HZ)
 			timeoff = HZ;
-		irp->ipath_relock_interval = timeoff;
-
-		mod_timer(&irp->ipath_relock_timer, jiffies + timeoff);
-	} else {
-		/* Up, so no more need to check so often */
-		mod_timer(&irp->ipath_relock_timer, jiffies + HZ);
-	}
+		cs->relock_interval = timeoff;
+	} else
+		timeoff = HZ;
+	mod_timer(&cs->relock_timer, jiffies + timeoff);
 }
 
-void ipath_set_relock_poll(struct ipath_devdata *dd, int ibup)
+void set_7220_relock_poll(struct qib_devdata *dd, int ibup)
 {
-	struct ipath_relock *irp = &dd->ipath_relock_singleton;
+	struct qib_chip_specific *cs = dd->cspec;
 
-	if (ibup > 0) {
-		/* we are now up, so relax timer to 1 second interval */
-		if (atomic_read(&irp->ipath_relock_timer_active))
-			mod_timer(&irp->ipath_relock_timer, jiffies + HZ);
+	if (ibup) {
+		/* We are now up, relax timer to 1 second interval */
+		if (cs->relock_timer_active) {
+			cs->relock_interval = HZ;
+			mod_timer(&cs->relock_timer, jiffies + HZ);
+		}
 	} else {
 		/* Transition to down, (re-)set timer to short interval. */
-		int timeout;
-		timeout = (HZ * ((ibup == -1) ? 1000 : RELOCK_FIRST_MS))/1000;
+		unsigned int timeout;
+
+		timeout = msecs_to_jiffies(RELOCK_FIRST_MS);
 		if (timeout == 0)
 			timeout = 1;
 		/* If timer has not yet been started, do so. */
-		if (atomic_inc_return(&irp->ipath_relock_timer_active) == 1) {
-			init_timer(&irp->ipath_relock_timer);
-			irp->ipath_relock_timer.function = ipath_run_relock;
-			irp->ipath_relock_timer.data = (unsigned long) dd;
-			irp->ipath_relock_interval = timeout;
-			irp->ipath_relock_timer.expires = jiffies + timeout;
-			add_timer(&irp->ipath_relock_timer);
+		if (!cs->relock_timer_active) {
+			cs->relock_timer_active = 1;
+			init_timer(&cs->relock_timer);
+			cs->relock_timer.function = qib_run_relock;
+			cs->relock_timer.data = (unsigned long) dd;
+			cs->relock_interval = timeout;
+			cs->relock_timer.expires = jiffies + timeout;
+			add_timer(&cs->relock_timer);
 		} else {
-			irp->ipath_relock_interval = timeout;
-			mod_timer(&irp->ipath_relock_timer, jiffies + timeout);
-			atomic_dec(&irp->ipath_relock_timer_active);
+			cs->relock_interval = timeout;
+			mod_timer(&cs->relock_timer, jiffies + timeout);
 		}
 	}
 }
-
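
The relock poll above implements a simple exponential backoff: while the
link stays down, each timer tick doubles the poll interval up to a cap of
one second, and once the link is up the poll relaxes to a fixed one-second
interval. A stripped-down sketch of just that policy (next_relock_interval()
is illustrative, not part of the patch; HZ is the kernel tick rate):

    /* Backoff policy from qib_run_relock(), isolated for clarity. */
    static unsigned long next_relock_interval(unsigned long cur, int link_up)
    {
    	if (link_up)
    		return HZ;		/* relaxed 1-second poll */
    	cur <<= 1;			/* exponential growth while down... */
    	return cur > HZ ? HZ : cur;	/* ...capped at one second */
    }
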
diff --git a/drivers/infiniband/hw/ipath/ipath_sd7220_img.c b/drivers/infiniband/hw/qib/qib_sd7220_img.c
index 5ef59da9270a..a1118fbd2370 100644
--- a/drivers/infiniband/hw/ipath/ipath_sd7220_img.c
+++ b/drivers/infiniband/hw/qib/qib_sd7220_img.c
@@ -38,11 +38,10 @@
 #include <linux/pci.h>
 #include <linux/delay.h>
 
-#include "ipath_kernel.h"
-#include "ipath_registers.h"
-#include "ipath_7220.h"
+#include "qib.h"
+#include "qib_7220.h"
 
-static unsigned char ipath_sd7220_ib_img[] = {
+static unsigned char qib_sd7220_ib_img[] = {
 /*0000*/0x02, 0x0A, 0x29, 0x02, 0x0A, 0x87, 0xE5, 0xE6,
 	0x30, 0xE6, 0x04, 0x7F, 0x01, 0x80, 0x02, 0x7F,
 /*0010*/0x00, 0xE5, 0xE2, 0x30, 0xE4, 0x04, 0x7E, 0x01,
@@ -1069,14 +1068,14 @@ static unsigned char ipath_sd7220_ib_img[] = {
 	0x01, 0x20, 0x11, 0x00, 0x04, 0x20, 0x00, 0x81
 };
 
-int ipath_sd7220_ib_load(struct ipath_devdata *dd)
+int qib_sd7220_ib_load(struct qib_devdata *dd)
 {
-	return ipath_sd7220_prog_ld(dd, IB_7220_SERDES, ipath_sd7220_ib_img,
-		sizeof(ipath_sd7220_ib_img), 0);
+	return qib_sd7220_prog_ld(dd, IB_7220_SERDES, qib_sd7220_ib_img,
+		sizeof(qib_sd7220_ib_img), 0);
 }
 
-int ipath_sd7220_ib_vfy(struct ipath_devdata *dd)
+int qib_sd7220_ib_vfy(struct qib_devdata *dd)
 {
-	return ipath_sd7220_prog_vfy(dd, IB_7220_SERDES, ipath_sd7220_ib_img,
-		sizeof(ipath_sd7220_ib_img), 0);
+	return qib_sd7220_prog_vfy(dd, IB_7220_SERDES, qib_sd7220_ib_img,
+		sizeof(qib_sd7220_ib_img), 0);
 }
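
A hedged sketch of how this load/verify pair is typically sequenced during
SerDes bring-up; both calls are defined above, but the wrapper and its
error handling are illustrative assumptions, not part of this patch:

    /* Illustrative only: program the 8051 image, then read it back. */
    static int sd7220_load_and_check(struct qib_devdata *dd)
    {
    	int ret = qib_sd7220_ib_load(dd);

    	if (ret < 0) {
    		qib_dev_err(dd, "Failed to load SerDes image\n");
    		return ret;
    	}
    	ret = qib_sd7220_ib_vfy(dd);
    	if (ret < 0)
    		qib_dev_err(dd, "SerDes image verification failed\n");
    	return ret;
    }
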
diff --git a/drivers/infiniband/hw/qib/qib_sdma.c b/drivers/infiniband/hw/qib/qib_sdma.c
new file mode 100644
index 000000000000..b8456881f7f6
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_sdma.c
@@ -0,0 +1,973 @@
+/*
+ * Copyright (c) 2007, 2008, 2009, 2010 QLogic Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/netdevice.h>
+
+#include "qib.h"
+#include "qib_common.h"
+
+/* Default number of send DMA descriptor queue entries */
+static ushort sdma_descq_cnt = 256;
+module_param_named(sdma_descq_cnt, sdma_descq_cnt, ushort, S_IRUGO);
+MODULE_PARM_DESC(sdma_descq_cnt, "Number of SDMA descq entries");
+
+/*
+ * Bits defined in the send DMA descriptor.
+ */
+#define SDMA_DESC_LAST          (1ULL << 11)
+#define SDMA_DESC_FIRST         (1ULL << 12)
+#define SDMA_DESC_DMA_HEAD      (1ULL << 13)
+#define SDMA_DESC_USE_LARGE_BUF (1ULL << 14)
+#define SDMA_DESC_INTR          (1ULL << 15)
+#define SDMA_DESC_COUNT_LSB     16
+#define SDMA_DESC_GEN_LSB       30
+
+char *qib_sdma_state_names[] = {
+	[qib_sdma_state_s00_hw_down]          = "s00_HwDown",
+	[qib_sdma_state_s10_hw_start_up_wait] = "s10_HwStartUpWait",
+	[qib_sdma_state_s20_idle]             = "s20_Idle",
+	[qib_sdma_state_s30_sw_clean_up_wait] = "s30_SwCleanUpWait",
+	[qib_sdma_state_s40_hw_clean_up_wait] = "s40_HwCleanUpWait",
+	[qib_sdma_state_s50_hw_halt_wait]     = "s50_HwHaltWait",
+	[qib_sdma_state_s99_running]          = "s99_Running",
+};
+
+char *qib_sdma_event_names[] = {
+	[qib_sdma_event_e00_go_hw_down]   = "e00_GoHwDown",
+	[qib_sdma_event_e10_go_hw_start]  = "e10_GoHwStart",
+	[qib_sdma_event_e20_hw_started]   = "e20_HwStarted",
+	[qib_sdma_event_e30_go_running]   = "e30_GoRunning",
+	[qib_sdma_event_e40_sw_cleaned]   = "e40_SwCleaned",
+	[qib_sdma_event_e50_hw_cleaned]   = "e50_HwCleaned",
+	[qib_sdma_event_e60_hw_halted]    = "e60_HwHalted",
+	[qib_sdma_event_e70_go_idle]      = "e70_GoIdle",
+	[qib_sdma_event_e7220_err_halted] = "e7220_ErrHalted",
+	[qib_sdma_event_e7322_err_halted] = "e7322_ErrHalted",
+	[qib_sdma_event_e90_timer_tick]   = "e90_TimerTick",
+};
+
+/* declare all statics here rather than keeping the functions sorted */
+static int alloc_sdma(struct qib_pportdata *);
+static void sdma_complete(struct kref *);
+static void sdma_finalput(struct qib_sdma_state *);
+static void sdma_get(struct qib_sdma_state *);
+static void sdma_put(struct qib_sdma_state *);
+static void sdma_set_state(struct qib_pportdata *, enum qib_sdma_states);
+static void sdma_start_sw_clean_up(struct qib_pportdata *);
+static void sdma_sw_clean_up_task(unsigned long);
+static void unmap_desc(struct qib_pportdata *, unsigned);
+
+static void sdma_get(struct qib_sdma_state *ss)
+{
+	kref_get(&ss->kref);
+}
+
+static void sdma_complete(struct kref *kref)
+{
+	struct qib_sdma_state *ss =
+		container_of(kref, struct qib_sdma_state, kref);
+
+	complete(&ss->comp);
+}
+
+static void sdma_put(struct qib_sdma_state *ss)
+{
+	kref_put(&ss->kref, sdma_complete);
+}
+
+static void sdma_finalput(struct qib_sdma_state *ss)
+{
+	sdma_put(ss);
+	wait_for_completion(&ss->comp);
+}
+
+/*
+ * Complete all the sdma requests on the active list, in the correct
+ * order, and with appropriate processing.   Called when cleaning up
+ * after sdma shutdown, and when new sdma requests are submitted for
+ * a link that is down.   This matches what is done for requests
+ * that complete normally, it's just the full list.
+ *
+ * Must be called with sdma_lock held
+ */
+static void clear_sdma_activelist(struct qib_pportdata *ppd)
+{
+	struct qib_sdma_txreq *txp, *txp_next;
+
+	list_for_each_entry_safe(txp, txp_next, &ppd->sdma_activelist, list) {
+		list_del_init(&txp->list);
+		if (txp->flags & QIB_SDMA_TXREQ_F_FREEDESC) {
+			unsigned idx;
+
+			idx = txp->start_idx;
+			while (idx != txp->next_descq_idx) {
+				unmap_desc(ppd, idx);
+				if (++idx == ppd->sdma_descq_cnt)
+					idx = 0;
+			}
+		}
+		if (txp->callback)
+			(*txp->callback)(txp, QIB_SDMA_TXREQ_S_ABORTED);
+	}
+}
+
+static void sdma_sw_clean_up_task(unsigned long opaque)
+{
+	struct qib_pportdata *ppd = (struct qib_pportdata *) opaque;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ppd->sdma_lock, flags);
+
+	/*
+	 * At this point, the following should always be true:
+	 * - We are halted, so no more descriptors are getting retired.
+	 * - We are not running, so no one is submitting new work.
+	 * - Only we can send the e40_sw_cleaned, so we can't start
+	 *   running again until we say so.  So, the active list and
+	 *   descq are ours to play with.
+	 */
+
+	/* Process all retired requests. */
+	qib_sdma_make_progress(ppd);
+
+	clear_sdma_activelist(ppd);
+
+	/*
+	 * Resync count of added and removed.  It is VERY important that
+	 * sdma_descq_removed NEVER decrement - user_sdma depends on it.
+	 */
+	ppd->sdma_descq_removed = ppd->sdma_descq_added;
+
+	/*
+	 * Reset our notion of head and tail.
+	 * Note that the HW registers will be reset when switching states
+	 * due to calling __qib_sdma_process_event() below.
+	 */
+	ppd->sdma_descq_tail = 0;
+	ppd->sdma_descq_head = 0;
+	ppd->sdma_head_dma[0] = 0;
+	ppd->sdma_generation = 0;
+
+	__qib_sdma_process_event(ppd, qib_sdma_event_e40_sw_cleaned);
+
+	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+}
+
+/*
+ * This is called when changing to state qib_sdma_state_s10_hw_start_up_wait
+ * as a result of send buffer errors or send DMA descriptor errors.
+ * We want to disarm the buffers in these cases.
+ */
+static void sdma_hw_start_up(struct qib_pportdata *ppd)
+{
+	struct qib_sdma_state *ss = &ppd->sdma_state;
+	unsigned bufno;
+
+	for (bufno = ss->first_sendbuf; bufno < ss->last_sendbuf; ++bufno)
+		ppd->dd->f_sendctrl(ppd, QIB_SENDCTRL_DISARM_BUF(bufno));
+
+	ppd->dd->f_sdma_hw_start_up(ppd);
+}
+
+static void sdma_sw_tear_down(struct qib_pportdata *ppd)
+{
+	struct qib_sdma_state *ss = &ppd->sdma_state;
+
+	/* Releasing this reference means the state machine has stopped. */
+	sdma_put(ss);
+}
+
+static void sdma_start_sw_clean_up(struct qib_pportdata *ppd)
+{
+	tasklet_hi_schedule(&ppd->sdma_sw_clean_up_task);
+}
+
+static void sdma_set_state(struct qib_pportdata *ppd,
+	enum qib_sdma_states next_state)
+{
+	struct qib_sdma_state *ss = &ppd->sdma_state;
+	struct sdma_set_state_action *action = ss->set_state_action;
+	unsigned op = 0;
+
+	/* debugging bookkeeping */
+	ss->previous_state = ss->current_state;
+	ss->previous_op = ss->current_op;
+
+	ss->current_state = next_state;
+
+	if (action[next_state].op_enable)
+		op |= QIB_SDMA_SENDCTRL_OP_ENABLE;
+
+	if (action[next_state].op_intenable)
+		op |= QIB_SDMA_SENDCTRL_OP_INTENABLE;
+
+	if (action[next_state].op_halt)
+		op |= QIB_SDMA_SENDCTRL_OP_HALT;
+
+	if (action[next_state].op_drain)
+		op |= QIB_SDMA_SENDCTRL_OP_DRAIN;
+
+	if (action[next_state].go_s99_running_tofalse)
+		ss->go_s99_running = 0;
+
+	if (action[next_state].go_s99_running_totrue)
+		ss->go_s99_running = 1;
+
+	ss->current_op = op;
+
+	ppd->dd->f_sdma_sendctrl(ppd, ss->current_op);
+}
+
+static void unmap_desc(struct qib_pportdata *ppd, unsigned head)
+{
+	__le64 *descqp = &ppd->sdma_descq[head].qw[0];
+	u64 desc[2];
+	dma_addr_t addr;
+	size_t len;
+
+	desc[0] = le64_to_cpu(descqp[0]);
+	desc[1] = le64_to_cpu(descqp[1]);
+
+	addr = (desc[1] << 32) | (desc[0] >> 32);
+	len = (desc[0] >> 14) & (0x7ffULL << 2);
+	dma_unmap_single(&ppd->dd->pcidev->dev, addr, len, DMA_TO_DEVICE);
+}
+
+static int alloc_sdma(struct qib_pportdata *ppd)
+{
+	ppd->sdma_descq_cnt = sdma_descq_cnt;
+	if (!ppd->sdma_descq_cnt)
+		ppd->sdma_descq_cnt = 256;
+
+	/* Allocate memory for SendDMA descriptor FIFO */
+	ppd->sdma_descq = dma_alloc_coherent(&ppd->dd->pcidev->dev,
+		ppd->sdma_descq_cnt * sizeof(u64[2]), &ppd->sdma_descq_phys,
+		GFP_KERNEL);
+
+	if (!ppd->sdma_descq) {
+		qib_dev_err(ppd->dd, "failed to allocate SendDMA descriptor "
+			    "FIFO memory\n");
+		goto bail;
+	}
+
+	/* Allocate memory for DMA of head register to memory */
+	ppd->sdma_head_dma = dma_alloc_coherent(&ppd->dd->pcidev->dev,
+		PAGE_SIZE, &ppd->sdma_head_phys, GFP_KERNEL);
+	if (!ppd->sdma_head_dma) {
+		qib_dev_err(ppd->dd, "failed to allocate SendDMA "
+			    "head memory\n");
+		goto cleanup_descq;
+	}
+	ppd->sdma_head_dma[0] = 0;
+	return 0;
+
+cleanup_descq:
+	dma_free_coherent(&ppd->dd->pcidev->dev,
+		ppd->sdma_descq_cnt * sizeof(u64[2]), (void *)ppd->sdma_descq,
+		ppd->sdma_descq_phys);
+	ppd->sdma_descq = NULL;
+	ppd->sdma_descq_phys = 0;
+bail:
+	ppd->sdma_descq_cnt = 0;
+	return -ENOMEM;
+}
+
+static void free_sdma(struct qib_pportdata *ppd)
+{
+	struct qib_devdata *dd = ppd->dd;
+
+	if (ppd->sdma_head_dma) {
+		dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
+				  (void *)ppd->sdma_head_dma,
+				  ppd->sdma_head_phys);
+		ppd->sdma_head_dma = NULL;
+		ppd->sdma_head_phys = 0;
+	}
+
+	if (ppd->sdma_descq) {
+		dma_free_coherent(&dd->pcidev->dev,
+				  ppd->sdma_descq_cnt * sizeof(u64[2]),
+				  ppd->sdma_descq, ppd->sdma_descq_phys);
+		ppd->sdma_descq = NULL;
+		ppd->sdma_descq_phys = 0;
+	}
+}
+
+static inline void make_sdma_desc(struct qib_pportdata *ppd,
+				  u64 *sdmadesc, u64 addr, u64 dwlen,
+				  u64 dwoffset)
+{
+
+	WARN_ON(addr & 3);
+	/* SDmaPhyAddr[47:32] */
+	sdmadesc[1] = addr >> 32;
+	/* SDmaPhyAddr[31:0] */
+	sdmadesc[0] = (addr & 0xfffffffcULL) << 32;
+	/* SDmaGeneration[1:0] */
+	sdmadesc[0] |= (ppd->sdma_generation & 3ULL) <<
+		SDMA_DESC_GEN_LSB;
+	/* SDmaDwordCount[10:0] */
+	sdmadesc[0] |= (dwlen & 0x7ffULL) << SDMA_DESC_COUNT_LSB;
+	/* SDmaBufOffset[12:2] */
+	sdmadesc[0] |= dwoffset & 0x7ffULL;
+}
+
+/* sdma_lock must be held */
+int qib_sdma_make_progress(struct qib_pportdata *ppd)
+{
+	struct list_head *lp = NULL;
+	struct qib_sdma_txreq *txp = NULL;
+	struct qib_devdata *dd = ppd->dd;
+	int progress = 0;
+	u16 hwhead;
+	u16 idx = 0;
+
+	hwhead = dd->f_sdma_gethead(ppd);
+
+	/*
+	 * The reason for some of the complexity of this code is that
+	 * not all descriptors have corresponding txps.  So, we have to
+	 * be able to skip over descs until we wander into the range of
+	 * the next txp on the list.
+	 */
+
+	if (!list_empty(&ppd->sdma_activelist)) {
+		lp = ppd->sdma_activelist.next;
+		txp = list_entry(lp, struct qib_sdma_txreq, list);
+		idx = txp->start_idx;
+	}
+
+	while (ppd->sdma_descq_head != hwhead) {
+		/* if desc is part of this txp, unmap if needed */
+		if (txp && (txp->flags & QIB_SDMA_TXREQ_F_FREEDESC) &&
+		    (idx == ppd->sdma_descq_head)) {
+			unmap_desc(ppd, ppd->sdma_descq_head);
+			if (++idx == ppd->sdma_descq_cnt)
+				idx = 0;
+		}
+
+		/* increment dequeued desc count */
+		ppd->sdma_descq_removed++;
+
+		/* advance head, wrap if needed */
+		if (++ppd->sdma_descq_head == ppd->sdma_descq_cnt)
+			ppd->sdma_descq_head = 0;
+
+		/* if now past this txp's descs, do the callback */
+		if (txp && txp->next_descq_idx == ppd->sdma_descq_head) {
+			/* remove from active list */
+			list_del_init(&txp->list);
+			if (txp->callback)
+				(*txp->callback)(txp, QIB_SDMA_TXREQ_S_OK);
+			/* see if there is another txp */
+			if (list_empty(&ppd->sdma_activelist))
+				txp = NULL;
+			else {
+				lp = ppd->sdma_activelist.next;
+				txp = list_entry(lp, struct qib_sdma_txreq,
+					list);
+				idx = txp->start_idx;
+			}
+		}
+		progress = 1;
+	}
+	if (progress)
+		qib_verbs_sdma_desc_avail(ppd, qib_sdma_descq_freecnt(ppd));
+	return progress;
+}
+
+/*
+ * This is called from interrupt context.
+ */
+void qib_sdma_intr(struct qib_pportdata *ppd)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ppd->sdma_lock, flags);
+
+	__qib_sdma_intr(ppd);
+
+	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+}
+
+void __qib_sdma_intr(struct qib_pportdata *ppd)
+{
+	if (__qib_sdma_running(ppd))
+		qib_sdma_make_progress(ppd);
+}
+
+int qib_setup_sdma(struct qib_pportdata *ppd)
+{
+	struct qib_devdata *dd = ppd->dd;
+	unsigned long flags;
+	int ret = 0;
+
+	ret = alloc_sdma(ppd);
+	if (ret)
+		goto bail;
+
+	/* set consistent sdma state */
+	ppd->dd->f_sdma_init_early(ppd);
+	spin_lock_irqsave(&ppd->sdma_lock, flags);
+	sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
+	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+
+	/* set up reference counting */
+	kref_init(&ppd->sdma_state.kref);
+	init_completion(&ppd->sdma_state.comp);
+
+	ppd->sdma_generation = 0;
+	ppd->sdma_descq_head = 0;
+	ppd->sdma_descq_removed = 0;
+	ppd->sdma_descq_added = 0;
+
+	INIT_LIST_HEAD(&ppd->sdma_activelist);
+
+	tasklet_init(&ppd->sdma_sw_clean_up_task, sdma_sw_clean_up_task,
+		(unsigned long)ppd);
+
+	ret = dd->f_init_sdma_regs(ppd);
+	if (ret)
+		goto bail_alloc;
+
+	qib_sdma_process_event(ppd, qib_sdma_event_e10_go_hw_start);
+
+	return 0;
+
+bail_alloc:
+	qib_teardown_sdma(ppd);
+bail:
+	return ret;
+}
+
+void qib_teardown_sdma(struct qib_pportdata *ppd)
+{
+	qib_sdma_process_event(ppd, qib_sdma_event_e00_go_hw_down);
+
+	/*
+	 * This waits for the state machine to exit so it is not
+	 * necessary to kill the sdma_sw_clean_up_task to make sure
+	 * it is not running.
+	 */
+	sdma_finalput(&ppd->sdma_state);
+
+	free_sdma(ppd);
+}
+
+int qib_sdma_running(struct qib_pportdata *ppd)
+{
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&ppd->sdma_lock, flags);
+	ret = __qib_sdma_running(ppd);
+	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+
+	return ret;
+}
+
+/*
+ * Complete a request when sdma is not running; it is likely the only
+ * request, but to simplify the code we always queue it, then process
+ * the full activelist.  We process the entire list to ensure that this
+ * particular request does get its callback, but in the correct order.
+ * Must be called with sdma_lock held.
+ */
+static void complete_sdma_err_req(struct qib_pportdata *ppd,
+				  struct qib_verbs_txreq *tx)
+{
+	atomic_inc(&tx->qp->s_dma_busy);
+	/* no sdma descriptors, so no unmap_desc */
+	tx->txreq.start_idx = 0;
+	tx->txreq.next_descq_idx = 0;
+	list_add_tail(&tx->txreq.list, &ppd->sdma_activelist);
+	clear_sdma_activelist(ppd);
+}
+
+/*
+ * This function queues one IB packet onto the send DMA queue per call.
+ * The caller is responsible for checking:
+ * 1) The number of send DMA descriptor entries is less than the size of
+ *    the descriptor queue.
+ * 2) The IB SGE addresses and lengths are 32-bit aligned
+ *    (except possibly the last SGE's length)
+ * 3) The SGE addresses are suitable for passing to dma_map_single().
+ */
+int qib_sdma_verbs_send(struct qib_pportdata *ppd,
+			struct qib_sge_state *ss, u32 dwords,
+			struct qib_verbs_txreq *tx)
+{
+	unsigned long flags;
+	struct qib_sge *sge;
+	struct qib_qp *qp;
+	int ret = 0;
+	u16 tail;
+	__le64 *descqp;
+	u64 sdmadesc[2];
+	u32 dwoffset;
+	dma_addr_t addr;
+
+	spin_lock_irqsave(&ppd->sdma_lock, flags);
+
+retry:
+	if (unlikely(!__qib_sdma_running(ppd))) {
+		complete_sdma_err_req(ppd, tx);
+		goto unlock;
+	}
+
+	if (tx->txreq.sg_count > qib_sdma_descq_freecnt(ppd)) {
+		if (qib_sdma_make_progress(ppd))
+			goto retry;
+		if (ppd->dd->flags & QIB_HAS_SDMA_TIMEOUT)
+			ppd->dd->f_sdma_set_desc_cnt(ppd,
+					ppd->sdma_descq_cnt / 2);
+		goto busy;
+	}
+
+	dwoffset = tx->hdr_dwords;
+	make_sdma_desc(ppd, sdmadesc, (u64) tx->txreq.addr, dwoffset, 0);
+
+	sdmadesc[0] |= SDMA_DESC_FIRST;
+	if (tx->txreq.flags & QIB_SDMA_TXREQ_F_USELARGEBUF)
+		sdmadesc[0] |= SDMA_DESC_USE_LARGE_BUF;
+
+	/* write to the descq */
+	tail = ppd->sdma_descq_tail;
+	descqp = &ppd->sdma_descq[tail].qw[0];
+	*descqp++ = cpu_to_le64(sdmadesc[0]);
+	*descqp++ = cpu_to_le64(sdmadesc[1]);
+
+	/* increment the tail */
+	if (++tail == ppd->sdma_descq_cnt) {
+		tail = 0;
+		descqp = &ppd->sdma_descq[0].qw[0];
+		++ppd->sdma_generation;
+	}
+
+	tx->txreq.start_idx = tail;
+
+	sge = &ss->sge;
+	while (dwords) {
+		u32 dw;
+		u32 len;
+
+		len = dwords << 2;
+		if (len > sge->length)
+			len = sge->length;
+		if (len > sge->sge_length)
+			len = sge->sge_length;
+		BUG_ON(len == 0);
+		dw = (len + 3) >> 2;
+		addr = dma_map_single(&ppd->dd->pcidev->dev, sge->vaddr,
+				      dw << 2, DMA_TO_DEVICE);
+		if (dma_mapping_error(&ppd->dd->pcidev->dev, addr))
+			goto unmap;
+		sdmadesc[0] = 0;
+		make_sdma_desc(ppd, sdmadesc, (u64) addr, dw, dwoffset);
+		/* SDmaUseLargeBuf has to be set in every descriptor */
+		if (tx->txreq.flags & QIB_SDMA_TXREQ_F_USELARGEBUF)
+			sdmadesc[0] |= SDMA_DESC_USE_LARGE_BUF;
+		/* write to the descq */
+		*descqp++ = cpu_to_le64(sdmadesc[0]);
+		*descqp++ = cpu_to_le64(sdmadesc[1]);
+
+		/* increment the tail */
+		if (++tail == ppd->sdma_descq_cnt) {
+			tail = 0;
+			descqp = &ppd->sdma_descq[0].qw[0];
+			++ppd->sdma_generation;
+		}
+		sge->vaddr += len;
+		sge->length -= len;
+		sge->sge_length -= len;
+		if (sge->sge_length == 0) {
+			if (--ss->num_sge)
+				*sge = *ss->sg_list++;
+		} else if (sge->length == 0 && sge->mr->lkey) {
+			if (++sge->n >= QIB_SEGSZ) {
+				if (++sge->m >= sge->mr->mapsz)
+					break;
+				sge->n = 0;
+			}
+			sge->vaddr =
+				sge->mr->map[sge->m]->segs[sge->n].vaddr;
+			sge->length =
+				sge->mr->map[sge->m]->segs[sge->n].length;
+		}
+
+		dwoffset += dw;
+		dwords -= dw;
+	}
+
+	if (!tail)
+		descqp = &ppd->sdma_descq[ppd->sdma_descq_cnt].qw[0];
+	descqp -= 2;
+	descqp[0] |= cpu_to_le64(SDMA_DESC_LAST);
+	if (tx->txreq.flags & QIB_SDMA_TXREQ_F_HEADTOHOST)
+		descqp[0] |= cpu_to_le64(SDMA_DESC_DMA_HEAD);
+	if (tx->txreq.flags & QIB_SDMA_TXREQ_F_INTREQ)
+		descqp[0] |= cpu_to_le64(SDMA_DESC_INTR);
+
+	atomic_inc(&tx->qp->s_dma_busy);
+	tx->txreq.next_descq_idx = tail;
+	ppd->dd->f_sdma_update_tail(ppd, tail);
+	ppd->sdma_descq_added += tx->txreq.sg_count;
+	list_add_tail(&tx->txreq.list, &ppd->sdma_activelist);
+	goto unlock;
+
+unmap:
+	for (;;) {
+		if (!tail)
+			tail = ppd->sdma_descq_cnt - 1;
+		else
+			tail--;
+		if (tail == ppd->sdma_descq_tail)
+			break;
+		unmap_desc(ppd, tail);
+	}
+	qp = tx->qp;
+	qib_put_txreq(tx);
+	spin_lock(&qp->s_lock);
+	if (qp->ibqp.qp_type == IB_QPT_RC) {
+		/* XXX what about error sending RDMA read responses? */
+		if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)
+			qib_error_qp(qp, IB_WC_GENERAL_ERR);
+	} else if (qp->s_wqe)
+		qib_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR);
+	spin_unlock(&qp->s_lock);
+	/* return zero to process the next send work request */
+	goto unlock;
+
+busy:
+	qp = tx->qp;
+	spin_lock(&qp->s_lock);
+	if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
+		struct qib_ibdev *dev;
+
+		/*
+		 * If we couldn't queue the DMA request, save the info
+		 * and try again later rather than destroying the
+		 * buffer and undoing the side effects of the copy.
+		 */
+		tx->ss = ss;
+		tx->dwords = dwords;
+		qp->s_tx = tx;
+		dev = &ppd->dd->verbs_dev;
+		spin_lock(&dev->pending_lock);
+		if (list_empty(&qp->iowait)) {
+			struct qib_ibport *ibp;
+
+			ibp = &ppd->ibport_data;
+			ibp->n_dmawait++;
+			qp->s_flags |= QIB_S_WAIT_DMA_DESC;
+			list_add_tail(&qp->iowait, &dev->dmawait);
+		}
+		spin_unlock(&dev->pending_lock);
+		qp->s_flags &= ~QIB_S_BUSY;
+		spin_unlock(&qp->s_lock);
+		ret = -EBUSY;
+	} else {
+		spin_unlock(&qp->s_lock);
+		qib_put_txreq(tx);
+	}
+unlock:
+	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+	return ret;
+}
+
+void qib_sdma_process_event(struct qib_pportdata *ppd,
+	enum qib_sdma_events event)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ppd->sdma_lock, flags);
+
+	__qib_sdma_process_event(ppd, event);
+
+	if (ppd->sdma_state.current_state == qib_sdma_state_s99_running)
+		qib_verbs_sdma_desc_avail(ppd, qib_sdma_descq_freecnt(ppd));
+
+	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+}
+
+void __qib_sdma_process_event(struct qib_pportdata *ppd,
+	enum qib_sdma_events event)
+{
+	struct qib_sdma_state *ss = &ppd->sdma_state;
+
+	switch (ss->current_state) {
+	case qib_sdma_state_s00_hw_down:
+		switch (event) {
+		case qib_sdma_event_e00_go_hw_down:
+			break;
+		case qib_sdma_event_e30_go_running:
+			/*
+			 * If down, but running is requested (usually as a
+			 * result of link up), then we need to start up.
+			 * This can happen when hw down is requested while
+			 * bringing the link up with traffic active on the
+			 * 7220, e.g.
+			 */
+			ss->go_s99_running = 1;
+			/* fall through and start dma engine */
+		case qib_sdma_event_e10_go_hw_start:
+			/* This reference means the state machine is started */
+			sdma_get(&ppd->sdma_state);
+			sdma_set_state(ppd,
+				       qib_sdma_state_s10_hw_start_up_wait);
+			break;
+		case qib_sdma_event_e20_hw_started:
+			break;
+		case qib_sdma_event_e40_sw_cleaned:
+			sdma_sw_tear_down(ppd);
+			break;
+		case qib_sdma_event_e50_hw_cleaned:
+			break;
+		case qib_sdma_event_e60_hw_halted:
+			break;
+		case qib_sdma_event_e70_go_idle:
+			break;
+		case qib_sdma_event_e7220_err_halted:
+			break;
+		case qib_sdma_event_e7322_err_halted:
+			break;
+		case qib_sdma_event_e90_timer_tick:
+			break;
+		}
+		break;
+
+	case qib_sdma_state_s10_hw_start_up_wait:
+		switch (event) {
+		case qib_sdma_event_e00_go_hw_down:
+			sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
+			sdma_sw_tear_down(ppd);
+			break;
+		case qib_sdma_event_e10_go_hw_start:
+			break;
+		case qib_sdma_event_e20_hw_started:
+			sdma_set_state(ppd, ss->go_s99_running ?
+				       qib_sdma_state_s99_running :
+				       qib_sdma_state_s20_idle);
+			break;
+		case qib_sdma_event_e30_go_running:
+			ss->go_s99_running = 1;
+			break;
+		case qib_sdma_event_e40_sw_cleaned:
+			break;
+		case qib_sdma_event_e50_hw_cleaned:
+			break;
+		case qib_sdma_event_e60_hw_halted:
+			break;
+		case qib_sdma_event_e70_go_idle:
+			ss->go_s99_running = 0;
+			break;
+		case qib_sdma_event_e7220_err_halted:
+			break;
+		case qib_sdma_event_e7322_err_halted:
+			break;
+		case qib_sdma_event_e90_timer_tick:
+			break;
+		}
+		break;
+
+	case qib_sdma_state_s20_idle:
+		switch (event) {
+		case qib_sdma_event_e00_go_hw_down:
+			sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
+			sdma_sw_tear_down(ppd);
+			break;
+		case qib_sdma_event_e10_go_hw_start:
+			break;
+		case qib_sdma_event_e20_hw_started:
+			break;
+		case qib_sdma_event_e30_go_running:
+			sdma_set_state(ppd, qib_sdma_state_s99_running);
+			ss->go_s99_running = 1;
+			break;
+		case qib_sdma_event_e40_sw_cleaned:
+			break;
+		case qib_sdma_event_e50_hw_cleaned:
+			break;
+		case qib_sdma_event_e60_hw_halted:
+			break;
+		case qib_sdma_event_e70_go_idle:
+			break;
+		case qib_sdma_event_e7220_err_halted:
+			break;
+		case qib_sdma_event_e7322_err_halted:
+			break;
+		case qib_sdma_event_e90_timer_tick:
+			break;
+		}
+		break;
+
+	case qib_sdma_state_s30_sw_clean_up_wait:
+		switch (event) {
+		case qib_sdma_event_e00_go_hw_down:
+			sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
+			break;
+		case qib_sdma_event_e10_go_hw_start:
+			break;
+		case qib_sdma_event_e20_hw_started:
+			break;
+		case qib_sdma_event_e30_go_running:
+			ss->go_s99_running = 1;
+			break;
+		case qib_sdma_event_e40_sw_cleaned:
+			sdma_set_state(ppd,
+				       qib_sdma_state_s10_hw_start_up_wait);
+			sdma_hw_start_up(ppd);
+			break;
+		case qib_sdma_event_e50_hw_cleaned:
+			break;
+		case qib_sdma_event_e60_hw_halted:
+			break;
+		case qib_sdma_event_e70_go_idle:
+			ss->go_s99_running = 0;
+			break;
+		case qib_sdma_event_e7220_err_halted:
+			break;
+		case qib_sdma_event_e7322_err_halted:
+			break;
+		case qib_sdma_event_e90_timer_tick:
+			break;
+		}
+		break;
+
+	case qib_sdma_state_s40_hw_clean_up_wait:
+		switch (event) {
+		case qib_sdma_event_e00_go_hw_down:
+			sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
+			sdma_start_sw_clean_up(ppd);
+			break;
+		case qib_sdma_event_e10_go_hw_start:
+			break;
+		case qib_sdma_event_e20_hw_started:
+			break;
+		case qib_sdma_event_e30_go_running:
+			ss->go_s99_running = 1;
+			break;
+		case qib_sdma_event_e40_sw_cleaned:
+			break;
+		case qib_sdma_event_e50_hw_cleaned:
+			sdma_set_state(ppd,
+				       qib_sdma_state_s30_sw_clean_up_wait);
+			sdma_start_sw_clean_up(ppd);
+			break;
+		case qib_sdma_event_e60_hw_halted:
+			break;
+		case qib_sdma_event_e70_go_idle:
+			ss->go_s99_running = 0;
+			break;
+		case qib_sdma_event_e7220_err_halted:
+			break;
+		case qib_sdma_event_e7322_err_halted:
+			break;
+		case qib_sdma_event_e90_timer_tick:
+			break;
+		}
+		break;
+
+	case qib_sdma_state_s50_hw_halt_wait:
+		switch (event) {
+		case qib_sdma_event_e00_go_hw_down:
+			sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
+			sdma_start_sw_clean_up(ppd);
+			break;
+		case qib_sdma_event_e10_go_hw_start:
+			break;
+		case qib_sdma_event_e20_hw_started:
+			break;
+		case qib_sdma_event_e30_go_running:
+			ss->go_s99_running = 1;
+			break;
+		case qib_sdma_event_e40_sw_cleaned:
+			break;
+		case qib_sdma_event_e50_hw_cleaned:
+			break;
+		case qib_sdma_event_e60_hw_halted:
+			sdma_set_state(ppd,
+				       qib_sdma_state_s40_hw_clean_up_wait);
+			ppd->dd->f_sdma_hw_clean_up(ppd);
+			break;
+		case qib_sdma_event_e70_go_idle:
+			ss->go_s99_running = 0;
+			break;
+		case qib_sdma_event_e7220_err_halted:
+			break;
+		case qib_sdma_event_e7322_err_halted:
+			break;
+		case qib_sdma_event_e90_timer_tick:
+			break;
+		}
+		break;
+
+	case qib_sdma_state_s99_running:
+		switch (event) {
+		case qib_sdma_event_e00_go_hw_down:
+			sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
+			sdma_start_sw_clean_up(ppd);
+			break;
+		case qib_sdma_event_e10_go_hw_start:
+			break;
+		case qib_sdma_event_e20_hw_started:
+			break;
+		case qib_sdma_event_e30_go_running:
+			break;
+		case qib_sdma_event_e40_sw_cleaned:
+			break;
+		case qib_sdma_event_e50_hw_cleaned:
+			break;
+		case qib_sdma_event_e60_hw_halted:
+			sdma_set_state(ppd,
+				       qib_sdma_state_s30_sw_clean_up_wait);
+			sdma_start_sw_clean_up(ppd);
+			break;
+		case qib_sdma_event_e70_go_idle:
+			sdma_set_state(ppd, qib_sdma_state_s50_hw_halt_wait);
+			ss->go_s99_running = 0;
+			break;
+		case qib_sdma_event_e7220_err_halted:
+			sdma_set_state(ppd,
+				       qib_sdma_state_s30_sw_clean_up_wait);
+			sdma_start_sw_clean_up(ppd);
+			break;
+		case qib_sdma_event_e7322_err_halted:
+			sdma_set_state(ppd, qib_sdma_state_s50_hw_halt_wait);
+			break;
+		case qib_sdma_event_e90_timer_tick:
+			break;
+		}
+		break;
+	}
+
+	ss->last_event = event;
+}
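
For reference, the first quadword that make_sdma_desc() builds can be read
directly off the SDMA_DESC_* constants above. A standalone packing sketch
of that layout (sdma_pack_qw0() is hypothetical; the field positions and
masks are exactly those used in make_sdma_desc()):

    /* Pack qw0 of a send DMA descriptor as make_sdma_desc() does. */
    static u64 sdma_pack_qw0(u64 addr, u64 dwlen, u64 dwoffset, u64 gen)
    {
    	u64 qw0;

    	qw0 = (addr & 0xfffffffcULL) << 32;		  /* SDmaPhyAddr[31:0] */
    	qw0 |= (gen & 3ULL) << SDMA_DESC_GEN_LSB;	  /* SDmaGeneration[1:0] */
    	qw0 |= (dwlen & 0x7ffULL) << SDMA_DESC_COUNT_LSB; /* SDmaDwordCount[10:0] */
    	qw0 |= dwoffset & 0x7ffULL;			  /* SDmaBufOffset[12:2] */
    	return qw0;
    }

The second quadword simply carries SDmaPhyAddr[47:32] (addr >> 32), and the
caller ORs flag bits such as SDMA_DESC_FIRST/SDMA_DESC_LAST into qw0.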
diff --git a/drivers/infiniband/hw/qib/qib_srq.c b/drivers/infiniband/hw/qib/qib_srq.c
new file mode 100644
index 000000000000..c3ec8efc2ed8
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_srq.c
@@ -0,0 +1,375 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include "qib_verbs.h"
+
+/**
+ * qib_post_srq_receive - post a receive on a shared receive queue
+ * @ibsrq: the SRQ to post the receive on
+ * @wr: the list of work requests to post
+ * @bad_wr: A pointer to the first WR to cause a problem is put here
+ *
+ * This may be called from interrupt context.
+ */
+int qib_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
+			 struct ib_recv_wr **bad_wr)
+{
+	struct qib_srq *srq = to_isrq(ibsrq);
+	struct qib_rwq *wq;
+	unsigned long flags;
+	int ret;
+
+	for (; wr; wr = wr->next) {
+		struct qib_rwqe *wqe;
+		u32 next;
+		int i;
+
+		if ((unsigned) wr->num_sge > srq->rq.max_sge) {
+			*bad_wr = wr;
+			ret = -EINVAL;
+			goto bail;
+		}
+
+		spin_lock_irqsave(&srq->rq.lock, flags);
+		wq = srq->rq.wq;
+		next = wq->head + 1;
+		if (next >= srq->rq.size)
+			next = 0;
+		if (next == wq->tail) {
+			spin_unlock_irqrestore(&srq->rq.lock, flags);
+			*bad_wr = wr;
+			ret = -ENOMEM;
+			goto bail;
+		}
+
+		wqe = get_rwqe_ptr(&srq->rq, wq->head);
+		wqe->wr_id = wr->wr_id;
+		wqe->num_sge = wr->num_sge;
+		for (i = 0; i < wr->num_sge; i++)
+			wqe->sg_list[i] = wr->sg_list[i];
+		/* Make sure queue entry is written before the head index. */
+		smp_wmb();
+		wq->head = next;
+		spin_unlock_irqrestore(&srq->rq.lock, flags);
+	}
+	ret = 0;
+
+bail:
+	return ret;
+}
+
+/**
+ * qib_create_srq - create a shared receive queue
+ * @ibpd: the protection domain of the SRQ to create
+ * @srq_init_attr: the attributes of the SRQ
+ * @udata: data from libibverbs when creating a user SRQ
+ */
+struct ib_srq *qib_create_srq(struct ib_pd *ibpd,
+			      struct ib_srq_init_attr *srq_init_attr,
+			      struct ib_udata *udata)
+{
+	struct qib_ibdev *dev = to_idev(ibpd->device);
+	struct qib_srq *srq;
+	u32 sz;
+	struct ib_srq *ret;
+
+	if (srq_init_attr->attr.max_sge == 0 ||
+	    srq_init_attr->attr.max_sge > ib_qib_max_srq_sges ||
+	    srq_init_attr->attr.max_wr == 0 ||
+	    srq_init_attr->attr.max_wr > ib_qib_max_srq_wrs) {
+		ret = ERR_PTR(-EINVAL);
+		goto done;
+	}
+
+	srq = kmalloc(sizeof(*srq), GFP_KERNEL);
+	if (!srq) {
+		ret = ERR_PTR(-ENOMEM);
+		goto done;
+	}
+
+	/*
+	 * Need to use vmalloc() if we want to support large #s of entries.
+	 */
+	srq->rq.size = srq_init_attr->attr.max_wr + 1;
+	srq->rq.max_sge = srq_init_attr->attr.max_sge;
+	sz = sizeof(struct ib_sge) * srq->rq.max_sge +
+		sizeof(struct qib_rwqe);
+	srq->rq.wq = vmalloc_user(sizeof(struct qib_rwq) + srq->rq.size * sz);
+	if (!srq->rq.wq) {
+		ret = ERR_PTR(-ENOMEM);
+		goto bail_srq;
+	}
+
+	/*
+	 * Return the address of the RWQ as the offset to mmap.
+	 * See qib_mmap() for details.
+	 */
+	if (udata && udata->outlen >= sizeof(__u64)) {
+		int err;
+		u32 s = sizeof(struct qib_rwq) + srq->rq.size * sz;
+
+		srq->ip =
+		    qib_create_mmap_info(dev, s, ibpd->uobject->context,
+					 srq->rq.wq);
+		if (!srq->ip) {
+			ret = ERR_PTR(-ENOMEM);
+			goto bail_wq;
+		}
+
+		err = ib_copy_to_udata(udata, &srq->ip->offset,
+				       sizeof(srq->ip->offset));
+		if (err) {
+			ret = ERR_PTR(err);
+			goto bail_ip;
+		}
+	} else
+		srq->ip = NULL;
+
+	/*
+	 * ib_create_srq() will initialize srq->ibsrq.
+	 */
+	spin_lock_init(&srq->rq.lock);
+	srq->rq.wq->head = 0;
+	srq->rq.wq->tail = 0;
+	srq->limit = srq_init_attr->attr.srq_limit;
+
+	spin_lock(&dev->n_srqs_lock);
+	if (dev->n_srqs_allocated == ib_qib_max_srqs) {
+		spin_unlock(&dev->n_srqs_lock);
+		ret = ERR_PTR(-ENOMEM);
+		goto bail_ip;
+	}
+
+	dev->n_srqs_allocated++;
+	spin_unlock(&dev->n_srqs_lock);
+
+	if (srq->ip) {
+		spin_lock_irq(&dev->pending_lock);
+		list_add(&srq->ip->pending_mmaps, &dev->pending_mmaps);
+		spin_unlock_irq(&dev->pending_lock);
+	}
+
+	ret = &srq->ibsrq;
+	goto done;
+
+bail_ip:
+	kfree(srq->ip);
+bail_wq:
+	vfree(srq->rq.wq);
+bail_srq:
+	kfree(srq);
+done:
+	return ret;
+}
+
+/**
+ * qib_modify_srq - modify a shared receive queue
+ * @ibsrq: the SRQ to modify
+ * @attr: the new attributes of the SRQ
+ * @attr_mask: indicates which attributes to modify
+ * @udata: user data for libibverbs.so
+ */
+int qib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
+		   enum ib_srq_attr_mask attr_mask,
+		   struct ib_udata *udata)
+{
+	struct qib_srq *srq = to_isrq(ibsrq);
+	struct qib_rwq *wq;
+	int ret = 0;
+
+	if (attr_mask & IB_SRQ_MAX_WR) {
+		struct qib_rwq *owq;
+		struct qib_rwqe *p;
+		u32 sz, size, n, head, tail;
+
+		/* Check that the requested sizes are below the limits. */
+		if ((attr->max_wr > ib_qib_max_srq_wrs) ||
+		    ((attr_mask & IB_SRQ_LIMIT) ?
+		     attr->srq_limit : srq->limit) > attr->max_wr) {
+			ret = -EINVAL;
+			goto bail;
+		}
+
+		sz = sizeof(struct qib_rwqe) +
+			srq->rq.max_sge * sizeof(struct ib_sge);
+		size = attr->max_wr + 1;
+		wq = vmalloc_user(sizeof(struct qib_rwq) + size * sz);
+		if (!wq) {
+			ret = -ENOMEM;
+			goto bail;
+		}
+
+		/* Check that we can write the offset to mmap. */
+		if (udata && udata->inlen >= sizeof(__u64)) {
+			__u64 offset_addr;
+			__u64 offset = 0;
+
+			ret = ib_copy_from_udata(&offset_addr, udata,
+						 sizeof(offset_addr));
+			if (ret)
+				goto bail_free;
+			udata->outbuf =
+				(void __user *) (unsigned long) offset_addr;
+			ret = ib_copy_to_udata(udata, &offset,
+					       sizeof(offset));
+			if (ret)
+				goto bail_free;
+		}
+
+		spin_lock_irq(&srq->rq.lock);
+		/*
+		 * Validate head and tail pointer values and compute
+		 * the number of remaining WQEs.
+		 */
+		owq = srq->rq.wq;
+		head = owq->head;
+		tail = owq->tail;
+		if (head >= srq->rq.size || tail >= srq->rq.size) {
+			ret = -EINVAL;
+			goto bail_unlock;
+		}
+		n = head;
+		if (n < tail)
+			n += srq->rq.size - tail;
+		else
+			n -= tail;
+		if (size <= n) {
+			ret = -EINVAL;
+			goto bail_unlock;
+		}
+		n = 0;
+		p = wq->wq;
+		while (tail != head) {
+			struct qib_rwqe *wqe;
+			int i;
+
+			wqe = get_rwqe_ptr(&srq->rq, tail);
+			p->wr_id = wqe->wr_id;
+			p->num_sge = wqe->num_sge;
+			for (i = 0; i < wqe->num_sge; i++)
+				p->sg_list[i] = wqe->sg_list[i];
+			n++;
+			p = (struct qib_rwqe *)((char *) p + sz);
+			if (++tail >= srq->rq.size)
+				tail = 0;
+		}
+		srq->rq.wq = wq;
+		srq->rq.size = size;
+		wq->head = n;
+		wq->tail = 0;
+		if (attr_mask & IB_SRQ_LIMIT)
+			srq->limit = attr->srq_limit;
+		spin_unlock_irq(&srq->rq.lock);
+
+		vfree(owq);
+
+		if (srq->ip) {
+			struct qib_mmap_info *ip = srq->ip;
+			struct qib_ibdev *dev = to_idev(srq->ibsrq.device);
+			u32 s = sizeof(struct qib_rwq) + size * sz;
+
+			qib_update_mmap_info(dev, ip, s, wq);
+
+			/*
+			 * Return the offset to mmap.
+			 * See qib_mmap() for details.
+			 */
+			if (udata && udata->inlen >= sizeof(__u64)) {
+				ret = ib_copy_to_udata(udata, &ip->offset,
+						       sizeof(ip->offset));
+				if (ret)
+					goto bail;
+			}
+
+			/*
+			 * Put user mapping info onto the pending list
+			 * unless it already is on the list.
+			 */
+			spin_lock_irq(&dev->pending_lock);
+			if (list_empty(&ip->pending_mmaps))
+				list_add(&ip->pending_mmaps,
+					 &dev->pending_mmaps);
+			spin_unlock_irq(&dev->pending_lock);
+		}
+	} else if (attr_mask & IB_SRQ_LIMIT) {
+		spin_lock_irq(&srq->rq.lock);
+		if (attr->srq_limit >= srq->rq.size)
+			ret = -EINVAL;
+		else
+			srq->limit = attr->srq_limit;
+		spin_unlock_irq(&srq->rq.lock);
+	}
+	goto bail;
+
+bail_unlock:
+	spin_unlock_irq(&srq->rq.lock);
+bail_free:
+	vfree(wq);
+bail:
+	return ret;
+}
+
+int qib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
+{
+	struct qib_srq *srq = to_isrq(ibsrq);
+
+	attr->max_wr = srq->rq.size - 1;
+	attr->max_sge = srq->rq.max_sge;
+	attr->srq_limit = srq->limit;
+	return 0;
+}
+
+/**
+ * qib_destroy_srq - destroy a shared receive queue
+ * @ibsrq: the SRQ to destroy
+ */
+int qib_destroy_srq(struct ib_srq *ibsrq)
+{
+	struct qib_srq *srq = to_isrq(ibsrq);
+	struct qib_ibdev *dev = to_idev(ibsrq->device);
+
+	spin_lock(&dev->n_srqs_lock);
+	dev->n_srqs_allocated--;
+	spin_unlock(&dev->n_srqs_lock);
+	if (srq->ip)
+		kref_put(&srq->ip->ref, qib_release_mmap_info);
+	else
+		vfree(srq->rq.wq);
+	kfree(srq);
+
+	return 0;
+}
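
One detail worth calling out from qib_modify_srq() above: the resize path
counts the WQEs still outstanding with modular head/tail arithmetic before
copying them into the new ring. The same computation, pulled out as a
sketch (rq_count_used() is illustrative; head, tail and size are the ring
fields used above):

    /* WQEs queued between tail and head, accounting for wraparound. */
    static u32 rq_count_used(u32 head, u32 tail, u32 size)
    {
    	return (head >= tail) ? head - tail : head + size - tail;
    }

The resize is rejected with -EINVAL unless the new ring is strictly larger
than this count, since wq->head is reset to the number of copied entries.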
diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c
new file mode 100644
index 000000000000..dab4d9f4a2cc
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_sysfs.c
@@ -0,0 +1,691 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/ctype.h>
+
+#include "qib.h"
+
+/**
+ * qib_parse_ushort - parse an unsigned short value in an arbitrary base
+ * @str: the string containing the number
+ * @valp: where to put the result
+ *
+ * Returns the number of bytes consumed, or negative value on error.
+ */
+static int qib_parse_ushort(const char *str, unsigned short *valp)
+{
+	unsigned long val;
+	char *end;
+	int ret;
+
+	if (!isdigit(str[0])) {
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	val = simple_strtoul(str, &end, 0);
+
+	if (val > 0xffff) {
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	*valp = val;
+
+	ret = end + 1 - str;
+	if (ret == 0)
+		ret = -EINVAL;
+
+bail:
+	return ret;
+}
+
+/* start of per-port functions */
+/*
+ * Get/Set heartbeat enable. Bitwise OR of 1=enabled, 2=auto.
+ */
+static ssize_t show_hrtbt_enb(struct qib_pportdata *ppd, char *buf)
+{
+	struct qib_devdata *dd = ppd->dd;
+	int ret;
+
+	ret = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_HRTBT);
+	ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret);
+	return ret;
+}
+
+static ssize_t store_hrtbt_enb(struct qib_pportdata *ppd, const char *buf,
+			       size_t count)
+{
+	struct qib_devdata *dd = ppd->dd;
+	int ret;
+	u16 val;
+
+	ret = qib_parse_ushort(buf, &val);
+
+	/*
+	 * Set the "intentional" heartbeat enable per either of
+	 * "Enable" and "Auto", as these are normally set together.
+	 * This bit is consulted when leaving loopback mode,
+	 * because entering loopback mode overrides it and automatically
+	 * disables heartbeat.
+	 */
+	if (ret >= 0)
+		ret = dd->f_set_ib_cfg(ppd, QIB_IB_CFG_HRTBT, val);
+	if (ret < 0)
+		qib_dev_err(dd, "attempt to set invalid Heartbeat enable\n");
+	return ret < 0 ? ret : count;
+}
+
+static ssize_t store_loopback(struct qib_pportdata *ppd, const char *buf,
+			      size_t count)
+{
+	struct qib_devdata *dd = ppd->dd;
+	int ret = count, r;
+
+	r = dd->f_set_ib_loopback(ppd, buf);
+	if (r < 0)
+		ret = r;
+
+	return ret;
+}
+
+static ssize_t store_led_override(struct qib_pportdata *ppd, const char *buf,
+				  size_t count)
+{
+	struct qib_devdata *dd = ppd->dd;
+	int ret;
+	u16 val;
+
+	ret = qib_parse_ushort(buf, &val);
+	if (ret > 0)
+		qib_set_led_override(ppd, val);
+	else
+		qib_dev_err(dd, "attempt to set invalid LED override\n");
+	return ret < 0 ? ret : count;
+}
+
+static ssize_t show_status(struct qib_pportdata *ppd, char *buf)
+{
+	ssize_t ret;
+
+	if (!ppd->statusp)
+		ret = -EINVAL;
+	else
+		ret = scnprintf(buf, PAGE_SIZE, "0x%llx\n",
+				(unsigned long long) *(ppd->statusp));
+	return ret;
+}
+
+/*
+ * For userland compatibility, these offsets must remain fixed.
+ * They are strings for QIB_STATUS_*
+ */
+static const char *qib_status_str[] = {
+	"Initted",
+	"",
+	"",
+	"",
+	"",
+	"Present",
+	"IB_link_up",
+	"IB_configured",
+	"",
+	"Fatal_Hardware_Error",
+	NULL,
+};
+
+static ssize_t show_status_str(struct qib_pportdata *ppd, char *buf)
+{
+	int i, any;
+	u64 s;
+	ssize_t ret;
+
+	if (!ppd->statusp) {
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	s = *(ppd->statusp);
+	*buf = '\0';
+	for (any = i = 0; s && qib_status_str[i]; i++) {
+		if (s & 1) {
+			/* if overflow */
+			if (any && strlcat(buf, " ", PAGE_SIZE) >= PAGE_SIZE)
+				break;
+			if (strlcat(buf, qib_status_str[i], PAGE_SIZE) >=
+					PAGE_SIZE)
+				break;
+			any = 1;
+		}
+		s >>= 1;
+	}
+	if (any)
+		strlcat(buf, "\n", PAGE_SIZE);
+
+	ret = strlen(buf);
+
+bail:
+	return ret;
+}
+
+/* end of per-port functions */
+
+/*
+ * Start of per-port file structures and support code
+ * Because we are fitting into other infrastructure, we have to supply the
+ * full set of kobject/sysfs_ops structures and routines.
+ */
+#define QIB_PORT_ATTR(name, mode, show, store) \
+	static struct qib_port_attr qib_port_attr_##name = \
+		__ATTR(name, mode, show, store)
+
+struct qib_port_attr {
+	struct attribute attr;
+	ssize_t (*show)(struct qib_pportdata *, char *);
+	ssize_t (*store)(struct qib_pportdata *, const char *, size_t);
+};
+
+QIB_PORT_ATTR(loopback, S_IWUSR, NULL, store_loopback);
+QIB_PORT_ATTR(led_override, S_IWUSR, NULL, store_led_override);
+QIB_PORT_ATTR(hrtbt_enable, S_IWUSR | S_IRUGO, show_hrtbt_enb,
+	      store_hrtbt_enb);
+QIB_PORT_ATTR(status, S_IRUGO, show_status, NULL);
+QIB_PORT_ATTR(status_str, S_IRUGO, show_status_str, NULL);
+
+static struct attribute *port_default_attributes[] = {
+	&qib_port_attr_loopback.attr,
+	&qib_port_attr_led_override.attr,
+	&qib_port_attr_hrtbt_enable.attr,
+	&qib_port_attr_status.attr,
+	&qib_port_attr_status_str.attr,
+	NULL
+};
+
+static ssize_t qib_portattr_show(struct kobject *kobj,
+	struct attribute *attr, char *buf)
+{
+	struct qib_port_attr *pattr =
+		container_of(attr, struct qib_port_attr, attr);
+	struct qib_pportdata *ppd =
+		container_of(kobj, struct qib_pportdata, pport_kobj);
+
+	return pattr->show(ppd, buf);
+}
+
+static ssize_t qib_portattr_store(struct kobject *kobj,
+	struct attribute *attr, const char *buf, size_t len)
+{
+	struct qib_port_attr *pattr =
+		container_of(attr, struct qib_port_attr, attr);
+	struct qib_pportdata *ppd =
+		container_of(kobj, struct qib_pportdata, pport_kobj);
+
+	return pattr->store(ppd, buf, len);
+}
+
+static void qib_port_release(struct kobject *kobj)
+{
+	/* nothing to do since memory is freed by qib_free_devdata() */
+}
+
+static const struct sysfs_ops qib_port_ops = {
+	.show = qib_portattr_show,
+	.store = qib_portattr_store,
+};
+
+static struct kobj_type qib_port_ktype = {
+	.release = qib_port_release,
+	.sysfs_ops = &qib_port_ops,
+	.default_attrs = port_default_attributes
+};
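+
+/*
+ * Usage sketch (paths are illustrative, not defined in this file): once
+ * qib_create_port_files() below has run, these attributes appear under
+ * the per-port "linkcontrol" kobject, e.g.
+ *
+ *	# cat /sys/class/infiniband/qib0/ports/1/linkcontrol/hrtbt_enable
+ *	1
+ *	# echo 1 > /sys/class/infiniband/qib0/ports/1/linkcontrol/loopback
+ *
+ * Reads dispatch through qib_portattr_show() to the attribute's ->show()
+ * method, writes through qib_portattr_store() to its ->store() method.
+ */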
+
+/* Start sl2vl */
+
+#define QIB_SL2VL_ATTR(N) \
+	static struct qib_sl2vl_attr qib_sl2vl_attr_##N = { \
+		.attr = { .name = __stringify(N), .mode = 0444 }, \
+		.sl = N \
+	}
+
+struct qib_sl2vl_attr {
+	struct attribute attr;
+	int sl;
+};
+
+QIB_SL2VL_ATTR(0);
+QIB_SL2VL_ATTR(1);
+QIB_SL2VL_ATTR(2);
+QIB_SL2VL_ATTR(3);
+QIB_SL2VL_ATTR(4);
+QIB_SL2VL_ATTR(5);
+QIB_SL2VL_ATTR(6);
+QIB_SL2VL_ATTR(7);
+QIB_SL2VL_ATTR(8);
+QIB_SL2VL_ATTR(9);
+QIB_SL2VL_ATTR(10);
+QIB_SL2VL_ATTR(11);
+QIB_SL2VL_ATTR(12);
+QIB_SL2VL_ATTR(13);
+QIB_SL2VL_ATTR(14);
+QIB_SL2VL_ATTR(15);
+
+static struct attribute *sl2vl_default_attributes[] = {
+	&qib_sl2vl_attr_0.attr,
+	&qib_sl2vl_attr_1.attr,
+	&qib_sl2vl_attr_2.attr,
+	&qib_sl2vl_attr_3.attr,
+	&qib_sl2vl_attr_4.attr,
+	&qib_sl2vl_attr_5.attr,
+	&qib_sl2vl_attr_6.attr,
+	&qib_sl2vl_attr_7.attr,
+	&qib_sl2vl_attr_8.attr,
+	&qib_sl2vl_attr_9.attr,
+	&qib_sl2vl_attr_10.attr,
+	&qib_sl2vl_attr_11.attr,
+	&qib_sl2vl_attr_12.attr,
+	&qib_sl2vl_attr_13.attr,
+	&qib_sl2vl_attr_14.attr,
+	&qib_sl2vl_attr_15.attr,
+	NULL
+};
+
+static ssize_t sl2vl_attr_show(struct kobject *kobj, struct attribute *attr,
+			       char *buf)
+{
+	struct qib_sl2vl_attr *sattr =
+		container_of(attr, struct qib_sl2vl_attr, attr);
+	struct qib_pportdata *ppd =
+		container_of(kobj, struct qib_pportdata, sl2vl_kobj);
+	struct qib_ibport *qibp = &ppd->ibport_data;
+
+	return sprintf(buf, "%u\n", qibp->sl_to_vl[sattr->sl]);
+}
+
+static const struct sysfs_ops qib_sl2vl_ops = {
+	.show = sl2vl_attr_show,
+};
+
+static struct kobj_type qib_sl2vl_ktype = {
+	.release = qib_port_release,
+	.sysfs_ops = &qib_sl2vl_ops,
+	.default_attrs = sl2vl_default_attributes
+};
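+
+/*
+ * Read sketch (illustrative): each file under "sl2vl" is named for a
+ * service level, and reading it returns the virtual lane currently
+ * mapped to that SL, e.g. "cat .../sl2vl/3" might print "0".
+ */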
+
+/* End sl2vl */
+
+/* Start diag_counters */
+
+#define QIB_DIAGC_ATTR(N) \
+	static struct qib_diagc_attr qib_diagc_attr_##N = { \
+		.attr = { .name = __stringify(N), .mode = 0444 }, \
+		.counter = offsetof(struct qib_ibport, n_##N) \
+	}
+
+struct qib_diagc_attr {
+	struct attribute attr;
+	size_t counter;
+};
+
+QIB_DIAGC_ATTR(rc_resends);
+QIB_DIAGC_ATTR(rc_acks);
+QIB_DIAGC_ATTR(rc_qacks);
+QIB_DIAGC_ATTR(rc_delayed_comp);
+QIB_DIAGC_ATTR(seq_naks);
+QIB_DIAGC_ATTR(rdma_seq);
+QIB_DIAGC_ATTR(rnr_naks);
+QIB_DIAGC_ATTR(other_naks);
+QIB_DIAGC_ATTR(rc_timeouts);
+QIB_DIAGC_ATTR(loop_pkts);
+QIB_DIAGC_ATTR(pkt_drops);
+QIB_DIAGC_ATTR(dmawait);
+QIB_DIAGC_ATTR(unaligned);
+QIB_DIAGC_ATTR(rc_dupreq);
+QIB_DIAGC_ATTR(rc_seqnak);
+
+static struct attribute *diagc_default_attributes[] = {
+	&qib_diagc_attr_rc_resends.attr,
+	&qib_diagc_attr_rc_acks.attr,
+	&qib_diagc_attr_rc_qacks.attr,
+	&qib_diagc_attr_rc_delayed_comp.attr,
+	&qib_diagc_attr_seq_naks.attr,
+	&qib_diagc_attr_rdma_seq.attr,
+	&qib_diagc_attr_rnr_naks.attr,
+	&qib_diagc_attr_other_naks.attr,
+	&qib_diagc_attr_rc_timeouts.attr,
+	&qib_diagc_attr_loop_pkts.attr,
+	&qib_diagc_attr_pkt_drops.attr,
+	&qib_diagc_attr_dmawait.attr,
+	&qib_diagc_attr_unaligned.attr,
+	&qib_diagc_attr_rc_dupreq.attr,
+	&qib_diagc_attr_rc_seqnak.attr,
+	NULL
+};
+
+static ssize_t diagc_attr_show(struct kobject *kobj, struct attribute *attr,
+			       char *buf)
+{
+	struct qib_diagc_attr *dattr =
+		container_of(attr, struct qib_diagc_attr, attr);
+	struct qib_pportdata *ppd =
+		container_of(kobj, struct qib_pportdata, diagc_kobj);
+	struct qib_ibport *qibp = &ppd->ibport_data;
+
+	return sprintf(buf, "%u\n", *(u32 *)((char *)qibp + dattr->counter));
+}
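+
+/*
+ * How the lookup works (restating the code above): QIB_DIAGC_ATTR(rc_acks)
+ * stores offsetof(struct qib_ibport, n_rc_acks) in ->counter, so
+ * diagc_attr_show() recovers the u32 generically:
+ *
+ *	u32 v = *(u32 *)((char *)qibp + dattr->counter);
+ *
+ * One show routine thus serves all fifteen counters instead of one
+ * accessor per field.
+ */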
+
+static const struct sysfs_ops qib_diagc_ops = {
+	.show = diagc_attr_show,
+};
+
+static struct kobj_type qib_diagc_ktype = {
+	.release = qib_port_release,
+	.sysfs_ops = &qib_diagc_ops,
+	.default_attrs = diagc_default_attributes
+};
+
+/* End diag_counters */
+
+/* end of per-port file structures and support code */
+
+/*
+ * Start of per-unit (or driver, in some cases, but replicated
+ * per unit) functions (these get a device *)
+ */
+static ssize_t show_rev(struct device *device, struct device_attribute *attr,
+			char *buf)
+{
+	struct qib_ibdev *dev =
+		container_of(device, struct qib_ibdev, ibdev.dev);
+
+	return sprintf(buf, "%x\n", dd_from_dev(dev)->minrev);
+}
+
+static ssize_t show_hca(struct device *device, struct device_attribute *attr,
+			char *buf)
+{
+	struct qib_ibdev *dev =
+		container_of(device, struct qib_ibdev, ibdev.dev);
+	struct qib_devdata *dd = dd_from_dev(dev);
+	int ret;
+
+	if (!dd->boardname)
+		ret = -EINVAL;
+	else
+		ret = scnprintf(buf, PAGE_SIZE, "%s\n", dd->boardname);
+	return ret;
+}
+
+static ssize_t show_version(struct device *device,
+			    struct device_attribute *attr, char *buf)
+{
+	/* The string printed here is already newline-terminated. */
+	return scnprintf(buf, PAGE_SIZE, "%s", (char *)ib_qib_version);
+}
+
+static ssize_t show_boardversion(struct device *device,
+				 struct device_attribute *attr, char *buf)
+{
+	struct qib_ibdev *dev =
+		container_of(device, struct qib_ibdev, ibdev.dev);
+	struct qib_devdata *dd = dd_from_dev(dev);
+
+	/* The string printed here is already newline-terminated. */
+	return scnprintf(buf, PAGE_SIZE, "%s", dd->boardversion);
+}
+
+
+static ssize_t show_localbus_info(struct device *device,
+				  struct device_attribute *attr, char *buf)
+{
+	struct qib_ibdev *dev =
+		container_of(device, struct qib_ibdev, ibdev.dev);
+	struct qib_devdata *dd = dd_from_dev(dev);
+
+	/* The string printed here is already newline-terminated. */
+	return scnprintf(buf, PAGE_SIZE, "%s", dd->lbus_info);
+}
+
+
+static ssize_t show_nctxts(struct device *device,
+			   struct device_attribute *attr, char *buf)
+{
+	struct qib_ibdev *dev =
+		container_of(device, struct qib_ibdev, ibdev.dev);
+	struct qib_devdata *dd = dd_from_dev(dev);
+
+	/* Return the number of user ports (contexts) available. */
+	return scnprintf(buf, PAGE_SIZE, "%u\n", dd->cfgctxts -
+		dd->first_user_ctxt);
+}
+
+static ssize_t show_serial(struct device *device,
+			   struct device_attribute *attr, char *buf)
+{
+	struct qib_ibdev *dev =
+		container_of(device, struct qib_ibdev, ibdev.dev);
+	struct qib_devdata *dd = dd_from_dev(dev);
+
+	buf[sizeof dd->serial] = '\0';
+	memcpy(buf, dd->serial, sizeof dd->serial);
+	strcat(buf, "\n");
+	return strlen(buf);
+}
+
+static ssize_t store_chip_reset(struct device *device,
+				struct device_attribute *attr, const char *buf,
+				size_t count)
+{
+	struct qib_ibdev *dev =
+		container_of(device, struct qib_ibdev, ibdev.dev);
+	struct qib_devdata *dd = dd_from_dev(dev);
+	int ret;
+
+	if (count < 5 || memcmp(buf, "reset", 5) || !dd->diag_client) {
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	ret = qib_reset_device(dd->unit);
+bail:
+	return ret < 0 ? ret : count;
+}
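+
+/*
+ * Usage sketch (path is illustrative): the reset only proceeds when the
+ * literal string "reset" is written and a diag client has the device
+ * open, e.g.
+ *
+ *	# echo reset > /sys/class/infiniband/qib0/chip_reset
+ *
+ * Anything else returns -EINVAL.
+ */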
+
+static ssize_t show_logged_errs(struct device *device,
+				struct device_attribute *attr, char *buf)
+{
+	struct qib_ibdev *dev =
+		container_of(device, struct qib_ibdev, ibdev.dev);
+	struct qib_devdata *dd = dd_from_dev(dev);
+	int idx, count;
+
+	/* force consistency with actual EEPROM */
+	if (qib_update_eeprom_log(dd) != 0)
+		return -ENXIO;
+
+	count = 0;
+	for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) {
+		count += scnprintf(buf + count, PAGE_SIZE - count, "%d%c",
+				   dd->eep_st_errs[idx],
+				   idx == (QIB_EEP_LOG_CNT - 1) ? '\n' : ' ');
+	}
+
+	return count;
+}
+
+/*
+ * Dump the tempsense registers in decimal, to ease use from shell scripts.
+ */
+static ssize_t show_tempsense(struct device *device,
+			      struct device_attribute *attr, char *buf)
+{
+	struct qib_ibdev *dev =
+		container_of(device, struct qib_ibdev, ibdev.dev);
+	struct qib_devdata *dd = dd_from_dev(dev);
+	int ret;
+	int idx;
+	u8 regvals[8];
+
+	ret = -ENXIO;
+	for (idx = 0; idx < 8; ++idx) {
+		if (idx == 6)
+			continue;	/* register 6 is not reported */
+		ret = dd->f_tempsense_rd(dd, idx);
+		if (ret < 0)
+			break;
+		regvals[idx] = ret;
+	}
+	if (idx == 8)
+		ret = scnprintf(buf, PAGE_SIZE, "%d %d %02X %02X %d %d\n",
+				*(signed char *)(regvals),
+				*(signed char *)(regvals + 1),
+				regvals[2], regvals[3],
+				*(signed char *)(regvals + 5),
+				*(signed char *)(regvals + 7));
+	return ret;
+}
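+
+/*
+ * Example output (values are illustrative): registers 0, 1, 5 and 7 are
+ * printed as signed decimal, registers 2 and 3 as hex, so a read might
+ * return "52 45 00 AA 50 49".  Register 6 is never read, and any
+ * f_tempsense_rd() failure aborts the read with that error code.
+ */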
+
+/*
+ * end of per-unit (or driver, in some cases, but replicated
+ * per unit) functions
+ */
+
+/* start of per-unit file structures and support code */
+static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
+static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
+static DEVICE_ATTR(board_id, S_IRUGO, show_hca, NULL);
+static DEVICE_ATTR(version, S_IRUGO, show_version, NULL);
+static DEVICE_ATTR(nctxts, S_IRUGO, show_nctxts, NULL);
+static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL);
+static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL);
+static DEVICE_ATTR(logged_errors, S_IRUGO, show_logged_errs, NULL);
+static DEVICE_ATTR(tempsense, S_IRUGO, show_tempsense, NULL);
+static DEVICE_ATTR(localbus_info, S_IRUGO, show_localbus_info, NULL);
+static DEVICE_ATTR(chip_reset, S_IWUSR, NULL, store_chip_reset);
+
+static struct device_attribute *qib_attributes[] = {
+	&dev_attr_hw_rev,
+	&dev_attr_hca_type,
+	&dev_attr_board_id,
+	&dev_attr_version,
+	&dev_attr_nctxts,
+	&dev_attr_serial,
+	&dev_attr_boardversion,
+	&dev_attr_logged_errors,
+	&dev_attr_tempsense,
+	&dev_attr_localbus_info,
+	&dev_attr_chip_reset,
+};
+
+int qib_create_port_files(struct ib_device *ibdev, u8 port_num,
+			  struct kobject *kobj)
+{
+	struct qib_pportdata *ppd;
+	struct qib_devdata *dd = dd_from_ibdev(ibdev);
+	int ret;
+
+	if (!port_num || port_num > dd->num_pports) {
+		qib_dev_err(dd,
+			"Skipping infiniband class with invalid port %u\n",
+			port_num);
+		ret = -ENODEV;
+		goto bail;
+	}
+	ppd = &dd->pport[port_num - 1];
+
+	ret = kobject_init_and_add(&ppd->pport_kobj, &qib_port_ktype, kobj,
+				   "linkcontrol");
+	if (ret) {
+		qib_dev_err(dd,
+			"Skipping linkcontrol sysfs info, (err %d) port %u\n",
+			ret, port_num);
+		goto bail;
+	}
+	kobject_uevent(&ppd->pport_kobj, KOBJ_ADD);
+
+	ret = kobject_init_and_add(&ppd->sl2vl_kobj, &qib_sl2vl_ktype, kobj,
+				   "sl2vl");
+	if (ret) {
+		qib_dev_err(dd,
+			"Skipping sl2vl sysfs info, (err %d) port %u\n",
+			ret, port_num);
+		goto bail_sl;
+	}
+	kobject_uevent(&ppd->sl2vl_kobj, KOBJ_ADD);
+
+	ret = kobject_init_and_add(&ppd->diagc_kobj, &qib_diagc_ktype, kobj,
+				   "diag_counters");
+	if (ret) {
+		qib_dev_err(dd,
+			"Skipping diag_counters sysfs info, (err %d) port %u\n",
+			ret, port_num);
+		goto bail_diagc;
+	}
+	kobject_uevent(&ppd->diagc_kobj, KOBJ_ADD);
+
+	return 0;
+
+bail_diagc:
+	kobject_put(&ppd->sl2vl_kobj);
+bail_sl:
+	kobject_put(&ppd->pport_kobj);
+bail:
+	return ret;
+}
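+
+/*
+ * Resulting layout sketch (the parent kobject is supplied by the IB
+ * core's per-port directory):
+ *
+ *	<port>/linkcontrol/	loopback, led_override, hrtbt_enable,
+ *				status, status_str
+ *	<port>/sl2vl/		0 .. 15
+ *	<port>/diag_counters/	rc_resends, rc_acks, ...
+ */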
+
+/*
+ * Register and create our files in /sys/class/infiniband.
+ */
+int qib_verbs_register_sysfs(struct qib_devdata *dd)
+{
+	struct ib_device *dev = &dd->verbs_dev.ibdev;
+	int i, ret;
+
+	for (i = 0; i < ARRAY_SIZE(qib_attributes); ++i) {
+		ret = device_create_file(&dev->dev, qib_attributes[i]);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * Unregister and remove our files in /sys/class/infiniband.
+ */
+void qib_verbs_unregister_sysfs(struct qib_devdata *dd)
+{
+	struct qib_pportdata *ppd;
+	int i;
+
+	for (i = 0; i < dd->num_pports; i++) {
+		ppd = &dd->pport[i];
+		kobject_put(&ppd->pport_kobj);
+		kobject_put(&ppd->sl2vl_kobj);
+	}
+}
diff --git a/drivers/infiniband/hw/qib/qib_twsi.c b/drivers/infiniband/hw/qib/qib_twsi.c
new file mode 100644
index 000000000000..6f31ca5039db
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_twsi.c
@@ -0,0 +1,498 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/vmalloc.h>
+
+#include "qib.h"
+
+/*
+ * QLogic_IB "Two Wire Serial Interface" driver.
+ * Originally written for a not-quite-i2c serial eeprom, which is
+ * still used on some supported boards. Later boards have added a
+ * variety of other uses, most board-specific, so the bit-banging
+ * part has been split off to this file, while the other parts
+ * have been moved to chip-specific files.
+ *
+ * We have also dropped all pretense of a fully generic interface
+ * (e.g. pretending we don't know whether '1' is the higher voltage),
+ * as the restrictions of the generic i2c interface (e.g. no access
+ * from the driver itself) make it unsuitable for this use.
+ */
+
+#define READ_CMD 1
+#define WRITE_CMD 0
+
+/**
+ * i2c_wait_for_writes - wait for a write
+ * @dd: the qlogic_ib device
+ *
+ * We use this instead of udelay directly, so we can make sure
+ * that previous register writes have been flushed all the way
+ * to the chip.  Since we are delaying anyway, the cost doesn't
+ * hurt, and makes the bit twiddling more regular
+ */
+static void i2c_wait_for_writes(struct qib_devdata *dd)
+{
+	/*
+	 * implicit read of EXTStatus is as good as explicit
+	 * read of scratch, if all we want to do is flush
+	 * writes.
+	 */
+	dd->f_gpio_mod(dd, 0, 0, 0);
+	rmb(); /* inlined, so prevent compiler reordering */
+}
+
+/*
+ * QSFP modules are allowed to hold SCL low for 500uSec. Allow twice that
+ * for "almost compliant" modules
+ */
+#define SCL_WAIT_USEC 1000
+
+/* TWSI_BUF_WAIT_USEC is the time the bus must be free between a STOP
+ * or ACK and the next START.  Should be 20, but some chips need more.
+ */
+#define TWSI_BUF_WAIT_USEC 60
+
+static void scl_out(struct qib_devdata *dd, u8 bit)
+{
+	u32 mask;
+
+	udelay(1);
+
+	mask = 1UL << dd->gpio_scl_num;
+
+	/* SCL is meant to be open-drain, so never set "OUT", just DIR */
+	dd->f_gpio_mod(dd, 0, bit ? 0 : mask, mask);
+
+	/*
+	 * Allow for slow slaves by simple
+	 * delay for falling edge, sampling on rise.
+	 */
+	if (!bit)
+		udelay(2);
+	else {
+		int rise_usec;
+		for (rise_usec = SCL_WAIT_USEC; rise_usec > 0; rise_usec -= 2) {
+			if (mask & dd->f_gpio_mod(dd, 0, 0, 0))
+				break;
+			udelay(2);
+		}
+		if (rise_usec <= 0)
+			qib_dev_err(dd, "SCL interface stuck low > %d uSec\n",
+				    SCL_WAIT_USEC);
+	}
+	i2c_wait_for_writes(dd);
+}
+
+static void sda_out(struct qib_devdata *dd, u8 bit)
+{
+	u32 mask;
+
+	mask = 1UL << dd->gpio_sda_num;
+
+	/* SDA is meant to be open-drain, so never set "OUT", just DIR */
+	dd->f_gpio_mod(dd, 0, bit ? 0 : mask, mask);
+
+	i2c_wait_for_writes(dd);
+	udelay(2);
+}
+
+static u8 sda_in(struct qib_devdata *dd, int wait)
+{
+	int bnum;
+	u32 read_val, mask;
+
+	bnum = dd->gpio_sda_num;
+	mask = (1UL << bnum);
+	/* SDA is meant to be open-drain, so never set "OUT", just DIR */
+	dd->f_gpio_mod(dd, 0, 0, mask);
+	read_val = dd->f_gpio_mod(dd, 0, 0, 0);
+	if (wait)
+		i2c_wait_for_writes(dd);
+	return (read_val & mask) >> bnum;
+}
+
+/**
+ * i2c_ackrcv - see if ack following write is true
+ * @dd: the qlogic_ib device
+ */
+static int i2c_ackrcv(struct qib_devdata *dd)
+{
+	u8 ack_received;
+
+	/* AT ENTRY SCL = LOW */
+	/* change direction, ignore data */
+	ack_received = sda_in(dd, 1);
+	scl_out(dd, 1);
+	ack_received = sda_in(dd, 1) == 0;
+	scl_out(dd, 0);
+	return ack_received;
+}
+
+static void stop_cmd(struct qib_devdata *dd);
+
+/**
+ * rd_byte - read a byte, sending STOP on last, else ACK
+ * @dd: the qlogic_ib device
+ *
+ * Returns byte shifted out of device
+ */
+static int rd_byte(struct qib_devdata *dd, int last)
+{
+	int bit_cntr, data;
+
+	data = 0;
+
+	for (bit_cntr = 7; bit_cntr >= 0; --bit_cntr) {
+		data <<= 1;
+		scl_out(dd, 1);
+		data |= sda_in(dd, 0);
+		scl_out(dd, 0);
+	}
+	if (last) {
+		scl_out(dd, 1);
+		stop_cmd(dd);
+	} else {
+		sda_out(dd, 0);
+		scl_out(dd, 1);
+		scl_out(dd, 0);
+		sda_out(dd, 1);
+	}
+	return data;
+}
+
+/**
+ * wr_byte - write a byte, one bit at a time
+ * @dd: the qlogic_ib device
+ * @data: the byte to write
+ *
+ * Returns 0 if we got the following ack, otherwise 1
+ */
+static int wr_byte(struct qib_devdata *dd, u8 data)
+{
+	int bit_cntr;
+	u8 bit;
+
+	for (bit_cntr = 7; bit_cntr >= 0; bit_cntr--) {
+		bit = (data >> bit_cntr) & 1;
+		sda_out(dd, bit);
+		scl_out(dd, 1);
+		scl_out(dd, 0);
+	}
+	return (!i2c_ackrcv(dd)) ? 1 : 0;
+}
+
+/*
+ * issue TWSI start sequence:
+ * (both clock/data high, clock high, data low while clock is high)
+ */
+static void start_seq(struct qib_devdata *dd)
+{
+	sda_out(dd, 1);
+	scl_out(dd, 1);
+	sda_out(dd, 0);
+	udelay(1);
+	scl_out(dd, 0);
+}
+
+/**
+ * stop_seq - transmit the stop sequence
+ * @dd: the qlogic_ib device
+ *
+ * (both clock/data low, clock high, data high while clock is high)
+ */
+static void stop_seq(struct qib_devdata *dd)
+{
+	scl_out(dd, 0);
+	sda_out(dd, 0);
+	scl_out(dd, 1);
+	sda_out(dd, 1);
+}
+
+/**
+ * stop_cmd - transmit the stop condition
+ * @dd: the qlogic_ib device
+ *
+ * (both clock/data low, clock high, data high while clock is high)
+ */
+static void stop_cmd(struct qib_devdata *dd)
+{
+	stop_seq(dd);
+	udelay(TWSI_BUF_WAIT_USEC);
+}
+
+/**
+ * qib_twsi_reset - reset I2C communication
+ * @dd: the qlogic_ib device
+ */
+
+int qib_twsi_reset(struct qib_devdata *dd)
+{
+	int clock_cycles_left = 9;
+	int was_high = 0;
+	u32 pins, mask;
+
+	/* Both SCL and SDA should be high. If not, there
+	 * is something wrong.
+	 */
+	mask = (1UL << dd->gpio_scl_num) | (1UL << dd->gpio_sda_num);
+
+	/*
+	 * Force pins to desired innocuous state.
+	 * This is the default power-on state with out=0 and dir=0,
+	 * so tri-stated, and should be floating high (barring HW problems).
+	 */
+	dd->f_gpio_mod(dd, 0, 0, mask);
+
+	/*
+	 * Clock nine times to get all listeners into a sane state.
+	 * If SDA does not go high at any point, we are wedged.
+	 * One vendor recommends then issuing START followed by STOP.
+	 * We cannot use our "normal" functions to do that, because
+	 * if SCL drops between them, another vendor's part will
+	 * wedge, dropping SDA and keeping it low forever, at the end of
+	 * the next transaction (even if it was not the device addressed).
+	 * So our START and STOP take place with SCL held high.
+	 */
+	while (clock_cycles_left--) {
+		scl_out(dd, 0);
+		scl_out(dd, 1);
+		/* Note if SDA is high, but keep clocking to sync slave */
+		was_high |= sda_in(dd, 0);
+	}
+
+	if (was_high) {
+		/*
+		 * We saw a high, which we hope means the slave is sync'd.
+		 * Issue START, STOP, pause for T_BUF.
+		 */
+
+		pins = dd->f_gpio_mod(dd, 0, 0, 0);
+		if ((pins & mask) != mask)
+			qib_dev_err(dd, "GPIO pins not at rest: %d\n",
+				    pins & mask);
+		/* Drop SDA to issue START */
+		udelay(1); /* Guarantee .6 uSec setup */
+		sda_out(dd, 0);
+		udelay(1); /* Guarantee .6 uSec hold */
+		/* At this point, SCL is high, SDA low. Raise SDA for STOP */
+		sda_out(dd, 1);
+		udelay(TWSI_BUF_WAIT_USEC);
+	}
+
+	return !was_high;
+}
+
+#define QIB_TWSI_START 0x100
+#define QIB_TWSI_STOP 0x200
+
+/* Write byte to TWSI, optionally prefixed with START or suffixed with
+ * STOP.
+ * returns 0 if OK (ACK received), else != 0
+ */
+static int qib_twsi_wr(struct qib_devdata *dd, int data, int flags)
+{
+	int ret;
+
+	if (flags & QIB_TWSI_START)
+		start_seq(dd);
+
+	ret = wr_byte(dd, data); /* Leaves SCL low (from i2c_ackrcv()) */
+
+	if (flags & QIB_TWSI_STOP)
+		stop_cmd(dd);
+	return ret;
+}
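+
+/*
+ * Composition sketch: a one-byte "register read" from a real I2C device
+ * is built from three of the above calls,
+ *
+ *	qib_twsi_wr(dd, dev | WRITE_CMD, QIB_TWSI_START);
+ *	qib_twsi_wr(dd, addr, 0);
+ *	qib_twsi_wr(dd, dev | READ_CMD, QIB_TWSI_START);
+ *
+ * which is exactly the sequence qib_twsi_blk_rd() below performs before
+ * clocking data in with rd_byte().
+ */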
+
+/* Added functionality for IBA7220-based cards */
+#define QIB_TEMP_DEV 0x98
+
+/*
+ * qib_twsi_blk_rd
+ * Formerly called qib_eeprom_internal_read, and only used for eeprom,
+ * but now the general interface for data transfer from twsi devices.
+ * One vestige of its former role is that it recognizes a device
+ * QIB_TWSI_NO_DEV and does the correct operation for the legacy part,
+ * which responded to all TWSI device codes, interpreting them as
+ * address within device. On all other devices found on boards handled by
+ * this driver, the device is followed by a one-byte "address" which selects
+ * the "register" or "offset" within the device from which data should
+ * be read.
+ */
+int qib_twsi_blk_rd(struct qib_devdata *dd, int dev, int addr,
+		    void *buffer, int len)
+{
+	int ret;
+	u8 *bp = buffer;
+
+	if (dev == QIB_TWSI_NO_DEV) {
+		/* legacy not-really-I2C */
+		addr = (addr << 1) | READ_CMD;
+		ret = qib_twsi_wr(dd, addr, QIB_TWSI_START);
+	} else {
+		/* Actual I2C */
+		ret = qib_twsi_wr(dd, dev | WRITE_CMD, QIB_TWSI_START);
+		if (ret) {
+			stop_cmd(dd);
+			ret = 1;
+			goto bail;
+		}
+		/*
+		 * SFF spec claims we do _not_ stop after the addr
+		 * but simply issue a start with the "read" dev-addr.
+		 * Since we are implicitly waiting for ACK here,
+		 * we need t_buf (nominally 20uSec) before that start,
+		 * and cannot rely on the delay built in to the STOP
+		 */
+		ret = qib_twsi_wr(dd, addr, 0);
+		udelay(TWSI_BUF_WAIT_USEC);
+
+		if (ret) {
+			qib_dev_err(dd,
+				"Failed to write interface read addr %02X\n",
+				addr);
+			ret = 1;
+			goto bail;
+		}
+		ret = qib_twsi_wr(dd, dev | READ_CMD, QIB_TWSI_START);
+	}
+	if (ret) {
+		stop_cmd(dd);
+		ret = 1;
+		goto bail;
+	}
+
+	/*
+	 * Block devices keep clocking data out as long as we ack,
+	 * automatically incrementing the address. Some have "pages"
+	 * whose boundaries will not be crossed, but the handling
+	 * of these is left to the caller, who is in a better
+	 * position to know.
+	 */
+	while (len-- > 0) {
+		/*
+		 * Get and store data, sending ACK if length remaining,
+		 * else STOP
+		 */
+		*bp++ = rd_byte(dd, !len);
+	}
+
+	ret = 0;
+
+bail:
+	return ret;
+}
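+
+/*
+ * Usage sketch (illustrative; 0xA0 is a common EEPROM/QSFP device
+ * address and an assumption here, not something this file defines):
+ *
+ *	u8 id;
+ *
+ *	if (!qib_twsi_blk_rd(dd, 0xA0, 0, &id, 1))
+ *		pr_info("module identifier byte: 0x%02x\n", id);
+ */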
+
+/*
+ * qib_twsi_blk_wr
+ * Formerly called qib_eeprom_internal_write, and only used for eeprom,
+ * but now the general interface for data transfer to twsi devices.
+ * One vestige of its former role is that it recognizes a device
+ * QIB_TWSI_NO_DEV and does the correct operation for the legacy part,
+ * which responded to all TWSI device codes, interpreting them as
+ * address within device. On all other devices found on boards handled by
+ * this driver, the device is followed by a one-byte "address" which selects
+ * the "register" or "offset" within the device to which data should
+ * be written.
+ */
+int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr,
+		    const void *buffer, int len)
+{
+	int sub_len;
+	const u8 *bp = buffer;
+	int max_wait_time, i;
+	int ret;
+
+	while (len > 0) {
+		if (dev == QIB_TWSI_NO_DEV) {
+			if (qib_twsi_wr(dd, (addr << 1) | WRITE_CMD,
+					QIB_TWSI_START)) {
+				goto failed_write;
+			}
+		} else {
+			/* Real I2C */
+			if (qib_twsi_wr(dd, dev | WRITE_CMD, QIB_TWSI_START))
+				goto failed_write;
+			ret = qib_twsi_wr(dd, addr, 0);
+			if (ret) {
+				qib_dev_err(dd,
+					"Failed to write interface write addr %02X\n",
+					addr);
+				goto failed_write;
+			}
+		}
+
+		sub_len = min(len, 4);
+		addr += sub_len;
+		len -= sub_len;
+
+		for (i = 0; i < sub_len; i++)
+			if (qib_twsi_wr(dd, *bp++, 0))
+				goto failed_write;
+
+		stop_cmd(dd);
+
+		/*
+		 * Wait for write complete by waiting for a successful
+		 * read (the chip replies with a zero after the write
+		 * cmd completes, and before it writes to the eeprom).
+		 * The startcmd for the read will fail the ack until
+		 * the writes have completed.  We do this inline to avoid
+		 * the debug prints that are in the real read routine
+		 * if the startcmd fails.
+		 * We also use the proper device address, so it doesn't matter
+		 * whether we have real eeprom_dev. Legacy likes any address.
+		 */
+		max_wait_time = 100;
+		while (qib_twsi_wr(dd, dev | READ_CMD, QIB_TWSI_START)) {
+			stop_cmd(dd);
+			if (!--max_wait_time)
+				goto failed_write;
+		}
+		/* now read (and ignore) the resulting byte */
+		rd_byte(dd, 1);
+	}
+
+	ret = 0;
+	goto bail;
+
+failed_write:
+	stop_cmd(dd);
+	ret = 1;
+
+bail:
+	return ret;
+}
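+
+/*
+ * Pacing sketch: the loop above writes at most four data bytes per
+ * transaction (sub_len = min(len, 4)) so a write never crosses a small
+ * EEPROM page, then ack-polls with up to 100 START attempts.  A 10-byte
+ * buffer is therefore issued as three addressed writes of 4 + 4 + 2
+ * bytes.
+ */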
diff --git a/drivers/infiniband/hw/qib/qib_tx.c b/drivers/infiniband/hw/qib/qib_tx.c
new file mode 100644
index 000000000000..f7eb1ddff5f3
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_tx.c
@@ -0,0 +1,557 @@
+/*
+ * Copyright (c) 2008, 2009, 2010 QLogic Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/pci.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+
+#include "qib.h"
+
+static unsigned qib_hol_timeout_ms = 3000;
+module_param_named(hol_timeout_ms, qib_hol_timeout_ms, uint, S_IRUGO);
+MODULE_PARM_DESC(hol_timeout_ms,
+		 "duration of user app suspension after link failure");
+
+unsigned qib_sdma_fetch_arb = 1;
+module_param_named(fetch_arb, qib_sdma_fetch_arb, uint, S_IRUGO);
+MODULE_PARM_DESC(fetch_arb, "IBA7220: change SDMA descriptor arbitration");
+
+/**
+ * qib_disarm_piobufs - cancel a range of PIO buffers
+ * @dd: the qlogic_ib device
+ * @first: the first PIO buffer to cancel
+ * @cnt: the number of PIO buffers to cancel
+ *
+ * Cancel a range of PIO buffers. Used at user process close,
+ * in case it died while writing to a PIO buffer.
+ */
+void qib_disarm_piobufs(struct qib_devdata *dd, unsigned first, unsigned cnt)
+{
+	unsigned long flags;
+	unsigned i;
+	unsigned last;
+
+	last = first + cnt;
+	spin_lock_irqsave(&dd->pioavail_lock, flags);
+	for (i = first; i < last; i++) {
+		__clear_bit(i, dd->pio_need_disarm);
+		dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(i));
+	}
+	spin_unlock_irqrestore(&dd->pioavail_lock, flags);
+}
+
+/*
+ * This is called by a user process when it sees the DISARM_BUFS event
+ * bit is set.
+ */
+int qib_disarm_piobufs_ifneeded(struct qib_ctxtdata *rcd)
+{
+	struct qib_devdata *dd = rcd->dd;
+	unsigned i;
+	unsigned last;
+	unsigned n = 0;
+
+	last = rcd->pio_base + rcd->piocnt;
+	/*
+	 * Don't need uctxt_lock here, since user has called in to us.
+	 * Clear at start in case more interrupts set bits while we
+	 * are disarming.
+	 */
+	if (rcd->user_event_mask) {
+		/*
+		 * subctxt_cnt is 0 if not shared, so do base
+		 * separately, first, then remaining subctxt, if any
+		 */
+		clear_bit(_QIB_EVENT_DISARM_BUFS_BIT, &rcd->user_event_mask[0]);
+		for (i = 1; i < rcd->subctxt_cnt; i++)
+			clear_bit(_QIB_EVENT_DISARM_BUFS_BIT,
+				  &rcd->user_event_mask[i]);
+	}
+	spin_lock_irq(&dd->pioavail_lock);
+	for (i = rcd->pio_base; i < last; i++) {
+		if (__test_and_clear_bit(i, dd->pio_need_disarm)) {
+			n++;
+			dd->f_sendctrl(rcd->ppd, QIB_SENDCTRL_DISARM_BUF(i));
+		}
+	}
+	spin_unlock_irq(&dd->pioavail_lock);
+	return 0;
+}
+
+static struct qib_pportdata *is_sdma_buf(struct qib_devdata *dd, unsigned i)
+{
+	struct qib_pportdata *ppd;
+	unsigned pidx;
+
+	for (pidx = 0; pidx < dd->num_pports; pidx++) {
+		ppd = dd->pport + pidx;
+		if (i >= ppd->sdma_state.first_sendbuf &&
+		    i < ppd->sdma_state.last_sendbuf)
+			return ppd;
+	}
+	return NULL;
+}
+
+/*
+ * Return true if send buffer is being used by a user context.
+ * Sets _QIB_EVENT_DISARM_BUFS_BIT in user_event_mask as a side effect.
+ */
+static int find_ctxt(struct qib_devdata *dd, unsigned bufn)
+{
+	struct qib_ctxtdata *rcd;
+	unsigned ctxt;
+	int ret = 0;
+
+	spin_lock(&dd->uctxt_lock);
+	for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
+		rcd = dd->rcd[ctxt];
+		if (!rcd || bufn < rcd->pio_base ||
+		    bufn >= rcd->pio_base + rcd->piocnt)
+			continue;
+		if (rcd->user_event_mask) {
+			int i;
+			/*
+			 * subctxt_cnt is 0 if not shared, so do base
+			 * separately, first, then remaining subctxt, if any
+			 */
+			set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
+				&rcd->user_event_mask[0]);
+			for (i = 1; i < rcd->subctxt_cnt; i++)
+				set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
+					&rcd->user_event_mask[i]);
+		}
+		ret = 1;
+		break;
+	}
+	spin_unlock(&dd->uctxt_lock);
+
+	return ret;
+}
+
+/*
+ * Disarm a set of send buffers.  If the buffer might be actively being
+ * written to, mark the buffer to be disarmed later when it is not being
+ * written to.
+ *
+ * This should only be called from the IRQ error handler.
+ */
+void qib_disarm_piobufs_set(struct qib_devdata *dd, unsigned long *mask,
+			    unsigned cnt)
+{
+	struct qib_pportdata *ppd, *pppd[dd->num_pports];
+	unsigned i;
+	unsigned long flags;
+
+	for (i = 0; i < dd->num_pports; i++)
+		pppd[i] = NULL;
+
+	for (i = 0; i < cnt; i++) {
+		if (!test_bit(i, mask))
+			continue;
+		/*
+		 * If the buffer is owned by the DMA hardware,
+		 * reset the DMA engine.
+		 */
+		ppd = is_sdma_buf(dd, i);
+		if (ppd) {
+			pppd[ppd->port] = ppd;
+			continue;
+		}
+		/*
+		 * If the kernel is writing the buffer or the buffer is
+		 * owned by a user process, we can't clear it yet.
+		 */
+		spin_lock_irqsave(&dd->pioavail_lock, flags);
+		if (test_bit(i, dd->pio_writing) ||
+		    (!test_bit(i << 1, dd->pioavailkernel) &&
+		     find_ctxt(dd, i))) {
+			__set_bit(i, dd->pio_need_disarm);
+		} else {
+			dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(i));
+		}
+		spin_unlock_irqrestore(&dd->pioavail_lock, flags);
+	}
+
+	/* do cancel_sends once per port that had sdma piobufs in error */
+	for (i = 0; i < dd->num_pports; i++)
+		if (pppd[i])
+			qib_cancel_sends(pppd[i]);
+}
+
+/**
+ * update_send_bufs - update shadow copy of the PIO availability map
+ * @dd: the qlogic_ib device
+ *
+ * called whenever our local copy indicates we have run out of send buffers
+ */
+static void update_send_bufs(struct qib_devdata *dd)
+{
+	unsigned long flags;
+	unsigned i;
+	const unsigned piobregs = dd->pioavregs;
+
+	/*
+	 * If the generation (check) bits have changed, then we update the
+	 * busy bit for the corresponding PIO buffer.  This algorithm will
+	 * modify positions to the value they already have in some cases
+	 * (i.e., no change), but it's faster than changing only the bits
+	 * that have changed.
+	 *
+	 * We would like to do this atomically, to avoid spinlocks in the
+	 * critical send path, but that's not really possible, given the
+	 * type of changes, and that this routine could be called on
+	 * multiple CPUs simultaneously, so we lock in this routine only,
+	 * to avoid conflicting updates; all we change is the shadow, and
+	 * it's a single 64-bit memory location, so by definition the update
+	 * is atomic in terms of what other CPUs can see when testing the
+	 * bits.  The spin_lock overhead isn't too bad, since it only
+	 * happens when all buffers are in use, so only CPU overhead, not
+	 * latency or bandwidth, is affected.
+	 */
+	if (!dd->pioavailregs_dma)
+		return;
+	spin_lock_irqsave(&dd->pioavail_lock, flags);
+	for (i = 0; i < piobregs; i++) {
+		u64 pchbusy, pchg, piov, pnew;
+
+		piov = le64_to_cpu(dd->pioavailregs_dma[i]);
+		pchg = dd->pioavailkernel[i] &
+			~(dd->pioavailshadow[i] ^ piov);
+		pchbusy = pchg << QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT;
+		if (pchg && (pchbusy & dd->pioavailshadow[i])) {
+			pnew = dd->pioavailshadow[i] & ~pchbusy;
+			pnew |= piov & pchbusy;
+			dd->pioavailshadow[i] = pnew;
+		}
+	}
+	spin_unlock_irqrestore(&dd->pioavail_lock, flags);
+}
+
+/*
+ * Debugging code and stats updates if no pio buffers available.
+ */
+static noinline void no_send_bufs(struct qib_devdata *dd)
+{
+	dd->upd_pio_shadow = 1;
+
+	/* not atomic, but if we lose a stat count in a while, that's OK */
+	qib_stats.sps_nopiobufs++;
+}
+
+/*
+ * Common code for normal driver send buffer allocation, and reserved
+ * allocation.
+ *
+ * Do appropriate marking as busy, etc.
+ * Returns buffer pointer if one is found, otherwise NULL.
+ */
+u32 __iomem *qib_getsendbuf_range(struct qib_devdata *dd, u32 *pbufnum,
+				  u32 first, u32 last)
+{
+	unsigned i, j, updated = 0;
+	unsigned nbufs;
+	unsigned long flags;
+	unsigned long *shadow = dd->pioavailshadow;
+	u32 __iomem *buf;
+
+	if (!(dd->flags & QIB_PRESENT))
+		return NULL;
+
+	nbufs = last - first + 1; /* number in range to check */
+	if (dd->upd_pio_shadow) {
+		/*
+		 * Minor optimization.  If we had no buffers on last call,
+		 * start out by doing the update; continue and do scan even
+		 * if no buffers were updated, to be paranoid.
+		 */
+		update_send_bufs(dd);
+		updated++;
+	}
+	i = first;
+rescan:
+	/*
+	 * While test_and_set_bit() is atomic, we do that and then the
+	 * change_bit(), and the pair is not.  See if this is the cause
+	 * of the remaining armlaunch errors.
+	 */
+	spin_lock_irqsave(&dd->pioavail_lock, flags);
+	for (j = 0; j < nbufs; j++, i++) {
+		if (i > last)
+			i = first;
+		if (__test_and_set_bit((2 * i) + 1, shadow))
+			continue;
+		/* flip generation bit */
+		__change_bit(2 * i, shadow);
+		/* remember that the buffer can be written to now */
+		__set_bit(i, dd->pio_writing);
+		break;
+	}
+	spin_unlock_irqrestore(&dd->pioavail_lock, flags);
+
+	if (j == nbufs) {
+		if (!updated) {
+			/*
+			 * First time through; shadow exhausted, but may be
+			 * buffers available, try an update and then rescan.
+			 */
+			update_send_bufs(dd);
+			updated++;
+			i = first;
+			goto rescan;
+		}
+		no_send_bufs(dd);
+		buf = NULL;
+	} else {
+		if (i < dd->piobcnt2k)
+			buf = (u32 __iomem *)(dd->pio2kbase +
+				i * dd->palign);
+		else
+			buf = (u32 __iomem *)(dd->pio4kbase +
+				(i - dd->piobcnt2k) * dd->align4k);
+		if (pbufnum)
+			*pbufnum = i;
+		dd->upd_pio_shadow = 0;
+	}
+
+	return buf;
+}
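+
+/*
+ * Shadow encoding sketch: the availability shadow holds two bits per
+ * buffer, bit 2*i for generation (check) and bit 2*i + 1 for busy.
+ * Claiming buffer 5 under pioavail_lock is therefore:
+ *
+ *	if (!__test_and_set_bit(11, shadow)) {	// 2*5 + 1: mark busy
+ *		__change_bit(10, shadow);	// 2*5: flip generation
+ *		__set_bit(5, dd->pio_writing);
+ *	}
+ */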
+
+/*
+ * Record that the caller is finished writing to the buffer so we don't
+ * disarm it while it is being written and disarm it now if needed.
+ */
+void qib_sendbuf_done(struct qib_devdata *dd, unsigned n)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dd->pioavail_lock, flags);
+	__clear_bit(n, dd->pio_writing);
+	if (__test_and_clear_bit(n, dd->pio_need_disarm))
+		dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(n));
+	spin_unlock_irqrestore(&dd->pioavail_lock, flags);
+}
+
+/**
+ * qib_chg_pioavailkernel - change which send buffers are available for kernel
+ * @dd: the qlogic_ib device
+ * @start: the starting send buffer number
+ * @len: the number of send buffers
+ * @avail: true if the buffers are available for kernel use, false otherwise
+ */
+void qib_chg_pioavailkernel(struct qib_devdata *dd, unsigned start,
+	unsigned len, u32 avail, struct qib_ctxtdata *rcd)
+{
+	unsigned long flags;
+	unsigned end;
+	unsigned ostart = start;
+
+	/* There are two bits per send buffer (busy and generation) */
+	start *= 2;
+	end = start + len * 2;
+
+	spin_lock_irqsave(&dd->pioavail_lock, flags);
+	/* Set or clear the busy bit in the shadow. */
+	while (start < end) {
+		if (avail) {
+			unsigned long dma;
+			int i;
+
+			/*
+			 * The BUSY bit will never be set, because we disarm
+			 * the user buffers before we hand them back to the
+			 * kernel.  We do have to make sure the generation
+			 * bit is set correctly in shadow, since it could
+			 * have changed many times while allocated to user.
+			 * We can't use the bitmap functions on the full
+			 * dma array because it is always little-endian, so
+			 * we have to flip to host-order first.
+			 * BITS_PER_LONG is slightly wrong, since it's
+			 * always 64 bits per register in chip...
+			 * We only work on 64 bit kernels, so that's OK.
+			 */
+			i = start / BITS_PER_LONG;
+			__clear_bit(QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT + start,
+				    dd->pioavailshadow);
+			dma = (unsigned long)
+				le64_to_cpu(dd->pioavailregs_dma[i]);
+			if (test_bit((QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT +
+				      start) % BITS_PER_LONG, &dma))
+				__set_bit(QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT +
+					  start, dd->pioavailshadow);
+			else
+				__clear_bit(QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT
+					    + start, dd->pioavailshadow);
+			__set_bit(start, dd->pioavailkernel);
+		} else {
+			__set_bit(start + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT,
+				  dd->pioavailshadow);
+			__clear_bit(start, dd->pioavailkernel);
+		}
+		start += 2;
+	}
+
+	spin_unlock_irqrestore(&dd->pioavail_lock, flags);
+
+	dd->f_txchk_change(dd, ostart, len, avail, rcd);
+}
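+
+/*
+ * Bit-position sketch: with two bits per buffer, handing buffers 10..13
+ * back to the kernel walks start = 20, 22, 24, 26; each step clears the
+ * shadow BUSY bit, re-syncs the generation (check) bit from the DMA'd
+ * copy, and sets the same doubled index in pioavailkernel (pio_writing
+ * and pio_need_disarm, by contrast, are indexed by plain buffer number).
+ */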
+
+/*
+ * Flush all sends that might be in the ready to send state, as well as any
+ * that are in the process of being sent.  Used whenever we need to be
+ * sure the send side is idle.  Cleans up all buffer state by canceling
+ * all pio buffers, and issuing an abort, which cleans up anything in the
+ * launch fifo.  The cancel is superfluous on some chip versions, but
+ * it's safer to always do it.
+ * PIOAvail bits are updated by the chip as if a normal send had happened.
+ */
+void qib_cancel_sends(struct qib_pportdata *ppd)
+{
+	struct qib_devdata *dd = ppd->dd;
+	struct qib_ctxtdata *rcd;
+	unsigned long flags;
+	unsigned ctxt;
+	unsigned i;
+	unsigned last;
+
+	/*
+	 * Tell PSM to disarm buffers again before trying to reuse them.
+	 * We need to be sure the rcd doesn't change out from under us
+	 * while we do so.  We hold the two locks sequentially.  We might
+	 * needlessly set some need_disarm bits as a result, if the
+	 * context is closed after we release the uctxt_lock, but that's
+	 * fairly benign, and safer than nesting the locks.
+	 */
+	for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
+		spin_lock_irqsave(&dd->uctxt_lock, flags);
+		rcd = dd->rcd[ctxt];
+		if (rcd && rcd->ppd == ppd) {
+			last = rcd->pio_base + rcd->piocnt;
+			if (rcd->user_event_mask) {
+				/*
+				 * subctxt_cnt is 0 if not shared, so do base
+				 * separately, first, then remaining subctxt,
+				 * if any
+				 */
+				set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
+					&rcd->user_event_mask[0]);
+				for (i = 1; i < rcd->subctxt_cnt; i++)
+					set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
+						&rcd->user_event_mask[i]);
+			}
+			i = rcd->pio_base;
+			spin_unlock_irqrestore(&dd->uctxt_lock, flags);
+			spin_lock_irqsave(&dd->pioavail_lock, flags);
+			for (; i < last; i++)
+				__set_bit(i, dd->pio_need_disarm);
+			spin_unlock_irqrestore(&dd->pioavail_lock, flags);
+		} else
+			spin_unlock_irqrestore(&dd->uctxt_lock, flags);
+	}
+
+	if (!(dd->flags & QIB_HAS_SEND_DMA))
+		dd->f_sendctrl(ppd, QIB_SENDCTRL_DISARM_ALL |
+				    QIB_SENDCTRL_FLUSH);
+}
+
+/*
+ * Force an update of in-memory copy of the pioavail registers, when
+ * needed for any of a variety of reasons.
+ * If already off, this routine is a nop, on the assumption that the
+ * caller (or set of callers) will "do the right thing".
+ * This is a per-device operation, so just the first port.
+ */
+void qib_force_pio_avail_update(struct qib_devdata *dd)
+{
+	dd->f_sendctrl(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
+}
+
+void qib_hol_down(struct qib_pportdata *ppd)
+{
+	/*
+	 * Cancel sends when the link goes DOWN so that we aren't doing it
+	 * at INIT when we might be trying to send SMI packets.
+	 */
+	if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
+		qib_cancel_sends(ppd);
+}
+
+/*
+ * Link is at INIT.
+ * We start the HoL timer so we can detect stuck packets blocking SMP replies.
+ * Timer may already be running, so use mod_timer, not add_timer.
+ */
+void qib_hol_init(struct qib_pportdata *ppd)
+{
+	if (ppd->hol_state != QIB_HOL_INIT) {
+		ppd->hol_state = QIB_HOL_INIT;
+		mod_timer(&ppd->hol_timer,
+			  jiffies + msecs_to_jiffies(qib_hol_timeout_ms));
+	}
+}
+
+/*
+ * Link is up, continue any user processes, and ensure timer
+ * is a nop, if running.  Let timer keep running, if set; it
+ * will nop when it sees the link is up.
+ */
+void qib_hol_up(struct qib_pportdata *ppd)
+{
+	ppd->hol_state = QIB_HOL_UP;
+}
+
+/*
+ * This is only called via the timer.
+ */
+void qib_hol_event(unsigned long opaque)
+{
+	struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
+
+	/* If hardware error, etc, skip. */
+	if (!(ppd->dd->flags & QIB_INITTED))
+		return;
+
+	if (ppd->hol_state != QIB_HOL_UP) {
+		/*
+		 * Try to flush sends in case a stuck packet is blocking
+		 * SMP replies.
+		 */
+		qib_hol_down(ppd);
+		mod_timer(&ppd->hol_timer,
+			  jiffies + msecs_to_jiffies(qib_hol_timeout_ms));
+	}
+}
diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c
new file mode 100644
index 000000000000..6c7fe78cca64
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_uc.c
@@ -0,0 +1,555 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
+ * All rights reserved.
+ * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "qib.h"
+
+/* cut down ridiculously long IB macro names */
+#define OP(x) IB_OPCODE_UC_##x
+
+/**
+ * qib_make_uc_req - construct a request packet (SEND, RDMA write)
+ * @qp: a pointer to the QP
+ *
+ * Return 1 if constructed; otherwise, return 0.
+ */
+int qib_make_uc_req(struct qib_qp *qp)
+{
+	struct qib_other_headers *ohdr;
+	struct qib_swqe *wqe;
+	unsigned long flags;
+	u32 hwords;
+	u32 bth0;
+	u32 len;
+	u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
+	int ret = 0;
+
+	spin_lock_irqsave(&qp->s_lock, flags);
+
+	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_SEND_OK)) {
+		if (!(ib_qib_state_ops[qp->state] & QIB_FLUSH_SEND))
+			goto bail;
+		/* We are in the error state, flush the work request. */
+		if (qp->s_last == qp->s_head)
+			goto bail;
+		/* If DMAs are in progress, we can't flush immediately. */
+		if (atomic_read(&qp->s_dma_busy)) {
+			qp->s_flags |= QIB_S_WAIT_DMA;
+			goto bail;
+		}
+		wqe = get_swqe_ptr(qp, qp->s_last);
+		qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
+		goto done;
+	}
+
+	ohdr = &qp->s_hdr.u.oth;
+	if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
+		ohdr = &qp->s_hdr.u.l.oth;
+
+	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
+	hwords = 5;
+	bth0 = 0;
+
+	/* Get the next send request. */
+	wqe = get_swqe_ptr(qp, qp->s_cur);
+	qp->s_wqe = NULL;
+	switch (qp->s_state) {
+	default:
+		if (!(ib_qib_state_ops[qp->state] &
+		    QIB_PROCESS_NEXT_SEND_OK))
+			goto bail;
+		/* Check if send work queue is empty. */
+		if (qp->s_cur == qp->s_head)
+			goto bail;
+		/*
+		 * Start a new request.
+		 */
+		wqe->psn = qp->s_next_psn;
+		qp->s_psn = qp->s_next_psn;
+		qp->s_sge.sge = wqe->sg_list[0];
+		qp->s_sge.sg_list = wqe->sg_list + 1;
+		qp->s_sge.num_sge = wqe->wr.num_sge;
+		qp->s_sge.total_len = wqe->length;
+		len = wqe->length;
+		qp->s_len = len;
+		switch (wqe->wr.opcode) {
+		case IB_WR_SEND:
+		case IB_WR_SEND_WITH_IMM:
+			if (len > pmtu) {
+				qp->s_state = OP(SEND_FIRST);
+				len = pmtu;
+				break;
+			}
+			if (wqe->wr.opcode == IB_WR_SEND)
+				qp->s_state = OP(SEND_ONLY);
+			else {
+				qp->s_state =
+					OP(SEND_ONLY_WITH_IMMEDIATE);
+				/* Immediate data comes after the BTH */
+				ohdr->u.imm_data = wqe->wr.ex.imm_data;
+				hwords += 1;
+			}
+			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
+				bth0 |= IB_BTH_SOLICITED;
+			qp->s_wqe = wqe;
+			if (++qp->s_cur >= qp->s_size)
+				qp->s_cur = 0;
+			break;
+
+		case IB_WR_RDMA_WRITE:
+		case IB_WR_RDMA_WRITE_WITH_IMM:
+			ohdr->u.rc.reth.vaddr =
+				cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
+			ohdr->u.rc.reth.rkey =
+				cpu_to_be32(wqe->wr.wr.rdma.rkey);
+			ohdr->u.rc.reth.length = cpu_to_be32(len);
+			hwords += sizeof(struct ib_reth) / 4;
+			if (len > pmtu) {
+				qp->s_state = OP(RDMA_WRITE_FIRST);
+				len = pmtu;
+				break;
+			}
+			if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
+				qp->s_state = OP(RDMA_WRITE_ONLY);
+			else {
+				qp->s_state =
+					OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
+				/* Immediate data comes after the RETH */
+				ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
+				hwords += 1;
+				if (wqe->wr.send_flags & IB_SEND_SOLICITED)
+					bth0 |= IB_BTH_SOLICITED;
+			}
+			qp->s_wqe = wqe;
+			if (++qp->s_cur >= qp->s_size)
+				qp->s_cur = 0;
+			break;
+
+		default:
+			goto bail;
+		}
+		break;
+
+	case OP(SEND_FIRST):
+		qp->s_state = OP(SEND_MIDDLE);
+		/* FALLTHROUGH */
+	case OP(SEND_MIDDLE):
+		len = qp->s_len;
+		if (len > pmtu) {
+			len = pmtu;
+			break;
+		}
+		if (wqe->wr.opcode == IB_WR_SEND)
+			qp->s_state = OP(SEND_LAST);
+		else {
+			qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
+			/* Immediate data comes after the BTH */
+			ohdr->u.imm_data = wqe->wr.ex.imm_data;
+			hwords += 1;
+		}
+		if (wqe->wr.send_flags & IB_SEND_SOLICITED)
+			bth0 |= IB_BTH_SOLICITED;
+		qp->s_wqe = wqe;
+		if (++qp->s_cur >= qp->s_size)
+			qp->s_cur = 0;
+		break;
+
+	case OP(RDMA_WRITE_FIRST):
+		qp->s_state = OP(RDMA_WRITE_MIDDLE);
+		/* FALLTHROUGH */
+	case OP(RDMA_WRITE_MIDDLE):
+		len = qp->s_len;
+		if (len > pmtu) {
+			len = pmtu;
+			break;
+		}
+		if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
+			qp->s_state = OP(RDMA_WRITE_LAST);
+		else {
+			qp->s_state =
+				OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
+			/* Immediate data comes after the BTH */
+			ohdr->u.imm_data = wqe->wr.ex.imm_data;
+			hwords += 1;
+			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
+				bth0 |= IB_BTH_SOLICITED;
+		}
+		qp->s_wqe = wqe;
+		if (++qp->s_cur >= qp->s_size)
+			qp->s_cur = 0;
+		break;
+	}
+	qp->s_len -= len;
+	qp->s_hdrwords = hwords;
+	qp->s_cur_sge = &qp->s_sge;
+	qp->s_cur_size = len;
+	qib_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24),
+			    qp->s_next_psn++ & QIB_PSN_MASK);
+done:
+	ret = 1;
+	goto unlock;
+
+bail:
+	qp->s_flags &= ~QIB_S_BUSY;
+unlock:
+	spin_unlock_irqrestore(&qp->s_lock, flags);
+	return ret;
+}
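+
+/*
+ * Segmentation sketch (illustrative numbers): a 9000-byte IB_WR_SEND on
+ * a QP with a 4096-byte path MTU is emitted by successive calls here as
+ *
+ *	SEND_FIRST  (4096 bytes)
+ *	SEND_MIDDLE (4096 bytes)
+ *	SEND_LAST    (808 bytes)
+ *
+ * Only when the LAST fragment's state is chosen does s_wqe get set and
+ * s_cur advance, so completion is generated once per message.
+ */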
+
+/**
+ * qib_uc_rcv - handle an incoming UC packet
+ * @ibp: the port the packet came in on
+ * @hdr: the header of the packet
+ * @has_grh: true if the packet has a GRH
+ * @data: the packet data
+ * @tlen: the length of the packet
+ * @qp: the QP for this packet.
+ *
+ * This is called from qib_qp_rcv() to process an incoming UC packet
+ * for the given QP.
+ * Called at interrupt level.
+ */
+void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
+		int has_grh, void *data, u32 tlen, struct qib_qp *qp)
+{
+	struct qib_other_headers *ohdr;
+	unsigned long flags;
+	u32 opcode;
+	u32 hdrsize;
+	u32 psn;
+	u32 pad;
+	struct ib_wc wc;
+	u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
+	struct ib_reth *reth;
+	int ret;
+
+	/* Check for GRH */
+	if (!has_grh) {
+		ohdr = &hdr->u.oth;
+		hdrsize = 8 + 12;       /* LRH + BTH */
+	} else {
+		ohdr = &hdr->u.l.oth;
+		hdrsize = 8 + 40 + 12;  /* LRH + GRH + BTH */
+	}
+
+	opcode = be32_to_cpu(ohdr->bth[0]);
+	spin_lock_irqsave(&qp->s_lock, flags);
+	if (qib_ruc_check_hdr(ibp, hdr, has_grh, qp, opcode))
+		goto sunlock;
+	spin_unlock_irqrestore(&qp->s_lock, flags);
+
+	psn = be32_to_cpu(ohdr->bth[2]);
+	opcode >>= 24;
+	memset(&wc, 0, sizeof wc);
+
+	/* Prevent simultaneous processing after APM on different CPUs */
+	spin_lock(&qp->r_lock);
+
+	/* Compare the PSN versus the expected PSN. */
+	if (unlikely(qib_cmp24(psn, qp->r_psn) != 0)) {
+		/*
+		 * Handle a sequence error.
+		 * Silently drop any current message.
+		 */
+		qp->r_psn = psn;
+inv:
+		if (qp->r_state == OP(SEND_FIRST) ||
+		    qp->r_state == OP(SEND_MIDDLE)) {
+			set_bit(QIB_R_REWIND_SGE, &qp->r_aflags);
+			qp->r_sge.num_sge = 0;
+		} else
+			while (qp->r_sge.num_sge) {
+				atomic_dec(&qp->r_sge.sge.mr->refcount);
+				if (--qp->r_sge.num_sge)
+					qp->r_sge.sge = *qp->r_sge.sg_list++;
+			}
+		qp->r_state = OP(SEND_LAST);
+		switch (opcode) {
+		case OP(SEND_FIRST):
+		case OP(SEND_ONLY):
+		case OP(SEND_ONLY_WITH_IMMEDIATE):
+			goto send_first;
+
+		case OP(RDMA_WRITE_FIRST):
+		case OP(RDMA_WRITE_ONLY):
+		case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
+			goto rdma_first;
+
+		default:
+			goto drop;
+		}
+	}
+
+	/* Check for opcode sequence errors. */
+	switch (qp->r_state) {
+	case OP(SEND_FIRST):
+	case OP(SEND_MIDDLE):
+		if (opcode == OP(SEND_MIDDLE) ||
+		    opcode == OP(SEND_LAST) ||
+		    opcode == OP(SEND_LAST_WITH_IMMEDIATE))
+			break;
+		goto inv;
+
+	case OP(RDMA_WRITE_FIRST):
+	case OP(RDMA_WRITE_MIDDLE):
+		if (opcode == OP(RDMA_WRITE_MIDDLE) ||
+		    opcode == OP(RDMA_WRITE_LAST) ||
+		    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
+			break;
+		goto inv;
+
+	default:
+		if (opcode == OP(SEND_FIRST) ||
+		    opcode == OP(SEND_ONLY) ||
+		    opcode == OP(SEND_ONLY_WITH_IMMEDIATE) ||
+		    opcode == OP(RDMA_WRITE_FIRST) ||
+		    opcode == OP(RDMA_WRITE_ONLY) ||
+		    opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
+			break;
+		goto inv;
+	}
+
+	if (qp->state == IB_QPS_RTR && !(qp->r_flags & QIB_R_COMM_EST)) {
+		qp->r_flags |= QIB_R_COMM_EST;
+		if (qp->ibqp.event_handler) {
+			struct ib_event ev;
+
+			ev.device = qp->ibqp.device;
+			ev.element.qp = &qp->ibqp;
+			ev.event = IB_EVENT_COMM_EST;
+			qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
+		}
+	}
+
+	/* OK, process the packet. */
+	switch (opcode) {
+	case OP(SEND_FIRST):
+	case OP(SEND_ONLY):
+	case OP(SEND_ONLY_WITH_IMMEDIATE):
+send_first:
+		if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
+			qp->r_sge = qp->s_rdma_read_sge;
+		else {
+			ret = qib_get_rwqe(qp, 0);
+			if (ret < 0)
+				goto op_err;
+			if (!ret)
+				goto drop;
+			/*
+			 * qp->s_rdma_read_sge will be the owner
+			 * of the mr references.
+			 */
+			qp->s_rdma_read_sge = qp->r_sge;
+		}
+		qp->r_rcv_len = 0;
+		if (opcode == OP(SEND_ONLY))
+			goto send_last;
+		else if (opcode == OP(SEND_ONLY_WITH_IMMEDIATE))
+			goto send_last_imm;
+		/* FALLTHROUGH */
+	case OP(SEND_MIDDLE):
+		/* Check for invalid length: one PMTU, within posted rwqe len. */
+		if (unlikely(tlen != (hdrsize + pmtu + 4)))
+			goto rewind;
+		qp->r_rcv_len += pmtu;
+		if (unlikely(qp->r_rcv_len > qp->r_len))
+			goto rewind;
+		qib_copy_sge(&qp->r_sge, data, pmtu, 0);
+		break;
+
+	case OP(SEND_LAST_WITH_IMMEDIATE):
+send_last_imm:
+		wc.ex.imm_data = ohdr->u.imm_data;
+		hdrsize += 4;
+		wc.wc_flags = IB_WC_WITH_IMM;
+		/* FALLTHROUGH */
+	case OP(SEND_LAST):
+send_last:
+		/* Get the number of bytes the message was padded by. */
+		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
+		/* Check for invalid length. */
+		/* XXX LAST len should be >= 1 */
+		if (unlikely(tlen < (hdrsize + pad + 4)))
+			goto rewind;
+		/* Don't count the CRC. */
+		tlen -= (hdrsize + pad + 4);
+		wc.byte_len = tlen + qp->r_rcv_len;
+		if (unlikely(wc.byte_len > qp->r_len))
+			goto rewind;
+		wc.opcode = IB_WC_RECV;
+last_imm:
+		qib_copy_sge(&qp->r_sge, data, tlen, 0);
+		while (qp->s_rdma_read_sge.num_sge) {
+			atomic_dec(&qp->s_rdma_read_sge.sge.mr->refcount);
+			if (--qp->s_rdma_read_sge.num_sge)
+				qp->s_rdma_read_sge.sge =
+					*qp->s_rdma_read_sge.sg_list++;
+		}
+		wc.wr_id = qp->r_wr_id;
+		wc.status = IB_WC_SUCCESS;
+		wc.qp = &qp->ibqp;
+		wc.src_qp = qp->remote_qpn;
+		wc.slid = qp->remote_ah_attr.dlid;
+		wc.sl = qp->remote_ah_attr.sl;
+		/* Signal completion event if the solicited bit is set. */
+		qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
+			     (ohdr->bth[0] &
+				cpu_to_be32(IB_BTH_SOLICITED)) != 0);
+		break;
+
+	case OP(RDMA_WRITE_FIRST):
+	case OP(RDMA_WRITE_ONLY):
+	case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE): /* consume RWQE */
+rdma_first:
+		if (unlikely(!(qp->qp_access_flags &
+			       IB_ACCESS_REMOTE_WRITE))) {
+			goto drop;
+		}
+		reth = &ohdr->u.rc.reth;
+		hdrsize += sizeof(*reth);
+		qp->r_len = be32_to_cpu(reth->length);
+		qp->r_rcv_len = 0;
+		qp->r_sge.sg_list = NULL;
+		if (qp->r_len != 0) {
+			u32 rkey = be32_to_cpu(reth->rkey);
+			u64 vaddr = be64_to_cpu(reth->vaddr);
+			int ok;
+
+			/* Check rkey */
+			ok = qib_rkey_ok(qp, &qp->r_sge.sge, qp->r_len,
+					 vaddr, rkey, IB_ACCESS_REMOTE_WRITE);
+			if (unlikely(!ok))
+				goto drop;
+			qp->r_sge.num_sge = 1;
+		} else {
+			qp->r_sge.num_sge = 0;
+			qp->r_sge.sge.mr = NULL;
+			qp->r_sge.sge.vaddr = NULL;
+			qp->r_sge.sge.length = 0;
+			qp->r_sge.sge.sge_length = 0;
+		}
+		if (opcode == OP(RDMA_WRITE_ONLY))
+			goto rdma_last;
+		else if (opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
+			goto rdma_last_imm;
+		/* FALLTHROUGH */
+	case OP(RDMA_WRITE_MIDDLE):
+		/* Check for invalid length: one PMTU, within posted rwqe len. */
+		if (unlikely(tlen != (hdrsize + pmtu + 4)))
+			goto drop;
+		qp->r_rcv_len += pmtu;
+		if (unlikely(qp->r_rcv_len > qp->r_len))
+			goto drop;
+		qib_copy_sge(&qp->r_sge, data, pmtu, 1);
+		break;
+
+	case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
+rdma_last_imm:
+		wc.ex.imm_data = ohdr->u.imm_data;
+		hdrsize += 4;
+		wc.wc_flags = IB_WC_WITH_IMM;
+
+		/* Get the number of bytes the message was padded by. */
+		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
+		/* Check for invalid length. */
+		/* XXX LAST len should be >= 1 */
+		if (unlikely(tlen < (hdrsize + pad + 4)))
+			goto drop;
+		/* Don't count the CRC. */
+		tlen -= (hdrsize + pad + 4);
+		if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
+			goto drop;
+		if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
+			while (qp->s_rdma_read_sge.num_sge) {
+				atomic_dec(&qp->s_rdma_read_sge.sge.mr->
+					   refcount);
+				if (--qp->s_rdma_read_sge.num_sge)
+					qp->s_rdma_read_sge.sge =
+						*qp->s_rdma_read_sge.sg_list++;
+			}
+		else {
+			ret = qib_get_rwqe(qp, 1);
+			if (ret < 0)
+				goto op_err;
+			if (!ret)
+				goto drop;
+		}
+		wc.byte_len = qp->r_len;
+		wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
+		goto last_imm;
+
+	case OP(RDMA_WRITE_LAST):
+rdma_last:
+		/* Get the number of bytes the message was padded by. */
+		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
+		/* Check for invalid length. */
+		/* XXX LAST len should be >= 1 */
+		if (unlikely(tlen < (hdrsize + pad + 4)))
+			goto drop;
+		/* Don't count the CRC. */
+		tlen -= (hdrsize + pad + 4);
+		if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
+			goto drop;
+		qib_copy_sge(&qp->r_sge, data, tlen, 1);
+		while (qp->r_sge.num_sge) {
+			atomic_dec(&qp->r_sge.sge.mr->refcount);
+			if (--qp->r_sge.num_sge)
+				qp->r_sge.sge = *qp->r_sge.sg_list++;
+		}
+		break;
+
+	default:
+		/* Drop packet for unknown opcodes. */
+		goto drop;
+	}
+	qp->r_psn++;
+	qp->r_state = opcode;
+	spin_unlock(&qp->r_lock);
+	return;
+
+rewind:
+	set_bit(QIB_R_REWIND_SGE, &qp->r_aflags);
+	qp->r_sge.num_sge = 0;
+drop:
+	ibp->n_pkt_drops++;
+	spin_unlock(&qp->r_lock);
+	return;
+
+op_err:
+	qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
+	spin_unlock(&qp->r_lock);
+	return;
+
+sunlock:
+	spin_unlock_irqrestore(&qp->s_lock, flags);
+}
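
The LAST/ONLY arms above keep re-deriving the payload size from the wire length: the pad count lives in bits 21:20 of BTH dword 0, and the trailing 4 bytes are the ICRC. A minimal userspace sketch of that arithmetic, with a made-up bth0 value:

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>	/* htonl/ntohl */

/* Payload bytes = total - headers - pad - 4-byte ICRC,
 * mirroring the send_last/rdma_last checks above. */
static uint32_t payload_len(uint32_t tlen, uint32_t hdrsize, uint32_t bth0_be)
{
	uint32_t pad = (ntohl(bth0_be) >> 20) & 3;

	return tlen - (hdrsize + pad + 4);
}

int main(void)
{
	/* Hypothetical 64-byte packet: 20 header bytes, 3 pad bytes. */
	uint32_t bth0_be = htonl(3u << 20);

	printf("payload = %u\n", payload_len(64, 20, bth0_be));	/* 37 */
	return 0;
}
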
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
new file mode 100644
index 000000000000..c838cda73347
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -0,0 +1,607 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <rdma/ib_smi.h>
+
+#include "qib.h"
+#include "qib_mad.h"
+
+/**
+ * qib_ud_loopback - handle send on loopback QPs
+ * @sqp: the sending QP
+ * @swqe: the send work request
+ *
+ * This is called from qib_make_ud_req() to forward a WQE addressed
+ * to the same HCA.
+ * Note that the receive interrupt handler may be calling qib_ud_rcv()
+ * while this is being called.
+ */
+static void qib_ud_loopback(struct qib_qp *sqp, struct qib_swqe *swqe)
+{
+	struct qib_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
+	struct qib_pportdata *ppd;
+	struct qib_qp *qp;
+	struct ib_ah_attr *ah_attr;
+	unsigned long flags;
+	struct qib_sge_state ssge;
+	struct qib_sge *sge;
+	struct ib_wc wc;
+	u32 length;
+
+	qp = qib_lookup_qpn(ibp, swqe->wr.wr.ud.remote_qpn);
+	if (!qp) {
+		ibp->n_pkt_drops++;
+		return;
+	}
+	if (qp->ibqp.qp_type != sqp->ibqp.qp_type ||
+	    !(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
+		ibp->n_pkt_drops++;
+		goto drop;
+	}
+
+	ah_attr = &to_iah(swqe->wr.wr.ud.ah)->attr;
+	ppd = ppd_from_ibp(ibp);
+
+	if (qp->ibqp.qp_num > 1) {
+		u16 pkey1;
+		u16 pkey2;
+		u16 lid;
+
+		pkey1 = qib_get_pkey(ibp, sqp->s_pkey_index);
+		pkey2 = qib_get_pkey(ibp, qp->s_pkey_index);
+		if (unlikely(!qib_pkey_ok(pkey1, pkey2))) {
+			lid = ppd->lid | (ah_attr->src_path_bits &
+					  ((1 << ppd->lmc) - 1));
+			qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY, pkey1,
+				      ah_attr->sl,
+				      sqp->ibqp.qp_num, qp->ibqp.qp_num,
+				      cpu_to_be16(lid),
+				      cpu_to_be16(ah_attr->dlid));
+			goto drop;
+		}
+	}
+
+	/*
+	 * Check that the qkey matches (except for QP0, see 9.6.1.4.1).
+	 * Qkeys with the high order bit set mean use the
+	 * qkey from the QP context instead of the WR (see 10.2.5).
+	 */
+	if (qp->ibqp.qp_num) {
+		u32 qkey;
+
+		qkey = (int)swqe->wr.wr.ud.remote_qkey < 0 ?
+			sqp->qkey : swqe->wr.wr.ud.remote_qkey;
+		if (unlikely(qkey != qp->qkey)) {
+			u16 lid;
+
+			lid = ppd->lid | (ah_attr->src_path_bits &
+					  ((1 << ppd->lmc) - 1));
+			qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_QKEY, qkey,
+				      ah_attr->sl,
+				      sqp->ibqp.qp_num, qp->ibqp.qp_num,
+				      cpu_to_be16(lid),
+				      cpu_to_be16(ah_attr->dlid));
+			goto drop;
+		}
+	}
+
+	/*
+	 * A GRH is expected to precede the data even if not
+	 * present on the wire.
+	 */
+	length = swqe->length;
+	memset(&wc, 0, sizeof wc);
+	wc.byte_len = length + sizeof(struct ib_grh);
+
+	if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
+		wc.wc_flags = IB_WC_WITH_IMM;
+		wc.ex.imm_data = swqe->wr.ex.imm_data;
+	}
+
+	spin_lock_irqsave(&qp->r_lock, flags);
+
+	/*
+	 * Get the next work request entry to find where to put the data.
+	 */
+	if (qp->r_flags & QIB_R_REUSE_SGE)
+		qp->r_flags &= ~QIB_R_REUSE_SGE;
+	else {
+		int ret;
+
+		ret = qib_get_rwqe(qp, 0);
+		if (ret < 0) {
+			qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
+			goto bail_unlock;
+		}
+		if (!ret) {
+			if (qp->ibqp.qp_num == 0)
+				ibp->n_vl15_dropped++;
+			goto bail_unlock;
+		}
+	}
+	/* Silently drop packets which are too big. */
+	if (unlikely(wc.byte_len > qp->r_len)) {
+		qp->r_flags |= QIB_R_REUSE_SGE;
+		ibp->n_pkt_drops++;
+		goto bail_unlock;
+	}
+
+	if (ah_attr->ah_flags & IB_AH_GRH) {
+		qib_copy_sge(&qp->r_sge, &ah_attr->grh,
+			     sizeof(struct ib_grh), 1);
+		wc.wc_flags |= IB_WC_GRH;
+	} else
+		qib_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1);
+	ssge.sg_list = swqe->sg_list + 1;
+	ssge.sge = *swqe->sg_list;
+	ssge.num_sge = swqe->wr.num_sge;
+	sge = &ssge.sge;
+	while (length) {
+		u32 len = sge->length;
+
+		if (len > length)
+			len = length;
+		if (len > sge->sge_length)
+			len = sge->sge_length;
+		BUG_ON(len == 0);
+		qib_copy_sge(&qp->r_sge, sge->vaddr, len, 1);
+		sge->vaddr += len;
+		sge->length -= len;
+		sge->sge_length -= len;
+		if (sge->sge_length == 0) {
+			if (--ssge.num_sge)
+				*sge = *ssge.sg_list++;
+		} else if (sge->length == 0 && sge->mr->lkey) {
+			if (++sge->n >= QIB_SEGSZ) {
+				if (++sge->m >= sge->mr->mapsz)
+					break;
+				sge->n = 0;
+			}
+			sge->vaddr =
+				sge->mr->map[sge->m]->segs[sge->n].vaddr;
+			sge->length =
+				sge->mr->map[sge->m]->segs[sge->n].length;
+		}
+		length -= len;
+	}
+	while (qp->r_sge.num_sge) {
+		atomic_dec(&qp->r_sge.sge.mr->refcount);
+		if (--qp->r_sge.num_sge)
+			qp->r_sge.sge = *qp->r_sge.sg_list++;
+	}
+	if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
+		goto bail_unlock;
+	wc.wr_id = qp->r_wr_id;
+	wc.status = IB_WC_SUCCESS;
+	wc.opcode = IB_WC_RECV;
+	wc.qp = &qp->ibqp;
+	wc.src_qp = sqp->ibqp.qp_num;
+	wc.pkey_index = qp->ibqp.qp_type == IB_QPT_GSI ?
+		swqe->wr.wr.ud.pkey_index : 0;
+	wc.slid = ppd->lid | (ah_attr->src_path_bits & ((1 << ppd->lmc) - 1));
+	wc.sl = ah_attr->sl;
+	wc.dlid_path_bits = ah_attr->dlid & ((1 << ppd->lmc) - 1);
+	wc.port_num = qp->port_num;
+	/* Signal completion event if the solicited bit is set. */
+	qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
+		     swqe->wr.send_flags & IB_SEND_SOLICITED);
+	ibp->n_loop_pkts++;
+bail_unlock:
+	spin_unlock_irqrestore(&qp->r_lock, flags);
+drop:
+	if (atomic_dec_and_test(&qp->refcount))
+		wake_up(&qp->wait);
+}
+
+/**
+ * qib_make_ud_req - construct a UD request packet
+ * @qp: the QP
+ *
+ * Return 1 if constructed; otherwise, return 0.
+ */
+int qib_make_ud_req(struct qib_qp *qp)
+{
+	struct qib_other_headers *ohdr;
+	struct ib_ah_attr *ah_attr;
+	struct qib_pportdata *ppd;
+	struct qib_ibport *ibp;
+	struct qib_swqe *wqe;
+	unsigned long flags;
+	u32 nwords;
+	u32 extra_bytes;
+	u32 bth0;
+	u16 lrh0;
+	u16 lid;
+	int ret = 0;
+	int next_cur;
+
+	spin_lock_irqsave(&qp->s_lock, flags);
+
+	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_NEXT_SEND_OK)) {
+		if (!(ib_qib_state_ops[qp->state] & QIB_FLUSH_SEND))
+			goto bail;
+		/* We are in the error state, flush the work request. */
+		if (qp->s_last == qp->s_head)
+			goto bail;
+		/* If DMAs are in progress, we can't flush immediately. */
+		if (atomic_read(&qp->s_dma_busy)) {
+			qp->s_flags |= QIB_S_WAIT_DMA;
+			goto bail;
+		}
+		wqe = get_swqe_ptr(qp, qp->s_last);
+		qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
+		goto done;
+	}
+
+	if (qp->s_cur == qp->s_head)
+		goto bail;
+
+	wqe = get_swqe_ptr(qp, qp->s_cur);
+	next_cur = qp->s_cur + 1;
+	if (next_cur >= qp->s_size)
+		next_cur = 0;
+
+	/* Construct the header. */
+	ibp = to_iport(qp->ibqp.device, qp->port_num);
+	ppd = ppd_from_ibp(ibp);
+	ah_attr = &to_iah(wqe->wr.wr.ud.ah)->attr;
+	if (ah_attr->dlid >= QIB_MULTICAST_LID_BASE) {
+		if (ah_attr->dlid != QIB_PERMISSIVE_LID)
+			ibp->n_multicast_xmit++;
+		else
+			ibp->n_unicast_xmit++;
+	} else {
+		ibp->n_unicast_xmit++;
+		lid = ah_attr->dlid & ~((1 << ppd->lmc) - 1);
+		if (unlikely(lid == ppd->lid)) {
+			/*
+			 * If DMAs are in progress, we can't generate
+			 * a completion for the loopback packet since
+			 * it would be out of order.
+			 * XXX Instead of waiting, we could queue a
+			 * zero length descriptor so we get a callback.
+			 */
+			if (atomic_read(&qp->s_dma_busy)) {
+				qp->s_flags |= QIB_S_WAIT_DMA;
+				goto bail;
+			}
+			qp->s_cur = next_cur;
+			spin_unlock_irqrestore(&qp->s_lock, flags);
+			qib_ud_loopback(qp, wqe);
+			spin_lock_irqsave(&qp->s_lock, flags);
+			qib_send_complete(qp, wqe, IB_WC_SUCCESS);
+			goto done;
+		}
+	}
+
+	qp->s_cur = next_cur;
+	extra_bytes = -wqe->length & 3;
+	nwords = (wqe->length + extra_bytes) >> 2;
+
+	/* header size in 32-bit words LRH+BTH+DETH = (8+12+8)/4. */
+	qp->s_hdrwords = 7;
+	qp->s_cur_size = wqe->length;
+	qp->s_cur_sge = &qp->s_sge;
+	qp->s_srate = ah_attr->static_rate;
+	qp->s_wqe = wqe;
+	qp->s_sge.sge = wqe->sg_list[0];
+	qp->s_sge.sg_list = wqe->sg_list + 1;
+	qp->s_sge.num_sge = wqe->wr.num_sge;
+	qp->s_sge.total_len = wqe->length;
+
+	if (ah_attr->ah_flags & IB_AH_GRH) {
+		/* Header size in 32-bit words. */
+		qp->s_hdrwords += qib_make_grh(ibp, &qp->s_hdr.u.l.grh,
+					       &ah_attr->grh,
+					       qp->s_hdrwords, nwords);
+		lrh0 = QIB_LRH_GRH;
+		ohdr = &qp->s_hdr.u.l.oth;
+		/*
+		 * Don't worry about sending to locally attached multicast
+		 * QPs; the IB spec leaves the behavior unspecified.
+		 */
+	} else {
+		/* Header size in 32-bit words. */
+		lrh0 = QIB_LRH_BTH;
+		ohdr = &qp->s_hdr.u.oth;
+	}
+	if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
+		qp->s_hdrwords++;
+		ohdr->u.ud.imm_data = wqe->wr.ex.imm_data;
+		bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24;
+	} else
+		bth0 = IB_OPCODE_UD_SEND_ONLY << 24;
+	lrh0 |= ah_attr->sl << 4;
+	if (qp->ibqp.qp_type == IB_QPT_SMI)
+		lrh0 |= 0xF000; /* Set VL (see ch. 13.5.3.1) */
+	else
+		lrh0 |= ibp->sl_to_vl[ah_attr->sl] << 12;
+	qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
+	qp->s_hdr.lrh[1] = cpu_to_be16(ah_attr->dlid);  /* DEST LID */
+	qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
+	lid = ppd->lid;
+	if (lid) {
+		lid |= ah_attr->src_path_bits & ((1 << ppd->lmc) - 1);
+		qp->s_hdr.lrh[3] = cpu_to_be16(lid);
+	} else
+		qp->s_hdr.lrh[3] = IB_LID_PERMISSIVE;
+	if (wqe->wr.send_flags & IB_SEND_SOLICITED)
+		bth0 |= IB_BTH_SOLICITED;
+	bth0 |= extra_bytes << 20;
+	bth0 |= qp->ibqp.qp_type == IB_QPT_SMI ? QIB_DEFAULT_P_KEY :
+		qib_get_pkey(ibp, qp->ibqp.qp_type == IB_QPT_GSI ?
+			     wqe->wr.wr.ud.pkey_index : qp->s_pkey_index);
+	ohdr->bth[0] = cpu_to_be32(bth0);
+	/*
+	 * Use the multicast QP if the destination LID is a multicast LID.
+	 */
+	ohdr->bth[1] = ah_attr->dlid >= QIB_MULTICAST_LID_BASE &&
+		ah_attr->dlid != QIB_PERMISSIVE_LID ?
+		cpu_to_be32(QIB_MULTICAST_QPN) :
+		cpu_to_be32(wqe->wr.wr.ud.remote_qpn);
+	ohdr->bth[2] = cpu_to_be32(qp->s_next_psn++ & QIB_PSN_MASK);
+	/*
+	 * Qkeys with the high order bit set mean use the
+	 * qkey from the QP context instead of the WR (see 10.2.5).
+	 */
+	ohdr->u.ud.deth[0] = cpu_to_be32((int)wqe->wr.wr.ud.remote_qkey < 0 ?
+					 qp->qkey : wqe->wr.wr.ud.remote_qkey);
+	ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num);
+
+done:
+	ret = 1;
+	goto unlock;
+
+bail:
+	qp->s_flags &= ~QIB_S_BUSY;
+unlock:
+	spin_unlock_irqrestore(&qp->s_lock, flags);
+	return ret;
+}
+
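
qib_make_ud_req() pads the payload to a dword boundary with the idiom extra_bytes = -len & 3, which yields the 0-3 bytes needed to round len up to a multiple of 4. A small standalone demonstration of that arithmetic:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t len;

	for (len = 0; len < 6; len++) {
		uint32_t extra = -len & 3;		/* 0..3 pad bytes */
		uint32_t nwords = (len + extra) >> 2;	/* payload dwords */

		printf("len=%u extra=%u nwords=%u\n", len, extra, nwords);
	}
	return 0;
}
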
+static unsigned qib_lookup_pkey(struct qib_ibport *ibp, u16 pkey)
+{
+	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+	struct qib_devdata *dd = ppd->dd;
+	unsigned ctxt = ppd->hw_pidx;
+	unsigned i;
+
+	pkey &= 0x7fff;	/* remove limited/full membership bit */
+
+	for (i = 0; i < ARRAY_SIZE(dd->rcd[ctxt]->pkeys); ++i)
+		if ((dd->rcd[ctxt]->pkeys[i] & 0x7fff) == pkey)
+			return i;
+
+	/*
+	 * Should not get here, this means hardware failed to validate pkeys.
+	 * Punt and return index 0.
+	 */
+	return 0;
+}
+
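
qib_lookup_pkey() masks with 0x7fff because bit 15 of a pkey encodes full (1) versus limited (0) membership; two pkeys refer to the same partition when their low 15 bits match. A tiny sketch of the comparison:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t full = 0xffff;		/* full-membership default pkey */
	uint16_t limited = 0x7fff;	/* limited-membership variant */

	/* Same partition: only the low 15 bits are compared. */
	printf("match: %d\n", (full & 0x7fff) == (limited & 0x7fff));
	return 0;
}
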
+/**
+ * qib_ud_rcv - receive an incoming UD packet
+ * @ibp: the port the packet came in on
+ * @hdr: the packet header
+ * @has_grh: true if the packet has a GRH
+ * @data: the packet data
+ * @tlen: the packet length
+ * @qp: the QP the packet came on
+ *
+ * This is called from qib_qp_rcv() to process an incoming UD packet
+ * for the given QP.
+ * Called at interrupt level.
+ */
+void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
+		int has_grh, void *data, u32 tlen, struct qib_qp *qp)
+{
+	struct qib_other_headers *ohdr;
+	int opcode;
+	u32 hdrsize;
+	u32 pad;
+	struct ib_wc wc;
+	u32 qkey;
+	u32 src_qp;
+	u16 dlid;
+
+	/* Check for GRH */
+	if (!has_grh) {
+		ohdr = &hdr->u.oth;
+		hdrsize = 8 + 12 + 8;   /* LRH + BTH + DETH */
+	} else {
+		ohdr = &hdr->u.l.oth;
+		hdrsize = 8 + 40 + 12 + 8; /* LRH + GRH + BTH + DETH */
+	}
+	qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
+	src_qp = be32_to_cpu(ohdr->u.ud.deth[1]) & QIB_QPN_MASK;
+
+	/* Get the number of bytes the message was padded by. */
+	pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
+	if (unlikely(tlen < (hdrsize + pad + 4))) {
+		/* Drop incomplete packets. */
+		ibp->n_pkt_drops++;
+		goto bail;
+	}
+	tlen -= hdrsize + pad + 4;
+
+	/*
+	 * Check that the permissive LID is only used on QP0
+	 * and the QKEY matches (see 9.6.1.4.1 and 9.6.1.5.1).
+	 */
+	if (qp->ibqp.qp_num) {
+		if (unlikely(hdr->lrh[1] == IB_LID_PERMISSIVE ||
+			     hdr->lrh[3] == IB_LID_PERMISSIVE)) {
+			ibp->n_pkt_drops++;
+			goto bail;
+		}
+		if (qp->ibqp.qp_num > 1) {
+			u16 pkey1, pkey2;
+
+			pkey1 = be32_to_cpu(ohdr->bth[0]);
+			pkey2 = qib_get_pkey(ibp, qp->s_pkey_index);
+			if (unlikely(!qib_pkey_ok(pkey1, pkey2))) {
+				qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY,
+					      pkey1,
+					      (be16_to_cpu(hdr->lrh[0]) >> 4) &
+						0xF,
+					      src_qp, qp->ibqp.qp_num,
+					      hdr->lrh[3], hdr->lrh[1]);
+				goto bail;
+			}
+		}
+		if (unlikely(qkey != qp->qkey)) {
+			qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_QKEY, qkey,
+				      (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
+				      src_qp, qp->ibqp.qp_num,
+				      hdr->lrh[3], hdr->lrh[1]);
+			goto bail;
+		}
+		/* Drop invalid MAD packets (see 13.5.3.1). */
+		if (unlikely(qp->ibqp.qp_num == 1 &&
+			     (tlen != 256 ||
+			      (be16_to_cpu(hdr->lrh[0]) >> 12) == 15))) {
+			ibp->n_pkt_drops++;
+			goto bail;
+		}
+	} else {
+		struct ib_smp *smp;
+
+		/* Drop invalid MAD packets (see 13.5.3.1). */
+		if (tlen != 256 || (be16_to_cpu(hdr->lrh[0]) >> 12) != 15) {
+			ibp->n_pkt_drops++;
+			goto bail;
+		}
+		smp = (struct ib_smp *) data;
+		if ((hdr->lrh[1] == IB_LID_PERMISSIVE ||
+		     hdr->lrh[3] == IB_LID_PERMISSIVE) &&
+		    smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
+			ibp->n_pkt_drops++;
+			goto bail;
+		}
+	}
+
+	/*
+	 * The opcode is in the low byte when it's in network order
+	 * (top byte when in host order).
+	 */
+	opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
+	if (qp->ibqp.qp_num > 1 &&
+	    opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
+		wc.ex.imm_data = ohdr->u.ud.imm_data;
+		wc.wc_flags = IB_WC_WITH_IMM;
+		hdrsize += sizeof(u32);
+	} else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
+		wc.ex.imm_data = 0;
+		wc.wc_flags = 0;
+	} else {
+		ibp->n_pkt_drops++;
+		goto bail;
+	}
+
+	/*
+	 * A GRH is expected to precede the data even if not
+	 * present on the wire.
+	 */
+	wc.byte_len = tlen + sizeof(struct ib_grh);
+
+	/*
+	 * We need to serialize getting a receive work queue entry and
+	 * generating a completion for it against QPs sending to this QP
+	 * locally.
+	 */
+	spin_lock(&qp->r_lock);
+
+	/*
+	 * Get the next work request entry to find where to put the data.
+	 */
+	if (qp->r_flags & QIB_R_REUSE_SGE)
+		qp->r_flags &= ~QIB_R_REUSE_SGE;
+	else {
+		int ret;
+
+		ret = qib_get_rwqe(qp, 0);
+		if (ret < 0) {
+			qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
+			goto bail_unlock;
+		}
+		if (!ret) {
+			if (qp->ibqp.qp_num == 0)
+				ibp->n_vl15_dropped++;
+			goto bail_unlock;
+		}
+	}
+	/* Silently drop packets which are too big. */
+	if (unlikely(wc.byte_len > qp->r_len)) {
+		qp->r_flags |= QIB_R_REUSE_SGE;
+		ibp->n_pkt_drops++;
+		goto bail_unlock;
+	}
+	if (has_grh) {
+		qib_copy_sge(&qp->r_sge, &hdr->u.l.grh,
+			     sizeof(struct ib_grh), 1);
+		wc.wc_flags |= IB_WC_GRH;
+	} else
+		qib_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1);
+	qib_copy_sge(&qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh), 1);
+	while (qp->r_sge.num_sge) {
+		atomic_dec(&qp->r_sge.sge.mr->refcount);
+		if (--qp->r_sge.num_sge)
+			qp->r_sge.sge = *qp->r_sge.sg_list++;
+	}
+	if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
+		goto bail_unlock;
+	wc.wr_id = qp->r_wr_id;
+	wc.status = IB_WC_SUCCESS;
+	wc.opcode = IB_WC_RECV;
+	wc.vendor_err = 0;
+	wc.qp = &qp->ibqp;
+	wc.src_qp = src_qp;
+	wc.pkey_index = qp->ibqp.qp_type == IB_QPT_GSI ?
+		qib_lookup_pkey(ibp, be32_to_cpu(ohdr->bth[0])) : 0;
+	wc.slid = be16_to_cpu(hdr->lrh[3]);
+	wc.sl = (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF;
+	dlid = be16_to_cpu(hdr->lrh[1]);
+	/*
+	 * Save the LMC lower bits if the destination LID is a unicast LID.
+	 */
+	wc.dlid_path_bits = dlid >= QIB_MULTICAST_LID_BASE ? 0 :
+		dlid & ((1 << ppd_from_ibp(ibp)->lmc) - 1);
+	wc.port_num = qp->port_num;
+	/* Signal completion event if the solicited bit is set. */
+	qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
+		     (ohdr->bth[0] &
+			cpu_to_be32(IB_BTH_SOLICITED)) != 0);
+bail_unlock:
+	spin_unlock(&qp->r_lock);
+bail:;
+}
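
As the comment in qib_ud_rcv() notes, BTH dword 0 carries the opcode in its most significant byte once byte-swapped to host order. A userspace illustration, using 0x64 (IB_OPCODE_UD_SEND_ONLY, i.e. the UD base 0x60 plus SEND_ONLY 0x04):

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

int main(void)
{
	uint32_t bth0_be = htonl(0x64u << 24);	/* opcode in top byte */
	uint32_t opcode = ntohl(bth0_be) >> 24;

	printf("opcode = 0x%x\n", opcode);	/* 0x64 */
	return 0;
}
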
diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c
new file mode 100644
index 000000000000..d7a26c1d4f37
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_user_pages.c
@@ -0,0 +1,157 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/mm.h>
+#include <linux/device.h>
+
+#include "qib.h"
+
+static void __qib_release_user_pages(struct page **p, size_t num_pages,
+				     int dirty)
+{
+	size_t i;
+
+	for (i = 0; i < num_pages; i++) {
+		if (dirty)
+			set_page_dirty_lock(p[i]);
+		put_page(p[i]);
+	}
+}
+
+/*
+ * Call with current->mm->mmap_sem held.
+ */
+static int __get_user_pages(unsigned long start_page, size_t num_pages,
+			    struct page **p, struct vm_area_struct **vma)
+{
+	unsigned long lock_limit;
+	size_t got;
+	int ret;
+
+	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+
+	if (num_pages > lock_limit && !capable(CAP_IPC_LOCK)) {
+		ret = -ENOMEM;
+		goto bail;
+	}
+
+	for (got = 0; got < num_pages; got += ret) {
+		ret = get_user_pages(current, current->mm,
+				     start_page + got * PAGE_SIZE,
+				     num_pages - got, 1, 1,
+				     p + got, vma);
+		if (ret < 0)
+			goto bail_release;
+	}
+
+	current->mm->locked_vm += num_pages;
+
+	ret = 0;
+	goto bail;
+
+bail_release:
+	__qib_release_user_pages(p, got, 0);
+bail:
+	return ret;
+}
+
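
__get_user_pages() charges pinned pages against RLIMIT_MEMLOCK. The same budget is visible from userspace; a sketch of the equivalent computation (note rlim_cur may be RLIM_INFINITY):

#include <stdio.h>
#include <sys/resource.h>
#include <unistd.h>

int main(void)
{
	struct rlimit rl;

	if (getrlimit(RLIMIT_MEMLOCK, &rl) == 0)
		printf("lockable pages: %lu\n",
		       (unsigned long)(rl.rlim_cur /
				       sysconf(_SC_PAGESIZE)));
	return 0;
}
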
+/**
+ * qib_map_page - a safety wrapper around pci_map_page()
+ *
+ * A dma_addr of all 0's is interpreted by the chip as "disabled".
+ * Unfortunately, it can also be a valid dma_addr returned on some
+ * architectures.
+ *
+ * The powerpc IOMMU assigns dma_addrs in ascending order, so we don't
+ * have to bother with retries or mapping a dummy page to ensure we
+ * don't just get the same mapping again.
+ *
+ * I'm sure we won't be so lucky with other IOMMUs, so FIXME.
+ */
+dma_addr_t qib_map_page(struct pci_dev *hwdev, struct page *page,
+			unsigned long offset, size_t size, int direction)
+{
+	dma_addr_t phys;
+
+	phys = pci_map_page(hwdev, page, offset, size, direction);
+
+	if (phys == 0) {
+		pci_unmap_page(hwdev, phys, size, direction);
+		phys = pci_map_page(hwdev, page, offset, size, direction);
+		/*
+		 * FIXME: If we get 0 again, we should keep this page,
+		 * map another, then free the 0 page.
+		 */
+	}
+
+	return phys;
+}
+
+/**
+ * qib_get_user_pages - lock user pages into memory
+ * @start_page: the start page
+ * @num_pages: the number of pages
+ * @p: the output page structures
+ *
+ * This function takes a given start page (page aligned user virtual
+ * address) and pins it and the following specified number of pages.  For
+ * now, num_pages is always 1, but that will probably change at some point
+ * (because caller is doing expected sends on a single virtually contiguous
+ * buffer, so we can do all pages at once).
+ */
+int qib_get_user_pages(unsigned long start_page, size_t num_pages,
+		       struct page **p)
+{
+	int ret;
+
+	down_write(&current->mm->mmap_sem);
+
+	ret = __get_user_pages(start_page, num_pages, p, NULL);
+
+	up_write(&current->mm->mmap_sem);
+
+	return ret;
+}
+
+void qib_release_user_pages(struct page **p, size_t num_pages)
+{
+	if (current->mm) /* during close after signal, mm can be NULL */
+		down_write(&current->mm->mmap_sem);
+
+	__qib_release_user_pages(p, num_pages, 1);
+
+	if (current->mm) {
+		current->mm->locked_vm -= num_pages;
+		up_write(&current->mm->mmap_sem);
+	}
+}
diff --git a/drivers/infiniband/hw/qib/qib_user_sdma.c b/drivers/infiniband/hw/qib/qib_user_sdma.c
new file mode 100644
index 000000000000..4c19e06b5e85
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_user_sdma.c
@@ -0,0 +1,897 @@
+/*
+ * Copyright (c) 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/mm.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/dmapool.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/highmem.h>
+#include <linux/io.h>
+#include <linux/uio.h>
+#include <linux/rbtree.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+
+#include "qib.h"
+#include "qib_user_sdma.h"
+
+/* minimum size of header */
+#define QIB_USER_SDMA_MIN_HEADER_LENGTH 64
+/* expected size of headers (for dma_pool) */
+#define QIB_USER_SDMA_EXP_HEADER_LENGTH 64
+/* attempt to drain the queue for 5 seconds */
+#define QIB_USER_SDMA_DRAIN_TIMEOUT 500
+
+struct qib_user_sdma_pkt {
+	u8 naddr;               /* dimension of addr (1..4) ... */
+	u32 counter;            /* sdma pkts queued counter for this entry */
+	u64 added;              /* global descq number of entries */
+
+	struct {
+		u32 offset;                     /* offset for kvaddr, addr */
+		u32 length;                     /* length in page */
+		u8  put_page;                   /* should we put_page? */
+		u8  dma_mapped;                 /* is page dma_mapped? */
+		struct page *page;              /* may be NULL (coherent mem) */
+		void *kvaddr;                   /* FIXME: only for pio hack */
+		dma_addr_t addr;
+	} addr[4];   /* max pages, any more and we coalesce */
+	struct list_head list;  /* list element */
+};
+
+struct qib_user_sdma_queue {
+	/*
+	 * pkts sent to dma engine are queued on this
+	 * list head.  the type of the elements of this
+	 * list are struct qib_user_sdma_pkt...
+	 */
+	struct list_head sent;
+
+	/* headers with expected length are allocated from here... */
+	char header_cache_name[64];
+	struct dma_pool *header_cache;
+
+	/* packets are allocated from the slab cache... */
+	char pkt_slab_name[64];
+	struct kmem_cache *pkt_slab;
+
+	/* as packets are queued, they are counted... */
+	u32 counter;
+	u32 sent_counter;
+
+	/* dma page table */
+	struct rb_root dma_pages_root;
+
+	/* protect everything above... */
+	struct mutex lock;
+};
+
+struct qib_user_sdma_queue *
+qib_user_sdma_queue_create(struct device *dev, int unit, int ctxt, int sctxt)
+{
+	struct qib_user_sdma_queue *pq =
+		kmalloc(sizeof(struct qib_user_sdma_queue), GFP_KERNEL);
+
+	if (!pq)
+		goto done;
+
+	pq->counter = 0;
+	pq->sent_counter = 0;
+	INIT_LIST_HEAD(&pq->sent);
+
+	mutex_init(&pq->lock);
+
+	snprintf(pq->pkt_slab_name, sizeof(pq->pkt_slab_name),
+		 "qib-user-sdma-pkts-%u-%02u.%02u", unit, ctxt, sctxt);
+	pq->pkt_slab = kmem_cache_create(pq->pkt_slab_name,
+					 sizeof(struct qib_user_sdma_pkt),
+					 0, 0, NULL);
+
+	if (!pq->pkt_slab)
+		goto err_kfree;
+
+	snprintf(pq->header_cache_name, sizeof(pq->header_cache_name),
+		 "qib-user-sdma-headers-%u-%02u.%02u", unit, ctxt, sctxt);
+	pq->header_cache = dma_pool_create(pq->header_cache_name,
+					   dev,
+					   QIB_USER_SDMA_EXP_HEADER_LENGTH,
+					   4, 0);
+	if (!pq->header_cache)
+		goto err_slab;
+
+	pq->dma_pages_root = RB_ROOT;
+
+	goto done;
+
+err_slab:
+	kmem_cache_destroy(pq->pkt_slab);
+err_kfree:
+	kfree(pq);
+	pq = NULL;
+
+done:
+	return pq;
+}
+
+static void qib_user_sdma_init_frag(struct qib_user_sdma_pkt *pkt,
+				    int i, size_t offset, size_t len,
+				    int put_page, int dma_mapped,
+				    struct page *page,
+				    void *kvaddr, dma_addr_t dma_addr)
+{
+	pkt->addr[i].offset = offset;
+	pkt->addr[i].length = len;
+	pkt->addr[i].put_page = put_page;
+	pkt->addr[i].dma_mapped = dma_mapped;
+	pkt->addr[i].page = page;
+	pkt->addr[i].kvaddr = kvaddr;
+	pkt->addr[i].addr = dma_addr;
+}
+
+static void qib_user_sdma_init_header(struct qib_user_sdma_pkt *pkt,
+				      u32 counter, size_t offset,
+				      size_t len, int dma_mapped,
+				      struct page *page,
+				      void *kvaddr, dma_addr_t dma_addr)
+{
+	pkt->naddr = 1;
+	pkt->counter = counter;
+	qib_user_sdma_init_frag(pkt, 0, offset, len, 0, dma_mapped, page,
+				kvaddr, dma_addr);
+}
+
+/* we have too many pages in the iovec, coalesce to a single page */
+static int qib_user_sdma_coalesce(const struct qib_devdata *dd,
+				  struct qib_user_sdma_pkt *pkt,
+				  const struct iovec *iov,
+				  unsigned long niov)
+{
+	int ret = 0;
+	struct page *page = alloc_page(GFP_KERNEL);
+	void *mpage_save;
+	char *mpage;
+	int i;
+	int len = 0;
+	dma_addr_t dma_addr;
+
+	if (!page) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	mpage = kmap(page);
+	mpage_save = mpage;
+	for (i = 0; i < niov; i++) {
+		int cfur;
+
+		cfur = copy_from_user(mpage,
+				      iov[i].iov_base, iov[i].iov_len);
+		if (cfur) {
+			ret = -EFAULT;
+			goto free_unmap;
+		}
+
+		mpage += iov[i].iov_len;
+		len += iov[i].iov_len;
+	}
+
+	dma_addr = dma_map_page(&dd->pcidev->dev, page, 0, len,
+				DMA_TO_DEVICE);
+	if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
+		ret = -ENOMEM;
+		goto free_unmap;
+	}
+
+	qib_user_sdma_init_frag(pkt, 1, 0, len, 0, 1, page, mpage_save,
+				dma_addr);
+	pkt->naddr = 2;
+
+	goto done;
+
+free_unmap:
+	kunmap(page);
+	__free_page(page);
+done:
+	return ret;
+}
+
+/*
+ * How many pages in this iovec element?
+ */
+static int qib_user_sdma_num_pages(const struct iovec *iov)
+{
+	const unsigned long addr  = (unsigned long) iov->iov_base;
+	const unsigned long  len  = iov->iov_len;
+	const unsigned long spage = addr & PAGE_MASK;
+	const unsigned long epage = (addr + len - 1) & PAGE_MASK;
+
+	return 1 + ((epage - spage) >> PAGE_SHIFT);
+}
+
+/*
+ * Truncate length to page boundary.
+ */
+static int qib_user_sdma_page_length(unsigned long addr, unsigned long len)
+{
+	const unsigned long offset = addr & ~PAGE_MASK;
+
+	return ((offset + len) > PAGE_SIZE) ? (PAGE_SIZE - offset) : len;
+}
+
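
The two helpers above do the page-spanning arithmetic for an iovec element. A self-contained check of the page count, assuming 4 KB pages:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

/* Pages spanned by [addr, addr + len), as qib_user_sdma_num_pages(). */
static int num_pages(unsigned long addr, unsigned long len)
{
	unsigned long spage = addr & PAGE_MASK;
	unsigned long epage = (addr + len - 1) & PAGE_MASK;

	return 1 + ((epage - spage) >> PAGE_SHIFT);
}

int main(void)
{
	/* 256 bytes starting 64 bytes before a page boundary: 2 pages. */
	printf("%d\n", num_pages(PAGE_SIZE - 64, 256));
	return 0;
}
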
+static void qib_user_sdma_free_pkt_frag(struct device *dev,
+					struct qib_user_sdma_queue *pq,
+					struct qib_user_sdma_pkt *pkt,
+					int frag)
+{
+	const int i = frag;
+
+	if (pkt->addr[i].page) {
+		if (pkt->addr[i].dma_mapped)
+			dma_unmap_page(dev,
+				       pkt->addr[i].addr,
+				       pkt->addr[i].length,
+				       DMA_TO_DEVICE);
+
+		if (pkt->addr[i].kvaddr)
+			kunmap(pkt->addr[i].page);
+
+		if (pkt->addr[i].put_page)
+			put_page(pkt->addr[i].page);
+		else
+			__free_page(pkt->addr[i].page);
+	} else if (pkt->addr[i].kvaddr)
+		/* free coherent mem from cache... */
+		dma_pool_free(pq->header_cache,
+			      pkt->addr[i].kvaddr, pkt->addr[i].addr);
+}
+
+/* return number of pages pinned... */
+static int qib_user_sdma_pin_pages(const struct qib_devdata *dd,
+				   struct qib_user_sdma_pkt *pkt,
+				   unsigned long addr, int tlen, int npages)
+{
+	struct page *pages[2];
+	int j;
+	int ret;
+
+	ret = get_user_pages(current, current->mm, addr,
+			     npages, 0, 1, pages, NULL);
+
+	if (ret != npages) {
+		int i;
+
+		for (i = 0; i < ret; i++)
+			put_page(pages[i]);
+
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	for (j = 0; j < npages; j++) {
+		/* map the pages... */
+		const int flen = qib_user_sdma_page_length(addr, tlen);
+		dma_addr_t dma_addr =
+			dma_map_page(&dd->pcidev->dev,
+				     pages[j], 0, flen, DMA_TO_DEVICE);
+		unsigned long fofs = addr & ~PAGE_MASK;
+
+		if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
+			ret = -ENOMEM;
+			goto done;
+		}
+
+		qib_user_sdma_init_frag(pkt, pkt->naddr, fofs, flen, 1, 1,
+					pages[j], kmap(pages[j]), dma_addr);
+
+		pkt->naddr++;
+		addr += flen;
+		tlen -= flen;
+	}
+
+done:
+	return ret;
+}
+
+static int qib_user_sdma_pin_pkt(const struct qib_devdata *dd,
+				 struct qib_user_sdma_queue *pq,
+				 struct qib_user_sdma_pkt *pkt,
+				 const struct iovec *iov,
+				 unsigned long niov)
+{
+	int ret = 0;
+	unsigned long idx;
+
+	for (idx = 0; idx < niov; idx++) {
+		const int npages = qib_user_sdma_num_pages(iov + idx);
+		const unsigned long addr = (unsigned long) iov[idx].iov_base;
+
+		ret = qib_user_sdma_pin_pages(dd, pkt, addr,
+					      iov[idx].iov_len, npages);
+		if (ret < 0)
+			goto free_pkt;
+	}
+
+	goto done;
+
+free_pkt:
+	for (idx = 0; idx < pkt->naddr; idx++)
+		qib_user_sdma_free_pkt_frag(&dd->pcidev->dev, pq, pkt, idx);
+
+done:
+	return ret;
+}
+
+static int qib_user_sdma_init_payload(const struct qib_devdata *dd,
+				      struct qib_user_sdma_queue *pq,
+				      struct qib_user_sdma_pkt *pkt,
+				      const struct iovec *iov,
+				      unsigned long niov, int npages)
+{
+	int ret = 0;
+
+	if (npages >= ARRAY_SIZE(pkt->addr))
+		ret = qib_user_sdma_coalesce(dd, pkt, iov, niov);
+	else
+		ret = qib_user_sdma_pin_pkt(dd, pq, pkt, iov, niov);
+
+	return ret;
+}
+
+/* free a packet list */
+static void qib_user_sdma_free_pkt_list(struct device *dev,
+					struct qib_user_sdma_queue *pq,
+					struct list_head *list)
+{
+	struct qib_user_sdma_pkt *pkt, *pkt_next;
+
+	list_for_each_entry_safe(pkt, pkt_next, list, list) {
+		int i;
+
+		for (i = 0; i < pkt->naddr; i++)
+			qib_user_sdma_free_pkt_frag(dev, pq, pkt, i);
+
+		kmem_cache_free(pq->pkt_slab, pkt);
+	}
+}
+
+/*
+ * copy headers, coalesce etc -- pq->lock must be held
+ *
+ * we queue all the packets on the list, returning the
+ * number of iovec entries consumed.  The list must be
+ * empty initially; if there is an error we clean it up...
+ */
+static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
+				    struct qib_user_sdma_queue *pq,
+				    struct list_head *list,
+				    const struct iovec *iov,
+				    unsigned long niov,
+				    int maxpkts)
+{
+	unsigned long idx = 0;
+	int ret = 0;
+	int npkts = 0;
+	struct page *page = NULL;
+	__le32 *pbc;
+	dma_addr_t dma_addr;
+	struct qib_user_sdma_pkt *pkt = NULL;
+	size_t len;
+	size_t nw;
+	u32 counter = pq->counter;
+	int dma_mapped = 0;
+
+	while (idx < niov && npkts < maxpkts) {
+		const unsigned long addr = (unsigned long) iov[idx].iov_base;
+		const unsigned long idx_save = idx;
+		unsigned pktnw;
+		unsigned pktnwc;
+		int nfrags = 0;
+		int npages = 0;
+		int cfur;
+
+		dma_mapped = 0;
+		len = iov[idx].iov_len;
+		nw = len >> 2;
+		page = NULL;
+
+		pkt = kmem_cache_alloc(pq->pkt_slab, GFP_KERNEL);
+		if (!pkt) {
+			ret = -ENOMEM;
+			goto free_list;
+		}
+
+		if (len < QIB_USER_SDMA_MIN_HEADER_LENGTH ||
+		    len > PAGE_SIZE || len & 3 || addr & 3) {
+			ret = -EINVAL;
+			goto free_pkt;
+		}
+
+		if (len == QIB_USER_SDMA_EXP_HEADER_LENGTH)
+			pbc = dma_pool_alloc(pq->header_cache, GFP_KERNEL,
+					     &dma_addr);
+		else
+			pbc = NULL;
+
+		if (!pbc) {
+			page = alloc_page(GFP_KERNEL);
+			if (!page) {
+				ret = -ENOMEM;
+				goto free_pkt;
+			}
+			pbc = kmap(page);
+		}
+
+		cfur = copy_from_user(pbc, iov[idx].iov_base, len);
+		if (cfur) {
+			ret = -EFAULT;
+			goto free_pbc;
+		}
+
+		/*
+		 * This assignment is a bit strange.  It's because the
+		 * PBC counts the number of 32 bit words in the full
+		 * packet _except_ the first word of the PBC itself...
+		 */
+		pktnwc = nw - 1;
+
+		/*
+		 * pktnw computation yields the number of 32 bit words
+		 * that the caller has indicated in the PBC.  Note that
+		 * this is one less than the total number of words that
+		 * goes to the send DMA engine as the first 32 bit word
+		 * of the PBC itself is not counted.  Armed with this count,
+		 * we can verify that the packet is consistent with the
+		 * iovec lengths.
+		 */
+		pktnw = le32_to_cpu(*pbc) & QIB_PBC_LENGTH_MASK;
+		if (pktnw < pktnwc || pktnw > pktnwc + (PAGE_SIZE >> 2)) {
+			ret = -EINVAL;
+			goto free_pbc;
+		}
+
+		idx++;
+		while (pktnwc < pktnw && idx < niov) {
+			const size_t slen = iov[idx].iov_len;
+			const unsigned long faddr =
+				(unsigned long) iov[idx].iov_base;
+
+			if (slen & 3 || faddr & 3 || !slen ||
+			    slen > PAGE_SIZE) {
+				ret = -EINVAL;
+				goto free_pbc;
+			}
+
+			npages++;
+			if ((faddr & PAGE_MASK) !=
+			    ((faddr + slen - 1) & PAGE_MASK))
+				npages++;
+
+			pktnwc += slen >> 2;
+			idx++;
+			nfrags++;
+		}
+
+		if (pktnwc != pktnw) {
+			ret = -EINVAL;
+			goto free_pbc;
+		}
+
+		if (page) {
+			dma_addr = dma_map_page(&dd->pcidev->dev,
+						page, 0, len, DMA_TO_DEVICE);
+			if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
+				ret = -ENOMEM;
+				goto free_pbc;
+			}
+
+			dma_mapped = 1;
+		}
+
+		qib_user_sdma_init_header(pkt, counter, 0, len, dma_mapped,
+					  page, pbc, dma_addr);
+
+		if (nfrags) {
+			ret = qib_user_sdma_init_payload(dd, pq, pkt,
+							 iov + idx_save + 1,
+							 nfrags, npages);
+			if (ret < 0)
+				goto free_pbc_dma;
+		}
+
+		counter++;
+		npkts++;
+
+		list_add_tail(&pkt->list, list);
+	}
+
+	ret = idx;
+	goto done;
+
+free_pbc_dma:
+	if (dma_mapped)
+		dma_unmap_page(&dd->pcidev->dev, dma_addr, len, DMA_TO_DEVICE);
+free_pbc:
+	if (page) {
+		kunmap(page);
+		__free_page(page);
+	} else
+		dma_pool_free(pq->header_cache, pbc, dma_addr);
+free_pkt:
+	kmem_cache_free(pq->pkt_slab, pkt);
+free_list:
+	qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, list);
+done:
+	return ret;
+}
+
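
To make the PBC accounting in qib_user_sdma_queue_pkts() concrete: a 64-byte header iovec contributes 16 dwords, of which the first PBC dword is not counted, so pktnwc starts at 15, and the payload iovecs must supply exactly pktnw - pktnwc more dwords. A sketch with hypothetical numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t hdr_bytes = 64;
	uint32_t pktnwc = (hdr_bytes >> 2) - 1;	/* 15 dwords so far */
	uint32_t pktnw = 21;			/* claimed in the PBC */

	printf("payload must be %u bytes\n", (pktnw - pktnwc) * 4);
	return 0;
}
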
+static void qib_user_sdma_set_complete_counter(struct qib_user_sdma_queue *pq,
+					       u32 c)
+{
+	pq->sent_counter = c;
+}
+
+/* try to clean out queue -- needs pq->lock */
+static int qib_user_sdma_queue_clean(struct qib_pportdata *ppd,
+				     struct qib_user_sdma_queue *pq)
+{
+	struct qib_devdata *dd = ppd->dd;
+	struct list_head free_list;
+	struct qib_user_sdma_pkt *pkt;
+	struct qib_user_sdma_pkt *pkt_prev;
+	int ret = 0;
+
+	INIT_LIST_HEAD(&free_list);
+
+	list_for_each_entry_safe(pkt, pkt_prev, &pq->sent, list) {
+		s64 descd = ppd->sdma_descq_removed - pkt->added;
+
+		if (descd < 0)
+			break;
+
+		list_move_tail(&pkt->list, &free_list);
+
+		/* one more packet cleaned */
+		ret++;
+	}
+
+	if (!list_empty(&free_list)) {
+		u32 counter;
+
+		pkt = list_entry(free_list.prev,
+				 struct qib_user_sdma_pkt, list);
+		counter = pkt->counter;
+
+		qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
+		qib_user_sdma_set_complete_counter(pq, counter);
+	}
+
+	return ret;
+}
+
+void qib_user_sdma_queue_destroy(struct qib_user_sdma_queue *pq)
+{
+	if (!pq)
+		return;
+
+	kmem_cache_destroy(pq->pkt_slab);
+	dma_pool_destroy(pq->header_cache);
+	kfree(pq);
+}
+
+/* clean descriptor queue, returns > 0 if some elements cleaned */
+static int qib_user_sdma_hwqueue_clean(struct qib_pportdata *ppd)
+{
+	int ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ppd->sdma_lock, flags);
+	ret = qib_sdma_make_progress(ppd);
+	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+
+	return ret;
+}
+
+/* we're in close, drain packets so that we can clean up successfully... */
+void qib_user_sdma_queue_drain(struct qib_pportdata *ppd,
+			       struct qib_user_sdma_queue *pq)
+{
+	struct qib_devdata *dd = ppd->dd;
+	int i;
+
+	if (!pq)
+		return;
+
+	for (i = 0; i < QIB_USER_SDMA_DRAIN_TIMEOUT; i++) {
+		mutex_lock(&pq->lock);
+		if (list_empty(&pq->sent)) {
+			mutex_unlock(&pq->lock);
+			break;
+		}
+		qib_user_sdma_hwqueue_clean(ppd);
+		qib_user_sdma_queue_clean(ppd, pq);
+		mutex_unlock(&pq->lock);
+		msleep(10);
+	}
+
+	if (!list_empty(&pq->sent)) {
+		struct list_head free_list;
+
+		qib_dev_err(dd, "user sdma lists not empty: forcing!\n");
+		INIT_LIST_HEAD(&free_list);
+		mutex_lock(&pq->lock);
+		list_splice_init(&pq->sent, &free_list);
+		qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
+		mutex_unlock(&pq->lock);
+	}
+}
+
+static inline __le64 qib_sdma_make_desc0(struct qib_pportdata *ppd,
+					 u64 addr, u64 dwlen, u64 dwoffset)
+{
+	u8 tmpgen;
+
+	tmpgen = ppd->sdma_generation;
+
+	return cpu_to_le64(/* SDmaPhyAddr[31:0] */
+			   ((addr & 0xfffffffcULL) << 32) |
+			   /* SDmaGeneration[1:0] */
+			   ((tmpgen & 3ULL) << 30) |
+			   /* SDmaDwordCount[10:0] */
+			   ((dwlen & 0x7ffULL) << 16) |
+			   /* SDmaBufOffset[12:2] */
+			   (dwoffset & 0x7ffULL));
+}
+
+static inline __le64 qib_sdma_make_first_desc0(__le64 descq)
+{
+	return descq | cpu_to_le64(1ULL << 12);
+}
+
+static inline __le64 qib_sdma_make_last_desc0(__le64 descq)
+{
+					      /* last */  /* dma head */
+	return descq | cpu_to_le64(1ULL << 11 | 1ULL << 13);
+}
+
+static inline __le64 qib_sdma_make_desc1(u64 addr)
+{
+	/* SDmaPhyAddr[47:32] */
+	return cpu_to_le64(addr >> 32);
+}
+
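
The descriptor helpers above pack address, generation, length and offset into one little-endian qword. A host-order sketch of the same bit layout, with made-up values:

#include <stdint.h>
#include <stdio.h>

/* Pack the low descriptor qword as qib_sdma_make_desc0() does. */
static uint64_t make_desc0(uint64_t addr, uint64_t dwlen, uint64_t dwoffset,
			   uint8_t gen)
{
	return ((addr & 0xfffffffcULL) << 32) |	/* SDmaPhyAddr[31:0] */
	       ((gen & 3ULL) << 30) |		/* SDmaGeneration[1:0] */
	       ((dwlen & 0x7ffULL) << 16) |	/* SDmaDwordCount[10:0] */
	       (dwoffset & 0x7ffULL);		/* SDmaBufOffset[12:2] */
}

int main(void)
{
	/* 16-dword fragment at DMA address 0x1000, generation 1. */
	printf("desc0 = 0x%016llx\n",
	       (unsigned long long)make_desc0(0x1000, 16, 0, 1));
	return 0;
}
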
+static void qib_user_sdma_send_frag(struct qib_pportdata *ppd,
+				    struct qib_user_sdma_pkt *pkt, int idx,
+				    unsigned ofs, u16 tail)
+{
+	const u64 addr = (u64) pkt->addr[idx].addr +
+		(u64) pkt->addr[idx].offset;
+	const u64 dwlen = (u64) pkt->addr[idx].length / 4;
+	__le64 *descqp;
+	__le64 descq0;
+
+	descqp = &ppd->sdma_descq[tail].qw[0];
+
+	descq0 = qib_sdma_make_desc0(ppd, addr, dwlen, ofs);
+	if (idx == 0)
+		descq0 = qib_sdma_make_first_desc0(descq0);
+	if (idx == pkt->naddr - 1)
+		descq0 = qib_sdma_make_last_desc0(descq0);
+
+	descqp[0] = descq0;
+	descqp[1] = qib_sdma_make_desc1(addr);
+}
+
+/* pq->lock must be held, get packets on the wire... */
+static int qib_user_sdma_push_pkts(struct qib_pportdata *ppd,
+				   struct qib_user_sdma_queue *pq,
+				   struct list_head *pktlist)
+{
+	struct qib_devdata *dd = ppd->dd;
+	int ret = 0;
+	unsigned long flags;
+	u16 tail;
+	u8 generation;
+	u64 descq_added;
+
+	if (list_empty(pktlist))
+		return 0;
+
+	if (unlikely(!(ppd->lflags & QIBL_LINKACTIVE)))
+		return -ECOMM;
+
+	spin_lock_irqsave(&ppd->sdma_lock, flags);
+
+	/* keep a copy for restoring purposes in case of problems */
+	generation = ppd->sdma_generation;
+	descq_added = ppd->sdma_descq_added;
+
+	if (unlikely(!__qib_sdma_running(ppd))) {
+		ret = -ECOMM;
+		goto unlock;
+	}
+
+	tail = ppd->sdma_descq_tail;
+	while (!list_empty(pktlist)) {
+		struct qib_user_sdma_pkt *pkt =
+			list_entry(pktlist->next, struct qib_user_sdma_pkt,
+				   list);
+		int i;
+		unsigned ofs = 0;
+		u16 dtail = tail;
+
+		if (pkt->naddr > qib_sdma_descq_freecnt(ppd))
+			goto unlock_check_tail;
+
+		for (i = 0; i < pkt->naddr; i++) {
+			qib_user_sdma_send_frag(ppd, pkt, i, ofs, tail);
+			ofs += pkt->addr[i].length >> 2;
+
+			if (++tail == ppd->sdma_descq_cnt) {
+				tail = 0;
+				++ppd->sdma_generation;
+			}
+		}
+
+		if ((ofs << 2) > ppd->ibmaxlen) {
+			ret = -EMSGSIZE;
+			goto unlock;
+		}
+
+		/*
+		 * If the packet is >= 2KB mtu equivalent, we have to use
+		 * the large buffers, and have to mark each descriptor as
+		 * part of a large buffer packet.
+		 */
+		if (ofs > dd->piosize2kmax_dwords) {
+			for (i = 0; i < pkt->naddr; i++) {
+				ppd->sdma_descq[dtail].qw[0] |=
+					cpu_to_le64(1ULL << 14);
+				if (++dtail == ppd->sdma_descq_cnt)
+					dtail = 0;
+			}
+		}
+
+		ppd->sdma_descq_added += pkt->naddr;
+		pkt->added = ppd->sdma_descq_added;
+		list_move_tail(&pkt->list, &pq->sent);
+		ret++;
+	}
+
+unlock_check_tail:
+	/* advance the tail on the chip if necessary */
+	if (ppd->sdma_descq_tail != tail)
+		dd->f_sdma_update_tail(ppd, tail);
+
+unlock:
+	if (unlikely(ret < 0)) {
+		ppd->sdma_generation = generation;
+		ppd->sdma_descq_added = descq_added;
+	}
+	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+
+	return ret;
+}
+
+int qib_user_sdma_writev(struct qib_ctxtdata *rcd,
+			 struct qib_user_sdma_queue *pq,
+			 const struct iovec *iov,
+			 unsigned long dim)
+{
+	struct qib_devdata *dd = rcd->dd;
+	struct qib_pportdata *ppd = rcd->ppd;
+	int ret = 0;
+	struct list_head list;
+	int npkts = 0;
+
+	INIT_LIST_HEAD(&list);
+
+	mutex_lock(&pq->lock);
+
+	/* why not -ECOMM like qib_user_sdma_push_pkts() below? */
+	if (!qib_sdma_running(ppd))
+		goto done_unlock;
+
+	if (ppd->sdma_descq_added != ppd->sdma_descq_removed) {
+		qib_user_sdma_hwqueue_clean(ppd);
+		qib_user_sdma_queue_clean(ppd, pq);
+	}
+
+	while (dim) {
+		const int mxp = 8;
+
+		down_write(&current->mm->mmap_sem);
+		ret = qib_user_sdma_queue_pkts(dd, pq, &list, iov, dim, mxp);
+		up_write(&current->mm->mmap_sem);
+
+		if (ret <= 0)
+			goto done_unlock;
+		else {
+			dim -= ret;
+			iov += ret;
+		}
+
+		/* force packets onto the sdma hw queue... */
+		if (!list_empty(&list)) {
+			/*
+			 * Lazily clean hw queue.  The 4 is a guess of about
+			 * how many sdma descriptors a packet will take (it
+			 * doesn't have to be perfect).
+			 */
+			if (qib_sdma_descq_freecnt(ppd) < ret * 4) {
+				qib_user_sdma_hwqueue_clean(ppd);
+				qib_user_sdma_queue_clean(ppd, pq);
+			}
+
+			ret = qib_user_sdma_push_pkts(ppd, pq, &list);
+			if (ret < 0)
+				goto done_unlock;
+			else {
+				npkts += ret;
+				pq->counter += ret;
+
+				if (!list_empty(&list))
+					goto done_unlock;
+			}
+		}
+	}
+
+done_unlock:
+	if (!list_empty(&list))
+		qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &list);
+	mutex_unlock(&pq->lock);
+
+	return (ret < 0) ? ret : npkts;
+}
+
+int qib_user_sdma_make_progress(struct qib_pportdata *ppd,
+				struct qib_user_sdma_queue *pq)
+{
+	int ret = 0;
+
+	mutex_lock(&pq->lock);
+	qib_user_sdma_hwqueue_clean(ppd);
+	ret = qib_user_sdma_queue_clean(ppd, pq);
+	mutex_unlock(&pq->lock);
+
+	return ret;
+}
+
+u32 qib_user_sdma_complete_counter(const struct qib_user_sdma_queue *pq)
+{
+	return pq ? pq->sent_counter : 0;
+}
+
+u32 qib_user_sdma_inflight_counter(struct qib_user_sdma_queue *pq)
+{
+	return pq ? pq->counter : 0;
+}
diff --git a/drivers/infiniband/hw/qib/qib_user_sdma.h b/drivers/infiniband/hw/qib/qib_user_sdma.h
new file mode 100644
index 000000000000..ce8cbaf6a5c2
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_user_sdma.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2007, 2008 QLogic Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/device.h>
+
+struct qib_user_sdma_queue;
+
+struct qib_user_sdma_queue *
+qib_user_sdma_queue_create(struct device *dev, int unit, int port, int sport);
+void qib_user_sdma_queue_destroy(struct qib_user_sdma_queue *pq);
+
+int qib_user_sdma_writev(struct qib_ctxtdata *pd,
+			 struct qib_user_sdma_queue *pq,
+			 const struct iovec *iov,
+			 unsigned long dim);
+
+int qib_user_sdma_make_progress(struct qib_pportdata *ppd,
+				struct qib_user_sdma_queue *pq);
+
+void qib_user_sdma_queue_drain(struct qib_pportdata *ppd,
+			       struct qib_user_sdma_queue *pq);
+
+u32 qib_user_sdma_complete_counter(const struct qib_user_sdma_queue *pq);
+u32 qib_user_sdma_inflight_counter(struct qib_user_sdma_queue *pq);
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
new file mode 100644
index 000000000000..cda8f4173d23
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -0,0 +1,2248 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
+ * All rights reserved.
+ * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <rdma/ib_mad.h>
+#include <rdma/ib_user_verbs.h>
+#include <linux/io.h>
+#include <linux/utsname.h>
+#include <linux/rculist.h>
+#include <linux/mm.h>
+
+#include "qib.h"
+#include "qib_common.h"
+
+static unsigned int ib_qib_qp_table_size = 251;
+module_param_named(qp_table_size, ib_qib_qp_table_size, uint, S_IRUGO);
+MODULE_PARM_DESC(qp_table_size, "QP table size");
+
+unsigned int ib_qib_lkey_table_size = 16;
+module_param_named(lkey_table_size, ib_qib_lkey_table_size, uint,
+		   S_IRUGO);
+MODULE_PARM_DESC(lkey_table_size,
+		 "LKEY table size in bits (2^n, 1 <= n <= 23)");
+
+static unsigned int ib_qib_max_pds = 0xFFFF;
+module_param_named(max_pds, ib_qib_max_pds, uint, S_IRUGO);
+MODULE_PARM_DESC(max_pds,
+		 "Maximum number of protection domains to support");
+
+static unsigned int ib_qib_max_ahs = 0xFFFF;
+module_param_named(max_ahs, ib_qib_max_ahs, uint, S_IRUGO);
+MODULE_PARM_DESC(max_ahs, "Maximum number of address handles to support");
+
+unsigned int ib_qib_max_cqes = 0x2FFFF;
+module_param_named(max_cqes, ib_qib_max_cqes, uint, S_IRUGO);
+MODULE_PARM_DESC(max_cqes,
+		 "Maximum number of completion queue entries to support");
+
+unsigned int ib_qib_max_cqs = 0x1FFFF;
+module_param_named(max_cqs, ib_qib_max_cqs, uint, S_IRUGO);
+MODULE_PARM_DESC(max_cqs, "Maximum number of completion queues to support");
+
+unsigned int ib_qib_max_qp_wrs = 0x3FFF;
+module_param_named(max_qp_wrs, ib_qib_max_qp_wrs, uint, S_IRUGO);
+MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support");
+
+unsigned int ib_qib_max_qps = 16384;
+module_param_named(max_qps, ib_qib_max_qps, uint, S_IRUGO);
+MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support");
+
+unsigned int ib_qib_max_sges = 0x60;
+module_param_named(max_sges, ib_qib_max_sges, uint, S_IRUGO);
+MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support");
+
+unsigned int ib_qib_max_mcast_grps = 16384;
+module_param_named(max_mcast_grps, ib_qib_max_mcast_grps, uint, S_IRUGO);
+MODULE_PARM_DESC(max_mcast_grps,
+		 "Maximum number of multicast groups to support");
+
+unsigned int ib_qib_max_mcast_qp_attached = 16;
+module_param_named(max_mcast_qp_attached, ib_qib_max_mcast_qp_attached,
+		   uint, S_IRUGO);
+MODULE_PARM_DESC(max_mcast_qp_attached,
+		 "Maximum number of attached QPs to support");
+
+unsigned int ib_qib_max_srqs = 1024;
+module_param_named(max_srqs, ib_qib_max_srqs, uint, S_IRUGO);
+MODULE_PARM_DESC(max_srqs, "Maximum number of SRQs to support");
+
+unsigned int ib_qib_max_srq_sges = 128;
+module_param_named(max_srq_sges, ib_qib_max_srq_sges, uint, S_IRUGO);
+MODULE_PARM_DESC(max_srq_sges, "Maximum number of SRQ SGEs to support");
+
+unsigned int ib_qib_max_srq_wrs = 0x1FFFF;
+module_param_named(max_srq_wrs, ib_qib_max_srq_wrs, uint, S_IRUGO);
+MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs to support");
+
+static unsigned int ib_qib_disable_sma;
+module_param_named(disable_sma, ib_qib_disable_sma, uint, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(disable_sma, "Disable the SMA");
+
+/*
+ * Note that it is OK to post send work requests in the SQE and ERR
+ * states; qib_do_send() will process them and generate error
+ * completions as per IB 1.2 C10-96.
+ */
+const int ib_qib_state_ops[IB_QPS_ERR + 1] = {
+	[IB_QPS_RESET] = 0,
+	[IB_QPS_INIT] = QIB_POST_RECV_OK,
+	[IB_QPS_RTR] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK,
+	[IB_QPS_RTS] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK |
+	    QIB_POST_SEND_OK | QIB_PROCESS_SEND_OK |
+	    QIB_PROCESS_NEXT_SEND_OK,
+	[IB_QPS_SQD] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK |
+	    QIB_POST_SEND_OK | QIB_PROCESS_SEND_OK,
+	[IB_QPS_SQE] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK |
+	    QIB_POST_SEND_OK | QIB_FLUSH_SEND,
+	[IB_QPS_ERR] = QIB_POST_RECV_OK | QIB_FLUSH_RECV |
+	    QIB_POST_SEND_OK | QIB_FLUSH_SEND,
+};
+
+struct qib_ucontext {
+	struct ib_ucontext ibucontext;
+};
+
+static inline struct qib_ucontext *to_iucontext(struct ib_ucontext
+						  *ibucontext)
+{
+	return container_of(ibucontext, struct qib_ucontext, ibucontext);
+}
+
+/*
+ * Translate ib_wr_opcode into ib_wc_opcode.
+ */
+const enum ib_wc_opcode ib_qib_wc_opcode[] = {
+	[IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
+	[IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
+	[IB_WR_SEND] = IB_WC_SEND,
+	[IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
+	[IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
+	[IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
+	[IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD
+};
+
+/*
+ * System image GUID.
+ */
+__be64 ib_qib_sys_image_guid;
+
+/**
+ * qib_copy_sge - copy data to SGE memory
+ * @ss: the SGE state
+ * @data: the data to copy
+ * @length: the length of the data
+ * @release: if true, drop the MR reference count as each SGE is consumed
+ */
+void qib_copy_sge(struct qib_sge_state *ss, void *data, u32 length, int release)
+{
+	struct qib_sge *sge = &ss->sge;
+
+	while (length) {
+		u32 len = sge->length;
+
+		if (len > length)
+			len = length;
+		if (len > sge->sge_length)
+			len = sge->sge_length;
+		BUG_ON(len == 0);
+		memcpy(sge->vaddr, data, len);
+		sge->vaddr += len;
+		sge->length -= len;
+		sge->sge_length -= len;
+		if (sge->sge_length == 0) {
+			if (release)
+				atomic_dec(&sge->mr->refcount);
+			if (--ss->num_sge)
+				*sge = *ss->sg_list++;
+		} else if (sge->length == 0 && sge->mr->lkey) {
+			if (++sge->n >= QIB_SEGSZ) {
+				if (++sge->m >= sge->mr->mapsz)
+					break;
+				sge->n = 0;
+			}
+			sge->vaddr =
+				sge->mr->map[sge->m]->segs[sge->n].vaddr;
+			sge->length =
+				sge->mr->map[sge->m]->segs[sge->n].length;
+		}
+		data += len;
+		length -= len;
+	}
+}
+
+/**
+ * qib_skip_sge - skip over SGE memory (nearly a duplicate of qib_copy_sge())
+ * @ss: the SGE state
+ * @length: the number of bytes to skip
+ * @release: if true, drop the MR reference on each fully consumed SGE
+ */
+void qib_skip_sge(struct qib_sge_state *ss, u32 length, int release)
+{
+	struct qib_sge *sge = &ss->sge;
+
+	while (length) {
+		u32 len = sge->length;
+
+		if (len > length)
+			len = length;
+		if (len > sge->sge_length)
+			len = sge->sge_length;
+		BUG_ON(len == 0);
+		sge->vaddr += len;
+		sge->length -= len;
+		sge->sge_length -= len;
+		if (sge->sge_length == 0) {
+			if (release)
+				atomic_dec(&sge->mr->refcount);
+			if (--ss->num_sge)
+				*sge = *ss->sg_list++;
+		} else if (sge->length == 0 && sge->mr->lkey) {
+			if (++sge->n >= QIB_SEGSZ) {
+				if (++sge->m >= sge->mr->mapsz)
+					break;
+				sge->n = 0;
+			}
+			sge->vaddr =
+				sge->mr->map[sge->m]->segs[sge->n].vaddr;
+			sge->length =
+				sge->mr->map[sge->m]->segs[sge->n].length;
+		}
+		length -= len;
+	}
+}
+
+/*
+ * Count the number of DMA descriptors needed to send length bytes of data.
+ * Don't modify the qib_sge_state to get the count.
+ * Return zero if any of the segments is not aligned.
+ */
+static u32 qib_count_sge(struct qib_sge_state *ss, u32 length)
+{
+	struct qib_sge *sg_list = ss->sg_list;
+	struct qib_sge sge = ss->sge;
+	u8 num_sge = ss->num_sge;
+	u32 ndesc = 1;  /* count the header */
+
+	while (length) {
+		u32 len = sge.length;
+
+		if (len > length)
+			len = length;
+		if (len > sge.sge_length)
+			len = sge.sge_length;
+		BUG_ON(len == 0);
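+		/*
+		 * SDMA needs dword-aligned source addresses, and only
+		 * the final chunk may have a non-dword length; return 0
+		 * so the caller falls back to a single bounce buffer.
+		 */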
+		if (((long) sge.vaddr & (sizeof(u32) - 1)) ||
+		    (len != length && (len & (sizeof(u32) - 1)))) {
+			ndesc = 0;
+			break;
+		}
+		ndesc++;
+		sge.vaddr += len;
+		sge.length -= len;
+		sge.sge_length -= len;
+		if (sge.sge_length == 0) {
+			if (--num_sge)
+				sge = *sg_list++;
+		} else if (sge.length == 0 && sge.mr->lkey) {
+			if (++sge.n >= QIB_SEGSZ) {
+				if (++sge.m >= sge.mr->mapsz)
+					break;
+				sge.n = 0;
+			}
+			sge.vaddr =
+				sge.mr->map[sge.m]->segs[sge.n].vaddr;
+			sge.length =
+				sge.mr->map[sge.m]->segs[sge.n].length;
+		}
+		length -= len;
+	}
+	return ndesc;
+}
+
+/*
+ * Copy from the SGEs to the data buffer.
+ */
+static void qib_copy_from_sge(void *data, struct qib_sge_state *ss, u32 length)
+{
+	struct qib_sge *sge = &ss->sge;
+
+	while (length) {
+		u32 len = sge->length;
+
+		if (len > length)
+			len = length;
+		if (len > sge->sge_length)
+			len = sge->sge_length;
+		BUG_ON(len == 0);
+		memcpy(data, sge->vaddr, len);
+		sge->vaddr += len;
+		sge->length -= len;
+		sge->sge_length -= len;
+		if (sge->sge_length == 0) {
+			if (--ss->num_sge)
+				*sge = *ss->sg_list++;
+		} else if (sge->length == 0 && sge->mr->lkey) {
+			if (++sge->n >= QIB_SEGSZ) {
+				if (++sge->m >= sge->mr->mapsz)
+					break;
+				sge->n = 0;
+			}
+			sge->vaddr =
+				sge->mr->map[sge->m]->segs[sge->n].vaddr;
+			sge->length =
+				sge->mr->map[sge->m]->segs[sge->n].length;
+		}
+		data += len;
+		length -= len;
+	}
+}
+
+/**
+ * qib_post_one_send - post one RC, UC, or UD send work request
+ * @qp: the QP to post on
+ * @wr: the work request to send
+ */
+static int qib_post_one_send(struct qib_qp *qp, struct ib_send_wr *wr)
+{
+	struct qib_swqe *wqe;
+	u32 next;
+	int i;
+	int j;
+	int acc;
+	int ret;
+	unsigned long flags;
+	struct qib_lkey_table *rkt;
+	struct qib_pd *pd;
+
+	spin_lock_irqsave(&qp->s_lock, flags);
+
+	/* Check that state is OK to post send. */
+	if (unlikely(!(ib_qib_state_ops[qp->state] & QIB_POST_SEND_OK)))
+		goto bail_inval;
+
+	/* IB spec says that num_sge == 0 is OK. */
+	if (wr->num_sge > qp->s_max_sge)
+		goto bail_inval;
+
+	/*
+	 * Don't allow RDMA reads or atomic operations on UC QPs, and
+	 * reject undefined opcodes on any QP type.
+	 * Make sure the buffer is large enough to hold the result for
+	 * atomics.
+	 */
+	if (wr->opcode == IB_WR_FAST_REG_MR) {
+		if (qib_fast_reg_mr(qp, wr))
+			goto bail_inval;
+	} else if (qp->ibqp.qp_type == IB_QPT_UC) {
+		if ((unsigned) wr->opcode >= IB_WR_RDMA_READ)
+			goto bail_inval;
+	} else if (qp->ibqp.qp_type != IB_QPT_RC) {
+		/* Check IB_QPT_SMI, IB_QPT_GSI, IB_QPT_UD opcode */
+		if (wr->opcode != IB_WR_SEND &&
+		    wr->opcode != IB_WR_SEND_WITH_IMM)
+			goto bail_inval;
+		/* Check UD destination address PD */
+		if (qp->ibqp.pd != wr->wr.ud.ah->pd)
+			goto bail_inval;
+	} else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD)
+		goto bail_inval;
+	else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP &&
+		   (wr->num_sge == 0 ||
+		    wr->sg_list[0].length < sizeof(u64) ||
+		    wr->sg_list[0].addr & (sizeof(u64) - 1)))
+		goto bail_inval;
+	else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic)
+		goto bail_inval;
+
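+	/*
+	 * The send work queue is a ring; one slot is always left unused
+	 * so that head == last means "empty" and next == last means
+	 * "full".
+	 */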
+	next = qp->s_head + 1;
+	if (next >= qp->s_size)
+		next = 0;
+	if (next == qp->s_last) {
+		ret = -ENOMEM;
+		goto bail;
+	}
+
+	rkt = &to_idev(qp->ibqp.device)->lk_table;
+	pd = to_ipd(qp->ibqp.pd);
+	wqe = get_swqe_ptr(qp, qp->s_head);
+	wqe->wr = *wr;
+	wqe->length = 0;
+	j = 0;
+	if (wr->num_sge) {
+		acc = wr->opcode >= IB_WR_RDMA_READ ?
+			IB_ACCESS_LOCAL_WRITE : 0;
+		for (i = 0; i < wr->num_sge; i++) {
+			u32 length = wr->sg_list[i].length;
+			int ok;
+
+			if (length == 0)
+				continue;
+			ok = qib_lkey_ok(rkt, pd, &wqe->sg_list[j],
+					 &wr->sg_list[i], acc);
+			if (!ok)
+				goto bail_inval_free;
+			wqe->length += length;
+			j++;
+		}
+		wqe->wr.num_sge = j;
+	}
+	if (qp->ibqp.qp_type == IB_QPT_UC ||
+	    qp->ibqp.qp_type == IB_QPT_RC) {
+		if (wqe->length > 0x80000000U)
+			goto bail_inval_free;
+	} else if (wqe->length > (dd_from_ibdev(qp->ibqp.device)->pport +
+				  qp->port_num - 1)->ibmtu)
+		goto bail_inval_free;
+	else
+		atomic_inc(&to_iah(wr->wr.ud.ah)->refcount);
+	wqe->ssn = qp->s_ssn++;
+	qp->s_head = next;
+
+	ret = 0;
+	goto bail;
+
+bail_inval_free:
+	while (j) {
+		struct qib_sge *sge = &wqe->sg_list[--j];
+
+		atomic_dec(&sge->mr->refcount);
+	}
+bail_inval:
+	ret = -EINVAL;
+bail:
+	spin_unlock_irqrestore(&qp->s_lock, flags);
+	return ret;
+}
+
+/**
+ * qib_post_send - post a send on a QP
+ * @ibqp: the QP to post the send on
+ * @wr: the list of work requests to post
+ * @bad_wr: the first bad WR is put here
+ *
+ * This may be called from interrupt context.
+ */
+static int qib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+			 struct ib_send_wr **bad_wr)
+{
+	struct qib_qp *qp = to_iqp(ibqp);
+	int err = 0;
+
+	for (; wr; wr = wr->next) {
+		err = qib_post_one_send(qp, wr);
+		if (err) {
+			*bad_wr = wr;
+			goto bail;
+		}
+	}
+
+	/* Try to do the send work in the caller's context. */
+	qib_do_send(&qp->s_work);
+
+bail:
+	return err;
+}
+
+/**
+ * qib_post_receive - post a receive on a QP
+ * @ibqp: the QP to post the receive on
+ * @wr: the list of work requests to post
+ * @bad_wr: the first bad WR is put here
+ *
+ * This may be called from interrupt context.
+ */
+static int qib_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
+			    struct ib_recv_wr **bad_wr)
+{
+	struct qib_qp *qp = to_iqp(ibqp);
+	struct qib_rwq *wq = qp->r_rq.wq;
+	unsigned long flags;
+	int ret;
+
+	/* Check that state is OK to post receive. */
+	if (!(ib_qib_state_ops[qp->state] & QIB_POST_RECV_OK) || !wq) {
+		*bad_wr = wr;
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	for (; wr; wr = wr->next) {
+		struct qib_rwqe *wqe;
+		u32 next;
+		int i;
+
+		if ((unsigned) wr->num_sge > qp->r_rq.max_sge) {
+			*bad_wr = wr;
+			ret = -EINVAL;
+			goto bail;
+		}
+
+		spin_lock_irqsave(&qp->r_rq.lock, flags);
+		next = wq->head + 1;
+		if (next >= qp->r_rq.size)
+			next = 0;
+		if (next == wq->tail) {
+			spin_unlock_irqrestore(&qp->r_rq.lock, flags);
+			*bad_wr = wr;
+			ret = -ENOMEM;
+			goto bail;
+		}
+
+		wqe = get_rwqe_ptr(&qp->r_rq, wq->head);
+		wqe->wr_id = wr->wr_id;
+		wqe->num_sge = wr->num_sge;
+		for (i = 0; i < wr->num_sge; i++)
+			wqe->sg_list[i] = wr->sg_list[i];
+		/* Make sure queue entry is written before the head index. */
+		smp_wmb();
+		wq->head = next;
+		spin_unlock_irqrestore(&qp->r_rq.lock, flags);
+	}
+	ret = 0;
+
+bail:
+	return ret;
+}
+
+/**
+ * qib_qp_rcv - process an incoming packet on a QP
+ * @rcd: the context pointer
+ * @hdr: the packet header
+ * @has_grh: true if the packet has a GRH
+ * @data: the packet data
+ * @tlen: the packet length
+ * @qp: the QP the packet came on
+ *
+ * This is called from qib_ib_rcv() to process an incoming packet
+ * for the given QP.
+ * Called at interrupt level.
+ */
+static void qib_qp_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
+		       int has_grh, void *data, u32 tlen, struct qib_qp *qp)
+{
+	struct qib_ibport *ibp = &rcd->ppd->ibport_data;
+
+	/* Check for valid receive state. */
+	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
+		ibp->n_pkt_drops++;
+		return;
+	}
+
+	switch (qp->ibqp.qp_type) {
+	case IB_QPT_SMI:
+	case IB_QPT_GSI:
+		if (ib_qib_disable_sma)
+			break;
+		/* FALLTHROUGH */
+	case IB_QPT_UD:
+		qib_ud_rcv(ibp, hdr, has_grh, data, tlen, qp);
+		break;
+
+	case IB_QPT_RC:
+		qib_rc_rcv(rcd, hdr, has_grh, data, tlen, qp);
+		break;
+
+	case IB_QPT_UC:
+		qib_uc_rcv(ibp, hdr, has_grh, data, tlen, qp);
+		break;
+
+	default:
+		break;
+	}
+}
+
+/**
+ * qib_ib_rcv - process an incoming packet
+ * @rcd: the context pointer
+ * @rhdr: the header of the packet
+ * @data: the packet payload
+ * @tlen: the packet length
+ *
+ * This is called from qib_kreceive() to process an incoming packet at
+ * interrupt level. Tlen is the length of the header + data + CRC in bytes.
+ */
+void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen)
+{
+	struct qib_pportdata *ppd = rcd->ppd;
+	struct qib_ibport *ibp = &ppd->ibport_data;
+	struct qib_ib_header *hdr = rhdr;
+	struct qib_other_headers *ohdr;
+	struct qib_qp *qp;
+	u32 qp_num;
+	int lnh;
+	u8 opcode;
+	u16 lid;
+
+	/* 24 == LRH+BTH+CRC */
+	if (unlikely(tlen < 24))
+		goto drop;
+
+	/* Check for a valid destination LID (see ch. 7.11.1). */
+	lid = be16_to_cpu(hdr->lrh[1]);
+	if (lid < QIB_MULTICAST_LID_BASE) {
+		lid &= ~((1 << ppd->lmc) - 1);
+		if (unlikely(lid != ppd->lid))
+			goto drop;
+	}
+
+	/* Check for GRH */
+	lnh = be16_to_cpu(hdr->lrh[0]) & 3;
+	if (lnh == QIB_LRH_BTH)
+		ohdr = &hdr->u.oth;
+	else if (lnh == QIB_LRH_GRH) {
+		u32 vtf;
+
+		ohdr = &hdr->u.l.oth;
+		if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR)
+			goto drop;
+		vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow);
+		if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
+			goto drop;
+	} else
+		goto drop;
+
+	opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
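+	/* The 0x7f mask keeps the index within the opstats table. */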
+	ibp->opstats[opcode & 0x7f].n_bytes += tlen;
+	ibp->opstats[opcode & 0x7f].n_packets++;
+
+	/* Get the destination QP number. */
+	qp_num = be32_to_cpu(ohdr->bth[1]) & QIB_QPN_MASK;
+	if (qp_num == QIB_MULTICAST_QPN) {
+		struct qib_mcast *mcast;
+		struct qib_mcast_qp *p;
+
+		if (lnh != QIB_LRH_GRH)
+			goto drop;
+		mcast = qib_mcast_find(ibp, &hdr->u.l.grh.dgid);
+		if (mcast == NULL)
+			goto drop;
+		ibp->n_multicast_rcv++;
+		list_for_each_entry_rcu(p, &mcast->qp_list, list)
+			qib_qp_rcv(rcd, hdr, 1, data, tlen, p->qp);
+		/*
+		 * Notify qib_multicast_detach() if it is waiting for us
+		 * to finish.
+		 */
+		if (atomic_dec_return(&mcast->refcount) <= 1)
+			wake_up(&mcast->wait);
+	} else {
+		qp = qib_lookup_qpn(ibp, qp_num);
+		if (!qp)
+			goto drop;
+		ibp->n_unicast_rcv++;
+		qib_qp_rcv(rcd, hdr, lnh == QIB_LRH_GRH, data, tlen, qp);
+		/*
+		 * Notify qib_destroy_qp() if it is waiting
+		 * for us to finish.
+		 */
+		if (atomic_dec_and_test(&qp->refcount))
+			wake_up(&qp->wait);
+	}
+	return;
+
+drop:
+	ibp->n_pkt_drops++;
+}
+
+/*
+ * This is called from a timer to check for QPs
+ * which need kernel memory in order to send a packet.
+ */
+static void mem_timer(unsigned long data)
+{
+	struct qib_ibdev *dev = (struct qib_ibdev *) data;
+	struct list_head *list = &dev->memwait;
+	struct qib_qp *qp = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->pending_lock, flags);
+	if (!list_empty(list)) {
+		qp = list_entry(list->next, struct qib_qp, iowait);
+		list_del_init(&qp->iowait);
+		atomic_inc(&qp->refcount);
+		if (!list_empty(list))
+			mod_timer(&dev->mem_timer, jiffies + 1);
+	}
+	spin_unlock_irqrestore(&dev->pending_lock, flags);
+
+	if (qp) {
+		spin_lock_irqsave(&qp->s_lock, flags);
+		if (qp->s_flags & QIB_S_WAIT_KMEM) {
+			qp->s_flags &= ~QIB_S_WAIT_KMEM;
+			qib_schedule_send(qp);
+		}
+		spin_unlock_irqrestore(&qp->s_lock, flags);
+		if (atomic_dec_and_test(&qp->refcount))
+			wake_up(&qp->wait);
+	}
+}
+
+static void update_sge(struct qib_sge_state *ss, u32 length)
+{
+	struct qib_sge *sge = &ss->sge;
+
+	sge->vaddr += length;
+	sge->length -= length;
+	sge->sge_length -= length;
+	if (sge->sge_length == 0) {
+		if (--ss->num_sge)
+			*sge = *ss->sg_list++;
+	} else if (sge->length == 0 && sge->mr->lkey) {
+		if (++sge->n >= QIB_SEGSZ) {
+			if (++sge->m >= sge->mr->mapsz)
+				return;
+			sge->n = 0;
+		}
+		sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
+		sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
+	}
+}
+
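+/*
+ * The helpers below assemble partial dwords for PIO copies.  "Upper"
+ * means the bytes at higher memory addresses, so the little- and
+ * big-endian variants shift in opposite directions to preserve the
+ * byte order on the wire.
+ */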
+#ifdef __LITTLE_ENDIAN
+static inline u32 get_upper_bits(u32 data, u32 shift)
+{
+	return data >> shift;
+}
+
+static inline u32 set_upper_bits(u32 data, u32 shift)
+{
+	return data << shift;
+}
+
+static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
+{
+	data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
+	data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
+	return data;
+}
+#else
+static inline u32 get_upper_bits(u32 data, u32 shift)
+{
+	return data << shift;
+}
+
+static inline u32 set_upper_bits(u32 data, u32 shift)
+{
+	return data >> shift;
+}
+
+static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
+{
+	data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
+	data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
+	return data;
+}
+#endif
+
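+/*
+ * Copy data from the SGE list into a PIO buffer, handling source
+ * buffers that are not dword aligned.  Partial dwords accumulate in
+ * 'data' until a full dword can be written; the final dword is held
+ * back in 'last' and written at the end as the trigger word.
+ */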
+static void copy_io(u32 __iomem *piobuf, struct qib_sge_state *ss,
+		    u32 length, unsigned flush_wc)
+{
+	u32 extra = 0;
+	u32 data = 0;
+	u32 last;
+
+	while (1) {
+		u32 len = ss->sge.length;
+		u32 off;
+
+		if (len > length)
+			len = length;
+		if (len > ss->sge.sge_length)
+			len = ss->sge.sge_length;
+		BUG_ON(len == 0);
+		/* If the source address is not aligned, try to align it. */
+		off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
+		if (off) {
+			u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr &
+					    ~(sizeof(u32) - 1));
+			u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE);
+			u32 y;
+
+			y = sizeof(u32) - off;
+			if (len > y)
+				len = y;
+			if (len + extra >= sizeof(u32)) {
+				data |= set_upper_bits(v, extra *
+						       BITS_PER_BYTE);
+				len = sizeof(u32) - extra;
+				if (len == length) {
+					last = data;
+					break;
+				}
+				__raw_writel(data, piobuf);
+				piobuf++;
+				extra = 0;
+				data = 0;
+			} else {
+				/* Clear unused upper bytes */
+				data |= clear_upper_bytes(v, len, extra);
+				if (len == length) {
+					last = data;
+					break;
+				}
+				extra += len;
+			}
+		} else if (extra) {
+			/* Source address is aligned. */
+			u32 *addr = (u32 *) ss->sge.vaddr;
+			int shift = extra * BITS_PER_BYTE;
+			int ushift = 32 - shift;
+			u32 l = len;
+
+			while (l >= sizeof(u32)) {
+				u32 v = *addr;
+
+				data |= set_upper_bits(v, shift);
+				__raw_writel(data, piobuf);
+				data = get_upper_bits(v, ushift);
+				piobuf++;
+				addr++;
+				l -= sizeof(u32);
+			}
+			/*
+			 * Fewer than sizeof(u32) bytes remain in this
+			 * segment; fold them into the pending dword.
+			 */
+			if (l) {
+				u32 v = *addr;
+
+				if (l + extra >= sizeof(u32)) {
+					data |= set_upper_bits(v, shift);
+					len -= l + extra - sizeof(u32);
+					if (len == length) {
+						last = data;
+						break;
+					}
+					__raw_writel(data, piobuf);
+					piobuf++;
+					extra = 0;
+					data = 0;
+				} else {
+					/* Clear unused upper bytes */
+					data |= clear_upper_bytes(v, l, extra);
+					if (len == length) {
+						last = data;
+						break;
+					}
+					extra += l;
+				}
+			} else if (len == length) {
+				last = data;
+				break;
+			}
+		} else if (len == length) {
+			u32 w;
+
+			/*
+			 * Need to round up for the last dword in the
+			 * packet.
+			 */
+			w = (len + 3) >> 2;
+			qib_pio_copy(piobuf, ss->sge.vaddr, w - 1);
+			piobuf += w - 1;
+			last = ((u32 *) ss->sge.vaddr)[w - 1];
+			break;
+		} else {
+			u32 w = len >> 2;
+
+			qib_pio_copy(piobuf, ss->sge.vaddr, w);
+			piobuf += w;
+
+			extra = len & (sizeof(u32) - 1);
+			if (extra) {
+				u32 v = ((u32 *) ss->sge.vaddr)[w];
+
+				/* Clear unused upper bytes */
+				data = clear_upper_bytes(v, extra, 0);
+			}
+		}
+		update_sge(ss, len);
+		length -= len;
+	}
+	/* Update address before sending packet. */
+	update_sge(ss, length);
+	if (flush_wc) {
+		/* must flush everything written before the trigger word */
+		qib_flush_wc();
+		__raw_writel(last, piobuf);
+		/* be sure trigger word is written */
+		qib_flush_wc();
+	} else
+		__raw_writel(last, piobuf);
+}
+
+static struct qib_verbs_txreq *get_txreq(struct qib_ibdev *dev,
+					 struct qib_qp *qp, int *retp)
+{
+	struct qib_verbs_txreq *tx;
+	unsigned long flags;
+
+	spin_lock_irqsave(&qp->s_lock, flags);
+	spin_lock(&dev->pending_lock);
+
+	if (!list_empty(&dev->txreq_free)) {
+		struct list_head *l = dev->txreq_free.next;
+
+		list_del(l);
+		tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
+		*retp = 0;
+	} else {
+		if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK &&
+		    list_empty(&qp->iowait)) {
+			dev->n_txwait++;
+			qp->s_flags |= QIB_S_WAIT_TX;
+			list_add_tail(&qp->iowait, &dev->txwait);
+		}
+		tx = NULL;
+		qp->s_flags &= ~QIB_S_BUSY;
+		*retp = -EBUSY;
+	}
+
+	spin_unlock(&dev->pending_lock);
+	spin_unlock_irqrestore(&qp->s_lock, flags);
+
+	return tx;
+}
+
+void qib_put_txreq(struct qib_verbs_txreq *tx)
+{
+	struct qib_ibdev *dev;
+	struct qib_qp *qp;
+	unsigned long flags;
+
+	qp = tx->qp;
+	dev = to_idev(qp->ibqp.device);
+
+	if (atomic_dec_and_test(&qp->refcount))
+		wake_up(&qp->wait);
+	if (tx->mr) {
+		atomic_dec(&tx->mr->refcount);
+		tx->mr = NULL;
+	}
+	if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF) {
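+		/* The bounce buffer was mapped in qib_verbs_send_dma(). */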
+		tx->txreq.flags &= ~QIB_SDMA_TXREQ_F_FREEBUF;
+		dma_unmap_single(&dd_from_dev(dev)->pcidev->dev,
+				 tx->txreq.addr, tx->hdr_dwords << 2,
+				 DMA_TO_DEVICE);
+		kfree(tx->align_buf);
+	}
+
+	spin_lock_irqsave(&dev->pending_lock, flags);
+
+	/* Put struct back on free list */
+	list_add(&tx->txreq.list, &dev->txreq_free);
+
+	if (!list_empty(&dev->txwait)) {
+		/* Wake up first QP wanting a free struct */
+		qp = list_entry(dev->txwait.next, struct qib_qp, iowait);
+		list_del_init(&qp->iowait);
+		atomic_inc(&qp->refcount);
+		spin_unlock_irqrestore(&dev->pending_lock, flags);
+
+		spin_lock_irqsave(&qp->s_lock, flags);
+		if (qp->s_flags & QIB_S_WAIT_TX) {
+			qp->s_flags &= ~QIB_S_WAIT_TX;
+			qib_schedule_send(qp);
+		}
+		spin_unlock_irqrestore(&qp->s_lock, flags);
+
+		if (atomic_dec_and_test(&qp->refcount))
+			wake_up(&qp->wait);
+	} else
+		spin_unlock_irqrestore(&dev->pending_lock, flags);
+}
+
+/*
+ * This is called when there are send DMA descriptors that might be
+ * available.
+ *
+ * This is called with ppd->sdma_lock held.
+ */
+void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail)
+{
+	struct qib_qp *qp, *nqp;
+	struct qib_qp *qps[20];
+	struct qib_ibdev *dev;
+	unsigned i, n;
+
+	n = 0;
+	dev = &ppd->dd->verbs_dev;
+	spin_lock(&dev->pending_lock);
+
+	/* Search wait list for first QP wanting DMA descriptors. */
+	list_for_each_entry_safe(qp, nqp, &dev->dmawait, iowait) {
+		if (qp->port_num != ppd->port)
+			continue;
+		if (n == ARRAY_SIZE(qps))
+			break;
+		if (qp->s_tx->txreq.sg_count > avail)
+			break;
+		avail -= qp->s_tx->txreq.sg_count;
+		list_del_init(&qp->iowait);
+		atomic_inc(&qp->refcount);
+		qps[n++] = qp;
+	}
+
+	spin_unlock(&dev->pending_lock);
+
+	for (i = 0; i < n; i++) {
+		qp = qps[i];
+		spin_lock(&qp->s_lock);
+		if (qp->s_flags & QIB_S_WAIT_DMA_DESC) {
+			qp->s_flags &= ~QIB_S_WAIT_DMA_DESC;
+			qib_schedule_send(qp);
+		}
+		spin_unlock(&qp->s_lock);
+		if (atomic_dec_and_test(&qp->refcount))
+			wake_up(&qp->wait);
+	}
+}
+
+/*
+ * This is called with ppd->sdma_lock held.
+ */
+static void sdma_complete(struct qib_sdma_txreq *cookie, int status)
+{
+	struct qib_verbs_txreq *tx =
+		container_of(cookie, struct qib_verbs_txreq, txreq);
+	struct qib_qp *qp = tx->qp;
+
+	spin_lock(&qp->s_lock);
+	if (tx->wqe)
+		qib_send_complete(qp, tx->wqe, IB_WC_SUCCESS);
+	else if (qp->ibqp.qp_type == IB_QPT_RC) {
+		struct qib_ib_header *hdr;
+
+		if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF)
+			hdr = &tx->align_buf->hdr;
+		else {
+			struct qib_ibdev *dev = to_idev(qp->ibqp.device);
+
+			hdr = &dev->pio_hdrs[tx->hdr_inx].hdr;
+		}
+		qib_rc_send_complete(qp, hdr);
+	}
+	if (atomic_dec_and_test(&qp->s_dma_busy)) {
+		if (qp->state == IB_QPS_RESET)
+			wake_up(&qp->wait_dma);
+		else if (qp->s_flags & QIB_S_WAIT_DMA) {
+			qp->s_flags &= ~QIB_S_WAIT_DMA;
+			qib_schedule_send(qp);
+		}
+	}
+	spin_unlock(&qp->s_lock);
+
+	qib_put_txreq(tx);
+}
+
+static int wait_kmem(struct qib_ibdev *dev, struct qib_qp *qp)
+{
+	unsigned long flags;
+	int ret = 0;
+
+	spin_lock_irqsave(&qp->s_lock, flags);
+	if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
+		spin_lock(&dev->pending_lock);
+		if (list_empty(&qp->iowait)) {
+			if (list_empty(&dev->memwait))
+				mod_timer(&dev->mem_timer, jiffies + 1);
+			qp->s_flags |= QIB_S_WAIT_KMEM;
+			list_add_tail(&qp->iowait, &dev->memwait);
+		}
+		spin_unlock(&dev->pending_lock);
+		qp->s_flags &= ~QIB_S_BUSY;
+		ret = -EBUSY;
+	}
+	spin_unlock_irqrestore(&qp->s_lock, flags);
+
+	return ret;
+}
+
+static int qib_verbs_send_dma(struct qib_qp *qp, struct qib_ib_header *hdr,
+			      u32 hdrwords, struct qib_sge_state *ss, u32 len,
+			      u32 plen, u32 dwords)
+{
+	struct qib_ibdev *dev = to_idev(qp->ibqp.device);
+	struct qib_devdata *dd = dd_from_dev(dev);
+	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
+	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+	struct qib_verbs_txreq *tx;
+	struct qib_pio_header *phdr;
+	u32 control;
+	u32 ndesc;
+	int ret;
+
+	tx = qp->s_tx;
+	if (tx) {
+		qp->s_tx = NULL;
+		/* resend previously constructed packet */
+		ret = qib_sdma_verbs_send(ppd, tx->ss, tx->dwords, tx);
+		goto bail;
+	}
+
+	tx = get_txreq(dev, qp, &ret);
+	if (!tx)
+		goto bail;
+
+	control = dd->f_setpbc_control(ppd, plen, qp->s_srate,
+				       be16_to_cpu(hdr->lrh[0]) >> 12);
+	tx->qp = qp;
+	atomic_inc(&qp->refcount);
+	tx->wqe = qp->s_wqe;
+	tx->mr = qp->s_rdma_mr;
+	if (qp->s_rdma_mr)
+		qp->s_rdma_mr = NULL;
+	tx->txreq.callback = sdma_complete;
+	if (dd->flags & QIB_HAS_SDMA_TIMEOUT)
+		tx->txreq.flags = QIB_SDMA_TXREQ_F_HEADTOHOST;
+	else
+		tx->txreq.flags = QIB_SDMA_TXREQ_F_INTREQ;
+	if (plen + 1 > dd->piosize2kmax_dwords)
+		tx->txreq.flags |= QIB_SDMA_TXREQ_F_USELARGEBUF;
+
+	if (len) {
+		/*
+		 * Don't try to DMA if it takes more descriptors than
+		 * the queue holds.
+		 */
+		ndesc = qib_count_sge(ss, len);
+		if (ndesc >= ppd->sdma_descq_cnt)
+			ndesc = 0;
+	} else
+		ndesc = 1;
+	if (ndesc) {
+		phdr = &dev->pio_hdrs[tx->hdr_inx];
+		phdr->pbc[0] = cpu_to_le32(plen);
+		phdr->pbc[1] = cpu_to_le32(control);
+		memcpy(&phdr->hdr, hdr, hdrwords << 2);
+		tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEDESC;
+		tx->txreq.sg_count = ndesc;
+		tx->txreq.addr = dev->pio_hdrs_phys +
+			tx->hdr_inx * sizeof(struct qib_pio_header);
+		tx->hdr_dwords = hdrwords + 2; /* add PBC length */
+		ret = qib_sdma_verbs_send(ppd, ss, dwords, tx);
+		goto bail;
+	}
+
+	/* Allocate a buffer and copy the header and payload to it. */
+	tx->hdr_dwords = plen + 1;
+	phdr = kmalloc(tx->hdr_dwords << 2, GFP_ATOMIC);
+	if (!phdr)
+		goto err_tx;
+	phdr->pbc[0] = cpu_to_le32(plen);
+	phdr->pbc[1] = cpu_to_le32(control);
+	memcpy(&phdr->hdr, hdr, hdrwords << 2);
+	qib_copy_from_sge((u32 *) &phdr->hdr + hdrwords, ss, len);
+
+	tx->txreq.addr = dma_map_single(&dd->pcidev->dev, phdr,
+					tx->hdr_dwords << 2, DMA_TO_DEVICE);
+	if (dma_mapping_error(&dd->pcidev->dev, tx->txreq.addr))
+		goto map_err;
+	tx->align_buf = phdr;
+	tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEBUF;
+	tx->txreq.sg_count = 1;
+	ret = qib_sdma_verbs_send(ppd, NULL, 0, tx);
+	goto unaligned;
+
+map_err:
+	kfree(phdr);
+err_tx:
+	qib_put_txreq(tx);
+	ret = wait_kmem(dev, qp);
+unaligned:
+	ibp->n_unaligned++;
+bail:
+	return ret;
+}
+
+/*
+ * If we are now in the error state, return zero to flush the
+ * send work request.
+ */
+static int no_bufs_available(struct qib_qp *qp)
+{
+	struct qib_ibdev *dev = to_idev(qp->ibqp.device);
+	struct qib_devdata *dd;
+	unsigned long flags;
+	int ret = 0;
+
+	/*
+	 * Note that as soon as the PIO-avail interrupt is enabled via
+	 * dd->f_wantpiobuf_intr(), and possibly before that call
+	 * returns, qib_ib_piobufavail() could be called.  Therefore,
+	 * put the QP on the I/O wait list before enabling the interrupt.
+	 */
+	spin_lock_irqsave(&qp->s_lock, flags);
+	if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
+		spin_lock(&dev->pending_lock);
+		if (list_empty(&qp->iowait)) {
+			dev->n_piowait++;
+			qp->s_flags |= QIB_S_WAIT_PIO;
+			list_add_tail(&qp->iowait, &dev->piowait);
+			dd = dd_from_dev(dev);
+			dd->f_wantpiobuf_intr(dd, 1);
+		}
+		spin_unlock(&dev->pending_lock);
+		qp->s_flags &= ~QIB_S_BUSY;
+		ret = -EBUSY;
+	}
+	spin_unlock_irqrestore(&qp->s_lock, flags);
+	return ret;
+}
+
+static int qib_verbs_send_pio(struct qib_qp *qp, struct qib_ib_header *ibhdr,
+			      u32 hdrwords, struct qib_sge_state *ss, u32 len,
+			      u32 plen, u32 dwords)
+{
+	struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
+	struct qib_pportdata *ppd = dd->pport + qp->port_num - 1;
+	u32 *hdr = (u32 *) ibhdr;
+	u32 __iomem *piobuf_orig;
+	u32 __iomem *piobuf;
+	u64 pbc;
+	unsigned long flags;
+	unsigned flush_wc;
+	u32 control;
+	u32 pbufn;
+
+	control = dd->f_setpbc_control(ppd, plen, qp->s_srate,
+		be16_to_cpu(ibhdr->lrh[0]) >> 12);
+	pbc = ((u64) control << 32) | plen;
+	piobuf = dd->f_getsendbuf(ppd, pbc, &pbufn);
+	if (unlikely(piobuf == NULL))
+		return no_bufs_available(qp);
+
+	/*
+	 * Write the pbc.
+	 * We have to flush after the PBC for correctness on some cpus
+	 * or the WC buffer can be written out of order.
+	 */
+	writeq(pbc, piobuf);
+	piobuf_orig = piobuf;
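+	/* Step over the 2-dword PBC that was just written. */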
+	piobuf += 2;
+
+	flush_wc = dd->flags & QIB_PIO_FLUSH_WC;
+	if (len == 0) {
+		/*
+		 * If there is just the header portion, must flush before
+		 * writing last word of header for correctness, and after
+		 * the last header word (trigger word).
+		 */
+		if (flush_wc) {
+			qib_flush_wc();
+			qib_pio_copy(piobuf, hdr, hdrwords - 1);
+			qib_flush_wc();
+			__raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
+			qib_flush_wc();
+		} else
+			qib_pio_copy(piobuf, hdr, hdrwords);
+		goto done;
+	}
+
+	if (flush_wc)
+		qib_flush_wc();
+	qib_pio_copy(piobuf, hdr, hdrwords);
+	piobuf += hdrwords;
+
+	/* The common case is aligned and contained in one segment. */
+	if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
+		   !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
+		u32 *addr = (u32 *) ss->sge.vaddr;
+
+		/* Update address before sending packet. */
+		update_sge(ss, len);
+		if (flush_wc) {
+			qib_pio_copy(piobuf, addr, dwords - 1);
+			/* must flush everything written before the trigger word */
+			qib_flush_wc();
+			__raw_writel(addr[dwords - 1], piobuf + dwords - 1);
+			/* be sure trigger word is written */
+			qib_flush_wc();
+		} else
+			qib_pio_copy(piobuf, addr, dwords);
+		goto done;
+	}
+	copy_io(piobuf, ss, len, flush_wc);
+done:
+	if (dd->flags & QIB_USE_SPCL_TRIG) {
+		u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;
+
+		qib_flush_wc();
+		__raw_writel(0xaebecede, piobuf_orig + spcl_off);
+	}
+	qib_sendbuf_done(dd, pbufn);
+	if (qp->s_rdma_mr) {
+		atomic_dec(&qp->s_rdma_mr->refcount);
+		qp->s_rdma_mr = NULL;
+	}
+	if (qp->s_wqe) {
+		spin_lock_irqsave(&qp->s_lock, flags);
+		qib_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS);
+		spin_unlock_irqrestore(&qp->s_lock, flags);
+	} else if (qp->ibqp.qp_type == IB_QPT_RC) {
+		spin_lock_irqsave(&qp->s_lock, flags);
+		qib_rc_send_complete(qp, ibhdr);
+		spin_unlock_irqrestore(&qp->s_lock, flags);
+	}
+	return 0;
+}
+
+/**
+ * qib_verbs_send - send a packet
+ * @qp: the QP to send on
+ * @hdr: the packet header
+ * @hdrwords: the number of 32-bit words in the header
+ * @ss: the SGE to send
+ * @len: the length of the packet in bytes
+ *
+ * Return zero if packet is sent or queued OK.
+ * Otherwise return non-zero with QIB_S_BUSY cleared in qp->s_flags.
+ */
+int qib_verbs_send(struct qib_qp *qp, struct qib_ib_header *hdr,
+		   u32 hdrwords, struct qib_sge_state *ss, u32 len)
+{
+	struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
+	u32 plen;
+	int ret;
+	u32 dwords = (len + 3) >> 2;
+
+	/*
+	 * Calculate the send buffer trigger address.
+	 * The +1 counts for the pbc control dword following the pbc length.
+	 */
+	plen = hdrwords + dwords + 1;
+
+	/*
+	 * VL15 packets (IB_QPT_SMI) will always use PIO, so we
+	 * can defer SDMA restart until link goes ACTIVE without
+	 * worrying about just how we got there.
+	 */
+	if (qp->ibqp.qp_type == IB_QPT_SMI ||
+	    !(dd->flags & QIB_HAS_SEND_DMA))
+		ret = qib_verbs_send_pio(qp, hdr, hdrwords, ss, len,
+					 plen, dwords);
+	else
+		ret = qib_verbs_send_dma(qp, hdr, hdrwords, ss, len,
+					 plen, dwords);
+
+	return ret;
+}
+
+int qib_snapshot_counters(struct qib_pportdata *ppd, u64 *swords,
+			  u64 *rwords, u64 *spkts, u64 *rpkts,
+			  u64 *xmit_wait)
+{
+	int ret;
+	struct qib_devdata *dd = ppd->dd;
+
+	if (!(dd->flags & QIB_PRESENT)) {
+		/* no hardware, freeze, etc. */
+		ret = -EINVAL;
+		goto bail;
+	}
+	*swords = dd->f_portcntr(ppd, QIBPORTCNTR_WORDSEND);
+	*rwords = dd->f_portcntr(ppd, QIBPORTCNTR_WORDRCV);
+	*spkts = dd->f_portcntr(ppd, QIBPORTCNTR_PKTSEND);
+	*rpkts = dd->f_portcntr(ppd, QIBPORTCNTR_PKTRCV);
+	*xmit_wait = dd->f_portcntr(ppd, QIBPORTCNTR_SENDSTALL);
+
+	ret = 0;
+
+bail:
+	return ret;
+}
+
+/**
+ * qib_get_counters - get various chip counters
+ * @ppd: the physical port of the qlogic_ib device
+ * @cntrs: counters are placed here
+ *
+ * Return the counters needed by recv_pma_get_portcounters().
+ */
+int qib_get_counters(struct qib_pportdata *ppd,
+		     struct qib_verbs_counters *cntrs)
+{
+	int ret;
+
+	if (!(ppd->dd->flags & QIB_PRESENT)) {
+		/* no hardware, freeze, etc. */
+		ret = -EINVAL;
+		goto bail;
+	}
+	cntrs->symbol_error_counter =
+		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBSYMBOLERR);
+	cntrs->link_error_recovery_counter =
+		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBLINKERRRECOV);
+	/*
+	 * The link downed counter counts when the other side downs the
+	 * connection.  We add in the number of times we downed the link
+	 * due to local link integrity errors to compensate.
+	 */
+	cntrs->link_downed_counter =
+		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBLINKDOWN);
+	cntrs->port_rcv_errors =
+		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXDROPPKT) +
+		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RCVOVFL) +
+		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERR_RLEN) +
+		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_INVALIDRLEN) +
+		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRLINK) +
+		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRICRC) +
+		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRVCRC) +
+		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRLPCRC) +
+		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_BADFORMAT);
+	cntrs->port_rcv_errors +=
+		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXLOCALPHYERR);
+	cntrs->port_rcv_errors +=
+		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXVLERR);
+	cntrs->port_rcv_remphys_errors =
+		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RCVEBP);
+	cntrs->port_xmit_discards =
+		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_UNSUPVL);
+	cntrs->port_xmit_data = ppd->dd->f_portcntr(ppd,
+			QIBPORTCNTR_WORDSEND);
+	cntrs->port_rcv_data = ppd->dd->f_portcntr(ppd,
+			QIBPORTCNTR_WORDRCV);
+	cntrs->port_xmit_packets = ppd->dd->f_portcntr(ppd,
+			QIBPORTCNTR_PKTSEND);
+	cntrs->port_rcv_packets = ppd->dd->f_portcntr(ppd,
+			QIBPORTCNTR_PKTRCV);
+	cntrs->local_link_integrity_errors =
+		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_LLI);
+	cntrs->excessive_buffer_overrun_errors =
+		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_EXCESSBUFOVFL);
+	cntrs->vl15_dropped =
+		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_VL15PKTDROP);
+
+	ret = 0;
+
+bail:
+	return ret;
+}
+
+/**
+ * qib_ib_piobufavail - callback when a PIO buffer is available
+ * @dd: the device pointer
+ *
+ * This is called from qib_intr() at interrupt level when a PIO buffer is
+ * available after qib_verbs_send() failed because no buffers were
+ * available. Disable the interrupt if there are no more QPs waiting.
+ */
+void qib_ib_piobufavail(struct qib_devdata *dd)
+{
+	struct qib_ibdev *dev = &dd->verbs_dev;
+	struct list_head *list;
+	struct qib_qp *qps[5];
+	struct qib_qp *qp;
+	unsigned long flags;
+	unsigned i, n;
+
+	list = &dev->piowait;
+	n = 0;
+
+	/*
+	 * Note: checking that the piowait list is empty and clearing
+	 * the buffer available interrupt needs to be atomic or we
+	 * could end up with QPs on the wait list with the interrupt
+	 * disabled.
+	 */
+	spin_lock_irqsave(&dev->pending_lock, flags);
+	while (!list_empty(list)) {
+		if (n == ARRAY_SIZE(qps))
+			goto full;
+		qp = list_entry(list->next, struct qib_qp, iowait);
+		list_del_init(&qp->iowait);
+		atomic_inc(&qp->refcount);
+		qps[n++] = qp;
+	}
+	dd->f_wantpiobuf_intr(dd, 0);
+full:
+	spin_unlock_irqrestore(&dev->pending_lock, flags);
+
+	for (i = 0; i < n; i++) {
+		qp = qps[i];
+
+		spin_lock_irqsave(&qp->s_lock, flags);
+		if (qp->s_flags & QIB_S_WAIT_PIO) {
+			qp->s_flags &= ~QIB_S_WAIT_PIO;
+			qib_schedule_send(qp);
+		}
+		spin_unlock_irqrestore(&qp->s_lock, flags);
+
+		/* Notify qib_destroy_qp() if it is waiting. */
+		if (atomic_dec_and_test(&qp->refcount))
+			wake_up(&qp->wait);
+	}
+}
+
+static int qib_query_device(struct ib_device *ibdev,
+			    struct ib_device_attr *props)
+{
+	struct qib_devdata *dd = dd_from_ibdev(ibdev);
+	struct qib_ibdev *dev = to_idev(ibdev);
+
+	memset(props, 0, sizeof(*props));
+
+	props->device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
+		IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
+		IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN |
+		IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE;
+	props->page_size_cap = PAGE_SIZE;
+	props->vendor_id =
+		QIB_SRC_OUI_1 << 16 | QIB_SRC_OUI_2 << 8 | QIB_SRC_OUI_3;
+	props->vendor_part_id = dd->deviceid;
+	props->hw_ver = dd->minrev;
+	props->sys_image_guid = ib_qib_sys_image_guid;
+	props->max_mr_size = ~0ULL;
+	props->max_qp = ib_qib_max_qps;
+	props->max_qp_wr = ib_qib_max_qp_wrs;
+	props->max_sge = ib_qib_max_sges;
+	props->max_cq = ib_qib_max_cqs;
+	props->max_ah = ib_qib_max_ahs;
+	props->max_cqe = ib_qib_max_cqes;
+	props->max_mr = dev->lk_table.max;
+	props->max_fmr = dev->lk_table.max;
+	props->max_map_per_fmr = 32767;
+	props->max_pd = ib_qib_max_pds;
+	props->max_qp_rd_atom = QIB_MAX_RDMA_ATOMIC;
+	props->max_qp_init_rd_atom = 255;
+	/* props->max_res_rd_atom */
+	props->max_srq = ib_qib_max_srqs;
+	props->max_srq_wr = ib_qib_max_srq_wrs;
+	props->max_srq_sge = ib_qib_max_srq_sges;
+	/* props->local_ca_ack_delay */
+	props->atomic_cap = IB_ATOMIC_GLOB;
+	props->max_pkeys = qib_get_npkeys(dd);
+	props->max_mcast_grp = ib_qib_max_mcast_grps;
+	props->max_mcast_qp_attach = ib_qib_max_mcast_qp_attached;
+	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
+		props->max_mcast_grp;
+
+	return 0;
+}
+
+static int qib_query_port(struct ib_device *ibdev, u8 port,
+			  struct ib_port_attr *props)
+{
+	struct qib_devdata *dd = dd_from_ibdev(ibdev);
+	struct qib_ibport *ibp = to_iport(ibdev, port);
+	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+	enum ib_mtu mtu;
+	u16 lid = ppd->lid;
+
+	memset(props, 0, sizeof(*props));
+	props->lid = lid ? lid : be16_to_cpu(IB_LID_PERMISSIVE);
+	props->lmc = ppd->lmc;
+	props->sm_lid = ibp->sm_lid;
+	props->sm_sl = ibp->sm_sl;
+	props->state = dd->f_iblink_state(ppd->lastibcstat);
+	props->phys_state = dd->f_ibphys_portstate(ppd->lastibcstat);
+	props->port_cap_flags = ibp->port_cap_flags;
+	props->gid_tbl_len = QIB_GUIDS_PER_PORT;
+	props->max_msg_sz = 0x80000000;
+	props->pkey_tbl_len = qib_get_npkeys(dd);
+	props->bad_pkey_cntr = ibp->pkey_violations;
+	props->qkey_viol_cntr = ibp->qkey_violations;
+	props->active_width = ppd->link_width_active;
+	/* See rate_show() */
+	props->active_speed = ppd->link_speed_active;
+	props->max_vl_num = qib_num_vls(ppd->vls_supported);
+	props->init_type_reply = 0;
+
+	props->max_mtu = qib_ibmtu ? qib_ibmtu : IB_MTU_4096;
+	switch (ppd->ibmtu) {
+	case 4096:
+		mtu = IB_MTU_4096;
+		break;
+	case 2048:
+		mtu = IB_MTU_2048;
+		break;
+	case 1024:
+		mtu = IB_MTU_1024;
+		break;
+	case 512:
+		mtu = IB_MTU_512;
+		break;
+	case 256:
+		mtu = IB_MTU_256;
+		break;
+	default:
+		mtu = IB_MTU_2048;
+	}
+	props->active_mtu = mtu;
+	props->subnet_timeout = ibp->subnet_timeout;
+
+	return 0;
+}
+
+static int qib_modify_device(struct ib_device *device,
+			     int device_modify_mask,
+			     struct ib_device_modify *device_modify)
+{
+	struct qib_devdata *dd = dd_from_ibdev(device);
+	unsigned i;
+	int ret;
+
+	if (device_modify_mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
+				   IB_DEVICE_MODIFY_NODE_DESC)) {
+		ret = -EOPNOTSUPP;
+		goto bail;
+	}
+
+	if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC) {
+		memcpy(device->node_desc, device_modify->node_desc, 64);
+		for (i = 0; i < dd->num_pports; i++) {
+			struct qib_ibport *ibp = &dd->pport[i].ibport_data;
+
+			qib_node_desc_chg(ibp);
+		}
+	}
+
+	if (device_modify_mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID) {
+		ib_qib_sys_image_guid =
+			cpu_to_be64(device_modify->sys_image_guid);
+		for (i = 0; i < dd->num_pports; i++) {
+			struct qib_ibport *ibp = &dd->pport[i].ibport_data;
+
+			qib_sys_guid_chg(ibp);
+		}
+	}
+
+	ret = 0;
+
+bail:
+	return ret;
+}
+
+static int qib_modify_port(struct ib_device *ibdev, u8 port,
+			   int port_modify_mask, struct ib_port_modify *props)
+{
+	struct qib_ibport *ibp = to_iport(ibdev, port);
+	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+
+	ibp->port_cap_flags |= props->set_port_cap_mask;
+	ibp->port_cap_flags &= ~props->clr_port_cap_mask;
+	if (props->set_port_cap_mask || props->clr_port_cap_mask)
+		qib_cap_mask_chg(ibp);
+	if (port_modify_mask & IB_PORT_SHUTDOWN)
+		qib_set_linkstate(ppd, QIB_IB_LINKDOWN);
+	if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR)
+		ibp->qkey_violations = 0;
+	return 0;
+}
+
+static int qib_query_gid(struct ib_device *ibdev, u8 port,
+			 int index, union ib_gid *gid)
+{
+	struct qib_devdata *dd = dd_from_ibdev(ibdev);
+	int ret = 0;
+
+	if (!port || port > dd->num_pports)
+		ret = -EINVAL;
+	else {
+		struct qib_ibport *ibp = to_iport(ibdev, port);
+		struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+
+		gid->global.subnet_prefix = ibp->gid_prefix;
+		if (index == 0)
+			gid->global.interface_id = ppd->guid;
+		else if (index < QIB_GUIDS_PER_PORT)
+			gid->global.interface_id = ibp->guids[index - 1];
+		else
+			ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static struct ib_pd *qib_alloc_pd(struct ib_device *ibdev,
+				  struct ib_ucontext *context,
+				  struct ib_udata *udata)
+{
+	struct qib_ibdev *dev = to_idev(ibdev);
+	struct qib_pd *pd;
+	struct ib_pd *ret;
+
+	/*
+	 * This is actually totally arbitrary.  Some correctness tests
+	 * assume there's a maximum number of PDs that can be allocated.
+	 * We don't actually have this limit, but we fail the test if
+	 * we allow allocations of more than we report for this value.
+	 */
+
+	pd = kmalloc(sizeof *pd, GFP_KERNEL);
+	if (!pd) {
+		ret = ERR_PTR(-ENOMEM);
+		goto bail;
+	}
+
+	spin_lock(&dev->n_pds_lock);
+	if (dev->n_pds_allocated == ib_qib_max_pds) {
+		spin_unlock(&dev->n_pds_lock);
+		kfree(pd);
+		ret = ERR_PTR(-ENOMEM);
+		goto bail;
+	}
+
+	dev->n_pds_allocated++;
+	spin_unlock(&dev->n_pds_lock);
+
+	/* ib_alloc_pd() will initialize pd->ibpd. */
+	pd->user = udata != NULL;
+
+	ret = &pd->ibpd;
+
+bail:
+	return ret;
+}
+
+static int qib_dealloc_pd(struct ib_pd *ibpd)
+{
+	struct qib_pd *pd = to_ipd(ibpd);
+	struct qib_ibdev *dev = to_idev(ibpd->device);
+
+	spin_lock(&dev->n_pds_lock);
+	dev->n_pds_allocated--;
+	spin_unlock(&dev->n_pds_lock);
+
+	kfree(pd);
+
+	return 0;
+}
+
+int qib_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr)
+{
+	/* A multicast address requires a GRH (see ch. 8.4.1). */
+	if (ah_attr->dlid >= QIB_MULTICAST_LID_BASE &&
+	    ah_attr->dlid != QIB_PERMISSIVE_LID &&
+	    !(ah_attr->ah_flags & IB_AH_GRH))
+		goto bail;
+	if ((ah_attr->ah_flags & IB_AH_GRH) &&
+	    ah_attr->grh.sgid_index >= QIB_GUIDS_PER_PORT)
+		goto bail;
+	if (ah_attr->dlid == 0)
+		goto bail;
+	if (ah_attr->port_num < 1 ||
+	    ah_attr->port_num > ibdev->phys_port_cnt)
+		goto bail;
+	if (ah_attr->static_rate != IB_RATE_PORT_CURRENT &&
+	    ib_rate_to_mult(ah_attr->static_rate) < 0)
+		goto bail;
+	if (ah_attr->sl > 15)
+		goto bail;
+	return 0;
+bail:
+	return -EINVAL;
+}
+
+/**
+ * qib_create_ah - create an address handle
+ * @pd: the protection domain
+ * @ah_attr: the attributes of the AH
+ *
+ * This may be called from interrupt context.
+ */
+static struct ib_ah *qib_create_ah(struct ib_pd *pd,
+				   struct ib_ah_attr *ah_attr)
+{
+	struct qib_ah *ah;
+	struct ib_ah *ret;
+	struct qib_ibdev *dev = to_idev(pd->device);
+	unsigned long flags;
+
+	if (qib_check_ah(pd->device, ah_attr)) {
+		ret = ERR_PTR(-EINVAL);
+		goto bail;
+	}
+
+	ah = kmalloc(sizeof *ah, GFP_ATOMIC);
+	if (!ah) {
+		ret = ERR_PTR(-ENOMEM);
+		goto bail;
+	}
+
+	spin_lock_irqsave(&dev->n_ahs_lock, flags);
+	if (dev->n_ahs_allocated == ib_qib_max_ahs) {
+		spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
+		kfree(ah);
+		ret = ERR_PTR(-ENOMEM);
+		goto bail;
+	}
+
+	dev->n_ahs_allocated++;
+	spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
+
+	/* ib_create_ah() will initialize ah->ibah. */
+	ah->attr = *ah_attr;
+	atomic_set(&ah->refcount, 0);
+
+	ret = &ah->ibah;
+
+bail:
+	return ret;
+}
+
+/**
+ * qib_destroy_ah - destroy an address handle
+ * @ibah: the AH to destroy
+ *
+ * This may be called from interrupt context.
+ */
+static int qib_destroy_ah(struct ib_ah *ibah)
+{
+	struct qib_ibdev *dev = to_idev(ibah->device);
+	struct qib_ah *ah = to_iah(ibah);
+	unsigned long flags;
+
+	if (atomic_read(&ah->refcount) != 0)
+		return -EBUSY;
+
+	spin_lock_irqsave(&dev->n_ahs_lock, flags);
+	dev->n_ahs_allocated--;
+	spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
+
+	kfree(ah);
+
+	return 0;
+}
+
+static int qib_modify_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
+{
+	struct qib_ah *ah = to_iah(ibah);
+
+	if (qib_check_ah(ibah->device, ah_attr))
+		return -EINVAL;
+
+	ah->attr = *ah_attr;
+
+	return 0;
+}
+
+static int qib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
+{
+	struct qib_ah *ah = to_iah(ibah);
+
+	*ah_attr = ah->attr;
+
+	return 0;
+}
+
+/**
+ * qib_get_npkeys - return the size of the PKEY table for context 0
+ * @dd: the qlogic_ib device
+ */
+unsigned qib_get_npkeys(struct qib_devdata *dd)
+{
+	return ARRAY_SIZE(dd->rcd[0]->pkeys);
+}
+
+/*
+ * Return the indexed PKEY from the port PKEY table.
+ * No need to validate rcd[ctxt]; the port is set up if we are here.
+ */
+unsigned qib_get_pkey(struct qib_ibport *ibp, unsigned index)
+{
+	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+	struct qib_devdata *dd = ppd->dd;
+	unsigned ctxt = ppd->hw_pidx;
+	unsigned ret;
+
+	/* dd->rcd null if mini_init or some init failures */
+	if (!dd->rcd || index >= ARRAY_SIZE(dd->rcd[ctxt]->pkeys))
+		ret = 0;
+	else
+		ret = dd->rcd[ctxt]->pkeys[index];
+
+	return ret;
+}
+
+static int qib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
+			  u16 *pkey)
+{
+	struct qib_devdata *dd = dd_from_ibdev(ibdev);
+	int ret;
+
+	if (index >= qib_get_npkeys(dd)) {
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	*pkey = qib_get_pkey(to_iport(ibdev, port), index);
+	ret = 0;
+
+bail:
+	return ret;
+}
+
+/**
+ * qib_alloc_ucontext - allocate a ucontext
+ * @ibdev: the infiniband device
+ * @udata: not used by the QLogic_IB driver
+ */
+static struct ib_ucontext *qib_alloc_ucontext(struct ib_device *ibdev,
+					      struct ib_udata *udata)
+{
+	struct qib_ucontext *context;
+	struct ib_ucontext *ret;
+
+	context = kmalloc(sizeof *context, GFP_KERNEL);
+	if (!context) {
+		ret = ERR_PTR(-ENOMEM);
+		goto bail;
+	}
+
+	ret = &context->ibucontext;
+
+bail:
+	return ret;
+}
+
+static int qib_dealloc_ucontext(struct ib_ucontext *context)
+{
+	kfree(to_iucontext(context));
+	return 0;
+}
+
+static void init_ibport(struct qib_pportdata *ppd)
+{
+	struct qib_verbs_counters cntrs;
+	struct qib_ibport *ibp = &ppd->ibport_data;
+
+	spin_lock_init(&ibp->lock);
+	/* Set the prefix to the default value (see ch. 4.1.1) */
+	ibp->gid_prefix = IB_DEFAULT_GID_PREFIX;
+	ibp->sm_lid = be16_to_cpu(IB_LID_PERMISSIVE);
+	ibp->port_cap_flags = IB_PORT_SYS_IMAGE_GUID_SUP |
+		IB_PORT_CLIENT_REG_SUP | IB_PORT_SL_MAP_SUP |
+		IB_PORT_TRAP_SUP | IB_PORT_AUTO_MIGR_SUP |
+		IB_PORT_DR_NOTICE_SUP | IB_PORT_CAP_MASK_NOTICE_SUP |
+		IB_PORT_OTHER_LOCAL_CHANGES_SUP;
+	if (ppd->dd->flags & QIB_HAS_LINK_LATENCY)
+		ibp->port_cap_flags |= IB_PORT_LINK_LATENCY_SUP;
+	ibp->pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
+	ibp->pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
+	ibp->pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
+	ibp->pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
+	ibp->pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;
+
+	/* Snapshot current HW counters to "clear" them. */
+	qib_get_counters(ppd, &cntrs);
+	ibp->z_symbol_error_counter = cntrs.symbol_error_counter;
+	ibp->z_link_error_recovery_counter =
+		cntrs.link_error_recovery_counter;
+	ibp->z_link_downed_counter = cntrs.link_downed_counter;
+	ibp->z_port_rcv_errors = cntrs.port_rcv_errors;
+	ibp->z_port_rcv_remphys_errors = cntrs.port_rcv_remphys_errors;
+	ibp->z_port_xmit_discards = cntrs.port_xmit_discards;
+	ibp->z_port_xmit_data = cntrs.port_xmit_data;
+	ibp->z_port_rcv_data = cntrs.port_rcv_data;
+	ibp->z_port_xmit_packets = cntrs.port_xmit_packets;
+	ibp->z_port_rcv_packets = cntrs.port_rcv_packets;
+	ibp->z_local_link_integrity_errors =
+		cntrs.local_link_integrity_errors;
+	ibp->z_excessive_buffer_overrun_errors =
+		cntrs.excessive_buffer_overrun_errors;
+	ibp->z_vl15_dropped = cntrs.vl15_dropped;
+}
+
+/**
+ * qib_register_ib_device - register our device with the infiniband core
+ * @dd: the device data structure
+ * Return 0 on success or a negative errno on failure.
+ */
+int qib_register_ib_device(struct qib_devdata *dd)
+{
+	struct qib_ibdev *dev = &dd->verbs_dev;
+	struct ib_device *ibdev = &dev->ibdev;
+	struct qib_pportdata *ppd = dd->pport;
+	unsigned i, lk_tab_size;
+	int ret;
+
+	dev->qp_table_size = ib_qib_qp_table_size;
+	dev->qp_table = kzalloc(dev->qp_table_size * sizeof *dev->qp_table,
+				GFP_KERNEL);
+	if (!dev->qp_table) {
+		ret = -ENOMEM;
+		goto err_qpt;
+	}
+
+	for (i = 0; i < dd->num_pports; i++)
+		init_ibport(ppd + i);
+
+	/* Only need to initialize non-zero fields. */
+	spin_lock_init(&dev->qpt_lock);
+	spin_lock_init(&dev->n_pds_lock);
+	spin_lock_init(&dev->n_ahs_lock);
+	spin_lock_init(&dev->n_cqs_lock);
+	spin_lock_init(&dev->n_qps_lock);
+	spin_lock_init(&dev->n_srqs_lock);
+	spin_lock_init(&dev->n_mcast_grps_lock);
+	init_timer(&dev->mem_timer);
+	dev->mem_timer.function = mem_timer;
+	dev->mem_timer.data = (unsigned long) dev;
+
+	qib_init_qpn_table(dd, &dev->qpn_table);
+
+	/*
+	 * The top ib_qib_lkey_table_size bits are used to index the
+	 * table.  The lower 8 bits can be owned by the user (copied from
+	 * the LKEY).  The remaining bits act as a generation number or tag.
+	 */
+	spin_lock_init(&dev->lk_table.lock);
+	dev->lk_table.max = 1 << ib_qib_lkey_table_size;
+	lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table);
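+	/*
+	 * The table holds 2^ib_qib_lkey_table_size pointers, so
+	 * allocate it as whole pages rather than with kmalloc().
+	 */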
+	dev->lk_table.table = (struct qib_mregion **)
+		__get_free_pages(GFP_KERNEL, get_order(lk_tab_size));
+	if (dev->lk_table.table == NULL) {
+		ret = -ENOMEM;
+		goto err_lk;
+	}
+	memset(dev->lk_table.table, 0, lk_tab_size);
+	INIT_LIST_HEAD(&dev->pending_mmaps);
+	spin_lock_init(&dev->pending_lock);
+	dev->mmap_offset = PAGE_SIZE;
+	spin_lock_init(&dev->mmap_offset_lock);
+	INIT_LIST_HEAD(&dev->piowait);
+	INIT_LIST_HEAD(&dev->dmawait);
+	INIT_LIST_HEAD(&dev->txwait);
+	INIT_LIST_HEAD(&dev->memwait);
+	INIT_LIST_HEAD(&dev->txreq_free);
+
+	if (ppd->sdma_descq_cnt) {
+		dev->pio_hdrs = dma_alloc_coherent(&dd->pcidev->dev,
+						ppd->sdma_descq_cnt *
+						sizeof(struct qib_pio_header),
+						&dev->pio_hdrs_phys,
+						GFP_KERNEL);
+		if (!dev->pio_hdrs) {
+			ret = -ENOMEM;
+			goto err_hdrs;
+		}
+	}
+
+	for (i = 0; i < ppd->sdma_descq_cnt; i++) {
+		struct qib_verbs_txreq *tx;
+
+		tx = kzalloc(sizeof *tx, GFP_KERNEL);
+		if (!tx) {
+			ret = -ENOMEM;
+			goto err_tx;
+		}
+		tx->hdr_inx = i;
+		list_add(&tx->txreq.list, &dev->txreq_free);
+	}
+
+	/*
+	 * The system image GUID is supposed to be the same for all
+	 * IB HCAs in a single system but since there can be other
+	 * device types in the system, we can't be sure this is unique.
+	 */
+	if (!ib_qib_sys_image_guid)
+		ib_qib_sys_image_guid = ppd->guid;
+
+	strlcpy(ibdev->name, "qib%d", IB_DEVICE_NAME_MAX);
+	ibdev->owner = THIS_MODULE;
+	ibdev->node_guid = ppd->guid;
+	ibdev->uverbs_abi_ver = QIB_UVERBS_ABI_VERSION;
+	ibdev->uverbs_cmd_mask =
+		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT)         |
+		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)        |
+		(1ull << IB_USER_VERBS_CMD_QUERY_PORT)          |
+		(1ull << IB_USER_VERBS_CMD_ALLOC_PD)            |
+		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD)          |
+		(1ull << IB_USER_VERBS_CMD_CREATE_AH)           |
+		(1ull << IB_USER_VERBS_CMD_MODIFY_AH)           |
+		(1ull << IB_USER_VERBS_CMD_QUERY_AH)            |
+		(1ull << IB_USER_VERBS_CMD_DESTROY_AH)          |
+		(1ull << IB_USER_VERBS_CMD_REG_MR)              |
+		(1ull << IB_USER_VERBS_CMD_DEREG_MR)            |
+		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
+		(1ull << IB_USER_VERBS_CMD_CREATE_CQ)           |
+		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ)           |
+		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ)          |
+		(1ull << IB_USER_VERBS_CMD_POLL_CQ)             |
+		(1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ)       |
+		(1ull << IB_USER_VERBS_CMD_CREATE_QP)           |
+		(1ull << IB_USER_VERBS_CMD_QUERY_QP)            |
+		(1ull << IB_USER_VERBS_CMD_MODIFY_QP)           |
+		(1ull << IB_USER_VERBS_CMD_DESTROY_QP)          |
+		(1ull << IB_USER_VERBS_CMD_POST_SEND)           |
+		(1ull << IB_USER_VERBS_CMD_POST_RECV)           |
+		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST)        |
+		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST)        |
+		(1ull << IB_USER_VERBS_CMD_CREATE_SRQ)          |
+		(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)          |
+		(1ull << IB_USER_VERBS_CMD_QUERY_SRQ)           |
+		(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ)         |
+		(1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);
+	ibdev->node_type = RDMA_NODE_IB_CA;
+	ibdev->phys_port_cnt = dd->num_pports;
+	ibdev->num_comp_vectors = 1;
+	ibdev->dma_device = &dd->pcidev->dev;
+	ibdev->query_device = qib_query_device;
+	ibdev->modify_device = qib_modify_device;
+	ibdev->query_port = qib_query_port;
+	ibdev->modify_port = qib_modify_port;
+	ibdev->query_pkey = qib_query_pkey;
+	ibdev->query_gid = qib_query_gid;
+	ibdev->alloc_ucontext = qib_alloc_ucontext;
+	ibdev->dealloc_ucontext = qib_dealloc_ucontext;
+	ibdev->alloc_pd = qib_alloc_pd;
+	ibdev->dealloc_pd = qib_dealloc_pd;
+	ibdev->create_ah = qib_create_ah;
+	ibdev->destroy_ah = qib_destroy_ah;
+	ibdev->modify_ah = qib_modify_ah;
+	ibdev->query_ah = qib_query_ah;
+	ibdev->create_srq = qib_create_srq;
+	ibdev->modify_srq = qib_modify_srq;
+	ibdev->query_srq = qib_query_srq;
+	ibdev->destroy_srq = qib_destroy_srq;
+	ibdev->create_qp = qib_create_qp;
+	ibdev->modify_qp = qib_modify_qp;
+	ibdev->query_qp = qib_query_qp;
+	ibdev->destroy_qp = qib_destroy_qp;
+	ibdev->post_send = qib_post_send;
+	ibdev->post_recv = qib_post_receive;
+	ibdev->post_srq_recv = qib_post_srq_receive;
+	ibdev->create_cq = qib_create_cq;
+	ibdev->destroy_cq = qib_destroy_cq;
+	ibdev->resize_cq = qib_resize_cq;
+	ibdev->poll_cq = qib_poll_cq;
+	ibdev->req_notify_cq = qib_req_notify_cq;
+	ibdev->get_dma_mr = qib_get_dma_mr;
+	ibdev->reg_phys_mr = qib_reg_phys_mr;
+	ibdev->reg_user_mr = qib_reg_user_mr;
+	ibdev->dereg_mr = qib_dereg_mr;
+	ibdev->alloc_fast_reg_mr = qib_alloc_fast_reg_mr;
+	ibdev->alloc_fast_reg_page_list = qib_alloc_fast_reg_page_list;
+	ibdev->free_fast_reg_page_list = qib_free_fast_reg_page_list;
+	ibdev->alloc_fmr = qib_alloc_fmr;
+	ibdev->map_phys_fmr = qib_map_phys_fmr;
+	ibdev->unmap_fmr = qib_unmap_fmr;
+	ibdev->dealloc_fmr = qib_dealloc_fmr;
+	ibdev->attach_mcast = qib_multicast_attach;
+	ibdev->detach_mcast = qib_multicast_detach;
+	ibdev->process_mad = qib_process_mad;
+	ibdev->mmap = qib_mmap;
+	ibdev->dma_ops = &qib_dma_mapping_ops;
+
+	snprintf(ibdev->node_desc, sizeof(ibdev->node_desc),
+		 QIB_IDSTR " %s", init_utsname()->nodename);
+
+	ret = ib_register_device(ibdev, qib_create_port_files);
+	if (ret)
+		goto err_reg;
+
+	ret = qib_create_agents(dev);
+	if (ret)
+		goto err_agents;
+
+	if (qib_verbs_register_sysfs(dd))
+		goto err_class;
+
+	goto bail;
+
+err_class:
+	qib_free_agents(dev);
+err_agents:
+	ib_unregister_device(ibdev);
+err_reg:
+err_tx:
+	while (!list_empty(&dev->txreq_free)) {
+		struct list_head *l = dev->txreq_free.next;
+		struct qib_verbs_txreq *tx;
+
+		list_del(l);
+		tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
+		kfree(tx);
+	}
+	if (ppd->sdma_descq_cnt)
+		dma_free_coherent(&dd->pcidev->dev,
+				  ppd->sdma_descq_cnt *
+					sizeof(struct qib_pio_header),
+				  dev->pio_hdrs, dev->pio_hdrs_phys);
+err_hdrs:
+	free_pages((unsigned long) dev->lk_table.table, get_order(lk_tab_size));
+err_lk:
+	kfree(dev->qp_table);
+err_qpt:
+	qib_dev_err(dd, "cannot register verbs: %d!\n", -ret);
+bail:
+	return ret;
+}
+
+void qib_unregister_ib_device(struct qib_devdata *dd)
+{
+	struct qib_ibdev *dev = &dd->verbs_dev;
+	struct ib_device *ibdev = &dev->ibdev;
+	u32 qps_inuse;
+	unsigned lk_tab_size;
+
+	qib_verbs_unregister_sysfs(dd);
+
+	qib_free_agents(dev);
+
+	ib_unregister_device(ibdev);
+
+	if (!list_empty(&dev->piowait))
+		qib_dev_err(dd, "piowait list not empty!\n");
+	if (!list_empty(&dev->dmawait))
+		qib_dev_err(dd, "dmawait list not empty!\n");
+	if (!list_empty(&dev->txwait))
+		qib_dev_err(dd, "txwait list not empty!\n");
+	if (!list_empty(&dev->memwait))
+		qib_dev_err(dd, "memwait list not empty!\n");
+	if (dev->dma_mr)
+		qib_dev_err(dd, "DMA MR not NULL!\n");
+
+	qps_inuse = qib_free_all_qps(dd);
+	if (qps_inuse)
+		qib_dev_err(dd, "QP memory leak! %u still in use\n",
+			    qps_inuse);
+
+	del_timer_sync(&dev->mem_timer);
+	qib_free_qpn_table(&dev->qpn_table);
+	while (!list_empty(&dev->txreq_free)) {
+		struct list_head *l = dev->txreq_free.next;
+		struct qib_verbs_txreq *tx;
+
+		list_del(l);
+		tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
+		kfree(tx);
+	}
+	if (dd->pport->sdma_descq_cnt)
+		dma_free_coherent(&dd->pcidev->dev,
+				  dd->pport->sdma_descq_cnt *
+					sizeof(struct qib_pio_header),
+				  dev->pio_hdrs, dev->pio_hdrs_phys);
+	lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table);
+	free_pages((unsigned long) dev->lk_table.table,
+		   get_order(lk_tab_size));
+	kfree(dev->qp_table);
+}
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
new file mode 100644
index 000000000000..bd57c1273225
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -0,0 +1,1100 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
+ * All rights reserved.
+ * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef QIB_VERBS_H
+#define QIB_VERBS_H
+
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/kref.h>
+#include <linux/workqueue.h>
+#include <rdma/ib_pack.h>
+#include <rdma/ib_user_verbs.h>
+
+struct qib_ctxtdata;
+struct qib_pportdata;
+struct qib_devdata;
+struct qib_verbs_txreq;
+
+#define QIB_MAX_RDMA_ATOMIC     16
+#define QIB_GUIDS_PER_PORT	5
+
+#define QPN_MAX                 (1 << 24)
+#define QPNMAP_ENTRIES          (QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
+
+/*
+ * Increment this value if any changes that break userspace ABI
+ * compatibility are made.
+ */
+#define QIB_UVERBS_ABI_VERSION       2
+
+/*
+ * Define an ib_cq_notify value that is not valid so we know when CQ
+ * notifications are armed.
+ */
+#define IB_CQ_NONE      (IB_CQ_NEXT_COMP + 1)
+
+#define IB_SEQ_NAK	(3 << 29)
+
+/* AETH NAK opcode values */
+#define IB_RNR_NAK                      0x20
+#define IB_NAK_PSN_ERROR                0x60
+#define IB_NAK_INVALID_REQUEST          0x61
+#define IB_NAK_REMOTE_ACCESS_ERROR      0x62
+#define IB_NAK_REMOTE_OPERATIONAL_ERROR 0x63
+#define IB_NAK_INVALID_RD_REQUEST       0x64
+
+/* Flags for checking QP state (see ib_qib_state_ops[]) */
+#define QIB_POST_SEND_OK                0x01
+#define QIB_POST_RECV_OK                0x02
+#define QIB_PROCESS_RECV_OK             0x04
+#define QIB_PROCESS_SEND_OK             0x08
+#define QIB_PROCESS_NEXT_SEND_OK        0x10
+#define QIB_FLUSH_SEND			0x20
+#define QIB_FLUSH_RECV			0x40
+#define QIB_PROCESS_OR_FLUSH_SEND \
+	(QIB_PROCESS_SEND_OK | QIB_FLUSH_SEND)
+
+/* IB Performance Manager status values */
+#define IB_PMA_SAMPLE_STATUS_DONE       0x00
+#define IB_PMA_SAMPLE_STATUS_STARTED    0x01
+#define IB_PMA_SAMPLE_STATUS_RUNNING    0x02
+
+/* Mandatory IB performance counter select values. */
+#define IB_PMA_PORT_XMIT_DATA   cpu_to_be16(0x0001)
+#define IB_PMA_PORT_RCV_DATA    cpu_to_be16(0x0002)
+#define IB_PMA_PORT_XMIT_PKTS   cpu_to_be16(0x0003)
+#define IB_PMA_PORT_RCV_PKTS    cpu_to_be16(0x0004)
+#define IB_PMA_PORT_XMIT_WAIT   cpu_to_be16(0x0005)
+
+#define QIB_VENDOR_IPG		cpu_to_be16(0xFFA0)
+
+#define IB_BTH_REQ_ACK		(1 << 31)
+#define IB_BTH_SOLICITED	(1 << 23)
+#define IB_BTH_MIG_REQ		(1 << 22)
+
+/* XXX Should be defined in ib_verbs.h enum ib_port_cap_flags */
+#define IB_PORT_OTHER_LOCAL_CHANGES_SUP (1 << 26)
+
+#define IB_GRH_VERSION		6
+#define IB_GRH_VERSION_MASK	0xF
+#define IB_GRH_VERSION_SHIFT	28
+#define IB_GRH_TCLASS_MASK	0xFF
+#define IB_GRH_TCLASS_SHIFT	20
+#define IB_GRH_FLOW_MASK	0xFFFFF
+#define IB_GRH_FLOW_SHIFT	0
+#define IB_GRH_NEXT_HDR		0x1B
+
+#define IB_DEFAULT_GID_PREFIX	cpu_to_be64(0xfe80000000000000ULL)
+
+/* Values for set/get portinfo VLCap OperationalVLs */
+#define IB_VL_VL0       1
+#define IB_VL_VL0_1     2
+#define IB_VL_VL0_3     3
+#define IB_VL_VL0_7     4
+#define IB_VL_VL0_14    5
+
+static inline int qib_num_vls(int vls)
+{
+	switch (vls) {
+	default:
+	case IB_VL_VL0:
+		return 1;
+	case IB_VL_VL0_1:
+		return 2;
+	case IB_VL_VL0_3:
+		return 4;
+	case IB_VL_VL0_7:
+		return 8;
+	case IB_VL_VL0_14:
+		return 15;
+	}
+}
+
+struct ib_reth {
+	__be64 vaddr;
+	__be32 rkey;
+	__be32 length;
+} __attribute__ ((packed));
+
+struct ib_atomic_eth {
+	__be32 vaddr[2];        /* unaligned so access as 2 32-bit words */
+	__be32 rkey;
+	__be64 swap_data;
+	__be64 compare_data;
+} __attribute__ ((packed));
+
+struct qib_other_headers {
+	__be32 bth[3];
+	union {
+		struct {
+			__be32 deth[2];
+			__be32 imm_data;
+		} ud;
+		struct {
+			struct ib_reth reth;
+			__be32 imm_data;
+		} rc;
+		struct {
+			__be32 aeth;
+			__be32 atomic_ack_eth[2];
+		} at;
+		__be32 imm_data;
+		__be32 aeth;
+		struct ib_atomic_eth atomic_eth;
+	} u;
+} __attribute__ ((packed));
+
+/*
+ * Note that UD packets with a GRH header are 8+40+12+8 = 68 bytes
+ * long (72 w/ imm_data).  Only the first 56 bytes of the IB header
+ * will be in the eager header buffer.  The remaining 12 or 16 bytes
+ * are in the data buffer.
+ */
+struct qib_ib_header {
+	__be16 lrh[4];
+	union {
+		struct {
+			struct ib_grh grh;
+			struct qib_other_headers oth;
+		} l;
+		struct qib_other_headers oth;
+	} u;
+} __attribute__ ((packed));
+
+struct qib_pio_header {
+	__le32 pbc[2];
+	struct qib_ib_header hdr;
+} __attribute__ ((packed));
+
+/*
+ * There is one struct qib_mcast for each multicast GID.
+ * All attached QPs are then stored as a list of
+ * struct qib_mcast_qp.
+ */
+struct qib_mcast_qp {
+	struct list_head list;
+	struct qib_qp *qp;
+};
+
+struct qib_mcast {
+	struct rb_node rb_node;
+	union ib_gid mgid;
+	struct list_head qp_list;
+	wait_queue_head_t wait;
+	atomic_t refcount;
+	int n_attached;
+};
+
+/* Protection domain */
+struct qib_pd {
+	struct ib_pd ibpd;
+	int user;               /* non-zero if created from user space */
+};
+
+/* Address Handle */
+struct qib_ah {
+	struct ib_ah ibah;
+	struct ib_ah_attr attr;
+	atomic_t refcount;
+};
+
+/*
+ * This structure is used by qib_mmap() to validate an offset
+ * when an mmap() request is made.  The vm_area_struct then uses
+ * this as its vm_private_data.
+ */
+struct qib_mmap_info {
+	struct list_head pending_mmaps;
+	struct ib_ucontext *context;
+	void *obj;
+	__u64 offset;
+	struct kref ref;
+	unsigned size;
+};
+
+/*
+ * This structure is used to contain the head pointer, tail pointer,
+ * and completion queue entries as a single memory allocation so
+ * it can be mmap'ed into user space.
+ */
+struct qib_cq_wc {
+	u32 head;               /* index of next entry to fill */
+	u32 tail;               /* index of next ib_poll_cq() entry */
+	union {
+		/* these are actually size ibcq.cqe + 1 */
+		struct ib_uverbs_wc uqueue[0];
+		struct ib_wc kqueue[0];
+	};
+};
+
+/*
+ * The completion queue structure.
+ */
+struct qib_cq {
+	struct ib_cq ibcq;
+	struct work_struct comptask;
+	spinlock_t lock; /* protect changes in this struct */
+	u8 notify;
+	u8 triggered;
+	struct qib_cq_wc *queue;
+	struct qib_mmap_info *ip;
+};
+
+/*
+ * A segment is a linear region of low physical memory.
+ * XXX Maybe we should use phys addr here and kmap()/kunmap().
+ * Used by the verbs layer.
+ */
+struct qib_seg {
+	void *vaddr;
+	size_t length;
+};
+
+/* The number of qib_segs that fit in a page. */
+#define QIB_SEGSZ     (PAGE_SIZE / sizeof(struct qib_seg))
+
+struct qib_segarray {
+	struct qib_seg segs[QIB_SEGSZ];
+};
+
+struct qib_mregion {
+	struct ib_pd *pd;       /* shares refcnt of ibmr.pd */
+	u64 user_base;          /* User's address for this region */
+	u64 iova;               /* IB start address of this region */
+	size_t length;
+	u32 lkey;
+	u32 offset;             /* offset (bytes) to start of region */
+	int access_flags;
+	u32 max_segs;           /* number of qib_segs in all the arrays */
+	u32 mapsz;              /* size of the map array */
+	atomic_t refcount;
+	struct qib_segarray *map[0];    /* the segments */
+};
+
+/*
+ * These keep track of the copy progress within a memory region.
+ * Used by the verbs layer.
+ */
+struct qib_sge {
+	struct qib_mregion *mr;
+	void *vaddr;            /* kernel virtual address of segment */
+	u32 sge_length;         /* length of the SGE */
+	u32 length;             /* remaining length of the segment */
+	u16 m;                  /* current index: mr->map[m] */
+	u16 n;                  /* current index: mr->map[m]->segs[n] */
+};
+
+/* Memory region */
+struct qib_mr {
+	struct ib_mr ibmr;
+	struct ib_umem *umem;
+	struct qib_mregion mr;  /* must be last */
+};
+
+/*
+ * Send work request queue entry.
+ * The size of the sg_list is determined when the QP is created and stored
+ * in qp->s_max_sge.
+ */
+struct qib_swqe {
+	struct ib_send_wr wr;   /* don't use wr.sg_list */
+	u32 psn;                /* first packet sequence number */
+	u32 lpsn;               /* last packet sequence number */
+	u32 ssn;                /* send sequence number */
+	u32 length;             /* total length of data in sg_list */
+	struct qib_sge sg_list[0];
+};
+
+/*
+ * Receive work request queue entry.
+ * The size of the sg_list is determined when the QP (or SRQ) is created
+ * and stored in qp->r_rq.max_sge (or srq->rq.max_sge).
+ */
+struct qib_rwqe {
+	u64 wr_id;
+	u8 num_sge;
+	struct ib_sge sg_list[0];
+};
+
+/*
+ * This structure is used to contain the head pointer, tail pointer,
+ * and receive work queue entries as a single memory allocation so
+ * it can be mmap'ed into user space.
+ * Note that the wq array elements are variable size so you can't
+ * just index into the array to get the N'th element;
+ * use get_rwqe_ptr() instead.
+ */
+struct qib_rwq {
+	u32 head;               /* new work requests posted to the head */
+	u32 tail;               /* receive work requests are pulled from here */
+	struct qib_rwqe wq[0];
+};
+
+struct qib_rq {
+	struct qib_rwq *wq;
+	spinlock_t lock; /* protect changes in this struct */
+	u32 size;               /* size of RWQE array */
+	u8 max_sge;
+};
+
+struct qib_srq {
+	struct ib_srq ibsrq;
+	struct qib_rq rq;
+	struct qib_mmap_info *ip;
+	/* send signal when number of RWQEs < limit */
+	u32 limit;
+};
+
+struct qib_sge_state {
+	struct qib_sge *sg_list;      /* next SGE to be used if any */
+	struct qib_sge sge;   /* progress state for the current SGE */
+	u32 total_len;
+	u8 num_sge;
+};
+
+/*
+ * This structure holds the information that the send tasklet needs
+ * to send an RDMA read response or atomic operation.
+ */
+struct qib_ack_entry {
+	u8 opcode;
+	u8 sent;
+	u32 psn;
+	u32 lpsn;
+	union {
+		struct qib_sge rdma_sge;
+		u64 atomic_data;
+	};
+};
+
+/*
+ * Variables prefixed with s_ are for the requester (sender).
+ * Variables prefixed with r_ are for the responder (receiver).
+ * Variables prefixed with ack_ are for responder replies.
+ *
+ * Common variables are protected by both r_rq.lock and s_lock, in that
+ * order, which only happens in modify_qp() or when changing the QP 'state'.
+ */
+struct qib_qp {
+	struct ib_qp ibqp;
+	struct qib_qp *next;            /* linked list for QPN hash table */
+	struct qib_qp *timer_next;      /* linked list for qib_ib_timer() */
+	struct list_head iowait;        /* link for wait PIO buf */
+	struct list_head rspwait;       /* link for waiting to respond */
+	struct ib_ah_attr remote_ah_attr;
+	struct ib_ah_attr alt_ah_attr;
+	struct qib_ib_header s_hdr;     /* next packet header to send */
+	atomic_t refcount;
+	wait_queue_head_t wait;
+	wait_queue_head_t wait_dma;
+	struct timer_list s_timer;
+	struct work_struct s_work;
+	struct qib_mmap_info *ip;
+	struct qib_sge_state *s_cur_sge;
+	struct qib_verbs_txreq *s_tx;
+	struct qib_mregion *s_rdma_mr;
+	struct qib_sge_state s_sge;     /* current send request data */
+	struct qib_ack_entry s_ack_queue[QIB_MAX_RDMA_ATOMIC + 1];
+	struct qib_sge_state s_ack_rdma_sge;
+	struct qib_sge_state s_rdma_read_sge;
+	struct qib_sge_state r_sge;     /* current receive data */
+	spinlock_t r_lock;      /* used for APM */
+	spinlock_t s_lock;
+	atomic_t s_dma_busy;
+	unsigned processor_id;	/* Processor ID QP is bound to */
+	u32 s_flags;
+	u32 s_cur_size;         /* size of send packet in bytes */
+	u32 s_len;              /* total length of s_sge */
+	u32 s_rdma_read_len;    /* total length of s_rdma_read_sge */
+	u32 s_next_psn;         /* PSN for next request */
+	u32 s_last_psn;         /* last response PSN processed */
+	u32 s_sending_psn;      /* lowest PSN that is being sent */
+	u32 s_sending_hpsn;     /* highest PSN that is being sent */
+	u32 s_psn;              /* current packet sequence number */
+	u32 s_ack_rdma_psn;     /* PSN for sending RDMA read responses */
+	u32 s_ack_psn;          /* PSN for acking sends and RDMA writes */
+	u32 s_rnr_timeout;      /* number of milliseconds for RNR timeout */
+	u32 r_ack_psn;          /* PSN for next ACK or atomic ACK */
+	u64 r_wr_id;            /* ID for current receive WQE */
+	unsigned long r_aflags;
+	u32 r_len;              /* total length of r_sge */
+	u32 r_rcv_len;          /* receive data len processed */
+	u32 r_psn;              /* expected rcv packet sequence number */
+	u32 r_msn;              /* message sequence number */
+	u16 s_hdrwords;         /* size of s_hdr in 32 bit words */
+	u16 s_rdma_ack_cnt;
+	u8 state;               /* QP state */
+	u8 s_state;             /* opcode of last packet sent */
+	u8 s_ack_state;         /* opcode of packet to ACK */
+	u8 s_nak_state;         /* non-zero if NAK is pending */
+	u8 r_state;             /* opcode of last packet received */
+	u8 r_nak_state;         /* non-zero if NAK is pending */
+	u8 r_min_rnr_timer;     /* retry timeout value for RNR NAKs */
+	u8 r_flags;
+	u8 r_max_rd_atomic;     /* max number of RDMA read/atomic to receive */
+	u8 r_head_ack_queue;    /* index into s_ack_queue[] */
+	u8 qp_access_flags;
+	u8 s_max_sge;           /* size of s_wq->sg_list */
+	u8 s_retry_cnt;         /* number of times to retry */
+	u8 s_rnr_retry_cnt;
+	u8 s_retry;             /* requester retry counter */
+	u8 s_rnr_retry;         /* requester RNR retry counter */
+	u8 s_pkey_index;        /* PKEY index to use */
+	u8 s_alt_pkey_index;    /* Alternate path PKEY index to use */
+	u8 s_max_rd_atomic;     /* max number of RDMA read/atomic to send */
+	u8 s_num_rd_atomic;     /* number of RDMA read/atomic pending */
+	u8 s_tail_ack_queue;    /* index into s_ack_queue[] */
+	u8 s_srate;
+	u8 s_draining;
+	u8 s_mig_state;
+	u8 timeout;             /* Timeout for this QP */
+	u8 alt_timeout;         /* Alternate path timeout for this QP */
+	u8 port_num;
+	enum ib_mtu path_mtu;
+	u32 remote_qpn;
+	u32 qkey;               /* QKEY for this QP (for UD or RD) */
+	u32 s_size;             /* send work queue size */
+	u32 s_head;             /* new entries added here */
+	u32 s_tail;             /* next entry to process */
+	u32 s_cur;              /* current work queue entry */
+	u32 s_acked;            /* last un-ACK'ed entry */
+	u32 s_last;             /* last completed entry */
+	u32 s_ssn;              /* SSN of tail entry */
+	u32 s_lsn;              /* limit sequence number (credit) */
+	struct qib_swqe *s_wq;  /* send work queue */
+	struct qib_swqe *s_wqe;
+	struct qib_rq r_rq;             /* receive work queue */
+	struct qib_sge r_sg_list[0];    /* verified SGEs */
+};
+
+/*
+ * Atomic bit definitions for r_aflags.
+ */
+#define QIB_R_WRID_VALID        0
+#define QIB_R_REWIND_SGE        1
+
+/*
+ * Bit definitions for r_flags.
+ */
+#define QIB_R_REUSE_SGE 0x01
+#define QIB_R_RDMAR_SEQ 0x02
+#define QIB_R_RSP_NAK   0x04
+#define QIB_R_RSP_SEND  0x08
+#define QIB_R_COMM_EST  0x10
+
+/*
+ * Bit definitions for s_flags.
+ *
+ * QIB_S_SIGNAL_REQ_WR - set if QP send WRs signal completions only on request
+ * QIB_S_BUSY - send tasklet is processing the QP
+ * QIB_S_TIMER - the RC retry timer is active
+ * QIB_S_ACK_PENDING - an ACK is waiting to be sent after RDMA read/atomics
+ * QIB_S_WAIT_FENCE - waiting for all prior RDMA read or atomic SWQEs
+ *                         before processing the next SWQE
+ * QIB_S_WAIT_RDMAR - waiting for an RDMA read or atomic SWQE to complete
+ *                         before processing the next SWQE
+ * QIB_S_WAIT_RNR - waiting for RNR timeout
+ * QIB_S_WAIT_SSN_CREDIT - waiting for RC credits to process next SWQE
+ * QIB_S_WAIT_DMA - waiting for send DMA queue to drain before generating
+ *                  next send completion entry not via send DMA
+ * QIB_S_WAIT_PIO - waiting for a send buffer to be available
+ * QIB_S_WAIT_TX - waiting for a struct qib_verbs_txreq to be available
+ * QIB_S_WAIT_DMA_DESC - waiting for DMA descriptors to be available
+ * QIB_S_WAIT_KMEM - waiting for kernel memory to be available
+ * QIB_S_WAIT_PSN - waiting for a packet to exit the send DMA queue
+ * QIB_S_WAIT_ACK - waiting for an ACK packet before sending more requests
+ * QIB_S_SEND_ONE - send one packet, request ACK, then wait for ACK
+ */
+#define QIB_S_SIGNAL_REQ_WR	0x0001
+#define QIB_S_BUSY		0x0002
+#define QIB_S_TIMER		0x0004
+#define QIB_S_RESP_PENDING	0x0008
+#define QIB_S_ACK_PENDING	0x0010
+#define QIB_S_WAIT_FENCE	0x0020
+#define QIB_S_WAIT_RDMAR	0x0040
+#define QIB_S_WAIT_RNR		0x0080
+#define QIB_S_WAIT_SSN_CREDIT	0x0100
+#define QIB_S_WAIT_DMA		0x0200
+#define QIB_S_WAIT_PIO		0x0400
+#define QIB_S_WAIT_TX		0x0800
+#define QIB_S_WAIT_DMA_DESC	0x1000
+#define QIB_S_WAIT_KMEM		0x2000
+#define QIB_S_WAIT_PSN		0x4000
+#define QIB_S_WAIT_ACK		0x8000
+#define QIB_S_SEND_ONE		0x10000
+#define QIB_S_UNLIMITED_CREDIT	0x20000
+
+/*
+ * Wait flags that would prevent any packet type from being sent.
+ */
+#define QIB_S_ANY_WAIT_IO (QIB_S_WAIT_PIO | QIB_S_WAIT_TX | \
+	QIB_S_WAIT_DMA_DESC | QIB_S_WAIT_KMEM)
+
+/*
+ * Wait flags that would prevent send work requests from making progress.
+ */
+#define QIB_S_ANY_WAIT_SEND (QIB_S_WAIT_FENCE | QIB_S_WAIT_RDMAR | \
+	QIB_S_WAIT_RNR | QIB_S_WAIT_SSN_CREDIT | QIB_S_WAIT_DMA | \
+	QIB_S_WAIT_PSN | QIB_S_WAIT_ACK)
+
+#define QIB_S_ANY_WAIT (QIB_S_ANY_WAIT_IO | QIB_S_ANY_WAIT_SEND)
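+
+/*
+ * Editorial note: the two wait groups above compose differently.  A QP
+ * with any QIB_S_ANY_WAIT_IO bit set cannot emit packets of any kind,
+ * while a QIB_S_ANY_WAIT_SEND bit only stalls new send work requests;
+ * qib_send_ok() below tests both groups.
+ */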
+
+#define QIB_PSN_CREDIT  16
+
+/*
+ * Since struct qib_swqe is not a fixed size, we can't simply index into
+ * struct qib_qp.s_wq.  This function does the array index computation.
+ */
+static inline struct qib_swqe *get_swqe_ptr(struct qib_qp *qp,
+					      unsigned n)
+{
+	return (struct qib_swqe *)((char *)qp->s_wq +
+				     (sizeof(struct qib_swqe) +
+				      qp->s_max_sge *
+				      sizeof(struct qib_sge)) * n);
+}
+
+/*
+ * Since struct qib_rwqe is not a fixed size, we can't simply index into
+ * struct qib_rwq.wq.  This function does the array index computation.
+ */
+static inline struct qib_rwqe *get_rwqe_ptr(struct qib_rq *rq, unsigned n)
+{
+	return (struct qib_rwqe *)
+		((char *) rq->wq->wq +
+		 (sizeof(struct qib_rwqe) +
+		  rq->max_sge * sizeof(struct ib_sge)) * n);
+}
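+
+/*
+ * Editorial illustration (sizes assumed, not taken from any chip): with
+ * max_sge == 2 the per-entry stride is sizeof(struct qib_rwqe) +
+ * 2 * sizeof(struct ib_sge), so entry n lives at wq->wq + n * stride.
+ * get_swqe_ptr() above uses the same variable-stride scheme.
+ */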
+
+/*
+ * QPN-map pages start out as NULL; they get allocated upon
+ * first use and are never deallocated.  This way,
+ * large bitmaps are not allocated unless large numbers of QPs are used.
+ */
+struct qpn_map {
+	void *page;
+};
+
+struct qib_qpn_table {
+	spinlock_t lock; /* protect changes in this struct */
+	unsigned flags;         /* flags for QP0/1 allocated for each port */
+	u32 last;               /* last QP number allocated */
+	u32 nmaps;              /* size of the map table */
+	u16 limit;
+	u16 mask;
+	/* bit map of free QP numbers other than 0/1 */
+	struct qpn_map map[QPNMAP_ENTRIES];
+};
+
+struct qib_lkey_table {
+	spinlock_t lock; /* protect changes in this struct */
+	u32 next;               /* next unused index (speeds search) */
+	u32 gen;                /* generation count */
+	u32 max;                /* size of the table */
+	struct qib_mregion **table;
+};
+
+struct qib_opcode_stats {
+	u64 n_packets;          /* number of packets */
+	u64 n_bytes;            /* total number of bytes */
+};
+
+struct qib_ibport {
+	struct qib_qp *qp0;
+	struct qib_qp *qp1;
+	struct ib_mad_agent *send_agent;	/* agent for SMI (traps) */
+	struct qib_ah *sm_ah;
+	struct qib_ah *smi_ah;
+	struct rb_root mcast_tree;
+	spinlock_t lock;		/* protect changes in this struct */
+
+	/* non-zero when timer is set */
+	unsigned long mkey_lease_timeout;
+	unsigned long trap_timeout;
+	__be64 gid_prefix;      /* in network order */
+	__be64 mkey;
+	__be64 guids[QIB_GUIDS_PER_PORT - 1];	/* writable GUIDs */
+	u64 tid;		/* TID for traps */
+	u64 n_unicast_xmit;     /* total unicast packets sent */
+	u64 n_unicast_rcv;      /* total unicast packets received */
+	u64 n_multicast_xmit;   /* total multicast packets sent */
+	u64 n_multicast_rcv;    /* total multicast packets received */
+	u64 z_symbol_error_counter;             /* starting count for PMA */
+	u64 z_link_error_recovery_counter;      /* starting count for PMA */
+	u64 z_link_downed_counter;              /* starting count for PMA */
+	u64 z_port_rcv_errors;                  /* starting count for PMA */
+	u64 z_port_rcv_remphys_errors;          /* starting count for PMA */
+	u64 z_port_xmit_discards;               /* starting count for PMA */
+	u64 z_port_xmit_data;                   /* starting count for PMA */
+	u64 z_port_rcv_data;                    /* starting count for PMA */
+	u64 z_port_xmit_packets;                /* starting count for PMA */
+	u64 z_port_rcv_packets;                 /* starting count for PMA */
+	u32 z_local_link_integrity_errors;      /* starting count for PMA */
+	u32 z_excessive_buffer_overrun_errors;  /* starting count for PMA */
+	u32 z_vl15_dropped;                     /* starting count for PMA */
+	u32 n_rc_resends;
+	u32 n_rc_acks;
+	u32 n_rc_qacks;
+	u32 n_rc_delayed_comp;
+	u32 n_seq_naks;
+	u32 n_rdma_seq;
+	u32 n_rnr_naks;
+	u32 n_other_naks;
+	u32 n_loop_pkts;
+	u32 n_pkt_drops;
+	u32 n_vl15_dropped;
+	u32 n_rc_timeouts;
+	u32 n_dmawait;
+	u32 n_unaligned;
+	u32 n_rc_dupreq;
+	u32 n_rc_seqnak;
+	u32 port_cap_flags;
+	u32 pma_sample_start;
+	u32 pma_sample_interval;
+	__be16 pma_counter_select[5];
+	u16 pma_tag;
+	u16 pkey_violations;
+	u16 qkey_violations;
+	u16 mkey_violations;
+	u16 mkey_lease_period;
+	u16 sm_lid;
+	u16 repress_traps;
+	u8 sm_sl;
+	u8 mkeyprot;
+	u8 subnet_timeout;
+	u8 vl_high_limit;
+	u8 sl_to_vl[16];
+
+	struct qib_opcode_stats opstats[128];
+};
+
+struct qib_ibdev {
+	struct ib_device ibdev;
+	struct list_head pending_mmaps;
+	spinlock_t mmap_offset_lock; /* protect mmap_offset */
+	u32 mmap_offset;
+	struct qib_mregion *dma_mr;
+
+	/* QP numbers are shared by all IB ports */
+	struct qib_qpn_table qpn_table;
+	struct qib_lkey_table lk_table;
+	struct list_head piowait;       /* list for wait PIO buf */
+	struct list_head dmawait;	/* list for wait DMA */
+	struct list_head txwait;        /* list for wait qib_verbs_txreq */
+	struct list_head memwait;       /* list for wait kernel memory */
+	struct list_head txreq_free;
+	struct timer_list mem_timer;
+	struct qib_qp **qp_table;
+	struct qib_pio_header *pio_hdrs;
+	dma_addr_t pio_hdrs_phys;
+	/* list of QPs waiting for RNR timer */
+	spinlock_t pending_lock; /* protect wait lists, PMA counters, etc. */
+	unsigned qp_table_size; /* size of the hash table */
+	spinlock_t qpt_lock;
+
+	u32 n_piowait;
+	u32 n_txwait;
+
+	u32 n_pds_allocated;    /* number of PDs allocated for device */
+	spinlock_t n_pds_lock;
+	u32 n_ahs_allocated;    /* number of AHs allocated for device */
+	spinlock_t n_ahs_lock;
+	u32 n_cqs_allocated;    /* number of CQs allocated for device */
+	spinlock_t n_cqs_lock;
+	u32 n_qps_allocated;    /* number of QPs allocated for device */
+	spinlock_t n_qps_lock;
+	u32 n_srqs_allocated;   /* number of SRQs allocated for device */
+	spinlock_t n_srqs_lock;
+	u32 n_mcast_grps_allocated; /* number of mcast groups allocated */
+	spinlock_t n_mcast_grps_lock;
+};
+
+struct qib_verbs_counters {
+	u64 symbol_error_counter;
+	u64 link_error_recovery_counter;
+	u64 link_downed_counter;
+	u64 port_rcv_errors;
+	u64 port_rcv_remphys_errors;
+	u64 port_xmit_discards;
+	u64 port_xmit_data;
+	u64 port_rcv_data;
+	u64 port_xmit_packets;
+	u64 port_rcv_packets;
+	u32 local_link_integrity_errors;
+	u32 excessive_buffer_overrun_errors;
+	u32 vl15_dropped;
+};
+
+static inline struct qib_mr *to_imr(struct ib_mr *ibmr)
+{
+	return container_of(ibmr, struct qib_mr, ibmr);
+}
+
+static inline struct qib_pd *to_ipd(struct ib_pd *ibpd)
+{
+	return container_of(ibpd, struct qib_pd, ibpd);
+}
+
+static inline struct qib_ah *to_iah(struct ib_ah *ibah)
+{
+	return container_of(ibah, struct qib_ah, ibah);
+}
+
+static inline struct qib_cq *to_icq(struct ib_cq *ibcq)
+{
+	return container_of(ibcq, struct qib_cq, ibcq);
+}
+
+static inline struct qib_srq *to_isrq(struct ib_srq *ibsrq)
+{
+	return container_of(ibsrq, struct qib_srq, ibsrq);
+}
+
+static inline struct qib_qp *to_iqp(struct ib_qp *ibqp)
+{
+	return container_of(ibqp, struct qib_qp, ibqp);
+}
+
+static inline struct qib_ibdev *to_idev(struct ib_device *ibdev)
+{
+	return container_of(ibdev, struct qib_ibdev, ibdev);
+}
+
+/*
+ * Send if not busy or waiting for I/O and either
+ * an RC response is pending or we can process send work requests.
+ */
+static inline int qib_send_ok(struct qib_qp *qp)
+{
+	return !(qp->s_flags & (QIB_S_BUSY | QIB_S_ANY_WAIT_IO)) &&
+		(qp->s_hdrwords || (qp->s_flags & QIB_S_RESP_PENDING) ||
+		 !(qp->s_flags & QIB_S_ANY_WAIT_SEND));
+}
+
+extern struct workqueue_struct *qib_wq;
+extern struct workqueue_struct *qib_cq_wq;
+
+/*
+ * This must be called with s_lock held.
+ */
+static inline void qib_schedule_send(struct qib_qp *qp)
+{
+	if (qib_send_ok(qp)) {
+		if (qp->processor_id == smp_processor_id())
+			queue_work(qib_wq, &qp->s_work);
+		else
+			queue_work_on(qp->processor_id,
+				      qib_wq, &qp->s_work);
+	}
+}
+
+static inline int qib_pkey_ok(u16 pkey1, u16 pkey2)
+{
+	u16 p1 = pkey1 & 0x7FFF;
+	u16 p2 = pkey2 & 0x7FFF;
+
+	/*
+	 * Low 15 bits must be non-zero and match, and
+	 * one of the two must be a full member.
+	 */
+	return p1 && p1 == p2 && ((__s16)pkey1 < 0 || (__s16)pkey2 < 0);
+}
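+
+/*
+ * Editorial example (values chosen for illustration only): with the
+ * rules above, qib_pkey_ok(0x8001, 0x0001) is non-zero because the low
+ * 15 bits match and 0x8001 has the full-member bit set, while
+ * qib_pkey_ok(0x0001, 0x0001) is zero because both keys are limited
+ * members.
+ */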
+
+void qib_bad_pqkey(struct qib_ibport *ibp, __be16 trap_num, u32 key, u32 sl,
+		   u32 qp1, u32 qp2, __be16 lid1, __be16 lid2);
+void qib_cap_mask_chg(struct qib_ibport *ibp);
+void qib_sys_guid_chg(struct qib_ibport *ibp);
+void qib_node_desc_chg(struct qib_ibport *ibp);
+int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+		    struct ib_wc *in_wc, struct ib_grh *in_grh,
+		    struct ib_mad *in_mad, struct ib_mad *out_mad);
+int qib_create_agents(struct qib_ibdev *dev);
+void qib_free_agents(struct qib_ibdev *dev);
+
+/*
+ * Compare the lower 24 bits of the two values.
+ * Returns an integer less than, equal to, or greater than zero.
+ */
+static inline int qib_cmp24(u32 a, u32 b)
+{
+	return (((int) a) - ((int) b)) << 8;
+}
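+
+/*
+ * Editorial example (illustrative PSNs): qib_cmp24(0x000001, 0xFFFFFF)
+ * shifts bit 23 of the 24-bit difference into the sign bit of the
+ * 32-bit result and comes out positive, so PSN 1 is correctly treated
+ * as "after" PSN 0xFFFFFF across the 2^24 wrap.
+ */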
+
+struct qib_mcast *qib_mcast_find(struct qib_ibport *ibp, union ib_gid *mgid);
+
+int qib_snapshot_counters(struct qib_pportdata *ppd, u64 *swords,
+			  u64 *rwords, u64 *spkts, u64 *rpkts,
+			  u64 *xmit_wait);
+
+int qib_get_counters(struct qib_pportdata *ppd,
+		     struct qib_verbs_counters *cntrs);
+
+int qib_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
+
+int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
+
+int qib_mcast_tree_empty(struct qib_ibport *ibp);
+
+__be32 qib_compute_aeth(struct qib_qp *qp);
+
+struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn);
+
+struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
+			    struct ib_qp_init_attr *init_attr,
+			    struct ib_udata *udata);
+
+int qib_destroy_qp(struct ib_qp *ibqp);
+
+int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err);
+
+int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+		  int attr_mask, struct ib_udata *udata);
+
+int qib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+		 int attr_mask, struct ib_qp_init_attr *init_attr);
+
+unsigned qib_free_all_qps(struct qib_devdata *dd);
+
+void qib_init_qpn_table(struct qib_devdata *dd, struct qib_qpn_table *qpt);
+
+void qib_free_qpn_table(struct qib_qpn_table *qpt);
+
+void qib_get_credit(struct qib_qp *qp, u32 aeth);
+
+unsigned qib_pkt_delay(u32 plen, u8 snd_mult, u8 rcv_mult);
+
+void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail);
+
+void qib_put_txreq(struct qib_verbs_txreq *tx);
+
+int qib_verbs_send(struct qib_qp *qp, struct qib_ib_header *hdr,
+		   u32 hdrwords, struct qib_sge_state *ss, u32 len);
+
+void qib_copy_sge(struct qib_sge_state *ss, void *data, u32 length,
+		  int release);
+
+void qib_skip_sge(struct qib_sge_state *ss, u32 length, int release);
+
+void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
+		int has_grh, void *data, u32 tlen, struct qib_qp *qp);
+
+void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
+		int has_grh, void *data, u32 tlen, struct qib_qp *qp);
+
+int qib_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr);
+
+void qib_rc_rnr_retry(unsigned long arg);
+
+void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr);
+
+void qib_rc_error(struct qib_qp *qp, enum ib_wc_status err);
+
+int qib_post_ud_send(struct qib_qp *qp, struct ib_send_wr *wr);
+
+void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
+		int has_grh, void *data, u32 tlen, struct qib_qp *qp);
+
+int qib_alloc_lkey(struct qib_lkey_table *rkt, struct qib_mregion *mr);
+
+int qib_free_lkey(struct qib_ibdev *dev, struct qib_mregion *mr);
+
+int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
+		struct qib_sge *isge, struct ib_sge *sge, int acc);
+
+int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
+		u32 len, u64 vaddr, u32 rkey, int acc);
+
+int qib_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
+			 struct ib_recv_wr **bad_wr);
+
+struct ib_srq *qib_create_srq(struct ib_pd *ibpd,
+			      struct ib_srq_init_attr *srq_init_attr,
+			      struct ib_udata *udata);
+
+int qib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
+		   enum ib_srq_attr_mask attr_mask,
+		   struct ib_udata *udata);
+
+int qib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr);
+
+int qib_destroy_srq(struct ib_srq *ibsrq);
+
+void qib_cq_enter(struct qib_cq *cq, struct ib_wc *entry, int sig);
+
+int qib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
+
+struct ib_cq *qib_create_cq(struct ib_device *ibdev, int entries,
+			    int comp_vector, struct ib_ucontext *context,
+			    struct ib_udata *udata);
+
+int qib_destroy_cq(struct ib_cq *ibcq);
+
+int qib_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags);
+
+int qib_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
+
+struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc);
+
+struct ib_mr *qib_reg_phys_mr(struct ib_pd *pd,
+			      struct ib_phys_buf *buffer_list,
+			      int num_phys_buf, int acc, u64 *iova_start);
+
+struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+			      u64 virt_addr, int mr_access_flags,
+			      struct ib_udata *udata);
+
+int qib_dereg_mr(struct ib_mr *ibmr);
+
+struct ib_mr *qib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len);
+
+struct ib_fast_reg_page_list *qib_alloc_fast_reg_page_list(
+				struct ib_device *ibdev, int page_list_len);
+
+void qib_free_fast_reg_page_list(struct ib_fast_reg_page_list *pl);
+
+int qib_fast_reg_mr(struct qib_qp *qp, struct ib_send_wr *wr);
+
+struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
+			     struct ib_fmr_attr *fmr_attr);
+
+int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
+		     int list_len, u64 iova);
+
+int qib_unmap_fmr(struct list_head *fmr_list);
+
+int qib_dealloc_fmr(struct ib_fmr *ibfmr);
+
+void qib_release_mmap_info(struct kref *ref);
+
+struct qib_mmap_info *qib_create_mmap_info(struct qib_ibdev *dev, u32 size,
+					   struct ib_ucontext *context,
+					   void *obj);
+
+void qib_update_mmap_info(struct qib_ibdev *dev, struct qib_mmap_info *ip,
+			  u32 size, void *obj);
+
+int qib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
+
+int qib_get_rwqe(struct qib_qp *qp, int wr_id_only);
+
+void qib_migrate_qp(struct qib_qp *qp);
+
+int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr,
+		      int has_grh, struct qib_qp *qp, u32 bth0);
+
+u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr,
+		 struct ib_global_route *grh, u32 hwords, u32 nwords);
+
+void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr,
+			 u32 bth0, u32 bth2);
+
+void qib_do_send(struct work_struct *work);
+
+void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe,
+		       enum ib_wc_status status);
+
+void qib_send_rc_ack(struct qib_qp *qp);
+
+int qib_make_rc_req(struct qib_qp *qp);
+
+int qib_make_uc_req(struct qib_qp *qp);
+
+int qib_make_ud_req(struct qib_qp *qp);
+
+int qib_register_ib_device(struct qib_devdata *);
+
+void qib_unregister_ib_device(struct qib_devdata *);
+
+void qib_ib_rcv(struct qib_ctxtdata *, void *, void *, u32);
+
+void qib_ib_piobufavail(struct qib_devdata *);
+
+unsigned qib_get_npkeys(struct qib_devdata *);
+
+unsigned qib_get_pkey(struct qib_ibport *, unsigned);
+
+extern const enum ib_wc_opcode ib_qib_wc_opcode[];
+
+/*
+ * Below are the HCA-independent IB PhysPortState values returned
+ * by the f_ibphys_portstate() routine.
+ */
+#define IB_PHYSPORTSTATE_SLEEP 1
+#define IB_PHYSPORTSTATE_POLL 2
+#define IB_PHYSPORTSTATE_DISABLED 3
+#define IB_PHYSPORTSTATE_CFG_TRAIN 4
+#define IB_PHYSPORTSTATE_LINKUP 5
+#define IB_PHYSPORTSTATE_LINK_ERR_RECOVER 6
+#define IB_PHYSPORTSTATE_CFG_DEBOUNCE 8
+#define IB_PHYSPORTSTATE_CFG_IDLE 0xB
+#define IB_PHYSPORTSTATE_RECOVERY_RETRAIN 0xC
+#define IB_PHYSPORTSTATE_RECOVERY_WAITRMT 0xE
+#define IB_PHYSPORTSTATE_RECOVERY_IDLE 0xF
+#define IB_PHYSPORTSTATE_CFG_ENH 0x10
+#define IB_PHYSPORTSTATE_CFG_WAIT_ENH 0x13
+
+extern const int ib_qib_state_ops[];
+
+extern __be64 ib_qib_sys_image_guid;    /* in network order */
+
+extern unsigned int ib_qib_lkey_table_size;
+
+extern unsigned int ib_qib_max_cqes;
+
+extern unsigned int ib_qib_max_cqs;
+
+extern unsigned int ib_qib_max_qp_wrs;
+
+extern unsigned int ib_qib_max_qps;
+
+extern unsigned int ib_qib_max_sges;
+
+extern unsigned int ib_qib_max_mcast_grps;
+
+extern unsigned int ib_qib_max_mcast_qp_attached;
+
+extern unsigned int ib_qib_max_srqs;
+
+extern unsigned int ib_qib_max_srq_sges;
+
+extern unsigned int ib_qib_max_srq_wrs;
+
+extern const u32 ib_qib_rnr_table[];
+
+extern struct ib_dma_mapping_ops qib_dma_mapping_ops;
+
+#endif                          /* QIB_VERBS_H */
diff --git a/drivers/infiniband/hw/qib/qib_verbs_mcast.c b/drivers/infiniband/hw/qib/qib_verbs_mcast.c
new file mode 100644
index 000000000000..dabb697b1c2a
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_verbs_mcast.c
@@ -0,0 +1,368 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/rculist.h>
+
+#include "qib.h"
+
+/**
+ * qib_mcast_qp_alloc - alloc a struct to link a QP to mcast GID struct
+ * @qp: the QP to link
+ */
+static struct qib_mcast_qp *qib_mcast_qp_alloc(struct qib_qp *qp)
+{
+	struct qib_mcast_qp *mqp;
+
+	mqp = kmalloc(sizeof *mqp, GFP_KERNEL);
+	if (!mqp)
+		goto bail;
+
+	mqp->qp = qp;
+	atomic_inc(&qp->refcount);
+
+bail:
+	return mqp;
+}
+
+static void qib_mcast_qp_free(struct qib_mcast_qp *mqp)
+{
+	struct qib_qp *qp = mqp->qp;
+
+	/* Notify qib_destroy_qp() if it is waiting. */
+	if (atomic_dec_and_test(&qp->refcount))
+		wake_up(&qp->wait);
+
+	kfree(mqp);
+}
+
+/**
+ * qib_mcast_alloc - allocate the multicast GID structure
+ * @mgid: the multicast GID
+ *
+ * A list of QPs will be attached to this structure.
+ */
+static struct qib_mcast *qib_mcast_alloc(union ib_gid *mgid)
+{
+	struct qib_mcast *mcast;
+
+	mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
+	if (!mcast)
+		goto bail;
+
+	mcast->mgid = *mgid;
+	INIT_LIST_HEAD(&mcast->qp_list);
+	init_waitqueue_head(&mcast->wait);
+	atomic_set(&mcast->refcount, 0);
+	mcast->n_attached = 0;
+
+bail:
+	return mcast;
+}
+
+static void qib_mcast_free(struct qib_mcast *mcast)
+{
+	struct qib_mcast_qp *p, *tmp;
+
+	list_for_each_entry_safe(p, tmp, &mcast->qp_list, list)
+		qib_mcast_qp_free(p);
+
+	kfree(mcast);
+}
+
+/**
+ * qib_mcast_find - search the port's multicast tree for the given mcast GID
+ * @ibp: the IB port structure
+ * @mgid: the multicast GID to search for
+ *
+ * Returns NULL if not found.
+ *
+ * The caller is responsible for decrementing the reference count if found.
+ */
+struct qib_mcast *qib_mcast_find(struct qib_ibport *ibp, union ib_gid *mgid)
+{
+	struct rb_node *n;
+	unsigned long flags;
+	struct qib_mcast *mcast;
+
+	spin_lock_irqsave(&ibp->lock, flags);
+	n = ibp->mcast_tree.rb_node;
+	while (n) {
+		int ret;
+
+		mcast = rb_entry(n, struct qib_mcast, rb_node);
+
+		ret = memcmp(mgid->raw, mcast->mgid.raw,
+			     sizeof(union ib_gid));
+		if (ret < 0)
+			n = n->rb_left;
+		else if (ret > 0)
+			n = n->rb_right;
+		else {
+			atomic_inc(&mcast->refcount);
+			spin_unlock_irqrestore(&ibp->lock, flags);
+			goto bail;
+		}
+	}
+	spin_unlock_irqrestore(&ibp->lock, flags);
+
+	mcast = NULL;
+
+bail:
+	return mcast;
+}
+
+/**
+ * qib_mcast_add - insert mcast GID into table and attach QP struct
+ * @dev: the verbs device structure
+ * @ibp: the IB port structure
+ * @mcast: the mcast GID table entry to add
+ * @mqp: the QP to attach
+ *
+ * Return zero if both were added.  Return EEXIST if the GID was already in
+ * the table but the QP was added.  Return ESRCH if the QP was already
+ * attached and neither structure was added.  Note that these are positive
+ * errno-style values, translated by the switch in qib_multicast_attach().
+ */
+static int qib_mcast_add(struct qib_ibdev *dev, struct qib_ibport *ibp,
+			 struct qib_mcast *mcast, struct qib_mcast_qp *mqp)
+{
+	struct rb_node **n = &ibp->mcast_tree.rb_node;
+	struct rb_node *pn = NULL;
+	int ret;
+
+	spin_lock_irq(&ibp->lock);
+
+	while (*n) {
+		struct qib_mcast *tmcast;
+		struct qib_mcast_qp *p;
+
+		pn = *n;
+		tmcast = rb_entry(pn, struct qib_mcast, rb_node);
+
+		ret = memcmp(mcast->mgid.raw, tmcast->mgid.raw,
+			     sizeof(union ib_gid));
+		if (ret < 0) {
+			n = &pn->rb_left;
+			continue;
+		}
+		if (ret > 0) {
+			n = &pn->rb_right;
+			continue;
+		}
+
+		/* Search the QP list to see if this is already there. */
+		list_for_each_entry_rcu(p, &tmcast->qp_list, list) {
+			if (p->qp == mqp->qp) {
+				ret = ESRCH;
+				goto bail;
+			}
+		}
+		if (tmcast->n_attached == ib_qib_max_mcast_qp_attached) {
+			ret = ENOMEM;
+			goto bail;
+		}
+
+		tmcast->n_attached++;
+
+		list_add_tail_rcu(&mqp->list, &tmcast->qp_list);
+		ret = EEXIST;
+		goto bail;
+	}
+
+	spin_lock(&dev->n_mcast_grps_lock);
+	if (dev->n_mcast_grps_allocated == ib_qib_max_mcast_grps) {
+		spin_unlock(&dev->n_mcast_grps_lock);
+		ret = ENOMEM;
+		goto bail;
+	}
+
+	dev->n_mcast_grps_allocated++;
+	spin_unlock(&dev->n_mcast_grps_lock);
+
+	mcast->n_attached++;
+
+	list_add_tail_rcu(&mqp->list, &mcast->qp_list);
+
+	atomic_inc(&mcast->refcount);
+	rb_link_node(&mcast->rb_node, pn, n);
+	rb_insert_color(&mcast->rb_node, &ibp->mcast_tree);
+
+	ret = 0;
+
+bail:
+	spin_unlock_irq(&ibp->lock);
+
+	return ret;
+}
+
+int qib_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+{
+	struct qib_qp *qp = to_iqp(ibqp);
+	struct qib_ibdev *dev = to_idev(ibqp->device);
+	struct qib_ibport *ibp;
+	struct qib_mcast *mcast;
+	struct qib_mcast_qp *mqp;
+	int ret;
+
+	if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) {
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	/*
+	 * Allocate data structures since it's better to do this outside of
+	 * spin locks and they will most likely be needed.
+	 */
+	mcast = qib_mcast_alloc(gid);
+	if (mcast == NULL) {
+		ret = -ENOMEM;
+		goto bail;
+	}
+	mqp = qib_mcast_qp_alloc(qp);
+	if (mqp == NULL) {
+		qib_mcast_free(mcast);
+		ret = -ENOMEM;
+		goto bail;
+	}
+	ibp = to_iport(ibqp->device, qp->port_num);
+	switch (qib_mcast_add(dev, ibp, mcast, mqp)) {
+	case ESRCH:
+		/* Neither was used: OK to attach the same QP twice. */
+		qib_mcast_qp_free(mqp);
+		qib_mcast_free(mcast);
+		break;
+
+	case EEXIST:            /* The mcast wasn't used */
+		qib_mcast_free(mcast);
+		break;
+
+	case ENOMEM:
+		/* Exceeded the maximum number of mcast groups. */
+		qib_mcast_qp_free(mqp);
+		qib_mcast_free(mcast);
+		ret = -ENOMEM;
+		goto bail;
+
+	default:
+		break;
+	}
+
+	ret = 0;
+
+bail:
+	return ret;
+}
+
+int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+{
+	struct qib_qp *qp = to_iqp(ibqp);
+	struct qib_ibdev *dev = to_idev(ibqp->device);
+	struct qib_ibport *ibp = to_iport(ibqp->device, qp->port_num);
+	struct qib_mcast *mcast = NULL;
+	struct qib_mcast_qp *p, *tmp;
+	struct rb_node *n;
+	int last = 0;
+	int ret;
+
+	if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) {
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	spin_lock_irq(&ibp->lock);
+
+	/* Find the GID in the mcast table. */
+	n = ibp->mcast_tree.rb_node;
+	while (1) {
+		if (n == NULL) {
+			spin_unlock_irq(&ibp->lock);
+			ret = -EINVAL;
+			goto bail;
+		}
+
+		mcast = rb_entry(n, struct qib_mcast, rb_node);
+		ret = memcmp(gid->raw, mcast->mgid.raw,
+			     sizeof(union ib_gid));
+		if (ret < 0)
+			n = n->rb_left;
+		else if (ret > 0)
+			n = n->rb_right;
+		else
+			break;
+	}
+
+	/* Search the QP list. */
+	list_for_each_entry_safe(p, tmp, &mcast->qp_list, list) {
+		if (p->qp != qp)
+			continue;
+		/*
+		 * We found it, so remove it, but don't poison the forward
+		 * link until we are sure there are no list walkers.
+		 */
+		list_del_rcu(&p->list);
+		mcast->n_attached--;
+
+		/* If this was the last attached QP, remove the GID too. */
+		if (list_empty(&mcast->qp_list)) {
+			rb_erase(&mcast->rb_node, &ibp->mcast_tree);
+			last = 1;
+		}
+		break;
+	}
+
+	spin_unlock_irq(&ibp->lock);
+
+	if (p) {
+		/*
+		 * Wait for any list walkers to finish before freeing the
+		 * list element.
+		 */
+		wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
+		qib_mcast_qp_free(p);
+	}
+	if (last) {
+		atomic_dec(&mcast->refcount);
+		wait_event(mcast->wait, !atomic_read(&mcast->refcount));
+		qib_mcast_free(mcast);
+		spin_lock_irq(&dev->n_mcast_grps_lock);
+		dev->n_mcast_grps_allocated--;
+		spin_unlock_irq(&dev->n_mcast_grps_lock);
+	}
+
+	ret = 0;
+
+bail:
+	return ret;
+}
+
+int qib_mcast_tree_empty(struct qib_ibport *ibp)
+{
+	return ibp->mcast_tree.rb_node == NULL;
+}
diff --git a/drivers/infiniband/hw/ipath/ipath_7220.h b/drivers/infiniband/hw/qib/qib_wc_ppc64.c
index 74fa5cc5131d..673cf4c22ebd 100644
--- a/drivers/infiniband/hw/ipath/ipath_7220.h
+++ b/drivers/infiniband/hw/qib/qib_wc_ppc64.c
@@ -1,7 +1,5 @@
-#ifndef _IPATH_7220_H
-#define _IPATH_7220_H
 /*
- * Copyright (c) 2007 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -33,25 +31,32 @@
  */
 
 /*
- * This header file provides the declarations and common definitions
- * for (mostly) manipulation of the SerDes blocks within the IBA7220.
- * the functions declared should only be called from within other
- * 7220-related files such as ipath_iba7220.c or ipath_sd7220.c.
+ * This file is conditionally built on PowerPC only.  Otherwise weak symbol
+ * versions of the functions exported from here are used.
  */
-int ipath_sd7220_presets(struct ipath_devdata *dd);
-int ipath_sd7220_init(struct ipath_devdata *dd, int was_reset);
-int ipath_sd7220_prog_ld(struct ipath_devdata *dd, int sdnum, u8 *img,
-	int len, int offset);
-int ipath_sd7220_prog_vfy(struct ipath_devdata *dd, int sdnum, const u8 *img,
-	int len, int offset);
-/*
- * Below used for sdnum parameter, selecting one of the two sections
- * used for PCIe, or the single SerDes used for IB, which is the
- * only one currently used
- */
-#define IB_7220_SERDES 2
 
-int ipath_sd7220_ib_load(struct ipath_devdata *dd);
-int ipath_sd7220_ib_vfy(struct ipath_devdata *dd);
+#include "qib.h"
 
-#endif /* _IPATH_7220_H */
+/**
+ * qib_enable_wc - enable write combining for MMIO writes to the device
+ * @dd: qlogic_ib device
+ *
+ * Nothing to do on PowerPC, so just return without error.
+ */
+int qib_enable_wc(struct qib_devdata *dd)
+{
+	return 0;
+}
+
+/**
+ * qib_unordered_wc - indicate whether write combining is unordered
+ *
+ * Because our performance depends on our ability to do write
+ * combining mmio writes in the most efficient way, we need to
+ * know if we are on a processor that may reorder stores when
+ * write combining.
+ */
+int qib_unordered_wc(void)
+{
+	return 1;
+}
diff --git a/drivers/infiniband/hw/qib/qib_wc_x86_64.c b/drivers/infiniband/hw/qib/qib_wc_x86_64.c
new file mode 100644
index 000000000000..561b8bca4060
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_wc_x86_64.c
@@ -0,0 +1,171 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * This file is conditionally built on x86_64 only.  Otherwise weak symbol
+ * versions of the functions exported from here are used.
+ */
+
+#include <linux/pci.h>
+#include <asm/mtrr.h>
+#include <asm/processor.h>
+
+#include "qib.h"
+
+/**
+ * qib_enable_wc - enable write combining for MMIO writes to the device
+ * @dd: qlogic_ib device
+ *
+ * This routine is x86_64-specific; it twiddles the CPU's MTRRs to enable
+ * write combining.
+ */
+int qib_enable_wc(struct qib_devdata *dd)
+{
+	int ret = 0;
+	u64 pioaddr, piolen;
+	unsigned bits;
+	const unsigned long addr = pci_resource_start(dd->pcidev, 0);
+	const size_t len = pci_resource_len(dd->pcidev, 0);
+
+	/*
+	 * Set the PIO buffers to be WCCOMB, so we get HT bursts to the
+	 * chip.  Linux (possibly the hardware) requires it to be on a power
+	 * of 2 address matching the length (which has to be a power of 2).
+	 * For rev1, that means the base address, for rev2, it will be just
+	 * the PIO buffers themselves.
+	 * For chips with two sets of buffers, the calculations are
+	 * somewhat more complicated; we need to sum, and the piobufbase
+	 * register has both offsets, 2K in low 32 bits, 4K in high 32 bits.
+	 * The buffers are still packed, so a single range covers both.
+	 */
+	if (dd->piobcnt2k && dd->piobcnt4k) {
+		/* 2 sizes for chip */
+		unsigned long pio2kbase, pio4kbase;
+		pio2kbase = dd->piobufbase & 0xffffffffUL;
+		pio4kbase = (dd->piobufbase >> 32) & 0xffffffffUL;
+		if (pio2kbase < pio4kbase) {
+			/* all current chips */
+			pioaddr = addr + pio2kbase;
+			piolen = pio4kbase - pio2kbase +
+				dd->piobcnt4k * dd->align4k;
+		} else {
+			pioaddr = addr + pio4kbase;
+			piolen = pio2kbase - pio4kbase +
+				dd->piobcnt2k * dd->palign;
+		}
+	} else {  /* single buffer size (2K, currently) */
+		pioaddr = addr + dd->piobufbase;
+		piolen = dd->piobcnt2k * dd->palign +
+			dd->piobcnt4k * dd->align4k;
+	}
+
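+	/*
+	 * Editorial sketch of the rounding below (values assumed, not
+	 * from real hardware): for piolen = 0x3000 the for loop finds
+	 * the lowest set bit (bit 12), the while loop then finds the
+	 * highest (bit 13), and piolen becomes 1ULL << 14 = 0x4000, the
+	 * next power of 2, as the MTRR base/length rules require.
+	 */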
+	for (bits = 0; !(piolen & (1ULL << bits)); bits++)
+		/* do nothing */ ;
+
+	if (piolen != (1ULL << bits)) {
+		piolen >>= bits;
+		while (piolen >>= 1)
+			bits++;
+		piolen = 1ULL << (bits + 1);
+	}
+	if (pioaddr & (piolen - 1)) {
+		u64 atmp;
+		atmp = pioaddr & ~(piolen - 1);
+		if (atmp < addr || (atmp + piolen) > (addr + len)) {
+			qib_dev_err(dd, "No way to align address/size "
+				    "(%llx/%llx), no WC mtrr\n",
+				    (unsigned long long) atmp,
+				    (unsigned long long) piolen << 1);
+			ret = -ENODEV;
+		} else {
+			pioaddr = atmp;
+			piolen <<= 1;
+		}
+	}
+
+	if (!ret) {
+		int cookie;
+
+		cookie = mtrr_add(pioaddr, piolen, MTRR_TYPE_WRCOMB, 0);
+		if (cookie < 0) {
+			qib_devinfo(dd->pcidev,
+				 "mtrr_add() WC for PIO bufs failed (%d)\n",
+				 cookie);
+			ret = -EINVAL;
+		} else {
+			dd->wc_cookie = cookie;
+			dd->wc_base = (unsigned long) pioaddr;
+			dd->wc_len = (unsigned long) piolen;
+		}
+	}
+
+	return ret;
+}
+
+/**
+ * qib_disable_wc - disable write combining for MMIO writes to the device
+ * @dd: qlogic_ib device
+ */
+void qib_disable_wc(struct qib_devdata *dd)
+{
+	if (dd->wc_cookie) {
+		int r;
+
+		r = mtrr_del(dd->wc_cookie, dd->wc_base,
+			     dd->wc_len);
+		if (r < 0)
+			qib_devinfo(dd->pcidev,
+				 "mtrr_del(%lx, %lx, %lx) failed: %d\n",
+				 dd->wc_cookie, dd->wc_base,
+				 dd->wc_len, r);
+		dd->wc_cookie = 0; /* even on failure */
+	}
+}
+
+/**
+ * qib_unordered_wc - indicate whether write combining is unordered
+ *
+ * Because our performance depends on our ability to do write combining mmio
+ * writes in the most efficient way, we need to know if we are on an Intel
+ * or AMD x86_64 processor.  AMD x86_64 processors flush WC buffers out in
+ * the order completed, and so no special flushing is required to get
+ * correct ordering.  Intel processors, however, will flush write buffers
+ * out in "random" orders, and so explicit ordering is needed at times.
+ */
+int qib_unordered_wc(void)
+{
+	return boot_cpu_data.x86_vendor != X86_VENDOR_AMD;
+}
diff --git a/drivers/net/mlx4/icm.c b/drivers/net/mlx4/icm.c
index 57288ca1395f..b07e4dee80aa 100644
--- a/drivers/net/mlx4/icm.c
+++ b/drivers/net/mlx4/icm.c
@@ -163,28 +163,30 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
 			ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages],
 						   cur_order, gfp_mask);
 
-		if (!ret) {
-			++chunk->npages;
-
-			if (coherent)
-				++chunk->nsg;
-			else if (chunk->npages == MLX4_ICM_CHUNK_LEN) {
-				chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
-							chunk->npages,
-							PCI_DMA_BIDIRECTIONAL);
+		if (ret) {
+			if (--cur_order < 0)
+				goto fail;
+			else
+				continue;
+		}
 
-				if (chunk->nsg <= 0)
-					goto fail;
+		++chunk->npages;
 
-				chunk = NULL;
-			}
+		if (coherent)
+			++chunk->nsg;
+		else if (chunk->npages == MLX4_ICM_CHUNK_LEN) {
+			chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
+						chunk->npages,
+						PCI_DMA_BIDIRECTIONAL);
 
-			npages -= 1 << cur_order;
-		} else {
-			--cur_order;
-			if (cur_order < 0)
+			if (chunk->nsg <= 0)
 				goto fail;
 		}
+
+		if (chunk->npages == MLX4_ICM_CHUNK_LEN)
+			chunk = NULL;
+
+		npages -= 1 << cur_order;
 	}
 
 	if (!coherent && chunk) {
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 310d31474034..f3e8f3c07725 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -1172,7 +1172,9 @@ struct ib_client {
 struct ib_device *ib_alloc_device(size_t size);
 void ib_dealloc_device(struct ib_device *device);
 
-int ib_register_device   (struct ib_device *device);
+int ib_register_device(struct ib_device *device,
+		       int (*port_callback)(struct ib_device *,
+					    u8, struct kobject *));
 void ib_unregister_device(struct ib_device *device);
 
 int ib_register_client   (struct ib_client *client);