author     Linus Torvalds <torvalds@linux-foundation.org>  2012-03-22 12:55:29 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-03-22 12:55:29 -0700
commit     424a6f6ef990b7e9f56f6627bfc6c46b493faeb4 (patch)
tree       0028356ed8003495fbbe1f716f359e3c8ebc35b6 /drivers
parent     1ab142d499294b844ecc81e8004db4ce029b0b61 (diff)
parent     cd8df932d894f3128c884e3ae1b2b484540513db (diff)
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6
Pull SCSI updates from James Bottomley:
 "The update includes the usual assortment of driver updates (lpfc,
  qla2xxx, qla4xxx, bfa, bnx2fc, bnx2i, isci, fcoe, hpsa) plus a huge
  amount of infrastructure work in the SAS library and transport class
  as well as an iSCSI update.  There's also a new SCSI based virtio
  driver."

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (177 commits)
  [SCSI] qla4xxx: Update driver version to 5.02.00-k15
  [SCSI] qla4xxx: trivial cleanup
  [SCSI] qla4xxx: Fix sparse warning
  [SCSI] qla4xxx: Add support for multiple session per host.
  [SCSI] qla4xxx: Export CHAP index as sysfs attribute
  [SCSI] scsi_transport: Export CHAP index as sysfs attribute
  [SCSI] qla4xxx: Add support to display CHAP list and delete CHAP entry
  [SCSI] iscsi_transport: Add support to display CHAP list and delete CHAP entry
  [SCSI] pm8001: fix endian issue with code optimization.
  [SCSI] pm8001: Fix possible racing condition.
  [SCSI] pm8001: Fix bogus interrupt state flag issue.
  [SCSI] ipr: update PCI ID definitions for new adapters
  [SCSI] qla2xxx: handle default case in qla2x00_request_firmware()
  [SCSI] isci: improvements in driver unloading routine
  [SCSI] isci: improve phy event warnings
  [SCSI] isci: debug, provide state-enum-to-string conversions
  [SCSI] scsi_transport_sas: 'enable' phys on reset
  [SCSI] libsas: don't recover end devices attached to disabled phys
  [SCSI] libsas: fixup target_port_protocols for expanders that don't report sata
  [SCSI] libsas: set attached device type and target protocols for local phys
  ...
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/ata/libata-core.c | 34
-rw-r--r--  drivers/ata/libata-eh.c | 1
-rw-r--r--  drivers/ata/libata-scsi.c | 13
-rw-r--r--  drivers/ata/libata.h | 2
-rw-r--r--  drivers/scsi/Kconfig | 8
-rw-r--r--  drivers/scsi/Makefile | 1
-rw-r--r--  drivers/scsi/aacraid/aachba.c | 4
-rw-r--r--  drivers/scsi/aacraid/aacraid.h | 27
-rw-r--r--  drivers/scsi/aacraid/comminit.c | 21
-rw-r--r--  drivers/scsi/aacraid/commsup.c | 26
-rw-r--r--  drivers/scsi/aacraid/linit.c | 28
-rw-r--r--  drivers/scsi/aacraid/rx.c | 1
-rw-r--r--  drivers/scsi/aacraid/sa.c | 1
-rw-r--r--  drivers/scsi/aacraid/src.c | 293
-rw-r--r--  drivers/scsi/aic94xx/aic94xx.h | 2
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_dev.c | 38
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_init.c | 6
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_tmf.c | 11
-rw-r--r--  drivers/scsi/bfa/bfad_bsg.c | 4
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc.h | 8
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_fcoe.c | 20
-rw-r--r--  drivers/scsi/bnx2i/bnx2i_hwi.c | 12
-rw-r--r--  drivers/scsi/bnx2i/bnx2i_init.c | 9
-rw-r--r--  drivers/scsi/cxgbi/libcxgbi.c | 13
-rw-r--r--  drivers/scsi/fcoe/fcoe.c | 167
-rw-r--r--  drivers/scsi/fcoe/fcoe.h | 3
-rw-r--r--  drivers/scsi/fcoe/fcoe_transport.c | 4
-rw-r--r--  drivers/scsi/hpsa.c | 344
-rw-r--r--  drivers/scsi/hpsa.h | 3
-rw-r--r--  drivers/scsi/hpsa_cmd.h | 9
-rw-r--r--  drivers/scsi/ipr.c | 14
-rw-r--r--  drivers/scsi/ipr.h | 4
-rw-r--r--  drivers/scsi/isci/host.c | 17
-rw-r--r--  drivers/scsi/isci/host.h | 19
-rw-r--r--  drivers/scsi/isci/init.c | 24
-rw-r--r--  drivers/scsi/isci/phy.c | 171
-rw-r--r--  drivers/scsi/isci/phy.h | 155
-rw-r--r--  drivers/scsi/isci/port.c | 263
-rw-r--r--  drivers/scsi/isci/port.h | 114
-rw-r--r--  drivers/scsi/isci/registers.h | 27
-rw-r--r--  drivers/scsi/isci/remote_device.c | 82
-rw-r--r--  drivers/scsi/isci/remote_device.h | 212
-rw-r--r--  drivers/scsi/isci/remote_node_context.c | 19
-rw-r--r--  drivers/scsi/isci/remote_node_context.h | 97
-rw-r--r--  drivers/scsi/isci/request.c | 370
-rw-r--r--  drivers/scsi/isci/request.h | 228
-rw-r--r--  drivers/scsi/isci/scu_task_context.h | 55
-rw-r--r--  drivers/scsi/isci/task.c | 158
-rw-r--r--  drivers/scsi/isci/task.h | 40
-rw-r--r--  drivers/scsi/iscsi_tcp.c | 13
-rw-r--r--  drivers/scsi/libfc/fc_disc.c | 7
-rw-r--r--  drivers/scsi/libfc/fc_elsct.c | 3
-rw-r--r--  drivers/scsi/libfc/fc_exch.c | 7
-rw-r--r--  drivers/scsi/libfc/fc_lport.c | 227
-rw-r--r--  drivers/scsi/libiscsi.c | 28
-rw-r--r--  drivers/scsi/libiscsi_tcp.c | 18
-rw-r--r--  drivers/scsi/libsas/sas_ata.c | 828
-rw-r--r--  drivers/scsi/libsas/sas_discover.c | 246
-rw-r--r--  drivers/scsi/libsas/sas_event.c | 96
-rw-r--r--  drivers/scsi/libsas/sas_expander.c | 342
-rw-r--r--  drivers/scsi/libsas/sas_host_smp.c | 11
-rw-r--r--  drivers/scsi/libsas/sas_init.c | 214
-rw-r--r--  drivers/scsi/libsas/sas_internal.h | 97
-rw-r--r--  drivers/scsi/libsas/sas_phy.c | 12
-rw-r--r--  drivers/scsi/libsas/sas_port.c | 32
-rw-r--r--  drivers/scsi/libsas/sas_scsi_host.c | 364
-rw-r--r--  drivers/scsi/lpfc/lpfc.h | 13
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c | 12
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.c | 18
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h | 1
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c | 4
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.c | 19
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c | 5
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c | 12
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw.h | 5
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw4.h | 49
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 137
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c | 77
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 1054
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 240
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.h | 1
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli4.h | 3
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h | 2
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_base.c | 5
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_scsih.c | 6
-rw-r--r--  drivers/scsi/mvsas/mv_init.c | 2
-rw-r--r--  drivers/scsi/mvsas/mv_sas.c | 11
-rw-r--r--  drivers/scsi/pm8001/pm8001_chips.h | 4
-rw-r--r--  drivers/scsi/pm8001/pm8001_hwi.c | 434
-rw-r--r--  drivers/scsi/pm8001/pm8001_hwi.h | 2
-rw-r--r--  drivers/scsi/pm8001/pm8001_init.c | 2
-rw-r--r--  drivers/scsi/pm8001/pm8001_sas.c | 127
-rw-r--r--  drivers/scsi/pm8001/pm8001_sas.h | 6
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c | 177
-rw-r--r--  drivers/scsi/qla2xxx/qla_bsg.c | 120
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.c | 630
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.h | 63
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h | 117
-rw-r--r--  drivers/scsi/qla2xxx/qla_dfs.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_fw.h | 13
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h | 22
-rw-r--r--  drivers/scsi/qla2xxx/qla_gs.c | 86
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 540
-rw-r--r--  drivers/scsi/qla2xxx/qla_inline.h | 51
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c | 167
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 445
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c | 410
-rw-r--r--  drivers/scsi/qla2xxx/qla_mid.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx.c | 90
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx.h | 3
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 435
-rw-r--r--  drivers/scsi/qla2xxx/qla_sup.c | 148
-rw-r--r--  drivers/scsi/qla4xxx/ql4_def.h | 45
-rw-r--r--  drivers/scsi/qla4xxx/ql4_fw.h | 24
-rw-r--r--  drivers/scsi/qla4xxx/ql4_glbl.h | 9
-rw-r--r--  drivers/scsi/qla4xxx/ql4_init.c | 5
-rw-r--r--  drivers/scsi/qla4xxx/ql4_iocb.c | 92
-rw-r--r--  drivers/scsi/qla4xxx/ql4_isr.c | 78
-rw-r--r--  drivers/scsi/qla4xxx/ql4_mbx.c | 23
-rw-r--r--  drivers/scsi/qla4xxx/ql4_nx.c | 17
-rw-r--r--  drivers/scsi/qla4xxx/ql4_nx.h | 1
-rw-r--r--  drivers/scsi/qla4xxx/ql4_os.c | 563
-rw-r--r--  drivers/scsi/qla4xxx/ql4_version.h | 2
-rw-r--r--  drivers/scsi/scsi.c | 6
-rw-r--r--  drivers/scsi/scsi_debug.c | 6
-rw-r--r--  drivers/scsi/scsi_error.c | 24
-rw-r--r--  drivers/scsi/scsi_lib.c | 5
-rw-r--r--  drivers/scsi/scsi_transport_fc.c | 30
-rw-r--r--  drivers/scsi/scsi_transport_iscsi.c | 268
-rw-r--r--  drivers/scsi/scsi_transport_sas.c | 60
-rw-r--r--  drivers/scsi/sd.c | 86
-rw-r--r--  drivers/scsi/sd.h | 35
-rw-r--r--  drivers/scsi/st.c | 11
-rw-r--r--  drivers/scsi/virtio_scsi.c | 594
134 files changed, 9251 insertions, 4232 deletions
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index c06e0ec11556..e0bda9ff89cd 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -5936,29 +5936,31 @@ void ata_host_init(struct ata_host *host, struct device *dev,
 	host->ops = ops;
 }
 
-int ata_port_probe(struct ata_port *ap)
+void __ata_port_probe(struct ata_port *ap)
 {
-	int rc = 0;
+	struct ata_eh_info *ehi = &ap->link.eh_info;
+	unsigned long flags;
 
-	/* probe */
-	if (ap->ops->error_handler) {
-		struct ata_eh_info *ehi = &ap->link.eh_info;
-		unsigned long flags;
+	/* kick EH for boot probing */
+	spin_lock_irqsave(ap->lock, flags);
 
-		/* kick EH for boot probing */
-		spin_lock_irqsave(ap->lock, flags);
+	ehi->probe_mask |= ATA_ALL_DEVICES;
+	ehi->action |= ATA_EH_RESET;
+	ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
 
-		ehi->probe_mask |= ATA_ALL_DEVICES;
-		ehi->action |= ATA_EH_RESET;
-		ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
+	ap->pflags &= ~ATA_PFLAG_INITIALIZING;
+	ap->pflags |= ATA_PFLAG_LOADING;
+	ata_port_schedule_eh(ap);
 
-		ap->pflags &= ~ATA_PFLAG_INITIALIZING;
-		ap->pflags |= ATA_PFLAG_LOADING;
-		ata_port_schedule_eh(ap);
+	spin_unlock_irqrestore(ap->lock, flags);
+}
 
-		spin_unlock_irqrestore(ap->lock, flags);
+int ata_port_probe(struct ata_port *ap)
+{
+	int rc = 0;
 
-		/* wait for EH to finish */
+	if (ap->ops->error_handler) {
+		__ata_port_probe(ap);
 		ata_port_wait_eh(ap);
 	} else {
 		DPRINTK("ata%u: bus probe begin\n", ap->print_id);
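The hunk above splits the old blocking ata_port_probe() into a non-blocking __ata_port_probe(), which only schedules error-handler based probing, plus a wrapper that still waits via ata_port_wait_eh(). A minimal sketch of the resulting two-phase pattern (hypothetical caller; the real consumer is ata_sas_async_port_init(), added in the libata-scsi.c hunk below):

	/* Sketch only: kick probing without blocking, reap it later. */
	static int example_async_probe(struct ata_port *ap)
	{
		int rc = ap->ops->port_start(ap);	/* allocate port resources */

		if (rc)
			return rc;
		__ata_port_probe(ap);	/* schedule EH probing; returns at once */
		/* ... discover other ports in parallel ... */
		ata_port_wait_eh(ap);	/* block only when results are needed */
		return 0;
	}
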
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index a9b282038000..c61316e9d2f7 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -863,6 +863,7 @@ void ata_port_wait_eh(struct ata_port *ap)
 		goto retry;
 	}
 }
+EXPORT_SYMBOL_GPL(ata_port_wait_eh);
 
 static int ata_eh_nr_in_flight(struct ata_port *ap)
 {
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 508a60bfe5c1..1ee00c8b5b04 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -3838,6 +3838,19 @@ void ata_sas_port_stop(struct ata_port *ap)
 }
 EXPORT_SYMBOL_GPL(ata_sas_port_stop);
 
+int ata_sas_async_port_init(struct ata_port *ap)
+{
+	int rc = ap->ops->port_start(ap);
+
+	if (!rc) {
+		ap->print_id = ata_print_id++;
+		__ata_port_probe(ap);
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(ata_sas_async_port_init);
+
 /**
  *	ata_sas_port_init - Initialize a SATA device
  *	@ap: SATA port to initialize
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 814486d35c44..2e26fcaf635b 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -105,6 +105,7 @@ extern int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg);
 extern struct ata_port *ata_port_alloc(struct ata_host *host);
 extern const char *sata_spd_string(unsigned int spd);
 extern int ata_port_probe(struct ata_port *ap);
+extern void __ata_port_probe(struct ata_port *ap);
 
 /* libata-acpi.c */
 #ifdef CONFIG_ATA_ACPI
@@ -151,7 +152,6 @@ extern void ata_eh_acquire(struct ata_port *ap);
 extern void ata_eh_release(struct ata_port *ap);
 extern enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd);
 extern void ata_scsi_error(struct Scsi_Host *host);
-extern void ata_port_wait_eh(struct ata_port *ap);
 extern void ata_eh_fastdrain_timerfn(unsigned long arg);
 extern void ata_qc_schedule_eh(struct ata_queued_cmd *qc);
 extern void ata_dev_disable(struct ata_device *dev);
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 4e89103204dc..a06e608789e3 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1903,6 +1903,14 @@ config SCSI_BFA_FC
 	  To compile this driver as a module, choose M here. The module will
 	  be called bfa.
 
+config SCSI_VIRTIO
+	tristate "virtio-scsi support (EXPERIMENTAL)"
+	depends on EXPERIMENTAL && VIRTIO
+	help
+          This is the virtual HBA driver for virtio.  If the kernel will
+          be used in a virtual machine, say Y or M.
+
+
 endif # SCSI_LOWLEVEL
 
 source "drivers/scsi/pcmcia/Kconfig"
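With this entry (and the Makefile hook in the next hunk), the driver builds once the dependencies are satisfied; a guest kernel configuration might carry, for example (a sketch, not a complete config):

	CONFIG_EXPERIMENTAL=y
	CONFIG_VIRTIO=y
	CONFIG_SCSI_VIRTIO=m
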
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index e4c1a69f8fab..ad24e065b1e5 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -141,6 +141,7 @@ obj-$(CONFIG_SCSI_CXGB4_ISCSI)	+= libiscsi.o libiscsi_tcp.o cxgbi/
 obj-$(CONFIG_SCSI_BNX2_ISCSI)	+= libiscsi.o bnx2i/
 obj-$(CONFIG_BE2ISCSI)		+= libiscsi.o be2iscsi/
 obj-$(CONFIG_SCSI_PMCRAID)	+= pmcraid.o
+obj-$(CONFIG_SCSI_VIRTIO)	+= virtio_scsi.o
 obj-$(CONFIG_VMWARE_PVSCSI)	+= vmw_pvscsi.o
 obj-$(CONFIG_HYPERV_STORAGE)	+= hv_storvsc.o
 
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 409f5805bdd6..52551662d107 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -151,7 +151,11 @@ int aac_msi;
 int aac_commit = -1;
 int startup_timeout = 180;
 int aif_timeout = 120;
+int aac_sync_mode;  /* Only Sync. transfer - disabled */
 
+module_param(aac_sync_mode, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(aac_sync_mode, "Force sync. transfer mode"
+	" 0=off, 1=on");
 module_param(nondasd, int, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(nondasd, "Control scanning of hba for nondasd devices."
 	" 0=off, 1=on");
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index ffb587817efc..3fcf62724fad 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -12,7 +12,7 @@
  *----------------------------------------------------------------------------*/
 
 #ifndef AAC_DRIVER_BUILD
-# define AAC_DRIVER_BUILD 28000
+# define AAC_DRIVER_BUILD 28900
 # define AAC_DRIVER_BRANCH "-ms"
 #endif
 #define MAXIMUM_NUM_CONTAINERS	32
@@ -756,8 +756,16 @@ struct src_mu_registers {
 
 struct src_registers {
 	struct src_mu_registers MUnit;	/* 00h - c7h */
-	__le32 reserved1[130790];	/* c8h - 7fc5fh */
-	struct src_inbound IndexRegs;	/* 7fc60h */
+	union {
+		struct {
+			__le32 reserved1[130790];	/* c8h - 7fc5fh */
+			struct src_inbound IndexRegs;	/* 7fc60h */
+		} tupelo;
+		struct {
+			__le32 reserved1[974];		/* c8h - fffh */
+			struct src_inbound IndexRegs;	/* 1000h */
+		} denali;
+	} u;
 };
 
 #define src_readb(AEP, CSR)		readb(&((AEP)->regs.src.bar0->CSR))
@@ -999,6 +1007,10 @@ struct aac_bus_info_response {
 #define AAC_OPT_NEW_COMM		cpu_to_le32(1<<17)
 #define AAC_OPT_NEW_COMM_64		cpu_to_le32(1<<18)
 #define AAC_OPT_NEW_COMM_TYPE1		cpu_to_le32(1<<28)
+#define AAC_OPT_NEW_COMM_TYPE2		cpu_to_le32(1<<29)
+#define AAC_OPT_NEW_COMM_TYPE3		cpu_to_le32(1<<30)
+#define AAC_OPT_NEW_COMM_TYPE4		cpu_to_le32(1<<31)
+
 
 struct aac_dev
 {
@@ -1076,6 +1088,8 @@ struct aac_dev
 #	define AAC_MIN_FOOTPRINT_SIZE 8192
 #	define AAC_MIN_SRC_BAR0_SIZE 0x400000
 #	define AAC_MIN_SRC_BAR1_SIZE 0x800
+#	define AAC_MIN_SRCV_BAR0_SIZE 0x100000
+#	define AAC_MIN_SRCV_BAR1_SIZE 0x400
 #endif
 	union
 	{
@@ -1116,7 +1130,10 @@ struct aac_dev
 	u8			msi;
 	int			management_fib_count;
 	spinlock_t		manage_lock;
-
+	spinlock_t		sync_lock;
+	int			sync_mode;
+	struct fib		*sync_fib;
+	struct list_head	sync_fib_list;
 };
 
 #define aac_adapter_interrupt(dev) \
@@ -1163,6 +1180,7 @@ struct aac_dev
 
 #define FIB_CONTEXT_FLAG_TIMED_OUT		(0x00000001)
 #define FIB_CONTEXT_FLAG			(0x00000002)
+#define FIB_CONTEXT_FLAG_WAIT			(0x00000004)
 
 /*
  *	Define the command values
@@ -1970,6 +1988,7 @@ int aac_rkt_init(struct aac_dev *dev);
 int aac_nark_init(struct aac_dev *dev);
 int aac_sa_init(struct aac_dev *dev);
 int aac_src_init(struct aac_dev *dev);
+int aac_srcv_init(struct aac_dev *dev);
 int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify);
 unsigned int aac_response_normal(struct aac_queue * q);
 unsigned int aac_command_normal(struct aac_queue * q);
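The new union gives one struct two BAR0 layouts: on Tupelo (Series 6) the index registers sit at offset 7fc60h, on Denali (Series 7 and later) at 1000h. A sketch of how the mapping code selects between them (see the aac_src_ioremap()/aac_srcv_ioremap() hunks in src.c below; is_denali is a hypothetical flag standing in for the per-family init path):

	struct src_registers __iomem *regs =
		(struct src_registers __iomem *)dev->base;

	if (is_denali)
		dev->IndexRegs = &regs->u.denali.IndexRegs;	/* 1000h  */
	else
		dev->IndexRegs = &regs->u.tupelo.IndexRegs;	/* 7fc60h */
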
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 7ac8fdb5577b..a35f54ebdce0 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -325,12 +325,14 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
 {
 	u32 status[5];
 	struct Scsi_Host * host = dev->scsi_host_ptr;
+	extern int aac_sync_mode;
 
 	/*
 	 *	Check the preferred comm settings, defaults from template.
 	 */
 	dev->management_fib_count = 0;
 	spin_lock_init(&dev->manage_lock);
+	spin_lock_init(&dev->sync_lock);
 	dev->max_fib_size = sizeof(struct hw_fib);
 	dev->sg_tablesize = host->sg_tablesize = (dev->max_fib_size
 		- sizeof(struct aac_fibhdr)
@@ -344,13 +346,21 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
 	 		(status[0] == 0x00000001)) {
 		if (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_64))
 			dev->raw_io_64 = 1;
-		if (dev->a_ops.adapter_comm) {
-			if (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE1)) {
-				dev->comm_interface = AAC_COMM_MESSAGE_TYPE1;
-				dev->raw_io_interface = 1;
-			} else if (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM)) {
+		dev->sync_mode = aac_sync_mode;
+		if (dev->a_ops.adapter_comm &&
+			(status[1] & le32_to_cpu(AAC_OPT_NEW_COMM))) {
 				dev->comm_interface = AAC_COMM_MESSAGE;
 				dev->raw_io_interface = 1;
+			if ((status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE1))) {
+				/* driver supports TYPE1 (Tupelo) */
+				dev->comm_interface = AAC_COMM_MESSAGE_TYPE1;
+			} else if ((status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE4)) ||
+				  (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE3)) ||
+				  (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE2))) {
+					/* driver doesn't support TYPE2 (Series7), TYPE3 and TYPE4 */
+					/* switch to sync. mode */
+					dev->comm_interface = AAC_COMM_MESSAGE_TYPE1;
+					dev->sync_mode = 1;
 			}
 		}
 		if ((dev->comm_interface == AAC_COMM_MESSAGE) &&
@@ -455,6 +465,7 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
 	}
 		
 	INIT_LIST_HEAD(&dev->fib_list);
+	INIT_LIST_HEAD(&dev->sync_fib_list);
 
 	return dev;
 }
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index e5f2d7d9002e..4b32ca442433 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -416,6 +416,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
 	unsigned long flags = 0;
 	unsigned long qflags;
 	unsigned long mflags = 0;
+	unsigned long sflags = 0;
 
 
 	if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned)))
@@ -512,6 +513,31 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
 		spin_lock_irqsave(&fibptr->event_lock, flags);
 	}
 
+	if (dev->sync_mode) {
+		if (wait)
+			spin_unlock_irqrestore(&fibptr->event_lock, flags);
+		spin_lock_irqsave(&dev->sync_lock, sflags);
+		if (dev->sync_fib) {
+			list_add_tail(&fibptr->fiblink, &dev->sync_fib_list);
+			spin_unlock_irqrestore(&dev->sync_lock, sflags);
+		} else {
+			dev->sync_fib = fibptr;
+			spin_unlock_irqrestore(&dev->sync_lock, sflags);
+			aac_adapter_sync_cmd(dev, SEND_SYNCHRONOUS_FIB,
+				(u32)fibptr->hw_fib_pa, 0, 0, 0, 0, 0,
+				NULL, NULL, NULL, NULL, NULL);
+		}
+		if (wait) {
+			fibptr->flags |= FIB_CONTEXT_FLAG_WAIT;
+			if (down_interruptible(&fibptr->event_wait)) {
+				fibptr->flags &= ~FIB_CONTEXT_FLAG_WAIT;
+				return -EFAULT;
+			}
+			return 0;
+		}
+		return -EINPROGRESS;
+	}
+
 	if (aac_adapter_deliver(fibptr) != 0) {
 		printk(KERN_ERR "aac_fib_send: returned -EBUSY\n");
 		if (wait) {
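In sync mode only one FIB may be on the wire, so the hunk above either claims the dev->sync_fib slot and issues the command, or parks the FIB on dev->sync_fib_list; the matching dequeue runs from the doorbell interrupt in the src.c hunk below. Condensed to its core, the submit rule looks like this (sketch, with wait/error handling omitted):

	static void submit_or_queue(struct aac_dev *dev, struct fib *fibptr)
	{
		unsigned long flags;
		bool send = false;

		spin_lock_irqsave(&dev->sync_lock, flags);
		if (dev->sync_fib) {
			list_add_tail(&fibptr->fiblink, &dev->sync_fib_list);
		} else {
			dev->sync_fib = fibptr;	/* claim the single hardware slot */
			send = true;
		}
		spin_unlock_irqrestore(&dev->sync_lock, flags);

		if (send)
			aac_adapter_sync_cmd(dev, SEND_SYNCHRONOUS_FIB,
				(u32)fibptr->hw_fib_pa, 0, 0, 0, 0, 0,
				NULL, NULL, NULL, NULL, NULL);
	}
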
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 705e13e470af..0d279c445a30 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -56,7 +56,7 @@
 
 #include "aacraid.h"
 
-#define AAC_DRIVER_VERSION		"1.1-7"
+#define AAC_DRIVER_VERSION		"1.2-0"
 #ifndef AAC_DRIVER_BRANCH
 #define AAC_DRIVER_BRANCH		""
 #endif
@@ -162,7 +162,10 @@ static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
 	{ 0x9005, 0x0285, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 59 }, /* Adaptec Catch All */
 	{ 0x9005, 0x0286, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 60 }, /* Adaptec Rocket Catch All */
 	{ 0x9005, 0x0288, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 61 }, /* Adaptec NEMER/ARK Catch All */
-	{ 0x9005, 0x028b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 62 }, /* Adaptec PMC Catch All */
+	{ 0x9005, 0x028b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 62 }, /* Adaptec PMC Series 6 (Tupelo) */
+	{ 0x9005, 0x028c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 63 }, /* Adaptec PMC Series 7 (Denali) */
+	{ 0x9005, 0x028d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 64 }, /* Adaptec PMC Series 8 */
+	{ 0x9005, 0x028f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 65 }, /* Adaptec PMC Series 9 */
 	{ 0,}
 };
 MODULE_DEVICE_TABLE(pci, aac_pci_tbl);
@@ -238,7 +241,10 @@ static struct aac_driver_ident aac_drivers[] = {
 	{ aac_rx_init, "aacraid",  "ADAPTEC ", "RAID            ", 2 }, /* Adaptec Catch All */
 	{ aac_rkt_init, "aacraid", "ADAPTEC ", "RAID            ", 2 }, /* Adaptec Rocket Catch All */
 	{ aac_nark_init, "aacraid", "ADAPTEC ", "RAID           ", 2 }, /* Adaptec NEMER/ARK Catch All */
-	{ aac_src_init, "aacraid", "ADAPTEC ", "RAID            ", 2 } /* Adaptec PMC Catch All */
+	{ aac_src_init, "aacraid", "ADAPTEC ", "RAID            ", 2 }, /* Adaptec PMC Series 6 (Tupelo) */
+	{ aac_srcv_init, "aacraid", "ADAPTEC ", "RAID            ", 2 }, /* Adaptec PMC Series 7 (Denali) */
+	{ aac_srcv_init, "aacraid", "ADAPTEC ", "RAID            ", 2 }, /* Adaptec PMC Series 8 */
+	{ aac_srcv_init, "aacraid", "ADAPTEC ", "RAID            ", 2 } /* Adaptec PMC Series 9 */
 };
 
 /**
@@ -1102,6 +1108,7 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
 	int error = -ENODEV;
 	int unique_id = 0;
 	u64 dmamask;
+	extern int aac_sync_mode;
 
 	list_for_each_entry(aac, &aac_devices, entry) {
 		if (aac->id > unique_id)
@@ -1162,6 +1169,21 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
 	if ((*aac_drivers[index].init)(aac))
 		goto out_unmap;
 
+	if (aac->sync_mode) {
+		if (aac_sync_mode)
+			printk(KERN_INFO "%s%d: Sync. mode enforced "
+				"by driver parameter. This will cause "
+				"a significant performance decrease!\n",
+				aac->name,
+				aac->id);
+		else
+			printk(KERN_INFO "%s%d: Async. mode not supported "
+				"by current driver, sync. mode enforced."
+				"\nPlease update driver to get full performance.\n",
+				aac->name,
+				aac->id);
+	}
+
 	/*
 	 *	Start any kernel threads needed
 	 */
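Since aac_sync_mode is a writable module parameter (see the aachba.c hunk above), the slow path can also be forced for debugging at load time, e.g.:

	modprobe aacraid aac_sync_mode=1

in which case the first of the two messages above is printed for every adapter.
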
diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c
index ce530f113fdb..b029c7cc785b 100644
--- a/drivers/scsi/aacraid/rx.c
+++ b/drivers/scsi/aacraid/rx.c
@@ -643,6 +643,7 @@ int _aac_rx_init(struct aac_dev *dev)
 	if (aac_init_adapter(dev) == NULL)
 		goto error_iounmap;
 	aac_adapter_comm(dev, dev->comm_interface);
+	dev->sync_mode = 0;	/* sync. mode not supported */
 	dev->msi = aac_msi && !pci_enable_msi(dev->pdev);
 	if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
 			IRQF_SHARED|IRQF_DISABLED, "aacraid", dev) < 0) {
diff --git a/drivers/scsi/aacraid/sa.c b/drivers/scsi/aacraid/sa.c
index e5d4457121ea..beb533630d4b 100644
--- a/drivers/scsi/aacraid/sa.c
+++ b/drivers/scsi/aacraid/sa.c
@@ -385,6 +385,7 @@ int aac_sa_init(struct aac_dev *dev)
 
 	if(aac_init_adapter(dev) == NULL)
 		goto error_irq;
+	dev->sync_mode = 0;	/* sync. mode not supported */
 	if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
 			IRQF_SHARED|IRQF_DISABLED,
 			"aacraid", (void *)dev ) < 0) {
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
index 957595a7a45c..2bee51506a91 100644
--- a/drivers/scsi/aacraid/src.c
+++ b/drivers/scsi/aacraid/src.c
@@ -96,6 +96,38 @@ static irqreturn_t aac_src_intr_message(int irq, void *dev_id)
 			our_interrupt = 1;
 			/* handle AIF */
 			aac_intr_normal(dev, 0, 2, 0, NULL);
+		} else if (bellbits_shifted & OUTBOUNDDOORBELL_0) {
+			unsigned long sflags;
+			struct list_head *entry;
+			int send_it = 0;
+
+			if (dev->sync_fib) {
+				our_interrupt = 1;
+				if (dev->sync_fib->callback)
+					dev->sync_fib->callback(dev->sync_fib->callback_data,
+						dev->sync_fib);
+				spin_lock_irqsave(&dev->sync_fib->event_lock, sflags);
+				if (dev->sync_fib->flags & FIB_CONTEXT_FLAG_WAIT) {
+					dev->management_fib_count--;
+					up(&dev->sync_fib->event_wait);
+				}
+				spin_unlock_irqrestore(&dev->sync_fib->event_lock, sflags);
+				spin_lock_irqsave(&dev->sync_lock, sflags);
+				if (!list_empty(&dev->sync_fib_list)) {
+					entry = dev->sync_fib_list.next;
+					dev->sync_fib = list_entry(entry, struct fib, fiblink);
+					list_del(entry);
+					send_it = 1;
+				} else {
+					dev->sync_fib = NULL;
+				}
+				spin_unlock_irqrestore(&dev->sync_lock, sflags);
+				if (send_it) {
+					aac_adapter_sync_cmd(dev, SEND_SYNCHRONOUS_FIB,
+						(u32)dev->sync_fib->hw_fib_pa, 0, 0, 0, 0, 0,
+						NULL, NULL, NULL, NULL, NULL);
+				}
+			}
 		}
 	}
 
@@ -177,56 +209,63 @@ static int src_sync_cmd(struct aac_dev *dev, u32 command,
 	 */
 	src_writel(dev, MUnit.IDR, INBOUNDDOORBELL_0 << SRC_IDR_SHIFT);
 
-	ok = 0;
-	start = jiffies;
+	if (!dev->sync_mode || command != SEND_SYNCHRONOUS_FIB) {
+		ok = 0;
+		start = jiffies;
 
-	/*
-	 *	Wait up to 30 seconds
-	 */
-	while (time_before(jiffies, start+30*HZ)) {
-		/* Delay 5 microseconds to let Mon960 get info. */
-		udelay(5);
-
-		/* Mon960 will set doorbell0 bit
-		 * when it has completed the command
+		/*
+		 *	Wait up to 5 minutes
 		 */
-		if ((src_readl(dev, MUnit.ODR_R) >> SRC_ODR_SHIFT) & OUTBOUNDDOORBELL_0) {
-			/* Clear the doorbell */
-			src_writel(dev,
-				MUnit.ODR_C,
-				OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
-			ok = 1;
-			break;
+		while (time_before(jiffies, start+300*HZ)) {
+			udelay(5);	/* Delay 5 microseconds to let Mon960 get info. */
+			/*
+			 *	Mon960 will set doorbell0 bit when it has completed the command.
+			 */
+			if ((src_readl(dev, MUnit.ODR_R) >> SRC_ODR_SHIFT) & OUTBOUNDDOORBELL_0) {
+				/*
+				 *	Clear the doorbell.
+				 */
+				src_writel(dev, MUnit.ODR_C, OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
+				ok = 1;
+				break;
+			}
+			/*
+			 *	Yield the processor in case we are slow
+			 */
+			msleep(1);
 		}
-
-		 /* Yield the processor in case we are slow */
-		msleep(1);
-	}
-	if (unlikely(ok != 1)) {
-		 /* Restore interrupt mask even though we timed out */
-		aac_adapter_enable_int(dev);
-		return -ETIMEDOUT;
+		if (unlikely(ok != 1)) {
+			/*
+			 *	Restore interrupt mask even though we timed out
+			 */
+			aac_adapter_enable_int(dev);
+			return -ETIMEDOUT;
+		}
+		/*
+		 *	Pull the synch status from Mailbox 0.
+		 */
+		if (status)
+			*status = readl(&dev->IndexRegs->Mailbox[0]);
+		if (r1)
+			*r1 = readl(&dev->IndexRegs->Mailbox[1]);
+		if (r2)
+			*r2 = readl(&dev->IndexRegs->Mailbox[2]);
+		if (r3)
+			*r3 = readl(&dev->IndexRegs->Mailbox[3]);
+		if (r4)
+			*r4 = readl(&dev->IndexRegs->Mailbox[4]);
+
+		/*
+		 *	Clear the synch command doorbell.
+		 */
+		src_writel(dev, MUnit.ODR_C, OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
 	}
 
-	 /* Pull the synch status from Mailbox 0 */
-	if (status)
-		*status = readl(&dev->IndexRegs->Mailbox[0]);
-	if (r1)
-		*r1 = readl(&dev->IndexRegs->Mailbox[1]);
-	if (r2)
-		*r2 = readl(&dev->IndexRegs->Mailbox[2]);
-	if (r3)
-		*r3 = readl(&dev->IndexRegs->Mailbox[3]);
-	if (r4)
-		*r4 = readl(&dev->IndexRegs->Mailbox[4]);
-
-	 /* Clear the synch command doorbell */
-	src_writel(dev, MUnit.ODR_C, OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
-
-	 /* Restore interrupt mask */
+	/*
+	 *	Restore interrupt mask
+	 */
 	aac_adapter_enable_int(dev);
 	return 0;
-
 }
 
 /**
@@ -386,9 +425,7 @@ static int aac_src_ioremap(struct aac_dev *dev, u32 size)
 {
 	if (!size) {
 		iounmap(dev->regs.src.bar0);
-		dev->regs.src.bar0 = NULL;
-		iounmap(dev->base);
-		dev->base = NULL;
+		dev->base = dev->regs.src.bar0 = NULL;
 		return 0;
 	}
 	dev->regs.src.bar1 = ioremap(pci_resource_start(dev->pdev, 2),
@@ -404,7 +441,27 @@ static int aac_src_ioremap(struct aac_dev *dev, u32 size)
 		return -1;
 	}
 	dev->IndexRegs = &((struct src_registers __iomem *)
-		dev->base)->IndexRegs;
+		dev->base)->u.tupelo.IndexRegs;
+	return 0;
+}
+
+/**
+ *  aac_srcv_ioremap
+ *	@size: mapping resize request
+ *
+ */
+static int aac_srcv_ioremap(struct aac_dev *dev, u32 size)
+{
+	if (!size) {
+		iounmap(dev->regs.src.bar0);
+		dev->base = dev->regs.src.bar0 = NULL;
+		return 0;
+	}
+	dev->base = dev->regs.src.bar0 = ioremap(dev->scsi_host_ptr->base, size);
+	if (dev->base == NULL)
+		return -1;
+	dev->IndexRegs = &((struct src_registers __iomem *)
+		dev->base)->u.denali.IndexRegs;
 	return 0;
 }
 
@@ -419,7 +476,7 @@ static int aac_src_restart_adapter(struct aac_dev *dev, int bled)
 		bled = aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS,
 			0, 0, 0, 0, 0, 0, &var, &reset_mask, NULL, NULL, NULL);
 			if (bled || (var != 0x00000001))
-				bled = -EINVAL;
+				return -EINVAL;
 		if (dev->supplement_adapter_info.SupportedOptions2 &
 			AAC_OPTION_DOORBELL_RESET) {
 			src_writel(dev, MUnit.IDR, reset_mask);
@@ -579,15 +636,149 @@ int aac_src_init(struct aac_dev *dev)
 	dev->dbg_size = AAC_MIN_SRC_BAR1_SIZE;
 
 	aac_adapter_enable_int(dev);
+
+	if (!dev->sync_mode) {
+		/*
+		 * Tell the adapter that all is configured, and it can
+		 * start accepting requests
+		 */
+		aac_src_start_adapter(dev);
+	}
+	return 0;
+
+error_iounmap:
+
+	return -1;
+}
+
+/**
+ *  aac_srcv_init	-	initialize an SRCv card
+ *  @dev: device to configure
+ *
+ */
+
+int aac_srcv_init(struct aac_dev *dev)
+{
+	unsigned long start;
+	unsigned long status;
+	int restart = 0;
+	int instance = dev->id;
+	const char *name = dev->name;
+
+	dev->a_ops.adapter_ioremap = aac_srcv_ioremap;
+	dev->a_ops.adapter_comm = aac_src_select_comm;
+
+	dev->base_size = AAC_MIN_SRCV_BAR0_SIZE;
+	if (aac_adapter_ioremap(dev, dev->base_size)) {
+		printk(KERN_WARNING "%s: unable to map adapter.\n", name);
+		goto error_iounmap;
+	}
+
+	/* Failure to reset here is an option ... */
+	dev->a_ops.adapter_sync_cmd = src_sync_cmd;
+	dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
+	if ((aac_reset_devices || reset_devices) &&
+		!aac_src_restart_adapter(dev, 0))
+		++restart;
 	/*
-	 *	Tell the adapter that all is configured, and it can
-	 * start accepting requests
+	 *	Check to see if the board panic'd while booting.
 	 */
-	aac_src_start_adapter(dev);
+	status = src_readl(dev, MUnit.OMR);
+	if (status & KERNEL_PANIC) {
+		if (aac_src_restart_adapter(dev, aac_src_check_health(dev)))
+			goto error_iounmap;
+		++restart;
+	}
+	/*
+	 *	Check to see if the board failed any self tests.
+	 */
+	status = src_readl(dev, MUnit.OMR);
+	if (status & SELF_TEST_FAILED) {
+		printk(KERN_ERR "%s%d: adapter self-test failed.\n", dev->name, instance);
+		goto error_iounmap;
+	}
+	/*
+	 *	Check to see if the monitor panic'd while booting.
+	 */
+	if (status & MONITOR_PANIC) {
+		printk(KERN_ERR "%s%d: adapter monitor panic.\n", dev->name, instance);
+		goto error_iounmap;
+	}
+	start = jiffies;
+	/*
+	 *	Wait for the adapter to be up and running. Wait up to 3 minutes
+	 */
+	while (!((status = src_readl(dev, MUnit.OMR)) & KERNEL_UP_AND_RUNNING)) {
+		if ((restart &&
+		  (status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) ||
+		  time_after(jiffies, start+HZ*startup_timeout)) {
+			printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n",
+					dev->name, instance, status);
+			goto error_iounmap;
+		}
+		if (!restart &&
+		  ((status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC)) ||
+		  time_after(jiffies, start + HZ *
+		  ((startup_timeout > 60)
+		    ? (startup_timeout - 60)
+		    : (startup_timeout / 2))))) {
+			if (likely(!aac_src_restart_adapter(dev, aac_src_check_health(dev))))
+				start = jiffies;
+			++restart;
+		}
+		msleep(1);
+	}
+	if (restart && aac_commit)
+		aac_commit = 1;
+	/*
+	 *	Fill in the common function dispatch table.
+	 */
+	dev->a_ops.adapter_interrupt = aac_src_interrupt_adapter;
+	dev->a_ops.adapter_disable_int = aac_src_disable_interrupt;
+	dev->a_ops.adapter_notify = aac_src_notify_adapter;
+	dev->a_ops.adapter_sync_cmd = src_sync_cmd;
+	dev->a_ops.adapter_check_health = aac_src_check_health;
+	dev->a_ops.adapter_restart = aac_src_restart_adapter;
+
+	/*
+	 *	First clear out all interrupts.  Then enable the one's that we
+	 *	can handle.
+	 */
+	aac_adapter_comm(dev, AAC_COMM_MESSAGE);
+	aac_adapter_disable_int(dev);
+	src_writel(dev, MUnit.ODR_C, 0xffffffff);
+	aac_adapter_enable_int(dev);
 
+	if (aac_init_adapter(dev) == NULL)
+		goto error_iounmap;
+	if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE1)
+		goto error_iounmap;
+	dev->msi = aac_msi && !pci_enable_msi(dev->pdev);
+	if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
+		IRQF_SHARED|IRQF_DISABLED, "aacraid", dev) < 0) {
+		if (dev->msi)
+			pci_disable_msi(dev->pdev);
+		printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
+			name, instance);
+		goto error_iounmap;
+	}
+	dev->dbg_base = dev->scsi_host_ptr->base;
+	dev->dbg_base_mapped = dev->base;
+	dev->dbg_size = dev->base_size;
+
+	aac_adapter_enable_int(dev);
+
+	if (!dev->sync_mode) {
+		/*
+		 * Tell the adapter that all is configured, and it can
+		 * start accepting requests
+		 */
+		aac_src_start_adapter(dev);
+	}
 	return 0;
 
 error_iounmap:
 
 	return -1;
 }
+
diff --git a/drivers/scsi/aic94xx/aic94xx.h b/drivers/scsi/aic94xx/aic94xx.h
index 2863a9d22851..66cda669b417 100644
--- a/drivers/scsi/aic94xx/aic94xx.h
+++ b/drivers/scsi/aic94xx/aic94xx.h
@@ -80,6 +80,8 @@ void asd_invalidate_edb(struct asd_ascb *ascb, int edb_id);
 
 int  asd_execute_task(struct sas_task *, int num, gfp_t gfp_flags);
 
+void asd_set_dmamode(struct domain_device *dev);
+
 /* ---------- TMFs ---------- */
 int  asd_abort_task(struct sas_task *);
 int  asd_abort_task_set(struct domain_device *, u8 *lun);
diff --git a/drivers/scsi/aic94xx/aic94xx_dev.c b/drivers/scsi/aic94xx/aic94xx_dev.c
index 2e2ddec9c0b6..64136c56e706 100644
--- a/drivers/scsi/aic94xx/aic94xx_dev.c
+++ b/drivers/scsi/aic94xx/aic94xx_dev.c
@@ -109,26 +109,37 @@ static int asd_init_sata_tag_ddb(struct domain_device *dev)
 	return 0;
 }
 
-static int asd_init_sata(struct domain_device *dev)
+void asd_set_dmamode(struct domain_device *dev)
 {
 	struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
+	struct ata_device *ata_dev = sas_to_ata_dev(dev);
 	int ddb = (int) (unsigned long) dev->lldd_dev;
 	u32 qdepth = 0;
-	int res = 0;
 
-	asd_ddbsite_write_word(asd_ha, ddb, ATA_CMD_SCBPTR, 0xFFFF);
-	if ((dev->dev_type == SATA_DEV || dev->dev_type == SATA_PM_PORT) &&
-	    dev->sata_dev.identify_device &&
-	    dev->sata_dev.identify_device[10] != 0) {
-		u16 w75 = le16_to_cpu(dev->sata_dev.identify_device[75]);
-		u16 w76 = le16_to_cpu(dev->sata_dev.identify_device[76]);
-
-		if (w76 & 0x100) /* NCQ? */
-			qdepth = (w75 & 0x1F) + 1;
+	if (dev->dev_type == SATA_DEV || dev->dev_type == SATA_PM_PORT) {
+		if (ata_id_has_ncq(ata_dev->id))
+			qdepth = ata_id_queue_depth(ata_dev->id);
 		asd_ddbsite_write_dword(asd_ha, ddb, SATA_TAG_ALLOC_MASK,
 					(1ULL<<qdepth)-1);
 		asd_ddbsite_write_byte(asd_ha, ddb, NUM_SATA_TAGS, qdepth);
 	}
+
+	if (qdepth > 0)
+		if (asd_init_sata_tag_ddb(dev) != 0) {
+			unsigned long flags;
+
+			spin_lock_irqsave(dev->sata_dev.ap->lock, flags);
+			ata_dev->flags |= ATA_DFLAG_NCQ_OFF;
+			spin_unlock_irqrestore(dev->sata_dev.ap->lock, flags);
+		}
+}
+
+static int asd_init_sata(struct domain_device *dev)
+{
+	struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
+	int ddb = (int) (unsigned long) dev->lldd_dev;
+
+	asd_ddbsite_write_word(asd_ha, ddb, ATA_CMD_SCBPTR, 0xFFFF);
 	if (dev->dev_type == SATA_DEV || dev->dev_type == SATA_PM ||
 	    dev->dev_type == SATA_PM_PORT) {
 		struct dev_to_host_fis *fis = (struct dev_to_host_fis *)
@@ -136,9 +147,8 @@ static int asd_init_sata(struct domain_device *dev)
 		asd_ddbsite_write_byte(asd_ha, ddb, SATA_STATUS, fis->status);
 	}
 	asd_ddbsite_write_word(asd_ha, ddb, NCQ_DATA_SCB_PTR, 0xFFFF);
-	if (qdepth > 0)
-		res = asd_init_sata_tag_ddb(dev);
-	return res;
+
+	return 0;
 }
 
 static int asd_init_target_ddb(struct domain_device *dev)
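The rewritten logic drops the open-coded parsing of IDENTIFY words 75/76 in favor of the libata accessors, which encode the same ATA definitions. The essential derivation (sketch; both helpers come from <linux/ata.h>):

	u32 qdepth = 0;

	if (ata_id_has_ncq(ata_dev->id))			/* word 76, bit 8 */
		qdepth = ata_id_queue_depth(ata_dev->id);	/* (word 75 & 0x1f) + 1 */

If allocating the SATA tag DDB then fails, the new code simply turns NCQ off for the device (ATA_DFLAG_NCQ_OFF) instead of failing the whole init, which is why asd_init_sata() no longer returns an error for it.
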
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index d5ff142c93a2..ff80552ead84 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -68,7 +68,6 @@ static struct scsi_host_template aic94xx_sht = {
 	.queuecommand		= sas_queuecommand,
 	.target_alloc		= sas_target_alloc,
 	.slave_configure	= sas_slave_configure,
-	.slave_destroy		= sas_slave_destroy,
 	.scan_finished		= asd_scan_finished,
 	.scan_start		= asd_scan_start,
 	.change_queue_depth	= sas_change_queue_depth,
@@ -82,7 +81,6 @@ static struct scsi_host_template aic94xx_sht = {
 	.use_clustering		= ENABLE_CLUSTERING,
 	.eh_device_reset_handler	= sas_eh_device_reset_handler,
 	.eh_bus_reset_handler	= sas_eh_bus_reset_handler,
-	.slave_alloc		= sas_slave_alloc,
 	.target_destroy		= sas_target_destroy,
 	.ioctl			= sas_ioctl,
 };
@@ -972,7 +970,7 @@ static int asd_scan_finished(struct Scsi_Host *shost, unsigned long time)
 	if (time < HZ)
 		return 0;
 	/* Wait for discovery to finish */
-	scsi_flush_work(shost);
+	sas_drain_work(SHOST_TO_SAS_HA(shost));
 	return 1;
 }
 
@@ -1010,6 +1008,8 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
 	.lldd_clear_nexus_ha	= asd_clear_nexus_ha,
 
 	.lldd_control_phy	= asd_control_phy,
+
+	.lldd_ata_set_dmamode	= asd_set_dmamode,
 };
 
 static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
diff --git a/drivers/scsi/aic94xx/aic94xx_tmf.c b/drivers/scsi/aic94xx/aic94xx_tmf.c
index 0add73bdf2a4..cf9040933da6 100644
--- a/drivers/scsi/aic94xx/aic94xx_tmf.c
+++ b/drivers/scsi/aic94xx/aic94xx_tmf.c
@@ -181,7 +181,7 @@ static int asd_clear_nexus_I_T(struct domain_device *dev,
 int asd_I_T_nexus_reset(struct domain_device *dev)
 {
 	int res, tmp_res, i;
-	struct sas_phy *phy = sas_find_local_phy(dev);
+	struct sas_phy *phy = sas_get_local_phy(dev);
 	/* Standard mandates link reset for ATA  (type 0) and
 	 * hard reset for SSP (type 1) */
 	int reset_type = (dev->dev_type == SATA_DEV ||
@@ -192,7 +192,7 @@ int asd_I_T_nexus_reset(struct domain_device *dev)
 	ASD_DPRINTK("sending %s reset to %s\n",
 		    reset_type ? "hard" : "soft", dev_name(&phy->dev));
 	res = sas_phy_reset(phy, reset_type);
-	if (res == TMF_RESP_FUNC_COMPLETE) {
+	if (res == TMF_RESP_FUNC_COMPLETE || res == -ENODEV) {
 		/* wait for the maximum settle time */
 		msleep(500);
 		/* clear all outstanding commands (keep nexus suspended) */
@@ -201,7 +201,7 @@ int asd_I_T_nexus_reset(struct domain_device *dev)
 	for (i = 0 ; i < 3; i++) {
 		tmp_res = asd_clear_nexus_I_T(dev, NEXUS_PHASE_RESUME);
 		if (tmp_res == TC_RESUME)
-			return res;
+			goto out;
 		msleep(500);
 	}
 
@@ -211,7 +211,10 @@ int asd_I_T_nexus_reset(struct domain_device *dev)
 	dev_printk(KERN_ERR, &phy->dev,
 		   "Failed to resume nexus after reset 0x%x\n", tmp_res);
 
-	return TMF_RESP_FUNC_FAILED;
+	res = TMF_RESP_FUNC_FAILED;
+ out:
+	sas_put_local_phy(phy);
+	return res;
 }
 
 static int asd_clear_nexus_I_T_L(struct domain_device *dev, u8 *lun)
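sas_get_local_phy() differs from the old sas_find_local_phy() in that it returns a referenced phy, so every exit path must now drop that reference; hence the new "out:" label above. The discipline, reduced to a sketch:

	struct sas_phy *phy = sas_get_local_phy(dev);

	res = sas_phy_reset(phy, reset_type);
	/* ... use phy ... */
	sas_put_local_phy(phy);	/* must balance on every return path */
	return res;
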
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index 530de2b1200a..8005c6c5a080 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -3047,8 +3047,7 @@ bfad_im_bsg_els_ct_request(struct fc_bsg_job *job)
 	 * Allocate buffer for bsg_fcpt and do a copy_from_user op for payload
 	 * buffer of size bsg_data->payload_len
 	 */
-	bsg_fcpt = (struct bfa_bsg_fcpt_s *)
-		   kzalloc(bsg_data->payload_len, GFP_KERNEL);
+	bsg_fcpt = kzalloc(bsg_data->payload_len, GFP_KERNEL);
 	if (!bsg_fcpt)
 		goto out;
 
@@ -3060,6 +3059,7 @@ bfad_im_bsg_els_ct_request(struct fc_bsg_job *job)
 
 	drv_fcxp = kzalloc(sizeof(struct bfad_fcxp), GFP_KERNEL);
 	if (drv_fcxp == NULL) {
+		kfree(bsg_fcpt);
 		rc = -ENOMEM;
 		goto out;
 	}
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index 049ea907e04a..a4953ef9e53a 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -62,7 +62,7 @@
 #include "bnx2fc_constants.h"
 
 #define BNX2FC_NAME		"bnx2fc"
-#define BNX2FC_VERSION		"1.0.9"
+#define BNX2FC_VERSION		"1.0.10"
 
 #define PFX			"bnx2fc: "
 
@@ -114,6 +114,8 @@
 #define BNX2FC_HASH_TBL_CHUNK_SIZE	(16 * 1024)
 
 #define BNX2FC_MAX_SEQS			255
+#define BNX2FC_MAX_RETRY_CNT		3
+#define BNX2FC_MAX_RPORT_RETRY_CNT	255
 
 #define BNX2FC_READ			(1 << 1)
 #define BNX2FC_WRITE			(1 << 0)
@@ -121,8 +123,10 @@
 #define BNX2FC_MIN_XID			0
 #define BNX2FC_MAX_XID			\
 			(BNX2FC_MAX_OUTSTANDING_CMNDS + BNX2FC_ELSTM_XIDS - 1)
+#define FCOE_MAX_NUM_XIDS		0x2000
 #define FCOE_MIN_XID			(BNX2FC_MAX_XID + 1)
-#define FCOE_MAX_XID			(FCOE_MIN_XID + 4095)
+#define FCOE_MAX_XID			(FCOE_MIN_XID + FCOE_MAX_NUM_XIDS - 1)
+#define FCOE_XIDS_PER_CPU		(FCOE_MIN_XID + (512 * nr_cpu_ids) - 1)
 #define BNX2FC_MAX_LUN			0xFFFF
 #define BNX2FC_MAX_FCP_TGT		256
 #define BNX2FC_MAX_CMD_LEN		16
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index a9af42e83632..abd72a01856d 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -22,7 +22,7 @@ DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
 
 #define DRV_MODULE_NAME		"bnx2fc"
 #define DRV_MODULE_VERSION	BNX2FC_VERSION
-#define DRV_MODULE_RELDATE	"Oct 21, 2011"
+#define DRV_MODULE_RELDATE	"Jan 22, 2011"
 
 
 static char version[] __devinitdata =
@@ -939,8 +939,14 @@ static int bnx2fc_libfc_config(struct fc_lport *lport)
 
 static int bnx2fc_em_config(struct fc_lport *lport)
 {
+	int max_xid;
+
+	if (nr_cpu_ids <= 2)
+		max_xid = FCOE_XIDS_PER_CPU;
+	else
+		max_xid = FCOE_MAX_XID;
 	if (!fc_exch_mgr_alloc(lport, FC_CLASS_3, FCOE_MIN_XID,
-				FCOE_MAX_XID, NULL)) {
+				max_xid, NULL)) {
 		printk(KERN_ERR PFX "em_config:fc_exch_mgr_alloc failed\n");
 		return -ENOMEM;
 	}
@@ -952,8 +958,8 @@ static int bnx2fc_lport_config(struct fc_lport *lport)
 {
 	lport->link_up = 0;
 	lport->qfull = 0;
-	lport->max_retry_count = 3;
-	lport->max_rport_retry_count = 3;
+	lport->max_retry_count = BNX2FC_MAX_RETRY_CNT;
+	lport->max_rport_retry_count = BNX2FC_MAX_RPORT_RETRY_CNT;
 	lport->e_d_tov = 2 * 1000;
 	lport->r_a_tov = 10 * 1000;
 
@@ -1536,6 +1542,7 @@ static void __bnx2fc_destroy(struct bnx2fc_interface *interface)
 static int bnx2fc_destroy(struct net_device *netdev)
 {
 	struct bnx2fc_interface *interface = NULL;
+	struct workqueue_struct *timer_work_queue;
 	int rc = 0;
 
 	rtnl_lock();
@@ -1548,9 +1555,9 @@ static int bnx2fc_destroy(struct net_device *netdev)
 		goto netdev_err;
 	}
 
-
-	destroy_workqueue(interface->timer_work_queue);
+	timer_work_queue = interface->timer_work_queue;
 	__bnx2fc_destroy(interface);
+	destroy_workqueue(timer_work_queue);
 
 netdev_err:
 	mutex_unlock(&bnx2fc_dev_lock);
@@ -2054,6 +2061,7 @@ if_create_err:
 ifput_err:
 	bnx2fc_net_cleanup(interface);
 	bnx2fc_interface_put(interface);
+	goto mod_err;
 netdev_err:
 	module_put(THIS_MODULE);
 mod_err:
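The exchange-ID window now scales with CPU count: with the defines from the bnx2fc.h hunk, bnx2fc_em_config() sizes the offload exchange manager roughly as follows (worked out from the macros above):

	nr_cpu_ids <= 2:  max_xid = FCOE_MIN_XID + 512 * nr_cpu_ids - 1   (e.g. 1024 XIDs on 2 CPUs)
	nr_cpu_ids  > 2:  max_xid = FCOE_MIN_XID + 0x2000 - 1             (the full 8192 XIDs)
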
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index 1ad0b8225560..f9d6f4129093 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -1312,14 +1312,18 @@ int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba)
 		  ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_EXP_DATASN) |
 		/* EMC */
 		(1ULL << ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN));
-	if (error_mask1)
+	if (error_mask1) {
 		iscsi_init2.error_bit_map[0] = error_mask1;
-	else
+		mask64 &= (u32)(~mask64);
+		mask64 |= error_mask1;
+	} else
 		iscsi_init2.error_bit_map[0] = (u32) mask64;
 
-	if (error_mask2)
+	if (error_mask2) {
 		iscsi_init2.error_bit_map[1] = error_mask2;
-	else
+		mask64 &= 0xffffffff;
+		mask64 |= ((u64)error_mask2 << 32);
+	} else
 		iscsi_init2.error_bit_map[1] = (u32) (mask64 >> 32);
 
 	iscsi_error_mask = mask64;
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
index 1a947f1b9729..4927cca733d3 100644
--- a/drivers/scsi/bnx2i/bnx2i_init.c
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -49,11 +49,11 @@ module_param(en_tcp_dack, int, 0664);
 MODULE_PARM_DESC(en_tcp_dack, "Enable TCP Delayed ACK");
 
 unsigned int error_mask1 = 0x00;
-module_param(error_mask1, int, 0664);
+module_param(error_mask1, uint, 0664);
 MODULE_PARM_DESC(error_mask1, "Config FW iSCSI Error Mask #1");
 
 unsigned int error_mask2 = 0x00;
-module_param(error_mask2, int, 0664);
+module_param(error_mask2, uint, 0664);
 MODULE_PARM_DESC(error_mask2, "Config FW iSCSI Error Mask #2");
 
 unsigned int sq_size;
@@ -393,8 +393,9 @@ static void bnx2i_percpu_thread_create(unsigned int cpu)
 
 	p = &per_cpu(bnx2i_percpu, cpu);
 
-	thread = kthread_create(bnx2i_percpu_io_thread, (void *)p,
-				"bnx2i_thread/%d", cpu);
+	thread = kthread_create_on_node(bnx2i_percpu_io_thread, (void *)p,
+					cpu_to_node(cpu),
+					"bnx2i_thread/%d", cpu);
 	/* bind thread to the cpu */
 	if (likely(!IS_ERR(thread))) {
 		kthread_bind(thread, cpu);
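kthread_create_on_node() lets the task structure be allocated on the memory node of the CPU the thread will be bound to, which matters for these per-CPU I/O threads. The general shape of the pattern the hunk applies (sketch):

	struct task_struct *thread;

	thread = kthread_create_on_node(bnx2i_percpu_io_thread, p,
					cpu_to_node(cpu),	/* NUMA node of the target CPU */
					"bnx2i_thread/%d", cpu);
	if (!IS_ERR(thread)) {
		kthread_bind(thread, cpu);	/* pin to that CPU ... */
		wake_up_process(thread);	/* ... then let it run */
	}
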
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index 89afd6d21d89..d9253db1d0e2 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -2147,11 +2147,10 @@ int cxgbi_set_conn_param(struct iscsi_cls_conn *cls_conn,
 			enum iscsi_param param, char *buf, int buflen)
 {
 	struct iscsi_conn *conn = cls_conn->dd_data;
-	struct iscsi_session *session = conn->session;
 	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
 	struct cxgbi_conn *cconn = tcp_conn->dd_data;
 	struct cxgbi_sock *csk = cconn->cep->csk;
-	int value, err = 0;
+	int err;
 
 	log_debug(1 << CXGBI_DBG_ISCSI,
 		"cls_conn 0x%p, param %d, buf(%d) %s.\n",
@@ -2173,15 +2172,7 @@ int cxgbi_set_conn_param(struct iscsi_cls_conn *cls_conn,
 							conn->datadgst_en, 0);
 		break;
 	case ISCSI_PARAM_MAX_R2T:
-		sscanf(buf, "%d", &value);
-		if (value <= 0 || !is_power_of_2(value))
-			return -EINVAL;
-		if (session->max_r2t == value)
-			break;
-		iscsi_tcp_r2tpool_free(session);
-		err = iscsi_set_param(cls_conn, param, buf, buflen);
-		if (!err && iscsi_tcp_r2tpool_alloc(session))
-			return -ENOMEM;
+		return iscsi_tcp_set_max_r2t(conn, buf);
 	case ISCSI_PARAM_MAX_RECV_DLENGTH:
 		err = iscsi_set_param(cls_conn, param, buf, buflen);
 		if (!err)
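The open-coded MAX_R2T handling moves into iscsi_tcp_set_max_r2t(), added to libiscsi_tcp.c elsewhere in this series so that iscsi_tcp and cxgbi can share it. Judging from the lines removed above, the helper centralizes roughly this sequence (sketch, not the helper's verbatim body):

	int value;

	sscanf(buf, "%d", &value);
	if (value <= 0 || !is_power_of_2(value))
		return -EINVAL;
	if (session->max_r2t == value)
		return 0;
	iscsi_tcp_r2tpool_free(session);
	err = iscsi_set_param(cls_conn, param, buf, buflen);
	if (!err && iscsi_tcp_r2tpool_alloc(session))
		return -ENOMEM;
	return err;
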
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index cc75cbea936b..ae7d15c44e2a 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -168,6 +168,14 @@ static struct fc_function_template fcoe_nport_fc_functions = {
 	.show_host_supported_fc4s = 1,
 	.show_host_active_fc4s = 1,
 	.show_host_maxframe_size = 1,
+	.show_host_serial_number = 1,
+	.show_host_manufacturer = 1,
+	.show_host_model = 1,
+	.show_host_model_description = 1,
+	.show_host_hardware_version = 1,
+	.show_host_driver_version = 1,
+	.show_host_firmware_version = 1,
+	.show_host_optionrom_version = 1,
 
 	.show_host_port_id = 1,
 	.show_host_supported_speeds = 1,
@@ -208,6 +216,14 @@ static struct fc_function_template fcoe_vport_fc_functions = {
 	.show_host_supported_fc4s = 1,
 	.show_host_active_fc4s = 1,
 	.show_host_maxframe_size = 1,
+	.show_host_serial_number = 1,
+	.show_host_manufacturer = 1,
+	.show_host_model = 1,
+	.show_host_model_description = 1,
+	.show_host_hardware_version = 1,
+	.show_host_driver_version = 1,
+	.show_host_firmware_version = 1,
+	.show_host_optionrom_version = 1,
 
 	.show_host_port_id = 1,
 	.show_host_supported_speeds = 1,
@@ -364,11 +380,10 @@ static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev,
 	if (!fcoe) {
 		FCOE_NETDEV_DBG(netdev, "Could not allocate fcoe structure\n");
 		fcoe = ERR_PTR(-ENOMEM);
-		goto out_nomod;
+		goto out_putmod;
 	}
 
 	dev_hold(netdev);
-	kref_init(&fcoe->kref);
 
 	/*
 	 * Initialize FIP.
@@ -384,54 +399,18 @@ static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev,
 		kfree(fcoe);
 		dev_put(netdev);
 		fcoe = ERR_PTR(err);
-		goto out_nomod;
+		goto out_putmod;
 	}
 
 	goto out;
 
-out_nomod:
+out_putmod:
 	module_put(THIS_MODULE);
 out:
 	return fcoe;
 }
 
 /**
- * fcoe_interface_release() - fcoe_port kref release function
- * @kref: Embedded reference count in an fcoe_interface struct
- */
-static void fcoe_interface_release(struct kref *kref)
-{
-	struct fcoe_interface *fcoe;
-	struct net_device *netdev;
-
-	fcoe = container_of(kref, struct fcoe_interface, kref);
-	netdev = fcoe->netdev;
-	/* tear-down the FCoE controller */
-	fcoe_ctlr_destroy(&fcoe->ctlr);
-	kfree(fcoe);
-	dev_put(netdev);
-	module_put(THIS_MODULE);
-}
-
-/**
- * fcoe_interface_get() - Get a reference to a FCoE interface
- * @fcoe: The FCoE interface to be held
- */
-static inline void fcoe_interface_get(struct fcoe_interface *fcoe)
-{
-	kref_get(&fcoe->kref);
-}
-
-/**
- * fcoe_interface_put() - Put a reference to a FCoE interface
- * @fcoe: The FCoE interface to be released
- */
-static inline void fcoe_interface_put(struct fcoe_interface *fcoe)
-{
-	kref_put(&fcoe->kref, fcoe_interface_release);
-}
-
-/**
  * fcoe_interface_cleanup() - Clean up a FCoE interface
  * @fcoe: The FCoE interface to be cleaned up
  *
@@ -478,7 +457,11 @@ static void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
 	rtnl_unlock();
 
 	/* Release the self-reference taken during fcoe_interface_create() */
-	fcoe_interface_put(fcoe);
+	/* tear-down the FCoE controller */
+	fcoe_ctlr_destroy(fip);
+	kfree(fcoe);
+	dev_put(netdev);
+	module_put(THIS_MODULE);
 }
 
 /**
@@ -734,6 +717,85 @@ static int fcoe_shost_config(struct fc_lport *lport, struct device *dev)
 	return 0;
 }
 
+
+/**
+ * fcoe_fdmi_info() - Get FDMI related info from net device for SW FCoE
+ * @lport:  The local port that is associated with the net device
+ * @netdev: The associated net device
+ *
+ * Must be called after fcoe_shost_config() as it will use local port mutex
+ *
+ */
+static void fcoe_fdmi_info(struct fc_lport *lport, struct net_device *netdev)
+{
+	struct fcoe_interface *fcoe;
+	struct fcoe_port *port;
+	struct net_device *realdev;
+	int rc;
+	struct netdev_fcoe_hbainfo fdmi;
+
+	port = lport_priv(lport);
+	fcoe = port->priv;
+	realdev = fcoe->realdev;
+
+	if (!realdev)
+		return;
+
+	/* No FDMI state m/c for NPIV ports */
+	if (lport->vport)
+		return;
+
+	if (realdev->netdev_ops->ndo_fcoe_get_hbainfo) {
+		memset(&fdmi, 0, sizeof(fdmi));
+		rc = realdev->netdev_ops->ndo_fcoe_get_hbainfo(realdev,
+							       &fdmi);
+		if (rc) {
+			printk(KERN_INFO "fcoe: Failed to retrieve FDMI "
+					"information from netdev.\n");
+			return;
+		}
+
+		snprintf(fc_host_serial_number(lport->host),
+			 FC_SERIAL_NUMBER_SIZE,
+			 "%s",
+			 fdmi.serial_number);
+		snprintf(fc_host_manufacturer(lport->host),
+			 FC_SERIAL_NUMBER_SIZE,
+			 "%s",
+			 fdmi.manufacturer);
+		snprintf(fc_host_model(lport->host),
+			 FC_SYMBOLIC_NAME_SIZE,
+			 "%s",
+			 fdmi.model);
+		snprintf(fc_host_model_description(lport->host),
+			 FC_SYMBOLIC_NAME_SIZE,
+			 "%s",
+			 fdmi.model_description);
+		snprintf(fc_host_hardware_version(lport->host),
+			 FC_VERSION_STRING_SIZE,
+			 "%s",
+			 fdmi.hardware_version);
+		snprintf(fc_host_driver_version(lport->host),
+			 FC_VERSION_STRING_SIZE,
+			 "%s",
+			 fdmi.driver_version);
+		snprintf(fc_host_optionrom_version(lport->host),
+			 FC_VERSION_STRING_SIZE,
+			 "%s",
+			 fdmi.optionrom_version);
+		snprintf(fc_host_firmware_version(lport->host),
+			 FC_VERSION_STRING_SIZE,
+			 "%s",
+			 fdmi.firmware_version);
+
+		/* Enable FDMI lport states */
+		lport->fdmi_enabled = 1;
+	} else {
+		lport->fdmi_enabled = 0;
+		printk(KERN_INFO "fcoe: No FDMI support.\n");
+	}
+}
+
 /**
  * fcoe_oem_match() - The match routine for the offloaded exchange manager
  * @fp: The I/O frame
@@ -881,9 +943,6 @@ static void fcoe_if_destroy(struct fc_lport *lport)
 		dev_uc_del(netdev, port->data_src_addr);
 	rtnl_unlock();
 
-	/* Release reference held in fcoe_if_create() */
-	fcoe_interface_put(fcoe);
-
 	/* Free queued packets for the per-CPU receive threads */
 	fcoe_percpu_clean(lport);
 
@@ -1047,6 +1106,9 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
 		goto out_lp_destroy;
 	}
 
+	/* Initialize FDMI information */
+	fcoe_fdmi_info(lport, netdev);
+
 	/*
 	 * fcoe_em_alloc() and fcoe_hostlist_add() both
 	 * need to be atomic with respect to other changes to the
@@ -1070,7 +1132,6 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
 		goto out_lp_destroy;
 	}
 
-	fcoe_interface_get(fcoe);
 	return lport;
 
 out_lp_destroy:
@@ -2009,20 +2070,13 @@ static void fcoe_destroy_work(struct work_struct *work)
 {
 	struct fcoe_port *port;
 	struct fcoe_interface *fcoe;
-	int npiv = 0;
 
 	port = container_of(work, struct fcoe_port, destroy_work);
 	mutex_lock(&fcoe_config_mutex);
 
-	/* set if this is an NPIV port */
-	npiv = port->lport->vport ? 1 : 0;
-
 	fcoe = port->priv;
 	fcoe_if_destroy(port->lport);
-
-	/* Do not tear down the fcoe interface for NPIV port */
-	if (!npiv)
-		fcoe_interface_cleanup(fcoe);
+	fcoe_interface_cleanup(fcoe);
 
 	mutex_unlock(&fcoe_config_mutex);
 }
@@ -2593,12 +2647,15 @@ static int fcoe_vport_destroy(struct fc_vport *vport)
 	struct Scsi_Host *shost = vport_to_shost(vport);
 	struct fc_lport *n_port = shost_priv(shost);
 	struct fc_lport *vn_port = vport->dd_data;
-	struct fcoe_port *port = lport_priv(vn_port);
 
 	mutex_lock(&n_port->lp_mutex);
 	list_del(&vn_port->list);
 	mutex_unlock(&n_port->lp_mutex);
-	queue_work(fcoe_wq, &port->destroy_work);
+
+	mutex_lock(&fcoe_config_mutex);
+	fcoe_if_destroy(vn_port);
+	mutex_unlock(&fcoe_config_mutex);
+
 	return 0;
 }
 
diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h
index bcc89e639495..3c2733a12aa1 100644
--- a/drivers/scsi/fcoe/fcoe.h
+++ b/drivers/scsi/fcoe/fcoe.h
@@ -71,8 +71,6 @@ do {                                                            	\
  * @ctlr:	      The FCoE controller (for FIP)
  * @oem:	      The offload exchange manager for all local port
  *		      instances associated with this port
- * @kref:	      The kernel reference
- *
  * This structure is 1:1 with a net device.
  */
 struct fcoe_interface {
@@ -83,7 +81,6 @@ struct fcoe_interface {
 	struct packet_type fip_packet_type;
 	struct fcoe_ctlr   ctlr;
 	struct fc_exch_mgr *oem;
-	struct kref	   kref;
 };
 
 #define fcoe_from_ctlr(fip) container_of(fip, struct fcoe_interface, ctlr)
diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c
index 4d119a326d3b..710e149d41b6 100644
--- a/drivers/scsi/fcoe/fcoe_transport.c
+++ b/drivers/scsi/fcoe/fcoe_transport.c
@@ -619,8 +619,8 @@ static int libfcoe_device_notification(struct notifier_block *notifier,
 
 	switch (event) {
 	case NETDEV_UNREGISTER:
-		printk(KERN_ERR "libfcoe_device_notification: NETDEV_UNREGISTER %s\n",
-				netdev->name);
+		LIBFCOE_TRANSPORT_DBG("NETDEV_UNREGISTER %s\n",
+				      netdev->name);
 		fcoe_del_netdev_mapping(netdev);
 		break;
 	}
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index b96962c39449..500e20dd56ec 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -56,6 +56,7 @@
 /* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
 #define HPSA_DRIVER_VERSION "2.0.2-1"
 #define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
+#define HPSA "hpsa"
 
 /* How long to wait (in milliseconds) for board to go into simple mode */
 #define MAX_CONFIG_WAIT 30000
@@ -202,30 +203,31 @@ static int check_for_unit_attention(struct ctlr_info *h,
 
 	switch (c->err_info->SenseInfo[12]) {
 	case STATE_CHANGED:
-		dev_warn(&h->pdev->dev, "hpsa%d: a state change "
+		dev_warn(&h->pdev->dev, HPSA "%d: a state change "
 			"detected, command retried\n", h->ctlr);
 		break;
 	case LUN_FAILED:
-		dev_warn(&h->pdev->dev, "hpsa%d: LUN failure "
+		dev_warn(&h->pdev->dev, HPSA "%d: LUN failure "
 			"detected, action required\n", h->ctlr);
 		break;
 	case REPORT_LUNS_CHANGED:
-		dev_warn(&h->pdev->dev, "hpsa%d: report LUN data "
+		dev_warn(&h->pdev->dev, HPSA "%d: report LUN data "
 			"changed, action required\n", h->ctlr);
 	/*
-	 * Note: this REPORT_LUNS_CHANGED condition only occurs on the MSA2012.
+	 * Note: this REPORT_LUNS_CHANGED condition only occurs on the external
+	 * target (array) devices.
 	 */
 		break;
 	case POWER_OR_RESET:
-		dev_warn(&h->pdev->dev, "hpsa%d: a power on "
+		dev_warn(&h->pdev->dev, HPSA "%d: a power on "
 			"or device reset detected\n", h->ctlr);
 		break;
 	case UNIT_ATTENTION_CLEARED:
-		dev_warn(&h->pdev->dev, "hpsa%d: unit attention "
+		dev_warn(&h->pdev->dev, HPSA "%d: unit attention "
 		    "cleared by another initiator\n", h->ctlr);
 		break;
 	default:
-		dev_warn(&h->pdev->dev, "hpsa%d: unknown "
+		dev_warn(&h->pdev->dev, HPSA "%d: unknown "
 			"unit attention detected\n", h->ctlr);
 		break;
 	}
@@ -296,11 +298,23 @@ static u32 unresettable_controller[] = {
 	0x40800E11, /* Smart Array 5i */
 	0x409C0E11, /* Smart Array 6400 */
 	0x409D0E11, /* Smart Array 6400 EM */
+	0x40700E11, /* Smart Array 5300 */
+	0x40820E11, /* Smart Array 532 */
+	0x40830E11, /* Smart Array 5312 */
+	0x409A0E11, /* Smart Array 641 */
+	0x409B0E11, /* Smart Array 642 */
+	0x40910E11, /* Smart Array 6i */
 };
 
 /* List of controllers which cannot even be soft reset */
 static u32 soft_unresettable_controller[] = {
 	0x40800E11, /* Smart Array 5i */
+	0x40700E11, /* Smart Array 5300 */
+	0x40820E11, /* Smart Array 532 */
+	0x40830E11, /* Smart Array 5312 */
+	0x409A0E11, /* Smart Array 641 */
+	0x409B0E11, /* Smart Array 642 */
+	0x40910E11, /* Smart Array 6i */
 	/* Exclude 640x boards.  These are two pci devices in one slot
 	 * which share a battery backed cache module.  One controls the
 	 * cache, the other accesses the cache through the one that controls
@@ -475,8 +489,8 @@ static struct device_attribute *hpsa_shost_attrs[] = {
 
 static struct scsi_host_template hpsa_driver_template = {
 	.module			= THIS_MODULE,
-	.name			= "hpsa",
-	.proc_name		= "hpsa",
+	.name			= HPSA,
+	.proc_name		= HPSA,
 	.queuecommand		= hpsa_scsi_queue_command,
 	.scan_start		= hpsa_scan_start,
 	.scan_finished		= hpsa_scan_finished,
@@ -577,21 +591,19 @@ static int hpsa_find_target_lun(struct ctlr_info *h,
 	int i, found = 0;
 	DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);
 
-	memset(&lun_taken[0], 0, HPSA_MAX_DEVICES >> 3);
+	bitmap_zero(lun_taken, HPSA_MAX_DEVICES);
 
 	for (i = 0; i < h->ndevices; i++) {
 		if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
-			set_bit(h->dev[i]->target, lun_taken);
+			__set_bit(h->dev[i]->target, lun_taken);
 	}
 
-	for (i = 0; i < HPSA_MAX_DEVICES; i++) {
-		if (!test_bit(i, lun_taken)) {
-			/* *bus = 1; */
-			*target = i;
-			*lun = 0;
-			found = 1;
-			break;
-		}
+	i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
+	if (i < HPSA_MAX_DEVICES) {
+		/* *bus = 1; */
+		*target = i;
+		*lun = 0;
+		found = 1;
 	}
 	return !found;
 }
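
The rewritten lookup swaps an open-coded scan for the kernel bitmap helpers (bitmap_zero(), __set_bit(), find_first_zero_bit()). A hedged userspace sketch of the same pattern, with a toy MAX_DEVICES standing in for HPSA_MAX_DEVICES:

    #include <limits.h>
    #include <stdio.h>

    #define MAX_DEVICES 64
    #define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))
    #define BITMAP_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

    /* userspace stand-ins for bitmap_zero()/__set_bit()/find_first_zero_bit() */
    static void map_zero(unsigned long *map, unsigned int bits)
    {
        unsigned int i;

        for (i = 0; i < BITMAP_LONGS(bits); i++)
            map[i] = 0;
    }

    static void map_set(unsigned long *map, unsigned int bit)
    {
        map[bit / BITS_PER_LONG] |= 1UL << (bit % BITS_PER_LONG);
    }

    static unsigned int map_first_zero(const unsigned long *map,
                                       unsigned int bits)
    {
        unsigned int i;

        for (i = 0; i < bits; i++)
            if (!(map[i / BITS_PER_LONG] & (1UL << (i % BITS_PER_LONG))))
                return i;
        return bits; /* == size means "no free slot", as in the kernel */
    }

    int main(void)
    {
        unsigned long taken[BITMAP_LONGS(MAX_DEVICES)];

        map_zero(taken, MAX_DEVICES);
        map_set(taken, 0);
        map_set(taken, 1);
        /* prints 2: the first unused target number */
        printf("%u\n", map_first_zero(taken, MAX_DEVICES));
        return 0;
    }
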
@@ -675,6 +687,20 @@ lun_assigned:
 	return 0;
 }
 
+/* Update an entry in h->dev[] array. */
+static void hpsa_scsi_update_entry(struct ctlr_info *h, int hostno,
+	int entry, struct hpsa_scsi_dev_t *new_entry)
+{
+	/* assumes h->devlock is held */
+	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
+
+	/* Raid level changed. */
+	h->dev[entry]->raid_level = new_entry->raid_level;
+	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d updated.\n",
+		scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
+		new_entry->target, new_entry->lun);
+}
+
 /* Replace an entry from h->dev[] array. */
 static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno,
 	int entry, struct hpsa_scsi_dev_t *new_entry,
@@ -781,10 +807,25 @@ static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
 	return 1;
 }
 
+static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
+	struct hpsa_scsi_dev_t *dev2)
+{
+	/* Device attributes that can change, but don't mean
+	 * that the device is a different device, nor that the OS
+	 * needs to be told anything about the change.
+	 */
+	if (dev1->raid_level != dev2->raid_level)
+		return 1;
+	return 0;
+}
+
 /* Find needle in haystack.  If exact match found, return DEVICE_SAME,
  * and return needle location in *index.  If scsi3addr matches, but not
  * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
- * location in *index.  If needle not found, return DEVICE_NOT_FOUND.
+ * location in *index.
+ * In the case of a minor device attribute change, such as RAID level, just
+ * return DEVICE_UPDATED, along with the updated device's location in index.
+ * If needle not found, return DEVICE_NOT_FOUND.
  */
 static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
 	struct hpsa_scsi_dev_t *haystack[], int haystack_size,
@@ -794,15 +835,19 @@ static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
 #define DEVICE_NOT_FOUND 0
 #define DEVICE_CHANGED 1
 #define DEVICE_SAME 2
+#define DEVICE_UPDATED 3
 	for (i = 0; i < haystack_size; i++) {
 		if (haystack[i] == NULL) /* previously removed. */
 			continue;
 		if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
 			*index = i;
-			if (device_is_the_same(needle, haystack[i]))
+			if (device_is_the_same(needle, haystack[i])) {
+				if (device_updated(needle, haystack[i]))
+					return DEVICE_UPDATED;
 				return DEVICE_SAME;
-			else
+			} else {
 				return DEVICE_CHANGED;
+			}
 		}
 	}
 	*index = -1;
@@ -838,6 +883,8 @@ static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
 	 * sd[] and remove them from h->dev[], and for any
 	 * devices which have changed, remove the old device
 	 * info and add the new device info.
+	 * If minor device attributes change, just update
+	 * the existing device structure.
 	 */
 	i = 0;
 	nremoved = 0;
@@ -858,6 +905,8 @@ static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
 			 * at the bottom of hpsa_update_scsi_devices()
 			 */
 			sd[entry] = NULL;
+		} else if (device_change == DEVICE_UPDATED) {
+			hpsa_scsi_update_entry(h, hostno, i, sd[entry]);
 		}
 		i++;
 	}
@@ -1257,46 +1306,6 @@ static void complete_scsi_command(struct CommandList *cp)
 	cmd_free(h, cp);
 }
 
-static int hpsa_scsi_detect(struct ctlr_info *h)
-{
-	struct Scsi_Host *sh;
-	int error;
-
-	sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
-	if (sh == NULL)
-		goto fail;
-
-	sh->io_port = 0;
-	sh->n_io_port = 0;
-	sh->this_id = -1;
-	sh->max_channel = 3;
-	sh->max_cmd_len = MAX_COMMAND_SIZE;
-	sh->max_lun = HPSA_MAX_LUN;
-	sh->max_id = HPSA_MAX_LUN;
-	sh->can_queue = h->nr_cmds;
-	sh->cmd_per_lun = h->nr_cmds;
-	sh->sg_tablesize = h->maxsgentries;
-	h->scsi_host = sh;
-	sh->hostdata[0] = (unsigned long) h;
-	sh->irq = h->intr[h->intr_mode];
-	sh->unique_id = sh->irq;
-	error = scsi_add_host(sh, &h->pdev->dev);
-	if (error)
-		goto fail_host_put;
-	scsi_scan_host(sh);
-	return 0;
-
- fail_host_put:
-	dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_add_host"
-		" failed for controller %d\n", h->ctlr);
-	scsi_host_put(sh);
-	return error;
- fail:
-	dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_host_alloc"
-		" failed for controller %d\n", h->ctlr);
-	return -ENOMEM;
-}
-
 static void hpsa_pci_unmap(struct pci_dev *pdev,
 	struct CommandList *c, int sg_used, int data_direction)
 {
@@ -1641,7 +1650,7 @@ bail_out:
 	return 1;
 }
 
-static unsigned char *msa2xxx_model[] = {
+static unsigned char *ext_target_model[] = {
 	"MSA2012",
 	"MSA2024",
 	"MSA2312",
@@ -1650,78 +1659,54 @@ static unsigned char *msa2xxx_model[] = {
 	NULL,
 };
 
-static int is_msa2xxx(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
+static int is_ext_target(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
 {
 	int i;
 
-	for (i = 0; msa2xxx_model[i]; i++)
-		if (strncmp(device->model, msa2xxx_model[i],
-			strlen(msa2xxx_model[i])) == 0)
+	for (i = 0; ext_target_model[i]; i++)
+		if (strncmp(device->model, ext_target_model[i],
+			strlen(ext_target_model[i])) == 0)
 			return 1;
 	return 0;
 }
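
is_ext_target() is a simple prefix match over a NULL-terminated model table. A standalone sketch of the idiom (model strings copied from ext_target_model[] above; main() is illustrative only):

    #include <stdio.h>
    #include <string.h>

    static const char *ext_target_model[] = {
        "MSA2012", "MSA2024", "MSA2312", "MSA2324", NULL,
    };

    static int is_ext_target_model(const char *model)
    {
        int i;

        /* match if any table entry is a prefix of the model string */
        for (i = 0; ext_target_model[i]; i++)
            if (strncmp(model, ext_target_model[i],
                        strlen(ext_target_model[i])) == 0)
                return 1;
        return 0;
    }

    int main(void)
    {
        printf("%d\n", is_ext_target_model("MSA2324 G2")); /* 1 */
        printf("%d\n", is_ext_target_model("P2000"));      /* 0 */
        return 0;
    }
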
 
 /* Helper function to assign bus, target, lun mapping of devices.
- * Puts non-msa2xxx logical volumes on bus 0, msa2xxx logical
+ * Puts non-external target logical volumes on bus 0, external target logical
  * volumes on bus 1, physical devices on bus 2, and the hba on bus 3.
  * Logical drive target and lun are assigned at this time, but
  * physical device lun and target assignment are deferred (assigned
  * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
  */
 static void figure_bus_target_lun(struct ctlr_info *h,
-	u8 *lunaddrbytes, int *bus, int *target, int *lun,
-	struct hpsa_scsi_dev_t *device)
+	u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device)
 {
-	u32 lunid;
+	u32 lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
 
-	if (is_logical_dev_addr_mode(lunaddrbytes)) {
-		/* logical device */
-		if (unlikely(is_scsi_rev_5(h))) {
-			/* p1210m, logical drives lun assignments
-			 * match SCSI REPORT LUNS data.
-			 */
-			lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
-			*bus = 0;
-			*target = 0;
-			*lun = (lunid & 0x3fff) + 1;
-		} else {
-			/* not p1210m... */
-			lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
-			if (is_msa2xxx(h, device)) {
-				/* msa2xxx way, put logicals on bus 1
-				 * and match target/lun numbers box
-				 * reports.
-				 */
-				*bus = 1;
-				*target = (lunid >> 16) & 0x3fff;
-				*lun = lunid & 0x00ff;
-			} else {
-				/* Traditional smart array way. */
-				*bus = 0;
-				*lun = 0;
-				*target = lunid & 0x3fff;
-			}
-		}
-	} else {
-		/* physical device */
+	if (!is_logical_dev_addr_mode(lunaddrbytes)) {
+		/* physical device, target and lun filled in later */
 		if (is_hba_lunid(lunaddrbytes))
-			if (unlikely(is_scsi_rev_5(h))) {
-				*bus = 0; /* put p1210m ctlr at 0,0,0 */
-				*target = 0;
-				*lun = 0;
-				return;
-			} else
-				*bus = 3; /* traditional smartarray */
+			hpsa_set_bus_target_lun(device, 3, 0, lunid & 0x3fff);
 		else
-			*bus = 2; /* physical disk */
-		*target = -1;
-		*lun = -1; /* we will fill these in later. */
+			/* defer target, lun assignment for physical devices */
+			hpsa_set_bus_target_lun(device, 2, -1, -1);
+		return;
+	}
+	/* It's a logical device */
+	if (is_ext_target(h, device)) {
+		/* External target way: put logicals on bus 1 and match
+		 * the target/lun numbers the box reports; other smart
+		 * arrays go on bus 0, target 0, matching the lunid.
+		 */
+		hpsa_set_bus_target_lun(device,
+			1, (lunid >> 16) & 0x3fff, lunid & 0x00ff);
+		return;
 	}
+	hpsa_set_bus_target_lun(device, 0, 0, lunid & 0x3fff);
 }
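
The decode above treats the 32-bit little-endian lunid as a packed address: for external targets the target index sits in bits 16-29 and the lun in bits 0-7, while other logical volumes take the low 14 bits as the target. A worked example with an invented lunid value:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t lunid = 0x00050003; /* hypothetical external-target lunid */

        printf("target=%u lun=%u\n",
               (unsigned)((lunid >> 16) & 0x3fff), /* -> 5 */
               (unsigned)(lunid & 0x00ff));        /* -> 3 */
        return 0;
    }
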
 
 /*
  * If there is no lun 0 on a target, linux won't find any devices.
- * For the MSA2xxx boxes, we have to manually detect the enclosure
+ * For the external targets (arrays), we have to manually detect the enclosure
  * which is at lun zero, as CCISS_REPORT_PHYSICAL_LUNS doesn't report
  * it for some reason.  *tmpdevice is the target we're adding,
  * this_device is a pointer into the current element of currentsd[]
@@ -1730,46 +1715,46 @@ static void figure_bus_target_lun(struct ctlr_info *h,
  * lun 0 assigned.
  * Returns 1 if an enclosure was added, 0 if not.
  */
-static int add_msa2xxx_enclosure_device(struct ctlr_info *h,
+static int add_ext_target_dev(struct ctlr_info *h,
 	struct hpsa_scsi_dev_t *tmpdevice,
 	struct hpsa_scsi_dev_t *this_device, u8 *lunaddrbytes,
-	int bus, int target, int lun, unsigned long lunzerobits[],
-	int *nmsa2xxx_enclosures)
+	unsigned long lunzerobits[], int *n_ext_target_devs)
 {
 	unsigned char scsi3addr[8];
 
-	if (test_bit(target, lunzerobits))
+	if (test_bit(tmpdevice->target, lunzerobits))
 		return 0; /* There is already a lun 0 on this target. */
 
 	if (!is_logical_dev_addr_mode(lunaddrbytes))
 		return 0; /* It's the logical targets that may lack lun 0. */
 
-	if (!is_msa2xxx(h, tmpdevice))
-		return 0; /* It's only the MSA2xxx that have this problem. */
+	if (!is_ext_target(h, tmpdevice))
+		return 0; /* Only external target devices have this problem. */
 
-	if (lun == 0) /* if lun is 0, then obviously we have a lun 0. */
+	if (tmpdevice->lun == 0) /* if lun is 0, then we have a lun 0. */
 		return 0;
 
 	memset(scsi3addr, 0, 8);
-	scsi3addr[3] = target;
+	scsi3addr[3] = tmpdevice->target;
 	if (is_hba_lunid(scsi3addr))
 		return 0; /* Don't add the RAID controller here. */
 
 	if (is_scsi_rev_5(h))
 		return 0; /* p1210m doesn't need to do this. */
 
-	if (*nmsa2xxx_enclosures >= MAX_MSA2XXX_ENCLOSURES) {
-		dev_warn(&h->pdev->dev, "Maximum number of MSA2XXX "
-			"enclosures exceeded.  Check your hardware "
+	if (*n_ext_target_devs >= MAX_EXT_TARGETS) {
+		dev_warn(&h->pdev->dev, "Maximum number of external "
+			"target devices exceeded.  Check your hardware "
 			"configuration.");
 		return 0;
 	}
 
 	if (hpsa_update_device_info(h, scsi3addr, this_device, NULL))
 		return 0;
-	(*nmsa2xxx_enclosures)++;
-	hpsa_set_bus_target_lun(this_device, bus, target, 0);
-	set_bit(target, lunzerobits);
+	(*n_ext_target_devs)++;
+	hpsa_set_bus_target_lun(this_device,
+				tmpdevice->bus, tmpdevice->target, 0);
+	set_bit(tmpdevice->target, lunzerobits);
 	return 1;
 }
 
@@ -1863,10 +1848,9 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
 	struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
 	int ncurrent = 0;
 	int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 8;
-	int i, nmsa2xxx_enclosures, ndevs_to_allocate;
-	int bus, target, lun;
+	int i, n_ext_target_devs, ndevs_to_allocate;
 	int raid_ctlr_position;
-	DECLARE_BITMAP(lunzerobits, HPSA_MAX_TARGETS_PER_CTLR);
+	DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);
 
 	currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL);
 	physdev_list = kzalloc(reportlunsize, GFP_KERNEL);
@@ -1883,11 +1867,11 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
 			logdev_list, &nlogicals))
 		goto out;
 
-	/* We might see up to 32 MSA2xxx enclosures, actually 8 of them
-	 * but each of them 4 times through different paths.  The plus 1
-	 * is for the RAID controller.
+	/* We might see up to the maximum number of logical and physical disks
+	 * plus external target devices, and a device for the local RAID
+	 * controller.
 	 */
-	ndevs_to_allocate = nphysicals + nlogicals + MAX_MSA2XXX_ENCLOSURES + 1;
+	ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1;
 
 	/* Allocate the per device structures */
 	for (i = 0; i < ndevs_to_allocate; i++) {
@@ -1913,7 +1897,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
 		raid_ctlr_position = nphysicals + nlogicals;
 
 	/* adjust our table of devices */
-	nmsa2xxx_enclosures = 0;
+	n_ext_target_devs = 0;
 	for (i = 0; i < nphysicals + nlogicals + 1; i++) {
 		u8 *lunaddrbytes, is_OBDR = 0;
 
@@ -1929,26 +1913,24 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
 		if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
 							&is_OBDR))
 			continue; /* skip it if we can't talk to it. */
-		figure_bus_target_lun(h, lunaddrbytes, &bus, &target, &lun,
-			tmpdevice);
+		figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
 		this_device = currentsd[ncurrent];
 
 		/*
-		 * For the msa2xxx boxes, we have to insert a LUN 0 which
+		 * For external target devices, we have to insert a LUN 0 which
 		 * doesn't show up in CCISS_REPORT_PHYSICAL data, but there
 		 * is nonetheless an enclosure device there.  We have to
 		 * present it, otherwise linux won't find anything if
 		 * there is no lun 0.
 		 */
-		if (add_msa2xxx_enclosure_device(h, tmpdevice, this_device,
-				lunaddrbytes, bus, target, lun, lunzerobits,
-				&nmsa2xxx_enclosures)) {
+		if (add_ext_target_dev(h, tmpdevice, this_device,
+				lunaddrbytes, lunzerobits,
+				&n_ext_target_devs)) {
 			ncurrent++;
 			this_device = currentsd[ncurrent];
 		}
 
 		*this_device = *tmpdevice;
-		hpsa_set_bus_target_lun(this_device, bus, target, lun);
 
 		switch (this_device->devtype) {
 		case TYPE_ROM:
@@ -2228,13 +2210,42 @@ static void hpsa_unregister_scsi(struct ctlr_info *h)
 
 static int hpsa_register_scsi(struct ctlr_info *h)
 {
-	int rc;
+	struct Scsi_Host *sh;
+	int error;
 
-	rc = hpsa_scsi_detect(h);
-	if (rc != 0)
-		dev_err(&h->pdev->dev, "hpsa_register_scsi: failed"
-			" hpsa_scsi_detect(), rc is %d\n", rc);
-	return rc;
+	sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
+	if (sh == NULL)
+		goto fail;
+
+	sh->io_port = 0;
+	sh->n_io_port = 0;
+	sh->this_id = -1;
+	sh->max_channel = 3;
+	sh->max_cmd_len = MAX_COMMAND_SIZE;
+	sh->max_lun = HPSA_MAX_LUN;
+	sh->max_id = HPSA_MAX_LUN;
+	sh->can_queue = h->nr_cmds;
+	sh->cmd_per_lun = h->nr_cmds;
+	sh->sg_tablesize = h->maxsgentries;
+	h->scsi_host = sh;
+	sh->hostdata[0] = (unsigned long) h;
+	sh->irq = h->intr[h->intr_mode];
+	sh->unique_id = sh->irq;
+	error = scsi_add_host(sh, &h->pdev->dev);
+	if (error)
+		goto fail_host_put;
+	scsi_scan_host(sh);
+	return 0;
+
+ fail_host_put:
+	dev_err(&h->pdev->dev, "%s: scsi_add_host"
+		" failed for controller %d\n", __func__, h->ctlr);
+	scsi_host_put(sh);
+	return error;
+ fail:
+	dev_err(&h->pdev->dev, "%s: scsi_host_alloc"
+		" failed for controller %d\n", __func__, h->ctlr);
+	return -ENOMEM;
 }
 
 static int wait_for_device_to_become_ready(struct ctlr_info *h,
@@ -2700,16 +2711,16 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
 		status = -EINVAL;
 		goto cleanup1;
 	}
-	if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) {
+	if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) {
 		status = -EINVAL;
 		goto cleanup1;
 	}
-	buff = kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL);
+	buff = kzalloc(SG_ENTRIES_IN_CMD * sizeof(char *), GFP_KERNEL);
 	if (!buff) {
 		status = -ENOMEM;
 		goto cleanup1;
 	}
-	buff_size = kmalloc(MAXSGENTRIES * sizeof(int), GFP_KERNEL);
+	buff_size = kmalloc(SG_ENTRIES_IN_CMD * sizeof(int), GFP_KERNEL);
 	if (!buff_size) {
 		status = -ENOMEM;
 		goto cleanup1;
@@ -3354,7 +3365,7 @@ static int hpsa_controller_hard_reset(struct pci_dev *pdev,
 static __devinit void init_driver_version(char *driver_version, int len)
 {
 	memset(driver_version, 0, len);
-	strncpy(driver_version, "hpsa " HPSA_DRIVER_VERSION, len - 1);
+	strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1);
 }
 
 static __devinit int write_driver_ver_to_cfgtable(
@@ -3935,7 +3946,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
 		return err;
 	}
 
-	err = pci_request_regions(h->pdev, "hpsa");
+	err = pci_request_regions(h->pdev, HPSA);
 	if (err) {
 		dev_err(&h->pdev->dev,
 			"cannot obtain PCI resources, aborting\n");
@@ -4253,7 +4264,7 @@ static void start_controller_lockup_detector(struct ctlr_info *h)
 		spin_lock_init(&lockup_detector_lock);
 		hpsa_lockup_detector =
 			kthread_run(detect_controller_lockup_thread,
-						NULL, "hpsa");
+						NULL, HPSA);
 	}
 	if (!hpsa_lockup_detector) {
 		dev_warn(&h->pdev->dev,
@@ -4325,7 +4336,7 @@ reinit_after_soft_reset:
 	if (rc != 0)
 		goto clean1;
 
-	sprintf(h->devname, "hpsa%d", number_of_controllers);
+	sprintf(h->devname, HPSA "%d", number_of_controllers);
 	h->ctlr = number_of_controllers;
 	number_of_controllers++;
 
@@ -4482,6 +4493,14 @@ static void hpsa_shutdown(struct pci_dev *pdev)
 #endif				/* CONFIG_PCI_MSI */
 }
 
+static void __devexit hpsa_free_device_info(struct ctlr_info *h)
+{
+	int i;
+
+	for (i = 0; i < h->ndevices; i++)
+		kfree(h->dev[i]);
+}
+
 static void __devexit hpsa_remove_one(struct pci_dev *pdev)
 {
 	struct ctlr_info *h;
@@ -4497,6 +4516,7 @@ static void __devexit hpsa_remove_one(struct pci_dev *pdev)
 	iounmap(h->vaddr);
 	iounmap(h->transtable);
 	iounmap(h->cfgtable);
+	hpsa_free_device_info(h);
 	hpsa_free_sg_chain_blocks(h);
 	pci_free_consistent(h->pdev,
 		h->nr_cmds * sizeof(struct CommandList),
@@ -4530,7 +4550,7 @@ static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
 }
 
 static struct pci_driver hpsa_pci_driver = {
-	.name = "hpsa",
+	.name = HPSA,
 	.probe = hpsa_init_one,
 	.remove = __devexit_p(hpsa_remove_one),
 	.id_table = hpsa_pci_device_id,	/* id_table */
@@ -4592,15 +4612,15 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
 	 * Each SG entry requires 16 bytes.  The eight registers are programmed
 	 * with the number of 16-byte blocks a command of that size requires.
 	 * The smallest command possible requires 5 such 16 byte blocks.
-	 * the largest command possible requires MAXSGENTRIES + 4 16-byte
+	 * The largest command possible requires SG_ENTRIES_IN_CMD + 4 16-byte
 	 * blocks.  Note, this only extends to the SG entries contained
 	 * within the command block, and does not extend to chained blocks
 	 * of SG elements.   bft[] contains the eight values we write to
 	 * the registers.  They are not evenly distributed, but have more
 	 * sizes for small commands, and fewer sizes for larger commands.
 	 */
-	int bft[8] = {5, 6, 8, 10, 12, 20, 28, MAXSGENTRIES + 4};
-	BUILD_BUG_ON(28 > MAXSGENTRIES + 4);
+	int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
+	BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);
 	/*  5 = 1 s/g entry or 4k
 	 *  6 = 2 s/g entry or 8k
 	 *  8 = 4 s/g entry or 16k
@@ -4613,8 +4633,9 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
 	memset(h->reply_pool, 0, h->reply_pool_size);
 	h->reply_pool_head = h->reply_pool;
 
-	bft[7] = h->max_sg_entries + 4;
-	calc_bucket_map(bft, ARRAY_SIZE(bft), 32, h->blockFetchTable);
+	bft[7] = SG_ENTRIES_IN_CMD + 4;
+	calc_bucket_map(bft, ARRAY_SIZE(bft),
+				SG_ENTRIES_IN_CMD, h->blockFetchTable);
 	for (i = 0; i < 8; i++)
 		writel(bft[i], &h->transtable->BlockFetch[i]);
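
Per the comment above, a command's footprint is its SG entry count plus four 16-byte blocks, so bft[] is just that function sampled at a few sizes. A quick standalone check of the arithmetic:

    #include <stdio.h>

    #define SG_ENTRIES_IN_CMD 32 /* from hpsa_cmd.h */

    int main(void)
    {
        int nsg;

        /* 1 -> 5, 2 -> 6, 4 -> 8, ..., 32 -> 36 (= bft[7]) */
        for (nsg = 1; nsg <= SG_ENTRIES_IN_CMD; nsg *= 2)
            printf("%2d SG entries -> %2d blocks\n", nsg, nsg + 4);
        return 0;
    }
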
 
@@ -4652,14 +4673,13 @@ static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
 		return;
 
 	hpsa_get_max_perf_mode_cmds(h);
-	h->max_sg_entries = 32;
 	/* Performant mode ring buffer and supporting data structures */
 	h->reply_pool_size = h->max_commands * sizeof(u64);
 	h->reply_pool = pci_alloc_consistent(h->pdev, h->reply_pool_size,
 				&(h->reply_pool_dhandle));
 
 	/* Need a block fetch table for performant mode */
-	h->blockFetchTable = kmalloc(((h->max_sg_entries+1) *
+	h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
 				sizeof(u32)), GFP_KERNEL);
 
 	if ((h->reply_pool == NULL)
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index 91edafb8c7e6..7b28d54fa878 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -58,7 +58,6 @@ struct ctlr_info {
 	unsigned long paddr;
 	int 	nr_cmds; /* Number of commands allowed on this controller */
 	struct CfgTable __iomem *cfgtable;
-	int     max_sg_entries;
 	int	interrupts_enabled;
 	int	major;
 	int 	max_commands;
@@ -317,7 +316,7 @@ static unsigned long SA5_completed(struct ctlr_info *h)
 		dev_dbg(&h->pdev->dev, "Read %lx back from board\n",
 			register_value);
 	else
-		dev_dbg(&h->pdev->dev, "hpsa: FIFO Empty read\n");
+		dev_dbg(&h->pdev->dev, "FIFO Empty read\n");
 #endif
 
 	return register_value;
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
index 3fd4715935c2..8049815d8c1e 100644
--- a/drivers/scsi/hpsa_cmd.h
+++ b/drivers/scsi/hpsa_cmd.h
@@ -23,7 +23,7 @@
 
 /* general boundary definitions */
 #define SENSEINFOBYTES          32 /* may vary between hbas */
-#define MAXSGENTRIES            32
+#define SG_ENTRIES_IN_CMD	32 /* Max SG entries excluding chain blocks */
 #define HPSA_SG_CHAIN		0x80000000
 #define MAXREPLYQS              256
 
@@ -122,12 +122,11 @@ union u64bit {
 };
 
 /* FIXME this is a per controller value (barf!) */
-#define HPSA_MAX_TARGETS_PER_CTLR 16
 #define HPSA_MAX_LUN 1024
 #define HPSA_MAX_PHYS_LUN 1024
-#define MAX_MSA2XXX_ENCLOSURES 32
+#define MAX_EXT_TARGETS 32
 #define HPSA_MAX_DEVICES (HPSA_MAX_PHYS_LUN + HPSA_MAX_LUN + \
-	MAX_MSA2XXX_ENCLOSURES + 1) /* + 1 is for the controller itself */
+	MAX_EXT_TARGETS + 1) /* + 1 is for the controller itself */
 
 /* SCSI-3 Commands */
 #pragma pack(1)
@@ -282,7 +281,7 @@ struct CommandList {
 	struct CommandListHeader Header;
 	struct RequestBlock      Request;
 	struct ErrDescriptor     ErrDesc;
-	struct SGDescriptor      SG[MAXSGENTRIES];
+	struct SGDescriptor      SG[SG_ENTRIES_IN_CMD];
 	/* information associated with the command */
 	u32			   busaddr; /* physical addr of this record */
 	struct ErrorInfo *err_info; /* pointer to the allocated mem */
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index b538f0883fd2..cdfe5a16de2a 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -183,7 +183,7 @@ static const struct ipr_chip_t ipr_chip[] = {
 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
-	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
 };
 
 static int ipr_max_bus_speeds [] = {
@@ -9191,15 +9191,15 @@ static struct pci_device_id ipr_pci_table[] __devinitdata = {
 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
-	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
-	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
-	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
-	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
-		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575D, 0, 0, 0 },
-	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
 	{ }
 };
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index b13f9cc12279..f94eaee2ff16 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -58,7 +58,7 @@
 #define PCI_DEVICE_ID_IBM_OBSIDIAN_E	0x0339
 
 #define PCI_DEVICE_ID_IBM_CROC_FPGA_E2          0x033D
-#define PCI_DEVICE_ID_IBM_CROC_ASIC_E2          0x034A
+#define PCI_DEVICE_ID_IBM_CROCODILE             0x034A
 
 #define IPR_SUBS_DEV_ID_2780	0x0264
 #define IPR_SUBS_DEV_ID_5702	0x0266
@@ -92,7 +92,7 @@
 #define IPR_SUBS_DEV_ID_57B1    0x0355
 
 #define IPR_SUBS_DEV_ID_574D    0x0356
-#define IPR_SUBS_DEV_ID_575D    0x035D
+#define IPR_SUBS_DEV_ID_57C8    0x035D
 
 #define IPR_NAME				"ipr"
 
diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c
index 6ca9b26bb2fb..d4bf9c12ecd4 100644
--- a/drivers/scsi/isci/host.c
+++ b/drivers/scsi/isci/host.c
@@ -649,15 +649,13 @@ static void isci_host_start_complete(struct isci_host *ihost, enum sci_status co
 
 int isci_host_scan_finished(struct Scsi_Host *shost, unsigned long time)
 {
-	struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha;
+	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
+	struct isci_host *ihost = ha->lldd_ha;
 
 	if (test_bit(IHOST_START_PENDING, &ihost->flags))
 		return 0;
 
-	/* todo: use sas_flush_discovery once it is upstream */
-	scsi_flush_work(shost);
-
-	scsi_flush_work(shost);
+	sas_drain_work(ha);
 
 	dev_dbg(&ihost->pdev->dev,
 		"%s: ihost->status = %d, time = %ld\n",
@@ -1490,6 +1488,15 @@ sci_controller_set_interrupt_coalescence(struct isci_host *ihost,
 static void sci_controller_ready_state_enter(struct sci_base_state_machine *sm)
 {
 	struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
+	u32 val;
+
+	/* enable clock gating for power control of the scu unit */
+	val = readl(&ihost->smu_registers->clock_gating_control);
+	val &= ~(SMU_CGUCR_GEN_BIT(REGCLK_ENABLE) |
+		 SMU_CGUCR_GEN_BIT(TXCLK_ENABLE) |
+		 SMU_CGUCR_GEN_BIT(XCLK_ENABLE));
+	val |= SMU_CGUCR_GEN_BIT(IDLE_ENABLE);
+	writel(val, &ihost->smu_registers->clock_gating_control);
 
 	/* set the default interrupt coalescence number and timeout value. */
 	sci_controller_set_interrupt_coalescence(ihost, 0, 0);
diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h
index 5477f0fa8233..adbad69d1069 100644
--- a/drivers/scsi/isci/host.h
+++ b/drivers/scsi/isci/host.h
@@ -187,6 +187,7 @@ struct isci_host {
 	int id; /* unique within a given pci device */
 	struct isci_phy phys[SCI_MAX_PHYS];
 	struct isci_port ports[SCI_MAX_PORTS + 1]; /* includes dummy port */
+	struct asd_sas_port sas_ports[SCI_MAX_PORTS];
 	struct sas_ha_struct sas_ha;
 
 	spinlock_t state_lock;
@@ -393,24 +394,6 @@ static inline int sci_remote_device_node_count(struct isci_remote_device *idev)
 #define sci_controller_clear_invalid_phy(controller, phy) \
 	((controller)->invalid_phy_mask &= ~(1 << (phy)->phy_index))
 
-static inline struct device *sciphy_to_dev(struct isci_phy *iphy)
-{
-
-	if (!iphy || !iphy->isci_port || !iphy->isci_port->isci_host)
-		return NULL;
-
-	return &iphy->isci_port->isci_host->pdev->dev;
-}
-
-static inline struct device *sciport_to_dev(struct isci_port *iport)
-{
-
-	if (!iport || !iport->isci_host)
-		return NULL;
-
-	return &iport->isci_host->pdev->dev;
-}
-
 static inline struct device *scirdev_to_dev(struct isci_remote_device *idev)
 {
 	if (!idev || !idev->isci_port || !idev->isci_port->isci_host)
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index 17c4c2c89c2e..5137db5a5d85 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -60,6 +60,7 @@
 #include <linux/efi.h>
 #include <asm/string.h>
 #include <scsi/scsi_host.h>
+#include "host.h"
 #include "isci.h"
 #include "task.h"
 #include "probe_roms.h"
@@ -154,7 +155,6 @@ static struct scsi_host_template isci_sht = {
 	.queuecommand			= sas_queuecommand,
 	.target_alloc			= sas_target_alloc,
 	.slave_configure		= sas_slave_configure,
-	.slave_destroy			= sas_slave_destroy,
 	.scan_finished			= isci_host_scan_finished,
 	.scan_start			= isci_host_scan_start,
 	.change_queue_depth		= sas_change_queue_depth,
@@ -166,9 +166,6 @@ static struct scsi_host_template isci_sht = {
 	.sg_tablesize			= SG_ALL,
 	.max_sectors			= SCSI_DEFAULT_MAX_SECTORS,
 	.use_clustering			= ENABLE_CLUSTERING,
-	.eh_device_reset_handler	= sas_eh_device_reset_handler,
-	.eh_bus_reset_handler		= isci_bus_reset_handler,
-	.slave_alloc			= sas_slave_alloc,
 	.target_destroy			= sas_target_destroy,
 	.ioctl				= sas_ioctl,
 	.shost_attrs			= isci_host_attrs,
@@ -194,6 +191,9 @@ static struct sas_domain_function_template isci_transport_ops  = {
 	.lldd_lu_reset		= isci_task_lu_reset,
 	.lldd_query_task	= isci_task_query_task,
 
+	/* ata recovery called from ata-eh */
+	.lldd_ata_check_ready	= isci_ata_check_ready,
+
 	/* Port and Adapter management */
 	.lldd_clear_nexus_port	= isci_task_clear_nexus_port,
 	.lldd_clear_nexus_ha	= isci_task_clear_nexus_ha,
@@ -242,18 +242,13 @@ static int isci_register_sas_ha(struct isci_host *isci_host)
 	if (!sas_ports)
 		return -ENOMEM;
 
-	/*----------------- Libsas Initialization Stuff----------------------
-	 * Set various fields in the sas_ha struct:
-	 */
-
 	sas_ha->sas_ha_name = DRV_NAME;
 	sas_ha->lldd_module = THIS_MODULE;
 	sas_ha->sas_addr    = &isci_host->phys[0].sas_addr[0];
 
-	/* set the array of phy and port structs.  */
 	for (i = 0; i < SCI_MAX_PHYS; i++) {
 		sas_phys[i] = &isci_host->phys[i].sas_phy;
-		sas_ports[i] = &isci_host->ports[i].sas_port;
+		sas_ports[i] = &isci_host->sas_ports[i];
 	}
 
 	sas_ha->sas_phy  = sas_phys;
@@ -528,6 +523,13 @@ static int __devinit isci_pci_probe(struct pci_dev *pdev, const struct pci_devic
 			goto err_host_alloc;
 		}
 		pci_info->hosts[i] = h;
+
+		/* turn on DIF support */
+		scsi_host_set_prot(h->shost,
+				   SHOST_DIF_TYPE1_PROTECTION |
+				   SHOST_DIF_TYPE2_PROTECTION |
+				   SHOST_DIF_TYPE3_PROTECTION);
+		scsi_host_set_guard(h->shost, SHOST_DIX_GUARD_CRC);
 	}
 
 	err = isci_setup_interrupts(pdev);
@@ -551,9 +553,9 @@ static void __devexit isci_pci_remove(struct pci_dev *pdev)
 	int i;
 
 	for_each_isci_host(i, ihost, pdev) {
+		wait_for_start(ihost);
 		isci_unregister(ihost);
 		isci_host_deinit(ihost);
-		sci_controller_disable_interrupts(ihost);
 	}
 }
 
diff --git a/drivers/scsi/isci/phy.c b/drivers/scsi/isci/phy.c
index fe18acfd6eb3..fab3586840b5 100644
--- a/drivers/scsi/isci/phy.c
+++ b/drivers/scsi/isci/phy.c
@@ -59,6 +59,16 @@
 #include "scu_event_codes.h"
 #include "probe_roms.h"
 
+#undef C
+#define C(a) (#a)
+static const char *phy_state_name(enum sci_phy_states state)
+{
+	static const char * const strings[] = PHY_STATES;
+
+	return strings[state];
+}
+#undef C
+
 /* Maximum arbitration wait time in micro-seconds */
 #define SCIC_SDS_PHY_MAX_ARBITRATION_WAIT_TIME  (700)
 
@@ -67,6 +77,19 @@ enum sas_linkrate sci_phy_linkrate(struct isci_phy *iphy)
 	return iphy->max_negotiated_speed;
 }
 
+static struct isci_host *phy_to_host(struct isci_phy *iphy)
+{
+	struct isci_phy *table = iphy - iphy->phy_index;
+	struct isci_host *ihost = container_of(table, typeof(*ihost), phys[0]);
+
+	return ihost;
+}
+
+static struct device *sciphy_to_dev(struct isci_phy *iphy)
+{
+	return &phy_to_host(iphy)->pdev->dev;
+}
+
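
phy_to_host() avoids the deleted back pointers entirely: subtracting phy_index rebases an element pointer to phys[0], and container_of() then recovers the enclosing host. A standalone sketch of that pointer arithmetic with toy types (not the driver's structures):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct phy { int index; };

    struct host {
        int id;
        struct phy phys[4];
    };

    static struct host *phy_to_host(struct phy *p)
    {
        struct phy *table = p - p->index; /* rebase to phys[0] */

        return container_of(table, struct host, phys[0]);
    }

    int main(void)
    {
        struct host h = { .id = 7 };
        int i;

        for (i = 0; i < 4; i++)
            h.phys[i].index = i;
        printf("%d\n", phy_to_host(&h.phys[2])->id); /* prints 7 */
        return 0;
    }
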
 static enum sci_status
 sci_phy_transport_layer_initialization(struct isci_phy *iphy,
 				       struct scu_transport_layer_registers __iomem *reg)
@@ -446,8 +469,8 @@ enum sci_status sci_phy_start(struct isci_phy *iphy)
 	enum sci_phy_states state = iphy->sm.current_state_id;
 
 	if (state != SCI_PHY_STOPPED) {
-		dev_dbg(sciphy_to_dev(iphy),
-			 "%s: in wrong state: %d\n", __func__, state);
+		dev_dbg(sciphy_to_dev(iphy), "%s: in wrong state: %s\n",
+			__func__, phy_state_name(state));
 		return SCI_FAILURE_INVALID_STATE;
 	}
 
@@ -472,8 +495,8 @@ enum sci_status sci_phy_stop(struct isci_phy *iphy)
 	case SCI_PHY_READY:
 		break;
 	default:
-		dev_dbg(sciphy_to_dev(iphy),
-			"%s: in wrong state: %d\n", __func__, state);
+		dev_dbg(sciphy_to_dev(iphy), "%s: in wrong state: %s\n",
+			__func__, phy_state_name(state));
 		return SCI_FAILURE_INVALID_STATE;
 	}
 
@@ -486,8 +509,8 @@ enum sci_status sci_phy_reset(struct isci_phy *iphy)
 	enum sci_phy_states state = iphy->sm.current_state_id;
 
 	if (state != SCI_PHY_READY) {
-		dev_dbg(sciphy_to_dev(iphy),
-			"%s: in wrong state: %d\n", __func__, state);
+		dev_dbg(sciphy_to_dev(iphy), "%s: in wrong state: %s\n",
+			__func__, phy_state_name(state));
 		return SCI_FAILURE_INVALID_STATE;
 	}
 
@@ -536,8 +559,8 @@ enum sci_status sci_phy_consume_power_handler(struct isci_phy *iphy)
 		return SCI_SUCCESS;
 	}
 	default:
-		dev_dbg(sciphy_to_dev(iphy),
-			"%s: in wrong state: %d\n", __func__, state);
+		dev_dbg(sciphy_to_dev(iphy), "%s: in wrong state: %s\n",
+			__func__, phy_state_name(state));
 		return SCI_FAILURE_INVALID_STATE;
 	}
 }
@@ -591,6 +614,60 @@ static void sci_phy_complete_link_training(struct isci_phy *iphy,
 	sci_change_state(&iphy->sm, next_state);
 }
 
+static const char *phy_event_name(u32 event_code)
+{
+	switch (scu_get_event_code(event_code)) {
+	case SCU_EVENT_PORT_SELECTOR_DETECTED:
+		return "port selector";
+	case SCU_EVENT_SENT_PORT_SELECTION:
+		return "port selection";
+	case SCU_EVENT_HARD_RESET_TRANSMITTED:
+		return "tx hard reset";
+	case SCU_EVENT_HARD_RESET_RECEIVED:
+		return "rx hard reset";
+	case SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT:
+		return "identify timeout";
+	case SCU_EVENT_LINK_FAILURE:
+		return "link fail";
+	case SCU_EVENT_SATA_SPINUP_HOLD:
+		return "sata spinup hold";
+	case SCU_EVENT_SAS_15_SSC:
+	case SCU_EVENT_SAS_15:
+		return "sas 1.5";
+	case SCU_EVENT_SAS_30_SSC:
+	case SCU_EVENT_SAS_30:
+		return "sas 3.0";
+	case SCU_EVENT_SAS_60_SSC:
+	case SCU_EVENT_SAS_60:
+		return "sas 6.0";
+	case SCU_EVENT_SATA_15_SSC:
+	case SCU_EVENT_SATA_15:
+		return "sata 1.5";
+	case SCU_EVENT_SATA_30_SSC:
+	case SCU_EVENT_SATA_30:
+		return "sata 3.0";
+	case SCU_EVENT_SATA_60_SSC:
+	case SCU_EVENT_SATA_60:
+		return "sata 6.0";
+	case SCU_EVENT_SAS_PHY_DETECTED:
+		return "sas detect";
+	case SCU_EVENT_SATA_PHY_DETECTED:
+		return "sata detect";
+	default:
+		return "unknown";
+	}
+}
+
+#define phy_event_dbg(iphy, state, code) \
+	dev_dbg(sciphy_to_dev(iphy), "phy-%d:%d: %s event: %s (%x)\n", \
+		phy_to_host(iphy)->id, iphy->phy_index, \
+		phy_state_name(state), phy_event_name(code), code)
+
+#define phy_event_warn(iphy, state, code) \
+	dev_warn(sciphy_to_dev(iphy), "phy-%d:%d: %s event: %s (%x)\n", \
+		phy_to_host(iphy)->id, iphy->phy_index, \
+		phy_state_name(state), phy_event_name(code), code)
+
 enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
 {
 	enum sci_phy_states state = iphy->sm.current_state_id;
@@ -607,11 +684,7 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
 			iphy->is_in_link_training = true;
 			break;
 		default:
-			dev_dbg(sciphy_to_dev(iphy),
-				"%s: PHY starting substate machine received "
-				"unexpected event_code %x\n",
-				__func__,
-				event_code);
+			phy_event_dbg(iphy, state, event_code);
 			return SCI_FAILURE;
 		}
 		return SCI_SUCCESS;
@@ -648,11 +721,7 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
 			sci_change_state(&iphy->sm, SCI_PHY_STARTING);
 			break;
 		default:
-			dev_warn(sciphy_to_dev(iphy),
-				 "%s: PHY starting substate machine received "
-				 "unexpected event_code %x\n",
-				 __func__, event_code);
-
+			phy_event_warn(iphy, state, event_code);
 			return SCI_FAILURE;
 			break;
 		}
@@ -677,10 +746,7 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
 			sci_change_state(&iphy->sm, SCI_PHY_STARTING);
 			break;
 		default:
-			dev_warn(sciphy_to_dev(iphy),
-				 "%s: PHY starting substate machine received "
-				 "unexpected event_code %x\n",
-				 __func__, event_code);
+			phy_event_warn(iphy, state, event_code);
 			return SCI_FAILURE;
 		}
 		return SCI_SUCCESS;
@@ -691,11 +757,7 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
 			sci_change_state(&iphy->sm, SCI_PHY_STARTING);
 			break;
 		default:
-			dev_warn(sciphy_to_dev(iphy),
-				"%s: PHY starting substate machine received unexpected "
-				"event_code %x\n",
-				__func__,
-				event_code);
+			phy_event_warn(iphy, state, event_code);
 			return SCI_FAILURE;
 		}
 		return SCI_SUCCESS;
@@ -719,11 +781,7 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
 			break;
 
 		default:
-			dev_warn(sciphy_to_dev(iphy),
-				 "%s: PHY starting substate machine received "
-				 "unexpected event_code %x\n",
-				 __func__, event_code);
-
+			phy_event_warn(iphy, state, event_code);
 			return SCI_FAILURE;
 		}
 		return SCI_SUCCESS;
@@ -751,12 +809,7 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
 			sci_phy_start_sas_link_training(iphy);
 			break;
 		default:
-			dev_warn(sciphy_to_dev(iphy),
-				 "%s: PHY starting substate machine received "
-				 "unexpected event_code %x\n",
-				 __func__,
-				 event_code);
-
+			phy_event_warn(iphy, state, event_code);
 			return SCI_FAILURE;
 		}
 		return SCI_SUCCESS;
@@ -793,11 +846,7 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
 			sci_phy_start_sas_link_training(iphy);
 			break;
 		default:
-			dev_warn(sciphy_to_dev(iphy),
-				 "%s: PHY starting substate machine received "
-				 "unexpected event_code %x\n",
-				 __func__, event_code);
-
+			phy_event_warn(iphy, state, event_code);
 			return SCI_FAILURE;
 		}
 
@@ -815,12 +864,7 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
 			break;
 
 		default:
-			dev_warn(sciphy_to_dev(iphy),
-				 "%s: PHY starting substate machine received "
-				 "unexpected event_code %x\n",
-				 __func__,
-				 event_code);
-
+			phy_event_warn(iphy, state, event_code);
 			return SCI_FAILURE;
 		}
 		return SCI_SUCCESS;
@@ -838,10 +882,7 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
 				iphy->bcn_received_while_port_unassigned = true;
 			break;
 		default:
-			dev_warn(sciphy_to_dev(iphy),
-				 "%sP SCIC PHY 0x%p ready state machine received "
-				 "unexpected event_code %x\n",
-				 __func__, iphy, event_code);
+			phy_event_warn(iphy, state, event_code);
 			return SCI_FAILURE_INVALID_STATE;
 		}
 		return SCI_SUCCESS;
@@ -852,18 +893,14 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
 			sci_change_state(&iphy->sm, SCI_PHY_STARTING);
 			break;
 		default:
-			dev_warn(sciphy_to_dev(iphy),
-				 "%s: SCIC PHY 0x%p resetting state machine received "
-				 "unexpected event_code %x\n",
-				 __func__, iphy, event_code);
-
+			phy_event_warn(iphy, state, event_code);
 			return SCI_FAILURE_INVALID_STATE;
 			break;
 		}
 		return SCI_SUCCESS;
 	default:
-		dev_dbg(sciphy_to_dev(iphy),
-			"%s: in wrong state: %d\n", __func__, state);
+		dev_dbg(sciphy_to_dev(iphy), "%s: in wrong state: %s\n",
+			__func__, phy_state_name(state));
 		return SCI_FAILURE_INVALID_STATE;
 	}
 }
@@ -956,8 +993,8 @@ enum sci_status sci_phy_frame_handler(struct isci_phy *iphy, u32 frame_index)
 		return result;
 	}
 	default:
-		dev_dbg(sciphy_to_dev(iphy),
-			"%s: in wrong state: %d\n", __func__, state);
+		dev_dbg(sciphy_to_dev(iphy), "%s: in wrong state: %s\n",
+			__func__, phy_state_name(state));
 		return SCI_FAILURE_INVALID_STATE;
 	}
 
@@ -1299,7 +1336,6 @@ void isci_phy_init(struct isci_phy *iphy, struct isci_host *ihost, int index)
 	sas_addr = cpu_to_be64(sci_sas_addr);
 	memcpy(iphy->sas_addr, &sas_addr, sizeof(sas_addr));
 
-	iphy->isci_port = NULL;
 	iphy->sas_phy.enabled = 0;
 	iphy->sas_phy.id = index;
 	iphy->sas_phy.sas_addr = &iphy->sas_addr[0];
@@ -1333,13 +1369,13 @@ int isci_phy_control(struct asd_sas_phy *sas_phy,
 {
 	int ret = 0;
 	struct isci_phy *iphy = sas_phy->lldd_phy;
-	struct isci_port *iport = iphy->isci_port;
+	struct asd_sas_port *port = sas_phy->port;
 	struct isci_host *ihost = sas_phy->ha->lldd_ha;
 	unsigned long flags;
 
 	dev_dbg(&ihost->pdev->dev,
 		"%s: phy %p; func %d; buf %p; isci phy %p, port %p\n",
-		__func__, sas_phy, func, buf, iphy, iport);
+		__func__, sas_phy, func, buf, iphy, port);
 
 	switch (func) {
 	case PHY_FUNC_DISABLE:
@@ -1356,11 +1392,10 @@ int isci_phy_control(struct asd_sas_phy *sas_phy,
 		break;
 
 	case PHY_FUNC_HARD_RESET:
-		if (!iport)
+		if (!port)
 			return -ENODEV;
 
-		/* Perform the port reset. */
-		ret = isci_port_perform_hard_reset(ihost, iport, iphy);
+		ret = isci_port_perform_hard_reset(ihost, port->lldd_port, iphy);
 
 		break;
 	case PHY_FUNC_GET_EVENTS: {
diff --git a/drivers/scsi/isci/phy.h b/drivers/scsi/isci/phy.h
index 67699c8e321c..0e45833ba06d 100644
--- a/drivers/scsi/isci/phy.h
+++ b/drivers/scsi/isci/phy.h
@@ -103,7 +103,6 @@ struct isci_phy {
 	struct scu_transport_layer_registers __iomem *transport_layer_registers;
 	struct scu_link_layer_registers __iomem *link_layer_registers;
 	struct asd_sas_phy sas_phy;
-	struct isci_port *isci_port;
 	u8 sas_addr[SAS_ADDR_SIZE];
 	union {
 		struct sas_identify_frame iaf;
@@ -344,101 +343,65 @@ enum sci_phy_counter_id {
 	SCIC_PHY_COUNTER_SN_DWORD_SYNC_ERROR
 };
 
-enum sci_phy_states {
-	/**
-	 * Simply the initial state for the base domain state machine.
-	 */
-	SCI_PHY_INITIAL,
-
-	/**
-	 * This state indicates that the phy has successfully been stopped.
-	 * In this state no new IO operations are permitted on this phy.
-	 * This state is entered from the INITIAL state.
-	 * This state is entered from the STARTING state.
-	 * This state is entered from the READY state.
-	 * This state is entered from the RESETTING state.
-	 */
-	SCI_PHY_STOPPED,
-
-	/**
-	 * This state indicates that the phy is in the process of becomming
-	 * ready.  In this state no new IO operations are permitted on this phy.
-	 * This state is entered from the STOPPED state.
-	 * This state is entered from the READY state.
-	 * This state is entered from the RESETTING state.
-	 */
-	SCI_PHY_STARTING,
-
-	/**
-	 * Initial state
-	 */
-	SCI_PHY_SUB_INITIAL,
-
-	/**
-	 * Wait state for the hardware OSSP event type notification
-	 */
-	SCI_PHY_SUB_AWAIT_OSSP_EN,
-
-	/**
-	 * Wait state for the PHY speed notification
-	 */
-	SCI_PHY_SUB_AWAIT_SAS_SPEED_EN,
-
-	/**
-	 * Wait state for the IAF Unsolicited frame notification
-	 */
-	SCI_PHY_SUB_AWAIT_IAF_UF,
-
-	/**
-	 * Wait state for the request to consume power
-	 */
-	SCI_PHY_SUB_AWAIT_SAS_POWER,
-
-	/**
-	 * Wait state for request to consume power
-	 */
-	SCI_PHY_SUB_AWAIT_SATA_POWER,
-
-	/**
-	 * Wait state for the SATA PHY notification
-	 */
-	SCI_PHY_SUB_AWAIT_SATA_PHY_EN,
-
-	/**
-	 * Wait for the SATA PHY speed notification
-	 */
-	SCI_PHY_SUB_AWAIT_SATA_SPEED_EN,
-
-	/**
-	 * Wait state for the SIGNATURE FIS unsolicited frame notification
-	 */
-	SCI_PHY_SUB_AWAIT_SIG_FIS_UF,
-
-	/**
-	 * Exit state for this state machine
-	 */
-	SCI_PHY_SUB_FINAL,
-
-	/**
-	 * This state indicates the the phy is now ready.  Thus, the user
-	 * is able to perform IO operations utilizing this phy as long as it
-	 * is currently part of a valid port.
-	 * This state is entered from the STARTING state.
-	 */
-	SCI_PHY_READY,
-
-	/**
-	 * This state indicates that the phy is in the process of being reset.
-	 * In this state no new IO operations are permitted on this phy.
-	 * This state is entered from the READY state.
-	 */
-	SCI_PHY_RESETTING,
-
-	/**
-	 * Simply the final state for the base phy state machine.
-	 */
-	SCI_PHY_FINAL,
-};
+/**
+ * enum sci_phy_states - phy state machine states
+ * @SCI_PHY_INITIAL: Simply the initial state for the base domain state
+ *		     machine.
+ * @SCI_PHY_STOPPED: phy has successfully been stopped.  In this state
+ *		     no new IO operations are permitted on this phy.
+ * @SCI_PHY_STARTING: the phy is in the process of becoming ready.  In
+ *		      this state no new IO operations are permitted on
+ *		      this phy.
+ * @SCI_PHY_SUB_INITIAL: Initial state
+ * @SCI_PHY_SUB_AWAIT_OSSP_EN: Wait state for the hardware OSSP event
+ *			       type notification
+ * @SCI_PHY_SUB_AWAIT_SAS_SPEED_EN: Wait state for the PHY speed
+ *				    notification
+ * @SCI_PHY_SUB_AWAIT_IAF_UF: Wait state for the IAF Unsolicited frame
+ *			      notification
+ * @SCI_PHY_SUB_AWAIT_SAS_POWER: Wait state for the request to consume
+ *				 power
+ * @SCI_PHY_SUB_AWAIT_SATA_POWER: Wait state for request to consume
+ *				  power
+ * @SCI_PHY_SUB_AWAIT_SATA_PHY_EN: Wait state for the SATA PHY
+ *				   notification
+ * @SCI_PHY_SUB_AWAIT_SATA_SPEED_EN: Wait for the SATA PHY speed
+ *				     notification
+ * @SCI_PHY_SUB_AWAIT_SIG_FIS_UF: Wait state for the SIGNATURE FIS
+ *				  unsolicited frame notification
+ * @SCI_PHY_SUB_FINAL: Exit state for this state machine
+ * @SCI_PHY_READY: phy is now ready.  Thus, the user is able to perform
+ *		   IO operations utilizing this phy as long as it is
+ *		   currently part of a valid port.  This state is
+ *		   entered from the STARTING state.
+ * @SCI_PHY_RESETTING: phy is in the process of being reset.  In this
+ *		       state no new IO operations are permitted on this
+ *		       phy.  This state is entered from the READY state.
+ * @SCI_PHY_FINAL: Simply the final state for the base phy state
+ *		   machine.
+ */
+#define PHY_STATES {\
+	C(PHY_INITIAL),\
+	C(PHY_STOPPED),\
+	C(PHY_STARTING),\
+	C(PHY_SUB_INITIAL),\
+	C(PHY_SUB_AWAIT_OSSP_EN),\
+	C(PHY_SUB_AWAIT_SAS_SPEED_EN),\
+	C(PHY_SUB_AWAIT_IAF_UF),\
+	C(PHY_SUB_AWAIT_SAS_POWER),\
+	C(PHY_SUB_AWAIT_SATA_POWER),\
+	C(PHY_SUB_AWAIT_SATA_PHY_EN),\
+	C(PHY_SUB_AWAIT_SATA_SPEED_EN),\
+	C(PHY_SUB_AWAIT_SIG_FIS_UF),\
+	C(PHY_SUB_FINAL),\
+	C(PHY_READY),\
+	C(PHY_RESETTING),\
+	C(PHY_FINAL),\
+	}
+#undef C
+#define C(a) SCI_##a
+enum sci_phy_states PHY_STATES;
+#undef C
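
The C() macro gives the PHY_STATES list two expansions, so the enum and its name strings can never drift apart: defined as SCI_##a it declares the enumerators, and redefined as (#a) in phy.c it builds the string table. A minimal standalone version of the trick:

    #include <stdio.h>

    #define STATES {\
        C(STOPPED),\
        C(STARTING),\
        C(READY),\
        }

    #define C(a) DEMO_##a
    enum demo_states STATES;
    #undef C

    #define C(a) (#a)
    static const char * const state_names[] = STATES;
    #undef C

    int main(void)
    {
        printf("%s\n", state_names[DEMO_READY]); /* prints READY */
        return 0;
    }
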
 
 void sci_phy_construct(
 	struct isci_phy *iphy,
diff --git a/drivers/scsi/isci/port.c b/drivers/scsi/isci/port.c
index 7c6ac58a5c4c..5fada73b71ff 100644
--- a/drivers/scsi/isci/port.c
+++ b/drivers/scsi/isci/port.c
@@ -60,18 +60,29 @@
 #define SCIC_SDS_PORT_HARD_RESET_TIMEOUT  (1000)
 #define SCU_DUMMY_INDEX    (0xFFFF)
 
-static void isci_port_change_state(struct isci_port *iport, enum isci_status status)
+#undef C
+#define C(a) (#a)
+const char *port_state_name(enum sci_port_states state)
 {
-	unsigned long flags;
+	static const char * const strings[] = PORT_STATES;
+
+	return strings[state];
+}
+#undef C
+
+static struct device *sciport_to_dev(struct isci_port *iport)
+{
+	int i = iport->physical_port_index;
+	struct isci_port *table;
+	struct isci_host *ihost;
+
+	if (i == SCIC_SDS_DUMMY_PORT)
+		i = SCI_MAX_PORTS + 1;
 
-	dev_dbg(&iport->isci_host->pdev->dev,
-		"%s: iport = %p, state = 0x%x\n",
-		__func__, iport, status);
+	table = iport - i;
+	ihost = container_of(table, typeof(*ihost), ports[0]);
 
-	/* XXX pointless lock */
-	spin_lock_irqsave(&iport->state_lock, flags);
-	iport->status = status;
-	spin_unlock_irqrestore(&iport->state_lock, flags);
+	return &ihost->pdev->dev;
 }
 
 static void sci_port_get_protocols(struct isci_port *iport, struct sci_phy_proto *proto)
@@ -165,18 +176,12 @@ static void isci_port_link_up(struct isci_host *isci_host,
 	struct sci_port_properties properties;
 	unsigned long success = true;
 
-	BUG_ON(iphy->isci_port != NULL);
-
-	iphy->isci_port = iport;
-
 	dev_dbg(&isci_host->pdev->dev,
 		"%s: isci_port = %p\n",
 		__func__, iport);
 
 	spin_lock_irqsave(&iphy->sas_phy.frame_rcvd_lock, flags);
 
-	isci_port_change_state(iphy->isci_port, isci_starting);
-
 	sci_port_get_properties(iport, &properties);
 
 	if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA) {
@@ -258,7 +263,6 @@ static void isci_port_link_down(struct isci_host *isci_host,
 					__func__, isci_device);
 				set_bit(IDEV_GONE, &isci_device->flags);
 			}
-			isci_port_change_state(isci_port, isci_stopping);
 		}
 	}
 
@@ -269,52 +273,10 @@ static void isci_port_link_down(struct isci_host *isci_host,
 	isci_host->sas_ha.notify_phy_event(&isci_phy->sas_phy,
 					   PHYE_LOSS_OF_SIGNAL);
 
-	isci_phy->isci_port = NULL;
-
 	dev_dbg(&isci_host->pdev->dev,
 		"%s: isci_port = %p - Done\n", __func__, isci_port);
 }
 
-
-/**
- * isci_port_ready() - This function is called by the sci core when a link
- *    becomes ready.
- * @isci_host: This parameter specifies the isci host object.
- * @port: This parameter specifies the sci port with the active link.
- *
- */
-static void isci_port_ready(struct isci_host *isci_host, struct isci_port *isci_port)
-{
-	dev_dbg(&isci_host->pdev->dev,
-		"%s: isci_port = %p\n", __func__, isci_port);
-
-	complete_all(&isci_port->start_complete);
-	isci_port_change_state(isci_port, isci_ready);
-	return;
-}
-
-/**
- * isci_port_not_ready() - This function is called by the sci core when a link
- *    is not ready. All remote devices on this link will be removed if they are
- *    in the stopping state.
- * @isci_host: This parameter specifies the isci host object.
- * @port: This parameter specifies the sci port with the active link.
- *
- */
-static void isci_port_not_ready(struct isci_host *isci_host, struct isci_port *isci_port)
-{
-	dev_dbg(&isci_host->pdev->dev,
-		"%s: isci_port = %p\n", __func__, isci_port);
-}
-
-static void isci_port_stop_complete(struct isci_host *ihost,
-				    struct isci_port *iport,
-				    enum sci_status completion_status)
-{
-	dev_dbg(&ihost->pdev->dev, "Port stop complete\n");
-}
-
-
 static bool is_port_ready_state(enum sci_port_states state)
 {
 	switch (state) {
@@ -353,7 +315,9 @@ static void port_state_machine_change(struct isci_port *iport,
 static void isci_port_hard_reset_complete(struct isci_port *isci_port,
 					  enum sci_status completion_status)
 {
-	dev_dbg(&isci_port->isci_host->pdev->dev,
+	struct isci_host *ihost = isci_port->owning_controller;
+
+	dev_dbg(&ihost->pdev->dev,
 		"%s: isci_port = %p, completion_status=%x\n",
 		     __func__, isci_port, completion_status);
 
@@ -364,23 +328,24 @@ static void isci_port_hard_reset_complete(struct isci_port *isci_port,
 
 		/* The reset failed.  The port state is now SCI_PORT_FAILED. */
 		if (isci_port->active_phy_mask == 0) {
+			int phy_idx = isci_port->last_active_phy;
+			struct isci_phy *iphy = &ihost->phys[phy_idx];
 
 			/* Generate the link down now to the host, since it
 			 * was intercepted by the hard reset state machine when
 			 * it really happened.
 			 */
-			isci_port_link_down(isci_port->isci_host,
-					    &isci_port->isci_host->phys[
-						   isci_port->last_active_phy],
-					    isci_port);
+			isci_port_link_down(ihost, iphy, isci_port);
 		}
 		/* Advance the port state so that link state changes will be
-		* noticed.
-		*/
+		 * noticed.
+		 */
 		port_state_machine_change(isci_port, SCI_PORT_SUB_WAITING);
 
 	}
-	complete_all(&isci_port->hard_reset_complete);
+	clear_bit(IPORT_RESET_PENDING, &isci_port->state);
+	wake_up(&ihost->eventq);
+
 }
 
 /* This method will return a true value if the specified phy can be assigned to
@@ -835,10 +800,9 @@ static void port_timeout(unsigned long data)
 			__func__,
 			iport);
 	} else if (current_state == SCI_PORT_STOPPING) {
-		/* if the port is still stopping then the stop has not completed */
-		isci_port_stop_complete(iport->owning_controller,
-					iport,
-					SCI_FAILURE_TIMEOUT);
+		dev_dbg(sciport_to_dev(iport),
+			"%s: port%d: stop complete timeout\n",
+			__func__, iport->physical_port_index);
 	} else {
 		/* The port is in the ready state and we have a timer
 		 * reporting a timeout this should not happen.
@@ -1003,7 +967,8 @@ static void sci_port_ready_substate_operational_enter(struct sci_base_state_mach
 	struct isci_port *iport = container_of(sm, typeof(*iport), sm);
 	struct isci_host *ihost = iport->owning_controller;
 
-	isci_port_ready(ihost, iport);
+	dev_dbg(&ihost->pdev->dev, "%s: port%d ready\n",
+		__func__, iport->physical_port_index);
 
 	for (index = 0; index < SCI_MAX_PHYS; index++) {
 		if (iport->phy_table[index]) {
@@ -1069,7 +1034,8 @@ static void sci_port_ready_substate_operational_exit(struct sci_base_state_machi
 	 */
 	sci_port_abort_dummy_request(iport);
 
-	isci_port_not_ready(ihost, iport);
+	dev_dbg(&ihost->pdev->dev, "%s: port%d !ready\n",
+		__func__, iport->physical_port_index);
 
 	if (iport->ready_exit)
 		sci_port_invalidate_dummy_remote_node(iport);
@@ -1081,7 +1047,8 @@ static void sci_port_ready_substate_configuring_enter(struct sci_base_state_mach
 	struct isci_host *ihost = iport->owning_controller;
 
 	if (iport->active_phy_mask == 0) {
-		isci_port_not_ready(ihost, iport);
+		dev_dbg(&ihost->pdev->dev, "%s: port%d !ready\n",
+			__func__, iport->physical_port_index);
 
 		port_state_machine_change(iport, SCI_PORT_SUB_WAITING);
 	} else
@@ -1097,8 +1064,8 @@ enum sci_status sci_port_start(struct isci_port *iport)
 
 	state = iport->sm.current_state_id;
 	if (state != SCI_PORT_STOPPED) {
-		dev_warn(sciport_to_dev(iport),
-			 "%s: in wrong state: %d\n", __func__, state);
+		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
+			 __func__, port_state_name(state));
 		return SCI_FAILURE_INVALID_STATE;
 	}
 
@@ -1172,8 +1139,8 @@ enum sci_status sci_port_stop(struct isci_port *iport)
 					  SCI_PORT_STOPPING);
 		return SCI_SUCCESS;
 	default:
-		dev_warn(sciport_to_dev(iport),
-			 "%s: in wrong state: %d\n", __func__, state);
+		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
+			 __func__, port_state_name(state));
 		return SCI_FAILURE_INVALID_STATE;
 	}
 }
@@ -1187,8 +1154,8 @@ static enum sci_status sci_port_hard_reset(struct isci_port *iport, u32 timeout)
 
 	state = iport->sm.current_state_id;
 	if (state != SCI_PORT_SUB_OPERATIONAL) {
-		dev_warn(sciport_to_dev(iport),
-			 "%s: in wrong state: %d\n", __func__, state);
+		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
+			 __func__, port_state_name(state));
 		return SCI_FAILURE_INVALID_STATE;
 	}
 
@@ -1282,8 +1249,8 @@ enum sci_status sci_port_add_phy(struct isci_port *iport,
 					  SCI_PORT_SUB_CONFIGURING);
 		return SCI_SUCCESS;
 	default:
-		dev_warn(sciport_to_dev(iport),
-			 "%s: in wrong state: %d\n", __func__, state);
+		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
+			 __func__, port_state_name(state));
 		return SCI_FAILURE_INVALID_STATE;
 	}
 }
@@ -1332,8 +1299,8 @@ enum sci_status sci_port_remove_phy(struct isci_port *iport,
 					  SCI_PORT_SUB_CONFIGURING);
 		return SCI_SUCCESS;
 	default:
-		dev_warn(sciport_to_dev(iport),
-			 "%s: in wrong state: %d\n", __func__, state);
+		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
+			 __func__, port_state_name(state));
 		return SCI_FAILURE_INVALID_STATE;
 	}
 }
@@ -1375,8 +1342,8 @@ enum sci_status sci_port_link_up(struct isci_port *iport,
 		sci_port_general_link_up_handler(iport, iphy, PF_RESUME);
 		return SCI_SUCCESS;
 	default:
-		dev_warn(sciport_to_dev(iport),
-			 "%s: in wrong state: %d\n", __func__, state);
+		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
+			 __func__, port_state_name(state));
 		return SCI_FAILURE_INVALID_STATE;
 	}
 }
@@ -1405,8 +1372,8 @@ enum sci_status sci_port_link_down(struct isci_port *iport,
 		sci_port_deactivate_phy(iport, iphy, false);
 		return SCI_SUCCESS;
 	default:
-		dev_warn(sciport_to_dev(iport),
-			 "%s: in wrong state: %d\n", __func__, state);
+		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
+			 __func__, port_state_name(state));
 		return SCI_FAILURE_INVALID_STATE;
 	}
 }
@@ -1425,8 +1392,8 @@ enum sci_status sci_port_start_io(struct isci_port *iport,
 		iport->started_request_count++;
 		return SCI_SUCCESS;
 	default:
-		dev_warn(sciport_to_dev(iport),
-			 "%s: in wrong state: %d\n", __func__, state);
+		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
+			 __func__, port_state_name(state));
 		return SCI_FAILURE_INVALID_STATE;
 	}
 }
@@ -1440,8 +1407,8 @@ enum sci_status sci_port_complete_io(struct isci_port *iport,
 	state = iport->sm.current_state_id;
 	switch (state) {
 	case SCI_PORT_STOPPED:
-		dev_warn(sciport_to_dev(iport),
-			 "%s: in wrong state: %d\n", __func__, state);
+		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
+			 __func__, port_state_name(state));
 		return SCI_FAILURE_INVALID_STATE;
 	case SCI_PORT_STOPPING:
 		sci_port_decrement_request_count(iport);
@@ -1547,7 +1514,8 @@ static void sci_port_ready_state_enter(struct sci_base_state_machine *sm)
 	if (prev_state  == SCI_PORT_RESETTING)
 		isci_port_hard_reset_complete(iport, SCI_SUCCESS);
 	else
-		isci_port_not_ready(ihost, iport);
+		dev_dbg(&ihost->pdev->dev, "%s: port%d !ready\n",
+			__func__, iport->physical_port_index);
 
 	/* Post and suspend the dummy remote node context for this port. */
 	sci_port_post_dummy_remote_node(iport);
@@ -1644,22 +1612,7 @@ void isci_port_init(struct isci_port *iport, struct isci_host *ihost, int index)
 {
 	INIT_LIST_HEAD(&iport->remote_dev_list);
 	INIT_LIST_HEAD(&iport->domain_dev_list);
-	spin_lock_init(&iport->state_lock);
-	init_completion(&iport->start_complete);
 	iport->isci_host = ihost;
-	isci_port_change_state(iport, isci_freed);
-}
-
-/**
- * isci_port_get_state() - This function gets the status of the port object.
- * @isci_port: This parameter points to the isci_port object
- *
- * status of the object as a isci_status enum.
- */
-enum isci_status isci_port_get_state(
-	struct isci_port *isci_port)
-{
-	return isci_port->status;
 }
 
 void sci_port_broadcast_change_received(struct isci_port *iport, struct isci_phy *iphy)
@@ -1670,6 +1623,11 @@ void sci_port_broadcast_change_received(struct isci_port *iport, struct isci_phy
 	isci_port_bc_change_received(ihost, iport, iphy);
 }
 
+static void wait_port_reset(struct isci_host *ihost, struct isci_port *iport)
+{
+	wait_event(ihost->eventq, !test_bit(IPORT_RESET_PENDING, &iport->state));
+}
+
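
The hard-reset path above trades the old per-port completion for a flag bit woken through the host-wide event queue. A minimal standalone sketch of that flag-plus-waitqueue idiom; the demo_* names and DEMO_PENDING bit are hypothetical, not driver code:

    #include <linux/wait.h>
    #include <linux/bitops.h>

    struct demo_ctx {
            wait_queue_head_t eventq;   /* init with init_waitqueue_head() */
            unsigned long state;
    #define DEMO_PENDING 0
    };

    static void demo_begin(struct demo_ctx *ctx)
    {
            set_bit(DEMO_PENDING, &ctx->state);     /* operation in flight */
    }

    static void demo_finish(struct demo_ctx *ctx)
    {
            clear_bit(DEMO_PENDING, &ctx->state);   /* clear before waking */
            wake_up(&ctx->eventq);                  /* waiters re-test the bit */
    }

    static void demo_wait(struct demo_ctx *ctx)
    {
            wait_event(ctx->eventq, !test_bit(DEMO_PENDING, &ctx->state));
    }
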
 int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *iport,
 				 struct isci_phy *iphy)
 {
@@ -1680,9 +1638,8 @@ int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *ipor
 	dev_dbg(&ihost->pdev->dev, "%s: iport = %p\n",
 		__func__, iport);
 
-	init_completion(&iport->hard_reset_complete);
-
 	spin_lock_irqsave(&ihost->scic_lock, flags);
+	set_bit(IPORT_RESET_PENDING, &iport->state);
 
 	#define ISCI_PORT_RESET_TIMEOUT SCIC_SDS_SIGNATURE_FIS_TIMEOUT
 	status = sci_port_hard_reset(iport, ISCI_PORT_RESET_TIMEOUT);
@@ -1690,7 +1647,7 @@ int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *ipor
 	spin_unlock_irqrestore(&ihost->scic_lock, flags);
 
 	if (status == SCI_SUCCESS) {
-		wait_for_completion(&iport->hard_reset_complete);
+		wait_port_reset(ihost, iport);
 
 		dev_dbg(&ihost->pdev->dev,
 			"%s: iport = %p; hard reset completion\n",
@@ -1704,6 +1661,8 @@ int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *ipor
 				__func__, iport, iport->hard_reset_status);
 		}
 	} else {
+		clear_bit(IPORT_RESET_PENDING, &iport->state);
+		wake_up(&ihost->eventq);
 		ret = TMF_RESP_FUNC_FAILED;
 
 		dev_err(&ihost->pdev->dev,
@@ -1726,24 +1685,80 @@ int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *ipor
 	return ret;
 }
 
-/**
- * isci_port_deformed() - This function is called by libsas when a port becomes
- *    inactive.
- * @phy: This parameter specifies the libsas phy with the inactive port.
- *
- */
+int isci_ata_check_ready(struct domain_device *dev)
+{
+	struct isci_port *iport = dev->port->lldd_port;
+	struct isci_host *ihost = dev_to_ihost(dev);
+	struct isci_remote_device *idev;
+	unsigned long flags;
+	int rc = 0;
+
+	spin_lock_irqsave(&ihost->scic_lock, flags);
+	idev = isci_lookup_device(dev);
+	spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+	if (!idev)
+		goto out;
+
+	if (test_bit(IPORT_RESET_PENDING, &iport->state))
+		goto out;
+
+	rc = !!iport->active_phy_mask;
+ out:
+	isci_put_device(idev);
+
+	return rc;
+}
+
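
isci_ata_check_ready() reports the link not ready while a hard reset is pending and otherwise keys off the port's active phy mask. A hedged sketch of how a recovery path might poll such a hook; the helper name, 100 ms period, and deadline handling are illustrative, not libsas code:

    static int demo_wait_link_ready(struct domain_device *dev,
                                    unsigned long deadline)
    {
            while (time_before(jiffies, deadline)) {
                    if (isci_ata_check_ready(dev))
                            return 0;       /* port has an active phy again */
                    msleep(100);
            }
            return -ETIMEDOUT;
    }
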
 void isci_port_deformed(struct asd_sas_phy *phy)
 {
-	pr_debug("%s: sas_phy = %p\n", __func__, phy);
+	struct isci_host *ihost = phy->ha->lldd_ha;
+	struct isci_port *iport = phy->port->lldd_port;
+	unsigned long flags;
+	int i;
+
+	/* we got a port notification on a port that was subsequently
+	 * torn down and libsas is just now catching up
+	 */
+	if (!iport)
+		return;
+
+	spin_lock_irqsave(&ihost->scic_lock, flags);
+	for (i = 0; i < SCI_MAX_PHYS; i++) {
+		if (iport->active_phy_mask & 1 << i)
+			break;
+	}
+	spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+	if (i >= SCI_MAX_PHYS)
+		dev_dbg(&ihost->pdev->dev, "%s: port: %ld\n",
+			__func__, (long) (iport - &ihost->ports[0]));
 }
 
-/**
- * isci_port_formed() - This function is called by libsas when a port becomes
- *    active.
- * @phy: This parameter specifies the libsas phy with the active port.
- *
- */
 void isci_port_formed(struct asd_sas_phy *phy)
 {
-	pr_debug("%s: sas_phy = %p, sas_port = %p\n", __func__, phy, phy->port);
+	struct isci_host *ihost = phy->ha->lldd_ha;
+	struct isci_phy *iphy = to_iphy(phy);
+	struct asd_sas_port *port = phy->port;
+	struct isci_port *iport;
+	unsigned long flags;
+	int i;
+
+	/* initial ports are formed as the driver is still initializing,
+	 * wait for that process to complete
+	 */
+	wait_for_start(ihost);
+
+	spin_lock_irqsave(&ihost->scic_lock, flags);
+	for (i = 0; i < SCI_MAX_PORTS; i++) {
+		iport = &ihost->ports[i];
+		if (iport->active_phy_mask & 1 << iphy->phy_index)
+			break;
+	}
+	spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+	if (i >= SCI_MAX_PORTS)
+		iport = NULL;
+
+	port->lldd_port = iport;
 }
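
Both lookups above rely on << binding tighter than & in C, so iport->active_phy_mask & 1 << iphy->phy_index already parses as a mask test against (1 << index). The fully parenthesized equivalent:

    if (iport->active_phy_mask & (1 << iphy->phy_index))
            ; /* this port owns the phy */
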
diff --git a/drivers/scsi/isci/port.h b/drivers/scsi/isci/port.h
index 08116090eb70..6b56240c2051 100644
--- a/drivers/scsi/isci/port.h
+++ b/drivers/scsi/isci/port.h
@@ -95,14 +95,11 @@ enum isci_status {
  * @timer: timeout start/stop operations
  */
 struct isci_port {
-	enum isci_status status;
 	struct isci_host *isci_host;
-	struct asd_sas_port sas_port;
 	struct list_head remote_dev_list;
-	spinlock_t state_lock;
 	struct list_head domain_dev_list;
-	struct completion start_complete;
-	struct completion hard_reset_complete;
+	#define IPORT_RESET_PENDING 0
+	unsigned long state;
 	enum sci_status hard_reset_status;
 	struct sci_base_state_machine sm;
 	bool ready_exit;
@@ -147,70 +144,47 @@ struct sci_port_properties {
 };
 
 /**
- * enum sci_port_states - This enumeration depicts all the states for the
- *    common port state machine.
- *
- *
+ * enum sci_port_states - port state machine states
+ * @SCI_PORT_STOPPED: port has successfully been stopped.  In this state
+ *		      no new IO operations are permitted.  This state is
+ *		      entered from the STOPPING state.
+ * @SCI_PORT_STOPPING: port is in the process of stopping.  In this
+ *		       state no new IO operations are permitted, but
+ *		       existing IO operations are allowed to complete.
+ *		       This state is entered from the READY state.
+ * @SCI_PORT_READY: port is now ready.  Thus, the user is able to
+ *		    perform IO operations on this port. This state is
+ *		    entered from the STARTING state.
+ * @SCI_PORT_SUB_WAITING: port is started and ready but has no active
+ *			  phys.
+ * @SCI_PORT_SUB_OPERATIONAL: port is started and ready and there is at
+ *			      least one phy operational.
+ * @SCI_PORT_SUB_CONFIGURING: port is started and there was an
+ *			      add/remove phy event.  This state is only
+ *			      used in Automatic Port Configuration Mode
+ *			      (APC)
+ * @SCI_PORT_RESETTING: port is in the process of performing a hard
+ *			reset.  Thus, the user is unable to perform IO
+ *			operations on this port.  This state is entered
+ *			from the READY state.
+ * @SCI_PORT_FAILED: port has failed a reset request.  This state is
+ *		     entered when a port reset request times out. This
+ *		     state is entered from the RESETTING state.
  */
-enum sci_port_states {
-	/**
-	 * This state indicates that the port has successfully been stopped.
-	 * In this state no new IO operations are permitted.
-	 * This state is entered from the STOPPING state.
-	 */
-	SCI_PORT_STOPPED,
-
-	/**
-	 * This state indicates that the port is in the process of stopping.
-	 * In this state no new IO operations are permitted, but existing IO
-	 * operations are allowed to complete.
-	 * This state is entered from the READY state.
-	 */
-	SCI_PORT_STOPPING,
-
-	/**
-	 * This state indicates the port is now ready.  Thus, the user is
-	 * able to perform IO operations on this port.
-	 * This state is entered from the STARTING state.
-	 */
-	SCI_PORT_READY,
-
-	/**
-	 * The substate where the port is started and ready but has no
-	 * active phys.
-	 */
-	SCI_PORT_SUB_WAITING,
-
-	/**
-	 * The substate where the port is started and ready and there is
-	 * at least one phy operational.
-	 */
-	SCI_PORT_SUB_OPERATIONAL,
-
-	/**
-	 * The substate where the port is started and there was an
-	 * add/remove phy event.  This state is only used in Automatic
-	 * Port Configuration Mode (APC)
-	 */
-	SCI_PORT_SUB_CONFIGURING,
-
-	/**
-	 * This state indicates the port is in the process of performing a hard
-	 * reset.  Thus, the user is unable to perform IO operations on this
-	 * port.
-	 * This state is entered from the READY state.
-	 */
-	SCI_PORT_RESETTING,
-
-	/**
-	 * This state indicates the port has failed a reset request.  This state
-	 * is entered when a port reset request times out.
-	 * This state is entered from the RESETTING state.
-	 */
-	SCI_PORT_FAILED,
-
-
-};
+#define PORT_STATES {\
+	C(PORT_STOPPED),\
+	C(PORT_STOPPING),\
+	C(PORT_READY),\
+	C(PORT_SUB_WAITING),\
+	C(PORT_SUB_OPERATIONAL),\
+	C(PORT_SUB_CONFIGURING),\
+	C(PORT_RESETTING),\
+	C(PORT_FAILED),\
+	}
+#undef C
+#define C(a) SCI_##a
+enum sci_port_states PORT_STATES;
+#undef C
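
PORT_STATES is an X-macro: the single list expands with C(a) defined as SCI_##a to declare the enum here, and again in port.c with C(a) defined as (#a) to build the string table behind port_state_name(), so the enum and its names can never drift apart. A standalone demonstration of the technique, with demo names rather than driver code:

    #include <stdio.h>

    #define COLORS {\
            C(RED),\
            C(GREEN),\
            C(BLUE),\
            }

    #define C(a) SCI_##a
    enum color COLORS;                      /* SCI_RED, SCI_GREEN, SCI_BLUE */
    #undef C

    #define C(a) (#a)
    static const char * const color_names[] = COLORS;  /* "RED", ... */
    #undef C

    static const char *color_name(enum color c)
    {
            return color_names[c];
    }

    int main(void)
    {
            printf("%s\n", color_name(SCI_GREEN));  /* prints GREEN */
            return 0;
    }
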
 
 static inline void sci_port_decrement_request_count(struct isci_port *iport)
 {
@@ -296,9 +270,6 @@ void sci_port_get_attached_sas_address(
 	struct isci_port *iport,
 	struct sci_sas_address *sas_address);
 
-enum isci_status isci_port_get_state(
-	struct isci_port *isci_port);
-
 void isci_port_formed(struct asd_sas_phy *);
 void isci_port_deformed(struct asd_sas_phy *);
 
@@ -309,4 +280,5 @@ void isci_port_init(
 
 int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *iport,
 				 struct isci_phy *iphy);
+int isci_ata_check_ready(struct domain_device *dev);
 #endif /* !defined(_ISCI_PORT_H_) */
diff --git a/drivers/scsi/isci/registers.h b/drivers/scsi/isci/registers.h
index eaa541afc755..7eb0ccd45fe6 100644
--- a/drivers/scsi/isci/registers.h
+++ b/drivers/scsi/isci/registers.h
@@ -370,6 +370,27 @@ struct scu_iit_entry {
 		>> SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_SHIFT \
 	)
 
+/* ***************************************************************************** */
+#define SMU_CLOCK_GATING_CONTROL_IDLE_ENABLE_SHIFT    (0)
+#define SMU_CLOCK_GATING_CONTROL_IDLE_ENABLE_MASK     (0x00000001)
+#define SMU_CLOCK_GATING_CONTROL_XCLK_ENABLE_SHIFT    (1)
+#define SMU_CLOCK_GATING_CONTROL_XCLK_ENABLE_MASK     (0x00000002)
+#define SMU_CLOCK_GATING_CONTROL_TXCLK_ENABLE_SHIFT   (2)
+#define SMU_CLOCK_GATING_CONTROL_TXCLK_ENABLE_MASK    (0x00000004)
+#define SMU_CLOCK_GATING_CONTROL_REGCLK_ENABLE_SHIFT  (3)
+#define SMU_CLOCK_GATING_CONTROL_REGCLK_ENABLE_MASK   (0x00000008)
+#define SMU_CLOCK_GATING_CONTROL_IDLE_TIMEOUT_SHIFT   (16)
+#define SMU_CLOCK_GATING_CONTROL_IDLE_TIMEOUT_MASK    (0x000F0000)
+#define SMU_CLOCK_GATING_CONTROL_FORCE_IDLE_SHIFT     (31)
+#define SMU_CLOCK_GATING_CONTROL_FORCE_IDLE_MASK      (0x80000000)
+#define SMU_CLOCK_GATING_CONTROL_RESERVED_MASK        (0x7FF0FFF0)
+
+#define SMU_CGUCR_GEN_VAL(name, value) \
+	SCU_GEN_VALUE(SMU_CLOCK_GATING_CONTROL_##name, value)
+
+#define SMU_CGUCR_GEN_BIT(name) \
+	SCU_GEN_BIT(SMU_CLOCK_GATING_CONTROL_##name)
+
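
The CGUCR accessors compose shifted, masked field values in the style of the other SMU register helpers. A hedged sketch of programming the register, assuming SCU_GEN_VALUE(name, v) expands to (((v) << name##_SHIFT) & name##_MASK) as for the other SMU fields; the smu pointer is an illustrative struct smu_registers __iomem pointer:

    u32 cgucr = SMU_CGUCR_GEN_BIT(IDLE_ENABLE)        /* gate clocks when idle */
              | SMU_CGUCR_GEN_BIT(REGCLK_ENABLE)      /* allow regclk gating   */
              | SMU_CGUCR_GEN_VAL(IDLE_TIMEOUT, 0xA); /* 4-bit idle timeout    */

    writel(cgucr, &smu->clock_gating_control);
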
 /* -------------------------------------------------------------------------- */
 
 #define SMU_CONTROL_STATUS_TASK_CONTEXT_RANGE_ENABLE_SHIFT      (0)
@@ -992,8 +1013,10 @@ struct smu_registers {
 	u32 mmr_address_window;
 /* 0x00A4 SMDW */
 	u32 mmr_data_window;
-	u32 reserved_A8;
-	u32 reserved_AC;
+/* 0x00A8 CGUCR */
+	u32 clock_gating_control;
+/* 0x00AC CGUPC */
+	u32 clock_gating_performance;
 /* A whole bunch of reserved space */
 	u32 reserved_Bx[4];
 	u32 reserved_Cx[4];
diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c
index dd74b6ceeb82..8f501b0a81d6 100644
--- a/drivers/scsi/isci/remote_device.c
+++ b/drivers/scsi/isci/remote_device.c
@@ -62,6 +62,16 @@
 #include "scu_event_codes.h"
 #include "task.h"
 
+#undef C
+#define C(a) (#a)
+const char *dev_state_name(enum sci_remote_device_states state)
+{
+	static const char * const strings[] = REMOTE_DEV_STATES;
+
+	return strings[state];
+}
+#undef C
+
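
dev_state_name() indexes the table directly, which is safe while every caller passes a value from the enum. A defensive variant would clamp out-of-range input; this is a sketch, not the driver's code:

    #define C(a) (#a)
    static const char *dev_state_name_safe(enum sci_remote_device_states state)
    {
            static const char * const strings[] = REMOTE_DEV_STATES;

            if ((unsigned int)state >= ARRAY_SIZE(strings))
                    return "UNKNOWN";
            return strings[state];
    }
    #undef C
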
 /**
  * isci_remote_device_not_ready() - This function is called by the ihost when
  *    the remote device is not ready. We mark the isci device as ready (not
@@ -167,8 +177,8 @@ enum sci_status sci_remote_device_stop(struct isci_remote_device *idev,
 	case SCI_DEV_FAILED:
 	case SCI_DEV_FINAL:
 	default:
-		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
-			 __func__, state);
+		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
+			 __func__, dev_state_name(state));
 		return SCI_FAILURE_INVALID_STATE;
 	case SCI_DEV_STOPPED:
 		return SCI_SUCCESS;
@@ -226,8 +236,8 @@ enum sci_status sci_remote_device_reset(struct isci_remote_device *idev)
 	case SCI_DEV_RESETTING:
 	case SCI_DEV_FINAL:
 	default:
-		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
-			 __func__, state);
+		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
+			 __func__, dev_state_name(state));
 		return SCI_FAILURE_INVALID_STATE;
 	case SCI_DEV_READY:
 	case SCI_STP_DEV_IDLE:
@@ -246,8 +256,8 @@ enum sci_status sci_remote_device_reset_complete(struct isci_remote_device *idev
 	enum sci_remote_device_states state = sm->current_state_id;
 
 	if (state != SCI_DEV_RESETTING) {
-		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
-			 __func__, state);
+		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
+			 __func__, dev_state_name(state));
 		return SCI_FAILURE_INVALID_STATE;
 	}
 
@@ -262,8 +272,8 @@ enum sci_status sci_remote_device_suspend(struct isci_remote_device *idev,
 	enum sci_remote_device_states state = sm->current_state_id;
 
 	if (state != SCI_STP_DEV_CMD) {
-		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
-			 __func__, state);
+		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
+			 __func__, dev_state_name(state));
 		return SCI_FAILURE_INVALID_STATE;
 	}
 
@@ -287,8 +297,8 @@ enum sci_status sci_remote_device_frame_handler(struct isci_remote_device *idev,
 	case SCI_SMP_DEV_IDLE:
 	case SCI_DEV_FINAL:
 	default:
-		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
-			 __func__, state);
+		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
+			 __func__, dev_state_name(state));
 		/* Return the frame back to the controller */
 		sci_controller_release_frame(ihost, frame_index);
 		return SCI_FAILURE_INVALID_STATE;
@@ -502,8 +512,8 @@ enum sci_status sci_remote_device_start_io(struct isci_host *ihost,
 	case SCI_DEV_RESETTING:
 	case SCI_DEV_FINAL:
 	default:
-		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
-			 __func__, state);
+		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
+			 __func__, dev_state_name(state));
 		return SCI_FAILURE_INVALID_STATE;
 	case SCI_DEV_READY:
 		/* attempt to start an io request for this device object. The remote
@@ -637,8 +647,8 @@ enum sci_status sci_remote_device_complete_io(struct isci_host *ihost,
 	case SCI_DEV_FAILED:
 	case SCI_DEV_FINAL:
 	default:
-		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
-			 __func__, state);
+		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
+			 __func__, dev_state_name(state));
 		return SCI_FAILURE_INVALID_STATE;
 	case SCI_DEV_READY:
 	case SCI_STP_DEV_AWAIT_RESET:
@@ -721,8 +731,8 @@ enum sci_status sci_remote_device_start_task(struct isci_host *ihost,
 	case SCI_DEV_RESETTING:
 	case SCI_DEV_FINAL:
 	default:
-		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
-			 __func__, state);
+		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
+			 __func__, dev_state_name(state));
 		return SCI_FAILURE_INVALID_STATE;
 	case SCI_STP_DEV_IDLE:
 	case SCI_STP_DEV_CMD:
@@ -853,8 +863,8 @@ static enum sci_status sci_remote_device_destruct(struct isci_remote_device *ide
 	struct isci_host *ihost;
 
 	if (state != SCI_DEV_STOPPED) {
-		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
-			 __func__, state);
+		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
+			 __func__, dev_state_name(state));
 		return SCI_FAILURE_INVALID_STATE;
 	}
 
@@ -1204,8 +1214,8 @@ static enum sci_status sci_remote_device_start(struct isci_remote_device *idev,
 	enum sci_status status;
 
 	if (state != SCI_DEV_STOPPED) {
-		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
-			 __func__, state);
+		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
+			 __func__, dev_state_name(state));
 		return SCI_FAILURE_INVALID_STATE;
 	}
 
@@ -1308,7 +1318,6 @@ void isci_remote_device_release(struct kref *kref)
 	clear_bit(IDEV_STOP_PENDING, &idev->flags);
 	clear_bit(IDEV_IO_READY, &idev->flags);
 	clear_bit(IDEV_GONE, &idev->flags);
-	clear_bit(IDEV_EH, &idev->flags);
 	smp_mb__before_clear_bit();
 	clear_bit(IDEV_ALLOCATED, &idev->flags);
 	wake_up(&ihost->eventq);
@@ -1381,34 +1390,17 @@ void isci_remote_device_gone(struct domain_device *dev)
  *
  * status, zero indicates success.
  */
-int isci_remote_device_found(struct domain_device *domain_dev)
+int isci_remote_device_found(struct domain_device *dev)
 {
-	struct isci_host *isci_host = dev_to_ihost(domain_dev);
-	struct isci_port *isci_port;
-	struct isci_phy *isci_phy;
-	struct asd_sas_port *sas_port;
-	struct asd_sas_phy *sas_phy;
+	struct isci_host *isci_host = dev_to_ihost(dev);
+	struct isci_port *isci_port = dev->port->lldd_port;
 	struct isci_remote_device *isci_device;
 	enum sci_status status;
 
 	dev_dbg(&isci_host->pdev->dev,
-		"%s: domain_device = %p\n", __func__, domain_dev);
-
-	wait_for_start(isci_host);
-
-	sas_port = domain_dev->port;
-	sas_phy = list_first_entry(&sas_port->phy_list, struct asd_sas_phy,
-				   port_phy_el);
-	isci_phy = to_iphy(sas_phy);
-	isci_port = isci_phy->isci_port;
-
-	/* we are being called for a device on this port,
-	 * so it has to come up eventually
-	 */
-	wait_for_completion(&isci_port->start_complete);
+		"%s: domain_device = %p\n", __func__, dev);
 
-	if ((isci_stopping == isci_port_get_state(isci_port)) ||
-	    (isci_stopped == isci_port_get_state(isci_port)))
+	if (!isci_port)
 		return -ENODEV;
 
 	isci_device = isci_remote_device_alloc(isci_host, isci_port);
@@ -1419,7 +1411,7 @@ int isci_remote_device_found(struct domain_device *domain_dev)
 	INIT_LIST_HEAD(&isci_device->node);
 
 	spin_lock_irq(&isci_host->scic_lock);
-	isci_device->domain_dev = domain_dev;
+	isci_device->domain_dev = dev;
 	isci_device->isci_port = isci_port;
 	list_add_tail(&isci_device->node, &isci_port->remote_dev_list);
 
@@ -1432,7 +1424,7 @@ int isci_remote_device_found(struct domain_device *domain_dev)
 
 	if (status == SCI_SUCCESS) {
 		/* device came up, advertise it to the world */
-		domain_dev->lldd_dev = isci_device;
+		dev->lldd_dev = isci_device;
 	} else
 		isci_put_device(isci_device);
 	spin_unlock_irq(&isci_host->scic_lock);
diff --git a/drivers/scsi/isci/remote_device.h b/drivers/scsi/isci/remote_device.h
index 483ee50152f3..58637ee08f55 100644
--- a/drivers/scsi/isci/remote_device.h
+++ b/drivers/scsi/isci/remote_device.h
@@ -82,10 +82,9 @@ struct isci_remote_device {
 	#define IDEV_START_PENDING 0
 	#define IDEV_STOP_PENDING 1
 	#define IDEV_ALLOCATED 2
-	#define IDEV_EH 3
-	#define IDEV_GONE 4
-	#define IDEV_IO_READY 5
-	#define IDEV_IO_NCQERROR 6
+	#define IDEV_GONE 3
+	#define IDEV_IO_READY 4
+	#define IDEV_IO_NCQERROR 5
 	unsigned long flags;
 	struct kref kref;
 	struct isci_port *isci_port;
@@ -180,122 +179,101 @@ enum sci_status sci_remote_device_reset_complete(
 /**
  * enum sci_remote_device_states - This enumeration depicts all the states
  *    for the common remote device state machine.
+ * @SCI_DEV_INITIAL: Simply the initial state for the base remote device
+ * state machine.
  *
+ * @SCI_DEV_STOPPED: This state indicates that the remote device has
+ * successfully been stopped.  In this state no new IO operations are
+ * permitted.  This state is entered from the INITIAL state.  This state
+ * is entered from the STOPPING state.
  *
+ * @SCI_DEV_STARTING: This state indicates that the remote device is in
+ * the process of becoming ready (i.e. starting).  In this state no new
+ * IO operations are permitted.  This state is entered from the STOPPED
+ * state.
+ *
+ * @SCI_DEV_READY: This state indicates the remote device is now ready.
+ * Thus, the user is able to perform IO operations on the remote device.
+ * This state is entered from the STARTING state.
+ *
+ * @SCI_STP_DEV_IDLE: This is the idle substate for the stp remote
+ * device.  When there are no active IOs for the device it is in this
+ * state.
+ *
+ * @SCI_STP_DEV_CMD: This is the command state for the STP remote
+ * device.  This state is entered when the device is processing a
+ * non-NCQ command.  The device object will fail any new start IO
+ * requests until this command is complete.
+ *
+ * @SCI_STP_DEV_NCQ: This is the NCQ state for the STP remote device.
+ * This state is entered when the device is processing an NCQ request.
+ * It will remain in this state so long as there is one or more NCQ
+ * requests being processed.
+ *
+ * @SCI_STP_DEV_NCQ_ERROR: This is the NCQ error state for the STP
+ * remote device.  This state is entered when an SDB error FIS is
+ * received by the device object while in the NCQ state.  The device
+ * object will only accept a READ LOG command while in this state.
+ *
+ * @SCI_STP_DEV_ATAPI_ERROR: This is the ATAPI error state for the STP
+ * ATAPI remote device.  This state is entered when the ATAPI device sends
+ * error status FIS without data while the device object is in CMD
+ * state.  A suspension event is expected in this state.  The device
+ * object will resume right away.
+ *
+ * @SCI_STP_DEV_AWAIT_RESET: This READY substate indicates the device
+ * is waiting for a RESET task in order to recover from a hardware
+ * specific error.
+ *
+ * @SCI_SMP_DEV_IDLE: This is the ready operational substate for the
+ * remote device.  This is the normal operational state for a remote
+ * device.
+ *
+ * @SCI_SMP_DEV_CMD: This is the suspended state for the remote device.
+ * This is the state that the device is placed in when a RNC suspend is
+ * received by the SCU hardware.
+ *
+ * @SCI_DEV_STOPPING: This state indicates that the remote device is in
+ * the process of stopping.  In this state no new IO operations are
+ * permitted, but existing IO operations are allowed to complete.  This
+ * state is entered from the READY state.  This state is entered from
+ * the FAILED state.
+ *
+ * @SCI_DEV_FAILED: This state indicates that the remote device has
+ * failed.  In this state no new IO operations are permitted.  This
+ * state is entered from the INITIALIZING state.  This state is entered
+ * from the READY state.
+ *
+ * @SCI_DEV_RESETTING: This state indicates the device is being reset.
+ * In this state no new IO operations are permitted.  This state is
+ * entered from the READY state.
+ *
+ * @SCI_DEV_FINAL: Simply the final state for the base remote device
+ * state machine.
  */
-enum sci_remote_device_states {
-	/**
-	 * Simply the initial state for the base remote device state machine.
-	 */
-	SCI_DEV_INITIAL,
-
-	/**
-	 * This state indicates that the remote device has successfully been
-	 * stopped.  In this state no new IO operations are permitted.
-	 * This state is entered from the INITIAL state.
-	 * This state is entered from the STOPPING state.
-	 */
-	SCI_DEV_STOPPED,
-
-	/**
-	 * This state indicates the the remote device is in the process of
-	 * becoming ready (i.e. starting).  In this state no new IO operations
-	 * are permitted.
-	 * This state is entered from the STOPPED state.
-	 */
-	SCI_DEV_STARTING,
-
-	/**
-	 * This state indicates the remote device is now ready.  Thus, the user
-	 * is able to perform IO operations on the remote device.
-	 * This state is entered from the STARTING state.
-	 */
-	SCI_DEV_READY,
-
-	/**
-	 * This is the idle substate for the stp remote device.  When there are no
-	 * active IO for the device it is is in this state.
-	 */
-	SCI_STP_DEV_IDLE,
-
-	/**
-	 * This is the command state for for the STP remote device.  This state is
-	 * entered when the device is processing a non-NCQ command.  The device object
-	 * will fail any new start IO requests until this command is complete.
-	 */
-	SCI_STP_DEV_CMD,
-
-	/**
-	 * This is the NCQ state for the STP remote device.  This state is entered
-	 * when the device is processing an NCQ reuqest.  It will remain in this state
-	 * so long as there is one or more NCQ requests being processed.
-	 */
-	SCI_STP_DEV_NCQ,
-
-	/**
-	 * This is the NCQ error state for the STP remote device.  This state is
-	 * entered when an SDB error FIS is received by the device object while in the
-	 * NCQ state.  The device object will only accept a READ LOG command while in
-	 * this state.
-	 */
-	SCI_STP_DEV_NCQ_ERROR,
-
-	/**
-	 * This is the ATAPI error state for the STP ATAPI remote device.
-	 * This state is entered when ATAPI device sends error status FIS
-	 * without data while the device object is in CMD state.
-	 * A suspension event is expected in this state.
-	 * The device object will resume right away.
-	 */
-	SCI_STP_DEV_ATAPI_ERROR,
-
-	/**
-	 * This is the READY substate indicates the device is waiting for the RESET task
-	 * coming to be recovered from certain hardware specific error.
-	 */
-	SCI_STP_DEV_AWAIT_RESET,
-
-	/**
-	 * This is the ready operational substate for the remote device.  This is the
-	 * normal operational state for a remote device.
-	 */
-	SCI_SMP_DEV_IDLE,
-
-	/**
-	 * This is the suspended state for the remote device.  This is the state that
-	 * the device is placed in when a RNC suspend is received by the SCU hardware.
-	 */
-	SCI_SMP_DEV_CMD,
-
-	/**
-	 * This state indicates that the remote device is in the process of
-	 * stopping.  In this state no new IO operations are permitted, but
-	 * existing IO operations are allowed to complete.
-	 * This state is entered from the READY state.
-	 * This state is entered from the FAILED state.
-	 */
-	SCI_DEV_STOPPING,
-
-	/**
-	 * This state indicates that the remote device has failed.
-	 * In this state no new IO operations are permitted.
-	 * This state is entered from the INITIALIZING state.
-	 * This state is entered from the READY state.
-	 */
-	SCI_DEV_FAILED,
-
-	/**
-	 * This state indicates the device is being reset.
-	 * In this state no new IO operations are permitted.
-	 * This state is entered from the READY state.
-	 */
-	SCI_DEV_RESETTING,
-
-	/**
-	 * Simply the final state for the base remote device state machine.
-	 */
-	SCI_DEV_FINAL,
-};
+#define REMOTE_DEV_STATES {\
+	C(DEV_INITIAL),\
+	C(DEV_STOPPED),\
+	C(DEV_STARTING),\
+	C(DEV_READY),\
+	C(STP_DEV_IDLE),\
+	C(STP_DEV_CMD),\
+	C(STP_DEV_NCQ),\
+	C(STP_DEV_NCQ_ERROR),\
+	C(STP_DEV_ATAPI_ERROR),\
+	C(STP_DEV_AWAIT_RESET),\
+	C(SMP_DEV_IDLE),\
+	C(SMP_DEV_CMD),\
+	C(DEV_STOPPING),\
+	C(DEV_FAILED),\
+	C(DEV_RESETTING),\
+	C(DEV_FINAL),\
+	}
+#undef C
+#define C(a) SCI_##a
+enum sci_remote_device_states REMOTE_DEV_STATES;
+#undef C
+const char *dev_state_name(enum sci_remote_device_states state);
 
 static inline struct isci_remote_device *rnc_to_dev(struct sci_remote_node_context *rnc)
 {
diff --git a/drivers/scsi/isci/remote_node_context.c b/drivers/scsi/isci/remote_node_context.c
index 748e8339d1ec..3a9463481f38 100644
--- a/drivers/scsi/isci/remote_node_context.c
+++ b/drivers/scsi/isci/remote_node_context.c
@@ -60,18 +60,15 @@
 #include "scu_event_codes.h"
 #include "scu_task_context.h"
 
+#undef C
+#define C(a) (#a)
+const char *rnc_state_name(enum scis_sds_remote_node_context_states state)
+{
+	static const char * const strings[] = RNC_STATES;
 
-/**
- *
- * @sci_rnc: The RNC for which the is posted request is being made.
- *
- * This method will return true if the RNC is not in the initial state.  In all
- * other states the RNC is considered active and this will return true. The
- * destroy request of the state machine drives the RNC back to the initial
- * state.  If the state machine changes then this routine will also have to be
- * changed. bool true if the state machine is not in the initial state false if
- * the state machine is in the initial state
- */
+	return strings[state];
+}
+#undef C
 
 /**
  *
diff --git a/drivers/scsi/isci/remote_node_context.h b/drivers/scsi/isci/remote_node_context.h
index 41580ad12520..a241e0f4c865 100644
--- a/drivers/scsi/isci/remote_node_context.h
+++ b/drivers/scsi/isci/remote_node_context.h
@@ -85,61 +85,50 @@ struct sci_remote_node_context;
 typedef void (*scics_sds_remote_node_context_callback)(void *);
 
 /**
- * This is the enumeration of the remote node context states.
+ * enum scis_sds_remote_node_context_states
+ * @SCI_RNC_INITIAL: initial state for a remote node context.  On a resume
+ * request the remote node context will transition to the posting state.
+ *
+ * @SCI_RNC_POSTING: transition state that posts the RNi to the hardware. Once
+ * the RNC is posted the remote node context will be made ready.
+ *
+ * @SCI_RNC_INVALIDATING: transition state that will post an RNC invalidate to
+ * the hardware.  Once the invalidate is complete the remote node context will
+ * transition to the posting state.
+ *
+ * @SCI_RNC_RESUMING: transition state that will post an RNC resume to the
+ * hardware.  Once the event notification of resume complete is received the
+ * remote node context will transition to the ready state.
+ *
+ * @SCI_RNC_READY: state that the remote node context must be in to accept io
+ * request operations.
+ *
+ * @SCI_RNC_TX_SUSPENDED: state that the remote node context transitions to when
+ * it gets a TX suspend notification from the hardware.
+ *
+ * @SCI_RNC_TX_RX_SUSPENDED: state that the remote node context transitions to
+ * when it gets a TX RX suspend notification from the hardware.
+ *
+ * @SCI_RNC_AWAIT_SUSPENSION: wait state for the remote node context that waits
+ * for a suspend notification from the hardware.  This state is entered when
+ * either there is a request to suspend the remote node context or when there is
+ * a TC completion where the remote node will be suspended by the hardware.
  */
-enum scis_sds_remote_node_context_states {
-	/**
-	 * This state is the initial state for a remote node context.  On a resume
-	 * request the remote node context will transition to the posting state.
-	 */
-	SCI_RNC_INITIAL,
-
-	/**
-	 * This is a transition state that posts the RNi to the hardware. Once the RNC
-	 * is posted the remote node context will be made ready.
-	 */
-	SCI_RNC_POSTING,
-
-	/**
-	 * This is a transition state that will post an RNC invalidate to the
-	 * hardware.  Once the invalidate is complete the remote node context will
-	 * transition to the posting state.
-	 */
-	SCI_RNC_INVALIDATING,
-
-	/**
-	 * This is a transition state that will post an RNC resume to the hardare.
-	 * Once the event notification of resume complete is received the remote node
-	 * context will transition to the ready state.
-	 */
-	SCI_RNC_RESUMING,
-
-	/**
-	 * This is the state that the remote node context must be in to accept io
-	 * request operations.
-	 */
-	SCI_RNC_READY,
-
-	/**
-	 * This is the state that the remote node context transitions to when it gets
-	 * a TX suspend notification from the hardware.
-	 */
-	SCI_RNC_TX_SUSPENDED,
-
-	/**
-	 * This is the state that the remote node context transitions to when it gets
-	 * a TX RX suspend notification from the hardware.
-	 */
-	SCI_RNC_TX_RX_SUSPENDED,
-
-	/**
-	 * This state is a wait state for the remote node context that waits for a
-	 * suspend notification from the hardware.  This state is entered when either
-	 * there is a request to supend the remote node context or when there is a TC
-	 * completion where the remote node will be suspended by the hardware.
-	 */
-	SCI_RNC_AWAIT_SUSPENSION
-};
+#define RNC_STATES {\
+	C(RNC_INITIAL),\
+	C(RNC_POSTING),\
+	C(RNC_INVALIDATING),\
+	C(RNC_RESUMING),\
+	C(RNC_READY),\
+	C(RNC_TX_SUSPENDED),\
+	C(RNC_TX_RX_SUSPENDED),\
+	C(RNC_AWAIT_SUSPENSION),\
+	}
+#undef C
+#define C(a) SCI_##a
+enum scis_sds_remote_node_context_states RNC_STATES;
+#undef C
+const char *rnc_state_name(enum scis_sds_remote_node_context_states state);
 
 /**
  *
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index ee0dc05c6269..2def1e3960f6 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -53,6 +53,7 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
+#include <scsi/scsi_cmnd.h>
 #include "isci.h"
 #include "task.h"
 #include "request.h"
@@ -60,6 +61,16 @@
 #include "scu_event_codes.h"
 #include "sas.h"
 
+#undef C
+#define C(a) (#a)
+const char *req_state_name(enum sci_base_request_states state)
+{
+	static const char * const strings[] = REQUEST_STATES;
+
+	return strings[state];
+}
+#undef C
+
 static struct scu_sgl_element_pair *to_sgl_element_pair(struct isci_request *ireq,
 							int idx)
 {
@@ -264,6 +275,141 @@ static void scu_ssp_reqeust_construct_task_context(
 	task_context->response_iu_lower = lower_32_bits(dma_addr);
 }
 
+static u8 scu_bg_blk_size(struct scsi_device *sdp)
+{
+	switch (sdp->sector_size) {
+	case 512:
+		return 0;
+	case 1024:
+		return 1;
+	case 4096:
+		return 3;
+	default:
+		return 0xff;
+	}
+}
+
+static u32 scu_dif_bytes(u32 len, u32 sector_size)
+{
+	return (len >> ilog2(sector_size)) * 8;
+}
+
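
scu_bg_blk_size() maps the logical block size to the controller's 2-bit encoding (512 -> 0, 1024 -> 1, 4096 -> 3, with 0xff flagging an unsupported size), and scu_dif_bytes() charges one 8-byte protection-information tuple per sector. A worked example:

    u32 len = 32768;                       /* 32 KiB transfer          */
    u32 dif = scu_dif_bytes(len, 512);     /* (32768 >> 9) * 8 == 512  */
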
+static void scu_ssp_ireq_dif_insert(struct isci_request *ireq, u8 type, u8 op)
+{
+	struct scu_task_context *tc = ireq->tc;
+	struct scsi_cmnd *scmd = ireq->ttype_ptr.io_task_ptr->uldd_task;
+	u8 blk_sz = scu_bg_blk_size(scmd->device);
+
+	tc->block_guard_enable = 1;
+	tc->blk_prot_en = 1;
+	tc->blk_sz = blk_sz;
+	/* DIF write insert */
+	tc->blk_prot_func = 0x2;
+
+	tc->transfer_length_bytes += scu_dif_bytes(tc->transfer_length_bytes,
+						   scmd->device->sector_size);
+
+	/* always init to 0, used by hw */
+	tc->interm_crc_val = 0;
+
+	tc->init_crc_seed = 0;
+	tc->app_tag_verify = 0;
+	tc->app_tag_gen = 0;
+	tc->ref_tag_seed_verify = 0;
+
+	/* always init to same as bg_blk_sz */
+	tc->UD_bytes_immed_val = scmd->device->sector_size;
+
+	tc->reserved_DC_0 = 0;
+
+	/* always init to 8 */
+	tc->DIF_bytes_immed_val = 8;
+
+	tc->reserved_DC_1 = 0;
+	tc->bgc_blk_sz = scmd->device->sector_size;
+	tc->reserved_E0_0 = 0;
+	tc->app_tag_gen_mask = 0;
+
+	/** setup block guard control **/
+	tc->bgctl = 0;
+
+	/* DIF write insert */
+	tc->bgctl_f.op = 0x2;
+
+	tc->app_tag_verify_mask = 0;
+
+	/* must init to 0 for hw */
+	tc->blk_guard_err = 0;
+
+	tc->reserved_E8_0 = 0;
+
+	if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2))
+		tc->ref_tag_seed_gen = scsi_get_lba(scmd) & 0xffffffff;
+	else if (type & SCSI_PROT_DIF_TYPE3)
+		tc->ref_tag_seed_gen = 0;
+}
+
+static void scu_ssp_ireq_dif_strip(struct isci_request *ireq, u8 type, u8 op)
+{
+	struct scu_task_context *tc = ireq->tc;
+	struct scsi_cmnd *scmd = ireq->ttype_ptr.io_task_ptr->uldd_task;
+	u8 blk_sz = scu_bg_blk_size(scmd->device);
+
+	tc->block_guard_enable = 1;
+	tc->blk_prot_en = 1;
+	tc->blk_sz = blk_sz;
+	/* DIF read strip */
+	tc->blk_prot_func = 0x1;
+
+	tc->transfer_length_bytes += scu_dif_bytes(tc->transfer_length_bytes,
+						   scmd->device->sector_size);
+
+	/* always init to 0, used by hw */
+	tc->interm_crc_val = 0;
+
+	tc->init_crc_seed = 0;
+	tc->app_tag_verify = 0;
+	tc->app_tag_gen = 0;
+
+	if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2))
+		tc->ref_tag_seed_verify = scsi_get_lba(scmd) & 0xffffffff;
+	else if (type & SCSI_PROT_DIF_TYPE3)
+		tc->ref_tag_seed_verify = 0;
+
+	/* always init to same as bg_blk_sz */
+	tc->UD_bytes_immed_val = scmd->device->sector_size;
+
+	tc->reserved_DC_0 = 0;
+
+	/* always init to 8 */
+	tc->DIF_bytes_immed_val = 8;
+
+	tc->reserved_DC_1 = 0;
+	tc->bgc_blk_sz = scmd->device->sector_size;
+	tc->reserved_E0_0 = 0;
+	tc->app_tag_gen_mask = 0;
+
+	/** setup block guard control **/
+	tc->bgctl = 0;
+
+	/* DIF read strip */
+	tc->bgctl_f.crc_verify = 1;
+	tc->bgctl_f.op = 0x1;
+	if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2)) {
+		tc->bgctl_f.ref_tag_chk = 1;
+		tc->bgctl_f.app_f_detect = 1;
+	} else if (type & SCSI_PROT_DIF_TYPE3)
+		tc->bgctl_f.app_ref_f_detect = 1;
+
+	tc->app_tag_verify_mask = 0;
+
+	/* must init to 0 for hw */
+	tc->blk_guard_err = 0;
+
+	tc->reserved_E8_0 = 0;
+	tc->ref_tag_seed_gen = 0;
+}
+
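
For DIF Types 1 and 2 the reference tag is seeded from the command's starting LBA, truncated to 32 bits; Type 3 carries no reference tag check, so the seed stays zero. A worked example of the truncation:

    u64 lba  = 0x123456789ULL;      /* starting LBA of the command  */
    u32 seed = lba & 0xffffffff;    /* 0x23456789 seeds the ref tag */
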
 /**
  * This method will fill in the SCU Task Context for an SSP IO request.
  * @sci_req:
@@ -274,6 +420,10 @@ static void scu_ssp_io_request_construct_task_context(struct isci_request *ireq,
 						      u32 len)
 {
 	struct scu_task_context *task_context = ireq->tc;
+	struct sas_task *sas_task = ireq->ttype_ptr.io_task_ptr;
+	struct scsi_cmnd *scmd = sas_task->uldd_task;
+	u8 prot_type = scsi_get_prot_type(scmd);
+	u8 prot_op = scsi_get_prot_op(scmd);
 
 	scu_ssp_reqeust_construct_task_context(ireq, task_context);
 
@@ -296,6 +446,13 @@ static void scu_ssp_io_request_construct_task_context(struct isci_request *ireq,
 
 	if (task_context->transfer_length_bytes > 0)
 		sci_request_build_sgl(ireq);
+
+	if (prot_type != SCSI_PROT_DIF_TYPE0) {
+		if (prot_op == SCSI_PROT_READ_STRIP)
+			scu_ssp_ireq_dif_strip(ireq, prot_type, prot_op);
+		else if (prot_op == SCSI_PROT_WRITE_INSERT)
+			scu_ssp_ireq_dif_insert(ireq, prot_type, prot_op);
+	}
 }
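
Only the host-managed protection operations reach the hardware DIF engine here: READ_STRIP (the HBA verifies and removes PI on reads) and WRITE_INSERT (the HBA generates PI on writes); everything else passes through untouched. A hedged illustration of the same dispatch on the scsi_cmnd accessors:

    switch (scsi_get_prot_op(scmd)) {
    case SCSI_PROT_WRITE_INSERT:    /* HBA generates PI on the wire */
    case SCSI_PROT_READ_STRIP:      /* HBA verifies and strips PI   */
            /* hardware-assisted DIF path */
            break;
    default:
            /* PI absent or passed through */
            break;
    }
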
 
 /**
@@ -519,18 +676,12 @@ sci_io_request_construct_sata(struct isci_request *ireq,
 	if (test_bit(IREQ_TMF, &ireq->flags)) {
 		struct isci_tmf *tmf = isci_request_access_tmf(ireq);
 
-		if (tmf->tmf_code == isci_tmf_sata_srst_high ||
-		    tmf->tmf_code == isci_tmf_sata_srst_low) {
-			scu_stp_raw_request_construct_task_context(ireq);
-			return SCI_SUCCESS;
-		} else {
-			dev_err(&ireq->owning_controller->pdev->dev,
-				"%s: Request 0x%p received un-handled SAT "
-				"management protocol 0x%x.\n",
-				__func__, ireq, tmf->tmf_code);
+		dev_err(&ireq->owning_controller->pdev->dev,
+			"%s: Request 0x%p received un-handled SAT "
+			"management protocol 0x%x.\n",
+			__func__, ireq, tmf->tmf_code);
 
-			return SCI_FAILURE;
-		}
+		return SCI_FAILURE;
 	}
 
 	if (!sas_protocol_ata(task->task_proto)) {
@@ -627,34 +778,6 @@ static enum sci_status sci_io_request_construct_basic_sata(struct isci_request *
 	return status;
 }
 
-enum sci_status sci_task_request_construct_sata(struct isci_request *ireq)
-{
-	enum sci_status status = SCI_SUCCESS;
-
-	/* check for management protocols */
-	if (test_bit(IREQ_TMF, &ireq->flags)) {
-		struct isci_tmf *tmf = isci_request_access_tmf(ireq);
-
-		if (tmf->tmf_code == isci_tmf_sata_srst_high ||
-		    tmf->tmf_code == isci_tmf_sata_srst_low) {
-			scu_stp_raw_request_construct_task_context(ireq);
-		} else {
-			dev_err(&ireq->owning_controller->pdev->dev,
-				"%s: Request 0x%p received un-handled SAT "
-				"Protocol 0x%x.\n",
-				__func__, ireq, tmf->tmf_code);
-
-			return SCI_FAILURE;
-		}
-	}
-
-	if (status != SCI_SUCCESS)
-		return status;
-	sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
-
-	return status;
-}
-
 /**
  * sci_req_tx_bytes - bytes transferred when reply underruns request
  * @ireq: request that was terminated early
@@ -756,9 +879,6 @@ sci_io_request_terminate(struct isci_request *ireq)
 	case SCI_REQ_STP_PIO_WAIT_FRAME:
 	case SCI_REQ_STP_PIO_DATA_IN:
 	case SCI_REQ_STP_PIO_DATA_OUT:
-	case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED:
-	case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG:
-	case SCI_REQ_STP_SOFT_RESET_WAIT_D2H:
 	case SCI_REQ_ATAPI_WAIT_H2D:
 	case SCI_REQ_ATAPI_WAIT_PIO_SETUP:
 	case SCI_REQ_ATAPI_WAIT_D2H:
@@ -800,7 +920,8 @@ enum sci_status sci_request_complete(struct isci_request *ireq)
 
 	state = ireq->sm.current_state_id;
 	if (WARN_ONCE(state != SCI_REQ_COMPLETED,
-		      "isci: request completion from wrong state (%d)\n", state))
+		      "isci: request completion from wrong state (%s)\n",
+		      req_state_name(state)))
 		return SCI_FAILURE_INVALID_STATE;
 
 	if (ireq->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX)
@@ -821,8 +942,8 @@ enum sci_status sci_io_request_event_handler(struct isci_request *ireq,
 	state = ireq->sm.current_state_id;
 
 	if (state != SCI_REQ_STP_PIO_DATA_IN) {
-		dev_warn(&ihost->pdev->dev, "%s: (%x) in wrong state %d\n",
-			 __func__, event_code, state);
+		dev_warn(&ihost->pdev->dev, "%s: (%x) in wrong state %s\n",
+			 __func__, event_code, req_state_name(state));
 
 		return SCI_FAILURE_INVALID_STATE;
 	}
@@ -1938,59 +2059,6 @@ sci_io_request_frame_handler(struct isci_request *ireq,
 		return status;
 	}
 
-	case SCI_REQ_STP_SOFT_RESET_WAIT_D2H: {
-		struct dev_to_host_fis *frame_header;
-		u32 *frame_buffer;
-
-		status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
-								       frame_index,
-								       (void **)&frame_header);
-		if (status != SCI_SUCCESS) {
-			dev_err(&ihost->pdev->dev,
-				"%s: SCIC IO Request 0x%p could not get frame "
-				"header for frame index %d, status %x\n",
-				__func__,
-				stp_req,
-				frame_index,
-				status);
-			return status;
-		}
-
-		switch (frame_header->fis_type) {
-		case FIS_REGD2H:
-			sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
-								      frame_index,
-								      (void **)&frame_buffer);
-
-			sci_controller_copy_sata_response(&ireq->stp.rsp,
-							       frame_header,
-							       frame_buffer);
-
-			/* The command has completed with error */
-			ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
-			ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
-			break;
-
-		default:
-			dev_warn(&ihost->pdev->dev,
-				 "%s: IO Request:0x%p Frame Id:%d protocol "
-				 "violation occurred\n",
-				 __func__,
-				 stp_req,
-				 frame_index);
-
-			ireq->scu_status = SCU_TASK_DONE_UNEXP_FIS;
-			ireq->sci_status = SCI_FAILURE_PROTOCOL_VIOLATION;
-			break;
-		}
-
-		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
-
-		/* Frame has been decoded return it to the controller */
-		sci_controller_release_frame(ihost, frame_index);
-
-		return status;
-	}
 	case SCI_REQ_ATAPI_WAIT_PIO_SETUP: {
 		struct sas_task *task = isci_request_access_task(ireq);
 
@@ -2088,57 +2156,6 @@ static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq
 	return status;
 }
 
-static enum sci_status
-stp_request_soft_reset_await_h2d_asserted_tc_event(struct isci_request *ireq,
-						   u32 completion_code)
-{
-	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
-	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
-		ireq->scu_status = SCU_TASK_DONE_GOOD;
-		ireq->sci_status = SCI_SUCCESS;
-		sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG);
-		break;
-
-	default:
-		/*
-		 * All other completion status cause the IO to be complete.
-		 * If a NAK was received, then it is up to the user to retry
-		 * the request.
-		 */
-		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
-		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
-		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
-		break;
-	}
-
-	return SCI_SUCCESS;
-}
-
-static enum sci_status
-stp_request_soft_reset_await_h2d_diagnostic_tc_event(struct isci_request *ireq,
-						     u32 completion_code)
-{
-	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
-	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
-		ireq->scu_status = SCU_TASK_DONE_GOOD;
-		ireq->sci_status = SCI_SUCCESS;
-		sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_D2H);
-		break;
-
-	default:
-		/* All other completion status cause the IO to be complete.  If
-		 * a NAK was received, then it is up to the user to retry the
-		 * request.
-		 */
-		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
-		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
-		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
-		break;
-	}
-
-	return SCI_SUCCESS;
-}
-
 static enum sci_status atapi_raw_completion(struct isci_request *ireq, u32 completion_code,
 						  enum sci_base_request_states next)
 {
@@ -2284,14 +2301,6 @@ sci_io_request_tc_completion(struct isci_request *ireq,
 	case SCI_REQ_STP_PIO_DATA_OUT:
 		return pio_data_out_tx_done_tc_event(ireq, completion_code);
 
-	case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED:
-		return stp_request_soft_reset_await_h2d_asserted_tc_event(ireq,
-									  completion_code);
-
-	case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG:
-		return stp_request_soft_reset_await_h2d_diagnostic_tc_event(ireq,
-									    completion_code);
-
 	case SCI_REQ_ABORTING:
 		return request_aborting_state_tc_event(ireq,
 						       completion_code);
@@ -2308,12 +2317,8 @@ sci_io_request_tc_completion(struct isci_request *ireq,
 		return atapi_data_tc_completion_handler(ireq, completion_code);
 
 	default:
-		dev_warn(&ihost->pdev->dev,
-			 "%s: SCIC IO Request given task completion "
-			 "notification %x while in wrong state %d\n",
-			 __func__,
-			 completion_code,
-			 state);
+		dev_warn(&ihost->pdev->dev, "%s: %x in wrong state %s\n",
+			 __func__, completion_code, req_state_name(state));
 		return SCI_FAILURE_INVALID_STATE;
 	}
 }
@@ -3065,10 +3070,6 @@ static void sci_request_started_state_enter(struct sci_base_state_machine *sm)
 	 */
 	if (!task && dev->dev_type == SAS_END_DEV) {
 		state = SCI_REQ_TASK_WAIT_TC_COMP;
-	} else if (!task &&
-		   (isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_high ||
-		    isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_low)) {
-		state = SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED;
 	} else if (task && task->task_proto == SAS_PROTOCOL_SMP) {
 		state = SCI_REQ_SMP_WAIT_RESP;
 	} else if (task && sas_protocol_ata(task->task_proto) &&
@@ -3125,31 +3126,6 @@ static void sci_stp_request_started_pio_await_h2d_completion_enter(struct sci_ba
 	ireq->target_device->working_request = ireq;
 }
 
-static void sci_stp_request_started_soft_reset_await_h2d_asserted_completion_enter(struct sci_base_state_machine *sm)
-{
-	struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
-
-	ireq->target_device->working_request = ireq;
-}
-
-static void sci_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter(struct sci_base_state_machine *sm)
-{
-	struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
-	struct scu_task_context *tc = ireq->tc;
-	struct host_to_dev_fis *h2d_fis;
-	enum sci_status status;
-
-	/* Clear the SRST bit */
-	h2d_fis = &ireq->stp.cmd;
-	h2d_fis->control = 0;
-
-	/* Clear the TC control bit */
-	tc->control_frame = 0;
-
-	status = sci_controller_continue_io(ireq);
-	WARN_ONCE(status != SCI_SUCCESS, "isci: continue io failure\n");
-}
-
 static const struct sci_base_state sci_request_state_table[] = {
 	[SCI_REQ_INIT] = { },
 	[SCI_REQ_CONSTRUCTED] = { },
@@ -3168,13 +3144,6 @@ static const struct sci_base_state sci_request_state_table[] = {
 	[SCI_REQ_STP_PIO_DATA_OUT] = { },
 	[SCI_REQ_STP_UDMA_WAIT_TC_COMP] = { },
 	[SCI_REQ_STP_UDMA_WAIT_D2H] = { },
-	[SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED] = {
-		.enter_state = sci_stp_request_started_soft_reset_await_h2d_asserted_completion_enter,
-	},
-	[SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG] = {
-		.enter_state = sci_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter,
-	},
-	[SCI_REQ_STP_SOFT_RESET_WAIT_D2H] = { },
 	[SCI_REQ_TASK_WAIT_TC_COMP] = { },
 	[SCI_REQ_TASK_WAIT_TC_RESP] = { },
 	[SCI_REQ_SMP_WAIT_RESP] = { },
@@ -3649,8 +3618,7 @@ int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *ide
 		/* Cause this task to be scheduled in the SCSI error
 		 * handler thread.
 		 */
-		isci_execpath_callback(ihost, task,
-				       sas_task_abort);
+		sas_task_abort(task);
 
 		/* Change the status, since we are holding
 		 * the I/O until it is managed by the SCSI
diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h
index be38933dd6df..057f2378452d 100644
--- a/drivers/scsi/isci/request.h
+++ b/drivers/scsi/isci/request.h
@@ -182,138 +182,103 @@ static inline struct isci_request *to_ireq(struct isci_stp_request *stp_req)
 }
 
 /**
- * enum sci_base_request_states - This enumeration depicts all the states for
- *    the common request state machine.
+ * enum sci_base_request_states - request state machine states
  *
+ * @SCI_REQ_INIT: Simply the initial state for the base request state machine.
  *
+ * @SCI_REQ_CONSTRUCTED: This state indicates that the request has been
+ * constructed.  This state is entered from the INITIAL state.
+ *
+ * @SCI_REQ_STARTED: This state indicates that the request has been started.
+ * This state is entered from the CONSTRUCTED state.
+ *
+ * @SCI_REQ_STP_UDMA_WAIT_TC_COMP:
+ * @SCI_REQ_STP_UDMA_WAIT_D2H:
+ * @SCI_REQ_STP_NON_DATA_WAIT_H2D:
+ * @SCI_REQ_STP_NON_DATA_WAIT_D2H:
+ *
+ * @SCI_REQ_STP_PIO_WAIT_H2D: While in this state the IO request object is
+ * waiting for the TC completion notification for the H2D Register FIS
+ *
+ * @SCI_REQ_STP_PIO_WAIT_FRAME: While in this state the IO request object is
+ * waiting for either a PIO Setup FIS or a D2H register FIS.  The type of frame
+ * received is based on the result of the prior frame and line conditions.
+ *
+ * @SCI_REQ_STP_PIO_DATA_IN: While in this state the IO request object is
+ * waiting for a DATA frame from the device.
+ *
+ * @SCI_REQ_STP_PIO_DATA_OUT: While in this state the IO request object is
+ * waiting to transmit the next data frame to the device.
+ *
+ * @SCI_REQ_ATAPI_WAIT_H2D: While in this state the IO request object is
+ * waiting for the TC completion notification for the H2D Register FIS
+ *
+ * @SCI_REQ_ATAPI_WAIT_PIO_SETUP: While in this state the IO request object is
+ * waiting for a PIO Setup FIS.
+ *
+ * @SCI_REQ_ATAPI_WAIT_D2H: The non-data IO transitions to this state
+ * after receiving a TC completion.  While in this state the IO request
+ * object is waiting for the D2H status frame as a UF.
+ *
+ * @SCI_REQ_ATAPI_WAIT_TC_COMP: When transmitting raw frames hardware reports
+ * task context completion after every frame submission, so in the
+ * non-accelerated case we need to expect the completion for the "cdb" frame.
+ *
+ * @SCI_REQ_TASK_WAIT_TC_COMP: The AWAIT_TC_COMPLETION sub-state indicates that
+ * the started raw task management request is waiting for the transmission of
+ * the initial frame (i.e. command, task, etc.).
+ *
+ * @SCI_REQ_TASK_WAIT_TC_RESP: This sub-state indicates that the started task
+ * management request is waiting for the reception of an unsolicited frame
+ * (i.e.  response IU).
+ *
+ * @SCI_REQ_SMP_WAIT_RESP: This sub-state indicates that the started SMP
+ * request is waiting for the reception of an unsolicited frame (i.e.
+ * response frame).
+ *
+ * @SCI_REQ_SMP_WAIT_TC_COMP: The AWAIT_TC_COMPLETION sub-state indicates that
+ * the started SMP request is waiting for the transmission of the initial frame
+ * (i.e.  command, task, etc.).
+ *
+ * @SCI_REQ_COMPLETED: This state indicates that the request has completed.
+ * This state is entered from the STARTED state. This state is entered from the
+ * ABORTING state.
+ *
+ * @SCI_REQ_ABORTING: This state indicates that the request is in the process
+ * of being terminated/aborted.  This state is entered from the CONSTRUCTED
+ * state.  This state is entered from the STARTED state.
+ *
+ * @SCI_REQ_FINAL: Simply the final state for the base request state machine.
  */
-enum sci_base_request_states {
-	/*
-	 * Simply the initial state for the base request state machine.
-	 */
-	SCI_REQ_INIT,
-
-	/*
-	 * This state indicates that the request has been constructed.
-	 * This state is entered from the INITIAL state.
-	 */
-	SCI_REQ_CONSTRUCTED,
-
-	/*
-	 * This state indicates that the request has been started. This state
-	 * is entered from the CONSTRUCTED state.
-	 */
-	SCI_REQ_STARTED,
-
-	SCI_REQ_STP_UDMA_WAIT_TC_COMP,
-	SCI_REQ_STP_UDMA_WAIT_D2H,
-
-	SCI_REQ_STP_NON_DATA_WAIT_H2D,
-	SCI_REQ_STP_NON_DATA_WAIT_D2H,
-
-	SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED,
-	SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG,
-	SCI_REQ_STP_SOFT_RESET_WAIT_D2H,
-
-	/*
-	 * While in this state the IO request object is waiting for the TC
-	 * completion notification for the H2D Register FIS
-	 */
-	SCI_REQ_STP_PIO_WAIT_H2D,
-
-	/*
-	 * While in this state the IO request object is waiting for either a
-	 * PIO Setup FIS or a D2H register FIS.  The type of frame received is
-	 * based on the result of the prior frame and line conditions.
-	 */
-	SCI_REQ_STP_PIO_WAIT_FRAME,
-
-	/*
-	 * While in this state the IO request object is waiting for a DATA
-	 * frame from the device.
-	 */
-	SCI_REQ_STP_PIO_DATA_IN,
-
-	/*
-	 * While in this state the IO request object is waiting to transmit
-	 * the next data frame to the device.
-	 */
-	SCI_REQ_STP_PIO_DATA_OUT,
-
-	/*
-	 * While in this state the IO request object is waiting for the TC
-	 * completion notification for the H2D Register FIS
-	 */
-	SCI_REQ_ATAPI_WAIT_H2D,
-
-	/*
-	 * While in this state the IO request object is waiting for either a
-	 * PIO Setup.
-	 */
-	SCI_REQ_ATAPI_WAIT_PIO_SETUP,
-
-	/*
-	 * The non-data IO transit to this state in this state after receiving
-	 * TC completion. While in this state IO request object is waiting for
-	 * D2H status frame as UF.
-	 */
-	SCI_REQ_ATAPI_WAIT_D2H,
-
-	/*
-	 * When transmitting raw frames hardware reports task context completion
-	 * after every frame submission, so in the non-accelerated case we need
-	 * to expect the completion for the "cdb" frame.
-	 */
-	SCI_REQ_ATAPI_WAIT_TC_COMP,
-
-	/*
-	 * The AWAIT_TC_COMPLETION sub-state indicates that the started raw
-	 * task management request is waiting for the transmission of the
-	 * initial frame (i.e. command, task, etc.).
-	 */
-	SCI_REQ_TASK_WAIT_TC_COMP,
-
-	/*
-	 * This sub-state indicates that the started task management request
-	 * is waiting for the reception of an unsolicited frame
-	 * (i.e. response IU).
-	 */
-	SCI_REQ_TASK_WAIT_TC_RESP,
-
-	/*
-	 * This sub-state indicates that the started task management request
-	 * is waiting for the reception of an unsolicited frame
-	 * (i.e. response IU).
-	 */
-	SCI_REQ_SMP_WAIT_RESP,
-
-	/*
-	 * The AWAIT_TC_COMPLETION sub-state indicates that the started SMP
-	 * request is waiting for the transmission of the initial frame
-	 * (i.e. command, task, etc.).
-	 */
-	SCI_REQ_SMP_WAIT_TC_COMP,
-
-	/*
-	 * This state indicates that the request has completed.
-	 * This state is entered from the STARTED state. This state is entered
-	 * from the ABORTING state.
-	 */
-	SCI_REQ_COMPLETED,
-
-	/*
-	 * This state indicates that the request is in the process of being
-	 * terminated/aborted.
-	 * This state is entered from the CONSTRUCTED state.
-	 * This state is entered from the STARTED state.
-	 */
-	SCI_REQ_ABORTING,
-
-	/*
-	 * Simply the final state for the base request state machine.
-	 */
-	SCI_REQ_FINAL,
-};
+#define REQUEST_STATES {\
+	C(REQ_INIT),\
+	C(REQ_CONSTRUCTED),\
+	C(REQ_STARTED),\
+	C(REQ_STP_UDMA_WAIT_TC_COMP),\
+	C(REQ_STP_UDMA_WAIT_D2H),\
+	C(REQ_STP_NON_DATA_WAIT_H2D),\
+	C(REQ_STP_NON_DATA_WAIT_D2H),\
+	C(REQ_STP_PIO_WAIT_H2D),\
+	C(REQ_STP_PIO_WAIT_FRAME),\
+	C(REQ_STP_PIO_DATA_IN),\
+	C(REQ_STP_PIO_DATA_OUT),\
+	C(REQ_ATAPI_WAIT_H2D),\
+	C(REQ_ATAPI_WAIT_PIO_SETUP),\
+	C(REQ_ATAPI_WAIT_D2H),\
+	C(REQ_ATAPI_WAIT_TC_COMP),\
+	C(REQ_TASK_WAIT_TC_COMP),\
+	C(REQ_TASK_WAIT_TC_RESP),\
+	C(REQ_SMP_WAIT_RESP),\
+	C(REQ_SMP_WAIT_TC_COMP),\
+	C(REQ_COMPLETED),\
+	C(REQ_ABORTING),\
+	C(REQ_FINAL),\
+	}
+#undef C
+#define C(a) SCI_##a
+enum sci_base_request_states REQUEST_STATES;
+#undef C
+const char *req_state_name(enum sci_base_request_states state);
 
 enum sci_status sci_request_start(struct isci_request *ireq);
 enum sci_status sci_io_request_terminate(struct isci_request *ireq);
@@ -446,10 +411,7 @@ sci_task_request_construct(struct isci_host *ihost,
 			    struct isci_remote_device *idev,
 			    u16 io_tag,
 			    struct isci_request *ireq);
-enum sci_status
-sci_task_request_construct_ssp(struct isci_request *ireq);
-enum sci_status
-sci_task_request_construct_sata(struct isci_request *ireq);
+enum sci_status sci_task_request_construct_ssp(struct isci_request *ireq);
 void sci_smp_request_copy_response(struct isci_request *ireq);
 
 static inline int isci_task_is_ncq_recovery(struct sas_task *task)
diff --git a/drivers/scsi/isci/scu_task_context.h b/drivers/scsi/isci/scu_task_context.h
index 7df87d923285..869a979eb5b2 100644
--- a/drivers/scsi/isci/scu_task_context.h
+++ b/drivers/scsi/isci/scu_task_context.h
@@ -866,9 +866,9 @@ struct scu_task_context {
 	struct transport_snapshot snapshot; /* read only set to 0 */
 
 	/* OFFSET 0x5C */
-	u32 block_protection_enable:1;
-	u32 block_size:2;
-	u32 block_protection_function:2;
+	u32 blk_prot_en:1;
+	u32 blk_sz:2;
+	u32 blk_prot_func:2;
 	u32 reserved_5C_0:9;
 	u32 active_sgl_element:2;  /* read only set to 0 */
 	u32 sgl_exhausted:1;  /* read only set to 0 */
@@ -896,33 +896,56 @@ struct scu_task_context {
 	u32 reserved_C4_CC[3];
 
 	/* OFFSET 0xD0 */
-	u32 intermediate_crc_value:16;
-	u32 initial_crc_seed:16;
+	u32 interm_crc_val:16;
+	u32 init_crc_seed:16;
 
 	/* OFFSET 0xD4 */
-	u32 application_tag_for_verify:16;
-	u32 application_tag_for_generate:16;
+	u32 app_tag_verify:16;
+	u32 app_tag_gen:16;
 
 	/* OFFSET 0xD8 */
-	u32 reference_tag_seed_for_verify_function;
+	u32 ref_tag_seed_verify;
 
 	/* OFFSET 0xDC */
-	u32 reserved_DC;
+	u32 UD_bytes_immed_val:13;
+	u32 reserved_DC_0:3;
+	u32 DIF_bytes_immed_val:4;
+	u32 reserved_DC_1:12;
 
 	/* OFFSET 0xE0 */
-	u32 reserved_E0_0:16;
-	u32 application_tag_mask_for_generate:16;
+	u32 bgc_blk_sz:13;
+	u32 reserved_E0_0:3;
+	u32 app_tag_gen_mask:16;
 
 	/* OFFSET 0xE4 */
-	u32 block_protection_control:16;
-	u32 application_tag_mask_for_verify:16;
+	union {
+		u16 bgctl;
+		struct {
+			u16 crc_verify:1;
+			u16 app_tag_chk:1;
+			u16 ref_tag_chk:1;
+			u16 op:2;
+			u16 legacy:1;
+			u16 invert_crc_seed:1;
+			u16 ref_tag_gen:1;
+			u16 fixed_ref_tag:1;
+			u16 invert_crc:1;
+			u16 app_ref_f_detect:1;
+			u16 uninit_dif_check_err:1;
+			u16 uninit_dif_bypass:1;
+			u16 app_f_detect:1;
+			u16 reserved_0:2;
+		} bgctl_f;
+	};
+
+	u16 app_tag_verify_mask;
 
 	/* OFFSET 0xE8 */
-	u32 block_protection_error:8;
+	u32 blk_guard_err:8;
 	u32 reserved_E8_0:24;
 
 	/* OFFSET 0xEC */
-	u32 reference_tag_seed_for_verify;
+	u32 ref_tag_seed_gen;
 
 	/* OFFSET 0xF0 */
 	u32 intermediate_crc_valid_snapshot:16;
@@ -937,6 +960,6 @@ struct scu_task_context {
 	/* OFFSET 0xFC */
 	u32 reference_tag_seed_for_generate_function_snapshot;
 
-};
+} __packed;
 
 #endif /* _SCU_TASK_CONTEXT_H_ */
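A note on the new __packed annotation: scu_task_context mirrors a fixed
hardware layout whose last member sits at offset 0xFC per the OFFSET
comments, so the whole context must be exactly 0x100 bytes.  A hedged
compile-time check along these lines (not part of the patch; the helper
name is illustrative) would make that constraint explicit:

	static inline void scu_task_context_size_check(void)
	{
		BUILD_BUG_ON(sizeof(struct scu_task_context) != 0x100);
	}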
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
index f5a3f7d2bdab..374254ede9d4 100644
--- a/drivers/scsi/isci/task.c
+++ b/drivers/scsi/isci/task.c
@@ -96,8 +96,7 @@ static void isci_task_refuse(struct isci_host *ihost, struct sas_task *task,
 			__func__, task, response, status);
 
 		task->lldd_task = NULL;
-
-		isci_execpath_callback(ihost, task, task->task_done);
+		task->task_done(task);
 		break;
 
 	case isci_perform_aborted_io_completion:
@@ -117,8 +116,7 @@ static void isci_task_refuse(struct isci_host *ihost, struct sas_task *task,
 			"%s: Error - task = %p, response=%d, "
 			"status=%d\n",
 			__func__, task, response, status);
-
-		isci_execpath_callback(ihost, task, sas_task_abort);
+		sas_task_abort(task);
 		break;
 
 	default:
@@ -249,46 +247,6 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
 	return 0;
 }
 
-static enum sci_status isci_sata_management_task_request_build(struct isci_request *ireq)
-{
-	struct isci_tmf *isci_tmf;
-	enum sci_status status;
-
-	if (!test_bit(IREQ_TMF, &ireq->flags))
-		return SCI_FAILURE;
-
-	isci_tmf = isci_request_access_tmf(ireq);
-
-	switch (isci_tmf->tmf_code) {
-
-	case isci_tmf_sata_srst_high:
-	case isci_tmf_sata_srst_low: {
-		struct host_to_dev_fis *fis = &ireq->stp.cmd;
-
-		memset(fis, 0, sizeof(*fis));
-
-		fis->fis_type  =  0x27;
-		fis->flags     &= ~0x80;
-		fis->flags     &= 0xF0;
-		if (isci_tmf->tmf_code == isci_tmf_sata_srst_high)
-			fis->control |= ATA_SRST;
-		else
-			fis->control &= ~ATA_SRST;
-		break;
-	}
-	/* other management commands go here... */
-	default:
-		return SCI_FAILURE;
-	}
-
-	/* core builds the protocol specific request
-	 *  based on the h2d fis.
-	 */
-	status = sci_task_request_construct_sata(ireq);
-
-	return status;
-}
-
 static struct isci_request *isci_task_request_build(struct isci_host *ihost,
 						    struct isci_remote_device *idev,
 						    u16 tag, struct isci_tmf *isci_tmf)
@@ -328,13 +286,6 @@ static struct isci_request *isci_task_request_build(struct isci_host *ihost,
 			return NULL;
 	}
 
-	if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
-		isci_tmf->proto = SAS_PROTOCOL_SATA;
-		status = isci_sata_management_task_request_build(ireq);
-
-		if (status != SCI_SUCCESS)
-			return NULL;
-	}
 	return ireq;
 }
 
@@ -873,53 +824,20 @@ static int isci_task_send_lu_reset_sas(
 	return ret;
 }
 
-static int isci_task_send_lu_reset_sata(struct isci_host *ihost,
-				 struct isci_remote_device *idev, u8 *lun)
-{
-	int ret = TMF_RESP_FUNC_FAILED;
-	struct isci_tmf tmf;
-
-	/* Send the soft reset to the target */
-	#define ISCI_SRST_TIMEOUT_MS 25000 /* 25 second timeout. */
-	isci_task_build_tmf(&tmf, isci_tmf_sata_srst_high, NULL, NULL);
-
-	ret = isci_task_execute_tmf(ihost, idev, &tmf, ISCI_SRST_TIMEOUT_MS);
-
-	if (ret != TMF_RESP_FUNC_COMPLETE) {
-		dev_dbg(&ihost->pdev->dev,
-			 "%s: Assert SRST failed (%p) = %x",
-			 __func__, idev, ret);
-
-		/* Return the failure so that the LUN reset is escalated
-		 * to a target reset.
-		 */
-	}
-	return ret;
-}
-
-/**
- * isci_task_lu_reset() - This function is one of the SAS Domain Template
- *    functions. This is one of the Task Management functions called by libsas,
- *    to reset the given lun. Note the assumption that while this call is
- *    executing, no I/O will be sent by the host to the device.
- * @lun: This parameter specifies the lun to be reset.
- *
- * Returns a status code; zero indicates success.
- */
-int isci_task_lu_reset(struct domain_device *domain_device, u8 *lun)
+int isci_task_lu_reset(struct domain_device *dev, u8 *lun)
 {
-	struct isci_host *isci_host = dev_to_ihost(domain_device);
+	struct isci_host *isci_host = dev_to_ihost(dev);
 	struct isci_remote_device *isci_device;
 	unsigned long flags;
 	int ret;
 
 	spin_lock_irqsave(&isci_host->scic_lock, flags);
-	isci_device = isci_lookup_device(domain_device);
+	isci_device = isci_lookup_device(dev);
 	spin_unlock_irqrestore(&isci_host->scic_lock, flags);
 
 	dev_dbg(&isci_host->pdev->dev,
 		"%s: domain_device=%p, isci_host=%p; isci_device=%p\n",
-		 __func__, domain_device, isci_host, isci_device);
+		 __func__, dev, isci_host, isci_device);
 
 	if (!isci_device) {
 		/* If the device is gone, stop the escalations. */
@@ -928,11 +846,11 @@ int isci_task_lu_reset(struct domain_device *domain_device, u8 *lun)
 		ret = TMF_RESP_FUNC_COMPLETE;
 		goto out;
 	}
-	set_bit(IDEV_EH, &isci_device->flags);
 
 	/* Send the task management part of the reset. */
-	if (sas_protocol_ata(domain_device->tproto)) {
-		ret = isci_task_send_lu_reset_sata(isci_host, isci_device, lun);
+	if (dev_is_sata(dev)) {
+		sas_ata_schedule_reset(dev);
+		ret = TMF_RESP_FUNC_COMPLETE;
 	} else
 		ret = isci_task_send_lu_reset_sas(isci_host, isci_device, lun);
 
@@ -1062,9 +980,6 @@ int isci_task_abort_task(struct sas_task *task)
 		"%s: dev = %p, task = %p, old_request == %p\n",
 		__func__, isci_device, task, old_request);
 
-	if (isci_device)
-		set_bit(IDEV_EH, &isci_device->flags);
-
 	/* Device reset conditions signalled in task_state_flags are the
	 * responsibility of libsas to observe at the start of the error
 	 * handler thread.
@@ -1332,29 +1247,35 @@ isci_task_request_complete(struct isci_host *ihost,
 }
 
 static int isci_reset_device(struct isci_host *ihost,
+			     struct domain_device *dev,
 			     struct isci_remote_device *idev)
 {
-	struct sas_phy *phy = sas_find_local_phy(idev->domain_dev);
-	enum sci_status status;
-	unsigned long flags;
 	int rc;
+	unsigned long flags;
+	enum sci_status status;
+	struct sas_phy *phy = sas_get_local_phy(dev);
+	struct isci_port *iport = dev->port->lldd_port;
 
 	dev_dbg(&ihost->pdev->dev, "%s: idev %p\n", __func__, idev);
 
 	spin_lock_irqsave(&ihost->scic_lock, flags);
 	status = sci_remote_device_reset(idev);
-	if (status != SCI_SUCCESS) {
-		spin_unlock_irqrestore(&ihost->scic_lock, flags);
+	spin_unlock_irqrestore(&ihost->scic_lock, flags);
 
+	if (status != SCI_SUCCESS) {
 		dev_dbg(&ihost->pdev->dev,
 			 "%s: sci_remote_device_reset(%p) returned %d!\n",
 			 __func__, idev, status);
-
-		return TMF_RESP_FUNC_FAILED;
+		rc = TMF_RESP_FUNC_FAILED;
+		goto out;
 	}
-	spin_unlock_irqrestore(&ihost->scic_lock, flags);
 
-	rc = sas_phy_reset(phy, true);
+	if (scsi_is_sas_phy_local(phy)) {
+		struct isci_phy *iphy = &ihost->phys[phy->number];
+
+		rc = isci_port_perform_hard_reset(ihost, iport, iphy);
+	} else
+		rc = sas_phy_reset(phy, !dev_is_sata(dev));
 
 	/* Terminate in-progress I/O now. */
 	isci_remote_device_nuke_requests(ihost, idev);
@@ -1371,7 +1292,8 @@ static int isci_reset_device(struct isci_host *ihost,
 	}
 
 	dev_dbg(&ihost->pdev->dev, "%s: idev %p complete.\n", __func__, idev);
-
+ out:
+	sas_put_local_phy(phy);
 	return rc;
 }
 
@@ -1386,35 +1308,15 @@ int isci_task_I_T_nexus_reset(struct domain_device *dev)
 	idev = isci_lookup_device(dev);
 	spin_unlock_irqrestore(&ihost->scic_lock, flags);
 
-	if (!idev || !test_bit(IDEV_EH, &idev->flags)) {
-		ret = TMF_RESP_FUNC_COMPLETE;
-		goto out;
-	}
-
-	ret = isci_reset_device(ihost, idev);
- out:
-	isci_put_device(idev);
-	return ret;
-}
-
-int isci_bus_reset_handler(struct scsi_cmnd *cmd)
-{
-	struct domain_device *dev = sdev_to_domain_dev(cmd->device);
-	struct isci_host *ihost = dev_to_ihost(dev);
-	struct isci_remote_device *idev;
-	unsigned long flags;
-	int ret;
-
-	spin_lock_irqsave(&ihost->scic_lock, flags);
-	idev = isci_lookup_device(dev);
-	spin_unlock_irqrestore(&ihost->scic_lock, flags);
-
 	if (!idev) {
+		/* XXX: need to clean up any ireqs targeting this
+		 * domain_device
+		 */
 		ret = TMF_RESP_FUNC_COMPLETE;
 		goto out;
 	}
 
-	ret = isci_reset_device(ihost, idev);
+	ret = isci_reset_device(ihost, dev, idev);
  out:
 	isci_put_device(idev);
 	return ret;
diff --git a/drivers/scsi/isci/task.h b/drivers/scsi/isci/task.h
index 1b27b3797c6c..7b6d0e32fd9b 100644
--- a/drivers/scsi/isci/task.h
+++ b/drivers/scsi/isci/task.h
@@ -86,8 +86,6 @@ enum isci_tmf_function_codes {
 	isci_tmf_func_none      = 0,
 	isci_tmf_ssp_task_abort = TMF_ABORT_TASK,
 	isci_tmf_ssp_lun_reset  = TMF_LU_RESET,
-	isci_tmf_sata_srst_high = TMF_LU_RESET + 0x100, /* Non SCSI */
-	isci_tmf_sata_srst_low  = TMF_LU_RESET + 0x101  /* Non SCSI */
 };
 /**
  * struct isci_tmf - This class represents the task management object which
@@ -210,8 +208,6 @@ int isci_queuecommand(
 	struct scsi_cmnd *scsi_cmd,
 	void (*donefunc)(struct scsi_cmnd *));
 
-int isci_bus_reset_handler(struct scsi_cmnd *cmd);
-
 /**
  * enum isci_completion_selection - This enum defines the possible actions to
  *    take with respect to a given request's notification back to libsas.
@@ -321,40 +317,4 @@ isci_task_set_completion_status(
 	return task_notification_selection;
 
 }
-/**
-* isci_execpath_callback() - This function is called from the task
-* execute path when the task needs to callback libsas about the submit-time
-* task failure.  The callback occurs either through the task's done function
-* or through sas_task_abort.  In the case of regular non-discovery SATA/STP I/O
-* requests, libsas takes the host lock before calling execute task.  Therefore
-* in this situation the host lock must be managed before calling the func.
-*
-* @ihost: This parameter is the controller to which the I/O request was sent.
-* @task: This parameter is the I/O request.
-* @func: This parameter is the function to call in the correct context.
-* @status: This parameter is the status code for the completed task.
-*
-*/
-static inline void isci_execpath_callback(struct isci_host *ihost,
-					  struct sas_task  *task,
-					  void (*func)(struct sas_task *))
-{
-	struct domain_device *dev = task->dev;
-
-	if (dev_is_sata(dev) && task->uldd_task) {
-		unsigned long flags;
-
-		/* Since we are still in the submit path, and since
-		 * libsas takes the host lock on behalf of SATA
-		 * devices before I/O starts (in the non-discovery case),
-		 * we need to unlock before we can call the callback function.
-		 */
-		raw_local_irq_save(flags);
-		spin_unlock(dev->sata_dev.ap->lock);
-		func(task);
-		spin_lock(dev->sata_dev.ap->lock);
-		raw_local_irq_restore(flags);
-	} else
-		func(task);
-}
 #endif /* !defined(_SCI_TASK_H_) */
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index db47158e0dde..453a740fa68e 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -684,10 +684,8 @@ static int iscsi_sw_tcp_conn_set_param(struct iscsi_cls_conn *cls_conn,
 				       int buflen)
 {
 	struct iscsi_conn *conn = cls_conn->dd_data;
-	struct iscsi_session *session = conn->session;
 	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
 	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
-	int value;
 
 	switch(param) {
 	case ISCSI_PARAM_HDRDGST_EN:
@@ -699,16 +697,7 @@ static int iscsi_sw_tcp_conn_set_param(struct iscsi_cls_conn *cls_conn,
 			sock_no_sendpage : tcp_sw_conn->sock->ops->sendpage;
 		break;
 	case ISCSI_PARAM_MAX_R2T:
-		sscanf(buf, "%d", &value);
-		if (value <= 0 || !is_power_of_2(value))
-			return -EINVAL;
-		if (session->max_r2t == value)
-			break;
-		iscsi_tcp_r2tpool_free(session);
-		iscsi_set_param(cls_conn, param, buf, buflen);
-		if (iscsi_tcp_r2tpool_alloc(session))
-			return -ENOMEM;
-		break;
+		return iscsi_tcp_set_max_r2t(conn, buf);
 	default:
 		return iscsi_set_param(cls_conn, param, buf, buflen);
 	}
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
index 1d1b0c9da29b..8e561e6a557c 100644
--- a/drivers/scsi/libfc/fc_disc.c
+++ b/drivers/scsi/libfc/fc_disc.c
@@ -337,6 +337,13 @@ static void fc_disc_error(struct fc_disc *disc, struct fc_frame *fp)
 			schedule_delayed_work(&disc->disc_work, delay);
 		} else
 			fc_disc_done(disc, DISC_EV_FAILED);
+	} else if (PTR_ERR(fp) == -FC_EX_CLOSED) {
+		/*
+		 * if discovery fails due to lport reset, clear
+		 * pending flag so that subsequent discovery can
+		 * continue
+		 */
+		disc->pending = 0;
 	}
 }
 
diff --git a/drivers/scsi/libfc/fc_elsct.c b/drivers/scsi/libfc/fc_elsct.c
index e17a28d324d0..c2384d501470 100644
--- a/drivers/scsi/libfc/fc_elsct.c
+++ b/drivers/scsi/libfc/fc_elsct.c
@@ -56,8 +56,7 @@ struct fc_seq *fc_elsct_send(struct fc_lport *lport, u32 did,
 		rc = fc_els_fill(lport, did, fp, op, &r_ctl, &fh_type);
 	else {
 		/* CT requests */
-		rc = fc_ct_fill(lport, did, fp, op, &r_ctl, &fh_type);
-		did = FC_FID_DIR_SERV;
+		rc = fc_ct_fill(lport, did, fp, op, &r_ctl, &fh_type, &did);
 	}
 
 	if (rc) {
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 4d70d96fa5dc..630291f01826 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -1642,9 +1642,10 @@ static void fc_exch_recv_bls(struct fc_exch_mgr *mp, struct fc_frame *fp)
 		case FC_RCTL_ACK_0:
 			break;
 		default:
-			FC_EXCH_DBG(ep, "BLS rctl %x - %s received",
-				    fh->fh_r_ctl,
-				    fc_exch_rctl_name(fh->fh_r_ctl));
+			if (ep)
+				FC_EXCH_DBG(ep, "BLS rctl %x - %s received",
+					    fh->fh_r_ctl,
+					    fc_exch_rctl_name(fh->fh_r_ctl));
 			break;
 		}
 		fc_frame_free(fp);
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index c1a808cc5920..bd5d31d022d9 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -116,6 +116,8 @@ static void fc_lport_enter_ns(struct fc_lport *, enum fc_lport_state);
 static void fc_lport_enter_scr(struct fc_lport *);
 static void fc_lport_enter_ready(struct fc_lport *);
 static void fc_lport_enter_logo(struct fc_lport *);
+static void fc_lport_enter_fdmi(struct fc_lport *lport);
+static void fc_lport_enter_ms(struct fc_lport *, enum fc_lport_state);
 
 static const char *fc_lport_state_names[] = {
 	[LPORT_ST_DISABLED] = "disabled",
@@ -126,6 +128,11 @@ static const char *fc_lport_state_names[] = {
 	[LPORT_ST_RSPN_ID] =  "RSPN_ID",
 	[LPORT_ST_RFT_ID] =   "RFT_ID",
 	[LPORT_ST_RFF_ID] =   "RFF_ID",
+	[LPORT_ST_FDMI] =     "FDMI",
+	[LPORT_ST_RHBA] =     "RHBA",
+	[LPORT_ST_RPA] =      "RPA",
+	[LPORT_ST_DHBA] =     "DHBA",
+	[LPORT_ST_DPRT] =     "DPRT",
 	[LPORT_ST_SCR] =      "SCR",
 	[LPORT_ST_READY] =    "Ready",
 	[LPORT_ST_LOGO] =     "LOGO",
@@ -183,11 +190,14 @@ static void fc_lport_rport_callback(struct fc_lport *lport,
 		if (lport->state == LPORT_ST_DNS) {
 			lport->dns_rdata = rdata;
 			fc_lport_enter_ns(lport, LPORT_ST_RNN_ID);
+		} else if (lport->state == LPORT_ST_FDMI) {
+			lport->ms_rdata = rdata;
+			fc_lport_enter_ms(lport, LPORT_ST_DHBA);
 		} else {
 			FC_LPORT_DBG(lport, "Received an READY event "
 				     "on port (%6.6x) for the directory "
 				     "server, but the lport is not "
-				     "in the DNS state, it's in the "
+				     "in the DNS or FDMI state, it's in the "
 				     "%d state", rdata->ids.port_id,
 				     lport->state);
 			lport->tt.rport_logoff(rdata);
@@ -196,7 +206,10 @@ static void fc_lport_rport_callback(struct fc_lport *lport,
 	case RPORT_EV_LOGO:
 	case RPORT_EV_FAILED:
 	case RPORT_EV_STOP:
-		lport->dns_rdata = NULL;
+		if (rdata->ids.port_id == FC_FID_DIR_SERV)
+			lport->dns_rdata = NULL;
+		else if (rdata->ids.port_id == FC_FID_MGMT_SERV)
+			lport->ms_rdata = NULL;
 		break;
 	case RPORT_EV_NONE:
 		break;
@@ -1148,7 +1161,10 @@ static void fc_lport_ns_resp(struct fc_seq *sp, struct fc_frame *fp,
 			fc_lport_enter_ns(lport, LPORT_ST_RFF_ID);
 			break;
 		case LPORT_ST_RFF_ID:
-			fc_lport_enter_scr(lport);
+			if (lport->fdmi_enabled)
+				fc_lport_enter_fdmi(lport);
+			else
+				fc_lport_enter_scr(lport);
 			break;
 		default:
 			/* should have already been caught by state checks */
@@ -1163,6 +1179,85 @@ err:
 }
 
 /**
+ * fc_lport_ms_resp() - Handle response to a management server
+ *			exchange
+ * @sp:	    current sequence in exchange
+ * @fp:	    response frame
+ * @lp_arg: Fibre Channel host port instance
+ *
+ * Locking Note: This function will be called without the lport lock
+ * held, but it will lock, call an _enter_* function or fc_lport_error()
+ * and then unlock the lport.
+ */
+static void fc_lport_ms_resp(struct fc_seq *sp, struct fc_frame *fp,
+			     void *lp_arg)
+{
+	struct fc_lport *lport = lp_arg;
+	struct fc_frame_header *fh;
+	struct fc_ct_hdr *ct;
+
+	FC_LPORT_DBG(lport, "Received a ms %s\n", fc_els_resp_type(fp));
+
+	if (fp == ERR_PTR(-FC_EX_CLOSED))
+		return;
+
+	mutex_lock(&lport->lp_mutex);
+
+	if (lport->state < LPORT_ST_RHBA || lport->state > LPORT_ST_DPRT) {
+		FC_LPORT_DBG(lport, "Received a management server response, "
+			     "but in state %s\n", fc_lport_state(lport));
+		if (IS_ERR(fp))
+			goto err;
+		goto out;
+	}
+
+	if (IS_ERR(fp)) {
+		fc_lport_error(lport, fp);
+		goto err;
+	}
+
+	fh = fc_frame_header_get(fp);
+	ct = fc_frame_payload_get(fp, sizeof(*ct));
+
+	if (fh && ct && fh->fh_type == FC_TYPE_CT &&
+	    ct->ct_fs_type == FC_FST_MGMT &&
+	    ct->ct_fs_subtype == FC_FDMI_SUBTYPE) {
+		FC_LPORT_DBG(lport, "Received a management server response, "
+				    "reason=%d explain=%d\n",
+				    ct->ct_reason,
+				    ct->ct_explan);
+
+		switch (lport->state) {
+		case LPORT_ST_RHBA:
+			if (ntohs(ct->ct_cmd) == FC_FS_ACC)
+				fc_lport_enter_ms(lport, LPORT_ST_RPA);
+			else /* Error Skip RPA */
+				fc_lport_enter_scr(lport);
+			break;
+		case LPORT_ST_RPA:
+			fc_lport_enter_scr(lport);
+			break;
+		case LPORT_ST_DPRT:
+			fc_lport_enter_ms(lport, LPORT_ST_RHBA);
+			break;
+		case LPORT_ST_DHBA:
+			fc_lport_enter_ms(lport, LPORT_ST_DPRT);
+			break;
+		default:
+			/* should have already been caught by state checks */
+			break;
+		}
+	} else {
+		/* Invalid Frame? */
+		fc_lport_error(lport, fp);
+	}
+out:
+	fc_frame_free(fp);
+err:
+	mutex_unlock(&lport->lp_mutex);
+}
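
As a map of the handler above together with fc_lport_enter_ms() below,
the FDMI registration sequence the new states implement is:

	FDMI (rport login to the management server)
	  -> DHBA -> DPRT -> RHBA --(accepted)--> RPA -> SCR
	                        \--(rejected)--------> SCR

i.e. any stale HBA/port registrations are deleted first, the HBA and
port attributes are then registered, and RPA is skipped when RHBA is
refused.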
+
+/**
  * fc_lport_scr_resp() - Handle response to State Change Register (SCR) request
  * @sp:	    current sequence in SCR exchange
  * @fp:	    response frame
@@ -1339,6 +1434,123 @@ err:
 }
 
 /**
+ * fc_lport_enter_ms() - send a management server (FDMI) request
+ * @lport: Fibre Channel local port to register
+ * @state: the management server registration state to enter
+ *
+ * Locking Note: The lport lock is expected to be held before calling
+ * this routine.
+ */
+static void fc_lport_enter_ms(struct fc_lport *lport, enum fc_lport_state state)
+{
+	struct fc_frame *fp;
+	enum fc_fdmi_req cmd;
+	int size = sizeof(struct fc_ct_hdr);
+	size_t len;
+	int numattrs;
+
+	FC_LPORT_DBG(lport, "Entered %s state from %s state\n",
+		     fc_lport_state_names[state],
+		     fc_lport_state(lport));
+
+	fc_lport_state_enter(lport, state);
+
+	switch (state) {
+	case LPORT_ST_RHBA:
+		cmd = FC_FDMI_RHBA;
+		/* Number of HBA Attributes */
+		numattrs = 10;
+		len = sizeof(struct fc_fdmi_rhba);
+		len -= sizeof(struct fc_fdmi_attr_entry);
+		len += (numattrs * FC_FDMI_ATTR_ENTRY_HEADER_LEN);
+		len += FC_FDMI_HBA_ATTR_NODENAME_LEN;
+		len += FC_FDMI_HBA_ATTR_MANUFACTURER_LEN;
+		len += FC_FDMI_HBA_ATTR_SERIALNUMBER_LEN;
+		len += FC_FDMI_HBA_ATTR_MODEL_LEN;
+		len += FC_FDMI_HBA_ATTR_MODELDESCR_LEN;
+		len += FC_FDMI_HBA_ATTR_HARDWAREVERSION_LEN;
+		len += FC_FDMI_HBA_ATTR_DRIVERVERSION_LEN;
+		len += FC_FDMI_HBA_ATTR_OPTIONROMVERSION_LEN;
+		len += FC_FDMI_HBA_ATTR_FIRMWAREVERSION_LEN;
+		len += FC_FDMI_HBA_ATTR_OSNAMEVERSION_LEN;
+
+		size += len;
+		break;
+	case LPORT_ST_RPA:
+		cmd = FC_FDMI_RPA;
+		/* Number of Port Attributes */
+		numattrs = 6;
+		len = sizeof(struct fc_fdmi_rpa);
+		len -= sizeof(struct fc_fdmi_attr_entry);
+		len += (numattrs * FC_FDMI_ATTR_ENTRY_HEADER_LEN);
+		len += FC_FDMI_PORT_ATTR_FC4TYPES_LEN;
+		len += FC_FDMI_PORT_ATTR_SUPPORTEDSPEED_LEN;
+		len += FC_FDMI_PORT_ATTR_CURRENTPORTSPEED_LEN;
+		len += FC_FDMI_PORT_ATTR_MAXFRAMESIZE_LEN;
+		len += FC_FDMI_PORT_ATTR_OSDEVICENAME_LEN;
+		len += FC_FDMI_PORT_ATTR_HOSTNAME_LEN;
+
+		size += len;
+		break;
+	case LPORT_ST_DPRT:
+		cmd = FC_FDMI_DPRT;
+		len = sizeof(struct fc_fdmi_dprt);
+		size += len;
+		break;
+	case LPORT_ST_DHBA:
+		cmd = FC_FDMI_DHBA;
+		len = sizeof(struct fc_fdmi_dhba);
+		size += len;
+		break;
+	default:
+		fc_lport_error(lport, NULL);
+		return;
+	}
+
+	FC_LPORT_DBG(lport, "Cmd=0x%x Len %d size %d\n",
+			     cmd, (int)len, size);
+	fp = fc_frame_alloc(lport, size);
+	if (!fp) {
+		fc_lport_error(lport, fp);
+		return;
+	}
+
+	if (!lport->tt.elsct_send(lport, FC_FID_MGMT_SERV, fp, cmd,
+				  fc_lport_ms_resp,
+				  lport, 3 * lport->r_a_tov))
+		fc_lport_error(lport, fp);
+}
+
+/**
+ * fc_lport_enter_fdmi() - Create a fc_rport for the management server
+ * @lport: The local port requesting a remote port for the management server
+ *
+ * Locking Note: The lport lock is expected to be held before calling
+ * this routine.
+ */
+static void fc_lport_enter_fdmi(struct fc_lport *lport)
+{
+	struct fc_rport_priv *rdata;
+
+	FC_LPORT_DBG(lport, "Entered FDMI state from %s state\n",
+		     fc_lport_state(lport));
+
+	fc_lport_state_enter(lport, LPORT_ST_FDMI);
+
+	mutex_lock(&lport->disc.disc_mutex);
+	rdata = lport->tt.rport_create(lport, FC_FID_MGMT_SERV);
+	mutex_unlock(&lport->disc.disc_mutex);
+	if (!rdata)
+		goto err;
+
+	rdata->ops = &fc_lport_rport_ops;
+	lport->tt.rport_login(rdata);
+	return;
+
+err:
+	fc_lport_error(lport, NULL);
+}
+
+/**
  * fc_lport_timeout() - Handler for the retry_work timer
  * @work: The work struct of the local port
  */
@@ -1371,6 +1583,15 @@ static void fc_lport_timeout(struct work_struct *work)
 	case LPORT_ST_RFF_ID:
 		fc_lport_enter_ns(lport, lport->state);
 		break;
+	case LPORT_ST_FDMI:
+		fc_lport_enter_fdmi(lport);
+		break;
+	case LPORT_ST_RHBA:
+	case LPORT_ST_RPA:
+	case LPORT_ST_DHBA:
+	case LPORT_ST_DPRT:
+		fc_lport_enter_ms(lport, lport->state);
+		break;
 	case LPORT_ST_SCR:
 		fc_lport_enter_scr(lport);
 		break;
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 143bbe448bec..82c3fd4bc938 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1909,6 +1909,16 @@ static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
 	ISCSI_DBG_EH(session, "scsi cmd %p timedout\n", sc);
 
 	spin_lock(&session->lock);
+	task = (struct iscsi_task *)sc->SCp.ptr;
+	if (!task) {
+		/*
+		 * Raced with completion. Blk layer has taken ownership
+		 * so let timeout code complete it now.
+		 */
+		rc = BLK_EH_HANDLED;
+		goto done;
+	}
+
 	if (session->state != ISCSI_STATE_LOGGED_IN) {
 		/*
 		 * We are probably in the middle of iscsi recovery so let
@@ -1925,16 +1935,6 @@ static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
 		goto done;
 	}
 
-	task = (struct iscsi_task *)sc->SCp.ptr;
-	if (!task) {
-		/*
-		 * Raced with completion. Just reset timer, and let it
-		 * complete normally
-		 */
-		rc = BLK_EH_RESET_TIMER;
-		goto done;
-	}
-
 	/*
 	 * If we have sent (at least queued to the network layer) a pdu or
 	 * recvd one for the task since the last timeout ask for
@@ -2807,6 +2807,7 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
 	kfree(session->username);
 	kfree(session->username_in);
 	kfree(session->targetname);
+	kfree(session->targetalias);
 	kfree(session->initiatorname);
 	kfree(session->ifacename);
 
@@ -3200,7 +3201,7 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
 		sscanf(buf, "%d", &session->initial_r2t_en);
 		break;
 	case ISCSI_PARAM_MAX_R2T:
-		sscanf(buf, "%d", &session->max_r2t);
+		sscanf(buf, "%hu", &session->max_r2t);
 		break;
 	case ISCSI_PARAM_IMM_DATA_EN:
 		sscanf(buf, "%d", &session->imm_data_en);
@@ -3233,6 +3234,8 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
 		return iscsi_switch_str_param(&session->password_in, buf);
 	case ISCSI_PARAM_TARGET_NAME:
 		return iscsi_switch_str_param(&session->targetname, buf);
+	case ISCSI_PARAM_TARGET_ALIAS:
+		return iscsi_switch_str_param(&session->targetalias, buf);
 	case ISCSI_PARAM_TPGT:
 		sscanf(buf, "%d", &session->tpgt);
 		break;
@@ -3299,6 +3302,9 @@ int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
 	case ISCSI_PARAM_TARGET_NAME:
 		len = sprintf(buf, "%s\n", session->targetname);
 		break;
+	case ISCSI_PARAM_TARGET_ALIAS:
+		len = sprintf(buf, "%s\n", session->targetalias);
+		break;
 	case ISCSI_PARAM_TPGT:
 		len = sprintf(buf, "%d\n", session->tpgt);
 		break;
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
index 7f0465b9623e..552e8a2b6f5f 100644
--- a/drivers/scsi/libiscsi_tcp.c
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -1170,6 +1170,24 @@ void iscsi_tcp_r2tpool_free(struct iscsi_session *session)
 }
 EXPORT_SYMBOL_GPL(iscsi_tcp_r2tpool_free);
 
+int iscsi_tcp_set_max_r2t(struct iscsi_conn *conn, char *buf)
+{
+	struct iscsi_session *session = conn->session;
+	unsigned short r2ts = 0;
+
+	sscanf(buf, "%hu", &r2ts);
+	if (session->max_r2t == r2ts)
+		return 0;
+
+	if (!r2ts || !is_power_of_2(r2ts))
+		return -EINVAL;
+
+	session->max_r2t = r2ts;
+	iscsi_tcp_r2tpool_free(session);
+	return iscsi_tcp_r2tpool_alloc(session);
+}
+EXPORT_SYMBOL_GPL(iscsi_tcp_set_max_r2t);
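
For illustration (values hypothetical, not from the patch), the new
helper accepts only positive power-of-two values and treats a repeated
value as a no-op:

	iscsi_tcp_set_max_r2t(conn, "8");	/* ok: power of two */
	iscsi_tcp_set_max_r2t(conn, "8");	/* 0: already the current value */
	iscsi_tcp_set_max_r2t(conn, "0");	/* -EINVAL */
	iscsi_tcp_set_max_r2t(conn, "12");	/* -EINVAL: not a power of two */

On an accepted change the R2T pool is freed and reallocated at the new
size, so -ENOMEM from iscsi_tcp_r2tpool_alloc() is also possible.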
+
 void iscsi_tcp_conn_get_stats(struct iscsi_cls_conn *cls_conn,
 			      struct iscsi_stats *stats)
 {
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index db9238f2ecb8..bc0cecc6ad62 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -23,6 +23,8 @@
 
 #include <linux/scatterlist.h>
 #include <linux/slab.h>
+#include <linux/async.h>
+#include <linux/export.h>
 
 #include <scsi/sas_ata.h>
 #include "sas_internal.h"
@@ -93,22 +95,47 @@ static enum ata_completion_errors sas_to_ata_err(struct task_status_struct *ts)
 static void sas_ata_task_done(struct sas_task *task)
 {
 	struct ata_queued_cmd *qc = task->uldd_task;
-	struct domain_device *dev;
+	struct domain_device *dev = task->dev;
 	struct task_status_struct *stat = &task->task_status;
 	struct ata_task_resp *resp = (struct ata_task_resp *)stat->buf;
-	struct sas_ha_struct *sas_ha;
+	struct sas_ha_struct *sas_ha = dev->port->ha;
 	enum ata_completion_errors ac;
 	unsigned long flags;
 	struct ata_link *link;
+	struct ata_port *ap;
+
+	spin_lock_irqsave(&dev->done_lock, flags);
+	if (test_bit(SAS_HA_FROZEN, &sas_ha->state))
+		task = NULL;
+	else if (qc && qc->scsicmd)
+		ASSIGN_SAS_TASK(qc->scsicmd, NULL);
+	spin_unlock_irqrestore(&dev->done_lock, flags);
+
+	/* check if libsas-eh got to the task before us */
+	if (unlikely(!task))
+		return;
 
 	if (!qc)
 		goto qc_already_gone;
 
-	dev = qc->ap->private_data;
-	sas_ha = dev->port->ha;
-	link = &dev->sata_dev.ap->link;
+	ap = qc->ap;
+	link = &ap->link;
+
+	spin_lock_irqsave(ap->lock, flags);
+	/* check if we lost the race with libata/sas_ata_post_internal() */
+	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN)) {
+		spin_unlock_irqrestore(ap->lock, flags);
+		if (qc->scsicmd)
+			goto qc_already_gone;
+		else {
+			/* if eh is not involved and the port is frozen then the
+			 * ata internal abort process has taken responsibility
+			 * for this sas_task
+			 */
+			return;
+		}
+	}
 
-	spin_lock_irqsave(dev->sata_dev.ap->lock, flags);
 	if (stat->stat == SAS_PROTO_RESPONSE || stat->stat == SAM_STAT_GOOD ||
 	    ((stat->stat == SAM_STAT_CHECK_CONDITION &&
 	      dev->sata_dev.command_set == ATAPI_COMMAND_SET))) {
@@ -121,10 +148,6 @@ static void sas_ata_task_done(struct sas_task *task)
 			if (unlikely(link->eh_info.err_mask))
 				qc->flags |= ATA_QCFLAG_FAILED;
 		}
-
-		dev->sata_dev.sstatus = resp->sstatus;
-		dev->sata_dev.serror = resp->serror;
-		dev->sata_dev.scontrol = resp->scontrol;
 	} else {
 		ac = sas_to_ata_err(stat);
 		if (ac) {
@@ -144,24 +167,8 @@ static void sas_ata_task_done(struct sas_task *task)
 	}
 
 	qc->lldd_task = NULL;
-	if (qc->scsicmd)
-		ASSIGN_SAS_TASK(qc->scsicmd, NULL);
 	ata_qc_complete(qc);
-	spin_unlock_irqrestore(dev->sata_dev.ap->lock, flags);
-
-	/*
-	 * If the sas_task has an ata qc, a scsi_cmnd and the aborted
-	 * flag is set, then we must have come in via the libsas EH
-	 * functions.  When we exit this function, we need to put the
-	 * scsi_cmnd on the list of finished errors.  The ata_qc_complete
-	 * call cleans up the libata side of things but we're protected
-	 * from the scsi_cmnd going away because the scsi_cmnd is owned
-	 * by the EH, making libata's call to scsi_done a NOP.
-	 */
-	spin_lock_irqsave(&task->task_state_lock, flags);
-	if (qc->scsicmd && task->task_state_flags & SAS_TASK_STATE_ABORTED)
-		scsi_eh_finish_cmd(qc->scsicmd, &sas_ha->eh_done_q);
-	spin_unlock_irqrestore(&task->task_state_lock, flags);
+	spin_unlock_irqrestore(ap->lock, flags);
 
 qc_already_gone:
 	list_del_init(&task->list);
@@ -170,23 +177,30 @@ qc_already_gone:
 
 static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
 {
-	int res;
+	unsigned long flags;
 	struct sas_task *task;
-	struct domain_device *dev = qc->ap->private_data;
+	struct scatterlist *sg;
+	int ret = AC_ERR_SYSTEM;
+	unsigned int si, xfer = 0;
+	struct ata_port *ap = qc->ap;
+	struct domain_device *dev = ap->private_data;
 	struct sas_ha_struct *sas_ha = dev->port->ha;
 	struct Scsi_Host *host = sas_ha->core.shost;
 	struct sas_internal *i = to_sas_internal(host->transportt);
-	struct scatterlist *sg;
-	unsigned int xfer = 0;
-	unsigned int si;
+
+	/* TODO: audit callers to ensure they are ready for qc_issue to
+	 * unconditionally re-enable interrupts
+	 */
+	local_irq_save(flags);
+	spin_unlock(ap->lock);
 
 	/* If the device fell off, no sense in issuing commands */
-	if (dev->gone)
-		return AC_ERR_SYSTEM;
+	if (test_bit(SAS_DEV_GONE, &dev->state))
+		goto out;
 
 	task = sas_alloc_task(GFP_ATOMIC);
 	if (!task)
-		return AC_ERR_SYSTEM;
+		goto out;
 	task->dev = dev;
 	task->task_proto = SAS_PROTOCOL_STP;
 	task->task_done = sas_ata_task_done;
@@ -231,21 +245,24 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
 		ASSIGN_SAS_TASK(qc->scsicmd, task);
 
 	if (sas_ha->lldd_max_execute_num < 2)
-		res = i->dft->lldd_execute_task(task, 1, GFP_ATOMIC);
+		ret = i->dft->lldd_execute_task(task, 1, GFP_ATOMIC);
 	else
-		res = sas_queue_up(task);
+		ret = sas_queue_up(task);
 
 	/* Examine */
-	if (res) {
-		SAS_DPRINTK("lldd_execute_task returned: %d\n", res);
+	if (ret) {
+		SAS_DPRINTK("lldd_execute_task returned: %d\n", ret);
 
 		if (qc->scsicmd)
 			ASSIGN_SAS_TASK(qc->scsicmd, NULL);
 		sas_free_task(task);
-		return AC_ERR_SYSTEM;
+		ret = AC_ERR_SYSTEM;
 	}
 
-	return 0;
+ out:
+	spin_lock(ap->lock);
+	local_irq_restore(flags);
+	return ret;
 }
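
The bracket added at the top and bottom of sas_ata_qc_issue() exists
because libata calls ->qc_issue with ap->lock held and interrupts
disabled, while lldd_execute_task() may need to run without that lock
(the TODO above notes some lldds may even re-enable interrupts).  The
pattern reduces to:

	local_irq_save(flags);	/* keep irqs off while the lock is dropped */
	spin_unlock(ap->lock);	/* release libata's port lock */

	/* ... issue the sas_task to the LLDD ... */

	spin_lock(ap->lock);	/* restore the locking state the caller expects */
	local_irq_restore(flags);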
 
 static bool sas_ata_qc_fill_rtf(struct ata_queued_cmd *qc)
@@ -256,83 +273,222 @@ static bool sas_ata_qc_fill_rtf(struct ata_queued_cmd *qc)
 	return true;
 }
 
-static int sas_ata_hard_reset(struct ata_link *link, unsigned int *class,
-			       unsigned long deadline)
+static struct sas_internal *dev_to_sas_internal(struct domain_device *dev)
+{
+	return to_sas_internal(dev->port->ha->core.shost->transportt);
+}
+
+static void sas_get_ata_command_set(struct domain_device *dev);
+
+int sas_get_ata_info(struct domain_device *dev, struct ex_phy *phy)
 {
+	if (phy->attached_tproto & SAS_PROTOCOL_STP)
+		dev->tproto = phy->attached_tproto;
+	if (phy->attached_sata_dev)
+		dev->tproto |= SATA_DEV;
+
+	if (phy->attached_dev_type == SATA_PENDING)
+		dev->dev_type = SATA_PENDING;
+	else {
+		int res;
+
+		dev->dev_type = SATA_DEV;
+		res = sas_get_report_phy_sata(dev->parent, phy->phy_id,
+					      &dev->sata_dev.rps_resp);
+		if (res) {
+			SAS_DPRINTK("report phy sata to %016llx:0x%x returned "
+				    "0x%x\n", SAS_ADDR(dev->parent->sas_addr),
+				    phy->phy_id, res);
+			return res;
+		}
+		memcpy(dev->frame_rcvd, &dev->sata_dev.rps_resp.rps.fis,
+		       sizeof(struct dev_to_host_fis));
+		/* TODO switch to ata_dev_classify() */
+		sas_get_ata_command_set(dev);
+	}
+	return 0;
+}
+
+static int sas_ata_clear_pending(struct domain_device *dev, struct ex_phy *phy)
+{
+	int res;
+
+	/* we weren't pending, so successfully end the reset sequence now */
+	if (dev->dev_type != SATA_PENDING)
+		return 1;
+
+	/* hmmm, if this succeeds do we need to repost the domain_device to the
+	 * lldd so it can pick up new parameters?
+	 */
+	res = sas_get_ata_info(dev, phy);
+	if (res)
+		return 0; /* retry */
+	else
+		return 1;
+}
+
+static int smp_ata_check_ready(struct ata_link *link)
+{
+	int res;
 	struct ata_port *ap = link->ap;
 	struct domain_device *dev = ap->private_data;
-	struct sas_internal *i =
-		to_sas_internal(dev->port->ha->core.shost->transportt);
-	int res = TMF_RESP_FUNC_FAILED;
-	int ret = 0;
+	struct domain_device *ex_dev = dev->parent;
+	struct sas_phy *phy = sas_get_local_phy(dev);
+	struct ex_phy *ex_phy = &ex_dev->ex_dev.ex_phy[phy->number];
 
-	if (i->dft->lldd_I_T_nexus_reset)
-		res = i->dft->lldd_I_T_nexus_reset(dev);
+	res = sas_ex_phy_discover(ex_dev, phy->number);
+	sas_put_local_phy(phy);
 
-	if (res != TMF_RESP_FUNC_COMPLETE) {
-		SAS_DPRINTK("%s: Unable to reset I T nexus?\n", __func__);
-		ret = -EAGAIN;
+	/* break the wait early if the expander is unreachable,
+	 * otherwise keep polling
+	 */
+	if (res == -ECOMM)
+		return res;
+	if (res != SMP_RESP_FUNC_ACC)
+		return 0;
+
+	switch (ex_phy->attached_dev_type) {
+	case SATA_PENDING:
+		return 0;
+	case SAS_END_DEV:
+		if (ex_phy->attached_sata_dev)
+			return sas_ata_clear_pending(dev, ex_phy);
+	default:
+		return -ENODEV;
 	}
+}
 
-	switch (dev->sata_dev.command_set) {
-		case ATA_COMMAND_SET:
-			SAS_DPRINTK("%s: Found ATA device.\n", __func__);
-			*class = ATA_DEV_ATA;
-			break;
-		case ATAPI_COMMAND_SET:
-			SAS_DPRINTK("%s: Found ATAPI device.\n", __func__);
-			*class = ATA_DEV_ATAPI;
-			break;
-		default:
-			SAS_DPRINTK("%s: Unknown SATA command set: %d.\n",
-				    __func__,
-				    dev->sata_dev.command_set);
-			*class = ATA_DEV_UNKNOWN;
-			break;
+static int local_ata_check_ready(struct ata_link *link)
+{
+	struct ata_port *ap = link->ap;
+	struct domain_device *dev = ap->private_data;
+	struct sas_internal *i = dev_to_sas_internal(dev);
+
+	if (i->dft->lldd_ata_check_ready)
+		return i->dft->lldd_ata_check_ready(dev);
+	else {
+		/* lldd's that don't implement 'ready' checking get the
+		 * old default behavior of not coordinating reset
+		 * recovery with libata
+		 */
+		return 1;
 	}
+}
 
-	ap->cbl = ATA_CBL_SATA;
-	return ret;
+static int sas_ata_printk(const char *level, const struct domain_device *ddev,
+			  const char *fmt, ...)
+{
+	struct ata_port *ap = ddev->sata_dev.ap;
+	struct device *dev = &ddev->rphy->dev;
+	struct va_format vaf;
+	va_list args;
+	int r;
+
+	va_start(args, fmt);
+
+	vaf.fmt = fmt;
+	vaf.va = &args;
+
+	r = printk("%ssas: ata%u: %s: %pV",
+		   level, ap->print_id, dev_name(dev), &vaf);
+
+	va_end(args);
+
+	return r;
 }
 
-static int sas_ata_soft_reset(struct ata_link *link, unsigned int *class,
-			       unsigned long deadline)
+static int sas_ata_hard_reset(struct ata_link *link, unsigned int *class,
+			      unsigned long deadline)
 {
+	int ret = 0, res;
+	struct sas_phy *phy;
 	struct ata_port *ap = link->ap;
+	int (*check_ready)(struct ata_link *link);
 	struct domain_device *dev = ap->private_data;
-	struct sas_internal *i =
-		to_sas_internal(dev->port->ha->core.shost->transportt);
-	int res = TMF_RESP_FUNC_FAILED;
-	int ret = 0;
+	struct sas_internal *i = dev_to_sas_internal(dev);
 
-	if (i->dft->lldd_ata_soft_reset)
-		res = i->dft->lldd_ata_soft_reset(dev);
+	res = i->dft->lldd_I_T_nexus_reset(dev);
+	if (res == -ENODEV)
+		return res;
 
-	if (res != TMF_RESP_FUNC_COMPLETE) {
-		SAS_DPRINTK("%s: Unable to soft reset\n", __func__);
-		ret = -EAGAIN;
-	}
+	if (res != TMF_RESP_FUNC_COMPLETE)
+		sas_ata_printk(KERN_DEBUG, dev, "Unable to reset ata device?\n");
+
+	phy = sas_get_local_phy(dev);
+	if (scsi_is_sas_phy_local(phy))
+		check_ready = local_ata_check_ready;
+	else
+		check_ready = smp_ata_check_ready;
+	sas_put_local_phy(phy);
+
+	ret = ata_wait_after_reset(link, deadline, check_ready);
+	if (ret && ret != -EAGAIN)
+		sas_ata_printk(KERN_ERR, dev, "reset failed (errno=%d)\n", ret);
 
+	/* XXX: if the class changes during the reset the upper layer
+	 * should be informed, if the device has gone away we assume
+	 * libsas will eventually delete it
+	 */
 	switch (dev->sata_dev.command_set) {
 	case ATA_COMMAND_SET:
-		SAS_DPRINTK("%s: Found ATA device.\n", __func__);
 		*class = ATA_DEV_ATA;
 		break;
 	case ATAPI_COMMAND_SET:
-		SAS_DPRINTK("%s: Found ATAPI device.\n", __func__);
 		*class = ATA_DEV_ATAPI;
 		break;
-	default:
-		SAS_DPRINTK("%s: Unknown SATA command set: %d.\n",
-			    __func__, dev->sata_dev.command_set);
-		*class = ATA_DEV_UNKNOWN;
-		break;
 	}
 
 	ap->cbl = ATA_CBL_SATA;
 	return ret;
 }
 
+/*
+ * notify the lldd to forget the sas_task for this internal ata command
+ * that bypasses scsi-eh
+ */
+static void sas_ata_internal_abort(struct sas_task *task)
+{
+	struct sas_internal *si = dev_to_sas_internal(task->dev);
+	unsigned long flags;
+	int res;
+
+	spin_lock_irqsave(&task->task_state_lock, flags);
+	if (task->task_state_flags & SAS_TASK_STATE_ABORTED ||
+	    task->task_state_flags & SAS_TASK_STATE_DONE) {
+		spin_unlock_irqrestore(&task->task_state_lock, flags);
+		SAS_DPRINTK("%s: Task %p already finished.\n", __func__,
+			    task);
+		goto out;
+	}
+	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
+	spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+	res = si->dft->lldd_abort_task(task);
+
+	spin_lock_irqsave(&task->task_state_lock, flags);
+	if (task->task_state_flags & SAS_TASK_STATE_DONE ||
+	    res == TMF_RESP_FUNC_COMPLETE) {
+		spin_unlock_irqrestore(&task->task_state_lock, flags);
+		goto out;
+	}
+
+	/* XXX we are not prepared to deal with ->lldd_abort_task()
+	 * failures.  TODO: lldds need to unconditionally forget about
+	 * aborted ata tasks, otherwise we (likely) leak the sas task
+	 * here
+	 */
+	SAS_DPRINTK("%s: Task %p leaked.\n", __func__, task);
+
+	if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
+		task->task_state_flags &= ~SAS_TASK_STATE_ABORTED;
+	spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+	return;
+ out:
+	list_del_init(&task->list);
+	sas_free_task(task);
+}
+
 static void sas_ata_post_internal(struct ata_queued_cmd *qc)
 {
 	if (qc->flags & ATA_QCFLAG_FAILED)
@@ -340,30 +496,35 @@ static void sas_ata_post_internal(struct ata_queued_cmd *qc)
 
 	if (qc->err_mask) {
 		/*
-		 * Find the sas_task and kill it.  By this point,
-		 * libata has decided to kill the qc, so we needn't
-		 * bother with sas_ata_task_done.  But we still
-		 * ought to abort the task.
+		 * Find the sas_task and kill it.  By this point, libata
+		 * has decided to kill the qc and has frozen the port.
+		 * In this state sas_ata_task_done() will no longer free
+		 * the sas_task, so we need to notify the lldd (via
+		 * ->lldd_abort_task) that the task is dead and free it
+		 * ourselves.
 		 */
 		struct sas_task *task = qc->lldd_task;
-		unsigned long flags;
 
 		qc->lldd_task = NULL;
-		if (task) {
-			/* Should this be a AT(API) device reset? */
-			spin_lock_irqsave(&task->task_state_lock, flags);
-			task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
-			spin_unlock_irqrestore(&task->task_state_lock, flags);
-
-			task->uldd_task = NULL;
-			__sas_task_abort(task);
-		}
+		if (!task)
+			return;
+		task->uldd_task = NULL;
+		sas_ata_internal_abort(task);
 	}
 }
 
+
+static void sas_ata_set_dmamode(struct ata_port *ap, struct ata_device *ata_dev)
+{
+	struct domain_device *dev = ap->private_data;
+	struct sas_internal *i = dev_to_sas_internal(dev);
+
+	if (i->dft->lldd_ata_set_dmamode)
+		i->dft->lldd_ata_set_dmamode(dev);
+}
+
 static struct ata_port_operations sas_sata_ops = {
 	.prereset		= ata_std_prereset,
-	.softreset		= sas_ata_soft_reset,
 	.hardreset		= sas_ata_hard_reset,
 	.postreset		= ata_std_postreset,
 	.error_handler		= ata_std_error_handler,
@@ -374,6 +535,7 @@ static struct ata_port_operations sas_sata_ops = {
 	.qc_fill_rtf		= sas_ata_qc_fill_rtf,
 	.port_start		= ata_sas_port_start,
 	.port_stop		= ata_sas_port_stop,
+	.set_dmamode		= sas_ata_set_dmamode,
 };
 
 static struct ata_port_info sata_port_info = {
@@ -384,11 +546,10 @@ static struct ata_port_info sata_port_info = {
 	.port_ops = &sas_sata_ops
 };
 
-int sas_ata_init_host_and_port(struct domain_device *found_dev,
-			       struct scsi_target *starget)
+int sas_ata_init_host_and_port(struct domain_device *found_dev)
 {
-	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
-	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
+	struct sas_ha_struct *ha = found_dev->port->ha;
+	struct Scsi_Host *shost = ha->core.shost;
 	struct ata_port *ap;
 
 	ata_host_init(&found_dev->sata_dev.ata_host,
@@ -406,6 +567,8 @@ int sas_ata_init_host_and_port(struct domain_device *found_dev,
 	ap->private_data = found_dev;
 	ap->cbl = ATA_CBL_SATA;
 	ap->scsi_host = shost;
+	/* publish initialized ata port */
+	smp_wmb();
 	found_dev->sata_dev.ap = ap;
 
 	return 0;
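
The smp_wmb() here pairs with the smp_rmb() added to
sas_ata_dev_eh_valid() later in this patch: the init path publishes a
fully constructed ata_port before storing the sata_dev.ap pointer, and
the eh path orders its load of that pointer before using the port.
Schematically (a sketch of the pairing, not new code):

	/* writer (this function)         reader (sas_ata_dev_eh_valid) */
	ap->private_data = found_dev;     ap = dev->sata_dev.ap;
	smp_wmb();                        smp_rmb();
	found_dev->sata_dev.ap = ap;      return !!ap;  /* non-NULL => usable */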
@@ -436,168 +599,14 @@ void sas_ata_task_abort(struct sas_task *task)
 	complete(waiting);
 }
 
-static void sas_task_timedout(unsigned long _task)
-{
-	struct sas_task *task = (void *) _task;
-	unsigned long flags;
-
-	spin_lock_irqsave(&task->task_state_lock, flags);
-	if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
-		task->task_state_flags |= SAS_TASK_STATE_ABORTED;
-	spin_unlock_irqrestore(&task->task_state_lock, flags);
-
-	complete(&task->completion);
-}
-
-static void sas_disc_task_done(struct sas_task *task)
-{
-	if (!del_timer(&task->timer))
-		return;
-	complete(&task->completion);
-}
-
-#define SAS_DEV_TIMEOUT 10
-
-/**
- * sas_execute_task -- Basic task processing for discovery
- * @task: the task to be executed
- * @buffer: pointer to buffer to do I/O
- * @size: size of @buffer
- * @dma_dir: DMA direction.  DMA_xxx
- */
-static int sas_execute_task(struct sas_task *task, void *buffer, int size,
-			    enum dma_data_direction dma_dir)
-{
-	int res = 0;
-	struct scatterlist *scatter = NULL;
-	struct task_status_struct *ts = &task->task_status;
-	int num_scatter = 0;
-	int retries = 0;
-	struct sas_internal *i =
-		to_sas_internal(task->dev->port->ha->core.shost->transportt);
-
-	if (dma_dir != DMA_NONE) {
-		scatter = kzalloc(sizeof(*scatter), GFP_KERNEL);
-		if (!scatter)
-			goto out;
-
-		sg_init_one(scatter, buffer, size);
-		num_scatter = 1;
-	}
-
-	task->task_proto = task->dev->tproto;
-	task->scatter = scatter;
-	task->num_scatter = num_scatter;
-	task->total_xfer_len = size;
-	task->data_dir = dma_dir;
-	task->task_done = sas_disc_task_done;
-	if (dma_dir != DMA_NONE &&
-	    sas_protocol_ata(task->task_proto)) {
-		task->num_scatter = dma_map_sg(task->dev->port->ha->dev,
-					       task->scatter,
-					       task->num_scatter,
-					       task->data_dir);
-	}
-
-	for (retries = 0; retries < 5; retries++) {
-		task->task_state_flags = SAS_TASK_STATE_PENDING;
-		init_completion(&task->completion);
-
-		task->timer.data = (unsigned long) task;
-		task->timer.function = sas_task_timedout;
-		task->timer.expires = jiffies + SAS_DEV_TIMEOUT*HZ;
-		add_timer(&task->timer);
-
-		res = i->dft->lldd_execute_task(task, 1, GFP_KERNEL);
-		if (res) {
-			del_timer(&task->timer);
-			SAS_DPRINTK("executing SAS discovery task failed:%d\n",
-				    res);
-			goto ex_err;
-		}
-		wait_for_completion(&task->completion);
-		res = -ECOMM;
-		if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
-			int res2;
-			SAS_DPRINTK("task aborted, flags:0x%x\n",
-				    task->task_state_flags);
-			res2 = i->dft->lldd_abort_task(task);
-			SAS_DPRINTK("came back from abort task\n");
-			if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
-				if (res2 == TMF_RESP_FUNC_COMPLETE)
-					continue; /* Retry the task */
-				else
-					goto ex_err;
-			}
-		}
-		if (task->task_status.stat == SAM_STAT_BUSY ||
-			   task->task_status.stat == SAM_STAT_TASK_SET_FULL ||
-			   task->task_status.stat == SAS_QUEUE_FULL) {
-			SAS_DPRINTK("task: q busy, sleeping...\n");
-			schedule_timeout_interruptible(HZ);
-		} else if (task->task_status.stat == SAM_STAT_CHECK_CONDITION) {
-			struct scsi_sense_hdr shdr;
-
-			if (!scsi_normalize_sense(ts->buf, ts->buf_valid_size,
-						  &shdr)) {
-				SAS_DPRINTK("couldn't normalize sense\n");
-				continue;
-			}
-			if ((shdr.sense_key == 6 && shdr.asc == 0x29) ||
-			    (shdr.sense_key == 2 && shdr.asc == 4 &&
-			     shdr.ascq == 1)) {
-				SAS_DPRINTK("device %016llx LUN: %016llx "
-					    "powering up or not ready yet, "
-					    "sleeping...\n",
-					    SAS_ADDR(task->dev->sas_addr),
-					    SAS_ADDR(task->ssp_task.LUN));
-
-				schedule_timeout_interruptible(5*HZ);
-			} else if (shdr.sense_key == 1) {
-				res = 0;
-				break;
-			} else if (shdr.sense_key == 5) {
-				break;
-			} else {
-				SAS_DPRINTK("dev %016llx LUN: %016llx "
-					    "sense key:0x%x ASC:0x%x ASCQ:0x%x"
-					    "\n",
-					    SAS_ADDR(task->dev->sas_addr),
-					    SAS_ADDR(task->ssp_task.LUN),
-					    shdr.sense_key,
-					    shdr.asc, shdr.ascq);
-			}
-		} else if (task->task_status.resp != SAS_TASK_COMPLETE ||
-			   task->task_status.stat != SAM_STAT_GOOD) {
-			SAS_DPRINTK("task finished with resp:0x%x, "
-				    "stat:0x%x\n",
-				    task->task_status.resp,
-				    task->task_status.stat);
-			goto ex_err;
-		} else {
-			res = 0;
-			break;
-		}
-	}
-ex_err:
-	if (dma_dir != DMA_NONE) {
-		if (sas_protocol_ata(task->task_proto))
-			dma_unmap_sg(task->dev->port->ha->dev,
-				     task->scatter, task->num_scatter,
-				     task->data_dir);
-		kfree(scatter);
-	}
-out:
-	return res;
-}
-
-/* ---------- SATA ---------- */
-
 static void sas_get_ata_command_set(struct domain_device *dev)
 {
 	struct dev_to_host_fis *fis =
 		(struct dev_to_host_fis *) dev->frame_rcvd;
 
+	if (dev->dev_type == SATA_PENDING)
+		return;
+
 	if ((fis->sector_count == 1 && /* ATA */
 	     fis->lbal         == 1 &&
 	     fis->lbam         == 0 &&
@@ -636,224 +645,152 @@ static void sas_get_ata_command_set(struct domain_device *dev)
 		dev->sata_dev.command_set = ATAPI_COMMAND_SET;
 }
 
-/**
- * sas_issue_ata_cmd -- Basic SATA command processing for discovery
- * @dev: the device to send the command to
- * @command: the command register
- * @features: the features register
- * @buffer: pointer to buffer to do I/O
- * @size: size of @buffer
- * @dma_dir: DMA direction.  DMA_xxx
- */
-static int sas_issue_ata_cmd(struct domain_device *dev, u8 command,
-			     u8 features, void *buffer, int size,
-			     enum dma_data_direction dma_dir)
-{
-	int res = 0;
-	struct sas_task *task;
-	struct dev_to_host_fis *d2h_fis = (struct dev_to_host_fis *)
-		&dev->frame_rcvd[0];
-
-	res = -ENOMEM;
-	task = sas_alloc_task(GFP_KERNEL);
-	if (!task)
-		goto out;
-
-	task->dev = dev;
-
-	task->ata_task.fis.fis_type = 0x27;
-	task->ata_task.fis.command = command;
-	task->ata_task.fis.features = features;
-	task->ata_task.fis.device = d2h_fis->device;
-	task->ata_task.retry_count = 1;
-
-	res = sas_execute_task(task, buffer, size, dma_dir);
-
-	sas_free_task(task);
-out:
-	return res;
-}
-
-#define ATA_IDENTIFY_DEV         0xEC
-#define ATA_IDENTIFY_PACKET_DEV  0xA1
-#define ATA_SET_FEATURES         0xEF
-#define ATA_FEATURE_PUP_STBY_SPIN_UP 0x07
-
-/**
- * sas_discover_sata_dev -- discover an STP/SATA device (SATA_DEV)
- * @dev: STP/SATA device of interest (ATA/ATAPI)
- *
- * The LLDD has already been notified of this device, so that we can
- * send FISes to it.  Here we try to get IDENTIFY DEVICE or IDENTIFY
- * PACKET DEVICE, if ATAPI device, so that the LLDD can fine-tune its
- * performance for this device.
- */
-static int sas_discover_sata_dev(struct domain_device *dev)
+void sas_probe_sata(struct asd_sas_port *port)
 {
-	int     res;
-	__le16  *identify_x;
-	u8      command;
+	struct domain_device *dev, *n;
+	int err;
 
-	identify_x = kzalloc(512, GFP_KERNEL);
-	if (!identify_x)
-		return -ENOMEM;
-
-	if (dev->sata_dev.command_set == ATA_COMMAND_SET) {
-		dev->sata_dev.identify_device = identify_x;
-		command = ATA_IDENTIFY_DEV;
-	} else {
-		dev->sata_dev.identify_packet_device = identify_x;
-		command = ATA_IDENTIFY_PACKET_DEV;
-	}
+	mutex_lock(&port->ha->disco_mutex);
+	list_for_each_entry_safe(dev, n, &port->disco_list, disco_list_node) {
+		if (!dev_is_sata(dev))
+			continue;
 
-	res = sas_issue_ata_cmd(dev, command, 0, identify_x, 512,
-				DMA_FROM_DEVICE);
-	if (res)
-		goto out_err;
-
-	/* lives on the media? */
-	if (le16_to_cpu(identify_x[0]) & 4) {
-		/* incomplete response */
-		SAS_DPRINTK("sending SET FEATURE/PUP_STBY_SPIN_UP to "
-			    "dev %llx\n", SAS_ADDR(dev->sas_addr));
-		if (!(identify_x[83] & cpu_to_le16(1<<6)))
-			goto cont1;
-		res = sas_issue_ata_cmd(dev, ATA_SET_FEATURES,
-					ATA_FEATURE_PUP_STBY_SPIN_UP,
-					NULL, 0, DMA_NONE);
-		if (res)
-			goto cont1;
-
-		schedule_timeout_interruptible(5*HZ); /* More time? */
-		res = sas_issue_ata_cmd(dev, command, 0, identify_x, 512,
-					DMA_FROM_DEVICE);
-		if (res)
-			goto out_err;
+		err = sas_ata_init_host_and_port(dev);
+		if (err)
+			sas_fail_probe(dev, __func__, err);
+		else
+			ata_sas_async_port_init(dev->sata_dev.ap);
 	}
-cont1:
-	/* XXX Hint: register this SATA device with SATL.
-	   When this returns, dev->sata_dev->lu is alive and
-	   present.
-	sas_satl_register_dev(dev);
-	*/
+	mutex_unlock(&port->ha->disco_mutex);
 
-	sas_fill_in_rphy(dev, dev->rphy);
+	list_for_each_entry_safe(dev, n, &port->disco_list, disco_list_node) {
+		if (!dev_is_sata(dev))
+			continue;
 
-	return 0;
-out_err:
-	dev->sata_dev.identify_packet_device = NULL;
-	dev->sata_dev.identify_device = NULL;
-	kfree(identify_x);
-	return res;
-}
+		sas_ata_wait_eh(dev);
 
-static int sas_discover_sata_pm(struct domain_device *dev)
-{
-	return -ENODEV;
+		/* if libata could not bring the link up, don't surface
+		 * the device
+		 */
+		if (ata_dev_disabled(sas_to_ata_dev(dev)))
+			sas_fail_probe(dev, __func__, -ENODEV);
+	}
 }
 
 /**
  * sas_discover_sata -- discover an STP/SATA domain device
  * @dev: pointer to struct domain_device of interest
  *
- * First we notify the LLDD of this device, so we can send frames to
- * it.  Then depending on the type of device we call the appropriate
- * discover functions.  Once device discover is done, we notify the
- * LLDD so that it can fine-tune its parameters for the device, by
- * removing it and then adding it.  That is, the second time around,
- * the driver would have certain fields, that it is looking at, set.
- * Finally we initialize the kobj so that the device can be added to
- * the system at registration time.  Devices directly attached to a HA
- * port, have no parents.  All other devices do, and should have their
- * "parent" pointer set appropriately before calling this function.
+ * Devices directly attached to an HA port have no parents.  All other
+ * devices do, and should have their "parent" pointer set appropriately
+ * before calling this function.
  */
 int sas_discover_sata(struct domain_device *dev)
 {
 	int res;
 
+	if (dev->dev_type == SATA_PM)
+		return -ENODEV;
+
 	sas_get_ata_command_set(dev);
+	sas_fill_in_rphy(dev, dev->rphy);
 
 	res = sas_notify_lldd_dev_found(dev);
 	if (res)
 		return res;
 
-	switch (dev->dev_type) {
-	case SATA_DEV:
-		res = sas_discover_sata_dev(dev);
-		break;
-	case SATA_PM:
-		res = sas_discover_sata_pm(dev);
-		break;
-	default:
-		break;
-	}
-	sas_notify_lldd_dev_gone(dev);
-	if (!res) {
-		sas_notify_lldd_dev_found(dev);
-		res = sas_rphy_add(dev->rphy);
-	}
-
-	return res;
+	sas_discover_event(dev->port, DISCE_PROBE);
+	return 0;
 }
 
-void sas_ata_strategy_handler(struct Scsi_Host *shost)
+static void async_sas_ata_eh(void *data, async_cookie_t cookie)
 {
-	struct scsi_device *sdev;
+	struct domain_device *dev = data;
+	struct ata_port *ap = dev->sata_dev.ap;
+	struct sas_ha_struct *ha = dev->port->ha;
 
-	shost_for_each_device(sdev, shost) {
-		struct domain_device *ddev = sdev_to_domain_dev(sdev);
-		struct ata_port *ap = ddev->sata_dev.ap;
+	/* hold a reference over eh since we may be racing with final
+	 * remove once all commands are completed
+	 */
+	kref_get(&dev->kref);
+	sas_ata_printk(KERN_DEBUG, dev, "dev error handler\n");
+	ata_scsi_port_error_handler(ha->core.shost, ap);
+	sas_put_device(dev);
+}
 
-		if (!dev_is_sata(ddev))
-			continue;
+static bool sas_ata_dev_eh_valid(struct domain_device *dev)
+{
+	struct ata_port *ap;
 
-		ata_port_printk(ap, KERN_DEBUG, "sas eh calling libata port error handler");
-		ata_scsi_port_error_handler(shost, ap);
-	}
+	if (!dev_is_sata(dev))
+		return false;
+	ap = dev->sata_dev.ap;
+	/* consume fully initialized ata ports */
+	smp_rmb();
+	return !!ap;
 }
 
-int sas_ata_timed_out(struct scsi_cmnd *cmd, struct sas_task *task,
-		      enum blk_eh_timer_return *rtn)
+void sas_ata_strategy_handler(struct Scsi_Host *shost)
 {
-	struct domain_device *ddev = cmd_to_domain_dev(cmd);
+	struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
+	LIST_HEAD(async);
+	int i;
+
+	/* it's ok to defer revalidation events during ata eh, these
+	 * disks are in one of three states:
+	 * 1/ present for initial domain discovery, and these
+	 *    resets will cause bcn flutters
+	 * 2/ hot removed, we'll discover that after eh fails
+	 * 3/ hot added after initial discovery, lost the race, and need
+	 *    to catch the next train.
+	 */
+	sas_disable_revalidation(sas_ha);
 
-	if (!dev_is_sata(ddev) || task)
-		return 0;
+	spin_lock_irq(&sas_ha->phy_port_lock);
+	for (i = 0; i < sas_ha->num_phys; i++) {
+		struct asd_sas_port *port = sas_ha->sas_port[i];
+		struct domain_device *dev;
 
-	/* we're a sata device with no task, so this must be a libata
-	 * eh timeout.  Ideally should hook into libata timeout
-	 * handling, but there's no point, it just wants to activate
-	 * the eh thread */
-	*rtn = BLK_EH_NOT_HANDLED;
-	return 1;
+		spin_lock(&port->dev_list_lock);
+		list_for_each_entry(dev, &port->dev_list, dev_list_node) {
+			if (!sas_ata_dev_eh_valid(dev))
+				continue;
+			async_schedule_domain(async_sas_ata_eh, dev, &async);
+		}
+		spin_unlock(&port->dev_list_lock);
+	}
+	spin_unlock_irq(&sas_ha->phy_port_lock);
+
+	async_synchronize_full_domain(&async);
+
+	sas_enable_revalidation(sas_ha);
 }
 
-int sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q,
-	       struct list_head *done_q)
+void sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q,
+		struct list_head *done_q)
 {
-	int rtn = 0;
 	struct scsi_cmnd *cmd, *n;
-	struct ata_port *ap;
+	struct domain_device *eh_dev;
 
 	do {
 		LIST_HEAD(sata_q);
-
-		ap = NULL;
+		eh_dev = NULL;
 
 		list_for_each_entry_safe(cmd, n, work_q, eh_entry) {
 			struct domain_device *ddev = cmd_to_domain_dev(cmd);
 
 			if (!dev_is_sata(ddev) || TO_SAS_TASK(cmd))
 				continue;
-			if (ap && ap != ddev->sata_dev.ap)
+			if (eh_dev && eh_dev != ddev)
 				continue;
-			ap = ddev->sata_dev.ap;
-			rtn = 1;
+			eh_dev = ddev;
 			list_move(&cmd->eh_entry, &sata_q);
 		}
 
 		if (!list_empty(&sata_q)) {
-			ata_port_printk(ap, KERN_DEBUG, "sas eh calling libata cmd error handler\n");
+			struct ata_port *ap = eh_dev->sata_dev.ap;
+
+			sas_ata_printk(KERN_DEBUG, eh_dev, "cmd error handler\n");
 			ata_scsi_cmd_error_handler(shost, ap, &sata_q);
 			/*
 			 * ata's error handler may leave the cmd on the list
@@ -869,7 +806,36 @@ int sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q,
 			while (!list_empty(&sata_q))
 				list_del_init(sata_q.next);
 		}
-	} while (ap);
+	} while (eh_dev);
+}
+
+void sas_ata_schedule_reset(struct domain_device *dev)
+{
+	struct ata_eh_info *ehi;
+	struct ata_port *ap;
+	unsigned long flags;
+
+	if (!dev_is_sata(dev))
+		return;
+
+	ap = dev->sata_dev.ap;
+	ehi = &ap->link.eh_info;
+
+	spin_lock_irqsave(ap->lock, flags);
+	ehi->err_mask |= AC_ERR_TIMEOUT;
+	ehi->action |= ATA_EH_RESET;
+	ata_port_schedule_eh(ap);
+	spin_unlock_irqrestore(ap->lock, flags);
+}
+EXPORT_SYMBOL_GPL(sas_ata_schedule_reset);
+
+void sas_ata_wait_eh(struct domain_device *dev)
+{
+	struct ata_port *ap;
+
+	if (!dev_is_sata(dev))
+		return;
 
-	return rtn;
+	ap = dev->sata_dev.ap;
+	ata_port_wait_eh(ap);
 }
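
The new strategy handler above replaces the per-sdev loop with a fan-out: each SATA domain_device pins itself with a kref and runs its port error handler on a private async domain, and the caller synchronizes the whole domain before re-enabling revalidation. A minimal sketch of that pattern using the stock <linux/async.h> and <linux/kref.h> APIs follows; the demo_* names are illustrative stand-ins, not libsas symbols.

#include <linux/async.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/slab.h>

struct demo_dev {
	struct kref kref;
	struct list_head node;
};

static void demo_release(struct kref *kref)
{
	kfree(container_of(kref, struct demo_dev, kref));
}

static void demo_eh(void *data, async_cookie_t cookie)
{
	struct demo_dev *dev = data;

	/* per-device recovery would run here; the reference taken at
	 * schedule time keeps 'dev' alive even if it is unregistered
	 * while eh is in flight */
	kref_put(&dev->kref, demo_release);
}

static void demo_strategy(struct list_head *devs)
{
	LIST_HEAD(async);		/* private async domain */
	struct demo_dev *dev;

	list_for_each_entry(dev, devs, node) {
		kref_get(&dev->kref);	/* pin over eh */
		async_schedule_domain(demo_eh, dev, &async);
	}
	/* wait for every scheduled handler before proceeding */
	async_synchronize_full_domain(&async);
}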
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index 54a5199ceb56..364679675602 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -30,29 +30,30 @@
 
 #include <scsi/scsi_transport.h>
 #include <scsi/scsi_transport_sas.h>
+#include <scsi/sas_ata.h>
 #include "../scsi_sas_internal.h"
 
 /* ---------- Basic task processing for discovery purposes ---------- */
 
 void sas_init_dev(struct domain_device *dev)
 {
-        INIT_LIST_HEAD(&dev->siblings);
-        INIT_LIST_HEAD(&dev->dev_list_node);
-        switch (dev->dev_type) {
-        case SAS_END_DEV:
-                break;
-        case EDGE_DEV:
-        case FANOUT_DEV:
-                INIT_LIST_HEAD(&dev->ex_dev.children);
-                break;
-        case SATA_DEV:
-        case SATA_PM:
-        case SATA_PM_PORT:
-                INIT_LIST_HEAD(&dev->sata_dev.children);
-                break;
-        default:
-                break;
-        }
+	switch (dev->dev_type) {
+	case SAS_END_DEV:
+		break;
+	case EDGE_DEV:
+	case FANOUT_DEV:
+		INIT_LIST_HEAD(&dev->ex_dev.children);
+		mutex_init(&dev->ex_dev.cmd_mutex);
+		break;
+	case SATA_DEV:
+	case SATA_PM:
+	case SATA_PM_PORT:
+	case SATA_PENDING:
+		INIT_LIST_HEAD(&dev->sata_dev.children);
+		break;
+	default:
+		break;
+	}
 }
 
 /* ---------- Domain device discovery ---------- */
@@ -68,19 +69,18 @@ void sas_init_dev(struct domain_device *dev)
  */
 static int sas_get_port_device(struct asd_sas_port *port)
 {
-	unsigned long flags;
 	struct asd_sas_phy *phy;
 	struct sas_rphy *rphy;
 	struct domain_device *dev;
 
-	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	dev = sas_alloc_device();
 	if (!dev)
 		return -ENOMEM;
 
-	spin_lock_irqsave(&port->phy_list_lock, flags);
+	spin_lock_irq(&port->phy_list_lock);
 	if (list_empty(&port->phy_list)) {
-		spin_unlock_irqrestore(&port->phy_list_lock, flags);
-		kfree(dev);
+		spin_unlock_irq(&port->phy_list_lock);
+		sas_put_device(dev);
 		return -ENODEV;
 	}
 	phy = container_of(port->phy_list.next, struct asd_sas_phy, port_phy_el);
@@ -88,7 +88,7 @@ static int sas_get_port_device(struct asd_sas_port *port)
 	memcpy(dev->frame_rcvd, phy->frame_rcvd, min(sizeof(dev->frame_rcvd),
 					     (size_t)phy->frame_rcvd_size));
 	spin_unlock(&phy->frame_rcvd_lock);
-	spin_unlock_irqrestore(&port->phy_list_lock, flags);
+	spin_unlock_irq(&port->phy_list_lock);
 
 	if (dev->frame_rcvd[0] == 0x34 && port->oob_mode == SATA_OOB_MODE) {
 		struct dev_to_host_fis *fis =
@@ -130,9 +130,14 @@ static int sas_get_port_device(struct asd_sas_port *port)
 	}
 
 	if (!rphy) {
-		kfree(dev);
+		sas_put_device(dev);
 		return -ENODEV;
 	}
+
+	spin_lock_irq(&port->phy_list_lock);
+	list_for_each_entry(phy, &port->phy_list, port_phy_el)
+		sas_phy_set_target(phy, dev);
+	spin_unlock_irq(&port->phy_list_lock);
 	rphy->identify.phy_identifier = phy->phy->identify.phy_identifier;
 	memcpy(dev->sas_addr, port->attached_sas_addr, SAS_ADDR_SIZE);
 	sas_fill_in_rphy(dev, rphy);
@@ -147,11 +152,17 @@ static int sas_get_port_device(struct asd_sas_port *port)
 	memset(port->disc.eeds_a, 0, SAS_ADDR_SIZE);
 	memset(port->disc.eeds_b, 0, SAS_ADDR_SIZE);
 	port->disc.max_level = 0;
+	sas_device_set_phy(dev, port->port);
 
 	dev->rphy = rphy;
-	spin_lock_irq(&port->dev_list_lock);
-	list_add_tail(&dev->dev_list_node, &port->dev_list);
-	spin_unlock_irq(&port->dev_list_lock);
+
+	if (dev_is_sata(dev) || dev->dev_type == SAS_END_DEV)
+		list_add_tail(&dev->disco_list_node, &port->disco_list);
+	else {
+		spin_lock_irq(&port->dev_list_lock);
+		list_add_tail(&dev->dev_list_node, &port->dev_list);
+		spin_unlock_irq(&port->dev_list_lock);
+	}
 
 	return 0;
 }
@@ -173,6 +184,7 @@ int sas_notify_lldd_dev_found(struct domain_device *dev)
 			       dev_name(sas_ha->dev),
 			       SAS_ADDR(dev->sas_addr), res);
 		}
+		kref_get(&dev->kref);
 	}
 	return res;
 }
@@ -184,12 +196,40 @@ void sas_notify_lldd_dev_gone(struct domain_device *dev)
 	struct Scsi_Host *shost = sas_ha->core.shost;
 	struct sas_internal *i = to_sas_internal(shost->transportt);
 
-	if (i->dft->lldd_dev_gone)
+	if (i->dft->lldd_dev_gone) {
 		i->dft->lldd_dev_gone(dev);
+		sas_put_device(dev);
+	}
 }
 
-/* ---------- Common/dispatchers ---------- */
+static void sas_probe_devices(struct work_struct *work)
+{
+	struct domain_device *dev, *n;
+	struct sas_discovery_event *ev =
+		container_of(work, struct sas_discovery_event, work);
+	struct asd_sas_port *port = ev->port;
+
+	clear_bit(DISCE_PROBE, &port->disc.pending);
 
+	/* devices must be domain members before link recovery and probe */
+	list_for_each_entry(dev, &port->disco_list, disco_list_node) {
+		spin_lock_irq(&port->dev_list_lock);
+		list_add_tail(&dev->dev_list_node, &port->dev_list);
+		spin_unlock_irq(&port->dev_list_lock);
+	}
+
+	sas_probe_sata(port);
+
+	list_for_each_entry_safe(dev, n, &port->disco_list, disco_list_node) {
+		int err;
+
+		err = sas_rphy_add(dev->rphy);
+		if (err)
+			sas_fail_probe(dev, __func__, err);
+		else
+			list_del_init(&dev->disco_list_node);
+	}
+}
 
 /**
  * sas_discover_end_dev -- discover an end device (SSP, etc)
@@ -203,22 +243,36 @@ int sas_discover_end_dev(struct domain_device *dev)
 
 	res = sas_notify_lldd_dev_found(dev);
 	if (res)
-		goto out_err2;
-
-	res = sas_rphy_add(dev->rphy);
-	if (res)
-		goto out_err;
+		return res;
+	sas_discover_event(dev->port, DISCE_PROBE);
 
 	return 0;
-
-out_err:
-	sas_notify_lldd_dev_gone(dev);
-out_err2:
-	return res;
 }
 
 /* ---------- Device registration and unregistration ---------- */
 
+void sas_free_device(struct kref *kref)
+{
+	struct domain_device *dev = container_of(kref, typeof(*dev), kref);
+
+	if (dev->parent)
+		sas_put_device(dev->parent);
+
+	sas_port_put_phy(dev->phy);
+	dev->phy = NULL;
+
+	/* remove the phys and ports, everything else should be gone */
+	if (dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV)
+		kfree(dev->ex_dev.ex_phy);
+
+	if (dev_is_sata(dev) && dev->sata_dev.ap) {
+		ata_sas_port_destroy(dev->sata_dev.ap);
+		dev->sata_dev.ap = NULL;
+	}
+
+	kfree(dev);
+}
+
 static void sas_unregister_common_dev(struct asd_sas_port *port, struct domain_device *dev)
 {
 	sas_notify_lldd_dev_gone(dev);
@@ -230,34 +284,84 @@ static void sas_unregister_common_dev(struct asd_sas_port *port, struct domain_d
 	spin_lock_irq(&port->dev_list_lock);
 	list_del_init(&dev->dev_list_node);
 	spin_unlock_irq(&port->dev_list_lock);
+
+	sas_put_device(dev);
 }
 
-void sas_unregister_dev(struct asd_sas_port *port, struct domain_device *dev)
+static void sas_destruct_devices(struct work_struct *work)
 {
-	if (dev->rphy) {
+	struct domain_device *dev, *n;
+	struct sas_discovery_event *ev =
+		container_of(work, struct sas_discovery_event, work);
+	struct asd_sas_port *port = ev->port;
+
+	clear_bit(DISCE_DESTRUCT, &port->disc.pending);
+
+	list_for_each_entry_safe(dev, n, &port->destroy_list, disco_list_node) {
+		list_del_init(&dev->disco_list_node);
+
 		sas_remove_children(&dev->rphy->dev);
 		sas_rphy_delete(dev->rphy);
 		dev->rphy = NULL;
+		sas_unregister_common_dev(port, dev);
 	}
-	if (dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV) {
-		/* remove the phys and ports, everything else should be gone */
-		kfree(dev->ex_dev.ex_phy);
-		dev->ex_dev.ex_phy = NULL;
+}
+
+void sas_unregister_dev(struct asd_sas_port *port, struct domain_device *dev)
+{
+	if (!test_bit(SAS_DEV_DESTROY, &dev->state) &&
+	    !list_empty(&dev->disco_list_node)) {
+		/* this rphy never saw sas_rphy_add */
+		list_del_init(&dev->disco_list_node);
+		sas_rphy_free(dev->rphy);
+		dev->rphy = NULL;
+		sas_unregister_common_dev(port, dev);
+	}
+
+	if (dev->rphy && !test_and_set_bit(SAS_DEV_DESTROY, &dev->state)) {
+		sas_rphy_unlink(dev->rphy);
+		list_move_tail(&dev->disco_list_node, &port->destroy_list);
+		sas_discover_event(dev->port, DISCE_DESTRUCT);
 	}
-	sas_unregister_common_dev(port, dev);
 }
 
-void sas_unregister_domain_devices(struct asd_sas_port *port)
+void sas_unregister_domain_devices(struct asd_sas_port *port, int gone)
 {
 	struct domain_device *dev, *n;
 
-	list_for_each_entry_safe_reverse(dev, n, &port->dev_list, dev_list_node)
+	list_for_each_entry_safe_reverse(dev, n, &port->dev_list, dev_list_node) {
+		if (gone)
+			set_bit(SAS_DEV_GONE, &dev->state);
+		sas_unregister_dev(port, dev);
+	}
+
+	list_for_each_entry_safe(dev, n, &port->disco_list, disco_list_node)
 		sas_unregister_dev(port, dev);
 
 	port->port->rphy = NULL;
 
 }
 
+void sas_device_set_phy(struct domain_device *dev, struct sas_port *port)
+{
+	struct sas_ha_struct *ha;
+	struct sas_phy *new_phy;
+
+	if (!dev)
+		return;
+
+	ha = dev->port->ha;
+	new_phy = sas_port_get_phy(port);
+
+	/* pin and record last seen phy */
+	spin_lock_irq(&ha->phy_port_lock);
+	if (new_phy) {
+		sas_port_put_phy(dev->phy);
+		dev->phy = new_phy;
+	}
+	spin_unlock_irq(&ha->phy_port_lock);
+}
+
 /* ---------- Discovery and Revalidation ---------- */
 
 /**
@@ -277,8 +381,7 @@ static void sas_discover_domain(struct work_struct *work)
 		container_of(work, struct sas_discovery_event, work);
 	struct asd_sas_port *port = ev->port;
 
-	sas_begin_event(DISCE_DISCOVER_DOMAIN, &port->disc.disc_event_lock,
-			&port->disc.pending);
+	clear_bit(DISCE_DISCOVER_DOMAIN, &port->disc.pending);
 
 	if (port->port_dev)
 		return;
@@ -318,11 +421,12 @@ static void sas_discover_domain(struct work_struct *work)
 		sas_rphy_free(dev->rphy);
 		dev->rphy = NULL;
 
+		list_del_init(&dev->disco_list_node);
 		spin_lock_irq(&port->dev_list_lock);
 		list_del_init(&dev->dev_list_node);
 		spin_unlock_irq(&port->dev_list_lock);
 
-		kfree(dev); /* not kobject_register-ed yet */
+		sas_put_device(dev);
 		port->port_dev = NULL;
 	}
 
@@ -336,21 +440,51 @@ static void sas_revalidate_domain(struct work_struct *work)
 	struct sas_discovery_event *ev =
 		container_of(work, struct sas_discovery_event, work);
 	struct asd_sas_port *port = ev->port;
+	struct sas_ha_struct *ha = port->ha;
+
+	/* prevent revalidation from finding sata links in recovery */
+	mutex_lock(&ha->disco_mutex);
+	if (test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) {
+		SAS_DPRINTK("REVALIDATION DEFERRED on port %d, pid:%d\n",
+			    port->id, task_pid_nr(current));
+		goto out;
+	}
 
-	sas_begin_event(DISCE_REVALIDATE_DOMAIN, &port->disc.disc_event_lock,
-			&port->disc.pending);
+	clear_bit(DISCE_REVALIDATE_DOMAIN, &port->disc.pending);
 
 	SAS_DPRINTK("REVALIDATING DOMAIN on port %d, pid:%d\n", port->id,
 		    task_pid_nr(current));
+
 	if (port->port_dev)
 		res = sas_ex_revalidate_domain(port->port_dev);
 
 	SAS_DPRINTK("done REVALIDATING DOMAIN on port %d, pid:%d, res 0x%x\n",
 		    port->id, task_pid_nr(current), res);
+ out:
+	mutex_unlock(&ha->disco_mutex);
 }
 
 /* ---------- Events ---------- */
 
+static void sas_chain_work(struct sas_ha_struct *ha, struct work_struct *work)
+{
+	/* chained work is not subject to SAS_HA_DRAINING or SAS_HA_REGISTERED */
+	scsi_queue_work(ha->core.shost, work);
+}
+
+static void sas_chain_event(int event, unsigned long *pending,
+			    struct work_struct *work,
+			    struct sas_ha_struct *ha)
+{
+	if (!test_and_set_bit(event, pending)) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&ha->state_lock, flags);
+		sas_chain_work(ha, work);
+		spin_unlock_irqrestore(&ha->state_lock, flags);
+	}
+}
+
 int sas_discover_event(struct asd_sas_port *port, enum discover_event ev)
 {
 	struct sas_discovery *disc;
@@ -361,8 +495,7 @@ int sas_discover_event(struct asd_sas_port *port, enum discover_event ev)
 
 	BUG_ON(ev >= DISC_NUM_EVENTS);
 
-	sas_queue_event(ev, &disc->disc_event_lock, &disc->pending,
-			&disc->disc_work[ev].work, port->ha);
+	sas_chain_event(ev, &disc->pending, &disc->disc_work[ev].work, port->ha);
 
 	return 0;
 }
@@ -380,9 +513,10 @@ void sas_init_disc(struct sas_discovery *disc, struct asd_sas_port *port)
 	static const work_func_t sas_event_fns[DISC_NUM_EVENTS] = {
 		[DISCE_DISCOVER_DOMAIN] = sas_discover_domain,
 		[DISCE_REVALIDATE_DOMAIN] = sas_revalidate_domain,
+		[DISCE_PROBE] = sas_probe_devices,
+		[DISCE_DESTRUCT] = sas_destruct_devices,
 	};
 
-	spin_lock_init(&disc->disc_event_lock);
 	disc->pending = 0;
 	for (i = 0; i < DISC_NUM_EVENTS; i++) {
 		INIT_WORK(&disc->disc_work[i].work, sas_event_fns[i]);
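
sas_init_disc() now wires DISCE_PROBE and DISCE_DESTRUCT into the same event table, and the begin/queue helpers give way to a lockless pending-bit scheme: submitters queue work only on the 0->1 transition of an event bit, and each work function clears its bit before acting so an event arriving mid-run re-queues it. A minimal sketch of that scheme, with hypothetical demo_* names:

#include <linux/bitops.h>
#include <linux/workqueue.h>

enum { DEMO_EV_DISCOVER, DEMO_EV_PROBE, DEMO_NUM_EV };

struct demo_port {
	unsigned long pending;
	struct work_struct work[DEMO_NUM_EV];
};

static void demo_queue_event(struct demo_port *port, int ev)
{
	/* only the 0->1 transition queues; duplicate events collapse */
	if (!test_and_set_bit(ev, &port->pending))
		schedule_work(&port->work[ev]);
}

static void demo_discover_work(struct work_struct *work)
{
	struct demo_port *port = container_of(work, struct demo_port,
					      work[DEMO_EV_DISCOVER]);

	/* clear before acting so a concurrent event re-queues us */
	clear_bit(DEMO_EV_DISCOVER, &port->pending);
	/* ... run discovery ... */
}

static void demo_port_init(struct demo_port *port)
{
	port->pending = 0;
	INIT_WORK(&port->work[DEMO_EV_DISCOVER], demo_discover_work);
}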
diff --git a/drivers/scsi/libsas/sas_event.c b/drivers/scsi/libsas/sas_event.c
index 9db30fb5caf2..16639bbae629 100644
--- a/drivers/scsi/libsas/sas_event.c
+++ b/drivers/scsi/libsas/sas_event.c
@@ -22,15 +22,103 @@
  *
  */
 
+#include <linux/export.h>
 #include <scsi/scsi_host.h>
 #include "sas_internal.h"
 #include "sas_dump.h"
 
+void sas_queue_work(struct sas_ha_struct *ha, struct work_struct *work)
+{
+	if (!test_bit(SAS_HA_REGISTERED, &ha->state))
+		return;
+
+	if (test_bit(SAS_HA_DRAINING, &ha->state))
+		list_add(&work->entry, &ha->defer_q);
+	else
+		scsi_queue_work(ha->core.shost, work);
+}
+
+static void sas_queue_event(int event, unsigned long *pending,
+			    struct work_struct *work,
+			    struct sas_ha_struct *ha)
+{
+	if (!test_and_set_bit(event, pending)) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&ha->state_lock, flags);
+		sas_queue_work(ha, work);
+		spin_unlock_irqrestore(&ha->state_lock, flags);
+	}
+}
+
+void __sas_drain_work(struct sas_ha_struct *ha)
+{
+	struct workqueue_struct *wq = ha->core.shost->work_q;
+	struct work_struct *w, *_w;
+
+	set_bit(SAS_HA_DRAINING, &ha->state);
+	/* flush submitters */
+	spin_lock_irq(&ha->state_lock);
+	spin_unlock_irq(&ha->state_lock);
+
+	drain_workqueue(wq);
+
+	spin_lock_irq(&ha->state_lock);
+	clear_bit(SAS_HA_DRAINING, &ha->state);
+	list_for_each_entry_safe(w, _w, &ha->defer_q, entry) {
+		list_del_init(&w->entry);
+		sas_queue_work(ha, w);
+	}
+	spin_unlock_irq(&ha->state_lock);
+}
+
+int sas_drain_work(struct sas_ha_struct *ha)
+{
+	int err;
+
+	err = mutex_lock_interruptible(&ha->drain_mutex);
+	if (err)
+		return err;
+	if (test_bit(SAS_HA_REGISTERED, &ha->state))
+		__sas_drain_work(ha);
+	mutex_unlock(&ha->drain_mutex);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sas_drain_work);
+
+void sas_disable_revalidation(struct sas_ha_struct *ha)
+{
+	mutex_lock(&ha->disco_mutex);
+	set_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state);
+	mutex_unlock(&ha->disco_mutex);
+}
+
+void sas_enable_revalidation(struct sas_ha_struct *ha)
+{
+	int i;
+
+	mutex_lock(&ha->disco_mutex);
+	clear_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state);
+	for (i = 0; i < ha->num_phys; i++) {
+		struct asd_sas_port *port = ha->sas_port[i];
+		const int ev = DISCE_REVALIDATE_DOMAIN;
+		struct sas_discovery *d = &port->disc;
+
+		if (!test_and_clear_bit(ev, &d->pending))
+			continue;
+
+		sas_queue_event(ev, &d->pending, &d->disc_work[ev].work, ha);
+	}
+	mutex_unlock(&ha->disco_mutex);
+}
+
 static void notify_ha_event(struct sas_ha_struct *sas_ha, enum ha_event event)
 {
 	BUG_ON(event >= HA_NUM_EVENTS);
 
-	sas_queue_event(event, &sas_ha->event_lock, &sas_ha->pending,
+	sas_queue_event(event, &sas_ha->pending,
 			&sas_ha->ha_events[event].work, sas_ha);
 }
 
@@ -40,7 +128,7 @@ static void notify_port_event(struct asd_sas_phy *phy, enum port_event event)
 
 	BUG_ON(event >= PORT_NUM_EVENTS);
 
-	sas_queue_event(event, &ha->event_lock, &phy->port_events_pending,
+	sas_queue_event(event, &phy->port_events_pending,
 			&phy->port_events[event].work, ha);
 }
 
@@ -50,7 +138,7 @@ static void notify_phy_event(struct asd_sas_phy *phy, enum phy_event event)
 
 	BUG_ON(event >= PHY_NUM_EVENTS);
 
-	sas_queue_event(event, &ha->event_lock, &phy->phy_events_pending,
+	sas_queue_event(event, &phy->phy_events_pending,
 			&phy->phy_events[event].work, ha);
 }
 
@@ -62,8 +150,6 @@ int sas_init_events(struct sas_ha_struct *sas_ha)
 
 	int i;
 
-	spin_lock_init(&sas_ha->event_lock);
-
 	for (i = 0; i < HA_NUM_EVENTS; i++) {
 		INIT_WORK(&sas_ha->ha_events[i].work, sas_ha_event_fns[i]);
 		sas_ha->ha_events[i].ha = sas_ha;
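
__sas_drain_work() is the heart of the new event machinery: while SAS_HA_DRAINING is set, sas_queue_work() parks new work on defer_q instead of the workqueue, drain_workqueue() flushes everything already in flight, and the parked items are released afterwards. A reduced model of that scheme, assuming a driver-owned workqueue and illustrative demo_* names:

#include <linux/workqueue.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>

#define DEMO_DRAINING 0

struct demo_ha {
	unsigned long state;
	spinlock_t lock;
	struct list_head defer_q;
	struct workqueue_struct *wq;
};

static void demo_queue(struct demo_ha *ha, struct work_struct *w)
{
	/* submitters hold the lock so demo_drain() can flush them by
	 * taking and releasing it after setting the flag */
	spin_lock_irq(&ha->lock);
	if (test_bit(DEMO_DRAINING, &ha->state))
		list_add_tail(&w->entry, &ha->defer_q);	/* park it */
	else
		queue_work(ha->wq, w);
	spin_unlock_irq(&ha->lock);
}

static void demo_drain(struct demo_ha *ha)
{
	struct work_struct *w, *n;

	set_bit(DEMO_DRAINING, &ha->state);
	/* flush any submitter that raced with the flag */
	spin_lock_irq(&ha->lock);
	spin_unlock_irq(&ha->lock);

	drain_workqueue(ha->wq);

	spin_lock_irq(&ha->lock);
	clear_bit(DEMO_DRAINING, &ha->state);
	list_for_each_entry_safe(w, n, &ha->defer_q, entry) {
		list_del_init(&w->entry);
		queue_work(ha->wq, w);	/* release parked work */
	}
	spin_unlock_irq(&ha->lock);
}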
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 1b831c55ec6e..05acd9e35fc4 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -28,6 +28,7 @@
 
 #include "sas_internal.h"
 
+#include <scsi/sas_ata.h>
 #include <scsi/scsi_transport.h>
 #include <scsi/scsi_transport_sas.h>
 #include "../scsi_sas_internal.h"
@@ -71,11 +72,18 @@ static int smp_execute_task(struct domain_device *dev, void *req, int req_size,
 	struct sas_internal *i =
 		to_sas_internal(dev->port->ha->core.shost->transportt);
 
+	mutex_lock(&dev->ex_dev.cmd_mutex);
 	for (retry = 0; retry < 3; retry++) {
-		task = sas_alloc_task(GFP_KERNEL);
-		if (!task)
-			return -ENOMEM;
+		if (test_bit(SAS_DEV_GONE, &dev->state)) {
+			res = -ECOMM;
+			break;
+		}
 
+		task = sas_alloc_task(GFP_KERNEL);
+		if (!task) {
+			res = -ENOMEM;
+			break;
+		}
 		task->dev = dev;
 		task->task_proto = dev->tproto;
 		sg_init_one(&task->smp_task.smp_req, req, req_size);
@@ -93,7 +101,7 @@ static int smp_execute_task(struct domain_device *dev, void *req, int req_size,
 		if (res) {
 			del_timer(&task->timer);
 			SAS_DPRINTK("executing SMP task failed:%d\n", res);
-			goto ex_err;
+			break;
 		}
 
 		wait_for_completion(&task->completion);
@@ -103,24 +111,30 @@ static int smp_execute_task(struct domain_device *dev, void *req, int req_size,
 			i->dft->lldd_abort_task(task);
 			if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
 				SAS_DPRINTK("SMP task aborted and not done\n");
-				goto ex_err;
+				break;
 			}
 		}
 		if (task->task_status.resp == SAS_TASK_COMPLETE &&
 		    task->task_status.stat == SAM_STAT_GOOD) {
 			res = 0;
 			break;
-		} if (task->task_status.resp == SAS_TASK_COMPLETE &&
-		      task->task_status.stat == SAS_DATA_UNDERRUN) {
+		}
+		if (task->task_status.resp == SAS_TASK_COMPLETE &&
+		    task->task_status.stat == SAS_DATA_UNDERRUN) {
 			/* no error, but return the number of bytes of
 			 * underrun */
 			res = task->task_status.residual;
 			break;
-		} if (task->task_status.resp == SAS_TASK_COMPLETE &&
-		      task->task_status.stat == SAS_DATA_OVERRUN) {
+		}
+		if (task->task_status.resp == SAS_TASK_COMPLETE &&
+		    task->task_status.stat == SAS_DATA_OVERRUN) {
 			res = -EMSGSIZE;
 			break;
-		} else {
+		}
+		if (task->task_status.resp == SAS_TASK_UNDELIVERED &&
+		    task->task_status.stat == SAS_DEVICE_UNKNOWN)
+			break;
+		else {
 			SAS_DPRINTK("%s: task to dev %016llx response: 0x%x "
 				    "status 0x%x\n", __func__,
 				    SAS_ADDR(dev->sas_addr),
@@ -130,11 +144,10 @@ static int smp_execute_task(struct domain_device *dev, void *req, int req_size,
 			task = NULL;
 		}
 	}
-ex_err:
+	mutex_unlock(&dev->ex_dev.cmd_mutex);
+
 	BUG_ON(retry == 3 && task != NULL);
-	if (task != NULL) {
-		sas_free_task(task);
-	}
+	sas_free_task(task);
 	return res;
 }
 
@@ -153,19 +166,49 @@ static inline void *alloc_smp_resp(int size)
 	return kzalloc(size, GFP_KERNEL);
 }
 
-/* ---------- Expander configuration ---------- */
+static char sas_route_char(struct domain_device *dev, struct ex_phy *phy)
+{
+	switch (phy->routing_attr) {
+	case TABLE_ROUTING:
+		if (dev->ex_dev.t2t_supp)
+			return 'U';
+		else
+			return 'T';
+	case DIRECT_ROUTING:
+		return 'D';
+	case SUBTRACTIVE_ROUTING:
+		return 'S';
+	default:
+		return '?';
+	}
+}
+
+static enum sas_dev_type to_dev_type(struct discover_resp *dr)
+{
+	/* This is detecting a failure to transmit initial dev to host
+	 * FIS as described in section J.5 of sas-2 r16
+	 */
+	if (dr->attached_dev_type == NO_DEVICE && dr->attached_sata_dev &&
+	    dr->linkrate >= SAS_LINK_RATE_1_5_GBPS)
+		return SATA_PENDING;
+	else
+		return dr->attached_dev_type;
+}
 
-static void sas_set_ex_phy(struct domain_device *dev, int phy_id,
-			   void *disc_resp)
+static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
 {
+	enum sas_dev_type dev_type;
+	enum sas_linkrate linkrate;
+	u8 sas_addr[SAS_ADDR_SIZE];
+	struct smp_resp *resp = rsp;
+	struct discover_resp *dr = &resp->disc;
 	struct expander_device *ex = &dev->ex_dev;
 	struct ex_phy *phy = &ex->ex_phy[phy_id];
-	struct smp_resp *resp = disc_resp;
-	struct discover_resp *dr = &resp->disc;
 	struct sas_rphy *rphy = dev->rphy;
-	int rediscover = (phy->phy != NULL);
+	bool new_phy = !phy->phy;
+	char *type;
 
-	if (!rediscover) {
+	if (new_phy) {
 		phy->phy = sas_phy_alloc(&rphy->dev, phy_id);
 
 		/* FIXME: error_handling */
@@ -184,8 +227,13 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id,
 		break;
 	}
 
+	/* check if anything important changed to squelch debug */
+	dev_type = phy->attached_dev_type;
+	linkrate  = phy->linkrate;
+	memcpy(sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE);
+
+	phy->attached_dev_type = to_dev_type(dr);
 	phy->phy_id = phy_id;
-	phy->attached_dev_type = dr->attached_dev_type;
 	phy->linkrate = dr->linkrate;
 	phy->attached_sata_host = dr->attached_sata_host;
 	phy->attached_sata_dev  = dr->attached_sata_dev;
@@ -200,9 +248,11 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id,
 	phy->last_da_index = -1;
 
 	phy->phy->identify.sas_address = SAS_ADDR(phy->attached_sas_addr);
-	phy->phy->identify.device_type = phy->attached_dev_type;
+	phy->phy->identify.device_type = dr->attached_dev_type;
 	phy->phy->identify.initiator_port_protocols = phy->attached_iproto;
 	phy->phy->identify.target_port_protocols = phy->attached_tproto;
+	if (!phy->attached_tproto && dr->attached_sata_dev)
+		phy->phy->identify.target_port_protocols = SAS_PROTOCOL_SATA;
 	phy->phy->identify.phy_identifier = phy_id;
 	phy->phy->minimum_linkrate_hw = dr->hmin_linkrate;
 	phy->phy->maximum_linkrate_hw = dr->hmax_linkrate;
@@ -210,20 +260,76 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id,
 	phy->phy->maximum_linkrate = dr->pmax_linkrate;
 	phy->phy->negotiated_linkrate = phy->linkrate;
 
-	if (!rediscover)
+	if (new_phy)
 		if (sas_phy_add(phy->phy)) {
 			sas_phy_free(phy->phy);
 			return;
 		}
 
-	SAS_DPRINTK("ex %016llx phy%02d:%c attached: %016llx\n",
+	switch (phy->attached_dev_type) {
+	case SATA_PENDING:
+		type = "stp pending";
+		break;
+	case NO_DEVICE:
+		type = "no device";
+		break;
+	case SAS_END_DEV:
+		if (phy->attached_iproto) {
+			if (phy->attached_tproto)
+				type = "host+target";
+			else
+				type = "host";
+		} else {
+			if (dr->attached_sata_dev)
+				type = "stp";
+			else
+				type = "ssp";
+		}
+		break;
+	case EDGE_DEV:
+	case FANOUT_DEV:
+		type = "smp";
+		break;
+	default:
+		type = "unknown";
+	}
+
+	/* this routine is polled by libata error recovery so filter
+	 * unimportant messages
+	 */
+	if (new_phy || phy->attached_dev_type != dev_type ||
+	    phy->linkrate != linkrate ||
+	    SAS_ADDR(phy->attached_sas_addr) != SAS_ADDR(sas_addr))
+		/* pass */;
+	else
+		return;
+
+	SAS_DPRINTK("ex %016llx phy%02d:%c:%X attached: %016llx (%s)\n",
 		    SAS_ADDR(dev->sas_addr), phy->phy_id,
-		    phy->routing_attr == TABLE_ROUTING ? 'T' :
-		    phy->routing_attr == DIRECT_ROUTING ? 'D' :
-		    phy->routing_attr == SUBTRACTIVE_ROUTING ? 'S' : '?',
-		    SAS_ADDR(phy->attached_sas_addr));
+		    sas_route_char(dev, phy), phy->linkrate,
+		    SAS_ADDR(phy->attached_sas_addr), type);
+}
+
+/* check if we have an existing attached ata device on this expander phy */
+struct domain_device *sas_ex_to_ata(struct domain_device *ex_dev, int phy_id)
+{
+	struct ex_phy *ex_phy = &ex_dev->ex_dev.ex_phy[phy_id];
+	struct domain_device *dev;
+	struct sas_rphy *rphy;
+
+	if (!ex_phy->port)
+		return NULL;
 
-	return;
+	rphy = ex_phy->port->rphy;
+	if (!rphy)
+		return NULL;
+
+	dev = sas_find_dev_by_rphy(rphy);
+
+	if (dev && dev_is_sata(dev))
+		return dev;
+
+	return NULL;
 }
 
 #define DISCOVER_REQ_SIZE  16
@@ -232,39 +338,25 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id,
 static int sas_ex_phy_discover_helper(struct domain_device *dev, u8 *disc_req,
 				      u8 *disc_resp, int single)
 {
-	int i, res;
+	struct discover_resp *dr;
+	int res;
 
 	disc_req[9] = single;
-	for (i = 1 ; i < 3; i++) {
-		struct discover_resp *dr;
 
-		res = smp_execute_task(dev, disc_req, DISCOVER_REQ_SIZE,
-				       disc_resp, DISCOVER_RESP_SIZE);
-		if (res)
-			return res;
-		/* This is detecting a failure to transmit initial
-		 * dev to host FIS as described in section G.5 of
-		 * sas-2 r 04b */
-		dr = &((struct smp_resp *)disc_resp)->disc;
-		if (memcmp(dev->sas_addr, dr->attached_sas_addr,
-			  SAS_ADDR_SIZE) == 0) {
-			sas_printk("Found loopback topology, just ignore it!\n");
-			return 0;
-		}
-		if (!(dr->attached_dev_type == 0 &&
-		      dr->attached_sata_dev))
-			break;
-		/* In order to generate the dev to host FIS, we
-		 * send a link reset to the expander port */
-		sas_smp_phy_control(dev, single, PHY_FUNC_LINK_RESET, NULL);
-		/* Wait for the reset to trigger the negotiation */
-		msleep(500);
+	res = smp_execute_task(dev, disc_req, DISCOVER_REQ_SIZE,
+			       disc_resp, DISCOVER_RESP_SIZE);
+	if (res)
+		return res;
+	dr = &((struct smp_resp *)disc_resp)->disc;
+	if (memcmp(dev->sas_addr, dr->attached_sas_addr, SAS_ADDR_SIZE) == 0) {
+		sas_printk("Found loopback topology, just ignore it!\n");
+		return 0;
 	}
 	sas_set_ex_phy(dev, single, disc_resp);
 	return 0;
 }
 
-static int sas_ex_phy_discover(struct domain_device *dev, int single)
+int sas_ex_phy_discover(struct domain_device *dev, int single)
 {
 	struct expander_device *ex = &dev->ex_dev;
 	int  res = 0;
@@ -569,9 +661,8 @@ int sas_smp_get_phy_events(struct sas_phy *phy)
 #define RPS_REQ_SIZE  16
 #define RPS_RESP_SIZE 60
 
-static int sas_get_report_phy_sata(struct domain_device *dev,
-					  int phy_id,
-					  struct smp_resp *rps_resp)
+int sas_get_report_phy_sata(struct domain_device *dev, int phy_id,
+			    struct smp_resp *rps_resp)
 {
 	int res;
 	u8 *rps_req = alloc_smp_req(RPS_REQ_SIZE);
@@ -657,10 +748,11 @@ static struct domain_device *sas_ex_discover_end_dev(
 	if (phy->attached_sata_host || phy->attached_sata_ps)
 		return NULL;
 
-	child = kzalloc(sizeof(*child), GFP_KERNEL);
+	child = sas_alloc_device();
 	if (!child)
 		return NULL;
 
+	kref_get(&parent->kref);
 	child->parent = parent;
 	child->port   = parent->port;
 	child->iproto = phy->attached_iproto;
@@ -676,24 +768,13 @@ static struct domain_device *sas_ex_discover_end_dev(
 		}
 	}
 	sas_ex_get_linkrate(parent, child, phy);
+	sas_device_set_phy(child, phy->port);
 
 #ifdef CONFIG_SCSI_SAS_ATA
 	if ((phy->attached_tproto & SAS_PROTOCOL_STP) || phy->attached_sata_dev) {
-		child->dev_type = SATA_DEV;
-		if (phy->attached_tproto & SAS_PROTOCOL_STP)
-			child->tproto = phy->attached_tproto;
-		if (phy->attached_sata_dev)
-			child->tproto |= SATA_DEV;
-		res = sas_get_report_phy_sata(parent, phy_id,
-					      &child->sata_dev.rps_resp);
-		if (res) {
-			SAS_DPRINTK("report phy sata to %016llx:0x%x returned "
-				    "0x%x\n", SAS_ADDR(parent->sas_addr),
-				    phy_id, res);
+		res = sas_get_ata_info(child, phy);
+		if (res)
 			goto out_free;
-		}
-		memcpy(child->frame_rcvd, &child->sata_dev.rps_resp.rps.fis,
-		       sizeof(struct dev_to_host_fis));
 
 		rphy = sas_end_device_alloc(phy->port);
 		if (unlikely(!rphy))
@@ -703,9 +784,7 @@ static struct domain_device *sas_ex_discover_end_dev(
 
 		child->rphy = rphy;
 
-		spin_lock_irq(&parent->port->dev_list_lock);
-		list_add_tail(&child->dev_list_node, &parent->port->dev_list);
-		spin_unlock_irq(&parent->port->dev_list_lock);
+		list_add_tail(&child->disco_list_node, &parent->port->disco_list);
 
 		res = sas_discover_sata(child);
 		if (res) {
@@ -729,9 +808,7 @@ static struct domain_device *sas_ex_discover_end_dev(
 		child->rphy = rphy;
 		sas_fill_in_rphy(child, rphy);
 
-		spin_lock_irq(&parent->port->dev_list_lock);
-		list_add_tail(&child->dev_list_node, &parent->port->dev_list);
-		spin_unlock_irq(&parent->port->dev_list_lock);
+		list_add_tail(&child->disco_list_node, &parent->port->disco_list);
 
 		res = sas_discover_end_dev(child);
 		if (res) {
@@ -755,6 +832,7 @@ static struct domain_device *sas_ex_discover_end_dev(
 	sas_rphy_free(child->rphy);
 	child->rphy = NULL;
 
+	list_del(&child->disco_list_node);
 	spin_lock_irq(&parent->port->dev_list_lock);
 	list_del(&child->dev_list_node);
 	spin_unlock_irq(&parent->port->dev_list_lock);
@@ -762,7 +840,7 @@ static struct domain_device *sas_ex_discover_end_dev(
 	sas_port_delete(phy->port);
  out_err:
 	phy->port = NULL;
-	kfree(child);
+	sas_put_device(child);
 	return NULL;
 }
 
@@ -809,7 +887,7 @@ static struct domain_device *sas_ex_discover_expander(
 			    phy->attached_phy_id);
 		return NULL;
 	}
-	child = kzalloc(sizeof(*child), GFP_KERNEL);
+	child = sas_alloc_device();
 	if (!child)
 		return NULL;
 
@@ -835,6 +913,7 @@ static struct domain_device *sas_ex_discover_expander(
 	child->rphy = rphy;
 	edev = rphy_to_expander_device(rphy);
 	child->dev_type = phy->attached_dev_type;
+	kref_get(&parent->kref);
 	child->parent = parent;
 	child->port = port;
 	child->iproto = phy->attached_iproto;
@@ -858,7 +937,7 @@ static struct domain_device *sas_ex_discover_expander(
 		spin_lock_irq(&parent->port->dev_list_lock);
 		list_del(&child->dev_list_node);
 		spin_unlock_irq(&parent->port->dev_list_lock);
-		kfree(child);
+		sas_put_device(child);
 		return NULL;
 	}
 	list_add_tail(&child->siblings, &parent->ex_dev.children);
@@ -908,7 +987,8 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
 
 	if (ex_phy->attached_dev_type != SAS_END_DEV &&
 	    ex_phy->attached_dev_type != FANOUT_DEV &&
-	    ex_phy->attached_dev_type != EDGE_DEV) {
+	    ex_phy->attached_dev_type != EDGE_DEV &&
+	    ex_phy->attached_dev_type != SATA_PENDING) {
 		SAS_DPRINTK("unknown device type(0x%x) attached to ex %016llx "
 			    "phy 0x%x\n", ex_phy->attached_dev_type,
 			    SAS_ADDR(dev->sas_addr),
@@ -934,6 +1014,7 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
 
 	switch (ex_phy->attached_dev_type) {
 	case SAS_END_DEV:
+	case SATA_PENDING:
 		child = sas_ex_discover_end_dev(dev, phy_id);
 		break;
 	case FANOUT_DEV:
@@ -1128,32 +1209,25 @@ static void sas_print_parent_topology_bug(struct domain_device *child,
 						 struct ex_phy *parent_phy,
 						 struct ex_phy *child_phy)
 {
-	static const char ra_char[] = {
-		[DIRECT_ROUTING] = 'D',
-		[SUBTRACTIVE_ROUTING] = 'S',
-		[TABLE_ROUTING] = 'T',
-	};
 	static const char *ex_type[] = {
 		[EDGE_DEV] = "edge",
 		[FANOUT_DEV] = "fanout",
 	};
 	struct domain_device *parent = child->parent;
 
-	sas_printk("%s ex %016llx (T2T supp:%d) phy 0x%x <--> %s ex %016llx "
-		   "(T2T supp:%d) phy 0x%x has %c:%c routing link!\n",
+	sas_printk("%s ex %016llx phy 0x%x <--> %s ex %016llx "
+		   "phy 0x%x has %c:%c routing link!\n",
 
 		   ex_type[parent->dev_type],
 		   SAS_ADDR(parent->sas_addr),
-		   parent->ex_dev.t2t_supp,
 		   parent_phy->phy_id,
 
 		   ex_type[child->dev_type],
 		   SAS_ADDR(child->sas_addr),
-		   child->ex_dev.t2t_supp,
 		   child_phy->phy_id,
 
-		   ra_char[parent_phy->routing_attr],
-		   ra_char[child_phy->routing_attr]);
+		   sas_route_char(parent, parent_phy),
+		   sas_route_char(child, child_phy));
 }
 
 static int sas_check_eeds(struct domain_device *child,
@@ -1610,8 +1684,8 @@ static int sas_get_phy_change_count(struct domain_device *dev,
 	return res;
 }
 
-static int sas_get_phy_attached_sas_addr(struct domain_device *dev,
-					 int phy_id, u8 *attached_sas_addr)
+static int sas_get_phy_attached_dev(struct domain_device *dev, int phy_id,
+				    u8 *sas_addr, enum sas_dev_type *type)
 {
 	int res;
 	struct smp_resp *disc_resp;
@@ -1623,10 +1697,11 @@ static int sas_get_phy_attached_sas_addr(struct domain_device *dev,
 	dr = &disc_resp->disc;
 
 	res = sas_get_phy_discover(dev, phy_id, disc_resp);
-	if (!res) {
-		memcpy(attached_sas_addr,disc_resp->disc.attached_sas_addr,8);
-		if (dr->attached_dev_type == 0)
-			memset(attached_sas_addr, 0, 8);
+	if (res == 0) {
+		memcpy(sas_addr, disc_resp->disc.attached_sas_addr, 8);
+		*type = to_dev_type(dr);
+		if (*type == 0)
+			memset(sas_addr, 0, 8);
 	}
 	kfree(disc_resp);
 	return res;
@@ -1748,7 +1823,7 @@ static void sas_unregister_ex_tree(struct asd_sas_port *port, struct domain_devi
 	struct domain_device *child, *n;
 
 	list_for_each_entry_safe(child, n, &ex->children, siblings) {
-		child->gone = 1;
+		set_bit(SAS_DEV_GONE, &child->state);
 		if (child->dev_type == EDGE_DEV ||
 		    child->dev_type == FANOUT_DEV)
 			sas_unregister_ex_tree(port, child);
@@ -1763,27 +1838,28 @@ static void sas_unregister_devs_sas_addr(struct domain_device *parent,
 {
 	struct expander_device *ex_dev = &parent->ex_dev;
 	struct ex_phy *phy = &ex_dev->ex_phy[phy_id];
-	struct domain_device *child, *n;
+	struct domain_device *child, *n, *found = NULL;
 	if (last) {
 		list_for_each_entry_safe(child, n,
 			&ex_dev->children, siblings) {
 			if (SAS_ADDR(child->sas_addr) ==
 			    SAS_ADDR(phy->attached_sas_addr)) {
-				child->gone = 1;
+				set_bit(SAS_DEV_GONE, &child->state);
 				if (child->dev_type == EDGE_DEV ||
 				    child->dev_type == FANOUT_DEV)
 					sas_unregister_ex_tree(parent->port, child);
 				else
 					sas_unregister_dev(parent->port, child);
+				found = child;
 				break;
 			}
 		}
-		parent->gone = 1;
 		sas_disable_routing(parent, phy->attached_sas_addr);
 	}
 	memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
 	if (phy->port) {
 		sas_port_delete_phy(phy->port, phy->phy);
+		sas_device_set_phy(found, phy->port);
 		if (phy->port->num_phys == 0)
 			sas_port_delete(phy->port);
 		phy->port = NULL;
@@ -1874,39 +1950,71 @@ out:
 	return res;
 }
 
+static bool dev_type_flutter(enum sas_dev_type new, enum sas_dev_type old)
+{
+	if (old == new)
+		return true;
+
+	/* treat device directed resets as flutter; if we went
+	 * SAS_END_DEV to SATA_PENDING the link needs recovery
+	 */
+	if ((old == SATA_PENDING && new == SAS_END_DEV) ||
+	    (old == SAS_END_DEV && new == SATA_PENDING))
+		return true;
+
+	return false;
+}
+
 static int sas_rediscover_dev(struct domain_device *dev, int phy_id, bool last)
 {
 	struct expander_device *ex = &dev->ex_dev;
 	struct ex_phy *phy = &ex->ex_phy[phy_id];
-	u8 attached_sas_addr[8];
+	enum sas_dev_type type = NO_DEVICE;
+	u8 sas_addr[8];
 	int res;
 
-	res = sas_get_phy_attached_sas_addr(dev, phy_id, attached_sas_addr);
+	res = sas_get_phy_attached_dev(dev, phy_id, sas_addr, &type);
 	switch (res) {
 	case SMP_RESP_NO_PHY:
 		phy->phy_state = PHY_NOT_PRESENT;
 		sas_unregister_devs_sas_addr(dev, phy_id, last);
-		goto out; break;
+		return res;
 	case SMP_RESP_PHY_VACANT:
 		phy->phy_state = PHY_VACANT;
 		sas_unregister_devs_sas_addr(dev, phy_id, last);
-		goto out; break;
+		return res;
 	case SMP_RESP_FUNC_ACC:
 		break;
 	}
 
-	if (SAS_ADDR(attached_sas_addr) == 0) {
+	if (SAS_ADDR(sas_addr) == 0) {
 		phy->phy_state = PHY_EMPTY;
 		sas_unregister_devs_sas_addr(dev, phy_id, last);
-	} else if (SAS_ADDR(attached_sas_addr) ==
-		   SAS_ADDR(phy->attached_sas_addr)) {
-		SAS_DPRINTK("ex %016llx phy 0x%x broadcast flutter\n",
-			    SAS_ADDR(dev->sas_addr), phy_id);
+		return res;
+	} else if (SAS_ADDR(sas_addr) == SAS_ADDR(phy->attached_sas_addr) &&
+		   dev_type_flutter(type, phy->attached_dev_type)) {
+		struct domain_device *ata_dev = sas_ex_to_ata(dev, phy_id);
+		char *action = "";
+
 		sas_ex_phy_discover(dev, phy_id);
-	} else
-		res = sas_discover_new(dev, phy_id);
-out:
-	return res;
+
+		if (ata_dev && phy->attached_dev_type == SATA_PENDING)
+			action = ", needs recovery";
+		SAS_DPRINTK("ex %016llx phy 0x%x broadcast flutter%s\n",
+			    SAS_ADDR(dev->sas_addr), phy_id, action);
+		return res;
+	}
+
+	/* delete the old link */
+	if (SAS_ADDR(phy->attached_sas_addr) &&
+	    SAS_ADDR(sas_addr) != SAS_ADDR(phy->attached_sas_addr)) {
+		SAS_DPRINTK("ex %016llx phy 0x%x replace %016llx\n",
+			    SAS_ADDR(dev->sas_addr), phy_id,
+			    SAS_ADDR(phy->attached_sas_addr));
+		sas_unregister_devs_sas_addr(dev, phy_id, last);
+	}
+
+	return sas_discover_new(dev, phy_id);
 }
 
 /**
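
Two ideas in the expander rework are worth calling out: smp_execute_task() now serializes all SMP commands through the expander's cmd_mutex and converts every goto exit into a break so the unlock is never skipped, and sas_rediscover_dev() treats a SAS_END_DEV <-> SATA_PENDING transition as "flutter" (the same disk mid-reset) rather than a topology change. A control-flow sketch of the serialized retry loop, with hypothetical demo_* names:

#include <linux/mutex.h>
#include <linux/bitops.h>
#include <linux/errno.h>

#define DEMO_GONE 0

struct demo_expander {
	struct mutex cmd_mutex;		/* serializes SMP commands */
	unsigned long state;
};

/* hypothetical single attempt; -EAGAIN means "retry" */
static int demo_try_once(struct demo_expander *ex)
{
	return 0;
}

static int demo_execute(struct demo_expander *ex)
{
	int res = -ECOMM, retry;

	mutex_lock(&ex->cmd_mutex);
	for (retry = 0; retry < 3; retry++) {
		/* a device marked gone fails fast instead of timing out */
		if (test_bit(DEMO_GONE, &ex->state)) {
			res = -ECOMM;
			break;
		}
		res = demo_try_once(ex);
		if (res != -EAGAIN)
			break;		/* success or a hard failure */
	}
	mutex_unlock(&ex->cmd_mutex);	/* every exit path unlocks */
	return res;
}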
diff --git a/drivers/scsi/libsas/sas_host_smp.c b/drivers/scsi/libsas/sas_host_smp.c
index 3814d3eed401..d24792575169 100644
--- a/drivers/scsi/libsas/sas_host_smp.c
+++ b/drivers/scsi/libsas/sas_host_smp.c
@@ -187,11 +187,14 @@ static void sas_phy_control(struct sas_ha_struct *sas_ha, u8 phy_id,
 	struct sas_internal *i =
 		to_sas_internal(sas_ha->core.shost->transportt);
 	struct sas_phy_linkrates rates;
+	struct asd_sas_phy *asd_phy;
 
 	if (phy_id >= sas_ha->num_phys) {
 		resp_data[2] = SMP_RESP_NO_PHY;
 		return;
 	}
+
+	asd_phy = sas_ha->sas_phy[phy_id];
 	switch (phy_op) {
 	case PHY_FUNC_NOP:
 	case PHY_FUNC_LINK_RESET:
@@ -210,7 +213,13 @@ static void sas_phy_control(struct sas_ha_struct *sas_ha, u8 phy_id,
 	rates.minimum_linkrate = min;
 	rates.maximum_linkrate = max;
 
-	if (i->dft->lldd_control_phy(sas_ha->sas_phy[phy_id], phy_op, &rates))
+	/* filter reset requests through libata eh */
+	if (phy_op == PHY_FUNC_LINK_RESET && sas_try_ata_reset(asd_phy) == 0) {
+		resp_data[2] = SMP_RESP_FUNC_ACC;
+		return;
+	}
+
+	if (i->dft->lldd_control_phy(asd_phy, phy_op, &rates))
 		resp_data[2] = SMP_RESP_FUNC_FAILED;
 	else
 		resp_data[2] = SMP_RESP_FUNC_ACC;
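
The host-SMP phy-control path now intercepts PHY_FUNC_LINK_RESET and, when the attached device is SATA, routes the reset through libata eh (via sas_try_ata_reset()) before it ever reaches the LLDD, reporting SMP_RESP_FUNC_ACC on that path. Reduced to its shape, with demo_* stand-ins:

#include <linux/types.h>

enum { DEMO_NOP, DEMO_LINK_RESET };

struct demo_phy {
	bool attached_sata;
};

/* stand-ins for the ata-eh hand-off and the LLDD control hook */
static void demo_ata_reset(struct demo_phy *phy) { }
static int demo_lldd_control(struct demo_phy *phy, int op) { return 0; }

static int demo_phy_control(struct demo_phy *phy, int op)
{
	/* SATA link resets go through libata eh so device state stays
	 * coherent; report success to the SMP initiator */
	if (op == DEMO_LINK_RESET && phy->attached_sata) {
		demo_ata_reset(phy);
		return 0;
	}
	return demo_lldd_control(phy, op);
}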
diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c
index d81c3b1989f7..120bff64be30 100644
--- a/drivers/scsi/libsas/sas_init.c
+++ b/drivers/scsi/libsas/sas_init.c
@@ -28,6 +28,7 @@
 #include <linux/init.h>
 #include <linux/device.h>
 #include <linux/spinlock.h>
+#include <scsi/sas_ata.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_transport.h>
@@ -97,14 +98,14 @@ void sas_hae_reset(struct work_struct *work)
 		container_of(work, struct sas_ha_event, work);
 	struct sas_ha_struct *ha = ev->ha;
 
-	sas_begin_event(HAE_RESET, &ha->event_lock,
-			&ha->pending);
+	clear_bit(HAE_RESET, &ha->pending);
 }
 
 int sas_register_ha(struct sas_ha_struct *sas_ha)
 {
 	int error = 0;
 
+	mutex_init(&sas_ha->disco_mutex);
 	spin_lock_init(&sas_ha->phy_port_lock);
 	sas_hash_addr(sas_ha->hashed_sas_addr, sas_ha->sas_addr);
 
@@ -113,8 +114,10 @@ int sas_register_ha(struct sas_ha_struct *sas_ha)
 	else if (sas_ha->lldd_queue_size == -1)
 		sas_ha->lldd_queue_size = 128; /* Sanity */
 
-	sas_ha->state = SAS_HA_REGISTERED;
+	set_bit(SAS_HA_REGISTERED, &sas_ha->state);
 	spin_lock_init(&sas_ha->state_lock);
+	mutex_init(&sas_ha->drain_mutex);
+	INIT_LIST_HEAD(&sas_ha->defer_q);
 
 	error = sas_register_phys(sas_ha);
 	if (error) {
@@ -144,6 +147,7 @@ int sas_register_ha(struct sas_ha_struct *sas_ha)
 	}
 
 	INIT_LIST_HEAD(&sas_ha->eh_done_q);
+	INIT_LIST_HEAD(&sas_ha->eh_ata_q);
 
 	return 0;
 
@@ -156,17 +160,23 @@ Undo_phys:
 
 int sas_unregister_ha(struct sas_ha_struct *sas_ha)
 {
-	unsigned long flags;
-
-	/* Set the state to unregistered to avoid further
-	 * events to be queued */
-	spin_lock_irqsave(&sas_ha->state_lock, flags);
-	sas_ha->state = SAS_HA_UNREGISTERED;
-	spin_unlock_irqrestore(&sas_ha->state_lock, flags);
-	scsi_flush_work(sas_ha->core.shost);
+	/* Set the state to unregistered to avoid further unchained
+	 * events to be queued, and flush any in-progress drainers
+	 */
+	mutex_lock(&sas_ha->drain_mutex);
+	spin_lock_irq(&sas_ha->state_lock);
+	clear_bit(SAS_HA_REGISTERED, &sas_ha->state);
+	spin_unlock_irq(&sas_ha->state_lock);
+	__sas_drain_work(sas_ha);
+	mutex_unlock(&sas_ha->drain_mutex);
 
 	sas_unregister_ports(sas_ha);
 
+	/* flush unregistration work */
+	mutex_lock(&sas_ha->drain_mutex);
+	__sas_drain_work(sas_ha);
+	mutex_unlock(&sas_ha->drain_mutex);
+
 	if (sas_ha->lldd_max_execute_num > 1) {
 		sas_shutdown_queue(sas_ha);
 		sas_ha->lldd_max_execute_num = 1;
@@ -190,15 +200,75 @@ static int sas_get_linkerrors(struct sas_phy *phy)
 	return sas_smp_get_phy_events(phy);
 }
 
-int sas_phy_enable(struct sas_phy *phy, int enable)
+int sas_try_ata_reset(struct asd_sas_phy *asd_phy)
+{
+	struct domain_device *dev = NULL;
+
+	/* try to route user requested link resets through libata */
+	if (asd_phy->port)
+		dev = asd_phy->port->port_dev;
+
+	/* validate that dev has been probed */
+	if (dev)
+		dev = sas_find_dev_by_rphy(dev->rphy);
+
+	if (dev && dev_is_sata(dev)) {
+		sas_ata_schedule_reset(dev);
+		sas_ata_wait_eh(dev);
+		return 0;
+	}
+
+	return -ENODEV;
+}
+
+/**
+ * transport_sas_phy_reset - reset a phy and permit libata to manage the link
+ *
+ * Handle a phy reset request from sysfs in host workqueue context, where
+ * we know we can block on eh and safely traverse the domain_device topology.
+ */
+static int transport_sas_phy_reset(struct sas_phy *phy, int hard_reset)
+{
+	enum phy_func reset_type;
+
+	if (hard_reset)
+		reset_type = PHY_FUNC_HARD_RESET;
+	else
+		reset_type = PHY_FUNC_LINK_RESET;
+
+	if (scsi_is_sas_phy_local(phy)) {
+		struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
+		struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
+		struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number];
+		struct sas_internal *i =
+			to_sas_internal(sas_ha->core.shost->transportt);
+
+		if (!hard_reset && sas_try_ata_reset(asd_phy) == 0)
+			return 0;
+		return i->dft->lldd_control_phy(asd_phy, reset_type, NULL);
+	} else {
+		struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent);
+		struct domain_device *ddev = sas_find_dev_by_rphy(rphy);
+		struct domain_device *ata_dev = sas_ex_to_ata(ddev, phy->number);
+
+		if (ata_dev && !hard_reset) {
+			sas_ata_schedule_reset(ata_dev);
+			sas_ata_wait_eh(ata_dev);
+			return 0;
+		} else
+			return sas_smp_phy_control(ddev, phy->number, reset_type, NULL);
+	}
+}
+
+static int sas_phy_enable(struct sas_phy *phy, int enable)
 {
 	int ret;
-	enum phy_func command;
+	enum phy_func cmd;
 
 	if (enable)
-		command = PHY_FUNC_LINK_RESET;
+		cmd = PHY_FUNC_LINK_RESET;
 	else
-		command = PHY_FUNC_DISABLE;
+		cmd = PHY_FUNC_DISABLE;
 
 	if (scsi_is_sas_phy_local(phy)) {
 		struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
@@ -207,15 +277,18 @@ int sas_phy_enable(struct sas_phy *phy, int enable)
 		struct sas_internal *i =
 			to_sas_internal(sas_ha->core.shost->transportt);
 
-		if (!enable) {
-			sas_phy_disconnected(asd_phy);
-			sas_ha->notify_phy_event(asd_phy, PHYE_LOSS_OF_SIGNAL);
-		}
-		ret = i->dft->lldd_control_phy(asd_phy, command, NULL);
+		if (enable)
+			ret = transport_sas_phy_reset(phy, 0);
+		else
+			ret = i->dft->lldd_control_phy(asd_phy, cmd, NULL);
 	} else {
 		struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent);
 		struct domain_device *ddev = sas_find_dev_by_rphy(rphy);
-		ret = sas_smp_phy_control(ddev, phy->number, command, NULL);
+
+		if (enable)
+			ret = transport_sas_phy_reset(phy, 0);
+		else
+			ret = sas_smp_phy_control(ddev, phy->number, cmd, NULL);
 	}
 	return ret;
 }
@@ -225,6 +298,9 @@ int sas_phy_reset(struct sas_phy *phy, int hard_reset)
 	int ret;
 	enum phy_func reset_type;
 
+	if (!phy->enabled)
+		return -ENODEV;
+
 	if (hard_reset)
 		reset_type = PHY_FUNC_HARD_RESET;
 	else
@@ -285,9 +361,101 @@ int sas_set_phy_speed(struct sas_phy *phy,
 	return ret;
 }
 
+static void sas_phy_release(struct sas_phy *phy)
+{
+	kfree(phy->hostdata);
+	phy->hostdata = NULL;
+}
+
+static void phy_reset_work(struct work_struct *work)
+{
+	struct sas_phy_data *d = container_of(work, typeof(*d), reset_work);
+
+	d->reset_result = transport_sas_phy_reset(d->phy, d->hard_reset);
+}
+
+static void phy_enable_work(struct work_struct *work)
+{
+	struct sas_phy_data *d = container_of(work, typeof(*d), enable_work);
+
+	d->enable_result = sas_phy_enable(d->phy, d->enable);
+}
+
+static int sas_phy_setup(struct sas_phy *phy)
+{
+	struct sas_phy_data *d = kzalloc(sizeof(*d), GFP_KERNEL);
+
+	if (!d)
+		return -ENOMEM;
+
+	mutex_init(&d->event_lock);
+	INIT_WORK(&d->reset_work, phy_reset_work);
+	INIT_WORK(&d->enable_work, phy_enable_work);
+	d->phy = phy;
+	phy->hostdata = d;
+
+	return 0;
+}
+
+static int queue_phy_reset(struct sas_phy *phy, int hard_reset)
+{
+	struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
+	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
+	struct sas_phy_data *d = phy->hostdata;
+	int rc;
+
+	if (!d)
+		return -ENOMEM;
+
+	/* libsas workqueue coordinates ata-eh reset with discovery */
+	mutex_lock(&d->event_lock);
+	d->reset_result = 0;
+	d->hard_reset = hard_reset;
+
+	spin_lock_irq(&ha->state_lock);
+	sas_queue_work(ha, &d->reset_work);
+	spin_unlock_irq(&ha->state_lock);
+
+	rc = sas_drain_work(ha);
+	if (rc == 0)
+		rc = d->reset_result;
+	mutex_unlock(&d->event_lock);
+
+	return rc;
+}
+
+static int queue_phy_enable(struct sas_phy *phy, int enable)
+{
+	struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
+	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
+	struct sas_phy_data *d = phy->hostdata;
+	int rc;
+
+	if (!d)
+		return -ENOMEM;
+
+	/* libsas workqueue coordinates ata-eh reset with discovery */
+	mutex_lock(&d->event_lock);
+	d->enable_result = 0;
+	d->enable = enable;
+
+	spin_lock_irq(&ha->state_lock);
+	sas_queue_work(ha, &d->enable_work);
+	spin_unlock_irq(&ha->state_lock);
+
+	rc = sas_drain_work(ha);
+	if (rc == 0)
+		rc = d->enable_result;
+	mutex_unlock(&d->event_lock);
+
+	return rc;
+}
+
 static struct sas_function_template sft = {
-	.phy_enable = sas_phy_enable,
-	.phy_reset = sas_phy_reset,
+	.phy_enable = queue_phy_enable,
+	.phy_reset = queue_phy_reset,
+	.phy_setup = sas_phy_setup,
+	.phy_release = sas_phy_release,
 	.set_phy_speed = sas_set_phy_speed,
 	.get_linkerrors = sas_get_linkerrors,
 	.smp_handler = sas_smp_handler,
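
queue_phy_reset() and queue_phy_enable() move the actual work off the sysfs caller's context: the request is queued onto the host workqueue (where blocking on eh and walking the domain is safe), sas_drain_work() waits for it, and the result is read back under the per-phy event_lock. A simplified model of that request/response shape, using flush_work() in place of the drain and illustrative demo_* names:

#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/errno.h>

struct demo_req {
	struct mutex lock;		/* one request at a time per phy */
	struct work_struct work;
	int arg;
	int result;
};

static void demo_work_fn(struct work_struct *work)
{
	struct demo_req *r = container_of(work, struct demo_req, work);

	/* the real reset runs here, free to block on eh */
	r->result = r->arg ? 0 : -ENODEV;
}

static void demo_req_init(struct demo_req *r)
{
	mutex_init(&r->lock);
	INIT_WORK(&r->work, demo_work_fn);
}

static int demo_request(struct demo_req *r, int arg)
{
	int rc;

	mutex_lock(&r->lock);
	r->arg = arg;
	schedule_work(&r->work);
	flush_work(&r->work);		/* stands in for sas_drain_work() */
	rc = r->result;
	mutex_unlock(&r->lock);
	return rc;
}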
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index 14e21b5fb8ba..f05c63879949 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -30,6 +30,7 @@
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_transport_sas.h>
 #include <scsi/libsas.h>
+#include <scsi/sas_ata.h>
 
 #define sas_printk(fmt, ...) printk(KERN_NOTICE "sas: " fmt, ## __VA_ARGS__)
 
@@ -38,6 +39,18 @@
 #define TO_SAS_TASK(_scsi_cmd)  ((void *)(_scsi_cmd)->host_scribble)
 #define ASSIGN_SAS_TASK(_sc, _t) do { (_sc)->host_scribble = (void *) _t; } while (0)
 
+struct sas_phy_data {
+	/* let reset be performed in sas_queue_work() context */
+	struct sas_phy *phy;
+	struct mutex event_lock;
+	int hard_reset;
+	int reset_result;
+	struct work_struct reset_work;
+	int enable;
+	int enable_result;
+	struct work_struct enable_work;
+};
+
 void sas_scsi_recover_host(struct Scsi_Host *shost);
 
 int sas_show_class(enum sas_class class, char *buf);
@@ -56,6 +69,9 @@ enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *);
 int  sas_init_queue(struct sas_ha_struct *sas_ha);
 int  sas_init_events(struct sas_ha_struct *sas_ha);
 void sas_shutdown_queue(struct sas_ha_struct *sas_ha);
+void sas_disable_revalidation(struct sas_ha_struct *ha);
+void sas_enable_revalidation(struct sas_ha_struct *ha);
+void __sas_drain_work(struct sas_ha_struct *ha);
 
 void sas_deform_port(struct asd_sas_phy *phy, int gone);
 
@@ -64,6 +80,7 @@ void sas_porte_broadcast_rcvd(struct work_struct *work);
 void sas_porte_link_reset_err(struct work_struct *work);
 void sas_porte_timer_event(struct work_struct *work);
 void sas_porte_hard_reset(struct work_struct *work);
+void sas_queue_work(struct sas_ha_struct *ha, struct work_struct *work);
 
 int sas_notify_lldd_dev_found(struct domain_device *);
 void sas_notify_lldd_dev_gone(struct domain_device *);
@@ -72,10 +89,17 @@ int sas_smp_phy_control(struct domain_device *dev, int phy_id,
 			enum phy_func phy_func, struct sas_phy_linkrates *);
 int sas_smp_get_phy_events(struct sas_phy *phy);
 
+void sas_device_set_phy(struct domain_device *dev, struct sas_port *port);
 struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy);
-
+struct domain_device *sas_ex_to_ata(struct domain_device *ex_dev, int phy_id);
+int sas_ex_phy_discover(struct domain_device *dev, int single);
+int sas_get_report_phy_sata(struct domain_device *dev, int phy_id,
+			    struct smp_resp *rps_resp);
+int sas_try_ata_reset(struct asd_sas_phy *phy);
 void sas_hae_reset(struct work_struct *work);
 
+void sas_free_device(struct kref *kref);
+
 #ifdef CONFIG_SCSI_SAS_HOST_SMP
 extern int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
 				struct request *rsp);
@@ -90,36 +114,13 @@ static inline int sas_smp_host_handler(struct Scsi_Host *shost,
 }
 #endif
 
-static inline void sas_queue_event(int event, spinlock_t *lock,
-				   unsigned long *pending,
-				   struct work_struct *work,
-				   struct sas_ha_struct *sas_ha)
+static inline void sas_fail_probe(struct domain_device *dev, const char *func, int err)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(lock, flags);
-	if (test_bit(event, pending)) {
-		spin_unlock_irqrestore(lock, flags);
-		return;
-	}
-	__set_bit(event, pending);
-	spin_unlock_irqrestore(lock, flags);
-
-	spin_lock_irqsave(&sas_ha->state_lock, flags);
-	if (sas_ha->state != SAS_HA_UNREGISTERED) {
-		scsi_queue_work(sas_ha->core.shost, work);
-	}
-	spin_unlock_irqrestore(&sas_ha->state_lock, flags);
-}
-
-static inline void sas_begin_event(int event, spinlock_t *lock,
-				   unsigned long *pending)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(lock, flags);
-	__clear_bit(event, pending);
-	spin_unlock_irqrestore(lock, flags);
+	SAS_DPRINTK("%s: for %s device %16llx returned %d\n",
+		    func, dev->parent ? "exp-attached" : "direct-attached",
+		    SAS_ADDR(dev->sas_addr), err);
+	sas_unregister_dev(dev->port, dev);
 }
 
 static inline void sas_fill_in_rphy(struct domain_device *dev,
@@ -132,6 +133,7 @@ static inline void sas_fill_in_rphy(struct domain_device *dev,
 	case SATA_DEV:
 		/* FIXME: need sata device type */
 	case SAS_END_DEV:
+	case SATA_PENDING:
 		rphy->identify.device_type = SAS_END_DEVICE;
 		break;
 	case EDGE_DEV:
@@ -146,6 +148,22 @@ static inline void sas_fill_in_rphy(struct domain_device *dev,
 	}
 }
 
+static inline void sas_phy_set_target(struct asd_sas_phy *p, struct domain_device *dev)
+{
+	struct sas_phy *phy = p->phy;
+
+	if (dev) {
+		if (dev_is_sata(dev))
+			phy->identify.device_type = SAS_END_DEVICE;
+		else
+			phy->identify.device_type = dev->dev_type;
+		phy->identify.target_port_protocols = dev->tproto;
+	} else {
+		phy->identify.device_type = SAS_PHY_UNUSED;
+		phy->identify.target_port_protocols = 0;
+	}
+}
+
 static inline void sas_add_parent_port(struct domain_device *dev, int phy_id)
 {
 	struct expander_device *ex = &dev->ex_dev;
@@ -161,4 +179,23 @@ static inline void sas_add_parent_port(struct domain_device *dev, int phy_id)
 	sas_port_add_phy(ex->parent_port, ex_phy->phy);
 }
 
+static inline struct domain_device *sas_alloc_device(void)
+{
+	struct domain_device *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+
+	if (dev) {
+		INIT_LIST_HEAD(&dev->siblings);
+		INIT_LIST_HEAD(&dev->dev_list_node);
+		INIT_LIST_HEAD(&dev->disco_list_node);
+		kref_init(&dev->kref);
+		spin_lock_init(&dev->done_lock);
+	}
+	return dev;
+}
+
+static inline void sas_put_device(struct domain_device *dev)
+{
+	kref_put(&dev->kref, sas_free_device);
+}
+
 #endif /* _SAS_INTERNAL_H_ */
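
sas_alloc_device()/sas_put_device() put the domain_device on a kref-managed lifetime: allocation starts the count at 1, each holder (the LLDD found notification, async ata eh, a child's parent pointer) takes its own reference, and sas_free_device() runs only when the last reference drops. A minimal model of that ownership scheme, with demo_* names standing in:

#include <linux/kref.h>
#include <linux/slab.h>

struct demo_device {
	struct kref kref;
	struct demo_device *parent;
};

static void demo_free(struct kref *kref)
{
	struct demo_device *dev = container_of(kref, struct demo_device, kref);

	if (dev->parent)
		kref_put(&dev->parent->kref, demo_free);	/* drop parent pin */
	kfree(dev);
}

static struct demo_device *demo_alloc(struct demo_device *parent)
{
	struct demo_device *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

	if (!dev)
		return NULL;
	kref_init(&dev->kref);			/* count starts at 1 */
	if (parent) {
		kref_get(&parent->kref);	/* child pins its parent */
		dev->parent = parent;
	}
	return dev;
}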
diff --git a/drivers/scsi/libsas/sas_phy.c b/drivers/scsi/libsas/sas_phy.c
index e0f5018e9071..dcfd4a9105c5 100644
--- a/drivers/scsi/libsas/sas_phy.c
+++ b/drivers/scsi/libsas/sas_phy.c
@@ -36,8 +36,7 @@ static void sas_phye_loss_of_signal(struct work_struct *work)
 		container_of(work, struct asd_sas_event, work);
 	struct asd_sas_phy *phy = ev->phy;
 
-	sas_begin_event(PHYE_LOSS_OF_SIGNAL, &phy->ha->event_lock,
-			&phy->phy_events_pending);
+	clear_bit(PHYE_LOSS_OF_SIGNAL, &phy->phy_events_pending);
 	phy->error = 0;
 	sas_deform_port(phy, 1);
 }
@@ -48,8 +47,7 @@ static void sas_phye_oob_done(struct work_struct *work)
 		container_of(work, struct asd_sas_event, work);
 	struct asd_sas_phy *phy = ev->phy;
 
-	sas_begin_event(PHYE_OOB_DONE, &phy->ha->event_lock,
-			&phy->phy_events_pending);
+	clear_bit(PHYE_OOB_DONE, &phy->phy_events_pending);
 	phy->error = 0;
 }
 
@@ -63,8 +61,7 @@ static void sas_phye_oob_error(struct work_struct *work)
 	struct sas_internal *i =
 		to_sas_internal(sas_ha->core.shost->transportt);
 
-	sas_begin_event(PHYE_OOB_ERROR, &phy->ha->event_lock,
-			&phy->phy_events_pending);
+	clear_bit(PHYE_OOB_ERROR, &phy->phy_events_pending);
 
 	sas_deform_port(phy, 1);
 
@@ -95,8 +92,7 @@ static void sas_phye_spinup_hold(struct work_struct *work)
 	struct sas_internal *i =
 		to_sas_internal(sas_ha->core.shost->transportt);
 
-	sas_begin_event(PHYE_SPINUP_HOLD, &phy->ha->event_lock,
-			&phy->phy_events_pending);
+	clear_bit(PHYE_SPINUP_HOLD, &phy->phy_events_pending);
 
 	phy->error = 0;
 	i->dft->lldd_control_phy(phy, PHY_FUNC_RELEASE_SPINUP_HOLD, NULL);
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c
index 42fd1f25b664..eb19c016d500 100644
--- a/drivers/scsi/libsas/sas_port.c
+++ b/drivers/scsi/libsas/sas_port.c
@@ -104,13 +104,11 @@ static void sas_form_port(struct asd_sas_phy *phy)
 
 	/* add the phy to the port */
 	list_add_tail(&phy->port_phy_el, &port->phy_list);
+	sas_phy_set_target(phy, port->port_dev);
 	phy->port = port;
 	port->num_phys++;
 	port->phy_mask |= (1U << phy->id);
 
-	if (!port->phy)
-		port->phy = phy->phy;
-
 	if (*(u64 *)port->attached_sas_addr == 0) {
 		port->class = phy->class;
 		memcpy(port->attached_sas_addr, phy->attached_sas_addr,
@@ -125,7 +123,7 @@ static void sas_form_port(struct asd_sas_phy *phy)
 	spin_unlock_irqrestore(&sas_ha->phy_port_lock, flags);
 
 	if (!port->port) {
-		port->port = sas_port_alloc(phy->phy->dev.parent, port->id);
+		port->port = sas_port_alloc(phy->phy->dev.parent, phy->id);
 		BUG_ON(!port->port);
 		sas_port_add(port->port);
 	}
@@ -170,13 +168,13 @@ void sas_deform_port(struct asd_sas_phy *phy, int gone)
 		dev->pathways--;
 
 	if (port->num_phys == 1) {
-		if (dev && gone)
-			dev->gone = 1;
-		sas_unregister_domain_devices(port);
+		sas_unregister_domain_devices(port, gone);
 		sas_port_delete(port->port);
 		port->port = NULL;
-	} else
+	} else {
 		sas_port_delete_phy(port->port, phy->phy);
+		sas_device_set_phy(dev, port->port);
+	}
 
 	if (si->dft->lldd_port_deformed)
 		si->dft->lldd_port_deformed(phy);
@@ -185,6 +183,7 @@ void sas_deform_port(struct asd_sas_phy *phy, int gone)
 	spin_lock(&port->phy_list_lock);
 
 	list_del_init(&phy->port_phy_el);
+	sas_phy_set_target(phy, NULL);
 	phy->port = NULL;
 	port->num_phys--;
 	port->phy_mask &= ~(1U << phy->id);
@@ -213,8 +212,7 @@ void sas_porte_bytes_dmaed(struct work_struct *work)
 		container_of(work, struct asd_sas_event, work);
 	struct asd_sas_phy *phy = ev->phy;
 
-	sas_begin_event(PORTE_BYTES_DMAED, &phy->ha->event_lock,
-			&phy->port_events_pending);
+	clear_bit(PORTE_BYTES_DMAED, &phy->port_events_pending);
 
 	sas_form_port(phy);
 }
@@ -227,8 +225,7 @@ void sas_porte_broadcast_rcvd(struct work_struct *work)
 	unsigned long flags;
 	u32 prim;
 
-	sas_begin_event(PORTE_BROADCAST_RCVD, &phy->ha->event_lock,
-			&phy->port_events_pending);
+	clear_bit(PORTE_BROADCAST_RCVD, &phy->port_events_pending);
 
 	spin_lock_irqsave(&phy->sas_prim_lock, flags);
 	prim = phy->sas_prim;
@@ -244,8 +241,7 @@ void sas_porte_link_reset_err(struct work_struct *work)
 		container_of(work, struct asd_sas_event, work);
 	struct asd_sas_phy *phy = ev->phy;
 
-	sas_begin_event(PORTE_LINK_RESET_ERR, &phy->ha->event_lock,
-			&phy->port_events_pending);
+	clear_bit(PORTE_LINK_RESET_ERR, &phy->port_events_pending);
 
 	sas_deform_port(phy, 1);
 }
@@ -256,8 +252,7 @@ void sas_porte_timer_event(struct work_struct *work)
 		container_of(work, struct asd_sas_event, work);
 	struct asd_sas_phy *phy = ev->phy;
 
-	sas_begin_event(PORTE_TIMER_EVENT, &phy->ha->event_lock,
-			&phy->port_events_pending);
+	clear_bit(PORTE_TIMER_EVENT, &phy->port_events_pending);
 
 	sas_deform_port(phy, 1);
 }
@@ -268,8 +263,7 @@ void sas_porte_hard_reset(struct work_struct *work)
 		container_of(work, struct asd_sas_event, work);
 	struct asd_sas_phy *phy = ev->phy;
 
-	sas_begin_event(PORTE_HARD_RESET, &phy->ha->event_lock,
-			&phy->port_events_pending);
+	clear_bit(PORTE_HARD_RESET, &phy->port_events_pending);
 
 	sas_deform_port(phy, 1);
 }
@@ -282,6 +276,8 @@ static void sas_init_port(struct asd_sas_port *port,
 	memset(port, 0, sizeof(*port));
 	port->id = i;
 	INIT_LIST_HEAD(&port->dev_list);
+	INIT_LIST_HEAD(&port->disco_list);
+	INIT_LIST_HEAD(&port->destroy_list);
 	spin_lock_init(&port->phy_list_lock);
 	INIT_LIST_HEAD(&port->phy_list);
 	port->ha = sas_ha;
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index b6e233d9a0a1..f0b9b7bf1882 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -49,27 +49,12 @@
 #include <linux/scatterlist.h>
 #include <linux/libata.h>
 
-/* ---------- SCSI Host glue ---------- */
-
-static void sas_scsi_task_done(struct sas_task *task)
+/* record final status and free the task */
+static void sas_end_task(struct scsi_cmnd *sc, struct sas_task *task)
 {
 	struct task_status_struct *ts = &task->task_status;
-	struct scsi_cmnd *sc = task->uldd_task;
 	int hs = 0, stat = 0;
 
-	if (unlikely(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
-		/* Aborted tasks will be completed by the error handler */
-		SAS_DPRINTK("task done but aborted\n");
-		return;
-	}
-
-	if (unlikely(!sc)) {
-		SAS_DPRINTK("task_done called with non existing SCSI cmnd!\n");
-		list_del_init(&task->list);
-		sas_free_task(task);
-		return;
-	}
-
 	if (ts->resp == SAS_TASK_UNDELIVERED) {
 		/* transport error */
 		hs = DID_NO_CONNECT;
@@ -124,10 +109,41 @@ static void sas_scsi_task_done(struct sas_task *task)
 			break;
 		}
 	}
-	ASSIGN_SAS_TASK(sc, NULL);
+
 	sc->result = (hs << 16) | stat;
+	ASSIGN_SAS_TASK(sc, NULL);
 	list_del_init(&task->list);
 	sas_free_task(task);
+}
+
+static void sas_scsi_task_done(struct sas_task *task)
+{
+	struct scsi_cmnd *sc = task->uldd_task;
+	struct domain_device *dev = task->dev;
+	struct sas_ha_struct *ha = dev->port->ha;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->done_lock, flags);
+	if (test_bit(SAS_HA_FROZEN, &ha->state))
+		task = NULL;
+	else
+		ASSIGN_SAS_TASK(sc, NULL);
+	spin_unlock_irqrestore(&dev->done_lock, flags);
+
+	if (unlikely(!task)) {
+		/* task will be completed by the error handler */
+		SAS_DPRINTK("task done but aborted\n");
+		return;
+	}
+
+	if (unlikely(!sc)) {
+		SAS_DPRINTK("task_done called with non existing SCSI cmnd!\n");
+		list_del_init(&task->list);
+		sas_free_task(task);
+		return;
+	}
+
+	sas_end_task(sc, task);
 	sc->scsi_done(sc);
 }
 
@@ -192,17 +208,15 @@ int sas_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
 	int res = 0;
 
 	/* If the device fell off, no sense in issuing commands */
-	if (dev->gone) {
+	if (test_bit(SAS_DEV_GONE, &dev->state)) {
 		cmd->result = DID_BAD_TARGET << 16;
 		goto out_done;
 	}
 
 	if (dev_is_sata(dev)) {
-		unsigned long flags;
-
-		spin_lock_irqsave(dev->sata_dev.ap->lock, flags);
+		spin_lock_irq(dev->sata_dev.ap->lock);
 		res = ata_sas_queuecmd(cmd, dev->sata_dev.ap);
-		spin_unlock_irqrestore(dev->sata_dev.ap->lock, flags);
+		spin_unlock_irq(dev->sata_dev.ap->lock);
 		return res;
 	}
 
@@ -235,24 +249,38 @@ out_done:
 
 static void sas_eh_finish_cmd(struct scsi_cmnd *cmd)
 {
-	struct sas_task *task = TO_SAS_TASK(cmd);
 	struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(cmd->device->host);
+	struct sas_task *task = TO_SAS_TASK(cmd);
+
+	/* At this point, we only get called following an actual abort
+	 * of the task, so we should be guaranteed not to be racing with
+	 * any completions from the LLD.  Task is freed after this.
+	 */
+	sas_end_task(cmd, task);
 
-	/* remove the aborted task flag to allow the task to be
-	 * completed now. At this point, we only get called following
-	 * an actual abort of the task, so we should be guaranteed not
-	 * to be racing with any completions from the LLD (hence we
-	 * don't need the task state lock to clear the flag) */
-	task->task_state_flags &= ~SAS_TASK_STATE_ABORTED;
-	/* Now call task_done.  However, task will be free'd after
-	 * this */
-	task->task_done(task);
 	/* now finish the command and move it on to the error
 	 * handler done list, this also takes it off the
-	 * error handler pending list */
+	 * error handler pending list.
+	 */
 	scsi_eh_finish_cmd(cmd, &sas_ha->eh_done_q);
 }
 
+static void sas_eh_defer_cmd(struct scsi_cmnd *cmd)
+{
+	struct domain_device *dev = cmd_to_domain_dev(cmd);
+	struct sas_ha_struct *ha = dev->port->ha;
+	struct sas_task *task = TO_SAS_TASK(cmd);
+
+	if (!dev_is_sata(dev)) {
+		sas_eh_finish_cmd(cmd);
+		return;
+	}
+
+	/* report the timeout to libata */
+	sas_end_task(cmd, task);
+	list_move_tail(&cmd->eh_entry, &ha->eh_ata_q);
+}
+
 static void sas_scsi_clear_queue_lu(struct list_head *error_q, struct scsi_cmnd *my_cmd)
 {
 	struct scsi_cmnd *cmd, *n;
@@ -260,7 +288,7 @@ static void sas_scsi_clear_queue_lu(struct list_head *error_q, struct scsi_cmnd
 	list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
 		if (cmd->device->sdev_target == my_cmd->device->sdev_target &&
 		    cmd->device->lun == my_cmd->device->lun)
-			sas_eh_finish_cmd(cmd);
+			sas_eh_defer_cmd(cmd);
 	}
 }
 
@@ -295,6 +323,7 @@ enum task_disposition {
 	TASK_IS_DONE,
 	TASK_IS_ABORTED,
 	TASK_IS_AT_LU,
+	TASK_IS_NOT_AT_HA,
 	TASK_IS_NOT_AT_LU,
 	TASK_ABORT_FAILED,
 };
@@ -311,19 +340,18 @@ static enum task_disposition sas_scsi_find_task(struct sas_task *task)
 		struct scsi_core *core = &ha->core;
 		struct sas_task *t, *n;
 
+		mutex_lock(&core->task_queue_flush);
 		spin_lock_irqsave(&core->task_queue_lock, flags);
-		list_for_each_entry_safe(t, n, &core->task_queue, list) {
+		list_for_each_entry_safe(t, n, &core->task_queue, list)
 			if (task == t) {
 				list_del_init(&t->list);
-				spin_unlock_irqrestore(&core->task_queue_lock,
-						       flags);
-				SAS_DPRINTK("%s: task 0x%p aborted from "
-					    "task_queue\n",
-					    __func__, task);
-				return TASK_IS_ABORTED;
+				break;
 			}
-		}
 		spin_unlock_irqrestore(&core->task_queue_lock, flags);
+		mutex_unlock(&core->task_queue_flush);
+
+		if (task == t)
+			return TASK_IS_NOT_AT_HA;
 	}
 
 	for (i = 0; i < 5; i++) {
@@ -411,30 +439,26 @@ static int sas_recover_I_T(struct domain_device *dev)
 	return res;
 }
 
-/* Find the sas_phy that's attached to this device */
-struct sas_phy *sas_find_local_phy(struct domain_device *dev)
+/* take a reference on the last known good phy for this device */
+struct sas_phy *sas_get_local_phy(struct domain_device *dev)
 {
-	struct domain_device *pdev = dev->parent;
-	struct ex_phy *exphy = NULL;
-	int i;
+	struct sas_ha_struct *ha = dev->port->ha;
+	struct sas_phy *phy;
+	unsigned long flags;
 
-	/* Directly attached device */
-	if (!pdev)
-		return dev->port->phy;
+	/* a published domain device always has a valid phy; it may be
+	 * stale, but it is never NULL
+	 */
+	BUG_ON(!dev->phy);
 
-	/* Otherwise look in the expander */
-	for (i = 0; i < pdev->ex_dev.num_phys; i++)
-		if (!memcmp(dev->sas_addr,
-			    pdev->ex_dev.ex_phy[i].attached_sas_addr,
-			    SAS_ADDR_SIZE)) {
-			exphy = &pdev->ex_dev.ex_phy[i];
-			break;
-		}
+	spin_lock_irqsave(&ha->phy_port_lock, flags);
+	phy = dev->phy;
+	get_device(&phy->dev);
+	spin_unlock_irqrestore(&ha->phy_port_lock, flags);
 
-	BUG_ON(!exphy);
-	return exphy->phy;
+	return phy;
 }
-EXPORT_SYMBOL_GPL(sas_find_local_phy);
+EXPORT_SYMBOL_GPL(sas_get_local_phy);
 
 /* Attempt to send a LUN reset message to a device */
 int sas_eh_device_reset_handler(struct scsi_cmnd *cmd)
@@ -461,7 +485,7 @@ int sas_eh_device_reset_handler(struct scsi_cmnd *cmd)
 int sas_eh_bus_reset_handler(struct scsi_cmnd *cmd)
 {
 	struct domain_device *dev = cmd_to_domain_dev(cmd);
-	struct sas_phy *phy = sas_find_local_phy(dev);
+	struct sas_phy *phy = sas_get_local_phy(dev);
 	int res;
 
 	res = sas_phy_reset(phy, 1);
@@ -469,6 +493,8 @@ int sas_eh_bus_reset_handler(struct scsi_cmnd *cmd)
 		SAS_DPRINTK("Bus reset of %s failed 0x%x\n",
 			    kobject_name(&phy->dev.kobj),
 			    res);
+	sas_put_local_phy(phy);
+
 	if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE)
 		return SUCCESS;
 
@@ -495,9 +521,7 @@ try_bus_reset:
 	return FAILED;
 }
 
-static int sas_eh_handle_sas_errors(struct Scsi_Host *shost,
-				    struct list_head *work_q,
-				    struct list_head *done_q)
+static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head *work_q)
 {
 	struct scsi_cmnd *cmd, *n;
 	enum task_disposition res = TASK_IS_DONE;
@@ -505,13 +529,28 @@ static int sas_eh_handle_sas_errors(struct Scsi_Host *shost,
 	struct sas_internal *i = to_sas_internal(shost->transportt);
 	unsigned long flags;
 	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
+	LIST_HEAD(done);
 
-Again:
+	/* clean out any commands that won the completion vs eh race */
 	list_for_each_entry_safe(cmd, n, work_q, eh_entry) {
-		struct sas_task *task = TO_SAS_TASK(cmd);
+		struct domain_device *dev = cmd_to_domain_dev(cmd);
+		struct sas_task *task;
+
+		spin_lock_irqsave(&dev->done_lock, flags);
+		/* by this point the lldd has either observed
+		 * SAS_HA_FROZEN and is leaving the task alone, or has
+		 * won the race with eh and decided to complete it
+		 */
+		task = TO_SAS_TASK(cmd);
+		spin_unlock_irqrestore(&dev->done_lock, flags);
 
 		if (!task)
-			continue;
+			list_move_tail(&cmd->eh_entry, &done);
+	}
+
+ Again:
+	list_for_each_entry_safe(cmd, n, work_q, eh_entry) {
+		struct sas_task *task = TO_SAS_TASK(cmd);
 
 		list_del_init(&cmd->eh_entry);
 
@@ -531,15 +570,23 @@ Again:
 		cmd->eh_eflags = 0;
 
 		switch (res) {
+		case TASK_IS_NOT_AT_HA:
+			SAS_DPRINTK("%s: task 0x%p is not at ha: %s\n",
+				    __func__, task,
+				    cmd->retries ? "retry" : "aborted");
+			if (cmd->retries)
+				cmd->retries--;
+			sas_eh_finish_cmd(cmd);
+			continue;
 		case TASK_IS_DONE:
 			SAS_DPRINTK("%s: task 0x%p is done\n", __func__,
 				    task);
-			sas_eh_finish_cmd(cmd);
+			sas_eh_defer_cmd(cmd);
 			continue;
 		case TASK_IS_ABORTED:
 			SAS_DPRINTK("%s: task 0x%p is aborted\n",
 				    __func__, task);
-			sas_eh_finish_cmd(cmd);
+			sas_eh_defer_cmd(cmd);
 			continue;
 		case TASK_IS_AT_LU:
 			SAS_DPRINTK("task 0x%p is at LU: lu recover\n", task);
@@ -550,7 +597,7 @@ Again:
 					    "recovered\n",
 					    SAS_ADDR(task->dev),
 					    cmd->device->lun);
-				sas_eh_finish_cmd(cmd);
+				sas_eh_defer_cmd(cmd);
 				sas_scsi_clear_queue_lu(work_q, cmd);
 				goto Again;
 			}
@@ -560,7 +607,8 @@ Again:
 			SAS_DPRINTK("task 0x%p is not at LU: I_T recover\n",
 				    task);
 			tmf_resp = sas_recover_I_T(task->dev);
-			if (tmf_resp == TMF_RESP_FUNC_COMPLETE) {
+			if (tmf_resp == TMF_RESP_FUNC_COMPLETE ||
+			    tmf_resp == -ENODEV) {
 				struct domain_device *dev = task->dev;
 				SAS_DPRINTK("I_T %016llx recovered\n",
 					    SAS_ADDR(task->dev->sas_addr));
@@ -607,13 +655,16 @@ Again:
 			goto clear_q;
 		}
 	}
-	return list_empty(work_q);
-clear_q:
+ out:
+	list_splice_tail(&done, work_q);
+	list_splice_tail_init(&ha->eh_ata_q, work_q);
+	return;
+
+ clear_q:
 	SAS_DPRINTK("--- Exit %s -- clear_q\n", __func__);
 	list_for_each_entry_safe(cmd, n, work_q, eh_entry)
 		sas_eh_finish_cmd(cmd);
-
-	return list_empty(work_q);
+	goto out;
 }
 
 void sas_scsi_recover_host(struct Scsi_Host *shost)
@@ -627,12 +678,17 @@ void sas_scsi_recover_host(struct Scsi_Host *shost)
 	shost->host_eh_scheduled = 0;
 	spin_unlock_irqrestore(shost->host_lock, flags);
 
-	SAS_DPRINTK("Enter %s\n", __func__);
+	SAS_DPRINTK("Enter %s busy: %d failed: %d\n",
+		    __func__, shost->host_busy, shost->host_failed);
 	/*
 	 * Deal with commands that still have SAS tasks (i.e. they didn't
-	 * complete via the normal sas_task completion mechanism)
+	 * complete via the normal sas_task completion mechanism);
+	 * SAS_HA_FROZEN gives eh dominion over all sas_task completion.
 	 */
-	if (sas_eh_handle_sas_errors(shost, &eh_work_q, &ha->eh_done_q))
+	set_bit(SAS_HA_FROZEN, &ha->state);
+	sas_eh_handle_sas_errors(shost, &eh_work_q);
+	clear_bit(SAS_HA_FROZEN, &ha->state);
+	if (list_empty(&eh_work_q))
 		goto out;
 
 	/*
@@ -641,59 +697,26 @@ void sas_scsi_recover_host(struct Scsi_Host *shost)
 	 * scsi_unjam_host does, but we skip scsi_eh_abort_cmds because any
 	 * command we see here has no sas_task and is thus unknown to the HA.
 	 */
-	if (!sas_ata_eh(shost, &eh_work_q, &ha->eh_done_q))
-		if (!scsi_eh_get_sense(&eh_work_q, &ha->eh_done_q))
-			scsi_eh_ready_devs(shost, &eh_work_q, &ha->eh_done_q);
+	sas_ata_eh(shost, &eh_work_q, &ha->eh_done_q);
+	if (!scsi_eh_get_sense(&eh_work_q, &ha->eh_done_q))
+		scsi_eh_ready_devs(shost, &eh_work_q, &ha->eh_done_q);
 
 out:
+	if (ha->lldd_max_execute_num > 1)
+		wake_up_process(ha->core.queue_thread);
+
 	/* now link into libata eh --- if we have any ata devices */
 	sas_ata_strategy_handler(shost);
 
 	scsi_eh_flush_done_q(&ha->eh_done_q);
 
-	SAS_DPRINTK("--- Exit %s\n", __func__);
-	return;
+	SAS_DPRINTK("--- Exit %s: busy: %d failed: %d\n",
+		    __func__, shost->host_busy, shost->host_failed);
 }
 
 enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd)
 {
-	struct sas_task *task = TO_SAS_TASK(cmd);
-	unsigned long flags;
-	enum blk_eh_timer_return rtn;
-
-	if (sas_ata_timed_out(cmd, task, &rtn))
-		return rtn;
-
-	if (!task) {
-		cmd->request->timeout /= 2;
-		SAS_DPRINTK("command 0x%p, task 0x%p, gone: %s\n",
-			    cmd, task, (cmd->request->timeout ?
-			    "BLK_EH_RESET_TIMER" : "BLK_EH_NOT_HANDLED"));
-		if (!cmd->request->timeout)
-			return BLK_EH_NOT_HANDLED;
-		return BLK_EH_RESET_TIMER;
-	}
-
-	spin_lock_irqsave(&task->task_state_lock, flags);
-	BUG_ON(task->task_state_flags & SAS_TASK_STATE_ABORTED);
-	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
-		spin_unlock_irqrestore(&task->task_state_lock, flags);
-		SAS_DPRINTK("command 0x%p, task 0x%p, timed out: "
-			    "BLK_EH_HANDLED\n", cmd, task);
-		return BLK_EH_HANDLED;
-	}
-	if (!(task->task_state_flags & SAS_TASK_AT_INITIATOR)) {
-		spin_unlock_irqrestore(&task->task_state_lock, flags);
-		SAS_DPRINTK("command 0x%p, task 0x%p, not at initiator: "
-			    "BLK_EH_RESET_TIMER\n",
-			    cmd, task);
-		return BLK_EH_RESET_TIMER;
-	}
-	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
-	spin_unlock_irqrestore(&task->task_state_lock, flags);
-
-	SAS_DPRINTK("command 0x%p, task 0x%p, timed out: BLK_EH_NOT_HANDLED\n",
-		    cmd, task);
+	scmd_printk(KERN_DEBUG, cmd, "command %p timed out\n", cmd);
 
 	return BLK_EH_NOT_HANDLED;
 }
@@ -737,27 +760,15 @@ struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy)
 	return found_dev;
 }
 
-static inline struct domain_device *sas_find_target(struct scsi_target *starget)
-{
-	struct sas_rphy *rphy = dev_to_rphy(starget->dev.parent);
-
-	return sas_find_dev_by_rphy(rphy);
-}
-
 int sas_target_alloc(struct scsi_target *starget)
 {
-	struct domain_device *found_dev = sas_find_target(starget);
-	int res;
+	struct sas_rphy *rphy = dev_to_rphy(starget->dev.parent);
+	struct domain_device *found_dev = sas_find_dev_by_rphy(rphy);
 
 	if (!found_dev)
 		return -ENODEV;
 
-	if (dev_is_sata(found_dev)) {
-		res = sas_ata_init_host_and_port(found_dev, starget);
-		if (res)
-			return res;
-	}
-
+	kref_get(&found_dev->kref);
 	starget->hostdata = found_dev;
 	return 0;
 }
@@ -797,14 +808,6 @@ int sas_slave_configure(struct scsi_device *scsi_dev)
 	return 0;
 }
 
-void sas_slave_destroy(struct scsi_device *scsi_dev)
-{
-	struct domain_device *dev = sdev_to_domain_dev(scsi_dev);
-
-	if (dev_is_sata(dev))
-		sas_to_ata_dev(dev)->class = ATA_DEV_NONE;
-}
-
 int sas_change_queue_depth(struct scsi_device *sdev, int depth, int reason)
 {
 	struct domain_device *dev = sdev_to_domain_dev(sdev);
@@ -871,9 +874,11 @@ static void sas_queue(struct sas_ha_struct *sas_ha)
 	int res;
 	struct sas_internal *i = to_sas_internal(core->shost->transportt);
 
+	mutex_lock(&core->task_queue_flush);
 	spin_lock_irqsave(&core->task_queue_lock, flags);
 	while (!kthread_should_stop() &&
-	       !list_empty(&core->task_queue)) {
+	       !list_empty(&core->task_queue) &&
+	       !test_bit(SAS_HA_FROZEN, &sas_ha->state)) {
 
 		can_queue = sas_ha->lldd_queue_size - core->task_queue_size;
 		if (can_queue >= 0) {
@@ -909,6 +914,7 @@ static void sas_queue(struct sas_ha_struct *sas_ha)
 		}
 	}
 	spin_unlock_irqrestore(&core->task_queue_lock, flags);
+	mutex_unlock(&core->task_queue_flush);
 }
 
 /**
@@ -935,6 +941,7 @@ int sas_init_queue(struct sas_ha_struct *sas_ha)
 	struct scsi_core *core = &sas_ha->core;
 
 	spin_lock_init(&core->task_queue_lock);
+	mutex_init(&core->task_queue_flush);
 	core->task_queue_size = 0;
 	INIT_LIST_HEAD(&core->task_queue);
 
@@ -972,49 +979,6 @@ void sas_shutdown_queue(struct sas_ha_struct *sas_ha)
 }
 
 /*
- * Call the LLDD task abort routine directly.  This function is intended for
- * use by upper layers that need to tell the LLDD to abort a task.
- */
-int __sas_task_abort(struct sas_task *task)
-{
-	struct sas_internal *si =
-		to_sas_internal(task->dev->port->ha->core.shost->transportt);
-	unsigned long flags;
-	int res;
-
-	spin_lock_irqsave(&task->task_state_lock, flags);
-	if (task->task_state_flags & SAS_TASK_STATE_ABORTED ||
-	    task->task_state_flags & SAS_TASK_STATE_DONE) {
-		spin_unlock_irqrestore(&task->task_state_lock, flags);
-		SAS_DPRINTK("%s: Task %p already finished.\n", __func__,
-			    task);
-		return 0;
-	}
-	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
-	spin_unlock_irqrestore(&task->task_state_lock, flags);
-
-	if (!si->dft->lldd_abort_task)
-		return -ENODEV;
-
-	res = si->dft->lldd_abort_task(task);
-
-	spin_lock_irqsave(&task->task_state_lock, flags);
-	if ((task->task_state_flags & SAS_TASK_STATE_DONE) ||
-	    (res == TMF_RESP_FUNC_COMPLETE))
-	{
-		spin_unlock_irqrestore(&task->task_state_lock, flags);
-		task->task_done(task);
-		return 0;
-	}
-
-	if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
-		task->task_state_flags &= ~SAS_TASK_STATE_ABORTED;
-	spin_unlock_irqrestore(&task->task_state_lock, flags);
-
-	return -EAGAIN;
-}
-
-/*
  * Tell an upper layer that it needs to initiate an abort for a given task.
  * This should only ever be called by an LLDD.
  */
@@ -1043,27 +1007,15 @@ void sas_task_abort(struct sas_task *task)
 	}
 }
 
-int sas_slave_alloc(struct scsi_device *scsi_dev)
-{
-	struct domain_device *dev = sdev_to_domain_dev(scsi_dev);
-
-	if (dev_is_sata(dev))
-		return ata_sas_port_init(dev->sata_dev.ap);
-
-	return 0;
-}
-
 void sas_target_destroy(struct scsi_target *starget)
 {
-	struct domain_device *found_dev = sas_find_target(starget);
+	struct domain_device *found_dev = starget->hostdata;
 
 	if (!found_dev)
 		return;
 
-	if (dev_is_sata(found_dev))
-		ata_sas_port_destroy(found_dev->sata_dev.ap);
-
-	return;
+	starget->hostdata = NULL;
+	sas_put_device(found_dev);
 }
 
 static void sas_parse_addr(u8 *sas_addr, const char *p)
@@ -1108,16 +1060,12 @@ EXPORT_SYMBOL_GPL(sas_request_addr);
 EXPORT_SYMBOL_GPL(sas_queuecommand);
 EXPORT_SYMBOL_GPL(sas_target_alloc);
 EXPORT_SYMBOL_GPL(sas_slave_configure);
-EXPORT_SYMBOL_GPL(sas_slave_destroy);
 EXPORT_SYMBOL_GPL(sas_change_queue_depth);
 EXPORT_SYMBOL_GPL(sas_change_queue_type);
 EXPORT_SYMBOL_GPL(sas_bios_param);
-EXPORT_SYMBOL_GPL(__sas_task_abort);
 EXPORT_SYMBOL_GPL(sas_task_abort);
 EXPORT_SYMBOL_GPL(sas_phy_reset);
-EXPORT_SYMBOL_GPL(sas_phy_enable);
 EXPORT_SYMBOL_GPL(sas_eh_device_reset_handler);
 EXPORT_SYMBOL_GPL(sas_eh_bus_reset_handler);
-EXPORT_SYMBOL_GPL(sas_slave_alloc);
 EXPORT_SYMBOL_GPL(sas_target_destroy);
 EXPORT_SYMBOL_GPL(sas_ioctl);
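
The sas_scsi_host.c rework above hinges on one arbitration: sas_scsi_recover_host() sets SAS_HA_FROZEN before sweeping, and sas_scsi_task_done() tests it under dev->done_lock, so for each command exactly one of the normal completion path or the error handler performs the final sas_end_task(). Below is a minimal single-threaded pthread sketch of that handshake; the names (frozen, task_alive) are invented stand-ins for the flag and the TO_SAS_TASK pointer, not libsas API.

/* Sketch of the SAS_HA_FROZEN arbitration: whichever path observes the
 * state first, under the lock, owns final task cleanup. Illustrative only. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t done_lock = PTHREAD_MUTEX_INITIALIZER;
static bool frozen;		/* stand-in for SAS_HA_FROZEN */
static bool task_alive = true;	/* stand-in for the cmd's sas_task pointer */

static void normal_completion(void)
{
	bool mine;

	pthread_mutex_lock(&done_lock);
	mine = !frozen && task_alive;
	if (mine)
		task_alive = false;	/* we own final cleanup */
	pthread_mutex_unlock(&done_lock);

	printf(mine ? "completion path frees the task\n"
		    : "backing off; eh owns the task\n");
}

static void error_handler(void)
{
	pthread_mutex_lock(&done_lock);
	frozen = true;		/* from here on, eh owns all completions */
	pthread_mutex_unlock(&done_lock);

	if (task_alive) {	/* single-threaded demo: no further race */
		task_alive = false;
		printf("error handler frees the task\n");
	}
	frozen = false;		/* recovery finished */
}

int main(void)
{
	error_handler();	/* eh claims the task first ... */
	normal_completion();	/* ... so the late completion backs off */
	return 0;
}
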
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 825f9307417a..5fc044ff656e 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -534,6 +534,7 @@ struct lpfc_hba {
 	void (*lpfc_scsi_prep_cmnd)
 		(struct lpfc_vport *, struct lpfc_scsi_buf *,
 		 struct lpfc_nodelist *);
+
 	/* IOCB interface function jump table entries */
 	int (*__lpfc_sli_issue_iocb)
 		(struct lpfc_hba *, uint32_t,
@@ -541,8 +542,6 @@ struct lpfc_hba {
 	void (*__lpfc_sli_release_iocbq)(struct lpfc_hba *,
 			 struct lpfc_iocbq *);
 	int (*lpfc_hba_down_post)(struct lpfc_hba *phba);
-
-
 	IOCB_t * (*lpfc_get_iocb_from_iocbq)
 		(struct lpfc_iocbq *);
 	void (*lpfc_scsi_cmd_iocb_cmpl)
@@ -551,10 +550,12 @@ struct lpfc_hba {
 	/* MBOX interface function jump table entries */
 	int (*lpfc_sli_issue_mbox)
 		(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
+
 	/* Slow-path IOCB process function jump table entries */
 	void (*lpfc_sli_handle_slow_ring_event)
 		(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 		 uint32_t mask);
+
 	/* INIT device interface function jump table entries */
 	int (*lpfc_sli_hbq_to_firmware)
 		(struct lpfc_hba *, uint32_t, struct hbq_dmabuf *);
@@ -573,6 +574,10 @@ struct lpfc_hba {
 	int (*lpfc_selective_reset)
 		(struct lpfc_hba *);
 
+	int (*lpfc_bg_scsi_prep_dma_buf)
+		(struct lpfc_hba *, struct lpfc_scsi_buf *);
+	/* Add new entries here */
+
 	/* SLI4 specific HBA data structure */
 	struct lpfc_sli4_hba sli4_hba;
 
@@ -838,6 +843,7 @@ struct lpfc_hba {
 	struct dentry *debug_writeGuard; /* inject write guard_tag errors */
 	struct dentry *debug_writeApp;   /* inject write app_tag errors */
 	struct dentry *debug_writeRef;   /* inject write ref_tag errors */
+	struct dentry *debug_readGuard;  /* inject read guard_tag errors */
 	struct dentry *debug_readApp;    /* inject read app_tag errors */
 	struct dentry *debug_readRef;    /* inject read ref_tag errors */
 
@@ -845,10 +851,11 @@ struct lpfc_hba {
 	uint32_t lpfc_injerr_wgrd_cnt;
 	uint32_t lpfc_injerr_wapp_cnt;
 	uint32_t lpfc_injerr_wref_cnt;
+	uint32_t lpfc_injerr_rgrd_cnt;
 	uint32_t lpfc_injerr_rapp_cnt;
 	uint32_t lpfc_injerr_rref_cnt;
 	sector_t lpfc_injerr_lba;
-#define LPFC_INJERR_LBA_OFF	(sector_t)0xffffffffffffffff
+#define LPFC_INJERR_LBA_OFF	(sector_t)(-1)
 
 	struct dentry *debug_slow_ring_trc;
 	struct lpfc_debugfs_trc *slow_ring_trc;
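
The LPFC_INJERR_LBA_OFF hunk above swaps a hard-coded 64-bit constant for (sector_t)(-1): converting -1 to an unsigned type yields that type's maximum value at whatever width sector_t is built with, so the sentinel stays all-ones even on a configuration where sector_t is narrower than 64 bits. A two-line demonstration, with uint64_t/uint32_t as illustrative stand-ins for the possible sector_t widths:

/* (sector_t)(-1) is the portable spelling of an all-ones sentinel:
 * -1 converted to any unsigned type is that type's maximum value. */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector64_t;	/* stand-in: 64-bit sector_t */
typedef uint32_t sector32_t;	/* stand-in: 32-bit sector_t */

int main(void)
{
	printf("%llx\n", (unsigned long long)(sector64_t)-1);	/* ffffffffffffffff */
	printf("%lx\n", (unsigned long)(sector32_t)-1);		/* ffffffff */
	return 0;
}
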
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index f6697cb0e216..296ad5bc4240 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -353,7 +353,7 @@ lpfc_fwrev_show(struct device *dev, struct device_attribute *attr,
 	struct lpfc_hba   *phba = vport->phba;
 	uint32_t if_type;
 	uint8_t sli_family;
-	char fwrev[32];
+	char fwrev[FW_REV_STR_SIZE];
 	int len;
 
 	lpfc_decode_firmware_rev(phba, fwrev, 1);
@@ -922,11 +922,15 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
 	rc = lpfc_sli4_pdev_status_reg_wait(phba);
 
 	if (rc == -EPERM) {
-		/* no privilage for reset, restore if needed */
-		if (before_fc_flag & FC_OFFLINE_MODE)
-			goto out;
+		/* no privilege for reset */
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"3150 No privilege to perform the requested "
+				"access: x%x\n", reg_val);
 	} else if (rc == -EIO) {
 		/* reset failed, there is nothing more we can do */
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"3153 Failed to perform the requested "
+				"access: x%x\n", reg_val);
 		return rc;
 	}
 
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 56a86baece5b..141e4b40bb55 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -589,7 +589,10 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
 	}
 	cmdiocbq->iocb.un.elsreq64.bdl.bdeSize =
 		(request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
-	cmdiocbq->iocb.ulpContext = rpi;
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		cmdiocbq->iocb.ulpContext = phba->sli4_hba.rpi_ids[rpi];
+	else
+		cmdiocbq->iocb.ulpContext = rpi;
 	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
 	cmdiocbq->context1 = NULL;
 	cmdiocbq->context2 = NULL;
@@ -1768,7 +1771,7 @@ lpfc_sli4_bsg_set_internal_loopback(struct lpfc_hba *phba)
 	bf_set(lpfc_mbx_set_diag_state_link_type,
 	       &link_diag_loopback->u.req, phba->sli4_hba.lnk_info.lnk_tp);
 	bf_set(lpfc_mbx_set_diag_lpbk_type, &link_diag_loopback->u.req,
-	       LPFC_DIAG_LOOPBACK_TYPE_SERDES);
+	       LPFC_DIAG_LOOPBACK_TYPE_INTERNAL);
 
 	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
 	if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus)) {
@@ -3977,7 +3980,7 @@ lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
 			case COMN_OPCODE_GET_CNTL_ADDL_ATTRIBUTES:
 				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
 						"3106 Handled SLI_CONFIG "
-						"subsys_fcoe, opcode:x%x\n",
+						"subsys_comn, opcode:x%x\n",
 						opcode);
 				rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
 							nemb_mse, dmabuf);
@@ -3985,7 +3988,7 @@ lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
 			default:
 				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
 						"3107 Reject SLI_CONFIG "
-						"subsys_fcoe, opcode:x%x\n",
+						"subsys_comn, opcode:x%x\n",
 						opcode);
 				rc = -EPERM;
 				break;
@@ -4556,7 +4559,12 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
 							+ sizeof(MAILBOX_t));
 		}
 	} else if (phba->sli_rev == LPFC_SLI_REV4) {
-		if (pmb->mbxCommand == MBX_DUMP_MEMORY) {
+		/* Let type 4 (well known data) through because the data is
+		 * returned in varwords[4-8];
+		 * otherwise check the receive length and fetch the buffer addr
+		 */
+		if ((pmb->mbxCommand == MBX_DUMP_MEMORY) &&
+			(pmb->un.varDmp.type != DMP_WELL_KNOWN)) {
 			/* rebuild the command for sli4 using our own buffers
 			* like we do for biu diags
 			*/
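
The lpfc_bsg_rport_els hunk above shows the recurring SLI4 idiom in this series: the driver-side RPI is a logical index that must be translated through phba->sli4_hba.rpi_ids[] to the hardware-assigned identifier, while SLI3 puts the logical value on the wire directly. A toy sketch of that indirection; table contents and names are invented for illustration:

/* Logical-to-hardware id indirection, as in the rpi_ids[] lookup above. */
#include <stdint.h>
#include <stdio.h>

#define SLI_REV3 3
#define SLI_REV4 4

static uint16_t rpi_ids[8] = { 100, 101, 104, 105, 108, 109, 112, 113 };

static uint16_t resolve_rpi(int sli_rev, uint16_t rpi)
{
	if (sli_rev == SLI_REV4)
		return rpi_ids[rpi];	/* hardware id from the table */
	return rpi;			/* SLI3: logical id is the wire id */
}

int main(void)
{
	printf("sli3 rpi 2 -> %u\n", resolve_rpi(SLI_REV3, 2));	/* 2 */
	printf("sli4 rpi 2 -> %u\n", resolve_rpi(SLI_REV4, 2));	/* 104 */
	return 0;
}
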
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 26924b7a6cde..330dd7192a7f 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -462,3 +462,4 @@ int lpfc_issue_unreg_vfi(struct lpfc_vport *);
 int lpfc_selective_reset(struct lpfc_hba *);
 int lpfc_sli4_read_config(struct lpfc_hba *phba);
 int lpfc_scsi_buf_update(struct lpfc_hba *phba);
+void lpfc_sli4_node_prep(struct lpfc_hba *phba);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 707081d0a226..93e96b3c9097 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1076,7 +1076,7 @@ int
 lpfc_vport_symbolic_node_name(struct lpfc_vport *vport, char *symbol,
 	size_t size)
 {
-	char fwrev[16];
+	char fwrev[FW_REV_STR_SIZE];
 	int n;
 
 	lpfc_decode_firmware_rev(vport->phba, fwrev, 0);
@@ -1834,7 +1834,7 @@ lpfc_decode_firmware_rev(struct lpfc_hba *phba, char *fwrevision, int flag)
 	uint8_t *fwname;
 
 	if (phba->sli_rev == LPFC_SLI_REV4)
-		sprintf(fwrevision, "%s", vp->rev.opFwName);
+		snprintf(fwrevision, FW_REV_STR_SIZE, "%s", vp->rev.opFwName);
 	else if (vp->rev.rBit) {
 		if (psli->sli_flag & LPFC_SLI_ACTIVE)
 			rev = vp->rev.sli2FwRev;
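
Both fwrev fixes above size the buffer with the new FW_REV_STR_SIZE define and move from sprintf() to snprintf(), so an oversized firmware-revision string is truncated (and still NUL-terminated) instead of overrunning a stack buffer. A minimal sketch of the difference, reusing the define's value:

/* snprintf truncates at the bound and always NUL-terminates, where
 * sprintf would write past the end of fwrev. */
#include <stdio.h>
#include <string.h>

#define FW_REV_STR_SIZE 32

int main(void)
{
	char fwrev[FW_REV_STR_SIZE];
	const char *name = "an-unexpectedly-long-firmware-revision-string";

	snprintf(fwrev, FW_REV_STR_SIZE, "%s", name);
	printf("%zu: %s\n", strlen(fwrev), fwrev);	/* at most 31 chars */
	return 0;
}
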
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 3587a3fe8fcb..22e17be04d8a 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -1019,6 +1019,8 @@ lpfc_debugfs_dif_err_read(struct file *file, char __user *buf,
 		cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_wapp_cnt);
 	else if (dent == phba->debug_writeRef)
 		cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_wref_cnt);
+	else if (dent == phba->debug_readGuard)
+		cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_rgrd_cnt);
 	else if (dent == phba->debug_readApp)
 		cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_rapp_cnt);
 	else if (dent == phba->debug_readRef)
@@ -1057,6 +1059,8 @@ lpfc_debugfs_dif_err_write(struct file *file, const char __user *buf,
 		phba->lpfc_injerr_wapp_cnt = (uint32_t)tmp;
 	else if (dent == phba->debug_writeRef)
 		phba->lpfc_injerr_wref_cnt = (uint32_t)tmp;
+	else if (dent == phba->debug_readGuard)
+		phba->lpfc_injerr_rgrd_cnt = (uint32_t)tmp;
 	else if (dent == phba->debug_readApp)
 		phba->lpfc_injerr_rapp_cnt = (uint32_t)tmp;
 	else if (dent == phba->debug_readRef)
@@ -3978,6 +3982,17 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
 			goto debug_failed;
 		}
 
+		snprintf(name, sizeof(name), "readGuardInjErr");
+		phba->debug_readGuard =
+			debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+			phba->hba_debugfs_root,
+			phba, &lpfc_debugfs_op_dif_err);
+		if (!phba->debug_readGuard) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+				"0808 Cannot create debugfs readGuard\n");
+			goto debug_failed;
+		}
+
 		snprintf(name, sizeof(name), "readAppInjErr");
 		phba->debug_readApp =
 			debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
@@ -4318,6 +4333,10 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
 			debugfs_remove(phba->debug_writeRef); /* writeRef */
 			phba->debug_writeRef = NULL;
 		}
+		if (phba->debug_readGuard) {
+			debugfs_remove(phba->debug_readGuard); /* readGuard */
+			phba->debug_readGuard = NULL;
+		}
 		if (phba->debug_readApp) {
 			debugfs_remove(phba->debug_readApp); /* readApp */
 			phba->debug_readApp = NULL;
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 7afc757338de..8db2fb3b45ec 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1526,7 +1526,6 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
 			memcpy(&ndlp->active_rrqs.xri_bitmap,
 				&rrq.xri_bitmap,
 				sizeof(ndlp->active_rrqs.xri_bitmap));
-		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
 		/* Since we are swapping the ndlp passed in with the new one
 		 * and the did has already been swapped, copy over the
 		 * state and names.
@@ -1536,6 +1535,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
 		memcpy(&new_ndlp->nlp_nodename, &ndlp->nlp_nodename,
 			sizeof(struct lpfc_name));
 		new_ndlp->nlp_state = ndlp->nlp_state;
+		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
 		/* Fix up the rport accordingly */
 		rport = ndlp->rport;
 		if (rport) {
@@ -7172,7 +7172,7 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 			goto out;
 		/* FDISC failed */
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
-				 "0126 FDISC failed. (%d/%d)\n",
+				 "0126 FDISC failed. (x%x/x%x)\n",
 				 irsp->ulpStatus, irsp->un.ulpWord[4]);
 		goto fdisc_failed;
 	}
@@ -7283,6 +7283,7 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	int rc;
 
 	vport->port_state = LPFC_FDISC;
+	vport->fc_myDID = 0;
 	cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
 	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
 				     ELS_CMD_FDISC);
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 678a4b11059c..343d87ba4df8 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -2977,9 +2977,9 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
 				"topology\n");
 				/* Get Loop Map information */
 		if (bf_get(lpfc_mbx_read_top_il, la)) {
-			spin_lock_irq(shost->host_lock);
+			spin_lock(shost->host_lock);
 			vport->fc_flag |= FC_LBIT;
-			spin_unlock_irq(shost->host_lock);
+			spin_unlock(shost->host_lock);
 		}
 
 		vport->fc_myDID = bf_get(lpfc_mbx_read_top_alpa_granted, la);
@@ -3029,9 +3029,9 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
 				phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
 		}
 		vport->fc_myDID = phba->fc_pref_DID;
-		spin_lock_irq(shost->host_lock);
+		spin_lock(shost->host_lock);
 		vport->fc_flag |= FC_LBIT;
-		spin_unlock_irq(shost->host_lock);
+		spin_unlock(shost->host_lock);
 	}
 	spin_unlock_irq(&phba->hbalock);
 
@@ -5332,6 +5332,10 @@ lpfc_filter_by_rpi(struct lpfc_nodelist *ndlp, void *param)
 {
 	uint16_t *rpi = param;
 
+	/* check for active node */
+	if (!NLP_CHK_NODE_ACT(ndlp))
+		return 0;
+
 	return ndlp->nlp_rpi == *rpi;
 }
 
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 7245bead3755..5f280b5ae3db 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2010 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2011 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  *                                                                 *
@@ -70,6 +70,7 @@
 /* vendor ID used in SCSI netlink calls */
 #define LPFC_NL_VENDOR_ID (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX)
 
+#define FW_REV_STR_SIZE	32
 /* Common Transport structures and definitions */
 
 union CtRevisionId {
@@ -2567,6 +2568,8 @@ typedef struct {
 
 #define  DMP_MEM_REG             0x1
 #define  DMP_NV_PARAMS           0x2
+#define  DMP_LMSD                0x3 /* Link Module Serial Data */
+#define  DMP_WELL_KNOWN          0x4
 
 #define  DMP_REGION_VPD          0xe
 #define  DMP_VPD_SIZE            0x400  /* maximum amount of VPD */
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index e5bfa7f334e3..9e2b9b227e1a 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -321,6 +321,10 @@ struct lpfc_cqe {
 #define CQE_STATUS_CMD_REJECT		0xb
 #define CQE_STATUS_FCP_TGT_LENCHECK	0xc
 #define CQE_STATUS_NEED_BUFF_ENTRY	0xf
+#define CQE_STATUS_DI_ERROR		0x16
+
+/* Used when mapping CQE status to IOCB */
+#define LPFC_IOCB_STATUS_MASK		0xf
 
 /* Status returned by hardware (valid only if status = CQE_STATUS_SUCCESS). */
 #define CQE_HW_STATUS_NO_ERR		0x0
@@ -348,6 +352,21 @@ struct lpfc_wcqe_complete {
 #define lpfc_wcqe_c_hw_status_WORD	word0
 	uint32_t total_data_placed;
 	uint32_t parameter;
+#define lpfc_wcqe_c_bg_edir_SHIFT	5
+#define lpfc_wcqe_c_bg_edir_MASK	0x00000001
+#define lpfc_wcqe_c_bg_edir_WORD	parameter
+#define lpfc_wcqe_c_bg_tdpv_SHIFT	3
+#define lpfc_wcqe_c_bg_tdpv_MASK	0x00000001
+#define lpfc_wcqe_c_bg_tdpv_WORD	parameter
+#define lpfc_wcqe_c_bg_re_SHIFT		2
+#define lpfc_wcqe_c_bg_re_MASK		0x00000001
+#define lpfc_wcqe_c_bg_re_WORD		parameter
+#define lpfc_wcqe_c_bg_ae_SHIFT		1
+#define lpfc_wcqe_c_bg_ae_MASK		0x00000001
+#define lpfc_wcqe_c_bg_ae_WORD		parameter
+#define lpfc_wcqe_c_bg_ge_SHIFT		0
+#define lpfc_wcqe_c_bg_ge_MASK		0x00000001
+#define lpfc_wcqe_c_bg_ge_WORD		parameter
 	uint32_t word3;
 #define lpfc_wcqe_c_valid_SHIFT		lpfc_cqe_valid_SHIFT
 #define lpfc_wcqe_c_valid_MASK		lpfc_cqe_valid_MASK
@@ -359,8 +378,8 @@ struct lpfc_wcqe_complete {
 #define lpfc_wcqe_c_pv_MASK		0x00000001
 #define lpfc_wcqe_c_pv_WORD		word3
 #define lpfc_wcqe_c_priority_SHIFT	24
-#define lpfc_wcqe_c_priority_MASK		0x00000007
-#define lpfc_wcqe_c_priority_WORD		word3
+#define lpfc_wcqe_c_priority_MASK	0x00000007
+#define lpfc_wcqe_c_priority_WORD	word3
 #define lpfc_wcqe_c_code_SHIFT		lpfc_cqe_code_SHIFT
 #define lpfc_wcqe_c_code_MASK		lpfc_cqe_code_MASK
 #define lpfc_wcqe_c_code_WORD		lpfc_cqe_code_WORD
@@ -715,12 +734,20 @@ struct lpfc_register {
 #define lpfc_eqcq_doorbell_eqci_SHIFT		9
 #define lpfc_eqcq_doorbell_eqci_MASK		0x0001
 #define lpfc_eqcq_doorbell_eqci_WORD		word0
-#define lpfc_eqcq_doorbell_cqid_SHIFT		0
-#define lpfc_eqcq_doorbell_cqid_MASK		0x03FF
-#define lpfc_eqcq_doorbell_cqid_WORD		word0
-#define lpfc_eqcq_doorbell_eqid_SHIFT		0
-#define lpfc_eqcq_doorbell_eqid_MASK		0x01FF
-#define lpfc_eqcq_doorbell_eqid_WORD		word0
+#define lpfc_eqcq_doorbell_cqid_lo_SHIFT	0
+#define lpfc_eqcq_doorbell_cqid_lo_MASK		0x03FF
+#define lpfc_eqcq_doorbell_cqid_lo_WORD		word0
+#define lpfc_eqcq_doorbell_cqid_hi_SHIFT	11
+#define lpfc_eqcq_doorbell_cqid_hi_MASK		0x001F
+#define lpfc_eqcq_doorbell_cqid_hi_WORD		word0
+#define lpfc_eqcq_doorbell_eqid_lo_SHIFT	0
+#define lpfc_eqcq_doorbell_eqid_lo_MASK		0x01FF
+#define lpfc_eqcq_doorbell_eqid_lo_WORD		word0
+#define lpfc_eqcq_doorbell_eqid_hi_SHIFT	11
+#define lpfc_eqcq_doorbell_eqid_hi_MASK		0x001F
+#define lpfc_eqcq_doorbell_eqid_hi_WORD		word0
+#define LPFC_CQID_HI_FIELD_SHIFT		10
+#define LPFC_EQID_HI_FIELD_SHIFT		9
 
 #define LPFC_BMBX			0x0160
 #define lpfc_bmbx_addr_SHIFT		2
@@ -3313,7 +3340,11 @@ struct xmit_bls_rsp64_wqe {
 	uint32_t rsrvd4;
 	struct wqe_did	wqe_dest;
 	struct wqe_common wqe_com; /* words 6-11 */
-	uint32_t rsvd_12_15[4];
+	uint32_t word12;
+#define xmit_bls_rsp64_temprpi_SHIFT  0
+#define xmit_bls_rsp64_temprpi_MASK   0x0000ffff
+#define xmit_bls_rsp64_temprpi_WORD   word12
+	uint32_t rsvd_13_15[3];
 };
 
 struct wqe_rctl_dfctl {
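
The doorbell hunk above widens CQ/EQ identifiers beyond the old 10- and 9-bit fields by splitting each id into a lo part (word0 bits 0-9) and a hi part (word0 bits 11-15), with LPFC_CQID_HI_FIELD_SHIFT naming where the hi part begins within the id itself. A worked example composing word0 from the masks and shifts shown in the hunk; the helper name is invented:

/* Compose the doorbell word for a CQ id wider than 10 bits, per the
 * lo/hi field definitions above. */
#include <stdint.h>
#include <stdio.h>

#define CQID_LO_SHIFT			0
#define CQID_LO_MASK			0x03FF
#define CQID_HI_SHIFT			11
#define CQID_HI_MASK			0x001F
#define LPFC_CQID_HI_FIELD_SHIFT	10

static uint32_t doorbell_word0(uint32_t cqid)
{
	uint32_t lo = cqid & CQID_LO_MASK;
	uint32_t hi = (cqid >> LPFC_CQID_HI_FIELD_SHIFT) & CQID_HI_MASK;

	return (lo << CQID_LO_SHIFT) | (hi << CQID_HI_SHIFT);
}

int main(void)
{
	/* id 0x4d2: lo = 0x0d2, hi = 0x1 -> word0 = 0x8d2 */
	printf("word0 = 0x%x\n", doorbell_word0(0x4d2));
	return 0;
}
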
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index dfea2dada02c..b38f99f3be32 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -32,6 +32,7 @@
 #include <linux/aer.h>
 #include <linux/slab.h>
 #include <linux/firmware.h>
+#include <linux/miscdevice.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_device.h>
@@ -1474,8 +1475,12 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
 				phba->sli4_hba.u.if_type2.STATUSregaddr,
 				&portstat_reg.word0);
 		/* consider PCI bus read error as pci_channel_offline */
-		if (pci_rd_rc1 == -EIO)
+		if (pci_rd_rc1 == -EIO) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"3151 PCI bus read access failure: x%x\n",
+				readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
 			return;
+		}
 		reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
 		reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
 		if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
@@ -1525,6 +1530,9 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
 			}
 			/* fall through for not able to recover */
 		}
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"3152 Unrecoverable error, bring the port "
+				"offline\n");
 		lpfc_sli4_offline_eratt(phba);
 		break;
 	case LPFC_SLI_INTF_IF_TYPE_1:
@@ -2333,13 +2341,20 @@ lpfc_cleanup(struct lpfc_vport *vport)
 			continue;
 		}
 
+		/* take care of nodes in unused state before the state
+		 * machine taking action.
+		 */
+		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
+			lpfc_nlp_put(ndlp);
+			continue;
+		}
+
 		if (ndlp->nlp_type & NLP_FABRIC)
 			lpfc_disc_state_machine(vport, ndlp, NULL,
 					NLP_EVT_DEVICE_RECOVERY);
 
 		lpfc_disc_state_machine(vport, ndlp, NULL,
 					     NLP_EVT_DEVICE_RM);
-
 	}
 
 	/* At this point, ALL ndlp's should be gone
@@ -2513,6 +2528,42 @@ lpfc_block_mgmt_io(struct lpfc_hba * phba)
 }
 
 /**
+ * lpfc_sli4_node_prep - Assign RPIs for active nodes.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * Allocate RPIs for all active remote nodes. This is needed whenever
+ * an SLI4 adapter is reset and the driver is not unloading. Its purpose
+ * is to fix up the temporary rpi assignments.
+ **/
+void
+lpfc_sli4_node_prep(struct lpfc_hba *phba)
+{
+	struct lpfc_nodelist  *ndlp, *next_ndlp;
+	struct lpfc_vport **vports;
+	int i;
+
+	if (phba->sli_rev != LPFC_SLI_REV4)
+		return;
+
+	vports = lpfc_create_vport_work_array(phba);
+	if (vports != NULL) {
+		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+			if (vports[i]->load_flag & FC_UNLOADING)
+				continue;
+
+			list_for_each_entry_safe(ndlp, next_ndlp,
+						 &vports[i]->fc_nodes,
+						 nlp_listp) {
+				if (NLP_CHK_NODE_ACT(ndlp))
+					ndlp->nlp_rpi =
+						lpfc_sli4_alloc_rpi(phba);
+			}
+		}
+	}
+	lpfc_destroy_vport_work_array(phba, vports);
+}
+
+/**
  * lpfc_online - Initialize and bring a HBA online
  * @phba: pointer to lpfc hba data structure.
  *
@@ -2653,6 +2704,15 @@ lpfc_offline_prep(struct lpfc_hba * phba)
 				}
 				spin_lock_irq(shost->host_lock);
 				ndlp->nlp_flag &= ~NLP_NPR_ADISC;
+
+				/*
+				 * Whenever an SLI4 port goes offline, free the
+				 * RPI.  A new RPI will be assigned when the
+				 * adapter port comes back online.
+				 */
+				if (phba->sli_rev == LPFC_SLI_REV4)
+					lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
+
 				spin_unlock_irq(shost->host_lock);
 				lpfc_unreg_rpi(vports[i], ndlp);
 			}
@@ -4327,6 +4387,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 	uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
 	struct lpfc_mqe *mqe;
 	int longs, sli_family;
+	int sges_per_segment;
 
 	/* Before proceed, wait for POST done and device ready */
 	rc = lpfc_sli4_post_status_check(phba);
@@ -4390,6 +4451,11 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 	phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
 	phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
 
+	/* With BlockGuard we can have multiple SGEs per Data Segment */
+	sges_per_segment = 1;
+	if (phba->cfg_enable_bg)
+		sges_per_segment = 2;
+
 	/*
 	 * Since the sg_tablesize is module parameter, the sg_dma_buf_size
 	 * used to create the sg_dma_buf_pool must be dynamically calculated.
@@ -4398,7 +4464,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 	 * sgl sizes of must be a power of 2.
 	 */
 	buf_size = (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) +
-		    ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge)));
+		    (((phba->cfg_sg_seg_cnt * sges_per_segment) + 2) *
+		    sizeof(struct sli4_sge)));
 
 	sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf);
 	max_buf_size = LPFC_SLI4_MAX_BUF_SIZE;
@@ -4415,6 +4482,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 	default:
 		break;
 	}
+
 	for (dma_buf_size = LPFC_SLI4_MIN_BUF_SIZE;
 	     dma_buf_size < max_buf_size && buf_size > dma_buf_size;
 	     dma_buf_size = dma_buf_size << 1)
@@ -7223,19 +7291,17 @@ lpfc_pci_function_reset(struct lpfc_hba *phba)
 					rc = -ENODEV;
 					goto out;
 				}
-				if (bf_get(lpfc_sliport_status_rdy, &reg_data))
-					break;
-				if (bf_get(lpfc_sliport_status_rn, &reg_data)) {
+				if (bf_get(lpfc_sliport_status_rn, &reg_data))
 					reset_again++;
+				if (bf_get(lpfc_sliport_status_rdy, &reg_data))
 					break;
-				}
 			}
 
 			/*
 			 * If the port responds to the init request with
 			 * reset needed, delay for a bit and restart the loop.
 			 */
-			if (reset_again) {
+			if (reset_again && (rdy_chk < 1000)) {
 				msleep(10);
 				reset_again = 0;
 				continue;
@@ -8112,6 +8178,9 @@ lpfc_unset_hba(struct lpfc_hba *phba)
 	vport->load_flag |= FC_UNLOADING;
 	spin_unlock_irq(shost->host_lock);
 
+	kfree(phba->vpi_bmask);
+	kfree(phba->vpi_ids);
+
 	lpfc_stop_hba_timers(phba);
 
 	phba->pport->work_port_events = 0;
@@ -8644,6 +8713,9 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev)
 	/* Final cleanup of txcmplq and reset the HBA */
 	lpfc_sli_brdrestart(phba);
 
+	kfree(phba->vpi_bmask);
+	kfree(phba->vpi_ids);
+
 	lpfc_stop_hba_timers(phba);
 	spin_lock_irq(&phba->hbalock);
 	list_del_init(&vport->listentry);
@@ -9058,7 +9130,7 @@ lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
 int
 lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw)
 {
-	char fwrev[32];
+	char fwrev[FW_REV_STR_SIZE];
 	struct lpfc_grp_hdr *image = (struct lpfc_grp_hdr *)fw->data;
 	struct list_head dma_buffer_list;
 	int i, rc = 0;
@@ -10012,6 +10084,36 @@ lpfc_io_resume(struct pci_dev *pdev)
 	return;
 }
 
+/**
+ * lpfc_mgmt_open - method called when 'lpfcmgmt' is opened from userspace
+ * @inode: pointer to the inode representing the lpfcmgmt device
+ * @filep: pointer to the file representing the open lpfcmgmt device
+ *
+ * This routine takes a reference on the lpfc module whenever the
+ * character device is opened.
+ **/
+static int
+lpfc_mgmt_open(struct inode *inode, struct file *filep)
+{
+	try_module_get(THIS_MODULE);
+	return 0;
+}
+
+/**
+ * lpfc_mgmt_release - method called when 'lpfcmgmt' is closed in userspace
+ * @inode: pointer to the inode representing the lpfcmgmt device
+ * @filep: pointer to the file representing the open lpfcmgmt device
+ *
+ * This routine drops a reference on the lpfc module when the
+ * character device is closed.
+ **/
+static int
+lpfc_mgmt_release(struct inode *inode, struct file *filep)
+{
+	module_put(THIS_MODULE);
+	return 0;
+}
+
 static struct pci_device_id lpfc_id_table[] = {
 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
 		PCI_ANY_ID, PCI_ANY_ID, },
@@ -10124,6 +10226,17 @@ static struct pci_driver lpfc_driver = {
 	.err_handler    = &lpfc_err_handler,
 };
 
+static const struct file_operations lpfc_mgmt_fop = {
+	.open = lpfc_mgmt_open,
+	.release = lpfc_mgmt_release,
+};
+
+static struct miscdevice lpfc_mgmt_dev = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "lpfcmgmt",
+	.fops = &lpfc_mgmt_fop,
+};
+
 /**
  * lpfc_init - lpfc module initialization routine
  *
@@ -10144,6 +10257,11 @@ lpfc_init(void)
 	printk(LPFC_MODULE_DESC "\n");
 	printk(LPFC_COPYRIGHT "\n");
 
+	error = misc_register(&lpfc_mgmt_dev);
+	if (error)
+		printk(KERN_ERR "Could not register lpfcmgmt device, "
+			"misc_register returned with status %d\n", error);
+
 	if (lpfc_enable_npiv) {
 		lpfc_transport_functions.vport_create = lpfc_vport_create;
 		lpfc_transport_functions.vport_delete = lpfc_vport_delete;
@@ -10180,6 +10298,7 @@ lpfc_init(void)
 static void __exit
 lpfc_exit(void)
 {
+	misc_deregister(&lpfc_mgmt_dev);
 	pci_unregister_driver(&lpfc_driver);
 	fc_release_transport(lpfc_transport_template);
 	if (lpfc_enable_npiv)
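
From userspace, the effect of the new lpfcmgmt misc device is simple: misc_register() with MISC_DYNAMIC_MINOR creates /dev/lpfcmgmt, and the try_module_get()/module_put() pair in lpfc_mgmt_open()/lpfc_mgmt_release() pins the module while the device is held open. A minimal usage sketch (assumes the lpfc driver is loaded and udev has created the node):

/* Holding /dev/lpfcmgmt open keeps the lpfc module loaded. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/lpfcmgmt", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/lpfcmgmt");
		return 1;
	}
	/* while fd stays open, "rmmod lpfc" will refuse to unload */
	close(fd);	/* drops the module reference */
	return 0;
}
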
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index e8bb00559943..7b6b2aa5795a 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -48,6 +48,10 @@ static int
 lpfc_check_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 		 struct lpfc_name *nn, struct lpfc_name *pn)
 {
+	/* First, we MUST have a RPI registered */
+	if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED))
+		return 0;
+
 	/* Compare the ADISC rsp WWNN / WWPN matches our internal node
 	 * table entry for that node.
 	 */
@@ -385,6 +389,10 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	if (!mbox)
 		goto out;
 
+	/* Registering an existing RPI behaves differently for SLI3 vs SLI4 */
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		lpfc_unreg_rpi(vport, ndlp);
+
 	rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID,
 			    (uint8_t *) sp, mbox, ndlp->nlp_rpi);
 	if (rc) {
@@ -445,11 +453,43 @@ out:
 	return 0;
 }
 
+/**
+ * lpfc_mbx_cmpl_resume_rpi - Resume RPI completion routine
+ * @phba: pointer to lpfc hba data structure.
+ * @mboxq: pointer to mailbox object
+ *
+ * This routine is invoked to issue a completion to a received
+ * ADISC or PDISC after the paused RPI has been resumed.
+ **/
+static void
+lpfc_mbx_cmpl_resume_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+	struct lpfc_vport *vport;
+	struct lpfc_iocbq *elsiocb;
+	struct lpfc_nodelist *ndlp;
+	uint32_t cmd;
+
+	elsiocb = (struct lpfc_iocbq *)mboxq->context1;
+	ndlp = (struct lpfc_nodelist *) mboxq->context2;
+	vport = mboxq->vport;
+	cmd = elsiocb->drvrTimeout;
+
+	if (cmd == ELS_CMD_ADISC) {
+		lpfc_els_rsp_adisc_acc(vport, elsiocb, ndlp);
+	} else {
+		lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, elsiocb,
+			ndlp, NULL);
+	}
+	kfree(elsiocb);
+	mempool_free(mboxq, phba->mbox_mem_pool);
+}
+
 static int
 lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 		struct lpfc_iocbq *cmdiocb)
 {
 	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_iocbq  *elsiocb;
 	struct lpfc_dmabuf *pcmd;
 	struct serv_parm   *sp;
 	struct lpfc_name   *pnn, *ppn;
@@ -475,12 +515,43 @@ lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 
 	icmd = &cmdiocb->iocb;
 	if (icmd->ulpStatus == 0 && lpfc_check_adisc(vport, ndlp, pnn, ppn)) {
+
+		/*
+		 * As soon as we send ACC, the remote NPort can
+		 * start sending us data. Thus, for SLI4 we must
+		 * resume the RPI before the ACC goes out.
+		 */
+		if (vport->phba->sli_rev == LPFC_SLI_REV4) {
+			elsiocb = kmalloc(sizeof(struct lpfc_iocbq),
+				GFP_KERNEL);
+			if (elsiocb) {
+
+				/* Save info from cmd IOCB used in rsp */
+				memcpy((uint8_t *)elsiocb, (uint8_t *)cmdiocb,
+					sizeof(struct lpfc_iocbq));
+
+				/* Save the ELS cmd */
+				elsiocb->drvrTimeout = cmd;
+
+				lpfc_sli4_resume_rpi(ndlp,
+					lpfc_mbx_cmpl_resume_rpi, elsiocb);
+				goto out;
+			}
+		}
+
 		if (cmd == ELS_CMD_ADISC) {
 			lpfc_els_rsp_adisc_acc(vport, cmdiocb, ndlp);
 		} else {
-			lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp,
-					 NULL);
+			lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb,
+				ndlp, NULL);
 		}
+out:
+		/* If we are authenticated, move to the proper state */
+		if (ndlp->nlp_type & NLP_FCP_TARGET)
+			lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
+		else
+			lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
+
 		return 1;
 	}
 	/* Reject this request because invalid parameters */
@@ -1229,7 +1300,7 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
 	}
 
 	if (phba->sli_rev == LPFC_SLI_REV4) {
-		rc = lpfc_sli4_resume_rpi(ndlp);
+		rc = lpfc_sli4_resume_rpi(ndlp, NULL, NULL);
 		if (rc) {
 			/* Stay in state and retry. */
 			ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
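
The padisc path above defers the ELS ACC behind an asynchronous step: for SLI4 it copies the command iocb, stashes the ELS opcode in drvrTimeout, and only sends the ACC from lpfc_mbx_cmpl_resume_rpi() once the mailbox that resumes the RPI completes. A generic callback-plus-context sketch of that ordering guarantee; every name here is illustrative, not lpfc API:

/* Defer the response behind an async step: save the response context on
 * the request, finish the exchange from the completion callback. */
#include <stdio.h>
#include <stdlib.h>

struct request {
	void (*done)(struct request *req);	/* completion callback */
	const char *saved_cmd;			/* like the drvrTimeout stash */
};

/* like lpfc_mbx_cmpl_resume_rpi: runs only after the RPI is resumed */
static void send_acc(struct request *req)
{
	printf("RPI resumed; now sending ACC for %s\n", req->saved_cmd);
	free(req);
}

/* stand-in for the mailbox completion firing asynchronously */
static void mailbox_completes(struct request *req)
{
	req->done(req);
}

int main(void)
{
	struct request *req = malloc(sizeof(*req));

	if (!req)
		return 1;
	req->done = send_acc;
	req->saved_cmd = "ADISC";	/* context saved before issuing the mailbox */
	mailbox_completes(req);		/* the ACC is ordered after the resume step */
	return 0;
}
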
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index c60f5d0b3869..efc055b6bac4 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2011 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2012 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
@@ -1280,31 +1280,45 @@ lpfc_cmd_blksize(struct scsi_cmnd *sc)
 }
 
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
-/*
- * Given a scsi cmnd, determine the BlockGuard tags to be used with it
+
+#define BG_ERR_INIT	1
+#define BG_ERR_TGT	2
+#define BG_ERR_SWAP	3
+#define BG_ERR_CHECK	4
+
+/**
+ * lpfc_bg_err_inject - Determine if we should inject an error
+ * @phba: The Hba for which this call is being executed.
  * @sc: The SCSI command to examine
  * @reftag: (out) BlockGuard reference tag for transmitted data
  * @apptag: (out) BlockGuard application tag for transmitted data
  * @new_guard (in) Value to replace CRC with if needed
  *
- * Returns (1) if error injection was performed, (0) otherwise
- */
+ * Returns (1) if error injection is detected by Initiator
+ * Returns (2) if error injection is detected by Target
+ * Returns (3) if swapping CSUM->CRC is required for error injection
+ * Returns (4) if disabling Guard/Ref/App checking is required for error injection
+ **/
 static int
 lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
 		uint32_t *reftag, uint16_t *apptag, uint32_t new_guard)
 {
 	struct scatterlist *sgpe; /* s/g prot entry */
 	struct scatterlist *sgde; /* s/g data entry */
-	struct scsi_dif_tuple *src;
+	struct scsi_dif_tuple *src = NULL;
 	uint32_t op = scsi_get_prot_op(sc);
 	uint32_t blksize;
 	uint32_t numblks;
 	sector_t lba;
 	int rc = 0;
+	int blockoff = 0;
 
 	if (op == SCSI_PROT_NORMAL)
 		return 0;
 
+	sgpe = scsi_prot_sglist(sc);
+	sgde = scsi_sglist(sc);
+
 	lba = scsi_get_lba(sc);
 	if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) {
 		blksize = lpfc_cmd_blksize(sc);
@@ -1314,142 +1328,296 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
 		if ((phba->lpfc_injerr_lba < lba) ||
 			(phba->lpfc_injerr_lba >= (lba + numblks)))
 			return 0;
+		if (sgpe) {
+			blockoff = phba->lpfc_injerr_lba - lba;
+			numblks = sg_dma_len(sgpe) /
+				sizeof(struct scsi_dif_tuple);
+			if (numblks < blockoff)
+				blockoff = numblks;
+			src = (struct scsi_dif_tuple *)sg_virt(sgpe);
+			src += blockoff;
+		}
 	}
 
-	sgpe = scsi_prot_sglist(sc);
-	sgde = scsi_sglist(sc);
-
 	/* Should we change the Reference Tag */
 	if (reftag) {
-		/*
-		 * If we are SCSI_PROT_WRITE_STRIP, the protection data is
-		 * being stripped from the wire, thus it doesn't matter.
-		 */
-		if ((op == SCSI_PROT_WRITE_PASS) ||
-			(op == SCSI_PROT_WRITE_INSERT)) {
-			if (phba->lpfc_injerr_wref_cnt) {
+		if (phba->lpfc_injerr_wref_cnt) {
+			switch (op) {
+			case SCSI_PROT_WRITE_PASS:
+				if (blockoff && src) {
+					/* Insert error in middle of the IO */
+
+					lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+					"9076 BLKGRD: Injecting reftag error: "
+					"write lba x%lx + x%x oldrefTag x%x\n",
+					(unsigned long)lba, blockoff,
+					src->ref_tag);
+
+					/*
+					 * NOTE, this will change ref tag in
+					 * the memory location forever!
+					 */
+					src->ref_tag = 0xDEADBEEF;
+					phba->lpfc_injerr_wref_cnt--;
+					phba->lpfc_injerr_lba =
+						LPFC_INJERR_LBA_OFF;
+					rc = BG_ERR_CHECK;
+					break;
+				}
+				/* Drop thru */
+			case SCSI_PROT_WRITE_STRIP:
+				/*
+				 * For WRITE_STRIP and WRITE_PASS,
+				 * force the error on data
+				 * being copied from SLI-Host to SLI-Port.
+				 */
+				*reftag = 0xDEADBEEF;
+				phba->lpfc_injerr_wref_cnt--;
+				phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
+				rc = BG_ERR_INIT;
 
+				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+					"9077 BLKGRD: Injecting reftag error: "
+					"write lba x%lx\n", (unsigned long)lba);
+				break;
+			case SCSI_PROT_WRITE_INSERT:
+				/*
+				 * For WRITE_INSERT, force the
+				 * error to be sent on the wire. It should be
+				 * detected by the Target.
+				 */
 				/* DEADBEEF will be the reftag on the wire */
 				*reftag = 0xDEADBEEF;
 				phba->lpfc_injerr_wref_cnt--;
 				phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
-				rc = 1;
+				rc = BG_ERR_TGT;
 
 				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
-					"9081 BLKGRD: Injecting reftag error: "
+					"9078 BLKGRD: Injecting reftag error: "
 					"write lba x%lx\n", (unsigned long)lba);
+				break;
 			}
-		} else {
-			if (phba->lpfc_injerr_rref_cnt) {
+		}
+		if (phba->lpfc_injerr_rref_cnt) {
+			switch (op) {
+			case SCSI_PROT_READ_INSERT:
+				/*
+				 * For READ_INSERT, it doesn't make sense
+				 * to change the reftag.
+				 */
+				break;
+			case SCSI_PROT_READ_STRIP:
+			case SCSI_PROT_READ_PASS:
+				/*
+				 * For READ_STRIP and READ_PASS, force the
+				 * error on data being read off the wire. It
+				 * should force an IO error to the driver.
+				 */
 				*reftag = 0xDEADBEEF;
 				phba->lpfc_injerr_rref_cnt--;
 				phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
-				rc = 1;
+				rc = BG_ERR_INIT;
 
 				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
-					"9076 BLKGRD: Injecting reftag error: "
+					"9079 BLKGRD: Injecting reftag error: "
 					"read lba x%lx\n", (unsigned long)lba);
+				break;
 			}
 		}
 	}
 
 	/* Should we change the Application Tag */
 	if (apptag) {
-		/*
-		 * If we are SCSI_PROT_WRITE_STRIP, the protection data is
-		 * being stripped from the wire, thus it doesn't matter.
-		 */
-		if ((op == SCSI_PROT_WRITE_PASS) ||
-			(op == SCSI_PROT_WRITE_INSERT)) {
-			if (phba->lpfc_injerr_wapp_cnt) {
+		if (phba->lpfc_injerr_wapp_cnt) {
+			switch (op) {
+			case SCSI_PROT_WRITE_PASS:
+				if (blockoff && src) {
+					/* Insert error in middle of the IO */
+
+					lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+					"9080 BLKGRD: Injecting apptag error: "
+					"write lba x%lx + x%x oldappTag x%x\n",
+					(unsigned long)lba, blockoff,
+					src->app_tag);
 
+					/*
+					 * NOTE, this will change app tag in
+					 * the memory location forever!
+					 */
+					src->app_tag = 0xDEAD;
+					phba->lpfc_injerr_wapp_cnt--;
+					phba->lpfc_injerr_lba =
+						LPFC_INJERR_LBA_OFF;
+					rc = BG_ERR_CHECK;
+					break;
+				}
+				/* Drop thru */
+			case SCSI_PROT_WRITE_STRIP:
+				/*
+				 * For WRITE_STRIP and WRITE_PASS,
+				 * force the error on data
+				 * being copied from SLI-Host to SLI-Port.
+				 */
+				*apptag = 0xDEAD;
+				phba->lpfc_injerr_wapp_cnt--;
+				phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
+				rc = BG_ERR_INIT;
+
+				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+					"0812 BLKGRD: Injecting apptag error: "
+					"write lba x%lx\n", (unsigned long)lba);
+				break;
+			case SCSI_PROT_WRITE_INSERT:
+				/*
+				 * For WRITE_INSERT, force the
+				 * error to be sent on the wire. It should be
+				 * detected by the Target.
+				 */
 				/* DEAD will be the apptag on the wire */
 				*apptag = 0xDEAD;
 				phba->lpfc_injerr_wapp_cnt--;
 				phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
-				rc = 1;
+				rc = BG_ERR_TGT;
 
 				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
-					"9077 BLKGRD: Injecting apptag error: "
+					"0813 BLKGRD: Injecting apptag error: "
 					"write lba x%lx\n", (unsigned long)lba);
+				break;
 			}
-		} else {
-			if (phba->lpfc_injerr_rapp_cnt) {
+		}
+		if (phba->lpfc_injerr_rapp_cnt) {
+			switch (op) {
+			case SCSI_PROT_READ_INSERT:
+				/*
+				 * For READ_INSERT, it doesn't make sense
+				 * to change the apptag.
+				 */
+				break;
+			case SCSI_PROT_READ_STRIP:
+			case SCSI_PROT_READ_PASS:
+				/*
+				 * For READ_STRIP and READ_PASS, force the
+				 * error on data being read off the wire. It
+				 * should force an IO error to the driver.
+				 */
 				*apptag = 0xDEAD;
 				phba->lpfc_injerr_rapp_cnt--;
 				phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
-				rc = 1;
+				rc = BG_ERR_INIT;
 
 				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
-					"9078 BLKGRD: Injecting apptag error: "
+					"0814 BLKGRD: Injecting apptag error: "
 					"read lba x%lx\n", (unsigned long)lba);
+				break;
 			}
 		}
 	}
 
+
 	/* Should we change the Guard Tag */
+	if (new_guard) {
+		if (phba->lpfc_injerr_wgrd_cnt) {
+			switch (op) {
+			case SCSI_PROT_WRITE_PASS:
+				if (blockoff && src) {
+					/* Insert error in middle of the IO */
+
+					lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+					"0815 BLKGRD: Injecting guard error: "
+					"write lba x%lx + x%x oldgrdTag x%x\n",
+					(unsigned long)lba, blockoff,
+					src->guard_tag);
 
-	/*
-	 * If we are SCSI_PROT_WRITE_INSERT, the protection data is
-	 * being on the wire is being fully generated on the HBA.
-	 * The host cannot change it or force an error.
-	 */
-	if (((op == SCSI_PROT_WRITE_STRIP) ||
-		(op == SCSI_PROT_WRITE_PASS)) &&
-		phba->lpfc_injerr_wgrd_cnt) {
-		if (sgpe) {
-			src = (struct scsi_dif_tuple *)sg_virt(sgpe);
-			/*
-			 * Just inject an error in the first
-			 * prot block.
-			 */
-			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
-				"9079 BLKGRD: Injecting guard error: "
-				"write lba x%lx oldGuard x%x refTag x%x\n",
-				(unsigned long)lba, src->guard_tag,
-				src->ref_tag);
+					/*
+					 * NOTE, this will change guard tag in
+					 * the memory location forever!
+					 */
+					src->guard_tag = 0xDEAD;
+					phba->lpfc_injerr_wgrd_cnt--;
+					phba->lpfc_injerr_lba =
+						LPFC_INJERR_LBA_OFF;
+					rc = BG_ERR_CHECK;
+					break;
+				}
+				/* Drop thru */
+			case SCSI_PROT_WRITE_STRIP:
+				/*
+				 * For WRITE_STRIP and WRITE_PASS,
+				 * force the error on data
+				 * being copied from SLI-Host to SLI-Port.
+				 */
+				phba->lpfc_injerr_wgrd_cnt--;
+				phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
 
-			src->guard_tag = (uint16_t)new_guard;
-			phba->lpfc_injerr_wgrd_cnt--;
-			phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
-			rc = 1;
+				rc = BG_ERR_SWAP;
+				/* Signals the caller to swap CRC->CSUM */
 
-		} else {
-			blksize = lpfc_cmd_blksize(sc);
-			/*
-			 * Jump past the first data block
-			 * and inject an error in the
-			 * prot data. The prot data is already
-			 * embedded after the regular data.
-			 */
-			src = (struct scsi_dif_tuple *)
-					(sg_virt(sgde) + blksize);
+				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+					"0816 BLKGRD: Injecting guard error: "
+					"write lba x%lx\n", (unsigned long)lba);
+				break;
+			case SCSI_PROT_WRITE_INSERT:
+				/*
+				 * For WRITE_INSERT, force the
+				 * error to be sent on the wire. It should be
+				 * detected by the Target.
+				 */
+				phba->lpfc_injerr_wgrd_cnt--;
+				phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
 
-			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
-				"9080 BLKGRD: Injecting guard error: "
-				"write lba x%lx oldGuard x%x refTag x%x\n",
-				(unsigned long)lba, src->guard_tag,
-				src->ref_tag);
-
-			src->guard_tag = (uint16_t)new_guard;
-			phba->lpfc_injerr_wgrd_cnt--;
-			phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
-			rc = 1;
+				rc = BG_ERR_SWAP;
+				/* Signals the caller to swap CRC->CSUM */
+
+				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+					"0817 BLKGRD: Injecting guard error: "
+					"write lba x%lx\n", (unsigned long)lba);
+				break;
+			}
+		}
+		if (phba->lpfc_injerr_rgrd_cnt) {
+			switch (op) {
+			case SCSI_PROT_READ_INSERT:
+				/*
+				 * For READ_INSERT, it doesn't make sense
+				 * to change the guard tag.
+				 */
+				break;
+			case SCSI_PROT_READ_STRIP:
+			case SCSI_PROT_READ_PASS:
+				/*
+				 * For READ_STRIP and READ_PASS, force the
+				 * error on data being read off the wire. It
+				 * should force an IO error to the driver.
+				 */
+				*apptag = 0xDEAD;
+				phba->lpfc_injerr_rgrd_cnt--;
+				phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
+
+				rc = BG_ERR_SWAP;
+				/* Signals the caller to swap CRC->CSUM */
+
+				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+					"0818 BLKGRD: Injecting guard error: "
+					"read lba x%lx\n", (unsigned long)lba);
+			}
 		}
 	}
+
 	return rc;
 }
 #endif
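The injection paths above poke values straight into an in-memory DIF tuple
(src->ref_tag, src->app_tag, src->guard_tag). For orientation, the 8-byte
T10 tuple being manipulated has roughly this shape, a sketch matching the
driver's scsi_dif_tuple usage and the reason the SGL code below can assert
protgroup_len % 8 == 0:

	struct scsi_dif_tuple {
		__be16 guard_tag;	/* CRC (or IP checksum) of one data block */
		__be16 app_tag;		/* application tag; 0xDEAD when injected */
		__be32 ref_tag;		/* typically the low 32 bits of the LBA */
	};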
 
-/*
- * Given a scsi cmnd, determine the BlockGuard opcodes to be used with it
+/**
+ * lpfc_sc_to_bg_opcodes - Determine the BlockGuard opcodes to be used with
+ * the specified SCSI command.
+ * @phba: The Hba for which this call is being executed.
  * @sc: The SCSI command to examine
  * @txop: (out) BlockGuard operation for transmitted data
  * @rxop: (out) BlockGuard operation for received data
  *
  * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
  *
- */
+ **/
 static int
 lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
 		uint8_t *txop, uint8_t *rxop)
@@ -1519,8 +1687,88 @@ lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
 	return ret;
 }
 
-/*
- * This function sets up buffer list for protection groups of
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+/**
+ * lpfc_bg_err_opcodes - Recompute the BlockGuard opcodes to be used with
+ * the specified SCSI command in order to force a guard tag error.
+ * @phba: The Hba for which this call is being executed.
+ * @sc: The SCSI command to examine
+ * @txop: (out) BlockGuard operation for transmitted data
+ * @rxop: (out) BlockGuard operation for received data
+ *
+ * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
+ *
+ **/
+static int
+lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
+		uint8_t *txop, uint8_t *rxop)
+{
+	uint8_t guard_type = scsi_host_get_guard(sc->device->host);
+	uint8_t ret = 0;
+
+	if (guard_type == SHOST_DIX_GUARD_IP) {
+		switch (scsi_get_prot_op(sc)) {
+		case SCSI_PROT_READ_INSERT:
+		case SCSI_PROT_WRITE_STRIP:
+			*txop = BG_OP_IN_CRC_OUT_NODIF;
+			*rxop = BG_OP_IN_NODIF_OUT_CRC;
+			break;
+
+		case SCSI_PROT_READ_STRIP:
+		case SCSI_PROT_WRITE_INSERT:
+			*txop = BG_OP_IN_NODIF_OUT_CSUM;
+			*rxop = BG_OP_IN_CSUM_OUT_NODIF;
+			break;
+
+		case SCSI_PROT_READ_PASS:
+		case SCSI_PROT_WRITE_PASS:
+			*txop = BG_OP_IN_CRC_OUT_CRC;
+			*rxop = BG_OP_IN_CRC_OUT_CRC;
+			break;
+
+		case SCSI_PROT_NORMAL:
+		default:
+			break;
+
+		}
+	} else {
+		switch (scsi_get_prot_op(sc)) {
+		case SCSI_PROT_READ_STRIP:
+		case SCSI_PROT_WRITE_INSERT:
+			*txop = BG_OP_IN_NODIF_OUT_CSUM;
+			*rxop = BG_OP_IN_CSUM_OUT_NODIF;
+			break;
+
+		case SCSI_PROT_READ_PASS:
+		case SCSI_PROT_WRITE_PASS:
+			*txop = BG_OP_IN_CSUM_OUT_CRC;
+			*rxop = BG_OP_IN_CRC_OUT_CSUM;
+			break;
+
+		case SCSI_PROT_READ_INSERT:
+		case SCSI_PROT_WRITE_STRIP:
+			*txop = BG_OP_IN_CSUM_OUT_NODIF;
+			*rxop = BG_OP_IN_NODIF_OUT_CSUM;
+			break;
+
+		case SCSI_PROT_NORMAL:
+		default:
+			break;
+		}
+	}
+
+	return ret;
+}
+#endif
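Each setup routine below consumes the injection result with the same few
lines; annotated here for the two interesting return codes (the
un-annotated pattern appears verbatim in the hunks that follow):

	rc = lpfc_bg_err_inject(phba, sc, &reftag, 0, 1);
	if (rc) {
		/* BG_ERR_SWAP: mismatch the tx/rx opcodes so the guard fails */
		if (rc == BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
		/* BG_ERR_CHECK: the tag was already corrupted in host memory;
		 * clear 'checking' so the HBA-side check (pde6_ce/pde6_re or
		 * the DISEED equivalents) lets the bad tag reach the target.
		 */
		if (rc == BG_ERR_CHECK)
			checking = 0;
	}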
+
+/**
+ * lpfc_bg_setup_bpl - Setup BlockGuard BPL with no protection data
+ * @phba: The Hba for which this call is being executed.
+ * @sc: pointer to scsi command we're working on
+ * @bpl: pointer to buffer list for protection groups
+ * @datasegcnt: number of segments of data that have been dma mapped
+ *
+ * This function sets up BPL buffer list for protection groups of
  * type LPFC_PG_TYPE_NO_DIF
  *
  * This is usually used when the HBA is instructed to generate
@@ -1539,12 +1787,11 @@ lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
  *                                |more Data BDE's ... (opt)|
  *                                +-------------------------+
  *
- * @sc: pointer to scsi command we're working on
- * @bpl: pointer to buffer list for protection groups
- * @datacnt: number of segments of data that have been dma mapped
  *
  * Note: Data s/g buffers have been dma mapped
- */
+ *
+ * Returns the number of BDEs added to the BPL.
+ **/
 static int
 lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
 		struct ulp_bde64 *bpl, int datasegcnt)
@@ -1555,6 +1802,8 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
 	dma_addr_t physaddr;
 	int i = 0, num_bde = 0, status;
 	int datadir = sc->sc_data_direction;
+	uint32_t rc;
+	uint32_t checking = 1;
 	uint32_t reftag;
 	unsigned blksize;
 	uint8_t txop, rxop;
@@ -1565,11 +1814,16 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
 
 	/* extract some info from the scsi command for pde*/
 	blksize = lpfc_cmd_blksize(sc);
-	reftag = scsi_get_lba(sc) & 0xffffffff;
+	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
 
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
-	/* reftag is the only error we can inject here */
-	lpfc_bg_err_inject(phba, sc, &reftag, 0, 0);
+	rc = lpfc_bg_err_inject(phba, sc, &reftag, 0, 1);
+	if (rc) {
+		if (rc == BG_ERR_SWAP)
+			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
+		if (rc == BG_ERR_CHECK)
+			checking = 0;
+	}
 #endif
 
 	/* setup PDE5 with what we have */
@@ -1592,8 +1846,8 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
 	bf_set(pde6_optx, pde6, txop);
 	bf_set(pde6_oprx, pde6, rxop);
 	if (datadir == DMA_FROM_DEVICE) {
-		bf_set(pde6_ce, pde6, 1);
-		bf_set(pde6_re, pde6, 1);
+		bf_set(pde6_ce, pde6, checking);
+		bf_set(pde6_re, pde6, checking);
 	}
 	bf_set(pde6_ai, pde6, 1);
 	bf_set(pde6_ae, pde6, 0);
@@ -1627,9 +1881,16 @@ out:
 	return num_bde;
 }
 
-/*
- * This function sets up buffer list for protection groups of
- * type LPFC_PG_TYPE_DIF_BUF
+/**
+ * lpfc_bg_setup_bpl_prot - Setup BlockGuard BPL with protection data
+ * @phba: The Hba for which this call is being executed.
+ * @sc: pointer to scsi command we're working on
+ * @bpl: pointer to buffer list for protection groups
+ * @datacnt: number of segments of data that have been dma mapped
+ * @protcnt: number of segments of protection data that have been dma mapped
+ *
+ * This function sets up BPL buffer list for protection groups of
+ * type LPFC_PG_TYPE_DIF_BUF
  *
  * This is usually used when DIFs are in their own buffers,
  * separate from the data. The HBA can then be instructed
@@ -1654,14 +1915,11 @@ out:
  *                                    |          ...            |
  *                                    +-------------------------+
  *
- * @sc: pointer to scsi command we're working on
- * @bpl: pointer to buffer list for protection groups
- * @datacnt: number of segments of data that have been dma mapped
- * @protcnt: number of segment of protection data that have been dma mapped
- *
  * Note: It is assumed that both data and protection s/g buffers have been
  *       mapped for DMA
- */
+ *
+ * Returns the number of BDEs added to the BPL.
+ **/
 static int
 lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
 		struct ulp_bde64 *bpl, int datacnt, int protcnt)
@@ -1681,6 +1939,8 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
 	int datadir = sc->sc_data_direction;
 	unsigned char pgdone = 0, alldone = 0;
 	unsigned blksize;
+	uint32_t rc;
+	uint32_t checking = 1;
 	uint32_t reftag;
 	uint8_t txop, rxop;
 	int num_bde = 0;
@@ -1701,11 +1961,16 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
 
 	/* extract some info from the scsi command */
 	blksize = lpfc_cmd_blksize(sc);
-	reftag = scsi_get_lba(sc) & 0xffffffff;
+	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
 
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
-	/* reftag / guard tag are the only errors we can inject here */
-	lpfc_bg_err_inject(phba, sc, &reftag, 0, 0xDEAD);
+	rc = lpfc_bg_err_inject(phba, sc, &reftag, 0, 1);
+	if (rc) {
+		if (rc == BG_ERR_SWAP)
+			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
+		if (rc == BG_ERR_CHECK)
+			checking = 0;
+	}
 #endif
 
 	split_offset = 0;
@@ -1729,8 +1994,8 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
 		bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
 		bf_set(pde6_optx, pde6, txop);
 		bf_set(pde6_oprx, pde6, rxop);
-		bf_set(pde6_ce, pde6, 1);
-		bf_set(pde6_re, pde6, 1);
+		bf_set(pde6_ce, pde6, checking);
+		bf_set(pde6_re, pde6, checking);
 		bf_set(pde6_ai, pde6, 1);
 		bf_set(pde6_ae, pde6, 0);
 		bf_set(pde6_apptagval, pde6, 0);
@@ -1852,13 +2117,358 @@ out:
 	return num_bde;
 }
 
-/*
+/**
+ * lpfc_bg_setup_sgl - Setup BlockGuard SGL with no protection data
+ * @phba: The Hba for which this call is being executed.
+ * @sc: pointer to scsi command we're working on
+ * @sgl: pointer to buffer list for protection groups
+ * @datasegcnt: number of segments of data that have been dma mapped
+ *
+ * This function sets up SGL buffer list for protection groups of
+ * type LPFC_PG_TYPE_NO_DIF
+ *
+ * This is usually used when the HBA is instructed to generate
+ * DIFs and insert them into the data stream (or strip DIFs from
+ * the incoming data stream)
+ *
+ * The buffer list consists of just one protection group described
+ * below:
+ *                                +-------------------------+
+ *   start of prot group  -->     |         DI_SEED         |
+ *                                +-------------------------+
+ *                                |         Data SGE        |
+ *                                +-------------------------+
+ *                                |more Data SGE's ... (opt)|
+ *                                +-------------------------+
+ *
+ *
+ * Note: Data s/g buffers have been dma mapped
+ *
+ * Returns the number of SGEs added to the SGL.
+ **/
+static int
+lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
+		struct sli4_sge *sgl, int datasegcnt)
+{
+	struct scatterlist *sgde = NULL; /* s/g data entry */
+	struct sli4_sge_diseed *diseed = NULL;
+	dma_addr_t physaddr;
+	int i = 0, num_sge = 0, status;
+	int datadir = sc->sc_data_direction;
+	uint32_t reftag;
+	unsigned blksize;
+	uint8_t txop, rxop;
+	uint32_t rc;
+	uint32_t checking = 1;
+	uint32_t dma_len;
+	uint32_t dma_offset = 0;
+
+	status  = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
+	if (status)
+		goto out;
+
+	/* extract some info from the scsi command for pde*/
+	blksize = lpfc_cmd_blksize(sc);
+	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+	rc = lpfc_bg_err_inject(phba, sc, &reftag, 0, 1);
+	if (rc) {
+		if (rc == BG_ERR_SWAP)
+			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
+		if (rc == BG_ERR_CHECK)
+			checking = 0;
+	}
+#endif
+
+	/* setup DISEED with what we have */
+	diseed = (struct sli4_sge_diseed *) sgl;
+	memset(diseed, 0, sizeof(struct sli4_sge_diseed));
+	bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);
+
+	/* Endianness conversion if necessary */
+	diseed->ref_tag = cpu_to_le32(reftag);
+	diseed->ref_tag_tran = diseed->ref_tag;
+
+	/* setup DISEED with the rest of the info */
+	bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
+	bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
+	if (datadir == DMA_FROM_DEVICE) {
+		bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
+		bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
+	}
+	bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
+	bf_set(lpfc_sli4_sge_dif_me, diseed, 0);
+
+	/* Endianness conversion if necessary for DISEED */
+	diseed->word2 = cpu_to_le32(diseed->word2);
+	diseed->word3 = cpu_to_le32(diseed->word3);
+
+	/* advance bpl and increment sge count */
+	num_sge++;
+	sgl++;
+
+	/* assumption: caller has already run dma_map_sg on command data */
+	scsi_for_each_sg(sc, sgde, datasegcnt, i) {
+		physaddr = sg_dma_address(sgde);
+		dma_len = sg_dma_len(sgde);
+		sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
+		sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
+		if ((i + 1) == datasegcnt)
+			bf_set(lpfc_sli4_sge_last, sgl, 1);
+		else
+			bf_set(lpfc_sli4_sge_last, sgl, 0);
+		bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
+		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
+
+		sgl->sge_len = cpu_to_le32(dma_len);
+		dma_offset += dma_len;
+
+		sgl++;
+		num_sge++;
+	}
+
+out:
+	return num_sge;
+}
+
+/**
+ * lpfc_bg_setup_sgl_prot - Setup BlockGuard SGL with protection data
+ * @phba: The Hba for which this call is being executed.
+ * @sc: pointer to scsi command we're working on
+ * @sgl: pointer to buffer list for protection groups
+ * @datacnt: number of segments of data that have been dma mapped
+ * @protcnt: number of segments of protection data that have been dma mapped
+ *
+ * This function sets up SGL buffer list for protection groups of
+ * type LPFC_PG_TYPE_DIF_BUF
+ *
+ * This is usually used when DIFs are in their own buffers,
+ * separate from the data. The HBA can then be instructed
+ * to place the DIFs in the outgoing stream.  For read operations,
+ * the HBA could extract the DIFs and place them in DIF buffers.
+ *
+ * The buffer list for this type consists of one or more of the
+ * protection groups described below:
+ *                                    +-------------------------+
+ *   start of first prot group  -->   |         DISEED          |
+ *                                    +-------------------------+
+ *                                    |      DIF (Prot SGE)     |
+ *                                    +-------------------------+
+ *                                    |        Data SGE         |
+ *                                    +-------------------------+
+ *                                    |more Data SGE's ... (opt)|
+ *                                    +-------------------------+
+ *   start of new  prot group  -->    |         DISEED          |
+ *                                    +-------------------------+
+ *                                    |          ...            |
+ *                                    +-------------------------+
+ *
+ * Note: It is assumed that both data and protection s/g buffers have been
+ *       mapped for DMA
+ *
+ * Returns the number of SGEs added to the SGL.
+ **/
+static int
+lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
+		struct sli4_sge *sgl, int datacnt, int protcnt)
+{
+	struct scatterlist *sgde = NULL; /* s/g data entry */
+	struct scatterlist *sgpe = NULL; /* s/g prot entry */
+	struct sli4_sge_diseed *diseed = NULL;
+	dma_addr_t dataphysaddr, protphysaddr;
+	unsigned short curr_data = 0, curr_prot = 0;
+	unsigned int split_offset;
+	unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
+	unsigned int protgrp_blks, protgrp_bytes;
+	unsigned int remainder, subtotal;
+	int status;
+	unsigned char pgdone = 0, alldone = 0;
+	unsigned blksize;
+	uint32_t reftag;
+	uint8_t txop, rxop;
+	uint32_t dma_len;
+	uint32_t rc;
+	uint32_t checking = 1;
+	uint32_t dma_offset = 0;
+	int num_sge = 0;
+
+	sgpe = scsi_prot_sglist(sc);
+	sgde = scsi_sglist(sc);
+
+	if (!sgpe || !sgde) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+				"9082 Invalid s/g entry: data=0x%p prot=0x%p\n",
+				sgpe, sgde);
+		return 0;
+	}
+
+	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
+	if (status)
+		goto out;
+
+	/* extract some info from the scsi command */
+	blksize = lpfc_cmd_blksize(sc);
+	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+	rc = lpfc_bg_err_inject(phba, sc, &reftag, 0, 1);
+	if (rc) {
+		if (rc == BG_ERR_SWAP)
+			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
+		if (rc == BG_ERR_CHECK)
+			checking = 0;
+	}
+#endif
+
+	split_offset = 0;
+	do {
+		/* setup DISEED with what we have */
+		diseed = (struct sli4_sge_diseed *) sgl;
+		memset(diseed, 0, sizeof(struct sli4_sge_diseed));
+		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);
+
+		/* Endianness conversion if necessary */
+		diseed->ref_tag = cpu_to_le32(reftag);
+		diseed->ref_tag_tran = diseed->ref_tag;
+
+		/* setup DISEED with the rest of the info */
+		bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
+		bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
+		bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
+		bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
+		bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
+		bf_set(lpfc_sli4_sge_dif_me, diseed, 0);
+
+		/* Endianness conversion if necessary for DISEED */
+		diseed->word2 = cpu_to_le32(diseed->word2);
+		diseed->word3 = cpu_to_le32(diseed->word3);
+
+		/* advance sgl and increment bde count */
+		num_sge++;
+		sgl++;
+
+		/* setup the first BDE that points to protection buffer */
+		protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
+		protgroup_len = sg_dma_len(sgpe) - protgroup_offset;
+
+		/* must be integer multiple of the DIF block length */
+		BUG_ON(protgroup_len % 8);
+
+		/* Now setup DIF SGE */
+		sgl->word2 = 0;
+		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DIF);
+		sgl->addr_hi = le32_to_cpu(putPaddrHigh(protphysaddr));
+		sgl->addr_lo = le32_to_cpu(putPaddrLow(protphysaddr));
+		sgl->word2 = cpu_to_le32(sgl->word2);
+
+		protgrp_blks = protgroup_len / 8;
+		protgrp_bytes = protgrp_blks * blksize;
+
+		/* check if DIF SGE is crossing the 4K boundary; if so split */
+		if ((sgl->addr_lo & 0xfff) + protgroup_len > 0x1000) {
+			protgroup_remainder = 0x1000 - (sgl->addr_lo & 0xfff);
+			protgroup_offset += protgroup_remainder;
+			protgrp_blks = protgroup_remainder / 8;
+			protgrp_bytes = protgrp_blks * blksize;
+		} else {
+			protgroup_offset = 0;
+			curr_prot++;
+		}
+
+		num_sge++;
+
+		/* setup SGE's for data blocks associated with DIF data */
+		pgdone = 0;
+		subtotal = 0; /* total bytes processed for current prot grp */
+		while (!pgdone) {
+			if (!sgde) {
+				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+					"9086 BLKGRD:%s Invalid data segment\n",
+						__func__);
+				return 0;
+			}
+			sgl++;
+			dataphysaddr = sg_dma_address(sgde) + split_offset;
+
+			remainder = sg_dma_len(sgde) - split_offset;
+
+			if ((subtotal + remainder) <= protgrp_bytes) {
+				/* we can use this whole buffer */
+				dma_len = remainder;
+				split_offset = 0;
+
+				if ((subtotal + remainder) == protgrp_bytes)
+					pgdone = 1;
+			} else {
+				/* must split this buffer with next prot grp */
+				dma_len = protgrp_bytes - subtotal;
+				split_offset += dma_len;
+			}
+
+			subtotal += dma_len;
+
+			sgl->addr_lo = cpu_to_le32(putPaddrLow(dataphysaddr));
+			sgl->addr_hi = cpu_to_le32(putPaddrHigh(dataphysaddr));
+			bf_set(lpfc_sli4_sge_last, sgl, 0);
+			bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
+			bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
+
+			sgl->sge_len = cpu_to_le32(dma_len);
+			dma_offset += dma_len;
+
+			num_sge++;
+			curr_data++;
+
+			if (split_offset)
+				break;
+
+			/* Move to the next s/g segment if possible */
+			sgde = sg_next(sgde);
+		}
+
+		if (protgroup_offset) {
+			/* update the reference tag */
+			reftag += protgrp_blks;
+			sgl++;
+			continue;
+		}
+
+		/* are we done ? */
+		if (curr_prot == protcnt) {
+			bf_set(lpfc_sli4_sge_last, sgl, 1);
+			alldone = 1;
+		} else if (curr_prot < protcnt) {
+			/* advance to next prot buffer */
+			sgpe = sg_next(sgpe);
+			sgl++;
+
+			/* update the reference tag */
+			reftag += protgrp_blks;
+		} else {
+			/* if we're here, we have a bug */
+			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+				"9085 BLKGRD: bug in %s\n", __func__);
+		}
+
+	} while (!alldone);
+
+out:
+
+	return num_sge;
+}
+
+/**
+ * lpfc_prot_group_type - Get protection group type of SCSI command
+ * @phba: The Hba for which this call is being executed.
+ * @sc: pointer to scsi command we're working on
+ *
  * Given a SCSI command that supports DIF, determine composition of protection
  * groups involved in setting up buffer lists
  *
- * Returns:
- *			      for DIF (for both read and write)
- * */
+ * Returns: Protection group type (with or without DIF)
+ *
+ **/
 static int
 lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
 {
@@ -1885,13 +2495,17 @@ lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
 	return ret;
 }
 
-/*
+/**
+ * lpfc_bg_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
+ * @phba: The Hba for which this call is being executed.
+ * @lpfc_cmd: The scsi buffer which is going to be prepared.
+ *
  * This is the protection/DIF aware version of
  * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
  * two functions eventually, but for now, it's here
- */
+ **/
 static int
-lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba,
+lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
 		struct lpfc_scsi_buf *lpfc_cmd)
 {
 	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
@@ -2147,7 +2761,21 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
 		cmd->sense_buffer[8] = 0;     /* Information descriptor type */
 		cmd->sense_buffer[9] = 0xa;   /* Additional descriptor length */
 		cmd->sense_buffer[10] = 0x80; /* Validity bit */
-		bghm /= cmd->device->sector_size;
+
+		/* bghm is an "on the wire" FC frame based count */
+		switch (scsi_get_prot_op(cmd)) {
+		case SCSI_PROT_READ_INSERT:
+		case SCSI_PROT_WRITE_STRIP:
+			bghm /= cmd->device->sector_size;
+			break;
+		case SCSI_PROT_READ_STRIP:
+		case SCSI_PROT_WRITE_INSERT:
+		case SCSI_PROT_READ_PASS:
+		case SCSI_PROT_WRITE_PASS:
+			bghm /= (cmd->device->sector_size +
+				sizeof(struct scsi_dif_tuple));
+			break;
+		}
 
 		failing_sector = scsi_get_lba(cmd);
 		failing_sector += bghm;
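To see why the divisor matters, a hedged example with 512-byte sectors: for
the operations that carry DIF on the wire, each block occupies
sector_size + 8 bytes of frame payload, so

	/* Hypothetical WRITE_PASS failing 128 blocks into the IO */
	bghm = 66560;			/* wire bytes reported by the HBA */
	bghm /= (512 + 8);		/* = 128 blocks; a bare 512 divisor
					 * would wrongly report block 130 */
	failing_sector = scsi_get_lba(cmd) + bghm;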
@@ -2292,6 +2920,180 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
 }
 
 /**
+ * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard
+ * @phba: The Hba for which this call is being executed.
+ * @lpfc_cmd: The scsi buffer which is going to be adjusted.
+ *
+ * Adjust the data length to account for how much data
+ * is actually on the wire.
+ *
+ * Returns: the adjusted data length
+ **/
+static int
+lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
+		struct lpfc_scsi_buf *lpfc_cmd)
+{
+	struct scsi_cmnd *sc = lpfc_cmd->pCmd;
+	int diflen, fcpdl;
+	unsigned blksize;
+
+	fcpdl = scsi_bufflen(sc);
+
+	/* Check if there is protection data on the wire */
+	if (sc->sc_data_direction == DMA_FROM_DEVICE) {
+		/* Read */
+		if (scsi_get_prot_op(sc) == SCSI_PROT_READ_INSERT)
+			return fcpdl;
+
+	} else {
+		/* Write */
+		if (scsi_get_prot_op(sc) == SCSI_PROT_WRITE_STRIP)
+			return fcpdl;
+	}
+
+	/* If protection data on the wire, adjust the count accordingly */
+	blksize = lpfc_cmd_blksize(sc);
+	diflen = (fcpdl / blksize) * 8;
+	fcpdl += diflen;
+	return fcpdl;
+}
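A minimal worked example of the adjustment, assuming 512-byte blocks and
protection data on the wire:

	fcpdl  = 4096;			/* scsi_bufflen(): a 4 KB WRITE_PASS */
	diflen = (4096 / 512) * 8;	/* 8 tuples x 8 bytes = 64 */
	fcpdl += diflen;		/* 4160 bytes actually cross the wire */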
+
+/**
+ * lpfc_bg_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
+ * @phba: The Hba for which this call is being executed.
+ * @lpfc_cmd: The scsi buffer which is going to be mapped.
+ *
+ * This is the protection/DIF aware version of
+ * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
+ * two functions eventually, but for now, it's here
+ **/
+static int
+lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
+		struct lpfc_scsi_buf *lpfc_cmd)
+{
+	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
+	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
+	struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->fcp_bpl);
+	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
+	uint32_t num_bde = 0;
+	int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
+	int prot_group_type = 0;
+	int fcpdl;
+
+	/*
+	 * Start the lpfc command prep by bumping the sgl beyond the fcp_cmnd
+	 * and fcp_rsp regions to the first data bde entry
+	 */
+	if (scsi_sg_count(scsi_cmnd)) {
+		/*
+		 * The driver stores the segment count returned from dma_map_sg
+		 * because this is a count of dma-mappings used to map the use_sg
+		 * pages.  They are not guaranteed to be the same for those
+		 * architectures that implement an IOMMU.
+		 */
+		datasegcnt = dma_map_sg(&phba->pcidev->dev,
+					scsi_sglist(scsi_cmnd),
+					scsi_sg_count(scsi_cmnd), datadir);
+		if (unlikely(!datasegcnt))
+			return 1;
+
+		sgl += 1;
+		/* clear the last flag in the fcp_rsp map entry */
+		sgl->word2 = le32_to_cpu(sgl->word2);
+		bf_set(lpfc_sli4_sge_last, sgl, 0);
+		sgl->word2 = cpu_to_le32(sgl->word2);
+
+		sgl += 1;
+		lpfc_cmd->seg_cnt = datasegcnt;
+		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+					"9087 BLKGRD: %s: Too many sg segments"
+					" from dma_map_sg.  Config %d, seg_cnt"
+					" %d\n",
+					__func__, phba->cfg_sg_seg_cnt,
+					lpfc_cmd->seg_cnt);
+			scsi_dma_unmap(scsi_cmnd);
+			return 1;
+		}
+
+		prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
+
+		switch (prot_group_type) {
+		case LPFC_PG_TYPE_NO_DIF:
+			num_bde = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl,
+					datasegcnt);
+			/* we should have 2 or more entries in buffer list */
+			if (num_bde < 2)
+				goto err;
+			break;
+		case LPFC_PG_TYPE_DIF_BUF:{
+			/*
+			 * This type indicates that protection buffers are
+			 * passed to the driver, so they need to be prepared
+			 * for DMA
+			 */
+			protsegcnt = dma_map_sg(&phba->pcidev->dev,
+					scsi_prot_sglist(scsi_cmnd),
+					scsi_prot_sg_count(scsi_cmnd), datadir);
+			if (unlikely(!protsegcnt)) {
+				scsi_dma_unmap(scsi_cmnd);
+				return 1;
+			}
+
+			lpfc_cmd->prot_seg_cnt = protsegcnt;
+			if (lpfc_cmd->prot_seg_cnt
+			    > phba->cfg_prot_sg_seg_cnt) {
+				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+					"9088 BLKGRD: %s: Too many prot sg "
+					"segments from dma_map_sg.  Config %d,"
+						"prot_seg_cnt %d\n", __func__,
+						phba->cfg_prot_sg_seg_cnt,
+						lpfc_cmd->prot_seg_cnt);
+				dma_unmap_sg(&phba->pcidev->dev,
+					     scsi_prot_sglist(scsi_cmnd),
+					     scsi_prot_sg_count(scsi_cmnd),
+					     datadir);
+				scsi_dma_unmap(scsi_cmnd);
+				return 1;
+			}
+
+			num_bde = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl,
+					datasegcnt, protsegcnt);
+			/* we should have 3 or more entries in buffer list */
+			if (num_bde < 3)
+				goto err;
+			break;
+		}
+		case LPFC_PG_TYPE_INVALID:
+		default:
+			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+					"9083 Unexpected protection group %i\n",
+					prot_group_type);
+			return 1;
+		}
+	}
+
+	fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
+
+	fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
+
+	/*
+	 * Due to difference in data length between DIF/non-DIF paths,
+	 * we need to set word 4 of IOCB here
+	 */
+	iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
+	lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF;
+
+	return 0;
+err:
+	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+			"9084 Could not setup all needed BDE's"
+			"prot_group_type=%d, num_bde=%d\n",
+			prot_group_type, num_bde);
+	return 1;
+}
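A side note on the fcpDl assignment above: the FCP_CMND data-length field
travels big-endian, and be32_to_cpu() byte-swaps identically to the more
intent-revealing cpu_to_be32(), so the two are interchangeable here:

	fcp_cmnd->fcpDl = cpu_to_be32(fcpdl);	/* same bytes as be32_to_cpu(fcpdl) */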
+
+/**
  * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
  * @phba: The Hba for which this call is being executed.
  * @lpfc_cmd: The scsi buffer which is going to be mapped.
@@ -2310,6 +3112,25 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
 }
 
 /**
+ * lpfc_bg_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
+ * using BlockGuard.
+ * @phba: The Hba for which this call is being executed.
+ * @lpfc_cmd: The scsi buffer which is going to be mapped.
+ *
+ * This routine wraps the actual DMA mapping function pointer from the
+ * lpfc_hba struct.
+ *
+ * Return codes:
+ *	1 - Error
+ *	0 - Success
+ **/
+static inline int
+lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
+{
+	return phba->lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
+}
+
+/**
  * lpfc_send_scsi_error_event - Posts an event when there is SCSI error
  * @phba: Pointer to hba context object.
  * @vport: Pointer to vport object.
@@ -3072,12 +3893,14 @@ lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
 	case LPFC_PCI_DEV_LP:
 		phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s3;
 		phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
+		phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s3;
 		phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
 		phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3;
 		break;
 	case LPFC_PCI_DEV_OC:
 		phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s4;
 		phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
+		phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s4;
 		phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
 		phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s4;
 		break;
@@ -3250,8 +4073,7 @@ lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
 	ndlp = rdata->pnode;
 
 	if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) &&
-		(!(phba->sli3_options & LPFC_SLI3_BG_ENABLED) ||
-		(phba->sli_rev == LPFC_SLI_REV4))) {
+		(!(phba->sli3_options & LPFC_SLI3_BG_ENABLED))) {
 
 		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
 				"9058 BLKGRD: ERROR: rcvd protected cmd:%02x"
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 23a27592388c..e0e4d8d18244 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -293,7 +293,9 @@ lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
 	}
 	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
 	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
-	bf_set(lpfc_eqcq_doorbell_eqid, &doorbell, q->queue_id);
+	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
+			(q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
+	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
 	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
 	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
 	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
@@ -372,7 +374,9 @@ lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
 		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
 	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
 	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
-	bf_set(lpfc_eqcq_doorbell_cqid, &doorbell, q->queue_id);
+	bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
+			(q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
+	bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
 	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
 	return released;
 }
@@ -554,81 +558,6 @@ __lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
 }
 
 /**
- * __lpfc_set_rrq_active - set RRQ active bit in the ndlp's xri_bitmap.
- * @phba: Pointer to HBA context object.
- * @ndlp: nodelist pointer for this target.
- * @xritag: xri used in this exchange.
- * @rxid: Remote Exchange ID.
- * @send_rrq: Flag used to determine if we should send rrq els cmd.
- *
- * This function is called with hbalock held.
- * The active bit is set in the ndlp's active rrq xri_bitmap. Allocates an
- * rrq struct and adds it to the active_rrq_list.
- *
- * returns  0 for rrq slot for this xri
- *         < 0  Were not able to get rrq mem or invalid parameter.
- **/
-static int
-__lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
-		uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
-{
-	struct lpfc_node_rrq *rrq;
-	int empty;
-	uint32_t did = 0;
-
-
-	if (!ndlp)
-		return -EINVAL;
-
-	if (!phba->cfg_enable_rrq)
-		return -EINVAL;
-
-	if (phba->pport->load_flag & FC_UNLOADING) {
-		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
-		goto out;
-	}
-	did = ndlp->nlp_DID;
-
-	/*
-	 * set the active bit even if there is no mem available.
-	 */
-	if (NLP_CHK_FREE_REQ(ndlp))
-		goto out;
-
-	if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
-		goto out;
-
-	if (test_and_set_bit(xritag, ndlp->active_rrqs.xri_bitmap))
-		goto out;
-
-	rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
-	if (rrq) {
-		rrq->send_rrq = send_rrq;
-		rrq->xritag = xritag;
-		rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1);
-		rrq->ndlp = ndlp;
-		rrq->nlp_DID = ndlp->nlp_DID;
-		rrq->vport = ndlp->vport;
-		rrq->rxid = rxid;
-		empty = list_empty(&phba->active_rrq_list);
-		rrq->send_rrq = send_rrq;
-		list_add_tail(&rrq->list, &phba->active_rrq_list);
-		if (!(phba->hba_flag & HBA_RRQ_ACTIVE)) {
-			phba->hba_flag |= HBA_RRQ_ACTIVE;
-			if (empty)
-				lpfc_worker_wake_up(phba);
-		}
-		return 0;
-	}
-out:
-	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
-			"2921 Can't set rrq active xri:0x%x rxid:0x%x"
-			" DID:0x%x Send:%d\n",
-			xritag, rxid, did, send_rrq);
-	return -EINVAL;
-}
-
-/**
  * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
  * @phba: Pointer to HBA context object.
  * @xritag: xri used in this exchange.
@@ -856,15 +785,68 @@ lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
  **/
 int
 lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
-			uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
+		    uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
 {
-	int ret;
 	unsigned long iflags;
+	struct lpfc_node_rrq *rrq;
+	int empty;
+
+	if (!ndlp)
+		return -EINVAL;
+
+	if (!phba->cfg_enable_rrq)
+		return -EINVAL;
 
 	spin_lock_irqsave(&phba->hbalock, iflags);
-	ret = __lpfc_set_rrq_active(phba, ndlp, xritag, rxid, send_rrq);
+	if (phba->pport->load_flag & FC_UNLOADING) {
+		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
+		goto out;
+	}
+
+	/*
+	 * set the active bit even if there is no mem available.
+	 */
+	if (NLP_CHK_FREE_REQ(ndlp))
+		goto out;
+
+	if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
+		goto out;
+
+	if (test_and_set_bit(xritag, ndlp->active_rrqs.xri_bitmap))
+		goto out;
+
 	spin_unlock_irqrestore(&phba->hbalock, iflags);
-	return ret;
+	rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
+	if (!rrq) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+				"3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
+				" DID:0x%x Send:%d\n",
+				xritag, rxid, ndlp->nlp_DID, send_rrq);
+		return -EINVAL;
+	}
+	rrq->send_rrq = send_rrq;
+	rrq->xritag = xritag;
+	rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1);
+	rrq->ndlp = ndlp;
+	rrq->nlp_DID = ndlp->nlp_DID;
+	rrq->vport = ndlp->vport;
+	rrq->rxid = rxid;
+	rrq->send_rrq = send_rrq;
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	empty = list_empty(&phba->active_rrq_list);
+	list_add_tail(&rrq->list, &phba->active_rrq_list);
+	phba->hba_flag |= HBA_RRQ_ACTIVE;
+	if (empty)
+		lpfc_worker_wake_up(phba);
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	return 0;
+out:
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+			"2921 Can't set rrq active xri:0x%x rxid:0x%x"
+			" DID:0x%x Send:%d\n",
+			xritag, rxid, ndlp->nlp_DID, send_rrq);
+	return -EINVAL;
 }
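The rework above follows the standard kernel rule that a GFP_KERNEL
allocation may sleep and so must never run under a spinlock: the cheap
checks stay under hbalock, the lock is dropped around mempool_alloc(), and
it is retaken only to publish the new rrq. In the abstract (a sketch; the
names are illustrative, not driver API):

	spin_lock_irqsave(&lock, flags);
	if (!preconditions_ok())		/* cheap checks under the lock */
		goto out_unlock;
	spin_unlock_irqrestore(&lock, flags);

	obj = mempool_alloc(pool, GFP_KERNEL);	/* may sleep; lock not held */
	if (!obj)
		return -EINVAL;

	spin_lock_irqsave(&lock, flags);	/* retake only to publish */
	list_add_tail(&obj->list, &active_list);
	spin_unlock_irqrestore(&lock, flags);
	return 0;

out_unlock:
	spin_unlock_irqrestore(&lock, flags);
	return -EINVAL;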
 
 /**
@@ -5596,6 +5578,8 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
 		for (i = 0; i < count; i++)
 			phba->sli4_hba.rpi_ids[i] = base + i;
 
+		lpfc_sli4_node_prep(phba);
+
 		/* VPIs. */
 		count = phba->sli4_hba.max_cfg_param.max_vpi;
 		base = phba->sli4_hba.max_cfg_param.vpi_base;
@@ -7555,6 +7539,8 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
 
 	sgl  = (struct sli4_sge *)sglq->sgl;
 	icmd = &piocbq->iocb;
+	if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX)
+		return sglq->sli4_xritag;
 	if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
 		numBdes = icmd->un.genreq64.bdl.bdeSize /
 				sizeof(struct ulp_bde64);
@@ -7756,6 +7742,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
 		if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
 			if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
 				*pcmd == ELS_CMD_SCR ||
+				*pcmd == ELS_CMD_FDISC ||
 				*pcmd == ELS_CMD_PLOGI)) {
 				bf_set(els_req64_sp, &wqe->els_req, 1);
 				bf_set(els_req64_sid, &wqe->els_req,
@@ -7763,7 +7750,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
 				bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
 				bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
 					phba->vpi_ids[phba->pport->vpi]);
-			} else if (iocbq->context1) {
+			} else if (pcmd && iocbq->context1) {
 				bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
 				bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
 					phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
@@ -7830,12 +7817,16 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
 		bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
 		/* Always open the exchange */
 		bf_set(wqe_xc, &wqe->fcp_iwrite.wqe_com, 0);
-		bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
 		bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
 		bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
 		       LPFC_WQE_LENLOC_WORD4);
 		bf_set(wqe_ebde_cnt, &wqe->fcp_iwrite.wqe_com, 0);
 		bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
+		if (iocbq->iocb_flag & LPFC_IO_DIF) {
+			iocbq->iocb_flag &= ~LPFC_IO_DIF;
+			bf_set(wqe_dif, &wqe->generic.wqe_com, 1);
+		}
+		bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
 		break;
 	case CMD_FCP_IREAD64_CR:
 		/* word3 iocb=iotag wqe=payload_offset_len */
@@ -7849,12 +7840,16 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
 		bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
 		/* Always open the exchange */
 		bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0);
-		bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
 		bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
 		bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
 		       LPFC_WQE_LENLOC_WORD4);
 		bf_set(wqe_ebde_cnt, &wqe->fcp_iread.wqe_com, 0);
 		bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
+		if (iocbq->iocb_flag & LPFC_IO_DIF) {
+			iocbq->iocb_flag &= ~LPFC_IO_DIF;
+			bf_set(wqe_dif, &wqe->generic.wqe_com, 1);
+		}
+		bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
 		break;
 	case CMD_FCP_ICMND64_CR:
 		/* word3 iocb=IO_TAG wqe=reserved */
@@ -7982,6 +7977,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
 		xritag = 0;
 		break;
 	case CMD_XMIT_BLS_RSP64_CX:
+		ndlp = (struct lpfc_nodelist *)iocbq->context1;
 		/* As BLS ABTS RSP WQE is very different from other WQEs,
 		 * we re-construct this WQE here based on information in
 		 * iocbq from scratch.
@@ -8008,8 +8004,15 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
 		}
 		bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
 		bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
+
+		/* Use CT=VPI */
+		bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest,
+			ndlp->nlp_DID);
+		bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp,
+			iocbq->iocb.ulpContext);
+		bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
 		bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
-		       iocbq->iocb.ulpContext);
+			phba->vpi_ids[phba->pport->vpi]);
 		bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
 		bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
 		       LPFC_WQE_LENLOC_NONE);
@@ -8073,8 +8076,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
 
 	if (piocb->sli4_xritag == NO_XRI) {
 		if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
-		    piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN ||
-		    piocb->iocb.ulpCommand == CMD_XMIT_BLS_RSP64_CX)
+		    piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
 			sglq = NULL;
 		else {
 			if (pring->txq_cnt) {
@@ -8384,10 +8386,13 @@ lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
 {
 	struct lpfc_vport *vport;
 
-	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
 		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
 				"3115 Node Context not found, driver "
 				"ignoring abts err event\n");
+		return;
+	}
+
 	vport = ndlp->vport;
 	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
 			"3116 Port generated FCP XRI ABORT event on "
@@ -10653,12 +10658,14 @@ lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
 			      struct lpfc_wcqe_complete *wcqe)
 {
 	unsigned long iflags;
+	uint32_t status;
 	size_t offset = offsetof(struct lpfc_iocbq, iocb);
 
 	memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
 	       sizeof(struct lpfc_iocbq) - offset);
 	/* Map WCQE parameters into irspiocb parameters */
-	pIocbIn->iocb.ulpStatus = bf_get(lpfc_wcqe_c_status, wcqe);
+	status = bf_get(lpfc_wcqe_c_status, wcqe);
+	pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK);
 	if (pIocbOut->iocb_flag & LPFC_IO_FCP)
 		if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
 			pIocbIn->iocb.un.fcpi.fcpi_parm =
@@ -10671,6 +10678,44 @@ lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
 		pIocbIn->iocb.un.genreq64.bdl.bdeSize = wcqe->total_data_placed;
 	}
 
+	/* Convert BG errors for completion status */
+	if (status == CQE_STATUS_DI_ERROR) {
+		pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
+
+		if (bf_get(lpfc_wcqe_c_bg_edir, wcqe))
+			pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED;
+		else
+			pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED;
+
+		pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0;
+		if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */
+			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
+				BGS_GUARD_ERR_MASK;
+		if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* App Tag Check failed */
+			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
+				BGS_APPTAG_ERR_MASK;
+		if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* Ref Tag Check failed */
+			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
+				BGS_REFTAG_ERR_MASK;
+
+		/* Check to see if there was any good data before the error */
+		if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
+			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
+				BGS_HI_WATER_MARK_PRESENT_MASK;
+			pIocbIn->iocb.unsli3.sli3_bg.bghm =
+				wcqe->total_data_placed;
+		}
+
+		/*
+		 * Set ALL the error bits to indicate we don't know what
+		 * type of error it is.
+		 */
+		if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat)
+			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
+				(BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
+				BGS_GUARD_ERR_MASK);
+	}
+
 	/* Pick up HBA exchange busy condition */
 	if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
 		spin_lock_irqsave(&phba->hbalock, iflags);
@@ -14042,6 +14087,13 @@ lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
 {
 	if (cmd_iocbq)
 		lpfc_sli_release_iocbq(phba, cmd_iocbq);
+
+	/* Failure means BLS ABORT RSP did not get delivered to remote node */
+	if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+			"3154 BLS ABORT RSP failed, data:  x%x/x%x\n",
+			rsp_iocbq->iocb.ulpStatus,
+			rsp_iocbq->iocb.un.ulpWord[4]);
 }
 
 /**
@@ -14748,7 +14800,8 @@ lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
  * provided rpi via a bitmask.
  **/
 int
-lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp)
+lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
+	void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
 {
 	LPFC_MBOXQ_t *mboxq;
 	struct lpfc_hba *phba = ndlp->phba;
@@ -14761,6 +14814,13 @@ lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp)
 
 	/* Post all rpi memory regions to the port. */
 	lpfc_resume_rpi(mboxq, ndlp);
+	if (cmpl) {
+		mboxq->mbox_cmpl = cmpl;
+		mboxq->context1 = arg;
+		mboxq->context2 = ndlp;
+	} else
+		mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+	mboxq->vport = ndlp->vport;
 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
 	if (rc == MBX_NOT_FINISHED) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 29c13b63e323..3290b8e7ab65 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -69,6 +69,7 @@ struct lpfc_iocbq {
 #define LPFC_USE_FCPWQIDX	0x80    /* Submit to specified FCPWQ index */
 #define DSS_SECURITY_OP		0x100	/* security IO */
 #define LPFC_IO_ON_Q		0x200	/* The IO is still on the TXCMPLQ */
+#define LPFC_IO_DIF		0x400	/* T10 DIF IO */
 
 #define LPFC_FIP_ELS_ID_MASK	0xc000	/* ELS_ID range 0-3, non-shifted mask */
 #define LPFC_FIP_ELS_ID_SHIFT	14
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 3f266e2c54e0..c19d139618b7 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -633,7 +633,8 @@ void lpfc_sli4_free_rpi(struct lpfc_hba *, int);
 void lpfc_sli4_remove_rpis(struct lpfc_hba *);
 void lpfc_sli4_async_event_proc(struct lpfc_hba *);
 void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *);
-int lpfc_sli4_resume_rpi(struct lpfc_nodelist *);
+int lpfc_sli4_resume_rpi(struct lpfc_nodelist *,
+			void (*)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *);
 void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *);
 void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *);
 void lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *,
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index dd044d01a07f..f2a2602e5c35 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "8.3.28"
+#define LPFC_DRIVER_VERSION "8.3.29"
 #define LPFC_DRIVER_NAME		"lpfc"
 #define LPFC_SP_DRIVER_HANDLER_NAME	"lpfc:sp"
 #define LPFC_FP_DRIVER_HANDLER_NAME	"lpfc:fp"
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index a78036f5e1a6..82fa6ce481f0 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -2575,6 +2575,11 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc,  int sleep_flag)
 
 	ioc->chain_lookup = (struct chain_tracker *)__get_free_pages(
 	    GFP_KERNEL, ioc->chain_pages);
+	if (!ioc->chain_lookup) {
+		printk(MPT2SAS_ERR_FMT "chain_lookup: get_free_pages failed, "
+		    "sz(%d)\n", ioc->name, (int)sz);
+		goto out;
+	}
 	ioc->chain_dma_pool = pci_pool_create("chain pool", ioc->pdev,
 	    ioc->request_sz, 16, 0);
 	if (!ioc->chain_dma_pool) {
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index 193e33e28e49..d953a57e779d 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -5744,7 +5744,7 @@ _scsih_sas_enclosure_dev_status_change_event(struct MPT2SAS_ADAPTER *ioc,
 }
 
 /**
- * _scsih_sas_broadcast_primative_event - handle broadcast events
+ * _scsih_sas_broadcast_primitive_event - handle broadcast events
  * @ioc: per adapter object
  * @fw_event: The fw_event_work object
  * Context: user.
@@ -5752,7 +5752,7 @@ _scsih_sas_enclosure_dev_status_change_event(struct MPT2SAS_ADAPTER *ioc,
  * Return nothing.
  */
 static void
-_scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc,
+_scsih_sas_broadcast_primitive_event(struct MPT2SAS_ADAPTER *ioc,
     struct fw_event_work *fw_event)
 {
 	struct scsi_cmnd *scmd;
@@ -7263,7 +7263,7 @@ _firmware_event_work(struct work_struct *work)
 		    fw_event);
 		break;
 	case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
-		_scsih_sas_broadcast_primative_event(ioc,
+		_scsih_sas_broadcast_primitive_event(ioc,
 		    fw_event);
 		break;
 	case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index 6f589195746c..cc59dff3810b 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -60,7 +60,6 @@ static struct scsi_host_template mvs_sht = {
 	.queuecommand		= sas_queuecommand,
 	.target_alloc		= sas_target_alloc,
 	.slave_configure	= sas_slave_configure,
-	.slave_destroy		= sas_slave_destroy,
 	.scan_finished		= mvs_scan_finished,
 	.scan_start		= mvs_scan_start,
 	.change_queue_depth	= sas_change_queue_depth,
@@ -74,7 +73,6 @@ static struct scsi_host_template mvs_sht = {
 	.use_clustering		= ENABLE_CLUSTERING,
 	.eh_device_reset_handler = sas_eh_device_reset_handler,
 	.eh_bus_reset_handler	= sas_eh_bus_reset_handler,
-	.slave_alloc		= sas_slave_alloc,
 	.target_destroy		= sas_target_destroy,
 	.ioctl			= sas_ioctl,
 	.shost_attrs		= mvst_host_attrs,
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index 01ab9c4d3464..fd3b2839843b 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -308,7 +308,7 @@ int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time)
 	if (mvs_prv->scan_finished == 0)
 		return 0;
 
-	scsi_flush_work(shost);
+	sas_drain_work(sha);
 	return 1;
 }
 
@@ -893,9 +893,6 @@ static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags,
 
 	mvi = ((struct mvs_device *)task->dev->lldd_dev)->mvi_info;
 
-	if ((dev->dev_type == SATA_DEV) && (dev->sata_dev.ap != NULL))
-		spin_unlock_irq(dev->sata_dev.ap->lock);
-
 	spin_lock_irqsave(&mvi->lock, flags);
 	rc = mvs_task_prep(task, mvi, is_tmf, tmf, &pass);
 	if (rc)
@@ -906,9 +903,6 @@ static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags,
 				(MVS_CHIP_SLOT_SZ - 1));
 	spin_unlock_irqrestore(&mvi->lock, flags);
 
-	if ((dev->dev_type == SATA_DEV) && (dev->sata_dev.ap != NULL))
-		spin_lock_irq(dev->sata_dev.ap->lock);
-
 	return rc;
 }
 
@@ -1480,10 +1474,11 @@ static int mvs_debug_issue_ssp_tmf(struct domain_device *dev,
 static int mvs_debug_I_T_nexus_reset(struct domain_device *dev)
 {
 	int rc;
-	struct sas_phy *phy = sas_find_local_phy(dev);
+	struct sas_phy *phy = sas_get_local_phy(dev);
 	int reset_type = (dev->dev_type == SATA_DEV ||
 			(dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
 	rc = sas_phy_reset(phy, reset_type);
+	sas_put_local_phy(phy);
 	msleep(2000);
 	return rc;
 }
diff --git a/drivers/scsi/pm8001/pm8001_chips.h b/drivers/scsi/pm8001/pm8001_chips.h
index 4efa4d0950e5..9241c7826034 100644
--- a/drivers/scsi/pm8001/pm8001_chips.h
+++ b/drivers/scsi/pm8001/pm8001_chips.h
@@ -46,9 +46,9 @@ static inline u32 pm8001_read_32(void *virt_addr)
 	return *((u32 *)virt_addr);
 }
 
-static inline void pm8001_write_32(void *addr, u32 offset, u32 val)
+static inline void pm8001_write_32(void *addr, u32 offset, __le32 val)
 {
-	*((u32 *)(addr + offset)) = val;
+	*((__le32 *)(addr + offset)) = val;
 }
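With the __le32 annotation, sparse can flag callers that pass CPU-endian
values; a conforming caller converts explicitly (illustrative only):

	pm8001_write_32(virt_addr, offset, cpu_to_le32(val));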
 
 static inline u32 pm8001_cr32(struct pm8001_hba_info *pm8001_ha, u32 bar,
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index e12c4f632a63..3619f6eeeeda 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -338,26 +338,25 @@ update_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha, int number)
 }
 
 /**
- * bar4_shift - function is called to shift BAR base address
- * @pm8001_ha : our hba card information
+ * pm8001_bar4_shift - function is called to shift BAR base address
+ * @pm8001_ha : our hba card information
  * @shiftValue : shifting value in memory bar.
  */
-static int bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue)
+int pm8001_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue)
 {
 	u32 regVal;
-	u32 max_wait_count;
+	unsigned long start;
 
 	/* program the inbound AXI translation Lower Address */
 	pm8001_cw32(pm8001_ha, 1, SPC_IBW_AXI_TRANSLATION_LOW, shiftValue);
 
 	/* confirm the setting is written */
-	max_wait_count = 1 * 1000 * 1000;  /* 1 sec */
+	start = jiffies + HZ; /* 1 sec */
 	do {
-		udelay(1);
 		regVal = pm8001_cr32(pm8001_ha, 1, SPC_IBW_AXI_TRANSLATION_LOW);
-	} while ((regVal != shiftValue) && (--max_wait_count));
+	} while ((regVal != shiftValue) && time_before(jiffies, start));
 
-	if (!max_wait_count) {
+	if (regVal != shiftValue) {
 		PM8001_INIT_DBG(pm8001_ha,
 			pm8001_printk("TIMEOUT:SPC_IBW_AXI_TRANSLATION_LOW"
 			" = 0x%x\n", regVal));
@@ -375,6 +374,7 @@ static void __devinit
 mpi_set_phys_g3_with_ssc(struct pm8001_hba_info *pm8001_ha, u32 SSCbit)
 {
 	u32 value, offset, i;
+	unsigned long flags;
 
 #define SAS2_SETTINGS_LOCAL_PHY_0_3_SHIFT_ADDR 0x00030000
 #define SAS2_SETTINGS_LOCAL_PHY_4_7_SHIFT_ADDR 0x00040000
@@ -388,16 +388,23 @@ mpi_set_phys_g3_with_ssc(struct pm8001_hba_info *pm8001_ha, u32 SSCbit)
     * Using shifted destination address 0x3_0000:0x1074 + 0x4000*N (N=0:3)
     * Using shifted destination address 0x4_0000:0x1074 + 0x4000*(N-4) (N=4:7)
     */
-	if (-1 == bar4_shift(pm8001_ha, SAS2_SETTINGS_LOCAL_PHY_0_3_SHIFT_ADDR))
+	spin_lock_irqsave(&pm8001_ha->lock, flags);
+	if (-1 == pm8001_bar4_shift(pm8001_ha,
+				SAS2_SETTINGS_LOCAL_PHY_0_3_SHIFT_ADDR)) {
+		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
 		return;
+	}
 
 	for (i = 0; i < 4; i++) {
 		offset = SAS2_SETTINGS_LOCAL_PHY_0_3_OFFSET + 0x4000 * i;
 		pm8001_cw32(pm8001_ha, 2, offset, 0x80001501);
 	}
 	/* shift membase 3 for SAS2_SETTINGS_LOCAL_PHY 4 - 7 */
-	if (-1 == bar4_shift(pm8001_ha, SAS2_SETTINGS_LOCAL_PHY_4_7_SHIFT_ADDR))
+	if (-1 == pm8001_bar4_shift(pm8001_ha,
+				SAS2_SETTINGS_LOCAL_PHY_4_7_SHIFT_ADDR)) {
+		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
 		return;
+	}
 	for (i = 4; i < 8; i++) {
 		offset = SAS2_SETTINGS_LOCAL_PHY_4_7_OFFSET + 0x4000 * (i-4);
 		pm8001_cw32(pm8001_ha, 2, offset, 0x80001501);
@@ -421,7 +428,8 @@ mpi_set_phys_g3_with_ssc(struct pm8001_hba_info *pm8001_ha, u32 SSCbit)
 	pm8001_cw32(pm8001_ha, 2, 0xd8, 0x8000C016);
 
 	/* set the shifted destination address to 0x0 to avoid error operation */
-	bar4_shift(pm8001_ha, 0x0);
+	pm8001_bar4_shift(pm8001_ha, 0x0);
+	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
 	return;
 }
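
The locking added above establishes the rule the rest of this patch follows: every shift of the BAR4 window happens under pm8001_ha->lock, and on success the window is shifted back to 0 before the lock is dropped, so no other path can observe a stale window. The pattern in isolation (SHIFT_ADDR, OFFSET and VALUE are placeholders):

    spin_lock_irqsave(&pm8001_ha->lock, flags);
    if (pm8001_bar4_shift(pm8001_ha, SHIFT_ADDR) == -1) {
        spin_unlock_irqrestore(&pm8001_ha->lock, flags);
        return;			/* shift failed; window unchanged */
    }
    pm8001_cw32(pm8001_ha, 2, OFFSET, VALUE);	/* access via the window */
    pm8001_bar4_shift(pm8001_ha, 0x0);		/* restore default window */
    spin_unlock_irqrestore(&pm8001_ha->lock, flags);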
 
@@ -437,6 +445,7 @@ mpi_set_open_retry_interval_reg(struct pm8001_hba_info *pm8001_ha,
 	u32 offset;
 	u32 value;
 	u32 i;
+	unsigned long flags;
 
 #define OPEN_RETRY_INTERVAL_PHY_0_3_SHIFT_ADDR 0x00030000
 #define OPEN_RETRY_INTERVAL_PHY_4_7_SHIFT_ADDR 0x00040000
@@ -445,24 +454,30 @@ mpi_set_open_retry_interval_reg(struct pm8001_hba_info *pm8001_ha,
 #define OPEN_RETRY_INTERVAL_REG_MASK 0x0000FFFF
 
 	value = interval & OPEN_RETRY_INTERVAL_REG_MASK;
+	spin_lock_irqsave(&pm8001_ha->lock, flags);
 	/* shift bar and set the OPEN_REJECT(RETRY) interval time of PHY 0 -3.*/
-	if (-1 == bar4_shift(pm8001_ha,
-			     OPEN_RETRY_INTERVAL_PHY_0_3_SHIFT_ADDR))
+	if (-1 == pm8001_bar4_shift(pm8001_ha,
+			     OPEN_RETRY_INTERVAL_PHY_0_3_SHIFT_ADDR)) {
+		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
 		return;
+	}
 	for (i = 0; i < 4; i++) {
 		offset = OPEN_RETRY_INTERVAL_PHY_0_3_OFFSET + 0x4000 * i;
 		pm8001_cw32(pm8001_ha, 2, offset, value);
 	}
 
-	if (-1 == bar4_shift(pm8001_ha,
-			     OPEN_RETRY_INTERVAL_PHY_4_7_SHIFT_ADDR))
+	if (-1 == pm8001_bar4_shift(pm8001_ha,
+			     OPEN_RETRY_INTERVAL_PHY_4_7_SHIFT_ADDR)) {
+		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
 		return;
+	}
 	for (i = 4; i < 8; i++) {
 		offset = OPEN_RETRY_INTERVAL_PHY_4_7_OFFSET + 0x4000 * (i-4);
 		pm8001_cw32(pm8001_ha, 2, offset, value);
 	}
 	/*set the shifted destination address to 0x0 to avoid error operation */
-	bar4_shift(pm8001_ha, 0x0);
+	pm8001_bar4_shift(pm8001_ha, 0x0);
+	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
 	return;
 }
 
@@ -607,7 +622,8 @@ static int __devinit pm8001_chip_init(struct pm8001_hba_info *pm8001_ha)
 	update_inbnd_queue_table(pm8001_ha, 0);
 	update_outbnd_queue_table(pm8001_ha, 0);
 	mpi_set_phys_g3_with_ssc(pm8001_ha, 0);
-	mpi_set_open_retry_interval_reg(pm8001_ha, 7);
+	/* 7->130ms, 34->500ms, 119->1.5s */
+	mpi_set_open_retry_interval_reg(pm8001_ha, 119);
 	/* notify firmware update finished and check initialization status */
 	if (0 == mpi_init_check(pm8001_ha)) {
 		PM8001_INIT_DBG(pm8001_ha,
@@ -688,8 +704,11 @@ static u32 soft_reset_ready_check(struct pm8001_hba_info *pm8001_ha)
 		PM8001_INIT_DBG(pm8001_ha,
 			pm8001_printk("Firmware is ready for reset .\n"));
 	} else {
-	/* Trigger NMI twice via RB6 */
-		if (-1 == bar4_shift(pm8001_ha, RB6_ACCESS_REG)) {
+		unsigned long flags;
+		/* Trigger NMI twice via RB6 */
+		spin_lock_irqsave(&pm8001_ha->lock, flags);
+		if (-1 == pm8001_bar4_shift(pm8001_ha, RB6_ACCESS_REG)) {
+			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
 			PM8001_FAIL_DBG(pm8001_ha,
 				pm8001_printk("Shift Bar4 to 0x%x failed\n",
 					RB6_ACCESS_REG));
@@ -715,8 +734,10 @@ static u32 soft_reset_ready_check(struct pm8001_hba_info *pm8001_ha)
 			PM8001_FAIL_DBG(pm8001_ha,
 				pm8001_printk("SCRATCH_PAD3 value = 0x%x\n",
 				pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3)));
+			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
 			return -1;
 		}
+		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
 	}
 	return 0;
 }
@@ -733,6 +754,7 @@ pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha, u32 signature)
 	u32	regVal, toggleVal;
 	u32	max_wait_count;
 	u32	regVal1, regVal2, regVal3;
+	unsigned long flags;
 
 	/* step1: Check FW is ready for soft reset */
 	if (soft_reset_ready_check(pm8001_ha) != 0) {
@@ -743,7 +765,9 @@ pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha, u32 signature)
 	/* step 2: clear NMI status register on AAP1 and IOP, write the same
 	value to clear */
 	/* map 0x60000 to BAR4(0x20), BAR2(win) */
-	if (-1 == bar4_shift(pm8001_ha, MBIC_AAP1_ADDR_BASE)) {
+	spin_lock_irqsave(&pm8001_ha->lock, flags);
+	if (-1 == pm8001_bar4_shift(pm8001_ha, MBIC_AAP1_ADDR_BASE)) {
+		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
 		PM8001_FAIL_DBG(pm8001_ha,
 			pm8001_printk("Shift Bar4 to 0x%x failed\n",
 			MBIC_AAP1_ADDR_BASE));
@@ -754,7 +778,8 @@ pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha, u32 signature)
 		pm8001_printk("MBIC - NMI Enable VPE0 (IOP)= 0x%x\n", regVal));
 	pm8001_cw32(pm8001_ha, 2, MBIC_NMI_ENABLE_VPE0_IOP, 0x0);
 	/* map 0x70000 to BAR4(0x20), BAR2(win) */
-	if (-1 == bar4_shift(pm8001_ha, MBIC_IOP_ADDR_BASE)) {
+	if (-1 == pm8001_bar4_shift(pm8001_ha, MBIC_IOP_ADDR_BASE)) {
+		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
 		PM8001_FAIL_DBG(pm8001_ha,
 			pm8001_printk("Shift Bar4 to 0x%x failed\n",
 			MBIC_IOP_ADDR_BASE));
@@ -796,7 +821,8 @@ pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha, u32 signature)
 
 	/* read required registers for confirmming */
 	/* map 0x0700000 to BAR4(0x20), BAR2(win) */
-	if (-1 == bar4_shift(pm8001_ha, GSM_ADDR_BASE)) {
+	if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_ADDR_BASE)) {
+		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
 		PM8001_FAIL_DBG(pm8001_ha,
 			pm8001_printk("Shift Bar4 to 0x%x failed\n",
 			GSM_ADDR_BASE));
@@ -862,7 +888,8 @@ pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha, u32 signature)
 	/* step 5: delay 10 usec */
 	udelay(10);
 	/* step 5-b: set GPIO-0 output control to tristate anyway */
-	if (-1 == bar4_shift(pm8001_ha, GPIO_ADDR_BASE)) {
+	if (-1 == pm8001_bar4_shift(pm8001_ha, GPIO_ADDR_BASE)) {
+		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
 		PM8001_INIT_DBG(pm8001_ha,
 				pm8001_printk("Shift Bar4 to 0x%x failed\n",
 				GPIO_ADDR_BASE));
@@ -878,7 +905,8 @@ pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha, u32 signature)
 
 	/* Step 6: Reset the IOP and AAP1 */
 	/* map 0x00000 to BAR4(0x20), BAR2(win) */
-	if (-1 == bar4_shift(pm8001_ha, SPC_TOP_LEVEL_ADDR_BASE)) {
+	if (-1 == pm8001_bar4_shift(pm8001_ha, SPC_TOP_LEVEL_ADDR_BASE)) {
+		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
 		PM8001_FAIL_DBG(pm8001_ha,
 			pm8001_printk("SPC Shift Bar4 to 0x%x failed\n",
 			SPC_TOP_LEVEL_ADDR_BASE));
@@ -915,7 +943,8 @@ pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha, u32 signature)
 
 	/* step 11: reads and sets the GSM Configuration and Reset Register */
 	/* map 0x0700000 to BAR4(0x20), BAR2(win) */
-	if (-1 == bar4_shift(pm8001_ha, GSM_ADDR_BASE)) {
+	if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_ADDR_BASE)) {
+		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
 		PM8001_FAIL_DBG(pm8001_ha,
 			pm8001_printk("SPC Shift Bar4 to 0x%x failed\n",
 			GSM_ADDR_BASE));
@@ -968,7 +997,8 @@ pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha, u32 signature)
 
 	/* step 13: bring the IOP and AAP1 out of reset */
 	/* map 0x00000 to BAR4(0x20), BAR2(win) */
-	if (-1 == bar4_shift(pm8001_ha, SPC_TOP_LEVEL_ADDR_BASE)) {
+	if (-1 == pm8001_bar4_shift(pm8001_ha, SPC_TOP_LEVEL_ADDR_BASE)) {
+		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
 		PM8001_FAIL_DBG(pm8001_ha,
 			pm8001_printk("Shift Bar4 to 0x%x failed\n",
 			SPC_TOP_LEVEL_ADDR_BASE));
@@ -1010,6 +1040,7 @@ pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha, u32 signature)
 				pm8001_printk("SCRATCH_PAD3 value = 0x%x\n",
 				pm8001_cr32(pm8001_ha, 0,
 				MSGU_SCRATCH_PAD_3)));
+			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
 			return -1;
 		}
 
@@ -1039,9 +1070,12 @@ pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha, u32 signature)
 				pm8001_printk("SCRATCH_PAD3 value = 0x%x\n",
 				pm8001_cr32(pm8001_ha, 0,
 				MSGU_SCRATCH_PAD_3)));
+			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
 			return -1;
 		}
 	}
+	pm8001_bar4_shift(pm8001_ha, 0);
+	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
 
 	PM8001_INIT_DBG(pm8001_ha,
 		pm8001_printk("SPC soft reset Complete\n"));
@@ -1157,8 +1191,8 @@ pm8001_chip_msix_interrupt_disable(struct pm8001_hba_info *pm8001_ha,
 	msi_index = int_vec_idx * MSIX_TABLE_ELEMENT_SIZE;
 	msi_index += MSIX_TABLE_BASE;
 	pm8001_cw32(pm8001_ha, 0,  msi_index, MSIX_INTERRUPT_DISABLE);
-
 }
+
 /**
  * pm8001_chip_interrupt_enable - enable PM8001 chip interrupt
  * @pm8001_ha: our hba card information
@@ -1212,7 +1246,7 @@ static int mpi_msg_free_get(struct inbound_queue_table *circularQ,
 	consumer_index = pm8001_read_32(circularQ->ci_virt);
 	circularQ->consumer_index = cpu_to_le32(consumer_index);
 	if (((circularQ->producer_idx + bcCount) % 256) ==
-		circularQ->consumer_index) {
+		le32_to_cpu(circularQ->consumer_index)) {
 		*messagePtr = NULL;
 		return -1;
 	}
@@ -1321,7 +1355,8 @@ static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
 	u32 header_tmp;
 	do {
 		/* If there are not-yet-delivered messages ... */
-		if (circularQ->producer_index != circularQ->consumer_idx) {
+		if (le32_to_cpu(circularQ->producer_index)
+			!= circularQ->consumer_idx) {
 			/*Get the pointer to the circular queue buffer element*/
 			msgHeader = (struct mpi_msg_hdr *)
 				(circularQ->base_virt +
@@ -1329,14 +1364,14 @@ static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
 			/* read header */
 			header_tmp = pm8001_read_32(msgHeader);
 			msgHeader_tmp = cpu_to_le32(header_tmp);
-			if (0 != (msgHeader_tmp & 0x80000000)) {
+			if (0 != (le32_to_cpu(msgHeader_tmp) & 0x80000000)) {
 				if (OPC_OUB_SKIP_ENTRY !=
-					(msgHeader_tmp & 0xfff)) {
+					(le32_to_cpu(msgHeader_tmp) & 0xfff)) {
 					*messagePtr1 =
 						((u8 *)msgHeader) +
 						sizeof(struct mpi_msg_hdr);
-					*pBC = (u8)((msgHeader_tmp >> 24) &
-						0x1f);
+					*pBC = (u8)((le32_to_cpu(msgHeader_tmp)
+						>> 24) & 0x1f);
 					PM8001_IO_DBG(pm8001_ha,
 						pm8001_printk(": CI=%d PI=%d "
 						"msgHeader=%x\n",
@@ -1347,8 +1382,8 @@ static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
 				} else {
 					circularQ->consumer_idx =
 						(circularQ->consumer_idx +
-						((msgHeader_tmp >> 24) & 0x1f))
-						% 256;
+						((le32_to_cpu(msgHeader_tmp)
+						>> 24) & 0x1f)) % 256;
 					msgHeader_tmp = 0;
 					pm8001_write_32(msgHeader, 0, 0);
 					/* update the CI of outbound queue */
@@ -1360,7 +1395,8 @@ static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
 			} else {
 				circularQ->consumer_idx =
 					(circularQ->consumer_idx +
-					((msgHeader_tmp >> 24) & 0x1f)) % 256;
+					((le32_to_cpu(msgHeader_tmp) >> 24) &
+					0x1f)) % 256;
 				msgHeader_tmp = 0;
 				pm8001_write_32(msgHeader, 0, 0);
 				/* update the CI of outbound queue */
@@ -1376,7 +1412,8 @@ static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
 			producer_index = pm8001_read_32(pi_virt);
 			circularQ->producer_index = cpu_to_le32(producer_index);
 		}
-	} while (circularQ->producer_index != circularQ->consumer_idx);
+	} while (le32_to_cpu(circularQ->producer_index) !=
+		circularQ->consumer_idx);
 	/* while we don't have any more not-yet-delivered message */
 	/* report empty */
 	return MPI_IO_STATUS_BUSY;
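
The queue hunks above all enforce one endianness rule: bookkeeping fields shared with the controller (consumer_index, producer_index, message headers) stay __le32 in memory, and every CPU-side comparison or shift goes through le32_to_cpu() at the point of use. Condensed from the mpi_msg_free_get() hunk:

    /* store: the value read from shared memory is kept little-endian */
    circularQ->consumer_index = cpu_to_le32(pm8001_read_32(circularQ->ci_virt));

    /* use: convert exactly where the CPU compares or computes */
    if (((circularQ->producer_idx + bcCount) % 256) ==
        le32_to_cpu(circularQ->consumer_index))
        return -1;		/* ring full */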
@@ -1388,24 +1425,191 @@ static void pm8001_work_fn(struct work_struct *work)
 	struct pm8001_device *pm8001_dev;
 	struct domain_device *dev;
 
+	/*
+	 * So far, every user of this work function stashes an associated
+	 * structure here. If we get here and the pointer is NULL, the
+	 * action was cancelled; that nullification happens when the
+	 * device goes away.
+	 */
+	pm8001_dev = pw->data; /* most handlers stash the device structure */
+	if ((pm8001_dev == NULL)
+	 || ((pw->handler != IO_XFER_ERROR_BREAK)
+	  && (pm8001_dev->dev_type == NO_DEVICE))) {
+		kfree(pw);
+		return;
+	}
+
 	switch (pw->handler) {
+	case IO_XFER_ERROR_BREAK:
+	{	/* This one stashes the sas_task instead */
+		struct sas_task *t = (struct sas_task *)pm8001_dev;
+		u32 tag;
+		struct pm8001_ccb_info *ccb;
+		struct pm8001_hba_info *pm8001_ha = pw->pm8001_ha;
+		unsigned long flags, flags1;
+		struct task_status_struct *ts;
+		int i;
+
+		if (pm8001_query_task(t) == TMF_RESP_FUNC_SUCC)
+			break; /* Task still on lu */
+		spin_lock_irqsave(&pm8001_ha->lock, flags);
+
+		spin_lock_irqsave(&t->task_state_lock, flags1);
+		if (unlikely((t->task_state_flags & SAS_TASK_STATE_DONE))) {
+			spin_unlock_irqrestore(&t->task_state_lock, flags1);
+			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+			break; /* Task got completed by another */
+		}
+		spin_unlock_irqrestore(&t->task_state_lock, flags1);
+
+		/* Search for a possible ccb that matches the task */
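+		/* (The comma expression in the loop condition re-nulls ccb
+		 * on every pass, so ccb is left NULL when nothing matches.) */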
+		for (i = 0; ccb = NULL, i < PM8001_MAX_CCB; i++) {
+			ccb = &pm8001_ha->ccb_info[i];
+			tag = ccb->ccb_tag;
+			if ((tag != 0xFFFFFFFF) && (ccb->task == t))
+				break;
+		}
+		if (!ccb) {
+			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+			break; /* Task got freed by another */
+		}
+		ts = &t->task_status;
+		ts->resp = SAS_TASK_COMPLETE;
+		/* Force the midlayer to retry */
+		ts->stat = SAS_QUEUE_FULL;
+		pm8001_dev = ccb->device;
+		if (pm8001_dev)
+			pm8001_dev->running_req--;
+		spin_lock_irqsave(&t->task_state_lock, flags1);
+		t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
+		t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+		t->task_state_flags |= SAS_TASK_STATE_DONE;
+		if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
+			spin_unlock_irqrestore(&t->task_state_lock, flags1);
+			PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("task 0x%p"
+				" done with event 0x%x resp 0x%x stat 0x%x but"
+				" aborted by upper layer!\n",
+				t, pw->handler, ts->resp, ts->stat));
+			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+		} else {
+			spin_unlock_irqrestore(&t->task_state_lock, flags1);
+			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+			mb();/* in order to force CPU ordering */
+			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+			t->task_done(t);
+		}
+	}	break;
+	case IO_XFER_OPEN_RETRY_TIMEOUT:
+	{	/* This one stashes the sas_task instead */
+		struct sas_task *t = (struct sas_task *)pm8001_dev;
+		u32 tag;
+		struct pm8001_ccb_info *ccb;
+		struct pm8001_hba_info *pm8001_ha = pw->pm8001_ha;
+		unsigned long flags, flags1;
+		int i, ret = 0;
+
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
+
+		ret = pm8001_query_task(t);
+
+		PM8001_IO_DBG(pm8001_ha,
+			switch (ret) {
+			case TMF_RESP_FUNC_SUCC:
+				pm8001_printk("...Task on lu\n");
+				break;
+
+			case TMF_RESP_FUNC_COMPLETE:
+				pm8001_printk("...Task NOT on lu\n");
+				break;
+
+			default:
+				pm8001_printk("...query task failed!!!\n");
+				break;
+			});
+
+		spin_lock_irqsave(&pm8001_ha->lock, flags);
+
+		spin_lock_irqsave(&t->task_state_lock, flags1);
+
+		if (unlikely((t->task_state_flags & SAS_TASK_STATE_DONE))) {
+			spin_unlock_irqrestore(&t->task_state_lock, flags1);
+			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+			if (ret == TMF_RESP_FUNC_SUCC) /* task on lu */
+				(void)pm8001_abort_task(t);
+			break; /* Task got completed by another */
+		}
+
+		spin_unlock_irqrestore(&t->task_state_lock, flags1);
+
+		/* Search for a possible ccb that matches the task */
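+		/* (Same comma idiom as above: ccb re-nulled on each pass.) */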
+		for (i = 0; ccb = NULL, i < PM8001_MAX_CCB; i++) {
+			ccb = &pm8001_ha->ccb_info[i];
+			tag = ccb->ccb_tag;
+			if ((tag != 0xFFFFFFFF) && (ccb->task == t))
+				break;
+		}
+		if (!ccb) {
+			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+			if (ret == TMF_RESP_FUNC_SUCC) /* task on lu */
+				(void)pm8001_abort_task(t);
+			break; /* Task got freed by another */
+		}
+
+		pm8001_dev = ccb->device;
+		dev = pm8001_dev->sas_device;
+
+		switch (ret) {
+		case TMF_RESP_FUNC_SUCC: /* task on lu */
+			ccb->open_retry = 1; /* Snub completion */
+			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+			ret = pm8001_abort_task(t);
+			ccb->open_retry = 0;
+			switch (ret) {
+			case TMF_RESP_FUNC_SUCC:
+			case TMF_RESP_FUNC_COMPLETE:
+				break;
+			default: /* device misbehavior */
+				ret = TMF_RESP_FUNC_FAILED;
+				PM8001_IO_DBG(pm8001_ha,
+					pm8001_printk("...Reset phy\n"));
+				pm8001_I_T_nexus_reset(dev);
+				break;
+			}
+			break;
+
+		case TMF_RESP_FUNC_COMPLETE: /* task not on lu */
+			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+			/* Do we need to abort the task locally? */
+			break;
+
+		default: /* device misbehavior */
+			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+			ret = TMF_RESP_FUNC_FAILED;
+			PM8001_IO_DBG(pm8001_ha,
+				pm8001_printk("...Reset phy\n"));
+			pm8001_I_T_nexus_reset(dev);
+		}
+
+		if (ret == TMF_RESP_FUNC_FAILED)
+			t = NULL;
+		pm8001_open_reject_retry(pm8001_ha, t, pm8001_dev);
+		PM8001_IO_DBG(pm8001_ha, pm8001_printk("...Complete\n"));
+	}	break;
 	case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
-		pm8001_dev = pw->data;
 		dev = pm8001_dev->sas_device;
 		pm8001_I_T_nexus_reset(dev);
 		break;
 	case IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY:
-		pm8001_dev = pw->data;
 		dev = pm8001_dev->sas_device;
 		pm8001_I_T_nexus_reset(dev);
 		break;
 	case IO_DS_IN_ERROR:
-		pm8001_dev = pw->data;
 		dev = pm8001_dev->sas_device;
 		pm8001_I_T_nexus_reset(dev);
 		break;
 	case IO_DS_NON_OPERATIONAL:
-		pm8001_dev = pw->data;
 		dev = pm8001_dev->sas_device;
 		pm8001_I_T_nexus_reset(dev);
 		break;
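
The NULL guard added at the top of pm8001_work_fn() leans on a stashing convention: the queued work item carries a pm8001_device in its data pointer for most events, but a sas_task for the two transfer-error events, so the cast is only safe after pw->handler has been inspected. Roughly the shape involved (a sketch; the real layout lives in pm8001_sas.h):

    struct pm8001_work_sketch {
        struct work_struct work;	/* queued on the driver workqueue */
        struct pm8001_hba_info *pm8001_ha;
        void *data;	/* pm8001_device, or sas_task for XFER errors */
        int handler;	/* IO_* event code that queued this work */
    };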
@@ -1460,6 +1664,11 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
 	status = le32_to_cpu(psspPayload->status);
 	tag = le32_to_cpu(psspPayload->tag);
 	ccb = &pm8001_ha->ccb_info[tag];
+	if ((status == IO_ABORTED) && ccb->open_retry) {
+		/* Being completed by another */
+		ccb->open_retry = 0;
+		return;
+	}
 	pm8001_dev = ccb->device;
 	param = le32_to_cpu(psspPayload->param);
 
@@ -1515,6 +1724,8 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
 			pm8001_printk("IO_XFER_ERROR_BREAK\n"));
 		ts->resp = SAS_TASK_COMPLETE;
 		ts->stat = SAS_OPEN_REJECT;
+		/* Force the midlayer to retry */
+		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
 		break;
 	case IO_XFER_ERROR_PHY_NOT_READY:
 		PM8001_IO_DBG(pm8001_ha,
@@ -1719,9 +1930,8 @@ static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
 	case IO_XFER_ERROR_BREAK:
 		PM8001_IO_DBG(pm8001_ha,
 			pm8001_printk("IO_XFER_ERROR_BREAK\n"));
-		ts->resp = SAS_TASK_COMPLETE;
-		ts->stat = SAS_INTERRUPTED;
-		break;
+		pm8001_handle_event(pm8001_ha, t, IO_XFER_ERROR_BREAK);
+		return;
 	case IO_XFER_ERROR_PHY_NOT_READY:
 		PM8001_IO_DBG(pm8001_ha,
 			pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
@@ -1800,10 +2010,8 @@ static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
 	case IO_XFER_OPEN_RETRY_TIMEOUT:
 		PM8001_IO_DBG(pm8001_ha,
 			pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
-		ts->resp = SAS_TASK_COMPLETE;
-		ts->stat = SAS_OPEN_REJECT;
-		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
-		break;
+		pm8001_handle_event(pm8001_ha, t, IO_XFER_OPEN_RETRY_TIMEOUT);
+		return;
 	case IO_XFER_ERROR_UNEXPECTED_PHASE:
 		PM8001_IO_DBG(pm8001_ha,
 			pm8001_printk("IO_XFER_ERROR_UNEXPECTED_PHASE\n"));
@@ -1877,7 +2085,6 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
 {
 	struct sas_task *t;
 	struct pm8001_ccb_info *ccb;
-	unsigned long flags = 0;
 	u32 param;
 	u32 status;
 	u32 tag;
@@ -2016,9 +2223,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
 			ts->stat = SAS_QUEUE_FULL;
 			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
 			mb();/*in order to force CPU ordering*/
-			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+			spin_unlock_irq(&pm8001_ha->lock);
 			t->task_done(t);
-			spin_lock_irqsave(&pm8001_ha->lock, flags);
+			spin_lock_irq(&pm8001_ha->lock);
 			return;
 		}
 		break;
@@ -2036,9 +2243,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
 			ts->stat = SAS_QUEUE_FULL;
 			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
 			mb();/*ditto*/
-			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+			spin_unlock_irq(&pm8001_ha->lock);
 			t->task_done(t);
-			spin_lock_irqsave(&pm8001_ha->lock, flags);
+			spin_lock_irq(&pm8001_ha->lock);
 			return;
 		}
 		break;
@@ -2064,9 +2271,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
 			ts->stat = SAS_QUEUE_FULL;
 			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
 			mb();/* ditto*/
-			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+			spin_unlock_irq(&pm8001_ha->lock);
 			t->task_done(t);
-			spin_lock_irqsave(&pm8001_ha->lock, flags);
+			spin_lock_irq(&pm8001_ha->lock);
 			return;
 		}
 		break;
@@ -2131,9 +2338,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
 			ts->stat = SAS_QUEUE_FULL;
 			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
 			mb();/*ditto*/
-			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+			spin_unlock_irq(&pm8001_ha->lock);
 			t->task_done(t);
-			spin_lock_irqsave(&pm8001_ha->lock, flags);
+			spin_lock_irq(&pm8001_ha->lock);
 			return;
 		}
 		break;
@@ -2155,9 +2362,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
 			ts->stat = SAS_QUEUE_FULL;
 			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
 			mb();/*ditto*/
-			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+			spin_unlock_irq(&pm8001_ha->lock);
 			t->task_done(t);
-			spin_lock_irqsave(&pm8001_ha->lock, flags);
+			spin_lock_irq(&pm8001_ha->lock);
 			return;
 		}
 		break;
@@ -2175,31 +2382,31 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
 		ts->stat = SAS_DEV_NO_RESPONSE;
 		break;
 	}
-	spin_lock_irqsave(&t->task_state_lock, flags);
+	spin_lock_irq(&t->task_state_lock);
 	t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
 	t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
 	t->task_state_flags |= SAS_TASK_STATE_DONE;
 	if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
-		spin_unlock_irqrestore(&t->task_state_lock, flags);
+		spin_unlock_irq(&t->task_state_lock);
 		PM8001_FAIL_DBG(pm8001_ha,
 			pm8001_printk("task 0x%p done with io_status 0x%x"
 			" resp 0x%x stat 0x%x but aborted by upper layer!\n",
 			t, status, ts->resp, ts->stat));
 		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
 	} else if (t->uldd_task) {
-		spin_unlock_irqrestore(&t->task_state_lock, flags);
+		spin_unlock_irq(&t->task_state_lock);
 		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
 		mb();/* ditto */
-		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+		spin_unlock_irq(&pm8001_ha->lock);
 		t->task_done(t);
-		spin_lock_irqsave(&pm8001_ha->lock, flags);
+		spin_lock_irq(&pm8001_ha->lock);
 	} else if (!t->uldd_task) {
-		spin_unlock_irqrestore(&t->task_state_lock, flags);
+		spin_unlock_irq(&t->task_state_lock);
 		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
 		mb();/*ditto*/
-		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+		spin_unlock_irq(&pm8001_ha->lock);
 		t->task_done(t);
-		spin_lock_irqsave(&pm8001_ha->lock, flags);
+		spin_lock_irq(&pm8001_ha->lock);
 	}
 }
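
The _irqsave to _irq switch in this function fixes a latent bug rather than style: flags was initialized to 0 and never filled by a matching spin_lock_irqsave(), so every unlock restored a fabricated IRQ state. These completion paths are entered with pm8001_ha->lock already held (taken in process_oq(), below), so dropping the lock around the callback with the _irq pair keeps interrupts off as the caller expects:

    spin_unlock_irq(&pm8001_ha->lock);	/* callback runs unlocked */
    t->task_done(t);
    spin_lock_irq(&pm8001_ha->lock);	/* reacquire; IRQs stay disabled */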
 
@@ -2207,7 +2414,6 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
 static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
 {
 	struct sas_task *t;
-	unsigned long flags = 0;
 	struct task_status_struct *ts;
 	struct pm8001_ccb_info *ccb;
 	struct pm8001_device *pm8001_dev;
@@ -2287,9 +2493,9 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
 			ts->stat = SAS_QUEUE_FULL;
 			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
 			mb();/*ditto*/
-			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+			spin_unlock_irq(&pm8001_ha->lock);
 			t->task_done(t);
-			spin_lock_irqsave(&pm8001_ha->lock, flags);
+			spin_lock_irq(&pm8001_ha->lock);
 			return;
 		}
 		break;
@@ -2387,31 +2593,31 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
 		ts->stat = SAS_OPEN_TO;
 		break;
 	}
-	spin_lock_irqsave(&t->task_state_lock, flags);
+	spin_lock_irq(&t->task_state_lock);
 	t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
 	t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
 	t->task_state_flags |= SAS_TASK_STATE_DONE;
 	if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
-		spin_unlock_irqrestore(&t->task_state_lock, flags);
+		spin_unlock_irq(&t->task_state_lock);
 		PM8001_FAIL_DBG(pm8001_ha,
 			pm8001_printk("task 0x%p done with io_status 0x%x"
 			" resp 0x%x stat 0x%x but aborted by upper layer!\n",
 			t, event, ts->resp, ts->stat));
 		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
 	} else if (t->uldd_task) {
-		spin_unlock_irqrestore(&t->task_state_lock, flags);
+		spin_unlock_irq(&t->task_state_lock);
 		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
 		mb();/* ditto */
-		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+		spin_unlock_irq(&pm8001_ha->lock);
 		t->task_done(t);
-		spin_lock_irqsave(&pm8001_ha->lock, flags);
+		spin_lock_irq(&pm8001_ha->lock);
 	} else if (!t->uldd_task) {
-		spin_unlock_irqrestore(&t->task_state_lock, flags);
+		spin_unlock_irq(&t->task_state_lock);
 		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
 		mb();/*ditto*/
-		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+		spin_unlock_irq(&pm8001_ha->lock);
 		t->task_done(t);
-		spin_lock_irqsave(&pm8001_ha->lock, flags);
+		spin_lock_irq(&pm8001_ha->lock);
 	}
 }
 
@@ -2857,7 +3063,7 @@ static void pm8001_hw_event_ack_req(struct pm8001_hba_info *pm8001_ha,
 
 	memset((u8 *)&payload, 0, sizeof(payload));
 	circularQ = &pm8001_ha->inbnd_q_tbl[Qnum];
-	payload.tag = 1;
+	payload.tag = cpu_to_le32(1);
 	payload.sea_phyid_portid = cpu_to_le32(((SEA & 0xFFFF) << 8) |
 		((phyId & 0x0F) << 4) | (port_id & 0x0F));
 	payload.param0 = cpu_to_le32(param0);
@@ -2929,9 +3135,9 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
 	phy->phy_type |= PORT_TYPE_SAS;
 	phy->identify.device_type = deviceType;
 	phy->phy_attached = 1;
-	if (phy->identify.device_type == SAS_END_DEV)
+	if (phy->identify.device_type == SAS_END_DEVICE)
 		phy->identify.target_port_protocols = SAS_PROTOCOL_SSP;
-	else if (phy->identify.device_type != NO_DEVICE)
+	else if (phy->identify.device_type != SAS_PHY_UNUSED)
 		phy->identify.target_port_protocols = SAS_PROTOCOL_SMP;
 	phy->sas_phy.oob_mode = SAS_OOB_MODE;
 	sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE);
@@ -3075,7 +3281,7 @@ static int mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
 		(struct dev_reg_resp *)(piomb + 4);
 
 	htag = le32_to_cpu(registerRespPayload->tag);
-	ccb = &pm8001_ha->ccb_info[registerRespPayload->tag];
+	ccb = &pm8001_ha->ccb_info[htag];
 	pm8001_dev = ccb->device;
 	status = le32_to_cpu(registerRespPayload->status);
 	device_id = le32_to_cpu(registerRespPayload->device_id);
@@ -3149,7 +3355,7 @@ mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
 	struct fw_control_ex	fw_control_context;
 	struct fw_flash_Update_resp *ppayload =
 		(struct fw_flash_Update_resp *)(piomb + 4);
-	u32 tag = le32_to_cpu(ppayload->tag);
+	u32 tag = ppayload->tag;
 	struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[tag];
 	status = le32_to_cpu(ppayload->status);
 	memcpy(&fw_control_context,
@@ -3238,13 +3444,12 @@ mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
 
 	struct task_abort_resp *pPayload =
 		(struct task_abort_resp *)(piomb + 4);
-	ccb = &pm8001_ha->ccb_info[pPayload->tag];
-	t = ccb->task;
-
 
 	status = le32_to_cpu(pPayload->status);
 	tag = le32_to_cpu(pPayload->tag);
 	scp = le32_to_cpu(pPayload->scp);
+	ccb = &pm8001_ha->ccb_info[tag];
+	t = ccb->task;
 	PM8001_IO_DBG(pm8001_ha,
 		pm8001_printk(" status = 0x%x\n", status));
 	if (t == NULL)
@@ -3270,7 +3475,7 @@ mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
 	t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
 	t->task_state_flags |= SAS_TASK_STATE_DONE;
 	spin_unlock_irqrestore(&t->task_state_lock, flags);
-	pm8001_ccb_task_free(pm8001_ha, t, ccb, pPayload->tag);
+	pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
 	mb();
 	t->task_done(t);
 	return 0;
@@ -3497,7 +3702,7 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb)
 static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
 {
 	u32 pHeader = (u32)*(u32 *)piomb;
-	u8 opc = (u8)((le32_to_cpu(pHeader)) & 0xFFF);
+	u8 opc = (u8)(pHeader & 0xFFF);
 
 	PM8001_MSG_DBG(pm8001_ha, pm8001_printk("process_one_iomb:"));
 
@@ -3664,9 +3869,11 @@ static int process_oq(struct pm8001_hba_info *pm8001_ha)
 {
 	struct outbound_queue_table *circularQ;
 	void *pMsg1 = NULL;
-	u8 bc = 0;
+	u8 uninitialized_var(bc);
 	u32 ret = MPI_IO_STATUS_FAIL;
+	unsigned long flags;
 
+	spin_lock_irqsave(&pm8001_ha->lock, flags);
 	circularQ = &pm8001_ha->outbnd_q_tbl[0];
 	do {
 		ret = mpi_msg_consume(pm8001_ha, circularQ, &pMsg1, &bc);
@@ -3677,16 +3884,16 @@ static int process_oq(struct pm8001_hba_info *pm8001_ha)
 			mpi_msg_free_set(pm8001_ha, pMsg1, circularQ, bc);
 		}
 		if (MPI_IO_STATUS_BUSY == ret) {
-			u32 producer_idx;
 			/* Update the producer index from SPC */
-			producer_idx = pm8001_read_32(circularQ->pi_virt);
-			circularQ->producer_index = cpu_to_le32(producer_idx);
-			if (circularQ->producer_index ==
+			circularQ->producer_index =
+				cpu_to_le32(pm8001_read_32(circularQ->pi_virt));
+			if (le32_to_cpu(circularQ->producer_index) ==
 				circularQ->consumer_idx)
 				/* OQ is empty */
 				break;
 		}
 	} while (1);
+	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
 	return ret;
 }
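
With pm8001_chip_isr() no longer taking the lock itself (see its hunk further down), process_oq() now owns the serialization: pm8001_ha->lock is held across the whole consume/dispatch/free cycle. The service loop, condensed (error handling omitted):

    spin_lock_irqsave(&pm8001_ha->lock, flags);
    while (mpi_msg_consume(pm8001_ha, circularQ, &pMsg1, &bc) ==
           MPI_IO_STATUS_SUCCESS) {
        process_one_iomb(pm8001_ha, (void *)(pMsg1 - 4));
        mpi_msg_free_set(pm8001_ha, pMsg1, circularQ, bc);
    }
    spin_unlock_irqrestore(&pm8001_ha->lock, flags);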
 
@@ -3712,9 +3919,9 @@ pm8001_chip_make_sg(struct scatterlist *scatter, int nr, void *prd)
 	}
 }
 
-static void build_smp_cmd(u32 deviceID, u32 hTag, struct smp_req *psmp_cmd)
+static void build_smp_cmd(u32 deviceID, __le32 hTag, struct smp_req *psmp_cmd)
 {
-	psmp_cmd->tag = cpu_to_le32(hTag);
+	psmp_cmd->tag = hTag;
 	psmp_cmd->device_id = cpu_to_le32(deviceID);
 	psmp_cmd->len_ip_ir = cpu_to_le32(1|(1 << 1));
 }
@@ -3798,7 +4005,7 @@ static int pm8001_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
 	struct ssp_ini_io_start_req ssp_cmd;
 	u32 tag = ccb->ccb_tag;
 	int ret;
-	__le64 phys_addr;
+	u64 phys_addr;
 	struct inbound_queue_table *circularQ;
 	u32 opc = OPC_INB_SSPINIIOSTART;
 	memset(&ssp_cmd, 0, sizeof(ssp_cmd));
@@ -3819,15 +4026,15 @@ static int pm8001_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
 	/* fill in PRD (scatter/gather) table, if any */
 	if (task->num_scatter > 1) {
 		pm8001_chip_make_sg(task->scatter, ccb->n_elem, ccb->buf_prd);
-		phys_addr = cpu_to_le64(ccb->ccb_dma_handle +
-				offsetof(struct pm8001_ccb_info, buf_prd[0]));
-		ssp_cmd.addr_low = lower_32_bits(phys_addr);
-		ssp_cmd.addr_high = upper_32_bits(phys_addr);
+		phys_addr = ccb->ccb_dma_handle +
+				offsetof(struct pm8001_ccb_info, buf_prd[0]);
+		ssp_cmd.addr_low = cpu_to_le32(lower_32_bits(phys_addr));
+		ssp_cmd.addr_high = cpu_to_le32(upper_32_bits(phys_addr));
 		ssp_cmd.esgl = cpu_to_le32(1<<31);
 	} else if (task->num_scatter == 1) {
-		__le64 dma_addr = cpu_to_le64(sg_dma_address(task->scatter));
-		ssp_cmd.addr_low = lower_32_bits(dma_addr);
-		ssp_cmd.addr_high = upper_32_bits(dma_addr);
+		u64 dma_addr = sg_dma_address(task->scatter);
+		ssp_cmd.addr_low = cpu_to_le32(lower_32_bits(dma_addr));
+		ssp_cmd.addr_high = cpu_to_le32(upper_32_bits(dma_addr));
 		ssp_cmd.len = cpu_to_le32(task->total_xfer_len);
 		ssp_cmd.esgl = 0;
 	} else if (task->num_scatter == 0) {
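
The type change above matters on big-endian hosts: swapping the whole address with cpu_to_le64() and then taking lower_32_bits()/upper_32_bits() extracts halves of the already-swapped value, i.e. the wrong ones. The corrected order, sketched with cmd standing in for the wire-format command:

    u64 dma = sg_dma_address(task->scatter);	/* keep host byte order */
    cmd.addr_low  = cpu_to_le32(lower_32_bits(dma));	/* split first, */
    cmd.addr_high = cpu_to_le32(upper_32_bits(dma));	/* swap second  */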
@@ -3850,7 +4057,7 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
 	int ret;
 	struct sata_start_req sata_cmd;
 	u32 hdr_tag, ncg_tag = 0;
-	__le64 phys_addr;
+	u64 phys_addr;
 	u32 ATAP = 0x0;
 	u32 dir;
 	struct inbound_queue_table *circularQ;
@@ -3889,13 +4096,13 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
 	/* fill in PRD (scatter/gather) table, if any */
 	if (task->num_scatter > 1) {
 		pm8001_chip_make_sg(task->scatter, ccb->n_elem, ccb->buf_prd);
-		phys_addr = cpu_to_le64(ccb->ccb_dma_handle +
-				offsetof(struct pm8001_ccb_info, buf_prd[0]));
+		phys_addr = ccb->ccb_dma_handle +
+				offsetof(struct pm8001_ccb_info, buf_prd[0]);
 		sata_cmd.addr_low = lower_32_bits(phys_addr);
 		sata_cmd.addr_high = upper_32_bits(phys_addr);
 		sata_cmd.esgl = cpu_to_le32(1 << 31);
 	} else if (task->num_scatter == 1) {
-		__le64 dma_addr = cpu_to_le64(sg_dma_address(task->scatter));
+		u64 dma_addr = sg_dma_address(task->scatter);
 		sata_cmd.addr_low = lower_32_bits(dma_addr);
 		sata_cmd.addr_high = upper_32_bits(dma_addr);
 		sata_cmd.len = cpu_to_le32(task->total_xfer_len);
@@ -4039,7 +4246,7 @@ static int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha,
 
 	circularQ = &pm8001_ha->inbnd_q_tbl[0];
 	memset(&payload, 0, sizeof(payload));
-	payload.tag = 1;
+	payload.tag = cpu_to_le32(1);
 	payload.device_id = cpu_to_le32(device_id);
 	PM8001_MSG_DBG(pm8001_ha,
 		pm8001_printk("unregister device device_id = %d\n", device_id));
@@ -4063,7 +4270,7 @@ static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
 	u32 opc = OPC_INB_LOCAL_PHY_CONTROL;
 	memset(&payload, 0, sizeof(payload));
 	circularQ = &pm8001_ha->inbnd_q_tbl[0];
-	payload.tag = 1;
+	payload.tag = cpu_to_le32(1);
 	payload.phyop_phyid =
 		cpu_to_le32(((phy_op & 0xff) << 8) | (phyId & 0x0F));
 	ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
@@ -4092,12 +4299,9 @@ static u32 pm8001_chip_is_our_interupt(struct pm8001_hba_info *pm8001_ha)
 static irqreturn_t
 pm8001_chip_isr(struct pm8001_hba_info *pm8001_ha)
 {
-	unsigned long flags;
-	spin_lock_irqsave(&pm8001_ha->lock, flags);
 	pm8001_chip_interrupt_disable(pm8001_ha);
 	process_oq(pm8001_ha);
 	pm8001_chip_interrupt_enable(pm8001_ha);
-	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
 	return IRQ_HANDLED;
 }
 
@@ -4360,8 +4564,10 @@ pm8001_chip_fw_flash_update_build(struct pm8001_hba_info *pm8001_ha,
 	payload.cur_image_offset = cpu_to_le32(info->cur_image_offset);
 	payload.total_image_len = cpu_to_le32(info->total_image_len);
 	payload.len = info->sgl.im_len.len ;
-	payload.sgl_addr_lo = lower_32_bits(info->sgl.addr);
-	payload.sgl_addr_hi = upper_32_bits(info->sgl.addr);
+	payload.sgl_addr_lo =
+		cpu_to_le32(lower_32_bits(le64_to_cpu(info->sgl.addr)));
+	payload.sgl_addr_hi =
+		cpu_to_le32(upper_32_bits(le64_to_cpu(info->sgl.addr)));
 	ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
 	return ret;
 }
diff --git a/drivers/scsi/pm8001/pm8001_hwi.h b/drivers/scsi/pm8001/pm8001_hwi.h
index 909132041c07..1a4611eb0321 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.h
+++ b/drivers/scsi/pm8001/pm8001_hwi.h
@@ -625,7 +625,7 @@ struct set_nvm_data_req {
 	__le32	tag;
 	__le32	len_ir_vpdd;
 	__le32	vpd_offset;
-	u32	reserved[8];
+	__le32	reserved[8];
 	__le32	resp_addr_lo;
 	__le32	resp_addr_hi;
 	__le32	resp_len;
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index c21a2163f9f6..36efaa7c3a54 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -62,7 +62,6 @@ static struct scsi_host_template pm8001_sht = {
 	.queuecommand		= sas_queuecommand,
 	.target_alloc		= sas_target_alloc,
 	.slave_configure	= sas_slave_configure,
-	.slave_destroy		= sas_slave_destroy,
 	.scan_finished		= pm8001_scan_finished,
 	.scan_start		= pm8001_scan_start,
 	.change_queue_depth	= sas_change_queue_depth,
@@ -76,7 +75,6 @@ static struct scsi_host_template pm8001_sht = {
 	.use_clustering		= ENABLE_CLUSTERING,
 	.eh_device_reset_handler = sas_eh_device_reset_handler,
 	.eh_bus_reset_handler	= sas_eh_bus_reset_handler,
-	.slave_alloc		= sas_slave_alloc,
 	.target_destroy		= sas_target_destroy,
 	.ioctl			= sas_ioctl,
 	.shost_attrs		= pm8001_host_attrs,
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index fb3dc9978861..3b11edd4a50c 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -166,6 +166,7 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
 	struct pm8001_hba_info *pm8001_ha = NULL;
 	struct sas_phy_linkrates *rates;
 	DECLARE_COMPLETION_ONSTACK(completion);
+	unsigned long flags;
 	pm8001_ha = sas_phy->ha->lldd_ha;
 	pm8001_ha->phy[phy_id].enable_completion = &completion;
 	switch (func) {
@@ -209,8 +210,29 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
 	case PHY_FUNC_DISABLE:
 		PM8001_CHIP_DISP->phy_stop_req(pm8001_ha, phy_id);
 		break;
+	case PHY_FUNC_GET_EVENTS:
+		spin_lock_irqsave(&pm8001_ha->lock, flags);
+		if (-1 == pm8001_bar4_shift(pm8001_ha,
+					(phy_id < 4) ? 0x30000 : 0x40000)) {
+			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+			return -EINVAL;
+		}
+		{
+			struct sas_phy *phy = sas_phy->phy;
+			uint32_t *qp = (uint32_t *)(((char *)
+				pm8001_ha->io_mem[2].memvirtaddr)
+				+ 0x1034 + (0x4000 * (phy_id & 3)));
+
+			phy->invalid_dword_count = qp[0];
+			phy->running_disparity_error_count = qp[1];
+			phy->loss_of_dword_sync_count = qp[3];
+			phy->phy_reset_problem_count = qp[4];
+		}
+		pm8001_bar4_shift(pm8001_ha, 0);
+		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+		return 0;
 	default:
-		rc = -ENOSYS;
+		rc = -EOPNOTSUPP;
 	}
 	msleep(300);
 	return rc;
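
Two things land in this hunk: a PHY_FUNC_GET_EVENTS reader that pulls the per-phy link-error counters out of a shifted BAR4 window (note qp[2] is deliberately skipped), and a default of -EOPNOTSUPP, following the kernel convention that -ENOSYS is reserved for nonexistent system calls. The errno side against a hypothetical dispatcher:

    switch (func) {
    case PHY_FUNC_HARD_RESET:	/* ... supported operations ... */
        break;
    default:
        return -EOPNOTSUPP;	/* unsupported op; -ENOSYS would claim
                                 * a missing system call */
    }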
@@ -234,12 +256,14 @@ void pm8001_scan_start(struct Scsi_Host *shost)
 
 int pm8001_scan_finished(struct Scsi_Host *shost, unsigned long time)
 {
+	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
+
 	/* give the phy enabling interrupt event time to come in (1s
 	* is empirically about all it takes) */
 	if (time < HZ)
 		return 0;
 	/* Wait for discovery to finish */
-	scsi_flush_work(shost);
+	sas_drain_work(ha);
 	return 1;
 }
 
@@ -340,7 +364,7 @@ static int pm8001_task_exec(struct sas_task *task, const int num,
 	struct pm8001_ccb_info *ccb;
 	u32 tag = 0xdeadbeef, rc, n_elem = 0;
 	u32 n = num;
-	unsigned long flags = 0, flags_libsas = 0;
+	unsigned long flags = 0;
 
 	if (!dev->port) {
 		struct task_status_struct *tsm = &t->task_status;
@@ -364,11 +388,7 @@ static int pm8001_task_exec(struct sas_task *task, const int num,
 				ts->stat = SAS_PHY_DOWN;
 
 				spin_unlock_irqrestore(&pm8001_ha->lock, flags);
-				spin_unlock_irqrestore(dev->sata_dev.ap->lock,
-						flags_libsas);
 				t->task_done(t);
-				spin_lock_irqsave(dev->sata_dev.ap->lock,
-					flags_libsas);
 				spin_lock_irqsave(&pm8001_ha->lock, flags);
 				if (n > 1)
 					t = list_entry(t->list.next,
@@ -516,6 +536,7 @@ void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha,
 	task->lldd_task = NULL;
 	ccb->task = NULL;
 	ccb->ccb_tag = 0xFFFFFFFF;
+	ccb->open_retry = 0;
 	pm8001_ccb_free(pm8001_ha, ccb_idx);
 }
 
@@ -615,7 +636,7 @@ static int pm8001_dev_found_notify(struct domain_device *dev)
 	wait_for_completion(&completion);
 	if (dev->dev_type == SAS_END_DEV)
 		msleep(50);
-	pm8001_ha->flags |= PM8001F_RUN_TIME ;
+	pm8001_ha->flags = PM8001F_RUN_TIME;
 	return 0;
 found_out:
 	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
@@ -860,6 +881,77 @@ static int pm8001_issue_ssp_tmf(struct domain_device *dev,
 		tmf);
 }
 
+/* Retry outstanding commands for the whole HA, optionally restricted
+ * to one task and/or one device */
+void pm8001_open_reject_retry(
+	struct pm8001_hba_info *pm8001_ha,
+	struct sas_task *task_to_close,
+	struct pm8001_device *device_to_close)
+{
+	int i;
+	unsigned long flags;
+
+	if (pm8001_ha == NULL)
+		return;
+
+	spin_lock_irqsave(&pm8001_ha->lock, flags);
+
+	for (i = 0; i < PM8001_MAX_CCB; i++) {
+		struct sas_task *task;
+		struct task_status_struct *ts;
+		struct pm8001_device *pm8001_dev;
+		unsigned long flags1;
+		u32 tag;
+		struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[i];
+
+		pm8001_dev = ccb->device;
+		if (!pm8001_dev || (pm8001_dev->dev_type == NO_DEVICE))
+			continue;
+		if (!device_to_close) {
+			uintptr_t d = (uintptr_t)pm8001_dev
+					- (uintptr_t)&pm8001_ha->devices;
+			if (((d % sizeof(*pm8001_dev)) != 0)
+			 || ((d / sizeof(*pm8001_dev)) >= PM8001_MAX_DEVICES))
+				continue;
+		} else if (pm8001_dev != device_to_close)
+			continue;
+		tag = ccb->ccb_tag;
+		if (!tag || (tag == 0xFFFFFFFF))
+			continue;
+		task = ccb->task;
+		if (!task || !task->task_done)
+			continue;
+		if (task_to_close && (task != task_to_close))
+			continue;
+		ts = &task->task_status;
+		ts->resp = SAS_TASK_COMPLETE;
+		/* Force the midlayer to retry */
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+		if (pm8001_dev)
+			pm8001_dev->running_req--;
+		spin_lock_irqsave(&task->task_state_lock, flags1);
+		task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
+		task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+		task->task_state_flags |= SAS_TASK_STATE_DONE;
+		if (unlikely((task->task_state_flags
+				& SAS_TASK_STATE_ABORTED))) {
+			spin_unlock_irqrestore(&task->task_state_lock,
+				flags1);
+			pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
+		} else {
+			spin_unlock_irqrestore(&task->task_state_lock,
+				flags1);
+			pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
+			mb();/* in order to force CPU ordering */
+			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+			task->task_done(task);
+			spin_lock_irqsave(&pm8001_ha->lock, flags);
+		}
+	}
+
+	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+}
+
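
The uintptr_t arithmetic inside the loop above is a defensive membership test: before trusting ccb->device, it checks that the pointer lands on an element boundary within this HBA's devices array. Isolated, with illustrative names:

    uintptr_t off = (uintptr_t)dev - (uintptr_t)&ha->devices;
    bool in_table = (off % sizeof(*dev) == 0) &&
                    (off / sizeof(*dev) < PM8001_MAX_DEVICES);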
 /**
   * Standard mandates link reset for ATA  (type 0) and hard reset for
   * SSP (type 1) , only for RECOVERY
@@ -875,12 +967,14 @@ int pm8001_I_T_nexus_reset(struct domain_device *dev)
 
 	pm8001_dev = dev->lldd_dev;
 	pm8001_ha = pm8001_find_ha_by_dev(dev);
-	phy = sas_find_local_phy(dev);
+	phy = sas_get_local_phy(dev);
 
 	if (dev_is_sata(dev)) {
 		DECLARE_COMPLETION_ONSTACK(completion_setstate);
-		if (scsi_is_sas_phy_local(phy))
-			return 0;
+		if (scsi_is_sas_phy_local(phy)) {
+			rc = 0;
+			goto out;
+		}
 		rc = sas_phy_reset(phy, 1);
 		msleep(2000);
 		rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev ,
@@ -889,12 +983,14 @@ int pm8001_I_T_nexus_reset(struct domain_device *dev)
 		rc = PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
 			pm8001_dev, 0x01);
 		wait_for_completion(&completion_setstate);
-	} else{
-	rc = sas_phy_reset(phy, 1);
-	msleep(2000);
+	} else {
+		rc = sas_phy_reset(phy, 1);
+		msleep(2000);
 	}
 	PM8001_EH_DBG(pm8001_ha, pm8001_printk(" for device[%x]:rc=%d\n",
 		pm8001_dev->device_id, rc));
+ out:
+	sas_put_local_phy(phy);
 	return rc;
 }
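
sas_get_local_phy() differs from the sas_find_local_phy() it replaces in that it takes a reference on the phy, which is why the early return above becomes a goto: every exit path must now pair the get with sas_put_local_phy(). Reduced to its shape (do_reset() is a stand-in body):

    struct sas_phy *phy = sas_get_local_phy(dev);	/* +1 reference */
    int rc = do_reset(phy);

    sas_put_local_phy(phy);	/* drop on every path, success or error */
    return rc;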
 
@@ -906,10 +1002,11 @@ int pm8001_lu_reset(struct domain_device *dev, u8 *lun)
 	struct pm8001_device *pm8001_dev = dev->lldd_dev;
 	struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
 	if (dev_is_sata(dev)) {
-		struct sas_phy *phy = sas_find_local_phy(dev);
+		struct sas_phy *phy = sas_get_local_phy(dev);
 		rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev ,
 			dev, 1, 0);
 		rc = sas_phy_reset(phy, 1);
+		sas_put_local_phy(phy);
 		rc = PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
 			pm8001_dev, 0x01);
 		msleep(2000);
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index 93959febe205..11008205aeb3 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -235,6 +235,7 @@ struct pm8001_ccb_info {
 	struct pm8001_device	*device;
 	struct pm8001_prd	buf_prd[PM8001_MAX_DMA_SG];
 	struct fw_control_ex	*fw_control_context;
+	u8			open_retry;
 };
 
 struct mpi_mem {
@@ -484,10 +485,15 @@ void pm8001_dev_gone(struct domain_device *dev);
 int pm8001_lu_reset(struct domain_device *dev, u8 *lun);
 int pm8001_I_T_nexus_reset(struct domain_device *dev);
 int pm8001_query_task(struct sas_task *task);
+void pm8001_open_reject_retry(
+	struct pm8001_hba_info *pm8001_ha,
+	struct sas_task *task_to_close,
+	struct pm8001_device *device_to_close);
 int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr,
 	dma_addr_t *pphys_addr, u32 *pphys_addr_hi, u32 *pphys_addr_lo,
 	u32 mem_size, u32 align);
 
+int pm8001_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue);
 
 /* ctl shared API */
 extern struct device_attribute *pm8001_host_attrs[];
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 9f41b3b4358f..5926f5a87ea8 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -356,7 +356,8 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
 		else if (start == (ha->flt_region_boot * 4) ||
 		    start == (ha->flt_region_fw * 4))
 			valid = 1;
-		else if (IS_QLA25XX(ha) || IS_QLA8XXX_TYPE(ha))
+		else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha)
+			|| IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
 			valid = 1;
 		if (!valid) {
 			ql_log(ql_log_warn, vha, 0x7065,
@@ -627,144 +628,6 @@ static struct bin_attribute sysfs_reset_attr = {
 };
 
 static ssize_t
-qla2x00_sysfs_write_edc(struct file *filp, struct kobject *kobj,
-			struct bin_attribute *bin_attr,
-			char *buf, loff_t off, size_t count)
-{
-	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
-	    struct device, kobj)));
-	struct qla_hw_data *ha = vha->hw;
-	uint16_t dev, adr, opt, len;
-	int rval;
-
-	ha->edc_data_len = 0;
-
-	if (!capable(CAP_SYS_ADMIN) || off != 0 || count < 8)
-		return -EINVAL;
-
-	if (!ha->edc_data) {
-		ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
-		    &ha->edc_data_dma);
-		if (!ha->edc_data) {
-			ql_log(ql_log_warn, vha, 0x7073,
-			    "Unable to allocate memory for EDC write.\n");
-			return -ENOMEM;
-		}
-	}
-
-	dev = le16_to_cpup((void *)&buf[0]);
-	adr = le16_to_cpup((void *)&buf[2]);
-	opt = le16_to_cpup((void *)&buf[4]);
-	len = le16_to_cpup((void *)&buf[6]);
-
-	if (!(opt & BIT_0))
-		if (len == 0 || len > DMA_POOL_SIZE || len > count - 8)
-			return -EINVAL;
-
-	memcpy(ha->edc_data, &buf[8], len);
-
-	rval = qla2x00_write_sfp(vha, ha->edc_data_dma, ha->edc_data,
-	    dev, adr, len, opt);
-	if (rval != QLA_SUCCESS) {
-		ql_log(ql_log_warn, vha, 0x7074,
-		    "Unable to write EDC (%x) %02x:%04x:%02x:%02x:%02hhx\n",
-		    rval, dev, adr, opt, len, buf[8]);
-		return -EIO;
-	}
-
-	return count;
-}
-
-static struct bin_attribute sysfs_edc_attr = {
-	.attr = {
-		.name = "edc",
-		.mode = S_IWUSR,
-	},
-	.size = 0,
-	.write = qla2x00_sysfs_write_edc,
-};
-
-static ssize_t
-qla2x00_sysfs_write_edc_status(struct file *filp, struct kobject *kobj,
-			struct bin_attribute *bin_attr,
-			char *buf, loff_t off, size_t count)
-{
-	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
-	    struct device, kobj)));
-	struct qla_hw_data *ha = vha->hw;
-	uint16_t dev, adr, opt, len;
-	int rval;
-
-	ha->edc_data_len = 0;
-
-	if (!capable(CAP_SYS_ADMIN) || off != 0 || count < 8)
-		return -EINVAL;
-
-	if (!ha->edc_data) {
-		ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
-		    &ha->edc_data_dma);
-		if (!ha->edc_data) {
-			ql_log(ql_log_warn, vha, 0x708c,
-			    "Unable to allocate memory for EDC status.\n");
-			return -ENOMEM;
-		}
-	}
-
-	dev = le16_to_cpup((void *)&buf[0]);
-	adr = le16_to_cpup((void *)&buf[2]);
-	opt = le16_to_cpup((void *)&buf[4]);
-	len = le16_to_cpup((void *)&buf[6]);
-
-	if (!(opt & BIT_0))
-		if (len == 0 || len > DMA_POOL_SIZE)
-			return -EINVAL;
-
-	memset(ha->edc_data, 0, len);
-	rval = qla2x00_read_sfp(vha, ha->edc_data_dma, ha->edc_data,
-			dev, adr, len, opt);
-	if (rval != QLA_SUCCESS) {
-		ql_log(ql_log_info, vha, 0x7075,
-		    "Unable to write EDC status (%x) %02x:%04x:%02x:%02x.\n",
-		    rval, dev, adr, opt, len);
-		return -EIO;
-	}
-
-	ha->edc_data_len = len;
-
-	return count;
-}
-
-static ssize_t
-qla2x00_sysfs_read_edc_status(struct file *filp, struct kobject *kobj,
-			   struct bin_attribute *bin_attr,
-			   char *buf, loff_t off, size_t count)
-{
-	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
-	    struct device, kobj)));
-	struct qla_hw_data *ha = vha->hw;
-
-	if (!capable(CAP_SYS_ADMIN) || off != 0 || count == 0)
-		return 0;
-
-	if (!ha->edc_data || ha->edc_data_len == 0 || ha->edc_data_len > count)
-		return -EINVAL;
-
-	memcpy(buf, ha->edc_data, ha->edc_data_len);
-
-	return ha->edc_data_len;
-}
-
-static struct bin_attribute sysfs_edc_status_attr = {
-	.attr = {
-		.name = "edc_status",
-		.mode = S_IRUSR | S_IWUSR,
-	},
-	.size = 0,
-	.write = qla2x00_sysfs_write_edc_status,
-	.read = qla2x00_sysfs_read_edc_status,
-};
-
-static ssize_t
 qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj,
 		       struct bin_attribute *bin_attr,
 		       char *buf, loff_t off, size_t count)
@@ -879,8 +742,6 @@ static struct sysfs_entry {
 	{ "vpd", &sysfs_vpd_attr, 1 },
 	{ "sfp", &sysfs_sfp_attr, 1 },
 	{ "reset", &sysfs_reset_attr, },
-	{ "edc", &sysfs_edc_attr, 2 },
-	{ "edc_status", &sysfs_edc_status_attr, 2 },
 	{ "xgmac_stats", &sysfs_xgmac_stats_attr, 3 },
 	{ "dcbx_tlv", &sysfs_dcbx_tlv_attr, 3 },
 	{ NULL },
@@ -898,7 +759,7 @@ qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
 			continue;
 		if (iter->is4GBp_only == 2 && !IS_QLA25XX(vha->hw))
 			continue;
-		if (iter->is4GBp_only == 3 && !(IS_QLA8XXX_TYPE(vha->hw)))
+		if (iter->is4GBp_only == 3 && !(IS_CNA_CAPABLE(vha->hw)))
 			continue;
 
 		ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
@@ -926,7 +787,7 @@ qla2x00_free_sysfs_attr(scsi_qla_host_t *vha)
 			continue;
 		if (iter->is4GBp_only == 2 && !IS_QLA25XX(ha))
 			continue;
-		if (iter->is4GBp_only == 3 && !!(IS_QLA8XXX_TYPE(vha->hw)))
+		if (iter->is4GBp_only == 3 && !(IS_CNA_CAPABLE(vha->hw)))
 			continue;
 
 		sysfs_remove_bin_file(&host->shost_gendev.kobj,
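
Besides the macro rename, the free path above loses a stray double negation: !!(cond) normalizes a value to 0/1, it does not negate, so the old test skipped removal precisely on the hardware that had created the files, leaving stale sysfs entries behind. Create and remove must use the identical predicate:

    /* create path: skip when the capability is absent */
    if (iter->is4GBp_only == 3 && !IS_CNA_CAPABLE(vha->hw))
        continue;
    /* remove path: same predicate, or the attribute leaks */
    if (iter->is4GBp_only == 3 && !IS_CNA_CAPABLE(vha->hw))
        continue;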
@@ -1231,7 +1092,7 @@ qla2x00_optrom_gold_fw_version_show(struct device *dev,
 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
 	struct qla_hw_data *ha = vha->hw;
 
-	if (!IS_QLA81XX(ha))
+	if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
 		return snprintf(buf, PAGE_SIZE, "\n");
 
 	return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%d)\n",
@@ -1278,7 +1139,7 @@ qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr,
 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
 	struct qla_hw_data *ha = vha->hw;
 
-	if (!IS_QLA81XX(ha))
+	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
 		return snprintf(buf, PAGE_SIZE, "\n");
 
 	return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
@@ -1293,7 +1154,7 @@ qla2x00_phy_version_show(struct device *dev, struct device_attribute *attr,
 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
 	struct qla_hw_data *ha = vha->hw;
 
-	if (!IS_QLA81XX(ha))
+	if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
 		return snprintf(buf, PAGE_SIZE, "\n");
 
 	return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
@@ -1316,7 +1177,7 @@ qla2x00_vlan_id_show(struct device *dev, struct device_attribute *attr,
 {
 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
 
-	if (!IS_QLA8XXX_TYPE(vha->hw))
+	if (!IS_CNA_CAPABLE(vha->hw))
 		return snprintf(buf, PAGE_SIZE, "\n");
 
 	return snprintf(buf, PAGE_SIZE, "%d\n", vha->fcoe_vlan_id);
@@ -1328,7 +1189,7 @@ qla2x00_vn_port_mac_address_show(struct device *dev,
 {
 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
 
-	if (!IS_QLA8XXX_TYPE(vha->hw))
+	if (!IS_CNA_CAPABLE(vha->hw))
 		return snprintf(buf, PAGE_SIZE, "\n");
 
 	return snprintf(buf, PAGE_SIZE, "%02x:%02x:%02x:%02x:%02x:%02x\n",
@@ -1364,7 +1225,7 @@ qla2x00_thermal_temp_show(struct device *dev,
 	else if (!vha->hw->flags.eeh_busy)
 		rval = qla2x00_get_thermal_temp(vha, &temp, &frac);
 	if (rval != QLA_SUCCESS)
-		temp = frac = 0;
+		return snprintf(buf, PAGE_SIZE, "\n");
 
 	return snprintf(buf, PAGE_SIZE, "%d.%02d\n", temp, frac);
 }
@@ -1493,6 +1354,9 @@ qla2x00_get_host_speed(struct Scsi_Host *shost)
 	case PORT_SPEED_10GB:
 		speed = FC_PORTSPEED_10GBIT;
 		break;
+	case PORT_SPEED_16GB:
+		speed = FC_PORTSPEED_16GBIT;
+		break;
 	}
 	fc_host_speed(shost) = speed;
 }
@@ -1643,10 +1507,14 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
 	 * final cleanup of firmware resources (PCBs and XCBs).
 	 */
 	if (fcport->loop_id != FC_NO_LOOP_ID &&
-	    !test_bit(UNLOADING, &fcport->vha->dpc_flags))
-		fcport->vha->hw->isp_ops->fabric_logout(fcport->vha,
-			fcport->loop_id, fcport->d_id.b.domain,
-			fcport->d_id.b.area, fcport->d_id.b.al_pa);
+	    !test_bit(UNLOADING, &fcport->vha->dpc_flags)) {
+		if (IS_FWI2_CAPABLE(fcport->vha->hw))
+			fcport->vha->hw->isp_ops->fabric_logout(fcport->vha,
+			    fcport->loop_id, fcport->d_id.b.domain,
+			    fcport->d_id.b.area, fcport->d_id.b.al_pa);
+		else
+			qla2x00_port_logout(fcport->vha, fcport);
+	}
 }
 
 static int
@@ -1889,6 +1757,7 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
 			break;
 		}
 	}
+
 	if (qos) {
 		ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, 0,
 			qos);
@@ -2086,7 +1955,7 @@ qla2x00_init_host_attr(scsi_qla_host_t *vha)
 	fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports;
 	fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;
 
-	if (IS_QLA8XXX_TYPE(ha))
+	if (IS_CNA_CAPABLE(ha))
 		speed = FC_PORTSPEED_10GBIT;
 	else if (IS_QLA25XX(ha))
 		speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 2c4714279bcc..f74cc0602f3b 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -11,29 +11,36 @@
 #include <linux/delay.h>
 
 /* BSG support for ELS/CT pass through */
-inline srb_t *
-qla2x00_get_ctx_bsg_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size)
+void
+qla2x00_bsg_job_done(void *data, void *ptr, int res)
 {
-	srb_t *sp;
+	srb_t *sp = (srb_t *)ptr;
+	struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
+	struct fc_bsg_job *bsg_job = sp->u.bsg_job;
+
+	bsg_job->reply->result = res;
+	bsg_job->job_done(bsg_job);
+	sp->free(vha, sp);
+}
+
+void
+qla2x00_bsg_sp_free(void *data, void *ptr)
+{
+	srb_t *sp = (srb_t *)ptr;
+	struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
+	struct fc_bsg_job *bsg_job = sp->u.bsg_job;
 	struct qla_hw_data *ha = vha->hw;
-	struct srb_ctx *ctx;
 
-	sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL);
-	if (!sp)
-		goto done;
-	ctx = kzalloc(size, GFP_KERNEL);
-	if (!ctx) {
-		mempool_free(sp, ha->srb_mempool);
-		sp = NULL;
-		goto done;
-	}
+	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
+	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
 
-	memset(sp, 0, sizeof(*sp));
-	sp->fcport = fcport;
-	sp->ctx = ctx;
-	ctx->iocbs = 1;
-done:
-	return sp;
+	dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
+	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+
+	if (sp->type == SRB_CT_CMD ||
+	    sp->type == SRB_ELS_CMD_HST)
+		kfree(sp->fcport);
+	mempool_free(sp, vha->hw->srb_mempool);
 }
 
 int
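
The two functions added above are this file's half of a tree-wide qla2xxx srb rework: the separately kzalloc'd srb_ctx disappears, and each srb instead carries done/free callbacks so common completion code can finish any request type uniformly. Wiring for a BSG request, condensed from the hunks below (the callbacks are later invoked as sp->done(vha, sp, res) by the completion path):

    sp->type = SRB_ELS_CMD_HST;
    sp->name = "bsg_els_hst";
    sp->u.bsg_job = bsg_job;
    sp->free = qla2x00_bsg_sp_free;	/* unmap SG lists, free the srb */
    sp->done = qla2x00_bsg_job_done;	/* set result, call job_done() */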
@@ -101,8 +108,6 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
 	uint32_t len;
 	uint32_t oper;
 
-	bsg_job->reply->reply_payload_rcv_len = 0;
-
 	if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_QLA82XX(ha))) {
 		ret = -EINVAL;
 		goto exit_fcp_prio_cfg;
@@ -217,6 +222,7 @@ exit_fcp_prio_cfg:
 	bsg_job->job_done(bsg_job);
 	return ret;
 }
+
 static int
 qla2x00_process_els(struct fc_bsg_job *bsg_job)
 {
@@ -230,7 +236,6 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
 	int req_sg_cnt, rsp_sg_cnt;
 	int rval =  (DRIVER_ERROR << 16);
 	uint16_t nextlid = 0;
-	struct srb_ctx *els;
 
 	if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
 		rport = bsg_job->rport;
@@ -337,20 +342,21 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
 	}
 
 	/* Alloc SRB structure */
-	sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_ctx));
+	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
 	if (!sp) {
 		rval = -ENOMEM;
 		goto done_unmap_sg;
 	}
 
-	els = sp->ctx;
-	els->type =
+	sp->type =
 		(bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
 		SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
-	els->name =
+	sp->name =
 		(bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
 		"bsg_els_rpt" : "bsg_els_hst");
-	els->u.bsg_job = bsg_job;
+	sp->u.bsg_job = bsg_job;
+	sp->free = qla2x00_bsg_sp_free;
+	sp->done = qla2x00_bsg_job_done;
 
 	ql_dbg(ql_dbg_user, vha, 0x700a,
 	    "bsg rqst type: %s els type: %x - loop-id=%x "
@@ -362,7 +368,6 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
 	if (rval != QLA_SUCCESS) {
 		ql_log(ql_log_warn, vha, 0x700e,
 		    "qla2x00_start_sp failed = %d\n", rval);
-		kfree(sp->ctx);
 		mempool_free(sp, ha->srb_mempool);
 		rval = -EIO;
 		goto done_unmap_sg;
@@ -409,7 +414,6 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
 	uint16_t loop_id;
 	struct fc_port *fcport;
 	char  *type = "FC_BSG_HST_CT";
-	struct srb_ctx *ct;
 
 	req_sg_cnt =
 		dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
@@ -486,19 +490,20 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
 	fcport->loop_id = loop_id;
 
 	/* Alloc SRB structure */
-	sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_ctx));
+	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
 	if (!sp) {
 		ql_log(ql_log_warn, vha, 0x7015,
-		    "qla2x00_get_ctx_bsg_sp failed.\n");
+		    "qla2x00_get_sp failed.\n");
 		rval = -ENOMEM;
 		goto done_free_fcport;
 	}
 
-	ct = sp->ctx;
-	ct->type = SRB_CT_CMD;
-	ct->name = "bsg_ct";
-	ct->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
-	ct->u.bsg_job = bsg_job;
+	sp->type = SRB_CT_CMD;
+	sp->name = "bsg_ct";
+	sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
+	sp->u.bsg_job = bsg_job;
+	sp->free = qla2x00_bsg_sp_free;
+	sp->done = qla2x00_bsg_job_done;
 
 	ql_dbg(ql_dbg_user, vha, 0x7016,
 	    "bsg rqst type: %s else type: %x - "
@@ -511,7 +516,6 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
 	if (rval != QLA_SUCCESS) {
 		ql_log(ql_log_warn, vha, 0x7017,
 		    "qla2x00_start_sp failed=%d.\n", rval);
-		kfree(sp->ctx);
 		mempool_free(sp, ha->srb_mempool);
 		rval = -EIO;
 		goto done_free_fcport;
@@ -540,7 +544,7 @@ qla81xx_set_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
 	int rval = 0;
 	struct qla_hw_data *ha = vha->hw;
 
-	if (!IS_QLA81XX(ha))
+	if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
 		goto done_set_internal;
 
 	new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
@@ -582,7 +586,7 @@ qla81xx_reset_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
 	uint16_t new_config[4];
 	struct qla_hw_data *ha = vha->hw;
 
-	if (!IS_QLA81XX(ha))
+	if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
 		goto done_reset_internal;
 
 	memset(new_config, 0 , sizeof(new_config));
@@ -707,7 +711,7 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
 
 	if ((ha->current_topology == ISP_CFG_F ||
 	    (atomic_read(&vha->loop_state) == LOOP_DOWN) ||
-	    (IS_QLA81XX(ha) &&
+	    ((IS_QLA81XX(ha) || IS_QLA83XX(ha)) &&
 	    le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE
 	    && req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
 		elreq.options == EXTERNAL_LOOPBACK) {
@@ -717,13 +721,12 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
 		command_sent = INT_DEF_LB_ECHO_CMD;
 		rval = qla2x00_echo_test(vha, &elreq, response);
 	} else {
-		if (IS_QLA81XX(ha)) {
+		if (IS_QLA81XX(ha) || IS_QLA8031(ha)) {
 			memset(config, 0, sizeof(config));
 			memset(new_config, 0, sizeof(new_config));
 			if (qla81xx_get_port_config(vha, config)) {
 				ql_log(ql_log_warn, vha, 0x701f,
 				    "Get port config failed.\n");
-				bsg_job->reply->reply_payload_rcv_len = 0;
 				bsg_job->reply->result = (DID_ERROR << 16);
 				rval = -EPERM;
 				goto done_free_dma_req;
@@ -737,8 +740,6 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
 					new_config)) {
 					ql_log(ql_log_warn, vha, 0x7024,
 					    "Internal loopback failed.\n");
-					bsg_job->reply->reply_payload_rcv_len =
-						0;
 					bsg_job->reply->result =
 						(DID_ERROR << 16);
 					rval = -EPERM;
@@ -750,8 +751,6 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
 				 */
 				if (qla81xx_reset_internal_loopback(vha,
 					config, 1)) {
-					bsg_job->reply->reply_payload_rcv_len =
-						0;
 					bsg_job->reply->result =
 						(DID_ERROR << 16);
 					rval = -EPERM;
@@ -788,7 +787,6 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
 					    "MPI reset failed.\n");
 				}
 
-				bsg_job->reply->reply_payload_rcv_len = 0;
 				bsg_job->reply->result = (DID_ERROR << 16);
 				rval = -EIO;
 				goto done_free_dma_req;
@@ -813,7 +811,6 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
 		fw_sts_ptr += sizeof(response);
 		*fw_sts_ptr = command_sent;
 		rval = 0;
-		bsg_job->reply->reply_payload_rcv_len = 0;
 		bsg_job->reply->result = (DID_ERROR << 16);
 	} else {
 		ql_dbg(ql_dbg_user, vha, 0x702d,
@@ -872,7 +869,7 @@ qla84xx_reset(struct fc_bsg_job *bsg_job)
 	if (rval) {
 		ql_log(ql_log_warn, vha, 0x7030,
 		    "Vendor request 84xx reset failed.\n");
-		rval = bsg_job->reply->reply_payload_rcv_len = 0;
+		rval = 0;
 		bsg_job->reply->result = (DID_ERROR << 16);
 
 	} else {
@@ -971,9 +968,8 @@ qla84xx_updatefw(struct fc_bsg_job *bsg_job)
 		ql_log(ql_log_warn, vha, 0x7037,
 		    "Vendor request 84xx updatefw failed.\n");
 
-		rval = bsg_job->reply->reply_payload_rcv_len = 0;
+		rval = 0;
 		bsg_job->reply->result = (DID_ERROR << 16);
-
 	} else {
 		ql_dbg(ql_dbg_user, vha, 0x7038,
 		    "Vendor request 84xx updatefw completed.\n");
@@ -1159,7 +1155,7 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
 		ql_log(ql_log_warn, vha, 0x7043,
 		    "Vendor request 84xx mgmt failed.\n");
 
-		rval = bsg_job->reply->reply_payload_rcv_len = 0;
+		rval = 0;
 		bsg_job->reply->result = (DID_ERROR << 16);
 
 	} else {
@@ -1210,8 +1206,6 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
 	uint16_t mb[MAILBOX_REGISTER_COUNT];
 	uint8_t *rsp_ptr = NULL;
 
-	bsg_job->reply->reply_payload_rcv_len = 0;
-
 	if (!IS_IIDMA_CAPABLE(vha->hw)) {
 		ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n");
 		return -EINVAL;
@@ -1304,8 +1298,6 @@ qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, scsi_qla_host_t *vha,
 	int valid = 0;
 	struct qla_hw_data *ha = vha->hw;
 
-	bsg_job->reply->reply_payload_rcv_len = 0;
-
 	if (unlikely(pci_channel_offline(ha->pdev)))
 		return -EINVAL;
 
@@ -1331,7 +1323,7 @@ qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, scsi_qla_host_t *vha,
 		    start == (ha->flt_region_fw * 4))
 			valid = 1;
 		else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
-		    IS_QLA8XXX_TYPE(ha))
+		    IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
 			valid = 1;
 		if (!valid) {
 			ql_log(ql_log_warn, vha, 0x7058,
@@ -1617,6 +1609,9 @@ qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
 	struct Scsi_Host *host;
 	scsi_qla_host_t *vha;
 
+	/* In case no data transferred. */
+	bsg_job->reply->reply_payload_rcv_len = 0;
+
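/*
 * Sketch of the cleanup visible throughout this file: the receive length
 * is zeroed once at dispatcher entry (above), making the many scattered
 * "reply_payload_rcv_len = 0" assignments this patch removes redundant.
 * A handler (hypothetical example below) now touches the field only when
 * data actually moved:
 */
static int example_handler(struct fc_bsg_job *bsg_job, uint32_t len)
{
	/* set only on success; failure paths can simply return */
	bsg_job->reply->reply_payload_rcv_len = len;
	return 0;
}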
 	if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
 		rport = bsg_job->rport;
 		fcport = *(fc_port_t **) rport->dd_data;
@@ -1655,6 +1650,7 @@ qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
 	case FC_BSG_RPT_CT:
 	default:
 		ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
+		bsg_job->reply->result = ret;
 		break;
 	}
 	return ret;
@@ -1669,7 +1665,6 @@ qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
 	int cnt, que;
 	unsigned long flags;
 	struct req_que *req;
-	struct srb_ctx *sp_bsg;
 
 	/* find the bsg job from the active list of commands */
 	spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -1681,11 +1676,9 @@ qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
 		for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
 			sp = req->outstanding_cmds[cnt];
 			if (sp) {
-				sp_bsg = sp->ctx;
-
-				if (((sp_bsg->type == SRB_CT_CMD) ||
-					(sp_bsg->type == SRB_ELS_CMD_HST))
-					&& (sp_bsg->u.bsg_job == bsg_job)) {
+				if (((sp->type == SRB_CT_CMD) ||
+					(sp->type == SRB_ELS_CMD_HST))
+					&& (sp->u.bsg_job == bsg_job)) {
 					spin_unlock_irqrestore(&ha->hardware_lock, flags);
 					if (ha->isp_ops->abort_command(sp)) {
 						ql_log(ql_log_warn, vha, 0x7089,
@@ -1715,7 +1708,6 @@ done:
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 	if (bsg_job->request->msgcode == FC_BSG_HST_CT)
 		kfree(sp->fcport);
-	kfree(sp->ctx);
 	mempool_free(sp, ha->srb_mempool);
 	return 0;
 }
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 45cbf0ba624d..897731b93df2 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -11,23 +11,27 @@
  * ----------------------------------------------------------------------
  * |             Level            |   Last Value Used  |     Holes	|
  * ----------------------------------------------------------------------
- * | Module Init and Probe        |       0x0116       | 0xfa           |
- * | Mailbox commands             |       0x112b       |		|
- * | Device Discovery             |       0x2084       |		|
- * | Queue Command and IO tracing |       0x302f       | 0x3008,0x302d, |
- * |                              |                    | 0x302e         |
+ * | Module Init and Probe        |       0x0120       | 0x4b,0xba,0xfa |
+ * | Mailbox commands             |       0x113e       | 0x112c-0x112e  |
+ * |                              |                    | 0x113a         |
+ * | Device Discovery             |       0x2086       | 0x2020-0x2022  |
+ * | Queue Command and IO tracing |       0x302f       | 0x3006,0x3008  |
+ * |                              |                    | 0x302d-0x302e  |
  * | DPC Thread                   |       0x401c       |		|
- * | Async Events                 |       0x5057       | 0x5052		|
- * | Timer Routines               |       0x6011       | 0x600e,0x600f  |
- * | User Space Interactions      |       0x709e       | 0x7018,0x702e  |
- * |                              |                    | 0x7039,0x7045  |
+ * | Async Events                 |       0x505d       | 0x502b-0x502f  |
+ * |                              |                    | 0x5047,0x5052  |
+ * | Timer Routines               |       0x6011       | 0x600e-0x600f  |
+ * | User Space Interactions      |       0x709f       | 0x7018,0x702e, |
+ * |                              |                    | 0x7039,0x7045, |
+ * |                              |                    | 0x7073-0x7075, |
+ * |                              |                    | 0x708c         |
  * | Task Management              |       0x803c       | 0x8025-0x8026  |
  * |                              |                    | 0x800b,0x8039  |
  * | AER/EEH                      |       0x900f       |		|
  * | Virtual Port                 |       0xa007       |		|
- * | ISP82XX Specific             |       0xb052       |    		|
- * | MultiQ                       |       0xc00b       |		|
- * | Misc                         |       0xd00b       |		|
+ * | ISP82XX Specific             |       0xb054       | 0xb053         |
+ * | MultiQ                       |       0xc00c       |		|
+ * | Misc                         |       0xd010       |		|
  * ----------------------------------------------------------------------
  */
 
@@ -85,7 +89,7 @@ qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
 	WRT_REG_WORD(&reg->mailbox0, MBC_DUMP_RISC_RAM_EXTENDED);
 	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
 
-	dwords = GID_LIST_SIZE / 4;
+	dwords = qla2x00_gid_list_size(ha) / 4;
 	for (cnt = 0; cnt < ram_dwords && rval == QLA_SUCCESS;
 	    cnt += dwords, addr += dwords) {
 		if (cnt + dwords > ram_dwords)
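
/*
 * GID_LIST_SIZE was a compile-time constant built on the old global
 * MAX_FIBRE_DEVICES; a sketch of the per-HBA helper replacing it here
 * (the real inline lives elsewhere in this series; shape assumed):
 */
static inline int example_gid_list_size(struct qla_hw_data *ha)
{
	return sizeof(struct gid_list_info) * ha->max_fibre_devices;
}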
@@ -260,7 +264,7 @@ qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint16_t *ram,
 	WRT_MAILBOX_REG(ha, reg, 0, MBC_DUMP_RISC_RAM_EXTENDED);
 	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
 
-	words = GID_LIST_SIZE / 2;
+	words = qla2x00_gid_list_size(ha) / 2;
 	for (cnt = 0; cnt < ram_words && rval == QLA_SUCCESS;
 	    cnt += words, addr += words) {
 		if (cnt + words > ram_words)
@@ -375,6 +379,77 @@ qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
 }
 
 static inline void *
+qla25xx_copy_mqueues(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
+{
+	struct qla2xxx_mqueue_chain *q;
+	struct qla2xxx_mqueue_header *qh;
+	struct req_que *req;
+	struct rsp_que *rsp;
+	int que;
+
+	if (!ha->mqenable)
+		return ptr;
+
+	/* Request queues */
+	for (que = 1; que < ha->max_req_queues; que++) {
+		req = ha->req_q_map[que];
+		if (!req)
+			break;
+
+		/* Add chain. */
+		q = ptr;
+		*last_chain = &q->type;
+		q->type = __constant_htonl(DUMP_CHAIN_QUEUE);
+		q->chain_size = htonl(
+		    sizeof(struct qla2xxx_mqueue_chain) +
+		    sizeof(struct qla2xxx_mqueue_header) +
+		    (req->length * sizeof(request_t)));
+		ptr += sizeof(struct qla2xxx_mqueue_chain);
+
+		/* Add header. */
+		qh = ptr;
+		qh->queue = __constant_htonl(TYPE_REQUEST_QUEUE);
+		qh->number = htonl(que);
+		qh->size = htonl(req->length * sizeof(request_t));
+		ptr += sizeof(struct qla2xxx_mqueue_header);
+
+		/* Add data. */
+		memcpy(ptr, req->ring, req->length * sizeof(request_t));
+		ptr += req->length * sizeof(request_t);
+	}
+
+	/* Response queues */
+	for (que = 1; que < ha->max_rsp_queues; que++) {
+		rsp = ha->rsp_q_map[que];
+		if (!rsp)
+			break;
+
+		/* Add chain. */
+		q = ptr;
+		*last_chain = &q->type;
+		q->type = __constant_htonl(DUMP_CHAIN_QUEUE);
+		q->chain_size = htonl(
+		    sizeof(struct qla2xxx_mqueue_chain) +
+		    sizeof(struct qla2xxx_mqueue_header) +
+		    (rsp->length * sizeof(response_t)));
+		ptr += sizeof(struct qla2xxx_mqueue_chain);
+
+		/* Add header. */
+		qh = ptr;
+		qh->queue = __constant_htonl(TYPE_RESPONSE_QUEUE);
+		qh->number = htonl(que);
+		qh->size = htonl(rsp->length * sizeof(response_t));
+		ptr += sizeof(struct qla2xxx_mqueue_header);
+
+		/* Add data. */
+		memcpy(ptr, rsp->ring, rsp->length * sizeof(response_t));
+		ptr += rsp->length * sizeof(response_t);
+	}
+
+	return ptr;
+}
+
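/*
 * Each queue captured above is serialized as one self-describing record:
 *
 *   [qla2xxx_mqueue_chain][qla2xxx_mqueue_header][raw ring data]
 *
 * A helper equivalent to the inline chain_size arithmetic (sketch):
 */
static inline uint32_t example_queue_record_size(uint32_t ring_bytes)
{
	return sizeof(struct qla2xxx_mqueue_chain) +
	    sizeof(struct qla2xxx_mqueue_header) + ring_bytes;
}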
+static inline void *
 qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
 {
 	uint32_t cnt, que_idx;
@@ -382,7 +457,7 @@ qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
 	struct qla2xxx_mq_chain *mq = ptr;
 	struct device_reg_25xxmq __iomem *reg;
 
-	if (!ha->mqenable)
+	if (!ha->mqenable || IS_QLA83XX(ha))
 		return ptr;
 
 	mq = ptr;
@@ -1322,12 +1397,16 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
 	nxt = qla24xx_copy_eft(ha, nxt);
 
 	/* Chain entries -- started with MQ. */
-	qla25xx_copy_fce(ha, nxt_chain, &last_chain);
+	nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
+	nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
 	if (last_chain) {
 		ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
 		*last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
 	}
 
+	/* Adjust valid length. */
+	ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
+
 qla25xx_fw_dump_failed_0:
 	qla2xxx_dump_post_process(base_vha, rval);
 
@@ -1636,12 +1715,16 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
 	nxt = qla24xx_copy_eft(ha, nxt);
 
 	/* Chain entries -- started with MQ. */
-	qla25xx_copy_fce(ha, nxt_chain, &last_chain);
+	nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
+	nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
 	if (last_chain) {
 		ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
 		*last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
 	}
 
+	/* Adjust valid length. */
+	ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
+
 qla81xx_fw_dump_failed_0:
 	qla2xxx_dump_post_process(base_vha, rval);
 
@@ -1650,6 +1733,507 @@ qla81xx_fw_dump_failed:
 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
 }
 
+void
+qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
+{
+	int		rval;
+	uint32_t	cnt, reg_data;
+	uint32_t	risc_address;
+	struct qla_hw_data *ha = vha->hw;
+	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+	uint32_t __iomem *dmp_reg;
+	uint32_t	*iter_reg;
+	uint16_t __iomem *mbx_reg;
+	unsigned long	flags;
+	struct qla83xx_fw_dump *fw;
+	uint32_t	ext_mem_cnt;
+	void		*nxt, *nxt_chain;
+	uint32_t	*last_chain = NULL;
+	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+
+	risc_address = ext_mem_cnt = 0;
+	flags = 0;
+
+	if (!hardware_locked)
+		spin_lock_irqsave(&ha->hardware_lock, flags);
+
+	if (!ha->fw_dump) {
+		ql_log(ql_log_warn, vha, 0xd00c,
+		    "No buffer available for dump!!!\n");
+		goto qla83xx_fw_dump_failed;
+	}
+
+	if (ha->fw_dumped) {
+		ql_log(ql_log_warn, vha, 0xd00d,
+		    "Firmware has been previously dumped (%p) -- ignoring "
+		    "request...\n", ha->fw_dump);
+		goto qla83xx_fw_dump_failed;
+	}
+	fw = &ha->fw_dump->isp.isp83;
+	qla2xxx_prep_dump(ha, ha->fw_dump);
+
+	fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
+
+	/* Pause RISC. */
+	rval = qla24xx_pause_risc(reg);
+	if (rval != QLA_SUCCESS)
+		goto qla83xx_fw_dump_failed_0;
+
+	WRT_REG_DWORD(&reg->iobase_addr, 0x6000);
+	dmp_reg = &reg->iobase_window;
+	reg_data = RD_REG_DWORD(dmp_reg);
+	WRT_REG_DWORD(dmp_reg, 0);
+
+	dmp_reg = &reg->unused_4_1[0];
+	reg_data = RD_REG_DWORD(dmp_reg);
+	WRT_REG_DWORD(dmp_reg, 0);
+
+	WRT_REG_DWORD(&reg->iobase_addr, 0x6010);
+	dmp_reg = &reg->unused_4_1[2];
+	reg_data = RD_REG_DWORD(dmp_reg);
+	WRT_REG_DWORD(dmp_reg, 0);
+
+	/* select PCR and disable ecc checking and correction */
+	WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
+	RD_REG_DWORD(&reg->iobase_addr);
+	WRT_REG_DWORD(&reg->iobase_select, 0x60000000);	/* write to F0h = PCR */
+
+	/* Host/Risc registers. */
+	iter_reg = fw->host_risc_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x7010, 16, iter_reg);
+	qla24xx_read_window(reg, 0x7040, 16, iter_reg);
+
+	/* PCIe registers. */
+	WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
+	RD_REG_DWORD(&reg->iobase_addr);
+	WRT_REG_DWORD(&reg->iobase_window, 0x01);
+	dmp_reg = &reg->iobase_c4;
+	fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg++));
+	fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg++));
+	fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
+	fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));
+
+	WRT_REG_DWORD(&reg->iobase_window, 0x00);
+	RD_REG_DWORD(&reg->iobase_window);
+
+	/* Host interface registers. */
+	dmp_reg = &reg->flash_addr;
+	for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++)
+		fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
+
+	/* Disable interrupts. */
+	WRT_REG_DWORD(&reg->ictrl, 0);
+	RD_REG_DWORD(&reg->ictrl);
+
+	/* Shadow registers. */
+	WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
+	RD_REG_DWORD(&reg->iobase_addr);
+	WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
+	fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+	WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
+	fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+	WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
+	fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+	WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
+	fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+	WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
+	fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+	WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
+	fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+	WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
+	fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+	WRT_REG_DWORD(&reg->iobase_select, 0xB0700000);
+	fw->shadow_reg[7] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+	WRT_REG_DWORD(&reg->iobase_select, 0xB0800000);
+	fw->shadow_reg[8] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+	WRT_REG_DWORD(&reg->iobase_select, 0xB0900000);
+	fw->shadow_reg[9] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+	WRT_REG_DWORD(&reg->iobase_select, 0xB0A00000);
+	fw->shadow_reg[10] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+	/* RISC I/O register. */
+	WRT_REG_DWORD(&reg->iobase_addr, 0x0010);
+	fw->risc_io_reg = htonl(RD_REG_DWORD(&reg->iobase_window));
+
+	/* Mailbox registers. */
+	mbx_reg = &reg->mailbox0;
+	for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++)
+		fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg++));
+
+	/* Transfer sequence registers. */
+	iter_reg = fw->xseq_gp_reg;
+	iter_reg = qla24xx_read_window(reg, 0xBE00, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBE10, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBE20, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBE30, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBE40, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBE50, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBE60, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBE70, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
+	qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
+
+	iter_reg = fw->xseq_0_reg;
+	iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg);
+	qla24xx_read_window(reg, 0xBFE0, 16, iter_reg);
+
+	qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
+
+	qla24xx_read_window(reg, 0xBEF0, 16, fw->xseq_2_reg);
+
+	/* Receive sequence registers. */
+	iter_reg = fw->rseq_gp_reg;
+	iter_reg = qla24xx_read_window(reg, 0xFE00, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xFE10, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xFE20, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xFE30, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xFE40, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xFE50, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xFE60, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xFE70, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
+	qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
+
+	iter_reg = fw->rseq_0_reg;
+	iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg);
+	qla24xx_read_window(reg, 0xFFD0, 16, iter_reg);
+
+	qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
+	qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
+	qla24xx_read_window(reg, 0xFEF0, 16, fw->rseq_3_reg);
+
+	/* Auxiliary sequence registers. */
+	iter_reg = fw->aseq_gp_reg;
+	iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xB070, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xB100, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xB110, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xB120, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xB130, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xB140, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xB150, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xB160, 16, iter_reg);
+	qla24xx_read_window(reg, 0xB170, 16, iter_reg);
+
+	iter_reg = fw->aseq_0_reg;
+	iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg);
+	qla24xx_read_window(reg, 0xB0D0, 16, iter_reg);
+
+	qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg);
+	qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg);
+	qla24xx_read_window(reg, 0xB1F0, 16, fw->aseq_3_reg);
+
+	/* Command DMA registers. */
+	iter_reg = fw->cmd_dma_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7100, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x7120, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x7130, 16, iter_reg);
+	qla24xx_read_window(reg, 0x71F0, 16, iter_reg);
+
+	/* Queues. */
+	iter_reg = fw->req0_dma_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
+	dmp_reg = &reg->iobase_q;
+	for (cnt = 0; cnt < 7; cnt++)
+		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+	iter_reg = fw->resp0_dma_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
+	dmp_reg = &reg->iobase_q;
+	for (cnt = 0; cnt < 7; cnt++)
+		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+	iter_reg = fw->req1_dma_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
+	dmp_reg = &reg->iobase_q;
+	for (cnt = 0; cnt < 7; cnt++)
+		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+	/* Transmit DMA registers. */
+	iter_reg = fw->xmt0_dma_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
+	qla24xx_read_window(reg, 0x7610, 16, iter_reg);
+
+	iter_reg = fw->xmt1_dma_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
+	qla24xx_read_window(reg, 0x7630, 16, iter_reg);
+
+	iter_reg = fw->xmt2_dma_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
+	qla24xx_read_window(reg, 0x7650, 16, iter_reg);
+
+	iter_reg = fw->xmt3_dma_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
+	qla24xx_read_window(reg, 0x7670, 16, iter_reg);
+
+	iter_reg = fw->xmt4_dma_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
+	qla24xx_read_window(reg, 0x7690, 16, iter_reg);
+
+	qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);
+
+	/* Receive DMA registers. */
+	iter_reg = fw->rcvt0_data_dma_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
+	qla24xx_read_window(reg, 0x7710, 16, iter_reg);
+
+	iter_reg = fw->rcvt1_data_dma_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
+	qla24xx_read_window(reg, 0x7730, 16, iter_reg);
+
+	/* RISC registers. */
+	iter_reg = fw->risc_gp_reg;
+	iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
+	qla24xx_read_window(reg, 0x0F70, 16, iter_reg);
+
+	/* Local memory controller registers. */
+	iter_reg = fw->lmc_reg;
+	iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg);
+	qla24xx_read_window(reg, 0x3070, 16, iter_reg);
+
+	/* Fibre Protocol Module registers. */
+	iter_reg = fw->fpm_hdw_reg;
+	iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x40C0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x40D0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x40E0, 16, iter_reg);
+	qla24xx_read_window(reg, 0x40F0, 16, iter_reg);
+
+	/* RQ0 Array registers. */
+	iter_reg = fw->rq0_array_reg;
+	iter_reg = qla24xx_read_window(reg, 0x5C00, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5C10, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5C20, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5C30, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5C40, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5C50, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5C60, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5C70, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5C80, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5C90, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5CA0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5CB0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5CC0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5CD0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5CE0, 16, iter_reg);
+	qla24xx_read_window(reg, 0x5CF0, 16, iter_reg);
+
+	/* RQ1 Array registers. */
+	iter_reg = fw->rq1_array_reg;
+	iter_reg = qla24xx_read_window(reg, 0x5D00, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5D10, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5D20, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5D30, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5D40, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5D50, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5D60, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5D70, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5D80, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5D90, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5DA0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5DB0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5DC0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5DD0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5DE0, 16, iter_reg);
+	qla24xx_read_window(reg, 0x5DF0, 16, iter_reg);
+
+	/* RP0 Array registers. */
+	iter_reg = fw->rp0_array_reg;
+	iter_reg = qla24xx_read_window(reg, 0x5E00, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5E10, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5E20, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5E30, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5E40, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5E50, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5E60, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5E70, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5E80, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5E90, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5EA0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5EB0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5EC0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5ED0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5EE0, 16, iter_reg);
+	qla24xx_read_window(reg, 0x5EF0, 16, iter_reg);
+
+	/* RP1 Array registers. */
+	iter_reg = fw->rp1_array_reg;
+	iter_reg = qla24xx_read_window(reg, 0x5F00, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5F10, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5F20, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5F30, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5F40, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5F50, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5F60, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5F70, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5F80, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5F90, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5FA0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5FB0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5FC0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5FD0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5FE0, 16, iter_reg);
+	qla24xx_read_window(reg, 0x5FF0, 16, iter_reg);
+
+	iter_reg = fw->at0_array_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7080, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x7090, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x70A0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x70B0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x70C0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x70D0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x70E0, 16, iter_reg);
+	qla24xx_read_window(reg, 0x70F0, 16, iter_reg);
+
+	/* I/O Queue Control registers. */
+	qla24xx_read_window(reg, 0x7800, 16, fw->queue_control_reg);
+
+	/* Frame Buffer registers. */
+	iter_reg = fw->fb_hdw_reg;
+	iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6060, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6070, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x61C0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6530, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6540, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6550, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6560, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6570, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6580, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6590, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x65A0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x65B0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x65C0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x65D0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x65E0, 16, iter_reg);
+	qla24xx_read_window(reg, 0x6F00, 16, iter_reg);
+
+	/* Multi queue registers */
+	nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset,
+	    &last_chain);
+
+	rval = qla24xx_soft_reset(ha);
+	if (rval != QLA_SUCCESS) {
+		ql_log(ql_log_warn, vha, 0xd00e,
+		    "SOFT RESET FAILED, forcing continuation of dump!!!\n");
+		rval = QLA_SUCCESS;
+
+		ql_log(ql_log_warn, vha, 0xd00f, "try a bigger hammer!!!\n");
+
+		WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
+		RD_REG_DWORD(&reg->hccr);
+
+		WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
+		RD_REG_DWORD(&reg->hccr);
+
+		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
+		RD_REG_DWORD(&reg->hccr);
+
+		for (cnt = 30000; cnt && (RD_REG_WORD(&reg->mailbox0)); cnt--)
+			udelay(5);
+
+		if (!cnt) {
+			nxt = fw->code_ram;
+			nxt += sizeof(fw->code_ram);
+			nxt += (ha->fw_memory_size - 0x100000 + 1);
+			goto copy_queue;
+		} else
+			ql_log(ql_log_warn, vha, 0xd010,
+			    "bigger hammer success?\n");
+	}
+
+	rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
+	    &nxt);
+	if (rval != QLA_SUCCESS)
+		goto qla83xx_fw_dump_failed_0;
+
+copy_queue:
+	nxt = qla2xxx_copy_queues(ha, nxt);
+
+	nxt = qla24xx_copy_eft(ha, nxt);
+
+	/* Chain entries -- started with MQ. */
+	nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
+	nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
+	if (last_chain) {
+		ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
+		*last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
+	}
+
+	/* Adjust valid length. */
+	ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
+
+qla83xx_fw_dump_failed_0:
+	qla2xxx_dump_post_process(base_vha, rval);
+
+qla83xx_fw_dump_failed:
+	if (!hardware_locked)
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
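/*
 * Every register block in qla83xx_fw_dump() above is pulled through
 * qla24xx_read_window(); a simplified rendering of that pattern (shape
 * assumed): select a 16-register window by writing its base address,
 * then stream the window contents out in big-endian dump order.
 */
static inline uint32_t *
example_read_window(struct device_reg_24xx __iomem *reg, uint32_t iobase,
    uint32_t count, uint32_t *buf)
{
	uint32_t __iomem *dmp_reg;

	WRT_REG_DWORD(&reg->iobase_addr, iobase);	/* select bank */
	dmp_reg = &reg->iobase_window;
	while (count--)
		*buf++ = htonl(RD_REG_DWORD(dmp_reg++));	/* big-endian */
	return buf;
}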
 /****************************************************************************/
 /*                         Driver Debug Functions.                          */
 /****************************************************************************/
@@ -1782,13 +2366,13 @@ ql_log(uint32_t level, scsi_qla_host_t *vha, int32_t id, const char *fmt, ...)
 	vaf.va = &va;
 
 	switch (level) {
-	case 0: /* FATAL LOG */
+	case ql_log_fatal: /* FATAL LOG */
 		pr_crit("%s%pV", pbuf, &vaf);
 		break;
-	case 1:
+	case ql_log_warn:
 		pr_err("%s%pV", pbuf, &vaf);
 		break;
-	case 2:
+	case ql_log_info:
 		pr_warn("%s%pV", pbuf, &vaf);
 		break;
 	default:
@@ -1837,13 +2421,13 @@ ql_log_pci(uint32_t level, struct pci_dev *pdev, int32_t id,
 	vaf.va = &va;
 
 	switch (level) {
-	case 0: /* FATAL LOG */
+	case ql_log_fatal: /* FATAL LOG */
 		pr_crit("%s%pV", pbuf, &vaf);
 		break;
-	case 1:
+	case ql_log_warn:
 		pr_err("%s%pV", pbuf, &vaf);
 		break;
-	case 2:
+	case ql_log_info:
 		pr_warn("%s%pV", pbuf, &vaf);
 		break;
 	default:
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index 5f1b6d9c3dcb..2157bdf1569a 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -165,6 +165,54 @@ struct qla81xx_fw_dump {
 	uint32_t ext_mem[1];
 };
 
+struct qla83xx_fw_dump {
+	uint32_t host_status;
+	uint32_t host_risc_reg[48];
+	uint32_t pcie_regs[4];
+	uint32_t host_reg[32];
+	uint32_t shadow_reg[11];
+	uint32_t risc_io_reg;
+	uint16_t mailbox_reg[32];
+	uint32_t xseq_gp_reg[256];
+	uint32_t xseq_0_reg[48];
+	uint32_t xseq_1_reg[16];
+	uint32_t xseq_2_reg[16];
+	uint32_t rseq_gp_reg[256];
+	uint32_t rseq_0_reg[32];
+	uint32_t rseq_1_reg[16];
+	uint32_t rseq_2_reg[16];
+	uint32_t rseq_3_reg[16];
+	uint32_t aseq_gp_reg[256];
+	uint32_t aseq_0_reg[32];
+	uint32_t aseq_1_reg[16];
+	uint32_t aseq_2_reg[16];
+	uint32_t aseq_3_reg[16];
+	uint32_t cmd_dma_reg[64];
+	uint32_t req0_dma_reg[15];
+	uint32_t resp0_dma_reg[15];
+	uint32_t req1_dma_reg[15];
+	uint32_t xmt0_dma_reg[32];
+	uint32_t xmt1_dma_reg[32];
+	uint32_t xmt2_dma_reg[32];
+	uint32_t xmt3_dma_reg[32];
+	uint32_t xmt4_dma_reg[32];
+	uint32_t xmt_data_dma_reg[16];
+	uint32_t rcvt0_data_dma_reg[32];
+	uint32_t rcvt1_data_dma_reg[32];
+	uint32_t risc_gp_reg[128];
+	uint32_t lmc_reg[128];
+	uint32_t fpm_hdw_reg[256];
+	uint32_t rq0_array_reg[256];
+	uint32_t rq1_array_reg[256];
+	uint32_t rp0_array_reg[256];
+	uint32_t rp1_array_reg[256];
+	uint32_t queue_control_reg[16];
+	uint32_t fb_hdw_reg[432];
+	uint32_t at0_array_reg[128];
+	uint32_t code_ram[0x2400];
+	uint32_t ext_mem[1];
+};
+
 #define EFT_NUM_BUFFERS		4
 #define EFT_BYTES_PER_BUFFER	0x4000
 #define EFT_SIZE		((EFT_BYTES_PER_BUFFER) * (EFT_NUM_BUFFERS))
@@ -192,9 +240,23 @@ struct qla2xxx_mq_chain {
 	uint32_t qregs[4 * QLA_MQ_SIZE];
 };
 
+struct qla2xxx_mqueue_header {
+	uint32_t queue;
+#define TYPE_REQUEST_QUEUE	0x1
+#define TYPE_RESPONSE_QUEUE	0x2
+	uint32_t number;
+	uint32_t size;
+};
+
+struct qla2xxx_mqueue_chain {
+	uint32_t type;
+	uint32_t chain_size;
+};
+
 #define DUMP_CHAIN_VARIANT	0x80000000
 #define DUMP_CHAIN_FCE		0x7FFFFAF0
 #define DUMP_CHAIN_MQ		0x7FFFFAF1
+#define DUMP_CHAIN_QUEUE	0x7FFFFAF2
 #define DUMP_CHAIN_LAST		0x80000000
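
/*
 * Hedged sketch of how a dump reader is expected to walk these records:
 * the high bit of the (big-endian) type word doubles as the LAST marker,
 * so mask it off before matching the chain identifier.
 */
static inline uint32_t example_chain_kind(uint32_t type_be)
{
	return ntohl(type_be) & ~DUMP_CHAIN_LAST; /* e.g. DUMP_CHAIN_QUEUE */
}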
 
 struct qla2xxx_fw_dump {
@@ -228,6 +290,7 @@ struct qla2xxx_fw_dump {
 		struct qla24xx_fw_dump isp24;
 		struct qla25xx_fw_dump isp25;
 		struct qla81xx_fw_dump isp81;
+		struct qla83xx_fw_dump isp83;
 	} isp;
 };
 
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index af1003f9de1e..a2443031dbe7 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -125,17 +125,17 @@
  * Fibre Channel device definitions.
  */
 #define WWN_SIZE		8	/* Size of WWPN, WWN & WWNN */
-#define MAX_FIBRE_DEVICES	512
+#define MAX_FIBRE_DEVICES_2100	512
+#define MAX_FIBRE_DEVICES_2400	2048
+#define MAX_FIBRE_DEVICES_LOOP	128
+#define MAX_FIBRE_DEVICES_MAX	MAX_FIBRE_DEVICES_2400
 #define MAX_FIBRE_LUNS  	0xFFFF
-#define	MAX_RSCN_COUNT		32
 #define	MAX_HOST_COUNT		16
 
 /*
  * Host adapter default definitions.
  */
 #define MAX_BUSES		1  /* We only have one bus today */
-#define MAX_TARGETS_2100	MAX_FIBRE_DEVICES
-#define MAX_TARGETS_2200	MAX_FIBRE_DEVICES
 #define MIN_LUNS		8
 #define MAX_LUNS		MAX_FIBRE_LUNS
 #define MAX_CMDS_PER_LUN	255
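
/*
 * Sketch of how the split limits above are meant to be consumed at probe
 * time (the assignment site is elsewhere in this series; shape assumed):
 * ISP2100/2200-class parts keep the historical 512-device ceiling, newer
 * parts get 2048, and ha->max_fibre_devices then sizes every fabric-scan
 * buffer instead of the old one-size MAX_FIBRE_DEVICES.
 */
static void example_set_fibre_limit(struct qla_hw_data *ha)
{
	if (IS_QLA2100(ha) || IS_QLA2200(ha))
		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100;
	else
		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
}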
@@ -202,20 +202,12 @@ struct sd_dif_tuple {
 /*
  * SCSI Request Block
  */
-typedef struct srb {
-	atomic_t ref_count;
-	struct fc_port *fcport;
-	uint32_t handle;
-
+struct srb_cmd {
 	struct scsi_cmnd *cmd;		/* Linux SCSI command pkt */
-
-	uint16_t flags;
-
 	uint32_t request_sense_length;
 	uint8_t *request_sense_ptr;
-
 	void *ctx;
-} srb_t;
+};
 
 /*
  * SRB flag definitions
@@ -254,10 +246,7 @@ struct srb_iocb {
 	} u;
 
 	struct timer_list timer;
-
-	void (*done)(srb_t *);
-	void (*free)(srb_t *);
-	void (*timeout)(srb_t *);
+	void (*timeout)(void *);
 };
 
 /* Values for srb_ctx type */
@@ -268,16 +257,37 @@ struct srb_iocb {
 #define SRB_CT_CMD	5
 #define SRB_ADISC_CMD	6
 #define SRB_TM_CMD	7
+#define SRB_SCSI_CMD	8
 
-struct srb_ctx {
+typedef struct srb {
+	atomic_t ref_count;
+	struct fc_port *fcport;
+	uint32_t handle;
+	uint16_t flags;
 	uint16_t type;
 	char *name;
 	int iocbs;
 	union {
-		struct srb_iocb *iocb_cmd;
+		struct srb_iocb iocb_cmd;
 		struct fc_bsg_job *bsg_job;
+		struct srb_cmd scmd;
 	} u;
-};
+	void (*done)(void *, void *, int);
+	void (*free)(void *, void *);
+} srb_t;
+
+#define GET_CMD_SP(sp) (sp->u.scmd.cmd)
+#define SET_CMD_SP(sp, cmd) (sp->u.scmd.cmd = cmd)
+#define GET_CMD_CTX_SP(sp) (sp->u.scmd.ctx)
+
+#define GET_CMD_SENSE_LEN(sp) \
+	(sp->u.scmd.request_sense_length)
+#define SET_CMD_SENSE_LEN(sp, len) \
+	(sp->u.scmd.request_sense_length = len)
+#define GET_CMD_SENSE_PTR(sp) \
+	(sp->u.scmd.request_sense_ptr)
+#define SET_CMD_SENSE_PTR(sp, ptr) \
+	(sp->u.scmd.request_sense_ptr = ptr)
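
/*
 * Usage sketch for the accessors above (illustrative function name):
 * SCSI-command state now lives inside the srb union, so completion code
 * reads and writes it through these macros rather than through the
 * removed top-level srb fields.
 */
static void example_store_sense(srb_t *sp, uint8_t *sense, uint32_t len)
{
	SET_CMD_SENSE_PTR(sp, sense);
	SET_CMD_SENSE_LEN(sp, len);
	/* later: copy GET_CMD_SENSE_LEN(sp) bytes from GET_CMD_SENSE_PTR(sp) */
}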
 
 struct msg_echo_lb {
 	dma_addr_t send_dma;
@@ -653,8 +663,10 @@ typedef struct {
 #define MBC_DIAGNOSTIC_LOOP_BACK	0x45	/* Diagnostic loop back. */
 #define MBC_ONLINE_SELF_TEST		0x46	/* Online self-test. */
 #define MBC_ENHANCED_GET_PORT_DATABASE	0x47	/* Get port database + login */
+#define MBC_CONFIGURE_VF		0x4b	/* Configure VFs */
 #define MBC_RESET_LINK_STATUS		0x52	/* Reset Link Error Status */
 #define MBC_IOCB_COMMAND_A64		0x54	/* Execute IOCB command (64) */
+#define MBC_PORT_LOGOUT			0x56	/* Port Logout request */
 #define MBC_SEND_RNID_ELS		0x57	/* Send RNID ELS request */
 #define MBC_SET_RNID_PARAMS		0x59	/* Set RNID parameters */
 #define MBC_GET_RNID_PARAMS		0x5a	/* Data Rate */
@@ -1709,6 +1721,7 @@ typedef struct fc_port {
 
 	uint16_t vp_idx;
 	uint8_t fc4_type;
+	uint8_t scan_state;
 } fc_port_t;
 
 /*
@@ -1761,7 +1774,6 @@ static const char * const port_state_str[] = {
 
 #define	GID_PT_CMD	0x1A1
 #define	GID_PT_REQ_SIZE	(16 + 4)
-#define	GID_PT_RSP_SIZE	(16 + (MAX_FIBRE_DEVICES * 4))
 
 #define	GPN_ID_CMD	0x112
 #define	GPN_ID_REQ_SIZE	(16 + 4)
@@ -2051,7 +2063,9 @@ struct ct_sns_rsp {
 		} ga_nxt;
 
 		struct {
-			struct ct_sns_gid_pt_data entries[MAX_FIBRE_DEVICES];
+			/* Assume the largest number of targets for the union */
+			struct ct_sns_gid_pt_data
+			    entries[MAX_FIBRE_DEVICES_MAX];
 		} gid_pt;
 
 		struct {
@@ -2112,7 +2126,11 @@ struct ct_sns_pkt {
 
 #define	GID_PT_SNS_SCMD_LEN	6
 #define	GID_PT_SNS_CMD_SIZE	28
-#define	GID_PT_SNS_DATA_SIZE	(MAX_FIBRE_DEVICES * 4 + 16)
+/*
+ * Assume MAX_FIBRE_DEVICES_2100 as these defines are only used with older
+ * adapters.
+ */
+#define	GID_PT_SNS_DATA_SIZE	(MAX_FIBRE_DEVICES_2100 * 4 + 16)
 
 #define	GPN_ID_SNS_SCMD_LEN	6
 #define	GPN_ID_SNS_CMD_SIZE	28
@@ -2160,7 +2178,6 @@ struct gid_list_info {
 	uint16_t loop_id;	/* ISP23XX         -- 6 bytes. */
 	uint16_t reserved_1;	/* ISP24XX         -- 8 bytes. */
 };
-#define GID_LIST_SIZE (sizeof(struct gid_list_info) * MAX_FIBRE_DEVICES)
 
 /* NPIV */
 typedef struct vport_info {
@@ -2261,6 +2278,7 @@ struct isp_operations {
 #define QLA_MIDX_DEFAULT	0
 #define QLA_MIDX_RSP_Q		1
 #define QLA_PCI_MSIX_CONTROL	0xa2
+#define QLA_83XX_PCI_MSIX_CONTROL	0x92
 
 struct scsi_qla_host;
 
@@ -2341,7 +2359,7 @@ struct qla_statistics {
 #define QLA_MQ_SIZE 32
 #define QLA_MAX_QUEUES 256
 #define ISP_QUE_REG(ha, id) \
-	((ha->mqenable) ? \
+	((ha->mqenable || IS_QLA83XX(ha)) ? \
 	((void *)(ha->mqiobase) +\
 	(QLA_QUE_PAGE * id)) :\
 	((void *)(ha->iobase)))
@@ -2461,6 +2479,7 @@ struct qla_hw_data {
 #define MIN_IOBASE_LEN          0x100
 /* Multi queue data structs */
 	device_reg_t __iomem *mqiobase;
+	device_reg_t __iomem *msixbase;
 	uint16_t        msix_count;
 	uint8_t         mqenable;
 	struct req_que **req_q_map;
@@ -2485,6 +2504,7 @@ struct qla_hw_data {
 	atomic_t	loop_down_timer;         /* loop down timer */
 	uint8_t		link_down_timeout;       /* link down timeout */
 	uint16_t	max_loop_id;
+	uint16_t	max_fibre_devices;	/* Maximum number of targets */
 
 	uint16_t	fb_rev;
 	uint16_t	min_external_loopid;    /* First external loop Id */
@@ -2494,6 +2514,7 @@ struct qla_hw_data {
 #define PORT_SPEED_2GB  0x01
 #define PORT_SPEED_4GB  0x03
 #define PORT_SPEED_8GB  0x04
+#define PORT_SPEED_16GB 0x05
 #define PORT_SPEED_10GB	0x13
 	uint16_t	link_data_rate;         /* F/W operating speed */
 
@@ -2515,6 +2536,8 @@ struct qla_hw_data {
 #define PCI_DEVICE_ID_QLOGIC_ISP2532    0x2532
 #define PCI_DEVICE_ID_QLOGIC_ISP8432    0x8432
 #define PCI_DEVICE_ID_QLOGIC_ISP8001	0x8001
+#define PCI_DEVICE_ID_QLOGIC_ISP8031	0x8031
+#define PCI_DEVICE_ID_QLOGIC_ISP2031	0x2031
 	uint32_t	device_type;
 #define DT_ISP2100                      BIT_0
 #define DT_ISP2200                      BIT_1
@@ -2531,7 +2554,9 @@ struct qla_hw_data {
 #define DT_ISP8432                      BIT_12
 #define DT_ISP8001			BIT_13
 #define DT_ISP8021			BIT_14
-#define DT_ISP_LAST			(DT_ISP8021 << 1)
+#define DT_ISP2031			BIT_15
+#define DT_ISP8031			BIT_16
+#define DT_ISP_LAST			(DT_ISP8031 << 1)
 
 #define DT_T10_PI                       BIT_25
 #define DT_IIDMA                        BIT_26
@@ -2555,26 +2580,30 @@ struct qla_hw_data {
 #define IS_QLA2532(ha)  (DT_MASK(ha) & DT_ISP2532)
 #define IS_QLA8432(ha)  (DT_MASK(ha) & DT_ISP8432)
 #define IS_QLA8001(ha)	(DT_MASK(ha) & DT_ISP8001)
+#define IS_QLA81XX(ha)	(IS_QLA8001(ha))
 #define IS_QLA82XX(ha)	(DT_MASK(ha) & DT_ISP8021)
+#define IS_QLA2031(ha)	(DT_MASK(ha) & DT_ISP2031)
+#define IS_QLA8031(ha)	(DT_MASK(ha) & DT_ISP8031)
 
 #define IS_QLA23XX(ha)  (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA2322(ha) || \
 			IS_QLA6312(ha) || IS_QLA6322(ha))
 #define IS_QLA24XX(ha)  (IS_QLA2422(ha) || IS_QLA2432(ha))
 #define IS_QLA54XX(ha)  (IS_QLA5422(ha) || IS_QLA5432(ha))
 #define IS_QLA25XX(ha)  (IS_QLA2532(ha))
+#define IS_QLA83XX(ha)	(IS_QLA2031(ha) || IS_QLA8031(ha))
 #define IS_QLA84XX(ha)  (IS_QLA8432(ha))
 #define IS_QLA24XX_TYPE(ha)     (IS_QLA24XX(ha) || IS_QLA54XX(ha) || \
 				IS_QLA84XX(ha))
-#define IS_QLA81XX(ha)		(IS_QLA8001(ha))
-#define IS_QLA8XXX_TYPE(ha)	(IS_QLA81XX(ha) || IS_QLA82XX(ha))
+#define IS_CNA_CAPABLE(ha)	(IS_QLA81XX(ha) || IS_QLA82XX(ha) || \
+				IS_QLA8031(ha))
 #define IS_QLA2XXX_MIDTYPE(ha)	(IS_QLA24XX(ha) || IS_QLA84XX(ha) || \
 				IS_QLA25XX(ha) || IS_QLA81XX(ha) || \
-				IS_QLA82XX(ha))
-#define IS_MSIX_NACK_CAPABLE(ha) (IS_QLA81XX(ha))
-#define IS_NOPOLLING_TYPE(ha)	((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && \
-				(ha)->flags.msix_enabled)
-#define IS_FAC_REQUIRED(ha)	(IS_QLA81XX(ha))
-#define IS_NOCACHE_VPD_TYPE(ha)	(IS_QLA81XX(ha))
+				IS_QLA82XX(ha) || IS_QLA83XX(ha))
+#define IS_MSIX_NACK_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha))
+#define IS_NOPOLLING_TYPE(ha)	((IS_QLA25XX(ha) || IS_QLA81XX(ha) || \
+			IS_QLA83XX(ha)) && (ha)->flags.msix_enabled)
+#define IS_FAC_REQUIRED(ha)	(IS_QLA81XX(ha) || IS_QLA83XX(ha))
+#define IS_NOCACHE_VPD_TYPE(ha)	(IS_QLA81XX(ha) || IS_QLA83XX(ha))
 #define IS_ALOGIO_CAPABLE(ha)	(IS_QLA23XX(ha) || IS_FWI2_CAPABLE(ha))
 
 #define IS_T10_PI_CAPABLE(ha)   ((ha)->device_type & DT_T10_PI)
@@ -2583,6 +2612,8 @@ struct qla_hw_data {
 #define IS_ZIO_SUPPORTED(ha)    ((ha)->device_type & DT_ZIO_SUPPORTED)
 #define IS_OEM_001(ha)          ((ha)->device_type & DT_OEM_001)
 #define HAS_EXTENDED_IDS(ha)    ((ha)->device_type & DT_EXTENDED_IDS)
+#define IS_CT6_SUPPORTED(ha)	((ha)->device_type & DT_CT6_SUPPORTED)
+#define IS_MQUE_CAPABLE(ha)	((ha)->mqenable || IS_QLA83XX(ha))
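
/*
 * How the identity tests compose (hedged reading): device_type holds one
 * DT_ISPxxxx identity bit below DT_ISP_LAST plus capability bits such as
 * DT_T10_PI above it, and DT_MASK() strips the capability bits. Under
 * that assumption, IS_QLA83XX() reduces to:
 */
static inline int example_is_qla83xx(struct qla_hw_data *ha)
{
	return !!(ha->device_type & (DT_ISP2031 | DT_ISP8031));
}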
 
 	/* HBA serial number */
 	uint8_t		serial0;
@@ -2621,10 +2652,6 @@ struct qla_hw_data {
 	void		*sfp_data;
 	dma_addr_t	sfp_data_dma;
 
-	uint8_t		*edc_data;
-	dma_addr_t	edc_data_dma;
-	uint16_t	edc_data_len;
-
 #define XGMAC_DATA_SIZE	4096
 	void		*xgmac_data;
 	dma_addr_t	xgmac_data_dma;
@@ -2653,6 +2680,8 @@ struct qla_hw_data {
 	void		*async_pd;
 	dma_addr_t	async_pd_dma;
 
+	void		*swl;
+
 	/* These are used by mailbox operations. */
 	volatile uint16_t mailbox_out[MAILBOX_REGISTER_COUNT];
 
@@ -2674,6 +2703,8 @@ struct qla_hw_data {
 	uint16_t	fw_minor_version;
 	uint16_t	fw_subminor_version;
 	uint16_t	fw_attributes;
+	uint16_t	fw_attributes_h;
+	uint16_t	fw_attributes_ext[2];
 	uint32_t	fw_memory_size;
 	uint32_t	fw_transfer_size;
 	uint32_t	fw_srisc_address;
@@ -2851,7 +2882,6 @@ typedef struct scsi_qla_host {
 	volatile struct {
 		uint32_t	init_done		:1;
 		uint32_t	online			:1;
-		uint32_t	rscn_queue_overflow	:1;
 		uint32_t	reset_active		:1;
 
 		uint32_t	management_server_logged_in :1;
@@ -2905,11 +2935,6 @@ typedef struct scsi_qla_host {
 
 
 
-	/* RSCN queue. */
-	uint32_t rscn_queue[MAX_RSCN_COUNT];
-	uint8_t rscn_in_ptr;
-	uint8_t rscn_out_ptr;
-
 	/* Timeout timers. */
 	uint8_t         loop_down_abort_time;    /* port down timer */
 	atomic_t        loop_down_timer;         /* loop down timer */
@@ -3005,7 +3030,6 @@ typedef struct scsi_qla_host {
 #define QLA_ABORTED			0x105
 #define QLA_SUSPENDED			0x106
 #define QLA_BUSY			0x107
-#define QLA_RSCNS_HANDLED		0x108
 #define QLA_ALREADY_REGISTERED		0x109
 
 #define NVRAM_DELAY()		udelay(10)
@@ -3021,6 +3045,7 @@ typedef struct scsi_qla_host {
 #define OPTROM_SIZE_25XX	0x200000
 #define OPTROM_SIZE_81XX	0x400000
 #define OPTROM_SIZE_82XX	0x800000
+#define OPTROM_SIZE_83XX	0x1000000
 
 #define OPTROM_BURST_SIZE	0x1000
 #define OPTROM_BURST_DWORDS	(OPTROM_BURST_SIZE / 4)
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index 0b4c2b794c6f..499c74e39ee5 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -114,7 +114,7 @@ qla2x00_dfs_setup(scsi_qla_host_t *vha)
 {
 	struct qla_hw_data *ha = vha->hw;
 
-	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))
+	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha))
 		goto out;
 	if (!ha->fce)
 		goto out;
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index aa69486dc064..6d7d7758c797 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -1327,6 +1327,11 @@ struct qla_flt_header {
 #define FLT_REG_GOLD_FW		0x2f
 #define FLT_REG_FCP_PRIO_0	0x87
 #define FLT_REG_FCP_PRIO_1	0x88
+#define FLT_REG_FCOE_FW		0xA4
+#define FLT_REG_FCOE_VPD_0	0xA9
+#define FLT_REG_FCOE_NVRAM_0	0xAA
+#define FLT_REG_FCOE_VPD_1	0xAB
+#define FLT_REG_FCOE_NVRAM_1	0xAC
 
 struct qla_flt_region {
 	uint32_t code;
@@ -1494,6 +1499,11 @@ struct access_chip_rsp_84xx {
 #define MBC_GET_XGMAC_STATS	0x7a
 #define MBC_GET_DCBX_PARAMS	0x51
 
+/*
+ * ISP83xx mailbox commands
+ */
+#define MBC_WRITE_REMOTE_REG 0x0001 /* Write remote register */
+
 /* Flash access control option field bit definitions */
 #define FAC_OPT_FORCE_SEMAPHORE		BIT_15
 #define FAC_OPT_REQUESTOR_ID		BIT_14
@@ -1875,4 +1885,7 @@ struct qla_fcp_prio_cfg {
 #define FA_NPIV_CONF0_ADDR_81	0xD1000
 #define FA_NPIV_CONF1_ADDR_81	0xD2000
 
+/* 83XX Flash locations -- occupies second 8MB region. */
+#define FA_FLASH_LAYOUT_ADDR_83	0xFC400
+
 #endif
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 408679be8fdf..9f065804bd12 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -71,8 +71,6 @@ extern void qla2x00_async_logout_done(struct scsi_qla_host *, fc_port_t *,
     uint16_t *);
 extern void qla2x00_async_adisc_done(struct scsi_qla_host *, fc_port_t *,
     uint16_t *);
-extern void qla2x00_async_tm_cmd_done(struct scsi_qla_host *, fc_port_t *,
-	struct srb_iocb *);
 extern void *qla2x00_alloc_iocbs(struct scsi_qla_host *, srb_t *);
 extern int qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *, fc_port_t *);
 
@@ -156,8 +154,7 @@ extern void qla2x00_do_dpc_all_vps(scsi_qla_host_t *);
 extern int qla24xx_vport_create_req_sanity_check(struct fc_vport *);
 extern scsi_qla_host_t * qla24xx_create_vhost(struct fc_vport *);
 
-extern void qla2x00_sp_compl(struct qla_hw_data *, srb_t *);
-
+extern void qla2x00_sp_free_dma(void *, void *);
 extern char *qla2x00_get_fw_version_str(struct scsi_qla_host *, char *);
 
 extern void qla2x00_mark_device_lost(scsi_qla_host_t *, fc_port_t *, int, int);
@@ -205,8 +202,7 @@ extern int
 qla2x00_execute_fw(scsi_qla_host_t *, uint32_t);
 
 extern int
-qla2x00_get_fw_version(scsi_qla_host_t *, uint16_t *, uint16_t *, uint16_t *,
-    uint16_t *, uint32_t *, uint8_t *, uint32_t *, uint8_t *);
+qla2x00_get_fw_version(scsi_qla_host_t *);
 
 extern int
 qla2x00_get_fw_options(scsi_qla_host_t *, uint16_t *);
@@ -371,6 +367,9 @@ qla81xx_get_port_config(scsi_qla_host_t *, uint16_t *);
 extern int
 qla81xx_set_port_config(scsi_qla_host_t *, uint16_t *);
 
+extern int
+qla2x00_port_logout(scsi_qla_host_t *, struct fc_port *);
+
 /*
  * Global Function Prototypes in qla_isr.c source file.
  */
@@ -409,8 +408,10 @@ extern void qla2x00_beacon_blink(struct scsi_qla_host *);
 extern int qla24xx_beacon_on(struct scsi_qla_host *);
 extern int qla24xx_beacon_off(struct scsi_qla_host *);
 extern void qla24xx_beacon_blink(struct scsi_qla_host *);
+extern void qla83xx_beacon_blink(struct scsi_qla_host *);
 extern int qla82xx_beacon_on(struct scsi_qla_host *);
 extern int qla82xx_beacon_off(struct scsi_qla_host *);
+extern int qla83xx_write_remote_reg(struct scsi_qla_host *, uint32_t, uint32_t);
 
 extern uint8_t *qla2x00_read_optrom_data(struct scsi_qla_host *, uint8_t *,
     uint32_t, uint32_t);
@@ -541,6 +542,10 @@ extern int qla82xx_restart_isp(scsi_qla_host_t *);
 
 /* IOCB related functions */
 extern int qla82xx_start_scsi(srb_t *);
+extern void qla2x00_sp_free(void *, void *);
+extern void qla2x00_sp_timeout(unsigned long);
+extern void qla2x00_bsg_job_done(void *, void *, int);
+extern void qla2x00_bsg_sp_free(void *, void *);
 
 /* Interrupt related */
 extern irqreturn_t qla82xx_intr_handler(int, void *);
@@ -576,6 +581,8 @@ extern void qla82xx_start_iocbs(scsi_qla_host_t *);
 extern int qla82xx_fcoe_ctx_reset(scsi_qla_host_t *);
 extern int qla82xx_check_md_needed(scsi_qla_host_t *);
 extern void qla82xx_chip_reset_cleanup(scsi_qla_host_t *);
+extern int qla81xx_set_led_config(scsi_qla_host_t *, uint16_t *);
+extern int qla81xx_get_led_config(scsi_qla_host_t *, uint16_t *);
 extern int qla82xx_mbx_beacon_ctl(scsi_qla_host_t *, int);
 extern char *qdev_state(uint32_t);
 extern void qla82xx_clear_pending_mbx(scsi_qla_host_t *);
@@ -589,6 +596,9 @@ extern int qla2x00_issue_iocb_timeout(scsi_qla_host_t *, void *,
 extern int qla2x00_get_idma_speed(scsi_qla_host_t *, uint16_t,
 	uint16_t *, uint16_t *);
 
+/* 83xx related functions */
+extern void qla83xx_fw_dump(scsi_qla_host_t *, int);
+
 /* Minidump related functions */
 extern int qla82xx_md_get_template_size(scsi_qla_host_t *);
 extern int qla82xx_md_get_template(scsi_qla_host_t *);
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 4aea4ae23300..3128f80441f5 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -240,6 +240,12 @@ qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
 	return (rval);
 }
 
+static inline int
+qla2x00_gid_pt_rsp_size(scsi_qla_host_t *vha)
+{
+	return vha->hw->max_fibre_devices * 4 + 16;
+}
+
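/*
 * Worked example of the sizing: a GID_PT response is a 16-byte CT header
 * plus one 4-byte port-ID entry per device. An ISP24xx-class HBA with
 * max_fibre_devices = 2048 therefore needs 2048 * 4 + 16 = 8208 bytes,
 * where the old fixed GID_PT_RSP_SIZE covered only 512 entries (2064
 * bytes).
 */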
 /**
  * qla2x00_gid_pt() - SNS scan for fabric devices via GID_PT command.
  * @ha: HA context
@@ -261,20 +267,21 @@ qla2x00_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
 
 	struct ct_sns_gid_pt_data *gid_data;
 	struct qla_hw_data *ha = vha->hw;
+	uint16_t gid_pt_rsp_size;
 
 	if (IS_QLA2100(ha) || IS_QLA2200(ha))
 		return qla2x00_sns_gid_pt(vha, list);
 
 	gid_data = NULL;
-
+	gid_pt_rsp_size = qla2x00_gid_pt_rsp_size(vha);
 	/* Issue GID_PT */
 	/* Prepare common MS IOCB */
 	ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GID_PT_REQ_SIZE,
-	    GID_PT_RSP_SIZE);
+	    gid_pt_rsp_size);
 
 	/* Prepare CT request */
 	ct_req = qla2x00_prep_ct_req(&ha->ct_sns->p.req, GID_PT_CMD,
-	    GID_PT_RSP_SIZE);
+	    gid_pt_rsp_size);
 	ct_rsp = &ha->ct_sns->p.rsp;
 
 	/* Prepare CT arguments -- port_type */
@@ -292,7 +299,7 @@ qla2x00_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
 		rval = QLA_FUNCTION_FAILED;
 	} else {
 		/* Set port IDs in switch info list. */
-		for (i = 0; i < MAX_FIBRE_DEVICES; i++) {
+		for (i = 0; i < ha->max_fibre_devices; i++) {
 			gid_data = &ct_rsp->rsp.gid_pt.entries[i];
 			list[i].d_id.b.domain = gid_data->port_id[0];
 			list[i].d_id.b.area = gid_data->port_id[1];
@@ -313,7 +320,7 @@ qla2x00_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
 		 * single call.  Return a failed status, and let GA_NXT handle
 		 * the overload.
 		 */
-		if (i == MAX_FIBRE_DEVICES)
+		if (i == ha->max_fibre_devices)
 			rval = QLA_FUNCTION_FAILED;
 	}
 
@@ -330,7 +337,7 @@ qla2x00_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
 int
 qla2x00_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
 {
-	int		rval;
+	int		rval = QLA_SUCCESS;
 	uint16_t	i;
 
 	ms_iocb_entry_t	*ms_pkt;
@@ -341,7 +348,7 @@ qla2x00_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
 	if (IS_QLA2100(ha) || IS_QLA2200(ha))
 		return qla2x00_sns_gpn_id(vha, list);
 
-	for (i = 0; i < MAX_FIBRE_DEVICES; i++) {
+	for (i = 0; i < ha->max_fibre_devices; i++) {
 		/* Issue GPN_ID */
 		/* Prepare common MS IOCB */
 		ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GPN_ID_REQ_SIZE,
@@ -364,9 +371,11 @@ qla2x00_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
 			/*EMPTY*/
 			ql_dbg(ql_dbg_disc, vha, 0x2056,
 			    "GPN_ID issue IOCB failed (%d).\n", rval);
+			break;
 		} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
 		    "GPN_ID") != QLA_SUCCESS) {
 			rval = QLA_FUNCTION_FAILED;
+			break;
 		} else {
 			/* Save portname */
 			memcpy(list[i].port_name,
@@ -391,7 +400,7 @@ qla2x00_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
 int
 qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
 {
-	int		rval;
+	int		rval = QLA_SUCCESS;
 	uint16_t	i;
 	struct qla_hw_data *ha = vha->hw;
 	ms_iocb_entry_t	*ms_pkt;
@@ -401,7 +410,7 @@ qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
 	if (IS_QLA2100(ha) || IS_QLA2200(ha))
 		return qla2x00_sns_gnn_id(vha, list);
 
-	for (i = 0; i < MAX_FIBRE_DEVICES; i++) {
+	for (i = 0; i < ha->max_fibre_devices; i++) {
 		/* Issue GNN_ID */
 		/* Prepare common MS IOCB */
 		ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GNN_ID_REQ_SIZE,
@@ -424,9 +433,11 @@ qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
 			/*EMPTY*/
 			ql_dbg(ql_dbg_disc, vha, 0x2057,
 			    "GNN_ID issue IOCB failed (%d).\n", rval);
+			break;
 		} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
 		    "GNN_ID") != QLA_SUCCESS) {
 			rval = QLA_FUNCTION_FAILED;
+			break;
 		} else {
 			/* Save nodename */
 			memcpy(list[i].node_name,
@@ -735,7 +746,7 @@ qla2x00_prep_sns_cmd(scsi_qla_host_t *vha, uint16_t cmd, uint16_t scmd_len,
 static int
 qla2x00_sns_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
 {
-	int		rval;
+	int		rval = QLA_SUCCESS;
 	struct qla_hw_data *ha = vha->hw;
 	struct sns_cmd_pkt	*sns_cmd;
 
@@ -814,11 +825,14 @@ qla2x00_sns_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
 	uint16_t	i;
 	uint8_t		*entry;
 	struct sns_cmd_pkt	*sns_cmd;
+	uint16_t gid_pt_sns_data_size;
+
+	gid_pt_sns_data_size = qla2x00_gid_pt_rsp_size(vha);
 
 	/* Issue GID_PT. */
 	/* Prepare SNS command request. */
 	sns_cmd = qla2x00_prep_sns_cmd(vha, GID_PT_CMD, GID_PT_SNS_SCMD_LEN,
-	    GID_PT_SNS_DATA_SIZE);
+	    gid_pt_sns_data_size);
 
 	/* Prepare SNS command arguments -- port_type. */
 	sns_cmd->p.cmd.param[0] = NS_NX_PORT_TYPE;
@@ -839,7 +853,7 @@ qla2x00_sns_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
 		rval = QLA_FUNCTION_FAILED;
 	} else {
 		/* Set port IDs in switch info list. */
-		for (i = 0; i < MAX_FIBRE_DEVICES; i++) {
+		for (i = 0; i < ha->max_fibre_devices; i++) {
 			entry = &sns_cmd->p.gid_data[(i * 4) + 16];
 			list[i].d_id.b.domain = entry[1];
 			list[i].d_id.b.area = entry[2];
@@ -858,7 +872,7 @@ qla2x00_sns_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
 		 * single call.  Return a failed status, and let GA_NXT handle
 		 * the overload.
 		 */
-		if (i == MAX_FIBRE_DEVICES)
+		if (i == ha->max_fibre_devices)
 			rval = QLA_FUNCTION_FAILED;
 	}
 
@@ -877,12 +891,12 @@ qla2x00_sns_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
 static int
 qla2x00_sns_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
 {
-	int		rval;
+	int		rval = QLA_SUCCESS;
 	struct qla_hw_data *ha = vha->hw;
 	uint16_t	i;
 	struct sns_cmd_pkt	*sns_cmd;
 
-	for (i = 0; i < MAX_FIBRE_DEVICES; i++) {
+	for (i = 0; i < ha->max_fibre_devices; i++) {
 		/* Issue GPN_ID */
 		/* Prepare SNS command request. */
 		sns_cmd = qla2x00_prep_sns_cmd(vha, GPN_ID_CMD,
@@ -933,12 +947,12 @@ qla2x00_sns_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
 static int
 qla2x00_sns_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
 {
-	int		rval;
+	int		rval = QLA_SUCCESS;
 	struct qla_hw_data *ha = vha->hw;
 	uint16_t	i;
 	struct sns_cmd_pkt	*sns_cmd;
 
-	for (i = 0; i < MAX_FIBRE_DEVICES; i++) {
+	for (i = 0; i < ha->max_fibre_devices; i++) {
 		/* Issue GNN_ID */
 		/* Prepare SNS command request. */
 		sns_cmd = qla2x00_prep_sns_cmd(vha, GNN_ID_CMD,
@@ -1107,20 +1121,26 @@ qla2x00_sns_rnn_id(scsi_qla_host_t *vha)
 static int
 qla2x00_mgmt_svr_login(scsi_qla_host_t *vha)
 {
-	int ret;
+	int ret, rval;
 	uint16_t mb[MAILBOX_REGISTER_COUNT];
 	struct qla_hw_data *ha = vha->hw;
 	ret = QLA_SUCCESS;
 	if (vha->flags.management_server_logged_in)
 		return ret;
 
-	ha->isp_ops->fabric_login(vha, vha->mgmt_svr_loop_id, 0xff, 0xff, 0xfa,
-	    mb, BIT_1|BIT_0);
-	if (mb[0] != MBS_COMMAND_COMPLETE) {
-		ql_dbg(ql_dbg_disc, vha, 0x2024,
-		    "Failed management_server login: loopid=%x mb[0]=%x "
-		    "mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x.\n",
-		    vha->mgmt_svr_loop_id, mb[0], mb[1], mb[2], mb[6], mb[7]);
+	rval = ha->isp_ops->fabric_login(vha, vha->mgmt_svr_loop_id, 0xff, 0xff,
+	    0xfa, mb, BIT_1|BIT_0);
+	if (rval != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
+		if (rval == QLA_MEMORY_ALLOC_FAILED)
+			ql_dbg(ql_dbg_disc, vha, 0x2085,
+			    "Failed management_server login: loopid=%x "
+			    "rval=%d\n", vha->mgmt_svr_loop_id, rval);
+		else
+			ql_dbg(ql_dbg_disc, vha, 0x2024,
+			    "Failed management_server login: loopid=%x "
+			    "mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x.\n",
+			    vha->mgmt_svr_loop_id, mb[0], mb[1], mb[2], mb[6],
+			    mb[7]);
 		ret = QLA_FUNCTION_FAILED;
 	} else
 		vha->flags.management_server_logged_in = 1;
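
The hunk above is one instance of a pattern this patch applies to every fabric_login call site (qla2x00_configure_fabric() and qla24xx_configure_vhba() later in the diff get the same treatment): the mailbox status in mb[0] is only meaningful when the call itself succeeded, since fabric_login can now fail outright (for example with QLA_MEMORY_ALLOC_FAILED) before any mailbox exchange happens. A minimal sketch of the converted call shape, with the surrounding function elided:

	rval = ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff, 0xfc,
	    mb, BIT_1|BIT_0);
	if (rval != QLA_SUCCESS)
		return rval;			/* command never reached the ISP */
	if (mb[0] != MBS_COMMAND_COMPLETE)
		return QLA_FUNCTION_FAILED;	/* firmware rejected the login */
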
@@ -1547,7 +1567,7 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
 	eiter = (struct ct_fdmi_port_attr *) (entries + size);
 	eiter->type = __constant_cpu_to_be16(FDMI_PORT_SUPPORT_SPEED);
 	eiter->len = __constant_cpu_to_be16(4 + 4);
-	if (IS_QLA8XXX_TYPE(ha))
+	if (IS_CNA_CAPABLE(ha))
 		eiter->a.sup_speed = __constant_cpu_to_be32(
 		    FDMI_PORT_SPEED_10GB);
 	else if (IS_QLA25XX(ha))
@@ -1594,6 +1614,10 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
 		eiter->a.cur_speed =
 		    __constant_cpu_to_be32(FDMI_PORT_SPEED_10GB);
 		break;
+	case PORT_SPEED_16GB:
+		eiter->a.cur_speed =
+		    __constant_cpu_to_be32(FDMI_PORT_SPEED_16GB);
+		break;
 	default:
 		eiter->a.cur_speed =
 		    __constant_cpu_to_be32(FDMI_PORT_SPEED_UNKNOWN);
@@ -1724,7 +1748,7 @@ qla2x00_fdmi_register(scsi_qla_host_t *vha)
 int
 qla2x00_gfpn_id(scsi_qla_host_t *vha, sw_info_t *list)
 {
-	int		rval;
+	int		rval = QLA_SUCCESS;
 	uint16_t	i;
 	struct qla_hw_data *ha = vha->hw;
 	ms_iocb_entry_t	*ms_pkt;
@@ -1734,7 +1758,7 @@ qla2x00_gfpn_id(scsi_qla_host_t *vha, sw_info_t *list)
 	if (!IS_IIDMA_CAPABLE(ha))
 		return QLA_FUNCTION_FAILED;
 
-	for (i = 0; i < MAX_FIBRE_DEVICES; i++) {
+	for (i = 0; i < ha->max_fibre_devices; i++) {
 		/* Issue GFPN_ID */
 		/* Prepare common MS IOCB */
 		ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GFPN_ID_REQ_SIZE,
@@ -1757,9 +1781,11 @@ qla2x00_gfpn_id(scsi_qla_host_t *vha, sw_info_t *list)
 			/*EMPTY*/
 			ql_dbg(ql_dbg_disc, vha, 0x2023,
 			    "GFPN_ID issue IOCB failed (%d).\n", rval);
+			break;
 		} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
 		    "GFPN_ID") != QLA_SUCCESS) {
 			rval = QLA_FUNCTION_FAILED;
+			break;
 		} else {
 			/* Save fabric portname */
 			memcpy(list[i].fabric_port_name,
@@ -1846,7 +1872,7 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
 	if (rval)
 		return rval;
 
-	for (i = 0; i < MAX_FIBRE_DEVICES; i++) {
+	for (i = 0; i < ha->max_fibre_devices; i++) {
 		/* Issue GFPN_ID */
 		/* Prepare common MS IOCB */
 		ms_pkt = qla24xx_prep_ms_fm_iocb(vha, GPSC_REQ_SIZE,
@@ -1947,7 +1973,7 @@ qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list)
 	struct qla_hw_data *ha = vha->hw;
 	uint8_t fcp_scsi_features = 0;
 
-	for (i = 0; i < MAX_FIBRE_DEVICES; i++) {
+	for (i = 0; i < ha->max_fibre_devices; i++) {
 		/* Set default FC4 Type as UNKNOWN so the default is to
 		 * Process this port */
 		list[i].fc4_type = FC4_TYPE_UNKNOWN;
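
Throughout qla_gs.c the compile-time MAX_FIBRE_DEVICES and GID_PT_RSP_SIZE constants give way to the per-adapter ha->max_fibre_devices, so the CT response buffer is sized at runtime: each accepted GID_PT entry is 4 bytes (a control byte plus a 3-byte port ID) and the CT IU header in front of the entries is 16 bytes, which is what qla2x00_gid_pt_rsp_size() computes. A standalone sketch of the arithmetic; the device limits used below are illustrative, the real per-ISP values live in qla_def.h and are not part of this excerpt:

	#include <stdio.h>

	/* 4 bytes per GID_PT entry plus the 16-byte CT IU header */
	static int gid_pt_rsp_size(int max_fibre_devices)
	{
		return max_fibre_devices * 4 + 16;
	}

	int main(void)
	{
		/* illustrative limits only */
		printf("loop-only adapter: %d bytes\n", gid_pt_rsp_size(128));
		printf("fabric adapter:    %d bytes\n", gid_pt_rsp_size(2048));
		return 0;
	}
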
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 1fa067e053d2..b9465643396b 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -29,7 +29,6 @@ static int qla2x00_configure_loop(scsi_qla_host_t *);
 static int qla2x00_configure_local_loop(scsi_qla_host_t *);
 static int qla2x00_configure_fabric(scsi_qla_host_t *);
 static int qla2x00_find_all_fabric_devs(scsi_qla_host_t *, struct list_head *);
-static int qla2x00_device_resync(scsi_qla_host_t *);
 static int qla2x00_fabric_dev_login(scsi_qla_host_t *, fc_port_t *,
     uint16_t *);
 
@@ -41,11 +40,10 @@ static int qla25xx_init_queues(struct qla_hw_data *);
 
 /* SRB Extensions ---------------------------------------------------------- */
 
-static void
-qla2x00_ctx_sp_timeout(unsigned long __data)
+void
+qla2x00_sp_timeout(unsigned long __data)
 {
 	srb_t *sp = (srb_t *)__data;
-	struct srb_ctx *ctx;
 	struct srb_iocb *iocb;
 	fc_port_t *fcport = sp->fcport;
 	struct qla_hw_data *ha = fcport->vha->hw;
@@ -55,79 +53,25 @@ qla2x00_ctx_sp_timeout(unsigned long __data)
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 	req = ha->req_q_map[0];
 	req->outstanding_cmds[sp->handle] = NULL;
-	ctx = sp->ctx;
-	iocb = ctx->u.iocb_cmd;
+	iocb = &sp->u.iocb_cmd;
 	iocb->timeout(sp);
-	iocb->free(sp);
+	sp->free(fcport->vha, sp);
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 }
 
-static void
-qla2x00_ctx_sp_free(srb_t *sp)
+void
+qla2x00_sp_free(void *data, void *ptr)
 {
-	struct srb_ctx *ctx = sp->ctx;
-	struct srb_iocb *iocb = ctx->u.iocb_cmd;
-	struct scsi_qla_host *vha = sp->fcport->vha;
+	srb_t *sp = (srb_t *)ptr;
+	struct srb_iocb *iocb = &sp->u.iocb_cmd;
+	struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
 
 	del_timer(&iocb->timer);
-	kfree(iocb);
-	kfree(ctx);
-	mempool_free(sp, sp->fcport->vha->hw->srb_mempool);
+	mempool_free(sp, vha->hw->srb_mempool);
 
 	QLA_VHA_MARK_NOT_BUSY(vha);
 }
 
-inline srb_t *
-qla2x00_get_ctx_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size,
-    unsigned long tmo)
-{
-	srb_t *sp = NULL;
-	struct qla_hw_data *ha = vha->hw;
-	struct srb_ctx *ctx;
-	struct srb_iocb *iocb;
-	uint8_t bail;
-
-	QLA_VHA_MARK_BUSY(vha, bail);
-	if (bail)
-		return NULL;
-
-	sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL);
-	if (!sp)
-		goto done;
-	ctx = kzalloc(size, GFP_KERNEL);
-	if (!ctx) {
-		mempool_free(sp, ha->srb_mempool);
-		sp = NULL;
-		goto done;
-	}
-	iocb = kzalloc(sizeof(struct srb_iocb), GFP_KERNEL);
-	if (!iocb) {
-		mempool_free(sp, ha->srb_mempool);
-		sp = NULL;
-		kfree(ctx);
-		goto done;
-	}
-
-	memset(sp, 0, sizeof(*sp));
-	sp->fcport = fcport;
-	sp->ctx = ctx;
-	ctx->iocbs = 1;
-	ctx->u.iocb_cmd = iocb;
-	iocb->free = qla2x00_ctx_sp_free;
-
-	init_timer(&iocb->timer);
-	if (!tmo)
-		goto done;
-	iocb->timer.expires = jiffies + tmo * HZ;
-	iocb->timer.data = (unsigned long)sp;
-	iocb->timer.function = qla2x00_ctx_sp_timeout;
-	add_timer(&iocb->timer);
-done:
-	if (!sp)
-		QLA_VHA_MARK_NOT_BUSY(vha);
-	return sp;
-}
-
 /* Asynchronous Login/Logout Routines -------------------------------------- */
 
 static inline unsigned long
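
The deleted qla2x00_get_ctx_sp() needed three separate allocations (the srb from a mempool plus kzalloc'ed srb_ctx and srb_iocb) and three matching failure paths; the replacement qla2x00_get_sp(), added to qla_inline.h later in this diff, gets by with a single mempool_alloc() because the per-command state is now embedded in the srb itself. A rough sketch of the consolidated layout this series assumes; the authoritative definition lives in qla_def.h, which is not part of this excerpt:

	typedef struct srb {
		struct fc_port *fcport;
		uint32_t handle;
		uint16_t flags;
		uint16_t type;		/* SRB_LOGIN_CMD, SRB_TM_CMD, ... */
		char *name;		/* for log messages ("login", "tmf") */
		int iocbs;		/* request-queue entries consumed */
		union {
			struct srb_iocb iocb_cmd;	/* async login/logout/... */
			struct fc_bsg_job *bsg_job;	/* ELS/CT passthrough */
			struct srb_cmd scmd;		/* regular SCSI command */
		} u;
		void (*done)(void *vha, void *sp, int res);
		void (*free)(void *vha, void *sp);
	} srb_t;
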
@@ -149,19 +93,19 @@ qla2x00_get_async_timeout(struct scsi_qla_host *vha)
 }
 
 static void
-qla2x00_async_iocb_timeout(srb_t *sp)
+qla2x00_async_iocb_timeout(void *data)
 {
+	srb_t *sp = (srb_t *)data;
 	fc_port_t *fcport = sp->fcport;
-	struct srb_ctx *ctx = sp->ctx;
 
 	ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
 	    "Async-%s timeout - hdl=%x portid=%02x%02x%02x.\n",
-	    ctx->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
+	    sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
 	    fcport->d_id.b.al_pa);
 
 	fcport->flags &= ~FCF_ASYNC_SENT;
-	if (ctx->type == SRB_LOGIN_CMD) {
-		struct srb_iocb *lio = ctx->u.iocb_cmd;
+	if (sp->type == SRB_LOGIN_CMD) {
+		struct srb_iocb *lio = &sp->u.iocb_cmd;
 		qla2x00_post_async_logout_work(fcport->vha, fcport, NULL);
 		/* Retry as needed. */
 		lio->u.logio.data[0] = MBS_COMMAND_ERROR;
@@ -173,14 +117,16 @@ qla2x00_async_iocb_timeout(srb_t *sp)
 }
 
 static void
-qla2x00_async_login_ctx_done(srb_t *sp)
+qla2x00_async_login_sp_done(void *data, void *ptr, int res)
 {
-	struct srb_ctx *ctx = sp->ctx;
-	struct srb_iocb *lio = ctx->u.iocb_cmd;
-
-	qla2x00_post_async_login_done_work(sp->fcport->vha, sp->fcport,
-		lio->u.logio.data);
-	lio->free(sp);
+	srb_t *sp = (srb_t *)ptr;
+	struct srb_iocb *lio = &sp->u.iocb_cmd;
+	struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
+
+	if (!test_bit(UNLOADING, &vha->dpc_flags))
+		qla2x00_post_async_login_done_work(sp->fcport->vha, sp->fcport,
+		    lio->u.logio.data);
+	sp->free(sp->fcport->vha, sp);
 }
 
 int
@@ -188,22 +134,21 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
     uint16_t *data)
 {
 	srb_t *sp;
-	struct srb_ctx *ctx;
 	struct srb_iocb *lio;
 	int rval;
 
 	rval = QLA_FUNCTION_FAILED;
-	sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx),
-	    qla2x00_get_async_timeout(vha) + 2);
+	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
 	if (!sp)
 		goto done;
 
-	ctx = sp->ctx;
-	ctx->type = SRB_LOGIN_CMD;
-	ctx->name = "login";
-	lio = ctx->u.iocb_cmd;
+	sp->type = SRB_LOGIN_CMD;
+	sp->name = "login";
+	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+	lio = &sp->u.iocb_cmd;
 	lio->timeout = qla2x00_async_iocb_timeout;
-	lio->done = qla2x00_async_login_ctx_done;
+	sp->done = qla2x00_async_login_sp_done;
 	lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;
 	if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
 		lio->u.logio.flags |= SRB_LOGIN_RETRIED;
@@ -219,42 +164,43 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
 	return rval;
 
 done_free_sp:
-	lio->free(sp);
+	sp->free(fcport->vha, sp);
 done:
 	return rval;
 }
 
 static void
-qla2x00_async_logout_ctx_done(srb_t *sp)
+qla2x00_async_logout_sp_done(void *data, void *ptr, int res)
 {
-	struct srb_ctx *ctx = sp->ctx;
-	struct srb_iocb *lio = ctx->u.iocb_cmd;
-
-	qla2x00_post_async_logout_done_work(sp->fcport->vha, sp->fcport,
-	    lio->u.logio.data);
-	lio->free(sp);
+	srb_t *sp = (srb_t *)ptr;
+	struct srb_iocb *lio = &sp->u.iocb_cmd;
+	struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
+
+	if (!test_bit(UNLOADING, &vha->dpc_flags))
+		qla2x00_post_async_logout_done_work(sp->fcport->vha, sp->fcport,
+		    lio->u.logio.data);
+	sp->free(sp->fcport->vha, sp);
 }
 
 int
 qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
 {
 	srb_t *sp;
-	struct srb_ctx *ctx;
 	struct srb_iocb *lio;
 	int rval;
 
 	rval = QLA_FUNCTION_FAILED;
-	sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx),
-	    qla2x00_get_async_timeout(vha) + 2);
+	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
 	if (!sp)
 		goto done;
 
-	ctx = sp->ctx;
-	ctx->type = SRB_LOGOUT_CMD;
-	ctx->name = "logout";
-	lio = ctx->u.iocb_cmd;
+	sp->type = SRB_LOGOUT_CMD;
+	sp->name = "logout";
+	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+	lio = &sp->u.iocb_cmd;
 	lio->timeout = qla2x00_async_iocb_timeout;
-	lio->done = qla2x00_async_logout_ctx_done;
+	sp->done = qla2x00_async_logout_sp_done;
 	rval = qla2x00_start_sp(sp);
 	if (rval != QLA_SUCCESS)
 		goto done_free_sp;
@@ -266,20 +212,22 @@ qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
 	return rval;
 
 done_free_sp:
-	lio->free(sp);
+	sp->free(fcport->vha, sp);
 done:
 	return rval;
 }
 
 static void
-qla2x00_async_adisc_ctx_done(srb_t *sp)
+qla2x00_async_adisc_sp_done(void *data, void *ptr, int res)
 {
-	struct srb_ctx *ctx = sp->ctx;
-	struct srb_iocb *lio = ctx->u.iocb_cmd;
-
-	qla2x00_post_async_adisc_done_work(sp->fcport->vha, sp->fcport,
-	    lio->u.logio.data);
-	lio->free(sp);
+	srb_t *sp = (srb_t *)ptr;
+	struct srb_iocb *lio = &sp->u.iocb_cmd;
+	struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
+
+	if (!test_bit(UNLOADING, &vha->dpc_flags))
+		qla2x00_post_async_adisc_done_work(sp->fcport->vha, sp->fcport,
+		    lio->u.logio.data);
+	sp->free(sp->fcport->vha, sp);
 }
 
 int
@@ -287,22 +235,21 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
     uint16_t *data)
 {
 	srb_t *sp;
-	struct srb_ctx *ctx;
 	struct srb_iocb *lio;
 	int rval;
 
 	rval = QLA_FUNCTION_FAILED;
-	sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx),
-	    qla2x00_get_async_timeout(vha) + 2);
+	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
 	if (!sp)
 		goto done;
 
-	ctx = sp->ctx;
-	ctx->type = SRB_ADISC_CMD;
-	ctx->name = "adisc";
-	lio = ctx->u.iocb_cmd;
+	sp->type = SRB_ADISC_CMD;
+	sp->name = "adisc";
+	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+	lio = &sp->u.iocb_cmd;
 	lio->timeout = qla2x00_async_iocb_timeout;
-	lio->done = qla2x00_async_adisc_ctx_done;
+	sp->done = qla2x00_async_adisc_sp_done;
 	if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
 		lio->u.logio.flags |= SRB_LOGIN_RETRIED;
 	rval = qla2x00_start_sp(sp);
@@ -316,46 +263,62 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
 	return rval;
 
 done_free_sp:
-	lio->free(sp);
+	sp->free(fcport->vha, sp);
 done:
 	return rval;
 }
 
 static void
-qla2x00_async_tm_cmd_ctx_done(srb_t *sp)
+qla2x00_async_tm_cmd_done(void *data, void *ptr, int res)
 {
-	struct srb_ctx *ctx = sp->ctx;
-	struct srb_iocb *iocb = (struct srb_iocb *)ctx->u.iocb_cmd;
+	srb_t *sp = (srb_t *)ptr;
+	struct srb_iocb *iocb = &sp->u.iocb_cmd;
+	struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
+	uint32_t flags;
+	uint16_t lun;
+	int rval;
+
+	if (!test_bit(UNLOADING, &vha->dpc_flags)) {
+		flags = iocb->u.tmf.flags;
+		lun = (uint16_t)iocb->u.tmf.lun;
+
+		/* Issue Marker IOCB */
+		rval = qla2x00_marker(vha, vha->hw->req_q_map[0],
+			vha->hw->rsp_q_map[0], sp->fcport->loop_id, lun,
+			flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
 
-	qla2x00_async_tm_cmd_done(sp->fcport->vha, sp->fcport, iocb);
-	iocb->free(sp);
+		if ((rval != QLA_SUCCESS) || iocb->u.tmf.data) {
+			ql_dbg(ql_dbg_taskm, vha, 0x8030,
+			    "TM IOCB failed (%x).\n", rval);
+		}
+	}
+	sp->free(sp->fcport->vha, sp);
 }
 
 int
-qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
+qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t tm_flags, uint32_t lun,
 	uint32_t tag)
 {
 	struct scsi_qla_host *vha = fcport->vha;
 	srb_t *sp;
-	struct srb_ctx *ctx;
 	struct srb_iocb *tcf;
 	int rval;
 
 	rval = QLA_FUNCTION_FAILED;
-	sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx),
-	    qla2x00_get_async_timeout(vha) + 2);
+	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
 	if (!sp)
 		goto done;
 
-	ctx = sp->ctx;
-	ctx->type = SRB_TM_CMD;
-	ctx->name = "tmf";
-	tcf = ctx->u.iocb_cmd;
-	tcf->u.tmf.flags = flags;
+	sp->type = SRB_TM_CMD;
+	sp->name = "tmf";
+	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+	tcf = &sp->u.iocb_cmd;
+	tcf->u.tmf.flags = tm_flags;
 	tcf->u.tmf.lun = lun;
 	tcf->u.tmf.data = tag;
 	tcf->timeout = qla2x00_async_iocb_timeout;
-	tcf->done = qla2x00_async_tm_cmd_ctx_done;
+	sp->done = qla2x00_async_tm_cmd_done;
 
 	rval = qla2x00_start_sp(sp);
 	if (rval != QLA_SUCCESS)
@@ -368,7 +331,7 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
 	return rval;
 
 done_free_sp:
-	tcf->free(sp);
+	sp->free(fcport->vha, sp);
 done:
 	return rval;
 }
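
With the srb_ctx indirection gone, completion work hangs off sp->done and teardown off sp->free, and each *_sp_done handler above first checks the UNLOADING dpc flag so that no new DPC work is queued while the driver is tearing down. The task-management case also absorbs the old qla2x00_async_tm_cmd_done() helper (deleted just below), issuing the marker IOCB from the callback itself. The common shape, as a minimal sketch:

	static void example_sp_done(void *data, void *ptr, int res)
	{
		srb_t *sp = ptr;
		scsi_qla_host_t *vha = data;

		if (!test_bit(UNLOADING, &vha->dpc_flags)) {
			/* hand results to the DPC queue / issue marker here */
		}
		sp->free(sp->fcport->vha, sp);	/* always release the srb */
	}
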
@@ -387,6 +350,13 @@ qla2x00_async_login_done(struct scsi_qla_host *vha, fc_port_t *fcport,
 		 * requests.
 		 */
 		rval = qla2x00_get_port_database(vha, fcport, 0);
+		if (rval == QLA_NOT_LOGGED_IN) {
+			fcport->flags &= ~FCF_ASYNC_SENT;
+			fcport->flags |= FCF_LOGIN_NEEDED;
+			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+			break;
+		}
+
 		if (rval != QLA_SUCCESS) {
 			qla2x00_post_async_logout_work(vha, fcport, NULL);
 			qla2x00_post_async_login_work(vha, fcport, NULL);
@@ -452,30 +422,6 @@ qla2x00_async_adisc_done(struct scsi_qla_host *vha, fc_port_t *fcport,
 	return;
 }
 
-void
-qla2x00_async_tm_cmd_done(struct scsi_qla_host *vha, fc_port_t *fcport,
-    struct srb_iocb *iocb)
-{
-	int rval;
-	uint32_t flags;
-	uint16_t lun;
-
-	flags = iocb->u.tmf.flags;
-	lun = (uint16_t)iocb->u.tmf.lun;
-
-	/* Issue Marker IOCB */
-	rval = qla2x00_marker(vha, vha->hw->req_q_map[0],
-		vha->hw->rsp_q_map[0], fcport->loop_id, lun,
-		flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
-
-	if ((rval != QLA_SUCCESS) || iocb->u.tmf.data) {
-		ql_dbg(ql_dbg_taskm, vha, 0x8030,
-		    "TM IOCB failed (%x).\n", rval);
-	}
-
-	return;
-}
-
 /****************************************************************************/
 /*                QLogic ISP2x00 Hardware Support Functions.                */
 /****************************************************************************/
@@ -969,6 +915,9 @@ qla81xx_reset_mpi(scsi_qla_host_t *vha)
 {
 	uint16_t mb[4] = {0x1010, 0, 1, 0};
 
+	if (!IS_QLA81XX(vha->hw))
+		return QLA_SUCCESS;
+
 	return qla81xx_write_mpi_register(vha, mb);
 }
 
@@ -1262,7 +1211,9 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
 		mem_size = (ha->fw_memory_size - 0x11000 + 1) *
 		    sizeof(uint16_t);
 	} else if (IS_FWI2_CAPABLE(ha)) {
-		if (IS_QLA81XX(ha))
+		if (IS_QLA83XX(ha))
+			fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem);
+		else if (IS_QLA81XX(ha))
 			fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
 		else if (IS_QLA25XX(ha))
 			fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem);
@@ -1270,10 +1221,20 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
 			fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem);
 		mem_size = (ha->fw_memory_size - 0x100000 + 1) *
 		    sizeof(uint32_t);
-		if (ha->mqenable)
-			mq_size = sizeof(struct qla2xxx_mq_chain);
+		if (ha->mqenable) {
+			if (!IS_QLA83XX(ha))
+				mq_size = sizeof(struct qla2xxx_mq_chain);
+			/*
+			 * Allocate maximum buffer size for all queues.
+			 * Resizing must be done at end-of-dump processing.
+			 */
+			mq_size += ha->max_req_queues *
+			    (req->length * sizeof(request_t));
+			mq_size += ha->max_rsp_queues *
+			    (rsp->length * sizeof(response_t));
+		}
 		/* Allocate memory for Fibre Channel Event Buffer. */
-		if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))
+		if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha))
 			goto try_eft;
 
 		tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
@@ -1484,17 +1445,8 @@ enable_82xx_npiv:
 				fw_major_version = ha->fw_major_version;
 				if (IS_QLA82XX(ha))
 					qla82xx_check_md_needed(vha);
-				else {
-					rval = qla2x00_get_fw_version(vha,
-					    &ha->fw_major_version,
-					    &ha->fw_minor_version,
-					    &ha->fw_subminor_version,
-					    &ha->fw_attributes,
-					    &ha->fw_memory_size,
-					    ha->mpi_version,
-					    &ha->mpi_capabilities,
-					    ha->phy_version);
-				}
+				else
+					rval = qla2x00_get_fw_version(vha);
 				if (rval != QLA_SUCCESS)
 					goto failed;
 				ha->flags.npiv_supported = 0;
@@ -1535,6 +1487,9 @@ enable_82xx_npiv:
 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
 	}
 
+	if (IS_QLA83XX(ha))
+		goto skip_fac_check;
+
 	if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) {
 		uint32_t size;
 
@@ -1547,6 +1502,11 @@ enable_82xx_npiv:
 			    "Unsupported FAC firmware (%d.%02d.%02d).\n",
 			    ha->fw_major_version, ha->fw_minor_version,
 			    ha->fw_subminor_version);
+skip_fac_check:
+			if (IS_QLA83XX(ha)) {
+				ha->flags.fac_supported = 0;
+				rval = QLA_SUCCESS;
+			}
 		}
 	}
 failed:
@@ -1725,7 +1685,7 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
 	struct req_que *req = ha->req_q_map[0];
 	struct rsp_que *rsp = ha->rsp_q_map[0];
 
-/* Setup ring parameters in initialization control block. */
+	/* Setup ring parameters in initialization control block. */
 	icb = (struct init_cb_24xx *)ha->init_cb;
 	icb->request_q_outpointer = __constant_cpu_to_le16(0);
 	icb->response_q_inpointer = __constant_cpu_to_le16(0);
@@ -1736,7 +1696,7 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
 	icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
 	icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
 
-	if (ha->mqenable) {
+	if (ha->mqenable || IS_QLA83XX(ha)) {
 		icb->qos = __constant_cpu_to_le16(QLA_DEFAULT_QUE_QOS);
 		icb->rid = __constant_cpu_to_le16(rid);
 		if (ha->flags.msix_enabled) {
@@ -1756,7 +1716,8 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
 				__constant_cpu_to_le32(BIT_18);
 
 		/* Use Disable MSIX Handshake mode for capable adapters */
-		if (IS_MSIX_NACK_CAPABLE(ha)) {
+		if ((ha->fw_attributes & BIT_6) && (IS_MSIX_NACK_CAPABLE(ha)) &&
+		    (ha->flags.msix_enabled)) {
 			icb->firmware_options_2 &=
 				__constant_cpu_to_le32(~BIT_22);
 			ha->flags.disable_msix_handshake = 1;
@@ -1800,7 +1761,6 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
 	struct qla_hw_data *ha = vha->hw;
 	struct req_que *req;
 	struct rsp_que *rsp;
-	struct scsi_qla_host *vp;
 	struct mid_init_cb_24xx *mid_init_cb =
 	    (struct mid_init_cb_24xx *) ha->init_cb;
 
@@ -1831,11 +1791,6 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
 	}
 
 	spin_lock(&ha->vport_slock);
-	/* Clear RSCN queue. */
-	list_for_each_entry(vp, &ha->vp_list, list) {
-		vp->rscn_in_ptr = 0;
-		vp->rscn_out_ptr = 0;
-	}
 
 	spin_unlock(&ha->vport_slock);
 
@@ -2028,7 +1983,7 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
 	    &loop_id, &al_pa, &area, &domain, &topo, &sw_cap);
 	if (rval != QLA_SUCCESS) {
 		if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) ||
-		    IS_QLA8XXX_TYPE(ha) ||
+		    IS_CNA_CAPABLE(ha) ||
 		    (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) {
 			ql_dbg(ql_dbg_disc, vha, 0x2008,
 			    "Loop is in a transition state.\n");
@@ -2120,7 +2075,7 @@ qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
 	uint16_t index;
 	struct qla_hw_data *ha = vha->hw;
 	int use_tbl = !IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
-	    !IS_QLA8XXX_TYPE(ha);
+	    !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha);
 
 	if (memcmp(model, BINZERO, len) != 0) {
 		strncpy(ha->model_number, model, len);
@@ -2596,13 +2551,11 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
 	if (ha->current_topology == ISP_CFG_FL &&
 	    (test_bit(LOCAL_LOOP_UPDATE, &flags))) {
 
-		vha->flags.rscn_queue_overflow = 1;
 		set_bit(RSCN_UPDATE, &flags);
 
 	} else if (ha->current_topology == ISP_CFG_F &&
 	    (test_bit(LOCAL_LOOP_UPDATE, &flags))) {
 
-		vha->flags.rscn_queue_overflow = 1;
 		set_bit(RSCN_UPDATE, &flags);
 		clear_bit(LOCAL_LOOP_UPDATE, &flags);
 
@@ -2612,7 +2565,6 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
 	} else if (!vha->flags.online ||
 	    (test_bit(ABORT_ISP_ACTIVE, &flags))) {
 
-		vha->flags.rscn_queue_overflow = 1;
 		set_bit(RSCN_UPDATE, &flags);
 		set_bit(LOCAL_LOOP_UPDATE, &flags);
 	}
@@ -2622,8 +2574,7 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
 			ql_dbg(ql_dbg_disc, vha, 0x2015,
 			    "Loop resync needed, failing.\n");
 			rval = QLA_FUNCTION_FAILED;
-		}
-		else
+		} else
 			rval = qla2x00_configure_local_loop(vha);
 	}
 
@@ -2662,8 +2613,6 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
 			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
 		if (test_bit(RSCN_UPDATE, &save_flags)) {
 			set_bit(RSCN_UPDATE, &vha->dpc_flags);
-			if (!IS_ALOGIO_CAPABLE(ha))
-				vha->flags.rscn_queue_overflow = 1;
 		}
 	}
 
@@ -2699,7 +2648,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
 
 	found_devs = 0;
 	new_fcport = NULL;
-	entries = MAX_FIBRE_DEVICES;
+	entries = MAX_FIBRE_DEVICES_LOOP;
 
 	ql_dbg(ql_dbg_disc, vha, 0x2016,
 	    "Getting FCAL position map.\n");
@@ -2707,7 +2656,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
 		qla2x00_get_fcal_position_map(vha, NULL);
 
 	/* Get list of logged in devices. */
-	memset(ha->gid_list, 0, GID_LIST_SIZE);
+	memset(ha->gid_list, 0, qla2x00_gid_list_size(ha));
 	rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma,
 	    &entries);
 	if (rval != QLA_SUCCESS)
@@ -2971,7 +2920,7 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
 static int
 qla2x00_configure_fabric(scsi_qla_host_t *vha)
 {
-	int	rval, rval2;
+	int	rval;
 	fc_port_t	*fcport, *fcptemp;
 	uint16_t	next_loopid;
 	uint16_t	mb[MAILBOX_REGISTER_COUNT];
@@ -2995,12 +2944,6 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
 	}
 	vha->device_flags |= SWITCH_FOUND;
 
-	/* Mark devices that need re-synchronization. */
-	rval2 = qla2x00_device_resync(vha);
-	if (rval2 == QLA_RSCNS_HANDLED) {
-		/* No point doing the scan, just continue. */
-		return (QLA_SUCCESS);
-	}
 	do {
 		/* FDMI support. */
 		if (ql2xfdmienable &&
@@ -3012,8 +2955,12 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
 			loop_id = NPH_SNS;
 		else
 			loop_id = SIMPLE_NAME_SERVER;
-		ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff,
-		    0xfc, mb, BIT_1 | BIT_0);
+		rval = ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff,
+		    0xfc, mb, BIT_1|BIT_0);
+		if (rval != QLA_SUCCESS) {
+			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+			return rval;
+		}
 		if (mb[0] != MBS_COMMAND_COMPLETE) {
 			ql_dbg(ql_dbg_disc, vha, 0x2042,
 			    "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x "
@@ -3044,6 +2991,13 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
 			}
 		}
 
+#define QLA_FCPORT_SCAN		1
+#define QLA_FCPORT_FOUND	2
+
+		list_for_each_entry(fcport, &vha->vp_fcports, list) {
+			fcport->scan_state = QLA_FCPORT_SCAN;
+		}
+
 		rval = qla2x00_find_all_fabric_devs(vha, &new_fcports);
 		if (rval != QLA_SUCCESS)
 			break;
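
The QLA_FCPORT_SCAN/QLA_FCPORT_FOUND pair introduced here turns the fabric rescan into a mark-and-sweep: every known fabric port is first marked SCAN, qla2x00_find_all_fabric_devs() flips the ports the name server still reports to FOUND, and the sweep in the next hunk declares any port still marked SCAN while online as lost. This is what lets the patch delete the RSCN replay queue and qla2x00_device_resync() further down. The control flow, reduced to a standalone sketch:

	enum scan_state { FCPORT_SCAN = 1, FCPORT_FOUND = 2 };

	struct port {
		enum scan_state scan_state;
		int online;
	};

	static void rescan(struct port *ports, int n,
			   int (*still_present)(struct port *))
	{
		int i;

		for (i = 0; i < n; i++)		/* mark */
			ports[i].scan_state = FCPORT_SCAN;
		for (i = 0; i < n; i++)		/* discover */
			if (still_present(&ports[i]))
				ports[i].scan_state = FCPORT_FOUND;
		for (i = 0; i < n; i++)		/* sweep */
			if (ports[i].scan_state == FCPORT_SCAN &&
			    ports[i].online)
				ports[i].online = 0;	/* device lost */
	}
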
@@ -3059,7 +3013,8 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
 			if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
 				continue;
 
-			if (atomic_read(&fcport->state) == FCS_DEVICE_LOST) {
+			if (fcport->scan_state == QLA_FCPORT_SCAN &&
+			    atomic_read(&fcport->state) == FCS_ONLINE) {
 				qla2x00_mark_device_lost(vha, fcport,
 				    ql2xplogiabsentdevice, 0);
 				if (fcport->loop_id != FC_NO_LOOP_ID &&
@@ -3184,20 +3139,21 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
 	rval = QLA_SUCCESS;
 
 	/* Try GID_PT to get device list, else GAN. */
-	swl = kcalloc(MAX_FIBRE_DEVICES, sizeof(sw_info_t), GFP_KERNEL);
+	if (!ha->swl)
+		ha->swl = kcalloc(ha->max_fibre_devices, sizeof(sw_info_t),
+		    GFP_KERNEL);
+	swl = ha->swl;
 	if (!swl) {
 		/*EMPTY*/
 		ql_dbg(ql_dbg_disc, vha, 0x2054,
 		    "GID_PT allocations failed, fallback on GA_NXT.\n");
 	} else {
+		memset(swl, 0, ha->max_fibre_devices * sizeof(sw_info_t));
 		if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) {
-			kfree(swl);
 			swl = NULL;
 		} else if (qla2x00_gpn_id(vha, swl) != QLA_SUCCESS) {
-			kfree(swl);
 			swl = NULL;
 		} else if (qla2x00_gnn_id(vha, swl) != QLA_SUCCESS) {
-			kfree(swl);
 			swl = NULL;
 		} else if (ql2xiidmaenable &&
 		    qla2x00_gfpn_id(vha, swl) == QLA_SUCCESS) {
@@ -3215,7 +3171,6 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
 	if (new_fcport == NULL) {
 		ql_log(ql_log_warn, vha, 0x205e,
 		    "Failed to allocate memory for fcport.\n");
-		kfree(swl);
 		return (QLA_MEMORY_ALLOC_FAILED);
 	}
 	new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
@@ -3332,6 +3287,8 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
 			    WWN_SIZE))
 				continue;
 
+			fcport->scan_state = QLA_FCPORT_FOUND;
+
 			found++;
 
 			/* Update port state. */
@@ -3368,6 +3325,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
 			fcport->flags |= FCF_LOGIN_NEEDED;
 			if (fcport->loop_id != FC_NO_LOOP_ID &&
 			    (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
+			    (fcport->flags & FCF_ASYNC_SENT) == 0 &&
 			    fcport->port_type != FCT_INITIATOR &&
 			    fcport->port_type != FCT_BROADCAST) {
 				ha->isp_ops->fabric_logout(vha, fcport->loop_id,
@@ -3390,14 +3348,12 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
 		if (new_fcport == NULL) {
 			ql_log(ql_log_warn, vha, 0x2066,
 			    "Memory allocation failed for fcport.\n");
-			kfree(swl);
 			return (QLA_MEMORY_ALLOC_FAILED);
 		}
 		new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
 		new_fcport->d_id.b24 = nxt_d_id.b24;
 	}
 
-	kfree(swl);
 	kfree(new_fcport);
 
 	return (rval);
@@ -3470,6 +3426,9 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
 
 		/* If not in use then it is free to use. */
 		if (!found) {
+			ql_dbg(ql_dbg_disc, dev->vha, 0x2086,
+			    "Assigning new loopid=%x, portid=%x.\n",
+			    dev->loop_id, dev->d_id.b24);
 			break;
 		}
 
@@ -3488,110 +3447,6 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
 }
 
 /*
- * qla2x00_device_resync
- *	Marks devices in the database that needs resynchronization.
- *
- * Input:
- *	ha = adapter block pointer.
- *
- * Context:
- *	Kernel context.
- */
-static int
-qla2x00_device_resync(scsi_qla_host_t *vha)
-{
-	int	rval;
-	uint32_t mask;
-	fc_port_t *fcport;
-	uint32_t rscn_entry;
-	uint8_t rscn_out_iter;
-	uint8_t format;
-	port_id_t d_id = {};
-
-	rval = QLA_RSCNS_HANDLED;
-
-	while (vha->rscn_out_ptr != vha->rscn_in_ptr ||
-	    vha->flags.rscn_queue_overflow) {
-
-		rscn_entry = vha->rscn_queue[vha->rscn_out_ptr];
-		format = MSB(MSW(rscn_entry));
-		d_id.b.domain = LSB(MSW(rscn_entry));
-		d_id.b.area = MSB(LSW(rscn_entry));
-		d_id.b.al_pa = LSB(LSW(rscn_entry));
-
-		ql_dbg(ql_dbg_disc, vha, 0x2020,
-		    "RSCN queue entry[%d] = [%02x/%02x%02x%02x].\n",
-		    vha->rscn_out_ptr, format, d_id.b.domain, d_id.b.area,
-		    d_id.b.al_pa);
-
-		vha->rscn_out_ptr++;
-		if (vha->rscn_out_ptr == MAX_RSCN_COUNT)
-			vha->rscn_out_ptr = 0;
-
-		/* Skip duplicate entries. */
-		for (rscn_out_iter = vha->rscn_out_ptr;
-		    !vha->flags.rscn_queue_overflow &&
-		    rscn_out_iter != vha->rscn_in_ptr;
-		    rscn_out_iter = (rscn_out_iter ==
-			(MAX_RSCN_COUNT - 1)) ? 0: rscn_out_iter + 1) {
-
-			if (rscn_entry != vha->rscn_queue[rscn_out_iter])
-				break;
-
-			ql_dbg(ql_dbg_disc, vha, 0x2021,
-			    "Skipping duplicate RSCN queue entry found at "
-			    "[%d].\n", rscn_out_iter);
-
-			vha->rscn_out_ptr = rscn_out_iter;
-		}
-
-		/* Queue overflow, set switch default case. */
-		if (vha->flags.rscn_queue_overflow) {
-			ql_dbg(ql_dbg_disc, vha, 0x2022,
-			    "device_resync: rscn overflow.\n");
-
-			format = 3;
-			vha->flags.rscn_queue_overflow = 0;
-		}
-
-		switch (format) {
-		case 0:
-			mask = 0xffffff;
-			break;
-		case 1:
-			mask = 0xffff00;
-			break;
-		case 2:
-			mask = 0xff0000;
-			break;
-		default:
-			mask = 0x0;
-			d_id.b24 = 0;
-			vha->rscn_out_ptr = vha->rscn_in_ptr;
-			break;
-		}
-
-		rval = QLA_SUCCESS;
-
-		list_for_each_entry(fcport, &vha->vp_fcports, list) {
-			if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
-			    (fcport->d_id.b24 & mask) != d_id.b24 ||
-			    fcport->port_type == FCT_BROADCAST)
-				continue;
-
-			if (atomic_read(&fcport->state) == FCS_ONLINE) {
-				if (format != 3 ||
-				    fcport->port_type != FCT_INITIATOR) {
-					qla2x00_mark_device_lost(vha, fcport,
-					    0, 0);
-				}
-			}
-		}
-	}
-	return (rval);
-}
-
-/*
  * qla2x00_fabric_dev_login
  *	Login fabric target device and update FC port database.
  *
@@ -3644,6 +3499,9 @@ qla2x00_fabric_dev_login(scsi_qla_host_t *vha, fc_port_t *fcport,
 		} else {
 			qla2x00_update_fcport(vha, fcport);
 		}
+	} else {
+		/* Retry Login. */
+		qla2x00_mark_device_lost(vha, fcport, 1, 0);
 	}
 
 	return (rval);
@@ -3684,9 +3542,12 @@ qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
 		    fcport->d_id.b.area, fcport->d_id.b.al_pa);
 
 		/* Login fcport on switch. */
-		ha->isp_ops->fabric_login(vha, fcport->loop_id,
+		rval = ha->isp_ops->fabric_login(vha, fcport->loop_id,
 		    fcport->d_id.b.domain, fcport->d_id.b.area,
 		    fcport->d_id.b.al_pa, mb, BIT_0);
+		if (rval != QLA_SUCCESS) {
+			return rval;
+		}
 		if (mb[0] == MBS_PORT_ID_USED) {
 			/*
 			 * Device has another loop ID.  The firmware team
@@ -4100,15 +3961,8 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
 			ha->isp_abort_cnt = 0;
 			clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
 
-			if (IS_QLA81XX(ha))
-				qla2x00_get_fw_version(vha,
-				    &ha->fw_major_version,
-				    &ha->fw_minor_version,
-				    &ha->fw_subminor_version,
-				    &ha->fw_attributes, &ha->fw_memory_size,
-				    ha->mpi_version, &ha->mpi_capabilities,
-				    ha->phy_version);
-
+			if (IS_QLA81XX(ha) || IS_QLA8031(ha))
+				qla2x00_get_fw_version(vha);
 			if (ha->fce) {
 				ha->flags.fce_enabled = 1;
 				memset(ha->fce, 0,
@@ -4974,7 +4828,6 @@ try_blob_fw:
 
 	ql_log(ql_log_info, vha, 0x009a, "Update operational firmware.\n");
 	ha->flags.running_gold_fw = 1;
-
 	return rval;
 }
 
@@ -5009,6 +4862,7 @@ int
 qla24xx_configure_vhba(scsi_qla_host_t *vha)
 {
 	int rval = QLA_SUCCESS;
+	int rval2;
 	uint16_t mb[MAILBOX_REGISTER_COUNT];
 	struct qla_hw_data *ha = vha->hw;
 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
@@ -5033,12 +4887,18 @@ qla24xx_configure_vhba(scsi_qla_host_t *vha)
 	vha->flags.management_server_logged_in = 0;
 
 	/* Login to SNS first */
-	ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb, BIT_1);
-	if (mb[0] != MBS_COMMAND_COMPLETE) {
-		ql_dbg(ql_dbg_init, vha, 0x0103,
-		    "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x "
-		    "mb[6]=%x mb[7]=%x.\n",
-		    NPH_SNS, mb[0], mb[1], mb[2], mb[6], mb[7]);
+	rval2 = ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb,
+	    BIT_1);
+	if (rval2 != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
+		if (rval2 == QLA_MEMORY_ALLOC_FAILED)
+			ql_dbg(ql_dbg_init, vha, 0x0120,
+			    "Failed SNS login: loop_id=%x, rval2=%d\n",
+			    NPH_SNS, rval2);
+		else
+			ql_dbg(ql_dbg_init, vha, 0x0103,
+			    "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x "
+			    "mb[2]=%x mb[6]=%x mb[7]=%x.\n",
+			    NPH_SNS, mb[0], mb[1], mb[2], mb[6], mb[7]);
 		return (QLA_FUNCTION_FAILED);
 	}
 
@@ -5214,10 +5074,10 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
 		nv->reset_delay = 5;
 		nv->max_luns_per_target = __constant_cpu_to_le16(128);
 		nv->port_down_retry_count = __constant_cpu_to_le16(30);
-		nv->link_down_timeout = __constant_cpu_to_le16(30);
+		nv->link_down_timeout = __constant_cpu_to_le16(180);
 		nv->enode_mac[0] = 0x00;
-		nv->enode_mac[1] = 0x02;
-		nv->enode_mac[2] = 0x03;
+		nv->enode_mac[1] = 0xC0;
+		nv->enode_mac[2] = 0xDD;
 		nv->enode_mac[3] = 0x04;
 		nv->enode_mac[4] = 0x05;
 		nv->enode_mac[5] = 0x06 + ha->port_no;
@@ -5248,9 +5108,9 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
 	memcpy(icb->enode_mac, nv->enode_mac, sizeof(icb->enode_mac));
 	/* Some boards (with valid NVRAMs) still have NULL enode_mac!! */
 	if (!memcmp(icb->enode_mac, "\0\0\0\0\0\0", sizeof(icb->enode_mac))) {
-		icb->enode_mac[0] = 0x01;
-		icb->enode_mac[1] = 0x02;
-		icb->enode_mac[2] = 0x03;
+		icb->enode_mac[0] = 0x00;
+		icb->enode_mac[1] = 0xC0;
+		icb->enode_mac[2] = 0xDD;
 		icb->enode_mac[3] = 0x04;
 		icb->enode_mac[4] = 0x05;
 		icb->enode_mac[5] = 0x06 + ha->port_no;
@@ -5353,6 +5213,10 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
 	if (ql2xloginretrycount)
 		ha->login_retry_count = ql2xloginretrycount;
 
+	/* if not running MSI-X we need handshaking on interrupts */
+	if (!vha->hw->flags.msix_enabled && IS_QLA83XX(ha))
+		icb->firmware_options_2 |= __constant_cpu_to_le32(BIT_22);
+
 	/* Enable ZIO. */
 	if (!vha->flags.init_done) {
 		ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 7cc4f36cd539..6e457643c639 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -72,16 +72,19 @@ static inline void
 qla2x00_clean_dsd_pool(struct qla_hw_data *ha, srb_t *sp)
 {
 	struct dsd_dma *dsd_ptr, *tdsd_ptr;
+	struct crc_context *ctx;
+
+	ctx = (struct crc_context *)GET_CMD_CTX_SP(sp);
 
 	/* clean up allocated prev pool */
 	list_for_each_entry_safe(dsd_ptr, tdsd_ptr,
-	    &((struct crc_context *)sp->ctx)->dsd_list, list) {
+	    &ctx->dsd_list, list) {
 		dma_pool_free(ha->dl_dma_pool, dsd_ptr->dsd_addr,
 		    dsd_ptr->dsd_list_dma);
 		list_del(&dsd_ptr->list);
 		kfree(dsd_ptr);
 	}
-	INIT_LIST_HEAD(&((struct crc_context *)sp->ctx)->dsd_list);
+	INIT_LIST_HEAD(&ctx->dsd_list);
 }
 
 static inline void
@@ -113,8 +116,7 @@ qla2x00_hba_err_chk_enabled(srb_t *sp)
 		return 0;
 	 *
 	 */
-
-	switch (scsi_get_prot_op(sp->cmd)) {
+	switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
 	case SCSI_PROT_READ_STRIP:
 	case SCSI_PROT_WRITE_INSERT:
 		if (ql2xenablehba_err_chk >= 1)
@@ -144,3 +146,44 @@ qla2x00_reset_active(scsi_qla_host_t *vha)
 	    test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
 	    test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
 }
+
+static inline srb_t *
+qla2x00_get_sp(scsi_qla_host_t *vha, fc_port_t *fcport, gfp_t flag)
+{
+	srb_t *sp = NULL;
+	struct qla_hw_data *ha = vha->hw;
+	uint8_t bail;
+
+	QLA_VHA_MARK_BUSY(vha, bail);
+	if (unlikely(bail))
+		return NULL;
+
+	sp = mempool_alloc(ha->srb_mempool, flag);
+	if (!sp)
+		goto done;
+
+	memset(sp, 0, sizeof(*sp));
+	sp->fcport = fcport;
+	sp->iocbs = 1;
+done:
+	if (!sp)
+		QLA_VHA_MARK_NOT_BUSY(vha);
+	return sp;
+}
+
+static inline void
+qla2x00_init_timer(srb_t *sp, unsigned long tmo)
+{
+	init_timer(&sp->u.iocb_cmd.timer);
+	sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ;
+	sp->u.iocb_cmd.timer.data = (unsigned long)sp;
+	sp->u.iocb_cmd.timer.function = qla2x00_sp_timeout;
+	add_timer(&sp->u.iocb_cmd.timer);
+	sp->free = qla2x00_sp_free;
+}
+
+static inline int
+qla2x00_gid_list_size(struct qla_hw_data *ha)
+{
+	return sizeof(struct gid_list_info) * ha->max_fibre_devices;
+}
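
Taken together, the three inlines added above replace the old context-SRB machinery: qla2x00_get_sp() is the single-allocation constructor, qla2x00_init_timer() arms the per-command timer and installs qla2x00_sp_free() as the destructor, and qla2x00_gid_list_size() pairs with ha->max_fibre_devices for the runtime-sized GID list buffer. A caller strings them together the way the async-login path earlier in this patch does (sketch only; IOCB wiring elided):

	srb_t *sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		return QLA_FUNCTION_FAILED;

	sp->type = SRB_LOGIN_CMD;
	sp->name = "login";
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
	sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
	sp->done = qla2x00_async_login_sp_done;

	if (qla2x00_start_sp(sp) != QLA_SUCCESS)
		sp->free(fcport->vha, sp);	/* drops the vha busy ref too */
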
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 55a96761b5a4..eac950924497 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -22,18 +22,19 @@ static inline uint16_t
 qla2x00_get_cmd_direction(srb_t *sp)
 {
 	uint16_t cflags;
+	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
 
 	cflags = 0;
 
 	/* Set transfer direction */
-	if (sp->cmd->sc_data_direction == DMA_TO_DEVICE) {
+	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
 		cflags = CF_WRITE;
 		sp->fcport->vha->hw->qla_stats.output_bytes +=
-		    scsi_bufflen(sp->cmd);
-	} else if (sp->cmd->sc_data_direction == DMA_FROM_DEVICE) {
+		    scsi_bufflen(cmd);
+	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
 		cflags = CF_READ;
 		sp->fcport->vha->hw->qla_stats.input_bytes +=
-		    scsi_bufflen(sp->cmd);
+		    scsi_bufflen(cmd);
 	}
 	return (cflags);
 }
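
From here on, every sp->cmd and sp->ctx dereference in qla_iocb.c goes through the GET_CMD_SP()/GET_CMD_CTX_SP() accessors, which follow the SCSI command into its new home inside the srb union. Their definitions belong to qla_def.h and are not shown in this excerpt; presumably something along the lines of:

	/* assumed shape of the qla_def.h accessors (not part of this diff) */
	#define GET_CMD_SP(sp)		((sp)->u.scmd.cmd)
	#define GET_CMD_CTX_SP(sp)	((sp)->u.scmd.ctx)
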
@@ -143,12 +144,13 @@ qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
 static inline int
 qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
 {
-	uint8_t	guard = scsi_host_get_guard(sp->cmd->device->host);
+	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+	uint8_t	guard = scsi_host_get_guard(cmd->device->host);
 
 	/* We only support T10 DIF right now */
 	if (guard != SHOST_DIX_GUARD_CRC) {
 		ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3007,
-		    "Unsupported guard: %d for cmd=%p.\n", guard, sp->cmd);
+		    "Unsupported guard: %d for cmd=%p.\n", guard, cmd);
 		return 0;
 	}
 
@@ -156,7 +158,7 @@ qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
 	*fw_prot_opts = 0;
 
 	/* Translate SCSI opcode to a protection opcode */
-	switch (scsi_get_prot_op(sp->cmd)) {
+	switch (scsi_get_prot_op(cmd)) {
 	case SCSI_PROT_READ_STRIP:
 		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
 		break;
@@ -180,7 +182,7 @@ qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
 		break;
 	}
 
-	return scsi_prot_sg_count(sp->cmd);
+	return scsi_prot_sg_count(cmd);
 }
 
 /*
@@ -201,7 +203,7 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
 	struct scatterlist *sg;
 	int i;
 
-	cmd = sp->cmd;
+	cmd = GET_CMD_SP(sp);
 
 	/* Update entry type to indicate Command Type 2 IOCB */
 	*((uint32_t *)(&cmd_pkt->entry_type)) =
@@ -259,7 +261,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
 	struct scatterlist *sg;
 	int i;
 
-	cmd = sp->cmd;
+	cmd = GET_CMD_SP(sp);
 
 	/* Update entry type to indicate Command Type 3 IOCB */
 	*((uint32_t *)(&cmd_pkt->entry_type)) =
@@ -333,7 +335,7 @@ qla2x00_start_scsi(srb_t *sp)
 	vha = sp->fcport->vha;
 	ha = vha->hw;
 	reg = &ha->iobase->isp;
-	cmd = sp->cmd;
+	cmd = GET_CMD_SP(sp);
 	req = ha->req_q_map[0];
 	rsp = ha->rsp_q_map[0];
 	/* So we know we haven't pci_map'ed anything yet */
@@ -391,7 +393,7 @@ qla2x00_start_scsi(srb_t *sp)
 	req->current_outstanding_cmd = handle;
 	req->outstanding_cmds[handle] = sp;
 	sp->handle = handle;
-	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
+	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
 	req->cnt -= req_cnt;
 
 	cmd_pkt = (cmd_entry_t *)req->ring_ptr;
@@ -403,7 +405,7 @@ qla2x00_start_scsi(srb_t *sp)
 
 	/* Set target ID and LUN number*/
 	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
-	cmd_pkt->lun = cpu_to_le16(sp->cmd->device->lun);
+	cmd_pkt->lun = cpu_to_le16(cmd->device->lun);
 
 	/* Update tagged queuing modifier */
 	if (scsi_populate_tag_msg(cmd, tag)) {
@@ -473,7 +475,6 @@ qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
 {
 	struct qla_hw_data *ha = vha->hw;
 	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
-	struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
 
 	if (IS_QLA82XX(ha)) {
 		qla82xx_start_iocbs(vha);
@@ -487,9 +488,9 @@ qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
 			req->ring_ptr++;
 
 		/* Set chip new ring index. */
-		if (ha->mqenable) {
-			WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
-			RD_REG_DWORD(&ioreg->hccr);
+		if (ha->mqenable || IS_QLA83XX(ha)) {
+			WRT_REG_DWORD(req->req_q_in, req->ring_index);
+			RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
 		} else if (IS_FWI2_CAPABLE(ha)) {
 			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
 			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
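
For 83xx and multiqueue parts the doorbell write above goes through a per-queue pointer (req->req_q_in, set up at queue-init time) rather than the fixed isp25mq register block, and the register read that follows exists to flush the posted PCI write; the _RELAXED variant skips the full memory barrier since only the flush is needed. In isolation:

	/* ring the doorbell, then read any BAR register back so the
	 * posted write reaches the adapter before we continue */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);
	RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
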
@@ -609,7 +610,7 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
 	struct dsd_dma *dsd_ptr;
 	struct ct6_dsd *ctx;
 
-	cmd = sp->cmd;
+	cmd = GET_CMD_SP(sp);
 
 	/* Update entry type to indicate Command Type 3 IOCB */
 	*((uint32_t *)(&cmd_pkt->entry_type)) =
@@ -636,7 +637,7 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
 	}
 
 	cur_seg = scsi_sglist(cmd);
-	ctx = sp->ctx;
+	ctx = GET_CMD_CTX_SP(sp);
 
 	while (tot_dsds) {
 		avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
@@ -725,7 +726,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
 	int i;
 	struct req_que *req;
 
-	cmd = sp->cmd;
+	cmd = GET_CMD_SP(sp);
 
 	/* Update entry type to indicate Command Type 3 IOCB */
 	*((uint32_t *)(&cmd_pkt->entry_type)) =
@@ -745,12 +746,12 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
 		cmd_pkt->task_mgmt_flags =
 		    __constant_cpu_to_le16(TMF_WRITE_DATA);
 		sp->fcport->vha->hw->qla_stats.output_bytes +=
-		    scsi_bufflen(sp->cmd);
+		    scsi_bufflen(cmd);
 	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
 		cmd_pkt->task_mgmt_flags =
 		    __constant_cpu_to_le16(TMF_READ_DATA);
 		sp->fcport->vha->hw->qla_stats.input_bytes +=
-		    scsi_bufflen(sp->cmd);
+		    scsi_bufflen(cmd);
 	}
 
 	/* One DSD is available in the Command Type 3 IOCB */
@@ -797,7 +798,7 @@ static inline void
 qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
     unsigned int protcnt)
 {
-	struct scsi_cmnd *cmd = sp->cmd;
+	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
 	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
 
 	switch (scsi_get_prot_type(cmd)) {
@@ -952,16 +953,16 @@ qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
 	struct qla2_sgx sgx;
 	dma_addr_t	sle_dma;
 	uint32_t	sle_dma_len, tot_prot_dma_len = 0;
-	struct scsi_cmnd *cmd = sp->cmd;
+	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
 
 	prot_int = cmd->device->sector_size;
 
 	memset(&sgx, 0, sizeof(struct qla2_sgx));
-	sgx.tot_bytes = scsi_bufflen(sp->cmd);
-	sgx.cur_sg = scsi_sglist(sp->cmd);
+	sgx.tot_bytes = scsi_bufflen(cmd);
+	sgx.cur_sg = scsi_sglist(cmd);
 	sgx.sp = sp;
 
-	sg_prot = scsi_prot_sglist(sp->cmd);
+	sg_prot = scsi_prot_sglist(cmd);
 
 	while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {
 
@@ -995,7 +996,7 @@ alloc_and_fill:
 			}
 
 			list_add_tail(&dsd_ptr->list,
-			    &((struct crc_context *)sp->ctx)->dsd_list);
+			    &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);
 
 			sp->flags |= SRB_CRC_CTX_DSD_VALID;
 
@@ -1044,11 +1045,12 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
 	uint32_t *cur_dsd = dsd;
 	int	i;
 	uint16_t	used_dsds = tot_dsds;
-	scsi_qla_host_t *vha = shost_priv(sp->cmd->device->host);
+	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
 
 	uint8_t		*cp;
 
-	scsi_for_each_sg(sp->cmd, sg, tot_dsds, i) {
+	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
 		dma_addr_t	sle_dma;
 
 		/* Allocate additional continuation packets? */
@@ -1078,7 +1080,7 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
 			}
 
 			list_add_tail(&dsd_ptr->list,
-			    &((struct crc_context *)sp->ctx)->dsd_list);
+			    &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);
 
 			sp->flags |= SRB_CRC_CTX_DSD_VALID;
 
@@ -1091,17 +1093,16 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
 		sle_dma = sg_dma_address(sg);
 		ql_dbg(ql_dbg_io, vha, 0x300a,
 		    "sg entry %d - addr=0x%x 0x%x, " "len=%d for cmd=%p.\n",
-		    i, LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg),
-		    sp->cmd);
+		    i, LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg), cmd);
 		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
 		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
 		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
 		avail_dsds--;
 
-		if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
+		if (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_PASS) {
 			cp = page_address(sg_page(sg)) + sg->offset;
 			ql_dbg(ql_dbg_io, vha, 0x300b,
-			    "User data buffer=%p for cmd=%p.\n", cp, sp->cmd);
+			    "User data buffer=%p for cmd=%p.\n", cp, cmd);
 		}
 	}
 	/* Null termination */
@@ -1128,8 +1129,7 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
 	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
 	uint8_t		*cp;
 
-
-	cmd = sp->cmd;
+	cmd = GET_CMD_SP(sp);
 	scsi_for_each_prot_sg(cmd, sg, tot_dsds, i) {
 		dma_addr_t	sle_dma;
 
@@ -1160,7 +1160,7 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
 			}
 
 			list_add_tail(&dsd_ptr->list,
-			    &((struct crc_context *)sp->ctx)->dsd_list);
+			    &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);
 
 			sp->flags |= SRB_CRC_CTX_DSD_VALID;
 
@@ -1171,7 +1171,7 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
 			cur_dsd = (uint32_t *)next_dsd;
 		}
 		sle_dma = sg_dma_address(sg);
-		if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
+		if (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_PASS) {
 			ql_dbg(ql_dbg_io, vha, 0x3027,
 			    "%s(): %p, sg_entry %d - "
 			    "addr=0x%x0x%x, len=%d.\n",
@@ -1182,7 +1182,7 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
 		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
 		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
 
-		if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
+		if (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_PASS) {
 			cp = page_address(sg_page(sg)) + sg->offset;
 			ql_dbg(ql_dbg_io, vha, 0x3028,
 			    "%s(): Protection Data buffer = %p.\n", __func__,
@@ -1228,7 +1228,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
 	dma_addr_t		crc_ctx_dma;
 	char			tag[2];
 
-	cmd = sp->cmd;
+	cmd = GET_CMD_SP(sp);
 
 	sgc = 0;
 	/* Update entry type to indicate Command Type CRC_2 IOCB */
@@ -1256,15 +1256,15 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
 		    __constant_cpu_to_le16(CF_READ_DATA);
 	}
 
-	if ((scsi_get_prot_op(sp->cmd) == SCSI_PROT_READ_INSERT) ||
-	    (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_STRIP) ||
-	    (scsi_get_prot_op(sp->cmd) == SCSI_PROT_READ_STRIP) ||
-	    (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_INSERT))
+	if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
+	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
+	    (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
+	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
 		bundling = 0;
 
 	/* Allocate CRC context from global pool */
-	crc_ctx_pkt = sp->ctx = dma_pool_alloc(ha->dl_dma_pool,
-	    GFP_ATOMIC, &crc_ctx_dma);
+	crc_ctx_pkt = sp->u.scmd.ctx =
+	    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
 
 	if (!crc_ctx_pkt)
 		goto crc_queuing_error;
@@ -1310,7 +1310,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
 	else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
 		fcp_cmnd->additional_cdb_len |= 2;
 
-	int_to_scsilun(sp->cmd->device->lun, &fcp_cmnd->lun);
+	int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
 	memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
 	cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
 	cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
@@ -1345,7 +1345,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
 	blk_size = cmd->device->sector_size;
 	dif_bytes = (data_bytes / blk_size) * 8;
 
-	switch (scsi_get_prot_op(sp->cmd)) {
+	switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
 	case SCSI_PROT_READ_INSERT:
 	case SCSI_PROT_WRITE_STRIP:
 	    total_bytes = data_bytes;
@@ -1445,7 +1445,7 @@ qla24xx_start_scsi(srb_t *sp)
 	uint16_t	tot_dsds;
 	struct req_que *req = NULL;
 	struct rsp_que *rsp = NULL;
-	struct scsi_cmnd *cmd = sp->cmd;
+	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
 	struct scsi_qla_host *vha = sp->fcport->vha;
 	struct qla_hw_data *ha = vha->hw;
 	char		tag[2];
@@ -1510,7 +1510,7 @@ qla24xx_start_scsi(srb_t *sp)
 	req->current_outstanding_cmd = handle;
 	req->outstanding_cmds[handle] = sp;
 	sp->handle = handle;
-	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
+	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
 	req->cnt -= req_cnt;
 
 	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
@@ -1529,7 +1529,7 @@ qla24xx_start_scsi(srb_t *sp)
 	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
 	cmd_pkt->vp_index = sp->fcport->vp_idx;
 
-	int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
+	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
 	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
 
 	/* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */
@@ -1611,7 +1611,7 @@ qla24xx_dif_start_scsi(srb_t *sp)
 	uint16_t		fw_prot_opts = 0;
 	struct req_que		*req = NULL;
 	struct rsp_que		*rsp = NULL;
-	struct scsi_cmnd	*cmd = sp->cmd;
+	struct scsi_cmnd	*cmd = GET_CMD_SP(sp);
 	struct scsi_qla_host	*vha = sp->fcport->vha;
 	struct qla_hw_data	*ha = vha->hw;
 	struct cmd_type_crc_2	*cmd_pkt;
@@ -1728,7 +1728,7 @@ qla24xx_dif_start_scsi(srb_t *sp)
 	req->current_outstanding_cmd = handle;
 	req->outstanding_cmds[handle] = sp;
 	sp->handle = handle;
-	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
+	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
 	req->cnt -= req_cnt;
 
 	/* Fill-in common area */
@@ -1744,7 +1744,7 @@ qla24xx_dif_start_scsi(srb_t *sp)
 	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
 	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
 
-	int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
+	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
 	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
 
 	/* Total Data and protection segment(s) */
@@ -1797,7 +1797,7 @@ queuing_error:
 
 static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
 {
-	struct scsi_cmnd *cmd = sp->cmd;
+	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
 	struct qla_hw_data *ha = sp->fcport->vha->hw;
 	int affinity = cmd->request->cpu;
 
@@ -1818,7 +1818,6 @@ qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
 	uint32_t index, handle;
 	request_t *pkt;
 	uint16_t cnt, req_cnt;
-	struct srb_ctx *ctx;
 
 	pkt = NULL;
 	req_cnt = 1;
@@ -1848,15 +1847,13 @@ qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
 	sp->handle = handle;
 
 	/* Adjust entry-counts as needed. */
-	if (sp->ctx) {
-		ctx = sp->ctx;
-		req_cnt = ctx->iocbs;
-	}
+	if (sp->type != SRB_SCSI_CMD)
+		req_cnt = sp->iocbs;
 
 skip_cmd_array:
 	/* Check for room on request queue. */
 	if (req->cnt < req_cnt) {
-		if (ha->mqenable)
+		if (ha->mqenable || IS_QLA83XX(ha))
 			cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
 		else if (IS_QLA82XX(ha))
 			cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
@@ -1889,8 +1886,7 @@ queuing_error:
 static void
 qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
 {
-	struct srb_ctx *ctx = sp->ctx;
-	struct srb_iocb *lio = ctx->u.iocb_cmd;
+	struct srb_iocb *lio = &sp->u.iocb_cmd;
 
 	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
 	logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
@@ -1909,8 +1905,7 @@ static void
 qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
 {
 	struct qla_hw_data *ha = sp->fcport->vha->hw;
-	struct srb_ctx *ctx = sp->ctx;
-	struct srb_iocb *lio = ctx->u.iocb_cmd;
+	struct srb_iocb *lio = &sp->u.iocb_cmd;
 	uint16_t opts;
 
 	mbx->entry_type = MBX_IOCB_TYPE;
@@ -1999,8 +1994,7 @@ qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
 	struct fc_port *fcport = sp->fcport;
 	scsi_qla_host_t *vha = fcport->vha;
 	struct qla_hw_data *ha = vha->hw;
-	struct srb_ctx *ctx = sp->ctx;
-	struct srb_iocb *iocb = ctx->u.iocb_cmd;
+	struct srb_iocb *iocb = &sp->u.iocb_cmd;
 	struct req_que *req = vha->req;
 
 	flags = iocb->u.tmf.flags;
@@ -2027,7 +2021,7 @@ qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
 static void
 qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
 {
-	struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
+	struct fc_bsg_job *bsg_job = sp->u.bsg_job;
 
         els_iocb->entry_type = ELS_IOCB_TYPE;
         els_iocb->entry_count = 1;
@@ -2041,7 +2035,7 @@ qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
         els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
 
 	els_iocb->opcode =
-	    (((struct srb_ctx *)sp->ctx)->type == SRB_ELS_CMD_RPT) ?
+	    sp->type == SRB_ELS_CMD_RPT ?
 	    bsg_job->request->rqst_data.r_els.els_code :
 	    bsg_job->request->rqst_data.h_els.command_code;
         els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
@@ -2078,7 +2072,7 @@ qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
 	uint16_t tot_dsds;
 	scsi_qla_host_t *vha = sp->fcport->vha;
 	struct qla_hw_data *ha = vha->hw;
-	struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
+	struct fc_bsg_job *bsg_job = sp->u.bsg_job;
 	int loop_iterartion = 0;
 	int cont_iocb_prsnt = 0;
 	int entry_count = 1;
@@ -2155,7 +2149,7 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
 	uint16_t tot_dsds;
         scsi_qla_host_t *vha = sp->fcport->vha;
 	struct qla_hw_data *ha = vha->hw;
-	struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
+	struct fc_bsg_job *bsg_job = sp->u.bsg_job;
 	int loop_iterartion = 0;
 	int cont_iocb_prsnt = 0;
 	int entry_count = 1;
@@ -2245,12 +2239,12 @@ qla82xx_start_scsi(srb_t *sp)
 	struct qla_hw_data *ha = vha->hw;
 	struct req_que *req = NULL;
 	struct rsp_que *rsp = NULL;
-	char		tag[2];
+	char tag[2];
 
 	/* Setup device pointers. */
 	ret = 0;
 	reg = &ha->iobase->isp82;
-	cmd = sp->cmd;
+	cmd = GET_CMD_SP(sp);
 	req = vha->req;
 	rsp = ha->rsp_q_map[0];
 
@@ -2354,12 +2348,14 @@ sufficient_dsds:
 		if (req->cnt < (req_cnt + 2))
 			goto queuing_error;
 
-		ctx = sp->ctx = mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
-		if (!sp->ctx) {
+		ctx = sp->u.scmd.ctx =
+		    mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
+		if (!ctx) {
 			ql_log(ql_log_fatal, vha, 0x3010,
 			    "Failed to allocate ctx for cmd=%p.\n", cmd);
 			goto queuing_error;
 		}
+
 		memset(ctx, 0, sizeof(struct ct6_dsd));
 		ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
 			GFP_ATOMIC, &ctx->fcp_cmnd_dma);
@@ -2410,12 +2406,12 @@ sufficient_dsds:
 		if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
 			goto queuing_error_fcp_cmnd;
 
-		int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
+		int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
 		host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
 
 		/* build FCP_CMND IU */
 		memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
-		int_to_scsilun(sp->cmd->device->lun, &ctx->fcp_cmnd->lun);
+		int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
 		ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
 
 		if (cmd->sc_data_direction == DMA_TO_DEVICE)
@@ -2495,9 +2491,9 @@ sufficient_dsds:
 		cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
 		cmd_pkt->vp_index = sp->fcport->vp_idx;
 
-		int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
+		int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
 		host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
-			sizeof(cmd_pkt->lun));
+		    sizeof(cmd_pkt->lun));
 
 		/*
 		 * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
@@ -2538,7 +2534,7 @@ sufficient_dsds:
 	req->current_outstanding_cmd = handle;
 	req->outstanding_cmds[handle] = sp;
 	sp->handle = handle;
-	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
+	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
 	req->cnt -= req_cnt;
 	wmb();
 
@@ -2584,9 +2580,9 @@ queuing_error:
 	if (tot_dsds)
 		scsi_dma_unmap(cmd);
 
-	if (sp->ctx) {
-		mempool_free(sp->ctx, ha->ctx_mempool);
-		sp->ctx = NULL;
+	if (sp->u.scmd.ctx) {
+		mempool_free(sp->u.scmd.ctx, ha->ctx_mempool);
+		sp->u.scmd.ctx = NULL;
 	}
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
@@ -2599,7 +2595,6 @@ qla2x00_start_sp(srb_t *sp)
 	int rval;
 	struct qla_hw_data *ha = sp->fcport->vha->hw;
 	void *pkt;
-	struct srb_ctx *ctx = sp->ctx;
 	unsigned long flags;
 
 	rval = QLA_FUNCTION_FAILED;
@@ -2612,7 +2607,7 @@ qla2x00_start_sp(srb_t *sp)
 	}
 
 	rval = QLA_SUCCESS;
-	switch (ctx->type) {
+	switch (sp->type) {
 	case SRB_LOGIN_CMD:
 		IS_FWI2_CAPABLE(ha) ?
 		    qla24xx_login_iocb(sp, pkt) :
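
[Editor's note -- not part of the patch. Every hunk in this file follows
the same conversion: the separately allocated struct srb_ctx is gone and
its fields now live directly on the SRB. A pared-down sketch of the
layout this implies, inferred purely from the accessors used in this
diff (sp->type, sp->name, sp->iocbs, sp->u.*, sp->done, GET_CMD_SP());
the authoritative definitions are in qla_def.h, and any detail not
visible in these hunks is an assumption:

	struct srb_cmd {
		struct scsi_cmnd *cmd;		/* mid-layer command */
		uint32_t request_sense_length;	/* sense bytes still expected */
		uint8_t *request_sense_ptr;
		void *ctx;			/* CT6 DSD context, 82xx path */
	};

	typedef struct srb {
		struct fc_port *fcport;
		uint32_t handle;
		uint8_t type;			/* SRB_SCSI_CMD, SRB_LOGIN_CMD, ... */
		const char *name;		/* printed by the async handlers */
		int iocbs;			/* IOCB count for non-SCSI SRBs */
		union {
			struct srb_iocb iocb_cmd;
			struct fc_bsg_job *bsg_job;
			struct srb_cmd scmd;
		} u;
		void (*done)(void *, struct srb *, int res);
	} srb_t;

	#define GET_CMD_SP(sp)	((sp)->u.scmd.cmd)

One visible consequence: a plain SCSI SRB no longer carries a context
allocation at all, which is why qla2x00_alloc_iocbs() above keys off
sp->type instead of testing sp->ctx, and why only the 82xx CT6 path
still allocates sp->u.scmd.ctx from the mempool.]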
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 349843ea32f6..f79844ce7122 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -44,8 +44,8 @@ qla2100_intr_handler(int irq, void *dev_id)
 
 	rsp = (struct rsp_que *) dev_id;
 	if (!rsp) {
-		printk(KERN_INFO
-		    "%s(): NULL response queue pointer.\n", __func__);
+		ql_log(ql_log_info, NULL, 0x505d,
+		    "%s: NULL response queue pointer.\n", __func__);
 		return (IRQ_NONE);
 	}
 
@@ -141,8 +141,8 @@ qla2300_intr_handler(int irq, void *dev_id)
 
 	rsp = (struct rsp_que *) dev_id;
 	if (!rsp) {
-		printk(KERN_INFO
-		    "%s(): NULL response queue pointer.\n", __func__);
+		ql_log(ql_log_info, NULL, 0x5058,
+		    "%s: NULL response queue pointer.\n", __func__);
 		return (IRQ_NONE);
 	}
 
@@ -289,7 +289,7 @@ qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
 		mb[cnt] = RD_REG_WORD(wptr);
 
 	ql_dbg(ql_dbg_async, vha, 0x5021,
-	    "Inter-Driver Commucation %s -- "
+	    "Inter-Driver Communication %s -- "
 	    "%04x %04x %04x %04x %04x %04x %04x.\n",
 	    event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
 	    mb[4], mb[5], mb[6]);
@@ -318,7 +318,7 @@ void
 qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
 {
 #define LS_UNKNOWN	2
-	static char	*link_speeds[] = { "1", "2", "?", "4", "8", "10" };
+	static char *link_speeds[] = { "1", "2", "?", "4", "8", "16", "10" };
 	char		*link_speed;
 	uint16_t	handle_cnt;
 	uint16_t	cnt, mbx;
@@ -328,12 +328,11 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
 	struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
 	struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
 	uint32_t	rscn_entry, host_pid;
-	uint8_t		rscn_queue_index;
 	unsigned long	flags;
 
 	/* Setup to process RIO completion. */
 	handle_cnt = 0;
-	if (IS_QLA8XXX_TYPE(ha))
+	if (IS_CNA_CAPABLE(ha))
 		goto skip_rio;
 	switch (mb[0]) {
 	case MBA_SCSI_COMPLETION:
@@ -405,7 +404,8 @@ skip_rio:
 		break;
 
 	case MBA_SYSTEM_ERR:		/* System Error */
-		mbx = IS_QLA81XX(ha) ? RD_REG_WORD(&reg24->mailbox7) : 0;
+		mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha)) ?
+			RD_REG_WORD(&reg24->mailbox7) : 0;
 		ql_log(ql_log_warn, vha, 0x5003,
 		    "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
 		    "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);
@@ -418,6 +418,7 @@ skip_rio:
 				    "Unrecoverable Hardware Error: adapter "
 				    "marked OFFLINE!\n");
 				vha->flags.online = 0;
+				vha->device_flags |= DFLG_DEV_FAILED;
 			} else {
 				/* Check to see if MPI timeout occurred */
 				if ((mbx & MBX_3) && (ha->flags.port0))
@@ -431,6 +432,7 @@ skip_rio:
 			    "Unrecoverable Hardware Error: adapter marked "
 			    "OFFLINE!\n");
 			vha->flags.online = 0;
+			vha->device_flags |= DFLG_DEV_FAILED;
 		} else
 			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
 		break;
@@ -482,10 +484,10 @@ skip_rio:
 			ha->link_data_rate = PORT_SPEED_1GB;
 		} else {
 			link_speed = link_speeds[LS_UNKNOWN];
-			if (mb[1] < 5)
+			if (mb[1] < 6)
 				link_speed = link_speeds[mb[1]];
 			else if (mb[1] == 0x13)
-				link_speed = link_speeds[5];
+				link_speed = link_speeds[6];
 			ha->link_data_rate = mb[1];
 		}
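
[Editor's note: the link_speeds[] change slots a "16" entry in ahead of
"10", presumably for the 16 Gb rate of the new ISP83xx parts, so the
direct-index bound grows from mb[1] < 5 to mb[1] < 6 and the CNA 10 Gb
code 0x13 now maps to index 6 instead of 5.]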
 
@@ -497,7 +499,8 @@ skip_rio:
 		break;
 
 	case MBA_LOOP_DOWN:		/* Loop Down Event */
-		mbx = IS_QLA81XX(ha) ? RD_REG_WORD(&reg24->mailbox4) : 0;
+		mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
+			? RD_REG_WORD(&reg24->mailbox4) : 0;
 		mbx = IS_QLA82XX(ha) ? RD_REG_WORD(&reg82->mailbox_out[4]) : mbx;
 		ql_dbg(ql_dbg_async, vha, 0x500b,
 		    "LOOP DOWN detected (%x %x %x %x).\n",
@@ -547,7 +550,7 @@ skip_rio:
 		if (IS_QLA2100(ha))
 			break;
 
-		if (IS_QLA8XXX_TYPE(ha)) {
+		if (IS_QLA81XX(ha) || IS_QLA82XX(ha) || IS_QLA8031(ha)) {
 			ql_dbg(ql_dbg_async, vha, 0x500d,
 			    "DCBX Completed -- %04x %04x %04x.\n",
 			    mb[1], mb[2], mb[3]);
@@ -681,8 +684,6 @@ skip_rio:
 
 		qla2x00_mark_all_devices_lost(vha, 1);
 
-		vha->flags.rscn_queue_overflow = 1;
-
 		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
 		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
 		break;
@@ -711,15 +712,6 @@ skip_rio:
 
 		/* Ignore reserved bits from RSCN-payload. */
 		rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
-		rscn_queue_index = vha->rscn_in_ptr + 1;
-		if (rscn_queue_index == MAX_RSCN_COUNT)
-			rscn_queue_index = 0;
-		if (rscn_queue_index != vha->rscn_out_ptr) {
-			vha->rscn_queue[vha->rscn_in_ptr] = rscn_entry;
-			vha->rscn_in_ptr = rscn_queue_index;
-		} else {
-			vha->flags.rscn_queue_overflow = 1;
-		}
 
 		atomic_set(&vha->loop_down_timer, 0);
 		vha->flags.management_server_logged_in = 0;
@@ -809,6 +801,10 @@ skip_rio:
 	case MBA_IDC_TIME_EXT:
 		qla81xx_idc_event(vha, mb[0], mb[1]);
 		break;
+	default:
+		ql_dbg(ql_dbg_async, vha, 0x5057,
+		    "Unknown AEN:%04x %04x %04x %04x\n",
+		    mb[0], mb[1], mb[2], mb[3]);
 	}
 
 	if (!vha->vp_idx && ha->num_vhosts)
@@ -845,8 +841,7 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha,
 		req->outstanding_cmds[index] = NULL;
 
 		/* Save ISP completion status */
-		sp->cmd->result = DID_OK << 16;
-		qla2x00_sp_compl(ha, sp);
+		sp->done(ha, sp, DID_OK << 16);
 	} else {
 		ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");
 
@@ -903,7 +898,6 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
 	fc_port_t *fcport;
 	srb_t *sp;
 	struct srb_iocb *lio;
-	struct srb_ctx *ctx;
 	uint16_t *data;
 	uint16_t status;
 
@@ -911,9 +905,8 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
 	if (!sp)
 		return;
 
-	ctx = sp->ctx;
-	lio = ctx->u.iocb_cmd;
-	type = ctx->name;
+	lio = &sp->u.iocb_cmd;
+	type = sp->name;
 	fcport = sp->fcport;
 	data = lio->u.logio.data;
 
@@ -937,7 +930,7 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
 	}
 
 	status = le16_to_cpu(mbx->status);
-	if (status == 0x30 && ctx->type == SRB_LOGIN_CMD &&
+	if (status == 0x30 && sp->type == SRB_LOGIN_CMD &&
 	    le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
 		status = 0;
 	if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
@@ -948,7 +941,7 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
 		    le16_to_cpu(mbx->mb1));
 
 		data[0] = MBS_COMMAND_COMPLETE;
-		if (ctx->type == SRB_LOGIN_CMD) {
+		if (sp->type == SRB_LOGIN_CMD) {
 			fcport->port_type = FCT_TARGET;
 			if (le16_to_cpu(mbx->mb1) & BIT_0)
 				fcport->port_type = FCT_INITIATOR;
@@ -979,7 +972,7 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
 	    le16_to_cpu(mbx->mb7));
 
 logio_done:
-	lio->done(sp);
+	sp->done(vha, sp, 0);
 }
 
 static void
@@ -988,29 +981,18 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
 {
 	const char func[] = "CT_IOCB";
 	const char *type;
-	struct qla_hw_data *ha = vha->hw;
 	srb_t *sp;
-	struct srb_ctx *sp_bsg;
 	struct fc_bsg_job *bsg_job;
 	uint16_t comp_status;
+	int res;
 
 	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
 	if (!sp)
 		return;
 
-	sp_bsg = sp->ctx;
-	bsg_job = sp_bsg->u.bsg_job;
+	bsg_job = sp->u.bsg_job;
 
-	type = NULL;
-	switch (sp_bsg->type) {
-	case SRB_CT_CMD:
-		type = "ct pass-through";
-		break;
-	default:
-		ql_log(ql_log_warn, vha, 0x5047,
-		    "Unrecognized SRB: (%p) type=%d.\n", sp, sp_bsg->type);
-		return;
-	}
+	type = "ct pass-through";
 
 	comp_status = le16_to_cpu(pkt->comp_status);
 
@@ -1022,7 +1004,7 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
 
 	if (comp_status != CS_COMPLETE) {
 		if (comp_status == CS_DATA_UNDERRUN) {
-			bsg_job->reply->result = DID_OK << 16;
+			res = DID_OK << 16;
 			bsg_job->reply->reply_payload_rcv_len =
 			    le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len);
 
@@ -1035,30 +1017,19 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
 			ql_log(ql_log_warn, vha, 0x5049,
 			    "CT pass-through-%s error "
 			    "comp_status-status=0x%x.\n", type, comp_status);
-			bsg_job->reply->result = DID_ERROR << 16;
+			res = DID_ERROR << 16;
 			bsg_job->reply->reply_payload_rcv_len = 0;
 		}
 		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
 		    (uint8_t *)pkt, sizeof(*pkt));
 	} else {
-		bsg_job->reply->result =  DID_OK << 16;
+		res = DID_OK << 16;
 		bsg_job->reply->reply_payload_rcv_len =
 		    bsg_job->reply_payload.payload_len;
 		bsg_job->reply_len = 0;
 	}
 
-	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
-	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
-
-	dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
-	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
-
-	if (sp_bsg->type == SRB_ELS_CMD_HST || sp_bsg->type == SRB_CT_CMD)
-		kfree(sp->fcport);
-
-	kfree(sp->ctx);
-	mempool_free(sp, ha->srb_mempool);
-	bsg_job->job_done(bsg_job);
+	sp->done(vha, sp, res);
 }
 
 static void
@@ -1067,22 +1038,20 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
 {
 	const char func[] = "ELS_CT_IOCB";
 	const char *type;
-	struct qla_hw_data *ha = vha->hw;
 	srb_t *sp;
-	struct srb_ctx *sp_bsg;
 	struct fc_bsg_job *bsg_job;
 	uint16_t comp_status;
 	uint32_t fw_status[3];
 	uint8_t* fw_sts_ptr;
+	int res;
 
 	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
 	if (!sp)
 		return;
-	sp_bsg = sp->ctx;
-	bsg_job = sp_bsg->u.bsg_job;
+	bsg_job = sp->u.bsg_job;
 
 	type = NULL;
-	switch (sp_bsg->type) {
+	switch (sp->type) {
 	case SRB_ELS_CMD_RPT:
 	case SRB_ELS_CMD_HST:
 		type = "els";
@@ -1091,8 +1060,8 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
 		type = "ct pass-through";
 		break;
 	default:
-		ql_log(ql_log_warn, vha, 0x503e,
-		    "Unrecognized SRB: (%p) type=%d.\n", sp, sp_bsg->type);
+		ql_dbg(ql_dbg_user, vha, 0x503e,
+		    "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type);
 		return;
 	}
 
@@ -1108,11 +1077,11 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
 
 	if (comp_status != CS_COMPLETE) {
 		if (comp_status == CS_DATA_UNDERRUN) {
-			bsg_job->reply->result = DID_OK << 16;
+			res = DID_OK << 16;
 			bsg_job->reply->reply_payload_rcv_len =
-				le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->total_byte_count);
+			    le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count);
 
-			ql_log(ql_log_info, vha, 0x503f,
+			ql_dbg(ql_dbg_user, vha, 0x503f,
 			    "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
 			    "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
 			    type, sp->handle, comp_status, fw_status[1], fw_status[2],
@@ -1122,7 +1091,7 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
 			memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
 		}
 		else {
-			ql_log(ql_log_info, vha, 0x5040,
+			ql_dbg(ql_dbg_user, vha, 0x5040,
 			    "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
 			    "error subcode 1=0x%x error subcode 2=0x%x.\n",
 			    type, sp->handle, comp_status,
@@ -1130,32 +1099,21 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
 				pkt)->error_subcode_1),
 			    le16_to_cpu(((struct els_sts_entry_24xx *)
 				    pkt)->error_subcode_2));
-			bsg_job->reply->result = DID_ERROR << 16;
+			res = DID_ERROR << 16;
 			bsg_job->reply->reply_payload_rcv_len = 0;
 			fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
 			memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
 		}
-		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5056,
+		ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056,
 				(uint8_t *)pkt, sizeof(*pkt));
 	}
 	else {
-		bsg_job->reply->result =  DID_OK << 16;
+		res = DID_OK << 16;
 		bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
 		bsg_job->reply_len = 0;
 	}
 
-	dma_unmap_sg(&ha->pdev->dev,
-	    bsg_job->request_payload.sg_list,
-	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
-	dma_unmap_sg(&ha->pdev->dev,
-	    bsg_job->reply_payload.sg_list,
-	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
-	if ((sp_bsg->type == SRB_ELS_CMD_HST) ||
-	    (sp_bsg->type == SRB_CT_CMD))
-		kfree(sp->fcport);
-	kfree(sp->ctx);
-	mempool_free(sp, ha->srb_mempool);
-	bsg_job->job_done(bsg_job);
+	sp->done(vha, sp, res);
 }
 
 static void
@@ -1167,7 +1125,6 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
 	fc_port_t *fcport;
 	srb_t *sp;
 	struct srb_iocb *lio;
-	struct srb_ctx *ctx;
 	uint16_t *data;
 	uint32_t iop[2];
 
@@ -1175,9 +1132,8 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
 	if (!sp)
 		return;
 
-	ctx = sp->ctx;
-	lio = ctx->u.iocb_cmd;
-	type = ctx->name;
+	lio = &sp->u.iocb_cmd;
+	type = sp->name;
 	fcport = sp->fcport;
 	data = lio->u.logio.data;
 
@@ -1185,7 +1141,7 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
 	data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
 		QLA_LOGIO_LOGIN_RETRIED : 0;
 	if (logio->entry_status) {
-		ql_log(ql_log_warn, vha, 0x5034,
+		ql_log(ql_log_warn, fcport->vha, 0x5034,
 		    "Async-%s error entry - hdl=%x"
 		    "portid=%02x%02x%02x entry-status=%x.\n",
 		    type, sp->handle, fcport->d_id.b.domain,
@@ -1198,14 +1154,14 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
 	}
 
 	if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
-		ql_dbg(ql_dbg_async, vha, 0x5036,
+		ql_dbg(ql_dbg_async, fcport->vha, 0x5036,
 		    "Async-%s complete - hdl=%x portid=%02x%02x%02x "
 		    "iop0=%x.\n", type, sp->handle, fcport->d_id.b.domain,
 		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
 		    le32_to_cpu(logio->io_parameter[0]));
 
 		data[0] = MBS_COMMAND_COMPLETE;
-		if (ctx->type != SRB_LOGIN_CMD)
+		if (sp->type != SRB_LOGIN_CMD)
 			goto logio_done;
 
 		iop[0] = le32_to_cpu(logio->io_parameter[0]);
@@ -1239,7 +1195,7 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
 		break;
 	}
 
-	ql_dbg(ql_dbg_async, vha, 0x5037,
+	ql_dbg(ql_dbg_async, fcport->vha, 0x5037,
 	    "Async-%s failed - hdl=%x portid=%02x%02x%02x comp=%x "
 	    "iop0=%x iop1=%x.\n", type, sp->handle, fcport->d_id.b.domain,
 	    fcport->d_id.b.area, fcport->d_id.b.al_pa,
@@ -1248,7 +1204,7 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
 	    le32_to_cpu(logio->io_parameter[1]));
 
 logio_done:
-	lio->done(sp);
+	sp->done(vha, sp, 0);
 }
 
 static void
@@ -1260,7 +1216,6 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
 	fc_port_t *fcport;
 	srb_t *sp;
 	struct srb_iocb *iocb;
-	struct srb_ctx *ctx;
 	struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
 	int error = 1;
 
@@ -1268,30 +1223,29 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
 	if (!sp)
 		return;
 
-	ctx = sp->ctx;
-	iocb = ctx->u.iocb_cmd;
-	type = ctx->name;
+	iocb = &sp->u.iocb_cmd;
+	type = sp->name;
 	fcport = sp->fcport;
 
 	if (sts->entry_status) {
-		ql_log(ql_log_warn, vha, 0x5038,
+		ql_log(ql_log_warn, fcport->vha, 0x5038,
 		    "Async-%s error - hdl=%x entry-status(%x).\n",
 		    type, sp->handle, sts->entry_status);
 	} else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
-		ql_log(ql_log_warn, vha, 0x5039,
+		ql_log(ql_log_warn, fcport->vha, 0x5039,
 		    "Async-%s error - hdl=%x completion status(%x).\n",
 		    type, sp->handle, sts->comp_status);
 	} else if (!(le16_to_cpu(sts->scsi_status) &
 	    SS_RESPONSE_INFO_LEN_VALID)) {
-		ql_log(ql_log_warn, vha, 0x503a,
+		ql_log(ql_log_warn, fcport->vha, 0x503a,
 		    "Async-%s error - hdl=%x no response info(%x).\n",
 		    type, sp->handle, sts->scsi_status);
 	} else if (le32_to_cpu(sts->rsp_data_len) < 4) {
-		ql_log(ql_log_warn, vha, 0x503b,
+		ql_log(ql_log_warn, fcport->vha, 0x503b,
 		    "Async-%s error - hdl=%x not enough response(%d).\n",
 		    type, sp->handle, sts->rsp_data_len);
 	} else if (sts->data[3]) {
-		ql_log(ql_log_warn, vha, 0x503c,
+		ql_log(ql_log_warn, fcport->vha, 0x503c,
 		    "Async-%s error - hdl=%x response(%x).\n",
 		    type, sp->handle, sts->data[3]);
 	} else {
@@ -1304,7 +1258,7 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
 		    (uint8_t *)sts, sizeof(*sts));
 	}
 
-	iocb->done(sp);
+	sp->done(vha, sp, 0);
 }
 
 /**
@@ -1390,25 +1344,32 @@ qla2x00_process_response_queue(struct rsp_que *rsp)
 
 static inline void
 qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
-    uint32_t sense_len, struct rsp_que *rsp)
+		     uint32_t sense_len, struct rsp_que *rsp, int res)
 {
 	struct scsi_qla_host *vha = sp->fcport->vha;
-	struct scsi_cmnd *cp = sp->cmd;
+	struct scsi_cmnd *cp = GET_CMD_SP(sp);
+	uint32_t track_sense_len;
 
 	if (sense_len >= SCSI_SENSE_BUFFERSIZE)
 		sense_len = SCSI_SENSE_BUFFERSIZE;
 
-	sp->request_sense_length = sense_len;
-	sp->request_sense_ptr = cp->sense_buffer;
-	if (sp->request_sense_length > par_sense_len)
+	SET_CMD_SENSE_LEN(sp, sense_len);
+	SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
+	track_sense_len = sense_len;
+
+	if (sense_len > par_sense_len)
 		sense_len = par_sense_len;
 
 	memcpy(cp->sense_buffer, sense_data, sense_len);
 
-	sp->request_sense_ptr += sense_len;
-	sp->request_sense_length -= sense_len;
-	if (sp->request_sense_length != 0)
+	SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
+	track_sense_len -= sense_len;
+	SET_CMD_SENSE_LEN(sp, track_sense_len);
+
+	if (track_sense_len != 0) {
 		rsp->status_srb = sp;
+		cp->result = res;
+	}
 
 	if (sense_len) {
 		ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c,
@@ -1436,7 +1397,7 @@ static inline int
 qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
 {
 	struct scsi_qla_host *vha = sp->fcport->vha;
-	struct scsi_cmnd *cmd = sp->cmd;
+	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
 	uint8_t		*ap = &sts24->data[12];
 	uint8_t		*ep = &sts24->data[20];
 	uint32_t	e_ref_tag, a_ref_tag;
@@ -1580,6 +1541,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
 	uint16_t que;
 	struct req_que *req;
 	int logit = 1;
+	int res = 0;
 
 	sts = (sts_entry_t *) pkt;
 	sts24 = (struct sts_entry_24xx *) pkt;
@@ -1619,7 +1581,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
 		qla2xxx_wake_dpc(vha);
 		return;
 	}
-	cp = sp->cmd;
+	cp = GET_CMD_SP(sp);
 	if (cp == NULL) {
 		ql_dbg(ql_dbg_io, vha, 0x3018,
 		    "Command already returned (0x%x/%p).\n",
@@ -1668,11 +1630,11 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
 			par_sense_len -= rsp_info_len;
 		}
 		if (rsp_info_len > 3 && rsp_info[3]) {
-			ql_dbg(ql_dbg_io, vha, 0x3019,
+			ql_dbg(ql_dbg_io, fcport->vha, 0x3019,
 			    "FCP I/O protocol failure (0x%x/0x%x).\n",
 			    rsp_info_len, rsp_info[3]);
 
-			cp->result = DID_BUS_BUSY << 16;
+			res = DID_BUS_BUSY << 16;
 			goto out;
 		}
 	}
@@ -1689,7 +1651,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
 	case CS_COMPLETE:
 	case CS_QUEUE_FULL:
 		if (scsi_status == 0) {
-			cp->result = DID_OK << 16;
+			res = DID_OK << 16;
 			break;
 		}
 		if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
@@ -1699,19 +1661,19 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
 			if (!lscsi_status &&
 			    ((unsigned)(scsi_bufflen(cp) - resid) <
 			     cp->underflow)) {
-				ql_dbg(ql_dbg_io, vha, 0x301a,
+				ql_dbg(ql_dbg_io, fcport->vha, 0x301a,
 				    "Mid-layer underflow "
 				    "detected (0x%x of 0x%x bytes).\n",
 				    resid, scsi_bufflen(cp));
 
-				cp->result = DID_ERROR << 16;
+				res = DID_ERROR << 16;
 				break;
 			}
 		}
-		cp->result = DID_OK << 16 | lscsi_status;
+		res = DID_OK << 16 | lscsi_status;
 
 		if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
-			ql_dbg(ql_dbg_io, vha, 0x301b,
+			ql_dbg(ql_dbg_io, fcport->vha, 0x301b,
 			    "QUEUE FULL detected.\n");
 			break;
 		}
@@ -1724,7 +1686,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
 			break;
 
 		qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len,
-		    rsp);
+		    rsp, res);
 		break;
 
 	case CS_DATA_UNDERRUN:
@@ -1733,36 +1695,36 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
 		scsi_set_resid(cp, resid);
 		if (scsi_status & SS_RESIDUAL_UNDER) {
 			if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
-				ql_dbg(ql_dbg_io, vha, 0x301d,
+				ql_dbg(ql_dbg_io, fcport->vha, 0x301d,
 				    "Dropped frame(s) detected "
 				    "(0x%x of 0x%x bytes).\n",
 				    resid, scsi_bufflen(cp));
 
-				cp->result = DID_ERROR << 16 | lscsi_status;
+				res = DID_ERROR << 16 | lscsi_status;
 				goto check_scsi_status;
 			}
 
 			if (!lscsi_status &&
 			    ((unsigned)(scsi_bufflen(cp) - resid) <
 			    cp->underflow)) {
-				ql_dbg(ql_dbg_io, vha, 0x301e,
+				ql_dbg(ql_dbg_io, fcport->vha, 0x301e,
 				    "Mid-layer underflow "
 				    "detected (0x%x of 0x%x bytes).\n",
 				    resid, scsi_bufflen(cp));
 
-				cp->result = DID_ERROR << 16;
+				res = DID_ERROR << 16;
 				break;
 			}
 		} else {
-			ql_dbg(ql_dbg_io, vha, 0x301f,
+			ql_dbg(ql_dbg_io, fcport->vha, 0x301f,
 			    "Dropped frame(s) detected (0x%x "
 			    "of 0x%x bytes).\n", resid, scsi_bufflen(cp));
 
-			cp->result = DID_ERROR << 16 | lscsi_status;
+			res = DID_ERROR << 16 | lscsi_status;
 			goto check_scsi_status;
 		}
 
-		cp->result = DID_OK << 16 | lscsi_status;
+		res = DID_OK << 16 | lscsi_status;
 		logit = 0;
 
 check_scsi_status:
@@ -1772,7 +1734,7 @@ check_scsi_status:
 		 */
 		if (lscsi_status != 0) {
 			if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
-				ql_dbg(ql_dbg_io, vha, 0x3020,
+				ql_dbg(ql_dbg_io, fcport->vha, 0x3020,
 				    "QUEUE FULL detected.\n");
 				logit = 1;
 				break;
@@ -1785,7 +1747,7 @@ check_scsi_status:
 				break;
 
 			qla2x00_handle_sense(sp, sense_data, par_sense_len,
-			    sense_len, rsp);
+			    sense_len, rsp, res);
 		}
 		break;
 
@@ -1802,7 +1764,7 @@ check_scsi_status:
 		 * while we try to recover so instruct the mid layer
 		 * to requeue until the class decides how to handle this.
 		 */
-		cp->result = DID_TRANSPORT_DISRUPTED << 16;
+		res = DID_TRANSPORT_DISRUPTED << 16;
 
 		if (comp_status == CS_TIMEOUT) {
 			if (IS_FWI2_CAPABLE(ha))
@@ -1812,7 +1774,7 @@ check_scsi_status:
 				break;
 		}
 
-		ql_dbg(ql_dbg_io, vha, 0x3021,
+		ql_dbg(ql_dbg_io, fcport->vha, 0x3021,
 		    "Port down status: port-state=0x%x.\n",
 		    atomic_read(&fcport->state));
 
@@ -1821,25 +1783,25 @@ check_scsi_status:
 		break;
 
 	case CS_ABORTED:
-		cp->result = DID_RESET << 16;
+		res = DID_RESET << 16;
 		break;
 
 	case CS_DIF_ERROR:
 		logit = qla2x00_handle_dif_error(sp, sts24);
 		break;
 	default:
-		cp->result = DID_ERROR << 16;
+		res = DID_ERROR << 16;
 		break;
 	}
 
 out:
 	if (logit)
-		ql_dbg(ql_dbg_io, vha, 0x3022,
+		ql_dbg(ql_dbg_io, fcport->vha, 0x3022,
 		    "FCP command status: 0x%x-0x%x (0x%x) "
 		    "nexus=%ld:%d:%d portid=%02x%02x%02x oxid=0x%x "
 		    "cdb=%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x len=0x%x "
 		    "rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n",
-		    comp_status, scsi_status, cp->result, vha->host_no,
+		    comp_status, scsi_status, res, vha->host_no,
 		    cp->device->id, cp->device->lun, fcport->d_id.b.domain,
 		    fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
 		    cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], cp->cmnd[3],
@@ -1848,7 +1810,7 @@ out:
 		    resid_len, fw_resid_len);
 
 	if (rsp->status_srb == NULL)
-		qla2x00_sp_compl(ha, sp);
+		sp->done(ha, sp, res);
 }
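
[Editor's note -- illustration only, not part of the patch. The status
handler above now accumulates the SCSI result in a local "res" and
reports it exactly once through sp->done(), instead of writing
cp->result at every exit path. A hypothetical ->done() implementation,
just to show the contract the handler relies on (the real completion
routines are set up elsewhere in this series):

	static void qla2x00_sp_compl_sketch(void *data, srb_t *sp, int res)
	{
		struct scsi_cmnd *cmd = GET_CMD_SP(sp);

		cmd->result = res;	/* result is decided by the caller */
		cmd->scsi_done(cmd);	/* hand the command back to the mid-layer */
	}

The one exception is the sense-continuation case: when more sense data
is still expected, qla2x00_handle_sense() stashes res in cp->result and
completion is deferred to qla2x00_status_cont_entry().]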
 
 /**
@@ -1861,84 +1823,52 @@ out:
 static void
 qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
 {
-	uint8_t		sense_sz = 0;
+	uint8_t	sense_sz = 0;
 	struct qla_hw_data *ha = rsp->hw;
 	struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
-	srb_t		*sp = rsp->status_srb;
+	srb_t *sp = rsp->status_srb;
 	struct scsi_cmnd *cp;
+	uint32_t sense_len;
+	uint8_t *sense_ptr;
 
-	if (sp != NULL && sp->request_sense_length != 0) {
-		cp = sp->cmd;
-		if (cp == NULL) {
-			ql_log(ql_log_warn, vha, 0x3025,
-			    "cmd is NULL: already returned to OS (sp=%p).\n",
-			    sp);
+	if (!sp || !GET_CMD_SENSE_LEN(sp))
+		return;
 
-			rsp->status_srb = NULL;
-			return;
-		}
+	sense_len = GET_CMD_SENSE_LEN(sp);
+	sense_ptr = GET_CMD_SENSE_PTR(sp);
 
-		if (sp->request_sense_length > sizeof(pkt->data)) {
-			sense_sz = sizeof(pkt->data);
-		} else {
-			sense_sz = sp->request_sense_length;
-		}
+	cp = GET_CMD_SP(sp);
+	if (cp == NULL) {
+		ql_log(ql_log_warn, vha, 0x3025,
+		    "cmd is NULL: already returned to OS (sp=%p).\n", sp);
 
-		/* Move sense data. */
-		if (IS_FWI2_CAPABLE(ha))
-			host_to_fcp_swap(pkt->data, sizeof(pkt->data));
-		memcpy(sp->request_sense_ptr, pkt->data, sense_sz);
-		ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
-			sp->request_sense_ptr, sense_sz);
-
-		sp->request_sense_ptr += sense_sz;
-		sp->request_sense_length -= sense_sz;
-
-		/* Place command on done queue. */
-		if (sp->request_sense_length == 0) {
-			rsp->status_srb = NULL;
-			qla2x00_sp_compl(ha, sp);
-		}
+		rsp->status_srb = NULL;
+		return;
 	}
-}
 
-static int
-qla2x00_free_sp_ctx(scsi_qla_host_t *vha, srb_t *sp)
-{
-	struct qla_hw_data *ha = vha->hw;
-	struct srb_ctx *ctx;
+	if (sense_len > sizeof(pkt->data))
+		sense_sz = sizeof(pkt->data);
+	else
+		sense_sz = sense_len;
 
-	if (!sp->ctx)
-		return 1;
+	/* Move sense data. */
+	if (IS_FWI2_CAPABLE(ha))
+		host_to_fcp_swap(pkt->data, sizeof(pkt->data));
+	memcpy(sense_ptr, pkt->data, sense_sz);
+	ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
+		sense_ptr, sense_sz);
 
-	ctx = sp->ctx;
+	sense_len -= sense_sz;
+	sense_ptr += sense_sz;
 
-	if (ctx->type == SRB_LOGIN_CMD ||
-	    ctx->type == SRB_LOGOUT_CMD ||
-	    ctx->type == SRB_TM_CMD) {
-		ctx->u.iocb_cmd->done(sp);
-		return 0;
-	} else if (ctx->type == SRB_ADISC_CMD) {
-		ctx->u.iocb_cmd->free(sp);
-		return 0;
-	} else {
-		struct fc_bsg_job *bsg_job;
-
-		bsg_job = ctx->u.bsg_job;
-		if (ctx->type == SRB_ELS_CMD_HST ||
-		    ctx->type == SRB_CT_CMD)
-			kfree(sp->fcport);
-
-		bsg_job->reply->reply_data.ctels_reply.status =
-		    FC_CTELS_STATUS_OK;
-		bsg_job->reply->result = DID_ERROR << 16;
-		bsg_job->reply->reply_payload_rcv_len = 0;
-		kfree(sp->ctx);
-		mempool_free(sp, ha->srb_mempool);
-		bsg_job->job_done(bsg_job);
-		return 0;
+	SET_CMD_SENSE_PTR(sp, sense_ptr);
+	SET_CMD_SENSE_LEN(sp, sense_len);
+
+	/* Place command on done queue. */
+	if (sense_len == 0) {
+		rsp->status_srb = NULL;
+		sp->done(ha, sp, cp->result);
 	}
-	return 1;
 }
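
[Editor's note -- not part of the patch. The rewritten continuation
handler tracks outstanding sense data through accessor macros instead of
the old sp->request_sense_{ptr,length} fields. Their assumed shape,
matching the srb_cmd sketch earlier; the real definitions are added to
qla_def.h elsewhere in the series:

	#define GET_CMD_SENSE_LEN(sp)	   ((sp)->u.scmd.request_sense_length)
	#define SET_CMD_SENSE_LEN(sp, len) ((sp)->u.scmd.request_sense_length = (len))
	#define GET_CMD_SENSE_PTR(sp)	   ((sp)->u.scmd.request_sense_ptr)
	#define SET_CMD_SENSE_PTR(sp, ptr) ((sp)->u.scmd.request_sense_ptr = (ptr))
]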
 
 /**
@@ -1953,53 +1883,34 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
 	struct qla_hw_data *ha = vha->hw;
 	const char func[] = "ERROR-IOCB";
 	uint16_t que = MSW(pkt->handle);
-	struct req_que *req = ha->req_q_map[que];
-
-	if (pkt->entry_status & RF_INV_E_ORDER)
-		ql_dbg(ql_dbg_async, vha, 0x502a,
-		    "Invalid Entry Order.\n");
-	else if (pkt->entry_status & RF_INV_E_COUNT)
-		ql_dbg(ql_dbg_async, vha, 0x502b,
-		    "Invalid Entry Count.\n");
-	else if (pkt->entry_status & RF_INV_E_PARAM)
-		ql_dbg(ql_dbg_async, vha, 0x502c,
-		    "Invalid Entry Parameter.\n");
-	else if (pkt->entry_status & RF_INV_E_TYPE)
-		ql_dbg(ql_dbg_async, vha, 0x502d,
-		    "Invalid Entry Type.\n");
-	else if (pkt->entry_status & RF_BUSY)
-		ql_dbg(ql_dbg_async, vha, 0x502e,
-		    "Busy.\n");
-	else
-		ql_dbg(ql_dbg_async, vha, 0x502f,
-		    "UNKNOWN flag error.\n");
+	struct req_que *req = NULL;
+	int res = DID_ERROR << 16;
+
+	ql_dbg(ql_dbg_async, vha, 0x502a,
+	    "type of error status in response: 0x%x\n", pkt->entry_status);
+
+	if (que >= ha->max_req_queues || !ha->req_q_map[que])
+		goto fatal;
+
+	req = ha->req_q_map[que];
+
+	if (pkt->entry_status & RF_BUSY)
+		res = DID_BUS_BUSY << 16;
 
 	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
 	if (sp) {
-		if (qla2x00_free_sp_ctx(vha, sp)) {
-			if (pkt->entry_status &
-			    (RF_INV_E_ORDER | RF_INV_E_COUNT |
-			     RF_INV_E_PARAM | RF_INV_E_TYPE)) {
-				sp->cmd->result = DID_ERROR << 16;
-			} else if (pkt->entry_status & RF_BUSY) {
-				sp->cmd->result = DID_BUS_BUSY << 16;
-			} else {
-				sp->cmd->result = DID_ERROR << 16;
-			}
-			qla2x00_sp_compl(ha, sp);
-		}
-	} else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type ==
-		COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7
-		|| pkt->entry_type == COMMAND_TYPE_6) {
-		ql_log(ql_log_warn, vha, 0x5030,
-		    "Error entry - invalid handle.\n");
-
-		if (IS_QLA82XX(ha))
-			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
-		else
-			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
-		qla2xxx_wake_dpc(vha);
+		sp->done(ha, sp, res);
+		return;
 	}
+fatal:
+	ql_log(ql_log_warn, vha, 0x5030,
+	    "Error entry - invalid handle/queue.\n");
+
+	if (IS_QLA82XX(ha))
+		set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
+	else
+		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+	qla2xxx_wake_dpc(vha);
 }
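
[Editor's note: the rewritten error path folds the five per-flag
messages into one debug line that carries the raw entry_status, bounds-
checks the queue index decoded from the handle before dereferencing
ha->req_q_map[] (the old code indexed it unconditionally), maps RF_BUSY
to DID_BUS_BUSY and everything else to DID_ERROR, and sends every
unresolvable handle -- regardless of entry type -- down the same fatal
ISP-abort branch.]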
 
 /**
@@ -2127,7 +2038,7 @@ qla2xxx_check_risc_status(scsi_qla_host_t *vha)
 	struct qla_hw_data *ha = vha->hw;
 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
 
-	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))
+	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha))
 		return;
 
 	rval = QLA_SUCCESS;
@@ -2168,7 +2079,7 @@ done:
 }
 
 /**
- * qla24xx_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
+ * qla24xx_intr_handler() - Process interrupts for the ISP23xx and ISP24xx.
  * @irq:
  * @dev_id: SCSI driver HA context
  *
@@ -2192,8 +2103,8 @@ qla24xx_intr_handler(int irq, void *dev_id)
 
 	rsp = (struct rsp_que *) dev_id;
 	if (!rsp) {
-		printk(KERN_INFO
-		    "%s(): NULL response queue pointer.\n", __func__);
+		ql_log(ql_log_info, NULL, 0x5059,
+		    "%s: NULL response queue pointer.\n", __func__);
 		return IRQ_NONE;
 	}
 
@@ -2276,8 +2187,8 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
 
 	rsp = (struct rsp_que *) dev_id;
 	if (!rsp) {
-		printk(KERN_INFO
-		"%s(): NULL response queue pointer.\n", __func__);
+		ql_log(ql_log_info, NULL, 0x505a,
+		    "%s: NULL response queue pointer.\n", __func__);
 		return IRQ_NONE;
 	}
 	ha = rsp->hw;
@@ -2306,8 +2217,8 @@ qla25xx_msix_rsp_q(int irq, void *dev_id)
 
 	rsp = (struct rsp_que *) dev_id;
 	if (!rsp) {
-		printk(KERN_INFO
-			"%s(): NULL response queue pointer.\n", __func__);
+		ql_log(ql_log_info, NULL, 0x505b,
+		    "%s: NULL response queue pointer.\n", __func__);
 		return IRQ_NONE;
 	}
 	ha = rsp->hw;
@@ -2340,8 +2251,8 @@ qla24xx_msix_default(int irq, void *dev_id)
 
 	rsp = (struct rsp_que *) dev_id;
 	if (!rsp) {
-		printk(KERN_INFO
-			"%s(): NULL response queue pointer.\n", __func__);
+		ql_log(ql_log_info, NULL, 0x505c,
+		    "%s: NULL response queue pointer.\n", __func__);
 		return IRQ_NONE;
 	}
 	ha = rsp->hw;
@@ -2530,8 +2441,14 @@ msix_failed:
 	}
 
 	/* Enable MSI-X vector for response queue update for queue 0 */
-	if (ha->mqiobase &&  (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
-		ha->mqenable = 1;
+	if (IS_QLA83XX(ha)) {
+		if (ha->msixbase && ha->mqiobase &&
+		    (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
+			ha->mqenable = 1;
+	} else if (ha->mqiobase &&
+	    (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
+		ha->mqenable = 1;
 	ql_dbg(ql_dbg_multiq, vha, 0xc005,
 	    "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
 	    ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
@@ -2552,8 +2469,8 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
 	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
 
 	/* If possible, enable MSI-X. */
-	if (!IS_QLA2432(ha) && !IS_QLA2532(ha) &&
-		!IS_QLA8432(ha) && !IS_QLA8XXX_TYPE(ha))
+	if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
+		!IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha))
 		goto skip_msi;
 
 	if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
@@ -2615,7 +2532,7 @@ clear_risc_ints:
 	 * FIXME: Noted that 8014s were being dropped during NK testing.
 	 * Timing deltas during MSI-X/INTa transitions?
 	 */
-	if (IS_QLA81XX(ha) || IS_QLA82XX(ha))
+	if (IS_QLA81XX(ha) || IS_QLA82XX(ha) || IS_QLA83XX(ha))
 		goto fail;
 	spin_lock_irq(&ha->hardware_lock);
 	if (IS_FWI2_CAPABLE(ha)) {
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 08f1d01bdc1c..b4a23394a7bd 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -46,17 +46,17 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
 	struct qla_hw_data *ha = vha->hw;
 	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
 
-	ql_dbg(ql_dbg_mbx, base_vha, 0x1000, "Entered %s.\n", __func__);
+	ql_dbg(ql_dbg_mbx, vha, 0x1000, "Entered %s.\n", __func__);
 
 	if (ha->pdev->error_state > pci_channel_io_frozen) {
-		ql_log(ql_log_warn, base_vha, 0x1001,
+		ql_log(ql_log_warn, vha, 0x1001,
 		    "error_state is greater than pci_channel_io_frozen, "
 		    "exiting.\n");
 		return QLA_FUNCTION_TIMEOUT;
 	}
 
 	if (vha->device_flags & DFLG_DEV_FAILED) {
-		ql_log(ql_log_warn, base_vha, 0x1002,
+		ql_log(ql_log_warn, vha, 0x1002,
 		    "Device in failed state, exiting.\n");
 		return QLA_FUNCTION_TIMEOUT;
 	}
@@ -69,7 +69,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
 
 
 	if (ha->flags.pci_channel_io_perm_failure) {
-		ql_log(ql_log_warn, base_vha, 0x1003,
+		ql_log(ql_log_warn, vha, 0x1003,
 		    "Perm failure on EEH timeout MBX, exiting.\n");
 		return QLA_FUNCTION_TIMEOUT;
 	}
@@ -77,7 +77,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
 	if (ha->flags.isp82xx_fw_hung) {
 		/* Setting Link-Down error */
 		mcp->mb[0] = MBS_LINK_DOWN_ERROR;
-		ql_log(ql_log_warn, base_vha, 0x1004,
+		ql_log(ql_log_warn, vha, 0x1004,
 		    "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
 		return QLA_FUNCTION_TIMEOUT;
 	}
@@ -89,8 +89,9 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
 	 */
 	if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
 		/* Timeout occurred. Return error. */
-		ql_log(ql_log_warn, base_vha, 0x1005,
-		    "Cmd access timeout, Exiting.\n");
+		ql_log(ql_log_warn, vha, 0x1005,
+		    "Cmd access timeout, cmd=0x%x, Exiting.\n",
+		    mcp->mb[0]);
 		return QLA_FUNCTION_TIMEOUT;
 	}
 
@@ -98,7 +99,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
 	/* Save mailbox command for debug */
 	ha->mcp = mcp;
 
-	ql_dbg(ql_dbg_mbx, base_vha, 0x1006,
+	ql_dbg(ql_dbg_mbx, vha, 0x1006,
 	    "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]);
 
 	spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -127,28 +128,28 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
 		iptr++;
 	}
 
-	ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1111,
+	ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1111,
 	    "Loaded MBX registers (displayed in bytes) =.\n");
-	ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1112,
+	ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1112,
 	    (uint8_t *)mcp->mb, 16);
-	ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1113,
+	ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1113,
 	    ".\n");
-	ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1114,
+	ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1114,
 	    ((uint8_t *)mcp->mb + 0x10), 16);
-	ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1115,
+	ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1115,
 	    ".\n");
-	ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1116,
+	ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1116,
 	    ((uint8_t *)mcp->mb + 0x20), 8);
-	ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1117,
+	ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1117,
 	    "I/O Address = %p.\n", optr);
-	ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x100e);
+	ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x100e);
 
 	/* Issue set host interrupt command to send cmd out. */
 	ha->flags.mbox_int = 0;
 	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
 
 	/* Unlock mbx registers and wait for interrupt */
-	ql_dbg(ql_dbg_mbx, base_vha, 0x100f,
+	ql_dbg(ql_dbg_mbx, vha, 0x100f,
 	    "Going to unlock irq & waiting for interrupts. "
 	    "jiffies=%lx.\n", jiffies);
 
@@ -163,7 +164,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
 				spin_unlock_irqrestore(&ha->hardware_lock,
 					flags);
 				ha->flags.mbox_busy = 0;
-				ql_dbg(ql_dbg_mbx, base_vha, 0x1010,
+				ql_dbg(ql_dbg_mbx, vha, 0x1010,
 				    "Pending mailbox timeout, exiting.\n");
 				rval = QLA_FUNCTION_TIMEOUT;
 				goto premature_exit;
@@ -180,7 +181,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
 		clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
 
 	} else {
-		ql_dbg(ql_dbg_mbx, base_vha, 0x1011,
+		ql_dbg(ql_dbg_mbx, vha, 0x1011,
 		    "Cmd=%x Polling Mode.\n", command);
 
 		if (IS_QLA82XX(ha)) {
@@ -189,7 +190,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
 				spin_unlock_irqrestore(&ha->hardware_lock,
 					flags);
 				ha->flags.mbox_busy = 0;
-				ql_dbg(ql_dbg_mbx, base_vha, 0x1012,
+				ql_dbg(ql_dbg_mbx, vha, 0x1012,
 				    "Pending mailbox timeout, exiting.\n");
 				rval = QLA_FUNCTION_TIMEOUT;
 				goto premature_exit;
@@ -214,7 +215,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
 			    command == MBC_LOAD_RISC_RAM_EXTENDED))
 				msleep(10);
 		} /* while */
-		ql_dbg(ql_dbg_mbx, base_vha, 0x1013,
+		ql_dbg(ql_dbg_mbx, vha, 0x1013,
 		    "Waited %d sec.\n",
 		    (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
 	}
@@ -223,7 +224,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
 	if (ha->flags.mbox_int) {
 		uint16_t *iptr2;
 
-		ql_dbg(ql_dbg_mbx, base_vha, 0x1014,
+		ql_dbg(ql_dbg_mbx, vha, 0x1014,
 		    "Cmd=%x completed.\n", command);
 
 		/* Got interrupt. Clear the flag. */
@@ -236,7 +237,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
 			mcp->mb[0] = MBS_LINK_DOWN_ERROR;
 			ha->mcp = NULL;
 			rval = QLA_FUNCTION_FAILED;
-			ql_log(ql_log_warn, base_vha, 0x1015,
+			ql_log(ql_log_warn, vha, 0x1015,
 			    "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
 			goto premature_exit;
 		}
@@ -268,13 +269,19 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
 			mb0 = RD_MAILBOX_REG(ha, &reg->isp, 0);
 			ictrl = RD_REG_WORD(&reg->isp.ictrl);
 		}
-		ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1119,
+		ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
 		    "MBX Command timeout for cmd %x.\n", command);
-		ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x111a,
+		ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111a,
 		    "iocontrol=%x jiffies=%lx.\n", ictrl, jiffies);
-		ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x111b,
+		ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111b,
 		    "mb[0] = 0x%x.\n", mb0);
-		ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1019);
+		ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019);
+
+		/*
+		 * Attempt to capture a firmware dump for further analysis
+		 * of the current firmware state.
+		 */
+		ha->isp_ops->fw_dump(vha, 0);
 
 		rval = QLA_FUNCTION_TIMEOUT;
 	}
@@ -285,7 +292,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
 	ha->mcp = NULL;
 
 	if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
-		ql_dbg(ql_dbg_mbx, base_vha, 0x101a,
+		ql_dbg(ql_dbg_mbx, vha, 0x101a,
 		    "Checking for additional resp interrupt.\n");
 
 		/* polling mode for non isp_abort commands. */
@@ -297,7 +304,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
 		if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
 		    ha->flags.eeh_busy) {
 			/* not in dpc. schedule it for dpc to take over. */
-			ql_dbg(ql_dbg_mbx, base_vha, 0x101b,
+			ql_dbg(ql_dbg_mbx, vha, 0x101b,
 			    "Timeout, schedule isp_abort_needed.\n");
 
 			if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
@@ -313,15 +320,16 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
 					    CRB_NIU_XG_PAUSE_CTL_P1);
 				}
 				ql_log(ql_log_info, base_vha, 0x101c,
-				    "Mailbox cmd timeout occured. "
-				    "Scheduling ISP abort eeh_busy=0x%x.\n",
-					ha->flags.eeh_busy);
+				    "Mailbox cmd timeout occurred, cmd=0x%x, "
+				    "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
+				    "abort.\n", command, mcp->mb[0],
+				    ha->flags.eeh_busy);
 				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
 				qla2xxx_wake_dpc(vha);
 			}
 		} else if (!abort_active) {
 			/* call abort directly since we are in the DPC thread */
-			ql_dbg(ql_dbg_mbx, base_vha, 0x101d,
+			ql_dbg(ql_dbg_mbx, vha, 0x101d,
 			    "Timeout, calling abort_isp.\n");
 
 			if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
@@ -337,9 +345,9 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
 					    CRB_NIU_XG_PAUSE_CTL_P1);
 				}
 				ql_log(ql_log_info, base_vha, 0x101e,
-				    "Mailbox cmd timeout occured. "
-				    "Scheduling ISP abort.\n");
-
+				    "Mailbox cmd timeout occurred, cmd=0x%x, "
+				    "mb[0]=0x%x. Scheduling ISP abort.\n",
+				    command, mcp->mb[0]);
 				set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
 				clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
 				/* Allow next mbx cmd to come in. */
@@ -350,7 +358,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
 					    &vha->dpc_flags);
 				}
 				clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
-				ql_dbg(ql_dbg_mbx, base_vha, 0x101f,
+				ql_dbg(ql_dbg_mbx, vha, 0x101f,
 				    "Finished abort_isp.\n");
 				goto mbx_done;
 			}
@@ -364,8 +372,8 @@ premature_exit:
 mbx_done:
 	if (rval) {
 		ql_dbg(ql_dbg_mbx, base_vha, 0x1020,
-		    "**** Failed mbx[0]=%x, mb[1]=%x, mb[2]=%x, cmd=%x ****.\n",
-		    mcp->mb[0], mcp->mb[1], mcp->mb[2], command);
+		    "**** Failed mbx[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x, cmd=%x ****.\n",
+		    mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], command);
 	} else {
 		ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__);
 	}
@@ -455,7 +463,7 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
 		mcp->mb[1] = MSW(risc_addr);
 		mcp->mb[2] = LSW(risc_addr);
 		mcp->mb[3] = 0;
-		if (IS_QLA81XX(ha)) {
+		if (IS_QLA81XX(ha) || IS_QLA83XX(ha)) {
 			struct nvram_81xx *nv = ha->nvram;
 			mcp->mb[4] = (nv->enhanced_features &
 			    EXTENDED_BB_CREDITS);
@@ -508,21 +516,22 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
  *	Kernel context.
  */
 int
-qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor,
-    uint16_t *subminor, uint16_t *attributes, uint32_t *memory, uint8_t *mpi,
-    uint32_t *mpi_caps, uint8_t *phy)
+qla2x00_get_fw_version(scsi_qla_host_t *vha)
 {
 	int		rval;
 	mbx_cmd_t	mc;
 	mbx_cmd_t	*mcp = &mc;
+	struct qla_hw_data *ha = vha->hw;
 
 	ql_dbg(ql_dbg_mbx, vha, 0x1029, "Entered %s.\n", __func__);
 
 	mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
 	mcp->out_mb = MBX_0;
 	mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
-	if (IS_QLA81XX(vha->hw))
+	if (IS_QLA81XX(vha->hw) || IS_QLA8031(ha))
 		mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8;
+	if (IS_QLA83XX(vha->hw))
+		mcp->in_mb |= MBX_17|MBX_16|MBX_15;
 	mcp->flags = 0;
 	mcp->tov = MBX_TOV_SECONDS;
 	rval = qla2x00_mailbox_command(vha, mcp);
@@ -530,23 +539,37 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor,
 		goto failed;
 
 	/* Return mailbox data. */
-	*major = mcp->mb[1];
-	*minor = mcp->mb[2];
-	*subminor = mcp->mb[3];
-	*attributes = mcp->mb[6];
+	ha->fw_major_version = mcp->mb[1];
+	ha->fw_minor_version = mcp->mb[2];
+	ha->fw_subminor_version = mcp->mb[3];
+	ha->fw_attributes = mcp->mb[6];
 	if (IS_QLA2100(vha->hw) || IS_QLA2200(vha->hw))
-		*memory = 0x1FFFF;			/* Defaults to 128KB. */
+		ha->fw_memory_size = 0x1FFFF;		/* Defaults to 128KB. */
 	else
-		*memory = (mcp->mb[5] << 16) | mcp->mb[4];
-	if (IS_QLA81XX(vha->hw)) {
-		mpi[0] = mcp->mb[10] & 0xff;
-		mpi[1] = mcp->mb[11] >> 8;
-		mpi[2] = mcp->mb[11] & 0xff;
-		*mpi_caps = (mcp->mb[12] << 16) | mcp->mb[13];
-		phy[0] = mcp->mb[8] & 0xff;
-		phy[1] = mcp->mb[9] >> 8;
-		phy[2] = mcp->mb[9] & 0xff;
+		ha->fw_memory_size = (mcp->mb[5] << 16) | mcp->mb[4];
+	if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw)) {
+		ha->mpi_version[0] = mcp->mb[10] & 0xff;
+		ha->mpi_version[1] = mcp->mb[11] >> 8;
+		ha->mpi_version[2] = mcp->mb[11] & 0xff;
+		ha->mpi_capabilities = (mcp->mb[12] << 16) | mcp->mb[13];
+		ha->phy_version[0] = mcp->mb[8] & 0xff;
+		ha->phy_version[1] = mcp->mb[9] >> 8;
+		ha->phy_version[2] = mcp->mb[9] & 0xff;
+	}
+	if (IS_QLA83XX(ha)) {
+		if (mcp->mb[6] & BIT_15) {
+			ha->fw_attributes_h = mcp->mb[15];
+			ha->fw_attributes_ext[0] = mcp->mb[16];
+			ha->fw_attributes_ext[1] = mcp->mb[17];
+			ql_dbg(ql_dbg_mbx, vha, 0x1139,
+			    "%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n",
+			    __func__, mcp->mb[15], mcp->mb[6]);
+		} else
+			ql_dbg(ql_dbg_mbx, vha, 0x112f,
+			    "%s: FwAttributes [Upper] invalid, MB6:%04x\n",
+			    __func__, mcp->mb[6]);
 	}
+
 failed:
 	if (rval != QLA_SUCCESS) {
 		/*EMPTY*/
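
[Editor's note -- not part of the patch. The signature change above
drops eight out-parameters: qla2x00_get_fw_version() now writes the
firmware data straight into the qla_hw_data fields it was always
destined for. A hypothetical before/after at a call site:

	/* before */
	qla2x00_get_fw_version(vha, &ha->fw_major_version,
	    &ha->fw_minor_version, &ha->fw_subminor_version,
	    &ha->fw_attributes, &ha->fw_memory_size,
	    ha->mpi_version, &ha->mpi_capabilities, ha->phy_version);

	/* after */
	qla2x00_get_fw_version(vha);

Callers that need the values simply read ha->fw_major_version and
friends afterwards.]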
@@ -859,6 +882,7 @@ qla2x00_abort_command(srb_t *sp)
 	scsi_qla_host_t *vha = fcport->vha;
 	struct qla_hw_data *ha = vha->hw;
 	struct req_que *req = vha->req;
+	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
 
 	ql_dbg(ql_dbg_mbx, vha, 0x103b, "Entered %s.\n", __func__);
 
@@ -881,7 +905,7 @@ qla2x00_abort_command(srb_t *sp)
 		mcp->mb[1] = fcport->loop_id << 8;
 	mcp->mb[2] = (uint16_t)handle;
 	mcp->mb[3] = (uint16_t)(handle >> 16);
-	mcp->mb[6] = (uint16_t)sp->cmd->device->lun;
+	mcp->mb[6] = (uint16_t)cmd->device->lun;
 	mcp->out_mb = MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
 	mcp->in_mb = MBX_0;
 	mcp->tov = MBX_TOV_SECONDS;
@@ -1028,7 +1052,7 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
 	mcp->mb[9] = vha->vp_idx;
 	mcp->out_mb = MBX_9|MBX_0;
 	mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
-	if (IS_QLA8XXX_TYPE(vha->hw))
+	if (IS_CNA_CAPABLE(vha->hw))
 		mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
 	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
@@ -1052,7 +1076,7 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
 	} else {
 		ql_dbg(ql_dbg_mbx, vha, 0x1048, "Done %s.\n", __func__);
 
-		if (IS_QLA8XXX_TYPE(vha->hw)) {
+		if (IS_CNA_CAPABLE(vha->hw)) {
 			vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
 			vha->fcoe_fcf_idx = mcp->mb[10];
 			vha->fcoe_vn_port_mac[5] = mcp->mb[11] >> 8;
@@ -1163,7 +1187,7 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
 	mcp->mb[6] = MSW(MSD(ha->init_cb_dma));
 	mcp->mb[7] = LSW(MSD(ha->init_cb_dma));
 	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
-	if (IS_QLA81XX(ha) && ha->ex_init_cb->ex_version) {
+	if ((IS_QLA81XX(ha) || IS_QLA83XX(ha)) && ha->ex_init_cb->ex_version) {
 		mcp->mb[1] = BIT_0;
 		mcp->mb[10] = MSW(ha->ex_init_cb_dma);
 		mcp->mb[11] = LSW(ha->ex_init_cb_dma);
@@ -1172,7 +1196,11 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
 		mcp->mb[14] = sizeof(*ha->ex_init_cb);
 		mcp->out_mb |= MBX_14|MBX_13|MBX_12|MBX_11|MBX_10;
 	}
-	mcp->in_mb = MBX_0;
+	/* 1 and 2 should normally be captured. */
+	mcp->in_mb = MBX_2|MBX_1|MBX_0;
+	if (IS_QLA83XX(ha))
+		/* mb3 is additional info about the installed SFP. */
+		mcp->in_mb |= MBX_3;
 	mcp->buf_size = size;
 	mcp->flags = MBX_DMA_OUT;
 	mcp->tov = MBX_TOV_SECONDS;
@@ -1181,7 +1209,8 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
 	if (rval != QLA_SUCCESS) {
 		/*EMPTY*/
 		ql_dbg(ql_dbg_mbx, vha, 0x104d,
-		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+		    "Failed=%x mb[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x.\n",
+		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3]);
 	} else {
 		/*EMPTY*/
 		ql_dbg(ql_dbg_mbx, vha, 0x104e, "Done %s.\n", __func__);
@@ -1260,6 +1289,7 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
 		goto gpd_error_out;
 
 	if (IS_FWI2_CAPABLE(ha)) {
+		uint64_t zero = 0;
 		pd24 = (struct port_database_24xx *) pd;
 
 		/* Check for logged in state. */
@@ -1273,6 +1303,14 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
 			goto gpd_error_out;
 		}
 
+		if (fcport->loop_id == FC_NO_LOOP_ID ||
+		    (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
+		     memcmp(fcport->port_name, pd24->port_name, 8))) {
+			/* We lost the device mid way. */
+			rval = QLA_NOT_LOGGED_IN;
+			goto gpd_error_out;
+		}
+
 		/* Names are little-endian. */
 		memcpy(fcport->node_name, pd24->node_name, WWN_SIZE);
 		memcpy(fcport->port_name, pd24->port_name, WWN_SIZE);
@@ -1289,6 +1327,8 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
 		else
 			fcport->port_type = FCT_TARGET;
 	} else {
+		uint64_t zero = 0;
+
 		/* Check for logged in state. */
 		if (pd->master_state != PD_STATE_PORT_LOGGED_IN &&
 		    pd->slave_state != PD_STATE_PORT_LOGGED_IN) {
@@ -1301,6 +1341,14 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
 			goto gpd_error_out;
 		}
 
+		if (fcport->loop_id == FC_NO_LOOP_ID ||
+		    (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
+		     memcmp(fcport->port_name, pd->port_name, 8))) {
+			/* We lost the device mid way. */
+			rval = QLA_NOT_LOGGED_IN;
+			goto gpd_error_out;
+		}
+
 		/* Names are little-endian. */
 		memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
 		memcpy(fcport->port_name, pd->port_name, WWN_SIZE);
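
[Editor's note: the two new guards above treat an all-zero
fcport->port_name as "not yet learned". Only when a previously recorded,
non-zero port name no longer matches what the firmware's port database
returns -- or the loop ID is FC_NO_LOOP_ID -- is the port reported as
QLA_NOT_LOGGED_IN, on the reasoning that the device was lost mid way and
the loop ID may now belong to someone else.]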
@@ -1481,7 +1529,7 @@ qla2x00_lip_reset(scsi_qla_host_t *vha)
 
 	ql_dbg(ql_dbg_mbx, vha, 0x105a, "Entered %s.\n", __func__);
 
-	if (IS_QLA8XXX_TYPE(vha->hw)) {
+	if (IS_CNA_CAPABLE(vha->hw)) {
 		/* Logout across all FCFs. */
 		mcp->mb[0] = MBC_LIP_FULL_LOGIN;
 		mcp->mb[1] = BIT_1;
@@ -1622,7 +1670,8 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
 	lg->port_id[1] = area;
 	lg->port_id[2] = domain;
 	lg->vp_index = vha->vp_idx;
-	rval = qla2x00_issue_iocb(vha, lg, lg_dma, 0);
+	rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
+	    (ha->r_a_tov / 10 * 2) + 2);
 	if (rval != QLA_SUCCESS) {
 		ql_dbg(ql_dbg_mbx, vha, 0x1063,
 		    "Failed to issue login IOCB (%x).\n", rval);
@@ -1885,8 +1934,8 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
 	lg->port_id[1] = area;
 	lg->port_id[2] = domain;
 	lg->vp_index = vha->vp_idx;
-
-	rval = qla2x00_issue_iocb(vha, lg, lg_dma, 0);
+	rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
+	    (ha->r_a_tov / 10 * 2) + 2);
 	if (rval != QLA_SUCCESS) {
 		ql_dbg(ql_dbg_mbx, vha, 0x106f,
 		    "Failed to issue logout IOCB (%x).\n", rval);
@@ -2094,7 +2143,7 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
 	mcp->mb[0] = MBC_GET_RESOURCE_COUNTS;
 	mcp->out_mb = MBX_0;
 	mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
-	if (IS_QLA81XX(vha->hw))
+	if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw))
 		mcp->in_mb |= MBX_12;
 	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
@@ -2121,7 +2170,7 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
 			*orig_iocb_cnt = mcp->mb[10];
 		if (vha->hw->flags.npiv_supported && max_npiv_vports)
 			*max_npiv_vports = mcp->mb[11];
-		if (IS_QLA81XX(vha->hw) && max_fcfs)
+		if ((IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw)) && max_fcfs)
 			*max_fcfs = mcp->mb[12];
 	}
 
@@ -2686,7 +2735,8 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
 
 	ql_dbg(ql_dbg_mbx, vha, 0x10aa, "Entered %s.\n", __func__);
 
-	if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw))
+	if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) &&
+	    !IS_QLA83XX(vha->hw))
 		return QLA_FUNCTION_FAILED;
 
 	if (unlikely(pci_channel_offline(vha->hw->pdev)))
@@ -2828,7 +2878,7 @@ qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
 	mcp->mb[0] = MBC_PORT_PARAMS;
 	mcp->mb[1] = loop_id;
 	mcp->mb[2] = BIT_0;
-	if (IS_QLA8XXX_TYPE(vha->hw))
+	if (IS_CNA_CAPABLE(vha->hw))
 		mcp->mb[3] = port_speed & (BIT_5|BIT_4|BIT_3|BIT_2|BIT_1|BIT_0);
 	else
 		mcp->mb[3] = port_speed & (BIT_2|BIT_1|BIT_0);
@@ -3298,6 +3348,8 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
 	mcp->mb[12] = req->qos;
 	mcp->mb[11] = req->vp_idx;
 	mcp->mb[13] = req->rid;
+	if (IS_QLA83XX(ha))
+		mcp->mb[15] = 0;
 
 	reg = (struct device_reg_25xxmq *)((void *)(ha->mqiobase) +
 		QLA_QUE_PAGE * req->id);
@@ -3311,12 +3363,21 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
 			MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
 	mcp->in_mb = MBX_0;
 	mcp->flags = MBX_DMA_OUT;
-	mcp->tov = 60;
+	mcp->tov = MBX_TOV_SECONDS * 2;
+
+	if (IS_QLA81XX(ha) || IS_QLA83XX(ha))
+		mcp->in_mb |= MBX_1;
+	if (IS_QLA83XX(ha)) {
+		mcp->out_mb |= MBX_15;
+		/* debug q create issue in SR-IOV */
+		mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
+	}
 
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 	if (!(req->options & BIT_0)) {
 		WRT_REG_DWORD(&reg->req_q_in, 0);
-		WRT_REG_DWORD(&reg->req_q_out, 0);
+		if (!IS_QLA83XX(ha))
+			WRT_REG_DWORD(&reg->req_q_out, 0);
 	}
 	req->req_q_in = &reg->req_q_in;
 	req->req_q_out = &reg->req_q_out;
@@ -3354,6 +3415,8 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
 	mcp->mb[5] = rsp->length;
 	mcp->mb[14] = rsp->msix->entry;
 	mcp->mb[13] = rsp->rid;
+	if (IS_QLA83XX(ha))
+		mcp->mb[15] = 0;
 
 	reg = (struct device_reg_25xxmq *)((void *)(ha->mqiobase) +
 		QLA_QUE_PAGE * rsp->id);
@@ -3367,12 +3430,23 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
 			|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
 	mcp->in_mb = MBX_0;
 	mcp->flags = MBX_DMA_OUT;
-	mcp->tov = 60;
+	mcp->tov = MBX_TOV_SECONDS * 2;
+
+	if (IS_QLA81XX(ha)) {
+		mcp->out_mb |= MBX_12|MBX_11|MBX_10;
+		mcp->in_mb |= MBX_1;
+	} else if (IS_QLA83XX(ha)) {
+		mcp->out_mb |= MBX_15|MBX_12|MBX_11|MBX_10;
+		mcp->in_mb |= MBX_1;
+		/* debug q create issue in SR-IOV */
+		mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
+	}
 
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 	if (!(rsp->options & BIT_0)) {
 		WRT_REG_DWORD(&reg->rsp_q_out, 0);
-		WRT_REG_DWORD(&reg->rsp_q_in, 0);
+		if (!IS_QLA83XX(ha))
+			WRT_REG_DWORD(&reg->rsp_q_in, 0);
 	}
 
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
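
The 83xx-conditional skips above follow from index ownership on the rings: the driver produces into req_q_in and the firmware consumes via req_q_out (and vice versa on the response ring), so an initializer must not zero an index the other side owns. A sketch of that single-writer rule, with the ownership flag invented for illustration:

    #include <stdint.h>

    struct mq_ring {
    	volatile uint32_t in;	/* written by the producer only */
    	volatile uint32_t out;	/* written by the consumer only */
    };

    static void ring_init(struct mq_ring *r, int fw_owns_out)
    {
    	r->in = 0;
    	if (!fw_owns_out)	/* pre-83xx behaviour */
    		r->out = 0;	/* on 83xx, leave the firmware's index alone */
    }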
@@ -3424,7 +3498,7 @@ qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
 
 	ql_dbg(ql_dbg_mbx, vha, 0x10dc, "Entered %s.\n", __func__);
 
-	if (!IS_QLA81XX(vha->hw))
+	if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw))
 		return QLA_FUNCTION_FAILED;
 
 	mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
@@ -3454,7 +3528,7 @@ qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	if (!IS_QLA81XX(vha->hw))
+	if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw))
 		return QLA_FUNCTION_FAILED;
 
 	ql_dbg(ql_dbg_mbx, vha, 0x10df, "Entered %s.\n", __func__);
@@ -3486,7 +3560,7 @@ qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	if (!IS_QLA81XX(vha->hw))
+	if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw))
 		return QLA_FUNCTION_FAILED;
 
 	ql_dbg(ql_dbg_mbx, vha, 0x10e2, "Entered %s.\n", __func__);
@@ -3641,7 +3715,7 @@ qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
 
 	ql_dbg(ql_dbg_mbx, vha, 0x10ee, "Entered %s.\n", __func__);
 
-	if (!IS_QLA8XXX_TYPE(vha->hw))
+	if (!IS_CNA_CAPABLE(vha->hw))
 		return QLA_FUNCTION_FAILED;
 
 	mcp->mb[0] = MBC_GET_XGMAC_STATS;
@@ -3680,7 +3754,7 @@ qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
 
 	ql_dbg(ql_dbg_mbx, vha, 0x10f1, "Entered %s.\n", __func__);
 
-	if (!IS_QLA8XXX_TYPE(vha->hw))
+	if (!IS_CNA_CAPABLE(vha->hw))
 		return QLA_FUNCTION_FAILED;
 
 	mcp->mb[0] = MBC_GET_DCBX_PARAMS;
@@ -3775,7 +3849,7 @@ qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
 
 	mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15|
 	    MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
-	if (IS_QLA8XXX_TYPE(vha->hw))
+	if (IS_CNA_CAPABLE(vha->hw))
 		mcp->out_mb |= MBX_2;
 	mcp->in_mb = MBX_19|MBX_18|MBX_3|MBX_2|MBX_1|MBX_0;
 
@@ -3813,7 +3887,7 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
 	memset(mcp->mb, 0 , sizeof(mcp->mb));
 	mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
 	mcp->mb[1] = mreq->options | BIT_6;	/* BIT_6 specifies 64bit address */
-	if (IS_QLA8XXX_TYPE(ha)) {
+	if (IS_CNA_CAPABLE(ha)) {
 		mcp->mb[1] |= BIT_15;
 		mcp->mb[2] = vha->fcoe_fcf_idx;
 	}
@@ -3831,13 +3905,14 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
 
 	mcp->out_mb = MBX_21|MBX_20|MBX_17|MBX_16|MBX_15|
 	    MBX_14|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
-	if (IS_QLA8XXX_TYPE(ha))
+	if (IS_CNA_CAPABLE(ha))
 		mcp->out_mb |= MBX_2;
 
 	mcp->in_mb = MBX_0;
-	if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_QLA8XXX_TYPE(ha))
+	if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
+	    IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
 		mcp->in_mb |= MBX_1;
-	if (IS_QLA8XXX_TYPE(ha))
+	if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
 		mcp->in_mb |= MBX_3;
 
 	mcp->tov = MBX_TOV_SECONDS;
@@ -3976,6 +4051,7 @@ qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
 
 	return rval;
 }
+
 int
 qla2x00_get_data_rate(scsi_qla_host_t *vha)
 {
@@ -3993,6 +4069,8 @@ qla2x00_get_data_rate(scsi_qla_host_t *vha)
 	mcp->mb[1] = 0;
 	mcp->out_mb = MBX_1|MBX_0;
 	mcp->in_mb = MBX_2|MBX_1|MBX_0;
+	if (IS_QLA83XX(ha))
+		mcp->in_mb |= MBX_3;
 	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
 	rval = qla2x00_mailbox_command(vha, mcp);
@@ -4018,7 +4096,7 @@ qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
 
 	ql_dbg(ql_dbg_mbx, vha, 0x1109, "Entered %s.\n", __func__);
 
-	if (!IS_QLA81XX(ha))
+	if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
 		return QLA_FUNCTION_FAILED;
 	mcp->mb[0] = MBC_GET_PORT_CONFIG;
 	mcp->out_mb = MBX_0;
@@ -4299,6 +4377,90 @@ qla82xx_md_get_template(scsi_qla_host_t *vha)
 }
 
 int
+qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
+{
+	int rval;
+	struct qla_hw_data *ha = vha->hw;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+
+	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
+		return QLA_FUNCTION_FAILED;
+
+	ql_dbg(ql_dbg_mbx, vha, 0x1133, "Entered %s.\n", __func__);
+
+	memset(mcp, 0, sizeof(mbx_cmd_t));
+	mcp->mb[0] = MBC_SET_LED_CONFIG;
+	mcp->mb[1] = led_cfg[0];
+	mcp->mb[2] = led_cfg[1];
+	if (IS_QLA8031(ha)) {
+		mcp->mb[3] = led_cfg[2];
+		mcp->mb[4] = led_cfg[3];
+		mcp->mb[5] = led_cfg[4];
+		mcp->mb[6] = led_cfg[5];
+	}
+
+	mcp->out_mb = MBX_2|MBX_1|MBX_0;
+	if (IS_QLA8031(ha))
+		mcp->out_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
+	mcp->in_mb = MBX_0;
+	mcp->tov = 30;
+	mcp->flags = 0;
+
+	rval = qla2x00_mailbox_command(vha, mcp);
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x1134,
+		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+	} else {
+		ql_dbg(ql_dbg_mbx, vha, 0x1135, "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
+
+int
+qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
+{
+	int rval;
+	struct qla_hw_data *ha = vha->hw;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+
+	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
+		return QLA_FUNCTION_FAILED;
+
+	ql_dbg(ql_dbg_mbx, vha, 0x1136, "Entered %s.\n", __func__);
+
+	memset(mcp, 0, sizeof(mbx_cmd_t));
+	mcp->mb[0] = MBC_GET_LED_CONFIG;
+
+	mcp->out_mb = MBX_0;
+	mcp->in_mb = MBX_2|MBX_1|MBX_0;
+	if (IS_QLA8031(ha))
+		mcp->in_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
+	mcp->tov = 30;
+	mcp->flags = 0;
+
+	rval = qla2x00_mailbox_command(vha, mcp);
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x1137,
+		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+	} else {
+		led_cfg[0] = mcp->mb[1];
+		led_cfg[1] = mcp->mb[2];
+		if (IS_QLA8031(ha)) {
+			led_cfg[2] = mcp->mb[3];
+			led_cfg[3] = mcp->mb[4];
+			led_cfg[4] = mcp->mb[5];
+			led_cfg[5] = mcp->mb[6];
+		}
+		ql_dbg(ql_dbg_mbx, vha, 0x1138, "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
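+
+A hypothetical caller (not part of the patch) showing how the get/set pair above is meant to compose -- save, blink, restore; the driver's real sequence is qla83xx_beacon_blink() in qla_sup.c further down:
+
+    /* Hypothetical helper: one blink cycle using the mailbox
+     * accessors defined above. */
+    static void qla_led_blink_once(scsi_qla_host_t *vha)
+    {
+    	uint16_t orig[6];
+    	uint16_t blink[6] = { 0x4000, 0x2000, 0, 0, 0, 0 };
+
+    	if (qla81xx_get_led_config(vha, orig) != QLA_SUCCESS)
+    		return;			/* leave hardware state untouched */
+    	qla81xx_set_led_config(vha, blink);
+    	msleep(1000);
+    	qla81xx_set_led_config(vha, orig);	/* restore saved state */
+    }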
+
+int
 qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable)
 {
 	int rval;
@@ -4321,7 +4483,7 @@ qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable)
 
 	mcp->out_mb = MBX_7|MBX_0;
 	mcp->in_mb = MBX_0;
-	mcp->tov = 30;
+	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
 
 	rval = qla2x00_mailbox_command(vha, mcp);
@@ -4335,3 +4497,75 @@ qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable)
 
 	return rval;
 }
+
+int
+qla83xx_write_remote_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data)
+{
+	int rval;
+	struct qla_hw_data *ha = vha->hw;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+
+	if (!IS_QLA83XX(ha))
+		return QLA_FUNCTION_FAILED;
+
+	ql_dbg(ql_dbg_mbx, vha, 0x1130, "Entered %s.\n", __func__);
+
+	mcp->mb[0] = MBC_WRITE_REMOTE_REG;
+	mcp->mb[1] = LSW(reg);
+	mcp->mb[2] = MSW(reg);
+	mcp->mb[3] = LSW(data);
+	mcp->mb[4] = MSW(data);
+	mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+
+	mcp->in_mb = MBX_1|MBX_0;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x1131,
+		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+	} else {
+		ql_dbg(ql_dbg_mbx, vha, 0x1132,
+		    "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
+
+int
+qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport)
+{
+	int rval;
+	struct qla_hw_data *ha = vha->hw;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+
+	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
+		ql_dbg(ql_dbg_mbx, vha, 0x113b,
+		    "Implicit LOGO Unsupported.\n");
+		return QLA_FUNCTION_FAILED;
+	}
+
+	ql_dbg(ql_dbg_mbx, vha, 0x113c, "Entered %s.\n", __func__);
+
+	/* Perform Implicit LOGO. */
+	mcp->mb[0] = MBC_PORT_LOGOUT;
+	mcp->mb[1] = fcport->loop_id;
+	mcp->mb[10] = BIT_15;
+	mcp->out_mb = MBX_10|MBX_1|MBX_0;
+	mcp->in_mb = MBX_0;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+	if (rval != QLA_SUCCESS)
+		ql_dbg(ql_dbg_mbx, vha, 0x113d,
+		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+	else
+		ql_dbg(ql_dbg_mbx, vha, 0x113e, "Done %s.\n", __func__);
+
+	return rval;
+}
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index f488cc69fc79..aa062a1b0ca4 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -479,7 +479,7 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
 	host->max_channel = MAX_BUSES - 1;
 	host->max_lun = ql2xmaxlun;
 	host->unique_id = host->host_no;
-	host->max_id = MAX_TARGETS_2200;
+	host->max_id = ha->max_fibre_devices;
 	host->transportt = qla2xxx_transport_vport_template;
 
 	ql_dbg(ql_dbg_vport, vha, 0xa007,
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 270ba3130fde..f0528539bbbc 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -908,27 +908,37 @@ qla82xx_wait_rom_done(struct qla_hw_data *ha)
 	return 0;
 }
 
+int
+qla82xx_md_rw_32(struct qla_hw_data *ha, uint32_t off, u32 data, uint8_t flag)
+{
+	uint32_t  off_value, rval = 0;
+
+	WRT_REG_DWORD((void *)(CRB_WINDOW_2M + ha->nx_pcibase),
+	    (off & 0xFFFF0000));
+
+	/* Read back value to make sure write has gone through */
+	RD_REG_DWORD((void *)(CRB_WINDOW_2M + ha->nx_pcibase));
+	off_value  = (off & 0x0000FFFF);
+
+	if (flag)
+		WRT_REG_DWORD((void *)
+		    (off_value + CRB_INDIRECT_2M + ha->nx_pcibase),
+		    data);
+	else
+		rval = RD_REG_DWORD((void *)
+		    (off_value + CRB_INDIRECT_2M + ha->nx_pcibase));
+
+	return rval;
+}
+
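+
+The helper above implements a classic windowed-register scheme: the high half of the offset selects a 64K window, the low half indexes a fixed indirect aperture, and the read-back flushes the posted window write before the access. A user-space model of the same pattern (register offsets are illustrative, not the real CRB values):
+
+    #include <stdint.h>
+
+    #define WIN_SEL		0x0000u		/* illustrative window-select register */
+    #define INDIRECT_BASE	0x10000u	/* illustrative indirect aperture */
+
+    static uint32_t win_rw(volatile uint32_t *bar, uint32_t off,
+    		       uint32_t data, int write)
+    {
+    	bar[WIN_SEL / 4] = off & 0xFFFF0000u;	/* select 64K window */
+    	(void)bar[WIN_SEL / 4];			/* flush posted write */
+    	if (write) {
+    		bar[(INDIRECT_BASE + (off & 0xFFFFu)) / 4] = data;
+    		return 0;
+    	}
+    	return bar[(INDIRECT_BASE + (off & 0xFFFFu)) / 4];
+    }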
 static int
 qla82xx_do_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
 {
-	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+	/* Dword reads to flash. */
+	qla82xx_md_rw_32(ha, MD_DIRECT_ROM_WINDOW, (addr & 0xFFFF0000), 1);
+	*valp = qla82xx_md_rw_32(ha, MD_DIRECT_ROM_READ_BASE +
+	    (addr & 0x0000FFFF), 0, 0);
 
-	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, addr);
-	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
-	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3);
-	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0xb);
-	qla82xx_wait_rom_busy(ha);
-	if (qla82xx_wait_rom_done(ha)) {
-		ql_log(ql_log_fatal, vha, 0x00ba,
-		    "Error waiting for rom done.\n");
-		return -1;
-	}
-	/* Reset abyte_cnt and dummy_byte_cnt */
-	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
-	udelay(10);
-	cond_resched();
-	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0);
-	*valp = qla82xx_rd_32(ha, QLA82XX_ROMUSB_ROM_RDATA);
 	return 0;
 }
 
@@ -2040,8 +2050,8 @@ qla82xx_intr_handler(int irq, void *dev_id)
 
 	rsp = (struct rsp_que *) dev_id;
 	if (!rsp) {
-		printk(KERN_INFO
-			"%s(): NULL response queue pointer.\n", __func__);
+		ql_log(ql_log_info, NULL, 0xb054,
+		    "%s: NULL response queue pointer.\n", __func__);
 		return IRQ_NONE;
 	}
 	ha = rsp->hw;
@@ -3136,12 +3146,7 @@ qla82xx_check_md_needed(scsi_qla_host_t *vha)
 	fw_minor_version = ha->fw_minor_version;
 	fw_subminor_version = ha->fw_subminor_version;
 
-	rval = qla2x00_get_fw_version(vha, &ha->fw_major_version,
-	    &ha->fw_minor_version, &ha->fw_subminor_version,
-	    &ha->fw_attributes, &ha->fw_memory_size,
-	    ha->mpi_version, &ha->mpi_capabilities,
-	    ha->phy_version);
-
+	rval = qla2x00_get_fw_version(vha);
 	if (rval != QLA_SUCCESS)
 		return rval;
 
@@ -3150,7 +3155,6 @@ qla82xx_check_md_needed(scsi_qla_host_t *vha)
 			if (fw_major_version != ha->fw_major_version ||
 			    fw_minor_version != ha->fw_minor_version ||
 			    fw_subminor_version != ha->fw_subminor_version) {
-
 				ql_log(ql_log_info, vha, 0xb02d,
 				    "Firmware version differs "
 				    "Previous version: %d:%d:%d - "
@@ -3614,7 +3618,7 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
 			for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
 				sp = req->outstanding_cmds[cnt];
 				if (sp) {
-					if (!sp->ctx ||
+					if (!sp->u.scmd.ctx ||
 					    (sp->flags & SRB_FCP_CMND_DMA_VALID)) {
 						spin_unlock_irqrestore(
 						    &ha->hardware_lock, flags);
@@ -3645,29 +3649,6 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
 }
 
 /* Minidump related functions */
-int
-qla82xx_md_rw_32(struct qla_hw_data *ha, uint32_t off, u32 data, uint8_t flag)
-{
-	uint32_t  off_value, rval = 0;
-
-	WRT_REG_DWORD((void *)(CRB_WINDOW_2M + ha->nx_pcibase),
-	    (off & 0xFFFF0000));
-
-	/* Read back value to make sure write has gone through */
-	RD_REG_DWORD((void *)(CRB_WINDOW_2M + ha->nx_pcibase));
-	off_value  = (off & 0x0000FFFF);
-
-	if (flag)
-		WRT_REG_DWORD((void *)
-		    (off_value + CRB_INDIRECT_2M + ha->nx_pcibase),
-		    data);
-	else
-		rval = RD_REG_DWORD((void *)
-		    (off_value + CRB_INDIRECT_2M + ha->nx_pcibase));
-
-	return rval;
-}
-
 static int
 qla82xx_minidump_process_control(scsi_qla_host_t *vha,
 	qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
@@ -4117,8 +4098,9 @@ qla82xx_md_collect(scsi_qla_host_t *vha)
 	data_ptr = (uint32_t *)ha->md_dump;
 
 	if (ha->fw_dumped) {
-		ql_log(ql_log_info, vha, 0xb037,
-		    "Firmware dump available to retrive\n");
+		ql_log(ql_log_warn, vha, 0xb037,
+		    "Firmware has been previously dumped (%p) "
+		    "-- ignoring request.\n", ha->fw_dump);
 		goto md_failed;
 	}
 
@@ -4161,7 +4143,7 @@ qla82xx_md_collect(scsi_qla_host_t *vha)
 
 	total_data_size = ha->md_dump_size;
 
-	ql_dbg(ql_log_info, vha, 0xb03d,
+	ql_dbg(ql_dbg_p3p, vha, 0xb03d,
 	    "Total minidump data_size 0x%x to be captured\n", total_data_size);
 
 	/* Check whether template obtained is valid */
@@ -4284,7 +4266,7 @@ skip_nxt_entry:
 	}
 
 	if (data_collected != total_data_size) {
-		ql_dbg(ql_log_warn, vha, 0xb043,
+		ql_dbg(ql_dbg_p3p, vha, 0xb043,
 		    "MiniDump data mismatch: Data collected: [0x%x],"
 		    "total_data_size:[0x%x]\n",
 		    data_collected, total_data_size);
diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h
index 57a226be339a..4ac50e274661 100644
--- a/drivers/scsi/qla2xxx/qla_nx.h
+++ b/drivers/scsi/qla2xxx/qla_nx.h
@@ -888,7 +888,8 @@ struct ct6_dsd {
 };
 
 #define MBC_TOGGLE_INTERRUPT	0x10
-#define MBC_SET_LED_CONFIG	0x125
+#define MBC_SET_LED_CONFIG	0x125	/* FCoE specific LED control */
+#define MBC_GET_LED_CONFIG	0x126	/* FCoE specific LED control */
 
 /* Flash  offset */
 #define FLT_REG_BOOTLOAD_82XX	0x72
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 036030c95339..a2f999273a5f 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -304,7 +304,6 @@ static int qla2x00_mem_alloc(struct qla_hw_data *, uint16_t, uint16_t,
 	struct req_que **, struct rsp_que **);
 static void qla2x00_free_fw_dump(struct qla_hw_data *);
 static void qla2x00_mem_free(struct qla_hw_data *);
-static void qla2x00_sp_free_dma(srb_t *);
 
 /* -------------------------------------------------------------------------- */
 static int qla2x00_alloc_queues(struct qla_hw_data *ha)
@@ -559,28 +558,75 @@ qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str)
 	return str;
 }
 
-static inline srb_t *
-qla2x00_get_new_sp(scsi_qla_host_t *vha, fc_port_t *fcport,
-	struct scsi_cmnd *cmd)
+void
+qla2x00_sp_free_dma(void *vha, void *ptr)
 {
-	srb_t *sp;
-	struct qla_hw_data *ha = vha->hw;
+	srb_t *sp = (srb_t *)ptr;
+	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+	struct qla_hw_data *ha = sp->fcport->vha->hw;
+	void *ctx = GET_CMD_CTX_SP(sp);
 
-	sp = mempool_alloc(ha->srb_mempool, GFP_ATOMIC);
-	if (!sp) {
-		ql_log(ql_log_warn, vha, 0x3006,
-		    "Memory allocation failed for sp.\n");
-		return sp;
+	if (sp->flags & SRB_DMA_VALID) {
+		scsi_dma_unmap(cmd);
+		sp->flags &= ~SRB_DMA_VALID;
 	}
 
-	atomic_set(&sp->ref_count, 1);
-	sp->fcport = fcport;
-	sp->cmd = cmd;
-	sp->flags = 0;
-	CMD_SP(cmd) = (void *)sp;
-	sp->ctx = NULL;
+	if (sp->flags & SRB_CRC_PROT_DMA_VALID) {
+		dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
+		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
+		sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
+	}
 
-	return sp;
+	if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
+		/* List is assured to have elements */
+		qla2x00_clean_dsd_pool(ha, sp);
+		sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
+	}
+
+	if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
+		dma_pool_free(ha->dl_dma_pool, ctx,
+		    ((struct crc_context *)ctx)->crc_ctx_dma);
+		sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
+	}
+
+	if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
+		struct ct6_dsd *ctx1 = (struct ct6_dsd *)ctx;
+
+		dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
+			ctx1->fcp_cmnd_dma);
+		list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
+		ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
+		ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
+		mempool_free(ctx1, ha->ctx_mempool);
+		ctx1 = NULL;
+	}
+
+	CMD_SP(cmd) = NULL;
+	mempool_free(sp, ha->srb_mempool);
+}
+
+static void
+qla2x00_sp_compl(void *data, void *ptr, int res)
+{
+	struct qla_hw_data *ha = (struct qla_hw_data *)data;
+	srb_t *sp = (srb_t *)ptr;
+	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+
+	cmd->result = res;
+
+	if (atomic_read(&sp->ref_count) == 0) {
+		ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3015,
+		    "SP reference-count to ZERO -- sp=%p cmd=%p.\n",
+		    sp, GET_CMD_SP(sp));
+		if (ql2xextended_error_logging & ql_dbg_io)
+			BUG();
+		return;
+	}
+	if (!atomic_dec_and_test(&sp->ref_count))
+		return;
+
+	qla2x00_sp_free_dma(ha, sp);
+	cmd->scsi_done(cmd);
 }
 
 static int
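
The sp->done/ref_count scheme above lets the normal completion path and the error-handler abort path race safely: each holder drops one reference, and only the final drop performs teardown. A standalone model of that pattern using C11 atomics:

    #include <stdatomic.h>
    #include <stdlib.h>

    struct cmd {
    	atomic_int ref_count;
    	int result;
    };

    static void cmd_put(struct cmd *c, int res)
    {
    	c->result = res;
    	/* Whoever drops the count to zero owns the free. */
    	if (atomic_fetch_sub(&c->ref_count, 1) == 1)
    		free(c);
    }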
@@ -644,10 +690,17 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
 		goto qc24_target_busy;
 	}
 
-	sp = qla2x00_get_new_sp(base_vha, fcport, cmd);
+	sp = qla2x00_get_sp(base_vha, fcport, GFP_ATOMIC);
 	if (!sp)
 		goto qc24_host_busy;
 
+	sp->u.scmd.cmd = cmd;
+	sp->type = SRB_SCSI_CMD;
+	atomic_set(&sp->ref_count, 1);
+	CMD_SP(cmd) = (void *)sp;
+	sp->free = qla2x00_sp_free_dma;
+	sp->done = qla2x00_sp_compl;
+
 	rval = ha->isp_ops->start_scsi(sp);
 	if (rval != QLA_SUCCESS) {
 		ql_dbg(ql_dbg_io, vha, 0x3013,
@@ -658,8 +711,7 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
 	return 0;
 
 qc24_host_busy_free_sp:
-	qla2x00_sp_free_dma(sp);
-	mempool_free(sp, ha->srb_mempool);
+	qla2x00_sp_free_dma(ha, sp);
 
 qc24_host_busy:
 	return SCSI_MLQUEUE_HOST_BUSY;
@@ -893,7 +945,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
 	}
 
 	spin_lock_irqsave(&ha->hardware_lock, flags);
-	qla2x00_sp_compl(ha, sp);
+	sp->done(ha, sp, 0);
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
 	/* Did the command return during mailbox execution? */
@@ -925,6 +977,7 @@ qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
 	struct qla_hw_data *ha = vha->hw;
 	struct req_que *req;
 	srb_t *sp;
+	struct scsi_cmnd *cmd;
 
 	status = QLA_SUCCESS;
 
@@ -935,28 +988,29 @@ qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
 		sp = req->outstanding_cmds[cnt];
 		if (!sp)
 			continue;
-		if ((sp->ctx) && !IS_PROT_IO(sp))
+		if (sp->type != SRB_SCSI_CMD)
 			continue;
 		if (vha->vp_idx != sp->fcport->vha->vp_idx)
 			continue;
 		match = 0;
+		cmd = GET_CMD_SP(sp);
 		switch (type) {
 		case WAIT_HOST:
 			match = 1;
 			break;
 		case WAIT_TARGET:
-			match = sp->cmd->device->id == t;
+			match = cmd->device->id == t;
 			break;
 		case WAIT_LUN:
-			match = (sp->cmd->device->id == t &&
-				sp->cmd->device->lun == l);
+			match = (cmd->device->id == t &&
+				cmd->device->lun == l);
 			break;
 		}
 		if (!match)
 			continue;
 
 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
-		status = qla2x00_eh_wait_on_command(sp->cmd);
+		status = qla2x00_eh_wait_on_command(cmd);
 		spin_lock_irqsave(&ha->hardware_lock, flags);
 	}
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -1219,7 +1273,7 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
 		}
 	}
 
-	if (ha->flags.enable_lip_full_login && !IS_QLA8XXX_TYPE(ha)) {
+	if (ha->flags.enable_lip_full_login && !IS_CNA_CAPABLE(ha)) {
 		ret = qla2x00_full_login_lip(vha);
 		if (ret != QLA_SUCCESS) {
 			ql_dbg(ql_dbg_taskm, vha, 0x802d,
@@ -1249,7 +1303,6 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
 	int que, cnt;
 	unsigned long flags;
 	srb_t *sp;
-	struct srb_ctx *ctx;
 	struct qla_hw_data *ha = vha->hw;
 	struct req_que *req;
 
@@ -1262,31 +1315,7 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
 			sp = req->outstanding_cmds[cnt];
 			if (sp) {
 				req->outstanding_cmds[cnt] = NULL;
-				if (!sp->ctx ||
-					(sp->flags & SRB_FCP_CMND_DMA_VALID) ||
-					IS_PROT_IO(sp)) {
-					sp->cmd->result = res;
-					qla2x00_sp_compl(ha, sp);
-				} else {
-					ctx = sp->ctx;
-					if (ctx->type == SRB_ELS_CMD_RPT ||
-					    ctx->type == SRB_ELS_CMD_HST ||
-					    ctx->type == SRB_CT_CMD) {
-						struct fc_bsg_job *bsg_job =
-						    ctx->u.bsg_job;
-						if (bsg_job->request->msgcode
-						    == FC_BSG_HST_CT)
-							kfree(sp->fcport);
-						bsg_job->req->errors = 0;
-						bsg_job->reply->result = res;
-						bsg_job->job_done(bsg_job);
-						kfree(sp->ctx);
-						mempool_free(sp,
-							ha->srb_mempool);
-					} else {
-						ctx->u.iocb_cmd->free(sp);
-					}
-				}
+				sp->done(vha, sp, res);
 			}
 		}
 	}
@@ -1488,9 +1517,6 @@ qla2x00_iospace_config(struct qla_hw_data *ha)
 	uint16_t msix;
 	int cpus;
 
-	if (IS_QLA82XX(ha))
-		return qla82xx_iospace_config(ha);
-
 	if (pci_request_selected_regions(ha->pdev, ha->bars,
 	    QLA2XXX_DRIVER_NAME)) {
 		ql_log_pci(ql_log_fatal, ha->pdev, 0x0011,
@@ -1593,6 +1619,96 @@ iospace_error_exit:
 }
 
 
+static int
+qla83xx_iospace_config(struct qla_hw_data *ha)
+{
+	uint16_t msix;
+	int cpus;
+
+	if (pci_request_selected_regions(ha->pdev, ha->bars,
+	    QLA2XXX_DRIVER_NAME)) {
+		ql_log_pci(ql_log_fatal, ha->pdev, 0x0117,
+		    "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
+		    pci_name(ha->pdev));
+
+		goto iospace_error_exit;
+	}
+
+	/* Use MMIO operations for all accesses. */
+	if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) {
+		ql_log_pci(ql_log_warn, ha->pdev, 0x0118,
+		    "Invalid pci I/O region size (%s).\n",
+		    pci_name(ha->pdev));
+		goto iospace_error_exit;
+	}
+	if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) {
+		ql_log_pci(ql_log_warn, ha->pdev, 0x0119,
+		    "Invalid PCI mem region size (%s), aborting.\n",
+		    pci_name(ha->pdev));
+		goto iospace_error_exit;
+	}
+
+	ha->iobase = ioremap(pci_resource_start(ha->pdev, 0), MIN_IOBASE_LEN);
+	if (!ha->iobase) {
+		ql_log_pci(ql_log_fatal, ha->pdev, 0x011a,
+		    "Cannot remap MMIO (%s), aborting.\n",
+		    pci_name(ha->pdev));
+		goto iospace_error_exit;
+	}
+
+	/* 64bit PCI BAR - BAR2 will correspond to region 4 */
+	/* 83XX 26XX always use MQ type access for queues
+	 * - mbar 2, a.k.a region 4 */
+	ha->max_req_queues = ha->max_rsp_queues = 1;
+	ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 4),
+			pci_resource_len(ha->pdev, 4));
+
+	if (!ha->mqiobase) {
+		ql_log_pci(ql_log_fatal, ha->pdev, 0x011d,
+		    "BAR2/region4 not enabled\n");
+		goto mqiobase_exit;
+	}
+
+	ha->msixbase = ioremap(pci_resource_start(ha->pdev, 2),
+			pci_resource_len(ha->pdev, 2));
+	if (ha->msixbase) {
+		/* Read MSIX vector size of the board */
+		pci_read_config_word(ha->pdev,
+		    QLA_83XX_PCI_MSIX_CONTROL, &msix);
+		ha->msix_count = msix;
+		/* Max queues are bounded by available msix vectors */
+		/* queue 0 uses two msix vectors */
+		if (ql2xmultique_tag) {
+			cpus = num_online_cpus();
+			ha->max_rsp_queues = (ha->msix_count - 1 > cpus) ?
+				(cpus + 1) : (ha->msix_count - 1);
+			ha->max_req_queues = 2;
+		} else if (ql2xmaxqueues > 1) {
+			ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ?
+						QLA_MQ_SIZE : ql2xmaxqueues;
+			ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc00c,
+			    "QoS mode set, max no of request queues:%d.\n",
+			    ha->max_req_queues);
+			ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011b,
+			    "QoS mode set, max no of request queues:%d.\n",
+			    ha->max_req_queues);
+		}
+		ql_log_pci(ql_log_info, ha->pdev, 0x011c,
+		    "MSI-X vector count: %d.\n", msix);
+	} else
+		ql_log_pci(ql_log_info, ha->pdev, 0x011e,
+		    "BAR 1 not enabled.\n");
+
+mqiobase_exit:
+	ha->msix_count = ha->max_rsp_queues + 1;
+	ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011f,
+	    "MSIX Count:%d.\n", ha->msix_count);
+	return 0;
+
+iospace_error_exit:
+	return -ENOMEM;
+}
+
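+
+The MSI-X sizing above budgets one extra vector for the default queue pair and fans the rest across CPUs. The clamp reduces to a one-liner, shown here standalone:
+
+    /* Response queues get at most one per CPU plus the default queue,
+     * and never more than the spare vectors allow. */
+    static int bound_rsp_queues(int msix_count, int cpus)
+    {
+    	return (msix_count - 1 > cpus) ? cpus + 1 : msix_count - 1;
+    }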
 static struct isp_operations qla2100_isp_ops = {
 	.pci_config		= qla2100_pci_config,
 	.reset_chip		= qla2x00_reset_chip,
@@ -1769,7 +1885,7 @@ static struct isp_operations qla81xx_isp_ops = {
 	.fw_dump		= qla81xx_fw_dump,
 	.beacon_on		= qla24xx_beacon_on,
 	.beacon_off		= qla24xx_beacon_off,
-	.beacon_blink		= qla24xx_beacon_blink,
+	.beacon_blink		= qla83xx_beacon_blink,
 	.read_optrom		= qla25xx_read_optrom_data,
 	.write_optrom		= qla24xx_write_optrom_data,
 	.get_flash_version	= qla24xx_get_flash_version,
@@ -1815,6 +1931,43 @@ static struct isp_operations qla82xx_isp_ops = {
 	.iospace_config     	= qla82xx_iospace_config,
 };
 
+static struct isp_operations qla83xx_isp_ops = {
+	.pci_config		= qla25xx_pci_config,
+	.reset_chip		= qla24xx_reset_chip,
+	.chip_diag		= qla24xx_chip_diag,
+	.config_rings		= qla24xx_config_rings,
+	.reset_adapter		= qla24xx_reset_adapter,
+	.nvram_config		= qla81xx_nvram_config,
+	.update_fw_options	= qla81xx_update_fw_options,
+	.load_risc		= qla81xx_load_risc,
+	.pci_info_str		= qla24xx_pci_info_str,
+	.fw_version_str		= qla24xx_fw_version_str,
+	.intr_handler		= qla24xx_intr_handler,
+	.enable_intrs		= qla24xx_enable_intrs,
+	.disable_intrs		= qla24xx_disable_intrs,
+	.abort_command		= qla24xx_abort_command,
+	.target_reset		= qla24xx_abort_target,
+	.lun_reset		= qla24xx_lun_reset,
+	.fabric_login		= qla24xx_login_fabric,
+	.fabric_logout		= qla24xx_fabric_logout,
+	.calc_req_entries	= NULL,
+	.build_iocbs		= NULL,
+	.prep_ms_iocb		= qla24xx_prep_ms_iocb,
+	.prep_ms_fdmi_iocb	= qla24xx_prep_ms_fdmi_iocb,
+	.read_nvram		= NULL,
+	.write_nvram		= NULL,
+	.fw_dump		= qla83xx_fw_dump,
+	.beacon_on		= qla24xx_beacon_on,
+	.beacon_off		= qla24xx_beacon_off,
+	.beacon_blink		= qla83xx_beacon_blink,
+	.read_optrom		= qla25xx_read_optrom_data,
+	.write_optrom		= qla24xx_write_optrom_data,
+	.get_flash_version	= qla24xx_get_flash_version,
+	.start_scsi		= qla24xx_dif_start_scsi,
+	.abort_isp		= qla2x00_abort_isp,
+	.iospace_config		= qla83xx_iospace_config,
+};
+
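+
+isp_operations is the driver's per-chip vtable: probe selects one table (qla2100_isp_ops, qla83xx_isp_ops, ...) and the core dispatches through it, so adding ISP2031/8031 support is mostly a matter of filling in a new table. A minimal sketch of the dispatch shape, with invented ops for brevity:
+
+    struct chip_ops {
+    	int (*reset_chip)(void *hw);
+    	int (*start_scsi)(void *hw, void *cmd);
+    };
+
+    /* The core never tests chip types on the I/O path; it calls
+     * through the table chosen once at probe time. */
+    static int queue_io(const struct chip_ops *ops, void *hw, void *cmd)
+    {
+    	return ops->start_scsi(hw, cmd);
+    }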
 static inline void
 qla2x00_set_isp_flags(struct qla_hw_data *ha)
 {
@@ -1909,6 +2062,22 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
 		/* Initialize 82XX ISP flags */
 		qla82xx_init_flags(ha);
 		break;
+	case PCI_DEVICE_ID_QLOGIC_ISP2031:
+		ha->device_type |= DT_ISP2031;
+		ha->device_type |= DT_ZIO_SUPPORTED;
+		ha->device_type |= DT_FWI2;
+		ha->device_type |= DT_IIDMA;
+		ha->device_type |= DT_T10_PI;
+		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
+		break;
+	case PCI_DEVICE_ID_QLOGIC_ISP8031:
+		ha->device_type |= DT_ISP8031;
+		ha->device_type |= DT_ZIO_SUPPORTED;
+		ha->device_type |= DT_FWI2;
+		ha->device_type |= DT_IIDMA;
+		ha->device_type |= DT_T10_PI;
+		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
+		break;
 	}
 
 	if (IS_QLA82XX(ha))
@@ -1966,7 +2135,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	char pci_info[30];
 	char fw_str[30];
 	struct scsi_host_template *sht;
-	int bars, max_id, mem_only = 0;
+	int bars, mem_only = 0;
 	uint16_t req_length = 0, rsp_length = 0;
 	struct req_que *req = NULL;
 	struct rsp_que *rsp = NULL;
@@ -1980,7 +2149,9 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5432 ||
 	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2532 ||
 	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8001 ||
-	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8021) {
+	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8021 ||
+	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2031 ||
+	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8031) {
 		bars = pci_select_bars(pdev, IORESOURCE_MEM);
 		mem_only = 1;
 		ql_dbg_pci(ql_dbg_init, pdev, 0x0007,
@@ -2020,9 +2191,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	qla2x00_set_isp_flags(ha);
 
 	/* Set EEH reset type to fundamental if required by hba */
-	if ( IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha)) {
+	if (IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha))
 		pdev->needs_freset = 1;
-	}
 
 	ha->prev_topology = 0;
 	ha->init_cb_size = sizeof(init_cb_t);
@@ -2030,9 +2200,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	ha->optrom_size = OPTROM_SIZE_2300;
 
 	/* Assign ISP specific operations. */
-	max_id = MAX_TARGETS_2200;
 	if (IS_QLA2100(ha)) {
-		max_id = MAX_TARGETS_2100;
+		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100;
 		ha->mbx_count = MAILBOX_REGISTER_COUNT_2100;
 		req_length = REQUEST_ENTRY_CNT_2100;
 		rsp_length = RESPONSE_ENTRY_CNT_2100;
@@ -2044,6 +2213,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 		ha->nvram_data_off = ~0;
 		ha->isp_ops = &qla2100_isp_ops;
 	} else if (IS_QLA2200(ha)) {
+		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100;
 		ha->mbx_count = MAILBOX_REGISTER_COUNT_2200;
 		req_length = REQUEST_ENTRY_CNT_2200;
 		rsp_length = RESPONSE_ENTRY_CNT_2100;
@@ -2055,6 +2225,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 		ha->nvram_data_off = ~0;
 		ha->isp_ops = &qla2100_isp_ops;
 	} else if (IS_QLA23XX(ha)) {
+		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100;
 		ha->mbx_count = MAILBOX_REGISTER_COUNT;
 		req_length = REQUEST_ENTRY_CNT_2200;
 		rsp_length = RESPONSE_ENTRY_CNT_2300;
@@ -2068,6 +2239,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 		ha->nvram_data_off = ~0;
 		ha->isp_ops = &qla2300_isp_ops;
 	} else if (IS_QLA24XX_TYPE(ha)) {
+		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
 		ha->mbx_count = MAILBOX_REGISTER_COUNT;
 		req_length = REQUEST_ENTRY_CNT_24XX;
 		rsp_length = RESPONSE_ENTRY_CNT_2300;
@@ -2082,6 +2254,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 		ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
 		ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
 	} else if (IS_QLA25XX(ha)) {
+		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
 		ha->mbx_count = MAILBOX_REGISTER_COUNT;
 		req_length = REQUEST_ENTRY_CNT_24XX;
 		rsp_length = RESPONSE_ENTRY_CNT_2300;
@@ -2096,6 +2269,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 		ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
 		ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
 	} else if (IS_QLA81XX(ha)) {
+		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
 		ha->mbx_count = MAILBOX_REGISTER_COUNT;
 		req_length = REQUEST_ENTRY_CNT_24XX;
 		rsp_length = RESPONSE_ENTRY_CNT_2300;
@@ -2110,6 +2284,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 		ha->nvram_conf_off = ~0;
 		ha->nvram_data_off = ~0;
 	} else if (IS_QLA82XX(ha)) {
+		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
 		ha->mbx_count = MAILBOX_REGISTER_COUNT;
 		req_length = REQUEST_ENTRY_CNT_82XX;
 		rsp_length = RESPONSE_ENTRY_CNT_82XX;
@@ -2123,14 +2298,31 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 		ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
 		ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
 		ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
+	} else if (IS_QLA83XX(ha)) {
+		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
+		ha->mbx_count = MAILBOX_REGISTER_COUNT;
+		req_length = REQUEST_ENTRY_CNT_24XX;
+		rsp_length = RESPONSE_ENTRY_CNT_2300;
+		ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
+		ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
+		ha->gid_list_info_size = 8;
+		ha->optrom_size = OPTROM_SIZE_83XX;
+		ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
+		ha->isp_ops = &qla83xx_isp_ops;
+		ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX;
+		ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
+		ha->nvram_conf_off = ~0;
+		ha->nvram_data_off = ~0;
 	}
+
 	ql_dbg_pci(ql_dbg_init, pdev, 0x001e,
 	    "mbx_count=%d, req_length=%d, "
 	    "rsp_length=%d, max_loop_id=%d, init_cb_size=%d, "
-	    "gid_list_info_size=%d, optrom_size=%d, nvram_npiv_size=%d, .\n",
+	    "gid_list_info_size=%d, optrom_size=%d, nvram_npiv_size=%d, "
+	    "max_fibre_devices=%d.\n",
 	    ha->mbx_count, req_length, rsp_length, ha->max_loop_id,
 	    ha->init_cb_size, ha->gid_list_info_size, ha->optrom_size,
-	    ha->nvram_npiv_size);
+	    ha->nvram_npiv_size, ha->max_fibre_devices);
 	ql_dbg_pci(ql_dbg_init, pdev, 0x001f,
 	    "isp_ops=%p, flash_conf_off=%d, "
 	    "flash_data_off=%d, nvram_conf_off=%d, nvram_data_off=%d.\n",
@@ -2204,7 +2396,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	    "mgmt_svr_loop_id=%d, sg_tablesize=%d.\n",
 	    host->can_queue, base_vha->req,
 	    base_vha->mgmt_svr_loop_id, host->sg_tablesize);
-	host->max_id = max_id;
+	host->max_id = ha->max_fibre_devices;
 	host->this_id = 255;
 	host->cmd_per_lun = 3;
 	host->unique_id = host->host_no;
@@ -2251,7 +2443,7 @@ que_init:
 	req->req_q_out = &ha->iobase->isp24.req_q_out;
 	rsp->rsp_q_in = &ha->iobase->isp24.rsp_q_in;
 	rsp->rsp_q_out = &ha->iobase->isp24.rsp_q_out;
-	if (ha->mqenable) {
+	if (ha->mqenable || IS_QLA83XX(ha)) {
 		req->req_q_in = &ha->mqiobase->isp25mq.req_q_in;
 		req->req_q_out = &ha->mqiobase->isp25mq.req_q_out;
 		rsp->rsp_q_in = &ha->mqiobase->isp25mq.rsp_q_in;
@@ -2552,6 +2744,9 @@ qla2x00_remove_one(struct pci_dev *pdev)
 
 		if (ha->mqiobase)
 			iounmap(ha->mqiobase);
+
+		if (IS_QLA83XX(ha) && ha->msixbase)
+			iounmap(ha->msixbase);
 	}
 
 	pci_release_selected_regions(ha->pdev, ha->bars);
@@ -2751,8 +2946,8 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
 	if (!ha->init_cb)
 		goto fail;
 
-	ha->gid_list = dma_alloc_coherent(&ha->pdev->dev, GID_LIST_SIZE,
-		&ha->gid_list_dma, GFP_KERNEL);
+	ha->gid_list = dma_alloc_coherent(&ha->pdev->dev,
+		qla2x00_gid_list_size(ha), &ha->gid_list_dma, GFP_KERNEL);
 	if (!ha->gid_list)
 		goto fail_free_init_cb;
 
@@ -2893,7 +3088,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
 		ha->npiv_info = NULL;
 
 	/* Get consistent memory allocated for EX-INIT-CB. */
-	if (IS_QLA8XXX_TYPE(ha)) {
+	if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha)) {
 		ha->ex_init_cb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
 		    &ha->ex_init_cb_dma);
 		if (!ha->ex_init_cb)
@@ -2967,7 +3162,8 @@ fail_free_srb_mempool:
 	mempool_destroy(ha->srb_mempool);
 	ha->srb_mempool = NULL;
 fail_free_gid_list:
-	dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, ha->gid_list,
+	dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
+	ha->gid_list,
 	ha->gid_list_dma);
 	ha->gid_list = NULL;
 	ha->gid_list_dma = 0;
@@ -3045,9 +3241,6 @@ qla2x00_mem_free(struct qla_hw_data *ha)
 	if (ha->sfp_data)
 		dma_pool_free(ha->s_dma_pool, ha->sfp_data, ha->sfp_data_dma);
 
-	if (ha->edc_data)
-		dma_pool_free(ha->s_dma_pool, ha->edc_data, ha->edc_data_dma);
-
 	if (ha->ms_iocb)
 		dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
 
@@ -3062,8 +3255,8 @@ qla2x00_mem_free(struct qla_hw_data *ha)
 		dma_pool_destroy(ha->s_dma_pool);
 
 	if (ha->gid_list)
-		dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, ha->gid_list,
-		ha->gid_list_dma);
+		dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
+		ha->gid_list, ha->gid_list_dma);
 
 	if (IS_QLA82XX(ha)) {
 		if (!list_empty(&ha->gbl_dsd_list)) {
@@ -3095,6 +3288,7 @@ qla2x00_mem_free(struct qla_hw_data *ha)
 	vfree(ha->optrom_buffer);
 	kfree(ha->nvram);
 	kfree(ha->npiv_info);
+	kfree(ha->swl);
 
 	ha->srb_mempool = NULL;
 	ha->ctx_mempool = NULL;
@@ -3661,75 +3855,6 @@ qla2x00_rst_aen(scsi_qla_host_t *vha)
 	}
 }
 
-static void
-qla2x00_sp_free_dma(srb_t *sp)
-{
-	struct scsi_cmnd *cmd = sp->cmd;
-	struct qla_hw_data *ha = sp->fcport->vha->hw;
-
-	if (sp->flags & SRB_DMA_VALID) {
-		scsi_dma_unmap(cmd);
-		sp->flags &= ~SRB_DMA_VALID;
-	}
-
-	if (sp->flags & SRB_CRC_PROT_DMA_VALID) {
-		dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
-		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
-		sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
-	}
-
-	if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
-		/* List assured to be having elements */
-		qla2x00_clean_dsd_pool(ha, sp);
-		sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
-	}
-
-	if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
-		dma_pool_free(ha->dl_dma_pool, sp->ctx,
-		    ((struct crc_context *)sp->ctx)->crc_ctx_dma);
-		sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
-	}
-
-	if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
-		struct ct6_dsd *ctx = sp->ctx;
-		dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd,
-			ctx->fcp_cmnd_dma);
-		list_splice(&ctx->dsd_list, &ha->gbl_dsd_list);
-		ha->gbl_dsd_inuse -= ctx->dsd_use_cnt;
-		ha->gbl_dsd_avail += ctx->dsd_use_cnt;
-		mempool_free(sp->ctx, ha->ctx_mempool);
-		sp->ctx = NULL;
-	}
-
-	CMD_SP(cmd) = NULL;
-}
-
-static void
-qla2x00_sp_final_compl(struct qla_hw_data *ha, srb_t *sp)
-{
-	struct scsi_cmnd *cmd = sp->cmd;
-
-	qla2x00_sp_free_dma(sp);
-	mempool_free(sp, ha->srb_mempool);
-	cmd->scsi_done(cmd);
-}
-
-void
-qla2x00_sp_compl(struct qla_hw_data *ha, srb_t *sp)
-{
-	if (atomic_read(&sp->ref_count) == 0) {
-		ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3015,
-		    "SP reference-count to ZERO -- sp=%p cmd=%p.\n",
-		    sp, sp->cmd);
-		if (ql2xextended_error_logging & ql_dbg_io)
-			BUG();
-		return;
-	}
-	if (!atomic_dec_and_test(&sp->ref_count))
-		return;
-	qla2x00_sp_final_compl(ha, sp);
-}
-
 /**************************************************************************
 *   qla2x00_timer
 *
@@ -3800,7 +3925,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
 					sp = req->outstanding_cmds[index];
 					if (!sp)
 						continue;
-					if (sp->ctx && !IS_PROT_IO(sp))
+					if (sp->type != SRB_SCSI_CMD)
 						continue;
 					sfcp = sp->fcport;
 					if (!(sfcp->flags & FCF_FCP2_DEVICE))
@@ -3889,7 +4014,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
 
 /* Firmware interface routines. */
 
-#define FW_BLOBS	8
+#define FW_BLOBS	10
 #define FW_ISP21XX	0
 #define FW_ISP22XX	1
 #define FW_ISP2300	2
@@ -3898,6 +4023,8 @@ qla2x00_timer(scsi_qla_host_t *vha)
 #define FW_ISP25XX	5
 #define FW_ISP81XX	6
 #define FW_ISP82XX	7
+#define FW_ISP2031	8
+#define FW_ISP8031	9
 
 #define FW_FILE_ISP21XX	"ql2100_fw.bin"
 #define FW_FILE_ISP22XX	"ql2200_fw.bin"
@@ -3907,6 +4034,8 @@ qla2x00_timer(scsi_qla_host_t *vha)
 #define FW_FILE_ISP25XX	"ql2500_fw.bin"
 #define FW_FILE_ISP81XX	"ql8100_fw.bin"
 #define FW_FILE_ISP82XX	"ql8200_fw.bin"
+#define FW_FILE_ISP2031	"ql2600_fw.bin"
+#define FW_FILE_ISP8031	"ql8300_fw.bin"
 
 static DEFINE_MUTEX(qla_fw_lock);
 
@@ -3919,6 +4048,8 @@ static struct fw_blob qla_fw_blobs[FW_BLOBS] = {
 	{ .name = FW_FILE_ISP25XX, },
 	{ .name = FW_FILE_ISP81XX, },
 	{ .name = FW_FILE_ISP82XX, },
+	{ .name = FW_FILE_ISP2031, },
+	{ .name = FW_FILE_ISP8031, },
 };
 
 struct fw_blob *
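
The firmware blob array is indexed by the FW_ISPxxxx defines, so FW_BLOBS must always equal the entry count; the new ISP2031/8031 rows bump both in step. A compile-time guard (a sketch, not present in the driver) would make the coupling explicit:

    #define FW_BLOBS 10

    static const char *fw_names[] = {
    	"ql2100_fw.bin", "ql2200_fw.bin", "ql2300_fw.bin", "ql2322_fw.bin",
    	"ql2400_fw.bin", "ql2500_fw.bin", "ql8100_fw.bin", "ql8200_fw.bin",
    	"ql2600_fw.bin", "ql8300_fw.bin",
    };

    _Static_assert(sizeof(fw_names) / sizeof(fw_names[0]) == FW_BLOBS,
    	       "FW_BLOBS out of sync with the blob table");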
@@ -3927,7 +4058,6 @@ qla2x00_request_firmware(scsi_qla_host_t *vha)
 	struct qla_hw_data *ha = vha->hw;
 	struct fw_blob *blob;
 
-	blob = NULL;
 	if (IS_QLA2100(ha)) {
 		blob = &qla_fw_blobs[FW_ISP21XX];
 	} else if (IS_QLA2200(ha)) {
@@ -3944,6 +4074,12 @@ qla2x00_request_firmware(scsi_qla_host_t *vha)
 		blob = &qla_fw_blobs[FW_ISP81XX];
 	} else if (IS_QLA82XX(ha)) {
 		blob = &qla_fw_blobs[FW_ISP82XX];
+	} else if (IS_QLA2031(ha)) {
+		blob = &qla_fw_blobs[FW_ISP2031];
+	} else if (IS_QLA8031(ha)) {
+		blob = &qla_fw_blobs[FW_ISP8031];
+	} else {
+		return NULL;
 	}
 
 	mutex_lock(&qla_fw_lock);
@@ -4265,6 +4401,7 @@ static struct pci_device_id qla2xxx_pci_tbl[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5422) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5432) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2532) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2031) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8001) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8021) },
 	{ 0 },
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 16bc72844a97..3c13c0a6be63 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -568,6 +568,9 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start)
 	else if (IS_QLA82XX(ha)) {
 		*start = FA_FLASH_LAYOUT_ADDR_82;
 		goto end;
+	} else if (IS_QLA83XX(ha)) {
+		*start = FA_FLASH_LAYOUT_ADDR_83;
+		goto end;
 	}
 	/* Begin with first PCI expansion ROM header. */
 	buf = (uint8_t *)req->ring;
@@ -721,13 +724,22 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
 		    le32_to_cpu(region->size));
 
 		switch (le32_to_cpu(region->code) & 0xff) {
+		case FLT_REG_FCOE_FW:
+			if (!IS_QLA8031(ha))
+				break;
+			ha->flt_region_fw = start;
+			break;
 		case FLT_REG_FW:
+			if (IS_QLA8031(ha))
+				break;
 			ha->flt_region_fw = start;
 			break;
 		case FLT_REG_BOOT_CODE:
 			ha->flt_region_boot = start;
 			break;
 		case FLT_REG_VPD_0:
+			if (IS_QLA8031(ha))
+				break;
 			ha->flt_region_vpd_nvram = start;
 			if (IS_QLA82XX(ha))
 				break;
@@ -735,16 +747,20 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
 				ha->flt_region_vpd = start;
 			break;
 		case FLT_REG_VPD_1:
-			if (IS_QLA82XX(ha))
+			if (IS_QLA82XX(ha) || IS_QLA8031(ha))
 				break;
 			if (!ha->flags.port0)
 				ha->flt_region_vpd = start;
 			break;
 		case FLT_REG_NVRAM_0:
+			if (IS_QLA8031(ha))
+				break;
 			if (ha->flags.port0)
 				ha->flt_region_nvram = start;
 			break;
 		case FLT_REG_NVRAM_1:
+			if (IS_QLA8031(ha))
+				break;
 			if (!ha->flags.port0)
 				ha->flt_region_nvram = start;
 			break;
@@ -785,6 +801,31 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
 		case FLT_REG_VPD_82XX:
 			ha->flt_region_vpd = start;
 			break;
+		case FLT_REG_FCOE_VPD_0:
+			if (!IS_QLA8031(ha))
+				break;
+			ha->flt_region_vpd_nvram = start;
+			if (ha->flags.port0)
+				ha->flt_region_vpd = start;
+			break;
+		case FLT_REG_FCOE_VPD_1:
+			if (!IS_QLA8031(ha))
+				break;
+			if (!ha->flags.port0)
+				ha->flt_region_vpd = start;
+			break;
+		case FLT_REG_FCOE_NVRAM_0:
+			if (!IS_QLA8031(ha))
+				break;
+			if (ha->flags.port0)
+				ha->flt_region_nvram = start;
+			break;
+		case FLT_REG_FCOE_NVRAM_1:
+			if (!IS_QLA8031(ha))
+				break;
+			if (!ha->flags.port0)
+				ha->flt_region_nvram = start;
+			break;
 		}
 	}
 	goto done;
@@ -804,15 +845,12 @@ no_flash_data:
 	    def_npiv_conf0[def] : def_npiv_conf1[def];
 done:
 	ql_dbg(ql_dbg_init, vha, 0x004a,
-	    "FLT[%s]: boot=0x%x fw=0x%x vpd_nvram=0x%x vpd=0x%x.\n",
-	    loc, ha->flt_region_boot,
-	    ha->flt_region_fw, ha->flt_region_vpd_nvram,
-	    ha->flt_region_vpd);
-	ql_dbg(ql_dbg_init, vha, 0x004b,
-	    "nvram=0x%x fdt=0x%x flt=0x%x npiv=0x%x fcp_prif_cfg=0x%x.\n",
-	    ha->flt_region_nvram,
-	    ha->flt_region_fdt, ha->flt_region_flt,
-	    ha->flt_region_npiv_conf, ha->flt_region_fcp_prio);
+	    "FLT[%s]: boot=0x%x fw=0x%x vpd_nvram=0x%x vpd=0x%x nvram=0x%x "
+	    "fdt=0x%x flt=0x%x npiv=0x%x fcp_prio_cfg=0x%x.\n",
+	    loc, ha->flt_region_boot, ha->flt_region_fw,
+	    ha->flt_region_vpd_nvram, ha->flt_region_vpd, ha->flt_region_nvram,
+	    ha->flt_region_fdt, ha->flt_region_flt, ha->flt_region_npiv_conf,
+	    ha->flt_region_fcp_prio);
 }
 
 static void
@@ -948,7 +986,8 @@ qla2xxx_get_flash_info(scsi_qla_host_t *vha)
 	uint32_t flt_addr;
 	struct qla_hw_data *ha = vha->hw;
 
-	if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) && !IS_QLA8XXX_TYPE(ha))
+	if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
+	    !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha))
 		return QLA_SUCCESS;
 
 	ret = qla2xxx_find_flt_start(vha, &flt_addr);
@@ -974,7 +1013,8 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
 	struct qla_npiv_entry *entry;
 	struct qla_hw_data *ha = vha->hw;
 
-	if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) && !IS_QLA8XXX_TYPE(ha))
+	if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
+	    !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha))
 		return;
 
 	ha->isp_ops->read_optrom(vha, (uint8_t *)&hdr,
@@ -1144,8 +1184,8 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
 	struct qla_hw_data *ha = vha->hw;
 
 	/* Prepare burst-capable write on supported ISPs. */
-	if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && !(faddr & 0xfff) &&
-	    dwords > OPTROM_BURST_DWORDS) {
+	if ((IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha)) &&
+	    !(faddr & 0xfff) && dwords > OPTROM_BURST_DWORDS) {
 		optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
 		    &optrom_dma, GFP_KERNEL);
 		if (!optrom) {
@@ -1619,6 +1659,71 @@ qla24xx_beacon_blink(struct scsi_qla_host *vha)
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 }
 
+void
+qla83xx_beacon_blink(struct scsi_qla_host *vha)
+{
+	uint32_t led_select_value;
+	struct qla_hw_data *ha = vha->hw;
+	uint16_t led_cfg[6];
+	uint16_t orig_led_cfg[6];
+
+	if (!IS_QLA83XX(ha) && !IS_QLA81XX(ha))
+		return;
+
+	if (IS_QLA2031(ha) && ha->beacon_blink_led) {
+		if (ha->flags.port0)
+			led_select_value = 0x00201320;
+		else
+			led_select_value = 0x00201328;
+
+		qla83xx_write_remote_reg(vha, led_select_value, 0x40002000);
+		qla83xx_write_remote_reg(vha, led_select_value + 4, 0x40002000);
+		msleep(1000);
+		qla83xx_write_remote_reg(vha, led_select_value, 0x40004000);
+		qla83xx_write_remote_reg(vha, led_select_value + 4, 0x40004000);
+	} else if ((IS_QLA8031(ha) || IS_QLA81XX(ha)) && ha->beacon_blink_led) {
+		int rval;
+
+		/* Save Current */
+		rval = qla81xx_get_led_config(vha, orig_led_cfg);
+		/* Do the blink */
+		if (rval == QLA_SUCCESS) {
+			if (IS_QLA81XX(ha)) {
+				led_cfg[0] = 0x4000;
+				led_cfg[1] = 0x2000;
+				led_cfg[2] = 0;
+				led_cfg[3] = 0;
+				led_cfg[4] = 0;
+				led_cfg[5] = 0;
+			} else {
+				led_cfg[0] = 0x4000;
+				led_cfg[1] = 0x4000;
+				led_cfg[2] = 0x4000;
+				led_cfg[3] = 0x2000;
+				led_cfg[4] = 0;
+				led_cfg[5] = 0x2000;
+			}
+			rval = qla81xx_set_led_config(vha, led_cfg);
+			msleep(1000);
+			if (IS_QLA81XX(ha)) {
+				led_cfg[0] = 0x4000;
+				led_cfg[1] = 0x2000;
+				led_cfg[2] = 0;
+			} else {
+				led_cfg[0] = 0x4000;
+				led_cfg[1] = 0x2000;
+				led_cfg[2] = 0x4000;
+				led_cfg[3] = 0x4000;
+				led_cfg[4] = 0;
+				led_cfg[5] = 0x2000;
+			}
+			rval = qla81xx_set_led_config(vha, led_cfg);
+		}
+		/* On exit, restore original (presumes no status change) */
+		qla81xx_set_led_config(vha, orig_led_cfg);
+	}
+}
+
 int
 qla24xx_beacon_on(struct scsi_qla_host *vha)
 {
@@ -1630,6 +1735,9 @@ qla24xx_beacon_on(struct scsi_qla_host *vha)
 	if (IS_QLA82XX(ha))
 		return QLA_SUCCESS;
 
+	if (IS_QLA8031(ha) || IS_QLA81XX(ha))
+		goto skip_gpio; /* let blink handle it */
+
 	if (ha->beacon_blink_led == 0) {
 		/* Enable firmware for update */
 		ha->fw_options[1] |= ADD_FO1_DISABLE_GPIO_LED_CTRL;
@@ -1644,6 +1752,9 @@ qla24xx_beacon_on(struct scsi_qla_host *vha)
 			return QLA_FUNCTION_FAILED;
 		}
 
+		if (IS_QLA2031(ha))
+			goto skip_gpio;
+
 		spin_lock_irqsave(&ha->hardware_lock, flags);
 		gpio_data = RD_REG_DWORD(&reg->gpiod);
 
@@ -1658,6 +1769,7 @@ qla24xx_beacon_on(struct scsi_qla_host *vha)
 	/* So all colors blink together. */
 	ha->beacon_color_state = 0;
 
+skip_gpio:
 	/* Let the per HBA timer kick off the blinking process. */
 	ha->beacon_blink_led = 1;
 
@@ -1676,6 +1788,13 @@ qla24xx_beacon_off(struct scsi_qla_host *vha)
 		return QLA_SUCCESS;
 
 	ha->beacon_blink_led = 0;
+
+	if (IS_QLA2031(ha))
+		goto set_fw_options;
+
+	if (IS_QLA8031(ha) || IS_QLA81XX(ha))
+		return QLA_SUCCESS;
+
 	ha->beacon_color_state = QLA_LED_ALL_ON;
 
 	ha->isp_ops->beacon_blink(vha);	/* Will flip to all off. */
@@ -1690,6 +1809,7 @@ qla24xx_beacon_off(struct scsi_qla_host *vha)
 	RD_REG_DWORD(&reg->gpiod);
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
+set_fw_options:
 	ha->fw_options[1] &= ~ADD_FO1_DISABLE_GPIO_LED_CTRL;
 
 	if (qla2x00_set_fw_options(vha, ha->fw_options) != QLA_SUCCESS) {
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index bfe68545203f..7f2492e88be7 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -150,8 +150,6 @@
 #define QL4_SESS_RECOVERY_TMO		120	/* iSCSI session */
 						/* recovery timeout */
 
-#define MSB(x) ((uint8_t)((uint16_t)(x) >> 8))
-#define LSW(x) ((uint16_t)(x))
 #define LSDW(x) ((u32)((u64)(x)))
 #define MSDW(x) ((u32)((((u64)(x)) >> 16) >> 16))
 
@@ -223,6 +221,15 @@ struct srb {
 	uint16_t reserved2;
 };
 
+/* Mailbox request block structure */
+struct mrb {
+	struct scsi_qla_host *ha;
+	struct mbox_cmd_iocb *mbox;
+	uint32_t mbox_cmd;
+	uint16_t iocb_cnt;		/* Number of used iocbs */
+	uint32_t pid;
+};
+
 /*
  * Asynchronous Event Queue structure
  */
@@ -265,7 +272,7 @@ struct ddb_entry {
 					   * retried */
 	uint32_t default_time2wait;	  /* Default Min time between
 					   * relogins (+aens) */
-
+	uint16_t chap_tbl_idx;
 };
 
 struct qla_ddb_index {
@@ -284,6 +291,7 @@ struct ql4_tuple_ddb {
 	uint16_t options;
 #define DDB_OPT_IPV6 0x0e0e
 #define DDB_OPT_IPV4 0x0f0f
+	uint8_t isid[6];
 };
 
 /*
@@ -303,7 +311,28 @@ struct ql4_tuple_ddb {
 #define DF_ISNS_DISCOVERED	2	/* Device was discovered via iSNS */
 #define DF_FO_MASKED		3
 
+enum qla4_work_type {
+	QLA4_EVENT_AEN,
+	QLA4_EVENT_PING_STATUS,
+};
 
+struct qla4_work_evt {
+	struct list_head list;
+	enum qla4_work_type type;
+	union {
+		struct {
+			enum iscsi_host_event_code code;
+			uint32_t data_size;
+			uint8_t data[0];
+		} aen;
+		struct {
+			uint32_t status;
+			uint32_t pid;
+			uint32_t data_size;
+			uint8_t data[0];
+		} ping;
+	} u;
+};
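+
+The zero-length data[0] members above are the pre-C99 idiom for a variable-sized trailer: the event header and its payload come from a single allocation. A standalone sketch using the modern flexible-array spelling:
+
+    #include <stdlib.h>
+    #include <string.h>
+
+    struct work_evt {
+    	int type;
+    	unsigned int data_size;
+    	unsigned char data[];	/* payload allocated inline */
+    };
+
+    static struct work_evt *evt_alloc(int type, const void *payload,
+    				  unsigned int size)
+    {
+    	struct work_evt *e = malloc(sizeof(*e) + size);
+
+    	if (!e)
+    		return NULL;
+    	e->type = type;
+    	e->data_size = size;
+    	memcpy(e->data, payload, size);
+    	return e;
+    }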
 
 struct ql82xx_hw_data {
 	/* Offsets for flash/nvram access (set to ~0 if not used). */
@@ -657,6 +686,7 @@ struct scsi_qla_host {
 	struct dma_pool *chap_dma_pool;
 	uint8_t *chap_list; /* CHAP table cache */
 	struct mutex  chap_sem;
+
 #define CHAP_DMA_BLOCK_SIZE    512
 	struct workqueue_struct *task_wq;
 	unsigned long ddb_idx_map[MAX_DDB_ENTRIES / BITS_PER_LONG];
@@ -674,6 +704,15 @@ struct scsi_qla_host {
 	uint16_t sec_ddb_idx;
 	int is_reset;
 	uint16_t temperature;
+
+	/* event work list */
+	struct list_head work_list;
+	spinlock_t work_lock;
+
+	/* mbox iocb */
+#define MAX_MRB		128
+	struct mrb *active_mrb_array[MAX_MRB];
+	uint32_t mrb_index;
 };
 
 struct ql4_task_data {
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index 7825c141bc1a..210cd1d64475 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -331,6 +331,10 @@ struct qla_flt_region {
 /*  Mailbox command definitions */
 #define MBOX_CMD_ABOUT_FW			0x0009
 #define MBOX_CMD_PING				0x000B
+#define PING_IPV6_PROTOCOL_ENABLE		0x1
+#define PING_IPV6_LINKLOCAL_ADDR		0x4
+#define PING_IPV6_ADDR0				0x8
+#define PING_IPV6_ADDR1				0xC
 #define MBOX_CMD_ENABLE_INTRS			0x0010
 #define INTR_DISABLE				0
 #define INTR_ENABLE				1
@@ -396,6 +400,10 @@ struct qla_flt_region {
 #define FW_ADDSTATE_DHCPv4_LEASE_EXPIRED	0x0008
 #define FW_ADDSTATE_LINK_UP			0x0010
 #define FW_ADDSTATE_ISNS_SVC_ENABLED		0x0020
+#define FW_ADDSTATE_LINK_SPEED_10MBPS		0x0100
+#define FW_ADDSTATE_LINK_SPEED_100MBPS		0x0200
+#define FW_ADDSTATE_LINK_SPEED_1GBPS		0x0400
+#define FW_ADDSTATE_LINK_SPEED_10GBPS		0x0800
 
 #define MBOX_CMD_GET_DATABASE_ENTRY_DEFAULTS	0x006B
 #define IPV6_DEFAULT_DDB_ENTRY			0x0001
@@ -918,6 +926,8 @@ struct qla4_header {
 #define ET_CMND_T3		 0x19
 #define ET_PASSTHRU0		 0x3A
 #define ET_PASSTHRU_STATUS	 0x3C
+#define ET_MBOX_CMD		0x38
+#define ET_MBOX_STATUS		0x39
 
 	uint8_t entryStatus;
 	uint8_t systemDefined;
@@ -1118,6 +1128,20 @@ struct passthru_status {
 	uint8_t res4[16];	/* 30-3F */
 };
 
+struct mbox_cmd_iocb {
+	struct qla4_header hdr;	/* 00-03 */
+	uint32_t handle;	/* 04-07 */
+	uint32_t in_mbox[8];	/* 08-27 */
+	uint32_t res1[6];	/* 28-3F */
+};
+
+struct mbox_status_iocb {
+	struct qla4_header hdr;	/* 00-03 */
+	uint32_t handle;	/* 04-07 */
+	uint32_t out_mbox[8];	/* 08-27 */
+	uint32_t res1[6];	/* 28-3F */
+};
+
 /*
  * ISP queue - response queue entry definition.
  */
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index d0dd4b330206..910536667cf5 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -81,6 +81,8 @@ int qla4xxx_set_flash(struct scsi_qla_host *ha, dma_addr_t dma_addr,
 		      uint32_t offset, uint32_t length, uint32_t options);
 int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
 		uint8_t outCount, uint32_t *mbx_cmd, uint32_t *mbx_sts);
+int qla4xxx_get_chap_index(struct scsi_qla_host *ha, char *username,
+			   char *password, int bidi, uint16_t *chap_index);
 
 void qla4xxx_queue_iocb(struct scsi_qla_host *ha);
 void qla4xxx_complete_iocb(struct scsi_qla_host *ha);
@@ -181,6 +183,13 @@ int qla4xxx_flash_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
 int qla4xxx_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
 		       struct ddb_entry *ddb_entry, uint32_t state);
 void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset);
+int qla4xxx_post_aen_work(struct scsi_qla_host *ha, uint32_t aen_code,
+			  uint32_t data_size, uint8_t *data);
+int qla4xxx_ping_iocb(struct scsi_qla_host *ha, uint32_t options,
+		      uint32_t payload_size, uint32_t pid, uint8_t *ipaddr);
+int qla4xxx_post_ping_evt_work(struct scsi_qla_host *ha,
+			       uint32_t status, uint32_t pid,
+			       uint32_t data_size, uint8_t *data);
 
 /* BSG Functions */
 int qla4xxx_bsg_request(struct bsg_job *bsg_job);
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 90614f38b55d..90ee5d8fa731 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -86,6 +86,7 @@ static void qla4xxx_init_response_q_entries(struct scsi_qla_host *ha)
 int qla4xxx_init_rings(struct scsi_qla_host *ha)
 {
 	unsigned long flags = 0;
+	int i;
 
 	/* Initialize request queue. */
 	spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -125,6 +126,10 @@ int qla4xxx_init_rings(struct scsi_qla_host *ha)
 
 	qla4xxx_init_response_q_entries(ha);
 
+	/* Initialize mailbox active array */
+	for (i = 0; i < MAX_MRB; i++)
+		ha->active_mrb_array[i] = NULL;
+
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
 	return QLA_SUCCESS;
diff --git a/drivers/scsi/qla4xxx/ql4_iocb.c b/drivers/scsi/qla4xxx/ql4_iocb.c
index 410669351906..2a2022a6bb9b 100644
--- a/drivers/scsi/qla4xxx/ql4_iocb.c
+++ b/drivers/scsi/qla4xxx/ql4_iocb.c
@@ -445,3 +445,95 @@ queuing_error:
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 	return ret;
 }
+
+static struct mrb *qla4xxx_get_new_mrb(struct scsi_qla_host *ha)
+{
+	struct mrb *mrb;
+
+	mrb = kzalloc(sizeof(*mrb), GFP_KERNEL);
+	if (!mrb)
+		return mrb;
+
+	mrb->ha = ha;
+	return mrb;
+}
+
+static int qla4xxx_send_mbox_iocb(struct scsi_qla_host *ha, struct mrb *mrb,
+				  uint32_t *in_mbox)
+{
+	int rval = QLA_SUCCESS;
+	uint32_t i;
+	unsigned long flags;
+	uint32_t index = 0;
+
+	/* Acquire hardware specific lock */
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+
+	/* Get pointer to the queue entry for the marker */
+	rval = qla4xxx_get_req_pkt(ha, (struct queue_entry **) &(mrb->mbox));
+	if (rval != QLA_SUCCESS)
+		goto exit_mbox_iocb;
+
+	index = ha->mrb_index;
+	/* get valid mrb index */
+	for (i = 0; i < MAX_MRB; i++) {
+		index++;
+		if (index == MAX_MRB)
+			index = 1;
+		if (ha->active_mrb_array[index] == NULL) {
+			ha->mrb_index = index;
+			break;
+		}
+	}
+
+	mrb->iocb_cnt = 1;
+	ha->active_mrb_array[index] = mrb;
+	mrb->mbox->handle = index;
+	mrb->mbox->hdr.entryType = ET_MBOX_CMD;
+	mrb->mbox->hdr.entryCount = mrb->iocb_cnt;
+	memcpy(mrb->mbox->in_mbox, in_mbox, 32);
+	mrb->mbox_cmd = in_mbox[0];
+	wmb();
+
+	ha->isp_ops->queue_iocb(ha);
+exit_mbox_iocb:
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	return rval;
+}
+
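+
+The index scan above hands each in-flight mailbox IOCB a small integer handle that the status IOCB later echoes back; the cursor wraps past slot 0 so a zero handle can remain invalid. A standalone model of the scan (with an explicit table-full result added for clarity):
+
+    #include <stddef.h>
+
+    #define MAX_SLOTS 128
+
+    static int slot_alloc(void *table[MAX_SLOTS], unsigned int *cursor,
+    		      void *item)
+    {
+    	unsigned int index = *cursor;
+
+    	for (int i = 0; i < MAX_SLOTS; i++) {
+    		if (++index == MAX_SLOTS)
+    			index = 1;	/* skip slot 0: handle 0 stays invalid */
+    		if (table[index] == NULL) {
+    			table[index] = item;
+    			*cursor = index;
+    			return (int)index;
+    		}
+    	}
+    	return -1;			/* every slot busy */
+    }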
+int qla4xxx_ping_iocb(struct scsi_qla_host *ha, uint32_t options,
+		      uint32_t payload_size, uint32_t pid, uint8_t *ipaddr)
+{
+	uint32_t in_mbox[8];
+	struct mrb *mrb = NULL;
+	int rval = QLA_SUCCESS;
+
+	memset(in_mbox, 0, sizeof(in_mbox));
+
+	mrb = qla4xxx_get_new_mrb(ha);
+	if (!mrb) {
+		DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: failed to get new mrb\n",
+				  __func__));
+		rval = QLA_ERROR;
+		goto exit_ping;
+	}
+
+	in_mbox[0] = MBOX_CMD_PING;
+	in_mbox[1] = options;
+	memcpy(&in_mbox[2], &ipaddr[0], 4);
+	memcpy(&in_mbox[3], &ipaddr[4], 4);
+	memcpy(&in_mbox[4], &ipaddr[8], 4);
+	memcpy(&in_mbox[5], &ipaddr[12], 4);
+	in_mbox[6] = payload_size;
+
+	mrb->pid = pid;
+	rval = qla4xxx_send_mbox_iocb(ha, mrb, in_mbox);
+
+	if (rval != QLA_SUCCESS)
+		goto exit_ping;
+
+	return rval;
+exit_ping:
+	kfree(mrb);
+	return rval;
+}
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index 95828862eea0..7c9f28b7da72 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -385,6 +385,71 @@ static void qla4xxx_passthru_status_entry(struct scsi_qla_host *ha,
 	queue_work(ha->task_wq, &task_data->task_work);
 }
 
+static struct mrb *qla4xxx_del_mrb_from_active_array(struct scsi_qla_host *ha,
+						     uint32_t index)
+{
+	struct mrb *mrb = NULL;
+
+	/* validate handle and remove from active array */
+	if (index >= MAX_MRB)
+		return mrb;
+
+	mrb = ha->active_mrb_array[index];
+	ha->active_mrb_array[index] = NULL;
+	if (!mrb)
+		return mrb;
+
+	/* update counters */
+	ha->req_q_count += mrb->iocb_cnt;
+	ha->iocb_cnt -= mrb->iocb_cnt;
+
+	return mrb;
+}
+
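+/*
+ * Completion handler for ET_MBOX_STATUS response entries: look up the
+ * originating mrb by handle, map the firmware status in out_mbox[0] to
+ * QLA_SUCCESS/QLA_ERROR, and defer the netlink ping-status notification
+ * to process context via qla4xxx_post_ping_evt_work().
+ */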
+static void qla4xxx_mbox_status_entry(struct scsi_qla_host *ha,
+				      struct mbox_status_iocb *mbox_sts_entry)
+{
+	struct mrb *mrb;
+	uint32_t status;
+	uint32_t data_size;
+
+	mrb = qla4xxx_del_mrb_from_active_array(ha,
+					le32_to_cpu(mbox_sts_entry->handle));
+
+	if (mrb == NULL) {
+		ql4_printk(KERN_WARNING, ha, "%s: mrb[%d] is null\n", __func__,
+			   le32_to_cpu(mbox_sts_entry->handle));
+		return;
+	}
+
+	switch (mrb->mbox_cmd) {
+	case MBOX_CMD_PING:
+		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: mbox_cmd = 0x%x, "
+				  "mbox_sts[0] = 0x%x, mbox_sts[6] = 0x%x\n",
+				  __func__, mrb->mbox_cmd,
+				  mbox_sts_entry->out_mbox[0],
+				  mbox_sts_entry->out_mbox[6]));
+
+		if (mbox_sts_entry->out_mbox[0] == MBOX_STS_COMMAND_COMPLETE)
+			status = QLA_SUCCESS;
+		else
+			status = QLA_ERROR;
+
+		data_size = sizeof(mbox_sts_entry->out_mbox);
+
+		qla4xxx_post_ping_evt_work(ha, status, mrb->pid, data_size,
+					(uint8_t *) mbox_sts_entry->out_mbox);
+		break;
+
+	default:
+		DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: invalid mbox_cmd = "
+				  "0x%x\n", __func__, mrb->mbox_cmd));
+	}
+
+	kfree(mrb);
+}
+
 /**
  * qla4xxx_process_response_queue - process response queue completions
  * @ha: Pointer to host adapter structure.
@@ -461,6 +526,13 @@ void qla4xxx_process_response_queue(struct scsi_qla_host *ha)
 				      "ignoring\n", ha->host_no, __func__));
 			break;
 
+		case ET_MBOX_STATUS:
+			DEBUG2(ql4_printk(KERN_INFO, ha,
+					  "%s: mbox status IOCB\n", __func__));
+			qla4xxx_mbox_status_entry(ha,
+					(struct mbox_status_iocb *)sts_entry);
+			break;
+
 		default:
 			/*
 			 * Invalid entry in response queue, reset RISC
@@ -576,6 +648,9 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
 				set_bit(DPC_LINK_CHANGED, &ha->dpc_flags);
 
 			ql4_printk(KERN_INFO, ha, "%s: LINK UP\n", __func__);
+			qla4xxx_post_aen_work(ha, ISCSI_EVENT_LINKUP,
+					      sizeof(mbox_sts),
+					      (uint8_t *) mbox_sts);
 			break;
 
 		case MBOX_ASTS_LINK_DOWN:
@@ -584,6 +659,9 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
 				set_bit(DPC_LINK_CHANGED, &ha->dpc_flags);
 
 			ql4_printk(KERN_INFO, ha, "%s: LINK DOWN\n", __func__);
+			qla4xxx_post_aen_work(ha, ISCSI_EVENT_LINKDOWN,
+					      sizeof(mbox_sts),
+					      (uint8_t *) mbox_sts);
 			break;
 
 		case MBOX_ASTS_HEARTBEAT:
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index e1e66a45e4d0..7ac21dabbf22 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -622,7 +622,7 @@ int qla4xxx_get_firmware_status(struct scsi_qla_host * ha)
 		return QLA_ERROR;
 	}
 
-	ql4_printk(KERN_INFO, ha, "%ld firmare IOCBs available (%d).\n",
+	ql4_printk(KERN_INFO, ha, "%ld firmware IOCBs available (%d).\n",
 	    ha->host_no, mbox_sts[2]);
 
 	return QLA_SUCCESS;
@@ -661,6 +661,8 @@ int qla4xxx_get_fwddb_entry(struct scsi_qla_host *ha,
 	}
 	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
 	memset(&mbox_sts, 0, sizeof(mbox_sts));
+	if (fw_ddb_entry)
+		memset(fw_ddb_entry, 0, sizeof(struct dev_db_entry));
 
 	mbox_cmd[0] = MBOX_CMD_GET_DATABASE_ENTRY;
 	mbox_cmd[1] = (uint32_t) fw_ddb_index;
@@ -1424,8 +1426,8 @@ exit_set_chap:
  * match is found. If a match is not found then add the entry in FLASH and
  * return the index at which entry is written in the FLASH.
  **/
-static int qla4xxx_get_chap_index(struct scsi_qla_host *ha, char *username,
-			    char *password, int bidi, uint16_t *chap_index)
+int qla4xxx_get_chap_index(struct scsi_qla_host *ha, char *username,
+			   char *password, int bidi, uint16_t *chap_index)
 {
 	int i, rval;
 	int free_index = -1;
@@ -1444,6 +1446,11 @@ static int qla4xxx_get_chap_index(struct scsi_qla_host *ha, char *username,
 		return QLA_ERROR;
 	}
 
+	if (!username || !password) {
+		ql4_printk(KERN_ERR, ha, "Missing username or password\n");
+		return QLA_ERROR;
+	}
+
 	mutex_lock(&ha->chap_sem);
 	for (i = 0; i < max_chap_entries; i++) {
 		chap_table = (struct ql4_chap_table *)ha->chap_list + i;
@@ -1600,7 +1607,7 @@ int qla4xxx_set_param_ddbentry(struct scsi_qla_host *ha,
 	char *ip;
 	uint16_t iscsi_opts = 0;
 	uint32_t options = 0;
-	uint16_t idx;
+	uint16_t idx, *ptid;
 
 	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
 					  &fw_ddb_entry_dma, GFP_KERNEL);
@@ -1626,6 +1633,14 @@ int qla4xxx_set_param_ddbentry(struct scsi_qla_host *ha,
 		goto exit_set_param;
 	}
 
+	ptid = (uint16_t *)&fw_ddb_entry->isid[1];
+	*ptid = cpu_to_le16((uint16_t)ddb_entry->sess->target_id);
+
+	DEBUG2(ql4_printk(KERN_INFO, ha, "ISID [%02x%02x%02x%02x%02x%02x]\n",
+			  fw_ddb_entry->isid[5], fw_ddb_entry->isid[4],
+			  fw_ddb_entry->isid[3], fw_ddb_entry->isid[2],
+			  fw_ddb_entry->isid[1], fw_ddb_entry->isid[0]));
+
 	iscsi_opts = le16_to_cpu(fw_ddb_entry->iscsi_options);
 	memset(fw_ddb_entry->iscsi_alias, 0, sizeof(fw_ddb_entry->iscsi_alias));
 
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index 65253dfbe962..e1e46b6dac75 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -841,11 +841,8 @@ qla4_8xxx_rom_lock(struct scsi_qla_host *ha)
 		done = qla4_8xxx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_LOCK));
 		if (done == 1)
 			break;
-		if (timeout >= qla4_8xxx_rom_lock_timeout) {
-			ql4_printk(KERN_WARNING, ha,
-			    "%s: Failed to acquire rom lock", __func__);
+		if (timeout >= qla4_8xxx_rom_lock_timeout)
 			return -1;
-		}
 
 		timeout++;
 
@@ -996,18 +993,6 @@ qla4_8xxx_pinit_from_rom(struct scsi_qla_host *ha, int verbose)
 	else
 		qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xffffffff);
 
-	/* reset ms */
-	val = qla4_8xxx_rd_32(ha, QLA82XX_CRB_QDR_NET + 0xe4);
-	val |= (1 << 1);
-	qla4_8xxx_wr_32(ha, QLA82XX_CRB_QDR_NET + 0xe4, val);
-
-	msleep(20);
-	/* unreset ms */
-	val = qla4_8xxx_rd_32(ha, QLA82XX_CRB_QDR_NET + 0xe4);
-	val &= ~(1 << 1);
-	qla4_8xxx_wr_32(ha, QLA82XX_CRB_QDR_NET + 0xe4, val);
-	msleep(20);
-
 	qla4_8xxx_rom_unlock(ha);
 
 	/* Read the signature value from the flash.
diff --git a/drivers/scsi/qla4xxx/ql4_nx.h b/drivers/scsi/qla4xxx/ql4_nx.h
index dc45ac923691..dc7500e47b8b 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.h
+++ b/drivers/scsi/qla4xxx/ql4_nx.h
@@ -623,6 +623,7 @@ struct crb_addr_pair {
 
 #define ADDR_ERROR	((unsigned long) 0xffffffff)
 #define MAX_CTL_CHECK	1000
+#define QLA82XX_FWERROR_CODE(code)	(((code) >> 8) & 0x1fffff)
 
 /***************************************************************************
  *		PCI related defines.
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index edf503437e96..3d9419460e0c 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -32,14 +32,14 @@ static struct kmem_cache *srb_cachep;
 /*
  * Module parameter information and variables
  */
-int ql4xdisablesysfsboot = 1;
+static int ql4xdisablesysfsboot = 1;
 module_param(ql4xdisablesysfsboot, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(ql4xdisablesysfsboot,
 		 " Set to disable exporting boot targets to sysfs.\n"
 		 "\t\t  0 - Export boot targets\n"
 		 "\t\t  1 - Do not export boot targets (Default)");
 
-int ql4xdontresethba = 0;
+int ql4xdontresethba;
 module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(ql4xdontresethba,
 		 " Don't reset the HBA for driver recovery.\n"
@@ -71,7 +71,7 @@ MODULE_PARM_DESC(ql4xmaxqdepth,
 static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO;
 module_param(ql4xsess_recovery_tmo, int, S_IRUGO);
 MODULE_PARM_DESC(ql4xsess_recovery_tmo,
-		"Target Session Recovery Timeout.\n"
+		" Target Session Recovery Timeout.\n"
 		"\t\t  Default: 120 sec.");
 
 static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha);
@@ -83,6 +83,8 @@ static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha);
 /*
  * iSCSI template entry points
  */
+static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess,
+				     enum iscsi_param param, char *buf);
 static int qla4xxx_conn_get_param(struct iscsi_cls_conn *conn,
 				  enum iscsi_param param, char *buf);
 static int qla4xxx_host_get_param(struct Scsi_Host *shost,
@@ -118,6 +120,13 @@ static void qla4xxx_task_cleanup(struct iscsi_task *);
 static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session);
 static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn,
 				   struct iscsi_stats *stats);
+static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num,
+			     uint32_t iface_type, uint32_t payload_size,
+			     uint32_t pid, struct sockaddr *dst_addr);
+static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,
+				 uint32_t *num_entries, char *buf);
+static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx);
+
 /*
  * SCSI host template entry points
  */
@@ -179,7 +188,7 @@ static struct iscsi_transport qla4xxx_iscsi_transport = {
 	.destroy_conn           = qla4xxx_conn_destroy,
 	.set_param              = iscsi_set_param,
 	.get_conn_param		= qla4xxx_conn_get_param,
-	.get_session_param	= iscsi_session_get_param,
+	.get_session_param	= qla4xxx_session_get_param,
 	.get_ep_param           = qla4xxx_get_ep_param,
 	.ep_connect		= qla4xxx_ep_connect,
 	.ep_poll		= qla4xxx_ep_poll,
@@ -194,10 +203,93 @@ static struct iscsi_transport qla4xxx_iscsi_transport = {
 	.set_iface_param	= qla4xxx_iface_set_param,
 	.get_iface_param	= qla4xxx_get_iface_param,
 	.bsg_request		= qla4xxx_bsg_request,
+	.send_ping		= qla4xxx_send_ping,
+	.get_chap		= qla4xxx_get_chap_list,
+	.delete_chap		= qla4xxx_delete_chap,
 };
 
 static struct scsi_transport_template *qla4xxx_scsi_transport;
 
+static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num,
+			     uint32_t iface_type, uint32_t payload_size,
+			     uint32_t pid, struct sockaddr *dst_addr)
+{
+	struct scsi_qla_host *ha = to_qla_host(shost);
+	struct sockaddr_in *addr;
+	struct sockaddr_in6 *addr6;
+	uint32_t options = 0;
+	uint8_t ipaddr[IPv6_ADDR_LEN];
+	int rval;
+
+	memset(ipaddr, 0, IPv6_ADDR_LEN);
+	/* IPv4 to IPv4 */
+	if ((iface_type == ISCSI_IFACE_TYPE_IPV4) &&
+	    (dst_addr->sa_family == AF_INET)) {
+		addr = (struct sockaddr_in *)dst_addr;
+		memcpy(ipaddr, &addr->sin_addr.s_addr, IP_ADDR_LEN);
+		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv4 Ping src: %pI4 "
+				  "dest: %pI4\n", __func__,
+				  &ha->ip_config.ip_address, ipaddr));
+		rval = qla4xxx_ping_iocb(ha, options, payload_size, pid,
+					 ipaddr);
+		if (rval)
+			rval = -EINVAL;
+	} else if ((iface_type == ISCSI_IFACE_TYPE_IPV6) &&
+		   (dst_addr->sa_family == AF_INET6)) {
+		/* IPv6 to IPv6 */
+		addr6 = (struct sockaddr_in6 *)dst_addr;
+		memcpy(ipaddr, &addr6->sin6_addr.in6_u.u6_addr8, IPv6_ADDR_LEN);
+
+		options |= PING_IPV6_PROTOCOL_ENABLE;
+
+		/* Ping using LinkLocal address */
+		if ((iface_num == 0) || (iface_num == 1)) {
+			DEBUG2(ql4_printk(KERN_INFO, ha, "%s: LinkLocal Ping "
+					  "src: %pI6 dest: %pI6\n", __func__,
+					  &ha->ip_config.ipv6_link_local_addr,
+					  ipaddr));
+			options |= PING_IPV6_LINKLOCAL_ADDR;
+			rval = qla4xxx_ping_iocb(ha, options, payload_size,
+						 pid, ipaddr);
+		} else {
+			ql4_printk(KERN_WARNING, ha, "%s: iface num = %d "
+				   "not supported\n", __func__, iface_num);
+			rval = -ENOSYS;
+			goto exit_send_ping;
+		}
+
+		/*
+		 * If ping using LinkLocal address fails, try ping using
+		 * IPv6 address
+		 */
+		if (rval != QLA_SUCCESS) {
+			options &= ~PING_IPV6_LINKLOCAL_ADDR;
+			if (iface_num == 0) {
+				options |= PING_IPV6_ADDR0;
+				DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv6 "
+						  "Ping src: %pI6 "
+						  "dest: %pI6\n", __func__,
+						  &ha->ip_config.ipv6_addr0,
+						  ipaddr));
+			} else if (iface_num == 1) {
+				options |= PING_IPV6_ADDR1;
+				DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv6 "
+						  "Ping src: %pI6 "
+						  "dest: %pI6\n", __func__,
+						  &ha->ip_config.ipv6_addr1,
+						  ipaddr));
+			}
+			rval = qla4xxx_ping_iocb(ha, options, payload_size,
+						 pid, ipaddr);
+			if (rval)
+				rval = -EINVAL;
+		}
+	} else {
+		rval = -ENOSYS;
+	}
+exit_send_ping:
+	return rval;
+}
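+
+/*
+ * qla4xxx_send_ping() above backs the ISCSI_UEVENT_PING netlink request:
+ * IPv4 pings are issued directly, while IPv6 pings are first tried with
+ * the link-local source address and, on failure, retried with the routable
+ * address of the requested interface (PING_IPV6_ADDR0/1).
+ */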
+
 static umode_t ql4_attr_is_visible(int param_type, int param)
 {
 	switch (param_type) {
@@ -206,6 +298,8 @@ static umode_t ql4_attr_is_visible(int param_type, int param)
 		case ISCSI_HOST_PARAM_HWADDRESS:
 		case ISCSI_HOST_PARAM_IPADDRESS:
 		case ISCSI_HOST_PARAM_INITIATOR_NAME:
+		case ISCSI_HOST_PARAM_PORT_STATE:
+		case ISCSI_HOST_PARAM_PORT_SPEED:
 			return S_IRUGO;
 		default:
 			return 0;
@@ -225,6 +319,12 @@ static umode_t ql4_attr_is_visible(int param_type, int param)
 		case ISCSI_PARAM_MAX_RECV_DLENGTH:
 		case ISCSI_PARAM_MAX_XMIT_DLENGTH:
 		case ISCSI_PARAM_IFACE_NAME:
+		case ISCSI_PARAM_CHAP_OUT_IDX:
+		case ISCSI_PARAM_CHAP_IN_IDX:
+		case ISCSI_PARAM_USERNAME:
+		case ISCSI_PARAM_PASSWORD:
+		case ISCSI_PARAM_USERNAME_IN:
+		case ISCSI_PARAM_PASSWORD_IN:
 			return S_IRUGO;
 		default:
 			return 0;
@@ -255,6 +355,189 @@ static umode_t ql4_attr_is_visible(int param_type, int param)
 	return 0;
 }
 
+static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,
+				  uint32_t *num_entries, char *buf)
+{
+	struct scsi_qla_host *ha = to_qla_host(shost);
+	struct ql4_chap_table *chap_table;
+	struct iscsi_chap_rec *chap_rec;
+	int max_chap_entries = 0;
+	int valid_chap_entries = 0;
+	int ret = 0, i;
+
+	if (is_qla8022(ha))
+		max_chap_entries = (ha->hw.flt_chap_size / 2) /
+					sizeof(struct ql4_chap_table);
+	else
+		max_chap_entries = MAX_CHAP_ENTRIES_40XX;
+
+	ql4_printk(KERN_INFO, ha, "%s: num_entries = %d, CHAP idx = %d\n",
+			__func__, *num_entries, chap_tbl_idx);
+
+	if (!buf) {
+		ret = -ENOMEM;
+		goto exit_get_chap_list;
+	}
+
+	chap_rec = (struct iscsi_chap_rec *) buf;
+	mutex_lock(&ha->chap_sem);
+	for (i = chap_tbl_idx; i < max_chap_entries; i++) {
+		chap_table = (struct ql4_chap_table *)ha->chap_list + i;
+		if (chap_table->cookie !=
+		    __constant_cpu_to_le16(CHAP_VALID_COOKIE))
+			continue;
+
+		chap_rec->chap_tbl_idx = i;
+		strncpy(chap_rec->username, chap_table->name,
+			ISCSI_CHAP_AUTH_NAME_MAX_LEN);
+		strncpy(chap_rec->password, chap_table->secret,
+			QL4_CHAP_MAX_SECRET_LEN);
+		chap_rec->password_length = chap_table->secret_len;
+
+		if (chap_table->flags & BIT_7) /* local */
+			chap_rec->chap_type = CHAP_TYPE_OUT;
+
+		if (chap_table->flags & BIT_6) /* peer */
+			chap_rec->chap_type = CHAP_TYPE_IN;
+
+		chap_rec++;
+
+		valid_chap_entries++;
+		if (valid_chap_entries == *num_entries)
+			break;
+	}
+	mutex_unlock(&ha->chap_sem);
+
+exit_get_chap_list:
+	ql4_printk(KERN_INFO, ha, "%s: Valid CHAP Entries = %d\n",
+			__func__,  valid_chap_entries);
+	*num_entries = valid_chap_entries;
+	return ret;
+}
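+
+/*
+ * qla4xxx_get_chap_list() above serves the transport's .get_chap callback:
+ * it walks ha->chap_list (the in-memory copy of the flash CHAP table)
+ * under chap_sem, copies up to *num_entries valid records into buf as
+ * iscsi_chap_rec entries and rewrites *num_entries with the actual count.
+ */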
+
+static int __qla4xxx_is_chap_active(struct device *dev, void *data)
+{
+	int ret = 0;
+	uint16_t *chap_tbl_idx = (uint16_t *) data;
+	struct iscsi_cls_session *cls_session;
+	struct iscsi_session *sess;
+	struct ddb_entry *ddb_entry;
+
+	if (!iscsi_is_session_dev(dev))
+		goto exit_is_chap_active;
+
+	cls_session = iscsi_dev_to_session(dev);
+	sess = cls_session->dd_data;
+	ddb_entry = sess->dd_data;
+
+	if (iscsi_session_chkready(cls_session))
+		goto exit_is_chap_active;
+
+	if (ddb_entry->chap_tbl_idx == *chap_tbl_idx)
+		ret = 1;
+
+exit_is_chap_active:
+	return ret;
+}
+
+static int qla4xxx_is_chap_active(struct Scsi_Host *shost,
+				  uint16_t chap_tbl_idx)
+{
+	int ret = 0;
+
+	ret = device_for_each_child(&shost->shost_gendev, &chap_tbl_idx,
+				    __qla4xxx_is_chap_active);
+
+	return ret;
+}
+
+static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx)
+{
+	struct scsi_qla_host *ha = to_qla_host(shost);
+	struct ql4_chap_table *chap_table;
+	dma_addr_t chap_dma;
+	int max_chap_entries = 0;
+	uint32_t offset = 0;
+	uint32_t chap_size;
+	int ret = 0;
+
+	chap_table = dma_pool_alloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma);
+	if (chap_table == NULL)
+		return -ENOMEM;
+
+	memset(chap_table, 0, sizeof(struct ql4_chap_table));
+
+	if (is_qla8022(ha))
+		max_chap_entries = (ha->hw.flt_chap_size / 2) /
+				   sizeof(struct ql4_chap_table);
+	else
+		max_chap_entries = MAX_CHAP_ENTRIES_40XX;
+
+	if (chap_tbl_idx >= max_chap_entries) {
+		ret = -EINVAL;
+		goto exit_delete_chap;
+	}
+
+	/*
+	 * Check if the CHAP index is in use; if it is, do not delete the
+	 * CHAP entry.
+	 */
+	ret = qla4xxx_is_chap_active(shost, chap_tbl_idx);
+	if (ret) {
+		ql4_printk(KERN_INFO, ha, "CHAP entry %d is in use, cannot "
+			   "delete from flash\n", chap_tbl_idx);
+		ret = -EBUSY;
+		goto exit_delete_chap;
+	}
+
+	chap_size = sizeof(struct ql4_chap_table);
+	if (is_qla40XX(ha))
+		offset = FLASH_CHAP_OFFSET | (chap_tbl_idx * chap_size);
+	else {
+		offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
+		/* flt_chap_size is CHAP table size for both ports
+		 * so divide it by 2 to calculate the offset for second port
+		 */
+		if (ha->port_num == 1)
+			offset += (ha->hw.flt_chap_size / 2);
+		offset += (chap_tbl_idx * chap_size);
+	}
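+	/*
+	 * i.e. on an 8022, entry N for port 1 ends up at
+	 * FLASH_RAW_ACCESS_ADDR + (flt_region_chap << 2) +
+	 * flt_chap_size / 2 + N * sizeof(struct ql4_chap_table).
+	 */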
+
+	ret = qla4xxx_get_flash(ha, chap_dma, offset, chap_size);
+	if (ret != QLA_SUCCESS) {
+		ret = -EINVAL;
+		goto exit_delete_chap;
+	}
+
+	DEBUG2(ql4_printk(KERN_INFO, ha, "Chap Cookie: x%x\n",
+			  __le16_to_cpu(chap_table->cookie)));
+
+	if (__le16_to_cpu(chap_table->cookie) != CHAP_VALID_COOKIE) {
+		ql4_printk(KERN_ERR, ha, "No valid chap entry found\n");
+		ret = -EINVAL;
+		goto exit_delete_chap;
+	}
+
+	chap_table->cookie = __constant_cpu_to_le16(0xFFFF);
+
+	ret = qla4xxx_set_flash(ha, chap_dma, offset, chap_size,
+				FLASH_OPT_RMW_COMMIT);
+	if (ret == QLA_SUCCESS && ha->chap_list) {
+		mutex_lock(&ha->chap_sem);
+		/* Update ha chap_list cache */
+		memcpy((struct ql4_chap_table *)ha->chap_list + chap_tbl_idx,
+			chap_table, sizeof(struct ql4_chap_table));
+		mutex_unlock(&ha->chap_sem);
+	}
+	if (ret != QLA_SUCCESS)
+		ret = -EINVAL;
+
+exit_delete_chap:
+	dma_pool_free(ha->chap_dma_pool, chap_table, chap_dma);
+	return ret;
+}
+
 static int qla4xxx_get_iface_param(struct iscsi_iface *iface,
 				   enum iscsi_param_type param_type,
 				   int param, char *buf)
@@ -548,6 +831,43 @@ static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc)
 	return ret;
 }
 
+static void qla4xxx_set_port_speed(struct Scsi_Host *shost)
+{
+	struct scsi_qla_host *ha = to_qla_host(shost);
+	struct iscsi_cls_host *ihost = shost_priv(shost);
+	uint32_t speed = ISCSI_PORT_SPEED_UNKNOWN;
+
+	qla4xxx_get_firmware_state(ha);
+
+	switch (ha->addl_fw_state & 0x0F00) {
+	case FW_ADDSTATE_LINK_SPEED_10MBPS:
+		speed = ISCSI_PORT_SPEED_10MBPS;
+		break;
+	case FW_ADDSTATE_LINK_SPEED_100MBPS:
+		speed = ISCSI_PORT_SPEED_100MBPS;
+		break;
+	case FW_ADDSTATE_LINK_SPEED_1GBPS:
+		speed = ISCSI_PORT_SPEED_1GBPS;
+		break;
+	case FW_ADDSTATE_LINK_SPEED_10GBPS:
+		speed = ISCSI_PORT_SPEED_10GBPS;
+		break;
+	}
+	ihost->port_speed = speed;
+}
+
+static void qla4xxx_set_port_state(struct Scsi_Host *shost)
+{
+	struct scsi_qla_host *ha = to_qla_host(shost);
+	struct iscsi_cls_host *ihost = shost_priv(shost);
+	uint32_t state = ISCSI_PORT_STATE_DOWN;
+
+	if (test_bit(AF_LINK_UP, &ha->flags))
+		state = ISCSI_PORT_STATE_UP;
+
+	ihost->port_state = state;
+}
+
 static int qla4xxx_host_get_param(struct Scsi_Host *shost,
 				  enum iscsi_host_param param, char *buf)
 {
@@ -564,6 +884,14 @@ static int qla4xxx_host_get_param(struct Scsi_Host *shost,
 	case ISCSI_HOST_PARAM_INITIATOR_NAME:
 		len = sprintf(buf, "%s\n", ha->name_string);
 		break;
+	case ISCSI_HOST_PARAM_PORT_STATE:
+		qla4xxx_set_port_state(shost);
+		len = sprintf(buf, "%s\n", iscsi_get_port_state_name(shost));
+		break;
+	case ISCSI_HOST_PARAM_PORT_SPEED:
+		qla4xxx_set_port_speed(shost);
+		len = sprintf(buf, "%s\n", iscsi_get_port_speed_name(shost));
+		break;
 	default:
 		return -ENOSYS;
 	}
@@ -968,6 +1296,41 @@ exit_init_fw_cb:
 	return rval;
 }
 
+static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess,
+				     enum iscsi_param param, char *buf)
+{
+	struct iscsi_session *sess = cls_sess->dd_data;
+	struct ddb_entry *ddb_entry = sess->dd_data;
+	struct scsi_qla_host *ha = ddb_entry->ha;
+	int rval, len;
+	uint16_t idx;
+
+	switch (param) {
+	case ISCSI_PARAM_CHAP_IN_IDX:
+		rval = qla4xxx_get_chap_index(ha, sess->username_in,
+					      sess->password_in, BIDI_CHAP,
+					      &idx);
+		if (rval)
+			return -EINVAL;
+
+		len = sprintf(buf, "%hu\n", idx);
+		break;
+	case ISCSI_PARAM_CHAP_OUT_IDX:
+		rval = qla4xxx_get_chap_index(ha, sess->username,
+					      sess->password, LOCAL_CHAP,
+					      &idx);
+		if (rval)
+			return -EINVAL;
+
+		len = sprintf(buf, "%hu\n", idx);
+		break;
+	default:
+		return iscsi_session_get_param(cls_sess, param, buf);
+	}
+
+	return len;
+}
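+
+/*
+ * qla4xxx_session_get_param() above overrides the generic handler only for
+ * the CHAP index parameters, which are resolved by matching the session's
+ * CHAP credentials against the flash CHAP table; everything else falls
+ * through to iscsi_session_get_param().
+ */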
+
 static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn,
 				  enum iscsi_param param, char *buf)
 {
@@ -1506,13 +1869,17 @@ static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha,
 {
 	int buflen = 0;
 	struct iscsi_session *sess;
+	struct ddb_entry *ddb_entry;
 	struct iscsi_conn *conn;
 	char ip_addr[DDB_IPADDR_LEN];
 	uint16_t options = 0;
 
 	sess = cls_sess->dd_data;
+	ddb_entry = sess->dd_data;
 	conn = cls_conn->dd_data;
 
+	ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
+
 	conn->max_recv_dlength = BYTE_UNITS *
 			  le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
 
@@ -1552,6 +1919,8 @@ static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha,
 			(char *)ha->name_string, buflen);
 	iscsi_set_param(cls_conn, ISCSI_PARAM_PERSISTENT_ADDRESS,
 			(char *)ip_addr, buflen);
+	iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_ALIAS,
+			(char *)fw_ddb_entry->iscsi_alias, buflen);
 }
 
 void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha,
@@ -1638,6 +2007,7 @@ void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
 				le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
 
 	/* Update params */
+	ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
 	conn->max_recv_dlength = BYTE_UNITS *
 			  le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
 
@@ -1666,6 +2036,9 @@ void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
 	memcpy(sess->initiatorname, ha->name_string,
 	       min(sizeof(ha->name_string), sizeof(sess->initiatorname)));
 
+	iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_ALIAS,
+			(char *)fw_ddb_entry->iscsi_alias, 0);
+
 exit_session_conn_param:
 	if (fw_ddb_entry)
 		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
@@ -2113,7 +2486,7 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
 				halt_status = qla4_8xxx_rd_32(ha,
 						QLA82XX_PEG_HALT_STATUS1);
 
-				if (LSW(MSB(halt_status)) == 0x67)
+				if (QLA82XX_FWERROR_CODE(halt_status) == 0x67)
 					ql4_printk(KERN_ERR, ha, "%s:"
 						   " Firmware aborted with"
 						   " error code 0x00006700."
@@ -2230,6 +2603,10 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
 		}
 	}
 
+	/* Process any deferred work. */
+	if (!list_empty(&ha->work_list))
+		start_dpc++;
+
 	/* Wakeup the dpc routine for this adapter, if needed. */
 	if (start_dpc ||
 	     test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
@@ -2795,6 +3172,109 @@ void qla4xxx_wake_dpc(struct scsi_qla_host *ha)
 		queue_work(ha->dpc_thread, &ha->dpc_work);
 }
 
+static struct qla4_work_evt *
+qla4xxx_alloc_work(struct scsi_qla_host *ha, uint32_t data_size,
+		   enum qla4_work_type type)
+{
+	struct qla4_work_evt *e;
+	uint32_t size = sizeof(struct qla4_work_evt) + data_size;
+
+	e = kzalloc(size, GFP_ATOMIC);
+	if (!e)
+		return NULL;
+
+	INIT_LIST_HEAD(&e->list);
+	e->type = type;
+	return e;
+}
+
+static void qla4xxx_post_work(struct scsi_qla_host *ha,
+			     struct qla4_work_evt *e)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ha->work_lock, flags);
+	list_add_tail(&e->list, &ha->work_list);
+	spin_unlock_irqrestore(&ha->work_lock, flags);
+	qla4xxx_wake_dpc(ha);
+}
+
+int qla4xxx_post_aen_work(struct scsi_qla_host *ha,
+			  enum iscsi_host_event_code aen_code,
+			  uint32_t data_size, uint8_t *data)
+{
+	struct qla4_work_evt *e;
+
+	e = qla4xxx_alloc_work(ha, data_size, QLA4_EVENT_AEN);
+	if (!e)
+		return QLA_ERROR;
+
+	e->u.aen.code = aen_code;
+	e->u.aen.data_size = data_size;
+	memcpy(e->u.aen.data, data, data_size);
+
+	qla4xxx_post_work(ha, e);
+
+	return QLA_SUCCESS;
+}
+
+int qla4xxx_post_ping_evt_work(struct scsi_qla_host *ha,
+			       uint32_t status, uint32_t pid,
+			       uint32_t data_size, uint8_t *data)
+{
+	struct qla4_work_evt *e;
+
+	e = qla4xxx_alloc_work(ha, data_size, QLA4_EVENT_PING_STATUS);
+	if (!e)
+		return QLA_ERROR;
+
+	e->u.ping.status = status;
+	e->u.ping.pid = pid;
+	e->u.ping.data_size = data_size;
+	memcpy(e->u.ping.data, data, data_size);
+
+	qla4xxx_post_work(ha, e);
+
+	return QLA_SUCCESS;
+}
+
+static void qla4xxx_do_work(struct scsi_qla_host *ha)
+{
+	struct qla4_work_evt *e, *tmp;
+	unsigned long flags;
+	LIST_HEAD(work);
+
+	spin_lock_irqsave(&ha->work_lock, flags);
+	list_splice_init(&ha->work_list, &work);
+	spin_unlock_irqrestore(&ha->work_lock, flags);
+
+	list_for_each_entry_safe(e, tmp, &work, list) {
+		list_del_init(&e->list);
+
+		switch (e->type) {
+		case QLA4_EVENT_AEN:
+			iscsi_post_host_event(ha->host_no,
+					      &qla4xxx_iscsi_transport,
+					      e->u.aen.code,
+					      e->u.aen.data_size,
+					      e->u.aen.data);
+			break;
+		case QLA4_EVENT_PING_STATUS:
+			iscsi_ping_comp_event(ha->host_no,
+					      &qla4xxx_iscsi_transport,
+					      e->u.ping.status,
+					      e->u.ping.pid,
+					      e->u.ping.data_size,
+					      e->u.ping.data);
+			break;
+		default:
+			ql4_printk(KERN_WARNING, ha, "event type: 0x%x not "
+				   "supported\n", e->type);
+		}
+		kfree(e);
+	}
+}
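+
+/*
+ * The helpers above form the deferred-event path: IRQ context allocates a
+ * qla4_work_evt with GFP_ATOMIC, qla4xxx_post_work() queues it on
+ * ha->work_list under work_lock and wakes the DPC thread, and
+ * qla4xxx_do_work() splices the list off and posts each event to user
+ * space through the iscsi transport netlink interface.
+ */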
+
 /**
  * qla4xxx_do_dpc - dpc routine
  * @data: in our case pointer to adapter structure
@@ -2826,6 +3306,9 @@ static void qla4xxx_do_dpc(struct work_struct *work)
 		return;
 	}
 
+	/* post events to application */
+	qla4xxx_do_work(ha);
+
 	if (is_qla8022(ha)) {
 		if (test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags)) {
 			qla4_8xxx_idc_lock(ha);
@@ -3341,9 +3824,8 @@ static int get_fw_boot_info(struct scsi_qla_host *ha, uint16_t ddb_index[])
 		/* Check Boot Mode */
 		val = rd_nvram_byte(ha, addr);
 		if (!(val & 0x07)) {
-			DEBUG2(ql4_printk(KERN_ERR, ha,
-					  "%s: Failed Boot options : 0x%x\n",
-					  __func__, val));
+			DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Adapter boot "
+					  "options : 0x%x\n", __func__, val));
 			ret = QLA_ERROR;
 			goto exit_boot_info;
 		}
@@ -3388,9 +3870,8 @@ static int get_fw_boot_info(struct scsi_qla_host *ha, uint16_t ddb_index[])
 		}
 		/* Check Boot Mode */
 		if (!(buf[1] & 0x07)) {
-			DEBUG2(ql4_printk(KERN_INFO, ha,
-					  "Failed: Boot options : 0x%x\n",
-					  buf[1]));
+			DEBUG2(ql4_printk(KERN_INFO, ha, "Firmware boot options"
+					  " : 0x%x\n", buf[1]));
 			ret = QLA_ERROR;
 			goto exit_boot_info_free;
 		}
@@ -3411,12 +3892,11 @@ static int get_fw_boot_info(struct scsi_qla_host *ha, uint16_t ddb_index[])
 			  " target ID %d\n", __func__, ddb_index[0],
 			  ddb_index[1]));
 
-	ha->pri_ddb_idx = ddb_index[0];
-	ha->sec_ddb_idx = ddb_index[1];
-
 exit_boot_info_free:
 	dma_free_coherent(&ha->pdev->dev, size, buf, buf_dma);
 exit_boot_info:
+	ha->pri_ddb_idx = ddb_index[0];
+	ha->sec_ddb_idx = ddb_index[1];
 	return ret;
 }
 
@@ -3497,8 +3977,8 @@ static int qla4xxx_get_boot_target(struct scsi_qla_host *ha,
 
 	if (qla4xxx_bootdb_by_index(ha, fw_ddb_entry,
 				   fw_ddb_entry_dma, ddb_index)) {
-		DEBUG2(ql4_printk(KERN_ERR, ha,
-				  "%s: Flash DDB read Failed\n", __func__));
+		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: No Flash DDB found at "
+				  "index [%d]\n", __func__, ddb_index));
 		ret = QLA_ERROR;
 		goto exit_boot_target;
 	}
@@ -3576,8 +4056,8 @@ static int qla4xxx_get_boot_info(struct scsi_qla_host *ha)
 	ddb_index[1] = 0xffff;
 	ret = get_fw_boot_info(ha, ddb_index);
 	if (ret != QLA_SUCCESS) {
-		DEBUG2(ql4_printk(KERN_ERR, ha,
-				  "%s: Failed to set boot info.\n", __func__));
+		DEBUG2(ql4_printk(KERN_INFO, ha,
+				"%s: No boot target configured.\n", __func__));
 		return ret;
 	}
 
@@ -3590,8 +4070,8 @@ static int qla4xxx_get_boot_info(struct scsi_qla_host *ha)
 	rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_pri_sess),
 				      ddb_index[0]);
 	if (rval != QLA_SUCCESS) {
-		DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Failed to get "
-				  "primary target\n", __func__));
+		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Primary boot target not "
+				  "configured\n", __func__));
 	} else
 		ret = QLA_SUCCESS;
 
@@ -3602,8 +4082,8 @@ sec_target:
 	rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_sec_sess),
 				      ddb_index[1]);
 	if (rval != QLA_SUCCESS) {
-		DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Failed to get "
-				  "secondary target\n", __func__));
+		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Secondary boot target not"
+				  " configured\n", __func__));
 	} else
 		ret = QLA_SUCCESS;
 
@@ -3772,11 +4252,13 @@ static void qla4xxx_convert_param_ddb(struct dev_db_entry *fw_ddb_entry,
 		sprintf(tddb->ip_addr, "%pI4", fw_ddb_entry->ip_addr);
 
 	tddb->port = le16_to_cpu(fw_ddb_entry->port);
+	memcpy(&tddb->isid[0], &fw_ddb_entry->isid[0], sizeof(tddb->isid));
 }
 
 static int qla4xxx_compare_tuple_ddb(struct scsi_qla_host *ha,
 				     struct ql4_tuple_ddb *old_tddb,
-				     struct ql4_tuple_ddb *new_tddb)
+				     struct ql4_tuple_ddb *new_tddb,
+				     uint8_t is_isid_compare)
 {
 	if (strcmp(old_tddb->iscsi_name, new_tddb->iscsi_name))
 		return QLA_ERROR;
@@ -3787,6 +4269,26 @@ static int qla4xxx_compare_tuple_ddb(struct scsi_qla_host *ha,
 	if (old_tddb->port != new_tddb->port)
 		return QLA_ERROR;
 
+	/*
+	 * For multiple sessions, the driver generates the ISID, so do not
+	 * compare ISIDs in the reset path: that would compare a
+	 * driver-generated ISID against a firmware-generated one, and the
+	 * guaranteed mismatch would add duplicate DDBs to the list.
+	 */
+	if (is_isid_compare) {
+		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: old ISID [%02x%02x%02x"
+			"%02x%02x%02x] New ISID [%02x%02x%02x%02x%02x%02x]\n",
+			__func__, old_tddb->isid[5], old_tddb->isid[4],
+			old_tddb->isid[3], old_tddb->isid[2], old_tddb->isid[1],
+			old_tddb->isid[0], new_tddb->isid[5], new_tddb->isid[4],
+			new_tddb->isid[3], new_tddb->isid[2], new_tddb->isid[1],
+			new_tddb->isid[0]));
+
+		if (memcmp(&old_tddb->isid[0], &new_tddb->isid[0],
+			   sizeof(old_tddb->isid)))
+			return QLA_ERROR;
+	}
+
 	DEBUG2(ql4_printk(KERN_INFO, ha,
 			  "Match Found, fw[%d,%d,%s,%s], [%d,%d,%s,%s]",
 			  old_tddb->port, old_tddb->tpgt, old_tddb->ip_addr,
@@ -3829,7 +4331,7 @@ static int qla4xxx_is_session_exists(struct scsi_qla_host *ha,
 			continue;
 
 		qla4xxx_get_param_ddb(ddb_entry, tmp_tddb);
-		if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb)) {
+		if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, false)) {
 			ret = QLA_SUCCESS; /* found */
 			goto exit_check;
 		}
@@ -3872,7 +4374,7 @@ static int qla4xxx_is_flash_ddb_exists(struct scsi_qla_host *ha,
 
 	list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
 		qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb);
-		if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb)) {
+		if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, true)) {
 			ret = QLA_SUCCESS; /* found */
 			goto exit_check;
 		}
@@ -4038,6 +4540,10 @@ static void qla4xxx_build_st_list(struct scsi_qla_host *ha,
 		if (ret == QLA_ERROR)
 			break;
 
+		/* Ignore DDB if invalid state (unassigned) */
+		if (state == DDB_DS_UNASSIGNED)
+			goto continue_next_st;
+
 		/* Check if ST, add to the list_st */
 		if (strlen((char *) fw_ddb_entry->iscsi_name) != 0)
 			goto continue_next_st;
@@ -4397,6 +4903,9 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
 
 	spin_lock_init(&ha->hardware_lock);
 
+	/* Initialize work list */
+	INIT_LIST_HEAD(&ha->work_list);
+
 	/* Allocate dma buffers */
 	if (qla4xxx_mem_alloc(ha)) {
 		ql4_printk(KERN_WARNING, ha,
@@ -4524,8 +5033,8 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
 	       ha->patch_number, ha->build_number);
 
 	if (qla4xxx_setup_boot_info(ha))
-		ql4_printk(KERN_ERR, ha, "%s:ISCSI boot info setup failed\n",
-			   __func__);
+		ql4_printk(KERN_ERR, ha,
+			   "%s: No iSCSI boot target configured\n", __func__);
 
 		/* Perform the build ddb list and login to each */
 	qla4xxx_build_ddb_list(ha, INIT_ADAPTER);
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index 133989b3a9f4..ede9af944141 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,4 +5,4 @@
  * See LICENSE.qla4xxx for copyright and licensing details.
  */
 
-#define QLA4XXX_DRIVER_VERSION	"5.02.00-k12"
+#define QLA4XXX_DRIVER_VERSION	"5.02.00-k15"
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 2aeb2e9c4d3b..07322ecff90d 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -782,12 +782,6 @@ static void scsi_done(struct scsi_cmnd *cmd)
 	blk_complete_request(cmd->request);
 }
 
-/* Move this to a header if it becomes more generally useful */
-static struct scsi_driver *scsi_cmd_to_driver(struct scsi_cmnd *cmd)
-{
-	return *(struct scsi_driver **)cmd->request->rq_disk->private_data;
-}
-
 /**
  * scsi_finish_command - cleanup and pass command back to upper layer
  * @cmd: the command
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 68da6c092f65..591856131c4e 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -126,6 +126,7 @@ static const char * scsi_debug_version_date = "20100324";
 #define SCSI_DEBUG_OPT_TRANSPORT_ERR   16
 #define SCSI_DEBUG_OPT_DIF_ERR   32
 #define SCSI_DEBUG_OPT_DIX_ERR   64
+#define SCSI_DEBUG_OPT_MAC_TIMEOUT  128
 /* When "every_nth" > 0 then modulo "every_nth" commands:
  *   - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
  *   - a RECOVERED_ERROR is simulated on successful read and write
@@ -2220,7 +2221,7 @@ static int resp_get_lba_status(struct scsi_cmnd * scmd,
 	mapped = map_state(lba, &num);
 
 	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
-	put_unaligned_be32(16, &arr[0]);	/* Parameter Data Length */
+	put_unaligned_be32(20, &arr[0]);	/* Parameter Data Length */
 	put_unaligned_be64(lba, &arr[8]);	/* LBA */
 	put_unaligned_be32(num, &arr[16]);	/* Number of blocks */
 	arr[20] = !mapped;			/* mapped = 0, unmapped = 1 */
@@ -3615,6 +3616,9 @@ int scsi_debug_queuecommand_lck(struct scsi_cmnd *SCpnt, done_funct_t done)
 			scsi_debug_every_nth = -1;
 		if (SCSI_DEBUG_OPT_TIMEOUT & scsi_debug_opts)
 			return 0; /* ignore command causing timeout */
+		else if (SCSI_DEBUG_OPT_MAC_TIMEOUT & scsi_debug_opts &&
+			 scsi_medium_access_command(SCpnt))
+			return 0; /* time out reads and writes */
 		else if (SCSI_DEBUG_OPT_RECOVERED_ERR & scsi_debug_opts)
 			inj_recovered = 1; /* to reads and writes below */
 		else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & scsi_debug_opts)
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 5f84a148eb14..2cfcbffa41fd 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -30,6 +30,7 @@
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_dbg.h>
 #include <scsi/scsi_device.h>
+#include <scsi/scsi_driver.h>
 #include <scsi/scsi_eh.h>
 #include <scsi/scsi_transport.h>
 #include <scsi/scsi_host.h>
@@ -141,11 +142,11 @@ enum blk_eh_timer_return scsi_times_out(struct request *req)
 	else if (host->hostt->eh_timed_out)
 		rtn = host->hostt->eh_timed_out(scmd);
 
+	scmd->result |= DID_TIME_OUT << 16;
+
 	if (unlikely(rtn == BLK_EH_NOT_HANDLED &&
-		     !scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD))) {
-		scmd->result |= DID_TIME_OUT << 16;
+		     !scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD)))
 		rtn = BLK_EH_HANDLED;
-	}
 
 	return rtn;
 }
@@ -366,6 +367,14 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
 			return TARGET_ERROR;
 
 	case ILLEGAL_REQUEST:
+		if (sshdr.asc == 0x20 || /* Invalid command operation code */
+		    sshdr.asc == 0x21 || /* Logical block address out of range */
+		    sshdr.asc == 0x24 || /* Invalid field in cdb */
+		    sshdr.asc == 0x26) { /* Parameter value invalid */
+			return TARGET_ERROR;
+		}
+		return SUCCESS;
+
 	default:
 		return SUCCESS;
 	}
@@ -770,6 +779,7 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
 			     int cmnd_size, int timeout, unsigned sense_bytes)
 {
 	struct scsi_device *sdev = scmd->device;
+	struct scsi_driver *sdrv = scsi_cmd_to_driver(scmd);
 	struct Scsi_Host *shost = sdev->host;
 	DECLARE_COMPLETION_ONSTACK(done);
 	unsigned long timeleft;
@@ -824,6 +834,10 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
 	}
 
 	scsi_eh_restore_cmnd(scmd, &ses);
+
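+	/* Give the ULD (e.g. sd) a chance to veto or adjust the verdict. */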
+	if (sdrv->eh_action)
+		rtn = sdrv->eh_action(scmd, cmnd, cmnd_size, rtn);
+
 	return rtn;
 }
 
@@ -1540,7 +1554,7 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
 			 * Need to modify host byte to signal a
 			 * permanent target failure
 			 */
-			scmd->result |= (DID_TARGET_FAILURE << 16);
+			set_host_byte(scmd, DID_TARGET_FAILURE);
 			rtn = SUCCESS;
 		}
 		/* if rtn == FAILED, we have no sense information;
@@ -1560,7 +1574,7 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
 	case RESERVATION_CONFLICT:
 		sdev_printk(KERN_INFO, scmd->device,
 			    "reservation conflict\n");
-		scmd->result |= (DID_NEXUS_FAILURE << 16);
+		set_host_byte(scmd, DID_NEXUS_FAILURE);
 		return SUCCESS; /* causes immediate i/o error */
 	default:
 		return FAILED;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index a33b2b66da67..ead6405f3e51 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -682,11 +682,11 @@ static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result)
 		error = -ENOLINK;
 		break;
 	case DID_TARGET_FAILURE:
-		cmd->result |= (DID_OK << 16);
+		set_host_byte(cmd, DID_OK);
 		error = -EREMOTEIO;
 		break;
 	case DID_NEXUS_FAILURE:
-		cmd->result |= (DID_OK << 16);
+		set_host_byte(cmd, DID_OK);
 		error = -EBADE;
 		break;
 	default:
@@ -880,6 +880,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 				    cmd->cmnd[0] == WRITE_SAME)) {
 				description = "Discard failure";
 				action = ACTION_FAIL;
+				error = -EREMOTEIO;
 			} else
 				action = ACTION_FAIL;
 			break;
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index f59d4a05ecd7..80fbe2ac0b47 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -313,7 +313,7 @@ static void fc_scsi_scan_rport(struct work_struct *work);
 #define FC_STARGET_NUM_ATTRS 	3
 #define FC_RPORT_NUM_ATTRS	10
 #define FC_VPORT_NUM_ATTRS	9
-#define FC_HOST_NUM_ATTRS	22
+#define FC_HOST_NUM_ATTRS	29
 
 struct fc_internal {
 	struct scsi_transport_template t;
@@ -399,6 +399,20 @@ static int fc_host_setup(struct transport_container *tc, struct device *dev,
 	fc_host->max_npiv_vports = 0;
 	memset(fc_host->serial_number, 0,
 		sizeof(fc_host->serial_number));
+	memset(fc_host->manufacturer, 0,
+		sizeof(fc_host->manufacturer));
+	memset(fc_host->model, 0,
+		sizeof(fc_host->model));
+	memset(fc_host->model_description, 0,
+		sizeof(fc_host->model_description));
+	memset(fc_host->hardware_version, 0,
+		sizeof(fc_host->hardware_version));
+	memset(fc_host->driver_version, 0,
+		sizeof(fc_host->driver_version));
+	memset(fc_host->firmware_version, 0,
+		sizeof(fc_host->firmware_version));
+	memset(fc_host->optionrom_version, 0,
+		sizeof(fc_host->optionrom_version));
 
 	fc_host->port_id = -1;
 	fc_host->port_type = FC_PORTTYPE_UNKNOWN;
@@ -1513,6 +1527,13 @@ fc_private_host_rd_attr_cast(permanent_port_name, "0x%llx\n", 20,
 fc_private_host_rd_attr(maxframe_size, "%u bytes\n", 20);
 fc_private_host_rd_attr(max_npiv_vports, "%u\n", 20);
 fc_private_host_rd_attr(serial_number, "%s\n", (FC_SERIAL_NUMBER_SIZE +1));
+fc_private_host_rd_attr(manufacturer, "%s\n", FC_SERIAL_NUMBER_SIZE + 1);
+fc_private_host_rd_attr(model, "%s\n", FC_SYMBOLIC_NAME_SIZE + 1);
+fc_private_host_rd_attr(model_description, "%s\n", FC_SYMBOLIC_NAME_SIZE + 1);
+fc_private_host_rd_attr(hardware_version, "%s\n", FC_VERSION_STRING_SIZE + 1);
+fc_private_host_rd_attr(driver_version, "%s\n", FC_VERSION_STRING_SIZE + 1);
+fc_private_host_rd_attr(firmware_version, "%s\n", FC_VERSION_STRING_SIZE + 1);
+fc_private_host_rd_attr(optionrom_version, "%s\n", FC_VERSION_STRING_SIZE + 1);
 
 
 /* Dynamic Host Attributes */
@@ -2208,6 +2229,13 @@ fc_attach_transport(struct fc_function_template *ft)
 		SETUP_HOST_ATTRIBUTE_RD_NS(npiv_vports_inuse);
 	}
 	SETUP_HOST_ATTRIBUTE_RD(serial_number);
+	SETUP_HOST_ATTRIBUTE_RD(manufacturer);
+	SETUP_HOST_ATTRIBUTE_RD(model);
+	SETUP_HOST_ATTRIBUTE_RD(model_description);
+	SETUP_HOST_ATTRIBUTE_RD(hardware_version);
+	SETUP_HOST_ATTRIBUTE_RD(driver_version);
+	SETUP_HOST_ATTRIBUTE_RD(firmware_version);
+	SETUP_HOST_ATTRIBUTE_RD(optionrom_version);
 
 	SETUP_HOST_ATTRIBUTE_RD(port_id);
 	SETUP_HOST_ATTRIBUTE_RD(port_type);
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index cfd491437239..fac31730addf 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -727,10 +727,11 @@ static void iscsi_session_release(struct device *dev)
 	kfree(session);
 }
 
-static int iscsi_is_session_dev(const struct device *dev)
+int iscsi_is_session_dev(const struct device *dev)
 {
 	return dev->release == iscsi_session_release;
 }
+EXPORT_SYMBOL_GPL(iscsi_is_session_dev);
 
 static int iscsi_iter_session_fn(struct device *dev, void *data)
 {
@@ -1476,6 +1477,66 @@ void iscsi_conn_login_event(struct iscsi_cls_conn *conn,
 }
 EXPORT_SYMBOL_GPL(iscsi_conn_login_event);
 
+void iscsi_post_host_event(uint32_t host_no, struct iscsi_transport *transport,
+			   enum iscsi_host_event_code code, uint32_t data_size,
+			   uint8_t *data)
+{
+	struct nlmsghdr *nlh;
+	struct sk_buff *skb;
+	struct iscsi_uevent *ev;
+	int len = NLMSG_SPACE(sizeof(*ev) + data_size);
+
+	skb = alloc_skb(len, GFP_KERNEL);
+	if (!skb) {
+		printk(KERN_ERR "gracefully ignored host event (%u):%d OOM\n",
+		       host_no, code);
+		return;
+	}
+
+	nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
+	ev = NLMSG_DATA(nlh);
+	ev->transport_handle = iscsi_handle(transport);
+	ev->type = ISCSI_KEVENT_HOST_EVENT;
+	ev->r.host_event.host_no = host_no;
+	ev->r.host_event.code = code;
+	ev->r.host_event.data_size = data_size;
+
+	if (data_size)
+		memcpy((char *)ev + sizeof(*ev), data, data_size);
+
+	iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_KERNEL);
+}
+EXPORT_SYMBOL_GPL(iscsi_post_host_event);
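+
+/*
+ * iscsi_post_host_event() above multicasts an asynchronous host event
+ * (e.g. ISCSI_EVENT_LINKUP/LINKDOWN) to iscsid on ISCSI_NL_GRP_ISCSID,
+ * with any payload appended directly after the iscsi_uevent header.
+ */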
+
+void iscsi_ping_comp_event(uint32_t host_no, struct iscsi_transport *transport,
+			   uint32_t status, uint32_t pid, uint32_t data_size,
+			   uint8_t *data)
+{
+	struct nlmsghdr *nlh;
+	struct sk_buff *skb;
+	struct iscsi_uevent *ev;
+	int len = NLMSG_SPACE(sizeof(*ev) + data_size);
+
+	skb = alloc_skb(len, GFP_KERNEL);
+	if (!skb) {
+		printk(KERN_ERR "gracefully ignored ping comp: OOM\n");
+		return;
+	}
+
+	nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
+	ev = NLMSG_DATA(nlh);
+	ev->transport_handle = iscsi_handle(transport);
+	ev->type = ISCSI_KEVENT_PING_COMP;
+	ev->r.ping_comp.host_no = host_no;
+	ev->r.ping_comp.status = status;
+	ev->r.ping_comp.pid = pid;
+	ev->r.ping_comp.data_size = data_size;
+	memcpy((char *)ev + sizeof(*ev), data, data_size);
+
+	iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_KERNEL);
+}
+EXPORT_SYMBOL_GPL(iscsi_ping_comp_event);
+
 static int
 iscsi_if_send_reply(uint32_t group, int seq, int type, int done, int multi,
 		    void *payload, int size)
@@ -1915,6 +1976,123 @@ iscsi_set_iface_params(struct iscsi_transport *transport,
 }
 
 static int
+iscsi_send_ping(struct iscsi_transport *transport, struct iscsi_uevent *ev)
+{
+	struct Scsi_Host *shost;
+	struct sockaddr *dst_addr;
+	int err;
+
+	if (!transport->send_ping)
+		return -ENOSYS;
+
+	shost = scsi_host_lookup(ev->u.iscsi_ping.host_no);
+	if (!shost) {
+		printk(KERN_ERR "iscsi_ping could not find host no %u\n",
+		       ev->u.iscsi_ping.host_no);
+		return -ENODEV;
+	}
+
+	dst_addr = (struct sockaddr *)((char *)ev + sizeof(*ev));
+	err = transport->send_ping(shost, ev->u.iscsi_ping.iface_num,
+				   ev->u.iscsi_ping.iface_type,
+				   ev->u.iscsi_ping.payload_size,
+				   ev->u.iscsi_ping.pid,
+				   dst_addr);
+	scsi_host_put(shost);
+	return err;
+}
+
+static int
+iscsi_get_chap(struct iscsi_transport *transport, struct nlmsghdr *nlh)
+{
+	struct iscsi_uevent *ev = NLMSG_DATA(nlh);
+	struct Scsi_Host *shost = NULL;
+	struct iscsi_chap_rec *chap_rec;
+	struct iscsi_internal *priv;
+	struct sk_buff *skbchap;
+	struct nlmsghdr *nlhchap;
+	struct iscsi_uevent *evchap;
+	uint32_t chap_buf_size;
+	int len, err = 0;
+	char *buf;
+
+	if (!transport->get_chap)
+		return -EINVAL;
+
+	priv = iscsi_if_transport_lookup(transport);
+	if (!priv)
+		return -EINVAL;
+
+	chap_buf_size = (ev->u.get_chap.num_entries * sizeof(*chap_rec));
+	len = NLMSG_SPACE(sizeof(*ev) + chap_buf_size);
+
+	shost = scsi_host_lookup(ev->u.get_chap.host_no);
+	if (!shost) {
+		printk(KERN_ERR "%s: failed. Could not find host no %u\n",
+		       __func__, ev->u.get_chap.host_no);
+		return -ENODEV;
+	}
+
+	do {
+		int actual_size;
+
+		skbchap = alloc_skb(len, GFP_KERNEL);
+		if (!skbchap) {
+			printk(KERN_ERR "cannot deliver CHAP: OOM\n");
+			err = -ENOMEM;
+			goto exit_get_chap;
+		}
+
+		nlhchap = __nlmsg_put(skbchap, 0, 0, 0,
+				      (len - sizeof(*nlhchap)), 0);
+		evchap = NLMSG_DATA(nlhchap);
+		memset(evchap, 0, sizeof(*evchap));
+		evchap->transport_handle = iscsi_handle(transport);
+		evchap->type = nlh->nlmsg_type;
+		evchap->u.get_chap.host_no = ev->u.get_chap.host_no;
+		evchap->u.get_chap.chap_tbl_idx = ev->u.get_chap.chap_tbl_idx;
+		evchap->u.get_chap.num_entries = ev->u.get_chap.num_entries;
+		buf = (char *) ((char *)evchap + sizeof(*evchap));
+		memset(buf, 0, chap_buf_size);
+
+		err = transport->get_chap(shost, ev->u.get_chap.chap_tbl_idx,
+				    &evchap->u.get_chap.num_entries, buf);
+
+		actual_size = NLMSG_SPACE(sizeof(*ev) + chap_buf_size);
+		skb_trim(skbchap, NLMSG_ALIGN(actual_size));
+		nlhchap->nlmsg_len = actual_size;
+
+		err = iscsi_multicast_skb(skbchap, ISCSI_NL_GRP_ISCSID,
+					  GFP_KERNEL);
+	} while (err < 0 && err != -ECONNREFUSED);
+
+exit_get_chap:
+	scsi_host_put(shost);
+	return err;
+}
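+
+/*
+ * iscsi_get_chap() above answers ISCSI_UEVENT_GET_CHAP: it has the LLD
+ * fill a table of iscsi_chap_rec entries and multicasts the reply,
+ * retrying unless the send fails with -ECONNREFUSED.
+ */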
+
+static int iscsi_delete_chap(struct iscsi_transport *transport,
+			     struct iscsi_uevent *ev)
+{
+	struct Scsi_Host *shost;
+	int err = 0;
+
+	if (!transport->delete_chap)
+		return -ENOSYS;
+
+	shost = scsi_host_lookup(ev->u.delete_chap.host_no);
+	if (!shost) {
+		printk(KERN_ERR "%s could not find host no %u\n",
+		       __func__, ev->u.delete_chap.host_no);
+		return -ENODEV;
+	}
+
+	err = transport->delete_chap(shost, ev->u.delete_chap.chap_tbl_idx);
+	scsi_host_put(shost);
+	return err;
+}
+
+static int
 iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
 {
 	int err = 0;
@@ -1941,7 +2119,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
 	switch (nlh->nlmsg_type) {
 	case ISCSI_UEVENT_CREATE_SESSION:
 		err = iscsi_if_create_session(priv, ep, ev,
-					      NETLINK_CREDS(skb)->pid,
+					      NETLINK_CB(skb).pid,
 					      ev->u.c_session.initial_cmdsn,
 					      ev->u.c_session.cmds_max,
 					      ev->u.c_session.queue_depth);
@@ -1954,7 +2132,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
 		}
 
 		err = iscsi_if_create_session(priv, ep, ev,
-					NETLINK_CREDS(skb)->pid,
+					NETLINK_CB(skb).pid,
 					ev->u.c_bound_session.initial_cmdsn,
 					ev->u.c_bound_session.cmds_max,
 					ev->u.c_bound_session.queue_depth);
@@ -2059,6 +2237,15 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
 		err = iscsi_set_iface_params(transport, ev,
 					     nlmsg_attrlen(nlh, sizeof(*ev)));
 		break;
+	case ISCSI_UEVENT_PING:
+		err = iscsi_send_ping(transport, ev);
+		break;
+	case ISCSI_UEVENT_GET_CHAP:
+		err = iscsi_get_chap(transport, nlh);
+		break;
+	case ISCSI_UEVENT_DELETE_CHAP:
+		err = iscsi_delete_chap(transport, ev);
+		break;
 	default:
 		err = -ENOSYS;
 		break;
@@ -2108,9 +2295,11 @@ iscsi_if_rx(struct sk_buff *skb)
 			 */
 			if (ev->type == ISCSI_UEVENT_GET_STATS && !err)
 				break;
+			if (ev->type == ISCSI_UEVENT_GET_CHAP && !err)
+				break;
 			err = iscsi_if_send_reply(group, nlh->nlmsg_seq,
 				nlh->nlmsg_type, 0, 0, ev, sizeof(*ev));
-		} while (err < 0 && err != -ECONNREFUSED);
+		} while (err < 0 && err != -ECONNREFUSED && err != -ESRCH);
 		skb_pull(skb, rlen);
 	}
 	mutex_unlock(&rx_queue_mutex);
@@ -2286,6 +2475,8 @@ iscsi_session_attr(username, ISCSI_PARAM_USERNAME, 1);
 iscsi_session_attr(username_in, ISCSI_PARAM_USERNAME_IN, 1);
 iscsi_session_attr(password, ISCSI_PARAM_PASSWORD, 1);
 iscsi_session_attr(password_in, ISCSI_PARAM_PASSWORD_IN, 1);
+iscsi_session_attr(chap_out_idx, ISCSI_PARAM_CHAP_OUT_IDX, 1);
+iscsi_session_attr(chap_in_idx, ISCSI_PARAM_CHAP_IN_IDX, 1);
 iscsi_session_attr(fast_abort, ISCSI_PARAM_FAST_ABORT, 0);
 iscsi_session_attr(abort_tmo, ISCSI_PARAM_ABORT_TMO, 0);
 iscsi_session_attr(lu_reset_tmo, ISCSI_PARAM_LU_RESET_TMO, 0);
@@ -2382,6 +2573,8 @@ static struct attribute *iscsi_session_attrs[] = {
 	&dev_attr_priv_sess_recovery_tmo.attr,
 	&dev_attr_priv_sess_state.attr,
 	&dev_attr_priv_sess_creator.attr,
+	&dev_attr_sess_chap_out_idx.attr,
+	&dev_attr_sess_chap_in_idx.attr,
 	NULL,
 };
 
@@ -2413,6 +2606,10 @@ static umode_t iscsi_session_attr_is_visible(struct kobject *kobj,
 		param = ISCSI_PARAM_TARGET_NAME;
 	else if (attr == &dev_attr_sess_tpgt.attr)
 		param = ISCSI_PARAM_TPGT;
+	else if (attr == &dev_attr_sess_chap_in_idx.attr)
+		param = ISCSI_PARAM_CHAP_IN_IDX;
+	else if (attr == &dev_attr_sess_chap_out_idx.attr)
+		param = ISCSI_PARAM_CHAP_OUT_IDX;
 	else if (attr == &dev_attr_sess_password.attr)
 		param = ISCSI_PARAM_USERNAME;
 	else if (attr == &dev_attr_sess_password_in.attr)
@@ -2476,12 +2673,16 @@ iscsi_host_attr(netdev, ISCSI_HOST_PARAM_NETDEV_NAME);
 iscsi_host_attr(hwaddress, ISCSI_HOST_PARAM_HWADDRESS);
 iscsi_host_attr(ipaddress, ISCSI_HOST_PARAM_IPADDRESS);
 iscsi_host_attr(initiatorname, ISCSI_HOST_PARAM_INITIATOR_NAME);
+iscsi_host_attr(port_state, ISCSI_HOST_PARAM_PORT_STATE);
+iscsi_host_attr(port_speed, ISCSI_HOST_PARAM_PORT_SPEED);
 
 static struct attribute *iscsi_host_attrs[] = {
 	&dev_attr_host_netdev.attr,
 	&dev_attr_host_hwaddress.attr,
 	&dev_attr_host_ipaddress.attr,
 	&dev_attr_host_initiatorname.attr,
+	&dev_attr_host_port_state.attr,
+	&dev_attr_host_port_speed.attr,
 	NULL,
 };
 
@@ -2501,6 +2702,10 @@ static umode_t iscsi_host_attr_is_visible(struct kobject *kobj,
 		param = ISCSI_HOST_PARAM_IPADDRESS;
 	else if (attr == &dev_attr_host_initiatorname.attr)
 		param = ISCSI_HOST_PARAM_INITIATOR_NAME;
+	else if (attr == &dev_attr_host_port_state.attr)
+		param = ISCSI_HOST_PARAM_PORT_STATE;
+	else if (attr == &dev_attr_host_port_speed.attr)
+		param = ISCSI_HOST_PARAM_PORT_SPEED;
 	else {
 		WARN_ONCE(1, "Invalid host attr");
 		return 0;
@@ -2514,6 +2719,61 @@ static struct attribute_group iscsi_host_group = {
 	.is_visible = iscsi_host_attr_is_visible,
 };
 
+/* convert iscsi_port_speed values to ascii string name */
+static const struct {
+	enum iscsi_port_speed	value;
+	char			*name;
+} iscsi_port_speed_names[] = {
+	{ISCSI_PORT_SPEED_UNKNOWN,	"Unknown" },
+	{ISCSI_PORT_SPEED_10MBPS,	"10 Mbps" },
+	{ISCSI_PORT_SPEED_100MBPS,	"100 Mbps" },
+	{ISCSI_PORT_SPEED_1GBPS,	"1 Gbps" },
+	{ISCSI_PORT_SPEED_10GBPS,	"10 Gbps" },
+};
+
+char *iscsi_get_port_speed_name(struct Scsi_Host *shost)
+{
+	int i;
+	char *speed = "Unknown!";
+	struct iscsi_cls_host *ihost = shost->shost_data;
+	uint32_t port_speed = ihost->port_speed;
+
+	for (i = 0; i < ARRAY_SIZE(iscsi_port_speed_names); i++) {
+		if (iscsi_port_speed_names[i].value & port_speed) {
+			speed = iscsi_port_speed_names[i].name;
+			break;
+		}
+	}
+	return speed;
+}
+EXPORT_SYMBOL_GPL(iscsi_get_port_speed_name);
+
+/* convert iscsi_port_state values to ascii string name */
+static const struct {
+	enum iscsi_port_state	value;
+	char			*name;
+} iscsi_port_state_names[] = {
+	{ISCSI_PORT_STATE_DOWN,		"LINK DOWN" },
+	{ISCSI_PORT_STATE_UP,		"LINK UP" },
+};
+
+char *iscsi_get_port_state_name(struct Scsi_Host *shost)
+{
+	int i;
+	char *state = "Unknown!";
+	struct iscsi_cls_host *ihost = shost->shost_data;
+	uint32_t port_state = ihost->port_state;
+
+	for (i = 0; i < ARRAY_SIZE(iscsi_port_state_names); i++) {
+		if (iscsi_port_state_names[i].value & port_state) {
+			state = iscsi_port_state_names[i].name;
+			break;
+		}
+	}
+	return state;
+}
+EXPORT_SYMBOL_GPL(iscsi_get_port_state_name);
+
 static int iscsi_session_match(struct attribute_container *cont,
 			   struct device *dev)
 {
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 9d9330ae4213..f7565fc4f0e3 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -615,6 +615,7 @@ do_sas_phy_reset(struct device *dev, size_t count, int hard_reset)
 	error = i->f->phy_reset(phy, hard_reset);
 	if (error)
 		return error;
+	phy->enabled = 1;
 	return count;
 };
 
@@ -652,9 +653,21 @@ sas_phy_linkerror_attr(running_disparity_error_count);
 sas_phy_linkerror_attr(loss_of_dword_sync_count);
 sas_phy_linkerror_attr(phy_reset_problem_count);
 
+static int sas_phy_setup(struct transport_container *tc, struct device *dev,
+			 struct device *cdev)
+{
+	struct sas_phy *phy = dev_to_phy(dev);
+	struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
+	struct sas_internal *i = to_sas_internal(shost->transportt);
+
+	if (i->f->phy_setup)
+		i->f->phy_setup(phy);
+
+	return 0;
+}
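+
+/*
+ * sas_phy_setup() runs when a phy joins sas_phy_class (wired up in
+ * DECLARE_TRANSPORT_CLASS below) and lets the LLD initialise per-phy state
+ * via its phy_setup() callback; sas_phy_release() below mirrors it with
+ * phy_release().
+ */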
 
 static DECLARE_TRANSPORT_CLASS(sas_phy_class,
-		"sas_phy", NULL, NULL, NULL);
+		"sas_phy", sas_phy_setup, NULL, NULL);
 
 static int sas_phy_match(struct attribute_container *cont, struct device *dev)
 {
@@ -678,7 +691,11 @@ static int sas_phy_match(struct attribute_container *cont, struct device *dev)
 static void sas_phy_release(struct device *dev)
 {
 	struct sas_phy *phy = dev_to_phy(dev);
+	struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
+	struct sas_internal *i = to_sas_internal(shost->transportt);
 
+	if (i->f->phy_release)
+		i->f->phy_release(phy);
 	put_device(dev->parent);
 	kfree(phy);
 }
@@ -1044,6 +1061,29 @@ int scsi_is_sas_port(const struct device *dev)
 EXPORT_SYMBOL(scsi_is_sas_port);
 
 /**
+ * sas_port_get_phy - try to take a reference on a port member
+ * @port: port to check
+ */
+struct sas_phy *sas_port_get_phy(struct sas_port *port)
+{
+	struct sas_phy *phy;
+
+	mutex_lock(&port->phy_list_mutex);
+	if (list_empty(&port->phy_list))
+		phy = NULL;
+	else {
+		struct list_head *ent = port->phy_list.next;
+
+		phy = list_entry(ent, typeof(*phy), port_siblings);
+		get_device(&phy->dev);
+	}
+	mutex_unlock(&port->phy_list_mutex);
+
+	return phy;
+}
+EXPORT_SYMBOL(sas_port_get_phy);
+
+/**
  * sas_port_add_phy - add another phy to a port to form a wide port
  * @port:	port to add the phy to
  * @phy:	phy to add
@@ -1603,6 +1643,20 @@ sas_rphy_delete(struct sas_rphy *rphy)
 EXPORT_SYMBOL(sas_rphy_delete);
 
 /**
+ * sas_rphy_unlink  -  unlink SAS remote PHY
+ * @rphy:	SAS remote phy to unlink from its parent port
+ *
+ * Removes the parent port's reference to this rphy.
+ */
+void sas_rphy_unlink(struct sas_rphy *rphy)
+{
+	struct sas_port *parent = dev_to_sas_port(rphy->dev.parent);
+
+	parent->rphy = NULL;
+}
+EXPORT_SYMBOL(sas_rphy_unlink);
+
+/**
  * sas_rphy_remove  -  remove SAS remote PHY
  * @rphy:	SAS remote phy to remove
  *
@@ -1612,7 +1666,6 @@ void
 sas_rphy_remove(struct sas_rphy *rphy)
 {
 	struct device *dev = &rphy->dev;
-	struct sas_port *parent = dev_to_sas_port(dev->parent);
 
 	switch (rphy->identify.device_type) {
 	case SAS_END_DEVICE:
@@ -1626,10 +1679,9 @@ sas_rphy_remove(struct sas_rphy *rphy)
 		break;
 	}
 
+	sas_rphy_unlink(rphy);
 	transport_remove_device(dev);
 	device_del(dev);
-
-	parent->rphy = NULL;
 }
 EXPORT_SYMBOL(sas_rphy_remove);
 
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index d173b90b25e9..09e3df42a402 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -107,6 +107,7 @@ static int sd_suspend(struct device *, pm_message_t state);
 static int sd_resume(struct device *);
 static void sd_rescan(struct device *);
 static int sd_done(struct scsi_cmnd *);
+static int sd_eh_action(struct scsi_cmnd *, unsigned char *, int, int);
 static void sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer);
 static void scsi_disk_release(struct device *cdev);
 static void sd_print_sense_hdr(struct scsi_disk *, struct scsi_sense_hdr *);
@@ -346,6 +347,31 @@ sd_store_provisioning_mode(struct device *dev, struct device_attribute *attr,
 	return count;
 }
 
+static ssize_t
+sd_show_max_medium_access_timeouts(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	struct scsi_disk *sdkp = to_scsi_disk(dev);
+
+	return snprintf(buf, 20, "%u\n", sdkp->max_medium_access_timeouts);
+}
+
+static ssize_t
+sd_store_max_medium_access_timeouts(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf, size_t count)
+{
+	struct scsi_disk *sdkp = to_scsi_disk(dev);
+	int err;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EACCES;
+
+	err = kstrtouint(buf, 10, &sdkp->max_medium_access_timeouts);
+
+	return err ? err : count;
+}
+
 static struct device_attribute sd_disk_attrs[] = {
 	__ATTR(cache_type, S_IRUGO|S_IWUSR, sd_show_cache_type,
 	       sd_store_cache_type),
@@ -360,6 +386,9 @@ static struct device_attribute sd_disk_attrs[] = {
 	__ATTR(thin_provisioning, S_IRUGO, sd_show_thin_provisioning, NULL),
 	__ATTR(provisioning_mode, S_IRUGO|S_IWUSR, sd_show_provisioning_mode,
 	       sd_store_provisioning_mode),
+	__ATTR(max_medium_access_timeouts, S_IRUGO|S_IWUSR,
+	       sd_show_max_medium_access_timeouts,
+	       sd_store_max_medium_access_timeouts),
 	__ATTR_NULL,
 };
 
@@ -382,6 +411,7 @@ static struct scsi_driver sd_template = {
 	},
 	.rescan			= sd_rescan,
 	.done			= sd_done,
+	.eh_action		= sd_eh_action,
 };
 
 /*
@@ -497,6 +527,8 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
 		max(sdkp->physical_block_size,
 		    sdkp->unmap_granularity * logical_block_size);
 
+	sdkp->provisioning_mode = mode;
+
 	switch (mode) {
 
 	case SD_LBP_DISABLE:
@@ -524,8 +556,6 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
 
 	q->limits.max_discard_sectors = max_blocks * (logical_block_size >> 9);
 	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
-
-	sdkp->provisioning_mode = mode;
 }
 
 /**
@@ -1313,6 +1343,55 @@ static const struct block_device_operations sd_fops = {
 	.unlock_native_capacity	= sd_unlock_native_capacity,
 };
 
+/**
+ *	sd_eh_action - error handling callback
+ *	@scmd:		sd-issued command that has failed
+ *	@eh_cmnd:	The command that was sent during error handling
+ *	@eh_cmnd_len:	Length of eh_cmnd in bytes
+ *	@eh_disp:	The recovery disposition suggested by the midlayer
+ *
+ *	This function is called by the SCSI midlayer upon completion of
+ *	an error handling command (TEST UNIT READY, START STOP UNIT,
+ *	etc.). The command sent to the device by the error handler is
+ *	stored in eh_cmnd. The result of sending the eh command is
+ *	passed in eh_disp.
+ **/
+static int sd_eh_action(struct scsi_cmnd *scmd, unsigned char *eh_cmnd,
+			int eh_cmnd_len, int eh_disp)
+{
+	struct scsi_disk *sdkp = scsi_disk(scmd->request->rq_disk);
+
+	if (!scsi_device_online(scmd->device) ||
+	    !scsi_medium_access_command(scmd))
+		return eh_disp;
+
+	/*
+	 * The device has timed out executing a medium access command.
+	 * However, the TEST UNIT READY command sent during error
+	 * handling completed successfully. Either the device is in the
+	 * process of recovering, or it has suffered an internal failure
+	 * that prevents access to the storage medium.
+	 */
+	if (host_byte(scmd->result) == DID_TIME_OUT && eh_disp == SUCCESS &&
+	    eh_cmnd_len && eh_cmnd[0] == TEST_UNIT_READY)
+		sdkp->medium_access_timed_out++;
+
+	/*
+	 * If the device keeps failing read/write commands but TEST UNIT
+	 * READY always completes successfully we assume that medium
+	 * access is no longer possible and take the device offline.
+	 */
+	if (sdkp->medium_access_timed_out >= sdkp->max_medium_access_timeouts) {
+		scmd_printk(KERN_ERR, scmd,
+			    "Medium access timeout failure. Offlining disk!\n");
+		scsi_device_set_state(scmd->device, SDEV_OFFLINE);
+
+		return FAILED;
+	}
+
+	return eh_disp;
+}
+
 static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
 {
 	u64 start_lba = blk_rq_pos(scmd->request);
@@ -1402,6 +1481,8 @@ static int sd_done(struct scsi_cmnd *SCpnt)
 	    (!sense_valid || sense_deferred))
 		goto out;
 
+	sdkp->medium_access_timed_out = 0;
+
 	switch (sshdr.sense_key) {
 	case HARDWARE_ERROR:
 	case MEDIUM_ERROR:
@@ -2523,6 +2604,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
 	sdkp->RCD = 0;
 	sdkp->ATO = 0;
 	sdkp->first_scan = 1;
+	sdkp->max_medium_access_timeouts = SD_MAX_MEDIUM_TIMEOUTS;
 
 	sd_revalidate_disk(gd);
 
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 4163f2910e3d..f703f4827b6f 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -20,6 +20,7 @@
  */
 #define SD_MAX_RETRIES		5
 #define SD_PASSTHROUGH_RETRIES	1
+#define SD_MAX_MEDIUM_TIMEOUTS	2
 
 /*
  * Size of the initial data buffer for mode and read capacity data
@@ -59,6 +60,8 @@ struct scsi_disk {
 	u32		unmap_alignment;
 	u32		index;
 	unsigned int	physical_block_size;
+	unsigned int	max_medium_access_timeouts;
+	unsigned int	medium_access_timed_out;
 	u8		media_present;
 	u8		write_prot;
 	u8		protection_type;/* Data Integrity Field */
@@ -88,6 +91,38 @@ static inline struct scsi_disk *scsi_disk(struct gendisk *disk)
 		    (sdsk)->disk->disk_name, ##a) :			\
 	sdev_printk(prefix, (sdsk)->device, fmt, ##a)
 
+static inline int scsi_medium_access_command(struct scsi_cmnd *scmd)
+{
+	switch (scmd->cmnd[0]) {
+	case READ_6:
+	case READ_10:
+	case READ_12:
+	case READ_16:
+	case SYNCHRONIZE_CACHE:
+	case VERIFY:
+	case VERIFY_12:
+	case VERIFY_16:
+	case WRITE_6:
+	case WRITE_10:
+	case WRITE_12:
+	case WRITE_16:
+	case WRITE_SAME:
+	case WRITE_SAME_16:
+	case UNMAP:
+		return 1;
+	case VARIABLE_LENGTH_CMD:
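+		/*
+		 * For 32-byte CDBs the service action is carried in bytes
+		 * 8-9; the opcodes tested below all fit in the low byte.
+		 */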
+		switch (scmd->cmnd[9]) {
+		case READ_32:
+		case VERIFY_32:
+		case WRITE_32:
+		case WRITE_SAME_32:
+			return 1;
+		}
+	}
+
+	return 0;
+}
+
 /*
  * A DIF-capable target device can be formatted with different
  * protection schemes.  Currently 0 through 3 are defined:
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 9b28f39bac26..9262cdfa4b23 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -1177,6 +1177,7 @@ static int check_tape(struct scsi_tape *STp, struct file *filp)
 static int st_open(struct inode *inode, struct file *filp)
 {
 	int i, retval = (-EIO);
+	int resumed = 0;
 	struct scsi_tape *STp;
 	struct st_partstat *STps;
 	int dev = TAPE_NR(inode);
@@ -1211,6 +1212,11 @@ static int st_open(struct inode *inode, struct file *filp)
 	write_unlock(&st_dev_arr_lock);
 	STp->rew_at_close = STp->autorew_dev = (iminor(inode) & 0x80) == 0;
 
+	if (scsi_autopm_get_device(STp->device) < 0) {
+		retval = -EIO;
+		goto err_out;
+	}
+	resumed = 1;
 	if (!scsi_block_when_processing_errors(STp->device)) {
 		retval = (-ENXIO);
 		goto err_out;
@@ -1258,6 +1264,8 @@ static int st_open(struct inode *inode, struct file *filp)
 	normalize_buffer(STp->buffer);
 	STp->in_use = 0;
 	scsi_tape_put(STp);
+	if (resumed)
+		scsi_autopm_put_device(STp->device);
 	mutex_unlock(&st_mutex);
 	return retval;
 
@@ -1391,6 +1399,7 @@ static int st_release(struct inode *inode, struct file *filp)
 	write_lock(&st_dev_arr_lock);
 	STp->in_use = 0;
 	write_unlock(&st_dev_arr_lock);
+	scsi_autopm_put_device(STp->device);
 	scsi_tape_put(STp);
 
 	return result;
@@ -4154,6 +4163,7 @@ static int st_probe(struct device *dev)
 		if (error)
 			goto out_free_tape;
 	}
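+	/* probing is complete; let the tape runtime-suspend from now on */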
+	scsi_autopm_put_device(SDp);
 
 	sdev_printk(KERN_NOTICE, SDp,
 		    "Attached scsi tape %s\n", tape_name(tpnt));
@@ -4201,6 +4211,7 @@ static int st_remove(struct device *dev)
 	struct scsi_tape *tpnt;
 	int i, j, mode;
 
+	scsi_autopm_get_device(SDp);
 	write_lock(&st_dev_arr_lock);
 	for (i = 0; i < st_dev_max; i++) {
 		tpnt = scsi_tapes[i];
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
new file mode 100644
index 000000000000..efccd72c4a3e
--- /dev/null
+++ b/drivers/scsi/virtio_scsi.c
@@ -0,0 +1,594 @@
+/*
+ * Virtio SCSI HBA driver
+ *
+ * Copyright IBM Corp. 2010
+ * Copyright Red Hat, Inc. 2011
+ *
+ * Authors:
+ *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
+ *  Paolo Bonzini   <pbonzini@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/mempool.h>
+#include <linux/virtio.h>
+#include <linux/virtio_ids.h>
+#include <linux/virtio_config.h>
+#include <linux/virtio_scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+
+#define VIRTIO_SCSI_MEMPOOL_SZ 64
+
+/* Command queue element */
+struct virtio_scsi_cmd {
+	struct scsi_cmnd *sc;
+	struct completion *comp;
+	union {
+		struct virtio_scsi_cmd_req       cmd;
+		struct virtio_scsi_ctrl_tmf_req  tmf;
+		struct virtio_scsi_ctrl_an_req   an;
+	} req;
+	union {
+		struct virtio_scsi_cmd_resp      cmd;
+		struct virtio_scsi_ctrl_tmf_resp tmf;
+		struct virtio_scsi_ctrl_an_resp  an;
+		struct virtio_scsi_event         evt;
+	} resp;
+} ____cacheline_aligned_in_smp;
+
+/* Driver instance state */
+struct virtio_scsi {
+	/* Protects ctrl_vq, req_vq and sg[] */
+	spinlock_t vq_lock;
+
+	struct virtio_device *vdev;
+	struct virtqueue *ctrl_vq;
+	struct virtqueue *event_vq;
+	struct virtqueue *req_vq;
+
+	/* For sglist construction when adding commands to the virtqueue.  */
+	struct scatterlist sg[];
+};
+
+static struct kmem_cache *virtscsi_cmd_cache;
+static mempool_t *virtscsi_cmd_pool;
+
+static inline struct Scsi_Host *virtio_scsi_host(struct virtio_device *vdev)
+{
+	return vdev->priv;
+}
+
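+/*
+ * Fold the device-reported residual into the command: for bidirectional
+ * commands the residual is credited to the data-in buffer first and any
+ * remainder is charged to the data-out buffer.
+ */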
+static void virtscsi_compute_resid(struct scsi_cmnd *sc, u32 resid)
+{
+	if (!resid)
+		return;
+
+	if (!scsi_bidi_cmnd(sc)) {
+		scsi_set_resid(sc, resid);
+		return;
+	}
+
+	scsi_in(sc)->resid = min(resid, scsi_in(sc)->length);
+	scsi_out(sc)->resid = resid - scsi_in(sc)->resid;
+}
+
+/**
+ * virtscsi_complete_cmd - finish a scsi_cmnd and invoke scsi_done
+ *
+ * Called with vq_lock held.
+ */
+static void virtscsi_complete_cmd(void *buf)
+{
+	struct virtio_scsi_cmd *cmd = buf;
+	struct scsi_cmnd *sc = cmd->sc;
+	struct virtio_scsi_cmd_resp *resp = &cmd->resp.cmd;
+
+	dev_dbg(&sc->device->sdev_gendev,
+		"cmd %p response %u status %#02x sense_len %u\n",
+		sc, resp->response, resp->status, resp->sense_len);
+
+	sc->result = resp->status;
+	virtscsi_compute_resid(sc, resp->resid);
+	switch (resp->response) {
+	case VIRTIO_SCSI_S_OK:
+		set_host_byte(sc, DID_OK);
+		break;
+	case VIRTIO_SCSI_S_OVERRUN:
+		set_host_byte(sc, DID_ERROR);
+		break;
+	case VIRTIO_SCSI_S_ABORTED:
+		set_host_byte(sc, DID_ABORT);
+		break;
+	case VIRTIO_SCSI_S_BAD_TARGET:
+		set_host_byte(sc, DID_BAD_TARGET);
+		break;
+	case VIRTIO_SCSI_S_RESET:
+		set_host_byte(sc, DID_RESET);
+		break;
+	case VIRTIO_SCSI_S_BUSY:
+		set_host_byte(sc, DID_BUS_BUSY);
+		break;
+	case VIRTIO_SCSI_S_TRANSPORT_FAILURE:
+		set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
+		break;
+	case VIRTIO_SCSI_S_TARGET_FAILURE:
+		set_host_byte(sc, DID_TARGET_FAILURE);
+		break;
+	case VIRTIO_SCSI_S_NEXUS_FAILURE:
+		set_host_byte(sc, DID_NEXUS_FAILURE);
+		break;
+	default:
+		scmd_printk(KERN_WARNING, sc, "Unknown response %d\n",
+			    resp->response);
+		/* fall through */
+	case VIRTIO_SCSI_S_FAILURE:
+		set_host_byte(sc, DID_ERROR);
+		break;
+	}
+
+	WARN_ON(resp->sense_len > VIRTIO_SCSI_SENSE_SIZE);
+	if (sc->sense_buffer) {
+		memcpy(sc->sense_buffer, resp->sense,
+		       min_t(u32, resp->sense_len, VIRTIO_SCSI_SENSE_SIZE));
+		if (resp->sense_len)
+			set_driver_byte(sc, DRIVER_SENSE);
+	}
+
+	mempool_free(cmd, virtscsi_cmd_pool);
+	sc->scsi_done(sc);
+}
+
+static void virtscsi_vq_done(struct virtqueue *vq, void (*fn)(void *buf))
+{
+	struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
+	struct virtio_scsi *vscsi = shost_priv(sh);
+	void *buf;
+	unsigned long flags;
+	unsigned int len;
+
+	spin_lock_irqsave(&vscsi->vq_lock, flags);
+
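+	/*
+	 * Drain the virtqueue with callbacks suppressed; if more buffers
+	 * arrive before virtqueue_enable_cb() re-arms the callback it
+	 * returns false and we go around again to pick them up.
+	 */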
+	do {
+		virtqueue_disable_cb(vq);
+		while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
+			fn(buf);
+	} while (!virtqueue_enable_cb(vq));
+
+	spin_unlock_irqrestore(&vscsi->vq_lock, flags);
+}
+
+static void virtscsi_req_done(struct virtqueue *vq)
+{
+	virtscsi_vq_done(vq, virtscsi_complete_cmd);
+}
+
+static void virtscsi_complete_free(void *buf)
+{
+	struct virtio_scsi_cmd *cmd = buf;
+
+	if (cmd->comp)
+		complete_all(cmd->comp);
+	mempool_free(cmd, virtscsi_cmd_pool);
+}
+
+static void virtscsi_ctrl_done(struct virtqueue *vq)
+{
+	virtscsi_vq_done(vq, virtscsi_complete_free);
+}
+
+static void virtscsi_event_done(struct virtqueue *vq)
+{
+	virtscsi_vq_done(vq, virtscsi_complete_free);
+}
+
+static void virtscsi_map_sgl(struct scatterlist *sg, unsigned int *p_idx,
+			     struct scsi_data_buffer *sdb)
+{
+	struct sg_table *table = &sdb->table;
+	struct scatterlist *sg_elem;
+	unsigned int idx = *p_idx;
+	int i;
+
+	for_each_sg(table->sgl, sg_elem, table->nents, i)
+		sg_set_buf(&sg[idx++], sg_virt(sg_elem), sg_elem->length);
+
+	*p_idx = idx;
+}
+
+/**
+ * virtscsi_map_cmd - map a scsi_cmd to a virtqueue scatterlist
+ * @vscsi	: virtio_scsi state
+ * @cmd		: command structure
+ * @out_num	: number of read-only elements
+ * @in_num	: number of write-only elements
+ * @req_size	: size of the request buffer
+ * @resp_size	: size of the response buffer
+ *
+ * Called with vq_lock held.
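+ *
+ * The scatterlist is laid out as: request header, data-out buffers (the
+ * device-readable half, counted by *out_num), then response header and
+ * data-in buffers (the device-writable half, counted by *in_num).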
+ */
+static void virtscsi_map_cmd(struct virtio_scsi *vscsi,
+			     struct virtio_scsi_cmd *cmd,
+			     unsigned *out_num, unsigned *in_num,
+			     size_t req_size, size_t resp_size)
+{
+	struct scsi_cmnd *sc = cmd->sc;
+	struct scatterlist *sg = vscsi->sg;
+	unsigned int idx = 0;
+
+	if (sc) {
+		struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
+		BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize);
+
+		/* TODO: check feature bit and fail if unsupported?  */
+		BUG_ON(sc->sc_data_direction == DMA_BIDIRECTIONAL);
+	}
+
+	/* Request header.  */
+	sg_set_buf(&sg[idx++], &cmd->req, req_size);
+
+	/* Data-out buffer.  */
+	if (sc && sc->sc_data_direction != DMA_FROM_DEVICE)
+		virtscsi_map_sgl(sg, &idx, scsi_out(sc));
+
+	*out_num = idx;
+
+	/* Response header.  */
+	sg_set_buf(&sg[idx++], &cmd->resp, resp_size);
+
+	/* Data-in buffer */
+	if (sc && sc->sc_data_direction != DMA_TO_DEVICE)
+		virtscsi_map_sgl(sg, &idx, scsi_in(sc));
+
+	*in_num = idx - *out_num;
+}
+
+static int virtscsi_kick_cmd(struct virtio_scsi *vscsi, struct virtqueue *vq,
+			     struct virtio_scsi_cmd *cmd,
+			     size_t req_size, size_t resp_size, gfp_t gfp)
+{
+	unsigned int out_num, in_num;
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&vscsi->vq_lock, flags);
+
+	virtscsi_map_cmd(vscsi, cmd, &out_num, &in_num, req_size, resp_size);
+
+	ret = virtqueue_add_buf(vq, vscsi->sg, out_num, in_num, cmd, gfp);
+	if (ret >= 0)
+		virtqueue_kick(vq);
+
+	spin_unlock_irqrestore(&vscsi->vq_lock, flags);
+	return ret;
+}
+
+static int virtscsi_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
+{
+	struct virtio_scsi *vscsi = shost_priv(sh);
+	struct virtio_scsi_cmd *cmd;
+	int ret;
+
+	dev_dbg(&sc->device->sdev_gendev,
+		"cmd %p CDB: %#02x\n", sc, sc->cmnd[0]);
+
+	ret = SCSI_MLQUEUE_HOST_BUSY;
+	cmd = mempool_alloc(virtscsi_cmd_pool, GFP_ATOMIC);
+	if (!cmd)
+		goto out;
+
+	memset(cmd, 0, sizeof(*cmd));
+	cmd->sc = sc;
+	cmd->req.cmd = (struct virtio_scsi_cmd_req){
+		.lun[0] = 1,
+		.lun[1] = sc->device->id,
+		.lun[2] = (sc->device->lun >> 8) | 0x40,
+		.lun[3] = sc->device->lun & 0xff,
+		.tag = (unsigned long)sc,
+		.task_attr = VIRTIO_SCSI_S_SIMPLE,
+		.prio = 0,
+		.crn = 0,
+	};
+
+	BUG_ON(sc->cmd_len > VIRTIO_SCSI_CDB_SIZE);
+	memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len);
+
+	if (virtscsi_kick_cmd(vscsi, vscsi->req_vq, cmd,
+			      sizeof cmd->req.cmd, sizeof cmd->resp.cmd,
+			      GFP_ATOMIC) >= 0)
+		ret = 0;
+
+out:
+	return ret;
+}
+
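The 8-byte LUN field built above encodes a single-level LUN (byte 0 fixed at 1, byte 1 the target id, bytes 2-3 the LUN in flat-space format), and the two task-management paths below repeat the same encoding. A hedged refactoring sketch of a shared helper, not part of this patch (virtscsi_set_lun is hypothetical):

	static void virtscsi_set_lun(u8 *lun, struct scsi_device *sdev)
	{
		/* single-level LUN: 1, target, flat-space LUN, zero padding */
		lun[0] = 1;
		lun[1] = sdev->id;
		lun[2] = (sdev->lun >> 8) | 0x40;
		lun[3] = sdev->lun & 0xff;
		memset(lun + 4, 0, 4);
	}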
+static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd)
+{
+	DECLARE_COMPLETION_ONSTACK(comp);
+	int ret;
+
+	cmd->comp = &comp;
+	ret = virtscsi_kick_cmd(vscsi, vscsi->ctrl_vq, cmd,
+			       sizeof cmd->req.tmf, sizeof cmd->resp.tmf,
+			       GFP_NOIO);
+	if (ret < 0)
+		return FAILED;
+
+	wait_for_completion(&comp);
+	if (cmd->resp.tmf.response != VIRTIO_SCSI_S_OK &&
+	    cmd->resp.tmf.response != VIRTIO_SCSI_S_FUNCTION_SUCCEEDED)
+		return FAILED;
+
+	return SUCCESS;
+}
+
+static int virtscsi_device_reset(struct scsi_cmnd *sc)
+{
+	struct virtio_scsi *vscsi = shost_priv(sc->device->host);
+	struct virtio_scsi_cmd *cmd;
+
+	sdev_printk(KERN_INFO, sc->device, "device reset\n");
+	cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO);
+	if (!cmd)
+		return FAILED;
+
+	memset(cmd, 0, sizeof(*cmd));
+	cmd->sc = sc;
+	cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
+		.type = VIRTIO_SCSI_T_TMF,
+		.subtype = VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET,
+		.lun[0] = 1,
+		.lun[1] = sc->device->id,
+		.lun[2] = (sc->device->lun >> 8) | 0x40,
+		.lun[3] = sc->device->lun & 0xff,
+	};
+	return virtscsi_tmf(vscsi, cmd);
+}
+
+static int virtscsi_abort(struct scsi_cmnd *sc)
+{
+	struct virtio_scsi *vscsi = shost_priv(sc->device->host);
+	struct virtio_scsi_cmd *cmd;
+
+	scmd_printk(KERN_INFO, sc, "abort\n");
+	cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO);
+	if (!cmd)
+		return FAILED;
+
+	memset(cmd, 0, sizeof(*cmd));
+	cmd->sc = sc;
+	cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
+		.type = VIRTIO_SCSI_T_TMF,
+		.subtype = VIRTIO_SCSI_T_TMF_ABORT_TASK,
+		.lun[0] = 1,
+		.lun[1] = sc->device->id,
+		.lun[2] = (sc->device->lun >> 8) | 0x40,
+		.lun[3] = sc->device->lun & 0xff,
+		.tag = (unsigned long)sc,
+	};
+	return virtscsi_tmf(vscsi, cmd);
+}
+
+static struct scsi_host_template virtscsi_host_template = {
+	.module = THIS_MODULE,
+	.name = "Virtio SCSI HBA",
+	.proc_name = "virtio_scsi",
+	.queuecommand = virtscsi_queuecommand,
+	.this_id = -1,
+	.eh_abort_handler = virtscsi_abort,
+	.eh_device_reset_handler = virtscsi_device_reset,
+
+	.can_queue = 1024,
+	.dma_boundary = UINT_MAX,
+	.use_clustering = ENABLE_CLUSTERING,
+};
+
+#define virtscsi_config_get(vdev, fld) \
+	({ \
+		typeof(((struct virtio_scsi_config *)0)->fld) __val; \
+		vdev->config->get(vdev, \
+				  offsetof(struct virtio_scsi_config, fld), \
+				  &__val, sizeof(__val)); \
+		__val; \
+	})
+
+#define virtscsi_config_set(vdev, fld, val) \
+	(void)({ \
+		typeof(((struct virtio_scsi_config *)0)->fld) __val = (val); \
+		vdev->config->set(vdev, \
+				  offsetof(struct virtio_scsi_config, fld), \
+				  &__val, sizeof(__val)); \
+	})
+
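Both accessors are typed on the named struct virtio_scsi_config field via typeof(), so a misspelled field name fails to compile; for example (a sketch of calls that also appear in the probe path below):

	u32 seg_max = virtscsi_config_get(vdev, seg_max);
	virtscsi_config_set(vdev, sense_size, VIRTIO_SCSI_SENSE_SIZE);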
+static int virtscsi_init(struct virtio_device *vdev,
+			 struct virtio_scsi *vscsi)
+{
+	int err;
+	struct virtqueue *vqs[3];
+	vq_callback_t *callbacks[] = {
+		virtscsi_ctrl_done,
+		virtscsi_event_done,
+		virtscsi_req_done
+	};
+	const char *names[] = {
+		"control",
+		"event",
+		"request"
+	};
+
+	/* Discover virtqueues and write information to configuration.  */
+	err = vdev->config->find_vqs(vdev, 3, vqs, callbacks, names);
+	if (err)
+		return err;
+
+	vscsi->ctrl_vq = vqs[0];
+	vscsi->event_vq = vqs[1];
+	vscsi->req_vq = vqs[2];
+
+	virtscsi_config_set(vdev, cdb_size, VIRTIO_SCSI_CDB_SIZE);
+	virtscsi_config_set(vdev, sense_size, VIRTIO_SCSI_SENSE_SIZE);
+	return 0;
+}
+
+static int __devinit virtscsi_probe(struct virtio_device *vdev)
+{
+	struct Scsi_Host *shost;
+	struct virtio_scsi *vscsi;
+	int err;
+	u32 sg_elems;
+	u32 cmd_per_lun;
+
+	/* We need to know how many data segments there can be before we
+	 * allocate; two extra sg elements are needed for the request and
+	 * response headers.
+	 */
+	sg_elems = virtscsi_config_get(vdev, seg_max) ?: 1;
+
+	/* Allocate memory and link the structs together.  */
+	shost = scsi_host_alloc(&virtscsi_host_template,
+		sizeof(*vscsi) + sizeof(vscsi->sg[0]) * (sg_elems + 2));
+
+	if (!shost)
+		return -ENOMEM;
+
+	shost->sg_tablesize = sg_elems;
+	vscsi = shost_priv(shost);
+	vscsi->vdev = vdev;
+	vdev->priv = shost;
+
+	/* Initialize the virtqueue lock and the sglist scratch table.  */
+	spin_lock_init(&vscsi->vq_lock);
+	sg_init_table(vscsi->sg, sg_elems + 2);
+
+	err = virtscsi_init(vdev, vscsi);
+	if (err)
+		goto virtscsi_init_failed;
+
+	cmd_per_lun = virtscsi_config_get(vdev, cmd_per_lun) ?: 1;
+	shost->cmd_per_lun = min_t(u32, cmd_per_lun, shost->can_queue);
+	shost->max_sectors = virtscsi_config_get(vdev, max_sectors) ?: 0xFFFF;
+	shost->max_lun = virtscsi_config_get(vdev, max_lun) + 1;
+	shost->max_id = virtscsi_config_get(vdev, max_target) + 1;
+	shost->max_channel = 0;
+	shost->max_cmd_len = VIRTIO_SCSI_CDB_SIZE;
+	err = scsi_add_host(shost, &vdev->dev);
+	if (err)
+		goto scsi_add_host_failed;
+
+	scsi_scan_host(shost);
+
+	return 0;
+
+scsi_add_host_failed:
+	vdev->config->del_vqs(vdev);
+virtscsi_init_failed:
+	scsi_host_put(shost);
+	return err;
+}
+
+static void virtscsi_remove_vqs(struct virtio_device *vdev)
+{
+	/* Stop all the virtqueues. */
+	vdev->config->reset(vdev);
+
+	vdev->config->del_vqs(vdev);
+}
+
+static void __devexit virtscsi_remove(struct virtio_device *vdev)
+{
+	struct Scsi_Host *shost = virtio_scsi_host(vdev);
+
+	scsi_remove_host(shost);
+
+	virtscsi_remove_vqs(vdev);
+	scsi_host_put(shost);
+}
+
+#ifdef CONFIG_PM
+static int virtscsi_freeze(struct virtio_device *vdev)
+{
+	virtscsi_remove_vqs(vdev);
+	return 0;
+}
+
+static int virtscsi_restore(struct virtio_device *vdev)
+{
+	struct Scsi_Host *sh = virtio_scsi_host(vdev);
+	struct virtio_scsi *vscsi = shost_priv(sh);
+
+	return virtscsi_init(vdev, vscsi);
+}
+#endif
+
+static struct virtio_device_id id_table[] = {
+	{ VIRTIO_ID_SCSI, VIRTIO_DEV_ANY_ID },
+	{ 0 },
+};
+
+static struct virtio_driver virtio_scsi_driver = {
+	.driver.name = KBUILD_MODNAME,
+	.driver.owner = THIS_MODULE,
+	.id_table = id_table,
+	.probe = virtscsi_probe,
+#ifdef CONFIG_PM
+	.freeze = virtscsi_freeze,
+	.restore = virtscsi_restore,
+#endif
+	.remove = __devexit_p(virtscsi_remove),
+};
+
+static int __init init(void)
+{
+	int ret = -ENOMEM;
+
+	virtscsi_cmd_cache = KMEM_CACHE(virtio_scsi_cmd, 0);
+	if (!virtscsi_cmd_cache) {
+		printk(KERN_ERR "kmem_cache_create() for "
+				"virtscsi_cmd_cache failed\n");
+		goto error;
+	}
+
+	virtscsi_cmd_pool =
+		mempool_create_slab_pool(VIRTIO_SCSI_MEMPOOL_SZ,
+					 virtscsi_cmd_cache);
+	if (!virtscsi_cmd_pool) {
+		printk(KERN_ERR "mempool_create() for"
+				"virtscsi_cmd_pool failed\n");
+		goto error;
+	}
+	ret = register_virtio_driver(&virtio_scsi_driver);
+	if (ret < 0)
+		goto error;
+
+	return 0;
+
+error:
+	if (virtscsi_cmd_pool) {
+		mempool_destroy(virtscsi_cmd_pool);
+		virtscsi_cmd_pool = NULL;
+	}
+	if (virtscsi_cmd_cache) {
+		kmem_cache_destroy(virtscsi_cmd_cache);
+		virtscsi_cmd_cache = NULL;
+	}
+	return ret;
+}
+
+static void __exit fini(void)
+{
+	unregister_virtio_driver(&virtio_scsi_driver);
+	mempool_destroy(virtscsi_cmd_pool);
+	kmem_cache_destroy(virtscsi_cmd_cache);
+}
+module_init(init);
+module_exit(fini);
+
+MODULE_DEVICE_TABLE(virtio, id_table);
+MODULE_DESCRIPTION("Virtio SCSI HBA driver");
+MODULE_LICENSE("GPL");