author		Linus Torvalds <torvalds@linux-foundation.org>	2009-12-09 19:42:25 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-12-09 19:42:25 -0800
commit		382f51fe2f2276344d8a21447656778cdf6583b6 (patch)
tree		c2836a2cca4126c9c026ce5aa2fdf9f1c8ccded6 /drivers
parent		701791cc3c8fc6dd83f6ec8af7e2541b4a316606 (diff)
parent		54987386ee3790f3900de4df2ed4deb0e18dfc9f (diff)
download	linux-382f51fe2f2276344d8a21447656778cdf6583b6.tar.gz
Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (222 commits)
  [SCSI] zfcp: Remove flag ZFCP_STATUS_FSFREQ_TMFUNCNOTSUPP
  [SCSI] zfcp: Activate fc4s attributes for zfcp in FC transport class
  [SCSI] zfcp: Block scsi_eh thread for rport state BLOCKED
  [SCSI] zfcp: Update FSF error reporting
  [SCSI] zfcp: Improve ELS ADISC handling
  [SCSI] zfcp: Simplify handling of ct and els requests
  [SCSI] zfcp: Remove ZFCP_DID_MASK
  [SCSI] zfcp: Move WKA port to zfcp FC code
  [SCSI] zfcp: Use common code definitions for FC CT structs
  [SCSI] zfcp: Use common code definitions for FC ELS structs
  [SCSI] zfcp: Update FCP protocol related code
  [SCSI] zfcp: Don't fail SCSI commands when transitioning to blocked fc_rport
  [SCSI] zfcp: Assign scheduled work to driver queue
  [SCSI] zfcp: Remove STATUS_COMMON_REMOVE flag as it is not required anymore
  [SCSI] zfcp: Implement module unloading
  [SCSI] zfcp: Merge trace code for fsf requests in one function
  [SCSI] zfcp: Access ports and units with container_of in sysfs code
  [SCSI] zfcp: Remove suspend callback
  [SCSI] zfcp: Remove global config_mutex
  [SCSI] zfcp: Replace local reference counting with common kref
  ...
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/ata/libata-scsi.c  7
-rw-r--r--  drivers/ata/sata_nv.c  2
-rw-r--r--  drivers/infiniband/ulp/iser/iscsi_iser.c  1
-rw-r--r--  drivers/md/dm-mpath.c  8
-rw-r--r--  drivers/message/fusion/mptbase.h  4
-rw-r--r--  drivers/message/fusion/mptctl.c  5
-rw-r--r--  drivers/message/fusion/mptscsih.c  95
-rw-r--r--  drivers/message/fusion/mptscsih.h  3
-rw-r--r--  drivers/s390/scsi/zfcp_aux.c  388
-rw-r--r--  drivers/s390/scsi/zfcp_ccw.c  179
-rw-r--r--  drivers/s390/scsi/zfcp_cfdc.c  26
-rw-r--r--  drivers/s390/scsi/zfcp_dbf.c  134
-rw-r--r--  drivers/s390/scsi/zfcp_dbf.h  10
-rw-r--r--  drivers/s390/scsi/zfcp_def.h  323
-rw-r--r--  drivers/s390/scsi/zfcp_erp.c  138
-rw-r--r--  drivers/s390/scsi/zfcp_ext.h  38
-rw-r--r--  drivers/s390/scsi/zfcp_fc.c  677
-rw-r--r--  drivers/s390/scsi/zfcp_fc.h  260
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.c  367
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.h  53
-rw-r--r--  drivers/s390/scsi/zfcp_scsi.c  156
-rw-r--r--  drivers/s390/scsi/zfcp_sysfs.c  250
-rw-r--r--  drivers/scsi/3w-9xxx.c  6
-rw-r--r--  drivers/scsi/3w-sas.c  1924
-rw-r--r--  drivers/scsi/3w-sas.h  396
-rw-r--r--  drivers/scsi/3w-xxxx.c  6
-rw-r--r--  drivers/scsi/53c700.c  7
-rw-r--r--  drivers/scsi/Kconfig  29
-rw-r--r--  drivers/scsi/Makefile  3
-rw-r--r--  drivers/scsi/aacraid/linit.c  6
-rw-r--r--  drivers/scsi/arcmsr/arcmsr_hba.c  5
-rw-r--r--  drivers/scsi/be2iscsi/be.h  24
-rw-r--r--  drivers/scsi/be2iscsi/be_cmds.c  263
-rw-r--r--  drivers/scsi/be2iscsi/be_cmds.h  37
-rw-r--r--  drivers/scsi/be2iscsi/be_iscsi.c  23
-rw-r--r--  drivers/scsi/be2iscsi/be_main.c  1019
-rw-r--r--  drivers/scsi/be2iscsi/be_main.h  49
-rw-r--r--  drivers/scsi/be2iscsi/be_mgmt.c  69
-rw-r--r--  drivers/scsi/be2iscsi/be_mgmt.h  8
-rw-r--r--  drivers/scsi/bfa/bfa_cb_ioim_macros.h  10
-rw-r--r--  drivers/scsi/bfa/bfa_cee.c  2
-rw-r--r--  drivers/scsi/bfa/bfa_csdebug.c  4
-rw-r--r--  drivers/scsi/bfa/bfa_fcpim.c  4
-rw-r--r--  drivers/scsi/bfa/bfa_fcpim_priv.h  4
-rw-r--r--  drivers/scsi/bfa/bfa_fcport.c  59
-rw-r--r--  drivers/scsi/bfa/bfa_fcs_lport.c  11
-rw-r--r--  drivers/scsi/bfa/bfa_fcxp.c  8
-rw-r--r--  drivers/scsi/bfa/bfa_intr.c  2
-rw-r--r--  drivers/scsi/bfa/bfa_intr_priv.h  18
-rw-r--r--  drivers/scsi/bfa/bfa_ioc.c  10
-rw-r--r--  drivers/scsi/bfa/bfa_ioc.h  12
-rw-r--r--  drivers/scsi/bfa/bfa_iocfc.c  8
-rw-r--r--  drivers/scsi/bfa/bfa_iocfc.h  8
-rw-r--r--  drivers/scsi/bfa/bfa_ioim.c  4
-rw-r--r--  drivers/scsi/bfa/bfa_itnim.c  6
-rw-r--r--  drivers/scsi/bfa/bfa_log.c  4
-rw-r--r--  drivers/scsi/bfa/bfa_port_priv.h  4
-rw-r--r--  drivers/scsi/bfa/bfa_rport.c  6
-rw-r--r--  drivers/scsi/bfa/bfa_tskim.c  9
-rw-r--r--  drivers/scsi/bfa/bfa_uf.c  2
-rw-r--r--  drivers/scsi/bfa/bfad.c  6
-rw-r--r--  drivers/scsi/bfa/bfad_fwimg.c  8
-rw-r--r--  drivers/scsi/bfa/bfad_im.c  2
-rw-r--r--  drivers/scsi/bfa/bfad_im_compat.h  2
-rw-r--r--  drivers/scsi/bfa/bfad_intr.c  3
-rw-r--r--  drivers/scsi/bfa/fabric.c  18
-rw-r--r--  drivers/scsi/bfa/fcbuild.c  190
-rw-r--r--  drivers/scsi/bfa/fcbuild.h  6
-rw-r--r--  drivers/scsi/bfa/fcpim.c  7
-rw-r--r--  drivers/scsi/bfa/fcs.h  2
-rw-r--r--  drivers/scsi/bfa/fdmi.c  8
-rw-r--r--  drivers/scsi/bfa/include/aen/bfa_aen.h  2
-rw-r--r--  drivers/scsi/bfa/include/bfa.h  10
-rw-r--r--  drivers/scsi/bfa/include/bfa_svc.h  8
-rw-r--r--  drivers/scsi/bfa/include/bfi/bfi.h  12
-rw-r--r--  drivers/scsi/bfa/include/bfi/bfi_ioc.h  2
-rw-r--r--  drivers/scsi/bfa/include/bfi/bfi_lps.h  4
-rw-r--r--  drivers/scsi/bfa/include/bfi/bfi_rport.h  8
-rw-r--r--  drivers/scsi/bfa/include/cs/bfa_checksum.h  6
-rw-r--r--  drivers/scsi/bfa/include/cs/bfa_sm.h  6
-rw-r--r--  drivers/scsi/bfa/include/cs/bfa_trc.h  2
-rw-r--r--  drivers/scsi/bfa/include/fcs/bfa_fcs_fabric.h  2
-rw-r--r--  drivers/scsi/bfa/include/fcs/bfa_fcs_lport.h  17
-rw-r--r--  drivers/scsi/bfa/include/protocol/ct.h  10
-rw-r--r--  drivers/scsi/bfa/include/protocol/fc.h  22
-rw-r--r--  drivers/scsi/bfa/loop.c  233
-rw-r--r--  drivers/scsi/bfa/lport_api.c  15
-rw-r--r--  drivers/scsi/bfa/ns.c  5
-rw-r--r--  drivers/scsi/bfa/plog.c  2
-rw-r--r--  drivers/scsi/bfa/rport_ftrs.c  28
-rw-r--r--  drivers/scsi/bfa/vfapi.c  2
-rw-r--r--  drivers/scsi/bfa/vport.c  20
-rw-r--r--  drivers/scsi/bnx2i/bnx2i.h  1
-rw-r--r--  drivers/scsi/bnx2i/bnx2i_iscsi.c  10
-rw-r--r--  drivers/scsi/constants.c  1
-rw-r--r--  drivers/scsi/cxgb3i/cxgb3i_iscsi.c  2
-rw-r--r--  drivers/scsi/device_handler/scsi_dh.c  28
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_alua.c  139
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_emc.c  7
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_hp_sw.c  94
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_rdac.c  115
-rw-r--r--  drivers/scsi/fcoe/fcoe.c  1376
-rw-r--r--  drivers/scsi/fcoe/fcoe.h  82
-rw-r--r--  drivers/scsi/fcoe/libfcoe.c  433
-rw-r--r--  drivers/scsi/fnic/fnic.h  25
-rw-r--r--  drivers/scsi/fnic/fnic_fcs.c  499
-rw-r--r--  drivers/scsi/fnic/fnic_isr.c  18
-rw-r--r--  drivers/scsi/fnic/fnic_main.c  104
-rw-r--r--  drivers/scsi/fnic/fnic_res.c  5
-rw-r--r--  drivers/scsi/fnic/fnic_res.h  54
-rw-r--r--  drivers/scsi/fnic/fnic_scsi.c  96
-rw-r--r--  drivers/scsi/fnic/vnic_scsi.h  1
-rw-r--r--  drivers/scsi/hosts.c  13
-rw-r--r--  drivers/scsi/hptiop.c  5
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.c  335
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.h  12
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.c  7
-rw-r--r--  drivers/scsi/ipr.c  7
-rw-r--r--  drivers/scsi/iscsi_tcp.c  2
-rw-r--r--  drivers/scsi/libfc/Makefile  4
-rw-r--r--  drivers/scsi/libfc/fc_disc.c  85
-rw-r--r--  drivers/scsi/libfc/fc_elsct.c  79
-rw-r--r--  drivers/scsi/libfc/fc_exch.c  932
-rw-r--r--  drivers/scsi/libfc/fc_fcp.c  1044
-rw-r--r--  drivers/scsi/libfc/fc_frame.c  13
-rw-r--r--  drivers/scsi/libfc/fc_libfc.c  134
-rw-r--r--  drivers/scsi/libfc/fc_libfc.h  112
-rw-r--r--  drivers/scsi/libfc/fc_lport.c  853
-rw-r--r--  drivers/scsi/libfc/fc_npiv.c  161
-rw-r--r--  drivers/scsi/libfc/fc_rport.c  409
-rw-r--r--  drivers/scsi/libiscsi.c  389
-rw-r--r--  drivers/scsi/libsas/sas_scsi_host.c  6
-rw-r--r--  drivers/scsi/lpfc/lpfc.h  17
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c  262
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.c  5
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h  14
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c  53
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.c  10
-rw-r--r--  drivers/scsi/lpfc/lpfc_disc.h  4
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c  76
-rwxr-xr-x [-rw-r--r--]  drivers/scsi/lpfc/lpfc_hbadisc.c  129
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw.h  34
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw4.h  131
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c  515
-rw-r--r--  drivers/scsi/lpfc/lpfc_mbox.c  35
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c  7
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c  394
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c  1240
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.h  27
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli4.h  34
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h  3
-rw-r--r--  drivers/scsi/lpfc/lpfc_vport.c  4
-rw-r--r--  drivers/scsi/megaraid/megaraid_mbox.c  7
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.c  738
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.h  149
-rw-r--r--  drivers/scsi/mpt2sas/mpi/mpi2.h  5
-rw-r--r--  drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h  274
-rw-r--r--  drivers/scsi/mpt2sas/mpi/mpi2_ioc.h  18
-rw-r--r--  drivers/scsi/mpt2sas/mpi/mpi2_raid.h  14
-rw-r--r--  drivers/scsi/mpt2sas/mpi/mpi2_tool.h  16
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_base.c  152
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_base.h  37
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_ctl.c  197
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_ctl.h  4
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_scsih.c  819
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_transport.c  86
-rw-r--r--  drivers/scsi/osd/osd_initiator.c  121
-rw-r--r--  drivers/scsi/osd/osd_uld.c  260
-rw-r--r--  drivers/scsi/pm8001/Makefile  12
-rw-r--r--  drivers/scsi/pm8001/pm8001_chips.h  89
-rw-r--r--  drivers/scsi/pm8001/pm8001_ctl.c  573
-rw-r--r--  drivers/scsi/pm8001/pm8001_ctl.h  67
-rw-r--r--  drivers/scsi/pm8001/pm8001_defs.h  112
-rw-r--r--  drivers/scsi/pm8001/pm8001_hwi.c  4458
-rw-r--r--  drivers/scsi/pm8001/pm8001_hwi.h  1030
-rw-r--r--  drivers/scsi/pm8001/pm8001_init.c  891
-rw-r--r--  drivers/scsi/pm8001/pm8001_sas.c  1103
-rw-r--r--  drivers/scsi/pm8001/pm8001_sas.h  481
-rw-r--r--  drivers/scsi/pmcraid.c  7
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c  3
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.c  78
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h  9
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h  3
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c  12
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c  144
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c  50
-rw-r--r--  drivers/scsi/qla2xxx/qla_mid.c  2
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c  67
-rw-r--r--  drivers/scsi/qla2xxx/qla_version.h  2
-rw-r--r--  drivers/scsi/qlogicpti.h  2
-rw-r--r--  drivers/scsi/scsi.c  10
-rw-r--r--  drivers/scsi/scsi_debug.c  351
-rw-r--r--  drivers/scsi/scsi_devinfo.c  13
-rw-r--r--  drivers/scsi/scsi_error.c  65
-rw-r--r--  drivers/scsi/scsi_ioctl.c  3
-rw-r--r--  drivers/scsi/scsi_lib.c  11
-rw-r--r--  drivers/scsi/scsi_lib_dma.c  4
-rw-r--r--  drivers/scsi/scsi_scan.c  3
-rw-r--r--  drivers/scsi/scsi_sysfs.c  44
-rw-r--r--  drivers/scsi/scsi_transport_fc.c  99
-rw-r--r--  drivers/scsi/scsi_transport_iscsi.c  13
-rw-r--r--  drivers/scsi/st.c  3
-rw-r--r--  drivers/scsi/stex.c  282
-rw-r--r--  drivers/scsi/vmw_pvscsi.c  1407
-rw-r--r--  drivers/scsi/vmw_pvscsi.h  397
205 files changed, 26307 insertions, 7037 deletions
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 62e6b9ea96af..1683ebda900b 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1208,6 +1208,7 @@ void ata_scsi_slave_destroy(struct scsi_device *sdev)
  *	ata_scsi_change_queue_depth - SCSI callback for queue depth config
  *	@sdev: SCSI device to configure queue depth for
  *	@queue_depth: new queue depth
+ *	@reason: calling context
  *
  *	This is libata standard hostt->change_queue_depth callback.
  *	SCSI will call into this callback when user tries to set queue
@@ -1219,12 +1220,16 @@ void ata_scsi_slave_destroy(struct scsi_device *sdev)
  *	RETURNS:
  *	Newly configured queue depth.
  */
-int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth)
+int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth,
+				int reason)
 {
 	struct ata_port *ap = ata_shost_to_port(sdev->host);
 	struct ata_device *dev;
 	unsigned long flags;
 
+	if (reason != SCSI_QDEPTH_DEFAULT)
+		return -EOPNOTSUPP;
+
 	if (queue_depth < 1 || queue_depth == sdev->queue_depth)
 		return sdev->queue_depth;
 
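The @reason argument threaded through above follows a simple convention: a driver that cannot track midlayer-initiated depth changes (QUEUE FULL handling, ramp-up) rejects everything except SCSI_QDEPTH_DEFAULT, the user/sysfs path, and returns -EOPNOTSUPP so the midlayer falls back to its generic handling. Below is a minimal sketch of an LLD callback under that convention, assuming the SCSI_QDEPTH_* constants from <scsi/scsi_host.h> in this series; the mydrv name is illustrative, not part of this merge.

/*
 * Illustrative sketch (not from this merge): a low-level driver
 * ->change_queue_depth callback honoring the new @reason argument.
 * Only SCSI_QDEPTH_DEFAULT (a user/sysfs request) is handled here;
 * QFULL/RAMP_UP are left to the midlayer's generic fallback.
 */
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>

static int mydrv_change_queue_depth(struct scsi_device *sdev,
				    int queue_depth, int reason)
{
	if (reason != SCSI_QDEPTH_DEFAULT)	/* QFULL, RAMP_UP, ... */
		return -EOPNOTSUPP;

	if (queue_depth < 1)
		queue_depth = 1;
	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), queue_depth);
	return sdev->queue_depth;
}
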
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index 1eb4e020eb5c..0c82d335c55d 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -1975,7 +1975,7 @@ static int nv_swncq_slave_config(struct scsi_device *sdev)
 	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
 
 	if (strncmp(model_num, "Maxtor", 6) == 0) {
-		ata_scsi_change_queue_depth(sdev, 1);
+		ata_scsi_change_queue_depth(sdev, 1, SCSI_QDEPTH_DEFAULT);
 		ata_dev_printk(dev, KERN_NOTICE,
 			"Disabling SWNCQ mode (depth %x)\n", sdev->queue_depth);
 	}
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index add9188663ff..5f7a6fca0a4d 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -625,6 +625,7 @@ static struct iscsi_transport iscsi_iser_transport = {
 				  ISCSI_USERNAME | ISCSI_PASSWORD |
 				  ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
 				  ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
+				  ISCSI_LU_RESET_TMO | ISCSI_TGT_RESET_TMO |
 				  ISCSI_PING_TMO | ISCSI_RECV_TMO |
 				  ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
 	.host_param_mask	= ISCSI_HOST_HWADDRESS |
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 32d0b878eccc..dce971dbdfa3 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -1116,8 +1116,9 @@ static int pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
 	return limit_reached;
 }
 
-static void pg_init_done(struct dm_path *path, int errors)
+static void pg_init_done(void *data, int errors)
 {
+	struct dm_path *path = data;
 	struct pgpath *pgpath = path_to_pgpath(path);
 	struct priority_group *pg = pgpath->pg;
 	struct multipath *m = pg->m;
@@ -1183,12 +1184,11 @@ static void pg_init_done(struct dm_path *path, int errors)
 
 static void activate_path(struct work_struct *work)
 {
-	int ret;
 	struct pgpath *pgpath =
 		container_of(work, struct pgpath, activate_path);
 
-	ret = scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev));
-	pg_init_done(&pgpath->path, ret);
+	scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev),
+				pg_init_done, &pgpath->path);
 }
 
 /*
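The hunk above tracks an interface change in this merge: scsi_dh_activate() now completes asynchronously through a caller-supplied callback taking (void *data, int errors) instead of returning the activation result. A hedged sketch of a caller that still needs to wait synchronously under the new contract follows; struct my_ctx and the my_* names are made up for illustration.

#include <linux/blkdev.h>
#include <linux/completion.h>
#include <scsi/scsi_dh.h>

struct my_ctx {
	struct completion done;
	int errors;
};

/* Matches the new activation-complete callback shape: (data, errors). */
static void my_activate_done(void *data, int errors)
{
	struct my_ctx *ctx = data;

	ctx->errors = errors;
	complete(&ctx->done);
}

static int my_activate_and_wait(struct request_queue *q)
{
	struct my_ctx ctx = { .errors = 0 };

	init_completion(&ctx.done);
	scsi_dh_activate(q, my_activate_done, &ctx);	/* returns at once */
	wait_for_completion(&ctx.done);		/* fires from the callback */
	return ctx.errors;
}
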
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index 8dd4d219e433..b4948671eb92 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -76,8 +76,8 @@
 #define COPYRIGHT	"Copyright (c) 1999-2008 " MODULEAUTHOR
 #endif
 
-#define MPT_LINUX_VERSION_COMMON	"3.04.12"
-#define MPT_LINUX_PACKAGE_NAME		"@(#)mptlinux-3.04.12"
+#define MPT_LINUX_VERSION_COMMON	"3.04.13"
+#define MPT_LINUX_PACKAGE_NAME		"@(#)mptlinux-3.04.13"
 #define WHAT_MAGIC_STRING		"@" "(" "#" ")"
 
 #define show_mptmod_ver(s,ver)  \
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index 9b2e2198aee9..352acd05c46b 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -621,11 +621,8 @@ __mptctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 	 */
 	iocnumX = khdr.iocnum & 0xFF;
 	if (((iocnum = mpt_verify_adapter(iocnumX, &iocp)) < 0) ||
-	    (iocp == NULL)) {
-		printk(KERN_DEBUG MYNAM "%s::mptctl_ioctl() @%d - ioc%d not found!\n",
-				__FILE__, __LINE__, iocnumX);
+	    (iocp == NULL))
 		return -ENODEV;
-	}
 
 	if (!iocp->active) {
 		printk(KERN_DEBUG MYNAM "%s::mptctl_ioctl() @%d - Controller disabled.\n",
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index c29578614504..57752751712b 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -792,11 +792,36 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
 			 *  precedence!
 			 */
 			sc->result = (DID_OK << 16) | scsi_status;
-			if (scsi_state & MPI_SCSI_STATE_AUTOSENSE_VALID) {
-				/* Have already saved the status and sense data
+			if (!(scsi_state & MPI_SCSI_STATE_AUTOSENSE_VALID)) {
+
+				/*
+				 * For an Errata on LSI53C1030
+				 * When the length of request data
+				 * and transfer data are different
+				 * with result of command (READ or VERIFY),
+				 * DID_SOFT_ERROR is set.
 				 */
-				;
-			} else {
+				if (ioc->bus_type == SPI) {
+					if (pScsiReq->CDB[0] == READ_6  ||
+					    pScsiReq->CDB[0] == READ_10 ||
+					    pScsiReq->CDB[0] == READ_12 ||
+					    pScsiReq->CDB[0] == READ_16 ||
+					    pScsiReq->CDB[0] == VERIFY  ||
+					    pScsiReq->CDB[0] == VERIFY_16) {
+						if (scsi_bufflen(sc) !=
+							xfer_cnt) {
+							sc->result =
+							DID_SOFT_ERROR << 16;
+						    printk(KERN_WARNING "Errata"
+						    "on LSI53C1030 occurred."
+						    "sc->req_bufflen=0x%02x,"
+						    "xfer_cnt=0x%02x\n",
+						    scsi_bufflen(sc),
+						    xfer_cnt);
+						}
+					}
+				}
+
 				if (xfer_cnt < sc->underflow) {
 					if (scsi_status == SAM_STAT_BUSY)
 						sc->result = SAM_STAT_BUSY;
@@ -835,7 +860,58 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
 			sc->result = (DID_OK << 16) | scsi_status;
 			if (scsi_state == 0) {
 				;
-			} else if (scsi_state & MPI_SCSI_STATE_AUTOSENSE_VALID) {
+			} else if (scsi_state &
+			    MPI_SCSI_STATE_AUTOSENSE_VALID) {
+
+				/*
+				 * For potential trouble on LSI53C1030.
+				 * (date:2007.xx.)
+				 * It is checked whether the length of
+				 * request data is equal to
+				 * the length of transfer and residual.
+				 * MEDIUM_ERROR is set by incorrect data.
+				 */
+				if ((ioc->bus_type == SPI) &&
+					(sc->sense_buffer[2] & 0x20)) {
+					u32	 difftransfer;
+					difftransfer =
+					sc->sense_buffer[3] << 24 |
+					sc->sense_buffer[4] << 16 |
+					sc->sense_buffer[5] << 8 |
+					sc->sense_buffer[6];
+					if (((sc->sense_buffer[3] & 0x80) ==
+						0x80) && (scsi_bufflen(sc)
+						!= xfer_cnt)) {
+						sc->sense_buffer[2] =
+						    MEDIUM_ERROR;
+						sc->sense_buffer[12] = 0xff;
+						sc->sense_buffer[13] = 0xff;
+						printk(KERN_WARNING"Errata"
+						"on LSI53C1030 occurred."
+						"sc->req_bufflen=0x%02x,"
+						"xfer_cnt=0x%02x\n" ,
+						scsi_bufflen(sc),
+						xfer_cnt);
+					}
+					if (((sc->sense_buffer[3] & 0x80)
+						!= 0x80) &&
+						(scsi_bufflen(sc) !=
+						xfer_cnt + difftransfer)) {
+						sc->sense_buffer[2] =
+							MEDIUM_ERROR;
+						sc->sense_buffer[12] = 0xff;
+						sc->sense_buffer[13] = 0xff;
+						printk(KERN_WARNING
+						"Errata on LSI53C1030 occurred"
+						"sc->req_bufflen=0x%02x,"
+						" xfer_cnt=0x%02x,"
+						"difftransfer=0x%02x\n",
+						scsi_bufflen(sc),
+						xfer_cnt,
+						difftransfer);
+					}
+				}
+
 				/*
 				 * If running against circa 200003dd 909 MPT f/w,
 				 * may get this (AUTOSENSE_VALID) for actual TASK_SET_FULL
@@ -2275,11 +2351,12 @@ mptscsih_slave_destroy(struct scsi_device *sdev)
  *	mptscsih_change_queue_depth - This function will set a devices queue depth
  *	@sdev: per scsi_device pointer
  *	@qdepth: requested queue depth
+ *	@reason: calling context
  *
  *	Adding support for new 'change_queue_depth' api.
 */
 int
-mptscsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
+mptscsih_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
 {
 	MPT_SCSI_HOST		*hd = shost_priv(sdev->host);
 	VirtTarget 		*vtarget;
@@ -2291,6 +2368,9 @@ mptscsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
 	starget = scsi_target(sdev);
 	vtarget = starget->hostdata;
 
+	if (reason != SCSI_QDEPTH_DEFAULT)
+		return -EOPNOTSUPP;
+
 	if (ioc->bus_type == SPI) {
 		if (!(vtarget->tflags & MPT_TARGET_FLAGS_Q_YES))
 			max_depth = 1;
@@ -2357,7 +2437,8 @@ mptscsih_slave_configure(struct scsi_device *sdev)
 		    ioc->name, vtarget->negoFlags, vtarget->maxOffset,
 		    vtarget->minSyncFactor));
 
-	mptscsih_change_queue_depth(sdev, MPT_SCSI_CMD_PER_DEV_HIGH);
+	mptscsih_change_queue_depth(sdev, MPT_SCSI_CMD_PER_DEV_HIGH,
+				    SCSI_QDEPTH_DEFAULT);
 	dsprintk(ioc, printk(MYIOC_s_DEBUG_FMT
 		"tagged %d, simple %d, ordered %d\n",
 		ioc->name,sdev->tagged_supported, sdev->simple_tags,
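Both errata workarounds above decode fixed-format SCSI sense data: the sense_buffer[2] & 0x20 test reads the ILI (incorrect length indicator) bit, and difftransfer reassembles the big-endian Information field from bytes 3..6, which for these commands carries the residue between requested and actual transfer length. A small sketch of those two decodes, assuming fixed-format (response code 0x70/0x71) sense; the helper names are illustrative.

#include <linux/types.h>

/* ILI: incorrect length indicator, byte 2 bit 5 of fixed-format sense. */
static inline bool my_sense_ili(const u8 *sense)
{
	return sense[2] & 0x20;
}

/* Information field, bytes 3..6, big-endian; the sign bit tested via
 * sense[3] & 0x80 above says whether the residue is negative. */
static inline u32 my_sense_information(const u8 *sense)
{
	return sense[3] << 24 | sense[4] << 16 | sense[5] << 8 | sense[6];
}
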
diff --git a/drivers/message/fusion/mptscsih.h b/drivers/message/fusion/mptscsih.h
index e0b33e04a33b..45a5ff3eff61 100644
--- a/drivers/message/fusion/mptscsih.h
+++ b/drivers/message/fusion/mptscsih.h
@@ -128,7 +128,8 @@ extern int mptscsih_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_F
 extern int mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *r);
 extern int mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply);
 extern int mptscsih_ioc_reset(MPT_ADAPTER *ioc, int post_reset);
-extern int mptscsih_change_queue_depth(struct scsi_device *sdev, int qdepth);
+extern int mptscsih_change_queue_depth(struct scsi_device *sdev, int qdepth,
+				       int reason);
 extern u8 mptscsih_raid_id_to_num(MPT_ADAPTER *ioc, u8 channel, u8 id);
 extern int mptscsih_is_phys_disk(MPT_ADAPTER *ioc, u8 channel, u8 id);
 extern struct device_attribute *mptscsih_host_attrs[];
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 2889e5f2dfd3..9d0c941b7d33 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -31,6 +31,7 @@
 #include <linux/miscdevice.h>
 #include <linux/seq_file.h>
 #include "zfcp_ext.h"
+#include "zfcp_fc.h"
 
 #define ZFCP_BUS_ID_SIZE	20
 
@@ -80,48 +81,40 @@ int zfcp_reqlist_isempty(struct zfcp_adapter *adapter)
 
 static void __init zfcp_init_device_configure(char *busid, u64 wwpn, u64 lun)
 {
-	struct ccw_device *ccwdev;
+	struct ccw_device *cdev;
 	struct zfcp_adapter *adapter;
 	struct zfcp_port *port;
 	struct zfcp_unit *unit;
 
-	ccwdev = get_ccwdev_by_busid(&zfcp_ccw_driver, busid);
-	if (!ccwdev)
+	cdev = get_ccwdev_by_busid(&zfcp_ccw_driver, busid);
+	if (!cdev)
 		return;
 
-	if (ccw_device_set_online(ccwdev))
-		goto out_ccwdev;
+	if (ccw_device_set_online(cdev))
+		goto out_ccw_device;
 
-	mutex_lock(&zfcp_data.config_mutex);
-	adapter = dev_get_drvdata(&ccwdev->dev);
+	adapter = zfcp_ccw_adapter_by_cdev(cdev);
 	if (!adapter)
-		goto out_unlock;
-	zfcp_adapter_get(adapter);
+		goto out_ccw_device;
 
 	port = zfcp_get_port_by_wwpn(adapter, wwpn);
 	if (!port)
 		goto out_port;
 
-	zfcp_port_get(port);
 	unit = zfcp_unit_enqueue(port, lun);
 	if (IS_ERR(unit))
 		goto out_unit;
-	mutex_unlock(&zfcp_data.config_mutex);
 
 	zfcp_erp_unit_reopen(unit, 0, "auidc_1", NULL);
 	zfcp_erp_wait(adapter);
 	flush_work(&unit->scsi_work);
 
-	mutex_lock(&zfcp_data.config_mutex);
-	zfcp_unit_put(unit);
 out_unit:
-	zfcp_port_put(port);
+	put_device(&port->sysfs_device);
 out_port:
-	zfcp_adapter_put(adapter);
-out_unlock:
-	mutex_unlock(&zfcp_data.config_mutex);
-out_ccwdev:
-	put_device(&ccwdev->dev);
+	zfcp_ccw_adapter_put(adapter);
+out_ccw_device:
+	put_device(&cdev->dev);
 	return;
 }
 
@@ -167,7 +160,7 @@ static int __init zfcp_module_init(void)
 	int retval = -ENOMEM;
 
 	zfcp_data.gpn_ft_cache = zfcp_cache_hw_align("zfcp_gpn",
-					sizeof(struct ct_iu_gpn_ft_req));
+					sizeof(struct zfcp_fc_gpn_ft_req));
 	if (!zfcp_data.gpn_ft_cache)
 		goto out;
 
@@ -182,12 +175,14 @@ static int __init zfcp_module_init(void)
 		goto out_sr_cache;
 
 	zfcp_data.gid_pn_cache = zfcp_cache_hw_align("zfcp_gid",
-					sizeof(struct zfcp_gid_pn_data));
+					sizeof(struct zfcp_fc_gid_pn));
 	if (!zfcp_data.gid_pn_cache)
 		goto out_gid_cache;
 
-	mutex_init(&zfcp_data.config_mutex);
-	rwlock_init(&zfcp_data.config_lock);
+	zfcp_data.adisc_cache = zfcp_cache_hw_align("zfcp_adisc",
+					sizeof(struct zfcp_fc_els_adisc));
+	if (!zfcp_data.adisc_cache)
+		goto out_adisc_cache;
 
 	zfcp_data.scsi_transport_template =
 		fc_attach_transport(&zfcp_transport_functions);
@@ -200,7 +195,7 @@ static int __init zfcp_module_init(void)
 		goto out_misc;
 	}
 
-	retval = zfcp_ccw_register();
+	retval = ccw_driver_register(&zfcp_ccw_driver);
 	if (retval) {
 		pr_err("The zfcp device driver could not register with "
 		       "the common I/O layer\n");
@@ -216,6 +211,8 @@ out_ccw_register:
 out_misc:
 	fc_release_transport(zfcp_data.scsi_transport_template);
 out_transport:
+	kmem_cache_destroy(zfcp_data.adisc_cache);
+out_adisc_cache:
 	kmem_cache_destroy(zfcp_data.gid_pn_cache);
 out_gid_cache:
 	kmem_cache_destroy(zfcp_data.sr_buffer_cache);
@@ -229,6 +226,20 @@ out:
 
 module_init(zfcp_module_init);
 
+static void __exit zfcp_module_exit(void)
+{
+	ccw_driver_unregister(&zfcp_ccw_driver);
+	misc_deregister(&zfcp_cfdc_misc);
+	fc_release_transport(zfcp_data.scsi_transport_template);
+	kmem_cache_destroy(zfcp_data.adisc_cache);
+	kmem_cache_destroy(zfcp_data.gid_pn_cache);
+	kmem_cache_destroy(zfcp_data.sr_buffer_cache);
+	kmem_cache_destroy(zfcp_data.qtcb_cache);
+	kmem_cache_destroy(zfcp_data.gpn_ft_cache);
+}
+
+module_exit(zfcp_module_exit);
+
 /**
  * zfcp_get_unit_by_lun - find unit in unit list of port by FCP LUN
  * @port: pointer to port to search for unit
@@ -238,12 +249,18 @@ module_init(zfcp_module_init);
  */
 struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *port, u64 fcp_lun)
 {
+	unsigned long flags;
 	struct zfcp_unit *unit;
 
-	list_for_each_entry(unit, &port->unit_list_head, list)
-		if ((unit->fcp_lun == fcp_lun) &&
-		    !(atomic_read(&unit->status) & ZFCP_STATUS_COMMON_REMOVE))
-		    return unit;
+	read_lock_irqsave(&port->unit_list_lock, flags);
+	list_for_each_entry(unit, &port->unit_list, list)
+		if (unit->fcp_lun == fcp_lun) {
+			if (!get_device(&unit->sysfs_device))
+				unit = NULL;
+			read_unlock_irqrestore(&port->unit_list_lock, flags);
+			return unit;
+		}
+	read_unlock_irqrestore(&port->unit_list_lock, flags);
 	return NULL;
 }
 
@@ -257,18 +274,35 @@ struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *port, u64 fcp_lun)
 struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *adapter,
 					u64 wwpn)
 {
+	unsigned long flags;
 	struct zfcp_port *port;
 
-	list_for_each_entry(port, &adapter->port_list_head, list)
-		if ((port->wwpn == wwpn) &&
-		    !(atomic_read(&port->status) & ZFCP_STATUS_COMMON_REMOVE))
+	read_lock_irqsave(&adapter->port_list_lock, flags);
+	list_for_each_entry(port, &adapter->port_list, list)
+		if (port->wwpn == wwpn) {
+			if (!get_device(&port->sysfs_device))
+				port = NULL;
+			read_unlock_irqrestore(&adapter->port_list_lock, flags);
 			return port;
+		}
+	read_unlock_irqrestore(&adapter->port_list_lock, flags);
 	return NULL;
 }
 
-static void zfcp_sysfs_unit_release(struct device *dev)
+/**
+ * zfcp_unit_release - dequeue unit
+ * @dev: pointer to device
+ *
+ * waits until all work is done on unit and removes it then from the unit->list
+ * of the associated port.
+ */
+static void zfcp_unit_release(struct device *dev)
 {
-	kfree(container_of(dev, struct zfcp_unit, sysfs_device));
+	struct zfcp_unit *unit = container_of(dev, struct zfcp_unit,
+					      sysfs_device);
+
+	put_device(&unit->port->sysfs_device);
+	kfree(unit);
 }
 
 /**
@@ -276,43 +310,40 @@ static void zfcp_sysfs_unit_release(struct device *dev)
  * @port: pointer to port where unit is added
  * @fcp_lun: FCP LUN of unit to be enqueued
  * Returns: pointer to enqueued unit on success, ERR_PTR on error
- * Locks: config_mutex must be held to serialize changes to the unit list
  *
  * Sets up some unit internal structures and creates sysfs entry.
  */
 struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun)
 {
 	struct zfcp_unit *unit;
+	int retval = -ENOMEM;
 
-	read_lock_irq(&zfcp_data.config_lock);
-	if (zfcp_get_unit_by_lun(port, fcp_lun)) {
-		read_unlock_irq(&zfcp_data.config_lock);
-		return ERR_PTR(-EINVAL);
+	get_device(&port->sysfs_device);
+
+	unit = zfcp_get_unit_by_lun(port, fcp_lun);
+	if (unit) {
+		put_device(&unit->sysfs_device);
+		retval = -EEXIST;
+		goto err_out;
 	}
-	read_unlock_irq(&zfcp_data.config_lock);
 
 	unit = kzalloc(sizeof(struct zfcp_unit), GFP_KERNEL);
 	if (!unit)
-		return ERR_PTR(-ENOMEM);
-
-	atomic_set(&unit->refcount, 0);
-	init_waitqueue_head(&unit->remove_wq);
-	INIT_WORK(&unit->scsi_work, zfcp_scsi_scan);
+		goto err_out;
 
 	unit->port = port;
 	unit->fcp_lun = fcp_lun;
+	unit->sysfs_device.parent = &port->sysfs_device;
+	unit->sysfs_device.release = zfcp_unit_release;
 
 	if (dev_set_name(&unit->sysfs_device, "0x%016llx",
 			 (unsigned long long) fcp_lun)) {
 		kfree(unit);
-		return ERR_PTR(-ENOMEM);
+		goto err_out;
 	}
-	unit->sysfs_device.parent = &port->sysfs_device;
-	unit->sysfs_device.release = zfcp_sysfs_unit_release;
-	dev_set_drvdata(&unit->sysfs_device, unit);
+	retval = -EINVAL;
 
-	/* mark unit unusable as long as sysfs registration is not complete */
-	atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status);
+	INIT_WORK(&unit->scsi_work, zfcp_scsi_scan);
 
 	spin_lock_init(&unit->latencies.lock);
 	unit->latencies.write.channel.min = 0xFFFFFFFF;
@@ -324,50 +355,30 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun)
 
 	if (device_register(&unit->sysfs_device)) {
 		put_device(&unit->sysfs_device);
-		return ERR_PTR(-EINVAL);
+		goto err_out;
 	}
 
 	if (sysfs_create_group(&unit->sysfs_device.kobj,
-			       &zfcp_sysfs_unit_attrs)) {
-		device_unregister(&unit->sysfs_device);
-		return ERR_PTR(-EINVAL);
-	}
+			       &zfcp_sysfs_unit_attrs))
+		goto err_out_put;
 
-	zfcp_unit_get(unit);
+	write_lock_irq(&port->unit_list_lock);
+	list_add_tail(&unit->list, &port->unit_list);
+	write_unlock_irq(&port->unit_list_lock);
 
-	write_lock_irq(&zfcp_data.config_lock);
-	list_add_tail(&unit->list, &port->unit_list_head);
-	atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status);
 	atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING, &unit->status);
 
-	write_unlock_irq(&zfcp_data.config_lock);
-
-	zfcp_port_get(port);
-
 	return unit;
-}
 
-/**
- * zfcp_unit_dequeue - dequeue unit
- * @unit: pointer to zfcp_unit
- *
- * waits until all work is done on unit and removes it then from the unit->list
- * of the associated port.
- */
-void zfcp_unit_dequeue(struct zfcp_unit *unit)
-{
-	wait_event(unit->remove_wq, atomic_read(&unit->refcount) == 0);
-	write_lock_irq(&zfcp_data.config_lock);
-	list_del(&unit->list);
-	write_unlock_irq(&zfcp_data.config_lock);
-	zfcp_port_put(unit->port);
-	sysfs_remove_group(&unit->sysfs_device.kobj, &zfcp_sysfs_unit_attrs);
+err_out_put:
 	device_unregister(&unit->sysfs_device);
+err_out:
+	put_device(&port->sysfs_device);
+	return ERR_PTR(retval);
 }
 
 static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter)
 {
-	/* must only be called with zfcp_data.config_mutex taken */
 	adapter->pool.erp_req =
 		mempool_create_kmalloc_pool(1, sizeof(struct zfcp_fsf_req));
 	if (!adapter->pool.erp_req)
@@ -405,9 +416,9 @@ static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter)
 	if (!adapter->pool.status_read_data)
 		return -ENOMEM;
 
-	adapter->pool.gid_pn_data =
+	adapter->pool.gid_pn =
 		mempool_create_slab_pool(1, zfcp_data.gid_pn_cache);
-	if (!adapter->pool.gid_pn_data)
+	if (!adapter->pool.gid_pn)
 		return -ENOMEM;
 
 	return 0;
@@ -415,7 +426,6 @@ static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter)
 
 static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter)
 {
-	/* zfcp_data.config_mutex must be held */
 	if (adapter->pool.erp_req)
 		mempool_destroy(adapter->pool.erp_req);
 	if (adapter->pool.scsi_req)
@@ -428,8 +438,8 @@ static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter)
 		mempool_destroy(adapter->pool.status_read_req);
 	if (adapter->pool.status_read_data)
 		mempool_destroy(adapter->pool.status_read_data);
-	if (adapter->pool.gid_pn_data)
-		mempool_destroy(adapter->pool.gid_pn_data);
+	if (adapter->pool.gid_pn)
+		mempool_destroy(adapter->pool.gid_pn);
 }
 
 /**
@@ -497,53 +507,56 @@ static void zfcp_destroy_adapter_work_queue(struct zfcp_adapter *adapter)
  * zfcp_adapter_enqueue - enqueue a new adapter to the list
  * @ccw_device: pointer to the struct cc_device
  *
- * Returns:	0             if a new adapter was successfully enqueued
- *		-ENOMEM       if alloc failed
+ * Returns:	struct zfcp_adapter*
  * Enqueues an adapter at the end of the adapter list in the driver data.
  * All adapter internal structures are set up.
  * Proc-fs entries are also created.
- * locks: config_mutex must be held to serialize changes to the adapter list
  */
-int zfcp_adapter_enqueue(struct ccw_device *ccw_device)
+struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
 {
 	struct zfcp_adapter *adapter;
 
-	/*
-	 * Note: It is safe to release the list_lock, as any list changes
-	 * are protected by the config_mutex, which must be held to get here
-	 */
+	if (!get_device(&ccw_device->dev))
+		return ERR_PTR(-ENODEV);
 
 	adapter = kzalloc(sizeof(struct zfcp_adapter), GFP_KERNEL);
-	if (!adapter)
-		return -ENOMEM;
+	if (!adapter) {
+		put_device(&ccw_device->dev);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	kref_init(&adapter->ref);
 
 	ccw_device->handler = NULL;
 	adapter->ccw_device = ccw_device;
-	atomic_set(&adapter->refcount, 0);
+
+	INIT_WORK(&adapter->stat_work, _zfcp_status_read_scheduler);
+	INIT_WORK(&adapter->scan_work, zfcp_fc_scan_ports);
 
 	if (zfcp_qdio_setup(adapter))
-		goto qdio_failed;
+		goto failed;
 
 	if (zfcp_allocate_low_mem_buffers(adapter))
-		goto low_mem_buffers_failed;
+		goto failed;
 
 	if (zfcp_reqlist_alloc(adapter))
-		goto low_mem_buffers_failed;
+		goto failed;
 
 	if (zfcp_dbf_adapter_register(adapter))
-		goto debug_register_failed;
+		goto failed;
 
 	if (zfcp_setup_adapter_work_queue(adapter))
-		goto work_queue_failed;
+		goto failed;
 
 	if (zfcp_fc_gs_setup(adapter))
-		goto generic_services_failed;
+		goto failed;
+
+	rwlock_init(&adapter->port_list_lock);
+	INIT_LIST_HEAD(&adapter->port_list);
 
-	init_waitqueue_head(&adapter->remove_wq);
 	init_waitqueue_head(&adapter->erp_ready_wq);
 	init_waitqueue_head(&adapter->erp_done_wqh);
 
-	INIT_LIST_HEAD(&adapter->port_list_head);
 	INIT_LIST_HEAD(&adapter->erp_ready_head);
 	INIT_LIST_HEAD(&adapter->erp_running_head);
 
@@ -553,83 +566,85 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device)
 	rwlock_init(&adapter->abort_lock);
 
 	if (zfcp_erp_thread_setup(adapter))
-		goto erp_thread_failed;
-
-	INIT_WORK(&adapter->stat_work, _zfcp_status_read_scheduler);
-	INIT_WORK(&adapter->scan_work, _zfcp_fc_scan_ports_later);
+		goto failed;
 
 	adapter->service_level.seq_print = zfcp_print_sl;
 
-	/* mark adapter unusable as long as sysfs registration is not complete */
-	atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status);
-
 	dev_set_drvdata(&ccw_device->dev, adapter);
 
 	if (sysfs_create_group(&ccw_device->dev.kobj,
 			       &zfcp_sysfs_adapter_attrs))
-		goto sysfs_failed;
-
-	atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status);
+		goto failed;
 
 	if (!zfcp_adapter_scsi_register(adapter))
-		return 0;
+		return adapter;
 
-sysfs_failed:
-	zfcp_erp_thread_kill(adapter);
-erp_thread_failed:
-	zfcp_fc_gs_destroy(adapter);
-generic_services_failed:
+failed:
+	zfcp_adapter_unregister(adapter);
+	return ERR_PTR(-ENOMEM);
+}
+
+void zfcp_adapter_unregister(struct zfcp_adapter *adapter)
+{
+	struct ccw_device *cdev = adapter->ccw_device;
+
+	cancel_work_sync(&adapter->scan_work);
+	cancel_work_sync(&adapter->stat_work);
 	zfcp_destroy_adapter_work_queue(adapter);
-work_queue_failed:
+
+	zfcp_fc_wka_ports_force_offline(adapter->gs);
+	zfcp_adapter_scsi_unregister(adapter);
+	sysfs_remove_group(&cdev->dev.kobj, &zfcp_sysfs_adapter_attrs);
+
+	zfcp_erp_thread_kill(adapter);
 	zfcp_dbf_adapter_unregister(adapter->dbf);
-debug_register_failed:
-	dev_set_drvdata(&ccw_device->dev, NULL);
-	kfree(adapter->req_list);
-low_mem_buffers_failed:
-	zfcp_free_low_mem_buffers(adapter);
-qdio_failed:
 	zfcp_qdio_destroy(adapter->qdio);
-	kfree(adapter);
-	return -ENOMEM;
+
+	zfcp_ccw_adapter_put(adapter); /* final put to release */
 }
 
 /**
- * zfcp_adapter_dequeue - remove the adapter from the resource list
- * @adapter: pointer to struct zfcp_adapter which should be removed
+ * zfcp_adapter_release - remove the adapter from the resource list
+ * @ref: pointer to struct kref
  * locks:	adapter list write lock is assumed to be held by caller
  */
-void zfcp_adapter_dequeue(struct zfcp_adapter *adapter)
+void zfcp_adapter_release(struct kref *ref)
 {
-	int retval = 0;
-	unsigned long flags;
+	struct zfcp_adapter *adapter = container_of(ref, struct zfcp_adapter,
+						    ref);
+	struct ccw_device *cdev = adapter->ccw_device;
 
-	cancel_work_sync(&adapter->stat_work);
-	zfcp_fc_wka_ports_force_offline(adapter->gs);
-	sysfs_remove_group(&adapter->ccw_device->dev.kobj,
-			   &zfcp_sysfs_adapter_attrs);
 	dev_set_drvdata(&adapter->ccw_device->dev, NULL);
-	/* sanity check: no pending FSF requests */
-	spin_lock_irqsave(&adapter->req_list_lock, flags);
-	retval = zfcp_reqlist_isempty(adapter);
-	spin_unlock_irqrestore(&adapter->req_list_lock, flags);
-	if (!retval)
-		return;
-
 	zfcp_fc_gs_destroy(adapter);
-	zfcp_erp_thread_kill(adapter);
-	zfcp_destroy_adapter_work_queue(adapter);
-	zfcp_dbf_adapter_unregister(adapter->dbf);
 	zfcp_free_low_mem_buffers(adapter);
-	zfcp_qdio_destroy(adapter->qdio);
 	kfree(adapter->req_list);
 	kfree(adapter->fc_stats);
 	kfree(adapter->stats_reset_data);
 	kfree(adapter);
+	put_device(&cdev->dev);
 }
 
-static void zfcp_sysfs_port_release(struct device *dev)
+/**
+ * zfcp_device_unregister - remove port, unit from system
+ * @dev: reference to device which is to be removed
+ * @grp: related reference to attribute group
+ *
+ * Helper function to unregister port, unit from system
+ */
+void zfcp_device_unregister(struct device *dev,
+			    const struct attribute_group *grp)
 {
-	kfree(container_of(dev, struct zfcp_port, sysfs_device));
+	sysfs_remove_group(&dev->kobj, grp);
+	device_unregister(dev);
+}
+
+static void zfcp_port_release(struct device *dev)
+{
+	struct zfcp_port *port = container_of(dev, struct zfcp_port,
+					      sysfs_device);
+
+	zfcp_ccw_adapter_put(port->adapter);
+	kfree(port);
 }
 
 /**
@@ -639,7 +654,6 @@ static void zfcp_sysfs_port_release(struct device *dev)
  * @status: initial status for the port
  * @d_id: destination id of the remote port to be enqueued
  * Returns: pointer to enqueued port on success, ERR_PTR on error
- * Locks: config_mutex must be held to serialize changes to the port list
  *
  * All port internal structures are set up and the sysfs entry is generated.
  * d_id is used to enqueue ports with a well known address like the Directory
@@ -649,20 +663,24 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
 				     u32 status, u32 d_id)
 {
 	struct zfcp_port *port;
+	int retval = -ENOMEM;
 
-	read_lock_irq(&zfcp_data.config_lock);
-	if (zfcp_get_port_by_wwpn(adapter, wwpn)) {
-		read_unlock_irq(&zfcp_data.config_lock);
-		return ERR_PTR(-EINVAL);
+	kref_get(&adapter->ref);
+
+	port = zfcp_get_port_by_wwpn(adapter, wwpn);
+	if (port) {
+		put_device(&port->sysfs_device);
+		retval = -EEXIST;
+		goto err_out;
 	}
-	read_unlock_irq(&zfcp_data.config_lock);
 
 	port = kzalloc(sizeof(struct zfcp_port), GFP_KERNEL);
 	if (!port)
-		return ERR_PTR(-ENOMEM);
+		goto err_out;
+
+	rwlock_init(&port->unit_list_lock);
+	INIT_LIST_HEAD(&port->unit_list);
 
-	init_waitqueue_head(&port->remove_wq);
-	INIT_LIST_HEAD(&port->unit_list_head);
 	INIT_WORK(&port->gid_pn_work, zfcp_fc_port_did_lookup);
 	INIT_WORK(&port->test_link_work, zfcp_fc_link_test_work);
 	INIT_WORK(&port->rport_work, zfcp_scsi_rport_work);
@@ -671,58 +689,38 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
 	port->d_id = d_id;
 	port->wwpn = wwpn;
 	port->rport_task = RPORT_NONE;
-
-	/* mark port unusable as long as sysfs registration is not complete */
-	atomic_set_mask(status | ZFCP_STATUS_COMMON_REMOVE, &port->status);
-	atomic_set(&port->refcount, 0);
+	port->sysfs_device.parent = &adapter->ccw_device->dev;
+	port->sysfs_device.release = zfcp_port_release;
 
 	if (dev_set_name(&port->sysfs_device, "0x%016llx",
 			 (unsigned long long)wwpn)) {
 		kfree(port);
-		return ERR_PTR(-ENOMEM);
+		goto err_out;
 	}
-	port->sysfs_device.parent = &adapter->ccw_device->dev;
-	port->sysfs_device.release = zfcp_sysfs_port_release;
-	dev_set_drvdata(&port->sysfs_device, port);
+	retval = -EINVAL;
 
 	if (device_register(&port->sysfs_device)) {
 		put_device(&port->sysfs_device);
-		return ERR_PTR(-EINVAL);
+		goto err_out;
 	}
 
 	if (sysfs_create_group(&port->sysfs_device.kobj,
-			       &zfcp_sysfs_port_attrs)) {
-		device_unregister(&port->sysfs_device);
-		return ERR_PTR(-EINVAL);
-	}
+			       &zfcp_sysfs_port_attrs))
+		goto err_out_put;
 
-	zfcp_port_get(port);
+	write_lock_irq(&adapter->port_list_lock);
+	list_add_tail(&port->list, &adapter->port_list);
+	write_unlock_irq(&adapter->port_list_lock);
 
-	write_lock_irq(&zfcp_data.config_lock);
-	list_add_tail(&port->list, &adapter->port_list_head);
-	atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status);
-	atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING, &port->status);
+	atomic_set_mask(status | ZFCP_STATUS_COMMON_RUNNING, &port->status);
 
-	write_unlock_irq(&zfcp_data.config_lock);
-
-	zfcp_adapter_get(adapter);
 	return port;
-}
 
-/**
- * zfcp_port_dequeue - dequeues a port from the port list of the adapter
- * @port: pointer to struct zfcp_port which should be removed
- */
-void zfcp_port_dequeue(struct zfcp_port *port)
-{
-	write_lock_irq(&zfcp_data.config_lock);
-	list_del(&port->list);
-	write_unlock_irq(&zfcp_data.config_lock);
-	wait_event(port->remove_wq, atomic_read(&port->refcount) == 0);
-	cancel_work_sync(&port->rport_work); /* usually not necessary */
-	zfcp_adapter_put(port->adapter);
-	sysfs_remove_group(&port->sysfs_device.kobj, &zfcp_sysfs_port_attrs);
+err_out_put:
 	device_unregister(&port->sysfs_device);
+err_out:
+	zfcp_ccw_adapter_put(adapter);
+	return ERR_PTR(retval);
 }
 
 /**
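The reworked lookups above (zfcp_get_unit_by_lun, zfcp_get_port_by_wwpn) share one pattern that replaces the old ZFCP_STATUS_COMMON_REMOVE flag: walk the list under its rwlock and pin a match with get_device() before releasing the lock, so the object cannot be freed while the caller uses it. A condensed sketch of that pattern with made-up my_* names:

#include <linux/device.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct my_obj {
	struct list_head list;
	u64 key;
	struct device dev;	/* refcounted via get_device/put_device */
};

static struct my_obj *my_lookup(struct list_head *head, rwlock_t *lock,
				u64 key)
{
	struct my_obj *obj;
	unsigned long flags;

	read_lock_irqsave(lock, flags);
	list_for_each_entry(obj, head, list)
		if (obj->key == key) {
			if (!get_device(&obj->dev))	/* object dying */
				obj = NULL;
			read_unlock_irqrestore(lock, flags);
			return obj;	/* caller must put_device() */
		}
	read_unlock_irqrestore(lock, flags);
	return NULL;
}
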
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
index e08339428ecf..c22cb72a5ae8 100644
--- a/drivers/s390/scsi/zfcp_ccw.c
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -13,28 +13,34 @@
 
 #define ZFCP_MODEL_PRIV 0x4
 
-static int zfcp_ccw_suspend(struct ccw_device *cdev)
+static DEFINE_SPINLOCK(zfcp_ccw_adapter_ref_lock);
 
+struct zfcp_adapter *zfcp_ccw_adapter_by_cdev(struct ccw_device *cdev)
 {
-	struct zfcp_adapter *adapter = dev_get_drvdata(&cdev->dev);
-
-	if (!adapter)
-		return 0;
-
-	mutex_lock(&zfcp_data.config_mutex);
+	struct zfcp_adapter *adapter;
+	unsigned long flags;
 
-	zfcp_erp_adapter_shutdown(adapter, 0, "ccsusp1", NULL);
-	zfcp_erp_wait(adapter);
+	spin_lock_irqsave(&zfcp_ccw_adapter_ref_lock, flags);
+	adapter = dev_get_drvdata(&cdev->dev);
+	if (adapter)
+		kref_get(&adapter->ref);
+	spin_unlock_irqrestore(&zfcp_ccw_adapter_ref_lock, flags);
+	return adapter;
+}
 
-	mutex_unlock(&zfcp_data.config_mutex);
+void zfcp_ccw_adapter_put(struct zfcp_adapter *adapter)
+{
+	unsigned long flags;
 
-	return 0;
+	spin_lock_irqsave(&zfcp_ccw_adapter_ref_lock, flags);
+	kref_put(&adapter->ref, zfcp_adapter_release);
+	spin_unlock_irqrestore(&zfcp_ccw_adapter_ref_lock, flags);
 }
 
 static int zfcp_ccw_activate(struct ccw_device *cdev)
 
 {
-	struct zfcp_adapter *adapter = dev_get_drvdata(&cdev->dev);
+	struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
 
 	if (!adapter)
 		return 0;
@@ -46,6 +52,8 @@ static int zfcp_ccw_activate(struct ccw_device *cdev)
 	zfcp_erp_wait(adapter);
 	flush_work(&adapter->scan_work);
 
+	zfcp_ccw_adapter_put(adapter);
+
 	return 0;
 }
 
@@ -67,28 +75,28 @@ int zfcp_ccw_priv_sch(struct zfcp_adapter *adapter)
 
 /**
  * zfcp_ccw_probe - probe function of zfcp driver
- * @ccw_device: pointer to belonging ccw device
+ * @cdev: pointer to belonging ccw device
  *
  * This function gets called by the common i/o layer for each FCP
  * device found on the current system. This is only a stub to make cio
  * work: To only allocate adapter resources for devices actually used,
  * the allocation is deferred to the first call to ccw_set_online.
  */
-static int zfcp_ccw_probe(struct ccw_device *ccw_device)
+static int zfcp_ccw_probe(struct ccw_device *cdev)
 {
 	return 0;
 }
 
 /**
  * zfcp_ccw_remove - remove function of zfcp driver
- * @ccw_device: pointer to belonging ccw device
+ * @cdev: pointer to belonging ccw device
  *
  * This function gets called by the common i/o layer and removes an adapter
  * from the system. Task of this function is to get rid of all units and
  * ports that belong to this adapter. And in addition all resources of this
  * adapter will be freed too.
  */
-static void zfcp_ccw_remove(struct ccw_device *ccw_device)
+static void zfcp_ccw_remove(struct ccw_device *cdev)
 {
 	struct zfcp_adapter *adapter;
 	struct zfcp_port *port, *p;
@@ -96,49 +104,37 @@ static void zfcp_ccw_remove(struct ccw_device *ccw_device)
 	LIST_HEAD(unit_remove_lh);
 	LIST_HEAD(port_remove_lh);
 
-	ccw_device_set_offline(ccw_device);
+	ccw_device_set_offline(cdev);
 
-	mutex_lock(&zfcp_data.config_mutex);
-	adapter = dev_get_drvdata(&ccw_device->dev);
+	adapter = zfcp_ccw_adapter_by_cdev(cdev);
 	if (!adapter)
-		goto out;
-	mutex_unlock(&zfcp_data.config_mutex);
+		return;
 
-	cancel_work_sync(&adapter->scan_work);
-
-	mutex_lock(&zfcp_data.config_mutex);
-
-	/* this also removes the scsi devices, so call it first */
-	zfcp_adapter_scsi_unregister(adapter);
-
-	write_lock_irq(&zfcp_data.config_lock);
-	list_for_each_entry_safe(port, p, &adapter->port_list_head, list) {
-		list_for_each_entry_safe(unit, u, &port->unit_list_head, list) {
+	write_lock_irq(&adapter->port_list_lock);
+	list_for_each_entry_safe(port, p, &adapter->port_list, list) {
+		write_lock(&port->unit_list_lock);
+		list_for_each_entry_safe(unit, u, &port->unit_list, list)
 			list_move(&unit->list, &unit_remove_lh);
-			atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE,
-					&unit->status);
-		}
+		write_unlock(&port->unit_list_lock);
 		list_move(&port->list, &port_remove_lh);
-		atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status);
 	}
-	atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status);
-	write_unlock_irq(&zfcp_data.config_lock);
+	write_unlock_irq(&adapter->port_list_lock);
+	zfcp_ccw_adapter_put(adapter); /* put from zfcp_ccw_adapter_by_cdev */
 
-	list_for_each_entry_safe(port, p, &port_remove_lh, list) {
-		list_for_each_entry_safe(unit, u, &unit_remove_lh, list)
-			zfcp_unit_dequeue(unit);
-		zfcp_port_dequeue(port);
-	}
-	wait_event(adapter->remove_wq, atomic_read(&adapter->refcount) == 0);
-	zfcp_adapter_dequeue(adapter);
+	list_for_each_entry_safe(unit, u, &unit_remove_lh, list)
+		zfcp_device_unregister(&unit->sysfs_device,
+				       &zfcp_sysfs_unit_attrs);
 
-out:
-	mutex_unlock(&zfcp_data.config_mutex);
+	list_for_each_entry_safe(port, p, &port_remove_lh, list)
+		zfcp_device_unregister(&port->sysfs_device,
+				       &zfcp_sysfs_port_attrs);
+
+	zfcp_adapter_unregister(adapter);
 }
 
 /**
  * zfcp_ccw_set_online - set_online function of zfcp driver
- * @ccw_device: pointer to belonging ccw device
+ * @cdev: pointer to belonging ccw device
  *
  * This function gets called by the common i/o layer and sets an
  * adapter into state online.  The first call will allocate all
@@ -149,23 +145,20 @@ out:
  * the SCSI stack, that the QDIO queues will be set up and that the
  * adapter will be opened.
  */
-static int zfcp_ccw_set_online(struct ccw_device *ccw_device)
+static int zfcp_ccw_set_online(struct ccw_device *cdev)
 {
-	struct zfcp_adapter *adapter;
-	int ret = 0;
-
-	mutex_lock(&zfcp_data.config_mutex);
-	adapter = dev_get_drvdata(&ccw_device->dev);
+	struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
 
 	if (!adapter) {
-		ret = zfcp_adapter_enqueue(ccw_device);
-		if (ret) {
-			dev_err(&ccw_device->dev,
+		adapter = zfcp_adapter_enqueue(cdev);
+
+		if (IS_ERR(adapter)) {
+			dev_err(&cdev->dev,
 				"Setting up data structures for the "
 				"FCP adapter failed\n");
-			goto out;
+			return PTR_ERR(adapter);
 		}
-		adapter = dev_get_drvdata(&ccw_device->dev);
+		kref_get(&adapter->ref);
 	}
 
 	/* initialize request counter */
@@ -177,58 +170,61 @@ static int zfcp_ccw_set_online(struct ccw_device *ccw_device)
 	zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
 				"ccsonl2", NULL);
 	zfcp_erp_wait(adapter);
-out:
-	mutex_unlock(&zfcp_data.config_mutex);
-	if (!ret)
-		flush_work(&adapter->scan_work);
-	return ret;
+
+	flush_work(&adapter->scan_work);
+
+	zfcp_ccw_adapter_put(adapter);
+	return 0;
 }
 
 /**
  * zfcp_ccw_set_offline - set_offline function of zfcp driver
- * @ccw_device: pointer to belonging ccw device
+ * @cdev: pointer to belonging ccw device
  *
  * This function gets called by the common i/o layer and sets an adapter
  * into state offline.
  */
-static int zfcp_ccw_set_offline(struct ccw_device *ccw_device)
+static int zfcp_ccw_set_offline(struct ccw_device *cdev)
 {
-	struct zfcp_adapter *adapter;
+	struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
+
+	if (!adapter)
+		return 0;
 
-	mutex_lock(&zfcp_data.config_mutex);
-	adapter = dev_get_drvdata(&ccw_device->dev);
 	zfcp_erp_adapter_shutdown(adapter, 0, "ccsoff1", NULL);
 	zfcp_erp_wait(adapter);
-	mutex_unlock(&zfcp_data.config_mutex);
+
+	zfcp_ccw_adapter_put(adapter);
 	return 0;
 }
 
 /**
  * zfcp_ccw_notify - ccw notify function
- * @ccw_device: pointer to belonging ccw device
+ * @cdev: pointer to belonging ccw device
  * @event: indicates if adapter was detached or attached
  *
  * This function gets called by the common i/o layer if an adapter has gone
  * or reappeared.
  */
-static int zfcp_ccw_notify(struct ccw_device *ccw_device, int event)
+static int zfcp_ccw_notify(struct ccw_device *cdev, int event)
 {
-	struct zfcp_adapter *adapter = dev_get_drvdata(&ccw_device->dev);
+	struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
+
+	if (!adapter)
+		return 1;
 
 	switch (event) {
 	case CIO_GONE:
-		dev_warn(&adapter->ccw_device->dev,
-			 "The FCP device has been detached\n");
+		dev_warn(&cdev->dev, "The FCP device has been detached\n");
 		zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti1", NULL);
 		break;
 	case CIO_NO_PATH:
-		dev_warn(&adapter->ccw_device->dev,
+		dev_warn(&cdev->dev,
 			 "The CHPID for the FCP device is offline\n");
 		zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti2", NULL);
 		break;
 	case CIO_OPER:
-		dev_info(&adapter->ccw_device->dev,
-			 "The FCP device is operational again\n");
+		dev_info(&cdev->dev, "The FCP device is operational again\n");
 		zfcp_erp_modify_adapter_status(adapter, "ccnoti3", NULL,
 					       ZFCP_STATUS_COMMON_RUNNING,
 					       ZFCP_SET);
@@ -236,11 +232,13 @@ static int zfcp_ccw_notify(struct ccw_device *ccw_device, int event)
 					"ccnoti4", NULL);
 		break;
 	case CIO_BOXED:
-		dev_warn(&adapter->ccw_device->dev, "The FCP device "
-			 "did not respond within the specified time\n");
+		dev_warn(&cdev->dev, "The FCP device did not respond within "
+				     "the specified time\n");
 		zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti5", NULL);
 		break;
 	}
+
+	zfcp_ccw_adapter_put(adapter);
 	return 1;
 }
 
@@ -250,18 +248,16 @@ static int zfcp_ccw_notify(struct ccw_device *ccw_device, int event)
  */
 static void zfcp_ccw_shutdown(struct ccw_device *cdev)
 {
-	struct zfcp_adapter *adapter;
+	struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
 
-	mutex_lock(&zfcp_data.config_mutex);
-	adapter = dev_get_drvdata(&cdev->dev);
 	if (!adapter)
-		goto out;
+		return;
 
 	zfcp_erp_adapter_shutdown(adapter, 0, "ccshut1", NULL);
 	zfcp_erp_wait(adapter);
 	zfcp_erp_thread_kill(adapter);
-out:
-	mutex_unlock(&zfcp_data.config_mutex);
+
+	zfcp_ccw_adapter_put(adapter);
 }
 
 struct ccw_driver zfcp_ccw_driver = {
@@ -274,18 +270,7 @@ struct ccw_driver zfcp_ccw_driver = {
 	.set_offline = zfcp_ccw_set_offline,
 	.notify      = zfcp_ccw_notify,
 	.shutdown    = zfcp_ccw_shutdown,
-	.freeze      = zfcp_ccw_suspend,
+	.freeze      = zfcp_ccw_set_offline,
 	.thaw	     = zfcp_ccw_activate,
 	.restore     = zfcp_ccw_activate,
 };
-
-/**
- * zfcp_ccw_register - ccw register function
- *
- * Registers the driver at the common i/o layer. This function will be called
- * at module load time/system start.
- */
-int __init zfcp_ccw_register(void)
-{
-	return ccw_driver_register(&zfcp_ccw_driver);
-}
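zfcp_ccw_adapter_by_cdev() and zfcp_ccw_adapter_put() above pair every drvdata lookup with a kref under a single spinlock, so the final kref_put() cannot race a concurrent lookup into handing out a freed adapter. A minimal sketch of the same idiom; my_adapter and the other names are illustrative, not from this patch.

#include <linux/device.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_adapter {
	struct kref ref;
	/* ... driver state ... */
};

static DEFINE_SPINLOCK(my_ref_lock);

static void my_adapter_release(struct kref *ref)
{
	kfree(container_of(ref, struct my_adapter, ref));
}

static struct my_adapter *my_adapter_get(struct device *dev)
{
	struct my_adapter *adapter;
	unsigned long flags;

	spin_lock_irqsave(&my_ref_lock, flags);
	adapter = dev_get_drvdata(dev);
	if (adapter)
		kref_get(&adapter->ref);	/* pin while still alive */
	spin_unlock_irqrestore(&my_ref_lock, flags);
	return adapter;
}

static void my_adapter_put(struct my_adapter *adapter)
{
	unsigned long flags;

	spin_lock_irqsave(&my_ref_lock, flags);
	kref_put(&adapter->ref, my_adapter_release);	/* may free here */
	spin_unlock_irqrestore(&my_ref_lock, flags);
}
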
diff --git a/drivers/s390/scsi/zfcp_cfdc.c b/drivers/s390/scsi/zfcp_cfdc.c
index ef681dfed0cc..f932400e980a 100644
--- a/drivers/s390/scsi/zfcp_cfdc.c
+++ b/drivers/s390/scsi/zfcp_cfdc.c
@@ -86,22 +86,17 @@ static int zfcp_cfdc_copy_to_user(void __user  *user_buffer,
 static struct zfcp_adapter *zfcp_cfdc_get_adapter(u32 devno)
 {
 	char busid[9];
-	struct ccw_device *ccwdev;
-	struct zfcp_adapter *adapter = NULL;
+	struct ccw_device *cdev;
+	struct zfcp_adapter *adapter;
 
 	snprintf(busid, sizeof(busid), "0.0.%04x", devno);
-	ccwdev = get_ccwdev_by_busid(&zfcp_ccw_driver, busid);
-	if (!ccwdev)
-		goto out;
-
-	adapter = dev_get_drvdata(&ccwdev->dev);
-	if (!adapter)
-		goto out_put;
-
-	zfcp_adapter_get(adapter);
-out_put:
-	put_device(&ccwdev->dev);
-out:
+	cdev = get_ccwdev_by_busid(&zfcp_ccw_driver, busid);
+	if (!cdev)
+		return NULL;
+
+	adapter = zfcp_ccw_adapter_by_cdev(cdev);
+
+	put_device(&cdev->dev);
 	return adapter;
 }
 
@@ -212,7 +207,6 @@ static long zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command,
 		retval = -ENXIO;
 		goto free_buffer;
 	}
-	zfcp_adapter_get(adapter);
 
 	retval = zfcp_cfdc_sg_setup(data->command, fsf_cfdc->sg,
 				    data_user->control_file);
@@ -245,7 +239,7 @@ static long zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command,
  free_sg:
 	zfcp_sg_free_table(fsf_cfdc->sg, ZFCP_CFDC_PAGES);
  adapter_put:
-	zfcp_adapter_put(adapter);
+	zfcp_ccw_adapter_put(adapter);
  free_buffer:
 	kfree(data);
  no_mem_sense:
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 215b70749e95..84450955ae11 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -13,6 +13,7 @@
 #include <asm/debug.h>
 #include "zfcp_dbf.h"
 #include "zfcp_ext.h"
+#include "zfcp_fc.h"
 
 static u32 dbfsize = 4;
 
@@ -177,8 +178,7 @@ void _zfcp_dbf_hba_fsf_response(const char *tag2, int level,
 
 	case FSF_QTCB_SEND_ELS:
 		send_els = (struct zfcp_send_els *)fsf_req->data;
-		response->u.els.d_id = qtcb->bottom.support.d_id;
-		response->u.els.ls_code = send_els->ls_code >> 24;
+		response->u.els.d_id = ntoh24(qtcb->bottom.support.d_id);
 		break;
 
 	case FSF_QTCB_ABORT_FCP_CMND:
@@ -348,7 +348,6 @@ static void zfcp_dbf_hba_view_response(char **p,
 
 	case FSF_QTCB_SEND_ELS:
 		zfcp_dbf_out(p, "d_id", "0x%06x", r->u.els.d_id);
-		zfcp_dbf_out(p, "ls_code", "0x%02x", r->u.els.ls_code);
 		break;
 
 	case FSF_QTCB_ABORT_FCP_CMND:
@@ -677,14 +676,14 @@ void zfcp_dbf_rec_action(char *id2, struct zfcp_erp_action *erp_action)
 /**
  * zfcp_dbf_san_ct_request - trace event for issued CT request
  * @fsf_req: request containing issued CT data
+ * @d_id: destination id where ct request is sent to
  */
-void zfcp_dbf_san_ct_request(struct zfcp_fsf_req *fsf_req)
+void zfcp_dbf_san_ct_request(struct zfcp_fsf_req *fsf_req, u32 d_id)
 {
-	struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data;
-	struct zfcp_wka_port *wka_port = ct->wka_port;
-	struct zfcp_adapter *adapter = wka_port->adapter;
+	struct zfcp_fsf_ct_els *ct = (struct zfcp_fsf_ct_els *)fsf_req->data;
+	struct zfcp_adapter *adapter = fsf_req->adapter;
 	struct zfcp_dbf *dbf = adapter->dbf;
-	struct ct_hdr *hdr = sg_virt(ct->req);
+	struct fc_ct_hdr *hdr = sg_virt(ct->req);
 	struct zfcp_dbf_san_record *r = &dbf->san_buf;
 	struct zfcp_dbf_san_record_ct_request *oct = &r->u.ct_req;
 	int level = 3;
@@ -695,19 +694,18 @@ void zfcp_dbf_san_ct_request(struct zfcp_fsf_req *fsf_req)
 	strncpy(r->tag, "octc", ZFCP_DBF_TAG_SIZE);
 	r->fsf_reqid = fsf_req->req_id;
 	r->fsf_seqno = fsf_req->seq_no;
-	r->s_id = fc_host_port_id(adapter->scsi_host);
-	r->d_id = wka_port->d_id;
-	oct->cmd_req_code = hdr->cmd_rsp_code;
-	oct->revision = hdr->revision;
-	oct->gs_type = hdr->gs_type;
-	oct->gs_subtype = hdr->gs_subtype;
-	oct->options = hdr->options;
-	oct->max_res_size = hdr->max_res_size;
-	oct->len = min((int)ct->req->length - (int)sizeof(struct ct_hdr),
+	oct->d_id = d_id;
+	oct->cmd_req_code = hdr->ct_cmd;
+	oct->revision = hdr->ct_rev;
+	oct->gs_type = hdr->ct_fs_type;
+	oct->gs_subtype = hdr->ct_fs_subtype;
+	oct->options = hdr->ct_options;
+	oct->max_res_size = hdr->ct_mr_size;
+	oct->len = min((int)ct->req->length - (int)sizeof(struct fc_ct_hdr),
 		       ZFCP_DBF_SAN_MAX_PAYLOAD);
 	debug_event(dbf->san, level, r, sizeof(*r));
 	zfcp_dbf_hexdump(dbf->san, r, sizeof(*r), level,
-			 (void *)hdr + sizeof(struct ct_hdr), oct->len);
+			 (void *)hdr + sizeof(struct fc_ct_hdr), oct->len);
 	spin_unlock_irqrestore(&dbf->san_lock, flags);
 }
 
@@ -717,10 +715,9 @@ void zfcp_dbf_san_ct_request(struct zfcp_fsf_req *fsf_req)
  */
 void zfcp_dbf_san_ct_response(struct zfcp_fsf_req *fsf_req)
 {
-	struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data;
-	struct zfcp_wka_port *wka_port = ct->wka_port;
-	struct zfcp_adapter *adapter = wka_port->adapter;
-	struct ct_hdr *hdr = sg_virt(ct->resp);
+	struct zfcp_fsf_ct_els *ct = (struct zfcp_fsf_ct_els *)fsf_req->data;
+	struct zfcp_adapter *adapter = fsf_req->adapter;
+	struct fc_ct_hdr *hdr = sg_virt(ct->resp);
 	struct zfcp_dbf *dbf = adapter->dbf;
 	struct zfcp_dbf_san_record *r = &dbf->san_buf;
 	struct zfcp_dbf_san_record_ct_response *rct = &r->u.ct_resp;
@@ -732,25 +729,23 @@ void zfcp_dbf_san_ct_response(struct zfcp_fsf_req *fsf_req)
 	strncpy(r->tag, "rctc", ZFCP_DBF_TAG_SIZE);
 	r->fsf_reqid = fsf_req->req_id;
 	r->fsf_seqno = fsf_req->seq_no;
-	r->s_id = wka_port->d_id;
-	r->d_id = fc_host_port_id(adapter->scsi_host);
-	rct->cmd_rsp_code = hdr->cmd_rsp_code;
-	rct->revision = hdr->revision;
-	rct->reason_code = hdr->reason_code;
-	rct->expl = hdr->reason_code_expl;
-	rct->vendor_unique = hdr->vendor_unique;
-	rct->max_res_size = hdr->max_res_size;
-	rct->len = min((int)ct->resp->length - (int)sizeof(struct ct_hdr),
+	rct->cmd_rsp_code = hdr->ct_cmd;
+	rct->revision = hdr->ct_rev;
+	rct->reason_code = hdr->ct_reason;
+	rct->expl = hdr->ct_explan;
+	rct->vendor_unique = hdr->ct_vendor;
+	rct->max_res_size = hdr->ct_mr_size;
+	rct->len = min((int)ct->resp->length - (int)sizeof(struct fc_ct_hdr),
 		       ZFCP_DBF_SAN_MAX_PAYLOAD);
 	debug_event(dbf->san, level, r, sizeof(*r));
 	zfcp_dbf_hexdump(dbf->san, r, sizeof(*r), level,
-			 (void *)hdr + sizeof(struct ct_hdr), rct->len);
+			 (void *)hdr + sizeof(struct fc_ct_hdr), rct->len);
 	spin_unlock_irqrestore(&dbf->san_lock, flags);
 }
 
 static void zfcp_dbf_san_els(const char *tag, int level,
-			     struct zfcp_fsf_req *fsf_req, u32 s_id, u32 d_id,
-			     u8 ls_code, void *buffer, int buflen)
+			     struct zfcp_fsf_req *fsf_req, u32 d_id,
+			     void *buffer, int buflen)
 {
 	struct zfcp_adapter *adapter = fsf_req->adapter;
 	struct zfcp_dbf *dbf = adapter->dbf;
@@ -762,9 +757,7 @@ static void zfcp_dbf_san_els(const char *tag, int level,
 	strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE);
 	rec->fsf_reqid = fsf_req->req_id;
 	rec->fsf_seqno = fsf_req->seq_no;
-	rec->s_id = s_id;
-	rec->d_id = d_id;
-	rec->u.els.ls_code = ls_code;
+	rec->u.els.d_id = d_id;
 	debug_event(dbf->san, level, rec, sizeof(*rec));
 	zfcp_dbf_hexdump(dbf->san, rec, sizeof(*rec), level,
 			 buffer, min(buflen, ZFCP_DBF_SAN_MAX_PAYLOAD));
@@ -777,12 +770,11 @@ static void zfcp_dbf_san_els(const char *tag, int level,
  */
 void zfcp_dbf_san_els_request(struct zfcp_fsf_req *fsf_req)
 {
-	struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data;
+	struct zfcp_fsf_ct_els *els = (struct zfcp_fsf_ct_els *)fsf_req->data;
+	u32 d_id = ntoh24(fsf_req->qtcb->bottom.support.d_id);
 
-	zfcp_dbf_san_els("oels", 2, fsf_req,
-			       fc_host_port_id(els->adapter->scsi_host),
-			       els->d_id, *(u8 *) sg_virt(els->req),
-			       sg_virt(els->req), els->req->length);
+	zfcp_dbf_san_els("oels", 2, fsf_req, d_id,
+			 sg_virt(els->req), els->req->length);
 }
 
 /**
@@ -791,12 +783,11 @@ void zfcp_dbf_san_els_request(struct zfcp_fsf_req *fsf_req)
  */
 void zfcp_dbf_san_els_response(struct zfcp_fsf_req *fsf_req)
 {
-	struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data;
+	struct zfcp_fsf_ct_els *els = (struct zfcp_fsf_ct_els *)fsf_req->data;
+	u32 d_id = ntoh24(fsf_req->qtcb->bottom.support.d_id);
 
-	zfcp_dbf_san_els("rels", 2, fsf_req, els->d_id,
-			       fc_host_port_id(els->adapter->scsi_host),
-			       *(u8 *)sg_virt(els->req), sg_virt(els->resp),
-			       els->resp->length);
+	zfcp_dbf_san_els("rels", 2, fsf_req, d_id,
+			       sg_virt(els->resp), els->resp->length);
 }
 
 /**
@@ -805,16 +796,13 @@ void zfcp_dbf_san_els_response(struct zfcp_fsf_req *fsf_req)
  */
 void zfcp_dbf_san_incoming_els(struct zfcp_fsf_req *fsf_req)
 {
-	struct zfcp_adapter *adapter = fsf_req->adapter;
 	struct fsf_status_read_buffer *buf =
 			(struct fsf_status_read_buffer *)fsf_req->data;
 	int length = (int)buf->length -
 		     (int)((void *)&buf->payload - (void *)buf);
 
-	zfcp_dbf_san_els("iels", 1, fsf_req, buf->d_id,
-			       fc_host_port_id(adapter->scsi_host),
-			       buf->payload.data[0], (void *)buf->payload.data,
-			       length);
+	zfcp_dbf_san_els("iels", 1, fsf_req, ntoh24(buf->d_id),
+			       (void *)buf->payload.data, length);
 }
 
 static int zfcp_dbf_san_view_format(debug_info_t *id, struct debug_view *view,
@@ -829,11 +817,10 @@ static int zfcp_dbf_san_view_format(debug_info_t *id, struct debug_view *view,
 	zfcp_dbf_tag(&p, "tag", r->tag);
 	zfcp_dbf_out(&p, "fsf_reqid", "0x%0Lx", r->fsf_reqid);
 	zfcp_dbf_out(&p, "fsf_seqno", "0x%08x", r->fsf_seqno);
-	zfcp_dbf_out(&p, "s_id", "0x%06x", r->s_id);
-	zfcp_dbf_out(&p, "d_id", "0x%06x", r->d_id);
 
 	if (strncmp(r->tag, "octc", ZFCP_DBF_TAG_SIZE) == 0) {
 		struct zfcp_dbf_san_record_ct_request *ct = &r->u.ct_req;
+		zfcp_dbf_out(&p, "d_id", "0x%06x", ct->d_id);
 		zfcp_dbf_out(&p, "cmd_req_code", "0x%04x", ct->cmd_req_code);
 		zfcp_dbf_out(&p, "revision", "0x%02x", ct->revision);
 		zfcp_dbf_out(&p, "gs_type", "0x%02x", ct->gs_type);
@@ -852,7 +839,7 @@ static int zfcp_dbf_san_view_format(debug_info_t *id, struct debug_view *view,
 		   strncmp(r->tag, "rels", ZFCP_DBF_TAG_SIZE) == 0 ||
 		   strncmp(r->tag, "iels", ZFCP_DBF_TAG_SIZE) == 0) {
 		struct zfcp_dbf_san_record_els *els = &r->u.els;
-		zfcp_dbf_out(&p, "ls_code", "0x%02x", els->ls_code);
+		zfcp_dbf_out(&p, "d_id", "0x%06x", els->d_id);
 	}
 	return p - out_buf;
 }
@@ -870,8 +857,9 @@ void _zfcp_dbf_scsi(const char *tag, const char *tag2, int level,
 	struct zfcp_dbf_scsi_record *rec = &dbf->scsi_buf;
 	struct zfcp_dbf_dump *dump = (struct zfcp_dbf_dump *)rec;
 	unsigned long flags;
-	struct fcp_rsp_iu *fcp_rsp;
-	char *fcp_rsp_info = NULL, *fcp_sns_info = NULL;
+	struct fcp_resp_with_ext *fcp_rsp;
+	struct fcp_resp_rsp_info *fcp_rsp_info = NULL;
+	char *fcp_sns_info = NULL;
 	int offset = 0, buflen = 0;
 
 	spin_lock_irqsave(&dbf->scsi_lock, flags);
@@ -895,20 +883,22 @@ void _zfcp_dbf_scsi(const char *tag, const char *tag2, int level,
 				rec->scsi_allowed = scsi_cmnd->allowed;
 			}
 			if (fsf_req != NULL) {
-				fcp_rsp = (struct fcp_rsp_iu *)
-				    &(fsf_req->qtcb->bottom.io.fcp_rsp);
-				fcp_rsp_info = (unsigned char *) &fcp_rsp[1];
-				fcp_sns_info =
-				    zfcp_get_fcp_sns_info_ptr(fcp_rsp);
-
-				rec->rsp_validity = fcp_rsp->validity.value;
-				rec->rsp_scsi_status = fcp_rsp->scsi_status;
-				rec->rsp_resid = fcp_rsp->fcp_resid;
-				if (fcp_rsp->validity.bits.fcp_rsp_len_valid)
-					rec->rsp_code = *(fcp_rsp_info + 3);
-				if (fcp_rsp->validity.bits.fcp_sns_len_valid) {
-					buflen = min((int)fcp_rsp->fcp_sns_len,
-						     ZFCP_DBF_SCSI_MAX_FCP_SNS_INFO);
+				fcp_rsp = (struct fcp_resp_with_ext *)
+					&(fsf_req->qtcb->bottom.io.fcp_rsp);
+				fcp_rsp_info = (struct fcp_resp_rsp_info *)
+					&fcp_rsp[1];
+				fcp_sns_info = (char *) &fcp_rsp[1];
+				if (fcp_rsp->resp.fr_flags & FCP_RSP_LEN_VAL)
+					fcp_sns_info += fcp_rsp->ext.fr_rsp_len;
+
+				rec->rsp_validity = fcp_rsp->resp.fr_flags;
+				rec->rsp_scsi_status = fcp_rsp->resp.fr_status;
+				rec->rsp_resid = fcp_rsp->ext.fr_resid;
+				if (fcp_rsp->resp.fr_flags & FCP_RSP_LEN_VAL)
+					rec->rsp_code = fcp_rsp_info->rsp_code;
+				if (fcp_rsp->resp.fr_flags & FCP_SNS_LEN_VAL) {
+					buflen = min(fcp_rsp->ext.fr_sns_len,
+					   (u32)ZFCP_DBF_SCSI_MAX_FCP_SNS_INFO);
 					rec->sns_info_len = buflen;
 					memcpy(rec->sns_info, fcp_sns_info,
 					       min(buflen,
@@ -1067,6 +1057,8 @@ err_out:
  */
 void zfcp_dbf_adapter_unregister(struct zfcp_dbf *dbf)
 {
+	if (!dbf)
+		return;
 	debug_unregister(dbf->scsi);
 	debug_unregister(dbf->san);
 	debug_unregister(dbf->hba);
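
The _zfcp_dbf_scsi() hunk above walks the FCP_RSP IU via the common
struct fcp_resp_with_ext: the response-info area directly follows the
fixed part, and the sense data follows the response-info area (whose
length is fr_rsp_len) when FCP_RSP_LEN_VAL is set. A minimal sketch of
that pointer arithmetic, assuming the definitions from
<scsi/fc/fc_fcp.h> and a big-endian host as on s390 (so fr_rsp_len is
used without byte swapping):

#include <scsi/fc/fc_fcp.h>

/* locate the SCSI sense data inside an FCP_RSP IU -- sketch only */
static char *fcp_rsp_sense_ptr(struct fcp_resp_with_ext *fcp_rsp)
{
	char *p = (char *)&fcp_rsp[1];	/* first byte after the fixed part */

	if (fcp_rsp->resp.fr_flags & FCP_RSP_LEN_VAL)
		p += fcp_rsp->ext.fr_rsp_len;	/* skip response-info area */
	return p;	/* start of the sense data, fr_sns_len bytes */
}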
diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
index 6b1461e8f847..8b7fd9a1033e 100644
--- a/drivers/s390/scsi/zfcp_dbf.h
+++ b/drivers/s390/scsi/zfcp_dbf.h
@@ -22,6 +22,7 @@
 #ifndef ZFCP_DBF_H
 #define ZFCP_DBF_H
 
+#include <scsi/fc/fc_fcp.h>
 #include "zfcp_ext.h"
 #include "zfcp_fsf.h"
 #include "zfcp_def.h"
@@ -122,7 +123,6 @@ struct zfcp_dbf_hba_record_response {
 		} unit;
 		struct {
 			u32 d_id;
-			u8 ls_code;
 		} els;
 	} u;
 } __attribute__ ((packed));
@@ -166,6 +166,7 @@ struct zfcp_dbf_san_record_ct_request {
 	u8 options;
 	u16 max_res_size;
 	u32 len;
+	u32 d_id;
 } __attribute__ ((packed));
 
 struct zfcp_dbf_san_record_ct_response {
@@ -179,16 +180,13 @@ struct zfcp_dbf_san_record_ct_response {
 } __attribute__ ((packed));
 
 struct zfcp_dbf_san_record_els {
-	u8 ls_code;
-	u32 len;
+	u32 d_id;
 } __attribute__ ((packed));
 
 struct zfcp_dbf_san_record {
 	u8 tag[ZFCP_DBF_TAG_SIZE];
 	u64 fsf_reqid;
 	u32 fsf_seqno;
-	u32 s_id;
-	u32 d_id;
 	union {
 		struct zfcp_dbf_san_record_ct_request ct_req;
 		struct zfcp_dbf_san_record_ct_response ct_resp;
@@ -343,7 +341,7 @@ static inline
 void zfcp_dbf_scsi_devreset(const char *tag, u8 flag, struct zfcp_unit *unit,
 			    struct scsi_cmnd *scsi_cmnd)
 {
-	zfcp_dbf_scsi(flag == FCP_TARGET_RESET ? "trst" : "lrst", tag, 1,
+	zfcp_dbf_scsi(flag == FCP_TMF_TGT_RESET ? "trst" : "lrst", tag, 1,
 			    unit->port->adapter->dbf, scsi_cmnd, NULL, 0);
 }
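
The d_id fields added to the trace records are filled through the
ntoh24()/hton24() helpers that this series adopts from the common FC
code for 3-byte big-endian address fields (D_ID, S_ID, the FC_ID in a
GID_PN response). An illustrative stand-alone version of the two
helpers -- behaviorally equivalent, not the kernel's own definition:

#include <stdint.h>

static inline uint32_t ntoh24(const uint8_t *p)
{
	/* assemble a 3-byte big-endian FC address into host order */
	return (p[0] << 16) | (p[1] << 8) | p[2];
}

static inline void hton24(uint8_t *p, uint32_t v)
{
	/* store the low 24 bits of v as a big-endian 3-byte field */
	p[0] = (v >> 16) & 0xff;
	p[1] = (v >> 8) & 0xff;
	p[2] = v & 0xff;
}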
 
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 7da2fad8f515..e1b5b88e2ddb 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -71,131 +71,6 @@
 /* timeout value for "default timer" for fsf requests */
 #define ZFCP_FSF_REQUEST_TIMEOUT (60*HZ)
 
-/*************** FIBRE CHANNEL PROTOCOL SPECIFIC DEFINES ********************/
-
-/* task attribute values in FCP-2 FCP_CMND IU */
-#define SIMPLE_Q	0
-#define HEAD_OF_Q	1
-#define ORDERED_Q	2
-#define ACA_Q		4
-#define UNTAGGED	5
-
-/* task management flags in FCP-2 FCP_CMND IU */
-#define FCP_CLEAR_ACA		0x40
-#define FCP_TARGET_RESET	0x20
-#define FCP_LOGICAL_UNIT_RESET	0x10
-#define FCP_CLEAR_TASK_SET	0x04
-#define FCP_ABORT_TASK_SET	0x02
-
-#define FCP_CDB_LENGTH		16
-
-#define ZFCP_DID_MASK           0x00FFFFFF
-
-/* FCP(-2) FCP_CMND IU */
-struct fcp_cmnd_iu {
-	u64 fcp_lun;	   /* FCP logical unit number */
-	u8  crn;	           /* command reference number */
-	u8  reserved0:5;	   /* reserved */
-	u8  task_attribute:3;	   /* task attribute */
-	u8  task_management_flags; /* task management flags */
-	u8  add_fcp_cdb_length:6;  /* additional FCP_CDB length */
-	u8  rddata:1;              /* read data */
-	u8  wddata:1;              /* write data */
-	u8  fcp_cdb[FCP_CDB_LENGTH];
-} __attribute__((packed));
-
-/* FCP(-2) FCP_RSP IU */
-struct fcp_rsp_iu {
-	u8  reserved0[10];
-	union {
-		struct {
-			u8 reserved1:3;
-			u8 fcp_conf_req:1;
-			u8 fcp_resid_under:1;
-			u8 fcp_resid_over:1;
-			u8 fcp_sns_len_valid:1;
-			u8 fcp_rsp_len_valid:1;
-		} bits;
-		u8 value;
-	} validity;
-	u8  scsi_status;
-	u32 fcp_resid;
-	u32 fcp_sns_len;
-	u32 fcp_rsp_len;
-} __attribute__((packed));
-
-
-#define RSP_CODE_GOOD		 0
-#define RSP_CODE_LENGTH_MISMATCH 1
-#define RSP_CODE_FIELD_INVALID	 2
-#define RSP_CODE_RO_MISMATCH	 3
-#define RSP_CODE_TASKMAN_UNSUPP	 4
-#define RSP_CODE_TASKMAN_FAILED	 5
-
-/* see fc-fs */
-#define LS_RSCN  0x61
-#define LS_LOGO  0x05
-#define LS_PLOGI 0x03
-
-struct fcp_rscn_head {
-        u8  command;
-        u8  page_length; /* always 0x04 */
-        u16 payload_len;
-} __attribute__((packed));
-
-struct fcp_rscn_element {
-        u8  reserved:2;
-        u8  event_qual:4;
-        u8  addr_format:2;
-        u32 nport_did:24;
-} __attribute__((packed));
-
-/* see fc-ph */
-struct fcp_logo {
-        u32 command;
-        u32 nport_did;
-	u64 nport_wwpn;
-} __attribute__((packed));
-
-/*
- * FC-FS stuff
- */
-#define R_A_TOV				10 /* seconds */
-
-#define ZFCP_LS_RLS			0x0f
-#define ZFCP_LS_ADISC			0x52
-#define ZFCP_LS_RPS			0x56
-#define ZFCP_LS_RSCN			0x61
-#define ZFCP_LS_RNID			0x78
-
-struct zfcp_ls_adisc {
-	u8		code;
-	u8		field[3];
-	u32		hard_nport_id;
-	u64		wwpn;
-	u64		wwnn;
-	u32		nport_id;
-} __attribute__ ((packed));
-
-/*
- * FC-GS-2 stuff
- */
-#define ZFCP_CT_REVISION		0x01
-#define ZFCP_CT_DIRECTORY_SERVICE	0xFC
-#define ZFCP_CT_NAME_SERVER		0x02
-#define ZFCP_CT_SYNCHRONOUS		0x00
-#define ZFCP_CT_SCSI_FCP		0x08
-#define ZFCP_CT_UNABLE_TO_PERFORM_CMD	0x09
-#define ZFCP_CT_GID_PN			0x0121
-#define ZFCP_CT_GPN_FT			0x0172
-#define ZFCP_CT_ACCEPT			0x8002
-#define ZFCP_CT_REJECT			0x8001
-
-/*
- * FC-GS-4 stuff
- */
-#define ZFCP_CT_TIMEOUT			(3 * R_A_TOV)
-
 /*************** ADAPTER/PORT/UNIT AND FSF_REQ STATUS FLAGS ******************/
 
 /*
@@ -205,7 +80,6 @@ struct zfcp_ls_adisc {
 #define ZFCP_COMMON_FLAGS			0xfff00000
 
 /* common status bits */
-#define ZFCP_STATUS_COMMON_REMOVE		0x80000000
 #define ZFCP_STATUS_COMMON_RUNNING		0x40000000
 #define ZFCP_STATUS_COMMON_ERP_FAILED		0x20000000
 #define ZFCP_STATUS_COMMON_UNBLOCKED		0x10000000
@@ -222,21 +96,10 @@ struct zfcp_ls_adisc {
 #define ZFCP_STATUS_ADAPTER_ERP_PENDING		0x00000100
 #define ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED	0x00000200
 
-/* FC-PH/FC-GS well-known address identifiers for generic services */
-#define ZFCP_DID_WKA				0xFFFFF0
-
 /* remote port status */
 #define ZFCP_STATUS_PORT_PHYS_OPEN		0x00000001
 #define ZFCP_STATUS_PORT_LINK_TEST		0x00000002
 
-/* well known address (WKA) port status*/
-enum zfcp_wka_status {
-	ZFCP_WKA_PORT_OFFLINE,
-	ZFCP_WKA_PORT_CLOSING,
-	ZFCP_WKA_PORT_OPENING,
-	ZFCP_WKA_PORT_ONLINE,
-};
-
 /* logical unit status */
 #define ZFCP_STATUS_UNIT_SHARED			0x00000004
 #define ZFCP_STATUS_UNIT_READONLY		0x00000008
@@ -247,10 +110,7 @@ enum zfcp_wka_status {
 #define ZFCP_STATUS_FSFREQ_CLEANUP		0x00000010
 #define ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED	0x00000040
 #define ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED       0x00000080
-#define ZFCP_STATUS_FSFREQ_ABORTED              0x00000100
 #define ZFCP_STATUS_FSFREQ_TMFUNCFAILED         0x00000200
-#define ZFCP_STATUS_FSFREQ_TMFUNCNOTSUPP        0x00000400
-#define ZFCP_STATUS_FSFREQ_RETRY                0x00000800
 #define ZFCP_STATUS_FSFREQ_DISMISSED            0x00001000
 
 /************************* STRUCTURE DEFINITIONS *****************************/
@@ -265,125 +125,10 @@ struct zfcp_adapter_mempool {
 	mempool_t *scsi_abort;
 	mempool_t *status_read_req;
 	mempool_t *status_read_data;
-	mempool_t *gid_pn_data;
+	mempool_t *gid_pn;
 	mempool_t *qtcb_pool;
 };
 
-/*
- * header for CT_IU
- */
-struct ct_hdr {
-	u8 revision;		// 0x01
-	u8 in_id[3];		// 0x00
-	u8 gs_type;		// 0xFC	Directory Service
-	u8 gs_subtype;		// 0x02	Name Server
-	u8 options;		// 0x00 single bidirectional exchange
-	u8 reserved0;
-	u16 cmd_rsp_code;	// 0x0121 GID_PN, or 0x0100 GA_NXT
-	u16 max_res_size;	// <= (4096 - 16) / 4
-	u8 reserved1;
-	u8 reason_code;
-	u8 reason_code_expl;
-	u8 vendor_unique;
-} __attribute__ ((packed));
-
-/* nameserver request CT_IU -- for requests where
- * a port name is required */
-struct ct_iu_gid_pn_req {
-	struct ct_hdr header;
-	u64 wwpn;
-} __attribute__ ((packed));
-
-/* FS_ACC IU and data unit for GID_PN nameserver request */
-struct ct_iu_gid_pn_resp {
-	struct ct_hdr header;
-	u32 d_id;
-} __attribute__ ((packed));
-
-struct ct_iu_gpn_ft_req {
-	struct ct_hdr header;
-	u8 flags;
-	u8 domain_id_scope;
-	u8 area_id_scope;
-	u8 fc4_type;
-} __attribute__ ((packed));
-
-
-/**
- * struct zfcp_send_ct - used to pass parameters to function zfcp_fsf_send_ct
- * @wka_port: port where the request is sent to
- * @req: scatter-gather list for request
- * @resp: scatter-gather list for response
- * @handler: handler function (called for response to the request)
- * @handler_data: data passed to handler function
- * @completion: completion for synchronization purposes
- * @status: used to pass error status to calling function
- */
-struct zfcp_send_ct {
-	struct zfcp_wka_port *wka_port;
-	struct scatterlist *req;
-	struct scatterlist *resp;
-	void (*handler)(unsigned long);
-	unsigned long handler_data;
-	struct completion *completion;
-	int status;
-};
-
-/* used for name server requests in error recovery */
-struct zfcp_gid_pn_data {
-	struct zfcp_send_ct ct;
-	struct scatterlist req;
-	struct scatterlist resp;
-	struct ct_iu_gid_pn_req ct_iu_req;
-	struct ct_iu_gid_pn_resp ct_iu_resp;
-        struct zfcp_port *port;
-};
-
-/**
- * struct zfcp_send_els - used to pass parameters to function zfcp_fsf_send_els
- * @adapter: adapter where request is sent from
- * @port: port where ELS is destinated (port reference count has to be increased)
- * @d_id: destiniation id of port where request is sent to
- * @req: scatter-gather list for request
- * @resp: scatter-gather list for response
- * @handler: handler function (called for response to the request)
- * @handler_data: data passed to handler function
- * @completion: completion for synchronization purposes
- * @ls_code: hex code of ELS command
- * @status: used to pass error status to calling function
- */
-struct zfcp_send_els {
-	struct zfcp_adapter *adapter;
-	struct zfcp_port *port;
-	u32 d_id;
-	struct scatterlist *req;
-	struct scatterlist *resp;
-	void (*handler)(unsigned long);
-	unsigned long handler_data;
-	struct completion *completion;
-	int ls_code;
-	int status;
-};
-
-struct zfcp_wka_port {
-	struct zfcp_adapter	*adapter;
-	wait_queue_head_t	completion_wq;
-	enum zfcp_wka_status	status;
-	atomic_t		refcount;
-	u32			d_id;
-	u32			handle;
-	struct mutex		mutex;
-	struct delayed_work	work;
-};
-
-struct zfcp_wka_ports {
-	struct zfcp_wka_port ms; 	/* management service */
-	struct zfcp_wka_port ts; 	/* time service */
-	struct zfcp_wka_port ds; 	/* directory service */
-	struct zfcp_wka_port as; 	/* alias service */
-	struct zfcp_wka_port ks; 	/* key distribution service */
-};
-
 struct zfcp_qdio_queue {
 	struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q];
 	u8		   first;	/* index of next free bfr in queue */
@@ -446,9 +191,7 @@ struct zfcp_qdio {
 };
 
 struct zfcp_adapter {
-	atomic_t                refcount;          /* reference count */
-	wait_queue_head_t	remove_wq;         /* can be used to wait for
-						      refcount drop to zero */
+	struct kref		ref;
 	u64			peer_wwnn;	   /* P2P peer WWNN */
 	u64			peer_wwpn;	   /* P2P peer WWPN */
 	u32			peer_d_id;	   /* P2P peer D_ID */
@@ -461,7 +204,8 @@ struct zfcp_adapter {
         u32			hardware_version;  /* of FCP channel */
 	u16			timer_ticks;       /* time int for a tick */
 	struct Scsi_Host	*scsi_host;	   /* Pointer to mid-layer */
-	struct list_head	port_list_head;	   /* remote port list */
+	struct list_head	port_list;	   /* remote port list */
+	rwlock_t		port_list_lock;    /* port list lock */
 	unsigned long		req_no;		   /* unique FSF req number */
 	struct list_head	*req_list;	   /* list of pending reqs */
 	spinlock_t		req_list_lock;	   /* request list lock */
@@ -485,7 +229,7 @@ struct zfcp_adapter {
 	u32			erp_low_mem_count; /* nr of erp actions waiting
 						      for memory */
 	struct task_struct	*erp_thread;
-	struct zfcp_wka_ports	*gs;		   /* generic services */
+	struct zfcp_fc_wka_ports *gs;		   /* generic services */
 	struct zfcp_dbf		*dbf;		   /* debug traces */
 	struct zfcp_adapter_mempool	pool;      /* Adapter memory pools */
 	struct fc_host_statistics *fc_stats;
@@ -500,11 +244,9 @@ struct zfcp_port {
 	struct device          sysfs_device;   /* sysfs device */
 	struct fc_rport        *rport;         /* rport of fc transport class */
 	struct list_head       list;	       /* list of remote ports */
-	atomic_t               refcount;       /* reference count */
-	wait_queue_head_t      remove_wq;      /* can be used to wait for
-						  refcount drop to zero */
 	struct zfcp_adapter    *adapter;       /* adapter used to access port */
-	struct list_head       unit_list_head; /* head of logical unit list */
+	struct list_head	unit_list;	/* head of logical unit list */
+	rwlock_t		unit_list_lock; /* unit list lock */
 	atomic_t	       status;	       /* status of this remote port */
 	u64		       wwnn;	       /* WWNN if known */
 	u64		       wwpn;	       /* WWPN */
@@ -523,9 +265,6 @@ struct zfcp_port {
 struct zfcp_unit {
 	struct device          sysfs_device;   /* sysfs device */
 	struct list_head       list;	       /* list of logical units */
-	atomic_t               refcount;       /* reference count */
-	wait_queue_head_t      remove_wq;      /* can be used to wait for
-						  refcount drop to zero */
 	struct zfcp_port       *port;	       /* remote port of unit */
 	atomic_t	       status;	       /* status of this logical unit */
 	u64		       fcp_lun;	       /* own FCP_LUN */
@@ -601,14 +340,11 @@ struct zfcp_fsf_req {
 struct zfcp_data {
 	struct scsi_host_template scsi_host_template;
 	struct scsi_transport_template *scsi_transport_template;
-	rwlock_t                config_lock;        /* serialises changes
-						       to adapter/port/unit
-						       lists */
-	struct mutex		config_mutex;
 	struct kmem_cache	*gpn_ft_cache;
 	struct kmem_cache	*qtcb_cache;
 	struct kmem_cache	*sr_buffer_cache;
 	struct kmem_cache	*gid_pn_cache;
+	struct kmem_cache	*adisc_cache;
 };
 
 /********************** ZFCP SPECIFIC DEFINES ********************************/
@@ -657,47 +393,4 @@ zfcp_reqlist_find_safe(struct zfcp_adapter *adapter, struct zfcp_fsf_req *req)
 	return NULL;
 }
 
-/*
- *  functions needed for reference/usage counting
- */
-
-static inline void
-zfcp_unit_get(struct zfcp_unit *unit)
-{
-	atomic_inc(&unit->refcount);
-}
-
-static inline void
-zfcp_unit_put(struct zfcp_unit *unit)
-{
-	if (atomic_dec_return(&unit->refcount) == 0)
-		wake_up(&unit->remove_wq);
-}
-
-static inline void
-zfcp_port_get(struct zfcp_port *port)
-{
-	atomic_inc(&port->refcount);
-}
-
-static inline void
-zfcp_port_put(struct zfcp_port *port)
-{
-	if (atomic_dec_return(&port->refcount) == 0)
-		wake_up(&port->remove_wq);
-}
-
-static inline void
-zfcp_adapter_get(struct zfcp_adapter *adapter)
-{
-	atomic_inc(&adapter->refcount);
-}
-
-static inline void
-zfcp_adapter_put(struct zfcp_adapter *adapter)
-{
-	if (atomic_dec_return(&adapter->refcount) == 0)
-		wake_up(&adapter->remove_wq);
-}
-
 #endif /* ZFCP_DEF_H */
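
zfcp_def.h drops the hand-rolled zfcp_*_get/put helpers, which paired
an atomic counter with a wake_up() on the remove waitqueue. The
adapter now uses the generic kref (see the kref_get()/kref_put() calls
in zfcp_erp.c below), while ports and units piggyback on the reference
count of their embedded struct device. A minimal sketch of the kref
pattern on a hypothetical object, with a hypothetical release callback:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct my_obj {			/* hypothetical, for illustration */
	struct kref ref;
	/* ... payload ... */
};

static void my_obj_release(struct kref *ref)
{
	struct my_obj *obj = container_of(ref, struct my_obj, ref);

	kfree(obj);		/* runs exactly once, on the final put */
}

static void my_obj_get(struct my_obj *obj)
{
	kref_get(&obj->ref);	/* take a reference while in use */
}

static void my_obj_put(struct my_obj *obj)
{
	kref_put(&obj->ref, my_obj_release);
}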
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index f73e2180f333..b51a11a82e63 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -99,9 +99,12 @@ static void zfcp_erp_action_dismiss_port(struct zfcp_port *port)
 
 	if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_INUSE)
 		zfcp_erp_action_dismiss(&port->erp_action);
-	else
-		list_for_each_entry(unit, &port->unit_list_head, list)
-		    zfcp_erp_action_dismiss_unit(unit);
+	else {
+		read_lock(&port->unit_list_lock);
+		list_for_each_entry(unit, &port->unit_list, list)
+			zfcp_erp_action_dismiss_unit(unit);
+		read_unlock(&port->unit_list_lock);
+	}
 }
 
 static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
@@ -110,9 +113,12 @@ static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
 
 	if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_INUSE)
 		zfcp_erp_action_dismiss(&adapter->erp_action);
-	else
-		list_for_each_entry(port, &adapter->port_list_head, list)
+	else {
+		read_lock(&adapter->port_list_lock);
+		list_for_each_entry(port, &adapter->port_list, list)
 		    zfcp_erp_action_dismiss_port(port);
+		read_unlock(&adapter->port_list_lock);
+	}
 }
 
 static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter,
@@ -168,7 +174,8 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need,
 
 	switch (need) {
 	case ZFCP_ERP_ACTION_REOPEN_UNIT:
-		zfcp_unit_get(unit);
+		if (!get_device(&unit->sysfs_device))
+			return NULL;
 		atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status);
 		erp_action = &unit->erp_action;
 		if (!(atomic_read(&unit->status) & ZFCP_STATUS_COMMON_RUNNING))
@@ -177,7 +184,8 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need,
 
 	case ZFCP_ERP_ACTION_REOPEN_PORT:
 	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
-		zfcp_port_get(port);
+		if (!get_device(&port->sysfs_device))
+			return NULL;
 		zfcp_erp_action_dismiss_port(port);
 		atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status);
 		erp_action = &port->erp_action;
@@ -186,7 +194,7 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need,
 		break;
 
 	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
-		zfcp_adapter_get(adapter);
+		kref_get(&adapter->ref);
 		zfcp_erp_action_dismiss_adapter(adapter);
 		atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status);
 		erp_action = &adapter->erp_action;
@@ -264,11 +272,16 @@ void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear,
 {
 	unsigned long flags;
 
-	read_lock_irqsave(&zfcp_data.config_lock, flags);
-	write_lock(&adapter->erp_lock);
-	_zfcp_erp_adapter_reopen(adapter, clear, id, ref);
-	write_unlock(&adapter->erp_lock);
-	read_unlock_irqrestore(&zfcp_data.config_lock, flags);
+	zfcp_erp_adapter_block(adapter, clear);
+	zfcp_scsi_schedule_rports_block(adapter);
+
+	write_lock_irqsave(&adapter->erp_lock, flags);
+	if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
+		zfcp_erp_adapter_failed(adapter, "erareo1", NULL);
+	else
+		zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, adapter,
+					NULL, NULL, id, ref);
+	write_unlock_irqrestore(&adapter->erp_lock, flags);
 }
 
 /**
@@ -345,11 +358,9 @@ void zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear, char *id,
 	unsigned long flags;
 	struct zfcp_adapter *adapter = port->adapter;
 
-	read_lock_irqsave(&zfcp_data.config_lock, flags);
-	write_lock(&adapter->erp_lock);
+	write_lock_irqsave(&adapter->erp_lock, flags);
 	_zfcp_erp_port_forced_reopen(port, clear, id, ref);
-	write_unlock(&adapter->erp_lock);
-	read_unlock_irqrestore(&zfcp_data.config_lock, flags);
+	write_unlock_irqrestore(&adapter->erp_lock, flags);
 }
 
 static int _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id,
@@ -377,15 +388,13 @@ static int _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id,
  */
 int zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id, void *ref)
 {
-	unsigned long flags;
 	int retval;
+	unsigned long flags;
 	struct zfcp_adapter *adapter = port->adapter;
 
-	read_lock_irqsave(&zfcp_data.config_lock, flags);
-	write_lock(&adapter->erp_lock);
+	write_lock_irqsave(&adapter->erp_lock, flags);
 	retval = _zfcp_erp_port_reopen(port, clear, id, ref);
-	write_unlock(&adapter->erp_lock);
-	read_unlock_irqrestore(&zfcp_data.config_lock, flags);
+	write_unlock_irqrestore(&adapter->erp_lock, flags);
 
 	return retval;
 }
@@ -424,11 +433,9 @@ void zfcp_erp_unit_reopen(struct zfcp_unit *unit, int clear, char *id,
 	struct zfcp_port *port = unit->port;
 	struct zfcp_adapter *adapter = port->adapter;
 
-	read_lock_irqsave(&zfcp_data.config_lock, flags);
-	write_lock(&adapter->erp_lock);
+	write_lock_irqsave(&adapter->erp_lock, flags);
 	_zfcp_erp_unit_reopen(unit, clear, id, ref);
-	write_unlock(&adapter->erp_lock);
-	read_unlock_irqrestore(&zfcp_data.config_lock, flags);
+	write_unlock_irqrestore(&adapter->erp_lock, flags);
 }
 
 static int status_change_set(unsigned long mask, atomic_t *status)
@@ -540,8 +547,10 @@ static void _zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter,
 {
 	struct zfcp_port *port;
 
-	list_for_each_entry(port, &adapter->port_list_head, list)
+	read_lock(&adapter->port_list_lock);
+	list_for_each_entry(port, &adapter->port_list, list)
 		_zfcp_erp_port_reopen(port, clear, id, ref);
+	read_unlock(&adapter->port_list_lock);
 }
 
 static void _zfcp_erp_unit_reopen_all(struct zfcp_port *port, int clear,
@@ -549,8 +558,10 @@ static void _zfcp_erp_unit_reopen_all(struct zfcp_port *port, int clear,
 {
 	struct zfcp_unit *unit;
 
-	list_for_each_entry(unit, &port->unit_list_head, list)
+	read_lock(&port->unit_list_lock);
+	list_for_each_entry(unit, &port->unit_list, list)
 		_zfcp_erp_unit_reopen(unit, clear, id, ref);
+	read_unlock(&port->unit_list_lock);
 }
 
 static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act)
@@ -590,16 +601,14 @@ static void zfcp_erp_wakeup(struct zfcp_adapter *adapter)
 {
 	unsigned long flags;
 
-	read_lock_irqsave(&zfcp_data.config_lock, flags);
-	read_lock(&adapter->erp_lock);
+	read_lock_irqsave(&adapter->erp_lock, flags);
 	if (list_empty(&adapter->erp_ready_head) &&
 	    list_empty(&adapter->erp_running_head)) {
 			atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING,
 					  &adapter->status);
 			wake_up(&adapter->erp_done_wqh);
 	}
-	read_unlock(&adapter->erp_lock);
-	read_unlock_irqrestore(&zfcp_data.config_lock, flags);
+	read_unlock_irqrestore(&adapter->erp_lock, flags);
 }
 
 static int zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *act)
@@ -1170,28 +1179,28 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
 	switch (act->action) {
 	case ZFCP_ERP_ACTION_REOPEN_UNIT:
 		if ((result == ZFCP_ERP_SUCCEEDED) && !unit->device) {
-			zfcp_unit_get(unit);
+			get_device(&unit->sysfs_device);
 			if (scsi_queue_work(unit->port->adapter->scsi_host,
 					    &unit->scsi_work) <= 0)
-				zfcp_unit_put(unit);
+				put_device(&unit->sysfs_device);
 		}
-		zfcp_unit_put(unit);
+		put_device(&unit->sysfs_device);
 		break;
 
 	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
 	case ZFCP_ERP_ACTION_REOPEN_PORT:
 		if (result == ZFCP_ERP_SUCCEEDED)
 			zfcp_scsi_schedule_rport_register(port);
-		zfcp_port_put(port);
+		put_device(&port->sysfs_device);
 		break;
 
 	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
 		if (result == ZFCP_ERP_SUCCEEDED) {
 			register_service_level(&adapter->service_level);
-			schedule_work(&adapter->scan_work);
+			queue_work(adapter->work_queue, &adapter->scan_work);
 		} else
 			unregister_service_level(&adapter->service_level);
-		zfcp_adapter_put(adapter);
+		kref_put(&adapter->ref, zfcp_adapter_release);
 		break;
 	}
 }
@@ -1214,12 +1223,12 @@ static int zfcp_erp_strategy_do_action(struct zfcp_erp_action *erp_action)
 static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action)
 {
 	int retval;
-	struct zfcp_adapter *adapter = erp_action->adapter;
 	unsigned long flags;
+	struct zfcp_adapter *adapter = erp_action->adapter;
 
-	read_lock_irqsave(&zfcp_data.config_lock, flags);
-	write_lock(&adapter->erp_lock);
+	kref_get(&adapter->ref);
 
+	write_lock_irqsave(&adapter->erp_lock, flags);
 	zfcp_erp_strategy_check_fsfreq(erp_action);
 
 	if (erp_action->status & ZFCP_STATUS_ERP_DISMISSED) {
@@ -1231,11 +1240,9 @@ static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action)
 	zfcp_erp_action_to_running(erp_action);
 
 	/* no lock to allow for blocking operations */
-	write_unlock(&adapter->erp_lock);
-	read_unlock_irqrestore(&zfcp_data.config_lock, flags);
+	write_unlock_irqrestore(&adapter->erp_lock, flags);
 	retval = zfcp_erp_strategy_do_action(erp_action);
-	read_lock_irqsave(&zfcp_data.config_lock, flags);
-	write_lock(&adapter->erp_lock);
+	write_lock_irqsave(&adapter->erp_lock, flags);
 
 	if (erp_action->status & ZFCP_STATUS_ERP_DISMISSED)
 		retval = ZFCP_ERP_CONTINUES;
@@ -1273,12 +1280,12 @@ static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action)
 		zfcp_erp_strategy_followup_failed(erp_action);
 
  unlock:
-	write_unlock(&adapter->erp_lock);
-	read_unlock_irqrestore(&zfcp_data.config_lock, flags);
+	write_unlock_irqrestore(&adapter->erp_lock, flags);
 
 	if (retval != ZFCP_ERP_CONTINUES)
 		zfcp_erp_action_cleanup(erp_action, retval);
 
+	kref_put(&adapter->ref, zfcp_adapter_release);
 	return retval;
 }
 
@@ -1415,6 +1422,7 @@ void zfcp_erp_modify_adapter_status(struct zfcp_adapter *adapter, char *id,
 				    void *ref, u32 mask, int set_or_clear)
 {
 	struct zfcp_port *port;
+	unsigned long flags;
 	u32 common_mask = mask & ZFCP_COMMON_FLAGS;
 
 	if (set_or_clear == ZFCP_SET) {
@@ -1429,10 +1437,13 @@ void zfcp_erp_modify_adapter_status(struct zfcp_adapter *adapter, char *id,
 			atomic_set(&adapter->erp_counter, 0);
 	}
 
-	if (common_mask)
-		list_for_each_entry(port, &adapter->port_list_head, list)
+	if (common_mask) {
+		read_lock_irqsave(&adapter->port_list_lock, flags);
+		list_for_each_entry(port, &adapter->port_list, list)
 			zfcp_erp_modify_port_status(port, id, ref, common_mask,
 						    set_or_clear);
+		read_unlock_irqrestore(&adapter->port_list_lock, flags);
+	}
 }
 
 /**
@@ -1449,6 +1460,7 @@ void zfcp_erp_modify_port_status(struct zfcp_port *port, char *id, void *ref,
 				 u32 mask, int set_or_clear)
 {
 	struct zfcp_unit *unit;
+	unsigned long flags;
 	u32 common_mask = mask & ZFCP_COMMON_FLAGS;
 
 	if (set_or_clear == ZFCP_SET) {
@@ -1463,10 +1475,13 @@ void zfcp_erp_modify_port_status(struct zfcp_port *port, char *id, void *ref,
 			atomic_set(&port->erp_counter, 0);
 	}
 
-	if (common_mask)
-		list_for_each_entry(unit, &port->unit_list_head, list)
+	if (common_mask) {
+		read_lock_irqsave(&port->unit_list_lock, flags);
+		list_for_each_entry(unit, &port->unit_list, list)
 			zfcp_erp_modify_unit_status(unit, id, ref, common_mask,
 						    set_or_clear);
+		read_unlock_irqrestore(&port->unit_list_lock, flags);
+	}
 }
 
 /**
@@ -1502,12 +1517,8 @@ void zfcp_erp_modify_unit_status(struct zfcp_unit *unit, char *id, void *ref,
  */
 void zfcp_erp_port_boxed(struct zfcp_port *port, char *id, void *ref)
 {
-	unsigned long flags;
-
-	read_lock_irqsave(&zfcp_data.config_lock, flags);
 	zfcp_erp_modify_port_status(port, id, ref,
 				    ZFCP_STATUS_COMMON_ACCESS_BOXED, ZFCP_SET);
-	read_unlock_irqrestore(&zfcp_data.config_lock, flags);
 	zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref);
 }
 
@@ -1535,13 +1546,9 @@ void zfcp_erp_unit_boxed(struct zfcp_unit *unit, char *id, void *ref)
  */
 void zfcp_erp_port_access_denied(struct zfcp_port *port, char *id, void *ref)
 {
-	unsigned long flags;
-
-	read_lock_irqsave(&zfcp_data.config_lock, flags);
 	zfcp_erp_modify_port_status(port, id, ref,
 				    ZFCP_STATUS_COMMON_ERP_FAILED |
 				    ZFCP_STATUS_COMMON_ACCESS_DENIED, ZFCP_SET);
-	read_unlock_irqrestore(&zfcp_data.config_lock, flags);
 }
 
 /**
@@ -1574,12 +1581,15 @@ static void zfcp_erp_port_access_changed(struct zfcp_port *port, char *id,
 					 void *ref)
 {
 	struct zfcp_unit *unit;
+	unsigned long flags;
 	int status = atomic_read(&port->status);
 
 	if (!(status & (ZFCP_STATUS_COMMON_ACCESS_DENIED |
 			ZFCP_STATUS_COMMON_ACCESS_BOXED))) {
-		list_for_each_entry(unit, &port->unit_list_head, list)
+		read_lock_irqsave(&port->unit_list_lock, flags);
+		list_for_each_entry(unit, &port->unit_list, list)
 				    zfcp_erp_unit_access_changed(unit, id, ref);
+		read_unlock_irqrestore(&port->unit_list_lock, flags);
 		return;
 	}
 
@@ -1595,14 +1605,14 @@ static void zfcp_erp_port_access_changed(struct zfcp_port *port, char *id,
 void zfcp_erp_adapter_access_changed(struct zfcp_adapter *adapter, char *id,
 				     void *ref)
 {
-	struct zfcp_port *port;
 	unsigned long flags;
+	struct zfcp_port *port;
 
 	if (adapter->connection_features & FSF_FEATURE_NPIV_MODE)
 		return;
 
-	read_lock_irqsave(&zfcp_data.config_lock, flags);
-	list_for_each_entry(port, &adapter->port_list_head, list)
+	read_lock_irqsave(&adapter->port_list_lock, flags);
+	list_for_each_entry(port, &adapter->port_list, list)
 		zfcp_erp_port_access_changed(port, id, ref);
-	read_unlock_irqrestore(&zfcp_data.config_lock, flags);
+	read_unlock_irqrestore(&adapter->port_list_lock, flags);
 }
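
Every list walk in zfcp_erp.c now takes the per-adapter
port_list_lock or the per-port unit_list_lock instead of the removed
global zfcp_data.config_lock, so each rwlock guards exactly the list
it belongs to. The recurring traversal pattern, sketched on a
hypothetical parent/child pair:

#include <linux/list.h>
#include <linux/spinlock.h>

struct parent {				/* hypothetical container */
	struct list_head child_list;
	rwlock_t child_list_lock;
};

struct child {
	struct list_head list;
};

static void for_each_child(struct parent *p, void (*fn)(struct child *))
{
	struct child *c;
	unsigned long flags;

	/* readers may run concurrently; list add/del takes the write lock */
	read_lock_irqsave(&p->child_list_lock, flags);
	list_for_each_entry(c, &p->child_list, list)
		fn(c);			/* must not sleep under the rwlock */
	read_unlock_irqrestore(&p->child_list_lock, flags);
}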
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index b3f28deb4505..03dec832b465 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -9,26 +9,31 @@
 #ifndef ZFCP_EXT_H
 #define ZFCP_EXT_H
 
+#include <linux/types.h>
+#include <scsi/fc/fc_els.h>
 #include "zfcp_def.h"
+#include "zfcp_fc.h"
 
 /* zfcp_aux.c */
 extern struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *, u64);
 extern struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *, u64);
-extern int zfcp_adapter_enqueue(struct ccw_device *);
-extern void zfcp_adapter_dequeue(struct zfcp_adapter *);
+extern struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *);
 extern struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *, u64, u32,
 					   u32);
-extern void zfcp_port_dequeue(struct zfcp_port *);
 extern struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *, u64);
-extern void zfcp_unit_dequeue(struct zfcp_unit *);
 extern int zfcp_reqlist_isempty(struct zfcp_adapter *);
 extern void zfcp_sg_free_table(struct scatterlist *, int);
 extern int zfcp_sg_setup_table(struct scatterlist *, int);
+extern void zfcp_device_unregister(struct device *,
+				   const struct attribute_group *);
+extern void zfcp_adapter_release(struct kref *);
+extern void zfcp_adapter_unregister(struct zfcp_adapter *);
 
 /* zfcp_ccw.c */
-extern int zfcp_ccw_register(void);
 extern int zfcp_ccw_priv_sch(struct zfcp_adapter *);
 extern struct ccw_driver zfcp_ccw_driver;
+extern struct zfcp_adapter *zfcp_ccw_adapter_by_cdev(struct ccw_device *);
+extern void zfcp_ccw_adapter_put(struct zfcp_adapter *);
 
 /* zfcp_cfdc.c */
 extern struct miscdevice zfcp_cfdc_misc;
@@ -51,7 +56,7 @@ extern void _zfcp_dbf_hba_fsf_unsol(const char *, int level, struct zfcp_dbf *,
 					  struct fsf_status_read_buffer *);
 extern void zfcp_dbf_hba_qdio(struct zfcp_dbf *, unsigned int, int, int);
 extern void zfcp_dbf_hba_berr(struct zfcp_dbf *, struct zfcp_fsf_req *);
-extern void zfcp_dbf_san_ct_request(struct zfcp_fsf_req *);
+extern void zfcp_dbf_san_ct_request(struct zfcp_fsf_req *, u32);
 extern void zfcp_dbf_san_ct_response(struct zfcp_fsf_req *);
 extern void zfcp_dbf_san_els_request(struct zfcp_fsf_req *);
 extern void zfcp_dbf_san_els_response(struct zfcp_fsf_req *);
@@ -92,24 +97,22 @@ extern void zfcp_erp_adapter_access_changed(struct zfcp_adapter *, char *,
 extern void zfcp_erp_timeout_handler(unsigned long);
 
 /* zfcp_fc.c */
-extern int zfcp_fc_scan_ports(struct zfcp_adapter *);
-extern void _zfcp_fc_scan_ports_later(struct work_struct *);
+extern void zfcp_fc_scan_ports(struct work_struct *);
 extern void zfcp_fc_incoming_els(struct zfcp_fsf_req *);
 extern void zfcp_fc_port_did_lookup(struct work_struct *);
 extern void zfcp_fc_trigger_did_lookup(struct zfcp_port *);
-extern void zfcp_fc_plogi_evaluate(struct zfcp_port *, struct fsf_plogi *);
+extern void zfcp_fc_plogi_evaluate(struct zfcp_port *, struct fc_els_flogi *);
 extern void zfcp_fc_test_link(struct zfcp_port *);
 extern void zfcp_fc_link_test_work(struct work_struct *);
-extern void zfcp_fc_wka_ports_force_offline(struct zfcp_wka_ports *);
+extern void zfcp_fc_wka_ports_force_offline(struct zfcp_fc_wka_ports *);
 extern int zfcp_fc_gs_setup(struct zfcp_adapter *);
 extern void zfcp_fc_gs_destroy(struct zfcp_adapter *);
-extern int zfcp_fc_execute_els_fc_job(struct fc_bsg_job *);
-extern int zfcp_fc_execute_ct_fc_job(struct fc_bsg_job *);
+extern int zfcp_fc_exec_bsg_job(struct fc_bsg_job *);
 
 /* zfcp_fsf.c */
 extern int zfcp_fsf_open_port(struct zfcp_erp_action *);
-extern int zfcp_fsf_open_wka_port(struct zfcp_wka_port *);
-extern int zfcp_fsf_close_wka_port(struct zfcp_wka_port *);
+extern int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *);
+extern int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *);
 extern int zfcp_fsf_close_port(struct zfcp_erp_action *);
 extern int zfcp_fsf_close_physical_port(struct zfcp_erp_action *);
 extern int zfcp_fsf_open_unit(struct zfcp_erp_action *);
@@ -125,8 +128,10 @@ extern struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *,
 extern void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *);
 extern int zfcp_fsf_status_read(struct zfcp_qdio *);
 extern int zfcp_status_read_refill(struct zfcp_adapter *adapter);
-extern int zfcp_fsf_send_ct(struct zfcp_send_ct *, mempool_t *);
-extern int zfcp_fsf_send_els(struct zfcp_send_els *);
+extern int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *, struct zfcp_fsf_ct_els *,
+			    mempool_t *);
+extern int zfcp_fsf_send_els(struct zfcp_adapter *, u32,
+			     struct zfcp_fsf_ct_els *);
 extern int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *,
 					  struct scsi_cmnd *);
 extern void zfcp_fsf_req_free(struct zfcp_fsf_req *);
@@ -153,7 +158,6 @@ extern void zfcp_qdio_close(struct zfcp_qdio *);
 extern struct zfcp_data zfcp_data;
 extern int zfcp_adapter_scsi_register(struct zfcp_adapter *);
 extern void zfcp_adapter_scsi_unregister(struct zfcp_adapter *);
-extern char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *);
 extern struct fc_function_template zfcp_transport_functions;
 extern void zfcp_scsi_rport_work(struct work_struct *);
 extern void zfcp_scsi_schedule_rport_register(struct zfcp_port *);
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index df23bcead23d..ac5e3b7a3576 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -9,73 +9,38 @@
 #define KMSG_COMPONENT "zfcp"
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
+#include <linux/types.h>
+#include <scsi/fc/fc_els.h>
+#include <scsi/libfc.h>
 #include "zfcp_ext.h"
+#include "zfcp_fc.h"
 
-enum rscn_address_format {
-	RSCN_PORT_ADDRESS	= 0x0,
-	RSCN_AREA_ADDRESS	= 0x1,
-	RSCN_DOMAIN_ADDRESS	= 0x2,
-	RSCN_FABRIC_ADDRESS	= 0x3,
+static u32 zfcp_fc_rscn_range_mask[] = {
+	[ELS_ADDR_FMT_PORT]		= 0xFFFFFF,
+	[ELS_ADDR_FMT_AREA]		= 0xFFFF00,
+	[ELS_ADDR_FMT_DOM]		= 0xFF0000,
+	[ELS_ADDR_FMT_FAB]		= 0x000000,
 };
 
-static u32 rscn_range_mask[] = {
-	[RSCN_PORT_ADDRESS]		= 0xFFFFFF,
-	[RSCN_AREA_ADDRESS]		= 0xFFFF00,
-	[RSCN_DOMAIN_ADDRESS]		= 0xFF0000,
-	[RSCN_FABRIC_ADDRESS]		= 0x000000,
-};
-
-struct gpn_ft_resp_acc {
-	u8 control;
-	u8 port_id[3];
-	u8 reserved[4];
-	u64 wwpn;
-} __attribute__ ((packed));
-
-#define ZFCP_CT_SIZE_ONE_PAGE	(PAGE_SIZE - sizeof(struct ct_hdr))
-#define ZFCP_GPN_FT_ENTRIES	(ZFCP_CT_SIZE_ONE_PAGE \
-					/ sizeof(struct gpn_ft_resp_acc))
-#define ZFCP_GPN_FT_BUFFERS 4
-#define ZFCP_GPN_FT_MAX_SIZE (ZFCP_GPN_FT_BUFFERS * PAGE_SIZE \
-				- sizeof(struct ct_hdr))
-#define ZFCP_GPN_FT_MAX_ENTRIES ZFCP_GPN_FT_BUFFERS * (ZFCP_GPN_FT_ENTRIES + 1)
-
-struct ct_iu_gpn_ft_resp {
-	struct ct_hdr header;
-	struct gpn_ft_resp_acc accept[ZFCP_GPN_FT_ENTRIES];
-} __attribute__ ((packed));
-
-struct zfcp_gpn_ft {
-	struct zfcp_send_ct ct;
-	struct scatterlist sg_req;
-	struct scatterlist sg_resp[ZFCP_GPN_FT_BUFFERS];
-};
-
-struct zfcp_fc_ns_handler_data {
-	struct completion done;
-	void (*handler)(unsigned long);
-	unsigned long handler_data;
-};
-
-static int zfcp_fc_wka_port_get(struct zfcp_wka_port *wka_port)
+static int zfcp_fc_wka_port_get(struct zfcp_fc_wka_port *wka_port)
 {
 	if (mutex_lock_interruptible(&wka_port->mutex))
 		return -ERESTARTSYS;
 
-	if (wka_port->status == ZFCP_WKA_PORT_OFFLINE ||
-	    wka_port->status == ZFCP_WKA_PORT_CLOSING) {
-		wka_port->status = ZFCP_WKA_PORT_OPENING;
+	if (wka_port->status == ZFCP_FC_WKA_PORT_OFFLINE ||
+	    wka_port->status == ZFCP_FC_WKA_PORT_CLOSING) {
+		wka_port->status = ZFCP_FC_WKA_PORT_OPENING;
 		if (zfcp_fsf_open_wka_port(wka_port))
-			wka_port->status = ZFCP_WKA_PORT_OFFLINE;
+			wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
 	}
 
 	mutex_unlock(&wka_port->mutex);
 
 	wait_event(wka_port->completion_wq,
-		   wka_port->status == ZFCP_WKA_PORT_ONLINE ||
-		   wka_port->status == ZFCP_WKA_PORT_OFFLINE);
+		   wka_port->status == ZFCP_FC_WKA_PORT_ONLINE ||
+		   wka_port->status == ZFCP_FC_WKA_PORT_OFFLINE);
 
-	if (wka_port->status == ZFCP_WKA_PORT_ONLINE) {
+	if (wka_port->status == ZFCP_FC_WKA_PORT_ONLINE) {
 		atomic_inc(&wka_port->refcount);
 		return 0;
 	}
@@ -85,24 +50,24 @@ static int zfcp_fc_wka_port_get(struct zfcp_wka_port *wka_port)
 static void zfcp_fc_wka_port_offline(struct work_struct *work)
 {
 	struct delayed_work *dw = to_delayed_work(work);
-	struct zfcp_wka_port *wka_port =
-			container_of(dw, struct zfcp_wka_port, work);
+	struct zfcp_fc_wka_port *wka_port =
+			container_of(dw, struct zfcp_fc_wka_port, work);
 
 	mutex_lock(&wka_port->mutex);
 	if ((atomic_read(&wka_port->refcount) != 0) ||
-	    (wka_port->status != ZFCP_WKA_PORT_ONLINE))
+	    (wka_port->status != ZFCP_FC_WKA_PORT_ONLINE))
 		goto out;
 
-	wka_port->status = ZFCP_WKA_PORT_CLOSING;
+	wka_port->status = ZFCP_FC_WKA_PORT_CLOSING;
 	if (zfcp_fsf_close_wka_port(wka_port)) {
-		wka_port->status = ZFCP_WKA_PORT_OFFLINE;
+		wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
 		wake_up(&wka_port->completion_wq);
 	}
 out:
 	mutex_unlock(&wka_port->mutex);
 }
 
-static void zfcp_fc_wka_port_put(struct zfcp_wka_port *wka_port)
+static void zfcp_fc_wka_port_put(struct zfcp_fc_wka_port *wka_port)
 {
 	if (atomic_dec_return(&wka_port->refcount) != 0)
 		return;
@@ -110,7 +75,7 @@ static void zfcp_fc_wka_port_put(struct zfcp_wka_port *wka_port)
 	schedule_delayed_work(&wka_port->work, HZ / 100);
 }
 
-static void zfcp_fc_wka_port_init(struct zfcp_wka_port *wka_port, u32 d_id,
+static void zfcp_fc_wka_port_init(struct zfcp_fc_wka_port *wka_port, u32 d_id,
 				  struct zfcp_adapter *adapter)
 {
 	init_waitqueue_head(&wka_port->completion_wq);
@@ -118,107 +83,107 @@ static void zfcp_fc_wka_port_init(struct zfcp_wka_port *wka_port, u32 d_id,
 	wka_port->adapter = adapter;
 	wka_port->d_id = d_id;
 
-	wka_port->status = ZFCP_WKA_PORT_OFFLINE;
+	wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
 	atomic_set(&wka_port->refcount, 0);
 	mutex_init(&wka_port->mutex);
 	INIT_DELAYED_WORK(&wka_port->work, zfcp_fc_wka_port_offline);
 }
 
-static void zfcp_fc_wka_port_force_offline(struct zfcp_wka_port *wka)
+static void zfcp_fc_wka_port_force_offline(struct zfcp_fc_wka_port *wka)
 {
 	cancel_delayed_work_sync(&wka->work);
 	mutex_lock(&wka->mutex);
-	wka->status = ZFCP_WKA_PORT_OFFLINE;
+	wka->status = ZFCP_FC_WKA_PORT_OFFLINE;
 	mutex_unlock(&wka->mutex);
 }
 
-void zfcp_fc_wka_ports_force_offline(struct zfcp_wka_ports *gs)
+void zfcp_fc_wka_ports_force_offline(struct zfcp_fc_wka_ports *gs)
 {
+	if (!gs)
+		return;
 	zfcp_fc_wka_port_force_offline(&gs->ms);
 	zfcp_fc_wka_port_force_offline(&gs->ts);
 	zfcp_fc_wka_port_force_offline(&gs->ds);
 	zfcp_fc_wka_port_force_offline(&gs->as);
-	zfcp_fc_wka_port_force_offline(&gs->ks);
 }
 
 static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
-				   struct fcp_rscn_element *elem)
+				   struct fc_els_rscn_page *page)
 {
 	unsigned long flags;
+	struct zfcp_adapter *adapter = fsf_req->adapter;
 	struct zfcp_port *port;
 
-	read_lock_irqsave(&zfcp_data.config_lock, flags);
-	list_for_each_entry(port, &fsf_req->adapter->port_list_head, list) {
-		if ((port->d_id & range) == (elem->nport_did & range))
+	read_lock_irqsave(&adapter->port_list_lock, flags);
+	list_for_each_entry(port, &adapter->port_list, list) {
+		if ((port->d_id & range) == (ntoh24(page->rscn_fid) & range))
 			zfcp_fc_test_link(port);
 		if (!port->d_id)
 			zfcp_erp_port_reopen(port,
 					     ZFCP_STATUS_COMMON_ERP_FAILED,
 					     "fcrscn1", NULL);
 	}
-
-	read_unlock_irqrestore(&zfcp_data.config_lock, flags);
+	read_unlock_irqrestore(&adapter->port_list_lock, flags);
 }
 
 static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
 {
 	struct fsf_status_read_buffer *status_buffer = (void *)fsf_req->data;
-	struct fcp_rscn_head *fcp_rscn_head;
-	struct fcp_rscn_element *fcp_rscn_element;
+	struct fc_els_rscn *head;
+	struct fc_els_rscn_page *page;
 	u16 i;
 	u16 no_entries;
-	u32 range_mask;
+	unsigned int afmt;
 
-	fcp_rscn_head = (struct fcp_rscn_head *) status_buffer->payload.data;
-	fcp_rscn_element = (struct fcp_rscn_element *) fcp_rscn_head;
+	head = (struct fc_els_rscn *) status_buffer->payload.data;
+	page = (struct fc_els_rscn_page *) head;
 
 	/* see FC-FS */
-	no_entries = fcp_rscn_head->payload_len /
-			sizeof(struct fcp_rscn_element);
+	no_entries = head->rscn_plen / sizeof(struct fc_els_rscn_page);
 
 	for (i = 1; i < no_entries; i++) {
 		/* skip head and start with 1st element */
-		fcp_rscn_element++;
-		range_mask = rscn_range_mask[fcp_rscn_element->addr_format];
-		_zfcp_fc_incoming_rscn(fsf_req, range_mask, fcp_rscn_element);
+		page++;
+		afmt = page->rscn_page_flags & ELS_RSCN_ADDR_FMT_MASK;
+		_zfcp_fc_incoming_rscn(fsf_req, zfcp_fc_rscn_range_mask[afmt],
+				       page);
 	}
-	schedule_work(&fsf_req->adapter->scan_work);
+	queue_work(fsf_req->adapter->work_queue, &fsf_req->adapter->scan_work);
 }
 
 static void zfcp_fc_incoming_wwpn(struct zfcp_fsf_req *req, u64 wwpn)
 {
+	unsigned long flags;
 	struct zfcp_adapter *adapter = req->adapter;
 	struct zfcp_port *port;
-	unsigned long flags;
 
-	read_lock_irqsave(&zfcp_data.config_lock, flags);
-	list_for_each_entry(port, &adapter->port_list_head, list)
-		if (port->wwpn == wwpn)
+	read_lock_irqsave(&adapter->port_list_lock, flags);
+	list_for_each_entry(port, &adapter->port_list, list)
+		if (port->wwpn == wwpn) {
+			zfcp_erp_port_forced_reopen(port, 0, "fciwwp1", req);
 			break;
-	read_unlock_irqrestore(&zfcp_data.config_lock, flags);
-
-	if (port && (port->wwpn == wwpn))
-		zfcp_erp_port_forced_reopen(port, 0, "fciwwp1", req);
+		}
+	read_unlock_irqrestore(&adapter->port_list_lock, flags);
 }
 
 static void zfcp_fc_incoming_plogi(struct zfcp_fsf_req *req)
 {
-	struct fsf_status_read_buffer *status_buffer =
-		(struct fsf_status_read_buffer *)req->data;
-	struct fsf_plogi *els_plogi =
-		(struct fsf_plogi *) status_buffer->payload.data;
+	struct fsf_status_read_buffer *status_buffer;
+	struct fc_els_flogi *plogi;
 
-	zfcp_fc_incoming_wwpn(req, els_plogi->serv_param.wwpn);
+	status_buffer = (struct fsf_status_read_buffer *) req->data;
+	plogi = (struct fc_els_flogi *) status_buffer->payload.data;
+	zfcp_fc_incoming_wwpn(req, plogi->fl_wwpn);
 }
 
 static void zfcp_fc_incoming_logo(struct zfcp_fsf_req *req)
 {
 	struct fsf_status_read_buffer *status_buffer =
 		(struct fsf_status_read_buffer *)req->data;
-	struct fcp_logo *els_logo =
-		(struct fcp_logo *) status_buffer->payload.data;
+	struct fc_els_logo *logo =
+		(struct fc_els_logo *) status_buffer->payload.data;
 
-	zfcp_fc_incoming_wwpn(req, els_logo->nport_wwpn);
+	zfcp_fc_incoming_wwpn(req, logo->fl_n_port_wwn);
 }
 
 /**
@@ -232,79 +197,72 @@ void zfcp_fc_incoming_els(struct zfcp_fsf_req *fsf_req)
 	unsigned int els_type = status_buffer->payload.data[0];
 
 	zfcp_dbf_san_incoming_els(fsf_req);
-	if (els_type == LS_PLOGI)
+	if (els_type == ELS_PLOGI)
 		zfcp_fc_incoming_plogi(fsf_req);
-	else if (els_type == LS_LOGO)
+	else if (els_type == ELS_LOGO)
 		zfcp_fc_incoming_logo(fsf_req);
-	else if (els_type == LS_RSCN)
+	else if (els_type == ELS_RSCN)
 		zfcp_fc_incoming_rscn(fsf_req);
 }
 
-static void zfcp_fc_ns_handler(unsigned long data)
+static void zfcp_fc_ns_gid_pn_eval(void *data)
 {
-	struct zfcp_fc_ns_handler_data *compl_rec =
-			(struct zfcp_fc_ns_handler_data *) data;
-
-	if (compl_rec->handler)
-		compl_rec->handler(compl_rec->handler_data);
-
-	complete(&compl_rec->done);
-}
-
-static void zfcp_fc_ns_gid_pn_eval(unsigned long data)
-{
-	struct zfcp_gid_pn_data *gid_pn = (struct zfcp_gid_pn_data *) data;
-	struct zfcp_send_ct *ct = &gid_pn->ct;
-	struct ct_iu_gid_pn_req *ct_iu_req = sg_virt(ct->req);
-	struct ct_iu_gid_pn_resp *ct_iu_resp = sg_virt(ct->resp);
+	struct zfcp_fc_gid_pn *gid_pn = data;
+	struct zfcp_fsf_ct_els *ct = &gid_pn->ct;
+	struct zfcp_fc_gid_pn_req *gid_pn_req = sg_virt(ct->req);
+	struct zfcp_fc_gid_pn_resp *gid_pn_resp = sg_virt(ct->resp);
 	struct zfcp_port *port = gid_pn->port;
 
 	if (ct->status)
 		return;
-	if (ct_iu_resp->header.cmd_rsp_code != ZFCP_CT_ACCEPT)
+	if (gid_pn_resp->ct_hdr.ct_cmd != FC_FS_ACC)
 		return;
 
 	/* paranoia */
-	if (ct_iu_req->wwpn != port->wwpn)
+	if (gid_pn_req->gid_pn.fn_wwpn != port->wwpn)
 		return;
 	/* looks like a valid d_id */
-	port->d_id = ct_iu_resp->d_id & ZFCP_DID_MASK;
+	port->d_id = ntoh24(gid_pn_resp->gid_pn.fp_fid);
+}
+
+static void zfcp_fc_complete(void *data)
+{
+	complete(data);
 }
 
 static int zfcp_fc_ns_gid_pn_request(struct zfcp_port *port,
-				     struct zfcp_gid_pn_data *gid_pn)
+				     struct zfcp_fc_gid_pn *gid_pn)
 {
 	struct zfcp_adapter *adapter = port->adapter;
-	struct zfcp_fc_ns_handler_data compl_rec;
+	DECLARE_COMPLETION_ONSTACK(completion);
 	int ret;
 
 	/* setup parameters for send generic command */
 	gid_pn->port = port;
-	gid_pn->ct.wka_port = &adapter->gs->ds;
-	gid_pn->ct.handler = zfcp_fc_ns_handler;
-	gid_pn->ct.handler_data = (unsigned long) &compl_rec;
-	gid_pn->ct.req = &gid_pn->req;
-	gid_pn->ct.resp = &gid_pn->resp;
-	sg_init_one(&gid_pn->req, &gid_pn->ct_iu_req,
-		    sizeof(struct ct_iu_gid_pn_req));
-	sg_init_one(&gid_pn->resp, &gid_pn->ct_iu_resp,
-		    sizeof(struct ct_iu_gid_pn_resp));
+	gid_pn->ct.handler = zfcp_fc_complete;
+	gid_pn->ct.handler_data = &completion;
+	gid_pn->ct.req = &gid_pn->sg_req;
+	gid_pn->ct.resp = &gid_pn->sg_resp;
+	sg_init_one(&gid_pn->sg_req, &gid_pn->gid_pn_req,
+		    sizeof(struct zfcp_fc_gid_pn_req));
+	sg_init_one(&gid_pn->sg_resp, &gid_pn->gid_pn_resp,
+		    sizeof(struct zfcp_fc_gid_pn_resp));
 
 	/* setup nameserver request */
-	gid_pn->ct_iu_req.header.revision = ZFCP_CT_REVISION;
-	gid_pn->ct_iu_req.header.gs_type = ZFCP_CT_DIRECTORY_SERVICE;
-	gid_pn->ct_iu_req.header.gs_subtype = ZFCP_CT_NAME_SERVER;
-	gid_pn->ct_iu_req.header.options = ZFCP_CT_SYNCHRONOUS;
-	gid_pn->ct_iu_req.header.cmd_rsp_code = ZFCP_CT_GID_PN;
-	gid_pn->ct_iu_req.header.max_res_size = ZFCP_CT_SIZE_ONE_PAGE / 4;
-	gid_pn->ct_iu_req.wwpn = port->wwpn;
-
-	init_completion(&compl_rec.done);
-	compl_rec.handler = zfcp_fc_ns_gid_pn_eval;
-	compl_rec.handler_data = (unsigned long) gid_pn;
-	ret = zfcp_fsf_send_ct(&gid_pn->ct, adapter->pool.gid_pn_req);
-	if (!ret)
-		wait_for_completion(&compl_rec.done);
+	gid_pn->gid_pn_req.ct_hdr.ct_rev = FC_CT_REV;
+	gid_pn->gid_pn_req.ct_hdr.ct_fs_type = FC_FST_DIR;
+	gid_pn->gid_pn_req.ct_hdr.ct_fs_subtype = FC_NS_SUBTYPE;
+	gid_pn->gid_pn_req.ct_hdr.ct_options = 0;
+	gid_pn->gid_pn_req.ct_hdr.ct_cmd = FC_NS_GID_PN;
+	gid_pn->gid_pn_req.ct_hdr.ct_mr_size = ZFCP_FC_CT_SIZE_PAGE / 4;
+	gid_pn->gid_pn_req.gid_pn.fn_wwpn = port->wwpn;
+
+	ret = zfcp_fsf_send_ct(&adapter->gs->ds, &gid_pn->ct,
+			       adapter->pool.gid_pn_req);
+	if (!ret) {
+		wait_for_completion(&completion);
+		zfcp_fc_ns_gid_pn_eval(gid_pn);
+	}
 	return ret;
 }
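
With zfcp_fc_ns_handler() gone, the generic zfcp_fc_complete() handler
only completes a caller-provided struct completion, and
zfcp_fc_ns_gid_pn_request() evaluates the response itself once
wait_for_completion() returns. The synchronous-over-asynchronous
pattern in isolation, with a hypothetical request type and submit
function:

#include <linux/completion.h>

struct async_req {			/* hypothetical request */
	void (*handler)(void *);
	void *handler_data;
};

static void complete_cb(void *data)
{
	complete(data);			/* wake the waiting submitter */
}

static int submit_and_wait(struct async_req *r,
			   int (*submit)(struct async_req *))
{
	DECLARE_COMPLETION_ONSTACK(done);
	int ret;

	r->handler = complete_cb;
	r->handler_data = &done;

	ret = submit(r);		/* handler fires from irq/work later */
	if (!ret)
		wait_for_completion(&done);
	return ret;
}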
 
@@ -316,10 +274,10 @@ static int zfcp_fc_ns_gid_pn_request(struct zfcp_port *port,
 static int zfcp_fc_ns_gid_pn(struct zfcp_port *port)
 {
 	int ret;
-	struct zfcp_gid_pn_data *gid_pn;
+	struct zfcp_fc_gid_pn *gid_pn;
 	struct zfcp_adapter *adapter = port->adapter;
 
-	gid_pn = mempool_alloc(adapter->pool.gid_pn_data, GFP_ATOMIC);
+	gid_pn = mempool_alloc(adapter->pool.gid_pn, GFP_ATOMIC);
 	if (!gid_pn)
 		return -ENOMEM;
 
@@ -333,7 +291,7 @@ static int zfcp_fc_ns_gid_pn(struct zfcp_port *port)
 
 	zfcp_fc_wka_port_put(&adapter->gs->ds);
 out:
-	mempool_free(gid_pn, adapter->pool.gid_pn_data);
+	mempool_free(gid_pn, adapter->pool.gid_pn);
 	return ret;
 }
 
@@ -357,7 +315,7 @@ void zfcp_fc_port_did_lookup(struct work_struct *work)
 
 	zfcp_erp_port_reopen(port, 0, "fcgpn_3", NULL);
 out:
-	zfcp_port_put(port);
+	put_device(&port->sysfs_device);
 }
 
 /**
@@ -366,9 +324,9 @@ out:
  */
 void zfcp_fc_trigger_did_lookup(struct zfcp_port *port)
 {
-	zfcp_port_get(port);
+	get_device(&port->sysfs_device);
 	if (!queue_work(port->adapter->work_queue, &port->gid_pn_work))
-		zfcp_port_put(port);
+		put_device(&port->sysfs_device);
 }
 
 /**
@@ -378,33 +336,36 @@ void zfcp_fc_trigger_did_lookup(struct zfcp_port *port)
  *
 * Evaluate PLOGI payload and copy important fields into zfcp_port structure
  */
-void zfcp_fc_plogi_evaluate(struct zfcp_port *port, struct fsf_plogi *plogi)
-{
-	port->maxframe_size = plogi->serv_param.common_serv_param[7] |
-		((plogi->serv_param.common_serv_param[6] & 0x0F) << 8);
-	if (plogi->serv_param.class1_serv_param[0] & 0x80)
+void zfcp_fc_plogi_evaluate(struct zfcp_port *port, struct fc_els_flogi *plogi)
+{
+	if (plogi->fl_wwpn != port->wwpn) {
+		port->d_id = 0;
+		dev_warn(&port->adapter->ccw_device->dev,
+			 "A port opened with WWPN 0x%016Lx returned data that "
+			 "identifies it as WWPN 0x%016Lx\n",
+			 (unsigned long long) port->wwpn,
+			 (unsigned long long) plogi->fl_wwpn);
+		return;
+	}
+
+	port->wwnn = plogi->fl_wwnn;
+	port->maxframe_size = plogi->fl_csp.sp_bb_data;
+
+	if (plogi->fl_cssp[0].cp_class & FC_CPC_VALID)
 		port->supported_classes |= FC_COS_CLASS1;
-	if (plogi->serv_param.class2_serv_param[0] & 0x80)
+	if (plogi->fl_cssp[1].cp_class & FC_CPC_VALID)
 		port->supported_classes |= FC_COS_CLASS2;
-	if (plogi->serv_param.class3_serv_param[0] & 0x80)
+	if (plogi->fl_cssp[2].cp_class & FC_CPC_VALID)
 		port->supported_classes |= FC_COS_CLASS3;
-	if (plogi->serv_param.class4_serv_param[0] & 0x80)
+	if (plogi->fl_cssp[3].cp_class & FC_CPC_VALID)
 		port->supported_classes |= FC_COS_CLASS4;
 }
 
-struct zfcp_els_adisc {
-	struct zfcp_send_els els;
-	struct scatterlist req;
-	struct scatterlist resp;
-	struct zfcp_ls_adisc ls_adisc;
-	struct zfcp_ls_adisc ls_adisc_acc;
-};
-
-static void zfcp_fc_adisc_handler(unsigned long data)
+static void zfcp_fc_adisc_handler(void *data)
 {
-	struct zfcp_els_adisc *adisc = (struct zfcp_els_adisc *) data;
+	struct zfcp_fc_els_adisc *adisc = data;
 	struct zfcp_port *port = adisc->els.port;
-	struct zfcp_ls_adisc *ls_adisc = &adisc->ls_adisc_acc;
+	struct fc_els_adisc *adisc_resp = &adisc->adisc_resp;
 
 	if (adisc->els.status) {
 		/* request rejected or timed out */
@@ -414,9 +375,9 @@ static void zfcp_fc_adisc_handler(unsigned long data)
 	}
 
 	if (!port->wwnn)
-		port->wwnn = ls_adisc->wwnn;
+		port->wwnn = adisc_resp->adisc_wwnn;
 
-	if ((port->wwpn != ls_adisc->wwpn) ||
+	if ((port->wwpn != adisc_resp->adisc_wwpn) ||
 	    !(atomic_read(&port->status) & ZFCP_STATUS_COMMON_OPEN)) {
 		zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
 				     "fcadh_2", NULL);
@@ -427,40 +388,44 @@ static void zfcp_fc_adisc_handler(unsigned long data)
 	zfcp_scsi_schedule_rport_register(port);
  out:
 	atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
-	zfcp_port_put(port);
-	kfree(adisc);
+	put_device(&port->sysfs_device);
+	kmem_cache_free(zfcp_data.adisc_cache, adisc);
 }
 
 static int zfcp_fc_adisc(struct zfcp_port *port)
 {
-	struct zfcp_els_adisc *adisc;
+	struct zfcp_fc_els_adisc *adisc;
 	struct zfcp_adapter *adapter = port->adapter;
+	int ret;
 
-	adisc = kzalloc(sizeof(struct zfcp_els_adisc), GFP_ATOMIC);
+	adisc = kmem_cache_alloc(zfcp_data.adisc_cache, GFP_ATOMIC);
 	if (!adisc)
 		return -ENOMEM;
 
+	adisc->els.port = port;
 	adisc->els.req = &adisc->req;
 	adisc->els.resp = &adisc->resp;
-	sg_init_one(adisc->els.req, &adisc->ls_adisc,
-		    sizeof(struct zfcp_ls_adisc));
-	sg_init_one(adisc->els.resp, &adisc->ls_adisc_acc,
-		    sizeof(struct zfcp_ls_adisc));
+	sg_init_one(adisc->els.req, &adisc->adisc_req,
+		    sizeof(struct fc_els_adisc));
+	sg_init_one(adisc->els.resp, &adisc->adisc_resp,
+		    sizeof(struct fc_els_adisc));
 
-	adisc->els.adapter = adapter;
-	adisc->els.port = port;
-	adisc->els.d_id = port->d_id;
 	adisc->els.handler = zfcp_fc_adisc_handler;
-	adisc->els.handler_data = (unsigned long) adisc;
-	adisc->els.ls_code = adisc->ls_adisc.code = ZFCP_LS_ADISC;
+	adisc->els.handler_data = adisc;
 
 	/* acc. to FC-FS, hard_nport_id in ADISC should not be set for ports
 	   without FC-AL-2 capability, so we don't set it */
-	adisc->ls_adisc.wwpn = fc_host_port_name(adapter->scsi_host);
-	adisc->ls_adisc.wwnn = fc_host_node_name(adapter->scsi_host);
-	adisc->ls_adisc.nport_id = fc_host_port_id(adapter->scsi_host);
+	adisc->adisc_req.adisc_wwpn = fc_host_port_name(adapter->scsi_host);
+	adisc->adisc_req.adisc_wwnn = fc_host_node_name(adapter->scsi_host);
+	adisc->adisc_req.adisc_cmd = ELS_ADISC;
+	hton24(adisc->adisc_req.adisc_port_id,
+	       fc_host_port_id(adapter->scsi_host));
+
+	ret = zfcp_fsf_send_els(adapter, port->d_id, &adisc->els);
+	if (ret)
+		kmem_cache_free(zfcp_data.adisc_cache, adisc);
 
-	return zfcp_fsf_send_els(&adisc->els);
+	return ret;
 }
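
/* hton24()/ntoh24() convert between a host-order u32 and the 3-byte,
 * big-endian N_Port_ID fields used on the wire. They come from the
 * common FC headers; roughly the following (shown as an assumption,
 * see <scsi/fc_frame.h> for the authoritative definitions):
 */
static inline u32 ntoh24(const u8 *p)
{
	return (p[0] << 16) | (p[1] << 8) | p[2];
}

static inline void hton24(u8 *p, u32 v)
{
	p[0] = (v >> 16) & 0xff;
	p[1] = (v >> 8) & 0xff;
	p[2] = v & 0xff;
}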
 
 void zfcp_fc_link_test_work(struct work_struct *work)
@@ -469,7 +434,7 @@ void zfcp_fc_link_test_work(struct work_struct *work)
 		container_of(work, struct zfcp_port, test_link_work);
 	int retval;
 
-	zfcp_port_get(port);
+	get_device(&port->sysfs_device);
 	port->rport_task = RPORT_DEL;
 	zfcp_scsi_rport_work(&port->rport_work);
 
@@ -488,7 +453,7 @@ void zfcp_fc_link_test_work(struct work_struct *work)
 	zfcp_erp_port_forced_reopen(port, 0, "fcltwk1", NULL);
 
 out:
-	zfcp_port_put(port);
+	put_device(&port->sysfs_device);
 }
 
 /**
@@ -501,12 +466,12 @@ out:
  */
 void zfcp_fc_test_link(struct zfcp_port *port)
 {
-	zfcp_port_get(port);
+	get_device(&port->sysfs_device);
 	if (!queue_work(port->adapter->work_queue, &port->test_link_work))
-		zfcp_port_put(port);
+		put_device(&port->sysfs_device);
 }
 
-static void zfcp_free_sg_env(struct zfcp_gpn_ft *gpn_ft, int buf_num)
+static void zfcp_free_sg_env(struct zfcp_fc_gpn_ft *gpn_ft, int buf_num)
 {
 	struct scatterlist *sg = &gpn_ft->sg_req;
 
@@ -516,10 +481,10 @@ static void zfcp_free_sg_env(struct zfcp_gpn_ft *gpn_ft, int buf_num)
 	kfree(gpn_ft);
 }
 
-static struct zfcp_gpn_ft *zfcp_alloc_sg_env(int buf_num)
+static struct zfcp_fc_gpn_ft *zfcp_alloc_sg_env(int buf_num)
 {
-	struct zfcp_gpn_ft *gpn_ft;
-	struct ct_iu_gpn_ft_req *req;
+	struct zfcp_fc_gpn_ft *gpn_ft;
+	struct zfcp_fc_gpn_ft_req *req;
 
 	gpn_ft = kzalloc(sizeof(*gpn_ft), GFP_KERNEL);
 	if (!gpn_ft)
@@ -542,159 +507,152 @@ out:
 }
 
 
-static int zfcp_fc_send_gpn_ft(struct zfcp_gpn_ft *gpn_ft,
+static int zfcp_fc_send_gpn_ft(struct zfcp_fc_gpn_ft *gpn_ft,
 			       struct zfcp_adapter *adapter, int max_bytes)
 {
-	struct zfcp_send_ct *ct = &gpn_ft->ct;
-	struct ct_iu_gpn_ft_req *req = sg_virt(&gpn_ft->sg_req);
-	struct zfcp_fc_ns_handler_data compl_rec;
+	struct zfcp_fsf_ct_els *ct = &gpn_ft->ct;
+	struct zfcp_fc_gpn_ft_req *req = sg_virt(&gpn_ft->sg_req);
+	DECLARE_COMPLETION_ONSTACK(completion);
 	int ret;
 
 	/* prepare CT IU for GPN_FT */
-	req->header.revision = ZFCP_CT_REVISION;
-	req->header.gs_type = ZFCP_CT_DIRECTORY_SERVICE;
-	req->header.gs_subtype = ZFCP_CT_NAME_SERVER;
-	req->header.options = ZFCP_CT_SYNCHRONOUS;
-	req->header.cmd_rsp_code = ZFCP_CT_GPN_FT;
-	req->header.max_res_size = max_bytes / 4;
-	req->flags = 0;
-	req->domain_id_scope = 0;
-	req->area_id_scope = 0;
-	req->fc4_type = ZFCP_CT_SCSI_FCP;
+	req->ct_hdr.ct_rev = FC_CT_REV;
+	req->ct_hdr.ct_fs_type = FC_FST_DIR;
+	req->ct_hdr.ct_fs_subtype = FC_NS_SUBTYPE;
+	req->ct_hdr.ct_options = 0;
+	req->ct_hdr.ct_cmd = FC_NS_GPN_FT;
+	req->ct_hdr.ct_mr_size = max_bytes / 4;
+	req->gpn_ft.fn_domain_id_scope = 0;
+	req->gpn_ft.fn_area_id_scope = 0;
+	req->gpn_ft.fn_fc4_type = FC_TYPE_FCP;
 
 	/* prepare zfcp_send_ct */
-	ct->wka_port = &adapter->gs->ds;
-	ct->handler = zfcp_fc_ns_handler;
-	ct->handler_data = (unsigned long)&compl_rec;
+	ct->handler = zfcp_fc_complete;
+	ct->handler_data = &completion;
 	ct->req = &gpn_ft->sg_req;
 	ct->resp = gpn_ft->sg_resp;
 
-	init_completion(&compl_rec.done);
-	compl_rec.handler = NULL;
-	ret = zfcp_fsf_send_ct(ct, NULL);
+	ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct, NULL);
 	if (!ret)
-		wait_for_completion(&compl_rec.done);
+		wait_for_completion(&completion);
 	return ret;
 }
 
-static void zfcp_fc_validate_port(struct zfcp_port *port)
+static void zfcp_fc_validate_port(struct zfcp_port *port, struct list_head *lh)
 {
-	struct zfcp_adapter *adapter = port->adapter;
-
 	if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_NOESC))
 		return;
 
 	atomic_clear_mask(ZFCP_STATUS_COMMON_NOESC, &port->status);
 
 	if ((port->supported_classes != 0) ||
-	    !list_empty(&port->unit_list_head)) {
-		zfcp_port_put(port);
+	    !list_empty(&port->unit_list))
 		return;
-	}
-	zfcp_erp_port_shutdown(port, 0, "fcpval1", NULL);
-	zfcp_erp_wait(adapter);
-	zfcp_port_put(port);
-	zfcp_port_dequeue(port);
+
+	list_move_tail(&port->list, lh);
 }
 
-static int zfcp_fc_eval_gpn_ft(struct zfcp_gpn_ft *gpn_ft, int max_entries)
+static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_gpn_ft *gpn_ft,
+			       struct zfcp_adapter *adapter, int max_entries)
 {
-	struct zfcp_send_ct *ct = &gpn_ft->ct;
+	struct zfcp_fsf_ct_els *ct = &gpn_ft->ct;
 	struct scatterlist *sg = gpn_ft->sg_resp;
-	struct ct_hdr *hdr = sg_virt(sg);
-	struct gpn_ft_resp_acc *acc = sg_virt(sg);
-	struct zfcp_adapter *adapter = ct->wka_port->adapter;
+	struct fc_ct_hdr *hdr = sg_virt(sg);
+	struct fc_gpn_ft_resp *acc = sg_virt(sg);
 	struct zfcp_port *port, *tmp;
+	unsigned long flags;
+	LIST_HEAD(remove_lh);
 	u32 d_id;
 	int ret = 0, x, last = 0;
 
 	if (ct->status)
 		return -EIO;
 
-	if (hdr->cmd_rsp_code != ZFCP_CT_ACCEPT) {
-		if (hdr->reason_code == ZFCP_CT_UNABLE_TO_PERFORM_CMD)
+	if (hdr->ct_cmd != FC_FS_ACC) {
+		if (hdr->ct_reason == FC_BA_RJT_UNABLE)
 			return -EAGAIN; /* might be a temporary condition */
 		return -EIO;
 	}
 
-	if (hdr->max_res_size) {
+	if (hdr->ct_mr_size) {
 		dev_warn(&adapter->ccw_device->dev,
 			 "The name server reported %d words residual data\n",
-			 hdr->max_res_size);
+			 hdr->ct_mr_size);
 		return -E2BIG;
 	}
 
-	mutex_lock(&zfcp_data.config_mutex);
-
 	/* first entry is the header */
 	for (x = 1; x < max_entries && !last; x++) {
-		if (x % (ZFCP_GPN_FT_ENTRIES + 1))
+		if (x % (ZFCP_FC_GPN_FT_ENT_PAGE + 1))
 			acc++;
 		else
 			acc = sg_virt(++sg);
 
-		last = acc->control & 0x80;
-		d_id = acc->port_id[0] << 16 | acc->port_id[1] << 8 |
-		       acc->port_id[2];
+		last = acc->fp_flags & FC_NS_FID_LAST;
+		d_id = ntoh24(acc->fp_fid);
 
 		/* don't attach ports with a well known address */
-		if ((d_id & ZFCP_DID_WKA) == ZFCP_DID_WKA)
+		if (d_id >= FC_FID_WELL_KNOWN_BASE)
 			continue;
 		/* skip the adapter's port and known remote ports */
-		if (acc->wwpn == fc_host_port_name(adapter->scsi_host))
-			continue;
-		port = zfcp_get_port_by_wwpn(adapter, acc->wwpn);
-		if (port)
+		if (acc->fp_wwpn == fc_host_port_name(adapter->scsi_host))
 			continue;
 
-		port = zfcp_port_enqueue(adapter, acc->wwpn,
+		port = zfcp_port_enqueue(adapter, acc->fp_wwpn,
 					 ZFCP_STATUS_COMMON_NOESC, d_id);
-		if (IS_ERR(port))
-			ret = PTR_ERR(port);
-		else
+		if (!IS_ERR(port))
 			zfcp_erp_port_reopen(port, 0, "fcegpf1", NULL);
+		else if (PTR_ERR(port) != -EEXIST)
+			ret = PTR_ERR(port);
 	}
 
 	zfcp_erp_wait(adapter);
-	list_for_each_entry_safe(port, tmp, &adapter->port_list_head, list)
-		zfcp_fc_validate_port(port);
-	mutex_unlock(&zfcp_data.config_mutex);
+	write_lock_irqsave(&adapter->port_list_lock, flags);
+	list_for_each_entry_safe(port, tmp, &adapter->port_list, list)
+		zfcp_fc_validate_port(port, &remove_lh);
+	write_unlock_irqrestore(&adapter->port_list_lock, flags);
+
+	list_for_each_entry_safe(port, tmp, &remove_lh, list) {
+		zfcp_erp_port_shutdown(port, 0, "fcegpf2", NULL);
+		zfcp_device_unregister(&port->sysfs_device,
+				       &zfcp_sysfs_port_attrs);
+	}
+
 	return ret;
 }
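
/* The tail of this function shows the usual two-phase teardown:
 * candidates are moved to a private list while port_list_lock is held,
 * then shut down and unregistered after the lock is dropped, because
 * both steps may sleep. Condensed sketch with hypothetical helpers:
 *
 *	LIST_HEAD(doomed);
 *
 *	write_lock_irqsave(&adapter->port_list_lock, flags);
 *	list_for_each_entry_safe(port, tmp, &adapter->port_list, list)
 *		if (port_is_stale(port))	// hypothetical predicate
 *			list_move_tail(&port->list, &doomed);
 *	write_unlock_irqrestore(&adapter->port_list_lock, flags);
 *
 *	list_for_each_entry_safe(port, tmp, &doomed, list)
 *		teardown_port(port);		// may sleep safely here
 */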
 
 /**
  * zfcp_fc_scan_ports - scan remote ports and attach new ports
- * @adapter: pointer to struct zfcp_adapter
+ * @work: reference to scheduled work
  */
-int zfcp_fc_scan_ports(struct zfcp_adapter *adapter)
+void zfcp_fc_scan_ports(struct work_struct *work)
 {
+	struct zfcp_adapter *adapter = container_of(work, struct zfcp_adapter,
+						    scan_work);
 	int ret, i;
-	struct zfcp_gpn_ft *gpn_ft;
+	struct zfcp_fc_gpn_ft *gpn_ft;
 	int chain, max_entries, buf_num, max_bytes;
 
 	chain = adapter->adapter_features & FSF_FEATURE_ELS_CT_CHAINED_SBALS;
-	buf_num = chain ? ZFCP_GPN_FT_BUFFERS : 1;
-	max_entries = chain ? ZFCP_GPN_FT_MAX_ENTRIES : ZFCP_GPN_FT_ENTRIES;
-	max_bytes = chain ? ZFCP_GPN_FT_MAX_SIZE : ZFCP_CT_SIZE_ONE_PAGE;
+	buf_num = chain ? ZFCP_FC_GPN_FT_NUM_BUFS : 1;
+	max_entries = chain ? ZFCP_FC_GPN_FT_MAX_ENT : ZFCP_FC_GPN_FT_ENT_PAGE;
+	max_bytes = chain ? ZFCP_FC_GPN_FT_MAX_SIZE : ZFCP_FC_CT_SIZE_PAGE;
 
 	if (fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPORT &&
 	    fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV)
-		return 0;
+		return;
 
-	ret = zfcp_fc_wka_port_get(&adapter->gs->ds);
-	if (ret)
-		return ret;
+	if (zfcp_fc_wka_port_get(&adapter->gs->ds))
+		return;
 
 	gpn_ft = zfcp_alloc_sg_env(buf_num);
-	if (!gpn_ft) {
-		ret = -ENOMEM;
+	if (!gpn_ft)
 		goto out;
-	}
 
 	for (i = 0; i < 3; i++) {
 		ret = zfcp_fc_send_gpn_ft(gpn_ft, adapter, max_bytes);
 		if (!ret) {
-			ret = zfcp_fc_eval_gpn_ft(gpn_ft, max_entries);
+			ret = zfcp_fc_eval_gpn_ft(gpn_ft, adapter, max_entries);
 			if (ret == -EAGAIN)
 				ssleep(1);
 			else
@@ -704,174 +662,116 @@ int zfcp_fc_scan_ports(struct zfcp_adapter *adapter)
 	zfcp_free_sg_env(gpn_ft, buf_num);
 out:
 	zfcp_fc_wka_port_put(&adapter->gs->ds);
-	return ret;
-}
-
-
-void _zfcp_fc_scan_ports_later(struct work_struct *work)
-{
-	zfcp_fc_scan_ports(container_of(work, struct zfcp_adapter, scan_work));
 }
 
-struct zfcp_els_fc_job {
-	struct zfcp_send_els els;
-	struct fc_bsg_job *job;
-};
-
-static void zfcp_fc_generic_els_handler(unsigned long data)
+static void zfcp_fc_ct_els_job_handler(void *data)
 {
-	struct zfcp_els_fc_job *els_fc_job = (struct zfcp_els_fc_job *) data;
-	struct fc_bsg_job *job = els_fc_job->job;
-	struct fc_bsg_reply *reply = job->reply;
-
-	if (els_fc_job->els.status) {
-		/* request rejected or timed out */
-		reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_REJECT;
-		goto out;
-	}
-
-	reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
-	reply->reply_payload_rcv_len = job->reply_payload.payload_len;
+	struct fc_bsg_job *job = data;
+	struct zfcp_fsf_ct_els *zfcp_ct_els = job->dd_data;
+	int status = zfcp_ct_els->status;
+	int reply_status;
 
-out:
-	job->state_flags = FC_RQST_STATE_DONE;
+	reply_status = status ? FC_CTELS_STATUS_REJECT : FC_CTELS_STATUS_OK;
+	job->reply->reply_data.ctels_reply.status = reply_status;
+	job->reply->reply_payload_rcv_len = job->reply_payload.payload_len;
 	job->job_done(job);
-	kfree(els_fc_job);
 }
 
-int zfcp_fc_execute_els_fc_job(struct fc_bsg_job *job)
+static int zfcp_fc_exec_els_job(struct fc_bsg_job *job,
+				struct zfcp_adapter *adapter)
 {
-	struct zfcp_els_fc_job *els_fc_job;
+	struct zfcp_fsf_ct_els *els = job->dd_data;
 	struct fc_rport *rport = job->rport;
-	struct Scsi_Host *shost;
-	struct zfcp_adapter *adapter;
 	struct zfcp_port *port;
-	u8 *port_did;
-
-	shost = rport ? rport_to_shost(rport) : job->shost;
-	adapter = (struct zfcp_adapter *)shost->hostdata[0];
-
-	if (!(atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_OPEN))
-		return -EINVAL;
-
-	els_fc_job = kzalloc(sizeof(struct zfcp_els_fc_job), GFP_KERNEL);
-	if (!els_fc_job)
-		return -ENOMEM;
+	u32 d_id;
 
-	els_fc_job->els.adapter = adapter;
 	if (rport) {
-		read_lock_irq(&zfcp_data.config_lock);
 		port = zfcp_get_port_by_wwpn(adapter, rport->port_name);
-		if (port)
-			els_fc_job->els.d_id = port->d_id;
-		read_unlock_irq(&zfcp_data.config_lock);
-		if (!port) {
-			kfree(els_fc_job);
+		if (!port)
 			return -EINVAL;
-		}
-	} else {
-		port_did = job->request->rqst_data.h_els.port_id;
-		els_fc_job->els.d_id = (port_did[0] << 16) +
-					(port_did[1] << 8) + port_did[2];
-	}
-
-	els_fc_job->els.req = job->request_payload.sg_list;
-	els_fc_job->els.resp = job->reply_payload.sg_list;
-	els_fc_job->els.handler = zfcp_fc_generic_els_handler;
-	els_fc_job->els.handler_data = (unsigned long) els_fc_job;
-	els_fc_job->job = job;
-
-	return zfcp_fsf_send_els(&els_fc_job->els);
-}
-
-struct zfcp_ct_fc_job {
-	struct zfcp_send_ct ct;
-	struct fc_bsg_job *job;
-};
-
-static void zfcp_fc_generic_ct_handler(unsigned long data)
-{
-	struct zfcp_ct_fc_job *ct_fc_job = (struct zfcp_ct_fc_job *) data;
-	struct fc_bsg_job *job = ct_fc_job->job;
-
-	job->reply->reply_data.ctels_reply.status = ct_fc_job->ct.status ?
-				FC_CTELS_STATUS_REJECT : FC_CTELS_STATUS_OK;
-	job->reply->reply_payload_rcv_len = job->reply_payload.payload_len;
-	job->state_flags = FC_RQST_STATE_DONE;
-	job->job_done(job);
 
-	zfcp_fc_wka_port_put(ct_fc_job->ct.wka_port);
+		d_id = port->d_id;
+		put_device(&port->sysfs_device);
+	} else
+		d_id = ntoh24(job->request->rqst_data.h_els.port_id);
 
-	kfree(ct_fc_job);
+	return zfcp_fsf_send_els(adapter, d_id, els);
 }
 
-int zfcp_fc_execute_ct_fc_job(struct fc_bsg_job *job)
+static int zfcp_fc_exec_ct_job(struct fc_bsg_job *job,
+			       struct zfcp_adapter *adapter)
 {
 	int ret;
 	u8 gs_type;
-	struct fc_rport *rport = job->rport;
-	struct Scsi_Host *shost;
-	struct zfcp_adapter *adapter;
-	struct zfcp_ct_fc_job *ct_fc_job;
+	struct zfcp_fsf_ct_els *ct = job->dd_data;
+	struct zfcp_fc_wka_port *wka_port;
 	u32 preamble_word1;
 
-	shost = rport ? rport_to_shost(rport) : job->shost;
-
-	adapter = (struct zfcp_adapter *)shost->hostdata[0];
-	if (!(atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_OPEN))
-		return -EINVAL;
-
-	ct_fc_job = kzalloc(sizeof(struct zfcp_ct_fc_job), GFP_KERNEL);
-	if (!ct_fc_job)
-		return -ENOMEM;
-
 	preamble_word1 = job->request->rqst_data.r_ct.preamble_word1;
 	gs_type = (preamble_word1 & 0xff000000) >> 24;
 
 	switch (gs_type) {
 	case FC_FST_ALIAS:
-		ct_fc_job->ct.wka_port = &adapter->gs->as;
+		wka_port = &adapter->gs->as;
 		break;
 	case FC_FST_MGMT:
-		ct_fc_job->ct.wka_port = &adapter->gs->ms;
+		wka_port = &adapter->gs->ms;
 		break;
 	case FC_FST_TIME:
-		ct_fc_job->ct.wka_port = &adapter->gs->ts;
+		wka_port = &adapter->gs->ts;
 		break;
 	case FC_FST_DIR:
-		ct_fc_job->ct.wka_port = &adapter->gs->ds;
+		wka_port = &adapter->gs->ds;
 		break;
 	default:
-		kfree(ct_fc_job);
 		return -EINVAL; /* no such service */
 	}
 
-	ret = zfcp_fc_wka_port_get(ct_fc_job->ct.wka_port);
-	if (ret) {
-		kfree(ct_fc_job);
+	ret = zfcp_fc_wka_port_get(wka_port);
+	if (ret)
 		return ret;
-	}
 
-	ct_fc_job->ct.req = job->request_payload.sg_list;
-	ct_fc_job->ct.resp = job->reply_payload.sg_list;
-	ct_fc_job->ct.handler = zfcp_fc_generic_ct_handler;
-	ct_fc_job->ct.handler_data = (unsigned long) ct_fc_job;
-	ct_fc_job->ct.completion = NULL;
-	ct_fc_job->job = job;
+	ret = zfcp_fsf_send_ct(wka_port, ct, NULL);
+	if (ret)
+		zfcp_fc_wka_port_put(wka_port);
 
-	ret = zfcp_fsf_send_ct(&ct_fc_job->ct, NULL);
-	if (ret) {
-		kfree(ct_fc_job);
-		zfcp_fc_wka_port_put(ct_fc_job->ct.wka_port);
-	}
 	return ret;
 }
 
+int zfcp_fc_exec_bsg_job(struct fc_bsg_job *job)
+{
+	struct Scsi_Host *shost;
+	struct zfcp_adapter *adapter;
+	struct zfcp_fsf_ct_els *ct_els = job->dd_data;
+
+	shost = job->rport ? rport_to_shost(job->rport) : job->shost;
+	adapter = (struct zfcp_adapter *)shost->hostdata[0];
+
+	if (!(atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_OPEN))
+		return -EINVAL;
+
+	ct_els->req = job->request_payload.sg_list;
+	ct_els->resp = job->reply_payload.sg_list;
+	ct_els->handler = zfcp_fc_ct_els_job_handler;
+	ct_els->handler_data = job;
+
+	switch (job->request->msgcode) {
+	case FC_BSG_RPT_ELS:
+	case FC_BSG_HST_ELS_NOLOGIN:
+		return zfcp_fc_exec_els_job(job, adapter);
+	case FC_BSG_RPT_CT:
+	case FC_BSG_HST_CT:
+		return zfcp_fc_exec_ct_job(job, adapter);
+	default:
+		return -EINVAL;
+	}
+}
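
/* job->dd_data is per-job storage sized by the FC transport template's
 * dd_bsg_size, which is why no separate allocation or free is needed in
 * this path. Assumed wiring elsewhere in this patch (field names are
 * from the FC transport class; the exact template contents here are an
 * illustration):
 */
static struct fc_function_template zfcp_transport_functions = {
	/* ... */
	.bsg_request	= zfcp_fc_exec_bsg_job,
	.dd_bsg_size	= sizeof(struct zfcp_fsf_ct_els),
};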
+
 int zfcp_fc_gs_setup(struct zfcp_adapter *adapter)
 {
-	struct zfcp_wka_ports *wka_ports;
+	struct zfcp_fc_wka_ports *wka_ports;
 
-	wka_ports = kzalloc(sizeof(struct zfcp_wka_ports), GFP_KERNEL);
+	wka_ports = kzalloc(sizeof(struct zfcp_fc_wka_ports), GFP_KERNEL);
 	if (!wka_ports)
 		return -ENOMEM;
 
@@ -880,7 +780,6 @@ int zfcp_fc_gs_setup(struct zfcp_adapter *adapter)
 	zfcp_fc_wka_port_init(&wka_ports->ts, FC_FID_TIME_SERV, adapter);
 	zfcp_fc_wka_port_init(&wka_ports->ds, FC_FID_DIR_SERV, adapter);
 	zfcp_fc_wka_port_init(&wka_ports->as, FC_FID_ALIASES, adapter);
-	zfcp_fc_wka_port_init(&wka_ports->ks, FC_FID_SEC_KEY, adapter);
 
 	return 0;
 }
diff --git a/drivers/s390/scsi/zfcp_fc.h b/drivers/s390/scsi/zfcp_fc.h
new file mode 100644
index 000000000000..cb2a3669a384
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_fc.h
@@ -0,0 +1,260 @@
+/*
+ * zfcp device driver
+ *
+ * Fibre Channel related definitions and inline functions for the zfcp
+ * device driver
+ *
+ * Copyright IBM Corporation 2009
+ */
+
+#ifndef ZFCP_FC_H
+#define ZFCP_FC_H
+
+#include <scsi/fc/fc_els.h>
+#include <scsi/fc/fc_fcp.h>
+#include <scsi/fc/fc_ns.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_tcq.h>
+#include "zfcp_fsf.h"
+
+#define ZFCP_FC_CT_SIZE_PAGE	  (PAGE_SIZE - sizeof(struct fc_ct_hdr))
+#define ZFCP_FC_GPN_FT_ENT_PAGE	  (ZFCP_FC_CT_SIZE_PAGE \
+					/ sizeof(struct fc_gpn_ft_resp))
+#define ZFCP_FC_GPN_FT_NUM_BUFS	  4 /* memory pages */
+
+#define ZFCP_FC_GPN_FT_MAX_SIZE	  (ZFCP_FC_GPN_FT_NUM_BUFS * PAGE_SIZE \
+					- sizeof(struct fc_ct_hdr))
+#define ZFCP_FC_GPN_FT_MAX_ENT	  (ZFCP_FC_GPN_FT_NUM_BUFS * \
+					(ZFCP_FC_GPN_FT_ENT_PAGE + 1))
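
/* Worked numbers, assuming 4 KiB pages and the common 16-byte
 * fc_ct_hdr / 16-byte fc_gpn_ft_resp layouts (sizes assumed here,
 * check the fc_ns.h/fc_gs.h headers):
 *   ZFCP_FC_CT_SIZE_PAGE    = 4096 - 16      = 4080 bytes
 *   ZFCP_FC_GPN_FT_ENT_PAGE = 4080 / 16      = 255 entries per page
 *   ZFCP_FC_GPN_FT_MAX_SIZE = 4 * 4096 - 16  = 16368 bytes
 *   ZFCP_FC_GPN_FT_MAX_ENT  = 4 * (255 + 1)  = 1024 entries
 * The "+ 1" accounts for the slot the CT header occupies in the first
 * page; the response evaluation skips entry 0 for the same reason.
 */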
+
+/**
+ * struct zfcp_fc_gid_pn_req - container for ct header plus gid_pn request
+ * @ct_hdr: FC GS common transport header
+ * @gid_pn: GID_PN request
+ */
+struct zfcp_fc_gid_pn_req {
+	struct fc_ct_hdr	ct_hdr;
+	struct fc_ns_gid_pn	gid_pn;
+} __packed;
+
+/**
+ * struct zfcp_fc_gid_pn_resp - container for ct header plus gid_pn response
+ * @ct_hdr: FC GS common transport header
+ * @gid_pn: GID_PN response
+ */
+struct zfcp_fc_gid_pn_resp {
+	struct fc_ct_hdr	ct_hdr;
+	struct fc_gid_pn_resp	gid_pn;
+} __packed;
+
+/**
+ * struct zfcp_fc_gid_pn - everything required in zfcp for gid_pn request
+ * @ct: data passed to zfcp_fsf for issuing fsf request
+ * @sg_req: scatterlist entry for request data
+ * @sg_resp: scatterlist entry for response data
+ * @gid_pn_req: GID_PN request data
+ * @gid_pn_resp: GID_PN response data
+ * @port: port for which the GID_PN name server request is issued
+ */
+struct zfcp_fc_gid_pn {
+	struct zfcp_fsf_ct_els ct;
+	struct scatterlist sg_req;
+	struct scatterlist sg_resp;
+	struct zfcp_fc_gid_pn_req gid_pn_req;
+	struct zfcp_fc_gid_pn_resp gid_pn_resp;
+	struct zfcp_port *port;
+};
+
+/**
+ * struct zfcp_fc_gpn_ft_req - container for ct header plus gpn_ft request
+ * @ct_hdr: FC GS common transport header
+ * @gpn_ft: GPN_FT request
+ */
+struct zfcp_fc_gpn_ft_req {
+	struct fc_ct_hdr	ct_hdr;
+	struct fc_ns_gid_ft	gpn_ft;
+} __packed;
+
+/**
+ * struct zfcp_fc_gpn_ft_resp - container for ct header plus gpn_ft response
+ * @ct_hdr: FC GS common transport header
+ * @gpn_ft: Array of gpn_ft response data to fill one memory page
+ */
+struct zfcp_fc_gpn_ft_resp {
+	struct fc_ct_hdr	ct_hdr;
+	struct fc_gpn_ft_resp	gpn_ft[ZFCP_FC_GPN_FT_ENT_PAGE];
+} __packed;
+
+/**
+ * struct zfcp_fc_gpn_ft - zfcp data for gpn_ft request
+ * @ct: data passed to zfcp_fsf for issuing fsf request
+ * @sg_req: scatter list entry for gpn_ft request
+ * @sg_resp: scatter list entries for gpn_ft responses (per memory page)
+ */
+struct zfcp_fc_gpn_ft {
+	struct zfcp_fsf_ct_els ct;
+	struct scatterlist sg_req;
+	struct scatterlist sg_resp[ZFCP_FC_GPN_FT_NUM_BUFS];
+};
+
+/**
+ * struct zfcp_fc_els_adisc - everything required in zfcp for issuing ELS ADISC
+ * @els: data required for issuing els fsf command
+ * @req: scatterlist entry for ELS ADISC request
+ * @resp: scatterlist entry for ELS ADISC response
+ * @adisc_req: ELS ADISC request data
+ * @adisc_resp: ELS ADISC response data
+ */
+struct zfcp_fc_els_adisc {
+	struct zfcp_fsf_ct_els els;
+	struct scatterlist req;
+	struct scatterlist resp;
+	struct fc_els_adisc adisc_req;
+	struct fc_els_adisc adisc_resp;
+};
+
+/**
+ * enum zfcp_fc_wka_status - FC WKA port status in zfcp
+ * @ZFCP_FC_WKA_PORT_OFFLINE: Port is closed and not in use
+ * @ZFCP_FC_WKA_PORT_CLOSING: The FSF "close port" request is pending
+ * @ZFCP_FC_WKA_PORT_OPENING: The FSF "open port" request is pending
+ * @ZFCP_FC_WKA_PORT_ONLINE: The port is open and the port handle is valid
+ */
+enum zfcp_fc_wka_status {
+	ZFCP_FC_WKA_PORT_OFFLINE,
+	ZFCP_FC_WKA_PORT_CLOSING,
+	ZFCP_FC_WKA_PORT_OPENING,
+	ZFCP_FC_WKA_PORT_ONLINE,
+};
+
+/**
+ * struct zfcp_fc_wka_port - representation of well-known-address (WKA) FC port
+ * @adapter: Pointer to adapter structure this WKA port belongs to
+ * @completion_wq: Wait for completion of open/close command
+ * @status: Current status of WKA port
+ * @refcount: Reference count to keep port open as long as it is in use
+ * @d_id: FC destination id or well-known-address
+ * @handle: FSF handle for the open WKA port
+ * @mutex: Mutex used during opening/closing state changes
+ * @work: For delaying the closing of the WKA port
+ */
+struct zfcp_fc_wka_port {
+	struct zfcp_adapter	*adapter;
+	wait_queue_head_t	completion_wq;
+	enum zfcp_fc_wka_status	status;
+	atomic_t		refcount;
+	u32			d_id;
+	u32			handle;
+	struct mutex		mutex;
+	struct delayed_work	work;
+};
+
+/**
+ * struct zfcp_fc_wka_ports - Data structures for FC generic services
+ * @ms: FC Management service
+ * @ts: FC time service
+ * @ds: FC directory service
+ * @as: FC alias service
+ */
+struct zfcp_fc_wka_ports {
+	struct zfcp_fc_wka_port ms;
+	struct zfcp_fc_wka_port ts;
+	struct zfcp_fc_wka_port ds;
+	struct zfcp_fc_wka_port as;
+};
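
/* Lifecycle of a WKA port (simplified sketch; the real implementation
 * lives in zfcp_fc.c and may differ in details such as interruptible
 * locking): the first user opens the port through FSF, later users
 * just bump @refcount, and the last put arms @work to close the port
 * after a grace period.
 */
static int example_wka_get(struct zfcp_fc_wka_port *wka)
{
	mutex_lock(&wka->mutex);
	if (wka->status == ZFCP_FC_WKA_PORT_OFFLINE ||
	    wka->status == ZFCP_FC_WKA_PORT_CLOSING) {
		wka->status = ZFCP_FC_WKA_PORT_OPENING;
		if (zfcp_fsf_open_wka_port(wka))
			wka->status = ZFCP_FC_WKA_PORT_OFFLINE;
	}
	mutex_unlock(&wka->mutex);

	wait_event(wka->completion_wq,
		   wka->status == ZFCP_FC_WKA_PORT_ONLINE ||
		   wka->status == ZFCP_FC_WKA_PORT_OFFLINE);

	if (wka->status != ZFCP_FC_WKA_PORT_ONLINE)
		return -EIO;
	atomic_inc(&wka->refcount);
	return 0;
}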
+
+/**
+ * zfcp_fc_scsi_to_fcp - setup FCP command with data from scsi_cmnd
+ * @fcp: fcp_cmnd to setup
+ * @scsi: scsi_cmnd to take LUN, task attributes/flags and CDB from
+ */
+static inline
+void zfcp_fc_scsi_to_fcp(struct fcp_cmnd *fcp, struct scsi_cmnd *scsi)
+{
+	char tag[2];
+
+	int_to_scsilun(scsi->device->lun, (struct scsi_lun *) &fcp->fc_lun);
+
+	if (scsi_populate_tag_msg(scsi, tag)) {
+		switch (tag[0]) {
+		case MSG_ORDERED_TAG:
+			fcp->fc_pri_ta |= FCP_PTA_ORDERED;
+			break;
+		case MSG_SIMPLE_TAG:
+			fcp->fc_pri_ta |= FCP_PTA_SIMPLE;
+			break;
+		}
+	} else
+		fcp->fc_pri_ta = FCP_PTA_SIMPLE;
+
+	if (scsi->sc_data_direction == DMA_FROM_DEVICE)
+		fcp->fc_flags |= FCP_CFL_RDDATA;
+	if (scsi->sc_data_direction == DMA_TO_DEVICE)
+		fcp->fc_flags |= FCP_CFL_WRDATA;
+
+	memcpy(fcp->fc_cdb, scsi->cmnd, scsi->cmd_len);
+
+	fcp->fc_dl = scsi_bufflen(scsi);
+}
+
+/**
+ * zfcp_fc_fcp_tm - setup FCP command as task management command
+ * @fcp: fcp_cmnd to setup
+ * @dev: scsi_device the task management command is sent to
+ * @tm_flags: task management flags to set up the TM command
+ */
+static inline
+void zfcp_fc_fcp_tm(struct fcp_cmnd *fcp, struct scsi_device *dev, u8 tm_flags)
+{
+	int_to_scsilun(dev->lun, (struct scsi_lun *) &fcp->fc_lun);
+	fcp->fc_tm_flags |= tm_flags;
+}
+
+/**
+ * zfcp_fc_eval_fcp_rsp - evaluate FCP RSP IU and update scsi_cmnd accordingly
+ * @fcp_rsp: FCP RSP IU to evaluate
+ * @scsi: SCSI command whose status and sense buffer get updated
+ */
+static inline
+void zfcp_fc_eval_fcp_rsp(struct fcp_resp_with_ext *fcp_rsp,
+			  struct scsi_cmnd *scsi)
+{
+	struct fcp_resp_rsp_info *rsp_info;
+	char *sense;
+	u32 sense_len, resid;
+	u8 rsp_flags;
+
+	set_msg_byte(scsi, COMMAND_COMPLETE);
+	scsi->result |= fcp_rsp->resp.fr_status;
+
+	rsp_flags = fcp_rsp->resp.fr_flags;
+
+	if (unlikely(rsp_flags & FCP_RSP_LEN_VAL)) {
+		rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
+		if (rsp_info->rsp_code == FCP_TMF_CMPL)
+			set_host_byte(scsi, DID_OK);
+		else {
+			set_host_byte(scsi, DID_ERROR);
+			return;
+		}
+	}
+
+	if (unlikely(rsp_flags & FCP_SNS_LEN_VAL)) {
+		sense = (char *) &fcp_rsp[1];
+		if (rsp_flags & FCP_RSP_LEN_VAL)
+			sense += fcp_rsp->ext.fr_sns_len;
+		sense_len = min(fcp_rsp->ext.fr_sns_len,
+				(u32) SCSI_SENSE_BUFFERSIZE);
+		memcpy(scsi->sense_buffer, sense, sense_len);
+	}
+
+	if (unlikely(rsp_flags & FCP_RESID_UNDER)) {
+		resid = fcp_rsp->ext.fr_resid;
+		scsi_set_resid(scsi, resid);
+		if (scsi_bufflen(scsi) - resid < scsi->underflow &&
+		     !(rsp_flags & FCP_SNS_LEN_VAL) &&
+		     fcp_rsp->resp.fr_status == SAM_STAT_GOOD)
+			set_host_byte(scsi, DID_ERROR);
+	}
+}
+
+#endif
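
/* Usage sketch for the two inline helpers above (the function and its
 * arguments are hypothetical; zfcp_fsf.c wires them into the real I/O
 * path later in this patch): build the FCP_CMND from the SCSI command
 * on the way out, evaluate the FCP_RSP on the way back in.
 */
static void example_fcp_round_trip(struct fcp_cmnd *cmnd,
				   struct fcp_resp_with_ext *rsp,
				   struct scsi_cmnd *scsi)
{
	memset(cmnd, 0, sizeof(*cmnd));
	zfcp_fc_scsi_to_fcp(cmnd, scsi);  /* LUN, task attrs, CDB, FCP_DL */

	/* ... request travels through FSF/QDIO to the device ... */

	zfcp_fc_eval_fcp_rsp(rsp, scsi);  /* status, sense, residual */
}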
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 4e41baa0c141..482dcd97aa5d 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -10,7 +10,9 @@
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
 #include <linux/blktrace_api.h>
+#include <scsi/fc/fc_els.h>
 #include "zfcp_ext.h"
+#include "zfcp_fc.h"
 #include "zfcp_dbf.h"
 
 static void zfcp_fsf_request_timeout_handler(unsigned long data)
@@ -122,36 +124,32 @@ void zfcp_fsf_req_free(struct zfcp_fsf_req *req)
 
 static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req)
 {
+	unsigned long flags;
 	struct fsf_status_read_buffer *sr_buf = req->data;
 	struct zfcp_adapter *adapter = req->adapter;
 	struct zfcp_port *port;
-	int d_id = sr_buf->d_id & ZFCP_DID_MASK;
-	unsigned long flags;
+	int d_id = ntoh24(sr_buf->d_id);
 
-	read_lock_irqsave(&zfcp_data.config_lock, flags);
-	list_for_each_entry(port, &adapter->port_list_head, list)
+	read_lock_irqsave(&adapter->port_list_lock, flags);
+	list_for_each_entry(port, &adapter->port_list, list)
 		if (port->d_id == d_id) {
-			read_unlock_irqrestore(&zfcp_data.config_lock, flags);
 			zfcp_erp_port_reopen(port, 0, "fssrpc1", req);
-			return;
+			break;
 		}
-	read_unlock_irqrestore(&zfcp_data.config_lock, flags);
+	read_unlock_irqrestore(&adapter->port_list_lock, flags);
 }
 
 static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req, char *id,
 					 struct fsf_link_down_info *link_down)
 {
 	struct zfcp_adapter *adapter = req->adapter;
-	unsigned long flags;
 
 	if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED)
 		return;
 
 	atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
 
-	read_lock_irqsave(&zfcp_data.config_lock, flags);
 	zfcp_scsi_schedule_rports_block(adapter);
-	read_unlock_irqrestore(&zfcp_data.config_lock, flags);
 
 	if (!link_down)
 		goto out;
@@ -291,7 +289,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
 			zfcp_erp_adapter_access_changed(adapter, "fssrh_3",
 							req);
 		if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_INCOMING_ELS)
-			schedule_work(&adapter->scan_work);
+			queue_work(adapter->work_queue, &adapter->scan_work);
 		break;
 	case FSF_STATUS_READ_CFDC_UPDATED:
 		zfcp_erp_adapter_access_changed(adapter, "fssrh_4", req);
@@ -317,7 +315,6 @@ static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req)
 	case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
 		return;
 	case FSF_SQ_COMMAND_ABORTED:
-		req->status |= ZFCP_STATUS_FSFREQ_ABORTED;
 		break;
 	case FSF_SQ_NO_RECOM:
 		dev_err(&req->adapter->ccw_device->dev,
@@ -358,8 +355,7 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
 	zfcp_dbf_hba_fsf_response(req);
 
 	if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
-		req->status |= ZFCP_STATUS_FSFREQ_ERROR |
-			ZFCP_STATUS_FSFREQ_RETRY; /* only for SCSI cmnds. */
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		return;
 	}
 
@@ -377,7 +373,7 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
 	case FSF_PROT_ERROR_STATE:
 	case FSF_PROT_SEQ_NUMB_ERROR:
 		zfcp_erp_adapter_reopen(adapter, 0, "fspse_2", req);
-		req->status |= ZFCP_STATUS_FSFREQ_RETRY;
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	case FSF_PROT_UNSUPP_QTCB_TYPE:
 		dev_err(&adapter->ccw_device->dev,
@@ -480,20 +476,27 @@ void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
 
 static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
 {
-	struct fsf_qtcb_bottom_config *bottom;
+	struct fsf_qtcb_bottom_config *bottom = &req->qtcb->bottom.config;
 	struct zfcp_adapter *adapter = req->adapter;
 	struct Scsi_Host *shost = adapter->scsi_host;
+	struct fc_els_flogi *nsp, *plogi;
 
-	bottom = &req->qtcb->bottom.config;
+	/* adjust pointers for missing command code */
+	nsp = (struct fc_els_flogi *) ((u8 *)&bottom->nport_serv_param
+					- sizeof(u32));
+	plogi = (struct fc_els_flogi *) ((u8 *)&bottom->plogi_payload
+					- sizeof(u32));
 
 	if (req->data)
 		memcpy(req->data, bottom, sizeof(*bottom));
 
-	fc_host_node_name(shost) = bottom->nport_serv_param.wwnn;
-	fc_host_port_name(shost) = bottom->nport_serv_param.wwpn;
-	fc_host_port_id(shost) = bottom->s_id & ZFCP_DID_MASK;
+	fc_host_port_name(shost) = nsp->fl_wwpn;
+	fc_host_node_name(shost) = nsp->fl_wwnn;
+	fc_host_port_id(shost) = ntoh24(bottom->s_id);
 	fc_host_speed(shost) = bottom->fc_link_speed;
 	fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
+	fc_host_supported_fc4s(shost)[2] = 1; /* FCP */
+	fc_host_active_fc4s(shost)[2] = 1; /* FCP */
 
 	adapter->hydra_version = bottom->adapter_type;
 	adapter->timer_ticks = bottom->timer_interval;
@@ -503,9 +506,9 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
 
 	switch (bottom->fc_topology) {
 	case FSF_TOPO_P2P:
-		adapter->peer_d_id = bottom->peer_d_id & ZFCP_DID_MASK;
-		adapter->peer_wwpn = bottom->plogi_payload.wwpn;
-		adapter->peer_wwnn = bottom->plogi_payload.wwnn;
+		adapter->peer_d_id = ntoh24(bottom->peer_d_id);
+		adapter->peer_wwpn = plogi->fl_wwpn;
+		adapter->peer_wwnn = plogi->fl_wwnn;
 		fc_host_port_type(shost) = FC_PORTTYPE_PTP;
 		break;
 	case FSF_TOPO_FABRIC:
@@ -881,13 +884,11 @@ static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
 		break;
 	case FSF_PORT_BOXED:
 		zfcp_erp_port_boxed(unit->port, "fsafch3", req);
-		req->status |= ZFCP_STATUS_FSFREQ_ERROR |
-			       ZFCP_STATUS_FSFREQ_RETRY;
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	case FSF_LUN_BOXED:
 		zfcp_erp_unit_boxed(unit, "fsafch4", req);
-		req->status |= ZFCP_STATUS_FSFREQ_ERROR |
-			       ZFCP_STATUS_FSFREQ_RETRY;
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
                 break;
 	case FSF_ADAPTER_STATUS_AVAILABLE:
 		switch (fsq->word[0]) {
@@ -958,10 +959,10 @@ out:
 static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
 {
 	struct zfcp_adapter *adapter = req->adapter;
-	struct zfcp_send_ct *send_ct = req->data;
+	struct zfcp_fsf_ct_els *ct = req->data;
 	struct fsf_qtcb_header *header = &req->qtcb->header;
 
-	send_ct->status = -EINVAL;
+	ct->status = -EINVAL;
 
 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
 		goto skip_fsfstatus;
@@ -969,7 +970,7 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
 	switch (header->fsf_status) {
         case FSF_GOOD:
 		zfcp_dbf_san_ct_response(req);
-		send_ct->status = 0;
+		ct->status = 0;
 		break;
         case FSF_SERVICE_CLASS_NOT_SUPPORTED:
 		zfcp_fsf_class_not_supp(req);
@@ -985,8 +986,7 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
 	case FSF_ACCESS_DENIED:
 		break;
         case FSF_PORT_BOXED:
-		req->status |= ZFCP_STATUS_FSFREQ_ERROR |
-			       ZFCP_STATUS_FSFREQ_RETRY;
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	case FSF_PORT_HANDLE_NOT_VALID:
 		zfcp_erp_adapter_reopen(adapter, 0, "fsscth1", req);
@@ -1001,8 +1001,8 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
 	}
 
 skip_fsfstatus:
-	if (send_ct->handler)
-		send_ct->handler(send_ct->handler_data);
+	if (ct->handler)
+		ct->handler(ct->handler_data);
 }
 
 static void zfcp_fsf_setup_ct_els_unchained(struct qdio_buffer_element *sbale,
@@ -1071,15 +1071,17 @@ static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req,
 				 int max_sbals)
 {
 	int ret;
+	unsigned int fcp_chan_timeout;
 
 	ret = zfcp_fsf_setup_ct_els_sbals(req, sg_req, sg_resp, max_sbals);
 	if (ret)
 		return ret;
 
 	/* common settings for ct/gs and els requests */
+	fcp_chan_timeout = 2 * FC_DEF_R_A_TOV / 1000;
 	req->qtcb->bottom.support.service_class = FSF_CLASS_3;
-	req->qtcb->bottom.support.timeout = 2 * R_A_TOV;
-	zfcp_fsf_start_timer(req, (2 * R_A_TOV + 10) * HZ);
+	req->qtcb->bottom.support.timeout = fcp_chan_timeout;
+	zfcp_fsf_start_timer(req, (fcp_chan_timeout + 10) * HZ);
 
 	return 0;
 }
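
/* Worked example, assuming the common FC_DEF_R_A_TOV of 10000 ms from
 * the shared FC headers: fcp_chan_timeout = 2 * 10000 / 1000 = 20 s is
 * handed to the FCP channel, while the local timer fires at
 * 20 + 10 = 30 s, i.e. only after the channel had its own chance to
 * report a timeout first.
 */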
@@ -1089,9 +1091,9 @@ static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req,
  * @wka_port: pointer to zfcp WKA port to send CT/GS to
  * @ct: pointer to struct zfcp_fsf_ct_els with data for CT request
  * @pool: if non-null this mempool is used to allocate struct zfcp_fsf_req
  */
-int zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool)
+int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
+		     struct zfcp_fsf_ct_els *ct, mempool_t *pool)
 {
-	struct zfcp_wka_port *wka_port = ct->wka_port;
 	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
 	struct zfcp_fsf_req *req;
 	int ret = -EIO;
@@ -1117,7 +1119,7 @@ int zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool)
 	req->qtcb->header.port_handle = wka_port->handle;
 	req->data = ct;
 
-	zfcp_dbf_san_ct_request(req);
+	zfcp_dbf_san_ct_request(req, wka_port->d_id);
 
 	ret = zfcp_fsf_req_send(req);
 	if (ret)
@@ -1134,7 +1136,7 @@ out:
 
 static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
 {
-	struct zfcp_send_els *send_els = req->data;
+	struct zfcp_fsf_ct_els *send_els = req->data;
 	struct zfcp_port *port = send_els->port;
 	struct fsf_qtcb_header *header = &req->qtcb->header;
 
@@ -1154,9 +1156,6 @@ static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
 	case FSF_ADAPTER_STATUS_AVAILABLE:
 		switch (header->fsf_status_qual.word[0]){
 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
-			if (port && (send_els->ls_code != ZFCP_LS_ADISC))
-				zfcp_fc_test_link(port);
-			/*fall through */
 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
 		case FSF_SQ_RETRY_IF_POSSIBLE:
 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
@@ -1188,10 +1187,11 @@ skip_fsfstatus:
  * zfcp_fsf_send_els - initiate an ELS command (FC-FS)
  * @adapter: pointer to zfcp adapter
  * @d_id: N_Port_ID to send ELS to
  * @els: pointer to struct zfcp_fsf_ct_els with data for the command
  */
-int zfcp_fsf_send_els(struct zfcp_send_els *els)
+int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
+		      struct zfcp_fsf_ct_els *els)
 {
 	struct zfcp_fsf_req *req;
-	struct zfcp_qdio *qdio = els->adapter->qdio;
+	struct zfcp_qdio *qdio = adapter->qdio;
 	int ret = -EIO;
 
 	spin_lock_bh(&qdio->req_q_lock);
@@ -1211,7 +1211,7 @@ int zfcp_fsf_send_els(struct zfcp_send_els *els)
 	if (ret)
 		goto failed_send;
 
-	req->qtcb->bottom.support.d_id = els->d_id;
+	hton24(req->qtcb->bottom.support.d_id, d_id);
 	req->handler = zfcp_fsf_send_els_handler;
 	req->data = els;
 
@@ -1422,7 +1422,7 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
 {
 	struct zfcp_port *port = req->data;
 	struct fsf_qtcb_header *header = &req->qtcb->header;
-	struct fsf_plogi *plogi;
+	struct fc_els_flogi *plogi;
 
 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
 		goto out;
@@ -1472,23 +1472,10 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
 		 * another GID_PN straight after a port has been opened.
 		 * Alternately, an ADISC/PDISC ELS should suffice, as well.
 		 */
-		plogi = (struct fsf_plogi *) req->qtcb->bottom.support.els;
+		plogi = (struct fc_els_flogi *) req->qtcb->bottom.support.els;
 		if (req->qtcb->bottom.support.els1_length >=
-		    FSF_PLOGI_MIN_LEN) {
-			if (plogi->serv_param.wwpn != port->wwpn) {
-				port->d_id = 0;
-				dev_warn(&port->adapter->ccw_device->dev,
-					 "A port opened with WWPN 0x%016Lx "
-					 "returned data that identifies it as "
-					 "WWPN 0x%016Lx\n",
-					 (unsigned long long) port->wwpn,
-					 (unsigned long long)
-					  plogi->serv_param.wwpn);
-			} else {
-				port->wwnn = plogi->serv_param.wwnn;
+		    FSF_PLOGI_MIN_LEN)
 				zfcp_fc_plogi_evaluate(port, plogi);
-			}
-		}
 		break;
 	case FSF_UNKNOWN_OP_SUBTYPE:
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
@@ -1496,7 +1483,7 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
 	}
 
 out:
-	zfcp_port_put(port);
+	put_device(&port->sysfs_device);
 }
 
 /**
@@ -1530,18 +1517,18 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
         sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 
 	req->handler = zfcp_fsf_open_port_handler;
-	req->qtcb->bottom.support.d_id = port->d_id;
+	hton24(req->qtcb->bottom.support.d_id, port->d_id);
 	req->data = port;
 	req->erp_action = erp_action;
 	erp_action->fsf_req = req;
-	zfcp_port_get(port);
+	get_device(&port->sysfs_device);
 
 	zfcp_fsf_start_erp_timer(req);
 	retval = zfcp_fsf_req_send(req);
 	if (retval) {
 		zfcp_fsf_req_free(req);
 		erp_action->fsf_req = NULL;
-		zfcp_port_put(port);
+		put_device(&port->sysfs_device);
 	}
 out:
 	spin_unlock_bh(&qdio->req_q_lock);
@@ -1618,11 +1605,11 @@ out:
 
 static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req)
 {
-	struct zfcp_wka_port *wka_port = req->data;
+	struct zfcp_fc_wka_port *wka_port = req->data;
 	struct fsf_qtcb_header *header = &req->qtcb->header;
 
 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR) {
-		wka_port->status = ZFCP_WKA_PORT_OFFLINE;
+		wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
 		goto out;
 	}
 
@@ -1635,13 +1622,13 @@ static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req)
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		/* fall through */
 	case FSF_ACCESS_DENIED:
-		wka_port->status = ZFCP_WKA_PORT_OFFLINE;
+		wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
 		break;
 	case FSF_GOOD:
 		wka_port->handle = header->port_handle;
 		/* fall through */
 	case FSF_PORT_ALREADY_OPEN:
-		wka_port->status = ZFCP_WKA_PORT_ONLINE;
+		wka_port->status = ZFCP_FC_WKA_PORT_ONLINE;
 	}
 out:
 	wake_up(&wka_port->completion_wq);
@@ -1649,10 +1636,10 @@ out:
 
 /**
  * zfcp_fsf_open_wka_port - create and send open wka-port request
- * @wka_port: pointer to struct zfcp_wka_port
+ * @wka_port: pointer to struct zfcp_fc_wka_port
  * Returns: 0 on success, error otherwise
  */
-int zfcp_fsf_open_wka_port(struct zfcp_wka_port *wka_port)
+int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
 {
 	struct qdio_buffer_element *sbale;
 	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
@@ -1677,7 +1664,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_wka_port *wka_port)
 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 
 	req->handler = zfcp_fsf_open_wka_port_handler;
-	req->qtcb->bottom.support.d_id = wka_port->d_id;
+	hton24(req->qtcb->bottom.support.d_id, wka_port->d_id);
 	req->data = wka_port;
 
 	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
@@ -1691,23 +1678,23 @@ out:
 
 static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
 {
-	struct zfcp_wka_port *wka_port = req->data;
+	struct zfcp_fc_wka_port *wka_port = req->data;
 
 	if (req->qtcb->header.fsf_status == FSF_PORT_HANDLE_NOT_VALID) {
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		zfcp_erp_adapter_reopen(wka_port->adapter, 0, "fscwph1", req);
 	}
 
-	wka_port->status = ZFCP_WKA_PORT_OFFLINE;
+	wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
 	wake_up(&wka_port->completion_wq);
 }
 
 /**
  * zfcp_fsf_close_wka_port - create and send close wka port request
- * @erp_action: pointer to struct zfcp_erp_action
+ * @wka_port: WKA port to close
  * Returns: 0 on success, error otherwise
  */
-int zfcp_fsf_close_wka_port(struct zfcp_wka_port *wka_port)
+int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
 {
 	struct qdio_buffer_element *sbale;
 	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
@@ -1765,13 +1752,13 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
 		/* can't use generic zfcp_erp_modify_port_status because
 		 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */
 		atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
-		list_for_each_entry(unit, &port->unit_list_head, list)
+		read_lock(&port->unit_list_lock);
+		list_for_each_entry(unit, &port->unit_list, list)
 			atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
 					  &unit->status);
+		read_unlock(&port->unit_list_lock);
 		zfcp_erp_port_boxed(port, "fscpph2", req);
-		req->status |= ZFCP_STATUS_FSFREQ_ERROR |
-			       ZFCP_STATUS_FSFREQ_RETRY;
-
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	case FSF_ADAPTER_STATUS_AVAILABLE:
 		switch (header->fsf_status_qual.word[0]) {
@@ -1787,9 +1774,11 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
 		 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port
 		 */
 		atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
-		list_for_each_entry(unit, &port->unit_list_head, list)
+		read_lock(&port->unit_list_lock);
+		list_for_each_entry(unit, &port->unit_list, list)
 			atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
 					  &unit->status);
+		read_unlock(&port->unit_list_lock);
 		break;
 	}
 }
@@ -1873,8 +1862,7 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req)
 		break;
 	case FSF_PORT_BOXED:
 		zfcp_erp_port_boxed(unit->port, "fsouh_2", req);
-		req->status |= ZFCP_STATUS_FSFREQ_ERROR |
-			       ZFCP_STATUS_FSFREQ_RETRY;
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	case FSF_LUN_SHARING_VIOLATION:
 		if (header->fsf_status_qual.word[0])
@@ -2036,8 +2024,7 @@ static void zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *req)
 		break;
 	case FSF_PORT_BOXED:
 		zfcp_erp_port_boxed(unit->port, "fscuh_3", req);
-		req->status |= ZFCP_STATUS_FSFREQ_ERROR |
-			       ZFCP_STATUS_FSFREQ_RETRY;
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	case FSF_ADAPTER_STATUS_AVAILABLE:
 		switch (req->qtcb->header.fsf_status_qual.word[0]) {
@@ -2109,72 +2096,57 @@ static void zfcp_fsf_update_lat(struct fsf_latency_record *lat_rec, u32 lat)
 	lat_rec->max = max(lat_rec->max, lat);
 }
 
-static void zfcp_fsf_req_latency(struct zfcp_fsf_req *req)
+static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
 {
-	struct fsf_qual_latency_info *lat_inf;
-	struct latency_cont *lat;
+	struct fsf_qual_latency_info *lat_in;
+	struct latency_cont *lat = NULL;
 	struct zfcp_unit *unit = req->unit;
+	struct zfcp_blk_drv_data blktrc;
+	int ticks = req->adapter->timer_ticks;
 
-	lat_inf = &req->qtcb->prefix.prot_status_qual.latency_info;
-
-	switch (req->qtcb->bottom.io.data_direction) {
-	case FSF_DATADIR_READ:
-		lat = &unit->latencies.read;
-		break;
-	case FSF_DATADIR_WRITE:
-		lat = &unit->latencies.write;
-		break;
-	case FSF_DATADIR_CMND:
-		lat = &unit->latencies.cmd;
-		break;
-	default:
-		return;
-	}
+	lat_in = &req->qtcb->prefix.prot_status_qual.latency_info;
 
-	spin_lock(&unit->latencies.lock);
-	zfcp_fsf_update_lat(&lat->channel, lat_inf->channel_lat);
-	zfcp_fsf_update_lat(&lat->fabric, lat_inf->fabric_lat);
-	lat->counter++;
-	spin_unlock(&unit->latencies.lock);
-}
-
-#ifdef CONFIG_BLK_DEV_IO_TRACE
-static void zfcp_fsf_trace_latency(struct zfcp_fsf_req *fsf_req)
-{
-	struct fsf_qual_latency_info *lat_inf;
-	struct scsi_cmnd *scsi_cmnd = (struct scsi_cmnd *)fsf_req->data;
-	struct request *req = scsi_cmnd->request;
-	struct zfcp_blk_drv_data trace;
-	int ticks = fsf_req->adapter->timer_ticks;
+	blktrc.flags = 0;
+	blktrc.magic = ZFCP_BLK_DRV_DATA_MAGIC;
+	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
+		blktrc.flags |= ZFCP_BLK_REQ_ERROR;
+	blktrc.inb_usage = req->queue_req.qdio_inb_usage;
+	blktrc.outb_usage = req->queue_req.qdio_outb_usage;
+
+	if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA) {
+		blktrc.flags |= ZFCP_BLK_LAT_VALID;
+		blktrc.channel_lat = lat_in->channel_lat * ticks;
+		blktrc.fabric_lat = lat_in->fabric_lat * ticks;
+
+		switch (req->qtcb->bottom.io.data_direction) {
+		case FSF_DATADIR_READ:
+			lat = &unit->latencies.read;
+			break;
+		case FSF_DATADIR_WRITE:
+			lat = &unit->latencies.write;
+			break;
+		case FSF_DATADIR_CMND:
+			lat = &unit->latencies.cmd;
+			break;
+		}
 
-	trace.flags = 0;
-	trace.magic = ZFCP_BLK_DRV_DATA_MAGIC;
-	if (fsf_req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA) {
-		trace.flags |= ZFCP_BLK_LAT_VALID;
-		lat_inf = &fsf_req->qtcb->prefix.prot_status_qual.latency_info;
-		trace.channel_lat = lat_inf->channel_lat * ticks;
-		trace.fabric_lat = lat_inf->fabric_lat * ticks;
+		if (lat) {
+			spin_lock(&unit->latencies.lock);
+			zfcp_fsf_update_lat(&lat->channel, lat_in->channel_lat);
+			zfcp_fsf_update_lat(&lat->fabric, lat_in->fabric_lat);
+			lat->counter++;
+			spin_unlock(&unit->latencies.lock);
+		}
 	}
-	if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)
-		trace.flags |= ZFCP_BLK_REQ_ERROR;
-	trace.inb_usage = fsf_req->queue_req.qdio_inb_usage;
-	trace.outb_usage = fsf_req->queue_req.qdio_outb_usage;
 
-	blk_add_driver_data(req->q, req, &trace, sizeof(trace));
-}
-#else
-static inline void zfcp_fsf_trace_latency(struct zfcp_fsf_req *fsf_req)
-{
+	blk_add_driver_data(scsi->request->q, scsi->request, &blktrc,
+			    sizeof(blktrc));
 }
-#endif
 
 static void zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *req)
 {
 	struct scsi_cmnd *scpnt;
-	struct fcp_rsp_iu *fcp_rsp_iu = (struct fcp_rsp_iu *)
-	    &(req->qtcb->bottom.io.fcp_rsp);
-	u32 sns_len;
-	char *fcp_rsp_info = (unsigned char *) &fcp_rsp_iu[1];
+	struct fcp_resp_with_ext *fcp_rsp;
 	unsigned long flags;
 
 	read_lock_irqsave(&req->adapter->abort_lock, flags);
@@ -2185,50 +2157,16 @@ static void zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *req)
 		return;
 	}
 
-	if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ABORTED)) {
-		set_host_byte(scpnt, DID_SOFT_ERROR);
-		goto skip_fsfstatus;
-	}
-
 	if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
-		set_host_byte(scpnt, DID_ERROR);
+		set_host_byte(scpnt, DID_TRANSPORT_DISRUPTED);
 		goto skip_fsfstatus;
 	}
 
-	set_msg_byte(scpnt, COMMAND_COMPLETE);
-
-	scpnt->result |= fcp_rsp_iu->scsi_status;
+	fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp;
+	zfcp_fc_eval_fcp_rsp(fcp_rsp, scpnt);
 
-	if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA)
-		zfcp_fsf_req_latency(req);
-
-	zfcp_fsf_trace_latency(req);
-
-	if (unlikely(fcp_rsp_iu->validity.bits.fcp_rsp_len_valid)) {
-		if (fcp_rsp_info[3] == RSP_CODE_GOOD)
-			set_host_byte(scpnt, DID_OK);
-		else {
-			set_host_byte(scpnt, DID_ERROR);
-			goto skip_fsfstatus;
-		}
-	}
-
-	if (unlikely(fcp_rsp_iu->validity.bits.fcp_sns_len_valid)) {
-		sns_len = FSF_FCP_RSP_SIZE - sizeof(struct fcp_rsp_iu) +
-			  fcp_rsp_iu->fcp_rsp_len;
-		sns_len = min(sns_len, (u32) SCSI_SENSE_BUFFERSIZE);
-		sns_len = min(sns_len, fcp_rsp_iu->fcp_sns_len);
-
-		memcpy(scpnt->sense_buffer,
-		       zfcp_get_fcp_sns_info_ptr(fcp_rsp_iu), sns_len);
-	}
+	zfcp_fsf_req_trace(req, scpnt);
 
-	if (unlikely(fcp_rsp_iu->validity.bits.fcp_resid_under)) {
-		scsi_set_resid(scpnt, fcp_rsp_iu->fcp_resid);
-		if (scsi_bufflen(scpnt) - scsi_get_resid(scpnt) <
-		    scpnt->underflow)
-			set_host_byte(scpnt, DID_ERROR);
-	}
 skip_fsfstatus:
 	if (scpnt->result != 0)
 		zfcp_dbf_scsi_result("erro", 3, req->adapter->dbf, scpnt, req);
@@ -2250,11 +2188,13 @@ skip_fsfstatus:
 
 static void zfcp_fsf_send_fcp_ctm_handler(struct zfcp_fsf_req *req)
 {
-	struct fcp_rsp_iu *fcp_rsp_iu = (struct fcp_rsp_iu *)
-	    &(req->qtcb->bottom.io.fcp_rsp);
-	char *fcp_rsp_info = (unsigned char *) &fcp_rsp_iu[1];
+	struct fcp_resp_with_ext *fcp_rsp;
+	struct fcp_resp_rsp_info *rsp_info;
+
+	fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp;
+	rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
 
-	if ((fcp_rsp_info[3] != RSP_CODE_GOOD) ||
+	if ((rsp_info->rsp_code != FCP_TMF_CMPL) ||
 	     (req->status & ZFCP_STATUS_FSFREQ_ERROR))
 		req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED;
 }
@@ -2314,13 +2254,11 @@ static void zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *req)
 		break;
 	case FSF_PORT_BOXED:
 		zfcp_erp_port_boxed(unit->port, "fssfch5", req);
-		req->status |= ZFCP_STATUS_FSFREQ_ERROR |
-			       ZFCP_STATUS_FSFREQ_RETRY;
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	case FSF_LUN_BOXED:
 		zfcp_erp_unit_boxed(unit, "fssfch6", req);
-		req->status |= ZFCP_STATUS_FSFREQ_ERROR |
-			       ZFCP_STATUS_FSFREQ_RETRY;
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	case FSF_ADAPTER_STATUS_AVAILABLE:
 		if (header->fsf_status_qual.word[0] ==
@@ -2335,24 +2273,10 @@ skip_fsfstatus:
 	else {
 		zfcp_fsf_send_fcp_command_task_handler(req);
 		req->unit = NULL;
-		zfcp_unit_put(unit);
+		put_device(&unit->sysfs_device);
 	}
 }
 
-static void zfcp_set_fcp_dl(struct fcp_cmnd_iu *fcp_cmd, u32 fcp_dl)
-{
-	u32 *fcp_dl_ptr;
-
-	/*
-	 * fcp_dl_addr = start address of fcp_cmnd structure +
-	 * size of fixed part + size of dynamically sized add_dcp_cdb field
-	 * SEE FCP-2 documentation
-	 */
-	fcp_dl_ptr = (u32 *) ((unsigned char *) &fcp_cmd[1] +
-			(fcp_cmd->add_fcp_cdb_length << 2));
-	*fcp_dl_ptr = fcp_dl;
-}
-
 /**
  * zfcp_fsf_send_fcp_command_task - initiate an FCP command (for a SCSI command)
  * @unit: unit where command is sent to
@@ -2362,7 +2286,7 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
 				   struct scsi_cmnd *scsi_cmnd)
 {
 	struct zfcp_fsf_req *req;
-	struct fcp_cmnd_iu *fcp_cmnd_iu;
+	struct fcp_cmnd *fcp_cmnd;
 	unsigned int sbtype = SBAL_FLAGS0_TYPE_READ;
 	int real_bytes, retval = -EIO;
 	struct zfcp_adapter *adapter = unit->port->adapter;
@@ -2387,23 +2311,21 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
 	}
 
 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
-	zfcp_unit_get(unit);
+	get_device(&unit->sysfs_device);
 	req->unit = unit;
 	req->data = scsi_cmnd;
 	req->handler = zfcp_fsf_send_fcp_command_handler;
 	req->qtcb->header.lun_handle = unit->handle;
 	req->qtcb->header.port_handle = unit->port->handle;
 	req->qtcb->bottom.io.service_class = FSF_CLASS_3;
+	req->qtcb->bottom.io.fcp_cmnd_length = FCP_CMND_LEN;
 
 	scsi_cmnd->host_scribble = (unsigned char *) req->req_id;
 
-	fcp_cmnd_iu = (struct fcp_cmnd_iu *) &(req->qtcb->bottom.io.fcp_cmnd);
-	fcp_cmnd_iu->fcp_lun = unit->fcp_lun;
 	/*
 	 * set depending on data direction:
 	 *      data direction bits in SBALE (SB Type)
 	 *      data direction bits in QTCB
-	 *      data direction bits in FCP_CMND IU
 	 */
 	switch (scsi_cmnd->sc_data_direction) {
 	case DMA_NONE:
@@ -2411,32 +2333,17 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
 		break;
 	case DMA_FROM_DEVICE:
 		req->qtcb->bottom.io.data_direction = FSF_DATADIR_READ;
-		fcp_cmnd_iu->rddata = 1;
 		break;
 	case DMA_TO_DEVICE:
 		req->qtcb->bottom.io.data_direction = FSF_DATADIR_WRITE;
 		sbtype = SBAL_FLAGS0_TYPE_WRITE;
-		fcp_cmnd_iu->wddata = 1;
 		break;
 	case DMA_BIDIRECTIONAL:
 		goto failed_scsi_cmnd;
 	}
 
-	if (likely((scsi_cmnd->device->simple_tags) ||
-		   ((atomic_read(&unit->status) & ZFCP_STATUS_UNIT_READONLY) &&
-		    (atomic_read(&unit->status) & ZFCP_STATUS_UNIT_SHARED))))
-		fcp_cmnd_iu->task_attribute = SIMPLE_Q;
-	else
-		fcp_cmnd_iu->task_attribute = UNTAGGED;
-
-	if (unlikely(scsi_cmnd->cmd_len > FCP_CDB_LENGTH))
-		fcp_cmnd_iu->add_fcp_cdb_length =
-			(scsi_cmnd->cmd_len - FCP_CDB_LENGTH) >> 2;
-
-	memcpy(fcp_cmnd_iu->fcp_cdb, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
-
-	req->qtcb->bottom.io.fcp_cmnd_length = sizeof(struct fcp_cmnd_iu) +
-		fcp_cmnd_iu->add_fcp_cdb_length + sizeof(u32);
+	fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
+	zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd);
 
 	real_bytes = zfcp_qdio_sbals_from_sg(qdio, &req->queue_req, sbtype,
 					     scsi_sglist(scsi_cmnd),
@@ -2454,8 +2361,6 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
 		goto failed_scsi_cmnd;
 	}
 
-	zfcp_set_fcp_dl(fcp_cmnd_iu, real_bytes);
-
 	retval = zfcp_fsf_req_send(req);
 	if (unlikely(retval))
 		goto failed_scsi_cmnd;
@@ -2463,7 +2368,7 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
 	goto out;
 
 failed_scsi_cmnd:
-	zfcp_unit_put(unit);
+	put_device(&unit->sysfs_device);
 	zfcp_fsf_req_free(req);
 	scsi_cmnd->host_scribble = NULL;
 out:
@@ -2481,7 +2386,7 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags)
 {
 	struct qdio_buffer_element *sbale;
 	struct zfcp_fsf_req *req = NULL;
-	struct fcp_cmnd_iu *fcp_cmnd_iu;
+	struct fcp_cmnd *fcp_cmnd;
 	struct zfcp_qdio *qdio = unit->port->adapter->qdio;
 
 	if (unlikely(!(atomic_read(&unit->status) &
@@ -2507,16 +2412,14 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags)
 	req->qtcb->header.port_handle = unit->port->handle;
 	req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
 	req->qtcb->bottom.io.service_class = FSF_CLASS_3;
-	req->qtcb->bottom.io.fcp_cmnd_length = 	sizeof(struct fcp_cmnd_iu) +
-						sizeof(u32);
+	req->qtcb->bottom.io.fcp_cmnd_length = FCP_CMND_LEN;
 
 	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
 	sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE;
 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 
-	fcp_cmnd_iu = (struct fcp_cmnd_iu *) &req->qtcb->bottom.io.fcp_cmnd;
-	fcp_cmnd_iu->fcp_lun = unit->fcp_lun;
-	fcp_cmnd_iu->task_management_flags = tm_flags;
+	fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
+	zfcp_fc_fcp_tm(fcp_cmnd, unit->device, tm_flags);
 
 	zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
 	if (!zfcp_fsf_req_send(req))
diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
index dcc7c1dbcf58..b3de682b64cf 100644
--- a/drivers/s390/scsi/zfcp_fsf.h
+++ b/drivers/s390/scsi/zfcp_fsf.h
@@ -11,6 +11,7 @@
 
 #include <linux/pfn.h>
 #include <linux/scatterlist.h>
+#include <scsi/libfc.h>
 
 #define FSF_QTCB_CURRENT_VERSION		0x00000001
 
@@ -228,7 +229,8 @@ struct fsf_status_read_buffer {
 	u32 length;
 	u32 res1;
 	struct fsf_queue_designator queue_designator;
-	u32 d_id;
+	u8 res2;
+	u8 d_id[3];
 	u32 class;
 	u64 fcp_lun;
 	u8  res3[24];
@@ -309,22 +311,7 @@ struct fsf_qtcb_header {
 	u8  res4[16];
 } __attribute__ ((packed));
 
-struct fsf_nport_serv_param {
-	u8  common_serv_param[16];
-	u64 wwpn;
-	u64 wwnn;
-	u8  class1_serv_param[16];
-	u8  class2_serv_param[16];
-	u8  class3_serv_param[16];
-	u8  class4_serv_param[16];
-	u8  vendor_version_level[16];
-} __attribute__ ((packed));
-
 #define FSF_PLOGI_MIN_LEN	112
-struct fsf_plogi {
-	u32    code;
-	struct fsf_nport_serv_param serv_param;
-} __attribute__ ((packed));
 
 #define FSF_FCP_CMND_SIZE	288
 #define FSF_FCP_RSP_SIZE	128
@@ -342,8 +329,8 @@ struct fsf_qtcb_bottom_io {
 
 struct fsf_qtcb_bottom_support {
 	u32 operation_subtype;
-	u8  res1[12];
-	u32 d_id;
+	u8  res1[13];
+	u8 d_id[3];
 	u32 option;
 	u64 fcp_lun;
 	u64 res2;
@@ -372,18 +359,18 @@ struct fsf_qtcb_bottom_config {
 	u32 fc_topology;
 	u32 fc_link_speed;
 	u32 adapter_type;
-	u32 peer_d_id;
+	u8 res0;
+	u8 peer_d_id[3];
 	u8 res1[2];
 	u16 timer_interval;
-	u8 res2[8];
-	u32 s_id;
-	struct fsf_nport_serv_param nport_serv_param;
-	u8 reserved_nport_serv_param[16];
+	u8 res2[9];
+	u8 s_id[3];
+	u8 nport_serv_param[128];
 	u8 res3[8];
 	u32 adapter_ports;
 	u32 hardware_version;
 	u8 serial_number[32];
-	struct fsf_nport_serv_param plogi_payload;
+	u8 plogi_payload[112];
 	struct fsf_statistics_info stat_info;
 	u8 res4[112];
 } __attribute__ ((packed));
@@ -450,4 +437,22 @@ struct zfcp_blk_drv_data {
 	u64 fabric_lat;
 } __attribute__ ((packed));
 
+/**
+ * struct zfcp_fsf_ct_els - zfcp data for ct or els request
+ * @req: scatter-gather list for request
+ * @resp: scatter-gather list for response
+ * @handler: handler function (called for response to the request)
+ * @handler_data: data passed to handler function
+ * @port: Optional pointer to port for zfcp internal ELS (only test link ADISC)
+ * @status: used to pass error status to calling function
+ */
+struct zfcp_fsf_ct_els {
+	struct scatterlist *req;
+	struct scatterlist *resp;
+	void (*handler)(void *);
+	void *handler_data;
+	struct zfcp_port *port;
+	int status;
+};
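+
+/* Note: one zfcp_fsf_ct_els also backs each FC bsg request; it is sized
+ * into the job's driver data via the dd_bsg_size member of
+ * zfcp_transport_functions in zfcp_scsi.c. */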
+
 #endif				/* FSF_H */
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 0e1a34627a2e..771cc536a989 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -9,29 +9,33 @@
 #define KMSG_COMPONENT "zfcp"
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
+#include <linux/types.h>
+#include <scsi/fc/fc_fcp.h>
 #include <asm/atomic.h>
 #include "zfcp_ext.h"
 #include "zfcp_dbf.h"
+#include "zfcp_fc.h"
 
 static unsigned int default_depth = 32;
 module_param_named(queue_depth, default_depth, uint, 0600);
 MODULE_PARM_DESC(queue_depth, "Default queue depth for new SCSI devices");
 
-/* Find start of Sense Information in FCP response unit*/
-char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *fcp_rsp_iu)
+static int zfcp_scsi_change_queue_depth(struct scsi_device *sdev, int depth,
+					int reason)
 {
-	char *fcp_sns_info_ptr;
-
-	fcp_sns_info_ptr = (unsigned char *) &fcp_rsp_iu[1];
-	if (fcp_rsp_iu->validity.bits.fcp_rsp_len_valid)
-		fcp_sns_info_ptr += fcp_rsp_iu->fcp_rsp_len;
-
-	return fcp_sns_info_ptr;
-}
-
-static int zfcp_scsi_change_queue_depth(struct scsi_device *sdev, int depth)
-{
-	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
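+	/* the reason code says why the midlayer wants the depth changed:
+	 * an explicit (re)set, queue-full tracking, or a ramp up after
+	 * the queue-full condition has cleared */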
+	switch (reason) {
+	case SCSI_QDEPTH_DEFAULT:
+		scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
+		break;
+	case SCSI_QDEPTH_QFULL:
+		scsi_track_queue_full(sdev, depth);
+		break;
+	case SCSI_QDEPTH_RAMP_UP:
+		scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
 	return sdev->queue_depth;
 }
 
@@ -39,7 +43,7 @@ static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt)
 {
 	struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata;
 	unit->device = NULL;
-	zfcp_unit_put(unit);
+	put_device(&unit->sysfs_device);
 }
 
 static int zfcp_scsi_slave_configure(struct scsi_device *sdp)
@@ -99,12 +103,26 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt,
 	}
 
 	status = atomic_read(&unit->status);
-	if (unlikely((status & ZFCP_STATUS_COMMON_ERP_FAILED) ||
-		     !(status & ZFCP_STATUS_COMMON_RUNNING))) {
+	if (unlikely(status & ZFCP_STATUS_COMMON_ERP_FAILED) &&
+	    !(atomic_read(&unit->port->status) &
+	      ZFCP_STATUS_COMMON_ERP_FAILED)) {
+		/* only the unit failed (e.g. access denied) while the port
+		 * is good; not covered by the FC transport, so fail here */
 		zfcp_scsi_command_fail(scpnt, DID_ERROR);
 		return 0;
 	}
 
+	if (unlikely(!(status & ZFCP_STATUS_COMMON_UNBLOCKED))) {
+		/* This could be either of:
+		 * - open unit pending: temporary, will result in an open
+		 *	unit or in ERP_FAILED, so retry the command
+		 * - call to rport_delete pending: mimic the retry from
+		 *	fc_remote_port_chkready until the rport is BLOCKED
+		 */
+		zfcp_scsi_command_fail(scpnt, DID_IMM_RETRY);
+		return 0;
+	}
+
 	ret = zfcp_fsf_send_fcp_command_task(unit, scpnt);
 	if (unlikely(ret == -EBUSY))
 		return SCSI_MLQUEUE_DEVICE_BUSY;
@@ -115,49 +133,44 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt,
 }
 
 static struct zfcp_unit *zfcp_unit_lookup(struct zfcp_adapter *adapter,
-					  int channel, unsigned int id,
-					  unsigned int lun)
+					  unsigned int id, u64 lun)
 {
+	unsigned long flags;
 	struct zfcp_port *port;
-	struct zfcp_unit *unit;
-	int scsi_lun;
+	struct zfcp_unit *unit = NULL;
 
-	list_for_each_entry(port, &adapter->port_list_head, list) {
+	read_lock_irqsave(&adapter->port_list_lock, flags);
+	list_for_each_entry(port, &adapter->port_list, list) {
 		if (!port->rport || (id != port->rport->scsi_target_id))
 			continue;
-		list_for_each_entry(unit, &port->unit_list_head, list) {
-			scsi_lun = scsilun_to_int(
-				(struct scsi_lun *)&unit->fcp_lun);
-			if (lun == scsi_lun)
-				return unit;
-		}
+		unit = zfcp_get_unit_by_lun(port, lun);
+		if (unit)
+			break;
 	}
+	read_unlock_irqrestore(&adapter->port_list_lock, flags);
 
-	return NULL;
+	return unit;
 }
 
 static int zfcp_scsi_slave_alloc(struct scsi_device *sdp)
 {
 	struct zfcp_adapter *adapter;
 	struct zfcp_unit *unit;
-	unsigned long flags;
-	int retval = -ENXIO;
+	u64 lun;
 
 	adapter = (struct zfcp_adapter *) sdp->host->hostdata[0];
 	if (!adapter)
 		goto out;
 
-	read_lock_irqsave(&zfcp_data.config_lock, flags);
-	unit = zfcp_unit_lookup(adapter, sdp->channel, sdp->id, sdp->lun);
+	int_to_scsilun(sdp->lun, (struct scsi_lun *)&lun);
+	unit = zfcp_unit_lookup(adapter, sdp->id, lun);
 	if (unit) {
 		sdp->hostdata = unit;
 		unit->device = sdp;
-		zfcp_unit_get(unit);
-		retval = 0;
+		return 0;
 	}
-	read_unlock_irqrestore(&zfcp_data.config_lock, flags);
 out:
-	return retval;
+	return -ENXIO;
 }
 
 static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
@@ -196,6 +209,7 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
 			break;
 
 		zfcp_erp_wait(adapter);
+		fc_block_scsi_eh(scpnt);
 		if (!(atomic_read(&adapter->status) &
 		      ZFCP_STATUS_COMMON_RUNNING)) {
 			zfcp_dbf_scsi_abort("nres", adapter->dbf, scpnt, NULL,
@@ -235,6 +249,7 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
 			break;
 
 		zfcp_erp_wait(adapter);
+		fc_block_scsi_eh(scpnt);
 		if (!(atomic_read(&adapter->status) &
 		      ZFCP_STATUS_COMMON_RUNNING)) {
 			zfcp_dbf_scsi_devreset("nres", tm_flags, unit, scpnt);
@@ -249,9 +264,6 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
 	if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) {
 		zfcp_dbf_scsi_devreset("fail", tm_flags, unit, scpnt);
 		retval = FAILED;
-	} else if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCNOTSUPP) {
-		zfcp_dbf_scsi_devreset("nsup", tm_flags, unit, scpnt);
-		retval = FAILED;
 	} else
 		zfcp_dbf_scsi_devreset("okay", tm_flags, unit, scpnt);
 
@@ -261,12 +273,12 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
 
 static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt)
 {
-	return zfcp_task_mgmt_function(scpnt, FCP_LOGICAL_UNIT_RESET);
+	return zfcp_task_mgmt_function(scpnt, FCP_TMF_LUN_RESET);
 }
 
 static int zfcp_scsi_eh_target_reset_handler(struct scsi_cmnd *scpnt)
 {
-	return zfcp_task_mgmt_function(scpnt, FCP_TARGET_RESET);
+	return zfcp_task_mgmt_function(scpnt, FCP_TMF_TGT_RESET);
 }
 
 static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
@@ -276,6 +288,7 @@ static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
 
 	zfcp_erp_adapter_reopen(adapter, 0, "schrh_1", scpnt);
 	zfcp_erp_wait(adapter);
+	fc_block_scsi_eh(scpnt);
 
 	return SUCCESS;
 }
@@ -303,7 +316,7 @@ int zfcp_adapter_scsi_register(struct zfcp_adapter *adapter)
 	adapter->scsi_host->max_lun = 1;
 	adapter->scsi_host->max_channel = 0;
 	adapter->scsi_host->unique_id = dev_id.devno;
-	adapter->scsi_host->max_cmd_len = 255;
+	adapter->scsi_host->max_cmd_len = 16; /* in struct fcp_cmnd */
 	adapter->scsi_host->transportt = zfcp_data.scsi_transport_template;
 
 	adapter->scsi_host->hostdata[0] = (unsigned long) adapter;
@@ -325,12 +338,11 @@ void zfcp_adapter_scsi_unregister(struct zfcp_adapter *adapter)
 	if (!shost)
 		return;
 
-	read_lock_irq(&zfcp_data.config_lock);
-	list_for_each_entry(port, &adapter->port_list_head, list)
-		if (port->rport)
-			port->rport = NULL;
+	read_lock_irq(&adapter->port_list_lock);
+	list_for_each_entry(port, &adapter->port_list, list)
+		port->rport = NULL;
+	read_unlock_irq(&adapter->port_list_lock);
 
-	read_unlock_irq(&zfcp_data.config_lock);
 	fc_remove_host(shost);
 	scsi_remove_host(shost);
 	scsi_host_put(shost);
@@ -348,7 +360,7 @@ zfcp_init_fc_host_stats(struct zfcp_adapter *adapter)
 		fc_stats = kmalloc(sizeof(*fc_stats), GFP_KERNEL);
 		if (!fc_stats)
 			return NULL;
-		adapter->fc_stats = fc_stats; /* freed in adater_dequeue */
+		adapter->fc_stats = fc_stats; /* freed in adapter_release */
 	}
 	memset(adapter->fc_stats, 0, sizeof(*adapter->fc_stats));
 	return adapter->fc_stats;
@@ -464,7 +476,7 @@ static void zfcp_reset_fc_host_stats(struct Scsi_Host *shost)
 		adapter->stats_reset = jiffies/HZ;
 		kfree(adapter->stats_reset_data);
 		adapter->stats_reset_data = data; /* finally freed in
-						     adapter_dequeue */
+						     adapter_release */
 	}
 }
 
@@ -495,7 +507,7 @@ static void zfcp_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
  * @rport: The FC rport where to terminate I/O
  *
  * Abort all pending SCSI commands for a port by closing the
- * port. Using a reopen for avoids a conflict with a shutdown
+ * port. Using a reopen avoids a conflict with a shutdown
  * overwriting a reopen.
  */
 static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport)
@@ -505,15 +517,11 @@ static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport)
 	struct zfcp_adapter *adapter =
 		(struct zfcp_adapter *)shost->hostdata[0];
 
-	write_lock_irq(&zfcp_data.config_lock);
 	port = zfcp_get_port_by_wwpn(adapter, rport->port_name);
-	if (port)
-		zfcp_port_get(port);
-	write_unlock_irq(&zfcp_data.config_lock);
 
 	if (port) {
 		zfcp_erp_port_reopen(port, 0, "sctrpi1", NULL);
-		zfcp_port_put(port);
+		put_device(&port->sysfs_device);
 	}
 }
 
@@ -555,31 +563,34 @@ static void zfcp_scsi_rport_block(struct zfcp_port *port)
 
 void zfcp_scsi_schedule_rport_register(struct zfcp_port *port)
 {
-	zfcp_port_get(port);
+	get_device(&port->sysfs_device);
 	port->rport_task = RPORT_ADD;
 
 	if (!queue_work(port->adapter->work_queue, &port->rport_work))
-		zfcp_port_put(port);
+		put_device(&port->sysfs_device);
 }
 
 void zfcp_scsi_schedule_rport_block(struct zfcp_port *port)
 {
-	zfcp_port_get(port);
+	get_device(&port->sysfs_device);
 	port->rport_task = RPORT_DEL;
 
 	if (port->rport && queue_work(port->adapter->work_queue,
 				      &port->rport_work))
 		return;
 
-	zfcp_port_put(port);
+	put_device(&port->sysfs_device);
 }
 
 void zfcp_scsi_schedule_rports_block(struct zfcp_adapter *adapter)
 {
+	unsigned long flags;
 	struct zfcp_port *port;
 
-	list_for_each_entry(port, &adapter->port_list_head, list)
+	read_lock_irqsave(&adapter->port_list_lock, flags);
+	list_for_each_entry(port, &adapter->port_list, list)
 		zfcp_scsi_schedule_rport_block(port);
+	read_unlock_irqrestore(&adapter->port_list_lock, flags);
 }
 
 void zfcp_scsi_rport_work(struct work_struct *work)
@@ -597,7 +608,7 @@ void zfcp_scsi_rport_work(struct work_struct *work)
 		}
 	}
 
-	zfcp_port_put(port);
+	put_device(&port->sysfs_device);
 }
 
 
@@ -615,21 +626,7 @@ void zfcp_scsi_scan(struct work_struct *work)
 				 scsilun_to_int((struct scsi_lun *)
 						&unit->fcp_lun), 0);
 
-	zfcp_unit_put(unit);
-}
-
-static int zfcp_execute_fc_job(struct fc_bsg_job *job)
-{
-	switch (job->request->msgcode) {
-	case FC_BSG_RPT_ELS:
-	case FC_BSG_HST_ELS_NOLOGIN:
-		return zfcp_fc_execute_els_fc_job(job);
-	case FC_BSG_RPT_CT:
-	case FC_BSG_HST_CT:
-		return zfcp_fc_execute_ct_fc_job(job);
-	default:
-		return -EINVAL;
-	}
+	put_device(&unit->sysfs_device);
 }
 
 struct fc_function_template zfcp_transport_functions = {
@@ -643,6 +640,7 @@ struct fc_function_template zfcp_transport_functions = {
 	.show_host_port_name = 1,
 	.show_host_permanent_port_name = 1,
 	.show_host_supported_classes = 1,
+	.show_host_supported_fc4s = 1,
 	.show_host_supported_speeds = 1,
 	.show_host_maxframe_size = 1,
 	.show_host_serial_number = 1,
@@ -652,13 +650,15 @@ struct fc_function_template zfcp_transport_functions = {
 	.get_host_port_state = zfcp_get_host_port_state,
 	.terminate_rport_io = zfcp_scsi_terminate_rport_io,
 	.show_host_port_state = 1,
-	.bsg_request = zfcp_execute_fc_job,
+	.show_host_active_fc4s = 1,
+	.bsg_request = zfcp_fc_exec_bsg_job,
 	/* no functions registered for following dynamic attributes but
 	   directly set by LLDD */
 	.show_host_port_type = 1,
 	.show_host_speed = 1,
 	.show_host_port_id = 1,
 	.disable_target_scan = 1,
+	.dd_bsg_size = sizeof(struct zfcp_fsf_ct_els),
 };
 
 struct zfcp_data zfcp_data = {
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index d31000886ca8..f539e006683c 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -3,7 +3,7 @@
  *
  * sysfs attributes.
  *
- * Copyright IBM Corporation 2008
+ * Copyright IBM Corporation 2008, 2009
  */
 
 #define KMSG_COMPONENT "zfcp"
@@ -19,30 +19,44 @@ static ssize_t zfcp_sysfs_##_feat##_##_name##_show(struct device *dev,	       \
 						   struct device_attribute *at,\
 						   char *buf)		       \
 {									       \
-	struct _feat_def *_feat = dev_get_drvdata(dev);			       \
+	struct _feat_def *_feat = container_of(dev, struct _feat_def,	       \
+					       sysfs_device);		       \
 									       \
 	return sprintf(buf, _format, _value);				       \
 }									       \
 static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO,				       \
 		     zfcp_sysfs_##_feat##_##_name##_show, NULL);
 
-ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, status, "0x%08x\n",
-		 atomic_read(&adapter->status));
-ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, peer_wwnn, "0x%016llx\n",
-		 (unsigned long long) adapter->peer_wwnn);
-ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, peer_wwpn, "0x%016llx\n",
-		 (unsigned long long) adapter->peer_wwpn);
-ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, peer_d_id, "0x%06x\n",
-		 adapter->peer_d_id);
-ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, card_version, "0x%04x\n",
-		 adapter->hydra_version);
-ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, lic_version, "0x%08x\n",
-		 adapter->fsf_lic_version);
-ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, hardware_version, "0x%08x\n",
-		 adapter->hardware_version);
-ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, in_recovery, "%d\n",
-		 (atomic_read(&adapter->status) &
-		  ZFCP_STATUS_COMMON_ERP_INUSE) != 0);
+#define ZFCP_DEFINE_A_ATTR(_name, _format, _value)			     \
+static ssize_t zfcp_sysfs_adapter_##_name##_show(struct device *dev,	     \
+						 struct device_attribute *at,\
+						 char *buf)		     \
+{									     \
+	struct ccw_device *cdev = to_ccwdev(dev);			     \
+	struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);	     \
+	int i;								     \
+									     \
+	if (!adapter)							     \
+		return -ENODEV;						     \
+									     \
+	i = sprintf(buf, _format, _value);				     \
+	zfcp_ccw_adapter_put(adapter);					     \
+	return i;							     \
+}									     \
+static ZFCP_DEV_ATTR(adapter, _name, S_IRUGO,				     \
+		     zfcp_sysfs_adapter_##_name##_show, NULL);
+
+ZFCP_DEFINE_A_ATTR(status, "0x%08x\n", atomic_read(&adapter->status));
+ZFCP_DEFINE_A_ATTR(peer_wwnn, "0x%016llx\n",
+		   (unsigned long long) adapter->peer_wwnn);
+ZFCP_DEFINE_A_ATTR(peer_wwpn, "0x%016llx\n",
+		   (unsigned long long) adapter->peer_wwpn);
+ZFCP_DEFINE_A_ATTR(peer_d_id, "0x%06x\n", adapter->peer_d_id);
+ZFCP_DEFINE_A_ATTR(card_version, "0x%04x\n", adapter->hydra_version);
+ZFCP_DEFINE_A_ATTR(lic_version, "0x%08x\n", adapter->fsf_lic_version);
+ZFCP_DEFINE_A_ATTR(hardware_version, "0x%08x\n", adapter->hardware_version);
+ZFCP_DEFINE_A_ATTR(in_recovery, "%d\n", (atomic_read(&adapter->status) &
+					 ZFCP_STATUS_COMMON_ERP_INUSE) != 0);
 
 ZFCP_DEFINE_ATTR(zfcp_port, port, status, "0x%08x\n",
 		 atomic_read(&port->status));
@@ -73,7 +87,8 @@ static ssize_t zfcp_sysfs_##_feat##_failed_show(struct device *dev,	       \
 						struct device_attribute *attr, \
 						char *buf)		       \
 {									       \
-	struct _feat_def *_feat = dev_get_drvdata(dev);			       \
+	struct _feat_def *_feat = container_of(dev, struct _feat_def,	       \
+					       sysfs_device);		       \
 									       \
 	if (atomic_read(&_feat->status) & ZFCP_STATUS_COMMON_ERP_FAILED)       \
 		return sprintf(buf, "1\n");				       \
@@ -84,15 +99,13 @@ static ssize_t zfcp_sysfs_##_feat##_failed_store(struct device *dev,	       \
 						 struct device_attribute *attr,\
 						 const char *buf, size_t count)\
 {									       \
-	struct _feat_def *_feat = dev_get_drvdata(dev);			       \
+	struct _feat_def *_feat = container_of(dev, struct _feat_def,	       \
+					       sysfs_device);		       \
 	unsigned long val;						       \
 	int retval = 0;							       \
 									       \
-	mutex_lock(&zfcp_data.config_mutex);				       \
-	if (atomic_read(&_feat->status) & ZFCP_STATUS_COMMON_REMOVE) {	       \
-		retval = -EBUSY;					       \
-		goto out;						       \
-	}								       \
+	if (!(_feat && get_device(&_feat->sysfs_device)))		       \
+		return -EBUSY;						       \
 									       \
 	if (strict_strtoul(buf, 0, &val) || val != 0) {			       \
 		retval = -EINVAL;					       \
@@ -105,29 +118,82 @@ static ssize_t zfcp_sysfs_##_feat##_failed_store(struct device *dev,	       \
 				  _reopen_id, NULL);			       \
 	zfcp_erp_wait(_adapter);					       \
 out:									       \
-	mutex_unlock(&zfcp_data.config_mutex);				       \
+	put_device(&_feat->sysfs_device);				       \
 	return retval ? retval : (ssize_t) count;			       \
 }									       \
 static ZFCP_DEV_ATTR(_feat, failed, S_IWUSR | S_IRUGO,			       \
 		     zfcp_sysfs_##_feat##_failed_show,			       \
 		     zfcp_sysfs_##_feat##_failed_store);
 
-ZFCP_SYSFS_FAILED(zfcp_adapter, adapter, adapter, "syafai1", "syafai2");
 ZFCP_SYSFS_FAILED(zfcp_port, port, port->adapter, "sypfai1", "sypfai2");
 ZFCP_SYSFS_FAILED(zfcp_unit, unit, unit->port->adapter, "syufai1", "syufai2");
 
+static ssize_t zfcp_sysfs_adapter_failed_show(struct device *dev,
+					      struct device_attribute *attr,
+					      char *buf)
+{
+	struct ccw_device *cdev = to_ccwdev(dev);
+	struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
+	int i;
+
+	if (!adapter)
+		return -ENODEV;
+
+	if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
+		i = sprintf(buf, "1\n");
+	else
+		i = sprintf(buf, "0\n");
+
+	zfcp_ccw_adapter_put(adapter);
+	return i;
+}
+
+static ssize_t zfcp_sysfs_adapter_failed_store(struct device *dev,
+					       struct device_attribute *attr,
+					       const char *buf, size_t count)
+{
+	struct ccw_device *cdev = to_ccwdev(dev);
+	struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
+	unsigned long val;
+	int retval = 0;
+
+	if (!adapter)
+		return -ENODEV;
+
+	if (strict_strtoul(buf, 0, &val) || val != 0) {
+		retval = -EINVAL;
+		goto out;
+	}
+
+	zfcp_erp_modify_adapter_status(adapter, "syafai1", NULL,
+				       ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET);
+	zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
+				"syafai2", NULL);
+	zfcp_erp_wait(adapter);
+out:
+	zfcp_ccw_adapter_put(adapter);
+	return retval ? retval : (ssize_t) count;
+}
+static ZFCP_DEV_ATTR(adapter, failed, S_IWUSR | S_IRUGO,
+		     zfcp_sysfs_adapter_failed_show,
+		     zfcp_sysfs_adapter_failed_store);
+
 static ssize_t zfcp_sysfs_port_rescan_store(struct device *dev,
 					    struct device_attribute *attr,
 					    const char *buf, size_t count)
 {
-	struct zfcp_adapter *adapter = dev_get_drvdata(dev);
-	int ret;
+	struct ccw_device *cdev = to_ccwdev(dev);
+	struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
 
-	if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_REMOVE)
-		return -EBUSY;
+	if (!adapter)
+		return -ENODEV;
 
-	ret = zfcp_fc_scan_ports(adapter);
-	return ret ? ret : (ssize_t) count;
+	/* sync the user-space invocation with the kernel invocation of scan_work */
+	queue_work(adapter->work_queue, &adapter->scan_work);
+	flush_work(&adapter->scan_work);
+	zfcp_ccw_adapter_put(adapter);
+
+	return (ssize_t) count;
 }
 static ZFCP_DEV_ATTR(adapter, port_rescan, S_IWUSR, NULL,
 		     zfcp_sysfs_port_rescan_store);
@@ -136,44 +202,34 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
 					    struct device_attribute *attr,
 					    const char *buf, size_t count)
 {
-	struct zfcp_adapter *adapter = dev_get_drvdata(dev);
+	struct ccw_device *cdev = to_ccwdev(dev);
+	struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
 	struct zfcp_port *port;
 	u64 wwpn;
-	int retval = 0;
-	LIST_HEAD(port_remove_lh);
+	int retval = -EINVAL;
 
-	mutex_lock(&zfcp_data.config_mutex);
-	if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_REMOVE) {
-		retval = -EBUSY;
-		goto out;
-	}
+	if (!adapter)
+		return -ENODEV;
 
-	if (strict_strtoull(buf, 0, (unsigned long long *) &wwpn)) {
-		retval = -EINVAL;
+	if (strict_strtoull(buf, 0, (unsigned long long *) &wwpn))
 		goto out;
-	}
 
-	write_lock_irq(&zfcp_data.config_lock);
 	port = zfcp_get_port_by_wwpn(adapter, wwpn);
-	if (port && (atomic_read(&port->refcount) == 0)) {
-		zfcp_port_get(port);
-		atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status);
-		list_move(&port->list, &port_remove_lh);
-	} else
-		port = NULL;
-	write_unlock_irq(&zfcp_data.config_lock);
-
-	if (!port) {
-		retval = -ENXIO;
+	if (!port)
 		goto out;
-	}
+	else
+		retval = 0;
+
+	write_lock_irq(&adapter->port_list_lock);
+	list_del(&port->list);
+	write_unlock_irq(&adapter->port_list_lock);
+
+	put_device(&port->sysfs_device);
 
 	zfcp_erp_port_shutdown(port, 0, "syprs_1", NULL);
-	zfcp_erp_wait(adapter);
-	zfcp_port_put(port);
-	zfcp_port_dequeue(port);
+	zfcp_device_unregister(&port->sysfs_device, &zfcp_sysfs_port_attrs);
  out:
-	mutex_unlock(&zfcp_data.config_mutex);
+	zfcp_ccw_adapter_put(adapter);
 	return retval ? retval : (ssize_t) count;
 }
 static ZFCP_DEV_ATTR(adapter, port_remove, S_IWUSR, NULL,
@@ -202,16 +258,14 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev,
 					 struct device_attribute *attr,
 					 const char *buf, size_t count)
 {
-	struct zfcp_port *port = dev_get_drvdata(dev);
+	struct zfcp_port *port = container_of(dev, struct zfcp_port,
+					      sysfs_device);
 	struct zfcp_unit *unit;
 	u64 fcp_lun;
 	int retval = -EINVAL;
 
-	mutex_lock(&zfcp_data.config_mutex);
-	if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_REMOVE) {
-		retval = -EBUSY;
-		goto out;
-	}
+	if (!(port && get_device(&port->sysfs_device)))
+		return -EBUSY;
 
 	if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun))
 		goto out;
@@ -219,15 +273,14 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev,
 	unit = zfcp_unit_enqueue(port, fcp_lun);
 	if (IS_ERR(unit))
 		goto out;
-
-	retval = 0;
+	else
+		retval = 0;
 
 	zfcp_erp_unit_reopen(unit, 0, "syuas_1", NULL);
 	zfcp_erp_wait(unit->port->adapter);
 	flush_work(&unit->scsi_work);
-	zfcp_unit_put(unit);
 out:
-	mutex_unlock(&zfcp_data.config_mutex);
+	put_device(&port->sysfs_device);
 	return retval ? retval : (ssize_t) count;
 }
 static DEVICE_ATTR(unit_add, S_IWUSR, NULL, zfcp_sysfs_unit_add_store);
@@ -236,54 +289,37 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev,
 					    struct device_attribute *attr,
 					    const char *buf, size_t count)
 {
-	struct zfcp_port *port = dev_get_drvdata(dev);
+	struct zfcp_port *port = container_of(dev, struct zfcp_port,
+					      sysfs_device);
 	struct zfcp_unit *unit;
 	u64 fcp_lun;
-	int retval = 0;
-	LIST_HEAD(unit_remove_lh);
+	int retval = -EINVAL;
 
-	mutex_lock(&zfcp_data.config_mutex);
-	if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_REMOVE) {
-		retval = -EBUSY;
-		goto out;
-	}
+	if (!(port && get_device(&port->sysfs_device)))
+		return -EBUSY;
 
-	if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun)) {
-		retval = -EINVAL;
+	if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun))
 		goto out;
-	}
 
-	write_lock_irq(&zfcp_data.config_lock);
 	unit = zfcp_get_unit_by_lun(port, fcp_lun);
-	if (unit) {
-		write_unlock_irq(&zfcp_data.config_lock);
-		/* wait for possible timeout during SCSI probe */
-		flush_work(&unit->scsi_work);
-		write_lock_irq(&zfcp_data.config_lock);
-
-		if (atomic_read(&unit->refcount) == 0) {
-			zfcp_unit_get(unit);
-			atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE,
-					&unit->status);
-			list_move(&unit->list, &unit_remove_lh);
-		} else {
-			unit = NULL;
-		}
-	}
+	if (!unit)
+		goto out;
+	else
+		retval = 0;
 
-	write_unlock_irq(&zfcp_data.config_lock);
+	/* wait for possible timeout during SCSI probe */
+	flush_work(&unit->scsi_work);
 
-	if (!unit) {
-		retval = -ENXIO;
-		goto out;
-	}
+	write_lock_irq(&port->unit_list_lock);
+	list_del(&unit->list);
+	write_unlock_irq(&port->unit_list_lock);
+
+	put_device(&unit->sysfs_device);
 
 	zfcp_erp_unit_shutdown(unit, 0, "syurs_1", NULL);
-	zfcp_erp_wait(unit->port->adapter);
-	zfcp_unit_put(unit);
-	zfcp_unit_dequeue(unit);
+	zfcp_device_unregister(&unit->sysfs_device, &zfcp_sysfs_unit_attrs);
 out:
-	mutex_unlock(&zfcp_data.config_mutex);
+	put_device(&port->sysfs_device);
 	return retval ? retval : (ssize_t) count;
 }
 static DEVICE_ATTR(unit_remove, S_IWUSR, NULL, zfcp_sysfs_unit_remove_store);
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index 36c21b19e5d7..2d16d49fd3cd 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -186,8 +186,12 @@ static ssize_t twa_show_stats(struct device *dev,
 } /* End twa_show_stats() */
 
 /* This function will set a devices queue depth */
-static int twa_change_queue_depth(struct scsi_device *sdev, int queue_depth)
+static int twa_change_queue_depth(struct scsi_device *sdev, int queue_depth,
+				  int reason)
 {
+	if (reason != SCSI_QDEPTH_DEFAULT)
+		return -EOPNOTSUPP;
+
 	if (queue_depth > TW_Q_LENGTH-2)
 		queue_depth = TW_Q_LENGTH-2;
 	scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
new file mode 100644
index 000000000000..4d314d740de4
--- /dev/null
+++ b/drivers/scsi/3w-sas.c
@@ -0,0 +1,1924 @@
+/*
+   3w-sas.c -- LSI 3ware SAS/SATA-RAID Controller device driver for Linux.
+
+   Written By: Adam Radford <linuxraid@lsi.com>
+
+   Copyright (C) 2009 LSI Corporation.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; version 2 of the License.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   NO WARRANTY
+   THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+   CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+   LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+   solely responsible for determining the appropriateness of using and
+   distributing the Program and assumes all risks associated with its
+   exercise of rights under this Agreement, including but not limited to
+   the risks and costs of program errors, damage to or loss of data,
+   programs or equipment, and unavailability or interruption of operations.
+
+   DISCLAIMER OF LIABILITY
+   NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+   DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+   DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+   ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+   TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+   USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+   HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+   Controllers supported by this driver:
+
+   LSI 3ware 9750 6Gb/s SAS/SATA-RAID
+
+   Bugs/Comments/Suggestions should be mailed to:
+   linuxraid@lsi.com
+
+   For more information, goto:
+   http://www.lsi.com
+
+   History
+   -------
+   3.26.02.000 - Initial driver release.
+*/
+
+#include <linux/module.h>
+#include <linux/reboot.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/moduleparam.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/time.h>
+#include <linux/mutex.h>
+#include <linux/smp_lock.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_cmnd.h>
+#include "3w-sas.h"
+
+/* Globals */
+#define TW_DRIVER_VERSION "3.26.02.000"
+static TW_Device_Extension *twl_device_extension_list[TW_MAX_SLOT];
+static unsigned int twl_device_extension_count;
+static int twl_major = -1;
+extern struct timezone sys_tz;
+
+/* Module parameters */
+MODULE_AUTHOR ("LSI");
+MODULE_DESCRIPTION ("LSI 3ware SAS/SATA-RAID Linux Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(TW_DRIVER_VERSION);
+
+static int use_msi;
+module_param(use_msi, int, S_IRUGO);
+MODULE_PARM_DESC(use_msi, "Use Message Signaled Interrupts. Default: 0");
+
+/* Function prototypes */
+static int twl_reset_device_extension(TW_Device_Extension *tw_dev, int ioctl_reset);
+
+/* Functions */
+
+/* This function returns AENs through sysfs */
+static ssize_t twl_sysfs_aen_read(struct kobject *kobj,
+				  struct bin_attribute *bin_attr,
+				  char *outbuf, loff_t offset, size_t count)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct Scsi_Host *shost = class_to_shost(dev);
+	TW_Device_Extension *tw_dev = (TW_Device_Extension *)shost->hostdata;
+	unsigned long flags = 0;
+	ssize_t ret;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EACCES;
+
+	spin_lock_irqsave(tw_dev->host->host_lock, flags);
+	ret = memory_read_from_buffer(outbuf, count, &offset, tw_dev->event_queue[0], sizeof(TW_Event) * TW_Q_LENGTH);
+	spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
+
+	return ret;
+} /* End twl_sysfs_aen_read() */
+
+/* aen_read sysfs attribute initializer */
+static struct bin_attribute twl_sysfs_aen_read_attr = {
+	.attr = {
+		.name = "3ware_aen_read",
+		.mode = S_IRUSR,
+	}, 
+	.size = 0,
+	.read = twl_sysfs_aen_read
+};
+
+/* This function returns driver compatibility info through sysfs */
+static ssize_t twl_sysfs_compat_info(struct kobject *kobj,
+				     struct bin_attribute *bin_attr,
+				     char *outbuf, loff_t offset, size_t count)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct Scsi_Host *shost = class_to_shost(dev);
+	TW_Device_Extension *tw_dev = (TW_Device_Extension *)shost->hostdata;
+	unsigned long flags = 0;
+	ssize_t ret;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EACCES;
+
+	spin_lock_irqsave(tw_dev->host->host_lock, flags);
+	ret = memory_read_from_buffer(outbuf, count, &offset, &tw_dev->tw_compat_info, sizeof(TW_Compatibility_Info));
+	spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
+
+	return ret;
+} /* End twl_sysfs_compat_info() */
+
+/* compat_info sysfs attribute initializer */
+static struct bin_attribute twl_sysfs_compat_info_attr = {
+	.attr = {
+		.name = "3ware_compat_info",
+		.mode = S_IRUSR,
+	}, 
+	.size = 0,
+	.read = twl_sysfs_compat_info
+};
+
+/* Show some statistics about the card */
+static ssize_t twl_show_stats(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *host = class_to_shost(dev);
+	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
+	unsigned long flags = 0;
+	ssize_t len;
+
+	spin_lock_irqsave(tw_dev->host->host_lock, flags);
+	len = snprintf(buf, PAGE_SIZE, "3w-sas Driver version: %s\n"
+		       "Current commands posted:   %4d\n"
+		       "Max commands posted:       %4d\n"
+		       "Last sgl length:           %4d\n"
+		       "Max sgl length:            %4d\n"
+		       "Last sector count:         %4d\n"
+		       "Max sector count:          %4d\n"
+		       "SCSI Host Resets:          %4d\n"
+		       "AEN's:                     %4d\n", 
+		       TW_DRIVER_VERSION,
+		       tw_dev->posted_request_count,
+		       tw_dev->max_posted_request_count,
+		       tw_dev->sgl_entries,
+		       tw_dev->max_sgl_entries,
+		       tw_dev->sector_count,
+		       tw_dev->max_sector_count,
+		       tw_dev->num_resets,
+		       tw_dev->aen_count);
+	spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
+	return len;
+} /* End twl_show_stats() */
+
+/* This function will set a devices queue depth */
+static int twl_change_queue_depth(struct scsi_device *sdev, int queue_depth,
+				  int reason)
+{
+	if (reason != SCSI_QDEPTH_DEFAULT)
+		return -EOPNOTSUPP;
+
+	if (queue_depth > TW_Q_LENGTH-2)
+		queue_depth = TW_Q_LENGTH-2;
+	scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
+	return queue_depth;
+} /* End twl_change_queue_depth() */
+
+/* stats sysfs attribute initializer */
+static struct device_attribute twl_host_stats_attr = {
+	.attr = {
+		.name = 	"3ware_stats",
+		.mode =		S_IRUGO,
+	},
+	.show = twl_show_stats
+};
+
+/* Host attributes initializer */
+static struct device_attribute *twl_host_attrs[] = {
+	&twl_host_stats_attr,
+	NULL,
+};
+
+/* This function will look up an AEN severity string */
+static char *twl_aen_severity_lookup(unsigned char severity_code)
+{
+	char *retval = NULL;
+
+	if ((severity_code < (unsigned char) TW_AEN_SEVERITY_ERROR) ||
+	    (severity_code > (unsigned char) TW_AEN_SEVERITY_DEBUG))
+		goto out;
+
+	retval = twl_aen_severity_table[severity_code];
+out:
+	return retval;
+} /* End twl_aen_severity_lookup() */
+
+/* This function will queue an event */
+static void twl_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header)
+{
+	u32 local_time;
+	struct timeval time;
+	TW_Event *event;
+	unsigned short aen;
+	char host[16];
+	char *error_str;
+
+	tw_dev->aen_count++;
+
+	/* Fill out event info */
+	event = tw_dev->event_queue[tw_dev->error_index];
+
+	host[0] = '\0';
+	if (tw_dev->host)
+		sprintf(host, " scsi%d:", tw_dev->host->host_no);
+
+	aen = le16_to_cpu(header->status_block.error);
+	memset(event, 0, sizeof(TW_Event));
+
+	event->severity = TW_SEV_OUT(header->status_block.severity__reserved);
+	do_gettimeofday(&time);
+	local_time = (u32)(time.tv_sec - (sys_tz.tz_minuteswest * 60));
+	event->time_stamp_sec = local_time;
+	event->aen_code = aen;
+	event->retrieved = TW_AEN_NOT_RETRIEVED;
+	event->sequence_id = tw_dev->error_sequence_id;
+	tw_dev->error_sequence_id++;
+
+	/* Check for embedded error string */
+	error_str = &(header->err_specific_desc[strlen(header->err_specific_desc)+1]);
+
+	header->err_specific_desc[sizeof(header->err_specific_desc) - 1] = '\0';
+	event->parameter_len = strlen(header->err_specific_desc);
+	memcpy(event->parameter_data, header->err_specific_desc, event->parameter_len + 1 + strlen(error_str));
+	if (event->severity != TW_AEN_SEVERITY_DEBUG)
+		printk(KERN_WARNING "3w-sas:%s AEN: %s (0x%02X:0x%04X): %s:%s.\n",
+		       host,
+		       twl_aen_severity_lookup(TW_SEV_OUT(header->status_block.severity__reserved)),
+		       TW_MESSAGE_SOURCE_CONTROLLER_EVENT, aen, error_str,
+		       header->err_specific_desc);
+	else
+		tw_dev->aen_count--;
+
+	tw_dev->error_index = (tw_dev->error_index + 1 ) % TW_Q_LENGTH;
+} /* End twl_aen_queue_event() */
+
+/* This function will attempt to post a command packet to the board */
+static int twl_post_command_packet(TW_Device_Extension *tw_dev, int request_id)
+{
+	dma_addr_t command_que_value;
+
+	command_que_value = tw_dev->command_packet_phys[request_id];
+	command_que_value += TW_COMMAND_OFFSET;
+
+	/* First write upper 4 bytes */
+	writel((u32)((u64)command_que_value >> 32), TWL_HIBQPH_REG_ADDR(tw_dev));
+	/* Then the lower 4 bytes */
+	writel((u32)(command_que_value | TWL_PULL_MODE), TWL_HIBQPL_REG_ADDR(tw_dev));
+
+	tw_dev->state[request_id] = TW_S_POSTED;
+	tw_dev->posted_request_count++;
+	if (tw_dev->posted_request_count > tw_dev->max_posted_request_count)
+		tw_dev->max_posted_request_count = tw_dev->posted_request_count;
+
+	return 0;
+} /* End twl_post_command_packet() */
+
+/* This function will perform a pci-dma mapping for a scatter gather list */
+static int twl_map_scsi_sg_data(TW_Device_Extension *tw_dev, int request_id)
+{
+	int use_sg;
+	struct scsi_cmnd *cmd = tw_dev->srb[request_id];
+
+	use_sg = scsi_dma_map(cmd);
+	if (!use_sg)
+		return 0;
+	else if (use_sg < 0) {
+		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1, "Failed to map scatter gather list");
+		return 0;
+	}
+
+	cmd->SCp.phase = TW_PHASE_SGLIST;
+	cmd->SCp.have_data_in = use_sg;
+
+	return use_sg;
+} /* End twl_map_scsi_sg_data() */
+
+/* This function hands scsi cdb's to the firmware */
+static int twl_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry_ISO *sglistarg)
+{
+	TW_Command_Full *full_command_packet;
+	TW_Command_Apache *command_packet;
+	int i, sg_count;
+	struct scsi_cmnd *srb = NULL;
+	struct scatterlist *sglist = NULL, *sg;
+	int retval = 1;
+
+	if (tw_dev->srb[request_id]) {
+		srb = tw_dev->srb[request_id];
+		if (scsi_sglist(srb))
+			sglist = scsi_sglist(srb);
+	}
+
+	/* Initialize command packet */
+	full_command_packet = tw_dev->command_packet_virt[request_id];
+	full_command_packet->header.header_desc.size_header = 128;
+	full_command_packet->header.status_block.error = 0;
+	full_command_packet->header.status_block.severity__reserved = 0;
+
+	command_packet = &full_command_packet->command.newcommand;
+	command_packet->status = 0;
+	command_packet->opcode__reserved = TW_OPRES_IN(0, TW_OP_EXECUTE_SCSI);
+
+	/* We forced 16 byte cdb use earlier */
+	if (!cdb)
+		memcpy(command_packet->cdb, srb->cmnd, TW_MAX_CDB_LEN);
+	else
+		memcpy(command_packet->cdb, cdb, TW_MAX_CDB_LEN);
+
+	if (srb) {
+		command_packet->unit = srb->device->id;
+		command_packet->request_id__lunl =
+			cpu_to_le16(TW_REQ_LUN_IN(srb->device->lun, request_id));
+	} else {
+		command_packet->request_id__lunl =
+			cpu_to_le16(TW_REQ_LUN_IN(0, request_id));
+		command_packet->unit = 0;
+	}
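+	/* note: the 8-bit LUN is split, its low nibble packed with the
+	   request id above and its high nibble with the SG entry count
+	   stored in sgl_entries__lunh below */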
+
+	command_packet->sgl_offset = 16;
+
+	if (!sglistarg) {
+		/* Map sglist from scsi layer to cmd packet */
+		if (scsi_sg_count(srb)) {
+			sg_count = twl_map_scsi_sg_data(tw_dev, request_id);
+			if (sg_count == 0)
+				goto out;
+
+			scsi_for_each_sg(srb, sg, sg_count, i) {
+				command_packet->sg_list[i].address = TW_CPU_TO_SGL(sg_dma_address(sg));
+				command_packet->sg_list[i].length = TW_CPU_TO_SGL(sg_dma_len(sg));
+			}
+			command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN((srb->device->lun >> 4), scsi_sg_count(tw_dev->srb[request_id])));
+		}
+	} else {
+		/* Internal cdb post */
+		for (i = 0; i < use_sg; i++) {
+			command_packet->sg_list[i].address = TW_CPU_TO_SGL(sglistarg[i].address);
+			command_packet->sg_list[i].length = TW_CPU_TO_SGL(sglistarg[i].length);
+		}
+		command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN(0, use_sg));
+	}
+
+	/* Update some stats */
+	if (srb) {
+		tw_dev->sector_count = scsi_bufflen(srb) / 512;
+		if (tw_dev->sector_count > tw_dev->max_sector_count)
+			tw_dev->max_sector_count = tw_dev->sector_count;
+		tw_dev->sgl_entries = scsi_sg_count(srb);
+		if (tw_dev->sgl_entries > tw_dev->max_sgl_entries)
+			tw_dev->max_sgl_entries = tw_dev->sgl_entries;
+	}
+
+	/* Now post the command to the board */
+	retval = twl_post_command_packet(tw_dev, request_id);
+
+out:
+	return retval;
+} /* End twl_scsiop_execute_scsi() */
+
+/* This function will read the aen queue from the isr */
+static int twl_aen_read_queue(TW_Device_Extension *tw_dev, int request_id)
+{
+	char cdb[TW_MAX_CDB_LEN];
+	TW_SG_Entry_ISO sglist[1];
+	TW_Command_Full *full_command_packet;
+	int retval = 1;
+
+	full_command_packet = tw_dev->command_packet_virt[request_id];
+	memset(full_command_packet, 0, sizeof(TW_Command_Full));
+
+	/* Initialize cdb */
+	memset(&cdb, 0, TW_MAX_CDB_LEN);
+	cdb[0] = REQUEST_SENSE; /* opcode */
+	cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */
+
+	/* Initialize sglist */
+	memset(&sglist, 0, sizeof(TW_SG_Entry_ISO));
+	sglist[0].length = TW_SECTOR_SIZE;
+	sglist[0].address = tw_dev->generic_buffer_phys[request_id];
+
+	/* Mark internal command */
+	tw_dev->srb[request_id] = NULL;
+
+	/* Now post the command packet */
+	if (twl_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
+		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2, "Post failed while reading AEN queue");
+		goto out;
+	}
+	retval = 0;
+out:
+	return retval;
+} /* End twl_aen_read_queue() */
+
+/* This function will sync firmware time with the host time */
+static void twl_aen_sync_time(TW_Device_Extension *tw_dev, int request_id)
+{
+	u32 schedulertime;
+	struct timeval utc;
+	TW_Command_Full *full_command_packet;
+	TW_Command *command_packet;
+	TW_Param_Apache *param;
+	u32 local_time;
+
+	/* Fill out the command packet */
+	full_command_packet = tw_dev->command_packet_virt[request_id];
+	memset(full_command_packet, 0, sizeof(TW_Command_Full));
+	command_packet = &full_command_packet->command.oldcommand;
+	command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_SET_PARAM);
+	command_packet->request_id = request_id;
+	command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
+	command_packet->byte8_offset.param.sgl[0].length = TW_CPU_TO_SGL(TW_SECTOR_SIZE);
+	command_packet->size = TW_COMMAND_SIZE;
+	command_packet->byte6_offset.parameter_count = cpu_to_le16(1);
+
+	/* Setup the param */
+	param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
+	memset(param, 0, TW_SECTOR_SIZE);
+	param->table_id = cpu_to_le16(TW_TIMEKEEP_TABLE | 0x8000); /* Controller time keep table */
+	param->parameter_id = cpu_to_le16(0x3); /* SchedulerTime */
+	param->parameter_size_bytes = cpu_to_le16(4);
+
+	/* Convert system time in UTC to local time seconds since last 
+           Sunday 12:00AM */
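+	/* (the Unix epoch began on a Thursday, so shifting back 3 days
+	   before the weekly modulo of 604800 seconds counts from Sunday) */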
+	do_gettimeofday(&utc);
+	local_time = (u32)(utc.tv_sec - (sys_tz.tz_minuteswest * 60));
+	schedulertime = local_time - (3 * 86400);
+	schedulertime = cpu_to_le32(schedulertime % 604800);
+
+	memcpy(param->data, &schedulertime, sizeof(u32));
+
+	/* Mark internal command */
+	tw_dev->srb[request_id] = NULL;
+
+	/* Now post the command */
+	twl_post_command_packet(tw_dev, request_id);
+} /* End twl_aen_sync_time() */
+
+/* This function will assign an available request id */
+static void twl_get_request_id(TW_Device_Extension *tw_dev, int *request_id)
+{
+	*request_id = tw_dev->free_queue[tw_dev->free_head];
+	tw_dev->free_head = (tw_dev->free_head + 1) % TW_Q_LENGTH;
+	tw_dev->state[*request_id] = TW_S_STARTED;
+} /* End twl_get_request_id() */
+
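+/* Request ids are recycled through a circular free list of TW_Q_LENGTH
+   slots: ids are taken at the head in twl_get_request_id() above and
+   returned at the tail in twl_free_request_id() below. */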
+/* This function will free a request id */
+static void twl_free_request_id(TW_Device_Extension *tw_dev, int request_id)
+{
+	tw_dev->free_queue[tw_dev->free_tail] = request_id;
+	tw_dev->state[request_id] = TW_S_FINISHED;
+	tw_dev->free_tail = (tw_dev->free_tail + 1) % TW_Q_LENGTH;
+} /* End twl_free_request_id() */
+
+/* This function will complete an aen request from the isr */
+static int twl_aen_complete(TW_Device_Extension *tw_dev, int request_id)
+{
+	TW_Command_Full *full_command_packet;
+	TW_Command *command_packet;
+	TW_Command_Apache_Header *header;
+	unsigned short aen;
+	int retval = 1;
+
+	header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
+	tw_dev->posted_request_count--;
+	aen = le16_to_cpu(header->status_block.error);
+	full_command_packet = tw_dev->command_packet_virt[request_id];
+	command_packet = &full_command_packet->command.oldcommand;
+
+	/* First check for internal completion of set param for time sync */
+	if (TW_OP_OUT(command_packet->opcode__sgloffset) == TW_OP_SET_PARAM) {
+		/* Keep reading the queue in case there are more aen's */
+		if (twl_aen_read_queue(tw_dev, request_id))
+			goto out2;
+		else {
+			retval = 0;
+			goto out;
+		}
+	}
+
+	switch (aen) {
+	case TW_AEN_QUEUE_EMPTY:
+		/* Quit reading the queue if this is the last one */
+		break;
+	case TW_AEN_SYNC_TIME_WITH_HOST:
+		twl_aen_sync_time(tw_dev, request_id);
+		retval = 0;
+		goto out;
+	default:
+		twl_aen_queue_event(tw_dev, header);
+
+		/* If there are more aen's, keep reading the queue */
+		if (twl_aen_read_queue(tw_dev, request_id))
+			goto out2;
+		else {
+			retval = 0;
+			goto out;
+		}
+	}
+	retval = 0;
+out2:
+	tw_dev->state[request_id] = TW_S_COMPLETED;
+	twl_free_request_id(tw_dev, request_id);
+	clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
+out:
+	return retval;
+} /* End twl_aen_complete() */
+
+/* This function will poll for a response */
+static int twl_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds)
+{
+	unsigned long before;
+	dma_addr_t mfa;
+	u32 regh, regl;
+	u32 response;
+	int retval = 1;
+	int found = 0;
+
+	before = jiffies;
+
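+	/* poll the outbound queue port; the request id we wait for travels
+	   in the low 32 bits of the returned message frame address */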
+	while (!found) {
+		if (sizeof(dma_addr_t) > 4) {
+			regh = readl(TWL_HOBQPH_REG_ADDR(tw_dev));
+			regl = readl(TWL_HOBQPL_REG_ADDR(tw_dev));
+			mfa = ((u64)regh << 32) | regl;
+		} else
+			mfa = readl(TWL_HOBQPL_REG_ADDR(tw_dev));
+
+		response = (u32)mfa;
+
+		if (TW_RESID_OUT(response) == request_id)
+			found = 1;
+
+		if (time_after(jiffies, before + HZ * seconds))
+			goto out;
+
+		msleep(50);
+	}
+	retval = 0;
+out: 
+	return retval;
+} /* End twl_poll_response() */
+
+/* This function will drain the aen queue */
+static int twl_aen_drain_queue(TW_Device_Extension *tw_dev, int no_check_reset)
+{
+	int request_id = 0;
+	char cdb[TW_MAX_CDB_LEN];
+	TW_SG_Entry_ISO sglist[1];
+	int finished = 0, count = 0;
+	TW_Command_Full *full_command_packet;
+	TW_Command_Apache_Header *header;
+	unsigned short aen;
+	int first_reset = 0, queue = 0, retval = 1;
+
+	if (no_check_reset)
+		first_reset = 0;
+	else
+		first_reset = 1;
+
+	full_command_packet = tw_dev->command_packet_virt[request_id];
+	memset(full_command_packet, 0, sizeof(TW_Command_Full));
+
+	/* Initialize cdb */
+	memset(&cdb, 0, TW_MAX_CDB_LEN);
+	cdb[0] = REQUEST_SENSE; /* opcode */
+	cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */
+
+	/* Initialize sglist */
+	memset(&sglist, 0, sizeof(TW_SG_Entry_ISO));
+	sglist[0].length = TW_SECTOR_SIZE;
+	sglist[0].address = tw_dev->generic_buffer_phys[request_id];
+
+	/* Mark internal command */
+	tw_dev->srb[request_id] = NULL;
+
+	do {
+		/* Send command to the board */
+		if (twl_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
+			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x3, "Error posting request sense");
+			goto out;
+		}
+
+		/* Now poll for completion */
+		if (twl_poll_response(tw_dev, request_id, 30)) {
+			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x4, "No valid response while draining AEN queue");
+			tw_dev->posted_request_count--;
+			goto out;
+		}
+
+		tw_dev->posted_request_count--;
+		header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
+		aen = le16_to_cpu(header->status_block.error);
+		queue = 0;
+		count++;
+
+		switch (aen) {
+		case TW_AEN_QUEUE_EMPTY:
+			if (first_reset != 1)
+				goto out;
+			else
+				finished = 1;
+			break;
+		case TW_AEN_SOFT_RESET:
+			if (first_reset == 0)
+				first_reset = 1;
+			else
+				queue = 1;
+			break;
+		case TW_AEN_SYNC_TIME_WITH_HOST:
+			break;
+		default:
+			queue = 1;
+		}
+
+		/* Now queue an event info */
+		if (queue)
+			twl_aen_queue_event(tw_dev, header);
+	} while ((finished == 0) && (count < TW_MAX_AEN_DRAIN));
+
+	if (count == TW_MAX_AEN_DRAIN)
+		goto out;
+
+	retval = 0;
+out:
+	tw_dev->state[request_id] = TW_S_INITIAL;
+	return retval;
+} /* End twl_aen_drain_queue() */
+
+/* This function will allocate one coherent DMA region and slice it into
+   TW_Q_LENGTH per-request buffers of the kind selected by 'which' */
+static int twl_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
+{
+	int i;
+	dma_addr_t dma_handle;
+	unsigned long *cpu_addr;
+	int retval = 1;
+
+	cpu_addr = pci_alloc_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, &dma_handle);
+	if (!cpu_addr) {
+		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed");
+		goto out;
+	}
+
+	memset(cpu_addr, 0, size*TW_Q_LENGTH);
+
+	for (i = 0; i < TW_Q_LENGTH; i++) {
+		switch(which) {
+		case 0:
+			tw_dev->command_packet_phys[i] = dma_handle+(i*size);
+			tw_dev->command_packet_virt[i] = (TW_Command_Full *)((unsigned char *)cpu_addr + (i*size));
+			break;
+		case 1:
+			tw_dev->generic_buffer_phys[i] = dma_handle+(i*size);
+			tw_dev->generic_buffer_virt[i] = (unsigned long *)((unsigned char *)cpu_addr + (i*size));
+			break;
+		case 2:
+			tw_dev->sense_buffer_phys[i] = dma_handle+(i*size);
+			tw_dev->sense_buffer_virt[i] = (TW_Command_Apache_Header *)((unsigned char *)cpu_addr + (i*size));
+			break;
+		}
+	}
+	retval = 0;
+out:
+	return retval;
+} /* End twl_allocate_memory() */
+
+/* This function will load the request id and various sgls for ioctls */
+static void twl_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length)
+{
+	TW_Command *oldcommand;
+	TW_Command_Apache *newcommand;
+	TW_SG_Entry_ISO *sgl;
+	unsigned int pae = 0;
+
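+	/* a 32-bit kernel with 64-bit DMA addresses (e.g. PAE) needs one
+	   extra 32-bit word per SG entry, accounted for below */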
+	if ((sizeof(long) < 8) && (sizeof(dma_addr_t) > 4))
+		pae = 1;
+
+	if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
+		newcommand = &full_command_packet->command.newcommand;
+		newcommand->request_id__lunl =
+			cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->request_id__lunl), request_id));
+		if (length) {
+			newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
+			newcommand->sg_list[0].length = TW_CPU_TO_SGL(length);
+		}
+		newcommand->sgl_entries__lunh =
+			cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->sgl_entries__lunh), length ? 1 : 0));
+	} else {
+		oldcommand = &full_command_packet->command.oldcommand;
+		oldcommand->request_id = request_id;
+
+		if (TW_SGL_OUT(oldcommand->opcode__sgloffset)) {
+			/* Load the sg list */
+			sgl = (TW_SG_Entry_ISO *)((u32 *)oldcommand+oldcommand->size - (sizeof(TW_SG_Entry_ISO)/4) + pae + (sizeof(dma_addr_t) > 4 ? 1 : 0));
+			sgl->address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
+			sgl->length = TW_CPU_TO_SGL(length);
+			oldcommand->size += pae;
+			oldcommand->size += sizeof(dma_addr_t) > 4 ? 1 : 0;
+		}
+	}
+} /* End twl_load_sgl() */
+
+/* This function handles ioctl for the character device
+   This interface is used by smartmontools open source software */
+static int twl_chrdev_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
+{
+	long timeout;
+	unsigned long *cpu_addr, data_buffer_length_adjusted = 0, flags = 0;
+	dma_addr_t dma_handle;
+	int request_id = 0;
+	TW_Ioctl_Driver_Command driver_command;
+	TW_Ioctl_Buf_Apache *tw_ioctl;
+	TW_Command_Full *full_command_packet;
+	TW_Device_Extension *tw_dev = twl_device_extension_list[iminor(inode)];
+	int retval = -EFAULT;
+	void __user *argp = (void __user *)arg;
+
+	/* Only let one of these through at a time */
+	if (mutex_lock_interruptible(&tw_dev->ioctl_lock)) {
+		retval = -EINTR;
+		goto out;
+	}
+
+	/* First copy down the driver command */
+	if (copy_from_user(&driver_command, argp, sizeof(TW_Ioctl_Driver_Command)))
+		goto out2;
+
+	/* Check data buffer size */
+	if (driver_command.buffer_length > TW_MAX_SECTORS * 2048) {
+		retval = -EINVAL;
+		goto out2;
+	}
+
+	/* Hardware can only do multiple of 512 byte transfers */
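+	/* ((buffer_length + 511) & ~511) rounds up to the next 512 boundary */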
+	data_buffer_length_adjusted = (driver_command.buffer_length + 511) & ~511;
+
+	/* Now allocate ioctl buf memory */
+	cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, &dma_handle, GFP_KERNEL);
+	if (!cpu_addr) {
+		retval = -ENOMEM;
+		goto out2;
+	}
+
+	tw_ioctl = (TW_Ioctl_Buf_Apache *)cpu_addr;
+
+	/* Now copy down the entire ioctl */
+	if (copy_from_user(tw_ioctl, argp, driver_command.buffer_length + sizeof(TW_Ioctl_Buf_Apache) - 1))
+		goto out3;
+
+	/* See which ioctl we are doing */
+	switch (cmd) {
+	case TW_IOCTL_FIRMWARE_PASS_THROUGH:
+		spin_lock_irqsave(tw_dev->host->host_lock, flags);
+		twl_get_request_id(tw_dev, &request_id);
+
+		/* Flag internal command */
+		tw_dev->srb[request_id] = NULL;
+
+		/* Flag chrdev ioctl */
+		tw_dev->chrdev_request_id = request_id;
+
+		full_command_packet = (TW_Command_Full *)&tw_ioctl->firmware_command;
+
+		/* Load request id and sglist for both command types */
+		twl_load_sgl(tw_dev, full_command_packet, request_id, dma_handle, data_buffer_length_adjusted);
+
+		memcpy(tw_dev->command_packet_virt[request_id], &(tw_ioctl->firmware_command), sizeof(TW_Command_Full));
+
+		/* Now post the command packet to the controller */
+		twl_post_command_packet(tw_dev, request_id);
+		spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
+
+		timeout = TW_IOCTL_CHRDEV_TIMEOUT*HZ;
+
+		/* Now wait for command to complete */
+		timeout = wait_event_timeout(tw_dev->ioctl_wqueue, tw_dev->chrdev_request_id == TW_IOCTL_CHRDEV_FREE, timeout);
+
+		/* We timed out, and didn't get an interrupt */
+		if (tw_dev->chrdev_request_id != TW_IOCTL_CHRDEV_FREE) {
+			/* Now we need to reset the board */
+			printk(KERN_WARNING "3w-sas: scsi%d: WARNING: (0x%02X:0x%04X): Character ioctl (0x%x) timed out, resetting card.\n",
+			       tw_dev->host->host_no, TW_DRIVER, 0x6,
+			       cmd);
+			retval = -EIO;
+			twl_reset_device_extension(tw_dev, 1);
+			goto out3;
+		}
+
+		/* Now copy in the command packet response */
+		memcpy(&(tw_ioctl->firmware_command), tw_dev->command_packet_virt[request_id], sizeof(TW_Command_Full));
+		
+		/* Now complete the io */
+		spin_lock_irqsave(tw_dev->host->host_lock, flags);
+		tw_dev->posted_request_count--;
+		tw_dev->state[request_id] = TW_S_COMPLETED;
+		twl_free_request_id(tw_dev, request_id);
+		spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
+		break;
+	default:
+		retval = -ENOTTY;
+		goto out3;
+	}
+
+	/* Now copy the entire response to userspace */
+	if (copy_to_user(argp, tw_ioctl, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length - 1) == 0)
+		retval = 0;
+out3:
+	/* Now free ioctl buf memory */
+	dma_free_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, cpu_addr, dma_handle);
+out2:
+	mutex_unlock(&tw_dev->ioctl_lock);
+out:
+	return retval;
+} /* End twl_chrdev_ioctl() */
+
+/* This function handles open for the character device */
+static int twl_chrdev_open(struct inode *inode, struct file *file)
+{
+	unsigned int minor_number;
+	int retval = -ENODEV;
+
+	if (!capable(CAP_SYS_ADMIN)) {
+		retval = -EACCES;
+		goto out;
+	}
+
+	cycle_kernel_lock();
+	minor_number = iminor(inode);
+	if (minor_number >= twl_device_extension_count)
+		goto out;
+	retval = 0;
+out:
+	return retval;
+} /* End twl_chrdev_open() */
+
+/* File operations struct for character device */
+static const struct file_operations twl_fops = {
+	.owner		= THIS_MODULE,
+	.ioctl		= twl_chrdev_ioctl,
+	.open		= twl_chrdev_open,
+	.release	= NULL
+};
+
+/* This function passes sense data from firmware to scsi layer */
+static int twl_fill_sense(TW_Device_Extension *tw_dev, int i, int request_id, int copy_sense, int print_host)
+{
+	TW_Command_Apache_Header *header;
+	TW_Command_Full *full_command_packet;
+	unsigned short error;
+	char *error_str;
+	int retval = 1;
+
+	header = tw_dev->sense_buffer_virt[i];
+	full_command_packet = tw_dev->command_packet_virt[request_id];
+
+	/* Get embedded firmware error string */
+	error_str = &(header->err_specific_desc[strlen(header->err_specific_desc) + 1]);
+
+	/* Don't print error for Logical unit not supported during rollcall */
+	error = le16_to_cpu(header->status_block.error);
+	if ((error != TW_ERROR_LOGICAL_UNIT_NOT_SUPPORTED) && (error != TW_ERROR_UNIT_OFFLINE) && (error != TW_ERROR_INVALID_FIELD_IN_CDB)) {
+		if (print_host)
+			printk(KERN_WARNING "3w-sas: scsi%d: ERROR: (0x%02X:0x%04X): %s:%s.\n",
+			       tw_dev->host->host_no,
+			       TW_MESSAGE_SOURCE_CONTROLLER_ERROR,
+			       header->status_block.error,
+			       error_str, 
+			       header->err_specific_desc);
+		else
+			printk(KERN_WARNING "3w-sas: ERROR: (0x%02X:0x%04X): %s:%s.\n",
+			       TW_MESSAGE_SOURCE_CONTROLLER_ERROR,
+			       header->status_block.error,
+			       error_str,
+			       header->err_specific_desc);
+	}
+
+	if (copy_sense) {
+		memcpy(tw_dev->srb[request_id]->sense_buffer, header->sense_data, TW_SENSE_DATA_LENGTH);
+		tw_dev->srb[request_id]->result = (full_command_packet->command.newcommand.status << 1);
+		goto out;
+	}
+out:
+	return retval;
+} /* End twl_fill_sense() */
+
+/* This function will free up device extension resources */
+static void twl_free_device_extension(TW_Device_Extension *tw_dev)
+{
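+	/* Each buffer type below was allocated as a single coherent block
+	   spanning all TW_Q_LENGTH request ids (twl_allocate_memory() earlier
+	   in this file), so freeing index 0 releases every per-request buffer */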
+	if (tw_dev->command_packet_virt[0])
+		pci_free_consistent(tw_dev->tw_pci_dev,
+				    sizeof(TW_Command_Full)*TW_Q_LENGTH,
+				    tw_dev->command_packet_virt[0],
+				    tw_dev->command_packet_phys[0]);
+
+	if (tw_dev->generic_buffer_virt[0])
+		pci_free_consistent(tw_dev->tw_pci_dev,
+				    TW_SECTOR_SIZE*TW_Q_LENGTH,
+				    tw_dev->generic_buffer_virt[0],
+				    tw_dev->generic_buffer_phys[0]);
+
+	if (tw_dev->sense_buffer_virt[0])
+		pci_free_consistent(tw_dev->tw_pci_dev,
+				    sizeof(TW_Command_Apache_Header)*
+				    TW_Q_LENGTH,
+				    tw_dev->sense_buffer_virt[0],
+				    tw_dev->sense_buffer_phys[0]);
+
+	kfree(tw_dev->event_queue[0]);
+} /* End twl_free_device_extension() */
+
+/* This function will get parameter table entries from the firmware */
+static void *twl_get_param(TW_Device_Extension *tw_dev, int request_id, int table_id, int parameter_id, int parameter_size_bytes)
+{
+	TW_Command_Full *full_command_packet;
+	TW_Command *command_packet;
+	TW_Param_Apache *param;
+	void *retval = NULL;
+
+	/* Setup the command packet */
+	full_command_packet = tw_dev->command_packet_virt[request_id];
+	memset(full_command_packet, 0, sizeof(TW_Command_Full));
+	command_packet = &full_command_packet->command.oldcommand;
+
+	command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_GET_PARAM);
+	command_packet->size              = TW_COMMAND_SIZE;
+	command_packet->request_id        = request_id;
+	command_packet->byte6_offset.block_count = cpu_to_le16(1);
+
+	/* Now setup the param */
+	param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
+	memset(param, 0, TW_SECTOR_SIZE);
+	param->table_id = cpu_to_le16(table_id | 0x8000);
+	param->parameter_id = cpu_to_le16(parameter_id);
+	param->parameter_size_bytes = cpu_to_le16(parameter_size_bytes);
+
+	command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
+	command_packet->byte8_offset.param.sgl[0].length = TW_CPU_TO_SGL(TW_SECTOR_SIZE);
+
+	/* Post the command packet to the board */
+	twl_post_command_packet(tw_dev, request_id);
+
+	/* Poll for completion */
+	if (twl_poll_response(tw_dev, request_id, 30))
+		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x7, "No valid response during get param")
+	else
+		retval = (void *)&(param->data[0]);
+
+	tw_dev->posted_request_count--;
+	tw_dev->state[request_id] = TW_S_INITIAL;
+
+	return retval;
+} /* End twl_get_param() */
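+
+/* Note: the pointer returned above aliases the request's generic DMA buffer,
+   so callers are expected to consume the data before the request id is reused */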
+
+/* This function will send an initconnection command to the controller */
+static int twl_initconnection(TW_Device_Extension *tw_dev, int message_credits,
+			      u32 set_features, unsigned short current_fw_srl,
+			      unsigned short current_fw_arch_id,
+			      unsigned short current_fw_branch,
+			      unsigned short current_fw_build,
+			      unsigned short *fw_on_ctlr_srl,
+			      unsigned short *fw_on_ctlr_arch_id,
+			      unsigned short *fw_on_ctlr_branch,
+			      unsigned short *fw_on_ctlr_build,
+			      u32 *init_connect_result)
+{
+	TW_Command_Full *full_command_packet;
+	TW_Initconnect *tw_initconnect;
+	int request_id = 0, retval = 1;
+
+	/* Initialize InitConnection command packet */
+	full_command_packet = tw_dev->command_packet_virt[request_id];
+	memset(full_command_packet, 0, sizeof(TW_Command_Full));
+	full_command_packet->header.header_desc.size_header = 128;
+
+	tw_initconnect = (TW_Initconnect *)&full_command_packet->command.oldcommand;
+	tw_initconnect->opcode__reserved = TW_OPRES_IN(0, TW_OP_INIT_CONNECTION);
+	tw_initconnect->request_id = request_id;
+	tw_initconnect->message_credits = cpu_to_le16(message_credits);
+	tw_initconnect->features = set_features;
+
+	/* Turn on 64-bit sgl support if we need to */
+	tw_initconnect->features |= sizeof(dma_addr_t) > 4 ? 1 : 0;
+
+	tw_initconnect->features = cpu_to_le32(tw_initconnect->features);
+
+	if (set_features & TW_EXTENDED_INIT_CONNECT) {
+		tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE_EXTENDED;
+		tw_initconnect->fw_srl = cpu_to_le16(current_fw_srl);
+		tw_initconnect->fw_arch_id = cpu_to_le16(current_fw_arch_id);
+		tw_initconnect->fw_branch = cpu_to_le16(current_fw_branch);
+		tw_initconnect->fw_build = cpu_to_le16(current_fw_build);
+	} else
+		tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE;
+
+	/* Send command packet to the board */
+	twl_post_command_packet(tw_dev, request_id);
+
+	/* Poll for completion */
+	if (twl_poll_response(tw_dev, request_id, 30)) {
+		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x8, "No valid response during init connection");
+	} else {
+		if (set_features & TW_EXTENDED_INIT_CONNECT) {
+			*fw_on_ctlr_srl = le16_to_cpu(tw_initconnect->fw_srl);
+			*fw_on_ctlr_arch_id = le16_to_cpu(tw_initconnect->fw_arch_id);
+			*fw_on_ctlr_branch = le16_to_cpu(tw_initconnect->fw_branch);
+			*fw_on_ctlr_build = le16_to_cpu(tw_initconnect->fw_build);
+			*init_connect_result = le32_to_cpu(tw_initconnect->result);
+		}
+		retval = 0;
+	}
+
+	tw_dev->posted_request_count--;
+	tw_dev->state[request_id] = TW_S_INITIAL;
+
+	return retval;
+} /* End twl_initconnection() */
+
+/* This function will initialize the fields of a device extension */
+static int twl_initialize_device_extension(TW_Device_Extension *tw_dev)
+{
+	int i, retval = 1;
+
+	/* Initialize command packet buffers */
+	if (twl_allocate_memory(tw_dev, sizeof(TW_Command_Full), 0)) {
+		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x9, "Command packet memory allocation failed");
+		goto out;
+	}
+
+	/* Initialize generic buffer */
+	if (twl_allocate_memory(tw_dev, TW_SECTOR_SIZE, 1)) {
+		TW_PRINTK(tw_dev->host, TW_DRIVER, 0xa, "Generic memory allocation failed");
+		goto out;
+	}
+
+	/* Allocate sense buffers */
+	if (twl_allocate_memory(tw_dev, sizeof(TW_Command_Apache_Header), 2)) {
+		TW_PRINTK(tw_dev->host, TW_DRIVER, 0xb, "Sense buffer allocation failed");
+		goto out;
+	}
+
+	/* Allocate event info space */
+	tw_dev->event_queue[0] = kcalloc(TW_Q_LENGTH, sizeof(TW_Event), GFP_KERNEL);
+	if (!tw_dev->event_queue[0]) {
+		TW_PRINTK(tw_dev->host, TW_DRIVER, 0xc, "Event info memory allocation failed");
+		goto out;
+	}
+
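+	/* All TW_Q_LENGTH event entries live inside the single kcalloc()
+	   block above; twl_free_device_extension() therefore only needs to
+	   free event_queue[0] */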
+	for (i = 0; i < TW_Q_LENGTH; i++) {
+		tw_dev->event_queue[i] = (TW_Event *)((unsigned char *)tw_dev->event_queue[0] + (i * sizeof(TW_Event)));
+		tw_dev->free_queue[i] = i;
+		tw_dev->state[i] = TW_S_INITIAL;
+	}
+
+	tw_dev->free_head = TW_Q_START;
+	tw_dev->free_tail = TW_Q_START;
+	tw_dev->error_sequence_id = 1;
+	tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
+
+	mutex_init(&tw_dev->ioctl_lock);
+	init_waitqueue_head(&tw_dev->ioctl_wqueue);
+
+	retval = 0;
+out:
+	return retval;
+} /* End twl_initialize_device_extension() */
+
+/* This function will perform a pci-dma unmap */
+static void twl_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id)
+{
+	struct scsi_cmnd *cmd = tw_dev->srb[request_id];
+
+	if (cmd->SCp.phase == TW_PHASE_SGLIST)
+		scsi_dma_unmap(cmd);
+} /* End twl_unmap_scsi_data() */
+
+/* This function will handle attention interrupts */
+static int twl_handle_attention_interrupt(TW_Device_Extension *tw_dev)
+{
+	int retval = 1;
+	u32 request_id, doorbell;
+
+	/* Read doorbell status */
+	doorbell = readl(TWL_HOBDB_REG_ADDR(tw_dev));
+
+	/* Check for controller errors */
+	if (doorbell & TWL_DOORBELL_CONTROLLER_ERROR) {
+		TW_PRINTK(tw_dev->host, TW_DRIVER, 0xd, "Microcontroller Error: clearing");
+		goto out;
+	}
+
+	/* Check if we need to perform an AEN drain */
+	if (doorbell & TWL_DOORBELL_ATTENTION_INTERRUPT) {
+		if (!(test_and_set_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags))) {
+			twl_get_request_id(tw_dev, &request_id);
+			if (twl_aen_read_queue(tw_dev, request_id)) {
+				tw_dev->state[request_id] = TW_S_COMPLETED;
+				twl_free_request_id(tw_dev, request_id);
+				clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
+			}
+		}
+	}
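+	/* (on a successfully posted AEN read, TW_IN_ATTENTION_LOOP is
+	   presumably cleared later by the AEN completion path; only the
+	   failed-post case clears it here) */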
+
+	retval = 0;
+out:
+	/* Clear doorbell interrupt */
+	TWL_CLEAR_DB_INTERRUPT(tw_dev);
+
+	/* Make sure the clear was flushed by reading it back */
+	readl(TWL_HOBDBC_REG_ADDR(tw_dev));
+
+	return retval;
+} /* End twl_handle_attention_interrupt() */
+
+/* Interrupt service routine */
+static irqreturn_t twl_interrupt(int irq, void *dev_instance)
+{
+	TW_Device_Extension *tw_dev = (TW_Device_Extension *)dev_instance;
+	int i, handled = 0, error = 0;
+	dma_addr_t mfa = 0;
+	u32 reg, regl, regh, response, request_id = 0;
+	struct scsi_cmnd *cmd;
+	TW_Command_Full *full_command_packet;
+
+	spin_lock(tw_dev->host->host_lock);
+
+	/* Read host interrupt status */
+	reg = readl(TWL_HISTAT_REG_ADDR(tw_dev));
+
+	/* Check if this is our interrupt, otherwise bail */
+	if (!(reg & TWL_HISTATUS_VALID_INTERRUPT))
+		goto twl_interrupt_bail;
+
+	handled = 1;
+
+	/* If we are resetting, bail */
+	if (test_bit(TW_IN_RESET, &tw_dev->flags))
+		goto twl_interrupt_bail;
+
+	/* Attention interrupt */
+	if (reg & TWL_HISTATUS_ATTENTION_INTERRUPT) {
+		if (twl_handle_attention_interrupt(tw_dev)) {
+			TWL_MASK_INTERRUPTS(tw_dev);
+			goto twl_interrupt_bail;
+		}
+	}
+
+	/* Response interrupt */
+	while (reg & TWL_HISTATUS_RESPONSE_INTERRUPT) {
+		if (sizeof(dma_addr_t) > 4) {
+			regh = readl(TWL_HOBQPH_REG_ADDR(tw_dev));
+			regl = readl(TWL_HOBQPL_REG_ADDR(tw_dev));
+			mfa = ((u64)regh << 32) | regl;
+		} else
+			mfa = readl(TWL_HOBQPL_REG_ADDR(tw_dev));
+
+		error = 0;
+		response = (u32)mfa;
+
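+		/* The outbound queue yields either a normal response (bit 0
+		   set, request id in bits 16-31) or, on error, the bus address
+		   of the sense buffer the firmware has filled in */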
+		/* Check for command packet error */
+		if (!TW_NOTMFA_OUT(response)) {
+			for (i = 0; i < TW_Q_LENGTH; i++) {
+				if (tw_dev->sense_buffer_phys[i] == mfa) {
+					request_id = le16_to_cpu(tw_dev->sense_buffer_virt[i]->header_desc.request_id);
+					if (tw_dev->srb[request_id] != NULL)
+						error = twl_fill_sense(tw_dev, i, request_id, 1, 1);
+					else {
+						/* Skip ioctl error prints */
+						if (request_id != tw_dev->chrdev_request_id)
+							error = twl_fill_sense(tw_dev, i, request_id, 0, 1);
+						else
+							memcpy(tw_dev->command_packet_virt[request_id], tw_dev->sense_buffer_virt[i], sizeof(TW_Command_Apache_Header));
+					}
+
+					/* Now re-post the sense buffer */
+					writel((u32)((u64)tw_dev->sense_buffer_phys[i] >> 32), TWL_HOBQPH_REG_ADDR(tw_dev));
+					writel((u32)tw_dev->sense_buffer_phys[i], TWL_HOBQPL_REG_ADDR(tw_dev));
+					break;
+				}
+			}
+		} else
+			request_id = TW_RESID_OUT(response);
+
+		full_command_packet = tw_dev->command_packet_virt[request_id];
+
+		/* Check for correct state */
+		if (tw_dev->state[request_id] != TW_S_POSTED) {
+			if (tw_dev->srb[request_id] != NULL) {
+				TW_PRINTK(tw_dev->host, TW_DRIVER, 0xe, "Received a request id that wasn't posted");
+				TWL_MASK_INTERRUPTS(tw_dev);
+				goto twl_interrupt_bail;
+			}
+		}
+
+		/* Check for internal command completion */
+		if (tw_dev->srb[request_id] == NULL) {
+			if (request_id != tw_dev->chrdev_request_id) {
+				if (twl_aen_complete(tw_dev, request_id))
+					TW_PRINTK(tw_dev->host, TW_DRIVER, 0xf, "Error completing AEN during attention interrupt");
+			} else {
+				tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
+				wake_up(&tw_dev->ioctl_wqueue);
+			}
+		} else {
+			cmd = tw_dev->srb[request_id];
+
+			if (!error)
+				cmd->result = (DID_OK << 16);
+
+			/* Report residual bytes for single sgl */
+			if ((scsi_sg_count(cmd) <= 1) && (full_command_packet->command.newcommand.status == 0)) {
+				if (full_command_packet->command.newcommand.sg_list[0].length < scsi_bufflen(tw_dev->srb[request_id]))
+					scsi_set_resid(cmd, scsi_bufflen(cmd) - full_command_packet->command.newcommand.sg_list[0].length);
+			}
+
+			/* Now complete the io */
+			tw_dev->state[request_id] = TW_S_COMPLETED;
+			twl_free_request_id(tw_dev, request_id);
+			tw_dev->posted_request_count--;
+			tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
+			twl_unmap_scsi_data(tw_dev, request_id);
+		}
+
+		/* Check for another response interrupt */
+		reg = readl(TWL_HISTAT_REG_ADDR(tw_dev));
+	}
+
+twl_interrupt_bail:
+	spin_unlock(tw_dev->host->host_lock);
+	return IRQ_RETVAL(handled);
+} /* End twl_interrupt() */
+
+/* This function will poll for a register change */
+static int twl_poll_register(TW_Device_Extension *tw_dev, void *reg, u32 value, u32 result, int seconds)
+{
+	unsigned long before;
+	int retval = 1;
+	u32 reg_value;
+
+	reg_value = readl(reg);
+	before = jiffies;
+
+	while ((reg_value & value) != result) {
+		reg_value = readl(reg);
+		if (time_after(jiffies, before + HZ * seconds))
+			goto out;
+		msleep(50);
+	}
+	retval = 0;
+out:
+	return retval;
+} /* End twl_poll_register() */
+
+/* This function will reset a controller */
+static int twl_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset)
+{
+	int retval = 1;
+	int i = 0;
+	u32 status = 0;
+	unsigned short fw_on_ctlr_srl = 0, fw_on_ctlr_arch_id = 0;
+	unsigned short fw_on_ctlr_branch = 0, fw_on_ctlr_build = 0;
+	u32 init_connect_result = 0;
+	int tries = 0;
+	int do_soft_reset = soft_reset;
+
+	while (tries < TW_MAX_RESET_TRIES) {
+		/* Do a soft reset if one is needed */
+		if (do_soft_reset) {
+			TWL_SOFT_RESET(tw_dev);
+
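+			/* Scratchpad 3 first drops the ready bit while the
+			   firmware restarts, then raises it again; wait for
+			   both transitions */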
+			/* Make sure controller is in a good state */
+			if (twl_poll_register(tw_dev, TWL_SCRPD3_REG_ADDR(tw_dev), TWL_CONTROLLER_READY, 0x0, 30)) {
+				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x10, "Controller never went non-ready during reset sequence");
+				tries++;
+				continue;
+			}
+			if (twl_poll_register(tw_dev, TWL_SCRPD3_REG_ADDR(tw_dev), TWL_CONTROLLER_READY, TWL_CONTROLLER_READY, 60)) {
+				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x11, "Controller not ready during reset sequence");
+				tries++;
+				continue;
+			}
+		}
+
+		/* Initconnect */
+		if (twl_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS,
+				       TW_EXTENDED_INIT_CONNECT, TW_CURRENT_DRIVER_SRL,
+				       TW_9750_ARCH_ID, TW_CURRENT_DRIVER_BRANCH,
+				       TW_CURRENT_DRIVER_BUILD, &fw_on_ctlr_srl,
+				       &fw_on_ctlr_arch_id, &fw_on_ctlr_branch,
+				       &fw_on_ctlr_build, &init_connect_result)) {
+			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x12, "Initconnection failed while checking SRL");
+			do_soft_reset = 1;
+			tries++;
+			continue;
+		}
+
+		/* Load sense buffers */
+		while (i < TW_Q_LENGTH) {
+			writel((u32)((u64)tw_dev->sense_buffer_phys[i] >> 32), TWL_HOBQPH_REG_ADDR(tw_dev));
+			writel((u32)tw_dev->sense_buffer_phys[i], TWL_HOBQPL_REG_ADDR(tw_dev));
+
+			/* Check status for over-run after each write */
+			status = readl(TWL_STATUS_REG_ADDR(tw_dev));
+			if (!(status & TWL_STATUS_OVERRUN_SUBMIT))
+				i++;
+		}
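+		/* (i only advances when the post was accepted, so a buffer
+		   rejected with an over-run is simply written again) */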
+
+		/* Now check status */
+		status = readl(TWL_STATUS_REG_ADDR(tw_dev));
+		if (status) {
+			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x13, "Bad controller status after loading sense buffers");
+			do_soft_reset = 1;
+			tries++;
+			continue;
+		}
+
+		/* Drain the AEN queue */
+		if (twl_aen_drain_queue(tw_dev, soft_reset)) {
+			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x14, "AEN drain failed during reset sequence");
+			do_soft_reset = 1;
+			tries++;
+			continue;
+		}
+
+		/* Load rest of compatibility struct */
+		strncpy(tw_dev->tw_compat_info.driver_version, TW_DRIVER_VERSION, strlen(TW_DRIVER_VERSION));
+		tw_dev->tw_compat_info.driver_srl_high = TW_CURRENT_DRIVER_SRL;
+		tw_dev->tw_compat_info.driver_branch_high = TW_CURRENT_DRIVER_BRANCH;
+		tw_dev->tw_compat_info.driver_build_high = TW_CURRENT_DRIVER_BUILD;
+		tw_dev->tw_compat_info.driver_srl_low = TW_BASE_FW_SRL;
+		tw_dev->tw_compat_info.driver_branch_low = TW_BASE_FW_BRANCH;
+		tw_dev->tw_compat_info.driver_build_low = TW_BASE_FW_BUILD;
+		tw_dev->tw_compat_info.fw_on_ctlr_srl = fw_on_ctlr_srl;
+		tw_dev->tw_compat_info.fw_on_ctlr_branch = fw_on_ctlr_branch;
+		tw_dev->tw_compat_info.fw_on_ctlr_build = fw_on_ctlr_build;
+
+		/* If we got here, controller is in a good state */
+		retval = 0;
+		goto out;
+	}
+out:
+	return retval;
+} /* End twl_reset_sequence() */
+
+/* This function will reset a device extension */
+static int twl_reset_device_extension(TW_Device_Extension *tw_dev, int ioctl_reset)
+{
+	int i = 0, retval = 1;
+	unsigned long flags = 0;
+
+	/* Block SCSI requests while we are resetting */
+	if (ioctl_reset)
+		scsi_block_requests(tw_dev->host);
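+	/* (the scsi_eh caller passes ioctl_reset == 0; the midlayer already
+	   has the host quiesced there, so only the ioctl-timeout path needs
+	   the explicit block) */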
+
+	set_bit(TW_IN_RESET, &tw_dev->flags);
+	TWL_MASK_INTERRUPTS(tw_dev);
+	TWL_CLEAR_DB_INTERRUPT(tw_dev);
+
+	spin_lock_irqsave(tw_dev->host->host_lock, flags);
+
+	/* Abort all requests that are in progress */
+	for (i = 0; i < TW_Q_LENGTH; i++) {
+		if ((tw_dev->state[i] != TW_S_FINISHED) &&
+		    (tw_dev->state[i] != TW_S_INITIAL) &&
+		    (tw_dev->state[i] != TW_S_COMPLETED)) {
+			if (tw_dev->srb[i]) {
+				tw_dev->srb[i]->result = (DID_RESET << 16);
+				tw_dev->srb[i]->scsi_done(tw_dev->srb[i]);
+				twl_unmap_scsi_data(tw_dev, i);
+			}
+		}
+	}
+
+	/* Reset queues and counts */
+	for (i = 0; i < TW_Q_LENGTH; i++) {
+		tw_dev->free_queue[i] = i;
+		tw_dev->state[i] = TW_S_INITIAL;
+	}
+	tw_dev->free_head = TW_Q_START;
+	tw_dev->free_tail = TW_Q_START;
+	tw_dev->posted_request_count = 0;
+
+	spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
+
+	if (twl_reset_sequence(tw_dev, 1))
+		goto out;
+
+	TWL_UNMASK_INTERRUPTS(tw_dev);
+
+	clear_bit(TW_IN_RESET, &tw_dev->flags);
+	tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
+
+	retval = 0;
+out:
+	if (ioctl_reset)
+		scsi_unblock_requests(tw_dev->host);
+	return retval;
+} /* End twl_reset_device_extension() */
+
+/* This function returns unit geometry in cylinders/heads/sectors */
+static int twl_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[])
+{
+	int heads, sectors;
+	TW_Device_Extension *tw_dev;
+
+	tw_dev = (TW_Device_Extension *)sdev->host->hostdata;
+
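+	/* 0x200000 512-byte sectors == 1GB; anything larger gets the usual
+	   255-head, 63-sector translation */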
+	if (capacity >= 0x200000) {
+		heads = 255;
+		sectors = 63;
+	} else {
+		heads = 64;
+		sectors = 32;
+	}
+
+	geom[0] = heads;
+	geom[1] = sectors;
+	geom[2] = sector_div(capacity, heads * sectors); /* cylinders */
+
+	return 0;
+} /* End twl_scsi_biosparam() */
+
+/* This is the scsi eh host reset handler */
+static int twl_scsi_eh_reset(struct scsi_cmnd *SCpnt)
+{
+	TW_Device_Extension *tw_dev = NULL;
+	int retval = FAILED;
+
+	tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;
+
+	tw_dev->num_resets++;
+
+	sdev_printk(KERN_WARNING, SCpnt->device,
+		"WARNING: (0x%02X:0x%04X): Command (0x%x) timed out, resetting card.\n",
+		TW_DRIVER, 0x2c, SCpnt->cmnd[0]);
+
+	/* Make sure we are not issuing an ioctl or resetting from ioctl */
+	mutex_lock(&tw_dev->ioctl_lock);
+
+	/* Now reset the card and some of the device extension data */
+	if (twl_reset_device_extension(tw_dev, 0)) {
+		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x15, "Controller reset failed during scsi host reset");
+		goto out;
+	}
+
+	retval = SUCCESS;
+out:
+	mutex_unlock(&tw_dev->ioctl_lock);
+	return retval;
+} /* End twl_scsi_eh_reset() */
+
+/* This is the main scsi queue function to handle scsi opcodes */
+static int twl_scsi_queue(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
+{
+	int request_id, retval;
+	TW_Device_Extension *tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;
+
+	/* If we are resetting due to timed out ioctl, report as busy */
+	if (test_bit(TW_IN_RESET, &tw_dev->flags)) {
+		retval = SCSI_MLQUEUE_HOST_BUSY;
+		goto out;
+	}
+
+	/* Save done function into scsi_cmnd struct */
+	SCpnt->scsi_done = done;
+
+	/* Get a free request id */
+	twl_get_request_id(tw_dev, &request_id);
+
+	/* Save the scsi command for use by the ISR */
+	tw_dev->srb[request_id] = SCpnt;
+
+	/* Initialize phase to zero */
+	SCpnt->SCp.phase = TW_PHASE_INITIAL;
+
+	retval = twl_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
+	if (retval) {
+		tw_dev->state[request_id] = TW_S_COMPLETED;
+		twl_free_request_id(tw_dev, request_id);
+		SCpnt->result = (DID_ERROR << 16);
+		done(SCpnt);
+		retval = 0;
+	}
+out:
+	return retval;
+} /* End twl_scsi_queue() */
+
+/* This function tells the controller to shut down */
+static void __twl_shutdown(TW_Device_Extension *tw_dev)
+{
+	/* Disable interrupts */
+	TWL_MASK_INTERRUPTS(tw_dev);
+
+	/* Free up the IRQ */
+	free_irq(tw_dev->tw_pci_dev->irq, tw_dev);
+
+	printk(KERN_WARNING "3w-sas: Shutting down host %d.\n", tw_dev->host->host_no);
+
+	/* Tell the card we are shutting down */
+	if (twl_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
+		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x16, "Connection shutdown failed");
+	} else {
+		printk(KERN_WARNING "3w-sas: Shutdown complete.\n");
+	}
+
+	/* Clear doorbell interrupt just before exit */
+	TWL_CLEAR_DB_INTERRUPT(tw_dev);
+} /* End __twl_shutdown() */
+
+/* Wrapper for __twl_shutdown */
+static void twl_shutdown(struct pci_dev *pdev)
+{
+	struct Scsi_Host *host = pci_get_drvdata(pdev);
+	TW_Device_Extension *tw_dev;
+
+	if (!host)
+		return;
+
+	tw_dev = (TW_Device_Extension *)host->hostdata;
+
+	if (tw_dev->online)
+		__twl_shutdown(tw_dev);
+} /* End twl_shutdown() */
+
+/* This function configures unit settings when a unit is coming on-line */
+static int twl_slave_configure(struct scsi_device *sdev)
+{
+	/* Force 60 second timeout */
+	blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);
+
+	return 0;
+} /* End twl_slave_configure() */
+
+/* scsi_host_template initializer */
+static struct scsi_host_template driver_template = {
+	.module			= THIS_MODULE,
+	.name			= "3w-sas",
+	.queuecommand		= twl_scsi_queue,
+	.eh_host_reset_handler	= twl_scsi_eh_reset,
+	.bios_param		= twl_scsi_biosparam,
+	.change_queue_depth	= twl_change_queue_depth,
+	.can_queue		= TW_Q_LENGTH-2,
+	.slave_configure	= twl_slave_configure,
+	.this_id		= -1,
+	.sg_tablesize		= TW_LIBERATOR_MAX_SGL_LENGTH,
+	.max_sectors		= TW_MAX_SECTORS,
+	.cmd_per_lun		= TW_MAX_CMDS_PER_LUN,
+	.use_clustering		= ENABLE_CLUSTERING,
+	.shost_attrs		= twl_host_attrs,
+	.emulated		= 1
+};
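+
+/* can_queue is TW_Q_LENGTH-2, which in effect holds two request ids back
+   for the driver's own use (AEN reads and the passthrough ioctl) */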
+
+/* This function will probe and initialize a card */
+static int __devinit twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
+{
+	struct Scsi_Host *host = NULL;
+	TW_Device_Extension *tw_dev;
+	int retval = -ENODEV;
+	int *ptr_phycount, phycount = 0;
+
+	retval = pci_enable_device(pdev);
+	if (retval) {
+		TW_PRINTK(host, TW_DRIVER, 0x17, "Failed to enable pci device");
+		goto out_disable_device;
+	}
+
+	pci_set_master(pdev);
+	pci_try_set_mwi(pdev);
+
+	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
+	    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
+		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
+		    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
+			TW_PRINTK(host, TW_DRIVER, 0x18, "Failed to set dma mask");
+			retval = -ENODEV;
+			goto out_disable_device;
+		}
+
+	host = scsi_host_alloc(&driver_template, sizeof(TW_Device_Extension));
+	if (!host) {
+		TW_PRINTK(host, TW_DRIVER, 0x19, "Failed to allocate memory for device extension");
+		retval = -ENOMEM;
+		goto out_disable_device;
+	}
+	tw_dev = shost_priv(host);
+
+	/* Save values to device extension */
+	tw_dev->host = host;
+	tw_dev->tw_pci_dev = pdev;
+
+	if (twl_initialize_device_extension(tw_dev)) {
+		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1a, "Failed to initialize device extension");
+		goto out_free_device_extension;
+	}
+
+	/* Request IO regions */
+	retval = pci_request_regions(pdev, "3w-sas");
+	if (retval) {
+		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1b, "Failed to get mem region");
+		goto out_free_device_extension;
+	}
+
+	/* Save base address, use region 1 */
+	tw_dev->base_addr = pci_iomap(pdev, 1, 0);
+	if (!tw_dev->base_addr) {
+		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1c, "Failed to ioremap");
+		goto out_release_mem_region;
+	}
+
+	/* Disable interrupts on the card */
+	TWL_MASK_INTERRUPTS(tw_dev);
+
+	/* Initialize the card */
+	if (twl_reset_sequence(tw_dev, 0)) {
+		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1d, "Controller reset failed during probe");
+		goto out_iounmap;
+	}
+
+	/* Set host specific parameters */
+	host->max_id = TW_MAX_UNITS;
+	host->max_cmd_len = TW_MAX_CDB_LEN;
+	host->max_lun = TW_MAX_LUNS;
+	host->max_channel = 0;
+
+	/* Register the card with the kernel SCSI layer */
+	retval = scsi_add_host(host, &pdev->dev);
+	if (retval) {
+		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1e, "scsi add host failed");
+		goto out_iounmap;
+	}
+
+	pci_set_drvdata(pdev, host);
+
+	printk(KERN_WARNING "3w-sas: scsi%d: Found an LSI 3ware %s Controller at 0x%llx, IRQ: %d.\n",
+	       host->host_no,
+	       (char *)twl_get_param(tw_dev, 1, TW_VERSION_TABLE,
+				     TW_PARAM_MODEL, TW_PARAM_MODEL_LENGTH),
+	       (u64)pci_resource_start(pdev, 1), pdev->irq);
+
+	ptr_phycount = twl_get_param(tw_dev, 2, TW_PARAM_PHY_SUMMARY_TABLE,
+				     TW_PARAM_PHYCOUNT, TW_PARAM_PHYCOUNT_LENGTH);
+	if (ptr_phycount)
+		phycount = le32_to_cpu(*(int *)ptr_phycount);
+
+	printk(KERN_WARNING "3w-sas: scsi%d: Firmware %s, BIOS %s, Phys: %d.\n",
+	       host->host_no,
+	       (char *)twl_get_param(tw_dev, 1, TW_VERSION_TABLE,
+				     TW_PARAM_FWVER, TW_PARAM_FWVER_LENGTH),
+	       (char *)twl_get_param(tw_dev, 2, TW_VERSION_TABLE,
+				     TW_PARAM_BIOSVER, TW_PARAM_BIOSVER_LENGTH),
+	       phycount);
+
+	/* Try to enable MSI */
+	if (use_msi && !pci_enable_msi(pdev))
+		set_bit(TW_USING_MSI, &tw_dev->flags);
+
+	/* Now setup the interrupt handler */
+	retval = request_irq(pdev->irq, twl_interrupt, IRQF_SHARED, "3w-sas", tw_dev);
+	if (retval) {
+		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1f, "Error requesting IRQ");
+		goto out_remove_host;
+	}
+
+	twl_device_extension_list[twl_device_extension_count] = tw_dev;
+	twl_device_extension_count++;
+
+	/* Re-enable interrupts on the card */
+	TWL_UNMASK_INTERRUPTS(tw_dev);
+
+	/* Finally, scan the host */
+	scsi_scan_host(host);
+
+	/* Add sysfs binary files */
+	if (sysfs_create_bin_file(&host->shost_dev.kobj, &twl_sysfs_aen_read_attr))
+		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x20, "Failed to create sysfs binary file: 3ware_aen_read");
+	if (sysfs_create_bin_file(&host->shost_dev.kobj, &twl_sysfs_compat_info_attr))
+		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x21, "Failed to create sysfs binary file: 3ware_compat_info");
+
+	if (twl_major == -1) {
+		if ((twl_major = register_chrdev(0, "twl", &twl_fops)) < 0)
+			TW_PRINTK(host, TW_DRIVER, 0x22, "Failed to register character device");
+	}
+	tw_dev->online = 1;
+	return 0;
+
+out_remove_host:
+	if (test_bit(TW_USING_MSI, &tw_dev->flags))
+		pci_disable_msi(pdev);
+	scsi_remove_host(host);
+out_iounmap:
+	iounmap(tw_dev->base_addr);
+out_release_mem_region:
+	pci_release_regions(pdev);
+out_free_device_extension:
+	twl_free_device_extension(tw_dev);
+	scsi_host_put(host);
+out_disable_device:
+	pci_disable_device(pdev);
+
+	return retval;
+} /* End twl_probe() */
+
+/* This function is called to remove a device */
+static void twl_remove(struct pci_dev *pdev)
+{
+	struct Scsi_Host *host = pci_get_drvdata(pdev);
+	TW_Device_Extension *tw_dev;
+
+	if (!host)
+		return;
+
+	tw_dev = (TW_Device_Extension *)host->hostdata;
+
+	if (!tw_dev->online)
+		return;
+
+	/* Remove sysfs binary files */
+	sysfs_remove_bin_file(&host->shost_dev.kobj, &twl_sysfs_aen_read_attr);
+	sysfs_remove_bin_file(&host->shost_dev.kobj, &twl_sysfs_compat_info_attr);
+
+	scsi_remove_host(tw_dev->host);
+
+	/* Unregister character device */
+	if (twl_major >= 0) {
+		unregister_chrdev(twl_major, "twl");
+		twl_major = -1;
+	}
+
+	/* Shutdown the card */
+	__twl_shutdown(tw_dev);
+
+	/* Disable MSI if enabled */
+	if (test_bit(TW_USING_MSI, &tw_dev->flags))
+		pci_disable_msi(pdev);
+
+	/* Free IO remapping */
+	iounmap(tw_dev->base_addr);
+
+	/* Free up the mem region */
+	pci_release_regions(pdev);
+
+	/* Free up device extension resources */
+	twl_free_device_extension(tw_dev);
+
+	scsi_host_put(tw_dev->host);
+	pci_disable_device(pdev);
+	twl_device_extension_count--;
+} /* End twl_remove() */
+
+#ifdef CONFIG_PM
+/* This function is called on PCI suspend */
+static int twl_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	struct Scsi_Host *host = pci_get_drvdata(pdev);
+	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
+
+	printk(KERN_WARNING "3w-sas: Suspending host %d.\n", tw_dev->host->host_no);
+	/* Disable interrupts */
+	TWL_MASK_INTERRUPTS(tw_dev);
+
+	free_irq(tw_dev->tw_pci_dev->irq, tw_dev);
+
+	/* Tell the card we are shutting down */
+	if (twl_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
+		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x23, "Connection shutdown failed during suspend");
+	} else {
+		printk(KERN_WARNING "3w-sas: Suspend complete.\n");
+	}
+
+	/* Clear doorbell interrupt */
+	TWL_CLEAR_DB_INTERRUPT(tw_dev);
+
+	pci_save_state(pdev);
+	pci_disable_device(pdev);
+	pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+	return 0;
+} /* End twl_suspend() */
+
+/* This function is called on PCI resume */
+static int twl_resume(struct pci_dev *pdev)
+{
+	int retval = 0;
+	struct Scsi_Host *host = pci_get_drvdata(pdev);
+	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
+
+	printk(KERN_WARNING "3w-sas: Resuming host %d.\n", tw_dev->host->host_no);
+	pci_set_power_state(pdev, PCI_D0);
+	pci_enable_wake(pdev, PCI_D0, 0);
+	pci_restore_state(pdev);
+
+	retval = pci_enable_device(pdev);
+	if (retval) {
+		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x24, "Enable device failed during resume");
+		return retval;
+	}
+
+	pci_set_master(pdev);
+	pci_try_set_mwi(pdev);
+
+	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
+	    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
+		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
+		    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
+			TW_PRINTK(host, TW_DRIVER, 0x25, "Failed to set dma mask during resume");
+			retval = -ENODEV;
+			goto out_disable_device;
+		}
+
+	/* Initialize the card */
+	if (twl_reset_sequence(tw_dev, 0)) {
+		retval = -ENODEV;
+		goto out_disable_device;
+	}
+
+	/* Now setup the interrupt handler */
+	retval = request_irq(pdev->irq, twl_interrupt, IRQF_SHARED, "3w-sas", tw_dev);
+	if (retval) {
+		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x26, "Error requesting IRQ during resume");
+		retval = -ENODEV;
+		goto out_disable_device;
+	}
+
+	/* Now enable MSI if enabled */
+	if (test_bit(TW_USING_MSI, &tw_dev->flags))
+		pci_enable_msi(pdev);
+
+	/* Re-enable interrupts on the card */
+	TWL_UNMASK_INTERRUPTS(tw_dev);
+
+	printk(KERN_WARNING "3w-sas: Resume complete.\n");
+	return 0;
+
+out_disable_device:
+	scsi_remove_host(host);
+	pci_disable_device(pdev);
+
+	return retval;
+} /* End twl_resume() */
+#endif
+
+/* PCI Devices supported by this driver */
+static struct pci_device_id twl_pci_tbl[] __devinitdata = {
+	{ PCI_VDEVICE(3WARE, PCI_DEVICE_ID_3WARE_9750) },
+	{ }
+};
+MODULE_DEVICE_TABLE(pci, twl_pci_tbl);
+
+/* pci_driver initializer */
+static struct pci_driver twl_driver = {
+	.name		= "3w-sas",
+	.id_table	= twl_pci_tbl,
+	.probe		= twl_probe,
+	.remove		= twl_remove,
+#ifdef CONFIG_PM
+	.suspend	= twl_suspend,
+	.resume		= twl_resume,
+#endif
+	.shutdown	= twl_shutdown
+};
+
+/* This function is called on driver initialization */
+static int __init twl_init(void)
+{
+	printk(KERN_INFO "LSI 3ware SAS/SATA-RAID Controller device driver for Linux v%s.\n", TW_DRIVER_VERSION);
+
+	return pci_register_driver(&twl_driver);
+} /* End twl_init() */
+
+/* This function is called on driver exit */
+static void __exit twl_exit(void)
+{
+	pci_unregister_driver(&twl_driver);
+} /* End twl_exit() */
+
+module_init(twl_init);
+module_exit(twl_exit);
+
diff --git a/drivers/scsi/3w-sas.h b/drivers/scsi/3w-sas.h
new file mode 100644
index 000000000000..d474892701d4
--- /dev/null
+++ b/drivers/scsi/3w-sas.h
@@ -0,0 +1,396 @@
+/*
+   3w-sas.h -- LSI 3ware SAS/SATA-RAID Controller device driver for Linux.
+
+   Written By: Adam Radford <linuxraid@lsi.com>
+
+   Copyright (C) 2009 LSI Corporation.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; version 2 of the License.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   NO WARRANTY
+   THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+   CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+   LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+   solely responsible for determining the appropriateness of using and
+   distributing the Program and assumes all risks associated with its
+   exercise of rights under this Agreement, including but not limited to
+   the risks and costs of program errors, damage to or loss of data,
+   programs or equipment, and unavailability or interruption of operations.
+
+   DISCLAIMER OF LIABILITY
+   NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+   DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+   DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+   ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+   TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+   USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+   HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+   Bugs/Comments/Suggestions should be mailed to:
+   linuxraid@lsi.com
+
+   For more information, goto:
+   http://www.lsi.com
+*/
+
+#ifndef _3W_SAS_H
+#define _3W_SAS_H
+
+/* AEN severity table */
+static char *twl_aen_severity_table[] =
+{
+	"None", "ERROR", "WARNING", "INFO", "DEBUG", NULL
+};
+
+/* Liberator register offsets */
+#define TWL_STATUS                         0x0  /* Status */
+#define TWL_HIBDB                          0x20 /* Inbound doorbell */
+#define TWL_HISTAT                         0x30 /* Host interrupt status */
+#define TWL_HIMASK                         0x34 /* Host interrupt mask */
+#define TWL_HOBDB			   0x9C /* Outbound doorbell */
+#define TWL_HOBDBC                         0xA0 /* Outbound doorbell clear */
+#define TWL_SCRPD3                         0xBC /* Scratchpad */
+#define TWL_HIBQPL                         0xC0 /* Host inbound Q low */
+#define TWL_HIBQPH                         0xC4 /* Host inbound Q high */
+#define TWL_HOBQPL                         0xC8 /* Host outbound Q low */
+#define TWL_HOBQPH                         0xCC /* Host outbound Q high */
+#define TWL_HISTATUS_VALID_INTERRUPT	   0xC
+#define TWL_HISTATUS_ATTENTION_INTERRUPT   0x4
+#define TWL_HISTATUS_RESPONSE_INTERRUPT	   0x8
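+/* (TWL_HISTATUS_VALID_INTERRUPT is just ATTENTION | RESPONSE, 0x4 | 0x8) */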
+#define TWL_STATUS_OVERRUN_SUBMIT	   0x2000
+#define TWL_ISSUE_SOFT_RESET		   0x100
+#define TWL_CONTROLLER_READY		   0x2000
+#define TWL_DOORBELL_CONTROLLER_ERROR	   0x200000
+#define TWL_DOORBELL_ATTENTION_INTERRUPT   0x40000
+#define TWL_PULL_MODE			   0x1
+
+/* Command packet opcodes used by the driver */
+#define TW_OP_INIT_CONNECTION 0x1
+#define TW_OP_GET_PARAM	      0x12
+#define TW_OP_SET_PARAM	      0x13
+#define TW_OP_EXECUTE_SCSI    0x10
+
+/* Asynchronous Event Notification (AEN) codes used by the driver */
+#define TW_AEN_QUEUE_EMPTY       0x0000
+#define TW_AEN_SOFT_RESET        0x0001
+#define TW_AEN_SYNC_TIME_WITH_HOST 0x031
+#define TW_AEN_SEVERITY_ERROR    0x1
+#define TW_AEN_SEVERITY_DEBUG    0x4
+#define TW_AEN_NOT_RETRIEVED 0x1
+
+/* Command state defines */
+#define TW_S_INITIAL   0x1  /* Initial state */
+#define TW_S_STARTED   0x2  /* Id in use */
+#define TW_S_POSTED    0x4  /* Posted to the controller */
+#define TW_S_COMPLETED 0x8  /* Completed by isr */
+#define TW_S_FINISHED  0x10 /* I/O completely done */
+
+/* Compatibility defines */
+#define TW_9750_ARCH_ID 10
+#define TW_CURRENT_DRIVER_SRL 40
+#define TW_CURRENT_DRIVER_BUILD 0
+#define TW_CURRENT_DRIVER_BRANCH 0
+
+/* Phase defines */
+#define TW_PHASE_INITIAL 0
+#define TW_PHASE_SGLIST  2
+
+/* Misc defines */
+#define TW_SECTOR_SIZE                        512
+#define TW_MAX_UNITS			      32
+#define TW_INIT_MESSAGE_CREDITS		      0x100
+#define TW_INIT_COMMAND_PACKET_SIZE	      0x3
+#define TW_INIT_COMMAND_PACKET_SIZE_EXTENDED  0x6
+#define TW_EXTENDED_INIT_CONNECT	      0x2
+#define TW_BASE_FW_SRL			      24
+#define TW_BASE_FW_BRANCH		      0
+#define TW_BASE_FW_BUILD		      1
+#define TW_Q_LENGTH			      256
+#define TW_Q_START			      0
+#define TW_MAX_SLOT			      32
+#define TW_MAX_RESET_TRIES		      2
+#define TW_MAX_CMDS_PER_LUN		      254
+#define TW_MAX_AEN_DRAIN		      255
+#define TW_IN_RESET                           2
+#define TW_USING_MSI			      3
+#define TW_IN_ATTENTION_LOOP		      4
+#define TW_MAX_SECTORS                        256
+#define TW_MAX_CDB_LEN                        16
+#define TW_IOCTL_CHRDEV_TIMEOUT               60 /* 60 seconds */
+#define TW_IOCTL_CHRDEV_FREE                  -1
+#define TW_COMMAND_OFFSET                     128 /* 128 bytes */
+#define TW_VERSION_TABLE                      0x0402
+#define TW_TIMEKEEP_TABLE		      0x040A
+#define TW_INFORMATION_TABLE		      0x0403
+#define TW_PARAM_FWVER			      3
+#define TW_PARAM_FWVER_LENGTH		      16
+#define TW_PARAM_BIOSVER		      4
+#define TW_PARAM_BIOSVER_LENGTH		      16
+#define TW_PARAM_MODEL			      8
+#define TW_PARAM_MODEL_LENGTH		      16
+#define TW_PARAM_PHY_SUMMARY_TABLE	      1
+#define TW_PARAM_PHYCOUNT		      2
+#define TW_PARAM_PHYCOUNT_LENGTH	      1
+#define TW_IOCTL_FIRMWARE_PASS_THROUGH        0x108  /* Used by smartmontools */
+#define TW_ALLOCATION_LENGTH		      128
+#define TW_SENSE_DATA_LENGTH		      18
+#define TW_ERROR_LOGICAL_UNIT_NOT_SUPPORTED   0x10a
+#define TW_ERROR_INVALID_FIELD_IN_CDB	      0x10d
+#define TW_ERROR_UNIT_OFFLINE                 0x128
+#define TW_MESSAGE_SOURCE_CONTROLLER_ERROR    3
+#define TW_MESSAGE_SOURCE_CONTROLLER_EVENT    4
+#define TW_DRIVER 			      6
+#ifndef PCI_DEVICE_ID_3WARE_9750
+#define PCI_DEVICE_ID_3WARE_9750 0x1010
+#endif
+
+/* Bitmask macros to eliminate bitfields */
+
+/* opcode: 5, reserved: 3 */
+#define TW_OPRES_IN(x,y) ((x << 5) | (y & 0x1f))
+#define TW_OP_OUT(x) (x & 0x1f)
+
+/* opcode: 5, sgloffset: 3 */
+#define TW_OPSGL_IN(x,y) ((x << 5) | (y & 0x1f))
+#define TW_SGL_OUT(x) ((x >> 5) & 0x7)
+
+/* severity: 3, reserved: 5 */
+#define TW_SEV_OUT(x) (x & 0x7)
+
+/* not_mfa: 1, reserved: 7, status: 8, request_id: 16 */
+#define TW_RESID_OUT(x) ((x >> 16) & 0xffff)
+#define TW_NOTMFA_OUT(x) (x & 0x1)
+
+/* request_id: 12, lun: 4 */
+#define TW_REQ_LUN_IN(lun, request_id) (((lun << 12) & 0xf000) | (request_id & 0xfff))
+#define TW_LUN_OUT(lun) ((lun >> 12) & 0xf)
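+
+/* Example: twl_get_param() builds its opcode byte with
+   TW_OPSGL_IN(2, TW_OP_GET_PARAM) == (2 << 5) | 0x12 == 0x52,
+   i.e. the sgl offset in the top three bits, the opcode in the low five */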
+
+/* Register access macros */
+#define TWL_STATUS_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_STATUS)
+#define TWL_HOBQPL_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HOBQPL)
+#define TWL_HOBQPH_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HOBQPH)
+#define TWL_HOBDB_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HOBDB)
+#define TWL_HOBDBC_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HOBDBC)
+#define TWL_HIMASK_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HIMASK)
+#define TWL_HISTAT_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HISTAT)
+#define TWL_HIBQPH_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HIBQPH)
+#define TWL_HIBQPL_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HIBQPL)
+#define TWL_HIBDB_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HIBDB)
+#define TWL_SCRPD3_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_SCRPD3)
+#define TWL_MASK_INTERRUPTS(x) (writel(~0, TWL_HIMASK_REG_ADDR(tw_dev)))
+#define TWL_UNMASK_INTERRUPTS(x) (writel(~TWL_HISTATUS_VALID_INTERRUPT, TWL_HIMASK_REG_ADDR(tw_dev)))
+#define TWL_CLEAR_DB_INTERRUPT(x) (writel(~0, TWL_HOBDBC_REG_ADDR(tw_dev)))
+#define TWL_SOFT_RESET(x) (writel(TWL_ISSUE_SOFT_RESET, TWL_HIBDB_REG_ADDR(tw_dev)))
+
+/* Macros */
+#define TW_PRINTK(h,a,b,c) { \
+if (h) \
+printk(KERN_WARNING "3w-sas: scsi%d: ERROR: (0x%02X:0x%04X): %s.\n",h->host_no,a,b,c); \
+else \
+printk(KERN_WARNING "3w-sas: ERROR: (0x%02X:0x%04X): %s.\n",a,b,c); \
+}
+#define TW_MAX_LUNS 16
+#define TW_COMMAND_SIZE (sizeof(dma_addr_t) > 4 ? 6 : 4)
+#define TW_LIBERATOR_MAX_SGL_LENGTH (sizeof(dma_addr_t) > 4 ? 46 : 92)
+#define TW_LIBERATOR_MAX_SGL_LENGTH_OLD (sizeof(dma_addr_t) > 4 ? 47 : 94)
+#define TW_PADDING_LENGTH_LIBERATOR 136
+#define TW_PADDING_LENGTH_LIBERATOR_OLD 132
+#define TW_CPU_TO_SGL(x) (sizeof(dma_addr_t) > 4 ? cpu_to_le64(x) : cpu_to_le32(x))
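+
+/* With a 64-bit dma_addr_t each ISO SGL entry is 16 bytes rather than 8,
+   so half as many entries (46 vs 92) fit in the fixed-size command packet,
+   and SGL addresses are emitted with cpu_to_le64() instead of cpu_to_le32() */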
+
+#pragma pack(1)
+
+/* SGL entry */
+typedef struct TAG_TW_SG_Entry_ISO {
+	dma_addr_t address;
+	dma_addr_t length;
+} TW_SG_Entry_ISO;
+
+/* Old Command Packet with ISO SGL */
+typedef struct TW_Command {
+	unsigned char opcode__sgloffset;
+	unsigned char size;
+	unsigned char request_id;
+	unsigned char unit__hostid;
+	/* Second DWORD */
+	unsigned char status;
+	unsigned char flags;
+	union {
+		unsigned short block_count;
+		unsigned short parameter_count;
+	} byte6_offset;
+	union {
+		struct {
+			u32 lba;
+			TW_SG_Entry_ISO sgl[TW_LIBERATOR_MAX_SGL_LENGTH_OLD];
+			unsigned char padding[TW_PADDING_LENGTH_LIBERATOR_OLD];
+		} io;
+		struct {
+			TW_SG_Entry_ISO sgl[TW_LIBERATOR_MAX_SGL_LENGTH_OLD];
+			u32 padding;
+			unsigned char padding2[TW_PADDING_LENGTH_LIBERATOR_OLD];
+		} param;
+	} byte8_offset;
+} TW_Command;
+
+/* New Command Packet with ISO SGL */
+typedef struct TAG_TW_Command_Apache {
+	unsigned char opcode__reserved;
+	unsigned char unit;
+	unsigned short request_id__lunl;
+	unsigned char status;
+	unsigned char sgl_offset;
+	unsigned short sgl_entries__lunh;
+	unsigned char cdb[16];
+	TW_SG_Entry_ISO sg_list[TW_LIBERATOR_MAX_SGL_LENGTH];
+	unsigned char padding[TW_PADDING_LENGTH_LIBERATOR];
+} TW_Command_Apache;
+
+/* New command packet header */
+typedef struct TAG_TW_Command_Apache_Header {
+	unsigned char sense_data[TW_SENSE_DATA_LENGTH];
+	struct {
+		char reserved[4];
+		unsigned short error;
+		unsigned char padding;
+		unsigned char severity__reserved;
+	} status_block;
+	unsigned char err_specific_desc[98];
+	struct {
+		unsigned char size_header;
+		unsigned short request_id;
+		unsigned char size_sense;
+	} header_desc;
+} TW_Command_Apache_Header;
+
+/* This struct is a union of the 2 command packets */
+typedef struct TAG_TW_Command_Full {
+	TW_Command_Apache_Header header;
+	union {
+		TW_Command oldcommand;
+		TW_Command_Apache newcommand;
+	} command;
+} TW_Command_Full;
+
+/* Initconnection structure */
+typedef struct TAG_TW_Initconnect {
+	unsigned char opcode__reserved;
+	unsigned char size;
+	unsigned char request_id;
+	unsigned char res2;
+	unsigned char status;
+	unsigned char flags;
+	unsigned short message_credits;
+	u32 features;
+	unsigned short fw_srl;
+	unsigned short fw_arch_id;
+	unsigned short fw_branch;
+	unsigned short fw_build;
+	u32 result;
+} TW_Initconnect;
+
+/* Event info structure */
+typedef struct TAG_TW_Event
+{
+	unsigned int sequence_id;
+	unsigned int time_stamp_sec;
+	unsigned short aen_code;
+	unsigned char severity;
+	unsigned char retrieved;
+	unsigned char repeat_count;
+	unsigned char parameter_len;
+	unsigned char parameter_data[98];
+} TW_Event;
+
+typedef struct TAG_TW_Ioctl_Driver_Command {
+	unsigned int control_code;
+	unsigned int status;
+	unsigned int unique_id;
+	unsigned int sequence_id;
+	unsigned int os_specific;
+	unsigned int buffer_length;
+} TW_Ioctl_Driver_Command;
+
+typedef struct TAG_TW_Ioctl_Apache {
+	TW_Ioctl_Driver_Command driver_command;
+	char padding[488];
+	TW_Command_Full firmware_command;
+	char data_buffer[1];
+} TW_Ioctl_Buf_Apache;
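+
+/* The 488 padding bytes place firmware_command at offset 512 in the buffer
+   (driver_command is 24 bytes), and data_buffer[1] is an old-style flexible
+   array member -- hence the "+ buffer_length - 1" size arithmetic used by
+   twl_chrdev_ioctl() in 3w-sas.c */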
+
+/* GetParam descriptor */
+typedef struct {
+	unsigned short	table_id;
+	unsigned short	parameter_id;
+	unsigned short	parameter_size_bytes;
+	unsigned short  actual_parameter_size_bytes;
+	unsigned char	data[1];
+} TW_Param_Apache;
+
+/* Compatibility information structure */
+typedef struct TAG_TW_Compatibility_Info
+{
+	char driver_version[32];
+	unsigned short working_srl;
+	unsigned short working_branch;
+	unsigned short working_build;
+	unsigned short driver_srl_high;
+	unsigned short driver_branch_high;
+	unsigned short driver_build_high;
+	unsigned short driver_srl_low;
+	unsigned short driver_branch_low;
+	unsigned short driver_build_low;
+	unsigned short fw_on_ctlr_srl;
+	unsigned short fw_on_ctlr_branch;
+	unsigned short fw_on_ctlr_build;
+} TW_Compatibility_Info;
+
+#pragma pack()
+
+typedef struct TAG_TW_Device_Extension {
+	void                     __iomem *base_addr;
+	unsigned long	       	*generic_buffer_virt[TW_Q_LENGTH];
+	dma_addr_t	       	generic_buffer_phys[TW_Q_LENGTH];
+	TW_Command_Full	       	*command_packet_virt[TW_Q_LENGTH];
+	dma_addr_t		command_packet_phys[TW_Q_LENGTH];
+	TW_Command_Apache_Header *sense_buffer_virt[TW_Q_LENGTH];
+	dma_addr_t		sense_buffer_phys[TW_Q_LENGTH];
+	struct pci_dev		*tw_pci_dev;
+	struct scsi_cmnd	*srb[TW_Q_LENGTH];
+	unsigned char		free_queue[TW_Q_LENGTH];
+	unsigned char		free_head;
+	unsigned char		free_tail;
+	int     		state[TW_Q_LENGTH];
+	unsigned int		posted_request_count;
+	unsigned int		max_posted_request_count;
+	unsigned int		max_sgl_entries;
+	unsigned int		sgl_entries;
+	unsigned int		num_resets;
+	unsigned int		sector_count;
+	unsigned int		max_sector_count;
+	unsigned int		aen_count;
+	struct Scsi_Host	*host;
+	long			flags;
+	TW_Event                *event_queue[TW_Q_LENGTH];
+	unsigned char           error_index;
+	unsigned int            error_sequence_id;
+	int			chrdev_request_id;
+	wait_queue_head_t	ioctl_wqueue;
+	struct mutex		ioctl_lock;
+	TW_Compatibility_Info	tw_compat_info;
+	char			online;
+} TW_Device_Extension;
+
+#endif /* _3W_SAS_H */
+
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index faa0fcfed71e..d224294c38fb 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -521,8 +521,12 @@ static ssize_t tw_show_stats(struct device *dev, struct device_attribute *attr,
 } /* End tw_show_stats() */
 
 /* This function will set a devices queue depth */
-static int tw_change_queue_depth(struct scsi_device *sdev, int queue_depth)
+static int tw_change_queue_depth(struct scsi_device *sdev, int queue_depth,
+				 int reason)
 {
+	if (reason != SCSI_QDEPTH_DEFAULT)
+		return -EOPNOTSUPP;
+
 	if (queue_depth > TW_Q_LENGTH-2)
 		queue_depth = TW_Q_LENGTH-2;
 	scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
index f5a9addb7050..6c60a8060c58 100644
--- a/drivers/scsi/53c700.c
+++ b/drivers/scsi/53c700.c
@@ -175,7 +175,7 @@ STATIC void NCR_700_chip_reset(struct Scsi_Host *host);
 STATIC int NCR_700_slave_alloc(struct scsi_device *SDpnt);
 STATIC int NCR_700_slave_configure(struct scsi_device *SDpnt);
 STATIC void NCR_700_slave_destroy(struct scsi_device *SDpnt);
-static int NCR_700_change_queue_depth(struct scsi_device *SDpnt, int depth);
+static int NCR_700_change_queue_depth(struct scsi_device *SDpnt, int depth, int reason);
 static int NCR_700_change_queue_type(struct scsi_device *SDpnt, int depth);
 
 STATIC struct device_attribute *NCR_700_dev_attrs[];
@@ -2082,8 +2082,11 @@ NCR_700_slave_destroy(struct scsi_device *SDp)
 }
 
 static int
-NCR_700_change_queue_depth(struct scsi_device *SDp, int depth)
+NCR_700_change_queue_depth(struct scsi_device *SDp, int depth, int reason)
 {
+	if (reason != SCSI_QDEPTH_DEFAULT)
+		return -EOPNOTSUPP;
+
 	if (depth > NCR_700_MAX_TAGS)
 		depth = NCR_700_MAX_TAGS;
 
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index e11cca4c784c..36900c71a592 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -399,6 +399,17 @@ config SCSI_3W_9XXX
 	  Please read the comments at the top of
 	  <file:drivers/scsi/3w-9xxx.c>.
 
+config SCSI_3W_SAS
+	tristate "3ware 97xx SAS/SATA-RAID support"
+	depends on PCI && SCSI
+	help
+	  This driver supports the LSI 3ware 9750 6Gb/s SAS/SATA-RAID cards.
+
+	  <http://www.lsi.com>
+
+	  Please read the comments at the top of
+	  <file:drivers/scsi/3w-sas.c>.
+
 config SCSI_7000FASST
 	tristate "7000FASST SCSI support"
 	depends on ISA && SCSI && ISA_DMA_API
@@ -621,6 +632,14 @@ config SCSI_FLASHPOINT
 	  substantial, so users of MultiMaster Host Adapters may not
 	  wish to include it.
 
+config VMWARE_PVSCSI
+	tristate "VMware PVSCSI driver support"
+	depends on PCI && SCSI && X86
+	help
+	  This driver supports VMware's paravirtualized SCSI HBA.
+	  To compile this driver as a module, choose M here: the
+	  module will be called vmw_pvscsi.
+
 config LIBFC
 	tristate "LibFC module"
 	select SCSI_FC_ATTRS
@@ -644,7 +663,7 @@ config FCOE
 config FCOE_FNIC
 	tristate "Cisco FNIC Driver"
 	depends on PCI && X86
-	select LIBFC
+	select LIBFCOE
 	help
 	  This is support for the Cisco PCI-Express FCoE HBA.
 
@@ -1818,6 +1837,14 @@ config SCSI_PMCRAID
 	---help---
 	  This driver supports the PMC SIERRA MaxRAID adapters.
 
+config SCSI_PM8001
+	tristate "PMC-Sierra SPC 8001 SAS/SATA Based Host Adapter driver"
+	depends on PCI && SCSI
+	select SCSI_SAS_LIBSAS
+	help
+	  This driver supports PMC-Sierra PCIE SAS/SATA 8x6G SPC 8001 chip
+	  based host adapters.
+
 config SCSI_SRP
 	tristate "SCSI RDMA Protocol helper library"
 	depends on SCSI && PCI
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 3ad61db5e3fa..280d3c657d60 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -70,6 +70,7 @@ obj-$(CONFIG_SCSI_AIC79XX)	+= aic7xxx/
 obj-$(CONFIG_SCSI_AACRAID)	+= aacraid/
 obj-$(CONFIG_SCSI_AIC7XXX_OLD)	+= aic7xxx_old.o
 obj-$(CONFIG_SCSI_AIC94XX)	+= aic94xx/
+obj-$(CONFIG_SCSI_PM8001)	+= pm8001/
 obj-$(CONFIG_SCSI_IPS)		+= ips.o
 obj-$(CONFIG_SCSI_FD_MCS)	+= fd_mcs.o
 obj-$(CONFIG_SCSI_FUTURE_DOMAIN)+= fdomain.o
@@ -113,6 +114,7 @@ obj-$(CONFIG_SCSI_MESH)		+= mesh.o
 obj-$(CONFIG_SCSI_MAC53C94)	+= mac53c94.o
 obj-$(CONFIG_BLK_DEV_3W_XXXX_RAID) += 3w-xxxx.o
 obj-$(CONFIG_SCSI_3W_9XXX)	+= 3w-9xxx.o
+obj-$(CONFIG_SCSI_3W_SAS)	+= 3w-sas.o
 obj-$(CONFIG_SCSI_PPA)		+= ppa.o
 obj-$(CONFIG_SCSI_IMM)		+= imm.o
 obj-$(CONFIG_JAZZ_ESP)		+= esp_scsi.o	jazz_esp.o
@@ -133,6 +135,7 @@ obj-$(CONFIG_SCSI_CXGB3_ISCSI)	+= libiscsi.o libiscsi_tcp.o cxgb3i/
 obj-$(CONFIG_SCSI_BNX2_ISCSI)	+= libiscsi.o bnx2i/
 obj-$(CONFIG_BE2ISCSI)		+= libiscsi.o be2iscsi/
 obj-$(CONFIG_SCSI_PMCRAID)	+= pmcraid.o
+obj-$(CONFIG_VMWARE_PVSCSI)	+= vmw_pvscsi.o
 
 obj-$(CONFIG_ARM)		+= arm/
 
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 9b97c3e016fe..e9373a2d14fa 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -472,8 +472,12 @@ static int aac_slave_configure(struct scsi_device *sdev)
  *	total capacity and the queue depth supported by the target device.
  */
 
-static int aac_change_queue_depth(struct scsi_device *sdev, int depth)
+static int aac_change_queue_depth(struct scsi_device *sdev, int depth,
+				  int reason)
 {
+	if (reason != SCSI_QDEPTH_DEFAULT)
+		return -EOPNOTSUPP;
+
 	if (sdev->tagged_supported && (sdev->type == TYPE_DISK) &&
 	    (sdev_channel(sdev) == CONTAINER_CHANNEL)) {
 		struct scsi_device * dev;
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 80aac01b5a6f..47d5d19f8c92 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -98,8 +98,11 @@ static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb);
 static const char *arcmsr_info(struct Scsi_Host *);
 static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb);
 static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev,
-								int queue_depth)
+					  int queue_depth, int reason)
 {
+	if (reason != SCSI_QDEPTH_DEFAULT)
+		return -EOPNOTSUPP;
+
 	if (queue_depth > ARCMSR_MAX_CMD_PERLUN)
 		queue_depth = ARCMSR_MAX_CMD_PERLUN;
 	scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
diff --git a/drivers/scsi/be2iscsi/be.h b/drivers/scsi/be2iscsi/be.h
index b36020dcf012..a93a5040f087 100644
--- a/drivers/scsi/be2iscsi/be.h
+++ b/drivers/scsi/be2iscsi/be.h
@@ -20,8 +20,10 @@
 
 #include <linux/pci.h>
 #include <linux/if_vlan.h>
-
-#define FW_VER_LEN 32
+#include <linux/blk-iopoll.h>
+#define FW_VER_LEN	32
+#define MCC_Q_LEN	128
+#define MCC_CQ_LEN	256
 
 struct be_dma_mem {
 	void *va;
@@ -74,18 +76,14 @@ static inline void queue_tail_inc(struct be_queue_info *q)
 
 struct be_eq_obj {
 	struct be_queue_info q;
-	char desc[32];
-
-	/* Adaptive interrupt coalescing (AIC) info */
-	bool enable_aic;
-	u16 min_eqd;		/* in usecs */
-	u16 max_eqd;		/* in usecs */
-	u16 cur_eqd;		/* in usecs */
+	struct beiscsi_hba *phba;
+	struct be_queue_info *cq;
+	struct blk_iopoll	iopoll;
 };
 
 struct be_mcc_obj {
-	struct be_queue_info *q;
-	struct be_queue_info *cq;
+	struct be_queue_info q;
+	struct be_queue_info cq;
 };
 
 struct be_ctrl_info {
@@ -176,8 +174,4 @@ static inline void swap_dws(void *wrb, int len)
 	} while (len);
 #endif /* __BIG_ENDIAN */
 }
-
-extern void beiscsi_cq_notify(struct be_ctrl_info *ctrl, u16 qid, bool arm,
-			      u16 num_popped);
-
 #endif /* BEISCSI_H */
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c
index 08007b6e42df..698a527d6cca 100644
--- a/drivers/scsi/be2iscsi/be_cmds.c
+++ b/drivers/scsi/be2iscsi/be_cmds.c
@@ -19,6 +19,16 @@
 #include "be_mgmt.h"
 #include "be_main.h"
 
+static void be_mcc_notify(struct beiscsi_hba *phba)
+{
+	struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
+	u32 val = 0;
+
+	val |= mccq->id & DB_MCCQ_RING_ID_MASK;
+	val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
+	iowrite32(val, phba->db_va + DB_MCCQ_OFFSET);
+}
+
 static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
 {
 	if (compl->flags != 0) {
@@ -54,13 +64,56 @@ static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
 	return 0;
 }
 
+
 static inline bool is_link_state_evt(u32 trailer)
 {
 	return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
-		ASYNC_TRAILER_EVENT_CODE_MASK) == ASYNC_EVENT_CODE_LINK_STATE);
+		  ASYNC_TRAILER_EVENT_CODE_MASK) ==
+		  ASYNC_EVENT_CODE_LINK_STATE);
+}
+
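+/* Peek the MCC completion queue tail; consume the entry only if its flags
+   word shows it is new (be_mcc_compl_is_new), otherwise return NULL */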
+static struct be_mcc_compl *be_mcc_compl_get(struct beiscsi_hba *phba)
+{
+	struct be_queue_info *mcc_cq = &phba->ctrl.mcc_obj.cq;
+	struct be_mcc_compl *compl = queue_tail_node(mcc_cq);
+
+	if (be_mcc_compl_is_new(compl)) {
+		queue_tail_inc(mcc_cq);
+		return compl;
+	}
+	return NULL;
+}
+
+static void be2iscsi_fail_session(struct iscsi_cls_session *cls_session)
+{
+	iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
+}
+
+static void beiscsi_async_link_state_process(struct beiscsi_hba *phba,
+		struct be_async_event_link_state *evt)
+{
+	switch (evt->port_link_status) {
+	case ASYNC_EVENT_LINK_DOWN:
+		SE_DEBUG(DBG_LVL_1, "Link Down on Physical Port %d\n",
+						evt->physical_port);
+		phba->state |= BE_ADAPTER_LINK_DOWN;
+		break;
+	case ASYNC_EVENT_LINK_UP:
+		phba->state = BE_ADAPTER_UP;
+		SE_DEBUG(DBG_LVL_1, "Link UP on Physical Port %d\n",
+						evt->physical_port);
+		iscsi_host_for_each_session(phba->shost,
+					    be2iscsi_fail_session);
+		break;
+	default:
+		SE_DEBUG(DBG_LVL_1, "Unexpected Async Notification %d on "
+				    "Physical Port %d\n",
+				     evt->port_link_status,
+				     evt->physical_port);
+	}
 }
 
-void beiscsi_cq_notify(struct be_ctrl_info *ctrl, u16 qid, bool arm,
+static void beiscsi_cq_notify(struct beiscsi_hba *phba, u16 qid, bool arm,
 		       u16 num_popped)
 {
 	u32 val = 0;
@@ -68,7 +121,66 @@ void beiscsi_cq_notify(struct be_ctrl_info *ctrl, u16 qid, bool arm,
 	if (arm)
 		val |= 1 << DB_CQ_REARM_SHIFT;
 	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
-	iowrite32(val, ctrl->db + DB_CQ_OFFSET);
+	iowrite32(val, phba->db_va + DB_CQ_OFFSET);
+}
+
+
+int beiscsi_process_mcc(struct beiscsi_hba *phba)
+{
+	struct be_mcc_compl *compl;
+	int num = 0, status = 0;
+	struct be_ctrl_info *ctrl = &phba->ctrl;
+
+	spin_lock_bh(&phba->ctrl.mcc_cq_lock);
+	while ((compl = be_mcc_compl_get(phba))) {
+		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
+			/* Interpret flags as an async trailer */
+			BUG_ON(!is_link_state_evt(compl->flags));
+
+			/* Interpret compl as a async link evt */
+			beiscsi_async_link_state_process(phba,
+				(struct be_async_event_link_state *) compl);
+		} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
+			status = be_mcc_compl_process(ctrl, compl);
+			atomic_dec(&phba->ctrl.mcc_obj.q.used);
+		}
+		be_mcc_compl_use(compl);
+		num++;
+	}
+
+	if (num)
+		beiscsi_cq_notify(phba, phba->ctrl.mcc_obj.cq.id, true, num);
+
+	spin_unlock_bh(&phba->ctrl.mcc_cq_lock);
+	return status;
+}
+
+/* Wait till no more pending mcc requests are present */
+static int be_mcc_wait_compl(struct beiscsi_hba *phba)
+{
+#define mcc_timeout		120000 /* 12s timeout: 120000 polls of 100us */
+	int i, status;
+	for (i = 0; i < mcc_timeout; i++) {
+		status = beiscsi_process_mcc(phba);
+		if (status)
+			return status;
+
+		if (atomic_read(&phba->ctrl.mcc_obj.q.used) == 0)
+			break;
+		udelay(100);
+	}
+	if (i == mcc_timeout) {
+		dev_err(&phba->pcidev->dev, "mccq poll timed out\n");
+		return -1;
+	}
+	return 0;
+}
+
+/* Notify MCC requests and wait for completion */
+int be_mcc_notify_wait(struct beiscsi_hba *phba)
+{
+	be_mcc_notify(phba);
+	return be_mcc_wait_compl(phba);
 }
 
 static int be_mbox_db_ready_wait(struct be_ctrl_info *ctrl)
@@ -142,6 +254,52 @@ int be_mbox_notify(struct be_ctrl_info *ctrl)
 	return 0;
 }
 
+/*
+ * Insert the mailbox address into the doorbell in two steps, then poll
+ * the mbox doorbell until a command completion (or a timeout) occurs.
+ */
+static int be_mbox_notify_wait(struct beiscsi_hba *phba)
+{
+	int status;
+	u32 val = 0;
+	void __iomem *db = phba->ctrl.db + MPU_MAILBOX_DB_OFFSET;
+	struct be_dma_mem *mbox_mem = &phba->ctrl.mbox_mem;
+	struct be_mcc_mailbox *mbox = mbox_mem->va;
+	struct be_mcc_compl *compl = &mbox->compl;
+	struct be_ctrl_info *ctrl = &phba->ctrl;
+
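+	/*
+	 * The high half of the mailbox DMA address is written first with
+	 * MPU_MAILBOX_DB_HI_MASK set; the following low-half write is
+	 * what triggers command execution in the firmware.
+	 */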
+	val |= MPU_MAILBOX_DB_HI_MASK;
+	/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
+	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
+	iowrite32(val, db);
+
+	/* wait for ready to be set */
+	status = be_mbox_db_ready_wait(ctrl);
+	if (status != 0)
+		return status;
+
+	val = 0;
+	/* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
+	val |= (u32)(mbox_mem->dma >> 4) << 2;
+	iowrite32(val, db);
+
+	status = be_mbox_db_ready_wait(ctrl);
+	if (status != 0)
+		return status;
+
+	/* A cq entry has been made now */
+	if (be_mcc_compl_is_new(compl)) {
+		status = be_mcc_compl_process(ctrl, &mbox->compl);
+		be_mcc_compl_use(compl);
+		if (status)
+			return status;
+	} else {
+		dev_err(&phba->pcidev->dev, "invalid mailbox completion\n");
+		return -1;
+	}
+	return 0;
+}
+
 void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
 				bool embedded, u8 sge_cnt)
 {
@@ -203,6 +361,20 @@ struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem)
 	return &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
 }
 
+struct be_mcc_wrb *wrb_from_mccq(struct beiscsi_hba *phba)
+{
+	struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
+	struct be_mcc_wrb *wrb;
+
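+	/*
+	 * Producer side of the MCC ring: the used count taken here is
+	 * dropped again when the matching completion is processed, and
+	 * callers typically post the WRB via be_mcc_notify_wait().
+	 */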
+	BUG_ON(atomic_read(&mccq->used) >= mccq->len);
+	wrb = queue_head_node(mccq);
+	queue_head_inc(mccq);
+	atomic_inc(&mccq->used);
+	memset(wrb, 0, sizeof(*wrb));
+	return wrb;
+}
+
 int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
 			  struct be_queue_info *eq, int eq_delay)
 {
@@ -212,6 +384,7 @@ int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
 	struct be_dma_mem *q_mem = &eq->dma_mem;
 	int status;
 
+	SE_DEBUG(DBG_LVL_8, "In beiscsi_cmd_eq_create\n");
 	spin_lock(&ctrl->mbox_lock);
 	memset(wrb, 0, sizeof(*wrb));
 
@@ -249,6 +422,7 @@ int be_cmd_fw_initialize(struct be_ctrl_info *ctrl)
 	int status;
 	u8 *endian_check;
 
+	SE_DEBUG(DBG_LVL_8, "In be_cmd_fw_initialize\n");
 	spin_lock(&ctrl->mbox_lock);
 	memset(wrb, 0, sizeof(*wrb));
 
@@ -282,6 +456,7 @@ int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
 	void *ctxt = &req->context;
 	int status;
 
+	SE_DEBUG(DBG_LVL_8, "In beiscsi_cmd_cq_create \n");
 	spin_lock(&ctrl->mbox_lock);
 	memset(wrb, 0, sizeof(*wrb));
 
@@ -289,7 +464,6 @@ int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
 
 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
 			OPCODE_COMMON_CQ_CREATE, sizeof(*req));
-
 	if (!q_mem->va)
 		SE_DEBUG(DBG_LVL_1, "uninitialized q_mem->va\n");
 
@@ -329,6 +503,53 @@ static u32 be_encoded_q_len(int q_len)
 		len_encoded = 0;
 	return len_encoded;
 }
+
+int beiscsi_cmd_mccq_create(struct beiscsi_hba *phba,
+			struct be_queue_info *mccq,
+			struct be_queue_info *cq)
+{
+	struct be_mcc_wrb *wrb;
+	struct be_cmd_req_mcc_create *req;
+	struct be_dma_mem *q_mem = &mccq->dma_mem;
+	struct be_ctrl_info *ctrl;
+	void *ctxt;
+	int status;
+
+	spin_lock(&phba->ctrl.mbox_lock);
+	ctrl = &phba->ctrl;
+	wrb = wrb_from_mbox(&ctrl->mbox_mem);
+	req = embedded_payload(wrb);
+	ctxt = &req->context;
+
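+	/*
+	 * The MCC queue context binds the new ring to this PCI function
+	 * and to the completion queue that will carry its CQEs.
+	 */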
+	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+
+	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+			OPCODE_COMMON_MCC_CREATE, sizeof(*req));
+
+	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
+
+	AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt,
+		      PCI_FUNC(phba->pcidev->devfn));
+	AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
+	AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
+		be_encoded_q_len(mccq->len));
+	AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);
+
+	be_dws_cpu_to_le(ctxt, sizeof(req->context));
+
+	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
+
+	status = be_mbox_notify_wait(phba);
+	if (!status) {
+		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
+		mccq->id = le16_to_cpu(resp->id);
+		mccq->created = true;
+	}
+	spin_unlock(&phba->ctrl.mbox_lock);
+
+	return status;
+}
+
 int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
 			  int queue_type)
 {
@@ -337,6 +558,7 @@ int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
 	u8 subsys = 0, opcode = 0;
 	int status;
 
+	SE_DEBUG(DBG_LVL_8, "In beiscsi_cmd_q_destroy \n");
 	spin_lock(&ctrl->mbox_lock);
 	memset(wrb, 0, sizeof(*wrb));
 	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -350,6 +572,10 @@ int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
 		subsys = CMD_SUBSYSTEM_COMMON;
 		opcode = OPCODE_COMMON_CQ_DESTROY;
 		break;
+	case QTYPE_MCCQ:
+		subsys = CMD_SUBSYSTEM_COMMON;
+		opcode = OPCODE_COMMON_MCC_DESTROY;
+		break;
 	case QTYPE_WRBQ:
 		subsys = CMD_SUBSYSTEM_ISCSI;
 		opcode = OPCODE_COMMON_ISCSI_WRBQ_DESTROY;
@@ -377,30 +603,6 @@ int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
 	return status;
 }
 
-int be_cmd_get_mac_addr(struct be_ctrl_info *ctrl, u8 *mac_addr)
-{
-	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
-	struct be_cmd_req_get_mac_addr *req = embedded_payload(wrb);
-	int status;
-
-	spin_lock(&ctrl->mbox_lock);
-	memset(wrb, 0, sizeof(*wrb));
-	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
-	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
-			   OPCODE_COMMON_ISCSI_NTWK_GET_NIC_CONFIG,
-			   sizeof(*req));
-
-	status = be_mbox_notify(ctrl);
-	if (!status) {
-		struct be_cmd_resp_get_mac_addr *resp = embedded_payload(wrb);
-
-		memcpy(mac_addr, resp->mac_address, ETH_ALEN);
-	}
-
-	spin_unlock(&ctrl->mbox_lock);
-	return status;
-}
-
 int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
 				    struct be_queue_info *cq,
 				    struct be_queue_info *dq, int length,
@@ -412,6 +614,7 @@ int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
 	void *ctxt = &req->context;
 	int status;
 
+	SE_DEBUG(DBG_LVL_8, "In be_cmd_create_default_pdu_queue\n");
 	spin_lock(&ctrl->mbox_lock);
 	memset(wrb, 0, sizeof(*wrb));
 
@@ -468,8 +671,10 @@ int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem,
 	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
 
 	status = be_mbox_notify(ctrl);
-	if (!status)
+	if (!status) {
 		wrbq->id = le16_to_cpu(resp->cid);
+		wrbq->created = true;
+	}
 	spin_unlock(&ctrl->mbox_lock);
 	return status;
 }
diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h
index c20d686cbb43..5de8acb924cb 100644
--- a/drivers/scsi/be2iscsi/be_cmds.h
+++ b/drivers/scsi/be2iscsi/be_cmds.h
@@ -47,6 +47,8 @@ struct be_mcc_wrb {
 
 #define CQE_FLAGS_VALID_MASK (1 << 31)
 #define CQE_FLAGS_ASYNC_MASK (1 << 30)
+#define CQE_FLAGS_COMPLETED_MASK (1 << 28)
+#define CQE_FLAGS_CONSUMED_MASK (1 << 27)
 
 /* Completion Status */
 #define MCC_STATUS_SUCCESS 0x0
@@ -173,7 +175,7 @@ struct be_cmd_req_hdr {
 	u8 domain;		/* dword 0 */
 	u32 timeout;		/* dword 1 */
 	u32 request_length;	/* dword 2 */
-	u32 rsvd;		/* dword 3 */
+	u32 rsvd0;		/* dword 3 */
 };
 
 struct be_cmd_resp_hdr {
@@ -382,7 +384,6 @@ struct be_cmd_req_modify_eq_delay {
 
 #define ETH_ALEN	6
 
-
 struct be_cmd_req_get_mac_addr {
 	struct be_cmd_req_hdr hdr;
 	u32 nic_port_count;
@@ -417,14 +418,21 @@ int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
 
 int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
 			  int type);
+int beiscsi_cmd_mccq_create(struct beiscsi_hba *phba,
+			struct be_queue_info *mccq,
+			struct be_queue_info *cq);
+
 int be_poll_mcc(struct be_ctrl_info *ctrl);
-unsigned char mgmt_check_supported_fw(struct be_ctrl_info *ctrl);
-int be_cmd_get_mac_addr(struct be_ctrl_info *ctrl, u8 *mac_addr);
+unsigned char mgmt_check_supported_fw(struct be_ctrl_info *ctrl,
+				      struct beiscsi_hba *phba);
+int be_cmd_get_mac_addr(struct beiscsi_hba *phba, u8 *mac_addr);
 
 /* ISCSI Functions */
 int be_cmd_fw_initialize(struct be_ctrl_info *ctrl);
 
 struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem);
+struct be_mcc_wrb *wrb_from_mccq(struct beiscsi_hba *phba);
+int be_mcc_notify_wait(struct beiscsi_hba *phba);
 
 int be_mbox_notify(struct be_ctrl_info *ctrl);
 
@@ -531,6 +539,23 @@ struct amap_sol_cqe {
 	u8 valid;		/* dword 3 */
 } __packed;
 
+#define SOL_ICD_INDEX_MASK	0x0003FFC0
+struct amap_sol_cqe_ring {
+	u8 hw_sts[8];		/* dword 0 */
+	u8 i_sts[8];		/* dword 0 */
+	u8 i_resp[8];		/* dword 0 */
+	u8 i_flags[7];		/* dword 0 */
+	u8 s;			/* dword 0 */
+	u8 i_exp_cmd_sn[32];	/* dword 1 */
+	u8 code[6];		/* dword 2 */
+	u8 icd_index[12];	/* dword 2 */
+	u8 rsvd[6];		/* dword 2 */
+	u8 i_cmd_wnd[8];	/* dword 2 */
+	u8 i_res_cnt[31];	/* dword 3 */
+	u8 valid;		/* dword 3 */
+} __packed;
 
 /**
  * Post WRB Queue Doorbell Register used by the host Storage
@@ -664,8 +689,8 @@ struct be_fw_cfg {
 #define	OPCODE_COMMON_TCP_UPLOAD	56
 #define OPCODE_COMMON_ISCSI_ERROR_RECOVERY_INVALIDATE_COMMANDS 1
 /* --- CMD_ISCSI_INVALIDATE_CONNECTION_TYPE --- */
-#define CMD_ISCSI_CONNECTION_INVALIDATE 1
-#define CMD_ISCSI_CONNECTION_ISSUE_TCP_RST 2
+#define CMD_ISCSI_CONNECTION_INVALIDATE 0x8001
+#define CMD_ISCSI_CONNECTION_ISSUE_TCP_RST 0x8002
 #define OPCODE_ISCSI_INI_DRIVER_INVALIDATE_CONNECTION 42
 
 #define INI_WR_CMD			1	/* Initiator write command */
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index 2fd25442cfaf..d587b0362f18 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -67,11 +67,11 @@ struct iscsi_cls_session *beiscsi_session_create(struct iscsi_endpoint *ep,
 		cmds_max = beiscsi_ep->phba->params.wrbs_per_cxn;
 	}
 
-	 cls_session = iscsi_session_setup(&beiscsi_iscsi_transport,
-					   shost, cmds_max,
-					   sizeof(*beiscsi_sess),
-					   sizeof(*io_task),
-					   initial_cmdsn, ISCSI_MAX_TARGET);
+	cls_session = iscsi_session_setup(&beiscsi_iscsi_transport,
+					  shost, cmds_max,
+					  sizeof(*beiscsi_sess),
+					  sizeof(*io_task),
+					  initial_cmdsn, ISCSI_MAX_TARGET);
 	if (!cls_session)
 		return NULL;
 	sess = cls_session->dd_data;
@@ -297,7 +297,7 @@ int beiscsi_get_host_param(struct Scsi_Host *shost,
 
 	switch (param) {
 	case ISCSI_HOST_PARAM_HWADDRESS:
-		be_cmd_get_mac_addr(&phba->ctrl, phba->mac_address);
+		be_cmd_get_mac_addr(phba, phba->mac_address);
 		len = sysfs_format_mac(buf, phba->mac_address, ETH_ALEN);
 		break;
 	default:
@@ -377,16 +377,12 @@ int beiscsi_conn_start(struct iscsi_cls_conn *cls_conn)
 	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
 	struct beiscsi_endpoint *beiscsi_ep;
 	struct beiscsi_offload_params params;
-	struct iscsi_session *session = conn->session;
-	struct Scsi_Host *shost = iscsi_session_to_shost(session->cls_session);
-	struct beiscsi_hba *phba = iscsi_host_priv(shost);
 
 	memset(&params, 0, sizeof(struct beiscsi_offload_params));
 	beiscsi_ep = beiscsi_conn->ep;
 	if (!beiscsi_ep)
 		SE_DEBUG(DBG_LVL_1, "In beiscsi_conn_start , no beiscsi_ep\n");
 
-	free_mgmt_sgl_handle(phba, beiscsi_conn->plogin_sgl_handle);
 	beiscsi_conn->login_in_progress = 0;
 	beiscsi_set_params_for_offld(beiscsi_conn, &params);
 	beiscsi_offload_connection(beiscsi_conn, &params);
@@ -498,6 +494,13 @@ beiscsi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
 		SE_DEBUG(DBG_LVL_1, "shost is NULL \n");
 		return ERR_PTR(ret);
 	}
+
+	if (phba->state) {
+		ret = -EBUSY;
+		SE_DEBUG(DBG_LVL_1, "The Adapet state is Not UP \n");
+		return ERR_PTR(ret);
+	}
+
 	ep = iscsi_create_endpoint(sizeof(struct beiscsi_endpoint));
 	if (!ep) {
 		ret = -ENOMEM;
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 4f1aca346e38..1a557fa77888 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -39,7 +39,8 @@
 
 static unsigned int be_iopoll_budget = 10;
 static unsigned int be_max_phys_size = 64;
-static unsigned int enable_msix;
+static unsigned int enable_msix = 1;
+static unsigned int ring_mode;
 
 MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
 MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR);
@@ -58,6 +59,17 @@ static int beiscsi_slave_configure(struct scsi_device *sdev)
 	return 0;
 }
 
+/*------------------- PCI Driver operations and data ----------------- */
+static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = {
+	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
+	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
+	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
+	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
+	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID4) },
+	{ 0 }
+};
+MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
+
 static struct scsi_host_template beiscsi_sht = {
 	.module = THIS_MODULE,
 	.name = "ServerEngines 10Gbe open-iscsi Initiator Driver",
@@ -76,16 +88,8 @@ static struct scsi_host_template beiscsi_sht = {
 	.cmd_per_lun = BEISCSI_CMD_PER_LUN,
 	.use_clustering = ENABLE_CLUSTERING,
 };
-static struct scsi_transport_template *beiscsi_scsi_transport;
 
-/*------------------- PCI Driver operations and data ----------------- */
-static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = {
-	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
-	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
-	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
-	{ 0 }
-};
-MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
+static struct scsi_transport_template *beiscsi_scsi_transport;
 
 static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
 {
@@ -104,7 +108,6 @@ static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
 	shost->max_cmd_len = BEISCSI_MAX_CMD_LEN;
 	shost->max_lun = BEISCSI_NUM_MAX_LUN;
 	shost->transportt = beiscsi_scsi_transport;
-
 	phba = iscsi_host_priv(shost);
 	memset(phba, 0, sizeof(*phba));
 	phba->shost = shost;
@@ -181,6 +184,7 @@ static int beiscsi_enable_pci(struct pci_dev *pcidev)
 		return ret;
 	}
 
+	pci_set_master(pcidev);
 	if (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64))) {
 		ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32));
 		if (ret) {
@@ -203,7 +207,6 @@ static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
 	status = beiscsi_map_pci_bars(phba, pdev);
 	if (status)
 		return status;
-
 	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
 	mbox_mem_alloc->va = pci_alloc_consistent(pdev,
 						  mbox_mem_alloc->size,
@@ -219,6 +222,9 @@ static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
 	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
 	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
 	spin_lock_init(&ctrl->mbox_lock);
+	spin_lock_init(&phba->ctrl.mcc_lock);
+	spin_lock_init(&phba->ctrl.mcc_cq_lock);
+
 	return status;
 }
 
@@ -268,6 +274,113 @@ static void hwi_ring_eq_db(struct beiscsi_hba *phba,
 }
 
 /**
+ * be_isr_mcc - ISR for the dedicated MCC event queue (MSI-X mode).
+ * @irq: Not used
+ * @dev_id: Pointer to the be_eq_obj this vector services
+ */
+static irqreturn_t be_isr_mcc(int irq, void *dev_id)
+{
+	struct beiscsi_hba *phba;
+	struct be_eq_entry *eqe = NULL;
+	struct be_queue_info *eq;
+	struct be_queue_info *mcc;
+	unsigned int num_eq_processed;
+	struct be_eq_obj *pbe_eq;
+	unsigned long flags;
+
+	pbe_eq = dev_id;
+	eq = &pbe_eq->q;
+	phba = pbe_eq->phba;
+	mcc = &phba->ctrl.mcc_obj.cq;
+	eqe = queue_tail_node(eq);
+	if (!eqe)
+		SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");
+
+	num_eq_processed = 0;
+
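+	/*
+	 * Invalidate every valid EQE; entries whose resource id matches
+	 * the MCC CQ additionally flag deferred work for the workqueue.
+	 */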
+	while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
+				& EQE_VALID_MASK) {
+		if (((eqe->dw[offsetof(struct amap_eq_entry,
+		     resource_id) / 32] &
+		     EQE_RESID_MASK) >> 16) == mcc->id) {
+			spin_lock_irqsave(&phba->isr_lock, flags);
+			phba->todo_mcc_cq = 1;
+			spin_unlock_irqrestore(&phba->isr_lock, flags);
+		}
+		AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
+		queue_tail_inc(eq);
+		eqe = queue_tail_node(eq);
+		num_eq_processed++;
+	}
+	if (phba->todo_mcc_cq)
+		queue_work(phba->wq, &phba->work_cqs);
+	if (num_eq_processed)
+		hwi_ring_eq_db(phba, eq->id, 1,	num_eq_processed, 1, 1);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * be_isr_msix - per-CPU MSI-X ISR for the I/O event queues.
+ * @irq: Not used
+ * @dev_id: Pointer to the be_eq_obj this vector services
+ */
+static irqreturn_t be_isr_msix(int irq, void *dev_id)
+{
+	struct beiscsi_hba *phba;
+	struct be_eq_entry *eqe = NULL;
+	struct be_queue_info *eq;
+	struct be_queue_info *cq;
+	unsigned int num_eq_processed;
+	struct be_eq_obj *pbe_eq;
+	unsigned long flags;
+
+	pbe_eq = dev_id;
+	eq = &pbe_eq->q;
+	cq = pbe_eq->cq;
+	eqe = queue_tail_node(eq);
+	if (!eqe)
+		SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");
+
+	phba = pbe_eq->phba;
+	num_eq_processed = 0;
+	if (blk_iopoll_enabled) {
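+		/*
+		 * With blk-iopoll the CQ is drained later in softirq
+		 * context; the hard irq only invalidates EQEs and
+		 * schedules the poll handler.
+		 */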
+		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
+					& EQE_VALID_MASK) {
+			if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
+				blk_iopoll_sched(&pbe_eq->iopoll);
+
+			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
+			queue_tail_inc(eq);
+			eqe = queue_tail_node(eq);
+			num_eq_processed++;
+		}
+		if (num_eq_processed)
+			hwi_ring_eq_db(phba, eq->id, 1,	num_eq_processed, 0, 1);
+
+		return IRQ_HANDLED;
+	} else {
+		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
+						& EQE_VALID_MASK) {
+			spin_lock_irqsave(&phba->isr_lock, flags);
+			phba->todo_cq = 1;
+			spin_unlock_irqrestore(&phba->isr_lock, flags);
+			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
+			queue_tail_inc(eq);
+			eqe = queue_tail_node(eq);
+			num_eq_processed++;
+		}
+		if (phba->todo_cq)
+			queue_work(phba->wq, &phba->work_cqs);
+
+		if (num_eq_processed)
+			hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1);
+
+		return IRQ_HANDLED;
+	}
+}
+
+/**
  * be_isr - The isr routine of the driver.
  * @irq: Not used
  * @dev_id: Pointer to host adapter structure
@@ -280,48 +393,70 @@ static irqreturn_t be_isr(int irq, void *dev_id)
 	struct be_eq_entry *eqe = NULL;
 	struct be_queue_info *eq;
 	struct be_queue_info *cq;
+	struct be_queue_info *mcc;
 	unsigned long flags, index;
-	unsigned int num_eq_processed;
+	unsigned int num_mcceq_processed, num_ioeq_processed;
 	struct be_ctrl_info *ctrl;
+	struct be_eq_obj *pbe_eq;
 	int isr;
 
 	phba = dev_id;
-	if (!enable_msix) {
-		ctrl = &phba->ctrl;;
-		isr = ioread32(ctrl->csr + CEV_ISR0_OFFSET +
-			       (PCI_FUNC(ctrl->pdev->devfn) * CEV_ISR_SIZE));
-		if (!isr)
-			return IRQ_NONE;
-	}
+	ctrl = &phba->ctrl;;
+	isr = ioread32(ctrl->csr + CEV_ISR0_OFFSET +
+		       (PCI_FUNC(ctrl->pdev->devfn) * CEV_ISR_SIZE));
+	if (!isr)
+		return IRQ_NONE;
 
 	phwi_ctrlr = phba->phwi_ctrlr;
 	phwi_context = phwi_ctrlr->phwi_ctxt;
-	eq = &phwi_context->be_eq.q;
-	cq = &phwi_context->be_cq;
+	pbe_eq = &phwi_context->be_eq[0];
+
+	eq = &phwi_context->be_eq[0].q;
+	mcc = &phba->ctrl.mcc_obj.cq;
 	index = 0;
 	eqe = queue_tail_node(eq);
 	if (!eqe)
 		SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");
 
-	num_eq_processed = 0;
+	num_ioeq_processed = 0;
+	num_mcceq_processed = 0;
 	if (blk_iopoll_enabled) {
 		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
 					& EQE_VALID_MASK) {
-			if (!blk_iopoll_sched_prep(&phba->iopoll))
-				blk_iopoll_sched(&phba->iopoll);
-
+			if (((eqe->dw[offsetof(struct amap_eq_entry,
+			     resource_id) / 32] &
+			     EQE_RESID_MASK) >> 16) == mcc->id) {
+				spin_lock_irqsave(&phba->isr_lock, flags);
+				phba->todo_mcc_cq = 1;
+				spin_unlock_irqrestore(&phba->isr_lock, flags);
+				num_mcceq_processed++;
+			} else {
+				if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
+					blk_iopoll_sched(&pbe_eq->iopoll);
+				num_ioeq_processed++;
+			}
 			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
 			queue_tail_inc(eq);
 			eqe = queue_tail_node(eq);
-			num_eq_processed++;
-			SE_DEBUG(DBG_LVL_8, "Valid EQE\n");
 		}
-		if (num_eq_processed) {
-			hwi_ring_eq_db(phba, eq->id, 0,	num_eq_processed, 0, 1);
+		if (num_ioeq_processed || num_mcceq_processed) {
+			if (phba->todo_mcc_cq)
+				queue_work(phba->wq, &phba->work_cqs);
+
+			if ((num_mcceq_processed) && (!num_ioeq_processed))
+				hwi_ring_eq_db(phba, eq->id, 0,
+					       (num_ioeq_processed +
+						num_mcceq_processed), 1, 1);
+			else
+				hwi_ring_eq_db(phba, eq->id, 0,
+					       (num_ioeq_processed +
+						num_mcceq_processed), 0, 1);
+
 			return IRQ_HANDLED;
 		} else
 			return IRQ_NONE;
 	} else {
+		cq = &phwi_context->be_cq[0];
 		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
 						& EQE_VALID_MASK) {
 
@@ -339,13 +474,14 @@ static irqreturn_t be_isr(int irq, void *dev_id)
 			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
 			queue_tail_inc(eq);
 			eqe = queue_tail_node(eq);
-			num_eq_processed++;
+			num_ioeq_processed++;
 		}
 		if (phba->todo_cq || phba->todo_mcc_cq)
 			queue_work(phba->wq, &phba->work_cqs);
 
-		if (num_eq_processed) {
-			hwi_ring_eq_db(phba, eq->id, 0, num_eq_processed, 1, 1);
+		if (num_ioeq_processed) {
+			hwi_ring_eq_db(phba, eq->id, 0,
+				       num_ioeq_processed, 1, 1);
 			return IRQ_HANDLED;
 		} else
 			return IRQ_NONE;
@@ -355,13 +491,32 @@ static irqreturn_t be_isr(int irq, void *dev_id)
 static int beiscsi_init_irqs(struct beiscsi_hba *phba)
 {
 	struct pci_dev *pcidev = phba->pcidev;
-	int ret;
+	struct hwi_controller *phwi_ctrlr;
+	struct hwi_context_memory *phwi_context;
+	int ret, msix_vec, i = 0;
+	char desc[32];
 
-	ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED, "beiscsi", phba);
-	if (ret) {
-		shost_printk(KERN_ERR, phba->shost, "beiscsi_init_irqs-"
-			     "Failed to register irq\\n");
-		return ret;
+	phwi_ctrlr = phba->phwi_ctrlr;
+	phwi_context = phwi_ctrlr->phwi_ctxt;
+
+	if (phba->msix_enabled) {
+		for (i = 0; i < phba->num_cpus; i++) {
+			sprintf(desc, "beiscsi_msix_%04x", i);
+			msix_vec = phba->msix_entries[i].vector;
+			ret = request_irq(msix_vec, be_isr_msix, 0, desc,
+					  &phwi_context->be_eq[i]);
+			if (ret) {
+				shost_printk(KERN_ERR, phba->shost,
+					     "beiscsi_init_irqs-Failed to "
+					     "register msix for i = %d\n", i);
+				return ret;
+			}
+		}
+		msix_vec = phba->msix_entries[i].vector;
+		ret = request_irq(msix_vec, be_isr_mcc, 0, "beiscsi_msix_mcc",
+				  &phwi_context->be_eq[i]);
+		if (ret) {
+			shost_printk(KERN_ERR, phba->shost, "beiscsi_init_irqs-"
+				     "Failed to register mcc irq\n");
+			return ret;
+		}
+	} else {
+		ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED,
+				  "beiscsi", phba);
+		if (ret) {
+			shost_printk(KERN_ERR, phba->shost, "beiscsi_init_irqs-"
+				     "Failed to register irq\n");
+			return ret;
+		}
 	}
 	return 0;
 }
@@ -378,15 +533,6 @@ static void hwi_ring_cq_db(struct beiscsi_hba *phba,
 	iowrite32(val, phba->db_va + DB_CQ_OFFSET);
 }
 
-/*
- * async pdus include
- * a. unsolicited NOP-In (target initiated NOP-In)
- * b. Async Messages
- * c. Reject PDU
- * d. Login response
- * These headers arrive unprocessed by the EP firmware and iSCSI layer
- * process them
- */
 static unsigned int
 beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
 			  struct beiscsi_hba *phba,
@@ -397,6 +543,9 @@ beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
 {
 	struct iscsi_conn *conn = beiscsi_conn->conn;
 	struct iscsi_session *session = conn->session;
+	struct iscsi_task *task;
+	struct beiscsi_io_task *io_task;
+	struct iscsi_hdr *login_hdr;
 
 	switch (ppdu->dw[offsetof(struct amap_pdu_base, opcode) / 32] &
 						PDUBASE_OPCODE_MASK) {
@@ -412,6 +561,10 @@ beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
 		SE_DEBUG(DBG_LVL_1, "In ISCSI_OP_REJECT\n");
 		break;
 	case ISCSI_OP_LOGIN_RSP:
+		task = conn->login_task;
+		io_task = task->dd_data;
+		login_hdr = (struct iscsi_hdr *)ppdu;
+		login_hdr->itt = io_task->libiscsi_itt;
 		break;
 	default:
 		shost_printk(KERN_WARNING, phba->shost,
@@ -440,7 +593,8 @@ static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
 						io_sgl_alloc_index];
 		phba->io_sgl_hndl_base[phba->io_sgl_alloc_index] = NULL;
 		phba->io_sgl_hndl_avbl--;
-		if (phba->io_sgl_alloc_index == (phba->params.ios_per_ctrl - 1))
+		if (phba->io_sgl_alloc_index == (phba->params.
+						 ios_per_ctrl - 1))
 			phba->io_sgl_alloc_index = 0;
 		else
 			phba->io_sgl_alloc_index++;
@@ -490,9 +644,18 @@ struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid,
 
 	phwi_ctrlr = phba->phwi_ctrlr;
 	pwrb_context = &phwi_ctrlr->wrb_context[cid];
-	pwrb_handle = pwrb_context->pwrb_handle_base[index];
-	pwrb_handle->wrb_index = index;
-	pwrb_handle->nxt_wrb_index = index;
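+	/*
+	 * Handles are now dealt out round-robin from alloc_index rather
+	 * than indexed directly; free_wrb_handle() advances the matching
+	 * free_index on release.
+	 */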
+	if (pwrb_context->wrb_handles_available) {
+		pwrb_handle = pwrb_context->pwrb_handle_base[
+					    pwrb_context->alloc_index];
+		pwrb_context->wrb_handles_available--;
+		pwrb_handle->nxt_wrb_index = pwrb_handle->wrb_index;
+		if (pwrb_context->alloc_index ==
+						(phba->params.wrbs_per_cxn - 1))
+			pwrb_context->alloc_index = 0;
+		else
+			pwrb_context->alloc_index++;
+	} else
+		pwrb_handle = NULL;
 	return pwrb_handle;
 }
 
@@ -508,11 +671,20 @@ static void
 free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
 		struct wrb_handle *pwrb_handle)
 {
+	if (!ring_mode)
+		pwrb_context->pwrb_handle_base[pwrb_context->free_index] =
+					       pwrb_handle;
+	pwrb_context->wrb_handles_available++;
+	if (pwrb_context->free_index == (phba->params.wrbs_per_cxn - 1))
+		pwrb_context->free_index = 0;
+	else
+		pwrb_context->free_index++;
+
 	SE_DEBUG(DBG_LVL_8,
-		 "FREE WRB: pwrb_handle=%p free_index=%d=0x%x"
+		 "FREE WRB: pwrb_handle=%p free_index=0x%x"
 		 "wrb_handles_available=%d \n",
 		 pwrb_handle, pwrb_context->free_index,
-		 pwrb_context->free_index, pwrb_context->wrb_handles_available);
+		 pwrb_context->wrb_handles_available);
 }
 
 static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
@@ -540,6 +712,8 @@ void
 free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
 {
 
+	SE_DEBUG(DBG_LVL_8, "In  free_mgmt_sgl_handle,eh_sgl_free_index=%d \n",
+			     phba->eh_sgl_free_index);
 	if (phba->eh_sgl_hndl_base[phba->eh_sgl_free_index]) {
 		/*
 		 * this can happen if clean_task is called on a task that
@@ -572,10 +746,10 @@ be_complete_io(struct beiscsi_conn *beiscsi_conn,
 	u32 resid = 0, exp_cmdsn, max_cmdsn;
 	u8 rsp, status, flags;
 
-	exp_cmdsn = be32_to_cpu(psol->
+	exp_cmdsn = (psol->
 			dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
 			& SOL_EXP_CMD_SN_MASK);
-	max_cmdsn = be32_to_cpu((psol->
+	max_cmdsn = ((psol->
 			dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
 			& SOL_EXP_CMD_SN_MASK) +
 			((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
@@ -610,9 +784,9 @@ be_complete_io(struct beiscsi_conn *beiscsi_conn,
 	}
 
 	if (status == SAM_STAT_CHECK_CONDITION) {
+		unsigned short *slen = (unsigned short *)sts_bhs->sense_info;
 		sense = sts_bhs->sense_info + sizeof(unsigned short);
-		sense_len =
-		    cpu_to_be16((unsigned short)(sts_bhs->sense_info[0]));
+		sense_len =  cpu_to_be16(*slen);
 		memcpy(task->sc->sense_buffer, sense,
 		       min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE));
 	}
@@ -620,8 +794,8 @@ be_complete_io(struct beiscsi_conn *beiscsi_conn,
 		if (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
 							& SOL_RES_CNT_MASK)
 			 conn->rxdata_octets += (psol->
-			      dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
-							& SOL_RES_CNT_MASK);
+			     dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
+			     & SOL_RES_CNT_MASK);
 	}
 unmap:
 	scsi_dma_unmap(io_task->scsi_cmnd);
@@ -633,6 +807,7 @@ be_complete_logout(struct beiscsi_conn *beiscsi_conn,
 		   struct iscsi_task *task, struct sol_cqe *psol)
 {
 	struct iscsi_logout_rsp *hdr;
+	struct beiscsi_io_task *io_task = task->dd_data;
 	struct iscsi_conn *conn = beiscsi_conn->conn;
 
 	hdr = (struct iscsi_logout_rsp *)task->hdr;
@@ -651,7 +826,7 @@ be_complete_logout(struct beiscsi_conn *beiscsi_conn,
 			((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
 					/ 32] & SOL_CMD_WND_MASK) >> 24) - 1);
 	hdr->hlength = 0;
-
+	hdr->itt = io_task->libiscsi_itt;
 	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
 }
 
@@ -661,6 +836,7 @@ be_complete_tmf(struct beiscsi_conn *beiscsi_conn,
 {
 	struct iscsi_tm_rsp *hdr;
 	struct iscsi_conn *conn = beiscsi_conn->conn;
+	struct beiscsi_io_task *io_task = task->dd_data;
 
 	hdr = (struct iscsi_tm_rsp *)task->hdr;
 	hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
@@ -668,11 +844,12 @@ be_complete_tmf(struct beiscsi_conn *beiscsi_conn,
 	hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
 					32] & SOL_RESP_MASK);
 	hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
-				     i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
+				    i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
 	hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
 			i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
 			((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
 			/ 32] & SOL_CMD_WND_MASK) >> 24) - 1);
+	hdr->itt = io_task->libiscsi_itt;
 	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
 }
 
@@ -681,18 +858,36 @@ hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
 		       struct beiscsi_hba *phba, struct sol_cqe *psol)
 {
 	struct hwi_wrb_context *pwrb_context;
-	struct wrb_handle *pwrb_handle;
+	struct wrb_handle *pwrb_handle = NULL;
+	struct sgl_handle *psgl_handle = NULL;
 	struct hwi_controller *phwi_ctrlr;
+	struct iscsi_task *task;
+	struct beiscsi_io_task *io_task;
 	struct iscsi_conn *conn = beiscsi_conn->conn;
 	struct iscsi_session *session = conn->session;
 
 	phwi_ctrlr = phba->phwi_ctrlr;
-	pwrb_context = &phwi_ctrlr->wrb_context[((psol->
+	if (ring_mode) {
+		psgl_handle = phba->sgl_hndl_array[((psol->
+			      dw[offsetof(struct amap_sol_cqe_ring, icd_index) /
+				32] & SOL_ICD_INDEX_MASK) >> 6)];
+		pwrb_context = &phwi_ctrlr->wrb_context[psgl_handle->cid];
+		task = psgl_handle->task;
+		pwrb_handle = NULL;
+	} else {
+		pwrb_context = &phwi_ctrlr->wrb_context[((psol->
 				dw[offsetof(struct amap_sol_cqe, cid) / 32] &
 				SOL_CID_MASK) >> 6)];
-	pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
+		pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
 				dw[offsetof(struct amap_sol_cqe, wrb_index) /
 				32] & SOL_WRB_INDEX_MASK) >> 16)];
+		task = pwrb_handle->pio_handle;
+	}
+
+	io_task = task->dd_data;
+	spin_lock(&phba->mgmt_sgl_lock);
+	free_mgmt_sgl_handle(phba, io_task->psgl_handle);
+	spin_unlock(&phba->mgmt_sgl_lock);
 	spin_lock_bh(&session->lock);
 	free_wrb_handle(phba, pwrb_context, pwrb_handle);
 	spin_unlock_bh(&session->lock);
@@ -704,6 +899,7 @@ be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn,
 {
 	struct iscsi_nopin *hdr;
 	struct iscsi_conn *conn = beiscsi_conn->conn;
+	struct beiscsi_io_task *io_task = task->dd_data;
 
 	hdr = (struct iscsi_nopin *)task->hdr;
 	hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
@@ -715,6 +911,7 @@ be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn,
 			((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
 			/ 32] & SOL_CMD_WND_MASK) >> 24) - 1);
 	hdr->opcode = ISCSI_OP_NOOP_IN;
+	hdr->itt = io_task->libiscsi_itt;
 	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
 }
 
@@ -726,25 +923,33 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
 	struct iscsi_wrb *pwrb = NULL;
 	struct hwi_controller *phwi_ctrlr;
 	struct iscsi_task *task;
-	struct beiscsi_io_task *io_task;
+	struct sgl_handle *psgl_handle = NULL;
+	unsigned int type;
 	struct iscsi_conn *conn = beiscsi_conn->conn;
 	struct iscsi_session *session = conn->session;
 
 	phwi_ctrlr = phba->phwi_ctrlr;
-
-	pwrb_context = &phwi_ctrlr->
-		wrb_context[((psol->dw[offsetof(struct amap_sol_cqe, cid) / 32]
-		& SOL_CID_MASK) >> 6)];
-	pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
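+	/*
+	 * In ring mode the CQE carries an ICD index that maps to an SGL
+	 * handle (and from there to the task); otherwise the cid and
+	 * wrb_index locate the WRB handle directly.
+	 */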
+	if (ring_mode) {
+		psgl_handle = phba->sgl_hndl_array[((psol->
+			      dw[offsetof(struct amap_sol_cqe_ring, icd_index) /
+			      32] & SOL_ICD_INDEX_MASK) >> 6)];
+		task = psgl_handle->task;
+		type = psgl_handle->type;
+	} else {
+		pwrb_context = &phwi_ctrlr->
+				wrb_context[((psol->dw[offsetof
+				(struct amap_sol_cqe, cid) / 32]
+				& SOL_CID_MASK) >> 6)];
+		pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
 				dw[offsetof(struct amap_sol_cqe, wrb_index) /
 				32] & SOL_WRB_INDEX_MASK) >> 16)];
-
-	task = pwrb_handle->pio_handle;
-	io_task = task->dd_data;
+		task = pwrb_handle->pio_handle;
+		pwrb = pwrb_handle->pwrb;
+		type = (pwrb->dw[offsetof(struct amap_iscsi_wrb, type) / 32] &
+			 WRB_TYPE_MASK) >> 28;
+	}
 	spin_lock_bh(&session->lock);
-	pwrb = pwrb_handle->pwrb;
-	switch ((pwrb->dw[offsetof(struct amap_iscsi_wrb, type) / 32] &
-		 WRB_TYPE_MASK) >> 28) {
+	switch (type) {
 	case HWH_TYPE_IO:
 	case HWH_TYPE_IO_RD:
 		if ((task->hdr->opcode & ISCSI_OPCODE_MASK) ==
@@ -773,12 +978,21 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
 		break;
 
 	default:
-		shost_printk(KERN_WARNING, phba->shost,
-			    "wrb_index 0x%x CID 0x%x\n",
-			    ((psol->dw[offsetof(struct amap_iscsi_wrb, type) /
-					32] & SOL_WRB_INDEX_MASK) >> 16),
-			    ((psol->dw[offsetof(struct amap_sol_cqe, cid) / 32]
-					& SOL_CID_MASK) >> 6));
+		if (ring_mode)
+			shost_printk(KERN_WARNING, phba->shost,
+				"In hwi_complete_cmd, unknown type = %d "
+				"icd_index 0x%x CID 0x%x\n", type,
+				((psol->dw[offsetof(struct amap_sol_cqe_ring,
+				icd_index) / 32] & SOL_ICD_INDEX_MASK) >> 6),
+				psgl_handle->cid);
+		else
+			shost_printk(KERN_WARNING, phba->shost,
+				"In hwi_complete_cmd, unknown type = %d "
+				"wrb_index 0x%x CID 0x%x\n", type,
+				((psol->dw[offsetof(struct amap_sol_cqe,
+				wrb_index) / 32] & SOL_WRB_INDEX_MASK) >> 16),
+				((psol->dw[offsetof(struct amap_sol_cqe,
+				cid) / 32] & SOL_CID_MASK) >> 6));
 		break;
 	}
 
@@ -1208,40 +1422,55 @@ static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn,
 	hwi_post_async_buffers(phba, pasync_handle->is_header);
 }
 
-static unsigned int beiscsi_process_cq(struct beiscsi_hba *phba)
+
+static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
 {
-	struct hwi_controller *phwi_ctrlr;
-	struct hwi_context_memory *phwi_context;
 	struct be_queue_info *cq;
 	struct sol_cqe *sol;
 	struct dmsg_cqe *dmsg;
 	unsigned int num_processed = 0;
 	unsigned int tot_nump = 0;
 	struct beiscsi_conn *beiscsi_conn;
+	struct sgl_handle *psgl_handle = NULL;
+	struct beiscsi_hba *phba;
 
-	phwi_ctrlr = phba->phwi_ctrlr;
-	phwi_context = phwi_ctrlr->phwi_ctxt;
-	cq = &phwi_context->be_cq;
+	cq = pbe_eq->cq;
 	sol = queue_tail_node(cq);
+	phba = pbe_eq->phba;
 
 	while (sol->dw[offsetof(struct amap_sol_cqe, valid) / 32] &
 	       CQE_VALID_MASK) {
 		be_dws_le_to_cpu(sol, sizeof(struct sol_cqe));
 
-		beiscsi_conn = phba->conn_table[(u32) (sol->
+		if (ring_mode) {
+			psgl_handle = phba->sgl_hndl_array[((sol->
+				      dw[offsetof(struct amap_sol_cqe_ring,
+				      icd_index) / 32] & SOL_ICD_INDEX_MASK)
+				      >> 6)];
+			beiscsi_conn = phba->conn_table[psgl_handle->cid];
+			if (!beiscsi_conn || !beiscsi_conn->ep) {
+				shost_printk(KERN_WARNING, phba->shost,
+				     "Connection table empty for cid = %d\n",
+				      psgl_handle->cid);
+				return 0;
+			}
+
+		} else {
+			beiscsi_conn = phba->conn_table[(u32) (sol->
 				 dw[offsetof(struct amap_sol_cqe, cid) / 32] &
 				 SOL_CID_MASK) >> 6];
 
-		if (!beiscsi_conn || !beiscsi_conn->ep) {
-			shost_printk(KERN_WARNING, phba->shost,
+			if (!beiscsi_conn || !beiscsi_conn->ep) {
+				shost_printk(KERN_WARNING, phba->shost,
 				     "Connection table empty for cid = %d\n",
 				     (u32)(sol->dw[offsetof(struct amap_sol_cqe,
 				     cid) / 32] & SOL_CID_MASK) >> 6);
-			return 0;
+				return 0;
+			}
 		}
 
 		if (num_processed >= 32) {
-			hwi_ring_cq_db(phba, phwi_context->be_cq.id,
+			hwi_ring_cq_db(phba, cq->id,
 					num_processed, 0, 0);
 			tot_nump += num_processed;
 			num_processed = 0;
@@ -1258,8 +1487,12 @@ static unsigned int beiscsi_process_cq(struct beiscsi_hba *phba)
 			hwi_complete_drvr_msgs(beiscsi_conn, phba, sol);
 			break;
 		case UNSOL_HDR_NOTIFY:
+			SE_DEBUG(DBG_LVL_8, "Received UNSOL_HDR_ NOTIFY\n");
+			hwi_process_default_pdu_ring(beiscsi_conn, phba,
+					     (struct i_t_dpdu_cqe *)sol);
+			break;
 		case UNSOL_DATA_NOTIFY:
-			SE_DEBUG(DBG_LVL_8, "Received UNSOL_HDR/DATA_NOTIFY\n");
+			SE_DEBUG(DBG_LVL_8, "Received UNSOL_DATA_NOTIFY\n");
 			hwi_process_default_pdu_ring(beiscsi_conn, phba,
 					     (struct i_t_dpdu_cqe *)sol);
 			break;
@@ -1278,13 +1511,21 @@ static unsigned int beiscsi_process_cq(struct beiscsi_hba *phba)
 		case CMD_CXN_KILLED_ITT_INVALID:
 		case CMD_CXN_KILLED_SEQ_OUTOFORDER:
 		case CMD_CXN_KILLED_INVALID_DATASN_RCVD:
-			SE_DEBUG(DBG_LVL_1,
+			if (ring_mode) {
+				SE_DEBUG(DBG_LVL_1,
+				 "CQ Error notification for cmd.. "
+				 "code %d cid 0x%x\n",
+				 sol->dw[offsetof(struct amap_sol_cqe, code) /
+				 32] & CQE_CODE_MASK, psgl_handle->cid);
+			} else {
+				SE_DEBUG(DBG_LVL_1,
 				 "CQ Error notification for cmd.. "
 				 "code %d cid 0x%x\n",
 				 sol->dw[offsetof(struct amap_sol_cqe, code) /
 				 32] & CQE_CODE_MASK,
 				 (sol->dw[offsetof(struct amap_sol_cqe, cid) /
 				 32] & SOL_CID_MASK));
+			}
 			break;
 		case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
 			SE_DEBUG(DBG_LVL_1,
@@ -1306,23 +1547,37 @@ static unsigned int beiscsi_process_cq(struct beiscsi_hba *phba)
 		case CXN_KILLED_OVER_RUN_RESIDUAL:
 		case CXN_KILLED_UNDER_RUN_RESIDUAL:
 		case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN:
-			SE_DEBUG(DBG_LVL_1, "CQ Error %d, resetting CID "
+			if (ring_mode) {
+				SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset CID "
+				 "0x%x...\n",
+				 sol->dw[offsetof(struct amap_sol_cqe, code) /
+				 32] & CQE_CODE_MASK, psgl_handle->cid);
+			} else {
+				SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset CID "
 				 "0x%x...\n",
 				 sol->dw[offsetof(struct amap_sol_cqe, code) /
 				 32] & CQE_CODE_MASK,
 				 sol->dw[offsetof(struct amap_sol_cqe, cid) /
 				 32] & CQE_CID_MASK);
+			}
 			iscsi_conn_failure(beiscsi_conn->conn,
 					   ISCSI_ERR_CONN_FAILED);
 			break;
 		case CXN_KILLED_RST_SENT:
 		case CXN_KILLED_RST_RCVD:
-			SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset received/sent "
-				 "on CID 0x%x...\n",
+			if (ring_mode) {
+				SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset"
+				"received/sent on CID 0x%x...\n",
+				 sol->dw[offsetof(struct amap_sol_cqe, code) /
+				 32] & CQE_CODE_MASK, psgl_handle->cid);
+			} else {
+				SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset"
+				"received/sent on CID 0x%x...\n",
 				 sol->dw[offsetof(struct amap_sol_cqe, code) /
 				 32] & CQE_CODE_MASK,
 				 sol->dw[offsetof(struct amap_sol_cqe, cid) /
 				 32] & CQE_CID_MASK);
+			}
 			iscsi_conn_failure(beiscsi_conn->conn,
 					   ISCSI_ERR_CONN_FAILED);
 			break;
@@ -1344,8 +1599,7 @@ static unsigned int beiscsi_process_cq(struct beiscsi_hba *phba)
 
 	if (num_processed > 0) {
 		tot_nump += num_processed;
-		hwi_ring_cq_db(phba, phwi_context->be_cq.id, num_processed,
-			       1, 0);
+		hwi_ring_cq_db(phba, cq->id, num_processed, 1, 0);
 	}
 	return tot_nump;
 }
@@ -1353,21 +1607,30 @@ static unsigned int beiscsi_process_cq(struct beiscsi_hba *phba)
 static void beiscsi_process_all_cqs(struct work_struct *work)
 {
 	unsigned long flags;
+	struct hwi_controller *phwi_ctrlr;
+	struct hwi_context_memory *phwi_context;
+	struct be_eq_obj *pbe_eq;
 	struct beiscsi_hba *phba =
 	    container_of(work, struct beiscsi_hba, work_cqs);
 
+	phwi_ctrlr = phba->phwi_ctrlr;
+	phwi_context = phwi_ctrlr->phwi_ctxt;
+	if (phba->msix_enabled)
+		pbe_eq = &phwi_context->be_eq[phba->num_cpus];
+	else
+		pbe_eq = &phwi_context->be_eq[0];
+
 	if (phba->todo_mcc_cq) {
 		spin_lock_irqsave(&phba->isr_lock, flags);
 		phba->todo_mcc_cq = 0;
 		spin_unlock_irqrestore(&phba->isr_lock, flags);
-		SE_DEBUG(DBG_LVL_1, "MCC Interrupt Not expected \n");
 	}
 
 	if (phba->todo_cq) {
 		spin_lock_irqsave(&phba->isr_lock, flags);
 		phba->todo_cq = 0;
 		spin_unlock_irqrestore(&phba->isr_lock, flags);
-		beiscsi_process_cq(phba);
+		beiscsi_process_cq(pbe_eq);
 	}
 }
 
@@ -1375,19 +1638,15 @@ static int be_iopoll(struct blk_iopoll *iop, int budget)
 {
 	static unsigned int ret;
 	struct beiscsi_hba *phba;
+	struct be_eq_obj *pbe_eq;
 
-	phba = container_of(iop, struct beiscsi_hba, iopoll);
-
-	ret = beiscsi_process_cq(phba);
+	pbe_eq = container_of(iop, struct be_eq_obj, iopoll);
+	ret = beiscsi_process_cq(pbe_eq);
 	if (ret < budget) {
-		struct hwi_controller *phwi_ctrlr;
-		struct hwi_context_memory *phwi_context;
-
-		phwi_ctrlr = phba->phwi_ctrlr;
-		phwi_context = phwi_ctrlr->phwi_ctxt;
+		phba = pbe_eq->phba;
 		blk_iopoll_complete(iop);
-		hwi_ring_eq_db(phba, phwi_context->be_eq.q.id, 0,
-							0, 1, 1);
+		SE_DEBUG(DBG_LVL_8, "rearm pbe_eq->q.id =%d\n", pbe_eq->q.id);
+		hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
 	}
 	return ret;
 }
@@ -1537,14 +1796,12 @@ static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
 
 static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
 {
-	unsigned int num_cq_pages, num_eq_pages, num_async_pdu_buf_pages;
+	unsigned int num_cq_pages, num_async_pdu_buf_pages;
 	unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn;
 	unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages;
 
 	num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
 				      sizeof(struct sol_cqe));
-	num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries * \
-				      sizeof(struct be_eq_entry));
 	num_async_pdu_buf_pages =
 			PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
 				       phba->params.defpdu_hdr_sz);
@@ -1565,8 +1822,6 @@ static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
 	phba->mem_req[HWI_MEM_ADDN_CONTEXT] =
 					    sizeof(struct hwi_context_memory);
 
-	phba->mem_req[HWI_MEM_CQ] = num_cq_pages * PAGE_SIZE;
-	phba->mem_req[HWI_MEM_EQ] = num_eq_pages * PAGE_SIZE;
 
 	phba->mem_req[HWI_MEM_WRB] = sizeof(struct iscsi_wrb)
 	    * (phba->params.wrbs_per_cxn)
@@ -1751,8 +2006,6 @@ static void beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
 
 	for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
 		pwrb_context = &phwi_ctrlr->wrb_context[index];
-		SE_DEBUG(DBG_LVL_8, "cid=%d pwrb_context=%p \n", index,
-						pwrb_context);
 		pwrb_context->pwrb_handle_base =
 				kzalloc(sizeof(struct wrb_handle *) *
 					phba->params.wrbs_per_cxn, GFP_KERNEL);
@@ -1767,6 +2020,7 @@ static void beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
 				pwrb_context->pwrb_handle_basestd[j] =
 								pwrb_handle;
 				pwrb_context->wrb_handles_available++;
+				pwrb_handle->wrb_index = j;
 				pwrb_handle++;
 			}
 			pwrb_context->free_index = 0;
@@ -1785,6 +2039,7 @@ static void beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
 				pwrb_context->pwrb_handle_basestd[j] =
 				    pwrb_handle;
 				pwrb_context->wrb_handles_available++;
+				pwrb_handle->wrb_index = j;
 				pwrb_handle++;
 			}
 			pwrb_context->free_index = 0;
@@ -2042,79 +2297,126 @@ static int be_fill_queue(struct be_queue_info *q,
 	return 0;
 }
 
-static int beiscsi_create_eq(struct beiscsi_hba *phba,
+static int beiscsi_create_eqs(struct beiscsi_hba *phba,
 			     struct hwi_context_memory *phwi_context)
 {
-	unsigned int idx;
-	int ret;
+	unsigned int i, num_eq_pages;
+	int ret, eq_for_mcc;
 	struct be_queue_info *eq;
 	struct be_dma_mem *mem;
-	struct be_mem_descriptor *mem_descr;
 	void *eq_vaddress;
+	dma_addr_t paddr;
 
-	idx = 0;
-	eq = &phwi_context->be_eq.q;
-	mem = &eq->dma_mem;
-	mem_descr = phba->init_mem;
-	mem_descr += HWI_MEM_EQ;
-	eq_vaddress = mem_descr->mem_array[idx].virtual_address;
-
-	ret = be_fill_queue(eq, phba->params.num_eq_entries,
-			    sizeof(struct be_eq_entry), eq_vaddress);
-	if (ret) {
-		shost_printk(KERN_ERR, phba->shost,
-			     "be_fill_queue Failed for EQ \n");
-		return ret;
-	}
+	num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries * \
+				      sizeof(struct be_eq_entry));
 
-	mem->dma = mem_descr->mem_array[idx].bus_address.u.a64.address;
+	if (phba->msix_enabled)
+		eq_for_mcc = 1;
+	else
+		eq_for_mcc = 0;
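+	/*
+	 * One event queue per CPU for I/O completions, plus a dedicated
+	 * EQ for the MCC completion queue when MSI-X is enabled.
+	 */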
+	for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
+		eq = &phwi_context->be_eq[i].q;
+		mem = &eq->dma_mem;
+		phwi_context->be_eq[i].phba = phba;
+		eq_vaddress = pci_alloc_consistent(phba->pcidev,
+						     num_eq_pages * PAGE_SIZE,
+						     &paddr);
+		if (!eq_vaddress)
+			goto create_eq_error;
+
+		mem->va = eq_vaddress;
+		ret = be_fill_queue(eq, phba->params.num_eq_entries,
+				    sizeof(struct be_eq_entry), eq_vaddress);
+		if (ret) {
+			shost_printk(KERN_ERR, phba->shost,
+				     "be_fill_queue Failed for EQ \n");
+			goto create_eq_error;
+		}
 
-	ret = beiscsi_cmd_eq_create(&phba->ctrl, eq,
-				    phwi_context->be_eq.cur_eqd);
-	if (ret) {
-		shost_printk(KERN_ERR, phba->shost, "beiscsi_cmd_eq_create"
-			     "Failedfor EQ \n");
-		return ret;
+		mem->dma = paddr;
+		ret = beiscsi_cmd_eq_create(&phba->ctrl, eq,
+					    phwi_context->cur_eqd);
+		if (ret) {
+			shost_printk(KERN_ERR, phba->shost,
+				     "beiscsi_cmd_eq_create "
+				     "Failed for EQ\n");
+			goto create_eq_error;
+		}
+		SE_DEBUG(DBG_LVL_8, "eqid = %d\n", phwi_context->be_eq[i].q.id);
 	}
-	SE_DEBUG(DBG_LVL_8, "eq id is %d\n", phwi_context->be_eq.q.id);
 	return 0;
+create_eq_error:
+	for (i = 0; i < (phba->num_cpus + 1); i++) {
+		eq = &phwi_context->be_eq[i].q;
+		mem = &eq->dma_mem;
+		if (mem->va)
+			pci_free_consistent(phba->pcidev, num_eq_pages
+					    * PAGE_SIZE,
+					    mem->va, mem->dma);
+	}
+	return ret;
 }
 
-static int beiscsi_create_cq(struct beiscsi_hba *phba,
+static int beiscsi_create_cqs(struct beiscsi_hba *phba,
 			     struct hwi_context_memory *phwi_context)
 {
-	unsigned int idx;
+	unsigned int i, num_cq_pages;
 	int ret;
 	struct be_queue_info *cq, *eq;
 	struct be_dma_mem *mem;
-	struct be_mem_descriptor *mem_descr;
+	struct be_eq_obj *pbe_eq;
 	void *cq_vaddress;
+	dma_addr_t paddr;
 
-	idx = 0;
-	cq = &phwi_context->be_cq;
-	eq = &phwi_context->be_eq.q;
-	mem = &cq->dma_mem;
-	mem_descr = phba->init_mem;
-	mem_descr += HWI_MEM_CQ;
-	cq_vaddress = mem_descr->mem_array[idx].virtual_address;
-	ret = be_fill_queue(cq, phba->params.icds_per_ctrl / 2,
-			    sizeof(struct sol_cqe), cq_vaddress);
-	if (ret) {
-		shost_printk(KERN_ERR, phba->shost,
-			     "be_fill_queue Failed for ISCSI CQ \n");
-		return ret;
-	}
+	num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
+				      sizeof(struct sol_cqe));
 
-	mem->dma = mem_descr->mem_array[idx].bus_address.u.a64.address;
-	ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false, false, 0);
-	if (ret) {
-		shost_printk(KERN_ERR, phba->shost,
-			     "beiscsi_cmd_eq_create Failed for ISCSI CQ \n");
-		return ret;
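+	/*
+	 * One completion queue per CPU, each bound 1:1 to the matching
+	 * event queue and backed by coherent DMA pages instead of the
+	 * old HWI_MEM_CQ pool.
+	 */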
+	for (i = 0; i < phba->num_cpus; i++) {
+		cq = &phwi_context->be_cq[i];
+		eq = &phwi_context->be_eq[i].q;
+		pbe_eq = &phwi_context->be_eq[i];
+		pbe_eq->cq = cq;
+		pbe_eq->phba = phba;
+		mem = &cq->dma_mem;
+		cq_vaddress = pci_alloc_consistent(phba->pcidev,
+						     num_cq_pages * PAGE_SIZE,
+						     &paddr);
+		if (!cq_vaddress)
+			goto create_cq_error;
+		ret = be_fill_queue(cq, phba->params.icds_per_ctrl / 2,
+				    sizeof(struct sol_cqe), cq_vaddress);
+		if (ret) {
+			shost_printk(KERN_ERR, phba->shost,
+				     "be_fill_queue Failed for ISCSI CQ \n");
+			goto create_cq_error;
+		}
+
+		mem->dma = paddr;
+		ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false,
+					    false, 0);
+		if (ret) {
+			shost_printk(KERN_ERR, phba->shost,
+				     "beiscsi_cmd_cq_create "
+				     "Failed for ISCSI CQ\n");
+			goto create_cq_error;
+		}
+		SE_DEBUG(DBG_LVL_8, "iscsi cq_id is %d for eq_id %d\n",
+						 cq->id, eq->id);
+		SE_DEBUG(DBG_LVL_8, "ISCSI CQ CREATED\n");
 	}
-	SE_DEBUG(DBG_LVL_8, "iscsi cq id is %d\n", phwi_context->be_cq.id);
-	SE_DEBUG(DBG_LVL_8, "ISCSI CQ CREATED\n");
 	return 0;
+
+create_cq_error:
+	for (i = 0; i < phba->num_cpus; i++) {
+		cq = &phwi_context->be_cq[i];
+		mem = &cq->dma_mem;
+		if (mem->va)
+			pci_free_consistent(phba->pcidev, num_cq_pages
+					    * PAGE_SIZE,
+					    mem->va, mem->dma);
+	}
+	return ret;
 }
 
 static int
@@ -2132,7 +2434,7 @@ beiscsi_create_def_hdr(struct beiscsi_hba *phba,
 
 	idx = 0;
 	dq = &phwi_context->be_def_hdrq;
-	cq = &phwi_context->be_cq;
+	cq = &phwi_context->be_cq[0];
 	mem = &dq->dma_mem;
 	mem_descr = phba->init_mem;
 	mem_descr += HWI_MEM_ASYNC_HEADER_RING;
@@ -2176,7 +2478,7 @@ beiscsi_create_def_data(struct beiscsi_hba *phba,
 
 	idx = 0;
 	dataq = &phwi_context->be_def_dataq;
-	cq = &phwi_context->be_cq;
+	cq = &phwi_context->be_cq[0];
 	mem = &dataq->dma_mem;
 	mem_descr = phba->init_mem;
 	mem_descr += HWI_MEM_ASYNC_DATA_RING;
@@ -2239,6 +2541,30 @@ beiscsi_post_pages(struct beiscsi_hba *phba)
 	return 0;
 }
 
+static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q)
+{
+	struct be_dma_mem *mem = &q->dma_mem;
+	if (mem->va)
+		pci_free_consistent(phba->pcidev, mem->size,
+			mem->va, mem->dma);
+}
+
+static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q,
+		u16 len, u16 entry_size)
+{
+	struct be_dma_mem *mem = &q->dma_mem;
+
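+	/*
+	 * A single coherent DMA allocation of len * entry_size bytes;
+	 * callers pair this with be_queue_free() on teardown.
+	 */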
+	memset(q, 0, sizeof(*q));
+	q->len = len;
+	q->entry_size = entry_size;
+	mem->size = len * entry_size;
+	mem->va = pci_alloc_consistent(phba->pcidev, mem->size, &mem->dma);
+	if (!mem->va)
+		return -1;
+	memset(mem->va, 0, mem->size);
+	return 0;
+}
+
 static int
 beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
 			 struct hwi_context_memory *phwi_context,
@@ -2328,13 +2654,29 @@ static void free_wrb_handles(struct beiscsi_hba *phba)
 	}
 }
 
+static void be_mcc_queues_destroy(struct beiscsi_hba *phba)
+{
+	struct be_queue_info *q;
+	struct be_ctrl_info *ctrl = &phba->ctrl;
+
+	q = &phba->ctrl.mcc_obj.q;
+	if (q->created)
+		beiscsi_cmd_q_destroy(ctrl, q, QTYPE_MCCQ);
+	be_queue_free(phba, q);
+
+	q = &phba->ctrl.mcc_obj.cq;
+	if (q->created)
+		beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
+	be_queue_free(phba, q);
+}
+
 static void hwi_cleanup(struct beiscsi_hba *phba)
 {
 	struct be_queue_info *q;
 	struct be_ctrl_info *ctrl = &phba->ctrl;
 	struct hwi_controller *phwi_ctrlr;
 	struct hwi_context_memory *phwi_context;
-	int i;
+	int i, eq_num;
 
 	phwi_ctrlr = phba->phwi_ctrlr;
 	phwi_context = phwi_ctrlr->phwi_ctxt;
@@ -2343,7 +2685,6 @@ static void hwi_cleanup(struct beiscsi_hba *phba)
 		if (q->created)
 			beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ);
 	}
-
 	free_wrb_handles(phba);
 
 	q = &phwi_context->be_def_hdrq;
@@ -2356,13 +2697,76 @@ static void hwi_cleanup(struct beiscsi_hba *phba)
 
 	beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
 
-	q = &phwi_context->be_cq;
-	if (q->created)
-		beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
+	for (i = 0; i < (phba->num_cpus); i++) {
+		q = &phwi_context->be_cq[i];
+		if (q->created)
+			beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
+	}
+	if (phba->msix_enabled)
+		eq_num = 1;
+	else
+		eq_num = 0;
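+	/*
+	 * The per-CPU I/O CQs go down before their parent EQs; eq_num
+	 * accounts for the extra MCC event queue used with MSI-X.
+	 */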
+	for (i = 0; i < (phba->num_cpus + eq_num); i++) {
+		q = &phwi_context->be_eq[i].q;
+		if (q->created)
+			beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
+	}
+	be_mcc_queues_destroy(phba);
+}
 
-	q = &phwi_context->be_eq.q;
-	if (q->created)
-		beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
+static int be_mcc_queues_create(struct beiscsi_hba *phba,
+				struct hwi_context_memory *phwi_context)
+{
+	struct be_queue_info *q, *cq;
+	struct be_ctrl_info *ctrl = &phba->ctrl;
+
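+	/*
+	 * Creation order matters: the completion queue must exist before
+	 * the MCC queue that points at it, and the error path unwinds in
+	 * reverse.
+	 */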
+	/* Alloc MCC compl queue */
+	cq = &phba->ctrl.mcc_obj.cq;
+	if (be_queue_alloc(phba, cq, MCC_CQ_LEN,
+			sizeof(struct be_mcc_compl)))
+		goto err;
+	/* Ask BE to create MCC compl queue; */
+	if (phba->msix_enabled) {
+		if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq
+					 [phba->num_cpus].q, false, true, 0))
+			goto mcc_cq_free;
+	} else {
+		if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq[0].q,
+					  false, true, 0))
+			goto mcc_cq_free;
+	}
+
+	/* Alloc MCC queue */
+	q = &phba->ctrl.mcc_obj.q;
+	if (be_queue_alloc(phba, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
+		goto mcc_cq_destroy;
+
+	/* Ask BE to create MCC queue */
+	if (beiscsi_cmd_mccq_create(phba, q, cq))
+		goto mcc_q_free;
+
+	return 0;
+
+mcc_q_free:
+	be_queue_free(phba, q);
+mcc_cq_destroy:
+	beiscsi_cmd_q_destroy(ctrl, cq, QTYPE_CQ);
+mcc_cq_free:
+	be_queue_free(phba, cq);
+err:
+	return -1;
+}
+
+static int find_num_cpus(void)
+{
+	int num_cpus = 0;
+
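+	/*
+	 * Capped below MAX_CPUS, presumably to leave room in the be_eq[]
+	 * array for the extra MCC event queue used with MSI-X.
+	 */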
+	num_cpus = num_online_cpus();
+	if (num_cpus >= MAX_CPUS)
+		num_cpus = MAX_CPUS - 1;
+
+	SE_DEBUG(DBG_LVL_8, "num_cpus = %d \n", num_cpus);
+	return num_cpus;
 }
 
 static int hwi_init_port(struct beiscsi_hba *phba)
@@ -2376,26 +2780,33 @@ static int hwi_init_port(struct beiscsi_hba *phba)
 	def_pdu_ring_sz =
 		phba->params.asyncpdus_per_ctrl * sizeof(struct phys_addr);
 	phwi_ctrlr = phba->phwi_ctrlr;
-
 	phwi_context = phwi_ctrlr->phwi_ctxt;
-	phwi_context->be_eq.max_eqd = 0;
-	phwi_context->be_eq.min_eqd = 0;
-	phwi_context->be_eq.cur_eqd = 64;
-	phwi_context->be_eq.enable_aic = false;
+	phwi_context->max_eqd = 0;
+	phwi_context->min_eqd = 0;
+	phwi_context->cur_eqd = 64;
 	be_cmd_fw_initialize(&phba->ctrl);
-	status = beiscsi_create_eq(phba, phwi_context);
+
+	status = beiscsi_create_eqs(phba, phwi_context);
 	if (status != 0) {
 		shost_printk(KERN_ERR, phba->shost, "EQ not created \n");
 		goto error;
 	}
 
-	status = mgmt_check_supported_fw(ctrl);
+	status = be_mcc_queues_create(phba, phwi_context);
+	if (status != 0)
+		goto error;
+
+	status = mgmt_check_supported_fw(ctrl, phba);
 	if (status != 0) {
 		shost_printk(KERN_ERR, phba->shost,
 			     "Unsupported fw version \n");
 		goto error;
 	}
 
+	if (phba->fw_config.iscsi_features == 0x1)
+		ring_mode = 1;
+	else
+		ring_mode = 0;
 	status = mgmt_get_fw_config(ctrl, phba);
 	if (status != 0) {
 		shost_printk(KERN_ERR, phba->shost,
@@ -2403,7 +2814,7 @@ static int hwi_init_port(struct beiscsi_hba *phba)
 		goto error;
 	}
 
-	status = beiscsi_create_cq(phba, phwi_context);
+	status = beiscsi_create_cqs(phba, phwi_context);
 	if (status != 0) {
 		shost_printk(KERN_ERR, phba->shost, "CQ not created\n");
 		goto error;
@@ -2447,7 +2858,6 @@ error:
 	return -ENOMEM;
 }
 
-
 static int hwi_init_controller(struct beiscsi_hba *phba)
 {
 	struct hwi_controller *phwi_ctrlr;
@@ -2530,6 +2940,18 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
 
 	phba->io_sgl_hndl_avbl = 0;
 	phba->eh_sgl_hndl_avbl = 0;
+
+	if (ring_mode) {
+		phba->sgl_hndl_array = kzalloc(sizeof(struct sgl_handle *) *
+					      phba->params.icds_per_ctrl,
+						 GFP_KERNEL);
+		if (!phba->sgl_hndl_array) {
+			shost_printk(KERN_ERR, phba->shost,
+			     "Mem Alloc Failed. Failing to load\n");
+			return -ENOMEM;
+		}
+	}
+
 	mem_descr_sglh = phba->init_mem;
 	mem_descr_sglh += HWI_MEM_SGLH;
 	if (1 == mem_descr_sglh->num_elements) {
@@ -2537,6 +2959,8 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
 						 phba->params.ios_per_ctrl,
 						 GFP_KERNEL);
 		if (!phba->io_sgl_hndl_base) {
+			if (ring_mode)
+				kfree(phba->sgl_hndl_array);
 			shost_printk(KERN_ERR, phba->shost,
 				     "Mem Alloc Failed. Failing to load\n");
 			return -ENOMEM;
@@ -2656,13 +3080,12 @@ static unsigned char hwi_enable_intr(struct beiscsi_hba *phba)
 	struct hwi_context_memory *phwi_context;
 	struct be_queue_info *eq;
 	u8 __iomem *addr;
-	u32 reg;
+	u32 reg, i;
 	u32 enabled;
 
 	phwi_ctrlr = phba->phwi_ctrlr;
 	phwi_context = phwi_ctrlr->phwi_ctxt;
 
-	eq = &phwi_context->be_eq.q;
 	addr = (u8 __iomem *) ((u8 __iomem *) ctrl->pcicfg +
 			PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET);
 	reg = ioread32(addr);
@@ -2673,9 +3096,11 @@ static unsigned char hwi_enable_intr(struct beiscsi_hba *phba)
 		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
 		SE_DEBUG(DBG_LVL_8, "reg =x%08x addr=%p \n", reg, addr);
 		iowrite32(reg, addr);
-		SE_DEBUG(DBG_LVL_8, "eq->id=%d \n", eq->id);
-
-		hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
+		for (i = 0; i <= phba->num_cpus; i++) {
+			eq = &phwi_context->be_eq[i].q;
+			SE_DEBUG(DBG_LVL_8, "eq->id=%d \n", eq->id);
+			hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
+		}
 	} else
 		shost_printk(KERN_WARNING, phba->shost,
 			     "In hwi_enable_intr, Not Enabled \n");
@@ -2720,6 +3145,8 @@ static int beiscsi_init_port(struct beiscsi_hba *phba)
 	if (hba_setup_cid_tbls(phba)) {
 		shost_printk(KERN_ERR, phba->shost,
 			     "Failed in hba_setup_cid_tbls\n");
+		if (ring_mode)
+			kfree(phba->sgl_hndl_array);
 		kfree(phba->io_sgl_hndl_base);
 		kfree(phba->eh_sgl_hndl_base);
 		goto do_cleanup_ctrlr;
@@ -2738,17 +3165,25 @@ static void hwi_purge_eq(struct beiscsi_hba *phba)
 	struct hwi_context_memory *phwi_context;
 	struct be_queue_info *eq;
 	struct be_eq_entry *eqe = NULL;
+	int i, eq_msix;
 
 	phwi_ctrlr = phba->phwi_ctrlr;
 	phwi_context = phwi_ctrlr->phwi_ctxt;
-	eq = &phwi_context->be_eq.q;
-	eqe = queue_tail_node(eq);
+	if (phba->msix_enabled)
+		eq_msix = 1;
+	else
+		eq_msix = 0;
 
-	while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
-						& EQE_VALID_MASK) {
-		AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
-		queue_tail_inc(eq);
+	for (i = 0; i < (phba->num_cpus + eq_msix); i++) {
+		eq = &phwi_context->be_eq[i].q;
 		eqe = queue_tail_node(eq);
+
+		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
+					& EQE_VALID_MASK) {
+			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
+			queue_tail_inc(eq);
+			eqe = queue_tail_node(eq);
+		}
 	}
 }
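
The rewritten hwi_purge_eq() applies one drain loop to every event queue: start at the ring tail, consume entries while their valid bit is set, and clear the bit as each entry is retired. A minimal userspace model of that ring walk, assuming a power-of-two ring size (the struct and names below are illustrative, not the driver's):

#include <stdint.h>
#include <stdio.h>

#define RING_LEN 8			/* power of two, like the hardware ring */

struct eqe { uint32_t valid; };

static unsigned int purge(struct eqe *ring, unsigned int tail)
{
	while (ring[tail].valid) {
		ring[tail].valid = 0;		/* retire the entry */
		tail = (tail + 1) & (RING_LEN - 1);
	}
	return tail;				/* new tail position */
}

int main(void)
{
	struct eqe ring[RING_LEN] = { [0].valid = 1, [1].valid = 1 };

	printf("tail after purge: %u\n", purge(ring, 0));	/* prints 2 */
	return 0;
}
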
 
@@ -2762,6 +3197,8 @@ static void beiscsi_clean_port(struct beiscsi_hba *phba)
 			     "mgmt_epfw_cleanup FAILED \n");
 	hwi_cleanup(phba);
 	hwi_purge_eq(phba);
+	if (ring_mode)
+		kfree(phba->sgl_hndl_array);
 	kfree(phba->io_sgl_hndl_base);
 	kfree(phba->eh_sgl_hndl_base);
 	kfree(phba->cid_array);
@@ -2846,8 +3283,9 @@ beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
 	be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_target_context_update_wrb));
 
 	doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
-	doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK) <<
-					DB_DEF_PDU_WRB_INDEX_SHIFT;
+	if (!ring_mode)
+		doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK)
+			     << DB_DEF_PDU_WRB_INDEX_SHIFT;
 	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
 
 	iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
@@ -2856,7 +3294,7 @@ beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
 static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt,
 			      int *index, int *age)
 {
-	*index = be32_to_cpu(itt) >> 16;
+	*index = (int)itt;
 	if (age)
 		*age = conn->session->age;
 }
@@ -2885,15 +3323,13 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
 
 	io_task->cmd_bhs = pci_pool_alloc(beiscsi_sess->bhs_pool,
 					  GFP_KERNEL, &paddr);
-
 	if (!io_task->cmd_bhs)
 		return -ENOMEM;
-
 	io_task->bhs_pa.u.a64.address = paddr;
+	io_task->libiscsi_itt = (itt_t)task->itt;
 	io_task->pwrb_handle = alloc_wrb_handle(phba,
 						beiscsi_conn->beiscsi_conn_cid,
 						task->itt);
-	io_task->pwrb_handle->pio_handle = task;
 	io_task->conn = beiscsi_conn;
 
 	task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr;
@@ -2905,7 +3341,6 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
 		spin_unlock(&phba->io_sgl_lock);
 		if (!io_task->psgl_handle)
 			goto free_hndls;
-
 	} else {
 		io_task->scsi_cmnd = NULL;
 		if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) {
@@ -2932,8 +3367,18 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
 				goto free_hndls;
 		}
 	}
-	itt = (itt_t) cpu_to_be32(((unsigned int)task->itt << 16) |
-			(unsigned int)(io_task->psgl_handle->sgl_index));
+	itt = (itt_t) cpu_to_be32(((unsigned int)io_task->pwrb_handle->
+				 wrb_index << 16) | (unsigned int)
+				(io_task->psgl_handle->sgl_index));
+	if (ring_mode) {
+		phba->sgl_hndl_array[io_task->psgl_handle->sgl_index -
+				     phba->fw_config.iscsi_cid_start] =
+				     io_task->psgl_handle;
+		io_task->psgl_handle->task = task;
+		io_task->psgl_handle->cid = beiscsi_conn->beiscsi_conn_cid;
+	} else
+		io_task->pwrb_handle->pio_handle = task;
+
 	io_task->cmd_bhs->iscsi_hdr.itt = itt;
 	return 0;
 
@@ -3006,7 +3451,6 @@ static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
 	io_task->bhs_len = sizeof(struct be_cmd_bhs);
 
 	if (writedir) {
-		SE_DEBUG(DBG_LVL_4, " WRITE Command \t");
 		memset(&io_task->cmd_bhs->iscsi_data_pdu, 0, 48);
 		AMAP_SET_BITS(struct amap_pdu_data_out, itt,
 			      &io_task->cmd_bhs->iscsi_data_pdu,
@@ -3016,11 +3460,18 @@ static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
 			      ISCSI_OPCODE_SCSI_DATA_OUT);
 		AMAP_SET_BITS(struct amap_pdu_data_out, final_bit,
 			      &io_task->cmd_bhs->iscsi_data_pdu, 1);
-		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, INI_WR_CMD);
+		if (ring_mode)
+			io_task->psgl_handle->type = INI_WR_CMD;
+		else
+			AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
+				      INI_WR_CMD);
 		AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
 	} else {
-		SE_DEBUG(DBG_LVL_4, "READ Command \t");
-		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, INI_RD_CMD);
+		if (ring_mode)
+			io_task->psgl_handle->type = INI_RD_CMD;
+		else
+			AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
+				      INI_RD_CMD);
 		AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
 	}
 	memcpy(&io_task->cmd_bhs->iscsi_data_pdu.
@@ -3045,7 +3496,8 @@ static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
 	be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
 
 	doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
-	doorbell |= (io_task->pwrb_handle->wrb_index &
+	if (!ring_mode)
+		doorbell |= (io_task->pwrb_handle->wrb_index &
 		     DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
 	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
 
@@ -3059,10 +3511,16 @@ static int beiscsi_mtask(struct iscsi_task *task)
 	struct iscsi_conn *conn = task->conn;
 	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
 	struct beiscsi_hba *phba = beiscsi_conn->phba;
+	struct iscsi_session *session;
 	struct iscsi_wrb *pwrb = NULL;
+	struct hwi_controller *phwi_ctrlr;
+	struct hwi_wrb_context *pwrb_context;
+	struct wrb_handle *pwrb_handle;
 	unsigned int doorbell = 0;
+	unsigned int i, cid;
 	struct iscsi_task *aborted_task;
 
+	cid = beiscsi_conn->beiscsi_conn_cid;
 	pwrb = io_task->pwrb_handle->pwrb;
 	AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
 		      be32_to_cpu(task->cmdsn));
@@ -3073,38 +3531,63 @@ static int beiscsi_mtask(struct iscsi_task *task)
 
 	switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
 	case ISCSI_OP_LOGIN:
-		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, TGT_DM_CMD);
+		if (ring_mode)
+			io_task->psgl_handle->type = TGT_DM_CMD;
+		else
+			AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
+				      TGT_DM_CMD);
 		AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
 		AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1);
 		hwi_write_buffer(pwrb, task);
 		break;
 	case ISCSI_OP_NOOP_OUT:
-		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, INI_RD_CMD);
+		if (ring_mode)
+			io_task->psgl_handle->type = INI_RD_CMD;
+		else
+			AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
+				      INI_RD_CMD);
 		hwi_write_buffer(pwrb, task);
 		break;
 	case ISCSI_OP_TEXT:
-		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, INI_WR_CMD);
+		if (ring_mode)
+			io_task->psgl_handle->type = INI_WR_CMD;
+		else
+			AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
+				      INI_WR_CMD);
 		AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
 		hwi_write_buffer(pwrb, task);
 		break;
 	case ISCSI_OP_SCSI_TMFUNC:
-		aborted_task = iscsi_itt_to_task(conn,
-					((struct iscsi_tm *)task->hdr)->rtt);
+		session = conn->session;
+		i = ((struct iscsi_tm *)task->hdr)->rtt;
+		phwi_ctrlr = phba->phwi_ctrlr;
+		pwrb_context = &phwi_ctrlr->wrb_context[cid];
+		pwrb_handle = pwrb_context->pwrb_handle_basestd[be32_to_cpu(i)
+								>> 16];
+		aborted_task = pwrb_handle->pio_handle;
 		 if (!aborted_task)
 			return 0;
+
 		aborted_io_task = aborted_task->dd_data;
 		if (!aborted_io_task->scsi_cmnd)
 			return 0;
 
 		mgmt_invalidate_icds(phba,
 				     aborted_io_task->psgl_handle->sgl_index,
-				     beiscsi_conn->beiscsi_conn_cid);
-		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, INI_TMF_CMD);
+				     cid);
+		if (ring_mode)
+			io_task->psgl_handle->type = INI_TMF_CMD;
+		else
+			AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
+				      INI_TMF_CMD);
 		AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
 		hwi_write_buffer(pwrb, task);
 		break;
 	case ISCSI_OP_LOGOUT:
 		AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
+		if (ring_mode)
+			io_task->psgl_handle->type = HWH_TYPE_LOGOUT;
+		else
 		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
 				HWH_TYPE_LOGOUT);
 		hwi_write_buffer(pwrb, task);
@@ -3122,8 +3605,9 @@ static int beiscsi_mtask(struct iscsi_task *task)
 		      io_task->pwrb_handle->nxt_wrb_index);
 	be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
 
-	doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
-	doorbell |= (io_task->pwrb_handle->wrb_index &
+	doorbell |= cid & DB_WRB_POST_CID_MASK;
+	if (!ring_mode)
+		doorbell |= (io_task->pwrb_handle->wrb_index &
 		     DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
 	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
 	iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
@@ -3165,9 +3649,14 @@ static int beiscsi_task_xmit(struct iscsi_task *task)
 	return beiscsi_iotask(task, sg, num_sg, xferlen, writedir);
 }
 
+
 static void beiscsi_remove(struct pci_dev *pcidev)
 {
 	struct beiscsi_hba *phba = NULL;
+	struct hwi_controller *phwi_ctrlr;
+	struct hwi_context_memory *phwi_context;
+	struct be_eq_obj *pbe_eq;
+	unsigned int i, msix_vec;
 
 	phba = (struct beiscsi_hba *)pci_get_drvdata(pcidev);
 	if (!phba) {
@@ -3175,12 +3664,24 @@ static void beiscsi_remove(struct pci_dev *pcidev)
 		return;
 	}
 
+	phwi_ctrlr = phba->phwi_ctrlr;
+	phwi_context = phwi_ctrlr->phwi_ctxt;
 	hwi_disable_intr(phba);
-	if (phba->pcidev->irq)
-		free_irq(phba->pcidev->irq, phba);
+	if (phba->msix_enabled) {
+		for (i = 0; i <= phba->num_cpus; i++) {
+			msix_vec = phba->msix_entries[i].vector;
+			free_irq(msix_vec, &phwi_context->be_eq[i]);
+		}
+	} else
+		if (phba->pcidev->irq)
+			free_irq(phba->pcidev->irq, phba);
+	pci_disable_msix(phba->pcidev);
 	destroy_workqueue(phba->wq);
 	if (blk_iopoll_enabled)
-		blk_iopoll_disable(&phba->iopoll);
+		for (i = 0; i < phba->num_cpus; i++) {
+			pbe_eq = &phwi_context->be_eq[i];
+			blk_iopoll_disable(&pbe_eq->iopoll);
+		}
 
 	beiscsi_clean_port(phba);
 	beiscsi_free_mem(phba);
@@ -3194,11 +3695,29 @@ static void beiscsi_remove(struct pci_dev *pcidev)
 	iscsi_host_free(phba->shost);
 }
 
+static void beiscsi_msix_enable(struct beiscsi_hba *phba)
+{
+	int i, status;
+
+	for (i = 0; i <= phba->num_cpus; i++)
+		phba->msix_entries[i].entry = i;
+
+	status = pci_enable_msix(phba->pcidev, phba->msix_entries,
+				 (phba->num_cpus + 1));
+	if (!status)
+		phba->msix_enabled = true;
+
+	return;
+}
+
 static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
 				const struct pci_device_id *id)
 {
 	struct beiscsi_hba *phba = NULL;
-	int ret;
+	struct hwi_controller *phwi_ctrlr;
+	struct hwi_context_memory *phwi_context;
+	struct be_eq_obj *pbe_eq;
+	int ret, msix_vec, num_cpus, i;
 
 	ret = beiscsi_enable_pci(pcidev);
 	if (ret < 0) {
@@ -3213,8 +3732,18 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
 			" Failed in beiscsi_hba_alloc \n");
 		goto disable_pci;
 	}
+	SE_DEBUG(DBG_LVL_8, " phba = %p \n", phba);
 
 	pci_set_drvdata(pcidev, phba);
+	if (enable_msix)
+		num_cpus = find_num_cpus();
+	else
+		num_cpus = 1;
+	phba->num_cpus = num_cpus;
+	SE_DEBUG(DBG_LVL_8, "num_cpus = %d \n", phba->num_cpus);
+
+	if (enable_msix)
+		beiscsi_msix_enable(phba);
 	ret = be_ctrl_init(phba, pcidev);
 	if (ret) {
 		shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
@@ -3235,7 +3764,7 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
 
 	snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_q_irq%u",
 		 phba->shost->host_no);
-	phba->wq = create_singlethread_workqueue(phba->wq_name);
+	phba->wq = create_workqueue(phba->wq_name);
 	if (!phba->wq) {
 		shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
 				"Failed to allocate work queue\n");
@@ -3244,11 +3773,16 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
 
 	INIT_WORK(&phba->work_cqs, beiscsi_process_all_cqs);
 
+	phwi_ctrlr = phba->phwi_ctrlr;
+	phwi_context = phwi_ctrlr->phwi_ctxt;
 	if (blk_iopoll_enabled) {
-		blk_iopoll_init(&phba->iopoll, be_iopoll_budget, be_iopoll);
-		blk_iopoll_enable(&phba->iopoll);
+		for (i = 0; i < phba->num_cpus; i++) {
+			pbe_eq = &phwi_context->be_eq[i];
+			blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget,
+					be_iopoll);
+			blk_iopoll_enable(&pbe_eq->iopoll);
+		}
 	}
-
 	ret = beiscsi_init_irqs(phba);
 	if (ret < 0) {
 		shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
@@ -3261,17 +3795,26 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
 			     "Failed to hwi_enable_intr\n");
 		goto free_ctrlr;
 	}
-
 	SE_DEBUG(DBG_LVL_8, "\n\n\n SUCCESS - DRIVER LOADED \n\n\n");
 	return 0;
 
 free_ctrlr:
-	if (phba->pcidev->irq)
-		free_irq(phba->pcidev->irq, phba);
+	if (phba->msix_enabled) {
+		for (i = 0; i <= phba->num_cpus; i++) {
+			msix_vec = phba->msix_entries[i].vector;
+			free_irq(msix_vec, &phwi_context->be_eq[i]);
+		}
+	} else
+		if (phba->pcidev->irq)
+			free_irq(phba->pcidev->irq, phba);
+	pci_disable_msix(phba->pcidev);
 free_blkenbld:
 	destroy_workqueue(phba->wq);
 	if (blk_iopoll_enabled)
-		blk_iopoll_disable(&phba->iopoll);
+		for (i = 0; i < phba->num_cpus; i++) {
+			pbe_eq = &phwi_context->be_eq[i];
+			blk_iopoll_disable(&pbe_eq->iopoll);
+		}
 free_twq:
 	beiscsi_clean_port(phba);
 	beiscsi_free_mem(phba);
@@ -3316,7 +3859,7 @@ struct iscsi_transport beiscsi_iscsi_transport = {
 		ISCSI_USERNAME | ISCSI_PASSWORD |
 		ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
 		ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
-		ISCSI_LU_RESET_TMO |
+		ISCSI_LU_RESET_TMO | ISCSI_TGT_RESET_TMO |
 		ISCSI_PING_TMO | ISCSI_RECV_TMO |
 		ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
 	.host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
@@ -3351,6 +3894,7 @@ static struct pci_driver beiscsi_pci_driver = {
 	.id_table = beiscsi_pci_id_table
 };
 
+
 static int __init beiscsi_module_init(void)
 {
 	int ret;
@@ -3373,6 +3917,7 @@ static int __init beiscsi_module_init(void)
 			 "beiscsi pci driver.\n");
 		goto unregister_iscsi_transport;
 	}
+	ring_mode = 0;
 	return 0;
 
 unregister_iscsi_transport:
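
The itt handling above now packs the WRB index into the upper 16 bits of the tag and the SGL index into the lower 16; beiscsi_parse_pdu() hands the value back verbatim, and the TMF path recovers the WRB index with be32_to_cpu(rtt) >> 16. A small round-trip of that packing, leaving out the cpu_to_be32 byte swap the driver applies; the helper names are hypothetical:

#include <assert.h>
#include <stdint.h>

static uint32_t pack_itt(uint16_t wrb_index, uint16_t sgl_index)
{
	/* upper 16 bits: WRB index, lower 16 bits: SGL index */
	return ((uint32_t)wrb_index << 16) | sgl_index;
}

static uint16_t itt_to_wrb_index(uint32_t itt) { return itt >> 16; }
static uint16_t itt_to_sgl_index(uint32_t itt) { return itt & 0xffff; }

int main(void)
{
	uint32_t itt = pack_itt(7, 42);

	assert(itt_to_wrb_index(itt) == 7);
	assert(itt_to_sgl_index(itt) == 42);
	return 0;
}
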
diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h
index 53c9b70ac7ac..25e6b208b771 100644
--- a/drivers/scsi/be2iscsi/be_main.h
+++ b/drivers/scsi/be2iscsi/be_main.h
@@ -21,11 +21,9 @@
 #ifndef _BEISCSI_MAIN_
 #define _BEISCSI_MAIN_
 
-
 #include <linux/kernel.h>
 #include <linux/pci.h>
 #include <linux/in.h>
-#include <linux/blk-iopoll.h>
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_device.h>
@@ -35,12 +33,8 @@
 #include <scsi/scsi_transport_iscsi.h>
 
 #include "be.h"
-
-
-
 #define DRV_NAME		"be2iscsi"
 #define BUILD_STR		"2.0.527.0"
-
 #define BE_NAME			"ServerEngines BladeEngine2" \
 				"Linux iSCSI Driver version" BUILD_STR
 #define DRV_DESC		BE_NAME " " "Driver"
@@ -49,6 +43,8 @@
 #define BE_DEVICE_ID1		0x212
 #define OC_DEVICE_ID1		0x702
 #define OC_DEVICE_ID2		0x703
+#define OC_DEVICE_ID3		0x712
+#define OC_DEVICE_ID4		0x222
 
 #define BE2_MAX_SESSIONS	64
 #define BE2_CMDS_PER_CXN	128
@@ -63,6 +59,7 @@
 #define BE2_IO_DEPTH \
 	(BE2_MAX_ICDS / 2 - (BE2_LOGOUTS + BE2_TMFS + BE2_NOPOUT_REQ))
 
+#define MAX_CPUS		31
 #define BEISCSI_SGLIST_ELEMENTS	BE2_SGE
 
 #define BEISCSI_MAX_CMNDS	1024	/* Max IO's per Ctrlr sht->can_queue */
@@ -79,7 +76,7 @@
 #define BE_SENSE_INFO_SIZE		258
 #define BE_ISCSI_PDU_HEADER_SIZE	64
 #define BE_MIN_MEM_SIZE			16384
-
+#define MAX_CMD_SZ			65536
 #define IIOC_SCSI_DATA                  0x05	/* Write Operation */
 
 #define DBG_LVL				0x00000001
@@ -100,6 +97,8 @@ do {							\
 	}						\
 } while (0);
 
+#define BE_ADAPTER_UP		0x00000000
+#define BE_ADAPTER_LINK_DOWN	0x00000001
 /**
  * hardware needs the async PDU buffers to be posted in multiples of 8
  * So have at least 8 of them by default
REPLACED-BY-GR-REPLACE
@@ -160,21 +159,19 @@ do {							\
 
 enum be_mem_enum {
 	HWI_MEM_ADDN_CONTEXT,
-	HWI_MEM_CQ,
-	HWI_MEM_EQ,
 	HWI_MEM_WRB,
 	HWI_MEM_WRBH,
-	HWI_MEM_SGLH,	/* 5 */
+	HWI_MEM_SGLH,
 	HWI_MEM_SGE,
-	HWI_MEM_ASYNC_HEADER_BUF,
+	HWI_MEM_ASYNC_HEADER_BUF, 	/* 5 */
 	HWI_MEM_ASYNC_DATA_BUF,
 	HWI_MEM_ASYNC_HEADER_RING,
-	HWI_MEM_ASYNC_DATA_RING,	/* 10 */
+	HWI_MEM_ASYNC_DATA_RING,
 	HWI_MEM_ASYNC_HEADER_HANDLE,
-	HWI_MEM_ASYNC_DATA_HANDLE,
+	HWI_MEM_ASYNC_DATA_HANDLE, 	/* 10 */
 	HWI_MEM_ASYNC_PDU_CONTEXT,
 	ISCSI_MEM_GLOBAL_HEADER,
-	SE_MEM_MAX  	/* 15 */
+	SE_MEM_MAX
 };
 
 struct be_bus_address32 {
@@ -212,6 +209,9 @@ struct be_mem_descriptor {
 
 struct sgl_handle {
 	unsigned int sgl_index;
+	unsigned int type;
+	unsigned int cid;
+	struct iscsi_task *task;
 	struct iscsi_sge *pfrag;
 };
 
@@ -274,13 +274,17 @@ struct beiscsi_hba {
 	struct pci_dev *pcidev;
 	unsigned int state;
 	unsigned short asic_revision;
-	struct blk_iopoll	iopoll;
+	unsigned int num_cpus;
+	unsigned int nxt_cqid;
+	struct msix_entry msix_entries[MAX_CPUS + 1];
+	bool msix_enabled;
 	struct be_mem_descriptor *init_mem;
 
 	unsigned short io_sgl_alloc_index;
 	unsigned short io_sgl_free_index;
 	unsigned short io_sgl_hndl_avbl;
 	struct sgl_handle **io_sgl_hndl_base;
+	struct sgl_handle **sgl_hndl_array;
 
 	unsigned short eh_sgl_alloc_index;
 	unsigned short eh_sgl_free_index;
@@ -315,6 +319,7 @@ struct beiscsi_hba {
 		unsigned short cid_alloc;
 		unsigned short cid_free;
 		unsigned short avlbl_cids;
+		unsigned short iscsi_features;
 		spinlock_t cid_lock;
 	} fw_config;
 
@@ -343,6 +348,7 @@ struct beiscsi_conn {
 	unsigned short login_in_progress;
 	struct sgl_handle *plogin_sgl_handle;
 	struct beiscsi_session *beiscsi_sess;
+	struct iscsi_task *task;
 };
 
 /* This structure is used by the chip */
@@ -390,7 +396,7 @@ struct beiscsi_io_task {
 	unsigned int flags;
 	unsigned short cid;
 	unsigned short header_len;
-
+	itt_t libiscsi_itt;
 	struct be_cmd_bhs *cmd_bhs;
 	struct be_bus_address bhs_pa;
 	unsigned short bhs_len;
@@ -599,7 +605,6 @@ struct amap_cq_db {
 
 void beiscsi_process_eq(struct beiscsi_hba *phba);
 
-
 struct iscsi_wrb {
 	u32 dw[16];
 } __packed;
@@ -820,10 +825,12 @@ struct wrb_handle {
 };
 
 struct hwi_context_memory {
-	struct be_eq_obj be_eq;
-	struct be_queue_info be_cq;
-	struct be_queue_info be_mcc_cq;
-	struct be_queue_info be_mcc;
+	/* Adaptive interrupt coalescing (AIC) info */
+	u16 min_eqd;		/* in usecs */
+	u16 max_eqd;		/* in usecs */
+	u16 cur_eqd;		/* in usecs */
+	struct be_eq_obj be_eq[MAX_CPUS];
+	struct be_queue_info be_cq[MAX_CPUS];
 
 	struct be_queue_info be_def_hdrq;
 	struct be_queue_info be_def_dataq;
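
The header now sizes be_eq[] and be_cq[] by MAX_CPUS, and the driver consistently walks num_cpus I/O queues plus one extra event queue for the MCC when MSI-X is enabled (hence the "i <= phba->num_cpus" and "num_cpus + eq_msix" loops above). A sketch of that accounting under those assumptions; the function itself is illustrative:

#define MAX_CPUS 31	/* mirrors be_main.h */

/* One I/O event queue per CPU, plus one EQ for MCC completions when
 * MSI-X is on; a single shared EQ otherwise (num_cpus is 1 then). */
static unsigned int beiscsi_eq_count(unsigned int num_cpus, int msix_enabled)
{
	if (num_cpus > MAX_CPUS)
		num_cpus = MAX_CPUS;
	return msix_enabled ? num_cpus + 1 : num_cpus;
}
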
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index 12e644fc746e..79c2bd525a84 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -35,7 +35,6 @@ unsigned char mgmt_get_fw_config(struct be_ctrl_info *ctrl,
 
 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
 			   OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req));
-
 	status = be_mbox_notify(ctrl);
 	if (!status) {
 		struct be_fw_cfg *pfw_cfg;
@@ -58,7 +57,8 @@ unsigned char mgmt_get_fw_config(struct be_ctrl_info *ctrl,
 	return status;
 }
 
-unsigned char mgmt_check_supported_fw(struct be_ctrl_info *ctrl)
+unsigned char mgmt_check_supported_fw(struct be_ctrl_info *ctrl,
+				      struct beiscsi_hba *phba)
 {
 	struct be_dma_mem nonemb_cmd;
 	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
@@ -85,7 +85,6 @@ unsigned char mgmt_check_supported_fw(struct be_ctrl_info *ctrl)
 	sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd.dma));
 	sge->pa_lo = cpu_to_le32(nonemb_cmd.dma & 0xFFFFFFFF);
 	sge->len = cpu_to_le32(nonemb_cmd.size);
-
 	status = be_mbox_notify(ctrl);
 	if (!status) {
 		struct be_mgmt_controller_attributes_resp *resp = nonemb_cmd.va;
@@ -95,21 +94,25 @@ unsigned char mgmt_check_supported_fw(struct be_ctrl_info *ctrl)
 			resp->params.hba_attribs.firmware_version_string);
 		SE_DEBUG(DBG_LVL_8,
 			"Developer Build, not performing version check...\n");
-
+		phba->fw_config.iscsi_features =
+				resp->params.hba_attribs.iscsi_features;
+		SE_DEBUG(DBG_LVL_8, " phba->fw_config.iscsi_features = %d\n",
+				      phba->fw_config.iscsi_features);
 	} else
 		SE_DEBUG(DBG_LVL_1, " Failed in mgmt_check_supported_fw\n");
+	spin_unlock(&ctrl->mbox_lock);
 	if (nonemb_cmd.va)
 		pci_free_consistent(ctrl->pdev, nonemb_cmd.size,
 				    nonemb_cmd.va, nonemb_cmd.dma);
 
-	spin_unlock(&ctrl->mbox_lock);
 	return status;
 }
 
+
 unsigned char mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short chute)
 {
 	struct be_ctrl_info *ctrl = &phba->ctrl;
-	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+	struct be_mcc_wrb *wrb = wrb_from_mccq(phba);
 	struct iscsi_cleanup_req *req = embedded_payload(wrb);
 	int status = 0;
 
@@ -124,7 +127,7 @@ unsigned char mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short chute)
 	req->hdr_ring_id = 0;
 	req->data_ring_id = 0;
 
-	status = be_mbox_notify(ctrl);
+	status =  be_mcc_notify_wait(phba);
 	if (status)
 		shost_printk(KERN_WARNING, phba->shost,
 			     " mgmt_epfw_cleanup , FAILED\n");
@@ -137,7 +140,7 @@ unsigned char mgmt_invalidate_icds(struct beiscsi_hba *phba,
 {
 	struct be_dma_mem nonemb_cmd;
 	struct be_ctrl_info *ctrl = &phba->ctrl;
-	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+	struct be_mcc_wrb *wrb = wrb_from_mccq(phba);
 	struct be_sge *sge = nonembedded_sgl(wrb);
 	struct invalidate_commands_params_in *req;
 	int status = 0;
@@ -169,7 +172,7 @@ unsigned char mgmt_invalidate_icds(struct beiscsi_hba *phba,
 	sge->pa_lo = cpu_to_le32(nonemb_cmd.dma & 0xFFFFFFFF);
 	sge->len = cpu_to_le32(nonemb_cmd.size);
 
-	status = be_mbox_notify(ctrl);
+	status = be_mcc_notify_wait(phba);
 	if (status)
 		SE_DEBUG(DBG_LVL_1, "ICDS Invalidation Failed\n");
 	spin_unlock(&ctrl->mbox_lock);
@@ -186,7 +189,7 @@ unsigned char mgmt_invalidate_connection(struct beiscsi_hba *phba,
 					 unsigned short savecfg_flag)
 {
 	struct be_ctrl_info *ctrl = &phba->ctrl;
-	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+	struct be_mcc_wrb *wrb = wrb_from_mccq(phba);
 	struct iscsi_invalidate_connection_params_in *req =
 						embedded_payload(wrb);
 	int status = 0;
@@ -205,7 +208,7 @@ unsigned char mgmt_invalidate_connection(struct beiscsi_hba *phba,
 	else
 		req->cleanup_type = CMD_ISCSI_CONNECTION_INVALIDATE;
 	req->save_cfg = savecfg_flag;
-	status = be_mbox_notify(ctrl);
+	status =  be_mcc_notify_wait(phba);
 	if (status)
 		SE_DEBUG(DBG_LVL_1, "Invalidation Failed\n");
 
@@ -217,7 +220,7 @@ unsigned char mgmt_upload_connection(struct beiscsi_hba *phba,
 				unsigned short cid, unsigned int upload_flag)
 {
 	struct be_ctrl_info *ctrl = &phba->ctrl;
-	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+	struct be_mcc_wrb *wrb = wrb_from_mccq(phba);
 	struct tcp_upload_params_in *req = embedded_payload(wrb);
 	int status = 0;
 
@@ -229,7 +232,7 @@ unsigned char mgmt_upload_connection(struct beiscsi_hba *phba,
 			   OPCODE_COMMON_TCP_UPLOAD, sizeof(*req));
 	req->id = (unsigned short)cid;
 	req->upload_type = (unsigned char)upload_flag;
-	status = be_mbox_notify(ctrl);
+	status = be_mcc_notify_wait(phba);
 	if (status)
 		SE_DEBUG(DBG_LVL_1, "mgmt_upload_connection Failed\n");
 	spin_unlock(&ctrl->mbox_lock);
@@ -245,13 +248,14 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
 	struct sockaddr_in *daddr_in = (struct sockaddr_in *)dst_addr;
 	struct sockaddr_in6 *daddr_in6 = (struct sockaddr_in6 *)dst_addr;
 	struct be_ctrl_info *ctrl = &phba->ctrl;
-	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+	struct be_mcc_wrb *wrb = wrb_from_mccq(phba);
 	struct tcp_connect_and_offload_in *req = embedded_payload(wrb);
 	unsigned short def_hdr_id;
 	unsigned short def_data_id;
 	struct phys_addr template_address = { 0, 0 };
 	struct phys_addr *ptemplate_address;
 	int status = 0;
+	unsigned int i;
 	unsigned short cid = beiscsi_ep->ep_cid;
 
 	phwi_ctrlr = phba->phwi_ctrlr;
@@ -296,14 +300,18 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
 
 	}
 	req->cid = cid;
-	req->cq_id = phwi_context->be_cq.id;
+	i = phba->nxt_cqid++;
+	if (phba->nxt_cqid == phba->num_cpus)
+		phba->nxt_cqid = 0;
+	req->cq_id = phwi_context->be_cq[i].id;
+	SE_DEBUG(DBG_LVL_8, "i=%d cq_id=%d \n", i, req->cq_id);
 	req->defq_id = def_hdr_id;
 	req->hdr_ring_id = def_hdr_id;
 	req->data_ring_id = def_data_id;
 	req->do_offload = 1;
 	req->dataout_template_pa.lo = ptemplate_address->lo;
 	req->dataout_template_pa.hi = ptemplate_address->hi;
-	status = be_mbox_notify(ctrl);
+	status = be_mcc_notify_wait(phba);
 	if (!status) {
 		struct iscsi_endpoint *ep;
 		struct tcp_connect_and_offload_out *ptcpcnct_out =
@@ -311,7 +319,7 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
 
 		ep = phba->ep_array[ptcpcnct_out->cid];
 		beiscsi_ep = ep->dd_data;
-		beiscsi_ep->fw_handle = 0;
+		beiscsi_ep->fw_handle = ptcpcnct_out->connection_handle;
 		beiscsi_ep->cid_vld = 1;
 		SE_DEBUG(DBG_LVL_8, "mgmt_open_connection Success\n");
 	} else
@@ -319,3 +327,30 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
 	spin_unlock(&ctrl->mbox_lock);
 	return status;
 }
+
+int be_cmd_get_mac_addr(struct beiscsi_hba *phba, u8 *mac_addr)
+{
+	struct be_ctrl_info *ctrl = &phba->ctrl;
+	struct be_mcc_wrb *wrb = wrb_from_mccq(phba);
+	struct be_cmd_req_get_mac_addr *req = embedded_payload(wrb);
+	int status;
+
+	SE_DEBUG(DBG_LVL_8, "In be_cmd_get_mac_addr\n");
+	spin_lock(&ctrl->mbox_lock);
+	memset(wrb, 0, sizeof(*wrb));
+	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
+			   OPCODE_COMMON_ISCSI_NTWK_GET_NIC_CONFIG,
+			   sizeof(*req));
+
+	status = be_mcc_notify_wait(phba);
+	if (!status) {
+		struct be_cmd_resp_get_mac_addr *resp = embedded_payload(wrb);
+
+		memcpy(mac_addr, resp->mac_address, ETH_ALEN);
+	}
+
+	spin_unlock(&ctrl->mbox_lock);
+	return status;
+}
+
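
Two themes run through this file: the management commands move off the blocking mailbox onto the MCC queue (be_mbox_notify becomes be_mcc_notify_wait), and mgmt_open_connection() now spreads offloaded connections across the per-CPU completion queues by cycling nxt_cqid. The round-robin selection, modeled in isolation with a hypothetical helper:

#include <stdio.h>

static unsigned int nxt_cqid;	/* per-adapter cursor, as in struct beiscsi_hba */

static unsigned int pick_cq(unsigned int num_cpus)
{
	unsigned int i = nxt_cqid++;

	if (nxt_cqid == num_cpus)
		nxt_cqid = 0;
	return i;	/* index into the be_cq[] array */
}

int main(void)
{
	int n;

	for (n = 0; n < 5; n++)
		printf("%u ", pick_cq(2));	/* prints: 0 1 0 1 0 */
	printf("\n");
	return 0;
}
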
diff --git a/drivers/scsi/be2iscsi/be_mgmt.h b/drivers/scsi/be2iscsi/be_mgmt.h
index 00e816ee8070..24eaff923f85 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.h
+++ b/drivers/scsi/be2iscsi/be_mgmt.h
@@ -175,7 +175,9 @@ struct mgmt_hba_attributes {
 	u8 phy_port;
 	u32 firmware_post_status;
 	u32 hba_mtu[8];
-	u32 future_u32[4];
+	u8 iscsi_features;
+	u8 future_u8[3];
+	u32 future_u32[3];
 } __packed;
 
 struct mgmt_controller_attributes {
@@ -246,4 +248,8 @@ unsigned char mgmt_invalidate_connection(struct beiscsi_hba *phba,
 					 unsigned short cid,
 					 unsigned short issue_reset,
 					 unsigned short savecfg_flag);
+
+unsigned char mgmt_fw_cmd(struct be_ctrl_info *ctrl,
+			  struct beiscsi_hba *phba,
+			  char *buf, unsigned int len);
 #endif
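
The mgmt_hba_attributes change carves iscsi_features out of previously reserved space without disturbing the wire layout: u32 future_u32[4] becomes u8 + u8[3] + u32[3], the same 16 bytes. A compile-time check of that invariant (C11 _Static_assert, GCC packed attribute; the struct names here are made up):

#include <stdint.h>

struct old_tail {
	uint32_t future_u32[4];
} __attribute__((packed));

struct new_tail {
	uint8_t  iscsi_features;
	uint8_t  future_u8[3];
	uint32_t future_u32[3];
} __attribute__((packed));

_Static_assert(sizeof(struct old_tail) == sizeof(struct new_tail),
	       "reserved-space split must not change the structure size");
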
diff --git a/drivers/scsi/bfa/bfa_cb_ioim_macros.h b/drivers/scsi/bfa/bfa_cb_ioim_macros.h
index 0050c838c358..961fe439daad 100644
--- a/drivers/scsi/bfa/bfa_cb_ioim_macros.h
+++ b/drivers/scsi/bfa/bfa_cb_ioim_macros.h
@@ -51,7 +51,7 @@ bfad_int_to_lun(u32 luno)
 	lun.bfa_lun     = 0;
 	lun.scsi_lun[0] = bfa_os_htons(luno);
 
-	return (lun.bfa_lun);
+	return lun.bfa_lun;
 }
 
 /**
@@ -68,7 +68,7 @@ bfa_cb_ioim_get_cdb(struct bfad_ioim_s *dio)
 {
 	struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
 
-	return ((u8 *) cmnd->cmnd);
+	return (u8 *) cmnd->cmnd;
 }
 
 /**
@@ -97,7 +97,7 @@ bfa_cb_ioim_get_size(struct bfad_ioim_s *dio)
 {
 	struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
 
-	return (scsi_bufflen(cmnd));
+	return scsi_bufflen(cmnd);
 }
 
 /**
@@ -129,7 +129,7 @@ bfa_cb_ioim_get_sgaddr(struct bfad_ioim_s *dio, int sgeid)
 	sge = (struct scatterlist *)scsi_sglist(cmnd) + sgeid;
 	addr = (u64) sg_dma_address(sge);
 
-	return (*(union bfi_addr_u *) &addr);
+	return *((union bfi_addr_u *) &addr);
 }
 
 static inline u32
@@ -197,7 +197,7 @@ bfa_cb_ioim_get_cdblen(struct bfad_ioim_s *dio)
 {
 	struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
 
-	return (cmnd->cmd_len);
+	return cmnd->cmd_len;
 }
 
 
diff --git a/drivers/scsi/bfa/bfa_cee.c b/drivers/scsi/bfa/bfa_cee.c
index 7a959c34e789..2b917792c6bc 100644
--- a/drivers/scsi/bfa/bfa_cee.c
+++ b/drivers/scsi/bfa/bfa_cee.c
@@ -228,7 +228,7 @@ bfa_cee_reset_stats_isr(struct bfa_cee_s *cee, bfa_status_t status)
 u32
 bfa_cee_meminfo(void)
 {
-	return (bfa_cee_attr_meminfo() + bfa_cee_stats_meminfo());
+	return bfa_cee_attr_meminfo() + bfa_cee_stats_meminfo();
 }
 
 /**
diff --git a/drivers/scsi/bfa/bfa_csdebug.c b/drivers/scsi/bfa/bfa_csdebug.c
index 1b71d349451a..caeb1143a4e6 100644
--- a/drivers/scsi/bfa/bfa_csdebug.c
+++ b/drivers/scsi/bfa/bfa_csdebug.c
@@ -47,12 +47,12 @@ bfa_q_is_on_q_func(struct list_head *q, struct list_head *qe)
 	tqe = bfa_q_next(q);
 	while (tqe != q) {
 		if (tqe == qe)
-			return (1);
+			return 1;
 		tqe = bfa_q_next(tqe);
 		if (tqe == NULL)
 			break;
 	}
-	return (0);
+	return 0;
 }
 
 
diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
index 401babe3494e..790c945aeae6 100644
--- a/drivers/scsi/bfa/bfa_fcpim.c
+++ b/drivers/scsi/bfa/bfa_fcpim.c
@@ -131,7 +131,7 @@ bfa_fcpim_path_tov_get(struct bfa_s *bfa)
 {
 	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
 
-	return (fcpim->path_tov / 1000);
+	return fcpim->path_tov / 1000;
 }
 
 bfa_status_t
@@ -169,7 +169,7 @@ bfa_fcpim_qdepth_get(struct bfa_s *bfa)
 {
 	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
 
-	return (fcpim->q_depth);
+	return fcpim->q_depth;
 }
 
 
diff --git a/drivers/scsi/bfa/bfa_fcpim_priv.h b/drivers/scsi/bfa/bfa_fcpim_priv.h
index 153206cfb37a..5cf418460f75 100644
--- a/drivers/scsi/bfa/bfa_fcpim_priv.h
+++ b/drivers/scsi/bfa/bfa_fcpim_priv.h
@@ -35,7 +35,7 @@
 #define BFA_FCPIM_PATHTOV_MAX	(90 * 1000)	/* in millisecs */
 
 #define bfa_fcpim_stats(__fcpim, __stats)   \
-    (__fcpim)->stats.__stats ++
+    ((__fcpim)->stats.__stats++)
 
 struct bfa_fcpim_mod_s {
 	struct bfa_s 	*bfa;
@@ -143,7 +143,7 @@ struct bfa_itnim_s {
 	struct bfa_itnim_hal_stats_s	stats;
 };
 
-#define bfa_itnim_is_online(_itnim) (_itnim)->is_online
+#define bfa_itnim_is_online(_itnim) ((_itnim)->is_online)
 #define BFA_FCPIM_MOD(_hal) (&(_hal)->modules.fcpim_mod)
 #define BFA_IOIM_FROM_TAG(_fcpim, _iotag)	\
 	(&fcpim->ioim_arr[_iotag])
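
The added parentheses around these macro bodies are defensive: an expression macro that expands bare can rebind against operators at the call site. The bodies touched here are single postfix expressions, so the risk is latent, but the failure mode looks like this (hypothetical macros, runnable):

#include <stdio.h>

#define TOV_SECS_BAD(ms)	(ms) / 1000
#define TOV_SECS_OK(ms)		((ms) / 1000)

int main(void)
{
	/* 2 * 1500 / 1000 groups as (2 * 1500) / 1000 == 3,
	 * not 2 * (1500 / 1000) == 2 */
	printf("%d vs %d\n", 2 * TOV_SECS_BAD(1500), 2 * TOV_SECS_OK(1500));
	return 0;
}
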
diff --git a/drivers/scsi/bfa/bfa_fcport.c b/drivers/scsi/bfa/bfa_fcport.c
index 992435987deb..aef648b55dfc 100644
--- a/drivers/scsi/bfa/bfa_fcport.c
+++ b/drivers/scsi/bfa/bfa_fcport.c
@@ -388,32 +388,29 @@ bfa_pport_sm_linkup(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
 		bfa_pport_callback(pport, BFA_PPORT_LINKDOWN);
 		bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL,
 			     BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown");
-		if (BFA_PORT_IS_DISABLED(pport->bfa)) {
+		if (BFA_PORT_IS_DISABLED(pport->bfa))
 			bfa_pport_aen_post(pport, BFA_PORT_AEN_OFFLINE);
-		} else {
+		else
 			bfa_pport_aen_post(pport, BFA_PORT_AEN_DISCONNECT);
-		}
 		break;
 
 	case BFA_PPORT_SM_STOP:
 		bfa_sm_set_state(pport, bfa_pport_sm_stopped);
 		bfa_pport_reset_linkinfo(pport);
-		if (BFA_PORT_IS_DISABLED(pport->bfa)) {
+		if (BFA_PORT_IS_DISABLED(pport->bfa))
 			bfa_pport_aen_post(pport, BFA_PORT_AEN_OFFLINE);
-		} else {
+		else
 			bfa_pport_aen_post(pport, BFA_PORT_AEN_DISCONNECT);
-		}
 		break;
 
 	case BFA_PPORT_SM_HWFAIL:
 		bfa_sm_set_state(pport, bfa_pport_sm_iocdown);
 		bfa_pport_reset_linkinfo(pport);
 		bfa_pport_callback(pport, BFA_PPORT_LINKDOWN);
-		if (BFA_PORT_IS_DISABLED(pport->bfa)) {
+		if (BFA_PORT_IS_DISABLED(pport->bfa))
 			bfa_pport_aen_post(pport, BFA_PORT_AEN_OFFLINE);
-		} else {
+		else
 			bfa_pport_aen_post(pport, BFA_PORT_AEN_DISCONNECT);
-		}
 		break;
 
 	default:
@@ -999,10 +996,10 @@ bfa_pport_enable(struct bfa_s *bfa)
 	struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
 
 	if (pport->diag_busy)
-		return (BFA_STATUS_DIAG_BUSY);
+		return BFA_STATUS_DIAG_BUSY;
 	else if (bfa_sm_cmp_state
 		 (BFA_PORT_MOD(bfa), bfa_pport_sm_disabling_qwait))
-		return (BFA_STATUS_DEVBUSY);
+		return BFA_STATUS_DEVBUSY;
 
 	bfa_sm_send_event(BFA_PORT_MOD(bfa), BFA_PPORT_SM_ENABLE);
 	return BFA_STATUS_OK;
@@ -1032,7 +1029,7 @@ bfa_pport_cfg_speed(struct bfa_s *bfa, enum bfa_pport_speed speed)
 
 	pport->cfg.speed = speed;
 
-	return (BFA_STATUS_OK);
+	return BFA_STATUS_OK;
 }
 
 /**
@@ -1068,7 +1065,7 @@ bfa_pport_cfg_topology(struct bfa_s *bfa, enum bfa_pport_topology topology)
 	}
 
 	pport->cfg.topology = topology;
-	return (BFA_STATUS_OK);
+	return BFA_STATUS_OK;
 }
 
 /**
@@ -1094,7 +1091,7 @@ bfa_pport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa)
 	pport->cfg.cfg_hardalpa = BFA_TRUE;
 	pport->cfg.hardalpa = alpa;
 
-	return (BFA_STATUS_OK);
+	return BFA_STATUS_OK;
 }
 
 bfa_status_t
@@ -1106,7 +1103,7 @@ bfa_pport_clr_hardalpa(struct bfa_s *bfa)
 	bfa_trc(bfa, pport->cfg.hardalpa);
 
 	pport->cfg.cfg_hardalpa = BFA_FALSE;
-	return (BFA_STATUS_OK);
+	return BFA_STATUS_OK;
 }
 
 bfa_boolean_t
@@ -1138,16 +1135,16 @@ bfa_pport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxfrsize)
 	 * within range
REPLACED-BY-GR-REPLACE
 	 */
 	if ((maxfrsize > FC_MAX_PDUSZ) || (maxfrsize < FC_MIN_PDUSZ))
-		return (BFA_STATUS_INVLD_DFSZ);
+		return BFA_STATUS_INVLD_DFSZ;
 
 	/*
 	 * power of 2, if not the max frame size of 2112
 	 */
 	if ((maxfrsize != FC_MAX_PDUSZ) && (maxfrsize & (maxfrsize - 1)))
-		return (BFA_STATUS_INVLD_DFSZ);
+		return BFA_STATUS_INVLD_DFSZ;
 
 	pport->cfg.maxfrsize = maxfrsize;
-	return (BFA_STATUS_OK);
+	return BFA_STATUS_OK;
 }
 
 u16
@@ -1415,7 +1412,7 @@ bfa_pport_get_stats(struct bfa_s *bfa, union bfa_pport_stats_u *stats,
 
 	if (port->stats_busy) {
 		bfa_trc(bfa, port->stats_busy);
-		return (BFA_STATUS_DEVBUSY);
+		return BFA_STATUS_DEVBUSY;
 	}
 
 	port->stats_busy = BFA_TRUE;
@@ -1427,7 +1424,7 @@ bfa_pport_get_stats(struct bfa_s *bfa, union bfa_pport_stats_u *stats,
 
 	bfa_timer_start(bfa, &port->timer, bfa_port_stats_timeout, port,
 			BFA_PORT_STATS_TOV);
-	return (BFA_STATUS_OK);
+	return BFA_STATUS_OK;
 }
 
 bfa_status_t
@@ -1437,7 +1434,7 @@ bfa_pport_clear_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg)
 
 	if (port->stats_busy) {
 		bfa_trc(bfa, port->stats_busy);
-		return (BFA_STATUS_DEVBUSY);
+		return BFA_STATUS_DEVBUSY;
 	}
 
 	port->stats_busy = BFA_TRUE;
@@ -1448,7 +1445,7 @@ bfa_pport_clear_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg)
 
 	bfa_timer_start(bfa, &port->timer, bfa_port_stats_clr_timeout, port,
 			BFA_PORT_STATS_TOV);
-	return (BFA_STATUS_OK);
+	return BFA_STATUS_OK;
 }
 
 bfa_status_t
@@ -1515,7 +1512,7 @@ bfa_pport_get_qos_stats(struct bfa_s *bfa, union bfa_pport_stats_u *stats,
 	/*
 	 * QoS stats is embedded in port stats
 	 */
-	return (bfa_pport_get_stats(bfa, stats, cbfn, cbarg));
+	return bfa_pport_get_stats(bfa, stats, cbfn, cbarg);
 }
 
 bfa_status_t
@@ -1525,7 +1522,7 @@ bfa_pport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg)
 
 	if (port->stats_busy) {
 		bfa_trc(bfa, port->stats_busy);
-		return (BFA_STATUS_DEVBUSY);
+		return BFA_STATUS_DEVBUSY;
 	}
 
 	port->stats_busy = BFA_TRUE;
@@ -1536,7 +1533,7 @@ bfa_pport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg)
 
 	bfa_timer_start(bfa, &port->timer, bfa_port_stats_clr_timeout, port,
 			BFA_PORT_STATS_TOV);
-	return (BFA_STATUS_OK);
+	return BFA_STATUS_OK;
 }
 
 /**
@@ -1545,7 +1542,7 @@ bfa_pport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg)
 bfa_status_t
 bfa_pport_trunk_disable(struct bfa_s *bfa)
 {
-	return (BFA_STATUS_OK);
+	return BFA_STATUS_OK;
 }
 
 bfa_boolean_t
@@ -1562,8 +1559,8 @@ bfa_pport_is_disabled(struct bfa_s *bfa)
 {
 	struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
 
-	return (bfa_sm_to_state(hal_pport_sm_table, port->sm) ==
-		BFA_PPORT_ST_DISABLED);
+	return bfa_sm_to_state(hal_pport_sm_table, port->sm) ==
+		BFA_PPORT_ST_DISABLED;
 
 }
 
@@ -1572,7 +1569,7 @@ bfa_pport_is_ratelim(struct bfa_s *bfa)
 {
 	struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
 
-return (pport->cfg.ratelimit ? BFA_TRUE : BFA_FALSE);
+	return pport->cfg.ratelimit ? BFA_TRUE : BFA_FALSE;
 
 }
 
@@ -1620,7 +1617,7 @@ bfa_pport_cfg_ratelim_speed(struct bfa_s *bfa, enum bfa_pport_speed speed)
 
 	pport->cfg.trl_def_speed = speed;
 
-	return (BFA_STATUS_OK);
+	return BFA_STATUS_OK;
 }
 
 /**
@@ -1632,7 +1629,7 @@ bfa_pport_get_ratelim_speed(struct bfa_s *bfa)
 	struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
 
 	bfa_trc(bfa, pport->cfg.trl_def_speed);
-	return (pport->cfg.trl_def_speed);
+	return pport->cfg.trl_def_speed;
 
 }
 
diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
index 8975ed041dc0..c7ab257f10a7 100644
--- a/drivers/scsi/bfa/bfa_fcs_lport.c
+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
@@ -568,11 +568,10 @@ bfa_fcs_port_offline_actions(struct bfa_fcs_port_s *port)
 
 	__port_action[port->fabric->fab_type].offline(port);
 
-	if (bfa_fcs_fabric_is_online(port->fabric) == BFA_TRUE) {
+	if (bfa_fcs_fabric_is_online(port->fabric) == BFA_TRUE)
 		bfa_fcs_port_aen_post(port, BFA_LPORT_AEN_DISCONNECT);
-	} else {
+	else
 		bfa_fcs_port_aen_post(port, BFA_LPORT_AEN_OFFLINE);
-	}
 	bfa_fcb_port_offline(port->fcs->bfad, port->port_cfg.roles,
 			port->fabric->vf_drv,
 			(port->vport == NULL) ? NULL : port->vport->vport_drv);
@@ -777,7 +776,7 @@ bfa_fcs_port_get_rport_by_pwwn(struct bfa_fcs_port_s *port, wwn_t pwwn)
 	}
 
 	bfa_trc(port->fcs, pwwn);
-	return (NULL);
+	return NULL;
 }
 
 /**
@@ -796,7 +795,7 @@ bfa_fcs_port_get_rport_by_nwwn(struct bfa_fcs_port_s *port, wwn_t nwwn)
 	}
 
 	bfa_trc(port->fcs, nwwn);
-	return (NULL);
+	return NULL;
 }
 
 /**
@@ -870,7 +869,7 @@ bfa_fcs_port_lip(struct bfa_fcs_port_s *port)
 bfa_boolean_t
 bfa_fcs_port_is_online(struct bfa_fcs_port_s *port)
 {
-	return (bfa_sm_cmp_state(port, bfa_fcs_port_sm_online));
+	return bfa_sm_cmp_state(port, bfa_fcs_port_sm_online);
 }
 
 /**
diff --git a/drivers/scsi/bfa/bfa_fcxp.c b/drivers/scsi/bfa/bfa_fcxp.c
index 4754a0e9006a..cf0ad6782686 100644
--- a/drivers/scsi/bfa/bfa_fcxp.c
+++ b/drivers/scsi/bfa/bfa_fcxp.c
@@ -199,7 +199,7 @@ bfa_fcxp_get(struct bfa_fcxp_mod_s *fm)
 	if (fcxp)
 		list_add_tail(&fcxp->qe, &fm->fcxp_active_q);
 
-	return (fcxp);
+	return fcxp;
 }
 
 static void
@@ -503,7 +503,7 @@ bfa_fcxp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles,
 
 	fcxp = bfa_fcxp_get(BFA_FCXP_MOD(bfa));
 	if (fcxp == NULL)
-		return (NULL);
+		return NULL;
 
 	bfa_trc(bfa, fcxp->fcxp_tag);
 
@@ -568,7 +568,7 @@ bfa_fcxp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles,
 		}
 	}
 
-	return (fcxp);
+	return fcxp;
 }
 
 /**
@@ -709,7 +709,7 @@ bfa_status_t
 bfa_fcxp_abort(struct bfa_fcxp_s *fcxp)
 {
 	bfa_assert(0);
-	return (BFA_STATUS_OK);
+	return BFA_STATUS_OK;
 }
 
 void
diff --git a/drivers/scsi/bfa/bfa_intr.c b/drivers/scsi/bfa/bfa_intr.c
index 0ca125712a04..b36540e4ed76 100644
--- a/drivers/scsi/bfa/bfa_intr.c
+++ b/drivers/scsi/bfa/bfa_intr.c
@@ -59,7 +59,7 @@ bfa_intx(struct bfa_s *bfa)
 	qintr = intr & __HFN_INT_RME_MASK;
 	bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, qintr);
 
-	for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue ++) {
+	for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) {
 		if (intr & (__HFN_INT_RME_Q0 << queue))
 			bfa_msix_rspq(bfa, queue & (BFI_IOC_MAX_CQS - 1));
 	}
diff --git a/drivers/scsi/bfa/bfa_intr_priv.h b/drivers/scsi/bfa/bfa_intr_priv.h
index 8ce6e6b105c8..5fc301cf4d1b 100644
--- a/drivers/scsi/bfa/bfa_intr_priv.h
+++ b/drivers/scsi/bfa/bfa_intr_priv.h
@@ -26,9 +26,9 @@ void bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m);
 void bfa_isr_bind(enum bfi_mclass mc, bfa_isr_func_t isr_func);
 
 
-#define bfa_reqq_pi(__bfa, __reqq)	(__bfa)->iocfc.req_cq_pi[__reqq]
+#define bfa_reqq_pi(__bfa, __reqq)	((__bfa)->iocfc.req_cq_pi[__reqq])
 #define bfa_reqq_ci(__bfa, __reqq)					\
-	*(u32 *)((__bfa)->iocfc.req_cq_shadow_ci[__reqq].kva)
+	(*(u32 *)((__bfa)->iocfc.req_cq_shadow_ci[__reqq].kva))
 
 #define bfa_reqq_full(__bfa, __reqq)				\
 	(((bfa_reqq_pi(__bfa, __reqq) + 1) &			\
@@ -50,14 +50,16 @@ void bfa_isr_bind(enum bfi_mclass mc, bfa_isr_func_t isr_func);
 } while (0)
 
 #define bfa_rspq_pi(__bfa, __rspq)					\
-	*(u32 *)((__bfa)->iocfc.rsp_cq_shadow_pi[__rspq].kva)
+	(*(u32 *)((__bfa)->iocfc.rsp_cq_shadow_pi[__rspq].kva))
 
-#define bfa_rspq_ci(__bfa, __rspq)	(__bfa)->iocfc.rsp_cq_ci[__rspq]
+#define bfa_rspq_ci(__bfa, __rspq)	((__bfa)->iocfc.rsp_cq_ci[__rspq])
 #define bfa_rspq_elem(__bfa, __rspq, __ci)				\
-	&((struct bfi_msg_s *)((__bfa)->iocfc.rsp_cq_ba[__rspq].kva))[__ci]
+	(&((struct bfi_msg_s *)((__bfa)->iocfc.rsp_cq_ba[__rspq].kva))[__ci])
 
-#define CQ_INCR(__index, __size)					\
-			(__index)++; (__index) &= ((__size) - 1)
+#define CQ_INCR(__index, __size) do {					\
+			(__index)++;					\
+			(__index) &= ((__size) - 1);      \
+} while (0)
 
 /**
  * Queue element to wait for room in request queue. FIFO order is
@@ -94,7 +96,7 @@ bfa_reqq_winit(struct bfa_reqq_wait_s *wqe, void (*qresume) (void *cbarg),
 	wqe->cbarg = cbarg;
 }
 
-#define bfa_reqq(__bfa, __reqq)	&(__bfa)->reqq_waitq[__reqq]
+#define bfa_reqq(__bfa, __reqq)	(&(__bfa)->reqq_waitq[__reqq])
 
 /**
  * static inline void
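
The CQ_INCR rewrite above is the classic multi-statement-macro fix: wrapped in do { } while (0), the body acts as a single statement, so a braceless if guards all of it instead of just the first line. A minimal reproduction with stand-in names:

#define CQ_INCR_BAD(i, sz)	(i)++; (i) &= ((sz) - 1)
#define CQ_INCR_OK(i, sz)	do { (i)++; (i) &= ((sz) - 1); } while (0)

static unsigned int advance(unsigned int ci, unsigned int sz, int ready)
{
	if (ready)
		CQ_INCR_OK(ci, sz);	/* with CQ_INCR_BAD, the mask would
					 * run even when !ready */
	return ci;
}

int main(void)
{
	return advance(3, 4, 1) == 0 ? 0 : 1;	/* 4 & 3 wraps to 0 */
}
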
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index 149348934ce3..397d7e9eade5 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -51,7 +51,7 @@ BFA_TRC_FILE(HAL, IOC);
 	 (sizeof(struct bfa_trc_mod_s) -			\
 	  BFA_TRC_MAX * sizeof(struct bfa_trc_s)))
 #define BFA_DBG_FWTRC_OFF(_fn)	(BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn))
-#define bfa_ioc_stats(_ioc, _stats)	(_ioc)->stats._stats ++
+#define bfa_ioc_stats(_ioc, _stats)	((_ioc)->stats._stats++)
 
 #define BFA_FLASH_CHUNK_NO(off)         (off / BFI_FLASH_CHUNK_SZ_WORDS)
 #define BFA_FLASH_OFFSET_IN_CHUNK(off)  (off % BFI_FLASH_CHUNK_SZ_WORDS)
@@ -1953,8 +1953,8 @@ bfa_ioc_error_isr(struct bfa_ioc_s *ioc)
 bfa_boolean_t
 bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
 {
-	return (bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling)
-		|| bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled));
+	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling)
+		|| bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
 }
 
 /**
@@ -1963,9 +1963,9 @@ bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
 bfa_boolean_t
 bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc)
 {
-	return (bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset)
+	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset)
 		|| bfa_fsm_cmp_state(ioc, bfa_ioc_sm_fwcheck)
-		|| bfa_fsm_cmp_state(ioc, bfa_ioc_sm_mismatch));
+		|| bfa_fsm_cmp_state(ioc, bfa_ioc_sm_mismatch);
 }
 
 #define bfa_ioc_state_disabled(__sm)		\
diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
index 58efd4b13143..7c30f05ab137 100644
--- a/drivers/scsi/bfa/bfa_ioc.h
+++ b/drivers/scsi/bfa/bfa_ioc.h
@@ -179,16 +179,16 @@ struct bfa_ioc_s {
 	struct bfa_ioc_mbox_mod_s mbox_mod;
 };
 
-#define bfa_ioc_pcifn(__ioc)		(__ioc)->pcidev.pci_func
-#define bfa_ioc_devid(__ioc)		(__ioc)->pcidev.device_id
-#define bfa_ioc_bar0(__ioc)		(__ioc)->pcidev.pci_bar_kva
+#define bfa_ioc_pcifn(__ioc)		((__ioc)->pcidev.pci_func)
+#define bfa_ioc_devid(__ioc)		((__ioc)->pcidev.device_id)
+#define bfa_ioc_bar0(__ioc)		((__ioc)->pcidev.pci_bar_kva)
 #define bfa_ioc_portid(__ioc)		((__ioc)->port_id)
 #define bfa_ioc_fetch_stats(__ioc, __stats) \
-		((__stats)->drv_stats) = (__ioc)->stats
+		(((__stats)->drv_stats) = (__ioc)->stats)
 #define bfa_ioc_clr_stats(__ioc)	\
 		bfa_os_memset(&(__ioc)->stats, 0, sizeof((__ioc)->stats))
-#define bfa_ioc_maxfrsize(__ioc)	(__ioc)->attr->maxfrsize
-#define bfa_ioc_rx_bbcredit(__ioc)	(__ioc)->attr->rx_bbcredit
+#define bfa_ioc_maxfrsize(__ioc)	((__ioc)->attr->maxfrsize)
+#define bfa_ioc_rx_bbcredit(__ioc)	((__ioc)->attr->rx_bbcredit)
 #define bfa_ioc_speed_sup(__ioc)	\
 	BFI_ADAPTER_GETP(SPEED, (__ioc)->attr->adapter_prop)
 
diff --git a/drivers/scsi/bfa/bfa_iocfc.c b/drivers/scsi/bfa/bfa_iocfc.c
index 12350b022d63..d7ab792a9e54 100644
--- a/drivers/scsi/bfa/bfa_iocfc.c
+++ b/drivers/scsi/bfa/bfa_iocfc.c
@@ -794,7 +794,7 @@ bfa_iocfc_get_stats(struct bfa_s *bfa, struct bfa_iocfc_stats_s *stats,
 
 	if (iocfc->stats_busy) {
 		bfa_trc(bfa, iocfc->stats_busy);
-		return (BFA_STATUS_DEVBUSY);
+		return BFA_STATUS_DEVBUSY;
 	}
 
 	iocfc->stats_busy = BFA_TRUE;
@@ -804,7 +804,7 @@ bfa_iocfc_get_stats(struct bfa_s *bfa, struct bfa_iocfc_stats_s *stats,
 
 	bfa_iocfc_stats_query(bfa);
 
-	return (BFA_STATUS_OK);
+	return BFA_STATUS_OK;
 }
 
 bfa_status_t
@@ -814,7 +814,7 @@ bfa_iocfc_clear_stats(struct bfa_s *bfa, bfa_cb_ioc_t cbfn, void *cbarg)
 
 	if (iocfc->stats_busy) {
 		bfa_trc(bfa, iocfc->stats_busy);
-		return (BFA_STATUS_DEVBUSY);
+		return BFA_STATUS_DEVBUSY;
 	}
 
 	iocfc->stats_busy = BFA_TRUE;
@@ -822,7 +822,7 @@ bfa_iocfc_clear_stats(struct bfa_s *bfa, bfa_cb_ioc_t cbfn, void *cbarg)
 	iocfc->stats_cbarg = cbarg;
 
 	bfa_iocfc_stats_clear(bfa);
-	return (BFA_STATUS_OK);
+	return BFA_STATUS_OK;
 }
 
 /**
diff --git a/drivers/scsi/bfa/bfa_iocfc.h b/drivers/scsi/bfa/bfa_iocfc.h
index 7ad177ed4cfc..ce9a830a4207 100644
--- a/drivers/scsi/bfa/bfa_iocfc.h
+++ b/drivers/scsi/bfa/bfa_iocfc.h
@@ -107,13 +107,13 @@ struct bfa_iocfc_s {
 
 #define bfa_lpuid(__bfa)		bfa_ioc_portid(&(__bfa)->ioc)
 #define bfa_msix_init(__bfa, __nvecs)	\
-	(__bfa)->iocfc.hwif.hw_msix_init(__bfa, __nvecs)
+	((__bfa)->iocfc.hwif.hw_msix_init(__bfa, __nvecs))
 #define bfa_msix_install(__bfa)	\
-	(__bfa)->iocfc.hwif.hw_msix_install(__bfa)
+	((__bfa)->iocfc.hwif.hw_msix_install(__bfa))
 #define bfa_msix_uninstall(__bfa)	\
-	(__bfa)->iocfc.hwif.hw_msix_uninstall(__bfa)
+	((__bfa)->iocfc.hwif.hw_msix_uninstall(__bfa))
 #define bfa_isr_mode_set(__bfa, __msix)	\
-	(__bfa)->iocfc.hwif.hw_isr_mode_set(__bfa, __msix)
+	((__bfa)->iocfc.hwif.hw_isr_mode_set(__bfa, __msix))
 #define bfa_msix_getvecs(__bfa, __vecmap, __nvecs, __maxvec)	\
 	(__bfa)->iocfc.hwif.hw_msix_getvecs(__bfa, __vecmap, __nvecs, __maxvec)
 
diff --git a/drivers/scsi/bfa/bfa_ioim.c b/drivers/scsi/bfa/bfa_ioim.c
index 7ae2552e1e14..f81d359b7089 100644
--- a/drivers/scsi/bfa/bfa_ioim.c
+++ b/drivers/scsi/bfa/bfa_ioim.c
@@ -105,13 +105,13 @@ bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
 				bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
 				list_del(&ioim->qe);
 				list_add_tail(&ioim->qe,
-					&ioim->fcpim->ioim_comp_q);
+						&ioim->fcpim->ioim_comp_q);
 				bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
 						__bfa_cb_ioim_pathtov, ioim);
 			} else {
 				list_del(&ioim->qe);
 				list_add_tail(&ioim->qe,
-					&ioim->itnim->pending_q);
+						&ioim->itnim->pending_q);
 			}
 			break;
 		}
diff --git a/drivers/scsi/bfa/bfa_itnim.c b/drivers/scsi/bfa/bfa_itnim.c
index 4d5c61a4f85c..eabf7d38bd09 100644
--- a/drivers/scsi/bfa/bfa_itnim.c
+++ b/drivers/scsi/bfa/bfa_itnim.c
@@ -1029,7 +1029,7 @@ bfa_itnim_create(struct bfa_s *bfa, struct bfa_rport_s *rport, void *ditn)
 	bfa_stats(itnim, creates);
 	bfa_sm_send_event(itnim, BFA_ITNIM_SM_CREATE);
 
-	return (itnim);
+	return itnim;
 }
 
 void
@@ -1061,7 +1061,7 @@ bfa_itnim_offline(struct bfa_itnim_s *itnim)
 bfa_boolean_t
 bfa_itnim_hold_io(struct bfa_itnim_s *itnim)
 {
-	return (
+	return
 		itnim->fcpim->path_tov && itnim->iotov_active &&
 		(bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwcreate) ||
 		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_sler) ||
@@ -1069,7 +1069,7 @@ bfa_itnim_hold_io(struct bfa_itnim_s *itnim)
 		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwdelete) ||
 		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_offline) ||
 		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_iocdisable))
-);
+	;
 }
 
 void
diff --git a/drivers/scsi/bfa/bfa_log.c b/drivers/scsi/bfa/bfa_log.c
index c2735e55cf03..e7514016c9c6 100644
--- a/drivers/scsi/bfa/bfa_log.c
+++ b/drivers/scsi/bfa/bfa_log.c
@@ -231,9 +231,9 @@ bfa_log_get_level(struct bfa_log_mod_s *log_mod, int mod_id)
 		return BFA_LOG_INVALID;
 
 	if (log_mod)
-		return (log_mod->log_level[mod_id]);
+		return log_mod->log_level[mod_id];
 	else
-		return (bfa_log_info[mod_id].level);
+		return bfa_log_info[mod_id].level;
 }
 
 enum bfa_log_severity
diff --git a/drivers/scsi/bfa/bfa_port_priv.h b/drivers/scsi/bfa/bfa_port_priv.h
index 4b97e2759908..51f698a06b6d 100644
--- a/drivers/scsi/bfa/bfa_port_priv.h
+++ b/drivers/scsi/bfa/bfa_port_priv.h
@@ -59,8 +59,8 @@ struct bfa_pport_s {
 	u8			*stats_kva;
 	u64		stats_pa;
 	union bfa_pport_stats_u *stats;	/*  pport stats */
-	u32		mypid : 24;
-	u32		rsvd_b : 8;
+	u32		mypid:24;
+	u32		rsvd_b:8;
 	struct bfa_timer_s 	timer;	/*  timer */
 	union bfa_pport_stats_u 	*stats_ret;
 					/*  driver stats location */
diff --git a/drivers/scsi/bfa/bfa_rport.c b/drivers/scsi/bfa/bfa_rport.c
index 16da77a8db28..3e1990a74258 100644
--- a/drivers/scsi/bfa/bfa_rport.c
+++ b/drivers/scsi/bfa/bfa_rport.c
@@ -677,7 +677,7 @@ bfa_rport_alloc(struct bfa_rport_mod_s *mod)
 	if (rport)
 		list_add_tail(&rport->qe, &mod->rp_active_q);
 
-	return (rport);
+	return rport;
 }
 
 static void
@@ -834,7 +834,7 @@ bfa_rport_create(struct bfa_s *bfa, void *rport_drv)
 	rp = bfa_rport_alloc(BFA_RPORT_MOD(bfa));
 
 	if (rp == NULL)
-		return (NULL);
+		return NULL;
 
 	rp->bfa = bfa;
 	rp->rport_drv = rport_drv;
@@ -843,7 +843,7 @@ bfa_rport_create(struct bfa_s *bfa, void *rport_drv)
 	bfa_assert(bfa_sm_cmp_state(rp, bfa_rport_sm_uninit));
 	bfa_sm_send_event(rp, BFA_RPORT_SM_CREATE);
 
-	return (rp);
+	return rp;
 }
 
 void
diff --git a/drivers/scsi/bfa/bfa_tskim.c b/drivers/scsi/bfa/bfa_tskim.c
index 010d40d1e5d3..ff7a4dc0bf3c 100644
--- a/drivers/scsi/bfa/bfa_tskim.c
+++ b/drivers/scsi/bfa/bfa_tskim.c
@@ -23,13 +23,14 @@ BFA_TRC_FILE(HAL, TSKIM);
 /**
  * task management completion handling
  */
-#define bfa_tskim_qcomp(__tskim, __cbfn) do {			     \
-	bfa_cb_queue((__tskim)->bfa, &(__tskim)->hcb_qe, __cbfn, (__tskim)); \
+#define bfa_tskim_qcomp(__tskim, __cbfn) do {			\
+	bfa_cb_queue((__tskim)->bfa, &(__tskim)->hcb_qe,	\
+			 __cbfn, (__tskim));      \
 	bfa_tskim_notify_comp(__tskim);      \
 } while (0)
 
-#define bfa_tskim_notify_comp(__tskim) do {				     \
-	if ((__tskim)->notify)					     	     \
+#define bfa_tskim_notify_comp(__tskim) do {			 \
+	if ((__tskim)->notify)					 \
 		bfa_itnim_tskdone((__tskim)->itnim);      \
 } while (0)
 
diff --git a/drivers/scsi/bfa/bfa_uf.c b/drivers/scsi/bfa/bfa_uf.c
index ff5f9deb1b22..4b3c2417d180 100644
--- a/drivers/scsi/bfa/bfa_uf.c
+++ b/drivers/scsi/bfa/bfa_uf.c
@@ -185,7 +185,7 @@ bfa_uf_get(struct bfa_uf_mod_s *uf_mod)
 	struct bfa_uf_s   *uf;
 
 	bfa_q_deq(&uf_mod->uf_free_q, &uf);
-	return (uf);
+	return uf;
 }
 
 static void
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index 6f2be5abf561..b52b773d49d9 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -188,8 +188,8 @@ static struct bfad_port_s *
 bfad_get_drv_port(struct bfad_s *bfad, struct bfad_vf_s *vf_drv,
 		  struct bfad_vport_s *vp_drv)
 {
-	return ((vp_drv) ? (&(vp_drv)->drv_port)
-		: ((vf_drv) ? (&(vf_drv)->base_port) : (&(bfad)->pport)));
+	return (vp_drv) ? (&(vp_drv)->drv_port)
+		: ((vf_drv) ? (&(vf_drv)->base_port) : (&(bfad)->pport));
 }
 
 struct bfad_port_s *
@@ -716,7 +716,7 @@ bfad_drv_init(struct bfad_s *bfad)
 	if ((bfad->bfad_flags & BFAD_MSIX_ON)
 	    && bfad_install_msix_handler(bfad)) {
 		printk(KERN_WARNING "%s: install_msix failed, bfad%d\n",
-		       __FUNCTION__, bfad->inst_no);
+		       __func__, bfad->inst_no);
 	}
 
 	bfad_init_timer(bfad);
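
The __FUNCTION__ to __func__ conversions here and in bfad_im.c below trade the GNU spelling for the C99-standard predefined identifier; both expand to the enclosing function's name:

#include <stdio.h>

static void who_am_i(void)
{
	printf("%s\n", __func__);	/* prints "who_am_i" */
}

int main(void)
{
	who_am_i();
	return 0;
}
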
diff --git a/drivers/scsi/bfa/bfad_fwimg.c b/drivers/scsi/bfa/bfad_fwimg.c
index bd34b0db2d6b..2ad65f275a92 100644
--- a/drivers/scsi/bfa/bfad_fwimg.c
+++ b/drivers/scsi/bfa/bfad_fwimg.c
@@ -65,10 +65,10 @@ bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
 	memcpy(*bfi_image, fw->data, fw->size);
 	*bfi_image_size = fw->size/sizeof(u32);
 
-	return(*bfi_image);
+	return *bfi_image;
 
 error:
-	return(NULL);
+	return NULL;
 }
 
 u32 *
@@ -78,12 +78,12 @@ bfad_get_firmware_buf(struct pci_dev *pdev)
 		if (bfi_image_ct_size == 0)
 			bfad_read_firmware(pdev, &bfi_image_ct,
 				&bfi_image_ct_size, BFAD_FW_FILE_CT);
-		return(bfi_image_ct);
+		return bfi_image_ct;
 	} else {
 		if (bfi_image_cb_size == 0)
 			bfad_read_firmware(pdev, &bfi_image_cb,
 				&bfi_image_cb_size, BFAD_FW_FILE_CB);
-		return(bfi_image_cb);
+		return bfi_image_cb;
 	}
 }
 
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index 55d012a9a668..f788c2a0ab07 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -1050,7 +1050,7 @@ bfad_im_itnim_work_handler(struct work_struct *work)
 		} else {
 			printk(KERN_WARNING
 				"%s: itnim %llx is already in online state\n",
-				__FUNCTION__,
+				__func__,
 				bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim));
 		}
 
diff --git a/drivers/scsi/bfa/bfad_im_compat.h b/drivers/scsi/bfa/bfad_im_compat.h
index 1d3e74ec338c..b36be15044a4 100644
--- a/drivers/scsi/bfa/bfad_im_compat.h
+++ b/drivers/scsi/bfa/bfad_im_compat.h
@@ -31,7 +31,7 @@ u32 *bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
 static inline u32 *
 bfad_load_fwimg(struct pci_dev *pdev)
 {
-	return(bfad_get_firmware_buf(pdev));
+	return bfad_get_firmware_buf(pdev);
 }
 
 static inline void
diff --git a/drivers/scsi/bfa/bfad_intr.c b/drivers/scsi/bfa/bfad_intr.c
index f104e029cac9..7de8832f6fee 100644
--- a/drivers/scsi/bfa/bfad_intr.c
+++ b/drivers/scsi/bfa/bfad_intr.c
@@ -23,13 +23,12 @@ BFA_TRC_FILE(LDRV, INTR);
 /**
  *  bfa_isr BFA driver interrupt functions
  */
-irqreturn_t bfad_intx(int irq, void *dev_id);
 static int msix_disable;
 module_param(msix_disable, int, S_IRUGO | S_IWUSR);
 /**
  * Line based interrupt handler.
  */
-irqreturn_t
+static irqreturn_t
 bfad_intx(int irq, void *dev_id)
 {
 	struct bfad_s         *bfad = dev_id;
diff --git a/drivers/scsi/bfa/fabric.c b/drivers/scsi/bfa/fabric.c
index a8b14c47b009..a4b5dd449573 100644
--- a/drivers/scsi/bfa/fabric.c
+++ b/drivers/scsi/bfa/fabric.c
@@ -36,12 +36,12 @@ BFA_TRC_FILE(FCS, FABRIC);
 #define BFA_FCS_FABRIC_RETRY_DELAY	(2000)	/* Milliseconds */
 #define BFA_FCS_FABRIC_CLEANUP_DELAY	(10000)	/* Milliseconds */
 
-#define bfa_fcs_fabric_set_opertype(__fabric) do {          \
-    if (bfa_pport_get_topology((__fabric)->fcs->bfa)    \
-				== BFA_PPORT_TOPOLOGY_P2P)   \
-	    (__fabric)->oper_type = BFA_PPORT_TYPE_NPORT;       \
-    else                                                    \
-	    (__fabric)->oper_type = BFA_PPORT_TYPE_NLPORT;      \
+#define bfa_fcs_fabric_set_opertype(__fabric) do {             \
+	if (bfa_pport_get_topology((__fabric)->fcs->bfa)       \
+				== BFA_PPORT_TOPOLOGY_P2P)     \
+		(__fabric)->oper_type = BFA_PPORT_TYPE_NPORT;  \
+	else                                                   \
+		(__fabric)->oper_type = BFA_PPORT_TYPE_NLPORT; \
 } while (0)
 
 /*
@@ -887,7 +887,7 @@ bfa_fcs_fabric_modsusp(struct bfa_fcs_s *fcs)
 bfa_boolean_t
 bfa_fcs_fabric_is_loopback(struct bfa_fcs_fabric_s *fabric)
 {
-	return (bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_loopback));
+	return bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_loopback);
 }
 
 enum bfa_pport_type
@@ -974,7 +974,7 @@ bfa_fcs_fabric_port_delete_comp(struct bfa_fcs_fabric_s *fabric)
 int
 bfa_fcs_fabric_is_online(struct bfa_fcs_fabric_s *fabric)
 {
-	return (bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_online));
+	return bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_online);
 }
 
 
@@ -1015,7 +1015,7 @@ bfa_fcs_fabric_vport_lookup(struct bfa_fcs_fabric_s *fabric, wwn_t pwwn)
 u16
 bfa_fcs_fabric_vport_count(struct bfa_fcs_fabric_s *fabric)
 {
-	return (fabric->num_vports);
+	return fabric->num_vports;
 }
 
 /**
diff --git a/drivers/scsi/bfa/fcbuild.c b/drivers/scsi/bfa/fcbuild.c
index d174706b9caa..fee5456451cb 100644
--- a/drivers/scsi/bfa/fcbuild.c
+++ b/drivers/scsi/bfa/fcbuild.c
@@ -188,14 +188,14 @@ fc_els_rsp_parse(struct fchs_s *fchs, int len)
 	switch (els_cmd->els_code) {
 	case FC_ELS_LS_RJT:
 		if (ls_rjt->reason_code == FC_LS_RJT_RSN_LOGICAL_BUSY)
-			return (FC_PARSE_BUSY);
+			return FC_PARSE_BUSY;
 		else
-			return (FC_PARSE_FAILURE);
+			return FC_PARSE_FAILURE;
 
 	case FC_ELS_ACC:
-		return (FC_PARSE_OK);
+		return FC_PARSE_OK;
 	}
-	return (FC_PARSE_OK);
+	return FC_PARSE_OK;
 }
 
 static void
@@ -228,7 +228,7 @@ fc_plogi_x_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
 	bfa_os_memcpy(&plogi->port_name, &port_name, sizeof(wwn_t));
 	bfa_os_memcpy(&plogi->node_name, &node_name, sizeof(wwn_t));
 
-	return (sizeof(struct fc_logi_s));
+	return sizeof(struct fc_logi_s);
 }
 
 u16
@@ -267,7 +267,7 @@ fc_flogi_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id,
 	flogi->csp.npiv_supp    = 1; /* @todo. field name is not correct */
 	vvl_info[0]	= bfa_os_htonl(FLOGI_VVL_BRCD);
 
-	return (sizeof(struct fc_logi_s));
+	return sizeof(struct fc_logi_s);
 }
 
 u16
@@ -287,7 +287,7 @@ fc_flogi_acc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id,
 
 	flogi->csp.bbcred = bfa_os_htons(local_bb_credits);
 
-	return (sizeof(struct fc_logi_s));
+	return sizeof(struct fc_logi_s);
 }
 
 u16
@@ -306,7 +306,7 @@ fc_fdisc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id,
 	flogi->port_name = port_name;
 	flogi->node_name = node_name;
 
-	return (sizeof(struct fc_logi_s));
+	return sizeof(struct fc_logi_s);
 }
 
 u16
@@ -338,26 +338,26 @@ fc_plogi_rsp_parse(struct fchs_s *fchs, int len, wwn_t port_name)
 	case FC_ELS_LS_RJT:
 		ls_rjt = (struct fc_ls_rjt_s *) (fchs + 1);
 		if (ls_rjt->reason_code == FC_LS_RJT_RSN_LOGICAL_BUSY)
-			return (FC_PARSE_BUSY);
+			return FC_PARSE_BUSY;
 		else
-			return (FC_PARSE_FAILURE);
+			return FC_PARSE_FAILURE;
 	case FC_ELS_ACC:
 		plogi = (struct fc_logi_s *) (fchs + 1);
 		if (len < sizeof(struct fc_logi_s))
-			return (FC_PARSE_FAILURE);
+			return FC_PARSE_FAILURE;
 
 		if (!wwn_is_equal(plogi->port_name, port_name))
-			return (FC_PARSE_FAILURE);
+			return FC_PARSE_FAILURE;
 
 		if (!plogi->class3.class_valid)
-			return (FC_PARSE_FAILURE);
+			return FC_PARSE_FAILURE;
 
 		if (bfa_os_ntohs(plogi->class3.rxsz) < (FC_MIN_PDUSZ))
-			return (FC_PARSE_FAILURE);
+			return FC_PARSE_FAILURE;
 
-		return (FC_PARSE_OK);
+		return FC_PARSE_OK;
 	default:
-		return (FC_PARSE_FAILURE);
+		return FC_PARSE_FAILURE;
 	}
 }
 
@@ -372,7 +372,7 @@ fc_plogi_parse(struct fchs_s *fchs)
 	if ((bfa_os_ntohs(plogi->class3.rxsz) < FC_MIN_PDUSZ)
 	    || (bfa_os_ntohs(plogi->class3.rxsz) > FC_MAX_PDUSZ)
 	    || (plogi->class3.rxsz == 0))
-		return (FC_PARSE_FAILURE);
+		return FC_PARSE_FAILURE;
 
 	return FC_PARSE_OK;
 }
@@ -393,7 +393,7 @@ fc_prli_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
 	prli->parampage.servparams.task_retry_id = 0;
 	prli->parampage.servparams.confirm       = 1;
 
-	return (sizeof(struct fc_prli_s));
+	return sizeof(struct fc_prli_s);
 }
 
 u16
@@ -414,41 +414,41 @@ fc_prli_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
 
 	prli->parampage.rspcode = FC_PRLI_ACC_XQTD;
 
-	return (sizeof(struct fc_prli_s));
+	return sizeof(struct fc_prli_s);
 }
 
 enum fc_parse_status
 fc_prli_rsp_parse(struct fc_prli_s *prli, int len)
 {
 	if (len < sizeof(struct fc_prli_s))
-		return (FC_PARSE_FAILURE);
+		return FC_PARSE_FAILURE;
 
 	if (prli->command != FC_ELS_ACC)
-		return (FC_PARSE_FAILURE);
+		return FC_PARSE_FAILURE;
 
 	if ((prli->parampage.rspcode != FC_PRLI_ACC_XQTD)
 	    && (prli->parampage.rspcode != FC_PRLI_ACC_PREDEF_IMG))
-		return (FC_PARSE_FAILURE);
+		return FC_PARSE_FAILURE;
 
 	if (prli->parampage.servparams.target != 1)
-		return (FC_PARSE_FAILURE);
+		return FC_PARSE_FAILURE;
 
-	return (FC_PARSE_OK);
+	return FC_PARSE_OK;
 }
 
 enum fc_parse_status
 fc_prli_parse(struct fc_prli_s *prli)
 {
 	if (prli->parampage.type != FC_TYPE_FCP)
-		return (FC_PARSE_FAILURE);
+		return FC_PARSE_FAILURE;
 
 	if (!prli->parampage.imagepair)
-		return (FC_PARSE_FAILURE);
+		return FC_PARSE_FAILURE;
 
 	if (!prli->parampage.servparams.initiator)
-		return (FC_PARSE_FAILURE);
+		return FC_PARSE_FAILURE;
 
-	return (FC_PARSE_OK);
+	return FC_PARSE_OK;
 }
 
 u16
@@ -462,7 +462,7 @@ fc_logo_build(struct fchs_s *fchs, struct fc_logo_s *logo, u32 d_id,
 	logo->nport_id = (s_id);
 	logo->orig_port_name = port_name;
 
-	return (sizeof(struct fc_logo_s));
+	return sizeof(struct fc_logo_s);
 }
 
 static          u16
@@ -484,7 +484,7 @@ fc_adisc_x_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id,
 	adisc->orig_node_name = node_name;
 	adisc->nport_id = (s_id);
 
-	return (sizeof(struct fc_adisc_s));
+	return sizeof(struct fc_adisc_s);
 }
 
 u16
@@ -511,15 +511,15 @@ fc_adisc_rsp_parse(struct fc_adisc_s *adisc, int len, wwn_t port_name,
 {
 
 	if (len < sizeof(struct fc_adisc_s))
-		return (FC_PARSE_FAILURE);
+		return FC_PARSE_FAILURE;
 
 	if (adisc->els_cmd.els_code != FC_ELS_ACC)
-		return (FC_PARSE_FAILURE);
+		return FC_PARSE_FAILURE;
 
 	if (!wwn_is_equal(adisc->orig_port_name, port_name))
-		return (FC_PARSE_FAILURE);
+		return FC_PARSE_FAILURE;
 
-	return (FC_PARSE_OK);
+	return FC_PARSE_OK;
 }
 
 enum fc_parse_status
@@ -529,14 +529,14 @@ fc_adisc_parse(struct fchs_s *fchs, void *pld, u32 host_dap,
 	struct fc_adisc_s     *adisc = (struct fc_adisc_s *) pld;
 
 	if (adisc->els_cmd.els_code != FC_ELS_ACC)
-		return (FC_PARSE_FAILURE);
+		return FC_PARSE_FAILURE;
 
 	if ((adisc->nport_id == (host_dap))
 	    && wwn_is_equal(adisc->orig_port_name, port_name)
 	    && wwn_is_equal(adisc->orig_node_name, node_name))
-		return (FC_PARSE_OK);
+		return FC_PARSE_OK;
 
-	return (FC_PARSE_FAILURE);
+	return FC_PARSE_FAILURE;
 }
 
 enum fc_parse_status
@@ -550,13 +550,13 @@ fc_pdisc_parse(struct fchs_s *fchs, wwn_t node_name, wwn_t port_name)
 	if ((bfa_os_ntohs(pdisc->class3.rxsz) <
 		 (FC_MIN_PDUSZ - sizeof(struct fchs_s)))
 	    || (pdisc->class3.rxsz == 0))
-		return (FC_PARSE_FAILURE);
+		return FC_PARSE_FAILURE;
 
 	if (!wwn_is_equal(pdisc->port_name, port_name))
-		return (FC_PARSE_FAILURE);
+		return FC_PARSE_FAILURE;
 
 	if (!wwn_is_equal(pdisc->node_name, node_name))
-		return (FC_PARSE_FAILURE);
+		return FC_PARSE_FAILURE;
 
 	return FC_PARSE_OK;
 }
@@ -570,7 +570,7 @@ fc_abts_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id)
 	fchs->s_id = (s_id);
 	fchs->ox_id = bfa_os_htons(ox_id);
 
-	return (sizeof(struct fchs_s));
+	return sizeof(struct fchs_s);
 }
 
 enum fc_parse_status
@@ -578,9 +578,9 @@ fc_abts_rsp_parse(struct fchs_s *fchs, int len)
 {
 	if ((fchs->cat_info == FC_CAT_BA_ACC)
 	    || (fchs->cat_info == FC_CAT_BA_RJT))
-		return (FC_PARSE_OK);
+		return FC_PARSE_OK;
 
-	return (FC_PARSE_FAILURE);
+	return FC_PARSE_FAILURE;
 }
 
 u16
@@ -597,7 +597,7 @@ fc_rrq_build(struct fchs_s *fchs, struct fc_rrq_s *rrq, u32 d_id,
 	rrq->ox_id = bfa_os_htons(rrq_oxid);
 	rrq->rx_id = FC_RXID_ANY;
 
-	return (sizeof(struct fc_rrq_s));
+	return sizeof(struct fc_rrq_s);
 }
 
 u16
@@ -611,7 +611,7 @@ fc_logo_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
 	memset(acc, 0, sizeof(struct fc_els_cmd_s));
 	acc->els_code = FC_ELS_ACC;
 
-	return (sizeof(struct fc_els_cmd_s));
+	return sizeof(struct fc_els_cmd_s);
 }
 
 u16
@@ -627,7 +627,7 @@ fc_ls_rjt_build(struct fchs_s *fchs, struct fc_ls_rjt_s *ls_rjt, u32 d_id,
 	ls_rjt->reason_code_expl = reason_code_expl;
 	ls_rjt->vendor_unique = 0x00;
 
-	return (sizeof(struct fc_ls_rjt_s));
+	return sizeof(struct fc_ls_rjt_s);
 }
 
 u16
@@ -643,7 +643,7 @@ fc_ba_acc_build(struct fchs_s *fchs, struct fc_ba_acc_s *ba_acc, u32 d_id,
 	ba_acc->ox_id = fchs->ox_id;
 	ba_acc->rx_id = fchs->rx_id;
 
-	return (sizeof(struct fc_ba_acc_s));
+	return sizeof(struct fc_ba_acc_s);
 }
 
 u16
@@ -654,7 +654,7 @@ fc_ls_acc_build(struct fchs_s *fchs, struct fc_els_cmd_s *els_cmd,
 	memset(els_cmd, 0, sizeof(struct fc_els_cmd_s));
 	els_cmd->els_code = FC_ELS_ACC;
 
-	return (sizeof(struct fc_els_cmd_s));
+	return sizeof(struct fc_els_cmd_s);
 }
 
 int
@@ -696,7 +696,7 @@ fc_tprlo_acc_build(struct fchs_s *fchs, struct fc_tprlo_acc_s *tprlo_acc,
 		tprlo_acc->tprlo_acc_params[page].orig_process_assc = 0;
 		tprlo_acc->tprlo_acc_params[page].resp_process_assc = 0;
 	}
-	return (bfa_os_ntohs(tprlo_acc->payload_len));
+	return bfa_os_ntohs(tprlo_acc->payload_len);
 }
 
 u16
@@ -721,7 +721,7 @@ fc_prlo_acc_build(struct fchs_s *fchs, struct fc_prlo_acc_s *prlo_acc,
 		prlo_acc->prlo_acc_params[page].resp_process_assc = 0;
 	}
 
-	return (bfa_os_ntohs(prlo_acc->payload_len));
+	return bfa_os_ntohs(prlo_acc->payload_len);
 }
 
 u16
@@ -735,7 +735,7 @@ fc_rnid_build(struct fchs_s *fchs, struct fc_rnid_cmd_s *rnid, u32 d_id,
 	rnid->els_cmd.els_code = FC_ELS_RNID;
 	rnid->node_id_data_format = data_format;
 
-	return (sizeof(struct fc_rnid_cmd_s));
+	return sizeof(struct fc_rnid_cmd_s);
 }
 
 u16
@@ -759,10 +759,10 @@ fc_rnid_acc_build(struct fchs_s *fchs, struct fc_rnid_acc_s *rnid_acc,
 		rnid_acc->specific_id_data_length =
 			sizeof(struct fc_rnid_general_topology_data_s);
 		bfa_os_assign(rnid_acc->gen_topology_data, *gen_topo_data);
-		return (sizeof(struct fc_rnid_acc_s));
+		return sizeof(struct fc_rnid_acc_s);
 	} else {
-		return (sizeof(struct fc_rnid_acc_s) -
-			sizeof(struct fc_rnid_general_topology_data_s));
+		return sizeof(struct fc_rnid_acc_s) -
+			sizeof(struct fc_rnid_general_topology_data_s);
 	}
 
 }
@@ -776,7 +776,7 @@ fc_rpsc_build(struct fchs_s *fchs, struct fc_rpsc_cmd_s *rpsc, u32 d_id,
 	memset(rpsc, 0, sizeof(struct fc_rpsc_cmd_s));
 
 	rpsc->els_cmd.els_code = FC_ELS_RPSC;
-	return (sizeof(struct fc_rpsc_cmd_s));
+	return sizeof(struct fc_rpsc_cmd_s);
 }
 
 u16
@@ -797,8 +797,8 @@ fc_rpsc2_build(struct fchs_s *fchs, struct fc_rpsc2_cmd_s *rpsc2,
 	for (i = 0; i < npids; i++)
 		rpsc2->pid_list[i].pid = pid_list[i];
 
-	return (sizeof(struct fc_rpsc2_cmd_s) + ((npids - 1) *
-			(sizeof(u32))));
+	return sizeof(struct fc_rpsc2_cmd_s) + ((npids - 1) *
+			(sizeof(u32)));
 }
 
 u16
@@ -819,7 +819,7 @@ fc_rpsc_acc_build(struct fchs_s *fchs, struct fc_rpsc_acc_s *rpsc_acc,
 	rpsc_acc->speed_info[0].port_op_speed =
 		bfa_os_htons(oper_speed->port_op_speed);
 
-	return (sizeof(struct fc_rpsc_acc_s));
+	return sizeof(struct fc_rpsc_acc_s);
 
 }
 
@@ -856,7 +856,7 @@ fc_pdisc_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
 	pdisc->port_name = port_name;
 	pdisc->node_name = node_name;
 
-	return (sizeof(struct fc_logi_s));
+	return sizeof(struct fc_logi_s);
 }
 
 u16
@@ -865,21 +865,21 @@ fc_pdisc_rsp_parse(struct fchs_s *fchs, int len, wwn_t port_name)
 	struct fc_logi_s     *pdisc = (struct fc_logi_s *) (fchs + 1);
 
 	if (len < sizeof(struct fc_logi_s))
-		return (FC_PARSE_LEN_INVAL);
+		return FC_PARSE_LEN_INVAL;
 
 	if (pdisc->els_cmd.els_code != FC_ELS_ACC)
-		return (FC_PARSE_ACC_INVAL);
+		return FC_PARSE_ACC_INVAL;
 
 	if (!wwn_is_equal(pdisc->port_name, port_name))
-		return (FC_PARSE_PWWN_NOT_EQUAL);
+		return FC_PARSE_PWWN_NOT_EQUAL;
 
 	if (!pdisc->class3.class_valid)
-		return (FC_PARSE_NWWN_NOT_EQUAL);
+		return FC_PARSE_NWWN_NOT_EQUAL;
 
 	if (bfa_os_ntohs(pdisc->class3.rxsz) < (FC_MIN_PDUSZ))
-		return (FC_PARSE_RXSZ_INVAL);
+		return FC_PARSE_RXSZ_INVAL;
 
-	return (FC_PARSE_OK);
+	return FC_PARSE_OK;
 }
 
 u16
@@ -903,7 +903,7 @@ fc_prlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
 		prlo->prlo_params[page].resp_process_assc = 0;
 	}
 
-	return (bfa_os_ntohs(prlo->payload_len));
+	return bfa_os_ntohs(prlo->payload_len);
 }
 
 u16
@@ -916,7 +916,7 @@ fc_prlo_rsp_parse(struct fchs_s *fchs, int len)
 	len = len;
 
 	if (prlo->command != FC_ELS_ACC)
-		return (FC_PARSE_FAILURE);
+		return FC_PARSE_FAILURE;
 
 	num_pages = ((bfa_os_ntohs(prlo->payload_len)) - 4) / 16;
 
@@ -936,7 +936,7 @@ fc_prlo_rsp_parse(struct fchs_s *fchs, int len)
 		if (prlo->prlo_acc_params[page].resp_process_assc != 0)
 			return FC_PARSE_FAILURE;
 	}
-	return (FC_PARSE_OK);
+	return FC_PARSE_OK;
 
 }
 
@@ -968,7 +968,7 @@ fc_tprlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
 		}
 	}
 
-	return (bfa_os_ntohs(tprlo->payload_len));
+	return bfa_os_ntohs(tprlo->payload_len);
 }
 
 u16
@@ -981,23 +981,23 @@ fc_tprlo_rsp_parse(struct fchs_s *fchs, int len)
 	len = len;
 
 	if (tprlo->command != FC_ELS_ACC)
-		return (FC_PARSE_ACC_INVAL);
+		return FC_PARSE_ACC_INVAL;
 
 	num_pages = (bfa_os_ntohs(tprlo->payload_len) - 4) / 16;
 
 	for (page = 0; page < num_pages; page++) {
 		if (tprlo->tprlo_acc_params[page].type != FC_TYPE_FCP)
-			return (FC_PARSE_NOT_FCP);
+			return FC_PARSE_NOT_FCP;
 		if (tprlo->tprlo_acc_params[page].opa_valid != 0)
-			return (FC_PARSE_OPAFLAG_INVAL);
+			return FC_PARSE_OPAFLAG_INVAL;
 		if (tprlo->tprlo_acc_params[page].rpa_valid != 0)
-			return (FC_PARSE_RPAFLAG_INVAL);
+			return FC_PARSE_RPAFLAG_INVAL;
 		if (tprlo->tprlo_acc_params[page].orig_process_assc != 0)
-			return (FC_PARSE_OPA_INVAL);
+			return FC_PARSE_OPA_INVAL;
 		if (tprlo->tprlo_acc_params[page].resp_process_assc != 0)
-			return (FC_PARSE_RPA_INVAL);
+			return FC_PARSE_RPA_INVAL;
 	}
-	return (FC_PARSE_OK);
+	return FC_PARSE_OK;
 }
 
 enum fc_parse_status
@@ -1024,7 +1024,7 @@ fc_ba_rjt_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
 	fchs->cat_info = FC_CAT_BA_RJT;
 	ba_rjt->reason_code = reason_code;
 	ba_rjt->reason_expl = reason_expl;
-	return (sizeof(struct fc_ba_rjt_s));
+	return sizeof(struct fc_ba_rjt_s);
 }
 
 static void
@@ -1073,7 +1073,7 @@ fc_gidpn_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
 
 	bfa_os_memset(gidpn, 0, sizeof(struct fcgs_gidpn_req_s));
 	gidpn->port_name = port_name;
-	return (sizeof(struct fcgs_gidpn_req_s) + sizeof(struct ct_hdr_s));
+	return sizeof(struct fcgs_gidpn_req_s) + sizeof(struct ct_hdr_s);
 }
 
 u16
@@ -1090,7 +1090,7 @@ fc_gpnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
 
 	bfa_os_memset(gpnid, 0, sizeof(fcgs_gpnid_req_t));
 	gpnid->dap = port_id;
-	return (sizeof(fcgs_gpnid_req_t) + sizeof(struct ct_hdr_s));
+	return sizeof(fcgs_gpnid_req_t) + sizeof(struct ct_hdr_s);
 }
 
 u16
@@ -1107,7 +1107,7 @@ fc_gnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
 
 	bfa_os_memset(gnnid, 0, sizeof(fcgs_gnnid_req_t));
 	gnnid->dap = port_id;
-	return (sizeof(fcgs_gnnid_req_t) + sizeof(struct ct_hdr_s));
+	return sizeof(fcgs_gnnid_req_t) + sizeof(struct ct_hdr_s);
 }
 
 u16
@@ -1137,7 +1137,7 @@ fc_scr_build(struct fchs_s *fchs, struct fc_scr_s *scr, u8 set_br_reg,
 	if (set_br_reg)
 		scr->vu_reg_func = FC_VU_SCR_REG_FUNC_FABRIC_NAME_CHANGE;
 
-	return (sizeof(struct fc_scr_s));
+	return sizeof(struct fc_scr_s);
 }
 
 u16
@@ -1157,7 +1157,7 @@ fc_rscn_build(struct fchs_s *fchs, struct fc_rscn_pl_s *rscn, u32 s_id,
 	rscn->event[0].format = FC_RSCN_FORMAT_PORTID;
 	rscn->event[0].portid = s_id;
 
-	return (sizeof(struct fc_rscn_pl_s));
+	return sizeof(struct fc_rscn_pl_s);
 }
 
 u16
@@ -1188,7 +1188,7 @@ fc_rftid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
 		rftid->fc4_type[index] |= bfa_os_htonl(type_value);
 	}
 
-	return (sizeof(struct fcgs_rftid_req_s) + sizeof(struct ct_hdr_s));
+	return sizeof(struct fcgs_rftid_req_s) + sizeof(struct ct_hdr_s);
 }
 
 u16
@@ -1210,7 +1210,7 @@ fc_rftid_build_sol(struct fchs_s *fchs, void *pyld, u32 s_id,
 	bfa_os_memcpy((void *)rftid->fc4_type, (void *)fc4_bitmap,
 			(bitmap_size < 32 ? bitmap_size : 32));
 
-	return (sizeof(struct fcgs_rftid_req_s) + sizeof(struct ct_hdr_s));
+	return sizeof(struct fcgs_rftid_req_s) + sizeof(struct ct_hdr_s);
 }
 
 u16
@@ -1231,7 +1231,7 @@ fc_rffid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
 	rffid->fc4ftr_bits  = fc4_ftrs;
 	rffid->fc4_type		= fc4_type;
 
-	return (sizeof(struct fcgs_rffid_req_s) + sizeof(struct ct_hdr_s));
+	return sizeof(struct fcgs_rffid_req_s) + sizeof(struct ct_hdr_s);
 }
 
 u16
@@ -1253,7 +1253,7 @@ fc_rspnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
 	rspnid->spn_len = (u8) strlen((char *)name);
 	strncpy((char *)rspnid->spn, (char *)name, rspnid->spn_len);
 
-	return (sizeof(struct fcgs_rspnid_req_s) + sizeof(struct ct_hdr_s));
+	return sizeof(struct fcgs_rspnid_req_s) + sizeof(struct ct_hdr_s);
 }
 
 u16
@@ -1275,7 +1275,7 @@ fc_gid_ft_build(struct fchs_s *fchs, void *pyld, u32 s_id,
 	gidft->domain_id = 0;
 	gidft->area_id = 0;
 
-	return (sizeof(struct fcgs_gidft_req_s) + sizeof(struct ct_hdr_s));
+	return sizeof(struct fcgs_gidft_req_s) + sizeof(struct ct_hdr_s);
 }
 
 u16
@@ -1294,7 +1294,7 @@ fc_rpnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id,
 	rpnid->port_id = port_id;
 	rpnid->port_name = port_name;
 
-	return (sizeof(struct fcgs_rpnid_req_s) + sizeof(struct ct_hdr_s));
+	return sizeof(struct fcgs_rpnid_req_s) + sizeof(struct ct_hdr_s);
 }
 
 u16
@@ -1313,7 +1313,7 @@ fc_rnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id,
 	rnnid->port_id = port_id;
 	rnnid->node_name = node_name;
 
-	return (sizeof(struct fcgs_rnnid_req_s) + sizeof(struct ct_hdr_s));
+	return sizeof(struct fcgs_rnnid_req_s) + sizeof(struct ct_hdr_s);
 }
 
 u16
@@ -1332,7 +1332,7 @@ fc_rcsid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id,
 	rcsid->port_id = port_id;
 	rcsid->cos = cos;
 
-	return (sizeof(struct fcgs_rcsid_req_s) + sizeof(struct ct_hdr_s));
+	return sizeof(struct fcgs_rcsid_req_s) + sizeof(struct ct_hdr_s);
 }
 
 u16
@@ -1351,7 +1351,7 @@ fc_rptid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id,
 	rptid->port_id = port_id;
 	rptid->port_type = port_type;
 
-	return (sizeof(struct fcgs_rptid_req_s) + sizeof(struct ct_hdr_s));
+	return sizeof(struct fcgs_rptid_req_s) + sizeof(struct ct_hdr_s);
 }
 
 u16
@@ -1368,7 +1368,7 @@ fc_ganxt_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id)
 	bfa_os_memset(ganxt, 0, sizeof(struct fcgs_ganxt_req_s));
 	ganxt->port_id = port_id;
 
-	return (sizeof(struct ct_hdr_s) + sizeof(struct fcgs_ganxt_req_s));
+	return sizeof(struct ct_hdr_s) + sizeof(struct fcgs_ganxt_req_s);
 }
 
 /*
@@ -1385,7 +1385,7 @@ fc_fdmi_reqhdr_build(struct fchs_s *fchs, void *pyld, u32 s_id,
 	fc_gs_fchdr_build(fchs, d_id, s_id, 0);
 	fc_gs_fdmi_cthdr_build(cthdr, s_id, cmd_code);
 
-	return (sizeof(struct ct_hdr_s));
+	return sizeof(struct ct_hdr_s);
 }
 
 /*
@@ -1425,7 +1425,7 @@ fc_gmal_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn)
 	bfa_os_memset(gmal, 0, sizeof(fcgs_gmal_req_t));
 	gmal->wwn = wwn;
 
-	return (sizeof(struct ct_hdr_s) + sizeof(fcgs_gmal_req_t));
+	return sizeof(struct ct_hdr_s) + sizeof(fcgs_gmal_req_t);
 }
 
 /*
@@ -1445,5 +1445,5 @@ fc_gfn_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn)
 	bfa_os_memset(gfn, 0, sizeof(fcgs_gfn_req_t));
 	gfn->wwn = wwn;
 
-	return (sizeof(struct ct_hdr_s) + sizeof(fcgs_gfn_req_t));
+	return sizeof(struct ct_hdr_s) + sizeof(fcgs_gfn_req_t);
 }
diff --git a/drivers/scsi/bfa/fcbuild.h b/drivers/scsi/bfa/fcbuild.h
index 4d248424f7b3..8fa7f270ef7b 100644
--- a/drivers/scsi/bfa/fcbuild.h
+++ b/drivers/scsi/bfa/fcbuild.h
@@ -32,8 +32,8 @@
  * Utility Macros/functions
  */
 
-#define fcif_sof_set(_ifhdr, _sof)	(_ifhdr)->sof = FC_ ## _sof
-#define fcif_eof_set(_ifhdr, _eof)	(_ifhdr)->eof = FC_ ## _eof
+#define fcif_sof_set(_ifhdr, _sof)	((_ifhdr)->sof = FC_ ## _sof)
+#define fcif_eof_set(_ifhdr, _eof)	((_ifhdr)->eof = FC_ ## _eof)
 
 #define wwn_is_equal(_wwn1, _wwn2)		\
 	(memcmp(&(_wwn1), &(_wwn2), sizeof(wwn_t)) == 0)
@@ -49,7 +49,7 @@
 static inline   u32
 fc_get_ctresp_pyld_len(u32 resp_len)
 {
-	return (resp_len - sizeof(struct ct_hdr_s));
+	return resp_len - sizeof(struct ct_hdr_s);
 }
 
 /*
diff --git a/drivers/scsi/bfa/fcpim.c b/drivers/scsi/bfa/fcpim.c
index 8ce5d8934677..1f3c06efaa9e 100644
--- a/drivers/scsi/bfa/fcpim.c
+++ b/drivers/scsi/bfa/fcpim.c
@@ -286,11 +286,10 @@ bfa_fcs_itnim_sm_online(struct bfa_fcs_itnim_s *itnim,
 		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_hcb_offline);
 		bfa_fcb_itnim_offline(itnim->itnim_drv);
 		bfa_itnim_offline(itnim->bfa_itnim);
-		if (bfa_fcs_port_is_online(itnim->rport->port) == BFA_TRUE) {
+		if (bfa_fcs_port_is_online(itnim->rport->port) == BFA_TRUE)
 			bfa_fcs_itnim_aen_post(itnim, BFA_ITNIM_AEN_DISCONNECT);
-		} else {
+		else
 			bfa_fcs_itnim_aen_post(itnim, BFA_ITNIM_AEN_OFFLINE);
-		}
 		break;
 
 	case BFA_FCS_ITNIM_SM_DELETE:
@@ -732,7 +731,7 @@ bfa_fcs_itnim_lookup(struct bfa_fcs_port_s *port, wwn_t rpwwn)
 		return NULL;
 
 	bfa_assert(rport->itnim != NULL);
-	return (rport->itnim);
+	return rport->itnim;
 }
 
 bfa_status_t
diff --git a/drivers/scsi/bfa/fcs.h b/drivers/scsi/bfa/fcs.h
index deee685e8478..8d08230e6295 100644
--- a/drivers/scsi/bfa/fcs.h
+++ b/drivers/scsi/bfa/fcs.h
@@ -23,7 +23,7 @@
 #ifndef __FCS_H__
 #define __FCS_H__
 
-#define __fcs_min_cfg(__fcs)       (__fcs)->min_cfg
+#define __fcs_min_cfg(__fcs)       ((__fcs)->min_cfg)
 
 void bfa_fcs_modexit_comp(struct bfa_fcs_s *fcs);
 
diff --git a/drivers/scsi/bfa/fdmi.c b/drivers/scsi/bfa/fdmi.c
index b845eb272c78..df2a1e54e16b 100644
--- a/drivers/scsi/bfa/fdmi.c
+++ b/drivers/scsi/bfa/fdmi.c
@@ -72,9 +72,9 @@ static u16 bfa_fcs_port_fdmi_build_rpa_pyld(
 			struct bfa_fcs_port_fdmi_s *fdmi, u8 *pyld);
 static u16 bfa_fcs_port_fdmi_build_portattr_block(
 			struct bfa_fcs_port_fdmi_s *fdmi, u8 *pyld);
-void bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_port_fdmi_s *fdmi,
+static void bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_port_fdmi_s *fdmi,
 			struct bfa_fcs_fdmi_hba_attr_s *hba_attr);
-void bfa_fcs_fdmi_get_portattr(struct bfa_fcs_port_fdmi_s *fdmi,
+static void bfa_fcs_fdmi_get_portattr(struct bfa_fcs_port_fdmi_s *fdmi,
 			struct bfa_fcs_fdmi_port_attr_s *port_attr);
 /**
  *  fcs_fdmi_sm FCS FDMI state machine
@@ -1091,7 +1091,7 @@ bfa_fcs_port_fdmi_timeout(void *arg)
 	bfa_sm_send_event(fdmi, FDMISM_EVENT_TIMEOUT);
 }
 
-void
+static void
 bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_port_fdmi_s *fdmi,
 			 struct bfa_fcs_fdmi_hba_attr_s *hba_attr)
 {
@@ -1145,7 +1145,7 @@ bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_port_fdmi_s *fdmi,
 
 }
 
-void
+static void
 bfa_fcs_fdmi_get_portattr(struct bfa_fcs_port_fdmi_s *fdmi,
 			  struct bfa_fcs_fdmi_port_attr_s *port_attr)
 {
diff --git a/drivers/scsi/bfa/include/aen/bfa_aen.h b/drivers/scsi/bfa/include/aen/bfa_aen.h
index da8cac093d3d..d9cbc2a783d4 100644
--- a/drivers/scsi/bfa/include/aen/bfa_aen.h
+++ b/drivers/scsi/bfa/include/aen/bfa_aen.h
@@ -54,7 +54,7 @@ bfa_aen_get_max_cfg_entry(void)
 static inline s32
 bfa_aen_get_meminfo(void)
 {
-	return (sizeof(struct bfa_aen_entry_s) * bfa_aen_get_max_cfg_entry());
+	return sizeof(struct bfa_aen_entry_s) * bfa_aen_get_max_cfg_entry();
 }
 
 static inline s32
diff --git a/drivers/scsi/bfa/include/bfa.h b/drivers/scsi/bfa/include/bfa.h
index 64c1412c5703..d4bc0d9fa42c 100644
--- a/drivers/scsi/bfa/include/bfa.h
+++ b/drivers/scsi/bfa/include/bfa.h
@@ -76,11 +76,11 @@ struct bfa_meminfo_s {
 	struct bfa_mem_elem_s meminfo[BFA_MEM_TYPE_MAX];
 };
 #define bfa_meminfo_kva(_m)	\
-	(_m)->meminfo[BFA_MEM_TYPE_KVA - 1].kva_curp
+	((_m)->meminfo[BFA_MEM_TYPE_KVA - 1].kva_curp)
 #define bfa_meminfo_dma_virt(_m)	\
-	(_m)->meminfo[BFA_MEM_TYPE_DMA - 1].kva_curp
+	((_m)->meminfo[BFA_MEM_TYPE_DMA - 1].kva_curp)
 #define bfa_meminfo_dma_phys(_m)	\
-	(_m)->meminfo[BFA_MEM_TYPE_DMA - 1].dma_curp
+	((_m)->meminfo[BFA_MEM_TYPE_DMA - 1].dma_curp)
 
 /**
  * Generic Scatter Gather Element used by driver
@@ -100,7 +100,7 @@ struct bfa_sge_s {
 /*
  * bfa stats interfaces
  */
-#define bfa_stats(_mod, _stats)	(_mod)->stats._stats ++
+#define bfa_stats(_mod, _stats)	((_mod)->stats._stats++)
 
 #define bfa_ioc_get_stats(__bfa, __ioc_stats)	\
 	bfa_ioc_fetch_stats(&(__bfa)->ioc, __ioc_stats)
@@ -136,7 +136,7 @@ void bfa_isr_enable(struct bfa_s *bfa);
 void bfa_isr_disable(struct bfa_s *bfa);
 void bfa_msix_getvecs(struct bfa_s *bfa, u32 *msix_vecs_bmap,
 			u32 *num_vecs, u32 *max_vec_bit);
-#define bfa_msix(__bfa, __vec) (__bfa)->msix.handler[__vec](__bfa, __vec)
+#define bfa_msix(__bfa, __vec) ((__bfa)->msix.handler[__vec](__bfa, __vec))
 
 void bfa_comp_deq(struct bfa_s *bfa, struct list_head *comp_q);
 void bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q);
diff --git a/drivers/scsi/bfa/include/bfa_svc.h b/drivers/scsi/bfa/include/bfa_svc.h
index 0c80b74f72ef..268d956bad89 100644
--- a/drivers/scsi/bfa/include/bfa_svc.h
+++ b/drivers/scsi/bfa/include/bfa_svc.h
@@ -34,10 +34,10 @@ struct bfa_fcxp_s;
  */
 struct bfa_rport_info_s {
 	u16        max_frmsz;	/*  max rcv pdu size               */
-	u32        pid : 24,	/*  remote port ID                 */
-			lp_tag : 8;
-	u32        local_pid : 24,	/*  local port ID		    */
-			cisc : 8;	/*  CIRO supported		    */
+	u32        pid:24,		/*  remote port ID                 */
+			lp_tag:8;
+	u32        local_pid:24,	/*  local port ID		    */
+			cisc:8;		/*  CIRO supported		    */
 	u8         fc_class;	/*  supported FC classes. enum fc_cos */
 	u8         vf_en;		/*  virtual fabric enable          */
 	u16        vf_id;		/*  virtual fabric ID              */
diff --git a/drivers/scsi/bfa/include/bfi/bfi.h b/drivers/scsi/bfa/include/bfi/bfi.h
index 6cadfe0d4ba1..7042c18e542d 100644
--- a/drivers/scsi/bfa/include/bfi/bfi.h
+++ b/drivers/scsi/bfa/include/bfi/bfi.h
@@ -93,13 +93,13 @@ union bfi_addr_u {
  */
 struct bfi_sge_s {
 #ifdef __BIGENDIAN
-	u32        flags	: 2,
-			rsvd	: 2,
-			sg_len	: 28;
+	u32        flags:2,
+			rsvd:2,
+			sg_len:28;
 #else
-	u32        sg_len	: 28,
-			rsvd	: 2,
-			flags	: 2;
+	u32        sg_len:28,
+			rsvd:2,
+			flags:2;
 #endif
 	union bfi_addr_u sga;
 };
diff --git a/drivers/scsi/bfa/include/bfi/bfi_ioc.h b/drivers/scsi/bfa/include/bfi/bfi_ioc.h
index 026e9c06ae97..96ef05670659 100644
--- a/drivers/scsi/bfa/include/bfi/bfi_ioc.h
+++ b/drivers/scsi/bfa/include/bfi/bfi_ioc.h
@@ -142,7 +142,7 @@ enum {
 	BFI_ADAPTER_UNSUPP    = 0x400000,	/*  unknown adapter type  */
 };
 
-#define BFI_ADAPTER_GETP(__prop,__adap_prop)          		\
+#define BFI_ADAPTER_GETP(__prop, __adap_prop)          		\
     (((__adap_prop) & BFI_ADAPTER_ ## __prop ## _MK) >>         \
      BFI_ADAPTER_ ## __prop ## _SH)
 #define BFI_ADAPTER_SETP(__prop, __val)         		\
diff --git a/drivers/scsi/bfa/include/bfi/bfi_lps.h b/drivers/scsi/bfa/include/bfi/bfi_lps.h
index 414b0e30f6ef..c59d47badb4b 100644
--- a/drivers/scsi/bfa/include/bfi/bfi_lps.h
+++ b/drivers/scsi/bfa/include/bfi/bfi_lps.h
@@ -55,8 +55,8 @@ struct bfi_lps_login_rsp_s {
 	u16	bb_credit;
 	u8		f_port;
 	u8		npiv_en;
-	u32	lp_pid : 24;
-	u32	auth_req : 8;
+	u32	lp_pid:24;
+	u32	auth_req:8;
 	mac_t		lp_mac;
 	mac_t		fcf_mac;
 	u8		ext_status;
diff --git a/drivers/scsi/bfa/include/bfi/bfi_rport.h b/drivers/scsi/bfa/include/bfi/bfi_rport.h
index 3520f55f09d7..e1cd83b56ec6 100644
--- a/drivers/scsi/bfa/include/bfi/bfi_rport.h
+++ b/drivers/scsi/bfa/include/bfi/bfi_rport.h
@@ -38,10 +38,10 @@ struct bfi_rport_create_req_s {
 	struct bfi_mhdr_s  mh;		/*  common msg header		*/
 	u16        bfa_handle;	/*  host rport handle		*/
 	u16        max_frmsz;	/*  max rcv pdu size		*/
-	u32        pid       : 24,	/*  remote port ID		*/
-			lp_tag    : 8;	/*  local port tag		*/
-	u32        local_pid : 24,	/*  local port ID		*/
-			cisc      : 8;
+	u32        pid:24,		/*  remote port ID		*/
+			lp_tag:8;	/*  local port tag		*/
+	u32        local_pid:24,	/*  local port ID		*/
+			cisc:8;
 	u8         fc_class;	/*  supported FC classes	*/
 	u8         vf_en;		/*  virtual fabric enable	*/
 	u16        vf_id;		/*  virtual fabric ID		*/
diff --git a/drivers/scsi/bfa/include/cs/bfa_checksum.h b/drivers/scsi/bfa/include/cs/bfa_checksum.h
index af8c1d533ba8..650f8d0aaff9 100644
--- a/drivers/scsi/bfa/include/cs/bfa_checksum.h
+++ b/drivers/scsi/bfa/include/cs/bfa_checksum.h
@@ -31,7 +31,7 @@ bfa_checksum_u32(u32 *buf, int sz)
 	for (i = 0; i < m; i++)
 		sum ^= buf[i];
 
-	return (sum);
+	return sum;
 }
 
 static inline u16
@@ -43,7 +43,7 @@ bfa_checksum_u16(u16 *buf, int sz)
 	for (i = 0; i < m; i++)
 		sum ^= buf[i];
 
-	return (sum);
+	return sum;
 }
 
 static inline u8
@@ -55,6 +55,6 @@ bfa_checksum_u8(u8 *buf, int sz)
 	for (i = 0; i < sz; i++)
 		sum ^= buf[i];
 
-	return (sum);
+	return sum;
 }
 #endif
diff --git a/drivers/scsi/bfa/include/cs/bfa_sm.h b/drivers/scsi/bfa/include/cs/bfa_sm.h
index 9877066680a6..b0a92baf6657 100644
--- a/drivers/scsi/bfa/include/cs/bfa_sm.h
+++ b/drivers/scsi/bfa/include/cs/bfa_sm.h
@@ -24,8 +24,8 @@
 
 typedef void (*bfa_sm_t)(void *sm, int event);
 
-#define bfa_sm_set_state(_sm, _state)	(_sm)->sm = (bfa_sm_t)(_state)
-#define bfa_sm_send_event(_sm, _event)	(_sm)->sm((_sm), (_event))
+#define bfa_sm_set_state(_sm, _state)	((_sm)->sm = (bfa_sm_t)(_state))
+#define bfa_sm_send_event(_sm, _event)	((_sm)->sm((_sm), (_event)))
 #define bfa_sm_get_state(_sm)		((_sm)->sm)
 #define bfa_sm_cmp_state(_sm, _state)	((_sm)->sm == (bfa_sm_t)(_state))
 
@@ -62,7 +62,7 @@ typedef void (*bfa_fsm_t)(void *fsm, int event);
 } while (0)
 
 #define bfa_fsm_send_event(_fsm, _event)	\
-	(_fsm)->fsm((_fsm), (_event))
+	((_fsm)->fsm((_fsm), (_event)))
 #define bfa_fsm_cmp_state(_fsm, _state)		\
 	((_fsm)->fsm == (bfa_fsm_t)(_state))
 
diff --git a/drivers/scsi/bfa/include/cs/bfa_trc.h b/drivers/scsi/bfa/include/cs/bfa_trc.h
index 3e743928c74c..310771c888e7 100644
--- a/drivers/scsi/bfa/include/cs/bfa_trc.h
+++ b/drivers/scsi/bfa/include/cs/bfa_trc.h
@@ -24,7 +24,7 @@
 #endif
 
 #ifndef BFA_TRC_TS
-#define BFA_TRC_TS(_trcm)	((_trcm)->ticks ++)
+#define BFA_TRC_TS(_trcm)	((_trcm)->ticks++)
 #endif
 
 struct bfa_trc_s {
diff --git a/drivers/scsi/bfa/include/fcs/bfa_fcs_fabric.h b/drivers/scsi/bfa/include/fcs/bfa_fcs_fabric.h
index 4ffd2242d3de..08b79d5e46f3 100644
--- a/drivers/scsi/bfa/include/fcs/bfa_fcs_fabric.h
+++ b/drivers/scsi/bfa/include/fcs/bfa_fcs_fabric.h
@@ -75,7 +75,7 @@ struct bfa_fcs_fabric_s {
 							    */
 };
 
-#define bfa_fcs_fabric_npiv_capable(__f)    (__f)->is_npiv
+#define bfa_fcs_fabric_npiv_capable(__f)    ((__f)->is_npiv)
 #define bfa_fcs_fabric_is_switched(__f)			\
 	((__f)->fab_type == BFA_FCS_FABRIC_SWITCHED)
 
diff --git a/drivers/scsi/bfa/include/fcs/bfa_fcs_lport.h b/drivers/scsi/bfa/include/fcs/bfa_fcs_lport.h
index b85cba884b96..967ceb0eb074 100644
--- a/drivers/scsi/bfa/include/fcs/bfa_fcs_lport.h
+++ b/drivers/scsi/bfa/include/fcs/bfa_fcs_lport.h
@@ -125,12 +125,12 @@ union bfa_fcs_port_topo_u {
 struct bfa_fcs_port_s {
 	struct list_head         qe;	/*  used by port/vport */
 	bfa_sm_t               sm;	/*  state machine */
-	struct bfa_fcs_fabric_s *fabric;	/*  parent fabric */
-	struct bfa_port_cfg_s  port_cfg;	/*  port configuration */
+	struct bfa_fcs_fabric_s *fabric;/*  parent fabric */
+	struct bfa_port_cfg_s  port_cfg;/*  port configuration */
 	struct bfa_timer_s link_timer;	/*  timer for link offline */
-	u32        pid : 24;	/*  FC address */
-	u8         lp_tag;		/*  lport tag */
-	u16        num_rports;	/*  Num of r-ports */
+	u32 pid:24;	/*  FC address */
+	u8  lp_tag;	/*  lport tag */
+	u16 num_rports;	/*  Num of r-ports */
 	struct list_head rport_q;	/*  queue of discovered r-ports */
 	struct bfa_fcs_s *fcs;	/*  FCS instance */
 	union bfa_fcs_port_topo_u port_topo;	/*  fabric/loop/n2n details */
@@ -188,13 +188,14 @@ bfa_fcs_port_get_drvport(struct bfa_fcs_port_s *port)
 }
 
 
-#define bfa_fcs_port_get_opertype(_lport)	(_lport)->fabric->oper_type
+#define bfa_fcs_port_get_opertype(_lport)	((_lport)->fabric->oper_type)
 
 
-#define bfa_fcs_port_get_fabric_name(_lport)	(_lport)->fabric->fabric_name
+#define bfa_fcs_port_get_fabric_name(_lport)	((_lport)->fabric->fabric_name)
 
 
-#define bfa_fcs_port_get_fabric_ipaddr(_lport)	(_lport)->fabric->fabric_ip_addr
+#define bfa_fcs_port_get_fabric_ipaddr(_lport) \
+		((_lport)->fabric->fabric_ip_addr)
 
 /**
  * bfa fcs port public functions
diff --git a/drivers/scsi/bfa/include/protocol/ct.h b/drivers/scsi/bfa/include/protocol/ct.h
index c59d6630b070..c8648bcba41a 100644
--- a/drivers/scsi/bfa/include/protocol/ct.h
+++ b/drivers/scsi/bfa/include/protocol/ct.h
@@ -193,11 +193,11 @@ struct fcgs_rftid_req_s {
 #define FC_GS_FCP_FC4_FEATURE_TARGET	 0x01
 
 struct fcgs_rffid_req_s{
-    u32    rsvd          :8;
-    u32    dap        	  :24;		/* port identifier	*/
-    u32    rsvd1         :16;
-    u32    fc4ftr_bits   :8;		/* fc4 feature bits	*/
-    u32    fc4_type      :8;		/* corresponding FC4 Type */
+    u32    rsvd:8;
+    u32    dap:24;		/* port identifier	*/
+    u32    rsvd1:16;
+    u32    fc4ftr_bits:8;	/* fc4 feature bits	*/
+    u32    fc4_type:8;	/* corresponding FC4 Type */
 };
 
 /**
diff --git a/drivers/scsi/bfa/include/protocol/fc.h b/drivers/scsi/bfa/include/protocol/fc.h
index 3e39ba58cfb5..14969eecf6a9 100644
--- a/drivers/scsi/bfa/include/protocol/fc.h
+++ b/drivers/scsi/bfa/include/protocol/fc.h
@@ -486,14 +486,14 @@ struct fc_rsi_s {
  * see FC-PH-X table 113 & 115 for explanation also FCP table 8
  */
 struct fc_prli_params_s{
-	u32        reserved: 16;
+	u32        reserved:16;
 #ifdef __BIGENDIAN
-	u32        reserved1: 5;
-	u32        rec_support : 1;
-	u32        task_retry_id : 1;
-	u32        retry : 1;
+	u32        reserved1:5;
+	u32        rec_support:1;
+	u32        task_retry_id:1;
+	u32        retry:1;
 
-	u32        confirm : 1;
+	u32        confirm:1;
 	u32        doverlay:1;
 	u32        initiator:1;
 	u32        target:1;
@@ -502,10 +502,10 @@ struct fc_prli_params_s{
 	u32        rxrdisab:1;
 	u32        wxrdisab:1;
 #else
-	u32        retry : 1;
-	u32        task_retry_id : 1;
-	u32        rec_support : 1;
-	u32        reserved1: 5;
+	u32        retry:1;
+	u32        task_retry_id:1;
+	u32        rec_support:1;
+	u32        reserved1:5;
 
 	u32        wxrdisab:1;
 	u32        rxrdisab:1;
@@ -514,7 +514,7 @@ struct fc_prli_params_s{
 	u32        target:1;
 	u32        initiator:1;
 	u32        doverlay:1;
-	u32        confirm : 1;
+	u32        confirm:1;
 #endif
 };
 
diff --git a/drivers/scsi/bfa/loop.c b/drivers/scsi/bfa/loop.c
index a418dedebe9e..f7c7f4f3c640 100644
--- a/drivers/scsi/bfa/loop.c
+++ b/drivers/scsi/bfa/loop.c
@@ -58,49 +58,16 @@ static const u8   port_loop_alpa_map[] = {
 /*
  * Local Functions
  */
-bfa_status_t    bfa_fcs_port_loop_send_plogi(struct bfa_fcs_port_s *port,
-					     u8 alpa);
-
-void            bfa_fcs_port_loop_plogi_response(void *fcsarg,
-						 struct bfa_fcxp_s *fcxp,
-						 void *cbarg,
-						 bfa_status_t req_status,
-						 u32 rsp_len,
-						 u32 resid_len,
-						 struct fchs_s *rsp_fchs);
-
-bfa_status_t    bfa_fcs_port_loop_send_adisc(struct bfa_fcs_port_s *port,
-					     u8 alpa);
-
-void            bfa_fcs_port_loop_adisc_response(void *fcsarg,
-						 struct bfa_fcxp_s *fcxp,
-						 void *cbarg,
-						 bfa_status_t req_status,
-						 u32 rsp_len,
-						 u32 resid_len,
-						 struct fchs_s *rsp_fchs);
-
-bfa_status_t    bfa_fcs_port_loop_send_plogi_acc(struct bfa_fcs_port_s *port,
-						 u8 alpa);
-
-void            bfa_fcs_port_loop_plogi_acc_response(void *fcsarg,
-						     struct bfa_fcxp_s *fcxp,
-						     void *cbarg,
-						     bfa_status_t req_status,
-						     u32 rsp_len,
-						     u32 resid_len,
-						     struct fchs_s *rsp_fchs);
-
-bfa_status_t    bfa_fcs_port_loop_send_adisc_acc(struct bfa_fcs_port_s *port,
-						 u8 alpa);
-
-void            bfa_fcs_port_loop_adisc_acc_response(void *fcsarg,
-						     struct bfa_fcxp_s *fcxp,
-						     void *cbarg,
-						     bfa_status_t req_status,
-						     u32 rsp_len,
-						     u32 resid_len,
-						     struct fchs_s *rsp_fchs);
+static bfa_status_t bfa_fcs_port_loop_send_plogi(struct bfa_fcs_port_s *port,
+					u8 alpa);
+
+static void bfa_fcs_port_loop_plogi_response(void *fcsarg,
+					struct bfa_fcxp_s *fcxp,
+					void *cbarg,
+					bfa_status_t req_status,
+					u32 rsp_len,
+					u32 resid_len,
+					struct fchs_s *rsp_fchs);
 /**
  *   Called by port to initialize in private LOOP topology.
  */
@@ -179,7 +146,7 @@ bfa_fcs_port_loop_lip(struct bfa_fcs_port_s *port)
 /**
  * Local Functions.
  */
-bfa_status_t
+static bfa_status_t
 bfa_fcs_port_loop_send_plogi(struct bfa_fcs_port_s *port, u8 alpa)
 {
 	struct fchs_s          fchs;
@@ -208,7 +175,7 @@ bfa_fcs_port_loop_send_plogi(struct bfa_fcs_port_s *port, u8 alpa)
 /**
  *   Called by fcxp to notify the Plogi response
  */
-void
+static void
 bfa_fcs_port_loop_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
 				 void *cbarg, bfa_status_t req_status,
 				 u32 rsp_len, u32 resid_len,
@@ -244,179 +211,3 @@ bfa_fcs_port_loop_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
 		bfa_assert(0);
 	}
 }
-
-bfa_status_t
-bfa_fcs_port_loop_send_plogi_acc(struct bfa_fcs_port_s *port, u8 alpa)
-{
-	struct fchs_s          fchs;
-	struct bfa_fcxp_s *fcxp;
-	int             len;
-
-	bfa_trc(port->fcs, alpa);
-
-	fcxp = bfa_fcxp_alloc(NULL, port->fcs->bfa, 0, 0, NULL, NULL, NULL,
-				  NULL);
-	bfa_assert(fcxp);
-
-	len = fc_plogi_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), alpa,
-				 bfa_fcs_port_get_fcid(port), 0,
-				 port->port_cfg.pwwn, port->port_cfg.nwwn,
-				 bfa_pport_get_maxfrsize(port->fcs->bfa));
-
-	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
-				 FC_CLASS_3, len, &fchs,
-				 bfa_fcs_port_loop_plogi_acc_response,
-				 (void *)port, FC_MAX_PDUSZ, 0); /* No response
-								  * expected
-								  */
-
-	return BFA_STATUS_OK;
-}
-
-/*
- *  Plogi Acc Response
- * We donot do any processing here.
- */
-void
-bfa_fcs_port_loop_plogi_acc_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
-				     void *cbarg, bfa_status_t req_status,
-				     u32 rsp_len, u32 resid_len,
-				     struct fchs_s *rsp_fchs)
-{
-
-	struct bfa_fcs_port_s *port = (struct bfa_fcs_port_s *) cbarg;
-
-	bfa_trc(port->fcs, port->pid);
-
-	/*
-	 * Sanity Checks
-	 */
-	if (req_status != BFA_STATUS_OK) {
-		bfa_trc(port->fcs, req_status);
-		return;
-	}
-}
-
-bfa_status_t
-bfa_fcs_port_loop_send_adisc(struct bfa_fcs_port_s *port, u8 alpa)
-{
-	struct fchs_s          fchs;
-	struct bfa_fcxp_s *fcxp;
-	int             len;
-
-	bfa_trc(port->fcs, alpa);
-
-	fcxp = bfa_fcxp_alloc(NULL, port->fcs->bfa, 0, 0, NULL, NULL, NULL,
-				  NULL);
-	bfa_assert(fcxp);
-
-	len = fc_adisc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), alpa,
-			     bfa_fcs_port_get_fcid(port), 0,
-			     port->port_cfg.pwwn, port->port_cfg.nwwn);
-
-	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
-			  FC_CLASS_3, len, &fchs,
-			  bfa_fcs_port_loop_adisc_response, (void *)port,
-			  FC_MAX_PDUSZ, FC_RA_TOV);
-
-	return BFA_STATUS_OK;
-}
-
-/**
- *   Called by fcxp to notify the ADISC response
- */
-void
-bfa_fcs_port_loop_adisc_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
-				 void *cbarg, bfa_status_t req_status,
-				 u32 rsp_len, u32 resid_len,
-				 struct fchs_s *rsp_fchs)
-{
-	struct bfa_fcs_port_s *port = (struct bfa_fcs_port_s *) cbarg;
-	struct bfa_fcs_rport_s *rport;
-	struct fc_adisc_s     *adisc_resp;
-	struct fc_els_cmd_s   *els_cmd;
-	u32        pid = rsp_fchs->s_id;
-
-	bfa_trc(port->fcs, req_status);
-
-	/*
-	 * Sanity Checks
-	 */
-	if (req_status != BFA_STATUS_OK) {
-		/*
-		 * TBD : we may need to retry certain requests
-		 */
-		bfa_fcxp_free(fcxp);
-		return;
-	}
-
-	els_cmd = (struct fc_els_cmd_s *) BFA_FCXP_RSP_PLD(fcxp);
-	adisc_resp = (struct fc_adisc_s *) els_cmd;
-
-	if (els_cmd->els_code == FC_ELS_ACC) {
-	} else {
-		bfa_trc(port->fcs, adisc_resp->els_cmd.els_code);
-
-		/*
-		 * TBD: we may need to check for reject codes and retry
-		 */
-		rport = bfa_fcs_port_get_rport_by_pid(port, pid);
-		if (rport) {
-			list_del(&rport->qe);
-			bfa_fcs_rport_delete(rport);
-		}
-
-	}
-	return;
-}
-
-bfa_status_t
-bfa_fcs_port_loop_send_adisc_acc(struct bfa_fcs_port_s *port, u8 alpa)
-{
-	struct fchs_s          fchs;
-	struct bfa_fcxp_s *fcxp;
-	int             len;
-
-	bfa_trc(port->fcs, alpa);
-
-	fcxp = bfa_fcxp_alloc(NULL, port->fcs->bfa, 0, 0, NULL, NULL, NULL,
-				  NULL);
-	bfa_assert(fcxp);
-
-	len = fc_adisc_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), alpa,
-				 bfa_fcs_port_get_fcid(port), 0,
-				 port->port_cfg.pwwn, port->port_cfg.nwwn);
-
-	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
-				FC_CLASS_3, len, &fchs,
-				bfa_fcs_port_loop_adisc_acc_response,
-				(void *)port, FC_MAX_PDUSZ, 0); /* no reponse
-								 * expected
-								 */
-
-	return BFA_STATUS_OK;
-}
-
-/*
- *  Adisc Acc Response
- * We donot do any processing here.
- */
-void
-bfa_fcs_port_loop_adisc_acc_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
-				     void *cbarg, bfa_status_t req_status,
-				     u32 rsp_len, u32 resid_len,
-				     struct fchs_s *rsp_fchs)
-{
-
-	struct bfa_fcs_port_s *port = (struct bfa_fcs_port_s *) cbarg;
-
-	bfa_trc(port->fcs, port->pid);
-
-	/*
-	 * Sanity Checks
-	 */
-	if (req_status != BFA_STATUS_OK) {
-		bfa_trc(port->fcs, req_status);
-		return;
-	}
-}
diff --git a/drivers/scsi/bfa/lport_api.c b/drivers/scsi/bfa/lport_api.c
index 8f51a83f1834..1e06792cd4c2 100644
--- a/drivers/scsi/bfa/lport_api.c
+++ b/drivers/scsi/bfa/lport_api.c
@@ -43,7 +43,7 @@ bfa_fcs_cfg_base_port(struct bfa_fcs_s *fcs, struct bfa_port_cfg_s *port_cfg)
 struct bfa_fcs_port_s *
 bfa_fcs_get_base_port(struct bfa_fcs_s *fcs)
 {
-	return (&fcs->fabric.bport);
+	return &fcs->fabric.bport;
 }
 
 wwn_t
@@ -88,11 +88,10 @@ bfa_fcs_port_get_rport(struct bfa_fcs_port_s *port, wwn_t wwn, int index,
 	}
 
 	bfa_trc(fcs, i);
-	if (rport) {
+	if (rport)
 		return rport->pwwn;
-	} else {
+	else
 		return (wwn_t) 0;
-	}
 }
 
 void
@@ -198,17 +197,17 @@ bfa_fcs_lookup_port(struct bfa_fcs_s *fcs, u16 vf_id, wwn_t lpwwn)
 	vf = bfa_fcs_vf_lookup(fcs, vf_id);
 	if (vf == NULL) {
 		bfa_trc(fcs, vf_id);
-		return (NULL);
+		return NULL;
 	}
 
 	if (!lpwwn || (vf->bport.port_cfg.pwwn == lpwwn))
-		return (&vf->bport);
+		return &vf->bport;
 
 	vport = bfa_fcs_fabric_vport_lookup(vf, lpwwn);
 	if (vport)
-		return (&vport->lport);
+		return &vport->lport;
 
-	return (NULL);
+	return NULL;
 }
 
 /*
diff --git a/drivers/scsi/bfa/ns.c b/drivers/scsi/bfa/ns.c
index 59fea99d67a4..2f8b880060bb 100644
--- a/drivers/scsi/bfa/ns.c
+++ b/drivers/scsi/bfa/ns.c
@@ -932,11 +932,10 @@ bfa_fcs_port_ns_send_rff_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
 	}
 	ns->fcxp = fcxp;
 
-	if (BFA_FCS_VPORT_IS_INITIATOR_MODE(ns->port)) {
+	if (BFA_FCS_VPORT_IS_INITIATOR_MODE(ns->port))
 		fc4_ftrs = FC_GS_FCP_FC4_FEATURE_INITIATOR;
-	} else if (BFA_FCS_VPORT_IS_TARGET_MODE(ns->port)) {
+	else if (BFA_FCS_VPORT_IS_TARGET_MODE(ns->port))
 		fc4_ftrs = FC_GS_FCP_FC4_FEATURE_TARGET;
-	}
 
 	len = fc_rffid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
 			     bfa_fcs_port_get_fcid(port), 0, FC_TYPE_FCP,
diff --git a/drivers/scsi/bfa/plog.c b/drivers/scsi/bfa/plog.c
index 86af818d17bb..fcb8864d3276 100644
--- a/drivers/scsi/bfa/plog.c
+++ b/drivers/scsi/bfa/plog.c
@@ -180,5 +180,5 @@ bfa_plog_disable(struct bfa_plog_s *plog)
 bfa_boolean_t
 bfa_plog_get_setting(struct bfa_plog_s *plog)
 {
-	return((bfa_boolean_t)plog->plog_enabled);
+	return (bfa_boolean_t)plog->plog_enabled;
 }
diff --git a/drivers/scsi/bfa/rport_ftrs.c b/drivers/scsi/bfa/rport_ftrs.c
index 8a1f59d596c1..e1932c885ac2 100644
--- a/drivers/scsi/bfa/rport_ftrs.c
+++ b/drivers/scsi/bfa/rport_ftrs.c
@@ -79,7 +79,7 @@ bfa_fcs_rpf_sm_uninit(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
 	bfa_trc(rport->fcs, event);
 
 	switch (event) {
-	case RPFSM_EVENT_RPORT_ONLINE :
+	case RPFSM_EVENT_RPORT_ONLINE:
 		if (!BFA_FCS_PID_IS_WKA(rport->pid)) {
 			bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_sending);
 			rpf->rpsc_retries = 0;
@@ -87,7 +87,7 @@ bfa_fcs_rpf_sm_uninit(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
 			break;
 		};
 
-	case RPFSM_EVENT_RPORT_OFFLINE :
+	case RPFSM_EVENT_RPORT_OFFLINE:
 		break;
 
 	default:
@@ -107,7 +107,7 @@ bfa_fcs_rpf_sm_rpsc_sending(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
 		bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc);
 		break;
 
-	case RPFSM_EVENT_RPORT_OFFLINE :
+	case RPFSM_EVENT_RPORT_OFFLINE:
 		bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline);
 		bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rpf->fcxp_wqe);
 		rpf->rpsc_retries = 0;
@@ -130,11 +130,10 @@ bfa_fcs_rpf_sm_rpsc(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
 	case RPFSM_EVENT_RPSC_COMP:
 		bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_online);
 		/* Update speed info in f/w via BFA */
-		if (rpf->rpsc_speed != BFA_PPORT_SPEED_UNKNOWN) {
+		if (rpf->rpsc_speed != BFA_PPORT_SPEED_UNKNOWN)
 			bfa_rport_speed(rport->bfa_rport, rpf->rpsc_speed);
-		} else if (rpf->assigned_speed != BFA_PPORT_SPEED_UNKNOWN) {
+		else if (rpf->assigned_speed != BFA_PPORT_SPEED_UNKNOWN)
 			bfa_rport_speed(rport->bfa_rport, rpf->assigned_speed);
-		}
 		break;
 
 	case RPFSM_EVENT_RPSC_FAIL:
@@ -154,7 +153,7 @@ bfa_fcs_rpf_sm_rpsc(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
 		}
 		break;
 
-	case RPFSM_EVENT_RPORT_OFFLINE :
+	case RPFSM_EVENT_RPORT_OFFLINE:
 		bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline);
 		bfa_fcxp_discard(rpf->fcxp);
 		rpf->rpsc_retries = 0;
@@ -174,13 +173,13 @@ bfa_fcs_rpf_sm_rpsc_retry(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
 	bfa_trc(rport->fcs, event);
 
 	switch (event) {
-	case RPFSM_EVENT_TIMEOUT :
+	case RPFSM_EVENT_TIMEOUT:
 		/* re-send the RPSC */
 		bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_sending);
 		bfa_fcs_rpf_send_rpsc2(rpf, NULL);
 		break;
 
-	case RPFSM_EVENT_RPORT_OFFLINE :
+	case RPFSM_EVENT_RPORT_OFFLINE:
 		bfa_timer_stop(&rpf->timer);
 		bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline);
 		rpf->rpsc_retries = 0;
@@ -201,7 +200,7 @@ bfa_fcs_rpf_sm_online(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
 	bfa_trc(rport->fcs, event);
 
 	switch (event) {
-	case RPFSM_EVENT_RPORT_OFFLINE :
+	case RPFSM_EVENT_RPORT_OFFLINE:
 		bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline);
 		rpf->rpsc_retries = 0;
 		break;
@@ -221,12 +220,12 @@ bfa_fcs_rpf_sm_offline(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
 	bfa_trc(rport->fcs, event);
 
 	switch (event) {
-	case RPFSM_EVENT_RPORT_ONLINE :
+	case RPFSM_EVENT_RPORT_ONLINE:
 		bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_sending);
 		bfa_fcs_rpf_send_rpsc2(rpf, NULL);
 		break;
 
-	case RPFSM_EVENT_RPORT_OFFLINE :
+	case RPFSM_EVENT_RPORT_OFFLINE:
 		break;
 
 	default:
@@ -366,10 +365,9 @@ bfa_fcs_rpf_rpsc2_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
 		bfa_trc(rport->fcs, ls_rjt->reason_code);
 		bfa_trc(rport->fcs, ls_rjt->reason_code_expl);
 		rport->stats.rpsc_rejects++;
-		if (ls_rjt->reason_code == FC_LS_RJT_RSN_CMD_NOT_SUPP) {
+		if (ls_rjt->reason_code == FC_LS_RJT_RSN_CMD_NOT_SUPP)
 			bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_FAIL);
-		} else {
+		else
 			bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_ERROR);
-		}
 	}
 }
diff --git a/drivers/scsi/bfa/vfapi.c b/drivers/scsi/bfa/vfapi.c
index 31d81fe2fc48..391a4790bebd 100644
--- a/drivers/scsi/bfa/vfapi.c
+++ b/drivers/scsi/bfa/vfapi.c
@@ -189,7 +189,7 @@ bfa_fcs_vf_lookup(struct bfa_fcs_s *fcs, u16 vf_id)
 {
 	bfa_trc(fcs, vf_id);
 	if (vf_id == FC_VF_ID_NULL)
-		return (&fcs->fabric);
+		return &fcs->fabric;
 
 	/**
 	 * @todo vf support
diff --git a/drivers/scsi/bfa/vport.c b/drivers/scsi/bfa/vport.c
index c10af06c5714..e90f1e38c32d 100644
--- a/drivers/scsi/bfa/vport.c
+++ b/drivers/scsi/bfa/vport.c
@@ -31,13 +31,13 @@
 
 BFA_TRC_FILE(FCS, VPORT);
 
-#define __vport_fcs(__vp)       (__vp)->lport.fcs
-#define __vport_pwwn(__vp)      (__vp)->lport.port_cfg.pwwn
-#define __vport_nwwn(__vp)      (__vp)->lport.port_cfg.nwwn
-#define __vport_bfa(__vp)       (__vp)->lport.fcs->bfa
-#define __vport_fcid(__vp)      (__vp)->lport.pid
-#define __vport_fabric(__vp)    (__vp)->lport.fabric
-#define __vport_vfid(__vp)      (__vp)->lport.fabric->vf_id
+#define __vport_fcs(__vp)       ((__vp)->lport.fcs)
+#define __vport_pwwn(__vp)      ((__vp)->lport.port_cfg.pwwn)
+#define __vport_nwwn(__vp)      ((__vp)->lport.port_cfg.nwwn)
+#define __vport_bfa(__vp)       ((__vp)->lport.fcs->bfa)
+#define __vport_fcid(__vp)      ((__vp)->lport.pid)
+#define __vport_fabric(__vp)    ((__vp)->lport.fabric)
+#define __vport_vfid(__vp)      ((__vp)->lport.fabric->vf_id)
 
 #define BFA_FCS_VPORT_MAX_RETRIES  5
 /*
@@ -641,9 +641,9 @@ bfa_fcs_vport_get_max(struct bfa_fcs_s *fcs)
 	bfa_get_attr(fcs->bfa, &ioc_attr);
 
 	if (ioc_attr.pci_attr.device_id == BFA_PCI_DEVICE_ID_CT)
-		return (BFA_FCS_MAX_VPORTS_SUPP_CT);
+		return BFA_FCS_MAX_VPORTS_SUPP_CT;
 	else
-		return (BFA_FCS_MAX_VPORTS_SUPP_CB);
+		return BFA_FCS_MAX_VPORTS_SUPP_CB;
 }
 
 
@@ -675,7 +675,7 @@ bfa_fcs_vport_create(struct bfa_fcs_vport_s *vport, struct bfa_fcs_s *fcs,
 		     struct bfad_vport_s *vport_drv)
 {
 	if (vport_cfg->pwwn == 0)
-		return (BFA_STATUS_INVALID_WWN);
+		return BFA_STATUS_INVALID_WWN;
 
 	if (bfa_fcs_port_get_pwwn(&fcs->fabric.bport) == vport_cfg->pwwn)
 		return BFA_STATUS_VPORT_WWN_BP;
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h
index 5edde1a8c04d..2b973f3c2eb2 100644
--- a/drivers/scsi/bnx2i/bnx2i.h
+++ b/drivers/scsi/bnx2i/bnx2i.h
@@ -232,7 +232,6 @@ struct bnx2i_conn {
 	struct iscsi_cls_conn *cls_conn;
 	struct bnx2i_hba *hba;
 	struct completion cmd_cleanup_cmpl;
-	int is_bound;
 
 	u32 iscsi_conn_cid;
 #define BNX2I_CID_RESERVED	0x5AFF
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index cafb888c2376..070118a8f184 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -1161,9 +1161,6 @@ static int bnx2i_task_xmit(struct iscsi_task *task)
 	struct bnx2i_cmd *cmd = task->dd_data;
 	struct iscsi_cmd *hdr = (struct iscsi_cmd *) task->hdr;
 
-	if (!bnx2i_conn->is_bound)
-		return -ENOTCONN;
-
 	/*
 	 * If there is no scsi_cmnd this must be a mgmt task
 	 */
@@ -1371,7 +1368,6 @@ static int bnx2i_conn_bind(struct iscsi_cls_session *cls_session,
 	bnx2i_conn->ep = bnx2i_ep;
 	bnx2i_conn->iscsi_conn_cid = bnx2i_ep->ep_iscsi_cid;
 	bnx2i_conn->fw_cid = bnx2i_ep->ep_cid;
-	bnx2i_conn->is_bound = 1;
 
 	ret_code = bnx2i_bind_conn_to_iscsi_cid(hba, bnx2i_conn,
 						bnx2i_ep->ep_iscsi_cid);
@@ -1896,9 +1892,7 @@ static void bnx2i_ep_disconnect(struct iscsi_endpoint *ep)
 		conn = bnx2i_conn->cls_conn->dd_data;
 		session = conn->session;
 
-		spin_lock_bh(&session->lock);
-		bnx2i_conn->is_bound = 0;
-		spin_unlock_bh(&session->lock);
+		iscsi_suspend_queue(conn);
 	}
 
 	hba = bnx2i_ep->hba;
@@ -2034,7 +2028,7 @@ struct iscsi_transport bnx2i_iscsi_transport = {
 				  ISCSI_USERNAME | ISCSI_PASSWORD |
 				  ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
 				  ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
-				  ISCSI_LU_RESET_TMO |
+				  ISCSI_LU_RESET_TMO | ISCSI_TGT_RESET_TMO |
 				  ISCSI_PING_TMO | ISCSI_RECV_TMO |
 				  ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
 	.host_param_mask	= ISCSI_HOST_HWADDRESS | ISCSI_HOST_NETDEV_NAME,
diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c
index 63abb06c4edb..9129bcf117cf 100644
--- a/drivers/scsi/constants.c
+++ b/drivers/scsi/constants.c
@@ -141,6 +141,7 @@ static const struct value_name_pair serv_out12_arr[] = {
 static const struct value_name_pair serv_in16_arr[] = {
 	{0x10, "Read capacity(16)"},
 	{0x11, "Read long(16)"},
+	{0x12, "Get LBA status"},
 };
 #define SERV_IN16_SZ ARRAY_SIZE(serv_in16_arr)
 
diff --git a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
index 2631bddd255e..969c83162cc4 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
@@ -937,7 +937,7 @@ static struct iscsi_transport cxgb3i_iscsi_transport = {
 				ISCSI_USERNAME | ISCSI_PASSWORD |
 				ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
 				ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
-				ISCSI_LU_RESET_TMO |
+				ISCSI_LU_RESET_TMO | ISCSI_TGT_RESET_TMO |
 				ISCSI_PING_TMO | ISCSI_RECV_TMO |
 				ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
 	.host_param_mask	= ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
diff --git a/drivers/scsi/device_handler/scsi_dh.c b/drivers/scsi/device_handler/scsi_dh.c
index 3ee1cbc89479..e19a1a55270c 100644
--- a/drivers/scsi/device_handler/scsi_dh.c
+++ b/drivers/scsi/device_handler/scsi_dh.c
@@ -226,7 +226,7 @@ store_dh_state(struct device *dev, struct device_attribute *attr,
 			 * Activate a device handler
 			 */
 			if (scsi_dh->activate)
-				err = scsi_dh->activate(sdev);
+				err = scsi_dh->activate(sdev, NULL, NULL);
 			else
 				err = 0;
 		}
@@ -304,18 +304,15 @@ static int scsi_dh_notifier(struct notifier_block *nb,
 	sdev = to_scsi_device(dev);
 
 	if (action == BUS_NOTIFY_ADD_DEVICE) {
+		err = device_create_file(dev, &scsi_dh_state_attr);
+		/* don't care about err */
 		devinfo = device_handler_match(NULL, sdev);
-		if (!devinfo)
-			goto out;
-
-		err = scsi_dh_handler_attach(sdev, devinfo);
-		if (!err)
-			err = device_create_file(dev, &scsi_dh_state_attr);
+		if (devinfo)
+			err = scsi_dh_handler_attach(sdev, devinfo);
 	} else if (action == BUS_NOTIFY_DEL_DEVICE) {
 		device_remove_file(dev, &scsi_dh_state_attr);
 		scsi_dh_handler_detach(sdev, NULL);
 	}
-out:
 	return err;
 }
 
@@ -423,10 +420,17 @@ EXPORT_SYMBOL_GPL(scsi_unregister_device_handler);
 /*
  * scsi_dh_activate - activate the path associated with the scsi_device
  *      corresponding to the given request queue.
- * @q - Request queue that is associated with the scsi_device to be
- *      activated.
+ *     Returns immediately without waiting for activation to be completed.
+ * @q    - Request queue that is associated with the scsi_device to be
+ *         activated.
+ * @fn   - Function to be called upon completion of the activation.
+ *         Function fn is called with data (below) and the error code.
+ *         Function fn may be called from the same calling context. So,
+ *         do not hold the lock in the caller which may be needed in fn.
+ * @data - data passed to the function fn upon completion.
+ *
  */
-int scsi_dh_activate(struct request_queue *q)
+int scsi_dh_activate(struct request_queue *q, activate_complete fn, void *data)
 {
 	int err = 0;
 	unsigned long flags;
@@ -445,7 +449,7 @@ int scsi_dh_activate(struct request_queue *q)
 		return err;
 
 	if (scsi_dh->activate)
-		err = scsi_dh->activate(sdev);
+		err = scsi_dh->activate(sdev, fn, data);
 	put_device(&sdev->sdev_gendev);
 	return err;
 }
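
The reworked comment above defines the new asynchronous contract:
scsi_dh_activate() returns immediately and reports the outcome through the
completion callback. A hedged sketch of a caller coded against that
contract (path_ctx, activate_done and activate_path are illustrative names,
not the real multipath code):

	#include <linux/blkdev.h>
	#include <scsi/scsi_dh.h>

	struct path_ctx {			/* hypothetical per-path state */
		struct request_queue *q;
	};

	/*
	 * Completion callback: 'data' is whatever was passed to
	 * scsi_dh_activate(), 'err' is a SCSI_DH_* result code. It may run
	 * in the caller's own context, so no lock that it needs may be held
	 * across the scsi_dh_activate() call.
	 */
	static void activate_done(void *data, int err)
	{
		struct path_ctx *p = data;

		if (err != SCSI_DH_OK)
			printk(KERN_WARNING "path activation failed: %d\n",
			       err);
		/* resume, retry or fail I/O on the path here */
	}

	static void activate_path(struct path_ctx *p)
	{
		/* returns at once; activate_done() fires on completion */
		scsi_dh_activate(p->q, activate_done, p);
	}
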
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index b5cdefaf2608..4f0d0138f48b 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -60,11 +60,17 @@ struct alua_dh_data {
 	int			bufflen;
 	unsigned char		sense[SCSI_SENSE_BUFFERSIZE];
 	int			senselen;
+	struct scsi_device	*sdev;
+	activate_complete	callback_fn;
+	void			*callback_data;
 };
 
 #define ALUA_POLICY_SWITCH_CURRENT	0
 #define ALUA_POLICY_SWITCH_ALL		1
 
+static char print_alua_state(int);
+static int alua_check_sense(struct scsi_device *, struct scsi_sense_hdr *);
+
 static inline struct alua_dh_data *get_alua_data(struct scsi_device *sdev)
 {
 	struct scsi_dh_data *scsi_dh_data = sdev->scsi_dh_data;
@@ -231,18 +237,71 @@ done:
 }
 
 /*
+ * stpg_endio - Completion handler for SET TARGET GROUP STATES
+ * @req: the completed STPG request
+ * @error: block layer result code
+ *
+ * Evaluate the result of the SET TARGET GROUP STATES command.
+ * We only have to test here if we should resubmit the command;
+ * any other error is assumed as a failure.
+ */
+static void stpg_endio(struct request *req, int error)
+{
+	struct alua_dh_data *h = req->end_io_data;
+	struct scsi_sense_hdr sense_hdr;
+	unsigned err = SCSI_DH_IO;
+
+	if (error || host_byte(req->errors) != DID_OK ||
+			msg_byte(req->errors) != COMMAND_COMPLETE)
+		goto done;
+
+	if (err == SCSI_DH_IO && h->senselen > 0) {
+		err = scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE,
+					   &sense_hdr);
+		if (!err) {
+			err = SCSI_DH_IO;
+			goto done;
+		}
+		err = alua_check_sense(h->sdev, &sense_hdr);
+		if (err == ADD_TO_MLQUEUE) {
+			err = SCSI_DH_RETRY;
+			goto done;
+		}
+		sdev_printk(KERN_INFO, h->sdev,
+			    "%s: stpg sense code: %02x/%02x/%02x\n",
+			    ALUA_DH_NAME, sense_hdr.sense_key,
+			    sense_hdr.asc, sense_hdr.ascq);
+		err = SCSI_DH_IO;
+	}
+	if (err == SCSI_DH_OK) {
+		h->state = TPGS_STATE_OPTIMIZED;
+		sdev_printk(KERN_INFO, h->sdev,
+			    "%s: port group %02x switched to state %c\n",
+			    ALUA_DH_NAME, h->group_id,
+			    print_alua_state(h->state));
+	}
+done:
+	blk_put_request(req);
+	if (h->callback_fn) {
+		h->callback_fn(h->callback_data, err);
+		h->callback_fn = h->callback_data = NULL;
+	}
+	return;
+}
+
+/*
  * submit_stpg - Issue a SET TARGET GROUP STATES command
- * @sdev: sdev the command should be sent to
  *
  * Currently we're only setting the current target port group state
  * to 'active/optimized' and letting the array firmware figure out
  * the states of the remaining groups.
  */
-static unsigned submit_stpg(struct scsi_device *sdev, struct alua_dh_data *h)
+static unsigned submit_stpg(struct alua_dh_data *h)
 {
 	struct request *rq;
 	int err = SCSI_DH_RES_TEMP_UNAVAIL;
 	int stpg_len = 8;
+	struct scsi_device *sdev = h->sdev;
 
 	/* Prepare the data buffer */
 	memset(h->buff, 0, stpg_len);
@@ -252,7 +311,7 @@ static unsigned submit_stpg(struct scsi_device *sdev, struct alua_dh_data *h)
 
 	rq = get_alua_req(sdev, h->buff, stpg_len, WRITE);
 	if (!rq)
-		goto done;
+		return SCSI_DH_RES_TEMP_UNAVAIL;
 
 	/* Prepare the command. */
 	rq->cmd[0] = MAINTENANCE_OUT;
@@ -266,17 +325,9 @@ static unsigned submit_stpg(struct scsi_device *sdev, struct alua_dh_data *h)
 	rq->sense = h->sense;
 	memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
 	rq->sense_len = h->senselen = 0;
+	rq->end_io_data = h;
 
-	err = blk_execute_rq(rq->q, NULL, rq, 1);
-	if (err == -EIO) {
-		sdev_printk(KERN_INFO, sdev,
-			    "%s: stpg failed with %x\n",
-			    ALUA_DH_NAME, rq->errors);
-		h->senselen = rq->sense_len;
-		err = SCSI_DH_IO;
-	}
-	blk_put_request(rq);
-done:
+	blk_execute_rq_nowait(rq->q, NULL, rq, 1, stpg_endio);
 	return err;
 }
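
The hunks above follow the standard recipe for turning a blocking SCSI command into an asynchronous one on the 2.6.32-era block API: stash the completion state in rq->end_io_data, submit with blk_execute_rq_nowait(), and finish in the end_io callback. A minimal sketch of that shape (my_dh_data, my_endio and my_submit are illustrative names, not part of the patch):

	struct my_dh_data {
		activate_complete	callback_fn;	/* completion to invoke */
		void			*callback_data;
	};

	static void my_endio(struct request *req, int error)
	{
		struct my_dh_data *h = req->end_io_data;

		blk_put_request(req);			/* request is finished */
		if (h->callback_fn) {
			h->callback_fn(h->callback_data,
				       error ? SCSI_DH_IO : SCSI_DH_OK);
			h->callback_fn = h->callback_data = NULL;
		}
	}

	static unsigned my_submit(struct request *rq, struct my_dh_data *h)
	{
		rq->end_io_data = h;			/* carried to completion */
		blk_execute_rq_nowait(rq->q, NULL, rq, 1, my_endio);
		return SCSI_DH_OK;			/* result arrives via callback */
	}

The caller must not touch the request after blk_execute_rq_nowait() returns; everything past that point, including the final blk_put_request(), belongs to the completion handler.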
 
@@ -477,50 +528,6 @@ static int alua_check_sense(struct scsi_device *sdev,
 }
 
 /*
- * alua_stpg - Evaluate SET TARGET GROUP STATES
- * @sdev: the device to be evaluated
- * @state: the new target group state
- *
- * Send a SET TARGET GROUP STATES command to the device.
- * We only have to test here if we should resubmit the command;
- * any other error is assumed as a failure.
- */
-static int alua_stpg(struct scsi_device *sdev, int state,
-		     struct alua_dh_data *h)
-{
-	struct scsi_sense_hdr sense_hdr;
-	unsigned err;
-	int retry = ALUA_FAILOVER_RETRIES;
-
- retry:
-	err = submit_stpg(sdev, h);
-	if (err == SCSI_DH_IO && h->senselen > 0) {
-		err = scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE,
-					   &sense_hdr);
-		if (!err)
-			return SCSI_DH_IO;
-		err = alua_check_sense(sdev, &sense_hdr);
-		if (retry > 0 && err == ADD_TO_MLQUEUE) {
-			retry--;
-			goto retry;
-		}
-		sdev_printk(KERN_INFO, sdev,
-			    "%s: stpg sense code: %02x/%02x/%02x\n",
-			    ALUA_DH_NAME, sense_hdr.sense_key,
-			    sense_hdr.asc, sense_hdr.ascq);
-		err = SCSI_DH_IO;
-	}
-	if (err == SCSI_DH_OK) {
-		h->state = state;
-		sdev_printk(KERN_INFO, sdev,
-			    "%s: port group %02x switched to state %c\n",
-			    ALUA_DH_NAME, h->group_id,
-			    print_alua_state(h->state) );
-	}
-	return err;
-}
-
-/*
  * alua_rtpg - Evaluate REPORT TARGET GROUP STATES
  * @sdev: the device to be evaluated.
  *
@@ -652,7 +659,8 @@ out:
  * based on a certain policy. But until we actually encounter them it
  * should be okay.
  */
-static int alua_activate(struct scsi_device *sdev)
+static int alua_activate(struct scsi_device *sdev,
+			activate_complete fn, void *data)
 {
 	struct alua_dh_data *h = get_alua_data(sdev);
 	int err = SCSI_DH_OK;
@@ -663,11 +671,19 @@ static int alua_activate(struct scsi_device *sdev)
 			goto out;
 	}
 
-	if (h->tpgs & TPGS_MODE_EXPLICIT && h->state != TPGS_STATE_OPTIMIZED)
-		err = alua_stpg(sdev, TPGS_STATE_OPTIMIZED, h);
+	if (h->tpgs & TPGS_MODE_EXPLICIT && h->state != TPGS_STATE_OPTIMIZED) {
+		h->callback_fn = fn;
+		h->callback_data = data;
+		err = submit_stpg(h);
+		if (err == SCSI_DH_OK)
+			return 0;
+		h->callback_fn = h->callback_data = NULL;
+	}
 
 out:
-	return err;
+	if (fn)
+		fn(data, err);
+	return 0;
 }
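
The new activate signature changes the error-reporting contract for every handler in this series: the function always returns 0, and the result is delivered through the completion exactly once, either inline on the fast/error path or later from the end_io handler. A hedged sketch of the contract (the typedef shown matches what these patches rely on; example_activate is illustrative):

	typedef void (*activate_complete)(void *data, int err);

	static int example_activate(struct scsi_device *sdev,
				    activate_complete fn, void *data)
	{
		int err = SCSI_DH_OK;

		/* ...queue asynchronous work; on success, return without
		 * calling fn() -- the end_io handler will invoke it... */

		if (fn)
			fn(data, err);	/* nothing queued: complete inline */
		return 0;
	}

Callers such as dm-mpath therefore treat activation as fire-and-forget and resume the path only when the completion runs.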
 
 /*
@@ -745,6 +761,7 @@ static int alua_bus_attach(struct scsi_device *sdev)
 	h->rel_port = -1;
 	h->buff = h->inq;
 	h->bufflen = ALUA_INQUIRY_SIZE;
+	h->sdev = sdev;
 
 	err = alua_initialize(sdev, h);
 	if (err != SCSI_DH_OK)
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
index 0cffe84976fe..61966750bd60 100644
--- a/drivers/scsi/device_handler/scsi_dh_emc.c
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -528,7 +528,8 @@ retry:
 	return err;
 }
 
-static int clariion_activate(struct scsi_device *sdev)
+static int clariion_activate(struct scsi_device *sdev,
+				activate_complete fn, void *data)
 {
 	struct clariion_dh_data *csdev = get_clariion_data(sdev);
 	int result;
@@ -559,7 +560,9 @@ done:
 		    csdev->port, lun_state[csdev->lun_state],
 		    csdev->default_sp + 'A');
 
-	return result;
+	if (fn)
+		fn(data, result);
+	return 0;
 }
 /*
  * params - parameters in the following format
diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
index f7da7530875e..857fdd6032b2 100644
--- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c
+++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
@@ -39,8 +39,14 @@ struct hp_sw_dh_data {
 	unsigned char sense[SCSI_SENSE_BUFFERSIZE];
 	int path_state;
 	int retries;
+	int retry_cnt;
+	struct scsi_device *sdev;
+	activate_complete	callback_fn;
+	void			*callback_data;
 };
 
+static int hp_sw_start_stop(struct hp_sw_dh_data *);
+
 static inline struct hp_sw_dh_data *get_hp_sw_data(struct scsi_device *sdev)
 {
 	struct scsi_dh_data *scsi_dh_data = sdev->scsi_dh_data;
@@ -191,19 +197,53 @@ static int start_done(struct scsi_device *sdev, unsigned char *sense)
 	return rc;
 }
 
+static void start_stop_endio(struct request *req, int error)
+{
+	struct hp_sw_dh_data *h = req->end_io_data;
+	unsigned err = SCSI_DH_OK;
+
+	if (error || host_byte(req->errors) != DID_OK ||
+			msg_byte(req->errors) != COMMAND_COMPLETE) {
+		sdev_printk(KERN_WARNING, h->sdev,
+			    "%s: sending start_stop_unit failed with %x\n",
+			    HP_SW_NAME, req->errors);
+		err = SCSI_DH_IO;
+		goto done;
+	}
+
+	if (req->sense_len > 0) {
+		err = start_done(h->sdev, h->sense);
+		if (err == SCSI_DH_RETRY) {
+			err = SCSI_DH_IO;
+			if (--h->retry_cnt) {
+				blk_put_request(req);
+				err = hp_sw_start_stop(h);
+				if (err == SCSI_DH_OK)
+					return;
+				/* request was released above */
+				goto complete;
+			}
+		}
+	}
+done:
+	blk_put_request(req);
+complete:
+	if (h->callback_fn) {
+		h->callback_fn(h->callback_data, err);
+		h->callback_fn = h->callback_data = NULL;
+	}
+}
+
 /*
  * hp_sw_start_stop - Send START STOP UNIT command
  * @sdev: sdev command should be sent to
  *
  * Sending START STOP UNIT activates the SP.
  */
-static int hp_sw_start_stop(struct scsi_device *sdev, struct hp_sw_dh_data *h)
+static int hp_sw_start_stop(struct hp_sw_dh_data *h)
 {
 	struct request *req;
-	int ret, retry;
 
-retry:
-	req = blk_get_request(sdev->request_queue, WRITE, GFP_NOIO);
+	req = blk_get_request(h->sdev->request_queue, WRITE, GFP_ATOMIC);
 	if (!req)
 		return SCSI_DH_RES_TEMP_UNAVAIL;
 
@@ -217,32 +257,10 @@ retry:
 	req->sense = h->sense;
 	memset(req->sense, 0, SCSI_SENSE_BUFFERSIZE);
 	req->sense_len = 0;
-	retry = h->retries;
-
-	ret = blk_execute_rq(req->q, NULL, req, 1);
-	if (ret == -EIO) {
-		if (req->sense_len > 0) {
-			ret = start_done(sdev, h->sense);
-		} else {
-			sdev_printk(KERN_WARNING, sdev,
-				    "%s: sending start_stop_unit failed with %x\n",
-				    HP_SW_NAME, req->errors);
-			ret = SCSI_DH_IO;
-		}
-	} else
-		ret = SCSI_DH_OK;
+	req->end_io_data = h;
 
-	if (ret == SCSI_DH_RETRY) {
-		if (--retry) {
-			blk_put_request(req);
-			goto retry;
-		}
-		ret = SCSI_DH_IO;
-	}
-
-	blk_put_request(req);
-
-	return ret;
+	blk_execute_rq_nowait(req->q, NULL, req, 1, start_stop_endio);
+	return SCSI_DH_OK;
 }
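
Note the allocation-flag change above: hp_sw_start_stop() can now be re-entered from start_stop_endio(), i.e. from request-completion context where sleeping is not allowed, so the sleeping GFP_NOIO allocation becomes GFP_ATOMIC. As a short sketch of the constraint being observed:

	/* May run from an end_io handler (atomic context): must not sleep.
	 * Allocation failure is reported as a temporary, retryable error.
	 */
	req = blk_get_request(h->sdev->request_queue, WRITE, GFP_ATOMIC);
	if (!req)
		return SCSI_DH_RES_TEMP_UNAVAIL;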
 
 static int hp_sw_prep_fn(struct scsi_device *sdev, struct request *req)
@@ -268,7 +286,8 @@ static int hp_sw_prep_fn(struct scsi_device *sdev, struct request *req)
  * activate the passive path (and deactivate the
  * previously active one).
  */
-static int hp_sw_activate(struct scsi_device *sdev)
+static int hp_sw_activate(struct scsi_device *sdev,
+				activate_complete fn, void *data)
 {
 	int ret = SCSI_DH_OK;
 	struct hp_sw_dh_data *h = get_hp_sw_data(sdev);
@@ -276,14 +295,18 @@ static int hp_sw_activate(struct scsi_device *sdev)
 	ret = hp_sw_tur(sdev, h);
 
 	if (ret == SCSI_DH_OK && h->path_state == HP_SW_PATH_PASSIVE) {
-		ret = hp_sw_start_stop(sdev, h);
+		h->retry_cnt = h->retries;
+		h->callback_fn = fn;
+		h->callback_data = data;
+		ret = hp_sw_start_stop(h);
 		if (ret == SCSI_DH_OK)
-			sdev_printk(KERN_INFO, sdev,
-				    "%s: activated path\n",
-				    HP_SW_NAME);
+			return 0;
+		h->callback_fn = h->callback_data = NULL;
 	}
 
-	return ret;
+	if (fn)
+		fn(data, ret);
+	return 0;
 }
 
 static const struct scsi_dh_devlist hp_sw_dh_data_list[] = {
@@ -326,6 +349,7 @@ static int hp_sw_bus_attach(struct scsi_device *sdev)
 	h = (struct hp_sw_dh_data *) scsi_dh_data->buf;
 	h->path_state = HP_SW_PATH_UNINITIALIZED;
 	h->retries = HP_SW_RETRIES;
+	h->sdev = sdev;
 
 	ret = hp_sw_tur(sdev, h);
 	if (ret != SCSI_DH_OK || h->path_state == HP_SW_PATH_UNINITIALIZED)
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 268189d31d9c..47cfe1c49c3e 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -22,6 +22,7 @@
 #include <scsi/scsi.h>
 #include <scsi/scsi_eh.h>
 #include <scsi/scsi_dh.h>
+#include <linux/workqueue.h>
 
 #define RDAC_NAME "rdac"
 #define RDAC_RETRY_COUNT 5
@@ -138,7 +139,13 @@ struct rdac_controller {
 	} mode_select;
 	u8	index;
 	u8	array_name[ARRAY_LABEL_LEN];
+	spinlock_t		ms_lock;
+	int			ms_queued;
+	struct work_struct	ms_work;
+	struct scsi_device	*ms_sdev;
+	struct list_head	ms_head;
 };
+
 struct c8_inquiry {
 	u8	peripheral_info;
 	u8	page_code; /* 0xC8 */
@@ -198,8 +205,17 @@ static const char *lun_state[] =
 	"owned (AVT mode)",
 };
 
+struct rdac_queue_data {
+	struct list_head	entry;
+	struct rdac_dh_data	*h;
+	activate_complete	callback_fn;
+	void			*callback_data;
+};
+
 static LIST_HEAD(ctlr_list);
 static DEFINE_SPINLOCK(list_lock);
+static struct workqueue_struct *kmpath_rdacd;
+static void send_mode_select(struct work_struct *work);
 
 /*
  * module parameter to enable rdac debug logging.
@@ -281,7 +297,6 @@ static struct request *rdac_failover_get(struct scsi_device *sdev,
 		rdac_pg->subpage_code = 0x1;
 		rdac_pg->page_len[0] = 0x01;
 		rdac_pg->page_len[1] = 0x28;
-		rdac_pg->lun_table[h->lun] = 0x81;
 	} else {
 		struct rdac_pg_legacy *rdac_pg;
 
@@ -291,7 +306,6 @@ static struct request *rdac_failover_get(struct scsi_device *sdev,
 		common = &rdac_pg->common;
 		rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER;
 		rdac_pg->page_len = 0x68;
-		rdac_pg->lun_table[h->lun] = 0x81;
 	}
 	common->rdac_mode[1] = RDAC_MODE_TRANSFER_SPECIFIED_LUNS;
 	common->quiescence_timeout = RDAC_QUIESCENCE_TIME;
@@ -325,6 +339,7 @@ static void release_controller(struct kref *kref)
 	struct rdac_controller *ctlr;
 	ctlr = container_of(kref, struct rdac_controller, kref);
 
+	flush_workqueue(kmpath_rdacd);
 	spin_lock(&list_lock);
 	list_del(&ctlr->node);
 	spin_unlock(&list_lock);
@@ -363,6 +378,11 @@ static struct rdac_controller *get_controller(u8 *subsys_id, u8 *slot_id,
 
 	kref_init(&ctlr->kref);
 	ctlr->use_ms10 = -1;
+	ctlr->ms_queued = 0;
+	ctlr->ms_sdev = NULL;
+	spin_lock_init(&ctlr->ms_lock);
+	INIT_WORK(&ctlr->ms_work, send_mode_select);
+	INIT_LIST_HEAD(&ctlr->ms_head);
 	list_add(&ctlr->node, &ctlr_list);
 done:
 	spin_unlock(&list_lock);
@@ -490,7 +510,7 @@ static int set_mode_select(struct scsi_device *sdev, struct rdac_dh_data *h)
 }
 
 static int mode_select_handle_sense(struct scsi_device *sdev,
-				    unsigned char *sensebuf)
+					unsigned char *sensebuf)
 {
 	struct scsi_sense_hdr sense_hdr;
 	int err = SCSI_DH_IO, ret;
@@ -533,11 +553,29 @@ done:
 	return err;
 }
 
-static int send_mode_select(struct scsi_device *sdev, struct rdac_dh_data *h)
+static void send_mode_select(struct work_struct *work)
 {
+	struct rdac_controller *ctlr =
+		container_of(work, struct rdac_controller, ms_work);
 	struct request *rq;
+	struct scsi_device *sdev = ctlr->ms_sdev;
+	struct rdac_dh_data *h = get_rdac_data(sdev);
 	struct request_queue *q = sdev->request_queue;
 	int err, retry_cnt = RDAC_RETRY_COUNT;
+	struct rdac_queue_data *tmp, *qdata;
+	LIST_HEAD(list);
+	u8 *lun_table;
+
+	spin_lock(&ctlr->ms_lock);
+	list_splice_init(&ctlr->ms_head, &list);
+	ctlr->ms_queued = 0;
+	ctlr->ms_sdev = NULL;
+	spin_unlock(&ctlr->ms_lock);
+
+	if (ctlr->use_ms10)
+		lun_table = ctlr->mode_select.expanded.lun_table;
+	else
+		lun_table = ctlr->mode_select.legacy.lun_table;
 
 retry:
 	err = SCSI_DH_RES_TEMP_UNAVAIL;
@@ -545,6 +583,10 @@ retry:
 	if (!rq)
 		goto done;
 
+	list_for_each_entry(qdata, &list, entry) {
+		lun_table[qdata->h->lun] = 0x81;
+	}
+
 	RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
 		"%s MODE_SELECT command",
 		(char *) h->ctlr->array_name, h->ctlr->index,
@@ -565,10 +607,45 @@ retry:
 	}
 
 done:
-	return err;
+	list_for_each_entry_safe(qdata, tmp, &list, entry) {
+		list_del(&qdata->entry);
+		if (err == SCSI_DH_OK)
+			qdata->h->state = RDAC_STATE_ACTIVE;
+		if (qdata->callback_fn)
+			qdata->callback_fn(qdata->callback_data, err);
+		kfree(qdata);
+	}
+}
+
+static int queue_mode_select(struct scsi_device *sdev,
+				activate_complete fn, void *data)
+{
+	struct rdac_queue_data *qdata;
+	struct rdac_controller *ctlr;
+
+	qdata = kzalloc(sizeof(*qdata), GFP_KERNEL);
+	if (!qdata)
+		return SCSI_DH_RETRY;
+
+	qdata->h = get_rdac_data(sdev);
+	qdata->callback_fn = fn;
+	qdata->callback_data = data;
+
+	ctlr = qdata->h->ctlr;
+	spin_lock(&ctlr->ms_lock);
+	list_add_tail(&qdata->entry, &ctlr->ms_head);
+	if (!ctlr->ms_queued) {
+		ctlr->ms_queued = 1;
+		ctlr->ms_sdev = sdev;
+		queue_work(kmpath_rdacd, &ctlr->ms_work);
+	}
+	spin_unlock(&ctlr->ms_lock);
+	return SCSI_DH_OK;
 }
 
-static int rdac_activate(struct scsi_device *sdev)
+static int rdac_activate(struct scsi_device *sdev,
+			activate_complete fn, void *data)
 {
 	struct rdac_dh_data *h = get_rdac_data(sdev);
 	int err = SCSI_DH_OK;
@@ -577,10 +654,15 @@ static int rdac_activate(struct scsi_device *sdev)
 	if (err != SCSI_DH_OK)
 		goto done;
 
-	if (h->lun_state == RDAC_LUN_UNOWNED)
-		err = send_mode_select(sdev, h);
+	if (h->lun_state == RDAC_LUN_UNOWNED) {
+		err = queue_mode_select(sdev, fn, data);
+		if (err == SCSI_DH_OK)
+			return 0;
+	}
 done:
-	return err;
+	if (fn)
+		fn(data, err);
+	return 0;
 }
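
queue_mode_select() coalesces concurrent activations: every waiter is appended to ctlr->ms_head under ms_lock, and only the first waiter schedules the work item, so one MODE SELECT can flip ownership for all LUNs queued behind the same controller. A reduced sketch of the producer/consumer shape, under the same locking (example_* names are illustrative):

	static void example_worker(struct work_struct *work)
	{
		struct rdac_controller *ctlr =
			container_of(work, struct rdac_controller, ms_work);
		LIST_HEAD(list);

		spin_lock(&ctlr->ms_lock);
		list_splice_init(&ctlr->ms_head, &list);	/* take the batch */
		ctlr->ms_queued = 0;
		spin_unlock(&ctlr->ms_lock);

		/* issue one MODE SELECT covering every entry on "list",
		 * then run each waiter's callback with the shared result */
	}

	static void example_queue(struct rdac_controller *ctlr,
				  struct rdac_queue_data *qdata)
	{
		spin_lock(&ctlr->ms_lock);
		list_add_tail(&qdata->entry, &ctlr->ms_head);
		if (!ctlr->ms_queued) {		/* first waiter kicks the work */
			ctlr->ms_queued = 1;
			queue_work(kmpath_rdacd, &ctlr->ms_work);
		}
		spin_unlock(&ctlr->ms_lock);
	}

Waiters that arrive while a MODE SELECT is in flight simply land on ms_head and are picked up by the next pass of the work item.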
 
 static int rdac_prep_fn(struct scsi_device *sdev, struct request *req)
@@ -790,13 +872,26 @@ static int __init rdac_init(void)
 	int r;
 
 	r = scsi_register_device_handler(&rdac_dh);
-	if (r != 0)
+	if (r != 0) {
 		printk(KERN_ERR "Failed to register scsi device handler.");
+		goto done;
+	}
+
+	/*
+	 * Create workqueue to handle mode selects for rdac
+	 */
+	kmpath_rdacd = create_singlethread_workqueue("kmpath_rdacd");
+	if (!kmpath_rdacd) {
+		scsi_unregister_device_handler(&rdac_dh);
+		printk(KERN_ERR "kmpath_rdacd creation failed.\n");
+		r = -EINVAL;
+	}
+done:
 	return r;
 }
 
 static void __exit rdac_exit(void)
 {
+	destroy_workqueue(kmpath_rdacd);
 	scsi_unregister_device_handler(&rdac_dh);
 }
 
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 704b8e034946..a30ffaa1222c 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -66,14 +66,14 @@ LIST_HEAD(fcoe_hostlist);
 DEFINE_PER_CPU(struct fcoe_percpu_s, fcoe_percpu);
 
 /* Function Prototypes */
-static int fcoe_reset(struct Scsi_Host *shost);
+static int fcoe_reset(struct Scsi_Host *);
 static int fcoe_xmit(struct fc_lport *, struct fc_frame *);
 static int fcoe_rcv(struct sk_buff *, struct net_device *,
 		    struct packet_type *, struct net_device *);
-static int fcoe_percpu_receive_thread(void *arg);
-static void fcoe_clean_pending_queue(struct fc_lport *lp);
-static void fcoe_percpu_clean(struct fc_lport *lp);
-static int fcoe_link_ok(struct fc_lport *lp);
+static int fcoe_percpu_receive_thread(void *);
+static void fcoe_clean_pending_queue(struct fc_lport *);
+static void fcoe_percpu_clean(struct fc_lport *);
+static int fcoe_link_ok(struct fc_lport *);
 
 static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *);
 static int fcoe_hostlist_add(const struct fc_lport *);
@@ -82,15 +82,69 @@ static void fcoe_check_wait_queue(struct fc_lport *, struct sk_buff *);
 static int fcoe_device_notification(struct notifier_block *, ulong, void *);
 static void fcoe_dev_setup(void);
 static void fcoe_dev_cleanup(void);
-static struct fcoe_interface *
-	fcoe_hostlist_lookup_port(const struct net_device *dev);
+static struct fcoe_interface *
+fcoe_hostlist_lookup_port(const struct net_device *);
+
+static int fcoe_fip_recv(struct sk_buff *, struct net_device *,
+			 struct packet_type *, struct net_device *);
+
+static void fcoe_fip_send(struct fcoe_ctlr *, struct sk_buff *);
+static void fcoe_update_src_mac(struct fc_lport *, u8 *);
+static u8 *fcoe_get_src_mac(struct fc_lport *);
+static void fcoe_destroy_work(struct work_struct *);
+
+static int fcoe_ddp_setup(struct fc_lport *, u16, struct scatterlist *,
+			  unsigned int);
+static int fcoe_ddp_done(struct fc_lport *, u16);
+
+static int fcoe_cpu_callback(struct notifier_block *, unsigned long, void *);
+
+static int fcoe_create(const char *, struct kernel_param *);
+static int fcoe_destroy(const char *, struct kernel_param *);
+
+static struct fc_seq *fcoe_elsct_send(struct fc_lport *,
+				      u32 did, struct fc_frame *,
+				      unsigned int op,
+				      void (*resp)(struct fc_seq *,
+						   struct fc_frame *,
+						   void *),
+				      void *, u32 timeout);
+static void fcoe_recv_frame(struct sk_buff *skb);
+
+static void fcoe_get_lesb(struct fc_lport *, struct fc_els_lesb *);
+
+module_param_call(create, fcoe_create, NULL, NULL, S_IWUSR);
+__MODULE_PARM_TYPE(create, "string");
+MODULE_PARM_DESC(create, "Create fcoe fcoe using net device passed in.");
+module_param_call(destroy, fcoe_destroy, NULL, NULL, S_IWUSR);
+__MODULE_PARM_TYPE(destroy, "string");
+MODULE_PARM_DESC(destroy, "Destroy fcoe fcoe");
 
-/* notification function from net device */
+/* notification function for net device events */
 static struct notifier_block fcoe_notifier = {
 	.notifier_call = fcoe_device_notification,
 };
 
-static struct scsi_transport_template *scsi_transport_fcoe_sw;
+/* notification function for CPU hotplug events */
+static struct notifier_block fcoe_cpu_notifier = {
+	.notifier_call = fcoe_cpu_callback,
+};
+
+static struct scsi_transport_template *fcoe_transport_template;
+static struct scsi_transport_template *fcoe_vport_transport_template;
+
+static int fcoe_vport_destroy(struct fc_vport *);
+static int fcoe_vport_create(struct fc_vport *, bool disabled);
+static int fcoe_vport_disable(struct fc_vport *, bool disable);
+static void fcoe_set_vport_symbolic_name(struct fc_vport *);
+
+static struct libfc_function_template fcoe_libfc_fcn_templ = {
+	.frame_send = fcoe_xmit,
+	.ddp_setup = fcoe_ddp_setup,
+	.ddp_done = fcoe_ddp_done,
+	.elsct_send = fcoe_elsct_send,
+	.get_lesb = fcoe_get_lesb,
+};
 
 struct fc_function_template fcoe_transport_function = {
 	.show_host_node_name = 1,
@@ -123,6 +177,48 @@ struct fc_function_template fcoe_transport_function = {
 	.issue_fc_host_lip = fcoe_reset,
 
 	.terminate_rport_io = fc_rport_terminate_io,
+
+	.vport_create = fcoe_vport_create,
+	.vport_delete = fcoe_vport_destroy,
+	.vport_disable = fcoe_vport_disable,
+	.set_vport_symbolic_name = fcoe_set_vport_symbolic_name,
+
+	.bsg_request = fc_lport_bsg_request,
+};
+
+struct fc_function_template fcoe_vport_transport_function = {
+	.show_host_node_name = 1,
+	.show_host_port_name = 1,
+	.show_host_supported_classes = 1,
+	.show_host_supported_fc4s = 1,
+	.show_host_active_fc4s = 1,
+	.show_host_maxframe_size = 1,
+
+	.show_host_port_id = 1,
+	.show_host_supported_speeds = 1,
+	.get_host_speed = fc_get_host_speed,
+	.show_host_speed = 1,
+	.show_host_port_type = 1,
+	.get_host_port_state = fc_get_host_port_state,
+	.show_host_port_state = 1,
+	.show_host_symbolic_name = 1,
+
+	.dd_fcrport_size = sizeof(struct fc_rport_libfc_priv),
+	.show_rport_maxframe_size = 1,
+	.show_rport_supported_classes = 1,
+
+	.show_host_fabric_name = 1,
+	.show_starget_node_name = 1,
+	.show_starget_port_name = 1,
+	.show_starget_port_id = 1,
+	.set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
+	.show_rport_dev_loss_tmo = 1,
+	.get_fc_host_stats = fc_get_host_stats,
+	.issue_fc_host_lip = fcoe_reset,
+
+	.terminate_rport_io = fc_rport_terminate_io,
+
+	.bsg_request = fc_lport_bsg_request,
 };
 
 static struct scsi_host_template fcoe_shost_template = {
@@ -137,20 +233,17 @@ static struct scsi_host_template fcoe_shost_template = {
 	.change_queue_depth = fc_change_queue_depth,
 	.change_queue_type = fc_change_queue_type,
 	.this_id = -1,
-	.cmd_per_lun = 32,
+	.cmd_per_lun = 3,
 	.can_queue = FCOE_MAX_OUTSTANDING_COMMANDS,
 	.use_clustering = ENABLE_CLUSTERING,
 	.sg_tablesize = SG_ALL,
 	.max_sectors = 0xffff,
 };
 
-static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *dev,
-			 struct packet_type *ptype,
-			 struct net_device *orig_dev);
 /**
- * fcoe_interface_setup()
- * @fcoe: new fcoe_interface
- * @netdev : ptr to the associated netdevice struct
+ * fcoe_interface_setup() - Setup a FCoE interface
+ * @fcoe:   The new FCoE interface
+ * @netdev: The net device that the fcoe interface is on
  *
  * Returns : 0 for success
  * Locking: must be called with the RTNL mutex held
@@ -160,23 +253,36 @@ static int fcoe_interface_setup(struct fcoe_interface *fcoe,
 {
 	struct fcoe_ctlr *fip = &fcoe->ctlr;
 	struct netdev_hw_addr *ha;
+	struct net_device *real_dev;
 	u8 flogi_maddr[ETH_ALEN];
+	const struct net_device_ops *ops;
 
 	fcoe->netdev = netdev;
 
+	/* Let LLD initialize for FCoE */
+	ops = netdev->netdev_ops;
+	if (ops->ndo_fcoe_enable) {
+		if (ops->ndo_fcoe_enable(netdev))
+			FCOE_NETDEV_DBG(netdev, "Failed to enable FCoE"
+					" specific feature for LLD.\n");
+	}
+
 	/* Do not support for bonding device */
 	if ((netdev->priv_flags & IFF_MASTER_ALB) ||
 	    (netdev->priv_flags & IFF_SLAVE_INACTIVE) ||
 	    (netdev->priv_flags & IFF_MASTER_8023AD)) {
+		FCOE_NETDEV_DBG(netdev, "Bonded interfaces not supported\n");
 		return -EOPNOTSUPP;
 	}
 
 	/* look for SAN MAC address, if multiple SAN MACs exist, only
 	 * use the first one for SPMA */
+	real_dev = (netdev->priv_flags & IFF_802_1Q_VLAN) ?
+		vlan_dev_real_dev(netdev) : netdev;
 	rcu_read_lock();
-	for_each_dev_addr(netdev, ha) {
+	for_each_dev_addr(real_dev, ha) {
 		if ((ha->type == NETDEV_HW_ADDR_T_SAN) &&
-		    (is_valid_ether_addr(fip->ctl_src_addr))) {
+		    (is_valid_ether_addr(ha->addr))) {
 			memcpy(fip->ctl_src_addr, ha->addr, ETH_ALEN);
 			fip->spma = 1;
 			break;
@@ -216,19 +322,16 @@ static int fcoe_interface_setup(struct fcoe_interface *fcoe,
 	return 0;
 }
 
-static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb);
-static void fcoe_update_src_mac(struct fcoe_ctlr *fip, u8 *old, u8 *new);
-static void fcoe_destroy_work(struct work_struct *work);
-
 /**
- * fcoe_interface_create()
- * @netdev: network interface
+ * fcoe_interface_create() - Create a FCoE interface on a net device
+ * @netdev: The net device to create the FCoE interface on
  *
  * Returns: pointer to a struct fcoe_interface or NULL on error
  */
 static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev)
 {
 	struct fcoe_interface *fcoe;
+	int err;
 
 	fcoe = kzalloc(sizeof(*fcoe), GFP_KERNEL);
 	if (!fcoe) {
@@ -245,15 +348,22 @@ static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev)
 	fcoe_ctlr_init(&fcoe->ctlr);
 	fcoe->ctlr.send = fcoe_fip_send;
 	fcoe->ctlr.update_mac = fcoe_update_src_mac;
+	fcoe->ctlr.get_src_addr = fcoe_get_src_mac;
 
-	fcoe_interface_setup(fcoe, netdev);
+	err = fcoe_interface_setup(fcoe, netdev);
+	if (err) {
+		fcoe_ctlr_destroy(&fcoe->ctlr);
+		kfree(fcoe);
+		dev_put(netdev);
+		return NULL;
+	}
 
 	return fcoe;
 }
 
 /**
- * fcoe_interface_cleanup() - clean up netdev configurations
- * @fcoe:
+ * fcoe_interface_cleanup() - Clean up a FCoE interface
+ * @fcoe: The FCoE interface to be cleaned up
  *
  * Caller must be holding the RTNL mutex
  */
@@ -262,6 +372,7 @@ void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
 	struct net_device *netdev = fcoe->netdev;
 	struct fcoe_ctlr *fip = &fcoe->ctlr;
 	u8 flogi_maddr[ETH_ALEN];
+	const struct net_device_ops *ops;
 
 	/*
 	 * Don't listen for Ethernet packets anymore.
@@ -276,16 +387,22 @@ void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
 	/* Delete secondary MAC addresses */
 	memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
 	dev_unicast_delete(netdev, flogi_maddr);
-	if (!is_zero_ether_addr(fip->data_src_addr))
-		dev_unicast_delete(netdev, fip->data_src_addr);
 	if (fip->spma)
 		dev_unicast_delete(netdev, fip->ctl_src_addr);
 	dev_mc_delete(netdev, FIP_ALL_ENODE_MACS, ETH_ALEN, 0);
+
+	/* Tell the LLD we are done w/ FCoE */
+	ops = netdev->netdev_ops;
+	if (ops->ndo_fcoe_disable) {
+		if (ops->ndo_fcoe_disable(netdev))
+			FCOE_NETDEV_DBG(netdev, "Failed to disable FCoE"
+					" specific feature for LLD.\n");
+	}
 }
 
 /**
  * fcoe_interface_release() - fcoe_port kref release function
- * @kref: embedded reference count in an fcoe_interface struct
+ * @kref: Embedded reference count in an fcoe_interface struct
  */
 static void fcoe_interface_release(struct kref *kref)
 {
@@ -301,8 +418,8 @@ static void fcoe_interface_release(struct kref *kref)
 }
 
 /**
- * fcoe_interface_get()
- * @fcoe:
+ * fcoe_interface_get() - Get a reference to a FCoE interface
+ * @fcoe: The FCoE interface to be held
  */
 static inline void fcoe_interface_get(struct fcoe_interface *fcoe)
 {
@@ -310,8 +427,8 @@ static inline void fcoe_interface_get(struct fcoe_interface *fcoe)
 }
 
 /**
- * fcoe_interface_put()
- * @fcoe:
+ * fcoe_interface_put() - Put a reference to a FCoE interface
+ * @fcoe: The FCoE interface to be released
  */
 static inline void fcoe_interface_put(struct fcoe_interface *fcoe)
 {
@@ -319,15 +436,16 @@ static inline void fcoe_interface_put(struct fcoe_interface *fcoe)
 }
 
 /**
- * fcoe_fip_recv - handle a received FIP frame.
- * @skb: the receive skb
- * @dev: associated &net_device
- * @ptype: the &packet_type structure which was used to register this handler.
- * @orig_dev: original receive &net_device, in case @dev is a bond.
+ * fcoe_fip_recv() - Handler for received FIP frames
+ * @skb:      The receive skb
+ * @netdev:   The associated net device
+ * @ptype:    The packet_type structure which was used to register this handler
+ * @orig_dev: The original net_device that the skb was received on
+ *	      (in case dev is a bond)
  *
  * Returns: 0 for success
  */
-static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *dev,
+static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *netdev,
 			 struct packet_type *ptype,
 			 struct net_device *orig_dev)
 {
@@ -339,9 +457,9 @@ static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *dev,
 }
 
 /**
- * fcoe_fip_send() - send an Ethernet-encapsulated FIP frame.
- * @fip: FCoE controller.
- * @skb: FIP Packet.
+ * fcoe_fip_send() - Send an Ethernet-encapsulated FIP frame
+ * @fip: The FCoE controller
+ * @skb: The FIP packet to be sent
  */
 static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
 {
@@ -350,88 +468,101 @@ static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
 }
 
 /**
- * fcoe_update_src_mac() - Update Ethernet MAC filters.
- * @fip: FCoE controller.
- * @old: Unicast MAC address to delete if the MAC is non-zero.
- * @new: Unicast MAC address to add.
+ * fcoe_update_src_mac() - Update the Ethernet MAC filters
+ * @lport: The local port to update the source MAC on
+ * @addr:  Unicast MAC address to add
  *
  * Remove any previously-set unicast MAC filter.
  * Add secondary FCoE MAC address filter for our OUI.
  */
-static void fcoe_update_src_mac(struct fcoe_ctlr *fip, u8 *old, u8 *new)
+static void fcoe_update_src_mac(struct fc_lport *lport, u8 *addr)
 {
-	struct fcoe_interface *fcoe;
+	struct fcoe_port *port = lport_priv(lport);
+	struct fcoe_interface *fcoe = port->fcoe;
 
-	fcoe = fcoe_from_ctlr(fip);
 	rtnl_lock();
-	if (!is_zero_ether_addr(old))
-		dev_unicast_delete(fcoe->netdev, old);
-	dev_unicast_add(fcoe->netdev, new);
+	if (!is_zero_ether_addr(port->data_src_addr))
+		dev_unicast_delete(fcoe->netdev, port->data_src_addr);
+	if (!is_zero_ether_addr(addr))
+		dev_unicast_add(fcoe->netdev, addr);
+	memcpy(port->data_src_addr, addr, ETH_ALEN);
 	rtnl_unlock();
 }
 
 /**
- * fcoe_lport_config() - sets up the fc_lport
- * @lp: ptr to the fc_lport
+ * fcoe_get_src_mac() - return the Ethernet source address for an lport
+ * @lport: libfc lport
+ */
+static u8 *fcoe_get_src_mac(struct fc_lport *lport)
+{
+	struct fcoe_port *port = lport_priv(lport);
+
+	return port->data_src_addr;
+}
+
+/**
+ * fcoe_lport_config() - Set up a local port
+ * @lport: The local port to be setup
  *
  * Returns: 0 for success
  */
-static int fcoe_lport_config(struct fc_lport *lp)
+static int fcoe_lport_config(struct fc_lport *lport)
 {
-	lp->link_up = 0;
-	lp->qfull = 0;
-	lp->max_retry_count = 3;
-	lp->max_rport_retry_count = 3;
-	lp->e_d_tov = 2 * 1000;	/* FC-FS default */
-	lp->r_a_tov = 2 * 2 * 1000;
-	lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
-			      FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
-
-	fc_lport_init_stats(lp);
+	lport->link_up = 0;
+	lport->qfull = 0;
+	lport->max_retry_count = 3;
+	lport->max_rport_retry_count = 3;
+	lport->e_d_tov = 2 * 1000;	/* FC-FS default */
+	lport->r_a_tov = 2 * 2 * 1000;
+	lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
+				 FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
+	lport->does_npiv = 1;
+
+	fc_lport_init_stats(lport);
 
 	/* lport fc_lport related configuration */
-	fc_lport_config(lp);
+	fc_lport_config(lport);
 
 	/* offload related configuration */
-	lp->crc_offload = 0;
-	lp->seq_offload = 0;
-	lp->lro_enabled = 0;
-	lp->lro_xid = 0;
-	lp->lso_max = 0;
+	lport->crc_offload = 0;
+	lport->seq_offload = 0;
+	lport->lro_enabled = 0;
+	lport->lro_xid = 0;
+	lport->lso_max = 0;
 
 	return 0;
 }
 
 /**
- * fcoe_queue_timer() - fcoe queue timer
- * @lp: the fc_lport pointer
+ * fcoe_queue_timer() - The fcoe queue timer
+ * @lport: The local port
  *
  * Calls fcoe_check_wait_queue on timeout
- *
  */
-static void fcoe_queue_timer(ulong lp)
+static void fcoe_queue_timer(ulong lport)
 {
-	fcoe_check_wait_queue((struct fc_lport *)lp, NULL);
+	fcoe_check_wait_queue((struct fc_lport *)lport, NULL);
 }
 
 /**
- * fcoe_netdev_config() - Set up netdev for SW FCoE
- * @lp : ptr to the fc_lport
- * @netdev : ptr to the associated netdevice struct
+ * fcoe_netdev_config() - Set up a net device for SW FCoE
+ * @lport:  The local port that is associated with the net device
+ * @netdev: The associated net device
  *
- * Must be called after fcoe_lport_config() as it will use lport mutex
+ * Must be called after fcoe_lport_config() as it will use the local port mutex
  *
- * Returns : 0 for success
+ * Returns: 0 for success
  */
-static int fcoe_netdev_config(struct fc_lport *lp, struct net_device *netdev)
+static int fcoe_netdev_config(struct fc_lport *lport, struct net_device *netdev)
 {
 	u32 mfs;
 	u64 wwnn, wwpn;
 	struct fcoe_interface *fcoe;
 	struct fcoe_port *port;
+	int vid = 0;
 
 	/* Setup lport private data to point to fcoe softc */
-	port = lport_priv(lp);
+	port = lport_priv(lport);
 	fcoe = port->fcoe;
 
 	/*
@@ -439,86 +570,112 @@ static int fcoe_netdev_config(struct fc_lport *lp, struct net_device *netdev)
 	 * user-configured limit.  If the MFS is too low, fcoe_link_ok()
 	 * will return 0, so do this first.
 	 */
-	mfs = netdev->mtu - (sizeof(struct fcoe_hdr) +
-			     sizeof(struct fcoe_crc_eof));
-	if (fc_set_mfs(lp, mfs))
+	mfs = netdev->mtu;
+	if (netdev->features & NETIF_F_FCOE_MTU) {
+		mfs = FCOE_MTU;
+		FCOE_NETDEV_DBG(netdev, "Supports FCOE_MTU of %d bytes\n", mfs);
+	}
+	mfs -= (sizeof(struct fcoe_hdr) + sizeof(struct fcoe_crc_eof));
+	if (fc_set_mfs(lport, mfs))
 		return -EINVAL;
 
 	/* offload features support */
 	if (netdev->features & NETIF_F_SG)
-		lp->sg_supp = 1;
+		lport->sg_supp = 1;
 
 	if (netdev->features & NETIF_F_FCOE_CRC) {
-		lp->crc_offload = 1;
+		lport->crc_offload = 1;
 		FCOE_NETDEV_DBG(netdev, "Supports FCCRC offload\n");
 	}
 	if (netdev->features & NETIF_F_FSO) {
-		lp->seq_offload = 1;
-		lp->lso_max = netdev->gso_max_size;
+		lport->seq_offload = 1;
+		lport->lso_max = netdev->gso_max_size;
 		FCOE_NETDEV_DBG(netdev, "Supports LSO for max len 0x%x\n",
-				lp->lso_max);
+				lport->lso_max);
 	}
 	if (netdev->fcoe_ddp_xid) {
-		lp->lro_enabled = 1;
-		lp->lro_xid = netdev->fcoe_ddp_xid;
+		lport->lro_enabled = 1;
+		lport->lro_xid = netdev->fcoe_ddp_xid;
 		FCOE_NETDEV_DBG(netdev, "Supports LRO for max xid 0x%x\n",
-				lp->lro_xid);
+				lport->lro_xid);
 	}
 	skb_queue_head_init(&port->fcoe_pending_queue);
 	port->fcoe_pending_queue_active = 0;
-	setup_timer(&port->timer, fcoe_queue_timer, (unsigned long)lp);
+	setup_timer(&port->timer, fcoe_queue_timer, (unsigned long)lport);
 
-	wwnn = fcoe_wwn_from_mac(netdev->dev_addr, 1, 0);
-	fc_set_wwnn(lp, wwnn);
-	/* XXX - 3rd arg needs to be vlan id */
-	wwpn = fcoe_wwn_from_mac(netdev->dev_addr, 2, 0);
-	fc_set_wwpn(lp, wwpn);
+	if (!lport->vport) {
+		/*
+		 * Use NAA 1&2 (FC-FS Rev. 2.0, Sec. 15) to generate WWNN/WWPN:
+		 * For WWNN, we use NAA 1 w/ bit 27-16 of word 0 as 0.
+		 * For WWPN, we use NAA 2 w/ bit 27-16 of word 0 from VLAN ID
+		 */
+		if (netdev->priv_flags & IFF_802_1Q_VLAN)
+			vid = vlan_dev_vlan_id(netdev);
+		wwnn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr, 1, 0);
+		fc_set_wwnn(lport, wwnn);
+		wwpn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr, 2, vid);
+		fc_set_wwpn(lport, wwpn);
+	}
 
 	return 0;
 }
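
The names generated here follow the FC-FS NAA scheme that fcoe_wwn_from_mac() encodes: the 48-bit MAC fills the low six bytes, the NAA value occupies the top nibble, and for NAA 2 the 12-bit port value (here the VLAN ID) sits in bits 48-59. A hedged worked example, assuming control MAC 00:0e:1e:aa:bb:cc on VLAN 101 (0x065):

	WWNN (NAA 1, port 0):   0x1000000e1eaabbcc
	WWPN (NAA 2, VLAN 101): 0x2065000e1eaabbcc

Deriving both names from ctl_src_addr rather than the netdev address presumably keeps them tied to the FCoE control MAC (the SAN MAC when SPMA is in use) instead of the LAN address.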
 
 /**
- * fcoe_shost_config() - Sets up fc_lport->host
- * @lp : ptr to the fc_lport
- * @shost : ptr to the associated scsi host
- * @dev : device associated to scsi host
+ * fcoe_shost_config() - Set up the SCSI host associated with a local port
+ * @lport: The local port
+ * @shost: The SCSI host to associate with the local port
+ * @dev:   The device associated with the SCSI host
  *
  * Must be called after fcoe_lport_config() and fcoe_netdev_config()
  *
- * Returns : 0 for success
+ * Returns: 0 for success
  */
-static int fcoe_shost_config(struct fc_lport *lp, struct Scsi_Host *shost,
-				struct device *dev)
+static int fcoe_shost_config(struct fc_lport *lport, struct Scsi_Host *shost,
+			     struct device *dev)
 {
 	int rc = 0;
 
 	/* lport scsi host config */
-	lp->host = shost;
-
-	lp->host->max_lun = FCOE_MAX_LUN;
-	lp->host->max_id = FCOE_MAX_FCP_TARGET;
-	lp->host->max_channel = 0;
-	lp->host->transportt = scsi_transport_fcoe_sw;
+	lport->host->max_lun = FCOE_MAX_LUN;
+	lport->host->max_id = FCOE_MAX_FCP_TARGET;
+	lport->host->max_channel = 0;
+	if (lport->vport)
+		lport->host->transportt = fcoe_vport_transport_template;
+	else
+		lport->host->transportt = fcoe_transport_template;
 
 	/* add the new host to the SCSI-ml */
-	rc = scsi_add_host(lp->host, dev);
+	rc = scsi_add_host(lport->host, dev);
 	if (rc) {
-		FCOE_NETDEV_DBG(fcoe_netdev(lp), "fcoe_shost_config: "
+		FCOE_NETDEV_DBG(fcoe_netdev(lport), "fcoe_shost_config: "
 				"error on scsi_add_host\n");
 		return rc;
 	}
-	sprintf(fc_host_symbolic_name(lp->host), "%s v%s over %s",
-		FCOE_NAME, FCOE_VERSION,
-		fcoe_netdev(lp)->name);
+
+	if (!lport->vport)
+		fc_host_max_npiv_vports(lport->host) = USHORT_MAX;
+
+	snprintf(fc_host_symbolic_name(lport->host), FC_SYMBOLIC_NAME_SIZE,
+		 "%s v%s over %s", FCOE_NAME, FCOE_VERSION,
+		 fcoe_netdev(lport)->name);
 
 	return 0;
 }
 
-/*
- * fcoe_oem_match() - match for read types IO
- * @fp: the fc_frame for new IO.
+/**
+ * fcoe_oem_match() - The match routine for the offloaded exchange manager
+ * @fp: The I/O frame
+ *
+ * This routine will be associated with an exchange manager (EM). When
+ * the libfc exchange handling code is looking for an EM to use it will
+ * call this routine and pass it the frame that it wishes to send. This
+ * routine will return True if the associated EM is to be used and False
+ * if the exchange code should continue looking for an EM.
  *
- * Returns : true for read types IO, otherwise returns false.
+ * The offload EM that this routine is associated with will handle any
+ * packets that are for SCSI read requests.
+ *
+ * Returns: True for read-type I/O, otherwise False.
  */
 bool fcoe_oem_match(struct fc_frame *fp)
 {
@@ -527,14 +684,14 @@ bool fcoe_oem_match(struct fc_frame *fp)
 }
 
 /**
- * fcoe_em_config() - allocates em for this lport
- * @lp: the fcoe that em is to allocated for
+ * fcoe_em_config() - Allocate and configure an exchange manager
+ * @lport: The local port that the new EM will be associated with
  *
- * Returns : 0 on success
+ * Returns: 0 on success
  */
-static inline int fcoe_em_config(struct fc_lport *lp)
+static inline int fcoe_em_config(struct fc_lport *lport)
 {
-	struct fcoe_port *port = lport_priv(lp);
+	struct fcoe_port *port = lport_priv(lport);
 	struct fcoe_interface *fcoe = port->fcoe;
 	struct fcoe_interface *oldfcoe = NULL;
 	struct net_device *old_real_dev, *cur_real_dev;
@@ -545,8 +702,9 @@ static inline int fcoe_em_config(struct fc_lport *lp)
 	 * Check if need to allocate an em instance for
 	 * offload exchange ids to be shared across all VN_PORTs/lport.
 	 */
-	if (!lp->lro_enabled || !lp->lro_xid || (lp->lro_xid >= max_xid)) {
-		lp->lro_xid = 0;
+	if (!lport->lro_enabled || !lport->lro_xid ||
+	    (lport->lro_xid >= max_xid)) {
+		lport->lro_xid = 0;
 		goto skip_oem;
 	}
 
@@ -572,16 +730,16 @@ static inline int fcoe_em_config(struct fc_lport *lp)
 	}
 
 	if (fcoe->oem) {
-		if (!fc_exch_mgr_add(lp, fcoe->oem, fcoe_oem_match)) {
+		if (!fc_exch_mgr_add(lport, fcoe->oem, fcoe_oem_match)) {
 			printk(KERN_ERR "fcoe_em_config: failed to add "
 			       "offload em:%p on interface:%s\n",
 			       fcoe->oem, fcoe->netdev->name);
 			return -ENOMEM;
 		}
 	} else {
-		fcoe->oem = fc_exch_mgr_alloc(lp, FC_CLASS_3,
-					    FCOE_MIN_XID, lp->lro_xid,
-					    fcoe_oem_match);
+		fcoe->oem = fc_exch_mgr_alloc(lport, FC_CLASS_3,
+					      FCOE_MIN_XID, lport->lro_xid,
+					      fcoe_oem_match);
 		if (!fcoe->oem) {
 			printk(KERN_ERR "fcoe_em_config: failed to allocate "
 			       "em for offload exches on interface:%s\n",
@@ -593,10 +751,10 @@ static inline int fcoe_em_config(struct fc_lport *lp)
 	/*
 	 * Exclude offload EM xid range from next EM xid range.
 	 */
-	min_xid += lp->lro_xid + 1;
+	min_xid += lport->lro_xid + 1;
 
 skip_oem:
-	if (!fc_exch_mgr_alloc(lp, FC_CLASS_3, min_xid, max_xid, NULL)) {
+	if (!fc_exch_mgr_alloc(lport, FC_CLASS_3, min_xid, max_xid, NULL)) {
 		printk(KERN_ERR "fcoe_em_config: failed to "
 		       "allocate em on interface %s\n", fcoe->netdev->name);
 		return -ENOMEM;
@@ -606,8 +764,8 @@ skip_oem:
 }
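
fcoe_em_config() ends up splitting the exchange ID space between the shared offload EM and the per-lport default EM. A hedged numeric illustration, assuming FCOE_MIN_XID = 0x0000, FCOE_MAX_XID = 0x0FFF and a DDP-capable netdev reporting fcoe_ddp_xid = 0x07FF:

	offload EM: XIDs 0x0000 - 0x07FF, shared by all lports on the same
	            real device (handles SCSI reads, per fcoe_oem_match)
	default EM: XIDs 0x0800 - 0x0FFF (min_xid = lro_xid + 1)

If the netdev advertises no usable DDP range, lro_xid is zeroed and a single default EM covers the whole range.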
 
 /**
- * fcoe_if_destroy() - FCoE software HBA tear-down function
- * @lport: fc_lport to destroy
+ * fcoe_if_destroy() - Tear down a SW FCoE instance
+ * @lport: The local port to be destroyed
  */
 static void fcoe_if_destroy(struct fc_lport *lport)
 {
@@ -630,6 +788,11 @@ static void fcoe_if_destroy(struct fc_lport *lport)
 	/* Free existing transmit skbs */
 	fcoe_clean_pending_queue(lport);
 
+	rtnl_lock();
+	if (!is_zero_ether_addr(port->data_src_addr))
+		dev_unicast_delete(netdev, port->data_src_addr);
+	rtnl_unlock();
+
 	/* receives may not be stopped until after this */
 	fcoe_interface_put(fcoe);
 
@@ -650,82 +813,89 @@ static void fcoe_if_destroy(struct fc_lport *lport)
 	scsi_host_put(lport->host);
 }
 
-/*
- * fcoe_ddp_setup - calls LLD's ddp_setup through net_device
- * @lp:	the corresponding fc_lport
- * @xid: the exchange id for this ddp transfer
- * @sgl: the scatterlist describing this transfer
- * @sgc: number of sg items
+/**
+ * fcoe_ddp_setup() - Call a LLD's ddp_setup through the net device
+ * @lport: The local port to setup DDP for
+ * @xid:   The exchange ID for this DDP transfer
+ * @sgl:   The scatterlist describing this transfer
+ * @sgc:   The number of sg items
  *
- * Returns : 0 no ddp
+ * Returns: 0 if the DDP context was not configured
  */
-static int fcoe_ddp_setup(struct fc_lport *lp, u16 xid,
-			     struct scatterlist *sgl, unsigned int sgc)
+static int fcoe_ddp_setup(struct fc_lport *lport, u16 xid,
+			  struct scatterlist *sgl, unsigned int sgc)
 {
-	struct net_device *n = fcoe_netdev(lp);
+	struct net_device *netdev = fcoe_netdev(lport);
 
-	if (n->netdev_ops && n->netdev_ops->ndo_fcoe_ddp_setup)
-		return n->netdev_ops->ndo_fcoe_ddp_setup(n, xid, sgl, sgc);
+	if (netdev->netdev_ops->ndo_fcoe_ddp_setup)
+		return netdev->netdev_ops->ndo_fcoe_ddp_setup(netdev,
+							      xid, sgl,
+							      sgc);
 
 	return 0;
 }
 
-/*
- * fcoe_ddp_done - calls LLD's ddp_done through net_device
- * @lp:	the corresponding fc_lport
- * @xid: the exchange id for this ddp transfer
+/**
+ * fcoe_ddp_done() - Call a LLD's ddp_done through the net device
+ * @lport: The local port to complete DDP on
+ * @xid:   The exchange ID for this DDP transfer
  *
- * Returns : the length of data that have been completed by ddp
+ * Returns: the length of data that have been completed by DDP
  */
-static int fcoe_ddp_done(struct fc_lport *lp, u16 xid)
+static int fcoe_ddp_done(struct fc_lport *lport, u16 xid)
 {
-	struct net_device *n = fcoe_netdev(lp);
+	struct net_device *netdev = fcoe_netdev(lport);
 
-	if (n->netdev_ops && n->netdev_ops->ndo_fcoe_ddp_done)
-		return n->netdev_ops->ndo_fcoe_ddp_done(n, xid);
+	if (netdev->netdev_ops->ndo_fcoe_ddp_done)
+		return netdev->netdev_ops->ndo_fcoe_ddp_done(netdev, xid);
 	return 0;
 }
 
-static struct libfc_function_template fcoe_libfc_fcn_templ = {
-	.frame_send = fcoe_xmit,
-	.ddp_setup = fcoe_ddp_setup,
-	.ddp_done = fcoe_ddp_done,
-};
-
 /**
- * fcoe_if_create() - this function creates the fcoe port
- * @fcoe: fcoe_interface structure to create an fc_lport instance on
- * @parent: device pointer to be the parent in sysfs for the SCSI host
+ * fcoe_if_create() - Create a FCoE instance on an interface
+ * @fcoe:   The FCoE interface to create a local port on
+ * @parent: The device pointer to be the parent in sysfs for the SCSI host
+ * @npiv:   Indicates if the port is a vport or not
  *
- * Creates fc_lport struct and scsi_host for lport, configures lport.
+ * Creates a fc_lport instance and a Scsi_Host instance and configure them.
  *
- * Returns : The allocated fc_lport or an error pointer
+ * Returns: The allocated fc_lport or an error pointer
  */
 static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
-				       struct device *parent)
+				       struct device *parent, int npiv)
 {
-	int rc;
+	struct net_device *netdev = fcoe->netdev;
 	struct fc_lport *lport = NULL;
 	struct fcoe_port *port;
 	struct Scsi_Host *shost;
-	struct net_device *netdev = fcoe->netdev;
+	int rc;
+	/*
+	 * parent is only a vport if npiv is 1,
+	 * but we'll only use vport in that case so go ahead and set it
+	 */
+	struct fc_vport *vport = dev_to_vport(parent);
 
 	FCOE_NETDEV_DBG(netdev, "Create Interface\n");
 
-	shost = libfc_host_alloc(&fcoe_shost_template,
-				 sizeof(struct fcoe_port));
-	if (!shost) {
+	if (!npiv) {
+		lport = libfc_host_alloc(&fcoe_shost_template,
+					 sizeof(struct fcoe_port));
+	} else	{
+		lport = libfc_vport_create(vport,
+					   sizeof(struct fcoe_port));
+	}
+	if (!lport) {
 		FCOE_NETDEV_DBG(netdev, "Could not allocate host structure\n");
 		rc = -ENOMEM;
 		goto out;
 	}
-	lport = shost_priv(shost);
+	shost = lport->host;
 	port = lport_priv(lport);
 	port->lport = lport;
 	port->fcoe = fcoe;
 	INIT_WORK(&port->destroy_work, fcoe_destroy_work);
 
-	/* configure fc_lport, e.g., em */
+	/* configure a fc_lport including the exchange manager */
 	rc = fcoe_lport_config(lport);
 	if (rc) {
 		FCOE_NETDEV_DBG(netdev, "Could not configure lport for the "
@@ -733,6 +903,13 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
 		goto out_host_put;
 	}
 
+	if (npiv) {
+		FCOE_NETDEV_DBG(netdev, "Setting vport names, 0x%llX 0x%llX\n",
+				vport->node_name, vport->port_name);
+		fc_set_wwnn(lport, vport->node_name);
+		fc_set_wwpn(lport, vport->port_name);
+	}
+
 	/* configure lport network properties */
 	rc = fcoe_netdev_config(lport, netdev);
 	if (rc) {
@@ -757,21 +934,24 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
 		goto out_lp_destroy;
 	}
 
-	/*
-	 * fcoe_em_alloc() and fcoe_hostlist_add() both
-	 * need to be atomic with respect to other changes to the hostlist
-	 * since fcoe_em_alloc() looks for an existing EM
-	 * instance on host list updated by fcoe_hostlist_add().
-	 *
-	 * This is currently handled through the fcoe_config_mutex begin held.
-	 */
+	if (!npiv) {
+		/*
+		 * fcoe_em_alloc() and fcoe_hostlist_add() both
+		 * need to be atomic with respect to other changes to the
+		 * hostlist since fcoe_em_alloc() looks for an existing EM
+		 * instance on host list updated by fcoe_hostlist_add().
+		 *
+		 * This is currently handled through the fcoe_config_mutex
+		 * being held.
+		 */
 
-	/* lport exch manager allocation */
-	rc = fcoe_em_config(lport);
-	if (rc) {
-		FCOE_NETDEV_DBG(netdev, "Could not configure the EM for the "
-				"interface\n");
-		goto out_lp_destroy;
+		/* lport exch manager allocation */
+		rc = fcoe_em_config(lport);
+		if (rc) {
+			FCOE_NETDEV_DBG(netdev, "Could not configure the EM "
+					"for the interface\n");
+			goto out_lp_destroy;
+		}
 	}
 
 	fcoe_interface_get(fcoe);
@@ -786,17 +966,20 @@ out:
 }
 
 /**
- * fcoe_if_init() - attach to scsi transport
+ * fcoe_if_init() - Initialization routine for fcoe.ko
+ *
+ * Attaches the SW FCoE transport to the FC transport
  *
- * Returns : 0 on success
+ * Returns: 0 on success
  */
 static int __init fcoe_if_init(void)
 {
 	/* attach to scsi transport */
-	scsi_transport_fcoe_sw =
-		fc_attach_transport(&fcoe_transport_function);
+	fcoe_transport_template = fc_attach_transport(&fcoe_transport_function);
+	fcoe_vport_transport_template =
+		fc_attach_transport(&fcoe_vport_transport_function);
 
-	if (!scsi_transport_fcoe_sw) {
+	if (!fcoe_transport_template) {
 		printk(KERN_ERR "fcoe: Failed to attach to the FC transport\n");
 		return -ENODEV;
 	}
@@ -805,20 +988,24 @@ static int __init fcoe_if_init(void)
 }
 
 /**
- * fcoe_if_exit() - detach from scsi transport
+ * fcoe_if_exit() - Tear down fcoe.ko
+ *
+ * Detaches the SW FCoE transport from the FC transport
  *
- * Returns : 0 on success
+ * Returns: 0 on success
  */
 int __exit fcoe_if_exit(void)
 {
-	fc_release_transport(scsi_transport_fcoe_sw);
-	scsi_transport_fcoe_sw = NULL;
+	fc_release_transport(fcoe_transport_template);
+	fc_release_transport(fcoe_vport_transport_template);
+	fcoe_transport_template = NULL;
+	fcoe_vport_transport_template = NULL;
 	return 0;
 }
 
 /**
- * fcoe_percpu_thread_create() - Create a receive thread for an online cpu
- * @cpu: cpu index for the online cpu
+ * fcoe_percpu_thread_create() - Create a receive thread for an online CPU
+ * @cpu: The CPU index of the CPU to create a receive thread for
  */
 static void fcoe_percpu_thread_create(unsigned int cpu)
 {
@@ -841,8 +1028,8 @@ static void fcoe_percpu_thread_create(unsigned int cpu)
 }
 
 /**
- * fcoe_percpu_thread_destroy() - removes the rx thread for the given cpu
- * @cpu: cpu index the rx thread is to be removed
+ * fcoe_percpu_thread_destroy() - Remove the receive thread of a CPU
+ * @cpu: The CPU index of the CPU whose receive thread is to be destroyed
  *
  * Destroys a per-CPU Rx thread. Any pending skbs are moved to the
  * current CPU's Rx thread. If the thread being destroyed is bound to
@@ -890,7 +1077,7 @@ static void fcoe_percpu_thread_destroy(unsigned int cpu)
 		} else {
 			/*
 			 * The targeted CPU is not initialized and cannot accept
-			 * new  skbs. Unlock the targeted CPU and drop the skbs
+			 * new	skbs. Unlock the targeted CPU and drop the skbs
 			 * on the CPU that is going offline.
 			 */
 			while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
@@ -931,12 +1118,12 @@ static void fcoe_percpu_thread_destroy(unsigned int cpu)
 }
 
 /**
- * fcoe_cpu_callback() - fcoe cpu hotplug event callback
- * @nfb: callback data block
- * @action: event triggering the callback
- * @hcpu: index for the cpu of this event
+ * fcoe_cpu_callback() - Handler for CPU hotplug events
+ * @nfb:    The callback data block
+ * @action: The event triggering the callback
+ * @hcpu:   The index of the CPU that the event is for
  *
- * This creates or destroys per cpu data for fcoe
+ * This creates or destroys per-CPU data for fcoe
  *
  * Returns NOTIFY_OK always.
  */
@@ -962,25 +1149,22 @@ static int fcoe_cpu_callback(struct notifier_block *nfb,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block fcoe_cpu_notifier = {
-	.notifier_call = fcoe_cpu_callback,
-};
-
 /**
- * fcoe_rcv() - this is the fcoe receive function called by NET_RX_SOFTIRQ
- * @skb: the receive skb
- * @dev: associated net device
- * @ptype: context
- * @olddev: last device
+ * fcoe_rcv() - Receive packets from a net device
+ * @skb:    The received packet
+ * @netdev: The net device that the packet was received on
+ * @ptype:  The packet type context
+ * @olddev: The last device net device
  *
- * this function will receive the packet and build fc frame and pass it up
+ * This routine is called by NET_RX_SOFTIRQ. It receives a packet, builds a
+ * FC frame and passes the frame to libfc.
  *
  * Returns: 0 for success
  */
-int fcoe_rcv(struct sk_buff *skb, struct net_device *dev,
+int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
 	     struct packet_type *ptype, struct net_device *olddev)
 {
-	struct fc_lport *lp;
+	struct fc_lport *lport;
 	struct fcoe_rcv_info *fr;
 	struct fcoe_interface *fcoe;
 	struct fc_frame_header *fh;
@@ -988,15 +1172,15 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *dev,
 	unsigned int cpu;
 
 	fcoe = container_of(ptype, struct fcoe_interface, fcoe_packet_type);
-	lp = fcoe->ctlr.lp;
-	if (unlikely(lp == NULL)) {
-		FCOE_NETDEV_DBG(dev, "Cannot find hba structure");
+	lport = fcoe->ctlr.lp;
+	if (unlikely(!lport)) {
+		FCOE_NETDEV_DBG(netdev, "Cannot find hba structure");
 		goto err2;
 	}
-	if (!lp->link_up)
+	if (!lport->link_up)
 		goto err2;
 
-	FCOE_NETDEV_DBG(dev, "skb_info: len:%d data_len:%d head:%p "
+	FCOE_NETDEV_DBG(netdev, "skb_info: len:%d data_len:%d head:%p "
 			"data:%p tail:%p end:%p sum:%d dev:%s",
 			skb->len, skb->data_len, skb->head, skb->data,
 			skb_tail_pointer(skb), skb_end_pointer(skb),
@@ -1004,7 +1188,7 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *dev,
 
 	/* check for FCOE packet type */
 	if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) {
-		FCOE_NETDEV_DBG(dev, "Wrong FC type frame");
+		FCOE_NETDEV_DBG(netdev, "Wrong FC type frame");
 		goto err;
 	}
 
@@ -1013,14 +1197,14 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *dev,
 	 * and FC headers are pulled into the linear data area.
 	 */
 	if (unlikely((skb->len < FCOE_MIN_FRAME) ||
-	    !pskb_may_pull(skb, FCOE_HEADER_LEN)))
+		     !pskb_may_pull(skb, FCOE_HEADER_LEN)))
 		goto err;
 
 	skb_set_transport_header(skb, sizeof(struct fcoe_hdr));
 	fh = (struct fc_frame_header *) skb_transport_header(skb);
 
 	fr = fcoe_dev_from_skb(skb);
-	fr->fr_dev = lp;
+	fr->fr_dev = lport;
 	fr->ptype = ptype;
 
 	/*
@@ -1042,7 +1226,7 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *dev,
 		 * the first CPU now. For non-SMP systems this
 		 * will check the same CPU twice.
 		 */
-		FCOE_NETDEV_DBG(dev, "CPU is online, but no receive thread "
+		FCOE_NETDEV_DBG(netdev, "CPU is online, but no receive thread "
 				"ready for incoming skb- using first online "
 				"CPU.\n");
 
@@ -1061,15 +1245,29 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *dev,
 	 * this skb. We also have this receive thread locked,
 	 * so we're free to queue skbs into it's queue.
 	 */
-	__skb_queue_tail(&fps->fcoe_rx_list, skb);
-	if (fps->fcoe_rx_list.qlen == 1)
-		wake_up_process(fps->thread);
 
-	spin_unlock_bh(&fps->fcoe_rx_list.lock);
+	/* If this is a SCSI-FCP frame, and this is already executing on the
+	 * correct CPU, and the queue for this CPU is empty, then go ahead
+	 * and process the frame directly in the softirq context.
+	 * This lets us process completions without context switching from the
+	 * NET_RX softirq, to our receive processing thread, and then back to
+	 * BLOCK softirq context.
+	 */
+	if (fh->fh_type == FC_TYPE_FCP &&
+	    cpu == smp_processor_id() &&
+	    skb_queue_empty(&fps->fcoe_rx_list)) {
+		spin_unlock_bh(&fps->fcoe_rx_list.lock);
+		fcoe_recv_frame(skb);
+	} else {
+		__skb_queue_tail(&fps->fcoe_rx_list, skb);
+		if (fps->fcoe_rx_list.qlen == 1)
+			wake_up_process(fps->thread);
+		spin_unlock_bh(&fps->fcoe_rx_list.lock);
+	}
 
 	return 0;
 err:
-	fc_lport_get_stats(lp)->ErrorFrames++;
+	fc_lport_get_stats(lport)->ErrorFrames++;
 
 err2:
 	kfree_skb(skb);
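
The branch added above is the interesting part of this hunk: FCP frames may be completed directly in NET_RX softirq context, skipping the per-CPU thread, but only when doing so cannot reorder traffic. Annotated, the test reads (same code as above, comments added):

	if (fh->fh_type == FC_TYPE_FCP &&		/* latency-sensitive I/O */
	    cpu == smp_processor_id() &&		/* already on the right CPU */
	    skb_queue_empty(&fps->fcoe_rx_list)) {	/* nothing queued ahead */
		spin_unlock_bh(&fps->fcoe_rx_list.lock);
		fcoe_recv_frame(skb);			/* process inline, in order */
	} else {
		__skb_queue_tail(&fps->fcoe_rx_list, skb);
		if (fps->fcoe_rx_list.qlen == 1)
			wake_up_process(fps->thread);
		spin_unlock_bh(&fps->fcoe_rx_list.lock);
	}

The queue-empty check is what preserves ordering: if anything is already queued for the receive thread, this frame must line up behind it.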
@@ -1077,17 +1275,21 @@ err2:
 }
 
 /**
- * fcoe_start_io() - pass to netdev to start xmit for fcoe
- * @skb: the skb to be xmitted
+ * fcoe_start_io() - Start FCoE I/O
+ * @skb: The packet to be transmitted
+ *
+ * This routine is called from the net device to start transmitting
+ * FCoE packets.
  *
  * Returns: 0 for success
  */
 static inline int fcoe_start_io(struct sk_buff *skb)
 {
+	struct sk_buff *nskb;
 	int rc;
 
-	skb_get(skb);
-	rc = dev_queue_xmit(skb);
+	nskb = skb_clone(skb, GFP_ATOMIC);
+	if (!nskb)
+		return -ENOMEM;
+	rc = dev_queue_xmit(nskb);
 	if (rc != 0)
 		return rc;
 	kfree_skb(skb);
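
The switch from skb_get() to skb_clone() changes who owns what: dev_queue_xmit() consumes the skb it is given even on most failures, so transmitting a clone leaves the original untouched for fcoe_check_wait_queue() to requeue. The ownership rule, annotated (same flow as the function above):

	nskb = skb_clone(skb, GFP_ATOMIC);	/* driver consumes the clone */
	if (!nskb)
		return -ENOMEM;			/* original stays retryable */
	rc = dev_queue_xmit(nskb);		/* consumes nskb, success or not */
	if (rc != 0)
		return rc;			/* original still valid: requeue */
	kfree_skb(skb);				/* sent: drop the original */
	return 0;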
@@ -1095,9 +1297,15 @@ static inline int fcoe_start_io(struct sk_buff *skb)
 }
 
 /**
- * fcoe_get_paged_crc_eof() - in case we need to alloc a page for crc_eof
- * @skb: the skb to be xmitted
- * @tlen: total len
+ * fcoe_get_paged_crc_eof() - Allocate a page to be used for the trailer CRC
+ * @skb:  The packet to be transmitted
+ * @tlen: The total length of the trailer
+ *
+ * This routine allocates a page for frame trailers. The page is re-used if
+ * there is enough room left on it for the current trailer. If there isn't
+ * enough room left, a new page is allocated. The references held on the
+ * page by this function and by the skbs using its fragments ensure that
+ * the page is freed at the appropriate time.
  *
  * Returns: 0 for success
  */
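
A hedged sketch of the page-reuse logic this comment describes, assuming the per-CPU crc_eof_page/crc_eof_offset bookkeeping fcoe.c keeps in struct fcoe_percpu_s (details such as the exact offset arithmetic may differ from the in-tree function):

	fps = &get_cpu_var(fcoe_percpu);
	page = fps->crc_eof_page;
	if (!page) {
		page = alloc_page(GFP_ATOMIC);	/* fresh trailer page */
		if (!page) {
			put_cpu_var(fcoe_percpu);
			return -ENOMEM;
		}
		fps->crc_eof_page = page;
		fps->crc_eof_offset = 0;
	}

	get_page(page);				/* reference for this skb */
	skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page,
			   fps->crc_eof_offset, tlen);
	skb->len += tlen;
	skb->data_len += tlen;
	skb->truesize += tlen;
	fps->crc_eof_offset += tlen;

	if (fps->crc_eof_offset >= PAGE_SIZE) {	/* page exhausted */
		fps->crc_eof_page = NULL;
		put_page(page);			/* drop the per-CPU reference */
	}
	put_cpu_var(fcoe_percpu);
	return 0;

Each skb fragment holds its own page reference, so the page lives until the last in-flight frame using it is freed.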
@@ -1136,11 +1344,12 @@ static int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen)
 }
 
 /**
- * fcoe_fc_crc() - calculates FC CRC in this fcoe skb
- * @fp: the fc_frame containing data to be checksummed
+ * fcoe_fc_crc() - Calculates the CRC for a given frame
+ * @fp: The frame to be checksumed
+ *
+ * This uses the crc32() routine to calculate the CRC for a frame
  *
- * This uses crc32() to calculate the crc for port frame
- * Return   : 32 bit crc
+ * Return: The 32 bit CRC value
  */
 u32 fcoe_fc_crc(struct fc_frame *fp)
 {
@@ -1171,13 +1380,13 @@ u32 fcoe_fc_crc(struct fc_frame *fp)
 }
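
A condensed sketch of the CRC walk over a possibly non-linear skb (the in-tree function additionally splits fragments that straddle page boundaries; the names below come from the 2.6.32 skb API):

	u32 crc = crc32(~0, skb->data, skb_headlen(skb));
	int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		unsigned char *data = kmap_atomic(frag->page,
						  KM_SKB_DATA_SOFTIRQ);

		crc = crc32(crc, data + frag->page_offset, frag->size);
		kunmap_atomic(data, KM_SKB_DATA_SOFTIRQ);
	}
	return crc;	/* fcoe_xmit() stores the complement, ~crc, in the trailer */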
 
 /**
- * fcoe_xmit() - FCoE frame transmit function
- * @lp:	the associated local fcoe
- * @fp: the fc_frame to be transmitted
+ * fcoe_xmit() - Transmit a FCoE frame
+ * @lport: The local port that the frame is to be transmitted for
+ * @fp:	   The frame to be transmitted
  *
- * Return   : 0 for success
+ * Return: 0 for success
  */
-int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
+int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
 {
 	int wlen;
 	u32 crc;
@@ -1189,7 +1398,7 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
 	unsigned int hlen;		/* header length implies the version */
 	unsigned int tlen;		/* trailer length */
 	unsigned int elen;		/* eth header, may include vlan */
-	struct fcoe_port *port = lport_priv(lp);
+	struct fcoe_port *port = lport_priv(lport);
 	struct fcoe_interface *fcoe = port->fcoe;
 	u8 sof, eof;
 	struct fcoe_hdr *hp;
@@ -1200,13 +1409,13 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
 	skb = fp_skb(fp);
 	wlen = skb->len / FCOE_WORD_TO_BYTE;
 
-	if (!lp->link_up) {
+	if (!lport->link_up) {
 		kfree_skb(skb);
 		return 0;
 	}
 
 	if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ) &&
-	    fcoe_ctlr_els_send(&fcoe->ctlr, skb))
+	    fcoe_ctlr_els_send(&fcoe->ctlr, lport, skb))
 		return 0;
 
 	sof = fr_sof(fp);
@@ -1218,7 +1427,7 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
 	wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE;
 
 	/* crc offload */
-	if (likely(lp->crc_offload)) {
+	if (likely(lport->crc_offload)) {
 		skb->ip_summed = CHECKSUM_PARTIAL;
 		skb->csum_start = skb_headroom(skb);
 		skb->csum_offset = skb->len;
@@ -1271,7 +1480,7 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
 	if (unlikely(fcoe->ctlr.flogi_oxid != FC_XID_UNKNOWN))
 		memcpy(eh->h_source, fcoe->ctlr.ctl_src_addr, ETH_ALEN);
 	else
-		memcpy(eh->h_source, fcoe->ctlr.data_src_addr, ETH_ALEN);
+		memcpy(eh->h_source, port->data_src_addr, ETH_ALEN);
 
 	hp = (struct fcoe_hdr *)(eh + 1);
 	memset(hp, 0, sizeof(*hp));
@@ -1280,7 +1489,7 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
 	hp->fcoe_sof = sof;
 
 	/* fcoe lso, mss is in max_payload which is non-zero for FCP data */
-	if (lp->seq_offload && fr_max_payload(fp)) {
+	if (lport->seq_offload && fr_max_payload(fp)) {
 		skb_shinfo(skb)->gso_type = SKB_GSO_FCOE;
 		skb_shinfo(skb)->gso_size = fr_max_payload(fp);
 	} else {
@@ -1288,23 +1497,23 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
 		skb_shinfo(skb)->gso_size = 0;
 	}
 	/* update tx stats: regardless if LLD fails */
-	stats = fc_lport_get_stats(lp);
+	stats = fc_lport_get_stats(lport);
 	stats->TxFrames++;
 	stats->TxWords += wlen;
 
 	/* send down to lld */
-	fr_dev(fp) = lp;
+	fr_dev(fp) = lport;
 	if (port->fcoe_pending_queue.qlen)
-		fcoe_check_wait_queue(lp, skb);
+		fcoe_check_wait_queue(lport, skb);
 	else if (fcoe_start_io(skb))
-		fcoe_check_wait_queue(lp, skb);
+		fcoe_check_wait_queue(lport, skb);
 
 	return 0;
 }
 
 /**
- * fcoe_percpu_flush_done() - Indicate percpu queue flush completion.
- * @skb: the skb being completed.
+ * fcoe_percpu_flush_done() - Indicate per-CPU queue flush completion
+ * @skb: The completed skb (argument required by destructor)
  */
 static void fcoe_percpu_flush_done(struct sk_buff *skb)
 {
@@ -1312,26 +1521,134 @@ static void fcoe_percpu_flush_done(struct sk_buff *skb)
 }
 
 /**
- * fcoe_percpu_receive_thread() - recv thread per cpu
- * @arg: ptr to the fcoe per cpu struct
- *
- * Return: 0 for success
+ * fcoe_recv_frame() - Process a single received frame
+ * @skb: The frame to process
  */
-int fcoe_percpu_receive_thread(void *arg)
+static void fcoe_recv_frame(struct sk_buff *skb)
 {
-	struct fcoe_percpu_s *p = arg;
 	u32 fr_len;
-	struct fc_lport *lp;
+	struct fc_lport *lport;
 	struct fcoe_rcv_info *fr;
 	struct fcoe_dev_stats *stats;
 	struct fc_frame_header *fh;
-	struct sk_buff *skb;
 	struct fcoe_crc_eof crc_eof;
 	struct fc_frame *fp;
 	u8 *mac = NULL;
 	struct fcoe_port *port;
 	struct fcoe_hdr *hp;
 
+	fr = fcoe_dev_from_skb(skb);
+	lport = fr->fr_dev;
+	if (unlikely(!lport)) {
+		if (skb->destructor != fcoe_percpu_flush_done)
+			FCOE_NETDEV_DBG(skb->dev, "NULL lport in skb");
+		kfree_skb(skb);
+		return;
+	}
+
+	FCOE_NETDEV_DBG(skb->dev, "skb_info: len:%d data_len:%d "
+			"head:%p data:%p tail:%p end:%p sum:%d dev:%s",
+			skb->len, skb->data_len,
+			skb->head, skb->data, skb_tail_pointer(skb),
+			skb_end_pointer(skb), skb->csum,
+			skb->dev ? skb->dev->name : "<NULL>");
+
+	/*
+	 * Save source MAC address before discarding header.
+	 */
+	port = lport_priv(lport);
+	if (skb_is_nonlinear(skb))
+		skb_linearize(skb);	/* not ideal */
+	mac = eth_hdr(skb)->h_source;
+
+	/*
+	 * Frame length checks and setting up the header pointers
+	 * was done in fcoe_rcv already.
+	 */
+	hp = (struct fcoe_hdr *) skb_network_header(skb);
+	fh = (struct fc_frame_header *) skb_transport_header(skb);
+
+	stats = fc_lport_get_stats(lport);
+	if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
+		if (stats->ErrorFrames < 5)
+			printk(KERN_WARNING "fcoe: FCoE version "
+			       "mismatch: The frame has "
+			       "version %x, but the "
+			       "initiator supports version "
+			       "%x\n", FC_FCOE_DECAPS_VER(hp),
+			       FC_FCOE_VER);
+		stats->ErrorFrames++;
+		kfree_skb(skb);
+		return;
+	}
+
+	skb_pull(skb, sizeof(struct fcoe_hdr));
+	fr_len = skb->len - sizeof(struct fcoe_crc_eof);
+
+	stats->RxFrames++;
+	stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
+
+	fp = (struct fc_frame *)skb;
+	fc_frame_init(fp);
+	fr_dev(fp) = lport;
+	fr_sof(fp) = hp->fcoe_sof;
+
+	/* Copy out the CRC and EOF trailer for access */
+	if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) {
+		kfree_skb(skb);
+		return;
+	}
+	fr_eof(fp) = crc_eof.fcoe_eof;
+	fr_crc(fp) = crc_eof.fcoe_crc32;
+	if (pskb_trim(skb, fr_len)) {
+		kfree_skb(skb);
+		return;
+	}
+
+	/*
+	 * We only check the CRC here if it was not validated by
+	 * offload and the frame is not solicited data; for solicited
+	 * data the FCP layer checks the CRC during the copy.
+	 */
+	if (lport->crc_offload &&
+	    skb->ip_summed == CHECKSUM_UNNECESSARY)
+		fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
+	else
+		fr_flags(fp) |= FCPHF_CRC_UNCHECKED;
+
+	fh = fc_frame_header_get(fp);
+	if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA &&
+	    fh->fh_type == FC_TYPE_FCP) {
+		fc_exch_recv(lport, fp);
+		return;
+	}
+	if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) {
+		if (le32_to_cpu(fr_crc(fp)) !=
+		    ~crc32(~0, skb->data, fr_len)) {
+			if (stats->InvalidCRCCount < 5)
+				printk(KERN_WARNING "fcoe: dropping "
+				       "frame with CRC error\n");
+			stats->InvalidCRCCount++;
+			stats->ErrorFrames++;
+			fc_frame_free(fp);
+			return;
+		}
+		fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
+	}
+	fc_exch_recv(lport, fp);
+}
+
+/**
+ * fcoe_percpu_receive_thread() - The per-CPU packet receive thread
+ * @arg: The per-CPU context
+ *
+ * Return: 0 for success
+ */
+int fcoe_percpu_receive_thread(void *arg)
+{
+	struct fcoe_percpu_s *p = arg;
+	struct sk_buff *skb;
+
 	set_user_nice(current, -20);
 
 	while (!kthread_should_stop()) {
@@ -1347,129 +1664,27 @@ int fcoe_percpu_receive_thread(void *arg)
 			spin_lock_bh(&p->fcoe_rx_list.lock);
 		}
 		spin_unlock_bh(&p->fcoe_rx_list.lock);
-		fr = fcoe_dev_from_skb(skb);
-		lp = fr->fr_dev;
-		if (unlikely(lp == NULL)) {
-			if (skb->destructor != fcoe_percpu_flush_done)
-				FCOE_NETDEV_DBG(skb->dev, "NULL lport in skb");
-			kfree_skb(skb);
-			continue;
-		}
-
-		FCOE_NETDEV_DBG(skb->dev, "skb_info: len:%d data_len:%d "
-				"head:%p data:%p tail:%p end:%p sum:%d dev:%s",
-				skb->len, skb->data_len,
-				skb->head, skb->data, skb_tail_pointer(skb),
-				skb_end_pointer(skb), skb->csum,
-				skb->dev ? skb->dev->name : "<NULL>");
-
-		/*
-		 * Save source MAC address before discarding header.
-		 */
-		port = lport_priv(lp);
-		if (skb_is_nonlinear(skb))
-			skb_linearize(skb);	/* not ideal */
-		mac = eth_hdr(skb)->h_source;
-
-		/*
-		 * Frame length checks and setting up the header pointers
-		 * was done in fcoe_rcv already.
-		 */
-		hp = (struct fcoe_hdr *) skb_network_header(skb);
-		fh = (struct fc_frame_header *) skb_transport_header(skb);
-
-		stats = fc_lport_get_stats(lp);
-		if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
-			if (stats->ErrorFrames < 5)
-				printk(KERN_WARNING "fcoe: FCoE version "
-				       "mismatch: The frame has "
-				       "version %x, but the "
-				       "initiator supports version "
-				       "%x\n", FC_FCOE_DECAPS_VER(hp),
-				       FC_FCOE_VER);
-			stats->ErrorFrames++;
-			kfree_skb(skb);
-			continue;
-		}
-
-		skb_pull(skb, sizeof(struct fcoe_hdr));
-		fr_len = skb->len - sizeof(struct fcoe_crc_eof);
-
-		stats->RxFrames++;
-		stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
-
-		fp = (struct fc_frame *)skb;
-		fc_frame_init(fp);
-		fr_dev(fp) = lp;
-		fr_sof(fp) = hp->fcoe_sof;
-
-		/* Copy out the CRC and EOF trailer for access */
-		if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) {
-			kfree_skb(skb);
-			continue;
-		}
-		fr_eof(fp) = crc_eof.fcoe_eof;
-		fr_crc(fp) = crc_eof.fcoe_crc32;
-		if (pskb_trim(skb, fr_len)) {
-			kfree_skb(skb);
-			continue;
-		}
-
-		/*
-		 * We only check CRC if no offload is available and if it is
-		 * it's solicited data, in which case, the FCP layer would
-		 * check it during the copy.
-		 */
-		if (lp->crc_offload && skb->ip_summed == CHECKSUM_UNNECESSARY)
-			fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
-		else
-			fr_flags(fp) |= FCPHF_CRC_UNCHECKED;
-
-		fh = fc_frame_header_get(fp);
-		if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA &&
-		    fh->fh_type == FC_TYPE_FCP) {
-			fc_exch_recv(lp, fp);
-			continue;
-		}
-		if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) {
-			if (le32_to_cpu(fr_crc(fp)) !=
-			    ~crc32(~0, skb->data, fr_len)) {
-				if (stats->InvalidCRCCount < 5)
-					printk(KERN_WARNING "fcoe: dropping "
-					       "frame with CRC error\n");
-				stats->InvalidCRCCount++;
-				stats->ErrorFrames++;
-				fc_frame_free(fp);
-				continue;
-			}
-			fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
-		}
-		if (unlikely(port->fcoe->ctlr.flogi_oxid != FC_XID_UNKNOWN) &&
-		    fcoe_ctlr_recv_flogi(&port->fcoe->ctlr, fp, mac)) {
-			fc_frame_free(fp);
-			continue;
-		}
-		fc_exch_recv(lp, fp);
+		fcoe_recv_frame(skb);
 	}
 	return 0;
 }
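
With the per-frame work factored out, fcoe_percpu_receive_thread() reduces to
the stock kthread-plus-skb-queue consumer idiom. A self-contained sketch of
that idiom (module, queue and thread names are illustrative, and skb_dequeue()
stands in for the driver's open-coded locking):

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/skbuff.h>

static struct sk_buff_head demo_rx_list;
static struct task_struct *demo_task;

/* Producer side (e.g. from softirq context):
 *	skb_queue_tail(&demo_rx_list, skb);
 *	wake_up_process(demo_task);
 */

static int demo_rx_thread(void *arg)
{
	struct sk_buff *skb;

	set_user_nice(current, -20);	/* favor the receive path */
	while (!kthread_should_stop()) {
		/* Mark ourselves sleeping *before* the emptiness check
		 * so a producer's wake_up_process() cannot be lost. */
		set_current_state(TASK_INTERRUPTIBLE);
		skb = skb_dequeue(&demo_rx_list);	/* locks internally */
		if (!skb) {
			schedule();
			continue;
		}
		__set_current_state(TASK_RUNNING);
		kfree_skb(skb);		/* a real thread would process it */
	}
	return 0;
}

static int __init demo_init(void)
{
	skb_queue_head_init(&demo_rx_list);
	demo_task = kthread_run(demo_rx_thread, NULL, "demo_rx");
	return IS_ERR(demo_task) ? PTR_ERR(demo_task) : 0;
}

static void __exit demo_exit(void)
{
	kthread_stop(demo_task);
	skb_queue_purge(&demo_rx_list);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
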
 
 /**
- * fcoe_check_wait_queue() - attempt to clear the transmit backlog
- * @lp: the fc_lport
+ * fcoe_check_wait_queue() - Attempt to clear the transmit backlog
+ * @lport: The local port whose backlog is to be cleared
  *
- * This empties the wait_queue, dequeue the head of the wait_queue queue
- * and calls fcoe_start_io() for each packet, if all skb have been
- * transmitted, return qlen or -1 if a error occurs, then restore
- * wait_queue and try again later.
+ * This empties the wait_queue by dequeuing each skb at the head of the
+ * wait_queue and passing it to fcoe_start_io(). If a transmit fails, the
+ * skb is put back at the head of the wait_queue to be retried later.
  *
- * The wait_queue is used when the skb transmit fails. skb will go
- * in the wait_queue which will be emptied by the timer function or
+ * The wait_queue is used when the skb transmit fails. The failed skb
+ * will go in the wait_queue which will be emptied by the timer function or
  * by the next skb transmit.
  */
-static void fcoe_check_wait_queue(struct fc_lport *lp, struct sk_buff *skb)
+static void fcoe_check_wait_queue(struct fc_lport *lport, struct sk_buff *skb)
 {
-	struct fcoe_port *port = lport_priv(lp);
+	struct fcoe_port *port = lport_priv(lport);
 	int rc;
 
 	spin_lock_bh(&port->fcoe_pending_queue.lock);
@@ -1501,19 +1716,19 @@ static void fcoe_check_wait_queue(struct fc_lport *lp, struct sk_buff *skb)
 	}
 
 	if (port->fcoe_pending_queue.qlen < FCOE_LOW_QUEUE_DEPTH)
-		lp->qfull = 0;
+		lport->qfull = 0;
 	if (port->fcoe_pending_queue.qlen && !timer_pending(&port->timer))
 		mod_timer(&port->timer, jiffies + 2);
 	port->fcoe_pending_queue_active = 0;
 out:
 	if (port->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
-		lp->qfull = 1;
+		lport->qfull = 1;
 	spin_unlock_bh(&port->fcoe_pending_queue.lock);
 	return;
 }
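
The retry scheme the comment above describes, attempt each queued skb and on
failure restore it and re-arm a short timer, looks roughly like the sketch
below. Names are illustrative; the clone-before-xmit step mirrors how
fcoe_start_io() keeps the original skb around so a failed transmit can be
requeued:

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/jiffies.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/timer.h>

static struct sk_buff_head demo_pending;	/* skb_queue_head_init() at setup */
static struct timer_list demo_timer;	/* setup_timer(&demo_timer, demo_flush_backlog, 0) */

static int demo_start_io(struct sk_buff *skb)
{
	struct sk_buff *nskb;
	int rc;

	/* assumes skb->dev was set when the skb was queued */
	nskb = skb_clone(skb, GFP_ATOMIC);	/* keep the original for requeue */
	if (!nskb)
		return -ENOMEM;
	rc = dev_queue_xmit(nskb);		/* consumes the clone either way */
	if (rc)
		return rc;			/* caller still owns skb */
	kfree_skb(skb);				/* clone is on the wire */
	return 0;
}

static void demo_flush_backlog(unsigned long data)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&demo_pending)) != NULL) {
		if (demo_start_io(skb)) {
			/* LLD busy: restore the skb, retry in two jiffies */
			skb_queue_head(&demo_pending, skb);
			mod_timer(&demo_timer, jiffies + 2);
			return;
		}
	}
}
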
 
 /**
- * fcoe_dev_setup() - setup link change notification interface
+ * fcoe_dev_setup() - Setup the link change notification interface
  */
 static void fcoe_dev_setup(void)
 {
@@ -1521,7 +1736,7 @@ static void fcoe_dev_setup(void)
 }
 
 /**
- * fcoe_dev_cleanup() - cleanup link change notification interface
+ * fcoe_dev_cleanup() - Cleanup the link change notification interface
  */
 static void fcoe_dev_cleanup(void)
 {
@@ -1529,19 +1744,19 @@ static void fcoe_dev_cleanup(void)
 }
 
 /**
- * fcoe_device_notification() - netdev event notification callback
- * @notifier: context of the notification
- * @event: type of event
- * @ptr: fixed array for output parsed ifname
+ * fcoe_device_notification() - Handler for net device events
+ * @notifier: The context of the notification
+ * @event:    The type of event
+ * @ptr:      The net device that the event occurred on
  *
- * This function is called by the ethernet driver in case of link change event
+ * This function is called by the Ethernet driver in the case of a link
+ * change event.
  *
  * Returns: 0 for success
  */
 static int fcoe_device_notification(struct notifier_block *notifier,
 				    ulong event, void *ptr)
 {
-	struct fc_lport *lp = NULL;
+	struct fc_lport *lport = NULL;
 	struct net_device *netdev = ptr;
 	struct fcoe_interface *fcoe;
 	struct fcoe_port *port;
@@ -1552,11 +1767,11 @@ static int fcoe_device_notification(struct notifier_block *notifier,
 
 	list_for_each_entry(fcoe, &fcoe_hostlist, list) {
 		if (fcoe->netdev == netdev) {
-			lp = fcoe->ctlr.lp;
+			lport = fcoe->ctlr.lp;
 			break;
 		}
 	}
-	if (lp == NULL) {
+	if (!lport) {
 		rc = NOTIFY_DONE;
 		goto out;
 	}
@@ -1570,10 +1785,12 @@ static int fcoe_device_notification(struct notifier_block *notifier,
 	case NETDEV_CHANGE:
 		break;
 	case NETDEV_CHANGEMTU:
+		if (netdev->features & NETIF_F_FCOE_MTU)
+			break;
 		mfs = netdev->mtu - (sizeof(struct fcoe_hdr) +
 				     sizeof(struct fcoe_crc_eof));
 		if (mfs >= FC_MIN_MAX_FRAME)
-			fc_set_mfs(lp, mfs);
+			fc_set_mfs(lport, mfs);
 		break;
 	case NETDEV_REGISTER:
 		break;
@@ -1588,22 +1805,22 @@ static int fcoe_device_notification(struct notifier_block *notifier,
 		FCOE_NETDEV_DBG(netdev, "Unknown event %ld "
 				"from netdev netlink\n", event);
 	}
-	if (link_possible && !fcoe_link_ok(lp))
+	if (link_possible && !fcoe_link_ok(lport))
 		fcoe_ctlr_link_up(&fcoe->ctlr);
 	else if (fcoe_ctlr_link_down(&fcoe->ctlr)) {
-		stats = fc_lport_get_stats(lp);
+		stats = fc_lport_get_stats(lport);
 		stats->LinkFailureCount++;
-		fcoe_clean_pending_queue(lp);
+		fcoe_clean_pending_queue(lport);
 	}
 out:
 	return rc;
 }
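
fcoe_device_notification() is an ordinary netdevice notifier. A minimal sketch
of the plumbing that fcoe_dev_setup()/fcoe_dev_cleanup() wrap; the handler
name is illustrative, and note that on kernels of this vintage the void *ptr
argument is the struct net_device itself, as the code above assumes:

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>

static int demo_device_notification(struct notifier_block *nb,
				    unsigned long event, void *ptr)
{
	struct net_device *netdev = ptr;

	switch (event) {
	case NETDEV_UP:
	case NETDEV_GOING_DOWN:
	case NETDEV_CHANGEMTU:
		pr_info("demo: %s: event %lu\n", netdev->name, event);
		break;
	}
	return NOTIFY_DONE;	/* we never veto; just observe */
}

static struct notifier_block demo_notifier = {
	.notifier_call = demo_device_notification,
};

/* setup:   register_netdevice_notifier(&demo_notifier);   */
/* cleanup: unregister_netdevice_notifier(&demo_notifier); */
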
 
 /**
- * fcoe_if_to_netdev() - parse a name buffer to get netdev
- * @buffer: incoming buffer to be copied
+ * fcoe_if_to_netdev() - Parse a name buffer to get a net device
+ * @buffer: The name of the net device
  *
- * Returns: NULL or ptr to net_device
+ * Returns: NULL or a ptr to net_device
  */
 static struct net_device *fcoe_if_to_netdev(const char *buffer)
 {
@@ -1621,9 +1838,11 @@ static struct net_device *fcoe_if_to_netdev(const char *buffer)
 }
 
 /**
- * fcoe_destroy() - handles the destroy from sysfs
- * @buffer: expected to be an eth if name
- * @kp: associated kernel param
+ * fcoe_destroy() - Destroy an FCoE interface
+ * @buffer: The name of the Ethernet interface to be destroyed
+ * @kp:	    The associated kernel parameter
+ *
+ * Called from sysfs.
  *
  * Returns: 0 for success
  */
@@ -1631,7 +1850,7 @@ static int fcoe_destroy(const char *buffer, struct kernel_param *kp)
 {
 	struct fcoe_interface *fcoe;
 	struct net_device *netdev;
-	int rc;
+	int rc = 0;
 
 	mutex_lock(&fcoe_config_mutex);
 #ifdef CONFIG_FCOE_MODULE
@@ -1670,6 +1889,10 @@ out_nodev:
 	return rc;
 }
 
+/**
+ * fcoe_destroy_work() - Destroy an FCoE port in a deferred work context
+ * @work: Handle to the FCoE port to be destroyed
+ */
 static void fcoe_destroy_work(struct work_struct *work)
 {
 	struct fcoe_port *port;
@@ -1681,9 +1904,11 @@ static void fcoe_destroy_work(struct work_struct *work)
 }
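
The work item exists so that teardown can run outside the sysfs write path,
where re-taking the RTNL would deadlock. The underlying idiom, sketched with
illustrative names:

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_port {
	struct work_struct destroy_work;
	/* ... per-port state ... */
};

static void demo_destroy_work(struct work_struct *work)
{
	struct demo_port *port =
		container_of(work, struct demo_port, destroy_work);

	/* tear 'port' down here, in process context, no caller locks held */
	kfree(port);
}

/* creation:  INIT_WORK(&port->destroy_work, demo_destroy_work); */
/* teardown:  schedule_work(&port->destroy_work);                */
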
 
 /**
- * fcoe_create() - Handles the create call from sysfs
- * @buffer: expected to be an eth if name
- * @kp: associated kernel param
+ * fcoe_create() - Create an FCoE interface
+ * @buffer: The name of the Ethernet interface to create on
+ * @kp:	    The associated kernel parameter
+ *
+ * Called from sysfs.
  *
  * Returns: 0 for success
  */
@@ -1726,7 +1951,7 @@ static int fcoe_create(const char *buffer, struct kernel_param *kp)
 		goto out_putdev;
 	}
 
-	lport = fcoe_if_create(fcoe, &netdev->dev);
+	lport = fcoe_if_create(fcoe, &netdev->dev, 0);
 	if (IS_ERR(lport)) {
 		printk(KERN_ERR "fcoe: Failed to create interface (%s)\n",
 		       netdev->name);
@@ -1762,16 +1987,9 @@ out_nodev:
 	return rc;
 }
 
-module_param_call(create, fcoe_create, NULL, NULL, S_IWUSR);
-__MODULE_PARM_TYPE(create, "string");
-MODULE_PARM_DESC(create, "Create fcoe fcoe using net device passed in.");
-module_param_call(destroy, fcoe_destroy, NULL, NULL, S_IWUSR);
-__MODULE_PARM_TYPE(destroy, "string");
-MODULE_PARM_DESC(destroy, "Destroy fcoe fcoe");
-
 /**
- * fcoe_link_ok() - Check if link is ok for the fc_lport
- * @lp: ptr to the fc_lport
+ * fcoe_link_ok() - Check if the link is OK for a local port
+ * @lport: The local port to check link on
  *
  * Any permanently-disqualifying conditions have been previously checked.
  * This also updates the speed setting, which may change with link for 100/1000.
@@ -1783,26 +2001,26 @@ MODULE_PARM_DESC(destroy, "Destroy fcoe fcoe");
  * Returns: 0 if link is OK for use by FCoE.
  *
  */
-int fcoe_link_ok(struct fc_lport *lp)
+int fcoe_link_ok(struct fc_lport *lport)
 {
-	struct fcoe_port *port = lport_priv(lp);
-	struct net_device *dev = port->fcoe->netdev;
+	struct fcoe_port *port = lport_priv(lport);
+	struct net_device *netdev = port->fcoe->netdev;
 	struct ethtool_cmd ecmd = { ETHTOOL_GSET };
 
-	if ((dev->flags & IFF_UP) && netif_carrier_ok(dev) &&
-	    (!dev_ethtool_get_settings(dev, &ecmd))) {
-		lp->link_supported_speeds &=
+	if ((netdev->flags & IFF_UP) && netif_carrier_ok(netdev) &&
+	    (!dev_ethtool_get_settings(netdev, &ecmd))) {
+		lport->link_supported_speeds &=
 			~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
 		if (ecmd.supported & (SUPPORTED_1000baseT_Half |
 				      SUPPORTED_1000baseT_Full))
-			lp->link_supported_speeds |= FC_PORTSPEED_1GBIT;
+			lport->link_supported_speeds |= FC_PORTSPEED_1GBIT;
 		if (ecmd.supported & SUPPORTED_10000baseT_Full)
-			lp->link_supported_speeds |=
+			lport->link_supported_speeds |=
 				FC_PORTSPEED_10GBIT;
 		if (ecmd.speed == SPEED_1000)
-			lp->link_speed = FC_PORTSPEED_1GBIT;
+			lport->link_speed = FC_PORTSPEED_1GBIT;
 		if (ecmd.speed == SPEED_10000)
-			lp->link_speed = FC_PORTSPEED_10GBIT;
+			lport->link_speed = FC_PORTSPEED_10GBIT;
 
 		return 0;
 	}
@@ -1810,8 +2028,8 @@ int fcoe_link_ok(struct fc_lport *lp)
 }
 
 /**
- * fcoe_percpu_clean() - Clear the pending skbs for an lport
- * @lp: the fc_lport
+ * fcoe_percpu_clean() - Clear all pending skbs for a local port
+ * @lport: The local port whose skbs are to be cleared
  *
  * Must be called with fcoe_create_mutex held to single-thread completion.
  *
@@ -1820,7 +2038,7 @@ int fcoe_link_ok(struct fc_lport *lp)
  * there no packets that will be handled by the lport, but also that any
  * threads already handling packet have returned.
  */
-void fcoe_percpu_clean(struct fc_lport *lp)
+void fcoe_percpu_clean(struct fc_lport *lport)
 {
 	struct fcoe_percpu_s *pp;
 	struct fcoe_rcv_info *fr;
@@ -1838,7 +2056,7 @@ void fcoe_percpu_clean(struct fc_lport *lp)
 		     skb = next) {
 			next = skb->next;
 			fr = fcoe_dev_from_skb(skb);
-			if (fr->fr_dev == lp) {
+			if (fr->fr_dev == lport) {
 				__skb_unlink(skb, list);
 				kfree_skb(skb);
 			}
@@ -1867,13 +2085,11 @@ void fcoe_percpu_clean(struct fc_lport *lp)
 
 /**
  * fcoe_clean_pending_queue() - Dequeue a skb and free it
- * @lp: the corresponding fc_lport
- *
- * Returns: none
+ * @lport: The local port to dequeue a skb on
  */
-void fcoe_clean_pending_queue(struct fc_lport *lp)
+void fcoe_clean_pending_queue(struct fc_lport *lport)
 {
-	struct fcoe_port  *port = lport_priv(lp);
+	struct fcoe_port  *port = lport_priv(lport);
 	struct sk_buff *skb;
 
 	spin_lock_bh(&port->fcoe_pending_queue.lock);
@@ -1886,10 +2102,10 @@ void fcoe_clean_pending_queue(struct fc_lport *lp)
 }
 
 /**
- * fcoe_reset() - Resets the fcoe
- * @shost: shost the reset is from
+ * fcoe_reset() - Reset a local port
+ * @shost: The SCSI host associated with the local port to be reset
  *
- * Returns: always 0
+ * Returns: Always 0 (return value required by FC transport template)
  */
 int fcoe_reset(struct Scsi_Host *shost)
 {
@@ -1899,30 +2115,33 @@ int fcoe_reset(struct Scsi_Host *shost)
 }
 
 /**
- * fcoe_hostlist_lookup_port() - find the corresponding lport by a given device
- * @dev: this is currently ptr to net_device
+ * fcoe_hostlist_lookup_port() - Find the FCoE interface associated with a net device
+ * @netdev: The net device used as a key
+ *
+ * Locking: Must be called with the RTNL mutex held.
  *
- * Returns: NULL or the located fcoe_port
- * Locking: must be called with the RNL mutex held
+ * Returns: NULL or the FCoE interface
  */
 static struct fcoe_interface *
-fcoe_hostlist_lookup_port(const struct net_device *dev)
+fcoe_hostlist_lookup_port(const struct net_device *netdev)
 {
 	struct fcoe_interface *fcoe;
 
 	list_for_each_entry(fcoe, &fcoe_hostlist, list) {
-		if (fcoe->netdev == dev)
+		if (fcoe->netdev == netdev)
 			return fcoe;
 	}
 	return NULL;
 }
 
 /**
- * fcoe_hostlist_lookup() - Find the corresponding lport by netdev
- * @netdev: ptr to net_device
+ * fcoe_hostlist_lookup() - Find the local port associated with a
+ *			    given net device
+ * @netdev: The netdevice used as a key
  *
- * Returns: 0 for success
- * Locking: must be called with the RTNL mutex held
+ * Locking: Must be called with the RTNL mutex held
+ *
+ * Returns: NULL or the local port
  */
 static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev)
 {
@@ -1933,11 +2152,13 @@ static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev)
 }
 
 /**
- * fcoe_hostlist_add() - Add a lport to lports list
- * @lp: ptr to the fc_lport to be added
+ * fcoe_hostlist_add() - Add the FCoE interface identified by a local
+ *			 port to the hostlist
+ * @lport: The local port that identifies the FCoE interface to be added
  *
- * Returns: 0 for success
  * Locking: must be called with the RTNL mutex held
+ *
+ * Returns: 0 for success
  */
 static int fcoe_hostlist_add(const struct fc_lport *lport)
 {
@@ -1954,15 +2175,15 @@ static int fcoe_hostlist_add(const struct fc_lport *lport)
 }
 
 /**
- * fcoe_init() - fcoe module loading initialization
+ * fcoe_init() - Initialize fcoe.ko
  *
- * Returns 0 on success, negative on failure
+ * Returns: 0 on success, or a negative value on failure
  */
 static int __init fcoe_init(void)
 {
+	struct fcoe_percpu_s *p;
 	unsigned int cpu;
 	int rc = 0;
-	struct fcoe_percpu_s *p;
 
 	mutex_lock(&fcoe_config_mutex);
 
@@ -1999,15 +2220,15 @@ out_free:
 module_init(fcoe_init);
 
 /**
- * fcoe_exit() - fcoe module unloading cleanup
+ * fcoe_exit() - Clean up fcoe.ko
  *
- * Returns 0 on success, negative on failure
  */
 static void __exit fcoe_exit(void)
 {
-	unsigned int cpu;
 	struct fcoe_interface *fcoe, *tmp;
 	struct fcoe_port *port;
+	unsigned int cpu;
 
 	mutex_lock(&fcoe_config_mutex);
 
@@ -2033,9 +2254,238 @@ static void __exit fcoe_exit(void)
 	/* flush any asynchronous interface destroys,
 	 * this should happen after the netdev notifier is unregistered */
 	flush_scheduled_work();
+	/* That will flush out all the N_Ports on the hostlist, but now we
+	 * may have NPIV VN_Ports scheduled for destruction */
+	flush_scheduled_work();
 
 	/* detach from scsi transport
 	 * must happen after all destroys are done, therefore after the flush */
 	fcoe_if_exit();
 }
 module_exit(fcoe_exit);
+
+/**
+ * fcoe_flogi_resp() - FCoE specific FLOGI and FDISC response handler
+ * @seq: active sequence in the FLOGI or FDISC exchange
+ * @fp: response frame, or error encoded in a pointer (timeout)
+ * @arg: pointer to the fcoe_ctlr structure
+ *
+ * This handles MAC address management for FCoE, then passes control on to
+ * the libfc FLOGI response handler.
+ */
+static void fcoe_flogi_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
+{
+	struct fcoe_ctlr *fip = arg;
+	struct fc_exch *exch = fc_seq_exch(seq);
+	struct fc_lport *lport = exch->lp;
+	u8 *mac;
+
+	if (IS_ERR(fp))
+		goto done;
+
+	mac = fr_cb(fp)->granted_mac;
+	if (is_zero_ether_addr(mac)) {
+		/* pre-FIP */
+		if (fcoe_ctlr_recv_flogi(fip, lport, fp)) {
+			fc_frame_free(fp);
+			return;
+		}
+	}
+	fcoe_update_src_mac(lport, mac);
+done:
+	fc_lport_flogi_resp(seq, fp, lport);
+}
+
+/**
+ * fcoe_logo_resp() - FCoE specific LOGO response handler
+ * @seq: active sequence in the LOGO exchange
+ * @fp: response frame, or error encoded in a pointer (timeout)
+ * @arg: pointer to the fc_lport structure
+ *
+ * This handles MAC address management for FCoE, then passes control on to
+ * the libfc LOGO response handler.
+ */
+static void fcoe_logo_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
+{
+	struct fc_lport *lport = arg;
+	static u8 zero_mac[ETH_ALEN] = { 0 };
+
+	if (!IS_ERR(fp))
+		fcoe_update_src_mac(lport, zero_mac);
+	fc_lport_logo_resp(seq, fp, lport);
+}
+
+/**
+ * fcoe_elsct_send() - FCoE specific ELS handler
+ *
+ * This does special case handling of FIP encapsulated ELS exchanges for FCoE,
+ * using FCoE specific response handlers and passing the FIP controller as
+ * the argument (the lport is still available from the exchange).
+ *
+ * Most of the work here is just handed off to the libfc routine.
+ */
+static struct fc_seq *fcoe_elsct_send(struct fc_lport *lport, u32 did,
+				      struct fc_frame *fp, unsigned int op,
+				      void (*resp)(struct fc_seq *,
+						   struct fc_frame *,
+						   void *),
+				      void *arg, u32 timeout)
+{
+	struct fcoe_port *port = lport_priv(lport);
+	struct fcoe_interface *fcoe = port->fcoe;
+	struct fcoe_ctlr *fip = &fcoe->ctlr;
+	struct fc_frame_header *fh = fc_frame_header_get(fp);
+
+	switch (op) {
+	case ELS_FLOGI:
+	case ELS_FDISC:
+		return fc_elsct_send(lport, did, fp, op, fcoe_flogi_resp,
+				     fip, timeout);
+	case ELS_LOGO:
+		/* only hook onto fabric logouts, not port logouts */
+		if (ntoh24(fh->fh_d_id) != FC_FID_FLOGI)
+			break;
+		return fc_elsct_send(lport, did, fp, op, fcoe_logo_resp,
+				     lport, timeout);
+	}
+	return fc_elsct_send(lport, did, fp, op, resp, arg, timeout);
+}
+
+/**
+ * fcoe_vport_create() - Create an fc_host/scsi_host for a vport
+ * @vport: fc_vport object to create a new fc_host for
+ * @disabled: start the new fc_host in a disabled state by default?
+ *
+ * Returns: 0 for success
+ */
+static int fcoe_vport_create(struct fc_vport *vport, bool disabled)
+{
+	struct Scsi_Host *shost = vport_to_shost(vport);
+	struct fc_lport *n_port = shost_priv(shost);
+	struct fcoe_port *port = lport_priv(n_port);
+	struct fcoe_interface *fcoe = port->fcoe;
+	struct net_device *netdev = fcoe->netdev;
+	struct fc_lport *vn_port;
+
+	mutex_lock(&fcoe_config_mutex);
+	vn_port = fcoe_if_create(fcoe, &vport->dev, 1);
+	mutex_unlock(&fcoe_config_mutex);
+
+	if (IS_ERR(vn_port)) {
+		printk(KERN_ERR "fcoe: fcoe_vport_create(%s) failed\n",
+		       netdev->name);
+		return -EIO;
+	}
+
+	if (disabled) {
+		fc_vport_set_state(vport, FC_VPORT_DISABLED);
+	} else {
+		vn_port->boot_time = jiffies;
+		fc_fabric_login(vn_port);
+		fc_vport_setlink(vn_port);
+	}
+	return 0;
+}
+
+/**
+ * fcoe_vport_destroy() - Destroy the fc_host/scsi_host for a vport
+ * @vport: fc_vport object that is being destroyed
+ *
+ * Returns: 0 for success
+ */
+static int fcoe_vport_destroy(struct fc_vport *vport)
+{
+	struct Scsi_Host *shost = vport_to_shost(vport);
+	struct fc_lport *n_port = shost_priv(shost);
+	struct fc_lport *vn_port = vport->dd_data;
+	struct fcoe_port *port = lport_priv(vn_port);
+
+	mutex_lock(&n_port->lp_mutex);
+	list_del(&vn_port->list);
+	mutex_unlock(&n_port->lp_mutex);
+	schedule_work(&port->destroy_work);
+	return 0;
+}
+
+/**
+ * fcoe_vport_disable() - Change vport state
+ * @vport: vport to bring online/offline
+ * @disable: should the vport be disabled?
+ */
+static int fcoe_vport_disable(struct fc_vport *vport, bool disable)
+{
+	struct fc_lport *lport = vport->dd_data;
+
+	if (disable) {
+		fc_vport_set_state(vport, FC_VPORT_DISABLED);
+		fc_fabric_logoff(lport);
+	} else {
+		lport->boot_time = jiffies;
+		fc_fabric_login(lport);
+		fc_vport_setlink(lport);
+	}
+
+	return 0;
+}
+
+/**
+ * fcoe_set_vport_symbolic_name() - Append vport string to symbolic name
+ * @vport: fc_vport with a new symbolic name string
+ *
+ * After generating a new symbolic name string, a new RSPN_ID request is
+ * sent to the name server.  There is no response handler, so if it fails
+ * for some reason it will not be retried.
+ */
+static void fcoe_set_vport_symbolic_name(struct fc_vport *vport)
+{
+	struct fc_lport *lport = vport->dd_data;
+	struct fc_frame *fp;
+	size_t len;
+
+	snprintf(fc_host_symbolic_name(lport->host), FC_SYMBOLIC_NAME_SIZE,
+		 "%s v%s over %s : %s", FCOE_NAME, FCOE_VERSION,
+		 fcoe_netdev(lport)->name, vport->symbolic_name);
+
+	if (lport->state != LPORT_ST_READY)
+		return;
+
+	len = strnlen(fc_host_symbolic_name(lport->host), 255);
+	fp = fc_frame_alloc(lport,
+			    sizeof(struct fc_ct_hdr) +
+			    sizeof(struct fc_ns_rspn) + len);
+	if (!fp)
+		return;
+	lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, FC_NS_RSPN_ID,
+			     NULL, NULL, 3 * lport->r_a_tov);
+}
+
+/**
+ * fcoe_get_lesb() - Fill the FCoE Link Error Status Block
+ * @lport: the local port
+ * @fc_lesb: the link error status block
+ */
+static void fcoe_get_lesb(struct fc_lport *lport,
+			 struct fc_els_lesb *fc_lesb)
+{
+	unsigned int cpu;
+	u32 lfc, vlfc, mdac;
+	struct fcoe_dev_stats *devst;
+	struct fcoe_fc_els_lesb *lesb;
+	struct net_device *netdev = fcoe_netdev(lport);
+
+	lfc = 0;
+	vlfc = 0;
+	mdac = 0;
+	lesb = (struct fcoe_fc_els_lesb *)fc_lesb;
+	memset(lesb, 0, sizeof(*lesb));
+	for_each_possible_cpu(cpu) {
+		devst = per_cpu_ptr(lport->dev_stats, cpu);
+		lfc += devst->LinkFailureCount;
+		vlfc += devst->VLinkFailureCount;
+		mdac += devst->MissDiscAdvCount;
+	}
+	lesb->lesb_link_fail = htonl(lfc);
+	lesb->lesb_vlink_fail = htonl(vlfc);
+	lesb->lesb_miss_fka = htonl(mdac);
+	lesb->lesb_fcs_error = htonl(dev_get_stats(netdev)->rx_crc_errors);
+}
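
fcoe_get_lesb() uses the usual lock-free per-CPU counter pattern: each CPU
increments only its own copy, so a reader may sum all copies without taking a
lock. A sketch with illustrative names:

#include <linux/percpu.h>
#include <linux/types.h>

struct demo_stats {
	u64 link_failures;
};

static struct demo_stats *demo_stats;	/* demo_stats = alloc_percpu(struct demo_stats); */

static u64 demo_sum_link_failures(void)
{
	unsigned int cpu;
	u64 sum = 0;

	/* Writers touch only their own CPU's copy, so an unlocked sum
	 * over all possible CPUs is a good-enough snapshot. */
	for_each_possible_cpu(cpu)
		sum += per_cpu_ptr(demo_stats, cpu)->link_failures;
	return sum;
}
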
diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h
index ce7f60fb1bc0..c69b2c56c2d1 100644
--- a/drivers/scsi/fcoe/fcoe.h
+++ b/drivers/scsi/fcoe/fcoe.h
@@ -32,7 +32,7 @@
 #define FCOE_NAME	"fcoe"
 #define FCOE_VENDOR	"Open-FCoE.org"
 
-#define FCOE_MAX_LUN		255
+#define FCOE_MAX_LUN		0xFFFF
 #define FCOE_MAX_FCP_TARGET	256
 
 #define FCOE_MAX_OUTSTANDING_COMMANDS	1024
@@ -40,11 +40,17 @@
 #define FCOE_MIN_XID		0x0000	/* the min xid supported by fcoe_sw */
 #define FCOE_MAX_XID		0x0FFF	/* the max xid supported by fcoe_sw */
 
+/*
+ * Max MTU for FCoE: 14 (FCoE header) + 24 (FC header) + 2112 (max FC payload)
+ * + 4 (FC CRC) + 4 (FCoE trailer) = 2158 bytes
+ */
+#define FCOE_MTU	2158
+
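
The 2158 above is simply the framing overhead around the 2112-byte maximum FC
payload. A compile-time restatement of the arithmetic (a sketch; it assumes
sizeof(struct fcoe_hdr) == 14, sizeof(struct fc_frame_header) == 24 and
sizeof(struct fcoe_crc_eof) == 8, per scsi/fc/fc_fcoe.h and scsi/fc/fc_fs.h):

#include <linux/kernel.h>
#include <scsi/fc/fc_fcoe.h>	/* struct fcoe_hdr, struct fcoe_crc_eof */
#include <scsi/fc/fc_fs.h>	/* struct fc_frame_header */

static inline void demo_check_fcoe_mtu(void)
{
	BUILD_BUG_ON(2158 != sizeof(struct fcoe_hdr) +		/* 14 */
			     sizeof(struct fc_frame_header) +	/* 24 */
			     2112 +				/* max payload */
			     sizeof(struct fcoe_crc_eof));	/* 4 + 4 */
}
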
 unsigned int fcoe_debug_logging;
 module_param_named(debug_logging, fcoe_debug_logging, int, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
 
-#define FCOE_LOGGING        0x01 /* General logging, not categorized */
+#define FCOE_LOGGING	    0x01 /* General logging, not categorized */
 #define FCOE_NETDEV_LOGGING 0x02 /* Netdevice logging */
 
 #define FCOE_CHECK_LOGGING(LEVEL, CMD)					\
@@ -64,8 +70,13 @@ do {                                                            	\
 			   printk(KERN_INFO "fcoe: %s: " fmt,	\
 				  netdev->name, ##args);)
 
-/*
- * this percpu struct for fcoe
+/**
+ * struct fcoe_percpu_s - The per-CPU context for FCoE receive threads
+ * @thread:	    The thread context
+ * @fcoe_rx_list:   The queue of pending packets to process
+ * @page:	    The memory page for calculating frame trailer CRCs
+ * @crc_eof_offset: The offset into the CRC page pointing to available
+ *		    memory for a new trailer
  */
 struct fcoe_percpu_s {
 	struct task_struct *thread;
@@ -74,37 +85,62 @@ struct fcoe_percpu_s {
 	int crc_eof_offset;
 };
 
-/*
- * an FCoE interface, 1:1 with netdev
+/**
+ * struct fcoe_interface - An FCoE interface
+ * @list:	      Handle for a list of FCoE interfaces
+ * @netdev:	      The associated net device
+ * @fcoe_packet_type: FCoE packet type
+ * @fip_packet_type:  FIP packet type
+ * @ctlr:	      The FCoE controller (for FIP)
+ * @oem:	      The offload exchange manager for all local port
+ *		      instances associated with this port
+ * @kref:	      The kernel reference
+ *
+ * This structure is 1:1 with a net device.
  */
 struct fcoe_interface {
-	struct list_head list;
-	struct net_device *netdev;
-	struct packet_type  fcoe_packet_type;
-	struct packet_type  fip_packet_type;
-	struct fcoe_ctlr ctlr;
-	struct fc_exch_mgr *oem;		/* offload exchange manager */
-	struct kref kref;
+	struct list_head   list;
+	struct net_device  *netdev;
+	struct packet_type fcoe_packet_type;
+	struct packet_type fip_packet_type;
+	struct fcoe_ctlr   ctlr;
+	struct fc_exch_mgr *oem;
+	struct kref	   kref;
 };
 
-/*
- * the FCoE private structure that's allocated along with the
- * Scsi_Host and libfc fc_lport structures
+/**
+ * struct fcoe_port - The FCoE private structure
+ * @fcoe:		       The associated fcoe interface
+ * @lport:		       The associated local port
+ * @fcoe_pending_queue:	       The pending Tx queue of skbs
+ * @fcoe_pending_queue_active: Indicates if the pending queue is active
+ * @timer:		       The queue timer
+ * @destroy_work:	       Handle for work context
+ *			       (to prevent RTNL deadlocks)
+ * @data_src_addr:	       Source address for data
+ *
+ * An instance of this structure is to be allocated along with the
+ * Scsi_Host and libfc fc_lport structures.
  */
 struct fcoe_port {
 	struct fcoe_interface *fcoe;
-	struct fc_lport *lport;
-	struct sk_buff_head fcoe_pending_queue;
-	u8	fcoe_pending_queue_active;
-	struct timer_list timer;		/* queue timer */
-	struct work_struct destroy_work;	/* to prevent rtnl deadlocks */
+	struct fc_lport	      *lport;
+	struct sk_buff_head   fcoe_pending_queue;
+	u8		      fcoe_pending_queue_active;
+	struct timer_list     timer;
+	struct work_struct    destroy_work;
+	u8		      data_src_addr[ETH_ALEN];
 };
 
 #define fcoe_from_ctlr(fip) container_of(fip, struct fcoe_interface, ctlr)
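
fcoe_from_ctlr() is the usual container_of() trick: FIP callbacks receive a
struct fcoe_ctlr pointer and the driver recovers the enclosing interface from
it. A sketch in the spirit of the driver's FIP send callback; the function
name is illustrative:

/* assumes fcoe.h and <linux/netdevice.h> are in scope */
static void demo_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
{
	/* expands to container_of(fip, struct fcoe_interface, ctlr) */
	struct fcoe_interface *fcoe = fcoe_from_ctlr(fip);

	skb->dev = fcoe->netdev;	/* aim the frame at the bound netdev */
	dev_queue_xmit(skb);
}
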
 
-static inline struct net_device *fcoe_netdev(const struct fc_lport *lp)
+/**
+ * fcoe_netdev() - Return the net device associated with a local port
+ * @lport: The local port to get the net device from
+ */
+static inline struct net_device *fcoe_netdev(const struct fc_lport *lport)
 {
-	return ((struct fcoe_port *)lport_priv(lp))->fcoe->netdev;
+	return ((struct fcoe_port *)lport_priv(lport))->fcoe->netdev;
 }
 
 #endif /* _FCOE_H_ */
diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c
index 11ae5c94608b..9823291395ad 100644
--- a/drivers/scsi/fcoe/libfcoe.c
+++ b/drivers/scsi/fcoe/libfcoe.c
@@ -59,26 +59,30 @@ unsigned int libfcoe_debug_logging;
 module_param_named(debug_logging, libfcoe_debug_logging, int, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
 
-#define LIBFCOE_LOGGING     0x01 /* General logging, not categorized */
+#define LIBFCOE_LOGGING	    0x01 /* General logging, not categorized */
 #define LIBFCOE_FIP_LOGGING 0x02 /* FIP logging */
 
-#define LIBFCOE_CHECK_LOGGING(LEVEL, CMD)				\
-do {                                                            	\
-	if (unlikely(libfcoe_debug_logging & LEVEL))			\
-		do {							\
-			CMD;						\
-		} while (0);						\
+#define LIBFCOE_CHECK_LOGGING(LEVEL, CMD)		\
+do {							\
+	if (unlikely(libfcoe_debug_logging & LEVEL))	\
+		do {					\
+			CMD;				\
+		} while (0);				\
 } while (0)
 
 #define LIBFCOE_DBG(fmt, args...)					\
 	LIBFCOE_CHECK_LOGGING(LIBFCOE_LOGGING,				\
 			      printk(KERN_INFO "libfcoe: " fmt, ##args);)
 
-#define LIBFCOE_FIP_DBG(fmt, args...)					\
+#define LIBFCOE_FIP_DBG(fip, fmt, args...)				\
 	LIBFCOE_CHECK_LOGGING(LIBFCOE_FIP_LOGGING,			\
-			      printk(KERN_INFO "fip: " fmt, ##args);)
+			      printk(KERN_INFO "host%d: fip: " fmt, 	\
+				     (fip)->lp->host->host_no, ##args);)
 
-/*
+/**
+ * fcoe_ctlr_mtu_valid() - Check if an FCF's MTU is valid
+ * @fcf: The FCF to check
+ *
  * Return non-zero if FCF fcoe_size has been validated.
  */
 static inline int fcoe_ctlr_mtu_valid(const struct fcoe_fcf *fcf)
@@ -86,7 +90,10 @@ static inline int fcoe_ctlr_mtu_valid(const struct fcoe_fcf *fcf)
 	return (fcf->flags & FIP_FL_SOL) != 0;
 }
 
-/*
+/**
+ * fcoe_ctlr_fcf_usable() - Check if an FCF is usable
+ * @fcf: The FCF to check
+ *
  * Return non-zero if the FCF is usable.
  */
 static inline int fcoe_ctlr_fcf_usable(struct fcoe_fcf *fcf)
@@ -97,12 +104,13 @@ static inline int fcoe_ctlr_fcf_usable(struct fcoe_fcf *fcf)
 }
 
 /**
- * fcoe_ctlr_init() - Initialize the FCoE Controller instance.
- * @fip:	FCoE controller.
+ * fcoe_ctlr_init() - Initialize the FCoE Controller instance
+ * @fip: The FCoE controller to initialize
  */
 void fcoe_ctlr_init(struct fcoe_ctlr *fip)
 {
 	fip->state = FIP_ST_LINK_WAIT;
+	fip->mode = FIP_ST_AUTO;
 	INIT_LIST_HEAD(&fip->fcfs);
 	spin_lock_init(&fip->lock);
 	fip->flogi_oxid = FC_XID_UNKNOWN;
@@ -114,8 +122,8 @@ void fcoe_ctlr_init(struct fcoe_ctlr *fip)
 EXPORT_SYMBOL(fcoe_ctlr_init);
 
 /**
- * fcoe_ctlr_reset_fcfs() - Reset and free all FCFs for a controller.
- * @fip:	FCoE controller.
+ * fcoe_ctlr_reset_fcfs() - Reset and free all FCFs for a controller
+ * @fip: The FCoE controller whose FCFs are to be reset
  *
  * Called with &fcoe_ctlr lock held.
  */
@@ -134,8 +142,8 @@ static void fcoe_ctlr_reset_fcfs(struct fcoe_ctlr *fip)
 }
 
 /**
- * fcoe_ctlr_destroy() - Disable and tear-down the FCoE controller.
- * @fip:	FCoE controller.
+ * fcoe_ctlr_destroy() - Disable and tear down an FCoE controller
+ * @fip: The FCoE controller to tear down
  *
  * This is called by FCoE drivers before freeing the &fcoe_ctlr.
  *
@@ -148,9 +156,7 @@ static void fcoe_ctlr_reset_fcfs(struct fcoe_ctlr *fip)
 void fcoe_ctlr_destroy(struct fcoe_ctlr *fip)
 {
 	cancel_work_sync(&fip->recv_work);
-	spin_lock_bh(&fip->fip_recv_list.lock);
-	__skb_queue_purge(&fip->fip_recv_list);
-	spin_unlock_bh(&fip->fip_recv_list.lock);
+	skb_queue_purge(&fip->fip_recv_list);
 
 	spin_lock_bh(&fip->lock);
 	fip->state = FIP_ST_DISABLED;
@@ -162,8 +168,8 @@ void fcoe_ctlr_destroy(struct fcoe_ctlr *fip)
 EXPORT_SYMBOL(fcoe_ctlr_destroy);
 
 /**
- * fcoe_ctlr_fcoe_size() - Return the maximum FCoE size required for VN_Port.
- * @fip:	FCoE controller.
+ * fcoe_ctlr_fcoe_size() - Return the maximum FCoE size required for VN_Port
+ * @fip: The FCoE controller to get the maximum FCoE size from
  *
  * Returns the maximum packet size including the FCoE header and trailer,
  * but not including any Ethernet or VLAN headers.
@@ -180,9 +186,9 @@ static inline u32 fcoe_ctlr_fcoe_size(struct fcoe_ctlr *fip)
 }
 
 /**
- * fcoe_ctlr_solicit() - Send a solicitation.
- * @fip:	FCoE controller.
- * @fcf:	Destination FCF.  If NULL, a multicast solicitation is sent.
+ * fcoe_ctlr_solicit() - Send a FIP solicitation
+ * @fip: The FCoE controller to send the solicitation on
+ * @fcf: The destination FCF (if NULL, a multicast solicitation is sent)
  */
 static void fcoe_ctlr_solicit(struct fcoe_ctlr *fip, struct fcoe_fcf *fcf)
 {
@@ -241,8 +247,8 @@ static void fcoe_ctlr_solicit(struct fcoe_ctlr *fip, struct fcoe_fcf *fcf)
 }
 
 /**
- * fcoe_ctlr_link_up() - Start FCoE controller.
- * @fip:	FCoE controller.
+ * fcoe_ctlr_link_up() - Start FCoE controller
+ * @fip: The FCoE controller to start
  *
  * Called from the LLD when the network link is ready.
  */
@@ -255,11 +261,12 @@ void fcoe_ctlr_link_up(struct fcoe_ctlr *fip)
 		spin_unlock_bh(&fip->lock);
 		fc_linkup(fip->lp);
 	} else if (fip->state == FIP_ST_LINK_WAIT) {
-		fip->state = FIP_ST_AUTO;
+		fip->state = fip->mode;
 		fip->last_link = 1;
 		fip->link = 1;
 		spin_unlock_bh(&fip->lock);
-		LIBFCOE_FIP_DBG("%s", "setting AUTO mode.\n");
+		if (fip->state == FIP_ST_AUTO)
+			LIBFCOE_FIP_DBG(fip, "%s", "setting AUTO mode.\n");
 		fc_linkup(fip->lp);
 		fcoe_ctlr_solicit(fip, NULL);
 	} else
@@ -268,45 +275,23 @@ void fcoe_ctlr_link_up(struct fcoe_ctlr *fip)
 EXPORT_SYMBOL(fcoe_ctlr_link_up);
 
 /**
- * fcoe_ctlr_reset() - Reset FIP.
- * @fip:	FCoE controller.
- * @new_state:	FIP state to be entered.
- *
- * Returns non-zero if the link was up and now isn't.
+ * fcoe_ctlr_reset() - Reset an FCoE controller
+ * @fip: The FCoE controller to reset
  */
-static int fcoe_ctlr_reset(struct fcoe_ctlr *fip, enum fip_state new_state)
+static void fcoe_ctlr_reset(struct fcoe_ctlr *fip)
 {
-	struct fc_lport *lp = fip->lp;
-	int link_dropped;
-
-	spin_lock_bh(&fip->lock);
 	fcoe_ctlr_reset_fcfs(fip);
 	del_timer(&fip->timer);
-	fip->state = new_state;
 	fip->ctlr_ka_time = 0;
 	fip->port_ka_time = 0;
 	fip->sol_time = 0;
 	fip->flogi_oxid = FC_XID_UNKNOWN;
 	fip->map_dest = 0;
-	fip->last_link = 0;
-	link_dropped = fip->link;
-	fip->link = 0;
-	spin_unlock_bh(&fip->lock);
-
-	if (link_dropped)
-		fc_linkdown(lp);
-
-	if (new_state == FIP_ST_ENABLED) {
-		fcoe_ctlr_solicit(fip, NULL);
-		fc_linkup(lp);
-		link_dropped = 0;
-	}
-	return link_dropped;
 }
 
 /**
- * fcoe_ctlr_link_down() - Stop FCoE controller.
- * @fip:	FCoE controller.
+ * fcoe_ctlr_link_down() - Stop an FCoE controller
+ * @fip: The FCoE controller to be stopped
  *
  * Returns non-zero if the link was up and now isn't.
  *
@@ -315,15 +300,29 @@ static int fcoe_ctlr_reset(struct fcoe_ctlr *fip, enum fip_state new_state)
  */
 int fcoe_ctlr_link_down(struct fcoe_ctlr *fip)
 {
-	return fcoe_ctlr_reset(fip, FIP_ST_LINK_WAIT);
+	int link_dropped;
+
+	LIBFCOE_FIP_DBG(fip, "link down.\n");
+	spin_lock_bh(&fip->lock);
+	fcoe_ctlr_reset(fip);
+	link_dropped = fip->link;
+	fip->link = 0;
+	fip->last_link = 0;
+	fip->state = FIP_ST_LINK_WAIT;
+	spin_unlock_bh(&fip->lock);
+
+	if (link_dropped)
+		fc_linkdown(fip->lp);
+	return link_dropped;
 }
 EXPORT_SYMBOL(fcoe_ctlr_link_down);
 
 /**
- * fcoe_ctlr_send_keep_alive() - Send a keep-alive to the selected FCF.
- * @fip:	FCoE controller.
- * @ports:	0 for controller keep-alive, 1 for port keep-alive.
- * @sa:		source MAC address.
+ * fcoe_ctlr_send_keep_alive() - Send a keep-alive to the selected FCF
+ * @fip:   The FCoE controller to send the FKA on
+ * @lport: The libfc fc_lport to send from
+ * @ports: 0 for controller keep-alive, 1 for port keep-alive
+ * @sa:	   The source MAC address
  *
  * A controller keep-alive is sent every fka_period (typically 8 seconds).
  * The source MAC is the native MAC address.
@@ -332,7 +331,9 @@ EXPORT_SYMBOL(fcoe_ctlr_link_down);
  * The source MAC is the assigned mapped source address.
  * The destination is the FCF's F-port.
  */
-static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip, int ports, u8 *sa)
+static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip,
+				      struct fc_lport *lport,
+				      int ports, u8 *sa)
 {
 	struct sk_buff *skb;
 	struct fip_kal {
@@ -350,8 +351,7 @@ static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip, int ports, u8 *sa)
 	if (!fcf || !fc_host_port_id(lp->host))
 		return;
 
-	len = fcoe_ctlr_fcoe_size(fip) + sizeof(struct ethhdr);
-	BUG_ON(len < sizeof(*kal) + sizeof(*vn));
+	len = sizeof(*kal) + ports * sizeof(*vn);
 	skb = dev_alloc_skb(len);
 	if (!skb)
 		return;
@@ -366,7 +366,7 @@ static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip, int ports, u8 *sa)
 	kal->fip.fip_op = htons(FIP_OP_CTRL);
 	kal->fip.fip_subcode = FIP_SC_KEEP_ALIVE;
 	kal->fip.fip_dl_len = htons((sizeof(kal->mac) +
-				    ports * sizeof(*vn)) / FIP_BPW);
+				     ports * sizeof(*vn)) / FIP_BPW);
 	kal->fip.fip_flags = htons(FIP_FL_FPMA);
 	if (fip->spma)
 		kal->fip.fip_flags |= htons(FIP_FL_SPMA);
@@ -374,16 +374,14 @@ static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip, int ports, u8 *sa)
 	kal->mac.fd_desc.fip_dtype = FIP_DT_MAC;
 	kal->mac.fd_desc.fip_dlen = sizeof(kal->mac) / FIP_BPW;
 	memcpy(kal->mac.fd_mac, fip->ctl_src_addr, ETH_ALEN);
-
 	if (ports) {
 		vn = (struct fip_vn_desc *)(kal + 1);
 		vn->fd_desc.fip_dtype = FIP_DT_VN_ID;
 		vn->fd_desc.fip_dlen = sizeof(*vn) / FIP_BPW;
-		memcpy(vn->fd_mac, fip->data_src_addr, ETH_ALEN);
+		memcpy(vn->fd_mac, fip->get_src_addr(lport), ETH_ALEN);
 		hton24(vn->fd_fc_id, fc_host_port_id(lp->host));
 		put_unaligned_be64(lp->wwpn, &vn->fd_wwpn);
 	}
-
 	skb_put(skb, len);
 	skb->protocol = htons(ETH_P_FIP);
 	skb_reset_mac_header(skb);
@@ -392,10 +390,10 @@ static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip, int ports, u8 *sa)
 }
 
 /**
- * fcoe_ctlr_encaps() - Encapsulate an ELS frame for FIP, without sending it.
- * @fip:	FCoE controller.
- * @dtype:	FIP descriptor type for the frame.
- * @skb:	FCoE ELS frame including FC header but no FCoE headers.
+ * fcoe_ctlr_encaps() - Encapsulate an ELS frame for FIP, without sending it
+ * @fip:   The FCoE controller for the ELS frame
+ * @dtype: The FIP descriptor type for the frame
+ * @skb:   The FCoE ELS frame including FC header but no FCoE headers
  *
  * Returns non-zero error code on failure.
  *
@@ -405,7 +403,7 @@ static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip, int ports, u8 *sa)
  * Headroom includes the FIP encapsulation description, FIP header, and
  * Ethernet header.  The tailroom is for the FIP MAC descriptor.
  */
-static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip,
+static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip, struct fc_lport *lport,
 			    u8 dtype, struct sk_buff *skb)
 {
 	struct fip_encaps_head {
@@ -449,8 +447,8 @@ static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip,
 	memset(mac, 0, sizeof(mac));
 	mac->fd_desc.fip_dtype = FIP_DT_MAC;
 	mac->fd_desc.fip_dlen = sizeof(*mac) / FIP_BPW;
-	if (dtype != FIP_DT_FLOGI)
-		memcpy(mac->fd_mac, fip->data_src_addr, ETH_ALEN);
+	if (dtype != FIP_DT_FLOGI && dtype != FIP_DT_FDISC)
+		memcpy(mac->fd_mac, fip->get_src_addr(lport), ETH_ALEN);
 	else if (fip->spma)
 		memcpy(mac->fd_mac, fip->ctl_src_addr, ETH_ALEN);
 
@@ -463,6 +461,7 @@ static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip,
 /**
  * fcoe_ctlr_els_send() - Send an ELS frame encapsulated by FIP if appropriate.
  * @fip:	FCoE controller.
+ * @lport:	libfc fc_lport to send from
  * @skb:	FCoE ELS frame including FC header but no FCoE headers.
  *
  * Returns a non-zero error code if the frame should not be sent.
@@ -471,11 +470,13 @@ static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip,
  * The caller must check that the length is a multiple of 4.
  * The SKB must have enough headroom (28 bytes) and tailroom (8 bytes).
  */
-int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
+int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport,
+		       struct sk_buff *skb)
 {
 	struct fc_frame_header *fh;
 	u16 old_xid;
 	u8 op;
+	u8 mac[ETH_ALEN];
 
 	fh = (struct fc_frame_header *)skb->data;
 	op = *(u8 *)(fh + 1);
@@ -498,6 +499,8 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
 
 	if (fip->state == FIP_ST_NON_FIP)
 		return 0;
+	if (!fip->sel_fcf)
+		goto drop;
 
 	switch (op) {
 	case ELS_FLOGI:
@@ -530,14 +533,15 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
 		 * FLOGI.
 		 */
 		fip->flogi_oxid = FC_XID_UNKNOWN;
-		fc_fcoe_set_mac(fip->data_src_addr, fh->fh_s_id);
+		fc_fcoe_set_mac(mac, fh->fh_d_id);
+		fip->update_mac(lport, mac);
 		return 0;
 	default:
 		if (fip->state != FIP_ST_ENABLED)
 			goto drop;
 		return 0;
 	}
-	if (fcoe_ctlr_encaps(fip, op, skb))
+	if (fcoe_ctlr_encaps(fip, lport, op, skb))
 		goto drop;
 	fip->send(fip, skb);
 	return -EINPROGRESS;
@@ -547,9 +551,9 @@ drop:
 }
 EXPORT_SYMBOL(fcoe_ctlr_els_send);
 
-/*
- * fcoe_ctlr_age_fcfs() - Reset and free all old FCFs for a controller.
- * @fip:	FCoE controller.
+/**
+ * fcoe_ctlr_age_fcfs() - Reset and free all old FCFs for a controller
+ * @fip: The FCoE controller to free FCFs on
  *
  * Called with lock held.
  *
@@ -558,14 +562,28 @@ EXPORT_SYMBOL(fcoe_ctlr_els_send);
  * times its keep-alive period including fuzz.
  *
  * In addition, determine the time when an FCF selection can occur.
+ *
+ * Also increments the MissDiscAdvCount when no advertisement is received
+ * for the corresponding FCF for 1.5 * FKA_ADV_PERIOD (FC-BB-5 LESB).
  */
 static void fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
 {
 	struct fcoe_fcf *fcf;
 	struct fcoe_fcf *next;
 	unsigned long sel_time = 0;
+	unsigned long mda_time = 0;
 
 	list_for_each_entry_safe(fcf, next, &fip->fcfs, list) {
+		mda_time = fcf->fka_period + (fcf->fka_period >> 1);
+		if ((fip->sel_fcf == fcf) &&
+		    (time_after(jiffies, fcf->time + mda_time))) {
+			mod_timer(&fip->timer, jiffies + mda_time);
+			fc_lport_get_stats(fip->lp)->MissDiscAdvCount++;
+			printk(KERN_INFO "libfcoe: host%d: Missing Discovery "
+			       "Advertisement for fab %llx count %lld\n",
+			       fip->lp->host->host_no, fcf->fabric_name,
+			       fc_lport_get_stats(fip->lp)->MissDiscAdvCount);
+		}
 		if (time_after(jiffies, fcf->time + fcf->fka_period * 3 +
 			       msecs_to_jiffies(FIP_FCF_FUZZ * 3))) {
 			if (fip->sel_fcf == fcf)
@@ -574,6 +592,7 @@ static void fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
 			WARN_ON(!fip->fcf_count);
 			fip->fcf_count--;
 			kfree(fcf);
+			fc_lport_get_stats(fip->lp)->VLinkFailureCount++;
 		} else if (fcoe_ctlr_mtu_valid(fcf) &&
 			   (!sel_time || time_before(sel_time, fcf->time))) {
 			sel_time = fcf->time;
@@ -590,14 +609,16 @@ static void fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
 }
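
Both thresholds are plain jiffies arithmetic, and time_after() keeps the
comparisons correct across jiffies wraparound. A sketch of the two deadlines
with illustrative names; the 100ms fuzz constant below stands in for
FIP_FCF_FUZZ:

#include <linux/jiffies.h>
#include <linux/kernel.h>

struct demo_fcf {
	unsigned long time;		/* jiffies at last advertisement */
	unsigned long fka_period;	/* advertised FKA period, in jiffies */
};

static int demo_fcf_expired(struct demo_fcf *fcf)
{
	/* 1.5 * FKA period: the "missed advertisement" threshold (LESB) */
	unsigned long mda = fcf->fka_period + (fcf->fka_period >> 1);

	if (time_after(jiffies, fcf->time + mda))
		pr_info("demo: missed a discovery advertisement\n");

	/* 3 * FKA period plus fuzz: the FCF is aged out entirely */
	return time_after(jiffies, fcf->time + fcf->fka_period * 3 +
				   msecs_to_jiffies(3 * 100 /* fuzz */));
}
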
 
 /**
- * fcoe_ctlr_parse_adv() - Decode a FIP advertisement into a new FCF entry.
- * @skb:	received FIP advertisement frame
- * @fcf:	resulting FCF entry.
+ * fcoe_ctlr_parse_adv() - Decode a FIP advertisement into a new FCF entry
+ * @fip: The FCoE controller receiving the advertisement
+ * @skb: The received FIP advertisement frame
+ * @fcf: The resulting FCF entry
  *
  * Returns zero on a valid parsed advertisement,
  * otherwise returns non zero value.
  */
-static int fcoe_ctlr_parse_adv(struct sk_buff *skb, struct fcoe_fcf *fcf)
+static int fcoe_ctlr_parse_adv(struct fcoe_ctlr *fip,
+			       struct sk_buff *skb, struct fcoe_fcf *fcf)
 {
 	struct fip_header *fiph;
 	struct fip_desc *desc = NULL;
@@ -636,7 +657,7 @@ static int fcoe_ctlr_parse_adv(struct sk_buff *skb, struct fcoe_fcf *fcf)
 			       ((struct fip_mac_desc *)desc)->fd_mac,
 			       ETH_ALEN);
 			if (!is_valid_ether_addr(fcf->fcf_mac)) {
-				LIBFCOE_FIP_DBG("Invalid MAC address "
+				LIBFCOE_FIP_DBG(fip, "Invalid MAC address "
 						"in FIP adv\n");
 				return -EINVAL;
 			}
@@ -659,6 +680,8 @@ static int fcoe_ctlr_parse_adv(struct sk_buff *skb, struct fcoe_fcf *fcf)
 			if (dlen != sizeof(struct fip_fka_desc))
 				goto len_err;
 			fka = (struct fip_fka_desc *)desc;
+			if (fka->fd_flags & FIP_FKA_ADV_D)
+				fcf->fd_flags = 1;
 			t = ntohl(fka->fd_fka_period);
 			if (t >= FCOE_CTLR_MIN_FKA)
 				fcf->fka_period = msecs_to_jiffies(t);
@@ -670,7 +693,7 @@ static int fcoe_ctlr_parse_adv(struct sk_buff *skb, struct fcoe_fcf *fcf)
 		case FIP_DT_LOGO:
 		case FIP_DT_ELP:
 		default:
-			LIBFCOE_FIP_DBG("unexpected descriptor type %x "
+			LIBFCOE_FIP_DBG(fip, "unexpected descriptor type %x "
 					"in FIP adv\n", desc->fip_dtype);
 			/* standard says ignore unknown descriptors >= 128 */
 			if (desc->fip_dtype < FIP_DT_VENDOR_BASE)
@@ -687,15 +710,15 @@ static int fcoe_ctlr_parse_adv(struct sk_buff *skb, struct fcoe_fcf *fcf)
 	return 0;
 
 len_err:
-	LIBFCOE_FIP_DBG("FIP length error in descriptor type %x len %zu\n",
+	LIBFCOE_FIP_DBG(fip, "FIP length error in descriptor type %x len %zu\n",
 			desc->fip_dtype, dlen);
 	return -EINVAL;
 }
 
 /**
- * fcoe_ctlr_recv_adv() - Handle an incoming advertisement.
- * @fip:	FCoE controller.
- * @skb:	Received FIP packet.
+ * fcoe_ctlr_recv_adv() - Handle an incoming advertisement
+ * @fip: The FCoE controller receiving the advertisement
+ * @skb: The received FIP packet
  */
 static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
 {
@@ -706,7 +729,7 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
 	int first = 0;
 	int mtu_valid;
 
-	if (fcoe_ctlr_parse_adv(skb, &new))
+	if (fcoe_ctlr_parse_adv(fip, skb, &new))
 		return;
 
 	spin_lock_bh(&fip->lock);
@@ -752,7 +775,7 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
 	mtu_valid = fcoe_ctlr_mtu_valid(fcf);
 	fcf->time = jiffies;
 	if (!found) {
-		LIBFCOE_FIP_DBG("New FCF for fab %llx map %x val %d\n",
+		LIBFCOE_FIP_DBG(fip, "New FCF for fab %llx map %x val %d\n",
 				fcf->fabric_name, fcf->fc_map, mtu_valid);
 	}
 
@@ -778,7 +801,7 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
 	 */
 	if (mtu_valid && !fip->sel_time && fcoe_ctlr_fcf_usable(fcf)) {
 		fip->sel_time = jiffies +
-				msecs_to_jiffies(FCOE_CTLR_START_DELAY);
+			msecs_to_jiffies(FCOE_CTLR_START_DELAY);
 		if (!timer_pending(&fip->timer) ||
 		    time_before(fip->sel_time, fip->timer.expires))
 			mod_timer(&fip->timer, fip->sel_time);
@@ -788,15 +811,15 @@ out:
 }
 
 /**
- * fcoe_ctlr_recv_els() - Handle an incoming FIP-encapsulated ELS frame.
- * @fip:	FCoE controller.
- * @skb:	Received FIP packet.
+ * fcoe_ctlr_recv_els() - Handle an incoming FIP encapsulated ELS frame
+ * @fip: The FCoE controller which received the packet
+ * @skb: The received FIP packet
  */
 static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
 {
-	struct fc_lport *lp = fip->lp;
+	struct fc_lport *lport = fip->lp;
 	struct fip_header *fiph;
-	struct fc_frame *fp;
+	struct fc_frame *fp = (struct fc_frame *)skb;
 	struct fc_frame_header *fh = NULL;
 	struct fip_desc *desc;
 	struct fip_encaps *els;
@@ -831,10 +854,11 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
 			       ((struct fip_mac_desc *)desc)->fd_mac,
 			       ETH_ALEN);
 			if (!is_valid_ether_addr(granted_mac)) {
-				LIBFCOE_FIP_DBG("Invalid MAC address "
+				LIBFCOE_FIP_DBG(fip, "Invalid MAC address "
 						"in FIP ELS\n");
 				goto drop;
 			}
+			memcpy(fr_cb(fp)->granted_mac, granted_mac, ETH_ALEN);
 			break;
 		case FIP_DT_FLOGI:
 		case FIP_DT_FDISC:
@@ -850,7 +874,7 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
 			els_dtype = desc->fip_dtype;
 			break;
 		default:
-			LIBFCOE_FIP_DBG("unexpected descriptor type %x "
+			LIBFCOE_FIP_DBG(fip, "unexpected descriptor type %x "
 					"in FIP adv\n", desc->fip_dtype);
 			/* standard says ignore unknown descriptors >= 128 */
 			if (desc->fip_dtype < FIP_DT_VENDOR_BASE)
@@ -867,11 +891,8 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
 
 	if (els_dtype == FIP_DT_FLOGI && sub == FIP_SC_REP &&
 	    fip->flogi_oxid == ntohs(fh->fh_ox_id) &&
-	    els_op == ELS_LS_ACC && is_valid_ether_addr(granted_mac)) {
+	    els_op == ELS_LS_ACC && is_valid_ether_addr(granted_mac))
 		fip->flogi_oxid = FC_XID_UNKNOWN;
-		fip->update_mac(fip, fip->data_src_addr, granted_mac);
-		memcpy(fip->data_src_addr, granted_mac, ETH_ALEN);
-	}
 
 	/*
 	 * Convert skb into an fc_frame containing only the ELS.
@@ -882,32 +903,32 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
 	fc_frame_init(fp);
 	fr_sof(fp) = FC_SOF_I3;
 	fr_eof(fp) = FC_EOF_T;
-	fr_dev(fp) = lp;
+	fr_dev(fp) = lport;
 
-	stats = fc_lport_get_stats(lp);
+	stats = fc_lport_get_stats(lport);
 	stats->RxFrames++;
 	stats->RxWords += skb->len / FIP_BPW;
 
-	fc_exch_recv(lp, fp);
+	fc_exch_recv(lport, fp);
 	return;
 
 len_err:
-	LIBFCOE_FIP_DBG("FIP length error in descriptor type %x len %zu\n",
+	LIBFCOE_FIP_DBG(fip, "FIP length error in descriptor type %x len %zu\n",
 			desc->fip_dtype, dlen);
 drop:
 	kfree_skb(skb);
 }
 
 /**
- * fcoe_ctlr_recv_els() - Handle an incoming link reset frame.
- * @fip:	FCoE controller.
- * @fh:		Received FIP header.
+ * fcoe_ctlr_recv_clr_vlink() - Handle an incoming link reset frame
+ * @fip: The FCoE controller that received the frame
+ * @fh:	 The received FIP header
  *
  * There may be multiple VN_Port descriptors.
  * The overall length has already been checked.
  */
 static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
-				      struct fip_header *fh)
+				     struct fip_header *fh)
 {
 	struct fip_desc *desc;
 	struct fip_mac_desc *mp;
@@ -916,13 +937,13 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
 	size_t rlen;
 	size_t dlen;
 	struct fcoe_fcf *fcf = fip->sel_fcf;
-	struct fc_lport *lp = fip->lp;
+	struct fc_lport *lport = fip->lp;
 	u32	desc_mask;
 
-	LIBFCOE_FIP_DBG("Clear Virtual Link received\n");
+	LIBFCOE_FIP_DBG(fip, "Clear Virtual Link received\n");
 	if (!fcf)
 		return;
-	if (!fcf || !fc_host_port_id(lp->host))
+	if (!fcf || !fc_host_port_id(lport->host))
 		return;
 
 	/*
@@ -958,9 +979,10 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
 			if (dlen < sizeof(*vp))
 				return;
 			if (compare_ether_addr(vp->fd_mac,
-			    fip->data_src_addr) == 0 &&
-			    get_unaligned_be64(&vp->fd_wwpn) == lp->wwpn &&
-			    ntoh24(vp->fd_fc_id) == fc_host_port_id(lp->host))
+					       fip->get_src_addr(lport)) == 0 &&
+			    get_unaligned_be64(&vp->fd_wwpn) == lport->wwpn &&
+			    ntoh24(vp->fd_fc_id) ==
+			    fc_host_port_id(lport->host))
 				desc_mask &= ~BIT(FIP_DT_VN_ID);
 			break;
 		default:
@@ -977,33 +999,39 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
 	 * reset only if all required descriptors were present and valid.
 	 */
 	if (desc_mask) {
-		LIBFCOE_FIP_DBG("missing descriptors mask %x\n", desc_mask);
+		LIBFCOE_FIP_DBG(fip, "missing descriptors mask %x\n",
+				desc_mask);
 	} else {
-		LIBFCOE_FIP_DBG("performing Clear Virtual Link\n");
-		fcoe_ctlr_reset(fip, FIP_ST_ENABLED);
+		LIBFCOE_FIP_DBG(fip, "performing Clear Virtual Link\n");
+
+		spin_lock_bh(&fip->lock);
+		fc_lport_get_stats(lport)->VLinkFailureCount++;
+		fcoe_ctlr_reset(fip);
+		spin_unlock_bh(&fip->lock);
+
+		fc_lport_reset(fip->lp);
+		fcoe_ctlr_solicit(fip, NULL);
 	}
 }
 
 /**
- * fcoe_ctlr_recv() - Receive a FIP frame.
- * @fip:	FCoE controller.
- * @skb:	Received FIP packet.
+ * fcoe_ctlr_recv() - Receive a FIP packet
+ * @fip: The FCoE controller that received the packet
+ * @skb: The received FIP packet
  *
- * This is called from NET_RX_SOFTIRQ.
+ * This may be called from either NET_RX_SOFTIRQ or IRQ.
  */
 void fcoe_ctlr_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
 {
-	spin_lock_bh(&fip->fip_recv_list.lock);
-	__skb_queue_tail(&fip->fip_recv_list, skb);
-	spin_unlock_bh(&fip->fip_recv_list.lock);
+	skb_queue_tail(&fip->fip_recv_list, skb);
 	schedule_work(&fip->recv_work);
 }
 EXPORT_SYMBOL(fcoe_ctlr_recv);
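
Note the locking change: the old spin_lock_bh() + __skb_queue_tail() pair was
only softirq-safe, while skb_queue_tail() takes the queue lock with
spin_lock_irqsave() and so is safe from hard-IRQ context too, which is what
the updated comment promises. The full receive-deferral pattern, sketched with
illustrative names:

#include <linux/skbuff.h>
#include <linux/workqueue.h>

static struct sk_buff_head demo_fip_recv_list;	/* skb_queue_head_init() at setup */
static struct work_struct demo_recv_work;	/* INIT_WORK(..., demo_recv_work_fn) */

/* Entry point: may run in NET_RX_SOFTIRQ or hard-IRQ context. */
static void demo_recv(struct sk_buff *skb)
{
	skb_queue_tail(&demo_fip_recv_list, skb);	/* irqsave locking */
	schedule_work(&demo_recv_work);
}

/* Process context: drain and handle the queued frames. */
static void demo_recv_work_fn(struct work_struct *work)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&demo_fip_recv_list)) != NULL)
		kfree_skb(skb);		/* a real handler parses the frame */
}
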
 
 /**
- * fcoe_ctlr_recv_handler() - Receive a FIP frame.
- * @fip:	FCoE controller.
- * @skb:	Received FIP packet.
+ * fcoe_ctlr_recv_handler() - Receive a FIP frame
+ * @fip: The FCoE controller that received the frame
+ * @skb: The received FIP frame
  *
  * Returns non-zero if the frame is dropped.
  */
@@ -1038,7 +1066,7 @@ static int fcoe_ctlr_recv_handler(struct fcoe_ctlr *fip, struct sk_buff *skb)
 		fip->map_dest = 0;
 		fip->state = FIP_ST_ENABLED;
 		state = FIP_ST_ENABLED;
-		LIBFCOE_FIP_DBG("Using FIP mode\n");
+		LIBFCOE_FIP_DBG(fip, "Using FIP mode\n");
 	}
 	spin_unlock_bh(&fip->lock);
 	if (state != FIP_ST_ENABLED)
@@ -1060,8 +1088,8 @@ drop:
 }
 
 /**
- * fcoe_ctlr_select() - Select the best FCF, if possible.
- * @fip:	FCoE controller.
+ * fcoe_ctlr_select() - Select the best FCF (if possible)
+ * @fip: The FCoE controller
  *
  * If there are conflicting advertisements, no FCF can be chosen.
  *
@@ -1073,11 +1101,11 @@ static void fcoe_ctlr_select(struct fcoe_ctlr *fip)
 	struct fcoe_fcf *best = NULL;
 
 	list_for_each_entry(fcf, &fip->fcfs, list) {
-		LIBFCOE_FIP_DBG("consider FCF for fab %llx VFID %d map %x "
+		LIBFCOE_FIP_DBG(fip, "consider FCF for fab %llx VFID %d map %x "
 				"val %d\n", fcf->fabric_name, fcf->vfid,
 				fcf->fc_map, fcoe_ctlr_mtu_valid(fcf));
 		if (!fcoe_ctlr_fcf_usable(fcf)) {
-			LIBFCOE_FIP_DBG("FCF for fab %llx map %x %svalid "
+			LIBFCOE_FIP_DBG(fip, "FCF for fab %llx map %x %svalid "
 					"%savailable\n", fcf->fabric_name,
 					fcf->fc_map, (fcf->flags & FIP_FL_SOL)
 					? "" : "in", (fcf->flags & FIP_FL_AVAIL)
@@ -1091,7 +1119,7 @@ static void fcoe_ctlr_select(struct fcoe_ctlr *fip)
 		if (fcf->fabric_name != best->fabric_name ||
 		    fcf->vfid != best->vfid ||
 		    fcf->fc_map != best->fc_map) {
-			LIBFCOE_FIP_DBG("Conflicting fabric, VFID, "
+			LIBFCOE_FIP_DBG(fip, "Conflicting fabric, VFID, "
 					"or FC-MAP\n");
 			return;
 		}
@@ -1102,8 +1130,8 @@ static void fcoe_ctlr_select(struct fcoe_ctlr *fip)
 }
 
 /**
- * fcoe_ctlr_timeout() - FIP timer function.
- * @arg:	&fcoe_ctlr pointer.
+ * fcoe_ctlr_timeout() - FIP timeout handler
+ * @arg: The FCoE controller that timed out
  *
  * Ages FCFs.  Triggers FCF selection if possible.  Sends keep-alives.
  */
@@ -1113,8 +1141,6 @@ static void fcoe_ctlr_timeout(unsigned long arg)
 	struct fcoe_fcf *sel;
 	struct fcoe_fcf *fcf;
 	unsigned long next_timer = jiffies + msecs_to_jiffies(FIP_VN_KA_PERIOD);
-	u8 send_ctlr_ka;
-	u8 send_port_ka;
 
 	spin_lock_bh(&fip->lock);
 	if (fip->state == FIP_ST_DISABLED) {
@@ -1140,53 +1166,47 @@ static void fcoe_ctlr_timeout(unsigned long arg)
 			       fip->lp->host->host_no, sel->fcf_mac);
 			memcpy(fip->dest_addr, sel->fcf_mac, ETH_ALEN);
 			fip->port_ka_time = jiffies +
-					    msecs_to_jiffies(FIP_VN_KA_PERIOD);
+				msecs_to_jiffies(FIP_VN_KA_PERIOD);
 			fip->ctlr_ka_time = jiffies + sel->fka_period;
-			fip->link = 1;
 		} else {
 			printk(KERN_NOTICE "libfcoe: host%d: "
-			       "FIP Fibre-Channel Forwarder timed out.  "
+			       "FIP Fibre-Channel Forwarder timed out.  "
 			       "Starting FCF discovery.\n",
 			       fip->lp->host->host_no);
-			fip->link = 0;
+			fip->reset_req = 1;
+			schedule_work(&fip->link_work);
 		}
-		schedule_work(&fip->link_work);
 	}
 
-	send_ctlr_ka = 0;
-	send_port_ka = 0;
-	if (sel) {
+	if (sel && !sel->fd_flags) {
 		if (time_after_eq(jiffies, fip->ctlr_ka_time)) {
 			fip->ctlr_ka_time = jiffies + sel->fka_period;
-			send_ctlr_ka = 1;
+			fip->send_ctlr_ka = 1;
 		}
 		if (time_after(next_timer, fip->ctlr_ka_time))
 			next_timer = fip->ctlr_ka_time;
 
 		if (time_after_eq(jiffies, fip->port_ka_time)) {
 			fip->port_ka_time = jiffies +
-					msecs_to_jiffies(FIP_VN_KA_PERIOD);
-			send_port_ka = 1;
+				msecs_to_jiffies(FIP_VN_KA_PERIOD);
+			fip->send_port_ka = 1;
 		}
 		if (time_after(next_timer, fip->port_ka_time))
 			next_timer = fip->port_ka_time;
 		mod_timer(&fip->timer, next_timer);
 	} else if (fip->sel_time) {
 		next_timer = fip->sel_time +
-				msecs_to_jiffies(FCOE_CTLR_START_DELAY);
+			msecs_to_jiffies(FCOE_CTLR_START_DELAY);
 		mod_timer(&fip->timer, next_timer);
 	}
+	if (fip->send_ctlr_ka || fip->send_port_ka)
+		schedule_work(&fip->link_work);
 	spin_unlock_bh(&fip->lock);
-
-	if (send_ctlr_ka)
-		fcoe_ctlr_send_keep_alive(fip, 0, fip->ctl_src_addr);
-	if (send_port_ka)
-		fcoe_ctlr_send_keep_alive(fip, 1, fip->data_src_addr);
 }
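Note the behavioral change here: the timer no longer transmits keep-alives
itself. It only sets fip->send_ctlr_ka / fip->send_port_ka under fip->lock
and schedules link_work; fcoe_ctlr_link_work() below builds and sends the
frames in process context, including per-vport keep-alives.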
 
 /**
- * fcoe_ctlr_link_work() - worker thread function for link changes.
- * @work:	pointer to link_work member inside &fcoe_ctlr.
+ * fcoe_ctlr_link_work() - Worker thread function for link changes
+ * @work: Handle to a FCoE controller
  *
  * See if the link status has changed and if so, report it.
  *
@@ -1196,27 +1216,49 @@ static void fcoe_ctlr_timeout(unsigned long arg)
 static void fcoe_ctlr_link_work(struct work_struct *work)
 {
 	struct fcoe_ctlr *fip;
+	struct fc_lport *vport;
+	u8 *mac;
 	int link;
 	int last_link;
+	int reset;
 
 	fip = container_of(work, struct fcoe_ctlr, link_work);
 	spin_lock_bh(&fip->lock);
 	last_link = fip->last_link;
 	link = fip->link;
 	fip->last_link = link;
+	reset = fip->reset_req;
+	fip->reset_req = 0;
 	spin_unlock_bh(&fip->lock);
 
 	if (last_link != link) {
 		if (link)
 			fc_linkup(fip->lp);
 		else
-			fcoe_ctlr_reset(fip, FIP_ST_LINK_WAIT);
+			fc_linkdown(fip->lp);
+	} else if (reset && link)
+		fc_lport_reset(fip->lp);
+
+	if (fip->send_ctlr_ka) {
+		fip->send_ctlr_ka = 0;
+		fcoe_ctlr_send_keep_alive(fip, NULL, 0, fip->ctl_src_addr);
+	}
+	if (fip->send_port_ka) {
+		fip->send_port_ka = 0;
+		mutex_lock(&fip->lp->lp_mutex);
+		mac = fip->get_src_addr(fip->lp);
+		fcoe_ctlr_send_keep_alive(fip, fip->lp, 1, mac);
+		list_for_each_entry(vport, &fip->lp->vports, list) {
+			mac = fip->get_src_addr(vport);
+			fcoe_ctlr_send_keep_alive(fip, vport, 1, mac);
+		}
+		mutex_unlock(&fip->lp->lp_mutex);
 	}
 }
 
 /**
- * fcoe_ctlr_recv_work() - Worker thread function for receiving FIP frames.
- * @recv_work:	pointer to recv_work member inside &fcoe_ctlr.
+ * fcoe_ctlr_recv_work() - Worker thread function for receiving FIP frames
+ * @recv_work: Handle to a FCoE controller
  */
 static void fcoe_ctlr_recv_work(struct work_struct *recv_work)
 {
@@ -1224,20 +1266,14 @@ static void fcoe_ctlr_recv_work(struct work_struct *recv_work)
 	struct sk_buff *skb;
 
 	fip = container_of(recv_work, struct fcoe_ctlr, recv_work);
-	spin_lock_bh(&fip->fip_recv_list.lock);
-	while ((skb = __skb_dequeue(&fip->fip_recv_list))) {
-		spin_unlock_bh(&fip->fip_recv_list.lock);
+	while ((skb = skb_dequeue(&fip->fip_recv_list)))
 		fcoe_ctlr_recv_handler(fip, skb);
-		spin_lock_bh(&fip->fip_recv_list.lock);
-	}
-	spin_unlock_bh(&fip->fip_recv_list.lock);
 }
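(skb_dequeue() takes the queue lock internally, which is why the open-coded
lock/unlock pairs around __skb_dequeue() could be dropped both here and in
fcoe_ctlr_recv() above.)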
 
 /**
- * fcoe_ctlr_recv_flogi() - snoop Pre-FIP receipt of FLOGI response or request.
- * @fip:	FCoE controller.
- * @fp:		FC frame.
- * @sa:		Ethernet source MAC address from received FCoE frame.
+ * fcoe_ctlr_recv_flogi() - Snoop pre-FIP receipt of FLOGI response
+ * @fip: The FCoE controller
+ * @fp:	 The FC frame to snoop
  *
  * Snoop potential response to FLOGI or even incoming FLOGI.
  *
@@ -1245,15 +1281,18 @@ static void fcoe_ctlr_recv_work(struct work_struct *recv_work)
  * by fip->flogi_oxid != FC_XID_UNKNOWN.
  *
  * The caller is responsible for freeing the frame.
+ * Fill in the granted_mac address.
  *
  * Return non-zero if the frame should not be delivered to libfc.
  */
-int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *fip, struct fc_frame *fp, u8 *sa)
+int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *fip, struct fc_lport *lport,
+			 struct fc_frame *fp)
 {
 	struct fc_frame_header *fh;
 	u8 op;
-	u8 mac[ETH_ALEN];
+	u8 *sa;
 
+	sa = eth_hdr(&fp->skb)->h_source;
 	fh = fc_frame_header_get(fp);
 	if (fh->fh_type != FC_TYPE_ELS)
 		return 0;
@@ -1268,7 +1307,8 @@ int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *fip, struct fc_frame *fp, u8 *sa)
 			return -EINVAL;
 		}
 		fip->state = FIP_ST_NON_FIP;
-		LIBFCOE_FIP_DBG("received FLOGI LS_ACC using non-FIP mode\n");
+		LIBFCOE_FIP_DBG(fip,
+				"received FLOGI LS_ACC using non-FIP mode\n");
 
 		/*
 		 * FLOGI accepted.
@@ -1283,11 +1323,8 @@ int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *fip, struct fc_frame *fp, u8 *sa)
 			fip->map_dest = 0;
 		}
 		fip->flogi_oxid = FC_XID_UNKNOWN;
-		memcpy(mac, fip->data_src_addr, ETH_ALEN);
-		fc_fcoe_set_mac(fip->data_src_addr, fh->fh_d_id);
 		spin_unlock_bh(&fip->lock);
-
-		fip->update_mac(fip, mac, fip->data_src_addr);
+		fc_fcoe_set_mac(fr_cb(fp)->granted_mac, fh->fh_d_id);
 	} else if (op == ELS_FLOGI && fh->fh_r_ctl == FC_RCTL_ELS_REQ && sa) {
 		/*
 		 * Save source MAC for point-to-point responses.
@@ -1297,7 +1334,7 @@ int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *fip, struct fc_frame *fp, u8 *sa)
 			memcpy(fip->dest_addr, sa, ETH_ALEN);
 			fip->map_dest = 0;
 			if (fip->state == FIP_ST_NON_FIP)
-				LIBFCOE_FIP_DBG("received FLOGI REQ, "
+				LIBFCOE_FIP_DBG(fip, "received FLOGI REQ, "
 						"using non-FIP mode\n");
 			fip->state = FIP_ST_NON_FIP;
 		}
@@ -1308,10 +1345,10 @@ int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *fip, struct fc_frame *fp, u8 *sa)
 EXPORT_SYMBOL(fcoe_ctlr_recv_flogi);
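Usage sketch with the new signature (hedged; it mirrors what
fnic_set_port_id() below does): a pre-FIP LLD calls this when the granted
MAC is still zero and lets the routine fill in fr_cb(fp)->granted_mac.

	mac = fr_cb(fp)->granted_mac;
	if (is_zero_ether_addr(mac))
		fcoe_ctlr_recv_flogi(&fnic->ctlr, lport, fp);
	fnic_update_mac(lport, mac);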
 
 /**
- * fcoe_wwn_from_mac() - Converts 48-bit IEEE MAC address to 64-bit FC WWN.
- * @mac: mac address
- * @scheme: check port
- * @port: port indicator for converting
+ * fcoe_wwn_from_mac() - Converts a 48-bit IEEE MAC address to a 64-bit FC WWN
+ * @mac:    The MAC address to convert
+ * @scheme: The scheme to use when converting
+ * @port:   The port indicator for converting
  *
  * Returns: u64 fc world wide name
  */
@@ -1349,24 +1386,26 @@ u64 fcoe_wwn_from_mac(unsigned char mac[MAX_ADDR_LEN],
 EXPORT_SYMBOL_GPL(fcoe_wwn_from_mac);
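For reference, a typical derivation (a sketch under the usual fcoe
convention, where scheme 1 yields a node name and scheme 2 folds the port
indicator into a port name):

	u64 wwnn = fcoe_wwn_from_mac(mac, 1, 0);	/* WWNN */
	u64 wwpn = fcoe_wwn_from_mac(mac, 2, 0);	/* WWPN, port 0 */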
 
 /**
- * fcoe_libfc_config() - sets up libfc related properties for lport
- * @lp: ptr to the fc_lport
- * @tt: libfc function template
+ * fcoe_libfc_config() - Sets up libfc related properties for local port
+ * @lp: The local port to configure libfc for
+ * @tt: The libfc function template
  *
  * Returns : 0 for success
  */
-int fcoe_libfc_config(struct fc_lport *lp, struct libfc_function_template *tt)
+int fcoe_libfc_config(struct fc_lport *lport,
+		      struct libfc_function_template *tt)
 {
 	/* Set the function pointers set by the LLDD */
-	memcpy(&lp->tt, tt, sizeof(*tt));
-	if (fc_fcp_init(lp))
+	memcpy(&lport->tt, tt, sizeof(*tt));
+	if (fc_fcp_init(lport))
 		return -ENOMEM;
-	fc_exch_init(lp);
-	fc_elsct_init(lp);
-	fc_lport_init(lp);
-	fc_rport_init(lp);
-	fc_disc_init(lp);
+	fc_exch_init(lport);
+	fc_elsct_init(lport);
+	fc_lport_init(lport);
+	fc_rport_init(lport);
+	fc_disc_init(lport);
 
 	return 0;
 }
 EXPORT_SYMBOL_GPL(fcoe_libfc_config);
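Illustrative LLD usage (names hypothetical): fill a template with the
driver's transmit hook and let this helper install the default exchange,
ELS/CT, lport, rport, and discovery handlers; fnic_main.c below does the
same with fnic_transport_template.

	static struct libfc_function_template my_tt = {
		.frame_send = my_frame_send,	/* hypothetical LLD hook */
	};

	if (fcoe_libfc_config(lport, &my_tt))
		return -ENOMEM;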
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index e4c0a3d7d87b..bb208a6091e7 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -22,6 +22,7 @@
 #include <linux/netdevice.h>
 #include <linux/workqueue.h>
 #include <scsi/libfc.h>
+#include <scsi/libfcoe.h>
 #include "fnic_io.h"
 #include "fnic_res.h"
 #include "vnic_dev.h"
@@ -44,7 +45,7 @@
 #define	FNIC_IO_LOCKS		64 /* IO locks: power of 2 */
 #define FNIC_DFLT_QUEUE_DEPTH	32
 #define	FNIC_STATS_RATE_LIMIT	4 /* limit rate at which stats are pulled up */
-
+#define FNIC_MAX_CMD_LEN        16 /* Supported CDB length */
 /*
  * Tag bits used for special requests.
  */
@@ -145,6 +146,7 @@ struct mempool;
 /* Per-instance private data structure */
 struct fnic {
 	struct fc_lport *lport;
+	struct fcoe_ctlr ctlr;		/* FIP FCoE controller structure */
 	struct vnic_dev_bar bar0;
 
 	struct msix_entry msix_entry[FNIC_MSIX_INTR_MAX];
@@ -162,23 +164,16 @@ struct fnic {
 	unsigned int wq_count;
 	unsigned int cq_count;
 
-	u32 fcoui_mode:1;		/* use fcoui address*/
 	u32 vlan_hw_insert:1;	        /* let hw insert the tag */
 	u32 in_remove:1;                /* fnic device in removal */
 	u32 stop_rx_link_events:1;      /* stop proc. rx frames, link events */
 
 	struct completion *remove_wait; /* device remove thread blocks */
 
-	struct fc_frame *flogi;
-	struct fc_frame *flogi_resp;
-	u16 flogi_oxid;
-	unsigned long s_id;
 	enum fnic_state state;
 	spinlock_t fnic_lock;
 
 	u16 vlan_id;	                /* VLAN tag including priority */
-	u8 mac_addr[ETH_ALEN];
-	u8 dest_addr[ETH_ALEN];
 	u8 data_src_addr[ETH_ALEN];
 	u64 fcp_input_bytes;		/* internal statistic */
 	u64 fcp_output_bytes;		/* internal statistic */
@@ -205,6 +200,7 @@ struct fnic {
 	struct work_struct link_work;
 	struct work_struct frame_work;
 	struct sk_buff_head frame_queue;
+	struct sk_buff_head tx_queue;
 
 	/* copy work queue cache line section */
 	____cacheline_aligned struct vnic_wq_copy wq_copy[FNIC_WQ_COPY_MAX];
@@ -224,6 +220,11 @@ struct fnic {
 	____cacheline_aligned struct vnic_intr intr[FNIC_MSIX_INTR_MAX];
 };
 
+static inline struct fnic *fnic_from_ctlr(struct fcoe_ctlr *fip)
+{
+	return container_of(fip, struct fnic, ctlr);
+}
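FIP callbacks receive only the fcoe_ctlr pointer, so the driver recovers
its per-instance state with container_of(); fnic_eth_send() below opens
with exactly this call.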
+
 extern struct workqueue_struct *fnic_event_queue;
 extern struct device_attribute *fnic_attrs[];
 
@@ -239,7 +240,11 @@ void fnic_handle_link(struct work_struct *work);
 int fnic_rq_cmpl_handler(struct fnic *fnic, int);
 int fnic_alloc_rq_frame(struct vnic_rq *rq);
 void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf);
-int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp);
+void fnic_flush_tx(struct fnic *);
+void fnic_eth_send(struct fcoe_ctlr *, struct sk_buff *skb);
+void fnic_set_port_id(struct fc_lport *, u32, struct fc_frame *);
+void fnic_update_mac(struct fc_lport *, u8 *new);
+void fnic_update_mac_locked(struct fnic *, u8 *new);
 
 int fnic_queuecommand(struct scsi_cmnd *, void (*done)(struct scsi_cmnd *));
 int fnic_abort_cmd(struct scsi_cmnd *);
@@ -252,7 +257,7 @@ void fnic_empty_scsi_cleanup(struct fc_lport *);
 void fnic_exch_mgr_reset(struct fc_lport *, u32, u32);
 int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int);
 int fnic_wq_cmpl_handler(struct fnic *fnic, int);
-int fnic_flogi_reg_handler(struct fnic *fnic);
+int fnic_flogi_reg_handler(struct fnic *fnic, u32);
 void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
 				  struct fcpio_host_req *desc);
 int fnic_fw_reset_handler(struct fnic *fnic);
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c
index 50db3e36a619..54f8d0e5407f 100644
--- a/drivers/scsi/fnic/fnic_fcs.c
+++ b/drivers/scsi/fnic/fnic_fcs.c
@@ -23,6 +23,7 @@
 #include <linux/if_ether.h>
 #include <linux/if_vlan.h>
 #include <linux/workqueue.h>
+#include <scsi/fc/fc_fip.h>
 #include <scsi/fc/fc_els.h>
 #include <scsi/fc/fc_fcoe.h>
 #include <scsi/fc_frame.h>
@@ -34,6 +35,8 @@
 
 struct workqueue_struct *fnic_event_queue;
 
+static void fnic_set_eth_mode(struct fnic *);
+
 void fnic_handle_link(struct work_struct *work)
 {
 	struct fnic *fnic = container_of(work, struct fnic, link_work);
@@ -64,10 +67,10 @@ void fnic_handle_link(struct work_struct *work)
 				spin_unlock_irqrestore(&fnic->fnic_lock, flags);
 				FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
 					     "link down\n");
-				fc_linkdown(fnic->lport);
+				fcoe_ctlr_link_down(&fnic->ctlr);
 				FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
 					     "link up\n");
-				fc_linkup(fnic->lport);
+				fcoe_ctlr_link_up(&fnic->ctlr);
 			} else
 				/* UP -> UP */
 				spin_unlock_irqrestore(&fnic->fnic_lock, flags);
@@ -76,13 +79,13 @@ void fnic_handle_link(struct work_struct *work)
 		/* DOWN -> UP */
 		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
 		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n");
-		fc_linkup(fnic->lport);
+		fcoe_ctlr_link_up(&fnic->ctlr);
 	} else {
 		/* UP -> DOWN */
 		fnic->lport->host_stats.link_failure_count++;
 		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
 		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n");
-		fc_linkdown(fnic->lport);
+		fcoe_ctlr_link_down(&fnic->ctlr);
 	}
 
 }
@@ -107,197 +110,179 @@ void fnic_handle_frame(struct work_struct *work)
 			return;
 		}
 		fp = (struct fc_frame *)skb;
-		/* if Flogi resp frame, register the address */
-		if (fr_flags(fp)) {
-			vnic_dev_add_addr(fnic->vdev,
-					  fnic->data_src_addr);
-			fr_flags(fp) = 0;
+
+		/*
+		 * If we're in a transitional state, just re-queue and return.
+		 * The queue will be serviced when we get to a stable state.
+		 */
+		if (fnic->state != FNIC_IN_FC_MODE &&
+		    fnic->state != FNIC_IN_ETH_MODE) {
+			skb_queue_head(&fnic->frame_queue, skb);
+			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+			return;
 		}
 		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
 
 		fc_exch_recv(lp, fp);
 	}
-
-}
-
-static inline void fnic_import_rq_fc_frame(struct sk_buff *skb,
-					   u32 len, u8 sof, u8 eof)
-{
-	struct fc_frame *fp = (struct fc_frame *)skb;
-
-	skb_trim(skb, len);
-	fr_eof(fp) = eof;
-	fr_sof(fp) = sof;
 }
 
-
-static inline int fnic_import_rq_eth_pkt(struct sk_buff *skb, u32 len)
+/**
+ * fnic_import_rq_eth_pkt() - handle received FCoE or FIP frame.
+ * @fnic:	fnic instance.
+ * @skb:	Ethernet Frame.
+ */
+static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, struct sk_buff *skb)
 {
 	struct fc_frame *fp;
 	struct ethhdr *eh;
-	struct vlan_ethhdr *vh;
 	struct fcoe_hdr *fcoe_hdr;
 	struct fcoe_crc_eof *ft;
-	u32    transport_len = 0;
 
+	/*
+	 * Undo VLAN encapsulation if present.
+	 */
 	eh = (struct ethhdr *)skb->data;
-	vh = (struct vlan_ethhdr *)skb->data;
-	if (vh->h_vlan_proto == htons(ETH_P_8021Q) &&
-	    vh->h_vlan_encapsulated_proto == htons(ETH_P_FCOE)) {
-		skb_pull(skb, sizeof(struct vlan_ethhdr));
-		transport_len += sizeof(struct vlan_ethhdr);
-	} else if (eh->h_proto == htons(ETH_P_FCOE)) {
-		transport_len += sizeof(struct ethhdr);
-		skb_pull(skb, sizeof(struct ethhdr));
-	} else
-		return -1;
+	if (eh->h_proto == htons(ETH_P_8021Q)) {
+		memmove((u8 *)eh + VLAN_HLEN, eh, ETH_ALEN * 2);
+		eh = (struct ethhdr *)skb_pull(skb, VLAN_HLEN);
+		skb_reset_mac_header(skb);
+	}
+	if (eh->h_proto == htons(ETH_P_FIP)) {
+		skb_pull(skb, sizeof(*eh));
+		fcoe_ctlr_recv(&fnic->ctlr, skb);
+		return 1;		/* let caller know packet was used */
+	}
+	if (eh->h_proto != htons(ETH_P_FCOE))
+		goto drop;
+	skb_set_network_header(skb, sizeof(*eh));
+	skb_pull(skb, sizeof(*eh));
 
 	fcoe_hdr = (struct fcoe_hdr *)skb->data;
 	if (FC_FCOE_DECAPS_VER(fcoe_hdr) != FC_FCOE_VER)
-		return -1;
+		goto drop;
 
 	fp = (struct fc_frame *)skb;
 	fc_frame_init(fp);
 	fr_sof(fp) = fcoe_hdr->fcoe_sof;
 	skb_pull(skb, sizeof(struct fcoe_hdr));
-	transport_len += sizeof(struct fcoe_hdr);
+	skb_reset_transport_header(skb);
 
-	ft = (struct fcoe_crc_eof *)(skb->data + len -
-				     transport_len - sizeof(*ft));
+	ft = (struct fcoe_crc_eof *)(skb->data + skb->len - sizeof(*ft));
 	fr_eof(fp) = ft->fcoe_eof;
-	skb_trim(skb, len - transport_len - sizeof(*ft));
+	skb_trim(skb, skb->len - sizeof(*ft));
 	return 0;
+drop:
+	dev_kfree_skb_irq(skb);
+	return -1;
 }
 
-static inline int fnic_handle_flogi_resp(struct fnic *fnic,
-					 struct fc_frame *fp)
+/**
+ * fnic_update_mac_locked() - set data MAC address and filters.
+ * @fnic:	fnic instance.
+ * @new:	newly-assigned FCoE MAC address.
+ *
+ * Called with the fnic lock held.
+ */
+void fnic_update_mac_locked(struct fnic *fnic, u8 *new)
 {
-	u8 mac[ETH_ALEN] = FC_FCOE_FLOGI_MAC;
-	struct ethhdr *eth_hdr;
-	struct fc_frame_header *fh;
-	int ret = 0;
-	unsigned long flags;
-	struct fc_frame *old_flogi_resp = NULL;
+	u8 *ctl = fnic->ctlr.ctl_src_addr;
+	u8 *data = fnic->data_src_addr;
 
-	fh = (struct fc_frame_header *)fr_hdr(fp);
+	if (is_zero_ether_addr(new))
+		new = ctl;
+	if (!compare_ether_addr(data, new))
+		return;
+	FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "update_mac %pM\n", new);
+	if (!is_zero_ether_addr(data) && compare_ether_addr(data, ctl))
+		vnic_dev_del_addr(fnic->vdev, data);
+	memcpy(data, new, ETH_ALEN);
+	if (compare_ether_addr(new, ctl))
+		vnic_dev_add_addr(fnic->vdev, new);
+}
 
-	spin_lock_irqsave(&fnic->fnic_lock, flags);
+/**
+ * fnic_update_mac() - set data MAC address and filters.
+ * @lport:	local port.
+ * @new:	newly-assigned FCoE MAC address.
+ */
+void fnic_update_mac(struct fc_lport *lport, u8 *new)
+{
+	struct fnic *fnic = lport_priv(lport);
 
-	if (fnic->state == FNIC_IN_ETH_MODE) {
+	spin_lock_irq(&fnic->fnic_lock);
+	fnic_update_mac_locked(fnic, new);
+	spin_unlock_irq(&fnic->fnic_lock);
+}
 
-		/*
-		 * Check if oxid matches on taking the lock. A new Flogi
-		 * issued by libFC might have changed the fnic cached oxid
-		 */
-		if (fnic->flogi_oxid != ntohs(fh->fh_ox_id)) {
-			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
-				     "Flogi response oxid not"
-				     " matching cached oxid, dropping frame"
-				     "\n");
-			ret = -1;
-			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
-			dev_kfree_skb_irq(fp_skb(fp));
-			goto handle_flogi_resp_end;
-		}
+/**
+ * fnic_set_port_id() - set the port_ID after successful FLOGI.
+ * @lport:	local port.
+ * @port_id:	assigned FC_ID.
+ * @fp:		received frame containing the FLOGI accept or NULL.
+ *
+ * This is called from libfc when a new FC_ID has been assigned.
+ * This causes us to reset the firmware to FC_MODE and setup the new MAC
+ * address and FC_ID.
+ *
+ * It is also called with FC_ID 0 when we're logged off.
+ *
+ * If the FC_ID is due to point-to-point, fp may be NULL.
+ */
+void fnic_set_port_id(struct fc_lport *lport, u32 port_id, struct fc_frame *fp)
+{
+	struct fnic *fnic = lport_priv(lport);
+	u8 *mac;
+	int ret;
 
-		/* Drop older cached flogi response frame, cache this frame */
-		old_flogi_resp = fnic->flogi_resp;
-		fnic->flogi_resp = fp;
-		fnic->flogi_oxid = FC_XID_UNKNOWN;
+	FNIC_FCS_DBG(KERN_DEBUG, lport->host, "set port_id %x fp %p\n",
+		     port_id, fp);
 
-		/*
-		 * this frame is part of flogi get the src mac addr from this
-		 * frame if the src mac is fcoui based then we mark the
-		 * address mode flag to use fcoui base for dst mac addr
-		 * otherwise we have to store the fcoe gateway addr
-		 */
-		eth_hdr = (struct ethhdr *)skb_mac_header(fp_skb(fp));
-		memcpy(mac, eth_hdr->h_source, ETH_ALEN);
+	/*
+	 * If we're clearing the FC_ID, change to use the ctl_src_addr.
+	 * Set ethernet mode to send FLOGI.
+	 */
+	if (!port_id) {
+		fnic_update_mac(lport, fnic->ctlr.ctl_src_addr);
+		fnic_set_eth_mode(fnic);
+		return;
+	}
 
-		if (ntoh24(mac) == FC_FCOE_OUI)
-			fnic->fcoui_mode = 1;
-		else {
-			fnic->fcoui_mode = 0;
-			memcpy(fnic->dest_addr, mac, ETH_ALEN);
+	if (fp) {
+		mac = fr_cb(fp)->granted_mac;
+		if (is_zero_ether_addr(mac)) {
+			/* non-FIP - FLOGI already accepted - ignore return */
+			fcoe_ctlr_recv_flogi(&fnic->ctlr, lport, fp);
 		}
+		fnic_update_mac(lport, mac);
+	}
 
-		/*
-		 * Except for Flogi frame, all outbound frames from us have the
-		 * Eth Src address as FC_FCOE_OUI"our_sid". Flogi frame uses
-		 * the vnic MAC address as the Eth Src address
-		 */
-		fc_fcoe_set_mac(fnic->data_src_addr, fh->fh_d_id);
-
-		/* We get our s_id from the d_id of the flogi resp frame */
-		fnic->s_id = ntoh24(fh->fh_d_id);
-
-		/* Change state to reflect transition from Eth to FC mode */
+	/* Change state to reflect transition to FC mode */
+	spin_lock_irq(&fnic->fnic_lock);
+	if (fnic->state == FNIC_IN_ETH_MODE || fnic->state == FNIC_IN_FC_MODE)
 		fnic->state = FNIC_IN_ETH_TRANS_FC_MODE;
-
-	} else {
+	else {
 		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
 			     "Unexpected fnic state %s while"
 			     " processing flogi resp\n",
 			     fnic_state_to_str(fnic->state));
-		ret = -1;
-		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
-		dev_kfree_skb_irq(fp_skb(fp));
-		goto handle_flogi_resp_end;
+		spin_unlock_irq(&fnic->fnic_lock);
+		return;
 	}
-
-	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
-
-	/* Drop older cached frame */
-	if (old_flogi_resp)
-		dev_kfree_skb_irq(fp_skb(old_flogi_resp));
+	spin_unlock_irq(&fnic->fnic_lock);
 
 	/*
-	 * send flogi reg request to firmware, this will put the fnic in
-	 * in FC mode
+	 * Send FLOGI registration to firmware to set up FC mode.
+	 * The new address will be set up when registration completes.
 	 */
-	ret = fnic_flogi_reg_handler(fnic);
+	ret = fnic_flogi_reg_handler(fnic, port_id);
 
 	if (ret < 0) {
-		int free_fp = 1;
-		spin_lock_irqsave(&fnic->fnic_lock, flags);
-		/*
-		 * free the frame is some other thread is not
-		 * pointing to it
-		 */
-		if (fnic->flogi_resp != fp)
-			free_fp = 0;
-		else
-			fnic->flogi_resp = NULL;
-
+		spin_lock_irq(&fnic->fnic_lock);
 		if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE)
 			fnic->state = FNIC_IN_ETH_MODE;
-		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
-		if (free_fp)
-			dev_kfree_skb_irq(fp_skb(fp));
+		spin_unlock_irq(&fnic->fnic_lock);
 	}
-
- handle_flogi_resp_end:
-	return ret;
-}
-
-/* Returns 1 for a response that matches cached flogi oxid */
-static inline int is_matching_flogi_resp_frame(struct fnic *fnic,
-					       struct fc_frame *fp)
-{
-	struct fc_frame_header *fh;
-	int ret = 0;
-	u32 f_ctl;
-
-	fh = fc_frame_header_get(fp);
-	f_ctl = ntoh24(fh->fh_f_ctl);
-
-	if (fnic->flogi_oxid == ntohs(fh->fh_ox_id) &&
-	    fh->fh_r_ctl == FC_RCTL_ELS_REP &&
-	    (f_ctl & (FC_FC_EX_CTX | FC_FC_SEQ_CTX)) == FC_FC_EX_CTX &&
-	    fh->fh_type == FC_TYPE_ELS)
-		ret = 1;
-
-	return ret;
 }
 
 static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
@@ -326,6 +311,7 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
 	pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
 			 PCI_DMA_FROMDEVICE);
 	skb = buf->os_buf;
+	fp = (struct fc_frame *)skb;
 	buf->os_buf = NULL;
 
 	cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index);
@@ -338,6 +324,9 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
 				   &fcoe_enc_error, &fcs_ok, &vlan_stripped,
 				   &vlan);
 		eth_hdrs_stripped = 1;
+		skb_trim(skb, fcp_bytes_written);
+		fr_sof(fp) = sof;
+		fr_eof(fp) = eof;
 
 	} else if (type == CQ_DESC_TYPE_RQ_ENET) {
 		cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
@@ -352,6 +341,14 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
 				    &ipv4_csum_ok, &ipv6, &ipv4,
 				    &ipv4_fragment, &fcs_ok);
 		eth_hdrs_stripped = 0;
+		skb_trim(skb, bytes_written);
+		if (!fcs_ok) {
+			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
+				     "fcs error.  dropping packet.\n");
+			goto drop;
+		}
+		if (fnic_import_rq_eth_pkt(fnic, skb))
+			return;
 
 	} else {
 		/* wrong CQ type*/
@@ -370,43 +367,11 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
 		goto drop;
 	}
 
-	if (eth_hdrs_stripped)
-		fnic_import_rq_fc_frame(skb, fcp_bytes_written, sof, eof);
-	else if (fnic_import_rq_eth_pkt(skb, bytes_written))
-		goto drop;
-
-	fp = (struct fc_frame *)skb;
-
-	/*
-	 * If frame is an ELS response that matches the cached FLOGI OX_ID,
-	 * and is accept, issue flogi_reg_request copy wq request to firmware
-	 * to register the S_ID and determine whether FC_OUI mode or GW mode.
-	 */
-	if (is_matching_flogi_resp_frame(fnic, fp)) {
-		if (!eth_hdrs_stripped) {
-			if (fc_frame_payload_op(fp) == ELS_LS_ACC) {
-				fnic_handle_flogi_resp(fnic, fp);
-				return;
-			}
-			/*
-			 * Recd. Flogi reject. No point registering
-			 * with fw, but forward to libFC
-			 */
-			goto forward;
-		}
-		goto drop;
-	}
-	if (!eth_hdrs_stripped)
-		goto drop;
-
-forward:
 	spin_lock_irqsave(&fnic->fnic_lock, flags);
 	if (fnic->stop_rx_link_events) {
 		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
 		goto drop;
 	}
-	/* Use fr_flags to indicate whether succ. flogi resp or not */
-	fr_flags(fp) = 0;
 	fr_dev(fp) = fnic->lport;
 	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
 
@@ -494,12 +459,49 @@ void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
 	buf->os_buf = NULL;
 }
 
-static inline int is_flogi_frame(struct fc_frame_header *fh)
+/**
+ * fnic_eth_send() - Send Ethernet frame.
+ * @fip:	fcoe_ctlr instance.
+ * @skb:	FIP frame to send, an Ethernet packet without VLAN encapsulation.
+ */
+void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
 {
-	return fh->fh_r_ctl == FC_RCTL_ELS_REQ && *(u8 *)(fh + 1) == ELS_FLOGI;
+	struct fnic *fnic = fnic_from_ctlr(fip);
+	struct vnic_wq *wq = &fnic->wq[0];
+	dma_addr_t pa;
+	struct ethhdr *eth_hdr;
+	struct vlan_ethhdr *vlan_hdr;
+	unsigned long flags;
+
+	if (!fnic->vlan_hw_insert) {
+		eth_hdr = (struct ethhdr *)skb_mac_header(skb);
+		vlan_hdr = (struct vlan_ethhdr *)skb_push(skb,
+				sizeof(*vlan_hdr) - sizeof(*eth_hdr));
+		memcpy(vlan_hdr, eth_hdr, 2 * ETH_ALEN);
+		vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
+		vlan_hdr->h_vlan_encapsulated_proto = eth_hdr->h_proto;
+		vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
+	}
+
+	pa = pci_map_single(fnic->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
+
+	spin_lock_irqsave(&fnic->wq_lock[0], flags);
+	if (!vnic_wq_desc_avail(wq)) {
+		pci_unmap_single(fnic->pdev, pa, skb->len, PCI_DMA_TODEVICE);
+		spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
+		kfree_skb(skb);
+		return;
+	}
+
+	fnic_queue_wq_eth_desc(wq, skb, pa, skb->len,
+			       fnic->vlan_hw_insert, fnic->vlan_id, 1);
+	spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
 }
 
-int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
+/**
+ * fnic_send_frame() - Send an FC frame.
+ * @fnic:	fnic instance.
+ * @fp:		FC frame to send.
+ */
+static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
 {
 	struct vnic_wq *wq = &fnic->wq[0];
 	struct sk_buff *skb;
@@ -515,6 +517,10 @@ int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
 	fh = fc_frame_header_get(fp);
 	skb = fp_skb(fp);
 
+	if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ) &&
+	    fcoe_ctlr_els_send(&fnic->ctlr, fnic->lport, skb))
+		return 0;
+
 	if (!fnic->vlan_hw_insert) {
 		eth_hdr_len = sizeof(*vlan_hdr) + sizeof(*fcoe_hdr);
 		vlan_hdr = (struct vlan_ethhdr *)skb_push(skb, eth_hdr_len);
@@ -530,16 +536,11 @@ int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
 		fcoe_hdr = (struct fcoe_hdr *)(eth_hdr + 1);
 	}
 
-	if (is_flogi_frame(fh)) {
+	if (fnic->ctlr.map_dest)
 		fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id);
-		memcpy(eth_hdr->h_source, fnic->mac_addr, ETH_ALEN);
-	} else {
-		if (fnic->fcoui_mode)
-			fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id);
-		else
-			memcpy(eth_hdr->h_dest, fnic->dest_addr, ETH_ALEN);
-		memcpy(eth_hdr->h_source, fnic->data_src_addr, ETH_ALEN);
-	}
+	else
+		memcpy(eth_hdr->h_dest, fnic->ctlr.dest_addr, ETH_ALEN);
+	memcpy(eth_hdr->h_source, fnic->data_src_addr, ETH_ALEN);
 
 	tot_len = skb->len;
 	BUG_ON(tot_len % 4);
@@ -578,109 +579,85 @@ fnic_send_frame_end:
 int fnic_send(struct fc_lport *lp, struct fc_frame *fp)
 {
 	struct fnic *fnic = lport_priv(lp);
-	struct fc_frame_header *fh;
-	int ret = 0;
-	enum fnic_state old_state;
 	unsigned long flags;
-	struct fc_frame *old_flogi = NULL;
-	struct fc_frame *old_flogi_resp = NULL;
 
 	if (fnic->in_remove) {
 		dev_kfree_skb(fp_skb(fp));
-		ret = -1;
-		goto fnic_send_end;
+		return -1;
 	}
 
-	fh = fc_frame_header_get(fp);
-	/* if not an Flogi frame, send it out, this is the common case */
-	if (!is_flogi_frame(fh))
-		return fnic_send_frame(fnic, fp);
+	/*
+	 * Queue frame if in a transitional state.
+	 * This occurs while registering the Port_ID / MAC address after FLOGI.
+	 */
+	spin_lock_irqsave(&fnic->fnic_lock, flags);
+	if (fnic->state != FNIC_IN_FC_MODE && fnic->state != FNIC_IN_ETH_MODE) {
+		skb_queue_tail(&fnic->tx_queue, fp_skb(fp));
+		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+		return 0;
+	}
+	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
 
-	/* Flogi frame, now enter the state machine */
+	return fnic_send_frame(fnic, fp);
+}
 
-	spin_lock_irqsave(&fnic->fnic_lock, flags);
-again:
-	/* Get any old cached frames, free them after dropping lock */
-	old_flogi = fnic->flogi;
-	fnic->flogi = NULL;
-	old_flogi_resp = fnic->flogi_resp;
-	fnic->flogi_resp = NULL;
+/**
+ * fnic_flush_tx() - send queued frames.
+ * @fnic: fnic device
+ *
+ * Send frames that were waiting to go out in FC or Ethernet mode.
+ * Whenever changing modes we purge queued frames, so these frames should
+ * be queued for the stable mode that we're in, either FC or Ethernet.
+ *
+ * Called without fnic_lock held.
+ */
+void fnic_flush_tx(struct fnic *fnic)
+{
+	struct sk_buff *skb;
+	struct fc_frame *fp;
 
-	fnic->flogi_oxid = FC_XID_UNKNOWN;
+	while ((skb = skb_dequeue(&fnic->tx_queue))) {
+		fp = (struct fc_frame *)skb;
+		fnic_send_frame(fnic, fp);
+	}
+}
 
+/**
+ * fnic_set_eth_mode() - put fnic into ethernet mode.
+ * @fnic: fnic device
+ *
+ * Called without fnic lock held.
+ */
+static void fnic_set_eth_mode(struct fnic *fnic)
+{
+	unsigned long flags;
+	enum fnic_state old_state;
+	int ret;
+
+	spin_lock_irqsave(&fnic->fnic_lock, flags);
+again:
 	old_state = fnic->state;
 	switch (old_state) {
 	case FNIC_IN_FC_MODE:
 	case FNIC_IN_ETH_TRANS_FC_MODE:
 	default:
 		fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
-		vnic_dev_del_addr(fnic->vdev, fnic->data_src_addr);
 		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
 
-		if (old_flogi) {
-			dev_kfree_skb(fp_skb(old_flogi));
-			old_flogi = NULL;
-		}
-		if (old_flogi_resp) {
-			dev_kfree_skb(fp_skb(old_flogi_resp));
-			old_flogi_resp = NULL;
-		}
-
 		ret = fnic_fw_reset_handler(fnic);
 
 		spin_lock_irqsave(&fnic->fnic_lock, flags);
 		if (fnic->state != FNIC_IN_FC_TRANS_ETH_MODE)
 			goto again;
-		if (ret) {
+		if (ret)
 			fnic->state = old_state;
-			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
-			dev_kfree_skb(fp_skb(fp));
-			goto fnic_send_end;
-		}
-		old_flogi = fnic->flogi;
-		fnic->flogi = fp;
-		fnic->flogi_oxid = ntohs(fh->fh_ox_id);
-		old_flogi_resp = fnic->flogi_resp;
-		fnic->flogi_resp = NULL;
-		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
 		break;
 
 	case FNIC_IN_FC_TRANS_ETH_MODE:
-		/*
-		 * A reset is pending with the firmware. Store the flogi
-		 * and its oxid. The transition out of this state happens
-		 * only when Firmware completes the reset, either with
-		 * success or failed. If success, transition to
-		 * FNIC_IN_ETH_MODE, if fail, then transition to
-		 * FNIC_IN_FC_MODE
-		 */
-		fnic->flogi = fp;
-		fnic->flogi_oxid = ntohs(fh->fh_ox_id);
-		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
-		break;
-
 	case FNIC_IN_ETH_MODE:
-		/*
-		 * The fw/hw is already in eth mode. Store the oxid,
-		 * and send the flogi frame out. The transition out of this
-		 * state happens only we receive flogi response from the
-		 * network, and the oxid matches the cached oxid when the
-		 * flogi frame was sent out. If they match, then we issue
-		 * a flogi_reg request and transition to state
-		 * FNIC_IN_ETH_TRANS_FC_MODE
-		 */
-		fnic->flogi_oxid = ntohs(fh->fh_ox_id);
-		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
-		ret = fnic_send_frame(fnic, fp);
 		break;
 	}
-
-fnic_send_end:
-	if (old_flogi)
-		dev_kfree_skb(fp_skb(old_flogi));
-	if (old_flogi_resp)
-		dev_kfree_skb(fp_skb(old_flogi_resp));
-	return ret;
+	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
 }
 
 static void fnic_wq_complete_frame_send(struct vnic_wq *wq,
diff --git a/drivers/scsi/fnic/fnic_isr.c b/drivers/scsi/fnic/fnic_isr.c
index 2b3064828aea..5c1f223cabce 100644
--- a/drivers/scsi/fnic/fnic_isr.c
+++ b/drivers/scsi/fnic/fnic_isr.c
@@ -48,9 +48,9 @@ static irqreturn_t fnic_isr_legacy(int irq, void *data)
 	}
 
 	if (pba & (1 << FNIC_INTX_WQ_RQ_COPYWQ)) {
-		work_done += fnic_wq_copy_cmpl_handler(fnic, 8);
-		work_done += fnic_wq_cmpl_handler(fnic, 4);
-		work_done += fnic_rq_cmpl_handler(fnic, 4);
+		work_done += fnic_wq_copy_cmpl_handler(fnic, -1);
+		work_done += fnic_wq_cmpl_handler(fnic, -1);
+		work_done += fnic_rq_cmpl_handler(fnic, -1);
 
 		vnic_intr_return_credits(&fnic->intr[FNIC_INTX_WQ_RQ_COPYWQ],
 					 work_done,
@@ -66,9 +66,9 @@ static irqreturn_t fnic_isr_msi(int irq, void *data)
 	struct fnic *fnic = data;
 	unsigned long work_done = 0;
 
-	work_done += fnic_wq_copy_cmpl_handler(fnic, 8);
-	work_done += fnic_wq_cmpl_handler(fnic, 4);
-	work_done += fnic_rq_cmpl_handler(fnic, 4);
+	work_done += fnic_wq_copy_cmpl_handler(fnic, -1);
+	work_done += fnic_wq_cmpl_handler(fnic, -1);
+	work_done += fnic_rq_cmpl_handler(fnic, -1);
 
 	vnic_intr_return_credits(&fnic->intr[0],
 				 work_done,
@@ -83,7 +83,7 @@ static irqreturn_t fnic_isr_msix_rq(int irq, void *data)
 	struct fnic *fnic = data;
 	unsigned long rq_work_done = 0;
 
-	rq_work_done = fnic_rq_cmpl_handler(fnic, 4);
+	rq_work_done = fnic_rq_cmpl_handler(fnic, -1);
 	vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_RQ],
 				 rq_work_done,
 				 1 /* unmask intr */,
@@ -97,7 +97,7 @@ static irqreturn_t fnic_isr_msix_wq(int irq, void *data)
 	struct fnic *fnic = data;
 	unsigned long wq_work_done = 0;
 
-	wq_work_done = fnic_wq_cmpl_handler(fnic, 4);
+	wq_work_done = fnic_wq_cmpl_handler(fnic, -1);
 	vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_WQ],
 				 wq_work_done,
 				 1 /* unmask intr */,
@@ -110,7 +110,7 @@ static irqreturn_t fnic_isr_msix_wq_copy(int irq, void *data)
 	struct fnic *fnic = data;
 	unsigned long wq_copy_work_done = 0;
 
-	wq_copy_work_done = fnic_wq_copy_cmpl_handler(fnic, 8);
+	wq_copy_work_done = fnic_wq_copy_cmpl_handler(fnic, -1);
 	vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_WQ_COPY],
 				 wq_copy_work_done,
 				 1 /* unmask intr */,
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index 71c7bbe26d05..fe1b1031f7ab 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -25,6 +25,8 @@
 #include <linux/interrupt.h>
 #include <linux/spinlock.h>
 #include <linux/workqueue.h>
+#include <linux/if_ether.h>
+#include <scsi/fc/fc_fip.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_transport.h>
 #include <scsi/scsi_transport_fc.h>
@@ -68,6 +70,7 @@ MODULE_PARM_DESC(fnic_log_level, "bit mask of fnic logging levels");
 
 static struct libfc_function_template fnic_transport_template = {
 	.frame_send = fnic_send,
+	.lport_set_port_id = fnic_set_port_id,
 	.fcp_abort_io = fnic_empty_scsi_cleanup,
 	.fcp_cleanup = fnic_empty_scsi_cleanup,
 	.exch_mgr_reset = fnic_exch_mgr_reset
@@ -140,6 +143,7 @@ static struct fc_function_template fnic_fc_functions = {
 	.get_fc_host_stats = fnic_get_stats,
 	.dd_fcrport_size = sizeof(struct fc_rport_libfc_priv),
 	.terminate_rport_io = fnic_terminate_rport_io,
+	.bsg_request = fc_lport_bsg_request,
 };
 
 static void fnic_get_host_speed(struct Scsi_Host *shost)
@@ -324,9 +328,6 @@ static int fnic_cleanup(struct fnic *fnic)
 {
 	unsigned int i;
 	int err;
-	unsigned long flags;
-	struct fc_frame *flogi = NULL;
-	struct fc_frame *flogi_resp = NULL;
 
 	vnic_dev_disable(fnic->vdev);
 	for (i = 0; i < fnic->intr_count; i++)
@@ -367,24 +368,6 @@ static int fnic_cleanup(struct fnic *fnic)
 	for (i = 0; i < fnic->intr_count; i++)
 		vnic_intr_clean(&fnic->intr[i]);
 
-	/*
-	 * Remove cached flogi and flogi resp frames if any
-	 * These frames are not in any queue, and therefore queue
-	 * cleanup does not clean them. So clean them explicitly
-	 */
-	spin_lock_irqsave(&fnic->fnic_lock, flags);
-	flogi = fnic->flogi;
-	fnic->flogi = NULL;
-	flogi_resp = fnic->flogi_resp;
-	fnic->flogi_resp = NULL;
-	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
-
-	if (flogi)
-		dev_kfree_skb(fp_skb(flogi));
-
-	if (flogi_resp)
-		dev_kfree_skb(fp_skb(flogi_resp));
-
 	mempool_destroy(fnic->io_req_pool);
 	for (i = 0; i < FNIC_SGL_NUM_CACHES; i++)
 		mempool_destroy(fnic->io_sgl_pool[i]);
@@ -409,6 +392,17 @@ static void *fnic_alloc_slab_dma(gfp_t gfp_mask, void *pool_data)
 	return kmem_cache_alloc(mem, gfp_mask | GFP_ATOMIC | GFP_DMA);
 }
 
+/**
+ * fnic_get_mac() - get assigned data MAC address for FIP code.
+ * @lport:	local port.
+ */
+static u8 *fnic_get_mac(struct fc_lport *lport)
+{
+	struct fnic *fnic = lport_priv(lport);
+
+	return fnic->data_src_addr;
+}
+
 static int __devinit fnic_probe(struct pci_dev *pdev,
 				const struct pci_device_id *ent)
 {
@@ -424,17 +418,16 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
 	 * Allocate SCSI Host and set up association between host,
 	 * local port, and fnic
 	 */
-	host = scsi_host_alloc(&fnic_host_template,
-			       sizeof(struct fc_lport) + sizeof(struct fnic));
-	if (!host) {
-		printk(KERN_ERR PFX "Unable to alloc SCSI host\n");
+	lp = libfc_host_alloc(&fnic_host_template, sizeof(struct fnic));
+	if (!lp) {
+		printk(KERN_ERR PFX "Unable to alloc libfc local port\n");
 		err = -ENOMEM;
 		goto err_out;
 	}
-	lp = shost_priv(host);
-	lp->host = host;
+	host = lp->host;
 	fnic = lport_priv(lp);
 	fnic->lport = lp;
+	fnic->ctlr.lp = lp;
 
 	snprintf(fnic->name, sizeof(fnic->name) - 1, "%s%d", DRV_NAME,
 		 host->host_no);
@@ -543,12 +536,14 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
 		goto err_out_dev_close;
 	}
 
-	err = vnic_dev_mac_addr(fnic->vdev, fnic->mac_addr);
+	err = vnic_dev_mac_addr(fnic->vdev, fnic->ctlr.ctl_src_addr);
 	if (err) {
 		shost_printk(KERN_ERR, fnic->lport->host,
 			     "vNIC get MAC addr failed\n");
 		goto err_out_dev_close;
 	}
+	/* set data_src for point-to-point mode and to keep it non-zero */
+	memcpy(fnic->data_src_addr, fnic->ctlr.ctl_src_addr, ETH_ALEN);
 
 	/* Get vNIC configuration */
 	err = fnic_get_vnic_config(fnic);
@@ -560,6 +555,7 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
 	}
 	host->max_lun = fnic->config.luns_per_tgt;
 	host->max_id = FNIC_MAX_FCP_TARGET;
+	host->max_cmd_len = FNIC_MAX_CMD_LEN;
 
 	fnic_get_res_counts(fnic);
 
@@ -571,19 +567,12 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
 		goto err_out_dev_close;
 	}
 
-	err = fnic_request_intr(fnic);
-	if (err) {
-		shost_printk(KERN_ERR, fnic->lport->host,
-			     "Unable to request irq.\n");
-		goto err_out_clear_intr;
-	}
-
 	err = fnic_alloc_vnic_resources(fnic);
 	if (err) {
 		shost_printk(KERN_ERR, fnic->lport->host,
 			     "Failed to alloc vNIC resources, "
 			     "aborting.\n");
-		goto err_out_free_intr;
+		goto err_out_clear_intr;
 	}
 
 
@@ -623,9 +612,21 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
 	fnic->vlan_hw_insert = 1;
 	fnic->vlan_id = 0;
 
-	fnic->flogi_oxid = FC_XID_UNKNOWN;
-	fnic->flogi = NULL;
-	fnic->flogi_resp = NULL;
+	/* Initialize the FIP fcoe_ctlr struct */
+	fnic->ctlr.send = fnic_eth_send;
+	fnic->ctlr.update_mac = fnic_update_mac;
+	fnic->ctlr.get_src_addr = fnic_get_mac;
+	fcoe_ctlr_init(&fnic->ctlr);
+	if (fnic->config.flags & VFCF_FIP_CAPABLE) {
+		shost_printk(KERN_INFO, fnic->lport->host,
+			     "firmware supports FIP\n");
+		vnic_dev_add_addr(fnic->vdev, FIP_ALL_ENODE_MACS);
+		vnic_dev_add_addr(fnic->vdev, fnic->ctlr.ctl_src_addr);
+	} else {
+		shost_printk(KERN_INFO, fnic->lport->host,
+			     "firmware uses non-FIP mode\n");
+		fnic->ctlr.mode = FIP_ST_NON_FIP;
+	}
 	fnic->state = FNIC_IN_FC_MODE;
 
 	/* Enable hardware stripping of vlan header on ingress */
@@ -716,6 +717,7 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
 	INIT_WORK(&fnic->link_work, fnic_handle_link);
 	INIT_WORK(&fnic->frame_work, fnic_handle_frame);
 	skb_queue_head_init(&fnic->frame_queue);
+	skb_queue_head_init(&fnic->tx_queue);
 
 	/* Enable all queues */
 	for (i = 0; i < fnic->raw_wq_count; i++)
@@ -728,6 +730,14 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
 	fc_fabric_login(lp);
 
 	vnic_dev_enable(fnic->vdev);
+
+	err = fnic_request_intr(fnic);
+	if (err) {
+		shost_printk(KERN_ERR, fnic->lport->host,
+			     "Unable to request irq.\n");
+		goto err_out_free_exch_mgr;
+	}
+
 	for (i = 0; i < fnic->intr_count; i++)
 		vnic_intr_unmask(&fnic->intr[i]);
 
@@ -738,8 +748,8 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
 err_out_free_exch_mgr:
 	fc_exch_mgr_free(lp);
 err_out_remove_scsi_host:
-	fc_remove_host(fnic->lport->host);
-	scsi_remove_host(fnic->lport->host);
+	fc_remove_host(lp->host);
+	scsi_remove_host(lp->host);
 err_out_free_rq_buf:
 	for (i = 0; i < fnic->rq_count; i++)
 		vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf);
@@ -752,8 +762,6 @@ err_out_free_ioreq_pool:
 	mempool_destroy(fnic->io_req_pool);
 err_out_free_resources:
 	fnic_free_vnic_resources(fnic);
-err_out_free_intr:
-	fnic_free_intr(fnic);
 err_out_clear_intr:
 	fnic_clear_intr_mode(fnic);
 err_out_dev_close:
@@ -775,6 +783,7 @@ err_out:
 static void __devexit fnic_remove(struct pci_dev *pdev)
 {
 	struct fnic *fnic = pci_get_drvdata(pdev);
+	struct fc_lport *lp = fnic->lport;
 	unsigned long flags;
 
 	/*
@@ -796,6 +805,7 @@ static void __devexit fnic_remove(struct pci_dev *pdev)
 	 */
 	flush_workqueue(fnic_event_queue);
 	skb_queue_purge(&fnic->frame_queue);
+	skb_queue_purge(&fnic->tx_queue);
 
 	/*
 	 * Log off the fabric. This stops all remote ports, dns port,
@@ -808,7 +818,8 @@ static void __devexit fnic_remove(struct pci_dev *pdev)
 	fnic->in_remove = 1;
 	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
 
-	fc_lport_destroy(fnic->lport);
+	fcoe_ctlr_destroy(&fnic->ctlr);
+	fc_lport_destroy(lp);
 
 	/*
 	 * This stops the fnic device, masks all interrupts. Completed
@@ -818,6 +829,7 @@ static void __devexit fnic_remove(struct pci_dev *pdev)
 	fnic_cleanup(fnic);
 
 	BUG_ON(!skb_queue_empty(&fnic->frame_queue));
+	BUG_ON(!skb_queue_empty(&fnic->tx_queue));
 
 	spin_lock_irqsave(&fnic_list_lock, flags);
 	list_del(&fnic->list);
@@ -827,8 +839,8 @@ static void __devexit fnic_remove(struct pci_dev *pdev)
 	scsi_remove_host(fnic->lport->host);
 	fc_exch_mgr_free(fnic->lport);
 	vnic_dev_notify_unset(fnic->vdev);
-	fnic_free_vnic_resources(fnic);
 	fnic_free_intr(fnic);
+	fnic_free_vnic_resources(fnic);
 	fnic_clear_intr_mode(fnic);
 	vnic_dev_close(fnic->vdev);
 	vnic_dev_unregister(fnic->vdev);
@@ -836,7 +848,7 @@ static void __devexit fnic_remove(struct pci_dev *pdev)
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
 	pci_set_drvdata(pdev, NULL);
-	scsi_host_put(fnic->lport->host);
+	scsi_host_put(lp->host);
 }
 
 static struct pci_driver fnic_driver = {
diff --git a/drivers/scsi/fnic/fnic_res.c b/drivers/scsi/fnic/fnic_res.c
index 7ba61ec715d2..50488f8e169d 100644
--- a/drivers/scsi/fnic/fnic_res.c
+++ b/drivers/scsi/fnic/fnic_res.c
@@ -144,10 +144,9 @@ int fnic_get_vnic_config(struct fnic *fnic)
 	c->intr_timer_type = c->intr_timer_type;
 
 	shost_printk(KERN_INFO, fnic->lport->host,
-		     "vNIC MAC addr %02x:%02x:%02x:%02x:%02x:%02x "
+		     "vNIC MAC addr %pM "
 		     "wq/wq_copy/rq %d/%d/%d\n",
-		     fnic->mac_addr[0], fnic->mac_addr[1], fnic->mac_addr[2],
-		     fnic->mac_addr[3], fnic->mac_addr[4], fnic->mac_addr[5],
+		     fnic->ctlr.ctl_src_addr,
 		     c->wq_enet_desc_count, c->wq_copy_desc_count,
 		     c->rq_desc_count);
 	shost_printk(KERN_INFO, fnic->lport->host,
diff --git a/drivers/scsi/fnic/fnic_res.h b/drivers/scsi/fnic/fnic_res.h
index b6f310262534..ef8aaf2156dd 100644
--- a/drivers/scsi/fnic/fnic_res.h
+++ b/drivers/scsi/fnic/fnic_res.h
@@ -51,6 +51,31 @@ static inline void fnic_queue_wq_desc(struct vnic_wq *wq,
 	vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop);
 }
 
+static inline void fnic_queue_wq_eth_desc(struct vnic_wq *wq,
+				      void *os_buf, dma_addr_t dma_addr,
+				      unsigned int len,
+				      int vlan_tag_insert,
+				      unsigned int vlan_tag,
+				      int cq_entry)
+{
+	struct wq_enet_desc *desc = vnic_wq_next_desc(wq);
+
+	wq_enet_desc_enc(desc,
+			 (u64)dma_addr | VNIC_PADDR_TARGET,
+			 (u16)len,
+			 0, /* mss_or_csum_offset */
+			 0, /* fc_eof */
+			 0, /* offload_mode */
+			 1, /* eop */
+			 (u8)cq_entry,
+			 0, /* fcoe_encap */
+			 (u8)vlan_tag_insert,
+			 (u16)vlan_tag,
+			 0 /* loopback */);
+
+	vnic_wq_post(wq, os_buf, dma_addr, len, 1, 1);
+}
+
 static inline void fnic_queue_wq_copy_desc_icmnd_16(struct vnic_wq_copy *wq,
 						    u32 req_id,
 						    u32 lunmap_id, u8 spl_flags,
@@ -58,6 +83,7 @@ static inline void fnic_queue_wq_copy_desc_icmnd_16(struct vnic_wq_copy *wq,
 						    u64 sgl_addr, u64 sns_addr,
 						    u8 crn, u8 pri_ta,
 						    u8 flags, u8 *scsi_cdb,
+						    u8 cdb_len,
 						    u32 data_len, u8 *lun,
 						    u32 d_id, u16 mss,
 						    u32 ratov, u32 edtov)
@@ -82,7 +108,8 @@ static inline void fnic_queue_wq_copy_desc_icmnd_16(struct vnic_wq_copy *wq,
 	desc->u.icmnd_16.pri_ta = pri_ta; 	/* SCSI Pri & Task attribute */
 	desc->u.icmnd_16._resvd1 = 0;           /* reserved: should be 0 */
 	desc->u.icmnd_16.flags = flags;         /* command flags */
-	memcpy(desc->u.icmnd_16.scsi_cdb, scsi_cdb, CDB_16);    /* SCSI CDB */
+	memset(desc->u.icmnd_16.scsi_cdb, 0, CDB_16);
+	memcpy(desc->u.icmnd_16.scsi_cdb, scsi_cdb, cdb_len);    /* SCSI CDB */
 	desc->u.icmnd_16.data_len = data_len;   /* length of data expected */
 	memcpy(desc->u.icmnd_16.lun, lun, LUN_ADDRESS);  /* LUN address */
 	desc->u.icmnd_16._resvd2 = 0;          	/* reserved */
@@ -132,12 +159,37 @@ static inline void fnic_queue_wq_copy_desc_flogi_reg(struct vnic_wq_copy *wq,
 	desc->hdr.tag.u.req_id = req_id;      /* id for this request */
 
 	desc->u.flogi_reg.format = format;
+	desc->u.flogi_reg._resvd = 0;
 	hton24(desc->u.flogi_reg.s_id, s_id);
 	memcpy(desc->u.flogi_reg.gateway_mac, gw_mac, ETH_ALEN);
 
 	vnic_wq_copy_post(wq);
 }
 
+static inline void fnic_queue_wq_copy_desc_fip_reg(struct vnic_wq_copy *wq,
+						   u32 req_id, u32 s_id,
+						   u8 *fcf_mac, u8 *ha_mac,
+						   u32 r_a_tov, u32 e_d_tov)
+{
+	struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq);
+
+	desc->hdr.type = FCPIO_FLOGI_FIP_REG; /* enum fcpio_type */
+	desc->hdr.status = 0;                 /* header status entry */
+	desc->hdr._resvd = 0;                 /* reserved */
+	desc->hdr.tag.u.req_id = req_id;      /* id for this request */
+
+	desc->u.flogi_fip_reg._resvd0 = 0;
+	hton24(desc->u.flogi_fip_reg.s_id, s_id);
+	memcpy(desc->u.flogi_fip_reg.fcf_mac, fcf_mac, ETH_ALEN);
+	desc->u.flogi_fip_reg._resvd1 = 0;
+	desc->u.flogi_fip_reg.r_a_tov = r_a_tov;
+	desc->u.flogi_fip_reg.e_d_tov = e_d_tov;
+	memcpy(desc->u.flogi_fip_reg.ha_mac, ha_mac, ETH_ALEN);
+	desc->u.flogi_fip_reg._resvd2 = 0;
+
+	vnic_wq_copy_post(wq);
+}
+
 static inline void fnic_queue_wq_copy_desc_fw_reset(struct vnic_wq_copy *wq,
 						    u32 req_id)
 {
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index bfc996971b81..65a39b0f6dc2 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -174,6 +174,9 @@ int fnic_fw_reset_handler(struct fnic *fnic)
 	int ret = 0;
 	unsigned long flags;
 
+	skb_queue_purge(&fnic->frame_queue);
+	skb_queue_purge(&fnic->tx_queue);
+
 	spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
 
 	if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
@@ -200,9 +203,11 @@ int fnic_fw_reset_handler(struct fnic *fnic)
  * fnic_flogi_reg_handler
  * Routine to send flogi register msg to fw
  */
-int fnic_flogi_reg_handler(struct fnic *fnic)
+int fnic_flogi_reg_handler(struct fnic *fnic, u32 fc_id)
 {
 	struct vnic_wq_copy *wq = &fnic->wq_copy[0];
+	enum fcpio_flogi_reg_format_type format;
+	struct fc_lport *lp = fnic->lport;
 	u8 gw_mac[ETH_ALEN];
 	int ret = 0;
 	unsigned long flags;
@@ -217,23 +222,32 @@ int fnic_flogi_reg_handler(struct fnic *fnic)
 		goto flogi_reg_ioreq_end;
 	}
 
-	if (fnic->fcoui_mode)
+	if (fnic->ctlr.map_dest) {
 		memset(gw_mac, 0xff, ETH_ALEN);
-	else
-		memcpy(gw_mac, fnic->dest_addr, ETH_ALEN);
+		format = FCPIO_FLOGI_REG_DEF_DEST;
+	} else {
+		memcpy(gw_mac, fnic->ctlr.dest_addr, ETH_ALEN);
+		format = FCPIO_FLOGI_REG_GW_DEST;
+	}
 
-	fnic_queue_wq_copy_desc_flogi_reg(wq, SCSI_NO_TAG,
-					  FCPIO_FLOGI_REG_GW_DEST,
-					  fnic->s_id,
-					  gw_mac);
+	if ((fnic->config.flags & VFCF_FIP_CAPABLE) && !fnic->ctlr.map_dest) {
+		fnic_queue_wq_copy_desc_fip_reg(wq, SCSI_NO_TAG,
+						fc_id, gw_mac,
+						fnic->data_src_addr,
+						lp->r_a_tov, lp->e_d_tov);
+		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+			      "FLOGI FIP reg issued fcid %x src %pM dest %pM\n",
+			      fc_id, fnic->data_src_addr, gw_mac);
+	} else {
+		fnic_queue_wq_copy_desc_flogi_reg(wq, SCSI_NO_TAG,
+						  format, fc_id, gw_mac);
+		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+			      "FLOGI reg issued fcid %x map %d dest %pM\n",
+			      fc_id, fnic->ctlr.map_dest, gw_mac);
+	}
 
 flogi_reg_ioreq_end:
 	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
-
-	if (!ret)
-		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
-			      "flog reg issued\n");
-
 	return ret;
 }
 
@@ -319,7 +333,8 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
 					 0, /* scsi cmd ref, always 0 */
 					 pri_tag, /* scsi pri and tag */
 					 flags,	/* command flags */
-					 sc->cmnd, scsi_bufflen(sc),
+					 sc->cmnd, sc->cmd_len,
+					 scsi_bufflen(sc),
 					 fc_lun.scsi_lun, io_req->port_id,
 					 rport->maxframe_size, rp->r_a_tov,
 					 rp->e_d_tov);
@@ -452,7 +467,6 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
 	u8 hdr_status;
 	struct fcpio_tag tag;
 	int ret = 0;
-	struct fc_frame *flogi;
 	unsigned long flags;
 
 	fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
@@ -462,9 +476,6 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
 
 	spin_lock_irqsave(&fnic->fnic_lock, flags);
 
-	flogi = fnic->flogi;
-	fnic->flogi = NULL;
-
 	/* fnic should be in FC_TRANS_ETH_MODE */
 	if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) {
 		/* Check status of reset completion */
@@ -505,17 +516,14 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
 	 * free the flogi frame. Else, send it out
 	 */
 	if (fnic->remove_wait || ret) {
-		fnic->flogi_oxid = FC_XID_UNKNOWN;
 		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
-		if (flogi)
-			dev_kfree_skb_irq(fp_skb(flogi));
+		skb_queue_purge(&fnic->tx_queue);
 		goto reset_cmpl_handler_end;
 	}
 
 	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
 
-	if (flogi)
-		ret = fnic_send_frame(fnic, flogi);
+	fnic_flush_tx(fnic);
 
  reset_cmpl_handler_end:
 	return ret;
@@ -532,18 +540,13 @@ static int fnic_fcpio_flogi_reg_cmpl_handler(struct fnic *fnic,
 	u8 hdr_status;
 	struct fcpio_tag tag;
 	int ret = 0;
-	struct fc_frame *flogi_resp = NULL;
 	unsigned long flags;
-	struct sk_buff *skb;
 
 	fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
 
 	/* Update fnic state based on status of flogi reg completion */
 	spin_lock_irqsave(&fnic->fnic_lock, flags);
 
-	flogi_resp = fnic->flogi_resp;
-	fnic->flogi_resp = NULL;
-
 	if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE) {
 
 		/* Check flogi registration completion status */
@@ -567,25 +570,17 @@ static int fnic_fcpio_flogi_reg_cmpl_handler(struct fnic *fnic,
 		ret = -1;
 	}
 
-	/* Successful flogi reg cmpl, pass frame to LibFC */
-	if (!ret && flogi_resp) {
+	if (!ret) {
 		if (fnic->stop_rx_link_events) {
 			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
 			goto reg_cmpl_handler_end;
 		}
-		skb = (struct sk_buff *)flogi_resp;
-		/* Use fr_flags to indicate whether flogi resp or not */
-		fr_flags(flogi_resp) = 1;
-		fr_dev(flogi_resp) = fnic->lport;
 		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
 
-		skb_queue_tail(&fnic->frame_queue, skb);
+		fnic_flush_tx(fnic);
 		queue_work(fnic_event_queue, &fnic->frame_work);
-
 	} else {
 		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
-		if (flogi_resp)
-			dev_kfree_skb_irq(fp_skb(flogi_resp));
 	}
 
 reg_cmpl_handler_end:
@@ -907,6 +902,7 @@ static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev,
 		break;
 
 	case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */
+	case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */
 		ret = fnic_fcpio_flogi_reg_cmpl_handler(fnic, desc);
 		break;
 
@@ -1224,22 +1220,6 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
 
 }
 
-static void fnic_block_error_handler(struct scsi_cmnd *sc)
-{
-	struct Scsi_Host *shost = sc->device->host;
-	struct fc_rport *rport = starget_to_rport(scsi_target(sc->device));
-	unsigned long flags;
-
-	spin_lock_irqsave(shost->host_lock, flags);
-	while (rport->port_state == FC_PORTSTATE_BLOCKED) {
-		spin_unlock_irqrestore(shost->host_lock, flags);
-		msleep(1000);
-		spin_lock_irqsave(shost->host_lock, flags);
-	}
-	spin_unlock_irqrestore(shost->host_lock, flags);
-
-}
-
 /*
  * This function is exported to SCSI for sending abort cmnds.
  * A SCSI IO is represented by a io_req in the driver.
@@ -1259,7 +1239,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
 	DECLARE_COMPLETION_ONSTACK(tm_done);
 
 	/* Wait for rport to unblock */
-	fnic_block_error_handler(sc);
+	fc_block_scsi_eh(sc);
 
 	/* Get local-port, check ready and link up */
 	lp = shost_priv(sc->device->host);
@@ -1541,7 +1521,7 @@ int fnic_device_reset(struct scsi_cmnd *sc)
 	DECLARE_COMPLETION_ONSTACK(tm_done);
 
 	/* Wait for rport to unblock */
-	fnic_block_error_handler(sc);
+	fc_block_scsi_eh(sc);
 
 	/* Get local-port, check ready and link up */
 	lp = shost_priv(sc->device->host);
@@ -1762,7 +1742,7 @@ void fnic_scsi_abort_io(struct fc_lport *lp)
 	fnic->remove_wait = &remove_wait;
 	old_state = fnic->state;
 	fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
-	vnic_dev_del_addr(fnic->vdev, fnic->data_src_addr);
+	fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr);
 	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
 
 	err = fnic_fw_reset_handler(fnic);
@@ -1802,7 +1782,7 @@ void fnic_scsi_cleanup(struct fc_lport *lp)
 	spin_lock_irqsave(&fnic->fnic_lock, flags);
 	old_state = fnic->state;
 	fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
-	vnic_dev_del_addr(fnic->vdev, fnic->data_src_addr);
+	fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr);
 	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
 
 	if (fnic_fw_reset_handler(fnic)) {
diff --git a/drivers/scsi/fnic/vnic_scsi.h b/drivers/scsi/fnic/vnic_scsi.h
index 46baa5254001..fbb55364e272 100644
--- a/drivers/scsi/fnic/vnic_scsi.h
+++ b/drivers/scsi/fnic/vnic_scsi.h
@@ -95,5 +95,6 @@ struct vnic_fc_config {
 
 #define VFCF_FCP_SEQ_LVL_ERR	0x1	/* Enable FCP-2 Error Recovery */
 #define VFCF_PERBI		0x2	/* persistent binding info available */
+#define VFCF_FIP_CAPABLE	0x4	/* firmware can handle FIP */
 
 #endif /* _VNIC_SCSI_H_ */
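
For context: a fnic would presumably test this capability bit at probe time to pick between firmware-handled FIP and the pre-FIP fallback. A minimal sketch, assuming the device's vnic_fc_config is cached at fnic->config (the flag is from this header; the surrounding field names are illustrative):

	if (fnic->config.flags & VFCF_FIP_CAPABLE)
		shost_printk(KERN_INFO, fnic->lport->host,
			     "firmware supports FIP\n");
	else
		shost_printk(KERN_INFO, fnic->lport->host,
			     "firmware uses non-FIP mode\n");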
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index c968cc31cd86..554626e18062 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -180,14 +180,20 @@ void scsi_remove_host(struct Scsi_Host *shost)
 EXPORT_SYMBOL(scsi_remove_host);
 
 /**
- * scsi_add_host - add a scsi host
+ * scsi_add_host_with_dma - add a scsi host with dma device
  * @shost:	scsi host pointer to add
  * @dev:	a struct device of type scsi class
+ * @dma_dev:	dma device for the host
+ *
+ * Note: You rarely need to worry about this unless you're in a
+ * virtualised host environment; most drivers should use the simpler
+ * scsi_add_host() function instead.
  *
  * Return value: 
  * 	0 on success / != 0 for error
  **/
-int scsi_add_host(struct Scsi_Host *shost, struct device *dev)
+int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
+			   struct device *dma_dev)
 {
 	struct scsi_host_template *sht = shost->hostt;
 	int error = -EINVAL;
@@ -207,6 +213,7 @@ int scsi_add_host(struct Scsi_Host *shost, struct device *dev)
 
 	if (!shost->shost_gendev.parent)
 		shost->shost_gendev.parent = dev ? dev : &platform_bus;
+	shost->dma_dev = dma_dev;
 
 	error = device_add(&shost->shost_gendev);
 	if (error)
@@ -262,7 +269,7 @@ int scsi_add_host(struct Scsi_Host *shost, struct device *dev)
  fail:
 	return error;
 }
-EXPORT_SYMBOL(scsi_add_host);
+EXPORT_SYMBOL(scsi_add_host_with_dma);
 
 static void scsi_host_dev_release(struct device *dev)
 {
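
The export rename above only works for existing callers if scsi_add_host() survives as a wrapper; the companion header change is outside this hunk, but it would plausibly be nothing more than:

	/* sketch of the <scsi/scsi_host.h> side of this change */
	static inline int scsi_add_host(struct Scsi_Host *host,
					struct device *dev)
	{
		return scsi_add_host_with_dma(host, dev, dev);
	}

Drivers that DMA through a different device than their sysfs parent (the virtualised-host case the comment mentions) pass that device as @dma_dev; everyone else keeps calling scsi_add_host().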
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
index a0e7e711ff9d..901a3daeb36b 100644
--- a/drivers/scsi/hptiop.c
+++ b/drivers/scsi/hptiop.c
@@ -861,10 +861,13 @@ static int hptiop_reset(struct scsi_cmnd *scp)
 }
 
 static int hptiop_adjust_disk_queue_depth(struct scsi_device *sdev,
-						int queue_depth)
+					  int queue_depth, int reason)
 {
 	struct hptiop_hba *hba = (struct hptiop_hba *)sdev->host->hostdata;
 
+	if (reason != SCSI_QDEPTH_DEFAULT)
+		return -EOPNOTSUPP;
+
 	if (queue_depth > hba->max_requests)
 		queue_depth = hba->max_requests;
 	scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
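
The new @reason argument recurs throughout this series: it tells the LLD why the midlayer wants the depth changed. A sketch of the contract for a driver that only supports user-initiated (sysfs) changes, assuming the SCSI_QDEPTH_* reason codes this series introduces:

	static int example_change_queue_depth(struct scsi_device *sdev,
					      int qdepth, int reason)
	{
		switch (reason) {
		case SCSI_QDEPTH_DEFAULT:	/* user/sysfs request */
			scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev),
						qdepth);
			break;
		default:	/* e.g. QFULL tracking or ramp-up */
			return -EOPNOTSUPP;
		}
		return sdev->queue_depth;
	}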
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index bb2c696c006a..87b536a97cb4 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -39,6 +39,7 @@
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_tcq.h>
 #include <scsi/scsi_transport_fc.h>
+#include <scsi/scsi_bsg_fc.h>
 #include "ibmvfc.h"
 
 static unsigned int init_timeout = IBMVFC_INIT_TIMEOUT;
@@ -558,12 +559,11 @@ static void ibmvfc_link_down(struct ibmvfc_host *vhost,
 /**
  * ibmvfc_init_host - Start host initialization
  * @vhost:		ibmvfc host struct
- * @relogin:	is this a re-login?
  *
  * Return value:
  *	nothing
  **/
-static void ibmvfc_init_host(struct ibmvfc_host *vhost, int relogin)
+static void ibmvfc_init_host(struct ibmvfc_host *vhost)
 {
 	struct ibmvfc_target *tgt;
 
@@ -577,10 +577,8 @@ static void ibmvfc_init_host(struct ibmvfc_host *vhost, int relogin)
 	}
 
 	if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
-		if (!relogin) {
-			memset(vhost->async_crq.msgs, 0, PAGE_SIZE);
-			vhost->async_crq.cur = 0;
-		}
+		memset(vhost->async_crq.msgs, 0, PAGE_SIZE);
+		vhost->async_crq.cur = 0;
 
 		list_for_each_entry(tgt, &vhost->targets, queue)
 			ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
@@ -1678,6 +1676,276 @@ static void ibmvfc_sync_completion(struct ibmvfc_event *evt)
 }
 
 /**
+ * ibmvfc_bsg_timeout_done - Completion handler for cancelling BSG commands
+ * @evt:	struct ibmvfc_event
+ *
+ **/
+static void ibmvfc_bsg_timeout_done(struct ibmvfc_event *evt)
+{
+	struct ibmvfc_host *vhost = evt->vhost;
+
+	ibmvfc_free_event(evt);
+	vhost->aborting_passthru = 0;
+	dev_info(vhost->dev, "Passthru command cancelled\n");
+}
+
+/**
+ * ibmvfc_bsg_timeout - Handle a BSG timeout
+ * @job:	struct fc_bsg_job that timed out
+ *
+ * Returns:
+ *	0 on success / other on failure
+ **/
+static int ibmvfc_bsg_timeout(struct fc_bsg_job *job)
+{
+	struct ibmvfc_host *vhost = shost_priv(job->shost);
+	unsigned long port_id = (unsigned long)job->dd_data;
+	struct ibmvfc_event *evt;
+	struct ibmvfc_tmf *tmf;
+	unsigned long flags;
+	int rc;
+
+	ENTER;
+	spin_lock_irqsave(vhost->host->host_lock, flags);
+	if (vhost->aborting_passthru || vhost->state != IBMVFC_ACTIVE) {
+		__ibmvfc_reset_host(vhost);
+		spin_unlock_irqrestore(vhost->host->host_lock, flags);
+		return 0;
+	}
+
+	vhost->aborting_passthru = 1;
+	evt = ibmvfc_get_event(vhost);
+	ibmvfc_init_event(evt, ibmvfc_bsg_timeout_done, IBMVFC_MAD_FORMAT);
+
+	tmf = &evt->iu.tmf;
+	memset(tmf, 0, sizeof(*tmf));
+	tmf->common.version = 1;
+	tmf->common.opcode = IBMVFC_TMF_MAD;
+	tmf->common.length = sizeof(*tmf);
+	tmf->scsi_id = port_id;
+	tmf->cancel_key = IBMVFC_PASSTHRU_CANCEL_KEY;
+	tmf->my_cancel_key = IBMVFC_INTERNAL_CANCEL_KEY;
+	rc = ibmvfc_send_event(evt, vhost, default_timeout);
+
+	if (rc != 0) {
+		vhost->aborting_passthru = 0;
+		dev_err(vhost->dev, "Failed to send cancel event. rc=%d\n", rc);
+		rc = -EIO;
+	} else
+		dev_info(vhost->dev, "Cancelling passthru command to port id 0x%lx\n",
+			 port_id);
+
+	spin_unlock_irqrestore(vhost->host->host_lock, flags);
+
+	LEAVE;
+	return rc;
+}
+
+/**
+ * ibmvfc_bsg_plogi - PLOGI into a target to handle a BSG command
+ * @vhost:		struct ibmvfc_host to send command
+ * @port_id:	port ID to send command
+ *
+ * Returns:
+ *	0 on success / other on failure
+ **/
+static int ibmvfc_bsg_plogi(struct ibmvfc_host *vhost, unsigned int port_id)
+{
+	struct ibmvfc_port_login *plogi;
+	struct ibmvfc_target *tgt;
+	struct ibmvfc_event *evt;
+	union ibmvfc_iu rsp_iu;
+	unsigned long flags;
+	int rc = 0, issue_login = 1;
+
+	ENTER;
+	spin_lock_irqsave(vhost->host->host_lock, flags);
+	list_for_each_entry(tgt, &vhost->targets, queue) {
+		if (tgt->scsi_id == port_id) {
+			issue_login = 0;
+			break;
+		}
+	}
+
+	if (!issue_login)
+		goto unlock_out;
+	if (unlikely((rc = ibmvfc_host_chkready(vhost))))
+		goto unlock_out;
+
+	evt = ibmvfc_get_event(vhost);
+	ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
+	plogi = &evt->iu.plogi;
+	memset(plogi, 0, sizeof(*plogi));
+	plogi->common.version = 1;
+	plogi->common.opcode = IBMVFC_PORT_LOGIN;
+	plogi->common.length = sizeof(*plogi);
+	plogi->scsi_id = port_id;
+	evt->sync_iu = &rsp_iu;
+	init_completion(&evt->comp);
+
+	rc = ibmvfc_send_event(evt, vhost, default_timeout);
+	spin_unlock_irqrestore(vhost->host->host_lock, flags);
+
+	if (rc)
+		return -EIO;
+
+	wait_for_completion(&evt->comp);
+
+	if (rsp_iu.plogi.common.status)
+		rc = -EIO;
+
+	spin_lock_irqsave(vhost->host->host_lock, flags);
+	ibmvfc_free_event(evt);
+unlock_out:
+	spin_unlock_irqrestore(vhost->host->host_lock, flags);
+	LEAVE;
+	return rc;
+}
+
+/**
+ * ibmvfc_bsg_request - Handle a BSG request
+ * @job:	struct fc_bsg_job to be executed
+ *
+ * Returns:
+ *	0 on success / other on failure
+ **/
+static int ibmvfc_bsg_request(struct fc_bsg_job *job)
+{
+	struct ibmvfc_host *vhost = shost_priv(job->shost);
+	struct fc_rport *rport = job->rport;
+	struct ibmvfc_passthru_mad *mad;
+	struct ibmvfc_event *evt;
+	union ibmvfc_iu rsp_iu;
+	unsigned long flags, port_id = -1;
+	unsigned int code = job->request->msgcode;
+	int rc = 0, req_seg, rsp_seg, issue_login = 0;
+	u32 fc_flags, rsp_len;
+
+	ENTER;
+	job->reply->reply_payload_rcv_len = 0;
+	if (rport)
+		port_id = rport->port_id;
+
+	switch (code) {
+	case FC_BSG_HST_ELS_NOLOGIN:
+		port_id = (job->request->rqst_data.h_els.port_id[0] << 16) |
+			(job->request->rqst_data.h_els.port_id[1] << 8) |
+			job->request->rqst_data.h_els.port_id[2];
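+		/* fall through */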
+	case FC_BSG_RPT_ELS:
+		fc_flags = IBMVFC_FC_ELS;
+		break;
+	case FC_BSG_HST_CT:
+		issue_login = 1;
+		port_id = (job->request->rqst_data.h_ct.port_id[0] << 16) |
+			(job->request->rqst_data.h_ct.port_id[1] << 8) |
+			job->request->rqst_data.h_ct.port_id[2];
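+		/* fall through */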
+	case FC_BSG_RPT_CT:
+		fc_flags = IBMVFC_FC_CT_IU;
+		break;
+	default:
+		return -ENOTSUPP;
+	}
+
+	if (port_id == -1)
+		return -EINVAL;
+	if (!mutex_trylock(&vhost->passthru_mutex))
+		return -EBUSY;
+
+	job->dd_data = (void *)port_id;
+	req_seg = dma_map_sg(vhost->dev, job->request_payload.sg_list,
+			     job->request_payload.sg_cnt, DMA_TO_DEVICE);
+
+	if (!req_seg) {
+		mutex_unlock(&vhost->passthru_mutex);
+		return -ENOMEM;
+	}
+
+	rsp_seg = dma_map_sg(vhost->dev, job->reply_payload.sg_list,
+			     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+
+	if (!rsp_seg) {
+		dma_unmap_sg(vhost->dev, job->request_payload.sg_list,
+			     job->request_payload.sg_cnt, DMA_TO_DEVICE);
+		mutex_unlock(&vhost->passthru_mutex);
+		return -ENOMEM;
+	}
+
+	if (req_seg > 1 || rsp_seg > 1) {
+		rc = -EINVAL;
+		goto out;
+	}
+
+	if (issue_login)
+		rc = ibmvfc_bsg_plogi(vhost, port_id);
+
+	spin_lock_irqsave(vhost->host->host_lock, flags);
+
+	if (unlikely(rc || (rport && (rc = fc_remote_port_chkready(rport)))) ||
+	    unlikely((rc = ibmvfc_host_chkready(vhost)))) {
+		spin_unlock_irqrestore(vhost->host->host_lock, flags);
+		goto out;
+	}
+
+	evt = ibmvfc_get_event(vhost);
+	ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
+	mad = &evt->iu.passthru;
+
+	memset(mad, 0, sizeof(*mad));
+	mad->common.version = 1;
+	mad->common.opcode = IBMVFC_PASSTHRU;
+	mad->common.length = sizeof(*mad) - sizeof(mad->fc_iu) - sizeof(mad->iu);
+
+	mad->cmd_ioba.va = (u64)evt->crq.ioba +
+		offsetof(struct ibmvfc_passthru_mad, iu);
+	mad->cmd_ioba.len = sizeof(mad->iu);
+
+	mad->iu.cmd_len = job->request_payload.payload_len;
+	mad->iu.rsp_len = job->reply_payload.payload_len;
+	mad->iu.flags = fc_flags;
+	mad->iu.cancel_key = IBMVFC_PASSTHRU_CANCEL_KEY;
+
+	mad->iu.cmd.va = sg_dma_address(job->request_payload.sg_list);
+	mad->iu.cmd.len = sg_dma_len(job->request_payload.sg_list);
+	mad->iu.rsp.va = sg_dma_address(job->reply_payload.sg_list);
+	mad->iu.rsp.len = sg_dma_len(job->reply_payload.sg_list);
+	mad->iu.scsi_id = port_id;
+	mad->iu.tag = (u64)evt;
+	rsp_len = mad->iu.rsp.len;
+
+	evt->sync_iu = &rsp_iu;
+	init_completion(&evt->comp);
+	rc = ibmvfc_send_event(evt, vhost, 0);
+	spin_unlock_irqrestore(vhost->host->host_lock, flags);
+
+	if (rc) {
+		rc = -EIO;
+		goto out;
+	}
+
+	wait_for_completion(&evt->comp);
+
+	if (rsp_iu.passthru.common.status)
+		rc = -EIO;
+	else
+		job->reply->reply_payload_rcv_len = rsp_len;
+
+	spin_lock_irqsave(vhost->host->host_lock, flags);
+	ibmvfc_free_event(evt);
+	spin_unlock_irqrestore(vhost->host->host_lock, flags);
+	job->reply->result = rc;
+	job->job_done(job);
+	rc = 0;
+out:
+	dma_unmap_sg(vhost->dev, job->request_payload.sg_list,
+		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
+	dma_unmap_sg(vhost->dev, job->reply_payload.sg_list,
+		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+	mutex_unlock(&vhost->passthru_mutex);
+	LEAVE;
+	return rc;
+}
+
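
With .bsg_request and .bsg_timeout wired into the transport template (see the fc_function_template hunk further down), these passthru MADs become reachable from userspace through the FC host's bsg node. A hedged sketch of a caller, assuming the standard sg_io_v4 BSG interface, that the scsi_bsg_fc.h structures are visible to userspace, and an illustrative device path:

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <scsi/sg.h>		/* SG_IO */
	#include <linux/bsg.h>		/* struct sg_io_v4 */
	#include <scsi/scsi_bsg_fc.h>	/* struct fc_bsg_request/reply */

	int send_host_ct(const char *bsg_path, void *req, int req_len,
			 void *rsp, int rsp_len)
	{
		struct fc_bsg_request bsg_req;
		struct fc_bsg_reply bsg_rep;
		struct sg_io_v4 io;
		int fd, rc;

		fd = open(bsg_path, O_RDWR);	/* e.g. "/dev/bsg/fc_host0" */
		if (fd < 0)
			return -1;

		memset(&bsg_req, 0, sizeof(bsg_req));
		bsg_req.msgcode = FC_BSG_HST_CT;  /* routed to ibmvfc_bsg_request() */
		bsg_req.rqst_data.h_ct.port_id[0] = 0xff; /* 0xfffffc: directory */
		bsg_req.rqst_data.h_ct.port_id[1] = 0xff; /* server, illustrative */
		bsg_req.rqst_data.h_ct.port_id[2] = 0xfc;

		memset(&io, 0, sizeof(io));
		io.guard = 'Q';
		io.protocol = BSG_PROTOCOL_SCSI;
		io.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
		io.request_len = sizeof(bsg_req);
		io.request = (unsigned long)&bsg_req;
		io.dout_xfer_len = req_len;		/* CT IU to send */
		io.dout_xferp = (unsigned long)req;
		io.din_xfer_len = rsp_len;		/* buffer for the response */
		io.din_xferp = (unsigned long)rsp;
		io.response = (unsigned long)&bsg_rep;
		io.max_response_len = sizeof(bsg_rep);

		rc = ioctl(fd, SG_IO, &io);	/* reply_payload_rcv_len in bsg_rep */
		close(fd);
		return rc;
	}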
+/**
  * ibmvfc_reset_device - Reset the device with the specified reset type
  * @sdev:	scsi device to reset
  * @type:	reset type
@@ -1731,7 +1999,10 @@ static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
 	sdev_printk(KERN_INFO, sdev, "Resetting %s\n", desc);
 	wait_for_completion(&evt->comp);
 
-	if (rsp_iu.cmd.status) {
+	if (rsp_iu.cmd.status)
+		rsp_code = ibmvfc_get_err_result(&rsp_iu.cmd);
+
+	if (rsp_code) {
 		if (fc_rsp->flags & FCP_RSP_LEN_VALID)
 			rsp_code = fc_rsp->data.info.rsp_code;
 
@@ -1820,7 +2091,10 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev)
 	sdev_printk(KERN_INFO, sdev, "Aborting outstanding commands\n");
 	wait_for_completion(&evt->comp);
 
-	if (rsp_iu.cmd.status) {
+	if (rsp_iu.cmd.status)
+		rsp_code = ibmvfc_get_err_result(&rsp_iu.cmd);
+
+	if (rsp_code) {
 		if (fc_rsp->flags & FCP_RSP_LEN_VALID)
 			rsp_code = fc_rsp->data.info.rsp_code;
 
@@ -2061,12 +2335,24 @@ static int ibmvfc_eh_device_reset_handler(struct scsi_cmnd *cmd)
 }
 
 /**
- * ibmvfc_dev_cancel_all - Device iterated cancel all function
+ * ibmvfc_dev_cancel_all_abts - Device iterated cancel all function
  * @sdev:	scsi device struct
  * @data:	return code
  *
  **/
-static void ibmvfc_dev_cancel_all(struct scsi_device *sdev, void *data)
+static void ibmvfc_dev_cancel_all_abts(struct scsi_device *sdev, void *data)
+{
+	unsigned long *rc = data;
+	*rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET);
+}
+
+/**
+ * ibmvfc_dev_cancel_all_reset - Device iterated cancel all function
+ * @sdev:	scsi device struct
+ * @data:	return code
+ *
+ **/
+static void ibmvfc_dev_cancel_all_reset(struct scsi_device *sdev, void *data)
 {
 	unsigned long *rc = data;
 	*rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_TGT_RESET);
@@ -2102,7 +2388,7 @@ static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd)
 
 	ENTER;
 	ibmvfc_wait_while_resetting(vhost);
-	starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all);
+	starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_reset);
 	reset_rc = ibmvfc_reset_device(sdev, IBMVFC_TARGET_RESET, "target");
 
 	if (!cancel_rc && !reset_rc)
@@ -2144,7 +2430,7 @@ static void ibmvfc_terminate_rport_io(struct fc_rport *rport)
 	int rc = FAILED;
 
 	ENTER;
-	starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all);
+	starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_abts);
 	starget_for_each_device(starget, &abort_rc, ibmvfc_dev_abort_all);
 
 	if (!cancel_rc && !abort_rc)
@@ -2297,13 +2583,13 @@ static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost)
 			/* Send back a response */
 			rc = ibmvfc_send_crq_init_complete(vhost);
 			if (rc == 0)
-				ibmvfc_init_host(vhost, 0);
+				ibmvfc_init_host(vhost);
 			else
 				dev_err(vhost->dev, "Unable to send init rsp. rc=%ld\n", rc);
 			break;
 		case IBMVFC_CRQ_INIT_COMPLETE:
 			dev_info(vhost->dev, "Partner initialization complete\n");
-			ibmvfc_init_host(vhost, 0);
+			ibmvfc_init_host(vhost);
 			break;
 		default:
 			dev_err(vhost->dev, "Unknown crq message type: %d\n", crq->format);
@@ -2478,12 +2764,17 @@ static int ibmvfc_slave_configure(struct scsi_device *sdev)
  * ibmvfc_change_queue_depth - Change the device's queue depth
  * @sdev:	scsi device struct
  * @qdepth:	depth to set
+ * @reason:	calling context
  *
  * Return value:
  * 	actual depth set
  **/
-static int ibmvfc_change_queue_depth(struct scsi_device *sdev, int qdepth)
+static int ibmvfc_change_queue_depth(struct scsi_device *sdev, int qdepth,
+				     int reason)
 {
+	if (reason != SCSI_QDEPTH_DEFAULT)
+		return -EOPNOTSUPP;
+
 	if (qdepth > IBMVFC_MAX_CMDS_PER_LUN)
 		qdepth = IBMVFC_MAX_CMDS_PER_LUN;
 
@@ -3725,7 +4016,7 @@ static void ibmvfc_npiv_logout_done(struct ibmvfc_event *evt)
 	case IBMVFC_MAD_SUCCESS:
 		if (list_empty(&vhost->sent) &&
 		    vhost->action == IBMVFC_HOST_ACTION_LOGO_WAIT) {
-			ibmvfc_init_host(vhost, 0);
+			ibmvfc_init_host(vhost);
 			return;
 		}
 		break;
@@ -3903,6 +4194,8 @@ static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt)
 			rport->supported_classes |= FC_COS_CLASS2;
 		if (tgt->service_parms.class3_parms[0] & 0x80000000)
 			rport->supported_classes |= FC_COS_CLASS3;
+		if (rport->rqst_q)
+			blk_queue_max_hw_segments(rport->rqst_q, 1);
 	} else
 		tgt_dbg(tgt, "rport add failed\n");
 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
@@ -4342,6 +4635,7 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 	init_waitqueue_head(&vhost->work_wait_q);
 	init_waitqueue_head(&vhost->init_wait_q);
 	INIT_WORK(&vhost->rport_add_work_q, ibmvfc_rport_add_thread);
+	mutex_init(&vhost->passthru_mutex);
 
 	if ((rc = ibmvfc_alloc_mem(vhost)))
 		goto free_scsi_host;
@@ -4374,6 +4668,8 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 		goto remove_shost;
 	}
 
+	if (shost_to_fc_host(shost)->rqst_q)
+		blk_queue_max_hw_segments(shost_to_fc_host(shost)->rqst_q, 1);
 	dev_set_drvdata(dev, vhost);
 	spin_lock(&ibmvfc_driver_lock);
 	list_add_tail(&vhost->queue, &ibmvfc_head);
@@ -4414,7 +4710,11 @@ static int ibmvfc_remove(struct vio_dev *vdev)
 
 	ENTER;
 	ibmvfc_remove_trace_file(&vhost->host->shost_dev.kobj, &ibmvfc_trace_attr);
+
+	spin_lock_irqsave(vhost->host->host_lock, flags);
 	ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
+	spin_unlock_irqrestore(vhost->host->host_lock, flags);
+
 	ibmvfc_wait_while_resetting(vhost);
 	ibmvfc_release_crq_queue(vhost);
 	kthread_stop(vhost->work_thread);
@@ -4498,6 +4798,9 @@ static struct fc_function_template ibmvfc_transport_functions = {
 
 	.get_starget_port_id = ibmvfc_get_starget_port_id,
 	.show_starget_port_id = 1,
+
+	.bsg_request = ibmvfc_bsg_request,
+	.bsg_timeout = ibmvfc_bsg_timeout,
 };
 
 /**
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index 007fa1c9ef14..d25106a958d7 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -29,8 +29,8 @@
 #include "viosrp.h"
 
 #define IBMVFC_NAME	"ibmvfc"
-#define IBMVFC_DRIVER_VERSION		"1.0.6"
-#define IBMVFC_DRIVER_DATE		"(May 28, 2009)"
+#define IBMVFC_DRIVER_VERSION		"1.0.7"
+#define IBMVFC_DRIVER_DATE		"(October 16, 2009)"
 
 #define IBMVFC_DEFAULT_TIMEOUT	60
 #define IBMVFC_ADISC_CANCEL_TIMEOUT	45
@@ -58,9 +58,10 @@
  * 1 for ERP
  * 1 for initialization
  * 1 for NPIV Logout
+ * 2 for BSG passthru
  * 2 for each discovery thread
  */
-#define IBMVFC_NUM_INTERNAL_REQ	(1 + 1 + 1 + (disc_threads * 2))
+#define IBMVFC_NUM_INTERNAL_REQ	(1 + 1 + 1 + 2 + (disc_threads * 2))
 
 #define IBMVFC_MAD_SUCCESS		0x00
 #define IBMVFC_MAD_NOT_SUPPORTED	0xF1
@@ -466,7 +467,10 @@ struct ibmvfc_passthru_iu {
 	u16 error;
 	u32 flags;
 #define IBMVFC_FC_ELS		0x01
+#define IBMVFC_FC_CT_IU		0x02
 	u32 cancel_key;
+#define IBMVFC_PASSTHRU_CANCEL_KEY	0x80000000
+#define IBMVFC_INTERNAL_CANCEL_KEY	0x80000001
 	u32 reserved;
 	struct srp_direct_buf cmd;
 	struct srp_direct_buf rsp;
@@ -693,6 +697,7 @@ struct ibmvfc_host {
 	int disc_buf_sz;
 	int log_level;
 	struct ibmvfc_discover_targets_buf *disc_buf;
+	struct mutex passthru_mutex;
 	int task_set;
 	int init_retries;
 	int discovery_threads;
@@ -702,6 +707,7 @@ struct ibmvfc_host {
 	int delay_init;
 	int scan_complete;
 	int logged_in;
+	int aborting_passthru;
 	int events_to_log;
 #define IBMVFC_AE_LINKUP	0x0001
 #define IBMVFC_AE_LINKDOWN	0x0002
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index d9b0e9d31983..e475b7957c2d 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -1637,12 +1637,17 @@ static int ibmvscsi_slave_configure(struct scsi_device *sdev)
  * ibmvscsi_change_queue_depth - Change the device's queue depth
  * @sdev:	scsi device struct
  * @qdepth:	depth to set
+ * @reason:	calling context
  *
  * Return value:
  * 	actual depth set
  **/
-static int ibmvscsi_change_queue_depth(struct scsi_device *sdev, int qdepth)
+static int ibmvscsi_change_queue_depth(struct scsi_device *sdev, int qdepth,
+				       int reason)
 {
+	if (reason != SCSI_QDEPTH_DEFAULT)
+		return -EOPNOTSUPP;
+
 	if (qdepth > IBMVSCSI_MAX_CMDS_PER_LUN)
 		qdepth = IBMVSCSI_MAX_CMDS_PER_LUN;
 
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 76d294fc7846..206c2fa8c1ba 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -3367,16 +3367,21 @@ static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
  * ipr_change_queue_depth - Change the device's queue depth
  * @sdev:	scsi device struct
  * @qdepth:	depth to set
+ * @reason:	calling context
  *
  * Return value:
  * 	actual depth set
  **/
-static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
+static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth,
+				  int reason)
 {
 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
 	struct ipr_resource_entry *res;
 	unsigned long lock_flags = 0;
 
+	if (reason != SCSI_QDEPTH_DEFAULT)
+		return -EOPNOTSUPP;
+
 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 	res = (struct ipr_resource_entry *)sdev->hostdata;
 
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index edc49ca49cea..517da3fd89d3 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -903,7 +903,7 @@ static struct iscsi_transport iscsi_sw_tcp_transport = {
 				  ISCSI_USERNAME | ISCSI_PASSWORD |
 				  ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
 				  ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
-				  ISCSI_LU_RESET_TMO |
+				  ISCSI_LU_RESET_TMO | ISCSI_TGT_RESET_TMO |
 				  ISCSI_PING_TMO | ISCSI_RECV_TMO |
 				  ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
 	.host_param_mask	= ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
diff --git a/drivers/scsi/libfc/Makefile b/drivers/scsi/libfc/Makefile
index 55f982de3a9a..4bb23ac86a5c 100644
--- a/drivers/scsi/libfc/Makefile
+++ b/drivers/scsi/libfc/Makefile
@@ -3,10 +3,12 @@
 obj-$(CONFIG_LIBFC) += libfc.o
 
 libfc-objs := \
+	fc_libfc.o \
 	fc_disc.o \
 	fc_exch.o \
 	fc_elsct.o \
 	fc_frame.o \
 	fc_lport.o \
 	fc_rport.o \
-	fc_fcp.o
+	fc_fcp.o \
+	fc_npiv.o
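
fc_npiv.o is the new NPIV support file; the piece this diff leans on later is fc_vport_id_lookup(), used in fc_exch.c's fc_exch_recv_req() to steer an incoming frame to the right VN_Port. A sketch of what that lookup has to do, assuming the N_Port keeps its NPIV children on a vports list protected by lp_mutex (field names are assumptions):

	struct fc_lport *fc_vport_id_lookup(struct fc_lport *n_port, u32 port_id)
	{
		struct fc_lport *lport = NULL;
		struct fc_lport *vn_port;

		if (fc_host_port_id(n_port->host) == port_id)
			return n_port;

		mutex_lock(&n_port->lp_mutex);
		list_for_each_entry(vn_port, &n_port->vports, list) {
			if (fc_host_port_id(vn_port->host) == port_id) {
				lport = vn_port;
				break;
			}
		}
		mutex_unlock(&n_port->lp_mutex);

		return lport;
	}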
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
index c48799e9dd8e..9b0a5192a965 100644
--- a/drivers/scsi/libfc/fc_disc.c
+++ b/drivers/scsi/libfc/fc_disc.c
@@ -40,6 +40,8 @@
 
 #include <scsi/libfc.h>
 
+#include "fc_libfc.h"
+
 #define FC_DISC_RETRY_LIMIT	3	/* max retries */
 #define FC_DISC_RETRY_DELAY	500UL	/* (msecs) delay */
 
@@ -51,8 +53,8 @@ static int fc_disc_single(struct fc_lport *, struct fc_disc_port *);
 static void fc_disc_restart(struct fc_disc *);
 
 /**
- * fc_disc_stop_rports() - delete all the remote ports associated with the lport
- * @disc: The discovery job to stop rports on
+ * fc_disc_stop_rports() - Delete all the remote ports associated with the lport
+ * @disc: The discovery job to stop remote ports on
  *
  * Locking Note: This function expects that the lport mutex is locked before
  * calling it.
@@ -72,9 +74,9 @@ void fc_disc_stop_rports(struct fc_disc *disc)
 
 /**
  * fc_disc_recv_rscn_req() - Handle Registered State Change Notification (RSCN)
- * @sp: Current sequence of the RSCN exchange
- * @fp: RSCN Frame
- * @lport: Fibre Channel host port instance
+ * @sp:	   The sequence of the RSCN exchange
+ * @fp:	   The RSCN frame
+ * @lport: The local port that the request will be sent on
  *
  * Locking Note: This function expects that the disc_mutex is locked
  *		 before it is called.
@@ -183,9 +185,9 @@ reject:
 
 /**
  * fc_disc_recv_req() - Handle incoming requests
- * @sp: Current sequence of the request exchange
- * @fp: The frame
- * @lport: The FC local port
+ * @sp:	   The sequence of the request exchange
+ * @fp:	   The request frame
+ * @lport: The local port receiving the request
  *
  * Locking Note: This function is called from the EM and will lock
  *		 the disc_mutex before calling the handler for the
@@ -213,7 +215,7 @@ static void fc_disc_recv_req(struct fc_seq *sp, struct fc_frame *fp,
 
 /**
  * fc_disc_restart() - Restart discovery
- * @lport: FC discovery context
+ * @disc: The discovery object to be restarted
  *
  * Locking Note: This function expects that the disc mutex
  *		 is already locked.
@@ -240,9 +242,9 @@ static void fc_disc_restart(struct fc_disc *disc)
 }
 
 /**
- * fc_disc_start() - Fibre Channel Target discovery
- * @lport: FC local port
- * @disc_callback: function to be called when discovery is complete
+ * fc_disc_start() - Start discovery on a local port
+ * @lport:	   The local port to have discovery started on
+ * @disc_callback: Callback function to be called when discovery is complete
  */
 static void fc_disc_start(void (*disc_callback)(struct fc_lport *,
 						enum fc_disc_event),
@@ -263,8 +265,8 @@ static void fc_disc_start(void (*disc_callback)(struct fc_lport *,
 
 /**
  * fc_disc_done() - Discovery has been completed
- * @disc: FC discovery context
- * @event: discovery completion status
+ * @disc:  The discovery context
+ * @event: The discovery completion status
  *
  * Locking Note: This function expects that the disc mutex is locked before
  * it is called. The discovery callback is then made with the lock released,
@@ -284,8 +286,8 @@ static void fc_disc_done(struct fc_disc *disc, enum fc_disc_event event)
 	}
 
 	/*
-	 * Go through all remote ports.  If they were found in the latest
-	 * discovery, reverify or log them in.  Otherwise, log them out.
+	 * Go through all remote ports.	 If they were found in the latest
+	 * discovery, reverify or log them in.	Otherwise, log them out.
 	 * Skip ports which were never discovered.  These are the dNS port
 	 * and ports which were created by PLOGI.
 	 */
@@ -305,8 +307,8 @@ static void fc_disc_done(struct fc_disc *disc, enum fc_disc_event event)
 
 /**
  * fc_disc_error() - Handle error on dNS request
- * @disc: FC discovery context
- * @fp: The frame pointer
+ * @disc: The discovery context
+ * @fp:	  The error code encoded as a frame pointer
  */
 static void fc_disc_error(struct fc_disc *disc, struct fc_frame *fp)
 {
@@ -342,7 +344,7 @@ static void fc_disc_error(struct fc_disc *disc, struct fc_frame *fp)
 
 /**
  * fc_disc_gpn_ft_req() - Send Get Port Names by FC-4 type (GPN_FT) request
- * @lport: FC discovery context
+ * @disc: The discovery context
  *
  * Locking Note: This function expects that the disc_mutex is locked
  *		 before it is called.
@@ -368,17 +370,17 @@ static void fc_disc_gpn_ft_req(struct fc_disc *disc)
 	if (lport->tt.elsct_send(lport, 0, fp,
 				 FC_NS_GPN_FT,
 				 fc_disc_gpn_ft_resp,
-				 disc, lport->e_d_tov))
+				 disc, 3 * lport->r_a_tov))
 		return;
 err:
-	fc_disc_error(disc, fp);
+	fc_disc_error(disc, NULL);
 }
 
 /**
  * fc_disc_gpn_ft_parse() - Parse the body of the dNS GPN_FT response.
- * @lport: Fibre Channel host port instance
- * @buf: GPN_FT response buffer
- * @len: size of response buffer
+ * @disc:  The discovery context the GPN_FT response was received on
+ * @buf:   The GPN_FT response buffer
+ * @len:   The size of the response buffer
  *
  * Goes through the list of IDs and names resulting from a request.
  */
@@ -477,10 +479,8 @@ static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len)
 }
 
 /**
- * fc_disc_timeout() - Retry handler for the disc component
- * @work: Structure holding disc obj that needs retry discovery
- *
- * Handle retry of memory allocation for remote ports.
+ * fc_disc_timeout() - Handler for discovery timeouts
+ * @work: Structure holding discovery context that needs to retry discovery
  */
 static void fc_disc_timeout(struct work_struct *work)
 {
@@ -494,9 +494,9 @@ static void fc_disc_timeout(struct work_struct *work)
 
 /**
  * fc_disc_gpn_ft_resp() - Handle a response frame from Get Port Names (GPN_FT)
- * @sp: Current sequence of GPN_FT exchange
- * @fp: response frame
- * @lp_arg: Fibre Channel host port instance
+ * @sp:	    The sequence that the GPN_FT response was received on
+ * @fp:	    The GPN_FT response frame
+ * @lp_arg: The discovery context
  *
  * Locking Note: This function is called without disc mutex held, and
  *		 should do all its processing with the mutex held
@@ -567,9 +567,9 @@ static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp,
 
 /**
  * fc_disc_gpn_id_resp() - Handle a response frame from Get Port Names (GPN_ID)
- * @sp: exchange sequence
- * @fp: response frame
- * @rdata_arg: remote port private data
+ * @sp:	       The sequence the GPN_ID is on
+ * @fp:	       The response frame
+ * @rdata_arg: The remote port that sent the GPN_ID response
  *
  * Locking Note: This function is called without disc mutex held.
  */
@@ -637,7 +637,7 @@ out:
 
 /**
  * fc_disc_gpn_id_req() - Send Get Port Names by ID (GPN_ID) request
- * @lport: local port
+ * @lport: The local port to initiate discovery on
  * @rdata: remote port private data
  *
  * Locking Note: This function expects that the disc_mutex is locked
@@ -654,7 +654,8 @@ static int fc_disc_gpn_id_req(struct fc_lport *lport,
 	if (!fp)
 		return -ENOMEM;
 	if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, FC_NS_GPN_ID,
-				 fc_disc_gpn_id_resp, rdata, lport->e_d_tov))
+				  fc_disc_gpn_id_resp, rdata,
+				  3 * lport->r_a_tov))
 		return -ENOMEM;
 	kref_get(&rdata->kref);
 	return 0;
@@ -662,8 +663,8 @@ static int fc_disc_gpn_id_req(struct fc_lport *lport,
 
 /**
  * fc_disc_single() - Discover the directory information for a single target
- * @lport: local port
- * @dp: The port to rediscover
+ * @lport: The local port the remote port is associated with
+ * @dp:	   The port to rediscover
  *
  * Locking Note: This function expects that the disc_mutex is locked
  *		 before it is called.
@@ -681,7 +682,7 @@ static int fc_disc_single(struct fc_lport *lport, struct fc_disc_port *dp)
 
 /**
  * fc_disc_stop() - Stop discovery for a given lport
- * @lport: The lport that discovery should stop for
+ * @lport: The local port that discovery should stop on
  */
 void fc_disc_stop(struct fc_lport *lport)
 {
@@ -695,7 +696,7 @@ void fc_disc_stop(struct fc_lport *lport)
 
 /**
  * fc_disc_stop_final() - Stop discovery for a given lport
- * @lport: The lport that discovery should stop for
+ * @lport: The lport that discovery should stop on
  *
  * This function will block until discovery has been
  * completely stopped and all rports have been deleted.
@@ -707,8 +708,8 @@ void fc_disc_stop_final(struct fc_lport *lport)
 }
 
 /**
- * fc_disc_init() - Initialize the discovery block
- * @lport: FC local port
+ * fc_disc_init() - Initialize the discovery layer for a local port
+ * @lport: The local port that needs the discovery layer to be initialized
  */
 int fc_disc_init(struct fc_lport *lport)
 {
diff --git a/drivers/scsi/libfc/fc_elsct.c b/drivers/scsi/libfc/fc_elsct.c
index 5cfa68732e9d..53748724f2c5 100644
--- a/drivers/scsi/libfc/fc_elsct.c
+++ b/drivers/scsi/libfc/fc_elsct.c
@@ -28,17 +28,22 @@
 #include <scsi/libfc.h>
 #include <scsi/fc_encode.h>
 
-/*
- * fc_elsct_send - sends ELS/CT frame
+/**
+ * fc_elsct_send() - Send an ELS or CT frame
+ * @lport:	The local port to send the frame on
+ * @did:	The destination ID for the frame
+ * @fp:		The frame to be sent
+ * @op:		The operational code
+ * @resp:	The callback routine when the response is received
+ * @arg:	The argument to pass to the response callback routine
+ * @timer_msec: The timeout period for the frame (in msecs)
  */
-static struct fc_seq *fc_elsct_send(struct fc_lport *lport,
-				    u32 did,
-				    struct fc_frame *fp,
-				    unsigned int op,
-				    void (*resp)(struct fc_seq *,
-						 struct fc_frame *fp,
-						 void *arg),
-				    void *arg, u32 timer_msec)
+struct fc_seq *fc_elsct_send(struct fc_lport *lport, u32 did,
+			     struct fc_frame *fp, unsigned int op,
+			     void (*resp)(struct fc_seq *,
+					  struct fc_frame *,
+					  void *),
+			     void *arg, u32 timer_msec)
 {
 	enum fc_rctl r_ctl;
 	enum fc_fh_type fh_type;
@@ -53,15 +58,22 @@ static struct fc_seq *fc_elsct_send(struct fc_lport *lport,
 		did = FC_FID_DIR_SERV;
 	}
 
-	if (rc)
+	if (rc) {
+		fc_frame_free(fp);
 		return NULL;
+	}
 
 	fc_fill_fc_hdr(fp, r_ctl, did, fc_host_port_id(lport->host), fh_type,
 		       FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
 
 	return lport->tt.exch_seq_send(lport, fp, resp, NULL, arg, timer_msec);
 }
+EXPORT_SYMBOL(fc_elsct_send);
 
+/**
+ * fc_elsct_init() - Initialize the ELS/CT layer
+ * @lport: The local port to initialize the ELS/CT layer for
+ */
 int fc_elsct_init(struct fc_lport *lport)
 {
 	if (!lport->tt.elsct_send)
@@ -72,12 +84,15 @@ int fc_elsct_init(struct fc_lport *lport)
 EXPORT_SYMBOL(fc_elsct_init);
 
 /**
- * fc_els_resp_type() - return string describing ELS response for debug.
- * @fp: frame pointer with possible error code.
+ * fc_els_resp_type() - Return a string describing the ELS response
+ * @fp: The frame pointer or possible error code
  */
 const char *fc_els_resp_type(struct fc_frame *fp)
 {
 	const char *msg;
+	struct fc_frame_header *fh;
+	struct fc_ct_hdr *ct;
+
 	if (IS_ERR(fp)) {
 		switch (-PTR_ERR(fp)) {
 		case FC_NO_ERR:
@@ -94,15 +109,41 @@ const char *fc_els_resp_type(struct fc_frame *fp)
 			break;
 		}
 	} else {
-		switch (fc_frame_payload_op(fp)) {
-		case ELS_LS_ACC:
-			msg = "accept";
+		fh = fc_frame_header_get(fp);
+		switch (fh->fh_type) {
+		case FC_TYPE_ELS:
+			switch (fc_frame_payload_op(fp)) {
+			case ELS_LS_ACC:
+				msg = "accept";
+				break;
+			case ELS_LS_RJT:
+				msg = "reject";
+				break;
+			default:
+				msg = "response unknown ELS";
+				break;
+			}
 			break;
-		case ELS_LS_RJT:
-			msg = "reject";
+		case FC_TYPE_CT:
+			ct = fc_frame_payload_get(fp, sizeof(*ct));
+			if (ct) {
+				switch (ntohs(ct->ct_cmd)) {
+				case FC_FS_ACC:
+					msg = "CT accept";
+					break;
+				case FC_FS_RJT:
+					msg = "CT reject";
+					break;
+				default:
+					msg = "response unknown CT";
+					break;
+				}
+			} else {
+				msg = "short CT response";
+			}
 			break;
 		default:
-			msg = "response unknown ELS";
+			msg = "response not ELS or CT";
 			break;
 		}
 	}
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index c1c15748220c..19d711cb938c 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -32,10 +32,13 @@
 #include <scsi/libfc.h>
 #include <scsi/fc_encode.h>
 
+#include "fc_libfc.h"
+
 u16	fc_cpu_mask;		/* cpu mask for possible cpus */
 EXPORT_SYMBOL(fc_cpu_mask);
 static u16	fc_cpu_order;	/* 2's power to represent total possible cpus */
-static struct kmem_cache *fc_em_cachep;        /* cache for exchanges */
+static struct kmem_cache *fc_em_cachep;	       /* cache for exchanges */
+struct workqueue_struct *fc_exch_workqueue;
 
 /*
  * Structure and function definitions for managing Fibre Channel Exchanges
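
fc_exch_workqueue gives exchange timers their own queue instead of the shared system workqueue (see the queue_delayed_work() change in fc_exch_timer_set_locked() below). It needs matching setup/teardown somewhere in libfc's init path; roughly, with the helper names assumed:

	int fc_setup_exch_mgr(void)
	{
		/* single-threaded is plenty: timeouts are rare and cheap */
		fc_exch_workqueue =
			create_singlethread_workqueue("fc_exch_workqueue");
		if (!fc_exch_workqueue)
			return -ENOMEM;
		return 0;
	}

	void fc_destroy_exch_mgr(void)
	{
		destroy_workqueue(fc_exch_workqueue);
	}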
@@ -50,35 +53,46 @@ static struct kmem_cache *fc_em_cachep;        /* cache for exchanges */
  * fc_seq holds the state for an individual sequence.
  */
 
-/*
- * Per cpu exchange pool
+/**
+ * struct fc_exch_pool - Per cpu exchange pool
+ * @next_index:	  Next possible free exchange index
+ * @total_exches: Total allocated exchanges
+ * @lock:	  Exch pool lock
+ * @ex_list:	  List of exchanges
  *
  * This structure manages per cpu exchanges in array of exchange pointers.
  * This array is allocated followed by struct fc_exch_pool memory for
  * assigned range of exchanges to per cpu pool.
  */
 struct fc_exch_pool {
-	u16		next_index;	/* next possible free exchange index */
-	u16		total_exches;	/* total allocated exchanges */
-	spinlock_t	lock;		/* exch pool lock */
-	struct list_head	ex_list;	/* allocated exchanges list */
+	u16		 next_index;
+	u16		 total_exches;
+	spinlock_t	 lock;
+	struct list_head ex_list;
 };
 
-/*
- * Exchange manager.
+/**
+ * struct fc_exch_mgr - The Exchange Manager (EM).
+ * @class:	    Default class for new sequences
+ * @kref:	    Reference counter
+ * @min_xid:	    Minimum exchange ID
+ * @max_xid:	    Maximum exchange ID
+ * @ep_pool:	    Reserved exchange pointers
+ * @pool_max_index: Max exch array index in exch pool
+ * @pool:	    Per cpu exch pool
+ * @stats:	    Statistics structure
  *
  * This structure is the center for creating exchanges and sequences.
  * It manages the allocation of exchange IDs.
  */
 struct fc_exch_mgr {
-	enum fc_class	class;		/* default class for sequences */
-	struct kref	kref;		/* exchange mgr reference count */
-	u16		min_xid;	/* min exchange ID */
-	u16		max_xid;	/* max exchange ID */
-	struct list_head	ex_list;	/* allocated exchanges list */
-	mempool_t	*ep_pool;	/* reserve ep's */
-	u16		pool_max_index;	/* max exch array index in exch pool */
-	struct fc_exch_pool *pool;	/* per cpu exch pool */
+	enum fc_class	class;
+	struct kref	kref;
+	u16		min_xid;
+	u16		max_xid;
+	mempool_t	*ep_pool;
+	u16		pool_max_index;
+	struct fc_exch_pool *pool;
 
 	/*
 	 * currently exchange mgr stats are updated but not used.
@@ -96,6 +110,18 @@ struct fc_exch_mgr {
 };
 #define	fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)
 
+/**
+ * struct fc_exch_mgr_anchor - primary structure for list of EMs
+ * @ema_list: Exchange Manager Anchor list
+ * @mp:	      Exchange Manager associated with this anchor
+ * @match:    Routine to determine if this anchor's EM should be used
+ *
+ * When walking the list of anchors the match routine will be called
+ * for each anchor to determine if that EM should be used. The last
+ * anchor in the list will always match to handle any exchanges not
+ * handled by other EMs. The non-default EMs would be added to the
+ * anchor list by HW that provides FCoE offloads.
+ */
 struct fc_exch_mgr_anchor {
 	struct list_head ema_list;
 	struct fc_exch_mgr *mp;
@@ -108,7 +134,6 @@ static void fc_seq_ls_rjt(struct fc_seq *, enum fc_els_rjt_reason,
 			  enum fc_els_rjt_explan);
 static void fc_exch_els_rec(struct fc_seq *, struct fc_frame *);
 static void fc_exch_els_rrq(struct fc_seq *, struct fc_frame *);
-static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp);
 
 /*
  * Internal implementation notes.
@@ -196,6 +221,15 @@ static char *fc_exch_rctl_names[] = FC_RCTL_NAMES_INIT;
 
 #define FC_TABLE_SIZE(x)   (sizeof(x) / sizeof(x[0]))
 
+/**
+ * fc_exch_name_lookup() - Lookup name by opcode
+ * @op:	       Opcode to be looked up
+ * @table:     Opcode/name table
+ * @max_index: Index not to be exceeded
+ *
+ * This routine is used to determine a human-readable string identifying
+ * an R_CTL opcode.
+ */
 static inline const char *fc_exch_name_lookup(unsigned int op, char **table,
 					      unsigned int max_index)
 {
@@ -208,25 +242,34 @@ static inline const char *fc_exch_name_lookup(unsigned int op, char **table,
 	return name;
 }
 
+/**
+ * fc_exch_rctl_name() - Wrapper routine for fc_exch_name_lookup()
+ * @op: The opcode to be looked up
+ */
 static const char *fc_exch_rctl_name(unsigned int op)
 {
 	return fc_exch_name_lookup(op, fc_exch_rctl_names,
 				   FC_TABLE_SIZE(fc_exch_rctl_names));
 }
 
-/*
- * Hold an exchange - keep it from being freed.
+/**
+ * fc_exch_hold() - Increment an exchange's reference count
+ * @ep: Exchange to be held
  */
-static void fc_exch_hold(struct fc_exch *ep)
+static inline void fc_exch_hold(struct fc_exch *ep)
 {
 	atomic_inc(&ep->ex_refcnt);
 }
 
-/*
- * setup fc hdr by initializing few more FC header fields and sof/eof.
- * Initialized fields by this func:
- *	- fh_ox_id, fh_rx_id, fh_seq_id, fh_seq_cnt
- *	- sof and eof
+/**
+ * fc_exch_setup_hdr() - Initialize an FC header's fields and determine
+ *			 the SOF and EOF
+ * @ep:	   The exchange that will use the header
+ * @fp:	   The frame whose header is to be modified
+ * @f_ctl: F_CTL bits that will be used for the frame header
+ *
+ * The fields initialized by this routine are: fh_ox_id, fh_rx_id,
+ * fh_seq_id, fh_seq_cnt and the SOF and EOF.
  */
 static void fc_exch_setup_hdr(struct fc_exch *ep, struct fc_frame *fp,
 			      u32 f_ctl)
@@ -243,7 +286,7 @@ static void fc_exch_setup_hdr(struct fc_exch *ep, struct fc_frame *fp,
 		if (fc_sof_needs_ack(ep->class))
 			fr_eof(fp) = FC_EOF_N;
 		/*
-		 * Form f_ctl.
+		 * Form F_CTL.
 		 * The number of fill bytes to make the length a 4-byte
 		 * multiple is the low order 2-bits of the f_ctl.
 		 * The fill itself will have been cleared by the frame
@@ -273,10 +316,12 @@ static void fc_exch_setup_hdr(struct fc_exch *ep, struct fc_frame *fp,
 	fh->fh_seq_cnt = htons(ep->seq.cnt);
 }
 
-
-/*
- * Release a reference to an exchange.
- * If the refcnt goes to zero and the exchange is complete, it is freed.
+/**
+ * fc_exch_release() - Decrement an exchange's reference count
+ * @ep: Exchange to be released
+ *
+ * If the reference count reaches zero and the exchange is complete,
+ * it is freed.
  */
 static void fc_exch_release(struct fc_exch *ep)
 {
@@ -291,6 +336,10 @@ static void fc_exch_release(struct fc_exch *ep)
 	}
 }
 
+/**
+ * fc_exch_done_locked() - Complete an exchange with the exchange lock held
+ * @ep: The exchange that is complete
+ */
 static int fc_exch_done_locked(struct fc_exch *ep)
 {
 	int rc = 1;
@@ -315,6 +364,15 @@ static int fc_exch_done_locked(struct fc_exch *ep)
 	return rc;
 }
 
+/**
+ * fc_exch_ptr_get() - Return an exchange from an exchange pool
+ * @pool:  Exchange Pool to get an exchange from
+ * @index: Index of the exchange within the pool
+ *
+ * Use the index to get an exchange from within an exchange pool. exches
+ * will point to an array of exchange pointers. The index will select
+ * the exchange within the array.
+ */
 static inline struct fc_exch *fc_exch_ptr_get(struct fc_exch_pool *pool,
 					      u16 index)
 {
@@ -322,12 +380,22 @@ static inline struct fc_exch *fc_exch_ptr_get(struct fc_exch_pool *pool,
 	return exches[index];
 }
 
+/**
+ * fc_exch_ptr_set() - Assign an exchange to a slot in an exchange pool
+ * @pool:  The pool to assign the exchange to
+ * @index: The index in the pool where the exchange will be assigned
+ * @ep:	   The exchange to assign to the pool
+ */
 static inline void fc_exch_ptr_set(struct fc_exch_pool *pool, u16 index,
 				   struct fc_exch *ep)
 {
 	((struct fc_exch **)(pool + 1))[index] = ep;
 }
 
+/**
+ * fc_exch_delete() - Delete an exchange
+ * @ep: The exchange to be deleted
+ */
 static void fc_exch_delete(struct fc_exch *ep)
 {
 	struct fc_exch_pool *pool;
@@ -343,8 +411,14 @@ static void fc_exch_delete(struct fc_exch *ep)
 	fc_exch_release(ep);	/* drop hold for exch in mp */
 }
 
-/*
- * Internal version of fc_exch_timer_set - used with lock held.
+/**
+ * fc_exch_timer_set_locked() - Start a timer for an exchange w/ the
+ *				the exchange lock held
+ * @ep:		The exchange whose timer will start
+ * @timer_msec: The timeout period
+ *
+ * Used for upper level protocols to time out the exchange.
+ * The timer is cancelled when it fires or when the exchange completes.
  */
 static inline void fc_exch_timer_set_locked(struct fc_exch *ep,
 					    unsigned int timer_msec)
@@ -354,17 +428,15 @@ static inline void fc_exch_timer_set_locked(struct fc_exch *ep,
 
 	FC_EXCH_DBG(ep, "Exchange timer armed\n");
 
-	if (schedule_delayed_work(&ep->timeout_work,
-				  msecs_to_jiffies(timer_msec)))
+	if (queue_delayed_work(fc_exch_workqueue, &ep->timeout_work,
+			       msecs_to_jiffies(timer_msec)))
 		fc_exch_hold(ep);		/* hold for timer */
 }
 
-/*
- * Set timer for an exchange.
- * The time is a minimum delay in milliseconds until the timer fires.
- * Used for upper level protocols to time out the exchange.
- * The timer is cancelled when it fires or when the exchange completes.
- * Returns non-zero if a timer couldn't be allocated.
+/**
+ * fc_exch_timer_set() - Lock the exchange and set the timer
+ * @ep:		The exchange whose timer will start
+ * @timer_msec: The timeout period
  */
 static void fc_exch_timer_set(struct fc_exch *ep, unsigned int timer_msec)
 {
@@ -373,7 +445,115 @@ static void fc_exch_timer_set(struct fc_exch *ep, unsigned int timer_msec)
 	spin_unlock_bh(&ep->ex_lock);
 }
 
-int fc_seq_exch_abort(const struct fc_seq *req_sp, unsigned int timer_msec)
+/**
+ * fc_seq_send() - Send a frame using existing sequence/exchange pair
+ * @lport: The local port that the exchange will be sent on
+ * @sp:	   The sequence to be sent
+ * @fp:	   The frame to be sent on the exchange
+ */
+static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp,
+		       struct fc_frame *fp)
+{
+	struct fc_exch *ep;
+	struct fc_frame_header *fh = fc_frame_header_get(fp);
+	int error;
+	u32 f_ctl;
+
+	ep = fc_seq_exch(sp);
+	WARN_ON((ep->esb_stat & ESB_ST_SEQ_INIT) != ESB_ST_SEQ_INIT);
+
+	f_ctl = ntoh24(fh->fh_f_ctl);
+	fc_exch_setup_hdr(ep, fp, f_ctl);
+
+	/*
+	 * update sequence count if this frame is carrying
+	 * multiple FC frames when sequence offload is enabled
+	 * by LLD.
+	 */
+	if (fr_max_payload(fp))
+		sp->cnt += DIV_ROUND_UP((fr_len(fp) - sizeof(*fh)),
+					fr_max_payload(fp));
+	else
+		sp->cnt++;
+
+	/*
+	 * Send the frame.
+	 */
+	error = lport->tt.frame_send(lport, fp);
+
+	/*
+	 * Update the exchange and sequence flags,
+	 * assuming all frames for the sequence have been sent.
+	 * We can only be called to send once for each sequence.
+	 */
+	spin_lock_bh(&ep->ex_lock);
+	ep->f_ctl = f_ctl & ~FC_FC_FIRST_SEQ;	/* not first seq */
+	if (f_ctl & (FC_FC_END_SEQ | FC_FC_SEQ_INIT))
+		ep->esb_stat &= ~ESB_ST_SEQ_INIT;
+	spin_unlock_bh(&ep->ex_lock);
+	return error;
+}
+
+/**
+ * fc_seq_alloc() - Allocate a sequence for a given exchange
+ * @ep:	    The exchange to allocate a new sequence for
+ * @seq_id: The sequence ID to be used
+ *
+ * We don't support multiple originated sequences on the same exchange.
+ * By implication, any previously originated sequence on this exchange
+ * is complete, and we reallocate the same sequence.
+ */
+static struct fc_seq *fc_seq_alloc(struct fc_exch *ep, u8 seq_id)
+{
+	struct fc_seq *sp;
+
+	sp = &ep->seq;
+	sp->ssb_stat = 0;
+	sp->cnt = 0;
+	sp->id = seq_id;
+	return sp;
+}
+
+/**
+ * fc_seq_start_next_locked() - Allocate a new sequence on the same
+ *				exchange as the supplied sequence
+ * @sp: The sequence/exchange to get a new sequence for
+ */
+static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp)
+{
+	struct fc_exch *ep = fc_seq_exch(sp);
+
+	sp = fc_seq_alloc(ep, ep->seq_id++);
+	FC_EXCH_DBG(ep, "f_ctl %6x seq %2x\n",
+		    ep->f_ctl, sp->id);
+	return sp;
+}
+
+/**
+ * fc_seq_start_next() - Lock the exchange and get a new sequence
+ *			 for a given sequence/exchange pair
+ * @sp: The sequence/exchange to get a new sequence for
+ */
+static struct fc_seq *fc_seq_start_next(struct fc_seq *sp)
+{
+	struct fc_exch *ep = fc_seq_exch(sp);
+
+	spin_lock_bh(&ep->ex_lock);
+	sp = fc_seq_start_next_locked(sp);
+	spin_unlock_bh(&ep->ex_lock);
+
+	return sp;
+}
+
+/**
+ * fc_seq_exch_abort() - Abort an exchange and sequence
+ * @req_sp:	The sequence to be aborted
+ * @timer_msec: The period of time to wait before aborting
+ *
+ * Generally called because of a timeout or an abort from the upper layer.
+ */
+static int fc_seq_exch_abort(const struct fc_seq *req_sp,
+			     unsigned int timer_msec)
 {
 	struct fc_seq *sp;
 	struct fc_exch *ep;
@@ -422,11 +602,10 @@ int fc_seq_exch_abort(const struct fc_seq *req_sp, unsigned int timer_msec)
 		error = -ENOBUFS;
 	return error;
 }
-EXPORT_SYMBOL(fc_seq_exch_abort);
 
-/*
- * Exchange timeout - handle exchange timer expiration.
- * The timer will have been cancelled before this is called.
+/**
+ * fc_exch_timeout() - Handle exchange timer expiration
+ * @work: The work_struct identifying the exchange that timed out
  */
 static void fc_exch_timeout(struct work_struct *work)
 {
@@ -474,28 +653,10 @@ done:
 	fc_exch_release(ep);
 }
 
-/*
- * Allocate a sequence.
- *
- * We don't support multiple originated sequences on the same exchange.
- * By implication, any previously originated sequence on this exchange
- * is complete, and we reallocate the same sequence.
- */
-static struct fc_seq *fc_seq_alloc(struct fc_exch *ep, u8 seq_id)
-{
-	struct fc_seq *sp;
-
-	sp = &ep->seq;
-	sp->ssb_stat = 0;
-	sp->cnt = 0;
-	sp->id = seq_id;
-	return sp;
-}
-
 /**
- * fc_exch_em_alloc() - allocate an exchange from a specified EM.
- * @lport:	ptr to the local port
- * @mp:		ptr to the exchange manager
+ * fc_exch_em_alloc() - Allocate an exchange from a specified EM.
+ * @lport: The local port that the exchange is for
+ * @mp:	   The exchange manager that will allocate the exchange
  *
  * Returns pointer to allocated fc_exch with exch lock held.
  */
@@ -563,16 +724,18 @@ err:
 }
 
 /**
- * fc_exch_alloc() - allocate an exchange.
- * @lport:	ptr to the local port
- * @fp:		ptr to the FC frame
+ * fc_exch_alloc() - Allocate an exchange from an EM on a
+ *		     local port's list of EMs.
+ * @lport: The local port that will own the exchange
+ * @fp:	   The FC frame that the exchange will be for
  *
- * This function walks the list of the exchange manager(EM)
- * anchors to select a EM for new exchange allocation. The
- * EM is selected having either a NULL match function pointer
- * or call to match function returning true.
+ * This function walks the list of exchange manager(EM)
+ * anchors to select an EM for a new exchange allocation. The
+ * EM is selected when a NULL match function pointer is encountered
+ * or when a call to a match function returns true.
  */
-struct fc_exch *fc_exch_alloc(struct fc_lport *lport, struct fc_frame *fp)
+static struct fc_exch *fc_exch_alloc(struct fc_lport *lport,
+				     struct fc_frame *fp)
 {
 	struct fc_exch_mgr_anchor *ema;
 	struct fc_exch *ep;
@@ -586,10 +749,11 @@ struct fc_exch *fc_exch_alloc(struct fc_lport *lport, struct fc_frame *fp)
 	}
 	return NULL;
 }
-EXPORT_SYMBOL(fc_exch_alloc);
 
-/*
- * Lookup and hold an exchange.
+/**
+ * fc_exch_find() - Lookup and hold an exchange
+ * @mp:	 The exchange manager to lookup the exchange from
+ * @xid: The XID of the exchange to look up
  */
 static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid)
 {
@@ -609,7 +773,13 @@ static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid)
 	return ep;
 }
 
-void fc_exch_done(struct fc_seq *sp)
+
+/**
+ * fc_exch_done() - Indicate that an exchange/sequence tuple is complete and
+ *		    the memory allocated for the related objects may be freed.
+ * @sp: The sequence that has completed
+ */
+static void fc_exch_done(struct fc_seq *sp)
 {
 	struct fc_exch *ep = fc_seq_exch(sp);
 	int rc;
@@ -620,10 +790,13 @@ void fc_exch_done(struct fc_seq *sp)
 	if (!rc)
 		fc_exch_delete(ep);
 }
-EXPORT_SYMBOL(fc_exch_done);
 
-/*
- * Allocate a new exchange as responder.
+/**
+ * fc_exch_resp() - Allocate a new exchange for a response frame
+ * @lport: The local port that the exchange was for
+ * @mp:	   The exchange manager to allocate the exchange from
+ * @fp:	   The response frame
+ *
  * Sets the responder ID in the frame header.
  */
 static struct fc_exch *fc_exch_resp(struct fc_lport *lport,
@@ -664,8 +837,13 @@ static struct fc_exch *fc_exch_resp(struct fc_lport *lport,
 	return ep;
 }
 
-/*
- * Find a sequence for receive where the other end is originating the sequence.
+/**
+ * fc_seq_lookup_recip() - Find a sequence where the other end
+ *			   originated the sequence
+ * @lport: The local port that the frame was sent to
+ * @mp:	   The Exchange Manager to lookup the exchange from
+ * @fp:	   The frame associated with the sequence we're looking for
+ *
  * If fc_pf_rjt_reason is FC_RJT_NONE then this function will have a hold
  * on the ep that should be released by the caller.
  */
@@ -771,10 +949,12 @@ rel:
 	return reject;
 }
 
-/*
- * Find the sequence for a frame being received.
- * We originated the sequence, so it should be found.
- * We may or may not have originated the exchange.
+/**
+ * fc_seq_lookup_orig() - Find a sequence where this end
+ *			  originated the sequence
+ * @mp:	   The Exchange Manager to lookup the exchange from
+ * @fp:	   The frame associated with the sequence we're looking for
+ *
  * Does not hold the sequence for the caller.
  */
 static struct fc_seq *fc_seq_lookup_orig(struct fc_exch_mgr *mp,
@@ -806,8 +986,12 @@ static struct fc_seq *fc_seq_lookup_orig(struct fc_exch_mgr *mp,
 	return sp;
 }
 
-/*
- * Set addresses for an exchange.
+/**
+ * fc_exch_set_addr() - Set the source and destination IDs for an exchange
+ * @ep:	     The exchange to set the addresses for
+ * @orig_id: The originator's ID
+ * @resp_id: The responder's ID
+ *
  * Note this must be done before the first sequence of the exchange is sent.
  */
 static void fc_exch_set_addr(struct fc_exch *ep,
@@ -823,76 +1007,15 @@ static void fc_exch_set_addr(struct fc_exch *ep,
 	}
 }
 
-static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp)
-{
-	struct fc_exch *ep = fc_seq_exch(sp);
-
-	sp = fc_seq_alloc(ep, ep->seq_id++);
-	FC_EXCH_DBG(ep, "f_ctl %6x seq %2x\n",
-		    ep->f_ctl, sp->id);
-	return sp;
-}
-/*
- * Allocate a new sequence on the same exchange as the supplied sequence.
- * This will never return NULL.
+/**
+ * fc_seq_els_rsp_send() - Send an ELS response using information from
+ *			   the existing sequence/exchange.
+ * @sp:	      The sequence/exchange to get information from
+ * @els_cmd:  The ELS command to be sent
+ * @els_data: The ELS data to be sent
  */
-struct fc_seq *fc_seq_start_next(struct fc_seq *sp)
-{
-	struct fc_exch *ep = fc_seq_exch(sp);
-
-	spin_lock_bh(&ep->ex_lock);
-	sp = fc_seq_start_next_locked(sp);
-	spin_unlock_bh(&ep->ex_lock);
-
-	return sp;
-}
-EXPORT_SYMBOL(fc_seq_start_next);
-
-int fc_seq_send(struct fc_lport *lp, struct fc_seq *sp, struct fc_frame *fp)
-{
-	struct fc_exch *ep;
-	struct fc_frame_header *fh = fc_frame_header_get(fp);
-	int error;
-	u32	f_ctl;
-
-	ep = fc_seq_exch(sp);
-	WARN_ON((ep->esb_stat & ESB_ST_SEQ_INIT) != ESB_ST_SEQ_INIT);
-
-	f_ctl = ntoh24(fh->fh_f_ctl);
-	fc_exch_setup_hdr(ep, fp, f_ctl);
-
-	/*
-	 * update sequence count if this frame is carrying
-	 * multiple FC frames when sequence offload is enabled
-	 * by LLD.
-	 */
-	if (fr_max_payload(fp))
-		sp->cnt += DIV_ROUND_UP((fr_len(fp) - sizeof(*fh)),
-					fr_max_payload(fp));
-	else
-		sp->cnt++;
-
-	/*
-	 * Send the frame.
-	 */
-	error = lp->tt.frame_send(lp, fp);
-
-	/*
-	 * Update the exchange and sequence flags,
-	 * assuming all frames for the sequence have been sent.
-	 * We can only be called to send once for each sequence.
-	 */
-	spin_lock_bh(&ep->ex_lock);
-	ep->f_ctl = f_ctl & ~FC_FC_FIRST_SEQ;	/* not first seq */
-	if (f_ctl & (FC_FC_END_SEQ | FC_FC_SEQ_INIT))
-		ep->esb_stat &= ~ESB_ST_SEQ_INIT;
-	spin_unlock_bh(&ep->ex_lock);
-	return error;
-}
-EXPORT_SYMBOL(fc_seq_send);
-
-void fc_seq_els_rsp_send(struct fc_seq *sp, enum fc_els_cmd els_cmd,
-			 struct fc_seq_els_data *els_data)
+static void fc_seq_els_rsp_send(struct fc_seq *sp, enum fc_els_cmd els_cmd,
+				struct fc_seq_els_data *els_data)
 {
 	switch (els_cmd) {
 	case ELS_LS_RJT:
@@ -911,10 +1034,13 @@ void fc_seq_els_rsp_send(struct fc_seq *sp, enum fc_els_cmd els_cmd,
 		FC_EXCH_DBG(fc_seq_exch(sp), "Invalid ELS CMD:%x\n", els_cmd);
 	}
 }
-EXPORT_SYMBOL(fc_seq_els_rsp_send);
 
-/*
- * Send a sequence, which is also the last sequence in the exchange.
+/**
+ * fc_seq_send_last() - Send a sequence that is the last in the exchange
+ * @sp:	     The sequence that is to be sent
+ * @fp:	     The frame that will be sent on the sequence
+ * @rctl:    The R_CTL information to be sent
+ * @fh_type: The frame header type
  */
 static void fc_seq_send_last(struct fc_seq *sp, struct fc_frame *fp,
 			     enum fc_rctl rctl, enum fc_fh_type fh_type)
@@ -928,9 +1054,12 @@ static void fc_seq_send_last(struct fc_seq *sp, struct fc_frame *fp,
 	fc_seq_send(ep->lp, sp, fp);
 }
 
-/*
+/**
+ * fc_seq_send_ack() - Send an acknowledgement that we've received a frame
+ * @sp:	   The sequence to send the ACK on
+ * @rx_fp: The received frame that is being acknowledged
+ *
  * Send ACK_1 (or equiv.) indicating we received something.
- * The frame we're acking is supplied.
  */
 static void fc_seq_send_ack(struct fc_seq *sp, const struct fc_frame *rx_fp)
 {
@@ -938,14 +1067,14 @@ static void fc_seq_send_ack(struct fc_seq *sp, const struct fc_frame *rx_fp)
 	struct fc_frame_header *rx_fh;
 	struct fc_frame_header *fh;
 	struct fc_exch *ep = fc_seq_exch(sp);
-	struct fc_lport *lp = ep->lp;
+	struct fc_lport *lport = ep->lp;
 	unsigned int f_ctl;
 
 	/*
 	 * Don't send ACKs for class 3.
 	 */
 	if (fc_sof_needs_ack(fr_sof(rx_fp))) {
-		fp = fc_frame_alloc(lp, 0);
+		fp = fc_frame_alloc(lport, 0);
 		if (!fp)
 			return;
 
@@ -980,12 +1109,16 @@ static void fc_seq_send_ack(struct fc_seq *sp, const struct fc_frame *rx_fp)
 		else
 			fr_eof(fp) = FC_EOF_N;
 
-		(void) lp->tt.frame_send(lp, fp);
+		lport->tt.frame_send(lport, fp);
 	}
 }
 
-/*
- * Send BLS Reject.
+/**
+ * fc_exch_send_ba_rjt() - Send BLS Reject
+ * @rx_fp:  The frame being rejected
+ * @reason: The reason the frame is being rejected
+ * @explan: The explanation for the rejection
+ *
  * This is for rejecting BA_ABTS only.
  */
 static void fc_exch_send_ba_rjt(struct fc_frame *rx_fp,
@@ -996,11 +1129,11 @@ static void fc_exch_send_ba_rjt(struct fc_frame *rx_fp,
 	struct fc_frame_header *rx_fh;
 	struct fc_frame_header *fh;
 	struct fc_ba_rjt *rp;
-	struct fc_lport *lp;
+	struct fc_lport *lport;
 	unsigned int f_ctl;
 
-	lp = fr_dev(rx_fp);
-	fp = fc_frame_alloc(lp, sizeof(*rp));
+	lport = fr_dev(rx_fp);
+	fp = fc_frame_alloc(lport, sizeof(*rp));
 	if (!fp)
 		return;
 	fh = fc_frame_header_get(fp);
@@ -1045,13 +1178,17 @@ static void fc_exch_send_ba_rjt(struct fc_frame *rx_fp,
 	if (fc_sof_needs_ack(fr_sof(fp)))
 		fr_eof(fp) = FC_EOF_N;
 
-	(void) lp->tt.frame_send(lp, fp);
+	lport->tt.frame_send(lport, fp);
 }
 
-/*
- * Handle an incoming ABTS.  This would be for target mode usually,
- * but could be due to lost FCP transfer ready, confirm or RRQ.
- * We always handle this as an exchange abort, ignoring the parameter.
+/**
+ * fc_exch_recv_abts() - Handle an incoming ABTS
+ * @ep:	   The exchange the abort was on
+ * @rx_fp: The ABTS frame
+ *
+ * This would be for target mode usually, but could be due to lost
+ * FCP transfer ready, confirm or RRQ. We always handle this as an
+ * exchange abort, ignoring the parameter.
  */
 static void fc_exch_recv_abts(struct fc_exch *ep, struct fc_frame *rx_fp)
 {
@@ -1100,10 +1237,14 @@ free:
 	fc_frame_free(rx_fp);
 }
 
-/*
- * Handle receive where the other end is originating the sequence.
+/**
+ * fc_exch_recv_req() - Handler for an incoming request where the other
+ *			end is originating the sequence
+ * @lport: The local port that received the request
+ * @mp:	   The EM that the exchange is on
+ * @fp:	   The request frame
  */
-static void fc_exch_recv_req(struct fc_lport *lp, struct fc_exch_mgr *mp,
+static void fc_exch_recv_req(struct fc_lport *lport, struct fc_exch_mgr *mp,
 			     struct fc_frame *fp)
 {
 	struct fc_frame_header *fh = fc_frame_header_get(fp);
@@ -1114,8 +1255,17 @@ static void fc_exch_recv_req(struct fc_lport *lp, struct fc_exch_mgr *mp,
 	u32 f_ctl;
 	enum fc_pf_rjt_reason reject;
 
+	/* We can have the wrong fc_lport at this point with NPIV, which is a
+	 * problem now that we know a new exchange needs to be allocated
+	 */
+	lport = fc_vport_id_lookup(lport, ntoh24(fh->fh_d_id));
+	if (!lport) {
+		fc_frame_free(fp);
+		return;
+	}
+
 	fr_seq(fp) = NULL;
-	reject = fc_seq_lookup_recip(lp, mp, fp);
+	reject = fc_seq_lookup_recip(lport, mp, fp);
 	if (reject == FC_RJT_NONE) {
 		sp = fr_seq(fp);	/* sequence will be held */
 		ep = fc_seq_exch(sp);
@@ -1138,17 +1288,21 @@ static void fc_exch_recv_req(struct fc_lport *lp, struct fc_exch_mgr *mp,
 		if (ep->resp)
 			ep->resp(sp, fp, ep->arg);
 		else
-			lp->tt.lport_recv(lp, sp, fp);
+			lport->tt.lport_recv(lport, sp, fp);
 		fc_exch_release(ep);	/* release from lookup */
 	} else {
-		FC_LPORT_DBG(lp, "exch/seq lookup failed: reject %x\n", reject);
+		FC_LPORT_DBG(lport, "exch/seq lookup failed: reject %x\n",
+			     reject);
 		fc_frame_free(fp);
 	}
 }
 
-/*
- * Handle receive where the other end is originating the sequence in
- * response to our exchange.
+/**
+ * fc_exch_recv_seq_resp() - Handler for an incoming response where the other
+ *			     end is the originator of the sequence that is a
+ *			     response to our initial exchange
+ * @mp: The EM that the exchange is on
+ * @fp: The response frame
  */
 static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
 {
@@ -1239,8 +1393,11 @@ out:
 	fc_frame_free(fp);
 }
 
-/*
- * Handle receive for a sequence where other end is responding to our sequence.
+/**
+ * fc_exch_recv_resp() - Handler for a sequence where the other end is
+ *			 responding to our sequence
+ * @mp: The EM that the exchange is on
+ * @fp: The response frame
  */
 static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
 {
@@ -1256,9 +1413,13 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
 	fc_frame_free(fp);
 }
 
-/*
- * Handle the response to an ABTS for exchange or sequence.
- * This can be BA_ACC or BA_RJT.
+/**
+ * fc_exch_abts_resp() - Handler for a response to an ABTS
+ * @ep: The exchange that the frame is on
+ * @fp: The response frame
+ *
+ * This response would be to an ABTS cancelling an exchange or sequence.
+ * The response can be either BA_ACC or BA_RJT
  */
 static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
 {
@@ -1333,9 +1494,12 @@ static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
 
 }
 
-/*
- * Receive BLS sequence.
- * This is always a sequence initiated by the remote side.
+/**
+ * fc_exch_recv_bls() - Handler for a BLS sequence
+ * @mp: The EM that the exchange is on
+ * @fp: The request frame
+ *
+ * The BLS frame is always a sequence initiated by the remote side.
  * We may be either the originator or recipient of the exchange.
  */
 static void fc_exch_recv_bls(struct fc_exch_mgr *mp, struct fc_frame *fp)
@@ -1392,8 +1556,10 @@ static void fc_exch_recv_bls(struct fc_exch_mgr *mp, struct fc_frame *fp)
 		fc_exch_release(ep);	/* release hold taken by fc_exch_find */
 }
 
-/*
- * Accept sequence with LS_ACC.
+/**
+ * fc_seq_ls_acc() - Accept sequence with LS_ACC
+ * @req_sp: The request sequence
+ *
  * If this fails due to allocation or transmit congestion, assume the
  * originator will repeat the sequence.
  */
@@ -1413,8 +1579,12 @@ static void fc_seq_ls_acc(struct fc_seq *req_sp)
 	}
 }
 
-/*
- * Reject sequence with ELS LS_RJT.
+/**
+ * fc_seq_ls_rjt() - Reject a sequence with ELS LS_RJT
+ * @req_sp: The request sequence
+ * @reason: The reason the sequence is being rejected
+ * @explan: The explanation for the rejection
+ *
  * If this fails due to allocation or transmit congestion, assume the
  * originator will repeat the sequence.
  */
@@ -1437,6 +1607,10 @@ static void fc_seq_ls_rjt(struct fc_seq *req_sp, enum fc_els_rjt_reason reason,
 	}
 }
 
+/**
+ * fc_exch_reset() - Reset an exchange
+ * @ep: The exchange to be reset
+ */
 static void fc_exch_reset(struct fc_exch *ep)
 {
 	struct fc_seq *sp;
@@ -1446,12 +1620,6 @@ static void fc_exch_reset(struct fc_exch *ep)
 
 	spin_lock_bh(&ep->ex_lock);
 	ep->state |= FC_EX_RST_CLEANUP;
-	/*
-	 * we really want to call del_timer_sync, but cannot due
-	 * to the lport calling with the lport lock held (some resp
-	 * functions can also grab the lport lock which could cause
-	 * a deadlock).
-	 */
 	if (cancel_delayed_work(&ep->timeout_work))
 		atomic_dec(&ep->ex_refcnt);	/* drop hold for timer */
 	resp = ep->resp;
@@ -1471,16 +1639,16 @@ static void fc_exch_reset(struct fc_exch *ep)
 }
 
 /**
- * fc_exch_pool_reset() - Resets an per cpu exches pool.
- * @lport:	ptr to the local port
- * @pool:	ptr to the per cpu exches pool
- * @sid:	source FC ID
- * @did:	destination FC ID
+ * fc_exch_pool_reset() - Reset a per cpu exchange pool
+ * @lport: The local port that the exchange pool is on
+ * @pool:  The exchange pool to be reset
+ * @sid:   The source ID
+ * @did:   The destination ID
  *
- * Resets an per cpu exches pool, releasing its all sequences
- * and exchanges. If sid is non-zero, then reset only exchanges
- * we sourced from that FID. If did is non-zero, reset only
- * exchanges destined to that FID.
+ * Resets a per cpu exchange pool, releasing all of its sequences
+ * and exchanges. If sid is non-zero then reset only exchanges
+ * we sourced from the local port's FID. If did is non-zero then
+ * only reset exchanges destined for the local port's FID.
  */
 static void fc_exch_pool_reset(struct fc_lport *lport,
 			       struct fc_exch_pool *pool,
@@ -1514,15 +1682,15 @@ restart:
 }
 
 /**
- * fc_exch_mgr_reset() - Resets all EMs of a lport
- * @lport:	ptr to the local port
- * @sid:	source FC ID
- * @did:	destination FC ID
+ * fc_exch_mgr_reset() - Reset all EMs of a local port
+ * @lport: The local port whose EMs are to be reset
+ * @sid:   The source ID
+ * @did:   The destination ID
  *
- * Reset all EMs of a lport, releasing its all sequences and
- * exchanges. If sid is non-zero, then reset only exchanges
- * we sourced from that FID. If did is non-zero, reset only
- * exchanges destined to that FID.
+ * Reset all EMs associated with a given local port. Release all
+ * sequences and exchanges. If sid is non-zero then reset only the
+ * exchanges sent from the local port's FID. If did is non-zero then
+ * reset only exchanges destined for the local port's FID.
  */
 void fc_exch_mgr_reset(struct fc_lport *lport, u32 sid, u32 did)
 {
@@ -1538,8 +1706,11 @@ void fc_exch_mgr_reset(struct fc_lport *lport, u32 sid, u32 did)
 }
 EXPORT_SYMBOL(fc_exch_mgr_reset);
 
-/*
- * Handle incoming ELS REC - Read Exchange Concise.
+/**
+ * fc_exch_els_rec() - Handler for ELS REC (Read Exchange Concise) requests
+ * @sp:	 The sequence the REC is on
+ * @rfp: The REC frame
+ *
  * Note that the requesting port may be different than the S_ID in the request.
  */
 static void fc_exch_els_rec(struct fc_seq *sp, struct fc_frame *rfp)
@@ -1621,10 +1792,11 @@ reject:
 	fc_frame_free(rfp);
 }
 
-/*
- * Handle response from RRQ.
- * Not much to do here, really.
- * Should report errors.
+/**
+ * fc_exch_rrq_resp() - Handler for RRQ responses
+ * @sp:	 The sequence that the RRQ is on
+ * @fp:	 The RRQ frame
+ * @arg: The exchange that the RRQ is on
  *
  * TODO: fix error handler.
  */
@@ -1664,21 +1836,99 @@ cleanup:
 	fc_exch_release(aborted_ep);
 }
 
-/*
- * Send ELS RRQ - Reinstate Recovery Qualifier.
+
+/**
+ * fc_exch_seq_send() - Send a frame using a new exchange and sequence
+ * @lport:	The local port to send the frame on
+ * @fp:		The frame to be sent
+ * @resp:	The response handler for this request
+ * @destructor: The destructor for the exchange
+ * @arg:	The argument to be passed to the response handler
+ * @timer_msec: The timeout period for the exchange
+ *
+ * Some of the frame header's fields must be filled in before calling
+ * this routine; those fields are:
+ *
+ * - routing control
+ * - FC port did
+ * - FC port sid
+ * - FC header type
+ * - frame control
+ * - parameter or relative offset
+ */
+static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport,
+				       struct fc_frame *fp,
+				       void (*resp)(struct fc_seq *,
+						    struct fc_frame *fp,
+						    void *arg),
+				       void (*destructor)(struct fc_seq *,
+							  void *),
+				       void *arg, u32 timer_msec)
+{
+	struct fc_exch *ep;
+	struct fc_seq *sp = NULL;
+	struct fc_frame_header *fh;
+	int rc = 1;
+
+	ep = fc_exch_alloc(lport, fp);
+	if (!ep) {
+		fc_frame_free(fp);
+		return NULL;
+	}
+	ep->esb_stat |= ESB_ST_SEQ_INIT;
+	fh = fc_frame_header_get(fp);
+	fc_exch_set_addr(ep, ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id));
+	ep->resp = resp;
+	ep->destructor = destructor;
+	ep->arg = arg;
+	ep->r_a_tov = FC_DEF_R_A_TOV;
+	ep->lp = lport;
+	sp = &ep->seq;
+
+	ep->fh_type = fh->fh_type; /* save for possible timeout handling */
+	ep->f_ctl = ntoh24(fh->fh_f_ctl);
+	fc_exch_setup_hdr(ep, fp, ep->f_ctl);
+	sp->cnt++;
+
+	if (ep->xid <= lport->lro_xid)
+		fc_fcp_ddp_setup(fr_fsp(fp), ep->xid);
+
+	if (unlikely(lport->tt.frame_send(lport, fp)))
+		goto err;
+
+	if (timer_msec)
+		fc_exch_timer_set_locked(ep, timer_msec);
+	ep->f_ctl &= ~FC_FC_FIRST_SEQ;	/* not first seq */
+
+	if (ep->f_ctl & FC_FC_SEQ_INIT)
+		ep->esb_stat &= ~ESB_ST_SEQ_INIT;
+	spin_unlock_bh(&ep->ex_lock);
+	return sp;
+err:
+	rc = fc_exch_done_locked(ep);
+	spin_unlock_bh(&ep->ex_lock);
+	if (!rc)
+		fc_exch_delete(ep);
+	return NULL;
+}
+
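
With fc_exch_seq_send() now static (the body is the exported version removed further down, unchanged), external users reach it through lport->tt.exch_seq_send, as fc_exch_rrq() below does. Note also that the frame is consumed on failure, which is why the fc_fcp.c hunks later in this diff drop their fc_frame_free() calls after a NULL return. A hedged sketch of the calling convention (my_resp and my_arg are placeholders, not kernel symbols):

	static void my_resp(struct fc_seq *sp, struct fc_frame *fp, void *my_arg)
	{
		if (IS_ERR(fp))
			return;		/* exchange-level error; no frame to free */
		/* ... examine the response payload ... */
		fc_frame_free(fp);
	}

	/* fp's FC header must already be filled in via fc_fill_fc_hdr() */
	sp = lport->tt.exch_seq_send(lport, fp, my_resp, NULL, my_arg,
				     lport->e_d_tov);
	if (!sp)
		return -ENOMEM;	/* fp was freed by the exchange layer */
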
+/**
+ * fc_exch_rrq() - Send an ELS RRQ (Reinstate Recovery Qualifier) command
+ * @ep: The exchange to send the RRQ on
+ *
  * This tells the remote port to stop blocking the use of
  * the exchange and the seq_cnt range.
  */
 static void fc_exch_rrq(struct fc_exch *ep)
 {
-	struct fc_lport *lp;
+	struct fc_lport *lport;
 	struct fc_els_rrq *rrq;
 	struct fc_frame *fp;
 	u32 did;
 
-	lp = ep->lp;
+	lport = ep->lp;
 
-	fp = fc_frame_alloc(lp, sizeof(*rrq));
+	fp = fc_frame_alloc(lport, sizeof(*rrq));
 	if (!fp)
 		goto retry;
 
@@ -1694,10 +1944,11 @@ static void fc_exch_rrq(struct fc_exch *ep)
 		did = ep->sid;
 
 	fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, did,
-		       fc_host_port_id(lp->host), FC_TYPE_ELS,
+		       fc_host_port_id(lport->host), FC_TYPE_ELS,
 		       FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
 
-	if (fc_exch_seq_send(lp, fp, fc_exch_rrq_resp, NULL, ep, lp->e_d_tov))
+	if (fc_exch_seq_send(lport, fp, fc_exch_rrq_resp, NULL, ep,
+			     lport->e_d_tov))
 		return;
 
 retry:
@@ -1714,12 +1965,14 @@ retry:
 }
 
 
-/*
- * Handle incoming ELS RRQ - Reset Recovery Qualifier.
+/**
+ * fc_exch_els_rrq() - Handler for ELS RRQ (Reinstate Recovery Qualifier) requests
+ * @sp: The sequence that the RRQ is on
+ * @fp: The RRQ frame
  */
 static void fc_exch_els_rrq(struct fc_seq *sp, struct fc_frame *fp)
 {
-	struct fc_exch *ep;		/* request or subject exchange */
+	struct fc_exch *ep = NULL;	/* request or subject exchange */
 	struct fc_els_rrq *rp;
 	u32 sid;
 	u16 xid;
@@ -1769,17 +2022,24 @@ static void fc_exch_els_rrq(struct fc_seq *sp, struct fc_frame *fp)
 	 * Send LS_ACC.
 	 */
 	fc_seq_ls_acc(sp);
-	fc_frame_free(fp);
-	return;
+	goto out;
 
 unlock_reject:
 	spin_unlock_bh(&ep->ex_lock);
-	fc_exch_release(ep);	/* drop hold from fc_exch_find */
 reject:
 	fc_seq_ls_rjt(sp, ELS_RJT_LOGIC, explan);
+out:
 	fc_frame_free(fp);
+	if (ep)
+		fc_exch_release(ep);	/* drop hold from fc_exch_find */
 }
 
+/**
+ * fc_exch_mgr_add() - Add an exchange manager to a local port's list of EMs
+ * @lport: The local port to add the exchange manager to
+ * @mp:	   The exchange manager to be added to the local port
+ * @match: The match routine that indicates when this EM should be used
+ */
 struct fc_exch_mgr_anchor *fc_exch_mgr_add(struct fc_lport *lport,
 					   struct fc_exch_mgr *mp,
 					   bool (*match)(struct fc_frame *))
@@ -1799,6 +2059,10 @@ struct fc_exch_mgr_anchor *fc_exch_mgr_add(struct fc_lport *lport,
 }
 EXPORT_SYMBOL(fc_exch_mgr_add);
 
+/**
+ * fc_exch_mgr_destroy() - Destroy an exchange manager
+ * @kref: The reference to the EM to be destroyed
+ */
 static void fc_exch_mgr_destroy(struct kref *kref)
 {
 	struct fc_exch_mgr *mp = container_of(kref, struct fc_exch_mgr, kref);
@@ -1808,6 +2072,10 @@ static void fc_exch_mgr_destroy(struct kref *kref)
 	kfree(mp);
 }
 
+/**
+ * fc_exch_mgr_del() - Delete an EM from a local port's list
+ * @ema: The exchange manager anchor identifying the EM to be deleted
+ */
 void fc_exch_mgr_del(struct fc_exch_mgr_anchor *ema)
 {
 	/* remove EM anchor from EM anchors list */
@@ -1817,7 +2085,35 @@ void fc_exch_mgr_del(struct fc_exch_mgr_anchor *ema)
 }
 EXPORT_SYMBOL(fc_exch_mgr_del);
 
-struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp,
+/**
+ * fc_exch_mgr_list_clone() - Share all exchange manager objects
+ * @src: Source lport to clone exchange managers from
+ * @dst: New lport that takes references to all the exchange managers
+ */
+int fc_exch_mgr_list_clone(struct fc_lport *src, struct fc_lport *dst)
+{
+	struct fc_exch_mgr_anchor *ema, *tmp;
+
+	list_for_each_entry(ema, &src->ema_list, ema_list) {
+		if (!fc_exch_mgr_add(dst, ema->mp, ema->match))
+			goto err;
+	}
+	return 0;
+err:
+	list_for_each_entry_safe(ema, tmp, &dst->ema_list, ema_list)
+		fc_exch_mgr_del(ema);
+	return -ENOMEM;
+}
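
fc_exch_mgr_list_clone() exists for NPIV: a VN_Port should share the physical port's exchange managers (and their XID space) rather than allocating its own. A hedged sketch of a vport-creation path using it (phys_lport and vn_lport are illustrative names):

	/* during vport bring-up, after basic libfc setup of vn_lport: */
	if (fc_exch_mgr_list_clone(phys_lport, vn_lport)) {
		/* the clone unwound its partial list; abort vport creation */
		return -ENOMEM;
	}
	/* vn_lport now holds a kref on each of phys_lport's EMs */
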
+
+/**
+ * fc_exch_mgr_alloc() - Allocate an exchange manager
+ * @lport:   The local port that the new EM will be associated with
+ * @class:   The default FC class for new exchanges
+ * @min_xid: The minimum XID for exchanges from the new EM
+ * @max_xid: The maximum XID for exchanges from the new EM
+ * @match:   The match routine for the new EM
+ */
+struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lport,
 				      enum fc_class class,
 				      u16 min_xid, u16 max_xid,
 				      bool (*match)(struct fc_frame *))
@@ -1830,7 +2126,7 @@ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp,
 
 	if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN ||
 	    (min_xid & fc_cpu_mask) != 0) {
-		FC_LPORT_DBG(lp, "Invalid min_xid 0x:%x and max_xid 0x:%x\n",
+		FC_LPORT_DBG(lport, "Invalid min_xid 0x:%x and max_xid 0x:%x\n",
 			     min_xid, max_xid);
 		return NULL;
 	}
@@ -1873,7 +2169,7 @@ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp,
 	}
 
 	kref_init(&mp->kref);
-	if (!fc_exch_mgr_add(lp, mp, match)) {
+	if (!fc_exch_mgr_add(lport, mp, match)) {
 		free_percpu(mp->pool);
 		goto free_mempool;
 	}
@@ -1894,76 +2190,26 @@ free_mp:
 }
 EXPORT_SYMBOL(fc_exch_mgr_alloc);
 
+/**
+ * fc_exch_mgr_free() - Free all exchange managers on a local port
+ * @lport: The local port whose EMs are to be freed
+ */
 void fc_exch_mgr_free(struct fc_lport *lport)
 {
 	struct fc_exch_mgr_anchor *ema, *next;
 
+	flush_workqueue(fc_exch_workqueue);
 	list_for_each_entry_safe(ema, next, &lport->ema_list, ema_list)
 		fc_exch_mgr_del(ema);
 }
 EXPORT_SYMBOL(fc_exch_mgr_free);
 
-
-struct fc_seq *fc_exch_seq_send(struct fc_lport *lp,
-				struct fc_frame *fp,
-				void (*resp)(struct fc_seq *,
-					     struct fc_frame *fp,
-					     void *arg),
-				void (*destructor)(struct fc_seq *, void *),
-				void *arg, u32 timer_msec)
-{
-	struct fc_exch *ep;
-	struct fc_seq *sp = NULL;
-	struct fc_frame_header *fh;
-	int rc = 1;
-
-	ep = fc_exch_alloc(lp, fp);
-	if (!ep) {
-		fc_frame_free(fp);
-		return NULL;
-	}
-	ep->esb_stat |= ESB_ST_SEQ_INIT;
-	fh = fc_frame_header_get(fp);
-	fc_exch_set_addr(ep, ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id));
-	ep->resp = resp;
-	ep->destructor = destructor;
-	ep->arg = arg;
-	ep->r_a_tov = FC_DEF_R_A_TOV;
-	ep->lp = lp;
-	sp = &ep->seq;
-
-	ep->fh_type = fh->fh_type; /* save for possbile timeout handling */
-	ep->f_ctl = ntoh24(fh->fh_f_ctl);
-	fc_exch_setup_hdr(ep, fp, ep->f_ctl);
-	sp->cnt++;
-
-	if (ep->xid <= lp->lro_xid)
-		fc_fcp_ddp_setup(fr_fsp(fp), ep->xid);
-
-	if (unlikely(lp->tt.frame_send(lp, fp)))
-		goto err;
-
-	if (timer_msec)
-		fc_exch_timer_set_locked(ep, timer_msec);
-	ep->f_ctl &= ~FC_FC_FIRST_SEQ;	/* not first seq */
-
-	if (ep->f_ctl & FC_FC_SEQ_INIT)
-		ep->esb_stat &= ~ESB_ST_SEQ_INIT;
-	spin_unlock_bh(&ep->ex_lock);
-	return sp;
-err:
-	rc = fc_exch_done_locked(ep);
-	spin_unlock_bh(&ep->ex_lock);
-	if (!rc)
-		fc_exch_delete(ep);
-	return NULL;
-}
-EXPORT_SYMBOL(fc_exch_seq_send);
-
-/*
- * Receive a frame
+/**
+ * fc_exch_recv() - Handler for received frames
+ * @lport: The local port the frame was received on
+ * @fp:	   The received frame
  */
-void fc_exch_recv(struct fc_lport *lp, struct fc_frame *fp)
+void fc_exch_recv(struct fc_lport *lport, struct fc_frame *fp)
 {
 	struct fc_frame_header *fh = fc_frame_header_get(fp);
 	struct fc_exch_mgr_anchor *ema;
@@ -1971,8 +2217,8 @@ void fc_exch_recv(struct fc_lport *lp, struct fc_frame *fp)
 	u16 oxid;
 
 	/* lport lock ? */
-	if (!lp || lp->state == LPORT_ST_DISABLED) {
-		FC_LPORT_DBG(lp, "Receiving frames for an lport that "
+	if (!lport || lport->state == LPORT_ST_DISABLED) {
+		FC_LPORT_DBG(lport, "Receiving frames for an lport that "
 			     "has not been initialized correctly\n");
 		fc_frame_free(fp);
 		return;
@@ -1981,7 +2227,7 @@ void fc_exch_recv(struct fc_lport *lp, struct fc_frame *fp)
 	f_ctl = ntoh24(fh->fh_f_ctl);
 	oxid = ntohs(fh->fh_ox_id);
 	if (f_ctl & FC_FC_EX_CTX) {
-		list_for_each_entry(ema, &lp->ema_list, ema_list) {
+		list_for_each_entry(ema, &lport->ema_list, ema_list) {
 			if ((oxid >= ema->mp->min_xid) &&
 			    (oxid <= ema->mp->max_xid)) {
 				found = 1;
@@ -1990,13 +2236,13 @@ void fc_exch_recv(struct fc_lport *lp, struct fc_frame *fp)
 		}
 
 		if (!found) {
-			FC_LPORT_DBG(lp, "Received response for out "
+			FC_LPORT_DBG(lport, "Received response for out "
 				     "of range oxid:%hx\n", oxid);
 			fc_frame_free(fp);
 			return;
 		}
 	} else
-		ema = list_entry(lp->ema_list.prev, typeof(*ema), ema_list);
+		ema = list_entry(lport->ema_list.prev, typeof(*ema), ema_list);
 
 	/*
 	 * If frame is marked invalid, just drop it.
@@ -2015,37 +2261,56 @@ void fc_exch_recv(struct fc_lport *lp, struct fc_frame *fp)
 		else if (f_ctl & FC_FC_SEQ_CTX)
 			fc_exch_recv_resp(ema->mp, fp);
 		else
-			fc_exch_recv_req(lp, ema->mp, fp);
+			fc_exch_recv_req(lport, ema->mp, fp);
 		break;
 	default:
-		FC_LPORT_DBG(lp, "dropping invalid frame (eof %x)", fr_eof(fp));
+		FC_LPORT_DBG(lport, "dropping invalid frame (eof %x)",
+			     fr_eof(fp));
 		fc_frame_free(fp);
 	}
 }
 EXPORT_SYMBOL(fc_exch_recv);
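
The receive path classifies each frame by its F_CTL context bits before handing it to one of four handlers; the BLS and sequence-response branches sit just above the context shown in this hunk. A compact restatement of the mainline dispatch, as a reading aid only (fc_rx_classify() is illustrative, not a kernel function; the FC_FC_* and FC_TYPE_BLS constants are real):

	enum fc_rx_path { RX_BLS, RX_SEQ_RESP, RX_RESP, RX_REQ };

	/* mirrors the dispatch in fc_exch_recv(), a sketch only */
	static enum fc_rx_path fc_rx_classify(u8 fh_type, u32 f_ctl)
	{
		if (fh_type == FC_TYPE_BLS)
			return RX_BLS;		/* fc_exch_recv_bls() */
		if ((f_ctl & (FC_FC_EX_CTX | FC_FC_SEQ_CTX)) == FC_FC_EX_CTX)
			return RX_SEQ_RESP;	/* fc_exch_recv_seq_resp() */
		if (f_ctl & FC_FC_SEQ_CTX)
			return RX_RESP;		/* fc_exch_recv_resp() */
		return RX_REQ;			/* fc_exch_recv_req() */
	}
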
 
-int fc_exch_init(struct fc_lport *lp)
+/**
+ * fc_exch_init() - Initialize the exchange layer for a local port
+ * @lport: The local port to initialize the exchange layer for
+ */
+int fc_exch_init(struct fc_lport *lport)
 {
-	if (!lp->tt.seq_start_next)
-		lp->tt.seq_start_next = fc_seq_start_next;
+	if (!lport->tt.seq_start_next)
+		lport->tt.seq_start_next = fc_seq_start_next;
 
-	if (!lp->tt.exch_seq_send)
-		lp->tt.exch_seq_send = fc_exch_seq_send;
+	if (!lport->tt.exch_seq_send)
+		lport->tt.exch_seq_send = fc_exch_seq_send;
 
-	if (!lp->tt.seq_send)
-		lp->tt.seq_send = fc_seq_send;
+	if (!lport->tt.seq_send)
+		lport->tt.seq_send = fc_seq_send;
 
-	if (!lp->tt.seq_els_rsp_send)
-		lp->tt.seq_els_rsp_send = fc_seq_els_rsp_send;
+	if (!lport->tt.seq_els_rsp_send)
+		lport->tt.seq_els_rsp_send = fc_seq_els_rsp_send;
 
-	if (!lp->tt.exch_done)
-		lp->tt.exch_done = fc_exch_done;
+	if (!lport->tt.exch_done)
+		lport->tt.exch_done = fc_exch_done;
 
-	if (!lp->tt.exch_mgr_reset)
-		lp->tt.exch_mgr_reset = fc_exch_mgr_reset;
+	if (!lport->tt.exch_mgr_reset)
+		lport->tt.exch_mgr_reset = fc_exch_mgr_reset;
 
-	if (!lp->tt.seq_exch_abort)
-		lp->tt.seq_exch_abort = fc_seq_exch_abort;
+	if (!lport->tt.seq_exch_abort)
+		lport->tt.seq_exch_abort = fc_seq_exch_abort;
+
+	return 0;
+}
+EXPORT_SYMBOL(fc_exch_init);
+
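fc_exch_init() only fills transport-template slots that the LLD left NULL, so a driver overrides an operation simply by installing its own routine before calling in. A hedged sketch (my_seq_send is a placeholder):

	/* in an LLD's lport setup, before the libfc init calls: */
	lport->tt.seq_send = my_seq_send;	/* LLD-specific transmit path */
	fc_exch_init(lport);			/* fills the remaining NULLs */
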
+/**
+ * fc_setup_exch_mgr() - Set up the exchange manager infrastructure
+ */
+int fc_setup_exch_mgr(void)
+{
+	fc_em_cachep = kmem_cache_create("libfc_em", sizeof(struct fc_exch),
+					 0, SLAB_HWCACHE_ALIGN, NULL);
+	if (!fc_em_cachep)
+		return -ENOMEM;
 
 	/*
 	 * Initialize fc_cpu_mask and fc_cpu_order. The
@@ -2069,20 +2334,17 @@ int fc_exch_init(struct fc_lport *lp)
 	}
 	fc_cpu_mask--;
 
-	return 0;
-}
-EXPORT_SYMBOL(fc_exch_init);
-
-int fc_setup_exch_mgr(void)
-{
-	fc_em_cachep = kmem_cache_create("libfc_em", sizeof(struct fc_exch),
-					 0, SLAB_HWCACHE_ALIGN, NULL);
-	if (!fc_em_cachep)
+	fc_exch_workqueue = create_singlethread_workqueue("fc_exch_workqueue");
+	if (!fc_exch_workqueue)
 		return -ENOMEM;
 	return 0;
 }
 
-void fc_destroy_exch_mgr(void)
+/**
+ * fc_destroy_exch_mgr() - Tear down the exchange manager infrastructure
+ */
+void fc_destroy_exch_mgr(void)
 {
+	destroy_workqueue(fc_exch_workqueue);
 	kmem_cache_destroy(fc_em_cachep);
 }
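
fc_setup_exch_mgr() and fc_destroy_exch_mgr() now own the slab cache, the per-cpu XID mask, and the new fc_exch_workqueue, so they pair naturally with module init/exit. A hedged sketch of the expected call sites (the surrounding function names are illustrative):

	static int __init libfc_init(void)
	{
		int rc;

		rc = fc_setup_exch_mgr();	/* cache, cpu mask, workqueue */
		if (rc)
			return rc;
		/* ... set up the FCP layer, etc. ... */
		return 0;
	}

	static void __exit libfc_exit(void)
	{
		/* reverse order of setup */
		fc_destroy_exch_mgr();
	}
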
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 59a4408b27b5..c4b58d042f6f 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -39,15 +39,9 @@
 #include <scsi/libfc.h>
 #include <scsi/fc_encode.h>
 
-MODULE_AUTHOR("Open-FCoE.org");
-MODULE_DESCRIPTION("libfc");
-MODULE_LICENSE("GPL v2");
+#include "fc_libfc.h"
 
-unsigned int fc_debug_logging;
-module_param_named(debug_logging, fc_debug_logging, int, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
-
-static struct kmem_cache *scsi_pkt_cachep;
+struct kmem_cache *scsi_pkt_cachep;
 
 /* SRB state definitions */
 #define FC_SRB_FREE		0		/* cmd is free */
@@ -58,7 +52,6 @@ static struct kmem_cache *scsi_pkt_cachep;
 #define FC_SRB_DISCONTIG	(1 << 4)	/* non-sequential data recvd */
 #define FC_SRB_COMPL		(1 << 5)	/* fc_io_compl has been run */
 #define FC_SRB_FCP_PROCESSING_TMO (1 << 6)	/* timer function processing */
-#define FC_SRB_NOMEM		(1 << 7)	/* dropped to out of mem */
 
 #define FC_SRB_READ		(1 << 1)
 #define FC_SRB_WRITE		(1 << 0)
@@ -73,10 +66,20 @@ static struct kmem_cache *scsi_pkt_cachep;
 #define CMD_SCSI_STATUS(Cmnd)	    ((Cmnd)->SCp.Status)
 #define CMD_RESID_LEN(Cmnd)	    ((Cmnd)->SCp.buffers_residual)
 
+/**
+ * struct fc_fcp_internal - FCP layer internal data
+ * @scsi_pkt_pool:  Memory pool to draw FCP packets from
+ * @scsi_pkt_queue: Current FCP packets
+ * @last_can_queue_ramp_down_time: ramp down time
+ * @last_can_queue_ramp_up_time: ramp up time
+ * @max_can_queue: max can_queue size
+ */
 struct fc_fcp_internal {
-	mempool_t	*scsi_pkt_pool;
+	mempool_t	 *scsi_pkt_pool;
 	struct list_head scsi_pkt_queue;
-	u8		throttled;
+	unsigned long last_can_queue_ramp_down_time;
+	unsigned long last_can_queue_ramp_up_time;
+	int max_can_queue;
 };
 
 #define fc_get_scsi_internal(x)	((struct fc_fcp_internal *)(x)->scsi_priv)
@@ -90,9 +93,9 @@ static void fc_fcp_recv(struct fc_seq *, struct fc_frame *, void *);
 static void fc_fcp_resp(struct fc_fcp_pkt *, struct fc_frame *);
 static void fc_fcp_complete_locked(struct fc_fcp_pkt *);
 static void fc_tm_done(struct fc_seq *, struct fc_frame *, void *);
-static void fc_fcp_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp);
+static void fc_fcp_error(struct fc_fcp_pkt *, struct fc_frame *);
 static void fc_timeout_error(struct fc_fcp_pkt *);
-static void fc_fcp_timeout(unsigned long data);
+static void fc_fcp_timeout(unsigned long);
 static void fc_fcp_rec(struct fc_fcp_pkt *);
 static void fc_fcp_rec_error(struct fc_fcp_pkt *, struct fc_frame *);
 static void fc_fcp_rec_resp(struct fc_seq *, struct fc_frame *, void *);
@@ -124,6 +127,7 @@ static void fc_fcp_srr_error(struct fc_fcp_pkt *, struct fc_frame *);
 #define FC_SCSI_TM_TOV		(10 * HZ)
 #define FC_SCSI_REC_TOV		(2 * HZ)
 #define FC_HOST_RESET_TIMEOUT	(30 * HZ)
+#define FC_CAN_QUEUE_PERIOD	(60 * HZ)
 
 #define FC_MAX_ERROR_CNT	5
 #define FC_MAX_RECOV_RETRY	3
@@ -131,23 +135,22 @@ static void fc_fcp_srr_error(struct fc_fcp_pkt *, struct fc_frame *);
 #define FC_FCP_DFLT_QUEUE_DEPTH 32
 
 /**
- * fc_fcp_pkt_alloc - allocation routine for scsi_pkt packet
- * @lp:		fc lport struct
- * @gfp:	gfp flags for allocation
+ * fc_fcp_pkt_alloc() - Allocate a fcp_pkt
+ * @lport: The local port that the FCP packet is for
+ * @gfp:   GFP flags for allocation
  *
- * This is used by upper layer scsi driver.
- * Return Value : scsi_pkt structure or null on allocation failure.
- * Context	: call from process context. no locking required.
+ * Return value: fcp_pkt structure or null on allocation failure.
+ * Context:	 Can be called from process context, no lock is required.
  */
-static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lp, gfp_t gfp)
+static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lport, gfp_t gfp)
 {
-	struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
+	struct fc_fcp_internal *si = fc_get_scsi_internal(lport);
 	struct fc_fcp_pkt *fsp;
 
 	fsp = mempool_alloc(si->scsi_pkt_pool, gfp);
 	if (fsp) {
 		memset(fsp, 0, sizeof(*fsp));
-		fsp->lp = lp;
+		fsp->lp = lport;
 		atomic_set(&fsp->ref_cnt, 1);
 		init_timer(&fsp->timer);
 		INIT_LIST_HEAD(&fsp->list);
@@ -157,12 +160,11 @@ static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lp, gfp_t gfp)
 }
 
 /**
- * fc_fcp_pkt_release() - release hold on scsi_pkt packet
- * @fsp:	fcp packet struct
+ * fc_fcp_pkt_release() - Release hold on a fcp_pkt
+ * @fsp: The FCP packet to be released
  *
- * This is used by upper layer scsi driver.
- * Context	: call from process  and interrupt context.
- *		  no locking required
+ * Context: Can be called from process or interrupt context,
+ *	    no lock is required.
  */
 static void fc_fcp_pkt_release(struct fc_fcp_pkt *fsp)
 {
@@ -173,20 +175,25 @@ static void fc_fcp_pkt_release(struct fc_fcp_pkt *fsp)
 	}
 }
 
+/**
+ * fc_fcp_pkt_hold() - Hold a fcp_pkt
+ * @fsp: The FCP packet to be held
+ */
 static void fc_fcp_pkt_hold(struct fc_fcp_pkt *fsp)
 {
 	atomic_inc(&fsp->ref_cnt);
 }
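
The hold/release pair is a plain atomic reference count over the mempool object; fc_fcp_pkt_release(), partially visible above, is its other half. Reduced to the essential pattern (a sketch matching the fields used here):

	/* take a reference while another context can still see the packet */
	atomic_inc(&fsp->ref_cnt);

	/* drop it; the final reference returns the packet to its mempool */
	if (atomic_dec_and_test(&fsp->ref_cnt)) {
		struct fc_fcp_internal *si = fc_get_scsi_internal(fsp->lp);

		mempool_free(fsp, si->scsi_pkt_pool);
	}
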
 
 /**
- * fc_fcp_pkt_destory() - release hold on scsi_pkt packet
- * @seq:		exchange sequence
- * @fsp:	fcp packet struct
+ * fc_fcp_pkt_destroy() - Release hold on a fcp_pkt
+ * @seq: The sequence that the FCP packet is on (required by destructor API)
+ * @fsp: The FCP packet to be released
+ *
+ * This routine is registered as a destructor callback via the
+ * exch_seq_send() routine of the libfc Transport Template. The
+ * 'struct fc_seq' is a required argument even though it is not
+ * used by this routine.
  *
- * Release hold on scsi_pkt packet set to keep scsi_pkt
- * till EM layer exch resource is not freed.
- * Context	: called from from EM layer.
- *		  no locking required
+ * Context: No locking required.
  */
 static void fc_fcp_pkt_destroy(struct fc_seq *seq, void *fsp)
 {
@@ -194,10 +201,10 @@ static void fc_fcp_pkt_destroy(struct fc_seq *seq, void *fsp)
 }
 
 /**
- * fc_fcp_lock_pkt() - lock a packet and get a ref to it.
- * @fsp:	fcp packet
+ * fc_fcp_lock_pkt() - Lock a fcp_pkt and increase its reference count
+ * @fsp: The FCP packet to be locked and incremented
  *
- * We should only return error if we return a command to scsi-ml before
+ * We should only return error if we return a command to SCSI-ml before
  * getting a response. This can happen in cases where we send a abort, but
  * do not wait for the response and the abort and command can be passing
  * each other on the wire/network-layer.
@@ -222,18 +229,33 @@ static inline int fc_fcp_lock_pkt(struct fc_fcp_pkt *fsp)
 	return 0;
 }
 
+/**
+ * fc_fcp_unlock_pkt() - Release a fcp_pkt's lock and decrement its
+ *			 reference count
+ * @fsp: The FCP packet to be unlocked and decremented
+ */
 static inline void fc_fcp_unlock_pkt(struct fc_fcp_pkt *fsp)
 {
 	spin_unlock_bh(&fsp->scsi_pkt_lock);
 	fc_fcp_pkt_release(fsp);
 }
 
+/**
+ * fc_fcp_timer_set() - Start a timer for a fcp_pkt
+ * @fsp:   The FCP packet to start a timer for
+ * @delay: The timeout period for the timer
+ */
 static void fc_fcp_timer_set(struct fc_fcp_pkt *fsp, unsigned long delay)
 {
 	if (!(fsp->state & FC_SRB_COMPL))
 		mod_timer(&fsp->timer, jiffies + delay);
 }
 
+/**
+ * fc_fcp_send_abort() - Send an abort for exchanges associated with a
+ *			 fcp_pkt
+ * @fsp: The FCP packet to abort exchanges on
+ */
 static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp)
 {
 	if (!fsp->seq_ptr)
@@ -243,9 +265,14 @@ static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp)
 	return fsp->lp->tt.seq_exch_abort(fsp->seq_ptr, 0);
 }
 
-/*
- * Retry command.
- * An abort isn't needed.
+/**
+ * fc_fcp_retry_cmd() - Retry a fcp_pkt
+ * @fsp: The FCP packet to be retried
+ *
+ * Sets the status code to be FC_ERROR and then calls
+ * fc_fcp_complete_locked() which in turn calls fc_io_compl().
+ * fc_io_compl() will notify the SCSI-ml that the I/O is done.
+ * The SCSI-ml will retry the command.
  */
 static void fc_fcp_retry_cmd(struct fc_fcp_pkt *fsp)
 {
@@ -260,64 +287,146 @@ static void fc_fcp_retry_cmd(struct fc_fcp_pkt *fsp)
 	fc_fcp_complete_locked(fsp);
 }
 
-/*
- * fc_fcp_ddp_setup - calls to LLD's ddp_setup to set up DDP
- * transfer for a read I/O indicated by the fc_fcp_pkt.
- * @fsp: ptr to the fc_fcp_pkt
- *
- * This is called in exch_seq_send() when we have a newly allocated
- * exchange with a valid exchange id to setup ddp.
- *
- * returns: none
+/**
+ * fc_fcp_ddp_setup() - Calls a LLD's ddp_setup routine to set up DDP context
+ * @fsp: The FCP packet that will manage the DDP frames
+ * @xid: The XID that will be used for the DDP exchange
  */
 void fc_fcp_ddp_setup(struct fc_fcp_pkt *fsp, u16 xid)
 {
-	struct fc_lport *lp;
+	struct fc_lport *lport;
 
 	if (!fsp)
 		return;
 
-	lp = fsp->lp;
+	lport = fsp->lp;
 	if ((fsp->req_flags & FC_SRB_READ) &&
-	    (lp->lro_enabled) && (lp->tt.ddp_setup)) {
-		if (lp->tt.ddp_setup(lp, xid, scsi_sglist(fsp->cmd),
-				     scsi_sg_count(fsp->cmd)))
+	    (lport->lro_enabled) && (lport->tt.ddp_setup)) {
+		if (lport->tt.ddp_setup(lport, xid, scsi_sglist(fsp->cmd),
+					scsi_sg_count(fsp->cmd)))
 			fsp->xfer_ddp = xid;
 	}
 }
-EXPORT_SYMBOL(fc_fcp_ddp_setup);
 
-/*
- * fc_fcp_ddp_done - calls to LLD's ddp_done to release any
- * DDP related resources for this I/O if it is initialized
- * as a ddp transfer
- * @fsp: ptr to the fc_fcp_pkt
- *
- * returns: none
+/**
+ * fc_fcp_ddp_done() - Calls a LLD's ddp_done routine to release any
+ *		       DDP related resources for a fcp_pkt
+ * @fsp: The FCP packet that DDP had been used on
  */
 static void fc_fcp_ddp_done(struct fc_fcp_pkt *fsp)
 {
-	struct fc_lport *lp;
+	struct fc_lport *lport;
 
 	if (!fsp)
 		return;
 
-	lp = fsp->lp;
-	if (fsp->xfer_ddp && lp->tt.ddp_done) {
-		fsp->xfer_len = lp->tt.ddp_done(lp, fsp->xfer_ddp);
-		fsp->xfer_ddp = 0;
+	if (fsp->xfer_ddp == FC_XID_UNKNOWN)
+		return;
+
+	lport = fsp->lp;
+	if (lport->tt.ddp_done) {
+		fsp->xfer_len = lport->tt.ddp_done(lport, fsp->xfer_ddp);
+		fsp->xfer_ddp = FC_XID_UNKNOWN;
 	}
 }
 
+/**
+ * fc_fcp_can_queue_ramp_up() - increases can_queue
+ * @lport: lport to ramp up can_queue
+ *
+ * Locking notes: Called with Scsi_Host lock held
+ */
+static void fc_fcp_can_queue_ramp_up(struct fc_lport *lport)
+{
+	struct fc_fcp_internal *si = fc_get_scsi_internal(lport);
+	int can_queue;
+
+	if (si->last_can_queue_ramp_up_time &&
+	    (time_before(jiffies, si->last_can_queue_ramp_up_time +
+			 FC_CAN_QUEUE_PERIOD)))
+		return;
+
+	if (time_before(jiffies, si->last_can_queue_ramp_down_time +
+			FC_CAN_QUEUE_PERIOD))
+		return;
+
+	si->last_can_queue_ramp_up_time = jiffies;
+
+	can_queue = lport->host->can_queue << 1;
+	if (can_queue >= si->max_can_queue) {
+		can_queue = si->max_can_queue;
+		si->last_can_queue_ramp_down_time = 0;
+	}
+	lport->host->can_queue = can_queue;
+	shost_printk(KERN_ERR, lport->host, "libfc: increased "
+		     "can_queue to %d.\n", can_queue);
+}
+
+/**
+ * fc_fcp_can_queue_ramp_down() - reduces can_queue
+ * @lport: lport to reduce can_queue
+ *
+ * If we are getting memory allocation failures, then we may
+ * be trying to execute too many commands. We let the running
+ * commands complete or timeout, then try again with a reduced
+ * can_queue. Eventually we will hit the point where we run
+ * entirely on the reserved structures.
+ *
+ * Locking notes: Called with Scsi_Host lock held
+ */
+static void fc_fcp_can_queue_ramp_down(struct fc_lport *lport)
+{
+	struct fc_fcp_internal *si = fc_get_scsi_internal(lport);
+	int can_queue;
+
+	if (si->last_can_queue_ramp_down_time &&
+	    (time_before(jiffies, si->last_can_queue_ramp_down_time +
+			 FC_CAN_QUEUE_PERIOD)))
+		return;
+
+	si->last_can_queue_ramp_down_time = jiffies;
+
+	can_queue = lport->host->can_queue;
+	can_queue >>= 1;
+	if (!can_queue)
+		can_queue = 1;
+	lport->host->can_queue = can_queue;
+	shost_printk(KERN_ERR, lport->host, "libfc: Could not allocate frame.\n"
+		     "Reducing can_queue to %d.\n", can_queue);
+}
 
 /*
- * Receive SCSI data from target.
- * Called after receiving solicited data.
+ * fc_fcp_frame_alloc() - Allocate an fc_frame structure and buffer
+ * @lport: The local port to allocate the frame for
+ * @len:   The payload length
+ *
+ * Allocates an fc_frame structure and buffer; if the allocation
+ * fails, can_queue is ramped down.
+ */
+static inline struct fc_frame *fc_fcp_frame_alloc(struct fc_lport *lport,
+						  size_t len)
+{
+	struct fc_frame *fp;
+	unsigned long flags;
+
+	fp = fc_frame_alloc(lport, len);
+	if (!fp) {
+		spin_lock_irqsave(lport->host->host_lock, flags);
+		fc_fcp_can_queue_ramp_down(lport);
+		spin_unlock_irqrestore(lport->host->host_lock, flags);
+	}
+	return fp;
+}
+
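Taken together, the two ramp routines and fc_fcp_frame_alloc() implement a multiplicative backoff on allocation pressure: halve can_queue whenever a frame allocation fails, then double it back, capped at the saved maximum, once FC_CAN_QUEUE_PERIOD passes without further failures. The policy in isolation (a plain C sketch with illustrative names; the real routines also rate-limit via jiffies timestamps):

	/* on allocation failure: halve, but never below one command */
	static int ramp_down(int can_queue)
	{
		can_queue >>= 1;
		return can_queue ? can_queue : 1;
	}

	/* after a quiet FC_CAN_QUEUE_PERIOD: double, capped at the maximum */
	static int ramp_up(int can_queue, int max_can_queue)
	{
		can_queue <<= 1;
		return can_queue < max_can_queue ? can_queue : max_can_queue;
	}
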
+/**
+ * fc_fcp_recv_data() - Handler for receiving SCSI-FCP data from a target
+ * @fsp: The FCP packet the data is on
+ * @fp:	 The data frame
  */
 static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
 {
 	struct scsi_cmnd *sc = fsp->cmd;
-	struct fc_lport *lp = fsp->lp;
+	struct fc_lport *lport = fsp->lp;
 	struct fcoe_dev_stats *stats;
 	struct fc_frame_header *fh;
 	size_t start_offset;
@@ -327,7 +436,7 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
 	size_t len;
 	void *buf;
 	struct scatterlist *sg;
-	size_t remaining;
+	u32 nents;
 
 	fh = fc_frame_header_get(fp);
 	offset = ntohl(fh->fh_parm_offset);
@@ -351,65 +460,29 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
 	if (offset != fsp->xfer_len)
 		fsp->state |= FC_SRB_DISCONTIG;
 
-	crc = 0;
-	if (fr_flags(fp) & FCPHF_CRC_UNCHECKED)
-		crc = crc32(~0, (u8 *) fh, sizeof(*fh));
-
 	sg = scsi_sglist(sc);
-	remaining = len;
-
-	while (remaining > 0 && sg) {
-		size_t off;
-		void *page_addr;
-		size_t sg_bytes;
+	nents = scsi_sg_count(sc);
 
-		if (offset >= sg->length) {
-			offset -= sg->length;
-			sg = sg_next(sg);
-			continue;
-		}
-		sg_bytes = min(remaining, sg->length - offset);
-
-		/*
-		 * The scatterlist item may be bigger than PAGE_SIZE,
-		 * but we are limited to mapping PAGE_SIZE at a time.
-		 */
-		off = offset + sg->offset;
-		sg_bytes = min(sg_bytes, (size_t)
-			       (PAGE_SIZE - (off & ~PAGE_MASK)));
-		page_addr = kmap_atomic(sg_page(sg) + (off >> PAGE_SHIFT),
-					KM_SOFTIRQ0);
-		if (!page_addr)
-			break;		/* XXX panic? */
-
-		if (fr_flags(fp) & FCPHF_CRC_UNCHECKED)
-			crc = crc32(crc, buf, sg_bytes);
-		memcpy((char *)page_addr + (off & ~PAGE_MASK), buf,
-		       sg_bytes);
-
-		kunmap_atomic(page_addr, KM_SOFTIRQ0);
-		buf += sg_bytes;
-		offset += sg_bytes;
-		remaining -= sg_bytes;
-		copy_len += sg_bytes;
-	}
-
-	if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) {
+	if (!(fr_flags(fp) & FCPHF_CRC_UNCHECKED)) {
+		copy_len = fc_copy_buffer_to_sglist(buf, len, sg, &nents,
+						    &offset, KM_SOFTIRQ0, NULL);
+	} else {
+		crc = crc32(~0, (u8 *) fh, sizeof(*fh));
+		copy_len = fc_copy_buffer_to_sglist(buf, len, sg, &nents,
+						    &offset, KM_SOFTIRQ0, &crc);
 		buf = fc_frame_payload_get(fp, 0);
-		if (len % 4) {
+		if (len % 4)
 			crc = crc32(crc, buf + len, 4 - (len % 4));
-			len += 4 - (len % 4);
-		}
 
 		if (~crc != le32_to_cpu(fr_crc(fp))) {
 crc_err:
-			stats = fc_lport_get_stats(lp);
+			stats = fc_lport_get_stats(lport);
 			stats->ErrorFrames++;
 			/* FIXME - per cpu count, not total count! */
 			if (stats->InvalidCRCCount++ < 5)
 				printk(KERN_WARNING "libfc: CRC error on data "
 				       "frame for port (%6x)\n",
-				       fc_host_port_id(lp->host));
+				       fc_host_port_id(lport->host));
 			/*
 			 * Assume the frame is total garbage.
 			 * We may have copied it over the good part
@@ -437,18 +510,17 @@ crc_err:
 }
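
The len % 4 handling above reflects that the FC CRC is computed over a payload padded to a word boundary, so trailing fill bytes must be folded in before comparing against the frame trailer. The check, collapsed to a flat buffer for clarity (a sketch; in the function above the payload portion of the CRC is accumulated inside fc_copy_buffer_to_sglist()):

	u32 crc = crc32(~0, (u8 *)fh, sizeof(*fh));	/* 24-byte header */
	crc = crc32(crc, buf, len);			/* payload bytes */
	if (len % 4)					/* pad to a word */
		crc = crc32(crc, buf + len, 4 - (len % 4));
	if (~crc != le32_to_cpu(fr_crc(fp)))
		return -EINVAL;	/* counted as InvalidCRCCount, frame dropped */
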
 
 /**
- * fc_fcp_send_data() -  Send SCSI data to target.
- * @fsp: ptr to fc_fcp_pkt
- * @sp: ptr to this sequence
- * @offset: starting offset for this data request
- * @seq_blen: the burst length for this data request
+ * fc_fcp_send_data() - Send SCSI data to a target
+ * @fsp:      The FCP packet the data is on
+ * @sp:	      The sequence the data is to be sent on
+ * @offset:   The starting offset for this data request
+ * @seq_blen: The burst length for this data request
  *
  * Called after receiving a Transfer Ready data descriptor.
- * if LLD is capable of seq offload then send down seq_blen
- * size of data in single frame, otherwise send multiple FC
- * frames of max FC frame payload supported by target port.
- *
- * Returns : 0 for success.
+ * If the LLD is capable of sequence offload then send down the
+ * seq_blen ammount of data in single frame, otherwise send
+ * multiple frames of the maximum frame payload supported by
+ * the target port.
  */
 static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
 			    size_t offset, size_t seq_blen)
@@ -457,16 +529,18 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
 	struct scsi_cmnd *sc;
 	struct scatterlist *sg;
 	struct fc_frame *fp = NULL;
-	struct fc_lport *lp = fsp->lp;
+	struct fc_lport *lport = fsp->lp;
+	struct page *page;
 	size_t remaining;
 	size_t t_blen;
 	size_t tlen;
 	size_t sg_bytes;
 	size_t frame_offset, fh_parm_offset;
+	size_t off;
 	int error;
 	void *data = NULL;
 	void *page_addr;
-	int using_sg = lp->sg_supp;
+	int using_sg = lport->sg_supp;
 	u32 f_ctl;
 
 	WARN_ON(seq_blen <= 0);
@@ -488,10 +562,10 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
 	 * to max FC frame payload previously set in fsp->max_payload.
 	 */
 	t_blen = fsp->max_payload;
-	if (lp->seq_offload) {
-		t_blen = min(seq_blen, (size_t)lp->lso_max);
+	if (lport->seq_offload) {
+		t_blen = min(seq_blen, (size_t)lport->lso_max);
 		FC_FCP_DBG(fsp, "fsp=%p:lso:blen=%zx lso_max=0x%x t_blen=%zx\n",
-			   fsp, seq_blen, lp->lso_max, t_blen);
+			   fsp, seq_blen, lport->lso_max, t_blen);
 	}
 
 	WARN_ON(t_blen < FC_MIN_MAX_PAYLOAD);
@@ -503,7 +577,7 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
 	remaining = seq_blen;
 	fh_parm_offset = frame_offset = offset;
 	tlen = 0;
-	seq = lp->tt.seq_start_next(seq);
+	seq = lport->tt.seq_start_next(seq);
 	f_ctl = FC_FC_REL_OFF;
 	WARN_ON(!seq);
 
@@ -525,43 +599,34 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
 			 */
 			if (tlen % 4)
 				using_sg = 0;
-			if (using_sg) {
-				fp = _fc_frame_alloc(lp, 0);
-				if (!fp)
-					return -ENOMEM;
-			} else {
-				fp = fc_frame_alloc(lp, tlen);
-				if (!fp)
-					return -ENOMEM;
+			fp = fc_frame_alloc(lport, using_sg ? 0 : tlen);
+			if (!fp)
+				return -ENOMEM;
 
-				data = (void *)(fr_hdr(fp)) +
-					sizeof(struct fc_frame_header);
-			}
+			data = fc_frame_header_get(fp) + 1;
 			fh_parm_offset = frame_offset;
 			fr_max_payload(fp) = fsp->max_payload;
 		}
+
+		off = offset + sg->offset;
 		sg_bytes = min(tlen, sg->length - offset);
+		sg_bytes = min(sg_bytes,
+			       (size_t) (PAGE_SIZE - (off & ~PAGE_MASK)));
+		page = sg_page(sg) + (off >> PAGE_SHIFT);
 		if (using_sg) {
-			get_page(sg_page(sg));
+			get_page(page);
 			skb_fill_page_desc(fp_skb(fp),
 					   skb_shinfo(fp_skb(fp))->nr_frags,
-					   sg_page(sg), sg->offset + offset,
-					   sg_bytes);
+					   page, off & ~PAGE_MASK, sg_bytes);
 			fp_skb(fp)->data_len += sg_bytes;
 			fr_len(fp) += sg_bytes;
 			fp_skb(fp)->truesize += PAGE_SIZE;
 		} else {
-			size_t off = offset + sg->offset;
-
 			/*
 			 * The scatterlist item may be bigger than PAGE_SIZE,
 			 * but we must not cross pages inside the kmap.
 			 */
-			sg_bytes = min(sg_bytes, (size_t) (PAGE_SIZE -
-							   (off & ~PAGE_MASK)));
-			page_addr = kmap_atomic(sg_page(sg) +
-						(off >> PAGE_SHIFT),
-						KM_SOFTIRQ0);
+			page_addr = kmap_atomic(page, KM_SOFTIRQ0);
 			memcpy(data, (char *)page_addr + (off & ~PAGE_MASK),
 			       sg_bytes);
 			kunmap_atomic(page_addr, KM_SOFTIRQ0);
@@ -572,7 +637,8 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
 		tlen -= sg_bytes;
 		remaining -= sg_bytes;
 
-		if (tlen)
+		if ((skb_shinfo(fp_skb(fp))->nr_frags < FC_FRAME_SG_LEN) &&
+		    (tlen))
 			continue;
 
 		/*
@@ -589,7 +655,7 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
 		/*
 		 * send fragment using for a sequence.
 		 */
-		error = lp->tt.seq_send(lp, seq, fp);
+		error = lport->tt.seq_send(lport, seq, fp);
 		if (error) {
 			WARN_ON(1);		/* send error should be rare */
 			fc_fcp_retry_cmd(fsp);
@@ -601,6 +667,11 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
 	return 0;
 }
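
The chunking in fc_fcp_send_data() bounds each pass at two limits: the scatterlist entry and the page containing the current offset, since kmap_atomic() maps a single page at a time. A worked example of the arithmetic from the hunk above (the numbers are illustrative):

	/*
	 * With sg->offset = 512, offset = 6000, sg->length = 16384,
	 * tlen = 2048 and PAGE_SIZE = 4096:
	 *
	 *   off      = 6000 + 512                 = 6512
	 *   sg bound = min(2048, 16384 - 6000)    = 2048
	 *   pg bound = 4096 - (6512 & 4095)       = 1680
	 *   page     = sg_page(sg) + (6512 >> 12) = second page of the entry
	 *
	 * so this pass maps one page and copies 1680 bytes; the
	 * remaining 368 bytes of tlen go in the next iteration.
	 */
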
 
+/**
+ * fc_fcp_abts_resp() - Handle an ABTS response
+ * @fsp: The FCP packet that is being aborted
+ * @fp:	 The response frame
+ */
 static void fc_fcp_abts_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
 {
 	int ba_done = 1;
@@ -637,46 +708,13 @@ static void fc_fcp_abts_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
 }
 
 /**
- * fc_fcp_reduce_can_queue() - drop can_queue
- * @lp: lport to drop queueing for
- *
- * If we are getting memory allocation failures, then we may
- * be trying to execute too many commands. We let the running
- * commands complete or timeout, then try again with a reduced
- * can_queue. Eventually we will hit the point where we run
- * on all reserved structs.
- */
-static void fc_fcp_reduce_can_queue(struct fc_lport *lp)
-{
-	struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
-	unsigned long flags;
-	int can_queue;
-
-	spin_lock_irqsave(lp->host->host_lock, flags);
-	if (si->throttled)
-		goto done;
-	si->throttled = 1;
-
-	can_queue = lp->host->can_queue;
-	can_queue >>= 1;
-	if (!can_queue)
-		can_queue = 1;
-	lp->host->can_queue = can_queue;
-	shost_printk(KERN_ERR, lp->host, "libfc: Could not allocate frame.\n"
-		     "Reducing can_queue to %d.\n", can_queue);
-done:
-	spin_unlock_irqrestore(lp->host->host_lock, flags);
-}
-
-/**
- * fc_fcp_recv() - Reveive FCP frames
+ * fc_fcp_recv() - Receive an FCP frame
  * @seq: The sequence the frame is on
- * @fp: The FC frame
+ * @fp:	 The received frame
  * @arg: The related FCP packet
  *
- * Return   : None
- * Context  : called from Soft IRQ context
- *	      can not called holding list lock
+ * Context: Called from Soft IRQ context. Cannot be called
+ *	    while holding the FCP packet list lock.
  */
 static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg)
 {
@@ -687,8 +725,10 @@ static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg)
 	u8 r_ctl;
 	int rc = 0;
 
-	if (IS_ERR(fp))
-		goto errout;
+	if (IS_ERR(fp)) {
+		fc_fcp_error(fsp, fp);
+		return;
+	}
 
 	fh = fc_frame_header_get(fp);
 	r_ctl = fh->fh_r_ctl;
@@ -721,8 +761,6 @@ static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg)
 				      (size_t) ntohl(dd->ft_burst_len));
 		if (!rc)
 			seq->rec_data = fsp->xfer_len;
-		else if (rc == -ENOMEM)
-			fsp->state |= FC_SRB_NOMEM;
 	} else if (r_ctl == FC_RCTL_DD_SOL_DATA) {
 		/*
 		 * received a DATA frame
@@ -742,13 +780,13 @@ unlock:
 	fc_fcp_unlock_pkt(fsp);
 out:
 	fc_frame_free(fp);
-errout:
-	if (IS_ERR(fp))
-		fc_fcp_error(fsp, fp);
-	else if (rc == -ENOMEM)
-		fc_fcp_reduce_can_queue(lport);
 }
 
+/**
+ * fc_fcp_resp() - Handler for FCP responses
+ * @fsp: The FCP packet the response is for
+ * @fp:	 The response frame
+ */
 static void fc_fcp_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
 {
 	struct fc_frame_header *fh;
@@ -862,15 +900,16 @@ err:
 }
 
 /**
- * fc_fcp_complete_locked() - complete processing of a fcp packet
- * @fsp:	fcp packet
+ * fc_fcp_complete_locked() - Complete processing of a fcp_pkt with the
+ *			      fcp_pkt lock held
+ * @fsp: The FCP packet to be completed
  *
  * This function may sleep if a timer is pending. The packet lock must be
  * held, and the host lock must not be held.
  */
 static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp)
 {
-	struct fc_lport *lp = fsp->lp;
+	struct fc_lport *lport = fsp->lp;
 	struct fc_seq *seq;
 	struct fc_exch *ep;
 	u32 f_ctl;
@@ -901,8 +940,8 @@ static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp)
 			struct fc_frame *conf_frame;
 			struct fc_seq *csp;
 
-			csp = lp->tt.seq_start_next(seq);
-			conf_frame = fc_frame_alloc(fsp->lp, 0);
+			csp = lport->tt.seq_start_next(seq);
+			conf_frame = fc_fcp_frame_alloc(fsp->lp, 0);
 			if (conf_frame) {
 				f_ctl = FC_FC_SEQ_INIT;
 				f_ctl |= FC_FC_LAST_SEQ | FC_FC_END_SEQ;
@@ -910,43 +949,48 @@ static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp)
 				fc_fill_fc_hdr(conf_frame, FC_RCTL_DD_SOL_CTL,
 					       ep->did, ep->sid,
 					       FC_TYPE_FCP, f_ctl, 0);
-				lp->tt.seq_send(lp, csp, conf_frame);
+				lport->tt.seq_send(lport, csp, conf_frame);
 			}
 		}
-		lp->tt.exch_done(seq);
+		lport->tt.exch_done(seq);
 	}
 	fc_io_compl(fsp);
 }
 
+/**
+ * fc_fcp_cleanup_cmd() - Cancel the active exchange on a fcp_pkt
+ * @fsp:   The FCP packet whose exchanges should be canceled
+ * @error: The reason for the cancellation
+ */
 static void fc_fcp_cleanup_cmd(struct fc_fcp_pkt *fsp, int error)
 {
-	struct fc_lport *lp = fsp->lp;
+	struct fc_lport *lport = fsp->lp;
 
 	if (fsp->seq_ptr) {
-		lp->tt.exch_done(fsp->seq_ptr);
+		lport->tt.exch_done(fsp->seq_ptr);
 		fsp->seq_ptr = NULL;
 	}
 	fsp->status_code = error;
 }
 
 /**
- * fc_fcp_cleanup_each_cmd() - Cleanup active commads
- * @lp:		logical port
- * @id:		target id
- * @lun:	lun
- * @error:	fsp status code
+ * fc_fcp_cleanup_each_cmd() - Cancel all exchanges on a local port
+ * @lport: The local port whose exchanges should be canceled
+ * @id:	   The target's ID
+ * @lun:   The LUN
+ * @error: The reason for cancellation
  *
  * If lun or id is -1, they are ignored.
  */
-static void fc_fcp_cleanup_each_cmd(struct fc_lport *lp, unsigned int id,
+static void fc_fcp_cleanup_each_cmd(struct fc_lport *lport, unsigned int id,
 				    unsigned int lun, int error)
 {
-	struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
+	struct fc_fcp_internal *si = fc_get_scsi_internal(lport);
 	struct fc_fcp_pkt *fsp;
 	struct scsi_cmnd *sc_cmd;
 	unsigned long flags;
 
-	spin_lock_irqsave(lp->host->host_lock, flags);
+	spin_lock_irqsave(lport->host->host_lock, flags);
 restart:
 	list_for_each_entry(fsp, &si->scsi_pkt_queue, list) {
 		sc_cmd = fsp->cmd;
@@ -957,7 +1001,7 @@ restart:
 			continue;
 
 		fc_fcp_pkt_hold(fsp);
-		spin_unlock_irqrestore(lp->host->host_lock, flags);
+		spin_unlock_irqrestore(lport->host->host_lock, flags);
 
 		if (!fc_fcp_lock_pkt(fsp)) {
 			fc_fcp_cleanup_cmd(fsp, error);
@@ -966,35 +1010,36 @@ restart:
 		}
 
 		fc_fcp_pkt_release(fsp);
-		spin_lock_irqsave(lp->host->host_lock, flags);
+		spin_lock_irqsave(lport->host->host_lock, flags);
 		/*
 		 * while we dropped the lock multiple pkts could
 		 * have been released, so we have to start over.
 		 */
 		goto restart;
 	}
-	spin_unlock_irqrestore(lp->host->host_lock, flags);
+	spin_unlock_irqrestore(lport->host->host_lock, flags);
 }
 
-static void fc_fcp_abort_io(struct fc_lport *lp)
+/**
+ * fc_fcp_abort_io() - Abort all FCP-SCSI exchanges on a local port
+ * @lport: The local port whose exchanges are to be aborted
+ */
+static void fc_fcp_abort_io(struct fc_lport *lport)
 {
-	fc_fcp_cleanup_each_cmd(lp, -1, -1, FC_HRD_ERROR);
+	fc_fcp_cleanup_each_cmd(lport, -1, -1, FC_HRD_ERROR);
 }
 
 /**
- * fc_fcp_pkt_send() - send a fcp packet to the lower level.
- * @lp:		fc lport
- * @fsp:	fc packet.
+ * fc_fcp_pkt_send() - Send a fcp_pkt
+ * @lport: The local port to send the FCP packet on
+ * @fsp:   The FCP packet to send
  *
- * This is called by upper layer protocol.
- * Return   : zero for success and -1 for failure
- * Context  : called from queuecommand which can be called from process
- *	      or scsi soft irq.
- * Locks    : called with the host lock and irqs disabled.
+ * Return:  Zero for success and -1 for failure
+ * Locks:   Called with the host lock and irqs disabled.
  */
-static int fc_fcp_pkt_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp)
+static int fc_fcp_pkt_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp)
 {
-	struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
+	struct fc_fcp_internal *si = fc_get_scsi_internal(lport);
 	int rc;
 
 	fsp->cmd->SCp.ptr = (char *)fsp;
@@ -1006,16 +1051,22 @@ static int fc_fcp_pkt_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp)
 	memcpy(fsp->cdb_cmd.fc_cdb, fsp->cmd->cmnd, fsp->cmd->cmd_len);
 	list_add_tail(&fsp->list, &si->scsi_pkt_queue);
 
-	spin_unlock_irq(lp->host->host_lock);
-	rc = lp->tt.fcp_cmd_send(lp, fsp, fc_fcp_recv);
-	spin_lock_irq(lp->host->host_lock);
+	spin_unlock_irq(lport->host->host_lock);
+	rc = lport->tt.fcp_cmd_send(lport, fsp, fc_fcp_recv);
+	spin_lock_irq(lport->host->host_lock);
 	if (rc)
 		list_del(&fsp->list);
 
 	return rc;
 }
 
-static int fc_fcp_cmd_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp,
+/**
+ * fc_fcp_cmd_send() - Send a FCP command
+ * @lport: The local port to send the command on
+ * @fsp:   The FCP packet the command is on
+ * @resp:  The handler for the response
+ */
+static int fc_fcp_cmd_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp,
 			   void (*resp)(struct fc_seq *,
 					struct fc_frame *fp,
 					void *arg))
@@ -1023,14 +1074,14 @@ static int fc_fcp_cmd_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp,
 	struct fc_frame *fp;
 	struct fc_seq *seq;
 	struct fc_rport *rport;
-	struct fc_rport_libfc_priv *rp;
+	struct fc_rport_libfc_priv *rpriv;
 	const size_t len = sizeof(fsp->cdb_cmd);
 	int rc = 0;
 
 	if (fc_fcp_lock_pkt(fsp))
 		return 0;
 
-	fp = fc_frame_alloc(lp, sizeof(fsp->cdb_cmd));
+	fp = fc_fcp_frame_alloc(lport, sizeof(fsp->cdb_cmd));
 	if (!fp) {
 		rc = -1;
 		goto unlock;
@@ -1040,15 +1091,15 @@ static int fc_fcp_cmd_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp,
 	fr_fsp(fp) = fsp;
 	rport = fsp->rport;
 	fsp->max_payload = rport->maxframe_size;
-	rp = rport->dd_data;
+	rpriv = rport->dd_data;
 
 	fc_fill_fc_hdr(fp, FC_RCTL_DD_UNSOL_CMD, rport->port_id,
-		       fc_host_port_id(rp->local_port->host), FC_TYPE_FCP,
+		       fc_host_port_id(rpriv->local_port->host), FC_TYPE_FCP,
 		       FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
 
-	seq = lp->tt.exch_seq_send(lp, fp, resp, fc_fcp_pkt_destroy, fsp, 0);
+	seq = lport->tt.exch_seq_send(lport, fp, resp, fc_fcp_pkt_destroy,
+				      fsp, 0);
 	if (!seq) {
-		fc_frame_free(fp);
 		rc = -1;
 		goto unlock;
 	}
@@ -1065,8 +1116,10 @@ unlock:
 	return rc;
 }
 
-/*
- * transport error handler
+/**
+ * fc_fcp_error() - Handler for FCP layer errors
+ * @fsp: The FCP packet the error is on
+ * @fp:	 The frame that has errored
  */
 static void fc_fcp_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
 {
@@ -1091,11 +1144,13 @@ unlock:
 	fc_fcp_unlock_pkt(fsp);
 }
 
-/*
- * Scsi abort handler- calls to send an abort
- * and then wait for abort completion
+/**
+ * fc_fcp_pkt_abort() - Abort a fcp_pkt
+ * @fsp: The FCP packet to abort
+ *
+ * Called to send an abort and then wait for abort completion
  */
-static int fc_fcp_pkt_abort(struct fc_lport *lp, struct fc_fcp_pkt *fsp)
+static int fc_fcp_pkt_abort(struct fc_fcp_pkt *fsp)
 {
 	int rc = FAILED;
 
@@ -1122,14 +1177,15 @@ static int fc_fcp_pkt_abort(struct fc_lport *lp, struct fc_fcp_pkt *fsp)
 	return rc;
 }
 
-/*
- * Retry LUN reset after resource allocation failed.
+/**
+ * fc_lun_reset_send() - Send LUN reset command
+ * @data: The FCP packet that identifies the LUN to be reset
  */
 static void fc_lun_reset_send(unsigned long data)
 {
 	struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data;
-	struct fc_lport *lp = fsp->lp;
-	if (lp->tt.fcp_cmd_send(lp, fsp, fc_tm_done)) {
+	struct fc_lport *lport = fsp->lp;
+	if (lport->tt.fcp_cmd_send(lport, fsp, fc_tm_done)) {
 		if (fsp->recov_retry++ >= FC_MAX_RECOV_RETRY)
 			return;
 		if (fc_fcp_lock_pkt(fsp))
@@ -1140,11 +1196,15 @@ static void fc_lun_reset_send(unsigned long data)
 	}
 }
 
-/*
- * Scsi device reset handler- send a LUN RESET to the device
- * and wait for reset reply
+/**
+ * fc_lun_reset() - Send a LUN RESET command to a device
+ *		    and wait for the reply
+ * @lport: The local port to send the command on
+ * @fsp:   The FCP packet that identifies the LUN to be reset
+ * @id:	   The SCSI target ID
+ * @lun:   The LUN ID to be reset
  */
-static int fc_lun_reset(struct fc_lport *lp, struct fc_fcp_pkt *fsp,
+static int fc_lun_reset(struct fc_lport *lport, struct fc_fcp_pkt *fsp,
 			unsigned int id, unsigned int lun)
 {
 	int rc;
@@ -1172,14 +1232,14 @@ static int fc_lun_reset(struct fc_lport *lp, struct fc_fcp_pkt *fsp,
 
 	spin_lock_bh(&fsp->scsi_pkt_lock);
 	if (fsp->seq_ptr) {
-		lp->tt.exch_done(fsp->seq_ptr);
+		lport->tt.exch_done(fsp->seq_ptr);
 		fsp->seq_ptr = NULL;
 	}
 	fsp->wait_for_comp = 0;
 	spin_unlock_bh(&fsp->scsi_pkt_lock);
 
 	if (!rc) {
-		FC_SCSI_DBG(lp, "lun reset failed\n");
+		FC_SCSI_DBG(lport, "lun reset failed\n");
 		return FAILED;
 	}
 
@@ -1187,13 +1247,16 @@ static int fc_lun_reset(struct fc_lport *lp, struct fc_fcp_pkt *fsp,
 	if (fsp->cdb_status != FCP_TMF_CMPL)
 		return FAILED;
 
-	FC_SCSI_DBG(lp, "lun reset to lun %u completed\n", lun);
-	fc_fcp_cleanup_each_cmd(lp, id, lun, FC_CMD_ABORTED);
+	FC_SCSI_DBG(lport, "lun reset to lun %u completed\n", lun);
+	fc_fcp_cleanup_each_cmd(lport, id, lun, FC_CMD_ABORTED);
 	return SUCCESS;
 }
 
-/*
- * Task Managment response handler
+/**
+ * fc_tm_done() - Task Management response handler
+ * @seq: The sequence that the response is on
+ * @fp:	 The response frame
+ * @arg: The FCP packet the response is for
  */
 static void fc_tm_done(struct fc_seq *seq, struct fc_frame *fp, void *arg)
 {
@@ -1230,34 +1293,31 @@ static void fc_tm_done(struct fc_seq *seq, struct fc_frame *fp, void *arg)
 	fc_fcp_unlock_pkt(fsp);
 }
 
-static void fc_fcp_cleanup(struct fc_lport *lp)
+/**
+ * fc_fcp_cleanup() - Cleanup all FCP exchanges on a local port
+ * @lport: The local port to be cleaned up
+ */
+static void fc_fcp_cleanup(struct fc_lport *lport)
 {
-	fc_fcp_cleanup_each_cmd(lp, -1, -1, FC_ERROR);
+	fc_fcp_cleanup_each_cmd(lport, -1, -1, FC_ERROR);
 }
 
-/*
- * fc_fcp_timeout: called by OS timer function.
- *
- * The timer has been inactivated and must be reactivated if desired
- * using fc_fcp_timer_set().
- *
- * Algorithm:
- *
- * If REC is supported, just issue it, and return.  The REC exchange will
- * complete or time out, and recovery can continue at that point.
- *
- * Otherwise, if the response has been received without all the data,
- * it has been ER_TIMEOUT since the response was received.
+/**
+ * fc_fcp_timeout() - Handler for fcp_pkt timeouts
+ * @data: The FCP packet that has timed out
  *
- * If the response has not been received,
- * we see if data was received recently.  If it has been, we continue waiting,
- * otherwise, we abort the command.
+ * If REC is supported then just issue it and return. The REC exchange will
+ * complete or time out and recovery can continue at that point. Otherwise,
+ * if the response has been received without all the data then ER_TIMEOUT
+ * has already elapsed since the response arrived. If the response has not
+ * been received, check whether data arrived recently: if it did, keep
+ * waiting, otherwise abort the command.
  */
 static void fc_fcp_timeout(unsigned long data)
 {
 	struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data;
 	struct fc_rport *rport = fsp->rport;
-	struct fc_rport_libfc_priv *rp = rport->dd_data;
+	struct fc_rport_libfc_priv *rpriv = rport->dd_data;
 
 	if (fc_fcp_lock_pkt(fsp))
 		return;
@@ -1267,7 +1327,7 @@ static void fc_fcp_timeout(unsigned long data)
 
 	fsp->state |= FC_SRB_FCP_PROCESSING_TMO;
 
-	if (rp->flags & FC_RP_FLAGS_REC_SUPPORTED)
+	if (rpriv->flags & FC_RP_FLAGS_REC_SUPPORTED)
 		fc_fcp_rec(fsp);
 	else if (time_after_eq(fsp->last_pkt_time + (FC_SCSI_ER_TIMEOUT / 2),
 			       jiffies))
@@ -1281,39 +1341,40 @@ unlock:
 	fc_fcp_unlock_pkt(fsp);
 }
 
-/*
- * Send a REC ELS request
+/**
+ * fc_fcp_rec() - Send a REC ELS request
+ * @fsp: The FCP packet to send the REC request on
  */
 static void fc_fcp_rec(struct fc_fcp_pkt *fsp)
 {
-	struct fc_lport *lp;
+	struct fc_lport *lport;
 	struct fc_frame *fp;
 	struct fc_rport *rport;
-	struct fc_rport_libfc_priv *rp;
+	struct fc_rport_libfc_priv *rpriv;
 
-	lp = fsp->lp;
+	lport = fsp->lp;
 	rport = fsp->rport;
-	rp = rport->dd_data;
-	if (!fsp->seq_ptr || rp->rp_state != RPORT_ST_READY) {
+	rpriv = rport->dd_data;
+	if (!fsp->seq_ptr || rpriv->rp_state != RPORT_ST_READY) {
 		fsp->status_code = FC_HRD_ERROR;
 		fsp->io_status = 0;
 		fc_fcp_complete_locked(fsp);
 		return;
 	}
-	fp = fc_frame_alloc(lp, sizeof(struct fc_els_rec));
+	fp = fc_fcp_frame_alloc(lport, sizeof(struct fc_els_rec));
 	if (!fp)
 		goto retry;
 
 	fr_seq(fp) = fsp->seq_ptr;
 	fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, rport->port_id,
-		       fc_host_port_id(rp->local_port->host), FC_TYPE_ELS,
+		       fc_host_port_id(rpriv->local_port->host), FC_TYPE_ELS,
 		       FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
-	if (lp->tt.elsct_send(lp, rport->port_id, fp, ELS_REC, fc_fcp_rec_resp,
-			      fsp, jiffies_to_msecs(FC_SCSI_REC_TOV))) {
+	if (lport->tt.elsct_send(lport, rport->port_id, fp, ELS_REC,
+				 fc_fcp_rec_resp, fsp,
+				 jiffies_to_msecs(FC_SCSI_REC_TOV))) {
 		fc_fcp_pkt_hold(fsp);		/* hold while REC outstanding */
 		return;
 	}
-	fc_frame_free(fp);
 retry:
 	if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
 		fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
@@ -1321,12 +1382,16 @@ retry:
 		fc_timeout_error(fsp);
 }
 
-/*
- * Receive handler for REC ELS frame
- * if it is a reject then let the scsi layer to handle
- * the timeout. if it is a LS_ACC then if the io was not completed
- * then set the timeout and return otherwise complete the exchange
- * and tell the scsi layer to restart the I/O.
+/**
+ * fc_fcp_rec_resp() - Handler for REC ELS responses
+ * @seq: The sequence the response is on
+ * @fp:	 The response frame
+ * @arg: The FCP packet the response is on
+ *
+ * If the response is a reject then the SCSI layer will handle
+ * the timeout. If the response is an LS_ACC and the I/O is not yet
+ * complete, set the timeout and return; if the I/O is complete,
+ * complete the exchange and tell the SCSI layer.
  */
 static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
 {
@@ -1338,7 +1403,7 @@ static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
 	u32 offset;
 	enum dma_data_direction data_dir;
 	enum fc_rctl r_ctl;
-	struct fc_rport_libfc_priv *rp;
+	struct fc_rport_libfc_priv *rpriv;
 
 	if (IS_ERR(fp)) {
 		fc_fcp_rec_error(fsp, fp);
@@ -1361,13 +1426,13 @@ static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
 			/* fall through */
 		case ELS_RJT_UNSUP:
 			FC_FCP_DBG(fsp, "device does not support REC\n");
-			rp = fsp->rport->dd_data;
+			rpriv = fsp->rport->dd_data;
 			/*
 			 * if we do not spport RECs or got some bogus
 			 * reason then resetup timer so we check for
 			 * making progress.
 			 */
-			rp->flags &= ~FC_RP_FLAGS_REC_SUPPORTED;
+			rpriv->flags &= ~FC_RP_FLAGS_REC_SUPPORTED;
 			fc_fcp_timer_set(fsp, FC_SCSI_ER_TIMEOUT);
 			break;
 		case ELS_RJT_LOGIC:
@@ -1464,8 +1529,10 @@ out:
 	fc_frame_free(fp);
 }
 
-/*
- * Handle error response or timeout for REC exchange.
+/**
+ * fc_fcp_rec_error() - Handler for REC errors
+ * @fsp: The FCP packet the error is on
+ * @fp:	 The REC frame
  */
 static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
 {
@@ -1504,10 +1571,9 @@ out:
 	fc_fcp_pkt_release(fsp);	/* drop hold for outstanding REC */
 }
 
-/*
- * Time out error routine:
- * abort's the I/O close the exchange and
- * send completion notification to scsi layer
+/**
+ * fc_timeout_error() - Handler for fcp_pkt timeouts
+ * @fsp: The FCP packet that has timed out
  */
 static void fc_timeout_error(struct fc_fcp_pkt *fsp)
 {
@@ -1521,16 +1587,18 @@ static void fc_timeout_error(struct fc_fcp_pkt *fsp)
 	fc_fcp_send_abort(fsp);
 }
 
-/*
- * Sequence retransmission request.
+/**
+ * fc_fcp_srr() - Send a SRR request (Sequence Retransmission Request)
+ * @fsp:   The FCP packet the SRR is to be sent on
+ * @r_ctl: The R_CTL field for the SRR request
  * This is called after receiving status but insufficient data, or
  * when expecting status but the request has timed out.
  */
 static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset)
 {
-	struct fc_lport *lp = fsp->lp;
+	struct fc_lport *lport = fsp->lp;
 	struct fc_rport *rport;
-	struct fc_rport_libfc_priv *rp;
+	struct fc_rport_libfc_priv *rpriv;
 	struct fc_exch *ep = fc_seq_exch(fsp->seq_ptr);
 	struct fc_seq *seq;
 	struct fcp_srr *srr;
@@ -1538,12 +1606,13 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset)
 	u8 cdb_op;
 
 	rport = fsp->rport;
-	rp = rport->dd_data;
+	rpriv = rport->dd_data;
 	cdb_op = fsp->cdb_cmd.fc_cdb[0];
 
-	if (!(rp->flags & FC_RP_FLAGS_RETRY) || rp->rp_state != RPORT_ST_READY)
+	if (!(rpriv->flags & FC_RP_FLAGS_RETRY) ||
+	    rpriv->rp_state != RPORT_ST_READY)
 		goto retry;			/* shouldn't happen */
-	fp = fc_frame_alloc(lp, sizeof(*srr));
+	fp = fc_fcp_frame_alloc(lport, sizeof(*srr));
 	if (!fp)
 		goto retry;
 
@@ -1556,15 +1625,14 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset)
 	srr->srr_rel_off = htonl(offset);
 
 	fc_fill_fc_hdr(fp, FC_RCTL_ELS4_REQ, rport->port_id,
-		       fc_host_port_id(rp->local_port->host), FC_TYPE_FCP,
+		       fc_host_port_id(rpriv->local_port->host), FC_TYPE_FCP,
 		       FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
 
-	seq = lp->tt.exch_seq_send(lp, fp, fc_fcp_srr_resp, NULL,
-				   fsp, jiffies_to_msecs(FC_SCSI_REC_TOV));
-	if (!seq) {
-		fc_frame_free(fp);
+	seq = lport->tt.exch_seq_send(lport, fp, fc_fcp_srr_resp, NULL,
+				      fsp, jiffies_to_msecs(FC_SCSI_REC_TOV));
+	if (!seq)
 		goto retry;
-	}
+
 	fsp->recov_seq = seq;
 	fsp->xfer_len = offset;
 	fsp->xfer_contig_end = offset;
@@ -1575,8 +1643,11 @@ retry:
 	fc_fcp_retry_cmd(fsp);
 }
 
-/*
- * Handle response from SRR.
+/**
+ * fc_fcp_srr_resp() - Handler for SRR response
+ * @seq: The sequence the SRR is on
+ * @fp:	 The SRR frame
+ * @arg: The FCP packet the SRR is on
  */
 static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
 {
@@ -1622,6 +1693,11 @@ out:
 	fc_fcp_pkt_release(fsp);	/* drop hold for outstanding SRR */
 }
 
+/**
+ * fc_fcp_srr_error() - Handler for SRR errors
+ * @fsp: The FCP packet that the SRR error is on
+ * @fp:	 The SRR frame
+ */
 static void fc_fcp_srr_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
 {
 	if (fc_fcp_lock_pkt(fsp))
@@ -1646,31 +1722,36 @@ out:
 	fc_fcp_pkt_release(fsp);	/* drop hold for outstanding SRR */
 }
 
-static inline int fc_fcp_lport_queue_ready(struct fc_lport *lp)
+/**
+ * fc_fcp_lport_queue_ready() - Determine if the lport and its queue are ready
+ * @lport: The local port to be checked
+ */
+static inline int fc_fcp_lport_queue_ready(struct fc_lport *lport)
 {
 	/* lock ? */
-	return (lp->state == LPORT_ST_READY) && lp->link_up && !lp->qfull;
+	return (lport->state == LPORT_ST_READY) &&
+		lport->link_up && !lport->qfull;
 }
 
 /**
- * fc_queuecommand - The queuecommand function of the scsi template
- * @cmd:	struct scsi_cmnd to be executed
- * @done:	Callback function to be called when cmd is completed
+ * fc_queuecommand() - The queuecommand function of the SCSI template
+ * @cmd:   The scsi_cmnd to be executed
+ * @done:  The callback function to be called when the scsi_cmnd is complete
  *
- * this is the i/o strategy routine, called by the scsi layer
- * this routine is called with holding the host_lock.
+ * This is the i/o strategy routine, called by the SCSI layer. This routine
+ * is called with the host_lock held.
  */
 int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *))
 {
-	struct fc_lport *lp;
+	struct fc_lport *lport;
 	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
 	struct fc_fcp_pkt *fsp;
-	struct fc_rport_libfc_priv *rp;
+	struct fc_rport_libfc_priv *rpriv;
 	int rval;
 	int rc = 0;
 	struct fcoe_dev_stats *stats;
 
-	lp = shost_priv(sc_cmd->device->host);
+	lport = shost_priv(sc_cmd->device->host);
 
 	rval = fc_remote_port_chkready(rport);
 	if (rval) {
@@ -1689,14 +1770,16 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *))
 		goto out;
 	}
 
-	rp = rport->dd_data;
+	rpriv = rport->dd_data;
 
-	if (!fc_fcp_lport_queue_ready(lp)) {
+	if (!fc_fcp_lport_queue_ready(lport)) {
+		if (lport->qfull)
+			fc_fcp_can_queue_ramp_down(lport);
 		rc = SCSI_MLQUEUE_HOST_BUSY;
 		goto out;
 	}
 
-	fsp = fc_fcp_pkt_alloc(lp, GFP_ATOMIC);
+	fsp = fc_fcp_pkt_alloc(lport, GFP_ATOMIC);
 	if (fsp == NULL) {
 		rc = SCSI_MLQUEUE_HOST_BUSY;
 		goto out;
@@ -1706,8 +1789,9 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *))
 	 * build the libfc request pkt
 	 */
 	fsp->cmd = sc_cmd;	/* save the cmd */
-	fsp->lp = lp;		/* save the softc ptr */
+	fsp->lp = lport;	/* save the softc ptr */
 	fsp->rport = rport;	/* set the remote port ptr */
+	fsp->xfer_ddp = FC_XID_UNKNOWN;
 	sc_cmd->scsi_done = done;
 
 	/*
@@ -1719,7 +1803,7 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *))
 	/*
 	 * setup the data direction
 	 */
-	stats = fc_lport_get_stats(lp);
+	stats = fc_lport_get_stats(lport);
 	if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
 		fsp->req_flags = FC_SRB_READ;
 		stats->InputRequests++;
@@ -1733,7 +1817,7 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *))
 		stats->ControlRequests++;
 	}
 
-	fsp->tgt_flags = rp->flags;
+	fsp->tgt_flags = rpriv->flags;
 
 	init_timer(&fsp->timer);
 	fsp->timer.data = (unsigned long)fsp;
@@ -1743,7 +1827,7 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *))
 	 * if we get -1 return then put the request in the pending
 	 * queue.
 	 */
-	rval = fc_fcp_pkt_send(lp, fsp);
+	rval = fc_fcp_pkt_send(lport, fsp);
 	if (rval != 0) {
 		fsp->state = FC_SRB_FREE;
 		fc_fcp_pkt_release(fsp);
@@ -1755,18 +1839,17 @@ out:
 EXPORT_SYMBOL(fc_queuecommand);
 
 /**
- * fc_io_compl() -  Handle responses for completed commands
- * @fsp:	scsi packet
- *
- * Translates a error to a Linux SCSI error.
+ * fc_io_compl() - Handle responses for completed commands
+ * @fsp: The FCP packet that is complete
  *
+ * Translates fcp_pkt errors into Linux SCSI error codes.
  * The fcp packet lock must be held when calling.
  */
 static void fc_io_compl(struct fc_fcp_pkt *fsp)
 {
 	struct fc_fcp_internal *si;
 	struct scsi_cmnd *sc_cmd;
-	struct fc_lport *lp;
+	struct fc_lport *lport;
 	unsigned long flags;
 
 	/* release outstanding ddp context */
@@ -1779,28 +1862,26 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
 		spin_lock_bh(&fsp->scsi_pkt_lock);
 	}
 
-	lp = fsp->lp;
-	si = fc_get_scsi_internal(lp);
-	spin_lock_irqsave(lp->host->host_lock, flags);
+	lport = fsp->lp;
+	si = fc_get_scsi_internal(lport);
+	spin_lock_irqsave(lport->host->host_lock, flags);
 	if (!fsp->cmd) {
-		spin_unlock_irqrestore(lp->host->host_lock, flags);
+		spin_unlock_irqrestore(lport->host->host_lock, flags);
 		return;
 	}
 
 	/*
-	 * if a command timed out while we had to try and throttle IO
-	 * and it is now getting cleaned up, then we are about to
-	 * try again so clear the throttled flag incase we get more
-	 * time outs.
+	 * if a can_queue ramp down has previously occurred then try a
+	 * can_queue ramp up since commands are completing now.
 	 */
-	if (si->throttled && fsp->state & FC_SRB_NOMEM)
-		si->throttled = 0;
+	if (si->last_can_queue_ramp_down_time)
+		fc_fcp_can_queue_ramp_up(lport);
 
 	sc_cmd = fsp->cmd;
 	fsp->cmd = NULL;
 
 	if (!sc_cmd->SCp.ptr) {
-		spin_unlock_irqrestore(lp->host->host_lock, flags);
+		spin_unlock_irqrestore(lport->host->host_lock, flags);
 		return;
 	}
 
@@ -1814,21 +1895,6 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
 			sc_cmd->result = DID_OK << 16;
 			if (fsp->scsi_resid)
 				CMD_RESID_LEN(sc_cmd) = fsp->scsi_resid;
-		} else if (fsp->cdb_status == QUEUE_FULL) {
-			struct scsi_device *tmp_sdev;
-			struct scsi_device *sdev = sc_cmd->device;
-
-			shost_for_each_device(tmp_sdev, sdev->host) {
-				if (tmp_sdev->id != sdev->id)
-					continue;
-
-				if (tmp_sdev->queue_depth > 1) {
-					scsi_track_queue_full(tmp_sdev,
-							      tmp_sdev->
-							      queue_depth - 1);
-				}
-			}
-			sc_cmd->result = (DID_OK << 16) | fsp->cdb_status;
 		} else {
 			/*
 			 * transport level I/O was ok but scsi
@@ -1846,7 +1912,8 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
 			 * scsi status is good but transport level
 			 * underrun.
 			 */
-			sc_cmd->result = DID_OK << 16;
+			sc_cmd->result = (fsp->state & FC_SRB_RCV_STATUS ?
+					  DID_OK : DID_ERROR) << 16;
 		} else {
 			/*
 			 * scsi got underrun, this is an error
@@ -1881,60 +1948,42 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
 	list_del(&fsp->list);
 	sc_cmd->SCp.ptr = NULL;
 	sc_cmd->scsi_done(sc_cmd);
-	spin_unlock_irqrestore(lp->host->host_lock, flags);
+	spin_unlock_irqrestore(lport->host->host_lock, flags);
 
 	/* release ref from initial allocation in queue command */
 	fc_fcp_pkt_release(fsp);
 }
 
 /**
- * fc_fcp_complete() - complete processing of a fcp packet
- * @fsp:	fcp packet
- *
- * This function may sleep if a fsp timer is pending.
- * The host lock must not be held by caller.
- */
-void fc_fcp_complete(struct fc_fcp_pkt *fsp)
-{
-	if (fc_fcp_lock_pkt(fsp))
-		return;
-
-	fc_fcp_complete_locked(fsp);
-	fc_fcp_unlock_pkt(fsp);
-}
-EXPORT_SYMBOL(fc_fcp_complete);
-
-/**
  * fc_eh_abort() - Abort a command
- * @sc_cmd:	scsi command to abort
+ * @sc_cmd: The SCSI command to abort
  *
- * From scsi host template.
- * send ABTS to the target device  and wait for the response
- * sc_cmd is the pointer to the command to be aborted.
+ * From SCSI host template.
+ * Send an ABTS to the target device and wait for the response.
  */
 int fc_eh_abort(struct scsi_cmnd *sc_cmd)
 {
 	struct fc_fcp_pkt *fsp;
-	struct fc_lport *lp;
+	struct fc_lport *lport;
 	int rc = FAILED;
 	unsigned long flags;
 
-	lp = shost_priv(sc_cmd->device->host);
-	if (lp->state != LPORT_ST_READY)
+	lport = shost_priv(sc_cmd->device->host);
+	if (lport->state != LPORT_ST_READY)
 		return rc;
-	else if (!lp->link_up)
+	else if (!lport->link_up)
 		return rc;
 
-	spin_lock_irqsave(lp->host->host_lock, flags);
+	spin_lock_irqsave(lport->host->host_lock, flags);
 	fsp = CMD_SP(sc_cmd);
 	if (!fsp) {
 		/* command completed while scsi eh was setting up */
-		spin_unlock_irqrestore(lp->host->host_lock, flags);
+		spin_unlock_irqrestore(lport->host->host_lock, flags);
 		return SUCCESS;
 	}
 	/* grab a ref so the fsp and sc_cmd cannot be relased from under us */
 	fc_fcp_pkt_hold(fsp);
-	spin_unlock_irqrestore(lp->host->host_lock, flags);
+	spin_unlock_irqrestore(lport->host->host_lock, flags);
 
 	if (fc_fcp_lock_pkt(fsp)) {
 		/* completed while we were waiting for timer to be deleted */
@@ -1942,7 +1991,7 @@ int fc_eh_abort(struct scsi_cmnd *sc_cmd)
 		goto release_pkt;
 	}
 
-	rc = fc_fcp_pkt_abort(lp, fsp);
+	rc = fc_fcp_pkt_abort(fsp);
 	fc_fcp_unlock_pkt(fsp);
 
 release_pkt:
@@ -1952,37 +2001,34 @@ release_pkt:
 EXPORT_SYMBOL(fc_eh_abort);
 
 /**
- * fc_eh_device_reset() Reset a single LUN
- * @sc_cmd:	scsi command
+ * fc_eh_device_reset() - Reset a single LUN
+ * @sc_cmd: The SCSI command which identifies the device whose
+ *	    LUN is to be reset
  *
- * Set from scsi host template to send tm cmd to the target and wait for the
- * response.
+ * Set from SCSI host template.
  */
 int fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
 {
-	struct fc_lport *lp;
+	struct fc_lport *lport;
 	struct fc_fcp_pkt *fsp;
 	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
 	int rc = FAILED;
-	struct fc_rport_libfc_priv *rp;
 	int rval;
 
 	rval = fc_remote_port_chkready(rport);
 	if (rval)
 		goto out;
 
-	rp = rport->dd_data;
-	lp = shost_priv(sc_cmd->device->host);
+	lport = shost_priv(sc_cmd->device->host);
 
-	if (lp->state != LPORT_ST_READY)
+	if (lport->state != LPORT_ST_READY)
 		return rc;
 
-	FC_SCSI_DBG(lp, "Resetting rport (%6x)\n", rport->port_id);
+	FC_SCSI_DBG(lport, "Resetting rport (%6x)\n", rport->port_id);
 
-	fsp = fc_fcp_pkt_alloc(lp, GFP_NOIO);
+	fsp = fc_fcp_pkt_alloc(lport, GFP_NOIO);
 	if (fsp == NULL) {
 		printk(KERN_WARNING "libfc: could not allocate scsi_pkt\n");
-		sc_cmd->result = DID_NO_CONNECT << 16;
 		goto out;
 	}
 
@@ -1991,13 +2037,13 @@ int fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
 	 * the sc passed in is not setup for execution like when sent
 	 * through the queuecommand callout.
 	 */
-	fsp->lp = lp;		/* save the softc ptr */
+	fsp->lp = lport;	/* save the softc ptr */
 	fsp->rport = rport;	/* set the remote port ptr */
 
 	/*
 	 * flush outstanding commands
 	 */
-	rc = fc_lun_reset(lp, fsp, scmd_id(sc_cmd), sc_cmd->device->lun);
+	rc = fc_lun_reset(lport, fsp, scmd_id(sc_cmd), sc_cmd->device->lun);
 	fsp->state = FC_SRB_FREE;
 	fc_fcp_pkt_release(fsp);
 
@@ -2007,38 +2053,39 @@ out:
 EXPORT_SYMBOL(fc_eh_device_reset);
 
 /**
- * fc_eh_host_reset() - The reset function will reset the ports on the host.
- * @sc_cmd:	scsi command
+ * fc_eh_host_reset() - Reset a Scsi_Host.
+ * @sc_cmd: The SCSI command that identifies the SCSI host to be reset
  */
 int fc_eh_host_reset(struct scsi_cmnd *sc_cmd)
 {
 	struct Scsi_Host *shost = sc_cmd->device->host;
-	struct fc_lport *lp = shost_priv(shost);
+	struct fc_lport *lport = shost_priv(shost);
 	unsigned long wait_tmo;
 
-	FC_SCSI_DBG(lp, "Resetting host\n");
+	FC_SCSI_DBG(lport, "Resetting host\n");
 
-	lp->tt.lport_reset(lp);
+	lport->tt.lport_reset(lport);
 	wait_tmo = jiffies + FC_HOST_RESET_TIMEOUT;
-	while (!fc_fcp_lport_queue_ready(lp) && time_before(jiffies, wait_tmo))
+	while (!fc_fcp_lport_queue_ready(lport) && time_before(jiffies,
+							       wait_tmo))
 		msleep(1000);
 
-	if (fc_fcp_lport_queue_ready(lp)) {
+	if (fc_fcp_lport_queue_ready(lport)) {
 		shost_printk(KERN_INFO, shost, "libfc: Host reset succeeded "
-			     "on port (%6x)\n", fc_host_port_id(lp->host));
+			     "on port (%6x)\n", fc_host_port_id(lport->host));
 		return SUCCESS;
 	} else {
 		shost_printk(KERN_INFO, shost, "libfc: Host reset failed, "
 			     "port (%6x) is not ready.\n",
-			     fc_host_port_id(lp->host));
+			     fc_host_port_id(lport->host));
 		return FAILED;
 	}
 }
 EXPORT_SYMBOL(fc_eh_host_reset);
 
 /**
- * fc_slave_alloc() - configure queue depth
- * @sdev:	scsi device
+ * fc_slave_alloc() - Configure the queue depth of a SCSI device
+ * @sdev: The SCSI device whose queue depth is to be configured
  *
  * Configures queue depth based on host's cmd_per_len. If not set
  * then we use the libfc default.
@@ -2046,29 +2093,50 @@ EXPORT_SYMBOL(fc_eh_host_reset);
 int fc_slave_alloc(struct scsi_device *sdev)
 {
 	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
-	int queue_depth;
 
 	if (!rport || fc_remote_port_chkready(rport))
 		return -ENXIO;
 
-	if (sdev->tagged_supported) {
-		if (sdev->host->hostt->cmd_per_lun)
-			queue_depth = sdev->host->hostt->cmd_per_lun;
-		else
-			queue_depth = FC_FCP_DFLT_QUEUE_DEPTH;
-		scsi_activate_tcq(sdev, queue_depth);
-	}
+	if (sdev->tagged_supported)
+		scsi_activate_tcq(sdev, FC_FCP_DFLT_QUEUE_DEPTH);
+	else
+		scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev),
+					FC_FCP_DFLT_QUEUE_DEPTH);
+
 	return 0;
 }
 EXPORT_SYMBOL(fc_slave_alloc);
 
-int fc_change_queue_depth(struct scsi_device *sdev, int qdepth)
+/**
+ * fc_change_queue_depth() - Change a device's queue depth
+ * @sdev:   The SCSI device whose queue depth is to be changed
+ * @qdepth: The new queue depth
+ * @reason: The reason for the change
+ */
+int fc_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
 {
-	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
+	switch (reason) {
+	case SCSI_QDEPTH_DEFAULT:
+		scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
+		break;
+	case SCSI_QDEPTH_QFULL:
+		scsi_track_queue_full(sdev, qdepth);
+		break;
+	case SCSI_QDEPTH_RAMP_UP:
+		scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
 	return sdev->queue_depth;
 }
 EXPORT_SYMBOL(fc_change_queue_depth);
 
+/**
+ * fc_change_queue_type() - Change a device's queue type
+ * @sdev:     The SCSI device whose queue type is to be changed
+ * @tag_type: Identifier for queue type
+ */
 int fc_change_queue_type(struct scsi_device *sdev, int tag_type)
 {
 	if (sdev->tagged_supported) {
@@ -2084,38 +2152,69 @@ int fc_change_queue_type(struct scsi_device *sdev, int tag_type)
 }
 EXPORT_SYMBOL(fc_change_queue_type);
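
These error-handling and queue-management callbacks are designed to be wired
directly into a low-level driver's scsi_host_template. A minimal sketch,
modeled on how an FCoE-style LLD does it; the template name and the tuning
values are illustrative, not part of this patch:

	static struct scsi_host_template example_fc_sht = {
		.module			= THIS_MODULE,
		.name			= "example-fc",
		.queuecommand		= fc_queuecommand,
		.eh_abort_handler	= fc_eh_abort,
		.eh_device_reset_handler = fc_eh_device_reset,
		.eh_host_reset_handler	= fc_eh_host_reset,
		.slave_alloc		= fc_slave_alloc,
		.change_queue_depth	= fc_change_queue_depth,
		.change_queue_type	= fc_change_queue_type,
		.this_id		= -1,	/* no initiator ID on FC */
		.cmd_per_lun		= 3,	/* illustrative */
		.can_queue		= 1024,	/* ramped up/down by libfc */
		.use_clustering		= ENABLE_CLUSTERING,
		.sg_tablesize		= SG_ALL,
	};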
 
-void fc_fcp_destroy(struct fc_lport *lp)
+/**
+ * fc_fcp_destroy() - Tear down the FCP layer for a given local port
+ * @lport: The local port that no longer needs the FCP layer
+ */
+void fc_fcp_destroy(struct fc_lport *lport)
 {
-	struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
+	struct fc_fcp_internal *si = fc_get_scsi_internal(lport);
 
 	if (!list_empty(&si->scsi_pkt_queue))
 		printk(KERN_ERR "libfc: Leaked SCSI packets when destroying "
-		       "port (%6x)\n", fc_host_port_id(lp->host));
+		       "port (%6x)\n", fc_host_port_id(lport->host));
 
 	mempool_destroy(si->scsi_pkt_pool);
 	kfree(si);
-	lp->scsi_priv = NULL;
+	lport->scsi_priv = NULL;
 }
 EXPORT_SYMBOL(fc_fcp_destroy);
 
-int fc_fcp_init(struct fc_lport *lp)
+int fc_setup_fcp(void)
+{
+	int rc = 0;
+
+	scsi_pkt_cachep = kmem_cache_create("libfc_fcp_pkt",
+					    sizeof(struct fc_fcp_pkt),
+					    0, SLAB_HWCACHE_ALIGN, NULL);
+	if (!scsi_pkt_cachep) {
+		printk(KERN_ERR "libfc: Unable to allocate SRB cache, "
+		       "module load failed!");
+		rc = -ENOMEM;
+	}
+
+	return rc;
+}
+
+void fc_destroy_fcp(void)
+{
+	if (scsi_pkt_cachep)
+		kmem_cache_destroy(scsi_pkt_cachep);
+}
+
+/**
+ * fc_fcp_init() - Initialize the FCP layer for a local port
+ * @lport: The local port to initialize the FCP layer for
+ */
+int fc_fcp_init(struct fc_lport *lport)
 {
 	int rc;
 	struct fc_fcp_internal *si;
 
-	if (!lp->tt.fcp_cmd_send)
-		lp->tt.fcp_cmd_send = fc_fcp_cmd_send;
+	if (!lport->tt.fcp_cmd_send)
+		lport->tt.fcp_cmd_send = fc_fcp_cmd_send;
 
-	if (!lp->tt.fcp_cleanup)
-		lp->tt.fcp_cleanup = fc_fcp_cleanup;
+	if (!lport->tt.fcp_cleanup)
+		lport->tt.fcp_cleanup = fc_fcp_cleanup;
 
-	if (!lp->tt.fcp_abort_io)
-		lp->tt.fcp_abort_io = fc_fcp_abort_io;
+	if (!lport->tt.fcp_abort_io)
+		lport->tt.fcp_abort_io = fc_fcp_abort_io;
 
 	si = kzalloc(sizeof(struct fc_fcp_internal), GFP_KERNEL);
 	if (!si)
 		return -ENOMEM;
-	lp->scsi_priv = si;
+	lport->scsi_priv = si;
+	si->max_can_queue = lport->host->can_queue;
 	INIT_LIST_HEAD(&si->scsi_pkt_queue);
 
 	si->scsi_pkt_pool = mempool_create_slab_pool(2, scsi_pkt_cachep);
@@ -2130,42 +2229,3 @@ free_internal:
 	return rc;
 }
 EXPORT_SYMBOL(fc_fcp_init);
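
fc_fcp_init() is one of the per-layer initializers an LLD runs while
configuring a new lport. A hedged sketch of that bring-up, with error
handling mostly elided; example_lport_config() is hypothetical, while the
fc_*_init() calls are libfc's real per-layer initializers:

	static int example_lport_config(struct fc_lport *lport)
	{
		if (fc_fcp_init(lport))	/* scsi_priv + scsi_pkt mempool */
			return -ENOMEM;
		fc_exch_init(lport);	/* exchange manager hooks */
		fc_elsct_init(lport);	/* ELS/CT send hook */
		fc_lport_init(lport);	/* lport state machine hooks */
		fc_rport_init(lport);	/* remote port hooks */
		fc_disc_init(lport);	/* discovery hooks */
		return 0;
	}
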
-
-static int __init libfc_init(void)
-{
-	int rc;
-
-	scsi_pkt_cachep = kmem_cache_create("libfc_fcp_pkt",
-					    sizeof(struct fc_fcp_pkt),
-					    0, SLAB_HWCACHE_ALIGN, NULL);
-	if (scsi_pkt_cachep == NULL) {
-		printk(KERN_ERR "libfc: Unable to allocate SRB cache, "
-		       "module load failed!");
-		return -ENOMEM;
-	}
-
-	rc = fc_setup_exch_mgr();
-	if (rc)
-		goto destroy_pkt_cache;
-
-	rc = fc_setup_rport();
-	if (rc)
-		goto destroy_em;
-
-	return rc;
-destroy_em:
-	fc_destroy_exch_mgr();
-destroy_pkt_cache:
-	kmem_cache_destroy(scsi_pkt_cachep);
-	return rc;
-}
-
-static void __exit libfc_exit(void)
-{
-	kmem_cache_destroy(scsi_pkt_cachep);
-	fc_destroy_exch_mgr();
-	fc_destroy_rport();
-}
-
-module_init(libfc_init);
-module_exit(libfc_exit);
diff --git a/drivers/scsi/libfc/fc_frame.c b/drivers/scsi/libfc/fc_frame.c
index 63fe00cfe667..6da01c616964 100644
--- a/drivers/scsi/libfc/fc_frame.c
+++ b/drivers/scsi/libfc/fc_frame.c
@@ -51,24 +51,24 @@ EXPORT_SYMBOL(fc_frame_crc_check);
  * Allocate a frame intended to be sent via fcoe_xmit.
  * Get an sk_buff for the frame and set the length.
  */
-struct fc_frame *__fc_frame_alloc(size_t len)
+struct fc_frame *_fc_frame_alloc(size_t len)
 {
 	struct fc_frame *fp;
 	struct sk_buff *skb;
 
 	WARN_ON((len % sizeof(u32)) != 0);
 	len += sizeof(struct fc_frame_header);
-	skb = dev_alloc_skb(len + FC_FRAME_HEADROOM + FC_FRAME_TAILROOM);
+	skb = alloc_skb_fclone(len + FC_FRAME_HEADROOM + FC_FRAME_TAILROOM +
+			       NET_SKB_PAD, GFP_ATOMIC);
 	if (!skb)
 		return NULL;
+	skb_reserve(skb, NET_SKB_PAD + FC_FRAME_HEADROOM);
 	fp = (struct fc_frame *) skb;
 	fc_frame_init(fp);
-	skb_reserve(skb, FC_FRAME_HEADROOM);
 	skb_put(skb, len);
 	return fp;
 }
-EXPORT_SYMBOL(__fc_frame_alloc);
-
+EXPORT_SYMBOL(_fc_frame_alloc);
 
 struct fc_frame *fc_frame_alloc_fill(struct fc_lport *lp, size_t payload_len)
 {
@@ -78,7 +78,7 @@ struct fc_frame *fc_frame_alloc_fill(struct fc_lport *lp, size_t payload_len)
 	fill = payload_len % 4;
 	if (fill != 0)
 		fill = 4 - fill;
-	fp = __fc_frame_alloc(payload_len + fill);
+	fp = _fc_frame_alloc(payload_len + fill);
 	if (fp) {
 		memset((char *) fr_hdr(fp) + payload_len, 0, fill);
 		/* trim is OK, we just allocated it so there are no fragments */
@@ -87,3 +87,4 @@ struct fc_frame *fc_frame_alloc_fill(struct fc_lport *lp, size_t payload_len)
 	}
 	return fp;
 }
+EXPORT_SYMBOL(fc_frame_alloc_fill);
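
Two design points in the hunk above: alloc_skb_fclone() pre-allocates a
companion clone so that a later skb_clone() on the transmit path is cheap,
and reserving NET_SKB_PAD on top of FC_FRAME_HEADROOM leaves room for the
lower-layer (e.g. Ethernet/FCoE) headers. The call pattern for users is
unchanged; a hedged sketch, error handling elided:

	struct fc_frame *fp;

	/* length is padded to a multiple of four by the allocator */
	fp = fc_frame_alloc(lport, sizeof(struct fc_els_rec));
	if (!fp)
		return;	/* callers typically retry from a timer */
	/* fill the payload, then fc_fill_fc_hdr() and hand off to send */
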
diff --git a/drivers/scsi/libfc/fc_libfc.c b/drivers/scsi/libfc/fc_libfc.c
new file mode 100644
index 000000000000..39f4b6ab04b4
--- /dev/null
+++ b/drivers/scsi/libfc/fc_libfc.c
@@ -0,0 +1,134 @@
+/*
+ * Copyright(c) 2009 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/scatterlist.h>
+#include <linux/crc32.h>
+
+#include <scsi/libfc.h>
+
+#include "fc_libfc.h"
+
+MODULE_AUTHOR("Open-FCoE.org");
+MODULE_DESCRIPTION("libfc");
+MODULE_LICENSE("GPL v2");
+
+unsigned int fc_debug_logging;
+module_param_named(debug_logging, fc_debug_logging, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
+
+/**
+ * libfc_init() - Initialize libfc.ko
+ */
+static int __init libfc_init(void)
+{
+	int rc = 0;
+
+	rc = fc_setup_fcp();
+	if (rc)
+		return rc;
+
+	rc = fc_setup_exch_mgr();
+	if (rc)
+		goto destroy_pkt_cache;
+
+	rc = fc_setup_rport();
+	if (rc)
+		goto destroy_em;
+
+	return rc;
+destroy_em:
+	fc_destroy_exch_mgr();
+destroy_pkt_cache:
+	fc_destroy_fcp();
+	return rc;
+}
+module_init(libfc_init);
+
+/**
+ * libfc_exit() - Tear down libfc.ko
+ */
+static void __exit libfc_exit(void)
+{
+	fc_destroy_fcp();
+	fc_destroy_exch_mgr();
+	fc_destroy_rport();
+}
+module_exit(libfc_exit);
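
Since debug_logging is registered with S_IWUSR, the logging bit mask defined
in fc_libfc.h (below) can be changed at runtime through
/sys/module/libfc/parameters/debug_logging; writing 0x10, for example,
enables the FC_FCP_LOGGING I/O-path messages.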
+
+/**
+ * fc_copy_buffer_to_sglist() - Copy a buffer's data into a scatter-gather list
+ *
+ * @buf: pointer to the data buffer.
+ * @len: the byte-length of the data buffer.
+ * @sg: pointer to the pointer of the SG list.
+ * @nents: pointer to the remaining number of entries in the SG list.
+ * @offset: pointer to the current offset in the SG list.
+ * @km_type: dedicated page table slot type for kmap_atomic.
+ * @crc: pointer to the 32-bit crc value.
+ *	 If crc is NULL, CRC is not calculated.
+ */
+u32 fc_copy_buffer_to_sglist(void *buf, size_t len,
+			     struct scatterlist *sg,
+			     u32 *nents, size_t *offset,
+			     enum km_type km_type, u32 *crc)
+{
+	size_t remaining = len;
+	u32 copy_len = 0;
+
+	while (remaining > 0 && sg) {
+		size_t off, sg_bytes;
+		void *page_addr;
+
+		if (*offset >= sg->length) {
+			/*
+			 * Check for end and drop resources
+			 * from the last iteration.
+			 */
+			if (!(*nents))
+				break;
+			--(*nents);
+			*offset -= sg->length;
+			sg = sg_next(sg);
+			continue;
+		}
+		sg_bytes = min(remaining, sg->length - *offset);
+
+		/*
+		 * The scatterlist item may be bigger than PAGE_SIZE,
+		 * but we are limited to mapping PAGE_SIZE at a time.
+		 */
+		off = *offset + sg->offset;
+		sg_bytes = min(sg_bytes,
+			       (size_t)(PAGE_SIZE - (off & ~PAGE_MASK)));
+		page_addr = kmap_atomic(sg_page(sg) + (off >> PAGE_SHIFT),
+					km_type);
+		if (crc)
+			*crc = crc32(*crc, buf, sg_bytes);
+		memcpy((char *)page_addr + (off & ~PAGE_MASK), buf, sg_bytes);
+		kunmap_atomic(page_addr, km_type);
+		buf += sg_bytes;
+		*offset += sg_bytes;
+		remaining -= sg_bytes;
+		copy_len += sg_bytes;
+	}
+	return copy_len;
+}
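
A hedged sketch of how a data-receive path feeds this helper, copying a
frame payload into a command's scatter-gather list while accumulating the
data CRC; the local variable names are illustrative (fc_fcp.c's receive
path is the in-tree caller):

	struct scatterlist *sg = scsi_sglist(sc_cmd);
	u32 nents = scsi_sg_count(sc_cmd);
	size_t offset = rel_off;	/* offset carried in the frame */
	u32 crc = crc32(~0, (u8 *)fh, sizeof(*fh));	/* seed with header */

	fc_copy_buffer_to_sglist(buf, len, sg, &nents, &offset,
				 KM_SOFTIRQ0, &crc);
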
diff --git a/drivers/scsi/libfc/fc_libfc.h b/drivers/scsi/libfc/fc_libfc.h
new file mode 100644
index 000000000000..741fd5c72e13
--- /dev/null
+++ b/drivers/scsi/libfc/fc_libfc.h
@@ -0,0 +1,112 @@
+/*
+ * Copyright(c) 2009 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+#ifndef _FC_LIBFC_H_
+#define _FC_LIBFC_H_
+
+#define FC_LIBFC_LOGGING 0x01 /* General logging, not categorized */
+#define FC_LPORT_LOGGING 0x02 /* lport layer logging */
+#define FC_DISC_LOGGING	 0x04 /* discovery layer logging */
+#define FC_RPORT_LOGGING 0x08 /* rport layer logging */
+#define FC_FCP_LOGGING	 0x10 /* I/O path logging */
+#define FC_EM_LOGGING	 0x20 /* Exchange Manager logging */
+#define FC_EXCH_LOGGING	 0x40 /* Exchange/Sequence logging */
+#define FC_SCSI_LOGGING	 0x80 /* SCSI logging (mostly error handling) */
+
+extern unsigned int fc_debug_logging;
+
+#define FC_CHECK_LOGGING(LEVEL, CMD)			\
+	do {						\
+		if (unlikely(fc_debug_logging & LEVEL))	\
+			do {				\
+				CMD;			\
+			} while (0);			\
+	} while (0)
+
+#define FC_LIBFC_DBG(fmt, args...)					\
+	FC_CHECK_LOGGING(FC_LIBFC_LOGGING,				\
+			 printk(KERN_INFO "libfc: " fmt, ##args))
+
+#define FC_LPORT_DBG(lport, fmt, args...)				\
+	FC_CHECK_LOGGING(FC_LPORT_LOGGING,				\
+			 printk(KERN_INFO "host%u: lport %6x: " fmt,	\
+				(lport)->host->host_no,			\
+				fc_host_port_id((lport)->host), ##args))
+
+#define FC_DISC_DBG(disc, fmt, args...)				\
+	FC_CHECK_LOGGING(FC_DISC_LOGGING,			\
+			 printk(KERN_INFO "host%u: disc: " fmt,	\
+				(disc)->lport->host->host_no,	\
+				##args))
+
+#define FC_RPORT_ID_DBG(lport, port_id, fmt, args...)			\
+	FC_CHECK_LOGGING(FC_RPORT_LOGGING,				\
+			 printk(KERN_INFO "host%u: rport %6x: " fmt,	\
+				(lport)->host->host_no,			\
+				(port_id), ##args))
+
+#define FC_RPORT_DBG(rdata, fmt, args...)				\
+	FC_RPORT_ID_DBG((rdata)->local_port, (rdata)->ids.port_id, fmt, ##args)
+
+#define FC_FCP_DBG(pkt, fmt, args...)					\
+	FC_CHECK_LOGGING(FC_FCP_LOGGING,				\
+			 printk(KERN_INFO "host%u: fcp: %6x: " fmt,	\
+				(pkt)->lp->host->host_no,		\
+				(pkt)->rport->port_id, ##args))
+
+#define FC_EXCH_DBG(exch, fmt, args...)					\
+	FC_CHECK_LOGGING(FC_EXCH_LOGGING,				\
+			 printk(KERN_INFO "host%u: xid %4x: " fmt,	\
+				(exch)->lp->host->host_no,		\
+				(exch)->xid, ##args))
+
+#define FC_SCSI_DBG(lport, fmt, args...)				\
+	FC_CHECK_LOGGING(FC_SCSI_LOGGING,				\
+			 printk(KERN_INFO "host%u: scsi: " fmt,		\
+				(lport)->host->host_no,	##args))
+
+/*
+ * Set up direct-data placement for this I/O request
+ */
+void fc_fcp_ddp_setup(struct fc_fcp_pkt *fsp, u16 xid);
+
+/*
+ * Module setup functions
+ */
+int fc_setup_exch_mgr(void);
+void fc_destroy_exch_mgr(void);
+int fc_setup_rport(void);
+void fc_destroy_rport(void);
+int fc_setup_fcp(void);
+void fc_destroy_fcp(void);
+
+/*
+ * Internal libfc functions
+ */
+const char *fc_els_resp_type(struct fc_frame *);
+
+/*
+ * Copies a buffer into an sg list
+ */
+u32 fc_copy_buffer_to_sglist(void *buf, size_t len,
+			     struct scatterlist *sg,
+			     u32 *nents, size_t *offset,
+			     enum km_type km_type, u32 *crc);
+
+#endif /* _FC_LIBFC_H_ */
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index bd2f77197447..bbf4152c9c69 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -94,6 +94,9 @@
 
 #include <scsi/libfc.h>
 #include <scsi/fc_encode.h>
+#include <linux/scatterlist.h>
+
+#include "fc_libfc.h"
 
 /* Fabric IDs to use for point-to-point mode, chosen on whims. */
 #define FC_LOCAL_PTP_FID_LO   0x010101
@@ -106,8 +109,7 @@ static void fc_lport_error(struct fc_lport *, struct fc_frame *);
 static void fc_lport_enter_reset(struct fc_lport *);
 static void fc_lport_enter_flogi(struct fc_lport *);
 static void fc_lport_enter_dns(struct fc_lport *);
-static void fc_lport_enter_rpn_id(struct fc_lport *);
-static void fc_lport_enter_rft_id(struct fc_lport *);
+static void fc_lport_enter_ns(struct fc_lport *, enum fc_lport_state);
 static void fc_lport_enter_scr(struct fc_lport *);
 static void fc_lport_enter_ready(struct fc_lport *);
 static void fc_lport_enter_logo(struct fc_lport *);
@@ -116,14 +118,40 @@ static const char *fc_lport_state_names[] = {
 	[LPORT_ST_DISABLED] = "disabled",
 	[LPORT_ST_FLOGI] =    "FLOGI",
 	[LPORT_ST_DNS] =      "dNS",
-	[LPORT_ST_RPN_ID] =   "RPN_ID",
+	[LPORT_ST_RNN_ID] =   "RNN_ID",
+	[LPORT_ST_RSNN_NN] =  "RSNN_NN",
+	[LPORT_ST_RSPN_ID] =  "RSPN_ID",
 	[LPORT_ST_RFT_ID] =   "RFT_ID",
+	[LPORT_ST_RFF_ID] =   "RFF_ID",
 	[LPORT_ST_SCR] =      "SCR",
 	[LPORT_ST_READY] =    "Ready",
 	[LPORT_ST_LOGO] =     "LOGO",
 	[LPORT_ST_RESET] =    "reset",
 };
 
+/**
+ * struct fc_bsg_info - FC Passthrough management structure
+ * @job:      The passthrough job
+ * @lport:    The local port to pass through a command
+ * @rsp_code: The expected response code
+ * @sg:	      job->reply_payload.sg_list
+ * @nents:    job->reply_payload.sg_cnt
+ * @offset:   The offset into the response data
+ */
+struct fc_bsg_info {
+	struct fc_bsg_job *job;
+	struct fc_lport *lport;
+	u16 rsp_code;
+	struct scatterlist *sg;
+	u32 nents;
+	size_t offset;
+};
+
+/**
+ * fc_frame_drop() - Dummy frame handler
+ * @lport: The local port the frame was received on
+ * @fp:	   The received frame
+ */
 static int fc_frame_drop(struct fc_lport *lport, struct fc_frame *fp)
 {
 	fc_frame_free(fp);
@@ -150,8 +178,8 @@ static void fc_lport_rport_callback(struct fc_lport *lport,
 	switch (event) {
 	case RPORT_EV_READY:
 		if (lport->state == LPORT_ST_DNS) {
-			lport->dns_rp = rdata;
-			fc_lport_enter_rpn_id(lport);
+			lport->dns_rdata = rdata;
+			fc_lport_enter_ns(lport, LPORT_ST_RNN_ID);
 		} else {
 			FC_LPORT_DBG(lport, "Received an READY event "
 				     "on port (%6x) for the directory "
@@ -165,7 +193,7 @@ static void fc_lport_rport_callback(struct fc_lport *lport,
 	case RPORT_EV_LOGO:
 	case RPORT_EV_FAILED:
 	case RPORT_EV_STOP:
-		lport->dns_rp = NULL;
+		lport->dns_rdata = NULL;
 		break;
 	case RPORT_EV_NONE:
 		break;
@@ -189,8 +217,8 @@ static const char *fc_lport_state(struct fc_lport *lport)
 
 /**
  * fc_lport_ptp_setup() - Create an rport for point-to-point mode
- * @lport: The lport to attach the ptp rport to
- * @fid: The FID of the ptp rport
+ * @lport:	 The lport to attach the ptp rport to
+ * @remote_fid:	 The FID of the ptp rport
  * @remote_wwpn: The WWPN of the ptp rport
  * @remote_wwnn: The WWNN of the ptp rport
  */
@@ -199,18 +227,22 @@ static void fc_lport_ptp_setup(struct fc_lport *lport,
 			       u64 remote_wwnn)
 {
 	mutex_lock(&lport->disc.disc_mutex);
-	if (lport->ptp_rp)
-		lport->tt.rport_logoff(lport->ptp_rp);
-	lport->ptp_rp = lport->tt.rport_create(lport, remote_fid);
-	lport->ptp_rp->ids.port_name = remote_wwpn;
-	lport->ptp_rp->ids.node_name = remote_wwnn;
+	if (lport->ptp_rdata)
+		lport->tt.rport_logoff(lport->ptp_rdata);
+	lport->ptp_rdata = lport->tt.rport_create(lport, remote_fid);
+	lport->ptp_rdata->ids.port_name = remote_wwpn;
+	lport->ptp_rdata->ids.node_name = remote_wwnn;
 	mutex_unlock(&lport->disc.disc_mutex);
 
-	lport->tt.rport_login(lport->ptp_rp);
+	lport->tt.rport_login(lport->ptp_rdata);
 
 	fc_lport_enter_ready(lport);
 }
 
+/**
+ * fc_get_host_port_type() - Return the port type of the given Scsi_Host
+ * @shost: The SCSI host whose port type is to be determined
+ */
 void fc_get_host_port_type(struct Scsi_Host *shost)
 {
 	/* TODO - currently just NPORT */
@@ -218,17 +250,33 @@ void fc_get_host_port_type(struct Scsi_Host *shost)
 }
 EXPORT_SYMBOL(fc_get_host_port_type);
 
+/**
+ * fc_get_host_port_state() - Return the port state of the given Scsi_Host
+ * @shost:  The SCSI host whose port state is to be determined
+ */
 void fc_get_host_port_state(struct Scsi_Host *shost)
 {
-	struct fc_lport *lp = shost_priv(shost);
+	struct fc_lport *lport = shost_priv(shost);
 
-	if (lp->link_up)
-		fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
+	mutex_lock(&lport->lp_mutex);
+	if (!lport->link_up)
+		fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
 	else
-		fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
+		switch (lport->state) {
+		case LPORT_ST_READY:
+			fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
+			break;
+		default:
+			fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
+		}
+	mutex_unlock(&lport->lp_mutex);
 }
 EXPORT_SYMBOL(fc_get_host_port_state);
 
+/**
+ * fc_get_host_speed() - Return the speed of the given Scsi_Host
+ * @shost: The SCSI host whose port speed is to be determined
+ */
 void fc_get_host_speed(struct Scsi_Host *shost)
 {
 	struct fc_lport *lport = shost_priv(shost);
@@ -237,24 +285,28 @@ void fc_get_host_speed(struct Scsi_Host *shost)
 }
 EXPORT_SYMBOL(fc_get_host_speed);
 
+/**
+ * fc_get_host_stats() - Return the Scsi_Host's statistics
+ * @shost: The SCSI host whose statistics are to be returned
+ */
 struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost)
 {
 	struct fc_host_statistics *fcoe_stats;
-	struct fc_lport *lp = shost_priv(shost);
+	struct fc_lport *lport = shost_priv(shost);
 	struct timespec v0, v1;
 	unsigned int cpu;
 
-	fcoe_stats = &lp->host_stats;
+	fcoe_stats = &lport->host_stats;
 	memset(fcoe_stats, 0, sizeof(struct fc_host_statistics));
 
 	jiffies_to_timespec(jiffies, &v0);
-	jiffies_to_timespec(lp->boot_time, &v1);
+	jiffies_to_timespec(lport->boot_time, &v1);
 	fcoe_stats->seconds_since_last_reset = (v0.tv_sec - v1.tv_sec);
 
 	for_each_possible_cpu(cpu) {
 		struct fcoe_dev_stats *stats;
 
-		stats = per_cpu_ptr(lp->dev_stats, cpu);
+		stats = per_cpu_ptr(lport->dev_stats, cpu);
 
 		fcoe_stats->tx_frames += stats->TxFrames;
 		fcoe_stats->tx_words += stats->TxWords;
@@ -279,12 +331,15 @@ struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost)
 }
 EXPORT_SYMBOL(fc_get_host_stats);
 
-/*
- * Fill in FLOGI command for request.
+/**
+ * fc_lport_flogi_fill() - Fill in FLOGI command for request
+ * @lport: The local port the FLOGI is for
+ * @flogi: The FLOGI command
+ * @op:	   The opcode
  */
-static void
-fc_lport_flogi_fill(struct fc_lport *lport, struct fc_els_flogi *flogi,
-		    unsigned int op)
+static void fc_lport_flogi_fill(struct fc_lport *lport,
+				struct fc_els_flogi *flogi,
+				unsigned int op)
 {
 	struct fc_els_csp *sp;
 	struct fc_els_cssp *cp;
@@ -312,8 +367,10 @@ fc_lport_flogi_fill(struct fc_lport *lport, struct fc_els_flogi *flogi,
 	}
 }
 
-/*
- * Add a supported FC-4 type.
+/**
+ * fc_lport_add_fc4_type() - Add a supported FC-4 type to a local port
+ * @lport: The local port to add a new FC-4 type to
+ * @type:  The new FC-4 type
  */
 static void fc_lport_add_fc4_type(struct fc_lport *lport, enum fc_fh_type type)
 {
@@ -325,11 +382,11 @@ static void fc_lport_add_fc4_type(struct fc_lport *lport, enum fc_fh_type type)
 
 /**
  * fc_lport_recv_rlir_req() - Handle received Registered Link Incident Report.
+ * @sp:	   The sequence in the RLIR exchange
+ * @fp:	   The RLIR request frame
  * @lport: Fibre Channel local port recieving the RLIR
- * @sp: current sequence in the RLIR exchange
- * @fp: RLIR request frame
  *
- * Locking Note: The lport lock is exected to be held before calling
+ * Locking Note: The lport lock is expected to be held before calling
  * this function.
  */
 static void fc_lport_recv_rlir_req(struct fc_seq *sp, struct fc_frame *fp,
@@ -344,11 +401,11 @@ static void fc_lport_recv_rlir_req(struct fc_seq *sp, struct fc_frame *fp,
 
 /**
  * fc_lport_recv_echo_req() - Handle received ECHO request
- * @lport: Fibre Channel local port recieving the ECHO
- * @sp: current sequence in the ECHO exchange
- * @fp: ECHO request frame
+ * @sp:	   The sequence in the ECHO exchange
+ * @fp:	   ECHO request frame
+ * @lport: The local port receiving the ECHO
  *
- * Locking Note: The lport lock is exected to be held before calling
+ * Locking Note: The lport lock is expected to be held before calling
  * this function.
  */
 static void fc_lport_recv_echo_req(struct fc_seq *sp, struct fc_frame *in_fp,
@@ -361,7 +418,7 @@ static void fc_lport_recv_echo_req(struct fc_seq *sp, struct fc_frame *in_fp,
 	void *dp;
 	u32 f_ctl;
 
-	FC_LPORT_DBG(lport, "Received RLIR request while in state %s\n",
+	FC_LPORT_DBG(lport, "Received ECHO request while in state %s\n",
 		     fc_lport_state(lport));
 
 	len = fr_len(in_fp) - sizeof(struct fc_frame_header);
@@ -374,7 +431,7 @@ static void fc_lport_recv_echo_req(struct fc_seq *sp, struct fc_frame *in_fp,
 	if (fp) {
 		dp = fc_frame_payload_get(fp, len);
 		memcpy(dp, pp, len);
-		*((u32 *)dp) = htonl(ELS_LS_ACC << 24);
+		*((__be32 *)dp) = htonl(ELS_LS_ACC << 24);
 		sp = lport->tt.seq_start_next(sp);
 		f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ;
 		fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
@@ -385,12 +442,12 @@ static void fc_lport_recv_echo_req(struct fc_seq *sp, struct fc_frame *in_fp,
 }
 
 /**
- * fc_lport_recv_echo_req() - Handle received Request Node ID data request
- * @lport: Fibre Channel local port recieving the RNID
- * @sp: current sequence in the RNID exchange
- * @fp: RNID request frame
+ * fc_lport_recv_rnid_req() - Handle received Request Node ID data request
+ * @sp:	   The sequence in the RNID exchange
+ * @fp:	   The RNID request frame
+ * @lport: The local port receiving the RNID
  *
- * Locking Note: The lport lock is exected to be held before calling
+ * Locking Note: The lport lock is expected to be held before calling
  * this function.
  */
 static void fc_lport_recv_rnid_req(struct fc_seq *sp, struct fc_frame *in_fp,
@@ -453,9 +510,9 @@ static void fc_lport_recv_rnid_req(struct fc_seq *sp, struct fc_frame *in_fp,
 
 /**
  * fc_lport_recv_logo_req() - Handle received fabric LOGO request
- * @lport: Fibre Channel local port recieving the LOGO
- * @sp: current sequence in the LOGO exchange
- * @fp: LOGO request frame
+ * @sp:	   The sequence in the LOGO exchange
+ * @fp:	   The LOGO request frame
+ * @lport: The local port receiving the LOGO
  *
  * Locking Note: The lport lock is exected to be held before calling
  * this function.
@@ -470,7 +527,7 @@ static void fc_lport_recv_logo_req(struct fc_seq *sp, struct fc_frame *fp,
 
 /**
  * fc_fabric_login() - Start the lport state machine
- * @lport: The lport that should log into the fabric
+ * @lport: The local port that should log into the fabric
  *
  * Locking Note: This function should not be called
  *		 with the lport lock held.
@@ -491,47 +548,69 @@ int fc_fabric_login(struct fc_lport *lport)
 EXPORT_SYMBOL(fc_fabric_login);
 
 /**
- * fc_linkup() - Handler for transport linkup events
+ * __fc_linkup() - Handler for transport linkup events
  * @lport: The lport whose link is up
+ *
+ * Locking: must be called with the lp_mutex held
  */
-void fc_linkup(struct fc_lport *lport)
+void __fc_linkup(struct fc_lport *lport)
 {
-	printk(KERN_INFO "libfc: Link up on port (%6x)\n",
-	       fc_host_port_id(lport->host));
-
-	mutex_lock(&lport->lp_mutex);
 	if (!lport->link_up) {
 		lport->link_up = 1;
 
 		if (lport->state == LPORT_ST_RESET)
 			fc_lport_enter_flogi(lport);
 	}
+}
+
+/**
+ * fc_linkup() - Handler for transport linkup events
+ * @lport: The local port whose link is up
+ */
+void fc_linkup(struct fc_lport *lport)
+{
+	printk(KERN_INFO "host%d: libfc: Link up on port (%6x)\n",
+	       lport->host->host_no, fc_host_port_id(lport->host));
+
+	mutex_lock(&lport->lp_mutex);
+	__fc_linkup(lport);
 	mutex_unlock(&lport->lp_mutex);
 }
 EXPORT_SYMBOL(fc_linkup);
 
 /**
- * fc_linkdown() - Handler for transport linkdown events
+ * __fc_linkdown() - Handler for transport linkdown events
  * @lport: The lport whose link is down
+ *
+ * Locking: must be called with the lp_mutex held
  */
-void fc_linkdown(struct fc_lport *lport)
+void __fc_linkdown(struct fc_lport *lport)
 {
-	mutex_lock(&lport->lp_mutex);
-	printk(KERN_INFO "libfc: Link down on port (%6x)\n",
-	       fc_host_port_id(lport->host));
-
 	if (lport->link_up) {
 		lport->link_up = 0;
 		fc_lport_enter_reset(lport);
 		lport->tt.fcp_cleanup(lport);
 	}
+}
+
+/**
+ * fc_linkdown() - Handler for transport linkdown events
+ * @lport: The local port whose link is down
+ */
+void fc_linkdown(struct fc_lport *lport)
+{
+	printk(KERN_INFO "host%d: libfc: Link down on port (%6x)\n",
+	       lport->host->host_no, fc_host_port_id(lport->host));
+
+	mutex_lock(&lport->lp_mutex);
+	__fc_linkdown(lport);
 	mutex_unlock(&lport->lp_mutex);
 }
 EXPORT_SYMBOL(fc_linkdown);
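
Splitting out __fc_linkup() and __fc_linkdown() lets code that already holds
lp_mutex flip the link state without re-taking the lock. A hedged sketch of
such a caller, inside a larger lp_mutex critical section (the surrounding
logic is hypothetical):

	mutex_lock(&lport->lp_mutex);
	/* ... other state checked or updated under the same lock ... */
	if (link_ok)
		__fc_linkup(lport);	/* no-op if the link is already up */
	else
		__fc_linkdown(lport);	/* resets lport, cleans up FCP */
	mutex_unlock(&lport->lp_mutex);
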
 
 /**
  * fc_fabric_logoff() - Logout of the fabric
- * @lport:	      fc_lport pointer to logoff the fabric
+ * @lport: The local port to logoff the fabric
  *
  * Return value:
  *	0 for success, -1 for failure
@@ -540,8 +619,8 @@ int fc_fabric_logoff(struct fc_lport *lport)
 {
 	lport->tt.disc_stop_final(lport);
 	mutex_lock(&lport->lp_mutex);
-	if (lport->dns_rp)
-		lport->tt.rport_logoff(lport->dns_rp);
+	if (lport->dns_rdata)
+		lport->tt.rport_logoff(lport->dns_rdata);
 	mutex_unlock(&lport->lp_mutex);
 	lport->tt.rport_flush_queue();
 	mutex_lock(&lport->lp_mutex);
@@ -553,11 +632,9 @@ int fc_fabric_logoff(struct fc_lport *lport)
 EXPORT_SYMBOL(fc_fabric_logoff);
 
 /**
- * fc_lport_destroy() - unregister a fc_lport
- * @lport:	      fc_lport pointer to unregister
+ * fc_lport_destroy() - Unregister a fc_lport
+ * @lport: The local port to unregister
  *
- * Return value:
- *	None
  * Note:
  * exit routine for fc_lport instance
  * clean-up all the allocated memory
@@ -580,13 +657,9 @@ int fc_lport_destroy(struct fc_lport *lport)
 EXPORT_SYMBOL(fc_lport_destroy);
 
 /**
- * fc_set_mfs() - sets up the mfs for the corresponding fc_lport
- * @lport: fc_lport pointer to unregister
- * @mfs: the new mfs for fc_lport
- *
- * Set mfs for the given fc_lport to the new mfs.
- *
- * Return: 0 for success
+ * fc_set_mfs() - Set the maximum frame size for a local port
+ * @lport: The local port to set the MFS for
+ * @mfs:   The new MFS
  */
 int fc_set_mfs(struct fc_lport *lport, u32 mfs)
 {
@@ -617,7 +690,7 @@ EXPORT_SYMBOL(fc_set_mfs);
 
 /**
  * fc_lport_disc_callback() - Callback for discovery events
- * @lport: FC local port
+ * @lport: The local port receiving the event
  * @event: The discovery event
  */
 void fc_lport_disc_callback(struct fc_lport *lport, enum fc_disc_event event)
@@ -627,8 +700,9 @@ void fc_lport_disc_callback(struct fc_lport *lport, enum fc_disc_event event)
 		FC_LPORT_DBG(lport, "Discovery succeeded\n");
 		break;
 	case DISC_EV_FAILED:
-		printk(KERN_ERR "libfc: Discovery failed for port (%6x)\n",
-		       fc_host_port_id(lport->host));
+		printk(KERN_ERR "host%d: libfc: "
+		       "Discovery failed for port (%6x)\n",
+		       lport->host->host_no, fc_host_port_id(lport->host));
 		mutex_lock(&lport->lp_mutex);
 		fc_lport_enter_reset(lport);
 		mutex_unlock(&lport->lp_mutex);
@@ -641,7 +715,7 @@ void fc_lport_disc_callback(struct fc_lport *lport, enum fc_disc_event event)
 
 /**
  * fc_lport_enter_ready() - Enter the ready state and start discovery
- * @lport: Fibre Channel local port that is ready
+ * @lport: The local port that is ready
  *
  * Locking Note: The lport lock is expected to be held before calling
  * this routine.
@@ -652,22 +726,46 @@ static void fc_lport_enter_ready(struct fc_lport *lport)
 		     fc_lport_state(lport));
 
 	fc_lport_state_enter(lport, LPORT_ST_READY);
+	if (lport->vport)
+		fc_vport_set_state(lport->vport, FC_VPORT_ACTIVE);
+	fc_vports_linkchange(lport);
 
-	if (!lport->ptp_rp)
+	if (!lport->ptp_rdata)
 		lport->tt.disc_start(fc_lport_disc_callback, lport);
 }
 
 /**
+ * fc_lport_set_port_id() - Set the local port's Port ID
+ * @lport: The local port which will have its Port ID set.
+ * @port_id: The new port ID.
+ * @fp: The frame containing the incoming request, or NULL.
+ *
+ * Locking Note: The lport lock is expected to be held before calling
+ * this function.
+ */
+static void fc_lport_set_port_id(struct fc_lport *lport, u32 port_id,
+				 struct fc_frame *fp)
+{
+	if (port_id)
+		printk(KERN_INFO "host%d: Assigned Port ID %6x\n",
+		       lport->host->host_no, port_id);
+
+	fc_host_port_id(lport->host) = port_id;
+	if (lport->tt.lport_set_port_id)
+		lport->tt.lport_set_port_id(lport, port_id, fp);
+}
+
+/**
  * fc_lport_recv_flogi_req() - Receive a FLOGI request
  * @sp_in: The sequence the FLOGI is on
- * @rx_fp: The frame the FLOGI is in
- * @lport: The lport that recieved the request
+ * @rx_fp: The FLOGI frame
+ * @lport: The local port that received the request
  *
  * A received FLOGI request indicates a point-to-point connection.
  * Accept it with the common service parameters indicating our N port.
  * Set up to do a PLOGI if we have the higher-number WWPN.
  *
- * Locking Note: The lport lock is exected to be held before calling
+ * Locking Note: The lport lock is expected to be held before calling
  * this function.
  */
 static void fc_lport_recv_flogi_req(struct fc_seq *sp_in,
@@ -695,8 +793,9 @@ static void fc_lport_recv_flogi_req(struct fc_seq *sp_in,
 		goto out;
 	remote_wwpn = get_unaligned_be64(&flp->fl_wwpn);
 	if (remote_wwpn == lport->wwpn) {
-		printk(KERN_WARNING "libfc: Received FLOGI from port "
-		       "with same WWPN %llx\n", remote_wwpn);
+		printk(KERN_WARNING "host%d: libfc: Received FLOGI from port "
+		       "with same WWPN %llx\n",
+		       lport->host->host_no, remote_wwpn);
 		goto out;
 	}
 	FC_LPORT_DBG(lport, "FLOGI from port WWPN %llx\n", remote_wwpn);
@@ -715,7 +814,7 @@ static void fc_lport_recv_flogi_req(struct fc_seq *sp_in,
 		remote_fid = FC_LOCAL_PTP_FID_HI;
 	}
 
-	fc_host_port_id(lport->host) = local_fid;
+	fc_lport_set_port_id(lport, local_fid, rx_fp);
 
 	fp = fc_frame_alloc(lport, sizeof(*flp));
 	if (fp) {
@@ -747,9 +846,9 @@ out:
 
 /**
  * fc_lport_recv_req() - The generic lport request handler
- * @lport: The lport that received the request
- * @sp: The sequence the request is on
- * @fp: The frame the request is in
+ * @lport: The local port that received the request
+ * @sp:	   The sequence the request is on
+ * @fp:	   The request frame
  *
  * This function will see if the lport handles the request or
  * if an rport should handle the request.
@@ -817,8 +916,8 @@ static void fc_lport_recv_req(struct fc_lport *lport, struct fc_seq *sp,
 }
 
 /**
- * fc_lport_reset() - Reset an lport
- * @lport: The lport which should be reset
+ * fc_lport_reset() - Reset a local port
+ * @lport: The local port which should be reset
  *
  * Locking Note: This functions should not be called with the
  *		 lport lock held.
@@ -834,29 +933,31 @@ int fc_lport_reset(struct fc_lport *lport)
 EXPORT_SYMBOL(fc_lport_reset);
 
 /**
- * fc_lport_reset_locked() - Reset the local port
- * @lport: Fibre Channel local port to be reset
+ * fc_lport_reset_locked() - Reset the local port w/ the lport lock held
+ * @lport: The local port to be reset
  *
  * Locking Note: The lport lock is expected to be held before calling
  * this routine.
  */
 static void fc_lport_reset_locked(struct fc_lport *lport)
 {
-	if (lport->dns_rp)
-		lport->tt.rport_logoff(lport->dns_rp);
+	if (lport->dns_rdata)
+		lport->tt.rport_logoff(lport->dns_rdata);
 
-	lport->ptp_rp = NULL;
+	lport->ptp_rdata = NULL;
 
 	lport->tt.disc_stop(lport);
 
 	lport->tt.exch_mgr_reset(lport, 0, 0);
 	fc_host_fabric_name(lport->host) = 0;
-	fc_host_port_id(lport->host) = 0;
+
+	if (fc_host_port_id(lport->host))
+		fc_lport_set_port_id(lport, 0, NULL);
 }
 
 /**
  * fc_lport_enter_reset() - Reset the local port
- * @lport: Fibre Channel local port to be reset
+ * @lport: The local port to be reset
  *
  * Locking Note: The lport lock is expected to be held before calling
  * this routine.
@@ -866,15 +967,22 @@ static void fc_lport_enter_reset(struct fc_lport *lport)
 	FC_LPORT_DBG(lport, "Entered RESET state from %s state\n",
 		     fc_lport_state(lport));
 
+	if (lport->vport) {
+		if (lport->link_up)
+			fc_vport_set_state(lport->vport, FC_VPORT_INITIALIZING);
+		else
+			fc_vport_set_state(lport->vport, FC_VPORT_LINKDOWN);
+	}
 	fc_lport_state_enter(lport, LPORT_ST_RESET);
+	fc_vports_linkchange(lport);
 	fc_lport_reset_locked(lport);
 	if (lport->link_up)
 		fc_lport_enter_flogi(lport);
 }
 
 /**
- * fc_lport_enter_disabled() - disable the local port
- * @lport: Fibre Channel local port to be reset
+ * fc_lport_enter_disabled() - Disable the local port
+ * @lport: The local port to be reset
  *
  * Locking Note: The lport lock is expected to be held before calling
  * this routine.
@@ -885,13 +993,14 @@ static void fc_lport_enter_disabled(struct fc_lport *lport)
 		     fc_lport_state(lport));
 
 	fc_lport_state_enter(lport, LPORT_ST_DISABLED);
+	fc_vports_linkchange(lport);
 	fc_lport_reset_locked(lport);
 }
 
 /**
  * fc_lport_error() - Handler for any errors
- * @lport: The fc_lport object
- * @fp: The frame pointer
+ * @lport: The local port that the error was on
+ * @fp:	   The error code encoded in a frame pointer
  *
  * If the error was caused by a resource allocation failure
  * then wait for half a second and retry, otherwise retry
@@ -922,8 +1031,11 @@ static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp)
 			case LPORT_ST_DISABLED:
 			case LPORT_ST_READY:
 			case LPORT_ST_RESET:
-			case LPORT_ST_RPN_ID:
+			case LPORT_ST_RNN_ID:
+			case LPORT_ST_RSNN_NN:
+			case LPORT_ST_RSPN_ID:
 			case LPORT_ST_RFT_ID:
+			case LPORT_ST_RFF_ID:
 			case LPORT_ST_SCR:
 			case LPORT_ST_DNS:
 			case LPORT_ST_FLOGI:
@@ -936,33 +1048,33 @@ static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp)
 }
 
 /**
- * fc_lport_rft_id_resp() - Handle response to Register Fibre
- *			    Channel Types by ID (RPN_ID) request
- * @sp: current sequence in RPN_ID exchange
- * @fp: response frame
+ * fc_lport_ns_resp() - Handle response to a name server
+ *			registration exchange
+ * @sp:	    current sequence in exchange
+ * @fp:	    response frame
  * @lp_arg: Fibre Channel host port instance
  *
  * Locking Note: This function will be called without the lport lock
- * held, but it will lock, call an _enter_* function or fc_lport_error
+ * held, but it will lock, call an _enter_* function or fc_lport_error()
  * and then unlock the lport.
  */
-static void fc_lport_rft_id_resp(struct fc_seq *sp, struct fc_frame *fp,
-				 void *lp_arg)
+static void fc_lport_ns_resp(struct fc_seq *sp, struct fc_frame *fp,
+			     void *lp_arg)
 {
 	struct fc_lport *lport = lp_arg;
 	struct fc_frame_header *fh;
 	struct fc_ct_hdr *ct;
 
-	FC_LPORT_DBG(lport, "Received a RFT_ID %s\n", fc_els_resp_type(fp));
+	FC_LPORT_DBG(lport, "Received a ns %s\n", fc_els_resp_type(fp));
 
 	if (fp == ERR_PTR(-FC_EX_CLOSED))
 		return;
 
 	mutex_lock(&lport->lp_mutex);
 
-	if (lport->state != LPORT_ST_RFT_ID) {
-		FC_LPORT_DBG(lport, "Received a RFT_ID response, but in state "
-			     "%s\n", fc_lport_state(lport));
+	if (lport->state < LPORT_ST_RNN_ID || lport->state > LPORT_ST_RFF_ID) {
+		FC_LPORT_DBG(lport, "Received a name server response, "
+			     "but in state %s\n", fc_lport_state(lport));
 		if (IS_ERR(fp))
 			goto err;
 		goto out;
@@ -980,63 +1092,28 @@ static void fc_lport_rft_id_resp(struct fc_seq *sp, struct fc_frame *fp,
 	    ct->ct_fs_type == FC_FST_DIR &&
 	    ct->ct_fs_subtype == FC_NS_SUBTYPE &&
 	    ntohs(ct->ct_cmd) == FC_FS_ACC)
-		fc_lport_enter_scr(lport);
-	else
-		fc_lport_error(lport, fp);
-out:
-	fc_frame_free(fp);
-err:
-	mutex_unlock(&lport->lp_mutex);
-}
-
-/**
- * fc_lport_rpn_id_resp() - Handle response to Register Port
- *			    Name by ID (RPN_ID) request
- * @sp: current sequence in RPN_ID exchange
- * @fp: response frame
- * @lp_arg: Fibre Channel host port instance
- *
- * Locking Note: This function will be called without the lport lock
- * held, but it will lock, call an _enter_* function or fc_lport_error
- * and then unlock the lport.
- */
-static void fc_lport_rpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
-				 void *lp_arg)
-{
-	struct fc_lport *lport = lp_arg;
-	struct fc_frame_header *fh;
-	struct fc_ct_hdr *ct;
-
-	FC_LPORT_DBG(lport, "Received a RPN_ID %s\n", fc_els_resp_type(fp));
-
-	if (fp == ERR_PTR(-FC_EX_CLOSED))
-		return;
-
-	mutex_lock(&lport->lp_mutex);
-
-	if (lport->state != LPORT_ST_RPN_ID) {
-		FC_LPORT_DBG(lport, "Received a RPN_ID response, but in state "
-			     "%s\n", fc_lport_state(lport));
-		if (IS_ERR(fp))
-			goto err;
-		goto out;
-	}
-
-	if (IS_ERR(fp)) {
-		fc_lport_error(lport, fp);
-		goto err;
-	}
-
-	fh = fc_frame_header_get(fp);
-	ct = fc_frame_payload_get(fp, sizeof(*ct));
-	if (fh && ct && fh->fh_type == FC_TYPE_CT &&
-	    ct->ct_fs_type == FC_FST_DIR &&
-	    ct->ct_fs_subtype == FC_NS_SUBTYPE &&
-	    ntohs(ct->ct_cmd) == FC_FS_ACC)
-		fc_lport_enter_rft_id(lport);
+		switch (lport->state) {
+		case LPORT_ST_RNN_ID:
+			fc_lport_enter_ns(lport, LPORT_ST_RSNN_NN);
+			break;
+		case LPORT_ST_RSNN_NN:
+			fc_lport_enter_ns(lport, LPORT_ST_RSPN_ID);
+			break;
+		case LPORT_ST_RSPN_ID:
+			fc_lport_enter_ns(lport, LPORT_ST_RFT_ID);
+			break;
+		case LPORT_ST_RFT_ID:
+			fc_lport_enter_ns(lport, LPORT_ST_RFF_ID);
+			break;
+		case LPORT_ST_RFF_ID:
+			fc_lport_enter_scr(lport);
+			break;
+		default:
+			/* should have already been caught by state checks */
+			break;
+		}
 	else
 		fc_lport_error(lport, fp);
-
 out:
 	fc_frame_free(fp);
 err:
@@ -1045,8 +1122,8 @@ err:
 
 /**
  * fc_lport_scr_resp() - Handle response to State Change Register (SCR) request
- * @sp: current sequence in SCR exchange
- * @fp: response frame
+ * @sp:	    current sequence in SCR exchange
+ * @fp:	    response frame
  * @lp_arg: Fibre Channel lport port instance that sent the registration request
  *
  * Locking Note: This function will be called without the lport lock
@@ -1092,8 +1169,8 @@ err:
 }
 
 /**
- * fc_lport_enter_scr() - Send a State Change Register (SCR) request
- * @lport: Fibre Channel local port to register for state changes
+ * fc_lport_enter_scr() - Send a SCR (State Change Register) request
+ * @lport: The local port to register for state changes
  *
  * Locking Note: The lport lock is expected to be held before calling
  * this routine.
@@ -1114,78 +1191,74 @@ static void fc_lport_enter_scr(struct fc_lport *lport)
 	}
 
 	if (!lport->tt.elsct_send(lport, FC_FID_FCTRL, fp, ELS_SCR,
-				  fc_lport_scr_resp, lport, lport->e_d_tov))
-		fc_lport_error(lport, fp);
+				  fc_lport_scr_resp, lport,
+				  2 * lport->r_a_tov))
+		fc_lport_error(lport, NULL);
 }
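
The request timeouts above change from E_D_TOV to twice R_A_TOV: a fabric
response may legitimately arrive any time within the resource-allocation
window, so giving up after E_D_TOV retried too early. A standalone sketch of
the sizing, assuming the common 2s/10s defaults (illustrative values, not
taken from this patch):

/* sizing sketch, not kernel code; the TOV defaults are assumptions */
#include <stdio.h>

int main(void)
{
	unsigned int e_d_tov = 2 * 1000;	/* ms, error-detect TOV */
	unsigned int r_a_tov = 10 * 1000;	/* ms, resource-allocation TOV */

	printf("old ELS/CT response timeout: %u ms\n", e_d_tov);
	printf("new ELS/CT response timeout: %u ms\n", 2 * r_a_tov);
	return 0;
}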
 
 /**
- * fc_lport_enter_rft_id() - Register FC4-types with the name server
+ * fc_lport_enter_ns() - Register an object with the name server
+ * @lport: Fibre Channel local port to register
+ * @state: The name server registration state to enter
  *
  * Locking Note: The lport lock is expected to be held before calling
  * this routine.
  */
-static void fc_lport_enter_rft_id(struct fc_lport *lport)
+static void fc_lport_enter_ns(struct fc_lport *lport, enum fc_lport_state state)
 {
 	struct fc_frame *fp;
-	struct fc_ns_fts *lps;
-	int i;
+	enum fc_ns_req cmd;
+	int size = sizeof(struct fc_ct_hdr);
+	size_t len;
 
-	FC_LPORT_DBG(lport, "Entered RFT_ID state from %s state\n",
+	FC_LPORT_DBG(lport, "Entered %s state from %s state\n",
+		     fc_lport_state_names[state],
 		     fc_lport_state(lport));
 
-	fc_lport_state_enter(lport, LPORT_ST_RFT_ID);
-
-	lps = &lport->fcts;
-	i = sizeof(lps->ff_type_map) / sizeof(lps->ff_type_map[0]);
-	while (--i >= 0)
-		if (ntohl(lps->ff_type_map[i]) != 0)
-			break;
-	if (i < 0) {
-		/* nothing to register, move on to SCR */
-		fc_lport_enter_scr(lport);
-		return;
-	}
+	fc_lport_state_enter(lport, state);
 
-	fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) +
-			    sizeof(struct fc_ns_rft));
-	if (!fp) {
-		fc_lport_error(lport, fp);
+	switch (state) {
+	case LPORT_ST_RNN_ID:
+		cmd = FC_NS_RNN_ID;
+		size += sizeof(struct fc_ns_rn_id);
+		break;
+	case LPORT_ST_RSNN_NN:
+		len = strnlen(fc_host_symbolic_name(lport->host), 255);
+		/* if there is no symbolic name, skip to RFT_ID */
+		if (!len)
+			return fc_lport_enter_ns(lport, LPORT_ST_RFT_ID);
+		cmd = FC_NS_RSNN_NN;
+		size += sizeof(struct fc_ns_rsnn) + len;
+		break;
+	case LPORT_ST_RSPN_ID:
+		len = strnlen(fc_host_symbolic_name(lport->host), 255);
+		/* if there is no symbolic name, skip to RFT_ID */
+		if (!len)
+			return fc_lport_enter_ns(lport, LPORT_ST_RFT_ID);
+		cmd = FC_NS_RSPN_ID;
+		size += sizeof(struct fc_ns_rspn) + len;
+		break;
+	case LPORT_ST_RFT_ID:
+		cmd = FC_NS_RFT_ID;
+		size += sizeof(struct fc_ns_rft);
+		break;
+	case LPORT_ST_RFF_ID:
+		cmd = FC_NS_RFF_ID;
+		size += sizeof(struct fc_ns_rff_id);
+		break;
+	default:
+		fc_lport_error(lport, NULL);
 		return;
 	}
 
-	if (!lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, FC_NS_RFT_ID,
-				  fc_lport_rft_id_resp,
-				  lport, lport->e_d_tov))
-		fc_lport_error(lport, fp);
-}
-
-/**
- * fc_rport_enter_rft_id() - Register port name with the name server
- * @lport: Fibre Channel local port to register
- *
- * Locking Note: The lport lock is expected to be held before calling
- * this routine.
- */
-static void fc_lport_enter_rpn_id(struct fc_lport *lport)
-{
-	struct fc_frame *fp;
-
-	FC_LPORT_DBG(lport, "Entered RPN_ID state from %s state\n",
-		     fc_lport_state(lport));
-
-	fc_lport_state_enter(lport, LPORT_ST_RPN_ID);
-
-	fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) +
-			    sizeof(struct fc_ns_rn_id));
+	fp = fc_frame_alloc(lport, size);
 	if (!fp) {
 		fc_lport_error(lport, fp);
 		return;
 	}
 
-	if (!lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, FC_NS_RPN_ID,
-				  fc_lport_rpn_id_resp,
-				  lport, lport->e_d_tov))
+	if (!lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, cmd,
+				  fc_lport_ns_resp,
+				  lport, 3 * lport->r_a_tov))
 		fc_lport_error(lport, fp);
 }
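
fc_lport_enter_ns() and fc_lport_ns_resp() fold the per-registration
request/response handlers into one state machine that walks RNN_ID, RSNN_NN,
RSPN_ID, RFT_ID and RFF_ID in order before entering SCR. A minimal userspace
model of that fixed progression (the enum and names here are illustrative,
not the kernel's):

/* standalone model of the name server registration walk above */
#include <stdio.h>

enum ns_state { ST_RNN_ID, ST_RSNN_NN, ST_RSPN_ID, ST_RFT_ID, ST_RFF_ID, ST_SCR };

static const char *ns_name[] = {
	"RNN_ID", "RSNN_NN", "RSPN_ID", "RFT_ID", "RFF_ID", "SCR",
};

/* on an accepted (FC_FS_ACC) response, advance to the next registration */
static enum ns_state ns_next(enum ns_state cur)
{
	return cur < ST_RFF_ID ? cur + 1 : ST_SCR;
}

int main(void)
{
	enum ns_state st;

	for (st = ST_RNN_ID; st != ST_SCR; st = ns_next(st))
		printf("send %s, wait for ACC\n", ns_name[st]);
	printf("registrations done, enter %s\n", ns_name[ST_SCR]);
	return 0;
}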
 
@@ -1194,8 +1267,8 @@ static struct fc_rport_operations fc_lport_rport_ops = {
 };
 
 /**
- * fc_rport_enter_dns() - Create a rport to the name server
- * @lport: Fibre Channel local port requesting a rport for the name server
+ * fc_lport_enter_dns() - Create an fc_rport for the name server
+ * @lport: The local port requesting a remote port for the name server
  *
  * Locking Note: The lport lock is expected to be held before calling
  * this routine.
@@ -1224,8 +1297,8 @@ err:
 }
 
 /**
- * fc_lport_timeout() - Handler for the retry_work timer.
- * @work: The work struct of the fc_lport
+ * fc_lport_timeout() - Handler for the retry_work timer
+ * @work: The work struct of the local port
  */
 static void fc_lport_timeout(struct work_struct *work)
 {
@@ -1237,21 +1310,25 @@ static void fc_lport_timeout(struct work_struct *work)
 
 	switch (lport->state) {
 	case LPORT_ST_DISABLED:
+		WARN_ON(1);
+		break;
 	case LPORT_ST_READY:
-	case LPORT_ST_RESET:
 		WARN_ON(1);
 		break;
+	case LPORT_ST_RESET:
+		break;
 	case LPORT_ST_FLOGI:
 		fc_lport_enter_flogi(lport);
 		break;
 	case LPORT_ST_DNS:
 		fc_lport_enter_dns(lport);
 		break;
-	case LPORT_ST_RPN_ID:
-		fc_lport_enter_rpn_id(lport);
-		break;
+	case LPORT_ST_RNN_ID:
+	case LPORT_ST_RSNN_NN:
+	case LPORT_ST_RSPN_ID:
 	case LPORT_ST_RFT_ID:
-		fc_lport_enter_rft_id(lport);
+	case LPORT_ST_RFF_ID:
+		fc_lport_enter_ns(lport, lport->state);
 		break;
 	case LPORT_ST_SCR:
 		fc_lport_enter_scr(lport);
@@ -1266,16 +1343,16 @@ static void fc_lport_timeout(struct work_struct *work)
 
 /**
  * fc_lport_logo_resp() - Handle response to LOGO request
- * @sp: current sequence in LOGO exchange
- * @fp: response frame
- * @lp_arg: Fibre Channel lport port instance that sent the LOGO request
+ * @sp:	    The sequence that the LOGO was on
+ * @fp:	    The LOGO response frame
+ * @lp_arg: The local port that sent the LOGO request
  *
  * Locking Note: This function will be called without the lport lock
- * held, but it will lock, call an _enter_* function or fc_lport_error
+ * held, but it will lock, call an _enter_* function or fc_lport_error()
  * and then unlock the lport.
  */
-static void fc_lport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
-			       void *lp_arg)
+void fc_lport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
+			void *lp_arg)
 {
 	struct fc_lport *lport = lp_arg;
 	u8 op;
@@ -1311,10 +1388,11 @@ out:
 err:
 	mutex_unlock(&lport->lp_mutex);
 }
+EXPORT_SYMBOL(fc_lport_logo_resp);
 
 /**
  * fc_lport_enter_logo() - Logout of the fabric
- * @lport: Fibre Channel local port to be logged out
+ * @lport: The local port to be logged out
  *
  * Locking Note: The lport lock is expected to be held before calling
  * this routine.
@@ -1328,6 +1406,7 @@ static void fc_lport_enter_logo(struct fc_lport *lport)
 		     fc_lport_state(lport));
 
 	fc_lport_state_enter(lport, LPORT_ST_LOGO);
+	fc_vports_linkchange(lport);
 
 	fp = fc_frame_alloc(lport, sizeof(*logo));
 	if (!fp) {
@@ -1336,22 +1415,23 @@ static void fc_lport_enter_logo(struct fc_lport *lport)
 	}
 
 	if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, ELS_LOGO,
-				  fc_lport_logo_resp, lport, lport->e_d_tov))
-		fc_lport_error(lport, fp);
+				  fc_lport_logo_resp, lport,
+				  2 * lport->r_a_tov))
+		fc_lport_error(lport, NULL);
 }
 
 /**
  * fc_lport_flogi_resp() - Handle response to FLOGI request
- * @sp: current sequence in FLOGI exchange
- * @fp: response frame
- * @lp_arg: Fibre Channel lport port instance that sent the FLOGI request
+ * @sp:	    The sequence that the FLOGI was on
+ * @fp:	    The FLOGI response frame
+ * @lp_arg: The local port that received the FLOGI response
  *
  * Locking Note: This function will be called without the lport lock
- * held, but it will lock, call an _enter_* function or fc_lport_error
+ * held, but it will lock, call an _enter_* function or fc_lport_error()
  * and then unlock the lport.
  */
-static void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
-				void *lp_arg)
+void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
+			 void *lp_arg)
 {
 	struct fc_lport *lport = lp_arg;
 	struct fc_frame_header *fh;
@@ -1385,11 +1465,6 @@ static void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
 	fh = fc_frame_header_get(fp);
 	did = ntoh24(fh->fh_d_id);
 	if (fc_frame_payload_op(fp) == ELS_LS_ACC && did != 0) {
-
-		printk(KERN_INFO "libfc: Assigned FID (%6x) in FLOGI response\n",
-		       did);
-		fc_host_port_id(lport->host) = did;
-
 		flp = fc_frame_payload_get(fp, sizeof(*flp));
 		if (flp) {
 			mfs = ntohs(flp->fl_csp.sp_bb_data) &
@@ -1402,12 +1477,18 @@ static void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
 			e_d_tov = ntohl(flp->fl_csp.sp_e_d_tov);
 			if (csp_flags & FC_SP_FT_EDTR)
 				e_d_tov /= 1000000;
+
+			lport->npiv_enabled = !!(csp_flags & FC_SP_FT_NPIV_ACC);
+
 			if ((csp_flags & FC_SP_FT_FPORT) == 0) {
 				if (e_d_tov > lport->e_d_tov)
 					lport->e_d_tov = e_d_tov;
 				lport->r_a_tov = 2 * e_d_tov;
-				printk(KERN_INFO "libfc: Port (%6x) entered "
-				       "point to point mode\n", did);
+				fc_lport_set_port_id(lport, did, fp);
+				printk(KERN_INFO "host%d: libfc: "
+				       "Port (%6x) entered "
+				       "point-to-point mode\n",
+				       lport->host->host_no, did);
 				fc_lport_ptp_setup(lport, ntoh24(fh->fh_s_id),
 						   get_unaligned_be64(
 							   &flp->fl_wwpn),
@@ -1418,6 +1499,7 @@ static void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
 				lport->r_a_tov = r_a_tov;
 				fc_host_fabric_name(lport->host) =
 					get_unaligned_be64(&flp->fl_wwnn);
+				fc_lport_set_port_id(lport, did, fp);
 				fc_lport_enter_dns(lport);
 			}
 		}
@@ -1430,6 +1512,7 @@ out:
 err:
 	mutex_unlock(&lport->lp_mutex);
 }
+EXPORT_SYMBOL(fc_lport_flogi_resp);
 
 /**
  * fc_lport_enter_flogi() - Send a FLOGI request to the fabric manager
@@ -1451,12 +1534,18 @@ void fc_lport_enter_flogi(struct fc_lport *lport)
 	if (!fp)
 		return fc_lport_error(lport, fp);
 
-	if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, ELS_FLOGI,
-				  fc_lport_flogi_resp, lport, lport->e_d_tov))
-		fc_lport_error(lport, fp);
+	if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp,
+				  lport->vport ? ELS_FDISC : ELS_FLOGI,
+				  fc_lport_flogi_resp, lport,
+				  lport->vport ? 2 * lport->r_a_tov :
+				  lport->e_d_tov))
+		fc_lport_error(lport, NULL);
 }
 
-/* Configure a fc_lport */
+/**
+ * fc_lport_config() - Configure a fc_lport
+ * @lport: The local port to be configured
+ */
 int fc_lport_config(struct fc_lport *lport)
 {
 	INIT_DELAYED_WORK(&lport->retry_work, fc_lport_timeout);
@@ -1471,6 +1560,10 @@ int fc_lport_config(struct fc_lport *lport)
 }
 EXPORT_SYMBOL(fc_lport_config);
 
+/**
+ * fc_lport_init() - Initialize the lport layer for a local port
+ * @lport: The local port to initialize the lport layer for
+ */
 int fc_lport_init(struct fc_lport *lport)
 {
 	if (!lport->tt.lport_recv)
@@ -1500,7 +1593,253 @@ int fc_lport_init(struct fc_lport *lport)
 	if (lport->link_supported_speeds & FC_PORTSPEED_10GBIT)
 		fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_10GBIT;
 
-	INIT_LIST_HEAD(&lport->ema_list);
 	return 0;
 }
 EXPORT_SYMBOL(fc_lport_init);
+
+/**
+ * fc_lport_bsg_resp() - The common response handler for FC Passthrough requests
+ * @sp:	      The sequence for the FC Passthrough response
+ * @fp:	      The response frame
+ * @info_arg: The BSG info that the response is for
+ */
+static void fc_lport_bsg_resp(struct fc_seq *sp, struct fc_frame *fp,
+			      void *info_arg)
+{
+	struct fc_bsg_info *info = info_arg;
+	struct fc_bsg_job *job = info->job;
+	struct fc_lport *lport = info->lport;
+	struct fc_frame_header *fh;
+	size_t len;
+	void *buf;
+
+	if (IS_ERR(fp)) {
+		job->reply->result = (PTR_ERR(fp) == -FC_EX_CLOSED) ?
+			-ECONNABORTED : -ETIMEDOUT;
+		job->reply_len = sizeof(uint32_t);
+		job->state_flags |= FC_RQST_STATE_DONE;
+		job->job_done(job);
+		kfree(info);
+		return;
+	}
+
+	mutex_lock(&lport->lp_mutex);
+	fh = fc_frame_header_get(fp);
+	len = fr_len(fp) - sizeof(*fh);
+	buf = fc_frame_payload_get(fp, 0);
+
+	if (fr_sof(fp) == FC_SOF_I3 && !ntohs(fh->fh_seq_cnt)) {
+		/* Get the response code from the first frame payload */
+		unsigned short cmd = (info->rsp_code == FC_FS_ACC) ?
+			ntohs(((struct fc_ct_hdr *)buf)->ct_cmd) :
+			(unsigned short)fc_frame_payload_op(fp);
+
+		/* Save the reply status of the job */
+		job->reply->reply_data.ctels_reply.status =
+			(cmd == info->rsp_code) ?
+			FC_CTELS_STATUS_OK : FC_CTELS_STATUS_REJECT;
+	}
+
+	job->reply->reply_payload_rcv_len +=
+		fc_copy_buffer_to_sglist(buf, len, info->sg, &info->nents,
+					 &info->offset, KM_BIO_SRC_IRQ, NULL);
+
+	if (fr_eof(fp) == FC_EOF_T &&
+	    (ntoh24(fh->fh_f_ctl) & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==
+	    (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) {
+		if (job->reply->reply_payload_rcv_len >
+		    job->reply_payload.payload_len)
+			job->reply->reply_payload_rcv_len =
+				job->reply_payload.payload_len;
+		job->reply->result = 0;
+		job->state_flags |= FC_RQST_STATE_DONE;
+		job->job_done(job);
+		kfree(info);
+	}
+	fc_frame_free(fp);
+	mutex_unlock(&lport->lp_mutex);
+}
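
fc_lport_bsg_resp() accumulates payload across multiple response frames and
only completes the job once a frame carries EOF_T with both the last-sequence
and end-sequence F_CTL bits set. The test uses the (mask & bits) == bits form,
since either bit alone is not enough; a tiny standalone check (the mask values
are assumptions matching the usual FC-FS bit positions, not copied from the
headers):

/* standalone sketch of the sequence-complete test above */
#include <assert.h>
#include <stdint.h>

#define LAST_SEQ 0x100000u	/* assumed FC_FC_LAST_SEQ encoding */
#define END_SEQ  0x080000u	/* assumed FC_FC_END_SEQ encoding */

static int seq_complete(uint32_t f_ctl)
{
	/* both bits must be set; a != 0 test would match either alone */
	return (f_ctl & (LAST_SEQ | END_SEQ)) == (LAST_SEQ | END_SEQ);
}

int main(void)
{
	assert(!seq_complete(LAST_SEQ));
	assert(!seq_complete(END_SEQ));
	assert(seq_complete(LAST_SEQ | END_SEQ));
	return 0;
}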
+
+/**
+ * fc_lport_els_request() - Send ELS passthrough request
+ * @job:   The BSG Passthrough job
+ * @lport: The local port sending the request
+ * @did:   The destination port id
+ * @tov:   The timeout period to wait for the response
+ *
+ * Locking Note: The lport lock is expected to be held before calling
+ * this routine.
+ */
+static int fc_lport_els_request(struct fc_bsg_job *job,
+				struct fc_lport *lport,
+				u32 did, u32 tov)
+{
+	struct fc_bsg_info *info;
+	struct fc_frame *fp;
+	struct fc_frame_header *fh;
+	char *pp;
+	int len;
+
+	fp = fc_frame_alloc(lport, job->request_payload.payload_len);
+	if (!fp)
+		return -ENOMEM;
+
+	len = job->request_payload.payload_len;
+	pp = fc_frame_payload_get(fp, len);
+
+	sg_copy_to_buffer(job->request_payload.sg_list,
+			  job->request_payload.sg_cnt,
+			  pp, len);
+
+	fh = fc_frame_header_get(fp);
+	fh->fh_r_ctl = FC_RCTL_ELS_REQ;
+	hton24(fh->fh_d_id, did);
+	hton24(fh->fh_s_id, fc_host_port_id(lport->host));
+	fh->fh_type = FC_TYPE_ELS;
+	hton24(fh->fh_f_ctl, FC_FC_FIRST_SEQ |
+	       FC_FC_END_SEQ | FC_FC_SEQ_INIT);
+	fh->fh_cs_ctl = 0;
+	fh->fh_df_ctl = 0;
+	fh->fh_parm_offset = 0;
+
+	info = kzalloc(sizeof(struct fc_bsg_info), GFP_KERNEL);
+	if (!info) {
+		fc_frame_free(fp);
+		return -ENOMEM;
+	}
+
+	info->job = job;
+	info->lport = lport;
+	info->rsp_code = ELS_LS_ACC;
+	info->nents = job->reply_payload.sg_cnt;
+	info->sg = job->reply_payload.sg_list;
+
+	if (!lport->tt.exch_seq_send(lport, fp, fc_lport_bsg_resp,
+				     NULL, info, tov))
+		return -ECOMM;
+	return 0;
+}
+
+/**
+ * fc_lport_ct_request() - Send CT Passthrough request
+ * @job:   The BSG Passthrough job
+ * @lport: The local port sending the request
+ * @did:   The destination FC-ID
+ * @tov:   The timeout period to wait for the response
+ *
+ * Locking Note: The lport lock is expected to be held before calling
+ * this routine.
+ */
+static int fc_lport_ct_request(struct fc_bsg_job *job,
+			       struct fc_lport *lport, u32 did, u32 tov)
+{
+	struct fc_bsg_info *info;
+	struct fc_frame *fp;
+	struct fc_frame_header *fh;
+	struct fc_ct_req *ct;
+	size_t len;
+
+	fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) +
+			    job->request_payload.payload_len);
+	if (!fp)
+		return -ENOMEM;
+
+	len = job->request_payload.payload_len;
+	ct = fc_frame_payload_get(fp, len);
+
+	sg_copy_to_buffer(job->request_payload.sg_list,
+			  job->request_payload.sg_cnt,
+			  ct, len);
+
+	fh = fc_frame_header_get(fp);
+	fh->fh_r_ctl = FC_RCTL_DD_UNSOL_CTL;
+	hton24(fh->fh_d_id, did);
+	hton24(fh->fh_s_id, fc_host_port_id(lport->host));
+	fh->fh_type = FC_TYPE_CT;
+	hton24(fh->fh_f_ctl, FC_FC_FIRST_SEQ |
+	       FC_FC_END_SEQ | FC_FC_SEQ_INIT);
+	fh->fh_cs_ctl = 0;
+	fh->fh_df_ctl = 0;
+	fh->fh_parm_offset = 0;
+
+	info = kzalloc(sizeof(struct fc_bsg_info), GFP_KERNEL);
+	if (!info) {
+		fc_frame_free(fp);
+		return -ENOMEM;
+	}
+
+	info->job = job;
+	info->lport = lport;
+	info->rsp_code = FC_FS_ACC;
+	info->nents = job->reply_payload.sg_cnt;
+	info->sg = job->reply_payload.sg_list;
+
+	if (!lport->tt.exch_seq_send(lport, fp, fc_lport_bsg_resp,
+				     NULL, info, tov))
+		return -ECOMM;
+	return 0;
+}
+
+/**
+ * fc_lport_bsg_request() - The common entry point for sending
+ *			    FC Passthrough requests
+ * @job: The BSG passthrough job
+ */
+int fc_lport_bsg_request(struct fc_bsg_job *job)
+{
+	struct request *rsp = job->req->next_rq;
+	struct Scsi_Host *shost = job->shost;
+	struct fc_lport *lport = shost_priv(shost);
+	struct fc_rport *rport;
+	struct fc_rport_priv *rdata;
+	int rc = -EINVAL;
+	u32 did;
+
+	job->reply->reply_payload_rcv_len = 0;
+	rsp->resid_len = job->reply_payload.payload_len;
+
+	mutex_lock(&lport->lp_mutex);
+
+	switch (job->request->msgcode) {
+	case FC_BSG_RPT_ELS:
+		rport = job->rport;
+		if (!rport)
+			break;
+
+		rdata = rport->dd_data;
+		rc = fc_lport_els_request(job, lport, rport->port_id,
+					  rdata->e_d_tov);
+		break;
+
+	case FC_BSG_RPT_CT:
+		rport = job->rport;
+		if (!rport)
+			break;
+
+		rdata = rport->dd_data;
+		rc = fc_lport_ct_request(job, lport, rport->port_id,
+					 rdata->e_d_tov);
+		break;
+
+	case FC_BSG_HST_CT:
+		did = ntoh24(job->request->rqst_data.h_ct.port_id);
+		if (did == FC_FID_DIR_SERV)
+			rdata = lport->dns_rdata;
+		else
+			rdata = lport->tt.rport_lookup(lport, did);
+
+		if (!rdata)
+			break;
+
+		rc = fc_lport_ct_request(job, lport, did, rdata->e_d_tov);
+		break;
+
+	case FC_BSG_HST_ELS_NOLOGIN:
+		did = ntoh24(job->request->rqst_data.h_els.port_id);
+		rc = fc_lport_els_request(job, lport, did, lport->e_d_tov);
+		break;
+	}
+
+	mutex_unlock(&lport->lp_mutex);
+	return rc;
+}
+EXPORT_SYMBOL(fc_lport_bsg_request);
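
A libfc-based driver picks this entry point up through the FC transport
template rather than calling it directly; a hedged sketch of the wiring (the
.bsg_request field placement is assumed from the scsi_transport_fc of this
era, and the surrounding names are hypothetical):

/* sketch only: field placement assumed, all other fields elided */
#include <scsi/scsi_transport_fc.h>
#include <scsi/libfc.h>

static struct fc_function_template my_fc_transport = {
	/* the usual host and rport attribute fields omitted */
	.bsg_request = fc_lport_bsg_request,
};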
diff --git a/drivers/scsi/libfc/fc_npiv.c b/drivers/scsi/libfc/fc_npiv.c
new file mode 100644
index 000000000000..c68f6c7341c2
--- /dev/null
+++ b/drivers/scsi/libfc/fc_npiv.c
@@ -0,0 +1,161 @@
+/*
+ * Copyright(c) 2009 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+/*
+ * NPIV VN_Port helper functions for libfc
+ */
+
+#include <scsi/libfc.h>
+
+/**
+ * libfc_vport_create() - Create a new NPIV vport instance
+ * @vport: fc_vport structure from scsi_transport_fc
+ * @privsize: driver private data size to allocate along with the Scsi_Host
+ */
+struct fc_lport *libfc_vport_create(struct fc_vport *vport, int privsize)
+{
+	struct Scsi_Host *shost = vport_to_shost(vport);
+	struct fc_lport *n_port = shost_priv(shost);
+	struct fc_lport *vn_port;
+
+	vn_port = libfc_host_alloc(shost->hostt, privsize);
+	if (!vn_port)
+		goto err_out;
+	if (fc_exch_mgr_list_clone(n_port, vn_port))
+		goto err_put;
+
+	vn_port->vport = vport;
+	vport->dd_data = vn_port;
+
+	mutex_lock(&n_port->lp_mutex);
+	list_add_tail(&vn_port->list, &n_port->vports);
+	mutex_unlock(&n_port->lp_mutex);
+
+	return vn_port;
+
+err_put:
+	scsi_host_put(vn_port->host);
+err_out:
+	return NULL;
+}
+EXPORT_SYMBOL(libfc_vport_create);
+
+/**
+ * fc_vport_id_lookup() - Find the NPIV lport that matches a given fabric ID
+ * @n_port: Top level N_Port which may have multiple NPIV VN_Ports
+ * @port_id: Fabric ID to find a match for
+ *
+ * Returns: matching lport pointer or NULL if there is no match
+ */
+struct fc_lport *fc_vport_id_lookup(struct fc_lport *n_port, u32 port_id)
+{
+	struct fc_lport *lport = NULL;
+	struct fc_lport *vn_port;
+
+	if (fc_host_port_id(n_port->host) == port_id)
+		return n_port;
+
+	mutex_lock(&n_port->lp_mutex);
+	list_for_each_entry(vn_port, &n_port->vports, list) {
+		if (fc_host_port_id(vn_port->host) == port_id) {
+			lport = vn_port;
+			break;
+		}
+	}
+	mutex_unlock(&n_port->lp_mutex);
+
+	return lport;
+}
+
+/*
+ * When setting the link state of vports during an lport state change, it's
+ * necessary to hold the lp_mutex of both the N_Port and the VN_Port.
+ * This tells the lockdep engine to treat the nested locking of the VN_Port
+ * as a different lock class.
+ */
+enum libfc_lport_mutex_class {
+	LPORT_MUTEX_NORMAL = 0,
+	LPORT_MUTEX_VN_PORT = 1,
+};
+
+/**
+ * __fc_vport_setlink() - Update link and status on a VN_Port
+ * @n_port: parent N_Port
+ * @vn_port: VN_Port to update
+ *
+ * Locking: must be called with both the N_Port and VN_Port lp_mutex held
+ */
+static void __fc_vport_setlink(struct fc_lport *n_port,
+			       struct fc_lport *vn_port)
+{
+	struct fc_vport *vport = vn_port->vport;
+
+	if (vn_port->state == LPORT_ST_DISABLED)
+		return;
+
+	if (n_port->state == LPORT_ST_READY) {
+		if (n_port->npiv_enabled) {
+			fc_vport_set_state(vport, FC_VPORT_INITIALIZING);
+			__fc_linkup(vn_port);
+		} else {
+			fc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
+			__fc_linkdown(vn_port);
+		}
+	} else {
+		fc_vport_set_state(vport, FC_VPORT_LINKDOWN);
+		__fc_linkdown(vn_port);
+	}
+}
+
+/**
+ * fc_vport_setlink() - Update link and status on a VN_Port
+ * @vn_port: virtual port to update
+ */
+void fc_vport_setlink(struct fc_lport *vn_port)
+{
+	struct fc_vport *vport = vn_port->vport;
+	struct Scsi_Host *shost = vport_to_shost(vport);
+	struct fc_lport *n_port = shost_priv(shost);
+
+	mutex_lock(&n_port->lp_mutex);
+	mutex_lock_nested(&vn_port->lp_mutex, LPORT_MUTEX_VN_PORT);
+	__fc_vport_setlink(n_port, vn_port);
+	mutex_unlock(&vn_port->lp_mutex);
+	mutex_unlock(&n_port->lp_mutex);
+}
+EXPORT_SYMBOL(fc_vport_setlink);
+
+/**
+ * fc_vports_linkchange() - Change the link state of all vports
+ * @n_port: Parent N_Port that has changed state
+ *
+ * Locking: called with the n_port lp_mutex held
+ */
+void fc_vports_linkchange(struct fc_lport *n_port)
+{
+	struct fc_lport *vn_port;
+
+	list_for_each_entry(vn_port, &n_port->vports, list) {
+		mutex_lock_nested(&vn_port->lp_mutex, LPORT_MUTEX_VN_PORT);
+		__fc_vport_setlink(n_port, vn_port);
+		mutex_unlock(&vn_port->lp_mutex);
+	}
+}
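
A hedged sketch of how an LLD's fc_vport create callback could sit on top of
libfc_vport_create(); my_vport_priv and my_vport_create are hypothetical, and
only the libfc calls come from this file:

/* sketch of an NPIV-capable LLD's vport create path; names are
 * hypothetical and the error handling is deliberately minimal */
#include <linux/errno.h>
#include <scsi/libfc.h>
#include <scsi/scsi_transport_fc.h>

struct my_vport_priv {
	int dummy;	/* per-vport driver data would live here */
};

static int my_vport_create(struct fc_vport *vport, bool disabled)
{
	struct fc_lport *vn_port;

	vn_port = libfc_vport_create(vport, sizeof(struct my_vport_priv));
	if (!vn_port)
		return -ENOMEM;
	/* LLD-specific fc_lport_config()/fc_lport_init() setup goes here */
	if (!disabled)
		fc_vport_setlink(vn_port);
	return 0;
}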
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index 03ea6748e7ee..35ca0e72df46 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -55,6 +55,8 @@
 #include <scsi/libfc.h>
 #include <scsi/fc_encode.h>
 
+#include "fc_libfc.h"
+
 struct workqueue_struct *rport_event_queue;
 
 static void fc_rport_enter_plogi(struct fc_rport_priv *);
@@ -86,12 +88,13 @@ static const char *fc_rport_state_names[] = {
 	[RPORT_ST_LOGO] = "LOGO",
 	[RPORT_ST_ADISC] = "ADISC",
 	[RPORT_ST_DELETE] = "Delete",
+	[RPORT_ST_RESTART] = "Restart",
 };
 
 /**
- * fc_rport_lookup() - lookup a remote port by port_id
- * @lport: Fibre Channel host port instance
- * @port_id: remote port port_id to match
+ * fc_rport_lookup() - Lookup a remote port by port_id
+ * @lport:   The local port to lookup the remote port on
+ * @port_id: The remote port ID to look up
  */
 static struct fc_rport_priv *fc_rport_lookup(const struct fc_lport *lport,
 					     u32 port_id)
@@ -99,16 +102,17 @@ static struct fc_rport_priv *fc_rport_lookup(const struct fc_lport *lport,
 	struct fc_rport_priv *rdata;
 
 	list_for_each_entry(rdata, &lport->disc.rports, peers)
-		if (rdata->ids.port_id == port_id &&
-		    rdata->rp_state != RPORT_ST_DELETE)
+		if (rdata->ids.port_id == port_id)
 			return rdata;
 	return NULL;
 }
 
 /**
  * fc_rport_create() - Create a new remote port
- * @lport:   The local port that the new remote port is for
- * @port_id: The port ID for the new remote port
+ * @lport: The local port this remote port will be associated with
+ * @ids:   The identifiers for the new remote port
+ *
+ * The remote port will start in the INIT state.
  *
  * Locking note:  must be called with the disc_mutex held.
  */
@@ -147,8 +151,8 @@ static struct fc_rport_priv *fc_rport_create(struct fc_lport *lport,
 }
 
 /**
- * fc_rport_destroy() - free a remote port after last reference is released.
- * @kref: pointer to kref inside struct fc_rport_priv
+ * fc_rport_destroy() - Free a remote port after last reference is released
+ * @kref: The remote port's kref
  */
 static void fc_rport_destroy(struct kref *kref)
 {
@@ -159,8 +163,8 @@ static void fc_rport_destroy(struct kref *kref)
 }
 
 /**
- * fc_rport_state() - return a string for the state the rport is in
- * @rdata: remote port private data
+ * fc_rport_state() - Return a string identifying the remote port's state
+ * @rdata: The remote port
  */
 static const char *fc_rport_state(struct fc_rport_priv *rdata)
 {
@@ -173,9 +177,9 @@ static const char *fc_rport_state(struct fc_rport_priv *rdata)
 }
 
 /**
- * fc_set_rport_loss_tmo() - Set the remote port loss timeout in seconds.
- * @rport: Pointer to Fibre Channel remote port structure
- * @timeout: timeout in seconds
+ * fc_set_rport_loss_tmo() - Set the remote port loss timeout
+ * @rport:   The remote port that gets a new timeout value
+ * @timeout: The new timeout value (in seconds)
  */
 void fc_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout)
 {
@@ -187,9 +191,11 @@ void fc_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout)
 EXPORT_SYMBOL(fc_set_rport_loss_tmo);
 
 /**
- * fc_plogi_get_maxframe() - Get max payload from the common service parameters
- * @flp: FLOGI payload structure
- * @maxval: upper limit, may be less than what is in the service parameters
+ * fc_plogi_get_maxframe() - Get the maximum payload from the common service
+ *			     parameters in a FLOGI frame
+ * @flp:    The FLOGI payload
+ * @maxval: The maximum frame size upper limit; this may be less than what
+ *	    is in the service parameters
  */
 static unsigned int fc_plogi_get_maxframe(struct fc_els_flogi *flp,
 					  unsigned int maxval)
@@ -210,9 +216,9 @@ static unsigned int fc_plogi_get_maxframe(struct fc_els_flogi *flp,
 }
 
 /**
- * fc_rport_state_enter() - Change the rport's state
- * @rdata: The rport whose state should change
- * @new: The new state of the rport
+ * fc_rport_state_enter() - Change the state of a remote port
+ * @rdata: The remote port whose state should change
+ * @new:   The new state
  *
  * Locking Note: Called with the rport lock held
  */
@@ -224,17 +230,22 @@ static void fc_rport_state_enter(struct fc_rport_priv *rdata,
 	rdata->rp_state = new;
 }
 
+/**
+ * fc_rport_work() - Handler for remote port events in the rport_event_queue
+ * @work: Handle to the remote port being dequeued
+ */
 static void fc_rport_work(struct work_struct *work)
 {
 	u32 port_id;
 	struct fc_rport_priv *rdata =
 		container_of(work, struct fc_rport_priv, event_work);
-	struct fc_rport_libfc_priv *rp;
+	struct fc_rport_libfc_priv *rpriv;
 	enum fc_rport_event event;
 	struct fc_lport *lport = rdata->local_port;
 	struct fc_rport_operations *rport_ops;
 	struct fc_rport_identifiers ids;
 	struct fc_rport *rport;
+	int restart = 0;
 
 	mutex_lock(&rdata->rp_mutex);
 	event = rdata->event;
@@ -265,12 +276,12 @@ static void fc_rport_work(struct work_struct *work)
 		rport->maxframe_size = rdata->maxframe_size;
 		rport->supported_classes = rdata->supported_classes;
 
-		rp = rport->dd_data;
-		rp->local_port = lport;
-		rp->rp_state = rdata->rp_state;
-		rp->flags = rdata->flags;
-		rp->e_d_tov = rdata->e_d_tov;
-		rp->r_a_tov = rdata->r_a_tov;
+		rpriv = rport->dd_data;
+		rpriv->local_port = lport;
+		rpriv->rp_state = rdata->rp_state;
+		rpriv->flags = rdata->flags;
+		rpriv->e_d_tov = rdata->e_d_tov;
+		rpriv->r_a_tov = rdata->r_a_tov;
 		mutex_unlock(&rdata->rp_mutex);
 
 		if (rport_ops && rport_ops->event_callback) {
@@ -287,8 +298,19 @@ static void fc_rport_work(struct work_struct *work)
 		mutex_unlock(&rdata->rp_mutex);
 
 		if (port_id != FC_FID_DIR_SERV) {
+			/*
+			 * We must drop rp_mutex before taking disc_mutex.
+			 * Re-evaluate state to allow for restart.
+			 * A transition to RESTART state must only happen
+			 * while disc_mutex is held and rdata is on the list.
+			 */
 			mutex_lock(&lport->disc.disc_mutex);
-			list_del(&rdata->peers);
+			mutex_lock(&rdata->rp_mutex);
+			if (rdata->rp_state == RPORT_ST_RESTART)
+				restart = 1;
+			else
+				list_del(&rdata->peers);
+			mutex_unlock(&rdata->rp_mutex);
 			mutex_unlock(&lport->disc.disc_mutex);
 		}
 
@@ -305,14 +327,20 @@ static void fc_rport_work(struct work_struct *work)
 		lport->tt.exch_mgr_reset(lport, port_id, 0);
 
 		if (rport) {
-			rp = rport->dd_data;
-			rp->rp_state = RPORT_ST_DELETE;
+			rpriv = rport->dd_data;
+			rpriv->rp_state = RPORT_ST_DELETE;
 			mutex_lock(&rdata->rp_mutex);
 			rdata->rport = NULL;
 			mutex_unlock(&rdata->rp_mutex);
 			fc_remote_port_delete(rport);
 		}
-		kref_put(&rdata->kref, lport->tt.rport_destroy);
+		if (restart) {
+			mutex_lock(&rdata->rp_mutex);
+			FC_RPORT_DBG(rdata, "work restart\n");
+			fc_rport_enter_plogi(rdata);
+			mutex_unlock(&rdata->rp_mutex);
+		} else
+			kref_put(&rdata->kref, lport->tt.rport_destroy);
 		break;
 
 	default:
@@ -323,7 +351,7 @@ static void fc_rport_work(struct work_struct *work)
 
 /**
  * fc_rport_login() - Start the remote port login state machine
- * @rdata: private remote port
+ * @rdata: The remote port to be logged in to
  *
  * Locking Note: Called without the rport lock held. This
  * function will hold the rport lock, call an _enter_*
@@ -342,6 +370,12 @@ int fc_rport_login(struct fc_rport_priv *rdata)
 		FC_RPORT_DBG(rdata, "ADISC port\n");
 		fc_rport_enter_adisc(rdata);
 		break;
+	case RPORT_ST_RESTART:
+		break;
+	case RPORT_ST_DELETE:
+		FC_RPORT_DBG(rdata, "Restart deleted port\n");
+		fc_rport_state_enter(rdata, RPORT_ST_RESTART);
+		break;
 	default:
 		FC_RPORT_DBG(rdata, "Login to port\n");
 		fc_rport_enter_plogi(rdata);
@@ -353,9 +387,9 @@ int fc_rport_login(struct fc_rport_priv *rdata)
 }
 
 /**
- * fc_rport_enter_delete() - schedule a remote port to be deleted.
- * @rdata: private remote port
- * @event: event to report as the reason for deletion
+ * fc_rport_enter_delete() - Schedule a remote port to be deleted
+ * @rdata: The remote port to be deleted
+ * @event: The event to report as the reason for deletion
  *
  * Locking Note: Called with the rport lock held.
  *
@@ -382,8 +416,8 @@ static void fc_rport_enter_delete(struct fc_rport_priv *rdata,
 }
 
 /**
- * fc_rport_logoff() - Logoff and remove an rport
- * @rdata: private remote port
+ * fc_rport_logoff() - Logoff and remove a remote port
+ * @rdata: The remote port to be logged off of
  *
  * Locking Note: Called without the rport lock held. This
  * function will hold the rport lock, call an _enter_*
@@ -397,26 +431,27 @@ int fc_rport_logoff(struct fc_rport_priv *rdata)
 
 	if (rdata->rp_state == RPORT_ST_DELETE) {
 		FC_RPORT_DBG(rdata, "Port in Delete state, not removing\n");
-		mutex_unlock(&rdata->rp_mutex);
 		goto out;
 	}
 
-	fc_rport_enter_logo(rdata);
+	if (rdata->rp_state == RPORT_ST_RESTART)
+		FC_RPORT_DBG(rdata, "Port in Restart state, deleting\n");
+	else
+		fc_rport_enter_logo(rdata);
 
 	/*
 	 * Change the state to Delete so that we discard
 	 * the response.
 	 */
 	fc_rport_enter_delete(rdata, RPORT_EV_STOP);
-	mutex_unlock(&rdata->rp_mutex);
-
 out:
+	mutex_unlock(&rdata->rp_mutex);
 	return 0;
 }
 
 /**
- * fc_rport_enter_ready() - The rport is ready
- * @rdata: private remote port
+ * fc_rport_enter_ready() - Transition to the RPORT_ST_READY state
+ * @rdata: The remote port that is ready
  *
  * Locking Note: The rport lock is expected to be held before calling
  * this routine.
@@ -433,8 +468,8 @@ static void fc_rport_enter_ready(struct fc_rport_priv *rdata)
 }
 
 /**
- * fc_rport_timeout() - Handler for the retry_work timer.
- * @work: The work struct of the fc_rport_priv
+ * fc_rport_timeout() - Handler for the retry_work timer
+ * @work: Handle to the remote port that has timed out
  *
  * Locking Note: Called without the rport lock held. This
  * function will hold the rport lock, call an _enter_*
@@ -466,6 +501,7 @@ static void fc_rport_timeout(struct work_struct *work)
 	case RPORT_ST_READY:
 	case RPORT_ST_INIT:
 	case RPORT_ST_DELETE:
+	case RPORT_ST_RESTART:
 		break;
 	}
 
@@ -474,8 +510,8 @@ static void fc_rport_timeout(struct work_struct *work)
 
 /**
  * fc_rport_error() - Error handler, called once retries have been exhausted
- * @rdata: private remote port
- * @fp: The frame pointer
+ * @rdata: The remote port the error happened on
+ * @fp:	   The error code encapsulated in a frame pointer
  *
  * Locking Note: The rport lock is expected to be held before
  * calling this routine
@@ -499,6 +535,7 @@ static void fc_rport_error(struct fc_rport_priv *rdata, struct fc_frame *fp)
 		fc_rport_enter_logo(rdata);
 		break;
 	case RPORT_ST_DELETE:
+	case RPORT_ST_RESTART:
 	case RPORT_ST_READY:
 	case RPORT_ST_INIT:
 		break;
@@ -506,9 +543,9 @@ static void fc_rport_error(struct fc_rport_priv *rdata, struct fc_frame *fp)
 }
 
 /**
- * fc_rport_error_retry() - Error handler when retries are desired
- * @rdata: private remote port data
- * @fp: The frame pointer
+ * fc_rport_error_retry() - Handler for remote port state retries
+ * @rdata: The remote port whose state is to be retried
+ * @fp:	   The error code encapsulated in a frame pointer
  *
  * If the error was an exchange timeout retry immediately,
  * otherwise wait for E_D_TOV.
@@ -540,10 +577,10 @@ static void fc_rport_error_retry(struct fc_rport_priv *rdata,
 }
 
 /**
- * fc_rport_plogi_recv_resp() - Handle incoming ELS PLOGI response
- * @sp: current sequence in the PLOGI exchange
- * @fp: response frame
- * @rdata_arg: private remote port data
+ * fc_rport_plogi_recv_resp() - Handler for ELS PLOGI responses
+ * @sp:	       The sequence the PLOGI is on
+ * @fp:	       The PLOGI response frame
+ * @rdata_arg: The remote port that sent the PLOGI response
  *
  * Locking Note: This function will be called without the rport lock
  * held, but it will lock, call an _enter_* function or fc_rport_error
@@ -606,8 +643,8 @@ err:
 }
 
 /**
- * fc_rport_enter_plogi() - Send Port Login (PLOGI) request to peer
- * @rdata: private remote port data
+ * fc_rport_enter_plogi() - Send Port Login (PLOGI) request
+ * @rdata: The remote port to send a PLOGI to
  *
  * Locking Note: The rport lock is expected to be held before calling
  * this routine.
@@ -631,17 +668,18 @@ static void fc_rport_enter_plogi(struct fc_rport_priv *rdata)
 	rdata->e_d_tov = lport->e_d_tov;
 
 	if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PLOGI,
-				  fc_rport_plogi_resp, rdata, lport->e_d_tov))
-		fc_rport_error_retry(rdata, fp);
+				  fc_rport_plogi_resp, rdata,
+				  2 * lport->r_a_tov))
+		fc_rport_error_retry(rdata, NULL);
 	else
 		kref_get(&rdata->kref);
 }
 
 /**
  * fc_rport_prli_resp() - Process Login (PRLI) response handler
- * @sp: current sequence in the PRLI exchange
- * @fp: response frame
- * @rdata_arg: private remote port data
+ * @sp:	       The sequence the PRLI response was on
+ * @fp:	       The PRLI response frame
+ * @rdata_arg: The remote port that sent the PRLI response
  *
  * Locking Note: This function will be called without the rport lock
  * held, but it will lock, call an _enter_* function or fc_rport_error
@@ -710,10 +748,10 @@ err:
 }
 
 /**
- * fc_rport_logo_resp() - Logout (LOGO) response handler
- * @sp: current sequence in the LOGO exchange
- * @fp: response frame
- * @rdata_arg: private remote port data
+ * fc_rport_logo_resp() - Handler for logout (LOGO) responses
+ * @sp:	       The sequence the LOGO was on
+ * @fp:	       The LOGO response frame
+ * @rdata_arg: The remote port that sent the LOGO response
  *
  * Locking Note: This function will be called without the rport lock
  * held, but it will lock, call an _enter_* function or fc_rport_error
@@ -756,8 +794,8 @@ err:
 }
 
 /**
- * fc_rport_enter_prli() - Send Process Login (PRLI) request to peer
- * @rdata: private remote port data
+ * fc_rport_enter_prli() - Send Process Login (PRLI) request
+ * @rdata: The remote port to send the PRLI request to
  *
  * Locking Note: The rport lock is expected to be held before calling
  * this routine.
@@ -792,17 +830,18 @@ static void fc_rport_enter_prli(struct fc_rport_priv *rdata)
 	}
 
 	if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PRLI,
-				  fc_rport_prli_resp, rdata, lport->e_d_tov))
-		fc_rport_error_retry(rdata, fp);
+				  fc_rport_prli_resp, rdata,
+				  2 * lport->r_a_tov))
+		fc_rport_error_retry(rdata, NULL);
 	else
 		kref_get(&rdata->kref);
 }
 
 /**
- * fc_rport_els_rtv_resp() - Request Timeout Value response handler
- * @sp: current sequence in the RTV exchange
- * @fp: response frame
- * @rdata_arg: private remote port data
+ * fc_rport_els_rtv_resp() - Handler for Request Timeout Value (RTV) responses
+ * @sp:	       The sequence the RTV was on
+ * @fp:	       The RTV response frame
+ * @rdata_arg: The remote port that sent the RTV response
  *
  * Many targets don't seem to support this.
  *
@@ -865,8 +904,8 @@ err:
 }
 
 /**
- * fc_rport_enter_rtv() - Send Request Timeout Value (RTV) request to peer
- * @rdata: private remote port data
+ * fc_rport_enter_rtv() - Send Request Timeout Value (RTV) request
+ * @rdata: The remote port to send the RTV request to
  *
  * Locking Note: The rport lock is expected to be held before calling
  * this routine.
@@ -888,15 +927,16 @@ static void fc_rport_enter_rtv(struct fc_rport_priv *rdata)
 	}
 
 	if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_RTV,
-				     fc_rport_rtv_resp, rdata, lport->e_d_tov))
-		fc_rport_error_retry(rdata, fp);
+				  fc_rport_rtv_resp, rdata,
+				  2 * lport->r_a_tov))
+		fc_rport_error_retry(rdata, NULL);
 	else
 		kref_get(&rdata->kref);
 }
 
 /**
- * fc_rport_enter_logo() - Send Logout (LOGO) request to peer
- * @rdata: private remote port data
+ * fc_rport_enter_logo() - Send a logout (LOGO) request
+ * @rdata: The remote port to send the LOGO request to
  *
  * Locking Note: The rport lock is expected to be held before calling
  * this routine.
@@ -918,24 +958,25 @@ static void fc_rport_enter_logo(struct fc_rport_priv *rdata)
 	}
 
 	if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_LOGO,
-				  fc_rport_logo_resp, rdata, lport->e_d_tov))
-		fc_rport_error_retry(rdata, fp);
+				  fc_rport_logo_resp, rdata,
+				  2 * lport->r_a_tov))
+		fc_rport_error_retry(rdata, NULL);
 	else
 		kref_get(&rdata->kref);
 }
 
 /**
- * fc_rport_els_adisc_resp() - Address Discovery response handler
- * @sp: current sequence in the ADISC exchange
- * @fp: response frame
- * @rdata_arg: remote port private.
+ * fc_rport_els_adisc_resp() - Handler for Address Discovery (ADISC) responses
+ * @sp:	       The sequence the ADISC response was on
+ * @fp:	       The ADISC response frame
+ * @rdata_arg: The remote port that sent the ADISC response
  *
  * Locking Note: This function will be called without the rport lock
  * held, but it will lock, call an _enter_* function or fc_rport_error
  * and then unlock the rport.
  */
 static void fc_rport_adisc_resp(struct fc_seq *sp, struct fc_frame *fp,
-			      void *rdata_arg)
+				void *rdata_arg)
 {
 	struct fc_rport_priv *rdata = rdata_arg;
 	struct fc_els_adisc *adisc;
@@ -983,8 +1024,8 @@ err:
 }
 
 /**
- * fc_rport_enter_adisc() - Send Address Discover (ADISC) request to peer
- * @rdata: remote port private data
+ * fc_rport_enter_adisc() - Send Address Discover (ADISC) request
+ * @rdata: The remote port to send the ADISC request to
  *
  * Locking Note: The rport lock is expected to be held before calling
  * this routine.
@@ -1005,17 +1046,18 @@ static void fc_rport_enter_adisc(struct fc_rport_priv *rdata)
 		return;
 	}
 	if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_ADISC,
-				  fc_rport_adisc_resp, rdata, lport->e_d_tov))
-		fc_rport_error_retry(rdata, fp);
+				  fc_rport_adisc_resp, rdata,
+				  2 * lport->r_a_tov))
+		fc_rport_error_retry(rdata, NULL);
 	else
 		kref_get(&rdata->kref);
 }
 
 /**
- * fc_rport_recv_adisc_req() - Handle incoming Address Discovery (ADISC) Request
- * @rdata: remote port private
- * @sp: current sequence in the ADISC exchange
- * @in_fp: ADISC request frame
+ * fc_rport_recv_adisc_req() - Handler for Address Discovery (ADISC) requests
+ * @rdata: The remote port that sent the ADISC request
+ * @sp:	   The sequence the ADISC request was on
+ * @in_fp: The ADISC request frame
  *
  * Locking Note:  Called with the lport and rport locks held.
  */
@@ -1056,10 +1098,82 @@ drop:
 }
 
 /**
- * fc_rport_recv_els_req() - handle a validated ELS request.
- * @lport: Fibre Channel local port
- * @sp: current sequence in the PLOGI exchange
- * @fp: response frame
+ * fc_rport_recv_rls_req() - Handle received Read Link Status request
+ * @rdata: The remote port that sent the RLS request
+ * @sp:	The sequence that the RLS was on
+ * @rx_fp: The RLS request frame
+ *
+ * Locking Note: The rport lock is expected to be held before calling
+ * this function.
+ */
+static void fc_rport_recv_rls_req(struct fc_rport_priv *rdata,
+				  struct fc_seq *sp, struct fc_frame *rx_fp)
+{
+	struct fc_lport *lport = rdata->local_port;
+	struct fc_frame *fp;
+	struct fc_exch *ep = fc_seq_exch(sp);
+	struct fc_els_rls *rls;
+	struct fc_els_rls_resp *rsp;
+	struct fc_els_lesb *lesb;
+	struct fc_seq_els_data rjt_data;
+	struct fc_host_statistics *hst;
+	u32 f_ctl;
+
+	FC_RPORT_DBG(rdata, "Received RLS request while in state %s\n",
+		     fc_rport_state(rdata));
+
+	rls = fc_frame_payload_get(rx_fp, sizeof(*rls));
+	if (!rls) {
+		rjt_data.reason = ELS_RJT_PROT;
+		rjt_data.explan = ELS_EXPL_INV_LEN;
+		goto out_rjt;
+	}
+
+	fp = fc_frame_alloc(lport, sizeof(*rsp));
+	if (!fp) {
+		rjt_data.reason = ELS_RJT_UNAB;
+		rjt_data.explan = ELS_EXPL_INSUF_RES;
+		goto out_rjt;
+	}
+
+	rsp = fc_frame_payload_get(fp, sizeof(*rsp));
+	memset(rsp, 0, sizeof(*rsp));
+	rsp->rls_cmd = ELS_LS_ACC;
+	lesb = &rsp->rls_lesb;
+	if (lport->tt.get_lesb) {
+		/* get LESB from LLD if it supports it */
+		lport->tt.get_lesb(lport, lesb);
+	} else {
+		fc_get_host_stats(lport->host);
+		hst = &lport->host_stats;
+		lesb->lesb_link_fail = htonl(hst->link_failure_count);
+		lesb->lesb_sync_loss = htonl(hst->loss_of_sync_count);
+		lesb->lesb_sig_loss = htonl(hst->loss_of_signal_count);
+		lesb->lesb_prim_err = htonl(hst->prim_seq_protocol_err_count);
+		lesb->lesb_inv_word = htonl(hst->invalid_tx_word_count);
+		lesb->lesb_inv_crc = htonl(hst->invalid_crc_count);
+	}
+
+	sp = lport->tt.seq_start_next(sp);
+	f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ;
+	fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
+		       FC_TYPE_ELS, f_ctl, 0);
+	lport->tt.seq_send(lport, sp, fp);
+	goto out;
+
+out_rjt:
+	rjt_data.fp = NULL;
+	lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
+out:
+	fc_frame_free(rx_fp);
+}
+
+/**
+ * fc_rport_recv_els_req() - Handler for validated ELS requests
+ * @lport: The local port that received the ELS request
+ * @sp:	   The sequence that the ELS request was on
+ * @fp:	   The ELS request frame
  *
  * Handle incoming ELS requests that require port login.
  * The ELS opcode has already been validated by the caller.
@@ -1117,6 +1231,9 @@ static void fc_rport_recv_els_req(struct fc_lport *lport,
 		els_data.fp = fp;
 		lport->tt.seq_els_rsp_send(sp, ELS_REC, &els_data);
 		break;
+	case ELS_RLS:
+		fc_rport_recv_rls_req(rdata, sp, fp);
+		break;
 	default:
 		fc_frame_free(fp);	/* can't happen */
 		break;
@@ -1131,10 +1248,10 @@ reject:
 }
 
 /**
- * fc_rport_recv_req() - Handle a received ELS request from a rport
- * @sp: current sequence in the PLOGI exchange
- * @fp: response frame
- * @lport: Fibre Channel local port
+ * fc_rport_recv_req() - Handler for requests
+ * @sp:	   The sequence the request was on
+ * @fp:	   The request frame
+ * @lport: The local port that received the request
  *
  * Locking Note: Called with the lport lock held.
  */
@@ -1161,6 +1278,7 @@ void fc_rport_recv_req(struct fc_seq *sp, struct fc_frame *fp,
 	case ELS_ADISC:
 	case ELS_RRQ:
 	case ELS_REC:
+	case ELS_RLS:
 		fc_rport_recv_els_req(lport, sp, fp);
 		break;
 	default:
@@ -1174,10 +1292,10 @@ void fc_rport_recv_req(struct fc_seq *sp, struct fc_frame *fp,
 }
 
 /**
- * fc_rport_recv_plogi_req() - Handle incoming Port Login (PLOGI) request
- * @lport: local port
- * @sp: current sequence in the PLOGI exchange
- * @fp: PLOGI request frame
+ * fc_rport_recv_plogi_req() - Handler for Port Login (PLOGI) requests
+ * @lport: The local port that received the PLOGI request
+ * @sp:	   The sequence that the PLOGI request was on
+ * @rx_fp: The PLOGI request frame
  *
  * Locking Note: The rport lock is held before calling this function.
  */
@@ -1248,6 +1366,7 @@ static void fc_rport_recv_plogi_req(struct fc_lport *lport,
 		}
 		break;
 	case RPORT_ST_PRLI:
+	case RPORT_ST_RTV:
 	case RPORT_ST_READY:
 	case RPORT_ST_ADISC:
 		FC_RPORT_DBG(rdata, "Received PLOGI in logged-in state %d "
@@ -1255,11 +1374,14 @@ static void fc_rport_recv_plogi_req(struct fc_lport *lport,
 		/* XXX TBD - should reset */
 		break;
 	case RPORT_ST_DELETE:
-	default:
-		FC_RPORT_DBG(rdata, "Received PLOGI in unexpected state %d\n",
-			     rdata->rp_state);
-		fc_frame_free(rx_fp);
-		goto out;
+	case RPORT_ST_LOGO:
+	case RPORT_ST_RESTART:
+		FC_RPORT_DBG(rdata, "Received PLOGI in state %s - send busy\n",
+			     fc_rport_state(rdata));
+		mutex_unlock(&rdata->rp_mutex);
+		rjt_data.reason = ELS_RJT_BUSY;
+		rjt_data.explan = ELS_EXPL_NONE;
+		goto reject;
 	}
 
 	/*
@@ -1295,10 +1417,10 @@ reject:
 }
 
 /**
- * fc_rport_recv_prli_req() - Handle incoming Process Login (PRLI) request
- * @rdata: private remote port data
- * @sp: current sequence in the PRLI exchange
- * @fp: PRLI request frame
+ * fc_rport_recv_prli_req() - Handler for process login (PRLI) requests
+ * @rdata: The remote port that sent the PRLI request
+ * @sp:	   The sequence that the PRLI was on
+ * @rx_fp: The PRLI request frame
  *
  * Locking Note: The rport lock is expected to be held before calling
  * this function.
@@ -1402,7 +1524,7 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
 				break;
 			case FC_TYPE_FCP:
 				fcp_parm = ntohl(rspp->spp_params);
-				if (fcp_parm * FCP_SPPF_RETRY)
+				if (fcp_parm & FCP_SPPF_RETRY)
 					rdata->flags |= FC_RP_FLAGS_RETRY;
 				rdata->supported_classes = FC_COS_CLASS3;
 				if (fcp_parm & FCP_SPPF_INIT_FCN)
@@ -1452,10 +1574,10 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
 }
 
 /**
- * fc_rport_recv_prlo_req() - Handle incoming Process Logout (PRLO) request
- * @rdata: private remote port data
- * @sp: current sequence in the PRLO exchange
- * @fp: PRLO request frame
+ * fc_rport_recv_prlo_req() - Handler for process logout (PRLO) requests
+ * @rdata: The remote port that sent the PRLO request
+ * @sp:	   The sequence that the PRLO was on
+ * @fp:	   The PRLO request frame
  *
  * Locking Note: The rport lock is expected to be held before calling
  * this function.
@@ -1482,10 +1604,10 @@ static void fc_rport_recv_prlo_req(struct fc_rport_priv *rdata,
 }
 
 /**
- * fc_rport_recv_logo_req() - Handle incoming Logout (LOGO) request
- * @lport: local port.
- * @sp: current sequence in the LOGO exchange
- * @fp: LOGO request frame
+ * fc_rport_recv_logo_req() - Handler for logout (LOGO) requests
+ * @lport: The local port that received the LOGO request
+ * @sp:	   The sequence that the LOGO request was on
+ * @fp:	   The LOGO request frame
  *
  * Locking Note: The rport lock is expected to be held before calling
  * this function.
@@ -1510,14 +1632,14 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport,
 		FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n",
 			     fc_rport_state(rdata));
 
+		fc_rport_enter_delete(rdata, RPORT_EV_LOGO);
+
 		/*
-		 * If the remote port was created due to discovery,
-		 * log back in.  It may have seen a stale RSCN about us.
+		 * If the remote port was created due to discovery, set state
+		 * to log back in.  It may have seen a stale RSCN about us.
 		 */
-		if (rdata->rp_state != RPORT_ST_DELETE && rdata->disc_id)
-			fc_rport_enter_plogi(rdata);
-		else
-			fc_rport_enter_delete(rdata, RPORT_EV_LOGO);
+		if (rdata->disc_id)
+			fc_rport_state_enter(rdata, RPORT_ST_RESTART);
 		mutex_unlock(&rdata->rp_mutex);
 	} else
 		FC_RPORT_ID_DBG(lport, sid,
@@ -1526,11 +1648,18 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport,
 	fc_frame_free(fp);
 }
 
+/**
+ * fc_rport_flush_queue() - Flush the rport_event_queue
+ */
 static void fc_rport_flush_queue(void)
 {
 	flush_workqueue(rport_event_queue);
 }
 
+/**
+ * fc_rport_init() - Initialize the remote port layer for a local port
+ * @lport: The local port to initialize the remote port layer for
+ */
 int fc_rport_init(struct fc_lport *lport)
 {
 	if (!lport->tt.rport_lookup)
@@ -1558,25 +1687,33 @@ int fc_rport_init(struct fc_lport *lport)
 }
 EXPORT_SYMBOL(fc_rport_init);
 
-int fc_setup_rport(void)
+/**
+ * fc_setup_rport() - Initialize the rport_event_queue
+ */
+int fc_setup_rport(void)
 {
 	rport_event_queue = create_singlethread_workqueue("fc_rport_eq");
 	if (!rport_event_queue)
 		return -ENOMEM;
 	return 0;
 }
-EXPORT_SYMBOL(fc_setup_rport);
 
-void fc_destroy_rport(void)
+/**
+ * fc_destroy_rport() - Destroy the rport_event_queue
+ */
+void fc_destroy_rport(void)
 {
 	destroy_workqueue(rport_event_queue);
 }
-EXPORT_SYMBOL(fc_destroy_rport);
 
+/**
+ * fc_rport_terminate_io() - Stop all outstanding I/O on a remote port
+ * @rport: The remote port whose I/O should be terminated
+ */
 void fc_rport_terminate_io(struct fc_rport *rport)
 {
-	struct fc_rport_libfc_priv *rp = rport->dd_data;
-	struct fc_lport *lport = rp->local_port;
+	struct fc_rport_libfc_priv *rpriv = rport->dd_data;
+	struct fc_lport *lport = rpriv->local_port;
 
 	lport->tt.exch_mgr_reset(lport, 0, rport->port_id);
 	lport->tt.exch_mgr_reset(lport, rport->port_id, 0);
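
The delete path in fc_rport_work() above has to drop rp_mutex, take
disc_mutex, then retake rp_mutex and re-check for RPORT_ST_RESTART, because
disc_mutex orders before rp_mutex. A userspace model of that
retake-and-revalidate pattern (pthread-based, names illustrative):

/* standalone model of the lock-order-preserving recheck above */
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t disc_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t rp_mutex = PTHREAD_MUTEX_INITIALIZER;
static bool restart_requested;

static bool delete_or_restart(void)
{
	bool restart;

	/* rp_mutex was already dropped; take disc first, rp second */
	pthread_mutex_lock(&disc_mutex);
	pthread_mutex_lock(&rp_mutex);
	restart = restart_requested;	/* revalidate under both locks */
	pthread_mutex_unlock(&rp_mutex);
	pthread_mutex_unlock(&disc_mutex);
	return restart;
}

int main(void)
{
	restart_requested = true;
	return delete_or_restart() ? 0 : 1;
}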
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index f1a4246f890c..b7689f3d05f5 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -266,6 +266,88 @@ static int iscsi_prep_bidi_ahs(struct iscsi_task *task)
 }
 
 /**
+ * iscsi_check_tmf_restrictions - check if a task is affected by TMF
+ * @task: iscsi task
+ * @opcode: opcode to check for
+ *
+ * During TMF a task has to be checked if it's affected.
+ * All unrelated I/O can be passed through, but I/O to the
+ * affected LUN should be restricted.
+ * If 'fast_abort' is set we won't be sending any I/O to the
+ * affected LUN.
+ * Otherwise the target is waiting for all TTTs to be completed,
+ * so we have to send all outstanding Data-Out PDUs to the target.
+ */
+static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode)
+{
+	struct iscsi_conn *conn = task->conn;
+	struct iscsi_tm *tmf = &conn->tmhdr;
+	unsigned int hdr_lun;
+
+	if (conn->tmf_state == TMF_INITIAL)
+		return 0;
+
+	if ((tmf->opcode & ISCSI_OPCODE_MASK) != ISCSI_OP_SCSI_TMFUNC)
+		return 0;
+
+	switch (ISCSI_TM_FUNC_VALUE(tmf)) {
+	case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET:
+		/*
+		 * Allow PDUs for unrelated LUNs
+		 */
+		hdr_lun = scsilun_to_int((struct scsi_lun *)tmf->lun);
+		if (hdr_lun != task->sc->device->lun)
+			return 0;
+		/* fall through */
+	case ISCSI_TM_FUNC_TARGET_WARM_RESET:
+		/*
+		 * Fail all SCSI cmd PDUs
+		 */
+		if (opcode != ISCSI_OP_SCSI_DATA_OUT) {
+			iscsi_conn_printk(KERN_INFO, conn,
+					  "task [op %x/%x itt "
+					  "0x%x/0x%x] "
+					  "rejected.\n",
+					  task->hdr->opcode, opcode,
+					  task->itt, task->hdr_itt);
+			return -EACCES;
+		}
+		/*
+		 * And also all data-out PDUs in response to R2T
+		 * if fast_abort is set.
+		 */
+		if (conn->session->fast_abort) {
+			iscsi_conn_printk(KERN_INFO, conn,
+					  "task [op %x/%x itt "
+					  "0x%x/0x%x] fast abort.\n",
+					  task->hdr->opcode, opcode,
+					  task->itt, task->hdr_itt);
+			return -EACCES;
+		}
+		break;
+	case ISCSI_TM_FUNC_ABORT_TASK:
+		/*
+		 * the caller has already checked if the task
+		 * they want to abort was in the pending queue so if
+		 * we are here the cmd pdu has gone out already, and
+		 * we will only hit this for data-outs
+		 */
+		if (opcode == ISCSI_OP_SCSI_DATA_OUT &&
+		    task->hdr_itt == tmf->rtt) {
+			ISCSI_DBG_SESSION(conn->session,
+					  "Preventing task %x/%x from sending "
+					  "data-out due to abort task in "
+					  "progress\n", task->itt,
+					  task->hdr_itt);
+			return -EACCES;
+		}
+		break;
+	}
+
+	return 0;
+}
+
+/**
  * iscsi_prep_scsi_cmd_pdu - prep iscsi scsi cmd pdu
  * @task: iscsi task
  *
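
A worked example of the gating implemented by iscsi_check_tmf_restrictions() above (the LUN numbers are illustrative, not taken from the patch):

	/*
	 * Suppose a LOGICAL_UNIT_RESET TMF for LUN 2 is outstanding,
	 * so conn->tmf_state != TMF_INITIAL.  Then:
	 *
	 *   SCSI cmd PDU to LUN 5 -> 0        unrelated LUN, passes through
	 *   SCSI cmd PDU to LUN 2 -> -EACCES  cmd PDUs to that LUN fail
	 *   Data-Out PDU to LUN 2 -> 0        still sent, to satisfy open TTTs
	 *   Data-Out PDU to LUN 2 -> -EACCES  when session->fast_abort is set
	 *
	 * For ABORT_TASK, only Data-Outs whose ITT matches the aborted
	 * task (task->hdr_itt == tmf->rtt) are blocked.
	 */
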
@@ -282,6 +364,10 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
 	itt_t itt;
 	int rc;
 
+	rc = iscsi_check_tmf_restrictions(task, ISCSI_OP_SCSI_CMD);
+	if (rc)
+		return rc;
+
 	if (conn->session->tt->alloc_pdu) {
 		rc = conn->session->tt->alloc_pdu(task, ISCSI_OP_SCSI_CMD);
 		if (rc)
@@ -577,12 +663,12 @@ static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
 	struct iscsi_session *session = conn->session;
 	struct iscsi_hdr *hdr = task->hdr;
 	struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr;
+	uint8_t opcode = hdr->opcode & ISCSI_OPCODE_MASK;
 
 	if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
 		return -ENOTCONN;
 
-	if (hdr->opcode != (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) &&
-	    hdr->opcode != (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE))
+	if (opcode != ISCSI_OP_LOGIN && opcode != ISCSI_OP_TEXT)
 		nop->exp_statsn = cpu_to_be32(conn->exp_statsn);
 	/*
 	 * pre-format CmdSN for outgoing PDU.
@@ -590,9 +676,12 @@ static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
 	nop->cmdsn = cpu_to_be32(session->cmdsn);
 	if (hdr->itt != RESERVED_ITT) {
 		/*
-		 * TODO: We always use immediate, so we never hit this.
+		 * TODO: We always use immediate for normal session pdus.
 		 * If we start to send tmfs or nops as non-immediate then
 		 * we should start checking the cmdsn numbers for mgmt tasks.
+		 *
+		 * During discovery sessions iscsid sends TEXT as non-immediate,
+		 * but we only ever send one PDU at a time.
 		 */
 		if (conn->c_stage == ISCSI_CONN_STARTED &&
 		    !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
@@ -620,22 +709,28 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
 {
 	struct iscsi_session *session = conn->session;
 	struct iscsi_host *ihost = shost_priv(session->host);
+	uint8_t opcode = hdr->opcode & ISCSI_OPCODE_MASK;
 	struct iscsi_task *task;
 	itt_t itt;
 
 	if (session->state == ISCSI_STATE_TERMINATE)
 		return NULL;
 
-	if (hdr->opcode == (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) ||
-	    hdr->opcode == (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE))
+	if (opcode == ISCSI_OP_LOGIN || opcode == ISCSI_OP_TEXT) {
 		/*
 		 * Login and Text are sent serially, in
 		 * request-followed-by-response sequence.
 		 * Same task can be used. Same ITT must be used.
 		 * Note that login_task is preallocated at conn_create().
 		 */
+		if (conn->login_task->state != ISCSI_TASK_FREE) {
+			iscsi_conn_printk(KERN_ERR, conn, "Login/Text in "
+					  "progress. Cannot start new task.\n");
+			return NULL;
+		}
+
 		task = conn->login_task;
-	else {
+	} else {
 		if (session->state != ISCSI_STATE_LOGGED_IN)
 			return NULL;
 
@@ -1357,6 +1452,7 @@ EXPORT_SYMBOL_GPL(iscsi_requeue_task);
  **/
 static int iscsi_data_xmit(struct iscsi_conn *conn)
 {
+	struct iscsi_task *task;
 	int rc = 0;
 
 	spin_lock_bh(&conn->session->lock);
@@ -1394,11 +1490,8 @@ check_mgmt:
 
 	/* process pending command queue */
 	while (!list_empty(&conn->cmdqueue)) {
-		if (conn->tmf_state == TMF_QUEUED)
-			break;
-
-		conn->task = list_entry(conn->cmdqueue.next,
-					 struct iscsi_task, running);
+		conn->task = list_entry(conn->cmdqueue.next, struct iscsi_task,
+					running);
 		list_del_init(&conn->task->running);
 		if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
 			fail_scsi_task(conn->task, DID_IMM_RETRY);
@@ -1406,7 +1499,7 @@ check_mgmt:
 		}
 		rc = iscsi_prep_scsi_cmd_pdu(conn->task);
 		if (rc) {
-			if (rc == -ENOMEM) {
+			if (rc == -ENOMEM || rc == -EACCES) {
 				list_add_tail(&conn->task->running,
 					      &conn->cmdqueue);
 				conn->task = NULL;
@@ -1428,17 +1521,18 @@ check_mgmt:
 	}
 
 	while (!list_empty(&conn->requeue)) {
-		if (conn->session->fast_abort && conn->tmf_state != TMF_INITIAL)
-			break;
-
 		/*
 		 * we always do fastlogout - conn stop code will clean up.
 		 */
 		if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
 			break;
 
-		conn->task = list_entry(conn->requeue.next,
-					 struct iscsi_task, running);
+		task = list_entry(conn->requeue.next, struct iscsi_task,
+				  running);
+		if (iscsi_check_tmf_restrictions(task, ISCSI_OP_SCSI_DATA_OUT))
+			break;
+
+		conn->task = task;
 		list_del_init(&conn->task->running);
 		conn->task->state = ISCSI_TASK_RUNNING;
 		rc = iscsi_xmit_task(conn);
@@ -1591,7 +1685,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
 	if (!ihost->workq) {
 		reason = iscsi_prep_scsi_cmd_pdu(task);
 		if (reason) {
-			if (reason == -ENOMEM) {
+			if (reason == -ENOMEM || reason == -EACCES) {
 				reason = FAILURE_OOM;
 				goto prepd_reject;
 			} else {
@@ -1643,9 +1737,21 @@ fault:
 }
 EXPORT_SYMBOL_GPL(iscsi_queuecommand);
 
-int iscsi_change_queue_depth(struct scsi_device *sdev, int depth)
+int iscsi_change_queue_depth(struct scsi_device *sdev, int depth, int reason)
 {
-	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
+	switch (reason) {
+	case SCSI_QDEPTH_DEFAULT:
+		scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
+		break;
+	case SCSI_QDEPTH_QFULL:
+		scsi_track_queue_full(sdev, depth);
+		break;
+	case SCSI_QDEPTH_RAMP_UP:
+		scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
 	return sdev->queue_depth;
 }
 EXPORT_SYMBOL_GPL(iscsi_change_queue_depth);
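
The reason argument is new in this series; a driver that cannot honor a particular reason is expected to return -EOPNOTSUPP, as the libsas hunk later in this diff does. A minimal sketch of an LLD callback under that contract, assuming the 2.6.32-era midlayer API (demo_change_queue_depth is a made-up name):

	#include <scsi/scsi_device.h>
	#include <scsi/scsi_host.h>
	#include <scsi/scsi_tcq.h>

	static int demo_change_queue_depth(struct scsi_device *sdev, int depth,
					   int reason)
	{
		switch (reason) {
		case SCSI_QDEPTH_DEFAULT:	/* user/sysfs initiated resize */
			scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev),
						depth);
			break;
		case SCSI_QDEPTH_QFULL:		/* midlayer saw QUEUE_FULL */
			scsi_track_queue_full(sdev, depth);
			break;
		default:			/* e.g. SCSI_QDEPTH_RAMP_UP */
			return -EOPNOTSUPP;
		}
		return sdev->queue_depth;
	}
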
@@ -1660,72 +1766,6 @@ int iscsi_target_alloc(struct scsi_target *starget)
 }
 EXPORT_SYMBOL_GPL(iscsi_target_alloc);
 
-void iscsi_session_recovery_timedout(struct iscsi_cls_session *cls_session)
-{
-	struct iscsi_session *session = cls_session->dd_data;
-
-	spin_lock_bh(&session->lock);
-	if (session->state != ISCSI_STATE_LOGGED_IN) {
-		session->state = ISCSI_STATE_RECOVERY_FAILED;
-		if (session->leadconn)
-			wake_up(&session->leadconn->ehwait);
-	}
-	spin_unlock_bh(&session->lock);
-}
-EXPORT_SYMBOL_GPL(iscsi_session_recovery_timedout);
-
-int iscsi_eh_target_reset(struct scsi_cmnd *sc)
-{
-	struct iscsi_cls_session *cls_session;
-	struct iscsi_session *session;
-	struct iscsi_conn *conn;
-
-	cls_session = starget_to_session(scsi_target(sc->device));
-	session = cls_session->dd_data;
-	conn = session->leadconn;
-
-	mutex_lock(&session->eh_mutex);
-	spin_lock_bh(&session->lock);
-	if (session->state == ISCSI_STATE_TERMINATE) {
-failed:
-		ISCSI_DBG_EH(session,
-			     "failing target reset: Could not log back into "
-			     "target [age %d]\n",
-			     session->age);
-		spin_unlock_bh(&session->lock);
-		mutex_unlock(&session->eh_mutex);
-		return FAILED;
-	}
-
-	spin_unlock_bh(&session->lock);
-	mutex_unlock(&session->eh_mutex);
-	/*
-	 * we drop the lock here but the leadconn cannot be destoyed while
-	 * we are in the scsi eh
-	 */
-	iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
-
-	ISCSI_DBG_EH(session, "wait for relogin\n");
-	wait_event_interruptible(conn->ehwait,
-				 session->state == ISCSI_STATE_TERMINATE ||
-				 session->state == ISCSI_STATE_LOGGED_IN ||
-				 session->state == ISCSI_STATE_RECOVERY_FAILED);
-	if (signal_pending(current))
-		flush_signals(current);
-
-	mutex_lock(&session->eh_mutex);
-	spin_lock_bh(&session->lock);
-	if (session->state == ISCSI_STATE_LOGGED_IN) {
-		ISCSI_DBG_EH(session,
-			     "target reset succeeded\n");
-	} else
-		goto failed;
-	spin_unlock_bh(&session->lock);
-	mutex_unlock(&session->eh_mutex);
-	return SUCCESS;
-}
-EXPORT_SYMBOL_GPL(iscsi_eh_target_reset);
-
 static void iscsi_tmf_timedout(unsigned long data)
 {
 	struct iscsi_conn *conn = (struct iscsi_conn *)data;
@@ -2108,6 +2148,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
 		spin_lock_bh(&session->lock);
 		fail_scsi_task(task, DID_ABORT);
 		conn->tmf_state = TMF_INITIAL;
+		memset(hdr, 0, sizeof(*hdr));
 		spin_unlock_bh(&session->lock);
 		iscsi_start_tx(conn);
 		goto success_unlocked;
@@ -2118,6 +2159,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
 	case TMF_NOT_FOUND:
 		if (!sc->SCp.ptr) {
 			conn->tmf_state = TMF_INITIAL;
+			memset(hdr, 0, sizeof(*hdr));
 			/* task completed before tmf abort response */
 			ISCSI_DBG_EH(session, "sc completed while abort	in "
 					      "progress\n");
@@ -2212,6 +2254,7 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc)
 	iscsi_suspend_tx(conn);
 
 	spin_lock_bh(&session->lock);
+	memset(hdr, 0, sizeof(*hdr));
 	fail_scsi_tasks(conn, sc->device->lun, DID_ERROR);
 	conn->tmf_state = TMF_INITIAL;
 	spin_unlock_bh(&session->lock);
@@ -2229,6 +2272,172 @@ done:
 }
 EXPORT_SYMBOL_GPL(iscsi_eh_device_reset);
 
+void iscsi_session_recovery_timedout(struct iscsi_cls_session *cls_session)
+{
+	struct iscsi_session *session = cls_session->dd_data;
+
+	spin_lock_bh(&session->lock);
+	if (session->state != ISCSI_STATE_LOGGED_IN) {
+		session->state = ISCSI_STATE_RECOVERY_FAILED;
+		if (session->leadconn)
+			wake_up(&session->leadconn->ehwait);
+	}
+	spin_unlock_bh(&session->lock);
+}
+EXPORT_SYMBOL_GPL(iscsi_session_recovery_timedout);
+
+/**
+ * iscsi_eh_session_reset - drop session and attempt relogin
+ * @sc: scsi command
+ *
+ * This function will wait for a relogin, session termination from
+ * userspace, or a recovery/replacement timeout.
+ */
+static int iscsi_eh_session_reset(struct scsi_cmnd *sc)
+{
+	struct iscsi_cls_session *cls_session;
+	struct iscsi_session *session;
+	struct iscsi_conn *conn;
+
+	cls_session = starget_to_session(scsi_target(sc->device));
+	session = cls_session->dd_data;
+	conn = session->leadconn;
+
+	mutex_lock(&session->eh_mutex);
+	spin_lock_bh(&session->lock);
+	if (session->state == ISCSI_STATE_TERMINATE) {
+failed:
+		ISCSI_DBG_EH(session,
+			     "failing session reset: Could not log back into "
+			     "%s, %s [age %d]\n", session->targetname,
+			     conn->persistent_address, session->age);
+		spin_unlock_bh(&session->lock);
+		mutex_unlock(&session->eh_mutex);
+		return FAILED;
+	}
+
+	spin_unlock_bh(&session->lock);
+	mutex_unlock(&session->eh_mutex);
+	/*
+	 * we drop the lock here but the leadconn cannot be destroyed while
+	 * we are in the scsi eh
+	 */
+	iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+
+	ISCSI_DBG_EH(session, "wait for relogin\n");
+	wait_event_interruptible(conn->ehwait,
+				 session->state == ISCSI_STATE_TERMINATE ||
+				 session->state == ISCSI_STATE_LOGGED_IN ||
+				 session->state == ISCSI_STATE_RECOVERY_FAILED);
+	if (signal_pending(current))
+		flush_signals(current);
+
+	mutex_lock(&session->eh_mutex);
+	spin_lock_bh(&session->lock);
+	if (session->state == ISCSI_STATE_LOGGED_IN) {
+		ISCSI_DBG_EH(session,
+			     "session reset succeeded for %s,%s\n",
+			     session->targetname, conn->persistent_address);
+	} else
+		goto failed;
+	spin_unlock_bh(&session->lock);
+	mutex_unlock(&session->eh_mutex);
+	return SUCCESS;
+}
+
+static void iscsi_prep_tgt_reset_pdu(struct scsi_cmnd *sc, struct iscsi_tm *hdr)
+{
+	memset(hdr, 0, sizeof(*hdr));
+	hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
+	hdr->flags = ISCSI_TM_FUNC_TARGET_WARM_RESET & ISCSI_FLAG_TM_FUNC_MASK;
+	hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+	hdr->rtt = RESERVED_ITT;
+}
+
+/**
+ * iscsi_eh_target_reset - reset target
+ * @sc: scsi command
+ *
+ * This will attempt to send a warm target reset. If that fails
+ * then we will drop the session and attempt ERL0 recovery.
+ */
+int iscsi_eh_target_reset(struct scsi_cmnd *sc)
+{
+	struct iscsi_cls_session *cls_session;
+	struct iscsi_session *session;
+	struct iscsi_conn *conn;
+	struct iscsi_tm *hdr;
+	int rc = FAILED;
+
+	cls_session = starget_to_session(scsi_target(sc->device));
+	session = cls_session->dd_data;
+
+	ISCSI_DBG_EH(session, "tgt Reset [sc %p tgt %s]\n", sc,
+		     session->targetname);
+
+	mutex_lock(&session->eh_mutex);
+	spin_lock_bh(&session->lock);
+	/*
+	 * Just check if we are not logged in. We cannot check for
+	 * the phase because the reset could come from an ioctl.
+	 */
+	if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN)
+		goto unlock;
+	conn = session->leadconn;
+
+	/* only have one tmf outstanding at a time */
+	if (conn->tmf_state != TMF_INITIAL)
+		goto unlock;
+	conn->tmf_state = TMF_QUEUED;
+
+	hdr = &conn->tmhdr;
+	iscsi_prep_tgt_reset_pdu(sc, hdr);
+
+	if (iscsi_exec_task_mgmt_fn(conn, hdr, session->age,
+				    session->tgt_reset_timeout)) {
+		rc = FAILED;
+		goto unlock;
+	}
+
+	switch (conn->tmf_state) {
+	case TMF_SUCCESS:
+		break;
+	case TMF_TIMEDOUT:
+		spin_unlock_bh(&session->lock);
+		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+		goto done;
+	default:
+		conn->tmf_state = TMF_INITIAL;
+		goto unlock;
+	}
+
+	rc = SUCCESS;
+	spin_unlock_bh(&session->lock);
+
+	iscsi_suspend_tx(conn);
+
+	spin_lock_bh(&session->lock);
+	memset(hdr, 0, sizeof(*hdr));
+	fail_scsi_tasks(conn, -1, DID_ERROR);
+	conn->tmf_state = TMF_INITIAL;
+	spin_unlock_bh(&session->lock);
+
+	iscsi_start_tx(conn);
+	goto done;
+
+unlock:
+	spin_unlock_bh(&session->lock);
+done:
+	ISCSI_DBG_EH(session, "tgt %s reset result = %s\n", session->targetname,
+		     rc == SUCCESS ? "SUCCESS" : "FAILED");
+	mutex_unlock(&session->eh_mutex);
+
+	if (rc == FAILED)
+		rc = iscsi_eh_session_reset(sc);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(iscsi_eh_target_reset);
+
 /*
  * Pre-allocate a pool of @max items of @item_size. By default, the pool
  * should be accessed via kfifo_{get,put} on q->queue.
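
With iscsi_eh_target_reset() now escalating to the internal session reset when the warm reset TMF fails, a libiscsi-based driver keeps wiring the handlers as before. A sketch, assumed to mirror how iscsi_tcp of this era does it (the template name is illustrative):

	#include <scsi/scsi_host.h>
	#include <scsi/libiscsi.h>

	static struct scsi_host_template demo_iscsi_sht = {
		.module			 = THIS_MODULE,
		.name			 = "demo_iscsi",
		.eh_abort_handler	 = iscsi_eh_abort,
		.eh_device_reset_handler = iscsi_eh_device_reset,
		/* warm target reset first, session reset as fallback */
		.eh_target_reset_handler = iscsi_eh_target_reset,
		.change_queue_depth	 = iscsi_change_queue_depth,
		.this_id		 = -1,
	};
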
@@ -2495,6 +2704,7 @@ iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
 	session->host = shost;
 	session->state = ISCSI_STATE_FREE;
 	session->fast_abort = 1;
+	session->tgt_reset_timeout = 30;
 	session->lu_reset_timeout = 15;
 	session->abort_timeout = 10;
 	session->scsi_cmds_max = scsi_cmds;
@@ -2856,6 +3066,7 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
 	spin_lock_bh(&session->lock);
 	fail_scsi_tasks(conn, -1, DID_TRANSPORT_DISRUPTED);
 	fail_mgmt_tasks(session, conn);
+	memset(&conn->tmhdr, 0, sizeof(conn->tmhdr));
 	spin_unlock_bh(&session->lock);
 	mutex_unlock(&session->eh_mutex);
 }
@@ -2932,6 +3143,9 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
 	case ISCSI_PARAM_LU_RESET_TMO:
 		sscanf(buf, "%d", &session->lu_reset_timeout);
 		break;
+	case ISCSI_PARAM_TGT_RESET_TMO:
+		sscanf(buf, "%d", &session->tgt_reset_timeout);
+		break;
 	case ISCSI_PARAM_PING_TMO:
 		sscanf(buf, "%d", &conn->ping_timeout);
 		break;
@@ -3031,6 +3245,9 @@ int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
 	case ISCSI_PARAM_LU_RESET_TMO:
 		len = sprintf(buf, "%d\n", session->lu_reset_timeout);
 		break;
+	case ISCSI_PARAM_TGT_RESET_TMO:
+		len = sprintf(buf, "%d\n", session->tgt_reset_timeout);
+		break;
 	case ISCSI_PARAM_INITIAL_R2T_EN:
 		len = sprintf(buf, "%d\n", session->initial_r2t_en);
 		break;
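
The new ISCSI_PARAM_TGT_RESET_TMO follows the same sscanf/sprintf round-trip as the other timeouts; a standalone illustration in plain user-space C (values as in the patch, the "45" input is made up):

	#include <stdio.h>

	int main(void)
	{
		int tgt_reset_timeout = 30;	/* default from iscsi_session_setup() */
		char buf[16];

		/* iscsi_set_param(ISCSI_PARAM_TGT_RESET_TMO, "45") parses: */
		sscanf("45", "%d", &tgt_reset_timeout);
		/* iscsi_session_get_param() formats it back: */
		sprintf(buf, "%d\n", tgt_reset_timeout);
		fputs(buf, stdout);		/* prints 45 */
		return 0;
	}
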
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 1c558d3bce18..14b13196b22d 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -820,10 +820,14 @@ void sas_slave_destroy(struct scsi_device *scsi_dev)
 		ata_port_disable(dev->sata_dev.ap);
 }
 
-int sas_change_queue_depth(struct scsi_device *scsi_dev, int new_depth)
+int sas_change_queue_depth(struct scsi_device *scsi_dev, int new_depth,
+			   int reason)
 {
 	int res = min(new_depth, SAS_MAX_QD);
 
+	if (reason != SCSI_QDEPTH_DEFAULT)
+		return -EOPNOTSUPP;
+
 	if (scsi_dev->tagged_supported)
 		scsi_adjust_queue_depth(scsi_dev, scsi_get_tag_type(scsi_dev),
 					res);
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index aa10f7951634..1cc23a69db5e 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -109,7 +109,8 @@ struct hbq_dmabuf {
 	struct lpfc_dmabuf dbuf;
 	uint32_t size;
 	uint32_t tag;
-	struct lpfc_rcqe rcqe;
+	struct lpfc_cq_event cq_event;
+	unsigned long time_stamp;
 };
 
 /* Priority bit.  Set value to exceed low water mark in lpfc_mem. */
@@ -201,6 +202,7 @@ struct lpfc_stats {
 	uint32_t elsRcvLIRR;
 	uint32_t elsRcvRPS;
 	uint32_t elsRcvRPL;
+	uint32_t elsRcvRRQ;
 	uint32_t elsXmitFLOGI;
 	uint32_t elsXmitFDISC;
 	uint32_t elsXmitPLOGI;
@@ -289,8 +291,8 @@ struct lpfc_vport {
 
 	uint16_t vpi;
 	uint16_t vfi;
-	uint8_t vfi_state;
-#define LPFC_VFI_REGISTERED	0x1
+	uint8_t vpi_state;
+#define LPFC_VPI_REGISTERED	0x1
 
 	uint32_t fc_flag;	/* FC flags */
 /* Several of these flags are HBA centric and should be moved to
@@ -405,6 +407,7 @@ struct lpfc_vport {
 	uint8_t stat_data_enabled;
 	uint8_t stat_data_blocked;
 	struct list_head rcv_buffer_list;
+	unsigned long rcv_buffer_time_stamp;
 	uint32_t vport_flag;
 #define STATIC_VPORT	1
 };
@@ -527,13 +530,16 @@ struct lpfc_hba {
 #define HBA_ERATT_HANDLED	0x1 /* This flag is set when eratt handled */
 #define DEFER_ERATT		0x2 /* Deferred error attention in progress */
 #define HBA_FCOE_SUPPORT	0x4 /* HBA function supports FCOE */
-#define HBA_RECEIVE_BUFFER	0x8 /* Rcv buffer posted to worker thread */
+#define HBA_SP_QUEUE_EVT	0x8 /* Slow-path qevt posted to worker thread*/
 #define HBA_POST_RECEIVE_BUFFER 0x10 /* Rcv buffers need to be posted */
 #define FCP_XRI_ABORT_EVENT	0x20
 #define ELS_XRI_ABORT_EVENT	0x40
 #define ASYNC_EVENT		0x80
 #define LINK_DISABLED		0x100 /* Link disabled by user */
 #define FCF_DISC_INPROGRESS	0x200 /* FCF discovery in progress */
+#define HBA_FIP_SUPPORT		0x400 /* FIP support in HBA */
+#define HBA_AER_ENABLED		0x800 /* AER enabled with HBA */
+	uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
 	struct lpfc_dmabuf slim2p;
 
 	MAILBOX_t *mbox;
@@ -551,6 +557,7 @@ struct lpfc_hba {
 	uint8_t fc_linkspeed;	/* Link speed after last READ_LA */
 
 	uint32_t fc_eventTag;	/* event tag for link attention */
+	uint32_t link_events;
 
 	/* These fields used to be binfo */
 	uint32_t fc_pref_DID;	/* preferred D_ID */
@@ -604,8 +611,8 @@ struct lpfc_hba {
 	uint32_t cfg_enable_hba_reset;
 	uint32_t cfg_enable_hba_heartbeat;
 	uint32_t cfg_enable_bg;
-	uint32_t cfg_enable_fip;
 	uint32_t cfg_log_verbose;
+	uint32_t cfg_aer_support;
 
 	lpfc_vpd_t vpd;		/* vital product data */
 
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index e1a30a16a9fa..75523603b91c 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -23,12 +23,14 @@
 #include <linux/delay.h>
 #include <linux/pci.h>
 #include <linux/interrupt.h>
+#include <linux/aer.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_tcq.h>
 #include <scsi/scsi_transport_fc.h>
+#include <scsi/fc/fc_fs.h>
 
 #include "lpfc_hw4.h"
 #include "lpfc_hw.h"
@@ -98,6 +100,28 @@ lpfc_drvr_version_show(struct device *dev, struct device_attribute *attr,
 	return snprintf(buf, PAGE_SIZE, LPFC_MODULE_DESC "\n");
 }
 
+/**
+ * lpfc_enable_fip_show - Return the fip mode of the HBA
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the fip mode, "1" (enabled) or "0" (disabled).
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_enable_fip_show(struct device *dev, struct device_attribute *attr,
+		       char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+
+	if (phba->hba_flag & HBA_FIP_SUPPORT)
+		return snprintf(buf, PAGE_SIZE, "1\n");
+	else
+		return snprintf(buf, PAGE_SIZE, "0\n");
+}
+
 static ssize_t
 lpfc_bg_info_show(struct device *dev, struct device_attribute *attr,
 		  char *buf)
@@ -762,9 +786,15 @@ lpfc_board_mode_store(struct device *dev, struct device_attribute *attr,
 	} else if (strncmp(buf, "offline", sizeof("offline") - 1) == 0)
 		status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
 	else if (strncmp(buf, "warm", sizeof("warm") - 1) == 0)
-		status = lpfc_do_offline(phba, LPFC_EVT_WARM_START);
+		if (phba->sli_rev == LPFC_SLI_REV4)
+			return -EINVAL;
+		else
+			status = lpfc_do_offline(phba, LPFC_EVT_WARM_START);
 	else if (strncmp(buf, "error", sizeof("error") - 1) == 0)
-		status = lpfc_do_offline(phba, LPFC_EVT_KILL);
+		if (phba->sli_rev == LPFC_SLI_REV4)
+			return -EINVAL;
+		else
+			status = lpfc_do_offline(phba, LPFC_EVT_KILL);
 	else
 		return -EINVAL;
 
@@ -1126,6 +1156,9 @@ lpfc_poll_store(struct device *dev, struct device_attribute *attr,
 	if ((val & 0x3) != val)
 		return -EINVAL;
 
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		val = 0;
+
 	spin_lock_irq(&phba->hbalock);
 
 	old_val = phba->cfg_poll;
@@ -1589,6 +1622,7 @@ static DEVICE_ATTR(num_discovered_ports, S_IRUGO,
 static DEVICE_ATTR(menlo_mgmt_mode, S_IRUGO, lpfc_mlomgmt_show, NULL);
 static DEVICE_ATTR(nport_evt_cnt, S_IRUGO, lpfc_nport_evt_cnt_show, NULL);
 static DEVICE_ATTR(lpfc_drvr_version, S_IRUGO, lpfc_drvr_version_show, NULL);
+static DEVICE_ATTR(lpfc_enable_fip, S_IRUGO, lpfc_enable_fip_show, NULL);
 static DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR,
 		   lpfc_board_mode_show, lpfc_board_mode_store);
 static DEVICE_ATTR(issue_reset, S_IWUSR, NULL, lpfc_issue_reset);
@@ -2759,6 +2793,196 @@ static DEVICE_ATTR(lpfc_link_speed, S_IRUGO | S_IWUSR,
 		lpfc_link_speed_show, lpfc_link_speed_store);
 
 /*
+# lpfc_aer_support: Support PCIe device Advanced Error Reporting (AER)
+#       0  = aer disabled or not supported
+#       1  = aer supported and enabled (default)
+# Value range is [0,1]. Default value is 1.
+*/
+
+/**
+ * lpfc_aer_support_store - Set the adapter for aer support
+ *
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: containing enable or disable aer flag.
+ * @count: unused variable.
+ *
+ * Description:
+ * If the val is 1 and currently the device's AER capability was not
+ * enabled, invoke the kernel's enable AER helper routine, trying to
+ * enable the device's AER capability. If the helper routine enabling
+ * AER returns success, update the device's cfg_aer_support flag to
+ * indicate AER is supported by the device; otherwise, if the device
+ * AER capability is already enabled to support AER, then do nothing.
+ *
+ * If the val is 0 and currently the device's AER support was enabled,
+ * invoke the kernel's disable AER helper routine. After that, update
+ * the device's cfg_aer_support flag to indicate AER is not supported
+ * by the device; otherwise, if the device AER capability is already
+ * disabled from supporting AER, then do nothing.
+ *
+ * Returns:
+ * length of the buf on success if val is in range and the intended
+ * mode is supported.
+ * -EINVAL if val is out of range or the intended mode is not supported.
+ **/
+static ssize_t
+lpfc_aer_support_store(struct device *dev, struct device_attribute *attr,
+		       const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+	struct lpfc_hba *phba = vport->phba;
+	int val = 0, rc = -EINVAL;
+
+	/* AER not supported on OC devices yet */
+	if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
+		return -EPERM;
+	if (!isdigit(buf[0]))
+		return -EINVAL;
+	if (sscanf(buf, "%i", &val) != 1)
+		return -EINVAL;
+
+	switch (val) {
+	case 0:
+		if (phba->hba_flag & HBA_AER_ENABLED) {
+			rc = pci_disable_pcie_error_reporting(phba->pcidev);
+			if (!rc) {
+				spin_lock_irq(&phba->hbalock);
+				phba->hba_flag &= ~HBA_AER_ENABLED;
+				spin_unlock_irq(&phba->hbalock);
+				phba->cfg_aer_support = 0;
+				rc = strlen(buf);
+			} else
+				rc = -EPERM;
+		} else {
+			phba->cfg_aer_support = 0;
+			rc = strlen(buf);
+		}
+		break;
+	case 1:
+		if (!(phba->hba_flag & HBA_AER_ENABLED)) {
+			rc = pci_enable_pcie_error_reporting(phba->pcidev);
+			if (!rc) {
+				spin_lock_irq(&phba->hbalock);
+				phba->hba_flag |= HBA_AER_ENABLED;
+				spin_unlock_irq(&phba->hbalock);
+				phba->cfg_aer_support = 1;
+				rc = strlen(buf);
+			} else
+				rc = -EPERM;
+		} else {
+			phba->cfg_aer_support = 1;
+			rc = strlen(buf);
+		}
+		break;
+	default:
+		rc = -EINVAL;
+		break;
+	}
+	return rc;
+}
+
+static int lpfc_aer_support = 1;
+module_param(lpfc_aer_support, int, S_IRUGO);
+MODULE_PARM_DESC(lpfc_aer_support, "Enable PCIe device AER support");
+lpfc_param_show(aer_support)
+
+/**
+ * lpfc_aer_support_init - Set the initial adapters aer support flag
+ * @phba: lpfc_hba pointer.
+ * @val: enable aer or disable aer flag (1 or 0).
+ *
+ * Description:
+ * If val is in a valid range [0,1], then set the adapter's initial
+ * cfg_aer_support field. It will be up to the driver's probe_one
+ * routine to determine whether the device's AER support can be set
+ * or not.
+ *
+ * Notes:
+ * If the value is not in range, log a kernel error message and
+ * fall back to the default of enabling AER support.
+ *
+ * Returns:
+ * zero if val saved.
+ * -EINVAL val out of range
+ **/
+static int
+lpfc_aer_support_init(struct lpfc_hba *phba, int val)
+{
+	/* AER not supported on OC devices yet */
+	if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
+		phba->cfg_aer_support = 0;
+		return -EPERM;
+	}
+
+	if (val == 0 || val == 1) {
+		phba->cfg_aer_support = val;
+		return 0;
+	}
+	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"2712 lpfc_aer_support attribute value %d out "
+			"of range, allowed values are 0|1, setting it "
+			"to default value of 1\n", val);
+	/* By default, try to enable AER on a device */
+	phba->cfg_aer_support = 1;
+	return -EINVAL;
+}
+
+static DEVICE_ATTR(lpfc_aer_support, S_IRUGO | S_IWUSR,
+		   lpfc_aer_support_show, lpfc_aer_support_store);
+
+/**
+ * lpfc_aer_cleanup_state - Clean up aer state to the aer enabled device
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: containing the string "selective".
+ * @count: unused variable.
+ *
+ * Description:
+ * If the @buf contains 1 and the device currently has AER support
+ * enabled, this routine invokes the kernel AER helper
+ * pci_cleanup_aer_uncorrect_error_status() to clean up the
+ * uncorrectable error status register.
+ *
+ * Returns:
+ * strlen of the buf on success, -EINVAL if @buf does not contain the
+ * value 1, -EPERM if the device has no AER support or cleanup fails.
+ **/
+static ssize_t
+lpfc_aer_cleanup_state(struct device *dev, struct device_attribute *attr,
+		       const char *buf, size_t count)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+	int val, rc = -1;
+
+	/* AER not supported on OC devices yet */
+	if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
+		return -EPERM;
+	if (!isdigit(buf[0]))
+		return -EINVAL;
+	if (sscanf(buf, "%i", &val) != 1)
+		return -EINVAL;
+	if (val != 1)
+		return -EINVAL;
+
+	if (phba->hba_flag & HBA_AER_ENABLED)
+		rc = pci_cleanup_aer_uncorrect_error_status(phba->pcidev);
+
+	if (rc == 0)
+		return strlen(buf);
+	else
+		return -EPERM;
+}
+
+static DEVICE_ATTR(lpfc_aer_state_cleanup, S_IWUSR, NULL,
+		   lpfc_aer_cleanup_state);
+
+/*
 # lpfc_fcp_class:  Determines FC class to use for the FCP protocol.
 # Value range is [2,3]. Default value is 3.
 */
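
The two attributes added above are driven from user space. A standalone sketch (the host number in the sysfs paths is illustrative; substitute the actual SCSI host of the HBA):

	#include <stdio.h>

	int main(void)
	{
		/* enable AER on the HBA (writing "0" disables it) */
		FILE *f = fopen("/sys/class/scsi_host/host0/lpfc_aer_support",
				"w");

		if (!f)
			return 1;
		fputs("1", f);
		fclose(f);

		/* after an uncorrectable error, clear the AER status */
		f = fopen("/sys/class/scsi_host/host0/lpfc_aer_state_cleanup",
			  "w");
		if (!f)
			return 1;
		fputs("1", f);
		fclose(f);
		return 0;
	}
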
@@ -2846,7 +3070,7 @@ LPFC_ATTR_R(multi_ring_support, 1, 1, 2, "Determines number of primary "
 # identifies what rctl value to configure the additional ring for.
 # Value range is [1,0xff]. Default value is 4 (Unsolicated Data).
 */
-LPFC_ATTR_R(multi_ring_rctl, FC_UNSOL_DATA, 1,
+LPFC_ATTR_R(multi_ring_rctl, FC_RCTL_DD_UNSOL_DATA, 1,
 	     255, "Identifies RCTL for additional ring configuration");
 
 /*
@@ -2854,7 +3078,7 @@ LPFC_ATTR_R(multi_ring_rctl, FC_UNSOL_DATA, 1,
 # identifies what type value to configure the additional ring for.
 # Value range is [1,0xff]. Default value is 5 (LLC/SNAP).
 */
-LPFC_ATTR_R(multi_ring_type, FC_LLC_SNAP, 1,
+LPFC_ATTR_R(multi_ring_type, FC_TYPE_IP, 1,
 	     255, "Identifies TYPE for additional ring configuration");
 
 /*
@@ -2947,15 +3171,6 @@ LPFC_ATTR_R(enable_hba_heartbeat, 1, 0, 1, "Enable HBA Heartbeat.");
 LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support");
 
 /*
-# lpfc_enable_fip: When set, FIP is required to start discovery. If not
-# set, the driver will add an FCF record manually if the port has no
-# FCF records available and start discovery.
-# Value range is [0,1]. Default value is 1 (enabled)
-*/
-LPFC_ATTR_RW(enable_fip, 0, 0, 1, "Enable FIP Discovery");
-
-
-/*
 # lpfc_prot_mask: i
 #	- Bit mask of host protection capabilities used to register with the
 #	  SCSI mid-layer
@@ -3013,6 +3228,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
 	&dev_attr_num_discovered_ports,
 	&dev_attr_menlo_mgmt_mode,
 	&dev_attr_lpfc_drvr_version,
+	&dev_attr_lpfc_enable_fip,
 	&dev_attr_lpfc_temp_sensor,
 	&dev_attr_lpfc_log_verbose,
 	&dev_attr_lpfc_lun_queue_depth,
@@ -3020,7 +3236,6 @@ struct device_attribute *lpfc_hba_attrs[] = {
 	&dev_attr_lpfc_peer_port_login,
 	&dev_attr_lpfc_nodev_tmo,
 	&dev_attr_lpfc_devloss_tmo,
-	&dev_attr_lpfc_enable_fip,
 	&dev_attr_lpfc_fcp_class,
 	&dev_attr_lpfc_use_adisc,
 	&dev_attr_lpfc_ack0,
@@ -3061,6 +3276,8 @@ struct device_attribute *lpfc_hba_attrs[] = {
 	&dev_attr_lpfc_max_scsicmpl_time,
 	&dev_attr_lpfc_stat_data_ctrl,
 	&dev_attr_lpfc_prot_sg_seg_cnt,
+	&dev_attr_lpfc_aer_support,
+	&dev_attr_lpfc_aer_state_cleanup,
 	NULL,
 };
 
@@ -3073,7 +3290,6 @@ struct device_attribute *lpfc_vport_attrs[] = {
 	&dev_attr_lpfc_lun_queue_depth,
 	&dev_attr_lpfc_nodev_tmo,
 	&dev_attr_lpfc_devloss_tmo,
-	&dev_attr_lpfc_enable_fip,
 	&dev_attr_lpfc_hba_queue_depth,
 	&dev_attr_lpfc_peer_port_login,
 	&dev_attr_lpfc_restrict_login,
@@ -3815,7 +4031,11 @@ lpfc_get_stats(struct Scsi_Host *shost)
 	hs->invalid_crc_count -= lso->invalid_crc_count;
 	hs->error_frames -= lso->error_frames;
 
-	if (phba->fc_topology == TOPOLOGY_LOOP) {
+	if (phba->hba_flag & HBA_FCOE_SUPPORT) {
+		hs->lip_count = -1;
+		hs->nos_count = (phba->link_events >> 1);
+		hs->nos_count -= lso->link_events;
+	} else if (phba->fc_topology == TOPOLOGY_LOOP) {
 		hs->lip_count = (phba->fc_eventTag >> 1);
 		hs->lip_count -= lso->link_events;
 		hs->nos_count = -1;
@@ -3906,7 +4126,10 @@ lpfc_reset_stats(struct Scsi_Host *shost)
 	lso->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord;
 	lso->invalid_crc_count = pmb->un.varRdLnk.crcCnt;
 	lso->error_frames = pmb->un.varRdLnk.crcCnt;
-	lso->link_events = (phba->fc_eventTag >> 1);
+	if (phba->hba_flag & HBA_FCOE_SUPPORT)
+		lso->link_events = (phba->link_events >> 1);
+	else
+		lso->link_events = (phba->fc_eventTag >> 1);
 
 	psli->stats_start = get_seconds();
 
@@ -4222,14 +4445,17 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
 	lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset);
 	lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat);
 	lpfc_enable_bg_init(phba, lpfc_enable_bg);
-	phba->cfg_poll = lpfc_poll;
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		phba->cfg_poll = 0;
+	else
+		phba->cfg_poll = lpfc_poll;
 	phba->cfg_soft_wwnn = 0L;
 	phba->cfg_soft_wwpn = 0L;
 	lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt);
 	lpfc_prot_sg_seg_cnt_init(phba, lpfc_prot_sg_seg_cnt);
 	lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
-	lpfc_enable_fip_init(phba, lpfc_enable_fip);
 	lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);
+	lpfc_aer_support_init(phba, lpfc_aer_support);
 
 	return;
 }
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index da6bf5aac9dd..a5d9048235d9 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -26,6 +26,7 @@
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_transport_fc.h>
 #include <scsi/scsi_bsg_fc.h>
+#include <scsi/fc/fc_fs.h>
 
 #include "lpfc_hw4.h"
 #include "lpfc_hw.h"
@@ -148,8 +149,8 @@ lpfc_bsg_rport_ct(struct fc_bsg_job *job)
 	cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
 	cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
 	cmd->un.genreq64.w5.hcsw.Dfctl = 0;
-	cmd->un.genreq64.w5.hcsw.Rctl = FC_UNSOL_CTL;
-	cmd->un.genreq64.w5.hcsw.Type = FC_COMMON_TRANSPORT_ULP;
+	cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
+	cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
 	cmd->ulpBdeCount = 1;
 	cmd->ulpLe = 1;
 	cmd->ulpClass = CLASS3;
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 0830f37409a3..650494d622c1 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -49,6 +49,8 @@ void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
 void lpfc_request_features(struct lpfc_hba *, struct lpfcMboxq *);
 
 struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t);
+void lpfc_cleanup_rcv_buffers(struct lpfc_vport *);
+void lpfc_rcv_seq_check_edtov(struct lpfc_vport *);
 void lpfc_cleanup_rpis(struct lpfc_vport *, int);
 int lpfc_linkdown(struct lpfc_hba *);
 void lpfc_linkdown_port(struct lpfc_vport *);
@@ -144,6 +146,8 @@ void lpfc_hb_timeout_handler(struct lpfc_hba *);
 
 void lpfc_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
 			 struct lpfc_iocbq *);
+void lpfc_sli4_ct_abort_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
+				    struct lpfc_iocbq *);
 int lpfc_ns_cmd(struct lpfc_vport *, int, uint8_t, uint32_t);
 int lpfc_fdmi_cmd(struct lpfc_vport *, struct lpfc_nodelist *, int);
 void lpfc_fdmi_tmo(unsigned long);
@@ -188,7 +192,7 @@ int lpfc_mbox_tmo_val(struct lpfc_hba *, int);
 void lpfc_init_vfi(struct lpfcMboxq *, struct lpfc_vport *);
 void lpfc_reg_vfi(struct lpfcMboxq *, struct lpfc_vport *, dma_addr_t);
 void lpfc_init_vpi(struct lpfc_hba *, struct lpfcMboxq *, uint16_t);
-void lpfc_unreg_vfi(struct lpfcMboxq *, uint16_t);
+void lpfc_unreg_vfi(struct lpfcMboxq *, struct lpfc_vport *);
 void lpfc_reg_fcfi(struct lpfc_hba *, struct lpfcMboxq *);
 void lpfc_unreg_fcfi(struct lpfcMboxq *, uint16_t);
 void lpfc_resume_rpi(struct lpfcMboxq *, struct lpfc_nodelist *);
@@ -212,7 +216,10 @@ void lpfc_stop_vport_timers(struct lpfc_vport *);
 void lpfc_poll_timeout(unsigned long ptr);
 void lpfc_poll_start_timer(struct lpfc_hba *);
 void lpfc_poll_eratt(unsigned long);
-void lpfc_sli_poll_fcp_ring(struct lpfc_hba *);
+int
+lpfc_sli_handle_fast_ring_event(struct lpfc_hba *,
+			struct lpfc_sli_ring *, uint32_t);
+
 struct lpfc_iocbq * lpfc_sli_get_iocbq(struct lpfc_hba *);
 void lpfc_sli_release_iocbq(struct lpfc_hba *, struct lpfc_iocbq *);
 uint16_t lpfc_sli_next_iotag(struct lpfc_hba *, struct lpfc_iocbq *);
@@ -235,7 +242,7 @@ void lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *);
 int lpfc_sli_check_eratt(struct lpfc_hba *);
 void lpfc_sli_handle_slow_ring_event(struct lpfc_hba *,
 				    struct lpfc_sli_ring *, uint32_t);
-int lpfc_sli4_handle_received_buffer(struct lpfc_hba *);
+void lpfc_sli4_handle_received_buffer(struct lpfc_hba *, struct hbq_dmabuf *);
 void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
 int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t,
 			struct lpfc_iocbq *, uint32_t);
@@ -361,6 +368,7 @@ void lpfc_stop_port(struct lpfc_hba *);
 void lpfc_parse_fcoe_conf(struct lpfc_hba *, uint8_t *, uint32_t);
 int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int);
 void lpfc_start_fdiscs(struct lpfc_hba *phba);
+struct lpfc_vport *lpfc_find_vport_by_vpid(struct lpfc_hba *, uint16_t);
 
 #define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code)
 #define HBA_EVENT_RSCN                   5
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 9a1bd9534d74..0ebcd9baca79 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -31,6 +31,7 @@
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_transport_fc.h>
+#include <scsi/fc/fc_fs.h>
 
 #include "lpfc_hw4.h"
 #include "lpfc_hw.h"
@@ -87,7 +88,6 @@ void
 lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 		    struct lpfc_iocbq *piocbq)
 {
-
 	struct lpfc_dmabuf *mp = NULL;
 	IOCB_t *icmd = &piocbq->iocb;
 	int i;
@@ -160,6 +160,39 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 	}
 }
 
+/**
+ * lpfc_sli4_ct_abort_unsol_event - Default handle for sli4 unsol abort
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to the driver internal I/O ring.
+ * @piocbq: Pointer to the IOCBQ.
+ *
+ * This function serves as the default handler for the sli4 unsolicited
+ * abort event. It is invoked when no application has registered an
+ * unsolicited abort handler; it forwards the event to any registered
+ * ct event consumer and then releases the event's dma buffer.
+ **/
+void
+lpfc_sli4_ct_abort_unsol_event(struct lpfc_hba *phba,
+			       struct lpfc_sli_ring *pring,
+			       struct lpfc_iocbq *piocbq)
+{
+	IOCB_t *icmd = &piocbq->iocb;
+	struct lpfc_dmabuf *bdeBuf;
+	uint32_t size;
+
+	/* Forward abort event to any process registered to receive ct event */
+	lpfc_bsg_ct_unsol_event(phba, pring, piocbq);
+
+	/* If there is no BDE associated with IOCB, there is nothing to do */
+	if (icmd->ulpBdeCount == 0)
+		return;
+	bdeBuf = piocbq->context2;
+	piocbq->context2 = NULL;
+	size  = icmd->un.cont64[0].tus.f.bdeSize;
+	lpfc_ct_unsol_buffer(phba, piocbq, bdeBuf, size);
+	lpfc_in_buf_free(phba, bdeBuf);
+}
+
 static void
 lpfc_free_ct_rsp(struct lpfc_hba *phba, struct lpfc_dmabuf *mlist)
 {
@@ -304,8 +337,8 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
 	/* Fill in rest of iocb */
 	icmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
 	icmd->un.genreq64.w5.hcsw.Dfctl = 0;
-	icmd->un.genreq64.w5.hcsw.Rctl = FC_UNSOL_CTL;
-	icmd->un.genreq64.w5.hcsw.Type = FC_COMMON_TRANSPORT_ULP;
+	icmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
+	icmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
 
 	if (!tmo) {
 		 /* FC spec states we need 3 * ratov for CT requests */
@@ -363,9 +396,14 @@ lpfc_ct_cmd(struct lpfc_vport *vport, struct lpfc_dmabuf *inmp,
 	outmp = lpfc_alloc_ct_rsp(phba, cmdcode, bpl, rsp_size, &cnt);
 	if (!outmp)
 		return -ENOMEM;
-
+	/*
+	 * Form the CT IOCB.  The total number of BDEs in this IOCB
+	 * is the single command plus response count from
+	 * lpfc_alloc_ct_rsp.
+	 */
+	cnt += 1;
 	status = lpfc_gen_req(vport, bmp, inmp, outmp, cmpl, ndlp, 0,
-			      cnt+1, 0, retry);
+			      cnt, 0, retry);
 	if (status) {
 		lpfc_free_ct_rsp(phba, outmp);
 		return -ENOMEM;
@@ -501,6 +539,9 @@ lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint32_t Size)
 							SLI_CTNS_GFF_ID,
 							0, Did) == 0)
 							vport->num_disc_nodes++;
+						else
+							lpfc_setup_disc_node
+								(vport, Did);
 					}
 					else {
 						lpfc_debugfs_disc_trc(vport,
@@ -1209,7 +1250,7 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
 		    be16_to_cpu(SLI_CTNS_RFF_ID);
 		CtReq->un.rff.PortId = cpu_to_be32(vport->fc_myDID);
 		CtReq->un.rff.fbits = FC4_FEATURE_INIT;
-		CtReq->un.rff.type_code = FC_FCP_DATA;
+		CtReq->un.rff.type_code = FC_TYPE_FCP;
 		cmpl = lpfc_cmpl_ct_cmd_rff_id;
 		break;
 	}
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 8d0f0de76b63..391584183d81 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -926,7 +926,7 @@ lpfc_debugfs_dumpData_open(struct inode *inode, struct file *file)
 		goto out;
 
 	/* Round to page boundry */
-	printk(KERN_ERR "BLKGRD %s: _dump_buf_data=0x%p\n",
+	printk(KERN_ERR "9059 BLKGRD:  %s: _dump_buf_data=0x%p\n",
 			__func__, _dump_buf_data);
 	debug->buffer = _dump_buf_data;
 	if (!debug->buffer) {
@@ -956,8 +956,8 @@ lpfc_debugfs_dumpDif_open(struct inode *inode, struct file *file)
 		goto out;
 
 	/* Round to page boundry */
-	printk(KERN_ERR "BLKGRD %s: _dump_buf_dif=0x%p file=%s\n", __func__,
-	       _dump_buf_dif, file->f_dentry->d_name.name);
+	printk(KERN_ERR	"9060 BLKGRD: %s: _dump_buf_dif=0x%p file=%s\n",
+		__func__, _dump_buf_dif, file->f_dentry->d_name.name);
 	debug->buffer = _dump_buf_dif;
 	if (!debug->buffer) {
 		kfree(debug);
@@ -1377,7 +1377,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
 			debugfs_create_dir(name, phba->hba_debugfs_root);
 		if (!vport->vport_debugfs_root) {
 			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
-					 "0417 Cant create debugfs");
+					 "0417 Cant create debugfs\n");
 			goto debug_failed;
 		}
 		atomic_inc(&phba->debugfs_vport_count);
@@ -1430,7 +1430,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
 				 vport, &lpfc_debugfs_op_nodelist);
 	if (!vport->debug_nodelist) {
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
-				 "0409 Cant create debugfs nodelist");
+				 "0409 Cant create debugfs nodelist\n");
 		goto debug_failed;
 	}
 debug_failed:
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 1142070e9484..2851d75ffc6f 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -19,7 +19,7 @@
  *******************************************************************/
 
 #define FC_MAX_HOLD_RSCN     32	      /* max number of deferred RSCNs */
-#define FC_MAX_NS_RSP        65536    /* max size NameServer rsp */
+#define FC_MAX_NS_RSP        64512    /* max size NameServer rsp */
 #define FC_MAXLOOP           126      /* max devices supported on a fc loop */
 #define LPFC_DISC_FLOGI_TMO  10	      /* Discovery FLOGI ratov */
 
@@ -105,8 +105,6 @@ struct lpfc_nodelist {
 	struct lpfc_vport *vport;
 	struct lpfc_work_evt els_retry_evt;
 	struct lpfc_work_evt dev_loss_evt;
-	unsigned long last_ramp_up_time;        /* jiffy of last ramp up */
-	unsigned long last_q_full_time;		/* jiffy of last queue full */
 	struct kref     kref;
 	atomic_t cmd_pending;
 	uint32_t cmd_qdepth;
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 45337cd23feb..a079bbc03cf8 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -173,13 +173,26 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
 	 * in FIP mode send FLOGI, FDISC and LOGO as FIP frames.
 	 */
 	if ((did == Fabric_DID) &&
-		bf_get(lpfc_fip_flag, &phba->sli4_hba.sli4_flags) &&
+		(phba->hba_flag & HBA_FIP_SUPPORT) &&
 		((elscmd == ELS_CMD_FLOGI) ||
 		 (elscmd == ELS_CMD_FDISC) ||
 		 (elscmd == ELS_CMD_LOGO)))
-		elsiocb->iocb_flag |= LPFC_FIP_ELS;
+		switch (elscmd) {
+		case ELS_CMD_FLOGI:
+			elsiocb->iocb_flag |= ((ELS_ID_FLOGI <<
+				LPFC_FIP_ELS_ID_SHIFT) & LPFC_FIP_ELS_ID_MASK);
+			break;
+		case ELS_CMD_FDISC:
+			elsiocb->iocb_flag |= ((ELS_ID_FDISC <<
+				LPFC_FIP_ELS_ID_SHIFT) & LPFC_FIP_ELS_ID_MASK);
+			break;
+		case ELS_CMD_LOGO:
+			elsiocb->iocb_flag |= ((ELS_ID_LOGO <<
+				LPFC_FIP_ELS_ID_SHIFT) & LPFC_FIP_ELS_ID_MASK);
+			break;
+		}
 	else
-		elsiocb->iocb_flag &= ~LPFC_FIP_ELS;
+		elsiocb->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;
 
 	icmd = &elsiocb->iocb;
 
@@ -591,7 +604,7 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	} else {
 		ndlp->nlp_type |= NLP_FABRIC;
 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
-		if (vport->vfi_state & LPFC_VFI_REGISTERED) {
+		if (vport->vpi_state & LPFC_VPI_REGISTERED) {
 			lpfc_start_fdiscs(phba);
 			lpfc_do_scr_ns_plogi(phba, vport);
 		} else
@@ -2452,6 +2465,7 @@ lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
 	 */
 	del_timer_sync(&ndlp->nlp_delayfunc);
 	retry = ndlp->nlp_retry;
+	ndlp->nlp_retry = 0;
 
 	switch (cmd) {
 	case ELS_CMD_FLOGI:
@@ -2711,12 +2725,16 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 	    !lpfc_error_lost_link(irsp)) {
 		/* FLOGI retry policy */
 		retry = 1;
-		maxretry = 48;
-		if (cmdiocb->retry >= 32)
+		/* retry forever */
+		maxretry = 0;
+		if (cmdiocb->retry >= 100)
+			delay = 5000;
+		else if (cmdiocb->retry >= 32)
 			delay = 1000;
 	}
 
-	if ((++cmdiocb->retry) >= maxretry) {
+	cmdiocb->retry++;
+	if (maxretry && (cmdiocb->retry >= maxretry)) {
 		phba->fc_stat.elsRetryExceeded++;
 		retry = 0;
 	}
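
A standalone demo of the new FLOGI retry pacing above: the cap is removed (maxretry 0 now means retry forever) and later attempts are delayed; the delays are taken from the hunk, the driving loop is illustrative:

	#include <stdio.h>

	static int flogi_retry_delay_ms(int retry)
	{
		if (retry >= 100)
			return 5000;
		if (retry >= 32)
			return 1000;
		return 0;	/* early retries keep the pre-existing behavior */
	}

	int main(void)
	{
		int retry;

		for (retry = 31; retry <= 101; retry += 35)
			printf("retry %3d -> delay %d ms\n",
			       retry, flogi_retry_delay_ms(retry));
		return 0;	/* 31 -> 0 ms, 66 -> 1000 ms, 101 -> 5000 ms */
	}
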
@@ -4503,6 +4521,29 @@ lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
 }
 
 /**
+ * lpfc_els_rcv_rrq - Process an unsolicited rrq iocb
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine processes a Reinstate Recovery Qualifier (RRQ) IOCB
+ * received as an ELS unsolicited event. A request to RRQ shall only
+ * be accepted if the Originator Nx_Port N_Port_ID or the Responder
+ * Nx_Port N_Port_ID of the target Exchange is the same as the
+ * N_Port_ID of the Nx_Port that makes the request. If the RRQ is
+ * not accepted, an LS_RJT with reason code "Unable to perform
+ * command request" and reason code explanation "Invalid Originator
+ * S_ID" shall be returned. For now, we just unconditionally accept
+ * RRQ from the target.
+ **/
+static void
+lpfc_els_rcv_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+		 struct lpfc_nodelist *ndlp)
+{
+	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
+}
+
+/**
  * lpfc_els_rsp_rps_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd
  * @phba: pointer to lpfc hba data structure.
  * @pmb: pointer to the driver internal queue element for mailbox command.
@@ -5396,7 +5437,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 	if (lpfc_els_chk_latt(vport))
 		goto dropit;
 
-	/* Ignore traffic recevied during vport shutdown. */
+	/* Ignore traffic received during vport shutdown. */
 	if (vport->load_flag & FC_UNLOADING)
 		goto dropit;
 
@@ -5618,6 +5659,16 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 		if (newnode)
 			lpfc_nlp_put(ndlp);
 		break;
+	case ELS_CMD_RRQ:
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+			"RCV RRQ:         did:x%x/ste:x%x flg:x%x",
+			did, vport->port_state, ndlp->nlp_flag);
+
+		phba->fc_stat.elsRcvRRQ++;
+		lpfc_els_rcv_rrq(vport, elsiocb, ndlp);
+		if (newnode)
+			lpfc_nlp_put(ndlp);
+		break;
 	default:
 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
 			"RCV ELS cmd:     cmd:x%x did:x%x/ste:x%x",
@@ -5670,7 +5721,7 @@ dropit:
  *    NULL - No vport with the matching @vpi found
  *    Otherwise - Address to the vport with the matching @vpi.
  **/
-static struct lpfc_vport *
+struct lpfc_vport *
 lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
 {
 	struct lpfc_vport *vport;
@@ -6024,11 +6075,6 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 				 irsp->ulpStatus, irsp->un.ulpWord[4]);
 		goto fdisc_failed;
 	}
-		if (vport->fc_vport->vport_state == FC_VPORT_INITIALIZING)
-			lpfc_vport_set_state(vport, FC_VPORT_FAILED);
-		lpfc_nlp_put(ndlp);
-		/* giving up on FDISC. Cancel discovery timer */
-		lpfc_can_disctmo(vport);
 	spin_lock_irq(shost->host_lock);
 	vport->fc_flag |= FC_FABRIC;
 	if (vport->phba->fc_topology == TOPOLOGY_LOOP)
@@ -6107,6 +6153,7 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	int did = ndlp->nlp_DID;
 	int rc;
 
+	vport->port_state = LPFC_FDISC;
 	cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
 	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
 				     ELS_CMD_FDISC);
@@ -6172,7 +6219,6 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 		return 1;
 	}
 	lpfc_vport_set_state(vport, FC_VPORT_INITIALIZING);
-	vport->port_state = LPFC_FDISC;
 	return 0;
 }
 
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index e6a47e25b218..3b9424427652 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -525,8 +525,6 @@ lpfc_work_done(struct lpfc_hba *phba)
 			spin_unlock_irq(&phba->hbalock);
 			lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
 		}
-		if (phba->hba_flag & HBA_RECEIVE_BUFFER)
-			lpfc_sli4_handle_received_buffer(phba);
 	}
 
 	vports = lpfc_create_vport_work_array(phba);
@@ -568,8 +566,9 @@ lpfc_work_done(struct lpfc_hba *phba)
 	pring = &phba->sli.ring[LPFC_ELS_RING];
 	status = (ha_copy & (HA_RXMASK  << (4*LPFC_ELS_RING)));
 	status >>= (4*LPFC_ELS_RING);
-	if ((status & HA_RXMASK)
-		|| (pring->flag & LPFC_DEFERRED_RING_EVENT)) {
+	if ((status & HA_RXMASK) ||
+	    (pring->flag & LPFC_DEFERRED_RING_EVENT) ||
+	    (phba->hba_flag & HBA_SP_QUEUE_EVT)) {
 		if (pring->flag & LPFC_STOP_IOCB_EVENT) {
 			pring->flag |= LPFC_DEFERRED_RING_EVENT;
 			/* Set the lpfc data pending flag */
@@ -688,7 +687,8 @@ lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
 			lpfc_unreg_rpi(vport, ndlp);
 
 		/* Leave Fabric nodes alone on link down */
-		if (!remove && ndlp->nlp_type & NLP_FABRIC)
+		if ((phba->sli_rev < LPFC_SLI_REV4) &&
+		    (!remove && ndlp->nlp_type & NLP_FABRIC))
 			continue;
 		rc = lpfc_disc_state_machine(vport, ndlp, NULL,
 					     remove
@@ -706,6 +706,9 @@ lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
 void
 lpfc_port_link_failure(struct lpfc_vport *vport)
 {
+	/* Cleanup any outstanding received buffers */
+	lpfc_cleanup_rcv_buffers(vport);
+
 	/* Cleanup any outstanding RSCN activity */
 	lpfc_els_flush_rscn(vport);
 
@@ -1015,13 +1018,12 @@ lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 		mempool_free(mboxq, phba->mbox_mem_pool);
 		return;
 	}
-	if (vport->port_state != LPFC_FLOGI) {
-		spin_lock_irqsave(&phba->hbalock, flags);
-		phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE);
-		phba->hba_flag &= ~FCF_DISC_INPROGRESS;
-		spin_unlock_irqrestore(&phba->hbalock, flags);
+	spin_lock_irqsave(&phba->hbalock, flags);
+	phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE);
+	phba->hba_flag &= ~FCF_DISC_INPROGRESS;
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+	if (vport->port_state != LPFC_FLOGI)
 		lpfc_initial_flogi(vport);
-	}
 
 	mempool_free(mboxq, phba->mbox_mem_pool);
 	return;
@@ -1199,6 +1201,7 @@ lpfc_register_fcf(struct lpfc_hba *phba)
 
 	/* If the FCF is not availabe do nothing. */
 	if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) {
+		phba->hba_flag &= ~FCF_DISC_INPROGRESS;
 		spin_unlock_irqrestore(&phba->hbalock, flags);
 		return;
 	}
@@ -1216,15 +1219,23 @@ lpfc_register_fcf(struct lpfc_hba *phba)
 
 	fcf_mbxq = mempool_alloc(phba->mbox_mem_pool,
 		GFP_KERNEL);
-	if (!fcf_mbxq)
+	if (!fcf_mbxq) {
+		spin_lock_irqsave(&phba->hbalock, flags);
+		phba->hba_flag &= ~FCF_DISC_INPROGRESS;
+		spin_unlock_irqrestore(&phba->hbalock, flags);
 		return;
+	}
 
 	lpfc_reg_fcfi(phba, fcf_mbxq);
 	fcf_mbxq->vport = phba->pport;
 	fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi;
 	rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT);
-	if (rc == MBX_NOT_FINISHED)
+	if (rc == MBX_NOT_FINISHED) {
+		spin_lock_irqsave(&phba->hbalock, flags);
+		phba->hba_flag &= ~FCF_DISC_INPROGRESS;
+		spin_unlock_irqrestore(&phba->hbalock, flags);
 		mempool_free(fcf_mbxq, phba->mbox_mem_pool);
+	}
 
 	return;
 }
@@ -1253,13 +1264,27 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
 			uint16_t *vlan_id)
 {
 	struct lpfc_fcf_conn_entry *conn_entry;
+	int i, j, fcf_vlan_id = 0;
+
+	/* Find the lowest VLAN id in the FCF record */
+	for (i = 0; i < 512; i++) {
+		if (new_fcf_record->vlan_bitmap[i]) {
+			fcf_vlan_id = i * 8;
+			j = 0;
+			while (!((new_fcf_record->vlan_bitmap[i] >> j) & 1)) {
+				j++;
+				fcf_vlan_id++;
+			}
+			break;
+		}
+	}
 
 	/* If FCF not available return 0 */
 	if (!bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record) ||
 		!bf_get(lpfc_fcf_record_fcf_valid, new_fcf_record))
 		return 0;
 
-	if (!phba->cfg_enable_fip) {
+	if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
 		*boot_flag = 0;
 		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
 				new_fcf_record);
@@ -1286,7 +1311,11 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
 		if (*addr_mode & LPFC_FCF_FPMA)
 			*addr_mode = LPFC_FCF_FPMA;
 
-		*vlan_id = 0xFFFF;
+		/* If the FCF record reports a vlan id, use that vlan id */
+		if (fcf_vlan_id)
+			*vlan_id = fcf_vlan_id;
+		else
+			*vlan_id = 0xFFFF;
 		return 1;
 	}
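
A standalone demo of the lowest-VLAN-id scan added above: byte i of the bitmap covers VLAN ids i*8 through i*8+7, least-significant bit first (the sample bitmap value is made up):

	#include <stdio.h>

	int main(void)
	{
		unsigned char vlan_bitmap[512] = { 0 };
		int i, j, fcf_vlan_id = 0;

		vlan_bitmap[1] = 0x10;	/* bit 4 of byte 1: VLAN id 1*8 + 4 */

		for (i = 0; i < 512; i++) {
			if (vlan_bitmap[i]) {
				fcf_vlan_id = i * 8;
				j = 0;
				while (!((vlan_bitmap[i] >> j) & 1)) {
					j++;
					fcf_vlan_id++;
				}
				break;
			}
		}
		printf("lowest VLAN id: %d\n", fcf_vlan_id);	/* prints 12 */
		return 0;
	}
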
 
@@ -1384,8 +1413,15 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
 			(*addr_mode & LPFC_FCF_FPMA))
 				*addr_mode = LPFC_FCF_FPMA;
 
+		/* If matching connect list has a vlan id, use it */
 		if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID)
 			*vlan_id = conn_entry->conn_rec.vlan_tag;
+		/*
+		 * If no vlan id is specified in connect list, use the vlan id
+		 * in the FCF record
+		 */
+		else if (fcf_vlan_id)
+			*vlan_id = fcf_vlan_id;
 		else
 			*vlan_id = 0xFFFF;
 
@@ -1423,6 +1459,15 @@ lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
 
 	if (phba->link_state >= LPFC_LINK_UP)
 		lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST);
+	else {
+		/*
+		 * Do not continue FCF discovery and clear FCF_DISC_INPROGRESS
+		 * flag
+		 */
+		spin_lock_irq(&phba->hbalock);
+		phba->hba_flag &= ~FCF_DISC_INPROGRESS;
+		spin_unlock_irq(&phba->hbalock);
+	}
 
 	if (unreg_fcf) {
 		spin_lock_irq(&phba->hbalock);
@@ -1659,9 +1704,8 @@ lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 		lpfc_initial_fdisc(vport);
 	else {
 		lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
-		lpfc_printf_vlog(vport, KERN_ERR,
-			LOG_ELS,
-			"2606 No NPIV Fabric support\n");
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				 "2606 No NPIV Fabric support\n");
 	}
 	return;
 }
@@ -1756,8 +1800,8 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
 		goto fail_free_mem;
 	}
-	/* Mark the vport has registered with its VFI */
-	vport->vfi_state |= LPFC_VFI_REGISTERED;
+	/* The VPI is implicitly registered when the VFI is registered */
+	vport->vpi_state |= LPFC_VPI_REGISTERED;
 
 	if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
 		lpfc_start_fdiscs(phba);
@@ -1861,7 +1905,10 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
 	if (phba->fc_topology == TOPOLOGY_LOOP) {
 		phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
 
-		if (phba->cfg_enable_npiv)
+		/* If npiv is enabled and this adapter supports npiv,
+		 * log a message that npiv is not supported in this topology.
+		 */
+		if (phba->cfg_enable_npiv && phba->max_vpi)
 			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
 				"1309 Link Up Event npiv not supported in loop "
 				"topology\n");
@@ -1955,7 +2002,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
 		 * is phase 1 implementation that support FCF index 0 and driver
 		 * defaults.
 		 */
-		if (phba->cfg_enable_fip == 0) {
+		if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
 			fcf_record = kzalloc(sizeof(struct fcf_record),
 					GFP_KERNEL);
 			if (unlikely(!fcf_record)) {
@@ -2085,6 +2132,7 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 	else
 		phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;
 
+	phba->link_events++;
 	if (la->attType == AT_LINK_UP && (!la->mm)) {
 		phba->fc_stat.LinkUp++;
 		if (phba->link_flag & LS_LOOPBACK_MODE) {
@@ -2211,13 +2259,14 @@ lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 				 mb->mbxStatus);
 		break;
 	}
+	vport->vpi_state &= ~LPFC_VPI_REGISTERED;
 	vport->unreg_vpi_cmpl = VPORT_OK;
 	mempool_free(pmb, phba->mbox_mem_pool);
 	/*
 	 * This shost reference might have been taken at the beginning of
 	 * lpfc_vport_delete()
 	 */
-	if (vport->load_flag & FC_UNLOADING)
+	if ((vport->load_flag & FC_UNLOADING) && (vport != phba->pport))
 		scsi_host_put(shost);
 }
 
@@ -2268,6 +2317,7 @@ lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 		goto out;
 	}
 
+	vport->vpi_state |= LPFC_VPI_REGISTERED;
 	vport->num_disc_nodes = 0;
 	/* go thru NPR list and issue ELS PLOGIs */
 	if (vport->fc_npr_cnt)
@@ -3077,7 +3127,7 @@ lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
 	struct lpfc_sli *psli;
 	struct lpfc_sli_ring *pring;
 	struct lpfc_iocbq *iocb, *next_iocb;
-	uint32_t rpi, i;
+	uint32_t i;
 
 	lpfc_fabric_abort_nport(ndlp);
 
@@ -3086,7 +3136,6 @@ lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
 	 * by firmware with a no rpi error.
 	 */
 	psli = &phba->sli;
-	rpi = ndlp->nlp_rpi;
 	if (ndlp->nlp_flag & NLP_RPI_VALID) {
 		/* Now process each ring */
 		for (i = 0; i < psli->num_rings; i++) {
@@ -4322,6 +4371,14 @@ lpfc_fcf_inuse(struct lpfc_hba *phba)
 				ret = 1;
 				spin_unlock_irq(shost->host_lock);
 				goto out;
+			} else {
+				lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+					"2624 RPI %x DID %x flg %x still "
+					"logged in\n",
+					ndlp->nlp_rpi, ndlp->nlp_DID,
+					ndlp->nlp_flag);
+				if (ndlp->nlp_flag & NLP_RPI_VALID)
+					ret = 1;
 			}
 		}
 		spin_unlock_irq(shost->host_lock);
@@ -4400,7 +4457,7 @@ lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
 	 */
 	if (!(phba->hba_flag & HBA_FCOE_SUPPORT) ||
 		!(phba->fcf.fcf_flag & FCF_REGISTERED) ||
-		(phba->cfg_enable_fip == 0)) {
+		(!(phba->hba_flag & HBA_FIP_SUPPORT))) {
 		spin_unlock_irq(&phba->hbalock);
 		return;
 	}
@@ -4409,6 +4466,8 @@ lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
 	if (lpfc_fcf_inuse(phba))
 		return;
 
+	/* At this point, all discovery is aborted */
+	phba->pport->port_state = LPFC_VPORT_UNKNOWN;
 
 	/* Unregister VPIs */
 	vports = lpfc_create_vport_work_array(phba);
@@ -4416,8 +4475,8 @@ lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
 		(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))
 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
 			lpfc_mbx_unreg_vpi(vports[i]);
-			vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
-			vports[i]->vfi_state &= ~LPFC_VFI_REGISTERED;
+			vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
+			vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
 		}
 	lpfc_destroy_vport_work_array(phba, vports);
 
@@ -4431,7 +4490,7 @@ lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
 		return;
 	}
 
-	lpfc_unreg_vfi(mbox, phba->pport->vfi);
+	lpfc_unreg_vfi(mbox, phba->pport);
 	mbox->vport = phba->pport;
 	mbox->mbox_cmpl = lpfc_unregister_vfi_cmpl;
 
@@ -4512,8 +4571,10 @@ lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba,
 
 	/* Free the current connect table */
 	list_for_each_entry_safe(conn_entry, next_conn_entry,
-		&phba->fcf_conn_rec_list, list)
+		&phba->fcf_conn_rec_list, list) {
+		list_del_init(&conn_entry->list);
 		kfree(conn_entry);
+	}
 
 	conn_hdr = (struct lpfc_fcf_conn_hdr *) buff;
 	record_count = conn_hdr->length * sizeof(uint32_t)/
@@ -4569,14 +4630,6 @@ lpfc_read_fcoe_param(struct lpfc_hba *phba,
 		(fcoe_param_hdr->length != FCOE_PARAM_LENGTH))
 		return;
 
-	if (bf_get(lpfc_fip_param_hdr_fipp_mode, fcoe_param_hdr) ==
-			FIPP_MODE_ON)
-		phba->cfg_enable_fip = 1;
-
-	if (bf_get(lpfc_fip_param_hdr_fipp_mode, fcoe_param_hdr) ==
-		FIPP_MODE_OFF)
-		phba->cfg_enable_fip = 0;
-
 	if (fcoe_param_hdr->parm_flags & FIPP_VLAN_VALID) {
 		phba->valid_vlan = 1;
 		phba->vlan_id = le16_to_cpu(fcoe_param->vlan_tag) &
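The list_del_init() added in the hunk above matters: freeing an entry that is still linked leaves fcf_conn_rec_list pointing at freed memory, so any later walk of the list would traverse freed nodes. The safe-teardown idiom, sketched here with the same names the hunk uses:

	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;

	list_for_each_entry_safe(conn_entry, next_conn_entry,
				 &phba->fcf_conn_rec_list, list) {
		/* Unlink first so the list head never points at freed
		 * memory, then release the entry. */
		list_del_init(&conn_entry->list);
		kfree(conn_entry);
	}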
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index ccb26724dc53..c9faa1d8c3c8 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1124,21 +1124,6 @@ typedef struct {
 /* Number of 4-byte words in an IOCB. */
 #define IOCB_WORD_SZ    8
 
-/* defines for type field in fc header */
-#define FC_ELS_DATA     0x1
-#define FC_LLC_SNAP     0x5
-#define FC_FCP_DATA     0x8
-#define FC_COMMON_TRANSPORT_ULP 0x20
-
-/* defines for rctl field in fc header */
-#define FC_DEV_DATA     0x0
-#define FC_UNSOL_CTL    0x2
-#define FC_SOL_CTL      0x3
-#define FC_UNSOL_DATA   0x4
-#define FC_FCP_CMND     0x6
-#define FC_ELS_REQ      0x22
-#define FC_ELS_RSP      0x23
-
 /* network headers for Dfctl field */
 #define FC_NET_HDR      0x20
 
@@ -1183,6 +1168,8 @@ typedef struct {
 #define PCI_DEVICE_ID_ZEPHYR_DCSP   0xfe12
 #define PCI_VENDOR_ID_SERVERENGINE  0x19a2
 #define PCI_DEVICE_ID_TIGERSHARK    0x0704
+#define PCI_DEVICE_ID_TOMCAT        0x0714
+#define PCI_DEVICE_ID_FALCON        0xf180
 
 #define JEDEC_ID_ADDRESS            0x0080001c
 #define FIREFLY_JEDEC_ID            0x1ACC
@@ -1444,6 +1431,7 @@ typedef struct {		/* FireFly BIU registers */
 #define CMD_ABORT_MXRI64_CN     0x8C
 #define CMD_RCV_ELS_REQ64_CX    0x8D
 #define CMD_XMIT_ELS_RSP64_CX   0x95
+#define CMD_XMIT_BLS_RSP64_CX   0x97
 #define CMD_FCP_IWRITE64_CR     0x98
 #define CMD_FCP_IWRITE64_CX     0x99
 #define CMD_FCP_IREAD64_CR      0x9A
@@ -2306,8 +2294,7 @@ typedef struct {
 	uint32_t rsvd1;
 	uint32_t rsvd2:8;
 	uint32_t sid:24;
-	uint32_t rsvd3;
-	uint32_t rsvd4;
+	uint32_t wwn[2];
 	uint32_t rsvd5;
 	uint16_t vfi;
 	uint16_t vpi;
@@ -2315,8 +2302,7 @@ typedef struct {
 	uint32_t rsvd1;
 	uint32_t sid:24;
 	uint32_t rsvd2:8;
-	uint32_t rsvd3;
-	uint32_t rsvd4;
+	uint32_t wwn[2];
 	uint32_t rsvd5;
 	uint16_t vpi;
 	uint16_t vfi;
@@ -2326,7 +2312,13 @@ typedef struct {
 /* Structure for MB Command UNREG_VPI (0x97) */
 typedef struct {
 	uint32_t rsvd1;
-	uint32_t rsvd2;
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint16_t rsvd2;
+	uint16_t sli4_vpi;
+#else	/*  __LITTLE_ENDIAN */
+	uint16_t sli4_vpi;
+	uint16_t rsvd2;
+#endif
 	uint32_t rsvd3;
 	uint32_t rsvd4;
 	uint32_t rsvd5;
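The #ifdef above exists because the VPI occupies the low-order halfword of the 32-bit mailbox word: consecutive uint16_t members land in memory in declaration order, so the two halfwords must be declared in opposite order on big- and little-endian hosts for sli4_vpi to cover the same bits. A hypothetical endian-neutral alternative (not what the driver does) would manipulate the whole word instead:

	#define UNREG_VPI_SLI4_VPI_MASK	0x0000FFFF

	/* Place the VPI in the low halfword regardless of host endianness */
	static inline void unreg_vpi_set_vpi(uint32_t *word, uint16_t vpi)
	{
		*word = (*word & ~UNREG_VPI_SLI4_VPI_MASK) | vpi;
	}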
@@ -3547,7 +3539,7 @@ typedef struct _IOCB {	/* IOCB structure */
 		ASYNCSTAT_FIELDS asyncstat; /* async_status iocb */
 		QUE_XRI64_CX_FIELDS quexri64cx; /* que_xri64_cx fields */
 		struct rcv_seq64 rcvseq64;	/* RCV_SEQ64 and RCV_CONT64 */
-
+		struct sli4_bls_acc bls_acc; /* UNSOL ABTS BLS_ACC params */
 		uint32_t ulpWord[IOCB_WORD_SZ - 2];	/* generic 6 'words' */
 	} un;
 	union {
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 3689eee04535..1585148a17e5 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -194,6 +194,26 @@ struct lpfc_sli4_flags {
 #define lpfc_fip_flag_WORD word0
 };
 
+struct sli4_bls_acc {
+	uint32_t word0_rsvd;      /* Word0 must be reserved */
+	uint32_t word1;
+#define lpfc_abts_orig_SHIFT      0
+#define lpfc_abts_orig_MASK       0x00000001
+#define lpfc_abts_orig_WORD       word1
+#define LPFC_ABTS_UNSOL_RSP       1
+#define LPFC_ABTS_UNSOL_INT       0
+	uint32_t word2;
+#define lpfc_abts_rxid_SHIFT      0
+#define lpfc_abts_rxid_MASK       0x0000FFFF
+#define lpfc_abts_rxid_WORD       word2
+#define lpfc_abts_oxid_SHIFT      16
+#define lpfc_abts_oxid_MASK       0x0000FFFF
+#define lpfc_abts_oxid_WORD       word2
+	uint32_t word3;
+	uint32_t word4;
+	uint32_t word5_rsvd;	/* Word5 must be reserved */
+};
+
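The SHIFT/MASK/WORD triplets above feed the driver's generic bf_set()/bf_get() bitfield accessors from lpfc_hw4.h. A minimal sketch of filling a BLS_ACC payload this way; rx_id and ox_id are illustrative locals, not fields from this patch:

	struct sli4_bls_acc bls_acc;
	uint16_t rx_id = 0x0010, ox_id = 0x0020;	/* example exchange IDs */

	memset(&bls_acc, 0, sizeof(bls_acc));
	/* Mark this as a driver response to an unsolicited ABTS */
	bf_set(lpfc_abts_orig, &bls_acc, LPFC_ABTS_UNSOL_RSP);
	/* RX_ID occupies bits 0-15 of word2, OX_ID bits 16-31 */
	bf_set(lpfc_abts_rxid, &bls_acc, rx_id);
	bf_set(lpfc_abts_oxid, &bls_acc, ox_id);
	/* Fields read back the same way, e.g. for trace logging */
	ox_id = bf_get(lpfc_abts_oxid, &bls_acc);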
 /* event queue entry structure */
 struct lpfc_eqe {
 	uint32_t word0;
@@ -425,7 +445,7 @@ struct lpfc_wqe_generic{
 #define lpfc_wqe_gen_status_MASK	0x0000000F
 #define lpfc_wqe_gen_status_WORD	word7
 #define lpfc_wqe_gen_ct_SHIFT		2
-#define lpfc_wqe_gen_ct_MASK		0x00000007
+#define lpfc_wqe_gen_ct_MASK		0x00000003
 #define lpfc_wqe_gen_ct_WORD		word7
 	uint32_t abort_tag;
 	uint32_t word9;
@@ -453,6 +473,13 @@ struct lpfc_wqe_generic{
 #define lpfc_wqe_gen_wqec_SHIFT		7
 #define lpfc_wqe_gen_wqec_MASK		0x00000001
 #define lpfc_wqe_gen_wqec_WORD		word11
+#define ELS_ID_FLOGI 3
+#define ELS_ID_FDISC 2
+#define ELS_ID_LOGO  1
+#define ELS_ID_DEFAULT 0
+#define lpfc_wqe_gen_els_id_SHIFT	4
+#define lpfc_wqe_gen_els_id_MASK	0x00000003
+#define lpfc_wqe_gen_els_id_WORD	word11
 #define lpfc_wqe_gen_cmd_type_SHIFT	0
 #define lpfc_wqe_gen_cmd_type_MASK	0x0000000F
 #define lpfc_wqe_gen_cmd_type_WORD	word11
@@ -487,8 +514,8 @@ struct lpfc_register {
 
 #define LPFC_UERR_STATUS_HI		0x00A4
 #define LPFC_UERR_STATUS_LO		0x00A0
-#define LPFC_ONLINE0			0x00B0
-#define LPFC_ONLINE1			0x00B4
+#define LPFC_UE_MASK_HI			0x00AC
+#define LPFC_UE_MASK_LO			0x00A8
 #define LPFC_SCRATCHPAD			0x0058
 
 /* BAR0 Registers */
@@ -760,6 +787,7 @@ struct mbox_header {
 #define LPFC_MBOX_OPCODE_MQ_DESTROY		0x35
 #define LPFC_MBOX_OPCODE_CQ_DESTROY		0x36
 #define LPFC_MBOX_OPCODE_EQ_DESTROY		0x37
+#define LPFC_MBOX_OPCODE_QUERY_FW_CFG		0x3A
 #define LPFC_MBOX_OPCODE_FUNCTION_RESET		0x3D
 
 /* FCoE Opcodes */
@@ -1273,6 +1301,51 @@ struct lpfc_mbx_del_fcf_tbl_entry {
 #define lpfc_mbx_del_fcf_tbl_index_WORD		word10
 };
 
+struct lpfc_mbx_query_fw_cfg {
+	struct mbox_header header;
+	uint32_t config_number;
+	uint32_t asic_rev;
+	uint32_t phys_port;
+	uint32_t function_mode;
+/* firmware Function Mode */
+#define lpfc_function_mode_toe_SHIFT		0
+#define lpfc_function_mode_toe_MASK		0x00000001
+#define lpfc_function_mode_toe_WORD		function_mode
+#define lpfc_function_mode_nic_SHIFT		1
+#define lpfc_function_mode_nic_MASK		0x00000001
+#define lpfc_function_mode_nic_WORD		function_mode
+#define lpfc_function_mode_rdma_SHIFT		2
+#define lpfc_function_mode_rdma_MASK		0x00000001
+#define lpfc_function_mode_rdma_WORD		function_mode
+#define lpfc_function_mode_vm_SHIFT		3
+#define lpfc_function_mode_vm_MASK		0x00000001
+#define lpfc_function_mode_vm_WORD		function_mode
+#define lpfc_function_mode_iscsi_i_SHIFT	4
+#define lpfc_function_mode_iscsi_i_MASK		0x00000001
+#define lpfc_function_mode_iscsi_i_WORD		function_mode
+#define lpfc_function_mode_iscsi_t_SHIFT	5
+#define lpfc_function_mode_iscsi_t_MASK		0x00000001
+#define lpfc_function_mode_iscsi_t_WORD		function_mode
+#define lpfc_function_mode_fcoe_i_SHIFT		6
+#define lpfc_function_mode_fcoe_i_MASK		0x00000001
+#define lpfc_function_mode_fcoe_i_WORD		function_mode
+#define lpfc_function_mode_fcoe_t_SHIFT		7
+#define lpfc_function_mode_fcoe_t_MASK		0x00000001
+#define lpfc_function_mode_fcoe_t_WORD		function_mode
+#define lpfc_function_mode_dal_SHIFT		8
+#define lpfc_function_mode_dal_MASK		0x00000001
+#define lpfc_function_mode_dal_WORD		function_mode
+#define lpfc_function_mode_lro_SHIFT		9
+#define lpfc_function_mode_lro_MASK		0x00000001
+#define lpfc_function_mode_lro_WORD		function_mode
+#define lpfc_function_mode_flex10_SHIFT		10
+#define lpfc_function_mode_flex10_MASK		0x00000001
+#define lpfc_function_mode_flex10_WORD		function_mode
+#define lpfc_function_mode_ncsi_SHIFT		11
+#define lpfc_function_mode_ncsi_MASK		0x00000001
+#define lpfc_function_mode_ncsi_WORD		function_mode
+};
+
 /* Status field for embedded SLI_CONFIG mailbox command */
 #define STATUS_SUCCESS					0x0
 #define STATUS_FAILED 					0x1
@@ -1349,8 +1422,7 @@ struct lpfc_mbx_reg_vfi {
 #define lpfc_reg_vfi_fcfi_SHIFT		0
 #define lpfc_reg_vfi_fcfi_MASK		0x0000FFFF
 #define lpfc_reg_vfi_fcfi_WORD		word2
-	uint32_t word3_rsvd;
-	uint32_t word4_rsvd;
+	uint32_t wwn[2];
 	struct ulp_bde64 bde;
 	uint32_t word8_rsvd;
 	uint32_t word9_rsvd;
@@ -1555,6 +1627,11 @@ struct lpfc_mbx_read_rev {
 #define lpfc_mbx_rd_rev_fcoe_SHIFT		20
 #define lpfc_mbx_rd_rev_fcoe_MASK		0x00000001
 #define lpfc_mbx_rd_rev_fcoe_WORD		word1
+#define lpfc_mbx_rd_rev_cee_ver_SHIFT		21
+#define lpfc_mbx_rd_rev_cee_ver_MASK		0x00000003
+#define lpfc_mbx_rd_rev_cee_ver_WORD		word1
+#define LPFC_PREDCBX_CEE_MODE	0
+#define LPFC_DCBX_CEE_MODE	1
 #define lpfc_mbx_rd_rev_vpd_SHIFT		29
 #define lpfc_mbx_rd_rev_vpd_MASK		0x00000001
 #define lpfc_mbx_rd_rev_vpd_WORD		word1
@@ -1804,6 +1881,7 @@ struct lpfc_mqe {
 		struct lpfc_mbx_read_config rd_config;
 		struct lpfc_mbx_request_features req_ftrs;
 		struct lpfc_mbx_post_hdr_tmpl hdr_tmpl;
+		struct lpfc_mbx_query_fw_cfg query_fw_cfg;
 		struct lpfc_mbx_nop nop;
 	} un;
 };
@@ -1885,7 +1963,7 @@ struct lpfc_acqe_link {
 };
 
 struct lpfc_acqe_fcoe {
-	uint32_t fcf_index;
+	uint32_t index;
 	uint32_t word1;
 #define lpfc_acqe_fcoe_fcf_count_SHIFT		0
 #define lpfc_acqe_fcoe_fcf_count_MASK		0x0000FFFF
@@ -1896,6 +1974,7 @@ struct lpfc_acqe_fcoe {
 #define LPFC_FCOE_EVENT_TYPE_NEW_FCF		0x1
 #define LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL	0x2
 #define LPFC_FCOE_EVENT_TYPE_FCF_DEAD		0x3
+#define LPFC_FCOE_EVENT_TYPE_CVL		0x4
 	uint32_t event_tag;
 	uint32_t trailer;
 };
@@ -1921,12 +2000,13 @@ struct lpfc_bmbx_create {
 #define SGL_ALIGN_SZ 64
 #define SGL_PAGE_SIZE 4096
 /* align SGL addr on a size boundary - adjust address up */
-#define NO_XRI ((uint16_t)-1)
+#define NO_XRI  ((uint16_t)-1)
+
 struct wqe_common {
 	uint32_t word6;
-#define wqe_xri_SHIFT         0
-#define wqe_xri_MASK          0x0000FFFF
-#define wqe_xri_WORD          word6
+#define wqe_xri_tag_SHIFT     0
+#define wqe_xri_tag_MASK      0x0000FFFF
+#define wqe_xri_tag_WORD      word6
 #define wqe_ctxt_tag_SHIFT    16
 #define wqe_ctxt_tag_MASK     0x0000FFFF
 #define wqe_ctxt_tag_WORD     word6
@@ -1987,7 +2067,7 @@ struct wqe_common {
 #define wqe_wqec_MASK       0x00000001
 #define wqe_wqec_WORD       word11
 #define wqe_cqid_SHIFT      16
-#define wqe_cqid_MASK       0x000003ff
+#define wqe_cqid_MASK       0x0000ffff
 #define wqe_cqid_WORD       word11
 };
 
@@ -1996,6 +2076,9 @@ struct wqe_did {
 #define wqe_els_did_SHIFT         0
 #define wqe_els_did_MASK          0x00FFFFFF
 #define wqe_els_did_WORD          word5
+#define wqe_xmit_bls_pt_SHIFT         28
+#define wqe_xmit_bls_pt_MASK          0x00000003
+#define wqe_xmit_bls_pt_WORD          word5
 #define wqe_xmit_bls_ar_SHIFT         30
 #define wqe_xmit_bls_ar_MASK          0x00000001
 #define wqe_xmit_bls_ar_WORD          word5
@@ -2044,6 +2127,23 @@ struct xmit_els_rsp64_wqe {
 
 struct xmit_bls_rsp64_wqe {
 	uint32_t payload0;
+/* Payload0 for BA_ACC */
+#define xmit_bls_rsp64_acc_seq_id_SHIFT        16
+#define xmit_bls_rsp64_acc_seq_id_MASK         0x000000ff
+#define xmit_bls_rsp64_acc_seq_id_WORD         payload0
+#define xmit_bls_rsp64_acc_seq_id_vald_SHIFT   24
+#define xmit_bls_rsp64_acc_seq_id_vald_MASK    0x000000ff
+#define xmit_bls_rsp64_acc_seq_id_vald_WORD    payload0
+/* Payload0 for BA_RJT */
+#define xmit_bls_rsp64_rjt_vspec_SHIFT   0
+#define xmit_bls_rsp64_rjt_vspec_MASK    0x000000ff
+#define xmit_bls_rsp64_rjt_vspec_WORD    payload0
+#define xmit_bls_rsp64_rjt_expc_SHIFT    8
+#define xmit_bls_rsp64_rjt_expc_MASK     0x000000ff
+#define xmit_bls_rsp64_rjt_expc_WORD     payload0
+#define xmit_bls_rsp64_rjt_rsnc_SHIFT    16
+#define xmit_bls_rsp64_rjt_rsnc_MASK     0x000000ff
+#define xmit_bls_rsp64_rjt_rsnc_WORD     payload0
 	uint32_t word1;
 #define xmit_bls_rsp64_rxid_SHIFT  0
 #define xmit_bls_rsp64_rxid_MASK   0x0000ffff
@@ -2052,18 +2152,19 @@ struct xmit_bls_rsp64_wqe {
 #define xmit_bls_rsp64_oxid_MASK   0x0000ffff
 #define xmit_bls_rsp64_oxid_WORD   word1
 	uint32_t word2;
-#define xmit_bls_rsp64_seqcntlo_SHIFT  0
-#define xmit_bls_rsp64_seqcntlo_MASK   0x0000ffff
-#define xmit_bls_rsp64_seqcntlo_WORD   word2
-#define xmit_bls_rsp64_seqcnthi_SHIFT  16
+#define xmit_bls_rsp64_seqcnthi_SHIFT  0
 #define xmit_bls_rsp64_seqcnthi_MASK   0x0000ffff
 #define xmit_bls_rsp64_seqcnthi_WORD   word2
+#define xmit_bls_rsp64_seqcntlo_SHIFT  16
+#define xmit_bls_rsp64_seqcntlo_MASK   0x0000ffff
+#define xmit_bls_rsp64_seqcntlo_WORD   word2
 	uint32_t rsrvd3;
 	uint32_t rsrvd4;
 	struct wqe_did	wqe_dest;
 	struct wqe_common wqe_com; /* words 6-11 */
 	uint32_t rsvd_12_15[4];
 };
+
 struct wqe_rctl_dfctl {
 	uint32_t word5;
 #define wqe_si_SHIFT 2
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 562d8cee874b..0ba35a9a5c5f 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -28,6 +28,7 @@
 #include <linux/pci.h>
 #include <linux/spinlock.h>
 #include <linux/ctype.h>
+#include <linux/aer.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_device.h>
@@ -852,12 +853,19 @@ lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
 void
 lpfc_hb_timeout_handler(struct lpfc_hba *phba)
 {
+	struct lpfc_vport **vports;
 	LPFC_MBOXQ_t *pmboxq;
 	struct lpfc_dmabuf *buf_ptr;
-	int retval;
+	int retval, i;
 	struct lpfc_sli *psli = &phba->sli;
 	LIST_HEAD(completions);
 
+	vports = lpfc_create_vport_work_array(phba);
+	if (vports != NULL)
+		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
+			lpfc_rcv_seq_check_edtov(vports[i]);
+	lpfc_destroy_vport_work_array(phba, vports);
+
 	if ((phba->link_state == LPFC_HBA_ERROR) ||
 		(phba->pport->load_flag & FC_UNLOADING) ||
 		(phba->pport->fc_flag & FC_OFFLINE_MODE))
@@ -1521,10 +1529,10 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
 	int GE = 0;
 	int oneConnect = 0; /* default is not a oneConnect */
 	struct {
-		char * name;
-		int    max_speed;
-		char * bus;
-	} m = {"<Unknown>", 0, ""};
+		char *name;
+		char *bus;
+		char *function;
+	} m = {"<Unknown>", "", ""};
 
 	if (mdp && mdp[0] != '\0'
 		&& descp && descp[0] != '\0')
@@ -1545,132 +1553,155 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
 
 	switch (dev_id) {
 	case PCI_DEVICE_ID_FIREFLY:
-		m = (typeof(m)){"LP6000", max_speed, "PCI"};
+		m = (typeof(m)){"LP6000", "PCI", "Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_SUPERFLY:
 		if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
-			m = (typeof(m)){"LP7000", max_speed,  "PCI"};
+			m = (typeof(m)){"LP7000", "PCI",
+					"Fibre Channel Adapter"};
 		else
-			m = (typeof(m)){"LP7000E", max_speed, "PCI"};
+			m = (typeof(m)){"LP7000E", "PCI",
+					"Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_DRAGONFLY:
-		m = (typeof(m)){"LP8000", max_speed, "PCI"};
+		m = (typeof(m)){"LP8000", "PCI",
+				"Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_CENTAUR:
 		if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
-			m = (typeof(m)){"LP9002", max_speed, "PCI"};
+			m = (typeof(m)){"LP9002", "PCI",
+					"Fibre Channel Adapter"};
 		else
-			m = (typeof(m)){"LP9000", max_speed, "PCI"};
+			m = (typeof(m)){"LP9000", "PCI",
+					"Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_RFLY:
-		m = (typeof(m)){"LP952", max_speed, "PCI"};
+		m = (typeof(m)){"LP952", "PCI",
+				"Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_PEGASUS:
-		m = (typeof(m)){"LP9802", max_speed, "PCI-X"};
+		m = (typeof(m)){"LP9802", "PCI-X",
+				"Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_THOR:
-		m = (typeof(m)){"LP10000", max_speed, "PCI-X"};
+		m = (typeof(m)){"LP10000", "PCI-X",
+				"Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_VIPER:
-		m = (typeof(m)){"LPX1000", max_speed,  "PCI-X"};
+		m = (typeof(m)){"LPX1000",  "PCI-X",
+				"Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_PFLY:
-		m = (typeof(m)){"LP982", max_speed, "PCI-X"};
+		m = (typeof(m)){"LP982", "PCI-X",
+				"Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_TFLY:
-		m = (typeof(m)){"LP1050", max_speed, "PCI-X"};
+		m = (typeof(m)){"LP1050", "PCI-X",
+				"Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_HELIOS:
-		m = (typeof(m)){"LP11000", max_speed, "PCI-X2"};
+		m = (typeof(m)){"LP11000", "PCI-X2",
+				"Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_HELIOS_SCSP:
-		m = (typeof(m)){"LP11000-SP", max_speed, "PCI-X2"};
+		m = (typeof(m)){"LP11000-SP", "PCI-X2",
+				"Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_HELIOS_DCSP:
-		m = (typeof(m)){"LP11002-SP", max_speed, "PCI-X2"};
+		m = (typeof(m)){"LP11002-SP",  "PCI-X2",
+				"Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_NEPTUNE:
-		m = (typeof(m)){"LPe1000", max_speed, "PCIe"};
+		m = (typeof(m)){"LPe1000", "PCIe", "Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_NEPTUNE_SCSP:
-		m = (typeof(m)){"LPe1000-SP", max_speed, "PCIe"};
+		m = (typeof(m)){"LPe1000-SP", "PCIe", "Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_NEPTUNE_DCSP:
-		m = (typeof(m)){"LPe1002-SP", max_speed, "PCIe"};
+		m = (typeof(m)){"LPe1002-SP", "PCIe", "Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_BMID:
-		m = (typeof(m)){"LP1150", max_speed, "PCI-X2"};
+		m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_BSMB:
-		m = (typeof(m)){"LP111", max_speed, "PCI-X2"};
+		m = (typeof(m)){"LP111", "PCI-X2", "Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_ZEPHYR:
-		m = (typeof(m)){"LPe11000", max_speed, "PCIe"};
+		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_ZEPHYR_SCSP:
-		m = (typeof(m)){"LPe11000", max_speed, "PCIe"};
+		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_ZEPHYR_DCSP:
-		m = (typeof(m)){"LP2105", max_speed, "PCIe"};
+		m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
 		GE = 1;
 		break;
 	case PCI_DEVICE_ID_ZMID:
-		m = (typeof(m)){"LPe1150", max_speed, "PCIe"};
+		m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_ZSMB:
-		m = (typeof(m)){"LPe111", max_speed, "PCIe"};
+		m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_LP101:
-		m = (typeof(m)){"LP101", max_speed, "PCI-X"};
+		m = (typeof(m)){"LP101", "PCI-X", "Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_LP10000S:
-		m = (typeof(m)){"LP10000-S", max_speed, "PCI"};
+		m = (typeof(m)){"LP10000-S", "PCI", "Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_LP11000S:
-		m = (typeof(m)){"LP11000-S", max_speed,
-			"PCI-X2"};
+		m = (typeof(m)){"LP11000-S", "PCI-X2", "Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_LPE11000S:
-		m = (typeof(m)){"LPe11000-S", max_speed,
-			"PCIe"};
+		m = (typeof(m)){"LPe11000-S", "PCIe", "Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_SAT:
-		m = (typeof(m)){"LPe12000", max_speed, "PCIe"};
+		m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_SAT_MID:
-		m = (typeof(m)){"LPe1250", max_speed, "PCIe"};
+		m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_SAT_SMB:
-		m = (typeof(m)){"LPe121", max_speed, "PCIe"};
+		m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_SAT_DCSP:
-		m = (typeof(m)){"LPe12002-SP", max_speed, "PCIe"};
+		m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_SAT_SCSP:
-		m = (typeof(m)){"LPe12000-SP", max_speed, "PCIe"};
+		m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_SAT_S:
-		m = (typeof(m)){"LPe12000-S", max_speed, "PCIe"};
+		m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_HORNET:
-		m = (typeof(m)){"LP21000", max_speed, "PCIe"};
+		m = (typeof(m)){"LP21000", "PCIe", "FCoE Adapter"};
 		GE = 1;
 		break;
 	case PCI_DEVICE_ID_PROTEUS_VF:
-		m = (typeof(m)) {"LPev12000", max_speed, "PCIe IOV"};
+		m = (typeof(m)){"LPev12000", "PCIe IOV",
+				"Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_PROTEUS_PF:
-		m = (typeof(m)) {"LPev12000", max_speed, "PCIe IOV"};
+		m = (typeof(m)){"LPev12000", "PCIe IOV",
+				"Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_PROTEUS_S:
-		m = (typeof(m)) {"LPemv12002-S", max_speed, "PCIe IOV"};
+		m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
+				"Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_TIGERSHARK:
 		oneConnect = 1;
-		m = (typeof(m)) {"OCe10100-F", max_speed, "PCIe"};
+		m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
+		break;
+	case PCI_DEVICE_ID_TOMCAT:
+		oneConnect = 1;
+		m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
+		break;
+	case PCI_DEVICE_ID_FALCON:
+		m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
+				"EmulexSecure Fibre"};
 		break;
 	default:
-		m = (typeof(m)){ NULL };
+		m = (typeof(m)){"Unknown", "", ""};
 		break;
 	}
 
@@ -1682,17 +1713,14 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
 	if (descp && descp[0] == '\0') {
 		if (oneConnect)
 			snprintf(descp, 255,
-				"Emulex OneConnect %s, FCoE Initiator, Port %s",
-				m.name,
+				"Emulex OneConnect %s, %s Initiator, Port %s",
+				m.name, m.function,
 				phba->Port);
 		else
 			snprintf(descp, 255,
 				"Emulex %s %d%s %s %s",
-				m.name, m.max_speed,
-				(GE) ? "GE" : "Gb",
-				m.bus,
-				(GE) ? "FCoE Adapter" :
-					"Fibre Channel Adapter");
+				m.name, max_speed, (GE) ? "GE" : "Gb",
+				m.bus, m.function);
 	}
 }
 
@@ -2217,7 +2245,7 @@ lpfc_offline_prep(struct lpfc_hba * phba)
 
 			if (vports[i]->load_flag & FC_UNLOADING)
 				continue;
-			vports[i]->vfi_state &= ~LPFC_VFI_REGISTERED;
+			vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
 			shost =	lpfc_shost_from_vport(vports[i]);
 			list_for_each_entry_safe(ndlp, next_ndlp,
 						 &vports[i]->fc_nodes,
@@ -2308,6 +2336,7 @@ lpfc_scsi_free(struct lpfc_hba *phba)
 
 	spin_lock_irq(&phba->hbalock);
 	/* Release all the lpfc_scsi_bufs maintained by this host. */
+	spin_lock(&phba->scsi_buf_list_lock);
 	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) {
 		list_del(&sb->list);
 		pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
@@ -2315,6 +2344,7 @@ lpfc_scsi_free(struct lpfc_hba *phba)
 		kfree(sb);
 		phba->total_scsi_bufs--;
 	}
+	spin_unlock(&phba->scsi_buf_list_lock);
 
 	/* Release all the lpfc_iocbq entries maintained by this host. */
 	list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
@@ -2322,9 +2352,7 @@ lpfc_scsi_free(struct lpfc_hba *phba)
 		kfree(io);
 		phba->total_iocbq_bufs--;
 	}
-
 	spin_unlock_irq(&phba->hbalock);
-
 	return 0;
 }
 
@@ -2408,7 +2436,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
 	vport->els_tmofunc.function = lpfc_els_timeout;
 	vport->els_tmofunc.data = (unsigned long)vport;
 
-	error = scsi_add_host(shost, dev);
+	error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
 	if (error)
 		goto out_put_shost;
 
@@ -2699,6 +2727,63 @@ lpfc_sli_remove_dflt_fcf(struct lpfc_hba *phba)
 }
 
 /**
+ * lpfc_sli4_fw_cfg_check - Read the firmware config and verify FCoE support
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This function uses the QUERY_FW_CFG mailbox command to determine if the
+ * firmware loaded supports FCoE. A return of zero indicates that the mailbox
+ * was successful and the firmware supports FCoE. Any other return indicates
+ * a error. It is assumed that this function will be called before interrupts
+ * are enabled.
+ **/
+static int
+lpfc_sli4_fw_cfg_check(struct lpfc_hba *phba)
+{
+	int rc = 0;
+	LPFC_MBOXQ_t *mboxq;
+	struct lpfc_mbx_query_fw_cfg *query_fw_cfg;
+	uint32_t length;
+	uint32_t shdr_status, shdr_add_status;
+
+	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mboxq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2621 Failed to allocate mbox for "
+				"query firmware config cmd\n");
+		return -ENOMEM;
+	}
+	query_fw_cfg = &mboxq->u.mqe.un.query_fw_cfg;
+	length = (sizeof(struct lpfc_mbx_query_fw_cfg) -
+		  sizeof(struct lpfc_sli4_cfg_mhdr));
+	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
+			 LPFC_MBOX_OPCODE_QUERY_FW_CFG,
+			 length, LPFC_SLI4_MBX_EMBED);
+	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+	/* The IOCTL status is embedded in the mailbox subheader. */
+	shdr_status = bf_get(lpfc_mbox_hdr_status,
+			     &query_fw_cfg->header.cfg_shdr.response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
+				 &query_fw_cfg->header.cfg_shdr.response);
+	if (shdr_status || shdr_add_status || rc != MBX_SUCCESS) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2622 Query Firmware Config failed "
+				"mbx status x%x, status x%x add_status x%x\n",
+				rc, shdr_status, shdr_add_status);
+		return -EINVAL;
+	}
+	if (!bf_get(lpfc_function_mode_fcoe_i, query_fw_cfg)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2623 FCoE Function not supported by firmware. "
+				"Function mode = %08x\n",
+				query_fw_cfg->function_mode);
+		return -EINVAL;
+	}
+	if (rc != MBX_TIMEOUT)
+		mempool_free(mboxq, phba->mbox_mem_pool);
+	return 0;
+}
+
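One caveat in the routine above: both -EINVAL paths return without releasing mboxq, so the mailbox appears to be leaked on failure (MBX_TIMEOUT is the only case where the SLI layer still owns the buffer and frees it on completion). The usual shape of that cleanup, as a hedged sketch:

	if (shdr_status || shdr_add_status || rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2622 Query Firmware Config failed "
				"mbx status x%x, status x%x add_status x%x\n",
				rc, shdr_status, shdr_add_status);
		/* Only free when the mailbox actually completed */
		if (rc != MBX_TIMEOUT)
			mempool_free(mboxq, phba->mbox_mem_pool);
		return -EINVAL;
	}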
+/**
  * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
  * @phba: pointer to lpfc hba data structure.
  * @acqe_link: pointer to the async link completion queue entry.
@@ -2918,13 +3003,17 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
 {
 	uint8_t event_type = bf_get(lpfc_acqe_fcoe_event_type, acqe_fcoe);
 	int rc;
+	struct lpfc_vport *vport;
+	struct lpfc_nodelist *ndlp;
+	struct Scsi_Host  *shost;
 
+	phba->fc_eventTag = acqe_fcoe->event_tag;
 	phba->fcoe_eventtag = acqe_fcoe->event_tag;
 	switch (event_type) {
 	case LPFC_FCOE_EVENT_TYPE_NEW_FCF:
 		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
 			"2546 New FCF found index 0x%x tag 0x%x\n",
-			acqe_fcoe->fcf_index,
+			acqe_fcoe->index,
 			acqe_fcoe->event_tag);
 		/*
 		 * If the current FCF is in discovered state, or
@@ -2939,12 +3028,11 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
 		spin_unlock_irq(&phba->hbalock);
 
 		/* Read the FCF table and re-discover SAN. */
-		rc = lpfc_sli4_read_fcf_record(phba,
-			LPFC_FCOE_FCF_GET_FIRST);
+		rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST);
 		if (rc)
 			lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
-				"2547 Read FCF record failed 0x%x\n",
-				rc);
+					"2547 Read FCF record failed 0x%x\n",
+					rc);
 		break;
 
 	case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL:
@@ -2956,11 +3044,11 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
 
 	case LPFC_FCOE_EVENT_TYPE_FCF_DEAD:
 		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
-			"2549 FCF disconnected fron network index 0x%x"
-			" tag 0x%x\n", acqe_fcoe->fcf_index,
+			"2549 FCF disconnected from network index 0x%x"
+			" tag 0x%x\n", acqe_fcoe->index,
 			acqe_fcoe->event_tag);
 		/* If the event is not for currently used fcf do nothing */
-		if (phba->fcf.fcf_indx != acqe_fcoe->fcf_index)
+		if (phba->fcf.fcf_indx != acqe_fcoe->index)
 			break;
 		/*
 		 * Currently, driver support only one FCF - so treat this as
@@ -2970,7 +3058,28 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
 		/* Unregister FCF if no devices connected to it */
 		lpfc_unregister_unused_fcf(phba);
 		break;
-
+	case LPFC_FCOE_EVENT_TYPE_CVL:
+		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+			"2718 Clear Virtual Link Received for VPI 0x%x"
+			" tag 0x%x\n", acqe_fcoe->index, acqe_fcoe->event_tag);
+		vport = lpfc_find_vport_by_vpid(phba,
+				acqe_fcoe->index - phba->vpi_base);
+		if (!vport)
+			break;
+		ndlp = lpfc_findnode_did(vport, Fabric_DID);
+		if (!ndlp)
+			break;
+		shost = lpfc_shost_from_vport(vport);
+		lpfc_linkdown_port(vport);
+		if (vport->port_type != LPFC_NPIV_PORT) {
+			mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
+			spin_lock_irq(shost->host_lock);
+			ndlp->nlp_flag |= NLP_DELAY_TMO;
+			spin_unlock_irq(shost->host_lock);
+			ndlp->nlp_last_elscmd = ELS_CMD_FLOGI;
+			vport->port_state = LPFC_FLOGI;
+		}
+		break;
 	default:
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 			"0288 Unknown FCoE event type 0x%x event tag "
@@ -2990,6 +3099,7 @@ static void
 lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
 			 struct lpfc_acqe_dcbx *acqe_dcbx)
 {
+	phba->fc_eventTag = acqe_dcbx->event_tag;
 	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 			"0290 The SLI4 DCBX asynchronous event is not "
 			"handled yet\n");
@@ -3432,7 +3542,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 	/* Driver internal slow-path CQ Event pool */
 	INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
 	/* Response IOCB work queue list */
-	INIT_LIST_HEAD(&phba->sli4_hba.sp_rspiocb_work_queue);
+	INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
 	/* Asynchronous event CQ Event work queue list */
 	INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
 	/* Fast-path XRI aborted CQ Event work queue list */
@@ -3461,6 +3571,10 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 	if (unlikely(rc))
 		goto out_free_bsmbx;
 
+	rc = lpfc_sli4_fw_cfg_check(phba);
+	if (unlikely(rc))
+		goto out_free_bsmbx;
+
 	/* Set up the hba's configuration parameters. */
 	rc = lpfc_sli4_read_config(phba);
 	if (unlikely(rc))
@@ -3594,8 +3708,10 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
 
 	/* Free the current connect table */
 	list_for_each_entry_safe(conn_entry, next_conn_entry,
-		&phba->fcf_conn_rec_list, list)
+		&phba->fcf_conn_rec_list, list) {
+		list_del_init(&conn_entry->list);
 		kfree(conn_entry);
+	}
 
 	return;
 }
@@ -3824,7 +3940,7 @@ lpfc_free_sgl_list(struct lpfc_hba *phba)
 	rc = lpfc_sli4_remove_all_sgl_pages(phba);
 	if (rc) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-			"2005 Unable to deregister pages from HBA: %x", rc);
+			"2005 Unable to deregister pages from HBA: %x\n", rc);
 	}
 	kfree(phba->sli4_hba.lpfc_els_sgl_array);
 }
@@ -4273,7 +4389,8 @@ lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
 			_dump_buf_data =
 				(char *) __get_free_pages(GFP_KERNEL, pagecnt);
 			if (_dump_buf_data) {
-				printk(KERN_ERR "BLKGRD allocated %d pages for "
+				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+					"9043 BLKGRD: allocated %d pages for "
 				       "_dump_buf_data at 0x%p\n",
 				       (1 << pagecnt), _dump_buf_data);
 				_dump_buf_data_order = pagecnt;
@@ -4284,17 +4401,20 @@ lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
 				--pagecnt;
 		}
 		if (!_dump_buf_data_order)
-			printk(KERN_ERR "BLKGRD ERROR unable to allocate "
+			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+				"9044 BLKGRD: ERROR unable to allocate "
 			       "memory for hexdump\n");
 	} else
-		printk(KERN_ERR "BLKGRD already allocated _dump_buf_data=0x%p"
+		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+			"9045 BLKGRD: already allocated _dump_buf_data=0x%p"
 		       "\n", _dump_buf_data);
 	if (!_dump_buf_dif) {
 		while (pagecnt) {
 			_dump_buf_dif =
 				(char *) __get_free_pages(GFP_KERNEL, pagecnt);
 			if (_dump_buf_dif) {
-				printk(KERN_ERR "BLKGRD allocated %d pages for "
+				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+					"9046 BLKGRD: allocated %d pages for "
 				       "_dump_buf_dif at 0x%p\n",
 				       (1 << pagecnt), _dump_buf_dif);
 				_dump_buf_dif_order = pagecnt;
@@ -4305,10 +4425,12 @@ lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
 				--pagecnt;
 		}
 		if (!_dump_buf_dif_order)
-			printk(KERN_ERR "BLKGRD ERROR unable to allocate "
+			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+			"9047 BLKGRD: ERROR unable to allocate "
 			       "memory for hexdump\n");
 	} else
-		printk(KERN_ERR "BLKGRD already allocated _dump_buf_dif=0x%p\n",
+		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+			"9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n",
 		       _dump_buf_dif);
 }
 
@@ -4512,7 +4634,6 @@ int
 lpfc_sli4_post_status_check(struct lpfc_hba *phba)
 {
 	struct lpfc_register sta_reg, uerrlo_reg, uerrhi_reg, scratchpad;
-	uint32_t onlnreg0, onlnreg1;
 	int i, port_error = -ENODEV;
 
 	if (!phba->sli4_hba.STAregaddr)
@@ -4556,21 +4677,20 @@ lpfc_sli4_post_status_check(struct lpfc_hba *phba)
 			bf_get(lpfc_scratchpad_slirev, &scratchpad),
 			bf_get(lpfc_scratchpad_featurelevel1, &scratchpad),
 			bf_get(lpfc_scratchpad_featurelevel2, &scratchpad));
-
+	phba->sli4_hba.ue_mask_lo = readl(phba->sli4_hba.UEMASKLOregaddr);
+	phba->sli4_hba.ue_mask_hi = readl(phba->sli4_hba.UEMASKHIregaddr);
 	/* With an unrecoverable error, log the error message and return an error */
-	onlnreg0 = readl(phba->sli4_hba.ONLINE0regaddr);
-	onlnreg1 = readl(phba->sli4_hba.ONLINE1regaddr);
-	if ((onlnreg0 != LPFC_ONLINE_NERR) || (onlnreg1 != LPFC_ONLINE_NERR)) {
-		uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr);
-		uerrhi_reg.word0 = readl(phba->sli4_hba.UERRHIregaddr);
-		if (uerrlo_reg.word0 || uerrhi_reg.word0) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-					"1422 HBA Unrecoverable error: "
-					"uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
-					"online0_reg=0x%x, online1_reg=0x%x\n",
-					uerrlo_reg.word0, uerrhi_reg.word0,
-					onlnreg0, onlnreg1);
-		}
+	uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr);
+	uerrhi_reg.word0 = readl(phba->sli4_hba.UERRHIregaddr);
+	if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
+	    (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1422 HBA Unrecoverable error: "
+				"uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
+				"ue_mask_lo_reg=0x%x, ue_mask_hi_reg=0x%x\n",
+				uerrlo_reg.word0, uerrhi_reg.word0,
+				phba->sli4_hba.ue_mask_lo,
+				phba->sli4_hba.ue_mask_hi);
 		return -ENODEV;
 	}
 
@@ -4591,10 +4711,10 @@ lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba)
 					LPFC_UERR_STATUS_LO;
 	phba->sli4_hba.UERRHIregaddr = phba->sli4_hba.conf_regs_memmap_p +
 					LPFC_UERR_STATUS_HI;
-	phba->sli4_hba.ONLINE0regaddr = phba->sli4_hba.conf_regs_memmap_p +
-					LPFC_ONLINE0;
-	phba->sli4_hba.ONLINE1regaddr = phba->sli4_hba.conf_regs_memmap_p +
-					LPFC_ONLINE1;
+	phba->sli4_hba.UEMASKLOregaddr = phba->sli4_hba.conf_regs_memmap_p +
+					LPFC_UE_MASK_LO;
+	phba->sli4_hba.UEMASKHIregaddr = phba->sli4_hba.conf_regs_memmap_p +
+					LPFC_UE_MASK_HI;
 	phba->sli4_hba.SCRATCHPADregaddr = phba->sli4_hba.conf_regs_memmap_p +
 					LPFC_SCRATCHPAD;
 }
@@ -4825,7 +4945,8 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
 		phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
 		phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
 		phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
-		phba->max_vpi = phba->sli4_hba.max_cfg_param.max_vpi;
+		phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
+				(phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
 		phba->max_vports = phba->max_vpi;
 		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
 				"2003 cfg params XRI(B:%d M:%d), "
@@ -4979,10 +5100,9 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
 	/* It does not make sense to have more EQs than WQs */
 	if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) {
 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
-				"2593 The number of FCP EQs (%d) is more "
-				"than the number of FCP WQs (%d), take "
-				"the number of FCP EQs same as than of "
-				"WQs (%d)\n", cfg_fcp_eq_count,
+				"2593 The FCP EQ count(%d) cannot be greater "
+				"than the FCP WQ count(%d), limiting the "
+				"FCP EQ count to %d\n", cfg_fcp_eq_count,
 				phba->cfg_fcp_wq_count,
 				phba->cfg_fcp_wq_count);
 		cfg_fcp_eq_count = phba->cfg_fcp_wq_count;
@@ -5058,15 +5178,6 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
 	}
 	phba->sli4_hba.els_cq = qdesc;
 
-	/* Create slow-path Unsolicited Receive Complete Queue */
-	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
-				      phba->sli4_hba.cq_ecount);
-	if (!qdesc) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"0502 Failed allocate slow-path USOL RX CQ\n");
-		goto out_free_els_cq;
-	}
-	phba->sli4_hba.rxq_cq = qdesc;
 
 	/* Create fast-path FCP Completion Queue(s), one-to-one with EQs */
 	phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
@@ -5075,7 +5186,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"2577 Failed allocate memory for fast-path "
 				"CQ record array\n");
-		goto out_free_rxq_cq;
+		goto out_free_els_cq;
 	}
 	for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
 		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
@@ -5188,9 +5299,6 @@ out_free_fcp_cq:
 		phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL;
 	}
 	kfree(phba->sli4_hba.fcp_cq);
-out_free_rxq_cq:
-	lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq);
-	phba->sli4_hba.rxq_cq = NULL;
 out_free_els_cq:
 	lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
 	phba->sli4_hba.els_cq = NULL;
@@ -5247,10 +5355,6 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
 	lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
 	phba->sli4_hba.dat_rq = NULL;
 
-	/* Release unsolicited receive complete queue */
-	lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq);
-	phba->sli4_hba.rxq_cq = NULL;
-
 	/* Release ELS complete queue */
 	lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
 	phba->sli4_hba.els_cq = NULL;
@@ -5383,25 +5487,6 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
 			phba->sli4_hba.els_cq->queue_id,
 			phba->sli4_hba.sp_eq->queue_id);
 
-	/* Set up slow-path Unsolicited Receive Complete Queue */
-	if (!phba->sli4_hba.rxq_cq) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"0532 USOL RX CQ not allocated\n");
-		goto out_destroy_els_cq;
-	}
-	rc = lpfc_cq_create(phba, phba->sli4_hba.rxq_cq, phba->sli4_hba.sp_eq,
-			    LPFC_RCQ, LPFC_USOL);
-	if (rc) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"0533 Failed setup of slow-path USOL RX CQ: "
-				"rc = 0x%x\n", rc);
-		goto out_destroy_els_cq;
-	}
-	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-			"2587 USL CQ setup: cq-id=%d, parent eq-id=%d\n",
-			phba->sli4_hba.rxq_cq->queue_id,
-			phba->sli4_hba.sp_eq->queue_id);
-
 	/* Set up fast-path FCP Response Complete Queue */
 	for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
 		if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
@@ -5507,7 +5592,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
 		goto out_destroy_fcp_wq;
 	}
 	rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
-			    phba->sli4_hba.rxq_cq, LPFC_USOL);
+			    phba->sli4_hba.els_cq, LPFC_USOL);
 	if (rc) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"0541 Failed setup of Receive Queue: "
@@ -5519,7 +5604,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
 			"parent cq-id=%d\n",
 			phba->sli4_hba.hdr_rq->queue_id,
 			phba->sli4_hba.dat_rq->queue_id,
-			phba->sli4_hba.rxq_cq->queue_id);
+			phba->sli4_hba.els_cq->queue_id);
 	return 0;
 
 out_destroy_fcp_wq:
@@ -5531,8 +5616,6 @@ out_destroy_mbx_wq:
 out_destroy_fcp_cq:
 	for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
 		lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
-	lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq);
-out_destroy_els_cq:
 	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
 out_destroy_mbx_cq:
 	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
@@ -5574,8 +5657,6 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)
 	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
 	/* Unset ELS complete queue */
 	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
-	/* Unset unsolicited receive complete queue */
-	lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq);
 	/* Unset FCP response complete queue */
 	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
 		lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
@@ -6722,6 +6803,7 @@ lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
 {
 	struct lpfc_hba   *phba;
 	struct lpfc_vport *vport = NULL;
+	struct Scsi_Host  *shost = NULL;
 	int error;
 	uint32_t cfg_mode, intr_mode;
 
@@ -6800,6 +6882,7 @@ lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
 		goto out_destroy_shost;
 	}
 
+	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
 	/* Now, trying to enable interrupt and bring up the device */
 	cfg_mode = phba->cfg_use_msi;
 	while (true) {
@@ -6866,6 +6949,8 @@ out_unset_pci_mem_s3:
 	lpfc_sli_pci_mem_unset(phba);
 out_disable_pci_dev:
 	lpfc_disable_pci_dev(phba);
+	if (shost)
+		scsi_host_put(shost);
 out_free_phba:
 	lpfc_hba_free(phba);
 	return error;
@@ -7036,6 +7121,7 @@ lpfc_pci_resume_one_s3(struct pci_dev *pdev)
 	/* Restore device state from PCI config space */
 	pci_set_power_state(pdev, PCI_D0);
 	pci_restore_state(pdev);
+
 	if (pdev->is_busmaster)
 		pci_set_master(pdev);
 
@@ -7070,6 +7156,75 @@ lpfc_pci_resume_one_s3(struct pci_dev *pdev)
 }
 
 /**
+ * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recovery
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is called to prepare the SLI3 device for PCI slot recovery. It
+ * aborts and stops all the ongoing I/Os on the pci device.
+ **/
+static void
+lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
+{
+	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"2723 PCI channel I/O abort preparing for recovery\n");
+	/* Prepare for bringing HBA offline */
+	lpfc_offline_prep(phba);
+	/* Clear sli active flag to prevent sysfs access to HBA */
+	spin_lock_irq(&phba->hbalock);
+	phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
+	spin_unlock_irq(&phba->hbalock);
+	/* Stop and flush all I/Os and bring HBA offline */
+	lpfc_offline(phba);
+}
+
+/**
+ * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is called to prepare the SLI3 device for PCI slot reset. It
+ * disables the device interrupt and the PCI device, and aborts the pending
+ * internal FCP I/Os.
+ **/
+static void
+lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
+{
+	struct lpfc_sli *psli = &phba->sli;
+	struct lpfc_sli_ring  *pring;
+
+	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"2710 PCI channel disable preparing for reset\n");
+	/* Disable interrupt and pci device */
+	lpfc_sli_disable_intr(phba);
+	pci_disable_device(phba->pcidev);
+	/*
+	 * Some I/Os may have been dropped by the firmware.
+	 * Fail the iocbs (I/Os) on the txcmplq so the SCSI layer
+	 * can retry them once the link is re-established.
+	 */
+	pring = &psli->ring[psli->fcp_ring];
+	lpfc_sli_abort_iocb_ring(phba, pring);
+}
+
+/**
+ * lpfc_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is called to prepare the SLI3 device for the PCI slot being
+ * permanently disabled. It blocks the SCSI transport layer traffic and
+ * flushes the pending FCP I/Os.
+ **/
+static void
+lpfc_prep_dev_for_perm_failure(struct lpfc_hba *phba)
+{
+	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"2711 PCI channel permanent disable for failure\n");
+	/* Block all SCSI devices' I/Os on the host */
+	lpfc_scsi_dev_block(phba);
+	/* Clean up all driver's outstanding SCSI I/Os */
+	lpfc_sli_flush_fcp_rings(phba);
+}
+
+/**
  * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
  * @pdev: pointer to PCI device.
  * @state: the current PCI connection state.
@@ -7083,6 +7238,7 @@ lpfc_pci_resume_one_s3(struct pci_dev *pdev)
  * as desired.
  *
  * Return codes
+ * 	PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
  * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
  * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
  **/
@@ -7091,33 +7247,27 @@ lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
 {
 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
-	struct lpfc_sli *psli = &phba->sli;
-	struct lpfc_sli_ring  *pring;
 
-	if (state == pci_channel_io_perm_failure) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"0472 PCI channel I/O permanent failure\n");
-		/* Block all SCSI devices' I/Os on the host */
-		lpfc_scsi_dev_block(phba);
-		/* Clean up all driver's outstanding SCSI I/Os */
-		lpfc_sli_flush_fcp_rings(phba);
+	switch (state) {
+	case pci_channel_io_normal:
+		/* Non-fatal error, prepare for recovery */
+		lpfc_sli_prep_dev_for_recover(phba);
+		return PCI_ERS_RESULT_CAN_RECOVER;
+	case pci_channel_io_frozen:
+		/* Fatal error, prepare for slot reset */
+		lpfc_sli_prep_dev_for_reset(phba);
+		return PCI_ERS_RESULT_NEED_RESET;
+	case pci_channel_io_perm_failure:
+		/* Permanent failure, prepare for device down */
+		lpfc_prep_dev_for_perm_failure(phba);
 		return PCI_ERS_RESULT_DISCONNECT;
+	default:
+		/* Unknown state, prepare and request slot reset */
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0472 Unknown PCI error state: x%x\n", state);
+		lpfc_sli_prep_dev_for_reset(phba);
+		return PCI_ERS_RESULT_NEED_RESET;
 	}
-
-	pci_disable_device(pdev);
-	/*
-	 * There may be I/Os dropped by the firmware.
-	 * Error iocb (I/O) on txcmplq and let the SCSI layer
-	 * retry it after re-establishing link.
-	 */
-	pring = &psli->ring[psli->fcp_ring];
-	lpfc_sli_abort_iocb_ring(phba, pring);
-
-	/* Disable interrupt */
-	lpfc_sli_disable_intr(phba);
-
-	/* Request a slot reset. */
-	return PCI_ERS_RESULT_NEED_RESET;
 }
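For context, the prep routines above back the driver's PCI AER callbacks, which a PCI driver attaches through its err_handler field. A sketch of that wiring; lpfc itself routes through SLI-revision dispatch wrappers rather than the _s3 entry points directly:

	static struct pci_error_handlers lpfc_err_handler = {
		.error_detected	= lpfc_io_error_detected_s3,
		.slot_reset	= lpfc_io_slot_reset_s3,
		.resume		= lpfc_io_resume_s3,
	};

	/* referenced from the driver's struct pci_driver as:
	 *	.err_handler = &lpfc_err_handler,
	 */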
 
 /**
@@ -7197,7 +7347,12 @@ lpfc_io_resume_s3(struct pci_dev *pdev)
 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
 
+	/* Bring the device online */
 	lpfc_online(phba);
+
+	/* Clean up Advanced Error Reporting (AER) if needed */
+	if (phba->hba_flag & HBA_AER_ENABLED)
+		pci_cleanup_aer_uncorrect_error_status(pdev);
 }
 
 /**
@@ -7213,15 +7368,15 @@ lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
 
 	if (phba->sli_rev == LPFC_SLI_REV4) {
 		if (max_xri <= 100)
-			return 4;
+			return 10;
 		else if (max_xri <= 256)
-			return 8;
+			return 25;
 		else if (max_xri <= 512)
-			return 16;
+			return 50;
 		else if (max_xri <= 1024)
-			return 32;
+			return 100;
 		else
-			return 48;
+			return 150;
 	} else
 		return 0;
 }
@@ -7249,6 +7404,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
 {
 	struct lpfc_hba   *phba;
 	struct lpfc_vport *vport = NULL;
+	struct Scsi_Host  *shost = NULL;
 	int error;
 	uint32_t cfg_mode, intr_mode;
 	int mcnt;
@@ -7329,6 +7485,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
 		goto out_destroy_shost;
 	}
 
+	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
 	/* Now, trying to enable interrupt and bring up the device */
 	cfg_mode = phba->cfg_use_msi;
 	while (true) {
@@ -7397,6 +7554,8 @@ out_unset_pci_mem_s4:
 	lpfc_sli4_pci_mem_unset(phba);
 out_disable_pci_dev:
 	lpfc_disable_pci_dev(phba);
+	if (shost)
+		scsi_host_put(shost);
 out_free_phba:
 	lpfc_hba_free(phba);
 	return error;
@@ -7971,6 +8130,10 @@ static struct pci_device_id lpfc_id_table[] = {
 		PCI_ANY_ID, PCI_ANY_ID, },
 	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK,
 		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TOMCAT,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FALCON,
+		PCI_ANY_ID, PCI_ANY_ID, },
 	{ 0 }
 };
 
@@ -8053,15 +8216,15 @@ lpfc_exit(void)
 	if (lpfc_enable_npiv)
 		fc_release_transport(lpfc_vport_transport_template);
 	if (_dump_buf_data) {
-		printk(KERN_ERR "BLKGRD freeing %lu pages for _dump_buf_data "
-				"at 0x%p\n",
+		printk(KERN_ERR	"9062 BLKGRD: freeing %lu pages for "
+				"_dump_buf_data at 0x%p\n",
 				(1L << _dump_buf_data_order), _dump_buf_data);
 		free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order);
 	}
 
 	if (_dump_buf_dif) {
-		printk(KERN_ERR "BLKGRD freeing %lu pages for _dump_buf_dif "
-				"at 0x%p\n",
+		printk(KERN_ERR	"9049 BLKGRD: freeing %lu pages for "
+				"_dump_buf_dif at 0x%p\n",
 				(1L << _dump_buf_dif_order), _dump_buf_dif);
 		free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
 	}
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 1ab405902a18..a9afd8b94b6a 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -25,8 +25,8 @@
 
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_transport_fc.h>
-
 #include <scsi/scsi.h>
+#include <scsi/fc/fc_fs.h>
 
 #include "lpfc_hw4.h"
 #include "lpfc_hw.h"
@@ -820,6 +820,10 @@ lpfc_reg_vpi(struct lpfc_vport *vport, LPFC_MBOXQ_t *pmb)
 	mb->un.varRegVpi.vpi = vport->vpi + vport->phba->vpi_base;
 	mb->un.varRegVpi.sid = vport->fc_myDID;
 	mb->un.varRegVpi.vfi = vport->vfi + vport->phba->vfi_base;
+	memcpy(mb->un.varRegVpi.wwn, &vport->fc_portname,
+	       sizeof(struct lpfc_name));
+	mb->un.varRegVpi.wwn[0] = cpu_to_le32(mb->un.varRegVpi.wwn[0]);
+	mb->un.varRegVpi.wwn[1] = cpu_to_le32(mb->un.varRegVpi.wwn[1]);
 
 	mb->mbxCommand = MBX_REG_VPI;
 	mb->mbxOwner = OWN_HOST;
@@ -849,7 +853,10 @@ lpfc_unreg_vpi(struct lpfc_hba *phba, uint16_t vpi, LPFC_MBOXQ_t *pmb)
 	MAILBOX_t *mb = &pmb->u.mb;
 	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
 
-	mb->un.varUnregVpi.vpi = vpi + phba->vpi_base;
+	if (phba->sli_rev < LPFC_SLI_REV4)
+		mb->un.varUnregVpi.vpi = vpi + phba->vpi_base;
+	else
+		mb->un.varUnregVpi.sli4_vpi = vpi + phba->vpi_base;
 
 	mb->mbxCommand = MBX_UNREG_VPI;
 	mb->mbxOwner = OWN_HOST;
@@ -1132,7 +1139,7 @@ lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb)
 	/* Otherwise we setup specific rctl / type masks for this ring */
 	for (i = 0; i < pring->num_mask; i++) {
 		mb->un.varCfgRing.rrRegs[i].rval = pring->prt[i].rctl;
-		if (mb->un.varCfgRing.rrRegs[i].rval != FC_ELS_REQ)
+		if (mb->un.varCfgRing.rrRegs[i].rval != FC_RCTL_ELS_REQ)
 			mb->un.varCfgRing.rrRegs[i].rmask = 0xff;
 		else
 			mb->un.varCfgRing.rrRegs[i].rmask = 0xfe;
@@ -1654,9 +1661,12 @@ lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
 	/* Allocate record for keeping SGE virtual addresses */
 	mbox->sge_array = kmalloc(sizeof(struct lpfc_mbx_nembed_sge_virt),
 				  GFP_KERNEL);
-	if (!mbox->sge_array)
+	if (!mbox->sge_array) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+				"2527 Failed to allocate non-embedded SGE "
+				"array.\n");
 		return 0;
-
+	}
 	for (pagen = 0, alloc_len = 0; pagen < pcount; pagen++) {
 		/* The DMA memory is always allocated in the length of a
 		 * page even though the last SGE might not fill up to a
@@ -1753,11 +1763,6 @@ lpfc_request_features(struct lpfc_hba *phba, struct lpfcMboxq *mboxq)
 	/* Set up host requested features. */
 	bf_set(lpfc_mbx_rq_ftr_rq_fcpi, &mboxq->u.mqe.un.req_ftrs, 1);
 
-	if (phba->cfg_enable_fip)
-		bf_set(lpfc_mbx_rq_ftr_rq_ifip, &mboxq->u.mqe.un.req_ftrs, 0);
-	else
-		bf_set(lpfc_mbx_rq_ftr_rq_ifip, &mboxq->u.mqe.un.req_ftrs, 1);
-
 	/* Enable DIF (block guard) only if configured to do so. */
 	if (phba->cfg_enable_bg)
 		bf_set(lpfc_mbx_rq_ftr_rq_dif, &mboxq->u.mqe.un.req_ftrs, 1);
@@ -1817,6 +1822,9 @@ lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
 	bf_set(lpfc_reg_vfi_vfi, reg_vfi, vport->vfi + vport->phba->vfi_base);
 	bf_set(lpfc_reg_vfi_fcfi, reg_vfi, vport->phba->fcf.fcfi);
 	bf_set(lpfc_reg_vfi_vpi, reg_vfi, vport->vpi + vport->phba->vpi_base);
+	memcpy(reg_vfi->wwn, &vport->fc_portname, sizeof(struct lpfc_name));
+	reg_vfi->wwn[0] = cpu_to_le32(reg_vfi->wwn[0]);
+	reg_vfi->wwn[1] = cpu_to_le32(reg_vfi->wwn[1]);
 	reg_vfi->bde.addrHigh = putPaddrHigh(phys);
 	reg_vfi->bde.addrLow = putPaddrLow(phys);
 	reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam);
@@ -1850,7 +1858,7 @@ lpfc_init_vpi(struct lpfc_hba *phba, struct lpfcMboxq *mbox, uint16_t vpi)
 /**
  * lpfc_unreg_vfi - Initialize the UNREG_VFI mailbox command
  * @mbox: pointer to lpfc mbox command to initialize.
- * @vfi: VFI to be unregistered.
+ * @vport: vport associated with the VF.
  *
  * The UNREG_VFI mailbox command causes the SLI Host to put a virtual fabric
  * (logical NPort) into the inactive state. The SLI Host must have logged out
@@ -1859,11 +1867,12 @@ lpfc_init_vpi(struct lpfc_hba *phba, struct lpfcMboxq *mbox, uint16_t vpi)
  * fabric inactive.
  **/
 void
-lpfc_unreg_vfi(struct lpfcMboxq *mbox, uint16_t vfi)
+lpfc_unreg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
 {
 	memset(mbox, 0, sizeof(*mbox));
 	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_VFI);
-	bf_set(lpfc_unreg_vfi_vfi, &mbox->u.mqe.un.unreg_vfi, vfi);
+	bf_set(lpfc_unreg_vfi_vfi, &mbox->u.mqe.un.unreg_vfi,
+	       vport->vfi + vport->phba->vfi_base);
 }
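A typical issue path for this mailbox, mirroring lpfc_unregister_unused_fcf() earlier in the series (a sketch with error logging trimmed):

	LPFC_MBOXQ_t *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return;
	lpfc_unreg_vfi(mbox, phba->pport);
	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_unregister_vfi_cmpl;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED)
		mempool_free(mbox, phba->mbox_mem_pool);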
 
 /**
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 3e74136f1ede..2ed6af194932 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -1223,6 +1223,12 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
 	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
 		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
 		   (ndlp == (struct lpfc_nodelist *) mb->context2)) {
+			if (phba->sli_rev == LPFC_SLI_REV4) {
+				spin_unlock_irq(&phba->hbalock);
+				lpfc_sli4_free_rpi(phba,
+					mb->u.mb.un.varRegLogin.rpi);
+				spin_lock_irq(&phba->hbalock);
+			}
 			mp = (struct lpfc_dmabuf *) (mb->context1);
 			if (mp) {
 				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
@@ -1230,6 +1236,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
 			}
 			lpfc_nlp_put(ndlp);
 			list_del(&mb->list);
+			phba->sli.mboxq_cnt--;
 			mempool_free(mb, phba->mbox_mem_pool);
 		}
 	}
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index c88f59f0ce30..a246410ce9df 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -59,22 +59,26 @@ static char *dif_op_str[] = {
 };
 static void
 lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
+static void
+lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
 
 static void
-lpfc_debug_save_data(struct scsi_cmnd *cmnd)
+lpfc_debug_save_data(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
 {
 	void *src, *dst;
 	struct scatterlist *sgde = scsi_sglist(cmnd);
 
 	if (!_dump_buf_data) {
-		printk(KERN_ERR "BLKGRD ERROR %s _dump_buf_data is NULL\n",
+		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+			"9050 BLKGRD: ERROR %s _dump_buf_data is NULL\n",
 				__func__);
 		return;
 	}
 
 
 	if (!sgde) {
-		printk(KERN_ERR "BLKGRD ERROR: data scatterlist is null\n");
+		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+			"9051 BLKGRD: ERROR: data scatterlist is null\n");
 		return;
 	}
 
@@ -88,19 +92,21 @@ lpfc_debug_save_data(struct scsi_cmnd *cmnd)
 }
 
 static void
-lpfc_debug_save_dif(struct scsi_cmnd *cmnd)
+lpfc_debug_save_dif(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
 {
 	void *src, *dst;
 	struct scatterlist *sgde = scsi_prot_sglist(cmnd);
 
 	if (!_dump_buf_dif) {
-		printk(KERN_ERR "BLKGRD ERROR %s _dump_buf_data is NULL\n",
+		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+			"9052 BLKGRD: ERROR %s _dump_buf_data is NULL\n",
 				__func__);
 		return;
 	}
 
 	if (!sgde) {
-		printk(KERN_ERR "BLKGRD ERROR: prot scatterlist is null\n");
+		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+			"9053 BLKGRD: ERROR: prot scatterlist is null\n");
 		return;
 	}
 
@@ -242,6 +248,36 @@ lpfc_send_sdev_queuedepth_change_event(struct lpfc_hba *phba,
 }
 
 /**
+ * lpfc_change_queue_depth - Alter scsi device queue depth
+ * @sdev: Pointer to the scsi device on which to change the queue depth.
+ * @qdepth: New queue depth to set the sdev to.
+ * @reason: The reason for the queue depth change.
+ *
+ * This function is called by the midlayer and the LLD to alter the queue
+ * depth for a scsi device. This function sets the queue depth to the new
+ * value and sends an event out to log the queue depth change.
+ **/
+int
+lpfc_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
+{
+	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+	struct lpfc_rport_data *rdata;
+	unsigned long new_queue_depth, old_queue_depth;
+
+	old_queue_depth = sdev->queue_depth;
+	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
+	new_queue_depth = sdev->queue_depth;
+	rdata = sdev->hostdata;
+	if (rdata)
+		lpfc_send_sdev_queuedepth_change_event(phba, vport,
+						       rdata->pnode, sdev->lun,
+						       old_queue_depth,
+						       new_queue_depth);
+	return sdev->queue_depth;
+}
+
+/**
  * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
  * @phba: The Hba for which this call is being executed.
  *
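lpfc_change_queue_depth() becomes the single path for queue-depth changes and is wired into both host templates further down as .change_queue_depth. A hedged sketch of how a 2.6.32-era LLD hooks this midlayer callback (my_change_queue_depth and my_template are illustrative; the SCSI_QDEPTH_* reason codes come from the midlayer change accompanying this series):

	static int my_change_queue_depth(struct scsi_device *sdev, int qdepth,
					 int reason)
	{
		if (reason != SCSI_QDEPTH_DEFAULT && reason != SCSI_QDEPTH_RAMP_UP)
			return -EOPNOTSUPP;	/* unhandled reasons bounce back */
		scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
		return sdev->queue_depth;	/* report the depth actually set */
	}

	static struct scsi_host_template my_template = {
		/* ... */
		.change_queue_depth	= my_change_queue_depth,
	};

Returning the resulting depth lets sysfs writes and the driver's internal ramp-up/ramp-down logic share one code path.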
@@ -305,8 +341,10 @@ lpfc_rampup_queue_depth(struct lpfc_vport  *vport,
 	if (vport->cfg_lun_queue_depth <= queue_depth)
 		return;
 	spin_lock_irqsave(&phba->hbalock, flags);
-	if (((phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) > jiffies) ||
-	 ((phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL ) > jiffies)) {
+	if (time_before(jiffies,
+			phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) ||
+	    time_before(jiffies,
+			phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL)) {
 		spin_unlock_irqrestore(&phba->hbalock, flags);
 		return;
 	}
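Replacing the open-coded "timestamp + interval > jiffies" comparison with time_before() matters because jiffies wraps: the macros in <linux/jiffies.h> compare via signed subtraction, which stays correct across the wrap. A small sketch:

	#include <linux/jiffies.h>

	unsigned long deadline = last_event + QUEUE_RAMP_UP_INTERVAL;

	/* wrap-safe: true iff jiffies is still before the deadline */
	if (time_before(jiffies, deadline))
		return;		/* inside the hold-off window, do nothing */

The open-coded form evaluates incorrectly for the whole wrap period, which on a 32-bit HZ=1000 system comes around roughly every 49.7 days.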
@@ -338,10 +376,9 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
 	struct lpfc_vport **vports;
 	struct Scsi_Host  *shost;
 	struct scsi_device *sdev;
-	unsigned long new_queue_depth, old_queue_depth;
+	unsigned long new_queue_depth;
 	unsigned long num_rsrc_err, num_cmd_success;
 	int i;
-	struct lpfc_rport_data *rdata;
 
 	num_rsrc_err = atomic_read(&phba->num_rsrc_err);
 	num_cmd_success = atomic_read(&phba->num_cmd_success);
@@ -359,22 +396,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
 				else
 					new_queue_depth = sdev->queue_depth -
 								new_queue_depth;
-				old_queue_depth = sdev->queue_depth;
-				if (sdev->ordered_tags)
-					scsi_adjust_queue_depth(sdev,
-							MSG_ORDERED_TAG,
-							new_queue_depth);
-				else
-					scsi_adjust_queue_depth(sdev,
-							MSG_SIMPLE_TAG,
-							new_queue_depth);
-				rdata = sdev->hostdata;
-				if (rdata)
-					lpfc_send_sdev_queuedepth_change_event(
-						phba, vports[i],
-						rdata->pnode,
-						sdev->lun, old_queue_depth,
-						new_queue_depth);
+				lpfc_change_queue_depth(sdev, new_queue_depth,
+							SCSI_QDEPTH_DEFAULT);
 			}
 		}
 	lpfc_destroy_vport_work_array(phba, vports);
@@ -398,7 +421,6 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
 	struct Scsi_Host  *shost;
 	struct scsi_device *sdev;
 	int i;
-	struct lpfc_rport_data *rdata;
 
 	vports = lpfc_create_vport_work_array(phba);
 	if (vports != NULL)
@@ -408,22 +430,9 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
 				if (vports[i]->cfg_lun_queue_depth <=
 				    sdev->queue_depth)
 					continue;
-				if (sdev->ordered_tags)
-					scsi_adjust_queue_depth(sdev,
-							MSG_ORDERED_TAG,
-							sdev->queue_depth+1);
-				else
-					scsi_adjust_queue_depth(sdev,
-							MSG_SIMPLE_TAG,
-							sdev->queue_depth+1);
-				rdata = sdev->hostdata;
-				if (rdata)
-					lpfc_send_sdev_queuedepth_change_event(
-						phba, vports[i],
-						rdata->pnode,
-						sdev->lun,
-						sdev->queue_depth - 1,
-						sdev->queue_depth);
+				lpfc_change_queue_depth(sdev,
+							sdev->queue_depth+1,
+							SCSI_QDEPTH_RAMP_UP);
 			}
 		}
 	lpfc_destroy_vport_work_array(phba, vports);
@@ -589,7 +598,7 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
 		iocb->ulpClass = CLASS3;
 		psb->status = IOSTAT_SUCCESS;
 		/* Put it back into the SCSI buffer list */
-		lpfc_release_scsi_buf_s4(phba, psb);
+		lpfc_release_scsi_buf_s3(phba, psb);
 
 	}
 
@@ -1024,7 +1033,8 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
 
 		lpfc_cmd->seg_cnt = nseg;
 		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
-			printk(KERN_ERR "%s: Too many sg segments from "
+			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+				"9064 BLKGRD: %s: Too many sg segments from "
 			       "dma_map_sg.  Config %d, seg_cnt %d\n",
 			       __func__, phba->cfg_sg_seg_cnt,
 			       lpfc_cmd->seg_cnt);
@@ -1112,7 +1122,7 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
  * with the cmd
  */
 static int
-lpfc_sc_to_sli_prof(struct scsi_cmnd *sc)
+lpfc_sc_to_sli_prof(struct lpfc_hba *phba, struct scsi_cmnd *sc)
 {
 	uint8_t guard_type = scsi_host_get_guard(sc->device->host);
 	uint8_t ret_prof = LPFC_PROF_INVALID;
@@ -1136,7 +1146,8 @@ lpfc_sc_to_sli_prof(struct scsi_cmnd *sc)
 
 		case SCSI_PROT_NORMAL:
 		default:
-			printk(KERN_ERR "Bad op/guard:%d/%d combination\n",
+			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+				"9063 BLKGRD: Bad op/guard:%d/%d combination\n",
 					scsi_get_prot_op(sc), guard_type);
 			break;
 
@@ -1157,7 +1168,8 @@ lpfc_sc_to_sli_prof(struct scsi_cmnd *sc)
 		case SCSI_PROT_WRITE_STRIP:
 		case SCSI_PROT_NORMAL:
 		default:
-			printk(KERN_ERR "Bad op/guard:%d/%d combination\n",
+			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+				"9075 BLKGRD: Bad op/guard:%d/%d combination\n",
 					scsi_get_prot_op(sc), guard_type);
 			break;
 		}
@@ -1259,7 +1271,7 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
 	uint16_t apptagmask, apptagval;
 
 	pde1 = (struct lpfc_pde *) bpl;
-	prof = lpfc_sc_to_sli_prof(sc);
+	prof = lpfc_sc_to_sli_prof(phba, sc);
 
 	if (prof == LPFC_PROF_INVALID)
 		goto out;
@@ -1359,7 +1371,7 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
 		return 0;
 	}
 
-	prof = lpfc_sc_to_sli_prof(sc);
+	prof = lpfc_sc_to_sli_prof(phba, sc);
 	if (prof == LPFC_PROF_INVALID)
 		goto out;
 
@@ -1408,7 +1420,8 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
 		subtotal = 0; /* total bytes processed for current prot grp */
 		while (!pgdone) {
 			if (!sgde) {
-				printk(KERN_ERR "%s Invalid data segment\n",
+				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+					"9065 BLKGRD: %s Invalid data segment\n",
 						__func__);
 				return 0;
 			}
@@ -1462,7 +1475,8 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
 			reftag += protgrp_blks;
 		} else {
 			/* if we're here, we have a bug */
-			printk(KERN_ERR "BLKGRD: bug in %s\n", __func__);
+			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+				"9054 BLKGRD: bug in %s\n", __func__);
 		}
 
 	} while (!alldone);
@@ -1544,8 +1558,10 @@ lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba,
 
 		lpfc_cmd->seg_cnt = datasegcnt;
 		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
-			printk(KERN_ERR "%s: Too many sg segments from "
-					"dma_map_sg.  Config %d, seg_cnt %d\n",
+			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+					"9067 BLKGRD: %s: Too many sg segments"
+					" from dma_map_sg.  Config %d, seg_cnt"
+					" %d\n",
 					__func__, phba->cfg_sg_seg_cnt,
 					lpfc_cmd->seg_cnt);
 			scsi_dma_unmap(scsi_cmnd);
@@ -1579,8 +1595,9 @@ lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba,
 			lpfc_cmd->prot_seg_cnt = protsegcnt;
 			if (lpfc_cmd->prot_seg_cnt
 			    > phba->cfg_prot_sg_seg_cnt) {
-				printk(KERN_ERR "%s: Too many prot sg segments "
-						"from dma_map_sg.  Config %d,"
+				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+					"9068 BLKGRD: %s: Too many prot sg "
+					"segments from dma_map_sg.  Config %d,"
 						"prot_seg_cnt %d\n", __func__,
 						phba->cfg_prot_sg_seg_cnt,
 						lpfc_cmd->prot_seg_cnt);
@@ -1671,23 +1688,26 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
 	uint32_t bgstat = bgf->bgstat;
 	uint64_t failing_sector = 0;
 
-	printk(KERN_ERR "BG ERROR in cmd 0x%x lba 0x%llx blk cnt 0x%x "
+	lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9069 BLKGRD: BG ERROR in cmd"
+			" 0x%x lba 0x%llx blk cnt 0x%x "
 			"bgstat=0x%x bghm=0x%x\n",
 			cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd),
 			blk_rq_sectors(cmd->request), bgstat, bghm);
 
 	spin_lock(&_dump_buf_lock);
 	if (!_dump_buf_done) {
-		printk(KERN_ERR "Saving Data for %u blocks to debugfs\n",
+		lpfc_printf_log(phba, KERN_ERR, LOG_BG,  "9070 BLKGRD: Saving"
+			" Data for %u blocks to debugfs\n",
 				(cmd->cmnd[7] << 8 | cmd->cmnd[8]));
-		lpfc_debug_save_data(cmd);
+		lpfc_debug_save_data(phba, cmd);
 
 		/* If we have a prot sgl, save the DIF buffer */
 		if (lpfc_prot_group_type(phba, cmd) ==
 				LPFC_PG_TYPE_DIF_BUF) {
-			printk(KERN_ERR "Saving DIF for %u blocks to debugfs\n",
-					(cmd->cmnd[7] << 8 | cmd->cmnd[8]));
-			lpfc_debug_save_dif(cmd);
+			lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9071 BLKGRD: "
+				"Saving DIF for %u blocks to debugfs\n",
+				(cmd->cmnd[7] << 8 | cmd->cmnd[8]));
+			lpfc_debug_save_dif(phba, cmd);
 		}
 
 		_dump_buf_done = 1;
@@ -1696,15 +1716,17 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
 
 	if (lpfc_bgs_get_invalid_prof(bgstat)) {
 		cmd->result = ScsiResult(DID_ERROR, 0);
-		printk(KERN_ERR "Invalid BlockGuard profile. bgstat:0x%x\n",
-				bgstat);
+		lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9072 BLKGRD: Invalid"
+			" BlockGuard profile. bgstat:0x%x\n",
+			bgstat);
 		ret = (-1);
 		goto out;
 	}
 
 	if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
 		cmd->result = ScsiResult(DID_ERROR, 0);
-		printk(KERN_ERR "Invalid BlockGuard DIF Block. bgstat:0x%x\n",
+		lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9073 BLKGRD: "
+				"Invalid BlockGuard DIF Block. bgstat:0x%x\n",
 				bgstat);
 		ret = (-1);
 		goto out;
@@ -1718,7 +1740,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
 		cmd->result = DRIVER_SENSE << 24
 			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
 		phba->bg_guard_err_cnt++;
-		printk(KERN_ERR "BLKGRD: guard_tag error\n");
+		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+			"9055 BLKGRD: guard_tag error\n");
 	}
 
 	if (lpfc_bgs_get_reftag_err(bgstat)) {
@@ -1730,7 +1753,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
 			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
 
 		phba->bg_reftag_err_cnt++;
-		printk(KERN_ERR "BLKGRD: ref_tag error\n");
+		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+			"9056 BLKGRD: ref_tag error\n");
 	}
 
 	if (lpfc_bgs_get_apptag_err(bgstat)) {
@@ -1742,7 +1766,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
 			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
 
 		phba->bg_apptag_err_cnt++;
-		printk(KERN_ERR "BLKGRD: app_tag error\n");
+		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+			"9061 BLKGRD: app_tag error\n");
 	}
 
 	if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
@@ -1763,7 +1788,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
 	if (!ret) {
 		/* No error was reported - problem in FW? */
 		cmd->result = ScsiResult(DID_ERROR, 0);
-		printk(KERN_ERR "BLKGRD: no errors reported!\n");
+		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+			"9057 BLKGRD: no errors reported!\n");
 	}
 
 out:
@@ -1822,9 +1848,10 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
 
 		lpfc_cmd->seg_cnt = nseg;
 		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
-			printk(KERN_ERR "%s: Too many sg segments from "
-			       "dma_map_sg.  Config %d, seg_cnt %d\n",
-			       __func__, phba->cfg_sg_seg_cnt,
+			lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9074 BLKGRD:"
+				" %s: Too many sg segments from "
+				"dma_map_sg.  Config %d, seg_cnt %d\n",
+				__func__, phba->cfg_sg_seg_cnt,
 			       lpfc_cmd->seg_cnt);
 			scsi_dma_unmap(scsi_cmnd);
 			return 1;
@@ -2050,6 +2077,21 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 		goto out;
 	}
 
+	if (resp_info & RSP_LEN_VALID) {
+		rsplen = be32_to_cpu(fcprsp->rspRspLen);
+		if ((rsplen != 0 && rsplen != 4 && rsplen != 8) ||
+		    (fcprsp->rspInfo3 != RSP_NO_FAILURE)) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+				 "2719 Invalid response length: "
+				 "tgt x%x lun x%x cmnd x%x rsplen x%x\n",
+				 cmnd->device->id,
+				 cmnd->device->lun, cmnd->cmnd[0],
+				 rsplen);
+			host_status = DID_ERROR;
+			goto out;
+		}
+	}
+
 	if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
 		uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
 		if (snslen > SCSI_SENSE_BUFFERSIZE)
@@ -2074,15 +2116,6 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 			 be32_to_cpu(fcprsp->rspRspLen),
 			 fcprsp->rspInfo3);
 
-	if (resp_info & RSP_LEN_VALID) {
-		rsplen = be32_to_cpu(fcprsp->rspRspLen);
-		if ((rsplen != 0 && rsplen != 4 && rsplen != 8) ||
-		    (fcprsp->rspInfo3 != RSP_NO_FAILURE)) {
-			host_status = DID_ERROR;
-			goto out;
-		}
-	}
-
 	scsi_set_resid(cmnd, 0);
 	if (resp_info & RESID_UNDER) {
 		scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));
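The two hunks above move the FCP_RSP length validation ahead of any use of the response, so a malformed rspRspLen is rejected (with the new 2719 message) before the sense data or residual fields are trusted. Per the FCP spec, the FCP_RSP_INFO field is 0, 4, or 8 bytes long, with the response code in its third byte (rspInfo3 here). A sketch of the same check as a helper, reusing the driver's RSP_NO_FAILURE constant (the helper name is illustrative):

	/* Hypothetical helper: accept only spec-conformant FCP_RSP_INFO */
	static bool fcp_rsp_info_ok(uint32_t rsplen, uint8_t rsp_code)
	{
		if (rsplen != 0 && rsplen != 4 && rsplen != 8)
			return false;		/* malformed length */
		return rsp_code == RSP_NO_FAILURE;
	}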
@@ -2180,7 +2213,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
 	int result;
 	struct scsi_device *tmp_sdev;
-	int depth = 0;
+	int depth;
 	unsigned long flags;
 	struct lpfc_fast_path_event *fast_path_evt;
 	struct Scsi_Host *shost = cmd->device->host;
@@ -2264,7 +2297,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 					lpfc_printf_vlog(vport, KERN_WARNING,
 							LOG_BG,
 							"9031 non-zero BGSTAT "
-							"on unprotected cmd");
+							"on unprotected cmd\n");
 				}
 			}
 
@@ -2347,67 +2380,29 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 		return;
 	}
 
-
 	if (!result)
 		lpfc_rampup_queue_depth(vport, queue_depth);
 
-	if (!result && pnode && NLP_CHK_NODE_ACT(pnode) &&
-	   ((jiffies - pnode->last_ramp_up_time) >
-		LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
-	   ((jiffies - pnode->last_q_full_time) >
-		LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
-	   (vport->cfg_lun_queue_depth > queue_depth)) {
-		shost_for_each_device(tmp_sdev, shost) {
-			if (vport->cfg_lun_queue_depth > tmp_sdev->queue_depth){
-				if (tmp_sdev->id != scsi_id)
-					continue;
-				if (tmp_sdev->ordered_tags)
-					scsi_adjust_queue_depth(tmp_sdev,
-						MSG_ORDERED_TAG,
-						tmp_sdev->queue_depth+1);
-				else
-					scsi_adjust_queue_depth(tmp_sdev,
-						MSG_SIMPLE_TAG,
-						tmp_sdev->queue_depth+1);
-
-				pnode->last_ramp_up_time = jiffies;
-			}
-		}
-		lpfc_send_sdev_queuedepth_change_event(phba, vport, pnode,
-			0xFFFFFFFF,
-			queue_depth , queue_depth + 1);
-	}
-
 	/*
 	 * Check for queue full.  If the lun is reporting queue full, then
 	 * back off the lun queue depth to prevent target overloads.
 	 */
 	if (result == SAM_STAT_TASK_SET_FULL && pnode &&
 	    NLP_CHK_NODE_ACT(pnode)) {
-		pnode->last_q_full_time = jiffies;
-
 		shost_for_each_device(tmp_sdev, shost) {
 			if (tmp_sdev->id != scsi_id)
 				continue;
 			depth = scsi_track_queue_full(tmp_sdev,
-					tmp_sdev->queue_depth - 1);
-		}
-		/*
-		 * The queue depth cannot be lowered any more.
-		 * Modify the returned error code to store
-		 * the final depth value set by
-		 * scsi_track_queue_full.
-		 */
-		if (depth == -1)
-			depth = shost->cmd_per_lun;
-
-		if (depth) {
+						      tmp_sdev->queue_depth-1);
+			if (depth <= 0)
+				continue;
 			lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
 					 "0711 detected queue full - lun queue "
 					 "depth adjusted to %d.\n", depth);
 			lpfc_send_sdev_queuedepth_change_event(phba, vport,
-				pnode, 0xFFFFFFFF,
-				depth+1, depth);
+							       pnode,
+							       tmp_sdev->lun,
+							       depth+1, depth);
 		}
 	}
 
@@ -2745,7 +2740,9 @@ void lpfc_poll_timeout(unsigned long ptr)
 	struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
 
 	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
-		lpfc_sli_poll_fcp_ring (phba);
+		lpfc_sli_handle_fast_ring_event(phba,
+			&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
+
 		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
 			lpfc_poll_rearm_timer(phba);
 	}
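With lpfc_sli_poll_fcp_ring() removed (see the lpfc_sli.c hunks further down), the poll timer now drains the FCP ring through the common fast-ring handler and re-arms itself while ring interrupts stay disabled. A sketch of the general poll-timer shape, using the 2.6.32 timer API (everything except the timer calls is illustrative):

	static void my_poll_timeout(unsigned long ptr)
	{
		struct my_hba *hba = (struct my_hba *)ptr;

		drain_fcp_ring(hba);		/* process completions */
		if (hba->ring_irqs_disabled)	/* still in polled mode? */
			mod_timer(&hba->poll_timer,
				  jiffies + msecs_to_jiffies(POLL_MS));
	}

	setup_timer(&hba->poll_timer, my_poll_timeout, (unsigned long)hba);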
@@ -2771,7 +2768,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
 	struct lpfc_hba   *phba = vport->phba;
 	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
-	struct lpfc_nodelist *ndlp = rdata->pnode;
+	struct lpfc_nodelist *ndlp;
 	struct lpfc_scsi_buf *lpfc_cmd;
 	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
 	int err;
@@ -2781,13 +2778,15 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
 		cmnd->result = err;
 		goto out_fail_command;
 	}
+	ndlp = rdata->pnode;
 
 	if (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
 		scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
 
-		printk(KERN_ERR "BLKGRD ERROR: rcvd protected cmd:%02x op:%02x "
-				"str=%s without registering for BlockGuard - "
-				"Rejecting command\n",
+		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+				"9058 BLKGRD: ERROR: rcvd protected cmd:%02x"
+				" op:%02x str=%s without registering for"
+				" BlockGuard - Rejecting command\n",
 				cmnd->cmnd[0], scsi_get_prot_op(cmnd),
 				dif_op_str[scsi_get_prot_op(cmnd)]);
 		goto out_fail_command;
@@ -2827,61 +2826,66 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
 	cmnd->scsi_done = done;
 
 	if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
-		lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
+		if (vport->phba->cfg_enable_bg) {
+			lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
 				"9033 BLKGRD: rcvd protected cmd:%02x op:%02x "
 				"str=%s\n",
 				cmnd->cmnd[0], scsi_get_prot_op(cmnd),
 				dif_op_str[scsi_get_prot_op(cmnd)]);
-		lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
+			lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
 				"9034 BLKGRD: CDB: %02x %02x %02x %02x %02x "
 				"%02x %02x %02x %02x %02x\n",
 				cmnd->cmnd[0], cmnd->cmnd[1], cmnd->cmnd[2],
 				cmnd->cmnd[3], cmnd->cmnd[4], cmnd->cmnd[5],
 				cmnd->cmnd[6], cmnd->cmnd[7], cmnd->cmnd[8],
 				cmnd->cmnd[9]);
-		if (cmnd->cmnd[0] == READ_10)
-			lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
+			if (cmnd->cmnd[0] == READ_10)
+				lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
 					"9035 BLKGRD: READ @ sector %llu, "
 					"count %u\n",
 					(unsigned long long)scsi_get_lba(cmnd),
 					blk_rq_sectors(cmnd->request));
-		else if (cmnd->cmnd[0] == WRITE_10)
-			lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
+			else if (cmnd->cmnd[0] == WRITE_10)
+				lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
 					"9036 BLKGRD: WRITE @ sector %llu, "
 					"count %u cmd=%p\n",
 					(unsigned long long)scsi_get_lba(cmnd),
 					blk_rq_sectors(cmnd->request),
 					cmnd);
+		}
 
 		err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
 	} else {
-		lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
-				"9038 BLKGRD: rcvd unprotected cmd:%02x op:%02x"
-				" str=%s\n",
-				cmnd->cmnd[0], scsi_get_prot_op(cmnd),
-				dif_op_str[scsi_get_prot_op(cmnd)]);
-		lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
-				 "9039 BLKGRD: CDB: %02x %02x %02x %02x %02x "
-				 "%02x %02x %02x %02x %02x\n",
-				 cmnd->cmnd[0], cmnd->cmnd[1], cmnd->cmnd[2],
-				 cmnd->cmnd[3], cmnd->cmnd[4], cmnd->cmnd[5],
-				 cmnd->cmnd[6], cmnd->cmnd[7], cmnd->cmnd[8],
-				 cmnd->cmnd[9]);
-		if (cmnd->cmnd[0] == READ_10)
+		if (vport->phba->cfg_enable_bg) {
 			lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
-					 "9040 dbg: READ @ sector %llu, "
-					 "count %u\n",
-					 (unsigned long long)scsi_get_lba(cmnd),
+					"9038 BLKGRD: rcvd unprotected cmd:"
+					"%02x op:%02x str=%s\n",
+					cmnd->cmnd[0], scsi_get_prot_op(cmnd),
+					dif_op_str[scsi_get_prot_op(cmnd)]);
+				lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
+					"9039 BLKGRD: CDB: %02x %02x %02x "
+					"%02x %02x %02x %02x %02x %02x %02x\n",
+					cmnd->cmnd[0], cmnd->cmnd[1],
+					cmnd->cmnd[2], cmnd->cmnd[3],
+					cmnd->cmnd[4], cmnd->cmnd[5],
+					cmnd->cmnd[6], cmnd->cmnd[7],
+					cmnd->cmnd[8], cmnd->cmnd[9]);
+			if (cmnd->cmnd[0] == READ_10)
+				lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
+					"9040 dbg: READ @ sector %llu, "
+					"count %u\n",
+					(unsigned long long)scsi_get_lba(cmnd),
 					 blk_rq_sectors(cmnd->request));
-		else if (cmnd->cmnd[0] == WRITE_10)
-			lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
+			else if (cmnd->cmnd[0] == WRITE_10)
+				lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
 					 "9041 dbg: WRITE @ sector %llu, "
 					 "count %u cmd=%p\n",
 					 (unsigned long long)scsi_get_lba(cmnd),
 					 blk_rq_sectors(cmnd->request), cmnd);
-		else
-			lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
+			else
+				lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
 					 "9042 dbg: parser not implemented\n");
+		}
 		err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
 	}
 
@@ -2898,7 +2902,11 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
 		goto out_host_busy_free_buf;
 	}
 	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
-		lpfc_sli_poll_fcp_ring(phba);
+		spin_unlock(shost->host_lock);
+		lpfc_sli_handle_fast_ring_event(phba,
+			&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
+
+		spin_lock(shost->host_lock);
 		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
 			lpfc_poll_rearm_timer(phba);
 	}
@@ -2917,28 +2925,6 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
 }
 
 /**
- * lpfc_block_error_handler - Routine to block error  handler
- * @cmnd: Pointer to scsi_cmnd data structure.
- *
- *  This routine blocks execution till fc_rport state is not FC_PORSTAT_BLCOEKD.
- **/
-static void
-lpfc_block_error_handler(struct scsi_cmnd *cmnd)
-{
-	struct Scsi_Host *shost = cmnd->device->host;
-	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
-
-	spin_lock_irq(shost->host_lock);
-	while (rport->port_state == FC_PORTSTATE_BLOCKED) {
-		spin_unlock_irq(shost->host_lock);
-		msleep(1000);
-		spin_lock_irq(shost->host_lock);
-	}
-	spin_unlock_irq(shost->host_lock);
-	return;
-}
-
-/**
  * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point
  * @cmnd: Pointer to scsi_cmnd data structure.
  *
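The open-coded lpfc_block_error_handler() is deleted in favor of fc_block_scsi_eh() from the FC transport class, which performs the same wait for the rport to leave FC_PORTSTATE_BLOCKED. A sketch of the intended usage in an EH entry point (my_eh_abort is illustrative):

	#include <scsi/scsi_transport_fc.h>

	static int my_eh_abort(struct scsi_cmnd *cmnd)
	{
		/* sleep until the rport is unblocked or devloss fires */
		fc_block_scsi_eh(cmnd);

		/* ... build and issue the ABTS for cmnd ... */
		return SUCCESS;
	}

Using the transport helper also keeps the blocking policy in one place instead of duplicating it per LLD.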
@@ -2961,7 +2947,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
 	int ret = SUCCESS;
 	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
 
-	lpfc_block_error_handler(cmnd);
+	fc_block_scsi_eh(cmnd);
 	lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
 	BUG_ON(!lpfc_cmd);
 
@@ -3001,6 +2987,10 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
 
 	icmd->ulpLe = 1;
 	icmd->ulpClass = cmd->ulpClass;
+
+	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
+	abtsiocb->fcp_wqidx = iocb->fcp_wqidx;
+
 	if (lpfc_is_link_up(phba))
 		icmd->ulpCommand = CMD_ABORT_XRI_CN;
 	else
@@ -3016,7 +3006,8 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
 	}
 
 	if (phba->cfg_poll & DISABLE_FCP_RING_INT)
-		lpfc_sli_poll_fcp_ring (phba);
+		lpfc_sli_handle_fast_ring_event(phba,
+			&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
 
 	lpfc_cmd->waitq = &waitq;
 	/* Wait for abort to complete */
@@ -3166,9 +3157,15 @@ static int
 lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
 {
 	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
-	struct lpfc_nodelist *pnode = rdata->pnode;
+	struct lpfc_nodelist *pnode;
 	unsigned long later;
 
+	if (!rdata) {
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+			"0797 Tgt Map rport failure: rdata x%p\n", rdata);
+		return FAILED;
+	}
+	pnode = rdata->pnode;
 	/*
 	 * If target is not in a MAPPED state, delay until
 	 * target is rediscovered or devloss timeout expires.
@@ -3253,13 +3250,19 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
 	struct Scsi_Host  *shost = cmnd->device->host;
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
 	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
-	struct lpfc_nodelist *pnode = rdata->pnode;
+	struct lpfc_nodelist *pnode;
 	unsigned tgt_id = cmnd->device->id;
 	unsigned int lun_id = cmnd->device->lun;
 	struct lpfc_scsi_event_header scsi_event;
 	int status;
 
-	lpfc_block_error_handler(cmnd);
+	if (!rdata) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+			"0798 Device Reset rport failure: rdata x%p\n", rdata);
+		return FAILED;
+	}
+	pnode = rdata->pnode;
+	fc_block_scsi_eh(cmnd);
 
 	status = lpfc_chk_tgt_mapped(vport, cmnd);
 	if (status == FAILED) {
@@ -3312,13 +3315,19 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
 	struct Scsi_Host  *shost = cmnd->device->host;
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
 	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
-	struct lpfc_nodelist *pnode = rdata->pnode;
+	struct lpfc_nodelist *pnode;
 	unsigned tgt_id = cmnd->device->id;
 	unsigned int lun_id = cmnd->device->lun;
 	struct lpfc_scsi_event_header scsi_event;
 	int status;
 
-	lpfc_block_error_handler(cmnd);
+	if (!rdata) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+			"0799 Target Reset rport failure: rdata x%p\n", rdata);
+		return FAILED;
+	}
+	pnode = rdata->pnode;
+	fc_block_scsi_eh(cmnd);
 
 	status = lpfc_chk_tgt_mapped(vport, cmnd);
 	if (status == FAILED) {
@@ -3384,7 +3393,7 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
 	fc_host_post_vendor_event(shost, fc_get_event_number(),
 		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
 
-	lpfc_block_error_handler(cmnd);
+	fc_block_scsi_eh(cmnd);
 
 	/*
 	 * Since the driver manages a single bus device, reset all
@@ -3498,6 +3507,8 @@ lpfc_slave_alloc(struct scsi_device *sdev)
 				 "Allocated %d buffers.\n",
 				 num_to_alloc, num_allocated);
 	}
+	if (num_allocated > 0)
+		phba->total_scsi_bufs += num_allocated;
 	return 0;
 }
 
@@ -3534,7 +3545,8 @@ lpfc_slave_configure(struct scsi_device *sdev)
 	rport->dev_loss_tmo = vport->cfg_devloss_tmo;
 
 	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
-		lpfc_sli_poll_fcp_ring(phba);
+		lpfc_sli_handle_fast_ring_event(phba,
+			&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
 		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
 			lpfc_poll_rearm_timer(phba);
 	}
@@ -3576,6 +3588,7 @@ struct scsi_host_template lpfc_template = {
 	.shost_attrs		= lpfc_hba_attrs,
 	.max_sectors		= 0xFFFF,
 	.vendor_id		= LPFC_NL_VENDOR_ID,
+	.change_queue_depth	= lpfc_change_queue_depth,
 };
 
 struct scsi_host_template lpfc_vport_template = {
@@ -3597,4 +3610,5 @@ struct scsi_host_template lpfc_vport_template = {
 	.use_clustering		= ENABLE_CLUSTERING,
 	.shost_attrs		= lpfc_vport_attrs,
 	.max_sectors		= 0xFFFF,
+	.change_queue_depth	= lpfc_change_queue_depth,
 };
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 43cbe336f1f8..b3a69f984d95 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -30,6 +30,7 @@
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_transport_fc.h>
 #include <scsi/fc/fc_fs.h>
+#include <linux/aer.h>
 
 #include "lpfc_hw4.h"
 #include "lpfc_hw.h"
@@ -58,8 +59,11 @@ typedef enum _lpfc_iocb_type {
 static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
 				  uint32_t);
 static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
-			    uint8_t *, uint32_t *);
-
+			      uint8_t *, uint32_t *);
+static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
+							 struct lpfc_iocbq *);
+static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
+				      struct hbq_dmabuf *);
 static IOCB_t *
 lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
 {
@@ -259,6 +263,9 @@ lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
 	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
 	bf_set(lpfc_eqcq_doorbell_eqid, &doorbell, q->queue_id);
 	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
+	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
+	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
+		readl(q->phba->sli4_hba.EQCQDBregaddr);
 	return released;
 }
 
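The added readl() is the classic posted-write flush: MMIO writes can linger in CPU and bridge write buffers, so a read from the same device forces the doorbell write to actually reach the HBA before the EQ is considered re-armed — important for INTx, where a late re-arm can lose the interrupt. A generic sketch of the idiom (regaddr is illustrative):

	writel(doorbell, regaddr);	/* posted: may sit in a buffer */
	if (must_be_visible_now)
		(void)readl(regaddr);	/* read forces the write to post */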
@@ -515,6 +522,8 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba)
 	struct lpfc_sglq *sglq = NULL;
 	uint16_t adj_xri;
 	list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list);
+	if (!sglq)
+		return NULL;
 	adj_xri = sglq->sli4_xritag - phba->sli4_hba.max_cfg_param.xri_base;
 	phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq;
 	return sglq;
@@ -572,9 +581,9 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
 		sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_xritag);
 	if (sglq)  {
 		if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED
-			|| ((iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
+			&& ((iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
 			&& (iocbq->iocb.un.ulpWord[4]
-				== IOERR_SLI_ABORTED))) {
+				== IOERR_ABORT_REQUESTED))) {
 			spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock,
 					iflag);
 			list_add(&sglq->list,
@@ -767,6 +776,7 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
 	case CMD_CLOSE_XRI_CX:
 	case CMD_XRI_ABORTED_CX:
 	case CMD_ABORT_MXRI64_CN:
+	case CMD_XMIT_BLS_RSP64_CX:
 		type = LPFC_ABORT_IOCB;
 		break;
 	case CMD_RCV_SEQUENCE_CX:
@@ -2068,8 +2078,8 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 	if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
 	    (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
 	    (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
-		Rctl = FC_ELS_REQ;
-		Type = FC_ELS_DATA;
+		Rctl = FC_RCTL_ELS_REQ;
+		Type = FC_TYPE_ELS;
 	} else {
 		w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
 		Rctl = w5p->hcsw.Rctl;
@@ -2079,8 +2089,8 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 		if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
 			(irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
 			 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
-			Rctl = FC_ELS_REQ;
-			Type = FC_ELS_DATA;
+			Rctl = FC_RCTL_ELS_REQ;
+			Type = FC_TYPE_ELS;
 			w5p->hcsw.Rctl = Rctl;
 			w5p->hcsw.Type = Type;
 		}
@@ -2324,168 +2334,6 @@ void lpfc_poll_eratt(unsigned long ptr)
 	return;
 }
 
-/**
- * lpfc_sli_poll_fcp_ring - Handle FCP ring completion in polling mode
- * @phba: Pointer to HBA context object.
- *
- * This function is called from lpfc_queuecommand, lpfc_poll_timeout,
- * lpfc_abort_handler and lpfc_slave_configure when FCP_RING_POLLING
- * is enabled.
- *
- * The caller does not hold any lock.
- * The function processes each response iocb in the response ring until it
- * finds an iocb with LE bit set and chains all the iocbs upto the iocb with
- * LE bit set. The function will call the completion handler of the command iocb
- * if the response iocb indicates a completion for a command iocb or it is
- * an abort completion.
- **/
-void lpfc_sli_poll_fcp_ring(struct lpfc_hba *phba)
-{
-	struct lpfc_sli      *psli  = &phba->sli;
-	struct lpfc_sli_ring *pring = &psli->ring[LPFC_FCP_RING];
-	IOCB_t *irsp = NULL;
-	IOCB_t *entry = NULL;
-	struct lpfc_iocbq *cmdiocbq = NULL;
-	struct lpfc_iocbq rspiocbq;
-	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
-	uint32_t status;
-	uint32_t portRspPut, portRspMax;
-	int type;
-	uint32_t rsp_cmpl = 0;
-	uint32_t ha_copy;
-	unsigned long iflags;
-
-	pring->stats.iocb_event++;
-
-	/*
-	 * The next available response entry should never exceed the maximum
-	 * entries.  If it does, treat it as an adapter hardware error.
-	 */
-	portRspMax = pring->numRiocb;
-	portRspPut = le32_to_cpu(pgp->rspPutInx);
-	if (unlikely(portRspPut >= portRspMax)) {
-		lpfc_sli_rsp_pointers_error(phba, pring);
-		return;
-	}
-
-	rmb();
-	while (pring->rspidx != portRspPut) {
-		entry = lpfc_resp_iocb(phba, pring);
-		if (++pring->rspidx >= portRspMax)
-			pring->rspidx = 0;
-
-		lpfc_sli_pcimem_bcopy((uint32_t *) entry,
-				      (uint32_t *) &rspiocbq.iocb,
-				      phba->iocb_rsp_size);
-		irsp = &rspiocbq.iocb;
-		type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
-		pring->stats.iocb_rsp++;
-		rsp_cmpl++;
-
-		if (unlikely(irsp->ulpStatus)) {
-			/* Rsp ring <ringno> error: IOCB */
-			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
-					"0326 Rsp Ring %d error: IOCB Data: "
-					"x%x x%x x%x x%x x%x x%x x%x x%x\n",
-					pring->ringno,
-					irsp->un.ulpWord[0],
-					irsp->un.ulpWord[1],
-					irsp->un.ulpWord[2],
-					irsp->un.ulpWord[3],
-					irsp->un.ulpWord[4],
-					irsp->un.ulpWord[5],
-					*(uint32_t *)&irsp->un1,
-					*((uint32_t *)&irsp->un1 + 1));
-		}
-
-		switch (type) {
-		case LPFC_ABORT_IOCB:
-		case LPFC_SOL_IOCB:
-			/*
-			 * Idle exchange closed via ABTS from port.  No iocb
-			 * resources need to be recovered.
-			 */
-			if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
-				lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
-						"0314 IOCB cmd 0x%x "
-						"processed. Skipping "
-						"completion",
-						irsp->ulpCommand);
-				break;
-			}
-
-			spin_lock_irqsave(&phba->hbalock, iflags);
-			cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
-							 &rspiocbq);
-			spin_unlock_irqrestore(&phba->hbalock, iflags);
-			if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) {
-				(cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
-						      &rspiocbq);
-			}
-			break;
-		default:
-			if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
-				char adaptermsg[LPFC_MAX_ADPTMSG];
-				memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
-				memcpy(&adaptermsg[0], (uint8_t *) irsp,
-				       MAX_MSG_DATA);
-				dev_warn(&((phba->pcidev)->dev),
-					 "lpfc%d: %s\n",
-					 phba->brd_no, adaptermsg);
-			} else {
-				/* Unknown IOCB command */
-				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-						"0321 Unknown IOCB command "
-						"Data: x%x, x%x x%x x%x x%x\n",
-						type, irsp->ulpCommand,
-						irsp->ulpStatus,
-						irsp->ulpIoTag,
-						irsp->ulpContext);
-			}
-			break;
-		}
-
-		/*
-		 * The response IOCB has been processed.  Update the ring
-		 * pointer in SLIM.  If the port response put pointer has not
-		 * been updated, sync the pgp->rspPutInx and fetch the new port
-		 * response put pointer.
-		 */
-		writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
-
-		if (pring->rspidx == portRspPut)
-			portRspPut = le32_to_cpu(pgp->rspPutInx);
-	}
-
-	ha_copy = readl(phba->HAregaddr);
-	ha_copy >>= (LPFC_FCP_RING * 4);
-
-	if ((rsp_cmpl > 0) && (ha_copy & HA_R0RE_REQ)) {
-		spin_lock_irqsave(&phba->hbalock, iflags);
-		pring->stats.iocb_rsp_full++;
-		status = ((CA_R0ATT | CA_R0RE_RSP) << (LPFC_FCP_RING * 4));
-		writel(status, phba->CAregaddr);
-		readl(phba->CAregaddr);
-		spin_unlock_irqrestore(&phba->hbalock, iflags);
-	}
-	if ((ha_copy & HA_R0CE_RSP) &&
-	    (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
-		spin_lock_irqsave(&phba->hbalock, iflags);
-		pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
-		pring->stats.iocb_cmd_empty++;
-
-		/* Force update of the local copy of cmdGetInx */
-		pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
-		lpfc_sli_resume_iocb(phba, pring);
-
-		if ((pring->lpfc_sli_cmd_available))
-			(pring->lpfc_sli_cmd_available) (phba, pring);
-
-		spin_unlock_irqrestore(&phba->hbalock, iflags);
-	}
-
-	return;
-}
 
 /**
  * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
@@ -2502,9 +2350,9 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba *phba)
  * an abort completion. The function will call lpfc_sli_process_unsol_iocb
  * function if this is an unsolicited iocb.
  * This routine presumes LPFC_FCP_RING handling and doesn't bother
- * to check it explicitly. This function always returns 1.
- **/
-static int
+ * to check it explicitly.
+ */
+int
 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
 				struct lpfc_sli_ring *pring, uint32_t mask)
 {
@@ -2534,6 +2382,11 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
 		spin_unlock_irqrestore(&phba->hbalock, iflag);
 		return 1;
 	}
+	if (phba->fcp_ring_in_use) {
+		spin_unlock_irqrestore(&phba->hbalock, iflag);
+		return 1;
+	} else
+		phba->fcp_ring_in_use = 1;
 
 	rmb();
 	while (pring->rspidx != portRspPut) {
@@ -2604,10 +2457,6 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
 			cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
 							 &rspiocbq);
 			if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) {
-				if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
-					(cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
-							      &rspiocbq);
-				} else {
 					spin_unlock_irqrestore(&phba->hbalock,
 							       iflag);
 					(cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
@@ -2615,7 +2464,6 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
 					spin_lock_irqsave(&phba->hbalock,
 							  iflag);
 				}
-			}
 			break;
 		case LPFC_UNSOL_IOCB:
 			spin_unlock_irqrestore(&phba->hbalock, iflag);
@@ -2675,6 +2523,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
 
 	}
 
+	phba->fcp_ring_in_use = 0;
 	spin_unlock_irqrestore(&phba->hbalock, iflag);
 	return rc;
 }
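fcp_ring_in_use makes the fast-ring handler single-flight: with both the interrupt path and the new polling callers able to enter it, only one context at a time may walk the response ring, because the walk drops hbalock around completion callbacks. A minimal sketch of the guard, assuming the flag is only read and written under the same spinlock:

	spin_lock_irqsave(&lock, flags);
	if (busy) {				/* someone else is draining */
		spin_unlock_irqrestore(&lock, flags);
		return 1;
	}
	busy = 1;

	/* ... walk the ring, dropping 'lock' around callbacks ... */

	busy = 0;				/* still under 'lock' */
	spin_unlock_irqrestore(&lock, flags);

This is cheaper than holding the lock for the whole walk and avoids re-entrant processing if a completion path triggers the handler again.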
@@ -3018,16 +2867,39 @@ lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
 				   struct lpfc_sli_ring *pring, uint32_t mask)
 {
 	struct lpfc_iocbq *irspiocbq;
+	struct hbq_dmabuf *dmabuf;
+	struct lpfc_cq_event *cq_event;
 	unsigned long iflag;
 
-	while (!list_empty(&phba->sli4_hba.sp_rspiocb_work_queue)) {
+	spin_lock_irqsave(&phba->hbalock, iflag);
+	phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
+	spin_unlock_irqrestore(&phba->hbalock, iflag);
+	while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
 		/* Get the response iocb from the head of work queue */
 		spin_lock_irqsave(&phba->hbalock, iflag);
-		list_remove_head(&phba->sli4_hba.sp_rspiocb_work_queue,
-				 irspiocbq, struct lpfc_iocbq, list);
+		list_remove_head(&phba->sli4_hba.sp_queue_event,
+				 cq_event, struct lpfc_cq_event, list);
 		spin_unlock_irqrestore(&phba->hbalock, iflag);
-		/* Process the response iocb */
-		lpfc_sli_sp_handle_rspiocb(phba, pring, irspiocbq);
+
+		switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
+		case CQE_CODE_COMPL_WQE:
+			irspiocbq = container_of(cq_event, struct lpfc_iocbq,
+						 cq_event);
+			/* Translate ELS WCQE to response IOCBQ */
+			irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba,
+								   irspiocbq);
+			if (irspiocbq)
+				lpfc_sli_sp_handle_rspiocb(phba, pring,
+							   irspiocbq);
+			break;
+		case CQE_CODE_RECEIVE:
+			dmabuf = container_of(cq_event, struct hbq_dmabuf,
+					      cq_event);
+			lpfc_sli4_handle_received_buffer(phba, dmabuf);
+			break;
+		default:
+			break;
+		}
 	}
 }
 
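The slow-path handler now consumes one generic sp_queue_event list instead of an iocb-only work queue: each entry is a struct lpfc_cq_event embedded in a larger object, and container_of() recovers the outer object once the CQE code identifies its type. A compact sketch of the embedded-node technique (the outer struct names are illustrative):

	struct node { struct list_head list; u32 code; };
	struct compl_ev { int tag; struct node n; };	/* code A */
	struct recv_ev { void *buf; struct node n; };	/* code B */

	struct node *np = pop_head(&queue);
	if (np->code == CODE_A) {
		struct compl_ev *ev = container_of(np, struct compl_ev, n);
		/* ... handle completion ... */
	} else {
		struct recv_ev *ev = container_of(np, struct recv_ev, n);
		/* ... handle received buffer ... */
	}

One queue can then carry heterogeneous events without a union or an extra allocation.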
@@ -3416,6 +3288,7 @@ lpfc_sli_brdreset(struct lpfc_hba *phba)
 
 	/* perform board reset */
 	phba->fc_eventTag = 0;
+	phba->link_events = 0;
 	phba->pport->fc_myDID = 0;
 	phba->pport->fc_prevDID = 0;
 
@@ -3476,6 +3349,7 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
 
 	/* perform board reset */
 	phba->fc_eventTag = 0;
+	phba->link_events = 0;
 	phba->pport->fc_myDID = 0;
 	phba->pport->fc_prevDID = 0;
 
@@ -3495,7 +3369,6 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
 	list_del_init(&phba->sli4_hba.dat_rq->list);
 	list_del_init(&phba->sli4_hba.mbx_cq->list);
 	list_del_init(&phba->sli4_hba.els_cq->list);
-	list_del_init(&phba->sli4_hba.rxq_cq->list);
 	for (qindx = 0; qindx < phba->cfg_fcp_wq_count; qindx++)
 		list_del_init(&phba->sli4_hba.fcp_wq[qindx]->list);
 	for (qindx = 0; qindx < phba->cfg_fcp_eq_count; qindx++)
@@ -3531,9 +3404,13 @@ lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
 	struct lpfc_sli *psli;
 	volatile uint32_t word0;
 	void __iomem *to_slim;
+	uint32_t hba_aer_enabled;
 
 	spin_lock_irq(&phba->hbalock);
 
+	/* Take PCIe device Advanced Error Reporting (AER) state */
+	hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
+
 	psli = &phba->sli;
 
 	/* Restart HBA */
@@ -3573,6 +3450,10 @@ lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
 	/* Give the INITFF and Post time to settle. */
 	mdelay(100);
 
+	/* Reset HBA AER if it was enabled, note hba_flag was reset above */
+	if (hba_aer_enabled)
+		pci_disable_pcie_error_reporting(phba->pcidev);
+
 	lpfc_hba_down_post(phba);
 
 	return 0;
@@ -4042,6 +3923,24 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba)
 	if (rc)
 		goto lpfc_sli_hba_setup_error;
 
+	/* Enable PCIe device Advanced Error Reporting (AER) if configured */
+	if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
+		rc = pci_enable_pcie_error_reporting(phba->pcidev);
+		if (!rc) {
+			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+					"2709 This device supports "
+					"Advanced Error Reporting (AER)\n");
+			spin_lock_irq(&phba->hbalock);
+			phba->hba_flag |= HBA_AER_ENABLED;
+			spin_unlock_irq(&phba->hbalock);
+		} else {
+			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+					"2708 This device does not support "
+					"Advanced Error Reporting (AER)\n");
+			phba->cfg_aer_support = 0;
+		}
+	}
+
 	if (phba->sli_rev == 3) {
 		phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
 		phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
@@ -4243,7 +4142,6 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
 
 	lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
 	lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
-	lpfc_sli4_cq_release(phba->sli4_hba.rxq_cq, LPFC_QUEUE_REARM);
 	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++)
 		lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx],
 				     LPFC_QUEUE_REARM);
@@ -4322,6 +4220,13 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
 	phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
 	if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev))
 		phba->hba_flag |= HBA_FCOE_SUPPORT;
+
+	if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
+		LPFC_DCBX_CEE_MODE)
+		phba->hba_flag |= HBA_FIP_SUPPORT;
+	else
+		phba->hba_flag &= ~HBA_FIP_SUPPORT;
+
 	if (phba->sli_rev != LPFC_SLI_REV4 ||
 	    !(phba->hba_flag & HBA_FCOE_SUPPORT)) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
@@ -4468,7 +4373,8 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
 	rc = lpfc_sli4_post_sgl_list(phba);
 	if (unlikely(rc)) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
-				"0582 Error %d during sgl post operation", rc);
+				"0582 Error %d during sgl post operation\n",
+					rc);
 		rc = -ENODEV;
 		goto out_free_vpd;
 	}
@@ -4477,8 +4383,8 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
 	rc = lpfc_sli4_repost_scsi_sgl_list(phba);
 	if (unlikely(rc)) {
 		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
-				"0383 Error %d during scsi sgl post opeation",
-				rc);
+				"0383 Error %d during scsi sgl post "
+				"operation\n", rc);
 		/* Some Scsi buffers were moved to the abort scsi list */
 		/* A pci function reset will repost them */
 		rc = -ENODEV;
@@ -4494,10 +4400,6 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
 		rc = -ENODEV;
 		goto out_free_vpd;
 	}
-	if (phba->cfg_enable_fip)
-		bf_set(lpfc_fip_flag, &phba->sli4_hba.sli4_flags, 1);
-	else
-		bf_set(lpfc_fip_flag, &phba->sli4_hba.sli4_flags, 0);
 
 	/* Set up all the queues to the device */
 	rc = lpfc_sli4_queue_setup(phba);
@@ -5669,7 +5571,7 @@ __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
 		case CMD_GEN_REQUEST64_CX:
 			if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
 				(piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
-					FC_FCP_CMND) ||
+					FC_RCTL_DD_UNSOL_CMD) ||
 				(piocb->iocb.un.genreq64.w5.hcsw.Type !=
 					MENLO_TRANSPORT_TYPE))
 
@@ -5849,7 +5751,7 @@ static int
 lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
 		union lpfc_wqe *wqe)
 {
-	uint32_t payload_len = 0;
+	uint32_t xmit_len = 0, total_len = 0;
 	uint8_t ct = 0;
 	uint32_t fip;
 	uint32_t abort_tag;
@@ -5857,12 +5759,15 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
 	uint8_t cmnd;
 	uint16_t xritag;
 	struct ulp_bde64 *bpl = NULL;
+	uint32_t els_id = ELS_ID_DEFAULT;
+	int numBdes, i;
+	struct ulp_bde64 bde;
 
-	fip = bf_get(lpfc_fip_flag, &phba->sli4_hba.sli4_flags);
+	fip = phba->hba_flag & HBA_FIP_SUPPORT;
 	/* The fcp commands will set command type */
 	if (iocbq->iocb_flag &  LPFC_IO_FCP)
 		command_type = FCP_COMMAND;
-	else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS))
+	else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK))
 		command_type = ELS_COMMAND_FIP;
 	else
 		command_type = ELS_COMMAND_NON_FIP;
@@ -5874,6 +5779,8 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
 	wqe->words[7] = 0; /* The ct field has moved so reset */
 	/* words0-2 bpl convert bde */
 	if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
+		numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
+				sizeof(struct ulp_bde64);
 		bpl  = (struct ulp_bde64 *)
 			((struct lpfc_dmabuf *)iocbq->context3)->virt;
 		if (!bpl)
@@ -5886,9 +5793,14 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
 		 * can assign it to the sgl.
 		 */
 		wqe->generic.bde.tus.w  = le32_to_cpu(bpl->tus.w);
-		payload_len = wqe->generic.bde.tus.f.bdeSize;
+		xmit_len = wqe->generic.bde.tus.f.bdeSize;
+		total_len = 0;
+		for (i = 0; i < numBdes; i++) {
+			bde.tus.w  = le32_to_cpu(bpl[i].tus.w);
+			total_len += bde.tus.f.bdeSize;
+		}
 	} else
-		payload_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
+		xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
 
 	iocbq->iocb.ulpIoTag = iocbq->iotag;
 	cmnd = iocbq->iocb.ulpCommand;
@@ -5902,7 +5814,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
 				iocbq->iocb.ulpCommand);
 			return IOCB_ERROR;
 		}
-		wqe->els_req.payload_len = payload_len;
+		wqe->els_req.payload_len = xmit_len;
 		/* Els_reguest64 has a TMO */
 		bf_set(wqe_tmo, &wqe->els_req.wqe_com,
 			iocbq->iocb.ulpTimeout);
@@ -5923,7 +5835,23 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
 		bf_set(lpfc_wqe_gen_ct, &wqe->generic, ct);
 		bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0);
 		/* CCP CCPE PV PRI in word10 were set in the memcpy */
+
+		if (command_type == ELS_COMMAND_FIP) {
+			els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
+					>> LPFC_FIP_ELS_ID_SHIFT);
+		}
+		bf_set(lpfc_wqe_gen_els_id, &wqe->generic, els_id);
+
 	break;
+	case CMD_XMIT_SEQUENCE64_CX:
+		bf_set(lpfc_wqe_gen_context, &wqe->generic,
+					iocbq->iocb.un.ulpWord[3]);
+		wqe->generic.word3 = 0;
+		bf_set(wqe_rcvoxid, &wqe->generic, iocbq->iocb.ulpContext);
+		bf_set(wqe_xc, &wqe->generic, 1);
+		/* The entire sequence is transmitted for this IOCB */
+		xmit_len = total_len;
+		cmnd = CMD_XMIT_SEQUENCE64_CR;
 	case CMD_XMIT_SEQUENCE64_CR:
 		/* word3 iocb=io_tag32 wqe=payload_offset */
 		/* payload offset used for multilpe outstanding
@@ -5933,7 +5861,8 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
 		/* word4 relative_offset memcpy */
 		/* word5 r_ctl/df_ctl memcpy */
 		bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0);
-		wqe->xmit_sequence.xmit_len = payload_len;
+		wqe->xmit_sequence.xmit_len = xmit_len;
+		command_type = OTHER_COMMAND;
 	break;
 	case CMD_XMIT_BCAST64_CN:
 		/* word3 iocb=iotag32 wqe=payload_len */
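Note the deliberate fall-through above: the new CMD_XMIT_SEQUENCE64_CX case fixes up the WQE (context tag, OX_ID, XC bit, total sequence length) and then drops into CMD_XMIT_SEQUENCE64_CR for the common fields, after rewriting cmnd to the CR opcode. A generic sketch of the pattern (VARIANT_* are illustrative):

	switch (cmnd) {
	case VARIANT_CX:
		prepare_extended_fields(wqe);	/* CX-only setup */
		cmnd = VARIANT_CR;
		/* fall through */
	case VARIANT_CR:
		finish_common_fields(wqe);	/* shared setup */
		break;
	}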
@@ -5962,7 +5891,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
 	case CMD_FCP_IREAD64_CR:
 		/* FCP_CMD is always the 1st sgl entry */
 		wqe->fcp_iread.payload_len =
-			payload_len + sizeof(struct fcp_rsp);
+			xmit_len + sizeof(struct fcp_rsp);
 
 		/* word 4 (xfer length) should have been set on the memcpy */
 
@@ -5999,7 +5928,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
 		 * sgl[1] = rsp.
 		 *
 		 */
-		wqe->gen_req.command_len = payload_len;
+		wqe->gen_req.command_len = xmit_len;
 		/* Word4 parameter  copied in the memcpy */
 		/* Word5 [rctl, type, df_ctl, la] copied in memcpy */
 		/* word6 context tag copied in memcpy */
@@ -6066,6 +5995,38 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
 		command_type = OTHER_COMMAND;
 		xritag = 0;
 	break;
+	case CMD_XMIT_BLS_RSP64_CX:
+		/* The BLS ABTS-ACC WQE differs enough from other WQEs
+		 * that we reconstruct it here from scratch, based on
+		 * information in the iocbq.
+		 */
+		memset(wqe, 0, sizeof(union lpfc_wqe));
+		/* OX_ID is the same regardless of which side sent the ABTS */
+		bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
+		       bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_acc));
+		if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_acc) ==
+		    LPFC_ABTS_UNSOL_INT) {
+			/* ABTS sent by initiator to CT exchange, the
+			 * RX_ID field will be filled with the newly
+			 * allocated responder XRI.
+			 */
+			bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
+			       iocbq->sli4_xritag);
+		} else {
+			/* ABTS sent by responder to CT exchange, the
+			 * RX_ID field will be filled with the responder
+			 * RX_ID from ABTS.
+			 */
+			bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
+			       bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_acc));
+		}
+		bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
+		bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
+		bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
+		       iocbq->iocb.ulpContext);
+		/* Overwrite the pre-set command type with OTHER_COMMAND */
+		command_type = OTHER_COMMAND;
+	break;
 	case CMD_XRI_ABORTED_CX:
 	case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
 		/* words0-2 are all 0's no bde */
@@ -6120,11 +6081,10 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
 	uint16_t xritag;
 	union lpfc_wqe wqe;
 	struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
-	uint32_t fcp_wqidx;
 
 	if (piocb->sli4_xritag == NO_XRI) {
 		if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
-			piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
+		    piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
 			sglq = NULL;
 		else {
 			sglq = __lpfc_sli_get_sglq(phba);
@@ -6155,8 +6115,17 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
 		return IOCB_ERROR;
 
 	if (piocb->iocb_flag &  LPFC_IO_FCP) {
-		fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba);
-		if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[fcp_wqidx], &wqe))
+		/*
+		 * For an FCP command IOCB, get a new WQ index to distribute
+		 * WQEs across the WQs. An abort IOCB, by contrast, reuses
+		 * the WQ index of the original command IOCB.
+		 */
+		if ((piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
+		    (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN))
+			piocb->fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba);
+		if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx],
+				     &wqe))
 			return IOCB_ERROR;
 	} else {
 		if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
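The comment above captures the queue-affinity rule: a fresh FCP command gets a round-robin WQ index, while an abort must be posted to the same WQ as the command it targets so the HBA sees them in order on one queue. In sketch form (is_abort, next_wq_index and submit_wqe are illustrative):

	if (!is_abort(piocb))			/* new command: spread load */
		piocb->fcp_wqidx = next_wq_index(phba);
	/* abort path: fcp_wqidx was copied from the original command */
	return submit_wqe(phba->fcp_wq[piocb->fcp_wqidx], &wqe);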
@@ -6449,31 +6418,37 @@ lpfc_sli_setup(struct lpfc_hba *phba)
 			pring->iotag_max = 4096;
 			pring->lpfc_sli_rcv_async_status =
 				lpfc_sli_async_event_handler;
-			pring->num_mask = 4;
+			pring->num_mask = LPFC_MAX_RING_MASK;
 			pring->prt[0].profile = 0;	/* Mask 0 */
-			pring->prt[0].rctl = FC_ELS_REQ;
-			pring->prt[0].type = FC_ELS_DATA;
+			pring->prt[0].rctl = FC_RCTL_ELS_REQ;
+			pring->prt[0].type = FC_TYPE_ELS;
 			pring->prt[0].lpfc_sli_rcv_unsol_event =
 			    lpfc_els_unsol_event;
 			pring->prt[1].profile = 0;	/* Mask 1 */
-			pring->prt[1].rctl = FC_ELS_RSP;
-			pring->prt[1].type = FC_ELS_DATA;
+			pring->prt[1].rctl = FC_RCTL_ELS_REP;
+			pring->prt[1].type = FC_TYPE_ELS;
 			pring->prt[1].lpfc_sli_rcv_unsol_event =
 			    lpfc_els_unsol_event;
 			pring->prt[2].profile = 0;	/* Mask 2 */
 			/* NameServer Inquiry */
-			pring->prt[2].rctl = FC_UNSOL_CTL;
+			pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
 			/* NameServer */
-			pring->prt[2].type = FC_COMMON_TRANSPORT_ULP;
+			pring->prt[2].type = FC_TYPE_CT;
 			pring->prt[2].lpfc_sli_rcv_unsol_event =
 			    lpfc_ct_unsol_event;
 			pring->prt[3].profile = 0;	/* Mask 3 */
 			/* NameServer response */
-			pring->prt[3].rctl = FC_SOL_CTL;
+			pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
 			/* NameServer */
-			pring->prt[3].type = FC_COMMON_TRANSPORT_ULP;
+			pring->prt[3].type = FC_TYPE_CT;
 			pring->prt[3].lpfc_sli_rcv_unsol_event =
 			    lpfc_ct_unsol_event;
+			/* abort unsolicited sequence */
+			pring->prt[4].profile = 0;	/* Mask 4 */
+			pring->prt[4].rctl = FC_RCTL_BA_ABTS;
+			pring->prt[4].type = FC_TYPE_BLS;
+			pring->prt[4].lpfc_sli_rcv_unsol_event =
+			    lpfc_sli4_ct_abort_unsol_event;
 			break;
 		}
 		totiocbsize += (pring->numCiocb * pring->sizeCiocb) +
@@ -6976,8 +6951,18 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 		abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
 
 		spin_lock_irq(&phba->hbalock);
-		if (abort_iotag != 0 && abort_iotag <= phba->sli.last_iotag)
-			abort_iocb = phba->sli.iocbq_lookup[abort_iotag];
+		if (phba->sli_rev < LPFC_SLI_REV4) {
+			if (abort_iotag != 0 &&
+				abort_iotag <= phba->sli.last_iotag)
+				abort_iocb =
+					phba->sli.iocbq_lookup[abort_iotag];
+		} else
+			/* For sli4 the abort_tag is the XRI,
+			 * so the abort routine puts the iotag  of the iocb
+			 * so the abort routine puts the iotag of the iocb
+			 * IOCB.
+			 */
+			abort_iocb = phba->sli.iocbq_lookup[abort_context];
 
 		lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_SLI,
 				"0327 Cannot abort els iocb %p "
@@ -6991,9 +6976,18 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 		 *  might have completed already. Do not free it again.
 		 */
 		if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
-			spin_unlock_irq(&phba->hbalock);
-			lpfc_sli_release_iocbq(phba, cmdiocb);
-			return;
+			if (irsp->un.ulpWord[4] != IOERR_NO_XRI) {
+				spin_unlock_irq(&phba->hbalock);
+				lpfc_sli_release_iocbq(phba, cmdiocb);
+				return;
+			}
+			/* For SLI4 the ulpContext field for abort IOCB
+			 * holds the iotag of the IOCB being aborted so
+			 * the local abort_context needs to be reset to
+			 * match the aborted IOCB's ulpContext.
+			 */
+			if (abort_iocb && phba->sli_rev == LPFC_SLI_REV4)
+				abort_context = abort_iocb->iocb.ulpContext;
 		}
 		/*
 		 * make sure we have the right iocbq before taking it
@@ -7112,13 +7106,18 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 	iabt = &abtsiocbp->iocb;
 	iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
 	iabt->un.acxri.abortContextTag = icmd->ulpContext;
-	if (phba->sli_rev == LPFC_SLI_REV4)
+	if (phba->sli_rev == LPFC_SLI_REV4) {
 		iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
+		iabt->un.acxri.abortContextTag = cmdiocb->iotag;
+	}
 	else
 		iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
 	iabt->ulpLe = 1;
 	iabt->ulpClass = icmd->ulpClass;
 
+	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
+	abtsiocbp->fcp_wqidx = cmdiocb->fcp_wqidx;
+
 	if (phba->link_state >= LPFC_LINK_UP)
 		iabt->ulpCommand = CMD_ABORT_XRI_CN;
 	else
@@ -7322,6 +7321,9 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
 		abtsiocb->iocb.ulpClass = cmd->ulpClass;
 		abtsiocb->vport = phba->pport;
 
+		/* ABTS WQE must go to the same WQ as the WQE to be aborted */
+		abtsiocb->fcp_wqidx = iocbq->fcp_wqidx;
+
 		if (lpfc_is_link_up(phba))
 			abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
 		else
@@ -7687,31 +7689,28 @@ static int
 lpfc_sli4_eratt_read(struct lpfc_hba *phba)
 {
 	uint32_t uerr_sta_hi, uerr_sta_lo;
-	uint32_t onlnreg0, onlnreg1;
 
 	/* For now, use the SLI4 device internal unrecoverable error
 	 * registers for error attention. This can be changed later.
 	 */
-	onlnreg0 = readl(phba->sli4_hba.ONLINE0regaddr);
-	onlnreg1 = readl(phba->sli4_hba.ONLINE1regaddr);
-	if ((onlnreg0 != LPFC_ONLINE_NERR) || (onlnreg1 != LPFC_ONLINE_NERR)) {
-		uerr_sta_lo = readl(phba->sli4_hba.UERRLOregaddr);
-		uerr_sta_hi = readl(phba->sli4_hba.UERRHIregaddr);
-		if (uerr_sta_lo || uerr_sta_hi) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-					"1423 HBA Unrecoverable error: "
-					"uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
-					"online0_reg=0x%x, online1_reg=0x%x\n",
-					uerr_sta_lo, uerr_sta_hi,
-					onlnreg0, onlnreg1);
-			phba->work_status[0] = uerr_sta_lo;
-			phba->work_status[1] = uerr_sta_hi;
-			/* Set the driver HA work bitmap */
-			phba->work_ha |= HA_ERATT;
-			/* Indicate polling handles this ERATT */
-			phba->hba_flag |= HBA_ERATT_HANDLED;
-			return 1;
-		}
+	uerr_sta_lo = readl(phba->sli4_hba.UERRLOregaddr);
+	uerr_sta_hi = readl(phba->sli4_hba.UERRHIregaddr);
+	if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
+	    (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1423 HBA Unrecoverable error: "
+				"uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
+				"ue_mask_lo_reg=0x%x, ue_mask_hi_reg=0x%x\n",
+				uerr_sta_lo, uerr_sta_hi,
+				phba->sli4_hba.ue_mask_lo,
+				phba->sli4_hba.ue_mask_hi);
+		phba->work_status[0] = uerr_sta_lo;
+		phba->work_status[1] = uerr_sta_hi;
+		/* Set the driver HA work bitmap */
+		phba->work_ha |= HA_ERATT;
+		/* Indicate polling handles this ERATT */
+		phba->hba_flag |= HBA_ERATT_HANDLED;
+		return 1;
 	}
 	return 0;
 }
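Error detection now goes through ue_mask_lo/ue_mask_hi instead of the ONLINE registers: a bit set in the mask means "expected, ignore", so ~mask & status is nonzero exactly when an unmasked error bit is asserted. A sketch of the test (register and helper names are illustrative):

	u32 status = readl(uerr_reg);	/* asserted error bits */
	u32 fatal  = ~mask & status;	/* keep only unmasked bits */

	if (fatal)
		report_unrecoverable(fatal);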
@@ -7834,7 +7833,7 @@ irqreturn_t
 lpfc_sli_sp_intr_handler(int irq, void *dev_id)
 {
 	struct lpfc_hba  *phba;
-	uint32_t ha_copy;
+	uint32_t ha_copy, hc_copy;
 	uint32_t work_ha_copy;
 	unsigned long status;
 	unsigned long iflag;
@@ -7892,8 +7891,13 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id)
 		}
 
 		/* Clear up only attention source related to slow-path */
+		hc_copy = readl(phba->HCregaddr);
+		writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
+			HC_LAINT_ENA | HC_ERINT_ENA),
+			phba->HCregaddr);
 		writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
 			phba->HAregaddr);
+		writel(hc_copy, phba->HCregaddr);
 		readl(phba->HAregaddr); /* flush */
 		spin_unlock_irqrestore(&phba->hbalock, iflag);
 	} else
@@ -8049,7 +8053,7 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id)
 							KERN_ERR,
 							LOG_MBOX | LOG_SLI,
 							"0350 rc should have"
-							"been MBX_BUSY");
+							"been MBX_BUSY\n");
 						if (rc != MBX_NOT_FINISHED)
 							goto send_current_mbox;
 					}
@@ -8078,7 +8082,7 @@ send_current_mbox:
 			if (rc != MBX_SUCCESS)
 				lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
 						LOG_SLI, "0349 rc should be "
-						"MBX_SUCCESS");
+						"MBX_SUCCESS\n");
 		}
 
 		spin_lock_irqsave(&phba->hbalock, iflag);
@@ -8203,6 +8207,7 @@ lpfc_sli_intr_handler(int irq, void *dev_id)
 	struct lpfc_hba  *phba;
 	irqreturn_t sp_irq_rc, fp_irq_rc;
 	unsigned long status1, status2;
+	uint32_t hc_copy;
 
 	/*
 	 * Get the driver's phba structure from the dev_id and
@@ -8240,7 +8245,12 @@ lpfc_sli_intr_handler(int irq, void *dev_id)
 	}
 
 	/* Clear attention sources except link and error attentions */
+	hc_copy = readl(phba->HCregaddr);
+	writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
+		| HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
+		phba->HCregaddr);
 	writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
+	writel(hc_copy, phba->HCregaddr);
 	readl(phba->HAregaddr); /* flush */
 	spin_unlock(&phba->hbalock);
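
Both interrupt handlers now bracket the attention clear with the same
save/mask/restore sequence on the Host Control register, so the just-cleared
sources cannot re-raise an interrupt mid-ack. A hedged sketch of the pattern
in isolation (the register fields follow the hunks above; the helper itself
is illustrative and assumes the caller holds phba->hbalock, as both call
sites do):

/* Illustrative helper, not part of the driver: ack 'ha_bits' in the
 * Host Attention register with the given enable bits masked off in
 * the Host Control register, then restore the saved enables.
 */
static void lpfc_ack_ha_masked(struct lpfc_hba *phba, u32 ha_bits,
			       u32 hc_mask)
{
	u32 hc_copy;

	hc_copy = readl(phba->HCregaddr);		/* save enables */
	writel(hc_copy & ~hc_mask, phba->HCregaddr);	/* mask sources */
	writel(ha_bits, phba->HAregaddr);		/* ack attention */
	writel(hc_copy, phba->HCregaddr);		/* restore enables */
	readl(phba->HAregaddr);				/* flush writes */
}
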
 
@@ -8351,8 +8361,6 @@ lpfc_sli4_iocb_param_transfer(struct lpfc_iocbq *pIocbIn,
 
 	memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
 	       sizeof(struct lpfc_iocbq) - offset);
-	memset(&pIocbIn->sli4_info, 0,
-	       sizeof(struct lpfc_sli4_rspiocb_info));
 	/* Map WCQE parameters into irspiocb parameters */
 	pIocbIn->iocb.ulpStatus = bf_get(lpfc_wcqe_c_status, wcqe);
 	if (pIocbOut->iocb_flag & LPFC_IO_FCP)
@@ -8364,16 +8372,49 @@ lpfc_sli4_iocb_param_transfer(struct lpfc_iocbq *pIocbIn,
 			pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
 	else
 		pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
-	/* Load in additional WCQE parameters */
-	pIocbIn->sli4_info.hw_status = bf_get(lpfc_wcqe_c_hw_status, wcqe);
-	pIocbIn->sli4_info.bfield = 0;
-	if (bf_get(lpfc_wcqe_c_xb, wcqe))
-		pIocbIn->sli4_info.bfield |= LPFC_XB;
-	if (bf_get(lpfc_wcqe_c_pv, wcqe)) {
-		pIocbIn->sli4_info.bfield |= LPFC_PV;
-		pIocbIn->sli4_info.priority =
-					bf_get(lpfc_wcqe_c_priority, wcqe);
+}
+
+/**
+ * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
+ * @phba: Pointer to HBA context object.
+ * @irspiocbq: Pointer to the response iocbq carrying the ELS WCQE.
+ *
+ * This routine handles an ELS work-queue completion event and constructs
+ * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
+ * discovery engine to handle.
+ *
+ * Return: Pointer to the response IOCBQ, NULL otherwise.
+ **/
+static struct lpfc_iocbq *
+lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
+			       struct lpfc_iocbq *irspiocbq)
+{
+	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
+	struct lpfc_iocbq *cmdiocbq;
+	struct lpfc_wcqe_complete *wcqe;
+	unsigned long iflags;
+
+	wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	pring->stats.iocb_event++;
+	/* Look up the ELS command IOCB and create pseudo response IOCB */
+	cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
+				bf_get(lpfc_wcqe_c_request_tag, wcqe));
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+	if (unlikely(!cmdiocbq)) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+				"0386 ELS complete with no corresponding "
+				"cmdiocb: iotag (%d)\n",
+				bf_get(lpfc_wcqe_c_request_tag, wcqe));
+		lpfc_sli_release_iocbq(phba, irspiocbq);
+		return NULL;
 	}
+
+	/* Fake the irspiocbq and copy necessary response information */
+	lpfc_sli4_iocb_param_transfer(irspiocbq, cmdiocbq, wcqe);
+
+	return irspiocbq;
 }
 
 /**
@@ -8566,45 +8607,26 @@ static bool
 lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba,
 			     struct lpfc_wcqe_complete *wcqe)
 {
-	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
-	struct lpfc_iocbq *cmdiocbq;
 	struct lpfc_iocbq *irspiocbq;
 	unsigned long iflags;
-	bool workposted = false;
-
-	spin_lock_irqsave(&phba->hbalock, iflags);
-	pring->stats.iocb_event++;
-	/* Look up the ELS command IOCB and create pseudo response IOCB */
-	cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
-				bf_get(lpfc_wcqe_c_request_tag, wcqe));
-	spin_unlock_irqrestore(&phba->hbalock, iflags);
 
-	if (unlikely(!cmdiocbq)) {
-		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
-				"0386 ELS complete with no corresponding "
-				"cmdiocb: iotag (%d)\n",
-				bf_get(lpfc_wcqe_c_request_tag, wcqe));
-		return workposted;
-	}
-
-	/* Fake the irspiocbq and copy necessary response information */
+	/* Get an irspiocbq for later ELS response processing use */
 	irspiocbq = lpfc_sli_get_iocbq(phba);
 	if (!irspiocbq) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 				"0387 Failed to allocate an iocbq\n");
-		return workposted;
+		return false;
 	}
-	lpfc_sli4_iocb_param_transfer(irspiocbq, cmdiocbq, wcqe);
 
-	/* Add the irspiocb to the response IOCB work list */
+	/* Save off the slow-path queue event for worker thread to process */
+	memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
 	spin_lock_irqsave(&phba->hbalock, iflags);
-	list_add_tail(&irspiocbq->list, &phba->sli4_hba.sp_rspiocb_work_queue);
-	/* Indicate ELS ring attention */
-	phba->work_ha |= (HA_R0ATT << (4*LPFC_ELS_RING));
+	list_add_tail(&irspiocbq->cq_event.list,
+		      &phba->sli4_hba.sp_queue_event);
+	phba->hba_flag |= HBA_SP_QUEUE_EVT;
 	spin_unlock_irqrestore(&phba->hbalock, iflags);
-	workposted = true;
 
-	return workposted;
+	return true;
 }
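
With this change the ELS completion is no longer resolved in interrupt
context: the handler snapshots the WCQE into a spare iocbq, queues it on
sp_queue_event, and raises HBA_SP_QUEUE_EVT so the worker thread can call
lpfc_sli4_els_wcqe_to_rspiocbq() later. A simplified sketch of the producer
side of that handoff (the types and names below are pared-down stand-ins,
not the driver's own):

/* Simplified producer: snapshot a completion entry, queue it for the
 * worker thread under the lock, and set the "work pending" flag.
 */
struct sp_event {
	struct list_head list;
	u8 cqe_copy[64];		/* stable snapshot of the CQE */
};

static bool sp_event_post(spinlock_t *lock, struct list_head *pending,
			  unsigned long *hba_flag, unsigned long evt_bit,
			  struct sp_event *evt, const void *cqe, size_t len)
{
	unsigned long iflags;

	memcpy(evt->cqe_copy, cqe, len);	/* copy before hw reuses it */
	spin_lock_irqsave(lock, iflags);
	list_add_tail(&evt->list, pending);	/* FIFO toward the worker */
	*hba_flag |= evt_bit;			/* e.g. HBA_SP_QUEUE_EVT */
	spin_unlock_irqrestore(lock, iflags);
	return true;				/* work was posted */
}
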
 
 /**
@@ -8690,52 +8712,6 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
 }
 
 /**
- * lpfc_sli4_sp_handle_wcqe - Process a work-queue completion queue entry
- * @phba: Pointer to HBA context object.
- * @cq: Pointer to the completion queue.
- * @wcqe: Pointer to a completion queue entry.
- *
- * This routine process a slow-path work-queue completion queue entry.
- *
- * Return: true if work posted to worker thread, otherwise false.
- **/
-static bool
-lpfc_sli4_sp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
-			 struct lpfc_cqe *cqe)
-{
-	struct lpfc_wcqe_complete wcqe;
-	bool workposted = false;
-
-	/* Copy the work queue CQE and convert endian order if needed */
-	lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
-
-	/* Check and process for different type of WCQE and dispatch */
-	switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
-	case CQE_CODE_COMPL_WQE:
-		/* Process the WQ complete event */
-		workposted = lpfc_sli4_sp_handle_els_wcqe(phba,
-					(struct lpfc_wcqe_complete *)&wcqe);
-		break;
-	case CQE_CODE_RELEASE_WQE:
-		/* Process the WQ release event */
-		lpfc_sli4_sp_handle_rel_wcqe(phba,
-					(struct lpfc_wcqe_release *)&wcqe);
-		break;
-	case CQE_CODE_XRI_ABORTED:
-		/* Process the WQ XRI abort event */
-		workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
-					(struct sli4_wcqe_xri_aborted *)&wcqe);
-		break;
-	default:
-		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-				"0388 Not a valid WCQE code: x%x\n",
-				bf_get(lpfc_wcqe_c_code, &wcqe));
-		break;
-	}
-	return workposted;
-}
-
-/**
  * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
  * @phba: Pointer to HBA context object.
  * @rcqe: Pointer to receive-queue completion queue entry.
@@ -8745,9 +8721,8 @@ lpfc_sli4_sp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
  * Return: true if work posted to worker thread, otherwise false.
  **/
 static bool
-lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
+lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
 {
-	struct lpfc_rcqe rcqe;
 	bool workposted = false;
 	struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
 	struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
@@ -8755,31 +8730,28 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
 	uint32_t status;
 	unsigned long iflags;
 
-	/* Copy the receive queue CQE and convert endian order if needed */
-	lpfc_sli_pcimem_bcopy(cqe, &rcqe, sizeof(struct lpfc_rcqe));
-	lpfc_sli4_rq_release(hrq, drq);
-	if (bf_get(lpfc_rcqe_code, &rcqe) != CQE_CODE_RECEIVE)
-		goto out;
-	if (bf_get(lpfc_rcqe_rq_id, &rcqe) != hrq->queue_id)
+	if (bf_get(lpfc_rcqe_rq_id, rcqe) != hrq->queue_id)
 		goto out;
 
-	status = bf_get(lpfc_rcqe_status, &rcqe);
+	status = bf_get(lpfc_rcqe_status, rcqe);
 	switch (status) {
 	case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 				"2537 Receive Frame Truncated!!\n");
 	case FC_STATUS_RQ_SUCCESS:
+		lpfc_sli4_rq_release(hrq, drq);
 		spin_lock_irqsave(&phba->hbalock, iflags);
 		dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
 		if (!dma_buf) {
 			spin_unlock_irqrestore(&phba->hbalock, iflags);
 			goto out;
 		}
-		memcpy(&dma_buf->rcqe, &rcqe, sizeof(rcqe));
+		memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
+		/* save off the frame for the worker thread to process */
-		list_add_tail(&dma_buf->dbuf.list, &phba->rb_pend_list);
+		list_add_tail(&dma_buf->cq_event.list,
+			      &phba->sli4_hba.sp_queue_event);
 		/* Frame received */
-		phba->hba_flag |= HBA_RECEIVE_BUFFER;
+		phba->hba_flag |= HBA_SP_QUEUE_EVT;
 		spin_unlock_irqrestore(&phba->hbalock, iflags);
 		workposted = true;
 		break;
@@ -8794,7 +8766,58 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
 	}
 out:
 	return workposted;
+}
+
+/**
+ * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
+ * @phba: Pointer to HBA context object.
+ * @cq: Pointer to the completion queue.
+ * @cqe: Pointer to a completion queue entry.
+ *
+ * This routine processes a slow-path work-queue or receive-queue completion
+ * queue entry.
+ *
+ * Return: true if work posted to worker thread, otherwise false.
+ **/
+static bool
+lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
+			 struct lpfc_cqe *cqe)
+{
+	struct lpfc_cqe cqevt;
+	bool workposted = false;
+
+	/* Copy the work queue CQE and convert endian order if needed */
+	lpfc_sli_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
 
+	/* Check and process for different type of WCQE and dispatch */
+	switch (bf_get(lpfc_cqe_code, &cqevt)) {
+	case CQE_CODE_COMPL_WQE:
+		/* Process the WQ/RQ complete event */
+		workposted = lpfc_sli4_sp_handle_els_wcqe(phba,
+				(struct lpfc_wcqe_complete *)&cqevt);
+		break;
+	case CQE_CODE_RELEASE_WQE:
+		/* Process the WQ release event */
+		lpfc_sli4_sp_handle_rel_wcqe(phba,
+				(struct lpfc_wcqe_release *)&cqevt);
+		break;
+	case CQE_CODE_XRI_ABORTED:
+		/* Process the WQ XRI abort event */
+		workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
+				(struct sli4_wcqe_xri_aborted *)&cqevt);
+		break;
+	case CQE_CODE_RECEIVE:
+		/* Process the RQ event */
+		workposted = lpfc_sli4_sp_handle_rcqe(phba,
+				(struct lpfc_rcqe *)&cqevt);
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0388 Not a valid WCQE code: x%x\n",
+				bf_get(lpfc_cqe_code, &cqevt));
+		break;
+	}
+	return workposted;
 }
 
 /**
@@ -8858,14 +8881,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
 		break;
 	case LPFC_WCQ:
 		while ((cqe = lpfc_sli4_cq_get(cq))) {
-			workposted |= lpfc_sli4_sp_handle_wcqe(phba, cq, cqe);
-			if (!(++ecount % LPFC_GET_QE_REL_INT))
-				lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
-		}
-		break;
-	case LPFC_RCQ:
-		while ((cqe = lpfc_sli4_cq_get(cq))) {
-			workposted |= lpfc_sli4_sp_handle_rcqe(phba, cqe);
+			workposted |= lpfc_sli4_sp_handle_cqe(phba, cq, cqe);
 			if (!(++ecount % LPFC_GET_QE_REL_INT))
 				lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
 		}
@@ -10427,8 +10443,7 @@ lpfc_sli4_next_xritag(struct lpfc_hba *phba)
 		return xritag;
 	}
 	spin_unlock_irq(&phba->hbalock);
-
-	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
 			"2004 Failed to allocate XRI.last XRITAG is %d"
 			" Max XRI is %d, Used XRI is %d\n",
 			phba->sli4_hba.next_xri,
@@ -10492,15 +10507,7 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
 		lpfc_sli4_mbox_cmd_free(phba, mbox);
 		return -ENOMEM;
 	}
-
 	/* Get the first SGE entry from the non-embedded DMA memory */
-	if (unlikely(!mbox->sge_array)) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
-				"2525 Failed to get the non-embedded SGE "
-				"virtual address\n");
-		lpfc_sli4_mbox_cmd_free(phba, mbox);
-		return -ENOMEM;
-	}
 	viraddr = mbox->sge_array->addr[0];
 
 	/* Set up the SGL pages in the non-embedded DMA pages */
@@ -10524,8 +10531,7 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
 		sgl_pg_pairs++;
 	}
 	bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
-	pg_pairs = (pg_pairs > 0) ? (pg_pairs - 1) : pg_pairs;
-	bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
+	bf_set(lpfc_post_sgl_pages_xricnt, sgl, els_xri_cnt);
 	/* Perform endian conversion if necessary */
 	sgl->word0 = cpu_to_le32(sgl->word0);
 
@@ -10607,15 +10613,7 @@ lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist,
 		lpfc_sli4_mbox_cmd_free(phba, mbox);
 		return -ENOMEM;
 	}
-
 	/* Get the first SGE entry from the non-embedded DMA memory */
-	if (unlikely(!mbox->sge_array)) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
-				"2565 Failed to get the non-embedded SGE "
-				"virtual address\n");
-		lpfc_sli4_mbox_cmd_free(phba, mbox);
-		return -ENOMEM;
-	}
 	viraddr = mbox->sge_array->addr[0];
 
 	/* Set up the SGL pages in the non-embedded DMA pages */
@@ -10802,6 +10800,105 @@ lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
 }
 
 /**
+ * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
+ * @vport: The vport to work on.
+ *
+ * This function updates the receive sequence time stamp for this vport. The
+ * receive sequence time stamp indicates the time that the last frame of
+ * the sequence that has been idle for the longest amount of time was
+ * received. The driver uses this time stamp to determine if any received
+ * sequences have timed out.
+ **/
+void
+lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
+{
+	struct lpfc_dmabuf *h_buf;
+	struct hbq_dmabuf *dmabuf = NULL;
+
+	/* get the oldest sequence on the rcv list */
+	h_buf = list_get_first(&vport->rcv_buffer_list,
+			       struct lpfc_dmabuf, list);
+	if (!h_buf)
+		return;
+	dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
+	vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
+}
+
+/**
+ * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
+ * @vport: The vport that the received sequences were sent to.
+ *
+ * This function cleans up all outstanding received sequences. This is called
+ * by the driver when a link event or user action invalidates all the received
+ * sequences.
+ **/
+void
+lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
+{
+	struct lpfc_dmabuf *h_buf, *hnext;
+	struct lpfc_dmabuf *d_buf, *dnext;
+	struct hbq_dmabuf *dmabuf = NULL;
+
+	/* start with the oldest sequence on the rcv list */
+	list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
+		dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
+		list_del_init(&dmabuf->hbuf.list);
+		list_for_each_entry_safe(d_buf, dnext,
+					 &dmabuf->dbuf.list, list) {
+			list_del_init(&d_buf->list);
+			lpfc_in_buf_free(vport->phba, d_buf);
+		}
+		lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
+	}
+}
+
+/**
+ * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
+ * @vport: The vport that the received sequences were sent to.
+ *
+ * This function determines whether any received sequences have timed out by
+ * first checking the vport's rcv_buffer_time_stamp. If this time_stamp
+ * indicates that there is at least one timed-out sequence, this routine
+ * walks the received sequences one at a time, from most inactive to most
+ * active, to determine which ones need to be cleaned up. Once it has
+ * determined that a sequence needs to be cleaned up, it simply frees the
+ * resources without sending an abort.
+ **/
+void
+lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
+{
+	struct lpfc_dmabuf *h_buf, *hnext;
+	struct lpfc_dmabuf *d_buf, *dnext;
+	struct hbq_dmabuf *dmabuf = NULL;
+	unsigned long timeout;
+	int abort_count = 0;
+
+	timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
+		   vport->rcv_buffer_time_stamp);
+	if (list_empty(&vport->rcv_buffer_list) ||
+	    time_before(jiffies, timeout))
+		return;
+	/* start with the oldest sequence on the rcv list */
+	list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
+		dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
+		timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
+			   dmabuf->time_stamp);
+		if (time_before(jiffies, timeout))
+			break;
+		abort_count++;
+		list_del_init(&dmabuf->hbuf.list);
+		list_for_each_entry_safe(d_buf, dnext,
+					 &dmabuf->dbuf.list, list) {
+			list_del_init(&d_buf->list);
+			lpfc_in_buf_free(vport->phba, d_buf);
+		}
+		lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
+	}
+	if (abort_count)
+		lpfc_update_rcv_time_stamp(vport);
+}
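
The timeout test above is standard wrap-safe jiffies arithmetic: add E_D_TOV
(converted from milliseconds) to the stamp recorded when the sequence last
grew, then compare against the current jiffies with time_before(). The same
check in isolation (a sketch; the helper name is illustrative):

#include <linux/jiffies.h>

/* True when a sequence stamped at 'stamp' has been idle longer than
 * 'edtov_ms' milliseconds; time_before() keeps the comparison correct
 * across jiffies wraparound.
 */
static bool rcv_seq_timed_out(unsigned long stamp, unsigned int edtov_ms)
{
	unsigned long deadline = stamp + msecs_to_jiffies(edtov_ms);

	return !time_before(jiffies, deadline);
}
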
+
+/**
  * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
  * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
  *
@@ -10823,6 +10920,8 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
 	struct hbq_dmabuf *seq_dmabuf = NULL;
 	struct hbq_dmabuf *temp_dmabuf = NULL;
 
+	INIT_LIST_HEAD(&dmabuf->dbuf.list);
+	dmabuf->time_stamp = jiffies;
 	new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
 	/* Use the hdr_buf to find the sequence that this frame belongs to */
 	list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
@@ -10841,13 +10940,21 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
 		 * Queue the buffer on the vport's rcv_buffer_list.
 		 */
 		list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
+		lpfc_update_rcv_time_stamp(vport);
 		return dmabuf;
 	}
 	temp_hdr = seq_dmabuf->hbuf.virt;
 	if (new_hdr->fh_seq_cnt < temp_hdr->fh_seq_cnt) {
-		list_add(&seq_dmabuf->dbuf.list, &dmabuf->dbuf.list);
+		list_del_init(&seq_dmabuf->hbuf.list);
+		list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
+		list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
+		lpfc_update_rcv_time_stamp(vport);
 		return dmabuf;
 	}
+	/* move this sequence to the tail to indicate a young sequence */
+	list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
+	seq_dmabuf->time_stamp = jiffies;
+	lpfc_update_rcv_time_stamp(vport);
 	/* find the correct place in the sequence to insert this frame */
 	list_for_each_entry_reverse(d_buf, &seq_dmabuf->dbuf.list, list) {
 		temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
@@ -10865,6 +10972,210 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
 }
 
 /**
+ * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
+ * @vport: pointer to a virtual port
+ * @dmabuf: pointer to a dmabuf that describes the FC sequence
+ *
+ * This function tries to abort the partially assembled sequence described
+ * by the basic abort information in @dmabuf. It checks whether such a
+ * partially assembled sequence is held by the driver and, if so, frees up
+ * all the frames from that sequence.
+ *
+ * Return
+ * true  -- if a matching partially assembled sequence is present and all
+ *          of its frames have been freed;
+ * false -- if no matching partially assembled sequence is present, so
+ *          nothing was aborted in the lower layer driver
+ **/
+static bool
+lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
+			    struct hbq_dmabuf *dmabuf)
+{
+	struct fc_frame_header *new_hdr;
+	struct fc_frame_header *temp_hdr;
+	struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
+	struct hbq_dmabuf *seq_dmabuf = NULL;
+
+	/* Use the hdr_buf to find the sequence that matches this frame */
+	INIT_LIST_HEAD(&dmabuf->dbuf.list);
+	INIT_LIST_HEAD(&dmabuf->hbuf.list);
+	new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
+	list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
+		temp_hdr = (struct fc_frame_header *)h_buf->virt;
+		if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
+		    (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
+		    (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
+			continue;
+		/* found a pending sequence that matches this frame */
+		seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
+		break;
+	}
+
+	/* Free up all the frames from the partially assembled sequence */
+	if (seq_dmabuf) {
+		list_for_each_entry_safe(d_buf, n_buf,
+					 &seq_dmabuf->dbuf.list, list) {
+			list_del_init(&d_buf->list);
+			lpfc_in_buf_free(vport->phba, d_buf);
+		}
+		return true;
+	}
+	return false;
+}
+
+/**
+ * lpfc_sli4_seq_abort_acc_cmpl - Accept seq abort iocb complete handler
+ * @phba: Pointer to HBA context object.
+ * @cmd_iocbq: pointer to the command iocbq structure.
+ * @rsp_iocbq: pointer to the response iocbq structure.
+ *
+ * This function handles the sequence abort accept iocb command complete
+ * event. It properly releases the memory allocated to the sequence abort
+ * accept iocb.
+ **/
+static void
+lpfc_sli4_seq_abort_acc_cmpl(struct lpfc_hba *phba,
+			     struct lpfc_iocbq *cmd_iocbq,
+			     struct lpfc_iocbq *rsp_iocbq)
+{
+	if (cmd_iocbq)
+		lpfc_sli_release_iocbq(phba, cmd_iocbq);
+}
+
+/**
+ * lpfc_sli4_seq_abort_acc - Accept sequence abort
+ * @phba: Pointer to HBA context object.
+ * @fc_hdr: pointer to a FC frame header.
+ *
+ * This function sends a basic accept to a previous unsol sequence abort
+ * event after aborting the sequence handling.
+ **/
+static void
+lpfc_sli4_seq_abort_acc(struct lpfc_hba *phba,
+			struct fc_frame_header *fc_hdr)
+{
+	struct lpfc_iocbq *ctiocb = NULL;
+	struct lpfc_nodelist *ndlp;
+	uint16_t oxid, rxid;
+	uint32_t sid, fctl;
+	IOCB_t *icmd;
+
+	if (!lpfc_is_link_up(phba))
+		return;
+
+	sid = sli4_sid_from_fc_hdr(fc_hdr);
+	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
+	rxid = be16_to_cpu(fc_hdr->fh_rx_id);
+
+	ndlp = lpfc_findnode_did(phba->pport, sid);
+	if (!ndlp) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
+				"1268 Find ndlp returned NULL for oxid:x%x "
+				"SID:x%x\n", oxid, sid);
+		return;
+	}
+
+	/* Allocate buffer for acc iocb */
+	ctiocb = lpfc_sli_get_iocbq(phba);
+	if (!ctiocb)
+		return;
+
+	/* Extract the F_CTL field from FC_HDR */
+	fctl = sli4_fctl_from_fc_hdr(fc_hdr);
+
+	icmd = &ctiocb->iocb;
+	icmd->un.xseq64.bdl.bdeSize = 0;
+	icmd->un.xseq64.bdl.ulpIoTag32 = 0;
+	icmd->un.xseq64.w5.hcsw.Dfctl = 0;
+	icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC;
+	icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS;
+
+	/* Fill in the rest of iocb fields */
+	icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX;
+	icmd->ulpBdeCount = 0;
+	icmd->ulpLe = 1;
+	icmd->ulpClass = CLASS3;
+	icmd->ulpContext = ndlp->nlp_rpi;
+
+	ctiocb->iocb_cmpl = NULL;
+	ctiocb->vport = phba->pport;
+	ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_acc_cmpl;
+
+	if (fctl & FC_FC_EX_CTX) {
+		/* ABTS sent by responder to CT exchange, construction
+		 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
+		 * field and RX_ID from ABTS for RX_ID field.
+		 */
+		bf_set(lpfc_abts_orig, &icmd->un.bls_acc, LPFC_ABTS_UNSOL_RSP);
+		bf_set(lpfc_abts_rxid, &icmd->un.bls_acc, rxid);
+		ctiocb->sli4_xritag = oxid;
+	} else {
+		/* ABTS sent by initiator to CT exchange, construction
+		 * of BA_ACC will need to allocate a new XRI as for the
+		 * XRI_TAG and RX_ID fields.
+		 */
+		bf_set(lpfc_abts_orig, &icmd->un.bls_acc, LPFC_ABTS_UNSOL_INT);
+		bf_set(lpfc_abts_rxid, &icmd->un.bls_acc, NO_XRI);
+		ctiocb->sli4_xritag = NO_XRI;
+	}
+	bf_set(lpfc_abts_oxid, &icmd->un.bls_acc, oxid);
+
+	/* Xmit CT abts accept on exchange <xid> */
+	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+			"1200 Xmit CT ABTS ACC on exchange x%x Data: x%x\n",
+			CMD_XMIT_BLS_RSP64_CX, phba->link_state);
+	lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
+}
+
+/**
+ * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
+ * @vport: Pointer to the vport on which this sequence was received
+ * @dmabuf: pointer to a dmabuf that describes the FC sequence
+ *
+ * This function handles an SLI-4 unsolicited abort event. If the unsolicited
+ * receive sequence is only partially assembled by the driver, it shall abort
+ * the partially assembled frames for the sequence. Otherwise, if the
+ * unsolicited receive sequence has been completely assembled and passed to
+ * the Upper Layer Protocol (ULP), it marks the per-OX_ID status to indicate
+ * that the unsolicited sequence has been aborted. After that, it issues a
+ * basic accept to acknowledge the abort.
+ **/
+void
+lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
+			     struct hbq_dmabuf *dmabuf)
+{
+	struct lpfc_hba *phba = vport->phba;
+	struct fc_frame_header fc_hdr;
+	uint32_t fctl;
+	bool abts_par;
+
+	/* Make a copy of fc_hdr before the dmabuf being released */
+	memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
+	fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
+
+	if (fctl & FC_FC_EX_CTX) {
+		/*
+		 * ABTS sent by responder to exchange, just free the buffer
+		 */
+		lpfc_in_buf_free(phba, &dmabuf->dbuf);
+	} else {
+		/*
+		 * ABTS sent by initiator to exchange, need to do cleanup
+		 */
+		/* Try to abort partially assembled seq */
+		abts_par = lpfc_sli4_abort_partial_seq(vport, dmabuf);
+
+		/* Send abort to ULP if partially seq abort failed */
+		if (abts_par == false)
+			lpfc_sli4_send_seq_to_ulp(vport, dmabuf);
+		else
+			lpfc_in_buf_free(phba, &dmabuf->dbuf);
+	}
+	/* Send basic accept (BA_ACC) to the abort requester */
+	lpfc_sli4_seq_abort_acc(phba, &fc_hdr);
+}
+
+/**
  * lpfc_seq_complete - Indicates if a sequence is complete
  * @dmabuf: pointer to a dmabuf that describes the FC sequence
  *
@@ -10935,10 +11246,9 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
 	fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
 	/* remove from receive buffer list */
 	list_del_init(&seq_dmabuf->hbuf.list);
+	lpfc_update_rcv_time_stamp(vport);
 	/* get the Remote Port's SID */
-	sid = (fc_hdr->fh_s_id[0] << 16 |
-	       fc_hdr->fh_s_id[1] << 8 |
-	       fc_hdr->fh_s_id[2]);
+	sid = sli4_sid_from_fc_hdr(fc_hdr);
 	/* Get an iocbq struct to fill in. */
 	first_iocbq = lpfc_sli_get_iocbq(vport->phba);
 	if (first_iocbq) {
@@ -10957,7 +11267,8 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
 							LPFC_DATA_BUF_SIZE;
 		first_iocbq->iocb.un.rcvels.remoteID = sid;
 		first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
-				bf_get(lpfc_rcqe_length, &seq_dmabuf->rcqe);
+				bf_get(lpfc_rcqe_length,
+				       &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
 	}
 	iocbq = first_iocbq;
 	/*
@@ -10975,7 +11286,8 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
 			iocbq->iocb.unsli3.rcvsli3.bde2.tus.f.bdeSize =
 							LPFC_DATA_BUF_SIZE;
 			first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
-				bf_get(lpfc_rcqe_length, &seq_dmabuf->rcqe);
+				bf_get(lpfc_rcqe_length,
+				       &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
 		} else {
 			iocbq = lpfc_sli_get_iocbq(vport->phba);
 			if (!iocbq) {
@@ -10994,7 +11306,8 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
 			iocbq->iocb.un.cont64[0].tus.f.bdeSize =
 							LPFC_DATA_BUF_SIZE;
 			first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
-				bf_get(lpfc_rcqe_length, &seq_dmabuf->rcqe);
+				bf_get(lpfc_rcqe_length,
+				       &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
 			iocbq->iocb.un.rcvels.remoteID = sid;
 			list_add_tail(&iocbq->list, &first_iocbq->list);
 		}
@@ -11002,6 +11315,43 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
 	return first_iocbq;
 }
 
+static void
+lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
+			  struct hbq_dmabuf *seq_dmabuf)
+{
+	struct fc_frame_header *fc_hdr;
+	struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
+	struct lpfc_hba *phba = vport->phba;
+
+	fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
+	iocbq = lpfc_prep_seq(vport, seq_dmabuf);
+	if (!iocbq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2707 Ring %d handler: Failed to allocate "
+				"iocb Rctl x%x Type x%x received\n",
+				LPFC_ELS_RING,
+				fc_hdr->fh_r_ctl, fc_hdr->fh_type);
+		return;
+	}
+	if (!lpfc_complete_unsol_iocb(phba,
+				      &phba->sli.ring[LPFC_ELS_RING],
+				      iocbq, fc_hdr->fh_r_ctl,
+				      fc_hdr->fh_type))
+		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+				"2540 Ring %d handler: unexpected Rctl "
+				"x%x Type x%x received\n",
+				LPFC_ELS_RING,
+				fc_hdr->fh_r_ctl, fc_hdr->fh_type);
+
+	/* Free iocb created in lpfc_prep_seq */
+	list_for_each_entry_safe(curr_iocb, next_iocb,
+		&iocbq->list, list) {
+		list_del_init(&curr_iocb->list);
+		lpfc_sli_release_iocbq(phba, curr_iocb);
+	}
+	lpfc_sli_release_iocbq(phba, iocbq);
+}
+
 /**
  * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
  * @phba: Pointer to HBA context object.
@@ -11014,67 +11364,54 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
  * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the
  * appropriate receive function when the final frame in a sequence is received.
  **/
-int
-lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba)
+void
+lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
+				 struct hbq_dmabuf *dmabuf)
 {
-	LIST_HEAD(cmplq);
-	struct hbq_dmabuf *dmabuf, *seq_dmabuf;
+	struct hbq_dmabuf *seq_dmabuf;
 	struct fc_frame_header *fc_hdr;
 	struct lpfc_vport *vport;
 	uint32_t fcfi;
-	struct lpfc_iocbq *iocbq;
-
-	/* Clear hba flag and get all received buffers into the cmplq */
-	spin_lock_irq(&phba->hbalock);
-	phba->hba_flag &= ~HBA_RECEIVE_BUFFER;
-	list_splice_init(&phba->rb_pend_list, &cmplq);
-	spin_unlock_irq(&phba->hbalock);
 
 	/* Process each received buffer */
-	while ((dmabuf = lpfc_sli_hbqbuf_get(&cmplq)) != NULL) {
-		fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
-		/* check to see if this a valid type of frame */
-		if (lpfc_fc_frame_check(phba, fc_hdr)) {
-			lpfc_in_buf_free(phba, &dmabuf->dbuf);
-			continue;
-		}
-		fcfi = bf_get(lpfc_rcqe_fcf_id, &dmabuf->rcqe);
-		vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi);
-		if (!vport) {
-			/* throw out the frame */
-			lpfc_in_buf_free(phba, &dmabuf->dbuf);
-			continue;
-		}
-		/* Link this frame */
-		seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
-		if (!seq_dmabuf) {
-			/* unable to add frame to vport - throw it out */
-			lpfc_in_buf_free(phba, &dmabuf->dbuf);
-			continue;
-		}
-		/* If not last frame in sequence continue processing frames. */
-		if (!lpfc_seq_complete(seq_dmabuf)) {
-			/*
-			 * When saving off frames post a new one and mark this
-			 * frame to be freed when it is finished.
-			 **/
-			lpfc_sli_hbqbuf_fill_hbqs(phba, LPFC_ELS_HBQ, 1);
-			dmabuf->tag = -1;
-			continue;
-		}
-		fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
-		iocbq = lpfc_prep_seq(vport, seq_dmabuf);
-		if (!lpfc_complete_unsol_iocb(phba,
-					      &phba->sli.ring[LPFC_ELS_RING],
-					      iocbq, fc_hdr->fh_r_ctl,
-					      fc_hdr->fh_type))
-			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
-					"2540 Ring %d handler: unexpected Rctl "
-					"x%x Type x%x received\n",
-					LPFC_ELS_RING,
-					fc_hdr->fh_r_ctl, fc_hdr->fh_type);
-	};
-	return 0;
+	fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
+	/* check to see if this a valid type of frame */
+	if (lpfc_fc_frame_check(phba, fc_hdr)) {
+		lpfc_in_buf_free(phba, &dmabuf->dbuf);
+		return;
+	}
+	fcfi = bf_get(lpfc_rcqe_fcf_id, &dmabuf->cq_event.cqe.rcqe_cmpl);
+	vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi);
+	if (!vport || !(vport->vpi_state & LPFC_VPI_REGISTERED)) {
+		/* throw out the frame */
+		lpfc_in_buf_free(phba, &dmabuf->dbuf);
+		return;
+	}
+	/* Handle the basic abort sequence (BA_ABTS) event */
+	if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
+		lpfc_sli4_handle_unsol_abort(vport, dmabuf);
+		return;
+	}
+
+	/* Link this frame */
+	seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
+	if (!seq_dmabuf) {
+		/* unable to add frame to vport - throw it out */
+		lpfc_in_buf_free(phba, &dmabuf->dbuf);
+		return;
+	}
+	/* If not last frame in sequence continue processing frames. */
+	if (!lpfc_seq_complete(seq_dmabuf)) {
+		/*
+		 * When saving off frames post a new one and mark this
+		 * frame to be freed when it is finished.
+		 **/
+		lpfc_sli_hbqbuf_fill_hbqs(phba, LPFC_ELS_HBQ, 1);
+		dmabuf->tag = -1;
+		return;
+	}
+	/* Send the complete sequence to the upper layer protocol */
+	lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
 }
 
 /**
@@ -11334,6 +11671,7 @@ lpfc_sli4_init_vpi(struct lpfc_hba *phba, uint16_t vpi)
 {
 	LPFC_MBOXQ_t *mboxq;
 	int rc = 0;
+	int retval = MBX_SUCCESS;
 	uint32_t mbox_tmo;
 
 	if (vpi == 0)
@@ -11344,16 +11682,17 @@ lpfc_sli4_init_vpi(struct lpfc_hba *phba, uint16_t vpi)
 	lpfc_init_vpi(phba, mboxq, vpi);
 	mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_INIT_VPI);
 	rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
-	if (rc != MBX_TIMEOUT)
-		mempool_free(mboxq, phba->mbox_mem_pool);
 	if (rc != MBX_SUCCESS) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 				"2022 INIT VPI Mailbox failed "
 				"status %d, mbxStatus x%x\n", rc,
 				bf_get(lpfc_mqe_status, &mboxq->u.mqe));
-		rc = -EIO;
+		retval = -EIO;
 	}
-	return rc;
+	if (rc != MBX_TIMEOUT)
+		mempool_free(mboxq, phba->mbox_mem_pool);
+
+	return retval;
 }
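
The reordering in this hunk encodes an ownership rule: when
lpfc_sli_issue_mbox_wait() returns MBX_TIMEOUT the mailbox buffer is still
referenced by the eventual completion, so it must not be freed; the error is
recorded first and the free is gated on the return code. A condensed sketch
of that rule (the wrapper is illustrative; the calls inside it are the ones
used above):

/* Illustrative wrapper: map mailbox failure to -EIO, but free the
 * mailbox only when the wait did not time out -- on MBX_TIMEOUT the
 * completion path still owns the buffer.
 */
static int issue_mbox_checked(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
			      uint32_t mbox_tmo)
{
	int retval = 0;
	int rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);

	if (rc != MBX_SUCCESS)
		retval = -EIO;
	if (rc != MBX_TIMEOUT)
		mempool_free(mboxq, phba->mbox_mem_pool);
	return retval;
}
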
 
 /**
@@ -11438,13 +11777,6 @@ lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
 	 */
 	lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
 	phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
-	if (unlikely(!mboxq->sge_array)) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
-				"2526 Failed to get the non-embedded SGE "
-				"virtual address\n");
-		lpfc_sli4_mbox_cmd_free(phba, mboxq);
-		return -ENOMEM;
-	}
 	virt_addr = mboxq->sge_array->addr[0];
 	/*
 	 * Configure the FCF record for FCFI 0.  This is the driver's
@@ -11542,7 +11874,8 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"2000 Failed to allocate mbox for "
 				"READ_FCF cmd\n");
-		return -ENOMEM;
+		error = -ENOMEM;
+		goto fail_fcfscan;
 	}
 
 	req_len = sizeof(struct fcf_record) +
@@ -11558,8 +11891,8 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
 				"0291 Allocated DMA memory size (x%x) is "
 				"less than the requested DMA memory "
 				"size (x%x)\n", alloc_len, req_len);
-		lpfc_sli4_mbox_cmd_free(phba, mboxq);
-		return -ENOMEM;
+		error = -ENOMEM;
+		goto fail_fcfscan;
 	}
 
 	/* Get the first SGE entry from the non-embedded DMA memory. This
@@ -11567,13 +11900,6 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
 	 */
 	lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
 	phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
-	if (unlikely(!mboxq->sge_array)) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
-				"2527 Failed to get the non-embedded SGE "
-				"virtual address\n");
-		lpfc_sli4_mbox_cmd_free(phba, mboxq);
-		return -ENOMEM;
-	}
 	virt_addr = mboxq->sge_array->addr[0];
 	read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
 
@@ -11586,7 +11912,6 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
 	mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_record;
 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
 	if (rc == MBX_NOT_FINISHED) {
-		lpfc_sli4_mbox_cmd_free(phba, mboxq);
 		error = -EIO;
 	} else {
 		spin_lock_irq(&phba->hbalock);
@@ -11594,6 +11919,15 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
 		spin_unlock_irq(&phba->hbalock);
 		error = 0;
 	}
+fail_fcfscan:
+	if (error) {
+		if (mboxq)
+			lpfc_sli4_mbox_cmd_free(phba, mboxq);
+		/* FCF scan failed, clear FCF_DISC_INPROGRESS flag */
+		spin_lock_irq(&phba->hbalock);
+		phba->hba_flag &= ~FCF_DISC_INPROGRESS;
+		spin_unlock_irq(&phba->hbalock);
+	}
 	return error;
 }
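
All failure paths in the rewritten lpfc_sli4_read_fcf_record() now funnel
through the single fail_fcfscan label, the usual kernel single-exit idiom:
one place frees the mailbox and clears FCF_DISC_INPROGRESS no matter where
the error arose. A generic sketch of the shape (every name below is a
hypothetical placeholder):

/* Hypothetical skeleton of the single-exit cleanup idiom. */
struct scan_ctx;
void *scan_alloc(struct scan_ctx *c);
int scan_issue(struct scan_ctx *c, void *buf);
void scan_free(struct scan_ctx *c, void *buf);
void scan_clear_flag(struct scan_ctx *c);

static int scan_with_cleanup(struct scan_ctx *c)
{
	void *buf;
	int error = 0;

	buf = scan_alloc(c);
	if (!buf) {
		error = -ENOMEM;
		goto fail_scan;
	}
	if (scan_issue(c, buf) < 0)
		error = -EIO;
fail_scan:
	if (error) {
		if (buf)
			scan_free(c, buf);
		scan_clear_flag(c);	/* e.g. clear FCF_DISC_INPROGRESS */
	}
	return error;
}
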
 
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 3c53316cf6d0..ba38de3c28f1 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -29,14 +29,17 @@ typedef enum _lpfc_ctx_cmd {
 	LPFC_CTX_HOST
 } lpfc_ctx_cmd;
 
-/* This structure is used to carry the needed response IOCB states */
-struct lpfc_sli4_rspiocb_info {
-	uint8_t hw_status;
-	uint8_t bfield;
-#define LPFC_XB	0x1
-#define LPFC_PV	0x2
-	uint8_t priority;
-	uint8_t reserved;
+struct lpfc_cq_event {
+	struct list_head list;
+	union {
+		struct lpfc_mcqe		mcqe_cmpl;
+		struct lpfc_acqe_link		acqe_link;
+		struct lpfc_acqe_fcoe		acqe_fcoe;
+		struct lpfc_acqe_dcbx		acqe_dcbx;
+		struct lpfc_rcqe		rcqe_cmpl;
+		struct sli4_wcqe_xri_aborted	wcqe_axri;
+		struct lpfc_wcqe_complete	wcqe_cmpl;
+	} cqe;
 };
 
 /* This structure is used to handle IOCB requests / responses */
@@ -46,6 +49,7 @@ struct lpfc_iocbq {
 	struct list_head clist;
 	uint16_t iotag;         /* pre-assigned IO tag */
 	uint16_t sli4_xritag;   /* pre-assigned XRI, (OXID) tag. */
+	struct lpfc_cq_event cq_event;
 
 	IOCB_t iocb;		/* IOCB cmd */
 	uint8_t retry;		/* retry counter for IOCB cmd - if needed */
@@ -56,11 +60,13 @@ struct lpfc_iocbq {
 #define LPFC_DRIVER_ABORTED	8	/* driver aborted this request */
 #define LPFC_IO_FABRIC		0x10	/* Iocb send using fabric scheduler */
 #define LPFC_DELAY_MEM_FREE	0x20    /* Defer free'ing of FC data */
-#define LPFC_FIP_ELS		0x40
+#define LPFC_FIP_ELS_ID_MASK	0xc0	/* ELS_ID range 0-3 */
+#define LPFC_FIP_ELS_ID_SHIFT	6
 
 	uint8_t abort_count;
 	uint8_t rsvd2;
 	uint32_t drvrTimeout;	/* driver timeout in seconds */
+	uint32_t fcp_wqidx;	/* index to FCP work queue */
 	struct lpfc_vport *vport;/* virtual port pointer */
 	void *context1;		/* caller context information */
 	void *context2;		/* caller context information */
@@ -76,7 +82,6 @@ struct lpfc_iocbq {
 			   struct lpfc_iocbq *);
 	void (*iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
 			   struct lpfc_iocbq *);
-	struct lpfc_sli4_rspiocb_info sli4_info;
 };
 
 #define SLI_IOCB_RET_IOCB      1	/* Return IOCB if cmd ring full */
@@ -110,7 +115,7 @@ typedef struct lpfcMboxq {
 				   return */
 #define MBX_NOWAIT      2	/* issue command then return immediately */
 
-#define LPFC_MAX_RING_MASK  4	/* max num of rctl/type masks allowed per
+#define LPFC_MAX_RING_MASK  5	/* max num of rctl/type masks allowed per
 				   ring */
 #define LPFC_MAX_RING       4	/* max num of SLI rings used by driver */
 
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index b5f4ba1a5c27..25d66d070cf8 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -58,6 +58,16 @@
 #define LPFC_FCOE_FKA_ADV_PER	0
 #define LPFC_FCOE_FIP_PRIORITY	0x80
 
+#define sli4_sid_from_fc_hdr(fc_hdr)  \
+	((fc_hdr)->fh_s_id[0] << 16 | \
+	 (fc_hdr)->fh_s_id[1] <<  8 | \
+	 (fc_hdr)->fh_s_id[2])
+
+#define sli4_fctl_from_fc_hdr(fc_hdr)  \
+	((fc_hdr)->fh_f_ctl[0] << 16 | \
+	 (fc_hdr)->fh_f_ctl[1] <<  8 | \
+	 (fc_hdr)->fh_f_ctl[2])
+
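
Both macros fold a 3-byte FC header field (stored most-significant byte
first) into a host 24-bit value. A tiny self-contained check of the
composition (the values are illustrative):

#include <assert.h>
#include <stdint.h>

/* Same shift-and-or as sli4_sid_from_fc_hdr(): bytes {0x01, 0x02,
 * 0x03} yield the 24-bit address 0x010203.
 */
static uint32_t fc_24bit(const uint8_t b[3])
{
	return (uint32_t)b[0] << 16 | (uint32_t)b[1] << 8 | b[2];
}

int main(void)
{
	const uint8_t s_id[3] = { 0x01, 0x02, 0x03 };

	assert(fc_24bit(s_id) == 0x010203);
	return 0;
}
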
 enum lpfc_sli4_queue_type {
 	LPFC_EQ,
 	LPFC_GCQ,
@@ -110,18 +120,6 @@ struct lpfc_queue {
 	union sli4_qe qe[1];	/* array to index entries (must be last) */
 };
 
-struct lpfc_cq_event {
-	struct list_head list;
-	union {
-		struct lpfc_mcqe		mcqe_cmpl;
-		struct lpfc_acqe_link		acqe_link;
-		struct lpfc_acqe_fcoe		acqe_fcoe;
-		struct lpfc_acqe_dcbx		acqe_dcbx;
-		struct lpfc_rcqe		rcqe_cmpl;
-		struct sli4_wcqe_xri_aborted	wcqe_axri;
-	} cqe;
-};
-
 struct lpfc_sli4_link {
 	uint8_t speed;
 	uint8_t duplex;
@@ -166,7 +164,7 @@ struct lpfc_fip_param_hdr {
 #define	lpfc_fip_param_hdr_fipp_mode_SHIFT	6
 #define	lpfc_fip_param_hdr_fipp_mode_MASK	0x3
 #define lpfc_fip_param_hdr_fipp_mode_WORD	parm_flags
-#define	FIPP_MODE_ON				0x2
+#define	FIPP_MODE_ON				0x1
 #define	FIPP_MODE_OFF				0x0
 #define FIPP_VLAN_VALID				0x1
 };
@@ -295,9 +293,8 @@ struct lpfc_sli4_hba {
 	/* BAR0 PCI config space register memory map */
 	void __iomem *UERRLOregaddr; /* Address to UERR_STATUS_LO register */
 	void __iomem *UERRHIregaddr; /* Address to UERR_STATUS_HI register */
-	void __iomem *ONLINE0regaddr; /* Address to components of internal UE */
-	void __iomem *ONLINE1regaddr; /* Address to components of internal UE */
-#define LPFC_ONLINE_NERR	0xFFFFFFFF
+	void __iomem *UEMASKLOregaddr; /* Address to UE_MASK_LO register */
+	void __iomem *UEMASKHIregaddr; /* Address to UE_MASK_HI register */
 	void __iomem *SCRATCHPADregaddr; /* Address to scratchpad register */
 	/* BAR1 FCoE function CSR register memory map */
 	void __iomem *STAregaddr;    /* Address to HST_STATE register */
@@ -311,6 +308,8 @@ struct lpfc_sli4_hba {
 	void __iomem *MQDBregaddr;   /* Address to MQ_DOORBELL register */
 	void __iomem *BMBXregaddr;   /* Address to BootStrap MBX register */
 
+	uint32_t ue_mask_lo;
+	uint32_t ue_mask_hi;
 	struct msix_entry *msix_entries;
 	uint32_t cfg_eqn;
 	struct lpfc_fcp_eq_hdl *fcp_eq_hdl; /* FCP per-WQ handle */
@@ -325,7 +324,6 @@ struct lpfc_sli4_hba {
 	struct lpfc_queue **fcp_cq;/* Fast-path FCP compl queue */
 	struct lpfc_queue *mbx_cq; /* Slow-path mailbox complete queue */
 	struct lpfc_queue *els_cq; /* Slow-path ELS response complete queue */
-	struct lpfc_queue *rxq_cq; /* Slow-path unsolicited complete queue */
 
 	/* Setup information for various queue parameters */
 	int eq_esize;
@@ -360,7 +358,7 @@ struct lpfc_sli4_hba {
 	unsigned long *rpi_bmask;
 	uint16_t rpi_count;
 	struct lpfc_sli4_flags sli4_flags;
-	struct list_head sp_rspiocb_work_queue;
+	struct list_head sp_queue_event;
 	struct list_head sp_cqe_event_pool;
 	struct list_head sp_asynce_work_queue;
 	struct list_head sp_fcp_xri_aborted_work_queue;
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 9ae20af4bdb7..c7f3aed2aab8 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,8 +18,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "8.3.4"
-
+#define LPFC_DRIVER_VERSION "8.3.6"
 #define LPFC_DRIVER_NAME		"lpfc"
 #define LPFC_SP_DRIVER_HANDLER_NAME	"lpfc:sp"
 #define LPFC_FP_DRIVER_HANDLER_NAME	"lpfc:fp"
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 606efa767548..7d6dd83d3592 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -389,7 +389,7 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
 	 * by the port.
 	 */
 	if ((phba->sli_rev == LPFC_SLI_REV4) &&
-		(pport->vfi_state & LPFC_VFI_REGISTERED)) {
+	    (pport->vpi_state & LPFC_VPI_REGISTERED)) {
 		rc = lpfc_sli4_init_vpi(phba, vpi);
 		if (rc) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
@@ -700,6 +700,8 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
 			}
 			spin_unlock_irq(&phba->ndlp_lock);
 		}
+		if (vport->vpi_state != LPFC_VPI_REGISTERED)
+			goto skip_logo;
 		vport->unreg_vpi_cmpl = VPORT_INVAL;
 		timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
 		if (!lpfc_issue_els_npiv_logo(vport, ndlp))
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index 234f0b7eb21c..fd181c2a8ae4 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -335,12 +335,17 @@ static struct device_attribute *megaraid_sdev_attrs[] = {
  * megaraid_change_queue_depth - Change the device's queue depth
  * @sdev:	scsi device struct
  * @qdepth:	depth to set
+ * @reason:	calling context
  *
  * Return value:
 * 	actual depth set, or -EOPNOTSUPP when the reason is not supported
  */
-static int megaraid_change_queue_depth(struct scsi_device *sdev, int qdepth)
+static int megaraid_change_queue_depth(struct scsi_device *sdev, int qdepth,
+				       int reason)
 {
+	if (reason != SCSI_QDEPTH_DEFAULT)
+		return -EOPNOTSUPP;
+
 	if (qdepth > MBOX_MAX_SCSI_CMDS)
 		qdepth = MBOX_MAX_SCSI_CMDS;
 	scsi_adjust_queue_depth(sdev, 0, qdepth);
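
This hunk tracks a midlayer interface change: ->change_queue_depth() now
receives a 'reason' code, and a driver that only supports user-requested
changes rejects everything except SCSI_QDEPTH_DEFAULT with -EOPNOTSUPP. A
sketch of the same callback for a hypothetical driver (EXAMPLE_MAX_CMDS is
an invented adapter limit):

/* Hypothetical driver callback following the new three-argument
 * convention shown in the megaraid hunk above.
 */
static int example_change_queue_depth(struct scsi_device *sdev, int qdepth,
				      int reason)
{
	if (reason != SCSI_QDEPTH_DEFAULT)	/* e.g. QUEUE_FULL events */
		return -EOPNOTSUPP;

	if (qdepth > EXAMPLE_MAX_CMDS)		/* clamp to adapter limit */
		qdepth = EXAMPLE_MAX_CMDS;
	scsi_adjust_queue_depth(sdev, 0, qdepth);
	return qdepth;
}
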
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c
index a39addc3a596..134c63ef6d38 100644
--- a/drivers/scsi/megaraid/megaraid_sas.c
+++ b/drivers/scsi/megaraid/megaraid_sas.c
@@ -10,7 +10,7 @@
  *	   2 of the License, or (at your option) any later version.
  *
  * FILE		: megaraid_sas.c
- * Version     : v00.00.04.01-rc1
+ * Version     : v00.00.04.12-rc1
  *
  * Authors:
  *	(email-id : megaraidlinux@lsi.com)
@@ -40,6 +40,7 @@
 #include <linux/compat.h>
 #include <linux/blkdev.h>
 #include <linux/mutex.h>
+#include <linux/poll.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
@@ -75,6 +76,10 @@ static struct pci_device_id megasas_pci_table[] = {
 	/* gen2*/
 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0079GEN2)},
 	/* gen2*/
+	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0073SKINNY)},
+	/* skinny*/
+	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0071SKINNY)},
+	/* skinny*/
 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VERDE_ZCR)},
 	/* xscale IOP, vega */
 	{PCI_DEVICE(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5)},
@@ -89,8 +94,14 @@ static struct megasas_mgmt_info megasas_mgmt_info;
 static struct fasync_struct *megasas_async_queue;
 static DEFINE_MUTEX(megasas_async_queue_mutex);
 
+static int megasas_poll_wait_aen;
+static DECLARE_WAIT_QUEUE_HEAD(megasas_poll_wait);
+static u32 support_poll_for_event;
 static u32 megasas_dbg_lvl;
 
+/* define lock for aen poll */
+spinlock_t poll_aen_lock;
+
 static void
 megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
 		     u8 alt_status);
@@ -215,7 +226,10 @@ megasas_clear_intr_xscale(struct megasas_register_set __iomem * regs)
  * @regs :			MFI register set
  */
 static inline void 
-megasas_fire_cmd_xscale(dma_addr_t frame_phys_addr,u32 frame_count, struct megasas_register_set __iomem *regs)
+megasas_fire_cmd_xscale(struct megasas_instance *instance,
+		dma_addr_t frame_phys_addr,
+		u32 frame_count,
+		struct megasas_register_set __iomem *regs)
 {
 	writel((frame_phys_addr >> 3)|(frame_count),
 	       &(regs)->inbound_queue_port);
@@ -312,7 +326,10 @@ megasas_clear_intr_ppc(struct megasas_register_set __iomem * regs)
  * @regs :			MFI register set
  */
 static inline void 
-megasas_fire_cmd_ppc(dma_addr_t frame_phys_addr, u32 frame_count, struct megasas_register_set __iomem *regs)
+megasas_fire_cmd_ppc(struct megasas_instance *instance,
+		dma_addr_t frame_phys_addr,
+		u32 frame_count,
+		struct megasas_register_set __iomem *regs)
 {
 	writel((frame_phys_addr | (frame_count<<1))|1, 
 			&(regs)->inbound_queue_port);
@@ -328,6 +345,104 @@ static struct megasas_instance_template megasas_instance_template_ppc = {
 };
 
 /**
+ * megasas_enable_intr_skinny -	Enables interrupts
+ * @regs:			MFI register set
+ */
+static inline void
+megasas_enable_intr_skinny(struct megasas_register_set __iomem *regs)
+{
+	writel(0xFFFFFFFF, &(regs)->outbound_intr_mask);
+
+	writel(~MFI_SKINNY_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
+
+	/* Dummy readl to force pci flush */
+	readl(&regs->outbound_intr_mask);
+}
+
+/**
+ * megasas_disable_intr_skinny -	Disables interrupt
+ * @regs:			MFI register set
+ */
+static inline void
+megasas_disable_intr_skinny(struct megasas_register_set __iomem *regs)
+{
+	u32 mask = 0xFFFFFFFF;
+	writel(mask, &regs->outbound_intr_mask);
+	/* Dummy readl to force pci flush */
+	readl(&regs->outbound_intr_mask);
+}
+
+/**
+ * megasas_read_fw_status_reg_skinny - returns the current FW status value
+ * @regs:			MFI register set
+ */
+static u32
+megasas_read_fw_status_reg_skinny(struct megasas_register_set __iomem *regs)
+{
+	return readl(&(regs)->outbound_scratch_pad);
+}
+
+/**
+ * megasas_clear_intr_skinny -	Check & clear interrupt
+ * @regs:				MFI register set
+ */
+static int
+megasas_clear_intr_skinny(struct megasas_register_set __iomem *regs)
+{
+	u32 status;
+	/*
+	 * Check if it is our interrupt
+	 */
+	status = readl(&regs->outbound_intr_status);
+
+	if (!(status & MFI_SKINNY_ENABLE_INTERRUPT_MASK)) {
+		return 1;
+	}
+
+	/*
+	 * Clear the interrupt by writing back the same value
+	 */
+	writel(status, &regs->outbound_intr_status);
+
+	/*
+	* dummy read to flush PCI
+	*/
+	readl(&regs->outbound_intr_status);
+
+	return 0;
+}
+
+/**
+ * megasas_fire_cmd_skinny -	Sends command to the FW
+ * @instance :			Adapter soft state
+ * @frame_phys_addr :		Physical address of cmd
+ * @frame_count :		Number of frames for the command
+ * @regs :			MFI register set
+ */
+static inline void
+megasas_fire_cmd_skinny(struct megasas_instance *instance,
+			dma_addr_t frame_phys_addr,
+			u32 frame_count,
+			struct megasas_register_set __iomem *regs)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&instance->fire_lock, flags);
+	writel(0, &(regs)->inbound_high_queue_port);
+	writel((frame_phys_addr | (frame_count<<1))|1,
+		&(regs)->inbound_low_queue_port);
+	spin_unlock_irqrestore(&instance->fire_lock, flags);
+}
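
The skinny fire_cmd is the only variant that needs a lock: its doorbell is a
64-bit value issued as two 32-bit MMIO stores, and fire_lock keeps the
high/low halves of concurrent submissions from interleaving. The idiom in
isolation (a sketch with illustrative names; upper_32_bits()/lower_32_bits()
are the standard kernel helpers):

/* Illustrative: serialize a 64-bit doorbell issued as two 32-bit
 * writes; the low-half write is the one that triggers the firmware.
 */
static void fire_doorbell_split(spinlock_t *lock, void __iomem *hi_port,
				void __iomem *lo_port, u64 doorbell)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	writel(upper_32_bits(doorbell), hi_port);
	writel(lower_32_bits(doorbell), lo_port);
	spin_unlock_irqrestore(lock, flags);
}
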
+
+static struct megasas_instance_template megasas_instance_template_skinny = {
+
+	.fire_cmd = megasas_fire_cmd_skinny,
+	.enable_intr = megasas_enable_intr_skinny,
+	.disable_intr = megasas_disable_intr_skinny,
+	.clear_intr = megasas_clear_intr_skinny,
+	.read_fw_status_reg = megasas_read_fw_status_reg_skinny,
+};
+
+
+/**
 *	The following functions are defined for gen2 (deviceid : 0x78 0x79)
 *	controllers
 */
@@ -404,7 +519,9 @@ megasas_clear_intr_gen2(struct megasas_register_set __iomem *regs)
  * @regs :                     MFI register set
  */
 static inline void
-megasas_fire_cmd_gen2(dma_addr_t frame_phys_addr, u32 frame_count,
+megasas_fire_cmd_gen2(struct megasas_instance *instance,
+			dma_addr_t frame_phys_addr,
+			u32 frame_count,
 			struct megasas_register_set __iomem *regs)
 {
 	writel((frame_phys_addr | (frame_count<<1))|1,
@@ -446,7 +563,8 @@ megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
 	/*
 	 * Issue the frame using inbound queue port
 	 */
-	instance->instancet->fire_cmd(cmd->frame_phys_addr ,0,instance->reg_set);
+	instance->instancet->fire_cmd(instance,
+			cmd->frame_phys_addr, 0, instance->reg_set);
 
 	/*
 	 * Wait for cmd_status to change
@@ -477,7 +595,8 @@ megasas_issue_blocked_cmd(struct megasas_instance *instance,
 {
 	cmd->cmd_status = ENODATA;
 
-	instance->instancet->fire_cmd(cmd->frame_phys_addr ,0,instance->reg_set);
+	instance->instancet->fire_cmd(instance,
+			cmd->frame_phys_addr, 0, instance->reg_set);
 
 	wait_event_timeout(instance->int_cmd_wait_q, (cmd->cmd_status != ENODATA),
 		MEGASAS_INTERNAL_CMD_WAIT_TIME*HZ);
@@ -522,7 +641,8 @@ megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
 	cmd->sync_cmd = 1;
 	cmd->cmd_status = 0xFF;
 
-	instance->instancet->fire_cmd(cmd->frame_phys_addr ,0,instance->reg_set);
+	instance->instancet->fire_cmd(instance,
+			cmd->frame_phys_addr, 0, instance->reg_set);
 
 	/*
 	 * Wait for this cmd to complete
@@ -592,6 +712,35 @@ megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp,
 	return sge_count;
 }
 
+/**
+ * megasas_make_sgl_skinny - Prepares IEEE SGL
+ * @instance:           Adapter soft state
+ * @scp:                SCSI command from the mid-layer
+ * @mfi_sgl:            SGL to be filled in
+ *
+ * If successful, this function returns the number of SG elements. Otherwise,
+ * it returns -1.
+ */
+static int
+megasas_make_sgl_skinny(struct megasas_instance *instance,
+		struct scsi_cmnd *scp, union megasas_sgl *mfi_sgl)
+{
+	int i;
+	int sge_count;
+	struct scatterlist *os_sgl;
+
+	sge_count = scsi_dma_map(scp);
+
+	if (sge_count) {
+		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
+			mfi_sgl->sge_skinny[i].length = sg_dma_len(os_sgl);
+			mfi_sgl->sge_skinny[i].phys_addr =
+						sg_dma_address(os_sgl);
+		}
+	}
+	return sge_count;
+}
+
  /**
  * megasas_get_frame_count - Computes the number of frames
  * @frame_type		: type of frame- io or pthru frame
@@ -600,7 +749,8 @@ megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp,
  * Returns the number of frames required for the number of SGEs (sge_count)
  */
 
-static u32 megasas_get_frame_count(u8 sge_count, u8 frame_type)
+static u32 megasas_get_frame_count(struct megasas_instance *instance,
+			u8 sge_count, u8 frame_type)
 {
 	int num_cnt;
 	int sge_bytes;
@@ -610,6 +760,10 @@ static u32 megasas_get_frame_count(u8 sge_count, u8 frame_type)
 	sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
 	    sizeof(struct megasas_sge32);
 
+	if (instance->flag_ieee) {
+		sge_sz = sizeof(struct megasas_sge_skinny);
+	}
+
 	/*
 	 * Main frame can contain 2 SGEs for 64-bit SGLs and
 	 * 3 SGEs for 32-bit SGLs for ldio &
@@ -617,12 +771,16 @@ static u32 megasas_get_frame_count(u8 sge_count, u8 frame_type)
 	 * 2 SGEs for 32-bit SGLs for pthru frame
 	 */
 	if (unlikely(frame_type == PTHRU_FRAME)) {
-		if (IS_DMA64)
+		if (instance->flag_ieee == 1) {
+			num_cnt = sge_count - 1;
+		} else if (IS_DMA64)
 			num_cnt = sge_count - 1;
 		else
 			num_cnt = sge_count - 2;
 	} else {
-		if (IS_DMA64)
+		if (instance->flag_ieee == 1) {
+			num_cnt = sge_count - 1;
+		} else if (IS_DMA64)
 			num_cnt = sge_count - 2;
 		else
 			num_cnt = sge_count - 3;
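
Past the num_cnt adjustment shown here, the frame count follows from simple
arithmetic: the overflow SGEs occupy extra MFI frames, rounded up, plus the
main frame itself. A hedged sketch of that tail (an approximation built from
the comments above, not a verbatim copy of the driver's remaining lines):

/* Approximate tail of megasas_get_frame_count(): num_cnt SGEs of
 * sge_sz bytes spill into whole frames of frame_size bytes each, and
 * the main frame always counts as one.
 */
static u32 frames_for_sgl(int num_cnt, u32 sge_sz, u32 frame_size)
{
	u32 sge_bytes;

	if (num_cnt < 0)
		num_cnt = 0;	/* every SGE fit in the main frame */
	sge_bytes = sge_sz * num_cnt;
	return sge_bytes / frame_size +
	       (sge_bytes % frame_size ? 1 : 0) + 1;
}
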
@@ -671,6 +829,10 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
 	else if (scp->sc_data_direction == PCI_DMA_NONE)
 		flags = MFI_FRAME_DIR_NONE;
 
+	if (instance->flag_ieee == 1) {
+		flags |= MFI_FRAME_IEEE;
+	}
+
 	/*
 	 * Prepare the DCDB frame
 	 */
@@ -687,9 +849,24 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
 	memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);
 
 	/*
+	* If the command is for the tape device, set the
+	* pthru timeout to the os layer timeout value.
+	*/
+	if (scp->device->type == TYPE_TAPE) {
+		if ((scp->request->timeout / HZ) > 0xFFFF)
+			pthru->timeout = 0xFFFF;
+		else
+			pthru->timeout = scp->request->timeout / HZ;
+	}
+
+	/*
 	 * Construct SGL
 	 */
-	if (IS_DMA64) {
+	if (instance->flag_ieee == 1) {
+		pthru->flags |= MFI_FRAME_SGL64;
+		pthru->sge_count = megasas_make_sgl_skinny(instance, scp,
+						      &pthru->sgl);
+	} else if (IS_DMA64) {
 		pthru->flags |= MFI_FRAME_SGL64;
 		pthru->sge_count = megasas_make_sgl64(instance, scp,
 						      &pthru->sgl);
@@ -708,7 +885,7 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
 	 * Compute the total number of frames this command consumes. FW uses
 	 * this number to pull sufficient number of frames from host memory.
 	 */
-	cmd->frame_count = megasas_get_frame_count(pthru->sge_count,
+	cmd->frame_count = megasas_get_frame_count(instance, pthru->sge_count,
 							PTHRU_FRAME);
 
 	return cmd->frame_count;
@@ -739,6 +916,10 @@ megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
 	else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
 		flags = MFI_FRAME_DIR_READ;
 
+	if (instance->flag_ieee == 1) {
+		flags |= MFI_FRAME_IEEE;
+	}
+
 	/*
 	 * Prepare the Logical IO frame: 2nd bit is zero for all read cmds
 	 */
@@ -809,7 +990,11 @@ megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
 	/*
 	 * Construct SGL
 	 */
-	if (IS_DMA64) {
+	if (instance->flag_ieee) {
+		ldio->flags |= MFI_FRAME_SGL64;
+		ldio->sge_count = megasas_make_sgl_skinny(instance, scp,
+					      &ldio->sgl);
+	} else if (IS_DMA64) {
 		ldio->flags |= MFI_FRAME_SGL64;
 		ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl);
 	} else
@@ -826,7 +1011,8 @@ megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
 	 * Compute the total number of frames this command consumes. FW uses
 	 * this number to pull sufficient number of frames from host memory.
 	 */
-	cmd->frame_count = megasas_get_frame_count(ldio->sge_count, IO_FRAME);
+	cmd->frame_count = megasas_get_frame_count(instance,
+			ldio->sge_count, IO_FRAME);
 
 	return cmd->frame_count;
 }
@@ -983,7 +1169,8 @@ megasas_queue_command(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd *))
 	 */
 	atomic_inc(&instance->fw_outstanding);
 
-	instance->instancet->fire_cmd(cmd->frame_phys_addr ,cmd->frame_count-1,instance->reg_set);
+	instance->instancet->fire_cmd(instance, cmd->frame_phys_addr,
+				cmd->frame_count-1, instance->reg_set);
 	/*
 	 * Check if we have pend cmds to be completed
 	 */
@@ -1000,24 +1187,76 @@ megasas_queue_command(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd *))
 	return 0;
 }
 
+static struct megasas_instance *megasas_lookup_instance(u16 host_no)
+{
+	int i;
+
+	for (i = 0; i < megasas_mgmt_info.max_index; i++) {
+
+		if ((megasas_mgmt_info.instance[i]) &&
+		    (megasas_mgmt_info.instance[i]->host->host_no == host_no))
+			return megasas_mgmt_info.instance[i];
+	}
+
+	return NULL;
+}
+
 static int megasas_slave_configure(struct scsi_device *sdev)
 {
+	u16             pd_index = 0;
+	struct megasas_instance *instance;
+
+	instance = megasas_lookup_instance(sdev->host->host_no);
+
 	/*
-	 * Don't export physical disk devices to the disk driver.
-	 *
-	 * FIXME: Currently we don't export them to the midlayer at all.
-	 * 	  That will be fixed once LSI engineers have audited the
-	 * 	  firmware for possible issues.
-	 */
-	if (sdev->channel < MEGASAS_MAX_PD_CHANNELS && sdev->type == TYPE_DISK)
+	 * Don't export physical disk devices to the disk driver.
+	 *
+	 * FIXME: Currently we don't export them to the midlayer at all.
+	 *        That will be fixed once LSI engineers have audited the
+	 *        firmware for possible issues.
+	 */
+	if (sdev->channel < MEGASAS_MAX_PD_CHANNELS &&
+				sdev->type == TYPE_DISK) {
+		pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
+								sdev->id;
+		if (instance->pd_list[pd_index].driveState ==
+						MR_PD_STATE_SYSTEM) {
+			blk_queue_rq_timeout(sdev->request_queue,
+				MEGASAS_DEFAULT_CMD_TIMEOUT * HZ);
+			return 0;
+		}
 		return -ENXIO;
+	}
 
 	/*
-	 * The RAID firmware may require extended timeouts.
-	 */
-	if (sdev->channel >= MEGASAS_MAX_PD_CHANNELS)
-		blk_queue_rq_timeout(sdev->request_queue,
-				     MEGASAS_DEFAULT_CMD_TIMEOUT * HZ);
+	 * The RAID firmware may require extended timeouts.
+	 */
+	blk_queue_rq_timeout(sdev->request_queue,
+		MEGASAS_DEFAULT_CMD_TIMEOUT * HZ);
+	return 0;
+}
+
+static int megasas_slave_alloc(struct scsi_device *sdev)
+{
+	u16             pd_index = 0;
+	struct megasas_instance *instance;
+	instance = megasas_lookup_instance(sdev->host->host_no);
+	if ((sdev->channel < MEGASAS_MAX_PD_CHANNELS) &&
+				(sdev->type == TYPE_DISK)) {
+		/*
+		 * Open the OS scan to the SYSTEM PD
+		 */
+		pd_index =
+			(sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
+			sdev->id;
+		if ((instance->pd_list[pd_index].driveState ==
+					MR_PD_STATE_SYSTEM) &&
+			(instance->pd_list[pd_index].driveType ==
+						TYPE_DISK)) {
+			return 0;
+		}
+		return -ENXIO;
+	}
 	return 0;
 }
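[Editor's note] Both megasas_slave_alloc() and megasas_slave_configure() derive pd_index the same way. A standalone sketch of the mapping, with the per-channel width passed in rather than taken from the header: with e.g. 128 devices per channel, a device at channel 1, id 3 lands at index 131.

#include <stdint.h>

/* channel/id -> flat PD index, as computed in the two callbacks above. */
static uint16_t pd_index(uint8_t channel, uint8_t id,
			 uint16_t devs_per_channel)
{
	return (uint16_t)(channel * devs_per_channel + id);
}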
 
@@ -1072,7 +1311,14 @@ static void megasas_complete_cmd_dpc(unsigned long instance_addr)
 
 		spin_lock_irqsave(instance->host->host_lock, flags);
 		instance->flag &= ~MEGASAS_FW_BUSY;
-		instance->host->can_queue =
+		if ((instance->pdev->device ==
+			PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
+			(instance->pdev->device ==
+			PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
+			instance->host->can_queue =
+				instance->max_fw_cmds - MEGASAS_SKINNY_INT_CMDS;
+		} else
+			instance->host->can_queue =
 				instance->max_fw_cmds - MEGASAS_INT_CMDS;
 
 		spin_unlock_irqrestore(instance->host->host_lock, flags);
@@ -1117,8 +1363,16 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance)
 		* Send signal to FW to stop processing any pending cmds.
 		* The controller will be taken offline by the OS now.
 		*/
-		writel(MFI_STOP_ADP,
+		if ((instance->pdev->device ==
+			PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
+			(instance->pdev->device ==
+			PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
+			writel(MFI_STOP_ADP,
+				&instance->reg_set->reserved_0[0]);
+		} else {
+			writel(MFI_STOP_ADP,
 				&instance->reg_set->inbound_doorbell);
+		}
 		megasas_dump_pending_frames(instance);
 		instance->hw_crit_error = 1;
 		return FAILED;
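[Editor's note] This skinny-vs-default doorbell selection recurs here and again in each megasas_transition_to_ready() state below. A helper like the following (our sketch, not part of the patch) would centralize the choice:

/* Sketch only: pick the doorbell register for this controller.
 * Skinny parts take doorbell writes through reserved_0[0]. */
static u32 __iomem *megasas_doorbell(struct megasas_instance *instance)
{
	if (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY ||
	    instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)
		return &instance->reg_set->reserved_0[0];

	return &instance->reg_set->inbound_doorbell;
}

/* usage: writel(MFI_STOP_ADP, megasas_doorbell(instance)); */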
@@ -1266,6 +1520,8 @@ megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev,
 	return 0;
 }
 
+static void megasas_aen_polling(struct work_struct *work);
+
 /**
  * megasas_service_aen -	Processes an event notification
  * @instance:			Adapter soft state
@@ -1281,16 +1537,36 @@ megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev,
 static void
 megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
 {
+	unsigned long flags;
 	/*
 	 * Don't signal app if it is just an aborted previously registered aen
 	 */
-	if (!cmd->abort_aen)
+	if ((!cmd->abort_aen) && (instance->unload == 0)) {
+		spin_lock_irqsave(&poll_aen_lock, flags);
+		megasas_poll_wait_aen = 1;
+		spin_unlock_irqrestore(&poll_aen_lock, flags);
+		wake_up(&megasas_poll_wait);
 		kill_fasync(&megasas_async_queue, SIGIO, POLL_IN);
+	}
 	else
 		cmd->abort_aen = 0;
 
 	instance->aen_cmd = NULL;
 	megasas_return_cmd(instance, cmd);
+
+	if (instance->unload == 0) {
+		struct megasas_aen_event *ev;
+		ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
+		if (!ev) {
+			printk(KERN_ERR "megasas_service_aen: out of memory\n");
+		} else {
+			ev->instance = instance;
+			instance->ev = ev;
+			INIT_WORK(&ev->hotplug_work, megasas_aen_polling);
+			schedule_delayed_work(
+				(struct delayed_work *)&ev->hotplug_work, 0);
+		}
+	}
 }
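[Editor's note] Note the (struct delayed_work *) cast when scheduling above: it is safe only because a zero delay means the timer inside the would-be delayed_work is never armed. A layout-independent variant (hypothetical, not the patch's code) would embed a real delayed_work:

#include <linux/slab.h>
#include <linux/workqueue.h>

/* Hypothetical variant: embed a delayed_work so no cast is needed. */
struct megasas_aen_event_alt {
	struct delayed_work hotplug_work;
	struct megasas_instance *instance;
};

static void megasas_aen_polling_alt(struct work_struct *work)
{
	struct megasas_aen_event_alt *ev =
		container_of(work, struct megasas_aen_event_alt,
			     hotplug_work.work);

	/* ... process the event as megasas_aen_polling() does ... */
	kfree(ev);
}

/* setup: INIT_DELAYED_WORK(&ev->hotplug_work, megasas_aen_polling_alt);
 *        schedule_delayed_work(&ev->hotplug_work, 0); */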
 
 /*
@@ -1302,6 +1578,7 @@ static struct scsi_host_template megasas_template = {
 	.name = "LSI SAS based MegaRAID driver",
 	.proc_name = "megaraid_sas",
 	.slave_configure = megasas_slave_configure,
+	.slave_alloc = megasas_slave_alloc,
 	.queuecommand = megasas_queue_command,
 	.eh_device_reset_handler = megasas_reset_device,
 	.eh_bus_reset_handler = megasas_reset_bus_host,
@@ -1370,6 +1647,7 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
 {
 	int exception = 0;
 	struct megasas_header *hdr = &cmd->frame->hdr;
+	unsigned long flags;
 
 	if (cmd->scmd)
 		cmd->scmd->SCp.ptr = NULL;
@@ -1459,6 +1737,12 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
 	case MFI_CMD_SMP:
 	case MFI_CMD_STP:
 	case MFI_CMD_DCMD:
+		if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
+			cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) {
+			spin_lock_irqsave(&poll_aen_lock, flags);
+			megasas_poll_wait_aen = 0;
+			spin_unlock_irqrestore(&poll_aen_lock, flags);
+		}
 
 		/*
 		 * See if got an event notification
@@ -1536,6 +1820,7 @@ megasas_transition_to_ready(struct megasas_instance* instance)
 	u8 max_wait;
 	u32 fw_state;
 	u32 cur_state;
+	u32 abs_state, curr_abs_state;
 
 	fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK;
 
@@ -1545,6 +1830,9 @@ megasas_transition_to_ready(struct megasas_instance* instance)
 
 	while (fw_state != MFI_STATE_READY) {
 
+		abs_state =
+		instance->instancet->read_fw_status_reg(instance->reg_set);
+
 		switch (fw_state) {
 
 		case MFI_STATE_FAULT:
@@ -1556,18 +1844,36 @@ megasas_transition_to_ready(struct megasas_instance* instance)
 			/*
 			 * Set the CLR bit in inbound doorbell
 			 */
-			writel(MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
-				&instance->reg_set->inbound_doorbell);
+			if ((instance->pdev->device ==
+				PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
+				(instance->pdev->device ==
+				PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
+
+				writel(
+				  MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
+				  &instance->reg_set->reserved_0[0]);
+			} else {
+				writel(
+				    MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
+					&instance->reg_set->inbound_doorbell);
+			}
 
-			max_wait = 2;
+			max_wait = MEGASAS_RESET_WAIT_TIME;
 			cur_state = MFI_STATE_WAIT_HANDSHAKE;
 			break;
 
 		case MFI_STATE_BOOT_MESSAGE_PENDING:
-			writel(MFI_INIT_HOTPLUG,
-				&instance->reg_set->inbound_doorbell);
+			if ((instance->pdev->device ==
+				PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
+			(instance->pdev->device ==
+				PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
+				writel(MFI_INIT_HOTPLUG,
+				&instance->reg_set->reserved_0[0]);
+			} else
+				writel(MFI_INIT_HOTPLUG,
+					&instance->reg_set->inbound_doorbell);
 
-			max_wait = 10;
+			max_wait = MEGASAS_RESET_WAIT_TIME;
 			cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
 			break;
 
@@ -1576,9 +1882,17 @@ megasas_transition_to_ready(struct megasas_instance* instance)
 			 * Bring it to READY state; assuming max wait 10 secs
 			 */
 			instance->instancet->disable_intr(instance->reg_set);
-			writel(MFI_RESET_FLAGS, &instance->reg_set->inbound_doorbell);
+			if ((instance->pdev->device ==
+				PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
+				(instance->pdev->device ==
+				PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
+				writel(MFI_RESET_FLAGS,
+					&instance->reg_set->reserved_0[0]);
+			} else
+				writel(MFI_RESET_FLAGS,
+					&instance->reg_set->inbound_doorbell);
 
-			max_wait = 60;
+			max_wait = MEGASAS_RESET_WAIT_TIME;
 			cur_state = MFI_STATE_OPERATIONAL;
 			break;
 
@@ -1586,32 +1900,32 @@ megasas_transition_to_ready(struct megasas_instance* instance)
 			/*
 			 * This state should not last for more than 2 seconds
 			 */
-			max_wait = 2;
+			max_wait = MEGASAS_RESET_WAIT_TIME;
 			cur_state = MFI_STATE_UNDEFINED;
 			break;
 
 		case MFI_STATE_BB_INIT:
-			max_wait = 2;
+			max_wait = MEGASAS_RESET_WAIT_TIME;
 			cur_state = MFI_STATE_BB_INIT;
 			break;
 
 		case MFI_STATE_FW_INIT:
-			max_wait = 20;
+			max_wait = MEGASAS_RESET_WAIT_TIME;
 			cur_state = MFI_STATE_FW_INIT;
 			break;
 
 		case MFI_STATE_FW_INIT_2:
-			max_wait = 20;
+			max_wait = MEGASAS_RESET_WAIT_TIME;
 			cur_state = MFI_STATE_FW_INIT_2;
 			break;
 
 		case MFI_STATE_DEVICE_SCAN:
-			max_wait = 20;
+			max_wait = MEGASAS_RESET_WAIT_TIME;
 			cur_state = MFI_STATE_DEVICE_SCAN;
 			break;
 
 		case MFI_STATE_FLUSH_CACHE:
-			max_wait = 20;
+			max_wait = MEGASAS_RESET_WAIT_TIME;
 			cur_state = MFI_STATE_FLUSH_CACHE;
 			break;
 
@@ -1627,8 +1941,10 @@ megasas_transition_to_ready(struct megasas_instance* instance)
 		for (i = 0; i < (max_wait * 1000); i++) {
 			fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) &  
 					MFI_STATE_MASK ;
+			curr_abs_state =
+			instance->instancet->read_fw_status_reg(instance->reg_set);
 
-			if (fw_state == cur_state) {
+			if (abs_state == curr_abs_state) {
 				msleep(1);
 			} else
 				break;
@@ -1637,7 +1953,7 @@ megasas_transition_to_ready(struct megasas_instance* instance)
 		/*
 		 * Return error if fw_state hasn't changed after max_wait
 		 */
-		if (fw_state == cur_state) {
+		if (curr_abs_state == abs_state) {
 			printk(KERN_DEBUG "FW state [%d] hasn't changed "
 			       "in %d secs\n", fw_state, max_wait);
 			return -ENODEV;
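[Editor's note] The wait loop now compares complete status-register snapshots (abs_state vs. curr_abs_state) rather than just the masked state field, so any register activity ends the wait early. Reduced to its skeleton (a sketch, with the register read abstracted as a callback):

/* Sketch: poll a status register for up to max_wait seconds and
 * report whether it changed from its initial snapshot. */
static int wait_for_reg_change(unsigned int (*read_reg)(void *ctx),
			       void *ctx, unsigned int max_wait_s)
{
	unsigned int old = read_reg(ctx);
	unsigned int i;

	for (i = 0; i < max_wait_s * 1000; i++) {
		if (read_reg(ctx) != old)
			return 0;	/* changed */
		/* sleep ~1 ms here, e.g. msleep(1) in kernel context */
	}
	return -1;			/* no change within max_wait */
}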
@@ -1715,6 +2031,10 @@ static int megasas_create_frame_pool(struct megasas_instance *instance)
 	sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
 	    sizeof(struct megasas_sge32);
 
+	if (instance->flag_ieee) {
+		sge_sz = sizeof(struct megasas_sge_skinny);
+	}
+
 	/*
 	 * Calculated the number of 64byte frames required for SGL
 	 */
@@ -1777,6 +2097,7 @@ static int megasas_create_frame_pool(struct megasas_instance *instance)
 		}
 
 		cmd->frame->io.context = cmd->index;
+		cmd->frame->io.pad_0 = 0;
 	}
 
 	return 0;
@@ -1882,6 +2203,97 @@ static int megasas_alloc_cmds(struct megasas_instance *instance)
 	return 0;
 }
 
+/**
+ * megasas_get_pd_list -	Returns FW's pd_list structure
+ * @instance:				Adapter soft state
+ *
+ * Issues an internal command (DCMD) to get the FW's controller PD
+ * list structure.  This information is mainly used to find out the
+ * SYSTEM PDs exposed by the FW.
+ */
+static int
+megasas_get_pd_list(struct megasas_instance *instance)
+{
+	int ret = 0, pd_index = 0;
+	struct megasas_cmd *cmd;
+	struct megasas_dcmd_frame *dcmd;
+	struct MR_PD_LIST *ci;
+	struct MR_PD_ADDRESS *pd_addr;
+	dma_addr_t ci_h = 0;
+
+	cmd = megasas_get_cmd(instance);
+
+	if (!cmd) {
+		printk(KERN_DEBUG "megasas (get_pd_list): Failed to get cmd\n");
+		return -ENOMEM;
+	}
+
+	dcmd = &cmd->frame->dcmd;
+
+	ci = pci_alloc_consistent(instance->pdev,
+		  MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST), &ci_h);
+
+	if (!ci) {
+		printk(KERN_DEBUG "Failed to alloc mem for pd_list\n");
+		megasas_return_cmd(instance, cmd);
+		return -ENOMEM;
+	}
+
+	memset(ci, 0, sizeof(*ci));
+	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+	dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
+	dcmd->mbox.b[1] = 0;
+	dcmd->cmd = MFI_CMD_DCMD;
+	dcmd->cmd_status = 0xFF;
+	dcmd->sge_count = 1;
+	dcmd->flags = MFI_FRAME_DIR_READ;
+	dcmd->timeout = 0;
+	dcmd->data_xfer_len = MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST);
+	dcmd->opcode = MR_DCMD_PD_LIST_QUERY;
+	dcmd->sgl.sge32[0].phys_addr = ci_h;
+	dcmd->sgl.sge32[0].length = MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST);
+
+	if (!megasas_issue_polled(instance, cmd)) {
+		ret = 0;
+	} else {
+		ret = -1;
+	}
+
+	/*
+	 * Copy the PD list returned by the FW into the instance PD list.
+	 */
+
+	pd_addr = ci->addr;
+
+	if (ret == 0 &&
+		(ci->count <
+		  (MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL))) {
+
+		memset(instance->pd_list, 0,
+			MEGASAS_MAX_PD * sizeof(struct megasas_pd_list));
+
+		for (pd_index = 0; pd_index < ci->count; pd_index++) {
+
+			instance->pd_list[pd_addr->deviceId].tid	=
+							pd_addr->deviceId;
+			instance->pd_list[pd_addr->deviceId].driveType	=
+							pd_addr->scsiDevType;
+			instance->pd_list[pd_addr->deviceId].driveState	=
+							MR_PD_STATE_SYSTEM;
+			pd_addr++;
+		}
+	}
+
+	pci_free_consistent(instance->pdev,
+				MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
+				ci, ci_h);
+	megasas_return_cmd(instance, cmd);
+
+	return ret;
+}
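[Editor's note] One caveat worth noting: the copy loop indexes instance->pd_list[] by the firmware-supplied deviceId without bounding it against MEGASAS_MAX_PD. A defensive variant of that loop (our sketch, not the patch) would be:

	for (pd_index = 0; pd_index < ci->count; pd_index++, pd_addr++) {
		/* don't index pd_list[] with an unchecked FW value */
		if (pd_addr->deviceId >= MEGASAS_MAX_PD)
			continue;
		instance->pd_list[pd_addr->deviceId].tid =
						pd_addr->deviceId;
		instance->pd_list[pd_addr->deviceId].driveType =
						pd_addr->scsiDevType;
		instance->pd_list[pd_addr->deviceId].driveState =
						MR_PD_STATE_SYSTEM;
	}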
+
 /**
  * megasas_get_controller_info -	Returns FW's controller structure
  * @instance:				Adapter soft state
@@ -2081,6 +2493,8 @@ static int megasas_init_mfi(struct megasas_instance *instance)
 	 * Map the message registers
 	 */
 	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1078GEN2) ||
+		(instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
+		(instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
 		(instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0079GEN2)) {
 		instance->base_addr = pci_resource_start(instance->pdev, 1);
 	} else {
@@ -2111,6 +2525,10 @@ static int megasas_init_mfi(struct megasas_instance *instance)
 		case PCI_DEVICE_ID_LSI_SAS0079GEN2:
 			instance->instancet = &megasas_instance_template_gen2;
 			break;
+		case PCI_DEVICE_ID_LSI_SAS0073SKINNY:
+		case PCI_DEVICE_ID_LSI_SAS0071SKINNY:
+			instance->instancet = &megasas_instance_template_skinny;
+			break;
 		case PCI_DEVICE_ID_LSI_SAS1064R:
 		case PCI_DEVICE_ID_DELL_PERC5:
 		default:
@@ -2166,6 +2584,10 @@ static int megasas_init_mfi(struct megasas_instance *instance)
 	if (megasas_issue_init_mfi(instance))
 		goto fail_fw_init;
 
+	memset(instance->pd_list, 0 ,
+		(MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)));
+	megasas_get_pd_list(instance);
+
 	ctrl_info = kmalloc(sizeof(struct megasas_ctrl_info), GFP_KERNEL);
 
 	/*
@@ -2409,6 +2831,11 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
 	dcmd->sgl.sge32[0].phys_addr = (u32) instance->evt_detail_h;
 	dcmd->sgl.sge32[0].length = sizeof(struct megasas_evt_detail);
 
+	if (instance->aen_cmd != NULL) {
+		megasas_return_cmd(instance, cmd);
+		return 0;
+	}
+
 	/*
 	 * Store reference to the cmd used to register for AEN. When an
 	 * application wants us to register for AEN, we have to abort this
@@ -2419,7 +2846,8 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
 	/*
 	 * Issue the aen registration frame
 	 */
-	instance->instancet->fire_cmd(cmd->frame_phys_addr ,0,instance->reg_set);
+	instance->instancet->fire_cmd(instance,
+			cmd->frame_phys_addr, 0, instance->reg_set);
 
 	return 0;
 }
@@ -2465,7 +2893,13 @@ static int megasas_io_attach(struct megasas_instance *instance)
 	 */
 	host->irq = instance->pdev->irq;
 	host->unique_id = instance->unique_id;
-	host->can_queue = instance->max_fw_cmds - MEGASAS_INT_CMDS;
+	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
+		(instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
+		host->can_queue =
+			instance->max_fw_cmds - MEGASAS_SKINNY_INT_CMDS;
+	} else
+		host->can_queue =
+			instance->max_fw_cmds - MEGASAS_INT_CMDS;
 	host->this_id = instance->init_id;
 	host->sg_tablesize = instance->max_num_sge;
 	host->max_sectors = instance->max_sectors_per_req;
@@ -2572,6 +3006,9 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	*instance->producer = 0;
 	*instance->consumer = 0;
+	megasas_poll_wait_aen = 0;
+	instance->flag_ieee = 0;
+	instance->ev = NULL;
 
 	instance->evt_detail = pci_alloc_consistent(pdev,
 						    sizeof(struct
@@ -2595,10 +3032,11 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	init_waitqueue_head(&instance->abort_cmd_wait_q);
 
 	spin_lock_init(&instance->cmd_pool_lock);
+	spin_lock_init(&instance->fire_lock);
 	spin_lock_init(&instance->completion_lock);
+	spin_lock_init(&poll_aen_lock);
 
 	mutex_init(&instance->aen_mutex);
-	sema_init(&instance->ioctl_sem, MEGASAS_INT_CMDS);
 
 	/*
 	 * Initialize PCI related and misc parameters
@@ -2608,8 +3046,16 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	instance->unique_id = pdev->bus->number << 8 | pdev->devfn;
 	instance->init_id = MEGASAS_DEFAULT_INIT_ID;
 
+	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
+		(instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
+		instance->flag_ieee = 1;
+		sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS);
+	} else
+		sema_init(&instance->ioctl_sem, MEGASAS_INT_CMDS);
+
 	megasas_dbg_lvl = 0;
 	instance->flag = 0;
+	instance->unload = 1;
 	instance->last_time = 0;
 
 	/*
@@ -2655,6 +3101,7 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (megasas_io_attach(instance))
 		goto fail_io_attach;
 
+	instance->unload = 0;
 	return 0;
 
       fail_start_aen:
@@ -2778,12 +3225,23 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state)
 
 	instance = pci_get_drvdata(pdev);
 	host = instance->host;
+	instance->unload = 1;
 
 	if (poll_mode_io)
 		del_timer_sync(&instance->io_completion_timer);
 
 	megasas_flush_cache(instance);
 	megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN);
+
+	/* Cancel the delayed work if it is still queued */
+	if (instance->ev != NULL) {
+		struct megasas_aen_event *ev = instance->ev;
+		cancel_delayed_work(
+			(struct delayed_work *)&ev->hotplug_work);
+		flush_scheduled_work();
+		instance->ev = NULL;
+	}
+
 	tasklet_kill(&instance->isr_tasklet);
 
 	pci_set_drvdata(instance->pdev, instance);
@@ -2873,6 +3331,8 @@ megasas_resume(struct pci_dev *pdev)
 		megasas_start_timer(instance, &instance->io_completion_timer,
 				megasas_io_completion_timer,
 				MEGASAS_COMPLETION_TIMER_INTERVAL);
+	instance->unload = 0;
+
 	return 0;
 
 fail_irq:
@@ -2913,6 +3373,7 @@ static void __devexit megasas_detach_one(struct pci_dev *pdev)
 	struct megasas_instance *instance;
 
 	instance = pci_get_drvdata(pdev);
+	instance->unload = 1;
 	host = instance->host;
 
 	if (poll_mode_io)
@@ -2921,6 +3382,16 @@ static void __devexit megasas_detach_one(struct pci_dev *pdev)
 	scsi_remove_host(instance->host);
 	megasas_flush_cache(instance);
 	megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
+
+	/* Cancel the delayed work if it is still queued */
+	if (instance->ev != NULL) {
+		struct megasas_aen_event *ev = instance->ev;
+		cancel_delayed_work(
+			(struct delayed_work *)&ev->hotplug_work);
+		flush_scheduled_work();
+		instance->ev = NULL;
+	}
+
 	tasklet_kill(&instance->isr_tasklet);
 
 	/*
@@ -2969,6 +3440,7 @@ static void __devexit megasas_detach_one(struct pci_dev *pdev)
 static void megasas_shutdown(struct pci_dev *pdev)
 {
 	struct megasas_instance *instance = pci_get_drvdata(pdev);
+	instance->unload = 1;
 	megasas_flush_cache(instance);
 	megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
 }
@@ -3016,6 +3488,23 @@ static int megasas_mgmt_fasync(int fd, struct file *filep, int mode)
 }
 
 /**
+ * megasas_mgmt_poll -	char node "poll" entry point
+ */
+static unsigned int megasas_mgmt_poll(struct file *file, poll_table *wait)
+{
+	unsigned int mask;
+	unsigned long flags;
+	poll_wait(file, &megasas_poll_wait, wait);
+	spin_lock_irqsave(&poll_aen_lock, flags);
+	if (megasas_poll_wait_aen)
+		mask = (POLLIN | POLLRDNORM);
+	else
+		mask = 0;
+	spin_unlock_irqrestore(&poll_aen_lock, flags);
+	return mask;
+}
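[Editor's note] From userspace, the new entry point makes the AEN path usable with plain poll(2). A minimal consumer sketch; the device path is an assumption about the misc node the driver registers for its ioctl interface:

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct pollfd pfd;
	int fd = open("/dev/megaraid_sas_ioctl", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	pfd.fd = fd;
	pfd.events = POLLIN;
	/* blocks until megasas_service_aen() wakes the poll queue */
	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
		printf("AEN pending, fetch it via the event ioctl\n");
	close(fd);
	return 0;
}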
+
+/**
  * megasas_mgmt_fw_ioctl -	Issues management ioctls to FW
  * @instance:			Adapter soft state
  * @argp:			User's ioctl packet
@@ -3032,7 +3521,7 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
 	int error = 0, i;
 	void *sense = NULL;
 	dma_addr_t sense_handle;
-	u32 *sense_ptr;
+	unsigned long *sense_ptr;
 
 	memset(kbuff_arr, 0, sizeof(kbuff_arr));
 
@@ -3056,6 +3545,7 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
 	 */
 	memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE);
 	cmd->frame->hdr.context = cmd->index;
+	cmd->frame->hdr.pad_0 = 0;
 
 	/*
 	 * The management interface between applications and the fw uses
@@ -3109,7 +3599,7 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
 		}
 
 		sense_ptr =
-		    (u32 *) ((unsigned long)cmd->frame + ioc->sense_off);
+		(unsigned long *) ((unsigned long)cmd->frame + ioc->sense_off);
 		*sense_ptr = sense_handle;
 	}
 
@@ -3140,8 +3630,8 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
 		 * sense_ptr points to the location that has the user
 		 * sense buffer address
 		 */
-		sense_ptr = (u32 *) ((unsigned long)ioc->frame.raw +
-				     ioc->sense_off);
+		sense_ptr = (unsigned long *) ((unsigned long)ioc->frame.raw +
+				ioc->sense_off);
 
 		if (copy_to_user((void __user *)((unsigned long)(*sense_ptr)),
 				 sense, ioc->sense_len)) {
@@ -3177,20 +3667,6 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
 	return error;
 }
 
-static struct megasas_instance *megasas_lookup_instance(u16 host_no)
-{
-	int i;
-
-	for (i = 0; i < megasas_mgmt_info.max_index; i++) {
-
-		if ((megasas_mgmt_info.instance[i]) &&
-		    (megasas_mgmt_info.instance[i]->host->host_no == host_no))
-			return megasas_mgmt_info.instance[i];
-	}
-
-	return NULL;
-}
-
 static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
 {
 	struct megasas_iocpacket __user *user_ioc =
@@ -3214,6 +3690,17 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
 		goto out_kfree_ioc;
 	}
 
+	if (instance->hw_crit_error == 1) {
+		printk(KERN_DEBUG "Controller in critical error state\n");
+		error = -ENODEV;
+		goto out_kfree_ioc;
+	}
+
+	if (instance->unload == 1) {
+		error = -ENODEV;
+		goto out_kfree_ioc;
+	}
+
 	/*
 	 * We will allow only MEGASAS_INT_CMDS number of parallel ioctl cmds
 	 */
@@ -3249,6 +3736,14 @@ static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
 	if (!instance)
 		return -ENODEV;
 
+	if (instance->hw_crit_error == 1)
+		return -ENODEV;
+
+	if (instance->unload == 1)
+		return -ENODEV;
+
 	mutex_lock(&instance->aen_mutex);
 	error = megasas_register_aen(instance, aen.seq_num,
 				     aen.class_locale_word);
@@ -3337,6 +3832,7 @@ static const struct file_operations megasas_mgmt_fops = {
 	.open = megasas_mgmt_open,
 	.fasync = megasas_mgmt_fasync,
 	.unlocked_ioctl = megasas_mgmt_ioctl,
+	.poll = megasas_mgmt_poll,
 #ifdef CONFIG_COMPAT
 	.compat_ioctl = megasas_mgmt_compat_ioctl,
 #endif
@@ -3378,6 +3874,15 @@ static DRIVER_ATTR(release_date, S_IRUGO, megasas_sysfs_show_release_date,
 		   NULL);
 
 static ssize_t
+megasas_sysfs_show_support_poll_for_event(struct device_driver *dd, char *buf)
+{
+	return sprintf(buf, "%u\n", support_poll_for_event);
+}
+
+static DRIVER_ATTR(support_poll_for_event, S_IRUGO,
+			megasas_sysfs_show_support_poll_for_event, NULL);
+
+static ssize_t
 megasas_sysfs_show_dbg_lvl(struct device_driver *dd, char *buf)
 {
 	return sprintf(buf, "%u\n", megasas_dbg_lvl);
@@ -3451,6 +3956,92 @@ out:
 	return retval;
 }
 
+static void
+megasas_aen_polling(struct work_struct *work)
+{
+	struct megasas_aen_event *ev =
+		container_of(work, struct megasas_aen_event, hotplug_work);
+	struct megasas_instance *instance = ev->instance;
+	union megasas_evt_class_locale class_locale;
+	struct Scsi_Host *host;
+	struct scsi_device *sdev1;
+	u16     pd_index = 0;
+	int     i, j, doscan = 0;
+	u32 seq_num;
+	int error;
+
+	if (!instance) {
+		printk(KERN_ERR "invalid instance!\n");
+		kfree(ev);
+		return;
+	}
+	instance->ev = NULL;
+	host = instance->host;
+	if (instance->evt_detail) {
+
+		switch (instance->evt_detail->code) {
+		case MR_EVT_PD_INSERTED:
+		case MR_EVT_PD_REMOVED:
+		case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
+			doscan = 1;
+			break;
+		default:
+			doscan = 0;
+			break;
+		}
+	} else {
+		printk(KERN_ERR "invalid evt_detail!\n");
+		kfree(ev);
+		return;
+	}
+
+	if (doscan) {
+		printk(KERN_INFO "scanning ...\n");
+		megasas_get_pd_list(instance);
+		for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
+			for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
+				pd_index = i*MEGASAS_MAX_DEV_PER_CHANNEL + j;
+				sdev1 = scsi_device_lookup(host, i, j, 0);
+				if (instance->pd_list[pd_index].driveState ==
+							MR_PD_STATE_SYSTEM) {
+					if (!sdev1) {
+						scsi_add_device(host, i, j, 0);
+					}
+					if (sdev1)
+						scsi_device_put(sdev1);
+				} else {
+					if (sdev1) {
+						scsi_remove_device(sdev1);
+						scsi_device_put(sdev1);
+					}
+				}
+			}
+		}
+	}
+
+	if (instance->aen_cmd != NULL) {
+		kfree(ev);
+		return;
+	}
+
+	seq_num = instance->evt_detail->seq_num + 1;
+
+	/* Register AEN with FW for latest sequence number plus 1 */
+	class_locale.members.reserved = 0;
+	class_locale.members.locale = MR_EVT_LOCALE_ALL;
+	class_locale.members.class = MR_EVT_CLASS_DEBUG;
+	mutex_lock(&instance->aen_mutex);
+	error = megasas_register_aen(instance, seq_num,
+					class_locale.word);
+	mutex_unlock(&instance->aen_mutex);
+
+	if (error)
+		printk(KERN_ERR "register aen failed error %x\n", error);
+
+	kfree(ev);
+}
+
 static DRIVER_ATTR(poll_mode_io, S_IRUGO|S_IWUGO,
 		megasas_sysfs_show_poll_mode_io,
 		megasas_sysfs_set_poll_mode_io);
@@ -3468,6 +4059,8 @@ static int __init megasas_init(void)
 	printk(KERN_INFO "megasas: %s %s\n", MEGASAS_VERSION,
 	       MEGASAS_EXT_VERSION);
 
+	support_poll_for_event = 2;
+
 	memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info));
 
 	/*
@@ -3500,6 +4093,12 @@ static int __init megasas_init(void)
 				  &driver_attr_release_date);
 	if (rval)
 		goto err_dcf_rel_date;
+
+	rval = driver_create_file(&megasas_pci_driver.driver,
+				&driver_attr_support_poll_for_event);
+	if (rval)
+		goto err_dcf_support_poll_for_event;
+
 	rval = driver_create_file(&megasas_pci_driver.driver,
 				  &driver_attr_dbg_lvl);
 	if (rval)
@@ -3516,7 +4115,12 @@ err_dcf_poll_mode_io:
 			   &driver_attr_dbg_lvl);
 err_dcf_dbg_lvl:
 	driver_remove_file(&megasas_pci_driver.driver,
+			&driver_attr_support_poll_for_event);
+
+err_dcf_support_poll_for_event:
+	driver_remove_file(&megasas_pci_driver.driver,
 			   &driver_attr_release_date);
+
 err_dcf_rel_date:
 	driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
 err_dcf_attr_ver:
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 0d033248fdf1..72b28e436e32 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -18,9 +18,9 @@
 /*
  * MegaRAID SAS Driver meta data
  */
-#define MEGASAS_VERSION				"00.00.04.01"
-#define MEGASAS_RELDATE				"July 24, 2008"
-#define MEGASAS_EXT_VERSION			"Thu July 24 11:41:51 PST 2008"
+#define MEGASAS_VERSION				"00.00.04.12-rc1"
+#define MEGASAS_RELDATE				"Sep. 17, 2009"
+#define MEGASAS_EXT_VERSION			"Thu Sep. 17 11:41:51 PST 2009"
 
 /*
  * Device IDs
@@ -30,6 +30,8 @@
 #define	PCI_DEVICE_ID_LSI_VERDE_ZCR		0x0413
 #define	PCI_DEVICE_ID_LSI_SAS1078GEN2		0x0078
 #define	PCI_DEVICE_ID_LSI_SAS0079GEN2		0x0079
+#define	PCI_DEVICE_ID_LSI_SAS0073SKINNY		0x0073
+#define	PCI_DEVICE_ID_LSI_SAS0071SKINNY		0x0071
 
 /*
  * =====================================
@@ -94,6 +96,7 @@
 #define MFI_FRAME_DIR_WRITE			0x0008
 #define MFI_FRAME_DIR_READ			0x0010
 #define MFI_FRAME_DIR_BOTH			0x0018
+#define MFI_FRAME_IEEE                          0x0020
 
 /*
  * Definition for cmd_status
@@ -131,6 +134,7 @@
 #define MR_DCMD_CLUSTER				0x08000000
 #define MR_DCMD_CLUSTER_RESET_ALL		0x08010100
 #define MR_DCMD_CLUSTER_RESET_LD		0x08010200
+#define MR_DCMD_PD_LIST_QUERY                   0x02010100
 
 /*
  * MFI command completion codes
@@ -251,9 +255,100 @@ enum MR_EVT_ARGS {
 	MR_EVT_ARGS_STR,
 	MR_EVT_ARGS_TIME,
 	MR_EVT_ARGS_ECC,
+	MR_EVT_ARGS_LD_PROP,
+	MR_EVT_ARGS_PD_SPARE,
+	MR_EVT_ARGS_PD_INDEX,
+	MR_EVT_ARGS_DIAG_PASS,
+	MR_EVT_ARGS_DIAG_FAIL,
+	MR_EVT_ARGS_PD_LBA_LBA,
+	MR_EVT_ARGS_PORT_PHY,
+	MR_EVT_ARGS_PD_MISSING,
+	MR_EVT_ARGS_PD_ADDRESS,
+	MR_EVT_ARGS_BITMAP,
+	MR_EVT_ARGS_CONNECTOR,
+	MR_EVT_ARGS_PD_PD,
+	MR_EVT_ARGS_PD_FRU,
+	MR_EVT_ARGS_PD_PATHINFO,
+	MR_EVT_ARGS_PD_POWER_STATE,
+	MR_EVT_ARGS_GENERIC,
+};
 
+/*
+ * define constants for device list query options
+ */
+enum MR_PD_QUERY_TYPE {
+	MR_PD_QUERY_TYPE_ALL                = 0,
+	MR_PD_QUERY_TYPE_STATE              = 1,
+	MR_PD_QUERY_TYPE_POWER_STATE        = 2,
+	MR_PD_QUERY_TYPE_MEDIA_TYPE         = 3,
+	MR_PD_QUERY_TYPE_SPEED              = 4,
+	MR_PD_QUERY_TYPE_EXPOSED_TO_HOST    = 5,
 };
 
+#define MR_EVT_CFG_CLEARED                              0x0004
+#define MR_EVT_LD_STATE_CHANGE                          0x0051
+#define MR_EVT_PD_INSERTED                              0x005b
+#define MR_EVT_PD_REMOVED                               0x0070
+#define MR_EVT_LD_CREATED                               0x008a
+#define MR_EVT_LD_DELETED                               0x008b
+#define MR_EVT_FOREIGN_CFG_IMPORTED                     0x00db
+#define MR_EVT_LD_OFFLINE                               0x00fc
+#define MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED             0x0152
+#define MAX_LOGICAL_DRIVES				64
+
+enum MR_PD_STATE {
+	MR_PD_STATE_UNCONFIGURED_GOOD   = 0x00,
+	MR_PD_STATE_UNCONFIGURED_BAD    = 0x01,
+	MR_PD_STATE_HOT_SPARE           = 0x02,
+	MR_PD_STATE_OFFLINE             = 0x10,
+	MR_PD_STATE_FAILED              = 0x11,
+	MR_PD_STATE_REBUILD             = 0x14,
+	MR_PD_STATE_ONLINE              = 0x18,
+	MR_PD_STATE_COPYBACK            = 0x20,
+	MR_PD_STATE_SYSTEM              = 0x40,
+};
+
+/*
+ * defines the physical drive address structure
+ */
+struct MR_PD_ADDRESS {
+	u16     deviceId;
+	u16     enclDeviceId;
+
+	union {
+		struct {
+			u8  enclIndex;
+			u8  slotNumber;
+		} mrPdAddress;
+		struct {
+			u8  enclPosition;
+			u8  enclConnectorIndex;
+		} mrEnclAddress;
+	};
+	u8      scsiDevType;
+	union {
+		u8      connectedPortBitmap;
+		u8      connectedPortNumbers;
+	};
+	u64     sasAddr[2];
+} __packed;
+
+/*
+ * defines the physical drive list structure
+ */
+struct MR_PD_LIST {
+	u32             size;
+	u32             count;
+	struct MR_PD_ADDRESS   addr[1];
+} __packed;
+
+struct megasas_pd_list {
+	u16             tid;
+	u8              driveType;
+	u8              driveState;
+} __packed;
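[Editor's note] MR_PD_LIST uses the one-element trailing-array idiom, so its true size is the header plus count addresses; the DCMD above allocates MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST), which is generously larger than needed. The exact size would be:

#include <stddef.h>

/* Exact byte size of an MR_PD_LIST holding 'count' entries, given the
 * addr[1] trailing-array idiom above (sketch, not driver code). */
static size_t mr_pd_list_size(unsigned int count)
{
	return offsetof(struct MR_PD_LIST, addr) +
		count * sizeof(struct MR_PD_ADDRESS);
}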
+
 /*
  * SAS controller properties
  */
@@ -282,7 +377,7 @@ struct megasas_ctrl_prop {
 	u8 expose_encl_devices;
 	u8 reserved[38];
 
-} __attribute__ ((packed));
+} __packed;
 
 /*
  * SAS controller information
@@ -525,7 +620,7 @@ struct megasas_ctrl_info {
 
 	u8 pad[0x800 - 0x6a0];
 
-} __attribute__ ((packed));
+} __packed;
 
 /*
  * ===============================
@@ -540,6 +635,8 @@ struct megasas_ctrl_info {
 #define MEGASAS_DEFAULT_INIT_ID			-1
 #define MEGASAS_MAX_LUN				8
 #define MEGASAS_MAX_LD				64
+#define MEGASAS_MAX_PD                          (MEGASAS_MAX_PD_CHANNELS * \
+						MEGASAS_MAX_DEV_PER_CHANNEL)
 
 #define MEGASAS_DBG_LVL				1
 
@@ -570,6 +667,7 @@ struct megasas_ctrl_info {
  * is shown below
  */
 #define MEGASAS_INT_CMDS			32
+#define MEGASAS_SKINNY_INT_CMDS			5
 
 /*
  * FW can accept both 32 and 64 bit SGLs. We want to allocate 32/64 bit
@@ -584,6 +682,8 @@ struct megasas_ctrl_info {
 #define MFI_REPLY_1078_MESSAGE_INTERRUPT	0x80000000
 #define MFI_REPLY_GEN2_MESSAGE_INTERRUPT	0x00000001
 #define MFI_GEN2_ENABLE_INTERRUPT_MASK		(0x00000001 | 0x00000004)
+#define MFI_REPLY_SKINNY_MESSAGE_INTERRUPT	0x40000000
+#define MFI_SKINNY_ENABLE_INTERRUPT_MASK	(0x00000001)
 
 /*
 * register set for both 1068 and 1078 controllers
@@ -644,10 +744,17 @@ struct megasas_sge64 {
 
 } __attribute__ ((packed));
 
+struct megasas_sge_skinny {
+	u64 phys_addr;
+	u32 length;
+	u32 flag;
+} __packed;
+
 union megasas_sgl {
 
 	struct megasas_sge32 sge32[1];
 	struct megasas_sge64 sge64[1];
+	struct megasas_sge_skinny sge_skinny[1];
 
 } __attribute__ ((packed));
 
@@ -1061,16 +1168,10 @@ struct megasas_evt_detail {
 
 } __attribute__ ((packed));
 
- struct megasas_instance_template {
-	void (*fire_cmd)(dma_addr_t ,u32 ,struct megasas_register_set __iomem *);
-
-	void (*enable_intr)(struct megasas_register_set __iomem *) ;
-	void (*disable_intr)(struct megasas_register_set __iomem *);
-
-	int (*clear_intr)(struct megasas_register_set __iomem *);
-
-	u32 (*read_fw_status_reg)(struct megasas_register_set __iomem *);
- };
+struct megasas_aen_event {
+	struct work_struct hotplug_work;
+	struct megasas_instance *instance;
+};
 
 struct megasas_instance {
 
@@ -1085,17 +1186,21 @@ struct megasas_instance {
 	unsigned long base_addr;
 	struct megasas_register_set __iomem *reg_set;
 
+	struct megasas_pd_list          pd_list[MEGASAS_MAX_PD];
 	s8 init_id;
 
 	u16 max_num_sge;
 	u16 max_fw_cmds;
 	u32 max_sectors_per_req;
+	struct megasas_aen_event *ev;
 
 	struct megasas_cmd **cmd_list;
 	struct list_head cmd_pool;
 	spinlock_t cmd_pool_lock;
 	/* used to synch producer, consumer ptrs in dpc */
 	spinlock_t completion_lock;
+	/* used to sync fire the cmd to fw */
+	spinlock_t fire_lock;
 	struct dma_pool *frame_dma_pool;
 	struct dma_pool *sense_dma_pool;
 
@@ -1120,11 +1225,25 @@ struct megasas_instance {
 	struct tasklet_struct isr_tasklet;
 
 	u8 flag;
+	u8 unload;
+	u8 flag_ieee;
 	unsigned long last_time;
 
 	struct timer_list io_completion_timer;
 };
 
+struct megasas_instance_template {
+	void (*fire_cmd)(struct megasas_instance *, dma_addr_t,
+		u32, struct megasas_register_set __iomem *);
+
+	void (*enable_intr)(struct megasas_register_set __iomem *);
+	void (*disable_intr)(struct megasas_register_set __iomem *);
+
+	int (*clear_intr)(struct megasas_register_set __iomem *);
+
+	u32 (*read_fw_status_reg)(struct megasas_register_set __iomem *);
+};
+
 #define MEGASAS_IS_LOGICAL(scp)						\
 	(scp->device->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1
 
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2.h b/drivers/scsi/mpt2sas/mpi/mpi2.h
index f9f6c0839276..914168105297 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2.h
@@ -8,7 +8,7 @@
  *                  scatter/gather formats.
  *  Creation Date:  June 21, 2006
  *
- *  mpi2.h Version:  02.00.12
+ *  mpi2.h Version:  02.00.13
  *
  *  Version History
  *  ---------------
@@ -52,6 +52,7 @@
  *                      MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR and made those
  *                      bytes reserved.
  *                      Added RAID Accelerator functionality.
+ *  07-30-09  02.00.13  Bumped MPI2_HEADER_VERSION_UNIT.
  *  --------------------------------------------------------------------------
  */
 
@@ -77,7 +78,7 @@
 #define MPI2_VERSION_02_00                  (0x0200)
 
 /* versioning for this MPI header set */
-#define MPI2_HEADER_VERSION_UNIT            (0x0C)
+#define MPI2_HEADER_VERSION_UNIT            (0x0D)
 #define MPI2_HEADER_VERSION_DEV             (0x00)
 #define MPI2_HEADER_VERSION_UNIT_MASK       (0xFF00)
 #define MPI2_HEADER_VERSION_UNIT_SHIFT      (8)
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
index ab47c4679640..1611c57a6fdf 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
@@ -6,7 +6,7 @@
  *          Title:  MPI Configuration messages and pages
  *  Creation Date:  November 10, 2006
  *
- *    mpi2_cnfg.h Version:  02.00.11
+ *    mpi2_cnfg.h Version:  02.00.12
  *
  *  Version History
  *  ---------------
@@ -100,6 +100,13 @@
  *                      Added expander reduced functionality data to SAS
  *                      Expander Page 0.
  *                      Added SAS PHY Page 2 and SAS PHY Page 3.
+ *  07-30-09  02.00.12  Added IO Unit Page 7.
+ *                      Added new device ids.
+ *                      Added SAS IO Unit Page 5.
+ *                      Added partial and slumber power management capable flags
+ *                      to SAS Device Page 0 Flags field.
+ *                      Added PhyInfo defines for power condition.
+ *                      Added Ethernet configuration pages.
  *  --------------------------------------------------------------------------
  */
 
@@ -182,6 +189,7 @@ typedef union _MPI2_CONFIG_EXT_PAGE_HEADER_UNION
 #define MPI2_CONFIG_EXTPAGETYPE_RAID_CONFIG         (0x16)
 #define MPI2_CONFIG_EXTPAGETYPE_DRIVER_MAPPING      (0x17)
 #define MPI2_CONFIG_EXTPAGETYPE_SAS_PORT            (0x18)
+#define MPI2_CONFIG_EXTPAGETYPE_ETHERNET            (0x19)
 
 
 /*****************************************************************************
@@ -268,6 +276,14 @@ typedef union _MPI2_CONFIG_EXT_PAGE_HEADER_UNION
 #define MPI2_DPM_PGAD_START_ENTRY_MASK              (0x0000FFFF)
 
 
+/* Ethernet PageAddress format */
+#define MPI2_ETHERNET_PGAD_FORM_MASK                (0xF0000000)
+#define MPI2_ETHERNET_PGAD_FORM_IF_NUM              (0x00000000)
+
+#define MPI2_ETHERNET_PGAD_IF_NUMBER_MASK           (0x000000FF)
+
+
+
 /****************************************************************************
 *   Configuration messages
 ****************************************************************************/
@@ -349,6 +365,15 @@ typedef struct _MPI2_CONFIG_REPLY
 #define MPI2_MFGPAGE_DEVID_SAS2116_1                (0x0064)
 #define MPI2_MFGPAGE_DEVID_SAS2116_2                (0x0065)
 
+#define MPI2_MFGPAGE_DEVID_SAS2208_1                (0x0080)
+#define MPI2_MFGPAGE_DEVID_SAS2208_2                (0x0081)
+#define MPI2_MFGPAGE_DEVID_SAS2208_3                (0x0082)
+#define MPI2_MFGPAGE_DEVID_SAS2208_4                (0x0083)
+#define MPI2_MFGPAGE_DEVID_SAS2208_5                (0x0084)
+#define MPI2_MFGPAGE_DEVID_SAS2208_6                (0x0085)
+#define MPI2_MFGPAGE_DEVID_SAS2208_7                (0x0086)
+#define MPI2_MFGPAGE_DEVID_SAS2208_8                (0x0087)
+
 
 /* Manufacturing Page 0 */
 
@@ -787,6 +812,56 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_6 {
 #define MPI2_IOUNITPAGE6_FLAGS_ENABLE_RAID_ACCELERATOR  (0x0001)
 
 
+/* IO Unit Page 7 */
+
+typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_7 {
+    MPI2_CONFIG_PAGE_HEADER Header;                                 /* 0x00 */
+    U16                     Reserved1;                              /* 0x04 */
+    U8                      PCIeWidth;                              /* 0x06 */
+    U8                      PCIeSpeed;                              /* 0x07 */
+    U32                     ProcessorState;                         /* 0x08 */
+    U32                     Reserved2;                              /* 0x0C */
+    U16                     IOCTemperature;                         /* 0x10 */
+    U8                      IOCTemperatureUnits;                    /* 0x12 */
+    U8                      IOCSpeed;                               /* 0x13 */
+    U32                     Reserved3;                              /* 0x14 */
+} MPI2_CONFIG_PAGE_IO_UNIT_7, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_7,
+  Mpi2IOUnitPage7_t, MPI2_POINTER pMpi2IOUnitPage7_t;
+
+#define MPI2_IOUNITPAGE7_PAGEVERSION                    (0x00)
+
+/* defines for IO Unit Page 7 PCIeWidth field */
+#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X1              (0x01)
+#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X2              (0x02)
+#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X4              (0x04)
+#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X8              (0x08)
+
+/* defines for IO Unit Page 7 PCIeSpeed field */
+#define MPI2_IOUNITPAGE7_PCIE_SPEED_2_5_GBPS        (0x00)
+#define MPI2_IOUNITPAGE7_PCIE_SPEED_5_0_GBPS        (0x01)
+#define MPI2_IOUNITPAGE7_PCIE_SPEED_8_0_GBPS        (0x02)
+
+/* defines for IO Unit Page 7 ProcessorState field */
+#define MPI2_IOUNITPAGE7_PSTATE_MASK_SECOND         (0x0000000F)
+#define MPI2_IOUNITPAGE7_PSTATE_SHIFT_SECOND        (0)
+
+#define MPI2_IOUNITPAGE7_PSTATE_NOT_PRESENT         (0x00)
+#define MPI2_IOUNITPAGE7_PSTATE_DISABLED            (0x01)
+#define MPI2_IOUNITPAGE7_PSTATE_ENABLED             (0x02)
+
+/* defines for IO Unit Page 7 IOCTemperatureUnits field */
+#define MPI2_IOUNITPAGE7_IOC_TEMP_NOT_PRESENT       (0x00)
+#define MPI2_IOUNITPAGE7_IOC_TEMP_FAHRENHEIT        (0x01)
+#define MPI2_IOUNITPAGE7_IOC_TEMP_CELSIUS           (0x02)
+
+/* defines for IO Unit Page 7 IOCSpeed field */
+#define MPI2_IOUNITPAGE7_IOC_SPEED_FULL             (0x01)
+#define MPI2_IOUNITPAGE7_IOC_SPEED_HALF             (0x02)
+#define MPI2_IOUNITPAGE7_IOC_SPEED_QUARTER          (0x04)
+#define MPI2_IOUNITPAGE7_IOC_SPEED_EIGHTH           (0x08)
+
+
+
 /****************************************************************************
 *   IOC Config Pages
 ****************************************************************************/
@@ -1470,6 +1545,12 @@ typedef struct _MPI2_CONFIG_PAGE_RD_PDISK_1
 
 /* values for PhyInfo fields */
 #define MPI2_SAS_PHYINFO_PHY_VACANT                     (0x80000000)
+
+#define MPI2_SAS_PHYINFO_PHY_POWER_CONDITION_MASK       (0x18000000)
+#define MPI2_SAS_PHYINFO_PHY_POWER_ACTIVE               (0x00000000)
+#define MPI2_SAS_PHYINFO_PHY_POWER_PARTIAL              (0x08000000)
+#define MPI2_SAS_PHYINFO_PHY_POWER_SLUMBER              (0x10000000)
+
 #define MPI2_SAS_PHYINFO_CHANGED_REQ_INSIDE_ZPSDS       (0x04000000)
 #define MPI2_SAS_PHYINFO_INSIDE_ZPSDS_PERSISTENT        (0x02000000)
 #define MPI2_SAS_PHYINFO_REQ_INSIDE_ZPSDS               (0x01000000)
@@ -1682,11 +1763,11 @@ typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_1
 /* values for SAS IO Unit Page 1 PortFlags */
 #define MPI2_SASIOUNIT1_PORT_FLAGS_AUTO_PORT_CONFIG                 (0x01)
 
-/* values for SAS IO Unit Page 2 PhyFlags */
+/* values for SAS IO Unit Page 1 PhyFlags */
 #define MPI2_SASIOUNIT1_PHYFLAGS_ZONING_ENABLE                      (0x10)
 #define MPI2_SASIOUNIT1_PHYFLAGS_PHY_DISABLE                        (0x08)
 
-/* values for SAS IO Unit Page 0 MaxMinLinkRate */
+/* values for SAS IO Unit Page 1 MaxMinLinkRate */
 #define MPI2_SASIOUNIT1_MAX_RATE_MASK                               (0xF0)
 #define MPI2_SASIOUNIT1_MAX_RATE_1_5                                (0x80)
 #define MPI2_SASIOUNIT1_MAX_RATE_3_0                                (0x90)
@@ -1745,6 +1826,74 @@ typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_4
 #define MPI2_SASIOUNIT4_PHY_SPINUP_GROUP_MASK               (0x03)
 
 
+/* SAS IO Unit Page 5 */
+
+typedef struct _MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS {
+    U8          ControlFlags;               /* 0x00 */
+    U8          Reserved1;                  /* 0x01 */
+    U16         InactivityTimerExponent;    /* 0x02 */
+    U8          SATAPartialTimeout;         /* 0x04 */
+    U8          Reserved2;                  /* 0x05 */
+    U8          SATASlumberTimeout;         /* 0x06 */
+    U8          Reserved3;                  /* 0x07 */
+    U8          SASPartialTimeout;          /* 0x08 */
+    U8          Reserved4;                  /* 0x09 */
+    U8          SASSlumberTimeout;          /* 0x0A */
+    U8          Reserved5;                  /* 0x0B */
+} MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS,
+  MPI2_POINTER PTR_MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS,
+  Mpi2SasIOUnit5PhyPmSettings_t, MPI2_POINTER pMpi2SasIOUnit5PhyPmSettings_t;
+
+/* defines for ControlFlags field */
+#define MPI2_SASIOUNIT5_CONTROL_SAS_SLUMBER_ENABLE      (0x08)
+#define MPI2_SASIOUNIT5_CONTROL_SAS_PARTIAL_ENABLE      (0x04)
+#define MPI2_SASIOUNIT5_CONTROL_SATA_SLUMBER_ENABLE     (0x02)
+#define MPI2_SASIOUNIT5_CONTROL_SATA_PARTIAL_ENABLE     (0x01)
+
+/* defines for InactivityTimerExponent field */
+#define MPI2_SASIOUNIT5_ITE_MASK_SAS_SLUMBER            (0x7000)
+#define MPI2_SASIOUNIT5_ITE_SHIFT_SAS_SLUMBER           (12)
+#define MPI2_SASIOUNIT5_ITE_MASK_SAS_PARTIAL            (0x0700)
+#define MPI2_SASIOUNIT5_ITE_SHIFT_SAS_PARTIAL           (8)
+#define MPI2_SASIOUNIT5_ITE_MASK_SATA_SLUMBER           (0x0070)
+#define MPI2_SASIOUNIT5_ITE_SHIFT_SATA_SLUMBER          (4)
+#define MPI2_SASIOUNIT5_ITE_MASK_SATA_PARTIAL           (0x0007)
+#define MPI2_SASIOUNIT5_ITE_SHIFT_SATA_PARTIAL          (0)
+
+#define MPI2_SASIOUNIT5_ITE_TEN_SECONDS                 (7)
+#define MPI2_SASIOUNIT5_ITE_ONE_SECOND                  (6)
+#define MPI2_SASIOUNIT5_ITE_HUNDRED_MILLISECONDS        (5)
+#define MPI2_SASIOUNIT5_ITE_TEN_MILLISECONDS            (4)
+#define MPI2_SASIOUNIT5_ITE_ONE_MILLISECOND             (3)
+#define MPI2_SASIOUNIT5_ITE_HUNDRED_MICROSECONDS        (2)
+#define MPI2_SASIOUNIT5_ITE_TEN_MICROSECONDS            (1)
+#define MPI2_SASIOUNIT5_ITE_ONE_MICROSECOND             (0)
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check Header.ExtPageLength or NumPhys at runtime.
+ */
+#ifndef MPI2_SAS_IOUNIT5_PHY_MAX
+#define MPI2_SAS_IOUNIT5_PHY_MAX        (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_5 {
+    MPI2_CONFIG_EXTENDED_PAGE_HEADER    Header;		/* 0x00 */
+    U8                                  NumPhys;	/* 0x08 */
+    U8                                  Reserved1;	/* 0x09 */
+    U16                                 Reserved2;	/* 0x0A */
+    U32                                 Reserved3;	/* 0x0C */
+    MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS   SASPhyPowerManagementSettings
+					[MPI2_SAS_IOUNIT5_PHY_MAX];  /* 0x10 */
+} MPI2_CONFIG_PAGE_SASIOUNIT_5,
+  MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SASIOUNIT_5,
+  Mpi2SasIOUnitPage5_t, MPI2_POINTER pMpi2SasIOUnitPage5_t;
+
+#define MPI2_SASIOUNITPAGE5_PAGEVERSION     (0x00)
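[Editor's note] The header's guidance above (leave MPI2_SAS_IOUNIT5_PHY_MAX at 1 and size the page from NumPhys or ExtPageLength at runtime) typically looks like this on the host side; the allocation style is our sketch, not prescribed by the header:

#include <stddef.h>
#include <stdlib.h>

/* Allocate SAS IO Unit Page 5 sized for 'num_phys' phys instead of
 * relying on the compile-time MPI2_SAS_IOUNIT5_PHY_MAX bound. */
static MPI2_CONFIG_PAGE_SASIOUNIT_5 *
alloc_sas_iounit_pg5(unsigned int num_phys)
{
	size_t sz = offsetof(MPI2_CONFIG_PAGE_SASIOUNIT_5,
			     SASPhyPowerManagementSettings) +
		    num_phys * sizeof(MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS);

	return calloc(1, sz);
}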
+
+
+
+
 /****************************************************************************
 *   SAS Expander Config Pages
 ****************************************************************************/
@@ -1927,6 +2076,8 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_DEV_0
 /* see mpi2_sas.h for values for SAS Device Page 0 DeviceInfo values */
 
 /* values for SAS Device Page 0 Flags field */
+#define MPI2_SAS_DEVICE0_FLAGS_SLUMBER_PM_CAPABLE           (0x1000)
+#define MPI2_SAS_DEVICE0_FLAGS_PARTIAL_PM_CAPABLE           (0x0800)
 #define MPI2_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY     (0x0400)
 #define MPI2_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE             (0x0200)
 #define MPI2_SAS_DEVICE0_FLAGS_UNSUPPORTED_DEVICE           (0x0100)
@@ -2343,5 +2494,122 @@ typedef struct _MPI2_CONFIG_PAGE_DRIVER_MAPPING_0
 #define MPI2_DRVMAP0_MAPINFO_MISSING_MASK           (0x000F)
 
 
+/****************************************************************************
+*   Ethernet Config Pages
+****************************************************************************/
+
+/* Ethernet Page 0 */
+
+/* IP address (union of IPv4 and IPv6) */
+typedef union _MPI2_ETHERNET_IP_ADDR {
+    U32     IPv4Addr;
+    U32     IPv6Addr[4];
+} MPI2_ETHERNET_IP_ADDR, MPI2_POINTER PTR_MPI2_ETHERNET_IP_ADDR,
+  Mpi2EthernetIpAddr_t, MPI2_POINTER pMpi2EthernetIpAddr_t;
+
+#define MPI2_ETHERNET_HOST_NAME_LENGTH          (32)
+
+typedef struct _MPI2_CONFIG_PAGE_ETHERNET_0 {
+    MPI2_CONFIG_EXTENDED_PAGE_HEADER    Header;                 /* 0x00 */
+    U8                                  NumInterfaces;          /* 0x08 */
+    U8                                  Reserved0;              /* 0x09 */
+    U16                                 Reserved1;              /* 0x0A */
+    U32                                 Status;                 /* 0x0C */
+    U8                                  MediaState;             /* 0x10 */
+    U8                                  Reserved2;              /* 0x11 */
+    U16                                 Reserved3;              /* 0x12 */
+    U8                                  MacAddress[6];          /* 0x14 */
+    U8                                  Reserved4;              /* 0x1A */
+    U8                                  Reserved5;              /* 0x1B */
+    MPI2_ETHERNET_IP_ADDR               IpAddress;              /* 0x1C */
+    MPI2_ETHERNET_IP_ADDR               SubnetMask;             /* 0x2C */
+    MPI2_ETHERNET_IP_ADDR               GatewayIpAddress;       /* 0x3C */
+    MPI2_ETHERNET_IP_ADDR               DNS1IpAddress;          /* 0x4C */
+    MPI2_ETHERNET_IP_ADDR               DNS2IpAddress;          /* 0x5C */
+    MPI2_ETHERNET_IP_ADDR               DhcpIpAddress;          /* 0x6C */
+    U8                                  HostName
+				[MPI2_ETHERNET_HOST_NAME_LENGTH];/* 0x7C */
+} MPI2_CONFIG_PAGE_ETHERNET_0, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_ETHERNET_0,
+  Mpi2EthernetPage0_t, MPI2_POINTER pMpi2EthernetPage0_t;
+
+#define MPI2_ETHERNETPAGE0_PAGEVERSION   (0x00)
+
+/* values for Ethernet Page 0 Status field */
+#define MPI2_ETHPG0_STATUS_IPV6_CAPABLE             (0x80000000)
+#define MPI2_ETHPG0_STATUS_IPV4_CAPABLE             (0x40000000)
+#define MPI2_ETHPG0_STATUS_CONSOLE_CONNECTED        (0x20000000)
+#define MPI2_ETHPG0_STATUS_DEFAULT_IF               (0x00000100)
+#define MPI2_ETHPG0_STATUS_FW_DWNLD_ENABLED         (0x00000080)
+#define MPI2_ETHPG0_STATUS_TELNET_ENABLED           (0x00000040)
+#define MPI2_ETHPG0_STATUS_SSH2_ENABLED             (0x00000020)
+#define MPI2_ETHPG0_STATUS_DHCP_CLIENT_ENABLED      (0x00000010)
+#define MPI2_ETHPG0_STATUS_IPV6_ENABLED             (0x00000008)
+#define MPI2_ETHPG0_STATUS_IPV4_ENABLED             (0x00000004)
+#define MPI2_ETHPG0_STATUS_IPV6_ADDRESSES           (0x00000002)
+#define MPI2_ETHPG0_STATUS_ETH_IF_ENABLED           (0x00000001)
+
+/* values for Ethernet Page 0 MediaState field */
+#define MPI2_ETHPG0_MS_DUPLEX_MASK                  (0x80)
+#define MPI2_ETHPG0_MS_HALF_DUPLEX                  (0x00)
+#define MPI2_ETHPG0_MS_FULL_DUPLEX                  (0x80)
+
+#define MPI2_ETHPG0_MS_CONNECT_SPEED_MASK           (0x07)
+#define MPI2_ETHPG0_MS_NOT_CONNECTED                (0x00)
+#define MPI2_ETHPG0_MS_10MBIT                       (0x01)
+#define MPI2_ETHPG0_MS_100MBIT                      (0x02)
+#define MPI2_ETHPG0_MS_1GBIT                        (0x03)
+
+
+/* Ethernet Page 1 */
+
+typedef struct _MPI2_CONFIG_PAGE_ETHERNET_1 {
+    MPI2_CONFIG_EXTENDED_PAGE_HEADER    Header;                 /* 0x00 */
+    U32                                 Reserved0;              /* 0x08 */
+    U32                                 Flags;                  /* 0x0C */
+    U8                                  MediaState;             /* 0x10 */
+    U8                                  Reserved1;              /* 0x11 */
+    U16                                 Reserved2;              /* 0x12 */
+    U8                                  MacAddress[6];          /* 0x14 */
+    U8                                  Reserved3;              /* 0x1A */
+    U8                                  Reserved4;              /* 0x1B */
+    MPI2_ETHERNET_IP_ADDR               StaticIpAddress;        /* 0x1C */
+    MPI2_ETHERNET_IP_ADDR               StaticSubnetMask;       /* 0x2C */
+    MPI2_ETHERNET_IP_ADDR               StaticGatewayIpAddress; /* 0x3C */
+    MPI2_ETHERNET_IP_ADDR               StaticDNS1IpAddress;    /* 0x4C */
+    MPI2_ETHERNET_IP_ADDR               StaticDNS2IpAddress;    /* 0x5C */
+    U32                                 Reserved5;              /* 0x6C */
+    U32                                 Reserved6;              /* 0x70 */
+    U32                                 Reserved7;              /* 0x74 */
+    U32                                 Reserved8;              /* 0x78 */
+    U8                                  HostName
+				[MPI2_ETHERNET_HOST_NAME_LENGTH];/* 0x7C */
+} MPI2_CONFIG_PAGE_ETHERNET_1, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_ETHERNET_1,
+  Mpi2EthernetPage1_t, MPI2_POINTER pMpi2EthernetPage1_t;
+
+#define MPI2_ETHERNETPAGE1_PAGEVERSION   (0x00)
+
+/* values for Ethernet Page 1 Flags field */
+#define MPI2_ETHPG1_FLAG_SET_DEFAULT_IF             (0x00000100)
+#define MPI2_ETHPG1_FLAG_ENABLE_FW_DOWNLOAD         (0x00000080)
+#define MPI2_ETHPG1_FLAG_ENABLE_TELNET              (0x00000040)
+#define MPI2_ETHPG1_FLAG_ENABLE_SSH2                (0x00000020)
+#define MPI2_ETHPG1_FLAG_ENABLE_DHCP_CLIENT         (0x00000010)
+#define MPI2_ETHPG1_FLAG_ENABLE_IPV6                (0x00000008)
+#define MPI2_ETHPG1_FLAG_ENABLE_IPV4                (0x00000004)
+#define MPI2_ETHPG1_FLAG_USE_IPV6_ADDRESSES         (0x00000002)
+#define MPI2_ETHPG1_FLAG_ENABLE_ETH_IF              (0x00000001)
+
+/* values for Ethernet Page 1 MediaState field */
+#define MPI2_ETHPG1_MS_DUPLEX_MASK                  (0x80)
+#define MPI2_ETHPG1_MS_HALF_DUPLEX                  (0x00)
+#define MPI2_ETHPG1_MS_FULL_DUPLEX                  (0x80)
+
+#define MPI2_ETHPG1_MS_DATA_RATE_MASK               (0x07)
+#define MPI2_ETHPG1_MS_DATA_RATE_AUTO               (0x00)
+#define MPI2_ETHPG1_MS_DATA_RATE_10MBIT             (0x01)
+#define MPI2_ETHPG1_MS_DATA_RATE_100MBIT            (0x02)
+#define MPI2_ETHPG1_MS_DATA_RATE_1GBIT              (0x03)
+
+
 #endif
 
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
index c294128bdeb4..ea51ce868690 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
@@ -6,7 +6,7 @@
  *          Title:  MPI IOC, Port, Event, FW Download, and FW Upload messages
  *  Creation Date:  October 11, 2006
  *
- *  mpi2_ioc.h Version:  02.00.11
+ *  mpi2_ioc.h Version:  02.00.12
  *
  *  Version History
  *  ---------------
@@ -84,6 +84,9 @@
  *                      Added two new reason codes for SAS Device Status Change
  *                      Event.
  *                      Added new event: SAS PHY Counter.
+ *  07-30-09  02.00.12  Added GPIO Interrupt event define and structure.
+ *                      Added MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER define.
+ *                      Added new product id family for 2208.
  *  --------------------------------------------------------------------------
  */
 
@@ -274,6 +277,7 @@ typedef struct _MPI2_IOC_FACTS_REPLY
 #define MPI2_IOCFACTS_CAPABILITY_MULTICAST              (0x00000100)
 #define MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET   (0x00000080)
 #define MPI2_IOCFACTS_CAPABILITY_EEDP                   (0x00000040)
+#define MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER        (0x00000020)
 #define MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER        (0x00000010)
 #define MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER      (0x00000008)
 #define MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING (0x00000004)
@@ -448,6 +452,7 @@ typedef struct _MPI2_EVENT_NOTIFICATION_REPLY
 #define MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST     (0x0020)
 #define MPI2_EVENT_LOG_ENTRY_ADDED                  (0x0021)
 #define MPI2_EVENT_SAS_PHY_COUNTER                  (0x0022)
+#define MPI2_EVENT_GPIO_INTERRUPT                   (0x0023)
 
 
 /* Log Entry Added Event data */
@@ -469,6 +474,16 @@ typedef struct _MPI2_EVENT_DATA_LOG_ENTRY_ADDED
   MPI2_POINTER PTR_MPI2_EVENT_DATA_LOG_ENTRY_ADDED,
   Mpi2EventDataLogEntryAdded_t, MPI2_POINTER pMpi2EventDataLogEntryAdded_t;
 
+/* GPIO Interrupt Event data */
+
+typedef struct _MPI2_EVENT_DATA_GPIO_INTERRUPT {
+    U8          GPIONum;                            /* 0x00 */
+    U8          Reserved1;                          /* 0x01 */
+    U16         Reserved2;                          /* 0x02 */
+} MPI2_EVENT_DATA_GPIO_INTERRUPT,
+  MPI2_POINTER PTR_MPI2_EVENT_DATA_GPIO_INTERRUPT,
+  Mpi2EventDataGpioInterrupt_t, MPI2_POINTER pMpi2EventDataGpioInterrupt_t;
+
 /* Hard Reset Received Event data */
 
 typedef struct _MPI2_EVENT_DATA_HARD_RESET_RECEIVED
@@ -1117,6 +1132,7 @@ typedef struct _MPI2_FW_IMAGE_HEADER
 #define MPI2_FW_HEADER_PID_FAMILY_MASK          (0x00FF)
 /* SAS */
 #define MPI2_FW_HEADER_PID_FAMILY_2108_SAS      (0x0010)
+#define MPI2_FW_HEADER_PID_FAMILY_2208_SAS      (0x0011)
 
 /* use MPI2_IOCFACTS_PROTOCOL_ defines for ProtocolFlags field */
 
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_raid.h b/drivers/scsi/mpt2sas/mpi/mpi2_raid.h
index 7134816d9046..5160c33d2a00 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_raid.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_raid.h
@@ -6,7 +6,7 @@
  *          Title:  MPI Integrated RAID messages and structures
  *  Creation Date:  April 26, 2007
  *
- *    mpi2_raid.h Version:  02.00.03
+ *    mpi2_raid.h Version:  02.00.04
  *
  *  Version History
  *  ---------------
@@ -20,6 +20,8 @@
  *  05-21-08  02.00.03  Added MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS so that
  *                      the PhysDisk array in MPI2_RAID_VOLUME_CREATION_STRUCT
  *                      can be sized by the build environment.
+ *  07-30-09  02.00.04  Added proper define for the Use Default Settings bit of
+ *                      VolumeCreationFlags and marked the old one as obsolete.
  *  --------------------------------------------------------------------------
  */
 
@@ -217,10 +219,14 @@ typedef struct _MPI2_RAID_VOLUME_CREATION_STRUCT
 /* use MPI2_RAID_VOL_TYPE_ defines from mpi2_cnfg.h for VolumeType */
 
 /* defines for the VolumeCreationFlags field */
+#define MPI2_RAID_VOL_CREATION_DEFAULT_SETTINGS     (0x80000000)
+#define MPI2_RAID_VOL_CREATION_BACKGROUND_INIT      (0x00000004)
+#define MPI2_RAID_VOL_CREATION_LOW_LEVEL_INIT       (0x00000002)
+#define MPI2_RAID_VOL_CREATION_MIGRATE_DATA         (0x00000001)
+/* The following is an obsolete define.
+ * It must be shifted left 24 bits in order to set the proper bit.
+ */
 #define MPI2_RAID_VOL_CREATION_USE_DEFAULT_SETTINGS (0x80)
-#define MPI2_RAID_VOL_CREATION_BACKGROUND_INIT      (0x04)
-#define MPI2_RAID_VOL_CREATION_LOW_LEVEL_INIT       (0x02)
-#define MPI2_RAID_VOL_CREATION_MIGRATE_DATA         (0x01)
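
As a minimal check of the comment above (editor's sketch, not part of the
patch), shifting the obsolete 8-bit define left by 24 bits reproduces the
new 32-bit define:

    #include <assert.h>

    int main(void)
    {
        /* obsolete 0x80, shifted per the comment, equals the new define */
        assert((0x80u << 24) == 0x80000000u);
        return 0;
    }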
 
 
 /* RAID Online Capacity Expansion Structure */
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_tool.h b/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
index 007e950f7bfa..73fcdbf92632 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
@@ -6,7 +6,7 @@
  *          Title:  MPI diagnostic tool structures and definitions
  *  Creation Date:  March 26, 2007
  *
- *    mpi2_tool.h Version:  02.00.03
+ *    mpi2_tool.h Version:  02.00.04
  *
  *  Version History
  *  ---------------
@@ -18,6 +18,10 @@
  *                      structures and defines.
  *  02-29-08  02.00.02  Modified various names to make them 32-character unique.
  *  05-06-09  02.00.03  Added ISTWI Read Write Tool and Diagnostic CLI Tool.
+ *  07-30-09  02.00.04  Added ExtendedType field to DiagnosticBufferPost request
+ *                      and reply messages.
+ *                      Added MPI2_DIAG_BUF_TYPE_EXTENDED.
+ *                      Incremented MPI2_DIAG_BUF_TYPE_COUNT.
  *  --------------------------------------------------------------------------
  */
 
@@ -282,7 +286,7 @@ typedef struct _MPI2_TOOLBOX_DIAGNOSTIC_CLI_REPLY {
 
 typedef struct _MPI2_DIAG_BUFFER_POST_REQUEST
 {
-    U8                      Reserved1;                  /* 0x00 */
+    U8                      ExtendedType;               /* 0x00 */
     U8                      BufferType;                 /* 0x01 */
     U8                      ChainOffset;                /* 0x02 */
     U8                      Function;                   /* 0x03 */
@@ -301,11 +305,15 @@ typedef struct _MPI2_DIAG_BUFFER_POST_REQUEST
 } MPI2_DIAG_BUFFER_POST_REQUEST, MPI2_POINTER PTR_MPI2_DIAG_BUFFER_POST_REQUEST,
   Mpi2DiagBufferPostRequest_t, MPI2_POINTER pMpi2DiagBufferPostRequest_t;
 
+/* values for the ExtendedType field */
+#define MPI2_DIAG_EXTENDED_TYPE_UTILIZATION         (0x02)
+
 /* values for the BufferType field */
 #define MPI2_DIAG_BUF_TYPE_TRACE                    (0x00)
 #define MPI2_DIAG_BUF_TYPE_SNAPSHOT                 (0x01)
+#define MPI2_DIAG_BUF_TYPE_EXTENDED                 (0x02)
 /* count of the number of buffer types */
-#define MPI2_DIAG_BUF_TYPE_COUNT                    (0x02)
+#define MPI2_DIAG_BUF_TYPE_COUNT                    (0x03)
 
 
 /****************************************************************************
@@ -314,7 +322,7 @@ typedef struct _MPI2_DIAG_BUFFER_POST_REQUEST
 
 typedef struct _MPI2_DIAG_BUFFER_POST_REPLY
 {
-    U8                      Reserved1;                  /* 0x00 */
+    U8                      ExtendedType;               /* 0x00 */
     U8                      BufferType;                 /* 0x01 */
     U8                      MsgLength;                  /* 0x02 */
     U8                      Function;                   /* 0x03 */
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index 670241efa4b5..6422e258fd52 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -57,6 +57,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/sort.h>
 #include <linux/io.h>
+#include <linux/time.h>
 
 #include "mpt2sas_base.h"
 
@@ -77,6 +78,44 @@ static int msix_disable = -1;
 module_param(msix_disable, int, 0);
 MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");
 
+/* diag_buffer_enable is bitwise
+ * bit 0 set = TRACE
+ * bit 1 set = SNAPSHOT
+ * bit 2 set = EXTENDED
+ *
+ * Any combination of these bits may be set.
+ */
+static int diag_buffer_enable;
+module_param(diag_buffer_enable, int, 0);
+MODULE_PARM_DESC(diag_buffer_enable, " post diag buffers "
+    "(TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)");
+
+int mpt2sas_fwfault_debug;
+MODULE_PARM_DESC(mpt2sas_fwfault_debug, " enable detection of firmware fault "
+    "and halt firmware - (default=0)");
+
+/**
+ * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
+ * @val: value written to the module parameter
+ * @kp: kernel parameter descriptor
+ */
+static int
+_scsih_set_fwfault_debug(const char *val, struct kernel_param *kp)
+{
+	int ret = param_set_int(val, kp);
+	struct MPT2SAS_ADAPTER *ioc;
+
+	if (ret)
+		return ret;
+
+	printk(KERN_INFO "setting logging_level(0x%08x)\n",
+				mpt2sas_fwfault_debug);
+	list_for_each_entry(ioc, &mpt2sas_ioc_list, list)
+		ioc->fwfault_debug = mpt2sas_fwfault_debug;
+	return 0;
+}
+module_param_call(mpt2sas_fwfault_debug, _scsih_set_fwfault_debug,
+    param_get_int, &mpt2sas_fwfault_debug, 0644);
+
 /**
  * _base_fault_reset_work - workq handling ioc fault conditions
  * @work: input argument, used to derive ioc
@@ -121,7 +160,7 @@ _base_fault_reset_work(struct work_struct *work)
 
 /**
  * mpt2sas_base_start_watchdog - start the fault_reset_work_q
- * @ioc: pointer to scsi command object
+ * @ioc: per adapter object
  * Context: sleep.
  *
  * Return nothing.
@@ -155,7 +194,7 @@ mpt2sas_base_start_watchdog(struct MPT2SAS_ADAPTER *ioc)
 
 /**
  * mpt2sas_base_stop_watchdog - stop the fault_reset_work_q
- * @ioc: pointer to scsi command object
+ * @ioc: per adapter object
  * Context: sleep.
  *
  * Return nothing.
@@ -177,10 +216,55 @@ mpt2sas_base_stop_watchdog(struct MPT2SAS_ADAPTER *ioc)
 	}
 }
 
+/**
+ * mpt2sas_base_fault_info - verbose translation of firmware FAULT code
+ * @ioc: per adapter object
+ * @fault_code: fault code
+ *
+ * Return nothing.
+ */
+void
+mpt2sas_base_fault_info(struct MPT2SAS_ADAPTER *ioc , u16 fault_code)
+{
+	printk(MPT2SAS_ERR_FMT "fault_state(0x%04x)!\n",
+	    ioc->name, fault_code);
+}
+
+/**
+ * mpt2sas_halt_firmware - halts the mpt controller firmware
+ * @ioc: per adapter object
+ *
+ * For debugging timeout related issues.  Writing 0xC0FFEE00
+ * to the doorbell register will halt controller firmware.  This
+ * stops both the driver and the firmware so the end user can
+ * obtain a ring buffer from the controller UART.
+ */
+void
+mpt2sas_halt_firmware(struct MPT2SAS_ADAPTER *ioc)
+{
+	u32 doorbell;
+
+	if (!ioc->fwfault_debug)
+		return;
+
+	dump_stack();
+
+	doorbell = readl(&ioc->chip->Doorbell);
+	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
+		mpt2sas_base_fault_info(ioc , doorbell);
+	else {
+		writel(0xC0FFEE00, &ioc->chip->Doorbell);
+		printk(MPT2SAS_ERR_FMT "Firmware is halted due to command "
+		    "timeout\n", ioc->name);
+	}
+
+	panic("panic in %s\n", __func__);
+}
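
For reference, a standalone sketch of the doorbell decoding used above; the
mask and state values are assumptions quoted from mpi2.h rather than taken
from this patch:

    #include <stdio.h>

    #define IOC_STATE_MASK     0xF0000000u  /* assumed MPI2_IOC_STATE_MASK */
    #define IOC_STATE_FAULT    0x40000000u  /* assumed MPI2_IOC_STATE_FAULT */
    #define DOORBELL_DATA_MASK 0x0000FFFFu  /* fault code in the low 16 bits */

    int main(void)
    {
        unsigned int doorbell = 0x40002622u;    /* hypothetical FAULT value */

        if ((doorbell & IOC_STATE_MASK) == IOC_STATE_FAULT)
            printf("fault_state(0x%04x)!\n", doorbell & DOORBELL_DATA_MASK);
        return 0;
    }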
+
 #ifdef CONFIG_SCSI_MPT2SAS_LOGGING
 /**
  * _base_sas_ioc_info - verbose translation of the ioc status
- * @ioc: pointer to scsi command object
+ * @ioc: per adapter object
  * @mpi_reply: reply mf payload returned from firmware
  * @request_hdr: request mf
  *
@@ -394,7 +478,7 @@ _base_sas_ioc_info(struct MPT2SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
 
 /**
  * _base_display_event_data - verbose translation of firmware async events
- * @ioc: pointer to scsi command object
+ * @ioc: per adapter object
  * @mpi_reply: reply mf payload returned from firmware
  *
  * Return nothing.
@@ -474,7 +558,7 @@ _base_display_event_data(struct MPT2SAS_ADAPTER *ioc,
 
 /**
  * _base_sas_log_info - verbose translation of firmware log info
- * @ioc: pointer to scsi command object
+ * @ioc: per adapter object
  * @log_info: log info
  *
  * Return nothing.
@@ -526,22 +610,8 @@ _base_sas_log_info(struct MPT2SAS_ADAPTER *ioc , u32 log_info)
 }
 
 /**
- * mpt2sas_base_fault_info - verbose translation of firmware FAULT code
- * @ioc: pointer to scsi command object
- * @fault_code: fault code
- *
- * Return nothing.
- */
-void
-mpt2sas_base_fault_info(struct MPT2SAS_ADAPTER *ioc , u16 fault_code)
-{
-	printk(MPT2SAS_ERR_FMT "fault_state(0x%04x)!\n",
-	    ioc->name, fault_code);
-}
-
-/**
  * _base_display_reply_info -
- * @ioc: pointer to scsi command object
+ * @ioc: per adapter object
  * @smid: system request message index
  * @msix_index: MSIX table index supplied by the OS
  * @reply: reply message frame(lower 32bit addr)
@@ -570,7 +640,7 @@ _base_display_reply_info(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
 
 /**
  * mpt2sas_base_done - base internal command completion routine
- * @ioc: pointer to scsi command object
+ * @ioc: per adapter object
  * @smid: system request message index
  * @msix_index: MSIX table index supplied by the OS
  * @reply: reply message frame(lower 32bit addr)
@@ -603,7 +673,7 @@ mpt2sas_base_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
 
 /**
  * _base_async_event - main callback handler for firmware async events
- * @ioc: pointer to scsi command object
+ * @ioc: per adapter object
  * @msix_index: MSIX table index supplied by the OS
  * @reply: reply message frame(lower 32bit addr)
  *
@@ -684,7 +754,7 @@ _base_get_cb_idx(struct MPT2SAS_ADAPTER *ioc, u16 smid)
 
 /**
  * _base_mask_interrupts - disable interrupts
- * @ioc: pointer to scsi command object
+ * @ioc: per adapter object
  *
  * Disabling ResetIRQ, Reply and Doorbell Interrupts
  *
@@ -704,7 +774,7 @@ _base_mask_interrupts(struct MPT2SAS_ADAPTER *ioc)
 
 /**
  * _base_unmask_interrupts - enable interrupts
- * @ioc: pointer to scsi command object
+ * @ioc: per adapter object
  *
  * Enabling only Reply Interrupts
  *
@@ -1258,12 +1328,13 @@ mpt2sas_base_get_sense_buffer(struct MPT2SAS_ADAPTER *ioc, u16 smid)
  * @ioc: per adapter object
  * @smid: system request message index
  *
- * Returns phys pointer to sense buffer.
+ * Returns the little-endian low 32 bits of the sense buffer DMA address.
  */
-dma_addr_t
+__le32
 mpt2sas_base_get_sense_buffer_dma(struct MPT2SAS_ADAPTER *ioc, u16 smid)
 {
-	return ioc->sense_dma + ((smid - 1) * SCSI_SENSE_BUFFERSIZE);
+	return cpu_to_le32(ioc->sense_dma +
+			((smid - 1) * SCSI_SENSE_BUFFERSIZE));
 }
 
 /**
@@ -1697,6 +1768,12 @@ _base_display_ioc_capabilities(struct MPT2SAS_ADAPTER *ioc)
 	}
 
 	if (ioc->facts.IOCCapabilities &
+	    MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) {
+		printk(KERN_INFO "%sDiag Extended Buffer", i ? "," : "");
+		i++;
+	}
+
+	if (ioc->facts.IOCCapabilities &
 	    MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING) {
 		printk("%sTask Set Full", i ? "," : "");
 		i++;
@@ -2871,6 +2948,8 @@ _base_send_ioc_init(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
 	Mpi2IOCInitRequest_t mpi_request;
 	Mpi2IOCInitReply_t mpi_reply;
 	int r;
+	struct timeval current_time;
+	u16 ioc_status;
 
 	dinitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name,
 	    __func__));
@@ -2921,6 +3000,13 @@ _base_send_ioc_init(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
 	    cpu_to_le32(ioc->reply_post_free_dma);
 #endif
 
+	/* This time stamp specifies the number of milliseconds
+	 * since the epoch (midnight, January 1, 1970).
+	 */
+	do_gettimeofday(&current_time);
+	mpi_request.TimeStamp = (current_time.tv_sec * 1000) +
+	    (current_time.tv_usec / 1000);
+
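
A userspace analogue of the timestamp computation above (editor's sketch):
milliseconds since the epoch are seconds * 1000 plus microseconds / 1000.

    #include <stdio.h>
    #include <sys/time.h>

    int main(void)
    {
        struct timeval now;

        gettimeofday(&now, NULL);
        printf("%lld ms since the epoch\n",
               (long long)now.tv_sec * 1000 + now.tv_usec / 1000);
        return 0;
    }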
 	if (ioc->logging_level & MPT_DEBUG_INIT) {
 		u32 *mfp;
 		int i;
@@ -2943,7 +3029,8 @@ _base_send_ioc_init(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
 		return r;
 	}
 
-	if (mpi_reply.IOCStatus != MPI2_IOCSTATUS_SUCCESS ||
+	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
+	if (ioc_status != MPI2_IOCSTATUS_SUCCESS ||
 	    mpi_reply.IOCLogInfo) {
 		printk(MPT2SAS_ERR_FMT "%s: failed\n", ioc->name, __func__);
 		r = -EIO;
@@ -3461,11 +3548,11 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
 		return r;
 
 	pci_set_drvdata(ioc->pdev, ioc->shost);
-	r = _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
+	r = _base_get_ioc_facts(ioc, CAN_SLEEP);
 	if (r)
 		goto out_free_resources;
 
-	r = _base_get_ioc_facts(ioc, CAN_SLEEP);
+	r = _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
 	if (r)
 		goto out_free_resources;
 
@@ -3531,6 +3618,8 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
 		goto out_free_resources;
 
 	mpt2sas_base_start_watchdog(ioc);
+	if (diag_buffer_enable != 0)
+		mpt2sas_enable_diag_buffer(ioc, diag_buffer_enable);
 	return 0;
 
  out_free_resources:
@@ -3684,6 +3773,9 @@ mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag,
 	dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: enter\n", ioc->name,
 	    __func__));
 
+	if (mpt2sas_fwfault_debug)
+		mpt2sas_halt_firmware(ioc);
+
 	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
 	if (ioc->shost_recovery) {
 		spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index 0cf6bc236e4d..bb4f14656afa 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -69,8 +69,8 @@
 #define MPT2SAS_DRIVER_NAME		"mpt2sas"
 #define MPT2SAS_AUTHOR	"LSI Corporation <DL-MPTFusionLinux@lsi.com>"
 #define MPT2SAS_DESCRIPTION	"LSI MPT Fusion SAS 2.0 Device Driver"
-#define MPT2SAS_DRIVER_VERSION		"02.100.03.00"
-#define MPT2SAS_MAJOR_VERSION		02
+#define MPT2SAS_DRIVER_VERSION		"03.100.03.00"
+#define MPT2SAS_MAJOR_VERSION		03
 #define MPT2SAS_MINOR_VERSION		100
 #define MPT2SAS_BUILD_VERSION		03
 #define MPT2SAS_RELEASE_VERSION		00
@@ -278,7 +278,7 @@ struct _internal_cmd {
  * @sas_address: device sas address
  * @device_name: retrieved from the SAS IDENTIFY frame.
  * @handle: device handle
- * @parent_handle: handle to parent device
+ * @sas_address_parent: sas address of parent expander or sas host
  * @enclosure_handle: enclosure handle
  * @enclosure_logical_id: enclosure logical identifier
  * @volume_handle: volume handle (valid when hidden raid member)
@@ -296,7 +296,7 @@ struct _sas_device {
 	u64	sas_address;
 	u64	device_name;
 	u16	handle;
-	u16	parent_handle;
+	u64	sas_address_parent;
 	u16	enclosure_handle;
 	u64	enclosure_logical_id;
 	u16	volume_handle;
@@ -352,8 +352,6 @@ struct _boot_device {
 /**
  * struct _sas_port - wide/narrow sas port information
  * @port_list: list of ports belonging to expander
- * @handle: device handle for this port
- * @sas_address: sas address of this port
  * @num_phys: number of phys belonging to this port
  * @remote_identify: attached device identification
  * @rphy: sas transport rphy object
@@ -362,8 +360,6 @@ struct _boot_device {
  */
 struct _sas_port {
 	struct list_head port_list;
-	u16	handle;
-	u64	sas_address;
 	u8	num_phys;
 	struct sas_identify remote_identify;
 	struct sas_rphy *rphy;
@@ -398,7 +394,7 @@ struct _sas_phy {
  * @num_phys: number phys belonging to this sas_host/expander
  * @sas_address: sas address of this sas_host/expander
  * @handle: handle for this sas_host/expander
- * @parent_handle: parent handle
+ * @sas_address_parent: sas address of parent expander or sas host
  * @enclosure_handle: handle for this a member of an enclosure
  * @device_info: bitwise defining capabilities of this sas_host/expander
  * @responding: used in _scsih_expander_device_mark_responding
@@ -411,7 +407,7 @@ struct _sas_node {
 	u8	num_phys;
 	u64	sas_address;
 	u16	handle;
-	u16	parent_handle;
+	u64	sas_address_parent;
 	u16	enclosure_handle;
 	u64	enclosure_logical_id;
 	u8	responding;
@@ -470,6 +466,7 @@ typedef void (*MPT_ADD_SGE)(void *paddr, u32 flags_length, dma_addr_t dma_addr);
  * @chip_phys: physical address prior to mapping
  * @pio_chip: I/O mapped register space
  * @logging_level: see mpt2sas_debug.h
+ * @fwfault_debug: debugging FW timeouts
  * @ir_firmware: IR firmware present
  * @bars: bitmask of BAR's that must be configured
  * @mask_interrupts: ignore interrupt
@@ -495,12 +492,14 @@ typedef void (*MPT_ADD_SGE)(void *paddr, u32 flags_length, dma_addr_t dma_addr);
  * @msix_table_backup: backup msix table
  * @scsi_io_cb_idx: shost generated commands
  * @tm_cb_idx: task management commands
+ * @scsih_cb_idx: scsih internal commands
  * @transport_cb_idx: transport internal commands
  * @ctl_cb_idx: ctl internal commands
  * @base_cb_idx: base internal commands
  * @config_cb_idx: config internal commands
  * @base_cmds:
  * @transport_cmds:
+ * @scsih_cmds:
  * @tm_cmds:
  * @ctl_cmds:
  * @config_cmds:
@@ -591,6 +590,7 @@ struct MPT2SAS_ADAPTER {
 	unsigned long	chip_phys;
 	unsigned long	pio_chip;
 	int		logging_level;
+	int		fwfault_debug;
 	u8		ir_firmware;
 	int		bars;
 	u8		mask_interrupts;
@@ -626,6 +626,7 @@ struct MPT2SAS_ADAPTER {
 	u8		scsi_io_cb_idx;
 	u8		tm_cb_idx;
 	u8		transport_cb_idx;
+	u8		scsih_cb_idx;
 	u8		ctl_cb_idx;
 	u8		base_cb_idx;
 	u8		config_cb_idx;
@@ -633,6 +634,7 @@ struct MPT2SAS_ADAPTER {
 	u8		tm_sas_control_cb_idx;
 	struct _internal_cmd base_cmds;
 	struct _internal_cmd transport_cmds;
+	struct _internal_cmd scsih_cmds;
 	struct _internal_cmd tm_cmds;
 	struct _internal_cmd ctl_cmds;
 	struct _internal_cmd config_cmds;
@@ -773,7 +775,7 @@ int mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag,
 void *mpt2sas_base_get_msg_frame(struct MPT2SAS_ADAPTER *ioc, u16 smid);
 void *mpt2sas_base_get_sense_buffer(struct MPT2SAS_ADAPTER *ioc, u16 smid);
 void mpt2sas_base_build_zero_len_sge(struct MPT2SAS_ADAPTER *ioc, void *paddr);
-dma_addr_t mpt2sas_base_get_sense_buffer_dma(struct MPT2SAS_ADAPTER *ioc,
+__le32 mpt2sas_base_get_sense_buffer_dma(struct MPT2SAS_ADAPTER *ioc,
     u16 smid);
 
 /* hi-priority queue */
@@ -807,6 +809,8 @@ int mpt2sas_base_scsi_enclosure_processor(struct MPT2SAS_ADAPTER *ioc,
     Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request);
 void mpt2sas_base_validate_event_type(struct MPT2SAS_ADAPTER *ioc, u32 *event_type);
 
+void mpt2sas_halt_firmware(struct MPT2SAS_ADAPTER *ioc);
+
 /* scsih shared API */
 u8 mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
     u32 reply);
@@ -886,19 +890,22 @@ u8 mpt2sas_ctl_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
 void mpt2sas_ctl_add_to_event_log(struct MPT2SAS_ADAPTER *ioc,
     Mpi2EventNotificationReply_t *mpi_reply);
 
+void mpt2sas_enable_diag_buffer(struct MPT2SAS_ADAPTER *ioc,
+	u8 bits_to_register);
+
 /* transport shared API */
 u8 mpt2sas_transport_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
     u32 reply);
 struct _sas_port *mpt2sas_transport_port_add(struct MPT2SAS_ADAPTER *ioc,
-    u16 handle, u16 parent_handle);
+     u16 handle, u64 sas_address);
 void mpt2sas_transport_port_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address,
-    u16 parent_handle);
+     u64 sas_address_parent);
 int mpt2sas_transport_add_host_phy(struct MPT2SAS_ADAPTER *ioc, struct _sas_phy
     *mpt2sas_phy, Mpi2SasPhyPage0_t phy_pg0, struct device *parent_dev);
 int mpt2sas_transport_add_expander_phy(struct MPT2SAS_ADAPTER *ioc, struct _sas_phy
     *mpt2sas_phy, Mpi2ExpanderPage1_t expander_pg1, struct device *parent_dev);
-void mpt2sas_transport_update_links(struct MPT2SAS_ADAPTER *ioc, u16 handle,
-   u16 attached_handle, u8 phy_number, u8 link_rate);
+void mpt2sas_transport_update_links(struct MPT2SAS_ADAPTER *ioc,
+     u64 sas_address, u16 handle, u8 phy_number, u8 link_rate);
 extern struct sas_function_template mpt2sas_transport_functions;
 extern struct scsi_transport_template *mpt2sas_transport_template;
 extern int scsi_internal_device_block(struct scsi_device *sdev);
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
index 57d724633906..84a124f8e21f 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
@@ -740,7 +740,7 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
 		Mpi2SCSIIORequest_t *scsiio_request =
 		    (Mpi2SCSIIORequest_t *)mpi_request;
 		scsiio_request->SenseBufferLowAddress =
-		    (u32)mpt2sas_base_get_sense_buffer_dma(ioc, smid);
+		    mpt2sas_base_get_sense_buffer_dma(ioc, smid);
 		priv_sense = mpt2sas_base_get_sense_buffer(ioc, smid);
 		memset(priv_sense, 0, SCSI_SENSE_BUFFERSIZE);
 		mpt2sas_base_put_smid_scsi_io(ioc, smid,
@@ -848,8 +848,9 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
 		printk(MPT2SAS_DEBUG_FMT "TASK_MGMT: "
 		    "IOCStatus(0x%04x), IOCLogInfo(0x%08x), "
 		    "TerminationCount(0x%08x)\n", ioc->name,
-		    tm_reply->IOCStatus, tm_reply->IOCLogInfo,
-		    tm_reply->TerminationCount);
+		    le16_to_cpu(tm_reply->IOCStatus),
+		    le32_to_cpu(tm_reply->IOCLogInfo),
+		    le32_to_cpu(tm_reply->TerminationCount));
 	}
 #endif
 	/* copy out xdata to user */
@@ -896,6 +897,7 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
 			printk(MPT2SAS_INFO_FMT "issue target reset: handle "
 			    "= (0x%04x)\n", ioc->name,
 			    mpi_request->FunctionDependent1);
+			mpt2sas_halt_firmware(ioc);
 			mutex_lock(&ioc->tm_cmds.mutex);
 			mpt2sas_scsih_issue_tm(ioc,
 			    mpi_request->FunctionDependent1, 0,
@@ -1229,7 +1231,7 @@ _ctl_btdh_mapping(void __user *arg)
 /**
  * _ctl_diag_capability - return diag buffer capability
  * @ioc: per adapter object
- * @buffer_type: specifies either TRACE or SNAPSHOT
+ * @buffer_type: specifies either TRACE, SNAPSHOT, or EXTENDED
  *
  * returns 1 when diag buffer support is enabled in firmware
  */
@@ -1249,24 +1251,25 @@ _ctl_diag_capability(struct MPT2SAS_ADAPTER *ioc, u8 buffer_type)
 		    MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER)
 			rc = 1;
 		break;
+	case MPI2_DIAG_BUF_TYPE_EXTENDED:
+		if (ioc->facts.IOCCapabilities &
+		    MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER)
+			rc = 1;
+		break;
 	}
 
 	return rc;
 }
 
 /**
- * _ctl_diag_register - application register with driver
- * @arg - user space buffer containing ioctl content
- * @state - NON_BLOCKING or BLOCKING
+ * _ctl_diag_register_2 - register a diag buffer with the firmware
+ * @ioc: per adapter object
+ * @diag_register: the diag_register struct passed in from user space
  *
- * This will allow the driver to setup any required buffers that will be
- * needed by firmware to communicate with the driver.
  */
 static long
-_ctl_diag_register(void __user *arg, enum block_state state)
+_ctl_diag_register_2(struct MPT2SAS_ADAPTER *ioc,
+    struct mpt2_diag_register *diag_register)
 {
-	struct mpt2_diag_register karg;
-	struct MPT2SAS_ADAPTER *ioc;
 	int rc, i;
 	void *request_data = NULL;
 	dma_addr_t request_data_dma;
@@ -1279,18 +1282,17 @@ _ctl_diag_register(void __user *arg, enum block_state state)
 	u16 ioc_status;
 	u8 issue_reset = 0;
 
-	if (copy_from_user(&karg, arg, sizeof(karg))) {
-		printk(KERN_ERR "failure at %s:%d/%s()!\n",
-		    __FILE__, __LINE__, __func__);
-		return -EFAULT;
-	}
-	if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
-		return -ENODEV;
-
 	dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name,
 	    __func__));
 
-	buffer_type = karg.buffer_type;
+	if (ioc->ctl_cmds.status != MPT2_CMD_NOT_USED) {
+		printk(MPT2SAS_ERR_FMT "%s: ctl_cmd in use\n",
+		    ioc->name, __func__);
+		rc = -EAGAIN;
+		goto out;
+	}
+
+	buffer_type = diag_register->buffer_type;
 	if (!_ctl_diag_capability(ioc, buffer_type)) {
 		printk(MPT2SAS_ERR_FMT "%s: doesn't have capability for "
 		    "buffer_type(0x%02x)\n", ioc->name, __func__, buffer_type);
@@ -1305,24 +1307,12 @@ _ctl_diag_register(void __user *arg, enum block_state state)
 		return -EINVAL;
 	}
 
-	if (karg.requested_buffer_size % 4)  {
+	if (diag_register->requested_buffer_size % 4)  {
 		printk(MPT2SAS_ERR_FMT "%s: the requested_buffer_size "
 		    "is not 4 byte aligned\n", ioc->name, __func__);
 		return -EINVAL;
 	}
 
-	if (state == NON_BLOCKING && !mutex_trylock(&ioc->ctl_cmds.mutex))
-		return -EAGAIN;
-	else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex))
-		return -ERESTARTSYS;
-
-	if (ioc->ctl_cmds.status != MPT2_CMD_NOT_USED) {
-		printk(MPT2SAS_ERR_FMT "%s: ctl_cmd in use\n",
-		    ioc->name, __func__);
-		rc = -EAGAIN;
-		goto out;
-	}
-
 	smid = mpt2sas_base_get_smid(ioc, ioc->ctl_cb_idx);
 	if (!smid) {
 		printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
@@ -1338,12 +1328,12 @@ _ctl_diag_register(void __user *arg, enum block_state state)
 	ioc->ctl_cmds.smid = smid;
 
 	request_data = ioc->diag_buffer[buffer_type];
-	request_data_sz = karg.requested_buffer_size;
-	ioc->unique_id[buffer_type] = karg.unique_id;
+	request_data_sz = diag_register->requested_buffer_size;
+	ioc->unique_id[buffer_type] = diag_register->unique_id;
 	ioc->diag_buffer_status[buffer_type] = 0;
-	memcpy(ioc->product_specific[buffer_type], karg.product_specific,
-	    MPT2_PRODUCT_SPECIFIC_DWORDS);
-	ioc->diagnostic_flags[buffer_type] = karg.diagnostic_flags;
+	memcpy(ioc->product_specific[buffer_type],
+	    diag_register->product_specific, MPT2_PRODUCT_SPECIFIC_DWORDS);
+	ioc->diagnostic_flags[buffer_type] = diag_register->diagnostic_flags;
 
 	if (request_data) {
 		request_data_dma = ioc->diag_buffer_dma[buffer_type];
@@ -1373,8 +1363,8 @@ _ctl_diag_register(void __user *arg, enum block_state state)
 	}
 
 	mpi_request->Function = MPI2_FUNCTION_DIAG_BUFFER_POST;
-	mpi_request->BufferType = karg.buffer_type;
-	mpi_request->Flags = cpu_to_le32(karg.diagnostic_flags);
+	mpi_request->BufferType = diag_register->buffer_type;
+	mpi_request->Flags = cpu_to_le32(diag_register->diagnostic_flags);
 	mpi_request->BufferAddress = cpu_to_le64(request_data_dma);
 	mpi_request->BufferLength = cpu_to_le32(request_data_sz);
 	mpi_request->VF_ID = 0; /* TODO */
@@ -1422,7 +1412,7 @@ _ctl_diag_register(void __user *arg, enum block_state state)
 	} else {
 		printk(MPT2SAS_DEBUG_FMT "%s: ioc_status(0x%04x) "
 		    "log_info(0x%08x)\n", ioc->name, __func__,
-		    ioc_status, mpi_reply->IOCLogInfo);
+		    ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
 		rc = -EFAULT;
 	}
 
@@ -1438,6 +1428,83 @@ _ctl_diag_register(void __user *arg, enum block_state state)
 		    request_data, request_data_dma);
 
 	ioc->ctl_cmds.status = MPT2_CMD_NOT_USED;
+	return rc;
+}
+
+/**
+ * mpt2sas_enable_diag_buffer - enable diag_buffer support at driver load time
+ * @ioc: per adapter object
+ * @bits_to_register: bitwise field where trace is bit 0, snapshot is bit 1,
+ *	and extended is bit 2
+ *
+ * This is called when the module parameter diag_buffer_enable is set
+ * at driver load time.
+ */
+void
+mpt2sas_enable_diag_buffer(struct MPT2SAS_ADAPTER *ioc, u8 bits_to_register)
+{
+	struct mpt2_diag_register diag_register;
+
+	memset(&diag_register, 0, sizeof(struct mpt2_diag_register));
+
+	if (bits_to_register & 1) {
+		printk(MPT2SAS_INFO_FMT "registering trace buffer support\n",
+		    ioc->name);
+		diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;
+		/* register for 1MB buffers  */
+		diag_register.requested_buffer_size = (1024 * 1024);
+		diag_register.unique_id = 0x7075900;
+		_ctl_diag_register_2(ioc,  &diag_register);
+	}
+
+	if (bits_to_register & 2) {
+		printk(MPT2SAS_INFO_FMT "registering snapshot buffer support\n",
+		    ioc->name);
+		diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_SNAPSHOT;
+		/* register for 2MB buffers  */
+		diag_register.requested_buffer_size = 2 * (1024 * 1024);
+		diag_register.unique_id = 0x7075901;
+		_ctl_diag_register_2(ioc,  &diag_register);
+	}
+
+	if (bits_to_register & 4) {
+		printk(MPT2SAS_INFO_FMT "registering extended buffer support\n",
+		    ioc->name);
+		diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_EXTENDED;
+		/* register for 2MB buffers  */
+		diag_register.requested_buffer_size = 2 * (1024 * 1024);
+		diag_register.unique_id = 0x7075901;
+		_ctl_diag_register_2(ioc,  &diag_register);
+	}
+}
+
+/**
+ * _ctl_diag_register - application register with driver
+ * @arg - user space buffer containing ioctl content
+ * @state - NON_BLOCKING or BLOCKING
+ *
+ * This will allow the driver to setup any required buffers that will be
+ * needed by firmware to communicate with the driver.
+ */
+static long
+_ctl_diag_register(void __user *arg, enum block_state state)
+{
+	struct mpt2_diag_register karg;
+	struct MPT2SAS_ADAPTER *ioc;
+	long rc;
+
+	if (copy_from_user(&karg, arg, sizeof(karg))) {
+		printk(KERN_ERR "failure at %s:%d/%s()!\n",
+		    __FILE__, __LINE__, __func__);
+		return -EFAULT;
+	}
+	if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
+		return -ENODEV;
+
+	if (state == NON_BLOCKING && !mutex_trylock(&ioc->ctl_cmds.mutex))
+		return -EAGAIN;
+	else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex))
+		return -ERESTARTSYS;
+	rc = _ctl_diag_register_2(ioc, &karg);
 	mutex_unlock(&ioc->ctl_cmds.mutex);
 	return rc;
 }
@@ -1600,7 +1667,7 @@ _ctl_diag_query(void __user *arg)
 /**
  * _ctl_send_release - Diag Release Message
  * @ioc: per adapter object
- * @buffer_type - specifies either TRACE or SNAPSHOT
+ * @buffer_type - specifies either TRACE, SNAPSHOT, or EXTENDED
  * @issue_reset - specifies whether host reset is required.
  *
  */
@@ -1690,7 +1757,7 @@ _ctl_send_release(struct MPT2SAS_ADAPTER *ioc, u8 buffer_type, u8 *issue_reset)
 	} else {
 		printk(MPT2SAS_DEBUG_FMT "%s: ioc_status(0x%04x) "
 		    "log_info(0x%08x)\n", ioc->name, __func__,
-		    ioc_status, mpi_reply->IOCLogInfo);
+		    ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
 		rc = -EFAULT;
 	}
 
@@ -1951,7 +2018,7 @@ _ctl_diag_read_buffer(void __user *arg, enum block_state state)
 	} else {
 		printk(MPT2SAS_DEBUG_FMT "%s: ioc_status(0x%04x) "
 		    "log_info(0x%08x)\n", ioc->name, __func__,
-		    ioc_status, mpi_reply->IOCLogInfo);
+		    ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
 		rc = -EFAULT;
 	}
 
@@ -2474,6 +2541,43 @@ _ctl_logging_level_store(struct device *cdev, struct device_attribute *attr,
 static DEVICE_ATTR(logging_level, S_IRUGO | S_IWUSR,
     _ctl_logging_level_show, _ctl_logging_level_store);
 
+/* device attributes */
+/*
+ * _ctl_fwfault_debug_show - show/store fwfault_debug
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * mpt2sas_fwfault_debug is a module command line option.
+ * A sysfs 'read/write' shost attribute.
+ */
+static ssize_t
+_ctl_fwfault_debug_show(struct device *cdev,
+    struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", ioc->fwfault_debug);
+}
+static ssize_t
+_ctl_fwfault_debug_store(struct device *cdev,
+    struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+	int val = 0;
+
+	if (sscanf(buf, "%d", &val) != 1)
+		return -EINVAL;
+
+	ioc->fwfault_debug = val;
+	printk(MPT2SAS_INFO_FMT "fwfault_debug=%d\n", ioc->name,
+	    ioc->fwfault_debug);
+	return strlen(buf);
+}
+static DEVICE_ATTR(fwfault_debug, S_IRUGO | S_IWUSR,
+    _ctl_fwfault_debug_show, _ctl_fwfault_debug_store);
+
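
A minimal userspace sketch of driving this attribute; the host number in the
sysfs path is hypothetical and varies per system:

    #include <stdio.h>

    int main(void)
    {
        /* shost attributes appear under /sys/class/scsi_host/hostN/ */
        FILE *f = fopen("/sys/class/scsi_host/host0/fwfault_debug", "w");

        if (!f)
            return 1;
        fputs("1\n", f);    /* enable firmware-fault debugging */
        return fclose(f) ? 1 : 0;
    }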
 struct device_attribute *mpt2sas_host_attrs[] = {
 	&dev_attr_version_fw,
 	&dev_attr_version_bios,
@@ -2487,13 +2591,12 @@ struct device_attribute *mpt2sas_host_attrs[] = {
 	&dev_attr_io_delay,
 	&dev_attr_device_delay,
 	&dev_attr_logging_level,
+	&dev_attr_fwfault_debug,
 	&dev_attr_fw_queue_depth,
 	&dev_attr_host_sas_address,
 	NULL,
 };
 
-/* device attributes */
-
 /**
  * _ctl_device_sas_address_show - sas address
  * @cdev - pointer to embedded class device
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.h b/drivers/scsi/mpt2sas/mpt2sas_ctl.h
index 211f296dd191..8a5eeb1a5c84 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.h
@@ -313,7 +313,7 @@ struct mpt2_ioctl_btdh_mapping {
  * struct mpt2_diag_register - application register with driver
  * @hdr - generic header
  * @reserved -
- * @buffer_type - specifies either TRACE or SNAPSHOT
+ * @buffer_type - specifies either TRACE, SNAPSHOT, or EXTENDED
  * @application_flags - misc flags
  * @diagnostic_flags - specifies flags affecting command processing
  * @product_specific - product specific information
@@ -352,7 +352,7 @@ struct mpt2_diag_unregister {
  * struct mpt2_diag_query - query relevant info associated with diag buffers
  * @hdr - generic header
  * @reserved -
- * @buffer_type - specifies either TRACE or SNAPSHOT
+ * @buffer_type - specifies either TRACE, SNAPSHOT, or EXTENDED
  * @application_flags - misc flags
  * @diagnostic_flags - specifies flags affecting command processing
  * @product_specific - product specific information
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index 86ab32d7ab15..55ee014a7e08 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -76,6 +76,7 @@ static u8 tm_cb_idx = -1;
 static u8 ctl_cb_idx = -1;
 static u8 base_cb_idx = -1;
 static u8 transport_cb_idx = -1;
+static u8 scsih_cb_idx = -1;
 static u8 config_cb_idx = -1;
 static int mpt_ids;
 
@@ -196,10 +197,28 @@ static struct pci_device_id scsih_pci_table[] = {
 		PCI_ANY_ID, PCI_ANY_ID },
 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_3,
 		PCI_ANY_ID, PCI_ANY_ID },
+	/* Meteor ~ 2116 */
 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_1,
 		PCI_ANY_ID, PCI_ANY_ID },
 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_2,
 		PCI_ANY_ID, PCI_ANY_ID },
+	/* Thunderbolt ~ 2208 */
+	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_1,
+		PCI_ANY_ID, PCI_ANY_ID },
+	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_2,
+		PCI_ANY_ID, PCI_ANY_ID },
+	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_3,
+		PCI_ANY_ID, PCI_ANY_ID },
+	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_4,
+		PCI_ANY_ID, PCI_ANY_ID },
+	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_5,
+		PCI_ANY_ID, PCI_ANY_ID },
+	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_6,
+		PCI_ANY_ID, PCI_ANY_ID },
+	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_7,
+		PCI_ANY_ID, PCI_ANY_ID },
+	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_8,
+		PCI_ANY_ID, PCI_ANY_ID },
 	{0}	/* Terminating entry */
 };
 MODULE_DEVICE_TABLE(pci, scsih_pci_table);
@@ -317,6 +336,47 @@ _scsih_is_boot_device(u64 sas_address, u64 device_name,
 }
 
 /**
+ * _scsih_get_sas_address - get the sas address for the given device handle
+ * @ioc: per adapter object
+ * @handle: device handle
+ * @sas_address: sas address (returned)
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_scsih_get_sas_address(struct MPT2SAS_ADAPTER *ioc, u16 handle,
+    u64 *sas_address)
+{
+	Mpi2SasDevicePage0_t sas_device_pg0;
+	Mpi2ConfigReply_t mpi_reply;
+	u32 ioc_status;
+
+	if (handle <= ioc->sas_hba.num_phys) {
+		*sas_address = ioc->sas_hba.sas_address;
+		return 0;
+	} else
+		*sas_address = 0;
+
+	if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
+	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
+		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+		    ioc->name, __FILE__, __LINE__, __func__);
+		return -ENXIO;
+	}
+
+	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+	    MPI2_IOCSTATUS_MASK;
+	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+		printk(MPT2SAS_ERR_FMT "handle(0x%04x), ioc_status(0x%04x)"
+		    "\nfailure at %s:%d/%s()!\n", ioc->name, handle, ioc_status,
+		     __FILE__, __LINE__, __func__);
+		return -EIO;
+	}
+
+	*sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
+	return 0;
+}
+
+/**
  * _scsih_determine_boot_device - determine boot device.
  * @ioc: per adapter object
  * @device: either sas_device or raid_device object
@@ -510,8 +570,6 @@ _scsih_sas_device_add(struct MPT2SAS_ADAPTER *ioc,
     struct _sas_device *sas_device)
 {
 	unsigned long flags;
-	u16 handle, parent_handle;
-	u64 sas_address;
 
 	dewtprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: handle"
 	    "(0x%04x), sas_addr(0x%016llx)\n", ioc->name, __func__,
@@ -521,10 +579,8 @@ _scsih_sas_device_add(struct MPT2SAS_ADAPTER *ioc,
 	list_add_tail(&sas_device->list, &ioc->sas_device_list);
 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
 
-	handle = sas_device->handle;
-	parent_handle = sas_device->parent_handle;
-	sas_address = sas_device->sas_address;
-	if (!mpt2sas_transport_port_add(ioc, handle, parent_handle))
+	if (!mpt2sas_transport_port_add(ioc, sas_device->handle,
+	     sas_device->sas_address_parent))
 		_scsih_sas_device_remove(ioc, sas_device);
 }
 
@@ -553,31 +609,6 @@ _scsih_sas_device_init_add(struct MPT2SAS_ADAPTER *ioc,
 }
 
 /**
- * mpt2sas_scsih_expander_find_by_handle - expander device search
- * @ioc: per adapter object
- * @handle: expander handle (assigned by firmware)
- * Context: Calling function should acquire ioc->sas_device_lock
- *
- * This searches for expander device based on handle, then returns the
- * sas_node object.
- */
-struct _sas_node *
-mpt2sas_scsih_expander_find_by_handle(struct MPT2SAS_ADAPTER *ioc, u16 handle)
-{
-	struct _sas_node *sas_expander, *r;
-
-	r = NULL;
-	list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
-		if (sas_expander->handle != handle)
-			continue;
-		r = sas_expander;
-		goto out;
-	}
- out:
-	return r;
-}
-
-/**
  * _scsih_raid_device_find_by_id - raid device search
  * @ioc: per adapter object
  * @id: sas device target id
@@ -699,6 +730,31 @@ _scsih_raid_device_remove(struct MPT2SAS_ADAPTER *ioc,
 }
 
 /**
+ * mpt2sas_scsih_expander_find_by_handle - expander device search
+ * @ioc: per adapter object
+ * @handle: expander handle (assigned by firmware)
+ * Context: Calling function should acquire ioc->sas_device_lock
+ *
+ * This searches for expander device based on handle, then returns the
+ * sas_node object.
+ */
+struct _sas_node *
+mpt2sas_scsih_expander_find_by_handle(struct MPT2SAS_ADAPTER *ioc, u16 handle)
+{
+	struct _sas_node *sas_expander, *r;
+
+	r = NULL;
+	list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
+		if (sas_expander->handle != handle)
+			continue;
+		r = sas_expander;
+		goto out;
+	}
+ out:
+	return r;
+}
+
+/**
  * mpt2sas_scsih_expander_find_by_sas_address - expander device search
  * @ioc: per adapter object
  * @sas_address: sas address
@@ -1043,17 +1099,46 @@ _scsih_build_scatter_gather(struct MPT2SAS_ADAPTER *ioc,
  * _scsih_change_queue_depth - setting device queue depth
  * @sdev: scsi device struct
  * @qdepth: requested queue depth
+ * @reason: calling context
  *
  * Returns queue depth.
  */
 static int
-_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
+_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
 {
 	struct Scsi_Host *shost = sdev->host;
 	int max_depth;
 	int tag_type;
+	struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+	struct MPT2SAS_DEVICE *sas_device_priv_data;
+	struct MPT2SAS_TARGET *sas_target_priv_data;
+	struct _sas_device *sas_device;
+	unsigned long flags;
+
+	if (reason != SCSI_QDEPTH_DEFAULT)
+		return -EOPNOTSUPP;
 
 	max_depth = shost->can_queue;
+
+	/* limit max device queue for SATA to 32 */
+	sas_device_priv_data = sdev->hostdata;
+	if (!sas_device_priv_data)
+		goto not_sata;
+	sas_target_priv_data = sas_device_priv_data->sas_target;
+	if (!sas_target_priv_data)
+		goto not_sata;
+	if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME))
+		goto not_sata;
+	spin_lock_irqsave(&ioc->sas_device_lock, flags);
+	sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
+	   sas_device_priv_data->sas_target->sas_address);
+	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+	if (sas_device && sas_device->device_info &
+	    MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
+		max_depth = MPT2SAS_SATA_QUEUE_DEPTH;
+
+ not_sata:
+
 	if (!sdev->tagged_supported)
 		max_depth = 1;
 	if (qdepth > max_depth)
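
The clamping policy above, restated as a standalone sketch (the SATA cap of
32 is assumed from MPT2SAS_SATA_QUEUE_DEPTH, per the comment in the hunk):

    #include <stdio.h>

    static int clamp_qdepth(int qdepth, int can_queue, int is_sata,
                            int tagged_supported)
    {
        int max_depth = can_queue;

        if (is_sata)
            max_depth = 32;     /* assumed MPT2SAS_SATA_QUEUE_DEPTH */
        if (!tagged_supported)
            max_depth = 1;
        return qdepth > max_depth ? max_depth : qdepth;
    }

    int main(void)
    {
        printf("%d\n", clamp_qdepth(64, 128, 1, 1));    /* prints 32 */
        return 0;
    }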
@@ -1488,7 +1573,7 @@ _scsih_slave_configure(struct scsi_device *sdev)
 		    r_level, raid_device->handle,
 		    (unsigned long long)raid_device->wwid,
 		    raid_device->num_pds, ds);
-		_scsih_change_queue_depth(sdev, qdepth);
+		_scsih_change_queue_depth(sdev, qdepth, SCSI_QDEPTH_DEFAULT);
 		return 0;
 	}
 
@@ -1534,7 +1619,7 @@ _scsih_slave_configure(struct scsi_device *sdev)
 			_scsih_display_sata_capabilities(ioc, sas_device, sdev);
 	}
 
-	_scsih_change_queue_depth(sdev, qdepth);
+	_scsih_change_queue_depth(sdev, qdepth, SCSI_QDEPTH_DEFAULT);
 
 	if (ssp_target)
 		sas_read_port_mode_page(sdev);
@@ -1874,6 +1959,8 @@ _scsih_abort(struct scsi_cmnd *scmd)
 		goto out;
 	}
 
+	mpt2sas_halt_firmware(ioc);
+
 	mutex_lock(&ioc->tm_cmds.mutex);
 	handle = sas_device_priv_data->sas_target->handle;
 	mpt2sas_scsih_issue_tm(ioc, handle, sas_device_priv_data->lun,
@@ -2297,7 +2384,6 @@ _scsih_block_io_to_children_attached_directly(struct MPT2SAS_ADAPTER *ioc,
 	u16 handle;
 	u16 reason_code;
 	u8 phy_number;
-	u8 link_rate;
 
 	for (i = 0; i < event_data->NumEntries; i++) {
 		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
@@ -2308,11 +2394,6 @@ _scsih_block_io_to_children_attached_directly(struct MPT2SAS_ADAPTER *ioc,
 		    MPI2_EVENT_SAS_TOPO_RC_MASK;
 		if (reason_code == MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING)
 			_scsih_block_io_device(ioc, handle);
-		if (reason_code == MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED) {
-			link_rate = event_data->PHY[i].LinkRate >> 4;
-			if (link_rate >= MPI2_SAS_NEG_LINK_RATE_1_5)
-				_scsih_ublock_io_device(ioc, handle);
-		}
 	}
 }
 
@@ -2349,16 +2430,10 @@ _scsih_tm_tr_send(struct MPT2SAS_ADAPTER *ioc, u16 handle)
 
 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
 	sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
-	if (!sas_device) {
-		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
-		printk(MPT2SAS_ERR_FMT "%s: failed finding sas_device\n",
-		    ioc->name, __func__);
-		return;
-	}
 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
 
 	/* skip if hidden raid component */
-	if (sas_device->hidden_raid_component)
+	if (sas_device && sas_device->hidden_raid_component)
 		return;
 
 	smid = mpt2sas_base_get_smid_hpr(ioc, ioc->tm_tr_cb_idx);
@@ -2371,18 +2446,31 @@ _scsih_tm_tr_send(struct MPT2SAS_ADAPTER *ioc, u16 handle)
 		delayed_tr->state = MPT2SAS_REQ_SAS_CNTRL;
 		list_add_tail(&delayed_tr->list,
 		    &ioc->delayed_tr_list);
-		if (sas_device->starget)
+		if (sas_device && sas_device->starget) {
 			dewtprintk(ioc, starget_printk(KERN_INFO,
 			    sas_device->starget, "DELAYED:tr:handle(0x%04x), "
-			    "(open)\n", sas_device->handle));
+			    "(open)\n", handle));
+		} else {
+			dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
+			    "DELAYED:tr:handle(0x%04x), (open)\n",
+			    ioc->name, handle));
+		}
 		return;
 	}
 
-	if (sas_device->starget && sas_device->starget->hostdata) {
-		sas_target_priv_data = sas_device->starget->hostdata;
-		sas_target_priv_data->tm_busy = 1;
-		dewtprintk(ioc, starget_printk(KERN_INFO, sas_device->starget,
-		    "tr:handle(0x%04x), (open)\n", sas_device->handle));
+	if (sas_device) {
+		sas_device->state |= MPTSAS_STATE_TR_SEND;
+		sas_device->state |= MPT2SAS_REQ_SAS_CNTRL;
+		if (sas_device->starget && sas_device->starget->hostdata) {
+			sas_target_priv_data = sas_device->starget->hostdata;
+			sas_target_priv_data->tm_busy = 1;
+			dewtprintk(ioc, starget_printk(KERN_INFO,
+			    sas_device->starget, "tr:handle(0x%04x), (open)\n",
+			    handle));
+		}
+	} else {
+		dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
+		    "tr:handle(0x%04x), (open)\n", ioc->name, handle));
 	}
 
 	mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
@@ -2390,8 +2478,6 @@ _scsih_tm_tr_send(struct MPT2SAS_ADAPTER *ioc, u16 handle)
 	mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
 	mpi_request->DevHandle = cpu_to_le16(handle);
 	mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
-	sas_device->state |= MPTSAS_STATE_TR_SEND;
-	sas_device->state |= MPT2SAS_REQ_SAS_CNTRL;
 	mpt2sas_base_put_smid_hi_priority(ioc, smid);
 }
 
@@ -2426,21 +2512,25 @@ _scsih_sas_control_complete(struct MPT2SAS_ADAPTER *ioc, u16 smid,
 
 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
 	sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
-	if (!sas_device) {
-		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
-		printk(MPT2SAS_ERR_FMT "%s: failed finding sas_device\n",
-		    ioc->name, __func__);
-		return 1;
-	}
-	sas_device->state |= MPTSAS_STATE_CNTRL_COMPLETE;
 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
 
-	if (sas_device->starget)
-		dewtprintk(ioc, starget_printk(KERN_INFO, sas_device->starget,
+	if (sas_device) {
+		sas_device->state |= MPTSAS_STATE_CNTRL_COMPLETE;
+		if (sas_device->starget)
+			dewtprintk(ioc, starget_printk(KERN_INFO,
+			    sas_device->starget,
+			    "sc_complete:handle(0x%04x), "
+			    "ioc_status(0x%04x), loginfo(0x%08x)\n",
+			    handle, le16_to_cpu(mpi_reply->IOCStatus),
+			    le32_to_cpu(mpi_reply->IOCLogInfo)));
+	} else {
+		dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
 		    "sc_complete:handle(0x%04x), "
 		    "ioc_status(0x%04x), loginfo(0x%08x)\n",
-		    handle, le16_to_cpu(mpi_reply->IOCStatus),
+		    ioc->name, handle, le16_to_cpu(mpi_reply->IOCStatus),
 		    le32_to_cpu(mpi_reply->IOCLogInfo)));
+	}
+
 	return 1;
 }
 
@@ -2478,28 +2568,33 @@ _scsih_tm_tr_complete(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
 	handle = le16_to_cpu(mpi_reply->DevHandle);
 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
 	sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
-	if (!sas_device) {
-		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
-		printk(MPT2SAS_ERR_FMT "%s: failed finding sas_device\n",
-		    ioc->name, __func__);
-		return 1;
-	}
-	sas_device->state |= MPTSAS_STATE_TR_COMPLETE;
 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
 
-	if (sas_device->starget)
-		dewtprintk(ioc, starget_printk(KERN_INFO, sas_device->starget,
-		    "tr_complete:handle(0x%04x), (%s) ioc_status(0x%04x), "
-		    "loginfo(0x%08x), completed(%d)\n",
-		    sas_device->handle, (sas_device->state &
-		    MPT2SAS_REQ_SAS_CNTRL) ? "open" : "active",
-		    le16_to_cpu(mpi_reply->IOCStatus),
+	if (sas_device) {
+		sas_device->state |= MPTSAS_STATE_TR_COMPLETE;
+		if (sas_device->starget) {
+			dewtprintk(ioc, starget_printk(KERN_INFO,
+			    sas_device->starget, "tr_complete:handle(0x%04x), "
+			    "(%s) ioc_status(0x%04x), loginfo(0x%08x), "
+			    "completed(%d)\n", sas_device->handle,
+			    (sas_device->state & MPT2SAS_REQ_SAS_CNTRL) ?
+			    "open" : "active",
+			    le16_to_cpu(mpi_reply->IOCStatus),
+			    le32_to_cpu(mpi_reply->IOCLogInfo),
+			    le32_to_cpu(mpi_reply->TerminationCount)));
+			if (sas_device->starget->hostdata) {
+				sas_target_priv_data =
+				    sas_device->starget->hostdata;
+				sas_target_priv_data->tm_busy = 0;
+			}
+		}
+	} else {
+		dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
+		    "tr_complete:handle(0x%04x), (open) ioc_status(0x%04x), "
+		    "loginfo(0x%08x), completed(%d)\n", ioc->name,
+		    handle, le16_to_cpu(mpi_reply->IOCStatus),
 		    le32_to_cpu(mpi_reply->IOCLogInfo),
 		    le32_to_cpu(mpi_reply->TerminationCount)));
-
-	if (sas_device->starget && sas_device->starget->hostdata) {
-		sas_target_priv_data = sas_device->starget->hostdata;
-		sas_target_priv_data->tm_busy = 0;
 	}
 
 	if (!list_empty(&ioc->delayed_tr_list)) {
@@ -2514,8 +2609,7 @@ _scsih_tm_tr_complete(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
 	} else
 		rc = 1;
 
-
-	if (!(sas_device->state & MPT2SAS_REQ_SAS_CNTRL))
+	if (sas_device && !(sas_device->state & MPT2SAS_REQ_SAS_CNTRL))
 		return rc;
 
 	if (ioc->shost_recovery) {
@@ -2531,12 +2625,14 @@ _scsih_tm_tr_complete(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
 		return rc;
 	}
 
+	if (sas_device)
+		sas_device->state |= MPTSAS_STATE_CNTRL_SEND;
+
 	mpi_request = mpt2sas_base_get_msg_frame(ioc, smid_sas_ctrl);
 	memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
 	mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
 	mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
 	mpi_request->DevHandle = mpi_reply->DevHandle;
-	sas_device->state |= MPTSAS_STATE_CNTRL_SEND;
 	mpt2sas_base_put_smid_default(ioc, smid_sas_ctrl);
 	return rc;
 }
@@ -2678,8 +2774,6 @@ _scsih_setup_eedp(struct scsi_cmnd *scmd, Mpi2SCSIIORequest_t *mpi_request)
 	else
 		return;
 
-	mpi_request->EEDPBlockSize = scmd->device->sector_size;
-
 	switch (prot_type) {
 	case SCSI_PROT_DIF_TYPE1:
 
@@ -2687,8 +2781,7 @@ _scsih_setup_eedp(struct scsi_cmnd *scmd, Mpi2SCSIIORequest_t *mpi_request)
 		* enable ref/guard checking
 		* auto increment ref tag
 		*/
-		mpi_request->EEDPFlags = eedp_flags |
-		    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
+		eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
 		    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
 		    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
 		mpi_request->CDB.EEDP32.PrimaryReferenceTag =
@@ -2701,11 +2794,11 @@ _scsih_setup_eedp(struct scsi_cmnd *scmd, Mpi2SCSIIORequest_t *mpi_request)
 		/*
 		* enable guard checking
 		*/
-		mpi_request->EEDPFlags = eedp_flags |
-		    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
-
+		eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
 		break;
 	}
+	mpi_request->EEDPBlockSize = cpu_to_le32(scmd->device->sector_size);
+	mpi_request->EEDPFlags = cpu_to_le16(eedp_flags);
 }
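
Why the cpu_to_le16()/cpu_to_le32() conversions added above matter, as a
standalone sketch: on a big-endian host the bytes of a 16-bit flags word
must be swapped before the IOC reads them.

    #include <stdint.h>
    #include <stdio.h>

    /* byte-swap a 16-bit value when the host is big endian */
    static uint16_t to_le16_sketch(uint16_t v)
    {
        union { uint16_t u; uint8_t b[2]; } probe = { .u = 1 };

        return probe.b[0] ? v : (uint16_t)((v << 8) | (v >> 8));
    }

    int main(void)
    {
        printf("0x%04x\n", to_le16_sketch(0x0102));
        return 0;
    }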
 
 /**
@@ -2788,7 +2881,7 @@ _scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
 	}
 
 	/* see if we are busy with task management stuff */
-	if (sas_target_priv_data->tm_busy)
+	if (sas_device_priv_data->block || sas_target_priv_data->tm_busy)
 		return SCSI_MLQUEUE_DEVICE_BUSY;
 	else if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress)
 		return SCSI_MLQUEUE_HOST_BUSY;
@@ -2842,7 +2935,7 @@ _scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
 	mpi_request->MsgFlags = MPI2_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR;
 	mpi_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
 	mpi_request->SenseBufferLowAddress =
-	    (u32)mpt2sas_base_get_sense_buffer_dma(ioc, smid);
+	    mpt2sas_base_get_sense_buffer_dma(ioc, smid);
 	mpi_request->SGLOffset0 = offsetof(Mpi2SCSIIORequest_t, SGL) / 4;
 	mpi_request->SGLFlags = cpu_to_le16(MPI2_SCSIIO_SGLFLAGS_TYPE_MPI +
 	    MPI2_SCSIIO_SGLFLAGS_SYSTEM_ADDR);
@@ -3059,7 +3152,7 @@ _scsih_scsi_ioc_info(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
 		response_info = le32_to_cpu(mpi_reply->ResponseInfo);
 		response_bytes = (u8 *)&response_info;
-		_scsih_response_code(ioc, response_bytes[3]);
+		_scsih_response_code(ioc, response_bytes[0]);
 	}
 }
 #endif
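
A note on the response_bytes[3] -> response_bytes[0] change above (editor's
sketch): once ResponseInfo is converted to CPU order, the response code is
its least significant byte; indexing byte 0 of the in-memory value matches
the "& 0xFF" masking used elsewhere in this patch only on little-endian
hosts, so the mask is the portable form.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t response_info = 0x12345605u;   /* hypothetical value */

        /* least significant byte, independent of host byte order */
        printf("response code 0x%02x\n", response_info & 0xFFu);
        return 0;
    }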
@@ -3177,7 +3270,7 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
 	u8 scsi_status;
 	u32 log_info;
 	struct MPT2SAS_DEVICE *sas_device_priv_data;
-	u32 response_code;
+	u32 response_code = 0;
 
 	mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
 	scmd = _scsih_scsi_lookup_get(ioc, smid);
@@ -3199,16 +3292,16 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
 	}
 
 	/* turning off TLR */
+	scsi_state = mpi_reply->SCSIState;
+	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
+		response_code =
+		    le32_to_cpu(mpi_reply->ResponseInfo) & 0xFF;
 	if (!sas_device_priv_data->tlr_snoop_check) {
 		sas_device_priv_data->tlr_snoop_check++;
-		if (sas_device_priv_data->flags & MPT_DEVICE_TLR_ON) {
-			response_code = (le32_to_cpu(mpi_reply->ResponseInfo)
-			    >> 24);
-			if (response_code ==
-			    MPI2_SCSITASKMGMT_RSP_INVALID_FRAME)
-				sas_device_priv_data->flags &=
-				    ~MPT_DEVICE_TLR_ON;
-		}
+		if ((sas_device_priv_data->flags & MPT_DEVICE_TLR_ON) &&
+		    response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME)
+			sas_device_priv_data->flags &=
+			    ~MPT_DEVICE_TLR_ON;
 	}
 
 	xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);
@@ -3219,7 +3312,6 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
 	else
 		log_info = 0;
 	ioc_status &= MPI2_IOCSTATUS_MASK;
-	scsi_state = mpi_reply->SCSIState;
 	scsi_status = mpi_reply->SCSIStatus;
 
 	if (ioc_status == MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN && xfer_cnt == 0 &&
@@ -3255,10 +3347,9 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
 
 	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
 		if (sas_device_priv_data->block) {
-			scmd->result = (DID_BUS_BUSY << 16);
-			break;
+			scmd->result = DID_TRANSPORT_DISRUPTED << 16;
+			goto out;
 		}
-
 	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
 	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
 		scmd->result = DID_RESET << 16;
@@ -3304,8 +3395,10 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
 	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
 	case MPI2_IOCSTATUS_SUCCESS:
 		scmd->result = (DID_OK << 16) | scsi_status;
-		if (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
-		     MPI2_SCSI_STATE_NO_SCSI_STATUS))
+		if (response_code ==
+		    MPI2_SCSITASKMGMT_RSP_INVALID_FRAME ||
+		    (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
+		     MPI2_SCSI_STATE_NO_SCSI_STATUS)))
 			scmd->result = DID_SOFT_ERROR << 16;
 		else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
 			scmd->result = DID_RESET << 16;
@@ -3344,7 +3437,6 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
 /**
  * _scsih_sas_host_refresh - refreshing sas host object contents
  * @ioc: per adapter object
- * @update: update link information
  * Context: user
  *
  * During port enable, fw will send topology events for every device. It's
@@ -3354,13 +3446,14 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
  * Return nothing.
  */
 static void
-_scsih_sas_host_refresh(struct MPT2SAS_ADAPTER *ioc, u8 update)
+_scsih_sas_host_refresh(struct MPT2SAS_ADAPTER *ioc)
 {
 	u16 sz;
 	u16 ioc_status;
 	int i;
 	Mpi2ConfigReply_t mpi_reply;
 	Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
+	u16 attached_handle;
 
 	dtmprintk(ioc, printk(MPT2SAS_INFO_FMT
 	    "updating handles for sas_host(0x%016llx)\n",
@@ -3374,27 +3467,24 @@ _scsih_sas_host_refresh(struct MPT2SAS_ADAPTER *ioc, u8 update)
 		    ioc->name, __FILE__, __LINE__, __func__);
 		return;
 	}
-	if (!(mpt2sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
-	    sas_iounit_pg0, sz))) {
-		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
-		    MPI2_IOCSTATUS_MASK;
-		if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
-			goto out;
-		for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
-			ioc->sas_hba.phy[i].handle =
-			    le16_to_cpu(sas_iounit_pg0->PhyData[i].
-				ControllerDevHandle);
-			if (update)
-				mpt2sas_transport_update_links(
-				    ioc,
-				    ioc->sas_hba.phy[i].handle,
-				    le16_to_cpu(sas_iounit_pg0->PhyData[i].
-				    AttachedDevHandle), i,
-				    sas_iounit_pg0->PhyData[i].
-				    NegotiatedLinkRate >> 4);
-		}
-	}
 
+	if ((mpt2sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
+	    sas_iounit_pg0, sz)) != 0)
+		goto out;
+	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
+	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
+		goto out;
+	for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
+		if (i == 0)
+			ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0->
+			    PhyData[0].ControllerDevHandle);
+		ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
+		attached_handle = le16_to_cpu(sas_iounit_pg0->PhyData[i].
+		    AttachedDevHandle);
+		mpt2sas_transport_update_links(ioc, ioc->sas_hba.sas_address,
+		    attached_handle, i, sas_iounit_pg0->PhyData[i].
+		    NegotiatedLinkRate >> 4);
+	}
  out:
 	kfree(sas_iounit_pg0);
 }
@@ -3507,19 +3597,21 @@ _scsih_sas_host_add(struct MPT2SAS_ADAPTER *ioc)
 			    ioc->name, __FILE__, __LINE__, __func__);
 			goto out;
 		}
-		ioc->sas_hba.phy[i].handle =
-		    le16_to_cpu(sas_iounit_pg0->PhyData[i].ControllerDevHandle);
+
+		if (i == 0)
+			ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0->
+			    PhyData[0].ControllerDevHandle);
+		ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
 		ioc->sas_hba.phy[i].phy_id = i;
 		mpt2sas_transport_add_host_phy(ioc, &ioc->sas_hba.phy[i],
 		    phy_pg0, ioc->sas_hba.parent_dev);
 	}
 	if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
-	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, ioc->sas_hba.phy[0].handle))) {
+	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, ioc->sas_hba.handle))) {
 		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
 		    ioc->name, __FILE__, __LINE__, __func__);
 		goto out;
 	}
-	ioc->sas_hba.handle = le16_to_cpu(sas_device_pg0.DevHandle);
 	ioc->sas_hba.enclosure_handle =
 	    le16_to_cpu(sas_device_pg0.EnclosureHandle);
 	ioc->sas_hba.sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
@@ -3562,7 +3654,7 @@ _scsih_expander_add(struct MPT2SAS_ADAPTER *ioc, u16 handle)
 	Mpi2SasEnclosurePage0_t enclosure_pg0;
 	u32 ioc_status;
 	u16 parent_handle;
-	__le64 sas_address;
+	__le64 sas_address, sas_address_parent = 0;
 	int i;
 	unsigned long flags;
 	struct _sas_port *mpt2sas_port = NULL;
@@ -3591,10 +3683,16 @@ _scsih_expander_add(struct MPT2SAS_ADAPTER *ioc, u16 handle)
 
 	/* handle out of order topology events */
 	parent_handle = le16_to_cpu(expander_pg0.ParentDevHandle);
-	if (parent_handle >= ioc->sas_hba.num_phys) {
+	if (_scsih_get_sas_address(ioc, parent_handle, &sas_address_parent)
+	    != 0) {
+		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+		    ioc->name, __FILE__, __LINE__, __func__);
+		return -1;
+	}
+	if (sas_address_parent != ioc->sas_hba.sas_address) {
 		spin_lock_irqsave(&ioc->sas_node_lock, flags);
-		sas_expander = mpt2sas_scsih_expander_find_by_handle(ioc,
-		    parent_handle);
+		sas_expander = mpt2sas_scsih_expander_find_by_sas_address(ioc,
+		    sas_address_parent);
 		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
 		if (!sas_expander) {
 			rc = _scsih_expander_add(ioc, parent_handle);
@@ -3622,14 +3720,12 @@ _scsih_expander_add(struct MPT2SAS_ADAPTER *ioc, u16 handle)
 
 	sas_expander->handle = handle;
 	sas_expander->num_phys = expander_pg0.NumPhys;
-	sas_expander->parent_handle = parent_handle;
-	sas_expander->enclosure_handle =
-	    le16_to_cpu(expander_pg0.EnclosureHandle);
+	sas_expander->sas_address_parent = sas_address_parent;
 	sas_expander->sas_address = sas_address;
 
 	printk(MPT2SAS_INFO_FMT "expander_add: handle(0x%04x),"
 	    " parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n", ioc->name,
-	    handle, sas_expander->parent_handle, (unsigned long long)
+	    handle, parent_handle, (unsigned long long)
 	    sas_expander->sas_address, sas_expander->num_phys);
 
 	if (!sas_expander->num_phys)
@@ -3645,7 +3741,7 @@ _scsih_expander_add(struct MPT2SAS_ADAPTER *ioc, u16 handle)
 
 	INIT_LIST_HEAD(&sas_expander->sas_port_list);
 	mpt2sas_port = mpt2sas_transport_port_add(ioc, handle,
-	    sas_expander->parent_handle);
+	    sas_address_parent);
 	if (!mpt2sas_port) {
 		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
 		    ioc->name, __FILE__, __LINE__, __func__);
@@ -3691,20 +3787,54 @@ _scsih_expander_add(struct MPT2SAS_ADAPTER *ioc, u16 handle)
 
 	if (mpt2sas_port)
 		mpt2sas_transport_port_remove(ioc, sas_expander->sas_address,
-		    sas_expander->parent_handle);
+		    sas_address_parent);
 	kfree(sas_expander);
 	return rc;
 }
 
 /**
+ * _scsih_done - scsih callback handler.
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ *
+ * Callback handler when sending internally generated message frames.
+ * The callback index passed is `ioc->scsih_cb_idx`.
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt
+ *        0 means the mf is freed from this function.
+ */
+static u8
+_scsih_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
+{
+	MPI2DefaultReply_t *mpi_reply;
+
+	mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
+	if (ioc->scsih_cmds.status == MPT2_CMD_NOT_USED)
+		return 1;
+	if (ioc->scsih_cmds.smid != smid)
+		return 1;
+	ioc->scsih_cmds.status |= MPT2_CMD_COMPLETE;
+	if (mpi_reply) {
+		memcpy(ioc->scsih_cmds.reply, mpi_reply,
+		    mpi_reply->MsgLength*4);
+		ioc->scsih_cmds.status |= MPT2_CMD_REPLY_VALID;
+	}
+	ioc->scsih_cmds.status &= ~MPT2_CMD_PENDING;
+	complete(&ioc->scsih_cmds.done);
+	return 1;
+}
+
+/**
  * _scsih_expander_remove - removing expander object
  * @ioc: per adapter object
- * @handle: expander handle
+ * @sas_address: expander sas_address
  *
  * Return nothing.
  */
 static void
-_scsih_expander_remove(struct MPT2SAS_ADAPTER *ioc, u16 handle)
+_scsih_expander_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address)
 {
 	struct _sas_node *sas_expander;
 	unsigned long flags;
@@ -3713,7 +3843,8 @@ _scsih_expander_remove(struct MPT2SAS_ADAPTER *ioc, u16 handle)
 		return;
 
 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
-	sas_expander = mpt2sas_scsih_expander_find_by_handle(ioc, handle);
+	sas_expander = mpt2sas_scsih_expander_find_by_sas_address(ioc,
+	    sas_address);
 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
 	_scsih_expander_node_remove(ioc, sas_expander);
 }
@@ -3805,8 +3936,11 @@ _scsih_add_device(struct MPT2SAS_ADAPTER *ioc, u16 handle, u8 phy_num, u8 is_pd)
 	}
 
 	sas_device->handle = handle;
-	sas_device->parent_handle =
-	    le16_to_cpu(sas_device_pg0.ParentDevHandle);
+	if (_scsih_get_sas_address(ioc, le16_to_cpu
+		(sas_device_pg0.ParentDevHandle),
+		&sas_device->sas_address_parent) != 0)
+		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+		    ioc->name, __FILE__, __LINE__, __func__);
 	sas_device->enclosure_handle =
 	    le16_to_cpu(sas_device_pg0.EnclosureHandle);
 	sas_device->slot =
@@ -3836,43 +3970,39 @@ _scsih_add_device(struct MPT2SAS_ADAPTER *ioc, u16 handle, u8 phy_num, u8 is_pd)
 /**
  * _scsih_remove_device -  removing sas device object
  * @ioc: per adapter object
- * @handle: sas device handle
+ * @sas_device: the sas_device object
  *
  * Return nothing.
  */
 static void
-_scsih_remove_device(struct MPT2SAS_ADAPTER *ioc, u16 handle)
+_scsih_remove_device(struct MPT2SAS_ADAPTER *ioc, struct _sas_device
+    *sas_device)
 {
 	struct MPT2SAS_TARGET *sas_target_priv_data;
-	struct _sas_device *sas_device;
-	unsigned long flags;
 	Mpi2SasIoUnitControlReply_t mpi_reply;
 	Mpi2SasIoUnitControlRequest_t mpi_request;
-	u16 device_handle;
+	u16 device_handle, handle;
 
-	/* lookup sas_device */
-	spin_lock_irqsave(&ioc->sas_device_lock, flags);
-	sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
-	if (!sas_device) {
-		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+	if (!sas_device)
 		return;
-	}
 
-	dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter: handle"
-	    "(0x%04x)\n", ioc->name, __func__, handle));
+	handle = sas_device->handle;
+	dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter: handle(0x%04x),"
+	    " sas_addr(0x%016llx)\n", ioc->name, __func__, handle,
+	    (unsigned long long) sas_device->sas_address));
 
 	if (sas_device->starget && sas_device->starget->hostdata) {
 		sas_target_priv_data = sas_device->starget->hostdata;
 		sas_target_priv_data->deleted = 1;
 	}
-	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
 
-	if (ioc->remove_host)
+	if (ioc->remove_host || ioc->shost_recovery || !handle)
 		goto out;
 
 	if ((sas_device->state & MPTSAS_STATE_TR_COMPLETE)) {
 		dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "\tskip "
-		   "target_reset handle(0x%04x)\n", ioc->name, handle));
+		   "target_reset handle(0x%04x)\n", ioc->name,
+		   handle));
 		goto skip_tr;
 	}
 
@@ -3925,10 +4055,10 @@ _scsih_remove_device(struct MPT2SAS_ADAPTER *ioc, u16 handle)
 	_scsih_ublock_io_device(ioc, handle);
 
 	mpt2sas_transport_port_remove(ioc, sas_device->sas_address,
-	    sas_device->parent_handle);
+	    sas_device->sas_address_parent);
 
 	printk(MPT2SAS_INFO_FMT "removing handle(0x%04x), sas_addr"
-	    "(0x%016llx)\n", ioc->name, sas_device->handle,
+	    "(0x%016llx)\n", ioc->name, handle,
 	    (unsigned long long) sas_device->sas_address);
 	_scsih_sas_device_remove(ioc, sas_device);
 
@@ -3952,7 +4082,7 @@ _scsih_sas_topology_change_event_debug(struct MPT2SAS_ADAPTER *ioc,
 	u16 reason_code;
 	u8 phy_number;
 	char *status_str = NULL;
-	char link_rate[25];
+	u8 link_rate, prev_link_rate;
 
 	switch (event_data->ExpStatus) {
 	case MPI2_EVENT_SAS_TOPO_ES_ADDED:
@@ -3962,6 +4092,7 @@ _scsih_sas_topology_change_event_debug(struct MPT2SAS_ADAPTER *ioc,
 		status_str = "remove";
 		break;
 	case MPI2_EVENT_SAS_TOPO_ES_RESPONDING:
+	case 0:
 		status_str =  "responding";
 		break;
 	case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
@@ -3987,30 +4118,30 @@ _scsih_sas_topology_change_event_debug(struct MPT2SAS_ADAPTER *ioc,
 		    MPI2_EVENT_SAS_TOPO_RC_MASK;
 		switch (reason_code) {
 		case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
-			snprintf(link_rate, 25, ": add, link(0x%02x)",
-			    (event_data->PHY[i].LinkRate >> 4));
-			status_str = link_rate;
+			status_str = "target add";
 			break;
 		case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
-			status_str = ": remove";
+			status_str = "target remove";
 			break;
 		case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING:
-			status_str = ": remove_delay";
+			status_str = "delay target remove";
 			break;
 		case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
-			snprintf(link_rate, 25, ": link(0x%02x)",
-			    (event_data->PHY[i].LinkRate >> 4));
-			status_str = link_rate;
+			status_str = "link rate change";
 			break;
 		case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE:
-			status_str = ": responding";
+			status_str = "target responding";
 			break;
 		default:
-			status_str = ": unknown";
+			status_str = "unknown";
 			break;
 		}
-		printk(KERN_DEBUG "\tphy(%02d), attached_handle(0x%04x)%s\n",
-		    phy_number, handle, status_str);
+		link_rate = event_data->PHY[i].LinkRate >> 4;
+		prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
+		printk(KERN_DEBUG "\tphy(%02d), attached_handle(0x%04x): %s:"
+		    " link rate: new(0x%02x), old(0x%02x)\n", phy_number,
+		    handle, status_str, link_rate, prev_link_rate);
+
 	}
 }
 #endif
@@ -4031,8 +4162,10 @@ _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc,
 	u16 reason_code;
 	u8 phy_number;
 	struct _sas_node *sas_expander;
+	struct _sas_device *sas_device;
+	u64 sas_address;
 	unsigned long flags;
-	u8 link_rate_;
+	u8 link_rate, prev_link_rate;
 	Mpi2EventDataSasTopologyChangeList_t *event_data = fw_event->event_data;
 
 #ifdef CONFIG_SCSI_MPT2SAS_LOGGING
@@ -4040,10 +4173,13 @@ _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc,
 		_scsih_sas_topology_change_event_debug(ioc, event_data);
 #endif
 
+	if (ioc->shost_recovery)
+		return;
+
 	if (!ioc->sas_hba.num_phys)
 		_scsih_sas_host_add(ioc);
 	else
-		_scsih_sas_host_refresh(ioc, 0);
+		_scsih_sas_host_refresh(ioc);
 
 	if (fw_event->ignore) {
 		dewtprintk(ioc, printk(MPT2SAS_DEBUG_FMT "ignoring expander "
@@ -4058,6 +4194,17 @@ _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc,
 		if (_scsih_expander_add(ioc, parent_handle) != 0)
 			return;
 
+	spin_lock_irqsave(&ioc->sas_node_lock, flags);
+	sas_expander = mpt2sas_scsih_expander_find_by_handle(ioc,
+	    parent_handle);
+	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+	if (sas_expander)
+		sas_address = sas_expander->sas_address;
+	else if (parent_handle < ioc->sas_hba.num_phys)
+		sas_address = ioc->sas_hba.sas_address;
+	else
+		return;
+
 	/* handle siblings events */
 	for (i = 0; i < event_data->NumEntries; i++) {
 		if (fw_event->ignore) {
@@ -4077,48 +4224,47 @@ _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc,
 		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
 		if (!handle)
 			continue;
-		link_rate_ = event_data->PHY[i].LinkRate >> 4;
+		link_rate = event_data->PHY[i].LinkRate >> 4;
+		prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
 		switch (reason_code) {
 		case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
+
+			if (link_rate == prev_link_rate)
+				break;
+
+			mpt2sas_transport_update_links(ioc, sas_address,
+			    handle, phy_number, link_rate);
+
+			if (link_rate >= MPI2_SAS_NEG_LINK_RATE_1_5)
+				_scsih_ublock_io_device(ioc, handle);
+			break;
 		case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
-			if (!parent_handle) {
-				if (phy_number < ioc->sas_hba.num_phys)
-					mpt2sas_transport_update_links(
-					ioc,
-					ioc->sas_hba.phy[phy_number].handle,
-					handle, phy_number, link_rate_);
-			} else {
-				spin_lock_irqsave(&ioc->sas_node_lock, flags);
-				sas_expander =
-				    mpt2sas_scsih_expander_find_by_handle(ioc,
-					parent_handle);
-				spin_unlock_irqrestore(&ioc->sas_node_lock,
-				    flags);
-				if (sas_expander) {
-					if (phy_number < sas_expander->num_phys)
-						mpt2sas_transport_update_links(
-						ioc,
-						sas_expander->
-						phy[phy_number].handle,
-						handle, phy_number,
-						link_rate_);
-				}
-			}
-			if (reason_code == MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED) {
-				if (link_rate_ < MPI2_SAS_NEG_LINK_RATE_1_5)
-					break;
-				_scsih_add_device(ioc, handle, phy_number, 0);
-			}
+
+			mpt2sas_transport_update_links(ioc, sas_address,
+			    handle, phy_number, link_rate);
+
+			_scsih_add_device(ioc, handle, phy_number, 0);
 			break;
 		case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
-			_scsih_remove_device(ioc, handle);
+
+			spin_lock_irqsave(&ioc->sas_device_lock, flags);
+			sas_device = _scsih_sas_device_find_by_handle(ioc,
+			    handle);
+			if (!sas_device) {
+				spin_unlock_irqrestore(&ioc->sas_device_lock,
+				    flags);
+				break;
+			}
+			spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+			_scsih_remove_device(ioc, sas_device);
 			break;
 		}
 	}
 
 	/* handle expander removal */
-	if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
-		_scsih_expander_remove(ioc, parent_handle);
+	if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING &&
+	    sas_expander)
+		_scsih_expander_remove(ioc, sas_address);
 
 }
 
@@ -4170,6 +4316,12 @@ _scsih_sas_device_status_change_event_debug(struct MPT2SAS_ADAPTER *ioc,
 	case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION:
 		reason_str = "internal async notification";
 		break;
+	case MPI2_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY:
+		reason_str = "expander reduced functionality";
+		break;
+	case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY:
+		reason_str = "expander reduced functionality complete";
+		break;
 	default:
 		reason_str = "unknown reason";
 		break;
@@ -4197,11 +4349,43 @@ static void
 _scsih_sas_device_status_change_event(struct MPT2SAS_ADAPTER *ioc,
     struct fw_event_work *fw_event)
 {
+	struct MPT2SAS_TARGET *target_priv_data;
+	struct _sas_device *sas_device;
+	__le64 sas_address;
+	unsigned long flags;
+	Mpi2EventDataSasDeviceStatusChange_t *event_data =
+	    fw_event->event_data;
+
 #ifdef CONFIG_SCSI_MPT2SAS_LOGGING
 	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
 		_scsih_sas_device_status_change_event_debug(ioc,
-		     fw_event->event_data);
+		     event_data);
 #endif
+
+	if (event_data->ReasonCode !=
+	    MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET &&
+	    event_data->ReasonCode !=
+	    MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
+		return;
+
+	spin_lock_irqsave(&ioc->sas_device_lock, flags);
+	sas_address = le64_to_cpu(event_data->SASAddress);
+	sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
+	    sas_address);
+	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+	if (!sas_device || !sas_device->starget)
+		return;
+
+	target_priv_data = sas_device->starget->hostdata;
+	if (!target_priv_data)
+		return;
+
+	if (event_data->ReasonCode ==
+	    MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET)
+		target_priv_data->tm_busy = 1;
+	else
+		target_priv_data->tm_busy = 0;
 }
 
 #ifdef CONFIG_SCSI_MPT2SAS_LOGGING
@@ -4281,6 +4465,7 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc,
 #ifdef CONFIG_SCSI_MPT2SAS_LOGGING
 	Mpi2EventDataSasBroadcastPrimitive_t *event_data = fw_event->event_data;
 #endif
+	u16 ioc_status;
 	dewtprintk(ioc, printk(MPT2SAS_DEBUG_FMT "broadcast primative: "
 	    "phy number(%d), width(%d)\n", ioc->name, event_data->PhyNum,
 	    event_data->PortWidth));
@@ -4314,8 +4499,9 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc,
 		mpt2sas_scsih_issue_tm(ioc, handle, lun,
 		    MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30);
 		ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
-
-		if ((mpi_reply->IOCStatus == MPI2_IOCSTATUS_SUCCESS) &&
+		ioc_status = le16_to_cpu(mpi_reply->IOCStatus)
+		    & MPI2_IOCSTATUS_MASK;
+		if ((ioc_status == MPI2_IOCSTATUS_SUCCESS) &&
 		    (mpi_reply->ResponseCode ==
 		     MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED ||
 		     mpi_reply->ResponseCode ==
@@ -4570,7 +4756,7 @@ _scsih_sas_pd_delete(struct MPT2SAS_ADAPTER *ioc,
 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
 	if (!sas_device)
 		return;
-	_scsih_remove_device(ioc, handle);
+	_scsih_remove_device(ioc, sas_device);
 }
 
 /**
@@ -4591,6 +4777,8 @@ _scsih_sas_pd_add(struct MPT2SAS_ADAPTER *ioc,
 	Mpi2ConfigReply_t mpi_reply;
 	Mpi2SasDevicePage0_t sas_device_pg0;
 	u32 ioc_status;
+	u64 sas_address;
+	u16 parent_handle;
 
 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
 	sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
@@ -4615,9 +4803,10 @@ _scsih_sas_pd_add(struct MPT2SAS_ADAPTER *ioc,
 		return;
 	}
 
-	mpt2sas_transport_update_links(ioc,
-	    le16_to_cpu(sas_device_pg0.ParentDevHandle),
-	    handle, sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
+	parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
+	if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
+		mpt2sas_transport_update_links(ioc, sas_address, handle,
+		    sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
 
 	_scsih_add_device(ioc, handle, 0, 1);
 }
@@ -4857,7 +5046,7 @@ static void
 _scsih_sas_ir_physical_disk_event(struct MPT2SAS_ADAPTER *ioc,
     struct fw_event_work *fw_event)
 {
-	u16 handle;
+	u16 handle, parent_handle;
 	u32 state;
 	struct _sas_device *sas_device;
 	unsigned long flags;
@@ -4865,6 +5054,7 @@ _scsih_sas_ir_physical_disk_event(struct MPT2SAS_ADAPTER *ioc,
 	Mpi2SasDevicePage0_t sas_device_pg0;
 	u32 ioc_status;
 	Mpi2EventDataIrPhysicalDisk_t *event_data = fw_event->event_data;
+	u64 sas_address;
 
 	if (event_data->ReasonCode != MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED)
 		return;
@@ -4906,9 +5096,10 @@ _scsih_sas_ir_physical_disk_event(struct MPT2SAS_ADAPTER *ioc,
 			return;
 		}
 
-		mpt2sas_transport_update_links(ioc,
-		    le16_to_cpu(sas_device_pg0.ParentDevHandle),
-		    handle, sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
+		parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
+		if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
+			mpt2sas_transport_update_links(ioc, sas_address, handle,
+			    sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
 
 		_scsih_add_device(ioc, handle, 0, 1);
 
@@ -4948,11 +5139,17 @@ _scsih_sas_ir_operation_status_event_debug(struct MPT2SAS_ADAPTER *ioc,
 	case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK:
 		reason_str = "consistency check";
 		break;
-	default:
-		reason_str = "unknown reason";
+	case MPI2_EVENT_IR_RAIDOP_BACKGROUND_INIT:
+		reason_str = "background init";
+		break;
+	case MPI2_EVENT_IR_RAIDOP_MAKE_DATA_CONSISTENT:
+		reason_str = "make data consistent";
 		break;
 	}
 
+	if (!reason_str)
+		return;
+
 	printk(MPT2SAS_INFO_FMT "raid operational status: (%s)"
 	    "\thandle(0x%04x), percent complete(%d)\n",
 	    ioc->name, reason_str,
@@ -5252,18 +5449,23 @@ _scsih_mark_responding_expander(struct MPT2SAS_ADAPTER *ioc, u64 sas_address,
 {
 	struct _sas_node *sas_expander;
 	unsigned long flags;
+	int i;
 
 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
 	list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
-		if (sas_expander->sas_address == sas_address) {
-			sas_expander->responding = 1;
-			if (sas_expander->handle != handle) {
-				printk(KERN_INFO "old handle(0x%04x)\n",
-				    sas_expander->handle);
-				sas_expander->handle = handle;
-			}
+		if (sas_expander->sas_address != sas_address)
+			continue;
+		sas_expander->responding = 1;
+		if (sas_expander->handle == handle)
 			goto out;
-		}
+		printk(KERN_INFO "\texpander(0x%016llx): handle changed"
+		    " from(0x%04x) to (0x%04x)!!!\n",
+		    (unsigned long long)sas_expander->sas_address,
+		    sas_expander->handle, handle);
+		sas_expander->handle = handle;
+		for (i = 0 ; i < sas_expander->num_phys ; i++)
+			sas_expander->phy[i].handle = handle;
+		goto out;
 	}
  out:
 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
@@ -5340,7 +5542,9 @@ _scsih_remove_unresponding_devices(struct MPT2SAS_ADAPTER *ioc)
 			    (unsigned long long)
 			    sas_device->enclosure_logical_id,
 			    sas_device->slot);
-		_scsih_remove_device(ioc, sas_device->handle);
+		/* invalidate the device handle */
+		sas_device->handle = 0;
+		_scsih_remove_device(ioc, sas_device);
 	}
 
 	list_for_each_entry_safe(raid_device, raid_device_next,
@@ -5366,7 +5570,7 @@ _scsih_remove_unresponding_devices(struct MPT2SAS_ADAPTER *ioc)
 			sas_expander->responding = 0;
 			continue;
 		}
-		_scsih_expander_remove(ioc, sas_expander->handle);
+		_scsih_expander_remove(ioc, sas_expander->sas_address);
 		goto retry_expander_search;
 	}
 }
@@ -5406,7 +5610,7 @@ mpt2sas_scsih_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
 	case MPT2_IOC_DONE_RESET:
 		dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
 		    "MPT2_IOC_DONE_RESET\n", ioc->name, __func__));
-		_scsih_sas_host_refresh(ioc, 0);
+		_scsih_sas_host_refresh(ioc);
 		_scsih_search_responding_sas_devices(ioc);
 		_scsih_search_responding_raid_devices(ioc);
 		_scsih_search_responding_expanders(ioc);
@@ -5646,7 +5850,7 @@ _scsih_expander_node_remove(struct MPT2SAS_ADAPTER *ioc,
 			spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
 			if (!sas_device)
 				continue;
-			_scsih_remove_device(ioc, sas_device->handle);
+			_scsih_remove_device(ioc, sas_device);
 			if (ioc->shost_recovery)
 				return;
 			goto retry_device_search;
@@ -5669,7 +5873,8 @@ _scsih_expander_node_remove(struct MPT2SAS_ADAPTER *ioc,
 			spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
 			if (!expander_sibling)
 				continue;
-			_scsih_expander_remove(ioc, expander_sibling->handle);
+			_scsih_expander_remove(ioc,
+			    expander_sibling->sas_address);
 			if (ioc->shost_recovery)
 				return;
 			goto retry_expander_search;
@@ -5677,7 +5882,7 @@ _scsih_expander_node_remove(struct MPT2SAS_ADAPTER *ioc,
 	}
 
 	mpt2sas_transport_port_remove(ioc, sas_expander->sas_address,
-	    sas_expander->parent_handle);
+	    sas_expander->sas_address_parent);
 
 	printk(MPT2SAS_INFO_FMT "expander_remove: handle"
 	   "(0x%04x), sas_addr(0x%016llx)\n", ioc->name,
@@ -5690,9 +5895,99 @@ _scsih_expander_node_remove(struct MPT2SAS_ADAPTER *ioc,
 }
 
 /**
+ * _scsih_ir_shutdown - IR shutdown notification
+ * @ioc: per adapter object
+ *
+ * Sends a RAID Action request to alert the IOC's Integrated RAID subsystem
+ * that the host system is shutting down.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_ir_shutdown(struct MPT2SAS_ADAPTER *ioc)
+{
+	Mpi2RaidActionRequest_t *mpi_request;
+	Mpi2RaidActionReply_t *mpi_reply;
+	u16 smid;
+
+	/* is IR firmware build loaded? */
+	if (!ioc->ir_firmware)
+		return;
+
+	/* are there any volumes? */
+	if (list_empty(&ioc->raid_device_list))
+		return;
+
+	mutex_lock(&ioc->scsih_cmds.mutex);
+
+	if (ioc->scsih_cmds.status != MPT2_CMD_NOT_USED) {
+		printk(MPT2SAS_ERR_FMT "%s: scsih_cmd in use\n",
+		    ioc->name, __func__);
+		goto out;
+	}
+	ioc->scsih_cmds.status = MPT2_CMD_PENDING;
+
+	smid = mpt2sas_base_get_smid(ioc, ioc->scsih_cb_idx);
+	if (!smid) {
+		printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
+		    ioc->name, __func__);
+		ioc->scsih_cmds.status = MPT2_CMD_NOT_USED;
+		goto out;
+	}
+
+	mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
+	ioc->scsih_cmds.smid = smid;
+	memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));
+
+	mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
+	mpi_request->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED;
+
+	printk(MPT2SAS_INFO_FMT "IR shutdown (sending)\n", ioc->name);
+	init_completion(&ioc->scsih_cmds.done);
+	mpt2sas_base_put_smid_default(ioc, smid);
+	wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
+
+	if (!(ioc->scsih_cmds.status & MPT2_CMD_COMPLETE)) {
+		printk(MPT2SAS_ERR_FMT "%s: timeout\n",
+		    ioc->name, __func__);
+		goto out;
+	}
+
+	if (ioc->scsih_cmds.status & MPT2_CMD_REPLY_VALID) {
+		mpi_reply = ioc->scsih_cmds.reply;
+
+		printk(MPT2SAS_INFO_FMT "IR shutdown (complete): "
+		    "ioc_status(0x%04x), loginfo(0x%08x)\n",
+		    ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
+		    le32_to_cpu(mpi_reply->IOCLogInfo));
+	}
+
+ out:
+	ioc->scsih_cmds.status = MPT2_CMD_NOT_USED;
+	mutex_unlock(&ioc->scsih_cmds.mutex);
+}
+
+/**
+ * _scsih_shutdown - routine called during system shutdown
+ * @pdev: PCI device struct
+ *
+ * Return nothing.
+ */
+static void
+_scsih_shutdown(struct pci_dev *pdev)
+{
+	struct Scsi_Host *shost = pci_get_drvdata(pdev);
+	struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+
+	_scsih_ir_shutdown(ioc);
+	mpt2sas_base_detach(ioc);
+}
+
+/**
  * _scsih_remove - detach and remove the scsi host
  * @pdev: PCI device struct
  *
+ * Routine called when unloading the driver.
  * Return nothing.
  */
 static void __devexit
@@ -5726,7 +6021,7 @@ _scsih_remove(struct pci_dev *pdev)
 			    mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
 			   mpt2sas_port->remote_identify.sas_address);
 			if (sas_device) {
-				_scsih_remove_device(ioc, sas_device->handle);
+				_scsih_remove_device(ioc, sas_device);
 				goto retry_again;
 			}
 		} else {
@@ -5735,7 +6030,7 @@ _scsih_remove(struct pci_dev *pdev)
 			    mpt2sas_port->remote_identify.sas_address);
 			if (expander_sibling) {
 				_scsih_expander_remove(ioc,
-				    expander_sibling->handle);
+				    expander_sibling->sas_address);
 				goto retry_again;
 			}
 		}
@@ -5749,7 +6044,7 @@ _scsih_remove(struct pci_dev *pdev)
 	}
 
 	sas_remove_host(shost);
-	mpt2sas_base_detach(ioc);
+	_scsih_shutdown(pdev);
 	list_del(&ioc->list);
 	scsi_remove_host(shost);
 	scsi_host_put(shost);
@@ -5770,7 +6065,8 @@ _scsih_probe_boot_devices(struct MPT2SAS_ADAPTER *ioc)
 	void *device;
 	struct _sas_device *sas_device;
 	struct _raid_device *raid_device;
-	u16 handle, parent_handle;
+	u16 handle;
+	u64 sas_address_parent;
 	u64 sas_address;
 	unsigned long flags;
 	int rc;
@@ -5799,17 +6095,17 @@ _scsih_probe_boot_devices(struct MPT2SAS_ADAPTER *ioc)
 	} else {
 		sas_device = device;
 		handle = sas_device->handle;
-		parent_handle = sas_device->parent_handle;
+		sas_address_parent = sas_device->sas_address_parent;
 		sas_address = sas_device->sas_address;
 		spin_lock_irqsave(&ioc->sas_device_lock, flags);
 		list_move_tail(&sas_device->list, &ioc->sas_device_list);
 		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
 		if (!mpt2sas_transport_port_add(ioc, sas_device->handle,
-		    sas_device->parent_handle)) {
+		    sas_device->sas_address_parent)) {
 			_scsih_sas_device_remove(ioc, sas_device);
 		} else if (!sas_device->starget) {
 			mpt2sas_transport_port_remove(ioc, sas_address,
-			    parent_handle);
+			    sas_address_parent);
 			_scsih_sas_device_remove(ioc, sas_device);
 		}
 	}
@@ -5849,8 +6145,6 @@ _scsih_probe_sas(struct MPT2SAS_ADAPTER *ioc)
 {
 	struct _sas_device *sas_device, *next;
 	unsigned long flags;
-	u16 handle, parent_handle;
-	u64 sas_address;
 
 	/* SAS Device List */
 	list_for_each_entry_safe(sas_device, next, &ioc->sas_device_init_list,
@@ -5859,14 +6153,13 @@ _scsih_probe_sas(struct MPT2SAS_ADAPTER *ioc)
 		list_move_tail(&sas_device->list, &ioc->sas_device_list);
 		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
 
-		handle = sas_device->handle;
-		parent_handle = sas_device->parent_handle;
-		sas_address = sas_device->sas_address;
-		if (!mpt2sas_transport_port_add(ioc, handle, parent_handle)) {
+		if (!mpt2sas_transport_port_add(ioc, sas_device->handle,
+		    sas_device->sas_address_parent)) {
 			_scsih_sas_device_remove(ioc, sas_device);
 		} else if (!sas_device->starget) {
-			mpt2sas_transport_port_remove(ioc, sas_address,
-			    parent_handle);
+			mpt2sas_transport_port_remove(ioc,
+			    sas_device->sas_address,
+			    sas_device->sas_address_parent);
 			_scsih_sas_device_remove(ioc, sas_device);
 		}
 	}
@@ -5935,6 +6228,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	ioc->ctl_cb_idx = ctl_cb_idx;
 	ioc->base_cb_idx = base_cb_idx;
 	ioc->transport_cb_idx = transport_cb_idx;
+	ioc->scsih_cb_idx = scsih_cb_idx;
 	ioc->config_cb_idx = config_cb_idx;
 	ioc->tm_tr_cb_idx = tm_tr_cb_idx;
 	ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx;
@@ -6072,6 +6366,7 @@ static struct pci_driver scsih_driver = {
 	.id_table	= scsih_pci_table,
 	.probe		= _scsih_probe,
 	.remove		= __devexit_p(_scsih_remove),
+	.shutdown	= _scsih_shutdown,
 #ifdef CONFIG_PM
 	.suspend	= _scsih_suspend,
 	.resume		= _scsih_resume,
@@ -6113,6 +6408,9 @@ _scsih_init(void)
 	transport_cb_idx = mpt2sas_base_register_callback_handler(
 	    mpt2sas_transport_done);
 
+	/* scsih internal commands callback handler */
+	scsih_cb_idx = mpt2sas_base_register_callback_handler(_scsih_done);
+
 	/* configuration page API internal commands callback handler */
 	config_cb_idx = mpt2sas_base_register_callback_handler(
 	    mpt2sas_config_done);
@@ -6152,6 +6450,7 @@ _scsih_exit(void)
 	mpt2sas_base_release_callback_handler(tm_cb_idx);
 	mpt2sas_base_release_callback_handler(base_cb_idx);
 	mpt2sas_base_release_callback_handler(transport_cb_idx);
+	mpt2sas_base_release_callback_handler(scsih_cb_idx);
 	mpt2sas_base_release_callback_handler(config_cb_idx);
 	mpt2sas_base_release_callback_handler(ctl_cb_idx);
 
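The hunks above repeatedly replace volatile firmware handles with the
parent's SAS address as the stable key. A minimal sketch of the resulting
calling convention, using only functions this patch introduces or reworks
(locking and error paths trimmed; the local variable names are illustrative):

	/* Resolve the parent's SAS address once, then hand it to the
	 * transport layer in place of a firmware handle. */
	u64 sas_address_parent;

	if (_scsih_get_sas_address(ioc, parent_handle,
	    &sas_address_parent) != 0)
		return;	/* parent unknown; its handle may be stale */

	mpt2sas_transport_update_links(ioc, sas_address_parent,
	    attached_handle, phy_number, link_rate);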
diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c
index eb98188c7f3f..3a82872bad44 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_transport.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c
@@ -59,24 +59,23 @@
 
 #include "mpt2sas_base.h"
 /**
- * _transport_sas_node_find_by_handle - sas node search
+ * _transport_sas_node_find_by_sas_address - sas node search
  * @ioc: per adapter object
- * @handle: expander or hba handle (assigned by firmware)
+ * @sas_address: sas address of expander or sas host
  * Context: Calling function should acquire ioc->sas_node_lock.
  *
  * Search for either hba phys or expander device based on sas_address, then
  * returns the sas_node object.
  */
 static struct _sas_node *
-_transport_sas_node_find_by_handle(struct MPT2SAS_ADAPTER *ioc, u16 handle)
+_transport_sas_node_find_by_sas_address(struct MPT2SAS_ADAPTER *ioc,
+    u64 sas_address)
 {
-	int i;
-
-	for (i = 0; i < ioc->sas_hba.num_phys; i++)
-		if (ioc->sas_hba.phy[i].handle == handle)
-			return &ioc->sas_hba;
-
-	return mpt2sas_scsih_expander_find_by_handle(ioc, handle);
+	if (ioc->sas_hba.sas_address == sas_address)
+		return &ioc->sas_hba;
+	else
+		return mpt2sas_scsih_expander_find_by_sas_address(ioc,
+		    sas_address);
 }
 
 /**
@@ -259,8 +258,7 @@ struct rep_manu_reply{
 	u8 response_length;
 	u16 expander_change_count;
 	u8 reserved0[2];
-	u8 sas_format:1;
-	u8 reserved1:7;
+	u8 sas_format;
 	u8 reserved2[3];
 	u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN];
 	u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN];
@@ -375,7 +373,8 @@ _transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc,
 	mpi_request->VP_ID = 0;
 	sas_address_le = (u64 *)&mpi_request->SASAddress;
 	*sas_address_le = cpu_to_le64(sas_address);
-	mpi_request->RequestDataLength = sizeof(struct rep_manu_request);
+	mpi_request->RequestDataLength =
+	    cpu_to_le16(sizeof(struct rep_manu_request));
 	psge = &mpi_request->SGL;
 
 	/* WRITE sgel first */
@@ -438,8 +437,8 @@ _transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc,
 		     SAS_EXPANDER_PRODUCT_ID_LEN);
 		strncpy(edev->product_rev, manufacture_reply->product_rev,
 		     SAS_EXPANDER_PRODUCT_REV_LEN);
-		edev->level = manufacture_reply->sas_format;
-		if (manufacture_reply->sas_format) {
+		edev->level = manufacture_reply->sas_format & 1;
+		if (edev->level) {
 			strncpy(edev->component_vendor_id,
 			    manufacture_reply->component_vendor_id,
 			     SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN);
@@ -469,7 +468,7 @@ _transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc,
  * mpt2sas_transport_port_add - insert port to the list
  * @ioc: per adapter object
  * @handle: handle of attached device
- * @parent_handle: parent handle(either hba or expander)
+ * @sas_address: sas address of parent expander or sas host
  * Context: This function will acquire ioc->sas_node_lock.
  *
  * Adding new port object to the sas_node->sas_port_list.
@@ -478,7 +477,7 @@ _transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc,
  */
 struct _sas_port *
 mpt2sas_transport_port_add(struct MPT2SAS_ADAPTER *ioc, u16 handle,
-    u16 parent_handle)
+    u64 sas_address)
 {
 	struct _sas_phy *mpt2sas_phy, *next;
 	struct _sas_port *mpt2sas_port;
@@ -488,9 +487,6 @@ mpt2sas_transport_port_add(struct MPT2SAS_ADAPTER *ioc, u16 handle,
 	int i;
 	struct sas_port *port;
 
-	if (!parent_handle)
-		return NULL;
-
 	mpt2sas_port = kzalloc(sizeof(struct _sas_port),
 	    GFP_KERNEL);
 	if (!mpt2sas_port) {
@@ -502,17 +498,16 @@ mpt2sas_transport_port_add(struct MPT2SAS_ADAPTER *ioc, u16 handle,
 	INIT_LIST_HEAD(&mpt2sas_port->port_list);
 	INIT_LIST_HEAD(&mpt2sas_port->phy_list);
 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
-	sas_node = _transport_sas_node_find_by_handle(ioc, parent_handle);
+	sas_node = _transport_sas_node_find_by_sas_address(ioc, sas_address);
 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
 
 	if (!sas_node) {
-		printk(MPT2SAS_ERR_FMT "%s: Could not find parent(0x%04x)!\n",
-		    ioc->name, __func__, parent_handle);
+		printk(MPT2SAS_ERR_FMT "%s: Could not find "
+		    "parent sas_address(0x%016llx)!\n", ioc->name,
+		    __func__, (unsigned long long)sas_address);
 		goto out_fail;
 	}
 
-	mpt2sas_port->handle = parent_handle;
-	mpt2sas_port->sas_address = sas_node->sas_address;
 	if ((_transport_set_identify(ioc, handle,
 	    &mpt2sas_port->remote_identify))) {
 		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
@@ -604,7 +599,7 @@ mpt2sas_transport_port_add(struct MPT2SAS_ADAPTER *ioc, u16 handle,
  * mpt2sas_transport_port_remove - remove port from the list
  * @ioc: per adapter object
  * @sas_address: sas address of attached device
- * @parent_handle: handle to the upstream parent(either hba or expander)
+ * @sas_address_parent: sas address of parent expander or sas host
  * Context: This function will acquire ioc->sas_node_lock.
  *
  * Removing object and freeing associated memory from the
@@ -614,7 +609,7 @@ mpt2sas_transport_port_add(struct MPT2SAS_ADAPTER *ioc, u16 handle,
  */
 void
 mpt2sas_transport_port_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address,
-    u16 parent_handle)
+    u64 sas_address_parent)
 {
 	int i;
 	unsigned long flags;
@@ -624,7 +619,8 @@ mpt2sas_transport_port_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address,
 	struct _sas_phy *mpt2sas_phy, *next_phy;
 
 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
-	sas_node = _transport_sas_node_find_by_handle(ioc, parent_handle);
+	sas_node = _transport_sas_node_find_by_sas_address(ioc,
+	    sas_address_parent);
 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
 	if (!sas_node)
 		return;
@@ -650,8 +646,7 @@ mpt2sas_transport_port_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address,
 	    &mpt2sas_port->phy_list, port_siblings) {
 		if ((ioc->logging_level & MPT_DEBUG_TRANSPORT))
 			dev_printk(KERN_INFO, &mpt2sas_port->port->dev,
-			    "remove: parent_handle(0x%04x), "
-			    "sas_addr(0x%016llx), phy(%d)\n", parent_handle,
+			    "remove: sas_addr(0x%016llx), phy(%d)\n",
 			    (unsigned long long)
 			    mpt2sas_port->remote_identify.sas_address,
 			    mpt2sas_phy->phy_id);
@@ -799,8 +794,8 @@ mpt2sas_transport_add_expander_phy(struct MPT2SAS_ADAPTER *ioc, struct _sas_phy
 /**
  * mpt2sas_transport_update_links - refreshing phy link changes
  * @ioc: per adapter object
- * @handle: handle to sas_host or expander
- * @attached_handle: attached device handle
+ * @sas_address: sas address of parent expander or sas host
+ * @handle: attached device handle
  * @phy_number: phy number
  * @link_rate: new link rate
  *
@@ -808,28 +803,25 @@ mpt2sas_transport_add_expander_phy(struct MPT2SAS_ADAPTER *ioc, struct _sas_phy
  */
 void
 mpt2sas_transport_update_links(struct MPT2SAS_ADAPTER *ioc,
-    u16 handle, u16 attached_handle, u8 phy_number, u8 link_rate)
+     u64 sas_address, u16 handle, u8 phy_number, u8 link_rate)
 {
 	unsigned long flags;
 	struct _sas_node *sas_node;
 	struct _sas_phy *mpt2sas_phy;
 
-	if (ioc->shost_recovery) {
-		printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n",
-			__func__, ioc->name);
+	if (ioc->shost_recovery)
 		return;
-	}
 
 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
-	sas_node = _transport_sas_node_find_by_handle(ioc, handle);
+	sas_node = _transport_sas_node_find_by_sas_address(ioc, sas_address);
 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
 	if (!sas_node)
 		return;
 
 	mpt2sas_phy = &sas_node->phy[phy_number];
-	mpt2sas_phy->attached_handle = attached_handle;
-	if (attached_handle && (link_rate >= MPI2_SAS_NEG_LINK_RATE_1_5))
-		_transport_set_identify(ioc, mpt2sas_phy->attached_handle,
+	mpt2sas_phy->attached_handle = handle;
+	if (handle && (link_rate >= MPI2_SAS_NEG_LINK_RATE_1_5))
+		_transport_set_identify(ioc, handle,
 		    &mpt2sas_phy->remote_identify);
 	else
 		memset(&mpt2sas_phy->remote_identify, 0 , sizeof(struct
@@ -841,13 +833,11 @@ mpt2sas_transport_update_links(struct MPT2SAS_ADAPTER *ioc,
 
 	if ((ioc->logging_level & MPT_DEBUG_TRANSPORT))
 		dev_printk(KERN_INFO, &mpt2sas_phy->phy->dev,
-		    "refresh: handle(0x%04x), sas_addr(0x%016llx),\n"
+		    "refresh: parent sas_addr(0x%016llx),\n"
 		    "\tlink_rate(0x%02x), phy(%d)\n"
 		    "\tattached_handle(0x%04x), sas_addr(0x%016llx)\n",
-		    handle, (unsigned long long)
-		    mpt2sas_phy->identify.sas_address, link_rate,
-		    phy_number, attached_handle,
-		    (unsigned long long)
+		    (unsigned long long)sas_address,
+		    link_rate, phy_number, handle, (unsigned long long)
 		    mpt2sas_phy->remote_identify.sas_address);
 }
 
@@ -1126,7 +1116,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
 	dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio),
 		blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL);
 	if (!dma_addr_out) {
-		mpt2sas_base_free_smid(ioc, le16_to_cpu(smid));
+		mpt2sas_base_free_smid(ioc, smid);
 		goto unmap;
 	}
 
@@ -1144,7 +1134,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
 	dma_addr_in = pci_map_single(ioc->pdev, bio_data(rsp->bio),
 				     blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL);
 	if (!dma_addr_in) {
-		mpt2sas_base_free_smid(ioc, le16_to_cpu(smid));
+		mpt2sas_base_free_smid(ioc, smid);
 		goto unmap;
 	}
 
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index 7a117c18114c..950202a70bcf 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -73,7 +73,8 @@ static const char *_osd_ver_desc(struct osd_request *or)
 
 #define ATTR_DEF_RI(id, len) ATTR_DEF(OSD_APAGE_ROOT_INFORMATION, id, len)
 
-static int _osd_print_system_info(struct osd_dev *od, void *caps)
+static int _osd_get_print_system_info(struct osd_dev *od,
+	void *caps, struct osd_dev_info *odi)
 {
 	struct osd_request *or;
 	struct osd_attr get_attrs[] = {
@@ -137,8 +138,12 @@ static int _osd_print_system_info(struct osd_dev *od, void *caps)
 	OSD_INFO("PRODUCT_SERIAL_NUMBER  [%s]\n",
 		(char *)pFirst);
 
-	pFirst = get_attrs[a].val_ptr;
-	OSD_INFO("OSD_NAME               [%s]\n", (char *)pFirst);
+	odi->osdname_len = get_attrs[a].len;
+	/* Avoid NULL; for the memcmp optimization 0-length is good enough */
+	odi->osdname = kzalloc(odi->osdname_len + 1, GFP_KERNEL);
+	if (odi->osdname_len)
+		memcpy(odi->osdname, get_attrs[a].val_ptr, odi->osdname_len);
+	OSD_INFO("OSD_NAME               [%s]\n", odi->osdname);
 	a++;
 
 	pFirst = get_attrs[a++].val_ptr;
@@ -171,6 +176,14 @@ static int _osd_print_system_info(struct osd_dev *od, void *caps)
 				   sid_dump, sizeof(sid_dump), true);
 		OSD_INFO("OSD_SYSTEM_ID(%d)\n"
 			 "        [%s]\n", len, sid_dump);
+
+		if (unlikely(len > sizeof(odi->systemid))) {
+			OSD_ERR("OSD Target error: OSD_SYSTEM_ID too long(%d). "
+				"device idetification might not work\n", len);
+			len = sizeof(odi->systemid);
+		}
+		odi->systemid_len = len;
+		memcpy(odi->systemid, get_attrs[a].val_ptr, len);
 		a++;
 	}
 out:
@@ -178,16 +191,17 @@ out:
 	return ret;
 }
 
-int osd_auto_detect_ver(struct osd_dev *od, void *caps)
+int osd_auto_detect_ver(struct osd_dev *od,
+	void *caps, struct osd_dev_info *odi)
 {
 	int ret;
 
 	/* Auto-detect the osd version */
-	ret = _osd_print_system_info(od, caps);
+	ret = _osd_get_print_system_info(od, caps, odi);
 	if (ret) {
 		osd_dev_set_ver(od, OSD_VER1);
 		OSD_DEBUG("converting to OSD1\n");
-		ret = _osd_print_system_info(od, caps);
+		ret = _osd_get_print_system_info(od, caps, odi);
 	}
 
 	return ret;
@@ -461,7 +475,8 @@ EXPORT_SYMBOL(osd_end_request);
 
 int osd_execute_request(struct osd_request *or)
 {
-	return blk_execute_rq(or->request->q, NULL, or->request, 0);
+	return or->async_error =
+			blk_execute_rq(or->request->q, NULL, or->request, 0);
 }
 EXPORT_SYMBOL(osd_execute_request);
 
@@ -471,8 +486,12 @@ static void osd_request_async_done(struct request *req, int error)
 
 	or->async_error = error;
 
-	if (error)
-		OSD_DEBUG("osd_request_async_done error recieved %d\n", error);
+	if (unlikely(error)) {
+		OSD_DEBUG("osd_request_async_done error recieved %d "
+			  "errors 0x%x\n", error, req->errors);
+		if (!req->errors) /* don't miss out on this one */
+			req->errors = error;
+	}
 
 	if (or->async_done)
 		or->async_done(or, or->async_private);
@@ -1153,6 +1172,7 @@ int osd_req_decode_get_attr_list(struct osd_request *or,
 				"c=%d r=%d n=%d\n",
 				cur_bytes, returned_bytes, n);
 			oa->val_ptr = NULL;
+			cur_bytes = returned_bytes; /* break the caller loop */
 			break;
 		}
 
@@ -1436,6 +1456,15 @@ int osd_finalize_request(struct osd_request *or,
 }
 EXPORT_SYMBOL(osd_finalize_request);
 
+static bool _is_osd_security_code(int code)
+{
+	return	(code == osd_security_audit_value_frozen) ||
+		(code == osd_security_working_key_frozen) ||
+		(code == osd_nonce_not_unique) ||
+		(code == osd_nonce_timestamp_out_of_range) ||
+		(code == osd_invalid_dataout_buffer_integrity_check_value);
+}
+
 #define OSD_SENSE_PRINT1(fmt, a...) \
 	do { \
 		if (__cur_sense_need_output) \
@@ -1458,9 +1487,16 @@ int osd_req_decode_sense_full(struct osd_request *or,
 #else
 	bool __cur_sense_need_output = !silent;
 #endif
+	int ret;
 
-	if (!or->request->errors)
+	if (likely(!or->request->errors)) {
+		osi->out_resid = 0;
+		osi->in_resid = 0;
 		return 0;
+	}
+
+	osi = osi ? : &local_osi;
+	memset(osi, 0, sizeof(*osi));
 
 	ssdb = or->request->sense;
 	sense_len = or->request->sense_len;
@@ -1468,17 +1504,15 @@ int osd_req_decode_sense_full(struct osd_request *or,
 		OSD_ERR("Block-layer returned error(0x%x) but "
 			"sense_len(%u) || key(%d) is empty\n",
 			or->request->errors, sense_len, ssdb->sense_key);
-		return -EIO;
+		goto analyze;
 	}
 
 	if ((ssdb->response_code != 0x72) && (ssdb->response_code != 0x73)) {
 		OSD_ERR("Unrecognized scsi sense: rcode=%x length=%d\n",
 			ssdb->response_code, sense_len);
-		return -EIO;
+		goto analyze;
 	}
 
-	osi = osi ? : &local_osi;
-	memset(osi, 0, sizeof(*osi));
 	osi->key = ssdb->sense_key;
 	osi->additional_code = be16_to_cpu(ssdb->additional_sense_code);
 	original_sense_len = ssdb->additional_sense_length + 8;
@@ -1488,9 +1522,10 @@ int osd_req_decode_sense_full(struct osd_request *or,
 		__cur_sense_need_output = (osi->key > scsi_sk_recovered_error);
 #endif
 	OSD_SENSE_PRINT1("Main Sense information key=0x%x length(%d, %d) "
-			"additional_code=0x%x\n",
+			"additional_code=0x%x async_error=%d errors=0x%x\n",
 			osi->key, original_sense_len, sense_len,
-			osi->additional_code);
+			osi->additional_code, or->async_error,
+			or->request->errors);
 
 	if (original_sense_len < sense_len)
 		sense_len = original_sense_len;
@@ -1569,15 +1604,14 @@ int osd_req_decode_sense_full(struct osd_request *or,
 		{
 			struct osd_sense_attributes_data_descriptor
 				*osadd = cur_descriptor;
-			int len = min(cur_len, sense_len);
-			int i = 0;
+			unsigned len = min(cur_len, sense_len);
 			struct osd_sense_attr *pattr = osadd->sense_attrs;
 
-			while (len < 0) {
+			while (len >= sizeof(*pattr)) {
 				u32 attr_page = be32_to_cpu(pattr->attr_page);
 				u32 attr_id = be32_to_cpu(pattr->attr_id);
 
-				if (i++ == 0) {
+				if (!osi->attr.attr_page) {
 					osi->attr.attr_page = attr_page;
 					osi->attr.attr_id = attr_id;
 				}
@@ -1588,6 +1622,8 @@ int osd_req_decode_sense_full(struct osd_request *or,
 					bad_attr_list++;
 					max_attr--;
 				}
+
+				len -= sizeof(*pattr);
 				OSD_SENSE_PRINT2(
 					"osd_sense_attribute_identification"
 					"attr_page=0x%x attr_id=0x%x\n",
@@ -1621,7 +1657,50 @@ int osd_req_decode_sense_full(struct osd_request *or,
 		cur_descriptor += cur_len;
 	}
 
-	return (osi->key > scsi_sk_recovered_error) ? -EIO : 0;
+analyze:
+	if (!osi->key) {
+		/* scsi sense is empty, the request was never issued to the
+		 * target; the linux return code might tell us what happened.
+		 */
+		if (or->async_error == -ENOMEM)
+			osi->osd_err_pri = OSD_ERR_PRI_RESOURCE;
+		else
+			osi->osd_err_pri = OSD_ERR_PRI_UNREACHABLE;
+		ret = or->async_error;
+	} else if (osi->key <= scsi_sk_recovered_error) {
+		osi->osd_err_pri = 0;
+		ret = 0;
+	} else if (osi->additional_code == scsi_invalid_field_in_cdb) {
+		if (osi->cdb_field_offset == OSD_CFO_STARTING_BYTE) {
+			osi->osd_err_pri = OSD_ERR_PRI_CLEAR_PAGES;
+			ret = -EFAULT; /* caller should recover from this */
+		} else if (osi->cdb_field_offset == OSD_CFO_OBJECT_ID) {
+			osi->osd_err_pri = OSD_ERR_PRI_NOT_FOUND;
+			ret = -ENOENT;
+		} else if (osi->cdb_field_offset == OSD_CFO_PERMISSIONS) {
+			osi->osd_err_pri = OSD_ERR_PRI_NO_ACCESS;
+			ret = -EACCES;
+		} else {
+			osi->osd_err_pri = OSD_ERR_PRI_BAD_CRED;
+			ret = -EINVAL;
+		}
+	} else if (osi->additional_code == osd_quota_error) {
+		osi->osd_err_pri = OSD_ERR_PRI_NO_SPACE;
+		ret = -ENOSPC;
+	} else if (_is_osd_security_code(osi->additional_code)) {
+		osi->osd_err_pri = OSD_ERR_PRI_BAD_CRED;
+		ret = -EINVAL;
+	} else {
+		osi->osd_err_pri = OSD_ERR_PRI_EIO;
+		ret = -EIO;
+	}
+
+	if (or->out.req)
+		osi->out_resid = or->out.req->resid_len ?: or->out.total_bytes;
+	if (or->in.req)
+		osi->in_resid = or->in.req->resid_len ?: or->in.total_bytes;
+
+	return ret;
 }
 EXPORT_SYMBOL(osd_req_decode_sense_full);
 
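The analyze block above distills the sense data into osi->osd_err_pri plus a
negative errno, so callers can branch on a single priority value. A hedged
sketch of consuming it, assuming the osd_req_decode_sense() convenience
wrapper from osd_initiator.h:

	struct osd_sense_info osi;
	int ret = osd_req_decode_sense(or, &osi);

	switch (osi.osd_err_pri) {
	case OSD_ERR_PRI_NOT_FOUND:	/* ret is -ENOENT */
		break;
	case OSD_ERR_PRI_NO_SPACE:	/* ret is -ENOSPC */
		break;
	default:	/* 0 on success; -EIO and friends otherwise */
		break;
	}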
diff --git a/drivers/scsi/osd/osd_uld.c b/drivers/scsi/osd/osd_uld.c
index 0bdef3390902..0a90702b3d71 100644
--- a/drivers/scsi/osd/osd_uld.c
+++ b/drivers/scsi/osd/osd_uld.c
@@ -71,8 +71,7 @@
 #define SCSI_OSD_MAX_MINOR 64
 
 static const char osd_name[] = "osd";
-static const char *osd_version_string = "open-osd 0.1.0";
-const char osd_symlink[] = "scsi_osd";
+static const char *osd_version_string = "open-osd 0.2.0";
 
 MODULE_AUTHOR("Boaz Harrosh <bharrosh@panasas.com>");
 MODULE_DESCRIPTION("open-osd Upper-Layer-Driver osd.ko");
@@ -82,15 +81,25 @@ MODULE_ALIAS_SCSI_DEVICE(TYPE_OSD);
 
 struct osd_uld_device {
 	int minor;
-	struct kref kref;
+	struct device class_dev;
 	struct cdev cdev;
 	struct osd_dev od;
+	struct osd_dev_info odi;
 	struct gendisk *disk;
-	struct device *class_member;
 };
 
-static void __uld_get(struct osd_uld_device *oud);
-static void __uld_put(struct osd_uld_device *oud);
+struct osd_dev_handle {
+	struct osd_dev od;
+	struct file *file;
+	struct osd_uld_device *oud;
+};
+
+static DEFINE_IDA(osd_minor_ida);
+
+static struct class osd_uld_class = {
+	.owner		= THIS_MODULE,
+	.name		= "scsi_osd",
+};
 
 /*
  * Char Device operations
@@ -101,7 +110,7 @@ static int osd_uld_open(struct inode *inode, struct file *file)
 	struct osd_uld_device *oud = container_of(inode->i_cdev,
 					struct osd_uld_device, cdev);
 
-	__uld_get(oud);
+	get_device(&oud->class_dev);
 	/* cache osd_uld_device on file handle */
 	file->private_data = oud;
 	OSD_DEBUG("osd_uld_open %p\n", oud);
@@ -114,7 +123,7 @@ static int osd_uld_release(struct inode *inode, struct file *file)
 
 	OSD_DEBUG("osd_uld_release %p\n", file->private_data);
 	file->private_data = NULL;
-	__uld_put(oud);
+	put_device(&oud->class_dev);
 	return 0;
 }
 
@@ -177,7 +186,7 @@ static const struct file_operations osd_fops = {
 struct osd_dev *osduld_path_lookup(const char *name)
 {
 	struct osd_uld_device *oud;
-	struct osd_dev *od;
+	struct osd_dev_handle *odh;
 	struct file *file;
 	int error;
 
@@ -186,8 +195,8 @@ struct osd_dev *osduld_path_lookup(const char *name)
 		return ERR_PTR(-EINVAL);
 	}
 
-	od = kzalloc(sizeof(*od), GFP_KERNEL);
-	if (!od)
+	odh = kzalloc(sizeof(*odh), GFP_KERNEL);
+	if (unlikely(!odh))
 		return ERR_PTR(-ENOMEM);
 
 	file = filp_open(name, O_RDWR, 0);
@@ -203,33 +212,134 @@ struct osd_dev *osduld_path_lookup(const char *name)
 
 	oud = file->private_data;
 
-	*od = oud->od;
-	od->file = file;
+	odh->od = oud->od;
+	odh->file = file;
+	odh->oud = oud;
 
-	return od;
+	return &odh->od;
 
 close_file:
 	fput(file);
 free_od:
-	kfree(od);
+	kfree(odh);
 	return ERR_PTR(error);
 }
 EXPORT_SYMBOL(osduld_path_lookup);
 
-void osduld_put_device(struct osd_dev *od)
+static inline bool _the_same_or_null(const u8 *a1, unsigned a1_len,
+				     const u8 *a2, unsigned a2_len)
 {
+	if (!a2_len) /* Empty user string means don't care */
+		return true;
+
+	if (a1_len != a2_len)
+		return false;
+
+	return 0 == memcmp(a1, a2, a1_len);
+}
+
+struct find_oud_t {
+	const struct osd_dev_info *odi;
+	struct device *dev;
+	struct osd_uld_device *oud;
+};
+
+static int _match_odi(struct device *dev, void *find_data)
+{
+	struct osd_uld_device *oud = container_of(dev, struct osd_uld_device,
+						  class_dev);
+	struct find_oud_t *fot = find_data;
+	const struct osd_dev_info *odi = fot->odi;
+
+	if (_the_same_or_null(oud->odi.systemid, oud->odi.systemid_len,
+			      odi->systemid, odi->systemid_len) &&
+	    _the_same_or_null(oud->odi.osdname, oud->odi.osdname_len,
+			      odi->osdname, odi->osdname_len)) {
+		OSD_DEBUG("found device sysid_len=%d osdname=%d\n",
+			  odi->systemid_len, odi->osdname_len);
+		fot->oud = oud;
+		return 1;
+	} else {
+		return 0;
+	}
+}
+
+/* osduld_info_lookup - Loop through all devices, return the requested osd_dev.
+ *
+ * if @odi->systemid_len and/or @odi->osdname_len are zero, they act as a don't
+ * care, e.g. if they're both zero /dev/osd0 is returned.
+ */
+struct osd_dev *osduld_info_lookup(const struct osd_dev_info *odi)
+{
+	struct find_oud_t find = {.odi = odi};
+
+	find.dev = class_find_device(&osd_uld_class, NULL, &find, _match_odi);
+	if (likely(find.dev)) {
+		struct osd_dev_handle *odh = kzalloc(sizeof(*odh), GFP_KERNEL);
+
+		if (unlikely(!odh)) {
+			put_device(find.dev);
+			return ERR_PTR(-ENOMEM);
+		}
 
+		odh->od = find.oud->od;
+		odh->oud = find.oud;
+
+		return &odh->od;
+	}
+
+	return ERR_PTR(-ENODEV);
+}
+EXPORT_SYMBOL(osduld_info_lookup);
+
+void osduld_put_device(struct osd_dev *od)
+{
 	if (od && !IS_ERR(od)) {
-		struct osd_uld_device *oud = od->file->private_data;
+		struct osd_dev_handle *odh =
+				container_of(od, struct osd_dev_handle, od);
+		struct osd_uld_device *oud = odh->oud;
 
 		BUG_ON(od->scsi_device != oud->od.scsi_device);
 
-		fput(od->file);
-		kfree(od);
+		/* If scsi has released the device (logout), and exofs holds
+		 * the last reference on oud, it is freed by osd_uld_release
+		 * above within the fput below. But that would oops in
+		 * cdev_release, which is called after fops->release. A
+		 * get_/put_ pair keeps the cdev alive for the duration of fput.
+		 */
+		if (odh->file) {
+			get_device(&oud->class_dev);
+			fput(odh->file);
+		}
+		put_device(&oud->class_dev);
+		kfree(odh);
 	}
 }
 EXPORT_SYMBOL(osduld_put_device);
 
+const struct osd_dev_info *osduld_device_info(struct osd_dev *od)
+{
+	struct osd_dev_handle *odh =
+				container_of(od, struct osd_dev_handle, od);
+	return &odh->oud->odi;
+}
+EXPORT_SYMBOL(osduld_device_info);
+
+bool osduld_device_same(struct osd_dev *od, const struct osd_dev_info *odi)
+{
+	struct osd_dev_handle *odh =
+				container_of(od, struct osd_dev_handle, od);
+	struct osd_uld_device *oud = odh->oud;
+
+	return (oud->odi.systemid_len == odi->systemid_len) &&
+		_the_same_or_null(oud->odi.systemid, oud->odi.systemid_len,
+				 odi->systemid, odi->systemid_len) &&
+		(oud->odi.osdname_len == odi->osdname_len) &&
+		_the_same_or_null(oud->odi.osdname, oud->odi.osdname_len,
+				  odi->osdname, odi->osdname_len);
+}
+EXPORT_SYMBOL(osduld_device_same);
+
 /*
  * Scsi Device operations
  */
@@ -250,14 +360,35 @@ static int __detect_osd(struct osd_uld_device *oud)
 		OSD_ERR("warning: scsi_test_unit_ready failed\n");
 
 	osd_sec_init_nosec_doall_caps(caps, &osd_root_object, false, true);
-	if (osd_auto_detect_ver(&oud->od, caps))
+	if (osd_auto_detect_ver(&oud->od, caps, &oud->odi))
 		return -ENODEV;
 
 	return 0;
 }
 
-static struct class *osd_sysfs_class;
-static DEFINE_IDA(osd_minor_ida);
+static void __remove(struct device *dev)
+{
+	struct osd_uld_device *oud = container_of(dev, struct osd_uld_device,
+						  class_dev);
+	struct scsi_device *scsi_device = oud->od.scsi_device;
+
+	kfree(oud->odi.osdname);
+
+	if (oud->cdev.owner)
+		cdev_del(&oud->cdev);
+
+	osd_dev_fini(&oud->od);
+	scsi_device_put(scsi_device);
+
+	OSD_INFO("osd_remove %s\n",
+		 oud->disk ? oud->disk->disk_name : NULL);
+
+	if (oud->disk)
+		put_disk(oud->disk);
+	ida_remove(&osd_minor_ida, oud->minor);
+
+	kfree(oud);
+}
 
 static int osd_probe(struct device *dev)
 {
@@ -289,7 +420,6 @@ static int osd_probe(struct device *dev)
 	if (NULL == oud)
 		goto err_retract_minor;
 
-	kref_init(&oud->kref);
 	dev_set_drvdata(dev, oud);
 	oud->minor = minor;
 
@@ -327,18 +457,25 @@ static int osd_probe(struct device *dev)
 		OSD_ERR("cdev_add failed\n");
 		goto err_put_disk;
 	}
-	kobject_get(&oud->cdev.kobj); /* 2nd ref see osd_remove() */
-
-	/* class_member */
-	oud->class_member = device_create(osd_sysfs_class, dev,
-		MKDEV(SCSI_OSD_MAJOR, oud->minor), "%s", disk->disk_name);
-	if (IS_ERR(oud->class_member)) {
-		OSD_ERR("class_device_create failed\n");
-		error = PTR_ERR(oud->class_member);
+
+	/* class device member */
+	oud->class_dev.devt = oud->cdev.dev;
+	oud->class_dev.class = &osd_uld_class;
+	oud->class_dev.parent = dev;
+	oud->class_dev.release = __remove;
+	error = dev_set_name(&oud->class_dev, "%s", disk->disk_name);
+	if (error) {
+		OSD_ERR("dev_set_name failed => %d\n", error);
+		goto err_put_cdev;
+	}
+
+	error = device_register(&oud->class_dev);
+	if (error) {
+		OSD_ERR("device_register failed => %d\n", error);
 		goto err_put_cdev;
 	}
 
-	dev_set_drvdata(oud->class_member, oud);
+	get_device(&oud->class_dev);
 
 	OSD_INFO("osd_probe %s\n", disk->disk_name);
 	return 0;
@@ -367,54 +504,12 @@ static int osd_remove(struct device *dev)
 			scsi_device);
 	}
 
-	if (oud->class_member)
-		device_destroy(osd_sysfs_class,
-			       MKDEV(SCSI_OSD_MAJOR, oud->minor));
+	device_unregister(&oud->class_dev);
 
-	/* We have 2 references to the cdev. One is released here
-	 * and also takes down the /dev/osdX mapping. The second
-	 * Will be released in __remove() after all users have released
-	 * the osd_uld_device.
-	 */
-	if (oud->cdev.owner)
-		cdev_del(&oud->cdev);
-
-	__uld_put(oud);
+	put_device(&oud->class_dev);
 	return 0;
 }
 
-static void __remove(struct kref *kref)
-{
-	struct osd_uld_device *oud = container_of(kref,
-					struct osd_uld_device, kref);
-	struct scsi_device *scsi_device = oud->od.scsi_device;
-
-	/* now let delete the char_dev */
-	kobject_put(&oud->cdev.kobj);
-
-	osd_dev_fini(&oud->od);
-	scsi_device_put(scsi_device);
-
-	OSD_INFO("osd_remove %s\n",
-		 oud->disk ? oud->disk->disk_name : NULL);
-
-	if (oud->disk)
-		put_disk(oud->disk);
-
-	ida_remove(&osd_minor_ida, oud->minor);
-	kfree(oud);
-}
-
-static void __uld_get(struct osd_uld_device *oud)
-{
-	kref_get(&oud->kref);
-}
-
-static void __uld_put(struct osd_uld_device *oud)
-{
-	kref_put(&oud->kref, __remove);
-}
-
 /*
  * Global driver and scsi registration
  */
@@ -432,11 +527,10 @@ static int __init osd_uld_init(void)
 {
 	int err;
 
-	osd_sysfs_class = class_create(THIS_MODULE, osd_symlink);
-	if (IS_ERR(osd_sysfs_class)) {
-		OSD_ERR("Unable to register sysfs class => %ld\n",
-			PTR_ERR(osd_sysfs_class));
-		return PTR_ERR(osd_sysfs_class);
+	err = class_register(&osd_uld_class);
+	if (err) {
+		OSD_ERR("Unable to register sysfs class => %d\n", err);
+		return err;
 	}
 
 	err = register_chrdev_region(MKDEV(SCSI_OSD_MAJOR, 0),
@@ -459,7 +553,7 @@ static int __init osd_uld_init(void)
 err_out_chrdev:
 	unregister_chrdev_region(MKDEV(SCSI_OSD_MAJOR, 0), SCSI_OSD_MAX_MINOR);
 err_out:
-	class_destroy(osd_sysfs_class);
+	class_unregister(&osd_uld_class);
 	return err;
 }
 
@@ -467,7 +561,7 @@ static void __exit osd_uld_exit(void)
 {
 	scsi_unregister_driver(&osd_driver.gendrv);
 	unregister_chrdev_region(MKDEV(SCSI_OSD_MAJOR, 0), SCSI_OSD_MAX_MINOR);
-	class_destroy(osd_sysfs_class);
+	class_unregister(&osd_uld_class);
 	OSD_INFO("UNLOADED %s\n", osd_version_string);
 }
 
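The new osduld_info_lookup()/osduld_put_device() pair complements path-based
lookup for callers that identify a target by OSD name or system id. An
illustrative sketch, not taken from a real caller; the device name is
hypothetical, and empty fields act as "don't care":

	struct osd_dev_info odi = {
		.osdname	= (u8 *)"my_osd",
		.osdname_len	= 6,	/* systemid left empty */
	};
	struct osd_dev *od = osduld_info_lookup(&odi);

	if (!IS_ERR(od)) {
		/* ... issue OSD requests against od ... */
		osduld_put_device(od);	/* drops the class_dev reference */
	}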
diff --git a/drivers/scsi/pm8001/Makefile b/drivers/scsi/pm8001/Makefile
new file mode 100644
index 000000000000..52f04296171c
--- /dev/null
+++ b/drivers/scsi/pm8001/Makefile
@@ -0,0 +1,12 @@
+#
+# Makefile for the PM8001 SAS/SATA 8x6G based HBA driver
+#
+# Copyright (C) 2008-2009  USI Co., Ltd.
+
+
+obj-$(CONFIG_SCSI_PM8001) += pm8001.o
+pm8001-y += pm8001_init.o \
+		pm8001_sas.o  \
+		pm8001_ctl.o  \
+		pm8001_hwi.o
+
diff --git a/drivers/scsi/pm8001/pm8001_chips.h b/drivers/scsi/pm8001/pm8001_chips.h
new file mode 100644
index 000000000000..4efa4d0950e5
--- /dev/null
+++ b/drivers/scsi/pm8001/pm8001_chips.h
@@ -0,0 +1,89 @@
+/*
+ * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver
+ *
+ * Copyright (c) 2008-2009 USI Co., Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ */
+
+#ifndef _PM8001_CHIPS_H_
+#define _PM8001_CHIPS_H_
+
+static inline u32 pm8001_read_32(void *virt_addr)
+{
+	return *((u32 *)virt_addr);
+}
+
+static inline void pm8001_write_32(void *addr, u32 offset, u32 val)
+{
+	*((u32 *)(addr + offset)) = val;
+}
+
+static inline u32 pm8001_cr32(struct pm8001_hba_info *pm8001_ha, u32 bar,
+		u32 offset)
+{
+	return readl(pm8001_ha->io_mem[bar].memvirtaddr + offset);
+}
+
+static inline void pm8001_cw32(struct pm8001_hba_info *pm8001_ha, u32 bar,
+		u32 addr, u32 val)
+{
+	writel(val, pm8001_ha->io_mem[bar].memvirtaddr + addr);
+}
+static inline u32 pm8001_mr32(void __iomem *addr, u32 offset)
+{
+	return readl(addr + offset);
+}
+static inline void pm8001_mw32(void __iomem *addr, u32 offset, u32 val)
+{
+	writel(val, addr + offset);
+}
+static inline u32 get_pci_bar_index(u32 pcibar)
+{
+	switch (pcibar) {
+	case 0x18:
+	case 0x1C:
+		return 1;
+	case 0x20:
+		return 2;
+	case 0x24:
+		return 3;
+	default:
+		return 0;
+	}
+}
+
+#endif  /* _PM8001_CHIPS_H_ */
+
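get_pci_bar_index() above collapses a PCI config-space BAR register offset
onto a slot of the driver's io_mem[] array: 0x18 and 0x1C (BAR2/BAR3,
plausibly one 64-bit BAR pair) share slot 1, 0x20 and 0x24 map to slots 2
and 3, and everything else falls back to slot 0. A standalone restatement
with a quick self-check; bar_index() is a hypothetical name.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Same mapping as get_pci_bar_index() in pm8001_chips.h. */
    static uint32_t bar_index(uint32_t pcibar)
    {
            switch (pcibar) {
            case 0x18:
            case 0x1C:
                    return 1;
            case 0x20:
                    return 2;
            case 0x24:
                    return 3;
            default:
                    return 0;
            }
    }

    int main(void)
    {
            assert(bar_index(0x18) == 1 && bar_index(0x1C) == 1);
            assert(bar_index(0x20) == 2);
            assert(bar_index(0x24) == 3);
            assert(bar_index(0x10) == 0);   /* unknown offsets -> slot 0 */
            puts("bar mapping ok");
            return 0;
    }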
diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c
new file mode 100644
index 000000000000..14b13acae6dd
--- /dev/null
+++ b/drivers/scsi/pm8001/pm8001_ctl.c
@@ -0,0 +1,573 @@
+/*
+ * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver
+ *
+ * Copyright (c) 2008-2009 USI Co., Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ */
+#include <linux/firmware.h>
+#include "pm8001_sas.h"
+#include "pm8001_ctl.h"
+
+/* scsi host attributes */
+
+/**
+ * pm8001_ctl_mpi_interface_rev_show - MPI interface revision number
+ * @cdev: pointer to embedded class device
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t pm8001_ctl_mpi_interface_rev_show(struct device *cdev,
+	struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+	struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+		pm8001_ha->main_cfg_tbl.interface_rev);
+}
+static
+DEVICE_ATTR(interface_rev, S_IRUGO, pm8001_ctl_mpi_interface_rev_show, NULL);
+
+/**
+ * pm8001_ctl_fw_version_show - firmware version
+ * @cdev: pointer to embedded class device
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t pm8001_ctl_fw_version_show(struct device *cdev,
+	struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+	struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+
+	return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x\n",
+		       (u8)(pm8001_ha->main_cfg_tbl.firmware_rev >> 24),
+		       (u8)(pm8001_ha->main_cfg_tbl.firmware_rev >> 16),
+		       (u8)(pm8001_ha->main_cfg_tbl.firmware_rev >> 8),
+		       (u8)(pm8001_ha->main_cfg_tbl.firmware_rev));
+}
+static DEVICE_ATTR(fw_version, S_IRUGO, pm8001_ctl_fw_version_show, NULL);
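pm8001_ctl_fw_version_show() treats main_cfg_tbl.firmware_rev as four bytes
packed into one 32-bit word and peels them off by right-shifting. A
standalone sketch of the same unpacking; the sample value is fabricated.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* Fabricated packed revision; the shifts below mirror the
             * %02x.%02x.%02x.%02x formatting in the show routine. */
            uint32_t firmware_rev = 0x01020304;

            printf("%02x.%02x.%02x.%02x\n",
                   (unsigned)((firmware_rev >> 24) & 0xff),
                   (unsigned)((firmware_rev >> 16) & 0xff),
                   (unsigned)((firmware_rev >> 8) & 0xff),
                   (unsigned)(firmware_rev & 0xff)); /* 01.02.03.04 */
            return 0;
    }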
+/**
+ * pm8001_ctl_max_out_io_show - max outstanding io supported
+ * @cdev: pointer to embedded class device
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t pm8001_ctl_max_out_io_show(struct device *cdev,
+	struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+	struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+			pm8001_ha->main_cfg_tbl.max_out_io);
+}
+static DEVICE_ATTR(max_out_io, S_IRUGO, pm8001_ctl_max_out_io_show, NULL);
+/**
+ * pm8001_ctl_max_devices_show - max devices supported
+ * @cdev: pointer to embedded class device
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t pm8001_ctl_max_devices_show(struct device *cdev,
+	struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+	struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+
+	return snprintf(buf, PAGE_SIZE, "%04d\n",
+			(u16)(pm8001_ha->main_cfg_tbl.max_sgl >> 16));
+}
+static DEVICE_ATTR(max_devices, S_IRUGO, pm8001_ctl_max_devices_show, NULL);
+/**
+ * pm8001_ctl_max_sg_list_show - maximum scatter-gather list size supported;
+ * a value of 0 means no hardware limitation
+ * @cdev: pointer to embedded class device
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t pm8001_ctl_max_sg_list_show(struct device *cdev,
+	struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+	struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+
+	return snprintf(buf, PAGE_SIZE, "%04d\n",
+			pm8001_ha->main_cfg_tbl.max_sgl & 0x0000FFFF);
+}
+static DEVICE_ATTR(max_sg_list, S_IRUGO, pm8001_ctl_max_sg_list_show, NULL);
+
+#define SAS_1_0 0x1
+#define SAS_1_1 0x2
+#define SAS_2_0 0x4
+
+static ssize_t
+show_sas_spec_support_status(unsigned int mode, char *buf)
+{
+	ssize_t len = 0;
+
+	if (mode & SAS_1_1)
+		len = sprintf(buf, "%s", "SAS1.1");
+	if (mode & SAS_2_0)
+		len += sprintf(buf + len, "%s%s", len ? ", " : "", "SAS2.0");
+	len += sprintf(buf + len, "\n");
+
+	return len;
+}
+
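show_sas_spec_support_status() above builds its answer with the
accumulate-into-buffer idiom: each matched flag appends its name, and the
", " separator is emitted only once something has already been written
(len != 0); note that SAS_1_0 is defined but never printed. A userspace
sketch of the same idiom, with the exact output shown in comments:

    #include <stdio.h>

    #define SAS_1_1 0x2
    #define SAS_2_0 0x4

    static int spec_string(unsigned int mode, char *buf)
    {
            int len = 0;

            if (mode & SAS_1_1)
                    len += sprintf(buf + len, "%s", "SAS1.1");
            if (mode & SAS_2_0)
                    len += sprintf(buf + len, "%s%s",
                                   len ? ", " : "", "SAS2.0");
            len += sprintf(buf + len, "\n");
            return len;
    }

    int main(void)
    {
            char buf[64];

            spec_string(SAS_1_1 | SAS_2_0, buf);
            fputs(buf, stdout);             /* SAS1.1, SAS2.0 */
            spec_string(SAS_2_0, buf);
            fputs(buf, stdout);             /* SAS2.0 */
            return 0;
    }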
+/**
+ * pm8001_ctl_sas_spec_support_show - sas spec supported
+ * @cdev: pointer to embedded class device
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t pm8001_ctl_sas_spec_support_show(struct device *cdev,
+	struct device_attribute *attr, char *buf)
+{
+	unsigned int mode;
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+	struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+	mode = (pm8001_ha->main_cfg_tbl.ctrl_cap_flag & 0xfe000000)>>25;
+	return show_sas_spec_support_status(mode, buf);
+}
+static DEVICE_ATTR(sas_spec_support, S_IRUGO,
+		   pm8001_ctl_sas_spec_support_show, NULL);
+
+/**
+ * pm8001_ctl_host_sas_address_show - sas address
+ * @cdev: pointer to embedded class device
+ * @buf: the buffer returned
+ *
+ * This is the controller sas address
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t pm8001_ctl_host_sas_address_show(struct device *cdev,
+	struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+	struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+	return snprintf(buf, PAGE_SIZE, "0x%016llx\n",
+			be64_to_cpu(*(__be64 *)pm8001_ha->sas_addr));
+}
+static DEVICE_ATTR(host_sas_address, S_IRUGO,
+		   pm8001_ctl_host_sas_address_show, NULL);
+
+/**
+ * pm8001_ctl_logging_level_show - logging level
+ * @cdev: pointer to embedded class device
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read/write' shost attribute.
+ */
+static ssize_t pm8001_ctl_logging_level_show(struct device *cdev,
+	struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+	struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+
+	return snprintf(buf, PAGE_SIZE, "%08xh\n", pm8001_ha->logging_level);
+}
+static ssize_t pm8001_ctl_logging_level_store(struct device *cdev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+	struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+	int val = 0;
+
+	if (sscanf(buf, "%x", &val) != 1)
+		return -EINVAL;
+
+	pm8001_ha->logging_level = val;
+	return strlen(buf);
+}
+
+static DEVICE_ATTR(logging_level, S_IRUGO | S_IWUSR,
+	pm8001_ctl_logging_level_show, pm8001_ctl_logging_level_store);
+/**
+ * pm8001_ctl_aap_log_show - aap1 event log
+ * @cdev: pointer to embedded class device
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t pm8001_ctl_aap_log_show(struct device *cdev,
+	struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+	struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+	int i;
+#define AAP1_MEMMAP(r, c) \
+	(*(u32 *)((u8 *)pm8001_ha->memoryMap.region[AAP1].virt_ptr + (r) * 32 \
+	+ (c)))
+
+	char *str = buf;
+	int max = 2;
+	for (i = 0; i < max; i++) {
+		str += sprintf(str, "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x "
+			       "0x%08x 0x%08x\n",
+			       AAP1_MEMMAP(i, 0),
+			       AAP1_MEMMAP(i, 4),
+			       AAP1_MEMMAP(i, 8),
+			       AAP1_MEMMAP(i, 12),
+			       AAP1_MEMMAP(i, 16),
+			       AAP1_MEMMAP(i, 20),
+			       AAP1_MEMMAP(i, 24),
+			       AAP1_MEMMAP(i, 28));
+	}
+
+	return str - buf;
+}
+static DEVICE_ATTR(aap_log, S_IRUGO, pm8001_ctl_aap_log_show, NULL);
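The AAP1_MEMMAP() macro above (and IOP_MEMMAP() below) addresses the
event-log region by byte offset, (row) * 32 + (column), reinterpreting each
position as a 32-bit word. A self-contained sketch of that addressing over
a fabricated two-row buffer:

    #include <stdint.h>
    #include <stdio.h>

    /* Same (row, byte-column) addressing as AAP1_MEMMAP()/IOP_MEMMAP():
     * 32-byte rows, one 32-bit word per 4-byte column. */
    #define LOG_WORD(base, r, c) \
            (*(uint32_t *)((uint8_t *)(base) + (r) * 32 + (c)))

    int main(void)
    {
            static uint32_t backing[2 * 8];     /* 2 rows x 32 bytes */
            uint8_t *region = (uint8_t *)backing;
            int r, c;

            for (r = 0; r < 2; r++)
                    for (c = 0; c < 32; c += 4)
                            LOG_WORD(region, r, c) = (uint32_t)((r << 8) | c);

            for (r = 0; r < 2; r++) {
                    for (c = 0; c < 32; c += 4)
                            printf("0x%08x ", LOG_WORD(region, r, c));
                    printf("\n");
            }
            return 0;
    }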
+/**
+ * pm8001_ctl_iop_log_show - IOP event log
+ * @cdev: pointer to embedded class device
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t pm8001_ctl_iop_log_show(struct device *cdev,
+	struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+	struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+#define IOP_MEMMAP(r, c) \
+	(*(u32 *)((u8 *)pm8001_ha->memoryMap.region[IOP].virt_ptr + (r) * 32 \
+	+ (c)))
+	int i;
+	char *str = buf;
+	int max = 2;
+	for (i = 0; i < max; i++) {
+		str += sprintf(str, "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x "
+			       "0x%08x 0x%08x\n",
+			       IOP_MEMMAP(i, 0),
+			       IOP_MEMMAP(i, 4),
+			       IOP_MEMMAP(i, 8),
+			       IOP_MEMMAP(i, 12),
+			       IOP_MEMMAP(i, 16),
+			       IOP_MEMMAP(i, 20),
+			       IOP_MEMMAP(i, 24),
+			       IOP_MEMMAP(i, 28));
+	}
+
+	return str - buf;
+}
+static DEVICE_ATTR(iop_log, S_IRUGO, pm8001_ctl_iop_log_show, NULL);
+
+#define FLASH_CMD_NONE      0x00
+#define FLASH_CMD_UPDATE    0x01
+#define FLASH_CMD_SET_NVMD    0x02
+
+struct flash_command {
+     u8      command[8];
+     int     code;
+};
+
+static struct flash_command flash_command_table[] =
+{
+     {"set_nvmd",    FLASH_CMD_SET_NVMD},
+     {"update",      FLASH_CMD_UPDATE},
+     {"",            FLASH_CMD_NONE} /* Last entry should be NULL. */
+};
+
+struct error_fw {
+     char    *reason;
+     int     err_code;
+};
+
+static struct error_fw flash_error_table[] =
+{
+     {"Failed to open fw image file",	FAIL_OPEN_BIOS_FILE},
+     {"image header mismatch",		FLASH_UPDATE_HDR_ERR},
+     {"image offset mismatch",		FLASH_UPDATE_OFFSET_ERR},
+     {"image CRC Error",		FLASH_UPDATE_CRC_ERR},
+     {"image length Error.",		FLASH_UPDATE_LENGTH_ERR},
+     {"Failed to program flash chip",	FLASH_UPDATE_HW_ERR},
+     {"Flash chip not supported.",	FLASH_UPDATE_DNLD_NOT_SUPPORTED},
+     {"Flash update disabled.",		FLASH_UPDATE_DISABLED},
+     {"Flash in progress",		FLASH_IN_PROGRESS},
+     {"Image file size Error",		FAIL_FILE_SIZE},
+     {"Input parameter error",		FAIL_PARAMETERS},
+     {"Out of memory",			FAIL_OUT_MEMORY},
+     {"OK", 0}	/* Last entry err_code = 0. */
+};
+
+static int pm8001_set_nvmd(struct pm8001_hba_info *pm8001_ha)
+{
+	struct pm8001_ioctl_payload	*payload;
+	DECLARE_COMPLETION_ONSTACK(completion);
+	u8		*ioctlbuffer = NULL;
+	u32		length = 0;
+	u32		ret = 0;
+
+	length = 1024 * 5 + sizeof(*payload) - 1;
+	ioctlbuffer = kzalloc(length, GFP_KERNEL);
+	if (!ioctlbuffer)
+		return -ENOMEM;
+	if ((pm8001_ha->fw_image->size <= 0) ||
+	    (pm8001_ha->fw_image->size > 4096)) {
+		ret = FAIL_FILE_SIZE;
+		goto out;
+	}
+	payload = (struct pm8001_ioctl_payload *)ioctlbuffer;
+	memcpy((u8 *)payload->func_specific, (u8 *)pm8001_ha->fw_image->data,
+				pm8001_ha->fw_image->size);
+	payload->length = pm8001_ha->fw_image->size;
+	payload->id = 0;
+	pm8001_ha->nvmd_completion = &completion;
+	ret = PM8001_CHIP_DISP->set_nvmd_req(pm8001_ha, payload);
+	wait_for_completion(&completion);
+out:
+	kfree(ioctlbuffer);
+	return ret;
+}
+
+static int pm8001_update_flash(struct pm8001_hba_info *pm8001_ha)
+{
+	struct pm8001_ioctl_payload	*payload;
+	DECLARE_COMPLETION_ONSTACK(completion);
+	u8		*ioctlbuffer = NULL;
+	u32		length = 0;
+	struct fw_control_info	*fwControl;
+	u32		loopNumber, loopcount = 0;
+	u32		sizeRead = 0;
+	u32		partitionSize, partitionSizeTmp;
+	u32		ret = 0;
+	u32		partitionNumber = 0;
+	struct pm8001_fw_image_header *image_hdr;
+
+	length = 1024 * 16 + sizeof(*payload) - 1;
+	ioctlbuffer = kzalloc(length, GFP_KERNEL);
+	image_hdr = (struct pm8001_fw_image_header *)pm8001_ha->fw_image->data;
+	if (!ioctlbuffer)
+		return -ENOMEM;
+	if (pm8001_ha->fw_image->size < 28) {
+		ret = FAIL_FILE_SIZE;
+		goto out;
+	}
+
+	while (sizeRead < pm8001_ha->fw_image->size) {
+		partitionSizeTmp =
+			*(u32 *)((u8 *)&image_hdr->image_length + sizeRead);
+		partitionSize = be32_to_cpu(partitionSizeTmp);
+		loopcount = (partitionSize + HEADER_LEN) / IOCTL_BUF_SIZE;
+		if ((partitionSize + HEADER_LEN) % IOCTL_BUF_SIZE)
+			loopcount++;
+		if (loopcount == 0)
+			loopcount++;
+		for (loopNumber = 0; loopNumber < loopcount; loopNumber++) {
+			payload = (struct pm8001_ioctl_payload *)ioctlbuffer;
+			payload->length = 1024*16;
+			payload->id = 0;
+			fwControl =
+			      (struct fw_control_info *)payload->func_specific;
+			fwControl->len = IOCTL_BUF_SIZE;   /* IN */
+			fwControl->size = partitionSize + HEADER_LEN;/* IN */
+			fwControl->retcode = 0;/* OUT */
+			fwControl->offset = loopNumber * IOCTL_BUF_SIZE;/*OUT */
+
+			/* for the last chunk of data, in case the file size
+			is not a multiple of 4k, load only the rest */
+			if (((loopcount - loopNumber) == 1) &&
+			    ((partitionSize + HEADER_LEN) % IOCTL_BUF_SIZE)) {
+				fwControl->len =
+				 (partitionSize + HEADER_LEN) % IOCTL_BUF_SIZE;
+				memcpy((u8 *)fwControl->buffer,
+				 (u8 *)pm8001_ha->fw_image->data + sizeRead,
+				 (partitionSize + HEADER_LEN) % IOCTL_BUF_SIZE);
+				sizeRead +=
+				 (partitionSize + HEADER_LEN) % IOCTL_BUF_SIZE;
+			} else {
+				memcpy((u8 *)fwControl->buffer,
+				 (u8 *)pm8001_ha->fw_image->data + sizeRead,
+				 IOCTL_BUF_SIZE);
+				sizeRead += IOCTL_BUF_SIZE;
+			}
+
+			pm8001_ha->nvmd_completion = &completion;
+			ret = PM8001_CHIP_DISP->fw_flash_update_req(pm8001_ha,
+								    payload);
+			wait_for_completion(&completion);
+			if (ret ||
+			    (fwControl->retcode > FLASH_UPDATE_IN_PROGRESS)) {
+				ret = fwControl->retcode;
+				kfree(ioctlbuffer);
+				ioctlbuffer = NULL;
+				break;
+			}
+		}
+		if (ret)
+			break;
+		partitionNumber++;
+	}
+out:
+	kfree(ioctlbuffer);
+	return ret;
+}
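The loop in pm8001_update_flash() rounds each partition (payload plus
HEADER_LEN) up to whole IOCTL_BUF_SIZE chunks and carries only the remainder
in the final chunk. A standalone walk-through of that chunking arithmetic;
the partition size is fabricated.

    #include <stdio.h>

    #define IOCTL_BUF_SIZE 4096
    #define HEADER_LEN     28

    int main(void)
    {
            unsigned int partition_size = 10000;    /* fabricated */
            unsigned int total = partition_size + HEADER_LEN;
            unsigned int loopcount = total / IOCTL_BUF_SIZE;
            unsigned int n, sent = 0;

            /* Round up, and always make at least one pass. */
            if (total % IOCTL_BUF_SIZE)
                    loopcount++;
            if (loopcount == 0)
                    loopcount++;

            for (n = 0; n < loopcount; n++) {
                    unsigned int len = IOCTL_BUF_SIZE;

                    /* The last chunk carries only the remainder. */
                    if (n == loopcount - 1 && total % IOCTL_BUF_SIZE)
                            len = total % IOCTL_BUF_SIZE;
                    sent += len;
                    printf("chunk %u: %u bytes at offset %u\n",
                           n, len, n * IOCTL_BUF_SIZE);
            }
            printf("sent %u of %u bytes\n", sent, total);   /* equal */
            return 0;
    }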
+static ssize_t pm8001_store_update_fw(struct device *cdev,
+				      struct device_attribute *attr,
+				      const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+	struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+	char *cmd_ptr, *filename_ptr;
+	int res, i;
+	int flash_command = FLASH_CMD_NONE;
+	int err = 0;
+	if (!capable(CAP_SYS_ADMIN))
+		return -EACCES;
+
+	cmd_ptr = kzalloc(count*2, GFP_KERNEL);
+
+	if (!cmd_ptr) {
+		err = FAIL_OUT_MEMORY;
+		goto out;
+	}
+
+	filename_ptr = cmd_ptr + count;
+	res = sscanf(buf, "%s %s", cmd_ptr, filename_ptr);
+	if (res != 2) {
+		err = FAIL_PARAMETERS;
+		goto out1;
+	}
+
+	for (i = 0; flash_command_table[i].code != FLASH_CMD_NONE; i++) {
+		if (!memcmp(flash_command_table[i].command,
+				 cmd_ptr, strlen(cmd_ptr))) {
+			flash_command = flash_command_table[i].code;
+			break;
+		}
+	}
+	if (flash_command == FLASH_CMD_NONE) {
+		err = FAIL_PARAMETERS;
+		goto out1;
+	}
+
+	if (pm8001_ha->fw_status == FLASH_IN_PROGRESS) {
+		err = FLASH_IN_PROGRESS;
+		goto out1;
+	}
+	err = request_firmware(&pm8001_ha->fw_image,
+			       filename_ptr,
+			       pm8001_ha->dev);
+
+	if (err) {
+		PM8001_FAIL_DBG(pm8001_ha,
+			pm8001_printk("Failed to load firmware image file %s,"
+			" error %d\n", filename_ptr, err));
+		err = FAIL_OPEN_BIOS_FILE;
+		goto out1;
+	}
+
+	switch (flash_command) {
+	case FLASH_CMD_UPDATE:
+		pm8001_ha->fw_status = FLASH_IN_PROGRESS;
+		err = pm8001_update_flash(pm8001_ha);
+		break;
+	case FLASH_CMD_SET_NVMD:
+		pm8001_ha->fw_status = FLASH_IN_PROGRESS;
+		err = pm8001_set_nvmd(pm8001_ha);
+		break;
+	default:
+		pm8001_ha->fw_status = FAIL_PARAMETERS;
+		err = FAIL_PARAMETERS;
+		break;
+	}
+	release_firmware(pm8001_ha->fw_image);
+out1:
+	kfree(cmd_ptr);
+out:
+	pm8001_ha->fw_status = err;
+
+	if (!err)
+		return count;
+	else
+		return -err;
+}
+
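pm8001_store_update_fw() above splits its input into a command word and a
firmware filename, then resolves the command through flash_command_table.
Note that the memcmp(..., strlen(cmd_ptr)) comparison matches whenever the
typed word is a prefix of a table entry; the sketch below uses an exact
strcmp() match instead, and its table values are illustrative.

    #include <stdio.h>
    #include <string.h>

    struct command { const char *name; int code; };

    static const struct command table[] = {
            { "set_nvmd", 2 },
            { "update",   1 },
            { NULL,       0 },              /* sentinel */
    };

    static int lookup(const char *cmd)
    {
            int i;

            for (i = 0; table[i].name; i++)
                    if (!strcmp(table[i].name, cmd))
                            return table[i].code;
            return 0;                       /* no match */
    }

    int main(void)
    {
            char cmd[32], file[64];

            /* Mirrors: sscanf(buf, "%s %s", cmd_ptr, filename_ptr) */
            if (sscanf("update spcflash.img", "%31s %63s", cmd, file) == 2)
                    printf("cmd=%d file=%s\n", lookup(cmd), file);
            return 0;
    }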
+static ssize_t pm8001_show_update_fw(struct device *cdev,
+				     struct device_attribute *attr, char *buf)
+{
+	int i;
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+	struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+
+	for (i = 0; flash_error_table[i].err_code != 0; i++) {
+		if (flash_error_table[i].err_code == pm8001_ha->fw_status)
+			break;
+	}
+	if (pm8001_ha->fw_status != FLASH_IN_PROGRESS)
+		pm8001_ha->fw_status = FLASH_OK;
+
+	return snprintf(buf, PAGE_SIZE, "status=%x %s\n",
+			flash_error_table[i].err_code,
+			flash_error_table[i].reason);
+}
+
+static DEVICE_ATTR(update_fw, S_IRUGO|S_IWUGO,
+	pm8001_show_update_fw, pm8001_store_update_fw);
+struct device_attribute *pm8001_host_attrs[] = {
+	&dev_attr_interface_rev,
+	&dev_attr_fw_version,
+	&dev_attr_update_fw,
+	&dev_attr_aap_log,
+	&dev_attr_iop_log,
+	&dev_attr_max_out_io,
+	&dev_attr_max_devices,
+	&dev_attr_max_sg_list,
+	&dev_attr_sas_spec_support,
+	&dev_attr_logging_level,
+	&dev_attr_host_sas_address,
+	NULL,
+};
+
diff --git a/drivers/scsi/pm8001/pm8001_ctl.h b/drivers/scsi/pm8001/pm8001_ctl.h
new file mode 100644
index 000000000000..22644de26399
--- /dev/null
+++ b/drivers/scsi/pm8001/pm8001_ctl.h
@@ -0,0 +1,67 @@
+ /*
+  * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver
+  *
+  * Copyright (c) 2008-2009 USI Co., Ltd.
+  * All rights reserved.
+  *
+  * Redistribution and use in source and binary forms, with or without
+  * modification, are permitted provided that the following conditions
+  * are met:
+  * 1. Redistributions of source code must retain the above copyright
+  *    notice, this list of conditions, and the following disclaimer,
+  *    without modification.
+  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+  *    substantially similar to the "NO WARRANTY" disclaimer below
+  *    ("Disclaimer") and any redistribution must be conditioned upon
+  *    including a substantially similar Disclaimer requirement for further
+  *    binary redistribution.
+  * 3. Neither the names of the above-listed copyright holders nor the names
+  *    of any contributors may be used to endorse or promote products derived
+  *    from this software without specific prior written permission.
+  *
+  * Alternatively, this software may be distributed under the terms of the
+  * GNU General Public License ("GPL") version 2 as published by the Free
+  * Software Foundation.
+  *
+  * NO WARRANTY
+  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  * POSSIBILITY OF SUCH DAMAGES.
+  *
+  */
+
+#ifndef PM8001_CTL_H_INCLUDED
+#define PM8001_CTL_H_INCLUDED
+
+#define IOCTL_BUF_SIZE		4096
+#define HEADER_LEN			28
+#define SIZE_OFFSET			16
+
+struct pm8001_ioctl_payload {
+	u32	signature;
+	u16	major_function;
+	u16	minor_function;
+	u16	length;
+	u16	status;
+	u16	offset;
+	u16	id;
+	u8	func_specific[1];
+};
+
+#define FLASH_OK                        0x000000
+#define FAIL_OPEN_BIOS_FILE             0x000100
+#define FAIL_FILE_SIZE                  0x000a00
+#define FAIL_PARAMETERS                 0x000b00
+#define FAIL_OUT_MEMORY                 0x000c00
+#define FLASH_IN_PROGRESS               0x001000
+
+#endif /* PM8001_CTL_H_INCLUDED */
+
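func_specific[1] in struct pm8001_ioctl_payload is the pre-C99 "struct
hack": a one-byte placeholder standing in for a variable-length tail, which
is why the buffers in pm8001_ctl.c are sized as data length plus
sizeof(*payload) - 1. A minimal sketch of that sizing; struct payload and
alloc_payload() are hypothetical names.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct payload {
            uint16_t length;
            uint8_t  func_specific[1];      /* really variable-length */
    };

    static struct payload *alloc_payload(size_t data_len)
    {
            /* The -1 cancels the placeholder byte, matching the
             * "1024 * N + sizeof(*payload) - 1" sizing in the driver. */
            struct payload *p = calloc(1, sizeof(*p) - 1 + data_len);

            if (p)
                    p->length = (uint16_t)data_len;
            return p;
    }

    int main(void)
    {
            struct payload *p = alloc_payload(64);

            memset(p->func_specific, 0xAB, p->length);
            printf("payload with %u data bytes\n", p->length);
            free(p);
            return 0;
    }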
diff --git a/drivers/scsi/pm8001/pm8001_defs.h b/drivers/scsi/pm8001/pm8001_defs.h
new file mode 100644
index 000000000000..944afada61ee
--- /dev/null
+++ b/drivers/scsi/pm8001/pm8001_defs.h
@@ -0,0 +1,112 @@
+/*
+ * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver
+ *
+ * Copyright (c) 2008-2009 USI Co., Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ */
+
+#ifndef _PM8001_DEFS_H_
+#define _PM8001_DEFS_H_
+
+enum chip_flavors {
+	chip_8001,
+};
+#define USI_MAX_MEMCNT			9
+#define PM8001_MAX_DMA_SG		SG_ALL
+enum phy_speed {
+	PHY_SPEED_15 = 0x01,
+	PHY_SPEED_30 = 0x02,
+	PHY_SPEED_60 = 0x04,
+};
+
+enum data_direction {
+	DATA_DIR_NONE = 0x0,	/* NO TRANSFER */
+	DATA_DIR_IN = 0x01,	/* INBOUND */
+	DATA_DIR_OUT = 0x02,	/* OUTBOUND */
+	DATA_DIR_BYRECIPIENT = 0x04, /* UNSPECIFIED */
+};
+
+enum port_type {
+	PORT_TYPE_SAS = (1L << 1),
+	PORT_TYPE_SATA = (1L << 0),
+};
+
+/* driver compile-time configuration */
+#define	PM8001_MAX_CCB		 512	/* max ccbs supported */
+#define	PM8001_MAX_INB_NUM	 1
+#define	PM8001_MAX_OUTB_NUM	 1
+#define	PM8001_CAN_QUEUE	 128	/* SCSI Queue depth */
+
+/* unchangeable hardware details */
+#define	PM8001_MAX_PHYS		 8	/* max. possible phys */
+#define	PM8001_MAX_PORTS	 8	/* max. possible ports */
+#define	PM8001_MAX_DEVICES	 1024	/* max supported device */
+
+enum memory_region_num {
+	AAP1 = 0x0, /* application acceleration processor */
+	IOP,	    /* IO processor */
+	CI,	    /* consumer index */
+	PI,	    /* producer index */
+	IB,	    /* inbound queue */
+	OB,	    /* outbound queue */
+	NVMD,	    /* NVM device */
+	DEV_MEM,    /* memory for devices */
+	CCB_MEM,    /* memory for command control block */
+};
+#define	PM8001_EVENT_LOG_SIZE	 (128 * 1024)
+
+/* error code */
+enum mpi_err {
+	MPI_IO_STATUS_SUCCESS = 0x0,
+	MPI_IO_STATUS_BUSY = 0x01,
+	MPI_IO_STATUS_FAIL = 0x02,
+};
+
+/**
+ * Phy Control constants
+ */
+enum phy_control_type {
+	PHY_LINK_RESET = 0x01,
+	PHY_HARD_RESET = 0x02,
+	PHY_NOTIFY_ENABLE_SPINUP = 0x10,
+};
+
+enum pm8001_hba_info_flags {
+	PM8001F_INIT_TIME	= (1U << 0),
+	PM8001F_RUN_TIME	= (1U << 1),
+};
+
+#endif
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
new file mode 100644
index 000000000000..a3de306b9045
--- /dev/null
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -0,0 +1,4458 @@
+/*
+ * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver
+ *
+ * Copyright (c) 2008-2009 USI Co., Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ */
+ #include "pm8001_sas.h"
+ #include "pm8001_hwi.h"
+ #include "pm8001_chips.h"
+ #include "pm8001_ctl.h"
+
+/**
+ * read_main_config_table - read the configuration table and save it.
+ * @pm8001_ha: our hba card information
+ */
+static void __devinit read_main_config_table(struct pm8001_hba_info *pm8001_ha)
+{
+	void __iomem *address = pm8001_ha->main_cfg_tbl_addr;
+	pm8001_ha->main_cfg_tbl.signature	= pm8001_mr32(address, 0x00);
+	pm8001_ha->main_cfg_tbl.interface_rev	= pm8001_mr32(address, 0x04);
+	pm8001_ha->main_cfg_tbl.firmware_rev	= pm8001_mr32(address, 0x08);
+	pm8001_ha->main_cfg_tbl.max_out_io	= pm8001_mr32(address, 0x0C);
+	pm8001_ha->main_cfg_tbl.max_sgl		= pm8001_mr32(address, 0x10);
+	pm8001_ha->main_cfg_tbl.ctrl_cap_flag	= pm8001_mr32(address, 0x14);
+	pm8001_ha->main_cfg_tbl.gst_offset	= pm8001_mr32(address, 0x18);
+	pm8001_ha->main_cfg_tbl.inbound_queue_offset =
+		pm8001_mr32(address, MAIN_IBQ_OFFSET);
+	pm8001_ha->main_cfg_tbl.outbound_queue_offset =
+		pm8001_mr32(address, MAIN_OBQ_OFFSET);
+	pm8001_ha->main_cfg_tbl.hda_mode_flag	=
+		pm8001_mr32(address, MAIN_HDA_FLAGS_OFFSET);
+
+	/* read analog Setting offset from the configuration table */
+	pm8001_ha->main_cfg_tbl.anolog_setup_table_offset =
+		pm8001_mr32(address, MAIN_ANALOG_SETUP_OFFSET);
+
+	/* read Error Dump Offset and Length */
+	pm8001_ha->main_cfg_tbl.fatal_err_dump_offset0 =
+		pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_OFFSET);
+	pm8001_ha->main_cfg_tbl.fatal_err_dump_length0 =
+		pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_LENGTH);
+	pm8001_ha->main_cfg_tbl.fatal_err_dump_offset1 =
+		pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_OFFSET);
+	pm8001_ha->main_cfg_tbl.fatal_err_dump_length1 =
+		pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_LENGTH);
+}
+
+/**
+ * read_general_status_table - read the general status table and save it.
+ * @pm8001_ha: our hba card information
+ */
+static void __devinit
+read_general_status_table(struct pm8001_hba_info *pm8001_ha)
+{
+	void __iomem *address = pm8001_ha->general_stat_tbl_addr;
+	pm8001_ha->gs_tbl.gst_len_mpistate	= pm8001_mr32(address, 0x00);
+	pm8001_ha->gs_tbl.iq_freeze_state0	= pm8001_mr32(address, 0x04);
+	pm8001_ha->gs_tbl.iq_freeze_state1	= pm8001_mr32(address, 0x08);
+	pm8001_ha->gs_tbl.msgu_tcnt		= pm8001_mr32(address, 0x0C);
+	pm8001_ha->gs_tbl.iop_tcnt		= pm8001_mr32(address, 0x10);
+	pm8001_ha->gs_tbl.reserved		= pm8001_mr32(address, 0x14);
+	pm8001_ha->gs_tbl.phy_state[0]	= pm8001_mr32(address, 0x18);
+	pm8001_ha->gs_tbl.phy_state[1]	= pm8001_mr32(address, 0x1C);
+	pm8001_ha->gs_tbl.phy_state[2]	= pm8001_mr32(address, 0x20);
+	pm8001_ha->gs_tbl.phy_state[3]	= pm8001_mr32(address, 0x24);
+	pm8001_ha->gs_tbl.phy_state[4]	= pm8001_mr32(address, 0x28);
+	pm8001_ha->gs_tbl.phy_state[5]	= pm8001_mr32(address, 0x2C);
+	pm8001_ha->gs_tbl.phy_state[6]	= pm8001_mr32(address, 0x30);
+	pm8001_ha->gs_tbl.phy_state[7]	= pm8001_mr32(address, 0x34);
+	pm8001_ha->gs_tbl.reserved1		= pm8001_mr32(address, 0x38);
+	pm8001_ha->gs_tbl.reserved2		= pm8001_mr32(address, 0x3C);
+	pm8001_ha->gs_tbl.reserved3		= pm8001_mr32(address, 0x40);
+	pm8001_ha->gs_tbl.recover_err_info[0]	= pm8001_mr32(address, 0x44);
+	pm8001_ha->gs_tbl.recover_err_info[1]	= pm8001_mr32(address, 0x48);
+	pm8001_ha->gs_tbl.recover_err_info[2]	= pm8001_mr32(address, 0x4C);
+	pm8001_ha->gs_tbl.recover_err_info[3]	= pm8001_mr32(address, 0x50);
+	pm8001_ha->gs_tbl.recover_err_info[4]	= pm8001_mr32(address, 0x54);
+	pm8001_ha->gs_tbl.recover_err_info[5]	= pm8001_mr32(address, 0x58);
+	pm8001_ha->gs_tbl.recover_err_info[6]	= pm8001_mr32(address, 0x5C);
+	pm8001_ha->gs_tbl.recover_err_info[7]	= pm8001_mr32(address, 0x60);
+}
+
+/**
+ * read_inbnd_queue_table - read the inbound queue table and save it.
+ * @pm8001_ha: our hba card information
+ */
+static void __devinit
+read_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha)
+{
+	int inbQ_num = 1;
+	int i;
+	void __iomem *address = pm8001_ha->inbnd_q_tbl_addr;
+	for (i = 0; i < inbQ_num; i++) {
+		u32 offset = i * 0x20;
+		pm8001_ha->inbnd_q_tbl[i].pi_pci_bar =
+		      get_pci_bar_index(pm8001_mr32(address, (offset + 0x14)));
+		pm8001_ha->inbnd_q_tbl[i].pi_offset =
+			pm8001_mr32(address, (offset + 0x18));
+	}
+}
+
+/**
+ * read_outbnd_queue_table - read the outbound queue table and save it.
+ * @pm8001_ha: our hba card information
+ */
+static void __devinit
+read_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha)
+{
+	int outbQ_num = 1;
+	int i;
+	void __iomem *address = pm8001_ha->outbnd_q_tbl_addr;
+	for (i = 0; i < outbQ_num; i++) {
+		u32 offset = i * 0x24;
+		pm8001_ha->outbnd_q_tbl[i].ci_pci_bar =
+		      get_pci_bar_index(pm8001_mr32(address, (offset + 0x14)));
+		pm8001_ha->outbnd_q_tbl[i].ci_offset =
+			pm8001_mr32(address, (offset + 0x18));
+	}
+}
+
+/**
+ * init_default_table_values - init the default table.
+ * @pm8001_ha: our hba card information
+ */
+static void __devinit
+init_default_table_values(struct pm8001_hba_info *pm8001_ha)
+{
+	int qn = 1;
+	int i;
+	u32 offsetib, offsetob;
+	void __iomem *addressib = pm8001_ha->inbnd_q_tbl_addr;
+	void __iomem *addressob = pm8001_ha->outbnd_q_tbl_addr;
+
+	pm8001_ha->main_cfg_tbl.inbound_q_nppd_hppd			= 0;
+	pm8001_ha->main_cfg_tbl.outbound_hw_event_pid0_3 		= 0;
+	pm8001_ha->main_cfg_tbl.outbound_hw_event_pid4_7		= 0;
+	pm8001_ha->main_cfg_tbl.outbound_ncq_event_pid0_3		= 0;
+	pm8001_ha->main_cfg_tbl.outbound_ncq_event_pid4_7		= 0;
+	pm8001_ha->main_cfg_tbl.outbound_tgt_ITNexus_event_pid0_3	= 0;
+	pm8001_ha->main_cfg_tbl.outbound_tgt_ITNexus_event_pid4_7	= 0;
+	pm8001_ha->main_cfg_tbl.outbound_tgt_ssp_event_pid0_3	= 0;
+	pm8001_ha->main_cfg_tbl.outbound_tgt_ssp_event_pid4_7	= 0;
+	pm8001_ha->main_cfg_tbl.outbound_tgt_smp_event_pid0_3	= 0;
+	pm8001_ha->main_cfg_tbl.outbound_tgt_smp_event_pid4_7	= 0;
+
+	pm8001_ha->main_cfg_tbl.upper_event_log_addr		=
+		pm8001_ha->memoryMap.region[AAP1].phys_addr_hi;
+	pm8001_ha->main_cfg_tbl.lower_event_log_addr		=
+		pm8001_ha->memoryMap.region[AAP1].phys_addr_lo;
+	pm8001_ha->main_cfg_tbl.event_log_size	= PM8001_EVENT_LOG_SIZE;
+	pm8001_ha->main_cfg_tbl.event_log_option		= 0x01;
+	pm8001_ha->main_cfg_tbl.upper_iop_event_log_addr	=
+		pm8001_ha->memoryMap.region[IOP].phys_addr_hi;
+	pm8001_ha->main_cfg_tbl.lower_iop_event_log_addr	=
+		pm8001_ha->memoryMap.region[IOP].phys_addr_lo;
+	pm8001_ha->main_cfg_tbl.iop_event_log_size	= PM8001_EVENT_LOG_SIZE;
+	pm8001_ha->main_cfg_tbl.iop_event_log_option		= 0x01;
+	pm8001_ha->main_cfg_tbl.fatal_err_interrupt		= 0x01;
+	for (i = 0; i < qn; i++) {
+		pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt	=
+			0x00000100 | (0x00000040 << 16) | (0x00<<30);
+		pm8001_ha->inbnd_q_tbl[i].upper_base_addr	=
+			pm8001_ha->memoryMap.region[IB].phys_addr_hi;
+		pm8001_ha->inbnd_q_tbl[i].lower_base_addr	=
+		pm8001_ha->memoryMap.region[IB].phys_addr_lo;
+		pm8001_ha->inbnd_q_tbl[i].base_virt		=
+			(u8 *)pm8001_ha->memoryMap.region[IB].virt_ptr;
+		pm8001_ha->inbnd_q_tbl[i].total_length		=
+			pm8001_ha->memoryMap.region[IB].total_len;
+		pm8001_ha->inbnd_q_tbl[i].ci_upper_base_addr	=
+			pm8001_ha->memoryMap.region[CI].phys_addr_hi;
+		pm8001_ha->inbnd_q_tbl[i].ci_lower_base_addr	=
+			pm8001_ha->memoryMap.region[CI].phys_addr_lo;
+		pm8001_ha->inbnd_q_tbl[i].ci_virt		=
+			pm8001_ha->memoryMap.region[CI].virt_ptr;
+		offsetib = i * 0x20;
+		pm8001_ha->inbnd_q_tbl[i].pi_pci_bar		=
+			get_pci_bar_index(pm8001_mr32(addressib,
+				(offsetib + 0x14)));
+		pm8001_ha->inbnd_q_tbl[i].pi_offset		=
+			pm8001_mr32(addressib, (offsetib + 0x18));
+		pm8001_ha->inbnd_q_tbl[i].producer_idx		= 0;
+		pm8001_ha->inbnd_q_tbl[i].consumer_index	= 0;
+	}
+	for (i = 0; i < qn; i++) {
+		pm8001_ha->outbnd_q_tbl[i].element_size_cnt	=
+			256 | (64 << 16) | (1<<30);
+		pm8001_ha->outbnd_q_tbl[i].upper_base_addr	=
+			pm8001_ha->memoryMap.region[OB].phys_addr_hi;
+		pm8001_ha->outbnd_q_tbl[i].lower_base_addr	=
+			pm8001_ha->memoryMap.region[OB].phys_addr_lo;
+		pm8001_ha->outbnd_q_tbl[i].base_virt		=
+			(u8 *)pm8001_ha->memoryMap.region[OB].virt_ptr;
+		pm8001_ha->outbnd_q_tbl[i].total_length		=
+			pm8001_ha->memoryMap.region[OB].total_len;
+		pm8001_ha->outbnd_q_tbl[i].pi_upper_base_addr	=
+			pm8001_ha->memoryMap.region[PI].phys_addr_hi;
+		pm8001_ha->outbnd_q_tbl[i].pi_lower_base_addr	=
+			pm8001_ha->memoryMap.region[PI].phys_addr_lo;
+		pm8001_ha->outbnd_q_tbl[i].interrup_vec_cnt_delay	=
+			0 | (10 << 16) | (0 << 24);
+		pm8001_ha->outbnd_q_tbl[i].pi_virt		=
+			pm8001_ha->memoryMap.region[PI].virt_ptr;
+		offsetob = i * 0x24;
+		pm8001_ha->outbnd_q_tbl[i].ci_pci_bar		=
+			get_pci_bar_index(pm8001_mr32(addressob,
+			offsetob + 0x14));
+		pm8001_ha->outbnd_q_tbl[i].ci_offset		=
+			pm8001_mr32(addressob, (offsetob + 0x18));
+		pm8001_ha->outbnd_q_tbl[i].consumer_idx		= 0;
+		pm8001_ha->outbnd_q_tbl[i].producer_index	= 0;
+	}
+}
+
+/**
+ * update_main_config_table - update the main default table to the HBA.
+ * @pm8001_ha: our hba card information
+ */
+static void __devinit
+update_main_config_table(struct pm8001_hba_info *pm8001_ha)
+{
+	void __iomem *address = pm8001_ha->main_cfg_tbl_addr;
+	pm8001_mw32(address, 0x24,
+		pm8001_ha->main_cfg_tbl.inbound_q_nppd_hppd);
+	pm8001_mw32(address, 0x28,
+		pm8001_ha->main_cfg_tbl.outbound_hw_event_pid0_3);
+	pm8001_mw32(address, 0x2C,
+		pm8001_ha->main_cfg_tbl.outbound_hw_event_pid4_7);
+	pm8001_mw32(address, 0x30,
+		pm8001_ha->main_cfg_tbl.outbound_ncq_event_pid0_3);
+	pm8001_mw32(address, 0x34,
+		pm8001_ha->main_cfg_tbl.outbound_ncq_event_pid4_7);
+	pm8001_mw32(address, 0x38,
+		pm8001_ha->main_cfg_tbl.outbound_tgt_ITNexus_event_pid0_3);
+	pm8001_mw32(address, 0x3C,
+		pm8001_ha->main_cfg_tbl.outbound_tgt_ITNexus_event_pid4_7);
+	pm8001_mw32(address, 0x40,
+		pm8001_ha->main_cfg_tbl.outbound_tgt_ssp_event_pid0_3);
+	pm8001_mw32(address, 0x44,
+		pm8001_ha->main_cfg_tbl.outbound_tgt_ssp_event_pid4_7);
+	pm8001_mw32(address, 0x48,
+		pm8001_ha->main_cfg_tbl.outbound_tgt_smp_event_pid0_3);
+	pm8001_mw32(address, 0x4C,
+		pm8001_ha->main_cfg_tbl.outbound_tgt_smp_event_pid4_7);
+	pm8001_mw32(address, 0x50,
+		pm8001_ha->main_cfg_tbl.upper_event_log_addr);
+	pm8001_mw32(address, 0x54,
+		pm8001_ha->main_cfg_tbl.lower_event_log_addr);
+	pm8001_mw32(address, 0x58, pm8001_ha->main_cfg_tbl.event_log_size);
+	pm8001_mw32(address, 0x5C, pm8001_ha->main_cfg_tbl.event_log_option);
+	pm8001_mw32(address, 0x60,
+		pm8001_ha->main_cfg_tbl.upper_iop_event_log_addr);
+	pm8001_mw32(address, 0x64,
+		pm8001_ha->main_cfg_tbl.lower_iop_event_log_addr);
+	pm8001_mw32(address, 0x68, pm8001_ha->main_cfg_tbl.iop_event_log_size);
+	pm8001_mw32(address, 0x6C,
+		pm8001_ha->main_cfg_tbl.iop_event_log_option);
+	pm8001_mw32(address, 0x70,
+		pm8001_ha->main_cfg_tbl.fatal_err_interrupt);
+}
+
+/**
+ * update_inbnd_queue_table - update the inbound queue table to the HBA.
+ * @pm8001_ha: our hba card information
+ */
+static void __devinit
+update_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha, int number)
+{
+	void __iomem *address = pm8001_ha->inbnd_q_tbl_addr;
+	u16 offset = number * 0x20;
+	pm8001_mw32(address, offset + 0x00,
+		pm8001_ha->inbnd_q_tbl[number].element_pri_size_cnt);
+	pm8001_mw32(address, offset + 0x04,
+		pm8001_ha->inbnd_q_tbl[number].upper_base_addr);
+	pm8001_mw32(address, offset + 0x08,
+		pm8001_ha->inbnd_q_tbl[number].lower_base_addr);
+	pm8001_mw32(address, offset + 0x0C,
+		pm8001_ha->inbnd_q_tbl[number].ci_upper_base_addr);
+	pm8001_mw32(address, offset + 0x10,
+		pm8001_ha->inbnd_q_tbl[number].ci_lower_base_addr);
+}
+
+/**
+ * update_outbnd_queue_table - update the outbound queue table to the HBA.
+ * @pm8001_ha: our hba card information
+ */
+static void __devinit
+update_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha, int number)
+{
+	void __iomem *address = pm8001_ha->outbnd_q_tbl_addr;
+	u16 offset = number * 0x24;
+	pm8001_mw32(address, offset + 0x00,
+		pm8001_ha->outbnd_q_tbl[number].element_size_cnt);
+	pm8001_mw32(address, offset + 0x04,
+		pm8001_ha->outbnd_q_tbl[number].upper_base_addr);
+	pm8001_mw32(address, offset + 0x08,
+		pm8001_ha->outbnd_q_tbl[number].lower_base_addr);
+	pm8001_mw32(address, offset + 0x0C,
+		pm8001_ha->outbnd_q_tbl[number].pi_upper_base_addr);
+	pm8001_mw32(address, offset + 0x10,
+		pm8001_ha->outbnd_q_tbl[number].pi_lower_base_addr);
+	pm8001_mw32(address, offset + 0x1C,
+		pm8001_ha->outbnd_q_tbl[number].interrup_vec_cnt_delay);
+}
+
+/**
+ * bar4_shift - shift the BAR base address
+ * @pm8001_ha: our hba card information
+ * @shiftValue: shifting value in memory bar.
+ */
+static int bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue)
+{
+	u32 regVal;
+	u32 max_wait_count;
+
+	/* program the inbound AXI translation Lower Address */
+	pm8001_cw32(pm8001_ha, 1, SPC_IBW_AXI_TRANSLATION_LOW, shiftValue);
+
+	/* confirm the setting is written */
+	max_wait_count = 1 * 1000 * 1000;  /* 1 sec */
+	do {
+		udelay(1);
+		regVal = pm8001_cr32(pm8001_ha, 1, SPC_IBW_AXI_TRANSLATION_LOW);
+	} while ((regVal != shiftValue) && (--max_wait_count));
+
+	if (!max_wait_count) {
+		PM8001_INIT_DBG(pm8001_ha,
+			pm8001_printk("TIMEOUT:SPC_IBW_AXI_TRANSLATION_LOW"
+			" = 0x%x\n", regVal));
+		return -1;
+	}
+	return 0;
+}
+
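bar4_shift() uses a write-then-poll-for-confirmation idiom that recurs below
in mpi_init_check() and mpi_uninit_check(): write the register, then spin
with 1 us delays until the readback matches or roughly one second elapses.
A userspace sketch with a simulated register; fake_reg and write_confirmed()
are illustrative names.

    #include <stdio.h>

    static unsigned int fake_reg;   /* stands in for an MMIO register */
    static unsigned int read_reg(void) { return fake_reg; }

    static int write_confirmed(unsigned int val)
    {
            unsigned int max_wait_count = 1000 * 1000;  /* ~1 s at 1 us */
            unsigned int regval;

            fake_reg = val;                 /* the MMIO write */
            do {
                    /* udelay(1) in the kernel; omitted here */
                    regval = read_reg();
            } while (regval != val && --max_wait_count);

            return max_wait_count ? 0 : -1; /* 0 means confirmed */
    }

    int main(void)
    {
            printf("shift confirmed: %d\n", write_confirmed(0x30000));
            return 0;
    }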
+/**
+ * mpi_set_phys_g3_with_ssc
+ * @pm8001_ha: our hba card information
+ * @SSCbit: set SSCbit to 0 to disable all phys ssc; 1 to enable all phys ssc.
+ */
+static void __devinit
+mpi_set_phys_g3_with_ssc(struct pm8001_hba_info *pm8001_ha, u32 SSCbit)
+{
+	u32 offset;
+	u32 value;
+	u32 i, j;
+	u32 bit_cnt;
+
+#define SAS2_SETTINGS_LOCAL_PHY_0_3_SHIFT_ADDR 0x00030000
+#define SAS2_SETTINGS_LOCAL_PHY_4_7_SHIFT_ADDR 0x00040000
+#define SAS2_SETTINGS_LOCAL_PHY_0_3_OFFSET 0x1074
+#define SAS2_SETTINGS_LOCAL_PHY_4_7_OFFSET 0x1074
+#define PHY_G3_WITHOUT_SSC_BIT_SHIFT 12
+#define PHY_G3_WITH_SSC_BIT_SHIFT 13
+#define SNW3_PHY_CAPABILITIES_PARITY 31
+
+	/*
+	 * Using shifted destination address 0x3_0000:0x1074 + 0x4000*N (N=0:3)
+	 * Using shifted destination address 0x4_0000:0x1074 + 0x4000*(N-4) (N=4:7)
+	 */
+	if (-1 == bar4_shift(pm8001_ha, SAS2_SETTINGS_LOCAL_PHY_0_3_SHIFT_ADDR))
+		return;
+	/* set SSC bit of PHY 0 - 3 */
+	for (i = 0; i < 4; i++) {
+		offset = SAS2_SETTINGS_LOCAL_PHY_0_3_OFFSET + 0x4000 * i;
+		value = pm8001_cr32(pm8001_ha, 2, offset);
+		if (SSCbit) {
+			value |= 0x00000001 << PHY_G3_WITH_SSC_BIT_SHIFT;
+			value &= ~(0x00000001 << PHY_G3_WITHOUT_SSC_BIT_SHIFT);
+		} else {
+			value |= 0x00000001 << PHY_G3_WITHOUT_SSC_BIT_SHIFT;
+			value &= ~(0x00000001 << PHY_G3_WITH_SSC_BIT_SHIFT);
+		}
+		bit_cnt = 0;
+		for (j = 0; j < 31; j++)
+			if ((value >> j) & 0x00000001)
+				bit_cnt++;
+		if (bit_cnt % 2)
+			value &= ~(0x00000001 << SNW3_PHY_CAPABILITIES_PARITY);
+		else
+			value |= 0x00000001 << SNW3_PHY_CAPABILITIES_PARITY;
+
+		pm8001_cw32(pm8001_ha, 2, offset, value);
+	}
+
+	/* shift membase 3 for SAS2_SETTINGS_LOCAL_PHY 4 - 7 */
+	if (-1 == bar4_shift(pm8001_ha, SAS2_SETTINGS_LOCAL_PHY_4_7_SHIFT_ADDR))
+		return;
+
+	/* set SSC bit of PHY 4 - 7 */
+	for (i = 4; i < 8; i++) {
+		offset = SAS2_SETTINGS_LOCAL_PHY_4_7_OFFSET + 0x4000 * (i-4);
+		value = pm8001_cr32(pm8001_ha, 2, offset);
+		if (SSCbit) {
+			value |= 0x00000001 << PHY_G3_WITH_SSC_BIT_SHIFT;
+			value &= ~(0x00000001 << PHY_G3_WITHOUT_SSC_BIT_SHIFT);
+		} else {
+			value |= 0x00000001 << PHY_G3_WITHOUT_SSC_BIT_SHIFT;
+			value &= ~(0x00000001 << PHY_G3_WITH_SSC_BIT_SHIFT);
+		}
+		bit_cnt = 0;
+		for (j = 0; j < 31; j++)
+			if ((value >> j) & 0x00000001)
+				bit_cnt++;
+		if (bit_cnt % 2)
+			value &= ~(0x00000001 << SNW3_PHY_CAPABILITIES_PARITY);
+		else
+			value |= 0x00000001 << SNW3_PHY_CAPABILITIES_PARITY;
+
+		pm8001_cw32(pm8001_ha, 2, offset, value);
+	}
+
+	/* set the shifted destination address to 0x0 to avoid erroneous operation */
+	bar4_shift(pm8001_ha, 0x0);
+	return;
+}
+
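After flipping the SSC bits, the loop above counts the set bits in bits
0..30 and drives bit 31 (SNW3_PHY_CAPABILITIES_PARITY) so that the full
32-bit word always carries an odd number of set bits. A standalone
restatement of that parity maintenance; fix_parity() is a hypothetical name.

    #include <stdint.h>
    #include <stdio.h>

    #define PARITY_BIT 31

    static uint32_t fix_parity(uint32_t value)
    {
            unsigned int bit_cnt = 0;
            int j;

            for (j = 0; j < 31; j++)
                    if ((value >> j) & 1)
                            bit_cnt++;

            if (bit_cnt % 2)
                    value &= ~(1u << PARITY_BIT);   /* odd: clear bit 31 */
            else
                    value |= 1u << PARITY_BIT;      /* even: set bit 31 */
            return value;
    }

    int main(void)
    {
            printf("0x%08x\n", fix_parity(0x00001074)); /* 0x00001074 */
            printf("0x%08x\n", fix_parity(0x00000003)); /* 0x80000003 */
            return 0;
    }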
+/**
+ * mpi_set_open_retry_interval_reg
+ * @pm8001_ha: our hba card information
+ * @interval: interval time for each OPEN_REJECT (RETRY), in units of 1us.
+ */
+static void __devinit
+mpi_set_open_retry_interval_reg(struct pm8001_hba_info *pm8001_ha,
+				u32 interval)
+{
+	u32 offset;
+	u32 value;
+	u32 i;
+
+#define OPEN_RETRY_INTERVAL_PHY_0_3_SHIFT_ADDR 0x00030000
+#define OPEN_RETRY_INTERVAL_PHY_4_7_SHIFT_ADDR 0x00040000
+#define OPEN_RETRY_INTERVAL_PHY_0_3_OFFSET 0x30B4
+#define OPEN_RETRY_INTERVAL_PHY_4_7_OFFSET 0x30B4
+#define OPEN_RETRY_INTERVAL_REG_MASK 0x0000FFFF
+
+	value = interval & OPEN_RETRY_INTERVAL_REG_MASK;
+	/* shift bar and set the OPEN_REJECT(RETRY) interval time of PHY 0 -3.*/
+	if (-1 == bar4_shift(pm8001_ha,
+			     OPEN_RETRY_INTERVAL_PHY_0_3_SHIFT_ADDR))
+		return;
+	for (i = 0; i < 4; i++) {
+		offset = OPEN_RETRY_INTERVAL_PHY_0_3_OFFSET + 0x4000 * i;
+		pm8001_cw32(pm8001_ha, 2, offset, value);
+	}
+
+	if (-1 == bar4_shift(pm8001_ha,
+			     OPEN_RETRY_INTERVAL_PHY_4_7_SHIFT_ADDR))
+		return;
+	for (i = 4; i < 8; i++) {
+		offset = OPEN_RETRY_INTERVAL_PHY_4_7_OFFSET + 0x4000 * (i-4);
+		pm8001_cw32(pm8001_ha, 2, offset, value);
+	}
+	/* set the shifted destination address to 0x0 to avoid erroneous operation */
+	bar4_shift(pm8001_ha, 0x0);
+	return;
+}
+
+/**
+ * mpi_init_check - check firmware initialization status.
+ * @pm8001_ha: our hba card information
+ */
+static int mpi_init_check(struct pm8001_hba_info *pm8001_ha)
+{
+	u32 max_wait_count;
+	u32 value;
+	u32 gst_len_mpistate;
+	/* Write bit0=1 to Inbound DoorBell Register to tell the SPC FW the
+	table is updated */
+	pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPC_MSGU_CFG_TABLE_UPDATE);
+	/* wait until Inbound DoorBell Clear Register toggled */
+	max_wait_count = 1 * 1000 * 1000;/* 1 sec */
+	do {
+		udelay(1);
+		value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET);
+		value &= SPC_MSGU_CFG_TABLE_UPDATE;
+	} while ((value != 0) && (--max_wait_count));
+
+	if (!max_wait_count)
+		return -1;
+	/* check the MPI-State for initialization */
+	gst_len_mpistate =
+		pm8001_mr32(pm8001_ha->general_stat_tbl_addr,
+		GST_GSTLEN_MPIS_OFFSET);
+	if (GST_MPI_STATE_INIT != (gst_len_mpistate & GST_MPI_STATE_MASK))
+		return -1;
+	/* check MPI Initialization error */
+	gst_len_mpistate = gst_len_mpistate >> 16;
+	if (0x0000 != gst_len_mpistate)
+		return -1;
+	return 0;
+}
+
+/**
+ * check_fw_ready - the LLDD checks if the FW is ready; if not, return an error.
+ * @pm8001_ha: our hba card information
+ */
+static int check_fw_ready(struct pm8001_hba_info *pm8001_ha)
+{
+	u32 value, value1;
+	u32 max_wait_count;
+	/* check error state */
+	value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
+	value1 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2);
+	/* check AAP error */
+	if (SCRATCH_PAD1_ERR == (value & SCRATCH_PAD_STATE_MASK)) {
+		/* error state */
+		value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0);
+		return -1;
+	}
+
+	/* check IOP error */
+	if (SCRATCH_PAD2_ERR == (value1 & SCRATCH_PAD_STATE_MASK)) {
+		/* error state */
+		value1 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3);
+		return -1;
+	}
+
+	/* bits 4-31 of scratch pad1 should be zero if it is not
+	in an error state */
+	if (value & SCRATCH_PAD1_STATE_MASK) {
+		/* error case */
+		pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0);
+		return -1;
+	}
+
+	/* bit 2 and bits 4-31 of scratch pad2 should be zero if it is not
+	in an error state */
+	if (value1 & SCRATCH_PAD2_STATE_MASK) {
+		/* error case */
+		return -1;
+	}
+
+	max_wait_count = 1 * 1000 * 1000;/* 1 sec timeout */
+
+	/* wait until scratch pad 1 and 2 registers in ready state  */
+	do {
+		udelay(1);
+		value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1)
+			& SCRATCH_PAD1_RDY;
+		value1 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2)
+			& SCRATCH_PAD2_RDY;
+		if ((--max_wait_count) == 0)
+			return -1;
+	} while ((value != SCRATCH_PAD1_RDY) || (value1 != SCRATCH_PAD2_RDY));
+	return 0;
+}
+
+static void init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha)
+{
+	void __iomem *base_addr;
+	u32	value;
+	u32	offset;
+	u32	pcibar;
+	u32	pcilogic;
+
+	value = pm8001_cr32(pm8001_ha, 0, 0x44);
+	offset = value & 0x03FFFFFF;
+	PM8001_INIT_DBG(pm8001_ha,
+		pm8001_printk("Scratchpad 0 Offset: %x \n", offset));
+	pcilogic = (value & 0xFC000000) >> 26;
+	pcibar = get_pci_bar_index(pcilogic);
+	PM8001_INIT_DBG(pm8001_ha,
+		pm8001_printk("Scratchpad 0 PCI BAR: %d \n", pcibar));
+	pm8001_ha->main_cfg_tbl_addr = base_addr =
+		pm8001_ha->io_mem[pcibar].memvirtaddr + offset;
+	pm8001_ha->general_stat_tbl_addr =
+		base_addr + pm8001_cr32(pm8001_ha, pcibar, offset + 0x18);
+	pm8001_ha->inbnd_q_tbl_addr =
+		base_addr + pm8001_cr32(pm8001_ha, pcibar, offset + 0x1C);
+	pm8001_ha->outbnd_q_tbl_addr =
+		base_addr + pm8001_cr32(pm8001_ha, pcibar, offset + 0x20);
+}
+
+/**
+ * pm8001_chip_init - the main init function that initializes the whole PM8001 chip.
+ * @pm8001_ha: our hba card information
+ */
+static int __devinit pm8001_chip_init(struct pm8001_hba_info *pm8001_ha)
+{
+	/* check the firmware status */
+	if (-1 == check_fw_ready(pm8001_ha)) {
+		PM8001_FAIL_DBG(pm8001_ha,
+			pm8001_printk("Firmware is not ready!\n"));
+		return -EBUSY;
+	}
+
+	/* Initialize pci space address eg: mpi offset */
+	init_pci_device_addresses(pm8001_ha);
+	init_default_table_values(pm8001_ha);
+	read_main_config_table(pm8001_ha);
+	read_general_status_table(pm8001_ha);
+	read_inbnd_queue_table(pm8001_ha);
+	read_outbnd_queue_table(pm8001_ha);
+	/* update main config table, inbound table and outbound table */
+	update_main_config_table(pm8001_ha);
+	update_inbnd_queue_table(pm8001_ha, 0);
+	update_outbnd_queue_table(pm8001_ha, 0);
+	mpi_set_phys_g3_with_ssc(pm8001_ha, 0);
+	mpi_set_open_retry_interval_reg(pm8001_ha, 7);
+	/* notify firmware update finished and check initialization status */
+	if (0 == mpi_init_check(pm8001_ha)) {
+		PM8001_INIT_DBG(pm8001_ha,
+			pm8001_printk("MPI initialize successful!\n"));
+	} else
+		return -EBUSY;
+	/* This register is a 16-bit timer with a resolution of 1us. This is the
+	timer used for interrupt delay/coalescing in the PCIe Application Layer.
+	Zero is not a valid value. A value of 1 in the register will cause the
+	interrupts to be normal. A value greater than 1 will cause coalescing
+	delays. */
+	pm8001_cw32(pm8001_ha, 1, 0x0033c0, 0x1);
+	pm8001_cw32(pm8001_ha, 1, 0x0033c4, 0x0);
+	return 0;
+}
+
+static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha)
+{
+	u32 max_wait_count;
+	u32 value;
+	u32 gst_len_mpistate;
+	init_pci_device_addresses(pm8001_ha);
+	/* Write bit1=1 to Inbound DoorBell Register to tell the SPC FW the
+	table is stopped */
+	pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPC_MSGU_CFG_TABLE_RESET);
+
+	/* wait until Inbound DoorBell Clear Register toggled */
+	max_wait_count = 1 * 1000 * 1000;/* 1 sec */
+	do {
+		udelay(1);
+		value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET);
+		value &= SPC_MSGU_CFG_TABLE_RESET;
+	} while ((value != 0) && (--max_wait_count));
+
+	if (!max_wait_count) {
+		PM8001_FAIL_DBG(pm8001_ha,
+			pm8001_printk("TIMEOUT:IBDB value/=0x%x\n", value));
+		return -1;
+	}
+
+	/* check the MPI-State for termination in progress */
+	/* wait until Inbound DoorBell Clear Register toggled */
+	max_wait_count = 1 * 1000 * 1000;  /* 1 sec */
+	do {
+		udelay(1);
+		gst_len_mpistate =
+			pm8001_mr32(pm8001_ha->general_stat_tbl_addr,
+			GST_GSTLEN_MPIS_OFFSET);
+		if (GST_MPI_STATE_UNINIT ==
+			(gst_len_mpistate & GST_MPI_STATE_MASK))
+			break;
+	} while (--max_wait_count);
+	if (!max_wait_count) {
+		PM8001_FAIL_DBG(pm8001_ha,
+			pm8001_printk(" TIME OUT MPI State = 0x%x\n",
+				gst_len_mpistate & GST_MPI_STATE_MASK));
+		return -1;
+	}
+	return 0;
+}
+
+/**
+ * soft_reset_ready_check - Function to check FW is ready for soft reset.
+ * @pm8001_ha: our hba card information
+ */
+static u32 soft_reset_ready_check(struct pm8001_hba_info *pm8001_ha)
+{
+	u32 regVal, regVal1, regVal2;
+	if (mpi_uninit_check(pm8001_ha) != 0) {
+		PM8001_FAIL_DBG(pm8001_ha,
+			pm8001_printk("MPI state is not ready\n"));
+		return -1;
+	}
+	/* read the scratch pad 2 register bit 2 */
+	regVal = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2)
+		& SCRATCH_PAD2_FWRDY_RST;
+	if (regVal == SCRATCH_PAD2_FWRDY_RST) {
+		PM8001_INIT_DBG(pm8001_ha,
+			pm8001_printk("Firmware is ready for reset .\n"));
+	} else {
+		/* Trigger NMI twice via RB6 */
+		if (-1 == bar4_shift(pm8001_ha, RB6_ACCESS_REG)) {
+			PM8001_FAIL_DBG(pm8001_ha,
+				pm8001_printk("Shift Bar4 to 0x%x failed\n",
+					RB6_ACCESS_REG));
+			return -1;
+		}
+		pm8001_cw32(pm8001_ha, 2, SPC_RB6_OFFSET,
+			RB6_MAGIC_NUMBER_RST);
+		pm8001_cw32(pm8001_ha, 2, SPC_RB6_OFFSET, RB6_MAGIC_NUMBER_RST);
+		/* wait for 100 ms */
+		mdelay(100);
+		regVal = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2) &
+			SCRATCH_PAD2_FWRDY_RST;
+		if (regVal != SCRATCH_PAD2_FWRDY_RST) {
+			regVal1 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
+			regVal2 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2);
+			PM8001_FAIL_DBG(pm8001_ha,
+				pm8001_printk("TIMEOUT:MSGU_SCRATCH_PAD1"
+				"=0x%x, MSGU_SCRATCH_PAD2=0x%x\n",
+				regVal1, regVal2));
+			PM8001_FAIL_DBG(pm8001_ha,
+				pm8001_printk("SCRATCH_PAD0 value = 0x%x\n",
+				pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0)));
+			PM8001_FAIL_DBG(pm8001_ha,
+				pm8001_printk("SCRATCH_PAD3 value = 0x%x\n",
+				pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3)));
+			return -1;
+		}
+	}
+	return 0;
+}
+
+/**
+ * pm8001_chip_soft_rst - soft reset the PM8001 chip, so that all FW
+ * register status is cleared back to the original state.
+ * @pm8001_ha: our hba card information
+ * @signature: signature in host scratch pad0 register.
+ */
+static int
+pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha, u32 signature)
+{
+	u32	regVal, toggleVal;
+	u32	max_wait_count;
+	u32	regVal1, regVal2, regVal3;
+
+	/* step1: Check FW is ready for soft reset */
+	if (soft_reset_ready_check(pm8001_ha) != 0) {
+		PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("FW is not ready\n"));
+		return -1;
+	}
+
+	/* step 2: clear NMI status register on AAP1 and IOP, write the same
+	value to clear */
+	/* map 0x60000 to BAR4(0x20), BAR2(win) */
+	if (-1 == bar4_shift(pm8001_ha, MBIC_AAP1_ADDR_BASE)) {
+		PM8001_FAIL_DBG(pm8001_ha,
+			pm8001_printk("Shift Bar4 to 0x%x failed\n",
+			MBIC_AAP1_ADDR_BASE));
+		return -1;
+	}
+	regVal = pm8001_cr32(pm8001_ha, 2, MBIC_NMI_ENABLE_VPE0_IOP);
+	PM8001_INIT_DBG(pm8001_ha,
+		pm8001_printk("MBIC - NMI Enable VPE0 (IOP)= 0x%x\n", regVal));
+	pm8001_cw32(pm8001_ha, 2, MBIC_NMI_ENABLE_VPE0_IOP, 0x0);
+	/* map 0x70000 to BAR4(0x20), BAR2(win) */
+	if (-1 == bar4_shift(pm8001_ha, MBIC_IOP_ADDR_BASE)) {
+		PM8001_FAIL_DBG(pm8001_ha,
+			pm8001_printk("Shift Bar4 to 0x%x failed\n",
+			MBIC_IOP_ADDR_BASE));
+		return -1;
+	}
+	regVal = pm8001_cr32(pm8001_ha, 2, MBIC_NMI_ENABLE_VPE0_AAP1);
+	PM8001_INIT_DBG(pm8001_ha,
+		pm8001_printk("MBIC - NMI Enable VPE0 (AAP1)= 0x%x\n", regVal));
+	pm8001_cw32(pm8001_ha, 2, MBIC_NMI_ENABLE_VPE0_AAP1, 0x0);
+
+	regVal = pm8001_cr32(pm8001_ha, 1, PCIE_EVENT_INTERRUPT_ENABLE);
+	PM8001_INIT_DBG(pm8001_ha,
+		pm8001_printk("PCIE -Event Interrupt Enable = 0x%x\n", regVal));
+	pm8001_cw32(pm8001_ha, 1, PCIE_EVENT_INTERRUPT_ENABLE, 0x0);
+
+	regVal = pm8001_cr32(pm8001_ha, 1, PCIE_EVENT_INTERRUPT);
+	PM8001_INIT_DBG(pm8001_ha,
+		pm8001_printk("PCIE - Event Interrupt  = 0x%x\n", regVal));
+	pm8001_cw32(pm8001_ha, 1, PCIE_EVENT_INTERRUPT, regVal);
+
+	regVal = pm8001_cr32(pm8001_ha, 1, PCIE_ERROR_INTERRUPT_ENABLE);
+	PM8001_INIT_DBG(pm8001_ha,
+		pm8001_printk("PCIE -Error Interrupt Enable = 0x%x\n", regVal));
+	pm8001_cw32(pm8001_ha, 1, PCIE_ERROR_INTERRUPT_ENABLE, 0x0);
+
+	regVal = pm8001_cr32(pm8001_ha, 1, PCIE_ERROR_INTERRUPT);
+	PM8001_INIT_DBG(pm8001_ha,
+		pm8001_printk("PCIE - Error Interrupt = 0x%x\n", regVal));
+	pm8001_cw32(pm8001_ha, 1, PCIE_ERROR_INTERRUPT, regVal);
+
+	/* read the scratch pad 1 register bit 2 */
+	regVal = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1)
+		& SCRATCH_PAD1_RST;
+	toggleVal = regVal ^ SCRATCH_PAD1_RST;
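+	/* The FW signals soft reset completion by toggling the
+	SCRATCH_PAD1_RST bit in scratch pad 1; remember the expected toggled
+	value so step 15 below can poll for it. */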
+
+	/* set signature in host scratch pad0 register to tell SPC that the
+	host performs the soft reset */
+	pm8001_cw32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_0, signature);
+
+	/* read required registers for confirming */
+	/* map 0x0700000 to BAR4(0x20), BAR2(win) */
+	if (-1 == bar4_shift(pm8001_ha, GSM_ADDR_BASE)) {
+		PM8001_FAIL_DBG(pm8001_ha,
+			pm8001_printk("Shift Bar4 to 0x%x failed\n",
+			GSM_ADDR_BASE));
+		return -1;
+	}
+	PM8001_INIT_DBG(pm8001_ha,
+		pm8001_printk("GSM 0x0(0x00007b88)-GSM Configuration and"
+		" Reset = 0x%x\n",
+		pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET)));
+
+	/* step 3: host read GSM Configuration and Reset register */
+	regVal = pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET);
+	/* Put those bits to low */
+	/* GSM XCBI offset = 0x70 0000
+	0x00 Bit 13 COM_SLV_SW_RSTB 1
+	0x00 Bit 12 QSSP_SW_RSTB 1
+	0x00 Bit 11 RAAE_SW_RSTB 1
+	0x00 Bit 9 RB_1_SW_RSTB 1
+	0x00 Bit 8 SM_SW_RSTB 1
+	*/
+	regVal &= ~(0x00003b00);
+	/* host write GSM Configuration and Reset register */
+	pm8001_cw32(pm8001_ha, 2, GSM_CONFIG_RESET, regVal);
+	PM8001_INIT_DBG(pm8001_ha,
+		pm8001_printk("GSM 0x0 (0x00007b88 ==> 0x00004088) - GSM "
+		"Configuration and Reset is set to = 0x%x\n",
+		pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET)));
+
+	/* step 4: */
+	/* disable GSM - Read Address Parity Check */
+	regVal1 = pm8001_cr32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK);
+	PM8001_INIT_DBG(pm8001_ha,
+		pm8001_printk("GSM 0x700038 - Read Address Parity Check "
+		"Enable = 0x%x\n", regVal1));
+	pm8001_cw32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK, 0x0);
+	PM8001_INIT_DBG(pm8001_ha,
+		pm8001_printk("GSM 0x700038 - Read Address Parity Check Enable"
+		"is set to = 0x%x\n",
+		pm8001_cr32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK)));
+
+	/* disable GSM - Write Address Parity Check */
+	regVal2 = pm8001_cr32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK);
+	PM8001_INIT_DBG(pm8001_ha,
+		pm8001_printk("GSM 0x700040 - Write Address Parity Check"
+		" Enable = 0x%x\n", regVal2));
+	pm8001_cw32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK, 0x0);
+	PM8001_INIT_DBG(pm8001_ha,
+		pm8001_printk("GSM 0x700040 - Write Address Parity Check "
+		"Enable is set to = 0x%x\n",
+		pm8001_cr32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK)));
+
+	/* disable GSM - Write Data Parity Check */
+	regVal3 = pm8001_cr32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK);
+	PM8001_INIT_DBG(pm8001_ha,
+		pm8001_printk("GSM 0x300048 - Write Data Parity Check"
+		" Enable = 0x%x\n", regVal3));
+	pm8001_cw32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK, 0x0);
+	PM8001_INIT_DBG(pm8001_ha,
+		pm8001_printk("GSM 0x300048 - Write Data Parity Check Enable"
+		"is set to = 0x%x\n",
+	pm8001_cr32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK)));
+
+	/* step 5: delay 10 usec */
+	udelay(10);
+	/* step 5-b: set GPIO-0 output control to tristate anyway */
+	if (-1 == bar4_shift(pm8001_ha, GPIO_ADDR_BASE)) {
+		PM8001_INIT_DBG(pm8001_ha,
+				pm8001_printk("Shift Bar4 to 0x%x failed\n",
+				GPIO_ADDR_BASE));
+		return -1;
+	}
+	regVal = pm8001_cr32(pm8001_ha, 2, GPIO_GPIO_0_0UTPUT_CTL_OFFSET);
+	PM8001_INIT_DBG(pm8001_ha,
+		pm8001_printk("GPIO Output Control Register:"
+		" = 0x%x\n", regVal));
+	/* set GPIO-0 output control to tri-state */
+	regVal &= 0xFFFFFFFC;
+	pm8001_cw32(pm8001_ha, 2, GPIO_GPIO_0_0UTPUT_CTL_OFFSET, regVal);
+
+	/* Step 6: Reset the IOP and AAP1 */
+	/* map 0x00000 to BAR4(0x20), BAR2(win) */
+	if (-1 == bar4_shift(pm8001_ha, SPC_TOP_LEVEL_ADDR_BASE)) {
+		PM8001_FAIL_DBG(pm8001_ha,
+			pm8001_printk("SPC Shift Bar4 to 0x%x failed\n",
+			SPC_TOP_LEVEL_ADDR_BASE));
+		return -1;
+	}
+	regVal = pm8001_cr32(pm8001_ha, 2, SPC_REG_RESET);
+	PM8001_INIT_DBG(pm8001_ha,
+		pm8001_printk("Top Register before resetting IOP/AAP1"
+		":= 0x%x\n", regVal));
+	regVal &= ~(SPC_REG_RESET_PCS_IOP_SS | SPC_REG_RESET_PCS_AAP1_SS);
+	pm8001_cw32(pm8001_ha, 2, SPC_REG_RESET, regVal);
+
+	/* step 7: Reset the BDMA/OSSP */
+	regVal = pm8001_cr32(pm8001_ha, 2, SPC_REG_RESET);
+	PM8001_INIT_DBG(pm8001_ha,
+		pm8001_printk("Top Register before resetting BDMA/OSSP"
+		": = 0x%x\n", regVal));
+	regVal &= ~(SPC_REG_RESET_BDMA_CORE | SPC_REG_RESET_OSSP);
+	pm8001_cw32(pm8001_ha, 2, SPC_REG_RESET, regVal);
+
+	/* step 8: delay 10 usec */
+	udelay(10);
+
+	/* step 9: bring the BDMA and OSSP out of reset */
+	regVal = pm8001_cr32(pm8001_ha, 2, SPC_REG_RESET);
+	PM8001_INIT_DBG(pm8001_ha,
+		pm8001_printk("Top Register before bringing up BDMA/OSSP"
+		":= 0x%x\n", regVal));
+	regVal |= (SPC_REG_RESET_BDMA_CORE | SPC_REG_RESET_OSSP);
+	pm8001_cw32(pm8001_ha, 2, SPC_REG_RESET, regVal);
+
+	/* step 10: delay 10 usec */
+	udelay(10);
+
+	/* step 11: reads and sets the GSM Configuration and Reset Register */
+	/* map 0x0700000 to BAR4(0x20), BAR2(win) */
+	if (-1 == bar4_shift(pm8001_ha, GSM_ADDR_BASE)) {
+		PM8001_FAIL_DBG(pm8001_ha,
+			pm8001_printk("SPC Shift Bar4 to 0x%x failed\n",
+			GSM_ADDR_BASE));
+		return -1;
+	}
+	PM8001_INIT_DBG(pm8001_ha,
+		pm8001_printk("GSM 0x0 (0x00007b88)-GSM Configuration and "
+		"Reset = 0x%x\n", pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET)));
+	regVal = pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET);
+	/* Put those bits to high */
+	/* GSM XCBI offset = 0x70 0000
+	0x00 Bit 13 COM_SLV_SW_RSTB 1
+	0x00 Bit 12 QSSP_SW_RSTB 1
+	0x00 Bit 11 RAAE_SW_RSTB 1
+	0x00 Bit 9   RB_1_SW_RSTB 1
+	0x00 Bit 8   SM_SW_RSTB 1
+	*/
+	regVal |= (GSM_CONFIG_RESET_VALUE);
+	pm8001_cw32(pm8001_ha, 2, GSM_CONFIG_RESET, regVal);
+	PM8001_INIT_DBG(pm8001_ha,
+		pm8001_printk("GSM (0x00004088 ==> 0x00007b88) - GSM"
+		" Configuration and Reset is set to = 0x%x\n",
+		pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET)));
+
+	/* step 12: Restore GSM - Read Address Parity Check */
+	regVal = pm8001_cr32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK);
+	/* just for debugging */
+	PM8001_INIT_DBG(pm8001_ha,
+		pm8001_printk("GSM 0x700038 - Read Address Parity Check Enable"
+		" = 0x%x\n", regVal));
+	pm8001_cw32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK, regVal1);
+	PM8001_INIT_DBG(pm8001_ha,
+		pm8001_printk("GSM 0x700038 - Read Address Parity"
+		" Check Enable is set to = 0x%x\n",
+		pm8001_cr32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK)));
+	/* Restore GSM - Write Address Parity Check */
+	regVal = pm8001_cr32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK);
+	pm8001_cw32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK, regVal2);
+	PM8001_INIT_DBG(pm8001_ha,
+		pm8001_printk("GSM 0x700040 - Write Address Parity Check"
+		" Enable is set to = 0x%x\n",
+		pm8001_cr32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK)));
+	/* Restore GSM - Write Data Parity Check */
+	regVal = pm8001_cr32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK);
+	pm8001_cw32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK, regVal3);
+	PM8001_INIT_DBG(pm8001_ha,
+		pm8001_printk("GSM 0x700048 - Write Data Parity Check Enable"
+		"is set to = 0x%x\n",
+		pm8001_cr32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK)));
+
+	/* step 13: bring the IOP and AAP1 out of reset */
+	/* map 0x00000 to BAR4(0x20), BAR2(win) */
+	if (-1 == bar4_shift(pm8001_ha, SPC_TOP_LEVEL_ADDR_BASE)) {
+		PM8001_FAIL_DBG(pm8001_ha,
+			pm8001_printk("Shift Bar4 to 0x%x failed\n",
+			SPC_TOP_LEVEL_ADDR_BASE));
+		return -1;
+	}
+	regVal = pm8001_cr32(pm8001_ha, 2, SPC_REG_RESET);
+	regVal |= (SPC_REG_RESET_PCS_IOP_SS | SPC_REG_RESET_PCS_AAP1_SS);
+	pm8001_cw32(pm8001_ha, 2, SPC_REG_RESET, regVal);
+
+	/* step 14: delay 10 usec - Normal Mode */
+	udelay(10);
+	/* check Soft Reset Normal mode or Soft Reset HDA mode */
+	if (signature == SPC_SOFT_RESET_SIGNATURE) {
+		/* step 15 (Normal Mode): wait until scratch pad1 register
+		bit 2 toggled */
+		max_wait_count = 2 * 1000 * 1000; /* 2 sec */
+		do {
+			udelay(1);
+			regVal = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1) &
+				SCRATCH_PAD1_RST;
+		} while ((regVal != toggleVal) && (--max_wait_count));
+
+		if (!max_wait_count) {
+			regVal = pm8001_cr32(pm8001_ha, 0,
+				MSGU_SCRATCH_PAD_1);
+			PM8001_FAIL_DBG(pm8001_ha,
+				pm8001_printk("TIMEOUT : ToggleVal 0x%x,"
+				"MSGU_SCRATCH_PAD1 = 0x%x\n",
+				toggleVal, regVal));
+			PM8001_FAIL_DBG(pm8001_ha,
+				pm8001_printk("SCRATCH_PAD0 value = 0x%x\n",
+				pm8001_cr32(pm8001_ha, 0,
+				MSGU_SCRATCH_PAD_0)));
+			PM8001_FAIL_DBG(pm8001_ha,
+				pm8001_printk("SCRATCH_PAD2 value = 0x%x\n",
+				pm8001_cr32(pm8001_ha, 0,
+				MSGU_SCRATCH_PAD_2)));
+			PM8001_FAIL_DBG(pm8001_ha,
+				pm8001_printk("SCRATCH_PAD3 value = 0x%x\n",
+				pm8001_cr32(pm8001_ha, 0,
+				MSGU_SCRATCH_PAD_3)));
+			return -1;
+		}
+
+		/* step 16 (Normal) - Clear ODMR and ODCR */
+		pm8001_cw32(pm8001_ha, 0, MSGU_ODCR, ODCR_CLEAR_ALL);
+		pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, ODMR_CLEAR_ALL);
+
+		/* step 17 (Normal Mode): wait for the FW and IOP to get
+		ready - 1 sec timeout */
+		/* Wait for the SPC Configuration Table to be ready */
+		if (check_fw_ready(pm8001_ha) == -1) {
+			regVal = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
+			/* return error if MPI Configuration Table not ready */
+			PM8001_INIT_DBG(pm8001_ha,
+				pm8001_printk("FW not ready SCRATCH_PAD1"
+				" = 0x%x\n", regVal));
+			regVal = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2);
+			/* return error if MPI Configuration Table not ready */
+			PM8001_INIT_DBG(pm8001_ha,
+				pm8001_printk("FW not ready SCRATCH_PAD2"
+				" = 0x%x\n", regVal));
+			PM8001_INIT_DBG(pm8001_ha,
+				pm8001_printk("SCRATCH_PAD0 value = 0x%x\n",
+				pm8001_cr32(pm8001_ha, 0,
+				MSGU_SCRATCH_PAD_0)));
+			PM8001_INIT_DBG(pm8001_ha,
+				pm8001_printk("SCRATCH_PAD3 value = 0x%x\n",
+				pm8001_cr32(pm8001_ha, 0,
+				MSGU_SCRATCH_PAD_3)));
+			return -1;
+		}
+	}
+
+	PM8001_INIT_DBG(pm8001_ha,
+		pm8001_printk("SPC soft reset Complete\n"));
+	return 0;
+}
+
+static void pm8001_hw_chip_rst(struct pm8001_hba_info *pm8001_ha)
+{
+	u32 i;
+	u32 regVal;
+	PM8001_INIT_DBG(pm8001_ha,
+		pm8001_printk("chip reset start\n"));
+
+	/* do SPC chip reset. */
+	regVal = pm8001_cr32(pm8001_ha, 1, SPC_REG_RESET);
+	regVal &= ~(SPC_REG_RESET_DEVICE);
+	pm8001_cw32(pm8001_ha, 1, SPC_REG_RESET, regVal);
+
+	/* delay 10 usec */
+	udelay(10);
+
+	/* bring chip reset out of reset */
+	regVal = pm8001_cr32(pm8001_ha, 1, SPC_REG_RESET);
+	regVal |= SPC_REG_RESET_DEVICE;
+	pm8001_cw32(pm8001_ha, 1, SPC_REG_RESET, regVal);
+
+	/* delay 10 usec */
+	udelay(10);
+
+	/* wait for 20 msec until the firmware gets reloaded */
+	i = 20;
+	do {
+		mdelay(1);
+	} while ((--i) != 0);
+
+	PM8001_INIT_DBG(pm8001_ha,
+		pm8001_printk("chip reset finished\n"));
+}
+
+/**
+ * pm8001_chip_iounmap - unmap the BAR regions that were mapped at init.
+ * @pm8001_ha: our hba card information
+ */
+static void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha)
+{
+	s8 bar, logical = 0;
+	for (bar = 0; bar < 6; bar++) {
+		/*
+		** logical BARs for SPC:
+		** bar 0 and 1 - logical BAR0
+		** bar 2 and 3 - logical BAR1
+		** bar4 - logical BAR2
+		** bar5 - logical BAR3
+		** Skip the appropriate assignments:
+		*/
+		if ((bar == 1) || (bar == 3))
+			continue;
+		if (pm8001_ha->io_mem[logical].memvirtaddr) {
+			iounmap(pm8001_ha->io_mem[logical].memvirtaddr);
+			logical++;
+		}
+	}
+}
+
+/**
+ * pm8001_chip_intx_interrupt_enable - enable PM8001 chip interrupt
+ * @pm8001_ha: our hba card information
+ */
+static void
+pm8001_chip_intx_interrupt_enable(struct pm8001_hba_info *pm8001_ha)
+{
+	pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, ODMR_CLEAR_ALL);
+	pm8001_cw32(pm8001_ha, 0, MSGU_ODCR, ODCR_CLEAR_ALL);
+}
+
+/**
+ * pm8001_chip_intx_interrupt_disable - disable PM8001 chip interrupt
+ * @pm8001_ha: our hba card information
+ */
+static void
+pm8001_chip_intx_interrupt_disable(struct pm8001_hba_info *pm8001_ha)
+{
+	pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, ODMR_MASK_ALL);
+}
+
+/**
+ * pm8001_chip_msix_interrupt_enable - enable PM8001 chip interrupt
+ * @pm8001_ha: our hba card information
+ * @int_vec_idx: interrupt vector index
+ */
+static void
+pm8001_chip_msix_interrupt_enable(struct pm8001_hba_info *pm8001_ha,
+	u32 int_vec_idx)
+{
+	u32 msi_index;
+	u32 value;
+	msi_index = int_vec_idx * MSIX_TABLE_ELEMENT_SIZE;
+	msi_index += MSIX_TABLE_BASE;
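+	/* Enable the vector in its MSI-X table slot, then clear any pending
+	doorbell bit for this vector via ODCR. */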
+	pm8001_cw32(pm8001_ha, 0, msi_index, MSIX_INTERRUPT_ENABLE);
+	value = (1 << int_vec_idx);
+	pm8001_cw32(pm8001_ha, 0,  MSGU_ODCR, value);
+}
+
+/**
+ * pm8001_chip_msix_interrupt_disable - disable PM8001 chip interrupt
+ * @pm8001_ha: our hba card information
+ * @int_vec_idx: interrupt vector index
+ */
+static void
+pm8001_chip_msix_interrupt_disable(struct pm8001_hba_info *pm8001_ha,
+	u32 int_vec_idx)
+{
+	u32 msi_index;
+	msi_index = int_vec_idx * MSIX_TABLE_ELEMENT_SIZE;
+	msi_index += MSIX_TABLE_BASE;
+	pm8001_cw32(pm8001_ha, 0,  msi_index, MSIX_INTERRUPT_DISABLE);
+}
+
+/**
+ * pm8001_chip_interrupt_enable - enable PM8001 chip interrupt
+ * @pm8001_ha: our hba card information
+ */
+static void
+pm8001_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha)
+{
+#ifdef PM8001_USE_MSIX
+	pm8001_chip_msix_interrupt_enable(pm8001_ha, 0);
+	return;
+#endif
+	pm8001_chip_intx_interrupt_enable(pm8001_ha);
+}
+
+/**
+ * pm8001_chip_interrupt_disable - disable PM8001 chip interrupt
+ * @pm8001_ha: our hba card information
+ */
+static void
+pm8001_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha)
+{
+#ifdef PM8001_USE_MSIX
+	pm8001_chip_msix_interrupt_disable(pm8001_ha, 0);
+	return;
+#endif
+	pm8001_chip_intx_interrupt_disable(pm8001_ha);
+}
+
+/**
+ * mpi_msg_free_get - get a free message buffer for an inbound queue transfer.
+ * @circularQ: the inbound queue we want to transfer to the HBA.
+ * @messageSize: the message size of this transfer, normally it is 64 bytes
+ * @messagePtr: the pointer to message.
+ */
+static int mpi_msg_free_get(struct inbound_queue_table *circularQ,
+			    u16 messageSize, void **messagePtr)
+{
+	u32 offset, consumer_index;
+	struct mpi_msg_hdr *msgHeader;
+	u8 bcCount = 1; /* only support single buffer */
+
+	/* Check if the requested message size can be allocated in this queue */
+	if (messageSize > 64) {
+		*messagePtr = NULL;
+		return -1;
+	}
+
+	/* Stores the new consumer index */
+	consumer_index = pm8001_read_32(circularQ->ci_virt);
+	circularQ->consumer_index = cpu_to_le32(consumer_index);
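+	/* The inbound ring holds 256 fixed-size IOMBs; it is full when
+	advancing the producer index by bcCount would land on the consumer
+	index. */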
+	if (((circularQ->producer_idx + bcCount) % 256) ==
+		circularQ->consumer_index) {
+		*messagePtr = NULL;
+		return -1;
+	}
+	/* get memory IOMB buffer address */
+	offset = circularQ->producer_idx * 64;
+	/* increment to next bcCount element */
+	circularQ->producer_idx = (circularQ->producer_idx + bcCount) % 256;
+	/* Add that offset to the base virtual address of the region, then
+	skip past the message header */
+	msgHeader = (struct mpi_msg_hdr *)(circularQ->base_virt	+ offset);
+	*messagePtr = ((void *)msgHeader) + sizeof(struct mpi_msg_hdr);
+	return 0;
+}
+
+/**
+ * mpi_build_cmd - build a command in the inbound queue and update the PI
+ * to tell the FW to fetch this message from the IOMB.
+ * @pm8001_ha: our hba card information
+ * @circularQ: the inbound queue we want to transfer to HBA.
+ * @opCode: the operation code represents commands which LLDD and fw recognized.
+ * @payload: the command payload of each operation command.
+ */
+static int mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
+			 struct inbound_queue_table *circularQ,
+			 u32 opCode, void *payload)
+{
+	u32 Header = 0, hpriority = 0, bc = 1, category = 0x02;
+	u32 responseQueue = 0;
+	void *pMessage;
+
+	if (mpi_msg_free_get(circularQ, 64, &pMessage) < 0) {
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("No free mpi buffer \n"));
+		return -1;
+	}
+	BUG_ON(!payload);
+	/*Copy to the payload*/
+	memcpy(pMessage, payload, (64 - sizeof(struct mpi_msg_hdr)));
+
+	/*Build the header*/
+	Header = ((1 << 31) | (hpriority << 30) | ((bc & 0x1f) << 24)
+		| ((responseQueue & 0x3F) << 16)
+		| ((category & 0xF) << 12) | (opCode & 0xFFF));
+
+	pm8001_write_32((pMessage - 4), 0, cpu_to_le32(Header));
+	/*Update the PI to the firmware*/
+	pm8001_cw32(pm8001_ha, circularQ->pi_pci_bar,
+		circularQ->pi_offset, circularQ->producer_idx);
+	PM8001_IO_DBG(pm8001_ha,
+		pm8001_printk("after PI= %d CI= %d \n", circularQ->producer_idx,
+		circularQ->consumer_index));
+	return 0;
+}
+
+static u32 mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg,
+			    struct outbound_queue_table *circularQ, u8 bc)
+{
+	u32 producer_index;
+	struct mpi_msg_hdr *msgHeader;
+	struct mpi_msg_hdr *pOutBoundMsgHeader;
+
+	msgHeader = (struct mpi_msg_hdr *)(pMsg - sizeof(struct mpi_msg_hdr));
+	pOutBoundMsgHeader = (struct mpi_msg_hdr *)(circularQ->base_virt +
+				circularQ->consumer_idx * 64);
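+	/* The message being freed should be the one the consumer index
+	currently points at; on a mismatch just resync the producer index
+	from the SPC and leave the consumer index untouched. */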
+	if (pOutBoundMsgHeader != msgHeader) {
+		PM8001_FAIL_DBG(pm8001_ha,
+			pm8001_printk("consumer_idx = %d msgHeader = %p\n",
+			circularQ->consumer_idx, msgHeader));
+
+		/* Update the producer index from SPC */
+		producer_index = pm8001_read_32(circularQ->pi_virt);
+		circularQ->producer_index = cpu_to_le32(producer_index);
+		PM8001_FAIL_DBG(pm8001_ha,
+			pm8001_printk("consumer_idx = %d producer_index = %d"
+			"msgHeader = %p\n", circularQ->consumer_idx,
+			circularQ->producer_index, msgHeader));
+		return 0;
+	}
+	/* free the circular queue buffer elements associated with the message*/
+	circularQ->consumer_idx = (circularQ->consumer_idx + bc) % 256;
+	/* update the CI of outbound queue */
+	pm8001_cw32(pm8001_ha, circularQ->ci_pci_bar, circularQ->ci_offset,
+		circularQ->consumer_idx);
+	/* Update the producer index from SPC*/
+	producer_index = pm8001_read_32(circularQ->pi_virt);
+	circularQ->producer_index = cpu_to_le32(producer_index);
+	PM8001_IO_DBG(pm8001_ha,
+		pm8001_printk(" CI=%d PI=%d\n", circularQ->consumer_idx,
+		circularQ->producer_index));
+	return 0;
+}
+
+/**
+ * mpi_msg_consume - get the MPI message from the outbound queue message table.
+ * @pm8001_ha: our hba card information
+ * @circularQ: the outbound queue table.
+ * @messagePtr1: the message contents of this outbound message.
+ * @pBC: the buffer count of this message.
+ */
+static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
+			   struct outbound_queue_table *circularQ,
+			   void **messagePtr1, u8 *pBC)
+{
+	struct mpi_msg_hdr	*msgHeader;
+	__le32	msgHeader_tmp;
+	u32 header_tmp;
+	do {
+		/* If there are not-yet-delivered messages ... */
+		if (circularQ->producer_index != circularQ->consumer_idx) {
+			/*Get the pointer to the circular queue buffer element*/
+			msgHeader = (struct mpi_msg_hdr *)
+				(circularQ->base_virt +
+				circularQ->consumer_idx * 64);
+			/* read header */
+			header_tmp = pm8001_read_32(msgHeader);
+			msgHeader_tmp = cpu_to_le32(header_tmp);
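+			/* Bit 31 of the header marks the IOMB as produced by
+			the FW; bits 11-0 carry the opcode and bits 28-24 the
+			buffer count. */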
+			if (0 != (msgHeader_tmp & 0x80000000)) {
+				if (OPC_OUB_SKIP_ENTRY !=
+					(msgHeader_tmp & 0xfff)) {
+					*messagePtr1 =
+						((u8 *)msgHeader) +
+						sizeof(struct mpi_msg_hdr);
+					*pBC = (u8)((msgHeader_tmp >> 24) &
+						0x1f);
+					PM8001_IO_DBG(pm8001_ha,
+						pm8001_printk(": CI=%d PI=%d "
+						"msgHeader=%x\n",
+						circularQ->consumer_idx,
+						circularQ->producer_index,
+						msgHeader_tmp));
+					return MPI_IO_STATUS_SUCCESS;
+				} else {
+					circularQ->consumer_idx =
+						(circularQ->consumer_idx +
+						((msgHeader_tmp >> 24) & 0x1f))
+						% 256;
+					msgHeader_tmp = 0;
+					pm8001_write_32(msgHeader, 0, 0);
+					/* update the CI of outbound queue */
+					pm8001_cw32(pm8001_ha,
+						circularQ->ci_pci_bar,
+						circularQ->ci_offset,
+						circularQ->consumer_idx);
+				}
+			} else {
+				circularQ->consumer_idx =
+					(circularQ->consumer_idx +
+					((msgHeader_tmp >> 24) & 0x1f)) % 256;
+				msgHeader_tmp = 0;
+				pm8001_write_32(msgHeader, 0, 0);
+				/* update the CI of outbound queue */
+				pm8001_cw32(pm8001_ha, circularQ->ci_pci_bar,
+					circularQ->ci_offset,
+					circularQ->consumer_idx);
+				return MPI_IO_STATUS_FAIL;
+			}
+		} else {
+			u32 producer_index;
+			void *pi_virt = circularQ->pi_virt;
+			/* Update the producer index from SPC */
+			producer_index = pm8001_read_32(pi_virt);
+			circularQ->producer_index = cpu_to_le32(producer_index);
+		}
+	} while (circularQ->producer_index != circularQ->consumer_idx);
+	/* while we don't have any more not-yet-delivered messages */
+	/* report empty */
+	return MPI_IO_STATUS_BUSY;
+}
+
+static void pm8001_work_queue(struct work_struct *work)
+{
+	struct delayed_work *dw = container_of(work, struct delayed_work, work);
+	struct pm8001_wq *wq = container_of(dw, struct pm8001_wq, work_q);
+	struct pm8001_device *pm8001_dev;
+	struct domain_device	*dev;
+
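+	/* Every event handled here recovers by resetting the I_T nexus of
+	the affected device. */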
+	switch (wq->handler) {
+	case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
+		pm8001_dev = wq->data;
+		dev = pm8001_dev->sas_device;
+		pm8001_I_T_nexus_reset(dev);
+		break;
+	case IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY:
+		pm8001_dev = wq->data;
+		dev = pm8001_dev->sas_device;
+		pm8001_I_T_nexus_reset(dev);
+		break;
+	case IO_DS_IN_ERROR:
+		pm8001_dev = wq->data;
+		dev = pm8001_dev->sas_device;
+		pm8001_I_T_nexus_reset(dev);
+		break;
+	case IO_DS_NON_OPERATIONAL:
+		pm8001_dev = wq->data;
+		dev = pm8001_dev->sas_device;
+		pm8001_I_T_nexus_reset(dev);
+		break;
+	}
+	list_del(&wq->entry);
+	kfree(wq);
+}
+
+static int pm8001_handle_event(struct pm8001_hba_info *pm8001_ha, void *data,
+			       int handler)
+{
+	struct pm8001_wq *wq;
+	int ret = 0;
+
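+	/* GFP_ATOMIC: this is reached from the IOMB completion path, which
+	may run in interrupt context. */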
+	wq = kmalloc(sizeof(struct pm8001_wq), GFP_ATOMIC);
+	if (wq) {
+		wq->pm8001_ha = pm8001_ha;
+		wq->data = data;
+		wq->handler = handler;
+		INIT_DELAYED_WORK(&wq->work_q, pm8001_work_queue);
+		list_add_tail(&wq->entry, &pm8001_ha->wq_list);
+		schedule_delayed_work(&wq->work_q, 0);
+	} else
+		ret = -ENOMEM;
+
+	return ret;
+}
+
+/**
+ * mpi_ssp_completion - process the FW response to an SSP request.
+ * @pm8001_ha: our hba card information
+ * @piomb: the message contents of this outbound message.
+ *
+ * When the FW has completed an SSP request, for example an IO request, and
+ * has filled the SG buffers with the data, it triggers this event to signal
+ * that it has finished the job; check the corresponding buffer. We then tell
+ * the caller, which may be waiting for the result, so it can notify the
+ * upper layer that the task has finished.
+ */
+static void
+mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+	struct sas_task *t;
+	struct pm8001_ccb_info *ccb;
+	unsigned long flags;
+	u32 status;
+	u32 param;
+	u32 tag;
+	struct ssp_completion_resp *psspPayload;
+	struct task_status_struct *ts;
+	struct ssp_response_iu *iu;
+	struct pm8001_device *pm8001_dev;
+	psspPayload = (struct ssp_completion_resp *)(piomb + 4);
+	status = le32_to_cpu(psspPayload->status);
+	tag = le32_to_cpu(psspPayload->tag);
+	ccb = &pm8001_ha->ccb_info[tag];
+	pm8001_dev = ccb->device;
+	param = le32_to_cpu(psspPayload->param);
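+	/* A zero param means a clean completion; otherwise param carries the
+	residual count and a response IU follows (see IO_SUCCESS below). */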
+
+	t = ccb->task;
+
+	if (status && status != IO_UNDERFLOW)
+		PM8001_FAIL_DBG(pm8001_ha,
+			pm8001_printk("sas IO status 0x%x\n", status));
+	if (unlikely(!t || !t->lldd_task || !t->dev))
+		return;
+	ts = &t->task_status;
+	switch (status) {
+	case IO_SUCCESS:
+		PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS"
+			",param = %d \n", param));
+		if (param == 0) {
+			ts->resp = SAS_TASK_COMPLETE;
+			ts->stat = SAM_GOOD;
+		} else {
+			ts->resp = SAS_TASK_COMPLETE;
+			ts->stat = SAS_PROTO_RESPONSE;
+			ts->residual = param;
+			iu = &psspPayload->ssp_resp_iu;
+			sas_ssp_task_response(pm8001_ha->dev, t, iu);
+		}
+		if (pm8001_dev)
+			pm8001_dev->running_req--;
+		break;
+	case IO_ABORTED:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_ABORTED IOMB Tag \n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_ABORTED_TASK;
+		break;
+	case IO_UNDERFLOW:
+		/* SSP Completion with error */
+		PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW"
+			",param = %d \n", param));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_DATA_UNDERRUN;
+		ts->residual = param;
+		if (pm8001_dev)
+			pm8001_dev->running_req--;
+		break;
+	case IO_NO_DEVICE:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_NO_DEVICE\n"));
+		ts->resp = SAS_TASK_UNDELIVERED;
+		ts->stat = SAS_PHY_DOWN;
+		break;
+	case IO_XFER_ERROR_BREAK:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_ERROR_BREAK\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		break;
+	case IO_XFER_ERROR_PHY_NOT_READY:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+		break;
+	case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
+		PM8001_IO_DBG(pm8001_ha,
+		pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_EPROTO;
+		break;
+	case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+		break;
+	case IO_OPEN_CNX_ERROR_BREAK:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+		break;
+	case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+		if (!t->uldd_task)
+			pm8001_handle_event(pm8001_ha,
+				pm8001_dev,
+				IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
+		break;
+	case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_BAD_DEST;
+		break;
+	case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_OPEN_CNX_ERROR_CONNECTION_RATE_"
+			"NOT_SUPPORTED\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_CONN_RATE;
+		break;
+	case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
+		ts->resp = SAS_TASK_UNDELIVERED;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
+		break;
+	case IO_XFER_ERROR_NAK_RECEIVED:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+		break;
+	case IO_XFER_ERROR_ACK_NAK_TIMEOUT:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_ERROR_ACK_NAK_TIMEOUT\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_NAK_R_ERR;
+		break;
+	case IO_XFER_ERROR_DMA:
+		PM8001_IO_DBG(pm8001_ha,
+		pm8001_printk("IO_XFER_ERROR_DMA\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		break;
+	case IO_XFER_OPEN_RETRY_TIMEOUT:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+		break;
+	case IO_XFER_ERROR_OFFSET_MISMATCH:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_ERROR_OFFSET_MISMATCH\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		break;
+	case IO_PORT_IN_RESET:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_PORT_IN_RESET\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		break;
+	case IO_DS_NON_OPERATIONAL:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_DS_NON_OPERATIONAL\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		if (!t->uldd_task)
+			pm8001_handle_event(pm8001_ha,
+				pm8001_dev,
+				IO_DS_NON_OPERATIONAL);
+		break;
+	case IO_DS_IN_RECOVERY:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_DS_IN_RECOVERY\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		break;
+	case IO_TM_TAG_NOT_FOUND:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_TM_TAG_NOT_FOUND\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		break;
+	case IO_SSP_EXT_IU_ZERO_LEN_ERROR:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_SSP_EXT_IU_ZERO_LEN_ERROR\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		break;
+	case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+		break;
+	default:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("Unknown status 0x%x\n", status));
+		/* not allowed case. Therefore, return failed status */
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		break;
+	}
+	PM8001_IO_DBG(pm8001_ha,
+		pm8001_printk("scsi_status = %x \n ",
+		psspPayload->ssp_resp_iu.status));
+	spin_lock_irqsave(&t->task_state_lock, flags);
+	t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
+	t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+	t->task_state_flags |= SAS_TASK_STATE_DONE;
+	if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
+		spin_unlock_irqrestore(&t->task_state_lock, flags);
+		PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("task 0x%p done with"
+			" io_status 0x%x resp 0x%x "
+			"stat 0x%x but aborted by upper layer!\n",
+			t, status, ts->resp, ts->stat));
+		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+	} else {
+		spin_unlock_irqrestore(&t->task_state_lock, flags);
+		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+		mb();/* in order to force CPU ordering */
+		t->task_done(t);
+	}
+}
+
+/* See the comments for mpi_ssp_completion */
+static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+	struct sas_task *t;
+	unsigned long flags;
+	struct task_status_struct *ts;
+	struct pm8001_ccb_info *ccb;
+	struct pm8001_device *pm8001_dev;
+	struct ssp_event_resp *psspPayload =
+		(struct ssp_event_resp *)(piomb + 4);
+	u32 event = le32_to_cpu(psspPayload->event);
+	u32 tag = le32_to_cpu(psspPayload->tag);
+	u32 port_id = le32_to_cpu(psspPayload->port_id);
+	u32 dev_id = le32_to_cpu(psspPayload->device_id);
+
+	ccb = &pm8001_ha->ccb_info[tag];
+	t = ccb->task;
+	pm8001_dev = ccb->device;
+	if (event)
+		PM8001_FAIL_DBG(pm8001_ha,
+			pm8001_printk("sas IO status 0x%x\n", event));
+	if (unlikely(!t || !t->lldd_task || !t->dev))
+		return;
+	ts = &t->task_status;
+	PM8001_IO_DBG(pm8001_ha,
+		pm8001_printk("port_id = %x,device_id = %x\n",
+		port_id, dev_id));
+	switch (event) {
+	case IO_OVERFLOW:
+		PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW\n");)
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_DATA_OVERRUN;
+		ts->residual = 0;
+		if (pm8001_dev)
+			pm8001_dev->running_req--;
+		break;
+	case IO_XFER_ERROR_BREAK:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_ERROR_BREAK\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_INTERRUPTED;
+		break;
+	case IO_XFER_ERROR_PHY_NOT_READY:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+		break;
+	case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT"
+			"_SUPPORTED\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_EPROTO;
+		break;
+	case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+		break;
+	case IO_OPEN_CNX_ERROR_BREAK:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+		break;
+	case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+		if (!t->uldd_task)
+			pm8001_handle_event(pm8001_ha,
+				pm8001_dev,
+				IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
+		break;
+	case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_BAD_DEST;
+		break;
+	case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_OPEN_CNX_ERROR_CONNECTION_RATE_"
+			"NOT_SUPPORTED\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_CONN_RATE;
+		break;
+	case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
+		PM8001_IO_DBG(pm8001_ha,
+		       pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
+		break;
+	case IO_XFER_ERROR_NAK_RECEIVED:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+		break;
+	case IO_XFER_ERROR_ACK_NAK_TIMEOUT:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_ERROR_ACK_NAK_TIMEOUT\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_NAK_R_ERR;
+		break;
+	case IO_XFER_OPEN_RETRY_TIMEOUT:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+		break;
+	case IO_XFER_ERROR_UNEXPECTED_PHASE:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_ERROR_UNEXPECTED_PHASE\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_DATA_OVERRUN;
+		break;
+	case IO_XFER_ERROR_XFER_RDY_OVERRUN:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_ERROR_XFER_RDY_OVERRUN\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_DATA_OVERRUN;
+		break;
+	case IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED:
+		PM8001_IO_DBG(pm8001_ha,
+		       pm8001_printk("IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_DATA_OVERRUN;
+		break;
+	case IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT:
+		PM8001_IO_DBG(pm8001_ha,
+		pm8001_printk("IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_DATA_OVERRUN;
+		break;
+	case IO_XFER_ERROR_OFFSET_MISMATCH:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_ERROR_OFFSET_MISMATCH\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_DATA_OVERRUN;
+		break;
+	case IO_XFER_ERROR_XFER_ZERO_DATA_LEN:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_ERROR_XFER_ZERO_DATA_LEN\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_DATA_OVERRUN;
+		break;
+	case IO_XFER_CMD_FRAME_ISSUED:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("  IO_XFER_CMD_FRAME_ISSUED\n"));
+		return;
+	default:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("Unknown status 0x%x\n", event));
+		/* not allowed case. Therefore, return failed status */
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_DATA_OVERRUN;
+		break;
+	}
+	spin_lock_irqsave(&t->task_state_lock, flags);
+	t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
+	t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+	t->task_state_flags |= SAS_TASK_STATE_DONE;
+	if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
+		spin_unlock_irqrestore(&t->task_state_lock, flags);
+		PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("task 0x%p done with"
+			" event 0x%x resp 0x%x "
+			"stat 0x%x but aborted by upper layer!\n",
+			t, event, ts->resp, ts->stat));
+		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+	} else {
+		spin_unlock_irqrestore(&t->task_state_lock, flags);
+		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+		mb();/* in order to force CPU ordering */
+		t->task_done(t);
+	}
+}
+
+/* See the comments for mpi_ssp_completion */
+static void
+mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+	struct sas_task *t;
+	struct pm8001_ccb_info *ccb;
+	unsigned long flags;
+	u32 param;
+	u32 status;
+	u32 tag;
+	struct sata_completion_resp *psataPayload;
+	struct task_status_struct *ts;
+	struct ata_task_resp *resp;
+	u32 *sata_resp;
+	struct pm8001_device *pm8001_dev;
+
+	psataPayload = (struct sata_completion_resp *)(piomb + 4);
+	status = le32_to_cpu(psataPayload->status);
+	tag = le32_to_cpu(psataPayload->tag);
+
+	ccb = &pm8001_ha->ccb_info[tag];
+	param = le32_to_cpu(psataPayload->param);
+	t = ccb->task;
+	ts = &t->task_status;
+	pm8001_dev = ccb->device;
+	if (status)
+		PM8001_FAIL_DBG(pm8001_ha,
+			pm8001_printk("sata IO status 0x%x\n", status));
+	if (unlikely(!t || !t->lldd_task || !t->dev))
+		return;
+
+	switch (status) {
+	case IO_SUCCESS:
+		PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n"));
+		if (param == 0) {
+			ts->resp = SAS_TASK_COMPLETE;
+			ts->stat = SAM_GOOD;
+		} else {
+			u8 len;
+			ts->resp = SAS_TASK_COMPLETE;
+			ts->stat = SAS_PROTO_RESPONSE;
+			ts->residual = param;
+			PM8001_IO_DBG(pm8001_ha,
+				pm8001_printk("SAS_PROTO_RESPONSE len = %d\n",
+				param));
+			sata_resp = &psataPayload->sata_resp[0];
+			resp = (struct ata_task_resp *)ts->buf;
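+			/* The response FIS length depends on how the command
+			was transferred: a PIO setup FIS for non-DMA reads, a
+			set device bits FIS for NCQ, otherwise a D2H register
+			FIS. */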
+			if (t->ata_task.dma_xfer == 0 &&
+			t->data_dir == PCI_DMA_FROMDEVICE) {
+				len = sizeof(struct pio_setup_fis);
+				PM8001_IO_DBG(pm8001_ha,
+				pm8001_printk("PIO read len = %d\n", len));
+			} else if (t->ata_task.use_ncq) {
+				len = sizeof(struct set_dev_bits_fis);
+				PM8001_IO_DBG(pm8001_ha,
+					pm8001_printk("FPDMA len = %d\n", len));
+			} else {
+				len = sizeof(struct dev_to_host_fis);
+				PM8001_IO_DBG(pm8001_ha,
+				pm8001_printk("other len = %d\n", len));
+			}
+			if (SAS_STATUS_BUF_SIZE >= sizeof(*resp)) {
+				resp->frame_len = len;
+				memcpy(&resp->ending_fis[0], sata_resp, len);
+				ts->buf_valid_size = sizeof(*resp);
+			} else
+				PM8001_IO_DBG(pm8001_ha,
+					pm8001_printk("response to large \n"));
+		}
+		if (pm8001_dev)
+			pm8001_dev->running_req--;
+		break;
+	case IO_ABORTED:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_ABORTED IOMB Tag \n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_ABORTED_TASK;
+		if (pm8001_dev)
+			pm8001_dev->running_req--;
+		break;
+		/* the following cases are still to be handled (TODO) */
+	case IO_UNDERFLOW:
+		/* SATA Completion with error */
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_UNDERFLOW param = %d\n", param));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_DATA_UNDERRUN;
+		ts->residual =  param;
+		if (pm8001_dev)
+			pm8001_dev->running_req--;
+		break;
+	case IO_NO_DEVICE:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_NO_DEVICE\n"));
+		ts->resp = SAS_TASK_UNDELIVERED;
+		ts->stat = SAS_PHY_DOWN;
+		break;
+	case IO_XFER_ERROR_BREAK:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_ERROR_BREAK\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_INTERRUPTED;
+		break;
+	case IO_XFER_ERROR_PHY_NOT_READY:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+		break;
+	case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT"
+			"_SUPPORTED\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_EPROTO;
+		break;
+	case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+		break;
+	case IO_OPEN_CNX_ERROR_BREAK:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_RSVD_CONT0;
+		break;
+	case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_DEV_NO_RESPONSE;
+		if (!t->uldd_task) {
+			pm8001_handle_event(pm8001_ha,
+				pm8001_dev,
+				IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
+			ts->resp = SAS_TASK_UNDELIVERED;
+			ts->stat = SAS_QUEUE_FULL;
+			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+			mb();/*in order to force CPU ordering*/
+			t->task_done(t);
+			return;
+		}
+		break;
+	case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
+		ts->resp = SAS_TASK_UNDELIVERED;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_BAD_DEST;
+		if (!t->uldd_task) {
+			pm8001_handle_event(pm8001_ha,
+				pm8001_dev,
+				IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
+			ts->resp = SAS_TASK_UNDELIVERED;
+			ts->stat = SAS_QUEUE_FULL;
+			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+			mb();/*ditto*/
+			t->task_done(t);
+			return;
+		}
+		break;
+	case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_OPEN_CNX_ERROR_CONNECTION_RATE_"
+			"NOT_SUPPORTED\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_CONN_RATE;
+		break;
+	case IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_OPEN_CNX_ERROR_STP_RESOURCES"
+			"_BUSY\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_DEV_NO_RESPONSE;
+		if (!t->uldd_task) {
+			pm8001_handle_event(pm8001_ha,
+				pm8001_dev,
+				IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY);
+			ts->resp = SAS_TASK_UNDELIVERED;
+			ts->stat = SAS_QUEUE_FULL;
+			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+			mb();/* ditto*/
+			t->task_done(t);
+			return;
+		}
+		break;
+	case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
+		PM8001_IO_DBG(pm8001_ha,
+		       pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
+		break;
+	case IO_XFER_ERROR_NAK_RECEIVED:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_NAK_R_ERR;
+		break;
+	case IO_XFER_ERROR_ACK_NAK_TIMEOUT:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_ERROR_ACK_NAK_TIMEOUT\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_NAK_R_ERR;
+		break;
+	case IO_XFER_ERROR_DMA:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_ERROR_DMA\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_ABORTED_TASK;
+		break;
+	case IO_XFER_ERROR_SATA_LINK_TIMEOUT:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_ERROR_SATA_LINK_TIMEOUT\n"));
+		ts->resp = SAS_TASK_UNDELIVERED;
+		ts->stat = SAS_DEV_NO_RESPONSE;
+		break;
+	case IO_XFER_ERROR_REJECTED_NCQ_MODE:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_ERROR_REJECTED_NCQ_MODE\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_DATA_UNDERRUN;
+		break;
+	case IO_XFER_OPEN_RETRY_TIMEOUT:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_TO;
+		break;
+	case IO_PORT_IN_RESET:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_PORT_IN_RESET\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_DEV_NO_RESPONSE;
+		break;
+	case IO_DS_NON_OPERATIONAL:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_DS_NON_OPERATIONAL\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_DEV_NO_RESPONSE;
+		if (!t->uldd_task) {
+			pm8001_handle_event(pm8001_ha, pm8001_dev,
+				    IO_DS_NON_OPERATIONAL);
+			ts->resp = SAS_TASK_UNDELIVERED;
+			ts->stat = SAS_QUEUE_FULL;
+			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+			mb();/*ditto*/
+			t->task_done(t);
+			return;
+		}
+		break;
+	case IO_DS_IN_RECOVERY:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("  IO_DS_IN_RECOVERY\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_DEV_NO_RESPONSE;
+		break;
+	case IO_DS_IN_ERROR:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_DS_IN_ERROR\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_DEV_NO_RESPONSE;
+		if (!t->uldd_task) {
+			pm8001_handle_event(pm8001_ha, pm8001_dev,
+				    IO_DS_IN_ERROR);
+			ts->resp = SAS_TASK_UNDELIVERED;
+			ts->stat = SAS_QUEUE_FULL;
+			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+			mb();/*ditto*/
+			t->task_done(t);
+			return;
+		}
+		break;
+	case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+		break;
+	default:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("Unknown status 0x%x\n", status));
+		/* not allowed case. Therefore, return failed status */
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_DEV_NO_RESPONSE;
+		break;
+	}
+	spin_lock_irqsave(&t->task_state_lock, flags);
+	t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
+	t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+	t->task_state_flags |= SAS_TASK_STATE_DONE;
+	if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
+		spin_unlock_irqrestore(&t->task_state_lock, flags);
+		PM8001_FAIL_DBG(pm8001_ha,
+			pm8001_printk("task 0x%p done with io_status 0x%x"
+			" resp 0x%x stat 0x%x but aborted by upper layer!\n",
+			t, status, ts->resp, ts->stat));
+		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+	} else {
+		spin_unlock_irqrestore(&t->task_state_lock, flags);
+		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+		mb();/* ditto */
+		t->task_done(t);
+	}
+}
+
+/* See the comments for mpi_ssp_completion */
+static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+	struct sas_task *t;
+	unsigned long flags;
+	struct task_status_struct *ts;
+	struct pm8001_ccb_info *ccb;
+	struct pm8001_device *pm8001_dev;
+	struct sata_event_resp *psataPayload =
+		(struct sata_event_resp *)(piomb + 4);
+	u32 event = le32_to_cpu(psataPayload->event);
+	u32 tag = le32_to_cpu(psataPayload->tag);
+	u32 port_id = le32_to_cpu(psataPayload->port_id);
+	u32 dev_id = le32_to_cpu(psataPayload->device_id);
+
+	ccb = &pm8001_ha->ccb_info[tag];
+	t = ccb->task;
+	pm8001_dev = ccb->device;
+	if (event)
+		PM8001_FAIL_DBG(pm8001_ha,
+			pm8001_printk("sata IO status 0x%x\n", event));
+	if (unlikely(!t || !t->lldd_task || !t->dev))
+		return;
+	ts = &t->task_status;
+	PM8001_IO_DBG(pm8001_ha,
+		pm8001_printk("port_id = %x,device_id = %x\n",
+		port_id, dev_id));
+	switch (event) {
+	case IO_OVERFLOW:
+		PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_DATA_OVERRUN;
+		ts->residual = 0;
+		if (pm8001_dev)
+			pm8001_dev->running_req--;
+		break;
+	case IO_XFER_ERROR_BREAK:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_ERROR_BREAK\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_INTERRUPTED;
+		break;
+	case IO_XFER_ERROR_PHY_NOT_READY:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+		break;
+	case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT"
+			"_SUPPORTED\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_EPROTO;
+		break;
+	case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+		break;
+	case IO_OPEN_CNX_ERROR_BREAK:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_RSVD_CONT0;
+		break;
+	case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
+		ts->resp = SAS_TASK_UNDELIVERED;
+		ts->stat = SAS_DEV_NO_RESPONSE;
+		if (!t->uldd_task) {
+			pm8001_handle_event(pm8001_ha,
+				pm8001_dev,
+				IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
+			ts->resp = SAS_TASK_COMPLETE;
+			ts->stat = SAS_QUEUE_FULL;
+			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+			mb();/*ditto*/
+			t->task_done(t);
+			return;
+		}
+		break;
+	case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
+		ts->resp = SAS_TASK_UNDELIVERED;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_BAD_DEST;
+		break;
+	case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_OPEN_CNX_ERROR_CONNECTION_RATE_"
+			"NOT_SUPPORTED\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_CONN_RATE;
+		break;
+	case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
+		PM8001_IO_DBG(pm8001_ha,
+		       pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
+		break;
+	case IO_XFER_ERROR_NAK_RECEIVED:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_NAK_R_ERR;
+		break;
+	case IO_XFER_ERROR_PEER_ABORTED:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_ERROR_PEER_ABORTED\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_NAK_R_ERR;
+		break;
+	case IO_XFER_ERROR_REJECTED_NCQ_MODE:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_ERROR_REJECTED_NCQ_MODE\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_DATA_UNDERRUN;
+		break;
+	case IO_XFER_OPEN_RETRY_TIMEOUT:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_TO;
+		break;
+	case IO_XFER_ERROR_UNEXPECTED_PHASE:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_ERROR_UNEXPECTED_PHASE\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_TO;
+		break;
+	case IO_XFER_ERROR_XFER_RDY_OVERRUN:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_ERROR_XFER_RDY_OVERRUN\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_TO;
+		break;
+	case IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED:
+		PM8001_IO_DBG(pm8001_ha,
+		       pm8001_printk("IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_TO;
+		break;
+	case IO_XFER_ERROR_OFFSET_MISMATCH:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_ERROR_OFFSET_MISMATCH\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_TO;
+		break;
+	case IO_XFER_ERROR_XFER_ZERO_DATA_LEN:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_ERROR_XFER_ZERO_DATA_LEN\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_TO;
+		break;
+	case IO_XFER_CMD_FRAME_ISSUED:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_CMD_FRAME_ISSUED\n"));
+		break;
+	case IO_XFER_PIO_SETUP_ERROR:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_PIO_SETUP_ERROR\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_TO;
+		break;
+	default:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("Unknown status 0x%x\n", event));
+		/* not allowed case. Therefore, return failed status */
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_TO;
+		break;
+	}
+	spin_lock_irqsave(&t->task_state_lock, flags);
+	t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
+	t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+	t->task_state_flags |= SAS_TASK_STATE_DONE;
+	if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
+		spin_unlock_irqrestore(&t->task_state_lock, flags);
+		PM8001_FAIL_DBG(pm8001_ha,
+			pm8001_printk("task 0x%p done with io_status 0x%x"
+			" resp 0x%x stat 0x%x but aborted by upper layer!\n",
+			t, event, ts->resp, ts->stat));
+		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+	} else {
+		spin_unlock_irqrestore(&t->task_state_lock, flags);
+		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+		mb();/* in order to force CPU ordering */
+		t->task_done(t);
+	}
+}
+
+/* See the comments for mpi_ssp_completion */
+static void
+mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+	u32 param;
+	struct sas_task *t;
+	struct pm8001_ccb_info *ccb;
+	unsigned long flags;
+	u32 status;
+	u32 tag;
+	struct smp_completion_resp *psmpPayload;
+	struct task_status_struct *ts;
+	struct pm8001_device *pm8001_dev;
+
+	psmpPayload = (struct smp_completion_resp *)(piomb + 4);
+	status = le32_to_cpu(psmpPayload->status);
+	tag = le32_to_cpu(psmpPayload->tag);
+
+	ccb = &pm8001_ha->ccb_info[tag];
+	param = le32_to_cpu(psmpPayload->param);
+	t = ccb->task;
+	ts = &t->task_status;
+	pm8001_dev = ccb->device;
+	if (status)
+		PM8001_FAIL_DBG(pm8001_ha,
+			pm8001_printk("smp IO status 0x%x\n", status));
+	if (unlikely(!t || !t->lldd_task || !t->dev))
+		return;
+
+	switch (status) {
+	case IO_SUCCESS:
+		PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAM_GOOD;
+		if (pm8001_dev)
+			pm8001_dev->running_req--;
+		break;
+	case IO_ABORTED:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_ABORTED IOMB\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_ABORTED_TASK;
+		if (pm8001_dev)
+			pm8001_dev->running_req--;
+		break;
+	case IO_OVERFLOW:
+		PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_DATA_OVERRUN;
+		ts->residual = 0;
+		if (pm8001_dev)
+			pm8001_dev->running_req--;
+		break;
+	case IO_NO_DEVICE:
+		PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_NO_DEVICE\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_PHY_DOWN;
+		break;
+	case IO_ERROR_HW_TIMEOUT:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_ERROR_HW_TIMEOUT\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAM_BUSY;
+		break;
+	case IO_XFER_ERROR_BREAK:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_ERROR_BREAK\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAM_BUSY;
+		break;
+	case IO_XFER_ERROR_PHY_NOT_READY:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAM_BUSY;
+		break;
+	case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
+		PM8001_IO_DBG(pm8001_ha,
+		pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+		break;
+	case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+		break;
+	case IO_OPEN_CNX_ERROR_BREAK:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_RSVD_CONT0;
+		break;
+	case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+		pm8001_handle_event(pm8001_ha,
+				pm8001_dev,
+				IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
+		break;
+	case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_BAD_DEST;
+		break;
+	case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_OPEN_CNX_ERROR_CONNECTION_RATE_"
+			"NOT_SUPPORTED\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_CONN_RATE;
+		break;
+	case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
+		PM8001_IO_DBG(pm8001_ha,
+		       pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
+		break;
+	case IO_XFER_ERROR_RX_FRAME:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_ERROR_RX_FRAME\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_DEV_NO_RESPONSE;
+		break;
+	case IO_XFER_OPEN_RETRY_TIMEOUT:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+		break;
+	case IO_ERROR_INTERNAL_SMP_RESOURCE:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_ERROR_INTERNAL_SMP_RESOURCE\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_QUEUE_FULL;
+		break;
+	case IO_PORT_IN_RESET:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_PORT_IN_RESET\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+		break;
+	case IO_DS_NON_OPERATIONAL:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_DS_NON_OPERATIONAL\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_DEV_NO_RESPONSE;
+		break;
+	case IO_DS_IN_RECOVERY:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_DS_IN_RECOVERY\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+		break;
+	case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+		break;
+	default:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("Unknown status 0x%x\n", status));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_DEV_NO_RESPONSE;
+		/* not allowed case. Therefore, return failed status */
+		break;
+	}
+	spin_lock_irqsave(&t->task_state_lock, flags);
+	t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
+	t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+	t->task_state_flags |= SAS_TASK_STATE_DONE;
+	if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
+		spin_unlock_irqrestore(&t->task_state_lock, flags);
+		PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("task 0x%p done with"
+			" io_status 0x%x resp 0x%x "
+			"stat 0x%x but aborted by upper layer!\n",
+			t, status, ts->resp, ts->stat));
+		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+	} else {
+		spin_unlock_irqrestore(&t->task_state_lock, flags);
+		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+		mb();/* in order to force CPU ordering */
+		t->task_done(t);
+	}
+}
+
+static void
+mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+	struct set_dev_state_resp *pPayload =
+		(struct set_dev_state_resp *)(piomb + 4);
+	u32 tag = le32_to_cpu(pPayload->tag);
+	struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[tag];
+	struct pm8001_device *pm8001_dev = ccb->device;
+	u32 status = le32_to_cpu(pPayload->status);
+	u32 device_id = le32_to_cpu(pPayload->device_id);
+	u8 pds = le32_to_cpu(pPayload->pds_nds) & PDS_BITS;
+	u8 nds = le32_to_cpu(pPayload->pds_nds) & NDS_BITS;
+	PM8001_MSG_DBG(pm8001_ha, pm8001_printk("Set device id = 0x%x state "
+		"from 0x%x to 0x%x status = 0x%x!\n",
+		device_id, pds, nds, status));
+	complete(pm8001_dev->setds_completion);
+	ccb->task = NULL;
+	ccb->ccb_tag = 0xFFFFFFFF;
+	pm8001_ccb_free(pm8001_ha, tag);
+}
+
+static void
+mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+	struct get_nvm_data_resp *pPayload =
+		(struct get_nvm_data_resp *)(piomb + 4);
+	u32 tag = le32_to_cpu(pPayload->tag);
+	struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[tag];
+	u32 dlen_status = le32_to_cpu(pPayload->dlen_status);
+	complete(pm8001_ha->nvmd_completion);
+	PM8001_MSG_DBG(pm8001_ha, pm8001_printk("Set nvm data complete!\n"));
+	if ((dlen_status & NVMD_STAT) != 0) {
+		PM8001_FAIL_DBG(pm8001_ha,
+			pm8001_printk("Set nvm data error!\n"));
+		return;
+	}
+	ccb->task = NULL;
+	ccb->ccb_tag = 0xFFFFFFFF;
+	pm8001_ccb_free(pm8001_ha, tag);
+}
+
+static void
+mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+	struct fw_control_ex	*fw_control_context;
+	struct get_nvm_data_resp *pPayload =
+		(struct get_nvm_data_resp *)(piomb + 4);
+	u32 tag = le32_to_cpu(pPayload->tag);
+	struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[tag];
+	u32 dlen_status = le32_to_cpu(pPayload->dlen_status);
+	u32 ir_tds_bn_dps_das_nvm =
+		le32_to_cpu(pPayload->ir_tda_bn_dps_das_nvm);
+	void *virt_addr = pm8001_ha->memoryMap.region[NVMD].virt_ptr;
+	fw_control_context = ccb->fw_control_context;
+
+	PM8001_MSG_DBG(pm8001_ha, pm8001_printk("Get nvm data complete!\n"));
+	if ((dlen_status & NVMD_STAT) != 0) {
+		PM8001_FAIL_DBG(pm8001_ha,
+			pm8001_printk("Get nvm data error!\n"));
+		complete(pm8001_ha->nvmd_completion);
+		return;
+	}
+
+	if (ir_tds_bn_dps_das_nvm & IPMode) {
+		/* indirect mode - IR bit set */
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("Get NVMD success, IR=1\n"));
+		if ((ir_tds_bn_dps_das_nvm & NVMD_TYPE) == TWI_DEVICE) {
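+			/* 0x80a80200 presumably encodes the TWI device/page
+			 * layout whose VPD carries the SAS address at byte
+			 * offset 4; only that layout is parsed here.
+			 */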
+			if (ir_tds_bn_dps_das_nvm == 0x80a80200) {
+				memcpy(pm8001_ha->sas_addr,
+				      ((u8 *)virt_addr + 4),
+				       SAS_ADDR_SIZE);
+				PM8001_MSG_DBG(pm8001_ha,
+					pm8001_printk("Get SAS address"
+					" from VPD successfully!\n"));
+			}
+		} else if (((ir_tds_bn_dps_das_nvm & NVMD_TYPE) == C_SEEPROM)
+			|| ((ir_tds_bn_dps_das_nvm & NVMD_TYPE) == VPD_FLASH)
+			|| ((ir_tds_bn_dps_das_nvm & NVMD_TYPE) == EXPAN_ROM)) {
+			; /* nothing to do for these NVMD types */
+		} else if (((ir_tds_bn_dps_das_nvm & NVMD_TYPE) == AAP1_RDUMP)
+			|| ((ir_tds_bn_dps_das_nvm & NVMD_TYPE) == IOP_RDUMP)) {
+			;
+		} else {
+			/* Should not happen */
+			PM8001_MSG_DBG(pm8001_ha,
+				pm8001_printk("(IR=1)Wrong Device type 0x%x\n",
+				ir_tds_bn_dps_das_nvm));
+		}
+	} else { /* direct mode */
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("Get NVMD success, IR=0, dataLen=%d\n",
+			(dlen_status & NVMD_LEN) >> 24));
+	}
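+	/* copy the NVMD payload out of the shared DMA region into the
+	 * requester's buffer before waking the waiter */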
+	memcpy(fw_control_context->usrAddr,
+		pm8001_ha->memoryMap.region[NVMD].virt_ptr,
+		fw_control_context->len);
+	complete(pm8001_ha->nvmd_completion);
+	ccb->task = NULL;
+	ccb->ccb_tag = 0xFFFFFFFF;
+	pm8001_ccb_free(pm8001_ha, tag);
+}
+
+static int mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+	struct local_phy_ctl_resp *pPayload =
+		(struct local_phy_ctl_resp *)(piomb + 4);
+	u32 status = le32_to_cpu(pPayload->status);
+	u32 phy_id = le32_to_cpu(pPayload->phyop_phyid) & ID_BITS;
+	u32 phy_op = le32_to_cpu(pPayload->phyop_phyid) & OP_BITS;
+	if (status != 0)
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("phy %x: phy op %x failed!\n",
+			phy_id, phy_op));
+	else
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("phy %x: phy op %x succeeded!\n",
+			phy_id, phy_op));
+	return 0;
+}
+
+/**
+ * pm8001_bytes_dmaed - one of the interface functions for communicating
+ * with libsas
+ * @pm8001_ha: our hba card information
+ * @i: which phy received the event.
+ *
+ * When the HBA driver receives the identify-done event (or, for SATA, the
+ * initial FIS received event), it invokes this function to notify the sas
+ * layer that the sas topology has formed and the whole sas domain should be
+ * discovered. On a broadcast(change) primitive, the sas layer is instead
+ * told to discover only the changed part of the domain.
+ */
+static void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i)
+{
+	struct pm8001_phy *phy = &pm8001_ha->phy[i];
+	struct asd_sas_phy *sas_phy = &phy->sas_phy;
+	struct sas_ha_struct *sas_ha;
+	if (!phy->phy_attached)
+		return;
+
+	sas_ha = pm8001_ha->sas;
+	if (sas_phy->phy) {
+		struct sas_phy *sphy = sas_phy->phy;
+		sphy->negotiated_linkrate = sas_phy->linkrate;
+		sphy->minimum_linkrate = phy->minimum_linkrate;
+		sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
+		sphy->maximum_linkrate = phy->maximum_linkrate;
+		sphy->maximum_linkrate_hw = phy->maximum_linkrate;
+	}
+
+	if (phy->phy_type & PORT_TYPE_SAS) {
+		struct sas_identify_frame *id;
+		id = (struct sas_identify_frame *)phy->frame_rcvd;
+		id->dev_type = phy->identify.device_type;
+		id->initiator_bits = SAS_PROTOCOL_ALL;
+		id->target_bits = phy->identify.target_port_protocols;
+	} else if (phy->phy_type & PORT_TYPE_SATA) {
+		/*Nothing*/
+	}
+	PM8001_MSG_DBG(pm8001_ha, pm8001_printk("phy %d bytes dmaed.\n", i));
+
+	sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
+	sas_ha->notify_port_event(sas_phy, PORTE_BYTES_DMAED);
+}
+
+/* Get the link rate speed  */
+static void get_lrate_mode(struct pm8001_phy *phy, u8 link_rate)
+{
+	struct sas_phy *sas_phy = phy->sas_phy.phy;
+
+	switch (link_rate) {
+	case PHY_SPEED_60:
+		phy->sas_phy.linkrate = SAS_LINK_RATE_6_0_GBPS;
+		phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_6_0_GBPS;
+		break;
+	case PHY_SPEED_30:
+		phy->sas_phy.linkrate = SAS_LINK_RATE_3_0_GBPS;
+		phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_3_0_GBPS;
+		break;
+	case PHY_SPEED_15:
+		phy->sas_phy.linkrate = SAS_LINK_RATE_1_5_GBPS;
+		phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_1_5_GBPS;
+		break;
+	}
+	sas_phy->negotiated_linkrate = phy->sas_phy.linkrate;
+	sas_phy->maximum_linkrate_hw = SAS_LINK_RATE_6_0_GBPS;
+	sas_phy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
+	sas_phy->maximum_linkrate = SAS_LINK_RATE_6_0_GBPS;
+	sas_phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
+}
+
+/**
+ * pm8001_get_attached_sas_addr - extract/generate attached SAS address
+ * @phy: pointer to the pm8001 phy
+ * @sas_addr: pointer to buffer where the SAS address is to be written
+ *
+ * This function extracts the SAS address from an IDENTIFY frame
+ * received.  If OOB is SATA, then a SAS address is generated from the
+ * HA tables.
+ *
+ * LOCKING: the frame_rcvd_lock needs to be held since this parses the frame
+ * buffer.
+ */
+static void pm8001_get_attached_sas_addr(struct pm8001_phy *phy,
+	u8 *sas_addr)
+{
+	if (phy->sas_phy.frame_rcvd[0] == 0x34
+		&& phy->sas_phy.oob_mode == SATA_OOB_MODE) {
+		struct pm8001_hba_info *pm8001_ha = phy->sas_phy.ha->lldd_ha;
+		/* FIS device-to-host */
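+		/* a SATA device has no SAS address of its own, so derive a
+		 * unique one from the HBA's SAS address plus the phy id */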
+		u64 addr = be64_to_cpu(*(__be64 *)pm8001_ha->sas_addr);
+		addr += phy->sas_phy.id;
+		*(__be64 *)sas_addr = cpu_to_be64(addr);
+	} else {
+		struct sas_identify_frame *idframe =
+			(void *) phy->sas_phy.frame_rcvd;
+		memcpy(sas_addr, idframe->sas_addr, SAS_ADDR_SIZE);
+	}
+}
+
+/**
+ * pm8001_hw_event_ack_req - on the PM8001, some hardware events must be
+ * acknowledged to the FW.
+ * @pm8001_ha: our hba card information
+ * @Qnum: the outbound queue message number.
+ * @SEA: source of event to ack
+ * @port_id: port id.
+ * @phyId: phy id.
+ * @param0: parameter 0.
+ * @param1: parameter 1.
+ */
+static void pm8001_hw_event_ack_req(struct pm8001_hba_info *pm8001_ha,
+	u32 Qnum, u32 SEA, u32 port_id, u32 phyId, u32 param0, u32 param1)
+{
+	struct hw_event_ack_req	 payload;
+	u32 opc = OPC_INB_SAS_HW_EVENT_ACK;
+
+	struct inbound_queue_table *circularQ;
+
+	memset((u8 *)&payload, 0, sizeof(payload));
+	circularQ = &pm8001_ha->inbnd_q_tbl[Qnum];
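+	/* event ACKs apparently complete without a tracked response, so a
+	 * fixed tag is used rather than allocating a ccb */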
+	payload.tag = cpu_to_le32(1);
+	payload.sea_phyid_portid = cpu_to_le32(((SEA & 0xFFFF) << 8) |
+		((phyId & 0x0F) << 4) | (port_id & 0x0F));
+	payload.param0 = cpu_to_le32(param0);
+	payload.param1 = cpu_to_le32(param1);
+	mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
+}
+
+static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
+	u32 phyId, u32 phy_op);
+
+/**
+ * hw_event_sas_phy_up - FW reports a SAS phy up event.
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static void
+hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+	struct hw_event_resp *pPayload =
+		(struct hw_event_resp *)(piomb + 4);
+	u32 lr_evt_status_phyid_portid =
+		le32_to_cpu(pPayload->lr_evt_status_phyid_portid);
+	u8 link_rate =
+		(u8)((lr_evt_status_phyid_portid & 0xF0000000) >> 28);
+	u8 phy_id =
+		(u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4);
+	struct sas_ha_struct *sas_ha = pm8001_ha->sas;
+	struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
+	unsigned long flags;
+	u8 deviceType = pPayload->sas_identify.dev_type;
+
+	PM8001_MSG_DBG(pm8001_ha,
+		pm8001_printk("HW_EVENT_SAS_PHY_UP \n"));
+
+	switch (deviceType) {
+	case SAS_PHY_UNUSED:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("device type no device.\n"));
+		break;
+	case SAS_END_DEVICE:
+		PM8001_MSG_DBG(pm8001_ha, pm8001_printk("end device.\n"));
+		pm8001_chip_phy_ctl_req(pm8001_ha, phy_id,
+			PHY_NOTIFY_ENABLE_SPINUP);
+		get_lrate_mode(phy, link_rate);
+		break;
+	case SAS_EDGE_EXPANDER_DEVICE:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("expander device.\n"));
+		get_lrate_mode(phy, link_rate);
+		break;
+	case SAS_FANOUT_EXPANDER_DEVICE:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("fanout expander device.\n"));
+		get_lrate_mode(phy, link_rate);
+		break;
+	default:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("unkown device type(%x)\n", deviceType));
+		break;
+	}
+	phy->phy_type |= PORT_TYPE_SAS;
+	phy->identify.device_type = deviceType;
+	phy->phy_attached = 1;
+	if (phy->identify.device_type == SAS_END_DEV)
+		phy->identify.target_port_protocols = SAS_PROTOCOL_SSP;
+	else if (phy->identify.device_type != NO_DEVICE)
+		phy->identify.target_port_protocols = SAS_PROTOCOL_SMP;
+	phy->sas_phy.oob_mode = SAS_OOB_MODE;
+	sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE);
+	spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags);
+	memcpy(phy->frame_rcvd, &pPayload->sas_identify,
+		sizeof(struct sas_identify_frame)-4);
+	phy->frame_rcvd_size = sizeof(struct sas_identify_frame) - 4;
+	pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr);
+	spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags);
+	if (pm8001_ha->flags == PM8001F_RUN_TIME)
+		mdelay(200);/* delay a moment to wait for disks to spin up */
+	pm8001_bytes_dmaed(pm8001_ha, phy_id);
+}
+
+/**
+ * hw_event_sata_phy_up - FW reports a SATA phy up event.
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static void
+hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+	struct hw_event_resp *pPayload =
+		(struct hw_event_resp *)(piomb + 4);
+	u32 lr_evt_status_phyid_portid =
+		le32_to_cpu(pPayload->lr_evt_status_phyid_portid);
+	u8 link_rate =
+		(u8)((lr_evt_status_phyid_portid & 0xF0000000) >> 28);
+	u8 phy_id =
+		(u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4);
+	struct sas_ha_struct *sas_ha = pm8001_ha->sas;
+	struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
+	unsigned long flags;
+	get_lrate_mode(phy, link_rate);
+	phy->phy_type |= PORT_TYPE_SATA;
+	phy->phy_attached = 1;
+	phy->sas_phy.oob_mode = SATA_OOB_MODE;
+	sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE);
+	spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags);
+	memcpy(phy->frame_rcvd, ((u8 *)&pPayload->sata_fis - 4),
+		sizeof(struct dev_to_host_fis));
+	phy->frame_rcvd_size = sizeof(struct dev_to_host_fis);
+	phy->identify.target_port_protocols = SAS_PROTOCOL_SATA;
+	phy->identify.device_type = SATA_DEV;
+	pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr);
+	spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags);
+	pm8001_bytes_dmaed(pm8001_ha, phy_id);
+}
+
+/**
+ * hw_event_phy_down - notify libsas that a phy is down.
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static void
+hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+	struct hw_event_resp *pPayload =
+		(struct hw_event_resp *)(piomb + 4);
+	u32 lr_evt_status_phyid_portid =
+		le32_to_cpu(pPayload->lr_evt_status_phyid_portid);
+	u8 port_id = (u8)(lr_evt_status_phyid_portid & 0x0000000F);
+	u8 phy_id =
+		(u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4);
+	u32 npip_portstate = le32_to_cpu(pPayload->npip_portstate);
+	u8 portstate = (u8)(npip_portstate & 0x0000000F);
+
+	switch (portstate) {
+	case PORT_VALID:
+		break;
+	case PORT_INVALID:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk(" PortInvalid portID %d \n", port_id));
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk(" Last phy Down and port invalid\n"));
+		pm8001_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
+			port_id, phy_id, 0, 0);
+		break;
+	case PORT_IN_RESET:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk(" PortInReset portID %d \n", port_id));
+		break;
+	case PORT_NOT_ESTABLISHED:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk(" phy Down and PORT_NOT_ESTABLISHED\n"));
+		break;
+	case PORT_LOSTCOMM:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk(" phy Down and PORT_LOSTCOMM\n"));
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk(" Last phy Down and port invalid\n"));
+		pm8001_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
+			port_id, phy_id, 0, 0);
+		break;
+	default:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk(" phy Down and(default) = %x\n",
+			portstate));
+		break;
+
+	}
+}
+
+/**
+ * mpi_reg_resp - process the register device ID response.
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ *
+ * When the sas layer finds a device it notifies the LLDD, and the driver
+ * then registers the domain device with the FW. This event returns the
+ * device ID the FW has assigned; from then on, communication with the FW
+ * uses that device ID instead of the SAS address.
+ */
+static int mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+	u32 status;
+	u32 device_id;
+	u32 htag;
+	struct pm8001_ccb_info *ccb;
+	struct pm8001_device *pm8001_dev;
+	struct dev_reg_resp *registerRespPayload =
+		(struct dev_reg_resp *)(piomb + 4);
+
+	htag = le32_to_cpu(registerRespPayload->tag);
+	ccb = &pm8001_ha->ccb_info[htag];
+	pm8001_dev = ccb->device;
+	status = le32_to_cpu(registerRespPayload->status);
+	device_id = le32_to_cpu(registerRespPayload->device_id);
+	PM8001_MSG_DBG(pm8001_ha,
+		pm8001_printk(" register device is status = %d\n", status));
+	switch (status) {
+	case DEVREG_SUCCESS:
+		PM8001_MSG_DBG(pm8001_ha, pm8001_printk("DEVREG_SUCCESS\n"));
+		pm8001_dev->device_id = device_id;
+		break;
+	case DEVREG_FAILURE_OUT_OF_RESOURCE:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("DEVREG_FAILURE_OUT_OF_RESOURCE\n"));
+		break;
+	case DEVREG_FAILURE_DEVICE_ALREADY_REGISTERED:
+		PM8001_MSG_DBG(pm8001_ha,
+		   pm8001_printk("DEVREG_FAILURE_DEVICE_ALREADY_REGISTERED\n"));
+		break;
+	case DEVREG_FAILURE_INVALID_PHY_ID:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("DEVREG_FAILURE_INVALID_PHY_ID\n"));
+		break;
+	case DEVREG_FAILURE_PHY_ID_ALREADY_REGISTERED:
+		PM8001_MSG_DBG(pm8001_ha,
+		   pm8001_printk("DEVREG_FAILURE_PHY_ID_ALREADY_REGISTERED\n"));
+		break;
+	case DEVREG_FAILURE_PORT_ID_OUT_OF_RANGE:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("DEVREG_FAILURE_PORT_ID_OUT_OF_RANGE\n"));
+		break;
+	case DEVREG_FAILURE_PORT_NOT_VALID_STATE:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("DEVREG_FAILURE_PORT_NOT_VALID_STATE\n"));
+		break;
+	case DEVREG_FAILURE_DEVICE_TYPE_NOT_VALID:
+		PM8001_MSG_DBG(pm8001_ha,
+		       pm8001_printk("DEVREG_FAILURE_DEVICE_TYPE_NOT_VALID\n"));
+		break;
+	default:
+		PM8001_MSG_DBG(pm8001_ha,
+		 pm8001_printk("DEVREG_FAILURE_DEVICE_TYPE_NOT_UNSORPORTED\n"));
+		break;
+	}
+	complete(pm8001_dev->dcompletion);
+	ccb->task = NULL;
+	ccb->ccb_tag = 0xFFFFFFFF;
+	pm8001_ccb_free(pm8001_ha, htag);
+	return 0;
+}
+
+static int mpi_dereg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+	u32 status;
+	u32 device_id;
+	struct dev_reg_resp *registerRespPayload =
+		(struct dev_reg_resp *)(piomb + 4);
+
+	status = le32_to_cpu(registerRespPayload->status);
+	device_id = le32_to_cpu(registerRespPayload->device_id);
+	if (status != 0)
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk(" deregister device failed ,status = %x"
+			", device_id = %x\n", status, device_id));
+	return 0;
+}
+
+static int
+mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+	u32 status;
+	struct fw_control_ex	fw_control_context;
+	struct fw_flash_Update_resp *ppayload =
+		(struct fw_flash_Update_resp *)(piomb + 4);
+	u32 tag = le32_to_cpu(ppayload->tag);
+	struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[tag];
+	status = le32_to_cpu(ppayload->status);
+	memcpy(&fw_control_context,
+		ccb->fw_control_context,
+		sizeof(fw_control_context));
+	switch (status) {
+	case FLASH_UPDATE_COMPLETE_PENDING_REBOOT:
+		PM8001_MSG_DBG(pm8001_ha,
+		pm8001_printk(": FLASH_UPDATE_COMPLETE_PENDING_REBOOT\n"));
+		break;
+	case FLASH_UPDATE_IN_PROGRESS:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk(": FLASH_UPDATE_IN_PROGRESS\n"));
+		break;
+	case FLASH_UPDATE_HDR_ERR:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk(": FLASH_UPDATE_HDR_ERR\n"));
+		break;
+	case FLASH_UPDATE_OFFSET_ERR:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk(": FLASH_UPDATE_OFFSET_ERR\n"));
+		break;
+	case FLASH_UPDATE_CRC_ERR:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk(": FLASH_UPDATE_CRC_ERR\n"));
+		break;
+	case FLASH_UPDATE_LENGTH_ERR:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk(": FLASH_UPDATE_LENGTH_ERR\n"));
+		break;
+	case FLASH_UPDATE_HW_ERR:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk(": FLASH_UPDATE_HW_ERR\n"));
+		break;
+	case FLASH_UPDATE_DNLD_NOT_SUPPORTED:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk(": FLASH_UPDATE_DNLD_NOT_SUPPORTED\n"));
+		break;
+	case FLASH_UPDATE_DISABLED:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk(": FLASH_UPDATE_DISABLED\n"));
+		break;
+	default:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("No matched status = %d\n", status));
+		break;
+	}
+	ccb->fw_control_context->fw_control->retcode = status;
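+	/* release the DMA buffer, presumably allocated when the update was
+	 * submitted, now that the firmware has consumed this image chunk */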
+	pci_free_consistent(pm8001_ha->pdev,
+			fw_control_context.len,
+			fw_control_context.virtAddr,
+			fw_control_context.phys_addr);
+	complete(pm8001_ha->nvmd_completion);
+	ccb->task = NULL;
+	ccb->ccb_tag = 0xFFFFFFFF;
+	pm8001_ccb_free(pm8001_ha, tag);
+	return 0;
+}
+
+static int
+mpi_general_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
+{
+	u32 status;
+	int i;
+	struct general_event_resp *pPayload =
+		(struct general_event_resp *)(piomb + 4);
+	status = le32_to_cpu(pPayload->status);
+	PM8001_MSG_DBG(pm8001_ha,
+		pm8001_printk(" status = 0x%x\n", status));
+	for (i = 0; i < GENERAL_EVENT_PAYLOAD; i++)
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("inb_IOMB_payload[0x%x] 0x%x, \n", i,
+			pPayload->inb_IOMB_payload[i]));
+	return 0;
+}
+
+static int
+mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+	struct sas_task *t;
+	struct pm8001_ccb_info *ccb;
+	unsigned long flags;
+	u32 status;
+	u32 tag, scp;
+	struct task_status_struct *ts;
+
+	struct task_abort_resp *pPayload =
+		(struct task_abort_resp *)(piomb + 4);
+
+	status = le32_to_cpu(pPayload->status);
+	tag = le32_to_cpu(pPayload->tag);
+	scp = le32_to_cpu(pPayload->scp);
+	ccb = &pm8001_ha->ccb_info[tag];
+	t = ccb->task;
+	PM8001_IO_DBG(pm8001_ha,
+		pm8001_printk(" status = 0x%x\n", status));
+	if (t == NULL)
+		return -1;
+	ts = &t->task_status;
+	if (status != 0)
+		PM8001_FAIL_DBG(pm8001_ha,
+			pm8001_printk("task abort failed status 0x%x ,"
+			"tag = 0x%x, scp= 0x%x\n", status, tag, scp));
+	switch (status) {
+	case IO_SUCCESS:
+		PM8001_EH_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAM_GOOD;
+		break;
+	case IO_NOT_VALID:
+		PM8001_EH_DBG(pm8001_ha, pm8001_printk("IO_NOT_VALID\n"));
+		ts->resp = TMF_RESP_FUNC_FAILED;
+		break;
+	}
+	spin_lock_irqsave(&t->task_state_lock, flags);
+	t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
+	t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+	t->task_state_flags |= SAS_TASK_STATE_DONE;
+	spin_unlock_irqrestore(&t->task_state_lock, flags);
+	pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+	mb();
+	t->task_done(t);
+	return 0;
+}
+
+/**
+ * mpi_hw_event - process an incoming hw event.
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb)
+{
+	unsigned long flags;
+	struct hw_event_resp *pPayload =
+		(struct hw_event_resp *)(piomb + 4);
+	u32 lr_evt_status_phyid_portid =
+		le32_to_cpu(pPayload->lr_evt_status_phyid_portid);
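+	/* lr_evt_status_phyid_portid packs, per the shifts below: link rate
+	 * [31:28], status [27:24], event type [23:8], phy id [7:4] and
+	 * port id [3:0] */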
+	u8 port_id = (u8)(lr_evt_status_phyid_portid & 0x0000000F);
+	u8 phy_id =
+		(u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4);
+	u16 eventType =
+		(u16)((lr_evt_status_phyid_portid & 0x00FFFF00) >> 8);
+	u8 status =
+		(u8)((lr_evt_status_phyid_portid & 0x0F000000) >> 24);
+	struct sas_ha_struct *sas_ha = pm8001_ha->sas;
+	struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
+	struct asd_sas_phy *sas_phy = sas_ha->sas_phy[phy_id];
+	PM8001_MSG_DBG(pm8001_ha,
+		pm8001_printk("outbound queue HW event & event type : "));
+	switch (eventType) {
+	case HW_EVENT_PHY_START_STATUS:
+		PM8001_MSG_DBG(pm8001_ha,
+		pm8001_printk("HW_EVENT_PHY_START_STATUS"
+			" status = %x\n", status));
+		if (status == 0) {
+			phy->phy_state = 1;
+			if (pm8001_ha->flags == PM8001F_RUN_TIME)
+				complete(phy->enable_completion);
+		}
+		break;
+	case HW_EVENT_SAS_PHY_UP:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("HW_EVENT_PHY_START_STATUS \n"));
+		hw_event_sas_phy_up(pm8001_ha, piomb);
+		break;
+	case HW_EVENT_SATA_PHY_UP:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("HW_EVENT_SATA_PHY_UP \n"));
+		hw_event_sata_phy_up(pm8001_ha, piomb);
+		break;
+	case HW_EVENT_PHY_STOP_STATUS:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("HW_EVENT_PHY_STOP_STATUS "
+			"status = %x\n", status));
+		if (status == 0)
+			phy->phy_state = 0;
+		break;
+	case HW_EVENT_SATA_SPINUP_HOLD:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("HW_EVENT_SATA_SPINUP_HOLD \n"));
+		sas_ha->notify_phy_event(&phy->sas_phy, PHYE_SPINUP_HOLD);
+		break;
+	case HW_EVENT_PHY_DOWN:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("HW_EVENT_PHY_DOWN \n"));
+		sas_ha->notify_phy_event(&phy->sas_phy, PHYE_LOSS_OF_SIGNAL);
+		phy->phy_attached = 0;
+		phy->phy_state = 0;
+		hw_event_phy_down(pm8001_ha, piomb);
+		break;
+	case HW_EVENT_PORT_INVALID:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("HW_EVENT_PORT_INVALID\n"));
+		sas_phy_disconnected(sas_phy);
+		phy->phy_attached = 0;
+		sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+		break;
+	/* a broadcast(change) primitive was received; tell libsas to
+	revalidate the sas domain */
+	case HW_EVENT_BROADCAST_CHANGE:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("HW_EVENT_BROADCAST_CHANGE\n"));
+		pm8001_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_BROADCAST_CHANGE,
+			port_id, phy_id, 1, 0);
+		spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
+		sas_phy->sas_prim = HW_EVENT_BROADCAST_CHANGE;
+		spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
+		sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+		break;
+	case HW_EVENT_PHY_ERROR:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("HW_EVENT_PHY_ERROR\n"));
+		sas_phy_disconnected(&phy->sas_phy);
+		phy->phy_attached = 0;
+		sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_ERROR);
+		break;
+	case HW_EVENT_BROADCAST_EXP:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("HW_EVENT_BROADCAST_EXP\n"));
+		spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
+		sas_phy->sas_prim = HW_EVENT_BROADCAST_EXP;
+		spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
+		sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+		break;
+	case HW_EVENT_LINK_ERR_INVALID_DWORD:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("HW_EVENT_LINK_ERR_INVALID_DWORD\n"));
+		pm8001_hw_event_ack_req(pm8001_ha, 0,
+			HW_EVENT_LINK_ERR_INVALID_DWORD, port_id, phy_id, 0, 0);
+		sas_phy_disconnected(sas_phy);
+		phy->phy_attached = 0;
+		sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+		break;
+	case HW_EVENT_LINK_ERR_DISPARITY_ERROR:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("HW_EVENT_LINK_ERR_DISPARITY_ERROR\n"));
+		pm8001_hw_event_ack_req(pm8001_ha, 0,
+			HW_EVENT_LINK_ERR_DISPARITY_ERROR,
+			port_id, phy_id, 0, 0);
+		sas_phy_disconnected(sas_phy);
+		phy->phy_attached = 0;
+		sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+		break;
+	case HW_EVENT_LINK_ERR_CODE_VIOLATION:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("HW_EVENT_LINK_ERR_CODE_VIOLATION\n"));
+		pm8001_hw_event_ack_req(pm8001_ha, 0,
+			HW_EVENT_LINK_ERR_CODE_VIOLATION,
+			port_id, phy_id, 0, 0);
+		sas_phy_disconnected(sas_phy);
+		phy->phy_attached = 0;
+		sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+		break;
+	case HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH:
+		PM8001_MSG_DBG(pm8001_ha,
+		      pm8001_printk("HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH\n"));
+		pm8001_hw_event_ack_req(pm8001_ha, 0,
+			HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH,
+			port_id, phy_id, 0, 0);
+		sas_phy_disconnected(sas_phy);
+		phy->phy_attached = 0;
+		sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+		break;
+	case HW_EVENT_MALFUNCTION:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("HW_EVENT_MALFUNCTION\n"));
+		break;
+	case HW_EVENT_BROADCAST_SES:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("HW_EVENT_BROADCAST_SES\n"));
+		spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
+		sas_phy->sas_prim = HW_EVENT_BROADCAST_SES;
+		spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
+		sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+		break;
+	case HW_EVENT_INBOUND_CRC_ERROR:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("HW_EVENT_INBOUND_CRC_ERROR\n"));
+		pm8001_hw_event_ack_req(pm8001_ha, 0,
+			HW_EVENT_INBOUND_CRC_ERROR,
+			port_id, phy_id, 0, 0);
+		break;
+	case HW_EVENT_HARD_RESET_RECEIVED:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("HW_EVENT_HARD_RESET_RECEIVED\n"));
+		sas_ha->notify_port_event(sas_phy, PORTE_HARD_RESET);
+		break;
+	case HW_EVENT_ID_FRAME_TIMEOUT:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("HW_EVENT_ID_FRAME_TIMEOUT\n"));
+		sas_phy_disconnected(sas_phy);
+		phy->phy_attached = 0;
+		sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+		break;
+	case HW_EVENT_LINK_ERR_PHY_RESET_FAILED:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("HW_EVENT_LINK_ERR_PHY_RESET_FAILED \n"));
+		pm8001_hw_event_ack_req(pm8001_ha, 0,
+			HW_EVENT_LINK_ERR_PHY_RESET_FAILED,
+			port_id, phy_id, 0, 0);
+		sas_phy_disconnected(sas_phy);
+		phy->phy_attached = 0;
+		sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+		break;
+	case HW_EVENT_PORT_RESET_TIMER_TMO:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("HW_EVENT_PORT_RESET_TIMER_TMO \n"));
+		sas_phy_disconnected(sas_phy);
+		phy->phy_attached = 0;
+		sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+		break;
+	case HW_EVENT_PORT_RECOVERY_TIMER_TMO:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("HW_EVENT_PORT_RECOVERY_TIMER_TMO \n"));
+		sas_phy_disconnected(sas_phy);
+		phy->phy_attached = 0;
+		sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+		break;
+	case HW_EVENT_PORT_RECOVER:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("HW_EVENT_PORT_RECOVER \n"));
+		break;
+	case HW_EVENT_PORT_RESET_COMPLETE:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("HW_EVENT_PORT_RESET_COMPLETE \n"));
+		break;
+	case EVENT_BROADCAST_ASYNCH_EVENT:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("EVENT_BROADCAST_ASYNCH_EVENT\n"));
+		break;
+	default:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("Unknown event type = %x\n", eventType));
+		break;
+	}
+	return 0;
+}
+
+/**
+ * process_one_iomb - process one outbound queue memory block (IOMB)
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
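+	/* the first dword of an IOMB carries the opcode in its low 12 bits */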
+	u32 pHeader = (u32)*(u32 *)piomb;
+	u8 opc = (u8)((le32_to_cpu(pHeader)) & 0xFFF);
+
+	PM8001_MSG_DBG(pm8001_ha, pm8001_printk("process_one_iomb:"));
+
+	switch (opc) {
+	case OPC_OUB_ECHO:
+		PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_ECHO \n"));
+		break;
+	case OPC_OUB_HW_EVENT:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("OPC_OUB_HW_EVENT \n"));
+		mpi_hw_event(pm8001_ha, piomb);
+		break;
+	case OPC_OUB_SSP_COMP:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("OPC_OUB_SSP_COMP \n"));
+		mpi_ssp_completion(pm8001_ha, piomb);
+		break;
+	case OPC_OUB_SMP_COMP:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("OPC_OUB_SMP_COMP \n"));
+		mpi_smp_completion(pm8001_ha, piomb);
+		break;
+	case OPC_OUB_LOCAL_PHY_CNTRL:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("OPC_OUB_LOCAL_PHY_CNTRL\n"));
+		mpi_local_phy_ctl(pm8001_ha, piomb);
+		break;
+	case OPC_OUB_DEV_REGIST:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("OPC_OUB_DEV_REGIST \n"));
+		mpi_reg_resp(pm8001_ha, piomb);
+		break;
+	case OPC_OUB_DEREG_DEV:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("unresgister the deviece \n"));
+		mpi_dereg_resp(pm8001_ha, piomb);
+		break;
+	case OPC_OUB_GET_DEV_HANDLE:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("OPC_OUB_GET_DEV_HANDLE \n"));
+		break;
+	case OPC_OUB_SATA_COMP:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("OPC_OUB_SATA_COMP \n"));
+		mpi_sata_completion(pm8001_ha, piomb);
+		break;
+	case OPC_OUB_SATA_EVENT:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("OPC_OUB_SATA_EVENT \n"));
+		mpi_sata_event(pm8001_ha, piomb);
+		break;
+	case OPC_OUB_SSP_EVENT:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("OPC_OUB_SSP_EVENT\n"));
+		mpi_ssp_event(pm8001_ha, piomb);
+		break;
+	case OPC_OUB_DEV_HANDLE_ARRIV:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("OPC_OUB_DEV_HANDLE_ARRIV\n"));
+		/*This is for target*/
+		break;
+	case OPC_OUB_SSP_RECV_EVENT:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("OPC_OUB_SSP_RECV_EVENT\n"));
+		/*This is for target*/
+		break;
+	case OPC_OUB_DEV_INFO:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("OPC_OUB_DEV_INFO\n"));
+		break;
+	case OPC_OUB_FW_FLASH_UPDATE:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("OPC_OUB_FW_FLASH_UPDATE\n"));
+		mpi_fw_flash_update_resp(pm8001_ha, piomb);
+		break;
+	case OPC_OUB_GPIO_RESPONSE:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("OPC_OUB_GPIO_RESPONSE\n"));
+		break;
+	case OPC_OUB_GPIO_EVENT:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("OPC_OUB_GPIO_EVENT\n"));
+		break;
+	case OPC_OUB_GENERAL_EVENT:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("OPC_OUB_GENERAL_EVENT\n"));
+		mpi_general_event(pm8001_ha, piomb);
+		break;
+	case OPC_OUB_SSP_ABORT_RSP:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("OPC_OUB_SSP_ABORT_RSP\n"));
+		mpi_task_abort_resp(pm8001_ha, piomb);
+		break;
+	case OPC_OUB_SATA_ABORT_RSP:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("OPC_OUB_SATA_ABORT_RSP\n"));
+		mpi_task_abort_resp(pm8001_ha, piomb);
+		break;
+	case OPC_OUB_SAS_DIAG_MODE_START_END:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("OPC_OUB_SAS_DIAG_MODE_START_END\n"));
+		break;
+	case OPC_OUB_SAS_DIAG_EXECUTE:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("OPC_OUB_SAS_DIAG_EXECUTE\n"));
+		break;
+	case OPC_OUB_GET_TIME_STAMP:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("OPC_OUB_GET_TIME_STAMP\n"));
+		break;
+	case OPC_OUB_SAS_HW_EVENT_ACK:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("OPC_OUB_SAS_HW_EVENT_ACK\n"));
+		break;
+	case OPC_OUB_PORT_CONTROL:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("OPC_OUB_PORT_CONTROL\n"));
+		break;
+	case OPC_OUB_SMP_ABORT_RSP:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("OPC_OUB_SMP_ABORT_RSP\n"));
+		mpi_task_abort_resp(pm8001_ha, piomb);
+		break;
+	case OPC_OUB_GET_NVMD_DATA:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("OPC_OUB_GET_NVMD_DATA\n"));
+		mpi_get_nvmd_resp(pm8001_ha, piomb);
+		break;
+	case OPC_OUB_SET_NVMD_DATA:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("OPC_OUB_SET_NVMD_DATA\n"));
+		mpi_set_nvmd_resp(pm8001_ha, piomb);
+		break;
+	case OPC_OUB_DEVICE_HANDLE_REMOVAL:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("OPC_OUB_DEVICE_HANDLE_REMOVAL\n"));
+		break;
+	case OPC_OUB_SET_DEVICE_STATE:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("OPC_OUB_SET_DEVICE_STATE\n"));
+		mpi_set_dev_state_resp(pm8001_ha, piomb);
+		break;
+	case OPC_OUB_GET_DEVICE_STATE:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("OPC_OUB_GET_DEVICE_STATE\n"));
+		break;
+	case OPC_OUB_SET_DEV_INFO:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("OPC_OUB_SET_DEV_INFO\n"));
+		break;
+	case OPC_OUB_SAS_RE_INITIALIZE:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("OPC_OUB_SAS_RE_INITIALIZE\n"));
+		break;
+	default:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("Unknown outbound Queue IOMB OPC = %x\n",
+			opc));
+		break;
+	}
+}
+
+static int process_oq(struct pm8001_hba_info *pm8001_ha)
+{
+	struct outbound_queue_table *circularQ;
+	void *pMsg1 = NULL;
+	u8 bc = 0;
+	u32 ret = MPI_IO_STATUS_FAIL;
+
+	circularQ = &pm8001_ha->outbnd_q_tbl[0];
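+	/* drain outbound queue 0: consume and dispatch IOMBs until the
+	 * firmware's producer index matches our consumer index */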
+	do {
+		ret = mpi_msg_consume(pm8001_ha, circularQ, &pMsg1, &bc);
+		if (MPI_IO_STATUS_SUCCESS == ret) {
+			/* process the outbound message */
+			process_one_iomb(pm8001_ha, (void *)(pMsg1 - 4));
+			/* free the message from the outbound circular buffer */
+			mpi_msg_free_set(pm8001_ha, pMsg1, circularQ, bc);
+		}
+		if (MPI_IO_STATUS_BUSY == ret) {
+			u32 producer_idx;
+			/* Update the producer index from SPC */
+			producer_idx = pm8001_read_32(circularQ->pi_virt);
+			circularQ->producer_index = cpu_to_le32(producer_idx);
+			if (circularQ->producer_index ==
+				circularQ->consumer_idx)
+				/* OQ is empty */
+				break;
+		}
+	} while (1);
+	return ret;
+}
+
+/* PCI_DMA_... to our direction translation. */
+static const u8 data_dir_flags[] = {
+	[PCI_DMA_BIDIRECTIONAL] = DATA_DIR_BYRECIPIENT,/* UNSPECIFIED */
+	[PCI_DMA_TODEVICE]	= DATA_DIR_OUT,/* OUTBOUND */
+	[PCI_DMA_FROMDEVICE]	= DATA_DIR_IN,/* INBOUND */
+	[PCI_DMA_NONE]		= DATA_DIR_NONE,/* NO TRANSFER */
+};
+static void
+pm8001_chip_make_sg(struct scatterlist *scatter, int nr, void *prd)
+{
+	int i;
+	struct scatterlist *sg;
+	struct pm8001_prd *buf_prd = prd;
+
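+	/* translate each DMA-mapped scatterlist entry into a hardware PRD:
+	 * 64-bit bus address plus length; the 'e' (presumably extension)
+	 * bit stays clear for plain entries */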
+	for_each_sg(scatter, sg, nr, i) {
+		buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
+		buf_prd->im_len.len = cpu_to_le32(sg_dma_len(sg));
+		buf_prd->im_len.e = 0;
+		buf_prd++;
+	}
+}
+
+static void build_smp_cmd(u32 deviceID, u32 hTag, struct smp_req *psmp_cmd)
+{
+	psmp_cmd->tag = cpu_to_le32(hTag);
+	psmp_cmd->device_id = cpu_to_le32(deviceID);
+	psmp_cmd->len_ip_ir = cpu_to_le32(1|(1 << 1));
+}
+
+/**
+ * pm8001_chip_smp_req - send an SMP task to FW
+ * @pm8001_ha: our hba card information.
+ * @ccb: the ccb information this request used.
+ */
+static int pm8001_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
+	struct pm8001_ccb_info *ccb)
+{
+	int elem, rc;
+	struct sas_task *task = ccb->task;
+	struct domain_device *dev = task->dev;
+	struct pm8001_device *pm8001_dev = dev->lldd_dev;
+	struct scatterlist *sg_req, *sg_resp;
+	u32 req_len, resp_len;
+	struct smp_req smp_cmd;
+	u32 opc;
+	struct inbound_queue_table *circularQ;
+
+	memset(&smp_cmd, 0, sizeof(smp_cmd));
+	/*
+	 * DMA-map SMP request, response buffers
+	 */
+	sg_req = &task->smp_task.smp_req;
+	elem = dma_map_sg(pm8001_ha->dev, sg_req, 1, PCI_DMA_TODEVICE);
+	if (!elem)
+		return -ENOMEM;
+	req_len = sg_dma_len(sg_req);
+
+	sg_resp = &task->smp_task.smp_resp;
+	elem = dma_map_sg(pm8001_ha->dev, sg_resp, 1, PCI_DMA_FROMDEVICE);
+	if (!elem) {
+		rc = -ENOMEM;
+		goto err_out;
+	}
+	resp_len = sg_dma_len(sg_resp);
+	/* must be in dwords */
+	if ((req_len & 0x3) || (resp_len & 0x3)) {
+		rc = -EINVAL;
+		goto err_out_2;
+	}
+
+	opc = OPC_INB_SMP_REQUEST;
+	circularQ = &pm8001_ha->inbnd_q_tbl[0];
+	smp_cmd.tag = cpu_to_le32(ccb->ccb_tag);
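+	/* use the 'long' SMP request format: request/response buffers are
+	 * passed by 64-bit address, with sizes excluding the trailing
+	 * 4 bytes (the frame CRC) */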
+	smp_cmd.long_smp_req.long_req_addr =
+		cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_req));
+	smp_cmd.long_smp_req.long_req_size =
+		cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_req)-4);
+	smp_cmd.long_smp_req.long_resp_addr =
+		cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_resp));
+	smp_cmd.long_smp_req.long_resp_size =
+		cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_resp)-4);
+	build_smp_cmd(pm8001_dev->device_id, ccb->ccb_tag, &smp_cmd);
+	mpi_build_cmd(pm8001_ha, circularQ, opc, (u32 *)&smp_cmd);
+	return 0;
+
+err_out_2:
+	dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_resp, 1,
+			PCI_DMA_FROMDEVICE);
+err_out:
+	dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_req, 1,
+			PCI_DMA_TODEVICE);
+	return rc;
+}
+
+/**
+ * pm8001_chip_ssp_io_req - send an SSP task to FW
+ * @pm8001_ha: our hba card information.
+ * @ccb: the ccb information this request used.
+ */
+static int pm8001_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
+	struct pm8001_ccb_info *ccb)
+{
+	struct sas_task *task = ccb->task;
+	struct domain_device *dev = task->dev;
+	struct pm8001_device *pm8001_dev = dev->lldd_dev;
+	struct ssp_ini_io_start_req ssp_cmd;
+	u32 tag = ccb->ccb_tag;
+	int ret;
+	u64 phys_addr;
+	struct inbound_queue_table *circularQ;
+	u32 opc = OPC_INB_SSPINIIOSTART;
+	memset(&ssp_cmd, 0, sizeof(ssp_cmd));
+	memcpy(ssp_cmd.ssp_iu.lun, task->ssp_task.LUN, 8);
+	ssp_cmd.dir_m_tlr = data_dir_flags[task->data_dir] << 8 | 0x0;/*0 for
+	SAS 1.1 compatible TLR*/
+	ssp_cmd.data_len = cpu_to_le32(task->total_xfer_len);
+	ssp_cmd.device_id = cpu_to_le32(pm8001_dev->device_id);
+	ssp_cmd.tag = cpu_to_le32(tag);
+	if (task->ssp_task.enable_first_burst)
+		ssp_cmd.ssp_iu.efb_prio_attr |= 0x80;
+	ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_prio << 3);
+	ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_attr & 7);
+	memcpy(ssp_cmd.ssp_iu.cdb, task->ssp_task.cdb, 16);
+	circularQ = &pm8001_ha->inbnd_q_tbl[0];
+
+	/* fill in PRD (scatter/gather) table, if any */
+	if (task->num_scatter > 1) {
+		pm8001_chip_make_sg(task->scatter, ccb->n_elem, ccb->buf_prd);
+		phys_addr = ccb->ccb_dma_handle +
+				offsetof(struct pm8001_ccb_info, buf_prd[0]);
+		ssp_cmd.addr_low = cpu_to_le32(lower_32_bits(phys_addr));
+		ssp_cmd.addr_high = cpu_to_le32(upper_32_bits(phys_addr));
+		ssp_cmd.esgl = cpu_to_le32(1<<31);
+	} else if (task->num_scatter == 1) {
+		u64 dma_addr = sg_dma_address(task->scatter);
+		ssp_cmd.addr_low = cpu_to_le32(lower_32_bits(dma_addr));
+		ssp_cmd.addr_high = cpu_to_le32(upper_32_bits(dma_addr));
+		ssp_cmd.len = cpu_to_le32(task->total_xfer_len);
+		ssp_cmd.esgl = 0;
+	} else if (task->num_scatter == 0) {
+		ssp_cmd.addr_low = 0;
+		ssp_cmd.addr_high = 0;
+		ssp_cmd.len = cpu_to_le32(task->total_xfer_len);
+		ssp_cmd.esgl = 0;
+	}
+	ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &ssp_cmd);
+	return ret;
+}
+
+static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
+	struct pm8001_ccb_info *ccb)
+{
+	struct sas_task *task = ccb->task;
+	struct domain_device *dev = task->dev;
+	struct pm8001_device *pm8001_ha_dev = dev->lldd_dev;
+	u32 tag = ccb->ccb_tag;
+	int ret;
+	struct sata_start_req sata_cmd;
+	u32 hdr_tag, ncg_tag = 0;
+	u64 phys_addr;
+	u32 ATAP = 0x0;
+	u32 dir;
+	struct inbound_queue_table *circularQ;
+	u32  opc = OPC_INB_SATA_HOST_OPSTART;
+	memset(&sata_cmd, 0, sizeof(sata_cmd));
+	circularQ = &pm8001_ha->inbnd_q_tbl[0];
+	if (task->data_dir == PCI_DMA_NONE) {
+		ATAP = 0x04;  /* no data*/
+		PM8001_IO_DBG(pm8001_ha, pm8001_printk("no data \n"));
+	} else if (likely(!task->ata_task.device_control_reg_update)) {
+		if (task->ata_task.dma_xfer) {
+			ATAP = 0x06; /* DMA */
+			PM8001_IO_DBG(pm8001_ha, pm8001_printk("DMA \n"));
+		} else {
+			ATAP = 0x05; /* PIO*/
+			PM8001_IO_DBG(pm8001_ha, pm8001_printk("PIO \n"));
+		}
+		if (task->ata_task.use_ncq &&
+			dev->sata_dev.command_set != ATAPI_COMMAND_SET) {
+			ATAP = 0x07; /* FPDMA */
+			PM8001_IO_DBG(pm8001_ha, pm8001_printk("FPDMA \n"));
+		}
+	}
+	if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag))
+		ncg_tag = hdr_tag;
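+	/* pack the NCQ tag, ATA protocol (ATAP) and data direction into the
+	 * single dword the firmware expects */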
+	dir = data_dir_flags[task->data_dir] << 8;
+	sata_cmd.tag = cpu_to_le32(tag);
+	sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
+	sata_cmd.data_len = cpu_to_le32(task->total_xfer_len);
+	sata_cmd.ncqtag_atap_dir_m =
+		cpu_to_le32(((ncg_tag & 0xff)<<16)|((ATAP & 0x3f) << 10) | dir);
+	sata_cmd.sata_fis = task->ata_task.fis;
+	if (likely(!task->ata_task.device_control_reg_update))
+		sata_cmd.sata_fis.flags |= 0x80;/* C=1: update ATA cmd reg */
+	sata_cmd.sata_fis.flags &= 0xF0;/* PM_PORT field shall be 0 */
+	/* fill in PRD (scatter/gather) table, if any */
+	if (task->num_scatter > 1) {
+		pm8001_chip_make_sg(task->scatter, ccb->n_elem, ccb->buf_prd);
+		phys_addr = ccb->ccb_dma_handle +
+				offsetof(struct pm8001_ccb_info, buf_prd[0]);
+		sata_cmd.addr_low = cpu_to_le32(lower_32_bits(phys_addr));
+		sata_cmd.addr_high = cpu_to_le32(upper_32_bits(phys_addr));
+		sata_cmd.esgl = cpu_to_le32(1 << 31);
+	} else if (task->num_scatter == 1) {
+		u64 dma_addr = sg_dma_address(task->scatter);
+		sata_cmd.addr_low = cpu_to_le32(lower_32_bits(dma_addr));
+		sata_cmd.addr_high = cpu_to_le32(upper_32_bits(dma_addr));
+		sata_cmd.len = cpu_to_le32(task->total_xfer_len);
+		sata_cmd.esgl = 0;
+	} else if (task->num_scatter == 0) {
+		sata_cmd.addr_low = 0;
+		sata_cmd.addr_high = 0;
+		sata_cmd.len = cpu_to_le32(task->total_xfer_len);
+		sata_cmd.esgl = 0;
+	}
+	ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd);
+	return ret;
+}
+
+/**
+ * pm8001_chip_phy_start_req - start a phy via the PHY_START command
+ * @pm8001_ha: our hba card information.
+ * @phy_id: the phy id which we want to start up.
+ */
+static int
+pm8001_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
+{
+	struct phy_start_req payload;
+	struct inbound_queue_table *circularQ;
+	int ret;
+	u32 tag = 0x01;
+	u32 opcode = OPC_INB_PHYSTART;
+	circularQ = &pm8001_ha->inbnd_q_tbl[0];
+	memset(&payload, 0, sizeof(payload));
+	payload.tag = cpu_to_le32(tag);
+	/*
+	 ** [0:7]   PHY Identifier
+	 ** [8:11]  link rate 1.5G, 3G, 6G
+	 ** [12:13] link mode 01b SAS mode; 10b SATA mode; 11b both
+	 ** [14]    0b disable spin up hold; 1b enable spin up hold
+	 */
+	payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE |
+		LINKMODE_AUTO |	LINKRATE_15 |
+		LINKRATE_30 | LINKRATE_60 | phy_id);
+	payload.sas_identify.dev_type = SAS_END_DEV;
+	payload.sas_identify.initiator_bits = SAS_PROTOCOL_ALL;
+	memcpy(payload.sas_identify.sas_addr,
+		pm8001_ha->sas_addr, SAS_ADDR_SIZE);
+	payload.sas_identify.phy_id = phy_id;
+	ret = mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload);
+	return ret;
+}
+
+/**
+ * pm8001_chip_phy_stop_req - stop a phy via the PHY_STOP command
+ * @pm8001_ha: our hba card information.
+ * @phy_id: the phy id which we want to stop.
+ */
+static int pm8001_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha,
+	u8 phy_id)
+{
+	struct phy_stop_req payload;
+	struct inbound_queue_table *circularQ;
+	int ret;
+	u32 tag = 0x01;
+	u32 opcode = OPC_INB_PHYSTOP;
+	circularQ = &pm8001_ha->inbnd_q_tbl[0];
+	memset(&payload, 0, sizeof(payload));
+	payload.tag = cpu_to_le32(tag);
+	payload.phy_id = cpu_to_le32(phy_id);
+	ret = mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload);
+	return ret;
+}
+
+/**
+ * see comments on mpi_reg_resp.
+ */
+static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
+	struct pm8001_device *pm8001_dev, u32 flag)
+{
+	struct reg_dev_req payload;
+	u32	opc;
+	u32 stp_sspsmp_sata = 0x4;
+	struct inbound_queue_table *circularQ;
+	u32 linkrate, phy_id;
+	int rc, tag = 0xdeadbeef;
+	struct pm8001_ccb_info *ccb;
+	u8 retryFlag = 0x1;
+	u16 firstBurstSize = 0;
+	u16 ITNT = 2000;
+	struct domain_device *dev = pm8001_dev->sas_device;
+	struct domain_device *parent_dev = dev->parent;
+	circularQ = &pm8001_ha->inbnd_q_tbl[0];
+
+	memset(&payload, 0, sizeof(payload));
+	rc = pm8001_tag_alloc(pm8001_ha, &tag);
+	if (rc)
+		return rc;
+	ccb = &pm8001_ha->ccb_info[tag];
+	ccb->device = pm8001_dev;
+	ccb->ccb_tag = tag;
+	payload.tag = cpu_to_le32(tag);
+	if (flag == 1)
+		stp_sspsmp_sata = 0x02; /*direct attached sata */
+	else {
+		if (pm8001_dev->dev_type == SATA_DEV)
+			stp_sspsmp_sata = 0x00; /* stp*/
+		else if (pm8001_dev->dev_type == SAS_END_DEV ||
+			pm8001_dev->dev_type == EDGE_DEV ||
+			pm8001_dev->dev_type == FANOUT_DEV)
+			stp_sspsmp_sata = 0x01; /*ssp or smp*/
+	}
+	if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type))
+		phy_id = parent_dev->ex_dev.ex_phy->phy_id;
+	else
+		phy_id = pm8001_dev->attached_phy;
+	opc = OPC_INB_REG_DEV;
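+	/* register at the lower of the device's and the port's link rate */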
+	linkrate = (pm8001_dev->sas_device->linkrate < dev->port->linkrate) ?
+			pm8001_dev->sas_device->linkrate : dev->port->linkrate;
+	payload.phyid_portid =
+		cpu_to_le32(((pm8001_dev->sas_device->port->id) & 0x0F) |
+		((phy_id & 0x0F) << 4));
+	payload.dtype_dlr_retry = cpu_to_le32((retryFlag & 0x01) |
+		((linkrate & 0x0F) * 0x1000000) |
+		((stp_sspsmp_sata & 0x03) * 0x10000000));
+	payload.firstburstsize_ITNexustimeout =
+		cpu_to_le32(ITNT | (firstBurstSize * 0x10000));
+	memcpy(&payload.sas_addr_hi, pm8001_dev->sas_device->sas_addr,
+		SAS_ADDR_SIZE);
+	rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
+	return rc;
+}
+
+/**
+ * see comments on mpi_reg_resp.
+ */
+static int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha,
+	u32 device_id)
+{
+	struct dereg_dev_req payload;
+	u32 opc = OPC_INB_DEREG_DEV_HANDLE;
+	int ret;
+	struct inbound_queue_table *circularQ;
+
+	circularQ = &pm8001_ha->inbnd_q_tbl[0];
+	memset(&payload, 0, sizeof(payload));
+	payload.tag = cpu_to_le32(1);
+	payload.device_id = cpu_to_le32(device_id);
+	PM8001_MSG_DBG(pm8001_ha,
+		pm8001_printk("unregister device device_id = %d\n", device_id));
+	ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
+	return ret;
+}
+
+/**
+ * pm8001_chip_phy_ctl_req - support the local phy operation
+ * @pm8001_ha: our hba card information.
+ * @phyId: the phy id on which we want to operate
+ * @phy_op: the local phy operation to perform
+ */
+static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
+	u32 phyId, u32 phy_op)
+{
+	struct local_phy_ctl_req payload;
+	struct inbound_queue_table *circularQ;
+	int ret;
+	u32 opc = OPC_INB_LOCAL_PHY_CONTROL;
+	memset((u8 *)&payload, 0, sizeof(payload));
+	circularQ = &pm8001_ha->inbnd_q_tbl[0];
+	payload.tag = cpu_to_le32(1);
+	payload.phyop_phyid =
+		cpu_to_le32(((phy_op & 0xff) << 8) | (phyId & 0x0F));
+	ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
+	return ret;
+}
+
+static u32 pm8001_chip_is_our_interupt(struct pm8001_hba_info *pm8001_ha)
+{
+#ifdef PM8001_USE_MSIX
+	return 1;
+#else
+	u32 value;
+
+	value = pm8001_cr32(pm8001_ha, 0, MSGU_ODR);
+	if (value)
+		return 1;
+	return 0;
+#endif
+}
+
+/**
+ * pm8001_chip_isr - PM8001 isr handler.
+ * @pm8001_ha: our hba card information.
+ */
+static irqreturn_t
+pm8001_chip_isr(struct pm8001_hba_info *pm8001_ha)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&pm8001_ha->lock, flags);
+	pm8001_chip_interrupt_disable(pm8001_ha);
+	process_oq(pm8001_ha);
+	pm8001_chip_interrupt_enable(pm8001_ha);
+	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+	return IRQ_HANDLED;
+}
+
+static int send_task_abort(struct pm8001_hba_info *pm8001_ha, u32 opc,
+	u32 dev_id, u8 flag, u32 task_tag, u32 cmd_tag)
+{
+	struct task_abort_req task_abort;
+	struct inbound_queue_table *circularQ;
+	int ret;
+	circularQ = &pm8001_ha->inbnd_q_tbl[0];
+	memset(&task_abort, 0, sizeof(task_abort));
+	if (ABORT_SINGLE == (flag & ABORT_MASK)) {
+		task_abort.abort_all = 0;
+		task_abort.device_id = cpu_to_le32(dev_id);
+		task_abort.tag_to_abort = cpu_to_le32(task_tag);
+		task_abort.tag = cpu_to_le32(cmd_tag);
+	} else if (ABORT_ALL == (flag & ABORT_MASK)) {
+		task_abort.abort_all = cpu_to_le32(1);
+		task_abort.device_id = cpu_to_le32(dev_id);
+		task_abort.tag = cpu_to_le32(cmd_tag);
+	}
+	ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort);
+	return ret;
+}
+
+/**
+ * pm8001_chip_abort_task - SAS abort task when an error or exception happened.
+ * @task: the task we want to abort.
+ * @flag: the abort flag.
+ */
+static int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha,
+	struct pm8001_device *pm8001_dev, u8 flag, u32 task_tag, u32 cmd_tag)
+{
+	u32 opc, device_id;
+	int rc = TMF_RESP_FUNC_FAILED;
+	PM8001_EH_DBG(pm8001_ha, pm8001_printk("cmd_tag = %x, abort task tag"
+		" = %x", cmd_tag, task_tag));
+	if (pm8001_dev->dev_type == SAS_END_DEV)
+		opc = OPC_INB_SSP_ABORT;
+	else if (pm8001_dev->dev_type == SATA_DEV)
+		opc = OPC_INB_SATA_ABORT;
+	else
+		opc = OPC_INB_SMP_ABORT;/* SMP */
+	device_id = pm8001_dev->device_id;
+	rc = send_task_abort(pm8001_ha, opc, device_id, flag,
+		task_tag, cmd_tag);
+	if (rc != TMF_RESP_FUNC_COMPLETE)
+		PM8001_EH_DBG(pm8001_ha, pm8001_printk("rc= %d\n", rc));
+	return rc;
+}
+
+/**
+ * pm8001_chip_ssp_tm_req - build the task management command.
+ * @pm8001_ha: our hba card information.
+ * @ccb: the ccb information.
+ * @tmf: task management function.
+ */
+static int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha,
+	struct pm8001_ccb_info *ccb, struct pm8001_tmf_task *tmf)
+{
+	struct sas_task *task = ccb->task;
+	struct domain_device *dev = task->dev;
+	struct pm8001_device *pm8001_dev = dev->lldd_dev;
+	u32 opc = OPC_INB_SSPINITMSTART;
+	struct inbound_queue_table *circularQ;
+	struct ssp_ini_tm_start_req sspTMCmd;
+	int ret;
+
+	memset(&sspTMCmd, 0, sizeof(sspTMCmd));
+	sspTMCmd.device_id = cpu_to_le32(pm8001_dev->device_id);
+	sspTMCmd.relate_tag = cpu_to_le32(tmf->tag_of_task_to_be_managed);
+	sspTMCmd.tmf = cpu_to_le32(tmf->tmf);
+	memcpy(sspTMCmd.lun, task->ssp_task.LUN, 8);
+	sspTMCmd.tag = cpu_to_le32(ccb->ccb_tag);
+	circularQ = &pm8001_ha->inbnd_q_tbl[0];
+	ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &sspTMCmd);
+	return ret;
+}
+
+static int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,
+	void *payload)
+{
+	u32 opc = OPC_INB_GET_NVMD_DATA;
+	u32 nvmd_type;
+	int rc;
+	u32 tag;
+	struct pm8001_ccb_info *ccb;
+	struct inbound_queue_table *circularQ;
+	struct get_nvm_data_req nvmd_req;
+	struct fw_control_ex *fw_control_context;
+	struct pm8001_ioctl_payload *ioctl_payload = payload;
+
+	nvmd_type = ioctl_payload->minor_function;
+	fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL);
+	if (!fw_control_context)
+		return -ENOMEM;
+	fw_control_context->usrAddr = (u8 *)&ioctl_payload->func_specific[0];
+	fw_control_context->len = ioctl_payload->length;
+	circularQ = &pm8001_ha->inbnd_q_tbl[0];
+	memset(&nvmd_req, 0, sizeof(nvmd_req));
+	rc = pm8001_tag_alloc(pm8001_ha, &tag);
+	if (rc)
+		return rc;
+	ccb = &pm8001_ha->ccb_info[tag];
+	ccb->ccb_tag = tag;
+	ccb->fw_control_context = fw_control_context;
+	nvmd_req.tag = cpu_to_le32(tag);
+
+	switch (nvmd_type) {
+	case TWI_DEVICE: {
+		u32 twi_addr, twi_page_size;
+		twi_addr = 0xa8;
+		twi_page_size = 2;
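+		/* 0xa8 is presumably the TWI (I2C) device address of the
+		 * SEEPROM on the reference design, and 2 its page-size code */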
+
+		nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | twi_addr << 16 |
+			twi_page_size << 8 | TWI_DEVICE);
+		nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length);
+		nvmd_req.resp_addr_hi =
+		    cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
+		nvmd_req.resp_addr_lo =
+		    cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo);
+		break;
+	}
+	case C_SEEPROM: {
+		nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | C_SEEPROM);
+		nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length);
+		nvmd_req.resp_addr_hi =
+		    cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
+		nvmd_req.resp_addr_lo =
+		    cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo);
+		break;
+	}
+	case VPD_FLASH: {
+		nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | VPD_FLASH);
+		nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length);
+		nvmd_req.resp_addr_hi =
+		    cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
+		nvmd_req.resp_addr_lo =
+		    cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo);
+		break;
+	}
+	case EXPAN_ROM: {
+		nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | EXPAN_ROM);
+		nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length);
+		nvmd_req.resp_addr_hi =
+		    cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
+		nvmd_req.resp_addr_lo =
+		    cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo);
+		break;
+	}
+	default:
+		break;
+	}
+	rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req);
+	return rc;
+}
+
+static int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha,
+	void *payload)
+{
+	u32 opc = OPC_INB_SET_NVMD_DATA;
+	u32 nvmd_type;
+	int rc;
+	u32 tag;
+	struct pm8001_ccb_info *ccb;
+	struct inbound_queue_table *circularQ;
+	struct set_nvm_data_req nvmd_req;
+	struct fw_control_ex *fw_control_context;
+	struct pm8001_ioctl_payload *ioctl_payload = payload;
+
+	nvmd_type = ioctl_payload->minor_function;
+	fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL);
+	if (!fw_control_context)
+		return -ENOMEM;
+	circularQ = &pm8001_ha->inbnd_q_tbl[0];
+	memcpy(pm8001_ha->memoryMap.region[NVMD].virt_ptr,
+		ioctl_payload->func_specific,
+		ioctl_payload->length);
+	memset(&nvmd_req, 0, sizeof(nvmd_req));
+	rc = pm8001_tag_alloc(pm8001_ha, &tag);
+	if (rc)
+		return rc;
+	ccb = &pm8001_ha->ccb_info[tag];
+	ccb->fw_control_context = fw_control_context;
+	ccb->ccb_tag = tag;
+	nvmd_req.tag = cpu_to_le32(tag);
+	switch (nvmd_type) {
+	case TWI_DEVICE: {
+		u32 twi_addr, twi_page_size;
+		twi_addr = 0xa8;
+		twi_page_size = 2;
+		nvmd_req.reserved[0] = cpu_to_le32(0xFEDCBA98);
+		nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | twi_addr << 16 |
+			twi_page_size << 8 | TWI_DEVICE);
+		nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length);
+		nvmd_req.resp_addr_hi =
+		    cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
+		nvmd_req.resp_addr_lo =
+		    cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo);
+		break;
+	}
+	case C_SEEPROM:
+		nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | C_SEEPROM);
+		nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length);
+		nvmd_req.reserved[0] = cpu_to_le32(0xFEDCBA98);
+		nvmd_req.resp_addr_hi =
+		    cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
+		nvmd_req.resp_addr_lo =
+		    cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo);
+		break;
+	case VPD_FLASH:
+		nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | VPD_FLASH);
+		nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length);
+		nvmd_req.reserved[0] = cpu_to_le32(0xFEDCBA98);
+		nvmd_req.resp_addr_hi =
+		    cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
+		nvmd_req.resp_addr_lo =
+		    cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo);
+		break;
+	case EXPAN_ROM:
+		nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | EXPAN_ROM);
+		nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length);
+		nvmd_req.reserved[0] = cpu_to_le32(0xFEDCBA98);
+		nvmd_req.resp_addr_hi =
+		    cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
+		nvmd_req.resp_addr_lo =
+		    cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo);
+		break;
+	default:
+		break;
+	}
+	rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req);
+	return rc;
+}
+
+/**
+ * pm8001_chip_fw_flash_update_build - support the firmware update operation
+ * @pm8001_ha: our hba card information.
+ * @fw_flash_updata_info: firmware flash update parameters.
+ * @tag: command tag allocated by the caller.
+ */
+static int
+pm8001_chip_fw_flash_update_build(struct pm8001_hba_info *pm8001_ha,
+	void *fw_flash_updata_info, u32 tag)
+{
+	struct fw_flash_Update_req payload;
+	struct fw_flash_updata_info *info;
+	struct inbound_queue_table *circularQ;
+	int ret;
+	u32 opc = OPC_INB_FW_FLASH_UPDATE;
+
+	memset(&payload, 0, sizeof(struct fw_flash_Update_req));
+	circularQ = &pm8001_ha->inbnd_q_tbl[0];
+	info = fw_flash_updata_info;
+	payload.tag = cpu_to_le32(tag);
+	payload.cur_image_len = cpu_to_le32(info->cur_image_len);
+	payload.cur_image_offset = cpu_to_le32(info->cur_image_offset);
+	payload.total_image_len = cpu_to_le32(info->total_image_len);
+	payload.len = info->sgl.im_len.len;
+	payload.sgl_addr_lo = lower_32_bits(info->sgl.addr);
+	payload.sgl_addr_hi = upper_32_bits(info->sgl.addr);
+	ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
+	return ret;
+}
+
+static int
+pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,
+	void *payload)
+{
+	struct fw_flash_updata_info flash_update_info;
+	struct fw_control_info *fw_control;
+	struct fw_control_ex *fw_control_context;
+	int rc;
+	u32 tag;
+	struct pm8001_ccb_info *ccb;
+	void *buffer = NULL;
+	dma_addr_t phys_addr;
+	u32 phys_addr_hi;
+	u32 phys_addr_lo;
+	struct pm8001_ioctl_payload *ioctl_payload = payload;
+
+	fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL);
+	if (!fw_control_context)
+		return -ENOMEM;
+	fw_control = (struct fw_control_info *)&ioctl_payload->func_specific[0];
+	if (fw_control->len != 0) {
+		if (pm8001_mem_alloc(pm8001_ha->pdev,
+			(void **)&buffer,
+			&phys_addr,
+			&phys_addr_hi,
+			&phys_addr_lo,
+			fw_control->len, 0) != 0) {
+				PM8001_FAIL_DBG(pm8001_ha,
+					pm8001_printk("Mem alloc failure\n"));
+				return -ENOMEM;
+		}
+	}
+	memset(buffer, 0, fw_control->len);
+	memcpy(buffer, fw_control->buffer, fw_control->len);
+	flash_update_info.sgl.addr = cpu_to_le64(phys_addr);
+	flash_update_info.sgl.im_len.len = cpu_to_le32(fw_control->len);
+	flash_update_info.sgl.im_len.e = 0;
+	flash_update_info.cur_image_offset = fw_control->offset;
+	flash_update_info.cur_image_len = fw_control->len;
+	flash_update_info.total_image_len = fw_control->size;
+	fw_control_context->fw_control = fw_control;
+	fw_control_context->virtAddr = buffer;
+	fw_control_context->len = fw_control->len;
+	rc = pm8001_tag_alloc(pm8001_ha, &tag);
+	if (rc)
+		return rc;
+	ccb = &pm8001_ha->ccb_info[tag];
+	ccb->fw_control_context = fw_control_context;
+	ccb->ccb_tag = tag;
+	rc = pm8001_chip_fw_flash_update_build(pm8001_ha, &flash_update_info,
+		tag);
+	return rc;
+}
+
+static int
+pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha,
+	struct pm8001_device *pm8001_dev, u32 state)
+{
+	struct set_dev_state_req payload;
+	struct inbound_queue_table *circularQ;
+	struct pm8001_ccb_info *ccb;
+	int rc;
+	u32 tag;
+	u32 opc = OPC_INB_SET_DEVICE_STATE;
+	memset(&payload, 0, sizeof(payload));
+	rc = pm8001_tag_alloc(pm8001_ha, &tag);
+	if (rc)
+		return -1;
+	ccb = &pm8001_ha->ccb_info[tag];
+	ccb->ccb_tag = tag;
+	ccb->device = pm8001_dev;
+	circularQ = &pm8001_ha->inbnd_q_tbl[0];
+	payload.tag = cpu_to_le32(tag);
+	payload.device_id = cpu_to_le32(pm8001_dev->device_id);
+	payload.nds = cpu_to_le32(state);
+	rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
+	return rc;
+}
+
+static int
+pm8001_chip_sas_re_initialization(struct pm8001_hba_info *pm8001_ha)
+{
+	struct sas_re_initialization_req payload;
+	struct inbound_queue_table *circularQ;
+	struct pm8001_ccb_info *ccb;
+	int rc;
+	u32 tag;
+	u32 opc = OPC_INB_SAS_RE_INITIALIZE;
+	memset(&payload, 0, sizeof(payload));
+	rc = pm8001_tag_alloc(pm8001_ha, &tag);
+	if (rc)
+		return -1;
+	ccb = &pm8001_ha->ccb_info[tag];
+	ccb->ccb_tag = tag;
+	circularQ = &pm8001_ha->inbnd_q_tbl[0];
+	payload.tag = cpu_to_le32(tag);
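+	/* 0xd << 25 sets bits 28, 27 and 25: open reject cmd retries,
+	 * open reject data retries and the SATA head-of-line timeout
+	 * (see the SSAHOLT field comments in pm8001_hwi.h).
+	 */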
+	payload.SSAHOLT = cpu_to_le32(0xd << 25);
+	payload.sata_hol_tmo = cpu_to_le32(80);
+	payload.open_reject_cmdretries_data_retries = cpu_to_le32(0xff00ff);
+	rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
+	return rc;
+}
+
+const struct pm8001_dispatch pm8001_8001_dispatch = {
+	.name			= "pmc8001",
+	.chip_init		= pm8001_chip_init,
+	.chip_soft_rst		= pm8001_chip_soft_rst,
+	.chip_rst		= pm8001_hw_chip_rst,
+	.chip_iounmap		= pm8001_chip_iounmap,
+	.isr			= pm8001_chip_isr,
+	.is_our_interupt	= pm8001_chip_is_our_interupt,
+	.isr_process_oq		= process_oq,
+	.interrupt_enable 	= pm8001_chip_interrupt_enable,
+	.interrupt_disable	= pm8001_chip_interrupt_disable,
+	.make_prd		= pm8001_chip_make_sg,
+	.smp_req		= pm8001_chip_smp_req,
+	.ssp_io_req		= pm8001_chip_ssp_io_req,
+	.sata_req		= pm8001_chip_sata_req,
+	.phy_start_req		= pm8001_chip_phy_start_req,
+	.phy_stop_req		= pm8001_chip_phy_stop_req,
+	.reg_dev_req		= pm8001_chip_reg_dev_req,
+	.dereg_dev_req		= pm8001_chip_dereg_dev_req,
+	.phy_ctl_req		= pm8001_chip_phy_ctl_req,
+	.task_abort		= pm8001_chip_abort_task,
+	.ssp_tm_req		= pm8001_chip_ssp_tm_req,
+	.get_nvmd_req		= pm8001_chip_get_nvmd_req,
+	.set_nvmd_req		= pm8001_chip_set_nvmd_req,
+	.fw_flash_update_req	= pm8001_chip_fw_flash_update_req,
+	.set_dev_state_req	= pm8001_chip_set_dev_state_req,
+	.sas_re_init_req	= pm8001_chip_sas_re_initialization,
+};
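+
+/*
+ * Illustrative sketch, not part of the driver: callers are expected to go
+ * through the per-chip dispatch table (PM8001_CHIP_DISP resolves to it)
+ * rather than calling the pm8001_chip_* functions directly, so future
+ * chips can plug in their own vtable. The helper name is hypothetical.
+ */
+#if 0
+static int example_issue_tm(struct pm8001_hba_info *pm8001_ha,
+	struct pm8001_ccb_info *ccb, struct pm8001_tmf_task *tmf)
+{
+	/* Indirect call through the dispatch table declared above. */
+	return PM8001_CHIP_DISP->ssp_tm_req(pm8001_ha, ccb, tmf);
+}
+#endif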
+
diff --git a/drivers/scsi/pm8001/pm8001_hwi.h b/drivers/scsi/pm8001/pm8001_hwi.h
new file mode 100644
index 000000000000..96e4daa68b8f
--- /dev/null
+++ b/drivers/scsi/pm8001/pm8001_hwi.h
@@ -0,0 +1,1030 @@
+/*
+ * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver
+ *
+ * Copyright (c) 2008-2009 USI Co., Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ */
+#ifndef _PMC8001_REG_H_
+#define _PMC8001_REG_H_
+
+#include <linux/types.h>
+#include <scsi/libsas.h>
+
+
+/* for Request Opcode of IOMB */
+#define OPC_INB_ECHO				1	/* 0x000 */
+#define OPC_INB_PHYSTART			4	/* 0x004 */
+#define OPC_INB_PHYSTOP				5	/* 0x005 */
+#define OPC_INB_SSPINIIOSTART			6	/* 0x006 */
+#define OPC_INB_SSPINITMSTART			7	/* 0x007 */
+#define OPC_INB_SSPINIEXTIOSTART		8	/* 0x008 */
+#define OPC_INB_DEV_HANDLE_ACCEPT		9	/* 0x009 */
+#define OPC_INB_SSPTGTIOSTART			10	/* 0x00A */
+#define OPC_INB_SSPTGTRSPSTART			11	/* 0x00B */
+#define OPC_INB_SSPINIEDCIOSTART		12	/* 0x00C */
+#define OPC_INB_SSPINIEXTEDCIOSTART		13	/* 0x00D */
+#define OPC_INB_SSPTGTEDCIOSTART		14	/* 0x00E */
+#define OPC_INB_SSP_ABORT			15	/* 0x00F */
+#define OPC_INB_DEREG_DEV_HANDLE		16	/* 0x010 */
+#define OPC_INB_GET_DEV_HANDLE			17	/* 0x011 */
+#define OPC_INB_SMP_REQUEST			18	/* 0x012 */
+/* SMP_RESPONSE is removed */
+#define OPC_INB_SMP_RESPONSE			19	/* 0x013 */
+#define OPC_INB_SMP_ABORT			20	/* 0x014 */
+#define OPC_INB_REG_DEV				22	/* 0x016 */
+#define OPC_INB_SATA_HOST_OPSTART		23	/* 0x017 */
+#define OPC_INB_SATA_ABORT			24	/* 0x018 */
+#define OPC_INB_LOCAL_PHY_CONTROL		25	/* 0x019 */
+#define OPC_INB_GET_DEV_INFO			26	/* 0x01A */
+#define OPC_INB_FW_FLASH_UPDATE			32	/* 0x020 */
+#define OPC_INB_GPIO				34	/* 0x022 */
+#define OPC_INB_SAS_DIAG_MODE_START_END		35	/* 0x023 */
+#define OPC_INB_SAS_DIAG_EXECUTE		36	/* 0x024 */
+#define OPC_INB_SAS_HW_EVENT_ACK		37	/* 0x025 */
+#define OPC_INB_GET_TIME_STAMP			38	/* 0x026 */
+#define OPC_INB_PORT_CONTROL			39	/* 0x027 */
+#define OPC_INB_GET_NVMD_DATA			40	/* 0x028 */
+#define OPC_INB_SET_NVMD_DATA			41	/* 0x029 */
+#define OPC_INB_SET_DEVICE_STATE		42	/* 0x02A */
+#define OPC_INB_GET_DEVICE_STATE		43	/* 0x02B */
+#define OPC_INB_SET_DEV_INFO			44	/* 0x02C */
+#define OPC_INB_SAS_RE_INITIALIZE		45	/* 0x02D */
+
+/* for Response Opcode of IOMB */
+#define OPC_OUB_ECHO				1	/* 0x001 */
+#define OPC_OUB_HW_EVENT			4	/* 0x004 */
+#define OPC_OUB_SSP_COMP			5	/* 0x005 */
+#define OPC_OUB_SMP_COMP			6	/* 0x006 */
+#define OPC_OUB_LOCAL_PHY_CNTRL			7	/* 0x007 */
+#define OPC_OUB_DEV_REGIST			10	/* 0x00A */
+#define OPC_OUB_DEREG_DEV			11	/* 0x00B */
+#define OPC_OUB_GET_DEV_HANDLE			12	/* 0x00C */
+#define OPC_OUB_SATA_COMP			13	/* 0x00D */
+#define OPC_OUB_SATA_EVENT			14	/* 0x00E */
+#define OPC_OUB_SSP_EVENT			15	/* 0x00F */
+#define OPC_OUB_DEV_HANDLE_ARRIV		16	/* 0x010 */
+/* SMP_RECEIVED Notification is removed */
+#define OPC_OUB_SMP_RECV_EVENT			17	/* 0x011 */
+#define OPC_OUB_SSP_RECV_EVENT			18	/* 0x012 */
+#define OPC_OUB_DEV_INFO			19	/* 0x013 */
+#define OPC_OUB_FW_FLASH_UPDATE			20	/* 0x014 */
+#define OPC_OUB_GPIO_RESPONSE			22	/* 0x016 */
+#define OPC_OUB_GPIO_EVENT			23	/* 0x017 */
+#define OPC_OUB_GENERAL_EVENT			24	/* 0x018 */
+#define OPC_OUB_SSP_ABORT_RSP			26	/* 0x01A */
+#define OPC_OUB_SATA_ABORT_RSP			27	/* 0x01B */
+#define OPC_OUB_SAS_DIAG_MODE_START_END		28	/* 0x01C */
+#define OPC_OUB_SAS_DIAG_EXECUTE		29	/* 0x01D */
+#define OPC_OUB_GET_TIME_STAMP			30	/* 0x01E */
+#define OPC_OUB_SAS_HW_EVENT_ACK		31	/* 0x01F */
+#define OPC_OUB_PORT_CONTROL			32	/* 0x020 */
+#define OPC_OUB_SKIP_ENTRY			33	/* 0x021 */
+#define OPC_OUB_SMP_ABORT_RSP			34	/* 0x022 */
+#define OPC_OUB_GET_NVMD_DATA			35	/* 0x023 */
+#define OPC_OUB_SET_NVMD_DATA			36	/* 0x024 */
+#define OPC_OUB_DEVICE_HANDLE_REMOVAL		37	/* 0x025 */
+#define OPC_OUB_SET_DEVICE_STATE		38	/* 0x026 */
+#define OPC_OUB_GET_DEVICE_STATE		39	/* 0x027 */
+#define OPC_OUB_SET_DEV_INFO			40	/* 0x028 */
+#define OPC_OUB_SAS_RE_INITIALIZE		41	/* 0x029 */
+
+/* for phy start*/
+#define SPINHOLD_DISABLE		(0x00 << 14)
+#define SPINHOLD_ENABLE			(0x01 << 14)
+#define LINKMODE_SAS			(0x01 << 12)
+#define LINKMODE_DSATA			(0x02 << 12)
+#define LINKMODE_AUTO			(0x03 << 12)
+#define LINKRATE_15			(0x01 << 8)
+#define LINKRATE_30			(0x02 << 8)
+#define LINKRATE_60			(0x04 << 8)
+
+struct mpi_msg_hdr {
+	__le32	header;	/* Bits [11:0]  - Message operation code */
+	/* Bits [15:12] - Message Category */
+	/* Bits [21:16] - Outbound queue ID for the
+	operation completion message */
+	/* Bits [23:22] - Reserved */
+	/* Bits [28:24] - Buffer Count, indicates how
+	many buffers are allocated for the message */
+	/* Bits [30:29] - Reserved */
+	/* Bit  [31]    - Message Valid bit */
+} __attribute__((packed, aligned(4)));
+
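+/*
+ * Illustrative sketch, not part of the driver: assembling a header dword
+ * with the layout documented above. The shift positions follow the
+ * bit-field comments; the helper name is hypothetical.
+ */
+#if 0
+static inline __le32 example_mpi_msg_header(u32 opcode, u32 category,
+	u32 obq_id, u32 buf_count)
+{
+	u32 header = (opcode & 0xfff) |			/* bits [11:0]  */
+		((category & 0xf) << 12) |		/* bits [15:12] */
+		((obq_id & 0x3f) << 16) |		/* bits [21:16] */
+		((buf_count & 0x1f) << 24) |		/* bits [28:24] */
+		(1U << 31);				/* message valid */
+	return cpu_to_le32(header);
+}
+#endif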
+
+/*
+ * brief the data structure of PHY Start Command
+ * used to enable the phy (64 bytes)
+ */
+struct phy_start_req {
+	__le32	tag;
+	__le32	ase_sh_lm_slr_phyid;
+	struct sas_identify_frame sas_identify;
+	u32	reserved[5];
+} __attribute__((packed, aligned(4)));
+
+
+/*
+ * brief the data structure of PHY Stop Command
+ * used to disable the phy (64 bytes)
+ */
+struct phy_stop_req {
+	__le32	tag;
+	__le32	phy_id;
+	u32	reserved[13];
+} __attribute__((packed, aligned(4)));
+
+
+/* set device bits fis - device to host */
+struct  set_dev_bits_fis {
+	u8	fis_type;	/* 0xA1*/
+	u8	n_i_pmport;
+	/* b7 : n Bit. Notification bit. If set device needs attention. */
+	/* b6 : i Bit. Interrupt Bit */
+	/* b5-b4: reserved2 */
+	/* b3-b0: PM Port */
+	u8 	status;
+	u8	error;
+	u32	_r_a;
+} __attribute__ ((packed));
+/* PIO setup FIS - device to host */
+struct  pio_setup_fis {
+	u8	fis_type;	/* 0x5f */
+	u8	i_d_pmPort;
+	/* b7 : reserved */
+	/* b6 : i bit. Interrupt bit */
+	/* b5 : d bit. data transfer direction. set to 1 for device to host
+	xfer */
+	/* b4 : reserved */
+	/* b3-b0: PM Port */
+	u8	status;
+	u8	error;
+	u8	lbal;
+	u8	lbam;
+	u8	lbah;
+	u8	device;
+	u8	lbal_exp;
+	u8	lbam_exp;
+	u8	lbah_exp;
+	u8	_r_a;
+	u8	sector_count;
+	u8	sector_count_exp;
+	u8	_r_b;
+	u8	e_status;
+	u8	_r_c[2];
+	u8	transfer_count;
+} __attribute__ ((packed));
+
+/*
+ * brief the data structure of SATA Completion Response
+ * used to describe the sata task response (64 bytes)
+ */
+struct sata_completion_resp {
+	__le32	tag;
+	__le32	status;
+	__le32	param;
+	u32	sata_resp[12];
+} __attribute__((packed, aligned(4)));
+
+
+/*
+ * brief the data structure of SAS HW Event Notification
+ * used to alert the host about the hardware event (64 bytes)
+ */
+struct hw_event_resp {
+	__le32	lr_evt_status_phyid_portid;
+	__le32	evt_param;
+	__le32	npip_portstate;
+	struct sas_identify_frame	sas_identify;
+	struct dev_to_host_fis	sata_fis;
+} __attribute__((packed, aligned(4)));
+
+
+/*
+ * brief the data structure of REGISTER DEVICE Command
+ * used to describe MPI REGISTER DEVICE Command (64 bytes)
+ */
+
+struct reg_dev_req {
+	__le32	tag;
+	__le32	phyid_portid;
+	__le32	dtype_dlr_retry;
+	__le32	firstburstsize_ITNexustimeout;
+	u32	sas_addr_hi;
+	u32	sas_addr_low;
+	__le32	upper_device_id;
+	u32	reserved[8];
+} __attribute__((packed, aligned(4)));
+
+
+/*
+ * brief the data structure of  DEREGISTER DEVICE Command
+ * used to request the spc to remove all internal resources associated
+ * with the device id (64 bytes)
+ */
+
+struct dereg_dev_req {
+	__le32	tag;
+	__le32	device_id;
+	u32	reserved[13];
+} __attribute__((packed, aligned(4)));
+
+
+/*
+ * brief the data structure of DEVICE_REGISTRATION Response
+ * used to notify the completion of the device registration (64 bytes)
+ */
+
+struct dev_reg_resp {
+	__le32	tag;
+	__le32	status;
+	__le32	device_id;
+	u32	reserved[12];
+} __attribute__((packed, aligned(4)));
+
+
+/*
+ * brief the data structure of Local PHY Control Command
+ * used to issue PHY CONTROL to the local phy (64 bytes)
+ */
+struct local_phy_ctl_req {
+	__le32	tag;
+	__le32	phyop_phyid;
+	u32	reserved1[13];
+} __attribute__((packed, aligned(4)));
+
+
+/**
+ * brief the data structure of Local Phy Control Response
+ * used to describe MPI Local Phy Control Response (64 bytes)
+ */
+struct local_phy_ctl_resp {
+	__le32	tag;
+	__le32	phyop_phyid;
+	__le32	status;
+	u32	reserved[12];
+} __attribute__((packed, aligned(4)));
+
+
+#define OP_BITS 0x0000FF00
+#define ID_BITS 0x0000000F
+
+/*
+ * brief the data structure of PORT Control Command
+ * used to control port properties (64 bytes)
+ */
+
+struct port_ctl_req {
+	__le32	tag;
+	__le32	portop_portid;
+	__le32	param0;
+	__le32	param1;
+	u32	reserved1[11];
+} __attribute__((packed, aligned(4)));
+
+
+/*
+ * brief the data structure of HW Event Ack Command
+ * used to acknowledge a received HW event (64 bytes)
+ */
+
+struct hw_event_ack_req {
+	__le32	tag;
+	__le32	sea_phyid_portid;
+	__le32	param0;
+	__le32	param1;
+	u32	reserved1[11];
+} __attribute__((packed, aligned(4)));
+
+
+/*
+ * brief the data structure of SSP Completion Response
+ * used to indicate an SSP Completion (n bytes)
+ */
+struct ssp_completion_resp {
+	__le32	tag;
+	__le32	status;
+	__le32	param;
+	__le32	ssptag_rescv_rescpad;
+	struct ssp_response_iu  ssp_resp_iu;
+	__le32	residual_count;
+} __attribute__((packed, aligned(4)));
+
+
+#define SSP_RESCV_BIT	0x00010000
+
+/*
+ * brief the data structure of SATA EVENT response
+ * used to indicate a SATA event (64 bytes)
+ */
+
+struct sata_event_resp {
+	__le32	tag;
+	__le32	event;
+	__le32	port_id;
+	__le32	device_id;
+	u32	reserved[11];
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of SSP EVENT response
+ * used to indicate an SSP event (64 bytes)
+ */
+
+struct ssp_event_resp {
+	__le32	tag;
+	__le32	event;
+	__le32	port_id;
+	__le32	device_id;
+	u32	reserved[11];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure of General Event Notification Response
+ * used to describe MPI General Event Notification Response (64 bytes)
+ */
+struct general_event_resp {
+	__le32	status;
+	__le32	inb_IOMB_payload[14];
+} __attribute__((packed, aligned(4)));
+
+
+#define GENERAL_EVENT_PAYLOAD	14
+#define OPCODE_BITS	0x00000fff
+
+/*
+ * brief the data structure of SMP Request Command
+ * used to describe MPI SMP REQUEST Command (64 bytes)
+ */
+struct smp_req {
+	__le32	tag;
+	__le32	device_id;
+	__le32	len_ip_ir;
+	/* Bits [0]  - Indirect response */
+	/* Bits [1] - Indirect Payload */
+	/* Bits [15:2] - Reserved */
+	/* Bits [23:16] - direct payload Len */
+	/* Bits [31:24] - Reserved */
+	u8	smp_req16[16];
+	union {
+		u8	smp_req[32];
+		struct {
+			__le64 long_req_addr;/* sg dma address, LE */
+			__le32 long_req_size;/* LE */
+			u32	_r_a;
+			__le64 long_resp_addr;/* sg dma address, LE */
+			__le32 long_resp_size;/* LE */
+			u32	_r_b;
+			} long_smp_req;/* sequencer extension */
+	};
+} __attribute__((packed, aligned(4)));
+/*
+ * brief the data structure of SMP Completion Response
+ * used to describe MPI SMP Completion Response (64 bytes)
+ */
+struct smp_completion_resp {
+	__le32	tag;
+	__le32	status;
+	__le32	param;
+	__le32	_r_a[12];
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of SSP SMP SATA Abort Command
+ * used to describe MPI SSP SMP & SATA Abort Command (64 bytes)
+ */
+struct task_abort_req {
+	__le32	tag;
+	__le32	device_id;
+	__le32	tag_to_abort;
+	__le32	abort_all;
+	u32	reserved[11];
+} __attribute__((packed, aligned(4)));
+
+/* These flags used for SSP SMP & SATA Abort */
+#define ABORT_MASK		0x3
+#define ABORT_SINGLE		0x0
+#define ABORT_ALL		0x1
+
+/**
+ * brief the data structure of SSP SATA SMP Abort Response
+ * used to describe SSP SMP & SATA Abort Response (64 bytes)
+ */
+struct task_abort_resp {
+	__le32	tag;
+	__le32	status;
+	__le32	scp;
+	u32	reserved[12];
+} __attribute__((packed, aligned(4)));
+
+
+/**
+ * brief the data structure of SAS Diagnostic Start/End Command
+ * used to describe MPI SAS Diagnostic Start/End Command (64 bytes)
+ */
+struct sas_diag_start_end_req {
+	__le32	tag;
+	__le32	operation_phyid;
+	u32	reserved[13];
+} __attribute__((packed, aligned(4)));
+
+
+/**
+ * brief the data structure of SAS Diagnostic Execute Command
+ * used to describe MPI SAS Diagnostic Execute Command (64 bytes)
+ */
+struct sas_diag_execute_req {
+	__le32	tag;
+	__le32	cmdtype_cmddesc_phyid;
+	__le32	pat1_pat2;
+	__le32	threshold;
+	__le32	codepat_errmsk;
+	__le32	pmon;
+	__le32	pERF1CTL;
+	u32	reserved[8];
+} __attribute__((packed, aligned(4)));
+
+
+#define SAS_DIAG_PARAM_BYTES 24
+
+/*
+ * brief the data structure of Set Device State Command
+ * used to describe MPI Set Device State Command (64 bytes)
+ */
+struct set_dev_state_req {
+	__le32	tag;
+	__le32	device_id;
+	__le32	nds;
+	u32	reserved[12];
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of sas_re_initialization
+ */
+struct sas_re_initialization_req {
+	__le32	tag;
+	__le32	SSAHOLT;/* bit29-set max port;
+			** bit28-set open reject cmd retries.
+			** bit27-set open reject data retries.
+			** bit26-set open reject option, remap:1 or not:0.
+			** bit25-set sata head of line time out.
+			*/
+	__le32 reserved_maxPorts;
+	__le32 open_reject_cmdretries_data_retries;/* cmd retries: bit31-bit16;
+						    * data retries: bit15-bit0.
+						    */
+	__le32	sata_hol_tmo;
+	u32	reserved1[10];
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of SATA Start Command
+ * used to describe MPI SATA IO Start Command (64 bytes)
+ */
+
+struct sata_start_req {
+	__le32	tag;
+	__le32	device_id;
+	__le32	data_len;
+	__le32	ncqtag_atap_dir_m;
+	struct host_to_dev_fis	sata_fis;
+	u32	reserved1;
+	u32	reserved2;
+	u32	addr_low;
+	u32	addr_high;
+	__le32	len;
+	__le32	esgl;
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure of SSP INI TM Start Command
+ * used to describe MPI SSP INI TM Start Command (64 bytes)
+ */
+struct ssp_ini_tm_start_req {
+	__le32	tag;
+	__le32	device_id;
+	__le32	relate_tag;
+	__le32	tmf;
+	u8	lun[8];
+	__le32	ds_ads_m;
+	u32	reserved[8];
+} __attribute__((packed, aligned(4)));
+
+
+struct ssp_info_unit {
+	u8	lun[8];/* SCSI Logical Unit Number */
+	u8	reserved1;/* reserved */
+	u8	efb_prio_attr;
+	/* B7   : enabledFirstBurst */
+	/* B6-3 : taskPriority */
+	/* B2-0 : taskAttribute */
+	u8	reserved2;	/* reserved */
+	u8	additional_cdb_len;
+	/* B7-2 : additional_cdb_len */
+	/* B1-0 : reserved */
+	u8	cdb[16];/* The SCSI CDB up to 16 bytes length */
+} __attribute__((packed, aligned(4)));
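+
+/*
+ * Illustrative sketch, not part of the driver: packing efb_prio_attr from
+ * the bit-field comments above. The helper name is hypothetical.
+ */
+#if 0
+static inline u8 example_pack_efb_prio_attr(u8 first_burst, u8 prio, u8 attr)
+{
+	return ((first_burst & 0x1) << 7) |	/* B7   : enabledFirstBurst */
+		((prio & 0xf) << 3) |		/* B6-3 : taskPriority */
+		(attr & 0x7);			/* B2-0 : taskAttribute */
+}
+#endif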
+
+
+/**
+ * brief the data structure of SSP INI IO Start Command
+ * used to describe MPI SSP INI IO Start Command (64 bytes)
+ */
+struct ssp_ini_io_start_req {
+	__le32	tag;
+	__le32	device_id;
+	__le32	data_len;
+	__le32	dir_m_tlr;
+	struct ssp_info_unit	ssp_iu;
+	__le32	addr_low;
+	__le32	addr_high;
+	__le32	len;
+	__le32	esgl;
+} __attribute__((packed, aligned(4)));
+
+
+/**
+ * brief the data structure of Firmware download
+ * used to describe MPI FW DOWNLOAD Command (64 bytes)
+ */
+struct fw_flash_Update_req {
+	__le32	tag;
+	__le32	cur_image_offset;
+	__le32	cur_image_len;
+	__le32	total_image_len;
+	u32	reserved0[7];
+	__le32	sgl_addr_lo;
+	__le32	sgl_addr_hi;
+	__le32	len;
+	__le32	ext_reserved;
+} __attribute__((packed, aligned(4)));
+
+
+#define FWFLASH_IOMB_RESERVED_LEN 0x07
+/**
+ * brief the data structure of FW_FLASH_UPDATE Response
+ * used to describe MPI FW_FLASH_UPDATE Response (64 bytes)
+ *
+ */
+struct fw_flash_Update_resp {
+	dma_addr_t	tag;
+	__le32	status;
+	u32	reserved[13];
+} __attribute__((packed, aligned(4)));
+
+
+/**
+ * brief the data structure of Get NVM Data Command
+ * used to get data from the NVM in the HBA (64 bytes)
+ */
+struct get_nvm_data_req {
+	__le32	tag;
+	__le32	len_ir_vpdd;
+	__le32	vpd_offset;
+	u32	reserved[8];
+	__le32	resp_addr_lo;
+	__le32	resp_addr_hi;
+	__le32	resp_len;
+	u32	reserved1;
+} __attribute__((packed, aligned(4)));
+
+
+struct set_nvm_data_req {
+	__le32	tag;
+	__le32	len_ir_vpdd;
+	__le32	vpd_offset;
+	u32	reserved[8];
+	__le32	resp_addr_lo;
+	__le32	resp_addr_hi;
+	__le32	resp_len;
+	u32	reserved1;
+} __attribute__((packed, aligned(4)));
+
+
+#define TWI_DEVICE	0x0
+#define C_SEEPROM	0x1
+#define VPD_FLASH	0x4
+#define AAP1_RDUMP	0x5
+#define IOP_RDUMP	0x6
+#define EXPAN_ROM	0x7
+
+#define IPMode		0x80000000
+#define NVMD_TYPE	0x0000000F
+#define NVMD_STAT	0x0000FFFF
+#define NVMD_LEN	0xFF000000
+/**
+ * brief the data structure of Get NVMD Data Response
+ * used to describe MPI Get NVMD Data Response (64 bytes)
+ */
+struct get_nvm_data_resp {
+	__le32		tag;
+	__le32		ir_tda_bn_dps_das_nvm;
+	__le32		dlen_status;
+	__le32		nvm_data[12];
+} __attribute__((packed, aligned(4)));
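+
+/*
+ * Illustrative sketch, not part of the driver: splitting the combined
+ * dlen_status dword of struct get_nvm_data_resp with the NVMD_* masks
+ * defined above. The helper name is hypothetical.
+ */
+#if 0
+static inline void example_decode_dlen_status(__le32 dlen_status,
+	u32 *status, u32 *dlen)
+{
+	u32 v = le32_to_cpu(dlen_status);
+
+	*status = v & NVMD_STAT;	/* completion status, bits [15:0] */
+	*dlen = (v & NVMD_LEN) >> 24;	/* returned data length, bits [31:24] */
+}
+#endif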
+
+
+/**
+ * brief the data structure of SAS Diagnostic Start/End Response
+ * used to describe MPI SAS Diagnostic Start/End Response (64 bytes)
+ *
+ */
+struct sas_diag_start_end_resp {
+	__le32		tag;
+	__le32		status;
+	u32		reserved[13];
+} __attribute__((packed, aligned(4)));
+
+
+/**
+ * brief the data structure of SAS Diagnostic Execute Response
+ * used to describe MPI SAS Diagnostic Execute Response (64 bytes)
+ *
+ */
+struct sas_diag_execute_resp {
+	__le32		tag;
+	__le32		cmdtype_cmddesc_phyid;
+	__le32		Status;
+	__le32		ReportData;
+	u32		reserved[11];
+} __attribute__((packed, aligned(4)));
+
+
+/**
+ * brief the data structure of Set Device State Response
+ * used to describe MPI Set Device State Response (64 bytes)
+ *
+ */
+struct set_dev_state_resp {
+	__le32		tag;
+	__le32		status;
+	__le32		device_id;
+	__le32		pds_nds;
+	u32		reserved[11];
+} __attribute__((packed, aligned(4)));
+
+
+#define NDS_BITS 0x0F
+#define PDS_BITS 0xF0
+
+/*
+ * HW Events type
+ */
+
+#define HW_EVENT_RESET_START			0x01
+#define HW_EVENT_CHIP_RESET_COMPLETE		0x02
+#define HW_EVENT_PHY_STOP_STATUS		0x03
+#define HW_EVENT_SAS_PHY_UP			0x04
+#define HW_EVENT_SATA_PHY_UP			0x05
+#define HW_EVENT_SATA_SPINUP_HOLD		0x06
+#define HW_EVENT_PHY_DOWN			0x07
+#define HW_EVENT_PORT_INVALID			0x08
+#define HW_EVENT_BROADCAST_CHANGE		0x09
+#define HW_EVENT_PHY_ERROR			0x0A
+#define HW_EVENT_BROADCAST_SES			0x0B
+#define HW_EVENT_INBOUND_CRC_ERROR		0x0C
+#define HW_EVENT_HARD_RESET_RECEIVED		0x0D
+#define HW_EVENT_MALFUNCTION			0x0E
+#define HW_EVENT_ID_FRAME_TIMEOUT		0x0F
+#define HW_EVENT_BROADCAST_EXP			0x10
+#define HW_EVENT_PHY_START_STATUS		0x11
+#define HW_EVENT_LINK_ERR_INVALID_DWORD		0x12
+#define HW_EVENT_LINK_ERR_DISPARITY_ERROR	0x13
+#define HW_EVENT_LINK_ERR_CODE_VIOLATION	0x14
+#define HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH	0x15
+#define HW_EVENT_LINK_ERR_PHY_RESET_FAILED	0x16
+#define HW_EVENT_PORT_RECOVERY_TIMER_TMO	0x17
+#define HW_EVENT_PORT_RECOVER			0x18
+#define HW_EVENT_PORT_RESET_TIMER_TMO		0x19
+#define HW_EVENT_PORT_RESET_COMPLETE		0x20
+#define EVENT_BROADCAST_ASYNCH_EVENT		0x21
+
+/* port state */
+#define PORT_NOT_ESTABLISHED			0x00
+#define PORT_VALID				0x01
+#define PORT_LOSTCOMM				0x02
+#define PORT_IN_RESET				0x04
+#define PORT_INVALID				0x08
+
+/*
+ * SSP/SMP/SATA IO Completion Status values
+ */
+
+#define IO_SUCCESS				0x00
+#define IO_ABORTED				0x01
+#define IO_OVERFLOW				0x02
+#define IO_UNDERFLOW				0x03
+#define IO_FAILED				0x04
+#define IO_ABORT_RESET				0x05
+#define IO_NOT_VALID				0x06
+#define IO_NO_DEVICE				0x07
+#define IO_ILLEGAL_PARAMETER			0x08
+#define IO_LINK_FAILURE				0x09
+#define IO_PROG_ERROR				0x0A
+#define IO_EDC_IN_ERROR				0x0B
+#define IO_EDC_OUT_ERROR			0x0C
+#define IO_ERROR_HW_TIMEOUT			0x0D
+#define IO_XFER_ERROR_BREAK			0x0E
+#define IO_XFER_ERROR_PHY_NOT_READY		0x0F
+#define IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED	0x10
+#define IO_OPEN_CNX_ERROR_ZONE_VIOLATION		0x11
+#define IO_OPEN_CNX_ERROR_BREAK				0x12
+#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS			0x13
+#define IO_OPEN_CNX_ERROR_BAD_DESTINATION		0x14
+#define IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED	0x15
+#define IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY		0x16
+#define IO_OPEN_CNX_ERROR_WRONG_DESTINATION		0x17
+#define IO_OPEN_CNX_ERROR_UNKNOWN_ERROR			0x18
+#define IO_XFER_ERROR_NAK_RECEIVED			0x19
+#define IO_XFER_ERROR_ACK_NAK_TIMEOUT			0x1A
+#define IO_XFER_ERROR_PEER_ABORTED			0x1B
+#define IO_XFER_ERROR_RX_FRAME				0x1C
+#define IO_XFER_ERROR_DMA				0x1D
+#define IO_XFER_ERROR_CREDIT_TIMEOUT			0x1E
+#define IO_XFER_ERROR_SATA_LINK_TIMEOUT			0x1F
+#define IO_XFER_ERROR_SATA				0x20
+#define IO_XFER_ERROR_REJECTED_NCQ_MODE			0x21
+#define IO_XFER_ERROR_ABORTED_DUE_TO_SRST		0x22
+#define IO_XFER_ERROR_ABORTED_NCQ_MODE			0x23
+#define IO_XFER_OPEN_RETRY_TIMEOUT			0x24
+#define IO_XFER_SMP_RESP_CONNECTION_ERROR		0x25
+#define IO_XFER_ERROR_UNEXPECTED_PHASE			0x26
+#define IO_XFER_ERROR_XFER_RDY_OVERRUN			0x27
+#define IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED		0x28
+
+#define IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT		0x30
+#define IO_XFER_ERROR_CMD_ISSUE_BREAK_BEFORE_ACK_NAK	0x31
+#define IO_XFER_ERROR_CMD_ISSUE_PHY_DOWN_BEFORE_ACK_NAK	0x32
+
+#define IO_XFER_ERROR_OFFSET_MISMATCH			0x34
+#define IO_XFER_ERROR_XFER_ZERO_DATA_LEN		0x35
+#define IO_XFER_CMD_FRAME_ISSUED			0x36
+#define IO_ERROR_INTERNAL_SMP_RESOURCE			0x37
+#define IO_PORT_IN_RESET				0x38
+#define IO_DS_NON_OPERATIONAL				0x39
+#define IO_DS_IN_RECOVERY				0x3A
+#define IO_TM_TAG_NOT_FOUND				0x3B
+#define IO_XFER_PIO_SETUP_ERROR				0x3C
+#define IO_SSP_EXT_IU_ZERO_LEN_ERROR			0x3D
+#define IO_DS_IN_ERROR					0x3E
+#define IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY		0x3F
+#define IO_ABORT_IN_PROGRESS				0x40
+#define IO_ABORT_DELAYED				0x41
+#define IO_INVALID_LENGTH				0x42
+
+/* WARNING: This error code must always be the last number.
+ * If you add an error code, update this one as well:
+ * it is used as an array index.
+ */
+#define IO_ERROR_UNKNOWN_GENERIC			0x43
+
+/* MSGU CONFIGURATION TABLE */
+
+#define SPC_MSGU_CFG_TABLE_UPDATE		0x01/* Inbound doorbell bit0 */
+#define SPC_MSGU_CFG_TABLE_RESET		0x02/* Inbound doorbell bit1 */
+#define SPC_MSGU_CFG_TABLE_FREEZE		0x04/* Inbound doorbell bit2 */
+#define SPC_MSGU_CFG_TABLE_UNFREEZE		0x08/* Inbound doorbell bit3 */
+#define MSGU_IBDB_SET				0x04
+#define MSGU_HOST_INT_STATUS			0x08
+#define MSGU_HOST_INT_MASK			0x0C
+#define MSGU_IOPIB_INT_STATUS			0x18
+#define MSGU_IOPIB_INT_MASK			0x1C
+#define MSGU_IBDB_CLEAR				0x20/* RevB - Host not use */
+#define MSGU_MSGU_CONTROL			0x24
+#define MSGU_ODR				0x3C/* RevB */
+#define MSGU_ODCR				0x40/* RevB */
+#define MSGU_SCRATCH_PAD_0			0x44
+#define MSGU_SCRATCH_PAD_1			0x48
+#define MSGU_SCRATCH_PAD_2			0x4C
+#define MSGU_SCRATCH_PAD_3			0x50
+#define MSGU_HOST_SCRATCH_PAD_0			0x54
+#define MSGU_HOST_SCRATCH_PAD_1			0x58
+#define MSGU_HOST_SCRATCH_PAD_2			0x5C
+#define MSGU_HOST_SCRATCH_PAD_3			0x60
+#define MSGU_HOST_SCRATCH_PAD_4			0x64
+#define MSGU_HOST_SCRATCH_PAD_5			0x68
+#define MSGU_HOST_SCRATCH_PAD_6			0x6C
+#define MSGU_HOST_SCRATCH_PAD_7			0x70
+#define MSGU_ODMR				0x74/* RevB */
+
+/* bit definition for ODMR register */
+#define ODMR_MASK_ALL				0xFFFFFFFF/* mask all
+					interrupt vectors */
+#define ODMR_CLEAR_ALL				0/* unmask all
+					interrupt vectors */
+/* bit definition for ODCR register */
+#define ODCR_CLEAR_ALL		0xFFFFFFFF   /* clear all
+					interrupt vectors */
+/* MSI-X Interrupts */
+#define MSIX_TABLE_OFFSET		0x2000
+#define MSIX_TABLE_ELEMENT_SIZE		0x10
+#define MSIX_INTERRUPT_CONTROL_OFFSET	0xC
+#define MSIX_TABLE_BASE	  (MSIX_TABLE_OFFSET + MSIX_INTERRUPT_CONTROL_OFFSET)
+#define MSIX_INTERRUPT_DISABLE		0x1
+#define MSIX_INTERRUPT_ENABLE		0x0
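+
+/*
+ * Illustrative sketch, not part of the driver: the per-vector interrupt
+ * control register sits at a fixed stride inside the MSI-X table, so its
+ * offset follows from the constants above (MSIX_TABLE_BASE already folds
+ * in MSIX_INTERRUPT_CONTROL_OFFSET). The helper name is hypothetical.
+ */
+#if 0
+static inline u32 example_msix_int_ctl_offset(u32 vector)
+{
+	return MSIX_TABLE_BASE + vector * MSIX_TABLE_ELEMENT_SIZE;
+}
+#endif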
+
+
+/* state definition for Scratch Pad1 register */
+#define SCRATCH_PAD1_POR		0x00  /* power on reset state */
+#define SCRATCH_PAD1_SFR		0x01  /* soft reset state */
+#define SCRATCH_PAD1_ERR		0x02  /* error state */
+#define SCRATCH_PAD1_RDY		0x03  /* ready state */
+#define SCRATCH_PAD1_RST		0x04  /* soft reset toggle flag */
+#define SCRATCH_PAD1_AAP1RDY_RST	0x08  /* AAP1 ready for soft reset */
+#define SCRATCH_PAD1_STATE_MASK		0xFFFFFFF0   /* ScratchPad1
+ Mask, bit1-0 State, bit2 Soft Reset, bit3 FW RDY for Soft Reset */
+#define SCRATCH_PAD1_RESERVED		0x000003F8   /* Scratch Pad1
+ Reserved bit 3 to 9 */
+
+ /* state definition for Scratch Pad2 register */
+#define SCRATCH_PAD2_POR		0x00  /* power on state */
+#define SCRATCH_PAD2_SFR		0x01  /* soft reset state */
+#define SCRATCH_PAD2_ERR		0x02  /* error state */
+#define SCRATCH_PAD2_RDY		0x03  /* ready state */
+#define SCRATCH_PAD2_FWRDY_RST		0x04  /* FW ready for soft reset flag*/
+#define SCRATCH_PAD2_IOPRDY_RST		0x08  /* IOP ready for soft reset */
+#define SCRATCH_PAD2_STATE_MASK		0xFFFFFFF4 /* ScratchPad 2
+ Mask, bit1-0 State */
+#define SCRATCH_PAD2_RESERVED		0x000003FC   /* Scratch Pad2
+ Reserved bit 2 to 9 */
+
+#define SCRATCH_PAD_ERROR_MASK		0xFFFFFC00   /* Error mask bits */
+#define SCRATCH_PAD_STATE_MASK		0x00000003   /* State Mask bits */
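+
+/*
+ * Illustrative sketch, not part of the driver: testing whether the AAP1
+ * firmware has reached the ready state by masking the Scratch Pad1 value
+ * with the state mask above. Reading the register itself is left to the
+ * caller; the helper name is hypothetical.
+ */
+#if 0
+static inline int example_aap1_ready(u32 scratch_pad1)
+{
+	return (scratch_pad1 & SCRATCH_PAD_STATE_MASK) == SCRATCH_PAD1_RDY;
+}
+#endif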
+
+/* main configuration offset - byte offset */
+#define MAIN_SIGNATURE_OFFSET		0x00/* DWORD 0x00 */
+#define MAIN_INTERFACE_REVISION		0x04/* DWORD 0x01 */
+#define MAIN_FW_REVISION		0x08/* DWORD 0x02 */
+#define MAIN_MAX_OUTSTANDING_IO_OFFSET	0x0C/* DWORD 0x03 */
+#define MAIN_MAX_SGL_OFFSET		0x10/* DWORD 0x04 */
+#define MAIN_CNTRL_CAP_OFFSET		0x14/* DWORD 0x05 */
+#define MAIN_GST_OFFSET			0x18/* DWORD 0x06 */
+#define MAIN_IBQ_OFFSET			0x1C/* DWORD 0x07 */
+#define MAIN_OBQ_OFFSET			0x20/* DWORD 0x08 */
+#define MAIN_IQNPPD_HPPD_OFFSET		0x24/* DWORD 0x09 */
+#define MAIN_OB_HW_EVENT_PID03_OFFSET	0x28/* DWORD 0x0A */
+#define MAIN_OB_HW_EVENT_PID47_OFFSET	0x2C/* DWORD 0x0B */
+#define MAIN_OB_NCQ_EVENT_PID03_OFFSET	0x30/* DWORD 0x0C */
+#define MAIN_OB_NCQ_EVENT_PID47_OFFSET	0x34/* DWORD 0x0D */
+#define MAIN_TITNX_EVENT_PID03_OFFSET	0x38/* DWORD 0x0E */
+#define MAIN_TITNX_EVENT_PID47_OFFSET	0x3C/* DWORD 0x0F */
+#define MAIN_OB_SSP_EVENT_PID03_OFFSET	0x40/* DWORD 0x10 */
+#define MAIN_OB_SSP_EVENT_PID47_OFFSET	0x44/* DWORD 0x11 */
+#define MAIN_OB_SMP_EVENT_PID03_OFFSET	0x48/* DWORD 0x12 */
+#define MAIN_OB_SMP_EVENT_PID47_OFFSET	0x4C/* DWORD 0x13 */
+#define MAIN_EVENT_LOG_ADDR_HI		0x50/* DWORD 0x14 */
+#define MAIN_EVENT_LOG_ADDR_LO		0x54/* DWORD 0x15 */
+#define MAIN_EVENT_LOG_BUFF_SIZE	0x58/* DWORD 0x16 */
+#define MAIN_EVENT_LOG_OPTION		0x5C/* DWORD 0x17 */
+#define MAIN_IOP_EVENT_LOG_ADDR_HI	0x60/* DWORD 0x18 */
+#define MAIN_IOP_EVENT_LOG_ADDR_LO	0x64/* DWORD 0x19 */
+#define MAIN_IOP_EVENT_LOG_BUFF_SIZE	0x68/* DWORD 0x1A */
+#define MAIN_IOP_EVENT_LOG_OPTION	0x6C/* DWORD 0x1B */
+#define MAIN_FATAL_ERROR_INTERRUPT	0x70/* DWORD 0x1C */
+#define MAIN_FATAL_ERROR_RDUMP0_OFFSET	0x74/* DWORD 0x1D */
+#define MAIN_FATAL_ERROR_RDUMP0_LENGTH	0x78/* DWORD 0x1E */
+#define MAIN_FATAL_ERROR_RDUMP1_OFFSET	0x7C/* DWORD 0x1F */
+#define MAIN_FATAL_ERROR_RDUMP1_LENGTH	0x80/* DWORD 0x20 */
+#define MAIN_HDA_FLAGS_OFFSET		0x84/* DWORD 0x21 */
+#define MAIN_ANALOG_SETUP_OFFSET	0x88/* DWORD 0x22 */
+
+/* General Status Table offset - byte offset */
+#define GST_GSTLEN_MPIS_OFFSET		0x00
+#define GST_IQ_FREEZE_STATE0_OFFSET	0x04
+#define GST_IQ_FREEZE_STATE1_OFFSET	0x08
+#define GST_MSGUTCNT_OFFSET		0x0C
+#define GST_IOPTCNT_OFFSET		0x10
+#define GST_PHYSTATE_OFFSET		0x18
+#define GST_PHYSTATE0_OFFSET		0x18
+#define GST_PHYSTATE1_OFFSET		0x1C
+#define GST_PHYSTATE2_OFFSET		0x20
+#define GST_PHYSTATE3_OFFSET		0x24
+#define GST_PHYSTATE4_OFFSET		0x28
+#define GST_PHYSTATE5_OFFSET		0x2C
+#define GST_PHYSTATE6_OFFSET		0x30
+#define GST_PHYSTATE7_OFFSET		0x34
+#define GST_RERRINFO_OFFSET		0x44
+
+/* General Status Table - MPI state */
+#define GST_MPI_STATE_UNINIT		0x00
+#define GST_MPI_STATE_INIT		0x01
+#define GST_MPI_STATE_TERMINATION	0x02
+#define GST_MPI_STATE_ERROR		0x03
+#define GST_MPI_STATE_MASK		0x07
+
+#define MBIC_NMI_ENABLE_VPE0_IOP	0x000418
+#define MBIC_NMI_ENABLE_VPE0_AAP1	0x000418
+/* PCIE registers - BAR2(0x18), BAR1(win) 0x010000 */
+#define PCIE_EVENT_INTERRUPT_ENABLE	0x003040
+#define PCIE_EVENT_INTERRUPT		0x003044
+#define PCIE_ERROR_INTERRUPT_ENABLE	0x003048
+#define PCIE_ERROR_INTERRUPT		0x00304C
+/* signature definition for host scratch pad0 register */
+#define SPC_SOFT_RESET_SIGNATURE	0x252acbcd
+/* Signature for Soft Reset */
+
+/* SPC Reset register - BAR4(0x20), BAR2(win) (need dynamic mapping) */
+#define SPC_REG_RESET			0x000000/* reset register */
+
+/* bit definition for SPC_RESET register */
+#define   SPC_REG_RESET_OSSP		0x00000001
+#define   SPC_REG_RESET_RAAE		0x00000002
+#define   SPC_REG_RESET_PCS_SPBC	0x00000004
+#define   SPC_REG_RESET_PCS_IOP_SS	0x00000008
+#define   SPC_REG_RESET_PCS_AAP1_SS	0x00000010
+#define   SPC_REG_RESET_PCS_AAP2_SS	0x00000020
+#define   SPC_REG_RESET_PCS_LM		0x00000040
+#define   SPC_REG_RESET_PCS		0x00000080
+#define   SPC_REG_RESET_GSM		0x00000100
+#define   SPC_REG_RESET_DDR2		0x00010000
+#define   SPC_REG_RESET_BDMA_CORE	0x00020000
+#define   SPC_REG_RESET_BDMA_SXCBI	0x00040000
+#define   SPC_REG_RESET_PCIE_AL_SXCBI	0x00080000
+#define   SPC_REG_RESET_PCIE_PWR	0x00100000
+#define   SPC_REG_RESET_PCIE_SFT	0x00200000
+#define   SPC_REG_RESET_PCS_SXCBI	0x00400000
+#define   SPC_REG_RESET_LMS_SXCBI	0x00800000
+#define   SPC_REG_RESET_PMIC_SXCBI	0x01000000
+#define   SPC_REG_RESET_PMIC_CORE	0x02000000
+#define   SPC_REG_RESET_PCIE_PC_SXCBI	0x04000000
+#define   SPC_REG_RESET_DEVICE		0x80000000
+
+/* registers for BAR Shifting - BAR2(0x18), BAR1(win) */
+#define SPC_IBW_AXI_TRANSLATION_LOW	0x003258
+
+#define MBIC_AAP1_ADDR_BASE		0x060000
+#define MBIC_IOP_ADDR_BASE		0x070000
+#define GSM_ADDR_BASE			0x0700000
+/* Dynamic map through Bar4 - 0x00700000 */
+#define GSM_CONFIG_RESET		0x00000000
+#define RAM_ECC_DB_ERR			0x00000018
+#define GSM_READ_ADDR_PARITY_INDIC	0x00000058
+#define GSM_WRITE_ADDR_PARITY_INDIC	0x00000060
+#define GSM_WRITE_DATA_PARITY_INDIC	0x00000068
+#define GSM_READ_ADDR_PARITY_CHECK	0x00000038
+#define GSM_WRITE_ADDR_PARITY_CHECK	0x00000040
+#define GSM_WRITE_DATA_PARITY_CHECK	0x00000048
+
+#define RB6_ACCESS_REG			0x6A0000
+#define HDAC_EXEC_CMD			0x0002
+#define HDA_C_PA			0xcb
+#define HDA_SEQ_ID_BITS			0x00ff0000
+#define HDA_GSM_OFFSET_BITS		0x00FFFFFF
+#define SPC_TOP_LEVEL_ADDR_BASE		0x000000
+#define GSM_CONFIG_RESET_VALUE          0x00003b00
+#define GPIO_ADDR_BASE                  0x00090000
+#define GPIO_GPIO_0_0UTPUT_CTL_OFFSET   0x0000010c
+
+/* RB6 offset */
+#define SPC_RB6_OFFSET			0x80C0
+/* Magic number of soft reset for RB6 */
+#define RB6_MAGIC_NUMBER_RST		0x1234
+
+/* Device Register status */
+#define DEVREG_SUCCESS					0x00
+#define DEVREG_FAILURE_OUT_OF_RESOURCE			0x01
+#define DEVREG_FAILURE_DEVICE_ALREADY_REGISTERED	0x02
+#define DEVREG_FAILURE_INVALID_PHY_ID			0x03
+#define DEVREG_FAILURE_PHY_ID_ALREADY_REGISTERED	0x04
+#define DEVREG_FAILURE_PORT_ID_OUT_OF_RANGE		0x05
+#define DEVREG_FAILURE_PORT_NOT_VALID_STATE		0x06
+#define DEVREG_FAILURE_DEVICE_TYPE_NOT_VALID		0x07
+
+#endif
+
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
new file mode 100644
index 000000000000..42ebe725d5a5
--- /dev/null
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -0,0 +1,891 @@
+/*
+ * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver
+ *
+ * Copyright (c) 2008-2009 USI Co., Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ */
+
+#include "pm8001_sas.h"
+#include "pm8001_chips.h"
+
+static struct scsi_transport_template *pm8001_stt;
+
+static const struct pm8001_chip_info pm8001_chips[] = {
+	[chip_8001] = {  8, &pm8001_8001_dispatch,},
+};
+static int pm8001_id;
+
+LIST_HEAD(hba_list);
+
+/**
+ * The main structure which the LLDD must register with the scsi core.
+ */
+static struct scsi_host_template pm8001_sht = {
+	.module			= THIS_MODULE,
+	.name			= DRV_NAME,
+	.queuecommand		= sas_queuecommand,
+	.target_alloc		= sas_target_alloc,
+	.slave_configure	= pm8001_slave_configure,
+	.slave_destroy		= sas_slave_destroy,
+	.scan_finished		= pm8001_scan_finished,
+	.scan_start		= pm8001_scan_start,
+	.change_queue_depth	= sas_change_queue_depth,
+	.change_queue_type	= sas_change_queue_type,
+	.bios_param		= sas_bios_param,
+	.can_queue		= 1,
+	.cmd_per_lun		= 1,
+	.this_id		= -1,
+	.sg_tablesize		= SG_ALL,
+	.max_sectors		= SCSI_DEFAULT_MAX_SECTORS,
+	.use_clustering		= ENABLE_CLUSTERING,
+	.eh_device_reset_handler = sas_eh_device_reset_handler,
+	.eh_bus_reset_handler	= sas_eh_bus_reset_handler,
+	.slave_alloc		= pm8001_slave_alloc,
+	.target_destroy		= sas_target_destroy,
+	.ioctl			= sas_ioctl,
+	.shost_attrs		= pm8001_host_attrs,
+};
+
+/**
+ * The SAS layer calls these functions to execute specific tasks.
+ */
+static struct sas_domain_function_template pm8001_transport_ops = {
+	.lldd_dev_found		= pm8001_dev_found,
+	.lldd_dev_gone		= pm8001_dev_gone,
+
+	.lldd_execute_task	= pm8001_queue_command,
+	.lldd_control_phy	= pm8001_phy_control,
+
+	.lldd_abort_task	= pm8001_abort_task,
+	.lldd_abort_task_set	= pm8001_abort_task_set,
+	.lldd_clear_aca		= pm8001_clear_aca,
+	.lldd_clear_task_set	= pm8001_clear_task_set,
+	.lldd_I_T_nexus_reset   = pm8001_I_T_nexus_reset,
+	.lldd_lu_reset		= pm8001_lu_reset,
+	.lldd_query_task	= pm8001_query_task,
+};
+
+/**
+ * pm8001_phy_init - initialize our adapter phys
+ * @pm8001_ha: our hba structure.
+ * @phy_id: phy id.
+ */
+static void __devinit pm8001_phy_init(struct pm8001_hba_info *pm8001_ha,
+	int phy_id)
+{
+	struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
+	struct asd_sas_phy *sas_phy = &phy->sas_phy;
+	phy->phy_state = 0;
+	phy->pm8001_ha = pm8001_ha;
+	sas_phy->enabled = (phy_id < pm8001_ha->chip->n_phy) ? 1 : 0;
+	sas_phy->class = SAS;
+	sas_phy->iproto = SAS_PROTOCOL_ALL;
+	sas_phy->tproto = 0;
+	sas_phy->type = PHY_TYPE_PHYSICAL;
+	sas_phy->role = PHY_ROLE_INITIATOR;
+	sas_phy->oob_mode = OOB_NOT_CONNECTED;
+	sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
+	sas_phy->id = phy_id;
+	sas_phy->sas_addr = &pm8001_ha->sas_addr[0];
+	sas_phy->frame_rcvd = &phy->frame_rcvd[0];
+	sas_phy->ha = (struct sas_ha_struct *)pm8001_ha->shost->hostdata;
+	sas_phy->lldd_phy = phy;
+}
+
+/**
+ * pm8001_free - free hba
+ * @pm8001_ha: our hba structure.
+ */
+static void pm8001_free(struct pm8001_hba_info *pm8001_ha)
+{
+	int i;
+	struct pm8001_wq *wq;
+
+	if (!pm8001_ha)
+		return;
+
+	for (i = 0; i < USI_MAX_MEMCNT; i++) {
+		if (pm8001_ha->memoryMap.region[i].virt_ptr != NULL) {
+			pci_free_consistent(pm8001_ha->pdev,
+				pm8001_ha->memoryMap.region[i].element_size,
+				pm8001_ha->memoryMap.region[i].virt_ptr,
+				pm8001_ha->memoryMap.region[i].phys_addr);
+			}
+	}
+	PM8001_CHIP_DISP->chip_iounmap(pm8001_ha);
+	if (pm8001_ha->shost)
+		scsi_host_put(pm8001_ha->shost);
+	list_for_each_entry(wq, &pm8001_ha->wq_list, entry)
+		cancel_delayed_work(&wq->work_q);
+	kfree(pm8001_ha->tags);
+	kfree(pm8001_ha);
+}
+
+#ifdef PM8001_USE_TASKLET
+static void pm8001_tasklet(unsigned long opaque)
+{
+	struct pm8001_hba_info *pm8001_ha;
+	pm8001_ha = (struct pm8001_hba_info *)opaque;
+	BUG_ON(!pm8001_ha);
+	PM8001_CHIP_DISP->isr(pm8001_ha);
+}
+#endif
+
+
+ /**
+  * pm8001_interrupt - when HBA originate a interrupt,we should invoke this
+  * dispatcher to handle each case.
+  * @irq: irq number.
+  * @opaque: the passed general host adapter struct
+  */
+static irqreturn_t pm8001_interrupt(int irq, void *opaque)
+{
+	struct pm8001_hba_info *pm8001_ha;
+	irqreturn_t ret = IRQ_HANDLED;
+	struct sas_ha_struct *sha = opaque;
+	pm8001_ha = sha->lldd_ha;
+	if (unlikely(!pm8001_ha))
+		return IRQ_NONE;
+	if (!PM8001_CHIP_DISP->is_our_interupt(pm8001_ha))
+		return IRQ_NONE;
+#ifdef PM8001_USE_TASKLET
+	tasklet_schedule(&pm8001_ha->tasklet);
+#else
+	ret = PM8001_CHIP_DISP->isr(pm8001_ha);
+#endif
+	return ret;
+}
+
+/**
+ * pm8001_alloc - initialize our hba structure and allocate its DMA regions.
+ * @pm8001_ha: our hba structure.
+ */
+ */
+static int __devinit pm8001_alloc(struct pm8001_hba_info *pm8001_ha)
+{
+	int i;
+	spin_lock_init(&pm8001_ha->lock);
+	for (i = 0; i < pm8001_ha->chip->n_phy; i++)
+		pm8001_phy_init(pm8001_ha, i);
+
+	pm8001_ha->tags = kzalloc(PM8001_MAX_CCB, GFP_KERNEL);
+	if (!pm8001_ha->tags)
+		goto err_out;
+	/* MPI Memory region 1 for AAP Event Log for fw */
+	pm8001_ha->memoryMap.region[AAP1].num_elements = 1;
+	pm8001_ha->memoryMap.region[AAP1].element_size = PM8001_EVENT_LOG_SIZE;
+	pm8001_ha->memoryMap.region[AAP1].total_len = PM8001_EVENT_LOG_SIZE;
+	pm8001_ha->memoryMap.region[AAP1].alignment = 32;
+
+	/* MPI Memory region 2 for IOP Event Log for fw */
+	pm8001_ha->memoryMap.region[IOP].num_elements = 1;
+	pm8001_ha->memoryMap.region[IOP].element_size = PM8001_EVENT_LOG_SIZE;
+	pm8001_ha->memoryMap.region[IOP].total_len = PM8001_EVENT_LOG_SIZE;
+	pm8001_ha->memoryMap.region[IOP].alignment = 32;
+
+	/* MPI Memory region 3 for consumer Index of inbound queues */
+	pm8001_ha->memoryMap.region[CI].num_elements = 1;
+	pm8001_ha->memoryMap.region[CI].element_size = 4;
+	pm8001_ha->memoryMap.region[CI].total_len = 4;
+	pm8001_ha->memoryMap.region[CI].alignment = 4;
+
+	/* MPI Memory region 4 for producer Index of outbound queues */
+	pm8001_ha->memoryMap.region[PI].num_elements = 1;
+	pm8001_ha->memoryMap.region[PI].element_size = 4;
+	pm8001_ha->memoryMap.region[PI].total_len = 4;
+	pm8001_ha->memoryMap.region[PI].alignment = 4;
+
+	/* MPI Memory region 5 inbound queues */
+	pm8001_ha->memoryMap.region[IB].num_elements = 256;
+	pm8001_ha->memoryMap.region[IB].element_size = 64;
+	pm8001_ha->memoryMap.region[IB].total_len = 256 * 64;
+	pm8001_ha->memoryMap.region[IB].alignment = 64;
+
+	/* MPI Memory region 6 outbound queues */
+	pm8001_ha->memoryMap.region[OB].num_elements = 256;
+	pm8001_ha->memoryMap.region[OB].element_size = 64;
+	pm8001_ha->memoryMap.region[OB].total_len = 256 * 64;
+	pm8001_ha->memoryMap.region[OB].alignment = 64;
+
+	/* Memory region write DMA*/
+	pm8001_ha->memoryMap.region[NVMD].num_elements = 1;
+	pm8001_ha->memoryMap.region[NVMD].element_size = 4096;
+	pm8001_ha->memoryMap.region[NVMD].total_len = 4096;
+	/* Memory region for devices*/
+	pm8001_ha->memoryMap.region[DEV_MEM].num_elements = 1;
+	pm8001_ha->memoryMap.region[DEV_MEM].element_size = PM8001_MAX_DEVICES *
+		sizeof(struct pm8001_device);
+	pm8001_ha->memoryMap.region[DEV_MEM].total_len = PM8001_MAX_DEVICES *
+		sizeof(struct pm8001_device);
+
+	/* Memory region for ccb_info*/
+	pm8001_ha->memoryMap.region[CCB_MEM].num_elements = 1;
+	pm8001_ha->memoryMap.region[CCB_MEM].element_size = PM8001_MAX_CCB *
+		sizeof(struct pm8001_ccb_info);
+	pm8001_ha->memoryMap.region[CCB_MEM].total_len = PM8001_MAX_CCB *
+		sizeof(struct pm8001_ccb_info);
+
+	for (i = 0; i < USI_MAX_MEMCNT; i++) {
+		if (pm8001_mem_alloc(pm8001_ha->pdev,
+			&pm8001_ha->memoryMap.region[i].virt_ptr,
+			&pm8001_ha->memoryMap.region[i].phys_addr,
+			&pm8001_ha->memoryMap.region[i].phys_addr_hi,
+			&pm8001_ha->memoryMap.region[i].phys_addr_lo,
+			pm8001_ha->memoryMap.region[i].total_len,
+			pm8001_ha->memoryMap.region[i].alignment) != 0) {
+				PM8001_FAIL_DBG(pm8001_ha,
+					pm8001_printk("Mem%d alloc failed\n",
+					i));
+				goto err_out;
+		}
+	}
+
+	pm8001_ha->devices = pm8001_ha->memoryMap.region[DEV_MEM].virt_ptr;
+	for (i = 0; i < PM8001_MAX_DEVICES; i++) {
+		pm8001_ha->devices[i].dev_type = NO_DEVICE;
+		pm8001_ha->devices[i].id = i;
+		pm8001_ha->devices[i].device_id = PM8001_MAX_DEVICES;
+		pm8001_ha->devices[i].running_req = 0;
+	}
+	pm8001_ha->ccb_info = pm8001_ha->memoryMap.region[CCB_MEM].virt_ptr;
+	for (i = 0; i < PM8001_MAX_CCB; i++) {
+		pm8001_ha->ccb_info[i].ccb_dma_handle =
+			pm8001_ha->memoryMap.region[CCB_MEM].phys_addr +
+			i * sizeof(struct pm8001_ccb_info);
+		pm8001_ha->ccb_info[i].task = NULL;
+		pm8001_ha->ccb_info[i].ccb_tag = 0xffffffff;
+		pm8001_ha->ccb_info[i].device = NULL;
+		++pm8001_ha->tags_num;
+	}
+	pm8001_ha->flags = PM8001F_INIT_TIME;
+	/* Initialize tags */
+	pm8001_tag_init(pm8001_ha);
+	return 0;
+err_out:
+	return 1;
+}
+
+/**
+ * pm8001_ioremap - remap the PCI high physical addresses to kernel virtual
+ * addresses so that we can access them.
+ * @pm8001_ha: our hba structure.
+ */
+static int pm8001_ioremap(struct pm8001_hba_info *pm8001_ha)
+{
+	u32 bar;
+	u32 logicalBar = 0;
+	struct pci_dev *pdev;
+
+	pdev = pm8001_ha->pdev;
+	/* map pci mem (PMC pci base 0-3)*/
+	for (bar = 0; bar < 6; bar++) {
+		/*
+		** logical BARs for SPC:
+		** bar 0 and 1 - logical BAR0
+		** bar 2 and 3 - logical BAR1
+		** bar4 - logical BAR2
+		** bar5 - logical BAR3
+		** Skip the appropriate assignments:
+		*/
+		if ((bar == 1) || (bar == 3))
+			continue;
+		if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
+			pm8001_ha->io_mem[logicalBar].membase =
+				pci_resource_start(pdev, bar);
+			pm8001_ha->io_mem[logicalBar].membase &=
+				(u32)PCI_BASE_ADDRESS_MEM_MASK;
+			pm8001_ha->io_mem[logicalBar].memsize =
+				pci_resource_len(pdev, bar);
+			pm8001_ha->io_mem[logicalBar].memvirtaddr =
+				ioremap(pm8001_ha->io_mem[logicalBar].membase,
+				pm8001_ha->io_mem[logicalBar].memsize);
+			PM8001_INIT_DBG(pm8001_ha,
+				pm8001_printk("PCI: bar %d, logicalBar %d "
+				"virt_addr=%lx,len=%d\n", bar, logicalBar,
+				(unsigned long)
+				pm8001_ha->io_mem[logicalBar].memvirtaddr,
+				pm8001_ha->io_mem[logicalBar].memsize));
+		} else {
+			pm8001_ha->io_mem[logicalBar].membase	= 0;
+			pm8001_ha->io_mem[logicalBar].memsize	= 0;
+			pm8001_ha->io_mem[logicalBar].memvirtaddr = 0;
+		}
+		logicalBar++;
+	}
+	return 0;
+}
+
+/**
+ * pm8001_pci_alloc - initialize our ha card structure
+ * @pdev: pci device.
+ * @chip_id: chip id.
+ * @shost: scsi host struct which has been initialized before.
+ */
+static struct pm8001_hba_info *__devinit
+pm8001_pci_alloc(struct pci_dev *pdev, u32 chip_id, struct Scsi_Host *shost)
+{
+	struct pm8001_hba_info *pm8001_ha;
+	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+
+	pm8001_ha = sha->lldd_ha;
+	if (!pm8001_ha)
+		return NULL;
+
+	pm8001_ha->pdev = pdev;
+	pm8001_ha->dev = &pdev->dev;
+	pm8001_ha->chip_id = chip_id;
+	pm8001_ha->chip = &pm8001_chips[pm8001_ha->chip_id];
+	pm8001_ha->irq = pdev->irq;
+	pm8001_ha->sas = sha;
+	pm8001_ha->shost = shost;
+	pm8001_ha->id = pm8001_id++;
+	INIT_LIST_HEAD(&pm8001_ha->wq_list);
+	pm8001_ha->logging_level = 0x01;
+	sprintf(pm8001_ha->name, "%s%d", DRV_NAME, pm8001_ha->id);
+#ifdef PM8001_USE_TASKLET
+	tasklet_init(&pm8001_ha->tasklet, pm8001_tasklet,
+		(unsigned long)pm8001_ha);
+#endif
+	pm8001_ioremap(pm8001_ha);
+	if (!pm8001_alloc(pm8001_ha))
+		return pm8001_ha;
+	pm8001_free(pm8001_ha);
+	return NULL;
+}
+
+/**
+ * pci_go_44 - pm8001-specific: its DMA mask is 44 bits rather than 64 bits.
+ * @pdev: pci device.
+ */
+static int pci_go_44(struct pci_dev *pdev)
+{
+	int rc;
+
+	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(44))) {
+		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(44));
+		if (rc) {
+			rc = pci_set_consistent_dma_mask(pdev,
+				DMA_BIT_MASK(32));
+			if (rc) {
+				dev_printk(KERN_ERR, &pdev->dev,
+					"44-bit DMA enable failed\n");
+				return rc;
+			}
+		}
+	} else {
+		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (rc) {
+			dev_printk(KERN_ERR, &pdev->dev,
+				"32-bit DMA enable failed\n");
+			return rc;
+		}
+		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (rc) {
+			dev_printk(KERN_ERR, &pdev->dev,
+				"32-bit consistent DMA enable failed\n");
+			return rc;
+		}
+	}
+	return rc;
+}
+
+/**
+ * pm8001_prep_sas_ha_init - allocate memory in the general hba struct and
+ * initialize it.
+ * @shost: scsi host which has been allocated outside.
+ * @chip_info: our chip-specific static data.
+ */
+static int __devinit pm8001_prep_sas_ha_init(struct Scsi_Host *shost,
+	const struct pm8001_chip_info *chip_info)
+{
+	int phy_nr, port_nr;
+	struct asd_sas_phy **arr_phy;
+	struct asd_sas_port **arr_port;
+	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+
+	phy_nr = chip_info->n_phy;
+	port_nr = phy_nr;
+	memset(sha, 0x00, sizeof(*sha));
+	arr_phy = kcalloc(phy_nr, sizeof(void *), GFP_KERNEL);
+	if (!arr_phy)
+		goto exit;
+	arr_port = kcalloc(port_nr, sizeof(void *), GFP_KERNEL);
+	if (!arr_port)
+		goto exit_free2;
+
+	sha->sas_phy = arr_phy;
+	sha->sas_port = arr_port;
+	sha->lldd_ha = kzalloc(sizeof(struct pm8001_hba_info), GFP_KERNEL);
+	if (!sha->lldd_ha)
+		goto exit_free1;
+
+	shost->transportt = pm8001_stt;
+	shost->max_id = PM8001_MAX_DEVICES;
+	shost->max_lun = 8;
+	shost->max_channel = 0;
+	shost->unique_id = pm8001_id;
+	shost->max_cmd_len = 16;
+	shost->can_queue = PM8001_CAN_QUEUE;
+	shost->cmd_per_lun = 32;
+	return 0;
+exit_free1:
+	kfree(arr_port);
+exit_free2:
+	kfree(arr_phy);
+exit:
+	return -1;
+}
+
+/**
+ * pm8001_post_sas_ha_init - initialize general hba struct defined in libsas
+ * @shost: scsi host which has been allocated outside
+ * @chip_info: our chip-specific static data.
+ */
+static void  __devinit pm8001_post_sas_ha_init(struct Scsi_Host *shost,
+	const struct pm8001_chip_info *chip_info)
+{
+	int i = 0;
+	struct pm8001_hba_info *pm8001_ha;
+	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+
+	pm8001_ha = sha->lldd_ha;
+	for (i = 0; i < chip_info->n_phy; i++) {
+		sha->sas_phy[i] = &pm8001_ha->phy[i].sas_phy;
+		sha->sas_port[i] = &pm8001_ha->port[i].sas_port;
+	}
+	sha->sas_ha_name = DRV_NAME;
+	sha->dev = pm8001_ha->dev;
+
+	sha->lldd_module = THIS_MODULE;
+	sha->sas_addr = &pm8001_ha->sas_addr[0];
+	sha->num_phys = chip_info->n_phy;
+	sha->lldd_max_execute_num = 1;
+	sha->lldd_queue_size = PM8001_CAN_QUEUE;
+	sha->core.shost = shost;
+}
+
+/**
+ * pm8001_init_sas_add - initialize the SAS address
+ * @pm8001_ha: our hba struct.
+ *
+ * Currently we just program a fixed SAS address into the HBA; in
+ * production it should be read from the EEPROM (VPD).
+ */
+static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
+{
+	u8 i;
+#ifdef PM8001_READ_VPD
+	DECLARE_COMPLETION_ONSTACK(completion);
+	pm8001_ha->nvmd_completion = &completion;
+	PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, 0, 0);
+	wait_for_completion(&completion);
+	for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
+		memcpy(&pm8001_ha->phy[i].dev_sas_addr, pm8001_ha->sas_addr,
+			SAS_ADDR_SIZE);
+		PM8001_INIT_DBG(pm8001_ha,
+			pm8001_printk("phy %d sas_addr = %x \n", i,
+			(u64)pm8001_ha->phy[i].dev_sas_addr));
+	}
+#else
+	for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
+		pm8001_ha->phy[i].dev_sas_addr =
+			cpu_to_be64(0x500e004010000004ULL);
+	}
+	memcpy(pm8001_ha->sas_addr, &pm8001_ha->phy[0].dev_sas_addr,
+		SAS_ADDR_SIZE);
+#endif
+}
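+
+/*
+ * Illustrative note on the fixed address above (an assumption about its
+ * layout, not stated by the driver): 0x500e004010000004 follows the
+ * standard SAS world wide name format -- the leading nibble 5 is the
+ * NAA (IEEE Registered) field, followed by a 24-bit OUI and a 36-bit
+ * vendor-assigned identifier.
+ */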
+
+#ifdef PM8001_USE_MSIX
+/**
+ * pm8001_setup_msix - enable MSI-X interrupt
+ * @pm8001_ha: our hba struct.
+ * @irq_handler: interrupt handler to register.
+ */
+static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha,
+	irq_handler_t irq_handler)
+{
+	u32 i = 0, j = 0;
+	u32 number_of_intr = 1;
+	int flag = 0;
+	u32 max_entry;
+	int rc;
+	max_entry = sizeof(pm8001_ha->msix_entries) /
+		sizeof(pm8001_ha->msix_entries[0]);
+	flag |= IRQF_DISABLED;
+	for (i = 0; i < max_entry ; i++)
+		pm8001_ha->msix_entries[i].entry = i;
+	rc = pci_enable_msix(pm8001_ha->pdev, pm8001_ha->msix_entries,
+		number_of_intr);
+	pm8001_ha->number_of_intr = number_of_intr;
+	if (!rc) {
+		for (i = 0; i < number_of_intr; i++) {
+			/* propagate request_irq failure to the caller */
+			rc = request_irq(pm8001_ha->msix_entries[i].vector,
+				irq_handler, flag, DRV_NAME,
+				SHOST_TO_SAS_HA(pm8001_ha->shost));
+			if (rc) {
+				for (j = 0; j < i; j++)
+					free_irq(
+					pm8001_ha->msix_entries[j].vector,
+					SHOST_TO_SAS_HA(pm8001_ha->shost));
+				pci_disable_msix(pm8001_ha->pdev);
+				break;
+			}
+		}
+	}
+	return rc;
+}
+#endif
+
+/**
+ * pm8001_request_irq - register interrupt
+ * @pm8001_ha: our hba struct.
+ */
+static u32 pm8001_request_irq(struct pm8001_hba_info *pm8001_ha)
+{
+	struct pci_dev *pdev;
+	irq_handler_t irq_handler = pm8001_interrupt;
+	int rc;
+
+	pdev = pm8001_ha->pdev;
+
+#ifdef PM8001_USE_MSIX
+	if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
+		return pm8001_setup_msix(pm8001_ha, irq_handler);
+	else
+		goto intx;
+#endif
+
+intx:
+	/* initialize the legacy INT-X interrupt */
+	rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED, DRV_NAME,
+		SHOST_TO_SAS_HA(pm8001_ha->shost));
+	return rc;
+}
+
+/**
+ * pm8001_pci_probe - probe supported device
+ * @pdev: pci device which kernel has been prepared for.
+ * @ent: pci device id
+ *
+ * This is the main initialization function: it is invoked when the PCI
+ * core matches a supported device against this driver. All structure and
+ * hardware initialization is done here, and the interrupt is registered.
+ */
+static int __devinit pm8001_pci_probe(struct pci_dev *pdev,
+	const struct pci_device_id *ent)
+{
+	int rc;
+	u32	pci_reg;
+	struct pm8001_hba_info *pm8001_ha;
+	struct Scsi_Host *shost = NULL;
+	const struct pm8001_chip_info *chip;
+
+	dev_printk(KERN_INFO, &pdev->dev,
+		"pm8001: driver version %s\n", DRV_VERSION);
+	rc = pci_enable_device(pdev);
+	if (rc)
+		goto err_out_enable;
+	pci_set_master(pdev);
+	/*
+	 * Enable pci slot busmaster by setting pci command register.
+	 * This is required by FW for Cyclone card.
+	 */
+
+	pci_read_config_dword(pdev, PCI_COMMAND, &pci_reg);
+	pci_reg |= 0x157;
+	pci_write_config_dword(pdev, PCI_COMMAND, pci_reg);
+	rc = pci_request_regions(pdev, DRV_NAME);
+	if (rc)
+		goto err_out_disable;
+	rc = pci_go_44(pdev);
+	if (rc)
+		goto err_out_regions;
+
+	shost = scsi_host_alloc(&pm8001_sht, sizeof(void *));
+	if (!shost) {
+		rc = -ENOMEM;
+		goto err_out_regions;
+	}
+	chip = &pm8001_chips[ent->driver_data];
+	SHOST_TO_SAS_HA(shost) =
+		kcalloc(1, sizeof(struct sas_ha_struct), GFP_KERNEL);
+	if (!SHOST_TO_SAS_HA(shost)) {
+		rc = -ENOMEM;
+		goto err_out_free_host;
+	}
+
+	rc = pm8001_prep_sas_ha_init(shost, chip);
+	if (rc) {
+		rc = -ENOMEM;
+		goto err_out_free;
+	}
+	pci_set_drvdata(pdev, SHOST_TO_SAS_HA(shost));
+	pm8001_ha = pm8001_pci_alloc(pdev, chip_8001, shost);
+	if (!pm8001_ha) {
+		rc = -ENOMEM;
+		goto err_out_free;
+	}
+	list_add_tail(&pm8001_ha->list, &hba_list);
+	PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd);
+	rc = PM8001_CHIP_DISP->chip_init(pm8001_ha);
+	if (rc)
+		goto err_out_ha_free;
+
+	rc = scsi_add_host(shost, &pdev->dev);
+	if (rc)
+		goto err_out_ha_free;
+	rc = pm8001_request_irq(pm8001_ha);
+	if (rc)
+		goto err_out_shost;
+
+	PM8001_CHIP_DISP->interrupt_enable(pm8001_ha);
+	pm8001_init_sas_add(pm8001_ha);
+	pm8001_post_sas_ha_init(shost, chip);
+	rc = sas_register_ha(SHOST_TO_SAS_HA(shost));
+	if (rc)
+		goto err_out_shost;
+	scsi_scan_host(pm8001_ha->shost);
+	return 0;
+
+err_out_shost:
+	scsi_remove_host(pm8001_ha->shost);
+err_out_ha_free:
+	pm8001_free(pm8001_ha);
+err_out_free:
+	kfree(SHOST_TO_SAS_HA(shost));
+err_out_free_host:
+	kfree(shost);
+err_out_regions:
+	pci_release_regions(pdev);
+err_out_disable:
+	pci_disable_device(pdev);
+err_out_enable:
+	return rc;
+}
+
+static void __devexit pm8001_pci_remove(struct pci_dev *pdev)
+{
+	struct sas_ha_struct *sha = pci_get_drvdata(pdev);
+	struct pm8001_hba_info *pm8001_ha;
+	int i;
+	pm8001_ha = sha->lldd_ha;
+	pci_set_drvdata(pdev, NULL);
+	sas_unregister_ha(sha);
+	sas_remove_host(pm8001_ha->shost);
+	list_del(&pm8001_ha->list);
+	scsi_remove_host(pm8001_ha->shost);
+	PM8001_CHIP_DISP->interrupt_disable(pm8001_ha);
+	PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd);
+
+#ifdef PM8001_USE_MSIX
+	for (i = 0; i < pm8001_ha->number_of_intr; i++)
+		synchronize_irq(pm8001_ha->msix_entries[i].vector);
+	for (i = 0; i < pm8001_ha->number_of_intr; i++)
+		free_irq(pm8001_ha->msix_entries[i].vector, sha);
+	pci_disable_msix(pdev);
+#else
+	free_irq(pm8001_ha->irq, sha);
+#endif
+#ifdef PM8001_USE_TASKLET
+	tasklet_kill(&pm8001_ha->tasklet);
+#endif
+	pm8001_free(pm8001_ha);
+	kfree(sha->sas_phy);
+	kfree(sha->sas_port);
+	kfree(sha);
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+}
+
+/**
+ * pm8001_pci_suspend - power management suspend main entry point
+ * @pdev: PCI device struct
+ * @state: PM state change to (usually PCI_D3)
+ *
+ * Returns 0 success, anything else error.
+ */
+static int pm8001_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	struct sas_ha_struct *sha = pci_get_drvdata(pdev);
+	struct pm8001_hba_info *pm8001_ha;
+	int i, pos;
+	u32 device_state;
+	pm8001_ha = sha->lldd_ha;
+	flush_scheduled_work();
+	scsi_block_requests(pm8001_ha->shost);
+	pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
+	if (pos == 0) {
+		printk(KERN_ERR " PCI PM not supported\n");
+		return -ENODEV;
+	}
+	PM8001_CHIP_DISP->interrupt_disable(pm8001_ha);
+	PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd);
+#ifdef PM8001_USE_MSIX
+	for (i = 0; i < pm8001_ha->number_of_intr; i++)
+		synchronize_irq(pm8001_ha->msix_entries[i].vector);
+	for (i = 0; i < pm8001_ha->number_of_intr; i++)
+		free_irq(pm8001_ha->msix_entries[i].vector, sha);
+	pci_disable_msix(pdev);
+#else
+	free_irq(pm8001_ha->irq, sha);
+#endif
+#ifdef PM8001_USE_TASKLET
+	tasklet_kill(&pm8001_ha->tasklet);
+#endif
+	device_state = pci_choose_state(pdev, state);
+	pm8001_printk("pdev=0x%p, slot=%s, entering "
+		      "operating state [D%d]\n", pdev,
+		      pm8001_ha->name, device_state);
+	pci_save_state(pdev);
+	pci_disable_device(pdev);
+	pci_set_power_state(pdev, device_state);
+	return 0;
+}
+
+/**
+ * pm8001_pci_resume - power management resume main entry point
+ * @pdev: PCI device struct
+ *
+ * Returns 0 success, anything else error.
+ */
+static int pm8001_pci_resume(struct pci_dev *pdev)
+{
+	struct sas_ha_struct *sha = pci_get_drvdata(pdev);
+	struct pm8001_hba_info *pm8001_ha;
+	int rc;
+	u32 device_state;
+	pm8001_ha = sha->lldd_ha;
+	device_state = pdev->current_state;
+
+	pm8001_printk("pdev=0x%p, slot=%s, resuming from previous "
+		"operating state [D%d]\n", pdev, pm8001_ha->name, device_state);
+
+	pci_set_power_state(pdev, PCI_D0);
+	pci_enable_wake(pdev, PCI_D0, 0);
+	pci_restore_state(pdev);
+	rc = pci_enable_device(pdev);
+	if (rc) {
+		pm8001_printk("slot=%s Enable device failed during resume\n",
+			      pm8001_ha->name);
+		goto err_out_enable;
+	}
+
+	pci_set_master(pdev);
+	rc = pci_go_44(pdev);
+	if (rc)
+		goto err_out_disable;
+
+	PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd);
+	rc = PM8001_CHIP_DISP->chip_init(pm8001_ha);
+	if (rc)
+		goto err_out_disable;
+	PM8001_CHIP_DISP->interrupt_disable(pm8001_ha);
+	rc = pm8001_request_irq(pm8001_ha);
+	if (rc)
+		goto err_out_disable;
+	#ifdef PM8001_USE_TASKLET
+	tasklet_init(&pm8001_ha->tasklet, pm8001_tasklet,
+		    (unsigned long)pm8001_ha);
+	#endif
+	PM8001_CHIP_DISP->interrupt_enable(pm8001_ha);
+	scsi_unblock_requests(pm8001_ha->shost);
+	return 0;
+
+err_out_disable:
+	scsi_remove_host(pm8001_ha->shost);
+	pci_disable_device(pdev);
+err_out_enable:
+	return rc;
+}
+
+static struct pci_device_id __devinitdata pm8001_pci_table[] = {
+	{
+		PCI_VDEVICE(PMC_Sierra, 0x8001), chip_8001
+	},
+	{
+		PCI_DEVICE(0x117c, 0x0042),
+		.driver_data = chip_8001
+	},
+	{} /* terminate list */
+};
+
+static struct pci_driver pm8001_pci_driver = {
+	.name		= DRV_NAME,
+	.id_table	= pm8001_pci_table,
+	.probe		= pm8001_pci_probe,
+	.remove		= __devexit_p(pm8001_pci_remove),
+	.suspend	= pm8001_pci_suspend,
+	.resume		= pm8001_pci_resume,
+};
+
+/**
+ *	pm8001_init - initialize scsi transport template
+ */
+static int __init pm8001_init(void)
+{
+	int rc;
+	pm8001_id = 0;
+	pm8001_stt = sas_domain_attach_transport(&pm8001_transport_ops);
+	if (!pm8001_stt)
+		return -ENOMEM;
+	rc = pci_register_driver(&pm8001_pci_driver);
+	if (rc)
+		goto err_out;
+	return 0;
+err_out:
+	sas_release_transport(pm8001_stt);
+	return rc;
+}
+
+static void __exit pm8001_exit(void)
+{
+	pci_unregister_driver(&pm8001_pci_driver);
+	sas_release_transport(pm8001_stt);
+}
+
+module_init(pm8001_init);
+module_exit(pm8001_exit);
+
+MODULE_AUTHOR("Jack Wang <jack_wang@usish.com>");
+MODULE_DESCRIPTION("PMC-Sierra PM8001 SAS/SATA controller driver");
+MODULE_VERSION(DRV_VERSION);
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, pm8001_pci_table);
+
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
new file mode 100644
index 000000000000..1f767a0e727a
--- /dev/null
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -0,0 +1,1103 @@
+/*
+ * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver
+ *
+ * Copyright (c) 2008-2009 USI Co., Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ */
+
+#include "pm8001_sas.h"
+
+/**
+ * pm8001_find_tag - find the tag that belongs to a given sas task
+ * @task: the task sent to the LLDD
+ * @tag: the found tag associated with the task
+ */
+static int pm8001_find_tag(struct sas_task *task, u32 *tag)
+{
+	if (task->lldd_task) {
+		struct pm8001_ccb_info *ccb;
+		ccb = task->lldd_task;
+		*tag = ccb->ccb_tag;
+		return 1;
+	}
+	return 0;
+}
+
+/**
+  * pm8001_tag_clear - clear a tag in the tags bitmap
+  * @pm8001_ha: our hba struct
+  * @tag: the tag to release
+  */
+static void pm8001_tag_clear(struct pm8001_hba_info *pm8001_ha, u32 tag)
+{
+	void *bitmap = pm8001_ha->tags;
+	clear_bit(tag, bitmap);
+}
+
+static void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag)
+{
+	pm8001_tag_clear(pm8001_ha, tag);
+}
+
+static void pm8001_tag_set(struct pm8001_hba_info *pm8001_ha, u32 tag)
+{
+	void *bitmap = pm8001_ha->tags;
+	set_bit(tag, bitmap);
+}
+
+/**
+  * pm8001_tag_alloc - allocate an empty tag for a task to use.
+  * @pm8001_ha: our hba struct
+  * @tag_out: the allocated empty tag.
+  */
+inline int pm8001_tag_alloc(struct pm8001_hba_info *pm8001_ha, u32 *tag_out)
+{
+	unsigned int index, tag;
+	void *bitmap = pm8001_ha->tags;
+
+	index = find_first_zero_bit(bitmap, pm8001_ha->tags_num);
+	tag = index;
+	if (tag >= pm8001_ha->tags_num)
+		return -SAS_QUEUE_FULL;
+	pm8001_tag_set(pm8001_ha, tag);
+	*tag_out = tag;
+	return 0;
+}
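+
+/*
+ * Illustrative usage sketch (a hypothetical caller mirroring
+ * pm8001_task_exec() later in this file; not part of the driver itself):
+ * the tag indexes the per-HBA ccb array and must be released on error
+ * paths.
+ *
+ *	u32 tag;
+ *	struct pm8001_ccb_info *ccb;
+ *
+ *	rc = pm8001_tag_alloc(pm8001_ha, &tag);
+ *	if (rc)
+ *		return rc;	[-SAS_QUEUE_FULL when exhausted]
+ *	ccb = &pm8001_ha->ccb_info[tag];
+ *	...
+ *	pm8001_tag_free(pm8001_ha, tag);	[on the failure path]
+ */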
+
+void pm8001_tag_init(struct pm8001_hba_info *pm8001_ha)
+{
+	int i;
+	for (i = 0; i < pm8001_ha->tags_num; ++i)
+		pm8001_tag_clear(pm8001_ha, i);
+}
+
+ /**
+  * pm8001_mem_alloc - allocate DMA-able memory for pm8001.
+  * @pdev: pci device.
+  * @virt_addr: the allocated, alignment-adjusted virtual address
+  * @pphys_addr: the raw DMA handle returned by the allocator.
+  * @pphys_addr_hi: high 32 bits of the aligned physical address.
+  * @pphys_addr_lo: low 32 bits of the aligned physical address.
+  * @mem_size: memory size.
+  * @align: required alignment in bytes (0 for none).
+  */
+int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr,
+	dma_addr_t *pphys_addr, u32 *pphys_addr_hi,
+	u32 *pphys_addr_lo, u32 mem_size, u32 align)
+{
+	caddr_t mem_virt_alloc;
+	dma_addr_t mem_dma_handle;
+	u64 phys_align;
+	u64 align_offset = 0;
+	if (align)
+		align_offset = (dma_addr_t)align - 1;
+	mem_virt_alloc =
+		pci_alloc_consistent(pdev, mem_size + align, &mem_dma_handle);
+	if (!mem_virt_alloc) {
+		pm8001_printk("memory allocation error\n");
+		return -1;
+	}
+	memset((void *)mem_virt_alloc, 0, mem_size+align);
+	*pphys_addr = mem_dma_handle;
+	phys_align = (*pphys_addr + align_offset) & ~align_offset;
+	*virt_addr = (void *)mem_virt_alloc + phys_align - *pphys_addr;
+	*pphys_addr_hi = upper_32_bits(phys_align);
+	*pphys_addr_lo = lower_32_bits(phys_align);
+	return 0;
+}
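+
+/*
+ * Worked example of the round-up above (illustrative numbers only):
+ * with align = 64, align_offset is 0x3f; a DMA handle of 0x1010 gives
+ * phys_align = (0x1010 + 0x3f) & ~0x3f = 0x1040, and *virt_addr is
+ * advanced by the same 0x30 bytes so that the virtual and bus views of
+ * the buffer stay congruent.
+ */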
+/**
+  * pm8001_find_ha_by_dev - map a domain device from the sas layer back to
+  * our hba struct.
+  * @dev: the domain device from the sas layer.
+  */
+static
+struct pm8001_hba_info *pm8001_find_ha_by_dev(struct domain_device *dev)
+{
+	struct sas_ha_struct *sha = dev->port->ha;
+	struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+	return pm8001_ha;
+}
+
+/**
+  * pm8001_phy_control - phy control hook registered in the
+  * sas_domain_function_template for libsas to call. Note that this only
+  * controls HBA phys; to control an expander phy, use an SMP command.
+  * @sas_phy: which phy among the HBA phys.
+  * @func: the operation to perform.
+  * @funcdata: operation-specific data (e.g. sas_phy_linkrates); may be NULL.
+  */
+int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
+	void *funcdata)
+{
+	int rc = 0, phy_id = sas_phy->id;
+	struct pm8001_hba_info *pm8001_ha = NULL;
+	struct sas_phy_linkrates *rates;
+	DECLARE_COMPLETION_ONSTACK(completion);
+	pm8001_ha = sas_phy->ha->lldd_ha;
+	pm8001_ha->phy[phy_id].enable_completion = &completion;
+	switch (func) {
+	case PHY_FUNC_SET_LINK_RATE:
+		rates = funcdata;
+		if (rates->minimum_linkrate) {
+			pm8001_ha->phy[phy_id].minimum_linkrate =
+				rates->minimum_linkrate;
+		}
+		if (rates->maximum_linkrate) {
+			pm8001_ha->phy[phy_id].maximum_linkrate =
+				rates->maximum_linkrate;
+		}
+		if (pm8001_ha->phy[phy_id].phy_state == 0) {
+			PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
+			wait_for_completion(&completion);
+		}
+		PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
+					      PHY_LINK_RESET);
+		break;
+	case PHY_FUNC_HARD_RESET:
+		if (pm8001_ha->phy[phy_id].phy_state == 0) {
+			PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
+			wait_for_completion(&completion);
+		}
+		PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
+					      PHY_HARD_RESET);
+		break;
+	case PHY_FUNC_LINK_RESET:
+		if (pm8001_ha->phy[phy_id].phy_state == 0) {
+			PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
+			wait_for_completion(&completion);
+		}
+		PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
+					      PHY_LINK_RESET);
+		break;
+	case PHY_FUNC_RELEASE_SPINUP_HOLD:
+		PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
+					      PHY_LINK_RESET);
+		break;
+	case PHY_FUNC_DISABLE:
+		PM8001_CHIP_DISP->phy_stop_req(pm8001_ha, phy_id);
+		break;
+	default:
+		rc = -EOPNOTSUPP;
+	}
+	msleep(300);
+	return rc;
+}
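+
+/*
+ * Illustrative note (an assumption about the libsas call path, not
+ * stated here): a call such as sas_phy_reset(phy, 1) on a local phy is
+ * routed by libsas to the lldd_control_phy hook, i.e. to
+ * pm8001_phy_control() above with PHY_FUNC_HARD_RESET, which is why the
+ * reset paths later in this file simply call sas_phy_reset() directly.
+ */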
+
+int pm8001_slave_alloc(struct scsi_device *scsi_dev)
+{
+	struct domain_device *dev = sdev_to_domain_dev(scsi_dev);
+	if (dev_is_sata(dev)) {
+		/* We don't need to rescan targets
+		 * if the REPORT_LUNS request failed
+		 */
+		if (scsi_dev->lun > 0)
+			return -ENXIO;
+		scsi_dev->tagged_supported = 1;
+	}
+	return sas_slave_alloc(scsi_dev);
+}
+
+/**
+  * pm8001_scan_start - enable all HBA phys by sending the phy_start
+  * command to the HBA.
+  * @shost: the scsi host data.
+  */
+void pm8001_scan_start(struct Scsi_Host *shost)
+{
+	int i;
+	struct pm8001_hba_info *pm8001_ha;
+	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+	pm8001_ha = sha->lldd_ha;
+	PM8001_CHIP_DISP->sas_re_init_req(pm8001_ha);
+	for (i = 0; i < pm8001_ha->chip->n_phy; ++i)
+		PM8001_CHIP_DISP->phy_start_req(pm8001_ha, i);
+}
+
+int pm8001_scan_finished(struct Scsi_Host *shost, unsigned long time)
+{
+	/* give the phy enabling interrupt event time to come in (1s
+	 * is empirically about all it takes) */
+	if (time < HZ)
+		return 0;
+	/* Wait for discovery to finish */
+	scsi_flush_work(shost);
+	return 1;
+}
+
+/**
+  * pm8001_task_prep_smp - the dispatcher function, prepare data for smp task
+  * @pm8001_ha: our hba card information
+  * @ccb: the ccb which attached to smp task
+  */
+static int pm8001_task_prep_smp(struct pm8001_hba_info *pm8001_ha,
+	struct pm8001_ccb_info *ccb)
+{
+	return PM8001_CHIP_DISP->smp_req(pm8001_ha, ccb);
+}
+
+u32 pm8001_get_ncq_tag(struct sas_task *task, u32 *tag)
+{
+	struct ata_queued_cmd *qc = task->uldd_task;
+	if (qc) {
+		if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
+			qc->tf.command == ATA_CMD_FPDMA_READ) {
+			*tag = qc->tag;
+			return 1;
+		}
+	}
+	return 0;
+}
+
+/**
+  * pm8001_task_prep_ata - the dispatcher function, prepare data for sata task
+  * @pm8001_ha: our hba card information
+  * @ccb: the ccb which attached to sata task
+  */
+static int pm8001_task_prep_ata(struct pm8001_hba_info *pm8001_ha,
+	struct pm8001_ccb_info *ccb)
+{
+	return PM8001_CHIP_DISP->sata_req(pm8001_ha, ccb);
+}
+
+/**
+  * pm8001_task_prep_ssp_tm - the dispatcher function, prepare task management data
+  * @pm8001_ha: our hba card information
+  * @ccb: the ccb which attached to TM
+  * @tmf: the task management IU
+  */
+static int pm8001_task_prep_ssp_tm(struct pm8001_hba_info *pm8001_ha,
+	struct pm8001_ccb_info *ccb, struct pm8001_tmf_task *tmf)
+{
+	return PM8001_CHIP_DISP->ssp_tm_req(pm8001_ha, ccb, tmf);
+}
+
+/**
+  * pm8001_task_prep_ssp - the dispatcher function, prepare ssp data for an ssp task
+  * @pm8001_ha: our hba card information
+  * @ccb: the ccb which attached to ssp task
+  */
+static int pm8001_task_prep_ssp(struct pm8001_hba_info *pm8001_ha,
+	struct pm8001_ccb_info *ccb)
+{
+	return PM8001_CHIP_DISP->ssp_io_req(pm8001_ha, ccb);
+}
+int pm8001_slave_configure(struct scsi_device *sdev)
+{
+	struct domain_device *dev = sdev_to_domain_dev(sdev);
+	int ret = sas_slave_configure(sdev);
+	if (ret)
+		return ret;
+	if (dev_is_sata(dev)) {
+	#ifdef PM8001_DISABLE_NCQ
+		struct ata_port *ap = dev->sata_dev.ap;
+		struct ata_device *adev = ap->link.device;
+		adev->flags |= ATA_DFLAG_NCQ_OFF;
+		scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 1);
+	#endif
+	}
+	return 0;
+}
+/**
+  * pm8001_task_exec - queue a task (ssp, smp or ata) to the hardware.
+  * @task: the task to be executed.
+  * @num: if can_queue is greater than 1, tasks can be queued in a batch;
+  * SMP tasks are always executed one at a time.
+  * @gfp_flags: gfp_flags.
+  * @is_tmf: whether this is a task management task.
+  * @tmf: the task management IU
+  */
+#define DEV_IS_GONE(pm8001_dev)	\
+	((!pm8001_dev || (pm8001_dev->dev_type == NO_DEVICE)))
+static int pm8001_task_exec(struct sas_task *task, const int num,
+	gfp_t gfp_flags, int is_tmf, struct pm8001_tmf_task *tmf)
+{
+	struct domain_device *dev = task->dev;
+	struct pm8001_hba_info *pm8001_ha;
+	struct pm8001_device *pm8001_dev;
+	struct sas_task *t = task;
+	struct pm8001_ccb_info *ccb;
+	u32 tag = 0xdeadbeef, rc, n_elem = 0;
+	u32 n = num;
+	unsigned long flags = 0;
+
+	if (!dev->port) {
+		struct task_status_struct *tsm = &t->task_status;
+		tsm->resp = SAS_TASK_UNDELIVERED;
+		tsm->stat = SAS_PHY_DOWN;
+		if (dev->dev_type != SATA_DEV)
+			t->task_done(t);
+		return 0;
+	}
+	pm8001_ha = pm8001_find_ha_by_dev(task->dev);
+	PM8001_IO_DBG(pm8001_ha, pm8001_printk("pm8001_task_exec device \n "));
+	spin_lock_irqsave(&pm8001_ha->lock, flags);
+	do {
+		dev = t->dev;
+		pm8001_dev = dev->lldd_dev;
+		if (DEV_IS_GONE(pm8001_dev)) {
+			if (pm8001_dev) {
+				PM8001_IO_DBG(pm8001_ha,
+					pm8001_printk("device %d not ready.\n",
+					pm8001_dev->device_id));
+			} else {
+				PM8001_IO_DBG(pm8001_ha,
+					pm8001_printk("device %016llx not "
+					"ready.\n", SAS_ADDR(dev->sas_addr)));
+			}
+			rc = SAS_PHY_DOWN;
+			goto out_done;
+		}
+		rc = pm8001_tag_alloc(pm8001_ha, &tag);
+		if (rc)
+			goto err_out;
+		ccb = &pm8001_ha->ccb_info[tag];
+
+		if (!sas_protocol_ata(t->task_proto)) {
+			if (t->num_scatter) {
+				n_elem = dma_map_sg(pm8001_ha->dev,
+					t->scatter,
+					t->num_scatter,
+					t->data_dir);
+				if (!n_elem) {
+					rc = -ENOMEM;
+					goto err_out_tag;
+				}
+			}
+		} else {
+			n_elem = t->num_scatter;
+		}
+
+		t->lldd_task = ccb;
+		ccb->n_elem = n_elem;
+		ccb->ccb_tag = tag;
+		ccb->task = t;
+		switch (t->task_proto) {
+		case SAS_PROTOCOL_SMP:
+			rc = pm8001_task_prep_smp(pm8001_ha, ccb);
+			break;
+		case SAS_PROTOCOL_SSP:
+			if (is_tmf)
+				rc = pm8001_task_prep_ssp_tm(pm8001_ha,
+					ccb, tmf);
+			else
+				rc = pm8001_task_prep_ssp(pm8001_ha, ccb);
+			break;
+		case SAS_PROTOCOL_SATA:
+		case SAS_PROTOCOL_STP:
+		case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
+			rc = pm8001_task_prep_ata(pm8001_ha, ccb);
+			break;
+		default:
+			dev_printk(KERN_ERR, pm8001_ha->dev,
+				"unknown sas_task proto: 0x%x\n",
+				t->task_proto);
+			rc = -EINVAL;
+			break;
+		}
+
+		if (rc) {
+			PM8001_IO_DBG(pm8001_ha,
+				pm8001_printk("rc is %x\n", rc));
+			goto err_out_tag;
+		}
+		/* TODO: select normal or high priority */
+		spin_lock(&t->task_state_lock);
+		t->task_state_flags |= SAS_TASK_AT_INITIATOR;
+		spin_unlock(&t->task_state_lock);
+		pm8001_dev->running_req++;
+		if (n > 1)
+			t = list_entry(t->list.next, struct sas_task, list);
+	} while (--n);
+	rc = 0;
+	goto out_done;
+
+err_out_tag:
+	pm8001_tag_free(pm8001_ha, tag);
+err_out:
+	dev_printk(KERN_ERR, pm8001_ha->dev, "pm8001 exec failed[%d]!\n", rc);
+	if (!sas_protocol_ata(t->task_proto))
+		if (n_elem)
+			dma_unmap_sg(pm8001_ha->dev, t->scatter, n_elem,
+				t->data_dir);
+out_done:
+	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+	return rc;
+}
+
+/**
+  * pm8001_queue_command - entry point registered for the upper layer; all
+  * IO commands sent to the HBA come through this interface.
+  * @task: the task to be executed.
+  * @num: if can_queue is greater than 1, tasks can be queued in a batch;
+  * SMP tasks are always executed one at a time.
+  * @gfp_flags: gfp_flags
+  */
+int pm8001_queue_command(struct sas_task *task, const int num,
+		gfp_t gfp_flags)
+{
+	return pm8001_task_exec(task, num, gfp_flags, 0, NULL);
+}
+
+void pm8001_ccb_free(struct pm8001_hba_info *pm8001_ha, u32 ccb_idx)
+{
+	pm8001_tag_clear(pm8001_ha, ccb_idx);
+}
+
+/**
+  * pm8001_ccb_task_free - free the sg for an ssp or smp command, then free
+  * the ccb.
+  * @pm8001_ha: our hba card information
+  * @task: the task to be freed.
+  * @ccb: the ccb attached to the task
+  * @ccb_idx: ccb index.
+  */
+void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha,
+	struct sas_task *task, struct pm8001_ccb_info *ccb, u32 ccb_idx)
+{
+	if (!ccb->task)
+		return;
+	if (!sas_protocol_ata(task->task_proto))
+		if (ccb->n_elem)
+			dma_unmap_sg(pm8001_ha->dev, task->scatter,
+				task->num_scatter, task->data_dir);
+
+	switch (task->task_proto) {
+	case SAS_PROTOCOL_SMP:
+		dma_unmap_sg(pm8001_ha->dev, &task->smp_task.smp_resp, 1,
+			PCI_DMA_FROMDEVICE);
+		dma_unmap_sg(pm8001_ha->dev, &task->smp_task.smp_req, 1,
+			PCI_DMA_TODEVICE);
+		break;
+
+	case SAS_PROTOCOL_SATA:
+	case SAS_PROTOCOL_STP:
+	case SAS_PROTOCOL_SSP:
+	default:
+		/* do nothing */
+		break;
+	}
+	task->lldd_task = NULL;
+	ccb->task = NULL;
+	ccb->ccb_tag = 0xFFFFFFFF;
+	pm8001_ccb_free(pm8001_ha, ccb_idx);
+}
+
+ /**
+  * pm8001_alloc_dev - find an empty pm8001_device slot
+  * @pm8001_ha: our hba card information
+  */
+struct pm8001_device *pm8001_alloc_dev(struct pm8001_hba_info *pm8001_ha)
+{
+	u32 dev;
+	for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) {
+		if (pm8001_ha->devices[dev].dev_type == NO_DEVICE) {
+			pm8001_ha->devices[dev].id = dev;
+			return &pm8001_ha->devices[dev];
+		}
+	}
+	if (dev == PM8001_MAX_DEVICES) {
+		PM8001_FAIL_DBG(pm8001_ha,
+			pm8001_printk("max support %d devices, ignore ..\n",
+			PM8001_MAX_DEVICES));
+	}
+	return NULL;
+}
+
+static void pm8001_free_dev(struct pm8001_device *pm8001_dev)
+{
+	u32 id = pm8001_dev->id;
+	memset(pm8001_dev, 0, sizeof(*pm8001_dev));
+	pm8001_dev->id = id;
+	pm8001_dev->dev_type = NO_DEVICE;
+	pm8001_dev->device_id = PM8001_MAX_DEVICES;
+	pm8001_dev->sas_device = NULL;
+}
+
+/**
+  * pm8001_dev_found_notify - libsas notifies us that a device was found.
+  * @dev: the device structure which the sas layer uses.
+  *
+  * When libsas finds a sas domain device, it tells the LLDD that the device
+  * was found; the LLDD then registers this device with the HBA firmware
+  * using the "OPC_INB_REG_DEV" command. The HBA assigns a device ID
+  * (derived from the device's sas address) and returns it to the LLDD. From
+  * then on we talk to the HBA FW using the device ID the HBA assigned
+  * rather than the sas address. This step is necessary for our HBA but is
+  * optional for other HBA drivers.
+  */
+static int pm8001_dev_found_notify(struct domain_device *dev)
+{
+	unsigned long flags = 0;
+	int res = 0;
+	struct pm8001_hba_info *pm8001_ha = NULL;
+	struct domain_device *parent_dev = dev->parent;
+	struct pm8001_device *pm8001_device;
+	DECLARE_COMPLETION_ONSTACK(completion);
+	u32 flag = 0;
+	pm8001_ha = pm8001_find_ha_by_dev(dev);
+	spin_lock_irqsave(&pm8001_ha->lock, flags);
+
+	pm8001_device = pm8001_alloc_dev(pm8001_ha);
+	if (!pm8001_device) {
+		res = -1;
+		goto found_out;
+	}
+	pm8001_device->sas_device = dev;
+	dev->lldd_dev = pm8001_device;
+	pm8001_device->dev_type = dev->dev_type;
+	pm8001_device->dcompletion = &completion;
+	if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
+		int phy_id;
+		struct ex_phy *phy;
+		for (phy_id = 0; phy_id < parent_dev->ex_dev.num_phys;
+		phy_id++) {
+			phy = &parent_dev->ex_dev.ex_phy[phy_id];
+			if (SAS_ADDR(phy->attached_sas_addr)
+				== SAS_ADDR(dev->sas_addr)) {
+				pm8001_device->attached_phy = phy_id;
+				break;
+			}
+		}
+		if (phy_id == parent_dev->ex_dev.num_phys) {
+			PM8001_FAIL_DBG(pm8001_ha,
+			pm8001_printk("Error: no attached dev:%016llx"
+			" at ex:%016llx.\n", SAS_ADDR(dev->sas_addr),
+				SAS_ADDR(parent_dev->sas_addr)));
+			res = -1;
+		}
+	} else {
+		if (dev->dev_type == SATA_DEV) {
+			pm8001_device->attached_phy =
+				dev->rphy->identify.phy_identifier;
+			flag = 1; /* direct attached sata */
+		}
+	}
+	/* register this device with the HBA */
+	PM8001_DISC_DBG(pm8001_ha, pm8001_printk("Found device\n"));
+	PM8001_CHIP_DISP->reg_dev_req(pm8001_ha, pm8001_device, flag);
+	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+	wait_for_completion(&completion);
+	if (dev->dev_type == SAS_END_DEV)
+		msleep(50);
+	pm8001_ha->flags = PM8001F_RUN_TIME;
+	return 0;
+found_out:
+	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+	return res;
+}
+
+int pm8001_dev_found(struct domain_device *dev)
+{
+	return pm8001_dev_found_notify(dev);
+}
+
+/**
+  * pm8001_alloc_task - allocate a task structure for TMF
+  */
+static struct sas_task *pm8001_alloc_task(void)
+{
+	struct sas_task *task = kzalloc(sizeof(*task), GFP_KERNEL);
+	if (task) {
+		INIT_LIST_HEAD(&task->list);
+		spin_lock_init(&task->task_state_lock);
+		task->task_state_flags = SAS_TASK_STATE_PENDING;
+		init_timer(&task->timer);
+		init_completion(&task->completion);
+	}
+	return task;
+}
+
+static void pm8001_free_task(struct sas_task *task)
+{
+	if (task) {
+		BUG_ON(!list_empty(&task->list));
+		kfree(task);
+	}
+}
+
+static void pm8001_task_done(struct sas_task *task)
+{
+	if (!del_timer(&task->timer))
+		return;
+	complete(&task->completion);
+}
+
+static void pm8001_tmf_timedout(unsigned long data)
+{
+	struct sas_task *task = (struct sas_task *)data;
+
+	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
+	complete(&task->completion);
+}
+
+#define PM8001_TASK_TIMEOUT 20
+/**
+  * pm8001_exec_internal_tmf_task - execute a task management command.
+  * @dev: the target device.
+  * @tmf: which task management function to execute.
+  * @para_len: length of the ssp task parameter.
+  * @parameter: ssp task parameter.
+  *
+  * When an error or exception happens we may want to react, for example by
+  * aborting the issued task that resulted in this exception; that is done
+  * by calling this function. Note that it shares the task execute interface.
+  */
+static int pm8001_exec_internal_tmf_task(struct domain_device *dev,
+	void *parameter, u32 para_len, struct pm8001_tmf_task *tmf)
+{
+	int res, retry;
+	struct sas_task *task = NULL;
+	struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
+
+	for (retry = 0; retry < 3; retry++) {
+		task = pm8001_alloc_task();
+		if (!task)
+			return -ENOMEM;
+
+		task->dev = dev;
+		task->task_proto = dev->tproto;
+		memcpy(&task->ssp_task, parameter, para_len);
+		task->task_done = pm8001_task_done;
+		task->timer.data = (unsigned long)task;
+		task->timer.function = pm8001_tmf_timedout;
+		task->timer.expires = jiffies + PM8001_TASK_TIMEOUT*HZ;
+		add_timer(&task->timer);
+
+		res = pm8001_task_exec(task, 1, GFP_KERNEL, 1, tmf);
+
+		if (res) {
+			del_timer(&task->timer);
+			PM8001_FAIL_DBG(pm8001_ha,
+				pm8001_printk("Executing internal task "
+				"failed\n"));
+			goto ex_err;
+		}
+		wait_for_completion(&task->completion);
+		res = -TMF_RESP_FUNC_FAILED;
+		/* Even if the TMF timed out, return directly. */
+		if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
+			if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
+				PM8001_FAIL_DBG(pm8001_ha,
+					pm8001_printk("TMF task[%x]timeout.\n",
+					tmf->tmf));
+				goto ex_err;
+			}
+		}
+
+		if (task->task_status.resp == SAS_TASK_COMPLETE &&
+			task->task_status.stat == SAM_GOOD) {
+			res = TMF_RESP_FUNC_COMPLETE;
+			break;
+		}
+
+		if (task->task_status.resp == SAS_TASK_COMPLETE &&
+		task->task_status.stat == SAS_DATA_UNDERRUN) {
+			/* no error, but return the number of bytes of
+			* underrun */
+			res = task->task_status.residual;
+			break;
+		}
+
+		if (task->task_status.resp == SAS_TASK_COMPLETE &&
+			task->task_status.stat == SAS_DATA_OVERRUN) {
+			PM8001_FAIL_DBG(pm8001_ha,
+				pm8001_printk("Blocked task error.\n"));
+			res = -EMSGSIZE;
+			break;
+		} else {
+			PM8001_EH_DBG(pm8001_ha,
+				pm8001_printk(" Task to dev %016llx response:"
+				"0x%x status 0x%x\n",
+				SAS_ADDR(dev->sas_addr),
+				task->task_status.resp,
+				task->task_status.stat));
+			pm8001_free_task(task);
+			task = NULL;
+		}
+	}
+ex_err:
+	BUG_ON(retry == 3 && task != NULL);
+	if (task != NULL)
+		pm8001_free_task(task);
+	return res;
+}
+
+static int
+pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha,
+	struct pm8001_device *pm8001_dev, struct domain_device *dev, u32 flag,
+	u32 task_tag)
+{
+	int res, retry;
+	u32 ccb_tag;
+	struct pm8001_ccb_info *ccb;
+	struct sas_task *task = NULL;
+
+	for (retry = 0; retry < 3; retry++) {
+		task = pm8001_alloc_task();
+		if (!task)
+			return -ENOMEM;
+
+		task->dev = dev;
+		task->task_proto = dev->tproto;
+		task->task_done = pm8001_task_done;
+		task->timer.data = (unsigned long)task;
+		task->timer.function = pm8001_tmf_timedout;
+		task->timer.expires = jiffies + PM8001_TASK_TIMEOUT*HZ;
+		add_timer(&task->timer);
+
+		res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
+		if (res) {
+			/* don't leak the task or leave its timer armed */
+			del_timer(&task->timer);
+			pm8001_free_task(task);
+			return res;
+		}
+		ccb = &pm8001_ha->ccb_info[ccb_tag];
+		ccb->device = pm8001_dev;
+		ccb->ccb_tag = ccb_tag;
+		ccb->task = task;
+
+		res = PM8001_CHIP_DISP->task_abort(pm8001_ha,
+			pm8001_dev, flag, task_tag, ccb_tag);
+
+		if (res) {
+			del_timer(&task->timer);
+			PM8001_FAIL_DBG(pm8001_ha,
+				pm8001_printk("Executing internal task "
+				"failed\n"));
+			goto ex_err;
+		}
+		wait_for_completion(&task->completion);
+		res = TMF_RESP_FUNC_FAILED;
+		/* Even if the TMF timed out, return directly. */
+		if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
+			if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
+				PM8001_FAIL_DBG(pm8001_ha,
+					pm8001_printk("TMF task timeout.\n"));
+				goto ex_err;
+			}
+		}
+
+		if (task->task_status.resp == SAS_TASK_COMPLETE &&
+			task->task_status.stat == SAM_GOOD) {
+			res = TMF_RESP_FUNC_COMPLETE;
+			break;
+
+		} else {
+			PM8001_EH_DBG(pm8001_ha,
+				pm8001_printk(" Task to dev %016llx response: "
+					"0x%x status 0x%x\n",
+				SAS_ADDR(dev->sas_addr),
+				task->task_status.resp,
+				task->task_status.stat));
+			pm8001_free_task(task);
+			task = NULL;
+		}
+	}
+ex_err:
+	BUG_ON(retry == 3 && task != NULL);
+	if (task != NULL)
+		pm8001_free_task(task);
+	return res;
+}
+
+/**
+  * pm8001_dev_gone_notify - see the comments for "pm8001_dev_found_notify"
+  * @dev: the device structure which sas layer used.
+  */
+static void pm8001_dev_gone_notify(struct domain_device *dev)
+{
+	unsigned long flags = 0;
+	u32 tag;
+	struct pm8001_hba_info *pm8001_ha;
+	struct pm8001_device *pm8001_dev = dev->lldd_dev;
+	u32 device_id;
+	pm8001_ha = pm8001_find_ha_by_dev(dev);
+	spin_lock_irqsave(&pm8001_ha->lock, flags);
+	pm8001_tag_alloc(pm8001_ha, &tag);
+	if (pm8001_dev) {
+		device_id = pm8001_dev->device_id;
+		PM8001_DISC_DBG(pm8001_ha,
+			pm8001_printk("found dev[%d:%x] is gone.\n",
+			pm8001_dev->device_id, pm8001_dev->dev_type));
+		if (pm8001_dev->running_req) {
+			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+			pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
+				dev, 1, 0);
+			spin_lock_irqsave(&pm8001_ha->lock, flags);
+		}
+		PM8001_CHIP_DISP->dereg_dev_req(pm8001_ha, device_id);
+		pm8001_free_dev(pm8001_dev);
+	} else {
+		PM8001_DISC_DBG(pm8001_ha,
+			pm8001_printk("Found dev has gone.\n"));
+	}
+	dev->lldd_dev = NULL;
+	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+}
+
+void pm8001_dev_gone(struct domain_device *dev)
+{
+	pm8001_dev_gone_notify(dev);
+}
+
+static int pm8001_issue_ssp_tmf(struct domain_device *dev,
+	u8 *lun, struct pm8001_tmf_task *tmf)
+{
+	struct sas_ssp_task ssp_task;
+	if (!(dev->tproto & SAS_PROTOCOL_SSP))
+		return TMF_RESP_FUNC_ESUPP;
+
+	strncpy((u8 *)&ssp_task.LUN, lun, 8);
+	return pm8001_exec_internal_tmf_task(dev, &ssp_task, sizeof(ssp_task),
+		tmf);
+}
+
+/**
+  * The standard mandates a link reset for ATA (type 0) and a hard reset
+  * for SSP (type 1), only for RECOVERY.
+  */
+int pm8001_I_T_nexus_reset(struct domain_device *dev)
+{
+	int rc = TMF_RESP_FUNC_FAILED;
+	struct pm8001_device *pm8001_dev;
+	struct pm8001_hba_info *pm8001_ha;
+	struct sas_phy *phy;
+	if (!dev || !dev->lldd_dev)
+		return -1;
+
+	pm8001_dev = dev->lldd_dev;
+	pm8001_ha = pm8001_find_ha_by_dev(dev);
+	phy = sas_find_local_phy(dev);
+
+	if (dev_is_sata(dev)) {
+		DECLARE_COMPLETION_ONSTACK(completion_setstate);
+		rc = sas_phy_reset(phy, 1);
+		msleep(2000);
+		rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
+			dev, 1, 0);
+		pm8001_dev->setds_completion = &completion_setstate;
+		rc = PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
+			pm8001_dev, 0x01);
+		wait_for_completion(&completion_setstate);
+	} else {
+		rc = sas_phy_reset(phy, 1);
+		msleep(2000);
+	}
+	PM8001_EH_DBG(pm8001_ha, pm8001_printk(" for device[%x]:rc=%d\n",
+		pm8001_dev->device_id, rc));
+	return rc;
+}
+
+/* mandatory SAM-3, reset the specified LUN */
+int pm8001_lu_reset(struct domain_device *dev, u8 *lun)
+{
+	int rc = TMF_RESP_FUNC_FAILED;
+	struct pm8001_tmf_task tmf_task;
+	struct pm8001_device *pm8001_dev = dev->lldd_dev;
+	struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
+	if (dev_is_sata(dev)) {
+		struct sas_phy *phy = sas_find_local_phy(dev);
+		rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
+			dev, 1, 0);
+		rc = sas_phy_reset(phy, 1);
+		rc = PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
+			pm8001_dev, 0x01);
+		msleep(2000);
+	} else {
+		tmf_task.tmf = TMF_LU_RESET;
+		rc = pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
+	}
+	/* If this failed, fall back to an I_T_Nexus reset */
+	PM8001_EH_DBG(pm8001_ha, pm8001_printk("for device[%x]:rc=%d\n",
+		pm8001_dev->device_id, rc));
+	return rc;
+}
+
+/* optional SAM-3 */
+int pm8001_query_task(struct sas_task *task)
+{
+	u32 tag = 0xdeadbeef;
+	int i = 0;
+	struct scsi_lun lun;
+	struct pm8001_tmf_task tmf_task;
+	int rc = TMF_RESP_FUNC_FAILED;
+	if (unlikely(!task || !task->lldd_task || !task->dev))
+		return rc;
+
+	if (task->task_proto & SAS_PROTOCOL_SSP) {
+		struct scsi_cmnd *cmnd = task->uldd_task;
+		struct domain_device *dev = task->dev;
+		struct pm8001_hba_info *pm8001_ha =
+			pm8001_find_ha_by_dev(dev);
+
+		int_to_scsilun(cmnd->device->lun, &lun);
+		rc = pm8001_find_tag(task, &tag);
+		if (rc == 0) {
+			rc = TMF_RESP_FUNC_FAILED;
+			return rc;
+		}
+		PM8001_EH_DBG(pm8001_ha, pm8001_printk("Query:["));
+		for (i = 0; i < 16; i++)
+			printk(KERN_INFO "%02x ", cmnd->cmnd[i]);
+		printk(KERN_INFO "]\n");
+		tmf_task.tmf = TMF_QUERY_TASK;
+		tmf_task.tag_of_task_to_be_managed = tag;
+
+		rc = pm8001_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
+		switch (rc) {
+		/* The task is still in Lun, release it then */
+		case TMF_RESP_FUNC_SUCC:
+			PM8001_EH_DBG(pm8001_ha,
+				pm8001_printk("The task is still in Lun \n"));
+		/* The task is not in Lun or failed, reset the phy */
+		case TMF_RESP_FUNC_FAILED:
+		case TMF_RESP_FUNC_COMPLETE:
+			PM8001_EH_DBG(pm8001_ha,
+			pm8001_printk("The task is not in Lun or failed,"
+			" reset the phy \n"));
+			break;
+		}
+	}
+	pm8001_printk(":rc= %d\n", rc);
+	return rc;
+}
+
+/* mandatory SAM-3, abort the specified task; task/ccb info still must be freed */
+int pm8001_abort_task(struct sas_task *task)
+{
+	unsigned long flags;
+	u32 tag = 0xdeadbeef;
+	u32 device_id;
+	struct domain_device *dev;
+	struct pm8001_hba_info *pm8001_ha = NULL;
+	struct pm8001_ccb_info *ccb;
+	struct scsi_lun lun;
+	struct pm8001_device *pm8001_dev;
+	struct pm8001_tmf_task tmf_task;
+	int rc = TMF_RESP_FUNC_FAILED;
+	if (unlikely(!task || !task->lldd_task || !task->dev))
+		return rc;
+	spin_lock_irqsave(&task->task_state_lock, flags);
+	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
+		spin_unlock_irqrestore(&task->task_state_lock, flags);
+		rc = TMF_RESP_FUNC_COMPLETE;
+		goto out;
+	}
+	spin_unlock_irqrestore(&task->task_state_lock, flags);
+	if (task->task_proto & SAS_PROTOCOL_SSP) {
+		struct scsi_cmnd *cmnd = task->uldd_task;
+		dev = task->dev;
+		ccb = task->lldd_task;
+		pm8001_dev = dev->lldd_dev;
+		pm8001_ha = pm8001_find_ha_by_dev(dev);
+		int_to_scsilun(cmnd->device->lun, &lun);
+		rc = pm8001_find_tag(task, &tag);
+		if (rc == 0) {
+			printk(KERN_INFO "No such tag in %s\n", __func__);
+			rc = TMF_RESP_FUNC_FAILED;
+			return rc;
+		}
+		device_id = pm8001_dev->device_id;
+		PM8001_EH_DBG(pm8001_ha,
+			pm8001_printk("abort io to deviceid= %d\n", device_id));
+		tmf_task.tmf = TMF_ABORT_TASK;
+		tmf_task.tag_of_task_to_be_managed = tag;
+		rc = pm8001_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
+		pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
+			pm8001_dev->sas_device, 0, tag);
+	} else if (task->task_proto & SAS_PROTOCOL_SATA ||
+		task->task_proto & SAS_PROTOCOL_STP) {
+		dev = task->dev;
+		pm8001_dev = dev->lldd_dev;
+		pm8001_ha = pm8001_find_ha_by_dev(dev);
+		rc = pm8001_find_tag(task, &tag);
+		if (rc == 0) {
+			printk(KERN_INFO "No such tag in %s\n", __func__);
+			rc = TMF_RESP_FUNC_FAILED;
+			return rc;
+		}
+		rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
+			pm8001_dev->sas_device, 0, tag);
+	} else if (task->task_proto & SAS_PROTOCOL_SMP) {
+		/* SMP */
+		dev = task->dev;
+		pm8001_dev = dev->lldd_dev;
+		pm8001_ha = pm8001_find_ha_by_dev(dev);
+		rc = pm8001_find_tag(task, &tag);
+		if (rc == 0) {
+			printk(KERN_INFO "No such tag in %s\n", __func__);
+			rc = TMF_RESP_FUNC_FAILED;
+			return rc;
+		}
+		rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
+			pm8001_dev->sas_device, 0, tag);
+
+	}
+out:
+	if (rc != TMF_RESP_FUNC_COMPLETE)
+		pm8001_printk("rc= %d\n", rc);
+	return rc;
+}
+
+int pm8001_abort_task_set(struct domain_device *dev, u8 *lun)
+{
+	int rc = TMF_RESP_FUNC_FAILED;
+	struct pm8001_tmf_task tmf_task;
+
+	tmf_task.tmf = TMF_ABORT_TASK_SET;
+	rc = pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
+	return rc;
+}
+
+int pm8001_clear_aca(struct domain_device *dev, u8 *lun)
+{
+	int rc = TMF_RESP_FUNC_FAILED;
+	struct pm8001_tmf_task tmf_task;
+
+	tmf_task.tmf = TMF_CLEAR_ACA;
+	rc = pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
+
+	return rc;
+}
+
+int pm8001_clear_task_set(struct domain_device *dev, u8 *lun)
+{
+	int rc = TMF_RESP_FUNC_FAILED;
+	struct pm8001_tmf_task tmf_task;
+	struct pm8001_device *pm8001_dev = dev->lldd_dev;
+	struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
+
+	PM8001_EH_DBG(pm8001_ha,
+		pm8001_printk("I_T_L_Q clear task set[%x]\n",
+		pm8001_dev->device_id));
+	tmf_task.tmf = TMF_CLEAR_TASK_SET;
+	rc = pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
+	return rc;
+}
+
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
new file mode 100644
index 000000000000..30f2ede55a75
--- /dev/null
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -0,0 +1,481 @@
+/*
+ * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver
+ *
+ * Copyright (c) 2008-2009 USI Co., Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ */
+
+#ifndef _PM8001_SAS_H_
+#define _PM8001_SAS_H_
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/types.h>
+#include <linux/ctype.h>
+#include <linux/dma-mapping.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/smp_lock.h>
+#include <scsi/libsas.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/sas_ata.h>
+#include <asm/atomic.h>
+#include "pm8001_defs.h"
+
+#define DRV_NAME		"pm8001"
+#define DRV_VERSION		"0.1.36"
+#define PM8001_FAIL_LOGGING	0x01 /* libsas EH function logging */
+#define PM8001_INIT_LOGGING	0x02 /* driver init logging */
+#define PM8001_DISC_LOGGING	0x04 /* discovery layer logging */
+#define PM8001_IO_LOGGING	0x08 /* I/O path logging */
+#define PM8001_EH_LOGGING	0x10 /* Error message logging */
+#define PM8001_IOCTL_LOGGING	0x20 /* IOCTL message logging */
+#define PM8001_MSG_LOGGING	0x40 /* misc message logging */
+#define pm8001_printk(format, arg...)	printk(KERN_INFO "%s %d:" format,\
+				__func__, __LINE__, ## arg)
+#define PM8001_CHECK_LOGGING(HBA, LEVEL, CMD)	\
+do {						\
+	if (unlikely(HBA->logging_level & LEVEL))	\
+		do {					\
+			CMD;				\
+		} while (0);				\
+} while (0)
+
+#define PM8001_EH_DBG(HBA, CMD)			\
+	PM8001_CHECK_LOGGING(HBA, PM8001_EH_LOGGING, CMD)
+
+#define PM8001_INIT_DBG(HBA, CMD)		\
+	PM8001_CHECK_LOGGING(HBA, PM8001_INIT_LOGGING, CMD)
+
+#define PM8001_DISC_DBG(HBA, CMD)		\
+	PM8001_CHECK_LOGGING(HBA, PM8001_DISC_LOGGING, CMD)
+
+#define PM8001_IO_DBG(HBA, CMD)		\
+	PM8001_CHECK_LOGGING(HBA, PM8001_IO_LOGGING, CMD)
+
+#define PM8001_FAIL_DBG(HBA, CMD)		\
+	PM8001_CHECK_LOGGING(HBA, PM8001_FAIL_LOGGING, CMD)
+
+#define PM8001_IOCTL_DBG(HBA, CMD)		\
+	PM8001_CHECK_LOGGING(HBA, PM8001_IOCTL_LOGGING, CMD)
+
+#define PM8001_MSG_DBG(HBA, CMD)		\
+	PM8001_CHECK_LOGGING(HBA, PM8001_MSG_LOGGING, CMD)
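+
+/*
+ * Usage example (illustrative, mirroring calls elsewhere in the driver):
+ *
+ *	PM8001_IO_DBG(pm8001_ha,
+ *		pm8001_printk("sending tag %d\n", tag));
+ *
+ * expands so that the printk is only evaluated when PM8001_IO_LOGGING is
+ * set in pm8001_ha->logging_level, keeping hot paths cheap when logging
+ * is disabled.
+ */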
+
+
+#define PM8001_USE_TASKLET
+#define PM8001_USE_MSIX
+
+
+#define DEV_IS_EXPANDER(type)	((type == EDGE_DEV) || (type == FANOUT_DEV))
+
+#define PM8001_NAME_LENGTH	32	/* generic length of strings */
+extern struct list_head hba_list;
+extern const struct pm8001_dispatch pm8001_8001_dispatch;
+
+struct pm8001_hba_info;
+struct pm8001_ccb_info;
+struct pm8001_device;
+struct pm8001_tmf_task;
+struct pm8001_dispatch {
+	char *name;
+	int (*chip_init)(struct pm8001_hba_info *pm8001_ha);
+	int (*chip_soft_rst)(struct pm8001_hba_info *pm8001_ha, u32 signature);
+	void (*chip_rst)(struct pm8001_hba_info *pm8001_ha);
+	int (*chip_ioremap)(struct pm8001_hba_info *pm8001_ha);
+	void (*chip_iounmap)(struct pm8001_hba_info *pm8001_ha);
+	irqreturn_t (*isr)(struct pm8001_hba_info *pm8001_ha);
+	u32 (*is_our_interupt)(struct pm8001_hba_info *pm8001_ha);
+	int (*isr_process_oq)(struct pm8001_hba_info *pm8001_ha);
+	void (*interrupt_enable)(struct pm8001_hba_info *pm8001_ha);
+	void (*interrupt_disable)(struct pm8001_hba_info *pm8001_ha);
+	void (*make_prd)(struct scatterlist *scatter, int nr, void *prd);
+	int (*smp_req)(struct pm8001_hba_info *pm8001_ha,
+		struct pm8001_ccb_info *ccb);
+	int (*ssp_io_req)(struct pm8001_hba_info *pm8001_ha,
+		struct pm8001_ccb_info *ccb);
+	int (*sata_req)(struct pm8001_hba_info *pm8001_ha,
+		struct pm8001_ccb_info *ccb);
+	int (*phy_start_req)(struct pm8001_hba_info *pm8001_ha,	u8 phy_id);
+	int (*phy_stop_req)(struct pm8001_hba_info *pm8001_ha, u8 phy_id);
+	int (*reg_dev_req)(struct pm8001_hba_info *pm8001_ha,
+		struct pm8001_device *pm8001_dev, u32 flag);
+	int (*dereg_dev_req)(struct pm8001_hba_info *pm8001_ha, u32 device_id);
+	int (*phy_ctl_req)(struct pm8001_hba_info *pm8001_ha,
+		u32 phy_id, u32 phy_op);
+	int (*task_abort)(struct pm8001_hba_info *pm8001_ha,
+		struct pm8001_device *pm8001_dev, u8 flag, u32 task_tag,
+		u32 cmd_tag);
+	int (*ssp_tm_req)(struct pm8001_hba_info *pm8001_ha,
+		struct pm8001_ccb_info *ccb, struct pm8001_tmf_task *tmf);
+	int (*get_nvmd_req)(struct pm8001_hba_info *pm8001_ha, void *payload);
+	int (*set_nvmd_req)(struct pm8001_hba_info *pm8001_ha, void *payload);
+	int (*fw_flash_update_req)(struct pm8001_hba_info *pm8001_ha,
+		void *payload);
+	int (*set_dev_state_req)(struct pm8001_hba_info *pm8001_ha,
+		struct pm8001_device *pm8001_dev, u32 state);
+	int (*sas_diag_start_end_req)(struct pm8001_hba_info *pm8001_ha,
+		u32 state);
+	int (*sas_diag_execute_req)(struct pm8001_hba_info *pm8001_ha,
+		u32 state);
+	int (*sas_re_init_req)(struct pm8001_hba_info *pm8001_ha);
+};
+
+struct pm8001_chip_info {
+	u32	n_phy;
+	const struct pm8001_dispatch	*dispatch;
+};
+#define PM8001_CHIP_DISP	(pm8001_ha->chip->dispatch)
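+
+/*
+ * Dispatch example (illustrative): pm8001_ha->chip points at an entry of
+ * pm8001_chips[] whose dispatch member is pm8001_8001_dispatch, so a
+ * call such as
+ *
+ *	PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
+ *
+ * resolves to the SPC 8001 implementation while the core code stays
+ * chip-agnostic.
+ */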
+
+struct pm8001_port {
+	struct asd_sas_port	sas_port;
+};
+
+struct pm8001_phy {
+	struct pm8001_hba_info	*pm8001_ha;
+	struct pm8001_port	*port;
+	struct asd_sas_phy	sas_phy;
+	struct sas_identify	identify;
+	struct scsi_device	*sdev;
+	u64			dev_sas_addr;
+	u32			phy_type;
+	struct completion	*enable_completion;
+	u32			frame_rcvd_size;
+	u8			frame_rcvd[32];
+	u8			phy_attached;
+	u8			phy_state;
+	enum sas_linkrate	minimum_linkrate;
+	enum sas_linkrate	maximum_linkrate;
+};
+
+struct pm8001_device {
+	enum sas_dev_type	dev_type;
+	struct domain_device	*sas_device;
+	u32			attached_phy;
+	u32			id;
+	struct completion	*dcompletion;
+	struct completion	*setds_completion;
+	u32			device_id;
+	u32			running_req;
+};
+
+struct pm8001_prd_imt {
+	__le32			len;
+	__le32			e;
+};
+
+struct pm8001_prd {
+	__le64			addr;		/* 64-bit buffer address */
+	struct pm8001_prd_imt	im_len;		/* 64-bit length */
+} __attribute__ ((packed));
+/*
+ * CCB (Command Control Block)
+ */
+struct pm8001_ccb_info {
+	struct list_head	entry;
+	struct sas_task		*task;
+	u32			n_elem;
+	u32			ccb_tag;
+	dma_addr_t		ccb_dma_handle;
+	struct pm8001_device	*device;
+	struct pm8001_prd	buf_prd[PM8001_MAX_DMA_SG];
+	struct fw_control_ex	*fw_control_context;
+};
+
+struct mpi_mem {
+	void			*virt_ptr;
+	dma_addr_t		phys_addr;
+	u32			phys_addr_hi;
+	u32			phys_addr_lo;
+	u32			total_len;
+	u32			num_elements;
+	u32			element_size;
+	u32			alignment;
+};
+
+struct mpi_mem_req {
+	/* The number of elements in the mpiMemory array */
+	u32			count;
+	/* The array of structures that define memory regions */
+	struct mpi_mem		region[USI_MAX_MEMCNT];
+};
+
+struct main_cfg_table {
+	u32			signature;
+	u32			interface_rev;
+	u32			firmware_rev;
+	u32			max_out_io;
+	u32			max_sgl;
+	u32			ctrl_cap_flag;
+	u32			gst_offset;
+	u32			inbound_queue_offset;
+	u32			outbound_queue_offset;
+	u32			inbound_q_nppd_hppd;
+	u32			outbound_hw_event_pid0_3;
+	u32			outbound_hw_event_pid4_7;
+	u32			outbound_ncq_event_pid0_3;
+	u32			outbound_ncq_event_pid4_7;
+	u32			outbound_tgt_ITNexus_event_pid0_3;
+	u32			outbound_tgt_ITNexus_event_pid4_7;
+	u32			outbound_tgt_ssp_event_pid0_3;
+	u32			outbound_tgt_ssp_event_pid4_7;
+	u32			outbound_tgt_smp_event_pid0_3;
+	u32			outbound_tgt_smp_event_pid4_7;
+	u32			upper_event_log_addr;
+	u32			lower_event_log_addr;
+	u32			event_log_size;
+	u32			event_log_option;
+	u32			upper_iop_event_log_addr;
+	u32			lower_iop_event_log_addr;
+	u32			iop_event_log_size;
+	u32			iop_event_log_option;
+	u32			fatal_err_interrupt;
+	u32			fatal_err_dump_offset0;
+	u32			fatal_err_dump_length0;
+	u32			fatal_err_dump_offset1;
+	u32			fatal_err_dump_length1;
+	u32			hda_mode_flag;
+	u32			anolog_setup_table_offset;
+};
+struct general_status_table {
+	u32			gst_len_mpistate;
+	u32			iq_freeze_state0;
+	u32			iq_freeze_state1;
+	u32			msgu_tcnt;
+	u32			iop_tcnt;
+	u32			reserved;
+	u32			phy_state[8];
+	u32			reserved1;
+	u32			reserved2;
+	u32			reserved3;
+	u32			recover_err_info[8];
+};
+struct inbound_queue_table {
+	u32			element_pri_size_cnt;
+	u32			upper_base_addr;
+	u32			lower_base_addr;
+	u32			ci_upper_base_addr;
+	u32			ci_lower_base_addr;
+	u32			pi_pci_bar;
+	u32			pi_offset;
+	u32			total_length;
+	void			*base_virt;
+	void			*ci_virt;
+	u32			reserved;
+	__le32			consumer_index;
+	u32			producer_idx;
+};
+struct outbound_queue_table {
+	u32			element_size_cnt;
+	u32			upper_base_addr;
+	u32			lower_base_addr;
+	void			*base_virt;
+	u32			pi_upper_base_addr;
+	u32			pi_lower_base_addr;
+	u32			ci_pci_bar;
+	u32			ci_offset;
+	u32			total_length;
+	void			*pi_virt;
+	u32			interrup_vec_cnt_delay;
+	u32			dinterrup_to_pci_offset;
+	__le32			producer_index;
+	u32			consumer_idx;
+};
+struct pm8001_hba_memspace {
+	void __iomem  		*memvirtaddr;
+	u64			membase;
+	u32			memsize;
+};
+struct pm8001_hba_info {
+	char			name[PM8001_NAME_LENGTH];
+	struct list_head	list;
+	unsigned long		flags;
+	spinlock_t		lock;/* host-wide lock */
+	struct pci_dev		*pdev;/* our device */
+	struct device		*dev;
+	struct pm8001_hba_memspace io_mem[6];
+	struct mpi_mem_req	memoryMap;
+	void __iomem	*msg_unit_tbl_addr;/*Message Unit Table Addr*/
+	void __iomem	*main_cfg_tbl_addr;/*Main Config Table Addr*/
+	void __iomem	*general_stat_tbl_addr;/*General Status Table Addr*/
+	void __iomem	*inbnd_q_tbl_addr;/*Inbound Queue Config Table Addr*/
+	void __iomem	*outbnd_q_tbl_addr;/*Outbound Queue Config Table Addr*/
+	struct main_cfg_table	main_cfg_tbl;
+	struct general_status_table	gs_tbl;
+	struct inbound_queue_table	inbnd_q_tbl[PM8001_MAX_INB_NUM];
+	struct outbound_queue_table	outbnd_q_tbl[PM8001_MAX_OUTB_NUM];
+	u8			sas_addr[SAS_ADDR_SIZE];
+	struct sas_ha_struct	*sas;/* SCSI/SAS glue */
+	struct Scsi_Host	*shost;
+	u32			chip_id;
+	const struct pm8001_chip_info	*chip;
+	struct completion	*nvmd_completion;
+	int			tags_num;
+	unsigned long		*tags;
+	struct pm8001_phy	phy[PM8001_MAX_PHYS];
+	struct pm8001_port	port[PM8001_MAX_PHYS];
+	u32			id;
+	u32			irq;
+	struct pm8001_device	*devices;
+	struct pm8001_ccb_info	*ccb_info;
+#ifdef PM8001_USE_MSIX
+	struct msix_entry	msix_entries[16];/*for msi-x interrupt*/
+	int			number_of_intr;/*will be used in remove()*/
+#endif
+#ifdef PM8001_USE_TASKLET
+	struct tasklet_struct	tasklet;
+#endif
+	struct list_head 	wq_list;
+	u32			logging_level;
+	u32			fw_status;
+	const struct firmware 	*fw_image;
+};
+
+struct pm8001_wq {
+	struct delayed_work work_q;
+	struct pm8001_hba_info *pm8001_ha;
+	void *data;
+	int handler;
+	struct list_head entry;
+};
+
+struct pm8001_fw_image_header {
+	u8 vender_id[8];
+	u8 product_id;
+	u8 hardware_rev;
+	u8 dest_partition;
+	u8 reserved;
+	u8 fw_rev[4];
+	__be32  image_length;
+	__be32 image_crc;
+	__be32 startup_entry;
+} __attribute__((packed, aligned(4)));
+
+/* define task management IU */
+struct pm8001_tmf_task {
+	u8	tmf;
+	u32	tag_of_task_to_be_managed;
+};
+/**
+ * FW Flash Update status values
+ */
+#define FLASH_UPDATE_COMPLETE_PENDING_REBOOT	0x00
+#define FLASH_UPDATE_IN_PROGRESS		0x01
+#define FLASH_UPDATE_HDR_ERR			0x02
+#define FLASH_UPDATE_OFFSET_ERR			0x03
+#define FLASH_UPDATE_CRC_ERR			0x04
+#define FLASH_UPDATE_LENGTH_ERR			0x05
+#define FLASH_UPDATE_HW_ERR			0x06
+#define FLASH_UPDATE_DNLD_NOT_SUPPORTED		0x10
+#define FLASH_UPDATE_DISABLED			0x11
+
+/**
+ * param structure for firmware flash update.
+ */
+struct fw_flash_updata_info {
+	u32			cur_image_offset;
+	u32			cur_image_len;
+	u32			total_image_len;
+	struct pm8001_prd	sgl;
+};
+
+struct fw_control_info {
+	u32			retcode;/*ret code (status)*/
+	u32			phase;/*ret code phase*/
+	u32			phaseCmplt;/*percent complete for the current
+	update phase */
+	u32			version;/*Hex encoded firmware version number*/
+	u32			offset;/*Used for downloading firmware	*/
+	u32			len; /*len of buffer*/
+	u32			size;/* Used in OS VPD and Trace get size
+	operations.*/
+	u32			reserved;/* padding required for 64 bit
+	alignment */
+	u8			buffer[1];/* Start of buffer */
+};
+struct fw_control_ex {
+	struct fw_control_info *fw_control;
+	void			*buffer;/* keep buffer pointer to be
+	freed when the response comes */
+	void			*virtAddr;/* keep virtual address of the data */
+	void			*usrAddr;/* keep virtual address of the
+	user data */
+	dma_addr_t		phys_addr;
+	u32			len; /* len of buffer  */
+	void			*payload; /* pointer to IOCTL Payload */
+	u8			inProgress;/*if 1 - the IOCTL request is in
+	progress */
+	void			*param1;
+	void			*param2;
+	void			*param3;
+};
+
+/******************** function prototype *********************/
+int pm8001_tag_alloc(struct pm8001_hba_info *pm8001_ha, u32 *tag_out);
+void pm8001_tag_init(struct pm8001_hba_info *pm8001_ha);
+u32 pm8001_get_ncq_tag(struct sas_task *task, u32 *tag);
+void pm8001_ccb_free(struct pm8001_hba_info *pm8001_ha, u32 ccb_idx);
+void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha,
+	struct sas_task *task, struct pm8001_ccb_info *ccb, u32 ccb_idx);
+int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
+	void *funcdata);
+int pm8001_slave_alloc(struct scsi_device *scsi_dev);
+int pm8001_slave_configure(struct scsi_device *sdev);
+void pm8001_scan_start(struct Scsi_Host *shost);
+int pm8001_scan_finished(struct Scsi_Host *shost, unsigned long time);
+int pm8001_queue_command(struct sas_task *task, const int num,
+	gfp_t gfp_flags);
+int pm8001_abort_task(struct sas_task *task);
+int pm8001_abort_task_set(struct domain_device *dev, u8 *lun);
+int pm8001_clear_aca(struct domain_device *dev, u8 *lun);
+int pm8001_clear_task_set(struct domain_device *dev, u8 *lun);
+int pm8001_dev_found(struct domain_device *dev);
+void pm8001_dev_gone(struct domain_device *dev);
+int pm8001_lu_reset(struct domain_device *dev, u8 *lun);
+int pm8001_I_T_nexus_reset(struct domain_device *dev);
+int pm8001_query_task(struct sas_task *task);
+int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr,
+	dma_addr_t *pphys_addr, u32 *pphys_addr_hi, u32 *pphys_addr_lo,
+	u32 mem_size, u32 align);
+
+
+/* ctl shared API */
+extern struct device_attribute *pm8001_host_attrs[];
+
+#endif
+
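
The prototypes above sketch the driver's tag and CCB lifecycle: a tag obtained from pm8001_tag_alloc() doubles as the index into ccb_info[], and pm8001_ccb_free() returns it to the pool. A minimal sketch of that pairing follows; the caller, the ->task assignment, and the post_inbound_iomb() helper are illustrative, not taken from the driver.

/*
 * Hypothetical sketch of the tag/CCB pairing implied by the
 * prototypes above. The caller, the ->task field, and the
 * post_inbound_iomb() helper are illustrative, not driver code.
 */
static int example_start_io(struct pm8001_hba_info *pm8001_ha,
			    struct sas_task *task)
{
	struct pm8001_ccb_info *ccb;
	u32 tag;
	int rc;

	rc = pm8001_tag_alloc(pm8001_ha, &tag);
	if (rc)
		return rc;

	ccb = &pm8001_ha->ccb_info[tag];	/* tag doubles as CCB index */
	ccb->task = task;			/* assumed field */

	rc = post_inbound_iomb(pm8001_ha, tag, task);	/* hypothetical */
	if (rc)
		pm8001_ccb_free(pm8001_ha, tag);	/* undo on failure */
	return rc;
}
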
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 0a97bc9074bb..4874dd62a62f 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -278,12 +278,17 @@ static void pmcraid_slave_destroy(struct scsi_device *scsi_dev)
  * pmcraid_change_queue_depth - Change the device's queue depth
  * @scsi_dev: scsi device struct
  * @depth: depth to set
+ * @reason: calling context
  *
  * Return value
  * 	actual depth set
  */
-static int pmcraid_change_queue_depth(struct scsi_device *scsi_dev, int depth)
+static int pmcraid_change_queue_depth(struct scsi_device *scsi_dev, int depth,
+				      int reason)
 {
+	if (reason != SCSI_QDEPTH_DEFAULT)
+		return -EOPNOTSUPP;
+
 	if (depth > PMCRAID_MAX_CMD_PER_LUN)
 		depth = PMCRAID_MAX_CMD_PER_LUN;
 
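
The new third argument lets the midlayer say why it wants the depth changed. A minimal sketch of the contract as used by this series (illustrative function name; SCSI_QDEPTH_DEFAULT, scsi_adjust_queue_depth(), and scsi_get_tag_type() all appear elsewhere in this merge):

/*
 * Sketch of the new ->change_queue_depth() contract: handle
 * SCSI_QDEPTH_DEFAULT, reject other reasons, and return the
 * depth actually set. Function name is illustrative.
 */
static int example_change_queue_depth(struct scsi_device *sdev,
				      int qdepth, int reason)
{
	if (reason != SCSI_QDEPTH_DEFAULT)
		return -EOPNOTSUPP;

	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
	return sdev->queue_depth;
}
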
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index fbcb82a2f7f4..21e2bc4d7401 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1654,7 +1654,8 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
 			fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
 	}
 
-	if (scsi_add_host(vha->host, &fc_vport->dev)) {
+	if (scsi_add_host_with_dma(vha->host, &fc_vport->dev,
+				   &ha->pdev->dev)) {
 		DEBUG15(printk("scsi(%ld): scsi_add_host failure for VP[%d].\n",
 			vha->host_no, vha->vp_idx));
 		goto vport_create_failed_2;
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index cca8e4ab0372..cb2eca4c26d8 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -377,6 +377,24 @@ qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
 	return ptr + sizeof(struct qla2xxx_mq_chain);
 }
 
+static void
+qla2xxx_dump_post_process(scsi_qla_host_t *vha, int rval)
+{
+	struct qla_hw_data *ha = vha->hw;
+
+	if (rval != QLA_SUCCESS) {
+		qla_printk(KERN_WARNING, ha,
+		    "Failed to dump firmware (%x)!!!\n", rval);
+		ha->fw_dumped = 0;
+	} else {
+		qla_printk(KERN_INFO, ha,
+		    "Firmware dump saved to temp buffer (%ld/%p).\n",
+		    vha->host_no, ha->fw_dump);
+		ha->fw_dumped = 1;
+		qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
+	}
+}
+
 /**
  * qla2300_fw_dump() - Dumps binary data from the 2300 firmware.
  * @ha: HA context
@@ -530,17 +548,7 @@ qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
 	if (rval == QLA_SUCCESS)
 		qla2xxx_copy_queues(ha, nxt);
 
-	if (rval != QLA_SUCCESS) {
-		qla_printk(KERN_WARNING, ha,
-		    "Failed to dump firmware (%x)!!!\n", rval);
-		ha->fw_dumped = 0;
-
-	} else {
-		qla_printk(KERN_INFO, ha,
-		    "Firmware dump saved to temp buffer (%ld/%p).\n",
-		    base_vha->host_no, ha->fw_dump);
-		ha->fw_dumped = 1;
-	}
+	qla2xxx_dump_post_process(base_vha, rval);
 
 qla2300_fw_dump_failed:
 	if (!hardware_locked)
@@ -737,17 +745,7 @@ qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
 	if (rval == QLA_SUCCESS)
 		qla2xxx_copy_queues(ha, &fw->risc_ram[cnt]);
 
-	if (rval != QLA_SUCCESS) {
-		qla_printk(KERN_WARNING, ha,
-		    "Failed to dump firmware (%x)!!!\n", rval);
-		ha->fw_dumped = 0;
-
-	} else {
-		qla_printk(KERN_INFO, ha,
-		    "Firmware dump saved to temp buffer (%ld/%p).\n",
-		    base_vha->host_no, ha->fw_dump);
-		ha->fw_dumped = 1;
-	}
+	qla2xxx_dump_post_process(base_vha, rval);
 
 qla2100_fw_dump_failed:
 	if (!hardware_locked)
@@ -984,17 +982,7 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
 	qla24xx_copy_eft(ha, nxt);
 
 qla24xx_fw_dump_failed_0:
-	if (rval != QLA_SUCCESS) {
-		qla_printk(KERN_WARNING, ha,
-		    "Failed to dump firmware (%x)!!!\n", rval);
-		ha->fw_dumped = 0;
-
-	} else {
-		qla_printk(KERN_INFO, ha,
-		    "Firmware dump saved to temp buffer (%ld/%p).\n",
-		    base_vha->host_no, ha->fw_dump);
-		ha->fw_dumped = 1;
-	}
+	qla2xxx_dump_post_process(base_vha, rval);
 
 qla24xx_fw_dump_failed:
 	if (!hardware_locked)
@@ -1305,17 +1293,7 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
 	}
 
 qla25xx_fw_dump_failed_0:
-	if (rval != QLA_SUCCESS) {
-		qla_printk(KERN_WARNING, ha,
-		    "Failed to dump firmware (%x)!!!\n", rval);
-		ha->fw_dumped = 0;
-
-	} else {
-		qla_printk(KERN_INFO, ha,
-		    "Firmware dump saved to temp buffer (%ld/%p).\n",
-		    base_vha->host_no, ha->fw_dump);
-		ha->fw_dumped = 1;
-	}
+	qla2xxx_dump_post_process(base_vha, rval);
 
 qla25xx_fw_dump_failed:
 	if (!hardware_locked)
@@ -1628,17 +1606,7 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
 	}
 
 qla81xx_fw_dump_failed_0:
-	if (rval != QLA_SUCCESS) {
-		qla_printk(KERN_WARNING, ha,
-		    "Failed to dump firmware (%x)!!!\n", rval);
-		ha->fw_dumped = 0;
-
-	} else {
-		qla_printk(KERN_INFO, ha,
-		    "Firmware dump saved to temp buffer (%ld/%p).\n",
-		    base_vha->host_no, ha->fw_dump);
-		ha->fw_dumped = 1;
-	}
+	qla2xxx_dump_post_process(base_vha, rval);
 
 qla81xx_fw_dump_failed:
 	if (!hardware_locked)
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 215061861794..6b9bf23c7735 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -2123,6 +2123,7 @@ enum qla_work_type {
 	QLA_EVT_ASYNC_LOGIN_DONE,
 	QLA_EVT_ASYNC_LOGOUT,
 	QLA_EVT_ASYNC_LOGOUT_DONE,
+	QLA_EVT_UEVENT,
 };
 
 
@@ -2146,6 +2147,10 @@ struct qla_work_evt {
 #define QLA_LOGIO_LOGIN_RETRIED	BIT_0
 			u16 data[2];
 		} logio;
+		struct {
+			u32 code;
+#define QLA_UEVENT_CODE_FW_DUMP	0
+		} uevent;
 	} u;
 };
 
@@ -2435,11 +2440,11 @@ struct qla_hw_data {
 	dma_addr_t	edc_data_dma;
 	uint16_t	edc_data_len;
 
-#define XGMAC_DATA_SIZE	PAGE_SIZE
+#define XGMAC_DATA_SIZE	4096
 	void		*xgmac_data;
 	dma_addr_t	xgmac_data_dma;
 
-#define DCBX_TLV_DATA_SIZE PAGE_SIZE
+#define DCBX_TLV_DATA_SIZE 4096
 	void		*dcbx_tlv;
 	dma_addr_t	dcbx_tlv_dma;
 
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index f3d1d1afa95b..e21851358509 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -92,6 +92,7 @@ extern int qla2x00_post_async_logout_work(struct scsi_qla_host *, fc_port_t *,
     uint16_t *);
 extern int qla2x00_post_async_logout_done_work(struct scsi_qla_host *,
     fc_port_t *, uint16_t *);
+extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32);
 
 extern int qla81xx_restart_mpi_firmware(scsi_qla_host_t *);
 
@@ -246,7 +247,7 @@ qla2x00_get_id_list(scsi_qla_host_t *, void *, dma_addr_t, uint16_t *);
 
 extern int
 qla2x00_get_resource_cnts(scsi_qla_host_t *, uint16_t *, uint16_t *,
-    uint16_t *, uint16_t *, uint16_t *);
+    uint16_t *, uint16_t *, uint16_t *, uint16_t *);
 
 extern int
 qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 9e3eaac25596..b74924b279ef 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -277,7 +277,6 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
 	vha->marker_needed = 0;
 	ha->isp_abort_cnt = 0;
 	ha->beacon_blink_led = 0;
-	set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
 
 	set_bit(0, ha->req_qid_map);
 	set_bit(0, ha->rsp_qid_map);
@@ -1203,7 +1202,7 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
 				}
 				qla2x00_get_resource_cnts(vha, NULL,
 				    &ha->fw_xcb_count, NULL, NULL,
-				    &ha->max_npiv_vports);
+				    &ha->max_npiv_vports, NULL);
 
 				if (!fw_major_version && ql2xallocfwdump)
 					qla2x00_alloc_fw_dump(vha);
@@ -3573,6 +3572,15 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
 			ha->isp_abort_cnt = 0;
 			clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
 
+			if (IS_QLA81XX(ha))
+				qla2x00_get_fw_version(vha,
+				    &ha->fw_major_version,
+				    &ha->fw_minor_version,
+				    &ha->fw_subminor_version,
+				    &ha->fw_attributes, &ha->fw_memory_size,
+				    ha->mpi_version, &ha->mpi_capabilities,
+				    ha->phy_version);
+
 			if (ha->fce) {
 				ha->flags.fce_enabled = 1;
 				memset(ha->fce, 0,
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index b20a7169aac2..804987397b77 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -313,10 +313,11 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
 	static char	*link_speeds[] = { "1", "2", "?", "4", "8", "10" };
 	char		*link_speed;
 	uint16_t	handle_cnt;
-	uint16_t	cnt;
+	uint16_t	cnt, mbx;
 	uint32_t	handles[5];
 	struct qla_hw_data *ha = vha->hw;
 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+	struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
 	uint32_t	rscn_entry, host_pid;
 	uint8_t		rscn_queue_index;
 	unsigned long	flags;
@@ -395,9 +396,10 @@ skip_rio:
 		break;
 
 	case MBA_SYSTEM_ERR:		/* System Error */
+		mbx = IS_QLA81XX(ha) ? RD_REG_WORD(&reg24->mailbox7) : 0;
 		qla_printk(KERN_INFO, ha,
-		    "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n",
-		    mb[1], mb[2], mb[3]);
+		    "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
+		    "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);
 
 		ha->isp_ops->fw_dump(vha, 1);
 
@@ -419,9 +421,10 @@ skip_rio:
 		break;
 
 	case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
-		DEBUG2(printk("scsi(%ld): ISP Request Transfer Error.\n",
-		    vha->host_no));
-		qla_printk(KERN_WARNING, ha, "ISP Request Transfer Error.\n");
+		DEBUG2(printk("scsi(%ld): ISP Request Transfer Error (%x).\n",
+		    vha->host_no, mb[1]));
+		qla_printk(KERN_WARNING, ha,
+		    "ISP Request Transfer Error (%x).\n", mb[1]);
 
 		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
 		break;
@@ -485,10 +488,13 @@ skip_rio:
 		break;
 
 	case MBA_LOOP_DOWN:		/* Loop Down Event */
+		mbx = IS_QLA81XX(ha) ? RD_REG_WORD(&reg24->mailbox4) : 0;
 		DEBUG2(printk("scsi(%ld): Asynchronous LOOP DOWN "
-		    "(%x %x %x).\n", vha->host_no, mb[1], mb[2], mb[3]));
-		qla_printk(KERN_INFO, ha, "LOOP DOWN detected (%x %x %x).\n",
-		    mb[1], mb[2], mb[3]);
+		    "(%x %x %x %x).\n", vha->host_no, mb[1], mb[2], mb[3],
+		    mbx));
+		qla_printk(KERN_INFO, ha,
+		    "LOOP DOWN detected (%x %x %x %x).\n", mb[1], mb[2], mb[3],
+		    mbx);
 
 		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
 			atomic_set(&vha->loop_state, LOOP_DOWN);
@@ -1347,16 +1353,22 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
 
 	sense_len = rsp_info_len = resid_len = fw_resid_len = 0;
 	if (IS_FWI2_CAPABLE(ha)) {
-		sense_len = le32_to_cpu(sts24->sense_len);
-		rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
-		resid_len = le32_to_cpu(sts24->rsp_residual_count);
-		fw_resid_len = le32_to_cpu(sts24->residual_len);
+		if (scsi_status & SS_SENSE_LEN_VALID)
+			sense_len = le32_to_cpu(sts24->sense_len);
+		if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
+			rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
+		if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER))
+			resid_len = le32_to_cpu(sts24->rsp_residual_count);
+		if (comp_status == CS_DATA_UNDERRUN)
+			fw_resid_len = le32_to_cpu(sts24->residual_len);
 		rsp_info = sts24->data;
 		sense_data = sts24->data;
 		host_to_fcp_swap(sts24->data, sizeof(sts24->data));
 	} else {
-		sense_len = le16_to_cpu(sts->req_sense_length);
-		rsp_info_len = le16_to_cpu(sts->rsp_info_len);
+		if (scsi_status & SS_SENSE_LEN_VALID)
+			sense_len = le16_to_cpu(sts->req_sense_length);
+		if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
+			rsp_info_len = le16_to_cpu(sts->rsp_info_len);
 		resid_len = le32_to_cpu(sts->residual_length);
 		rsp_info = sts->rsp_info;
 		sense_data = sts->req_sense_data;
@@ -1443,38 +1455,62 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
 		break;
 
 	case CS_DATA_UNDERRUN:
-		resid = resid_len;
+		DEBUG2(printk(KERN_INFO
+		    "scsi(%ld:%d:%d) UNDERRUN status detected 0x%x-0x%x. "
+		    "resid=0x%x fw_resid=0x%x cdb=0x%x os_underflow=0x%x\n",
+		    vha->host_no, cp->device->id, cp->device->lun, comp_status,
+		    scsi_status, resid_len, fw_resid_len, cp->cmnd[0],
+		    cp->underflow));
+
 		/* Use F/W calculated residual length. */
-		if (IS_FWI2_CAPABLE(ha)) {
-			if (!(scsi_status & SS_RESIDUAL_UNDER)) {
-				lscsi_status = 0;
-			} else if (resid != fw_resid_len) {
-				scsi_status &= ~SS_RESIDUAL_UNDER;
-				lscsi_status = 0;
+		resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
+		scsi_set_resid(cp, resid);
+		if (scsi_status & SS_RESIDUAL_UNDER) {
+			if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
+				DEBUG2(printk(
+				    "scsi(%ld:%d:%d:%d) Dropped frame(s) "
+				    "detected (%x of %x bytes)...residual "
+				    "length mismatch...retrying command.\n",
+				    vha->host_no, cp->device->channel,
+				    cp->device->id, cp->device->lun, resid,
+				    scsi_bufflen(cp)));
+
+				cp->result = DID_ERROR << 16 | lscsi_status;
+				break;
 			}
-			resid = fw_resid_len;
-		}
 
-		if (scsi_status & SS_RESIDUAL_UNDER) {
-			scsi_set_resid(cp, resid);
-		} else {
-			DEBUG2(printk(KERN_INFO
-			    "scsi(%ld:%d:%d) UNDERRUN status detected "
-			    "0x%x-0x%x. resid=0x%x fw_resid=0x%x cdb=0x%x "
-			    "os_underflow=0x%x\n", vha->host_no,
-			    cp->device->id, cp->device->lun, comp_status,
-			    scsi_status, resid_len, resid, cp->cmnd[0],
-			    cp->underflow));
+			if (!lscsi_status &&
+			    ((unsigned)(scsi_bufflen(cp) - resid) <
+			    cp->underflow)) {
+				qla_printk(KERN_INFO, ha,
+				    "scsi(%ld:%d:%d:%d): Mid-layer underflow "
+				    "detected (%x of %x bytes)...returning "
+				    "error status.\n", vha->host_no,
+				    cp->device->channel, cp->device->id,
+				    cp->device->lun, resid, scsi_bufflen(cp));
+
+				cp->result = DID_ERROR << 16;
+				break;
+			}
+		} else if (!lscsi_status) {
+			DEBUG2(printk(
+			    "scsi(%ld:%d:%d:%d) Dropped frame(s) detected "
+			    "(%x of %x bytes)...firmware reported underrun..."
+			    "retrying command.\n", vha->host_no,
+			    cp->device->channel, cp->device->id,
+			    cp->device->lun, resid, scsi_bufflen(cp)));
 
+			cp->result = DID_ERROR << 16;
+			break;
 		}
 
+		cp->result = DID_OK << 16 | lscsi_status;
+
 		/*
 		 * Check to see if SCSI Status is non zero. If so report SCSI
 		 * Status.
 		 */
 		if (lscsi_status != 0) {
-			cp->result = DID_OK << 16 | lscsi_status;
-
 			if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
 				DEBUG2(printk(KERN_INFO
 				    "scsi(%ld): QUEUE FULL status detected "
@@ -1501,42 +1537,6 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
 				break;
 
 			qla2x00_handle_sense(sp, sense_data, sense_len, rsp);
-		} else {
-			/*
-			 * If RISC reports underrun and target does not report
-			 * it then we must have a lost frame, so tell upper
-			 * layer to retry it by reporting an error.
-			 */
-			if (!(scsi_status & SS_RESIDUAL_UNDER)) {
-				DEBUG2(printk("scsi(%ld:%d:%d:%d) Dropped "
-					      "frame(s) detected (%x of %x bytes)..."
-					      "retrying command.\n",
-					vha->host_no, cp->device->channel,
-					cp->device->id, cp->device->lun, resid,
-					scsi_bufflen(cp)));
-
-				scsi_set_resid(cp, resid);
-				cp->result = DID_ERROR << 16;
-				break;
-			}
-
-			/* Handle mid-layer underflow */
-			if ((unsigned)(scsi_bufflen(cp) - resid) <
-			    cp->underflow) {
-				qla_printk(KERN_INFO, ha,
-					   "scsi(%ld:%d:%d:%d): Mid-layer underflow "
-					   "detected (%x of %x bytes)...returning "
-					   "error status.\n", vha->host_no,
-					   cp->device->channel, cp->device->id,
-					   cp->device->lun, resid,
-					   scsi_bufflen(cp));
-
-				cp->result = DID_ERROR << 16;
-				break;
-			}
-
-			/* Everybody online, looking good... */
-			cp->result = DID_OK << 16;
 		}
 		break;
 
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index b6202fe118ac..05d595d9a7ef 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -2006,7 +2006,7 @@ qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma,
 int
 qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
     uint16_t *orig_xchg_cnt, uint16_t *cur_iocb_cnt,
-    uint16_t *orig_iocb_cnt, uint16_t *max_npiv_vports)
+    uint16_t *orig_iocb_cnt, uint16_t *max_npiv_vports, uint16_t *max_fcfs)
 {
 	int rval;
 	mbx_cmd_t mc;
@@ -2017,6 +2017,8 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
 	mcp->mb[0] = MBC_GET_RESOURCE_COUNTS;
 	mcp->out_mb = MBX_0;
 	mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
+	if (IS_QLA81XX(vha->hw))
+		mcp->in_mb |= MBX_12;
 	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
 	rval = qla2x00_mailbox_command(vha, mcp);
@@ -2027,9 +2029,10 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
 		    vha->host_no, mcp->mb[0]));
 	} else {
 		DEBUG11(printk("%s(%ld): done. mb1=%x mb2=%x mb3=%x mb6=%x "
-		    "mb7=%x mb10=%x mb11=%x.\n", __func__, vha->host_no,
-		    mcp->mb[1], mcp->mb[2], mcp->mb[3], mcp->mb[6], mcp->mb[7],
-		    mcp->mb[10], mcp->mb[11]));
+		    "mb7=%x mb10=%x mb11=%x mb12=%x.\n", __func__,
+		    vha->host_no, mcp->mb[1], mcp->mb[2], mcp->mb[3],
+		    mcp->mb[6], mcp->mb[7], mcp->mb[10], mcp->mb[11],
+		    mcp->mb[12]));
 
 		if (cur_xchg_cnt)
 			*cur_xchg_cnt = mcp->mb[3];
@@ -2041,6 +2044,8 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
 			*orig_iocb_cnt = mcp->mb[10];
 		if (vha->hw->flags.npiv_supported && max_npiv_vports)
 			*max_npiv_vports = mcp->mb[11];
+		if (IS_QLA81XX(vha->hw) && max_fcfs)
+			*max_fcfs = mcp->mb[12];
 	}
 
 	return (rval);
@@ -2313,6 +2318,7 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
 {
 	int		rval, rval2;
 	struct tsk_mgmt_cmd *tsk;
+	struct sts_entry_24xx *sts;
 	dma_addr_t	tsk_dma;
 	scsi_qla_host_t *vha;
 	struct qla_hw_data *ha;
@@ -2352,20 +2358,37 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
 		    sizeof(tsk->p.tsk.lun));
 	}
 
+	sts = &tsk->p.sts;
 	rval = qla2x00_issue_iocb(vha, tsk, tsk_dma, 0);
 	if (rval != QLA_SUCCESS) {
 		DEBUG2_3_11(printk("%s(%ld): failed to issue %s Reset IOCB "
 		    "(%x).\n", __func__, vha->host_no, name, rval));
-	} else if (tsk->p.sts.entry_status != 0) {
+	} else if (sts->entry_status != 0) {
 		DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
 		    "-- error status (%x).\n", __func__, vha->host_no,
-		    tsk->p.sts.entry_status));
+		    sts->entry_status));
 		rval = QLA_FUNCTION_FAILED;
-	} else if (tsk->p.sts.comp_status !=
+	} else if (sts->comp_status !=
 	    __constant_cpu_to_le16(CS_COMPLETE)) {
 		DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
 		    "-- completion status (%x).\n", __func__,
-		    vha->host_no, le16_to_cpu(tsk->p.sts.comp_status)));
+		    vha->host_no, le16_to_cpu(sts->comp_status)));
+		rval = QLA_FUNCTION_FAILED;
+	} else if (!(le16_to_cpu(sts->scsi_status) &
+	    SS_RESPONSE_INFO_LEN_VALID)) {
+		DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
+		    "-- no response info (%x).\n", __func__, vha->host_no,
+		    le16_to_cpu(sts->scsi_status)));
+		rval = QLA_FUNCTION_FAILED;
+	} else if (le32_to_cpu(sts->rsp_data_len) < 4) {
+		DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
+		    "-- not enough response info (%d).\n", __func__,
+		    vha->host_no, le32_to_cpu(sts->rsp_data_len)));
+		rval = QLA_FUNCTION_FAILED;
+	} else if (sts->data[3]) {
+		DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
+		    "-- response (%x).\n", __func__,
+		    vha->host_no, sts->data[3]));
 		rval = QLA_FUNCTION_FAILED;
 	}
 
@@ -2759,8 +2782,10 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
 		    vp_idx, MSB(stat),
 		    rptid_entry->port_id[2], rptid_entry->port_id[1],
 		    rptid_entry->port_id[0]));
-		if (vp_idx == 0)
-			return;
+
+		vp = vha;
+		if (vp_idx == 0 && (MSB(stat) != 1))
+			goto reg_needed;
 
 		if (MSB(stat) == 1) {
 			DEBUG2(printk("scsi(%ld): Could not acquire ID for "
@@ -2783,8 +2808,11 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
 		 * response queue. Handle it in dpc context.
 		 */
 		set_bit(VP_IDX_ACQUIRED, &vp->vp_flags);
-		set_bit(VP_DPC_NEEDED, &vha->dpc_flags);
 
+reg_needed:
+		set_bit(REGISTER_FC4_NEEDED, &vp->dpc_flags);
+		set_bit(REGISTER_FDMI_NEEDED, &vp->dpc_flags);
+		set_bit(VP_DPC_NEEDED, &vha->dpc_flags);
 		qla2xxx_wake_dpc(vha);
 	}
 }
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index e07b3617f019..a47d34308a3a 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -382,8 +382,6 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
 	vha->mgmt_svr_loop_id = 10 + vha->vp_idx;
 
 	vha->dpc_flags = 0L;
-	set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
-	set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
 
 	/*
 	 * To fix the issue of processing a parent's RSCN for the vport before
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index b79fca7d461b..41669357b186 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -11,6 +11,7 @@
 #include <linux/delay.h>
 #include <linux/kthread.h>
 #include <linux/mutex.h>
+#include <linux/kobject.h>
 
 #include <scsi/scsi_tcq.h>
 #include <scsi/scsicam.h>
@@ -137,7 +138,7 @@ static int qla2xxx_eh_target_reset(struct scsi_cmnd *);
 static int qla2xxx_eh_bus_reset(struct scsi_cmnd *);
 static int qla2xxx_eh_host_reset(struct scsi_cmnd *);
 
-static int qla2x00_change_queue_depth(struct scsi_device *, int);
+static int qla2x00_change_queue_depth(struct scsi_device *, int, int);
 static int qla2x00_change_queue_type(struct scsi_device *, int);
 
 struct scsi_host_template qla2xxx_driver_template = {
@@ -727,23 +728,6 @@ qla2x00_abort_fcport_cmds(fc_port_t *fcport)
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 }
 
-static void
-qla2x00_block_error_handler(struct scsi_cmnd *cmnd)
-{
-	struct Scsi_Host *shost = cmnd->device->host;
-	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
-	unsigned long flags;
-
-	spin_lock_irqsave(shost->host_lock, flags);
-	while (rport->port_state == FC_PORTSTATE_BLOCKED) {
-		spin_unlock_irqrestore(shost->host_lock, flags);
-		msleep(1000);
-		spin_lock_irqsave(shost->host_lock, flags);
-	}
-	spin_unlock_irqrestore(shost->host_lock, flags);
-	return;
-}
-
 /**************************************************************************
 * qla2xxx_eh_abort
 *
@@ -773,7 +757,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
 	struct req_que *req = vha->req;
 	srb_t *spt;
 
-	qla2x00_block_error_handler(cmd);
+	fc_block_scsi_eh(cmd);
 
 	if (!CMD_SP(cmd))
 		return SUCCESS;
@@ -904,7 +888,7 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
 	fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
 	int err;
 
-	qla2x00_block_error_handler(cmd);
+	fc_block_scsi_eh(cmd);
 
 	if (!fcport)
 		return FAILED;
@@ -984,7 +968,7 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
 	unsigned long serial;
 	srb_t *sp = (srb_t *) CMD_SP(cmd);
 
-	qla2x00_block_error_handler(cmd);
+	fc_block_scsi_eh(cmd);
 
 	id = cmd->device->id;
 	lun = cmd->device->lun;
@@ -1047,7 +1031,7 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
 	srb_t *sp = (srb_t *) CMD_SP(cmd);
 	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
 
-	qla2x00_block_error_handler(cmd);
+	fc_block_scsi_eh(cmd);
 
 	id = cmd->device->id;
 	lun = cmd->device->lun;
@@ -1234,8 +1218,11 @@ qla2xxx_slave_destroy(struct scsi_device *sdev)
 }
 
 static int
-qla2x00_change_queue_depth(struct scsi_device *sdev, int qdepth)
+qla2x00_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
 {
+	if (reason != SCSI_QDEPTH_DEFAULT)
+		return -EOPNOTSUPP;
+
 	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
 	return sdev->queue_depth;
 }
@@ -2653,6 +2640,37 @@ qla2x00_post_async_work(login_done, QLA_EVT_ASYNC_LOGIN_DONE);
 qla2x00_post_async_work(logout, QLA_EVT_ASYNC_LOGOUT);
 qla2x00_post_async_work(logout_done, QLA_EVT_ASYNC_LOGOUT_DONE);
 
+int
+qla2x00_post_uevent_work(struct scsi_qla_host *vha, u32 code)
+{
+	struct qla_work_evt *e;
+
+	e = qla2x00_alloc_work(vha, QLA_EVT_UEVENT);
+	if (!e)
+		return QLA_FUNCTION_FAILED;
+
+	e->u.uevent.code = code;
+	return qla2x00_post_work(vha, e);
+}
+
+static void
+qla2x00_uevent_emit(struct scsi_qla_host *vha, u32 code)
+{
+	char event_string[40] = "";
+	char *envp[] = { event_string, NULL };
+
+	switch (code) {
+	case QLA_UEVENT_CODE_FW_DUMP:
+		snprintf(event_string, sizeof(event_string), "FW_DUMP=%ld",
+		    vha->host_no);
+		break;
+	default:
+		/* do nothing */
+		break;
+	}
+	kobject_uevent_env(&vha->hw->pdev->dev.kobj, KOBJ_CHANGE, envp);
+}
+
 void
 qla2x00_do_work(struct scsi_qla_host *vha)
 {
@@ -2690,6 +2708,9 @@ qla2x00_do_work(struct scsi_qla_host *vha)
 			qla2x00_async_logout_done(vha, e->u.logio.fcport,
 			    e->u.logio.data);
 			break;
+		case QLA_EVT_UEVENT:
+			qla2x00_uevent_emit(vha, e->u.uevent.code);
+			break;
 		}
 		if (e->flags & QLA_EVT_FLAG_FREE)
 			kfree(e);
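
qla2x00_uevent_emit() raises KOBJ_CHANGE on the PCI device with an FW_DUMP=<host_no> environment string, deferred through the work queue so it never runs in interrupt context. A minimal userspace sketch that watches for it (a hypothetical standalone program using the generic NETLINK_KOBJECT_UEVENT socket, nothing qla2xxx-specific; needs root to bind):

#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <linux/netlink.h>

int main(void)
{
	struct sockaddr_nl addr = {
		.nl_family = AF_NETLINK,
		.nl_groups = 1,		/* kernel uevent multicast group */
	};
	char buf[8192];
	int fd = socket(AF_NETLINK, SOCK_DGRAM, NETLINK_KOBJECT_UEVENT);

	if (fd < 0 || bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		return 1;

	for (;;) {
		ssize_t len = recv(fd, buf, sizeof(buf) - 1, 0);
		ssize_t i;

		if (len <= 0)
			continue;
		buf[len] = '\0';
		/* payload: "action@devpath" then NUL-separated KEY=VALUE strings */
		for (i = 0; i < len; i += (ssize_t)strlen(buf + i) + 1)
			if (!strncmp(buf + i, "FW_DUMP=", 8))
				printf("firmware dump ready on host %s\n",
				       buf + i + 8);
	}
}
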
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index ac107a2c34a4..807e0dbc67fa 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
 /*
  * Driver version
  */
-#define QLA2XXX_VERSION      "8.03.01-k6"
+#define QLA2XXX_VERSION      "8.03.01-k7"
 
 #define QLA_DRIVER_MAJOR_VER	8
 #define QLA_DRIVER_MINOR_VER	3
diff --git a/drivers/scsi/qlogicpti.h b/drivers/scsi/qlogicpti.h
index 9c053bbaa877..e3c74d1ee2db 100644
--- a/drivers/scsi/qlogicpti.h
+++ b/drivers/scsi/qlogicpti.h
@@ -43,7 +43,7 @@
  * determined for each queue request anew.
  */
 #define QLOGICPTI_REQ_QUEUE_LEN	255	/* must be power of two - 1 */
-#define QLOGICPTI_MAX_SG(ql)	(4 + ((ql) > 0) ? 7*((ql) - 1) : 0)
+#define QLOGICPTI_MAX_SG(ql)	(4 + (((ql) > 0) ? 7*((ql) - 1) : 0))
 
 /* mailbox command complete status codes */
 #define MBOX_COMMAND_COMPLETE		0x4000
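
This one-character fix repairs operator precedence: + binds tighter than ?:, so the old body parsed as (4 + ((ql) > 0)) ? 7*((ql) - 1) : 0 and the leading 4 was swallowed by the always-true condition. A hypothetical demonstration, not part of the driver:

/* Hypothetical demonstration of the precedence bug fixed above. */
#define OLD_MAX_SG(ql)	(4 + ((ql) > 0) ? 7*((ql) - 1) : 0)
#define NEW_MAX_SG(ql)	(4 + (((ql) > 0) ? 7*((ql) - 1) : 0))

/*
 * OLD_MAX_SG(0) == -7	(the condition is (4 + 0), always true,
 * OLD_MAX_SG(2) ==  7	 so the leading 4 never gets added)
 * NEW_MAX_SG(0) ==  4
 * NEW_MAX_SG(2) == 11
 */
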
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index dd098cad337b..a60da5555577 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -940,10 +940,16 @@ EXPORT_SYMBOL(scsi_adjust_queue_depth);
  */
 int scsi_track_queue_full(struct scsi_device *sdev, int depth)
 {
-	if ((jiffies >> 4) == sdev->last_queue_full_time)
+
+	/*
+	 * Don't count QUEUE_FULLs that land in the same
+	 * jiffies window; they could all stem from the
+	 * same event.
+	 */
+	if ((jiffies >> 4) == (sdev->last_queue_full_time >> 4))
 		return 0;
 
-	sdev->last_queue_full_time = (jiffies >> 4);
+	sdev->last_queue_full_time = jiffies;
 	if (sdev->last_queue_full_depth != depth) {
 		sdev->last_queue_full_count = 1;
 		sdev->last_queue_full_depth = depth;
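
last_queue_full_time now holds raw jiffies because the queue ramp-up code added later in this merge compares it with time_before(); the duplicate-event check therefore shifts both sides into the same 16-jiffy window. A standalone sketch of that comparison (illustrative helper name):

/*
 * Illustrative check mirroring scsi_track_queue_full() above: two
 * QUEUE FULLs within the same 16-jiffy window count as one event.
 */
static int example_same_queue_full_window(unsigned long now,
					  unsigned long last)
{
	return (now >> 4) == (last >> 4);
}
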
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index c4103bef41b5..0b575c871007 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -44,6 +44,8 @@
 
 #include <net/checksum.h>
 
+#include <asm/unaligned.h>
+
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_device.h>
@@ -105,6 +107,10 @@ static const char * scsi_debug_version_date = "20070104";
 #define DEF_ATO 1
 #define DEF_PHYSBLK_EXP 0
 #define DEF_LOWEST_ALIGNED 0
+#define DEF_UNMAP_MAX_BLOCKS 0
+#define DEF_UNMAP_MAX_DESC 0
+#define DEF_UNMAP_GRANULARITY 0
+#define DEF_UNMAP_ALIGNMENT 0
 
 /* bit mask values for scsi_debug_opts */
 #define SCSI_DEBUG_OPT_NOISE   1
@@ -162,6 +168,10 @@ static int scsi_debug_guard = DEF_GUARD;
 static int scsi_debug_ato = DEF_ATO;
 static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP;
 static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED;
+static int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
+static int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
+static int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY;
+static int scsi_debug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
 
 static int scsi_debug_cmnd_count = 0;
 
@@ -223,7 +233,9 @@ static struct sdebug_queued_cmd queued_arr[SCSI_DEBUG_CANQUEUE];
 
 static unsigned char * fake_storep;	/* ramdisk storage */
 static unsigned char *dif_storep;	/* protection info */
+static void *map_storep;		/* provisioning map */
 
+static unsigned long map_size;
 static int num_aborts = 0;
 static int num_dev_resets = 0;
 static int num_bus_resets = 0;
@@ -317,6 +329,7 @@ static void get_data_transfer_info(unsigned char *cmd,
 			(u32)cmd[28] << 24;
 		break;
 
+	case WRITE_SAME_16:
 	case WRITE_16:
 	case READ_16:
 		*lba = (u64)cmd[9] | (u64)cmd[8] << 8 |
@@ -335,6 +348,7 @@ static void get_data_transfer_info(unsigned char *cmd,
 		*num = (u32)cmd[9] | (u32)cmd[8] << 8 | (u32)cmd[7] << 16 |
 			(u32)cmd[6] << 24;
 		break;
+	case WRITE_SAME:
 	case WRITE_10:
 	case READ_10:
 	case XDWRITEREAD_10:
@@ -671,10 +685,12 @@ static int inquiry_evpd_89(unsigned char * arr)
 }
 
 
+/* Block limits VPD page (SBC-3) */
 static unsigned char vpdb0_data[] = {
-	/* from 4th byte */ 0,0,0,4,
-	0,0,0x4,0,
-	0,0,0,64,
+	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
+	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
 };
 
 static int inquiry_evpd_b0(unsigned char * arr)
@@ -691,14 +707,40 @@ static int inquiry_evpd_b0(unsigned char * arr)
 		arr[6] = (sdebug_store_sectors >> 8) & 0xff;
 		arr[7] = sdebug_store_sectors & 0xff;
 	}
+
+	if (scsi_debug_unmap_max_desc) {
+		unsigned int blocks;
+
+		if (scsi_debug_unmap_max_blocks)
+			blocks = scsi_debug_unmap_max_blocks;
+		else
+			blocks = 0xffffffff;
+
+		put_unaligned_be32(blocks, &arr[16]);
+		put_unaligned_be32(scsi_debug_unmap_max_desc, &arr[20]);
+	}
+
+	if (scsi_debug_unmap_alignment) {
+		put_unaligned_be32(scsi_debug_unmap_alignment, &arr[28]);
+		arr[28] |= 0x80; /* UGAVALID */
+	}
+
+	if (scsi_debug_unmap_granularity) {
+		put_unaligned_be32(scsi_debug_unmap_granularity, &arr[24]);
+		return 0x3c; /* Mandatory page length for thin provisioning */
+	}
+
 	return sizeof(vpdb0_data);
 }
 
+/* Block device characteristics VPD page (SBC-3) */
 static int inquiry_evpd_b1(unsigned char *arr)
 {
 	memset(arr, 0, 0x3c);
 	arr[0] = 0;
-	arr[1] = 1;
+	arr[1] = 1;	/* non-rotating medium (e.g. solid state) */
+	arr[2] = 0;
+	arr[3] = 5;	/* less than 1.8" */
 
 	return 0x3c;
 }
@@ -974,6 +1016,10 @@ static int resp_readcap16(struct scsi_cmnd * scp,
 	arr[11] = scsi_debug_sector_size & 0xff;
 	arr[13] = scsi_debug_physblk_exp & 0xf;
 	arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f;
+
+	if (scsi_debug_unmap_granularity)
+		arr[14] |= 0x80; /* TPE */
+
 	arr[15] = scsi_debug_lowest_aligned & 0xff;
 
 	if (scsi_debug_dif) {
@@ -1887,6 +1933,70 @@ out:
 	return ret;
 }
 
+static unsigned int map_state(sector_t lba, unsigned int *num)
+{
+	unsigned int granularity, alignment, mapped;
+	sector_t block, next, end;
+
+	granularity = scsi_debug_unmap_granularity;
+	alignment = granularity - scsi_debug_unmap_alignment;
+	block = lba + alignment;
+	do_div(block, granularity);
+
+	mapped = test_bit(block, map_storep);
+
+	if (mapped)
+		next = find_next_zero_bit(map_storep, map_size, block);
+	else
+		next = find_next_bit(map_storep, map_size, block);
+
+	end = next * granularity - scsi_debug_unmap_alignment;
+	*num = end - lba;
+
+	return mapped;
+}
+
+static void map_region(sector_t lba, unsigned int len)
+{
+	unsigned int granularity, alignment;
+	sector_t end = lba + len;
+
+	granularity = scsi_debug_unmap_granularity;
+	alignment = granularity - scsi_debug_unmap_alignment;
+
+	while (lba < end) {
+		sector_t block, rem;
+
+		block = lba + alignment;
+		rem = do_div(block, granularity);
+
+		set_bit(block, map_storep);
+
+		lba += granularity - rem;
+	}
+}
+
+static void unmap_region(sector_t lba, unsigned int len)
+{
+	unsigned int granularity, alignment;
+	sector_t end = lba + len;
+
+	granularity = scsi_debug_unmap_granularity;
+	alignment = granularity - scsi_debug_unmap_alignment;
+
+	while (lba < end) {
+		sector_t block, rem;
+
+		block = lba + alignment;
+		rem = do_div(block, granularity);
+
+		if (rem == 0 && lba + granularity <= end)
+			clear_bit(block, map_storep);
+
+		lba += granularity - rem;
+	}
+}
+
 static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba,
 		      unsigned int num, struct sdebug_dev_info *devip,
 		      u32 ei_lba)
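
These helpers track provisioning state one bit per unmap_granularity blocks, with the grid shifted by unmap_alignment; an LBA maps to bit (lba + granularity - alignment) / granularity. A standalone sketch of that index math, with plain division standing in for do_div() and illustrative worked values:

/*
 * Illustrative version of the bit-index math used by map_region()
 * and friends above (plain 64-bit division instead of do_div()).
 *
 * With granularity = 8 and unmap_alignment = 4:
 *   lba  0.. 3 -> bit 0
 *   lba  4..11 -> bit 1
 *   lba 12..19 -> bit 2
 */
static unsigned long example_map_index(unsigned long long lba,
				       unsigned int granularity,
				       unsigned int unmap_alignment)
{
	unsigned int alignment = granularity - unmap_alignment;

	return (lba + alignment) / granularity;
}
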
@@ -1910,6 +2020,8 @@ static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba,
 
 	write_lock_irqsave(&atomic_rw, iflags);
 	ret = do_device_access(SCpnt, devip, lba, num, 1);
+	if (scsi_debug_unmap_granularity)
+		map_region(lba, num);
 	write_unlock_irqrestore(&atomic_rw, iflags);
 	if (-1 == ret)
 		return (DID_ERROR << 16);
@@ -1917,9 +2029,143 @@ static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba,
 		 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
 		printk(KERN_INFO "scsi_debug: write: cdb indicated=%u, "
 		       " IO sent=%d bytes\n", num * scsi_debug_sector_size, ret);
+
+	return 0;
+}
+
+static int resp_write_same(struct scsi_cmnd *scmd, unsigned long long lba,
+		      unsigned int num, struct sdebug_dev_info *devip,
+			   u32 ei_lba, unsigned int unmap)
+{
+	unsigned long iflags;
+	unsigned long long i;
+	int ret;
+
+	ret = check_device_access_params(devip, lba, num);
+	if (ret)
+		return ret;
+
+	write_lock_irqsave(&atomic_rw, iflags);
+
+	if (unmap && scsi_debug_unmap_granularity) {
+		unmap_region(lba, num);
+		goto out;
+	}
+
+	/* Else fetch one logical block */
+	ret = fetch_to_dev_buffer(scmd,
+				  fake_storep + (lba * scsi_debug_sector_size),
+				  scsi_debug_sector_size);
+
+	if (-1 == ret) {
+		write_unlock_irqrestore(&atomic_rw, iflags);
+		return (DID_ERROR << 16);
+	} else if ((ret < (num * scsi_debug_sector_size)) &&
+		 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
+		printk(KERN_INFO "scsi_debug: write same: cdb indicated=%u, "
+		       " IO sent=%d bytes\n", num * scsi_debug_sector_size, ret);
+
+	/* Copy first sector to remaining blocks */
+	for (i = 1 ; i < num ; i++)
+		memcpy(fake_storep + ((lba + i) * scsi_debug_sector_size),
+		       fake_storep + (lba * scsi_debug_sector_size),
+		       scsi_debug_sector_size);
+
+	if (scsi_debug_unmap_granularity)
+		map_region(lba, num);
+out:
+	write_unlock_irqrestore(&atomic_rw, iflags);
+
 	return 0;
 }
 
+struct unmap_block_desc {
+	__be64	lba;
+	__be32	blocks;
+	__be32	__reserved;
+};
+
+static int resp_unmap(struct scsi_cmnd * scmd, struct sdebug_dev_info * devip)
+{
+	unsigned char *buf;
+	struct unmap_block_desc *desc;
+	unsigned int i, payload_len, descriptors;
+	int ret;
+
+	ret = check_readiness(scmd, 1, devip);
+	if (ret)
+		return ret;
+
+	payload_len = get_unaligned_be16(&scmd->cmnd[7]);
+	BUG_ON(scsi_bufflen(scmd) != payload_len);
+
+	descriptors = (payload_len - 8) / 16;
+
+	buf = kmalloc(scsi_bufflen(scmd), GFP_ATOMIC);
+	if (!buf)
+		return check_condition_result;
+
+	scsi_sg_copy_to_buffer(scmd, buf, scsi_bufflen(scmd));
+
+	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
+	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
+
+	desc = (void *)&buf[8];
+
+	for (i = 0 ; i < descriptors ; i++) {
+		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
+		unsigned int num = get_unaligned_be32(&desc[i].blocks);
+
+		ret = check_device_access_params(devip, lba, num);
+		if (ret)
+			goto out;
+
+		unmap_region(lba, num);
+	}
+
+	ret = 0;
+
+out:
+	kfree(buf);
+
+	return ret;
+}
+
+#define SDEBUG_GET_LBA_STATUS_LEN 32
+
+static int resp_get_lba_status(struct scsi_cmnd * scmd,
+			       struct sdebug_dev_info * devip)
+{
+	unsigned long long lba;
+	unsigned int alloc_len, mapped, num;
+	unsigned char arr[SDEBUG_GET_LBA_STATUS_LEN];
+	int ret;
+
+	ret = check_readiness(scmd, 1, devip);
+	if (ret)
+		return ret;
+
+	lba = get_unaligned_be64(&scmd->cmnd[2]);
+	alloc_len = get_unaligned_be32(&scmd->cmnd[10]);
+
+	if (alloc_len < 24)
+		return 0;
+
+	ret = check_device_access_params(devip, lba, 1);
+	if (ret)
+		return ret;
+
+	mapped = map_state(lba, &num);
+
+	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
+	put_unaligned_be32(16, &arr[0]);	/* Parameter Data Length */
+	put_unaligned_be64(lba, &arr[8]);	/* LBA */
+	put_unaligned_be32(num, &arr[16]);	/* Number of blocks */
+	arr[20] = !mapped;			/* mapped = 0, unmapped = 1 */
+
+	return fill_from_dev_buffer(scmd, arr, SDEBUG_GET_LBA_STATUS_LEN);
+}
+
 #define SDEBUG_RLUN_ARR_SZ 256
 
 static int resp_report_luns(struct scsi_cmnd * scp,
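
resp_unmap() expects a standard UNMAP parameter list: a big-endian data length (payload minus 2), a block-descriptor length (16 bytes per range), four reserved bytes, then the descriptors. A hypothetical builder for a single-range payload that satisfies those checks; put_unaligned_be*() come from <asm/unaligned.h>, which this patch already pulls in:

/*
 * Hypothetical builder for a one-descriptor UNMAP parameter list
 * matching the checks in resp_unmap() above.
 */
static void example_fill_unmap_payload(unsigned char *buf, /* 24 bytes */
				       u64 lba, u32 blocks)
{
	memset(buf, 0, 24);
	put_unaligned_be16(24 - 2, &buf[0]);	/* UNMAP data length */
	put_unaligned_be16(16, &buf[2]);	/* block descriptor data len */
	put_unaligned_be64(lba, &buf[8]);	/* first descriptor: LBA */
	put_unaligned_be32(blocks, &buf[16]);	/* number of blocks */
}
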
@@ -2430,6 +2676,10 @@ module_param_named(guard, scsi_debug_guard, int, S_IRUGO);
 module_param_named(ato, scsi_debug_ato, int, S_IRUGO);
 module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO);
 module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO);
+module_param_named(unmap_max_blocks, scsi_debug_unmap_max_blocks, int, S_IRUGO);
+module_param_named(unmap_max_desc, scsi_debug_unmap_max_desc, int, S_IRUGO);
+module_param_named(unmap_granularity, scsi_debug_unmap_granularity, int, S_IRUGO);
+module_param_named(unmap_alignment, scsi_debug_unmap_alignment, int, S_IRUGO);
 
 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
 MODULE_DESCRIPTION("SCSI debug adapter driver");
@@ -2458,6 +2708,10 @@ MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
+MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks that can be unmapped in one cmd (def=0)");
+MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=0)");
+MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=0)");
+MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
 
 static char sdebug_info[256];
 
@@ -2816,6 +3070,23 @@ static ssize_t sdebug_ato_show(struct device_driver *ddp, char *buf)
 }
 DRIVER_ATTR(ato, S_IRUGO, sdebug_ato_show, NULL);
 
+static ssize_t sdebug_map_show(struct device_driver *ddp, char *buf)
+{
+	ssize_t count;
+
+	if (scsi_debug_unmap_granularity == 0)
+		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
+				 sdebug_store_sectors);
+
+	count = bitmap_scnlistprintf(buf, PAGE_SIZE, map_storep, map_size);
+
+	buf[count++] = '\n';
+	buf[count] = 0;
+
+	return count;
+}
+DRIVER_ATTR(map, S_IRUGO, sdebug_map_show, NULL);
+
 
 /* Note: The following function creates attribute files in the
    /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
@@ -2847,11 +3118,13 @@ static int do_create_driverfs_files(void)
 	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dif);
 	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_guard);
 	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_ato);
+	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_map);
 	return ret;
 }
 
 static void do_remove_driverfs_files(void)
 {
+	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_map);
 	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_ato);
 	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_guard);
 	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dif);
@@ -2989,6 +3262,36 @@ static int __init scsi_debug_init(void)
 		memset(dif_storep, 0xff, dif_size);
 	}
 
+	if (scsi_debug_unmap_granularity) {
+		unsigned int map_bytes;
+
+		if (scsi_debug_unmap_granularity < scsi_debug_unmap_alignment) {
+			printk(KERN_ERR
+			       "%s: ERR: unmap_granularity < unmap_alignment\n",
+			       __func__);
+			return -EINVAL;
+		}
+
+		map_size = (sdebug_store_sectors / scsi_debug_unmap_granularity);
+		map_bytes = BITS_TO_LONGS(map_size) * sizeof(long);
+		map_storep = vmalloc(map_bytes);
+
+		printk(KERN_INFO "scsi_debug_init: %lu provisioning blocks\n",
+		       map_size);
+
+		if (map_storep == NULL) {
+			printk(KERN_ERR "scsi_debug_init: out of mem. (MAP)\n");
+			ret = -ENOMEM;
+			goto free_vm;
+		}
+
+		memset(map_storep, 0x0, map_bytes);
+
+		/* Map first 1KB for partition table */
+		if (scsi_debug_num_parts)
+			map_region(0, 2);
+	}
+
 	ret = device_register(&pseudo_primary);
 	if (ret < 0) {
 		printk(KERN_WARNING "scsi_debug: device_register error: %d\n",
@@ -3041,6 +3344,8 @@ bus_unreg:
 dev_unreg:
 	device_unregister(&pseudo_primary);
 free_vm:
+	if (map_storep)
+		vfree(map_storep);
 	if (dif_storep)
 		vfree(dif_storep);
 	vfree(fake_storep);
@@ -3167,6 +3472,7 @@ int scsi_debug_queuecommand(struct scsi_cmnd *SCpnt, done_funct_t done)
 	int inj_dif = 0;
 	int inj_dix = 0;
 	int delay_override = 0;
+	int unmap = 0;
 
 	scsi_set_resid(SCpnt, 0);
 	if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && cmd) {
@@ -3272,13 +3578,21 @@ int scsi_debug_queuecommand(struct scsi_cmnd *SCpnt, done_funct_t done)
 		errsts = resp_readcap(SCpnt, devip);
 		break;
 	case SERVICE_ACTION_IN:
-		if (SAI_READ_CAPACITY_16 != cmd[1]) {
+		if (cmd[1] == SAI_READ_CAPACITY_16)
+			errsts = resp_readcap16(SCpnt, devip);
+		else if (cmd[1] == SAI_GET_LBA_STATUS) {
+
+			if (scsi_debug_unmap_max_desc == 0) {
+				mk_sense_buffer(devip, ILLEGAL_REQUEST,
+						INVALID_COMMAND_OPCODE, 0);
+				errsts = check_condition_result;
+			} else
+				errsts = resp_get_lba_status(SCpnt, devip);
+		} else {
 			mk_sense_buffer(devip, ILLEGAL_REQUEST,
 					INVALID_OPCODE, 0);
 			errsts = check_condition_result;
-			break;
 		}
-		errsts = resp_readcap16(SCpnt, devip);
 		break;
 	case MAINTENANCE_IN:
 		if (MI_REPORT_TARGET_PGS != cmd[1]) {
@@ -3378,6 +3692,29 @@ write:
 			errsts = illegal_condition_result;
 		}
 		break;
+	case WRITE_SAME_16:
+		if (cmd[1] & 0x8)
+			unmap = 1;
+		/* fall through */
+	case WRITE_SAME:
+		errsts = check_readiness(SCpnt, 0, devip);
+		if (errsts)
+			break;
+		get_data_transfer_info(cmd, &lba, &num, &ei_lba);
+		errsts = resp_write_same(SCpnt, lba, num, devip, ei_lba, unmap);
+		break;
+	case UNMAP:
+		errsts = check_readiness(SCpnt, 0, devip);
+		if (errsts)
+			break;
+
+		if (scsi_debug_unmap_max_desc == 0) {
+			mk_sense_buffer(devip, ILLEGAL_REQUEST,
+					INVALID_COMMAND_OPCODE, 0);
+			errsts = check_condition_result;
+		} else
+			errsts = resp_unmap(SCpnt, devip);
+		break;
 	case MODE_SENSE:
 	case MODE_SENSE_10:
 		errsts = resp_mode_sense(SCpnt, target, devip);
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 93c2622cb969..37af178b2d17 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -168,11 +168,10 @@ static struct {
 	{"Generic", "USB SD Reader", "1.00", BLIST_FORCELUN | BLIST_INQUIRY_36},
 	{"Generic", "USB Storage-SMC", "0180", BLIST_FORCELUN | BLIST_INQUIRY_36},
 	{"Generic", "USB Storage-SMC", "0207", BLIST_FORCELUN | BLIST_INQUIRY_36},
-	{"HITACHI", "DF400", "*", BLIST_SPARSELUN},
-	{"HITACHI", "DF500", "*", BLIST_SPARSELUN},
-	{"HITACHI", "DF600", "*", BLIST_SPARSELUN},
-	{"HITACHI", "DISK-SUBSYSTEM", "*", BLIST_ATTACH_PQ3 | BLIST_SPARSELUN | BLIST_LARGELUN},
-	{"HITACHI", "OPEN-E", "*", BLIST_ATTACH_PQ3 | BLIST_SPARSELUN | BLIST_LARGELUN},
+	{"HITACHI", "DF400", "*", BLIST_REPORTLUN2},
+	{"HITACHI", "DF500", "*", BLIST_REPORTLUN2},
+	{"HITACHI", "DISK-SUBSYSTEM", "*", BLIST_REPORTLUN2},
+	{"HITACHI", "OPEN-", "*", BLIST_REPORTLUN2},
 	{"HITACHI", "OP-C-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
 	{"HITACHI", "3380-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
 	{"HITACHI", "3390-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
@@ -454,7 +453,7 @@ int scsi_get_device_flags(struct scsi_device *sdev,
 
 
 /**
- * get_device_flags_keyed - get device specific flags from the dynamic device list.
+ * scsi_get_device_flags_keyed - get device specific flags from the dynamic device list
  * @sdev:       &scsi_device to get flags for
  * @vendor:	vendor name
  * @model:	model name
@@ -685,7 +684,7 @@ MODULE_PARM_DESC(default_dev_flags,
 		 "scsi default device flag integer value");
 
 /**
- * scsi_dev_info_list_delete - called from scsi.c:exit_scsi to remove the scsi_dev_info_list.
+ * scsi_exit_devinfo - remove /proc/scsi/device_info & the scsi_dev_info_list
  **/
 void scsi_exit_devinfo(void)
 {
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 1b0060b791e8..08ed506e6059 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -331,6 +331,64 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
 	}
 }
 
+static void scsi_handle_queue_ramp_up(struct scsi_device *sdev)
+{
+	struct scsi_host_template *sht = sdev->host->hostt;
+	struct scsi_device *tmp_sdev;
+
+	if (!sht->change_queue_depth ||
+	    sdev->queue_depth >= sdev->max_queue_depth)
+		return;
+
+	if (time_before(jiffies,
+	    sdev->last_queue_ramp_up + sdev->queue_ramp_up_period))
+		return;
+
+	if (time_before(jiffies,
+	    sdev->last_queue_full_time + sdev->queue_ramp_up_period))
+		return;
+
+	/*
+	 * Walk all devices of a target and do
+	 * ramp up on them.
+	 */
+	shost_for_each_device(tmp_sdev, sdev->host) {
+		if (tmp_sdev->channel != sdev->channel ||
+		    tmp_sdev->id != sdev->id ||
+		    tmp_sdev->queue_depth == sdev->max_queue_depth)
+			continue;
+		/*
+		 * call back into LLD to increase queue_depth by one
+		 * with ramp up reason code.
+		 */
+		sht->change_queue_depth(tmp_sdev, tmp_sdev->queue_depth + 1,
+					SCSI_QDEPTH_RAMP_UP);
+		sdev->last_queue_ramp_up = jiffies;
+	}
+}
+
+static void scsi_handle_queue_full(struct scsi_device *sdev)
+{
+	struct scsi_host_template *sht = sdev->host->hostt;
+	struct scsi_device *tmp_sdev;
+
+	if (!sht->change_queue_depth)
+		return;
+
+	shost_for_each_device(tmp_sdev, sdev->host) {
+		if (tmp_sdev->channel != sdev->channel ||
+		    tmp_sdev->id != sdev->id)
+			continue;
+		/*
+		 * We do not know the number of commands that were at
+		 * the device when we got the queue full, so we start
+		 * from the highest possible value and work our way down.
+		 */
+		sht->change_queue_depth(tmp_sdev, tmp_sdev->queue_depth - 1,
+					SCSI_QDEPTH_QFULL);
+	}
+}
+
 /**
  * scsi_eh_completed_normally - Disposition a eh cmd on return from LLD.
  * @scmd:	SCSI cmd to examine.
@@ -371,6 +429,7 @@ static int scsi_eh_completed_normally(struct scsi_cmnd *scmd)
 	 */
 	switch (status_byte(scmd->result)) {
 	case GOOD:
+		scsi_handle_queue_ramp_up(scmd->device);
 	case COMMAND_TERMINATED:
 		return SUCCESS;
 	case CHECK_CONDITION:
@@ -387,8 +446,10 @@ static int scsi_eh_completed_normally(struct scsi_cmnd *scmd)
 		 * let issuer deal with this, it could be just fine
 		 */
 		return SUCCESS;
-	case BUSY:
 	case QUEUE_FULL:
+		scsi_handle_queue_full(scmd->device);
+		/* fall through */
+	case BUSY:
 	default:
 		return FAILED;
 	}
@@ -1387,6 +1448,7 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
 	 */
 	switch (status_byte(scmd->result)) {
 	case QUEUE_FULL:
+		scsi_handle_queue_full(scmd->device);
 		/*
 		 * the case of trying to send too many commands to a
 		 * tagged queueing device.
@@ -1400,6 +1462,7 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
 		 */
 		return ADD_TO_MLQUEUE;
 	case GOOD:
+		scsi_handle_queue_ramp_up(scmd->device);
 	case COMMAND_TERMINATED:
 		return SUCCESS;
 	case TASK_ABORTED:
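
Both handlers call back into the LLD, so a driver that wants automatic ramping must accept SCSI_QDEPTH_RAMP_UP and SCSI_QDEPTH_QFULL as well as SCSI_QDEPTH_DEFAULT. A sketch of such a callback (illustrative name; scsi_track_queue_full() is the helper touched earlier in this merge):

/*
 * Sketch of a ->change_queue_depth() that opts in to the ramp
 * up/down machinery above. The function name is illustrative.
 */
static int example_ramping_change_queue_depth(struct scsi_device *sdev,
					      int qdepth, int reason)
{
	switch (reason) {
	case SCSI_QDEPTH_DEFAULT:
	case SCSI_QDEPTH_RAMP_UP:	/* midlayer asks for depth + 1 */
		scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev),
					qdepth);
		break;
	case SCSI_QDEPTH_QFULL:		/* track and shrink on QUEUE FULL */
		scsi_track_queue_full(sdev, qdepth);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return sdev->queue_depth;
}
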
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
index b98f763931c5..d9564fb04f62 100644
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -308,6 +308,9 @@ int scsi_nonblockable_ioctl(struct scsi_device *sdev, int cmd,
 		case SG_SCSI_RESET_DEVICE:
 			val = SCSI_TRY_RESET_DEVICE;
 			break;
+		case SG_SCSI_RESET_TARGET:
+			val = SCSI_TRY_RESET_TARGET;
+			break;
 		case SG_SCSI_RESET_BUS:
 			val = SCSI_TRY_RESET_BUS;
 			break;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 5987da857103..e495d3813948 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -898,7 +898,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 				scsi_print_sense("", cmd);
 			scsi_print_command(cmd);
 		}
-		if (blk_end_request_err(req, -EIO))
+		if (blk_end_request_err(req, error))
 			scsi_requeue_command(q, cmd);
 		else
 			scsi_next_command(cmd);
@@ -1359,9 +1359,9 @@ static int scsi_lld_busy(struct request_queue *q)
 static void scsi_kill_request(struct request *req, struct request_queue *q)
 {
 	struct scsi_cmnd *cmd = req->special;
-	struct scsi_device *sdev = cmd->device;
-	struct scsi_target *starget = scsi_target(sdev);
-	struct Scsi_Host *shost = sdev->host;
+	struct scsi_device *sdev;
+	struct scsi_target *starget;
+	struct Scsi_Host *shost;
 
 	blk_start_request(req);
 
@@ -1371,6 +1371,9 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
 		BUG();
 	}
 
+	sdev = cmd->device;
+	starget = scsi_target(sdev);
+	shost = sdev->host;
 	scsi_init_cmd_errh(cmd);
 	cmd->result = DID_NO_CONNECT << 16;
 	atomic_inc(&cmd->device->iorequest_cnt);
diff --git a/drivers/scsi/scsi_lib_dma.c b/drivers/scsi/scsi_lib_dma.c
index ac6855cd2657..dcd128583b89 100644
--- a/drivers/scsi/scsi_lib_dma.c
+++ b/drivers/scsi/scsi_lib_dma.c
@@ -23,7 +23,7 @@ int scsi_dma_map(struct scsi_cmnd *cmd)
 	int nseg = 0;
 
 	if (scsi_sg_count(cmd)) {
-		struct device *dev = cmd->device->host->shost_gendev.parent;
+		struct device *dev = cmd->device->host->dma_dev;
 
 		nseg = dma_map_sg(dev, scsi_sglist(cmd), scsi_sg_count(cmd),
 				  cmd->sc_data_direction);
@@ -41,7 +41,7 @@ EXPORT_SYMBOL(scsi_dma_map);
 void scsi_dma_unmap(struct scsi_cmnd *cmd)
 {
 	if (scsi_sg_count(cmd)) {
-		struct device *dev = cmd->device->host->shost_gendev.parent;
+		struct device *dev = cmd->device->host->dma_dev;
 
 		dma_unmap_sg(dev, scsi_sglist(cmd), scsi_sg_count(cmd),
 			     cmd->sc_data_direction);
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 47291bcff0d5..012f73a96880 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -251,6 +251,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
 	sdev->model = scsi_null_device_strs;
 	sdev->rev = scsi_null_device_strs;
 	sdev->host = shost;
+	sdev->queue_ramp_up_period = SCSI_DEFAULT_RAMP_UP_PERIOD;
 	sdev->id = starget->id;
 	sdev->lun = lun;
 	sdev->channel = starget->channel;
@@ -941,6 +942,8 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
 		}
 	}
 
+	sdev->max_queue_depth = sdev->queue_depth;
+
 	/*
 	 * Ok, the device is now all set up, we can
 	 * register it and tell the rest of the kernel
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 392d8db33905..5a065055e68a 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -766,10 +766,13 @@ sdev_store_queue_depth_rw(struct device *dev, struct device_attribute *attr,
 	if (depth < 1)
 		return -EINVAL;
 
-	retval = sht->change_queue_depth(sdev, depth);
+	retval = sht->change_queue_depth(sdev, depth,
+					 SCSI_QDEPTH_DEFAULT);
 	if (retval < 0)
 		return retval;
 
+	sdev->max_queue_depth = sdev->queue_depth;
+
 	return count;
 }
 
@@ -778,6 +781,37 @@ static struct device_attribute sdev_attr_queue_depth_rw =
 	       sdev_store_queue_depth_rw);
 
 static ssize_t
+sdev_show_queue_ramp_up_period(struct device *dev,
+			       struct device_attribute *attr,
+			       char *buf)
+{
+	struct scsi_device *sdev;
+	sdev = to_scsi_device(dev);
+	return snprintf(buf, 20, "%u\n",
+			jiffies_to_msecs(sdev->queue_ramp_up_period));
+}
+
+static ssize_t
+sdev_store_queue_ramp_up_period(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t count)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	unsigned long period;
+
+	if (strict_strtoul(buf, 10, &period))
+		return -EINVAL;
+
+	sdev->queue_ramp_up_period = msecs_to_jiffies(period);
+	return count;
+}
+
+static struct device_attribute sdev_attr_queue_ramp_up_period =
+	__ATTR(queue_ramp_up_period, S_IRUGO | S_IWUSR,
+	       sdev_show_queue_ramp_up_period,
+	       sdev_store_queue_ramp_up_period);
+
+static ssize_t
 sdev_store_queue_type_rw(struct device *dev, struct device_attribute *attr,
 			 const char *buf, size_t count)
 {
@@ -867,8 +901,12 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
 	sdev->is_visible = 1;
 
 	/* create queue files, which may be writable, depending on the host */
-	if (sdev->host->hostt->change_queue_depth)
-		error = device_create_file(&sdev->sdev_gendev, &sdev_attr_queue_depth_rw);
+	if (sdev->host->hostt->change_queue_depth) {
+		error = device_create_file(&sdev->sdev_gendev,
+					   &sdev_attr_queue_depth_rw);
+		if (!error)
+			error = device_create_file(&sdev->sdev_gendev,
+					   &sdev_attr_queue_ramp_up_period);
+	}
 	else
 		error = device_create_file(&sdev->sdev_gendev, &dev_attr_queue_depth);
 	if (error)
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index c6f70dae9b2e..6531c91501be 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -27,6 +27,7 @@
  */
 #include <linux/module.h>
 #include <linux/init.h>
+#include <linux/delay.h>
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_transport.h>
@@ -2384,6 +2385,7 @@ fc_rport_final_delete(struct work_struct *work)
 	struct Scsi_Host *shost = rport_to_shost(rport);
 	struct fc_internal *i = to_fc_internal(shost->transportt);
 	unsigned long flags;
+	int do_callback = 0;
 
 	/*
 	 * if a scan is pending, flush the SCSI Host work_q so that
@@ -2422,8 +2424,15 @@ fc_rport_final_delete(struct work_struct *work)
 	 * Avoid this call if we already called it when we preserved the
 	 * rport for the binding.
 	 */
+	spin_lock_irqsave(shost->host_lock, flags);
 	if (!(rport->flags & FC_RPORT_DEVLOSS_CALLBK_DONE) &&
-	    (i->f->dev_loss_tmo_callbk))
+	    (i->f->dev_loss_tmo_callbk)) {
+		rport->flags |= FC_RPORT_DEVLOSS_CALLBK_DONE;
+		do_callback = 1;
+	}
+	spin_unlock_irqrestore(shost->host_lock, flags);
+
+	if (do_callback)
 		i->f->dev_loss_tmo_callbk(rport);
 
 	fc_bsg_remove(rport->rqst_q);
@@ -2970,6 +2979,7 @@ fc_timeout_deleted_rport(struct work_struct *work)
 	struct fc_internal *i = to_fc_internal(shost->transportt);
 	struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
 	unsigned long flags;
+	int do_callback = 0;
 
 	spin_lock_irqsave(shost->host_lock, flags);
 
@@ -3035,7 +3045,6 @@ fc_timeout_deleted_rport(struct work_struct *work)
 	rport->roles = FC_PORT_ROLE_UNKNOWN;
 	rport->port_state = FC_PORTSTATE_NOTPRESENT;
 	rport->flags &= ~FC_RPORT_FAST_FAIL_TIMEDOUT;
-	rport->flags |= FC_RPORT_DEVLOSS_CALLBK_DONE;
 
 	/*
 	 * Pre-emptively kill I/O rather than waiting for the work queue
@@ -3045,32 +3054,40 @@ fc_timeout_deleted_rport(struct work_struct *work)
 	spin_unlock_irqrestore(shost->host_lock, flags);
 	fc_terminate_rport_io(rport);
 
-	BUG_ON(rport->port_state != FC_PORTSTATE_NOTPRESENT);
+	spin_lock_irqsave(shost->host_lock, flags);
+
+	if (rport->port_state == FC_PORTSTATE_NOTPRESENT) {	/* still missing */
 
-	/* remove the identifiers that aren't used in the consisting binding */
-	switch (fc_host->tgtid_bind_type) {
-	case FC_TGTID_BIND_BY_WWPN:
-		rport->node_name = -1;
-		rport->port_id = -1;
-		break;
-	case FC_TGTID_BIND_BY_WWNN:
-		rport->port_name = -1;
-		rport->port_id = -1;
-		break;
-	case FC_TGTID_BIND_BY_ID:
-		rport->node_name = -1;
-		rport->port_name = -1;
-		break;
-	case FC_TGTID_BIND_NONE:	/* to keep compiler happy */
-		break;
+		/* remove the identifiers that aren't used in the consistent binding */
+		switch (fc_host->tgtid_bind_type) {
+		case FC_TGTID_BIND_BY_WWPN:
+			rport->node_name = -1;
+			rport->port_id = -1;
+			break;
+		case FC_TGTID_BIND_BY_WWNN:
+			rport->port_name = -1;
+			rport->port_id = -1;
+			break;
+		case FC_TGTID_BIND_BY_ID:
+			rport->node_name = -1;
+			rport->port_name = -1;
+			break;
+		case FC_TGTID_BIND_NONE:	/* to keep compiler happy */
+			break;
+		}
+
+		/*
+		 * As this only occurs if the remote port (scsi target)
+		 * went away and didn't come back - we'll remove
+		 * all attached scsi devices.
+		 */
+		rport->flags |= FC_RPORT_DEVLOSS_CALLBK_DONE;
+		fc_queue_work(shost, &rport->stgt_delete_work);
+
+		do_callback = 1;
 	}
 
-	/*
-	 * As this only occurs if the remote port (scsi target)
-	 * went away and didn't come back - we'll remove
-	 * all attached scsi devices.
-	 */
-	fc_queue_work(shost, &rport->stgt_delete_work);
+	spin_unlock_irqrestore(shost->host_lock, flags);
 
 	/*
 	 * Notify the driver that the rport is now dead. The LLDD will
@@ -3078,7 +3095,7 @@ fc_timeout_deleted_rport(struct work_struct *work)
 	 *
 	 * Note: we set the CALLBK_DONE flag above to correspond
 	 */
-	if (i->f->dev_loss_tmo_callbk)
+	if (do_callback && i->f->dev_loss_tmo_callbk)
 		i->f->dev_loss_tmo_callbk(rport);
 }
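+
+/*
+ * Note (inferred from the locking above): FC_RPORT_DEVLOSS_CALLBK_DONE is
+ * now tested and set under host_lock both here and in
+ * fc_rport_final_delete(), so the dev_loss_tmo callback cannot fire twice
+ * when the timeout and final-delete paths race.
+ */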
 
@@ -3128,6 +3145,31 @@ fc_scsi_scan_rport(struct work_struct *work)
 	spin_unlock_irqrestore(shost->host_lock, flags);
 }
 
+/**
+ * fc_block_scsi_eh - Block SCSI eh thread for blocked fc_rport
+ * @cmnd: SCSI command that scsi_eh is trying to recover
+ *
+ * This routine can be called from a FC LLD scsi_eh callback. It
+ * blocks the scsi_eh thread until the fc_rport leaves the
+ * FC_PORTSTATE_BLOCKED state. This is necessary to avoid scsi_eh
+ * failing recovery actions for blocked rports, which would lead to
+ * offlined SCSI devices.
+ */
+void fc_block_scsi_eh(struct scsi_cmnd *cmnd)
+{
+	struct Scsi_Host *shost = cmnd->device->host;
+	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
+	unsigned long flags;
+
+	spin_lock_irqsave(shost->host_lock, flags);
+	while (rport->port_state == FC_PORTSTATE_BLOCKED) {
+		spin_unlock_irqrestore(shost->host_lock, flags);
+		msleep(1000);
+		spin_lock_irqsave(shost->host_lock, flags);
+	}
+	spin_unlock_irqrestore(shost->host_lock, flags);
+}
+EXPORT_SYMBOL(fc_block_scsi_eh);
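+
+/*
+ * Hypothetical call site (illustration only, not part of this patch): an FC
+ * LLD would typically invoke fc_block_scsi_eh() at the start of its error
+ * handler callbacks so that recovery waits for a blocked rport instead of
+ * failing it outright, e.g.:
+ *
+ *	static int lld_eh_abort_handler(struct scsi_cmnd *cmnd)
+ *	{
+ *		fc_block_scsi_eh(cmnd);
+ *		... issue the abort to the hardware ...
+ *		return SUCCESS;
+ *	}
+ */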
 
 /**
  * fc_vport_setup - allocates and creates a FC virtual port.
@@ -3769,8 +3811,9 @@ fc_bsg_request_handler(struct request_queue *q, struct Scsi_Host *shost,
 		return;
 
 	while (!blk_queue_plugged(q)) {
-		if (rport && (rport->port_state == FC_PORTSTATE_BLOCKED))
-				break;
+		if (rport && (rport->port_state == FC_PORTSTATE_BLOCKED) &&
+		    !(rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT))
+			break;
 
 		req = blk_fetch_request(q);
 		if (!req)
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index ad897df36615..ea3892e7e0f7 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -30,7 +30,7 @@
 #include <scsi/scsi_transport_iscsi.h>
 #include <scsi/iscsi_if.h>
 
-#define ISCSI_SESSION_ATTRS 21
+#define ISCSI_SESSION_ATTRS 22
 #define ISCSI_CONN_ATTRS 13
 #define ISCSI_HOST_ATTRS 4
 
@@ -627,8 +627,10 @@ static void __iscsi_block_session(struct work_struct *work)
 	spin_unlock_irqrestore(&session->lock, flags);
 	scsi_target_block(&session->dev);
 	ISCSI_DBG_TRANS_SESSION(session, "Completed SCSI target blocking\n");
-	queue_delayed_work(iscsi_eh_timer_workq, &session->recovery_work,
-			   session->recovery_tmo * HZ);
+	if (session->recovery_tmo >= 0)
+		queue_delayed_work(iscsi_eh_timer_workq,
+				   &session->recovery_work,
+				   session->recovery_tmo * HZ);
 }
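+
+/*
+ * Note (inferred from the check above): a negative recovery_tmo now means
+ * "no recovery timeout" - the session stays blocked until it is explicitly
+ * unblocked, instead of eventually timing out.
+ */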
 
 void iscsi_block_session(struct iscsi_cls_session *session)
@@ -1348,8 +1350,7 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev)
 	switch (ev->u.set_param.param) {
 	case ISCSI_PARAM_SESS_RECOVERY_TMO:
 		sscanf(data, "%d", &value);
-		if (value != 0)
-			session->recovery_tmo = value;
+		session->recovery_tmo = value;
 		break;
 	default:
 		err = transport->set_param(conn, ev->u.set_param.param,
@@ -1759,6 +1760,7 @@ iscsi_session_attr(password_in, ISCSI_PARAM_PASSWORD_IN, 1);
 iscsi_session_attr(fast_abort, ISCSI_PARAM_FAST_ABORT, 0);
 iscsi_session_attr(abort_tmo, ISCSI_PARAM_ABORT_TMO, 0);
 iscsi_session_attr(lu_reset_tmo, ISCSI_PARAM_LU_RESET_TMO, 0);
+iscsi_session_attr(tgt_reset_tmo, ISCSI_PARAM_TGT_RESET_TMO, 0);
 iscsi_session_attr(ifacename, ISCSI_PARAM_IFACE_NAME, 0);
 iscsi_session_attr(initiatorname, ISCSI_PARAM_INITIATOR_NAME, 0)
 
@@ -2000,6 +2002,7 @@ iscsi_register_transport(struct iscsi_transport *tt)
 	SETUP_SESSION_RD_ATTR(fast_abort, ISCSI_FAST_ABORT);
 	SETUP_SESSION_RD_ATTR(abort_tmo, ISCSI_ABORT_TMO);
 	SETUP_SESSION_RD_ATTR(lu_reset_tmo,ISCSI_LU_RESET_TMO);
+	SETUP_SESSION_RD_ATTR(tgt_reset_tmo,ISCSI_TGT_RESET_TMO);
 	SETUP_SESSION_RD_ATTR(ifacename, ISCSI_IFACE_NAME);
 	SETUP_SESSION_RD_ATTR(initiatorname, ISCSI_INITIATOR_NAME);
 	SETUP_PRIV_SESSION_RD_ATTR(recovery_tmo);
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 12d58a7ed6bc..ad59abb47722 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -2280,7 +2280,8 @@ static int st_set_options(struct scsi_tape *STp, long options)
 	} else if (code == MT_ST_SET_CLN) {
 		value = (options & ~MT_ST_OPTIONS) & 0xff;
 		if (value != 0 &&
-		    value < EXTENDED_SENSE_START && value >= SCSI_SENSE_BUFFERSIZE)
+			(value < EXTENDED_SENSE_START ||
+				value >= SCSI_SENSE_BUFFERSIZE))
 			return (-EINVAL);
 		STp->cln_mode = value;
 		STp->cln_sense_mask = (options >> 8) & 0xff;
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index 09fa8861fc58..3058bb1aff95 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -36,11 +36,11 @@
 #include <scsi/scsi_eh.h>
 
 #define DRV_NAME "stex"
-#define ST_DRIVER_VERSION "4.6.0000.3"
+#define ST_DRIVER_VERSION "4.6.0000.4"
 #define ST_VER_MAJOR		4
 #define ST_VER_MINOR		6
 #define ST_OEM			0
-#define ST_BUILD_VER		3
+#define ST_BUILD_VER		4
 
 enum {
 	/* MU register offset */
@@ -64,24 +64,24 @@ enum {
 	YH2I_REQ_HI				= 0xc4,
 
 	/* MU register value */
-	MU_INBOUND_DOORBELL_HANDSHAKE		= 1,
-	MU_INBOUND_DOORBELL_REQHEADCHANGED	= 2,
-	MU_INBOUND_DOORBELL_STATUSTAILCHANGED	= 4,
-	MU_INBOUND_DOORBELL_HMUSTOPPED		= 8,
-	MU_INBOUND_DOORBELL_RESET		= 16,
-
-	MU_OUTBOUND_DOORBELL_HANDSHAKE		= 1,
-	MU_OUTBOUND_DOORBELL_REQUESTTAILCHANGED	= 2,
-	MU_OUTBOUND_DOORBELL_STATUSHEADCHANGED	= 4,
-	MU_OUTBOUND_DOORBELL_BUSCHANGE		= 8,
-	MU_OUTBOUND_DOORBELL_HASEVENT		= 16,
+	MU_INBOUND_DOORBELL_HANDSHAKE		= (1 << 0),
+	MU_INBOUND_DOORBELL_REQHEADCHANGED	= (1 << 1),
+	MU_INBOUND_DOORBELL_STATUSTAILCHANGED	= (1 << 2),
+	MU_INBOUND_DOORBELL_HMUSTOPPED		= (1 << 3),
+	MU_INBOUND_DOORBELL_RESET		= (1 << 4),
+
+	MU_OUTBOUND_DOORBELL_HANDSHAKE		= (1 << 0),
+	MU_OUTBOUND_DOORBELL_REQUESTTAILCHANGED	= (1 << 1),
+	MU_OUTBOUND_DOORBELL_STATUSHEADCHANGED	= (1 << 2),
+	MU_OUTBOUND_DOORBELL_BUSCHANGE		= (1 << 3),
+	MU_OUTBOUND_DOORBELL_HASEVENT		= (1 << 4),
+	MU_OUTBOUND_DOORBELL_REQUEST_RESET	= (1 << 27),
 
 	/* MU status code */
 	MU_STATE_STARTING			= 1,
-	MU_STATE_FMU_READY_FOR_HANDSHAKE	= 2,
-	MU_STATE_SEND_HANDSHAKE_FRAME		= 3,
-	MU_STATE_STARTED			= 4,
-	MU_STATE_RESETTING			= 5,
+	MU_STATE_STARTED			= 2,
+	MU_STATE_RESETTING			= 3,
+	MU_STATE_FAILED				= 4,
 
 	MU_MAX_DELAY				= 120,
 	MU_HANDSHAKE_SIGNATURE			= 0x55aaaa55,
@@ -111,6 +111,8 @@ enum {
 
 	SS_H2I_INT_RESET			= 0x100,
 
+	SS_I2H_REQUEST_RESET			= 0x2000,
+
 	SS_MU_OPERATIONAL			= 0x80000000,
 
 	STEX_CDB_LENGTH				= 16,
@@ -160,6 +162,7 @@ enum {
 	INQUIRY_EVPD				= 0x01,
 
 	ST_ADDITIONAL_MEM			= 0x200000,
+	ST_ADDITIONAL_MEM_MIN			= 0x80000,
 };
 
 struct st_sgitem {
@@ -311,6 +314,10 @@ struct st_hba {
 	struct st_ccb *wait_ccb;
 	__le32 *scratch;
 
+	char work_q_name[20];
+	struct workqueue_struct *work_q;
+	struct work_struct reset_work;
+	wait_queue_head_t reset_waitq;
 	unsigned int mu_status;
 	unsigned int cardtype;
 	int msi_enabled;
@@ -577,6 +584,9 @@ stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *))
 	lun = cmd->device->lun;
 	hba = (struct st_hba *) &host->hostdata[0];
 
+	if (unlikely(hba->mu_status == MU_STATE_RESETTING))
+		return SCSI_MLQUEUE_HOST_BUSY;
+
 	switch (cmd->cmnd[0]) {
 	case MODE_SENSE_10:
 	{
@@ -841,7 +851,6 @@ static irqreturn_t stex_intr(int irq, void *__hba)
 	void __iomem *base = hba->mmio_base;
 	u32 data;
 	unsigned long flags;
-	int handled = 0;
 
 	spin_lock_irqsave(hba->host->host_lock, flags);
 
@@ -852,12 +861,16 @@ static irqreturn_t stex_intr(int irq, void *__hba)
 		writel(data, base + ODBL);
 		readl(base + ODBL); /* flush */
 		stex_mu_intr(hba, data);
-		handled = 1;
+		spin_unlock_irqrestore(hba->host->host_lock, flags);
+		if (unlikely(data & MU_OUTBOUND_DOORBELL_REQUEST_RESET &&
+			hba->cardtype == st_shasta))
+			queue_work(hba->work_q, &hba->reset_work);
+		return IRQ_HANDLED;
 	}
 
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 
-	return IRQ_RETVAL(handled);
+	return IRQ_NONE;
 }
 
 static void stex_ss_mu_intr(struct st_hba *hba)
@@ -939,7 +952,6 @@ static irqreturn_t stex_ss_intr(int irq, void *__hba)
 	void __iomem *base = hba->mmio_base;
 	u32 data;
 	unsigned long flags;
-	int handled = 0;
 
 	spin_lock_irqsave(hba->host->host_lock, flags);
 
@@ -948,12 +960,15 @@ static irqreturn_t stex_ss_intr(int irq, void *__hba)
 		/* clear the interrupt */
 		writel(data, base + YI2H_INT_C);
 		stex_ss_mu_intr(hba);
-		handled = 1;
+		spin_unlock_irqrestore(hba->host->host_lock, flags);
+		if (unlikely(data & SS_I2H_REQUEST_RESET))
+			queue_work(hba->work_q, &hba->reset_work);
+		return IRQ_HANDLED;
 	}
 
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 
-	return IRQ_RETVAL(handled);
+	return IRQ_NONE;
 }
 
 static int stex_common_handshake(struct st_hba *hba)
@@ -1001,7 +1016,7 @@ static int stex_common_handshake(struct st_hba *hba)
 	h->partner_type = HMU_PARTNER_TYPE;
 	if (hba->extra_offset) {
 		h->extra_offset = cpu_to_le32(hba->extra_offset);
-		h->extra_size = cpu_to_le32(ST_ADDITIONAL_MEM);
+		h->extra_size = cpu_to_le32(hba->dma_size - hba->extra_offset);
 	} else
 		h->extra_offset = h->extra_size = 0;
 
@@ -1046,7 +1061,7 @@ static int stex_ss_handshake(struct st_hba *hba)
 	struct st_msg_header *msg_h;
 	struct handshake_frame *h;
 	__le32 *scratch;
-	u32 data;
+	u32 data, scratch_size;
 	unsigned long before;
 	int ret = 0;
 
@@ -1074,13 +1089,16 @@ static int stex_ss_handshake(struct st_hba *hba)
 	stex_gettime(&h->hosttime);
 	h->partner_type = HMU_PARTNER_TYPE;
 	h->extra_offset = h->extra_size = 0;
-	h->scratch_size = cpu_to_le32((hba->sts_count+1)*sizeof(u32));
+	scratch_size = (hba->sts_count+1)*sizeof(u32);
+	h->scratch_size = cpu_to_le32(scratch_size);
 
 	data = readl(base + YINT_EN);
 	data &= ~4;
 	writel(data, base + YINT_EN);
 	writel((hba->dma_handle >> 16) >> 16, base + YH2I_REQ_HI);
+	readl(base + YH2I_REQ_HI);
 	writel(hba->dma_handle, base + YH2I_REQ);
+	readl(base + YH2I_REQ); /* flush */
 
 	scratch = hba->scratch;
 	before = jiffies;
@@ -1096,7 +1114,7 @@ static int stex_ss_handshake(struct st_hba *hba)
 		msleep(1);
 	}
 
-	*scratch = 0;
+	memset(scratch, 0, scratch_size);
 	msg_h->flag = 0;
 	return ret;
 }
@@ -1105,19 +1123,24 @@ static int stex_handshake(struct st_hba *hba)
 {
 	int err;
 	unsigned long flags;
+	unsigned int mu_status;
 
 	err = (hba->cardtype == st_yel) ?
 		stex_ss_handshake(hba) : stex_common_handshake(hba);
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	mu_status = hba->mu_status;
 	if (err == 0) {
-		spin_lock_irqsave(hba->host->host_lock, flags);
 		hba->req_head = 0;
 		hba->req_tail = 0;
 		hba->status_head = 0;
 		hba->status_tail = 0;
 		hba->out_req_cnt = 0;
 		hba->mu_status = MU_STATE_STARTED;
-		spin_unlock_irqrestore(hba->host->host_lock, flags);
-	}
+	} else
+		hba->mu_status = MU_STATE_FAILED;
+	if (mu_status == MU_STATE_RESETTING)
+		wake_up_all(&hba->reset_waitq);
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
 	return err;
 }
 
@@ -1137,17 +1160,11 @@ static int stex_abort(struct scsi_cmnd *cmd)
 
 	base = hba->mmio_base;
 	spin_lock_irqsave(host->host_lock, flags);
-	if (tag < host->can_queue && hba->ccb[tag].cmd == cmd)
+	if (tag < host->can_queue &&
+		hba->ccb[tag].req && hba->ccb[tag].cmd == cmd)
 		hba->wait_ccb = &hba->ccb[tag];
-	else {
-		for (tag = 0; tag < host->can_queue; tag++)
-			if (hba->ccb[tag].cmd == cmd) {
-				hba->wait_ccb = &hba->ccb[tag];
-				break;
-			}
-		if (tag >= host->can_queue)
-			goto out;
-	}
+	else
+		goto out;
 
 	if (hba->cardtype == st_yel) {
 		data = readl(base + YI2H_INT);
@@ -1221,6 +1238,37 @@ static void stex_hard_reset(struct st_hba *hba)
 			hba->pdev->saved_config_space[i]);
 }
 
+static int stex_yos_reset(struct st_hba *hba)
+{
+	void __iomem *base;
+	unsigned long flags, before;
+	int ret = 0;
+
+	base = hba->mmio_base;
+	writel(MU_INBOUND_DOORBELL_RESET, base + IDBL);
+	readl(base + IDBL); /* flush */
+	before = jiffies;
+	while (hba->out_req_cnt > 0) {
+		if (time_after(jiffies, before + ST_INTERNAL_TIMEOUT * HZ)) {
+			printk(KERN_WARNING DRV_NAME
+				"(%s): reset timeout\n", pci_name(hba->pdev));
+			ret = -1;
+			break;
+		}
+		msleep(1);
+	}
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	if (ret == -1)
+		hba->mu_status = MU_STATE_FAILED;
+	else
+		hba->mu_status = MU_STATE_STARTED;
+	wake_up_all(&hba->reset_waitq);
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+	return ret;
+}
+
 static void stex_ss_reset(struct st_hba *hba)
 {
 	writel(SS_H2I_INT_RESET, hba->mmio_base + YH2I_INT);
@@ -1228,66 +1276,86 @@ static void stex_ss_reset(struct st_hba *hba)
 	ssleep(5);
 }
 
-static int stex_reset(struct scsi_cmnd *cmd)
+static int stex_do_reset(struct st_hba *hba)
 {
-	struct st_hba *hba;
-	void __iomem *base;
-	unsigned long flags, before;
+	struct st_ccb *ccb;
+	unsigned long flags;
+	unsigned int mu_status = MU_STATE_RESETTING;
+	u16 tag;
 
-	hba = (struct st_hba *) &cmd->device->host->hostdata[0];
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	if (hba->mu_status == MU_STATE_STARTING) {
+		spin_unlock_irqrestore(hba->host->host_lock, flags);
+		printk(KERN_INFO DRV_NAME "(%s): request reset during init\n",
+			pci_name(hba->pdev));
+		return 0;
+	}
+	while (hba->mu_status == MU_STATE_RESETTING) {
+		spin_unlock_irqrestore(hba->host->host_lock, flags);
+		wait_event_timeout(hba->reset_waitq,
+				   hba->mu_status != MU_STATE_RESETTING,
+				   MU_MAX_DELAY * HZ);
+		spin_lock_irqsave(hba->host->host_lock, flags);
+		mu_status = hba->mu_status;
+	}
 
-	printk(KERN_INFO DRV_NAME
-		"(%s): resetting host\n", pci_name(hba->pdev));
-	scsi_print_command(cmd);
+	if (mu_status != MU_STATE_RESETTING) {
+		spin_unlock_irqrestore(hba->host->host_lock, flags);
+		return (mu_status == MU_STATE_STARTED) ? 0 : -1;
+	}
 
 	hba->mu_status = MU_STATE_RESETTING;
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+	if (hba->cardtype == st_yosemite)
+		return stex_yos_reset(hba);
 
 	if (hba->cardtype == st_shasta)
 		stex_hard_reset(hba);
 	else if (hba->cardtype == st_yel)
 		stex_ss_reset(hba);
 
-	if (hba->cardtype != st_yosemite) {
-		if (stex_handshake(hba)) {
-			printk(KERN_WARNING DRV_NAME
-				"(%s): resetting: handshake failed\n",
-				pci_name(hba->pdev));
-			return FAILED;
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	for (tag = 0; tag < hba->host->can_queue; tag++) {
+		ccb = &hba->ccb[tag];
+		if (ccb->req == NULL)
+			continue;
+		ccb->req = NULL;
+		if (ccb->cmd) {
+			scsi_dma_unmap(ccb->cmd);
+			ccb->cmd->result = DID_RESET << 16;
+			ccb->cmd->scsi_done(ccb->cmd);
+			ccb->cmd = NULL;
 		}
-		return SUCCESS;
 	}
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
 
-	/* st_yosemite */
-	writel(MU_INBOUND_DOORBELL_RESET, hba->mmio_base + IDBL);
-	readl(hba->mmio_base + IDBL); /* flush */
-	before = jiffies;
-	while (hba->out_req_cnt > 0) {
-		if (time_after(jiffies, before + ST_INTERNAL_TIMEOUT * HZ)) {
-			printk(KERN_WARNING DRV_NAME
-				"(%s): reset timeout\n", pci_name(hba->pdev));
-			return FAILED;
-		}
-		msleep(1);
-	}
+	if (stex_handshake(hba) == 0)
+		return 0;
 
-	base = hba->mmio_base;
-	writel(0, base + IMR0);
-	readl(base + IMR0);
-	writel(0, base + OMR0);
-	readl(base + OMR0);
-	writel(0, base + IMR1);
-	readl(base + IMR1);
-	writel(0, base + OMR1);
-	readl(base + OMR1); /* flush */
-	spin_lock_irqsave(hba->host->host_lock, flags);
-	hba->req_head = 0;
-	hba->req_tail = 0;
-	hba->status_head = 0;
-	hba->status_tail = 0;
-	hba->out_req_cnt = 0;
-	hba->mu_status = MU_STATE_STARTED;
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
-	return SUCCESS;
+	printk(KERN_WARNING DRV_NAME "(%s): resetting: handshake failed\n",
+		pci_name(hba->pdev));
+	return -1;
+}
+
+static int stex_reset(struct scsi_cmnd *cmd)
+{
+	struct st_hba *hba;
+
+	hba = (struct st_hba *) &cmd->device->host->hostdata[0];
+
+	printk(KERN_INFO DRV_NAME
+		"(%s): resetting host\n", pci_name(hba->pdev));
+	scsi_print_command(cmd);
+
+	return stex_do_reset(hba) ? FAILED : SUCCESS;
+}
+
+static void stex_reset_work(struct work_struct *work)
+{
+	struct st_hba *hba = container_of(work, struct st_hba, reset_work);
+
+	stex_do_reset(hba);
 }
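+
+/*
+ * Concurrency sketch (illustrative): stex_do_reset() can be entered from the
+ * scsi_eh thread via stex_reset() and from the firmware-requested reset work
+ * via stex_reset_work(). MU_STATE_RESETTING plus reset_waitq serialize the
+ * two: a second caller waits for the in-flight reset to finish and then
+ * simply reports its outcome instead of resetting again.
+ */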
 
 static int stex_biosparam(struct scsi_device *sdev,
@@ -1420,8 +1488,8 @@ static int stex_set_dma_mask(struct pci_dev * pdev)
 {
 	int ret;
 
-	if (!pci_set_dma_mask(pdev,  DMA_BIT_MASK(64))
-		&& !pci_set_consistent_dma_mask(pdev,  DMA_BIT_MASK(64)))
+	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
+		&& !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
 		return 0;
 	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 	if (!ret)
@@ -1528,10 +1596,24 @@ stex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	hba->dma_mem = dma_alloc_coherent(&pdev->dev,
 		hba->dma_size, &hba->dma_handle, GFP_KERNEL);
 	if (!hba->dma_mem) {
-		err = -ENOMEM;
-		printk(KERN_ERR DRV_NAME "(%s): dma mem alloc failed\n",
-			pci_name(pdev));
-		goto out_iounmap;
+		/* Retry minimum coherent mapping for st_seq and st_vsc */
+		if (hba->cardtype == st_seq ||
+		    (hba->cardtype == st_vsc && (pdev->subsystem_device & 1))) {
+			printk(KERN_WARNING DRV_NAME
+				"(%s): allocating min buffer for controller\n",
+				pci_name(pdev));
+			hba->dma_size = hba->extra_offset
+				+ ST_ADDITIONAL_MEM_MIN;
+			hba->dma_mem = dma_alloc_coherent(&pdev->dev,
+				hba->dma_size, &hba->dma_handle, GFP_KERNEL);
+		}
+
+		if (!hba->dma_mem) {
+			err = -ENOMEM;
+			printk(KERN_ERR DRV_NAME "(%s): dma mem alloc failed\n",
+				pci_name(pdev));
+			goto out_iounmap;
+		}
 	}
 
 	hba->ccb = kcalloc(ci->rq_count, sizeof(struct st_ccb), GFP_KERNEL);
@@ -1568,12 +1650,24 @@ stex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	hba->host = host;
 	hba->pdev = pdev;
+	init_waitqueue_head(&hba->reset_waitq);
+
+	snprintf(hba->work_q_name, sizeof(hba->work_q_name),
+		 "stex_wq_%d", host->host_no);
+	hba->work_q = create_singlethread_workqueue(hba->work_q_name);
+	if (!hba->work_q) {
+		printk(KERN_ERR DRV_NAME "(%s): create workqueue failed\n",
+			pci_name(pdev));
+		err = -ENOMEM;
+		goto out_ccb_free;
+	}
+	INIT_WORK(&hba->reset_work, stex_reset_work);
 
 	err = stex_request_irq(hba);
 	if (err) {
 		printk(KERN_ERR DRV_NAME "(%s): request irq failed\n",
 			pci_name(pdev));
-		goto out_ccb_free;
+		goto out_free_wq;
 	}
 
 	err = stex_handshake(hba);
@@ -1602,6 +1696,8 @@ stex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
 out_free_irq:
 	stex_free_irq(hba);
+out_free_wq:
+	destroy_workqueue(hba->work_q);
 out_ccb_free:
 	kfree(hba->ccb);
 out_pci_free:
@@ -1669,6 +1765,8 @@ static void stex_hba_free(struct st_hba *hba)
 {
 	stex_free_irq(hba);
 
+	destroy_workqueue(hba->work_q);
+
 	iounmap(hba->mmio_base);
 
 	pci_release_regions(hba->pdev);
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
new file mode 100644
index 000000000000..d2604c813a20
--- /dev/null
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -0,0 +1,1407 @@
+/*
+ * Linux driver for VMware's para-virtualized SCSI HBA.
+ *
+ * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; version 2 of the License and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained by: Alok N Kataria <akataria@vmware.com>
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/pci.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+
+#include "vmw_pvscsi.h"
+
+#define PVSCSI_LINUX_DRIVER_DESC "VMware PVSCSI driver"
+
+MODULE_DESCRIPTION(PVSCSI_LINUX_DRIVER_DESC);
+MODULE_AUTHOR("VMware, Inc.");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(PVSCSI_DRIVER_VERSION_STRING);
+
+#define PVSCSI_DEFAULT_NUM_PAGES_PER_RING	8
+#define PVSCSI_DEFAULT_NUM_PAGES_MSG_RING	1
+#define PVSCSI_DEFAULT_QUEUE_DEPTH		64
+#define SGL_SIZE				PAGE_SIZE
+
+struct pvscsi_sg_list {
+	struct PVSCSISGElement sge[PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT];
+};
+
+struct pvscsi_ctx {
+	/*
+	 * The index of the context in cmd_map serves as the context ID for a
+	 * 1-to-1 mapping of completions back to requests.
+	 */
+	struct scsi_cmnd	*cmd;
+	struct pvscsi_sg_list	*sgl;
+	struct list_head	list;
+	dma_addr_t		dataPA;
+	dma_addr_t		sensePA;
+	dma_addr_t		sglPA;
+};
+
+struct pvscsi_adapter {
+	char				*mmioBase;
+	unsigned int			irq;
+	u8				rev;
+	bool				use_msi;
+	bool				use_msix;
+	bool				use_msg;
+
+	spinlock_t			hw_lock;
+
+	struct workqueue_struct		*workqueue;
+	struct work_struct		work;
+
+	struct PVSCSIRingReqDesc	*req_ring;
+	unsigned			req_pages;
+	unsigned			req_depth;
+	dma_addr_t			reqRingPA;
+
+	struct PVSCSIRingCmpDesc	*cmp_ring;
+	unsigned			cmp_pages;
+	dma_addr_t			cmpRingPA;
+
+	struct PVSCSIRingMsgDesc	*msg_ring;
+	unsigned			msg_pages;
+	dma_addr_t			msgRingPA;
+
+	struct PVSCSIRingsState		*rings_state;
+	dma_addr_t			ringStatePA;
+
+	struct pci_dev			*dev;
+	struct Scsi_Host		*host;
+
+	struct list_head		cmd_pool;
+	struct pvscsi_ctx		*cmd_map;
+};
+
+
+/* Command line parameters */
+static int pvscsi_ring_pages     = PVSCSI_DEFAULT_NUM_PAGES_PER_RING;
+static int pvscsi_msg_ring_pages = PVSCSI_DEFAULT_NUM_PAGES_MSG_RING;
+static int pvscsi_cmd_per_lun    = PVSCSI_DEFAULT_QUEUE_DEPTH;
+static bool pvscsi_disable_msi;
+static bool pvscsi_disable_msix;
+static bool pvscsi_use_msg       = true;
+
+#define PVSCSI_RW (S_IRUSR | S_IWUSR)
+
+module_param_named(ring_pages, pvscsi_ring_pages, int, PVSCSI_RW);
+MODULE_PARM_DESC(ring_pages, "Number of pages per req/cmp ring - (default="
+		 __stringify(PVSCSI_DEFAULT_NUM_PAGES_PER_RING) ")");
+
+module_param_named(msg_ring_pages, pvscsi_msg_ring_pages, int, PVSCSI_RW);
+MODULE_PARM_DESC(msg_ring_pages, "Number of pages for the msg ring - (default="
+		 __stringify(PVSCSI_DEFAULT_NUM_PAGES_MSG_RING) ")");
+
+module_param_named(cmd_per_lun, pvscsi_cmd_per_lun, int, PVSCSI_RW);
+MODULE_PARM_DESC(cmd_per_lun, "Maximum commands per lun - (default="
+		 __stringify(PVSCSI_MAX_REQ_QUEUE_DEPTH) ")");
+
+module_param_named(disable_msi, pvscsi_disable_msi, bool, PVSCSI_RW);
+MODULE_PARM_DESC(disable_msi, "Disable MSI use in driver - (default=0)");
+
+module_param_named(disable_msix, pvscsi_disable_msix, bool, PVSCSI_RW);
+MODULE_PARM_DESC(disable_msix, "Disable MSI-X use in driver - (default=0)");
+
+module_param_named(use_msg, pvscsi_use_msg, bool, PVSCSI_RW);
+MODULE_PARM_DESC(use_msg, "Use msg ring when available - (default=1)");
+
+static const struct pci_device_id pvscsi_pci_tbl[] = {
+	{ PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_PVSCSI) },
+	{ 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, pvscsi_pci_tbl);
+
+static struct device *
+pvscsi_dev(const struct pvscsi_adapter *adapter)
+{
+	return &(adapter->dev->dev);
+}
+
+static struct pvscsi_ctx *
+pvscsi_find_context(const struct pvscsi_adapter *adapter, struct scsi_cmnd *cmd)
+{
+	struct pvscsi_ctx *ctx, *end;
+
+	end = &adapter->cmd_map[adapter->req_depth];
+	for (ctx = adapter->cmd_map; ctx < end; ctx++)
+		if (ctx->cmd == cmd)
+			return ctx;
+
+	return NULL;
+}
+
+static struct pvscsi_ctx *
+pvscsi_acquire_context(struct pvscsi_adapter *adapter, struct scsi_cmnd *cmd)
+{
+	struct pvscsi_ctx *ctx;
+
+	if (list_empty(&adapter->cmd_pool))
+		return NULL;
+
+	ctx = list_first_entry(&adapter->cmd_pool, struct pvscsi_ctx, list);
+	ctx->cmd = cmd;
+	list_del(&ctx->list);
+
+	return ctx;
+}
+
+static void pvscsi_release_context(struct pvscsi_adapter *adapter,
+				   struct pvscsi_ctx *ctx)
+{
+	ctx->cmd = NULL;
+	list_add(&ctx->list, &adapter->cmd_pool);
+}
+
+/*
+ * Map a pvscsi_ctx struct to a context ID field value; we map to a simple
+ * non-zero integer. ctx always points to an entry in the cmd_map array, hence
+ * the return value is always >=1.
+ */
+static u64 pvscsi_map_context(const struct pvscsi_adapter *adapter,
+			      const struct pvscsi_ctx *ctx)
+{
+	return ctx - adapter->cmd_map + 1;
+}
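+
+/*
+ * Invariant sketch (illustrative): context IDs are 1-based offsets into
+ * cmd_map, so for any ctx in the map
+ *
+ *	pvscsi_get_context(adapter, pvscsi_map_context(adapter, ctx)) == ctx
+ *
+ * and an ID of 0 is never produced, which presumably lets the device treat
+ * 0 as "no context".
+ */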
+
+static struct pvscsi_ctx *
+pvscsi_get_context(const struct pvscsi_adapter *adapter, u64 context)
+{
+	return &adapter->cmd_map[context - 1];
+}
+
+static void pvscsi_reg_write(const struct pvscsi_adapter *adapter,
+			     u32 offset, u32 val)
+{
+	writel(val, adapter->mmioBase + offset);
+}
+
+static u32 pvscsi_reg_read(const struct pvscsi_adapter *adapter, u32 offset)
+{
+	return readl(adapter->mmioBase + offset);
+}
+
+static u32 pvscsi_read_intr_status(const struct pvscsi_adapter *adapter)
+{
+	return pvscsi_reg_read(adapter, PVSCSI_REG_OFFSET_INTR_STATUS);
+}
+
+static void pvscsi_write_intr_status(const struct pvscsi_adapter *adapter,
+				     u32 val)
+{
+	pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_STATUS, val);
+}
+
+static void pvscsi_unmask_intr(const struct pvscsi_adapter *adapter)
+{
+	u32 intr_bits;
+
+	intr_bits = PVSCSI_INTR_CMPL_MASK;
+	if (adapter->use_msg)
+		intr_bits |= PVSCSI_INTR_MSG_MASK;
+
+	pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_MASK, intr_bits);
+}
+
+static void pvscsi_mask_intr(const struct pvscsi_adapter *adapter)
+{
+	pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_MASK, 0);
+}
+
+static void pvscsi_write_cmd_desc(const struct pvscsi_adapter *adapter,
+				  u32 cmd, const void *desc, size_t len)
+{
+	const u32 *ptr = desc;
+	size_t i;
+
+	len /= sizeof(*ptr);
+	pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_COMMAND, cmd);
+	for (i = 0; i < len; i++)
+		pvscsi_reg_write(adapter,
+				 PVSCSI_REG_OFFSET_COMMAND_DATA, ptr[i]);
+}
+
+static void pvscsi_abort_cmd(const struct pvscsi_adapter *adapter,
+			     const struct pvscsi_ctx *ctx)
+{
+	struct PVSCSICmdDescAbortCmd cmd = { 0 };
+
+	cmd.target = ctx->cmd->device->id;
+	cmd.context = pvscsi_map_context(adapter, ctx);
+
+	pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_ABORT_CMD, &cmd, sizeof(cmd));
+}
+
+static void pvscsi_kick_rw_io(const struct pvscsi_adapter *adapter)
+{
+	pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_KICK_RW_IO, 0);
+}
+
+static void pvscsi_process_request_ring(const struct pvscsi_adapter *adapter)
+{
+	pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_KICK_NON_RW_IO, 0);
+}
+
+static int scsi_is_rw(unsigned char op)
+{
+	return op == READ_6  || op == WRITE_6 ||
+	       op == READ_10 || op == WRITE_10 ||
+	       op == READ_12 || op == WRITE_12 ||
+	       op == READ_16 || op == WRITE_16;
+}
+
+static void pvscsi_kick_io(const struct pvscsi_adapter *adapter,
+			   unsigned char op)
+{
+	if (scsi_is_rw(op))
+		pvscsi_kick_rw_io(adapter);
+	else
+		pvscsi_process_request_ring(adapter);
+}
+
+static void ll_adapter_reset(const struct pvscsi_adapter *adapter)
+{
+	dev_dbg(pvscsi_dev(adapter), "Adapter Reset on %p\n", adapter);
+
+	pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_ADAPTER_RESET, NULL, 0);
+}
+
+static void ll_bus_reset(const struct pvscsi_adapter *adapter)
+{
+	dev_dbg(pvscsi_dev(adapter), "Reseting bus on %p\n", adapter);
+
+	pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_RESET_BUS, NULL, 0);
+}
+
+static void ll_device_reset(const struct pvscsi_adapter *adapter, u32 target)
+{
+	struct PVSCSICmdDescResetDevice cmd = { 0 };
+
+	dev_dbg(pvscsi_dev(adapter), "Reseting device: target=%u\n", target);
+
+	cmd.target = target;
+
+	pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_RESET_DEVICE,
+			      &cmd, sizeof(cmd));
+}
+
+static void pvscsi_create_sg(struct pvscsi_ctx *ctx,
+			     struct scatterlist *sg, unsigned count)
+{
+	unsigned i;
+	struct PVSCSISGElement *sge;
+
+	BUG_ON(count > PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT);
+
+	sge = &ctx->sgl->sge[0];
+	for (i = 0; i < count; i++, sg++) {
+		sge[i].addr   = sg_dma_address(sg);
+		sge[i].length = sg_dma_len(sg);
+		sge[i].flags  = 0;
+	}
+}
+
+/*
+ * Map all data buffers for a command into PCI space and
+ * setup the scatter/gather list if needed.
+ */
+static void pvscsi_map_buffers(struct pvscsi_adapter *adapter,
+			       struct pvscsi_ctx *ctx, struct scsi_cmnd *cmd,
+			       struct PVSCSIRingReqDesc *e)
+{
+	unsigned count;
+	unsigned bufflen = scsi_bufflen(cmd);
+	struct scatterlist *sg;
+
+	e->dataLen = bufflen;
+	e->dataAddr = 0;
+	if (bufflen == 0)
+		return;
+
+	sg = scsi_sglist(cmd);
+	count = scsi_sg_count(cmd);
+	if (count != 0) {
+		int segs = scsi_dma_map(cmd);
+		if (segs > 1) {
+			pvscsi_create_sg(ctx, sg, segs);
+
+			e->flags |= PVSCSI_FLAG_CMD_WITH_SG_LIST;
+			ctx->sglPA = pci_map_single(adapter->dev, ctx->sgl,
+						    SGL_SIZE, PCI_DMA_TODEVICE);
+			e->dataAddr = ctx->sglPA;
+		} else
+			e->dataAddr = sg_dma_address(sg);
+	} else {
+		/*
+		 * In case there is no S/G list, scsi_sglist points
+		 * directly to the buffer.
+		 */
+		ctx->dataPA = pci_map_single(adapter->dev, sg, bufflen,
+					     cmd->sc_data_direction);
+		e->dataAddr = ctx->dataPA;
+	}
+}
+
+static void pvscsi_unmap_buffers(const struct pvscsi_adapter *adapter,
+				 struct pvscsi_ctx *ctx)
+{
+	struct scsi_cmnd *cmd;
+	unsigned bufflen;
+
+	cmd = ctx->cmd;
+	bufflen = scsi_bufflen(cmd);
+
+	if (bufflen != 0) {
+		unsigned count = scsi_sg_count(cmd);
+
+		if (count != 0) {
+			scsi_dma_unmap(cmd);
+			if (ctx->sglPA) {
+				pci_unmap_single(adapter->dev, ctx->sglPA,
+						 SGL_SIZE, PCI_DMA_TODEVICE);
+				ctx->sglPA = 0;
+			}
+		} else
+			pci_unmap_single(adapter->dev, ctx->dataPA, bufflen,
+					 cmd->sc_data_direction);
+	}
+	if (cmd->sense_buffer)
+		pci_unmap_single(adapter->dev, ctx->sensePA,
+				 SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE);
+}
+
+static int __devinit pvscsi_allocate_rings(struct pvscsi_adapter *adapter)
+{
+	adapter->rings_state = pci_alloc_consistent(adapter->dev, PAGE_SIZE,
+						    &adapter->ringStatePA);
+	if (!adapter->rings_state)
+		return -ENOMEM;
+
+	adapter->req_pages = min(PVSCSI_MAX_NUM_PAGES_REQ_RING,
+				 pvscsi_ring_pages);
+	adapter->req_depth = adapter->req_pages
+					* PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
+	adapter->req_ring = pci_alloc_consistent(adapter->dev,
+						 adapter->req_pages * PAGE_SIZE,
+						 &adapter->reqRingPA);
+	if (!adapter->req_ring)
+		return -ENOMEM;
+
+	adapter->cmp_pages = min(PVSCSI_MAX_NUM_PAGES_CMP_RING,
+				 pvscsi_ring_pages);
+	adapter->cmp_ring = pci_alloc_consistent(adapter->dev,
+						 adapter->cmp_pages * PAGE_SIZE,
+						 &adapter->cmpRingPA);
+	if (!adapter->cmp_ring)
+		return -ENOMEM;
+
+	BUG_ON(!IS_ALIGNED(adapter->ringStatePA, PAGE_SIZE));
+	BUG_ON(!IS_ALIGNED(adapter->reqRingPA, PAGE_SIZE));
+	BUG_ON(!IS_ALIGNED(adapter->cmpRingPA, PAGE_SIZE));
+
+	if (!adapter->use_msg)
+		return 0;
+
+	adapter->msg_pages = min(PVSCSI_MAX_NUM_PAGES_MSG_RING,
+				 pvscsi_msg_ring_pages);
+	adapter->msg_ring = pci_alloc_consistent(adapter->dev,
+						 adapter->msg_pages * PAGE_SIZE,
+						 &adapter->msgRingPA);
+	if (!adapter->msg_ring)
+		return -ENOMEM;
+	BUG_ON(!IS_ALIGNED(adapter->msgRingPA, PAGE_SIZE));
+
+	return 0;
+}
+
+static void pvscsi_setup_all_rings(const struct pvscsi_adapter *adapter)
+{
+	struct PVSCSICmdDescSetupRings cmd = { 0 };
+	dma_addr_t base;
+	unsigned i;
+
+	cmd.ringsStatePPN   = adapter->ringStatePA >> PAGE_SHIFT;
+	cmd.reqRingNumPages = adapter->req_pages;
+	cmd.cmpRingNumPages = adapter->cmp_pages;
+
+	base = adapter->reqRingPA;
+	for (i = 0; i < adapter->req_pages; i++) {
+		cmd.reqRingPPNs[i] = base >> PAGE_SHIFT;
+		base += PAGE_SIZE;
+	}
+
+	base = adapter->cmpRingPA;
+	for (i = 0; i < adapter->cmp_pages; i++) {
+		cmd.cmpRingPPNs[i] = base >> PAGE_SHIFT;
+		base += PAGE_SIZE;
+	}
+
+	memset(adapter->rings_state, 0, PAGE_SIZE);
+	memset(adapter->req_ring, 0, adapter->req_pages * PAGE_SIZE);
+	memset(adapter->cmp_ring, 0, adapter->cmp_pages * PAGE_SIZE);
+
+	pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_SETUP_RINGS,
+			      &cmd, sizeof(cmd));
+
+	if (adapter->use_msg) {
+		struct PVSCSICmdDescSetupMsgRing cmd_msg = { 0 };
+
+		cmd_msg.numPages = adapter->msg_pages;
+
+		base = adapter->msgRingPA;
+		for (i = 0; i < adapter->msg_pages; i++) {
+			cmd_msg.ringPPNs[i] = base >> PAGE_SHIFT;
+			base += PAGE_SIZE;
+		}
+		memset(adapter->msg_ring, 0, adapter->msg_pages * PAGE_SIZE);
+
+		pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_SETUP_MSG_RING,
+				      &cmd_msg, sizeof(cmd_msg));
+	}
+}
+
+/*
+ * Pull a completion descriptor off and pass the completion back
+ * to the SCSI mid layer.
+ */
+static void pvscsi_complete_request(struct pvscsi_adapter *adapter,
+				    const struct PVSCSIRingCmpDesc *e)
+{
+	struct pvscsi_ctx *ctx;
+	struct scsi_cmnd *cmd;
+	u32 btstat = e->hostStatus;
+	u32 sdstat = e->scsiStatus;
+
+	ctx = pvscsi_get_context(adapter, e->context);
+	cmd = ctx->cmd;
+	pvscsi_unmap_buffers(adapter, ctx);
+	pvscsi_release_context(adapter, ctx);
+	cmd->result = 0;
+
+	if (sdstat != SAM_STAT_GOOD &&
+	    (btstat == BTSTAT_SUCCESS ||
+	     btstat == BTSTAT_LINKED_COMMAND_COMPLETED ||
+	     btstat == BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG)) {
+		cmd->result = (DID_OK << 16) | sdstat;
+		if (sdstat == SAM_STAT_CHECK_CONDITION && cmd->sense_buffer)
+			cmd->result |= (DRIVER_SENSE << 24);
+	} else
+		switch (btstat) {
+		case BTSTAT_SUCCESS:
+		case BTSTAT_LINKED_COMMAND_COMPLETED:
+		case BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG:
+			/* If everything went fine, let's move on. */
+			cmd->result = (DID_OK << 16);
+			break;
+
+		case BTSTAT_DATARUN:
+		case BTSTAT_DATA_UNDERRUN:
+			/* Report residual data in underruns */
+			scsi_set_resid(cmd, scsi_bufflen(cmd) - e->dataLen);
+			cmd->result = (DID_ERROR << 16);
+			break;
+
+		case BTSTAT_SELTIMEO:
+			/* Our emulation returns this for non-connected devs */
+			cmd->result = (DID_BAD_TARGET << 16);
+			break;
+
+		case BTSTAT_LUNMISMATCH:
+		case BTSTAT_TAGREJECT:
+		case BTSTAT_BADMSG:
+			cmd->result = (DRIVER_INVALID << 24);
+			/* fall through */
+
+		case BTSTAT_HAHARDWARE:
+		case BTSTAT_INVPHASE:
+		case BTSTAT_HATIMEOUT:
+		case BTSTAT_NORESPONSE:
+		case BTSTAT_DISCONNECT:
+		case BTSTAT_HASOFTWARE:
+		case BTSTAT_BUSFREE:
+		case BTSTAT_SENSFAILED:
+			cmd->result |= (DID_ERROR << 16);
+			break;
+
+		case BTSTAT_SENTRST:
+		case BTSTAT_RECVRST:
+		case BTSTAT_BUSRESET:
+			cmd->result = (DID_RESET << 16);
+			break;
+
+		case BTSTAT_ABORTQUEUE:
+			cmd->result = (DID_ABORT << 16);
+			break;
+
+		case BTSTAT_SCSIPARITY:
+			cmd->result = (DID_PARITY << 16);
+			break;
+
+		default:
+			cmd->result = (DID_ERROR << 16);
+			scmd_printk(KERN_DEBUG, cmd,
+				    "Unknown completion status: 0x%x\n",
+				    btstat);
+	}
+
+	dev_dbg(&cmd->device->sdev_gendev,
+		"cmd=%p %x ctx=%p result=0x%x status=0x%x,%x\n",
+		cmd, cmd->cmnd[0], ctx, cmd->result, btstat, sdstat);
+
+	cmd->scsi_done(cmd);
+}
+
+/*
+ * barrier usage : Since the PVSCSI device is emulated, there could be cases
+ * where we may want to serialize some accesses between the driver and the
+ * emulation layer. We use compiler barriers instead of the more expensive
+ * memory barriers because PVSCSI is only supported on X86 which has strong
+ * memory access ordering.
+ */
+static void pvscsi_process_completion_ring(struct pvscsi_adapter *adapter)
+{
+	struct PVSCSIRingsState *s = adapter->rings_state;
+	struct PVSCSIRingCmpDesc *ring = adapter->cmp_ring;
+	u32 cmp_entries = s->cmpNumEntriesLog2;
+
+	while (s->cmpConsIdx != s->cmpProdIdx) {
+		struct PVSCSIRingCmpDesc *e = ring + (s->cmpConsIdx &
+						      MASK(cmp_entries));
+		/*
+		 * This barrier() ensures that *e is not dereferenced while
+		 * the device emulation still writes data into the slot.
+		 * Since the device emulation advances s->cmpProdIdx only after
+		 * updating the slot we want to check it first.
+		 */
+		barrier();
+		pvscsi_complete_request(adapter, e);
+		/*
+		 * This barrier() ensures that compiler doesn't reorder write
+		 * to s->cmpConsIdx before the read of (*e) inside
+		 * pvscsi_complete_request. Otherwise, device emulation may
+		 * overwrite *e before we had a chance to read it.
+		 */
+		barrier();
+		s->cmpConsIdx++;
+	}
+}
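+
+/*
+ * Device-side ordering (assumed, for illustration): the emulation fills the
+ * slot at ring[cmpProdIdx & MASK(cmp_entries)] before advancing cmpProdIdx,
+ * so reading cmpProdIdx first and the slot second, with the compiler
+ * barrier in between, is sufficient on x86's strong memory model.
+ */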
+
+/*
+ * Translate a Linux SCSI request into a request ring entry.
+ */
+static int pvscsi_queue_ring(struct pvscsi_adapter *adapter,
+			     struct pvscsi_ctx *ctx, struct scsi_cmnd *cmd)
+{
+	struct PVSCSIRingsState *s;
+	struct PVSCSIRingReqDesc *e;
+	struct scsi_device *sdev;
+	u32 req_entries;
+
+	s = adapter->rings_state;
+	sdev = cmd->device;
+	req_entries = s->reqNumEntriesLog2;
+
+	/*
+	 * If this condition holds, we might have room on the request ring, but
+	 * we might not have room on the completion ring for the response.
+	 * However, we have already ruled out this possibility - we would not
+	 * have successfully allocated a context if it were true, since we only
+	 * have one context per request entry.  Check for it anyway, since it
+	 * would be a serious bug.
+	 */
+	if (s->reqProdIdx - s->cmpConsIdx >= 1 << req_entries) {
+		scmd_printk(KERN_ERR, cmd, "vmw_pvscsi: "
+			    "ring full: reqProdIdx=%d cmpConsIdx=%d\n",
+			    s->reqProdIdx, s->cmpConsIdx);
+		return -1;
+	}
+
+	e = adapter->req_ring + (s->reqProdIdx & MASK(req_entries));
+
+	e->bus    = sdev->channel;
+	e->target = sdev->id;
+	memset(e->lun, 0, sizeof(e->lun));
+	e->lun[1] = sdev->lun;
+
+	if (cmd->sense_buffer) {
+		ctx->sensePA = pci_map_single(adapter->dev, cmd->sense_buffer,
+					      SCSI_SENSE_BUFFERSIZE,
+					      PCI_DMA_FROMDEVICE);
+		e->senseAddr = ctx->sensePA;
+		e->senseLen = SCSI_SENSE_BUFFERSIZE;
+	} else {
+		e->senseLen  = 0;
+		e->senseAddr = 0;
+	}
+	e->cdbLen   = cmd->cmd_len;
+	e->vcpuHint = smp_processor_id();
+	memcpy(e->cdb, cmd->cmnd, e->cdbLen);
+
+	e->tag = SIMPLE_QUEUE_TAG;
+	if (sdev->tagged_supported &&
+	    (cmd->tag == HEAD_OF_QUEUE_TAG ||
+	     cmd->tag == ORDERED_QUEUE_TAG))
+		e->tag = cmd->tag;
+
+	if (cmd->sc_data_direction == DMA_FROM_DEVICE)
+		e->flags = PVSCSI_FLAG_CMD_DIR_TOHOST;
+	else if (cmd->sc_data_direction == DMA_TO_DEVICE)
+		e->flags = PVSCSI_FLAG_CMD_DIR_TODEVICE;
+	else if (cmd->sc_data_direction == DMA_NONE)
+		e->flags = PVSCSI_FLAG_CMD_DIR_NONE;
+	else
+		e->flags = 0;
+
+	pvscsi_map_buffers(adapter, ctx, cmd, e);
+
+	e->context = pvscsi_map_context(adapter, ctx);
+
+	barrier();
+
+	s->reqProdIdx++;
+
+	return 0;
+}
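+
+/*
+ * Indexing note (illustrative): reqProdIdx and cmpConsIdx are free-running
+ * counters; MASK() (presumably from vmw_pvscsi.h) reduces them to ring
+ * offsets, which requires the ring size to be a power of two. The fullness
+ * test above then works because unsigned wraparound makes the raw counter
+ * difference equal the number of outstanding requests.
+ */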
+
+static int pvscsi_queue(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
+{
+	struct Scsi_Host *host = cmd->device->host;
+	struct pvscsi_adapter *adapter = shost_priv(host);
+	struct pvscsi_ctx *ctx;
+	unsigned long flags;
+
+	spin_lock_irqsave(&adapter->hw_lock, flags);
+
+	ctx = pvscsi_acquire_context(adapter, cmd);
+	if (!ctx || pvscsi_queue_ring(adapter, ctx, cmd) != 0) {
+		if (ctx)
+			pvscsi_release_context(adapter, ctx);
+		spin_unlock_irqrestore(&adapter->hw_lock, flags);
+		return SCSI_MLQUEUE_HOST_BUSY;
+	}
+
+	cmd->scsi_done = done;
+
+	dev_dbg(&cmd->device->sdev_gendev,
+		"queued cmd %p, ctx %p, op=%x\n", cmd, ctx, cmd->cmnd[0]);
+
+	spin_unlock_irqrestore(&adapter->hw_lock, flags);
+
+	pvscsi_kick_io(adapter, cmd->cmnd[0]);
+
+	return 0;
+}
+
+static int pvscsi_abort(struct scsi_cmnd *cmd)
+{
+	struct pvscsi_adapter *adapter = shost_priv(cmd->device->host);
+	struct pvscsi_ctx *ctx;
+	unsigned long flags;
+
+	scmd_printk(KERN_DEBUG, cmd, "task abort on host %u, %p\n",
+		    adapter->host->host_no, cmd);
+
+	spin_lock_irqsave(&adapter->hw_lock, flags);
+
+	/*
+	 * Poll the completion ring first - we might be trying to abort
+	 * a command that is waiting to be dispatched in the completion ring.
+	 */
+	pvscsi_process_completion_ring(adapter);
+
+	/*
+	 * If there is no context for the command, it either already succeeded
+	 * or else was never properly issued.  Not our problem.
+	 */
+	ctx = pvscsi_find_context(adapter, cmd);
+	if (!ctx) {
+		scmd_printk(KERN_DEBUG, cmd, "Failed to abort cmd %p\n", cmd);
+		goto out;
+	}
+
+	pvscsi_abort_cmd(adapter, ctx);
+
+	pvscsi_process_completion_ring(adapter);
+
+out:
+	spin_unlock_irqrestore(&adapter->hw_lock, flags);
+	return SUCCESS;
+}
+
+/*
+ * Abort all outstanding requests.  This is only safe to use if the completion
+ * ring will never be walked again or the device has been reset, because it
+ * destroys the 1-1 mapping between context field passed to emulation and our
+ * request structure.
+ */
+static void pvscsi_reset_all(struct pvscsi_adapter *adapter)
+{
+	unsigned i;
+
+	for (i = 0; i < adapter->req_depth; i++) {
+		struct pvscsi_ctx *ctx = &adapter->cmd_map[i];
+		struct scsi_cmnd *cmd = ctx->cmd;
+		if (cmd) {
+			scmd_printk(KERN_ERR, cmd,
+				    "Forced reset on cmd %p\n", cmd);
+			pvscsi_unmap_buffers(adapter, ctx);
+			pvscsi_release_context(adapter, ctx);
+			cmd->result = (DID_RESET << 16);
+			cmd->scsi_done(cmd);
+		}
+	}
+}
+
+static int pvscsi_host_reset(struct scsi_cmnd *cmd)
+{
+	struct Scsi_Host *host = cmd->device->host;
+	struct pvscsi_adapter *adapter = shost_priv(host);
+	unsigned long flags;
+	bool use_msg;
+
+	scmd_printk(KERN_INFO, cmd, "SCSI Host reset\n");
+
+	spin_lock_irqsave(&adapter->hw_lock, flags);
+
+	use_msg = adapter->use_msg;
+
+	if (use_msg) {
+		adapter->use_msg = 0;
+		spin_unlock_irqrestore(&adapter->hw_lock, flags);
+
+		/*
+		 * Now that we know that the ISR won't add more work on the
+		 * workqueue we can safely flush any outstanding work.
+		 */
+		flush_workqueue(adapter->workqueue);
+		spin_lock_irqsave(&adapter->hw_lock, flags);
+	}
+
+	/*
+	 * We're going to tear down the entire ring structure and set it back
+	 * up, so we stall new requests until all completions are flushed and
+	 * the rings are back in place.
+	 */
+
+	pvscsi_process_request_ring(adapter);
+
+	ll_adapter_reset(adapter);
+
+	/*
+	 * Now process any completions.  Note we do this AFTER adapter reset,
+	 * which is strange, but stops races where completions get posted
+	 * between processing the ring and issuing the reset.  The backend will
+	 * not touch the ring memory after reset, so the immediately pre-reset
+	 * completion ring state is still valid.
+	 */
+	pvscsi_process_completion_ring(adapter);
+
+	pvscsi_reset_all(adapter);
+	adapter->use_msg = use_msg;
+	pvscsi_setup_all_rings(adapter);
+	pvscsi_unmask_intr(adapter);
+
+	spin_unlock_irqrestore(&adapter->hw_lock, flags);
+
+	return SUCCESS;
+}
+
+static int pvscsi_bus_reset(struct scsi_cmnd *cmd)
+{
+	struct Scsi_Host *host = cmd->device->host;
+	struct pvscsi_adapter *adapter = shost_priv(host);
+	unsigned long flags;
+
+	scmd_printk(KERN_INFO, cmd, "SCSI Bus reset\n");
+
+	/*
+	 * We don't want to queue new requests for this bus after
+	 * flushing all pending requests to emulation, since new
+	 * requests could then sneak in during this bus reset phase,
+	 * so take the lock now.
+	 */
+	spin_lock_irqsave(&adapter->hw_lock, flags);
+
+	pvscsi_process_request_ring(adapter);
+	ll_bus_reset(adapter);
+	pvscsi_process_completion_ring(adapter);
+
+	spin_unlock_irqrestore(&adapter->hw_lock, flags);
+
+	return SUCCESS;
+}
+
+static int pvscsi_device_reset(struct scsi_cmnd *cmd)
+{
+	struct Scsi_Host *host = cmd->device->host;
+	struct pvscsi_adapter *adapter = shost_priv(host);
+	unsigned long flags;
+
+	scmd_printk(KERN_INFO, cmd, "SCSI device reset on scsi%u:%u\n",
+		    host->host_no, cmd->device->id);
+
+	/*
+	 * We don't want to queue new requests for this device after flushing
+	 * all pending requests to emulation, since new requests could then
+	 * sneak in during this device reset phase, so take the lock now.
+	 */
+	spin_lock_irqsave(&adapter->hw_lock, flags);
+
+	pvscsi_process_request_ring(adapter);
+	ll_device_reset(adapter, cmd->device->id);
+	pvscsi_process_completion_ring(adapter);
+
+	spin_unlock_irqrestore(&adapter->hw_lock, flags);
+
+	return SUCCESS;
+}
+
+static struct scsi_host_template pvscsi_template;
+
+static const char *pvscsi_info(struct Scsi_Host *host)
+{
+	struct pvscsi_adapter *adapter = shost_priv(host);
+	static char buf[256];
+
+	sprintf(buf, "VMware PVSCSI storage adapter rev %d, req/cmp/msg rings: "
+		"%u/%u/%u pages, cmd_per_lun=%u", adapter->rev,
+		adapter->req_pages, adapter->cmp_pages, adapter->msg_pages,
+		pvscsi_template.cmd_per_lun);
+
+	return buf;
+}
+
+static struct scsi_host_template pvscsi_template = {
+	.module				= THIS_MODULE,
+	.name				= "VMware PVSCSI Host Adapter",
+	.proc_name			= "vmw_pvscsi",
+	.info				= pvscsi_info,
+	.queuecommand			= pvscsi_queue,
+	.this_id			= -1,
+	.sg_tablesize			= PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT,
+	.dma_boundary			= UINT_MAX,
+	.max_sectors			= 0xffff,
+	.use_clustering			= ENABLE_CLUSTERING,
+	.eh_abort_handler		= pvscsi_abort,
+	.eh_device_reset_handler	= pvscsi_device_reset,
+	.eh_bus_reset_handler		= pvscsi_bus_reset,
+	.eh_host_reset_handler		= pvscsi_host_reset,
+};
+
+static void pvscsi_process_msg(const struct pvscsi_adapter *adapter,
+			       const struct PVSCSIRingMsgDesc *e)
+{
+	struct PVSCSIRingsState *s = adapter->rings_state;
+	struct Scsi_Host *host = adapter->host;
+	struct scsi_device *sdev;
+
+	printk(KERN_INFO "vmw_pvscsi: msg type: 0x%x - MSG RING: %u/%u (%u) \n",
+	       e->type, s->msgProdIdx, s->msgConsIdx, s->msgNumEntriesLog2);
+
+	BUILD_BUG_ON(PVSCSI_MSG_LAST != 2);
+
+	if (e->type == PVSCSI_MSG_DEV_ADDED) {
+		struct PVSCSIMsgDescDevStatusChanged *desc;
+		desc = (struct PVSCSIMsgDescDevStatusChanged *)e;
+
+		printk(KERN_INFO
+		       "vmw_pvscsi: msg: device added at scsi%u:%u:%u\n",
+		       desc->bus, desc->target, desc->lun[1]);
+
+		if (!scsi_host_get(host))
+			return;
+
+		sdev = scsi_device_lookup(host, desc->bus, desc->target,
+					  desc->lun[1]);
+		if (sdev) {
+			printk(KERN_INFO "vmw_pvscsi: device already exists\n");
+			scsi_device_put(sdev);
+		} else
+			scsi_add_device(adapter->host, desc->bus,
+					desc->target, desc->lun[1]);
+
+		scsi_host_put(host);
+	} else if (e->type == PVSCSI_MSG_DEV_REMOVED) {
+		struct PVSCSIMsgDescDevStatusChanged *desc;
+		desc = (struct PVSCSIMsgDescDevStatusChanged *)e;
+
+		printk(KERN_INFO
+		       "vmw_pvscsi: msg: device removed at scsi%u:%u:%u\n",
+		       desc->bus, desc->target, desc->lun[1]);
+
+		if (!scsi_host_get(host))
+			return;
+
+		sdev = scsi_device_lookup(host, desc->bus, desc->target,
+					  desc->lun[1]);
+		if (sdev) {
+			scsi_remove_device(sdev);
+			scsi_device_put(sdev);
+		} else
+			printk(KERN_INFO
+			       "vmw_pvscsi: failed to lookup scsi%u:%u:%u\n",
+			       desc->bus, desc->target, desc->lun[1]);
+
+		scsi_host_put(host);
+	}
+}
+
+static int pvscsi_msg_pending(const struct pvscsi_adapter *adapter)
+{
+	struct PVSCSIRingsState *s = adapter->rings_state;
+
+	return s->msgProdIdx != s->msgConsIdx;
+}
+
+static void pvscsi_process_msg_ring(const struct pvscsi_adapter *adapter)
+{
+	struct PVSCSIRingsState *s = adapter->rings_state;
+	struct PVSCSIRingMsgDesc *ring = adapter->msg_ring;
+	u32 msg_entries = s->msgNumEntriesLog2;
+
+	while (pvscsi_msg_pending(adapter)) {
+		struct PVSCSIRingMsgDesc *e = ring + (s->msgConsIdx &
+						      MASK(msg_entries));
+
+		barrier();
+		pvscsi_process_msg(adapter, e);
+		barrier();
+		s->msgConsIdx++;
+	}
+}
+
+static void pvscsi_msg_workqueue_handler(struct work_struct *data)
+{
+	struct pvscsi_adapter *adapter;
+
+	adapter = container_of(data, struct pvscsi_adapter, work);
+
+	pvscsi_process_msg_ring(adapter);
+}
+
+static int pvscsi_setup_msg_workqueue(struct pvscsi_adapter *adapter)
+{
+	char name[32];
+
+	if (!pvscsi_use_msg)
+		return 0;
+
+	pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_COMMAND,
+			 PVSCSI_CMD_SETUP_MSG_RING);
+
+	if (pvscsi_reg_read(adapter, PVSCSI_REG_OFFSET_COMMAND_STATUS) == -1)
+		return 0;
+
+	snprintf(name, sizeof(name),
+		 "vmw_pvscsi_wq_%u", adapter->host->host_no);
+
+	adapter->workqueue = create_singlethread_workqueue(name);
+	if (!adapter->workqueue) {
+		printk(KERN_ERR "vmw_pvscsi: failed to create work queue\n");
+		return 0;
+	}
+	INIT_WORK(&adapter->work, pvscsi_msg_workqueue_handler);
+
+	return 1;
+}
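+
+/*
+ * Probe convention (as the check above implies): issuing SETUP_MSG_RING with
+ * no payload and reading back an all-ones COMMAND_STATUS is treated as "msg
+ * ring not supported", in which case the driver quietly runs without it.
+ */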
+
+static irqreturn_t pvscsi_isr(int irq, void *devp)
+{
+	struct pvscsi_adapter *adapter = devp;
+	int handled;
+
+	if (adapter->use_msi || adapter->use_msix)
+		handled = true;
+	else {
+		u32 val = pvscsi_read_intr_status(adapter);
+		handled = (val & PVSCSI_INTR_ALL_SUPPORTED) != 0;
+		if (handled)
+			pvscsi_write_intr_status(devp, val);
+	}
+
+	if (handled) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&adapter->hw_lock, flags);
+
+		pvscsi_process_completion_ring(adapter);
+		if (adapter->use_msg && pvscsi_msg_pending(adapter))
+			queue_work(adapter->workqueue, &adapter->work);
+
+		spin_unlock_irqrestore(&adapter->hw_lock, flags);
+	}
+
+	return IRQ_RETVAL(handled);
+}
+
+static void pvscsi_free_sgls(const struct pvscsi_adapter *adapter)
+{
+	struct pvscsi_ctx *ctx = adapter->cmd_map;
+	unsigned i;
+
+	for (i = 0; i < adapter->req_depth; ++i, ++ctx)
+		free_pages((unsigned long)ctx->sgl, get_order(SGL_SIZE));
+}
+
+static int pvscsi_setup_msix(const struct pvscsi_adapter *adapter, int *irq)
+{
+	struct msix_entry entry = { 0, PVSCSI_VECTOR_COMPLETION };
+	int ret;
+
+	ret = pci_enable_msix(adapter->dev, &entry, 1);
+	if (ret)
+		return ret;
+
+	*irq = entry.vector;
+
+	return 0;
+}
+
+static void pvscsi_shutdown_intr(struct pvscsi_adapter *adapter)
+{
+	if (adapter->irq) {
+		free_irq(adapter->irq, adapter);
+		adapter->irq = 0;
+	}
+	if (adapter->use_msi) {
+		pci_disable_msi(adapter->dev);
+		adapter->use_msi = 0;
+	} else if (adapter->use_msix) {
+		pci_disable_msix(adapter->dev);
+		adapter->use_msix = 0;
+	}
+}
+
+static void pvscsi_release_resources(struct pvscsi_adapter *adapter)
+{
+	pvscsi_shutdown_intr(adapter);
+
+	if (adapter->workqueue)
+		destroy_workqueue(adapter->workqueue);
+
+	if (adapter->mmioBase)
+		pci_iounmap(adapter->dev, adapter->mmioBase);
+
+	pci_release_regions(adapter->dev);
+
+	if (adapter->cmd_map) {
+		pvscsi_free_sgls(adapter);
+		kfree(adapter->cmd_map);
+	}
+
+	if (adapter->rings_state)
+		pci_free_consistent(adapter->dev, PAGE_SIZE,
+				    adapter->rings_state, adapter->ringStatePA);
+
+	if (adapter->req_ring)
+		pci_free_consistent(adapter->dev,
+				    adapter->req_pages * PAGE_SIZE,
+				    adapter->req_ring, adapter->reqRingPA);
+
+	if (adapter->cmp_ring)
+		pci_free_consistent(adapter->dev,
+				    adapter->cmp_pages * PAGE_SIZE,
+				    adapter->cmp_ring, adapter->cmpRingPA);
+
+	if (adapter->msg_ring)
+		pci_free_consistent(adapter->dev,
+				    adapter->msg_pages * PAGE_SIZE,
+				    adapter->msg_ring, adapter->msgRingPA);
+}
+
+/*
+ * Allocate scatter gather lists.
+ *
+ * These are statically allocated.  Trying to be clever was not worth it.
+ *
+ * Dynamic allocation can fail, and we can't go deep into the memory
+ * allocator, since we're a SCSI driver, and trying too hard to allocate
+ * memory might generate disk I/O.  We also don't want to fail disk I/O
+ * in that case because we can't get an allocation - the I/O could be
+ * trying to swap out data to free memory.  Since that is pathological,
+ * just use a statically allocated scatter list.
+ */
+static int __devinit pvscsi_allocate_sg(struct pvscsi_adapter *adapter)
+{
+	struct pvscsi_ctx *ctx;
+	int i;
+
+	ctx = adapter->cmd_map;
+	BUILD_BUG_ON(sizeof(struct pvscsi_sg_list) > SGL_SIZE);
+
+	for (i = 0; i < adapter->req_depth; ++i, ++ctx) {
+		ctx->sgl = (void *)__get_free_pages(GFP_KERNEL,
+						    get_order(SGL_SIZE));
+		ctx->sglPA = 0;
+		BUG_ON(!IS_ALIGNED(((unsigned long)ctx->sgl), PAGE_SIZE));
+		if (!ctx->sgl) {
+			for (; i >= 0; --i, --ctx) {
+				free_pages((unsigned long)ctx->sgl,
+					   get_order(SGL_SIZE));
+				ctx->sgl = NULL;
+			}
+			return -ENOMEM;
+		}
+	}
+
+	return 0;
+}
+
+static int __devinit pvscsi_probe(struct pci_dev *pdev,
+				  const struct pci_device_id *id)
+{
+	struct pvscsi_adapter *adapter;
+	struct Scsi_Host *host;
+	unsigned int i;
+	unsigned long flags = 0;
+	int error;
+
+	error = -ENODEV;
+
+	if (pci_enable_device(pdev))
+		return error;
+
+	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0 &&
+	    pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
+		printk(KERN_INFO "vmw_pvscsi: using 64bit dma\n");
+	} else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) == 0 &&
+		   pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) == 0) {
+		printk(KERN_INFO "vmw_pvscsi: using 32bit dma\n");
+	} else {
+		printk(KERN_ERR "vmw_pvscsi: failed to set DMA mask\n");
+		goto out_disable_device;
+	}
+
+	pvscsi_template.can_queue =
+		min(PVSCSI_MAX_NUM_PAGES_REQ_RING, pvscsi_ring_pages) *
+		PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
+	pvscsi_template.cmd_per_lun =
+		min(pvscsi_template.can_queue, pvscsi_cmd_per_lun);
+	host = scsi_host_alloc(&pvscsi_template, sizeof(struct pvscsi_adapter));
+	if (!host) {
+		printk(KERN_ERR "vmw_pvscsi: failed to allocate host\n");
+		goto out_disable_device;
+	}
+
+	adapter = shost_priv(host);
+	memset(adapter, 0, sizeof(*adapter));
+	adapter->dev  = pdev;
+	adapter->host = host;
+
+	spin_lock_init(&adapter->hw_lock);
+
+	host->max_channel = 0;
+	host->max_id      = 16;
+	host->max_lun     = 1;
+	host->max_cmd_len = 16;
+
+	adapter->rev = pdev->revision;
+
+	if (pci_request_regions(pdev, "vmw_pvscsi")) {
+		printk(KERN_ERR "vmw_pvscsi: pci memory selection failed\n");
+		goto out_free_host;
+	}
+
+	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+		if ((pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE_IO))
+			continue;
+
+		if (pci_resource_len(pdev, i) < PVSCSI_MEM_SPACE_SIZE)
+			continue;
+
+		break;
+	}
+
+	if (i == DEVICE_COUNT_RESOURCE) {
+		printk(KERN_ERR
+		       "vmw_pvscsi: adapter has no suitable MMIO region\n");
+		goto out_release_resources;
+	}
+
+	adapter->mmioBase = pci_iomap(pdev, i, PVSCSI_MEM_SPACE_SIZE);
+
+	if (!adapter->mmioBase) {
+		printk(KERN_ERR
+		       "vmw_pvscsi: can't iomap for BAR %d memsize %lu\n",
+		       i, PVSCSI_MEM_SPACE_SIZE);
+		goto out_release_resources;
+	}
+
+	pci_set_master(pdev);
+	pci_set_drvdata(pdev, host);
+
+	ll_adapter_reset(adapter);
+
+	adapter->use_msg = pvscsi_setup_msg_workqueue(adapter);
+
+	error = pvscsi_allocate_rings(adapter);
+	if (error) {
+		printk(KERN_ERR "vmw_pvscsi: unable to allocate ring memory\n");
+		goto out_release_resources;
+	}
+
+	/*
+	 * From this point on we should reset the adapter if anything goes
+	 * wrong.
+	 */
+	pvscsi_setup_all_rings(adapter);
+
+	adapter->cmd_map = kcalloc(adapter->req_depth,
+				   sizeof(struct pvscsi_ctx), GFP_KERNEL);
+	if (!adapter->cmd_map) {
+		printk(KERN_ERR "vmw_pvscsi: failed to allocate memory.\n");
+		error = -ENOMEM;
+		goto out_reset_adapter;
+	}
+
+	INIT_LIST_HEAD(&adapter->cmd_pool);
+	for (i = 0; i < adapter->req_depth; i++) {
+		struct pvscsi_ctx *ctx = adapter->cmd_map + i;
+		list_add(&ctx->list, &adapter->cmd_pool);
+	}
+
+	error = pvscsi_allocate_sg(adapter);
+	if (error) {
+		printk(KERN_ERR "vmw_pvscsi: unable to allocate s/g table\n");
+		goto out_reset_adapter;
+	}
+
+	if (!pvscsi_disable_msix &&
+	    pvscsi_setup_msix(adapter, &adapter->irq) == 0) {
+		printk(KERN_INFO "vmw_pvscsi: using MSI-X\n");
+		adapter->use_msix = 1;
+	} else if (!pvscsi_disable_msi && pci_enable_msi(pdev) == 0) {
+		printk(KERN_INFO "vmw_pvscsi: using MSI\n");
+		adapter->use_msi = 1;
+		adapter->irq = pdev->irq;
+	} else {
+		printk(KERN_INFO "vmw_pvscsi: using INTx\n");
+		adapter->irq = pdev->irq;
+		flags = IRQF_SHARED;
+	}
+
+	error = request_irq(adapter->irq, pvscsi_isr, flags,
+			    "vmw_pvscsi", adapter);
+	if (error) {
+		printk(KERN_ERR
+		       "vmw_pvscsi: unable to request IRQ: %d\n", error);
+		adapter->irq = 0;
+		goto out_reset_adapter;
+	}
+
+	error = scsi_add_host(host, &pdev->dev);
+	if (error) {
+		printk(KERN_ERR
+		       "vmw_pvscsi: scsi_add_host failed: %d\n", error);
+		goto out_reset_adapter;
+	}
+
+	dev_info(&pdev->dev, "VMware PVSCSI rev %d host #%u\n",
+		 adapter->rev, host->host_no);
+
+	pvscsi_unmask_intr(adapter);
+
+	scsi_scan_host(host);
+
+	return 0;
+
+out_reset_adapter:
+	ll_adapter_reset(adapter);
+out_release_resources:
+	pvscsi_release_resources(adapter);
+out_free_host:
+	scsi_host_put(host);
+out_disable_device:
+	pci_set_drvdata(pdev, NULL);
+	pci_disable_device(pdev);
+
+	return error;
+}
+
+static void __pvscsi_shutdown(struct pvscsi_adapter *adapter)
+{
+	pvscsi_mask_intr(adapter);
+
+	if (adapter->workqueue)
+		flush_workqueue(adapter->workqueue);
+
+	pvscsi_shutdown_intr(adapter);
+
+	pvscsi_process_request_ring(adapter);
+	pvscsi_process_completion_ring(adapter);
+	ll_adapter_reset(adapter);
+}
+
+static void pvscsi_shutdown(struct pci_dev *dev)
+{
+	struct Scsi_Host *host = pci_get_drvdata(dev);
+	struct pvscsi_adapter *adapter = shost_priv(host);
+
+	__pvscsi_shutdown(adapter);
+}
+
+static void pvscsi_remove(struct pci_dev *pdev)
+{
+	struct Scsi_Host *host = pci_get_drvdata(pdev);
+	struct pvscsi_adapter *adapter = shost_priv(host);
+
+	scsi_remove_host(host);
+
+	__pvscsi_shutdown(adapter);
+	pvscsi_release_resources(adapter);
+
+	scsi_host_put(host);
+
+	pci_set_drvdata(pdev, NULL);
+	pci_disable_device(pdev);
+}
+
+static struct pci_driver pvscsi_pci_driver = {
+	.name		= "vmw_pvscsi",
+	.id_table	= pvscsi_pci_tbl,
+	.probe		= pvscsi_probe,
+	.remove		= __devexit_p(pvscsi_remove),
+	.shutdown       = pvscsi_shutdown,
+};
+
+static int __init pvscsi_init(void)
+{
+	pr_info("%s - version %s\n",
+		PVSCSI_LINUX_DRIVER_DESC, PVSCSI_DRIVER_VERSION_STRING);
+	return pci_register_driver(&pvscsi_pci_driver);
+}
+
+static void __exit pvscsi_exit(void)
+{
+	pci_unregister_driver(&pvscsi_pci_driver);
+}
+
+module_init(pvscsi_init);
+module_exit(pvscsi_exit);
diff --git a/drivers/scsi/vmw_pvscsi.h b/drivers/scsi/vmw_pvscsi.h
new file mode 100644
index 000000000000..62e36e75715e
--- /dev/null
+++ b/drivers/scsi/vmw_pvscsi.h
@@ -0,0 +1,397 @@
+/*
+ * VMware PVSCSI header file
+ *
+ * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; version 2 of the License and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained by: Alok N Kataria <akataria@vmware.com>
+ *
+ */
+
+#ifndef _VMW_PVSCSI_H_
+#define _VMW_PVSCSI_H_
+
+#include <linux/types.h>
+
+#define PVSCSI_DRIVER_VERSION_STRING   "1.0.1.0-k"
+
+#define PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT 128
+
+#define MASK(n)        ((1 << (n)) - 1)        /* make an n-bit mask */
+
+#define PCI_VENDOR_ID_VMWARE		0x15AD
+#define PCI_DEVICE_ID_VMWARE_PVSCSI	0x07C0
+
+/*
+ * host adapter status/error codes
+ */
+enum HostBusAdapterStatus {
+	BTSTAT_SUCCESS       = 0x00,  /* CCB complete normally with no errors */
+	BTSTAT_LINKED_COMMAND_COMPLETED           = 0x0a,
+	BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG = 0x0b,
+	BTSTAT_DATA_UNDERRUN = 0x0c,
+	BTSTAT_SELTIMEO      = 0x11,  /* SCSI selection timeout */
+	BTSTAT_DATARUN       = 0x12,  /* data overrun/underrun */
+	BTSTAT_BUSFREE       = 0x13,  /* unexpected bus free */
+	BTSTAT_INVPHASE      = 0x14,  /* invalid bus phase or sequence requested by target */
+	BTSTAT_LUNMISMATCH   = 0x17,  /* linked CCB has different LUN from first CCB */
+	BTSTAT_SENSFAILED    = 0x1b,  /* auto request sense failed */
+	BTSTAT_TAGREJECT     = 0x1c,  /* SCSI II tagged queueing message rejected by target */
+	BTSTAT_BADMSG        = 0x1d,  /* unsupported message received by the host adapter */
+	BTSTAT_HAHARDWARE    = 0x20,  /* host adapter hardware failed */
+	BTSTAT_NORESPONSE    = 0x21,  /* target did not respond to SCSI ATN, sent a SCSI RST */
+	BTSTAT_SENTRST       = 0x22,  /* host adapter asserted a SCSI RST */
+	BTSTAT_RECVRST       = 0x23,  /* other SCSI devices asserted a SCSI RST */
+	BTSTAT_DISCONNECT    = 0x24,  /* target device reconnected improperly (w/o tag) */
+	BTSTAT_BUSRESET      = 0x25,  /* host adapter issued BUS device reset */
+	BTSTAT_ABORTQUEUE    = 0x26,  /* abort queue generated */
+	BTSTAT_HASOFTWARE    = 0x27,  /* host adapter software error */
+	BTSTAT_HATIMEOUT     = 0x30,  /* host adapter hardware timeout error */
+	BTSTAT_SCSIPARITY    = 0x34,  /* SCSI parity error detected */
+};
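+/*
+ * Illustrative sketch (not part of this patch): one plausible way to fold
+ * a few of the adapter status codes above into Linux SCSI host bytes
+ * (DID_* values come from <scsi/scsi.h>); the driver's actual completion
+ * path is authoritative, this helper is hypothetical.
+ */
+static inline u32 pvscsi_example_host_byte(u16 host_status)
+{
+	switch (host_status) {
+	case BTSTAT_SUCCESS:
+	case BTSTAT_LINKED_COMMAND_COMPLETED:
+	case BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG:
+		return DID_OK;
+	case BTSTAT_SELTIMEO:
+		return DID_NO_CONNECT;
+	case BTSTAT_SENTRST:
+	case BTSTAT_RECVRST:
+	case BTSTAT_BUSRESET:
+		return DID_RESET;
+	default:
+		return DID_ERROR;
+	}
+}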
+
+/*
+ * Register offsets.
+ *
+ * These registers are accessible both via i/o port space and memory-mapped i/o.
+ */
+
+enum PVSCSIRegOffset {
+	PVSCSI_REG_OFFSET_COMMAND        =    0x0,
+	PVSCSI_REG_OFFSET_COMMAND_DATA   =    0x4,
+	PVSCSI_REG_OFFSET_COMMAND_STATUS =    0x8,
+	PVSCSI_REG_OFFSET_LAST_STS_0     =  0x100,
+	PVSCSI_REG_OFFSET_LAST_STS_1     =  0x104,
+	PVSCSI_REG_OFFSET_LAST_STS_2     =  0x108,
+	PVSCSI_REG_OFFSET_LAST_STS_3     =  0x10c,
+	PVSCSI_REG_OFFSET_INTR_STATUS    = 0x100c,
+	PVSCSI_REG_OFFSET_INTR_MASK      = 0x2010,
+	PVSCSI_REG_OFFSET_KICK_NON_RW_IO = 0x3014,
+	PVSCSI_REG_OFFSET_DEBUG          = 0x3018,
+	PVSCSI_REG_OFFSET_KICK_RW_IO     = 0x4018,
+};
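+/*
+ * Illustrative sketch (not part of this patch): with the fixed offsets
+ * above, register access over the MMIO BAR is just a load or store at
+ * base + offset. 'iobase' stands for the void __iomem * returned by
+ * pci_iomap(); ioread32/iowrite32 come from <linux/io.h>.
+ */
+static inline u32 pvscsi_example_reg_read(void __iomem *iobase, u32 offset)
+{
+	return ioread32(iobase + offset);
+}
+
+static inline void pvscsi_example_reg_write(void __iomem *iobase, u32 offset,
+					    u32 val)
+{
+	iowrite32(val, iobase + offset);
+}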
+
+/*
+ * Virtual h/w commands.
+ */
+
+enum PVSCSICommands {
+	PVSCSI_CMD_FIRST             = 0, /* has to be first */
+
+	PVSCSI_CMD_ADAPTER_RESET     = 1,
+	PVSCSI_CMD_ISSUE_SCSI        = 2,
+	PVSCSI_CMD_SETUP_RINGS       = 3,
+	PVSCSI_CMD_RESET_BUS         = 4,
+	PVSCSI_CMD_RESET_DEVICE      = 5,
+	PVSCSI_CMD_ABORT_CMD         = 6,
+	PVSCSI_CMD_CONFIG            = 7,
+	PVSCSI_CMD_SETUP_MSG_RING    = 8,
+	PVSCSI_CMD_DEVICE_UNPLUG     = 9,
+
+	PVSCSI_CMD_LAST              = 10  /* has to be last */
+};
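+/*
+ * Illustrative sketch (not part of this patch): how a command from the
+ * enum above might be issued. The assumption here is that the command
+ * code goes to PVSCSI_REG_OFFSET_COMMAND and its descriptor, if any, is
+ * streamed as 32-bit words through PVSCSI_REG_OFFSET_COMMAND_DATA; all
+ * descriptors in this file are multiples of 4 bytes.
+ */
+static inline void pvscsi_example_write_cmd_desc(void __iomem *iobase, u32 cmd,
+						 const void *desc, size_t len)
+{
+	const u32 *ptr = desc;
+	size_t i;
+
+	iowrite32(cmd, iobase + PVSCSI_REG_OFFSET_COMMAND);
+	for (i = 0; i < len / sizeof(u32); i++)
+		iowrite32(ptr[i], iobase + PVSCSI_REG_OFFSET_COMMAND_DATA);
+}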
+
+/*
+ * Command descriptor for PVSCSI_CMD_RESET_DEVICE --
+ */
+
+struct PVSCSICmdDescResetDevice {
+	u32	target;
+	u8	lun[8];
+} __packed;
+
+/*
+ * Command descriptor for PVSCSI_CMD_ABORT_CMD --
+ *
+ * - currently does not support specifying the LUN.
+ * - _pad should be 0.
+ */
+
+struct PVSCSICmdDescAbortCmd {
+	u64	context;
+	u32	target;
+	u32	_pad;
+} __packed;
+
+/*
+ * Command descriptor for PVSCSI_CMD_SETUP_RINGS --
+ *
+ * Notes:
+ * - reqRingNumPages and cmpRingNumPages need to be a power of two.
+ * - reqRingNumPages and cmpRingNumPages need to be different from 0.
+ * - reqRingNumPages and cmpRingNumPages must not exceed
+ *   PVSCSI_SETUP_RINGS_MAX_NUM_PAGES (a validation sketch follows the
+ *   struct below).
+ */
+
+#define PVSCSI_SETUP_RINGS_MAX_NUM_PAGES        32
+struct PVSCSICmdDescSetupRings {
+	u32	reqRingNumPages;
+	u32	cmpRingNumPages;
+	u64	ringsStatePPN;
+	u64	reqRingPPNs[PVSCSI_SETUP_RINGS_MAX_NUM_PAGES];
+	u64	cmpRingPPNs[PVSCSI_SETUP_RINGS_MAX_NUM_PAGES];
+} __packed;
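+/*
+ * Illustrative sketch (not part of this patch): checking the constraints
+ * listed above before building the descriptor. is_power_of_2() is from
+ * <linux/log2.h>.
+ */
+static inline bool pvscsi_example_ring_sizes_valid(u32 req_pages,
+						   u32 cmp_pages)
+{
+	return req_pages && cmp_pages &&
+	       is_power_of_2(req_pages) && is_power_of_2(cmp_pages) &&
+	       req_pages <= PVSCSI_SETUP_RINGS_MAX_NUM_PAGES &&
+	       cmp_pages <= PVSCSI_SETUP_RINGS_MAX_NUM_PAGES;
+}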
+
+/*
+ * Command descriptor for PVSCSI_CMD_SETUP_MSG_RING --
+ *
+ * Notes:
+ * - this command was not supported in the initial revision of the h/w
+ *   interface. Before using it, you need to check that it is supported by
+ *   writing PVSCSI_CMD_SETUP_MSG_RING to the 'command' register, then
+ *   immediately reading the 'command status' register:
+ *       * a value of -1 means that the cmd is NOT supported,
+ *       * a value != -1 means that the cmd IS supported.
+ *   If it is supported, the 'command status' register should return:
+ *      sizeof(PVSCSICmdDescSetupMsgRing) / sizeof(u32).
+ *   (A sketch of this handshake follows the struct below.)
+ * - this command should be issued _after_ the usual SETUP_RINGS so that the
+ *   RingsState page is already set up. If not, the command is a nop.
+ * - numPages needs to be a power of two.
+ * - numPages needs to be different from 0.
+ * - _pad should be zero.
+ */
+
+#define PVSCSI_SETUP_MSG_RING_MAX_NUM_PAGES  16
+
+struct PVSCSICmdDescSetupMsgRing {
+	u32	numPages;
+	u32	_pad;
+	u64	ringPPNs[PVSCSI_SETUP_MSG_RING_MAX_NUM_PAGES];
+} __packed;
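+/*
+ * Illustrative sketch (not part of this patch) of the detection handshake
+ * described above: write the command with no payload, then read back the
+ * 'command status' register; -1 means the hypervisor does not know the
+ * command.
+ */
+static inline bool pvscsi_example_msg_ring_supported(void __iomem *iobase)
+{
+	iowrite32(PVSCSI_CMD_SETUP_MSG_RING,
+		  iobase + PVSCSI_REG_OFFSET_COMMAND);
+	return ioread32(iobase + PVSCSI_REG_OFFSET_COMMAND_STATUS) != (u32)-1;
+}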
+
+enum PVSCSIMsgType {
+	PVSCSI_MSG_DEV_ADDED          = 0,
+	PVSCSI_MSG_DEV_REMOVED        = 1,
+	PVSCSI_MSG_LAST               = 2,
+};
+
+/*
+ * Msg descriptor.
+ *
+ * sizeof(struct PVSCSIRingMsgDesc) == 128.
+ *
+ * - type is of type enum PVSCSIMsgType.
+ * - the content of args depends on the type of event being delivered.
+ */
+
+struct PVSCSIRingMsgDesc {
+	u32	type;
+	u32	args[31];
+} __packed;
+
+struct PVSCSIMsgDescDevStatusChanged {
+	u32	type;  /* PVSCSI_MSG_DEV _ADDED / _REMOVED */
+	u32	bus;
+	u32	target;
+	u8	lun[8];
+	u32	pad[27];
+} __packed;
+
+/*
+ * Rings state.
+ *
+ * - the fields:
+ *    . msgProdIdx,
+ *    . msgConsIdx,
+ *    . msgNumEntriesLog2,
+ *   .. are only used once the SETUP_MSG_RING cmd has been issued.
+ * - '_pad' helps to ensure that the msg-related fields are on their own
+ *   cache line.
+ *
+ * A ring-slot indexing sketch follows the struct below.
+ */
+
+struct PVSCSIRingsState {
+	u32	reqProdIdx;
+	u32	reqConsIdx;
+	u32	reqNumEntriesLog2;
+
+	u32	cmpProdIdx;
+	u32	cmpConsIdx;
+	u32	cmpNumEntriesLog2;
+
+	u8	_pad[104];
+
+	u32	msgProdIdx;
+	u32	msgConsIdx;
+	u32	msgNumEntriesLog2;
+} __packed;
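+/*
+ * Illustrative sketch (not part of this patch): the indices above are
+ * free-running counters; a ring slot is obtained by masking the index
+ * with the ring size, which is why the entry counts are kept as powers
+ * of two (stored as log2).
+ */
+static inline u32 pvscsi_example_ring_slot(u32 idx, u32 num_entries_log2)
+{
+	return idx & MASK(num_entries_log2);
+}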
+
+/*
+ * Request descriptor.
+ *
+ * sizeof(struct PVSCSIRingReqDesc) == 128.
+ *
+ * - context: a unique identifier of a command. It could normally be any
+ *   64-bit value; however, we currently store it in the serialNumber variable
+ *   of struct SCSI_Command, so we have the following restrictions due to the
+ *   way this field is handled in the vmkernel storage stack:
+ *    * this value can't be 0,
+ *    * the upper 32 bits need to be 0 since serialNumber is a u32.
+ *   Currently tracked as PR 292060.
+ * - dataLen: the total number of bytes that need to be transferred.
+ * - dataAddr:
+ *   * if PVSCSI_FLAG_CMD_WITH_SG_LIST is set: dataAddr is the PA of the first
+ *     s/g table segment; each s/g segment is entirely contained on a single
+ *     page of physical memory,
+ *   * if PVSCSI_FLAG_CMD_WITH_SG_LIST is NOT set, then dataAddr is the PA of
+ *     the buffer used for the DMA transfer,
+ * - flags (a direction-flag sketch follows the struct below):
+ *   * PVSCSI_FLAG_CMD_WITH_SG_LIST: see dataAddr above,
+ *   * PVSCSI_FLAG_CMD_DIR_NONE: no DMA involved,
+ *   * PVSCSI_FLAG_CMD_DIR_TOHOST: transfer from device to main memory,
+ *   * PVSCSI_FLAG_CMD_DIR_TODEVICE: transfer from main memory to device,
+ *   * PVSCSI_FLAG_CMD_OUT_OF_BAND_CDB: reserved to handle CDBs larger than
+ *     16 bytes. To be specified.
+ * - vcpuHint: vcpuId of the processor most likely to be waiting for the
+ *   completion of the i/o. For guest OSes that use lowest-priority message
+ *   delivery mode (such as Windows), we use this "hint" to deliver the
+ *   completion action to the proper vcpu. For now, we can use the vcpuId of
+ *   the processor that initiated the i/o as a likely candidate for the vcpu
+ *   that will be waiting for the completion.
+ * - bus should be 0: only bus 0 is supported for now.
+ * - unused should be zeroed.
+ */
+
+#define PVSCSI_FLAG_CMD_WITH_SG_LIST        (1 << 0)
+#define PVSCSI_FLAG_CMD_OUT_OF_BAND_CDB     (1 << 1)
+#define PVSCSI_FLAG_CMD_DIR_NONE            (1 << 2)
+#define PVSCSI_FLAG_CMD_DIR_TOHOST          (1 << 3)
+#define PVSCSI_FLAG_CMD_DIR_TODEVICE        (1 << 4)
+
+struct PVSCSIRingReqDesc {
+	u64	context;
+	u64	dataAddr;
+	u64	dataLen;
+	u64	senseAddr;
+	u32	senseLen;
+	u32	flags;
+	u8	cdb[16];
+	u8	cdbLen;
+	u8	lun[8];
+	u8	tag;
+	u8	bus;
+	u8	target;
+	u8	vcpuHint;
+	u8	unused[59];
+} __packed;
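+/*
+ * Illustrative sketch (not part of this patch): deriving the direction
+ * flags above from a DMA data direction (enum dma_data_direction, from
+ * <linux/dma-mapping.h>). Bidirectional transfers are left out of this
+ * sketch.
+ */
+static inline u32 pvscsi_example_dir_flags(enum dma_data_direction dir)
+{
+	switch (dir) {
+	case DMA_TO_DEVICE:
+		return PVSCSI_FLAG_CMD_DIR_TODEVICE;
+	case DMA_FROM_DEVICE:
+		return PVSCSI_FLAG_CMD_DIR_TOHOST;
+	default:
+		return PVSCSI_FLAG_CMD_DIR_NONE;	/* DMA_NONE */
+	}
+}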
+
+/*
+ * Scatter-gather list management.
+ *
+ * As described above, when PVSCSI_FLAG_CMD_WITH_SG_LIST is set in the
+ * RingReqDesc.flags, then RingReqDesc.dataAddr is the PA of the first s/g
+ * table segment.
+ *
+ * - each segment of the s/g table contains a succession of struct
+ *   PVSCSISGElement entries (a fill sketch follows the struct below).
+ * - each segment is entirely contained on a single physical page of memory.
+ * - a "chain" s/g element has the flag PVSCSI_SGE_FLAG_CHAIN_ELEMENT set in
+ *   PVSCSISGElement.flags and in this case:
+ *     * addr is the PA of the next s/g segment,
+ *     * length is undefined, assumed to be 0.
+ */
+
+struct PVSCSISGElement {
+	u64	addr;
+	u32	length;
+	u32	flags;
+} __packed;
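+/*
+ * Illustrative sketch (not part of this patch): filling one plain
+ * (non-chain) element of an s/g segment; flags stay 0 unless the element
+ * chains to another segment as described above.
+ */
+static inline void pvscsi_example_fill_sge(struct PVSCSISGElement *sge,
+					   u64 addr, u32 length)
+{
+	sge->addr   = addr;
+	sge->length = length;
+	sge->flags  = 0;
+}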
+
+/*
+ * Completion descriptor.
+ *
+ * sizeof(struct PVSCSIRingCmpDesc) == 32.
+ *
+ * - context: identifier of the command; the same value that was specified
+ *   under "context" in struct PVSCSIRingReqDesc at initiation time.
+ * - dataLen: number of bytes transferred for the actual i/o operation,
+ * - senseLen: number of bytes written into the sense buffer,
+ * - hostStatus: adapter status,
+ * - scsiStatus: device status,
+ * - _pad should be zero.
+ *
+ * A ring-drain sketch follows the struct below.
+ */
+
+struct PVSCSIRingCmpDesc {
+	u64	context;
+	u64	dataLen;
+	u32	senseLen;
+	u16	hostStatus;
+	u16	scsiStatus;
+	u32	_pad[2];
+} __packed;
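+/*
+ * Illustrative sketch (not part of this patch): draining the completion
+ * ring using the shared indices. 'ring' is the base of the completion
+ * ring pages; the handler callback is hypothetical, and the memory
+ * barriers a real consumer needs are omitted for brevity.
+ */
+static inline void
+pvscsi_example_drain_cmp_ring(struct PVSCSIRingsState *s,
+			      struct PVSCSIRingCmpDesc *ring,
+			      void (*handle)(const struct PVSCSIRingCmpDesc *))
+{
+	while (s->cmpConsIdx != s->cmpProdIdx) {
+		handle(&ring[s->cmpConsIdx & MASK(s->cmpNumEntriesLog2)]);
+		s->cmpConsIdx++;
+	}
+}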
+
+/*
+ * Interrupt status / IRQ bits.
+ */
+
+#define PVSCSI_INTR_CMPL_0                 (1 << 0)
+#define PVSCSI_INTR_CMPL_1                 (1 << 1)
+#define PVSCSI_INTR_CMPL_MASK              MASK(2)
+
+#define PVSCSI_INTR_MSG_0                  (1 << 2)
+#define PVSCSI_INTR_MSG_1                  (1 << 3)
+#define PVSCSI_INTR_MSG_MASK               (MASK(2) << 2)
+
+#define PVSCSI_INTR_ALL_SUPPORTED          MASK(4)
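+/*
+ * Illustrative sketch (not part of this patch): an interrupt handler would
+ * read the status register, write the pending bits back (assumed here to
+ * have write-to-clear semantics), and test them against the masks above.
+ */
+static inline bool pvscsi_example_intr_pending(void __iomem *iobase)
+{
+	u32 status = ioread32(iobase + PVSCSI_REG_OFFSET_INTR_STATUS);
+
+	iowrite32(status, iobase + PVSCSI_REG_OFFSET_INTR_STATUS);
+	return (status & PVSCSI_INTR_ALL_SUPPORTED) != 0;
+}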
+
+/*
+ * Number of MSI-X vectors supported.
+ */
+#define PVSCSI_MAX_INTRS        24
+
+/*
+ * Enumeration of supported MSI-X vectors
+ */
+#define PVSCSI_VECTOR_COMPLETION   0
+
+/*
+ * Misc constants for the rings.
+ */
+
+#define PVSCSI_MAX_NUM_PAGES_REQ_RING   PVSCSI_SETUP_RINGS_MAX_NUM_PAGES
+#define PVSCSI_MAX_NUM_PAGES_CMP_RING   PVSCSI_SETUP_RINGS_MAX_NUM_PAGES
+#define PVSCSI_MAX_NUM_PAGES_MSG_RING   PVSCSI_SETUP_MSG_RING_MAX_NUM_PAGES
+
+#define PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE \
+				(PAGE_SIZE / sizeof(struct PVSCSIRingReqDesc))
+
+#define PVSCSI_MAX_REQ_QUEUE_DEPTH \
+	(PVSCSI_MAX_NUM_PAGES_REQ_RING * PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE)
+
+#define PVSCSI_MEM_SPACE_COMMAND_NUM_PAGES     1
+#define PVSCSI_MEM_SPACE_INTR_STATUS_NUM_PAGES 1
+#define PVSCSI_MEM_SPACE_MISC_NUM_PAGES        2
+#define PVSCSI_MEM_SPACE_KICK_IO_NUM_PAGES     2
+#define PVSCSI_MEM_SPACE_MSIX_NUM_PAGES        2
+
+enum PVSCSIMemSpace {
+	PVSCSI_MEM_SPACE_COMMAND_PAGE		= 0,
+	PVSCSI_MEM_SPACE_INTR_STATUS_PAGE	= 1,
+	PVSCSI_MEM_SPACE_MISC_PAGE		= 2,
+	PVSCSI_MEM_SPACE_KICK_IO_PAGE		= 4,
+	PVSCSI_MEM_SPACE_MSIX_TABLE_PAGE	= 6,
+	PVSCSI_MEM_SPACE_MSIX_PBA_PAGE		= 7,
+};
+
+#define PVSCSI_MEM_SPACE_NUM_PAGES \
+	(PVSCSI_MEM_SPACE_COMMAND_NUM_PAGES +       \
+	 PVSCSI_MEM_SPACE_INTR_STATUS_NUM_PAGES +   \
+	 PVSCSI_MEM_SPACE_MISC_NUM_PAGES +          \
+	 PVSCSI_MEM_SPACE_KICK_IO_NUM_PAGES +       \
+	 PVSCSI_MEM_SPACE_MSIX_NUM_PAGES)
+
+#define PVSCSI_MEM_SPACE_SIZE        (PVSCSI_MEM_SPACE_NUM_PAGES * PAGE_SIZE)
+
+#endif /* _VMW_PVSCSI_H_ */