author	Linus Torvalds <torvalds@linux-foundation.org>	2017-09-07 21:11:05 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-09-07 21:11:05 -0700
commit	572c01ba19ef150e98aea0b45ca17d43356521b5 (patch)
tree	289381d051dfc34a86be988700ee11cb9ad0cd5b /drivers/scsi
parent	cef5d0f952a03d42051141742632078d488b0c6b (diff)
parent	2441500a41a9b17ff657626eb81972f62bc8cc5a (diff)
download	linux-572c01ba19ef150e98aea0b45ca17d43356521b5.tar.gz
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull SCSI updates from James Bottomley:
 "This is mostly updates of the usual suspects: lpfc, qla2xxx, hisi_sas,
  megaraid_sas, zfcp and a host of minor updates.

  The major driver change here is the elimination of the block based
  cciss driver in favour of the SCSI based hpsa driver (which now drives
  all the legacy cases cciss used to be required for). Plus a reset
  handler clean up and the redo of the SAS SMP handler to use bsg lib"
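
The "reset handler clean up" mentioned above shows up all through the diff below: drivers drop their .eh_bus_reset_handler and fold that work into .eh_host_reset_handler (see the 53c700, a2091 and a3000 hunks). A minimal sketch of such a conversion, using invented foo_* names rather than copying any one driver:

#include <linux/spinlock.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

/* Hypothetical low-level helpers a driver of this kind would already have. */
static void foo_internal_bus_reset(struct Scsi_Host *host);
static void foo_chip_reset(struct Scsi_Host *host);

/* The host-reset handler now performs the bus reset (and chip reset) that
 * the removed .eh_bus_reset_handler used to do, mirroring the 53c700 hunk.
 */
static int foo_host_reset(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;

	spin_lock_irq(host->host_lock);
	foo_internal_bus_reset(host);
	foo_chip_reset(host);
	spin_unlock_irq(host->host_lock);

	return SUCCESS;
}

static struct scsi_host_template foo_template = {
	/* .queuecommand, .eh_abort_handler, ... unchanged and elided */
	/* .eh_bus_reset_handler removed; error handling escalates to host reset */
	.eh_host_reset_handler	= foo_host_reset,
};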

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (279 commits)
  scsi: scsi-mq: Always unprepare before requeuing a request
  scsi: Show .retries and .jiffies_at_alloc in debugfs
  scsi: Improve requeuing behavior
  scsi: Call scsi_initialize_rq() for filesystem requests
  scsi: qla2xxx: Reset the logo flag, after target re-login.
  scsi: qla2xxx: Fix slow mem alloc behind lock
  scsi: qla2xxx: Clear fc4f_nvme flag
  scsi: qla2xxx: add missing includes for qla_isr
  scsi: qla2xxx: Fix an integer overflow in sysfs code
  scsi: aacraid: report -ENOMEM to upper layer from aac_convert_sgraw2()
  scsi: aacraid: get rid of one level of indentation
  scsi: aacraid: fix indentation errors
  scsi: storvsc: fix memory leak on ring buffer busy
  scsi: scsi_transport_sas: switch to bsg-lib for SMP passthrough
  scsi: smartpqi: remove the smp_handler stub
  scsi: hpsa: remove the smp_handler stub
  scsi: bsg-lib: pass the release callback through bsg_setup_queue
  scsi: Rework handling of scsi_device.vpd_pg8[03]
  scsi: Rework the code for caching Vital Product Data (VPD)
  scsi: rcu: Introduce rcu_swap_protected()
  ...
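
Several commits near the top of this list (the VPD caching rework and "scsi: rcu: Introduce rcu_swap_protected()") share one idiom: publish a freshly allocated buffer through an RCU-protected pointer and retire the displaced buffer only after a grace period. A rough sketch of that idiom follows; the macro body is a reconstruction from its description, and struct vpd_buf / install_vpd() are invented illustration names, not the actual scsi_device fields touched by the patches:

#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Reconstructed sketch of the helper added by "rcu: Introduce
 * rcu_swap_protected()": swap an RCU-managed pointer with a plain one
 * while the update-side lock named in the lockdep condition 'c' is held.
 */
#define rcu_swap_protected(rcu_ptr, ptr, c)				\
do {									\
	typeof(ptr) __tmp = rcu_dereference_protected((rcu_ptr), (c));	\
	rcu_assign_pointer((rcu_ptr), (ptr));				\
	(ptr) = __tmp;							\
} while (0)

/* Hypothetical caller modelled on the VPD caching rework: publish the new
 * buffer, then free the old one only after all RCU readers are done.
 */
struct vpd_buf {
	struct rcu_head	rcu;
	int		len;
	unsigned char	data[];
};

static void install_vpd(struct vpd_buf __rcu **slot, struct vpd_buf *new_buf,
			struct mutex *lock)
{
	mutex_lock(lock);
	rcu_swap_protected(*slot, new_buf, lockdep_is_held(lock));
	mutex_unlock(lock);
	if (new_buf)	/* now points at the previously installed buffer */
		kfree_rcu(new_buf, rcu);
}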
Diffstat (limited to 'drivers/scsi')
-rw-r--r--  drivers/scsi/53c700.c | 23
-rw-r--r--  drivers/scsi/NCR5380.c | 4
-rw-r--r--  drivers/scsi/a2091.c | 17
-rw-r--r--  drivers/scsi/a3000.c | 17
-rw-r--r--  drivers/scsi/aacraid/aachba.c | 325
-rw-r--r--  drivers/scsi/aacraid/aacraid.h | 1
-rw-r--r--  drivers/scsi/aacraid/comminit.c | 6
-rw-r--r--  drivers/scsi/aacraid/commsup.c | 3
-rw-r--r--  drivers/scsi/aacraid/linit.c | 320
-rw-r--r--  drivers/scsi/aha152x.c | 13
-rw-r--r--  drivers/scsi/aha1542.c | 2
-rw-r--r--  drivers/scsi/aic7xxx/Makefile | 6
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_reg.h_shipped | 1251
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_reg_print.c_shipped | 34
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_core.c | 1
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_osm.c | 5
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_osm.h | 1
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_reg.h_shipped | 44
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_init.c | 6
-rw-r--r--  drivers/scsi/arcmsr/arcmsr_attr.c | 6
-rw-r--r--  drivers/scsi/arm/acornscsi.c | 11
-rw-r--r--  drivers/scsi/arm/cumana_1.c | 2
-rw-r--r--  drivers/scsi/arm/oak.c | 2
-rw-r--r--  drivers/scsi/atari_scsi.c | 6
-rw-r--r--  drivers/scsi/be2iscsi/be_iscsi.c | 6
-rw-r--r--  drivers/scsi/be2iscsi/be_main.c | 6
-rw-r--r--  drivers/scsi/be2iscsi/be_main.h | 2
-rw-r--r--  drivers/scsi/bfa/bfad_im.c | 37
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc.h | 1
-rw-r--r--  drivers/scsi/ch.c | 22
-rw-r--r--  drivers/scsi/csiostor/csio_hw.h | 2
-rw-r--r--  drivers/scsi/csiostor/csio_init.c | 11
-rw-r--r--  drivers/scsi/csiostor/csio_scsi.c | 6
-rw-r--r--  drivers/scsi/cxgbi/libcxgbi.c | 27
-rw-r--r--  drivers/scsi/cxlflash/main.c | 3
-rw-r--r--  drivers/scsi/cxlflash/superpipe.c | 14
-rw-r--r--  drivers/scsi/cxlflash/vlun.c | 6
-rw-r--r--  drivers/scsi/dmx3191d.c | 2
-rw-r--r--  drivers/scsi/dpt_i2o.c | 5
-rw-r--r--  drivers/scsi/eata.c | 9
-rw-r--r--  drivers/scsi/esas2r/esas2r_main.c | 2
-rw-r--r--  drivers/scsi/esp_scsi.c | 53
-rw-r--r--  drivers/scsi/esp_scsi.h | 1
-rw-r--r--  drivers/scsi/fcoe/fcoe_sysfs.c | 4
-rw-r--r--  drivers/scsi/fdomain.c | 6
-rw-r--r--  drivers/scsi/fdomain.h | 2
-rw-r--r--  drivers/scsi/fnic/fnic.h | 2
-rw-r--r--  drivers/scsi/fnic/fnic_scsi.c | 4
-rw-r--r--  drivers/scsi/g_NCR5380.c | 283
-rw-r--r--  drivers/scsi/gdth.c | 2
-rw-r--r--  drivers/scsi/gdth_proc.c | 2
-rw-r--r--  drivers/scsi/gvp11.c | 18
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas.h | 18
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_main.c | 198
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v2_hw.c | 605
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | 237
-rw-r--r--  drivers/scsi/hpsa.c | 117
-rw-r--r--  drivers/scsi/hpsa.h | 81
-rw-r--r--  drivers/scsi/hptiop.c | 11
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.c | 8
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.c | 2
-rw-r--r--  drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c | 2
-rw-r--r--  drivers/scsi/imm.c | 1
-rw-r--r--  drivers/scsi/isci/init.c | 2
-rw-r--r--  drivers/scsi/iscsi_tcp.c | 2
-rw-r--r--  drivers/scsi/libfc/fc_fcp.c | 2
-rw-r--r--  drivers/scsi/libiscsi.c | 2
-rw-r--r--  drivers/scsi/libsas/Kconfig | 1
-rw-r--r--  drivers/scsi/libsas/sas_ata.c | 1
-rw-r--r--  drivers/scsi/libsas/sas_expander.c | 70
-rw-r--r--  drivers/scsi/libsas/sas_host_smp.c | 106
-rw-r--r--  drivers/scsi/libsas/sas_internal.h | 12
-rw-r--r--  drivers/scsi/libsas/sas_scsi_host.c | 15
-rw-r--r--  drivers/scsi/lpfc/lpfc.h | 19
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c | 52
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.h | 10
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.c | 9
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h | 5
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c | 4
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.c | 11
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.h | 6
-rw-r--r--  drivers/scsi/lpfc/lpfc_disc.h | 1
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c | 93
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c | 12
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw.h | 18
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw4.h | 23
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 73
-rw-r--r--  drivers/scsi/lpfc/lpfc_mbox.c | 35
-rw-r--r--  drivers/scsi/lpfc/lpfc_mem.c | 90
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c | 25
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvme.c | 31
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvmet.c | 279
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvmet.h | 14
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 12
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 62
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli4.h | 23
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h | 2
-rw-r--r--  drivers/scsi/mac53c94.c | 13
-rw-r--r--  drivers/scsi/mac_esp.c | 37
-rw-r--r--  drivers/scsi/mac_scsi.c | 4
-rw-r--r--  drivers/scsi/megaraid.c | 6
-rw-r--r--  drivers/scsi/megaraid/megaraid_mbox.c | 32
-rw-r--r--  drivers/scsi/megaraid/megaraid_mm.c | 29
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.h | 5
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_base.c | 110
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fp.c | 40
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fusion.c | 216
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_base.c | 85
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_base.h | 2
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_transport.c | 230
-rw-r--r--  drivers/scsi/mvme147.c | 16
-rw-r--r--  drivers/scsi/mvsas/mv_init.c | 12
-rw-r--r--  drivers/scsi/mvsas/mv_sas.c | 6
-rw-r--r--  drivers/scsi/nsp32.c | 22
-rw-r--r--  drivers/scsi/osst.c | 8
-rw-r--r--  drivers/scsi/pcmcia/fdomain_stub.c | 2
-rw-r--r--  drivers/scsi/pcmcia/qlogic_stub.c | 4
-rw-r--r--  drivers/scsi/pm8001/pm8001_init.c | 7
-rw-r--r--  drivers/scsi/pmcraid.c | 17
-rw-r--r--  drivers/scsi/pmcraid.h | 2
-rw-r--r--  drivers/scsi/ppa.c | 1
-rw-r--r--  drivers/scsi/qedf/qedf.h | 2
-rw-r--r--  drivers/scsi/qedf/qedf_fip.c | 35
-rw-r--r--  drivers/scsi/qedf/qedf_main.c | 113
-rw-r--r--  drivers/scsi/qedf/qedf_version.h | 6
-rw-r--r--  drivers/scsi/qedi/qedi.h | 5
-rw-r--r--  drivers/scsi/qedi/qedi_iscsi.c | 2
-rw-r--r--  drivers/scsi/qedi/qedi_main.c | 12
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c | 86
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.c | 10
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h | 118
-rw-r--r--  drivers/scsi/qla2xxx/qla_fw.h | 28
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h | 22
-rw-r--r--  drivers/scsi/qla2xxx/qla_gs.c | 18
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 183
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c | 55
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 78
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c | 263
-rw-r--r--  drivers/scsi/qla2xxx/qla_mid.c | 14
-rw-r--r--  drivers/scsi/qla2xxx/qla_mr.c | 7
-rw-r--r--  drivers/scsi/qla2xxx/qla_nvme.c | 161
-rw-r--r--  drivers/scsi/qla2xxx/qla_nvme.h | 17
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 155
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.c | 29
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.h | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_tmpl.c | 19
-rw-r--r--  drivers/scsi/qla2xxx/qla_version.h | 2
-rw-r--r--  drivers/scsi/qlogicfas.c | 2
-rw-r--r--  drivers/scsi/qlogicfas408.c | 6
-rw-r--r--  drivers/scsi/qlogicfas408.h | 2
-rw-r--r--  drivers/scsi/qlogicpti.c | 2
-rw-r--r--  drivers/scsi/scsi.c | 140
-rw-r--r--  drivers/scsi/scsi_debug.c | 3
-rw-r--r--  drivers/scsi/scsi_debugfs.c | 4
-rw-r--r--  drivers/scsi/scsi_error.c | 10
-rw-r--r--  drivers/scsi/scsi_ioctl.c | 4
-rw-r--r--  drivers/scsi/scsi_lib.c | 131
-rw-r--r--  drivers/scsi/scsi_priv.h | 2
-rw-r--r--  drivers/scsi/scsi_scan.c | 2
-rw-r--r--  drivers/scsi/scsi_sysfs.c | 33
-rw-r--r--  drivers/scsi/scsi_transport_fc.c | 37
-rw-r--r--  drivers/scsi/scsi_transport_iscsi.c | 6
-rw-r--r--  drivers/scsi/scsi_transport_sas.c | 121
-rw-r--r--  drivers/scsi/scsi_transport_srp.c | 7
-rw-r--r--  drivers/scsi/sd.c | 7
-rw-r--r--  drivers/scsi/ses.c | 62
-rw-r--r--  drivers/scsi/sg.c | 22
-rw-r--r--  drivers/scsi/sgiwd93.c | 15
-rw-r--r--  drivers/scsi/smartpqi/smartpqi.h | 44
-rw-r--r--  drivers/scsi/smartpqi/smartpqi_init.c | 145
-rw-r--r--  drivers/scsi/smartpqi/smartpqi_sas_transport.c | 9
-rw-r--r--  drivers/scsi/smartpqi/smartpqi_sis.c | 111
-rw-r--r--  drivers/scsi/smartpqi/smartpqi_sis.h | 4
-rw-r--r--  drivers/scsi/sr.c | 2
-rw-r--r--  drivers/scsi/storvsc_drv.c | 2
-rw-r--r--  drivers/scsi/sun3_scsi.c | 4
-rw-r--r--  drivers/scsi/sym53c8xx_2/sym_hipd.c | 2
-rw-r--r--  drivers/scsi/ufs/ufshcd.c | 2
-rw-r--r--  drivers/scsi/virtio_scsi.c | 4
-rw-r--r--  drivers/scsi/wd33c93.c | 2
180 files changed, 4469 insertions, 3874 deletions
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
index 4b3b08025ef6..6be77b3aa8a5 100644
--- a/drivers/scsi/53c700.c
+++ b/drivers/scsi/53c700.c
@@ -168,7 +168,6 @@ MODULE_LICENSE("GPL");
 
 STATIC int NCR_700_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *);
 STATIC int NCR_700_abort(struct scsi_cmnd * SCpnt);
-STATIC int NCR_700_bus_reset(struct scsi_cmnd * SCpnt);
 STATIC int NCR_700_host_reset(struct scsi_cmnd * SCpnt);
 STATIC void NCR_700_chip_setup(struct Scsi_Host *host);
 STATIC void NCR_700_chip_reset(struct Scsi_Host *host);
@@ -315,7 +314,6 @@ NCR_700_detect(struct scsi_host_template *tpnt,
 	/* Fill in the missing routines from the host template */
 	tpnt->queuecommand = NCR_700_queuecommand;
 	tpnt->eh_abort_handler = NCR_700_abort;
-	tpnt->eh_bus_reset_handler = NCR_700_bus_reset;
 	tpnt->eh_host_reset_handler = NCR_700_host_reset;
 	tpnt->can_queue = NCR_700_COMMAND_SLOTS_PER_HOST;
 	tpnt->sg_tablesize = NCR_700_SG_SEGMENTS;
@@ -1938,14 +1936,14 @@ NCR_700_abort(struct scsi_cmnd * SCp)
 }
 
 STATIC int
-NCR_700_bus_reset(struct scsi_cmnd * SCp)
+NCR_700_host_reset(struct scsi_cmnd * SCp)
 {
 	DECLARE_COMPLETION_ONSTACK(complete);
 	struct NCR_700_Host_Parameters *hostdata = 
 		(struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
 
 	scmd_printk(KERN_INFO, SCp,
-		"New error handler wants BUS reset, cmd %p\n\t", SCp);
+		"New error handler wants HOST reset, cmd %p\n\t", SCp);
 	scsi_print_command(SCp);
 
 	/* In theory, eh_complete should always be null because the
@@ -1960,6 +1958,7 @@ NCR_700_bus_reset(struct scsi_cmnd * SCp)
 
 	hostdata->eh_complete = &complete;
 	NCR_700_internal_bus_reset(SCp->device->host);
+	NCR_700_chip_reset(SCp->device->host);
 
 	spin_unlock_irq(SCp->device->host->host_lock);
 	wait_for_completion(&complete);
@@ -1974,22 +1973,6 @@ NCR_700_bus_reset(struct scsi_cmnd * SCp)
 	return SUCCESS;
 }
 
-STATIC int
-NCR_700_host_reset(struct scsi_cmnd * SCp)
-{
-	scmd_printk(KERN_INFO, SCp, "New error handler wants HOST reset\n\t");
-	scsi_print_command(SCp);
-
-	spin_lock_irq(SCp->device->host->host_lock);
-
-	NCR_700_internal_bus_reset(SCp->device->host);
-	NCR_700_chip_reset(SCp->device->host);
-
-	spin_unlock_irq(SCp->device->host->host_lock);
-
-	return SUCCESS;
-}
-
 STATIC void
 NCR_700_set_period(struct scsi_target *STp, int period)
 {
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index acc33440bca0..8a0812221d72 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -2296,13 +2296,13 @@ out:
 
 
 /**
- * NCR5380_bus_reset - reset the SCSI bus
+ * NCR5380_host_reset - reset the SCSI host
  * @cmd: SCSI command undergoing EH
  *
  * Returns SUCCESS
  */
 
-static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
+static int NCR5380_host_reset(struct scsi_cmnd *cmd)
 {
 	struct Scsi_Host *instance = cmd->device->host;
 	struct NCR5380_hostdata *hostdata = shost_priv(instance);
diff --git a/drivers/scsi/a2091.c b/drivers/scsi/a2091.c
index 9176bfbd5745..61aadc7acb49 100644
--- a/drivers/scsi/a2091.c
+++ b/drivers/scsi/a2091.c
@@ -147,22 +147,6 @@ static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
 	}
 }
 
-static int a2091_bus_reset(struct scsi_cmnd *cmd)
-{
-	struct Scsi_Host *instance = cmd->device->host;
-
-	/* FIXME perform bus-specific reset */
-
-	/* FIXME 2: kill this function, and let midlayer fall back
-	   to the same action, calling wd33c93_host_reset() */
-
-	spin_lock_irq(instance->host_lock);
-	wd33c93_host_reset(cmd);
-	spin_unlock_irq(instance->host_lock);
-
-	return SUCCESS;
-}
-
 static struct scsi_host_template a2091_scsi_template = {
 	.module			= THIS_MODULE,
 	.name			= "Commodore A2091/A590 SCSI",
@@ -171,7 +155,6 @@ static struct scsi_host_template a2091_scsi_template = {
 	.proc_name		= "A2901",
 	.queuecommand		= wd33c93_queuecommand,
 	.eh_abort_handler	= wd33c93_abort,
-	.eh_bus_reset_handler	= a2091_bus_reset,
 	.eh_host_reset_handler	= wd33c93_host_reset,
 	.can_queue		= CAN_QUEUE,
 	.this_id		= 7,
diff --git a/drivers/scsi/a3000.c b/drivers/scsi/a3000.c
index e6375b4de79e..2427a8541247 100644
--- a/drivers/scsi/a3000.c
+++ b/drivers/scsi/a3000.c
@@ -162,22 +162,6 @@ static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
 	}
 }
 
-static int a3000_bus_reset(struct scsi_cmnd *cmd)
-{
-	struct Scsi_Host *instance = cmd->device->host;
-
-	/* FIXME perform bus-specific reset */
-
-	/* FIXME 2: kill this entire function, which should
-	   cause mid-layer to call wd33c93_host_reset anyway? */
-
-	spin_lock_irq(instance->host_lock);
-	wd33c93_host_reset(cmd);
-	spin_unlock_irq(instance->host_lock);
-
-	return SUCCESS;
-}
-
 static struct scsi_host_template amiga_a3000_scsi_template = {
 	.module			= THIS_MODULE,
 	.name			= "Amiga 3000 built-in SCSI",
@@ -186,7 +170,6 @@ static struct scsi_host_template amiga_a3000_scsi_template = {
 	.proc_name		= "A3000",
 	.queuecommand		= wd33c93_queuecommand,
 	.eh_abort_handler	= wd33c93_abort,
-	.eh_bus_reset_handler	= a3000_bus_reset,
 	.eh_host_reset_handler	= wd33c93_host_reset,
 	.can_queue		= CAN_QUEUE,
 	.this_id		= 7,
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index a1a2c71e1626..a64285ab0728 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -594,6 +594,7 @@ static int aac_get_container_name(struct scsi_cmnd * scsicmd)
 
 	aac_fib_init(cmd_fibcontext);
 	dinfo = (struct aac_get_name *) fib_data(cmd_fibcontext);
+	scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
 
 	dinfo->command = cpu_to_le32(VM_ContainerConfig);
 	dinfo->type = cpu_to_le32(CT_READ_NAME);
@@ -611,10 +612,8 @@ static int aac_get_container_name(struct scsi_cmnd * scsicmd)
 	/*
 	 *	Check that the command queued to the controller
 	 */
-	if (status == -EINPROGRESS) {
-		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+	if (status == -EINPROGRESS)
 		return 0;
-	}
 
 	printk(KERN_WARNING "aac_get_container_name: aac_fib_send failed with status: %d.\n", status);
 	aac_fib_complete(cmd_fibcontext);
@@ -725,6 +724,7 @@ static void _aac_probe_container1(void * context, struct fib * fibptr)
 
 	dinfo->count = cpu_to_le32(scmd_id(scsicmd));
 	dinfo->type = cpu_to_le32(FT_FILESYS);
+	scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
 
 	status = aac_fib_send(ContainerCommand,
 			  fibptr,
@@ -736,9 +736,7 @@ static void _aac_probe_container1(void * context, struct fib * fibptr)
 	/*
 	 *	Check that the command queued to the controller
 	 */
-	if (status == -EINPROGRESS)
-		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
-	else if (status < 0) {
+	if (status < 0 && status != -EINPROGRESS) {
 		/* Inherit results from VM_NameServe, if any */
 		dresp->status = cpu_to_le32(ST_OK);
 		_aac_probe_container2(context, fibptr);
@@ -766,6 +764,7 @@ static int _aac_probe_container(struct scsi_cmnd * scsicmd, int (*callback)(stru
 		dinfo->count = cpu_to_le32(scmd_id(scsicmd));
 		dinfo->type = cpu_to_le32(FT_FILESYS);
 		scsicmd->SCp.ptr = (char *)callback;
+		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
 
 		status = aac_fib_send(ContainerCommand,
 			  fibptr,
@@ -777,10 +776,9 @@ static int _aac_probe_container(struct scsi_cmnd * scsicmd, int (*callback)(stru
 		/*
 		 *	Check that the command queued to the controller
 		 */
-		if (status == -EINPROGRESS) {
-			scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+		if (status == -EINPROGRESS)
 			return 0;
-		}
+
 		if (status < 0) {
 			scsicmd->SCp.ptr = NULL;
 			aac_fib_complete(fibptr);
@@ -1126,6 +1124,7 @@ static int aac_get_container_serial(struct scsi_cmnd * scsicmd)
 	dinfo->command = cpu_to_le32(VM_ContainerConfig);
 	dinfo->type = cpu_to_le32(CT_CID_TO_32BITS_UID);
 	dinfo->cid = cpu_to_le32(scmd_id(scsicmd));
+	scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
 
 	status = aac_fib_send(ContainerCommand,
 		  cmd_fibcontext,
@@ -1138,10 +1137,8 @@ static int aac_get_container_serial(struct scsi_cmnd * scsicmd)
 	/*
 	 *	Check that the command queued to the controller
 	 */
-	if (status == -EINPROGRESS) {
-		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+	if (status == -EINPROGRESS)
 		return 0;
-	}
 
 	printk(KERN_WARNING "aac_get_container_serial: aac_fib_send failed with status: %d.\n", status);
 	aac_fib_complete(cmd_fibcontext);
@@ -2335,16 +2332,14 @@ static int aac_read(struct scsi_cmnd * scsicmd)
 	 *	Alocate and initialize a Fib
 	 */
 	cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
-
+	scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
 	status = aac_adapter_read(cmd_fibcontext, scsicmd, lba, count);
 
 	/*
 	 *	Check that the command queued to the controller
 	 */
-	if (status == -EINPROGRESS) {
-		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+	if (status == -EINPROGRESS)
 		return 0;
-	}
 
 	printk(KERN_WARNING "aac_read: aac_fib_send failed with status: %d.\n", status);
 	/*
@@ -2429,16 +2424,14 @@ static int aac_write(struct scsi_cmnd * scsicmd)
 	 *	Allocate and initialize a Fib then setup a BlockWrite command
 	 */
 	cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
-
+	scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
 	status = aac_adapter_write(cmd_fibcontext, scsicmd, lba, count, fua);
 
 	/*
 	 *	Check that the command queued to the controller
 	 */
-	if (status == -EINPROGRESS) {
-		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+	if (status == -EINPROGRESS)
 		return 0;
-	}
 
 	printk(KERN_WARNING "aac_write: aac_fib_send failed with status: %d\n", status);
 	/*
@@ -2588,6 +2581,7 @@ static int aac_synchronize(struct scsi_cmnd *scsicmd)
 	synchronizecmd->cid = cpu_to_le32(scmd_id(scsicmd));
 	synchronizecmd->count =
 	     cpu_to_le32(sizeof(((struct aac_synchronize_reply *)NULL)->data));
+	scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
 
 	/*
 	 *	Now send the Fib to the adapter
@@ -2603,10 +2597,8 @@ static int aac_synchronize(struct scsi_cmnd *scsicmd)
 	/*
 	 *	Check that the command queued to the controller
 	 */
-	if (status == -EINPROGRESS) {
-		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+	if (status == -EINPROGRESS)
 		return 0;
-	}
 
 	printk(KERN_WARNING
 		"aac_synchronize: aac_fib_send failed with status: %d.\n", status);
@@ -2666,6 +2658,7 @@ static int aac_start_stop(struct scsi_cmnd *scsicmd)
 	pmcmd->cid = cpu_to_le32(sdev_id(sdev));
 	pmcmd->parm = (scsicmd->cmnd[1] & 1) ?
 		cpu_to_le32(CT_PM_UNIT_IMMEDIATE) : 0;
+	scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
 
 	/*
 	 *	Now send the Fib to the adapter
@@ -2681,10 +2674,8 @@ static int aac_start_stop(struct scsi_cmnd *scsicmd)
 	/*
 	 *	Check that the command queued to the controller
 	 */
-	if (status == -EINPROGRESS) {
-		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+	if (status == -EINPROGRESS)
 		return 0;
-	}
 
 	aac_fib_complete(cmd_fibcontext);
 	aac_fib_free(cmd_fibcontext);
@@ -3692,16 +3683,14 @@ static int aac_send_srb_fib(struct scsi_cmnd* scsicmd)
 	 *	Allocate and initialize a Fib then setup a BlockWrite command
 	 */
 	cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
-
+	scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
 	status = aac_adapter_scsi(cmd_fibcontext, scsicmd);
 
 	/*
 	 *	Check that the command queued to the controller
 	 */
-	if (status == -EINPROGRESS) {
-		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+	if (status == -EINPROGRESS)
 		return 0;
-	}
 
 	printk(KERN_WARNING "aac_srb: aac_fib_send failed with status: %d\n", status);
 	aac_fib_complete(cmd_fibcontext);
@@ -3739,15 +3728,14 @@ static int aac_send_hba_fib(struct scsi_cmnd *scsicmd)
 	if (!cmd_fibcontext)
 		return -1;
 
+	scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
 	status = aac_adapter_hba(cmd_fibcontext, scsicmd);
 
 	/*
 	 *	Check that the command queued to the controller
 	 */
-	if (status == -EINPROGRESS) {
-		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+	if (status == -EINPROGRESS)
 		return 0;
-	}
 
 	pr_warn("aac_hba_cmd_req: aac_fib_send failed with status: %d\n",
 		status);
@@ -3763,6 +3751,8 @@ static long aac_build_sg(struct scsi_cmnd *scsicmd, struct sgmap *psg)
 	struct aac_dev *dev;
 	unsigned long byte_count = 0;
 	int nseg;
+	struct scatterlist *sg;
+	int i;
 
 	dev = (struct aac_dev *)scsicmd->device->host->hostdata;
 	// Get rid of old data
@@ -3771,32 +3761,29 @@ static long aac_build_sg(struct scsi_cmnd *scsicmd, struct sgmap *psg)
 	psg->sg[0].count = 0;
 
 	nseg = scsi_dma_map(scsicmd);
-	if (nseg < 0)
+	if (nseg <= 0)
 		return nseg;
-	if (nseg) {
-		struct scatterlist *sg;
-		int i;
 
-		psg->count = cpu_to_le32(nseg);
+	psg->count = cpu_to_le32(nseg);
 
-		scsi_for_each_sg(scsicmd, sg, nseg, i) {
-			psg->sg[i].addr = cpu_to_le32(sg_dma_address(sg));
-			psg->sg[i].count = cpu_to_le32(sg_dma_len(sg));
-			byte_count += sg_dma_len(sg);
-		}
-		/* hba wants the size to be exact */
-		if (byte_count > scsi_bufflen(scsicmd)) {
-			u32 temp = le32_to_cpu(psg->sg[i-1].count) -
-				(byte_count - scsi_bufflen(scsicmd));
-			psg->sg[i-1].count = cpu_to_le32(temp);
-			byte_count = scsi_bufflen(scsicmd);
-		}
-		/* Check for command underflow */
-		if(scsicmd->underflow && (byte_count < scsicmd->underflow)){
-			printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n",
-					byte_count, scsicmd->underflow);
-		}
+	scsi_for_each_sg(scsicmd, sg, nseg, i) {
+		psg->sg[i].addr = cpu_to_le32(sg_dma_address(sg));
+		psg->sg[i].count = cpu_to_le32(sg_dma_len(sg));
+		byte_count += sg_dma_len(sg);
 	}
+	/* hba wants the size to be exact */
+	if (byte_count > scsi_bufflen(scsicmd)) {
+		u32 temp = le32_to_cpu(psg->sg[i-1].count) -
+			(byte_count - scsi_bufflen(scsicmd));
+		psg->sg[i-1].count = cpu_to_le32(temp);
+		byte_count = scsi_bufflen(scsicmd);
+	}
+	/* Check for command underflow */
+	if (scsicmd->underflow && (byte_count < scsicmd->underflow)) {
+		printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n",
+		       byte_count, scsicmd->underflow);
+	}
+
 	return byte_count;
 }
 
@@ -3807,6 +3794,8 @@ static long aac_build_sg64(struct scsi_cmnd *scsicmd, struct sgmap64 *psg)
 	unsigned long byte_count = 0;
 	u64 addr;
 	int nseg;
+	struct scatterlist *sg;
+	int i;
 
 	dev = (struct aac_dev *)scsicmd->device->host->hostdata;
 	// Get rid of old data
@@ -3816,34 +3805,31 @@ static long aac_build_sg64(struct scsi_cmnd *scsicmd, struct sgmap64 *psg)
 	psg->sg[0].count = 0;
 
 	nseg = scsi_dma_map(scsicmd);
-	if (nseg < 0)
+	if (nseg <= 0)
 		return nseg;
-	if (nseg) {
-		struct scatterlist *sg;
-		int i;
-
-		scsi_for_each_sg(scsicmd, sg, nseg, i) {
-			int count = sg_dma_len(sg);
-			addr = sg_dma_address(sg);
-			psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
-			psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
-			psg->sg[i].count = cpu_to_le32(count);
-			byte_count += count;
-		}
-		psg->count = cpu_to_le32(nseg);
-		/* hba wants the size to be exact */
-		if (byte_count > scsi_bufflen(scsicmd)) {
-			u32 temp = le32_to_cpu(psg->sg[i-1].count) -
-				(byte_count - scsi_bufflen(scsicmd));
-			psg->sg[i-1].count = cpu_to_le32(temp);
-			byte_count = scsi_bufflen(scsicmd);
-		}
-		/* Check for command underflow */
-		if(scsicmd->underflow && (byte_count < scsicmd->underflow)){
-			printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n",
-					byte_count, scsicmd->underflow);
-		}
+
+	scsi_for_each_sg(scsicmd, sg, nseg, i) {
+		int count = sg_dma_len(sg);
+		addr = sg_dma_address(sg);
+		psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
+		psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
+		psg->sg[i].count = cpu_to_le32(count);
+		byte_count += count;
+	}
+	psg->count = cpu_to_le32(nseg);
+	/* hba wants the size to be exact */
+	if (byte_count > scsi_bufflen(scsicmd)) {
+		u32 temp = le32_to_cpu(psg->sg[i-1].count) -
+			(byte_count - scsi_bufflen(scsicmd));
+		psg->sg[i-1].count = cpu_to_le32(temp);
+		byte_count = scsi_bufflen(scsicmd);
+	}
+	/* Check for command underflow */
+	if (scsicmd->underflow && (byte_count < scsicmd->underflow)) {
+		printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n",
+		       byte_count, scsicmd->underflow);
 	}
+
 	return byte_count;
 }
 
@@ -3851,6 +3837,8 @@ static long aac_build_sgraw(struct scsi_cmnd *scsicmd, struct sgmapraw *psg)
 {
 	unsigned long byte_count = 0;
 	int nseg;
+	struct scatterlist *sg;
+	int i;
 
 	// Get rid of old data
 	psg->count = 0;
@@ -3862,37 +3850,34 @@ static long aac_build_sgraw(struct scsi_cmnd *scsicmd, struct sgmapraw *psg)
 	psg->sg[0].flags = 0;
 
 	nseg = scsi_dma_map(scsicmd);
-	if (nseg < 0)
+	if (nseg <= 0)
 		return nseg;
-	if (nseg) {
-		struct scatterlist *sg;
-		int i;
-
-		scsi_for_each_sg(scsicmd, sg, nseg, i) {
-			int count = sg_dma_len(sg);
-			u64 addr = sg_dma_address(sg);
-			psg->sg[i].next = 0;
-			psg->sg[i].prev = 0;
-			psg->sg[i].addr[1] = cpu_to_le32((u32)(addr>>32));
-			psg->sg[i].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff));
-			psg->sg[i].count = cpu_to_le32(count);
-			psg->sg[i].flags = 0;
-			byte_count += count;
-		}
-		psg->count = cpu_to_le32(nseg);
-		/* hba wants the size to be exact */
-		if (byte_count > scsi_bufflen(scsicmd)) {
-			u32 temp = le32_to_cpu(psg->sg[i-1].count) -
-				(byte_count - scsi_bufflen(scsicmd));
-			psg->sg[i-1].count = cpu_to_le32(temp);
-			byte_count = scsi_bufflen(scsicmd);
-		}
-		/* Check for command underflow */
-		if(scsicmd->underflow && (byte_count < scsicmd->underflow)){
-			printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n",
-					byte_count, scsicmd->underflow);
-		}
+
+	scsi_for_each_sg(scsicmd, sg, nseg, i) {
+		int count = sg_dma_len(sg);
+		u64 addr = sg_dma_address(sg);
+		psg->sg[i].next = 0;
+		psg->sg[i].prev = 0;
+		psg->sg[i].addr[1] = cpu_to_le32((u32)(addr>>32));
+		psg->sg[i].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff));
+		psg->sg[i].count = cpu_to_le32(count);
+		psg->sg[i].flags = 0;
+		byte_count += count;
 	}
+	psg->count = cpu_to_le32(nseg);
+	/* hba wants the size to be exact */
+	if (byte_count > scsi_bufflen(scsicmd)) {
+		u32 temp = le32_to_cpu(psg->sg[i-1].count) -
+			(byte_count - scsi_bufflen(scsicmd));
+		psg->sg[i-1].count = cpu_to_le32(temp);
+		byte_count = scsi_bufflen(scsicmd);
+	}
+	/* Check for command underflow */
+	if (scsicmd->underflow && (byte_count < scsicmd->underflow)) {
+		printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n",
+		       byte_count, scsicmd->underflow);
+	}
+
 	return byte_count;
 }
 
@@ -3901,75 +3886,77 @@ static long aac_build_sgraw2(struct scsi_cmnd *scsicmd,
 {
 	unsigned long byte_count = 0;
 	int nseg;
+	struct scatterlist *sg;
+	int i, conformable = 0;
+	u32 min_size = PAGE_SIZE, cur_size;
 
 	nseg = scsi_dma_map(scsicmd);
-	if (nseg < 0)
+	if (nseg <= 0)
 		return nseg;
-	if (nseg) {
-		struct scatterlist *sg;
-		int i, conformable = 0;
-		u32 min_size = PAGE_SIZE, cur_size;
-
-		scsi_for_each_sg(scsicmd, sg, nseg, i) {
-			int count = sg_dma_len(sg);
-			u64 addr = sg_dma_address(sg);
-
-			BUG_ON(i >= sg_max);
-			rio2->sge[i].addrHigh = cpu_to_le32((u32)(addr>>32));
-			rio2->sge[i].addrLow = cpu_to_le32((u32)(addr & 0xffffffff));
-			cur_size = cpu_to_le32(count);
-			rio2->sge[i].length = cur_size;
-			rio2->sge[i].flags = 0;
-			if (i == 0) {
-				conformable = 1;
-				rio2->sgeFirstSize = cur_size;
-			} else if (i == 1) {
-				rio2->sgeNominalSize = cur_size;
+
+	scsi_for_each_sg(scsicmd, sg, nseg, i) {
+		int count = sg_dma_len(sg);
+		u64 addr = sg_dma_address(sg);
+
+		BUG_ON(i >= sg_max);
+		rio2->sge[i].addrHigh = cpu_to_le32((u32)(addr>>32));
+		rio2->sge[i].addrLow = cpu_to_le32((u32)(addr & 0xffffffff));
+		cur_size = cpu_to_le32(count);
+		rio2->sge[i].length = cur_size;
+		rio2->sge[i].flags = 0;
+		if (i == 0) {
+			conformable = 1;
+			rio2->sgeFirstSize = cur_size;
+		} else if (i == 1) {
+			rio2->sgeNominalSize = cur_size;
+			min_size = cur_size;
+		} else if ((i+1) < nseg && cur_size != rio2->sgeNominalSize) {
+			conformable = 0;
+			if (cur_size < min_size)
 				min_size = cur_size;
-			} else if ((i+1) < nseg && cur_size != rio2->sgeNominalSize) {
-				conformable = 0;
-				if (cur_size < min_size)
-					min_size = cur_size;
-			}
-			byte_count += count;
 		}
+		byte_count += count;
+	}
 
-		/* hba wants the size to be exact */
-		if (byte_count > scsi_bufflen(scsicmd)) {
-			u32 temp = le32_to_cpu(rio2->sge[i-1].length) -
-				(byte_count - scsi_bufflen(scsicmd));
-			rio2->sge[i-1].length = cpu_to_le32(temp);
-			byte_count = scsi_bufflen(scsicmd);
-		}
+	/* hba wants the size to be exact */
+	if (byte_count > scsi_bufflen(scsicmd)) {
+		u32 temp = le32_to_cpu(rio2->sge[i-1].length) -
+			(byte_count - scsi_bufflen(scsicmd));
+		rio2->sge[i-1].length = cpu_to_le32(temp);
+		byte_count = scsi_bufflen(scsicmd);
+	}
 
-		rio2->sgeCnt = cpu_to_le32(nseg);
-		rio2->flags |= cpu_to_le16(RIO2_SG_FORMAT_IEEE1212);
-		/* not conformable: evaluate required sg elements */
-		if (!conformable) {
-			int j, nseg_new = nseg, err_found;
-			for (i = min_size / PAGE_SIZE; i >= 1; --i) {
-				err_found = 0;
-				nseg_new = 2;
-				for (j = 1; j < nseg - 1; ++j) {
-					if (rio2->sge[j].length % (i*PAGE_SIZE)) {
-						err_found = 1;
-						break;
-					}
-					nseg_new += (rio2->sge[j].length / (i*PAGE_SIZE));
-				}
-				if (!err_found)
+	rio2->sgeCnt = cpu_to_le32(nseg);
+	rio2->flags |= cpu_to_le16(RIO2_SG_FORMAT_IEEE1212);
+	/* not conformable: evaluate required sg elements */
+	if (!conformable) {
+		int j, nseg_new = nseg, err_found;
+		for (i = min_size / PAGE_SIZE; i >= 1; --i) {
+			err_found = 0;
+			nseg_new = 2;
+			for (j = 1; j < nseg - 1; ++j) {
+				if (rio2->sge[j].length % (i*PAGE_SIZE)) {
+					err_found = 1;
 					break;
+				}
+				nseg_new += (rio2->sge[j].length / (i*PAGE_SIZE));
 			}
-			if (i > 0 && nseg_new <= sg_max)
-				aac_convert_sgraw2(rio2, i, nseg, nseg_new);
-		} else
-			rio2->flags |= cpu_to_le16(RIO2_SGL_CONFORMANT);
+			if (!err_found)
+				break;
+		}
+		if (i > 0 && nseg_new <= sg_max) {
+			int ret = aac_convert_sgraw2(rio2, i, nseg, nseg_new);
 
-		/* Check for command underflow */
-		if (scsicmd->underflow && (byte_count < scsicmd->underflow)) {
-			printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n",
-					byte_count, scsicmd->underflow);
+			if (ret < 0)
+				return ret;
 		}
+	} else
+		rio2->flags |= cpu_to_le16(RIO2_SGL_CONFORMANT);
+
+	/* Check for command underflow */
+	if (scsicmd->underflow && (byte_count < scsicmd->underflow)) {
+		printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n",
+		       byte_count, scsicmd->underflow);
 	}
 
 	return byte_count;
@@ -3986,7 +3973,7 @@ static int aac_convert_sgraw2(struct aac_raw_io2 *rio2, int pages, int nseg, int
 
 	sge = kmalloc(nseg_new * sizeof(struct sge_ieee1212), GFP_ATOMIC);
 	if (sge == NULL)
-		return -1;
+		return -ENOMEM;
 
 	for (i = 1, pos = 1; i < nseg-1; ++i) {
 		for (j = 0; j < rio2->sge[i].length / (pages * PAGE_SIZE); ++j) {
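
The aachba.c hunks above all apply one fix: scsicmd->SCp.phase is set to AAC_OWNER_FIRMWARE before the fib is handed to the adapter, instead of only after aac_fib_send()/aac_adapter_*() has returned -EINPROGRESS, which closes the window in which an already-completed command could still be seen with stale ownership. A condensed sketch of the new ordering (aac_adapter_read() is one of the real dispatch helpers used above; the surrounding body is illustrative, not a verbatim hunk):

static int aac_read_sketch(struct scsi_cmnd *scsicmd, struct fib *fib,
			   u64 lba, u32 count)
{
	int status;

	/* Claim firmware ownership *before* queueing, so a command that
	 * completes immediately never observes a stale phase.
	 */
	scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
	status = aac_adapter_read(fib, scsicmd, lba, count);
	if (status == -EINPROGRESS)
		return 0;

	/* The old code set SCp.phase only inside the -EINPROGRESS branch,
	 * i.e. potentially after the adapter had already completed the fib
	 * on another CPU. Error completion of the scsi_cmnd back to the
	 * midlayer is omitted here for brevity.
	 */
	printk(KERN_WARNING "aac_read: aac_fib_send failed with status: %d.\n",
	       status);
	aac_fib_complete(fib);
	return -1;
}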
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index ee2667e20e42..92fabf2b0c24 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -1723,6 +1723,7 @@ struct aac_dev
 #define FIB_CONTEXT_FLAG_FASTRESP		(0x00000008)
 #define FIB_CONTEXT_FLAG_NATIVE_HBA		(0x00000010)
 #define FIB_CONTEXT_FLAG_NATIVE_HBA_TMF	(0x00000020)
+#define FIB_CONTEXT_FLAG_SCSI_CMD	(0x00000040)
 
 /*
  *	Define the command values
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 9ee025b1d0e0..97d269f16888 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -520,9 +520,9 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
 			dev->raw_io_64 = 1;
 		dev->sync_mode = aac_sync_mode;
 		if (dev->a_ops.adapter_comm &&
-			(status[1] & AAC_OPT_NEW_COMM)) {
-				dev->comm_interface = AAC_COMM_MESSAGE;
-				dev->raw_io_interface = 1;
+		    (status[1] & AAC_OPT_NEW_COMM)) {
+			dev->comm_interface = AAC_COMM_MESSAGE;
+			dev->raw_io_interface = 1;
 			if ((status[1] & AAC_OPT_NEW_COMM_TYPE1)) {
 				/* driver supports TYPE1 (Tupelo) */
 				dev->comm_interface = AAC_COMM_MESSAGE_TYPE1;
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 1c617ccfaf12..dfe8e70f8d99 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -770,7 +770,8 @@ int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback,
 		/* bit1 of request_id must be 0 */
 		hbacmd->request_id =
 			cpu_to_le32((((u32)(fibptr - dev->fibs)) << 2) + 1);
-	} else
+		fibptr->flags |= FIB_CONTEXT_FLAG_SCSI_CMD;
+	} else if (command != HBA_IU_TYPE_SCSI_TM_REQ)
 		return -EINVAL;
 
 
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 0f277df73af0..87cc4a93e637 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -814,111 +814,225 @@ static int aac_eh_abort(struct scsi_cmnd* cmd)
 	return ret;
 }
 
+static u8 aac_eh_tmf_lun_reset_fib(struct aac_hba_map_info *info,
+				   struct fib *fib, u64 tmf_lun)
+{
+	struct aac_hba_tm_req *tmf;
+	u64 address;
+
+	/* start a HBA_TMF_LUN_RESET TMF request */
+	tmf = (struct aac_hba_tm_req *)fib->hw_fib_va;
+	memset(tmf, 0, sizeof(*tmf));
+	tmf->tmf = HBA_TMF_LUN_RESET;
+	tmf->it_nexus = info->rmw_nexus;
+	int_to_scsilun(tmf_lun, (struct scsi_lun *)tmf->lun);
+
+	address = (u64)fib->hw_error_pa;
+	tmf->error_ptr_hi = cpu_to_le32
+		((u32)(address >> 32));
+	tmf->error_ptr_lo = cpu_to_le32
+		((u32)(address & 0xffffffff));
+	tmf->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE);
+	fib->hbacmd_size = sizeof(*tmf);
+
+	return HBA_IU_TYPE_SCSI_TM_REQ;
+}
+
+static u8 aac_eh_tmf_hard_reset_fib(struct aac_hba_map_info *info,
+				    struct fib *fib)
+{
+	struct aac_hba_reset_req *rst;
+	u64 address;
+
+	/* already tried, start a hard reset now */
+	rst = (struct aac_hba_reset_req *)fib->hw_fib_va;
+	memset(rst, 0, sizeof(*rst));
+	rst->it_nexus = info->rmw_nexus;
+
+	address = (u64)fib->hw_error_pa;
+	rst->error_ptr_hi = cpu_to_le32((u32)(address >> 32));
+	rst->error_ptr_lo = cpu_to_le32
+		((u32)(address & 0xffffffff));
+	rst->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE);
+	fib->hbacmd_size = sizeof(*rst);
+
+       return HBA_IU_TYPE_SATA_REQ;
+}
+
+void aac_tmf_callback(void *context, struct fib *fibptr)
+{
+	struct aac_hba_resp *err =
+		&((struct aac_native_hba *)fibptr->hw_fib_va)->resp.err;
+	struct aac_hba_map_info *info = context;
+	int res;
+
+	switch (err->service_response) {
+	case HBA_RESP_SVCRES_TMF_REJECTED:
+		res = -1;
+		break;
+	case HBA_RESP_SVCRES_TMF_LUN_INVALID:
+		res = 0;
+		break;
+	case HBA_RESP_SVCRES_TMF_COMPLETE:
+	case HBA_RESP_SVCRES_TMF_SUCCEEDED:
+		res = 0;
+		break;
+	default:
+		res = -2;
+		break;
+	}
+	aac_fib_complete(fibptr);
+
+	info->reset_state = res;
+}
+
 /*
- *	aac_eh_reset	- Reset command handling
+ *	aac_eh_dev_reset	- Device reset command handling
  *	@scsi_cmd:	SCSI command block causing the reset
  *
  */
-static int aac_eh_reset(struct scsi_cmnd* cmd)
+static int aac_eh_dev_reset(struct scsi_cmnd *cmd)
 {
 	struct scsi_device * dev = cmd->device;
 	struct Scsi_Host * host = dev->host;
 	struct aac_dev * aac = (struct aac_dev *)host->hostdata;
+	struct aac_hba_map_info *info;
 	int count;
 	u32 bus, cid;
+	struct fib *fib;
 	int ret = FAILED;
-	int status = 0;
-	__le32 supported_options2 = 0;
-	bool is_mu_reset;
-	bool is_ignore_reset;
-	bool is_doorbell_reset;
-
+	int status;
+	u8 command;
 
 	bus = aac_logical_to_phys(scmd_channel(cmd));
 	cid = scmd_id(cmd);
-	if (bus < AAC_MAX_BUSES && cid < AAC_MAX_TARGETS &&
-		aac->hba_map[bus][cid].devtype == AAC_DEVTYPE_NATIVE_RAW) {
-		struct fib *fib;
-		int status;
-		u64 address;
-		u8 command;
+	info = &aac->hba_map[bus][cid];
+	if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS ||
+	    info->devtype != AAC_DEVTYPE_NATIVE_RAW)
+		return FAILED;
 
-		pr_err("%s: Host adapter reset request. SCSI hang ?\n",
-			AAC_DRIVERNAME);
+	if (info->reset_state > 0)
+		return FAILED;
 
-		fib = aac_fib_alloc(aac);
-		if (!fib)
-			return ret;
+	pr_err("%s: Host adapter reset request. SCSI hang ?\n",
+	       AAC_DRIVERNAME);
+
+	fib = aac_fib_alloc(aac);
+	if (!fib)
+		return ret;
+
+	/* start a HBA_TMF_LUN_RESET TMF request */
+	command = aac_eh_tmf_lun_reset_fib(info, fib, dev->lun);
 
+	info->reset_state = 1;
 
-		if (aac->hba_map[bus][cid].reset_state == 0) {
-			struct aac_hba_tm_req *tmf;
-
-			/* start a HBA_TMF_LUN_RESET TMF request */
-			tmf = (struct aac_hba_tm_req *)fib->hw_fib_va;
-			memset(tmf, 0, sizeof(*tmf));
-			tmf->tmf = HBA_TMF_LUN_RESET;
-			tmf->it_nexus = aac->hba_map[bus][cid].rmw_nexus;
-			tmf->lun[1] = cmd->device->lun;
-
-			address = (u64)fib->hw_error_pa;
-			tmf->error_ptr_hi = cpu_to_le32
-					((u32)(address >> 32));
-			tmf->error_ptr_lo = cpu_to_le32
-					((u32)(address & 0xffffffff));
-			tmf->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE);
-			fib->hbacmd_size = sizeof(*tmf);
-
-			command = HBA_IU_TYPE_SCSI_TM_REQ;
-			aac->hba_map[bus][cid].reset_state++;
-		} else if (aac->hba_map[bus][cid].reset_state >= 1) {
-			struct aac_hba_reset_req *rst;
-
-			/* already tried, start a hard reset now */
-			rst = (struct aac_hba_reset_req *)fib->hw_fib_va;
-			memset(rst, 0, sizeof(*rst));
-			/* reset_type is already zero... */
-			rst->it_nexus = aac->hba_map[bus][cid].rmw_nexus;
-
-			address = (u64)fib->hw_error_pa;
-			rst->error_ptr_hi = cpu_to_le32((u32)(address >> 32));
-			rst->error_ptr_lo = cpu_to_le32
-				((u32)(address & 0xffffffff));
-			rst->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE);
-			fib->hbacmd_size = sizeof(*rst);
-
-			command = HBA_IU_TYPE_SATA_REQ;
-			aac->hba_map[bus][cid].reset_state = 0;
+	status = aac_hba_send(command, fib,
+			      (fib_callback) aac_tmf_callback,
+			      (void *) info);
+
+	/* Wait up to 15 seconds for completion */
+	for (count = 0; count < 15; ++count) {
+		if (info->reset_state == 0) {
+			ret = info->reset_state == 0 ? SUCCESS : FAILED;
+			break;
 		}
-		cmd->SCp.sent_command = 0;
+		msleep(1000);
+	}
 
-		status = aac_hba_send(command, fib,
-				  (fib_callback) aac_hba_callback,
-				  (void *) cmd);
+	return ret;
+}
 
-		/* Wait up to 15 seconds for completion */
-		for (count = 0; count < 15; ++count) {
-			if (cmd->SCp.sent_command) {
-				ret = SUCCESS;
-				break;
-			}
-			msleep(1000);
+/*
+ *	aac_eh_target_reset	- Target reset command handling
+ *	@scsi_cmd:	SCSI command block causing the reset
+ *
+ */
+static int aac_eh_target_reset(struct scsi_cmnd *cmd)
+{
+	struct scsi_device * dev = cmd->device;
+	struct Scsi_Host * host = dev->host;
+	struct aac_dev * aac = (struct aac_dev *)host->hostdata;
+	struct aac_hba_map_info *info;
+	int count;
+	u32 bus, cid;
+	int ret = FAILED;
+	struct fib *fib;
+	int status;
+	u8 command;
+
+	bus = aac_logical_to_phys(scmd_channel(cmd));
+	cid = scmd_id(cmd);
+	info = &aac->hba_map[bus][cid];
+	if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS ||
+	    info->devtype != AAC_DEVTYPE_NATIVE_RAW)
+		return FAILED;
+
+	if (info->reset_state > 0)
+		return FAILED;
+
+	pr_err("%s: Host adapter reset request. SCSI hang ?\n",
+	       AAC_DRIVERNAME);
+
+	fib = aac_fib_alloc(aac);
+	if (!fib)
+		return ret;
+
+
+	/* already tried, start a hard reset now */
+	command = aac_eh_tmf_hard_reset_fib(info, fib);
+
+	info->reset_state = 2;
+
+	status = aac_hba_send(command, fib,
+			      (fib_callback) aac_tmf_callback,
+			      (void *) info);
+
+	/* Wait up to 15 seconds for completion */
+	for (count = 0; count < 15; ++count) {
+		if (info->reset_state <= 0) {
+			ret = info->reset_state == 0 ? SUCCESS : FAILED;
+			break;
 		}
+		msleep(1000);
+	}
 
-		if (ret == SUCCESS)
-			goto out;
+	return ret;
+}
 
-	} else {
+/*
+ *	aac_eh_bus_reset	- Bus reset command handling
+ *	@scsi_cmd:	SCSI command block causing the reset
+ *
+ */
+static int aac_eh_bus_reset(struct scsi_cmnd* cmd)
+{
+	struct scsi_device * dev = cmd->device;
+	struct Scsi_Host * host = dev->host;
+	struct aac_dev * aac = (struct aac_dev *)host->hostdata;
+	int count;
+	u32 cmd_bus;
+	int status = 0;
 
-		/* Mark the assoc. FIB to not complete, eh handler does this */
-		for (count = 0;
-			count < (host->can_queue + AAC_NUM_MGT_FIB);
-			++count) {
-			struct fib *fib = &aac->fibs[count];
 
-			if (fib->hw_fib_va->header.XferState &&
-				(fib->flags & FIB_CONTEXT_FLAG) &&
-				(fib->callback_data == cmd)) {
+	cmd_bus = aac_logical_to_phys(scmd_channel(cmd));
+	/* Mark the assoc. FIB to not complete, eh handler does this */
+	for (count = 0; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) {
+		struct fib *fib = &aac->fibs[count];
+
+		if (fib->hw_fib_va->header.XferState &&
+		    (fib->flags & FIB_CONTEXT_FLAG) &&
+		    (fib->flags & FIB_CONTEXT_FLAG_SCSI_CMD)) {
+			struct aac_hba_map_info *info;
+			u32 bus, cid;
+
+			cmd = (struct scsi_cmnd *)fib->callback_data;
+			bus = aac_logical_to_phys(scmd_channel(cmd));
+			if (bus != cmd_bus)
+				continue;
+			cid = scmd_id(cmd);
+			info = &aac->hba_map[bus][cid];
+			if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS ||
+			    info->devtype != AAC_DEVTYPE_NATIVE_RAW) {
 				fib->flags |= FIB_CONTEXT_FLAG_TIMED_OUT;
 				cmd->SCp.phase = AAC_OWNER_ERROR_HANDLER;
 			}
@@ -935,8 +1049,24 @@ static int aac_eh_reset(struct scsi_cmnd* cmd)
 		dev_err(&aac->pdev->dev, "Adapter health - %d\n", status);
 
 	count = get_num_of_incomplete_fibs(aac);
-	if (count == 0)
-		return SUCCESS;
+	return (count == 0) ? SUCCESS : FAILED;
+}
+
+/*
+ *	aac_eh_host_reset	- Host reset command handling
+ *	@scsi_cmd:	SCSI command block causing the reset
+ *
+ */
+int aac_eh_host_reset(struct scsi_cmnd *cmd)
+{
+	struct scsi_device * dev = cmd->device;
+	struct Scsi_Host * host = dev->host;
+	struct aac_dev * aac = (struct aac_dev *)host->hostdata;
+	int ret = FAILED;
+	__le32 supported_options2 = 0;
+	bool is_mu_reset;
+	bool is_ignore_reset;
+	bool is_doorbell_reset;
 
 	/*
 	 * Check if reset is supported by the firmware
@@ -954,11 +1084,24 @@ static int aac_eh_reset(struct scsi_cmnd* cmd)
 	 && aac_check_reset
 	 && (aac_check_reset != -1 || !is_ignore_reset)) {
 		/* Bypass wait for command quiesce */
-		aac_reset_adapter(aac, 2, IOP_HWSOFT_RESET);
+		if (aac_reset_adapter(aac, 2, IOP_HWSOFT_RESET) == 0)
+			ret = SUCCESS;
+	}
+	/*
+	 * Reset EH state
+	 */
+	if (ret == SUCCESS) {
+		int bus, cid;
+		struct aac_hba_map_info *info;
+
+		for (bus = 0; bus < AAC_MAX_BUSES; bus++) {
+			for (cid = 0; cid < AAC_MAX_TARGETS; cid++) {
+				info = &aac->hba_map[bus][cid];
+				if (info->devtype == AAC_DEVTYPE_NATIVE_RAW)
+					info->reset_state = 0;
+			}
+		}
 	}
-	ret = SUCCESS;
-
-out:
 	return ret;
 }
 
@@ -1382,7 +1525,10 @@ static struct scsi_host_template aac_driver_template = {
 	.change_queue_depth		= aac_change_queue_depth,
 	.sdev_attrs			= aac_dev_attrs,
 	.eh_abort_handler		= aac_eh_abort,
-	.eh_host_reset_handler		= aac_eh_reset,
+	.eh_device_reset_handler	= aac_eh_dev_reset,
+	.eh_target_reset_handler	= aac_eh_target_reset,
+	.eh_bus_reset_handler		= aac_eh_bus_reset,
+	.eh_host_reset_handler		= aac_eh_host_reset,
 	.can_queue			= AAC_NUM_IO_FIB,
 	.this_id			= MAXIMUM_NUM_CONTAINERS,
 	.sg_tablesize			= 16,
@@ -1457,7 +1603,7 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	/*
 	 * Only series 7 needs freset.
 	 */
-	 if (pdev->device == PMC_DEVICE_S7)
+	if (pdev->device == PMC_DEVICE_S7)
 		pdev->needs_freset = 1;
 
 	list_for_each_entry(aac, &aac_devices, entry) {
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index ce5dc73d85bb..bc0058df31c6 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -1140,6 +1140,9 @@ static void free_hard_reset_SCs(struct Scsi_Host *shpnt, Scsi_Cmnd **SCs)
 /*
  * Reset the bus
  *
+ * AIC-6260 has a hard reset (MRST signal), but apparently
+ * one cannot trigger it via software. So live with
+ * a soft reset; no-one seemed to have cared.
  */
 static int aha152x_bus_reset_host(struct Scsi_Host *shpnt)
 {
@@ -1223,15 +1226,6 @@ int aha152x_host_reset_host(struct Scsi_Host *shpnt)
 }
 
 /*
- * Reset the host (bus and controller)
- *
- */
-static int aha152x_host_reset(Scsi_Cmnd *SCpnt)
-{
-	return aha152x_host_reset_host(SCpnt->device->host);
-}
-
-/*
  * Return the "logical geometry"
  *
  */
@@ -2917,7 +2911,6 @@ static struct scsi_host_template aha152x_driver_template = {
 	.eh_abort_handler		= aha152x_abort,
 	.eh_device_reset_handler	= aha152x_device_reset,
 	.eh_bus_reset_handler		= aha152x_bus_reset,
-	.eh_host_reset_handler		= aha152x_host_reset,
 	.bios_param			= aha152x_biosparam,
 	.can_queue			= 1,
 	.this_id			= 7,
diff --git a/drivers/scsi/aha1542.c b/drivers/scsi/aha1542.c
index a23cc9ac5acd..124217927c4a 100644
--- a/drivers/scsi/aha1542.c
+++ b/drivers/scsi/aha1542.c
@@ -986,7 +986,7 @@ static struct isa_driver aha1542_isa_driver = {
 static int isa_registered;
 
 #ifdef CONFIG_PNP
-static struct pnp_device_id aha1542_pnp_ids[] = {
+static const struct pnp_device_id aha1542_pnp_ids[] = {
 	{ .id = "ADP1542" },
 	{ .id = "" }
 };
diff --git a/drivers/scsi/aic7xxx/Makefile b/drivers/scsi/aic7xxx/Makefile
index 07b60a780c06..b03ba0df7a83 100644
--- a/drivers/scsi/aic7xxx/Makefile
+++ b/drivers/scsi/aic7xxx/Makefile
@@ -59,7 +59,8 @@ $(obj)/aic7xxx_seq.h: $(src)/aic7xxx.seq $(src)/aic7xxx.reg $(obj)/aicasm/aicasm
 			      $(aicasm-7xxx-opts-y) -o $(obj)/aic7xxx_seq.h \
 			      $(srctree)/$(src)/aic7xxx.seq
 
-$(aic7xxx-gen-y): $(obj)/aic7xxx_seq.h
+$(aic7xxx-gen-y): $(objtree)/$(obj)/aic7xxx_seq.h
+	@true
 else
 $(obj)/aic7xxx_reg_print.c: $(src)/aic7xxx_reg_print.c_shipped
 endif
@@ -76,7 +77,8 @@ $(obj)/aic79xx_seq.h: $(src)/aic79xx.seq $(src)/aic79xx.reg $(obj)/aicasm/aicasm
 			      $(aicasm-79xx-opts-y) -o $(obj)/aic79xx_seq.h \
 			      $(srctree)/$(src)/aic79xx.seq
 
-$(aic79xx-gen-y): $(obj)/aic79xx_seq.h
+$(aic79xx-gen-y): $(objtree)/$(obj)/aic79xx_seq.h
+	@true
 else
 $(obj)/aic79xx_reg_print.c: $(src)/aic79xx_reg_print.c_shipped
 endif
diff --git a/drivers/scsi/aic7xxx/aic79xx_reg.h_shipped b/drivers/scsi/aic7xxx/aic79xx_reg.h_shipped
index cdcead071ef6..ddcd5a7701ac 100644
--- a/drivers/scsi/aic7xxx/aic79xx_reg.h_shipped
+++ b/drivers/scsi/aic7xxx/aic79xx_reg.h_shipped
@@ -13,13 +13,6 @@ typedef struct ahd_reg_parse_entry {
 } ahd_reg_parse_entry_t;
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_mode_ptr_print;
-#else
-#define ahd_mode_ptr_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "MODE_PTR", 0x00, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_intstat_print;
 #else
 #define ahd_intstat_print(regvalue, cur_col, wrap) \
@@ -27,27 +20,6 @@ ahd_reg_print_t ahd_intstat_print;
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_seqintcode_print;
-#else
-#define ahd_seqintcode_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SEQINTCODE", 0x02, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_error_print;
-#else
-#define ahd_error_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "ERROR", 0x04, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_hescb_qoff_print;
-#else
-#define ahd_hescb_qoff_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "HESCB_QOFF", 0x08, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_hs_mailbox_print;
 #else
 #define ahd_hs_mailbox_print(regvalue, cur_col, wrap) \
@@ -62,27 +34,6 @@ ahd_reg_print_t ahd_seqintstat_print;
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_clrseqintstat_print;
-#else
-#define ahd_clrseqintstat_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "CLRSEQINTSTAT", 0x0c, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_swtimer_print;
-#else
-#define ahd_swtimer_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SWTIMER", 0x0e, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_sescb_qoff_print;
-#else
-#define ahd_sescb_qoff_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SESCB_QOFF", 0x12, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_intctl_print;
 #else
 #define ahd_intctl_print(regvalue, cur_col, wrap) \
@@ -111,111 +62,6 @@ ahd_reg_print_t ahd_sg_cache_shadow_print;
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_lqin_print;
-#else
-#define ahd_lqin_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "LQIN", 0x20, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_lunptr_print;
-#else
-#define ahd_lunptr_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "LUNPTR", 0x22, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_cmdlenptr_print;
-#else
-#define ahd_cmdlenptr_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "CMDLENPTR", 0x25, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_attrptr_print;
-#else
-#define ahd_attrptr_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "ATTRPTR", 0x26, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_flagptr_print;
-#else
-#define ahd_flagptr_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "FLAGPTR", 0x27, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_cmdptr_print;
-#else
-#define ahd_cmdptr_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "CMDPTR", 0x28, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_qnextptr_print;
-#else
-#define ahd_qnextptr_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "QNEXTPTR", 0x29, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_abrtbyteptr_print;
-#else
-#define ahd_abrtbyteptr_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "ABRTBYTEPTR", 0x2b, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_abrtbitptr_print;
-#else
-#define ahd_abrtbitptr_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "ABRTBITPTR", 0x2c, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_lunlen_print;
-#else
-#define ahd_lunlen_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "LUNLEN", 0x30, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_cdblimit_print;
-#else
-#define ahd_cdblimit_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "CDBLIMIT", 0x31, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_maxcmd_print;
-#else
-#define ahd_maxcmd_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "MAXCMD", 0x32, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_maxcmdcnt_print;
-#else
-#define ahd_maxcmdcnt_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "MAXCMDCNT", 0x33, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_lqctl1_print;
-#else
-#define ahd_lqctl1_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "LQCTL1", 0x38, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_lqctl2_print;
-#else
-#define ahd_lqctl2_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "LQCTL2", 0x39, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_scsiseq0_print;
 #else
 #define ahd_scsiseq0_print(regvalue, cur_col, wrap) \
@@ -230,13 +76,6 @@ ahd_reg_print_t ahd_scsiseq1_print;
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_sxfrctl0_print;
-#else
-#define ahd_sxfrctl0_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SXFRCTL0", 0x3c, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_dffstat_print;
 #else
 #define ahd_dffstat_print(regvalue, cur_col, wrap) \
@@ -244,13 +83,6 @@ ahd_reg_print_t ahd_dffstat_print;
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_multargid_print;
-#else
-#define ahd_multargid_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "MULTARGID", 0x40, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_scsisigi_print;
 #else
 #define ahd_scsisigi_print(regvalue, cur_col, wrap) \
@@ -265,13 +97,6 @@ ahd_reg_print_t ahd_scsiphase_print;
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scsidat_print;
-#else
-#define ahd_scsidat_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SCSIDAT", 0x44, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_scsibus_print;
 #else
 #define ahd_scsibus_print(regvalue, cur_col, wrap) \
@@ -279,13 +104,6 @@ ahd_reg_print_t ahd_scsibus_print;
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_targidin_print;
-#else
-#define ahd_targidin_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "TARGIDIN", 0x48, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_selid_print;
 #else
 #define ahd_selid_print(regvalue, cur_col, wrap) \
@@ -293,10 +111,10 @@ ahd_reg_print_t ahd_selid_print;
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_sblkctl_print;
+ahd_reg_print_t ahd_simode0_print;
 #else
-#define ahd_sblkctl_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SBLKCTL", 0x4a, regvalue, cur_col, wrap)
+#define ahd_simode0_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SIMODE0", 0x4b, regvalue, cur_col, wrap)
 #endif
 
 #if AIC_DEBUG_REGISTERS
@@ -307,13 +125,6 @@ ahd_reg_print_t ahd_sstat0_print;
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_simode0_print;
-#else
-#define ahd_simode0_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SIMODE0", 0x4b, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_sstat1_print;
 #else
 #define ahd_sstat1_print(regvalue, cur_col, wrap) \
@@ -328,13 +139,6 @@ ahd_reg_print_t ahd_sstat2_print;
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_clrsint2_print;
-#else
-#define ahd_clrsint2_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "CLRSINT2", 0x4d, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_perrdiag_print;
 #else
 #define ahd_perrdiag_print(regvalue, cur_col, wrap) \
@@ -342,13 +146,6 @@ ahd_reg_print_t ahd_perrdiag_print;
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_lqistate_print;
-#else
-#define ahd_lqistate_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "LQISTATE", 0x4e, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_soffcnt_print;
 #else
 #define ahd_soffcnt_print(regvalue, cur_col, wrap) \
@@ -356,13 +153,6 @@ ahd_reg_print_t ahd_soffcnt_print;
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_lqostate_print;
-#else
-#define ahd_lqostate_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "LQOSTATE", 0x4f, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_lqistat0_print;
 #else
 #define ahd_lqistat0_print(regvalue, cur_col, wrap) \
@@ -370,27 +160,6 @@ ahd_reg_print_t ahd_lqistat0_print;
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_clrlqiint0_print;
-#else
-#define ahd_clrlqiint0_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "CLRLQIINT0", 0x50, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_lqimode0_print;
-#else
-#define ahd_lqimode0_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "LQIMODE0", 0x50, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_lqimode1_print;
-#else
-#define ahd_lqimode1_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "LQIMODE1", 0x51, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_lqistat1_print;
 #else
 #define ahd_lqistat1_print(regvalue, cur_col, wrap) \
@@ -398,13 +167,6 @@ ahd_reg_print_t ahd_lqistat1_print;
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_clrlqiint1_print;
-#else
-#define ahd_clrlqiint1_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "CLRLQIINT1", 0x51, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_lqistat2_print;
 #else
 #define ahd_lqistat2_print(regvalue, cur_col, wrap) \
@@ -419,20 +181,6 @@ ahd_reg_print_t ahd_sstat3_print;
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_simode3_print;
-#else
-#define ahd_simode3_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SIMODE3", 0x53, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_clrsint3_print;
-#else
-#define ahd_clrsint3_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "CLRSINT3", 0x53, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_lqostat0_print;
 #else
 #define ahd_lqostat0_print(regvalue, cur_col, wrap) \
@@ -440,27 +188,6 @@ ahd_reg_print_t ahd_lqostat0_print;
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_clrlqoint0_print;
-#else
-#define ahd_clrlqoint0_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "CLRLQOINT0", 0x54, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_lqomode0_print;
-#else
-#define ahd_lqomode0_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "LQOMODE0", 0x54, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_lqomode1_print;
-#else
-#define ahd_lqomode1_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "LQOMODE1", 0x55, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_lqostat1_print;
 #else
 #define ahd_lqostat1_print(regvalue, cur_col, wrap) \
@@ -468,13 +195,6 @@ ahd_reg_print_t ahd_lqostat1_print;
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_clrlqoint1_print;
-#else
-#define ahd_clrlqoint1_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "CLRLQOINT1", 0x55, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_lqostat2_print;
 #else
 #define ahd_lqostat2_print(regvalue, cur_col, wrap) \
@@ -482,13 +202,6 @@ ahd_reg_print_t ahd_lqostat2_print;
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_os_space_cnt_print;
-#else
-#define ahd_os_space_cnt_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "OS_SPACE_CNT", 0x56, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_simode1_print;
 #else
 #define ahd_simode1_print(regvalue, cur_col, wrap) \
@@ -496,13 +209,6 @@ ahd_reg_print_t ahd_simode1_print;
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_gsfifo_print;
-#else
-#define ahd_gsfifo_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "GSFIFO", 0x58, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_dffsxfrctl_print;
 #else
 #define ahd_dffsxfrctl_print(regvalue, cur_col, wrap) \
@@ -510,27 +216,6 @@ ahd_reg_print_t ahd_dffsxfrctl_print;
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_lqoscsctl_print;
-#else
-#define ahd_lqoscsctl_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "LQOSCSCTL", 0x5a, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_nextscb_print;
-#else
-#define ahd_nextscb_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "NEXTSCB", 0x5a, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_clrseqintsrc_print;
-#else
-#define ahd_clrseqintsrc_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "CLRSEQINTSRC", 0x5b, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_seqintsrc_print;
 #else
 #define ahd_seqintsrc_print(regvalue, cur_col, wrap) \
@@ -538,13 +223,6 @@ ahd_reg_print_t ahd_seqintsrc_print;
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_currscb_print;
-#else
-#define ahd_currscb_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "CURRSCB", 0x5c, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_seqimode_print;
 #else
 #define ahd_seqimode_print(regvalue, cur_col, wrap) \
@@ -559,90 +237,6 @@ ahd_reg_print_t ahd_mdffstat_print;
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_lastscb_print;
-#else
-#define ahd_lastscb_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "LASTSCB", 0x5e, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_negoaddr_print;
-#else
-#define ahd_negoaddr_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "NEGOADDR", 0x60, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_negperiod_print;
-#else
-#define ahd_negperiod_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "NEGPERIOD", 0x61, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_negoffset_print;
-#else
-#define ahd_negoffset_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "NEGOFFSET", 0x62, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_negppropts_print;
-#else
-#define ahd_negppropts_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "NEGPPROPTS", 0x63, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_negconopts_print;
-#else
-#define ahd_negconopts_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "NEGCONOPTS", 0x64, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_annexcol_print;
-#else
-#define ahd_annexcol_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "ANNEXCOL", 0x65, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_annexdat_print;
-#else
-#define ahd_annexdat_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "ANNEXDAT", 0x66, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scschkn_print;
-#else
-#define ahd_scschkn_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SCSCHKN", 0x66, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_iownid_print;
-#else
-#define ahd_iownid_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "IOWNID", 0x67, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_shcnt_print;
-#else
-#define ahd_shcnt_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SHCNT", 0x68, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_townid_print;
-#else
-#define ahd_townid_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "TOWNID", 0x69, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_seloid_print;
 #else
 #define ahd_seloid_print(regvalue, cur_col, wrap) \
@@ -650,90 +244,6 @@ ahd_reg_print_t ahd_seloid_print;
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scbhaddr_print;
-#else
-#define ahd_scbhaddr_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SCBHADDR", 0x7c, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_sghaddr_print;
-#else
-#define ahd_sghaddr_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SGHADDR", 0x7c, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scbhcnt_print;
-#else
-#define ahd_scbhcnt_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SCBHCNT", 0x84, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_sghcnt_print;
-#else
-#define ahd_sghcnt_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SGHCNT", 0x84, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_pcixctl_print;
-#else
-#define ahd_pcixctl_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "PCIXCTL", 0x93, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_dchspltstat0_print;
-#else
-#define ahd_dchspltstat0_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "DCHSPLTSTAT0", 0x96, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_dchspltstat1_print;
-#else
-#define ahd_dchspltstat1_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "DCHSPLTSTAT1", 0x97, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_sgspltstat0_print;
-#else
-#define ahd_sgspltstat0_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SGSPLTSTAT0", 0x9e, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_sgspltstat1_print;
-#else
-#define ahd_sgspltstat1_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SGSPLTSTAT1", 0x9f, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_df0pcistat_print;
-#else
-#define ahd_df0pcistat_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "DF0PCISTAT", 0xa0, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_reg0_print;
-#else
-#define ahd_reg0_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "REG0", 0xa0, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_reg_isr_print;
-#else
-#define ahd_reg_isr_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "REG_ISR", 0xa4, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_sg_state_print;
 #else
 #define ahd_sg_state_print(regvalue, cur_col, wrap) \
@@ -741,27 +251,6 @@ ahd_reg_print_t ahd_sg_state_print;
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_targpcistat_print;
-#else
-#define ahd_targpcistat_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "TARGPCISTAT", 0xa7, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scbautoptr_print;
-#else
-#define ahd_scbautoptr_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SCBAUTOPTR", 0xab, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_ccscbaddr_print;
-#else
-#define ahd_ccscbaddr_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "CCSCBADDR", 0xac, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_ccscbctl_print;
 #else
 #define ahd_ccscbctl_print(regvalue, cur_col, wrap) \
@@ -776,69 +265,6 @@ ahd_reg_print_t ahd_ccsgctl_print;
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_ccscbram_print;
-#else
-#define ahd_ccscbram_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "CCSCBRAM", 0xb0, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_brddat_print;
-#else
-#define ahd_brddat_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "BRDDAT", 0xb8, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_seeadr_print;
-#else
-#define ahd_seeadr_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SEEADR", 0xba, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_seedat_print;
-#else
-#define ahd_seedat_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SEEDAT", 0xbc, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_seectl_print;
-#else
-#define ahd_seectl_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SEECTL", 0xbe, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_seestat_print;
-#else
-#define ahd_seestat_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SEESTAT", 0xbe, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_dspdatactl_print;
-#else
-#define ahd_dspdatactl_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "DSPDATACTL", 0xc1, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_dspselect_print;
-#else
-#define ahd_dspselect_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "DSPSELECT", 0xc4, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_wrtbiasctl_print;
-#else
-#define ahd_wrtbiasctl_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "WRTBIASCTL", 0xc5, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_seqctl0_print;
 #else
 #define ahd_seqctl0_print(regvalue, cur_col, wrap) \
@@ -853,62 +279,6 @@ ahd_reg_print_t ahd_seqintctl_print;
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_prgmcnt_print;
-#else
-#define ahd_prgmcnt_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "PRGMCNT", 0xde, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_none_print;
-#else
-#define ahd_none_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "NONE", 0xea, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_intvec1_addr_print;
-#else
-#define ahd_intvec1_addr_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "INTVEC1_ADDR", 0xf4, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_curaddr_print;
-#else
-#define ahd_curaddr_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "CURADDR", 0xf4, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_intvec2_addr_print;
-#else
-#define ahd_intvec2_addr_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "INTVEC2_ADDR", 0xf6, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_longjmp_addr_print;
-#else
-#define ahd_longjmp_addr_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "LONGJMP_ADDR", 0xf8, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_accum_save_print;
-#else
-#define ahd_accum_save_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "ACCUM_SAVE", 0xfa, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_waiting_scb_tails_print;
-#else
-#define ahd_waiting_scb_tails_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "WAITING_SCB_TAILS", 0x100, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_sram_base_print;
 #else
 #define ahd_sram_base_print(regvalue, cur_col, wrap) \
@@ -916,62 +286,6 @@ ahd_reg_print_t ahd_sram_base_print;
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_waiting_tid_head_print;
-#else
-#define ahd_waiting_tid_head_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "WAITING_TID_HEAD", 0x120, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_waiting_tid_tail_print;
-#else
-#define ahd_waiting_tid_tail_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "WAITING_TID_TAIL", 0x122, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_next_queued_scb_addr_print;
-#else
-#define ahd_next_queued_scb_addr_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "NEXT_QUEUED_SCB_ADDR", 0x124, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_complete_scb_head_print;
-#else
-#define ahd_complete_scb_head_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "COMPLETE_SCB_HEAD", 0x128, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_complete_scb_dmainprog_head_print;
-#else
-#define ahd_complete_scb_dmainprog_head_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "COMPLETE_SCB_DMAINPROG_HEAD", 0x12a, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_complete_dma_scb_head_print;
-#else
-#define ahd_complete_dma_scb_head_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "COMPLETE_DMA_SCB_HEAD", 0x12c, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_complete_dma_scb_tail_print;
-#else
-#define ahd_complete_dma_scb_tail_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "COMPLETE_DMA_SCB_TAIL", 0x12e, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_complete_on_qfreeze_head_print;
-#else
-#define ahd_complete_on_qfreeze_head_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "COMPLETE_ON_QFREEZE_HEAD", 0x130, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_qfreeze_count_print;
 #else
 #define ahd_qfreeze_count_print(regvalue, cur_col, wrap) \
@@ -993,13 +307,6 @@ ahd_reg_print_t ahd_saved_mode_print;
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_msg_out_print;
-#else
-#define ahd_msg_out_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "MSG_OUT", 0x137, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_seq_flags_print;
 #else
 #define ahd_seq_flags_print(regvalue, cur_col, wrap) \
@@ -1014,48 +321,6 @@ ahd_reg_print_t ahd_lastphase_print;
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_qoutfifo_entry_valid_tag_print;
-#else
-#define ahd_qoutfifo_entry_valid_tag_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "QOUTFIFO_ENTRY_VALID_TAG", 0x13d, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_kernel_tqinpos_print;
-#else
-#define ahd_kernel_tqinpos_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "KERNEL_TQINPOS", 0x13e, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_qoutfifo_next_addr_print;
-#else
-#define ahd_qoutfifo_next_addr_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "QOUTFIFO_NEXT_ADDR", 0x144, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_last_msg_print;
-#else
-#define ahd_last_msg_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "LAST_MSG", 0x14a, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scsiseq_template_print;
-#else
-#define ahd_scsiseq_template_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SCSISEQ_TEMPLATE", 0x14b, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_initiator_tag_print;
-#else
-#define ahd_initiator_tag_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "INITIATOR_TAG", 0x14c, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_seq_flags2_print;
 #else
 #define ahd_seq_flags2_print(regvalue, cur_col, wrap) \
@@ -1063,62 +328,6 @@ ahd_reg_print_t ahd_seq_flags2_print;
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_allocfifo_scbptr_print;
-#else
-#define ahd_allocfifo_scbptr_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "ALLOCFIFO_SCBPTR", 0x14e, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_int_coalescing_timer_print;
-#else
-#define ahd_int_coalescing_timer_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "INT_COALESCING_TIMER", 0x150, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_int_coalescing_maxcmds_print;
-#else
-#define ahd_int_coalescing_maxcmds_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "INT_COALESCING_MAXCMDS", 0x152, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_int_coalescing_mincmds_print;
-#else
-#define ahd_int_coalescing_mincmds_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "INT_COALESCING_MINCMDS", 0x153, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_cmds_pending_print;
-#else
-#define ahd_cmds_pending_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "CMDS_PENDING", 0x154, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_int_coalescing_cmdcount_print;
-#else
-#define ahd_int_coalescing_cmdcount_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "INT_COALESCING_CMDCOUNT", 0x156, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_local_hs_mailbox_print;
-#else
-#define ahd_local_hs_mailbox_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "LOCAL_HS_MAILBOX", 0x157, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_cmdsize_table_print;
-#else
-#define ahd_cmdsize_table_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "CMDSIZE_TABLE", 0x158, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_mk_message_scb_print;
 #else
 #define ahd_mk_message_scb_print(regvalue, cur_col, wrap) \
@@ -1140,27 +349,6 @@ ahd_reg_print_t ahd_scb_base_print;
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scb_residual_datacnt_print;
-#else
-#define ahd_scb_residual_datacnt_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SCB_RESIDUAL_DATACNT", 0x180, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scb_sense_busaddr_print;
-#else
-#define ahd_scb_sense_busaddr_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SCB_SENSE_BUSADDR", 0x18c, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scb_tag_print;
-#else
-#define ahd_scb_tag_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SCB_TAG", 0x190, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_scb_control_print;
 #else
 #define ahd_scb_control_print(regvalue, cur_col, wrap) \
@@ -1174,69 +362,6 @@ ahd_reg_print_t ahd_scb_scsiid_print;
     ahd_print_register(NULL, 0, "SCB_SCSIID", 0x193, regvalue, cur_col, wrap)
 #endif
 
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scb_lun_print;
-#else
-#define ahd_scb_lun_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SCB_LUN", 0x194, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scb_task_attribute_print;
-#else
-#define ahd_scb_task_attribute_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SCB_TASK_ATTRIBUTE", 0x195, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scb_task_management_print;
-#else
-#define ahd_scb_task_management_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SCB_TASK_MANAGEMENT", 0x197, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scb_dataptr_print;
-#else
-#define ahd_scb_dataptr_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SCB_DATAPTR", 0x198, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scb_datacnt_print;
-#else
-#define ahd_scb_datacnt_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SCB_DATACNT", 0x1a0, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scb_sgptr_print;
-#else
-#define ahd_scb_sgptr_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SCB_SGPTR", 0x1a4, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scb_busaddr_print;
-#else
-#define ahd_scb_busaddr_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SCB_BUSADDR", 0x1a8, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scb_next2_print;
-#else
-#define ahd_scb_next2_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SCB_NEXT2", 0x1ae, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scb_disconnected_lists_print;
-#else
-#define ahd_scb_disconnected_lists_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SCB_DISCONNECTED_LISTS", 0x1b8, regvalue, cur_col, wrap)
-#endif
-
 
 #define	MODE_PTR        		0x00
 #define		DST_MODE        	0x70
@@ -1292,15 +417,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define		CLRCMDINT       	0x02
 #define		CLRSPLTINT      	0x01
 
-#define	ERROR           		0x04
-#define		CIOPARERR       	0x80
-#define		CIOACCESFAIL    	0x40
-#define		MPARERR         	0x20
-#define		DPARERR         	0x10
-#define		SQPARERR        	0x08
-#define		ILLOPCODE       	0x04
-#define		DSCTMOUT        	0x02
-
 #define	CLRERR          		0x04
 #define		CLRCIOPARERR    	0x80
 #define		CLRCIOACCESFAIL 	0x40
@@ -1310,6 +426,15 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define		CLRILLOPCODE    	0x04
 #define		CLRDSCTMOUT     	0x02
 
+#define	ERROR           		0x04
+#define		CIOPARERR       	0x80
+#define		CIOACCESFAIL    	0x40
+#define		MPARERR         	0x20
+#define		DPARERR         	0x10
+#define		SQPARERR        	0x08
+#define		ILLOPCODE       	0x04
+#define		DSCTMOUT        	0x02
+
 #define	HCNTRL          		0x05
 #define		SEQ_RESET       	0x80
 #define		POWRDN          	0x40
@@ -1404,22 +529,22 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define		FIFOFULL        	0x02
 #define		FIFOEMP         	0x01
 
-#define	SG_CACHE_SHADOW 		0x1b
-#define		ODD_SEG         	0x04
-#define		LAST_SEG        	0x02
-#define		LAST_SEG_DONE   	0x01
-
 #define	ARBCTL          		0x1b
 #define		RESET_HARB      	0x80
 #define		RETRY_SWEN      	0x08
 #define		USE_TIME        	0x07
 
-#define	SG_CACHE_PRE    		0x1b
+#define	SG_CACHE_SHADOW 		0x1b
+#define		ODD_SEG         	0x04
+#define		LAST_SEG        	0x02
+#define		LAST_SEG_DONE   	0x01
 
-#define	LQIN            		0x20
+#define	SG_CACHE_PRE    		0x1b
 
 #define	TYPEPTR         		0x20
 
+#define	LQIN            		0x20
+
 #define	TAGPTR          		0x21
 
 #define	LUNPTR          		0x22
@@ -1479,14 +604,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define		SINGLECMD       	0x02
 #define		ABORTPENDING    	0x01
 
-#define	SCSBIST0        		0x39
-#define		GSBISTERR       	0x40
-#define		GSBISTDONE      	0x20
-#define		GSBISTRUN       	0x10
-#define		OSBISTERR       	0x04
-#define		OSBISTDONE      	0x02
-#define		OSBISTRUN       	0x01
-
 #define	LQCTL2          		0x39
 #define		LQIRETRY        	0x80
 #define		LQICONTINUE     	0x40
@@ -1497,10 +614,13 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define		LQOTOIDLE       	0x02
 #define		LQOPAUSE        	0x01
 
-#define	SCSBIST1        		0x3a
-#define		NTBISTERR       	0x04
-#define		NTBISTDONE      	0x02
-#define		NTBISTRUN       	0x01
+#define	SCSBIST0        		0x39
+#define		GSBISTERR       	0x40
+#define		GSBISTDONE      	0x20
+#define		GSBISTRUN       	0x10
+#define		OSBISTERR       	0x04
+#define		OSBISTDONE      	0x02
+#define		OSBISTRUN       	0x01
 
 #define	SCSISEQ0        		0x3a
 #define		TEMODEO         	0x80
@@ -1509,8 +629,15 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define		FORCEBUSFREE    	0x10
 #define		SCSIRSTO        	0x01
 
+#define	SCSBIST1        		0x3a
+#define		NTBISTERR       	0x04
+#define		NTBISTDONE      	0x02
+#define		NTBISTRUN       	0x01
+
 #define	SCSISEQ1        		0x3b
 
+#define	BUSINITID       		0x3c
+
 #define	SXFRCTL0        		0x3c
 #define		DFON            	0x80
 #define		DFPEXP          	0x40
@@ -1519,8 +646,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 
 #define	DLCOUNT         		0x3c
 
-#define	BUSINITID       		0x3c
-
 #define	SXFRCTL1        		0x3d
 #define		BITBUCKET       	0x80
 #define		ENSACHK         	0x40
@@ -1545,6 +670,8 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define		CURRFIFO_1      	0x01
 #define		CURRFIFO_0      	0x00
 
+#define	MULTARGID       		0x40
+
 #define	SCSISIGO        		0x40
 #define		CDO             	0x80
 #define		IOO             	0x40
@@ -1555,8 +682,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define		REQO            	0x02
 #define		ACKO            	0x01
 
-#define	MULTARGID       		0x40
-
 #define	SCSISIGI        		0x41
 #define		ATNI            	0x10
 #define		SELI            	0x08
@@ -1603,14 +728,14 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define		ENAB20          	0x04
 #define		SELWIDE         	0x02
 
-#define	CLRSINT0        		0x4b
-#define		CLRSELDO        	0x40
-#define		CLRSELDI        	0x20
-#define		CLRSELINGO      	0x10
-#define		CLRIOERR        	0x08
-#define		CLROVERRUN      	0x04
-#define		CLRSPIORDY      	0x02
-#define		CLRARBDO        	0x01
+#define	SIMODE0         		0x4b
+#define		ENSELDO         	0x40
+#define		ENSELDI         	0x20
+#define		ENSELINGO       	0x10
+#define		ENIOERR         	0x08
+#define		ENOVERRUN       	0x04
+#define		ENSPIORDY       	0x02
+#define		ENARBDO         	0x01
 
 #define	SSTAT0          		0x4b
 #define		TARGET          	0x80
@@ -1622,23 +747,14 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define		SPIORDY         	0x02
 #define		ARBDO           	0x01
 
-#define	SIMODE0         		0x4b
-#define		ENSELDO         	0x40
-#define		ENSELDI         	0x20
-#define		ENSELINGO       	0x10
-#define		ENIOERR         	0x08
-#define		ENOVERRUN       	0x04
-#define		ENSPIORDY       	0x02
-#define		ENARBDO         	0x01
-
-#define	CLRSINT1        		0x4c
-#define		CLRSELTIMEO     	0x80
-#define		CLRATNO         	0x40
-#define		CLRSCSIRSTI     	0x20
-#define		CLRBUSFREE      	0x08
-#define		CLRSCSIPERR     	0x04
-#define		CLRSTRB2FAST    	0x02
-#define		CLRREQINIT      	0x01
+#define	CLRSINT0        		0x4b
+#define		CLRSELDO        	0x40
+#define		CLRSELDI        	0x20
+#define		CLRSELINGO      	0x10
+#define		CLRIOERR        	0x08
+#define		CLROVERRUN      	0x04
+#define		CLRSPIORDY      	0x02
+#define		CLRARBDO        	0x01
 
 #define	SSTAT1          		0x4c
 #define		SELTO           	0x80
@@ -1650,6 +766,20 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define		STRB2FAST       	0x02
 #define		REQINIT         	0x01
 
+#define	CLRSINT1        		0x4c
+#define		CLRSELTIMEO     	0x80
+#define		CLRATNO         	0x40
+#define		CLRSCSIRSTI     	0x20
+#define		CLRBUSFREE      	0x08
+#define		CLRSCSIPERR     	0x04
+#define		CLRSTRB2FAST    	0x02
+#define		CLRREQINIT      	0x01
+
+#define	SIMODE2         		0x4d
+#define		ENWIDE_RES      	0x04
+#define		ENSDONE         	0x02
+#define		ENDMADONE       	0x01
+
 #define	SSTAT2          		0x4d
 #define		BUSFREETIME     	0xc0
 #define		NONPACKREQ      	0x20
@@ -1662,11 +792,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define		BUSFREE_DFF0    	0x80
 #define		BUSFREE_LQO     	0x40
 
-#define	SIMODE2         		0x4d
-#define		ENWIDE_RES      	0x04
-#define		ENSDONE         	0x02
-#define		ENDMADONE       	0x01
-
 #define	CLRSINT2        		0x4d
 #define		CLRNONPACKREQ   	0x20
 #define		CLRWIDE_RES     	0x04
@@ -1685,10 +810,10 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 
 #define	LQISTATE        		0x4e
 
-#define	SOFFCNT         		0x4f
-
 #define	LQOSTATE        		0x4f
 
+#define	SOFFCNT         		0x4f
+
 #define	LQISTAT0        		0x50
 #define		LQIATNQAS       	0x20
 #define		LQICRCT1        	0x10
@@ -1697,14 +822,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define		LQIATNLQ        	0x02
 #define		LQIATNCMD       	0x01
 
-#define	CLRLQIINT0      		0x50
-#define		CLRLQIATNQAS    	0x20
-#define		CLRLQICRCT1     	0x10
-#define		CLRLQICRCT2     	0x08
-#define		CLRLQIBADLQT    	0x04
-#define		CLRLQIATNLQ     	0x02
-#define		CLRLQIATNCMD    	0x01
-
 #define	LQIMODE0        		0x50
 #define		ENLQIATNQASK    	0x20
 #define		ENLQICRCT1      	0x10
@@ -1713,6 +830,14 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define		ENLQIATNLQ      	0x02
 #define		ENLQIATNCMD     	0x01
 
+#define	CLRLQIINT0      		0x50
+#define		CLRLQIATNQAS    	0x20
+#define		CLRLQICRCT1     	0x10
+#define		CLRLQICRCT2     	0x08
+#define		CLRLQIBADLQT    	0x04
+#define		CLRLQIATNLQ     	0x02
+#define		CLRLQIATNCMD    	0x01
+
 #define	LQIMODE1        		0x51
 #define		ENLQIPHASE_LQ   	0x80
 #define		ENLQIPHASE_NLQ  	0x40
@@ -1753,25 +878,18 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define		LQISTOPCMD      	0x02
 #define		LQIGSAVAIL      	0x01
 
-#define	SSTAT3          		0x53
-#define		NTRAMPERR       	0x02
-#define		OSRAMPERR       	0x01
-
 #define	SIMODE3         		0x53
 #define		ENNTRAMPERR     	0x02
 #define		ENOSRAMPERR     	0x01
 
+#define	SSTAT3          		0x53
+#define		NTRAMPERR       	0x02
+#define		OSRAMPERR       	0x01
+
 #define	CLRSINT3        		0x53
 #define		CLRNTRAMPERR    	0x02
 #define		CLROSRAMPERR    	0x01
 
-#define	LQOSTAT0        		0x54
-#define		LQOTARGSCBPERR  	0x10
-#define		LQOSTOPT2       	0x08
-#define		LQOATNLQ        	0x04
-#define		LQOATNPKT       	0x02
-#define		LQOTCRC         	0x01
-
 #define	CLRLQOINT0      		0x54
 #define		CLRLQOTARGSCBPERR	0x10
 #define		CLRLQOSTOPT2    	0x08
@@ -1779,6 +897,13 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define		CLRLQOATNPKT    	0x02
 #define		CLRLQOTCRC      	0x01
 
+#define	LQOSTAT0        		0x54
+#define		LQOTARGSCBPERR  	0x10
+#define		LQOSTOPT2       	0x08
+#define		LQOATNLQ        	0x04
+#define		LQOATNPKT       	0x02
+#define		LQOTCRC         	0x01
+
 #define	LQOMODE0        		0x54
 #define		ENLQOTARGSCBPERR	0x10
 #define		ENLQOSTOPT2     	0x08
@@ -1793,13 +918,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define		ENLQOBUSFREE    	0x02
 #define		ENLQOPHACHGINPKT	0x01
 
-#define	LQOSTAT1        		0x55
-#define		LQOINITSCBPERR  	0x10
-#define		LQOSTOPI2       	0x08
-#define		LQOBADQAS       	0x04
-#define		LQOBUSFREE      	0x02
-#define		LQOPHACHGINPKT  	0x01
-
 #define	CLRLQOINT1      		0x55
 #define		CLRLQOINITSCBPERR	0x10
 #define		CLRLQOSTOPI2    	0x08
@@ -1807,6 +925,13 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define		CLRLQOBUSFREE   	0x02
 #define		CLRLQOPHACHGINPKT	0x01
 
+#define	LQOSTAT1        		0x55
+#define		LQOINITSCBPERR  	0x10
+#define		LQOSTOPI2       	0x08
+#define		LQOBADQAS       	0x04
+#define		LQOBUSFREE      	0x02
+#define		LQOPHACHGINPKT  	0x01
+
 #define	LQOSTAT2        		0x56
 #define		LQOPKT          	0xe0
 #define		LQOWAITFIFO     	0x10
@@ -1859,8 +984,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define		CFG4ICMD        	0x02
 #define		CFG4TCMD        	0x01
 
-#define	CURRSCB         		0x5c
-
 #define	SEQIMODE        		0x5c
 #define		ENCTXTDONE      	0x40
 #define		ENSAVEPTRS      	0x20
@@ -1870,6 +993,11 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define		ENCFG4ICMD      	0x02
 #define		ENCFG4TCMD      	0x01
 
+#define	CURRSCB         		0x5c
+
+#define	CRCCONTROL      		0x5d
+#define		CRCVALCHKEN     	0x40
+
 #define	MDFFSTAT        		0x5d
 #define		SHCNTNEGATIVE   	0x40
 #define		SHCNTMINUS1     	0x20
@@ -1879,34 +1007,31 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define		DATAINFIFO      	0x02
 #define		FIFOFREE        	0x01
 
-#define	CRCCONTROL      		0x5d
-#define		CRCVALCHKEN     	0x40
-
 #define	DFFTAG          		0x5e
 
-#define	LASTSCB         		0x5e
-
 #define	SCSITEST        		0x5e
 #define		CNTRTEST        	0x08
 #define		SEL_TXPLL_DEBUG 	0x04
 
+#define	LASTSCB         		0x5e
+
 #define	IOPDNCTL        		0x5f
 #define		DISABLE_OE      	0x80
 #define		PDN_IDIST       	0x04
 #define		PDN_DIFFSENSE   	0x01
 
-#define	SHADDR          		0x60
+#define	DGRPCRCI        		0x60
 
 #define	NEGOADDR        		0x60
 
-#define	DGRPCRCI        		0x60
+#define	SHADDR          		0x60
 
 #define	NEGPERIOD       		0x61
 
-#define	PACKCRCI        		0x62
-
 #define	NEGOFFSET       		0x62
 
+#define	PACKCRCI        		0x62
+
 #define	NEGPPROPTS      		0x63
 #define		PPROPT_PACE     	0x08
 #define		PPROPT_QAS      	0x04
@@ -1942,16 +1067,18 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 
 #define	SHCNT           		0x68
 
-#define	TOWNID          		0x69
-
 #define	PLL960CTL1      		0x69
 
+#define	TOWNID          		0x69
+
 #define	PLL960CNT0      		0x6a
 
 #define	XSIG            		0x6a
 
 #define	SELOID          		0x6b
 
+#define	FAIRNESS        		0x6c
+
 #define	PLL400CTL0      		0x6c
 #define		PLL_VCOSEL      	0x80
 #define		PLL_PWDN        	0x40
@@ -1961,8 +1088,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define		PLL_DLPF        	0x02
 #define		PLL_ENFBM       	0x01
 
-#define	FAIRNESS        		0x6c
-
 #define	PLL400CTL1      		0x6d
 #define		PLL_CNTEN       	0x80
 #define		PLL_CNTCLR      	0x40
@@ -1974,25 +1099,25 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 
 #define	HADDR           		0x70
 
+#define	HODMAADR        		0x70
+
 #define	PLLDELAY        		0x70
 #define		SPLIT_DROP_REQ  	0x80
 
-#define	HODMAADR        		0x70
+#define	HCNT            		0x78
 
 #define	HODMACNT        		0x78
 
-#define	HCNT            		0x78
-
 #define	HODMAEN         		0x7a
 
-#define	SCBHADDR        		0x7c
-
 #define	SGHADDR         		0x7c
 
-#define	SCBHCNT         		0x84
+#define	SCBHADDR        		0x7c
 
 #define	SGHCNT          		0x84
 
+#define	SCBHCNT         		0x84
+
 #define	DFF_THRSH       		0x88
 #define		WR_DFTHRSH      	0x70
 #define		RD_DFTHRSH      	0x07
@@ -2025,6 +1150,10 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 
 #define	CMCRXMSG0       		0x90
 
+#define	OVLYRXMSG0      		0x90
+
+#define	DCHRXMSG0       		0x90
+
 #define	ROENABLE        		0x90
 #define		MSIROEN         	0x20
 #define		OVLYROEN        	0x10
@@ -2033,11 +1162,11 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define		DCH1ROEN        	0x02
 #define		DCH0ROEN        	0x01
 
-#define	OVLYRXMSG0      		0x90
+#define	OVLYRXMSG1      		0x91
 
-#define	DCHRXMSG0       		0x90
+#define	CMCRXMSG1       		0x91
 
-#define	OVLYRXMSG1      		0x91
+#define	DCHRXMSG1       		0x91
 
 #define	NSENABLE        		0x91
 #define		MSINSEN         	0x20
@@ -2047,10 +1176,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define		DCH1NSEN        	0x02
 #define		DCH0NSEN        	0x01
 
-#define	CMCRXMSG1       		0x91
-
-#define	DCHRXMSG1       		0x91
-
 #define	DCHRXMSG2       		0x92
 
 #define	CMCRXMSG2       		0x92
@@ -2074,24 +1199,24 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define		TSCSERREN       	0x02
 #define		CMPABCDIS       	0x01
 
+#define	CMCSEQBCNT      		0x94
+
 #define	OVLYSEQBCNT     		0x94
 
 #define	DCHSEQBCNT      		0x94
 
-#define	CMCSEQBCNT      		0x94
-
-#define	CMCSPLTSTAT0    		0x96
-
 #define	DCHSPLTSTAT0    		0x96
 
 #define	OVLYSPLTSTAT0   		0x96
 
-#define	CMCSPLTSTAT1    		0x97
+#define	CMCSPLTSTAT0    		0x96
 
 #define	OVLYSPLTSTAT1   		0x97
 
 #define	DCHSPLTSTAT1    		0x97
 
+#define	CMCSPLTSTAT1    		0x97
+
 #define	SGRXMSG0        		0x98
 #define		CDNUM           	0xf8
 #define		CFNUM           	0x07
@@ -2119,18 +1244,15 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define		TAG_NUM         	0x1f
 #define		RLXORD          	0x10
 
-#define	SGSEQBCNT       		0x9c
-
 #define	SLVSPLTOUTATTR0 		0x9c
 #define		LOWER_BCNT      	0xff
 
+#define	SGSEQBCNT       		0x9c
+
 #define	SLVSPLTOUTATTR1 		0x9d
 #define		CMPLT_DNUM      	0xf8
 #define		CMPLT_FNUM      	0x07
 
-#define	SLVSPLTOUTATTR2 		0x9e
-#define		CMPLT_BNUM      	0xff
-
 #define	SGSPLTSTAT0     		0x9e
 #define		STAETERM        	0x80
 #define		SCBCERR         	0x40
@@ -2141,6 +1263,9 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define		RXSCEMSG        	0x02
 #define		RXSPLTRSP       	0x01
 
+#define	SLVSPLTOUTATTR2 		0x9e
+#define		CMPLT_BNUM      	0xff
+
 #define	SGSPLTSTAT1     		0x9f
 #define		RXDATABUCKET    	0x01
 
@@ -2177,14 +1302,14 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define		CLRPENDMSI      	0x08
 #define		DPR             	0x01
 
+#define	DATA_COUNT_ODD  		0xa7
+
 #define	TARGPCISTAT     		0xa7
 #define		DPE             	0x80
 #define		SSE             	0x40
 #define		STA             	0x08
 #define		TWATERR         	0x02
 
-#define	DATA_COUNT_ODD  		0xa7
-
 #define	SCBPTR          		0xa8
 
 #define	CCSCBACNT       		0xab
@@ -2196,10 +1321,10 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 
 #define	CCSGADDR        		0xac
 
-#define	CCSCBADR_BK     		0xac
-
 #define	CCSCBADDR       		0xac
 
+#define	CCSCBADR_BK     		0xac
+
 #define	CMC_RAMBIST     		0xad
 #define		SG_ELEMENT_SIZE 	0x80
 #define		SCBRAMBIST_FAIL 	0x40
@@ -2253,9 +1378,9 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define	SEEDAT          		0xbc
 
 #define	SEECTL          		0xbe
+#define		SEEOP_EWDS      	0x40
 #define		SEEOP_WALL      	0x40
 #define		SEEOP_EWEN      	0x40
-#define		SEEOP_EWDS      	0x40
 #define		SEEOPCODE       	0x70
 #define		SEERST          	0x02
 #define		SEESTART        	0x01
@@ -2272,25 +1397,25 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 
 #define	SCBCNT          		0xbf
 
-#define	DFWADDR         		0xc0
-
 #define	DSPFLTRCTL      		0xc0
 #define		FLTRDISABLE     	0x20
 #define		EDGESENSE       	0x10
 #define		DSPFCNTSEL      	0x0f
 
+#define	DFWADDR         		0xc0
+
 #define	DSPDATACTL      		0xc1
 #define		BYPASSENAB      	0x80
 #define		DESQDIS         	0x10
 #define		RCVROFFSTDIS    	0x04
 #define		XMITOFFSTDIS    	0x02
 
-#define	DFRADDR         		0xc2
-
 #define	DSPREQCTL       		0xc2
 #define		MANREQCTL       	0xc0
 #define		MANREQDLY       	0x3f
 
+#define	DFRADDR         		0xc2
+
 #define	DSPACKCTL       		0xc3
 #define		MANACKCTL       	0xc0
 #define		MANACKDLY       	0x3f
@@ -2311,14 +1436,14 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 
 #define	WRTBIASCALC     		0xc7
 
-#define	RCVRBIASCALC    		0xc8
-
 #define	DFPTRS          		0xc8
 
-#define	SKEWCALC        		0xc9
+#define	RCVRBIASCALC    		0xc8
 
 #define	DFBKPTR         		0xc9
 
+#define	SKEWCALC        		0xc9
+
 #define	DFDBCTL         		0xcb
 #define		DFF_CIO_WR_RDY  	0x20
 #define		DFF_CIO_RD_RDY  	0x10
@@ -2403,12 +1528,12 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 
 #define	ACCUM_SAVE      		0xfa
 
-#define	WAITING_SCB_TAILS		0x100
-
 #define	AHD_PCI_CONFIG_BASE		0x100
 
 #define	SRAM_BASE       		0x100
 
+#define	WAITING_SCB_TAILS		0x100
+
 #define	WAITING_TID_HEAD		0x120
 
 #define	WAITING_TID_TAIL		0x122
@@ -2437,8 +1562,8 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define		PRELOADEN       	0x80
 #define		WIDEODD         	0x40
 #define		SCSIEN          	0x20
-#define		SDMAEN          	0x10
 #define		SDMAENACK       	0x10
+#define		SDMAEN          	0x10
 #define		HDMAEN          	0x08
 #define		HDMAENACK       	0x08
 #define		DIRECTION       	0x04
@@ -2536,12 +1661,12 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 
 #define	MK_MESSAGE_SCSIID		0x162
 
-#define	SCB_BASE        		0x180
-
 #define	SCB_RESIDUAL_DATACNT		0x180
 #define	SCB_CDB_STORE   		0x180
 #define	SCB_HOST_CDB_PTR		0x180
 
+#define	SCB_BASE        		0x180
+
 #define	SCB_RESIDUAL_SGPTR		0x184
 #define		SG_ADDR_MASK    	0xf8
 #define		SG_OVERRUN_RESID	0x02
@@ -2609,77 +1734,77 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define	SCB_DISCONNECTED_LISTS		0x1b8
 
 
+#define	STIMESEL_SHIFT	0x03
+#define	STIMESEL_MIN	0x18
+#define	INVALID_ADDR	0x80
+#define	CMD_GROUP_CODE_SHIFT	0x05
+#define	AHD_PRECOMP_MASK	0x07
+#define	TARGET_DATA_IN	0x01
+#define	SEEOP_EWEN_ADDR	0xc0
+#define	NUMDSPS 	0x14
+#define	DST_MODE_SHIFT	0x04
+#define	CCSCBADDR_MAX	0x80
+#define	AHD_ANNEXCOL_PER_DEV0	0x04
+#define	TARGET_CMD_CMPLT	0xfe
+#define	SEEOP_WRAL_ADDR	0x40
+#define	BUS_8_BIT	0x00
 #define	AHD_TIMER_MAX_US	0x18ffe7
 #define	AHD_TIMER_MAX_TICKS	0xffff
 #define	AHD_SENSE_BUFSIZE	0x100
-#define	BUS_8_BIT	0x00
-#define	TARGET_CMD_CMPLT	0xfe
-#define	SEEOP_WRAL_ADDR	0x40
-#define	AHD_AMPLITUDE_DEF	0x07
-#define	AHD_PRECOMP_CUTBACK_37	0x07
 #define	AHD_PRECOMP_SHIFT	0x00
+#define	AHD_PRECOMP_CUTBACK_37	0x07
 #define	AHD_ANNEXCOL_PRECOMP_SLEW	0x04
-#define	AHD_TIMER_US_PER_TICK	0x19
-#define	SCB_TRANSFER_SIZE_FULL_LUN	0x38
+#define	AHD_AMPLITUDE_DEF	0x07
+#define	WRTBIASCTL_HP_DEFAULT	0x00
+#define	TID_SHIFT	0x04
 #define	STATUS_QUEUE_FULL	0x28
 #define	STATUS_BUSY	0x08
-#define	MAX_OFFSET_NON_PACED	0x7f
+#define	SEEOP_EWDS_ADDR	0x00
+#define	SCB_TRANSFER_SIZE_FULL_LUN	0x38
+#define	MK_MESSAGE_BIT_OFFSET	0x04
 #define	MAX_OFFSET_PACED	0xfe
-#define	BUS_32_BIT	0x02
+#define	MAX_OFFSET_NON_PACED	0x7f
+#define	LUNLEN_SINGLE_LEVEL_LUN	0x0f
 #define	CCSGADDR_MAX	0x80
-#define	TID_SHIFT	0x04
-#define	MK_MESSAGE_BIT_OFFSET	0x04
-#define	WRTBIASCTL_HP_DEFAULT	0x00
-#define	SEEOP_EWDS_ADDR	0x00
-#define	AHD_AMPLITUDE_SHIFT	0x00
-#define	AHD_AMPLITUDE_MASK	0x07
-#define	AHD_ANNEXCOL_AMPLITUDE	0x06
-#define	AHD_SLEWRATE_DEF_REVA	0x08
+#define	B_CURRFIFO_0	0x02
+#define	BUS_32_BIT	0x02
+#define	AHD_TIMER_US_PER_TICK	0x19
 #define	AHD_SLEWRATE_SHIFT	0x03
 #define	AHD_SLEWRATE_MASK	0x78
+#define	AHD_SLEWRATE_DEF_REVA	0x08
 #define	AHD_PRECOMP_CUTBACK_29	0x06
 #define	AHD_NUM_PER_DEV_ANNEXCOLS	0x04
-#define	B_CURRFIFO_0	0x02
-#define	LUNLEN_SINGLE_LEVEL_LUN	0x0f
-#define	NVRAM_SCB_OFFSET	0x2c
+#define	AHD_ANNEXCOL_AMPLITUDE	0x06
+#define	AHD_AMPLITUDE_SHIFT	0x00
+#define	AHD_AMPLITUDE_MASK	0x07
+#define	STIMESEL_BUG_ADJ	0x08
 #define	STATUS_PKT_SENSE	0xff
-#define	CMD_GROUP_CODE_SHIFT	0x05
+#define	SRC_MODE_SHIFT	0x00
+#define	SEEOP_ERAL_ADDR	0x80
+#define	NVRAM_SCB_OFFSET	0x2c
 #define	MAX_OFFSET_PACED_BUG	0x7f
-#define	STIMESEL_BUG_ADJ	0x08
-#define	STIMESEL_MIN	0x18
-#define	STIMESEL_SHIFT	0x03
 #define	CCSGRAM_MAXSEGS	0x10
-#define	INVALID_ADDR	0x80
-#define	SEEOP_ERAL_ADDR	0x80
 #define	AHD_SLEWRATE_DEF_REVB	0x08
 #define	AHD_PRECOMP_CUTBACK_17	0x04
-#define	AHD_PRECOMP_MASK	0x07
-#define	SRC_MODE_SHIFT	0x00
-#define	PKT_OVERRUN_BUFSIZE	0x200
 #define	SCB_TRANSFER_SIZE_1BYTE_LUN	0x30
-#define	TARGET_DATA_IN	0x01
-#define	HOST_MSG	0xff
+#define	PKT_OVERRUN_BUFSIZE	0x200
 #define	MAX_OFFSET	0xfe
+#define	HOST_MSG	0xff
 #define	BUS_16_BIT	0x01
-#define	CCSCBADDR_MAX	0x80
-#define	NUMDSPS 	0x14
-#define	SEEOP_EWEN_ADDR	0xc0
-#define	AHD_ANNEXCOL_PER_DEV0	0x04
-#define	DST_MODE_SHIFT	0x04
 
 
 /* Downloaded Constant Definitions */
+#define	SG_SIZEOF	0x04
+#define	SG_PREFETCH_ALIGN_MASK	0x02
+#define	SG_PREFETCH_CNT_LIMIT	0x01
 #define	CACHELINE_MASK	0x07
 #define	SCB_TRANSFER_SIZE	0x06
 #define	PKT_OVERRUN_BUFOFFSET	0x05
-#define	SG_SIZEOF	0x04
 #define	SG_PREFETCH_ADDR_MASK	0x03
-#define	SG_PREFETCH_ALIGN_MASK	0x02
-#define	SG_PREFETCH_CNT_LIMIT	0x01
 #define	SG_PREFETCH_CNT	0x00
 #define	DOWNLOAD_CONST_COUNT	0x08
 
 
 /* Exported Labels */
-#define	LABEL_seq_isr 	0x28f
 #define	LABEL_timer_isr	0x28b
+#define	LABEL_seq_isr 	0x28f
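Editorial aside (not part of the patch): the long run of removals and reorderings above appears to be regenerated tool output for the aic79xx register header, where every register follows one conditional-print pattern — a real decoder is declared when debug register printing is built in, and a macro falls back to a generic one-line dump otherwise. A minimal sketch of that pattern, using a made-up register name and offset ("EXAMPLE", 0x00):

#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_example_print;	/* real decoder lives in the *_reg_print.c_shipped file */
#else
#define ahd_example_print(regvalue, cur_col, wrap) \
    ahd_print_register(NULL, 0, "EXAMPLE", 0x00, regvalue, cur_col, wrap)
#endif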
diff --git a/drivers/scsi/aic7xxx/aic79xx_reg_print.c_shipped b/drivers/scsi/aic7xxx/aic79xx_reg_print.c_shipped
index f5ea715d6ac3..2e0c58905b9e 100644
--- a/drivers/scsi/aic7xxx/aic79xx_reg_print.c_shipped
+++ b/drivers/scsi/aic7xxx/aic79xx_reg_print.c_shipped
@@ -234,6 +234,23 @@ ahd_selid_print(u_int regvalue, u_int *cur_col, u_int wrap)
 	    0x49, regvalue, cur_col, wrap));
 }
 
+static const ahd_reg_parse_entry_t SIMODE0_parse_table[] = {
+	{ "ENARBDO",		0x01, 0x01 },
+	{ "ENSPIORDY",		0x02, 0x02 },
+	{ "ENOVERRUN",		0x04, 0x04 },
+	{ "ENIOERR",		0x08, 0x08 },
+	{ "ENSELINGO",		0x10, 0x10 },
+	{ "ENSELDI",		0x20, 0x20 },
+	{ "ENSELDO",		0x40, 0x40 }
+};
+
+int
+ahd_simode0_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(SIMODE0_parse_table, 7, "SIMODE0",
+	    0x4b, regvalue, cur_col, wrap));
+}
+
 static const ahd_reg_parse_entry_t SSTAT0_parse_table[] = {
 	{ "ARBDO",		0x01, 0x01 },
 	{ "SPIORDY",		0x02, 0x02 },
@@ -252,23 +269,6 @@ ahd_sstat0_print(u_int regvalue, u_int *cur_col, u_int wrap)
 	    0x4b, regvalue, cur_col, wrap));
 }
 
-static const ahd_reg_parse_entry_t SIMODE0_parse_table[] = {
-	{ "ENARBDO",		0x01, 0x01 },
-	{ "ENSPIORDY",		0x02, 0x02 },
-	{ "ENOVERRUN",		0x04, 0x04 },
-	{ "ENIOERR",		0x08, 0x08 },
-	{ "ENSELINGO",		0x10, 0x10 },
-	{ "ENSELDI",		0x20, 0x20 },
-	{ "ENSELDO",		0x40, 0x40 }
-};
-
-int
-ahd_simode0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-	return (ahd_print_register(SIMODE0_parse_table, 7, "SIMODE0",
-	    0x4b, regvalue, cur_col, wrap));
-}
-
 static const ahd_reg_parse_entry_t SSTAT1_parse_table[] = {
 	{ "REQINIT",		0x01, 0x01 },
 	{ "STRB2FAST",		0x02, 0x02 },
diff --git a/drivers/scsi/aic7xxx/aic7xxx_core.c b/drivers/scsi/aic7xxx/aic7xxx_core.c
index 64ab9eaec428..381846164003 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_core.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_core.c
@@ -7340,7 +7340,6 @@ ahc_dump_card_state(struct ahc_softc *ahc)
 		printk("\n");
 	}
 
-	ahc_platform_dump_card_state(ahc);
 	printk("\n<<<<<<<<<<<<<<<<< Dump Card State Ends >>>>>>>>>>>>>>>>>>\n");
 	ahc_outb(ahc, SCBPTR, saved_scbptr);
 	if (paused == 0)
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index fc6a83188c1e..acd687f4554e 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -2329,11 +2329,6 @@ done:
 	return (retval);
 }
 
-void
-ahc_platform_dump_card_state(struct ahc_softc *ahc)
-{
-}
-
 static void ahc_linux_set_width(struct scsi_target *starget, int width)
 {
 	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.h b/drivers/scsi/aic7xxx/aic7xxx_osm.h
index 54c702864103..f8489078f003 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.h
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.h
@@ -688,7 +688,6 @@ void	ahc_done(struct ahc_softc*, struct scb*);
 void	ahc_send_async(struct ahc_softc *, char channel,
 		       u_int target, u_int lun, ac_code);
 void	ahc_print_path(struct ahc_softc *, struct scb *);
-void	ahc_platform_dump_card_state(struct ahc_softc *ahc);
 
 #ifdef CONFIG_PCI
 #define AHC_PCI_CONFIG 1
diff --git a/drivers/scsi/aic7xxx/aic7xxx_reg.h_shipped b/drivers/scsi/aic7xxx/aic7xxx_reg.h_shipped
index e821082a4f47..473039df0ed5 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_reg.h_shipped
+++ b/drivers/scsi/aic7xxx/aic7xxx_reg.h_shipped
@@ -244,8 +244,6 @@ ahc_reg_print_t ahc_scb_tag_print;
 
 #define	SCSIDATH        		0x07
 
-#define	STCNT           		0x08
-
 #define	OPTIONMODE      		0x08
 #define		OPTIONMODE_DEFAULTS	0x03
 #define		AUTORATEEN      	0x80
@@ -257,6 +255,8 @@ ahc_reg_print_t ahc_scb_tag_print;
 #define		AUTO_MSGOUT_DE  	0x02
 #define		DIS_MSGIN_DUALEDGE	0x01
 
+#define	STCNT           		0x08
+
 #define	TARGCRCCNT      		0x0a
 
 #define	CLRSINT0        		0x0b
@@ -365,8 +365,6 @@ ahc_reg_print_t ahc_scb_tag_print;
 #define		ALTSTIM         	0x20
 #define		DFLTTID         	0x10
 
-#define	TARGID          		0x1b
-
 #define	SPIOCAP         		0x1b
 #define		SOFT1           	0x80
 #define		SOFT0           	0x40
@@ -377,12 +375,14 @@ ahc_reg_print_t ahc_scb_tag_print;
 #define		ROM             	0x02
 #define		SSPIOCPS        	0x01
 
+#define	TARGID          		0x1b
+
 #define	BRDCTL          		0x1d
 #define		BRDDAT7         	0x80
 #define		BRDDAT6         	0x40
 #define		BRDDAT5         	0x20
-#define		BRDDAT4         	0x10
 #define		BRDSTB          	0x10
+#define		BRDDAT4         	0x10
 #define		BRDDAT3         	0x08
 #define		BRDCS           	0x08
 #define		BRDDAT2         	0x04
@@ -406,8 +406,8 @@ ahc_reg_print_t ahc_scb_tag_print;
 #define		DIAGLEDEN       	0x80
 #define		DIAGLEDON       	0x40
 #define		AUTOFLUSHDIS    	0x20
-#define		ENAB40          	0x08
 #define		SELBUSB         	0x08
+#define		ENAB40          	0x08
 #define		ENAB20          	0x04
 #define		SELWIDE         	0x02
 #define		XCVR            	0x01
@@ -730,8 +730,8 @@ ahc_reg_print_t ahc_scb_tag_print;
 #define	SCB_BASE        		0xa0
 
 #define	SCB_CDB_PTR     		0xa0
-#define	SCB_RESIDUAL_DATACNT		0xa0
 #define	SCB_CDB_STORE   		0xa0
+#define	SCB_RESIDUAL_DATACNT		0xa0
 
 #define	SCB_RESIDUAL_SGPTR		0xa4
 
@@ -756,8 +756,8 @@ ahc_reg_print_t ahc_scb_tag_print;
 
 #define	SCB_CONTROL     		0xb8
 #define		SCB_TAG_TYPE    	0x03
-#define		STATUS_RCVD     	0x80
 #define		TARGET_SCB      	0x80
+#define		STATUS_RCVD     	0x80
 #define		DISCENB         	0x40
 #define		TAG_ENB         	0x20
 #define		MK_MESSAGE      	0x10
@@ -872,40 +872,40 @@ ahc_reg_print_t ahc_scb_tag_print;
 #define	SG_CACHE_PRE    		0xfc
 
 
+#define	TARGET_CMD_CMPLT	0xfe
 #define	MAX_OFFSET_ULTRA2	0x7f
 #define	MAX_OFFSET_16BIT	0x08
 #define	BUS_8_BIT	0x00
-#define	TARGET_CMD_CMPLT	0xfe
+#define	TID_SHIFT	0x04
 #define	STATUS_QUEUE_FULL	0x28
 #define	STATUS_BUSY	0x08
-#define	MAX_OFFSET_8BIT	0x0f
-#define	BUS_32_BIT	0x02
-#define	CCSGADDR_MAX	0x80
-#define	TID_SHIFT	0x04
 #define	SCB_DOWNLOAD_SIZE_64	0x30
+#define	MAX_OFFSET_8BIT	0x0f
 #define	HOST_MAILBOX_SHIFT	0x04
-#define	CMD_GROUP_CODE_SHIFT	0x05
-#define	CCSGRAM_MAXSEGS	0x10
-#define	SCB_LIST_NULL	0xff
+#define	CCSGADDR_MAX	0x80
+#define	BUS_32_BIT	0x02
 #define	SG_SIZEOF	0x08
-#define	SCB_DOWNLOAD_SIZE	0x20
 #define	SEQ_MAILBOX_SHIFT	0x00
+#define	SCB_LIST_NULL	0xff
+#define	SCB_DOWNLOAD_SIZE	0x20
+#define	CMD_GROUP_CODE_SHIFT	0x05
+#define	CCSGRAM_MAXSEGS	0x10
 #define	TARGET_DATA_IN	0x01
-#define	HOST_MSG	0xff
+#define	STACK_SIZE	0x04
+#define	SCB_UPLOAD_SIZE	0x20
 #define	MAX_OFFSET	0x7f
+#define	HOST_MSG	0xff
 #define	BUS_16_BIT	0x01
-#define	SCB_UPLOAD_SIZE	0x20
-#define	STACK_SIZE	0x04
 
 
 /* Downloaded Constant Definitions */
 #define	INVERTED_CACHESIZE_MASK	0x03
-#define	SG_PREFETCH_ADDR_MASK	0x06
 #define	SG_PREFETCH_ALIGN_MASK	0x05
+#define	SG_PREFETCH_ADDR_MASK	0x06
 #define	QOUTFIFO_OFFSET	0x00
 #define	SG_PREFETCH_CNT	0x04
-#define	CACHESIZE_MASK	0x02
 #define	QINFIFO_OFFSET	0x01
+#define	CACHESIZE_MASK	0x02
 #define	DOWNLOAD_CONST_COUNT	0x07
 
 
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index a14ba7a6b81e..6c838865ac5a 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -70,7 +70,7 @@ static struct scsi_host_template aic94xx_sht = {
 	.max_sectors		= SCSI_DEFAULT_MAX_SECTORS,
 	.use_clustering		= ENABLE_CLUSTERING,
 	.eh_device_reset_handler	= sas_eh_device_reset_handler,
-	.eh_bus_reset_handler	= sas_eh_bus_reset_handler,
+	.eh_target_reset_handler	= sas_eh_target_reset_handler,
 	.target_destroy		= sas_target_destroy,
 	.ioctl			= sas_ioctl,
 	.track_queue_depth	= 1,
@@ -956,11 +956,11 @@ static int asd_scan_finished(struct Scsi_Host *shost, unsigned long time)
 	return 1;
 }
 
-static ssize_t asd_version_show(struct device_driver *driver, char *buf)
+static ssize_t version_show(struct device_driver *driver, char *buf)
 {
 	return snprintf(buf, PAGE_SIZE, "%s\n", ASD_DRIVER_VERSION);
 }
-static DRIVER_ATTR(version, S_IRUGO, asd_version_show, NULL);
+static DRIVER_ATTR_RO(version);
 
 static int asd_create_driver_attrs(struct device_driver *driver)
 {
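Editorial aside (not part of the patch): the aic94xx hunk above is the stock DRIVER_ATTR_RO() conversion — the macro generates a read-only driver_attribute named driver_attr_<name> and expects its show routine to be called <name>_show, which is why asd_version_show is renamed to version_show before the macro can be used. A hedged sketch of the pattern with a hypothetical attribute name "foo":

#include <linux/device.h>

static ssize_t foo_show(struct device_driver *driver, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%s\n", "example value");
}
/* Creates driver_attr_foo with mode 0444, wired to foo_show() above. */
static DRIVER_ATTR_RO(foo);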
diff --git a/drivers/scsi/arcmsr/arcmsr_attr.c b/drivers/scsi/arcmsr/arcmsr_attr.c
index 9c86481f779f..259d9c20bf25 100644
--- a/drivers/scsi/arcmsr/arcmsr_attr.c
+++ b/drivers/scsi/arcmsr/arcmsr_attr.c
@@ -190,7 +190,7 @@ static ssize_t arcmsr_sysfs_iop_message_clear(struct file *filp,
 	return 1;
 }
 
-static struct bin_attribute arcmsr_sysfs_message_read_attr = {
+static const struct bin_attribute arcmsr_sysfs_message_read_attr = {
 	.attr = {
 		.name = "mu_read",
 		.mode = S_IRUSR ,
@@ -199,7 +199,7 @@ static struct bin_attribute arcmsr_sysfs_message_read_attr = {
 	.read = arcmsr_sysfs_iop_message_read,
 };
 
-static struct bin_attribute arcmsr_sysfs_message_write_attr = {
+static const struct bin_attribute arcmsr_sysfs_message_write_attr = {
 	.attr = {
 		.name = "mu_write",
 		.mode = S_IWUSR,
@@ -208,7 +208,7 @@ static struct bin_attribute arcmsr_sysfs_message_write_attr = {
 	.write = arcmsr_sysfs_iop_message_write,
 };
 
-static struct bin_attribute arcmsr_sysfs_message_clear_attr = {
+static const struct bin_attribute arcmsr_sysfs_message_clear_attr = {
 	.attr = {
 		.name = "mu_clear",
 		.mode = S_IWUSR,
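Editorial aside (not part of the patch): the arcmsr hunks only add const to the bin_attribute descriptors; the sysfs registration helpers (e.g. sysfs_create_bin_file()) already take const pointers, so the descriptors never need to be writable and can live in read-only data. A minimal sketch with a hypothetical attribute and read callback:

#include <linux/sysfs.h>

static ssize_t example_read(struct file *filp, struct kobject *kobj,
			    struct bin_attribute *attr, char *buf,
			    loff_t off, size_t count)
{
	return 0;	/* nothing to report in this sketch */
}

static const struct bin_attribute example_attr = {
	.attr = { .name = "example", .mode = S_IRUSR },
	.read = example_read,
};

/* Registration happens elsewhere: sysfs_create_bin_file(kobj, &example_attr); */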
diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c
index 12b88294d667..690816f3c6af 100644
--- a/drivers/scsi/arm/acornscsi.c
+++ b/drivers/scsi/arm/acornscsi.c
@@ -2725,23 +2725,24 @@ int acornscsi_abort(struct scsi_cmnd *SCpnt)
  * Params   : SCpnt  - command causing reset
  * Returns  : one of SCSI_RESET_ macros
  */
-int acornscsi_bus_reset(struct scsi_cmnd *SCpnt)
+int acornscsi_host_reset(struct Scsi_Host *shpnt)
 {
-	AS_Host *host = (AS_Host *)SCpnt->device->host->hostdata;
+	AS_Host *host = (AS_Host *)shpnt->hostdata;
 	struct scsi_cmnd *SCptr;
     
     host->stats.resets += 1;
 
 #if (DEBUG & DEBUG_RESET)
     {
-	int asr, ssr;
+	int asr, ssr, devidx;
 
 	asr = sbic_arm_read(host, SBIC_ASR);
 	ssr = sbic_arm_read(host, SBIC_SSR);
 
 	printk(KERN_WARNING "acornscsi_reset: ");
 	print_sbic_status(asr, ssr, host->scsi.phase);
-	acornscsi_dumplog(host, SCpnt->device->id);
+	for (devidx = 0; devidx < 9; devidx ++) {
+	    acornscsi_dumplog(host, devidx);
     }
 #endif
 
@@ -2884,7 +2885,7 @@ static struct scsi_host_template acornscsi_template = {
 	.info			= acornscsi_info,
 	.queuecommand		= acornscsi_queuecmd,
 	.eh_abort_handler	= acornscsi_abort,
-	.eh_bus_reset_handler	= acornscsi_bus_reset,
+	.eh_host_reset_handler	= acornscsi_host_reset,
 	.can_queue		= 16,
 	.this_id		= 7,
 	.sg_tablesize		= SG_ALL,
diff --git a/drivers/scsi/arm/cumana_1.c b/drivers/scsi/arm/cumana_1.c
index a87b99c7fb9a..ae1d809904fb 100644
--- a/drivers/scsi/arm/cumana_1.c
+++ b/drivers/scsi/arm/cumana_1.c
@@ -216,7 +216,7 @@ static struct scsi_host_template cumanascsi_template = {
 	.info			= cumanascsi_info,
 	.queuecommand		= cumanascsi_queue_command,
 	.eh_abort_handler	= NCR5380_abort,
-	.eh_bus_reset_handler	= NCR5380_bus_reset,
+	.eh_host_reset_handler	= NCR5380_host_reset,
 	.can_queue		= 16,
 	.this_id		= 7,
 	.sg_tablesize		= SG_ALL,
diff --git a/drivers/scsi/arm/oak.c b/drivers/scsi/arm/oak.c
index 6be6666534d4..05b7f755499b 100644
--- a/drivers/scsi/arm/oak.c
+++ b/drivers/scsi/arm/oak.c
@@ -105,7 +105,7 @@ static struct scsi_host_template oakscsi_template = {
 	.info			= oakscsi_info,
 	.queuecommand		= oakscsi_queue_command,
 	.eh_abort_handler	= NCR5380_abort,
-	.eh_bus_reset_handler	= NCR5380_bus_reset,
+	.eh_host_reset_handler	= NCR5380_host_reset,
 	.can_queue		= 16,
 	.this_id		= 7,
 	.sg_tablesize		= SG_ALL,
diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c
index a75feebe6ad6..89f5154c40b6 100644
--- a/drivers/scsi/atari_scsi.c
+++ b/drivers/scsi/atari_scsi.c
@@ -671,7 +671,7 @@ static void atari_scsi_falcon_reg_write(unsigned int reg, u8 value)
 
 #include "NCR5380.c"
 
-static int atari_scsi_bus_reset(struct scsi_cmnd *cmd)
+static int atari_scsi_host_reset(struct scsi_cmnd *cmd)
 {
 	int rv;
 	unsigned long flags;
@@ -688,7 +688,7 @@ static int atari_scsi_bus_reset(struct scsi_cmnd *cmd)
 		atari_dma_orig_addr = NULL;
 	}
 
-	rv = NCR5380_bus_reset(cmd);
+	rv = NCR5380_host_reset(cmd);
 
 	/* The 5380 raises its IRQ line while _RST is active but the ST DMA
 	 * "lock" has been released so this interrupt may end up handled by
@@ -711,7 +711,7 @@ static struct scsi_host_template atari_scsi_template = {
 	.info			= atari_scsi_info,
 	.queuecommand		= atari_scsi_queue_command,
 	.eh_abort_handler	= atari_scsi_abort,
-	.eh_bus_reset_handler	= atari_scsi_bus_reset,
+	.eh_host_reset_handler	= atari_scsi_host_reset,
 	.this_id		= 7,
 	.cmd_per_lun		= 2,
 	.use_clustering		= DISABLE_CLUSTERING,
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index 97dca4681784..43a80ce5ce6a 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -82,8 +82,8 @@ struct iscsi_cls_session *beiscsi_session_create(struct iscsi_endpoint *ep,
 		return NULL;
 	sess = cls_session->dd_data;
 	beiscsi_sess = sess->dd_data;
-	beiscsi_sess->bhs_pool =  pci_pool_create("beiscsi_bhs_pool",
-						   phba->pcidev,
+	beiscsi_sess->bhs_pool =  dma_pool_create("beiscsi_bhs_pool",
+						   &phba->pcidev->dev,
 						   sizeof(struct be_cmd_bhs),
 						   64, 0);
 	if (!beiscsi_sess->bhs_pool)
@@ -108,7 +108,7 @@ void beiscsi_session_destroy(struct iscsi_cls_session *cls_session)
 	struct beiscsi_session *beiscsi_sess = sess->dd_data;
 
 	printk(KERN_INFO "In beiscsi_session_destroy\n");
-	pci_pool_destroy(beiscsi_sess->bhs_pool);
+	dma_pool_destroy(beiscsi_sess->bhs_pool);
 	iscsi_session_teardown(cls_session);
 }
 
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index f862332261f8..b4542e7e2ad5 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -4257,7 +4257,7 @@ static void beiscsi_cleanup_task(struct iscsi_task *task)
 	pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
 
 	if (io_task->cmd_bhs) {
-		pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
+		dma_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
 			      io_task->bhs_pa.u.a64.address);
 		io_task->cmd_bhs = NULL;
 		task->hdr = NULL;
@@ -4374,7 +4374,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
 	struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
 	dma_addr_t paddr;
 
-	io_task->cmd_bhs = pci_pool_alloc(beiscsi_sess->bhs_pool,
+	io_task->cmd_bhs = dma_pool_alloc(beiscsi_sess->bhs_pool,
 					  GFP_ATOMIC, &paddr);
 	if (!io_task->cmd_bhs)
 		return -ENOMEM;
@@ -4501,7 +4501,7 @@ free_hndls:
 	if (io_task->pwrb_handle)
 		free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
 	io_task->pwrb_handle = NULL;
-	pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
+	dma_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
 		      io_task->bhs_pa.u.a64.address);
 	io_task->cmd_bhs = NULL;
 	return -ENOMEM;
diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h
index 338dbe0800c1..81ce3ffda968 100644
--- a/drivers/scsi/be2iscsi/be_main.h
+++ b/drivers/scsi/be2iscsi/be_main.h
@@ -438,7 +438,7 @@ struct beiscsi_hba {
 	 test_bit(BEISCSI_HBA_ONLINE, &phba->state))
 
 struct beiscsi_session {
-	struct pci_pool *bhs_pool;
+	struct dma_pool *bhs_pool;
 };
 
 /**
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index 7eb0eef18fdd..24e657a4ec80 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -373,32 +373,28 @@ out:
 }
 
 /*
- * Scsi_Host template entry, resets the bus and abort all commands.
+ * Scsi_Host template entry, resets the target and abort all commands.
  */
 static int
-bfad_im_reset_bus_handler(struct scsi_cmnd *cmnd)
+bfad_im_reset_target_handler(struct scsi_cmnd *cmnd)
 {
 	struct Scsi_Host *shost = cmnd->device->host;
+	struct scsi_target *starget = scsi_target(cmnd->device);
 	struct bfad_im_port_s *im_port =
 				(struct bfad_im_port_s *) shost->hostdata[0];
 	struct bfad_s         *bfad = im_port->bfad;
 	struct bfad_itnim_s   *itnim;
 	unsigned long   flags;
-	u32        i, rc, err_cnt = 0;
+	u32        rc, rtn = FAILED;
 	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
 	enum bfi_tskim_status task_status;
 
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
-	for (i = 0; i < MAX_FCP_TARGET; i++) {
-		itnim = bfad_get_itnim(im_port, i);
-		if (itnim) {
-			cmnd->SCp.ptr = (char *)&wq;
-			rc = bfad_im_target_reset_send(bfad, cmnd, itnim);
-			if (rc != BFA_STATUS_OK) {
-				err_cnt++;
-				continue;
-			}
-
+	itnim = bfad_get_itnim(im_port, starget->id);
+	if (itnim) {
+		cmnd->SCp.ptr = (char *)&wq;
+		rc = bfad_im_target_reset_send(bfad, cmnd, itnim);
+		if (rc == BFA_STATUS_OK) {
 			/* wait target reset to complete */
 			spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 			wait_event(wq, test_bit(IO_DONE_BIT,
@@ -406,20 +402,17 @@ bfad_im_reset_bus_handler(struct scsi_cmnd *cmnd)
 			spin_lock_irqsave(&bfad->bfad_lock, flags);
 
 			task_status = cmnd->SCp.Status >> 1;
-			if (task_status != BFI_TSKIM_STS_OK) {
+			if (task_status != BFI_TSKIM_STS_OK)
 				BFA_LOG(KERN_ERR, bfad, bfa_log_level,
 					"target reset failure,"
 					" status: %d\n", task_status);
-				err_cnt++;
-			}
+			else
+				rtn = SUCCESS;
 		}
 	}
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 
-	if (err_cnt)
-		return FAILED;
-
-	return SUCCESS;
+	return rtn;
 }
 
 /*
@@ -816,7 +809,7 @@ struct scsi_host_template bfad_im_scsi_host_template = {
 	.eh_timed_out = fc_eh_timed_out,
 	.eh_abort_handler = bfad_im_abort_handler,
 	.eh_device_reset_handler = bfad_im_reset_lun_handler,
-	.eh_bus_reset_handler = bfad_im_reset_bus_handler,
+	.eh_target_reset_handler = bfad_im_reset_target_handler,
 
 	.slave_alloc = bfad_im_slave_alloc,
 	.slave_configure = bfad_im_slave_configure,
@@ -839,7 +832,7 @@ struct scsi_host_template bfad_im_vport_template = {
 	.eh_timed_out = fc_eh_timed_out,
 	.eh_abort_handler = bfad_im_abort_handler,
 	.eh_device_reset_handler = bfad_im_reset_lun_handler,
-	.eh_bus_reset_handler = bfad_im_reset_bus_handler,
+	.eh_target_reset_handler = bfad_im_reset_target_handler,
 
 	.slave_alloc = bfad_im_slave_alloc,
 	.slave_configure = bfad_im_slave_configure,
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index 7e007e142aab..901a31632493 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -539,7 +539,6 @@ void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
 void bnx2fc_add_2_sq(struct bnx2fc_rport *tgt, u16 xid);
 void bnx2fc_ring_doorbell(struct bnx2fc_rport *tgt);
 int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd);
-int bnx2fc_eh_host_reset(struct scsi_cmnd *sc_cmd);
 int bnx2fc_eh_target_reset(struct scsi_cmnd *sc_cmd);
 int bnx2fc_eh_device_reset(struct scsi_cmnd *sc_cmd);
 void bnx2fc_rport_event_handler(struct fc_lport *lport,
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
index dad959fcf6d8..c535c52e72e5 100644
--- a/drivers/scsi/ch.c
+++ b/drivers/scsi/ch.c
@@ -105,6 +105,7 @@ do {									\
 static struct class * ch_sysfs_class;
 
 typedef struct {
+	struct kref         ref;
 	struct list_head    list;
 	int                 minor;
 	char                name[8];
@@ -563,13 +564,23 @@ static int ch_gstatus(scsi_changer *ch, int type, unsigned char __user *dest)
 
 /* ------------------------------------------------------------------------ */
 
+static void ch_destroy(struct kref *ref)
+{
+	scsi_changer *ch = container_of(ref, scsi_changer, ref);
+
+	kfree(ch->dt);
+	kfree(ch);
+}
+
 static int
 ch_release(struct inode *inode, struct file *file)
 {
 	scsi_changer *ch = file->private_data;
 
 	scsi_device_put(ch->device);
+	ch->device = NULL;
 	file->private_data = NULL;
+	kref_put(&ch->ref, ch_destroy);
 	return 0;
 }
 
@@ -588,6 +599,7 @@ ch_open(struct inode *inode, struct file *file)
 		mutex_unlock(&ch_mutex);
 		return -ENXIO;
 	}
+	kref_get(&ch->ref);
 	spin_unlock(&ch_index_lock);
 
 	file->private_data = ch;
@@ -935,8 +947,11 @@ static int ch_probe(struct device *dev)
 	}
 
 	mutex_init(&ch->lock);
+	kref_init(&ch->ref);
 	ch->device = sd;
-	ch_readconfig(ch);
+	ret = ch_readconfig(ch);
+	if (ret)
+		goto destroy_dev;
 	if (init)
 		ch_init_elem(ch);
 
@@ -944,6 +959,8 @@ static int ch_probe(struct device *dev)
 	sdev_printk(KERN_INFO, sd, "Attached scsi changer %s\n", ch->name);
 
 	return 0;
+destroy_dev:
+	device_destroy(ch_sysfs_class, MKDEV(SCSI_CHANGER_MAJOR, ch->minor));
 remove_idr:
 	idr_remove(&ch_index_idr, ch->minor);
 free_ch:
@@ -960,8 +977,7 @@ static int ch_remove(struct device *dev)
 	spin_unlock(&ch_index_lock);
 
 	device_destroy(ch_sysfs_class, MKDEV(SCSI_CHANGER_MAJOR,ch->minor));
-	kfree(ch->dt);
-	kfree(ch);
+	kref_put(&ch->ref, ch_destroy);
 	return 0;
 }
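The ch.c hunks above switch the changer object to reference counting, so a process that still holds the character device open keeps the structure alive after ch_remove(); whichever of ch_release() or ch_remove() drops the last reference ends up calling ch_destroy(). A minimal sketch of that kref idiom with a made-up object type (nothing below is taken from the driver):

#include <linux/kref.h>
#include <linux/slab.h>

struct example_obj {
	struct kref ref;
	/* ... payload ... */
};

static void example_release(struct kref *ref)
{
	struct example_obj *obj = container_of(ref, struct example_obj, ref);

	kfree(obj);
}

/* Creation site holds the initial reference (refcount = 1). */
static struct example_obj *example_create(void)
{
	struct example_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (obj)
		kref_init(&obj->ref);
	return obj;
}

/* Each opener takes a reference with kref_get(&obj->ref); every user path
 * drops it like this, and the final put invokes example_release().
 */
static void example_put(struct example_obj *obj)
{
	kref_put(&obj->ref, example_release);
}
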
 
diff --git a/drivers/scsi/csiostor/csio_hw.h b/drivers/scsi/csiostor/csio_hw.h
index 9acb89538e29..667046419b19 100644
--- a/drivers/scsi/csiostor/csio_hw.h
+++ b/drivers/scsi/csiostor/csio_hw.h
@@ -465,7 +465,7 @@ struct csio_hw {
 	struct csio_pport	pport[CSIO_MAX_PPORTS];	/* Ports (XGMACs) */
 	struct csio_hw_params	params;			/* Hw parameters */
 
-	struct pci_pool		*scsi_pci_pool;		/* PCI pool for SCSI */
+	struct dma_pool		*scsi_dma_pool;		/* DMA pool for SCSI */
 	mempool_t		*mb_mempool;		/* Mailbox memory pool*/
 	mempool_t		*rnode_mempool;		/* rnode memory pool */
 
diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c
index dcd074169aa9..28a9c7d706cb 100644
--- a/drivers/scsi/csiostor/csio_init.c
+++ b/drivers/scsi/csiostor/csio_init.c
@@ -485,9 +485,10 @@ csio_resource_alloc(struct csio_hw *hw)
 	if (!hw->rnode_mempool)
 		goto err_free_mb_mempool;
 
-	hw->scsi_pci_pool = pci_pool_create("csio_scsi_pci_pool", hw->pdev,
-					    CSIO_SCSI_RSP_LEN, 8, 0);
-	if (!hw->scsi_pci_pool)
+	hw->scsi_dma_pool = dma_pool_create("csio_scsi_dma_pool",
+					    &hw->pdev->dev, CSIO_SCSI_RSP_LEN,
+					    8, 0);
+	if (!hw->scsi_dma_pool)
 		goto err_free_rn_pool;
 
 	return 0;
@@ -505,8 +506,8 @@ err:
 static void
 csio_resource_free(struct csio_hw *hw)
 {
-	pci_pool_destroy(hw->scsi_pci_pool);
-	hw->scsi_pci_pool = NULL;
+	dma_pool_destroy(hw->scsi_dma_pool);
+	hw->scsi_dma_pool = NULL;
 	mempool_destroy(hw->rnode_mempool);
 	hw->rnode_mempool = NULL;
 	mempool_destroy(hw->mb_mempool);
diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c
index a1ff75f1384f..dab0d3f9bee1 100644
--- a/drivers/scsi/csiostor/csio_scsi.c
+++ b/drivers/scsi/csiostor/csio_scsi.c
@@ -2445,7 +2445,7 @@ csio_scsim_init(struct csio_scsim *scm, struct csio_hw *hw)
 
 		/* Allocate Dma buffers for Response Payload */
 		dma_buf = &ioreq->dma_buf;
-		dma_buf->vaddr = pci_pool_alloc(hw->scsi_pci_pool, GFP_KERNEL,
+		dma_buf->vaddr = dma_pool_alloc(hw->scsi_dma_pool, GFP_KERNEL,
 						&dma_buf->paddr);
 		if (!dma_buf->vaddr) {
 			csio_err(hw,
@@ -2485,7 +2485,7 @@ free_ioreq:
 		ioreq = (struct csio_ioreq *)tmp;
 
 		dma_buf = &ioreq->dma_buf;
-		pci_pool_free(hw->scsi_pci_pool, dma_buf->vaddr,
+		dma_pool_free(hw->scsi_dma_pool, dma_buf->vaddr,
 			      dma_buf->paddr);
 
 		kfree(ioreq);
@@ -2516,7 +2516,7 @@ csio_scsim_exit(struct csio_scsim *scm)
 		ioreq = (struct csio_ioreq *)tmp;
 
 		dma_buf = &ioreq->dma_buf;
-		pci_pool_free(scm->hw->scsi_pci_pool, dma_buf->vaddr,
+		dma_pool_free(scm->hw->scsi_dma_pool, dma_buf->vaddr,
 			      dma_buf->paddr);
 
 		kfree(ioreq);
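The be2iscsi and csiostor hunks above are mechanical pci_pool -> dma_pool conversions: only the create call changes shape, taking a struct device (&pdev->dev) instead of a pci_dev, while the alloc/free/destroy calls keep their arguments. A minimal, hypothetical usage sketch of the dma_pool API (the pool name, 128-byte element size and example_* helper are invented for illustration):

#include <linux/dmapool.h>
#include <linux/pci.h>

/* Create a pool of 128-byte, 8-byte-aligned DMA buffers, take one element,
 * then tear everything down again.
 */
static int example_dma_pool_usage(struct pci_dev *pdev)
{
	struct dma_pool *pool;
	dma_addr_t handle;
	void *vaddr;

	pool = dma_pool_create("example_pool", &pdev->dev, 128, 8, 0);
	if (!pool)
		return -ENOMEM;

	vaddr = dma_pool_alloc(pool, GFP_KERNEL, &handle);
	if (!vaddr) {
		dma_pool_destroy(pool);
		return -ENOMEM;
	}

	/* ... hand vaddr (CPU address) and handle (bus address) to the HW ... */

	dma_pool_free(pool, vaddr, handle);
	dma_pool_destroy(pool);		/* NULL-safe, as the csio teardown above relies on */
	return 0;
}
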
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index 1a4cfa562a60..512c8f1ea5b0 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -585,19 +585,21 @@ static struct cxgbi_sock *cxgbi_sock_create(struct cxgbi_device *cdev)
 
 static struct rtable *find_route_ipv4(struct flowi4 *fl4,
 				      __be32 saddr, __be32 daddr,
-				      __be16 sport, __be16 dport, u8 tos)
+				      __be16 sport, __be16 dport, u8 tos,
+				      int ifindex)
 {
 	struct rtable *rt;
 
 	rt = ip_route_output_ports(&init_net, fl4, NULL, daddr, saddr,
-				   dport, sport, IPPROTO_TCP, tos, 0);
+				   dport, sport, IPPROTO_TCP, tos, ifindex);
 	if (IS_ERR(rt))
 		return NULL;
 
 	return rt;
 }
 
-static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
+static struct cxgbi_sock *
+cxgbi_check_route(struct sockaddr *dst_addr, int ifindex)
 {
 	struct sockaddr_in *daddr = (struct sockaddr_in *)dst_addr;
 	struct dst_entry *dst;
@@ -611,7 +613,8 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
 	int port = 0xFFFF;
 	int err = 0;
 
-	rt = find_route_ipv4(&fl4, 0, daddr->sin_addr.s_addr, 0, daddr->sin_port, 0);
+	rt = find_route_ipv4(&fl4, 0, daddr->sin_addr.s_addr, 0,
+			     daddr->sin_port, 0, ifindex);
 	if (!rt) {
 		pr_info("no route to ipv4 0x%x, port %u.\n",
 			be32_to_cpu(daddr->sin_addr.s_addr),
@@ -693,11 +696,13 @@ err_out:
 
 #if IS_ENABLED(CONFIG_IPV6)
 static struct rt6_info *find_route_ipv6(const struct in6_addr *saddr,
-					const struct in6_addr *daddr)
+					const struct in6_addr *daddr,
+					int ifindex)
 {
 	struct flowi6 fl;
 
 	memset(&fl, 0, sizeof(fl));
+	fl.flowi6_oif = ifindex;
 	if (saddr)
 		memcpy(&fl.saddr, saddr, sizeof(struct in6_addr));
 	if (daddr)
@@ -705,7 +710,8 @@ static struct rt6_info *find_route_ipv6(const struct in6_addr *saddr,
 	return (struct rt6_info *)ip6_route_output(&init_net, NULL, &fl);
 }
 
-static struct cxgbi_sock *cxgbi_check_route6(struct sockaddr *dst_addr)
+static struct cxgbi_sock *
+cxgbi_check_route6(struct sockaddr *dst_addr, int ifindex)
 {
 	struct sockaddr_in6 *daddr6 = (struct sockaddr_in6 *)dst_addr;
 	struct dst_entry *dst;
@@ -719,7 +725,7 @@ static struct cxgbi_sock *cxgbi_check_route6(struct sockaddr *dst_addr)
 	int port = 0xFFFF;
 	int err = 0;
 
-	rt = find_route_ipv6(NULL, &daddr6->sin6_addr);
+	rt = find_route_ipv6(NULL, &daddr6->sin6_addr, ifindex);
 
 	if (!rt) {
 		pr_info("no route to ipv6 %pI6 port %u\n",
@@ -2536,6 +2542,7 @@ struct iscsi_endpoint *cxgbi_ep_connect(struct Scsi_Host *shost,
 	struct cxgbi_endpoint *cep;
 	struct cxgbi_hba *hba = NULL;
 	struct cxgbi_sock *csk;
+	int ifindex = 0;
 	int err = -EINVAL;
 
 	log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK,
@@ -2548,13 +2555,15 @@ struct iscsi_endpoint *cxgbi_ep_connect(struct Scsi_Host *shost,
 			pr_info("shost 0x%p, priv NULL.\n", shost);
 			goto err_out;
 		}
+
+		ifindex = hba->ndev->ifindex;
 	}
 
 	if (dst_addr->sa_family == AF_INET) {
-		csk = cxgbi_check_route(dst_addr);
+		csk = cxgbi_check_route(dst_addr, ifindex);
 #if IS_ENABLED(CONFIG_IPV6)
 	} else if (dst_addr->sa_family == AF_INET6) {
-		csk = cxgbi_check_route6(dst_addr);
+		csk = cxgbi_check_route6(dst_addr, ifindex);
 #endif
 	} else {
 		pr_info("address family 0x%x NOT supported.\n",
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index 6a4367cc9caa..76b8b7eed0c0 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -820,8 +820,7 @@ static void term_afu(struct cxlflash_cfg *cfg)
 	for (k = cfg->afu->num_hwqs - 1; k >= 0; k--)
 		term_intr(cfg, UNMAP_THREE, k);
 
-	if (cfg->afu)
-		stop_afu(cfg);
+	stop_afu(cfg);
 
 	for (k = cfg->afu->num_hwqs - 1; k >= 0; k--)
 		term_mc(cfg, k);
diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c
index ad0f9968ccfb..ed46e8df2e42 100644
--- a/drivers/scsi/cxlflash/superpipe.c
+++ b/drivers/scsi/cxlflash/superpipe.c
@@ -1390,6 +1390,7 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
 	if (unlikely(!ctxi)) {
 		dev_err(dev, "%s: Failed to create context ctxid=%d\n",
 			__func__, ctxid);
+		rc = -ENOMEM;
 		goto err;
 	}
 
@@ -1650,6 +1651,7 @@ static int cxlflash_afu_recover(struct scsi_device *sdev,
 	u64 ctxid = DECODE_CTXID(recover->context_id),
 	    rctxid = recover->context_id;
 	long reg;
+	bool locked = true;
 	int lretry = 20; /* up to 2 seconds */
 	int new_adap_fd = -1;
 	int rc = 0;
@@ -1658,8 +1660,11 @@ static int cxlflash_afu_recover(struct scsi_device *sdev,
 	up_read(&cfg->ioctl_rwsem);
 	rc = mutex_lock_interruptible(mutex);
 	down_read(&cfg->ioctl_rwsem);
-	if (rc)
+	if (rc) {
+		locked = false;
 		goto out;
+	}
+
 	rc = check_state(cfg);
 	if (rc) {
 		dev_err(dev, "%s: Failed state rc=%d\n", __func__, rc);
@@ -1693,8 +1698,10 @@ retry_recover:
 				mutex_unlock(mutex);
 				msleep(100);
 				rc = mutex_lock_interruptible(mutex);
-				if (rc)
+				if (rc) {
+					locked = false;
 					goto out;
+				}
 				goto retry_recover;
 			}
 
@@ -1738,7 +1745,8 @@ retry_recover:
 out:
 	if (likely(ctxi))
 		put_context(ctxi);
-	mutex_unlock(mutex);
+	if (locked)
+		mutex_unlock(mutex);
 	atomic_dec_if_positive(&cfg->recovery_threads);
 	return rc;
 }
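The cxlflash_afu_recover() hunks above add a "locked" flag so the shared exit label only releases the mutex when mutex_lock_interruptible() actually succeeded; the lock attempt can be interrupted by a signal both at entry and inside the retry loop. A minimal sketch of that error-path pattern with a hypothetical function and mutex (not the driver's code):

#include <linux/mutex.h>

static DEFINE_MUTEX(example_mutex);

static int example_do_work(void)
{
	bool locked = true;
	int rc;

	rc = mutex_lock_interruptible(&example_mutex);
	if (rc) {
		locked = false;		/* never acquired: must not unlock below */
		goto out;
	}

	/* ... work that may jump to out: from several places ... */

out:
	if (locked)
		mutex_unlock(&example_mutex);
	return rc;
}
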
diff --git a/drivers/scsi/cxlflash/vlun.c b/drivers/scsi/cxlflash/vlun.c
index bdfb93061460..703bf1e9a64a 100644
--- a/drivers/scsi/cxlflash/vlun.c
+++ b/drivers/scsi/cxlflash/vlun.c
@@ -694,11 +694,7 @@ static int shrink_lxt(struct afu *afu,
 	/* Free LBAs allocated to freed chunks */
 	mutex_lock(&blka->mutex);
 	for (i = delta - 1; i >= 0; i--) {
-		/* Mask the higher 48 bits before shifting, even though
-		 * it is a noop
-		 */
-		aun = (lxt_old[my_new_size + i].rlba_base & SISL_ASTATUS_MASK);
-		aun = (aun >> MC_CHUNK_SHIFT);
+		aun = lxt_old[my_new_size + i].rlba_base >> MC_CHUNK_SHIFT;
 		if (needs_ws)
 			write_same16(sdev, aun, MC_CHUNK_SIZE);
 		ba_free(&blka->ba_lun, aun);
diff --git a/drivers/scsi/dmx3191d.c b/drivers/scsi/dmx3191d.c
index 6af3394d051d..003c3d726238 100644
--- a/drivers/scsi/dmx3191d.c
+++ b/drivers/scsi/dmx3191d.c
@@ -58,7 +58,7 @@ static struct scsi_host_template dmx3191d_driver_template = {
 	.info			= NCR5380_info,
 	.queuecommand		= NCR5380_queue_command,
 	.eh_abort_handler	= NCR5380_abort,
-	.eh_bus_reset_handler	= NCR5380_bus_reset,
+	.eh_host_reset_handler	= NCR5380_host_reset,
 	.can_queue		= 32,
 	.this_id		= 7,
 	.sg_tablesize		= SG_ALL,
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index 256dd6791fcc..fd172b0890d3 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -1169,11 +1169,6 @@ static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u6
 	if(chan < 0 || chan >= MAX_CHANNEL)
 		return NULL;
 	
-	if( pHba->channel[chan].device == NULL){
-		printk(KERN_DEBUG"Adaptec I2O RAID: Trying to find device before they are allocated\n");
-		return NULL;
-	}
-
 	d = pHba->channel[chan].device[id];
 	if(!d || d->tid == 0) {
 		return NULL;
diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
index 227dd2c2ec2f..6501c330d8c8 100644
--- a/drivers/scsi/eata.c
+++ b/drivers/scsi/eata.c
@@ -1899,7 +1899,6 @@ static int eata2x_eh_abort(struct scsi_cmnd *SCarg)
 static int eata2x_eh_host_reset(struct scsi_cmnd *SCarg)
 {
 	unsigned int i, time, k, c, limit = 0;
-	int arg_done = 0;
 	struct scsi_cmnd *SCpnt;
 	struct Scsi_Host *shost = SCarg->device->host;
 	struct hostdata *ha = (struct hostdata *)shost->hostdata;
@@ -1967,9 +1966,6 @@ static int eata2x_eh_host_reset(struct scsi_cmnd *SCarg)
 		if (SCpnt->scsi_done == NULL)
 			panic("%s: reset, mbox %d, SCpnt->scsi_done == NULL.\n",
 			      ha->board_name, i);
-
-		if (SCpnt == SCarg)
-			arg_done = 1;
 	}
 
 	if (do_dma(shost->io_port, 0, RESET_PIO)) {
@@ -2037,10 +2033,7 @@ static int eata2x_eh_host_reset(struct scsi_cmnd *SCarg)
 	ha->in_reset = 0;
 	do_trace = 0;
 
-	if (arg_done)
-		printk("%s: reset, exit, done.\n", ha->board_name);
-	else
-		printk("%s: reset, exit.\n", ha->board_name);
+	printk("%s: reset, exit.\n", ha->board_name);
 
 	spin_unlock_irq(shost->host_lock);
 	return SUCCESS;
diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c
index f2e9d8aa979c..81f226be3e3b 100644
--- a/drivers/scsi/esas2r/esas2r_main.c
+++ b/drivers/scsi/esas2r/esas2r_main.c
@@ -309,7 +309,7 @@ MODULE_PARM_DESC(interrupt_mode,
 		 "Defines the interrupt mode to use.  0 for legacy"
 		 ", 1 for MSI.  Default is MSI (1).");
 
-static struct pci_device_id
+static const struct pci_device_id
 	esas2r_pci_table[] = {
 	{ ATTO_VENDOR_ID, 0x0049,	  ATTO_VENDOR_ID, 0x0049,
 	  0,
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
index 71cb05b1c3eb..c3fc34b9964d 100644
--- a/drivers/scsi/esp_scsi.c
+++ b/drivers/scsi/esp_scsi.c
@@ -597,14 +597,12 @@ static int esp_alloc_lun_tag(struct esp_cmd_entry *ent,
 
 		lp->non_tagged_cmd = ent;
 		return 0;
-	} else {
-		/* Tagged command, see if blocked by a
-		 * non-tagged one.
-		 */
-		if (lp->non_tagged_cmd || lp->hold)
-			return -EBUSY;
 	}
 
+	/* Tagged command. Check that it isn't blocked by a non-tagged one. */
+	if (lp->non_tagged_cmd || lp->hold)
+		return -EBUSY;
+
 	BUG_ON(lp->tagged_cmds[ent->orig_tag[1]]);
 
 	lp->tagged_cmds[ent->orig_tag[1]] = ent;
@@ -1210,12 +1208,6 @@ static int esp_reconnect(struct esp *esp)
 
 	esp->active_cmd = ent;
 
-	if (ent->flags & ESP_CMD_FLAG_ABORT) {
-		esp->msg_out[0] = ABORT_TASK_SET;
-		esp->msg_out_len = 1;
-		scsi_esp_cmd(esp, ESP_CMD_SATN);
-	}
-
 	esp_event(esp, ESP_EVENT_CHECK_PHASE);
 	esp_restore_pointers(esp, ent);
 	esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
@@ -1230,9 +1222,6 @@ static int esp_finish_select(struct esp *esp)
 {
 	struct esp_cmd_entry *ent;
 	struct scsi_cmnd *cmd;
-	u8 orig_select_state;
-
-	orig_select_state = esp->select_state;
 
 	/* No longer selecting.  */
 	esp->select_state = ESP_SELECT_NONE;
@@ -1496,9 +1485,8 @@ static void esp_msgin_reject(struct esp *esp)
 		return;
 	}
 
-	esp->msg_out[0] = ABORT_TASK_SET;
-	esp->msg_out_len = 1;
-	scsi_esp_cmd(esp, ESP_CMD_SATN);
+	shost_printk(KERN_INFO, esp->host, "Unexpected MESSAGE REJECT\n");
+	esp_schedule_reset(esp);
 }
 
 static void esp_msgin_sdtr(struct esp *esp, struct esp_target_data *tp)
@@ -1621,7 +1609,7 @@ static void esp_msgin_extended(struct esp *esp)
 	shost_printk(KERN_INFO, esp->host,
 		     "Unexpected extended msg type %x\n", esp->msg_in[2]);
 
-	esp->msg_out[0] = ABORT_TASK_SET;
+	esp->msg_out[0] = MESSAGE_REJECT;
 	esp->msg_out_len = 1;
 	scsi_esp_cmd(esp, ESP_CMD_SATN);
 }
@@ -1745,7 +1733,6 @@ again:
 			return 0;
 		}
 		goto again;
-		break;
 
 	case ESP_EVENT_DATA_IN:
 		write = 1;
@@ -1956,12 +1943,16 @@ again:
 		} else {
 			if (esp->msg_out_len > 1)
 				esp->ops->dma_invalidate(esp);
-		}
 
-		if (!(esp->ireg & ESP_INTR_DC)) {
-			if (esp->rev != FASHME)
+			/* XXX if the chip went into disconnected mode,
+			 * we can't run the phase state machine anyway.
+			 */
+			if (!(esp->ireg & ESP_INTR_DC))
 				scsi_esp_cmd(esp, ESP_CMD_NULL);
 		}
+
+		esp->msg_out_len = 0;
+
 		esp_event(esp, ESP_EVENT_CHECK_PHASE);
 		goto again;
 	case ESP_EVENT_MSGIN:
@@ -1998,6 +1989,10 @@ again:
 
 			scsi_esp_cmd(esp, ESP_CMD_MOK);
 
+			/* Check whether a bus reset is to be done next */
+			if (esp->event == ESP_EVENT_RESET)
+				return 0;
+
 			if (esp->event != ESP_EVENT_FREE_BUS)
 				esp_event(esp, ESP_EVENT_CHECK_PHASE);
 		} else {
@@ -2022,7 +2017,6 @@ again:
 		}
 		esp_schedule_reset(esp);
 		return 0;
-		break;
 
 	case ESP_EVENT_RESET:
 		scsi_esp_cmd(esp, ESP_CMD_RS);
@@ -2033,7 +2027,6 @@ again:
 			     "Unexpected event %x, resetting\n", esp->event);
 		esp_schedule_reset(esp);
 		return 0;
-		break;
 	}
 	return 1;
 }
@@ -2170,14 +2163,14 @@ static void __esp_interrupt(struct esp *esp)
 
 		esp_schedule_reset(esp);
 	} else {
-		if (!(esp->ireg & ESP_INTR_RSEL)) {
-			/* Some combination of FDONE, BSERV, DC.  */
-			if (esp->select_state != ESP_SELECT_NONE)
-				intr_done = esp_finish_select(esp);
-		} else if (esp->ireg & ESP_INTR_RSEL) {
+		if (esp->ireg & ESP_INTR_RSEL) {
 			if (esp->active_cmd)
 				(void) esp_finish_select(esp);
 			intr_done = esp_reconnect(esp);
+		} else {
+			/* Some combination of FDONE, BSERV, DC. */
+			if (esp->select_state != ESP_SELECT_NONE)
+				intr_done = esp_finish_select(esp);
 		}
 	}
 	while (!intr_done)
diff --git a/drivers/scsi/esp_scsi.h b/drivers/scsi/esp_scsi.h
index 84dcbe4a6268..7e8932ae91f8 100644
--- a/drivers/scsi/esp_scsi.h
+++ b/drivers/scsi/esp_scsi.h
@@ -281,7 +281,6 @@ struct esp_cmd_entry {
 
 	u8			flags;
 #define ESP_CMD_FLAG_WRITE	0x01 /* DMA is a write */
-#define ESP_CMD_FLAG_ABORT	0x02 /* being aborted */
 #define ESP_CMD_FLAG_AUTOSENSE	0x04 /* Doing automatic REQUEST_SENSE */
 #define ESP_CMD_FLAG_RESIDUAL	0x08 /* AM53c974 BLAST residual */
 
diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
index 9cf3d56296ab..5c8310bade61 100644
--- a/drivers/scsi/fcoe/fcoe_sysfs.c
+++ b/drivers/scsi/fcoe/fcoe_sysfs.c
@@ -659,13 +659,13 @@ static void fcoe_fcf_device_release(struct device *dev)
 	kfree(fcf);
 }
 
-static struct device_type fcoe_ctlr_device_type = {
+static const struct device_type fcoe_ctlr_device_type = {
 	.name = "fcoe_ctlr",
 	.groups = fcoe_ctlr_attr_groups,
 	.release = fcoe_ctlr_device_release,
 };
 
-static struct device_type fcoe_fcf_device_type = {
+static const struct device_type fcoe_fcf_device_type = {
 	.name = "fcoe_fcf",
 	.groups = fcoe_fcf_attr_groups,
 	.release = fcoe_fcf_device_release,
diff --git a/drivers/scsi/fdomain.c b/drivers/scsi/fdomain.c
index b87ab38a4530..ebbe5a3e665d 100644
--- a/drivers/scsi/fdomain.c
+++ b/drivers/scsi/fdomain.c
@@ -933,7 +933,7 @@ struct Scsi_Host *__fdomain_16x0_detect(struct scsi_host_template *tpnt )
       }
    }
 
-   fdomain_16x0_bus_reset(NULL);
+   fdomain_16x0_host_reset(NULL);
 
    if (fdomain_test_loopback()) {
       printk(KERN_ERR  "scsi: <fdomain> Detection failed (loopback test failed at port base 0x%x)\n", port_base);
@@ -1568,7 +1568,7 @@ static int fdomain_16x0_abort(struct scsi_cmnd *SCpnt)
    return SUCCESS;
 }
 
-int fdomain_16x0_bus_reset(struct scsi_cmnd *SCpnt)
+int fdomain_16x0_host_reset(struct scsi_cmnd *SCpnt)
 {
    unsigned long flags;
 
@@ -1758,7 +1758,7 @@ struct scsi_host_template fdomain_driver_template = {
 	.info			= fdomain_16x0_info,
 	.queuecommand		= fdomain_16x0_queue,
 	.eh_abort_handler	= fdomain_16x0_abort,
-	.eh_bus_reset_handler	= fdomain_16x0_bus_reset,
+	.eh_host_reset_handler	= fdomain_16x0_host_reset,
 	.bios_param		= fdomain_16x0_biosparam,
 	.release		= fdomain_16x0_release,
 	.can_queue		= 1,
diff --git a/drivers/scsi/fdomain.h b/drivers/scsi/fdomain.h
index 47021d9d4fe4..5cbe86b573ae 100644
--- a/drivers/scsi/fdomain.h
+++ b/drivers/scsi/fdomain.h
@@ -21,4 +21,4 @@
 extern struct scsi_host_template fdomain_driver_template;
 extern int fdomain_setup(char *str);
 extern struct Scsi_Host *__fdomain_16x0_detect(struct  scsi_host_template *tpnt );
-extern int fdomain_16x0_bus_reset(struct scsi_cmnd *SCpnt);
+extern int fdomain_16x0_host_reset(struct scsi_cmnd *SCpnt);
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index 67aab965c0f4..d094ba59ed15 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -180,7 +180,7 @@ enum fnic_msix_intr_index {
 
 struct fnic_msix_entry {
 	int requested;
-	char devname[IFNAMSIZ];
+	char devname[IFNAMSIZ + 11];
 	irqreturn_t (*isr)(int, void *);
 	void *devid;
 };
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 6c0646d62dfb..242e2ee494a1 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -1990,10 +1990,6 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
 		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
 			"Issuing Host reset due to out of order IO\n");
 
-		if (fnic_host_reset(sc) == FAILED) {
-			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
-				"fnic_host_reset failed.\n");
-		}
 		ret = FAILED;
 		goto fnic_abort_cmd_end;
 	}
diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c
index c34fc91ba486..fc538181f8df 100644
--- a/drivers/scsi/g_NCR5380.c
+++ b/drivers/scsi/g_NCR5380.c
@@ -1,17 +1,17 @@
 /*
  * Generic Generic NCR5380 driver
- *	
+ *
  * Copyright 1993, Drew Eckhardt
- *	Visionary Computing
- *	(Unix and Linux consulting and custom programming)
- *	drew@colorado.edu
- *      +1 (303) 440-4894
+ * Visionary Computing
+ * (Unix and Linux consulting and custom programming)
+ * drew@colorado.edu
+ * +1 (303) 440-4894
  *
  * NCR53C400 extensions (c) 1994,1995,1996, Kevin Lentin
- *    K.Lentin@cs.monash.edu.au
+ * K.Lentin@cs.monash.edu.au
  *
  * NCR53C400A extensions (c) 1996, Ingmar Baumgart
- *    ingmar@gonzo.schwaben.de
+ * ingmar@gonzo.schwaben.de
  *
  * DTC3181E extensions (c) 1997, Ronald van Cuijlenborg
  * ronald.van.cuijlenborg@tip.nl or nutty@dds.nl
@@ -44,17 +44,19 @@
 	int c400_ctl_status; \
 	int c400_blk_cnt; \
 	int c400_host_buf; \
-	int io_width
+	int io_width; \
+	int pdma_residual; \
+	int board
 
 #define NCR5380_dma_xfer_len            generic_NCR5380_dma_xfer_len
-#define NCR5380_dma_recv_setup          generic_NCR5380_pread
-#define NCR5380_dma_send_setup          generic_NCR5380_pwrite
-#define NCR5380_dma_residual            NCR5380_dma_residual_none
+#define NCR5380_dma_recv_setup          generic_NCR5380_precv
+#define NCR5380_dma_send_setup          generic_NCR5380_psend
+#define NCR5380_dma_residual            generic_NCR5380_dma_residual
 
 #define NCR5380_intr                    generic_NCR5380_intr
 #define NCR5380_queue_command           generic_NCR5380_queue_command
 #define NCR5380_abort                   generic_NCR5380_abort
-#define NCR5380_bus_reset               generic_NCR5380_bus_reset
+#define NCR5380_host_reset              generic_NCR5380_host_reset
 #define NCR5380_info                    generic_NCR5380_info
 
 #define NCR5380_io_delay(x)             udelay(x)
@@ -76,6 +78,7 @@
 #define IRQ_AUTO 254
 
 #define MAX_CARDS 8
+#define DMA_MAX_SIZE 32768
 
 /* old-style parameters for compatibility */
 static int ncr_irq = -1;
@@ -314,6 +317,7 @@ static int generic_NCR5380_init_one(struct scsi_host_template *tpnt,
 	}
 	hostdata = shost_priv(instance);
 
+	hostdata->board = board;
 	hostdata->io = iomem;
 	hostdata->region_size = region_size;
 
@@ -478,180 +482,210 @@ static void generic_NCR5380_release_resources(struct Scsi_Host *instance)
 		release_mem_region(base, region_size);
 }
 
+/* wait_for_53c80_access - wait for 53C80 registers to become accessible
+ * @hostdata: scsi host private data
+ *
+ * The registers within the 53C80 logic block are inaccessible until
+ * bit 7 in the 53C400 control status register gets asserted.
+ */
+
+static void wait_for_53c80_access(struct NCR5380_hostdata *hostdata)
+{
+	int count = 10000;
+
+	do {
+		if (hostdata->board == BOARD_DTC3181E)
+			udelay(4); /* DTC436 chip hangs without this */
+		if (NCR5380_read(hostdata->c400_ctl_status) & CSR_53C80_REG)
+			return;
+	} while (--count > 0);
+
+	scmd_printk(KERN_ERR, hostdata->connected,
+	            "53c80 registers not accessible, device will be reset\n");
+	NCR5380_write(hostdata->c400_ctl_status, CSR_RESET);
+	NCR5380_write(hostdata->c400_ctl_status, CSR_BASE);
+}
+
 /**
- *	generic_NCR5380_pread - pseudo DMA read
- *	@hostdata: scsi host private data
- *	@dst: buffer to read into
- *	@len: buffer length
+ * generic_NCR5380_precv - pseudo DMA receive
+ * @hostdata: scsi host private data
+ * @dst: buffer to write into
+ * @len: transfer size
  *
- *	Perform a pseudo DMA mode read from an NCR53C400 or equivalent
- *	controller
+ * Perform a pseudo DMA mode receive from a 53C400 or equivalent device.
  */
- 
-static inline int generic_NCR5380_pread(struct NCR5380_hostdata *hostdata,
+
+static inline int generic_NCR5380_precv(struct NCR5380_hostdata *hostdata,
                                         unsigned char *dst, int len)
 {
-	int blocks = len / 128;
+	int residual;
 	int start = 0;
 
 	NCR5380_write(hostdata->c400_ctl_status, CSR_BASE | CSR_TRANS_DIR);
-	NCR5380_write(hostdata->c400_blk_cnt, blocks);
-	while (1) {
-		if (NCR5380_read(hostdata->c400_blk_cnt) == 0)
-			break;
-		if (NCR5380_read(hostdata->c400_ctl_status) & CSR_GATED_53C80_IRQ) {
-			printk(KERN_ERR "53C400r: Got 53C80_IRQ start=%d, blocks=%d\n", start, blocks);
-			return -1;
+	NCR5380_write(hostdata->c400_blk_cnt, len / 128);
+
+	do {
+		if (start == len - 128) {
+			/* Ignore End of DMA interrupt for the final buffer */
+			if (NCR5380_poll_politely(hostdata, hostdata->c400_ctl_status,
+			                          CSR_HOST_BUF_NOT_RDY, 0, HZ / 64) < 0)
+				break;
+		} else {
+			if (NCR5380_poll_politely2(hostdata, hostdata->c400_ctl_status,
+			                           CSR_HOST_BUF_NOT_RDY, 0,
+			                           hostdata->c400_ctl_status,
+			                           CSR_GATED_53C80_IRQ,
+			                           CSR_GATED_53C80_IRQ, HZ / 64) < 0 ||
+			    NCR5380_read(hostdata->c400_ctl_status) & CSR_HOST_BUF_NOT_RDY)
+				break;
 		}
-		while (NCR5380_read(hostdata->c400_ctl_status) & CSR_HOST_BUF_NOT_RDY)
-			; /* FIXME - no timeout */
 
 		if (hostdata->io_port && hostdata->io_width == 2)
 			insw(hostdata->io_port + hostdata->c400_host_buf,
-							dst + start, 64);
+			     dst + start, 64);
 		else if (hostdata->io_port)
 			insb(hostdata->io_port + hostdata->c400_host_buf,
-							dst + start, 128);
+			     dst + start, 128);
 		else
 			memcpy_fromio(dst + start,
 				hostdata->io + NCR53C400_host_buffer, 128);
-
 		start += 128;
-		blocks--;
-	}
+	} while (start < len);
 
-	if (blocks) {
-		while (NCR5380_read(hostdata->c400_ctl_status) & CSR_HOST_BUF_NOT_RDY)
-			; /* FIXME - no timeout */
+	residual = len - start;
 
-		if (hostdata->io_port && hostdata->io_width == 2)
-			insw(hostdata->io_port + hostdata->c400_host_buf,
-							dst + start, 64);
-		else if (hostdata->io_port)
-			insb(hostdata->io_port + hostdata->c400_host_buf,
-							dst + start, 128);
-		else
-			memcpy_fromio(dst + start,
-				hostdata->io + NCR53C400_host_buffer, 128);
-
-		start += 128;
-		blocks--;
+	if (residual != 0) {
+		/* 53c80 interrupt or transfer timeout. Reset 53c400 logic. */
+		NCR5380_write(hostdata->c400_ctl_status, CSR_RESET);
+		NCR5380_write(hostdata->c400_ctl_status, CSR_BASE);
 	}
+	wait_for_53c80_access(hostdata);
 
-	if (!(NCR5380_read(hostdata->c400_ctl_status) & CSR_GATED_53C80_IRQ))
-		printk("53C400r: no 53C80 gated irq after transfer");
+	if (residual == 0 && NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG,
+	                                           BASR_END_DMA_TRANSFER,
+	                                           BASR_END_DMA_TRANSFER,
+	                                           HZ / 64) < 0)
+		scmd_printk(KERN_ERR, hostdata->connected, "%s: End of DMA timeout\n",
+		            __func__);
 
-	/* wait for 53C80 registers to be available */
-	while (!(NCR5380_read(hostdata->c400_ctl_status) & CSR_53C80_REG))
-		;
+	hostdata->pdma_residual = residual;
 
-	if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_END_DMA_TRANSFER))
-		printk(KERN_ERR "53C400r: no end dma signal\n");
-		
 	return 0;
 }
 
 /**
- *	generic_NCR5380_pwrite - pseudo DMA write
- *	@hostdata: scsi host private data
- *	@dst: buffer to read into
- *	@len: buffer length
+ * generic_NCR5380_psend - pseudo DMA send
+ * @hostdata: scsi host private data
+ * @src: buffer to read from
+ * @len: transfer size
  *
- *	Perform a pseudo DMA mode read from an NCR53C400 or equivalent
- *	controller
+ * Perform a pseudo DMA mode send to a 53C400 or equivalent device.
  */
 
-static inline int generic_NCR5380_pwrite(struct NCR5380_hostdata *hostdata,
-                                         unsigned char *src, int len)
+static inline int generic_NCR5380_psend(struct NCR5380_hostdata *hostdata,
+                                        unsigned char *src, int len)
 {
-	int blocks = len / 128;
+	int residual;
 	int start = 0;
 
 	NCR5380_write(hostdata->c400_ctl_status, CSR_BASE);
-	NCR5380_write(hostdata->c400_blk_cnt, blocks);
-	while (1) {
-		if (NCR5380_read(hostdata->c400_ctl_status) & CSR_GATED_53C80_IRQ) {
-			printk(KERN_ERR "53C400w: Got 53C80_IRQ start=%d, blocks=%d\n", start, blocks);
-			return -1;
+	NCR5380_write(hostdata->c400_blk_cnt, len / 128);
+
+	do {
+		if (NCR5380_poll_politely2(hostdata, hostdata->c400_ctl_status,
+		                           CSR_HOST_BUF_NOT_RDY, 0,
+		                           hostdata->c400_ctl_status,
+		                           CSR_GATED_53C80_IRQ,
+		                           CSR_GATED_53C80_IRQ, HZ / 64) < 0 ||
+		    NCR5380_read(hostdata->c400_ctl_status) & CSR_HOST_BUF_NOT_RDY) {
+			/* Both 128 B buffers are in use */
+			if (start >= 128)
+				start -= 128;
+			if (start >= 128)
+				start -= 128;
+			break;
 		}
 
-		if (NCR5380_read(hostdata->c400_blk_cnt) == 0)
+		if (start >= len && NCR5380_read(hostdata->c400_blk_cnt) == 0)
 			break;
-		while (NCR5380_read(hostdata->c400_ctl_status) & CSR_HOST_BUF_NOT_RDY)
-			; // FIXME - timeout
 
-		if (hostdata->io_port && hostdata->io_width == 2)
-			outsw(hostdata->io_port + hostdata->c400_host_buf,
-							src + start, 64);
-		else if (hostdata->io_port)
-			outsb(hostdata->io_port + hostdata->c400_host_buf,
-							src + start, 128);
-		else
-			memcpy_toio(hostdata->io + NCR53C400_host_buffer,
-			            src + start, 128);
+		if (NCR5380_read(hostdata->c400_ctl_status) & CSR_GATED_53C80_IRQ) {
+			/* Host buffer is empty, other one is in use */
+			if (start >= 128)
+				start -= 128;
+			break;
+		}
 
-		start += 128;
-		blocks--;
-	}
-	if (blocks) {
-		while (NCR5380_read(hostdata->c400_ctl_status) & CSR_HOST_BUF_NOT_RDY)
-			; // FIXME - no timeout
+		if (start >= len)
+			continue;
 
 		if (hostdata->io_port && hostdata->io_width == 2)
 			outsw(hostdata->io_port + hostdata->c400_host_buf,
-							src + start, 64);
+			      src + start, 64);
 		else if (hostdata->io_port)
 			outsb(hostdata->io_port + hostdata->c400_host_buf,
-							src + start, 128);
+			      src + start, 128);
 		else
 			memcpy_toio(hostdata->io + NCR53C400_host_buffer,
 			            src + start, 128);
-
 		start += 128;
-		blocks--;
-	}
+	} while (1);
 
-	/* wait for 53C80 registers to be available */
-	while (!(NCR5380_read(hostdata->c400_ctl_status) & CSR_53C80_REG)) {
-		udelay(4); /* DTC436 chip hangs without this */
-		/* FIXME - no timeout */
-	}
+	residual = len - start;
 
-	if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_END_DMA_TRANSFER)) {
-		printk(KERN_ERR "53C400w: no end dma signal\n");
+	if (residual != 0) {
+		/* 53c80 interrupt or transfer timeout. Reset 53c400 logic. */
+		NCR5380_write(hostdata->c400_ctl_status, CSR_RESET);
+		NCR5380_write(hostdata->c400_ctl_status, CSR_BASE);
 	}
+	wait_for_53c80_access(hostdata);
+
+	if (residual == 0) {
+		if (NCR5380_poll_politely(hostdata, TARGET_COMMAND_REG,
+		                          TCR_LAST_BYTE_SENT, TCR_LAST_BYTE_SENT,
+		                          HZ / 64) < 0)
+			scmd_printk(KERN_ERR, hostdata->connected,
+			            "%s: Last Byte Sent timeout\n", __func__);
+
+		if (NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG,
+		                          BASR_END_DMA_TRANSFER, BASR_END_DMA_TRANSFER,
+		                          HZ / 64) < 0)
+			scmd_printk(KERN_ERR, hostdata->connected, "%s: End of DMA timeout\n",
+			            __func__);
+	}
+
+	hostdata->pdma_residual = residual;
 
-	while (!(NCR5380_read(TARGET_COMMAND_REG) & TCR_LAST_BYTE_SENT))
-		; 	// TIMEOUT
 	return 0;
 }
 
 static int generic_NCR5380_dma_xfer_len(struct NCR5380_hostdata *hostdata,
                                         struct scsi_cmnd *cmd)
 {
-	int transfersize = cmd->transfersize;
+	int transfersize = cmd->SCp.this_residual;
 
 	if (hostdata->flags & FLAG_NO_PSEUDO_DMA)
 		return 0;
 
-	/* Limit transfers to 32K, for xx400 & xx406
-	 * pseudoDMA that transfers in 128 bytes blocks.
-	 */
-	if (transfersize > 32 * 1024 && cmd->SCp.this_residual &&
-	    !(cmd->SCp.this_residual % transfersize))
-		transfersize = 32 * 1024;
-
 	/* 53C400 datasheet: non-modulo-128-byte transfers should use PIO */
 	if (transfersize % 128)
-		transfersize = 0;
+		return 0;
+
+	/* Limit PDMA send to 512 B to avoid random corruption on DTC3181E */
+	if (hostdata->board == BOARD_DTC3181E &&
+	    cmd->sc_data_direction == DMA_TO_DEVICE)
+		transfersize = min(cmd->SCp.this_residual, 512);
 
-	return transfersize;
+	return min(transfersize, DMA_MAX_SIZE);
 }
 
-/*
- *	Include the NCR5380 core code that we build our driver around	
- */
- 
+static int generic_NCR5380_dma_residual(struct NCR5380_hostdata *hostdata)
+{
+	return hostdata->pdma_residual;
+}
+
+/* Include the core driver code. */
+
 #include "NCR5380.c"
 
 static struct scsi_host_template driver_template = {
@@ -661,7 +695,7 @@ static struct scsi_host_template driver_template = {
 	.info			= generic_NCR5380_info,
 	.queuecommand		= generic_NCR5380_queue_command,
 	.eh_abort_handler	= generic_NCR5380_abort,
-	.eh_bus_reset_handler	= generic_NCR5380_bus_reset,
+	.eh_host_reset_handler	= generic_NCR5380_host_reset,
 	.can_queue		= 16,
 	.this_id		= 7,
 	.sg_tablesize		= SG_ALL,
@@ -671,11 +705,10 @@ static struct scsi_host_template driver_template = {
 	.max_sectors		= 128,
 };
 
-
 static int generic_NCR5380_isa_match(struct device *pdev, unsigned int ndev)
 {
 	int ret = generic_NCR5380_init_one(&driver_template, pdev, base[ndev],
-					  irq[ndev], card[ndev]);
+	                                   irq[ndev], card[ndev]);
 	if (ret) {
 		if (base[ndev])
 			printk(KERN_WARNING "Card not found at address 0x%03x\n",
@@ -687,7 +720,7 @@ static int generic_NCR5380_isa_match(struct device *pdev, unsigned int ndev)
 }
 
 static int generic_NCR5380_isa_remove(struct device *pdev,
-				   unsigned int ndev)
+                                      unsigned int ndev)
 {
 	generic_NCR5380_release_resources(dev_get_drvdata(pdev));
 	dev_set_drvdata(pdev, NULL);
@@ -703,14 +736,14 @@ static struct isa_driver generic_NCR5380_isa_driver = {
 };
 
 #ifdef CONFIG_PNP
-static struct pnp_device_id generic_NCR5380_pnp_ids[] = {
+static const struct pnp_device_id generic_NCR5380_pnp_ids[] = {
 	{ .id = "DTC436e", .driver_data = BOARD_DTC3181E },
 	{ .id = "" }
 };
 MODULE_DEVICE_TABLE(pnp, generic_NCR5380_pnp_ids);
 
 static int generic_NCR5380_pnp_probe(struct pnp_dev *pdev,
-			       const struct pnp_device_id *id)
+                                     const struct pnp_device_id *id)
 {
 	int base, irq;
 
@@ -721,7 +754,7 @@ static int generic_NCR5380_pnp_probe(struct pnp_dev *pdev,
 	irq = pnp_irq(pdev, 0);
 
 	return generic_NCR5380_init_one(&driver_template, &pdev->dev, base, irq,
-				       id->driver_data);
+	                                id->driver_data);
 }
 
 static void generic_NCR5380_pnp_remove(struct pnp_dev *pdev)
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index facc7271f932..a4473356a9dc 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -2354,7 +2354,7 @@ static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp)
         inq.resp_aenc = 2;
         inq.add_length= 32;
         strcpy(inq.vendor,ha->oem_name);
-        sprintf(inq.product,"Host Drive  #%02d",t);
+        snprintf(inq.product, sizeof(inq.product), "Host Drive  #%02d",t);
         strcpy(inq.revision,"   ");
         gdth_copy_internal_data(ha, scp, (char*)&inq, sizeof(gdth_inq_data));
         break;
diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c
index be609db66807..d08b2716752c 100644
--- a/drivers/scsi/gdth_proc.c
+++ b/drivers/scsi/gdth_proc.c
@@ -147,7 +147,7 @@ int gdth_show_info(struct seq_file *m, struct Scsi_Host *host)
 
     gdth_cmd_str *gdtcmd;
     gdth_evt_str *estr;
-    char hrec[161];
+    char hrec[277];
 
     char *buf;
     gdth_dskstat_str *pds;
diff --git a/drivers/scsi/gvp11.c b/drivers/scsi/gvp11.c
index 3b6f83ffddc4..a27fc49ebd3a 100644
--- a/drivers/scsi/gvp11.c
+++ b/drivers/scsi/gvp11.c
@@ -171,23 +171,6 @@ static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
 	}
 }
 
-static int gvp11_bus_reset(struct scsi_cmnd *cmd)
-{
-	struct Scsi_Host *instance = cmd->device->host;
-
-	/* FIXME perform bus-specific reset */
-
-	/* FIXME 2: shouldn't we no-op this function (return
-	   FAILED), and fall back to host reset function,
-	   wd33c93_host_reset ? */
-
-	spin_lock_irq(instance->host_lock);
-	wd33c93_host_reset(cmd);
-	spin_unlock_irq(instance->host_lock);
-
-	return SUCCESS;
-}
-
 static struct scsi_host_template gvp11_scsi_template = {
 	.module			= THIS_MODULE,
 	.name			= "GVP Series II SCSI",
@@ -196,7 +179,6 @@ static struct scsi_host_template gvp11_scsi_template = {
 	.proc_name		= "GVP11",
 	.queuecommand		= wd33c93_queuecommand,
 	.eh_abort_handler	= wd33c93_abort,
-	.eh_bus_reset_handler	= gvp11_bus_reset,
 	.eh_host_reset_handler	= wd33c93_host_reset,
 	.can_queue		= CAN_QUEUE,
 	.this_id		= 7,
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index a722f2bd72ab..07f4a4cfbec1 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -15,6 +15,7 @@
 #include <linux/acpi.h>
 #include <linux/clk.h>
 #include <linux/dmapool.h>
+#include <linux/iopoll.h>
 #include <linux/mfd/syscon.h>
 #include <linux/module.h>
 #include <linux/of_address.h>
@@ -25,14 +26,13 @@
 #include <scsi/sas_ata.h>
 #include <scsi/libsas.h>
 
-#define DRV_VERSION "v1.6"
-
 #define HISI_SAS_MAX_PHYS	9
 #define HISI_SAS_MAX_QUEUES	32
 #define HISI_SAS_QUEUE_SLOTS 512
 #define HISI_SAS_MAX_ITCT_ENTRIES 2048
 #define HISI_SAS_MAX_DEVICES HISI_SAS_MAX_ITCT_ENTRIES
 #define HISI_SAS_RESET_BIT	0
+#define HISI_SAS_REJECT_CMD_BIT	1
 
 #define HISI_SAS_STATUS_BUF_SZ (sizeof(struct hisi_sas_status_buffer))
 #define HISI_SAS_COMMAND_TABLE_SZ (sizeof(union hisi_sas_command_table))
@@ -90,6 +90,14 @@ enum hisi_sas_dev_type {
 	HISI_SAS_DEV_TYPE_SATA,
 };
 
+struct hisi_sas_hw_error {
+	u32 irq_msk;
+	u32 msk;
+	int shift;
+	const char *msg;
+	int reg;
+};
+
 struct hisi_sas_phy {
 	struct hisi_hba	*hisi_hba;
 	struct hisi_sas_port	*port;
@@ -132,6 +140,7 @@ struct hisi_sas_dq {
 struct hisi_sas_device {
 	struct hisi_hba		*hisi_hba;
 	struct domain_device	*sas_device;
+	struct completion *completion;
 	struct hisi_sas_dq	*dq;
 	struct list_head	list;
 	u64 attached_phy;
@@ -192,6 +201,7 @@ struct hisi_sas_hw {
 	void (*phy_enable)(struct hisi_hba *hisi_hba, int phy_no);
 	void (*phy_disable)(struct hisi_hba *hisi_hba, int phy_no);
 	void (*phy_hard_reset)(struct hisi_hba *hisi_hba, int phy_no);
+	void (*get_events)(struct hisi_hba *hisi_hba, int phy_no);
 	void (*phy_set_linkrate)(struct hisi_hba *hisi_hba, int phy_no,
 			struct sas_phy_linkrates *linkrates);
 	enum sas_linkrate (*phy_get_max_linkrate)(void);
@@ -201,6 +211,7 @@ struct hisi_sas_hw {
 	void (*dereg_device)(struct hisi_hba *hisi_hba,
 				struct domain_device *device);
 	int (*soft_reset)(struct hisi_hba *hisi_hba);
+	u32 (*get_phys_state)(struct hisi_hba *hisi_hba);
 	int max_command_entries;
 	int complete_hdr_size;
 };
@@ -390,6 +401,7 @@ struct hisi_sas_slot_buf_table {
 extern struct scsi_transport_template *hisi_sas_stt;
 extern struct scsi_host_template *hisi_sas_sht;
 
+extern void hisi_sas_stop_phys(struct hisi_hba *hisi_hba);
 extern void hisi_sas_init_add(struct hisi_hba *hisi_hba);
 extern int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost);
 extern void hisi_sas_free(struct hisi_hba *hisi_hba);
@@ -408,6 +420,4 @@ extern void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba,
 				    struct sas_task *task,
 				    struct hisi_sas_slot *slot);
 extern void hisi_sas_init_mem(struct hisi_hba *hisi_hba);
-extern void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state,
-				     u32 state);
 #endif
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 4022c3f8295f..16664f2e15fb 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -61,6 +61,7 @@ u8 hisi_sas_get_ata_protocol(u8 cmd, int direction)
 	case ATA_CMD_WRITE_QUEUED:
 	case ATA_CMD_WRITE_LOG_DMA_EXT:
 	case ATA_CMD_WRITE_STREAM_DMA_EXT:
+	case ATA_CMD_ZAC_MGMT_IN:
 	return HISI_SAS_SATA_PROTOCOL_DMA;
 
 	case ATA_CMD_CHK_POWER:
@@ -73,6 +74,7 @@ u8 hisi_sas_get_ata_protocol(u8 cmd, int direction)
 	case ATA_CMD_SET_FEATURES:
 	case ATA_CMD_STANDBY:
 	case ATA_CMD_STANDBYNOW1:
+	case ATA_CMD_ZAC_MGMT_OUT:
 	return HISI_SAS_SATA_PROTOCOL_NONDATA;
 	default:
 		if (direction == DMA_NONE)
@@ -125,6 +127,15 @@ struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port)
 }
 EXPORT_SYMBOL_GPL(to_hisi_sas_port);
 
+void hisi_sas_stop_phys(struct hisi_hba *hisi_hba)
+{
+	int phy_no;
+
+	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++)
+		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
+}
+EXPORT_SYMBOL_GPL(hisi_sas_stop_phys);
+
 static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx)
 {
 	void *bitmap = hisi_hba->slot_index_tags;
@@ -433,7 +444,7 @@ static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
 	struct hisi_sas_device *sas_dev = device->lldd_dev;
 	struct hisi_sas_dq *dq = sas_dev->dq;
 
-	if (unlikely(test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)))
+	if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
 		return -EINVAL;
 
 	/* protect task_prep and start_delivery sequence */
@@ -716,7 +727,6 @@ static void hisi_sas_dev_gone(struct domain_device *device)
 	struct hisi_sas_device *sas_dev = device->lldd_dev;
 	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
 	struct device *dev = hisi_hba->dev;
-	int dev_id = sas_dev->device_id;
 
 	dev_info(dev, "found dev[%d:%x] is gone\n",
 		 sas_dev->device_id, sas_dev->dev_type);
@@ -729,9 +739,7 @@ static void hisi_sas_dev_gone(struct domain_device *device)
 	hisi_hba->hw->free_device(hisi_hba, sas_dev);
 	device->lldd_dev = NULL;
 	memset(sas_dev, 0, sizeof(*sas_dev));
-	sas_dev->device_id = dev_id;
 	sas_dev->dev_type = SAS_PHY_UNUSED;
-	sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
 }
 
 static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
@@ -764,7 +772,12 @@ static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
 	case PHY_FUNC_SET_LINK_RATE:
 		hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, funcdata);
 		break;
-
+	case PHY_FUNC_GET_EVENTS:
+		if (hisi_hba->hw->get_events) {
+			hisi_hba->hw->get_events(hisi_hba, phy_no);
+			break;
+		}
+		/* fallthru */
 	case PHY_FUNC_RELEASE_SPINUP_HOLD:
 	default:
 		return -EOPNOTSUPP;
@@ -967,37 +980,117 @@ static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
 				sizeof(ssp_task), tmf);
 }
 
+static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba,
+		struct asd_sas_port *sas_port, enum sas_linkrate linkrate)
+{
+	struct hisi_sas_device	*sas_dev;
+	struct domain_device *device;
+	int i;
+
+	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
+		sas_dev = &hisi_hba->devices[i];
+		device = sas_dev->sas_device;
+		if ((sas_dev->dev_type == SAS_PHY_UNUSED)
+				|| !device || (device->port != sas_port))
+			continue;
+
+		hisi_hba->hw->free_device(hisi_hba, sas_dev);
+
+		/* Update linkrate of directly attached device. */
+		if (!device->parent)
+			device->linkrate = linkrate;
+
+		hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
+	}
+}
+
+static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state,
+			      u32 state)
+{
+	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
+	struct asd_sas_port *_sas_port = NULL;
+	int phy_no;
+
+	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
+		struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
+		struct asd_sas_phy *sas_phy = &phy->sas_phy;
+		struct asd_sas_port *sas_port = sas_phy->port;
+		struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
+		bool do_port_check = !!(_sas_port != sas_port);
+
+		if (!sas_phy->phy->enabled)
+			continue;
+
+		/* Report PHY state change to libsas */
+		if (state & (1 << phy_no)) {
+			if (do_port_check && sas_port) {
+				struct domain_device *dev = sas_port->port_dev;
+
+				_sas_port = sas_port;
+				port->id = phy->port_id;
+				hisi_sas_refresh_port_id(hisi_hba,
+						sas_port, sas_phy->linkrate);
+
+				if (DEV_IS_EXPANDER(dev->dev_type))
+					sas_ha->notify_port_event(sas_phy,
+							PORTE_BROADCAST_RCVD);
+			}
+		} else if (old_state & (1 << phy_no))
+			/* PHY down but was up before */
+			hisi_sas_phy_down(hisi_hba, phy_no, 0);
+
+	}
+
+	drain_workqueue(hisi_hba->shost->work_q);
+}
+
 static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
 {
+	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
+	struct device *dev = hisi_hba->dev;
+	struct Scsi_Host *shost = hisi_hba->shost;
+	u32 old_state, state;
+	unsigned long flags;
 	int rc;
 
 	if (!hisi_hba->hw->soft_reset)
 		return -1;
 
-	if (!test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) {
-		struct device *dev = hisi_hba->dev;
-		struct sas_ha_struct *sas_ha = &hisi_hba->sha;
-		unsigned long flags;
+	if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
+		return -1;
 
-		dev_dbg(dev, "controller reset begins!\n");
-		scsi_block_requests(hisi_hba->shost);
-		rc = hisi_hba->hw->soft_reset(hisi_hba);
-		if (rc) {
-			dev_warn(dev, "controller reset failed (%d)\n", rc);
-			goto out;
-		}
-		spin_lock_irqsave(&hisi_hba->lock, flags);
-		hisi_sas_release_tasks(hisi_hba);
-		spin_unlock_irqrestore(&hisi_hba->lock, flags);
+	dev_dbg(dev, "controller resetting...\n");
+	old_state = hisi_hba->hw->get_phys_state(hisi_hba);
 
-		sas_ha->notify_ha_event(sas_ha, HAE_RESET);
-		dev_dbg(dev, "controller reset successful!\n");
-	} else
-		return -1;
+	scsi_block_requests(shost);
+	set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
+	rc = hisi_hba->hw->soft_reset(hisi_hba);
+	if (rc) {
+		dev_warn(dev, "controller reset failed (%d)\n", rc);
+		clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
+		goto out;
+	}
+	spin_lock_irqsave(&hisi_hba->lock, flags);
+	hisi_sas_release_tasks(hisi_hba);
+	spin_unlock_irqrestore(&hisi_hba->lock, flags);
+
+	sas_ha->notify_ha_event(sas_ha, HAE_RESET);
+	clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
+
+	/* Init and wait for PHYs to come up and all libsas event finished. */
+	hisi_hba->hw->phys_init(hisi_hba);
+	msleep(1000);
+	drain_workqueue(hisi_hba->wq);
+	drain_workqueue(shost->work_q);
+
+	state = hisi_hba->hw->get_phys_state(hisi_hba);
+	hisi_sas_rescan_topology(hisi_hba, old_state, state);
+	dev_dbg(dev, "controller reset complete\n");
 
 out:
-	scsi_unblock_requests(hisi_hba->shost);
+	scsi_unblock_requests(shost);
 	clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
+
 	return rc;
 }
 
@@ -1241,7 +1334,7 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
 	int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
 	unsigned long flags, flags_dq;
 
-	if (unlikely(test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)))
+	if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
 		return -EINVAL;
 
 	if (!device->port)
@@ -1279,12 +1372,21 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
 	slot->port = port;
 	task->lldd_task = slot;
 
+	slot->buf = dma_pool_alloc(hisi_hba->buffer_pool,
+			GFP_ATOMIC, &slot->buf_dma);
+	if (!slot->buf) {
+		rc = -ENOMEM;
+		goto err_out_tag;
+	}
+
 	memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
+	memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
+	memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ);
 
 	rc = hisi_sas_task_prep_abort(hisi_hba, slot, device_id,
 				      abort_flag, task_tag);
 	if (rc)
-		goto err_out_tag;
+		goto err_out_buf;
 
 
 	list_add_tail(&slot->entry, &sas_dev->list);
@@ -1302,6 +1404,9 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
 
 	return 0;
 
+err_out_buf:
+	dma_pool_free(hisi_hba->buffer_pool, slot->buf,
+		slot->buf_dma);
 err_out_tag:
 	spin_lock_irqsave(&hisi_hba->lock, flags);
 	hisi_sas_slot_index_free(hisi_hba, slot_idx);
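
The abort path now allocates a per-slot command/status buffer from the driver's buffer pool (GFP_ATOMIC, since this can run in atomic context) and gains an extra err_out_buf label so a later failure releases the buffer before freeing the slot index. A hedged kernel-style sketch of that alloc/unwind pairing; struct ctx, prep() and the field names are placeholders, only the dma_pool_* calls are real API:

static int slot_setup(struct ctx *c)
{
	int rc;

	/* atomic context: dma_pool_alloc() must not sleep here */
	c->buf = dma_pool_alloc(c->pool, GFP_ATOMIC, &c->buf_dma);
	if (!c->buf)
		return -ENOMEM;

	rc = prep(c);
	if (rc)
		goto err_free_buf;	/* undo only what this function did */

	return 0;

err_free_buf:
	dma_pool_free(c->pool, c->buf, c->buf_dma);
	return rc;
}
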
@@ -1437,36 +1542,6 @@ void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
 }
 EXPORT_SYMBOL_GPL(hisi_sas_phy_down);
 
-void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state,
-			      u32 state)
-{
-	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
-	int phy_no;
-
-	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
-		struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
-		struct asd_sas_phy *sas_phy = &phy->sas_phy;
-		struct asd_sas_port *sas_port = sas_phy->port;
-		struct domain_device *dev;
-
-		if (sas_phy->enabled) {
-			/* Report PHY state change to libsas */
-			if (state & (1 << phy_no))
-				continue;
-
-			if (old_state & (1 << phy_no))
-				/* PHY down but was up before */
-				hisi_sas_phy_down(hisi_hba, phy_no, 0);
-		}
-		if (!sas_port)
-			continue;
-		dev = sas_port->port_dev;
-
-		if (DEV_IS_EXPANDER(dev->dev_type))
-			sas_ha->notify_phy_event(sas_phy, PORTE_BROADCAST_RCVD);
-	}
-}
-EXPORT_SYMBOL_GPL(hisi_sas_rescan_topology);
 
 struct scsi_transport_template *hisi_sas_stt;
 EXPORT_SYMBOL_GPL(hisi_sas_stt);
@@ -1487,7 +1562,7 @@ static struct scsi_host_template _hisi_sas_sht = {
 	.max_sectors		= SCSI_DEFAULT_MAX_SECTORS,
 	.use_clustering		= ENABLE_CLUSTERING,
 	.eh_device_reset_handler = sas_eh_device_reset_handler,
-	.eh_bus_reset_handler	= sas_eh_bus_reset_handler,
+	.eh_target_reset_handler = sas_eh_target_reset_handler,
 	.target_destroy		= sas_target_destroy,
 	.ioctl			= sas_ioctl,
 };
@@ -1825,7 +1900,7 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
 
 	return shost;
 err_out:
-	kfree(shost);
+	scsi_host_put(shost);
 	dev_err(dev, "shost alloc failed\n");
 	return NULL;
 }
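
Several error and remove paths in this series switch from kfree(shost) to scsi_host_put(shost). A Scsi_Host returned by scsi_host_alloc() is a refcounted object, so kfree() bypasses the reference count and the normal release path; dropping the reference is the correct way to undo the allocation. A short hedged sketch of the pairing, with my_sht, my_priv and setup() as placeholders:

static int my_probe(void)
{
	struct Scsi_Host *shost;

	shost = scsi_host_alloc(&my_sht, sizeof(struct my_priv));
	if (!shost)
		return -ENOMEM;

	if (setup(shost) < 0) {
		scsi_host_put(shost);	/* drop the ref; never kfree() a Scsi_Host */
		return -ENODEV;
	}
	return 0;
}
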
@@ -1916,7 +1991,7 @@ err_out_register_ha:
 	scsi_remove_host(shost);
 err_out_ha:
 	hisi_sas_free(hisi_hba);
-	kfree(shost);
+	scsi_host_put(shost);
 	return rc;
 }
 EXPORT_SYMBOL_GPL(hisi_sas_probe);
@@ -1931,15 +2006,13 @@ int hisi_sas_remove(struct platform_device *pdev)
 	sas_remove_host(sha->core.shost);
 
 	hisi_sas_free(hisi_hba);
-	kfree(shost);
+	scsi_host_put(shost);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(hisi_sas_remove);
 
 static __init int hisi_sas_init(void)
 {
-	pr_info("hisi_sas: driver version %s\n", DRV_VERSION);
-
 	hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops);
 	if (!hisi_sas_stt)
 		return -ENOMEM;
@@ -1955,7 +2028,6 @@ static __exit void hisi_sas_exit(void)
 module_init(hisi_sas_init);
 module_exit(hisi_sas_exit);
 
-MODULE_VERSION(DRV_VERSION);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
 MODULE_DESCRIPTION("HISILICON SAS controller driver");
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index 2bfea7082e3a..779af979b6db 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -256,6 +256,8 @@
 #define LINK_DFX2_RCVR_HOLD_STS_MSK	(0x1 << LINK_DFX2_RCVR_HOLD_STS_OFF)
 #define LINK_DFX2_SEND_HOLD_STS_OFF	10
 #define LINK_DFX2_SEND_HOLD_STS_MSK	(0x1 << LINK_DFX2_SEND_HOLD_STS_OFF)
+#define SAS_ERR_CNT4_REG		(PORT_BASE + 0x290)
+#define SAS_ERR_CNT6_REG		(PORT_BASE + 0x298)
 #define PHY_CTRL_RDY_MSK		(PORT_BASE + 0x2b0)
 #define PHYCTRL_NOT_RDY_MSK		(PORT_BASE + 0x2b4)
 #define PHYCTRL_DWS_RESET_MSK		(PORT_BASE + 0x2b8)
@@ -399,6 +401,172 @@ struct hisi_sas_err_record_v2 {
 	__le32 dma_rx_err_type;
 };
 
+static const struct hisi_sas_hw_error one_bit_ecc_errors[] = {
+	{
+		.irq_msk = BIT(SAS_ECC_INTR_DQE_ECC_1B_OFF),
+		.msk = HGC_DQE_ECC_1B_ADDR_MSK,
+		.shift = HGC_DQE_ECC_1B_ADDR_OFF,
+		.msg = "hgc_dqe_acc1b_intr found: \
+				Ram address is 0x%08X\n",
+		.reg = HGC_DQE_ECC_ADDR,
+	},
+	{
+		.irq_msk = BIT(SAS_ECC_INTR_IOST_ECC_1B_OFF),
+		.msk = HGC_IOST_ECC_1B_ADDR_MSK,
+		.shift = HGC_IOST_ECC_1B_ADDR_OFF,
+		.msg = "hgc_iost_acc1b_intr found: \
+				Ram address is 0x%08X\n",
+		.reg = HGC_IOST_ECC_ADDR,
+	},
+	{
+		.irq_msk = BIT(SAS_ECC_INTR_ITCT_ECC_1B_OFF),
+		.msk = HGC_ITCT_ECC_1B_ADDR_MSK,
+		.shift = HGC_ITCT_ECC_1B_ADDR_OFF,
+		.msg = "hgc_itct_acc1b_intr found: \
+				Ram address is 0x%08X\n",
+		.reg = HGC_ITCT_ECC_ADDR,
+	},
+	{
+		.irq_msk = BIT(SAS_ECC_INTR_IOSTLIST_ECC_1B_OFF),
+		.msk = HGC_LM_DFX_STATUS2_IOSTLIST_MSK,
+		.shift = HGC_LM_DFX_STATUS2_IOSTLIST_OFF,
+		.msg = "hgc_iostl_acc1b_intr found:  \
+				memory address is 0x%08X\n",
+		.reg = HGC_LM_DFX_STATUS2,
+	},
+	{
+		.irq_msk = BIT(SAS_ECC_INTR_ITCTLIST_ECC_1B_OFF),
+		.msk = HGC_LM_DFX_STATUS2_ITCTLIST_MSK,
+		.shift = HGC_LM_DFX_STATUS2_ITCTLIST_OFF,
+		.msg = "hgc_itctl_acc1b_intr found: \
+				memory address is 0x%08X\n",
+		.reg = HGC_LM_DFX_STATUS2,
+	},
+	{
+		.irq_msk = BIT(SAS_ECC_INTR_CQE_ECC_1B_OFF),
+		.msk = HGC_CQE_ECC_1B_ADDR_MSK,
+		.shift = HGC_CQE_ECC_1B_ADDR_OFF,
+		.msg = "hgc_cqe_acc1b_intr found: \
+				Ram address is 0x%08X\n",
+		.reg = HGC_CQE_ECC_ADDR,
+	},
+	{
+		.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM0_ECC_1B_OFF),
+		.msk = HGC_RXM_DFX_STATUS14_MEM0_MSK,
+		.shift = HGC_RXM_DFX_STATUS14_MEM0_OFF,
+		.msg = "rxm_mem0_acc1b_intr found: \
+				memory address is 0x%08X\n",
+		.reg = HGC_RXM_DFX_STATUS14,
+	},
+	{
+		.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM1_ECC_1B_OFF),
+		.msk = HGC_RXM_DFX_STATUS14_MEM1_MSK,
+		.shift = HGC_RXM_DFX_STATUS14_MEM1_OFF,
+		.msg = "rxm_mem1_acc1b_intr found: \
+				memory address is 0x%08X\n",
+		.reg = HGC_RXM_DFX_STATUS14,
+	},
+	{
+		.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM2_ECC_1B_OFF),
+		.msk = HGC_RXM_DFX_STATUS14_MEM2_MSK,
+		.shift = HGC_RXM_DFX_STATUS14_MEM2_OFF,
+		.msg = "rxm_mem2_acc1b_intr found: \
+				memory address is 0x%08X\n",
+		.reg = HGC_RXM_DFX_STATUS14,
+	},
+	{
+		.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM3_ECC_1B_OFF),
+		.msk = HGC_RXM_DFX_STATUS15_MEM3_MSK,
+		.shift = HGC_RXM_DFX_STATUS15_MEM3_OFF,
+		.msg = "rxm_mem3_acc1b_intr found: \
+				memory address is 0x%08X\n",
+		.reg = HGC_RXM_DFX_STATUS15,
+	},
+};
+
+static const struct hisi_sas_hw_error multi_bit_ecc_errors[] = {
+	{
+		.irq_msk = BIT(SAS_ECC_INTR_DQE_ECC_MB_OFF),
+		.msk = HGC_DQE_ECC_MB_ADDR_MSK,
+		.shift = HGC_DQE_ECC_MB_ADDR_OFF,
+		.msg = "hgc_dqe_accbad_intr (0x%x) found: \
+				Ram address is 0x%08X\n",
+		.reg = HGC_DQE_ECC_ADDR,
+	},
+	{
+		.irq_msk = BIT(SAS_ECC_INTR_IOST_ECC_MB_OFF),
+		.msk = HGC_IOST_ECC_MB_ADDR_MSK,
+		.shift = HGC_IOST_ECC_MB_ADDR_OFF,
+		.msg = "hgc_iost_accbad_intr (0x%x) found: \
+				Ram address is 0x%08X\n",
+		.reg = HGC_IOST_ECC_ADDR,
+	},
+	{
+		.irq_msk = BIT(SAS_ECC_INTR_ITCT_ECC_MB_OFF),
+		.msk = HGC_ITCT_ECC_MB_ADDR_MSK,
+		.shift = HGC_ITCT_ECC_MB_ADDR_OFF,
+		.msg = "hgc_itct_accbad_intr (0x%x) found: \
+				Ram address is 0x%08X\n",
+		.reg = HGC_ITCT_ECC_ADDR,
+	},
+	{
+		.irq_msk = BIT(SAS_ECC_INTR_IOSTLIST_ECC_MB_OFF),
+		.msk = HGC_LM_DFX_STATUS2_IOSTLIST_MSK,
+		.shift = HGC_LM_DFX_STATUS2_IOSTLIST_OFF,
+		.msg = "hgc_iostl_accbad_intr (0x%x) found: \
+				memory address is 0x%08X\n",
+		.reg = HGC_LM_DFX_STATUS2,
+	},
+	{
+		.irq_msk = BIT(SAS_ECC_INTR_ITCTLIST_ECC_MB_OFF),
+		.msk = HGC_LM_DFX_STATUS2_ITCTLIST_MSK,
+		.shift = HGC_LM_DFX_STATUS2_ITCTLIST_OFF,
+		.msg = "hgc_itctl_accbad_intr (0x%x) found: \
+				memory address is 0x%08X\n",
+		.reg = HGC_LM_DFX_STATUS2,
+	},
+	{
+		.irq_msk = BIT(SAS_ECC_INTR_CQE_ECC_MB_OFF),
+		.msk = HGC_CQE_ECC_MB_ADDR_MSK,
+		.shift = HGC_CQE_ECC_MB_ADDR_OFF,
+		.msg = "hgc_cqe_accbad_intr (0x%x) found: \
+				Ram address is 0x%08X\n",
+		.reg = HGC_CQE_ECC_ADDR,
+	},
+	{
+		.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM0_ECC_MB_OFF),
+		.msk = HGC_RXM_DFX_STATUS14_MEM0_MSK,
+		.shift = HGC_RXM_DFX_STATUS14_MEM0_OFF,
+		.msg = "rxm_mem0_accbad_intr (0x%x) found: \
+			memory address is 0x%08X\n",
+		.reg = HGC_RXM_DFX_STATUS14,
+	},
+	{
+		.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM1_ECC_MB_OFF),
+		.msk = HGC_RXM_DFX_STATUS14_MEM1_MSK,
+		.shift = HGC_RXM_DFX_STATUS14_MEM1_OFF,
+		.msg = "rxm_mem1_accbad_intr (0x%x) found: \
+			memory address is 0x%08X\n",
+		.reg = HGC_RXM_DFX_STATUS14,
+	},
+	{
+		.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM2_ECC_MB_OFF),
+		.msk = HGC_RXM_DFX_STATUS14_MEM2_MSK,
+		.shift = HGC_RXM_DFX_STATUS14_MEM2_OFF,
+		.msg = "rxm_mem2_accbad_intr (0x%x) found: \
+				memory address is 0x%08X\n",
+		.reg = HGC_RXM_DFX_STATUS14,
+	},
+	{
+		.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM3_ECC_MB_OFF),
+		.msk = HGC_RXM_DFX_STATUS15_MEM3_MSK,
+		.shift = HGC_RXM_DFX_STATUS15_MEM3_OFF,
+		.msg = "rxm_mem3_accbad_intr (0x%x) found: \
+				memory address is 0x%08X\n",
+		.reg = HGC_RXM_DFX_STATUS15,
+	},
+};
+
 enum {
 	HISI_SAS_PHY_PHY_UPDOWN,
 	HISI_SAS_PHY_CHNL_INT,
@@ -806,12 +974,14 @@ static void setup_itct_v2_hw(struct hisi_hba *hisi_hba,
 static void free_device_v2_hw(struct hisi_hba *hisi_hba,
 			      struct hisi_sas_device *sas_dev)
 {
+	DECLARE_COMPLETION_ONSTACK(completion);
 	u64 dev_id = sas_dev->device_id;
-	struct device *dev = hisi_hba->dev;
 	struct hisi_sas_itct *itct = &hisi_hba->itct[dev_id];
 	u32 reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
 	int i;
 
+	sas_dev->completion = &completion;
+
 	/* SoC bug workaround */
 	if (dev_is_sata(sas_dev->sas_device))
 		clear_bit(sas_dev->sata_idx, hisi_hba->sata_dev_bitmap);
@@ -821,28 +991,12 @@ static void free_device_v2_hw(struct hisi_hba *hisi_hba,
 		hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
 				 ENT_INT_SRC3_ITC_INT_MSK);
 
-	/* clear the itct int*/
 	for (i = 0; i < 2; i++) {
-		/* clear the itct table*/
-		reg_val = hisi_sas_read32(hisi_hba, ITCT_CLR);
-		reg_val |= ITCT_CLR_EN_MSK | (dev_id & ITCT_DEV_MSK);
+		reg_val = ITCT_CLR_EN_MSK | (dev_id & ITCT_DEV_MSK);
 		hisi_sas_write32(hisi_hba, ITCT_CLR, reg_val);
+		wait_for_completion(sas_dev->completion);
 
-		udelay(10);
-		reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
-		if (ENT_INT_SRC3_ITC_INT_MSK & reg_val) {
-			dev_dbg(dev, "got clear ITCT done interrupt\n");
-
-			/* invalid the itct state*/
-			memset(itct, 0, sizeof(struct hisi_sas_itct));
-			hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
-					 ENT_INT_SRC3_ITC_INT_MSK);
-
-			/* clear the itct */
-			hisi_sas_write32(hisi_hba, ITCT_CLR, 0);
-			dev_dbg(dev, "clear ITCT ok\n");
-			break;
-		}
+		memset(itct, 0, sizeof(struct hisi_sas_itct));
 	}
 }
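
free_device_v2_hw() now parks a completion on the device and sleeps in wait_for_completion() instead of the old udelay()-and-poll loop; the ITCT-clear interrupt, handled in the fatal-AXI hunk further down, signals it with complete(). A hedged sketch of that thread/IRQ handshake; struct my_dev, write_clear_request() and clear_done() are placeholders, the completion API is real:

static void clear_itct(struct my_dev *d)
{
	DECLARE_COMPLETION_ONSTACK(done);

	d->completion = &done;
	write_clear_request(d);		/* kick the hardware */
	wait_for_completion(&done);	/* sleep until the IRQ below fires */
}

static irqreturn_t my_irq(int irq, void *p)
{
	struct my_dev *d = p;

	if (clear_done(d))
		complete(d->completion);	/* wake the waiter above */
	return IRQ_HANDLED;
}
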
 
@@ -1023,7 +1177,7 @@ static void init_reg_v2_hw(struct hisi_hba *hisi_hba)
 	hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 0xffffffff);
 	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0x7efefefe);
 	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0x7efefefe);
-	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0x7ffffffe);
+	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0x7ffe20fe);
 	hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xfff00c30);
 	for (i = 0; i < hisi_hba->queue_count; i++)
 		hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK+0x4*i, 0);
@@ -1332,25 +1486,12 @@ static void start_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
 	enable_phy_v2_hw(hisi_hba, phy_no);
 }
 
-static void stop_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
-{
-	disable_phy_v2_hw(hisi_hba, phy_no);
-}
-
-static void stop_phys_v2_hw(struct hisi_hba *hisi_hba)
-{
-	int i;
-
-	for (i = 0; i < hisi_hba->n_phy; i++)
-		stop_phy_v2_hw(hisi_hba, i);
-}
-
 static void phy_hard_reset_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
 {
 	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
 	u32 txid_auto;
 
-	stop_phy_v2_hw(hisi_hba, phy_no);
+	disable_phy_v2_hw(hisi_hba, phy_no);
 	if (phy->identify.device_type == SAS_END_DEVICE) {
 		txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, TXID_AUTO);
 		hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO,
@@ -1360,17 +1501,38 @@ static void phy_hard_reset_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
 	start_phy_v2_hw(hisi_hba, phy_no);
 }
 
-static void start_phys_v2_hw(struct hisi_hba *hisi_hba)
+static void phy_get_events_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
 {
-	int i;
+	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
+	struct asd_sas_phy *sas_phy = &phy->sas_phy;
+	struct sas_phy *sphy = sas_phy->phy;
+	u32 err4_reg_val, err6_reg_val;
 
-	for (i = 0; i < hisi_hba->n_phy; i++)
-		start_phy_v2_hw(hisi_hba, i);
+	/* loss dword syn, phy reset problem */
+	err4_reg_val = hisi_sas_phy_read32(hisi_hba, phy_no, SAS_ERR_CNT4_REG);
+
+	/* disparity err, invalid dword */
+	err6_reg_val = hisi_sas_phy_read32(hisi_hba, phy_no, SAS_ERR_CNT6_REG);
+
+	sphy->loss_of_dword_sync_count += (err4_reg_val >> 16) & 0xFFFF;
+	sphy->phy_reset_problem_count += err4_reg_val & 0xFFFF;
+	sphy->invalid_dword_count += (err6_reg_val & 0xFF0000) >> 16;
+	sphy->running_disparity_error_count += err6_reg_val & 0xFF;
 }
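
phy_get_events_v2_hw() feeds the generic SAS PHY error counters from two per-port registers: one packs loss-of-dword-sync and phy-reset-problem counts as 16-bit halves, the other carries invalid-dword and running-disparity counts in 8-bit fields. A tiny standalone model of that unpacking (register values here are made up):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t err4 = 0x00030002;	/* [31:16] dword-sync loss, [15:0] phy reset problem */
	uint32_t err6 = 0x00050004;	/* [23:16] invalid dword,  [7:0] running disparity  */

	printf("loss_of_dword_sync += %u\n", (unsigned)((err4 >> 16) & 0xffff));
	printf("phy_reset_problem  += %u\n", (unsigned)(err4 & 0xffff));
	printf("invalid_dword      += %u\n", (unsigned)((err6 & 0xff0000) >> 16));
	printf("running_disparity  += %u\n", (unsigned)(err6 & 0xff));
	return 0;
}
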
 
 static void phys_init_v2_hw(struct hisi_hba *hisi_hba)
 {
-	start_phys_v2_hw(hisi_hba);
+	int i;
+
+	for (i = 0; i < hisi_hba->n_phy; i++) {
+		struct hisi_sas_phy *phy = &hisi_hba->phy[i];
+		struct asd_sas_phy *sas_phy = &phy->sas_phy;
+
+		if (!sas_phy->phy->enabled)
+			continue;
+
+		start_phy_v2_hw(hisi_hba, i);
+	}
 }
 
 static void sl_notify_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
@@ -1965,7 +2127,7 @@ static void slot_err_v2_hw(struct hisi_hba *hisi_hba,
 		}
 		case DMA_RX_DATA_LEN_UNDERFLOW:
 		{
-			ts->residual = dma_rx_err_type;
+			ts->residual = trans_tx_fail_type;
 			ts->stat = SAS_DATA_UNDERRUN;
 			break;
 		}
@@ -2091,7 +2253,7 @@ static void slot_err_v2_hw(struct hisi_hba *hisi_hba,
 		}
 		case DMA_RX_DATA_LEN_UNDERFLOW:
 		{
-			ts->residual = dma_rx_err_type;
+			ts->residual = trans_tx_fail_type;
 			ts->stat = SAS_DATA_UNDERRUN;
 			break;
 		}
@@ -2599,6 +2761,7 @@ static irqreturn_t int_phy_updown_v2_hw(int irq_no, void *p)
 	struct hisi_hba *hisi_hba = p;
 	u32 irq_msk;
 	int phy_no = 0;
+	irqreturn_t res = IRQ_NONE;
 
 	irq_msk = (hisi_sas_read32(hisi_hba, HGC_INVLD_DQE_INFO)
 		   >> HGC_INVLD_DQE_INFO_FB_CH0_OFF) & 0x1ff;
@@ -2613,15 +2776,15 @@ static irqreturn_t int_phy_updown_v2_hw(int irq_no, void *p)
 			case CHL_INT0_SL_PHY_ENABLE_MSK:
 				/* phy up */
 				if (phy_up_v2_hw(phy_no, hisi_hba) ==
-				    IRQ_NONE)
-					return IRQ_NONE;
+				    IRQ_HANDLED)
+					res = IRQ_HANDLED;
 				break;
 
 			case CHL_INT0_NOT_RDY_MSK:
 				/* phy down */
 				if (phy_down_v2_hw(phy_no, hisi_hba) ==
-				    IRQ_NONE)
-					return IRQ_NONE;
+				    IRQ_HANDLED)
+					res = IRQ_HANDLED;
 				break;
 
 			case (CHL_INT0_NOT_RDY_MSK |
@@ -2631,13 +2794,13 @@ static irqreturn_t int_phy_updown_v2_hw(int irq_no, void *p)
 				if (reg_value & BIT(phy_no)) {
 					/* phy up */
 					if (phy_up_v2_hw(phy_no, hisi_hba) ==
-					    IRQ_NONE)
-						return IRQ_NONE;
+					    IRQ_HANDLED)
+						res = IRQ_HANDLED;
 				} else {
 					/* phy down */
 					if (phy_down_v2_hw(phy_no, hisi_hba) ==
-					    IRQ_NONE)
-						return IRQ_NONE;
+					    IRQ_HANDLED)
+						res = IRQ_HANDLED;
 				}
 				break;
 
@@ -2650,7 +2813,7 @@ static irqreturn_t int_phy_updown_v2_hw(int irq_no, void *p)
 		phy_no++;
 	}
 
-	return IRQ_HANDLED;
+	return res;
 }
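
The PHY up/down handler used to return IRQ_NONE as soon as a single event could not be handled, abandoning the remaining PHY bits in the mask. The rewrite keeps scanning and only reports IRQ_NONE if nothing at all was claimed. A short hedged sketch of that shape; handle_one() stands in for the per-PHY up/down processing and N_PHY for the controller's PHY count:

static irqreturn_t shared_irq(int irq, void *p)
{
	irqreturn_t res = IRQ_NONE;
	int phy;

	for (phy = 0; phy < N_PHY; phy++)
		if (handle_one(phy) == IRQ_HANDLED)
			res = IRQ_HANDLED;	/* remember, but keep scanning */

	return res;	/* IRQ_NONE only if no PHY claimed the interrupt */
}
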
 
 static void phy_bcast_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
@@ -2733,194 +2896,38 @@ static void
 one_bit_ecc_error_process_v2_hw(struct hisi_hba *hisi_hba, u32 irq_value)
 {
 	struct device *dev = hisi_hba->dev;
-	u32 reg_val;
-
-	if (irq_value & BIT(SAS_ECC_INTR_DQE_ECC_1B_OFF)) {
-		reg_val = hisi_sas_read32(hisi_hba, HGC_DQE_ECC_ADDR);
-		dev_warn(dev, "hgc_dqe_acc1b_intr found: \
-			       Ram address is 0x%08X\n",
-			 (reg_val & HGC_DQE_ECC_1B_ADDR_MSK) >>
-			 HGC_DQE_ECC_1B_ADDR_OFF);
-	}
-
-	if (irq_value & BIT(SAS_ECC_INTR_IOST_ECC_1B_OFF)) {
-		reg_val = hisi_sas_read32(hisi_hba, HGC_IOST_ECC_ADDR);
-		dev_warn(dev, "hgc_iost_acc1b_intr found: \
-			       Ram address is 0x%08X\n",
-			 (reg_val & HGC_IOST_ECC_1B_ADDR_MSK) >>
-			 HGC_IOST_ECC_1B_ADDR_OFF);
-	}
-
-	if (irq_value & BIT(SAS_ECC_INTR_ITCT_ECC_1B_OFF)) {
-		reg_val = hisi_sas_read32(hisi_hba, HGC_ITCT_ECC_ADDR);
-		dev_warn(dev, "hgc_itct_acc1b_intr found: \
-			       Ram address is 0x%08X\n",
-			 (reg_val & HGC_ITCT_ECC_1B_ADDR_MSK) >>
-			 HGC_ITCT_ECC_1B_ADDR_OFF);
-	}
-
-	if (irq_value & BIT(SAS_ECC_INTR_IOSTLIST_ECC_1B_OFF)) {
-		reg_val = hisi_sas_read32(hisi_hba, HGC_LM_DFX_STATUS2);
-		dev_warn(dev, "hgc_iostl_acc1b_intr found: \
-			       memory address is 0x%08X\n",
-			 (reg_val & HGC_LM_DFX_STATUS2_IOSTLIST_MSK) >>
-			 HGC_LM_DFX_STATUS2_IOSTLIST_OFF);
-	}
-
-	if (irq_value & BIT(SAS_ECC_INTR_ITCTLIST_ECC_1B_OFF)) {
-		reg_val = hisi_sas_read32(hisi_hba, HGC_LM_DFX_STATUS2);
-		dev_warn(dev, "hgc_itctl_acc1b_intr found: \
-			       memory address is 0x%08X\n",
-			 (reg_val & HGC_LM_DFX_STATUS2_ITCTLIST_MSK) >>
-			 HGC_LM_DFX_STATUS2_ITCTLIST_OFF);
-	}
-
-	if (irq_value & BIT(SAS_ECC_INTR_CQE_ECC_1B_OFF)) {
-		reg_val = hisi_sas_read32(hisi_hba, HGC_CQE_ECC_ADDR);
-		dev_warn(dev, "hgc_cqe_acc1b_intr found: \
-			       Ram address is 0x%08X\n",
-			 (reg_val & HGC_CQE_ECC_1B_ADDR_MSK) >>
-			 HGC_CQE_ECC_1B_ADDR_OFF);
-	}
-
-	if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM0_ECC_1B_OFF)) {
-		reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14);
-		dev_warn(dev, "rxm_mem0_acc1b_intr found: \
-			       memory address is 0x%08X\n",
-			 (reg_val & HGC_RXM_DFX_STATUS14_MEM0_MSK) >>
-			 HGC_RXM_DFX_STATUS14_MEM0_OFF);
-	}
-
-	if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM1_ECC_1B_OFF)) {
-		reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14);
-		dev_warn(dev, "rxm_mem1_acc1b_intr found: \
-			       memory address is 0x%08X\n",
-			 (reg_val & HGC_RXM_DFX_STATUS14_MEM1_MSK) >>
-			 HGC_RXM_DFX_STATUS14_MEM1_OFF);
-	}
-
-	if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM2_ECC_1B_OFF)) {
-		reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14);
-		dev_warn(dev, "rxm_mem2_acc1b_intr found: \
-			       memory address is 0x%08X\n",
-			 (reg_val & HGC_RXM_DFX_STATUS14_MEM2_MSK) >>
-			 HGC_RXM_DFX_STATUS14_MEM2_OFF);
-	}
+	const struct hisi_sas_hw_error *ecc_error;
+	u32 val;
+	int i;
 
-	if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM3_ECC_1B_OFF)) {
-		reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS15);
-		dev_warn(dev, "rxm_mem3_acc1b_intr found: \
-			       memory address is 0x%08X\n",
-			 (reg_val & HGC_RXM_DFX_STATUS15_MEM3_MSK) >>
-			 HGC_RXM_DFX_STATUS15_MEM3_OFF);
+	for (i = 0; i < ARRAY_SIZE(one_bit_ecc_errors); i++) {
+		ecc_error = &one_bit_ecc_errors[i];
+		if (irq_value & ecc_error->irq_msk) {
+			val = hisi_sas_read32(hisi_hba, ecc_error->reg);
+			val &= ecc_error->msk;
+			val >>= ecc_error->shift;
+			dev_warn(dev, ecc_error->msg, val);
+		}
 	}
-
 }
 
 static void multi_bit_ecc_error_process_v2_hw(struct hisi_hba *hisi_hba,
 		u32 irq_value)
 {
-	u32 reg_val;
 	struct device *dev = hisi_hba->dev;
+	const struct hisi_sas_hw_error *ecc_error;
+	u32 val;
+	int i;
 
-	if (irq_value & BIT(SAS_ECC_INTR_DQE_ECC_MB_OFF)) {
-		reg_val = hisi_sas_read32(hisi_hba, HGC_DQE_ECC_ADDR);
-		dev_warn(dev, "hgc_dqe_accbad_intr (0x%x) found: \
-		       Ram address is 0x%08X\n",
-		      irq_value,
-		      (reg_val & HGC_DQE_ECC_MB_ADDR_MSK) >>
-		      HGC_DQE_ECC_MB_ADDR_OFF);
-		queue_work(hisi_hba->wq, &hisi_hba->rst_work);
-	}
-
-	if (irq_value & BIT(SAS_ECC_INTR_IOST_ECC_MB_OFF)) {
-		reg_val = hisi_sas_read32(hisi_hba, HGC_IOST_ECC_ADDR);
-		dev_warn(dev, "hgc_iost_accbad_intr (0x%x) found: \
-		       Ram address is 0x%08X\n",
-		      irq_value,
-		      (reg_val & HGC_IOST_ECC_MB_ADDR_MSK) >>
-		      HGC_IOST_ECC_MB_ADDR_OFF);
-		queue_work(hisi_hba->wq, &hisi_hba->rst_work);
-	}
-
-	if (irq_value & BIT(SAS_ECC_INTR_ITCT_ECC_MB_OFF)) {
-		reg_val = hisi_sas_read32(hisi_hba, HGC_ITCT_ECC_ADDR);
-		dev_warn(dev,"hgc_itct_accbad_intr (0x%x) found: \
-		       Ram address is 0x%08X\n",
-		      irq_value,
-		      (reg_val & HGC_ITCT_ECC_MB_ADDR_MSK) >>
-		      HGC_ITCT_ECC_MB_ADDR_OFF);
-		queue_work(hisi_hba->wq, &hisi_hba->rst_work);
-	}
-
-	if (irq_value & BIT(SAS_ECC_INTR_IOSTLIST_ECC_MB_OFF)) {
-		reg_val = hisi_sas_read32(hisi_hba, HGC_LM_DFX_STATUS2);
-		dev_warn(dev, "hgc_iostl_accbad_intr (0x%x) found: \
-		       memory address is 0x%08X\n",
-		      irq_value,
-		      (reg_val & HGC_LM_DFX_STATUS2_IOSTLIST_MSK) >>
-		      HGC_LM_DFX_STATUS2_IOSTLIST_OFF);
-		queue_work(hisi_hba->wq, &hisi_hba->rst_work);
-	}
-
-	if (irq_value & BIT(SAS_ECC_INTR_ITCTLIST_ECC_MB_OFF)) {
-		reg_val = hisi_sas_read32(hisi_hba, HGC_LM_DFX_STATUS2);
-		dev_warn(dev, "hgc_itctl_accbad_intr (0x%x) found: \
-		       memory address is 0x%08X\n",
-		      irq_value,
-		      (reg_val & HGC_LM_DFX_STATUS2_ITCTLIST_MSK) >>
-		      HGC_LM_DFX_STATUS2_ITCTLIST_OFF);
-		queue_work(hisi_hba->wq, &hisi_hba->rst_work);
-	}
-
-	if (irq_value & BIT(SAS_ECC_INTR_CQE_ECC_MB_OFF)) {
-		reg_val = hisi_sas_read32(hisi_hba, HGC_CQE_ECC_ADDR);
-		dev_warn(dev, "hgc_cqe_accbad_intr (0x%x) found: \
-		       Ram address is 0x%08X\n",
-		      irq_value,
-		      (reg_val & HGC_CQE_ECC_MB_ADDR_MSK) >>
-		      HGC_CQE_ECC_MB_ADDR_OFF);
-		queue_work(hisi_hba->wq, &hisi_hba->rst_work);
-	}
-
-	if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM0_ECC_MB_OFF)) {
-		reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14);
-		dev_warn(dev, "rxm_mem0_accbad_intr (0x%x) found: \
-		       memory address is 0x%08X\n",
-		      irq_value,
-		      (reg_val & HGC_RXM_DFX_STATUS14_MEM0_MSK) >>
-		      HGC_RXM_DFX_STATUS14_MEM0_OFF);
-		queue_work(hisi_hba->wq, &hisi_hba->rst_work);
-	}
-
-	if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM1_ECC_MB_OFF)) {
-		reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14);
-		dev_warn(dev, "rxm_mem1_accbad_intr (0x%x) found: \
-		       memory address is 0x%08X\n",
-		      irq_value,
-		      (reg_val & HGC_RXM_DFX_STATUS14_MEM1_MSK) >>
-		      HGC_RXM_DFX_STATUS14_MEM1_OFF);
-		queue_work(hisi_hba->wq, &hisi_hba->rst_work);
-	}
-
-	if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM2_ECC_MB_OFF)) {
-		reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14);
-		dev_warn(dev, "rxm_mem2_accbad_intr (0x%x) found: \
-		       memory address is 0x%08X\n",
-		      irq_value,
-		      (reg_val & HGC_RXM_DFX_STATUS14_MEM2_MSK) >>
-		      HGC_RXM_DFX_STATUS14_MEM2_OFF);
-		queue_work(hisi_hba->wq, &hisi_hba->rst_work);
-	}
-
-	if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM3_ECC_MB_OFF)) {
-		reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS15);
-		dev_warn(dev, "rxm_mem3_accbad_intr (0x%x) found: \
-		       memory address is 0x%08X\n",
-		      irq_value,
-		      (reg_val & HGC_RXM_DFX_STATUS15_MEM3_MSK) >>
-		      HGC_RXM_DFX_STATUS15_MEM3_OFF);
-		queue_work(hisi_hba->wq, &hisi_hba->rst_work);
+	for (i = 0; i < ARRAY_SIZE(multi_bit_ecc_errors); i++) {
+		ecc_error = &multi_bit_ecc_errors[i];
+		if (irq_value & ecc_error->irq_msk) {
+			val = hisi_sas_read32(hisi_hba, ecc_error->reg);
+			val &= ecc_error->msk;
+			val >>= ecc_error->shift;
+			dev_warn(dev, ecc_error->msg, irq_value, val);
+			queue_work(hisi_hba->wq, &hisi_hba->rst_work);
+		}
 	}
 
 	return;
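
The two ECC handlers above collapse ten near-identical if-blocks each into a data table plus one loop: every entry names the interrupt bit it owns, the register holding the faulting address, and the mask/shift needed to extract it, with the multi-bit variant additionally scheduling the reset work. A standalone model of that table-driven decode (register contents and field layouts here are invented):

#include <stdio.h>
#include <stdint.h>

struct hw_error {
	uint32_t irq_msk;	/* which interrupt bit this entry owns */
	uint32_t msk;		/* field mask within the status register */
	int shift;
	const char *msg;
	unsigned int reg;	/* register index (an offset in the real driver) */
};

static uint32_t regs[3] = { 0, 0x00ab0000, 0x00000042 };

static const struct hw_error errors[] = {
	{ 1u << 0, 0xffff0000, 16, "dqe ecc error, ram address 0x%08x\n",  1 },
	{ 1u << 1, 0x0000ffff,  0, "iost ecc error, ram address 0x%08x\n", 2 },
};

static void decode(uint32_t irq_value)
{
	unsigned int i;

	for (i = 0; i < sizeof(errors) / sizeof(errors[0]); i++) {
		const struct hw_error *e = &errors[i];

		if (!(irq_value & e->irq_msk))
			continue;
		printf(e->msg, (unsigned int)((regs[e->reg] & e->msk) >> e->shift));
	}
}

int main(void)
{
	decode(1u << 0);	/* "dqe ecc error, ram address 0x000000ab" */
	return 0;
}
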
@@ -3053,8 +3060,20 @@ static irqreturn_t fatal_axi_int_v2_hw(int irq_no, void *p)
 			      irq_value);
 			queue_work(hisi_hba->wq, &hisi_hba->rst_work);
 		}
+
+		if (irq_value & BIT(ENT_INT_SRC3_ITC_INT_OFF)) {
+			u32 reg_val = hisi_sas_read32(hisi_hba, ITCT_CLR);
+			u32 dev_id = reg_val & ITCT_DEV_MSK;
+			struct hisi_sas_device *sas_dev =
+					&hisi_hba->devices[dev_id];
+
+			hisi_sas_write32(hisi_hba, ITCT_CLR, 0);
+			dev_dbg(dev, "clear ITCT ok\n");
+			complete(sas_dev->completion);
+		}
 	}
 
+	hisi_sas_write32(hisi_hba, ENT_INT_SRC3, irq_value);
 	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, irq_msk);
 
 	return IRQ_HANDLED;
@@ -3251,97 +3270,92 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
 {
 	struct platform_device *pdev = hisi_hba->platform_dev;
 	struct device *dev = &pdev->dev;
-	int i, irq, rc, irq_map[128];
-
+	int irq, rc, irq_map[128];
+	int i, phy_no, fatal_no, queue_no, k;
 
 	for (i = 0; i < 128; i++)
 		irq_map[i] = platform_get_irq(pdev, i);
 
 	for (i = 0; i < HISI_SAS_PHY_INT_NR; i++) {
-		int idx = i;
-
-		irq = irq_map[idx + 1]; /* Phy up/down is irq1 */
-		if (!irq) {
-			dev_err(dev, "irq init: fail map phy interrupt %d\n",
-				idx);
-			return -ENOENT;
-		}
-
+		irq = irq_map[i + 1]; /* Phy up/down is irq1 */
 		rc = devm_request_irq(dev, irq, phy_interrupts[i], 0,
 				      DRV_NAME " phy", hisi_hba);
 		if (rc) {
 			dev_err(dev, "irq init: could not request "
 				"phy interrupt %d, rc=%d\n",
 				irq, rc);
-			return -ENOENT;
+			rc = -ENOENT;
+			goto free_phy_int_irqs;
 		}
 	}
 
-	for (i = 0; i < hisi_hba->n_phy; i++) {
-		struct hisi_sas_phy *phy = &hisi_hba->phy[i];
-		int idx = i + 72; /* First SATA interrupt is irq72 */
-
-		irq = irq_map[idx];
-		if (!irq) {
-			dev_err(dev, "irq init: fail map phy interrupt %d\n",
-				idx);
-			return -ENOENT;
-		}
+	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
+		struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
 
+		irq = irq_map[phy_no + 72];
 		rc = devm_request_irq(dev, irq, sata_int_v2_hw, 0,
 				      DRV_NAME " sata", phy);
 		if (rc) {
 			dev_err(dev, "irq init: could not request "
 				"sata interrupt %d, rc=%d\n",
 				irq, rc);
-			return -ENOENT;
+			rc = -ENOENT;
+			goto free_sata_int_irqs;
 		}
 	}
 
-	for (i = 0; i < HISI_SAS_FATAL_INT_NR; i++) {
-		int idx = i;
-
-		irq = irq_map[idx + 81];
-		if (!irq) {
-			dev_err(dev, "irq init: fail map fatal interrupt %d\n",
-				idx);
-			return -ENOENT;
-		}
-
-		rc = devm_request_irq(dev, irq, fatal_interrupts[i], 0,
+	for (fatal_no = 0; fatal_no < HISI_SAS_FATAL_INT_NR; fatal_no++) {
+		irq = irq_map[fatal_no + 81];
+		rc = devm_request_irq(dev, irq, fatal_interrupts[fatal_no], 0,
 				      DRV_NAME " fatal", hisi_hba);
 		if (rc) {
 			dev_err(dev,
 				"irq init: could not request fatal interrupt %d, rc=%d\n",
 				irq, rc);
-			return -ENOENT;
+			rc = -ENOENT;
+			goto free_fatal_int_irqs;
 		}
 	}
 
-	for (i = 0; i < hisi_hba->queue_count; i++) {
-		int idx = i + 96; /* First cq interrupt is irq96 */
-		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
+	for (queue_no = 0; queue_no < hisi_hba->queue_count; queue_no++) {
+		struct hisi_sas_cq *cq = &hisi_hba->cq[queue_no];
 		struct tasklet_struct *t = &cq->tasklet;
 
-		irq = irq_map[idx];
-		if (!irq) {
-			dev_err(dev,
-				"irq init: could not map cq interrupt %d\n",
-				idx);
-			return -ENOENT;
-		}
+		irq = irq_map[queue_no + 96];
 		rc = devm_request_irq(dev, irq, cq_interrupt_v2_hw, 0,
-				      DRV_NAME " cq", &hisi_hba->cq[i]);
+				      DRV_NAME " cq", cq);
 		if (rc) {
 			dev_err(dev,
 				"irq init: could not request cq interrupt %d, rc=%d\n",
 				irq, rc);
-			return -ENOENT;
+			rc = -ENOENT;
+			goto free_cq_int_irqs;
 		}
 		tasklet_init(t, cq_tasklet_v2_hw, (unsigned long)cq);
 	}
 
 	return 0;
+
+free_cq_int_irqs:
+	for (k = 0; k < queue_no; k++) {
+		struct hisi_sas_cq *cq = &hisi_hba->cq[k];
+
+		free_irq(irq_map[k + 96], cq);
+		tasklet_kill(&cq->tasklet);
+	}
+free_fatal_int_irqs:
+	for (k = 0; k < fatal_no; k++)
+		free_irq(irq_map[k + 81], hisi_hba);
+free_sata_int_irqs:
+	for (k = 0; k < phy_no; k++) {
+		struct hisi_sas_phy *phy = &hisi_hba->phy[k];
+
+		free_irq(irq_map[k + 72], phy);
+	}
+free_phy_int_irqs:
+	for (k = 0; k < i; k++)
+		free_irq(irq_map[k + 1], hisi_hba);
+	return rc;
 }
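
interrupt_init_v2_hw() now keeps each loop counter (i, phy_no, fatal_no, queue_no) alive past its loop so a failure can unwind exactly the interrupts requested so far, newest group first, instead of returning with the earlier ones still held. A hedged sketch of that counter-plus-goto-ladder pattern; request_one()/release_one() are placeholders for the per-resource request/free pairs:

static int request_all(int n)
{
	int i, k, rc;

	for (i = 0; i < n; i++) {
		rc = request_one(i);
		if (rc)
			goto undo;	/* i == how many succeeded */
	}
	return 0;

undo:
	for (k = 0; k < i; k++)
		release_one(k);		/* release only what was taken */
	return rc;
}
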
 
 static int hisi_sas_v2_init(struct hisi_hba *hisi_hba)
@@ -3383,19 +3397,21 @@ static void interrupt_disable_v2_hw(struct hisi_hba *hisi_hba)
 		synchronize_irq(platform_get_irq(pdev, i));
 }
 
+
+static u32 get_phys_state_v2_hw(struct hisi_hba *hisi_hba)
+{
+	return hisi_sas_read32(hisi_hba, PHY_STATE);
+}
+
 static int soft_reset_v2_hw(struct hisi_hba *hisi_hba)
 {
 	struct device *dev = hisi_hba->dev;
-	u32 old_state, state;
 	int rc, cnt;
-	int phy_no;
-
-	old_state = hisi_sas_read32(hisi_hba, PHY_STATE);
 
 	interrupt_disable_v2_hw(hisi_hba);
 	hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0x0);
 
-	stop_phys_v2_hw(hisi_hba);
+	hisi_sas_stop_phys(hisi_hba);
 
 	mdelay(10);
 
@@ -3425,22 +3441,6 @@ static int soft_reset_v2_hw(struct hisi_hba *hisi_hba)
 
 	phys_reject_stp_links_v2_hw(hisi_hba);
 
-	/* Re-enable the PHYs */
-	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
-		struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
-		struct asd_sas_phy *sas_phy = &phy->sas_phy;
-
-		if (sas_phy->enabled)
-			start_phy_v2_hw(hisi_hba, phy_no);
-	}
-
-	/* Wait for the PHYs to come up and read the PHY state */
-	msleep(1000);
-
-	state = hisi_sas_read32(hisi_hba, PHY_STATE);
-
-	hisi_sas_rescan_topology(hisi_hba, old_state, state);
-
 	return 0;
 }
 
@@ -3463,11 +3463,13 @@ static const struct hisi_sas_hw hisi_sas_v2_hw = {
 	.phy_enable = enable_phy_v2_hw,
 	.phy_disable = disable_phy_v2_hw,
 	.phy_hard_reset = phy_hard_reset_v2_hw,
+	.get_events = phy_get_events_v2_hw,
 	.phy_set_linkrate = phy_set_linkrate_v2_hw,
 	.phy_get_max_linkrate = phy_get_max_linkrate_v2_hw,
 	.max_command_entries = HISI_SAS_COMMAND_ENTRIES_V2_HW,
 	.complete_hdr_size = sizeof(struct hisi_sas_complete_v2_hdr),
 	.soft_reset = soft_reset_v2_hw,
+	.get_phys_state = get_phys_state_v2_hw,
 };
 
 static int hisi_sas_v2_probe(struct platform_device *pdev)
@@ -3491,10 +3493,17 @@ static int hisi_sas_v2_remove(struct platform_device *pdev)
 {
 	struct sas_ha_struct *sha = platform_get_drvdata(pdev);
 	struct hisi_hba *hisi_hba = sha->lldd_ha;
+	int i;
 
 	if (timer_pending(&hisi_hba->timer))
 		del_timer(&hisi_hba->timer);
 
+	for (i = 0; i < hisi_hba->queue_count; i++) {
+		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
+
+		tasklet_kill(&cq->tasklet);
+	}
+
 	return hisi_sas_remove(pdev);
 }
 
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 83d2dca1c650..2e5fa9717be8 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -23,14 +23,11 @@
 #define PHY_STATE			0x24
 #define PHY_PORT_NUM_MA			0x28
 #define PHY_CONN_RATE			0x30
-#define AXI_AHB_CLK_CFG			0x3c
 #define ITCT_CLR			0x44
 #define ITCT_CLR_EN_OFF			16
 #define ITCT_CLR_EN_MSK			(0x1 << ITCT_CLR_EN_OFF)
 #define ITCT_DEV_OFF			0
 #define ITCT_DEV_MSK			(0x7ff << ITCT_DEV_OFF)
-#define AXI_USER1			0x48
-#define AXI_USER2			0x4c
 #define IO_SATA_BROKEN_MSG_ADDR_LO	0x58
 #define IO_SATA_BROKEN_MSG_ADDR_HI	0x5c
 #define SATA_INITI_D2H_STORE_ADDR_LO	0x60
@@ -137,6 +134,7 @@
 #define TX_HARDRST_MSK          (0x1 << TX_HARDRST_OFF)
 #define RX_IDAF_DWORD0			(PORT_BASE + 0xc4)
 #define RXOP_CHECK_CFG_H		(PORT_BASE + 0xfc)
+#define STP_LINK_TIMER			(PORT_BASE + 0x120)
 #define SAS_SSP_CON_TIMER_CFG		(PORT_BASE + 0x134)
 #define SAS_SMP_CON_TIMER_CFG		(PORT_BASE + 0x138)
 #define SAS_STP_CON_TIMER_CFG		(PORT_BASE + 0x13c)
@@ -167,6 +165,31 @@
 #define PHYCTRL_PHY_ENA_MSK		(PORT_BASE + 0x2bc)
 #define SL_RX_BCAST_CHK_MSK		(PORT_BASE + 0x2c0)
 #define PHYCTRL_OOB_RESTART_MSK		(PORT_BASE + 0x2c4)
+#define DMA_TX_STATUS			(PORT_BASE + 0x2d0)
+#define DMA_TX_STATUS_BUSY_OFF		0
+#define DMA_TX_STATUS_BUSY_MSK		(0x1 << DMA_TX_STATUS_BUSY_OFF)
+#define DMA_RX_STATUS			(PORT_BASE + 0x2e8)
+#define DMA_RX_STATUS_BUSY_OFF		0
+#define DMA_RX_STATUS_BUSY_MSK		(0x1 << DMA_RX_STATUS_BUSY_OFF)
+
+#define MAX_ITCT_HW			4096 /* max the hw can support */
+#define DEFAULT_ITCT_HW		2048 /* reset value, not reprogrammed */
+#if (HISI_SAS_MAX_DEVICES > DEFAULT_ITCT_HW)
+#error Max ITCT exceeded
+#endif
+
+#define AXI_MASTER_CFG_BASE		(0x5000)
+#define AM_CTRL_GLOBAL			(0x0)
+#define AM_CURR_TRANS_RETURN	(0x150)
+
+#define AM_CFG_MAX_TRANS		(0x5010)
+#define AM_CFG_SINGLE_PORT_MAX_TRANS	(0x5014)
+#define AXI_CFG					(0x5100)
+#define AM_ROB_ECC_ERR_ADDR		(0x510c)
+#define AM_ROB_ECC_ONEBIT_ERR_ADDR_OFF	0
+#define AM_ROB_ECC_ONEBIT_ERR_ADDR_MSK	(0xff << AM_ROB_ECC_ONEBIT_ERR_ADDR_OFF)
+#define AM_ROB_ECC_MULBIT_ERR_ADDR_OFF	8
+#define AM_ROB_ECC_MULBIT_ERR_ADDR_MSK	(0xff << AM_ROB_ECC_MULBIT_ERR_ADDR_OFF)
 
 /* HW dma structures */
 /* Delivery queue header */
@@ -354,8 +377,6 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
 	/* Global registers init */
 	hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE,
 			 (u32)((1ULL << hisi_hba->queue_count) - 1));
-	hisi_sas_write32(hisi_hba, AXI_USER1, 0x0);
-	hisi_sas_write32(hisi_hba, AXI_USER2, 0x40000060);
 	hisi_sas_write32(hisi_hba, HGC_SAS_TXFAIL_RETRY_CTRL, 0x108);
 	hisi_sas_write32(hisi_hba, CFG_1US_TIMER_TRSH, 0xd);
 	hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1);
@@ -371,15 +392,14 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
 	hisi_sas_write32(hisi_hba, CHNL_PHYUPDOWN_INT_MSK, 0x0);
 	hisi_sas_write32(hisi_hba, CHNL_ENT_INT_MSK, 0x0);
 	hisi_sas_write32(hisi_hba, HGC_COM_INT_MSK, 0x0);
-	hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xfff00c30);
+	hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0x0);
 	hisi_sas_write32(hisi_hba, AWQOS_AWCACHE_CFG, 0xf0f0);
 	hisi_sas_write32(hisi_hba, ARQOS_ARCACHE_CFG, 0xf0f0);
 	for (i = 0; i < hisi_hba->queue_count; i++)
 		hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK+0x4*i, 0);
 
-	hisi_sas_write32(hisi_hba, AXI_AHB_CLK_CFG, 1);
 	hisi_sas_write32(hisi_hba, HYPER_STREAM_ID_EN_CFG, 1);
-	hisi_sas_write32(hisi_hba, CFG_MAX_TAG, 0xfff07fff);
+	hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE, 0x30000);
 
 	for (i = 0; i < hisi_hba->n_phy; i++) {
 		hisi_sas_phy_write32(hisi_hba, i, PROG_PHY_LINK_RATE, 0x801);
@@ -389,7 +409,6 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
 		hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000);
 		hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xffffffff);
 		hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x8ffffbff);
-		hisi_sas_phy_write32(hisi_hba, i, SL_CFG, 0x83f801fc);
 		hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL_RDY_MSK, 0x0);
 		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x0);
 		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_DWS_RESET_MSK, 0x0);
@@ -398,9 +417,11 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
 		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_OOB_RESTART_MSK, 0x0);
 		hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL, 0x199b4fa);
 		hisi_sas_phy_write32(hisi_hba, i, SAS_SSP_CON_TIMER_CFG,
-				     0xa0064);
+				     0xa03e8);
 		hisi_sas_phy_write32(hisi_hba, i, SAS_STP_CON_TIMER_CFG,
-				     0xa0064);
+				     0xa03e8);
+		hisi_sas_phy_write32(hisi_hba, i, STP_LINK_TIMER,
+				     0x7f7a120);
 	}
 	for (i = 0; i < hisi_hba->queue_count; i++) {
 		/* Delivery queue */
@@ -578,8 +599,6 @@ static void free_device_v3_hw(struct hisi_hba *hisi_hba,
 		memset(itct, 0, sizeof(struct hisi_sas_itct));
 		hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
 				 ENT_INT_SRC3_ITC_INT_MSK);
-		hisi_hba->devices[dev_id].dev_type = SAS_PHY_UNUSED;
-		hisi_hba->devices[dev_id].dev_status = HISI_SAS_DEV_NORMAL;
 
 		/* clear the itct */
 		hisi_sas_write32(hisi_hba, ITCT_CLR, 0);
@@ -610,8 +629,52 @@ static void dereg_device_v3_hw(struct hisi_hba *hisi_hba,
 					1 << CFG_ABT_SET_IPTT_DONE_OFF);
 }
 
+static int reset_hw_v3_hw(struct hisi_hba *hisi_hba)
+{
+	struct device *dev = hisi_hba->dev;
+	int ret;
+	u32 val;
+
+	hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0);
+
+	/* Disable all of the PHYs */
+	hisi_sas_stop_phys(hisi_hba);
+	udelay(50);
+
+	/* Ensure axi bus idle */
+	ret = readl_poll_timeout(hisi_hba->regs + AXI_CFG, val, !val,
+			20000, 1000000);
+	if (ret) {
+		dev_err(dev, "axi bus is not idle, ret = %d!\n", ret);
+		return -EIO;
+	}
+
+	if (ACPI_HANDLE(dev)) {
+		acpi_status s;
+
+		s = acpi_evaluate_object(ACPI_HANDLE(dev), "_RST", NULL, NULL);
+		if (ACPI_FAILURE(s)) {
+			dev_err(dev, "Reset failed\n");
+			return -EIO;
+		}
+	} else
+		dev_err(dev, "no reset method!\n");
+
+	return 0;
+}
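
reset_hw_v3_hw() quiesces the PHYs, polls AXI_CFG with readl_poll_timeout() until the bus reads back idle, and only then asks firmware to reset the block via the ACPI _RST method. readl_poll_timeout(addr, val, cond, sleep_us, timeout_us) re-reads the register until the condition holds or the timeout expires, returning -ETIMEDOUT otherwise. A hedged fragment of that wait; base, MY_STATUS_REG and BUSY_BIT are placeholders:

u32 val;
int ret;

/* poll every 20ms, give up after 1s */
ret = readl_poll_timeout(base + MY_STATUS_REG, val,
			 !(val & BUSY_BIT), 20000, 1000000);
if (ret)
	return ret;	/* -ETIMEDOUT: the block never went idle */
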
+
 static int hw_init_v3_hw(struct hisi_hba *hisi_hba)
 {
+	struct device *dev = hisi_hba->dev;
+	int rc;
+
+	rc = reset_hw_v3_hw(hisi_hba);
+	if (rc) {
+		dev_err(dev, "hisi_sas_reset_hw failed, rc=%d", rc);
+		return rc;
+	}
+
+	msleep(100);
 	init_reg_v3_hw(hisi_hba);
 
 	return 0;
@@ -640,25 +703,12 @@ static void start_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
 	enable_phy_v3_hw(hisi_hba, phy_no);
 }
 
-static void stop_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
-{
-	disable_phy_v3_hw(hisi_hba, phy_no);
-}
-
-static void start_phys_v3_hw(struct hisi_hba *hisi_hba)
-{
-	int i;
-
-	for (i = 0; i < hisi_hba->n_phy; i++)
-		start_phy_v3_hw(hisi_hba, i);
-}
-
 static void phy_hard_reset_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
 {
 	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
 	u32 txid_auto;
 
-	stop_phy_v3_hw(hisi_hba, phy_no);
+	disable_phy_v3_hw(hisi_hba, phy_no);
 	if (phy->identify.device_type == SAS_END_DEVICE) {
 		txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, TXID_AUTO);
 		hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO,
@@ -675,7 +725,17 @@ enum sas_linkrate phy_get_max_linkrate_v3_hw(void)
 
 static void phys_init_v3_hw(struct hisi_hba *hisi_hba)
 {
-	start_phys_v3_hw(hisi_hba);
+	int i;
+
+	for (i = 0; i < hisi_hba->n_phy; i++) {
+		struct hisi_sas_phy *phy = &hisi_hba->phy[i];
+		struct asd_sas_phy *sas_phy = &phy->sas_phy;
+
+		if (!sas_phy->phy->enabled)
+			continue;
+
+		start_phy_v3_hw(hisi_hba, i);
+	}
 }
 
 static void sl_notify_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
@@ -1140,7 +1200,6 @@ end:
 
 static int phy_down_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
 {
-	int res = 0;
 	u32 phy_state, sl_ctrl, txid_auto;
 	struct device *dev = hisi_hba->dev;
 
@@ -1161,7 +1220,7 @@ static int phy_down_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
 	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, CHL_INT0_NOT_RDY_MSK);
 	hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 0);
 
-	return res;
+	return 0;
 }
 
 static void phy_bcast_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
@@ -1259,7 +1318,7 @@ static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
 		if (irq_msk & (2 << (phy_no * 4)) && irq_value0) {
 			hisi_sas_phy_write32(hisi_hba, phy_no,
 					CHL_INT0, irq_value0
-					& (~CHL_INT0_HOTPLUG_TOUT_MSK)
+					& (~CHL_INT0_SL_RX_BCST_ACK_MSK)
 					& (~CHL_INT0_SL_PHY_ENABLE_MSK)
 					& (~CHL_INT0_NOT_RDY_MSK));
 		}
@@ -1620,6 +1679,104 @@ static int hisi_sas_v3_init(struct hisi_hba *hisi_hba)
 	return 0;
 }
 
+static void phy_set_linkrate_v3_hw(struct hisi_hba *hisi_hba, int phy_no,
+		struct sas_phy_linkrates *r)
+{
+	u32 prog_phy_link_rate =
+		hisi_sas_phy_read32(hisi_hba, phy_no, PROG_PHY_LINK_RATE);
+	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
+	struct asd_sas_phy *sas_phy = &phy->sas_phy;
+	int i;
+	enum sas_linkrate min, max;
+	u32 rate_mask = 0;
+
+	if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) {
+		max = sas_phy->phy->maximum_linkrate;
+		min = r->minimum_linkrate;
+	} else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) {
+		max = r->maximum_linkrate;
+		min = sas_phy->phy->minimum_linkrate;
+	} else
+		return;
+
+	sas_phy->phy->maximum_linkrate = max;
+	sas_phy->phy->minimum_linkrate = min;
+
+	min -= SAS_LINK_RATE_1_5_GBPS;
+	max -= SAS_LINK_RATE_1_5_GBPS;
+
+	for (i = 0; i <= max; i++)
+		rate_mask |= 1 << (i * 2);
+
+	prog_phy_link_rate &= ~0xff;
+	prog_phy_link_rate |= rate_mask;
+
+	hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE,
+			prog_phy_link_rate);
+
+	phy_hard_reset_v3_hw(hisi_hba, phy_no);
+}
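
phy_set_linkrate_v3_hw() rewrites the low byte of PROG_PHY_LINK_RATE with one enable bit per supported rate, spaced every other bit, for every rate from 1.5 Gbit up to the requested maximum, then hard-resets the PHY so the new mask takes effect. A standalone check of the mask arithmetic:

#include <stdio.h>

int main(void)
{
	int max = 2;			/* 0 = 1.5G, 1 = 3.0G, 2 = 6.0G */
	unsigned int rate_mask = 0;
	int i;

	for (i = 0; i <= max; i++)
		rate_mask |= 1u << (i * 2);

	printf("rate_mask = 0x%02x\n", rate_mask);	/* 0x15: 1.5G, 3G and 6G enabled */
	return 0;
}
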
+
+static void interrupt_disable_v3_hw(struct hisi_hba *hisi_hba)
+{
+	struct pci_dev *pdev = hisi_hba->pci_dev;
+	int i;
+
+	synchronize_irq(pci_irq_vector(pdev, 1));
+	synchronize_irq(pci_irq_vector(pdev, 2));
+	synchronize_irq(pci_irq_vector(pdev, 11));
+	for (i = 0; i < hisi_hba->queue_count; i++) {
+		hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK + 0x4 * i, 0x1);
+		synchronize_irq(pci_irq_vector(pdev, i + 16));
+	}
+
+	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xffffffff);
+	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0xffffffff);
+	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xffffffff);
+	hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xffffffff);
+
+	for (i = 0; i < hisi_hba->n_phy; i++) {
+		hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xffffffff);
+		hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0xffffffff);
+		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x1);
+		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_PHY_ENA_MSK, 0x1);
+		hisi_sas_phy_write32(hisi_hba, i, SL_RX_BCAST_CHK_MSK, 0x1);
+	}
+}
+
+static u32 get_phys_state_v3_hw(struct hisi_hba *hisi_hba)
+{
+	return hisi_sas_read32(hisi_hba, PHY_STATE);
+}
+
+static int soft_reset_v3_hw(struct hisi_hba *hisi_hba)
+{
+	struct device *dev = hisi_hba->dev;
+	int rc;
+	u32 status;
+
+	interrupt_disable_v3_hw(hisi_hba);
+	hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0x0);
+
+	hisi_sas_stop_phys(hisi_hba);
+
+	mdelay(10);
+
+	hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE + AM_CTRL_GLOBAL, 0x1);
+
+	/* wait until bus idle */
+	rc = readl_poll_timeout(hisi_hba->regs + AXI_MASTER_CFG_BASE +
+		AM_CURR_TRANS_RETURN, status, status == 0x3, 10, 100);
+	if (rc) {
+		dev_err(dev, "axi bus is not idle, rc = %d\n", rc);
+		return rc;
+	}
+
+	hisi_sas_init_mem(hisi_hba);
+
+	return hw_init_v3_hw(hisi_hba);
+}
+
 static const struct hisi_sas_hw hisi_sas_v3_hw = {
 	.hw_init = hisi_sas_v3_init,
 	.setup_itct = setup_itct_v3_hw,
@@ -1640,7 +1797,10 @@ static const struct hisi_sas_hw hisi_sas_v3_hw = {
 	.phy_disable = disable_phy_v3_hw,
 	.phy_hard_reset = phy_hard_reset_v3_hw,
 	.phy_get_max_linkrate = phy_get_max_linkrate_v3_hw,
+	.phy_set_linkrate = phy_set_linkrate_v3_hw,
 	.dereg_device = dereg_device_v3_hw,
+	.soft_reset = soft_reset_v3_hw,
+	.get_phys_state = get_phys_state_v3_hw,
 };
 
 static struct Scsi_Host *
@@ -1651,8 +1811,10 @@ hisi_sas_shost_alloc_pci(struct pci_dev *pdev)
 	struct device *dev = &pdev->dev;
 
 	shost = scsi_host_alloc(hisi_sas_sht, sizeof(*hisi_hba));
-	if (!shost)
-		goto err_out;
+	if (!shost) {
+		dev_err(dev, "shost alloc failed\n");
+		return NULL;
+	}
 	hisi_hba = shost_priv(shost);
 
 	hisi_hba->hw = &hisi_sas_v3_hw;
@@ -1673,6 +1835,7 @@ hisi_sas_shost_alloc_pci(struct pci_dev *pdev)
 
 	return shost;
 err_out:
+	scsi_host_put(shost);
 	dev_err(dev, "shost alloc failed\n");
 	return NULL;
 }
@@ -1781,7 +1944,7 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 err_out_register_ha:
 	scsi_remove_host(shost);
 err_out_ha:
-	kfree(shost);
+	scsi_host_put(shost);
 err_out_regions:
 	pci_release_regions(pdev);
 err_out_disable_device:
@@ -1801,6 +1964,7 @@ hisi_sas_v3_destroy_irqs(struct pci_dev *pdev, struct hisi_hba *hisi_hba)
 		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
 
 		free_irq(pci_irq_vector(pdev, i+16), cq);
+		tasklet_kill(&cq->tasklet);
 	}
 	pci_free_irq_vectors(pdev);
 }
@@ -1810,14 +1974,16 @@ static void hisi_sas_v3_remove(struct pci_dev *pdev)
 	struct device *dev = &pdev->dev;
 	struct sas_ha_struct *sha = dev_get_drvdata(dev);
 	struct hisi_hba *hisi_hba = sha->lldd_ha;
+	struct Scsi_Host *shost = sha->core.shost;
 
 	sas_unregister_ha(sha);
 	sas_remove_host(sha->core.shost);
 
-	hisi_sas_free(hisi_hba);
 	hisi_sas_v3_destroy_irqs(pdev, hisi_hba);
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
+	hisi_sas_free(hisi_hba);
+	scsi_host_put(shost);
 }
 
 enum {
@@ -1839,7 +2005,6 @@ static struct pci_driver sas_v3_pci_driver = {
 
 module_pci_driver(sas_v3_pci_driver);
 
-MODULE_VERSION(DRV_VERSION);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
 MODULE_DESCRIPTION("HISILICON SAS controller v3 hw driver based on pci device");
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 4f7cdb28bd38..9abe81021484 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -81,11 +81,8 @@ MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
 MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
 MODULE_VERSION(HPSA_DRIVER_VERSION);
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("cciss");
 
-static int hpsa_allow_any;
-module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(hpsa_allow_any,
-		"Allow hpsa driver to access unknown HP Smart Array hardware");
 static int hpsa_simple_mode;
 module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(hpsa_simple_mode,
@@ -148,6 +145,8 @@ static const struct pci_device_id hpsa_pci_device_id[] = {
 	{PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f},
 	{PCI_VENDOR_ID_HP,     PCI_ANY_ID,	PCI_ANY_ID, PCI_ANY_ID,
 		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
+	{PCI_VENDOR_ID_COMPAQ,     PCI_ANY_ID,	PCI_ANY_ID, PCI_ANY_ID,
+		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
 	{0,}
 };
 
@@ -158,6 +157,26 @@ MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);
  *  access = Address of the struct of function pointers
  */
 static struct board_type products[] = {
+	{0x40700E11, "Smart Array 5300", &SA5A_access},
+	{0x40800E11, "Smart Array 5i", &SA5B_access},
+	{0x40820E11, "Smart Array 532", &SA5B_access},
+	{0x40830E11, "Smart Array 5312", &SA5B_access},
+	{0x409A0E11, "Smart Array 641", &SA5A_access},
+	{0x409B0E11, "Smart Array 642", &SA5A_access},
+	{0x409C0E11, "Smart Array 6400", &SA5A_access},
+	{0x409D0E11, "Smart Array 6400 EM", &SA5A_access},
+	{0x40910E11, "Smart Array 6i", &SA5A_access},
+	{0x3225103C, "Smart Array P600", &SA5A_access},
+	{0x3223103C, "Smart Array P800", &SA5A_access},
+	{0x3234103C, "Smart Array P400", &SA5A_access},
+	{0x3235103C, "Smart Array P400i", &SA5A_access},
+	{0x3211103C, "Smart Array E200i", &SA5A_access},
+	{0x3212103C, "Smart Array E200", &SA5A_access},
+	{0x3213103C, "Smart Array E200i", &SA5A_access},
+	{0x3214103C, "Smart Array E200i", &SA5A_access},
+	{0x3215103C, "Smart Array E200i", &SA5A_access},
+	{0x3237103C, "Smart Array E500", &SA5A_access},
+	{0x323D103C, "Smart Array P700m", &SA5A_access},
 	{0x3241103C, "Smart Array P212", &SA5_access},
 	{0x3243103C, "Smart Array P410", &SA5_access},
 	{0x3245103C, "Smart Array P410i", &SA5_access},
@@ -278,7 +297,8 @@ static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
 			       u64 *cfg_offset);
 static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
 				    unsigned long *memory_bar);
-static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
+static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id,
+				bool *legacy_board);
 static int wait_for_device_to_become_ready(struct ctlr_info *h,
 					   unsigned char lunaddr[],
 					   int reply_queue);
@@ -866,6 +886,16 @@ static ssize_t host_show_ctlr_num(struct device *dev,
 	return snprintf(buf, 20, "%d\n", h->ctlr);
 }
 
+static ssize_t host_show_legacy_board(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct ctlr_info *h;
+	struct Scsi_Host *shost = class_to_shost(dev);
+
+	h = shost_to_hba(shost);
+	return snprintf(buf, 20, "%d\n", h->legacy_board ? 1 : 0);
+}
+
 static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
 static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
 static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
@@ -891,6 +921,8 @@ static DEVICE_ATTR(lockup_detected, S_IRUGO,
 	host_show_lockup_detected, NULL);
 static DEVICE_ATTR(ctlr_num, S_IRUGO,
 	host_show_ctlr_num, NULL);
+static DEVICE_ATTR(legacy_board, S_IRUGO,
+	host_show_legacy_board, NULL);
 
 static struct device_attribute *hpsa_sdev_attrs[] = {
 	&dev_attr_raid_level,
@@ -912,6 +944,7 @@ static struct device_attribute *hpsa_shost_attrs[] = {
 	&dev_attr_raid_offload_debug,
 	&dev_attr_lockup_detected,
 	&dev_attr_ctlr_num,
+	&dev_attr_legacy_board,
 	NULL,
 };
 
@@ -3565,7 +3598,7 @@ static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
 	memset(scsi3addr, 0, sizeof(scsi3addr));
 	if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
 		buf, bufsize, 0, scsi3addr, TYPE_CMD)) {
-		rc = -1;
+		rc = -EAGAIN;
 		goto out;
 	}
 	if (extended_response)
@@ -3578,16 +3611,19 @@ static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
 	if (ei->CommandStatus != 0 &&
 	    ei->CommandStatus != CMD_DATA_UNDERRUN) {
 		hpsa_scsi_interpret_error(h, c);
-		rc = -1;
+		rc = -EIO;
 	} else {
 		struct ReportLUNdata *rld = buf;
 
 		if (rld->extended_response_flag != extended_response) {
-			dev_err(&h->pdev->dev,
-				"report luns requested format %u, got %u\n",
-				extended_response,
-				rld->extended_response_flag);
-			rc = -1;
+			if (!h->legacy_board) {
+				dev_err(&h->pdev->dev,
+					"report luns requested format %u, got %u\n",
+					extended_response,
+					rld->extended_response_flag);
+				rc = -EINVAL;
+			} else
+				rc = -EOPNOTSUPP;
 		}
 	}
 out:
@@ -3603,7 +3639,7 @@ static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
 
 	rc = hpsa_scsi_do_report_luns(h, 0, buf, bufsize,
 				      HPSA_REPORT_PHYS_EXTENDED);
-	if (!rc || !hpsa_allow_any)
+	if (!rc || rc != -EOPNOTSUPP)
 		return rc;
 
 	/* REPORT PHYS EXTENDED is not supported */
@@ -3791,7 +3827,7 @@ static int hpsa_update_device_info(struct ctlr_info *h,
 	memset(this_device->device_id, 0,
 		sizeof(this_device->device_id));
 	if (hpsa_get_device_id(h, scsi3addr, this_device->device_id, 8,
-		sizeof(this_device->device_id)))
+		sizeof(this_device->device_id)) < 0)
 		dev_err(&h->pdev->dev,
 			"hpsa%d: %s: can't get device id for host %d:C0:T%d:L%d\t%s\t%.16s\n",
 			h->ctlr, __func__,
@@ -3809,6 +3845,16 @@ static int hpsa_update_device_info(struct ctlr_info *h,
 		if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
 			hpsa_get_ioaccel_status(h, scsi3addr, this_device);
 		volume_offline = hpsa_volume_offline(h, scsi3addr);
+		if (volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED &&
+		    h->legacy_board) {
+			/*
+			 * Legacy boards might not support volume status
+			 */
+			dev_info(&h->pdev->dev,
+				 "C0:T%d:L%d Volume status not available, assuming online.\n",
+				 this_device->target, this_device->lun);
+			volume_offline = 0;
+		}
 		this_device->volume_offline = volume_offline;
 		if (volume_offline == HPSA_LV_FAILED) {
 			rc = HPSA_LV_FAILED;
@@ -6571,7 +6617,6 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
 		default:
 			dev_warn(&h->pdev->dev, "unknown command 0x%c\n", cmd);
 			BUG();
-			return -1;
 		}
 	} else if (cmd_type == TYPE_MSG) {
 		switch (cmd) {
@@ -7232,7 +7277,8 @@ static int hpsa_interrupt_mode(struct ctlr_info *h)
 	return 0;
 }
 
-static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
+static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id,
+				bool *legacy_board)
 {
 	int i;
 	u32 subsystem_vendor_id, subsystem_device_id;
@@ -7242,17 +7288,24 @@ static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
 	*board_id = ((subsystem_device_id << 16) & 0xffff0000) |
 		    subsystem_vendor_id;
 
+	if (legacy_board)
+		*legacy_board = false;
 	for (i = 0; i < ARRAY_SIZE(products); i++)
-		if (*board_id == products[i].board_id)
+		if (*board_id == products[i].board_id) {
+			if (products[i].access != &SA5A_access &&
+			    products[i].access != &SA5B_access)
+				return i;
+			dev_warn(&pdev->dev,
+				 "legacy board ID: 0x%08x\n",
+				 *board_id);
+			if (legacy_board)
+			    *legacy_board = true;
 			return i;
+		}
 
-	if ((subsystem_vendor_id != PCI_VENDOR_ID_HP &&
-		subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) ||
-		!hpsa_allow_any) {
-		dev_warn(&pdev->dev, "unrecognized board ID: "
-			"0x%08x, ignoring.\n", *board_id);
-			return -ENODEV;
-	}
+	dev_warn(&pdev->dev, "unrecognized board ID: 0x%08x\n", *board_id);
+	if (legacy_board)
+		*legacy_board = true;
 	return ARRAY_SIZE(products) - 1; /* generic unknown smart array */
 }
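
With hpsa_allow_any gone, hpsa_lookup_board_id() always resolves to some products[] entry: known boards return their index, flagging *legacy_board when the entry uses the old SA5A/SA5B access methods, and anything unrecognised falls through to the final generic entry and is treated as legacy rather than rejected. A standalone model of that lookup-with-fallback (the catch-all entry and its ID are simplified):

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

struct board { uint32_t id; const char *name; bool legacy; };

static const struct board boards[] = {
	{ 0x3241103c, "Smart Array P212", false },
	{ 0x40800e11, "Smart Array 5i",   true  },
	{ 0x00000000, "unknown smart array", true },	/* generic catch-all */
};

#define N_BOARDS (sizeof(boards) / sizeof(boards[0]))

static size_t lookup(uint32_t id, bool *legacy)
{
	size_t i;

	for (i = 0; i < N_BOARDS - 1; i++)
		if (boards[i].id == id) {
			*legacy = boards[i].legacy;
			return i;
		}
	*legacy = true;			/* unrecognised: assume legacy */
	return N_BOARDS - 1;		/* generic entry, never fail the probe */
}

int main(void)
{
	bool legacy;
	size_t idx = lookup(0xdeadbeef, &legacy);

	printf("%s (legacy=%d)\n", boards[idx].name, legacy);
	return 0;
}
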
 
@@ -7555,13 +7608,14 @@ static void hpsa_free_pci_init(struct ctlr_info *h)
 static int hpsa_pci_init(struct ctlr_info *h)
 {
 	int prod_index, err;
+	bool legacy_board;
 
-	prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id);
+	prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id, &legacy_board);
 	if (prod_index < 0)
 		return prod_index;
 	h->product_name = products[prod_index].product_name;
 	h->access = *(products[prod_index].access);
-
+	h->legacy_board = legacy_board;
 	pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
 			       PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
 
@@ -8241,7 +8295,7 @@ static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (number_of_controllers == 0)
 		printk(KERN_INFO DRIVER_NAME "\n");
 
-	rc = hpsa_lookup_board_id(pdev, &board_id);
+	rc = hpsa_lookup_board_id(pdev, &board_id, NULL);
 	if (rc < 0) {
 		dev_warn(&pdev->dev, "Board ID not found\n");
 		return rc;
@@ -9443,14 +9497,6 @@ hpsa_sas_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates)
 	return -EINVAL;
 }
 
-/* SMP = Serial Management Protocol */
-static int
-hpsa_sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
-struct request *req)
-{
-	return -EINVAL;
-}
-
 static struct sas_function_template hpsa_sas_transport_functions = {
 	.get_linkerrors = hpsa_sas_get_linkerrors,
 	.get_enclosure_identifier = hpsa_sas_get_enclosure_identifier,
@@ -9460,7 +9506,6 @@ static struct sas_function_template hpsa_sas_transport_functions = {
 	.phy_setup = hpsa_sas_phy_setup,
 	.phy_release = hpsa_sas_phy_release,
 	.set_phy_speed = hpsa_sas_phy_speed,
-	.smp_handler = hpsa_sas_smp_handler,
 };
 
 /*
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index 1c49741bc639..018f980a701c 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -293,6 +293,7 @@ struct ctlr_info {
 	int	drv_req_rescan;
 	int	raid_offload_debug;
 	int     discovery_polling;
+	int     legacy_board;
 	struct  ReportLUNdata *lastlogicals;
 	int	needs_abort_tags_swizzled;
 	struct workqueue_struct *resubmit_wq;
@@ -447,6 +448,23 @@ static void SA5_intr_mask(struct ctlr_info *h, unsigned long val)
 	}
 }
 
+/*
+ *  Variant of the above; 0x04 turns interrupts off...
+ */
+static void SA5B_intr_mask(struct ctlr_info *h, unsigned long val)
+{
+	if (val) { /* Turn interrupts on */
+		h->interrupts_enabled = 1;
+		writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
+		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
+	} else { /* Turn them off */
+		h->interrupts_enabled = 0;
+		writel(SA5B_INTR_OFF,
+		       h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
+		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
+	}
+}
+
 static void SA5_performant_intr_mask(struct ctlr_info *h, unsigned long val)
 {
 	if (val) { /* turn on interrupts */
@@ -549,6 +567,14 @@ static bool SA5_ioaccel_mode1_intr_pending(struct ctlr_info *h)
 		true : false;
 }
 
+/*
+ *      Returns true if an interrupt is pending.
+ */
+static bool SA5B_intr_pending(struct ctlr_info *h)
+{
+	return readl(h->vaddr + SA5_INTR_STATUS) & SA5B_INTR_PENDING;
+}
+
 #define IOACCEL_MODE1_REPLY_QUEUE_INDEX  0x1A0
 #define IOACCEL_MODE1_PRODUCER_INDEX     0x1B8
 #define IOACCEL_MODE1_CONSUMER_INDEX     0x1BC
@@ -581,38 +607,53 @@ static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
 }
 
 static struct access_method SA5_access = {
-	.submit_command = SA5_submit_command,
-	.set_intr_mask = SA5_intr_mask,
-	.intr_pending = SA5_intr_pending,
-	.command_completed = SA5_completed,
+	.submit_command =	SA5_submit_command,
+	.set_intr_mask =	SA5_intr_mask,
+	.intr_pending =		SA5_intr_pending,
+	.command_completed =	SA5_completed,
+};
+
+/* Duplicate entry of the above to mark unsupported boards */
+static struct access_method SA5A_access = {
+	.submit_command =	SA5_submit_command,
+	.set_intr_mask =	SA5_intr_mask,
+	.intr_pending =		SA5_intr_pending,
+	.command_completed =	SA5_completed,
+};
+
+static struct access_method SA5B_access = {
+	.submit_command =	SA5_submit_command,
+	.set_intr_mask =	SA5B_intr_mask,
+	.intr_pending =		SA5B_intr_pending,
+	.command_completed =	SA5_completed,
 };
 
 static struct access_method SA5_ioaccel_mode1_access = {
-	.submit_command = SA5_submit_command,
-	.set_intr_mask = SA5_performant_intr_mask,
-	.intr_pending = SA5_ioaccel_mode1_intr_pending,
-	.command_completed = SA5_ioaccel_mode1_completed,
+	.submit_command =	SA5_submit_command,
+	.set_intr_mask =	SA5_performant_intr_mask,
+	.intr_pending =		SA5_ioaccel_mode1_intr_pending,
+	.command_completed =	SA5_ioaccel_mode1_completed,
 };
 
 static struct access_method SA5_ioaccel_mode2_access = {
-	.submit_command = SA5_submit_command_ioaccel2,
-	.set_intr_mask = SA5_performant_intr_mask,
-	.intr_pending = SA5_performant_intr_pending,
-	.command_completed = SA5_performant_completed,
+	.submit_command =	SA5_submit_command_ioaccel2,
+	.set_intr_mask =	SA5_performant_intr_mask,
+	.intr_pending =		SA5_performant_intr_pending,
+	.command_completed =	SA5_performant_completed,
 };
 
 static struct access_method SA5_performant_access = {
-	.submit_command = SA5_submit_command,
-	.set_intr_mask = SA5_performant_intr_mask,
-	.intr_pending = SA5_performant_intr_pending,
-	.command_completed = SA5_performant_completed,
+	.submit_command =	SA5_submit_command,
+	.set_intr_mask =	SA5_performant_intr_mask,
+	.intr_pending =		SA5_performant_intr_pending,
+	.command_completed =	SA5_performant_completed,
 };
 
 static struct access_method SA5_performant_access_no_read = {
-	.submit_command = SA5_submit_command_no_read,
-	.set_intr_mask = SA5_performant_intr_mask,
-	.intr_pending = SA5_performant_intr_pending,
-	.command_completed = SA5_performant_completed,
+	.submit_command =	SA5_submit_command_no_read,
+	.set_intr_mask =	SA5_performant_intr_mask,
+	.intr_pending =		SA5_performant_intr_pending,
+	.command_completed =	SA5_performant_completed,
 };
 
 struct board_type {
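
The SA5A/SA5B tables above keep the common submit/complete helpers and swap in only the legacy interrupt-mask and interrupt-pending callbacks; the new legacy_board flag in ctlr_info presumably decides which table a controller gets. As a hedged illustration of how such an access_method vtable is consumed, with hypothetical demo_* names rather than hpsa code:

#include <linux/types.h>

/* Illustrative only; mirrors the shape of struct access_method above. */
struct demo_ctlr;

struct demo_access_method {
	void (*set_intr_mask)(struct demo_ctlr *h, unsigned long val);
	bool (*intr_pending)(struct demo_ctlr *h);
};

struct demo_ctlr {
	const struct demo_access_method *access; /* e.g. an SA5B-style table */
	int legacy_board;                        /* mirrors ctlr_info above  */
};

static void demo_enable_interrupts(struct demo_ctlr *h)
{
	/* Dispatch through the per-board table chosen at probe time. */
	h->access->set_intr_mask(h, 1);
}
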
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
index 7226226f7383..2fad7f03aa02 100644
--- a/drivers/scsi/hptiop.c
+++ b/drivers/scsi/hptiop.c
@@ -1106,12 +1106,10 @@ static int hptiop_reset_hba(struct hptiop_hba *hba)
 
 static int hptiop_reset(struct scsi_cmnd *scp)
 {
-	struct Scsi_Host * host = scp->device->host;
-	struct hptiop_hba * hba = (struct hptiop_hba *)host->hostdata;
+	struct hptiop_hba * hba = (struct hptiop_hba *)scp->device->host->hostdata;
 
-	printk(KERN_WARNING "hptiop_reset(%d/%d/%d) scp=%p\n",
-			scp->device->host->host_no, scp->device->channel,
-			scp->device->id, scp);
+	printk(KERN_WARNING "hptiop_reset(%d/%d/%d)\n",
+	       scp->device->host->host_no, -1, -1);
 
 	return hptiop_reset_hba(hba)? FAILED : SUCCESS;
 }
@@ -1179,8 +1177,7 @@ static struct scsi_host_template driver_template = {
 	.module                     = THIS_MODULE,
 	.name                       = driver_name,
 	.queuecommand               = hptiop_queuecommand,
-	.eh_device_reset_handler    = hptiop_reset,
-	.eh_bus_reset_handler       = hptiop_reset,
+	.eh_host_reset_handler      = hptiop_reset,
 	.info                       = hptiop_info,
 	.emulated                   = 0,
 	.use_clustering             = ENABLE_CLUSTERING,
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index cc4e05be8d4a..b491af31a5f8 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -2528,16 +2528,12 @@ static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd)
  **/
 static int ibmvfc_eh_host_reset_handler(struct scsi_cmnd *cmd)
 {
-	int rc, block_rc;
+	int rc;
 	struct ibmvfc_host *vhost = shost_priv(cmd->device->host);
 
-	block_rc = fc_block_scsi_eh(cmd);
 	dev_err(vhost->dev, "Resetting connection due to error recovery\n");
 	rc = ibmvfc_issue_fc_host_lip(vhost->host);
 
-	if (block_rc == FAST_IO_FAIL)
-		return FAST_IO_FAIL;
-
 	return rc ? FAILED : SUCCESS;
 }
 
@@ -4929,7 +4925,7 @@ static unsigned long ibmvfc_get_desired_dma(struct vio_dev *vdev)
 	return pool_dma + ((512 * 1024) * driver_template.cmd_per_lun);
 }
 
-static struct vio_device_id ibmvfc_device_table[] = {
+static const struct vio_device_id ibmvfc_device_table[] = {
 	{"fcp", "IBM,vfc-client"},
 	{ "", "" }
 };
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index da22b3665cb0..7d156b161482 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -2330,7 +2330,7 @@ static int ibmvscsi_resume(struct device *dev)
  * ibmvscsi_device_table: Used by vio.c to match devices in the device tree we 
  * support.
  */
-static struct vio_device_id ibmvscsi_device_table[] = {
+static const struct vio_device_id ibmvscsi_device_table[] = {
 	{"vscsi", "IBM,v-scsi"},
 	{ "", "" }
 };
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index 1f75d0380516..785fb42f6650 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -4086,7 +4086,7 @@ static struct class ibmvscsis_class = {
 	.dev_groups	= ibmvscsis_dev_groups,
 };
 
-static struct vio_device_id ibmvscsis_device_table[] = {
+static const struct vio_device_id ibmvscsis_device_table[] = {
 	{ "v-scsi-host", "IBM,v-scsi-host" },
 	{ "", "" }
 };
diff --git a/drivers/scsi/imm.c b/drivers/scsi/imm.c
index 9164ce1249c1..87c94191033b 100644
--- a/drivers/scsi/imm.c
+++ b/drivers/scsi/imm.c
@@ -1106,7 +1106,6 @@ static struct scsi_host_template imm_template = {
 	.name			= "Iomega VPI2 (imm) interface",
 	.queuecommand		= imm_queuecommand,
 	.eh_abort_handler	= imm_abort,
-	.eh_bus_reset_handler	= imm_reset,
 	.eh_host_reset_handler	= imm_reset,
 	.bios_param		= imm_biosparam,
 	.this_id		= 7,
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index 45371179ab87..922e3e56c90d 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -166,7 +166,7 @@ static struct scsi_host_template isci_sht = {
 	.use_clustering			= ENABLE_CLUSTERING,
 	.eh_abort_handler		= sas_eh_abort_handler,
 	.eh_device_reset_handler        = sas_eh_device_reset_handler,
-	.eh_bus_reset_handler           = sas_eh_bus_reset_handler,
+	.eh_target_reset_handler        = sas_eh_target_reset_handler,
 	.target_destroy			= sas_target_destroy,
 	.ioctl				= sas_ioctl,
 	.shost_attrs			= isci_host_attrs,
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 4842fc0e809d..4d934d6c3e13 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -163,7 +163,6 @@ static void iscsi_sw_tcp_state_change(struct sock *sk)
 	struct iscsi_tcp_conn *tcp_conn;
 	struct iscsi_sw_tcp_conn *tcp_sw_conn;
 	struct iscsi_conn *conn;
-	struct iscsi_session *session;
 	void (*old_state_change)(struct sock *);
 
 	read_lock_bh(&sk->sk_callback_lock);
@@ -172,7 +171,6 @@ static void iscsi_sw_tcp_state_change(struct sock *sk)
 		read_unlock_bh(&sk->sk_callback_lock);
 		return;
 	}
-	session = conn->session;
 
 	iscsi_sw_sk_state_check(sk);
 
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 234352da5c3c..772c35a5c49e 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -2222,8 +2222,6 @@ int fc_eh_host_reset(struct scsi_cmnd *sc_cmd)
 
 	FC_SCSI_DBG(lport, "Resetting host\n");
 
-	fc_block_scsi_eh(sc_cmd);
-
 	fc_lport_reset(lport);
 	wait_tmo = jiffies + FC_HOST_RESET_TIMEOUT;
 	while (!fc_fcp_lport_queue_ready(lport) && time_before(jiffies,
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 42381adf0769..bd4605a34f54 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1078,7 +1078,7 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
 		if (opcode != ISCSI_OP_NOOP_OUT)
 			return 0;
 
-		 if (rejected_pdu.itt == cpu_to_be32(ISCSI_RESERVED_TAG)) {
+		if (rejected_pdu.itt == cpu_to_be32(ISCSI_RESERVED_TAG)) {
 			/*
 			 * nop-out in response to target's nop-out rejected.
 			 * Just resend.
diff --git a/drivers/scsi/libsas/Kconfig b/drivers/scsi/libsas/Kconfig
index 9dafe64e7c7a..13739bfacc67 100644
--- a/drivers/scsi/libsas/Kconfig
+++ b/drivers/scsi/libsas/Kconfig
@@ -26,6 +26,7 @@ config SCSI_SAS_LIBSAS
 	tristate "SAS Domain Transport Attributes"
 	depends on SCSI
 	select SCSI_SAS_ATTRS
+	select BLK_DEV_BSGLIB
 	help
 	  This provides transport specific helpers for SAS drivers which
 	  use the domain device construct (like the aic94xxx).
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 87f5e694dbed..70be4425ae0b 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -343,6 +343,7 @@ static int smp_ata_check_ready(struct ata_link *link)
 	case SAS_END_DEVICE:
 		if (ex_phy->attached_sata_dev)
 			return sas_ata_clear_pending(dev, ex_phy);
+		/* fall through */
 	default:
 		return -ENODEV;
 	}
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 570b2cb2da43..6b4fd2375178 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -64,8 +64,8 @@ static void smp_task_done(struct sas_task *task)
 /* Give it some long enough timeout. In seconds. */
 #define SMP_TIMEOUT 10
 
-static int smp_execute_task(struct domain_device *dev, void *req, int req_size,
-			    void *resp, int resp_size)
+static int smp_execute_task_sg(struct domain_device *dev,
+		struct scatterlist *req, struct scatterlist *resp)
 {
 	int res, retry;
 	struct sas_task *task = NULL;
@@ -86,8 +86,8 @@ static int smp_execute_task(struct domain_device *dev, void *req, int req_size,
 		}
 		task->dev = dev;
 		task->task_proto = dev->tproto;
-		sg_init_one(&task->smp_task.smp_req, req, req_size);
-		sg_init_one(&task->smp_task.smp_resp, resp, resp_size);
+		task->smp_task.smp_req = *req;
+		task->smp_task.smp_resp = *resp;
 
 		task->task_done = smp_task_done;
 
@@ -151,6 +151,17 @@ static int smp_execute_task(struct domain_device *dev, void *req, int req_size,
 	return res;
 }
 
+static int smp_execute_task(struct domain_device *dev, void *req, int req_size,
+			    void *resp, int resp_size)
+{
+	struct scatterlist req_sg;
+	struct scatterlist resp_sg;
+
+	sg_init_one(&req_sg, req, req_size);
+	sg_init_one(&resp_sg, resp, resp_size);
+	return smp_execute_task_sg(dev, &req_sg, &resp_sg);
+}
+
 /* ---------- Allocations ---------- */
 
 static inline void *alloc_smp_req(int size)
@@ -2130,57 +2141,50 @@ int sas_ex_revalidate_domain(struct domain_device *port_dev)
 	return res;
 }
 
-int sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
-		    struct request *req)
+void sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
+		struct sas_rphy *rphy)
 {
 	struct domain_device *dev;
-	int ret, type;
-	struct request *rsp = req->next_rq;
-
-	if (!rsp) {
-		printk("%s: space for a smp response is missing\n",
-		       __func__);
-		return -EINVAL;
-	}
+	unsigned int reslen = 0;
+	int ret = -EINVAL;
 
 	/* no rphy means no smp target support (ie aic94xx host) */
 	if (!rphy)
-		return sas_smp_host_handler(shost, req, rsp);
-
-	type = rphy->identify.device_type;
+		return sas_smp_host_handler(job, shost);
 
-	if (type != SAS_EDGE_EXPANDER_DEVICE &&
-	    type != SAS_FANOUT_EXPANDER_DEVICE) {
+	switch (rphy->identify.device_type) {
+	case SAS_EDGE_EXPANDER_DEVICE:
+	case SAS_FANOUT_EXPANDER_DEVICE:
+		break;
+	default:
 		printk("%s: can we send a smp request to a device?\n",
 		       __func__);
-		return -EINVAL;
+		goto out;
 	}
 
 	dev = sas_find_dev_by_rphy(rphy);
 	if (!dev) {
 		printk("%s: fail to find a domain_device?\n", __func__);
-		return -EINVAL;
+		goto out;
 	}
 
 	/* do we need to support multiple segments? */
-	if (bio_multiple_segments(req->bio) ||
-	    bio_multiple_segments(rsp->bio)) {
+	if (job->request_payload.sg_cnt > 1 ||
+	    job->reply_payload.sg_cnt > 1) {
 		printk("%s: multiple segments req %u, rsp %u\n",
-		       __func__, blk_rq_bytes(req), blk_rq_bytes(rsp));
-		return -EINVAL;
+		       __func__, job->request_payload.payload_len,
+		       job->reply_payload.payload_len);
+		goto out;
 	}
 
-	ret = smp_execute_task(dev, bio_data(req->bio), blk_rq_bytes(req),
-			       bio_data(rsp->bio), blk_rq_bytes(rsp));
+	ret = smp_execute_task_sg(dev, job->request_payload.sg_list,
+			job->reply_payload.sg_list);
 	if (ret > 0) {
 		/* positive number is the untransferred residual */
-		scsi_req(rsp)->resid_len = ret;
-		scsi_req(req)->resid_len = 0;
+		reslen = ret;
 		ret = 0;
-	} else if (ret == 0) {
-		scsi_req(rsp)->resid_len = 0;
-		scsi_req(req)->resid_len = 0;
 	}
 
-	return ret;
+out:
+	bsg_job_done(job, ret, reslen);
 }
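
The reworked sas_smp_handler() above shows the bsg-lib calling convention: request and reply payloads arrive as scatterlists inside struct bsg_job, and the handler finishes by calling bsg_job_done() with the result and the reply residue. A minimal skeleton in the same shape, as a hedged sketch (demo_smp_handler and the elided transport call are assumptions, not libsas code):

#include <linux/errno.h>
#include <linux/bsg-lib.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_sas.h>

/* Hypothetical skeleton following the sas_smp_handler() shape above. */
static void demo_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
			     struct sas_rphy *rphy)
{
	unsigned int reslen = 0;
	int ret = -EINVAL;

	/* Only single-segment payloads are handled, as above. */
	if (job->request_payload.sg_cnt > 1 || job->reply_payload.sg_cnt > 1)
		goto done;

	/*
	 * A real handler would hand job->request_payload.sg_list and
	 * job->reply_payload.sg_list to the transport here; a positive
	 * return value would be reported as untransferred reply residue.
	 */
	ret = 0;
done:
	bsg_job_done(job, ret, reslen);
}
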
diff --git a/drivers/scsi/libsas/sas_host_smp.c b/drivers/scsi/libsas/sas_host_smp.c
index 45cbbc44f4d7..9ead93df3a6e 100644
--- a/drivers/scsi/libsas/sas_host_smp.c
+++ b/drivers/scsi/libsas/sas_host_smp.c
@@ -225,47 +225,36 @@ static void sas_phy_control(struct sas_ha_struct *sas_ha, u8 phy_id,
 		resp_data[2] = SMP_RESP_FUNC_ACC;
 }
 
-int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
-			 struct request *rsp)
+void sas_smp_host_handler(struct bsg_job *job, struct Scsi_Host *shost)
 {
-	u8 *req_data = NULL, *resp_data = NULL, *buf;
 	struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
+	u8 *req_data, *resp_data;
+	unsigned int reslen = 0;
 	int error = -EINVAL;
 
 	/* eight is the minimum size for request and response frames */
-	if (blk_rq_bytes(req) < 8 || blk_rq_bytes(rsp) < 8)
+	if (job->request_payload.payload_len < 8 ||
+	    job->reply_payload.payload_len < 8)
 		goto out;
 
-	if (bio_offset(req->bio) + blk_rq_bytes(req) > PAGE_SIZE ||
-	    bio_offset(rsp->bio) + blk_rq_bytes(rsp) > PAGE_SIZE) {
-		shost_printk(KERN_ERR, shost,
-			"SMP request/response frame crosses page boundary");
+	error = -ENOMEM;
+	req_data = kzalloc(job->request_payload.payload_len, GFP_KERNEL);
+	if (!req_data)
 		goto out;
-	}
-
-	req_data = kzalloc(blk_rq_bytes(req), GFP_KERNEL);
+	sg_copy_to_buffer(job->request_payload.sg_list,
+			  job->request_payload.sg_cnt, req_data,
+			  job->request_payload.payload_len);
 
 	/* make sure frame can always be built ... we copy
 	 * back only the requested length */
-	resp_data = kzalloc(max(blk_rq_bytes(rsp), 128U), GFP_KERNEL);
-
-	if (!req_data || !resp_data) {
-		error = -ENOMEM;
-		goto out;
-	}
-
-	local_irq_disable();
-	buf = kmap_atomic(bio_page(req->bio));
-	memcpy(req_data, buf, blk_rq_bytes(req));
-	kunmap_atomic(buf - bio_offset(req->bio));
-	local_irq_enable();
+	resp_data = kzalloc(max(job->reply_payload.payload_len, 128U),
+			GFP_KERNEL);
+	if (!resp_data)
+		goto out_free_req;
 
+	error = -EINVAL;
 	if (req_data[0] != SMP_REQUEST)
-		goto out;
-
-	/* always succeeds ... even if we can't process the request
-	 * the result is in the response frame */
-	error = 0;
+		goto out_free_resp;
 
 	/* set up default don't know response */
 	resp_data[0] = SMP_RESPONSE;
@@ -274,20 +263,18 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
 
 	switch (req_data[1]) {
 	case SMP_REPORT_GENERAL:
-		scsi_req(req)->resid_len -= 8;
-		scsi_req(rsp)->resid_len -= 32;
 		resp_data[2] = SMP_RESP_FUNC_ACC;
 		resp_data[9] = sas_ha->num_phys;
+		reslen = 32;
 		break;
 
 	case SMP_REPORT_MANUF_INFO:
-		scsi_req(req)->resid_len -= 8;
-		scsi_req(rsp)->resid_len -= 64;
 		resp_data[2] = SMP_RESP_FUNC_ACC;
 		memcpy(resp_data + 12, shost->hostt->name,
 		       SAS_EXPANDER_VENDOR_ID_LEN);
 		memcpy(resp_data + 20, "libsas virt phy",
 		       SAS_EXPANDER_PRODUCT_ID_LEN);
+		reslen = 64;
 		break;
 
 	case SMP_READ_GPIO_REG:
@@ -295,14 +282,10 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
 		break;
 
 	case SMP_DISCOVER:
-		scsi_req(req)->resid_len -= 16;
-		if ((int)scsi_req(req)->resid_len < 0) {
-			scsi_req(req)->resid_len = 0;
-			error = -EINVAL;
-			goto out;
-		}
-		scsi_req(rsp)->resid_len -= 56;
+		if (job->request_payload.payload_len < 16)
+			goto out_free_resp;
 		sas_host_smp_discover(sas_ha, resp_data, req_data[9]);
+		reslen = 56;
 		break;
 
 	case SMP_REPORT_PHY_ERR_LOG:
@@ -311,14 +294,10 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
 		break;
 
 	case SMP_REPORT_PHY_SATA:
-		scsi_req(req)->resid_len -= 16;
-		if ((int)scsi_req(req)->resid_len < 0) {
-			scsi_req(req)->resid_len = 0;
-			error = -EINVAL;
-			goto out;
-		}
-		scsi_req(rsp)->resid_len -= 60;
+		if (job->request_payload.payload_len < 16)
+			goto out_free_resp;
 		sas_report_phy_sata(sas_ha, resp_data, req_data[9]);
+		reslen = 60;
 		break;
 
 	case SMP_REPORT_ROUTE_INFO:
@@ -330,16 +309,15 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
 		const int base_frame_size = 11;
 		int to_write = req_data[4];
 
-		if (blk_rq_bytes(req) < base_frame_size + to_write * 4 ||
-		    scsi_req(req)->resid_len < base_frame_size + to_write * 4) {
+		if (job->request_payload.payload_len <
+				base_frame_size + to_write * 4) {
 			resp_data[2] = SMP_RESP_INV_FRM_LEN;
 			break;
 		}
 
 		to_write = sas_host_smp_write_gpio(sas_ha, resp_data, req_data[2],
 						   req_data[3], to_write, &req_data[8]);
-		scsi_req(req)->resid_len -= base_frame_size + to_write * 4;
-		scsi_req(rsp)->resid_len -= 8;
+		reslen = 8;
 		break;
 	}
 
@@ -348,16 +326,12 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
 		break;
 
 	case SMP_PHY_CONTROL:
-		scsi_req(req)->resid_len -= 44;
-		if ((int)scsi_req(req)->resid_len < 0) {
-			scsi_req(req)->resid_len = 0;
-			error = -EINVAL;
-			goto out;
-		}
-		scsi_req(rsp)->resid_len -= 8;
+		if (job->request_payload.payload_len < 44)
+			goto out_free_resp;
 		sas_phy_control(sas_ha, req_data[9], req_data[10],
 				req_data[32] >> 4, req_data[33] >> 4,
 				resp_data);
+		reslen = 8;
 		break;
 
 	case SMP_PHY_TEST_FUNCTION:
@@ -369,15 +343,15 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
 		break;
 	}
 
-	local_irq_disable();
-	buf = kmap_atomic(bio_page(rsp->bio));
-	memcpy(buf, resp_data, blk_rq_bytes(rsp));
-	flush_kernel_dcache_page(bio_page(rsp->bio));
-	kunmap_atomic(buf - bio_offset(rsp->bio));
-	local_irq_enable();
+	sg_copy_from_buffer(job->reply_payload.sg_list,
+			    job->reply_payload.sg_cnt, resp_data,
+			    job->reply_payload.payload_len);
 
- out:
-	kfree(req_data);
+	error = 0;
+out_free_resp:
 	kfree(resp_data);
-	return error;
+out_free_req:
+	kfree(req_data);
+out:
+	bsg_job_done(job, error, reslen);
 }
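
With the bio/kmap_atomic copying gone, sas_smp_host_handler() above bounces the bsg payloads through plain kernel buffers using sg_copy_to_buffer() and sg_copy_from_buffer(). The same copy-in/copy-out pattern in isolation, as a hedged sketch (demo_bounce and the elided frame handling are assumptions):

#include <linux/errno.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

/* Copy an SG request into a linear buffer, then copy a reply back out. */
static int demo_bounce(struct scatterlist *req_sg, int req_cnt, size_t req_len,
		       struct scatterlist *rsp_sg, int rsp_cnt, size_t rsp_len)
{
	u8 *req = kzalloc(req_len, GFP_KERNEL);
	u8 *rsp = kzalloc(rsp_len, GFP_KERNEL);
	int err = -ENOMEM;

	if (!req || !rsp)
		goto out;

	sg_copy_to_buffer(req_sg, req_cnt, req, req_len);   /* SG -> linear */
	/* ... build the response frame in rsp here ... */
	sg_copy_from_buffer(rsp_sg, rsp_cnt, rsp, rsp_len); /* linear -> SG */
	err = 0;
out:
	kfree(rsp);
	kfree(req);
	return err;
}
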
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index a216c957b639..c07e08136491 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -81,6 +81,8 @@ int sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw);
 int sas_notify_lldd_dev_found(struct domain_device *);
 void sas_notify_lldd_dev_gone(struct domain_device *);
 
+void sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
+		struct sas_rphy *rphy);
 int sas_smp_phy_control(struct domain_device *dev, int phy_id,
 			enum phy_func phy_func, struct sas_phy_linkrates *);
 int sas_smp_get_phy_events(struct sas_phy *phy);
@@ -98,16 +100,14 @@ void sas_hae_reset(struct work_struct *work);
 void sas_free_device(struct kref *kref);
 
 #ifdef CONFIG_SCSI_SAS_HOST_SMP
-extern int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
-				struct request *rsp);
+extern void sas_smp_host_handler(struct bsg_job *job, struct Scsi_Host *shost);
 #else
-static inline int sas_smp_host_handler(struct Scsi_Host *shost,
-				       struct request *req,
-				       struct request *rsp)
+static inline void sas_smp_host_handler(struct bsg_job *job,
+		struct Scsi_Host *shost)
 {
 	shost_printk(KERN_ERR, shost,
 		"Cannot send SMP to a sas host (not enabled in CONFIG)\n");
-	return -EINVAL;
+	bsg_job_done(job, -EINVAL, 0);
 }
 #endif
 
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 87e5079d816b..ea8ad06ff582 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -526,7 +526,7 @@ int sas_eh_device_reset_handler(struct scsi_cmnd *cmd)
 	return FAILED;
 }
 
-int sas_eh_bus_reset_handler(struct scsi_cmnd *cmd)
+int sas_eh_target_reset_handler(struct scsi_cmnd *cmd)
 {
 	int res;
 	struct Scsi_Host *host = cmd->device->host;
@@ -554,15 +554,15 @@ static int try_to_reset_cmd_device(struct scsi_cmnd *cmd)
 	struct Scsi_Host *shost = cmd->device->host;
 
 	if (!shost->hostt->eh_device_reset_handler)
-		goto try_bus_reset;
+		goto try_target_reset;
 
 	res = shost->hostt->eh_device_reset_handler(cmd);
 	if (res == SUCCESS)
 		return res;
 
-try_bus_reset:
-	if (shost->hostt->eh_bus_reset_handler)
-		return shost->hostt->eh_bus_reset_handler(cmd);
+try_target_reset:
+	if (shost->hostt->eh_target_reset_handler)
+		return shost->hostt->eh_target_reset_handler(cmd);
 
 	return FAILED;
 }
@@ -855,7 +855,6 @@ int sas_target_alloc(struct scsi_target *starget)
 int sas_slave_configure(struct scsi_device *scsi_dev)
 {
 	struct domain_device *dev = sdev_to_domain_dev(scsi_dev);
-	struct sas_ha_struct *sas_ha;
 
 	BUG_ON(dev->rphy->identify.device_type != SAS_END_DEVICE);
 
@@ -864,8 +863,6 @@ int sas_slave_configure(struct scsi_device *scsi_dev)
 		return 0;
 	}
 
-	sas_ha = dev->port->ha;
-
 	sas_read_port_mode_page(scsi_dev);
 
 	if (scsi_dev->tagged_supported) {
@@ -996,6 +993,6 @@ EXPORT_SYMBOL_GPL(sas_bios_param);
 EXPORT_SYMBOL_GPL(sas_task_abort);
 EXPORT_SYMBOL_GPL(sas_phy_reset);
 EXPORT_SYMBOL_GPL(sas_eh_device_reset_handler);
-EXPORT_SYMBOL_GPL(sas_eh_bus_reset_handler);
+EXPORT_SYMBOL_GPL(sas_eh_target_reset_handler);
 EXPORT_SYMBOL_GPL(sas_target_destroy);
 EXPORT_SYMBOL_GPL(sas_ioctl);
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 562dc0139735..8eb3f96fe068 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -733,7 +733,6 @@ struct lpfc_hba {
 	uint32_t fc_rttov;	/* R_T_TOV timer value */
 	uint32_t fc_altov;	/* AL_TOV timer value */
 	uint32_t fc_crtov;	/* C_R_TOV timer value */
-	uint32_t fc_citov;	/* C_I_TOV timer value */
 
 	struct serv_parm fc_fabparam;	/* fabric service parameters buffer */
 	uint8_t alpa_map[128];	/* AL_PA map from READ_LA */
@@ -757,6 +756,7 @@ struct lpfc_hba {
 #define LPFC_NVMET_MAX_PORTS	32
 	uint8_t  mds_diags_support;
 	uint32_t initial_imax;
+	uint8_t  bbcredit_support;
 
 	/* HBA Config Parameters */
 	uint32_t cfg_ack0;
@@ -836,6 +836,7 @@ struct lpfc_hba {
 	uint32_t cfg_enable_SmartSAN;
 	uint32_t cfg_enable_mds_diags;
 	uint32_t cfg_enable_fc4_type;
+	uint32_t cfg_enable_bbcr;	/*Enable BB Credit Recovery*/
 	uint32_t cfg_xri_split;
 #define LPFC_ENABLE_FCP  1
 #define LPFC_ENABLE_NVME 2
@@ -946,14 +947,14 @@ struct lpfc_hba {
 	struct list_head active_rrq_list;
 	spinlock_t hbalock;
 
-	/* pci_mem_pools */
-	struct pci_pool *lpfc_sg_dma_buf_pool;
-	struct pci_pool *lpfc_mbuf_pool;
-	struct pci_pool *lpfc_hrb_pool;	/* header receive buffer pool */
-	struct pci_pool *lpfc_drb_pool; /* data receive buffer pool */
-	struct pci_pool *lpfc_nvmet_drb_pool; /* data receive buffer pool */
-	struct pci_pool *lpfc_hbq_pool;	/* SLI3 hbq buffer pool */
-	struct pci_pool *txrdy_payload_pool;
+	/* dma_mem_pools */
+	struct dma_pool *lpfc_sg_dma_buf_pool;
+	struct dma_pool *lpfc_mbuf_pool;
+	struct dma_pool *lpfc_hrb_pool;	/* header receive buffer pool */
+	struct dma_pool *lpfc_drb_pool; /* data receive buffer pool */
+	struct dma_pool *lpfc_nvmet_drb_pool; /* data receive buffer pool */
+	struct dma_pool *lpfc_hbq_pool;	/* SLI3 hbq buffer pool */
+	struct dma_pool *txrdy_payload_pool;
 	struct lpfc_dma_pool lpfc_mbuf_safety_pool;
 
 	mempool_t *mbox_mem_pool;
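
The pool members above move from the legacy pci_pool wrappers to the underlying dma_pool API; the lifecycle is unchanged, but creation takes a struct device rather than a struct pci_dev. A generic sketch of that lifecycle, assuming placeholder size/alignment values (demo_pool_example is not lpfc code; the real conversions follow in lpfc_mem.c):

#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/gfp.h>

/* Generic dma_pool lifecycle matching the conversions below. */
static int demo_pool_example(struct device *dev)
{
	struct dma_pool *pool;
	dma_addr_t dma;
	void *buf;

	pool = dma_pool_create("demo_pool", dev, 1024 /* size */,
			       16 /* align */, 0 /* boundary */);
	if (!pool)
		return -ENOMEM;

	buf = dma_pool_alloc(pool, GFP_KERNEL, &dma);
	if (buf)
		dma_pool_free(pool, buf, dma);

	dma_pool_destroy(pool);
	return 0;
}
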
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 7ee1a94c0b33..c17677f494af 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -247,13 +247,10 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
 				atomic_read(&tgtp->xmt_abort_rsp),
 				atomic_read(&tgtp->xmt_abort_rsp_error));
 
-		spin_lock(&phba->sli4_hba.nvmet_ctx_get_lock);
-		spin_lock(&phba->sli4_hba.nvmet_ctx_put_lock);
-		tot = phba->sli4_hba.nvmet_xri_cnt -
-			(phba->sli4_hba.nvmet_ctx_get_cnt +
-			phba->sli4_hba.nvmet_ctx_put_cnt);
-		spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock);
-		spin_unlock(&phba->sli4_hba.nvmet_ctx_get_lock);
+		/* Calculate outstanding IOs */
+		tot = atomic_read(&tgtp->rcv_fcp_cmd_drop);
+		tot += atomic_read(&tgtp->xmt_fcp_release);
+		tot = atomic_read(&tgtp->rcv_fcp_cmd_in) - tot;
 
 		len += snprintf(buf + len, PAGE_SIZE - len,
 				"IO_CTX: %08x  WAIT: cur %08x tot %08x\n"
@@ -1894,6 +1891,36 @@ static inline bool lpfc_rangecheck(uint val, uint min, uint max)
 }
 
 /**
+ * lpfc_enable_bbcr_set: Sets an attribute value.
+ * @phba: pointer to the adapter structure.
+ * @val: integer attribute value.
+ *
+ * Description:
+ * Validates the min and max values, then sets the adapter config field
+ * if the value is in the valid range. Prints an error message and does
+ * not set the parameter if the value is invalid.
+ *
+ * Returns:
+ * zero on success
+ * -EINVAL if val is invalid
+ */
+static ssize_t
+lpfc_enable_bbcr_set(struct lpfc_hba *phba, uint val)
+{
+	if (lpfc_rangecheck(val, 0, 1) && phba->sli_rev == LPFC_SLI_REV4) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"3068 %s_enable_bbcr changed from %d to %d\n",
+				LPFC_DRIVER_NAME, phba->cfg_enable_bbcr, val);
+		phba->cfg_enable_bbcr = val;
+		return 0;
+	}
+	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"0451 %s_enable_bbcr cannot set to %d, range is 0, 1\n",
+			LPFC_DRIVER_NAME, val);
+	return -EINVAL;
+}
+
+/**
  * lpfc_param_show - Return a cfg attribute value in decimal
  *
  * Description:
@@ -5116,6 +5143,14 @@ LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_DEFAULT_SG_SEG_CNT,
  */
 LPFC_ATTR_R(enable_mds_diags, 0, 0, 1, "Enable MDS Diagnostics");
 
+/*
+ * lpfc_enable_bbcr: Enable BB Credit Recovery
+ *       0  = BB Credit Recovery disabled
+ *       1  = BB Credit Recovery enabled (default)
+ * Value range is [0,1]. Default value is 1.
+ */
+LPFC_BBCR_ATTR_RW(enable_bbcr, 1, 0, 1, "Enable BBC Recovery");
+
 struct device_attribute *lpfc_hba_attrs[] = {
 	&dev_attr_nvme_info,
 	&dev_attr_bg_info,
@@ -5223,6 +5258,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
 	&dev_attr_protocol,
 	&dev_attr_lpfc_xlane_supported,
 	&dev_attr_lpfc_enable_mds_diags,
+	&dev_attr_lpfc_enable_bbcr,
 	NULL,
 };
 
@@ -6234,11 +6270,13 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
 	lpfc_nvmet_fb_size_init(phba, lpfc_nvmet_fb_size);
 	lpfc_fcp_io_channel_init(phba, lpfc_fcp_io_channel);
 	lpfc_nvme_io_channel_init(phba, lpfc_nvme_io_channel);
+	lpfc_enable_bbcr_init(phba, lpfc_enable_bbcr);
 
 	if (phba->sli_rev != LPFC_SLI_REV4) {
 		/* NVME only supported on SLI4 */
 		phba->nvmet_support = 0;
 		phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
+		phba->cfg_enable_bbcr = 0;
 	} else {
 		/* We MUST have FCP support */
 		if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
diff --git a/drivers/scsi/lpfc/lpfc_attr.h b/drivers/scsi/lpfc/lpfc_attr.h
index d56dafcdd563..931db52692f5 100644
--- a/drivers/scsi/lpfc/lpfc_attr.h
+++ b/drivers/scsi/lpfc/lpfc_attr.h
@@ -46,6 +46,16 @@ lpfc_param_store(name)\
 static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
 		   lpfc_##name##_show, lpfc_##name##_store)
 
+#define LPFC_BBCR_ATTR_RW(name, defval, minval, maxval, desc) \
+static uint lpfc_##name = defval;\
+module_param(lpfc_##name, uint, 0444);\
+MODULE_PARM_DESC(lpfc_##name, desc);\
+lpfc_param_show(name)\
+lpfc_param_init(name, defval, minval, maxval)\
+lpfc_param_store(name)\
+static DEVICE_ATTR(lpfc_##name, 0444 | 0644,\
+		   lpfc_##name##_show, lpfc_##name##_store)
+
 #define LPFC_ATTR_HEX_R(name, defval, minval, maxval, desc) \
 static uint lpfc_##name = defval;\
 module_param(lpfc_##name, uint, S_IRUGO);\
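
The new LPFC_BBCR_ATTR_RW macro stamps out the usual module-parameter-plus-sysfs-attribute boilerplate in one invocation. Expanded by hand for the enable_bbcr attribute declared in lpfc_attr.c above, it comes out to roughly the following (the lpfc_param_show/init/store macros supply the _show, _init and _store helpers and are not reproduced here):

static uint lpfc_enable_bbcr = 1;
module_param(lpfc_enable_bbcr, uint, 0444);
MODULE_PARM_DESC(lpfc_enable_bbcr, "Enable BBC Recovery");
lpfc_param_show(enable_bbcr)
lpfc_param_init(enable_bbcr, 1, 0, 1)
lpfc_param_store(enable_bbcr)
static DEVICE_ATTR(lpfc_enable_bbcr, 0444 | 0644,
		   lpfc_enable_bbcr_show, lpfc_enable_bbcr_store);
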
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index a1686c2d863c..fe9e1c079c20 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -2384,20 +2384,17 @@ lpfc_sli4_bsg_link_diag_test(struct bsg_job *job)
 		goto job_error;
 
 	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
-	if (!pmboxq) {
-		rc = -ENOMEM;
+	if (!pmboxq)
 		goto link_diag_test_exit;
-	}
 
 	req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
 		   sizeof(struct lpfc_sli4_cfg_mhdr));
 	alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
 				     LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE,
 				     req_len, LPFC_SLI4_MBX_EMBED);
-	if (alloc_len != req_len) {
-		rc = -ENOMEM;
+	if (alloc_len != req_len)
 		goto link_diag_test_exit;
-	}
+
 	run_link_diag_test = &pmboxq->u.mqe.un.link_diag_test;
 	bf_set(lpfc_mbx_run_diag_test_link_num, &run_link_diag_test->u.req,
 	       phba->sli4_hba.lnk_info.lnk_no);
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index da669dce12fe..7e300734b345 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -556,9 +556,8 @@ int lpfc_nvmet_update_targetport(struct lpfc_hba *phba);
 void lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba);
 void lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba,
 			struct lpfc_sli_ring *pring, struct lpfc_iocbq *piocb);
-void lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
-			struct lpfc_sli_ring *pring,
-			struct rqb_dmabuf *nvmebuf, uint64_t isr_ts);
+void lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba, uint32_t idx,
+				struct rqb_dmabuf *nvmebuf, uint64_t isr_ts);
 void lpfc_nvme_mod_param_dep(struct lpfc_hba *phba);
 void lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba,
 				struct lpfc_iocbq *cmdiocb,
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 9c0c1463057d..33417681f5d4 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -955,7 +955,7 @@ lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 		CTrsp = (struct lpfc_sli_ct_request *)outp->virt;
 		fc4_data_0 = be32_to_cpu(CTrsp->un.gft_acc.fc4_types[0]);
 		fc4_data_1 = be32_to_cpu(CTrsp->un.gft_acc.fc4_types[1]);
-		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
 				 "3062 DID x%06x GFT Wd0 x%08x Wd1 x%08x\n",
 				 did, fc4_data_0, fc4_data_1);
 
@@ -969,7 +969,7 @@ lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 				ndlp->nlp_fc4_type |= NLP_FC4_FCP;
 			if (fc4_data_1 &  LPFC_FC4_TYPE_BITMASK)
 				ndlp->nlp_fc4_type |= NLP_FC4_NVME;
-			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
 					 "3064 Setting ndlp %p, DID x%06x with "
 					 "FC4 x%08x, Data: x%08x x%08x\n",
 					 ndlp, did, ndlp->nlp_fc4_type,
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 744f3f395b64..d50c481ec41c 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -851,13 +851,10 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
 			spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
 		}
 
-		spin_lock(&phba->sli4_hba.nvmet_ctx_get_lock);
-		spin_lock(&phba->sli4_hba.nvmet_ctx_put_lock);
-		tot = phba->sli4_hba.nvmet_xri_cnt -
-			(phba->sli4_hba.nvmet_ctx_get_cnt +
-			phba->sli4_hba.nvmet_ctx_put_cnt);
-		spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock);
-		spin_unlock(&phba->sli4_hba.nvmet_ctx_get_lock);
+		/* Calculate outstanding IOs */
+		tot = atomic_read(&tgtp->rcv_fcp_cmd_drop);
+		tot += atomic_read(&tgtp->xmt_fcp_release);
+		tot = atomic_read(&tgtp->rcv_fcp_cmd_in) - tot;
 
 		len += snprintf(buf + len, size - len,
 				"IO_CTX: %08x  WAIT: cur %08x tot %08x\n"
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h
index 7b7d314af0e0..c4edd87bfc65 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.h
+++ b/drivers/scsi/lpfc/lpfc_debugfs.h
@@ -478,16 +478,16 @@ lpfc_debug_dump_cq(struct lpfc_hba *phba, int qtype, int wqidx)
 		return;
 
 	for (eqidx = 0; eqidx < phba->io_channel_irqs; eqidx++) {
-		eq = phba->sli4_hba.hba_eq[eqidx];
-		if (cq->assoc_qid == eq->queue_id)
+		if (cq->assoc_qid == phba->sli4_hba.hba_eq[eqidx]->queue_id)
 			break;
 	}
 	if (eqidx == phba->io_channel_irqs) {
 		pr_err("Couldn't find EQ for CQ. Using EQ[0]\n");
 		eqidx = 0;
-		eq = phba->sli4_hba.hba_eq[0];
 	}
 
+	eq = phba->sli4_hba.hba_eq[eqidx];
+
 	if (qtype == DUMP_FCP || qtype == DUMP_NVME)
 		pr_err("%s CQ: WQ[Idx:%d|Qid%d]->CQ[Idx%d|Qid%d]"
 			"->EQ[Idx:%d|Qid:%d]:\n",
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 094c97b9e5f7..f9a566eaef04 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -159,6 +159,7 @@ struct lpfc_node_rrq {
 #define NLP_RNID_SND       0x00000400	/* sent RNID request for this entry */
 #define NLP_ELS_SND_MASK   0x000007e0	/* sent ELS request for this entry */
 #define NLP_NVMET_RECOV    0x00001000   /* NVMET auditing node for recovery. */
+#define NLP_FCP_PRLI_RJT   0x00002000   /* Rport does not support FCP PRLI. */
 #define NLP_DEFER_RM       0x00010000	/* Remove this ndlp if no longer used */
 #define NLP_DELAY_TMO      0x00020000	/* delay timeout is running for node */
 #define NLP_NPR_2B_DISC    0x00040000	/* node is included in num_disc_nodes */
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 6d1d6f691df4..468a66371de9 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1527,6 +1527,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
 	uint8_t  name[sizeof(struct lpfc_name)];
 	uint32_t rc, keepDID = 0, keep_nlp_flag = 0;
 	uint16_t keep_nlp_state;
+	struct lpfc_nvme_rport *keep_nrport = NULL;
 	int  put_node;
 	int  put_rport;
 	unsigned long *active_rrqs_xri_bitmap = NULL;
@@ -1624,6 +1625,10 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
 	keep_nlp_state = new_ndlp->nlp_state;
 	lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state);
 
+	/* interchange the nvme remoteport structs */
+	keep_nrport = new_ndlp->nrport;
+	new_ndlp->nrport = ndlp->nrport;
+
 	/* Move this back to NPR state */
 	if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) {
 		/* The new_ndlp is replacing ndlp totally, so we need
@@ -1646,6 +1651,13 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
 			}
 			new_ndlp->nlp_type = ndlp->nlp_type;
 		}
+
+		/* Fix up the nvme rport */
+		if (ndlp->nrport) {
+			ndlp->nrport = NULL;
+			lpfc_nlp_put(ndlp);
+		}
+
 		/* We shall actually free the ndlp with both nlp_DID and
 		 * nlp_portname fields equals 0 to avoid any ndlp on the
 		 * nodelist never to be used.
@@ -1690,6 +1702,14 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
 			keep_nlp_state = NLP_STE_NPR_NODE;
 		lpfc_nlp_set_state(vport, ndlp, keep_nlp_state);
 
+		/* Previous ndlp no longer active with nvme host transport.
+		 * Remove reference from earlier registration unless the
+		 * nvme host took care of it.
+		 */
+		if (ndlp->nrport)
+			lpfc_nlp_put(ndlp);
+		ndlp->nrport = keep_nrport;
+
 		/* Fix up the rport accordingly */
 		rport = ndlp->rport;
 		if (rport) {
@@ -1966,6 +1986,7 @@ int
 lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
 {
 	struct lpfc_hba  *phba = vport->phba;
+	struct Scsi_Host *shost;
 	struct serv_parm *sp;
 	struct lpfc_nodelist *ndlp;
 	struct lpfc_iocbq *elsiocb;
@@ -1984,6 +2005,11 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
 	if (!elsiocb)
 		return 1;
 
+	shost = lpfc_shost_from_vport(vport);
+	spin_lock_irq(shost->host_lock);
+	ndlp->nlp_flag &= ~NLP_FCP_PRLI_RJT;
+	spin_unlock_irq(shost->host_lock);
+
 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
 
 	/* For PLOGI request, remainder of payload is service parameters */
@@ -2007,6 +2033,7 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
 
 	sp->cmn.valid_vendor_ver_level = 0;
 	memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion));
+	sp->cmn.bbRcvSizeMsb &= 0xF;
 
 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
 		"Issue PLOGI:     did:x%x",
@@ -2151,6 +2178,16 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	uint16_t cmdsize;
 	u32 local_nlp_type, elscmd;
 
+	/*
+	 * If we are in RSCN mode, the FC4 types supported from a
+	 * previous GFT_ID command may not be accurate. So, if we
+	 * are an NVME Initiator, always look for the possibility of
+	 * the remote NPort being an NVME Target.
+	 */
+	if (phba->sli_rev == LPFC_SLI_REV4 &&
+	    vport->fc_flag & FC_RSCN_MODE &&
+	    vport->nvmei_support)
+		ndlp->nlp_fc4_type |= NLP_FC4_NVME;
 	local_nlp_type = ndlp->nlp_fc4_type;
 
  send_next_prli:
@@ -3420,8 +3457,18 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 				maxretry = 3;
 				delay = 1000;
 				retry = 1;
-				break;
+			} else if (cmd == ELS_CMD_FLOGI &&
+				   stat.un.b.lsRjtRsnCodeExp ==
+						LSEXP_NOTHING_MORE) {
+				vport->fc_sparam.cmn.bbRcvSizeMsb &= 0xf;
+				retry = 1;
+				lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+						 "0820 FLOGI Failed (x%x). "
+						 "BBCredit Not Supported\n",
+						 stat.un.lsRjtError);
 			}
+			break;
+
 		case LSRJT_PROTOCOL_ERR:
 			if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
 			  (cmd == ELS_CMD_FDISC) &&
@@ -3442,6 +3489,21 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 				goto out_retry;
 			}
 			break;
+		case LSRJT_CMD_UNSUPPORTED:
+			/* lpfc nvmet returns this type of LS_RJT when it
+			 * receives an FCP PRLI because lpfc nvmet only
+			 * support NVME.  ELS request is terminated for FCP4
+			 * on this rport.
+			 */
+			if (stat.un.b.lsRjtRsnCodeExp ==
+			    LSEXP_REQ_UNSUPPORTED && cmd == ELS_CMD_PRLI) {
+				spin_lock_irq(shost->host_lock);
+				ndlp->nlp_flag |= NLP_FCP_PRLI_RJT;
+				spin_unlock_irq(shost->host_lock);
+				retry = 0;
+				goto out_retry;
+			}
+			break;
 		}
 		break;
 
@@ -3930,7 +3992,25 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 	if (mbox) {
 		if ((rspiocb->iocb.ulpStatus == 0)
 		    && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) {
-			lpfc_unreg_rpi(vport, ndlp);
+			if (!lpfc_unreg_rpi(vport, ndlp) &&
+			    (ndlp->nlp_state ==  NLP_STE_PLOGI_ISSUE ||
+			     ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE)) {
+				lpfc_printf_vlog(vport, KERN_INFO,
+					LOG_DISCOVERY,
+					"0314 PLOGI recov DID x%x "
+					"Data: x%x x%x x%x\n",
+					ndlp->nlp_DID, ndlp->nlp_state,
+					ndlp->nlp_rpi, ndlp->nlp_flag);
+				mp = mbox->context1;
+				if (mp) {
+					lpfc_mbuf_free(phba, mp->virt,
+						       mp->phys);
+					kfree(mp);
+				}
+				mempool_free(mbox, phba->mbox_mem_pool);
+				goto out;
+			}
+
 			/* Increment reference count to ndlp to hold the
 			 * reference to ndlp for the callback function.
 			 */
@@ -4132,6 +4212,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
 			sp->cmn.valid_vendor_ver_level = 0;
 			memset(sp->un.vendorVersion, 0,
 			       sizeof(sp->un.vendorVersion));
+			sp->cmn.bbRcvSizeMsb &= 0xF;
 
 			/* If our firmware supports this feature, convey that
 			 * info to the target using the vendor specific field.
@@ -7989,6 +8070,13 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 			rjt_exp = LSEXP_NOTHING_MORE;
 			break;
 		}
+
+		/* NVMET accepts NVME PRLI only.  Reject FCP PRLI */
+		if (cmd == ELS_CMD_PRLI && phba->nvmet_support) {
+			rjt_err = LSRJT_CMD_UNSUPPORTED;
+			rjt_exp = LSEXP_REQ_UNSUPPORTED;
+			break;
+		}
 		lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI);
 		break;
 	case ELS_CMD_LIRR:
@@ -8784,6 +8872,7 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	pcmd += sizeof(uint32_t); /* Node Name */
 	pcmd += sizeof(uint32_t); /* Node Name */
 	memcpy(pcmd, &vport->fc_nodename, 8);
+	sp->cmn.valid_vendor_ver_level = 0;
 	memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion));
 	lpfc_set_disctmo(vport);
 
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index aa5e5ff56dfb..20808349a80e 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1108,6 +1108,7 @@ void
 lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 {
 	struct lpfc_vport *vport = pmb->vport;
+	uint8_t bbscn = 0;
 
 	if (pmb->u.mb.mbxStatus)
 		goto out;
@@ -1134,10 +1135,17 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 	/* Start discovery by sending a FLOGI. port_state is identically
 	 * LPFC_FLOGI while waiting for FLOGI cmpl
 	 */
-	if (vport->port_state != LPFC_FLOGI)
+	if (vport->port_state != LPFC_FLOGI) {
+		if (phba->bbcredit_support && phba->cfg_enable_bbcr) {
+			bbscn = bf_get(lpfc_bbscn_def,
+				       &phba->sli4_hba.bbscn_params);
+			vport->fc_sparam.cmn.bbRcvSizeMsb &= 0xf;
+			vport->fc_sparam.cmn.bbRcvSizeMsb |= (bbscn << 4);
+		}
 		lpfc_initial_flogi(vport);
-	else if (vport->fc_flag & FC_PT2PT)
+	} else if (vport->fc_flag & FC_PT2PT) {
 		lpfc_disc_start(vport);
+	}
 	return;
 
 out:
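
The two assignments to fc_sparam.cmn.bbRcvSizeMsb above rely on BB_SC_N being carried in the upper nibble of that service-parameter byte: the low nibble is preserved and bbscn is shifted into bits 7:4. In isolation, the packing and the matching unpack used later in lpfc_reg_vfi() look roughly like this (hedged sketch, hypothetical helper names):

#include <linux/types.h>

/* Pack BB_SC_N into the high nibble of bbRcvSizeMsb, as done above. */
static inline u8 demo_pack_bbscn(u8 bb_rcv_size_msb, u8 bbscn)
{
	bb_rcv_size_msb &= 0x0f;                 /* keep the low nibble */
	bb_rcv_size_msb |= (bbscn & 0x0f) << 4;  /* BB_SC_N in bits 7:4 */
	return bb_rcv_size_msb;
}

/* Recover BB_SC_N from the fabric parameters, as in lpfc_reg_vfi() below. */
static inline u8 demo_unpack_bbscn(u8 bb_rcv_size_msb)
{
	return (bb_rcv_size_msb >> 4) & 0x0f;
}
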
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 26a5647e057e..bdc1f184f67a 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -2293,15 +2293,27 @@ typedef struct {
 	uint32_t rttov;
 	uint32_t altov;
 	uint32_t crtov;
-	uint32_t citov;
+
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t rsvd4:19;
+	uint32_t cscn:1;
+	uint32_t bbscn:4;
+	uint32_t rsvd3:8;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint32_t rsvd3:8;
+	uint32_t bbscn:4;
+	uint32_t cscn:1;
+	uint32_t rsvd4:19;
+#endif
+
 #ifdef __BIG_ENDIAN_BITFIELD
 	uint32_t rrq_enable:1;
 	uint32_t rrq_immed:1;
-	uint32_t rsvd4:29;
+	uint32_t rsvd5:29;
 	uint32_t ack0_enable:1;
 #else	/*  __LITTLE_ENDIAN_BITFIELD */
 	uint32_t ack0_enable:1;
-	uint32_t rsvd4:29;
+	uint32_t rsvd5:29;
 	uint32_t rrq_immed:1;
 	uint32_t rrq_enable:1;
 #endif
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index bb4715705fa3..1db0a38683f4 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -2217,9 +2217,15 @@ struct lpfc_mbx_reg_vfi {
 	uint32_t e_d_tov;
 	uint32_t r_a_tov;
 	uint32_t word10;
-#define lpfc_reg_vfi_nport_id_SHIFT		0
-#define lpfc_reg_vfi_nport_id_MASK		0x00FFFFFF
-#define lpfc_reg_vfi_nport_id_WORD		word10
+#define lpfc_reg_vfi_nport_id_SHIFT	0
+#define lpfc_reg_vfi_nport_id_MASK	0x00FFFFFF
+#define lpfc_reg_vfi_nport_id_WORD	word10
+#define lpfc_reg_vfi_bbcr_SHIFT		27
+#define lpfc_reg_vfi_bbcr_MASK		0x00000001
+#define lpfc_reg_vfi_bbcr_WORD		word10
+#define lpfc_reg_vfi_bbscn_SHIFT	28
+#define lpfc_reg_vfi_bbscn_MASK		0x0000000F
+#define lpfc_reg_vfi_bbscn_WORD		word10
 };
 
 struct lpfc_mbx_init_vpi {
@@ -2646,7 +2652,16 @@ struct lpfc_mbx_read_config {
 #define lpfc_mbx_rd_conf_link_speed_MASK	0x0000FFFF
 #define lpfc_mbx_rd_conf_link_speed_WORD	word6
 	uint32_t rsvd_7;
-	uint32_t rsvd_8;
+	uint32_t word8;
+#define lpfc_mbx_rd_conf_bbscn_min_SHIFT	0
+#define lpfc_mbx_rd_conf_bbscn_min_MASK		0x0000000F
+#define lpfc_mbx_rd_conf_bbscn_min_WORD		word8
+#define lpfc_mbx_rd_conf_bbscn_max_SHIFT	4
+#define lpfc_mbx_rd_conf_bbscn_max_MASK		0x0000000F
+#define lpfc_mbx_rd_conf_bbscn_max_WORD		word8
+#define lpfc_mbx_rd_conf_bbscn_def_SHIFT	8
+#define lpfc_mbx_rd_conf_bbscn_def_MASK		0x0000000F
+#define lpfc_mbx_rd_conf_bbscn_def_WORD		word8
 	uint32_t word9;
 #define lpfc_mbx_rd_conf_lmt_SHIFT		0
 #define lpfc_mbx_rd_conf_lmt_MASK		0x0000FFFF
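
The bbscn_min/max/def fields added to READ_CONFIG above follow lpfc's _SHIFT/_MASK/_WORD triple convention, which lets token-pasting accessor macros read or write a field within the named mailbox word. A generic sketch of how such accessors work (demo_get/demo_set are illustrative stand-ins for lpfc's own bf_get/bf_set helpers):

/* Illustrative accessors in the same _SHIFT/_MASK/_WORD style. */
#define demo_get(name, ptr) \
	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)

#define demo_set(name, ptr, val) \
	((ptr)->name##_WORD = (((ptr)->name##_WORD & \
		~(name##_MASK << name##_SHIFT)) | \
		(((val) & name##_MASK) << name##_SHIFT)))

/* e.g. demo_get(lpfc_mbx_rd_conf_bbscn_def, rd_config) would read word8. */
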
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 491aa95eb0f6..7e7ae786121b 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -37,6 +37,7 @@
 #include <linux/miscdevice.h>
 #include <linux/percpu.h>
 #include <linux/msi.h>
+#include <linux/bitops.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_device.h>
@@ -1253,6 +1254,7 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
 	unsigned long time_elapsed;
 	uint32_t tick_cqe, max_cqe, val;
 	uint64_t tot, data1, data2, data3;
+	struct lpfc_nvmet_tgtport *tgtp;
 	struct lpfc_register reg_data;
 	void __iomem *eqdreg = phba->sli4_hba.u.if_type2.EQDregaddr;
 
@@ -1281,13 +1283,11 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
 		/* Check outstanding IO count */
 		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
 			if (phba->nvmet_support) {
-				spin_lock(&phba->sli4_hba.nvmet_ctx_get_lock);
-				spin_lock(&phba->sli4_hba.nvmet_ctx_put_lock);
-				tot = phba->sli4_hba.nvmet_xri_cnt -
-					(phba->sli4_hba.nvmet_ctx_get_cnt +
-					phba->sli4_hba.nvmet_ctx_put_cnt);
-				spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock);
-				spin_unlock(&phba->sli4_hba.nvmet_ctx_get_lock);
+				tgtp = phba->targetport->private;
+				/* Calculate outstanding IOs */
+				tot = atomic_read(&tgtp->rcv_fcp_cmd_drop);
+				tot += atomic_read(&tgtp->xmt_fcp_release);
+				tot = atomic_read(&tgtp->rcv_fcp_cmd_in) - tot;
 			} else {
 				tot = atomic_read(&phba->fc4NvmeIoCmpls);
 				data1 = atomic_read(
@@ -3048,7 +3048,7 @@ lpfc_online(struct lpfc_hba *phba)
 {
 	struct lpfc_vport *vport;
 	struct lpfc_vport **vports;
-	int i;
+	int i, error = 0;
 	bool vpis_cleared = false;
 
 	if (!phba)
@@ -3072,6 +3072,18 @@ lpfc_online(struct lpfc_hba *phba)
 		if (!phba->sli4_hba.max_cfg_param.vpi_used)
 			vpis_cleared = true;
 		spin_unlock_irq(&phba->hbalock);
+
+		/* Reestablish the local initiator port.
+		 * The offline process destroyed the previous lport.
+		 */
+		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME &&
+				!phba->nvmet_support) {
+			error = lpfc_nvme_create_localport(phba->pport);
+			if (error)
+				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"6132 NVME restore reg failed "
+					"on nvmei error x%x\n", error);
+		}
 	} else {
 		lpfc_sli_queue_init(phba);
 		if (lpfc_sli_hba_setup(phba)) {	/* Initialize SLI2/SLI3 HBA */
@@ -3226,6 +3238,13 @@ lpfc_offline(struct lpfc_hba *phba)
 
 	/* stop port and all timers associated with this hba */
 	lpfc_stop_port(phba);
+
+	/* Tear down the local and target port registrations.  The
+	 * nvme transports need to cleanup.
+	 */
+	lpfc_nvmet_destroy_targetport(phba);
+	lpfc_nvme_destroy_localport(phba->pport);
+
 	vports = lpfc_create_vport_work_array(phba);
 	if (vports != NULL)
 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
@@ -3275,7 +3294,7 @@ lpfc_scsi_free(struct lpfc_hba *phba)
 	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
 				 list) {
 		list_del(&sb->list);
-		pci_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
+		dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
 			      sb->dma_handle);
 		kfree(sb);
 		phba->total_scsi_bufs--;
@@ -3286,7 +3305,7 @@ lpfc_scsi_free(struct lpfc_hba *phba)
 	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
 				 list) {
 		list_del(&sb->list);
-		pci_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
+		dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
 			      sb->dma_handle);
 		kfree(sb);
 		phba->total_scsi_bufs--;
@@ -3317,7 +3336,7 @@ lpfc_nvme_free(struct lpfc_hba *phba)
 	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
 				 &phba->lpfc_nvme_buf_list_put, list) {
 		list_del(&lpfc_ncmd->list);
-		pci_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
+		dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
 			      lpfc_ncmd->dma_handle);
 		kfree(lpfc_ncmd);
 		phba->total_nvme_bufs--;
@@ -3328,7 +3347,7 @@ lpfc_nvme_free(struct lpfc_hba *phba)
 	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
 				 &phba->lpfc_nvme_buf_list_get, list) {
 		list_del(&lpfc_ncmd->list);
-		pci_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
+		dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
 			      lpfc_ncmd->dma_handle);
 		kfree(lpfc_ncmd);
 		phba->total_nvme_bufs--;
@@ -3640,7 +3659,7 @@ lpfc_sli4_scsi_sgl_update(struct lpfc_hba *phba)
 			list_remove_head(&scsi_sgl_list, psb,
 					 struct lpfc_scsi_buf, list);
 			if (psb) {
-				pci_pool_free(phba->lpfc_sg_dma_buf_pool,
+				dma_pool_free(phba->lpfc_sg_dma_buf_pool,
 					      psb->data, psb->dma_handle);
 				kfree(psb);
 			}
@@ -3710,9 +3729,7 @@ lpfc_get_wwpn(struct lpfc_hba *phba)
 	if (phba->sli_rev == LPFC_SLI_REV4)
 		return be64_to_cpu(wwn);
 	else
-		return (((wwn & 0xffffffff00000000) >> 32) |
-			((wwn & 0x00000000ffffffff) << 32));
-
+		return rol64(wwn, 32);
 }
 
 /**
@@ -3774,7 +3791,7 @@ lpfc_sli4_nvme_sgl_update(struct lpfc_hba *phba)
 			list_remove_head(&nvme_sgl_list, lpfc_ncmd,
 					 struct lpfc_nvme_buf, list);
 			if (lpfc_ncmd) {
-				pci_pool_free(phba->lpfc_sg_dma_buf_pool,
+				dma_pool_free(phba->lpfc_sg_dma_buf_pool,
 					      lpfc_ncmd->data,
 					      lpfc_ncmd->dma_handle);
 				kfree(lpfc_ncmd);
@@ -5930,8 +5947,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 		spin_lock_init(&phba->sli4_hba.abts_nvme_buf_list_lock);
 		INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvme_buf_list);
 		INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
-		INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_ctx_get_list);
-		INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_ctx_put_list);
 		INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list);
 
 		/* Fast-path XRI aborted CQ Event work queue list */
@@ -5940,8 +5955,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 
 	/* This abort list used by worker thread */
 	spin_lock_init(&phba->sli4_hba.sgl_list_lock);
-	spin_lock_init(&phba->sli4_hba.nvmet_ctx_get_lock);
-	spin_lock_init(&phba->sli4_hba.nvmet_ctx_put_lock);
 	spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock);
 
 	/*
@@ -6516,6 +6529,12 @@ lpfc_free_nvmet_sgl_list(struct lpfc_hba *phba)
 		lpfc_nvmet_buf_free(phba, sglq_entry->virt, sglq_entry->phys);
 		kfree(sglq_entry);
 	}
+
+	/* Update the nvmet_xri_cnt to reflect no current sgls.
+	 * The next initialization cycle sets the count and allocates
+	 * the sgls over again.
+	 */
+	phba->sli4_hba.nvmet_xri_cnt = 0;
 }
 
 /**
@@ -6846,8 +6865,8 @@ lpfc_create_shost(struct lpfc_hba *phba)
 	if (phba->nvmet_support) {
 		/* Only 1 vport (pport) will support NVME target */
 		if (phba->txrdy_payload_pool == NULL) {
-			phba->txrdy_payload_pool = pci_pool_create(
-				"txrdy_pool", phba->pcidev,
+			phba->txrdy_payload_pool = dma_pool_create(
+				"txrdy_pool", &phba->pcidev->dev,
 				TXRDY_PAYLOAD_LEN, 16, 0);
 			if (phba->txrdy_payload_pool) {
 				phba->targetport = NULL;
@@ -7605,6 +7624,11 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
 			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
 					"3082 Mailbox (x%x) returned ldv:x0\n",
 					bf_get(lpfc_mqe_command, &pmb->u.mqe));
+		if (bf_get(lpfc_mbx_rd_conf_bbscn_def, rd_config)) {
+			phba->bbcredit_support = 1;
+			phba->sli4_hba.bbscn_params.word0 = rd_config->word8;
+		}
+
 		phba->sli4_hba.extents_in_use =
 			bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
 		phba->sli4_hba.max_cfg_param.max_xri =
@@ -8301,6 +8325,9 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
 				goto out_error;
 			}
 
+			/* Put list in known state in case driver load fails. */
+			INIT_LIST_HEAD(&qdesc->rqbp->rqb_buffer_list);
+
 			/* Create NVMET Receive Queue for data */
 			qdesc = lpfc_sli4_queue_alloc(phba,
 						      phba->sli4_hba.rq_esize,
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index ce25a18367b5..81fb92967b11 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -376,7 +376,12 @@ lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
 	mb->un.varCfgLnk.rttov = phba->fc_rttov;
 	mb->un.varCfgLnk.altov = phba->fc_altov;
 	mb->un.varCfgLnk.crtov = phba->fc_crtov;
-	mb->un.varCfgLnk.citov = phba->fc_citov;
+	mb->un.varCfgLnk.cscn = 0;
+	if (phba->bbcredit_support && phba->cfg_enable_bbcr) {
+		mb->un.varCfgLnk.cscn = 1;
+		mb->un.varCfgLnk.bbscn = bf_get(lpfc_bbscn_def,
+						 &phba->sli4_hba.bbscn_params);
+	}
 
 	if (phba->cfg_ack0 && (phba->sli_rev < LPFC_SLI_REV4))
 		mb->un.varCfgLnk.ack0_enable = 1;
@@ -2139,6 +2144,7 @@ lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
 {
 	struct lpfc_mbx_reg_vfi *reg_vfi;
 	struct lpfc_hba *phba = vport->phba;
+	uint8_t bbscn_fabric = 0, bbscn_max = 0, bbscn_def = 0;
 
 	memset(mbox, 0, sizeof(*mbox));
 	reg_vfi = &mbox->u.mqe.un.reg_vfi;
@@ -2168,16 +2174,39 @@ lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
 		bf_set(lpfc_reg_vfi_vp, reg_vfi, 0);
 		bf_set(lpfc_reg_vfi_upd, reg_vfi, 1);
 	}
+
+	bf_set(lpfc_reg_vfi_bbcr, reg_vfi, 0);
+	bf_set(lpfc_reg_vfi_bbscn, reg_vfi, 0);
+	bbscn_fabric = (phba->fc_fabparam.cmn.bbRcvSizeMsb >> 4) & 0xF;
+
+	if (phba->bbcredit_support && phba->cfg_enable_bbcr  &&
+	    bbscn_fabric != 0) {
+		bbscn_max = bf_get(lpfc_bbscn_max,
+				   &phba->sli4_hba.bbscn_params);
+		if (bbscn_fabric <= bbscn_max) {
+			bbscn_def = bf_get(lpfc_bbscn_def,
+					   &phba->sli4_hba.bbscn_params);
+
+			if (bbscn_fabric > bbscn_def)
+				bf_set(lpfc_reg_vfi_bbscn, reg_vfi,
+				       bbscn_fabric);
+			else
+				bf_set(lpfc_reg_vfi_bbscn, reg_vfi, bbscn_def);
+
+			bf_set(lpfc_reg_vfi_bbcr, reg_vfi, 1);
+		}
+	}
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_MBOX,
 			"3134 Register VFI, mydid:x%x, fcfi:%d, "
 			" vfi:%d, vpi:%d, fc_pname:%x%x fc_flag:x%x"
-			" port_state:x%x topology chg:%d\n",
+			" port_state:x%x topology chg:%d bbscn_fabric :%d\n",
 			vport->fc_myDID,
 			phba->fcf.fcfi,
 			phba->sli4_hba.vfi_ids[vport->vfi],
 			phba->vpi_ids[vport->vpi],
 			reg_vfi->wwn[0], reg_vfi->wwn[1], vport->fc_flag,
-			vport->port_state, phba->fc_topology_changed);
+			vport->port_state, phba->fc_topology_changed,
+			bbscn_fabric);
 }
 
 /**
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index fcc05a1517c2..56faeb049b4a 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -97,8 +97,8 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
 			i = SLI4_PAGE_SIZE;
 
 		phba->lpfc_sg_dma_buf_pool =
-			pci_pool_create("lpfc_sg_dma_buf_pool",
-					phba->pcidev,
+			dma_pool_create("lpfc_sg_dma_buf_pool",
+					&phba->pcidev->dev,
 					phba->cfg_sg_dma_buf_size,
 					i, 0);
 		if (!phba->lpfc_sg_dma_buf_pool)
@@ -106,15 +106,15 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
 
 	} else {
 		phba->lpfc_sg_dma_buf_pool =
-			pci_pool_create("lpfc_sg_dma_buf_pool",
-					phba->pcidev, phba->cfg_sg_dma_buf_size,
+			dma_pool_create("lpfc_sg_dma_buf_pool",
+					&phba->pcidev->dev, phba->cfg_sg_dma_buf_size,
 					align, 0);
 
 		if (!phba->lpfc_sg_dma_buf_pool)
 			goto fail;
 	}
 
-	phba->lpfc_mbuf_pool = pci_pool_create("lpfc_mbuf_pool", phba->pcidev,
+	phba->lpfc_mbuf_pool = dma_pool_create("lpfc_mbuf_pool", &phba->pcidev->dev,
 							LPFC_BPL_SIZE,
 							align, 0);
 	if (!phba->lpfc_mbuf_pool)
@@ -128,7 +128,7 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
 	pool->max_count = 0;
 	pool->current_count = 0;
 	for ( i = 0; i < LPFC_MBUF_POOL_SIZE; i++) {
-		pool->elements[i].virt = pci_pool_alloc(phba->lpfc_mbuf_pool,
+		pool->elements[i].virt = dma_pool_alloc(phba->lpfc_mbuf_pool,
 				       GFP_KERNEL, &pool->elements[i].phys);
 		if (!pool->elements[i].virt)
 			goto fail_free_mbuf_pool;
@@ -152,21 +152,21 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
 						sizeof(struct lpfc_node_rrq));
 		if (!phba->rrq_pool)
 			goto fail_free_nlp_mem_pool;
-		phba->lpfc_hrb_pool = pci_pool_create("lpfc_hrb_pool",
-					      phba->pcidev,
+		phba->lpfc_hrb_pool = dma_pool_create("lpfc_hrb_pool",
+					      &phba->pcidev->dev,
 					      LPFC_HDR_BUF_SIZE, align, 0);
 		if (!phba->lpfc_hrb_pool)
 			goto fail_free_rrq_mem_pool;
 
-		phba->lpfc_drb_pool = pci_pool_create("lpfc_drb_pool",
-					      phba->pcidev,
+		phba->lpfc_drb_pool = dma_pool_create("lpfc_drb_pool",
+					      &phba->pcidev->dev,
 					      LPFC_DATA_BUF_SIZE, align, 0);
 		if (!phba->lpfc_drb_pool)
 			goto fail_free_hrb_pool;
 		phba->lpfc_hbq_pool = NULL;
 	} else {
-		phba->lpfc_hbq_pool = pci_pool_create("lpfc_hbq_pool",
-			phba->pcidev, LPFC_BPL_SIZE, align, 0);
+		phba->lpfc_hbq_pool = dma_pool_create("lpfc_hbq_pool",
+			&phba->pcidev->dev, LPFC_BPL_SIZE, align, 0);
 		if (!phba->lpfc_hbq_pool)
 			goto fail_free_nlp_mem_pool;
 		phba->lpfc_hrb_pool = NULL;
@@ -185,10 +185,10 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
 
 	return 0;
 fail_free_drb_pool:
-	pci_pool_destroy(phba->lpfc_drb_pool);
+	dma_pool_destroy(phba->lpfc_drb_pool);
 	phba->lpfc_drb_pool = NULL;
  fail_free_hrb_pool:
-	pci_pool_destroy(phba->lpfc_hrb_pool);
+	dma_pool_destroy(phba->lpfc_hrb_pool);
 	phba->lpfc_hrb_pool = NULL;
  fail_free_rrq_mem_pool:
 	mempool_destroy(phba->rrq_pool);
@@ -201,14 +201,14 @@ fail_free_drb_pool:
 	phba->mbox_mem_pool = NULL;
  fail_free_mbuf_pool:
 	while (i--)
-		pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
+		dma_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
 						 pool->elements[i].phys);
 	kfree(pool->elements);
  fail_free_lpfc_mbuf_pool:
-	pci_pool_destroy(phba->lpfc_mbuf_pool);
+	dma_pool_destroy(phba->lpfc_mbuf_pool);
 	phba->lpfc_mbuf_pool = NULL;
  fail_free_dma_buf_pool:
-	pci_pool_destroy(phba->lpfc_sg_dma_buf_pool);
+	dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
 	phba->lpfc_sg_dma_buf_pool = NULL;
  fail:
 	return -ENOMEM;
@@ -218,8 +218,8 @@ int
 lpfc_nvmet_mem_alloc(struct lpfc_hba *phba)
 {
 	phba->lpfc_nvmet_drb_pool =
-		pci_pool_create("lpfc_nvmet_drb_pool",
-				phba->pcidev, LPFC_NVMET_DATA_BUF_SIZE,
+		dma_pool_create("lpfc_nvmet_drb_pool",
+				&phba->pcidev->dev, LPFC_NVMET_DATA_BUF_SIZE,
 				SGL_ALIGN_SZ, 0);
 	if (!phba->lpfc_nvmet_drb_pool) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -248,20 +248,20 @@ lpfc_mem_free(struct lpfc_hba *phba)
 	/* Free HBQ pools */
 	lpfc_sli_hbqbuf_free_all(phba);
 	if (phba->lpfc_nvmet_drb_pool)
-		pci_pool_destroy(phba->lpfc_nvmet_drb_pool);
+		dma_pool_destroy(phba->lpfc_nvmet_drb_pool);
 	phba->lpfc_nvmet_drb_pool = NULL;
 	if (phba->lpfc_drb_pool)
-		pci_pool_destroy(phba->lpfc_drb_pool);
+		dma_pool_destroy(phba->lpfc_drb_pool);
 	phba->lpfc_drb_pool = NULL;
 	if (phba->lpfc_hrb_pool)
-		pci_pool_destroy(phba->lpfc_hrb_pool);
+		dma_pool_destroy(phba->lpfc_hrb_pool);
 	phba->lpfc_hrb_pool = NULL;
 	if (phba->txrdy_payload_pool)
-		pci_pool_destroy(phba->txrdy_payload_pool);
+		dma_pool_destroy(phba->txrdy_payload_pool);
 	phba->txrdy_payload_pool = NULL;
 
 	if (phba->lpfc_hbq_pool)
-		pci_pool_destroy(phba->lpfc_hbq_pool);
+		dma_pool_destroy(phba->lpfc_hbq_pool);
 	phba->lpfc_hbq_pool = NULL;
 
 	if (phba->rrq_pool)
@@ -282,15 +282,15 @@ lpfc_mem_free(struct lpfc_hba *phba)
 
 	/* Free MBUF memory pool */
 	for (i = 0; i < pool->current_count; i++)
-		pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
+		dma_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
 			      pool->elements[i].phys);
 	kfree(pool->elements);
 
-	pci_pool_destroy(phba->lpfc_mbuf_pool);
+	dma_pool_destroy(phba->lpfc_mbuf_pool);
 	phba->lpfc_mbuf_pool = NULL;
 
 	/* Free DMA buffer memory pool */
-	pci_pool_destroy(phba->lpfc_sg_dma_buf_pool);
+	dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
 	phba->lpfc_sg_dma_buf_pool = NULL;
 
 	/* Free Device Data memory pool */
@@ -379,7 +379,7 @@ lpfc_mem_free_all(struct lpfc_hba *phba)
  * @handle: used to return the DMA-mapped address of the mbuf
  *
  * Description: Allocates a DMA-mapped buffer from the lpfc_mbuf_pool PCI pool.
- * Allocates from generic pci_pool_alloc function first and if that fails and
+ * Allocates from generic dma_pool_alloc function first and if that fails and
  * mem_flags has MEM_PRI set (the only defined flag), returns an mbuf from the
  * HBA's pool.
  *
@@ -397,7 +397,7 @@ lpfc_mbuf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle)
 	unsigned long iflags;
 	void *ret;
 
-	ret = pci_pool_alloc(phba->lpfc_mbuf_pool, GFP_KERNEL, handle);
+	ret = dma_pool_alloc(phba->lpfc_mbuf_pool, GFP_KERNEL, handle);
 
 	spin_lock_irqsave(&phba->hbalock, iflags);
 	if (!ret && (mem_flags & MEM_PRI) && pool->current_count) {
@@ -433,7 +433,7 @@ __lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
 		pool->elements[pool->current_count].phys = dma;
 		pool->current_count++;
 	} else {
-		pci_pool_free(phba->lpfc_mbuf_pool, virt, dma);
+		dma_pool_free(phba->lpfc_mbuf_pool, virt, dma);
 	}
 	return;
 }
@@ -470,7 +470,7 @@ lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
  * @handle: used to return the DMA-mapped address of the nvmet_buf
  *
  * Description: Allocates a DMA-mapped buffer from the lpfc_sg_dma_buf_pool
- * PCI pool.  Allocates from generic pci_pool_alloc function.
+ * PCI pool.  Allocates from generic dma_pool_alloc function.
  *
  * Returns:
  *   pointer to the allocated nvmet_buf on success
@@ -481,7 +481,7 @@ lpfc_nvmet_buf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle)
 {
 	void *ret;
 
-	ret = pci_pool_alloc(phba->lpfc_sg_dma_buf_pool, GFP_KERNEL, handle);
+	ret = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool, GFP_KERNEL, handle);
 	return ret;
 }
 
@@ -497,7 +497,7 @@ lpfc_nvmet_buf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle)
 void
 lpfc_nvmet_buf_free(struct lpfc_hba *phba, void *virt, dma_addr_t dma)
 {
-	pci_pool_free(phba->lpfc_sg_dma_buf_pool, virt, dma);
+	dma_pool_free(phba->lpfc_sg_dma_buf_pool, virt, dma);
 }
 
 /**
@@ -522,7 +522,7 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba)
 	if (!hbqbp)
 		return NULL;
 
-	hbqbp->dbuf.virt = pci_pool_alloc(phba->lpfc_hbq_pool, GFP_KERNEL,
+	hbqbp->dbuf.virt = dma_pool_alloc(phba->lpfc_hbq_pool, GFP_KERNEL,
 					  &hbqbp->dbuf.phys);
 	if (!hbqbp->dbuf.virt) {
 		kfree(hbqbp);
@@ -547,7 +547,7 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba)
 void
 lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp)
 {
-	pci_pool_free(phba->lpfc_hbq_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys);
+	dma_pool_free(phba->lpfc_hbq_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys);
 	kfree(hbqbp);
 	return;
 }
@@ -574,16 +574,16 @@ lpfc_sli4_rb_alloc(struct lpfc_hba *phba)
 	if (!dma_buf)
 		return NULL;
 
-	dma_buf->hbuf.virt = pci_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
+	dma_buf->hbuf.virt = dma_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
 					    &dma_buf->hbuf.phys);
 	if (!dma_buf->hbuf.virt) {
 		kfree(dma_buf);
 		return NULL;
 	}
-	dma_buf->dbuf.virt = pci_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
+	dma_buf->dbuf.virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
 					    &dma_buf->dbuf.phys);
 	if (!dma_buf->dbuf.virt) {
-		pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
+		dma_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
 			      dma_buf->hbuf.phys);
 		kfree(dma_buf);
 		return NULL;
@@ -607,8 +607,8 @@ lpfc_sli4_rb_alloc(struct lpfc_hba *phba)
 void
 lpfc_sli4_rb_free(struct lpfc_hba *phba, struct hbq_dmabuf *dmab)
 {
-	pci_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
-	pci_pool_free(phba->lpfc_drb_pool, dmab->dbuf.virt, dmab->dbuf.phys);
+	dma_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
+	dma_pool_free(phba->lpfc_drb_pool, dmab->dbuf.virt, dmab->dbuf.phys);
 	kfree(dmab);
 }
 
@@ -634,16 +634,16 @@ lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba)
 	if (!dma_buf)
 		return NULL;
 
-	dma_buf->hbuf.virt = pci_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
+	dma_buf->hbuf.virt = dma_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
 					    &dma_buf->hbuf.phys);
 	if (!dma_buf->hbuf.virt) {
 		kfree(dma_buf);
 		return NULL;
 	}
-	dma_buf->dbuf.virt = pci_pool_alloc(phba->lpfc_nvmet_drb_pool,
+	dma_buf->dbuf.virt = dma_pool_alloc(phba->lpfc_nvmet_drb_pool,
 					    GFP_KERNEL, &dma_buf->dbuf.phys);
 	if (!dma_buf->dbuf.virt) {
-		pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
+		dma_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
 			      dma_buf->hbuf.phys);
 		kfree(dma_buf);
 		return NULL;
@@ -667,8 +667,8 @@ lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba)
 void
 lpfc_sli4_nvmet_free(struct lpfc_hba *phba, struct rqb_dmabuf *dmab)
 {
-	pci_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
-	pci_pool_free(phba->lpfc_nvmet_drb_pool,
+	dma_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
+	dma_pool_free(phba->lpfc_nvmet_drb_pool,
 		      dmab->dbuf.virt, dmab->dbuf.phys);
 	kfree(dmab);
 }
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index f74cb0142fd4..f3ad7cac355d 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -1724,6 +1724,9 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
 				lpfc_nvme_update_localport(vport);
 			}
 
+		} else if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
+			ndlp->nlp_fc4_type |= NLP_FC4_FCP;
+
 		} else if (ndlp->nlp_fc4_type == 0) {
 			rc = lpfc_ns_cmd(vport, SLI_CTNS_GFT_ID,
 					 0, ndlp->nlp_DID);
@@ -1892,6 +1895,15 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 			goto out;
 		}
 
+		/* The rport rejected the FCP PRLI as unsupported.
+		 * This should only happen in Pt2Pt, so an NVME PRLI
+		 * should still be outstanding.
+		 */
+		if (npr && ndlp->nlp_flag & NLP_FCP_PRLI_RJT) {
+			ndlp->nlp_fc4_type &= ~NLP_FC4_FCP;
+			goto out_err;
+		}
+
 		/* The LS Req had some error.  Don't let this be a
 		 * target.
 		 */
@@ -2189,12 +2201,15 @@ lpfc_device_rm_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 			  void *arg, uint32_t evt)
 {
 	/*
-	 * Take no action.  If a LOGO is outstanding, then possibly DevLoss has
-	 * timed out and is calling for Device Remove.  In this case, the LOGO
-	 * must be allowed to complete in state LOGO_ISSUE so that the rpi
-	 * and other NLP flags are correctly cleaned up.
+	 * DevLoss has timed out and is calling for Device Remove.
+	 * In this case, abort the LOGO and clean up the ndlp.
 	 */
-	return ndlp->nlp_state;
+
+	lpfc_unreg_rpi(vport, ndlp);
+	/* software abort outstanding PLOGI */
+	lpfc_els_abort(vport->phba, ndlp);
+	lpfc_drop_node(vport, ndlp);
+	return NLP_STE_FREED_NODE;
 }
 
 static uint32_t
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 0a0a1b92d01d..79ba3ce063a4 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -110,7 +110,7 @@ lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport,
 		qhandle->index = qidx;
 	}
 
-	lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
 			 "6073 Binding %s HdwQueue %d  (cpu %d) to "
 			 "io_channel %d qhandle %p\n", str,
 			 qidx, qhandle->cpu_id, qhandle->index, qhandle);
@@ -364,7 +364,7 @@ lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
 			 genwqe->sli4_xritag, genwqe->iotag, ndlp->nlp_DID);
 
 	rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, genwqe);
-	if (rc == WQE_ERROR) {
+	if (rc) {
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
 				 "6045 Issue GEN REQ WQE to NPORT x%x "
 				 "Data: x%x x%x\n",
@@ -1270,7 +1270,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
 	 * not exceed the programmed depth.
 	 */
 	if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) {
-		ret = -EAGAIN;
+		ret = -EBUSY;
 		goto out_fail;
 	}
 
@@ -1279,7 +1279,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
 		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
 				 "6065 driver's buffer pool is empty, "
 				 "IO failed\n");
-		ret = -ENOMEM;
+		ret = -EBUSY;
 		goto out_fail;
 	}
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
@@ -1332,7 +1332,6 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
 				 "sid: x%x did: x%x oxid: x%x\n",
 				 ret, vport->fc_myDID, ndlp->nlp_DID,
 				 lpfc_ncmd->cur_iocbq.sli4_xritag);
-		ret = -EBUSY;
 		goto out_free_nvme_buf;
 	}
 
@@ -1576,7 +1575,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
 	abts_buf->wqe_cmpl = lpfc_nvme_abort_fcreq_cmpl;
 	ret_val = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_buf);
 	spin_unlock_irqrestore(&phba->hbalock, flags);
-	if (ret_val == IOCB_ERROR) {
+	if (ret_val) {
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
 				 "6137 Failed abts issue_wqe with status x%x "
 				 "for nvme_fcreq %p.\n",
@@ -1939,7 +1938,7 @@ lpfc_new_nvme_buf(struct lpfc_vport *vport, int num_to_alloc)
 		 * pci bus space for an I/O. The DMA buffer includes the
 		 * number of SGE's necessary to support the sg_tablesize.
 		 */
-		lpfc_ncmd->data = pci_pool_alloc(phba->lpfc_sg_dma_buf_pool,
+		lpfc_ncmd->data = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool,
 						 GFP_KERNEL,
 						 &lpfc_ncmd->dma_handle);
 		if (!lpfc_ncmd->data) {
@@ -1950,7 +1949,7 @@ lpfc_new_nvme_buf(struct lpfc_vport *vport, int num_to_alloc)
 
 		lxri = lpfc_sli4_next_xritag(phba);
 		if (lxri == NO_XRI) {
-			pci_pool_free(phba->lpfc_sg_dma_buf_pool,
+			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
 				      lpfc_ncmd->data, lpfc_ncmd->dma_handle);
 			kfree(lpfc_ncmd);
 			break;
@@ -1961,7 +1960,7 @@ lpfc_new_nvme_buf(struct lpfc_vport *vport, int num_to_alloc)
 		/* Allocate iotag for lpfc_ncmd->cur_iocbq. */
 		iotag = lpfc_sli_next_iotag(phba, pwqeq);
 		if (iotag == 0) {
-			pci_pool_free(phba->lpfc_sg_dma_buf_pool,
+			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
 				      lpfc_ncmd->data, lpfc_ncmd->dma_handle);
 			kfree(lpfc_ncmd);
 			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
@@ -2182,8 +2181,15 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
 		vport->localport = localport;
 		lport->vport = vport;
 		vport->nvmei_support = 1;
-		len  = lpfc_new_nvme_buf(vport, phba->sli4_hba.nvme_xri_max);
-		vport->phba->total_nvme_bufs += len;
+
+		/* Don't post more new bufs if repost already recovered
+		 * the nvme sgls.
+		 */
+		if (phba->sli4_hba.nvme_xri_cnt == 0) {
+			len  = lpfc_new_nvme_buf(vport,
+						 phba->sli4_hba.nvme_xri_max);
+			vport->phba->total_nvme_bufs += len;
+		}
 	}
 
 	return ret;
@@ -2296,6 +2302,9 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 			 ndlp->nlp_DID, ndlp->nlp_type);
 
 	localport = vport->localport;
+	if (!localport)
+		return 0;
+
 	lport = (struct lpfc_nvme_lport *)localport->private;
 
 	/* NVME rports are not preserved across devloss.
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index bbbd0f84160d..0b7c1a49e203 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -170,12 +170,14 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
 	struct lpfc_nvmet_tgtport *tgtp;
 	struct fc_frame_header *fc_hdr;
 	struct rqb_dmabuf *nvmebuf;
+	struct lpfc_nvmet_ctx_info *infop;
 	uint32_t *payload;
 	uint32_t size, oxid, sid, rc;
+	int cpu;
 	unsigned long iflag;
 
 	if (ctxp->txrdy) {
-		pci_pool_free(phba->txrdy_payload_pool, ctxp->txrdy,
+		dma_pool_free(phba->txrdy_payload_pool, ctxp->txrdy,
 			      ctxp->txrdy_phys);
 		ctxp->txrdy = NULL;
 		ctxp->txrdy_phys = 0;
@@ -267,11 +269,16 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
 	}
 	spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
 
-	spin_lock_irqsave(&phba->sli4_hba.nvmet_ctx_put_lock, iflag);
-	list_add_tail(&ctx_buf->list,
-		      &phba->sli4_hba.lpfc_nvmet_ctx_put_list);
-	phba->sli4_hba.nvmet_ctx_put_cnt++;
-	spin_unlock_irqrestore(&phba->sli4_hba.nvmet_ctx_put_lock, iflag);
+	/*
+	 * Use the CPU context list from the MRQ the IO was received on
+	 * (ctxp->idx) to save the context structure.
+	 */
+	cpu = smp_processor_id();
+	infop = lpfc_get_ctx_list(phba, cpu, ctxp->idx);
+	spin_lock_irqsave(&infop->nvmet_ctx_list_lock, iflag);
+	list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
+	infop->nvmet_ctx_list_cnt++;
+	spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, iflag);
 #endif
 }
 
@@ -552,7 +559,7 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
 		/* lpfc_nvmet_xmt_fcp_release() will recycle the context */
 	} else {
 		ctxp->entry_cnt++;
-		start_clean = offsetof(struct lpfc_iocbq, wqe);
+		start_clean = offsetof(struct lpfc_iocbq, iocb_flag);
 		memset(((char *)cmdwqe) + start_clean, 0,
 		       (sizeof(struct lpfc_iocbq) - start_clean));
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
@@ -879,51 +886,54 @@ static struct nvmet_fc_target_template lpfc_tgttemplate = {
 };
 
 static void
-lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba)
+__lpfc_nvmet_clean_io_for_cpu(struct lpfc_hba *phba,
+		struct lpfc_nvmet_ctx_info *infop)
 {
 	struct lpfc_nvmet_ctxbuf *ctx_buf, *next_ctx_buf;
 	unsigned long flags;
 
-	spin_lock_irqsave(&phba->sli4_hba.nvmet_ctx_get_lock, flags);
-	spin_lock(&phba->sli4_hba.nvmet_ctx_put_lock);
+	spin_lock_irqsave(&infop->nvmet_ctx_list_lock, flags);
 	list_for_each_entry_safe(ctx_buf, next_ctx_buf,
-			&phba->sli4_hba.lpfc_nvmet_ctx_get_list, list) {
+				&infop->nvmet_ctx_list, list) {
 		spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
 		list_del_init(&ctx_buf->list);
 		spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
-		__lpfc_clear_active_sglq(phba,
-					 ctx_buf->sglq->sli4_lxritag);
+
+		__lpfc_clear_active_sglq(phba, ctx_buf->sglq->sli4_lxritag);
 		ctx_buf->sglq->state = SGL_FREED;
 		ctx_buf->sglq->ndlp = NULL;
 
 		spin_lock(&phba->sli4_hba.sgl_list_lock);
 		list_add_tail(&ctx_buf->sglq->list,
-			      &phba->sli4_hba.lpfc_nvmet_sgl_list);
+				&phba->sli4_hba.lpfc_nvmet_sgl_list);
 		spin_unlock(&phba->sli4_hba.sgl_list_lock);
 
 		lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
 		kfree(ctx_buf->context);
 	}
-	list_for_each_entry_safe(ctx_buf, next_ctx_buf,
-			&phba->sli4_hba.lpfc_nvmet_ctx_put_list, list) {
-		spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
-		list_del_init(&ctx_buf->list);
-		spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
-		__lpfc_clear_active_sglq(phba,
-					 ctx_buf->sglq->sli4_lxritag);
-		ctx_buf->sglq->state = SGL_FREED;
-		ctx_buf->sglq->ndlp = NULL;
+	spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, flags);
+}
 
-		spin_lock(&phba->sli4_hba.sgl_list_lock);
-		list_add_tail(&ctx_buf->sglq->list,
-			      &phba->sli4_hba.lpfc_nvmet_sgl_list);
-		spin_unlock(&phba->sli4_hba.sgl_list_lock);
+static void
+lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba)
+{
+	struct lpfc_nvmet_ctx_info *infop;
+	int i, j;
 
-		lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
-		kfree(ctx_buf->context);
+	/* The first context list, MRQ 0 CPU 0 */
+	infop = phba->sli4_hba.nvmet_ctx_info;
+	if (!infop)
+		return;
+
+	/* Cycle through the entire CPU context list for every MRQ */
+	for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
+		for (j = 0; j < phba->sli4_hba.num_present_cpu; j++) {
+			__lpfc_nvmet_clean_io_for_cpu(phba, infop);
+			infop++; /* next */
+		}
 	}
-	spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock);
-	spin_unlock_irqrestore(&phba->sli4_hba.nvmet_ctx_get_lock, flags);
+	kfree(phba->sli4_hba.nvmet_ctx_info);
+	phba->sli4_hba.nvmet_ctx_info = NULL;
 }
 
 static int
@@ -932,15 +942,71 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
 	struct lpfc_nvmet_ctxbuf *ctx_buf;
 	struct lpfc_iocbq *nvmewqe;
 	union lpfc_wqe128 *wqe;
-	int i;
+	struct lpfc_nvmet_ctx_info *last_infop;
+	struct lpfc_nvmet_ctx_info *infop;
+	int i, j, idx;
 
 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
 			"6403 Allocate NVMET resources for %d XRIs\n",
 			phba->sli4_hba.nvmet_xri_cnt);
 
+	phba->sli4_hba.nvmet_ctx_info = kcalloc(
+		phba->sli4_hba.num_present_cpu * phba->cfg_nvmet_mrq,
+		sizeof(struct lpfc_nvmet_ctx_info), GFP_KERNEL);
+	if (!phba->sli4_hba.nvmet_ctx_info) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"6419 Failed allocate memory for "
+				"nvmet context lists\n");
+		return -ENOMEM;
+	}
+
+	/*
+	 * Assuming X CPUs in the system, and Y MRQs, allocate some
+	 * lpfc_nvmet_ctx_info structures as follows:
+	 *
+	 * cpu0/mrq0 cpu1/mrq0 ... cpuX/mrq0
+	 * cpu0/mrq1 cpu1/mrq1 ... cpuX/mrq1
+	 * ...
+	 * cpu0/mrqY cpu1/mrqY ... cpuX/mrqY
+	 *
+	 * Each line represents an MRQ "silo" containing an entry for
+	 * every CPU.
+	 *
+	 * MRQ X is initially assumed to be associated with CPU X, thus
+	 * contexts are initially distributed across all MRQs using
+	 * the MRQ index (N) as follows: cpuN/mrqN. When contexts are
+	 * freed, they are freed to the MRQ silo based on the CPU number
+	 * of the IO completion. Thus a context that was allocated for MRQ A
+	 * whose IO completed on CPU B will be freed to cpuB/mrqA.
+	 */
+	infop = phba->sli4_hba.nvmet_ctx_info;
+	for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+		for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
+			INIT_LIST_HEAD(&infop->nvmet_ctx_list);
+			spin_lock_init(&infop->nvmet_ctx_list_lock);
+			infop->nvmet_ctx_list_cnt = 0;
+			infop++;
+		}
+	}
+
+	/*
+	 * Set up the next CPU context info ptr for each MRQ.
+	 * MRQ 0 will cycle thru CPUs 0 - X separately from
+	 * MRQ 1 cycling thru CPUs 0 - X, and so on.
+	 */
+	for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
+		last_infop = lpfc_get_ctx_list(phba, 0, j);
+		for (i = phba->sli4_hba.num_present_cpu - 1;  i >= 0; i--) {
+			infop = lpfc_get_ctx_list(phba, i, j);
+			infop->nvmet_ctx_next_cpu = last_infop;
+			last_infop = infop;
+		}
+	}
+
 	/* For all nvmet xris, allocate resources needed to process a
 	 * received command on a per xri basis.
 	 */
+	idx = 0;
 	for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) {
 		ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL);
 		if (!ctx_buf) {
@@ -977,7 +1043,6 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
 		/* Word 7 */
 		bf_set(wqe_ct, &wqe->generic.wqe_com, SLI4_CT_RPI);
 		bf_set(wqe_class, &wqe->generic.wqe_com, CLASS3);
-		bf_set(wqe_pu, &wqe->generic.wqe_com, 1);
 		/* Word 10 */
 		bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
 		bf_set(wqe_ebde_cnt, &wqe->generic.wqe_com, 0);
@@ -995,12 +1060,35 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
 					"6407 Ran out of NVMET XRIs\n");
 			return -ENOMEM;
 		}
-		spin_lock(&phba->sli4_hba.nvmet_ctx_get_lock);
-		list_add_tail(&ctx_buf->list,
-			      &phba->sli4_hba.lpfc_nvmet_ctx_get_list);
-		spin_unlock(&phba->sli4_hba.nvmet_ctx_get_lock);
+
+		/*
+		 * Add ctx to MRQidx context list. Our initial assumption
+		 * is MRQidx will be associated with CPUidx. This association
+		 * can change on the fly.
+		 */
+		infop = lpfc_get_ctx_list(phba, idx, idx);
+		spin_lock(&infop->nvmet_ctx_list_lock);
+		list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
+		infop->nvmet_ctx_list_cnt++;
+		spin_unlock(&infop->nvmet_ctx_list_lock);
+
+		/* Spread ctx structures evenly across all MRQs */
+		idx++;
+		if (idx >= phba->cfg_nvmet_mrq)
+			idx = 0;
+	}
+
+	infop = phba->sli4_hba.nvmet_ctx_info;
+	for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
+		for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+			lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
+					"6408 TOTAL NVMET ctx for CPU %d "
+					"MRQ %d: cnt %d nextcpu %p\n",
+					i, j, infop->nvmet_ctx_list_cnt,
+					infop->nvmet_ctx_next_cpu);
+			infop++;
+		}
 	}
-	phba->sli4_hba.nvmet_ctx_get_cnt = phba->sli4_hba.nvmet_xri_cnt;
 	return 0;
 }
 
@@ -1365,10 +1453,65 @@ dropit:
 #endif
 }
 
+static struct lpfc_nvmet_ctxbuf *
+lpfc_nvmet_replenish_context(struct lpfc_hba *phba,
+			     struct lpfc_nvmet_ctx_info *current_infop)
+{
+	struct lpfc_nvmet_ctxbuf *ctx_buf = NULL;
+	struct lpfc_nvmet_ctx_info *get_infop;
+	int i;
+
+	/*
+	 * The current_infop for the MRQ an NVME command IU was received
+	 * on is empty. Our goal is to replenish this MRQ's context
+	 * list from another CPU's list for the same MRQ.
+	 *
+	 * First we need to pick a context list to start looking on.
+	 * nvmet_ctx_start_cpu had available contexts the last time
+	 * we needed to replenish this CPU, while nvmet_ctx_next_cpu
+	 * is just the next sequential CPU for this MRQ.
+	 */
+	if (current_infop->nvmet_ctx_start_cpu)
+		get_infop = current_infop->nvmet_ctx_start_cpu;
+	else
+		get_infop = current_infop->nvmet_ctx_next_cpu;
+
+	for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+		if (get_infop == current_infop) {
+			get_infop = get_infop->nvmet_ctx_next_cpu;
+			continue;
+		}
+		spin_lock(&get_infop->nvmet_ctx_list_lock);
+
+		/* Just take the entire context list, if there are any */
+		if (get_infop->nvmet_ctx_list_cnt) {
+			list_splice_init(&get_infop->nvmet_ctx_list,
+				    &current_infop->nvmet_ctx_list);
+			current_infop->nvmet_ctx_list_cnt =
+				get_infop->nvmet_ctx_list_cnt - 1;
+			get_infop->nvmet_ctx_list_cnt = 0;
+			spin_unlock(&get_infop->nvmet_ctx_list_lock);
+
+			current_infop->nvmet_ctx_start_cpu = get_infop;
+			list_remove_head(&current_infop->nvmet_ctx_list,
+					 ctx_buf, struct lpfc_nvmet_ctxbuf,
+					 list);
+			return ctx_buf;
+		}
+
+		/* Otherwise, move on to the next CPU for this MRQ */
+		spin_unlock(&get_infop->nvmet_ctx_list_lock);
+		get_infop = get_infop->nvmet_ctx_next_cpu;
+	}
+
+	/* Nothing found, all contexts for the MRQ are in-flight */
+	return NULL;
+}
+
 /**
  * lpfc_nvmet_unsol_fcp_buffer - Process an unsolicited event data buffer
  * @phba: pointer to lpfc hba data structure.
- * @pring: pointer to a SLI ring.
+ * @idx: relative index of MRQ vector
  * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
  *
  * This routine is used for processing the WQE associated with a unsolicited
@@ -1380,22 +1523,26 @@ dropit:
  **/
 static void
 lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
-			    struct lpfc_sli_ring *pring,
+			    uint32_t idx,
 			    struct rqb_dmabuf *nvmebuf,
 			    uint64_t isr_timestamp)
 {
-#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
 	struct lpfc_nvmet_rcv_ctx *ctxp;
 	struct lpfc_nvmet_tgtport *tgtp;
 	struct fc_frame_header *fc_hdr;
 	struct lpfc_nvmet_ctxbuf *ctx_buf;
+	struct lpfc_nvmet_ctx_info *current_infop;
 	uint32_t *payload;
 	uint32_t size, oxid, sid, rc, qno;
 	unsigned long iflag;
+	int current_cpu;
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 	uint32_t id;
 #endif
 
+	if (!IS_ENABLED(CONFIG_NVME_TARGET_FC))
+		return;
+
 	ctx_buf = NULL;
 	if (!nvmebuf || !phba->targetport) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
@@ -1407,31 +1554,24 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
 		goto dropit;
 	}
 
-	spin_lock_irqsave(&phba->sli4_hba.nvmet_ctx_get_lock, iflag);
-	if (phba->sli4_hba.nvmet_ctx_get_cnt) {
-		list_remove_head(&phba->sli4_hba.lpfc_nvmet_ctx_get_list,
+	/*
+	 * Get a pointer to the context list for this MRQ based on
+	 * the CPU this MRQ IRQ is associated with. If the CPU association
+	 * changes from our initial assumption, the context list could
+	 * be empty, thus it would need to be replenished with the
+	 * context list from another CPU for this MRQ.
+	 */
+	current_cpu = smp_processor_id();
+	current_infop = lpfc_get_ctx_list(phba, current_cpu, idx);
+	spin_lock_irqsave(&current_infop->nvmet_ctx_list_lock, iflag);
+	if (current_infop->nvmet_ctx_list_cnt) {
+		list_remove_head(&current_infop->nvmet_ctx_list,
 				 ctx_buf, struct lpfc_nvmet_ctxbuf, list);
-		phba->sli4_hba.nvmet_ctx_get_cnt--;
+		current_infop->nvmet_ctx_list_cnt--;
 	} else {
-		spin_lock(&phba->sli4_hba.nvmet_ctx_put_lock);
-		if (phba->sli4_hba.nvmet_ctx_put_cnt) {
-			list_splice(&phba->sli4_hba.lpfc_nvmet_ctx_put_list,
-				    &phba->sli4_hba.lpfc_nvmet_ctx_get_list);
-			INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_ctx_put_list);
-			phba->sli4_hba.nvmet_ctx_get_cnt =
-				phba->sli4_hba.nvmet_ctx_put_cnt;
-			phba->sli4_hba.nvmet_ctx_put_cnt = 0;
-			spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock);
-
-			list_remove_head(
-				&phba->sli4_hba.lpfc_nvmet_ctx_get_list,
-				ctx_buf, struct lpfc_nvmet_ctxbuf, list);
-			phba->sli4_hba.nvmet_ctx_get_cnt--;
-		} else {
-			spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock);
-		}
+		ctx_buf = lpfc_nvmet_replenish_context(phba, current_infop);
 	}
-	spin_unlock_irqrestore(&phba->sli4_hba.nvmet_ctx_get_lock, iflag);
+	spin_unlock_irqrestore(&current_infop->nvmet_ctx_list_lock, iflag);
 
 	fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
 	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
@@ -1483,6 +1623,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
 	ctxp->size = size;
 	ctxp->oxid = oxid;
 	ctxp->sid = sid;
+	ctxp->idx = idx;
 	ctxp->state = LPFC_NVMET_STE_RCV;
 	ctxp->entry_cnt = 1;
 	ctxp->flag = 0;
@@ -1556,7 +1697,6 @@ dropit:
 
 	if (nvmebuf)
 		lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
-#endif
 }
 
 /**
@@ -1591,7 +1731,7 @@ lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 /**
  * lpfc_nvmet_unsol_fcp_event - Process an unsolicited event from an nvme nport
  * @phba: pointer to lpfc hba data structure.
- * @pring: pointer to a SLI ring.
+ * @idx: relative index of MRQ vector
  * @nvmebuf: pointer to received nvme data structure.
  *
  * This routine is used to process an unsolicited event received from a SLI
@@ -1602,7 +1742,7 @@ lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
  **/
 void
 lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
-			   struct lpfc_sli_ring *pring,
+			   uint32_t idx,
 			   struct rqb_dmabuf *nvmebuf,
 			   uint64_t isr_timestamp)
 {
@@ -1610,7 +1750,7 @@ lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
 		lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
 		return;
 	}
-	lpfc_nvmet_unsol_fcp_buffer(phba, pring, nvmebuf,
+	lpfc_nvmet_unsol_fcp_buffer(phba, idx, nvmebuf,
 				    isr_timestamp);
 }
 
@@ -1863,6 +2003,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
 		       nvmewqe->sli4_xritag);
 
 		/* Word 7 */
+		bf_set(wqe_pu, &wqe->fcp_tsend.wqe_com, 1);
 		bf_set(wqe_cmnd, &wqe->fcp_tsend.wqe_com, CMD_FCP_TSEND64_WQE);
 
 		/* Word 8 */
@@ -1939,7 +2080,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
 
 	case NVMET_FCOP_WRITEDATA:
 		/* Words 0 - 2 : The first sg segment */
-		txrdy = pci_pool_alloc(phba->txrdy_payload_pool,
+		txrdy = dma_pool_alloc(phba->txrdy_payload_pool,
 				       GFP_KERNEL, &physaddr);
 		if (!txrdy) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
@@ -1971,6 +2112,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
 		       nvmewqe->sli4_xritag);
 
 		/* Word 7 */
+		bf_set(wqe_pu, &wqe->fcp_treceive.wqe_com, 1);
 		bf_set(wqe_ar, &wqe->fcp_treceive.wqe_com, 0);
 		bf_set(wqe_cmnd, &wqe->fcp_treceive.wqe_com,
 		       CMD_FCP_TRECEIVE64_WQE);
@@ -2054,6 +2196,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
 		       nvmewqe->sli4_xritag);
 
 		/* Word 7 */
+		bf_set(wqe_pu, &wqe->fcp_trsp.wqe_com, 0);
 		bf_set(wqe_ag, &wqe->fcp_trsp.wqe_com, 1);
 		bf_set(wqe_cmnd, &wqe->fcp_trsp.wqe_com, CMD_FCP_TRSP64_WQE);
 
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.h b/drivers/scsi/lpfc/lpfc_nvmet.h
index 48a76788b003..25a65b0bb7f3 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.h
+++ b/drivers/scsi/lpfc/lpfc_nvmet.h
@@ -74,6 +74,19 @@ struct lpfc_nvmet_tgtport {
 	atomic_t xmt_abort_rsp_error;
 };
 
+struct lpfc_nvmet_ctx_info {
+	struct list_head nvmet_ctx_list;
+	spinlock_t	nvmet_ctx_list_lock; /* lock per CPU */
+	struct lpfc_nvmet_ctx_info *nvmet_ctx_next_cpu;
+	struct lpfc_nvmet_ctx_info *nvmet_ctx_start_cpu;
+	uint16_t	nvmet_ctx_list_cnt;
+	char pad[16];  /* pad to a cache-line */
+};
+
+/* This retrieves the context info associated with the specified cpu / mrq */
+#define lpfc_get_ctx_list(phba, cpu, mrq)  \
+	(phba->sli4_hba.nvmet_ctx_info + ((cpu * phba->cfg_nvmet_mrq) + mrq))
+
 struct lpfc_nvmet_rcv_ctx {
 	union {
 		struct nvmefc_tgt_ls_req ls_req;
@@ -92,6 +105,7 @@ struct lpfc_nvmet_rcv_ctx {
 	uint16_t size;
 	uint16_t entry_cnt;
 	uint16_t cpu;
+	uint16_t idx;
 	uint16_t state;
 	/* States */
 #define LPFC_NVMET_STE_LS_RCV		1
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index adc784539061..1a6f122bb25d 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -416,7 +416,7 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
 		 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
 		 * necessary to support the sg_tablesize.
 		 */
-		psb->data = pci_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
+		psb->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
 					GFP_KERNEL, &psb->dma_handle);
 		if (!psb->data) {
 			kfree(psb);
@@ -427,7 +427,7 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
 		/* Allocate iotag for psb->cur_iocbq. */
 		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
 		if (iotag == 0) {
-			pci_pool_free(phba->lpfc_sg_dma_buf_pool,
+			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
 				      psb->data, psb->dma_handle);
 			kfree(psb);
 			break;
@@ -826,7 +826,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
 		 * for the struct fcp_cmnd, struct fcp_rsp and the number
 		 * of bde's necessary to support the sg_tablesize.
 		 */
-		psb->data = pci_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
+		psb->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
 						GFP_KERNEL, &psb->dma_handle);
 		if (!psb->data) {
 			kfree(psb);
@@ -839,7 +839,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
 		 */
 		if (phba->cfg_enable_bg  && (((unsigned long)(psb->data) &
 		    (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
-			pci_pool_free(phba->lpfc_sg_dma_buf_pool,
+			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
 				      psb->data, psb->dma_handle);
 			kfree(psb);
 			break;
@@ -848,7 +848,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
 
 		lxri = lpfc_sli4_next_xritag(phba);
 		if (lxri == NO_XRI) {
-			pci_pool_free(phba->lpfc_sg_dma_buf_pool,
+			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
 				      psb->data, psb->dma_handle);
 			kfree(psb);
 			break;
@@ -857,7 +857,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
 		/* Allocate iotag for psb->cur_iocbq. */
 		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
 		if (iotag == 0) {
-			pci_pool_free(phba->lpfc_sg_dma_buf_pool,
+			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
 				      psb->data, psb->dma_handle);
 			kfree(psb);
 			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index e948ea05fd33..8b119f87b51d 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -80,8 +80,8 @@ static int lpfc_sli4_fp_handle_cqe(struct lpfc_hba *, struct lpfc_queue *,
 				    struct lpfc_cqe *);
 static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
 				       int);
-static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *, struct lpfc_eqe *,
-			uint32_t);
+static int lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
+				    struct lpfc_eqe *eqe, uint32_t qidx);
 static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
 static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
 static int lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba,
@@ -106,7 +106,7 @@ lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
  * -ENOMEM.
  * The caller is expected to hold the hbalock when calling this routine.
  **/
-static uint32_t
+static int
 lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
 {
 	union lpfc_wqe *temp_wqe;
@@ -123,7 +123,7 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
 	idx = ((q->host_index + 1) % q->entry_count);
 	if (idx == q->hba_index) {
 		q->WQ_overflow++;
-		return -ENOMEM;
+		return -EBUSY;
 	}
 	q->WQ_posted++;
 	/* set consumption flag every once in a while */
@@ -10741,7 +10741,7 @@ lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 	abtsiocbp->vport = vport;
 	abtsiocbp->wqe_cmpl = lpfc_nvme_abort_fcreq_cmpl;
 	retval = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abtsiocbp);
-	if (retval == IOCB_ERROR) {
+	if (retval) {
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
 				 "6147 Failed abts issue_wqe with status x%x "
 				 "for oxid x%x\n",
@@ -13010,7 +13010,7 @@ lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
  * completion queue, and then return.
  *
  **/
-static void
+static int
 lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
 	struct lpfc_queue *speq)
 {
@@ -13034,7 +13034,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 					"0365 Slow-path CQ identifier "
 					"(%d) does not exist\n", cqid);
-		return;
+		return 0;
 	}
 
 	/* Save EQ associated with this CQ */
@@ -13071,7 +13071,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 				"0370 Invalid completion queue type (%d)\n",
 				cq->type);
-		return;
+		return 0;
 	}
 
 	/* Catch the no cq entry condition, log an error */
@@ -13086,6 +13086,8 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
 	/* wake up worker thread if there are works to be done */
 	if (workposted)
 		lpfc_worker_wake_up(phba);
+
+	return ecount;
 }
 
 /**
@@ -13289,7 +13291,7 @@ lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
 		if (fc_hdr->fh_type == FC_TYPE_FCP) {
 			dma_buf->bytes_recv = bf_get(lpfc_rcqe_length,  rcqe);
 			lpfc_nvmet_unsol_fcp_event(
-				phba, phba->sli4_hba.els_wq->pring, dma_buf,
+				phba, idx, dma_buf,
 				cq->assoc_qp->isr_timestamp);
 			return false;
 		}
@@ -13393,7 +13395,7 @@ lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
  * queue and process all the entries on the completion queue, rearm the
  * completion queue, and then return.
  **/
-static void
+static int
 lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
 			uint32_t qidx)
 {
@@ -13409,7 +13411,7 @@ lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
 				"event: majorcode=x%x, minorcode=x%x\n",
 				bf_get_le32(lpfc_eqe_major_code, eqe),
 				bf_get_le32(lpfc_eqe_minor_code, eqe));
-		return;
+		return 0;
 	}
 
 	/* Get the reference to the corresponding CQ */
@@ -13446,8 +13448,9 @@ lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
 
 	/* Otherwise this is a Slow path event */
 	if (cq == NULL) {
-		lpfc_sli4_sp_handle_eqe(phba, eqe, phba->sli4_hba.hba_eq[qidx]);
-		return;
+		ecount = lpfc_sli4_sp_handle_eqe(phba, eqe,
+						 phba->sli4_hba.hba_eq[qidx]);
+		return ecount;
 	}
 
 process_cq:
@@ -13456,7 +13459,7 @@ process_cq:
 				"0368 Miss-matched fast-path completion "
 				"queue identifier: eqcqid=%d, fcpcqid=%d\n",
 				cqid, cq->queue_id);
-		return;
+		return 0;
 	}
 
 	/* Save EQ associated with this CQ */
@@ -13486,6 +13489,8 @@ process_cq:
 	/* wake up worker thread if there are works to be done */
 	if (workposted)
 		lpfc_worker_wake_up(phba);
+
+	return ecount;
 }
 
 static void
@@ -13706,6 +13711,7 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
 	struct lpfc_eqe *eqe;
 	unsigned long iflag;
 	int ecount = 0;
+	int ccount = 0;
 	int hba_eqidx;
 
 	/* Get the driver's phba structure from the dev_id */
@@ -13757,8 +13763,9 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
 		if (eqe == NULL)
 			break;
 
-		lpfc_sli4_hba_handle_eqe(phba, eqe, hba_eqidx);
-		if (!(++ecount % fpeq->entry_repost))
+		ccount += lpfc_sli4_hba_handle_eqe(phba, eqe, hba_eqidx);
+		if (!(++ecount % fpeq->entry_repost) ||
+		    ccount > LPFC_MAX_ISR_CQE)
 			break;
 		fpeq->EQ_processed++;
 	}
@@ -17051,7 +17058,7 @@ lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 	struct lpfc_dmabuf *pcmd = cmdiocb->context2;
 
 	if (pcmd && pcmd->virt)
-		pci_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
+		dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
 	kfree(pcmd);
 	lpfc_sli_release_iocbq(phba, cmdiocb);
 }
@@ -17079,7 +17086,7 @@ lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
 	/* Allocate buffer for command payload */
 	pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
 	if (pcmd)
-		pcmd->virt = pci_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
+		pcmd->virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
 					    &pcmd->phys);
 	if (!pcmd || !pcmd->virt)
 		goto exit;
@@ -17128,7 +17135,7 @@ exit:
 	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
 			"2023 Unable to process MDS loopback frame\n");
 	if (pcmd && pcmd->virt)
-		pci_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
+		dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
 	kfree(pcmd);
 	lpfc_sli_release_iocbq(phba, iocbq);
 	lpfc_in_buf_free(phba, &dmabuf->dbuf);
@@ -18888,6 +18895,7 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t ring_number,
 	struct lpfc_sglq *sglq;
 	struct lpfc_sli_ring *pring;
 	unsigned long iflags;
+	uint32_t ret = 0;
 
 	/* NVME_LS and NVME_LS ABTS requests. */
 	if (pwqe->iocb_flag & LPFC_IO_NVME_LS) {
@@ -18906,10 +18914,12 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t ring_number,
 		}
 		bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
 		       pwqe->sli4_xritag);
-		if (lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe)) {
+		ret = lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe);
+		if (ret) {
 			spin_unlock_irqrestore(&pring->ring_lock, iflags);
-			return WQE_ERROR;
+			return ret;
 		}
+
 		lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
 		spin_unlock_irqrestore(&pring->ring_lock, iflags);
 		return 0;
@@ -18924,9 +18934,10 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t ring_number,
 		wq = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx];
 		bf_set(wqe_cqid, &wqe->generic.wqe_com,
 		      phba->sli4_hba.nvme_cq[pwqe->hba_wqidx]->queue_id);
-		if (lpfc_sli4_wq_put(wq, wqe)) {
+		ret = lpfc_sli4_wq_put(wq, wqe);
+		if (ret) {
 			spin_unlock_irqrestore(&pring->ring_lock, iflags);
-			return WQE_ERROR;
+			return ret;
 		}
 		lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
 		spin_unlock_irqrestore(&pring->ring_lock, iflags);
@@ -18950,9 +18961,10 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t ring_number,
 		wq = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx];
 		bf_set(wqe_cqid, &wqe->generic.wqe_com,
 		      phba->sli4_hba.nvme_cq[pwqe->hba_wqidx]->queue_id);
-		if (lpfc_sli4_wq_put(wq, wqe)) {
+		ret = lpfc_sli4_wq_put(wq, wqe);
+		if (ret) {
 			spin_unlock_irqrestore(&pring->ring_lock, iflags);
-			return WQE_ERROR;
+			return ret;
 		}
 		lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
 		spin_unlock_irqrestore(&pring->ring_lock, iflags);
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 7a1d74e9e877..60200385fe00 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -158,6 +158,7 @@ struct lpfc_queue {
 #define LPFC_MQ_REPOST		8
 #define LPFC_CQ_REPOST		64
 #define LPFC_RQ_REPOST		64
+#define LPFC_MAX_ISR_CQE	64
 #define LPFC_RELEASE_NOTIFICATION_INTERVAL	32  /* For WQs */
 	uint32_t queue_id;	/* Queue ID assigned by the hardware */
 	uint32_t assoc_qid;     /* Queue ID associated with, for CQ/WQ/MQ */
@@ -419,6 +420,20 @@ struct lpfc_hba_eq_hdl {
 #define LPFC_MULTI_CPU_AFFINITY 0xffffffff
 };
 
+/* BB Credit recovery value */
+struct lpfc_bbscn_params {
+	uint32_t word0;
+#define lpfc_bbscn_min_SHIFT		0
+#define lpfc_bbscn_min_MASK		0x0000000F
+#define lpfc_bbscn_min_WORD		word0
+#define lpfc_bbscn_max_SHIFT		4
+#define lpfc_bbscn_max_MASK		0x0000000F
+#define lpfc_bbscn_max_WORD		word0
+#define lpfc_bbscn_def_SHIFT		8
+#define lpfc_bbscn_def_MASK		0x0000000F
+#define lpfc_bbscn_def_WORD		word0
+};
+
 /* Port Capabilities for SLI4 Parameters */
 struct lpfc_pc_sli4_params {
 	uint32_t supported;
@@ -550,6 +565,7 @@ struct lpfc_sli4_hba {
 	uint32_t ue_to_rp;
 	struct lpfc_register sli_intf;
 	struct lpfc_pc_sli4_params pc_sli4_params;
+	struct lpfc_bbscn_params bbscn_params;
 	struct lpfc_hba_eq_hdl *hba_eq_hdl; /* HBA per-WQ handle */
 
 	/* Pointers to the constructed SLI4 queues */
@@ -621,8 +637,6 @@ struct lpfc_sli4_hba {
 	uint16_t scsi_xri_start;
 	uint16_t els_xri_cnt;
 	uint16_t nvmet_xri_cnt;
-	uint16_t nvmet_ctx_get_cnt;
-	uint16_t nvmet_ctx_put_cnt;
 	uint16_t nvmet_io_wait_cnt;
 	uint16_t nvmet_io_wait_total;
 	struct list_head lpfc_els_sgl_list;
@@ -631,9 +645,8 @@ struct lpfc_sli4_hba {
 	struct list_head lpfc_abts_nvmet_ctx_list;
 	struct list_head lpfc_abts_scsi_buf_list;
 	struct list_head lpfc_abts_nvme_buf_list;
-	struct list_head lpfc_nvmet_ctx_get_list;
-	struct list_head lpfc_nvmet_ctx_put_list;
 	struct list_head lpfc_nvmet_io_wait_list;
+	struct lpfc_nvmet_ctx_info *nvmet_ctx_info;
 	struct lpfc_sglq **lpfc_sglq_active_list;
 	struct list_head lpfc_rpi_hdr_list;
 	unsigned long *rpi_bmask;
@@ -664,8 +677,6 @@ struct lpfc_sli4_hba {
 	spinlock_t abts_nvme_buf_list_lock; /* list of aborted SCSI IOs */
 	spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */
 	spinlock_t sgl_list_lock; /* list of aborted els IOs */
-	spinlock_t nvmet_ctx_get_lock; /* list of avail XRI contexts */
-	spinlock_t nvmet_ctx_put_lock; /* list of avail XRI contexts */
 	spinlock_t nvmet_io_wait_lock; /* IOs waiting for ctx resources */
 	uint32_t physical_port;
 
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index c6a24c3e2d5e..6aa192b3e4bf 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "11.4.0.1"
+#define LPFC_DRIVER_VERSION "11.4.0.3"
 #define LPFC_DRIVER_NAME		"lpfc"
 
 /* Used for SLI 2/3 */
diff --git a/drivers/scsi/mac53c94.c b/drivers/scsi/mac53c94.c
index a6682c508c4c..8c4d3003b68b 100644
--- a/drivers/scsi/mac53c94.c
+++ b/drivers/scsi/mac53c94.c
@@ -448,15 +448,14 @@ static int mac53c94_probe(struct macio_dev *mdev, const struct of_device_id *mat
 		ioremap(macio_resource_start(mdev, 1), 0x1000);
 	state->dmaintr = macio_irq(mdev, 1);
 	if (state->regs == NULL || state->dma == NULL) {
-		printk(KERN_ERR "mac53c94: ioremap failed for %s\n",
-		       node->full_name);
+		printk(KERN_ERR "mac53c94: ioremap failed for %pOF\n", node);
 		goto out_free;
 	}
 
 	clkprop = of_get_property(node, "clock-frequency", &proplen);
        	if (clkprop == NULL || proplen != sizeof(int)) {
-       		printk(KERN_ERR "%s: can't get clock frequency, "
-       		       "assuming 25MHz\n", node->full_name);
+       		printk(KERN_ERR "%pOF: can't get clock frequency, "
+       		       "assuming 25MHz\n", node);
        		state->clk_freq = 25000000;
        	} else
        		state->clk_freq = *(int *)clkprop;
@@ -469,7 +468,7 @@ static int mac53c94_probe(struct macio_dev *mdev, const struct of_device_id *mat
        				sizeof(struct dbdma_cmd), GFP_KERNEL);
        	if (dma_cmd_space == 0) {
        		printk(KERN_ERR "mac53c94: couldn't allocate dma "
-       		       "command space for %s\n", node->full_name);
+       		       "command space for %pOF\n", node);
 		rc = -ENOMEM;
        		goto out_free;
        	}
@@ -481,8 +480,8 @@ static int mac53c94_probe(struct macio_dev *mdev, const struct of_device_id *mat
 	mac53c94_init(state);
 
 	if (request_irq(state->intr, do_mac53c94_interrupt, 0, "53C94",state)) {
-		printk(KERN_ERR "mac53C94: can't get irq %d for %s\n",
-		       state->intr, node->full_name);
+		printk(KERN_ERR "mac53C94: can't get irq %d for %pOF\n",
+		       state->intr, node);
 		goto out_free_dma;
 	}
 
diff --git a/drivers/scsi/mac_esp.c b/drivers/scsi/mac_esp.c
index cdb61eaa2d1f..eb551f3cc471 100644
--- a/drivers/scsi/mac_esp.c
+++ b/drivers/scsi/mac_esp.c
@@ -348,26 +348,24 @@ static void mac_esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
 				 u32 dma_count, int write, u8 cmd)
 {
 	struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
-	u8 *fifo = esp->regs + ESP_FDATA * 16;
+	u8 __iomem *fifo = esp->regs + ESP_FDATA * 16;
+	u8 phase = esp->sreg & ESP_STAT_PMASK;
 
 	cmd &= ~ESP_CMD_DMA;
 	mep->error = 0;
 
 	if (write) {
+		u8 *dst = (u8 *)addr;
+		u8 mask = ~(phase == ESP_MIP ? ESP_INTR_FDONE : ESP_INTR_BSERV);
+
 		scsi_esp_cmd(esp, cmd);
 
 		while (1) {
-			unsigned int n;
-
-			n = mac_esp_wait_for_fifo(esp);
-			if (!n)
+			if (!mac_esp_wait_for_fifo(esp))
 				break;
 
-			if (n > esp_count)
-				n = esp_count;
-			esp_count -= n;
-
-			MAC_ESP_PIO_LOOP("%2@,%0@+", n);
+			*dst++ = esp_read8(ESP_FDATA);
+			--esp_count;
 
 			if (!esp_count)
 				break;
@@ -375,14 +373,17 @@ static void mac_esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
 			if (mac_esp_wait_for_intr(esp))
 				break;
 
-			if (((esp->sreg & ESP_STAT_PMASK) != ESP_DIP) &&
-			    ((esp->sreg & ESP_STAT_PMASK) != ESP_MIP))
+			if ((esp->sreg & ESP_STAT_PMASK) != phase)
 				break;
 
 			esp->ireg = esp_read8(ESP_INTRPT);
-			if ((esp->ireg & (ESP_INTR_DC | ESP_INTR_BSERV)) !=
-			    ESP_INTR_BSERV)
+			if (esp->ireg & mask) {
+				mep->error = 1;
 				break;
+			}
+
+			if (phase == ESP_MIP)
+				scsi_esp_cmd(esp, ESP_CMD_MOK);
 
 			scsi_esp_cmd(esp, ESP_CMD_TI);
 		}
@@ -402,14 +403,14 @@ static void mac_esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
 			if (mac_esp_wait_for_intr(esp))
 				break;
 
-			if (((esp->sreg & ESP_STAT_PMASK) != ESP_DOP) &&
-			    ((esp->sreg & ESP_STAT_PMASK) != ESP_MOP))
+			if ((esp->sreg & ESP_STAT_PMASK) != phase)
 				break;
 
 			esp->ireg = esp_read8(ESP_INTRPT);
-			if ((esp->ireg & (ESP_INTR_DC | ESP_INTR_BSERV)) !=
-			    ESP_INTR_BSERV)
+			if (esp->ireg & ~ESP_INTR_BSERV) {
+				mep->error = 1;
 				break;
+			}
 
 			n = MAC_ESP_FIFO_SIZE -
 			    (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES);
diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c
index 196acc79714b..dd6057359d7c 100644
--- a/drivers/scsi/mac_scsi.c
+++ b/drivers/scsi/mac_scsi.c
@@ -41,7 +41,7 @@
 #define NCR5380_intr                    macscsi_intr
 #define NCR5380_queue_command           macscsi_queue_command
 #define NCR5380_abort                   macscsi_abort
-#define NCR5380_bus_reset               macscsi_bus_reset
+#define NCR5380_host_reset              macscsi_host_reset
 #define NCR5380_info                    macscsi_info
 
 #include "NCR5380.h"
@@ -328,7 +328,7 @@ static struct scsi_host_template mac_scsi_template = {
 	.info			= macscsi_info,
 	.queuecommand		= macscsi_queue_command,
 	.eh_abort_handler	= macscsi_abort,
-	.eh_bus_reset_handler	= macscsi_bus_reset,
+	.eh_host_reset_handler	= macscsi_host_reset,
 	.can_queue		= 16,
 	.this_id		= 7,
 	.sg_tablesize		= 1,
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 3c63c292cb92..7195cff51d4c 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -311,13 +311,15 @@ mega_query_adapter(adapter_t *adapter)
 	   right 8 bits making them zero. This 0 value was hardcoded to fix
 	   sparse warnings. */
 	if (adapter->product_info.subsysvid == PCI_VENDOR_ID_HP) {
-		sprintf (adapter->fw_version, "%c%d%d.%d%d",
+		snprintf(adapter->fw_version, sizeof(adapter->fw_version),
+			 "%c%d%d.%d%d",
 			 adapter->product_info.fw_version[2],
 			 0,
 			 adapter->product_info.fw_version[1] & 0x0f,
 			 0,
 			 adapter->product_info.fw_version[0] & 0x0f);
-		sprintf (adapter->bios_version, "%c%d%d.%d%d",
+		snprintf(adapter->bios_version, sizeof(adapter->bios_version),
+			 "%c%d%d.%d%d",
 			 adapter->product_info.bios_version[2],
 			 0,
 			 adapter->product_info.bios_version[1] & 0x0f,
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index f0987f22ea70..ec3c43854978 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -341,8 +341,6 @@ static struct scsi_host_template megaraid_template_g = {
 	.proc_name			= "megaraid",
 	.queuecommand			= megaraid_queue_command,
 	.eh_abort_handler		= megaraid_abort_handler,
-	.eh_device_reset_handler	= megaraid_reset_handler,
-	.eh_bus_reset_handler		= megaraid_reset_handler,
 	.eh_host_reset_handler		= megaraid_reset_handler,
 	.change_queue_depth		= scsi_change_queue_depth,
 	.use_clustering			= ENABLE_CLUSTERING,
@@ -1153,8 +1151,8 @@ megaraid_mbox_setup_dma_pools(adapter_t *adapter)
 
 
 	// Allocate memory for 16-bytes aligned mailboxes
-	raid_dev->mbox_pool_handle = pci_pool_create("megaraid mbox pool",
-						adapter->pdev,
+	raid_dev->mbox_pool_handle = dma_pool_create("megaraid mbox pool",
+						&adapter->pdev->dev,
 						sizeof(mbox64_t) + 16,
 						16, 0);
 
@@ -1164,7 +1162,7 @@ megaraid_mbox_setup_dma_pools(adapter_t *adapter)
 
 	mbox_pci_blk = raid_dev->mbox_pool;
 	for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) {
-		mbox_pci_blk[i].vaddr = pci_pool_alloc(
+		mbox_pci_blk[i].vaddr = dma_pool_alloc(
 						raid_dev->mbox_pool_handle,
 						GFP_KERNEL,
 						&mbox_pci_blk[i].dma_addr);
@@ -1181,8 +1179,8 @@ megaraid_mbox_setup_dma_pools(adapter_t *adapter)
 	 * share common memory pool. Passthru structures piggyback on memory
 	 * allocted to extended passthru since passthru is smaller of the two
 	 */
-	raid_dev->epthru_pool_handle = pci_pool_create("megaraid mbox pthru",
-			adapter->pdev, sizeof(mraid_epassthru_t), 128, 0);
+	raid_dev->epthru_pool_handle = dma_pool_create("megaraid mbox pthru",
+			&adapter->pdev->dev, sizeof(mraid_epassthru_t), 128, 0);
 
 	if (raid_dev->epthru_pool_handle == NULL) {
 		goto fail_setup_dma_pool;
@@ -1190,7 +1188,7 @@ megaraid_mbox_setup_dma_pools(adapter_t *adapter)
 
 	epthru_pci_blk = raid_dev->epthru_pool;
 	for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) {
-		epthru_pci_blk[i].vaddr = pci_pool_alloc(
+		epthru_pci_blk[i].vaddr = dma_pool_alloc(
 						raid_dev->epthru_pool_handle,
 						GFP_KERNEL,
 						&epthru_pci_blk[i].dma_addr);
@@ -1202,8 +1200,8 @@ megaraid_mbox_setup_dma_pools(adapter_t *adapter)
 
 	// Allocate memory for each scatter-gather list. Request for 512 bytes
 	// alignment for each sg list
-	raid_dev->sg_pool_handle = pci_pool_create("megaraid mbox sg",
-					adapter->pdev,
+	raid_dev->sg_pool_handle = dma_pool_create("megaraid mbox sg",
+					&adapter->pdev->dev,
 					sizeof(mbox_sgl64) * MBOX_MAX_SG_SIZE,
 					512, 0);
 
@@ -1213,7 +1211,7 @@ megaraid_mbox_setup_dma_pools(adapter_t *adapter)
 
 	sg_pci_blk = raid_dev->sg_pool;
 	for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) {
-		sg_pci_blk[i].vaddr = pci_pool_alloc(
+		sg_pci_blk[i].vaddr = dma_pool_alloc(
 						raid_dev->sg_pool_handle,
 						GFP_KERNEL,
 						&sg_pci_blk[i].dma_addr);
@@ -1249,29 +1247,29 @@ megaraid_mbox_teardown_dma_pools(adapter_t *adapter)
 
 	sg_pci_blk = raid_dev->sg_pool;
 	for (i = 0; i < MBOX_MAX_SCSI_CMDS && sg_pci_blk[i].vaddr; i++) {
-		pci_pool_free(raid_dev->sg_pool_handle, sg_pci_blk[i].vaddr,
+		dma_pool_free(raid_dev->sg_pool_handle, sg_pci_blk[i].vaddr,
 			sg_pci_blk[i].dma_addr);
 	}
 	if (raid_dev->sg_pool_handle)
-		pci_pool_destroy(raid_dev->sg_pool_handle);
+		dma_pool_destroy(raid_dev->sg_pool_handle);
 
 
 	epthru_pci_blk = raid_dev->epthru_pool;
 	for (i = 0; i < MBOX_MAX_SCSI_CMDS && epthru_pci_blk[i].vaddr; i++) {
-		pci_pool_free(raid_dev->epthru_pool_handle,
+		dma_pool_free(raid_dev->epthru_pool_handle,
 			epthru_pci_blk[i].vaddr, epthru_pci_blk[i].dma_addr);
 	}
 	if (raid_dev->epthru_pool_handle)
-		pci_pool_destroy(raid_dev->epthru_pool_handle);
+		dma_pool_destroy(raid_dev->epthru_pool_handle);
 
 
 	mbox_pci_blk = raid_dev->mbox_pool;
 	for (i = 0; i < MBOX_MAX_SCSI_CMDS && mbox_pci_blk[i].vaddr; i++) {
-		pci_pool_free(raid_dev->mbox_pool_handle,
+		dma_pool_free(raid_dev->mbox_pool_handle,
 			mbox_pci_blk[i].vaddr, mbox_pci_blk[i].dma_addr);
 	}
 	if (raid_dev->mbox_pool_handle)
-		pci_pool_destroy(raid_dev->mbox_pool_handle);
+		dma_pool_destroy(raid_dev->mbox_pool_handle);
 
 	return;
 }
diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c
index 544d6f7e6138..65b6f6ace3a5 100644
--- a/drivers/scsi/megaraid/megaraid_mm.c
+++ b/drivers/scsi/megaraid/megaraid_mm.c
@@ -574,7 +574,7 @@ mraid_mm_attach_buf(mraid_mmadp_t *adp, uioc_t *kioc, int xferlen)
 
 	kioc->pool_index	= right_pool;
 	kioc->free_buf		= 1;
-	kioc->buf_vaddr		= pci_pool_alloc(pool->handle, GFP_ATOMIC,
+	kioc->buf_vaddr		= dma_pool_alloc(pool->handle, GFP_ATOMIC,
 							&kioc->buf_paddr);
 	spin_unlock_irqrestore(&pool->lock, flags);
 
@@ -658,7 +658,7 @@ mraid_mm_dealloc_kioc(mraid_mmadp_t *adp, uioc_t *kioc)
 		 * not in use
 		 */
 		if (kioc->free_buf == 1)
-			pci_pool_free(pool->handle, kioc->buf_vaddr, 
+			dma_pool_free(pool->handle, kioc->buf_vaddr, 
 							kioc->buf_paddr);
 		else
 			pool->in_use = 0;
@@ -940,8 +940,8 @@ mraid_mm_register_adp(mraid_mmadp_t *lld_adp)
 						GFP_KERNEL);
 	adapter->mbox_list	= kmalloc(sizeof(mbox64_t) * lld_adp->max_kioc,
 						GFP_KERNEL);
-	adapter->pthru_dma_pool = pci_pool_create("megaraid mm pthru pool",
-						adapter->pdev,
+	adapter->pthru_dma_pool = dma_pool_create("megaraid mm pthru pool",
+						&adapter->pdev->dev,
 						sizeof(mraid_passthru_t),
 						16, 0);
 
@@ -970,7 +970,7 @@ mraid_mm_register_adp(mraid_mmadp_t *lld_adp)
 
 		kioc		= adapter->kioc_list + i;
 		kioc->cmdbuf	= (uint64_t)(unsigned long)(mbox_list + i);
-		kioc->pthru32	= pci_pool_alloc(adapter->pthru_dma_pool,
+		kioc->pthru32	= dma_pool_alloc(adapter->pthru_dma_pool,
 						GFP_KERNEL, &kioc->pthru32_h);
 
 		if (!kioc->pthru32) {
@@ -1006,7 +1006,7 @@ pthru_dma_pool_error:
 	for (i = 0; i < lld_adp->max_kioc; i++) {
 		kioc = adapter->kioc_list + i;
 		if (kioc->pthru32) {
-			pci_pool_free(adapter->pthru_dma_pool, kioc->pthru32,
+			dma_pool_free(adapter->pthru_dma_pool, kioc->pthru32,
 				kioc->pthru32_h);
 		}
 	}
@@ -1017,7 +1017,7 @@ memalloc_error:
 	kfree(adapter->mbox_list);
 
 	if (adapter->pthru_dma_pool)
-		pci_pool_destroy(adapter->pthru_dma_pool);
+		dma_pool_destroy(adapter->pthru_dma_pool);
 
 	kfree(adapter);
 
@@ -1086,14 +1086,15 @@ mraid_mm_setup_dma_pools(mraid_mmadp_t *adp)
 		pool->buf_size = bufsize;
 		spin_lock_init(&pool->lock);
 
-		pool->handle = pci_pool_create("megaraid mm data buffer",
-						adp->pdev, bufsize, 16, 0);
+		pool->handle = dma_pool_create("megaraid mm data buffer",
+						&adp->pdev->dev, bufsize,
+						16, 0);
 
 		if (!pool->handle) {
 			goto dma_pool_setup_error;
 		}
 
-		pool->vaddr = pci_pool_alloc(pool->handle, GFP_KERNEL,
+		pool->vaddr = dma_pool_alloc(pool->handle, GFP_KERNEL,
 							&pool->paddr);
 
 		if (!pool->vaddr)
@@ -1163,14 +1164,14 @@ mraid_mm_free_adp_resources(mraid_mmadp_t *adp)
 
 		kioc = adp->kioc_list + i;
 
-		pci_pool_free(adp->pthru_dma_pool, kioc->pthru32,
+		dma_pool_free(adp->pthru_dma_pool, kioc->pthru32,
 				kioc->pthru32_h);
 	}
 
 	kfree(adp->kioc_list);
 	kfree(adp->mbox_list);
 
-	pci_pool_destroy(adp->pthru_dma_pool);
+	dma_pool_destroy(adp->pthru_dma_pool);
 
 
 	return;
@@ -1194,10 +1195,10 @@ mraid_mm_teardown_dma_pools(mraid_mmadp_t *adp)
 		if (pool->handle) {
 
 			if (pool->vaddr)
-				pci_pool_free(pool->handle, pool->vaddr,
+				dma_pool_free(pool->handle, pool->vaddr,
 							pool->paddr);
 
-			pci_pool_destroy(pool->handle);
+			dma_pool_destroy(pool->handle);
 			pool->handle = NULL;
 		}
 	}
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 2b209bbb4c91..a6722c93a295 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -35,8 +35,8 @@
 /*
  * MegaRAID SAS Driver meta data
  */
-#define MEGASAS_VERSION				"07.701.17.00-rc1"
-#define MEGASAS_RELDATE				"March 2, 2017"
+#define MEGASAS_VERSION				"07.702.06.00-rc1"
+#define MEGASAS_RELDATE				"June 21, 2017"
 
 /*
  * Device IDs
@@ -2115,7 +2115,6 @@ struct megasas_instance {
 	u32 *crash_dump_buf;
 	dma_addr_t crash_dump_h;
 	void *crash_buf[MAX_CRASH_DUMP_SIZE];
-	u32 crash_buf_pages;
 	unsigned int    fw_crash_buffer_size;
 	unsigned int    fw_crash_state;
 	unsigned int    fw_crash_buffer_offset;
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 71c4746341ea..e518dadc8161 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -49,6 +49,7 @@
 #include <linux/blkdev.h>
 #include <linux/mutex.h>
 #include <linux/poll.h>
+#include <linux/vmalloc.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
@@ -1995,9 +1996,12 @@ static void megasas_complete_outstanding_ioctls(struct megasas_instance *instanc
 			if (cmd_fusion->sync_cmd_idx != (u32)ULONG_MAX) {
 				cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
 				if (cmd_mfi->sync_cmd &&
-					cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT)
+				    (cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT)) {
+					cmd_mfi->frame->hdr.cmd_status =
+							MFI_STAT_WRONG_STATE;
 					megasas_complete_cmd(instance,
 							     cmd_mfi, DID_OK);
+				}
 			}
 		}
 	} else {
@@ -2791,7 +2795,7 @@ static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
 		cmd = (struct megasas_cmd_fusion *)scmd->SCp.ptr;
 		if (cmd)
 			megasas_dump_frame(cmd->io_request,
-				sizeof(struct MPI2_RAID_SCSI_IO_REQUEST));
+				MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE);
 		ret = megasas_reset_fusion(scmd->device->host,
 				SCSIIO_TIMEOUT_OCR);
 	} else
@@ -3862,19 +3866,19 @@ static void megasas_teardown_frame_pool(struct megasas_instance *instance)
 		cmd = instance->cmd_list[i];
 
 		if (cmd->frame)
-			pci_pool_free(instance->frame_dma_pool, cmd->frame,
+			dma_pool_free(instance->frame_dma_pool, cmd->frame,
 				      cmd->frame_phys_addr);
 
 		if (cmd->sense)
-			pci_pool_free(instance->sense_dma_pool, cmd->sense,
+			dma_pool_free(instance->sense_dma_pool, cmd->sense,
 				      cmd->sense_phys_addr);
 	}
 
 	/*
 	 * Now destroy the pool itself
 	 */
-	pci_pool_destroy(instance->frame_dma_pool);
-	pci_pool_destroy(instance->sense_dma_pool);
+	dma_pool_destroy(instance->frame_dma_pool);
+	dma_pool_destroy(instance->sense_dma_pool);
 
 	instance->frame_dma_pool = NULL;
 	instance->sense_dma_pool = NULL;
@@ -3925,22 +3929,23 @@ static int megasas_create_frame_pool(struct megasas_instance *instance)
 	/*
 	 * Use DMA pool facility provided by PCI layer
 	 */
-	instance->frame_dma_pool = pci_pool_create("megasas frame pool",
-					instance->pdev, instance->mfi_frame_size,
-					256, 0);
+	instance->frame_dma_pool = dma_pool_create("megasas frame pool",
+					&instance->pdev->dev,
+					instance->mfi_frame_size, 256, 0);
 
 	if (!instance->frame_dma_pool) {
 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup frame pool\n");
 		return -ENOMEM;
 	}
 
-	instance->sense_dma_pool = pci_pool_create("megasas sense pool",
-						   instance->pdev, 128, 4, 0);
+	instance->sense_dma_pool = dma_pool_create("megasas sense pool",
+						   &instance->pdev->dev, 128,
+						   4, 0);
 
 	if (!instance->sense_dma_pool) {
 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup sense pool\n");
 
-		pci_pool_destroy(instance->frame_dma_pool);
+		dma_pool_destroy(instance->frame_dma_pool);
 		instance->frame_dma_pool = NULL;
 
 		return -ENOMEM;
@@ -3955,10 +3960,10 @@ static int megasas_create_frame_pool(struct megasas_instance *instance)
 
 		cmd = instance->cmd_list[i];
 
-		cmd->frame = pci_pool_alloc(instance->frame_dma_pool,
+		cmd->frame = dma_pool_alloc(instance->frame_dma_pool,
 					    GFP_KERNEL, &cmd->frame_phys_addr);
 
-		cmd->sense = pci_pool_alloc(instance->sense_dma_pool,
+		cmd->sense = dma_pool_alloc(instance->sense_dma_pool,
 					    GFP_KERNEL, &cmd->sense_phys_addr);
 
 		/*
@@ -3966,7 +3971,7 @@ static int megasas_create_frame_pool(struct megasas_instance *instance)
 		 * whatever has been allocated
 		 */
 		if (!cmd->frame || !cmd->sense) {
-			dev_printk(KERN_DEBUG, &instance->pdev->dev, "pci_pool_alloc failed\n");
+			dev_printk(KERN_DEBUG, &instance->pdev->dev, "dma_pool_alloc failed\n");
 			megasas_teardown_frame_pool(instance);
 			return -ENOMEM;
 		}
@@ -5478,7 +5483,8 @@ static int megasas_init_fw(struct megasas_instance *instance)
 		instance->throttlequeuedepth =
 				MEGASAS_THROTTLE_QUEUE_DEPTH;
 
-	if (resetwaittime > MEGASAS_RESET_WAIT_TIME)
+	if ((resetwaittime < 1) ||
+	    (resetwaittime > MEGASAS_RESET_WAIT_TIME))
 		resetwaittime = MEGASAS_RESET_WAIT_TIME;
 
 	if ((scmd_timeout < 10) || (scmd_timeout > MEGASAS_DEFAULT_CMD_TIMEOUT))
@@ -5649,6 +5655,14 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
 		prev_aen.word =
 			le32_to_cpu(instance->aen_cmd->frame->dcmd.mbox.w[1]);
 
+		if ((curr_aen.members.class < MFI_EVT_CLASS_DEBUG) ||
+		    (curr_aen.members.class > MFI_EVT_CLASS_DEAD)) {
+			dev_info(&instance->pdev->dev,
+				 "%s %d out of range class %d send by application\n",
+				 __func__, __LINE__, curr_aen.members.class);
+			return 0;
+		}
+
 		/*
 		 * A class whose enum value is smaller is inclusive of all
 		 * higher values. If a PROGRESS (= -1) was previously
@@ -6096,14 +6110,12 @@ static int megasas_probe_one(struct pci_dev *pdev,
 		instance->pd_info = pci_alloc_consistent(pdev,
 			sizeof(struct MR_PD_INFO), &instance->pd_info_h);
 
-		instance->pd_info = pci_alloc_consistent(pdev,
-			sizeof(struct MR_PD_INFO), &instance->pd_info_h);
-		instance->tgt_prop = pci_alloc_consistent(pdev,
-			sizeof(struct MR_TARGET_PROPERTIES), &instance->tgt_prop_h);
-
 		if (!instance->pd_info)
 			dev_err(&instance->pdev->dev, "Failed to alloc mem for pd_info\n");
 
+		instance->tgt_prop = pci_alloc_consistent(pdev,
+			sizeof(struct MR_TARGET_PROPERTIES), &instance->tgt_prop_h);
+
 		if (!instance->tgt_prop)
 			dev_err(&instance->pdev->dev, "Failed to alloc mem for tgt_prop\n");
 
@@ -6663,9 +6675,14 @@ skip_firing_dcmds:
 						  fusion->max_map_sz,
 						  fusion->ld_map[i],
 						  fusion->ld_map_phys[i]);
-			if (fusion->ld_drv_map[i])
-				free_pages((ulong)fusion->ld_drv_map[i],
-					fusion->drv_map_pages);
+			if (fusion->ld_drv_map[i]) {
+				if (is_vmalloc_addr(fusion->ld_drv_map[i]))
+					vfree(fusion->ld_drv_map[i]);
+				else
+					free_pages((ulong)fusion->ld_drv_map[i],
+						   fusion->drv_map_pages);
+			}
+
 			if (fusion->pd_seq_sync[i])
 				dma_free_coherent(&instance->pdev->dev,
 					pd_seq_map_sz,
@@ -6866,6 +6883,7 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
 	void *sense = NULL;
 	dma_addr_t sense_handle;
 	unsigned long *sense_ptr;
+	u32 opcode;
 
 	memset(kbuff_arr, 0, sizeof(kbuff_arr));
 
@@ -6893,15 +6911,16 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
 	cmd->frame->hdr.flags &= cpu_to_le16(~(MFI_FRAME_IEEE |
 					       MFI_FRAME_SGL64 |
 					       MFI_FRAME_SENSE64));
+	opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
 
-	if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_SHUTDOWN) {
+	if (opcode == MR_DCMD_CTRL_SHUTDOWN) {
 		if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS) {
 			megasas_return_cmd(instance, cmd);
 			return -1;
 		}
 	}
 
-	if (cmd->frame->dcmd.opcode == MR_DRIVER_SET_APP_CRASHDUMP_MODE) {
+	if (opcode == MR_DRIVER_SET_APP_CRASHDUMP_MODE) {
 		error = megasas_set_crash_dump_params_ioctl(cmd);
 		megasas_return_cmd(instance, cmd);
 		return error;
@@ -6975,8 +6994,7 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
 		cmd->sync_cmd = 0;
 		dev_err(&instance->pdev->dev,
 			"return -EBUSY from %s %d opcode 0x%x cmd->cmd_status_drv 0x%x\n",
-			__func__, __LINE__, cmd->frame->dcmd.opcode,
-			cmd->cmd_status_drv);
+			__func__, __LINE__, opcode,	cmd->cmd_status_drv);
 		return -EBUSY;
 	}
 
@@ -7323,49 +7341,39 @@ static struct pci_driver megasas_pci_driver = {
 /*
  * Sysfs driver attributes
  */
-static ssize_t megasas_sysfs_show_version(struct device_driver *dd, char *buf)
+static ssize_t version_show(struct device_driver *dd, char *buf)
 {
 	return snprintf(buf, strlen(MEGASAS_VERSION) + 2, "%s\n",
 			MEGASAS_VERSION);
 }
+static DRIVER_ATTR_RO(version);
 
-static DRIVER_ATTR(version, S_IRUGO, megasas_sysfs_show_version, NULL);
-
-static ssize_t
-megasas_sysfs_show_release_date(struct device_driver *dd, char *buf)
+static ssize_t release_date_show(struct device_driver *dd, char *buf)
 {
 	return snprintf(buf, strlen(MEGASAS_RELDATE) + 2, "%s\n",
 		MEGASAS_RELDATE);
 }
+static DRIVER_ATTR_RO(release_date);
 
-static DRIVER_ATTR(release_date, S_IRUGO, megasas_sysfs_show_release_date, NULL);
-
-static ssize_t
-megasas_sysfs_show_support_poll_for_event(struct device_driver *dd, char *buf)
+static ssize_t support_poll_for_event_show(struct device_driver *dd, char *buf)
 {
 	return sprintf(buf, "%u\n", support_poll_for_event);
 }
+static DRIVER_ATTR_RO(support_poll_for_event);
 
-static DRIVER_ATTR(support_poll_for_event, S_IRUGO,
-			megasas_sysfs_show_support_poll_for_event, NULL);
-
- static ssize_t
-megasas_sysfs_show_support_device_change(struct device_driver *dd, char *buf)
+static ssize_t support_device_change_show(struct device_driver *dd, char *buf)
 {
 	return sprintf(buf, "%u\n", support_device_change);
 }
+static DRIVER_ATTR_RO(support_device_change);
 
-static DRIVER_ATTR(support_device_change, S_IRUGO,
-			megasas_sysfs_show_support_device_change, NULL);
-
-static ssize_t
-megasas_sysfs_show_dbg_lvl(struct device_driver *dd, char *buf)
+static ssize_t dbg_lvl_show(struct device_driver *dd, char *buf)
 {
 	return sprintf(buf, "%u\n", megasas_dbg_lvl);
 }
 
-static ssize_t
-megasas_sysfs_set_dbg_lvl(struct device_driver *dd, const char *buf, size_t count)
+static ssize_t dbg_lvl_store(struct device_driver *dd, const char *buf,
+			     size_t count)
 {
 	int retval = count;
 
@@ -7375,9 +7383,7 @@ megasas_sysfs_set_dbg_lvl(struct device_driver *dd, const char *buf, size_t coun
 	}
 	return retval;
 }
-
-static DRIVER_ATTR(dbg_lvl, S_IRUGO|S_IWUSR, megasas_sysfs_show_dbg_lvl,
-		megasas_sysfs_set_dbg_lvl);
+static DRIVER_ATTR_RW(dbg_lvl);
 
 static inline void megasas_remove_scsi_device(struct scsi_device *sdev)
 {
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index 62affa76133d..ecc699a65bac 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -67,16 +67,6 @@ MODULE_PARM_DESC(lb_pending_cmds, "Change raid-1 load balancing outstanding "
 #define ABS_DIFF(a, b)   (((a) > (b)) ? ((a) - (b)) : ((b) - (a)))
 #define MR_LD_STATE_OPTIMAL 3
 
-#ifdef FALSE
-#undef FALSE
-#endif
-#define FALSE 0
-
-#ifdef TRUE
-#undef TRUE
-#endif
-#define TRUE 1
-
 #define SPAN_ROW_SIZE(map, ld, index_) (MR_LdSpanPtrGet(ld, index_, map)->spanRowSize)
 #define SPAN_ROW_DATA_SIZE(map_, ld, index_)   (MR_LdSpanPtrGet(ld, index_, map)->spanRowDataSize)
 #define SPAN_INVALID  0xff
@@ -709,7 +699,7 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
 	u32     pd, arRef, r1_alt_pd;
 	u8      physArm, span;
 	u64     row;
-	u8	retval = TRUE;
+	u8	retval = true;
 	u64	*pdBlock = &io_info->pdBlock;
 	__le16	*pDevHandle = &io_info->devHandle;
 	u8	*pPdInterface = &io_info->pd_interface;
@@ -727,7 +717,7 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
 	if (raid->level == 6) {
 		logArm = get_arm_from_strip(instance, ld, stripRow, map);
 		if (logArm == -1U)
-			return FALSE;
+			return false;
 		rowMod = mega_mod64(row, SPAN_ROW_SIZE(map, ld, span));
 		armQ = SPAN_ROW_SIZE(map, ld, span) - 1 - rowMod;
 		arm = armQ + 1 + logArm;
@@ -738,7 +728,7 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
 		/* Calculate the arm */
 		physArm = get_arm(instance, ld, span, stripRow, map);
 	if (physArm == 0xFF)
-		return FALSE;
+		return false;
 
 	arRef       = MR_LdSpanArrayGet(ld, span, map);
 	pd          = MR_ArPdGet(arRef, physArm, map);
@@ -812,7 +802,7 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
 	u32         pd, arRef, r1_alt_pd;
 	u8          physArm, span;
 	u64         row;
-	u8	    retval = TRUE;
+	u8	    retval = true;
 	u64	    *pdBlock = &io_info->pdBlock;
 	__le16	    *pDevHandle = &io_info->devHandle;
 	u8	    *pPdInterface = &io_info->pd_interface;
@@ -829,7 +819,7 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
 		u32 rowMod, armQ, arm;
 
 		if (raid->rowSize == 0)
-			return FALSE;
+			return false;
 		/* get logical row mod */
 		rowMod = mega_mod64(row, raid->rowSize);
 		armQ = raid->rowSize-1-rowMod; /* index of Q drive */
@@ -839,7 +829,7 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
 		physArm = (u8)arm;
 	} else  {
 		if (raid->modFactor == 0)
-			return FALSE;
+			return false;
 		physArm = MR_LdDataArmGet(ld,  mega_mod64(stripRow,
 							  raid->modFactor),
 					  map);
@@ -851,7 +841,7 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
 	} else {
 		span = (u8)MR_GetSpanBlock(ld, row, pdBlock, map);
 		if (span == SPAN_INVALID)
-			return FALSE;
+			return false;
 	}
 
 	/* Get the array on which this span is present */
@@ -954,7 +944,7 @@ MR_BuildRaidContext(struct megasas_instance *instance,
 	 */
 	if (raid->rowDataSize == 0) {
 		if (MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize == 0)
-			return FALSE;
+			return false;
 		else if (instance->UnevenSpanSupport) {
 			io_info->IoforUnevenSpan = 1;
 		} else {
@@ -963,7 +953,7 @@ MR_BuildRaidContext(struct megasas_instance *instance,
 				"rowDataSize = 0x%0x,"
 				"but there is _NO_ UnevenSpanSupport\n",
 				MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize);
-			return FALSE;
+			return false;
 		}
 	}
 
@@ -988,7 +978,7 @@ MR_BuildRaidContext(struct megasas_instance *instance,
 			dev_info(&instance->pdev->dev, "return from %s %d."
 				"Send IO w/o region lock.\n",
 				__func__, __LINE__);
-			return FALSE;
+			return false;
 		}
 
 		if (raid->spanDepth == 1) {
@@ -1004,7 +994,7 @@ MR_BuildRaidContext(struct megasas_instance *instance,
 				(unsigned long long)start_row,
 				(unsigned long long)start_strip,
 				(unsigned long long)endStrip);
-			return FALSE;
+			return false;
 		}
 		io_info->start_span	= startlba_span;
 		io_info->start_row	= start_row;
@@ -1038,7 +1028,7 @@ MR_BuildRaidContext(struct megasas_instance *instance,
 					       raid->capability.
 					       fpWriteAcrossStripe));
 	} else
-		io_info->fpOkForIo = FALSE;
+		io_info->fpOkForIo = false;
 
 	if (numRows == 1) {
 		/* single-strip IOs can always lock only the data needed */
@@ -1124,7 +1114,7 @@ MR_BuildRaidContext(struct megasas_instance *instance,
 					pRAID_Context, map);
 		/* If IO on an invalid Pd, then FP is not possible.*/
 		if (io_info->devHandle == MR_DEVHANDLE_INVALID)
-			io_info->fpOkForIo = FALSE;
+			io_info->fpOkForIo = false;
 		return retval;
 	} else if (isRead) {
 		uint stripIdx;
@@ -1138,10 +1128,10 @@ MR_BuildRaidContext(struct megasas_instance *instance,
 				    start_strip + stripIdx, ref_in_start_stripe,
 				    io_info, pRAID_Context, map);
 			if (!retval)
-				return TRUE;
+				return true;
 		}
 	}
-	return TRUE;
+	return true;
 }
 
 /*
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 985510628f56..11bd2e698b84 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -313,20 +313,20 @@ megasas_free_cmds_fusion(struct megasas_instance *instance)
 		cmd = fusion->cmd_list[i];
 		if (cmd) {
 			if (cmd->sg_frame)
-				pci_pool_free(fusion->sg_dma_pool, cmd->sg_frame,
+				dma_pool_free(fusion->sg_dma_pool, cmd->sg_frame,
 				      cmd->sg_frame_phys_addr);
 			if (cmd->sense)
-				pci_pool_free(fusion->sense_dma_pool, cmd->sense,
+				dma_pool_free(fusion->sense_dma_pool, cmd->sense,
 				      cmd->sense_phys_addr);
 		}
 	}
 
 	if (fusion->sg_dma_pool) {
-		pci_pool_destroy(fusion->sg_dma_pool);
+		dma_pool_destroy(fusion->sg_dma_pool);
 		fusion->sg_dma_pool = NULL;
 	}
 	if (fusion->sense_dma_pool) {
-		pci_pool_destroy(fusion->sense_dma_pool);
+		dma_pool_destroy(fusion->sense_dma_pool);
 		fusion->sense_dma_pool = NULL;
 	}
 
@@ -343,11 +343,11 @@ megasas_free_cmds_fusion(struct megasas_instance *instance)
 			fusion->request_alloc_sz, fusion->req_frames_desc,
 			fusion->req_frames_desc_phys);
 	if (fusion->io_request_frames)
-		pci_pool_free(fusion->io_request_frames_pool,
+		dma_pool_free(fusion->io_request_frames_pool,
 			fusion->io_request_frames,
 			fusion->io_request_frames_phys);
 	if (fusion->io_request_frames_pool) {
-		pci_pool_destroy(fusion->io_request_frames_pool);
+		dma_pool_destroy(fusion->io_request_frames_pool);
 		fusion->io_request_frames_pool = NULL;
 	}
 
@@ -376,12 +376,12 @@ static int megasas_create_sg_sense_fusion(struct megasas_instance *instance)
 
 
 	fusion->sg_dma_pool =
-			pci_pool_create("mr_sg", instance->pdev,
+			dma_pool_create("mr_sg", &instance->pdev->dev,
 				instance->max_chain_frame_sz,
 				MR_DEFAULT_NVME_PAGE_SIZE, 0);
 	/* SCSI_SENSE_BUFFERSIZE  = 96 bytes */
 	fusion->sense_dma_pool =
-			pci_pool_create("mr_sense", instance->pdev,
+			dma_pool_create("mr_sense", &instance->pdev->dev,
 				SCSI_SENSE_BUFFERSIZE, 64, 0);
 
 	if (!fusion->sense_dma_pool || !fusion->sg_dma_pool) {
@@ -395,10 +395,10 @@ static int megasas_create_sg_sense_fusion(struct megasas_instance *instance)
 	 */
 	for (i = 0; i < max_cmd; i++) {
 		cmd = fusion->cmd_list[i];
-		cmd->sg_frame = pci_pool_alloc(fusion->sg_dma_pool,
+		cmd->sg_frame = dma_pool_alloc(fusion->sg_dma_pool,
 					GFP_KERNEL, &cmd->sg_frame_phys_addr);
 
-		cmd->sense = pci_pool_alloc(fusion->sense_dma_pool,
+		cmd->sense = dma_pool_alloc(fusion->sense_dma_pool,
 					GFP_KERNEL, &cmd->sense_phys_addr);
 		if (!cmd->sg_frame || !cmd->sense) {
 			dev_err(&instance->pdev->dev,
@@ -410,7 +410,7 @@ static int megasas_create_sg_sense_fusion(struct megasas_instance *instance)
 	/* create sense buffer for the raid 1/10 fp */
 	for (i = max_cmd; i < instance->max_mpt_cmds; i++) {
 		cmd = fusion->cmd_list[i];
-		cmd->sense = pci_pool_alloc(fusion->sense_dma_pool,
+		cmd->sense = dma_pool_alloc(fusion->sense_dma_pool,
 			GFP_KERNEL, &cmd->sense_phys_addr);
 		if (!cmd->sense) {
 			dev_err(&instance->pdev->dev,
@@ -479,7 +479,7 @@ megasas_alloc_request_fusion(struct megasas_instance *instance)
 	}
 
 	fusion->io_request_frames_pool =
-			pci_pool_create("mr_ioreq", instance->pdev,
+			dma_pool_create("mr_ioreq", &instance->pdev->dev,
 				fusion->io_frames_alloc_sz, 16, 0);
 
 	if (!fusion->io_request_frames_pool) {
@@ -489,7 +489,7 @@ megasas_alloc_request_fusion(struct megasas_instance *instance)
 	}
 
 	fusion->io_request_frames =
-			pci_pool_alloc(fusion->io_request_frames_pool,
+			dma_pool_alloc(fusion->io_request_frames_pool,
 				GFP_KERNEL, &fusion->io_request_frames_phys);
 	if (!fusion->io_request_frames) {
 		dev_err(&instance->pdev->dev,
@@ -509,7 +509,7 @@ megasas_alloc_reply_fusion(struct megasas_instance *instance)
 
 	count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
 	fusion->reply_frames_desc_pool =
-			pci_pool_create("mr_reply", instance->pdev,
+			dma_pool_create("mr_reply", &instance->pdev->dev,
 				fusion->reply_alloc_sz * count, 16, 0);
 
 	if (!fusion->reply_frames_desc_pool) {
@@ -519,7 +519,7 @@ megasas_alloc_reply_fusion(struct megasas_instance *instance)
 	}
 
 	fusion->reply_frames_desc[0] =
-		pci_pool_alloc(fusion->reply_frames_desc_pool,
+		dma_pool_alloc(fusion->reply_frames_desc_pool,
 			GFP_KERNEL, &fusion->reply_frames_desc_phys[0]);
 	if (!fusion->reply_frames_desc[0]) {
 		dev_err(&instance->pdev->dev,
@@ -562,8 +562,10 @@ megasas_alloc_rdpq_fusion(struct megasas_instance *instance)
 	memset(fusion->rdpq_virt, 0,
 			sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) * MAX_MSIX_QUEUES_FUSION);
 	count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
-	fusion->reply_frames_desc_pool = pci_pool_create("mr_rdpq",
-							 instance->pdev, fusion->reply_alloc_sz, 16, 0);
+	fusion->reply_frames_desc_pool = dma_pool_create("mr_rdpq",
+							 &instance->pdev->dev,
+							 fusion->reply_alloc_sz,
+							 16, 0);
 
 	if (!fusion->reply_frames_desc_pool) {
 		dev_err(&instance->pdev->dev,
@@ -573,7 +575,7 @@ megasas_alloc_rdpq_fusion(struct megasas_instance *instance)
 
 	for (i = 0; i < count; i++) {
 		fusion->reply_frames_desc[i] =
-				pci_pool_alloc(fusion->reply_frames_desc_pool,
+				dma_pool_alloc(fusion->reply_frames_desc_pool,
 					GFP_KERNEL, &fusion->reply_frames_desc_phys[i]);
 		if (!fusion->reply_frames_desc[i]) {
 			dev_err(&instance->pdev->dev,
@@ -601,13 +603,13 @@ megasas_free_rdpq_fusion(struct megasas_instance *instance) {
 
 	for (i = 0; i < MAX_MSIX_QUEUES_FUSION; i++) {
 		if (fusion->reply_frames_desc[i])
-			pci_pool_free(fusion->reply_frames_desc_pool,
+			dma_pool_free(fusion->reply_frames_desc_pool,
 				fusion->reply_frames_desc[i],
 				fusion->reply_frames_desc_phys[i]);
 	}
 
 	if (fusion->reply_frames_desc_pool)
-		pci_pool_destroy(fusion->reply_frames_desc_pool);
+		dma_pool_destroy(fusion->reply_frames_desc_pool);
 
 	if (fusion->rdpq_virt)
 		pci_free_consistent(instance->pdev,
@@ -623,12 +625,12 @@ megasas_free_reply_fusion(struct megasas_instance *instance) {
 	fusion = instance->ctrl_context;
 
 	if (fusion->reply_frames_desc[0])
-		pci_pool_free(fusion->reply_frames_desc_pool,
+		dma_pool_free(fusion->reply_frames_desc_pool,
 			fusion->reply_frames_desc[0],
 			fusion->reply_frames_desc_phys[0]);
 
 	if (fusion->reply_frames_desc_pool)
-		pci_pool_destroy(fusion->reply_frames_desc_pool);
+		dma_pool_destroy(fusion->reply_frames_desc_pool);
 
 }
 
@@ -914,7 +916,6 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
 		ret = 1;
 		goto fail_fw_init;
 	}
-	dev_info(&instance->pdev->dev, "Init cmd success\n");
 
 	ret = 0;
 
@@ -925,6 +926,10 @@ fail_fw_init:
 				  sizeof(struct MPI2_IOC_INIT_REQUEST),
 				  IOCInitMessage, ioc_init_handle);
 fail_get_cmd:
+	dev_err(&instance->pdev->dev,
+		"Init cmd return status %s for SCSI host %d\n",
+		ret ? "FAILED" : "SUCCESS", instance->host->host_no);
+
 	return ret;
 }
 
@@ -1261,6 +1266,80 @@ megasas_display_intel_branding(struct megasas_instance *instance)
 }
 
 /**
+ * megasas_allocate_raid_maps -	Allocate memory for RAID maps
+ * @instance:				Adapter soft state
+ *
+ * return:				if success: return 0
+ *					failed:  return -ENOMEM
+ */
+static inline int megasas_allocate_raid_maps(struct megasas_instance *instance)
+{
+	struct fusion_context *fusion;
+	int i = 0;
+
+	fusion = instance->ctrl_context;
+
+	fusion->drv_map_pages = get_order(fusion->drv_map_sz);
+
+	for (i = 0; i < 2; i++) {
+		fusion->ld_map[i] = NULL;
+
+		fusion->ld_drv_map[i] = (void *)
+			__get_free_pages(__GFP_ZERO | GFP_KERNEL,
+					 fusion->drv_map_pages);
+
+		if (!fusion->ld_drv_map[i]) {
+			fusion->ld_drv_map[i] = vzalloc(fusion->drv_map_sz);
+
+			if (!fusion->ld_drv_map[i]) {
+				dev_err(&instance->pdev->dev,
+					"Could not allocate memory for local map"
+					" size requested: %d\n",
+					fusion->drv_map_sz);
+				goto ld_drv_map_alloc_fail;
+			}
+		}
+	}
+
+	for (i = 0; i < 2; i++) {
+		fusion->ld_map[i] = dma_alloc_coherent(&instance->pdev->dev,
+						       fusion->max_map_sz,
+						       &fusion->ld_map_phys[i],
+						       GFP_KERNEL);
+		if (!fusion->ld_map[i]) {
+			dev_err(&instance->pdev->dev,
+				"Could not allocate memory for map info %s:%d\n",
+				__func__, __LINE__);
+			goto ld_map_alloc_fail;
+		}
+	}
+
+	return 0;
+
+ld_map_alloc_fail:
+	for (i = 0; i < 2; i++) {
+		if (fusion->ld_map[i])
+			dma_free_coherent(&instance->pdev->dev,
+					  fusion->max_map_sz,
+					  fusion->ld_map[i],
+					  fusion->ld_map_phys[i]);
+	}
+
+ld_drv_map_alloc_fail:
+	for (i = 0; i < 2; i++) {
+		if (fusion->ld_drv_map[i]) {
+			if (is_vmalloc_addr(fusion->ld_drv_map[i]))
+				vfree(fusion->ld_drv_map[i]);
+			else
+				free_pages((ulong)fusion->ld_drv_map[i],
+					   fusion->drv_map_pages);
+		}
+	}
+
+	return -ENOMEM;
+}
+
+/**
  * megasas_init_adapter_fusion -	Initializes the FW
  * @instance:		Adapter soft state
  *
@@ -1379,45 +1458,14 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
 	instance->r1_ldio_hint_default =  MR_R1_LDIO_PIGGYBACK_DEFAULT;
 	fusion->fast_path_io = 0;
 
-	fusion->drv_map_pages = get_order(fusion->drv_map_sz);
-	for (i = 0; i < 2; i++) {
-		fusion->ld_map[i] = NULL;
-		fusion->ld_drv_map[i] = (void *)__get_free_pages(GFP_KERNEL,
-			fusion->drv_map_pages);
-		if (!fusion->ld_drv_map[i]) {
-			dev_err(&instance->pdev->dev, "Could not allocate "
-				"memory for local map info for %d pages\n",
-				fusion->drv_map_pages);
-			if (i == 1)
-				free_pages((ulong)fusion->ld_drv_map[0],
-					fusion->drv_map_pages);
-			goto fail_ioc_init;
-		}
-		memset(fusion->ld_drv_map[i], 0,
-			((1 << PAGE_SHIFT) << fusion->drv_map_pages));
-	}
-
-	for (i = 0; i < 2; i++) {
-		fusion->ld_map[i] = dma_alloc_coherent(&instance->pdev->dev,
-						       fusion->max_map_sz,
-						       &fusion->ld_map_phys[i],
-						       GFP_KERNEL);
-		if (!fusion->ld_map[i]) {
-			dev_err(&instance->pdev->dev, "Could not allocate memory "
-			       "for map info\n");
-			goto fail_map_info;
-		}
-	}
+	if (megasas_allocate_raid_maps(instance))
+		goto fail_ioc_init;
 
 	if (!megasas_get_map_info(instance))
 		megasas_sync_map_info(instance);
 
 	return 0;
 
-fail_map_info:
-	if (i == 1)
-		dma_free_coherent(&instance->pdev->dev, fusion->max_map_sz,
-				  fusion->ld_map[0], fusion->ld_map_phys[0]);
 fail_ioc_init:
 	megasas_free_cmds_fusion(instance);
 fail_alloc_cmds:
@@ -3287,7 +3335,7 @@ build_mpt_mfi_pass_thru(struct megasas_instance *instance,
 	mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
 		MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;
 
-	mpi25_ieee_chain->Length = cpu_to_le32(instance->max_chain_frame_sz);
+	mpi25_ieee_chain->Length = cpu_to_le32(instance->mfi_frame_size);
 }
 
 /**
@@ -3369,17 +3417,13 @@ megasas_alloc_host_crash_buffer(struct megasas_instance *instance)
 {
 	unsigned int i;
 
-	instance->crash_buf_pages = get_order(CRASH_DMA_BUF_SIZE);
 	for (i = 0; i < MAX_CRASH_DUMP_SIZE; i++) {
-		instance->crash_buf[i] = (void	*)__get_free_pages(GFP_KERNEL,
-				instance->crash_buf_pages);
+		instance->crash_buf[i] = vzalloc(CRASH_DMA_BUF_SIZE);
 		if (!instance->crash_buf[i]) {
 			dev_info(&instance->pdev->dev, "Firmware crash dump "
 				"memory allocation failed at index %d\n", i);
 			break;
 		}
-		memset(instance->crash_buf[i], 0,
-			((1 << PAGE_SHIFT) << instance->crash_buf_pages));
 	}
 	instance->drv_buf_alloc = i;
 }
@@ -3391,12 +3435,10 @@ megasas_alloc_host_crash_buffer(struct megasas_instance *instance)
 void
 megasas_free_host_crash_buffer(struct megasas_instance *instance)
 {
-	unsigned int i
-;
+	unsigned int i;
 	for (i = 0; i < instance->drv_buf_alloc; i++) {
 		if (instance->crash_buf[i])
-			free_pages((ulong)instance->crash_buf[i],
-					instance->crash_buf_pages);
+			vfree(instance->crash_buf[i]);
 	}
 	instance->drv_buf_index = 0;
 	instance->drv_buf_alloc = 0;
@@ -3556,6 +3598,7 @@ int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
 			}
 		}
 
+		megasas_complete_cmd_dpc_fusion((unsigned long)instance);
 		outstanding = atomic_read(&instance->fw_outstanding);
 		if (!outstanding)
 			goto out;
@@ -3564,8 +3607,6 @@ int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
 			dev_notice(&instance->pdev->dev, "[%2d]waiting for %d "
 			       "commands to complete for scsi%d\n", i,
 			       outstanding, instance->host->host_no);
-			megasas_complete_cmd_dpc_fusion(
-				(unsigned long)instance);
 		}
 		msleep(1000);
 	}
@@ -3623,6 +3664,15 @@ void megasas_refire_mgmt_cmd(struct megasas_instance *instance)
 
 		if (!smid)
 			continue;
+
+		/* Do not refire shutdown command */
+		if (le32_to_cpu(cmd_mfi->frame->dcmd.opcode) ==
+			MR_DCMD_CTRL_SHUTDOWN) {
+			cmd_mfi->frame->dcmd.cmd_status = MFI_STAT_OK;
+			megasas_complete_cmd(instance, cmd_mfi, DID_OK);
+			continue;
+		}
+
 		req_desc = megasas_get_request_descriptor
 					(instance, smid - 1);
 		refire_cmd = req_desc && ((cmd_mfi->frame->dcmd.opcode !=
@@ -3750,7 +3800,7 @@ megasas_issue_tm(struct megasas_instance *instance, u16 device_handle,
 	struct megasas_cmd_fusion *cmd_fusion;
 	struct megasas_cmd *cmd_mfi;
 	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
-	struct fusion_context *fusion;
+	struct fusion_context *fusion = NULL;
 	struct megasas_cmd_fusion *scsi_lookup;
 	int rc;
 	struct MPI2_SCSI_TASK_MANAGE_REPLY *mpi_reply;
@@ -3777,8 +3827,6 @@ megasas_issue_tm(struct megasas_instance *instance, u16 device_handle,
 	cmd_fusion->request_desc = req_desc;
 	req_desc->Words = 0;
 
-	scsi_lookup = fusion->cmd_list[smid_task - 1];
-
 	mr_request = (struct MR_TASK_MANAGE_REQUEST *) cmd_fusion->io_request;
 	memset(mr_request, 0, sizeof(struct MR_TASK_MANAGE_REQUEST));
 	mpi_request = (struct MPI2_SCSI_TASK_MANAGE_REQUEST *) &mr_request->TmRequest;
@@ -3825,13 +3873,13 @@ megasas_issue_tm(struct megasas_instance *instance, u16 device_handle,
 	rc = SUCCESS;
 	switch (type) {
 	case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
+		scsi_lookup = fusion->cmd_list[smid_task - 1];
+
 		if (scsi_lookup->scmd == NULL)
 			break;
 		else {
 			instance->instancet->disable_intr(instance);
 			megasas_sync_irqs((unsigned long)instance);
-			megasas_complete_cmd_dpc_fusion
-					((unsigned long)instance);
 			instance->instancet->enable_intr(instance);
 			if (scsi_lookup->scmd == NULL)
 				break;
@@ -3843,9 +3891,7 @@ megasas_issue_tm(struct megasas_instance *instance, u16 device_handle,
 		if ((channel == 0xFFFFFFFF) && (id == 0xFFFFFFFF))
 			break;
 		instance->instancet->disable_intr(instance);
-		msleep(1000);
-		megasas_complete_cmd_dpc_fusion
-				((unsigned long)instance);
+		megasas_sync_irqs((unsigned long)instance);
 		rc = megasas_track_scsiio(instance, id, channel);
 		instance->instancet->enable_intr(instance);
 
@@ -4271,9 +4317,6 @@ transition_to_ready:
 			megasas_fusion_update_can_queue(instance, OCR_CONTEXT);
 
 			if (megasas_ioc_init_fusion(instance)) {
-				dev_warn(&instance->pdev->dev,
-				       "megasas_ioc_init_fusion() failed! for "
-				       "scsi%d\n", instance->host->host_no);
 				if (instance->requestorId && !reason)
 					goto fail_kill_adapter;
 				else
@@ -4319,6 +4362,10 @@ transition_to_ready:
 			instance->instancet->enable_intr(instance);
 			atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
 
+			dev_info(&instance->pdev->dev, "Interrupts are enabled and"
+				" controller is OPERATIONAL for scsi:%d\n",
+				instance->host->host_no);
+
 			/* Restart SR-IOV heartbeat */
 			if (instance->requestorId) {
 				if (!megasas_sriov_start_heartbeat(instance, 0))
@@ -4330,11 +4377,6 @@ transition_to_ready:
 					instance->skip_heartbeat_timer_del = 1;
 			}
 
-			/* Adapter reset completed successfully */
-			dev_warn(&instance->pdev->dev, "Reset "
-			       "successful for scsi%d.\n",
-				instance->host->host_no);
-
 			if (instance->crash_dump_drv_support &&
 				instance->crash_dump_app_support)
 				megasas_set_crash_dump_params(instance,
@@ -4344,6 +4386,12 @@ transition_to_ready:
 					MR_CRASH_BUF_TURN_OFF);
 
 			retval = SUCCESS;
+
+			/* Adapter reset completed successfully */
+			dev_warn(&instance->pdev->dev,
+				 "Reset successful for scsi%d.\n",
+				 instance->host->host_no);
+
 			goto out;
 		}
 fail_kill_adapter:
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 18039bba26c4..87999905bca3 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -615,9 +615,9 @@ _base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
 		    (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED) ?
 		    "start" : "stop");
 		if (event_data->DiscoveryStatus)
-			pr_info("discovery_status(0x%08x)",
+			pr_cont(" discovery_status(0x%08x)",
 			    le32_to_cpu(event_data->DiscoveryStatus));
-			pr_info("\n");
+		pr_cont("\n");
 		return;
 	}
 	case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
@@ -3198,9 +3198,8 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 	}
 
 	if (ioc->sense) {
-		pci_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
-		if (ioc->sense_dma_pool)
-			pci_pool_destroy(ioc->sense_dma_pool);
+		dma_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
+		dma_pool_destroy(ioc->sense_dma_pool);
 		dexitprintk(ioc, pr_info(MPT3SAS_FMT
 			"sense_pool(0x%p): free\n",
 			ioc->name, ioc->sense));
@@ -3208,9 +3207,8 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 	}
 
 	if (ioc->reply) {
-		pci_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma);
-		if (ioc->reply_dma_pool)
-			pci_pool_destroy(ioc->reply_dma_pool);
+		dma_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma);
+		dma_pool_destroy(ioc->reply_dma_pool);
 		dexitprintk(ioc, pr_info(MPT3SAS_FMT
 			"reply_pool(0x%p): free\n",
 			ioc->name, ioc->reply));
@@ -3218,10 +3216,9 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 	}
 
 	if (ioc->reply_free) {
-		pci_pool_free(ioc->reply_free_dma_pool, ioc->reply_free,
+		dma_pool_free(ioc->reply_free_dma_pool, ioc->reply_free,
 		    ioc->reply_free_dma);
-		if (ioc->reply_free_dma_pool)
-			pci_pool_destroy(ioc->reply_free_dma_pool);
+		dma_pool_destroy(ioc->reply_free_dma_pool);
 		dexitprintk(ioc, pr_info(MPT3SAS_FMT
 			"reply_free_pool(0x%p): free\n",
 			ioc->name, ioc->reply_free));
@@ -3232,7 +3229,7 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 		do {
 			rps = &ioc->reply_post[i];
 			if (rps->reply_post_free) {
-				pci_pool_free(
+				dma_pool_free(
 				    ioc->reply_post_free_dma_pool,
 				    rps->reply_post_free,
 				    rps->reply_post_free_dma);
@@ -3244,8 +3241,7 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 		} while (ioc->rdpq_array_enable &&
 			   (++i < ioc->reply_queue_count));
 
-		if (ioc->reply_post_free_dma_pool)
-			pci_pool_destroy(ioc->reply_post_free_dma_pool);
+		dma_pool_destroy(ioc->reply_post_free_dma_pool);
 		kfree(ioc->reply_post);
 	}
 
@@ -3266,12 +3262,11 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 	if (ioc->chain_lookup) {
 		for (i = 0; i < ioc->chain_depth; i++) {
 			if (ioc->chain_lookup[i].chain_buffer)
-				pci_pool_free(ioc->chain_dma_pool,
+				dma_pool_free(ioc->chain_dma_pool,
 				    ioc->chain_lookup[i].chain_buffer,
 				    ioc->chain_lookup[i].chain_buffer_dma);
 		}
-		if (ioc->chain_dma_pool)
-			pci_pool_destroy(ioc->chain_dma_pool);
+		dma_pool_destroy(ioc->chain_dma_pool);
 		free_pages((ulong)ioc->chain_lookup, ioc->chain_pages);
 		ioc->chain_lookup = NULL;
 	}
@@ -3446,23 +3441,23 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 			ioc->name);
 		goto out;
 	}
-	ioc->reply_post_free_dma_pool = pci_pool_create("reply_post_free pool",
-	    ioc->pdev, sz, 16, 0);
+	ioc->reply_post_free_dma_pool = dma_pool_create("reply_post_free pool",
+	    &ioc->pdev->dev, sz, 16, 0);
 	if (!ioc->reply_post_free_dma_pool) {
 		pr_err(MPT3SAS_FMT
-		 "reply_post_free pool: pci_pool_create failed\n",
+		 "reply_post_free pool: dma_pool_create failed\n",
 		 ioc->name);
 		goto out;
 	}
 	i = 0;
 	do {
 		ioc->reply_post[i].reply_post_free =
-		    pci_pool_alloc(ioc->reply_post_free_dma_pool,
+		    dma_pool_alloc(ioc->reply_post_free_dma_pool,
 		    GFP_KERNEL,
 		    &ioc->reply_post[i].reply_post_free_dma);
 		if (!ioc->reply_post[i].reply_post_free) {
 			pr_err(MPT3SAS_FMT
-			"reply_post_free pool: pci_pool_alloc failed\n",
+			"reply_post_free pool: dma_pool_alloc failed\n",
 			ioc->name);
 			goto out;
 		}
@@ -3577,15 +3572,15 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 			ioc->name);
 		goto out;
 	}
-	ioc->chain_dma_pool = pci_pool_create("chain pool", ioc->pdev,
+	ioc->chain_dma_pool = dma_pool_create("chain pool", &ioc->pdev->dev,
 	    ioc->chain_segment_sz, 16, 0);
 	if (!ioc->chain_dma_pool) {
-		pr_err(MPT3SAS_FMT "chain_dma_pool: pci_pool_create failed\n",
+		pr_err(MPT3SAS_FMT "chain_dma_pool: dma_pool_create failed\n",
 			ioc->name);
 		goto out;
 	}
 	for (i = 0; i < ioc->chain_depth; i++) {
-		ioc->chain_lookup[i].chain_buffer = pci_pool_alloc(
+		ioc->chain_lookup[i].chain_buffer = dma_pool_alloc(
 		    ioc->chain_dma_pool , GFP_KERNEL,
 		    &ioc->chain_lookup[i].chain_buffer_dma);
 		if (!ioc->chain_lookup[i].chain_buffer) {
@@ -3630,17 +3625,17 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 
 	/* sense buffers, 4 byte align */
 	sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE;
-	ioc->sense_dma_pool = pci_pool_create("sense pool", ioc->pdev, sz, 4,
-	    0);
+	ioc->sense_dma_pool = dma_pool_create("sense pool", &ioc->pdev->dev, sz,
+					      4, 0);
 	if (!ioc->sense_dma_pool) {
-		pr_err(MPT3SAS_FMT "sense pool: pci_pool_create failed\n",
+		pr_err(MPT3SAS_FMT "sense pool: dma_pool_create failed\n",
 		    ioc->name);
 		goto out;
 	}
-	ioc->sense = pci_pool_alloc(ioc->sense_dma_pool , GFP_KERNEL,
+	ioc->sense = dma_pool_alloc(ioc->sense_dma_pool, GFP_KERNEL,
 	    &ioc->sense_dma);
 	if (!ioc->sense) {
-		pr_err(MPT3SAS_FMT "sense pool: pci_pool_alloc failed\n",
+		pr_err(MPT3SAS_FMT "sense pool: dma_pool_alloc failed\n",
 		    ioc->name);
 		goto out;
 	}
@@ -3654,17 +3649,17 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 
 	/* reply pool, 4 byte align */
 	sz = ioc->reply_free_queue_depth * ioc->reply_sz;
-	ioc->reply_dma_pool = pci_pool_create("reply pool", ioc->pdev, sz, 4,
-	    0);
+	ioc->reply_dma_pool = dma_pool_create("reply pool", &ioc->pdev->dev, sz,
+					      4, 0);
 	if (!ioc->reply_dma_pool) {
-		pr_err(MPT3SAS_FMT "reply pool: pci_pool_create failed\n",
+		pr_err(MPT3SAS_FMT "reply pool: dma_pool_create failed\n",
 		    ioc->name);
 		goto out;
 	}
-	ioc->reply = pci_pool_alloc(ioc->reply_dma_pool , GFP_KERNEL,
+	ioc->reply = dma_pool_alloc(ioc->reply_dma_pool, GFP_KERNEL,
 	    &ioc->reply_dma);
 	if (!ioc->reply) {
-		pr_err(MPT3SAS_FMT "reply pool: pci_pool_alloc failed\n",
+		pr_err(MPT3SAS_FMT "reply pool: dma_pool_alloc failed\n",
 		    ioc->name);
 		goto out;
 	}
@@ -3680,17 +3675,17 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 
 	/* reply free queue, 16 byte align */
 	sz = ioc->reply_free_queue_depth * 4;
-	ioc->reply_free_dma_pool = pci_pool_create("reply_free pool",
-	    ioc->pdev, sz, 16, 0);
+	ioc->reply_free_dma_pool = dma_pool_create("reply_free pool",
+	    &ioc->pdev->dev, sz, 16, 0);
 	if (!ioc->reply_free_dma_pool) {
-		pr_err(MPT3SAS_FMT "reply_free pool: pci_pool_create failed\n",
+		pr_err(MPT3SAS_FMT "reply_free pool: dma_pool_create failed\n",
 			ioc->name);
 		goto out;
 	}
-	ioc->reply_free = pci_pool_alloc(ioc->reply_free_dma_pool , GFP_KERNEL,
+	ioc->reply_free = dma_pool_alloc(ioc->reply_free_dma_pool, GFP_KERNEL,
 	    &ioc->reply_free_dma);
 	if (!ioc->reply_free) {
-		pr_err(MPT3SAS_FMT "reply_free pool: pci_pool_alloc failed\n",
+		pr_err(MPT3SAS_FMT "reply_free pool: dma_pool_alloc failed\n",
 			ioc->name);
 		goto out;
 	}
@@ -3708,7 +3703,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 	    ioc->config_page_sz, &ioc->config_page_dma);
 	if (!ioc->config_page) {
 		pr_err(MPT3SAS_FMT
-			"config page: pci_pool_alloc failed\n",
+			"config page: dma_pool_alloc failed\n",
 			ioc->name);
 		goto out;
 	}
@@ -5499,10 +5494,10 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
 	ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
 	mutex_init(&ioc->ctl_cmds.mutex);
 
-	if (!ioc->base_cmds.reply || !ioc->transport_cmds.reply ||
-	    !ioc->scsih_cmds.reply || !ioc->tm_cmds.reply ||
-	    !ioc->config_cmds.reply || !ioc->ctl_cmds.reply ||
-	    !ioc->ctl_cmds.sense) {
+	if (!ioc->base_cmds.reply || !ioc->port_enable_cmds.reply ||
+	    !ioc->transport_cmds.reply || !ioc->scsih_cmds.reply ||
+	    !ioc->tm_cmds.reply || !ioc->config_cmds.reply ||
+	    !ioc->ctl_cmds.reply || !ioc->ctl_cmds.sense) {
 		r = -ENOMEM;
 		goto out_free_resources;
 	}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 099ab4ca7edf..a77bb7dc12b1 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -970,7 +970,7 @@ struct MPT3SAS_ADAPTER {
 	u8		id;
 	int		cpu_count;
 	char		name[MPT_NAME_LENGTH];
-	char		driver_name[MPT_NAME_LENGTH];
+	char		driver_name[MPT_NAME_LENGTH - 8];
 	char		tmp_string[MPT_STRING_LENGTH];
 	struct pci_dev	*pdev;
 	Mpi2SystemInterfaceRegs_t __iomem *chip;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
index e7a7a704a315..d3940c5d079d 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -1870,6 +1870,38 @@ _transport_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates)
 	return rc;
 }
 
+static int
+_transport_map_smp_buffer(struct device *dev, struct bsg_buffer *buf,
+		dma_addr_t *dma_addr, size_t *dma_len, void **p)
+{
+	/* Check if the request is split across multiple segments */
+	if (buf->sg_cnt > 1) {
+		*p = dma_alloc_coherent(dev, buf->payload_len, dma_addr,
+				GFP_KERNEL);
+		if (!*p)
+			return -ENOMEM;
+		*dma_len = buf->payload_len;
+	} else {
+		if (!dma_map_sg(dev, buf->sg_list, 1, DMA_BIDIRECTIONAL))
+			return -ENOMEM;
+		*dma_addr = sg_dma_address(buf->sg_list);
+		*dma_len = sg_dma_len(buf->sg_list);
+		*p = NULL;
+	}
+
+	return 0;
+}
+
+static void
+_transport_unmap_smp_buffer(struct device *dev, struct bsg_buffer *buf,
+		dma_addr_t dma_addr, void *p)
+{
+	if (p)
+		dma_free_coherent(dev, buf->payload_len, p, dma_addr);
+	else
+		dma_unmap_sg(dev, buf->sg_list, 1, DMA_BIDIRECTIONAL);
+}
+
 /**
  * _transport_smp_handler - transport portal for smp passthru
  * @shost: shost object
@@ -1880,9 +1912,9 @@ _transport_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates)
  * Example:
  *           smp_rep_general /sys/class/bsg/expander-5:0
  */
-static int
-_transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
-	struct request *req)
+static void
+_transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
+		struct sas_rphy *rphy)
 {
 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
 	Mpi2SmpPassthroughRequest_t *mpi_request;
@@ -1891,33 +1923,25 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
 	u16 smid;
 	u32 ioc_state;
 	void *psge;
-	u8 issue_reset = 0;
-	dma_addr_t dma_addr_in = 0;
-	dma_addr_t dma_addr_out = 0;
-	dma_addr_t pci_dma_in = 0;
-	dma_addr_t pci_dma_out = 0;
-	void *pci_addr_in = NULL;
-	void *pci_addr_out = NULL;
+	dma_addr_t dma_addr_in;
+	dma_addr_t dma_addr_out;
+	void *addr_in = NULL;
+	void *addr_out = NULL;
+	size_t dma_len_in;
+	size_t dma_len_out;
 	u16 wait_state_count;
-	struct request *rsp = req->next_rq;
-	struct bio_vec bvec;
-	struct bvec_iter iter;
-
-	if (!rsp) {
-		pr_err(MPT3SAS_FMT "%s: the smp response space is missing\n",
-			ioc->name, __func__);
-		return -EINVAL;
-	}
+	unsigned int reslen = 0;
 
 	if (ioc->shost_recovery || ioc->pci_error_recovery) {
 		pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
 		    __func__, ioc->name);
-		return -EFAULT;
+		rc = -EFAULT;
+		goto out;
 	}
 
 	rc = mutex_lock_interruptible(&ioc->transport_cmds.mutex);
 	if (rc)
-		return rc;
+		goto out;
 
 	if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) {
 		pr_err(MPT3SAS_FMT "%s: transport_cmds in use\n", ioc->name,
@@ -1927,58 +1951,20 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
 	}
 	ioc->transport_cmds.status = MPT3_CMD_PENDING;
 
-	/* Check if the request is split across multiple segments */
-	if (bio_multiple_segments(req->bio)) {
-		u32 offset = 0;
-
-		/* Allocate memory and copy the request */
-		pci_addr_out = pci_alloc_consistent(ioc->pdev,
-		    blk_rq_bytes(req), &pci_dma_out);
-		if (!pci_addr_out) {
-			pr_info(MPT3SAS_FMT "%s(): PCI Addr out = NULL\n",
-			    ioc->name, __func__);
-			rc = -ENOMEM;
-			goto out;
-		}
-
-		bio_for_each_segment(bvec, req->bio, iter) {
-			memcpy(pci_addr_out + offset,
-			    page_address(bvec.bv_page) + bvec.bv_offset,
-			    bvec.bv_len);
-			offset += bvec.bv_len;
-		}
-	} else {
-		dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio),
-		    blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL);
-		if (pci_dma_mapping_error(ioc->pdev, dma_addr_out)) {
-			pr_info(MPT3SAS_FMT "%s(): DMA Addr out = NULL\n",
-			    ioc->name, __func__);
-			rc = -ENOMEM;
-			goto free_pci;
-		}
+	rc = _transport_map_smp_buffer(&ioc->pdev->dev, &job->request_payload,
+			&dma_addr_out, &dma_len_out, &addr_out);
+	if (rc)
+		goto out;
+	if (addr_out) {
+		sg_copy_to_buffer(job->request_payload.sg_list,
+				job->request_payload.sg_cnt, addr_out,
+				job->request_payload.payload_len);
 	}
 
-	/* Check if the response needs to be populated across
-	 * multiple segments */
-	if (bio_multiple_segments(rsp->bio)) {
-		pci_addr_in = pci_alloc_consistent(ioc->pdev, blk_rq_bytes(rsp),
-		    &pci_dma_in);
-		if (!pci_addr_in) {
-			pr_info(MPT3SAS_FMT "%s(): PCI Addr in = NULL\n",
-			    ioc->name, __func__);
-			rc = -ENOMEM;
-			goto unmap;
-		}
-	} else {
-		dma_addr_in =  pci_map_single(ioc->pdev, bio_data(rsp->bio),
-		    blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL);
-		if (pci_dma_mapping_error(ioc->pdev, dma_addr_in)) {
-			pr_info(MPT3SAS_FMT "%s(): DMA Addr in = NULL\n",
-			    ioc->name, __func__);
-			rc = -ENOMEM;
-			goto unmap;
-		}
-	}
+	rc = _transport_map_smp_buffer(&ioc->pdev->dev, &job->reply_payload,
+			&dma_addr_in, &dma_len_in, &addr_in);
+	if (rc)
+		goto unmap_out;
 
 	wait_state_count = 0;
 	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
@@ -1988,7 +1974,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
 			    "%s: failed due to ioc not operational\n",
 			    ioc->name, __func__);
 			rc = -EFAULT;
-			goto unmap;
+			goto unmap_in;
 		}
 		ssleep(1);
 		ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
@@ -2005,7 +1991,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
 		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
 		    ioc->name, __func__);
 		rc = -EAGAIN;
-		goto unmap;
+		goto unmap_in;
 	}
 
 	rc = 0;
@@ -2018,15 +2004,11 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
 	mpi_request->SASAddress = (rphy) ?
 	    cpu_to_le64(rphy->identify.sas_address) :
 	    cpu_to_le64(ioc->sas_hba.sas_address);
-	mpi_request->RequestDataLength = cpu_to_le16(blk_rq_bytes(req) - 4);
+	mpi_request->RequestDataLength = cpu_to_le16(dma_len_out - 4);
 	psge = &mpi_request->SGL;
 
-	if (bio_multiple_segments(req->bio))
-		ioc->build_sg(ioc, psge, pci_dma_out, (blk_rq_bytes(req) - 4),
-		    pci_dma_in, (blk_rq_bytes(rsp) + 4));
-	else
-		ioc->build_sg(ioc, psge, dma_addr_out, (blk_rq_bytes(req) - 4),
-		    dma_addr_in, (blk_rq_bytes(rsp) + 4));
+	ioc->build_sg(ioc, psge, dma_addr_out, dma_len_out - 4, dma_addr_in,
+			dma_len_in - 4);
 
 	dtransportprintk(ioc, pr_info(MPT3SAS_FMT
 		"%s - sending smp request\n", ioc->name, __func__));
@@ -2040,83 +2022,51 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
 		    __func__, ioc->name);
 		_debug_dump_mf(mpi_request,
 		    sizeof(Mpi2SmpPassthroughRequest_t)/4);
-		if (!(ioc->transport_cmds.status & MPT3_CMD_RESET))
-			issue_reset = 1;
-		goto issue_host_reset;
+		if (!(ioc->transport_cmds.status & MPT3_CMD_RESET)) {
+			mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
+			rc = -ETIMEDOUT;
+			goto unmap_in;
+		}
 	}
 
 	dtransportprintk(ioc, pr_info(MPT3SAS_FMT
 		"%s - complete\n", ioc->name, __func__));
 
-	if (ioc->transport_cmds.status & MPT3_CMD_REPLY_VALID) {
-
-		mpi_reply = ioc->transport_cmds.reply;
-
-		dtransportprintk(ioc, pr_info(MPT3SAS_FMT
-		    "%s - reply data transfer size(%d)\n",
-		    ioc->name, __func__,
-		    le16_to_cpu(mpi_reply->ResponseDataLength)));
-
-		memcpy(scsi_req(req)->sense, mpi_reply, sizeof(*mpi_reply));
-		scsi_req(req)->sense_len = sizeof(*mpi_reply);
-		scsi_req(req)->resid_len = 0;
-		scsi_req(rsp)->resid_len -=
-		    le16_to_cpu(mpi_reply->ResponseDataLength);
-
-		/* check if the resp needs to be copied from the allocated
-		 * pci mem */
-		if (bio_multiple_segments(rsp->bio)) {
-			u32 offset = 0;
-			u32 bytes_to_copy =
-			    le16_to_cpu(mpi_reply->ResponseDataLength);
-			bio_for_each_segment(bvec, rsp->bio, iter) {
-				if (bytes_to_copy <= bvec.bv_len) {
-					memcpy(page_address(bvec.bv_page) +
-					    bvec.bv_offset, pci_addr_in +
-					    offset, bytes_to_copy);
-					break;
-				} else {
-					memcpy(page_address(bvec.bv_page) +
-					    bvec.bv_offset, pci_addr_in +
-					    offset, bvec.bv_len);
-					bytes_to_copy -= bvec.bv_len;
-				}
-				offset += bvec.bv_len;
-			}
-		}
-	} else {
+	if (!(ioc->transport_cmds.status & MPT3_CMD_REPLY_VALID)) {
 		dtransportprintk(ioc, pr_info(MPT3SAS_FMT
 		    "%s - no reply\n", ioc->name, __func__));
 		rc = -ENXIO;
+		goto unmap_in;
 	}
 
- issue_host_reset:
-	if (issue_reset) {
-		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
-		rc = -ETIMEDOUT;
-	}
+	mpi_reply = ioc->transport_cmds.reply;
 
- unmap:
-	if (dma_addr_out)
-		pci_unmap_single(ioc->pdev, dma_addr_out, blk_rq_bytes(req),
-		    PCI_DMA_BIDIRECTIONAL);
-	if (dma_addr_in)
-		pci_unmap_single(ioc->pdev, dma_addr_in, blk_rq_bytes(rsp),
-		    PCI_DMA_BIDIRECTIONAL);
+	dtransportprintk(ioc,
+		pr_info(MPT3SAS_FMT "%s - reply data transfer size(%d)\n",
+			ioc->name, __func__,
+			le16_to_cpu(mpi_reply->ResponseDataLength)));
 
- free_pci:
-	if (pci_addr_out)
-		pci_free_consistent(ioc->pdev, blk_rq_bytes(req), pci_addr_out,
-		    pci_dma_out);
+	memcpy(job->reply, mpi_reply, sizeof(*mpi_reply));
+	job->reply_len = sizeof(*mpi_reply);
+	reslen = le16_to_cpu(mpi_reply->ResponseDataLength);
 
-	if (pci_addr_in)
-		pci_free_consistent(ioc->pdev, blk_rq_bytes(rsp), pci_addr_in,
-		    pci_dma_in);
+	if (addr_in) {
+		sg_copy_to_buffer(job->reply_payload.sg_list,
+				job->reply_payload.sg_cnt, addr_in,
+				job->reply_payload.payload_len);
+	}
 
+	rc = 0;
+ unmap_in:
+	_transport_unmap_smp_buffer(&ioc->pdev->dev, &job->reply_payload,
+			dma_addr_in, addr_in);
+ unmap_out:
+	_transport_unmap_smp_buffer(&ioc->pdev->dev, &job->request_payload,
+			dma_addr_out, addr_out);
  out:
 	ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
 	mutex_unlock(&ioc->transport_cmds.mutex);
-	return rc;
+	bsg_job_done(job, rc, reslen);
 }
 
 struct sas_function_template mpt3sas_transport_functions = {
diff --git a/drivers/scsi/mvme147.c b/drivers/scsi/mvme147.c
index e7f6661a8862..4f515700bdc3 100644
--- a/drivers/scsi/mvme147.c
+++ b/drivers/scsi/mvme147.c
@@ -121,21 +121,6 @@ err_out:
 	return 0;
 }
 
-static int mvme147_bus_reset(struct scsi_cmnd *cmd)
-{
-	/* FIXME perform bus-specific reset */
-
-	/* FIXME 2: kill this function, and let midlayer fallback to
-	   the same result, calling wd33c93_host_reset() */
-
-	spin_lock_irq(cmd->device->host->host_lock);
-	wd33c93_host_reset(cmd);
-	spin_unlock_irq(cmd->device->host->host_lock);
-
-	return SUCCESS;
-}
-
-
 static struct scsi_host_template driver_template = {
 	.proc_name		= "MVME147",
 	.name			= "MVME147 built-in SCSI",
@@ -143,7 +128,6 @@ static struct scsi_host_template driver_template = {
 	.release		= mvme147_release,
 	.queuecommand		= wd33c93_queuecommand,
 	.eh_abort_handler	= wd33c93_abort,
-	.eh_bus_reset_handler	= mvme147_bus_reset,
 	.eh_host_reset_handler	= wd33c93_host_reset,
 	.can_queue		= CAN_QUEUE,
 	.this_id		= 7,
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index 4e047b5001a6..718c88de328b 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -61,7 +61,7 @@ static struct scsi_host_template mvs_sht = {
 	.max_sectors		= SCSI_DEFAULT_MAX_SECTORS,
 	.use_clustering		= ENABLE_CLUSTERING,
 	.eh_device_reset_handler = sas_eh_device_reset_handler,
-	.eh_bus_reset_handler	= sas_eh_bus_reset_handler,
+	.eh_target_reset_handler = sas_eh_target_reset_handler,
 	.target_destroy		= sas_target_destroy,
 	.ioctl			= sas_ioctl,
 	.shost_attrs		= mvst_host_attrs,
@@ -125,8 +125,7 @@ static void mvs_free(struct mvs_info *mvi)
 	else
 		slot_nr = MVS_CHIP_SLOT_SZ;
 
-	if (mvi->dma_pool)
-		pci_pool_destroy(mvi->dma_pool);
+	dma_pool_destroy(mvi->dma_pool);
 
 	if (mvi->tx)
 		dma_free_coherent(mvi->dev,
@@ -296,7 +295,8 @@ static int mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost)
 		goto err_out;
 
 	sprintf(pool_name, "%s%d", "mvs_dma_pool", mvi->id);
-	mvi->dma_pool = pci_pool_create(pool_name, mvi->pdev, MVS_SLOT_BUF_SZ, 16, 0);
+	mvi->dma_pool = dma_pool_create(pool_name, &mvi->pdev->dev,
+					MVS_SLOT_BUF_SZ, 16, 0);
 	if (!mvi->dma_pool) {
 			printk(KERN_DEBUG "failed to create dma pool %s.\n", pool_name);
 			goto err_out;
@@ -557,14 +557,14 @@ static int mvs_pci_init(struct pci_dev *pdev, const struct pci_device_id *ent)
 	SHOST_TO_SAS_HA(shost) =
 		kcalloc(1, sizeof(struct sas_ha_struct), GFP_KERNEL);
 	if (!SHOST_TO_SAS_HA(shost)) {
-		kfree(shost);
+		scsi_host_put(shost);
 		rc = -ENOMEM;
 		goto err_out_regions;
 	}
 
 	rc = mvs_prep_sas_ha_init(shost, chip);
 	if (rc) {
-		kfree(shost);
+		scsi_host_put(shost);
 		rc = -ENOMEM;
 		goto err_out_regions;
 	}
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index c7cc8035eacb..ee81d10252e0 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -790,7 +790,7 @@ static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf
 	slot->n_elem = n_elem;
 	slot->slot_tag = tag;
 
-	slot->buf = pci_pool_alloc(mvi->dma_pool, GFP_ATOMIC, &slot->buf_dma);
+	slot->buf = dma_pool_alloc(mvi->dma_pool, GFP_ATOMIC, &slot->buf_dma);
 	if (!slot->buf) {
 		rc = -ENOMEM;
 		goto err_out_tag;
@@ -840,7 +840,7 @@ static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf
 	return rc;
 
 err_out_slot_buf:
-	pci_pool_free(mvi->dma_pool, slot->buf, slot->buf_dma);
+	dma_pool_free(mvi->dma_pool, slot->buf, slot->buf_dma);
 err_out_tag:
 	mvs_tag_free(mvi, tag);
 err_out:
@@ -918,7 +918,7 @@ static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task,
 	}
 
 	if (slot->buf) {
-		pci_pool_free(mvi->dma_pool, slot->buf, slot->buf_dma);
+		dma_pool_free(mvi->dma_pool, slot->buf, slot->buf_dma);
 		slot->buf = NULL;
 	}
 	list_del_init(&slot->entry);
diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c
index 53c84771f0e8..107e191bf023 100644
--- a/drivers/scsi/nsp32.c
+++ b/drivers/scsi/nsp32.c
@@ -201,7 +201,6 @@ static int         nsp32_release     (struct Scsi_Host *);
 
 /* SCSI error handler */
 static int         nsp32_eh_abort     (struct scsi_cmnd *);
-static int         nsp32_eh_bus_reset (struct scsi_cmnd *);
 static int         nsp32_eh_host_reset(struct scsi_cmnd *);
 
 /* generate SCSI message */
@@ -276,8 +275,7 @@ static struct scsi_host_template nsp32_template = {
 	.max_sectors			= 128,
 	.this_id			= NSP32_HOST_SCSIID,
 	.use_clustering			= DISABLE_CLUSTERING,
-	.eh_abort_handler       	= nsp32_eh_abort,
-	.eh_bus_reset_handler		= nsp32_eh_bus_reset,
+	.eh_abort_handler		= nsp32_eh_abort,
 	.eh_host_reset_handler		= nsp32_eh_host_reset,
 /*	.highmem_io			= 1, */
 };
@@ -2845,24 +2843,6 @@ static int nsp32_eh_abort(struct scsi_cmnd *SCpnt)
 	return SUCCESS;
 }
 
-static int nsp32_eh_bus_reset(struct scsi_cmnd *SCpnt)
-{
-	nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
-	unsigned int   base = SCpnt->device->host->io_port;
-
-	spin_lock_irq(SCpnt->device->host->host_lock);
-
-	nsp32_msg(KERN_INFO, "Bus Reset");	
-	nsp32_dbg(NSP32_DEBUG_BUSRESET, "SCpnt=0x%x", SCpnt);
-
-	nsp32_write2(base, IRQ_CONTROL, IRQ_CONTROL_ALL_IRQ_MASK);
-	nsp32_do_bus_reset(data);
-	nsp32_write2(base, IRQ_CONTROL, 0);
-
-	spin_unlock_irq(SCpnt->device->host->host_lock);
-	return SUCCESS;	/* SCSI bus reset is succeeded at any time. */
-}
-
 static void nsp32_do_bus_reset(nsp32_hw_data *data)
 {
 	unsigned int   base = data->BaseAddress;
diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c
index 929ee7e88120..20ec1c01dbd5 100644
--- a/drivers/scsi/osst.c
+++ b/drivers/scsi/osst.c
@@ -619,7 +619,7 @@ static int osst_verify_frame(struct osst_tape * STp, int frame_seq_number, int q
 	os_aux_t           * aux  = STp->buffer->aux;
 	os_partition_t     * par  = &(aux->partition);
 	struct st_partstat * STps = &(STp->ps[STp->partition]);
-	int		     blk_cnt, blk_sz, i;
+	unsigned int	     blk_cnt, blk_sz, i;
 
 	if (STp->raw) {
 		if (STp->buffer->syscall_result) {
@@ -5434,7 +5434,7 @@ static int append_to_buffer(const char __user *ubp, struct osst_buffer *st_bp, i
 
 	for (i=0, offset=st_bp->buffer_bytes;
 	     i < st_bp->sg_segs && offset >= st_bp->sg[i].length; i++)
-	offset -= st_bp->sg[i].length;
+		offset -= st_bp->sg[i].length;
 	if (i == st_bp->sg_segs) {  /* Should never happen */
 		printk(KERN_WARNING "osst :A: Append_to_buffer offset overflow.\n");
 		return (-EIO);
@@ -5667,12 +5667,12 @@ static	struct	osst_support_data support_list[] = {
  * sysfs support for osst driver parameter information
  */
 
-static ssize_t osst_version_show(struct device_driver *ddd, char *buf)
+static ssize_t version_show(struct device_driver *ddd, char *buf)
 {
 	return snprintf(buf, PAGE_SIZE, "%s\n", osst_version);
 }
 
-static DRIVER_ATTR(version, S_IRUGO, osst_version_show, NULL);
+static DRIVER_ATTR_RO(version);
 
 static int osst_create_sysfs_files(struct device_driver *sysfs)
 {
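The osst rename above is driven by the DRIVER_ATTR_RO() helper: DRIVER_ATTR_RO(version) declares driver_attr_version with read-only mode and wires its .show callback to a function that must be named version_show, which is why osst_version_show loses its prefix. A small sketch of the pattern for a hypothetical foo attribute:

#include <linux/device.h>
#include <linux/kernel.h>

static ssize_t foo_show(struct device_driver *drv, char *buf)
{
	/* DRIVER_ATTR_RO(foo) finds this function purely by its name */
	return scnprintf(buf, PAGE_SIZE, "example\n");
}
static DRIVER_ATTR_RO(foo);	/* expands to driver_attr_foo, 0444, .show = foo_show */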
diff --git a/drivers/scsi/pcmcia/fdomain_stub.c b/drivers/scsi/pcmcia/fdomain_stub.c
index 714b248f5d5e..953a792150ae 100644
--- a/drivers/scsi/pcmcia/fdomain_stub.c
+++ b/drivers/scsi/pcmcia/fdomain_stub.c
@@ -173,7 +173,7 @@ static void fdomain_release(struct pcmcia_device *link)
 
 static int fdomain_resume(struct pcmcia_device *link)
 {
-	fdomain_16x0_bus_reset(NULL);
+	fdomain_16x0_host_reset(NULL);
 
 	return 0;
 }
diff --git a/drivers/scsi/pcmcia/qlogic_stub.c b/drivers/scsi/pcmcia/qlogic_stub.c
index c670dc704c74..0556054764dc 100644
--- a/drivers/scsi/pcmcia/qlogic_stub.c
+++ b/drivers/scsi/pcmcia/qlogic_stub.c
@@ -67,7 +67,7 @@ static struct scsi_host_template qlogicfas_driver_template = {
 	.info			= qlogicfas408_info,
 	.queuecommand		= qlogicfas408_queuecommand,
 	.eh_abort_handler	= qlogicfas408_abort,
-	.eh_bus_reset_handler	= qlogicfas408_bus_reset,
+	.eh_host_reset_handler	= qlogicfas408_host_reset,
 	.bios_param		= qlogicfas408_biosparam,
 	.can_queue		= 1,
 	.this_id		= -1,
@@ -264,7 +264,7 @@ static int qlogic_resume(struct pcmcia_device *link)
 		outb(0x04, link->resource[0]->start + 0xd);
 	}
 	/* Ugggglllyyyy!!! */
-	qlogicfas408_bus_reset(NULL);
+	qlogicfas408_host_reset(NULL);
 
 	return 0;
 }
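Several templates in this series (qlogicfas, ppa, nsp32, qedf, pm8001) drop .eh_bus_reset_handler as part of the reset-handler cleanup; drivers whose "bus reset" effectively reset the whole adapter now advertise that routine as .eh_host_reset_handler, the final escalation step of SCSI error handling. A minimal hedged template sketch; my_abort and my_host_reset are placeholders:

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

static int my_abort(struct scsi_cmnd *cmd)      { return SUCCESS; }
static int my_host_reset(struct scsi_cmnd *cmd) { return SUCCESS; }

static struct scsi_host_template my_template = {
	.name			= "example",
	.eh_abort_handler	= my_abort,
	/* no .eh_bus_reset_handler any more */
	.eh_host_reset_handler	= my_host_reset,
};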
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 034b2f7d1135..0e013f76b582 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -86,7 +86,7 @@ static struct scsi_host_template pm8001_sht = {
 	.max_sectors		= SCSI_DEFAULT_MAX_SECTORS,
 	.use_clustering		= ENABLE_CLUSTERING,
 	.eh_device_reset_handler = sas_eh_device_reset_handler,
-	.eh_bus_reset_handler	= sas_eh_bus_reset_handler,
+	.eh_target_reset_handler = sas_eh_target_reset_handler,
 	.target_destroy		= sas_target_destroy,
 	.ioctl			= sas_ioctl,
 	.shost_attrs		= pm8001_host_attrs,
@@ -160,8 +160,6 @@ static void pm8001_free(struct pm8001_hba_info *pm8001_ha)
 			}
 	}
 	PM8001_CHIP_DISP->chip_iounmap(pm8001_ha);
-	if (pm8001_ha->shost)
-		scsi_host_put(pm8001_ha->shost);
 	flush_workqueue(pm8001_wq);
 	kfree(pm8001_ha->tags);
 	kfree(pm8001_ha);
@@ -1073,7 +1071,7 @@ err_out_ha_free:
 err_out_free:
 	kfree(SHOST_TO_SAS_HA(shost));
 err_out_free_host:
-	kfree(shost);
+	scsi_host_put(shost);
 err_out_regions:
 	pci_release_regions(pdev);
 err_out_disable:
@@ -1112,6 +1110,7 @@ static void pm8001_pci_remove(struct pci_dev *pdev)
 		for (j = 0; j < PM8001_MAX_MSIX_VEC; j++)
 			tasklet_kill(&pm8001_ha->tasklet[j]);
 #endif
+	scsi_host_put(pm8001_ha->shost);
 	pm8001_free(pm8001_ha);
 	kfree(sha->sas_phy);
 	kfree(sha->sas_port);
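The kfree(shost) to scsi_host_put(shost) conversions above, and the put moved into pm8001_pci_remove(), reflect that a Scsi_Host returned by scsi_host_alloc() is reference counted through an embedded struct device: it must be released exactly once with scsi_host_put() and never with kfree(). A hedged sketch of the intended pattern; my_probe_shost and fail_setup are illustrative:

#include <linux/types.h>
#include <scsi/scsi_host.h>

static int my_probe_shost(struct scsi_host_template *my_sht, bool fail_setup)
{
	struct Scsi_Host *shost = scsi_host_alloc(my_sht, 0);

	if (!shost)
		return -ENOMEM;

	if (fail_setup) {
		/* error path: drop the reference, never kfree(shost) */
		scsi_host_put(shost);
		return -ENOMEM;
	}

	/* ... scsi_add_host(), normal operation ... */
	/* teardown likewise ends with a single scsi_host_put() */
	scsi_host_put(shost);
	return 0;
}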
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 1cc814f1505a..b4d6cd8cd1ad 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -1595,12 +1595,7 @@ static void pmcraid_handle_config_change(struct pmcraid_instance *pinstance)
 	if (pinstance->ccn.hcam->notification_type ==
 	    NOTIFICATION_TYPE_ENTRY_CHANGED &&
 	    cfg_entry->resource_type == RES_TYPE_VSET) {
-
-		if (fw_version <= PMCRAID_FW_VERSION_1)
-			hidden_entry = (cfg_entry->unique_flags1 & 0x80) != 0;
-		else
-			hidden_entry = (cfg_entry->unique_flags1 & 0x80) != 0;
-
+		hidden_entry = (cfg_entry->unique_flags1 & 0x80) != 0;
 	} else if (!pmcraid_expose_resource(fw_version, cfg_entry)) {
 		goto out_notify_apps;
 	}
@@ -4655,13 +4650,13 @@ pmcraid_release_control_blocks(
 		return;
 
 	for (i = 0; i < max_index; i++) {
-		pci_pool_free(pinstance->control_pool,
+		dma_pool_free(pinstance->control_pool,
 			      pinstance->cmd_list[i]->ioa_cb,
 			      pinstance->cmd_list[i]->ioa_cb_bus_addr);
 		pinstance->cmd_list[i]->ioa_cb = NULL;
 		pinstance->cmd_list[i]->ioa_cb_bus_addr = 0;
 	}
-	pci_pool_destroy(pinstance->control_pool);
+	dma_pool_destroy(pinstance->control_pool);
 	pinstance->control_pool = NULL;
 }
 
@@ -4718,8 +4713,8 @@ static int pmcraid_allocate_control_blocks(struct pmcraid_instance *pinstance)
 		pinstance->host->unique_id);
 
 	pinstance->control_pool =
-		pci_pool_create(pinstance->ctl_pool_name,
-				pinstance->pdev,
+		dma_pool_create(pinstance->ctl_pool_name,
+				&pinstance->pdev->dev,
 				sizeof(struct pmcraid_control_block),
 				PMCRAID_IOARCB_ALIGNMENT, 0);
 
@@ -4728,7 +4723,7 @@ static int pmcraid_allocate_control_blocks(struct pmcraid_instance *pinstance)
 
 	for (i = 0; i < PMCRAID_MAX_CMD; i++) {
 		pinstance->cmd_list[i]->ioa_cb =
-			pci_pool_alloc(
+			dma_pool_alloc(
 				pinstance->control_pool,
 				GFP_KERNEL,
 				&(pinstance->cmd_list[i]->ioa_cb_bus_addr));
diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
index 01eb2bc16dc1..8bfac72a242b 100644
--- a/drivers/scsi/pmcraid.h
+++ b/drivers/scsi/pmcraid.h
@@ -755,7 +755,7 @@ struct pmcraid_instance {
 
 	/* structures related to command blocks */
 	struct kmem_cache *cmd_cachep;		/* cache for cmd blocks */
-	struct pci_pool *control_pool;		/* pool for control blocks */
+	struct dma_pool *control_pool;		/* pool for control blocks */
 	char   cmd_pool_name[64];		/* name of cmd cache */
 	char   ctl_pool_name[64];		/* name of control cache */
 
diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c
index f6ad579280d4..7be5823ab036 100644
--- a/drivers/scsi/ppa.c
+++ b/drivers/scsi/ppa.c
@@ -970,7 +970,6 @@ static struct scsi_host_template ppa_template = {
 	.name			= "Iomega VPI0 (ppa) interface",
 	.queuecommand		= ppa_queuecommand,
 	.eh_abort_handler	= ppa_abort,
-	.eh_bus_reset_handler	= ppa_reset,
 	.eh_host_reset_handler	= ppa_reset,
 	.bios_param		= ppa_biosparam,
 	.this_id		= -1,
diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h
index 351f06dfc5a0..9bf7b227e69a 100644
--- a/drivers/scsi/qedf/qedf.h
+++ b/drivers/scsi/qedf/qedf.h
@@ -300,7 +300,6 @@ struct qedf_ctx {
 #define QEDF_FALLBACK_VLAN	1002
 #define QEDF_DEFAULT_PRIO	3
 	int vlan_id;
-	uint vlan_hw_insert:1;
 	struct qed_dev *cdev;
 	struct qed_dev_fcoe_info dev_info;
 	struct qed_int_info int_info;
@@ -443,7 +442,6 @@ extern void qedf_cmd_mgr_free(struct qedf_cmd_mgr *cmgr);
 extern int qedf_queuecommand(struct Scsi_Host *host,
 	struct scsi_cmnd *sc_cmd);
 extern void qedf_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb);
-extern void qedf_update_src_mac(struct fc_lport *lport, u8 *addr);
 extern u8 *qedf_get_src_mac(struct fc_lport *lport);
 extern void qedf_fip_recv(struct qedf_ctx *qedf, struct sk_buff *skb);
 extern void qedf_fcoe_send_vlan_req(struct qedf_ctx *qedf);
diff --git a/drivers/scsi/qedf/qedf_fip.c b/drivers/scsi/qedf/qedf_fip.c
index aefd24ca9604..773558fc0697 100644
--- a/drivers/scsi/qedf/qedf_fip.c
+++ b/drivers/scsi/qedf/qedf_fip.c
@@ -108,7 +108,6 @@ void qedf_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
 {
 	struct qedf_ctx *qedf = container_of(fip, struct qedf_ctx, ctlr);
 	struct ethhdr *eth_hdr;
-	struct vlan_ethhdr *vlan_hdr;
 	struct fip_header *fiph;
 	u16 op, vlan_tci = 0;
 	u8 sub;
@@ -124,16 +123,14 @@ void qedf_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
 	op = ntohs(fiph->fip_op);
 	sub = fiph->fip_subcode;
 
-	if (!qedf->vlan_hw_insert) {
-		vlan_hdr = skb_push(skb, sizeof(*vlan_hdr) - sizeof(*eth_hdr));
-		memcpy(vlan_hdr, eth_hdr, 2 * ETH_ALEN);
-		vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
-		vlan_hdr->h_vlan_encapsulated_proto = eth_hdr->h_proto;
-		vlan_hdr->h_vlan_TCI = vlan_tci =  htons(qedf->vlan_id);
-	}
+	/*
+	 * Add VLAN tag to non-offload FIP frame based on current stored VLAN
+	 * for FIP/FCoE traffic.
+	 */
+	__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), qedf->vlan_id);
 
-	/* Update eth_hdr since we added a VLAN tag */
-	eth_hdr = (struct ethhdr *)skb_mac_header(skb);
+	/* Get VLAN ID from skb for printing purposes */
+	__vlan_hwaccel_get_tag(skb, &vlan_tci);
 
 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, "FIP frame send: "
 	    "dest=%pM op=%x sub=%x vlan=%04x.", eth_hdr->h_dest, op, sub,
@@ -174,7 +171,6 @@ void qedf_fip_recv(struct qedf_ctx *qedf, struct sk_buff *skb)
 	/* Handle FIP VLAN resp in the driver */
 	if (op == FIP_OP_VLAN && sub == FIP_SC_VL_NOTE) {
 		qedf_fcoe_process_vlan_resp(qedf, skb);
-		qedf->vlan_hw_insert = 0;
 		kfree_skb(skb);
 	} else if (op == FIP_OP_CTRL && sub == FIP_SC_CLR_VLINK) {
 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Clear virtual "
@@ -242,26 +238,9 @@ void qedf_fip_recv(struct qedf_ctx *qedf, struct sk_buff *skb)
 	}
 }
 
-void qedf_update_src_mac(struct fc_lport *lport, u8 *addr)
-{
-	struct qedf_ctx *qedf = lport_priv(lport);
-
-	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
-	    "Setting data_src_addr=%pM.\n", addr);
-	ether_addr_copy(qedf->data_src_addr, addr);
-}
-
 u8 *qedf_get_src_mac(struct fc_lport *lport)
 {
-	u8 mac[ETH_ALEN];
-	u8 port_id[3];
 	struct qedf_ctx *qedf = lport_priv(lport);
 
-	/* We need to use the lport port_id to create the data_src_addr */
-	if (is_zero_ether_addr(qedf->data_src_addr)) {
-		hton24(port_id, lport->port_id);
-		fc_fcoe_set_mac(mac, port_id);
-		qedf->ctlr.update_mac(lport, mac);
-	}
 	return qedf->data_src_addr;
 }
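The qedf_fip_send() hunk above stops hand-building a struct vlan_ethhdr and instead marks the skb for hardware VLAN insertion: __vlan_hwaccel_put_tag() only records the protocol and TCI in the skb metadata (no header bytes are pushed), and __vlan_hwaccel_get_tag() reads that value back, here used only for logging. A short sketch of the pair; vid is assumed to be the VLAN id already discovered through FIP:

#include <linux/if_vlan.h>
#include <linux/skbuff.h>

static void tag_for_hw_insertion(struct sk_buff *skb, u16 vid)
{
	u16 tci = 0;

	/* stash 802.1Q protocol + TCI in skb metadata for the NIC to insert */
	__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);

	/* returns 0 when a tag is present and fills tci with what was stored */
	if (!__vlan_hwaccel_get_tag(skb, &tci))
		pr_debug("skb tagged for hw insertion, vlan=%04x\n", tci);
}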
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 1d13c9ca517d..7c0064500cc5 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -18,6 +18,7 @@
 #include <linux/kthread.h>
 #include <scsi/libfc.h>
 #include <scsi/scsi_host.h>
+#include <scsi/fc_frame.h>
 #include <linux/if_ether.h>
 #include <linux/if_vlan.h>
 #include <linux/cpu.h>
@@ -42,7 +43,7 @@ MODULE_PARM_DESC(dev_loss_tmo,  " dev_loss_tmo setting for attached "
 
 uint qedf_debug = QEDF_LOG_INFO;
 module_param_named(debug, qedf_debug, uint, S_IRUGO);
-MODULE_PARM_DESC(qedf_debug, " Debug mask. Pass '1' to enable default debugging"
+MODULE_PARM_DESC(debug, " Debug mask. Pass '1' to enable default debugging"
 	" mask");
 
 static uint qedf_fipvlan_retries = 30;
@@ -163,7 +164,7 @@ static void qedf_handle_link_update(struct work_struct *work)
 		QEDF_WARN(&(qedf->dbg_ctx), "Did not receive FIP VLAN "
 			   "response, falling back to default VLAN %d.\n",
 			   qedf_fallback_vlan);
-		qedf_set_vlan_id(qedf, QEDF_FALLBACK_VLAN);
+		qedf_set_vlan_id(qedf, qedf_fallback_vlan);
 
 		/*
 		 * Zero out data_src_addr so we'll update it with the new
@@ -187,6 +188,50 @@ static void qedf_handle_link_update(struct work_struct *work)
 	}
 }
 
+#define	QEDF_FCOE_MAC_METHOD_GRANGED_MAC		1
+#define QEDF_FCOE_MAC_METHOD_FCF_MAP			2
+#define QEDF_FCOE_MAC_METHOD_FCOE_SET_MAC		3
+static void qedf_set_data_src_addr(struct qedf_ctx *qedf, struct fc_frame *fp)
+{
+	u8 *granted_mac;
+	struct fc_frame_header *fh = fc_frame_header_get(fp);
+	u8 fc_map[3];
+	int method = 0;
+
+	/* Get granted MAC address from FIP FLOGI payload */
+	granted_mac = fr_cb(fp)->granted_mac;
+
+	/*
+	 * We set the source MAC for FCoE traffic based on the Granted MAC
+	 * address from the switch.
+	 *
+	 * If granted_mac is non-zero, we use that.
+	 * If the granted_mac is zeroed out, create the FCoE MAC based on
+	 * the sel_fcf->fc_map and the d_id of the FLOGI frame.
+	 * If sel_fcf->fc_map is 0 then we use the default FCF-MAC plus the
+	 * d_id of the FLOGI frame.
+	 */
+	if (!is_zero_ether_addr(granted_mac)) {
+		ether_addr_copy(qedf->data_src_addr, granted_mac);
+		method = QEDF_FCOE_MAC_METHOD_GRANGED_MAC;
+	} else if (qedf->ctlr.sel_fcf->fc_map != 0) {
+		hton24(fc_map, qedf->ctlr.sel_fcf->fc_map);
+		qedf->data_src_addr[0] = fc_map[0];
+		qedf->data_src_addr[1] = fc_map[1];
+		qedf->data_src_addr[2] = fc_map[2];
+		qedf->data_src_addr[3] = fh->fh_d_id[0];
+		qedf->data_src_addr[4] = fh->fh_d_id[1];
+		qedf->data_src_addr[5] = fh->fh_d_id[2];
+		method = QEDF_FCOE_MAC_METHOD_FCF_MAP;
+	} else {
+		fc_fcoe_set_mac(qedf->data_src_addr, fh->fh_d_id);
+		method = QEDF_FCOE_MAC_METHOD_FCOE_SET_MAC;
+	}
+
+	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
+	    "QEDF data_src_mac=%pM method=%d.\n", qedf->data_src_addr, method);
+}
+
 static void qedf_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
 	void *arg)
 {
@@ -212,6 +257,10 @@ static void qedf_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
 	/* Log stats for FLOGI reject */
 	if (fc_frame_payload_op(fp) == ELS_LS_RJT)
 		qedf->flogi_failed++;
+	else if (fc_frame_payload_op(fp) == ELS_LS_ACC) {
+		/* Set the source MAC we will use for FCoE traffic */
+		qedf_set_data_src_addr(qedf, fp);
+	}
 
 	/* Complete flogi_compl so we can proceed to sending ADISCs */
 	complete(&qedf->flogi_compl);
@@ -312,8 +361,9 @@ static void qedf_link_recovery(struct work_struct *work)
 	/* Since the link went down and came back up, verify which vlan we're on */
 	qedf->fipvlan_retries = qedf_fipvlan_retries;
 	rc = qedf_initiate_fipvlan_req(qedf);
+	/* If getting the VLAN fails, set the VLAN to the fallback one */
 	if (!rc)
-		return;
+		qedf_set_vlan_id(qedf, qedf_fallback_vlan);
 
 	/*
 	 * We need to wait for an FCF to be selected due to the
@@ -629,16 +679,6 @@ static int qedf_eh_device_reset(struct scsi_cmnd *sc_cmd)
 	return qedf_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET);
 }
 
-static int qedf_eh_bus_reset(struct scsi_cmnd *sc_cmd)
-{
-	QEDF_ERR(NULL, "BUS RESET Issued...\n");
-	/*
-	 * Essentially a no-op but return SUCCESS to prevent
-	 * unnecessary escalation to the host reset handler.
-	 */
-	return SUCCESS;
-}
-
 void qedf_wait_for_upload(struct qedf_ctx *qedf)
 {
 	while (1) {
@@ -716,7 +756,6 @@ static struct scsi_host_template qedf_host_template = {
 	.eh_abort_handler	= qedf_eh_abort,
 	.eh_device_reset_handler = qedf_eh_device_reset, /* lun reset */
 	.eh_target_reset_handler = qedf_eh_target_reset, /* target reset */
-	.eh_bus_reset_handler = qedf_eh_bus_reset,
 	.eh_host_reset_handler  = qedf_eh_host_reset,
 	.slave_configure	= qedf_slave_configure,
 	.dma_boundary = QED_HW_DMA_BOUNDARY,
@@ -915,6 +954,10 @@ static int qedf_xmit(struct fc_lport *lport, struct fc_frame *fp)
 	skb->mac_len = elen;
 	skb->protocol = htons(ETH_P_FCOE);
 
+	/*
+	 * Add VLAN tag to non-offload FCoE frame based on current stored VLAN
+	 * for FIP/FCoE traffic.
+	 */
 	__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), qedf->vlan_id);
 
 	/* fill up mac and fcoe headers */
@@ -927,7 +970,7 @@ static int qedf_xmit(struct fc_lport *lport, struct fc_frame *fp)
 		ether_addr_copy(eh->h_dest, qedf->ctlr.dest_addr);
 
 	/* Set the source MAC address */
-	fc_fcoe_set_mac(eh->h_source, fh->fh_s_id);
+	ether_addr_copy(eh->h_source, qedf->data_src_addr);
 
 	hp = (struct fcoe_hdr *)(eh + 1);
 	memset(hp, 0, sizeof(*hp));
@@ -1025,7 +1068,6 @@ static int qedf_offload_connection(struct qedf_ctx *qedf,
 {
 	struct qed_fcoe_params_offload conn_info;
 	u32 port_id;
-	u8 lport_src_id[3];
 	int rval;
 	uint16_t total_sqe = (fcport->sq_mem_size / sizeof(struct fcoe_wqe));
 
@@ -1054,11 +1096,7 @@ static int qedf_offload_connection(struct qedf_ctx *qedf,
 	    (dma_addr_t)(*(u64 *)(fcport->sq_pbl + 8));
 
 	/* Need to use our FCoE MAC for the offload session */
-	port_id = fc_host_port_id(qedf->lport->host);
-	lport_src_id[2] = (port_id & 0x000000FF);
-	lport_src_id[1] = (port_id & 0x0000FF00) >> 8;
-	lport_src_id[0] = (port_id & 0x00FF0000) >> 16;
-	fc_fcoe_set_mac(conn_info.src_mac, lport_src_id);
+	ether_addr_copy(conn_info.src_mac, qedf->data_src_addr);
 
 	ether_addr_copy(conn_info.dst_mac, qedf->ctlr.dest_addr);
 
@@ -1347,7 +1385,6 @@ static void qedf_fcoe_ctlr_setup(struct qedf_ctx *qedf)
 	fcoe_ctlr_init(&qedf->ctlr, FIP_ST_AUTO);
 
 	qedf->ctlr.send = qedf_fip_send;
-	qedf->ctlr.update_mac = qedf_update_src_mac;
 	qedf->ctlr.get_src_addr = qedf_get_src_mac;
 	ether_addr_copy(qedf->ctlr.ctl_src_addr, qedf->mac);
 }
@@ -2939,7 +2976,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
 
 	sprintf(host_buf, "qedf_%u_link",
 	    qedf->lport->host->host_no);
-	qedf->link_update_wq = create_singlethread_workqueue(host_buf);
+	qedf->link_update_wq = create_workqueue(host_buf);
 	INIT_DELAYED_WORK(&qedf->link_update, qedf_handle_link_update);
 	INIT_DELAYED_WORK(&qedf->link_recovery, qedf_link_recovery);
 
@@ -3056,9 +3093,24 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "MAC address is %pM.\n",
 		   qedf->mac);
 
-	/* Set the WWNN and WWPN based on the MAC address */
-	qedf->wwnn = fcoe_wwn_from_mac(qedf->mac, 1, 0);
-	qedf->wwpn = fcoe_wwn_from_mac(qedf->mac, 2, 0);
+	/*
+	 * Set the WWNN and WWPN in the following way:
+	 *
+	 * If the info we get from qed is non-zero then use that to set the
+	 * WWPN and WWNN. Otherwise fall back to use fcoe_wwn_from_mac() based
+	 * on the MAC address.
+	 */
+	if (qedf->dev_info.wwnn != 0 && qedf->dev_info.wwpn != 0) {
+		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
+		    "Setting WWPN and WWNN from qed dev_info.\n");
+		qedf->wwnn = qedf->dev_info.wwnn;
+		qedf->wwpn = qedf->dev_info.wwpn;
+	} else {
+		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
+		    "Setting WWPN and WWNN using fcoe_wwn_from_mac().\n");
+		qedf->wwnn = fcoe_wwn_from_mac(qedf->mac, 1, 0);
+		qedf->wwpn = fcoe_wwn_from_mac(qedf->mac, 2, 0);
+	}
 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,  "WWNN=%016llx "
 		   "WWPN=%016llx.\n", qedf->wwnn, qedf->wwpn);
 
@@ -3094,7 +3146,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
 	/* Start LL2 processing thread */
 	snprintf(host_buf, 20, "qedf_%d_ll2", host->host_no);
 	qedf->ll2_recv_wq =
-		create_singlethread_workqueue(host_buf);
+		create_workqueue(host_buf);
 	if (!qedf->ll2_recv_wq) {
 		QEDF_ERR(&(qedf->dbg_ctx), "Failed to create LL2 workqueue.\n");
 		goto err7;
@@ -3114,8 +3166,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
 	}
 	set_bit(QEDF_LL2_STARTED, &qedf->flags);
 
-	/* hw will be insterting vlan tag*/
-	qedf->vlan_hw_insert = 1;
+	/* Set initial FIP/FCoE VLAN to 0 until one is discovered */
 	qedf->vlan_id = 0;
 
 	/*
@@ -3137,7 +3188,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
 
 	sprintf(host_buf, "qedf_%u_timer", qedf->lport->host->host_no);
 	qedf->timer_work_queue =
-		create_singlethread_workqueue(host_buf);
+		create_workqueue(host_buf);
 	if (!qedf->timer_work_queue) {
 		QEDF_ERR(&(qedf->dbg_ctx), "Failed to start timer "
 			  "workqueue.\n");
@@ -3148,7 +3199,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
 	if (mode != QEDF_MODE_RECOVERY) {
 		sprintf(host_buf, "qedf_%u_dpc",
 		    qedf->lport->host->host_no);
-		qedf->dpc_wq = create_singlethread_workqueue(host_buf);
+		qedf->dpc_wq = create_workqueue(host_buf);
 	}
 
 	/*
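qedf_set_data_src_addr() above follows the usual FCoE addressing rules: use the FIP granted MAC when the fabric supplies one, otherwise build a fabric-provided MAC by prepending the 24-bit FC-MAP to the 24-bit destination ID from the FLOGI. A standalone sketch of just that byte layout; the FC-MAP and FC ID values below are examples, not taken from the patch:

#include <stdio.h>
#include <stdint.h>

/* Build an FPMA-style MAC: 3 bytes of FC-MAP followed by the 3-byte FC ID. */
static void fcoe_mac_from_fcmap(uint8_t mac[6], uint32_t fc_map, uint32_t fc_id)
{
	mac[0] = (fc_map >> 16) & 0xff;
	mac[1] = (fc_map >> 8) & 0xff;
	mac[2] = fc_map & 0xff;
	mac[3] = (fc_id >> 16) & 0xff;
	mac[4] = (fc_id >> 8) & 0xff;
	mac[5] = fc_id & 0xff;
}

int main(void)
{
	uint8_t mac[6];

	/* 0x0efc00 is the conventional default FC-MAP; 0x010203 is a made-up FC ID */
	fcoe_mac_from_fcmap(mac, 0x0efc00, 0x010203);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}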
diff --git a/drivers/scsi/qedf/qedf_version.h b/drivers/scsi/qedf/qedf_version.h
index 6fa442061c32..397b3b8ee51a 100644
--- a/drivers/scsi/qedf/qedf_version.h
+++ b/drivers/scsi/qedf/qedf_version.h
@@ -7,9 +7,9 @@
  *  this source tree.
  */
 
-#define QEDF_VERSION		"8.18.22.0"
+#define QEDF_VERSION		"8.20.5.0"
 #define QEDF_DRIVER_MAJOR_VER		8
-#define QEDF_DRIVER_MINOR_VER		18
-#define QEDF_DRIVER_REV_VER		22
+#define QEDF_DRIVER_MINOR_VER		20
+#define QEDF_DRIVER_REV_VER		5
 #define QEDF_DRIVER_ENG_VER		0
 
diff --git a/drivers/scsi/qedi/qedi.h b/drivers/scsi/qedi/qedi.h
index 91d2f51c351b..b8b22ce60ecc 100644
--- a/drivers/scsi/qedi/qedi.h
+++ b/drivers/scsi/qedi/qedi.h
@@ -54,8 +54,8 @@ struct qedi_endpoint;
 /* MAX Length for cached SGL */
 #define MAX_SGLEN_FOR_CACHESGL	((1U << 16) - 1)
 
-#define MAX_NUM_MSIX_PF         8
-#define MIN_NUM_CPUS_MSIX(x)	min((x)->msix_count, num_online_cpus())
+#define MIN_NUM_CPUS_MSIX(x)	min_t(u32, x->dev_info.num_cqs, \
+					num_online_cpus())
 
 #define QEDI_LOCAL_PORT_MIN     60000
 #define QEDI_LOCAL_PORT_MAX     61024
@@ -301,7 +301,6 @@ struct qedi_ctx {
 	u16 bdq_prod_idx;
 	u16 rq_num_entries;
 
-	u32 msix_count;
 	u32 max_sqes;
 	u8 num_queues;
 	u32 max_active_conns;
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
index 37da9a8b43b1..a02b34ea5cab 100644
--- a/drivers/scsi/qedi/qedi_iscsi.c
+++ b/drivers/scsi/qedi/qedi_iscsi.c
@@ -534,7 +534,7 @@ static int qedi_iscsi_offload_conn(struct qedi_endpoint *qedi_ep)
 	SET_FIELD(conn_info->tcp_flags, TCP_OFFLOAD_PARAMS_DA_CNT_EN, 1);
 	SET_FIELD(conn_info->tcp_flags, TCP_OFFLOAD_PARAMS_KA_EN, 1);
 
-	conn_info->default_cq = (qedi_ep->fw_cid % 8);
+	conn_info->default_cq = (qedi_ep->fw_cid % qedi->num_queues);
 
 	conn_info->ka_max_probe_cnt = DEF_KA_MAX_PROBE_COUNT;
 	conn_info->dup_ack_theshold = 3;
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index 2c3783684815..cccc34adc0e0 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -794,13 +794,14 @@ static int qedi_set_iscsi_pf_param(struct qedi_ctx *qedi)
 	u32 log_page_size;
 	int rval = 0;
 
-	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC, "Min number of MSIX %d\n",
-		  MIN_NUM_CPUS_MSIX(qedi));
 
 	num_sq_pages = (MAX_OUSTANDING_TASKS_PER_CON * 8) / PAGE_SIZE;
 
 	qedi->num_queues = MIN_NUM_CPUS_MSIX(qedi);
 
+	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+		  "Number of CQ count is %d\n", qedi->num_queues);
+
 	memset(&qedi->pf_params.iscsi_pf_params, 0,
 	       sizeof(qedi->pf_params.iscsi_pf_params));
 
@@ -1575,7 +1576,7 @@ struct qedi_cmd *qedi_get_cmd_from_tid(struct qedi_ctx *qedi, u32 tid)
 {
 	struct qedi_cmd *cmd = NULL;
 
-	if (tid > MAX_ISCSI_TASK_ENTRIES)
+	if (tid >= MAX_ISCSI_TASK_ENTRIES)
 		return NULL;
 
 	cmd = qedi->itt_map[tid].p_cmd;
@@ -2179,9 +2180,12 @@ static int __qedi_probe(struct pci_dev *pdev, int mode)
 		goto free_host;
 	}
 
-	qedi->msix_count = MAX_NUM_MSIX_PF;
 	atomic_set(&qedi->link_state, QEDI_LINK_DOWN);
 
+	rc = qedi_ops->fill_dev_info(qedi->cdev, &qedi->dev_info);
+	if (rc)
+		goto free_host;
+
 	if (mode != QEDI_MODE_RECOVERY) {
 		rc = qedi_set_iscsi_pf_param(qedi);
 		if (rc) {
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 08a1feb3a195..9ce28c4f9812 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -318,6 +318,8 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
 		return -EINVAL;
 	if (start > ha->optrom_size)
 		return -EINVAL;
+	if (size > ha->optrom_size - start)
+		size = ha->optrom_size - start;
 
 	mutex_lock(&ha->optrom_mutex);
 	switch (val) {
@@ -343,8 +345,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
 		}
 
 		ha->optrom_region_start = start;
-		ha->optrom_region_size = start + size > ha->optrom_size ?
-		    ha->optrom_size - start : size;
+		ha->optrom_region_size = start + size;
 
 		ha->optrom_state = QLA_SREADING;
 		ha->optrom_buffer = vmalloc(ha->optrom_region_size);
@@ -417,8 +418,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
 		}
 
 		ha->optrom_region_start = start;
-		ha->optrom_region_size = start + size > ha->optrom_size ?
-		    ha->optrom_size - start : size;
+		ha->optrom_region_size = start + size;
 
 		ha->optrom_state = QLA_SWRITING;
 		ha->optrom_buffer = vmalloc(ha->optrom_region_size);
@@ -565,47 +565,17 @@ qla2x00_sysfs_read_sfp(struct file *filp, struct kobject *kobj,
 {
 	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
 	    struct device, kobj)));
-	struct qla_hw_data *ha = vha->hw;
-	uint16_t iter, addr, offset;
 	int rval;
 
-	if (!capable(CAP_SYS_ADMIN) || off != 0 || count != SFP_DEV_SIZE * 2)
+	if (!capable(CAP_SYS_ADMIN) || off != 0 || count < SFP_DEV_SIZE)
 		return 0;
 
-	if (ha->sfp_data)
-		goto do_read;
-
-	ha->sfp_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
-	    &ha->sfp_data_dma);
-	if (!ha->sfp_data) {
-		ql_log(ql_log_warn, vha, 0x706c,
-		    "Unable to allocate memory for SFP read-data.\n");
+	if (qla2x00_reset_active(vha))
 		return 0;
-	}
-
-do_read:
-	memset(ha->sfp_data, 0, SFP_BLOCK_SIZE);
-	addr = 0xa0;
-	for (iter = 0, offset = 0; iter < (SFP_DEV_SIZE * 2) / SFP_BLOCK_SIZE;
-	    iter++, offset += SFP_BLOCK_SIZE) {
-		if (iter == 4) {
-			/* Skip to next device address. */
-			addr = 0xa2;
-			offset = 0;
-		}
-
-		rval = qla2x00_read_sfp(vha, ha->sfp_data_dma, ha->sfp_data,
-		    addr, offset, SFP_BLOCK_SIZE, BIT_1);
-		if (rval != QLA_SUCCESS) {
-			ql_log(ql_log_warn, vha, 0x706d,
-			    "Unable to read SFP data (%x/%x/%x).\n", rval,
-			    addr, offset);
 
-			return -EIO;
-		}
-		memcpy(buf, ha->sfp_data, SFP_BLOCK_SIZE);
-		buf += SFP_BLOCK_SIZE;
-	}
+	rval = qla2x00_read_sfp_dev(vha, buf, count);
+	if (rval)
+		return -EIO;
 
 	return count;
 }
@@ -615,7 +585,7 @@ static struct bin_attribute sysfs_sfp_attr = {
 		.name = "sfp",
 		.mode = S_IRUSR | S_IWUSR,
 	},
-	.size = SFP_DEV_SIZE * 2,
+	.size = SFP_DEV_SIZE,
 	.read = qla2x00_sysfs_read_sfp,
 };
 
@@ -1511,6 +1481,38 @@ qla2x00_pep_version_show(struct device *dev, struct device_attribute *attr,
 	    ha->pep_version[0], ha->pep_version[1], ha->pep_version[2]);
 }
 
+static ssize_t
+qla2x00_min_link_speed_show(struct device *dev, struct device_attribute *attr,
+    char *buf)
+{
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+	struct qla_hw_data *ha = vha->hw;
+
+	if (!IS_QLA27XX(ha))
+		return scnprintf(buf, PAGE_SIZE, "\n");
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n",
+	    ha->min_link_speed == 5 ? "32Gbps" :
+	    ha->min_link_speed == 4 ? "16Gbps" :
+	    ha->min_link_speed == 3 ? "8Gbps" :
+	    ha->min_link_speed == 2 ? "4Gbps" :
+	    ha->min_link_speed != 0 ? "unknown" : "");
+}
+
+static ssize_t
+qla2x00_max_speed_sup_show(struct device *dev, struct device_attribute *attr,
+    char *buf)
+{
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+	struct qla_hw_data *ha = vha->hw;
+
+	if (!IS_QLA27XX(ha))
+		return scnprintf(buf, PAGE_SIZE, "\n");
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n",
+	    ha->max_speed_sup ? "32Gbps" : "16Gbps");
+}
+
 static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL);
 static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
 static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
@@ -1556,6 +1558,8 @@ static DEVICE_ATTR(allow_cna_fw_dump, S_IRUGO | S_IWUSR,
 		   qla2x00_allow_cna_fw_dump_show,
 		   qla2x00_allow_cna_fw_dump_store);
 static DEVICE_ATTR(pep_version, S_IRUGO, qla2x00_pep_version_show, NULL);
+static DEVICE_ATTR(min_link_speed, S_IRUGO, qla2x00_min_link_speed_show, NULL);
+static DEVICE_ATTR(max_speed_sup, S_IRUGO, qla2x00_max_speed_sup_show, NULL);
 
 struct device_attribute *qla2x00_host_attrs[] = {
 	&dev_attr_driver_version,
@@ -1590,6 +1594,8 @@ struct device_attribute *qla2x00_host_attrs[] = {
 	&dev_attr_fw_dump_size,
 	&dev_attr_allow_cna_fw_dump,
 	&dev_attr_pep_version,
+	&dev_attr_min_link_speed,
+	&dev_attr_max_speed_sup,
 	NULL,
 };
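The optrom sysfs hunks above are the "integer overflow in sysfs code" fix called out in the merge log: start is already validated against optrom_size, so clamping size to optrom_size - start keeps start + size from wrapping, and the later region-size assignment can then use start + size directly. A standalone sketch of that check order; clamp_optrom_request is an illustrative name:

#include <stdint.h>
#include <stdio.h>

/* Returns the region size to use, or 0 if the request lies outside the flash. */
static uint32_t clamp_optrom_request(uint32_t start, uint32_t size,
				     uint32_t optrom_size)
{
	if (start > optrom_size)
		return 0;
	/* compare against the remaining space; start + size itself could wrap */
	if (size > optrom_size - start)
		size = optrom_size - start;
	return size;
}

int main(void)
{
	/* a request whose start + size would overflow 32 bits */
	printf("%u\n", clamp_optrom_request(0x100, 0xffffffff, 0x200000));
	return 0;
}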
 
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 26751d34bcf2..3e9dc54b89a3 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -14,7 +14,7 @@
  * | Module Init and Probe        |       0x0193       | 0x0146         |
  * |                              |                    | 0x015b-0x0160	|
  * |                              |                    | 0x016e		|
- * | Mailbox commands             |       0x1199       | 0x1193		|
+ * | Mailbox commands             |       0x1205       | 0x11a2-0x11ff	|
  * | Device Discovery             |       0x2134       | 0x210e-0x2116  |
  * |				  | 		       | 0x211a         |
  * |                              |                    | 0x211c-0x2128  |
@@ -41,7 +41,7 @@
  * |                              |                    | 0x70ad-0x70ae  |
  * |                              |                    | 0x70d0-0x70d6	|
  * |                              |                    | 0x70d7-0x70db  |
- * | Task Management              |       0x8042       | 0x8000,0x800b  |
+ * | Task Management              |       0x8042       | 0x8000         |
  * |                              |                    | 0x8019         |
  * |                              |                    | 0x8025,0x8026  |
  * |                              |                    | 0x8031,0x8032  |
@@ -2520,8 +2520,6 @@ qla83xx_fw_dump_failed:
 static inline int
 ql_mask_match(uint32_t level)
 {
-	if (ql2xextended_error_logging == 1)
-		ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
 	return (level & ql2xextended_error_logging) == level;
 }
 
@@ -2738,9 +2736,9 @@ ql_dump_regs(uint32_t level, scsi_qla_host_t *vha, int32_t id)
 		mbx_reg = MAILBOX_REG(ha, reg, 0);
 
 	ql_dbg(level, vha, id, "Mailbox registers:\n");
-	for (i = 0; i < 6; i++)
+	for (i = 0; i < 6; i++, mbx_reg++)
 		ql_dbg(level, vha, id,
-		    "mbox[%d] 0x%04x\n", i, RD_REG_WORD(mbx_reg++));
+		    "mbox[%d] 0x%04x\n", i, RD_REG_WORD(mbx_reg));
 }
 
 
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 0730b10b4280..486c075998f6 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -427,6 +427,7 @@ struct srb_iocb {
 			enum nvmefc_fcp_datadir dir;
 			uint32_t dl;
 			uint32_t timeout_sec;
+			struct	list_head   entry;
 		} nvme;
 	} u;
 
@@ -470,7 +471,7 @@ typedef struct srb {
 	uint8_t cmd_type;
 	uint8_t pad[3];
 	atomic_t ref_count;
-	wait_queue_head_t nvme_ls_waitQ;
+	wait_queue_head_t nvme_ls_waitq;
 	struct fc_port *fcport;
 	struct scsi_qla_host *vha;
 	uint32_t handle;
@@ -901,6 +902,7 @@ struct mbx_cmd_32 {
 #define MBA_SHUTDOWN_REQUESTED	0x8062	/* Shutdown Requested */
 #define MBA_TEMPERATURE_ALERT	0x8070	/* Temperature Alert */
 #define MBA_DPORT_DIAGNOSTICS	0x8080	/* D-port Diagnostics */
+#define MBA_TRANS_INSERT	0x8130	/* Transceiver Insertion */
 #define MBA_FW_INIT_FAILURE	0x8401	/* Firmware initialization failure */
 #define MBA_MIRROR_LUN_CHANGE	0x8402	/* Mirror LUN State Change
 					   Notification */
@@ -977,6 +979,7 @@ struct mbx_cmd_32 {
 #define MBC_ABORT_TARGET		0x17	/* Abort target (ID). */
 #define MBC_RESET			0x18	/* Reset. */
 #define MBC_GET_ADAPTER_LOOP_ID		0x20	/* Get loop id of ISP2200. */
+#define MBC_GET_SET_ZIO_THRESHOLD	0x21	/* Get/SET ZIO THRESHOLD. */
 #define MBC_GET_RETRY_COUNT		0x22	/* Get f/w retry cnt/delay. */
 #define MBC_DISABLE_VI			0x24	/* Disable VI operation. */
 #define MBC_ENABLE_VI			0x25	/* Enable VI operation. */
@@ -2301,8 +2304,7 @@ typedef struct fc_port {
 	unsigned int login_succ:1;
 
 	struct work_struct nvme_del_work;
-	atomic_t nvme_ref_count;
-	wait_queue_head_t nvme_waitQ;
+	struct completion nvme_del_done;
 	uint32_t nvme_prli_service_param;
 #define NVME_PRLI_SP_CONF       BIT_7
 #define NVME_PRLI_SP_INITIATOR  BIT_5
@@ -3338,6 +3340,7 @@ struct qla_qpair {
 	struct work_struct q_work;
 	struct list_head qp_list_elem; /* vha->qp_list */
 	struct list_head hints_list;
+	struct list_head nvme_done_list;
 	uint16_t cpuid;
 	struct qla_tgt_counters tgt_counters;
 };
@@ -3463,8 +3466,15 @@ struct qla_hw_data {
 		uint32_t	n2n_ae:1;
 		uint32_t	fw_started:1;
 		uint32_t	fw_init_done:1;
+
+		uint32_t	detected_lr_sfp:1;
+		uint32_t	using_lr_setting:1;
 	} flags;
 
+	uint16_t long_range_distance;	/* 32G & above */
+#define LR_DISTANCE_5K  1
+#define LR_DISTANCE_10K 0
+
 	/* This spinlock is used to protect "io transactions", you must
 	* acquire it before doing any IO to the card, eg with RD_REG*() and
 	* WRT_REG*() for the duration of your entire command transaction.
@@ -3712,7 +3722,7 @@ struct qla_hw_data {
 	struct sns_cmd_pkt	*sns_cmd;
 	dma_addr_t		sns_cmd_dma;
 
-#define SFP_DEV_SIZE    256
+#define SFP_DEV_SIZE    512
 #define SFP_BLOCK_SIZE  64
 	void		*sfp_data;
 	dma_addr_t	sfp_data_dma;
@@ -4017,8 +4027,20 @@ struct qla_hw_data {
 
 	struct qlt_hw_data tgt;
 	int	allow_cna_fw_dump;
+	uint32_t fw_ability_mask;
+	uint16_t min_link_speed;
+	uint16_t max_speed_sup;
+
+	atomic_t        nvme_active_aen_cnt;
+	uint16_t        nvme_last_rptd_aen;             /* Last recorded aen count */
 };
 
+#define FW_ABILITY_MAX_SPEED_MASK	0xFUL
+#define FW_ABILITY_MAX_SPEED_16G	0x0
+#define FW_ABILITY_MAX_SPEED_32G	0x1
+#define FW_ABILITY_MAX_SPEED(ha)	\
+	(ha->fw_ability_mask & FW_ABILITY_MAX_SPEED_MASK)
+
 /*
  * Qlogic scsi host structure
  */
@@ -4089,6 +4111,8 @@ typedef struct scsi_qla_host {
 #define FX00_CRITEMP_RECOVERY	25
 #define FX00_HOST_INFO_RESEND	26
 #define QPAIR_ONLINE_CHECK_NEEDED	27
+#define SET_ZIO_THRESHOLD_NEEDED	28
+#define DETECT_SFP_CHANGE	29
 
 	unsigned long	pci_flags;
 #define PFLG_DISCONNECTED	0	/* PCI device removed */
@@ -4129,8 +4153,7 @@ typedef struct scsi_qla_host {
 	uint8_t		fabric_node_name[WWN_SIZE];
 
 	struct		nvme_fc_local_port *nvme_local_port;
-	atomic_t	nvme_ref_count;
-	wait_queue_head_t nvme_waitQ;
+	struct completion nvme_del_done;
 	struct list_head nvme_rport_list;
 	atomic_t 	nvme_active_aen_cnt;
 	uint16_t	nvme_last_rptd_aen;
@@ -4199,6 +4222,7 @@ typedef struct scsi_qla_host {
 	int fcport_count;
 	wait_queue_head_t fcport_waitQ;
 	wait_queue_head_t vref_waitq;
+	uint8_t min_link_speed_feat;
 } scsi_qla_host_t;
 
 struct qla27xx_image_status {
@@ -4373,6 +4397,88 @@ enum nexus_wait_type {
 	WAIT_LUN,
 };
 
+/* Refer to SNIA SFF 8247 */
+struct sff_8247_a0 {
+	u8 txid;	/* transceiver id */
+	u8 ext_txid;
+	u8 connector;
+	/* compliance code */
+	u8 eth_infi_cc3;	/* ethernet, infiniband */
+	u8 sonet_cc4[2];
+	u8 eth_cc6;
+	/* link length */
+#define FC_LL_VL BIT_7	/* very long */
+#define FC_LL_S  BIT_6	/* Short */
+#define FC_LL_I  BIT_5	/* Intermediate */
+#define FC_LL_L  BIT_4	/* Long */
+#define FC_LL_M  BIT_3	/* Medium */
+#define FC_LL_SA BIT_2	/* ShortWave laser */
+#define FC_LL_LC BIT_1	/* LongWave laser */
+#define FC_LL_EL BIT_0	/* Electrical inter enclosure */
+	u8 fc_ll_cc7;
+	/* FC technology */
+#define FC_TEC_EL BIT_7	/* Electrical inter enclosure */
+#define FC_TEC_SN BIT_6	/* short wave w/o OFC */
+#define FC_TEC_SL BIT_5	/* short wave with OFC */
+#define FC_TEC_LL BIT_4	/* Longwave Laser */
+#define FC_TEC_ACT BIT_3	/* Active cable */
+#define FC_TEC_PAS BIT_2	/* Passive cable */
+	u8 fc_tec_cc8;
+	/* Transmission Media */
+#define FC_MED_TW BIT_7	/* Twin Ax */
+#define FC_MED_TP BIT_6	/* Twisted Pair */
+#define FC_MED_MI BIT_5	/* Mini Coax */
+#define FC_MED_TV BIT_4	/* Video Coax */
+#define FC_MED_M6 BIT_3	/* Multimode, 62.5um */
+#define FC_MED_M5 BIT_2	/* Multimode, 50um */
+#define FC_MED_SM BIT_0	/* Single Mode */
+	u8 fc_med_cc9;
+	/* speed FC_SP_12: 12*100M = 1200 MB/s */
+#define FC_SP_12 BIT_7
+#define FC_SP_8  BIT_6
+#define FC_SP_16 BIT_5
+#define FC_SP_4  BIT_4
+#define FC_SP_32 BIT_3
+#define FC_SP_2  BIT_2
+#define FC_SP_1  BIT_0
+	u8 fc_sp_cc10;
+	u8 encode;
+	u8 bitrate;
+	u8 rate_id;
+	u8 length_km;		/* offset 14/eh */
+	u8 length_100m;
+	u8 length_50um_10m;
+	u8 length_62um_10m;
+	u8 length_om4_10m;
+	u8 length_om3_10m;
+#define SFF_VEN_NAME_LEN 16
+	u8 vendor_name[SFF_VEN_NAME_LEN];	/* offset 20/14h */
+	u8 tx_compat;
+	u8 vendor_oui[3];
+#define SFF_PART_NAME_LEN 16
+	u8 vendor_pn[SFF_PART_NAME_LEN];	/* part number */
+	u8 vendor_rev[4];
+	u8 wavelength[2];
+	u8 resv;
+	u8 cc_base;
+	u8 options[2];	/* offset 64 */
+	u8 br_max;
+	u8 br_min;
+	u8 vendor_sn[16];
+	u8 date_code[8];
+	u8 diag;
+	u8 enh_options;
+	u8 sff_revision;
+	u8 cc_ext;
+	u8 vendor_specific[32];
+	u8 resv2[128];
+};
+
+#define AUTO_DETECT_SFP_SUPPORT(_vha)\
+	(ql2xautodetectsfp && !_vha->vp_idx &&		\
+	(IS_QLA25XX(_vha->hw) || IS_QLA81XX(_vha->hw) ||\
+	IS_QLA83XX(_vha->hw) || IS_QLA27XX(_vha->hw)))
+
 #define USER_CTRL_IRQ(_ha) (ql2xuctrlirq && QLA_TGT_MODE_ENABLED() && \
 	(IS_QLA27XX(_ha) || IS_QLA83XX(_ha)))
 
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index b9c9886e8b1d..bec641aae7b3 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -1699,6 +1699,15 @@ struct access_chip_rsp_84xx {
 #define FAC_OPT_CMD_UNLOCK_SEMAPHORE	0x04
 #define FAC_OPT_CMD_GET_SECTOR_SIZE	0x05
 
+/* enhanced features bit definitions */
+#define NEF_LR_DIST_ENABLE	BIT_0
+
+/* LR Distance bit positions */
+#define LR_DIST_NV_POS		2
+#define LR_DIST_FW_POS		12
+#define LR_DIST_FW_SHIFT	(LR_DIST_FW_POS - LR_DIST_NV_POS)
+#define LR_DIST_FW_FIELD(x)	((x) << LR_DIST_FW_SHIFT & 0xf000)
+
 struct nvram_81xx {
 	/* NVRAM header. */
 	uint8_t id[4];
@@ -1745,7 +1754,9 @@ struct nvram_81xx {
 	uint16_t reserved_6_3[14];
 
 	/* Offset 192. */
-	uint16_t reserved_7[32];
+	uint8_t min_link_speed;
+	uint8_t reserved_7_0;
+	uint16_t reserved_7[31];
 
 	/*
 	 * BIT 0  = Enable spinup delay
@@ -1839,16 +1850,13 @@ struct nvram_81xx {
 	uint8_t reserved_21[16];
 	uint16_t reserved_22[3];
 
-	/*
-	 * BIT 0 = Extended BB credits for LR
-	 * BIT 1 = Virtual Fabric Enable
-	 * BIT 2 = Enhanced Features Unused
-	 * BIT 3-7 = Enhanced Features Reserved
+	/* Offset 406 (0x196) Enhanced Features
+	 * BIT 0    = Extended BB credits for LR
+	 * BIT 1    = Virtual Fabric Enable
+	 * BIT 2-5  = Distance Support if BIT 0 is on
+	 * BIT 6-15 = Unused
 	 */
-	/* Enhanced Features */
-	uint8_t enhanced_features;
-
-	uint8_t reserved_23;
+	uint16_t enhanced_features;
 	uint16_t reserved_24[4];
 
 	/* Offset 416. */
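The LR_DIST_* macros added to qla_fw.h relocate a small bit-field: the distance code sits at bits 2-5 of the NVRAM enhanced_features word and the firmware expects it at bits 12-15, hence a left shift of 12 - 2 = 10 and a 0xf000 mask. A standalone arithmetic check; the sample NVRAM value is made up:

#include <stdio.h>
#include <stdint.h>

#define LR_DIST_NV_POS		2
#define LR_DIST_FW_POS		12
#define LR_DIST_FW_SHIFT	(LR_DIST_FW_POS - LR_DIST_NV_POS)
#define LR_DIST_FW_FIELD(x)	((x) << LR_DIST_FW_SHIFT & 0xf000)

int main(void)
{
	uint16_t nv_enhanced_features = 0x0015;	/* made up: BIT_0 set, distance code 0x5 */

	/* distance code 0x5 at bits 2-5 becomes 0x5000 at bits 12-15 */
	printf("0x%04x\n", LR_DIST_FW_FIELD(nv_enhanced_features));
	return 0;
}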
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index cadb6e3baacc..f852ca60c49f 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -10,17 +10,6 @@
 #include <linux/interrupt.h>
 
 /*
- * Global functions prototype in qla_nvme.c source file.
- */
-extern void qla_nvme_register_hba(scsi_qla_host_t *);
-extern int  qla_nvme_register_remote(scsi_qla_host_t *, fc_port_t *);
-extern void qla_nvme_delete(scsi_qla_host_t *);
-extern void qla_nvme_abort(struct qla_hw_data *, srb_t *sp);
-extern void qla24xx_nvme_ls4_iocb(scsi_qla_host_t *, struct pt_ls4_request *,
-    struct req_que *);
-extern void qla24xx_async_gffid_sp_done(void *, int);
-
-/*
  * Global Function Prototypes in qla_init.c source file.
  */
 extern int qla2x00_initialize_adapter(scsi_qla_host_t *);
@@ -116,13 +105,15 @@ int qla24xx_async_notify_ack(scsi_qla_host_t *, fc_port_t *,
 int qla24xx_post_newsess_work(struct scsi_qla_host *, port_id_t *, u8 *,
     void *);
 int qla24xx_fcport_handle_login(struct scsi_qla_host *, fc_port_t *);
-
+int qla24xx_detect_sfp(scsi_qla_host_t *vha);
+int qla24xx_post_gpdb_work(struct scsi_qla_host *, fc_port_t *, u8);
 /*
  * Global Data in qla_os.c source file.
  */
 extern char qla2x00_version_str[];
 
 extern struct kmem_cache *srb_cachep;
+extern struct kmem_cache *qla_tgt_plogi_cachep;
 
 extern int ql2xlogintimeout;
 extern int qlport_down_retry;
@@ -153,6 +144,7 @@ extern int ql2xfwholdabts;
 extern int ql2xmvasynctoatio;
 extern int ql2xuctrlirq;
 extern int ql2xnvmeenable;
+extern int ql2xautodetectsfp;
 
 extern int qla2x00_loop_reset(scsi_qla_host_t *);
 extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@@ -495,6 +487,9 @@ int qla24xx_gidlist_wait(struct scsi_qla_host *, void *, dma_addr_t,
 int __qla24xx_parse_gpdb(struct scsi_qla_host *, fc_port_t *,
 	struct port_database_24xx *);
 
+extern int qla27xx_get_zio_threshold(scsi_qla_host_t *, uint16_t *);
+extern int qla27xx_set_zio_threshold(scsi_qla_host_t *, uint16_t);
+
 /*
  * Global Function Prototypes in qla_isr.c source file.
  */
@@ -804,6 +799,7 @@ extern char *qdev_state(uint32_t);
 extern void qla82xx_clear_pending_mbx(scsi_qla_host_t *);
 extern int qla82xx_read_temperature(scsi_qla_host_t *);
 extern int qla8044_read_temperature(scsi_qla_host_t *);
+extern int qla2x00_read_sfp_dev(struct scsi_qla_host *, char *, int);
 
 /* BSG related functions */
 extern int qla24xx_bsg_request(struct bsg_job *);
@@ -873,4 +869,6 @@ void qlt_update_host_map(struct scsi_qla_host *, port_id_t);
 void qlt_remove_target_resources(struct qla_hw_data *);
 void qlt_clr_qp_table(struct scsi_qla_host *vha);
 
+void qla_nvme_cmpl_io(struct srb_iocb *);
+
 #endif /* _QLA_GBL_H */
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index b323a7c71eda..bc3db6abc9a0 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -2816,13 +2816,19 @@ void qla24xx_handle_gidpn_event(scsi_qla_host_t *vha, struct event_arg *ea)
 				case MODE_INITIATOR:
 				case MODE_DUAL:
 				default:
+					ql_dbg(ql_dbg_disc, vha, 0x201f,
+					    "%s %d %8phC post %s\n", __func__,
+					    __LINE__, fcport->port_name,
+					    (atomic_read(&fcport->state) ==
+					    FCS_ONLINE) ? "gpdb" : "gnl");
+
 					if (atomic_read(&fcport->state) ==
 					    FCS_ONLINE)
-						break;
-					ql_dbg(ql_dbg_disc, vha, 0x201f,
-					    "%s %d %8phC post gnl\n",
-					    __func__, __LINE__, fcport->port_name);
-					qla24xx_post_gnl_work(vha, fcport);
+						qla24xx_post_gpdb_work(vha,
+						    fcport, PDO_FORCE_ADISC);
+					else
+						qla24xx_post_gnl_work(vha,
+						    fcport);
 					break;
 				}
 			} else { /* fcport->d_id.b24 != ea->id.b24 */
@@ -3080,7 +3086,7 @@ int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport)
 		GPSC_RSP_SIZE);
 
 	/* GPSC req */
-	memcpy(ct_req->req.gpsc.port_name, fcport->port_name,
+	memcpy(ct_req->req.gpsc.port_name, fcport->fabric_port_name,
 		WWN_SIZE);
 
 	sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 072ad1aa5505..b5b48ddca962 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -36,7 +36,6 @@ static int qla2x00_restart_isp(scsi_qla_host_t *);
 static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *);
 static int qla84xx_init_chip(scsi_qla_host_t *);
 static int qla25xx_init_queues(struct qla_hw_data *);
-static int qla24xx_post_gpdb_work(struct scsi_qla_host *, fc_port_t *, u8);
 static int qla24xx_post_prli_work(struct scsi_qla_host*, fc_port_t *);
 static void qla24xx_handle_plogi_done_event(struct scsi_qla_host *,
     struct event_arg *);
@@ -774,8 +773,7 @@ done_free_sp:
 	return rval;
 }
 
-static int qla24xx_post_gpdb_work(struct scsi_qla_host *vha, fc_port_t *fcport,
-    u8 opt)
+int qla24xx_post_gpdb_work(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
 {
 	struct qla_work_evt *e;
 
@@ -808,6 +806,12 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
 	if (!sp)
 		goto done;
 
+	sp->type = SRB_MB_IOCB;
+	sp->name = "gpdb";
+	sp->gen1 = fcport->rscn_gen;
+	sp->gen2 = fcport->login_gen;
+	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
 	pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
 	if (pd == NULL) {
 		ql_log(ql_log_warn, vha, 0xd043,
@@ -816,12 +820,6 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
 	}
 	memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE));
 
-	sp->type = SRB_MB_IOCB;
-	sp->name = "gpdb";
-	sp->gen1 = fcport->rscn_gen;
-	sp->gen2 = fcport->login_gen;
-	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
-
 	mb = sp->u.iocb_cmd.u.mbx.out_mb;
 	mb[0] = MBC_GET_PORT_DATABASE;
 	mb[1] = fcport->loop_id;
@@ -1466,6 +1464,7 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
 				__func__, __LINE__, ea->fcport->port_name);
 			ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
 			ea->fcport->logout_on_delete = 1;
+			ea->fcport->send_els_logo = 0;
 			qla24xx_post_gpdb_work(vha, ea->fcport, 0);
 		}
 		break;
@@ -2823,6 +2822,147 @@ qla2x00_alloc_outstanding_cmds(struct qla_hw_data *ha, struct req_que *req)
 	return QLA_SUCCESS;
 }
 
+#define PRINT_FIELD(_field, _flag, _str) {		\
+	if (a0->_field & _flag) {\
+		if (p) {\
+			strcat(ptr, "|");\
+			ptr++;\
+			leftover--;\
+		} \
+		len = snprintf(ptr, leftover, "%s", _str);	\
+		p = 1;\
+		leftover -= len;\
+		ptr += len; \
+	} \
+}
+
+static void qla2xxx_print_sfp_info(struct scsi_qla_host *vha)
+{
+#define STR_LEN 64
+	struct sff_8247_a0 *a0 = (struct sff_8247_a0 *)vha->hw->sfp_data;
+	u8 str[STR_LEN], *ptr, p;
+	int leftover, len;
+
+	memset(str, 0, STR_LEN);
+	snprintf(str, SFF_VEN_NAME_LEN + 1, "%.*s", SFF_VEN_NAME_LEN,
+	    a0->vendor_name);
+	ql_dbg(ql_dbg_init, vha, 0x015a,
+	    "SFP MFG Name: %s\n", str);
+
+	memset(str, 0, STR_LEN);
+	snprintf(str, SFF_PART_NAME_LEN + 1, "%.*s", SFF_PART_NAME_LEN,
+	    a0->vendor_pn);
+	ql_dbg(ql_dbg_init, vha, 0x015c,
+	    "SFP Part Name: %s\n", str);
+
+	/* media */
+	memset(str, 0, STR_LEN);
+	ptr = str;
+	leftover = STR_LEN;
+	p = len = 0;
+	PRINT_FIELD(fc_med_cc9, FC_MED_TW, "Twin AX");
+	PRINT_FIELD(fc_med_cc9, FC_MED_TP, "Twisted Pair");
+	PRINT_FIELD(fc_med_cc9, FC_MED_MI, "Min Coax");
+	PRINT_FIELD(fc_med_cc9, FC_MED_TV, "Video Coax");
+	PRINT_FIELD(fc_med_cc9, FC_MED_M6, "MultiMode 62.5um");
+	PRINT_FIELD(fc_med_cc9, FC_MED_M5, "MultiMode 50um");
+	PRINT_FIELD(fc_med_cc9, FC_MED_SM, "SingleMode");
+	ql_dbg(ql_dbg_init, vha, 0x0160,
+	    "SFP Media: %s\n", str);
+
+	/* link length */
+	memset(str, 0, STR_LEN);
+	ptr = str;
+	leftover = STR_LEN;
+	p = len = 0;
+	PRINT_FIELD(fc_ll_cc7, FC_LL_VL, "Very Long");
+	PRINT_FIELD(fc_ll_cc7, FC_LL_S, "Short");
+	PRINT_FIELD(fc_ll_cc7, FC_LL_I, "Intermediate");
+	PRINT_FIELD(fc_ll_cc7, FC_LL_L, "Long");
+	PRINT_FIELD(fc_ll_cc7, FC_LL_M, "Medium");
+	ql_dbg(ql_dbg_init, vha, 0x0196,
+	    "SFP Link Length: %s\n", str);
+
+	memset(str, 0, STR_LEN);
+	ptr = str;
+	leftover = STR_LEN;
+	p = len = 0;
+	PRINT_FIELD(fc_ll_cc7, FC_LL_SA, "Short Wave (SA)");
+	PRINT_FIELD(fc_ll_cc7, FC_LL_LC, "Long Wave(LC)");
+	PRINT_FIELD(fc_tec_cc8, FC_TEC_SN, "Short Wave (SN)");
+	PRINT_FIELD(fc_tec_cc8, FC_TEC_SL, "Short Wave (SL)");
+	PRINT_FIELD(fc_tec_cc8, FC_TEC_LL, "Long Wave (LL)");
+	ql_dbg(ql_dbg_init, vha, 0x016e,
+	    "SFP FC Link Tech: %s\n", str);
+
+	if (a0->length_km)
+		ql_dbg(ql_dbg_init, vha, 0x016f,
+		    "SFP Distance: %d km\n", a0->length_km);
+	if (a0->length_100m)
+		ql_dbg(ql_dbg_init, vha, 0x0170,
+		    "SFP Distance: %d m\n", a0->length_100m*100);
+	if (a0->length_50um_10m)
+		ql_dbg(ql_dbg_init, vha, 0x0189,
+		    "SFP Distance (WL=50um): %d m\n", a0->length_50um_10m * 10);
+	if (a0->length_62um_10m)
+		ql_dbg(ql_dbg_init, vha, 0x018a,
+		  "SFP Distance (WL=62.5um): %d m\n", a0->length_62um_10m * 10);
+	if (a0->length_om4_10m)
+		ql_dbg(ql_dbg_init, vha, 0x0194,
+		    "SFP Distance (OM4): %d m\n", a0->length_om4_10m * 10);
+	if (a0->length_om3_10m)
+		ql_dbg(ql_dbg_init, vha, 0x0195,
+		    "SFP Distance (OM3): %d m\n", a0->length_om3_10m * 10);
+}
+
+
+/*
+ * Return Code:
+ *   QLA_SUCCESS: no action
+ *   QLA_INTERFACE_ERROR: SFP is not there.
+ *   QLA_FUNCTION_FAILED: detected New SFP
+ */
+int
+qla24xx_detect_sfp(scsi_qla_host_t *vha)
+{
+	int rc = QLA_SUCCESS;
+	struct sff_8247_a0 *a;
+	struct qla_hw_data *ha = vha->hw;
+
+	if (!AUTO_DETECT_SFP_SUPPORT(vha))
+		goto out;
+
+	rc = qla2x00_read_sfp_dev(vha, NULL, 0);
+	if (rc)
+		goto out;
+
+	a = (struct sff_8247_a0 *)vha->hw->sfp_data;
+	qla2xxx_print_sfp_info(vha);
+
+	if (a->fc_ll_cc7 & FC_LL_VL || a->fc_ll_cc7 & FC_LL_L) {
+		/* long range */
+		ha->flags.detected_lr_sfp = 1;
+
+		if (a->length_km > 5 || a->length_100m > 50)
+			ha->long_range_distance = LR_DISTANCE_10K;
+		else
+			ha->long_range_distance = LR_DISTANCE_5K;
+
+		if (ha->flags.detected_lr_sfp != ha->flags.using_lr_setting)
+			ql_dbg(ql_dbg_async, vha, 0x507b,
+			    "Detected Long Range SFP.\n");
+	} else {
+		/* short range */
+		ha->flags.detected_lr_sfp = 0;
+		if (ha->flags.using_lr_setting)
+			ql_dbg(ql_dbg_async, vha, 0x5084,
+			    "Detected Short Range SFP.\n");
+	}
+
+	if (!vha->flags.init_done)
+		rc = QLA_SUCCESS;
+out:
+	return rc;
+}
+
 /**
  * qla2x00_setup_chip() - Load and start RISC firmware.
  * @ha: HA context
@@ -2879,6 +3019,8 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
 			rval = qla2x00_execute_fw(vha, srisc_address);
 			/* Retrieve firmware information. */
 			if (rval == QLA_SUCCESS) {
+				qla24xx_detect_sfp(vha);
+
 				rval = qla2x00_set_exlogins_buffer(vha);
 				if (rval != QLA_SUCCESS)
 					goto failed;
@@ -4609,24 +4751,16 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
 			qla2x00_fdmi_register(vha);
 
 		/* Ensure we are logged into the SNS. */
-		if (IS_FWI2_CAPABLE(ha))
-			loop_id = NPH_SNS;
-		else
-			loop_id = SIMPLE_NAME_SERVER;
+		loop_id = NPH_SNS_LID(ha);
 		rval = ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff,
 		    0xfc, mb, BIT_1|BIT_0);
-		if (rval != QLA_SUCCESS) {
+		if (rval != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
+			ql_dbg(ql_dbg_disc, vha, 0x20a1,
+			    "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x (%x).\n",
+			    loop_id, mb[0], mb[1], mb[2], mb[6], mb[7], rval);
 			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
 			return rval;
 		}
-		if (mb[0] != MBS_COMMAND_COMPLETE) {
-			ql_dbg(ql_dbg_disc, vha, 0x20a1,
-			    "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x "
-			    "mb[6]=%x mb[7]=%x.\n", loop_id, mb[0], mb[1],
-			    mb[2], mb[6], mb[7]);
-			return (QLA_SUCCESS);
-		}
-
 		if (test_and_clear_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags)) {
 			if (qla2x00_rft_id(vha)) {
 				/* EMPTY */
@@ -4804,6 +4938,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
 				new_fcport->fc4_type = swl[swl_idx].fc4_type;
 
 				new_fcport->nvme_flag = 0;
+				new_fcport->fc4f_nvme = 0;
 				if (vha->flags.nvme_enabled &&
 				    swl[swl_idx].fc4f_nvme) {
 					new_fcport->fc4f_nvme =
@@ -5913,7 +6048,7 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
 
 	if (!status) {
 		ql_dbg(ql_dbg_taskm, vha, 0x8022, "%s succeeded.\n", __func__);
-
+		qla2x00_configure_hba(vha);
 		spin_lock_irqsave(&ha->vport_slock, flags);
 		list_for_each_entry(vp, &ha->vp_list, list) {
 			if (vp->vp_idx) {
@@ -7804,7 +7939,9 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos,
 		ha->queue_pair_map[qpair_id] = qpair;
 		qpair->id = qpair_id;
 		qpair->vp_idx = vp_idx;
+		qpair->fw_started = ha->flags.fw_started;
 		INIT_LIST_HEAD(&qpair->hints_list);
+		INIT_LIST_HEAD(&qpair->nvme_done_list);
 		qpair->chip_reset = ha->base_qpair->chip_reset;
 		qpair->enable_class_2 = ha->base_qpair->enable_class_2;
 		qpair->enable_explicit_conf =
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index a36c485fae50..2f94159186d7 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -2682,12 +2682,12 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
 	uint32_t        *cur_dsd;
 	struct scatterlist *sg;
 	int index;
-	uint16_t tot_dsds;
+	uint16_t cmd_dsds, rsp_dsds;
 	scsi_qla_host_t *vha = sp->vha;
 	struct qla_hw_data *ha = vha->hw;
 	struct bsg_job *bsg_job = sp->u.bsg_job;
-	int loop_iterartion = 0;
 	int entry_count = 1;
+	cont_a64_entry_t *cont_pkt = NULL;
 
 	ct_iocb->entry_type = CT_IOCB_TYPE;
         ct_iocb->entry_status = 0;
@@ -2698,30 +2698,46 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
 	ct_iocb->vp_index = sp->vha->vp_idx;
 	ct_iocb->comp_status = cpu_to_le16(0);
 
-	ct_iocb->cmd_dsd_count =
-		cpu_to_le16(bsg_job->request_payload.sg_cnt);
+	cmd_dsds = bsg_job->request_payload.sg_cnt;
+	rsp_dsds = bsg_job->reply_payload.sg_cnt;
+
+	ct_iocb->cmd_dsd_count = cpu_to_le16(cmd_dsds);
         ct_iocb->timeout = 0;
-        ct_iocb->rsp_dsd_count =
-		cpu_to_le16(bsg_job->reply_payload.sg_cnt);
-        ct_iocb->rsp_byte_count =
-            cpu_to_le32(bsg_job->reply_payload.payload_len);
+	ct_iocb->rsp_dsd_count = cpu_to_le16(rsp_dsds);
         ct_iocb->cmd_byte_count =
             cpu_to_le32(bsg_job->request_payload.payload_len);
-        ct_iocb->dseg_0_address[0] = cpu_to_le32(LSD(sg_dma_address
-            (bsg_job->request_payload.sg_list)));
-        ct_iocb->dseg_0_address[1] = cpu_to_le32(MSD(sg_dma_address
-           (bsg_job->request_payload.sg_list)));
-        ct_iocb->dseg_0_len = cpu_to_le32(sg_dma_len
-            (bsg_job->request_payload.sg_list));
 
-	avail_dsds = 1;
-	cur_dsd = (uint32_t *)ct_iocb->dseg_1_address;
+	avail_dsds = 2;
+	cur_dsd = (uint32_t *)ct_iocb->dseg_0_address;
 	index = 0;
-	tot_dsds = bsg_job->reply_payload.sg_cnt;
 
-	for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
+	for_each_sg(bsg_job->request_payload.sg_list, sg, cmd_dsds, index) {
+		dma_addr_t       sle_dma;
+
+		/* Allocate additional continuation packets? */
+		if (avail_dsds == 0) {
+			/*
+			 * Five DSDs are available in the Cont.
+			 * Type 1 IOCB.
+			 */
+			cont_pkt = qla2x00_prep_cont_type1_iocb(
+			    vha, ha->req_q_map[0]);
+			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
+			avail_dsds = 5;
+			entry_count++;
+		}
+
+		sle_dma = sg_dma_address(sg);
+		*cur_dsd++   = cpu_to_le32(LSD(sle_dma));
+		*cur_dsd++   = cpu_to_le32(MSD(sle_dma));
+		*cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
+		avail_dsds--;
+	}
+
+	index = 0;
+
+	for_each_sg(bsg_job->reply_payload.sg_list, sg, rsp_dsds, index) {
 		dma_addr_t       sle_dma;
-		cont_a64_entry_t *cont_pkt;
 
 		/* Allocate additional continuation packets? */
 		if (avail_dsds == 0) {
@@ -2740,7 +2756,6 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
 		*cur_dsd++   = cpu_to_le32(LSD(sle_dma));
 		*cur_dsd++   = cpu_to_le32(MSD(sle_dma));
 		*cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
-		loop_iterartion++;
 		avail_dsds--;
 	}
         ct_iocb->entry_count = entry_count;
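The reworked qla24xx_ct_iocb() above walks the command and response scatterlists against one shared descriptor budget: the base CT IOCB carries 2 data segment descriptors and every Continuation Type 1 IOCB adds 5 more, so entry_count grows once per extra group of five. A standalone sketch of that accounting, separate from the real IOCB structures:

#include <stdio.h>

/* Entries needed: 1 base IOCB (2 DSDs) plus one continuation per extra 5 DSDs. */
static int ct_iocb_entry_count(int cmd_dsds, int rsp_dsds)
{
	int total = cmd_dsds + rsp_dsds;

	if (total <= 2)
		return 1;
	return 1 + (total - 2 + 4) / 5;	/* 1 + ceil((total - 2) / 5) */
}

int main(void)
{
	printf("%d\n", ct_iocb_entry_count(1, 1));	/* 2 DSDs fit in the base IOCB -> 1 */
	printf("%d\n", ct_iocb_entry_count(3, 6));	/* 9 DSDs -> 1 + ceil(7/5) = 3 */
	return 0;
}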
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 7b3b702ef622..9d9668aac6f6 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -14,6 +14,8 @@
 #include <scsi/scsi_tcq.h>
 #include <scsi/scsi_bsg_fc.h>
 #include <scsi/scsi_eh.h>
+#include <scsi/fc/fc_fs.h>
+#include <linux/nvme-fc-driver.h>
 
 static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
 static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
@@ -452,7 +454,7 @@ qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
 			uint16_t peg_fw_state, nw_interface_link_up;
 			uint16_t nw_interface_signal_detect, sfp_status;
 			uint16_t htbt_counter, htbt_monitor_enable;
-			uint16_t sfp_additonal_info, sfp_multirate;
+			uint16_t sfp_additional_info, sfp_multirate;
 			uint16_t sfp_tx_fault, link_speed, dcbx_status;
 
 			/*
@@ -492,7 +494,7 @@ qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
 			sfp_status = ((mb[2] & 0x0c00) >> 10);
 			htbt_counter = ((mb[2] & 0x7000) >> 12);
 			htbt_monitor_enable = ((mb[2] & 0x8000) >> 15);
-			sfp_additonal_info = (mb[6] & 0x0003);
+			sfp_additional_info = (mb[6] & 0x0003);
 			sfp_multirate = ((mb[6] & 0x0004) >> 2);
 			sfp_tx_fault = ((mb[6] & 0x0008) >> 3);
 			link_speed = ((mb[6] & 0x0070) >> 4);
@@ -507,9 +509,9 @@ qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
 			    sfp_status);
 			ql_log(ql_log_warn, vha, 0x5067,
 			    "htbt_counter=0x%x, htbt_monitor_enable=0x%x, "
-			    "sfp_additonal_info=0x%x, sfp_multirate=0x%x.\n ",
+			    "sfp_additional_info=0x%x, sfp_multirate=0x%x.\n ",
 			    htbt_counter, htbt_monitor_enable,
-			    sfp_additonal_info, sfp_multirate);
+			    sfp_additional_info, sfp_multirate);
 			ql_log(ql_log_warn, vha, 0x5068,
 			    "sfp_tx_fault=0x%x, link_state=0x%x, "
 			    "dcbx_status=0x%x.\n", sfp_tx_fault, link_speed,
@@ -799,6 +801,11 @@ skip_rio:
 
 		vha->flags.management_server_logged_in = 0;
 		qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
+
+		if (AUTO_DETECT_SFP_SUPPORT(vha)) {
+			set_bit(DETECT_SFP_CHANGE, &vha->dpc_flags);
+			qla2xxx_wake_dpc(vha);
+		}
 		break;
 
 	case MBA_LOOP_DOWN:		/* Loop Down Event */
@@ -1228,6 +1235,11 @@ global_port_update:
 			schedule_work(&ha->board_disable);
 		break;
 
+	case MBA_TRANS_INSERT:
+		ql_dbg(ql_dbg_async, vha, 0x5091,
+		    "Transceiver Insertion: %04x\n", mb[1]);
+		break;
+
 	default:
 		ql_dbg(ql_dbg_async, vha, 0x5057,
 		    "Unknown AEN:%04x %04x %04x %04x\n",
@@ -1537,8 +1549,6 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
 	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
 	if (!sp)
 		return;
-	bsg_job = sp->u.bsg_job;
-	bsg_reply = bsg_job->reply;
 
 	type = NULL;
 	switch (sp->type) {
@@ -1577,6 +1587,8 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
 	/* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
 	 * fc payload  to the caller
 	 */
+	bsg_job = sp->u.bsg_job;
+	bsg_reply = bsg_job->reply;
 	bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
 	bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);
 
@@ -1823,7 +1835,7 @@ qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
 	nvme = &sp->u.iocb_cmd;
 
 	if (unlikely(nvme->u.nvme.aen_op))
-		atomic_dec(&sp->vha->nvme_active_aen_cnt);
+		atomic_dec(&sp->vha->hw->nvme_active_aen_cnt);
 
 	/*
 	 * State flags: Bit 6 and 0.
@@ -1856,17 +1868,42 @@ qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
 	fd->transferred_length = fd->payload_length -
 	    le32_to_cpu(sts->residual_len);
 
+	/*
+	 * If there is a transport error, fail the request (the HBA rejects
+	 * it); otherwise let the transport handle the completion status.
+	 */
 	if (sts->entry_status) {
 		ql_log(ql_log_warn, fcport->vha, 0x5038,
 		    "NVME-%s error - hdl=%x entry-status(%x).\n",
 		    sp->name, sp->handle, sts->entry_status);
 		ret = QLA_FUNCTION_FAILED;
-	} else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
-		ql_log(ql_log_warn, fcport->vha, 0x5039,
-		    "NVME-%s error - hdl=%x completion status(%x) resid=%x  ox_id=%x\n",
-		    sp->name, sp->handle, sts->comp_status,
-		    le32_to_cpu(sts->residual_len), sts->ox_id);
-		ret = QLA_FUNCTION_FAILED;
+	} else {
+		switch (le16_to_cpu(sts->comp_status)) {
+		case CS_COMPLETE:
+			ret = 0;
+			break;
+
+		case CS_ABORTED:
+		case CS_RESET:
+		case CS_PORT_UNAVAILABLE:
+		case CS_PORT_LOGGED_OUT:
+		case CS_PORT_BUSY:
+			ql_log(ql_log_warn, fcport->vha, 0x5060,
+			    "NVME-%s ERR Handling - hdl=%x completion status(%x) resid=%x  ox_id=%x\n",
+			    sp->name, sp->handle, sts->comp_status,
+			    le32_to_cpu(sts->residual_len), sts->ox_id);
+			fd->transferred_length = fd->payload_length;
+			ret = QLA_ABORTED;
+			break;
+
+		default:
+			ql_log(ql_log_warn, fcport->vha, 0x5060,
+			    "NVME-%s error - hdl=%x completion status(%x) resid=%x  ox_id=%x\n",
+			    sp->name, sp->handle, sts->comp_status,
+			    le32_to_cpu(sts->residual_len), sts->ox_id);
+			ret = QLA_FUNCTION_FAILED;
+			break;
+		}
 	}
 	sp->done(sp, ret);
 }
@@ -2827,8 +2864,8 @@ qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
 	sp->done(sp, 0);
 }
 
-void qla24xx_nvme_ls4_iocb(scsi_qla_host_t *vha, struct pt_ls4_request *pkt,
-    struct req_que *req)
+void qla24xx_nvme_ls4_iocb(struct scsi_qla_host *vha,
+    struct pt_ls4_request *pkt, struct req_que *req)
 {
 	srb_t *sp;
 	const char func[] = "LS4_IOCB";
@@ -3132,7 +3169,6 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
 	struct device_reg_24xx __iomem *reg;
 	struct scsi_qla_host *vha;
 	unsigned long flags;
-	uint32_t stat = 0;
 
 	rsp = (struct rsp_que *) dev_id;
 	if (!rsp) {
@@ -3146,19 +3182,11 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 
 	vha = pci_get_drvdata(ha->pdev);
-	/*
-	 * Use host_status register to check to PCI disconnection before we
-	 * we process the response queue.
-	 */
-	stat = RD_REG_DWORD(&reg->host_status);
-	if (qla2x00_check_reg32_for_disconnect(vha, stat))
-		goto out;
 	qla24xx_process_response_queue(vha, rsp);
 	if (!ha->flags.disable_msix_handshake) {
 		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
 		RD_REG_DWORD_RELAXED(&reg->hccr);
 	}
-out:
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
 	return IRQ_HANDLED;
@@ -3429,7 +3457,7 @@ msix_register_fail:
 	}
 
 	/* Enable MSI-X vector for response queue update for queue 0 */
-	if (IS_QLA25XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+	if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
 		if (ha->msixbase && ha->mqiobase &&
 		    (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
 		     ql2xmqsupport))
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 7c6d1a404011..99502fa90810 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -54,6 +54,10 @@ static struct rom_cmd {
 	{ MBC_GET_MEM_OFFLOAD_CNTRL_STAT },
 	{ MBC_GET_RETRY_COUNT },
 	{ MBC_TRACE_CONTROL },
+	{ MBC_INITIALIZE_MULTIQ },
+	{ MBC_IOCB_COMMAND_A64 },
+	{ MBC_GET_ADAPTER_LOOP_ID },
+	{ MBC_READ_SFP },
 };
 
 static int is_rom_cmd(uint16_t cmd)
@@ -102,7 +106,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
 	uint16_t __iomem *optr;
 	uint32_t	cnt;
 	uint32_t	mboxes;
-	uint16_t __iomem *mbx_reg;
 	unsigned long	wait_time;
 	struct qla_hw_data *ha = vha->hw;
 	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
@@ -486,21 +489,24 @@ premature_exit:
 
 mbx_done:
 	if (rval) {
-		ql_dbg(ql_dbg_disc, base_vha, 0x1020,
-		    "**** Failed mbx[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x, cmd=%x ****.\n",
-		    mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], command);
-
+		if (ql2xextended_error_logging & (ql_dbg_disc|ql_dbg_mbx)) {
+			pr_warn("%s [%s]-%04x:%ld: **** Failed", QL_MSGHDR,
+			    dev_name(&ha->pdev->dev), 0x1020+0x800,
+			    vha->host_no);
+			mboxes = mcp->in_mb;
+			cnt = 4;
+			for (i = 0; i < ha->mbx_count && cnt; i++, mboxes >>= 1)
+				if (mboxes & BIT_0) {
+					printk(" mb[%u]=%x", i, mcp->mb[i]);
+					cnt--;
+				}
+			pr_warn(" cmd=%x ****\n", command);
+		}
 		ql_dbg(ql_dbg_mbx, vha, 0x1198,
-		    "host status: 0x%x, flags:0x%lx, intr ctrl reg:0x%x, intr status:0x%x\n",
+		    "host_status=%#x intr_ctrl=%#x intr_status=%#x\n",
 		    RD_REG_DWORD(&reg->isp24.host_status),
-		    ha->fw_dump_cap_flags,
 		    RD_REG_DWORD(&reg->isp24.ictrl),
 		    RD_REG_DWORD(&reg->isp24.istatus));
-
-		mbx_reg = &reg->isp24.mailbox0;
-		for (i = 0; i < 6; i++)
-			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1199,
-			    "mbox[%d] 0x%04x\n", i, RD_REG_WORD(mbx_reg++));
 	} else {
 		ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__);
 	}
@@ -561,6 +567,28 @@ qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
 
 #define	EXTENDED_BB_CREDITS	BIT_0
 #define	NVME_ENABLE_FLAG	BIT_3
+static inline uint16_t qla25xx_set_sfp_lr_dist(struct qla_hw_data *ha)
+{
+	uint16_t mb4 = BIT_0;
+
+	if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
+		mb4 |= ha->long_range_distance << LR_DIST_FW_POS;
+
+	return mb4;
+}
+
+static inline uint16_t qla25xx_set_nvr_lr_dist(struct qla_hw_data *ha)
+{
+	uint16_t mb4 = BIT_0;
+
+	if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+		struct nvram_81xx *nv = ha->nvram;
+
+		mb4 |= LR_DIST_FW_FIELD(nv->enhanced_features);
+	}
+
+	return mb4;
+}
 
 /*
  * qla2x00_execute_fw
@@ -595,17 +623,44 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
 		mcp->mb[1] = MSW(risc_addr);
 		mcp->mb[2] = LSW(risc_addr);
 		mcp->mb[3] = 0;
+		mcp->mb[4] = 0;
+		ha->flags.using_lr_setting = 0;
 		if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
 		    IS_QLA27XX(ha)) {
-			struct nvram_81xx *nv = ha->nvram;
-			mcp->mb[4] = (nv->enhanced_features &
-			    EXTENDED_BB_CREDITS);
-		} else
-			mcp->mb[4] = 0;
+			if (ql2xautodetectsfp) {
+				if (ha->flags.detected_lr_sfp) {
+					mcp->mb[4] |=
+					    qla25xx_set_sfp_lr_dist(ha);
+					ha->flags.using_lr_setting = 1;
+				}
+			} else {
+				struct nvram_81xx *nv = ha->nvram;
+				/* set LR distance if specified in nvram */
+				if (nv->enhanced_features &
+				    NEF_LR_DIST_ENABLE) {
+					mcp->mb[4] |=
+					    qla25xx_set_nvr_lr_dist(ha);
+					ha->flags.using_lr_setting = 1;
+				}
+			}
+		}
 
 		if (ql2xnvmeenable && IS_QLA27XX(ha))
 			mcp->mb[4] |= NVME_ENABLE_FLAG;
 
+		if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+			struct nvram_81xx *nv = ha->nvram;
+			/* set minimum speed if specified in nvram */
+			if (nv->min_link_speed >= 2 &&
+			    nv->min_link_speed <= 5) {
+				mcp->mb[4] |= BIT_4;
+				mcp->mb[11] = nv->min_link_speed;
+				mcp->out_mb |= MBX_11;
+				mcp->in_mb |= BIT_5;
+				vha->min_link_speed_feat = nv->min_link_speed;
+			}
+		}
+
 		if (ha->flags.exlogins_enabled)
 			mcp->mb[4] |= ENABLE_EXTENDED_LOGIN;
 
@@ -613,7 +668,7 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
 			mcp->mb[4] |= ENABLE_EXCHANGE_OFFLD;
 
 		mcp->out_mb |= MBX_4|MBX_3|MBX_2|MBX_1;
-		mcp->in_mb |= MBX_1;
+		mcp->in_mb |= MBX_3 | MBX_2 | MBX_1;
 	} else {
 		mcp->mb[1] = LSW(risc_addr);
 		mcp->out_mb |= MBX_1;
@@ -632,12 +687,30 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
 	} else {
 		if (IS_FWI2_CAPABLE(ha)) {
-			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1027,
-			    "Done exchanges=%x.\n", mcp->mb[1]);
-		} else {
-			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028,
-			    "Done %s.\n", __func__);
+			ha->fw_ability_mask = mcp->mb[3] << 16 | mcp->mb[2];
+			ql_dbg(ql_dbg_mbx, vha, 0x119a,
+			    "fw_ability_mask=%x.\n", ha->fw_ability_mask);
+			ql_dbg(ql_dbg_mbx, vha, 0x1027,
+			    "exchanges=%x.\n", mcp->mb[1]);
+			if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+				ha->max_speed_sup = mcp->mb[2] & BIT_0;
+				ql_dbg(ql_dbg_mbx, vha, 0x119b,
+				    "Maximum speed supported=%s.\n",
+				    ha->max_speed_sup ? "32Gbps" : "16Gbps");
+				if (vha->min_link_speed_feat) {
+					ha->min_link_speed = mcp->mb[5];
+					ql_dbg(ql_dbg_mbx, vha, 0x119c,
+					    "Minimum speed set=%s.\n",
+					    mcp->mb[5] == 5 ? "32Gbps" :
+					    mcp->mb[5] == 4 ? "16Gbps" :
+					    mcp->mb[5] == 3 ? "8Gbps" :
+					    mcp->mb[5] == 2 ? "4Gbps" :
+						"unknown");
+				}
+			}
 		}
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028,
+		    "Done.\n");
 	}
 
 	return rval;
@@ -947,20 +1020,12 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
 			    "%s: Firmware supports Exchange Offload 0x%x\n",
 			    __func__, ha->fw_attributes_h);
 
-		/* bit 26 of fw_attributes */
-		if ((ha->fw_attributes_h & 0x400) && ql2xnvmeenable) {
-			struct init_cb_24xx *icb;
-
-			icb = (struct init_cb_24xx *)ha->init_cb;
-			/*
-			 * fw supports nvme and driver load
-			 * parameter requested nvme
-			 */
+		/*
+		 * FW supports nvme and driver load parameter requested nvme.
+		 * BIT 26 of fw_attributes indicates NVMe support.
+		 */
+		if ((ha->fw_attributes_h & 0x400) && ql2xnvmeenable)
 			vha->flags.nvme_enabled = 1;
-			icb->firmware_options_2 &= cpu_to_le32(~0xf);
-			ha->zio_mode = 0;
-			ha->zio_timer = 0;
-		}
 
 	}
 
@@ -1673,7 +1738,11 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
 		    "Failed=%x mb[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x,.\n",
 		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3]);
 	} else {
-		/*EMPTY*/
+		if (IS_QLA27XX(ha)) {
+			if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
+				ql_dbg(ql_dbg_mbx, vha, 0x119d,
+				    "Invalid SFP/Validation Failed\n");
+		}
 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104e,
 		    "Done %s.\n", __func__);
 	}
@@ -1878,6 +1947,7 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
 	int rval;
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
+	struct qla_hw_data *ha = vha->hw;
 
 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1054,
 	    "Entered %s.\n", __func__);
@@ -1906,7 +1976,11 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
 		/*EMPTY*/
 		ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval);
 	} else {
-		/*EMPTY*/
+		if (IS_QLA27XX(ha)) {
+			if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
+				ql_dbg(ql_dbg_mbx, vha, 0x119e,
+				    "Invalid SFP/Validation Failed\n");
+		}
 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1056,
 		    "Done %s.\n", __func__);
 	}
@@ -3689,7 +3763,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
 				if (qla_ini_mode_enabled(vha) &&
 				    ha->flags.fawwpn_enabled &&
 				    (rptid_entry->u.f1.flags &
-				     VP_FLAGS_NAME_VALID)) {
+				     BIT_6)) {
 					memcpy(vha->port_name,
 					    rptid_entry->u.f1.port_name,
 					    WWN_SIZE);
@@ -4590,6 +4664,10 @@ qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
 	if (rval != QLA_SUCCESS) {
 		ql_dbg(ql_dbg_mbx, vha, 0x10e9,
 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+		if (mcp->mb[0] == MBS_COMMAND_ERROR &&
+		    mcp->mb[1] == 0x22)
+			/* sfp is not there */
+			rval = QLA_INTERFACE_ERROR;
 	} else {
 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
 		    "Done %s.\n", __func__);
@@ -5817,7 +5895,7 @@ qla26xx_dport_diagnostics(scsi_qla_host_t *vha,
 
 	dd_dma = dma_map_single(&vha->hw->pdev->dev,
 	    dd_buf, size, DMA_FROM_DEVICE);
-	if (!dd_dma) {
+	if (dma_mapping_error(&vha->hw->pdev->dev, dd_dma)) {
 		ql_log(ql_log_warn, vha, 0x1194, "Failed to map dma buffer.\n");
 		return QLA_MEMORY_ALLOC_FAILED;
 	}
@@ -6085,3 +6163,108 @@ int qla24xx_gidlist_wait(struct scsi_qla_host *vha,
 done:
 	return rval;
 }
+
+int qla27xx_set_zio_threshold(scsi_qla_host_t *vha, uint16_t value)
+{
+	int rval;
+	mbx_cmd_t	mc;
+	mbx_cmd_t	*mcp = &mc;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1200,
+	    "Entered %s\n", __func__);
+
+	memset(mcp->mb, 0, sizeof(mcp->mb));
+	mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
+	mcp->mb[1] = cpu_to_le16(1);
+	mcp->mb[2] = cpu_to_le16(value);
+	mcp->out_mb = MBX_2 | MBX_1 | MBX_0;
+	mcp->in_mb = MBX_2 | MBX_0;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+
+	rval = qla2x00_mailbox_command(vha, mcp);
+
+	ql_dbg(ql_dbg_mbx, vha, 0x1201, "%s %x\n",
+	    (rval != QLA_SUCCESS) ? "Failed"  : "Done", rval);
+
+	return rval;
+}
+
+int qla27xx_get_zio_threshold(scsi_qla_host_t *vha, uint16_t *value)
+{
+	int rval;
+	mbx_cmd_t	mc;
+	mbx_cmd_t	*mcp = &mc;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1203,
+	    "Entered %s\n", __func__);
+
+	memset(mcp->mb, 0, sizeof(mcp->mb));
+	mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
+	mcp->mb[1] = cpu_to_le16(0);
+	mcp->out_mb = MBX_1 | MBX_0;
+	mcp->in_mb = MBX_2 | MBX_0;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+
+	rval = qla2x00_mailbox_command(vha, mcp);
+	if (rval == QLA_SUCCESS)
+		*value = mc.mb[2];
+
+	ql_dbg(ql_dbg_mbx, vha, 0x1205, "%s %x\n",
+	    (rval != QLA_SUCCESS) ? "Failed" : "Done", rval);
+
+	return rval;
+}
+
+int
+qla2x00_read_sfp_dev(struct scsi_qla_host *vha, char *buf, int count)
+{
+	struct qla_hw_data *ha = vha->hw;
+	uint16_t iter, addr, offset;
+	dma_addr_t phys_addr;
+	int rval, c;
+	u8 *sfp_data;
+
+	memset(ha->sfp_data, 0, SFP_DEV_SIZE);
+	addr = 0xa0;
+	phys_addr = ha->sfp_data_dma;
+	sfp_data = ha->sfp_data;
+	offset = c = 0;
+
+	for (iter = 0; iter < SFP_DEV_SIZE / SFP_BLOCK_SIZE; iter++) {
+		if (iter == 4) {
+			/* Skip to next device address. */
+			addr = 0xa2;
+			offset = 0;
+		}
+
+		rval = qla2x00_read_sfp(vha, phys_addr, sfp_data,
+		    addr, offset, SFP_BLOCK_SIZE, BIT_1);
+		if (rval != QLA_SUCCESS) {
+			ql_log(ql_log_warn, vha, 0x706d,
+			    "Unable to read SFP data (%x/%x/%x).\n", rval,
+			    addr, offset);
+
+			return rval;
+		}
+
+		if (buf && (c < count)) {
+			u16 sz;
+
+			if ((count - c) >= SFP_BLOCK_SIZE)
+				sz = SFP_BLOCK_SIZE;
+			else
+				sz = count - c;
+
+			memcpy(buf, sfp_data, sz);
+			buf += SFP_BLOCK_SIZE;
+			c += sz;
+		}
+		phys_addr += SFP_BLOCK_SIZE;
+		sfp_data  += SFP_BLOCK_SIZE;
+		offset += SFP_BLOCK_SIZE;
+	}
+
+	return rval;
+}
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index f0605cd196fb..c0f8f6c17b79 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -74,7 +74,7 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
 	 * ensures no active vp_list traversal while the vport is removed
 	 * from the queue)
 	 */
-	wait_event_timeout(vha->vref_waitq, atomic_read(&vha->vref_count),
+	wait_event_timeout(vha->vref_waitq, !atomic_read(&vha->vref_count),
 	    10*HZ);
 
 	spin_lock_irqsave(&ha->vport_slock, flags);
@@ -187,6 +187,11 @@ qla24xx_enable_vp(scsi_qla_host_t *vha)
 		!(ha->current_topology & ISP_CFG_F)) {
 		vha->vp_err_state =  VP_ERR_PORTDWN;
 		fc_vport_set_state(vha->fc_vport, FC_VPORT_LINKDOWN);
+		ql_dbg(ql_dbg_taskm, vha, 0x800b,
+		    "%s skip enable. loop_state %x topo %x\n",
+		    __func__, base_vha->loop_state.counter,
+		    ha->current_topology);
+
 		goto enable_failed;
 	}
 
@@ -759,11 +764,18 @@ static void qla_do_work(struct work_struct *work)
 	struct qla_qpair *qpair = container_of(work, struct qla_qpair, q_work);
 	struct scsi_qla_host *vha;
 	struct qla_hw_data *ha = qpair->hw;
+	struct srb_iocb	*nvme, *nxt_nvme;
 
 	spin_lock_irqsave(&qpair->qp_lock, flags);
 	vha = pci_get_drvdata(ha->pdev);
 	qla24xx_process_response_queue(vha, qpair->rsp);
 	spin_unlock_irqrestore(&qpair->qp_lock, flags);
+
+	list_for_each_entry_safe(nvme, nxt_nvme, &qpair->nvme_done_list,
+		    u.nvme.entry) {
+		list_del_init(&nvme->u.nvme.entry);
+		qla_nvme_cmpl_io(nvme);
+	}
 }
 
 /* create response queue */
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
index 10b742d27e16..e23a3d4c36f3 100644
--- a/drivers/scsi/qla2xxx/qla_mr.c
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -1819,6 +1819,10 @@ qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type)
 	if (!sp)
 		goto done;
 
+	sp->type = SRB_FXIOCB_DCMD;
+	sp->name = "fxdisc";
+	qla2x00_init_timer(sp, FXDISC_TIMEOUT);
+
 	fdisc = &sp->u.iocb_cmd;
 	switch (fx_type) {
 	case FXDISC_GET_CONFIG_INFO:
@@ -1920,9 +1924,6 @@ qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type)
 			goto done_unmap_req;
 	}
 
-	sp->type = SRB_FXIOCB_DCMD;
-	sp->name = "fxdisc";
-	qla2x00_init_timer(sp, FXDISC_TIMEOUT);
 	fdisc->timeout = qla2x00_fxdisc_iocb_timeout;
 	fdisc->u.fxiocb.req_func_type = cpu_to_le16(fx_type);
 	sp->done = qla2x00_fxdisc_sp_done;
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index f3710a75fe1f..1f59e7a74c7b 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -5,7 +5,6 @@
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
 #include "qla_nvme.h"
-#include "qla_def.h"
 #include <linux/scatterlist.h>
 #include <linux/delay.h>
 #include <linux/nvme.h>
@@ -15,7 +14,7 @@ static struct nvme_fc_port_template qla_nvme_fc_transport;
 
 static void qla_nvme_unregister_remote_port(struct work_struct *);
 
-int qla_nvme_register_remote(scsi_qla_host_t *vha, fc_port_t *fcport)
+int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
 {
 	struct nvme_rport *rport;
 	int ret;
@@ -61,8 +60,8 @@ int qla_nvme_register_remote(scsi_qla_host_t *vha, fc_port_t *fcport)
 	rport->req.port_id = fcport->d_id.b24;
 
 	ql_log(ql_log_info, vha, 0x2102,
-	    "%s: traddr=pn-0x%016llx:nn-0x%016llx PortID:%06x\n",
-	    __func__, rport->req.port_name, rport->req.node_name,
+	    "%s: traddr=nn-0x%016llx:pn-0x%016llx PortID:%06x\n",
+	    __func__, rport->req.node_name, rport->req.port_name,
 	    rport->req.port_id);
 
 	ret = nvme_fc_register_remoteport(vha->nvme_local_port, &rport->req,
@@ -76,16 +75,14 @@ int qla_nvme_register_remote(scsi_qla_host_t *vha, fc_port_t *fcport)
 
 	fcport->nvme_remote_port->private = fcport;
 	fcport->nvme_flag |= NVME_FLAG_REGISTERED;
-	atomic_set(&fcport->nvme_ref_count, 1);
-	init_waitqueue_head(&fcport->nvme_waitQ);
 	rport->fcport = fcport;
 	list_add_tail(&rport->list, &vha->nvme_rport_list);
 	return 0;
 }
 
 /* Allocate a queue for NVMe traffic */
-static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport, unsigned int qidx,
-    u16 qsize, void **handle)
+static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport,
+    unsigned int qidx, u16 qsize, void **handle)
 {
 	struct scsi_qla_host *vha;
 	struct qla_hw_data *ha;
@@ -157,6 +154,16 @@ static void qla_nvme_sp_ls_done(void *ptr, int res)
 	qla2x00_rel_sp(sp);
 }
 
+void qla_nvme_cmpl_io(struct srb_iocb *nvme)
+{
+	srb_t *sp;
+	struct nvmefc_fcp_req *fd = nvme->u.nvme.desc;
+
+	sp = container_of(nvme, srb_t, u.iocb_cmd);
+	fd->done(fd);
+	qla2xxx_rel_qpair_sp(sp->qpair, sp);
+}
+
 static void qla_nvme_sp_done(void *ptr, int res)
 {
 	srb_t *sp = ptr;
@@ -172,13 +179,14 @@ static void qla_nvme_sp_done(void *ptr, int res)
 	if (!(sp->fcport->nvme_flag & NVME_FLAG_REGISTERED))
 		goto rel;
 
-	if (unlikely(nvme->u.nvme.comp_status || res))
-		fd->status = -EINVAL;
+	if (unlikely(res == QLA_FUNCTION_FAILED))
+		fd->status = NVME_SC_FC_TRANSPORT_ERROR;
 	else
 		fd->status = 0;
 
 	fd->rcv_rsplen = nvme->u.nvme.rsp_pyld_len;
-	fd->done(fd);
+	list_add_tail(&nvme->u.nvme.entry, &sp->qpair->nvme_done_list);
+	return;
 rel:
 	qla2xxx_rel_qpair_sp(sp->qpair, sp);
 }
@@ -193,13 +201,11 @@ static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport,
 	struct qla_hw_data *ha = fcport->vha->hw;
 
 	rval = ha->isp_ops->abort_command(sp);
-	if (rval != QLA_SUCCESS)
-		ql_log(ql_log_warn, fcport->vha, 0x2125,
-		    "%s: failed to abort LS command for SP:%p rval=%x\n",
-		    __func__, sp, rval);
 
 	ql_dbg(ql_dbg_io, fcport->vha, 0x212b,
-	    "%s: aborted sp:%p on fcport:%p\n", __func__, sp, fcport);
+	    "%s: %s LS command for sp=%p on fcport=%p rval=%x\n", __func__,
+	    (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted",
+	    sp, fcport, rval);
 }
 
 static void qla_nvme_ls_complete(struct work_struct *work)
@@ -214,7 +220,7 @@ static void qla_nvme_ls_complete(struct work_struct *work)
 static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
     struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
 {
-	fc_port_t *fcport = (fc_port_t *)rport->private;
+	fc_port_t *fcport = rport->private;
 	struct srb_iocb   *nvme;
 	struct nvme_private *priv = fd->private;
 	struct scsi_qla_host *vha;
@@ -236,7 +242,6 @@ static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
 	sp->name = "nvme_ls";
 	sp->done = qla_nvme_sp_ls_done;
 	atomic_set(&sp->ref_count, 1);
-	init_waitqueue_head(&sp->nvme_ls_waitQ);
 	nvme = &sp->u.iocb_cmd;
 	priv->sp = sp;
 	priv->fd = fd;
@@ -258,7 +263,7 @@ static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
 		ql_log(ql_log_warn, vha, 0x700e,
 		    "qla2x00_start_sp failed = %d\n", rval);
 		atomic_dec(&sp->ref_count);
-		wake_up(&sp->nvme_ls_waitQ);
+		wake_up(&sp->nvme_ls_waitq);
 		return rval;
 	}
 
@@ -276,20 +281,18 @@ static void qla_nvme_fcp_abort(struct nvme_fc_local_port *lport,
 	struct qla_hw_data *ha = fcport->vha->hw;
 
 	rval = ha->isp_ops->abort_command(sp);
-	if (!rval)
-		ql_log(ql_log_warn, fcport->vha, 0x2127,
-		    "%s: failed to abort command for SP:%p rval=%x\n",
-		    __func__, sp, rval);
 
-	ql_dbg(ql_dbg_io, fcport->vha, 0x2126,
-	    "%s: aborted sp:%p on fcport:%p\n", __func__, sp, fcport);
+	ql_dbg(ql_dbg_io, fcport->vha, 0x2127,
+	    "%s: %s command for sp=%p on fcport=%p rval=%x\n", __func__,
+	    (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted",
+	    sp, fcport, rval);
 }
 
 static void qla_nvme_poll(struct nvme_fc_local_port *lport, void *hw_queue_handle)
 {
 	struct scsi_qla_host *vha = lport->private;
 	unsigned long flags;
-	struct qla_qpair *qpair = (struct qla_qpair *)hw_queue_handle;
+	struct qla_qpair *qpair = hw_queue_handle;
 
 	/* Acquire ring specific lock */
 	spin_lock_irqsave(&qpair->qp_lock, flags);
@@ -310,6 +313,7 @@ static int qla2x00_start_nvme_mq(srb_t *sp)
 	uint16_t	avail_dsds;
 	uint32_t	*cur_dsd;
 	struct req_que *req = NULL;
+	struct rsp_que *rsp = NULL;
 	struct scsi_qla_host *vha = sp->fcport->vha;
 	struct qla_hw_data *ha = vha->hw;
 	struct qla_qpair *qpair = sp->qpair;
@@ -318,13 +322,15 @@ static int qla2x00_start_nvme_mq(srb_t *sp)
 	struct nvmefc_fcp_req *fd = nvme->u.nvme.desc;
 	uint32_t        rval = QLA_SUCCESS;
 
-	/* Setup qpair pointers */
-	req = qpair->req;
 	tot_dsds = fd->sg_cnt;
 
 	/* Acquire qpair specific lock */
 	spin_lock_irqsave(&qpair->qp_lock, flags);
 
+	/* Setup qpair pointers */
+	req = qpair->req;
+	rsp = qpair->rsp;
+
 	/* Check for room in outstanding command list. */
 	handle = req->current_outstanding_cmd;
 	for (index = 1; index < req->num_outstanding_cmds; index++) {
@@ -359,7 +365,7 @@ static int qla2x00_start_nvme_mq(srb_t *sp)
 		struct nvme_fc_cmd_iu *cmd = fd->cmdaddr;
 		if (cmd->sqe.common.opcode == nvme_admin_async_event) {
 			nvme->u.nvme.aen_op = 1;
-			atomic_inc(&vha->nvme_active_aen_cnt);
+			atomic_inc(&vha->hw->nvme_active_aen_cnt);
 		}
 	}
 
@@ -472,6 +478,11 @@ static int qla2x00_start_nvme_mq(srb_t *sp)
 	/* Set chip new ring index. */
 	WRT_REG_DWORD(req->req_q_in, req->ring_index);
 
+	/* Manage unprocessed RIO/ZIO commands in response queue. */
+	if (vha->flags.process_response_queue &&
+	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
+		qla24xx_process_response_queue(vha, rsp);
+
 queuing_error:
 	spin_unlock_irqrestore(&qpair->qp_lock, flags);
 	return rval;
@@ -487,7 +498,7 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
 	struct scsi_qla_host *vha;
 	int rval = QLA_FUNCTION_FAILED;
 	srb_t *sp;
-	struct qla_qpair *qpair = (struct qla_qpair *)hw_queue_handle;
+	struct qla_qpair *qpair = hw_queue_handle;
 	struct nvme_private *priv;
 
 	if (!fd) {
@@ -496,7 +507,7 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
 	}
 
 	priv = fd->private;
-	fcport = (fc_port_t *)rport->private;
+	fcport = rport->private;
 	if (!fcport) {
 		ql_log(ql_log_warn, NULL, 0x210e, "No fcport ptr\n");
 		return rval;
@@ -512,7 +523,7 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
 		return -EIO;
 
 	atomic_set(&sp->ref_count, 1);
-	init_waitqueue_head(&sp->nvme_ls_waitQ);
+	init_waitqueue_head(&sp->nvme_ls_waitq);
 	priv->sp = sp;
 	sp->type = SRB_NVME_CMD;
 	sp->name = "nvme_cmd";
@@ -526,7 +537,7 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
 		ql_log(ql_log_warn, vha, 0x212d,
 		    "qla2x00_start_nvme_mq failed = %d\n", rval);
 		atomic_dec(&sp->ref_count);
-		wake_up(&sp->nvme_ls_waitQ);
+		wake_up(&sp->nvme_ls_waitq);
 		return -EIO;
 	}
 
@@ -537,12 +548,10 @@ static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
 {
 	struct scsi_qla_host *vha = lport->private;
 
-	atomic_dec(&vha->nvme_ref_count);
-	wake_up_all(&vha->nvme_waitQ);
-
 	ql_log(ql_log_info, vha, 0x210f,
 	    "localport delete of %p completed.\n", vha->nvme_local_port);
 	vha->nvme_local_port = NULL;
+	complete(&vha->nvme_del_done);
 }
 
 static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
@@ -550,11 +559,9 @@ static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
 	fc_port_t *fcport;
 	struct nvme_rport *r_port, *trport;
 
-	fcport = (fc_port_t *)rport->private;
+	fcport = rport->private;
 	fcport->nvme_remote_port = NULL;
 	fcport->nvme_flag &= ~NVME_FLAG_REGISTERED;
-	atomic_dec(&fcport->nvme_ref_count);
-	wake_up_all(&fcport->nvme_waitQ);
 
 	list_for_each_entry_safe(r_port, trport,
 	    &fcport->vha->nvme_rport_list, list) {
@@ -564,6 +571,7 @@ static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
 		}
 	}
 	kfree(r_port);
+	complete(&fcport->nvme_del_done);
 
 	ql_log(ql_log_info, fcport->vha, 0x2110,
 	    "remoteport_delete of %p completed.\n", fcport);
@@ -594,7 +602,7 @@ static int qla_nvme_wait_on_command(srb_t *sp)
 {
 	int ret = QLA_SUCCESS;
 
-	wait_event_timeout(sp->nvme_ls_waitQ, (atomic_read(&sp->ref_count) > 1),
+	wait_event_timeout(sp->nvme_ls_waitq, (atomic_read(&sp->ref_count) > 1),
 	    NVME_ABORT_POLLING_PERIOD*HZ);
 
 	if (atomic_read(&sp->ref_count) > 1)
@@ -606,12 +614,11 @@ static int qla_nvme_wait_on_command(srb_t *sp)
 static int qla_nvme_wait_on_rport_del(fc_port_t *fcport)
 {
 	int ret = QLA_SUCCESS;
+	int timeout;
 
-	wait_event_timeout(fcport->nvme_waitQ,
-	    atomic_read(&fcport->nvme_ref_count),
-	    NVME_ABORT_POLLING_PERIOD*HZ);
-
-	if (atomic_read(&fcport->nvme_ref_count)) {
+	timeout = wait_for_completion_timeout(&fcport->nvme_del_done,
+	    msecs_to_jiffies(2000));
+	if (!timeout) {
 		ret = QLA_FUNCTION_FAILED;
 		ql_log(ql_log_info, fcport->vha, 0x2111,
 		    "timed out waiting for fcport=%p to delete\n", fcport);
@@ -620,50 +627,14 @@ static int qla_nvme_wait_on_rport_del(fc_port_t *fcport)
 	return ret;
 }
 
-void qla_nvme_abort(struct qla_hw_data *ha, srb_t *sp)
+void qla_nvme_abort(struct qla_hw_data *ha, struct srb *sp)
 {
 	int rval;
 
 	rval = ha->isp_ops->abort_command(sp);
-	if (!rval) {
-		if (!qla_nvme_wait_on_command(sp))
-			ql_log(ql_log_warn, NULL, 0x2112,
-			    "nvme_wait_on_command timed out waiting on sp=%p\n",
-			    sp);
-	}
-}
-
-static void qla_nvme_abort_all(fc_port_t *fcport)
-{
-	int que, cnt;
-	unsigned long flags;
-	srb_t *sp;
-	struct qla_hw_data *ha = fcport->vha->hw;
-	struct req_que *req;
-
-	spin_lock_irqsave(&ha->hardware_lock, flags);
-	for (que = 0; que < ha->max_req_queues; que++) {
-		req = ha->req_q_map[que];
-		if (!req)
-			continue;
-		if (!req->outstanding_cmds)
-			continue;
-		for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
-			sp = req->outstanding_cmds[cnt];
-			if ((sp) && ((sp->type == SRB_NVME_CMD) ||
-			    (sp->type == SRB_NVME_LS)) &&
-				(sp->fcport == fcport)) {
-				atomic_inc(&sp->ref_count);
-				spin_unlock_irqrestore(&ha->hardware_lock,
-				    flags);
-				qla_nvme_abort(ha, sp);
-				spin_lock_irqsave(&ha->hardware_lock, flags);
-				req->outstanding_cmds[cnt] = NULL;
-				sp->done(sp, 1);
-			}
-		}
-	}
-	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	if (!rval && !qla_nvme_wait_on_command(sp))
+		ql_log(ql_log_warn, NULL, 0x2112,
+		    "nvme_wait_on_command timed out waiting on sp=%p\n", sp);
 }
 
 static void qla_nvme_unregister_remote_port(struct work_struct *work)
@@ -675,18 +646,23 @@ static void qla_nvme_unregister_remote_port(struct work_struct *work)
 	if (!IS_ENABLED(CONFIG_NVME_FC))
 		return;
 
+	ql_log(ql_log_warn, NULL, 0x2112,
+	    "%s: unregister remoteport on %p\n", __func__, fcport);
+
 	list_for_each_entry_safe(rport, trport,
 	    &fcport->vha->nvme_rport_list, list) {
 		if (rport->fcport == fcport) {
 			ql_log(ql_log_info, fcport->vha, 0x2113,
 			    "%s: fcport=%p\n", __func__, fcport);
+			init_completion(&fcport->nvme_del_done);
 			nvme_fc_unregister_remoteport(
 			    fcport->nvme_remote_port);
+			qla_nvme_wait_on_rport_del(fcport);
 		}
 	}
 }
 
-void qla_nvme_delete(scsi_qla_host_t *vha)
+void qla_nvme_delete(struct scsi_qla_host *vha)
 {
 	struct nvme_rport *rport, *trport;
 	fc_port_t *fcport;
@@ -701,12 +677,13 @@ void qla_nvme_delete(scsi_qla_host_t *vha)
 		ql_log(ql_log_info, fcport->vha, 0x2114, "%s: fcport=%p\n",
 		    __func__, fcport);
 
+		init_completion(&fcport->nvme_del_done);
 		nvme_fc_unregister_remoteport(fcport->nvme_remote_port);
 		qla_nvme_wait_on_rport_del(fcport);
-		qla_nvme_abort_all(fcport);
 	}
 
 	if (vha->nvme_local_port) {
+		init_completion(&vha->nvme_del_done);
 		nv_ret = nvme_fc_unregister_localport(vha->nvme_local_port);
 		if (nv_ret == 0)
 			ql_log(ql_log_info, vha, 0x2116,
@@ -715,10 +692,12 @@ void qla_nvme_delete(scsi_qla_host_t *vha)
 		else
 			ql_log(ql_log_info, vha, 0x2115,
 			    "Unregister of localport failed\n");
+		wait_for_completion_timeout(&vha->nvme_del_done,
+		    msecs_to_jiffies(5000));
 	}
 }
 
-void qla_nvme_register_hba(scsi_qla_host_t *vha)
+void qla_nvme_register_hba(struct scsi_qla_host *vha)
 {
 	struct nvme_fc_port_template *tmpl;
 	struct qla_hw_data *ha;
@@ -744,8 +723,8 @@ void qla_nvme_register_hba(scsi_qla_host_t *vha)
 	pinfo.port_id = vha->d_id.b24;
 
 	ql_log(ql_log_info, vha, 0xffff,
-	    "register_localport: host-traddr=pn-0x%llx:nn-0x%llx on portID:%x\n",
-	    pinfo.port_name, pinfo.node_name, pinfo.port_id);
+	    "register_localport: host-traddr=nn-0x%llx:pn-0x%llx on portID:%x\n",
+	    pinfo.node_name, pinfo.port_name, pinfo.port_id);
 	qla_nvme_fc_transport.dma_boundary = vha->host->dma_boundary;
 
 	ret = nvme_fc_register_localport(&pinfo, tmpl,
@@ -755,7 +734,5 @@ void qla_nvme_register_hba(scsi_qla_host_t *vha)
 		    "register_localport failed: ret=%x\n", ret);
 		return;
 	}
-	atomic_set(&vha->nvme_ref_count, 1);
 	vha->nvme_local_port->private = vha;
-	init_waitqueue_head(&vha->nvme_waitQ);
 }
diff --git a/drivers/scsi/qla2xxx/qla_nvme.h b/drivers/scsi/qla2xxx/qla_nvme.h
index dfe56f207b28..7f05fa1c77db 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.h
+++ b/drivers/scsi/qla2xxx/qla_nvme.h
@@ -12,12 +12,18 @@
 #include <uapi/scsi/fc/fc_els.h>
 #include <linux/nvme-fc-driver.h>
 
+#include "qla_def.h"
+
 #define NVME_ATIO_CMD_OFF 32
 #define NVME_FIRST_PACKET_CMDLEN (64 - NVME_ATIO_CMD_OFF)
 #define Q2T_NVME_NUM_TAGS 2048
 #define QLA_MAX_FC_SEGMENTS 64
 
+struct scsi_qla_host;
+struct qla_hw_data;
+struct req_que;
 struct srb;
+
 struct nvme_private {
 	struct srb	*sp;
 	struct nvmefc_ls_req *fd;
@@ -129,4 +135,15 @@ struct pt_ls4_rx_unsol {
 	uint32_t desc_len;
 	uint32_t payload[3];
 };
+
+/*
+ * Global function prototypes in qla_nvme.c source file.
+ */
+void qla_nvme_register_hba(struct scsi_qla_host *);
+int  qla_nvme_register_remote(struct scsi_qla_host *, struct fc_port *);
+void qla_nvme_delete(struct scsi_qla_host *);
+void qla_nvme_abort(struct qla_hw_data *, struct srb *sp);
+void qla24xx_nvme_ls4_iocb(struct scsi_qla_host *, struct pt_ls4_request *,
+    struct req_que *);
+void qla24xx_async_gffid_sp_done(void *, int);
 #endif
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index df57655779ed..5b2437a5ea44 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -113,12 +113,12 @@ MODULE_PARM_DESC(ql2xfdmienable,
 		"Enables FDMI registrations. "
 		"0 - no FDMI. Default is 1 - perform FDMI.");
 
-#define MAX_Q_DEPTH	32
+#define MAX_Q_DEPTH	64
 static int ql2xmaxqdepth = MAX_Q_DEPTH;
 module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(ql2xmaxqdepth,
 		"Maximum queue depth to set for each LUN. "
-		"Default is 32.");
+		"Default is 64.");
 
 #if (IS_ENABLED(CONFIG_NVME_FC))
 int ql2xenabledif;
@@ -200,7 +200,7 @@ MODULE_PARM_DESC(ql2xgffidenable,
 		"Enables GFF_ID checks of port type. "
 		"Default is 0 - Do not use GFF_ID information.");
 
-int ql2xasynctmfenable;
+int ql2xasynctmfenable = 1;
 module_param(ql2xasynctmfenable, int, S_IRUGO);
 MODULE_PARM_DESC(ql2xasynctmfenable,
 		"Enables issue of TM IOCBs asynchronously via IOCB mechanism"
@@ -262,6 +262,12 @@ MODULE_PARM_DESC(ql2xmvasynctoatio,
 		"0 (Default). Do not move IOCBs"
 		"1 - Move IOCBs.");
 
+int ql2xautodetectsfp = 1;
+module_param(ql2xautodetectsfp, int, 0444);
+MODULE_PARM_DESC(ql2xautodetectsfp,
+		 "Detect SFP range and set appropriate distance.\n"
+		 "1 (Default): Enable\n");
+
 /*
  * SCSI host template entry points
  */
@@ -379,6 +385,7 @@ static void qla_init_base_qpair(struct scsi_qla_host *vha, struct req_que *req,
 	ha->base_qpair->use_shadow_reg = IS_SHADOW_REG_CAPABLE(ha) ? 1 : 0;
 	ha->base_qpair->msix = &ha->msix_entries[QLA_MSIX_RSP_Q];
 	INIT_LIST_HEAD(&ha->base_qpair->hints_list);
+	INIT_LIST_HEAD(&ha->base_qpair->nvme_done_list);
 	ha->base_qpair->enable_class_2 = ql2xenableclass2;
 	/* init qpair to this cpu. Will adjust at run time. */
 	qla_cpu_update(rsp->qpair, smp_processor_id());
@@ -710,7 +717,7 @@ qla2x00_sp_free_dma(void *ptr)
 	}
 
 end:
-	if ((sp->type != SRB_NVME_CMD) && (sp->type != SRB_NVME_LS)) {
+	if (sp->type != SRB_NVME_CMD && sp->type != SRB_NVME_LS) {
 		CMD_SP(cmd) = NULL;
 		qla2x00_rel_sp(sp);
 	}
@@ -735,7 +742,7 @@ qla2x00_sp_compl(void *ptr, int res)
 	if (!atomic_dec_and_test(&sp->ref_count))
 		return;
 
-	qla2x00_sp_free_dma(sp);
+	sp->free(sp);
 	cmd->scsi_done(cmd);
 }
 
@@ -807,7 +814,7 @@ qla2xxx_qpair_sp_compl(void *ptr, int res)
 	if (!atomic_dec_and_test(&sp->ref_count))
 		return;
 
-	qla2xxx_qpair_sp_free_dma(sp);
+	sp->free(sp);
 	cmd->scsi_done(cmd);
 }
 
@@ -928,7 +935,7 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
 	return 0;
 
 qc24_host_busy_free_sp:
-	qla2x00_sp_free_dma(sp);
+	sp->free(sp);
 
 qc24_host_busy:
 	return SCSI_MLQUEUE_HOST_BUSY;
@@ -1017,7 +1024,7 @@ qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
 	return 0;
 
 qc24_host_busy_free_sp:
-	qla2xxx_qpair_sp_free_dma(sp);
+	sp->free(sp);
 
 qc24_host_busy:
 	return SCSI_MLQUEUE_HOST_BUSY;
@@ -1134,7 +1141,7 @@ qla2x00_wait_for_sess_deletion(scsi_qla_host_t *vha)
 {
 	qla2x00_mark_all_devices_lost(vha, 0);
 
-	wait_event(vha->fcport_waitQ, test_fcport_count(vha));
+	wait_event_timeout(vha->fcport_waitQ, test_fcport_count(vha), 10*HZ);
 }
 
 /*
@@ -1715,8 +1722,8 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
 			if (sp) {
 				req->outstanding_cmds[cnt] = NULL;
 				if (sp->cmd_type == TYPE_SRB) {
-					if ((sp->type == SRB_NVME_CMD) ||
-					    (sp->type == SRB_NVME_LS)) {
+					if (sp->type == SRB_NVME_CMD ||
+					    sp->type == SRB_NVME_LS) {
 						sp_get(sp);
 						spin_unlock_irqrestore(
 						    &ha->hardware_lock, flags);
@@ -1725,6 +1732,8 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
 						    &ha->hardware_lock, flags);
 					} else if (GET_CMD_SP(sp) &&
 					    !ha->flags.eeh_busy &&
+					    (!test_bit(ABORT_ISP_ACTIVE,
+						&vha->dpc_flags)) &&
 					    (sp->type == SRB_SCSI_CMD)) {
 						/*
 						 * Don't abort commands in
@@ -2751,6 +2760,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	spin_lock_init(&ha->tgt.sess_lock);
 	spin_lock_init(&ha->tgt.atio_lock);
 
+	atomic_set(&ha->nvme_active_aen_cnt, 0);
 
 	/* Clear our data area */
 	ha->bars = bars;
@@ -3168,7 +3178,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (ha->mqenable) {
 		bool mq = false;
 		bool startit = false;
-		ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 1);
+		ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 0);
 
 		if (QLA_TGT_MODE_ENABLED()) {
 			mq = true;
@@ -3328,6 +3338,13 @@ skip_dpc:
 	if (test_bit(UNLOADING, &base_vha->dpc_flags))
 		return -ENODEV;
 
+	if (ha->flags.detected_lr_sfp) {
+		ql_log(ql_log_info, base_vha, 0xffff,
+		    "Reset chip to pick up LR SFP setting\n");
+		set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
+		qla2xxx_wake_dpc(base_vha);
+	}
+
 	return 0;
 
 probe_init_failed:
@@ -3383,12 +3400,22 @@ qla2x00_shutdown(struct pci_dev *pdev)
 	scsi_qla_host_t *vha;
 	struct qla_hw_data  *ha;
 
-	if (!atomic_read(&pdev->enable_cnt))
-		return;
-
 	vha = pci_get_drvdata(pdev);
 	ha = vha->hw;
 
+	ql_log(ql_log_info, vha, 0xfffa,
+		"Adapter shutdown\n");
+
+	/*
+	 * Prevent future board_disable and wait
+	 * until any pending board_disable has completed.
+	 */
+	set_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags);
+	cancel_work_sync(&ha->board_disable);
+
+	if (!atomic_read(&pdev->enable_cnt))
+		return;
+
 	/* Notify ISPFX00 firmware */
 	if (IS_QLAFX00(ha))
 		qlafx00_driver_shutdown(vha, 20);
@@ -3419,8 +3446,9 @@ qla2x00_shutdown(struct pci_dev *pdev)
 
 	qla2x00_free_fw_dump(ha);
 
-	pci_disable_pcie_error_reporting(pdev);
 	pci_disable_device(pdev);
+	ql_log(ql_log_info, vha, 0xfffe,
+		"Adapter shut down successfully.\n");
 }
 
 /* Deletes all the virtual ports for a given ha */
@@ -4006,8 +4034,18 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
 		    "loop_id_map=%p.\n", ha->loop_id_map);
 	}
 
+	ha->sfp_data = dma_alloc_coherent(&ha->pdev->dev,
+	    SFP_DEV_SIZE, &ha->sfp_data_dma, GFP_KERNEL);
+	if (!ha->sfp_data) {
+		ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011b,
+		    "Unable to allocate memory for SFP read-data.\n");
+		goto fail_sfp_data;
+	}
+
 	return 0;
 
+fail_sfp_data:
+	kfree(ha->loop_id_map);
 fail_loop_id_map:
 	dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma);
 fail_async_pd:
@@ -4345,7 +4383,8 @@ qla2x00_mem_free(struct qla_hw_data *ha)
 		ha->ct_sns, ha->ct_sns_dma);
 
 	if (ha->sfp_data)
-		dma_pool_free(ha->s_dma_pool, ha->sfp_data, ha->sfp_data_dma);
+		dma_free_coherent(&ha->pdev->dev, SFP_DEV_SIZE, ha->sfp_data,
+		    ha->sfp_data_dma);
 
 	if (ha->ms_iocb)
 		dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
@@ -4638,9 +4677,10 @@ static
 void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
 {
 	unsigned long flags;
-	fc_port_t *fcport =  NULL;
+	fc_port_t *fcport =  NULL, *tfcp;
 	struct qlt_plogi_ack_t *pla =
 	    (struct qlt_plogi_ack_t *)e->u.new_sess.pla;
+	uint8_t free_fcport = 0;
 
 	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
 	fcport = qla2x00_find_fcport_by_wwpn(vha, e->u.new_sess.port_name, 1);
@@ -4655,6 +4695,7 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
 			pla->ref_count--;
 		}
 	} else {
+		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
 		fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
 		if (fcport) {
 			fcport->d_id = e->u.new_sess.id;
@@ -4664,6 +4705,29 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
 
 			memcpy(fcport->port_name, e->u.new_sess.port_name,
 			    WWN_SIZE);
+		} else {
+			ql_dbg(ql_dbg_disc, vha, 0xffff,
+				   "%s %8phC mem alloc fail.\n",
+				   __func__, e->u.new_sess.port_name);
+
+			if (pla)
+				kmem_cache_free(qla_tgt_plogi_cachep, pla);
+			return;
+		}
+
+		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+		/* search again to make sure no one else got ahead */
+		tfcp = qla2x00_find_fcport_by_wwpn(vha,
+		    e->u.new_sess.port_name, 1);
+		if (tfcp) {
+			/* should rarely happen */
+			ql_dbg(ql_dbg_disc, vha, 0xffff,
+			    "%s %8phC found existing fcport b4 add. DS %d LS %d\n",
+			    __func__, tfcp->port_name, tfcp->disc_state,
+			    tfcp->fw_login_state);
+
+			free_fcport = 1;
+		} else {
 			list_add_tail(&fcport->list, &vha->vp_fcports);
 
 			if (pla) {
@@ -4681,6 +4745,12 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
 		else
 			qla24xx_async_gnl(vha, fcport);
 	}
+
+	if (free_fcport) {
+		qla2x00_free_fcport(fcport);
+		if (pla)
+			kmem_cache_free(qla_tgt_plogi_cachep, pla);
+	}
 }
 
 void
@@ -5493,6 +5563,13 @@ qla2x00_disable_board_on_pci_error(struct work_struct *work)
 	ql_log(ql_log_warn, base_vha, 0x015b,
 	    "Disabling adapter.\n");
 
+	if (!atomic_read(&pdev->enable_cnt)) {
+		ql_log(ql_log_info, base_vha, 0xfffc,
+		    "PCI device disabled, no action req for PCI error=%lx\n",
+		    base_vha->pci_flags);
+		return;
+	}
+
 	qla2x00_wait_for_sess_deletion(base_vha);
 
 	set_bit(UNLOADING, &base_vha->dpc_flags);
@@ -5687,6 +5764,16 @@ qla2x00_do_dpc(void *data)
 			}
 		}
 
+		if (test_and_clear_bit(DETECT_SFP_CHANGE,
+			&base_vha->dpc_flags) &&
+		    !test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) {
+			qla24xx_detect_sfp(base_vha);
+
+			if (ha->flags.detected_lr_sfp !=
+			    ha->flags.using_lr_setting)
+				set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
+		}
+
 		if (test_and_clear_bit(ISP_ABORT_NEEDED,
 						&base_vha->dpc_flags)) {
 
@@ -5828,6 +5915,17 @@ intr_on_check:
 			mutex_unlock(&ha->mq_lock);
 		}
 
+		if (test_and_clear_bit(SET_ZIO_THRESHOLD_NEEDED, &base_vha->dpc_flags)) {
+			ql_log(ql_log_info, base_vha, 0xffffff,
+				"nvme: SET ZIO Activity exchange threshold to %d.\n",
+						ha->nvme_last_rptd_aen);
+			if (qla27xx_set_zio_threshold(base_vha, ha->nvme_last_rptd_aen)) {
+				ql_log(ql_log_info, base_vha, 0xffffff,
+					"nvme: Unable to SET ZIO Activity exchange threshold to %d.\n",
+						ha->nvme_last_rptd_aen);
+			}
+		}
+
 		if (!IS_QLAFX00(ha))
 			qla2x00_do_dpc_all_vps(base_vha);
 
@@ -6025,12 +6123,15 @@ qla2x00_timer(scsi_qla_host_t *vha)
 	 * FC-NVME
 	 * see if the active AEN count has changed from what was last reported.
 	 */
-	if (atomic_read(&vha->nvme_active_aen_cnt) != vha->nvme_last_rptd_aen) {
-		vha->nvme_last_rptd_aen =
-		    atomic_read(&vha->nvme_active_aen_cnt);
+	if (!vha->vp_idx &&
+		atomic_read(&ha->nvme_active_aen_cnt) != ha->nvme_last_rptd_aen &&
+		ha->zio_mode == QLA_ZIO_MODE_6) {
 		ql_log(ql_log_info, vha, 0x3002,
-		    "reporting new aen count of %d to the fw\n",
-		    vha->nvme_last_rptd_aen);
+			"nvme: Sched: Set ZIO exchange threshold to %d.\n",
+			ha->nvme_last_rptd_aen);
+		ha->nvme_last_rptd_aen = atomic_read(&ha->nvme_active_aen_cnt);
+		set_bit(SET_ZIO_THRESHOLD_NEEDED, &vha->dpc_flags);
+		start_dpc++;
 	}
 
 	/* Schedule the DPC routine if needed */
@@ -6181,6 +6282,12 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
 	ql_dbg(ql_dbg_aer, vha, 0x9000,
 	    "PCI error detected, state %x.\n", state);
 
+	if (!atomic_read(&pdev->enable_cnt)) {
+		ql_log(ql_log_info, vha, 0xffff,
+			"PCI device is disabled, state %x\n", state);
+		return PCI_ERS_RESULT_NEED_RESET;
+	}
+
 	switch (state) {
 	case pci_channel_io_normal:
 		ha->flags.eeh_busy = 0;
@@ -6574,6 +6681,8 @@ qla2x00_module_init(void)
 	strcpy(qla2x00_version_str, QLA2XXX_VERSION);
 	if (ql2xextended_error_logging)
 		strcat(qla2x00_version_str, "-debug");
+	if (ql2xextended_error_logging == 1)
+		ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
 
 	qla2xxx_transport_template =
 	    fc_attach_transport(&qla2xxx_transport_functions);
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index e101cd3043b9..f05cfc83c9c8 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -145,7 +145,7 @@ static void qlt_send_busy(struct qla_qpair *, struct atio_from_isp *,
  * Global Variables
  */
 static struct kmem_cache *qla_tgt_mgmt_cmd_cachep;
-static struct kmem_cache *qla_tgt_plogi_cachep;
+struct kmem_cache *qla_tgt_plogi_cachep;
 static mempool_t *qla_tgt_mgmt_cmd_mempool;
 static struct workqueue_struct *qla_tgt_wq;
 static DEFINE_MUTEX(qla_tgt_mutex);
@@ -585,11 +585,13 @@ void qla2x00_async_nack_sp_done(void *s, int res)
 		sp->fcport->fw_login_state = DSC_LS_PLOGI_COMP;
 		sp->fcport->logout_on_delete = 1;
 		sp->fcport->plogi_nack_done_deadline = jiffies + HZ;
+		sp->fcport->send_els_logo = 0;
 		break;
 
 	case SRB_NACK_PRLI:
 		sp->fcport->fw_login_state = DSC_LS_PRLI_COMP;
 		sp->fcport->deleted = 0;
+		sp->fcport->send_els_logo = 0;
 
 		if (!sp->fcport->login_succ &&
 		    !IS_SW_RESV_ADDR(sp->fcport->d_id)) {
@@ -1479,7 +1481,7 @@ int qlt_stop_phase1(struct qla_tgt *tgt)
 	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a,
 	    "Waiting for tgt %p: sess_count=%d\n", tgt, tgt->sess_count);
 
-	wait_event(tgt->waitQ, test_tgt_sess_count(tgt));
+	wait_event_timeout(tgt->waitQ, test_tgt_sess_count(tgt), 10*HZ);
 
 	/* Big hammer */
 	if (!ha->flags.host_shutting_down &&
@@ -1487,7 +1489,7 @@ int qlt_stop_phase1(struct qla_tgt *tgt)
 		qlt_disable_vha(vha);
 
 	/* Wait for sessions to clear out (just in case) */
-	wait_event(tgt->waitQ, test_tgt_sess_count(tgt));
+	wait_event_timeout(tgt->waitQ, test_tgt_sess_count(tgt), 10*HZ);
 	return 0;
 }
 EXPORT_SYMBOL(qlt_stop_phase1);
@@ -1528,6 +1530,7 @@ static void qlt_release(struct qla_tgt *tgt)
 	u64 key = 0;
 	u16 i;
 	struct qla_qpair_hint *h;
+	struct qla_hw_data *ha = vha->hw;
 
 	if ((vha->vha_tgt.qla_tgt != NULL) && !tgt->tgt_stop &&
 	    !tgt->tgt_stopped)
@@ -1548,12 +1551,18 @@ static void qlt_release(struct qla_tgt *tgt)
 		}
 	}
 	kfree(tgt->qphints);
+	mutex_lock(&qla_tgt_mutex);
+	list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry);
+	mutex_unlock(&qla_tgt_mutex);
 
 	btree_for_each_safe64(&tgt->lun_qpair_map, key, node)
 		btree_remove64(&tgt->lun_qpair_map, key);
 
 	btree_destroy64(&tgt->lun_qpair_map);
 
+	if (ha->tgt.tgt_ops && ha->tgt.tgt_ops->remove_target)
+		ha->tgt.tgt_ops->remove_target(vha);
+
 	vha->vha_tgt.qla_tgt = NULL;
 
 	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00d,
@@ -1901,6 +1910,7 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
 	mcmd->reset_count = ha->base_qpair->chip_reset;
 	mcmd->tmr_func = QLA_TGT_ABTS;
 	mcmd->qpair = ha->base_qpair;
+	mcmd->vha = vha;
 
 	/*
 	 * LUN is looked up by target-core internally based on the passed
@@ -2003,7 +2013,7 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
 static void qlt_24xx_send_task_mgmt_ctio(struct qla_qpair *qpair,
 	struct qla_tgt_mgmt_cmd *mcmd, uint32_t resp_code)
 {
-	struct scsi_qla_host *ha = qpair->vha;
+	struct scsi_qla_host *ha = mcmd->vha;
 	struct atio_from_isp *atio = &mcmd->orig_iocb.atio;
 	struct ctio7_to_24xx *ctio;
 	uint16_t temp;
@@ -3464,6 +3474,9 @@ static int __qlt_send_term_exchange(struct qla_qpair *qpair,
 
 	ql_dbg(ql_dbg_tgt, vha, 0xe009, "Sending TERM EXCH CTIO (ha=%p)\n", ha);
 
+	if (cmd)
+		vha = cmd->vha;
+
 	pkt = (request_t *)qla2x00_alloc_iocbs_ready(qpair, NULL);
 	if (pkt == NULL) {
 		ql_dbg(ql_dbg_tgt, vha, 0xe050,
@@ -4379,6 +4392,7 @@ static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun,
 	mcmd->flags = flags;
 	mcmd->reset_count = ha->base_qpair->chip_reset;
 	mcmd->qpair = ha->base_qpair;
+	mcmd->vha = vha;
 
 	switch (fn) {
 	case QLA_TGT_LUN_RESET:
@@ -6170,10 +6184,6 @@ int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
 	/* free left over qfull cmds */
 	qlt_init_term_exchange(vha);
 
-	mutex_lock(&qla_tgt_mutex);
-	list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry);
-	mutex_unlock(&qla_tgt_mutex);
-
 	ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)",
 	    vha->host_no, ha);
 	qlt_release(vha->vha_tgt.qla_tgt);
@@ -6530,7 +6540,6 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked)
 
 	/* Adjust ring index */
 	WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
-	RD_REG_DWORD_RELAXED(ISP_ATIO_Q_OUT(vha));
 }
 
 void
@@ -6796,7 +6805,7 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
 	if (!QLA_TGT_MODE_ENABLED())
 		return;
 
-	if  (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+	if  (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
 		ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in;
 		ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out;
 	} else {
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index 7fe02d036bdf..aba58d3848a6 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -705,6 +705,7 @@ struct qla_tgt_func_tmpl {
 	int (*get_dif_tags)(struct qla_tgt_cmd *cmd, uint16_t *pfw_prot_opts);
 	int (*chk_dif_tags)(uint32_t tag);
 	void (*add_target)(struct scsi_qla_host *);
+	void (*remove_target)(struct scsi_qla_host *);
 };
 
 int qla2x00_wait_for_hba_online(struct scsi_qla_host *);
@@ -959,6 +960,7 @@ struct qla_tgt_mgmt_cmd {
 	uint8_t fc_tm_rsp;
 	struct fc_port *sess;
 	struct qla_qpair *qpair;
+	struct scsi_qla_host *vha;
 	struct se_cmd se_cmd;
 	struct work_struct free_work;
 	unsigned int flags;
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
index b18646d6057f..733e8dcccf5c 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.c
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -443,8 +443,12 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
 		qla27xx_skip_entry(ent, buf);
 	}
 
-	if (buf)
-		ent->t263.num_queues = count;
+	if (buf) {
+		if (count)
+			ent->t263.num_queues = count;
+		else
+			qla27xx_skip_entry(ent, buf);
+	}
 
 	return false;
 }
@@ -692,11 +696,12 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
 		qla27xx_skip_entry(ent, buf);
 	}
 
-	if (buf)
-		ent->t274.num_queues = count;
-
-	if (!count)
-		qla27xx_skip_entry(ent, buf);
+	if (buf) {
+		if (count)
+			ent->t274.num_queues = count;
+		else
+			qla27xx_skip_entry(ent, buf);
+	}
 
 	return false;
 }
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 005a378f7fab..8c4b505c9f66 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
 /*
  * Driver version
  */
-#define QLA2XXX_VERSION      "10.00.00.00-k"
+#define QLA2XXX_VERSION      "10.00.00.01-k"
 
 #define QLA_DRIVER_MAJOR_VER	10
 #define QLA_DRIVER_MINOR_VER	0
diff --git a/drivers/scsi/qlogicfas.c b/drivers/scsi/qlogicfas.c
index 840823b99e51..95431d605c24 100644
--- a/drivers/scsi/qlogicfas.c
+++ b/drivers/scsi/qlogicfas.c
@@ -188,7 +188,7 @@ static struct scsi_host_template qlogicfas_driver_template = {
 	.info			= qlogicfas408_info,
 	.queuecommand		= qlogicfas408_queuecommand,
 	.eh_abort_handler	= qlogicfas408_abort,
-	.eh_bus_reset_handler	= qlogicfas408_bus_reset,
+	.eh_host_reset_handler	= qlogicfas408_host_reset,
 	.bios_param		= qlogicfas408_biosparam,
 	.can_queue		= 1,
 	.this_id		= -1,
diff --git a/drivers/scsi/qlogicfas408.c b/drivers/scsi/qlogicfas408.c
index c3a9151ca823..8b471a925b43 100644
--- a/drivers/scsi/qlogicfas408.c
+++ b/drivers/scsi/qlogicfas408.c
@@ -496,13 +496,13 @@ int qlogicfas408_abort(struct scsi_cmnd *cmd)
 	return SUCCESS;
 }
 
-/* 
+/*
  *	Reset SCSI bus
  *	FIXME: This function is invoked with cmd = NULL directly by
  *	the PCMCIA qlogic_stub code. This wants fixing
  */
 
-int qlogicfas408_bus_reset(struct scsi_cmnd *cmd)
+int qlogicfas408_host_reset(struct scsi_cmnd *cmd)
 {
 	struct qlogicfas408_priv *priv = get_priv_by_cmd(cmd);
 	unsigned long flags;
@@ -607,7 +607,7 @@ module_exit(qlogicfas408_exit);
 EXPORT_SYMBOL(qlogicfas408_info);
 EXPORT_SYMBOL(qlogicfas408_queuecommand);
 EXPORT_SYMBOL(qlogicfas408_abort);
-EXPORT_SYMBOL(qlogicfas408_bus_reset);
+EXPORT_SYMBOL(qlogicfas408_host_reset);
 EXPORT_SYMBOL(qlogicfas408_biosparam);
 EXPORT_SYMBOL(qlogicfas408_ihandl);
 EXPORT_SYMBOL(qlogicfas408_get_chip_type);
diff --git a/drivers/scsi/qlogicfas408.h b/drivers/scsi/qlogicfas408.h
index 2f6c0a166200..f6b1216af79f 100644
--- a/drivers/scsi/qlogicfas408.h
+++ b/drivers/scsi/qlogicfas408.h
@@ -108,7 +108,7 @@ int qlogicfas408_biosparam(struct scsi_device * disk,
 			   struct block_device *dev,
 			   sector_t capacity, int ip[]);
 int qlogicfas408_abort(struct scsi_cmnd * cmd);
-int qlogicfas408_bus_reset(struct scsi_cmnd * cmd);
+extern int qlogicfas408_host_reset(struct scsi_cmnd *cmd);
 const char *qlogicfas408_info(struct Scsi_Host *host);
 int qlogicfas408_get_chip_type(int qbase, int int_type);
 void qlogicfas408_setup(int qbase, int id, int int_type);
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index 69bfc0a1aea3..cec9a14982e6 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -1283,7 +1283,7 @@ static struct scsi_host_template qpti_template = {
 	.queuecommand		= qlogicpti_queuecommand,
 	.slave_configure	= qlogicpti_slave_configure,
 	.eh_abort_handler	= qlogicpti_abort,
-	.eh_bus_reset_handler	= qlogicpti_reset,
+	.eh_host_reset_handler	= qlogicpti_reset,
 	.can_queue		= QLOGICPTI_REQ_QUEUE_LEN,
 	.this_id		= 7,
 	.sg_tablesize		= QLOGICPTI_MAX_SG(QLOGICPTI_REQ_QUEUE_LEN),
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 1bf274e3b2b6..a7e4fba724b7 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -412,6 +412,57 @@ int scsi_get_vpd_page(struct scsi_device *sdev, u8 page, unsigned char *buf,
 EXPORT_SYMBOL_GPL(scsi_get_vpd_page);
 
 /**
+ * scsi_get_vpd_buf - Get Vital Product Data from a SCSI device
+ * @sdev: The device to ask
+ * @page: Which Vital Product Data to return
+ *
+ * Returns %NULL upon failure.
+ */
+static struct scsi_vpd *scsi_get_vpd_buf(struct scsi_device *sdev, u8 page)
+{
+	struct scsi_vpd *vpd_buf;
+	int vpd_len = SCSI_VPD_PG_LEN, result;
+
+retry_pg:
+	vpd_buf = kmalloc(sizeof(*vpd_buf) + vpd_len, GFP_KERNEL);
+	if (!vpd_buf)
+		return NULL;
+
+	result = scsi_vpd_inquiry(sdev, vpd_buf->data, page, vpd_len);
+	if (result < 0) {
+		kfree(vpd_buf);
+		return NULL;
+	}
+	if (result > vpd_len) {
+		vpd_len = result;
+		kfree(vpd_buf);
+		goto retry_pg;
+	}
+
+	vpd_buf->len = result;
+
+	return vpd_buf;
+}
+
+static void scsi_update_vpd_page(struct scsi_device *sdev, u8 page,
+				 struct scsi_vpd __rcu **sdev_vpd_buf)
+{
+	struct scsi_vpd *vpd_buf;
+
+	vpd_buf = scsi_get_vpd_buf(sdev, page);
+	if (!vpd_buf)
+		return;
+
+	mutex_lock(&sdev->inquiry_mutex);
+	rcu_swap_protected(*sdev_vpd_buf, vpd_buf,
+			   lockdep_is_held(&sdev->inquiry_mutex));
+	mutex_unlock(&sdev->inquiry_mutex);
+
+	if (vpd_buf)
+		kfree_rcu(vpd_buf, rcu);
+}
+
+/**
  * scsi_attach_vpd - Attach Vital Product Data to a SCSI device structure
  * @sdev: The device to ask
  *
@@ -422,95 +473,24 @@ EXPORT_SYMBOL_GPL(scsi_get_vpd_page);
  */
 void scsi_attach_vpd(struct scsi_device *sdev)
 {
-	int result, i;
-	int vpd_len = SCSI_VPD_PG_LEN;
-	int pg80_supported = 0;
-	int pg83_supported = 0;
-	unsigned char __rcu *vpd_buf, *orig_vpd_buf = NULL;
+	int i;
+	struct scsi_vpd *vpd_buf;
 
 	if (!scsi_device_supports_vpd(sdev))
 		return;
 
-retry_pg0:
-	vpd_buf = kmalloc(vpd_len, GFP_KERNEL);
-	if (!vpd_buf)
-		return;
-
 	/* Ask for all the pages supported by this device */
-	result = scsi_vpd_inquiry(sdev, vpd_buf, 0, vpd_len);
-	if (result < 0) {
-		kfree(vpd_buf);
+	vpd_buf = scsi_get_vpd_buf(sdev, 0);
+	if (!vpd_buf)
 		return;
-	}
-	if (result > vpd_len) {
-		vpd_len = result;
-		kfree(vpd_buf);
-		goto retry_pg0;
-	}
 
-	for (i = 4; i < result; i++) {
-		if (vpd_buf[i] == 0x80)
-			pg80_supported = 1;
-		if (vpd_buf[i] == 0x83)
-			pg83_supported = 1;
+	for (i = 4; i < vpd_buf->len; i++) {
+		if (vpd_buf->data[i] == 0x80)
+			scsi_update_vpd_page(sdev, 0x80, &sdev->vpd_pg80);
+		if (vpd_buf->data[i] == 0x83)
+			scsi_update_vpd_page(sdev, 0x83, &sdev->vpd_pg83);
 	}
 	kfree(vpd_buf);
-	vpd_len = SCSI_VPD_PG_LEN;
-
-	if (pg80_supported) {
-retry_pg80:
-		vpd_buf = kmalloc(vpd_len, GFP_KERNEL);
-		if (!vpd_buf)
-			return;
-
-		result = scsi_vpd_inquiry(sdev, vpd_buf, 0x80, vpd_len);
-		if (result < 0) {
-			kfree(vpd_buf);
-			return;
-		}
-		if (result > vpd_len) {
-			vpd_len = result;
-			kfree(vpd_buf);
-			goto retry_pg80;
-		}
-		mutex_lock(&sdev->inquiry_mutex);
-		orig_vpd_buf = sdev->vpd_pg80;
-		sdev->vpd_pg80_len = result;
-		rcu_assign_pointer(sdev->vpd_pg80, vpd_buf);
-		mutex_unlock(&sdev->inquiry_mutex);
-		synchronize_rcu();
-		if (orig_vpd_buf) {
-			kfree(orig_vpd_buf);
-			orig_vpd_buf = NULL;
-		}
-		vpd_len = SCSI_VPD_PG_LEN;
-	}
-
-	if (pg83_supported) {
-retry_pg83:
-		vpd_buf = kmalloc(vpd_len, GFP_KERNEL);
-		if (!vpd_buf)
-			return;
-
-		result = scsi_vpd_inquiry(sdev, vpd_buf, 0x83, vpd_len);
-		if (result < 0) {
-			kfree(vpd_buf);
-			return;
-		}
-		if (result > vpd_len) {
-			vpd_len = result;
-			kfree(vpd_buf);
-			goto retry_pg83;
-		}
-		mutex_lock(&sdev->inquiry_mutex);
-		orig_vpd_buf = sdev->vpd_pg83;
-		sdev->vpd_pg83_len = result;
-		rcu_assign_pointer(sdev->vpd_pg83, vpd_buf);
-		mutex_unlock(&sdev->inquiry_mutex);
-		synchronize_rcu();
-		if (orig_vpd_buf)
-			kfree(orig_vpd_buf);
-	}
 }
 
 /**
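
[Editor's sketch] The scsi_get_vpd_buf() helper added above folds three copies of the allocate/inquire/grow-and-retry dance into one loop. Below is a minimal user-space sketch of the same pattern, assuming a hypothetical query_page() with the same contract as scsi_vpd_inquiry(): it returns the full page length even when the supplied buffer was too small.

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

/* Hypothetical device query: copies up to buf_len bytes of a 600-byte page
 * and always reports the full page length, like scsi_vpd_inquiry() does. */
static int query_page(unsigned char *buf, int buf_len)
{
	const int page_len = 600;
	int n = buf_len < page_len ? buf_len : page_len;

	memset(buf, 0xab, n);
	return page_len;
}

/* Allocate, query, and grow until the whole page fits: the retry_pg loop. */
static unsigned char *fetch_page(int *out_len)
{
	int len = 255;		/* initial guess, like SCSI_VPD_PG_LEN */
	unsigned char *buf;
	int result;

retry:
	buf = malloc(len);
	if (!buf)
		return NULL;
	result = query_page(buf, len);
	if (result < 0) {
		free(buf);
		return NULL;
	}
	if (result > len) {	/* buffer too small: grow and go around again */
		len = result;
		free(buf);
		goto retry;
	}
	*out_len = result;
	return buf;
}

int main(void)
{
	int len;
	unsigned char *p = fetch_page(&len);

	printf("fetched %d bytes\n", p ? len : -1);
	free(p);
	return 0;
}
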
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 3be980d47268..77a0335eb757 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -2261,7 +2261,7 @@ static int resp_ie_l_pg(unsigned char * arr)
 static int resp_log_sense(struct scsi_cmnd * scp,
                           struct sdebug_dev_info * devip)
 {
-	int ppc, sp, pcontrol, pcode, subpcode, alloc_len, len, n;
+	int ppc, sp, pcode, subpcode, alloc_len, len, n;
 	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
 	unsigned char *cmd = scp->cmnd;
 
@@ -2272,7 +2272,6 @@ static int resp_log_sense(struct scsi_cmnd * scp,
 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
 		return check_condition_result;
 	}
-	pcontrol = (cmd[2] & 0xc0) >> 6;
 	pcode = cmd[2] & 0x3f;
 	subpcode = cmd[3] & 0xff;
 	alloc_len = get_unaligned_be16(cmd + 7);
diff --git a/drivers/scsi/scsi_debugfs.c b/drivers/scsi/scsi_debugfs.c
index a97c9507103d..5e9755008aed 100644
--- a/drivers/scsi/scsi_debugfs.c
+++ b/drivers/scsi/scsi_debugfs.c
@@ -6,8 +6,10 @@
 void scsi_show_rq(struct seq_file *m, struct request *rq)
 {
 	struct scsi_cmnd *cmd = container_of(scsi_req(rq), typeof(*cmd), req);
+	int msecs = jiffies_to_msecs(jiffies - cmd->jiffies_at_alloc);
 	char buf[80];
 
 	__scsi_format_command(buf, sizeof(buf), cmd->cmnd, cmd->cmd_len);
-	seq_printf(m, ", .cmd=%s", buf);
+	seq_printf(m, ", .cmd=%s, .retries=%d, allocated %d.%03d s ago", buf,
+		   cmd->retries, msecs / 1000, msecs % 1000);
 }
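
[Editor's sketch] The debugfs hunk above prints the command age by splitting a millisecond count into whole seconds and a zero-padded millisecond remainder. The same arithmetic as a stand-alone snippet, with the jiffies conversion itself left out:

#include <stdio.h>

int main(void)
{
	int msecs = 12345;	/* e.g. jiffies_to_msecs(jiffies - jiffies_at_alloc) */

	/* 12345 ms -> "12.345 s": whole seconds, then the millisecond
	 * remainder padded to three digits, matching the %d.%03d format. */
	printf("allocated %d.%03d s ago\n", msecs / 1000, msecs % 1000);
	return 0;
}
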
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index ea9f40e51f68..38942050b265 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -259,7 +259,7 @@ void scsi_eh_scmd_add(struct scsi_cmnd *scmd)
  */
 enum blk_eh_timer_return scsi_times_out(struct request *req)
 {
-	struct scsi_cmnd *scmd = req->special;
+	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req);
 	enum blk_eh_timer_return rtn = BLK_EH_NOT_HANDLED;
 	struct Scsi_Host *host = scmd->device->host;
 
@@ -552,6 +552,7 @@ int scsi_check_sense(struct scsi_cmnd *scmd)
 			set_host_byte(scmd, DID_ALLOC_FAILURE);
 			return SUCCESS;
 		}
+		/* FALLTHROUGH */
 	case COPY_ABORTED:
 	case VOLUME_OVERFLOW:
 	case MISCOMPARE:
@@ -573,6 +574,7 @@ int scsi_check_sense(struct scsi_cmnd *scmd)
 			return ADD_TO_MLQUEUE;
 		else
 			set_host_byte(scmd, DID_TARGET_FAILURE);
+		/* FALLTHROUGH */
 
 	case ILLEGAL_REQUEST:
 		if (sshdr.asc == 0x20 || /* Invalid command operation code */
@@ -683,6 +685,7 @@ static int scsi_eh_completed_normally(struct scsi_cmnd *scmd)
 	switch (status_byte(scmd->result)) {
 	case GOOD:
 		scsi_handle_queue_ramp_up(scmd->device);
+		/* FALLTHROUGH */
 	case COMMAND_TERMINATED:
 		return SUCCESS;
 	case CHECK_CONDITION:
@@ -1734,6 +1737,7 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
 			set_host_byte(scmd, DID_TIME_OUT);
 			return SUCCESS;
 		}
+		/* FALLTHROUGH */
 	case DID_NO_CONNECT:
 	case DID_BAD_TARGET:
 		/*
@@ -1819,6 +1823,7 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
 		 * the case of trying to send too many commands to a
 		 * tagged queueing device.
 		 */
+		/* FALLTHROUGH */
 	case BUSY:
 		/*
 		 * device can't talk to us at the moment.  Should only
@@ -1831,6 +1836,7 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
 		if (scmd->cmnd[0] == REPORT_LUNS)
 			scmd->device->sdev_target->expecting_lun_change = 0;
 		scsi_handle_queue_ramp_up(scmd->device);
+		/* FALLTHROUGH */
 	case COMMAND_TERMINATED:
 		return SUCCESS;
 	case TASK_ABORTED:
@@ -2320,8 +2326,8 @@ scsi_ioctl_reset(struct scsi_device *dev, int __user *arg)
 		rtn = scsi_try_host_reset(scmd);
 		if (rtn == SUCCESS)
 			break;
-	default:
 		/* FALLTHROUGH */
+	default:
 		rtn = FAILED;
 		break;
 	}
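
[Editor's sketch] Most of the scsi_error.c changes only mark switch cases that deliberately fall through. With GCC's -Wimplicit-fallthrough, comments of this form (or the fallthrough attribute) document the intent and suppress the warning for the annotated case only. A small illustration using a toy function, not kernel code:

#include <stdio.h>

/* Build with: gcc -Wextra -Wimplicit-fallthrough -o fallthrough fallthrough.c
 * The annotated case compiles quietly; removing the comment makes GCC warn
 * about the implicit fall-through from case 1 into case 2. */
static const char *classify(int status)
{
	switch (status) {
	case 0:
		return "good";
	case 1:
		printf("ramping up queue depth\n");
		/* FALLTHROUGH */
	case 2:
		return "success";
	default:
		return "failed";
	}
}

int main(void)
{
	printf("%s\n", classify(1));
	return 0;
}
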
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
index b6bf3f29a12a..0a875491f5a7 100644
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -116,13 +116,15 @@ static int ioctl_internal_command(struct scsi_device *sdev, char *cmd,
 		case NOT_READY:	/* This happens if there is no disc in drive */
 			if (sdev->removable)
 				break;
+			/* FALLTHROUGH */
 		case UNIT_ATTENTION:
 			if (sdev->removable) {
 				sdev->changed = 1;
 				result = 0;	/* This is no longer considered an error */
 				break;
 			}
-		default:	/* Fall through for non-removable media */
+			/* FALLTHROUGH -- for non-removable media */
+		default:
 			sdev_printk(KERN_INFO, sdev,
 				    "ioctl_internal_command return code = %x\n",
 				    result);
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index f6097b89d5d3..9cf6a80fe297 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -44,6 +44,8 @@ static struct kmem_cache *scsi_sense_cache;
 static struct kmem_cache *scsi_sense_isadma_cache;
 static DEFINE_MUTEX(scsi_sense_cache_mutex);
 
+static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd);
+
 static inline struct kmem_cache *
 scsi_select_sense_cache(bool unchecked_isa_dma)
 {
@@ -140,6 +142,12 @@ static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd)
 {
 	struct scsi_device *sdev = cmd->device;
 
+	if (cmd->request->rq_flags & RQF_DONTPREP) {
+		cmd->request->rq_flags &= ~RQF_DONTPREP;
+		scsi_mq_uninit_cmd(cmd);
+	} else {
+		WARN_ON_ONCE(true);
+	}
 	blk_mq_requeue_request(cmd->request, true);
 	put_device(&sdev->sdev_gendev);
 }
@@ -627,7 +635,7 @@ static void scsi_release_bidi_buffers(struct scsi_cmnd *cmd)
 static bool scsi_end_request(struct request *req, blk_status_t error,
 		unsigned int bytes, unsigned int bidi_bytes)
 {
-	struct scsi_cmnd *cmd = req->special;
+	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
 	struct scsi_device *sdev = cmd->device;
 	struct request_queue *q = sdev->request_queue;
 
@@ -642,6 +650,11 @@ static bool scsi_end_request(struct request *req, blk_status_t error,
 	if (blk_queue_add_random(q))
 		add_disk_randomness(req->rq_disk);
 
+	if (!blk_rq_is_scsi(req)) {
+		WARN_ON_ONCE(!(cmd->flags & SCMD_INITIALIZED));
+		cmd->flags &= ~SCMD_INITIALIZED;
+	}
+
 	if (req->mq_ctx) {
 		/*
 		 * In the MQ case the command gets freed by __blk_mq_end_request,
@@ -977,8 +990,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 		 * A new command will be prepared and issued.
 		 */
 		if (q->mq_ops) {
-			cmd->request->rq_flags &= ~RQF_DONTPREP;
-			scsi_mq_uninit_cmd(cmd);
 			scsi_mq_requeue_cmd(cmd);
 		} else {
 			scsi_release_buffers(cmd);
@@ -1107,15 +1118,23 @@ err_exit:
 EXPORT_SYMBOL(scsi_init_io);
 
 /**
- * scsi_initialize_rq - initialize struct scsi_cmnd.req
+ * scsi_initialize_rq - initialize struct scsi_cmnd partially
+ * @rq: Request associated with the SCSI command to be initialized.
+ *
+ * This function initializes the members of struct scsi_cmnd that must be
+ * initialized before request processing starts and that won't be
+ * reinitialized if a SCSI command is requeued.
  *
- * Called from inside blk_get_request().
+ * Called from inside blk_get_request() for pass-through requests and from
+ * inside scsi_init_command() for filesystem requests.
  */
 void scsi_initialize_rq(struct request *rq)
 {
 	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
 
 	scsi_req_init(&cmd->req);
+	cmd->jiffies_at_alloc = jiffies;
+	cmd->retries = 0;
 }
 EXPORT_SYMBOL(scsi_initialize_rq);
 
@@ -1153,8 +1172,18 @@ void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd)
 {
 	void *buf = cmd->sense_buffer;
 	void *prot = cmd->prot_sdb;
-	unsigned int unchecked_isa_dma = cmd->flags & SCMD_UNCHECKED_ISA_DMA;
+	struct request *rq = blk_mq_rq_from_pdu(cmd);
+	unsigned int flags = cmd->flags & SCMD_PRESERVED_FLAGS;
+	unsigned long jiffies_at_alloc;
+	int retries;
+
+	if (!blk_rq_is_scsi(rq) && !(flags & SCMD_INITIALIZED)) {
+		flags |= SCMD_INITIALIZED;
+		scsi_initialize_rq(rq);
+	}
 
+	jiffies_at_alloc = cmd->jiffies_at_alloc;
+	retries = cmd->retries;
 	/* zero out the cmd, except for the embedded scsi_request */
 	memset((char *)cmd + sizeof(cmd->req), 0,
 		sizeof(*cmd) - sizeof(cmd->req) + dev->host->hostt->cmd_size);
@@ -1162,16 +1191,17 @@ void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd)
 	cmd->device = dev;
 	cmd->sense_buffer = buf;
 	cmd->prot_sdb = prot;
-	cmd->flags = unchecked_isa_dma;
+	cmd->flags = flags;
 	INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
-	cmd->jiffies_at_alloc = jiffies;
+	cmd->jiffies_at_alloc = jiffies_at_alloc;
+	cmd->retries = retries;
 
 	scsi_add_cmd_to_list(cmd);
 }
 
 static int scsi_setup_scsi_cmnd(struct scsi_device *sdev, struct request *req)
 {
-	struct scsi_cmnd *cmd = req->special;
+	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
 
 	/*
 	 * Passthrough requests may transfer data, in which case they must
@@ -1202,7 +1232,7 @@ static int scsi_setup_scsi_cmnd(struct scsi_device *sdev, struct request *req)
  */
 static int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
 {
-	struct scsi_cmnd *cmd = req->special;
+	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
 
 	if (unlikely(sdev->handler && sdev->handler->prep_fn)) {
 		int ret = sdev->handler->prep_fn(sdev, req);
@@ -1217,7 +1247,7 @@ static int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
 
 static int scsi_setup_cmnd(struct scsi_device *sdev, struct request *req)
 {
-	struct scsi_cmnd *cmd = req->special;
+	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
 
 	if (!blk_rq_bytes(req))
 		cmd->sc_data_direction = DMA_NONE;
@@ -1349,12 +1379,14 @@ static int scsi_prep_fn(struct request_queue *q, struct request *req)
 
 	ret = scsi_setup_cmnd(sdev, req);
 out:
+	if (ret != BLKPREP_OK)
+		cmd->flags &= ~SCMD_INITIALIZED;
 	return scsi_prep_return(q, req, ret);
 }
 
 static void scsi_unprep_fn(struct request_queue *q, struct request *req)
 {
-	scsi_uninit_cmd(req->special);
+	scsi_uninit_cmd(blk_mq_rq_to_pdu(req));
 }
 
 /*
@@ -1545,7 +1577,7 @@ static int scsi_lld_busy(struct request_queue *q)
  */
 static void scsi_kill_request(struct request *req, struct request_queue *q)
 {
-	struct scsi_cmnd *cmd = req->special;
+	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
 	struct scsi_device *sdev;
 	struct scsi_target *starget;
 	struct Scsi_Host *shost;
@@ -1576,7 +1608,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
 
 static void scsi_softirq_done(struct request *rq)
 {
-	struct scsi_cmnd *cmd = rq->special;
+	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
 	unsigned long wait_for = (cmd->allowed + 1) * rq->timeout;
 	int disposition;
 
@@ -1764,8 +1796,8 @@ static void scsi_request_fn(struct request_queue *q)
 			blk_start_request(req);
 
 		spin_unlock_irq(q->queue_lock);
-		cmd = req->special;
-		if (unlikely(cmd == NULL)) {
+		cmd = blk_mq_rq_to_pdu(req);
+		if (cmd != req->special) {
 			printk(KERN_CRIT "impossible request in %s.\n"
 					 "please mail a stack trace to "
 					 "linux-scsi@vger.kernel.org\n",
@@ -1868,6 +1900,7 @@ static int scsi_mq_prep_fn(struct request *req)
 	struct scsi_device *sdev = req->q->queuedata;
 	struct Scsi_Host *shost = sdev->host;
 	struct scatterlist *sg;
+	int ret;
 
 	scsi_init_command(sdev, cmd);
 
@@ -1901,7 +1934,10 @@ static int scsi_mq_prep_fn(struct request *req)
 
 	blk_mq_start_request(req);
 
-	return scsi_setup_cmnd(sdev, req);
+	ret = scsi_setup_cmnd(sdev, req);
+	if (ret != BLK_STS_OK)
+		cmd->flags &= ~SCMD_INITIALIZED;
+	return ret;
 }
 
 static void scsi_mq_done(struct scsi_cmnd *cmd)
@@ -2001,8 +2037,8 @@ static enum blk_eh_timer_return scsi_timeout(struct request *req,
 	return scsi_times_out(req);
 }
 
-static int scsi_init_request(struct blk_mq_tag_set *set, struct request *rq,
-		unsigned int hctx_idx, unsigned int numa_node)
+static int scsi_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
+				unsigned int hctx_idx, unsigned int numa_node)
 {
 	struct Scsi_Host *shost = set->driver_data;
 	const bool unchecked_isa_dma = shost->unchecked_isa_dma;
@@ -2026,8 +2062,8 @@ static int scsi_init_request(struct blk_mq_tag_set *set, struct request *rq,
 	return 0;
 }
 
-static void scsi_exit_request(struct blk_mq_tag_set *set, struct request *rq,
-		unsigned int hctx_idx)
+static void scsi_mq_exit_request(struct blk_mq_tag_set *set, struct request *rq,
+				 unsigned int hctx_idx)
 {
 	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
 
@@ -2104,7 +2140,8 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
 }
 EXPORT_SYMBOL_GPL(__scsi_init_queue);
 
-static int scsi_init_rq(struct request_queue *q, struct request *rq, gfp_t gfp)
+static int scsi_old_init_rq(struct request_queue *q, struct request *rq,
+			    gfp_t gfp)
 {
 	struct Scsi_Host *shost = q->rq_alloc_data;
 	const bool unchecked_isa_dma = shost->unchecked_isa_dma;
@@ -2134,7 +2171,7 @@ fail:
 	return -ENOMEM;
 }
 
-static void scsi_exit_rq(struct request_queue *q, struct request *rq)
+static void scsi_old_exit_rq(struct request_queue *q, struct request *rq)
 {
 	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
 
@@ -2144,7 +2181,7 @@ static void scsi_exit_rq(struct request_queue *q, struct request *rq)
 			       cmd->sense_buffer);
 }
 
-struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
+struct request_queue *scsi_old_alloc_queue(struct scsi_device *sdev)
 {
 	struct Scsi_Host *shost = sdev->host;
 	struct request_queue *q;
@@ -2155,8 +2192,8 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
 	q->cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size;
 	q->rq_alloc_data = shost;
 	q->request_fn = scsi_request_fn;
-	q->init_rq_fn = scsi_init_rq;
-	q->exit_rq_fn = scsi_exit_rq;
+	q->init_rq_fn = scsi_old_init_rq;
+	q->exit_rq_fn = scsi_old_exit_rq;
 	q->initialize_rq_fn = scsi_initialize_rq;
 
 	if (blk_init_allocated_queue(q) < 0) {
@@ -2180,8 +2217,8 @@ static const struct blk_mq_ops scsi_mq_ops = {
 #ifdef CONFIG_BLK_DEBUG_FS
 	.show_rq	= scsi_show_rq,
 #endif
-	.init_request	= scsi_init_request,
-	.exit_request	= scsi_exit_request,
+	.init_request	= scsi_mq_init_request,
+	.exit_request	= scsi_mq_exit_request,
 	.initialize_rq_fn = scsi_initialize_rq,
 	.map_queues	= scsi_map_queues,
 };
@@ -2547,7 +2584,7 @@ EXPORT_SYMBOL(scsi_test_unit_ready);
  *	@sdev:	scsi device to change the state of.
  *	@state:	state to change to.
  *
- *	Returns zero if unsuccessful or an error if the requested 
+ *	Returns zero if successful or an error if the requested
  *	transition is illegal.
  */
 int
@@ -2654,6 +2691,7 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
 
 	}
 	sdev->sdev_state = state;
+	sysfs_notify(&sdev->sdev_gendev.kobj, NULL, "state");
 	return 0;
 
  illegal:
@@ -3073,19 +3111,26 @@ int scsi_internal_device_unblock_nowait(struct scsi_device *sdev,
 	 * Try to transition the scsi device to SDEV_RUNNING or one of the
 	 * offlined states and goose the device queue if successful.
 	 */
-	if ((sdev->sdev_state == SDEV_BLOCK) ||
-	    (sdev->sdev_state == SDEV_TRANSPORT_OFFLINE))
+	switch (sdev->sdev_state) {
+	case SDEV_BLOCK:
+	case SDEV_TRANSPORT_OFFLINE:
 		sdev->sdev_state = new_state;
-	else if (sdev->sdev_state == SDEV_CREATED_BLOCK) {
+		sysfs_notify(&sdev->sdev_gendev.kobj, NULL, "state");
+		break;
+	case SDEV_CREATED_BLOCK:
 		if (new_state == SDEV_TRANSPORT_OFFLINE ||
 		    new_state == SDEV_OFFLINE)
 			sdev->sdev_state = new_state;
 		else
 			sdev->sdev_state = SDEV_CREATED;
-	} else if (sdev->sdev_state != SDEV_CANCEL &&
-		 sdev->sdev_state != SDEV_OFFLINE)
+		sysfs_notify(&sdev->sdev_gendev.kobj, NULL, "state");
+		break;
+	case SDEV_CANCEL:
+	case SDEV_OFFLINE:
+		break;
+	default:
 		return -EINVAL;
-
+	}
 	scsi_start_queue(sdev);
 
 	return 0;
@@ -3262,8 +3307,8 @@ int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len)
 {
 	u8 cur_id_type = 0xff;
 	u8 cur_id_size = 0;
-	unsigned char *d, *cur_id_str;
-	unsigned char __rcu *vpd_pg83;
+	const unsigned char *d, *cur_id_str;
+	const struct scsi_vpd *vpd_pg83;
 	int id_size = -EINVAL;
 
 	rcu_read_lock();
@@ -3294,8 +3339,8 @@ int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len)
 	}
 
 	memset(id, 0, id_len);
-	d = vpd_pg83 + 4;
-	while (d < vpd_pg83 + sdev->vpd_pg83_len) {
+	d = vpd_pg83->data + 4;
+	while (d < vpd_pg83->data + vpd_pg83->len) {
 		/* Skip designators not referring to the LUN */
 		if ((d[1] & 0x30) != 0x00)
 			goto next_desig;
@@ -3411,8 +3456,8 @@ EXPORT_SYMBOL(scsi_vpd_lun_id);
  */
 int scsi_vpd_tpg_id(struct scsi_device *sdev, int *rel_id)
 {
-	unsigned char *d;
-	unsigned char __rcu *vpd_pg83;
+	const unsigned char *d;
+	const struct scsi_vpd *vpd_pg83;
 	int group_id = -EAGAIN, rel_port = -1;
 
 	rcu_read_lock();
@@ -3422,8 +3467,8 @@ int scsi_vpd_tpg_id(struct scsi_device *sdev, int *rel_id)
 		return -ENXIO;
 	}
 
-	d = sdev->vpd_pg83 + 4;
-	while (d < sdev->vpd_pg83 + sdev->vpd_pg83_len) {
+	d = vpd_pg83->data + 4;
+	while (d < vpd_pg83->data + vpd_pg83->len) {
 		switch (d[1] & 0xf) {
 		case 0x4:
 			/* Relative target port */
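
[Editor's sketch] Several hunks above replace req->special with blk_mq_rq_to_pdu(req): in blk-mq the driver's per-command payload is allocated directly behind struct request, so the scsi_cmnd can be reached by pointer arithmetic instead of a back-pointer. A user-space toy with made-up struct layouts (the real helpers live in include/linux/blk-mq.h):

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-ins for struct request and the SCSI command PDU. */
struct request { int tag; };
struct scsi_cmnd { unsigned char cmnd[16]; int retries; };

/* Same idea as blk_mq_rq_to_pdu()/blk_mq_rq_from_pdu(): the PDU starts
 * right after the request header, inside one allocation. */
static void *rq_to_pdu(struct request *rq) { return rq + 1; }
static struct request *rq_from_pdu(void *pdu) { return (struct request *)pdu - 1; }

int main(void)
{
	struct request *rq = calloc(1, sizeof(struct request) + sizeof(struct scsi_cmnd));
	struct scsi_cmnd *cmd = rq_to_pdu(rq);

	rq->tag = 42;
	cmd->retries = 3;
	printf("tag=%d retries=%d round-trip ok=%d\n",
	       rq->tag, cmd->retries, rq_from_pdu(cmd) == rq);
	free(rq);
	return 0;
}
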
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index c11c1f9c912c..5c6d016a5ae9 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -88,7 +88,7 @@ extern void scsi_queue_insert(struct scsi_cmnd *cmd, int reason);
 extern void scsi_io_completion(struct scsi_cmnd *, unsigned int);
 extern void scsi_run_host_queues(struct Scsi_Host *shost);
 extern void scsi_requeue_run_queue(struct work_struct *work);
-extern struct request_queue *scsi_alloc_queue(struct scsi_device *sdev);
+extern struct request_queue *scsi_old_alloc_queue(struct scsi_device *sdev);
 extern struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev);
 extern void scsi_start_queue(struct scsi_device *sdev);
 extern int scsi_mq_setup_tags(struct Scsi_Host *shost);
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index fd88dabd599d..e7818afeda2b 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -268,7 +268,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
 	if (shost_use_blk_mq(shost))
 		sdev->request_queue = scsi_mq_alloc_queue(sdev);
 	else
-		sdev->request_queue = scsi_alloc_queue(sdev);
+		sdev->request_queue = scsi_old_alloc_queue(sdev);
 	if (!sdev->request_queue) {
 		/* release fn is set up in scsi_sysfs_device_initialise, so
 		 * have to free and put manually here */
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index d6984df71f1c..bf53356f41f0 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -114,7 +114,7 @@ static int check_set(unsigned long long *val, char *src)
 {
 	char *last;
 
-	if (strncmp(src, "-", 20) == 0) {
+	if (strcmp(src, "-") == 0) {
 		*val = SCAN_WILD_CARD;
 	} else {
 		/*
@@ -303,6 +303,8 @@ store_host_reset(struct device *dev, struct device_attribute *attr,
 
 	if (sht->host_reset)
 		ret = sht->host_reset(shost, type);
+	else
+		ret = -EOPNOTSUPP;
 
 exit_store_host_reset:
 	if (ret == 0)
@@ -426,6 +428,7 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)
 	struct scsi_device *sdev;
 	struct device *parent;
 	struct list_head *this, *tmp;
+	struct scsi_vpd *vpd_pg80 = NULL, *vpd_pg83 = NULL;
 	unsigned long flags;
 
 	sdev = container_of(work, struct scsi_device, ew.work);
@@ -454,8 +457,17 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)
 	/* NULL queue means the device can't be used */
 	sdev->request_queue = NULL;
 
-	kfree(sdev->vpd_pg83);
-	kfree(sdev->vpd_pg80);
+	mutex_lock(&sdev->inquiry_mutex);
+	rcu_swap_protected(sdev->vpd_pg80, vpd_pg80,
+			   lockdep_is_held(&sdev->inquiry_mutex));
+	rcu_swap_protected(sdev->vpd_pg83, vpd_pg83,
+			   lockdep_is_held(&sdev->inquiry_mutex));
+	mutex_unlock(&sdev->inquiry_mutex);
+
+	if (vpd_pg83)
+		kfree_rcu(vpd_pg83, rcu);
+	if (vpd_pg80)
+		kfree_rcu(vpd_pg80, rcu);
 	kfree(sdev->inquiry);
 	kfree(sdev);
 
@@ -793,15 +805,16 @@ show_vpd_##_page(struct file *filp, struct kobject *kobj,	\
 {									\
 	struct device *dev = container_of(kobj, struct device, kobj);	\
 	struct scsi_device *sdev = to_scsi_device(dev);			\
-	int ret;							\
-	if (!sdev->vpd_##_page)						\
-		return -EINVAL;						\
+	struct scsi_vpd *vpd_page;					\
+	int ret = -EINVAL;						\
+									\
 	rcu_read_lock();						\
-	ret = memory_read_from_buffer(buf, count, &off,			\
-				      rcu_dereference(sdev->vpd_##_page), \
-				       sdev->vpd_##_page##_len);	\
+	vpd_page = rcu_dereference(sdev->vpd_##_page);			\
+	if (vpd_page)							\
+		ret = memory_read_from_buffer(buf, count, &off,		\
+				vpd_page->data, vpd_page->len);		\
 	rcu_read_unlock();						\
-	return ret;						\
+	return ret;							\
 }									\
 static struct bin_attribute dev_attr_vpd_##_page = {		\
 	.attr =	{.name = __stringify(vpd_##_page), .mode = S_IRUGO },	\
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 892fbd9800d9..3c6bc0081fcb 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -3276,8 +3276,8 @@ fc_scsi_scan_rport(struct work_struct *work)
 }
 
 /**
- * fc_block_scsi_eh - Block SCSI eh thread for blocked fc_rport
- * @cmnd: SCSI command that scsi_eh is trying to recover
+ * fc_block_rport() - Block SCSI eh thread for blocked fc_rport.
+ * @rport: Remote port that scsi_eh is trying to recover.
  *
  * This routine can be called from a FC LLD scsi_eh callback. It
  * blocks the scsi_eh thread until the fc_rport leaves the
@@ -3289,10 +3289,9 @@ fc_scsi_scan_rport(struct work_struct *work)
  *	    FAST_IO_FAIL if the fast_io_fail_tmo fired, this should be
  *	    passed back to scsi_eh.
  */
-int fc_block_scsi_eh(struct scsi_cmnd *cmnd)
+int fc_block_rport(struct fc_rport *rport)
 {
-	struct Scsi_Host *shost = cmnd->device->host;
-	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
+	struct Scsi_Host *shost = rport_to_shost(rport);
 	unsigned long flags;
 
 	spin_lock_irqsave(shost->host_lock, flags);
@@ -3309,6 +3308,28 @@ int fc_block_scsi_eh(struct scsi_cmnd *cmnd)
 
 	return 0;
 }
+EXPORT_SYMBOL(fc_block_rport);
+
+/**
+ * fc_block_scsi_eh - Block SCSI eh thread for blocked fc_rport
+ * @cmnd: SCSI command that scsi_eh is trying to recover
+ *
+ * This routine can be called from a FC LLD scsi_eh callback. It
+ * blocks the scsi_eh thread until the fc_rport leaves the
+ * FC_PORTSTATE_BLOCKED, or the fast_io_fail_tmo fires. This is
+ * necessary to avoid the scsi_eh failing recovery actions for blocked
+ * rports which would lead to offlined SCSI devices.
+ *
+ * Returns: 0 if the fc_rport left the state FC_PORTSTATE_BLOCKED.
+ *	    FAST_IO_FAIL if the fast_io_fail_tmo fired, this should be
+ *	    passed back to scsi_eh.
+ */
+int fc_block_scsi_eh(struct scsi_cmnd *cmnd)
+{
+	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
+
+	return fc_block_rport(rport);
+}
 EXPORT_SYMBOL(fc_block_scsi_eh);
 
 /**
@@ -3763,7 +3784,8 @@ fc_bsg_hostadd(struct Scsi_Host *shost, struct fc_host_attrs *fc_host)
 	snprintf(bsg_name, sizeof(bsg_name),
 		 "fc_host%d", shost->host_no);
 
-	q = bsg_setup_queue(dev, bsg_name, fc_bsg_dispatch, i->f->dd_bsg_size);
+	q = bsg_setup_queue(dev, bsg_name, fc_bsg_dispatch, i->f->dd_bsg_size,
+			NULL);
 	if (IS_ERR(q)) {
 		dev_err(dev,
 			"fc_host%d: bsg interface failed to initialize - setup queue\n",
@@ -3808,7 +3830,8 @@ fc_bsg_rportadd(struct Scsi_Host *shost, struct fc_rport *rport)
 	if (!i->f->bsg_request)
 		return -ENOTSUPP;
 
-	q = bsg_setup_queue(dev, NULL, fc_bsg_dispatch, i->f->dd_bsg_size);
+	q = bsg_setup_queue(dev, NULL, fc_bsg_dispatch, i->f->dd_bsg_size,
+			NULL);
 	if (IS_ERR(q)) {
 		dev_err(dev, "failed to setup bsg queue\n");
 		return PTR_ERR(q);
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index a424eaeafeb0..8934f19bce8e 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -1009,7 +1009,7 @@ static void iscsi_flashnode_sess_release(struct device *dev)
 	kfree(fnode_sess);
 }
 
-static struct device_type iscsi_flashnode_sess_dev_type = {
+static const struct device_type iscsi_flashnode_sess_dev_type = {
 	.name = "iscsi_flashnode_sess_dev_type",
 	.groups = iscsi_flashnode_sess_attr_groups,
 	.release = iscsi_flashnode_sess_release,
@@ -1195,7 +1195,7 @@ static void iscsi_flashnode_conn_release(struct device *dev)
 	kfree(fnode_conn);
 }
 
-static struct device_type iscsi_flashnode_conn_dev_type = {
+static const struct device_type iscsi_flashnode_conn_dev_type = {
 	.name = "iscsi_flashnode_conn_dev_type",
 	.groups = iscsi_flashnode_conn_attr_groups,
 	.release = iscsi_flashnode_conn_release,
@@ -1542,7 +1542,7 @@ iscsi_bsg_host_add(struct Scsi_Host *shost, struct iscsi_cls_host *ihost)
 		return -ENOTSUPP;
 
 	snprintf(bsg_name, sizeof(bsg_name), "iscsi_host%d", shost->host_no);
-	q = bsg_setup_queue(dev, bsg_name, iscsi_bsg_host_dispatch, 0);
+	q = bsg_setup_queue(dev, bsg_name, iscsi_bsg_host_dispatch, 0, NULL);
 	if (IS_ERR(q)) {
 		shost_printk(KERN_ERR, shost, "bsg interface failed to "
 			     "initialize - no request queue\n");
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 5006a656e16a..319dff970237 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -169,39 +169,22 @@ static struct sas_end_device *sas_sdev_to_rdev(struct scsi_device *sdev)
 	return rdev;
 }
 
-static void sas_smp_request(struct request_queue *q, struct Scsi_Host *shost,
-			    struct sas_rphy *rphy)
+static int sas_smp_dispatch(struct bsg_job *job)
 {
-	struct request *req;
-	blk_status_t ret;
-	int (*handler)(struct Scsi_Host *, struct sas_rphy *, struct request *);
+	struct Scsi_Host *shost = dev_to_shost(job->dev);
+	struct sas_rphy *rphy = NULL;
 
-	while ((req = blk_fetch_request(q)) != NULL) {
-		spin_unlock_irq(q->queue_lock);
+	if (!scsi_is_host_device(job->dev))
+		rphy = dev_to_rphy(job->dev);
 
-		scsi_req(req)->resid_len = blk_rq_bytes(req);
-		if (req->next_rq)
-			scsi_req(req->next_rq)->resid_len =
-				blk_rq_bytes(req->next_rq);
-		handler = to_sas_internal(shost->transportt)->f->smp_handler;
-		ret = handler(shost, rphy, req);
-		scsi_req(req)->result = ret;
-
-		blk_end_request_all(req, 0);
-
-		spin_lock_irq(q->queue_lock);
+	if (!job->req->next_rq) {
+		dev_warn(job->dev, "space for a smp response is missing\n");
+		bsg_job_done(job, -EINVAL, 0);
+		return 0;
 	}
-}
-
-static void sas_host_smp_request(struct request_queue *q)
-{
-	sas_smp_request(q, (struct Scsi_Host *)q->queuedata, NULL);
-}
 
-static void sas_non_host_smp_request(struct request_queue *q)
-{
-	struct sas_rphy *rphy = q->queuedata;
-	sas_smp_request(q, rphy_to_shost(rphy), rphy);
+	to_sas_internal(shost->transportt)->f->smp_handler(job, shost, rphy);
+	return 0;
 }
 
 static void sas_host_release(struct device *dev)
@@ -217,81 +200,36 @@ static void sas_host_release(struct device *dev)
 static int sas_bsg_initialize(struct Scsi_Host *shost, struct sas_rphy *rphy)
 {
 	struct request_queue *q;
-	int error;
-	struct device *dev;
-	char namebuf[20];
-	const char *name;
-	void (*release)(struct device *);
 
 	if (!to_sas_internal(shost->transportt)->f->smp_handler) {
 		printk("%s can't handle SMP requests\n", shost->hostt->name);
 		return 0;
 	}
 
-	q = blk_alloc_queue(GFP_KERNEL);
-	if (!q)
-		return -ENOMEM;
-	q->initialize_rq_fn = scsi_initialize_rq;
-	q->cmd_size = sizeof(struct scsi_request);
-
 	if (rphy) {
-		q->request_fn = sas_non_host_smp_request;
-		dev = &rphy->dev;
-		name = dev_name(dev);
-		release = NULL;
+		q = bsg_setup_queue(&rphy->dev, dev_name(&rphy->dev),
+				sas_smp_dispatch, 0, NULL);
+		if (IS_ERR(q))
+			return PTR_ERR(q);
+		rphy->q = q;
 	} else {
-		q->request_fn = sas_host_smp_request;
-		dev = &shost->shost_gendev;
-		snprintf(namebuf, sizeof(namebuf),
-			 "sas_host%d", shost->host_no);
-		name = namebuf;
-		release = sas_host_release;
+		char name[20];
+
+		snprintf(name, sizeof(name), "sas_host%d", shost->host_no);
+		q = bsg_setup_queue(&shost->shost_gendev, name,
+				sas_smp_dispatch, 0, sas_host_release);
+		if (IS_ERR(q))
+			return PTR_ERR(q);
+		to_sas_host_attrs(shost)->q = q;
 	}
-	error = blk_init_allocated_queue(q);
-	if (error)
-		goto out_cleanup_queue;
 
 	/*
 	 * by default assume old behaviour and bounce for any highmem page
 	 */
 	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
-
-	error = bsg_register_queue(q, dev, name, release);
-	if (error)
-		goto out_cleanup_queue;
-
-	if (rphy)
-		rphy->q = q;
-	else
-		to_sas_host_attrs(shost)->q = q;
-
-	if (rphy)
-		q->queuedata = rphy;
-	else
-		q->queuedata = shost;
-
 	queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
 	queue_flag_set_unlocked(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
 	return 0;
-
-out_cleanup_queue:
-	blk_cleanup_queue(q);
-	return error;
-}
-
-static void sas_bsg_remove(struct Scsi_Host *shost, struct sas_rphy *rphy)
-{
-	struct request_queue *q;
-
-	if (rphy)
-		q = rphy->q;
-	else
-		q = to_sas_host_attrs(shost)->q;
-
-	if (!q)
-		return;
-
-	bsg_unregister_queue(q);
 }
 
 /*
@@ -321,9 +259,10 @@ static int sas_host_remove(struct transport_container *tc, struct device *dev,
 			   struct device *cdev)
 {
 	struct Scsi_Host *shost = dev_to_shost(dev);
+	struct request_queue *q = to_sas_host_attrs(shost)->q;
 
-	sas_bsg_remove(shost, NULL);
-
+	if (q)
+		bsg_unregister_queue(q);
 	return 0;
 }
 
@@ -421,6 +360,9 @@ sas_tlr_supported(struct scsi_device *sdev)
 	char *buffer = kzalloc(vpd_len, GFP_KERNEL);
 	int ret = 0;
 
+	if (!buffer)
+		goto out;
+
 	if (scsi_get_vpd_page(sdev, 0x90, buffer, vpd_len))
 		goto out;
 
@@ -1710,7 +1652,8 @@ sas_rphy_remove(struct sas_rphy *rphy)
 	}
 
 	sas_rphy_unlink(rphy);
-	sas_bsg_remove(NULL, rphy);
+	if (rphy->q)
+		bsg_unregister_queue(rphy->q);
 	transport_remove_device(dev);
 	device_del(dev);
 }
diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
index f617021c94f7..4f6f01cf9968 100644
--- a/drivers/scsi/scsi_transport_srp.c
+++ b/drivers/scsi/scsi_transport_srp.c
@@ -78,7 +78,7 @@ static inline struct srp_rport *shost_to_rport(struct Scsi_Host *shost)
  * parameters must be such that multipath can detect failed paths timely.
  * Hence do not allow all three parameters to be disabled simultaneously.
  */
-int srp_tmo_valid(int reconnect_delay, int fast_io_fail_tmo, int dev_loss_tmo)
+int srp_tmo_valid(int reconnect_delay, int fast_io_fail_tmo, long dev_loss_tmo)
 {
 	if (reconnect_delay < 0 && fast_io_fail_tmo < 0 && dev_loss_tmo < 0)
 		return -EINVAL;
@@ -556,8 +556,11 @@ int srp_reconnect_rport(struct srp_rport *rport)
 		 */
 		shost_for_each_device(sdev, shost) {
 			mutex_lock(&sdev->state_mutex);
-			if (sdev->sdev_state == SDEV_OFFLINE)
+			if (sdev->sdev_state == SDEV_OFFLINE) {
 				sdev->sdev_state = SDEV_RUNNING;
+				sysfs_notify(&sdev->sdev_gendev.kobj,
+					     NULL, "state");
+			}
 			mutex_unlock(&sdev->state_mutex);
 		}
 	} else if (rport->state == SRP_RPORT_RUNNING) {
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index e2647f2d4430..11c1738c2100 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -312,7 +312,7 @@ protection_type_store(struct device *dev, struct device_attribute *attr,
 	if (err)
 		return err;
 
-	if (val >= 0 && val <= T10_PI_TYPE3_PROTECTION)
+	if (val <= T10_PI_TYPE3_PROTECTION)
 		sdkp->protection_type = val;
 
 	return count;
@@ -1013,7 +1013,7 @@ static int sd_setup_read_write_cmnd(struct scsi_cmnd *SCpnt)
 	ret = scsi_init_io(SCpnt);
 	if (ret != BLKPREP_OK)
 		goto out;
-	SCpnt = rq->special;
+	WARN_ON_ONCE(SCpnt != rq->special);
 
 	/* from here on until we're complete, any goto out
 	 * is used for a killable error condition */
@@ -3226,7 +3226,6 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
 
 	gd->major = sd_major((index & 0xf0) >> 4);
 	gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00);
-	gd->minors = SD_MINORS;
 
 	gd->fops = &sd_fops;
 	gd->private_data = &sdkp->driver;
@@ -3540,7 +3539,7 @@ static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
 			 * doesn't support sync. There's not much to do and
 			 * suspend shouldn't fail.
 			 */
-			 ret = 0;
+			ret = 0;
 		}
 	}
 
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index 8927f9f54ad9..11826c5c2dd4 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -51,6 +51,13 @@ struct ses_component {
 	u64 addr;
 };
 
+static bool ses_page2_supported(struct enclosure_device *edev)
+{
+	struct ses_device *ses_dev = edev->scratch;
+
+	return (ses_dev->page2 != NULL);
+}
+
 static int ses_probe(struct device *dev)
 {
 	struct scsi_device *sdev = to_scsi_device(dev);
@@ -179,7 +186,8 @@ static unsigned char *ses_get_page2_descriptor(struct enclosure_device *edev,
 	unsigned char *type_ptr = ses_dev->page1_types;
 	unsigned char *desc_ptr = ses_dev->page2 + 8;
 
-	ses_recv_diag(sdev, 2, ses_dev->page2, ses_dev->page2_len);
+	if (ses_recv_diag(sdev, 2, ses_dev->page2, ses_dev->page2_len) < 0)
+		return NULL;
 
 	for (i = 0; i < ses_dev->page1_num_types; i++, type_ptr += 4) {
 		for (j = 0; j < type_ptr[1]; j++) {
@@ -203,6 +211,10 @@ static void ses_get_fault(struct enclosure_device *edev,
 {
 	unsigned char *desc;
 
+	if (!ses_page2_supported(edev)) {
+		ecomp->fault = 0;
+		return;
+	}
 	desc = ses_get_page2_descriptor(edev, ecomp);
 	if (desc)
 		ecomp->fault = (desc[3] & 0x60) >> 4;
@@ -215,6 +227,9 @@ static int ses_set_fault(struct enclosure_device *edev,
 	unsigned char desc[4];
 	unsigned char *desc_ptr;
 
+	if (!ses_page2_supported(edev))
+		return -EINVAL;
+
 	desc_ptr = ses_get_page2_descriptor(edev, ecomp);
 
 	if (!desc_ptr)
@@ -242,6 +257,10 @@ static void ses_get_status(struct enclosure_device *edev,
 {
 	unsigned char *desc;
 
+	if (!ses_page2_supported(edev)) {
+		ecomp->status = 0;
+		return;
+	}
 	desc = ses_get_page2_descriptor(edev, ecomp);
 	if (desc)
 		ecomp->status = (desc[0] & 0x0f);
@@ -252,6 +271,10 @@ static void ses_get_locate(struct enclosure_device *edev,
 {
 	unsigned char *desc;
 
+	if (!ses_page2_supported(edev)) {
+		ecomp->locate = 0;
+		return;
+	}
 	desc = ses_get_page2_descriptor(edev, ecomp);
 	if (desc)
 		ecomp->locate = (desc[2] & 0x02) ? 1 : 0;
@@ -264,6 +287,9 @@ static int ses_set_locate(struct enclosure_device *edev,
 	unsigned char desc[4];
 	unsigned char *desc_ptr;
 
+	if (!ses_page2_supported(edev))
+		return -EINVAL;
+
 	desc_ptr = ses_get_page2_descriptor(edev, ecomp);
 
 	if (!desc_ptr)
@@ -292,6 +318,9 @@ static int ses_set_active(struct enclosure_device *edev,
 	unsigned char desc[4];
 	unsigned char *desc_ptr;
 
+	if (!ses_page2_supported(edev))
+		return -EINVAL;
+
 	desc_ptr = ses_get_page2_descriptor(edev, ecomp);
 
 	if (!desc_ptr)
@@ -328,6 +357,11 @@ static void ses_get_power_status(struct enclosure_device *edev,
 {
 	unsigned char *desc;
 
+	if (!ses_page2_supported(edev)) {
+		ecomp->power_status = 0;
+		return;
+	}
+
 	desc = ses_get_page2_descriptor(edev, ecomp);
 	if (desc)
 		ecomp->power_status = (desc[3] & 0x10) ? 0 : 1;
@@ -340,6 +374,9 @@ static int ses_set_power_status(struct enclosure_device *edev,
 	unsigned char desc[4];
 	unsigned char *desc_ptr;
 
+	if (!ses_page2_supported(edev))
+		return -EINVAL;
+
 	desc_ptr = ses_get_page2_descriptor(edev, ecomp);
 
 	if (!desc_ptr)
@@ -601,7 +638,7 @@ static int ses_intf_add(struct device *cdev,
 {
 	struct scsi_device *sdev = to_scsi_device(cdev->parent);
 	struct scsi_device *tmp_sdev;
-	unsigned char *buf = NULL, *hdr_buf, *type_ptr;
+	unsigned char *buf = NULL, *hdr_buf, *type_ptr, page;
 	struct ses_device *ses_dev;
 	u32 result;
 	int i, types, len, components = 0;
@@ -630,7 +667,8 @@ static int ses_intf_add(struct device *cdev,
 	if (!hdr_buf || !ses_dev)
 		goto err_init_free;
 
-	result = ses_recv_diag(sdev, 1, hdr_buf, INIT_ALLOC_SIZE);
+	page = 1;
+	result = ses_recv_diag(sdev, page, hdr_buf, INIT_ALLOC_SIZE);
 	if (result)
 		goto recv_failed;
 
@@ -639,7 +677,7 @@ static int ses_intf_add(struct device *cdev,
 	if (!buf)
 		goto err_free;
 
-	result = ses_recv_diag(sdev, 1, buf, len);
+	result = ses_recv_diag(sdev, page, buf, len);
 	if (result)
 		goto recv_failed;
 
@@ -669,9 +707,10 @@ static int ses_intf_add(struct device *cdev,
 	ses_dev->page1_len = len;
 	buf = NULL;
 
-	result = ses_recv_diag(sdev, 2, hdr_buf, INIT_ALLOC_SIZE);
+	page = 2;
+	result = ses_recv_diag(sdev, page, hdr_buf, INIT_ALLOC_SIZE);
 	if (result)
-		goto recv_failed;
+		goto page2_not_supported;
 
 	len = (hdr_buf[2] << 8) + hdr_buf[3] + 4;
 	buf = kzalloc(len, GFP_KERNEL);
@@ -688,7 +727,8 @@ static int ses_intf_add(struct device *cdev,
 
 	/* The additional information page --- allows us
 	 * to match up the devices */
-	result = ses_recv_diag(sdev, 10, hdr_buf, INIT_ALLOC_SIZE);
+	page = 10;
+	result = ses_recv_diag(sdev, page, hdr_buf, INIT_ALLOC_SIZE);
 	if (!result) {
 
 		len = (hdr_buf[2] << 8) + hdr_buf[3] + 4;
@@ -696,13 +736,14 @@ static int ses_intf_add(struct device *cdev,
 		if (!buf)
 			goto err_free;
 
-		result = ses_recv_diag(sdev, 10, buf, len);
+		result = ses_recv_diag(sdev, page, buf, len);
 		if (result)
 			goto recv_failed;
 		ses_dev->page10 = buf;
 		ses_dev->page10_len = len;
 		buf = NULL;
 	}
+page2_not_supported:
 	scomp = kzalloc(sizeof(struct ses_component) * components, GFP_KERNEL);
 	if (!scomp)
 		goto err_free;
@@ -734,7 +775,7 @@ static int ses_intf_add(struct device *cdev,
 
  recv_failed:
 	sdev_printk(KERN_ERR, sdev, "Failed to get diagnostic page 0x%x\n",
-		    result);
+		    page);
 	err = -ENODEV;
  err_free:
 	kfree(buf);
@@ -777,8 +818,6 @@ static void ses_intf_remove_enclosure(struct scsi_device *sdev)
 	if (!edev)
 		return;
 
-	enclosure_unregister(edev);
-
 	ses_dev = edev->scratch;
 	edev->scratch = NULL;
 
@@ -790,6 +829,7 @@ static void ses_intf_remove_enclosure(struct scsi_device *sdev)
 	kfree(edev->component[0].scratch);
 
 	put_device(&edev->edev);
+	enclosure_unregister(edev);
 }
 
 static void ses_intf_remove(struct device *cdev,
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 84e782d8e7c3..cf0e71db9e51 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1081,8 +1081,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
 		return blk_trace_setup(sdp->device->request_queue,
 				       sdp->disk->disk_name,
 				       MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
-				       NULL,
-				       (char *)arg);
+				       NULL, p);
 	case BLKTRACESTART:
 		return blk_trace_startstop(sdp->device->request_queue, 1);
 	case BLKTRACESTOP:
@@ -1233,6 +1232,7 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
 	unsigned long req_sz, len, sa;
 	Sg_scatter_hold *rsv_schp;
 	int k, length;
+	int ret = 0;
 
 	if ((!filp) || (!vma) || (!(sfp = (Sg_fd *) filp->private_data)))
 		return -ENXIO;
@@ -1243,8 +1243,11 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
 	if (vma->vm_pgoff)
 		return -EINVAL;	/* want no offset */
 	rsv_schp = &sfp->reserve;
-	if (req_sz > rsv_schp->bufflen)
-		return -ENOMEM;	/* cannot map more than reserved buffer */
+	mutex_lock(&sfp->f_mutex);
+	if (req_sz > rsv_schp->bufflen) {
+		ret = -ENOMEM;	/* cannot map more than reserved buffer */
+		goto out;
+	}
 
 	sa = vma->vm_start;
 	length = 1 << (PAGE_SHIFT + rsv_schp->page_order);
@@ -1258,7 +1261,9 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
 	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
 	vma->vm_private_data = sfp;
 	vma->vm_ops = &sg_mmap_vm_ops;
-	return 0;
+out:
+	mutex_unlock(&sfp->f_mutex);
+	return ret;
 }
 
 static void
@@ -1735,9 +1740,12 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
 		    !sfp->res_in_use) {
 			sfp->res_in_use = 1;
 			sg_link_reserve(sfp, srp, dxfer_len);
-		} else if ((hp->flags & SG_FLAG_MMAP_IO) && sfp->res_in_use) {
+		} else if (hp->flags & SG_FLAG_MMAP_IO) {
+			res = -EBUSY; /* sfp->res_in_use == 1 */
+			if (dxfer_len > rsv_schp->bufflen)
+				res = -ENOMEM;
 			mutex_unlock(&sfp->f_mutex);
-			return -EBUSY;
+			return res;
 		} else {
 			res = sg_build_indirect(req_schp, sfp, dxfer_len);
 			if (res) {
diff --git a/drivers/scsi/sgiwd93.c b/drivers/scsi/sgiwd93.c
index 80cfa93e407c..5ed696dc9bbd 100644
--- a/drivers/scsi/sgiwd93.c
+++ b/drivers/scsi/sgiwd93.c
@@ -192,20 +192,6 @@ static inline void init_hpc_chain(struct ip22_hostdata *hdata)
 	hcp->desc.pnext = hdata->dma;
 }
 
-static int sgiwd93_bus_reset(struct scsi_cmnd *cmd)
-{
-	/* FIXME perform bus-specific reset */
-
-	/* FIXME 2: kill this function, and let midlayer fallback
-	   to the same result, calling wd33c93_host_reset() */
-
-	spin_lock_irq(cmd->device->host->host_lock);
-	wd33c93_host_reset(cmd);
-	spin_unlock_irq(cmd->device->host->host_lock);
-
-	return SUCCESS;
-}
-
 /*
  * Kludge alert - the SCSI code calls the abort and reset method with int
  * arguments not with pointers.  So this is going to blow up beautyfully
@@ -217,7 +203,6 @@ static struct scsi_host_template sgiwd93_template = {
 	.name			= "SGI WD93",
 	.queuecommand		= wd33c93_queuecommand,
 	.eh_abort_handler	= wd33c93_abort,
-	.eh_bus_reset_handler	= sgiwd93_bus_reset,
 	.eh_host_reset_handler	= wd33c93_host_reset,
 	.can_queue		= 16,
 	.this_id		= 7,
diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h
index e164ffade38a..dc3a0542a2e8 100644
--- a/drivers/scsi/smartpqi/smartpqi.h
+++ b/drivers/scsi/smartpqi/smartpqi.h
@@ -688,6 +688,28 @@ struct pqi_config_table_heartbeat {
 	__le32	heartbeat_counter;
 };
 
+union pqi_reset_register {
+	struct {
+		u32	reset_type : 3;
+		u32	reserved : 2;
+		u32	reset_action : 3;
+		u32	hold_in_pd1 : 1;
+		u32	reserved2 : 23;
+	} bits;
+	u32	all_bits;
+};
+
+#define PQI_RESET_ACTION_RESET		0x1
+
+#define PQI_RESET_TYPE_NO_RESET		0x0
+#define PQI_RESET_TYPE_SOFT_RESET	0x1
+#define PQI_RESET_TYPE_FIRM_RESET	0x2
+#define PQI_RESET_TYPE_HARD_RESET	0x3
+
+#define PQI_RESET_ACTION_COMPLETED	0x2
+
+#define PQI_RESET_POLL_INTERVAL_MSECS	100
+
 #define PQI_MAX_OUTSTANDING_REQUESTS		((u32)~0)
 #define PQI_MAX_OUTSTANDING_REQUESTS_KDUMP	32
 #define PQI_MAX_TRANSFER_SIZE			(1024U * 1024U)
@@ -995,6 +1017,7 @@ struct pqi_ctrl_info {
 	u8		inbound_spanning_supported : 1;
 	u8		outbound_spanning_supported : 1;
 	u8		pqi_mode_enabled : 1;
+	u8		pqi_reset_quiesce_supported : 1;
 
 	struct list_head scsi_device_list;
 	spinlock_t	scsi_device_list_lock;
@@ -1056,9 +1079,9 @@ enum pqi_ctrl_mode {
 #define BMIC_SENSE_CONTROLLER_PARAMETERS	0x64
 #define BMIC_SENSE_SUBSYSTEM_INFORMATION	0x66
 #define BMIC_WRITE_HOST_WELLNESS		0xa5
-#define BMIC_CACHE_FLUSH			0xc2
+#define BMIC_FLUSH_CACHE			0xc2
 
-#define SA_CACHE_FLUSH				0x1
+#define SA_FLUSH_CACHE				0x1
 
 #define MASKED_DEVICE(lunid)			((lunid)[3] & 0xc0)
 #define CISS_GET_LEVEL_2_BUS(lunid)		((lunid)[7] & 0x3f)
@@ -1164,6 +1187,23 @@ struct bmic_identify_physical_device {
 	u8	padding_to_multiple_of_512[9];
 };
 
+struct bmic_flush_cache {
+	u8	disable_flag;
+	u8	system_power_action;
+	u8	ndu_flush;
+	u8	shutdown_event;
+	u8	reserved[28];
+};
+
+/* for shutdown_event member of struct bmic_flush_cache */
+enum bmic_flush_cache_shutdown_event {
+	NONE_CACHE_FLUSH_ONLY = 0,
+	SHUTDOWN = 1,
+	HIBERNATE = 2,
+	SUSPEND = 3,
+	RESTART = 4
+};
+
 #pragma pack()
 
 int pqi_add_sas_host(struct Scsi_Host *shost, struct pqi_ctrl_info *ctrl_info);
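
[Editor's sketch] The new union pqi_reset_register names the fields that pqi_reset() previously packed by hand as (PQI_RESET_ACTION_RESET << 5) | PQI_RESET_TYPE_HARD_RESET. Below is a quick user-space check that the two encodings agree; bitfield layout is implementation-defined, so this assumes the usual little-endian, low-bit-first layout the driver relies on.

#include <stdio.h>
#include <stdint.h>

#define PQI_RESET_ACTION_RESET		0x1
#define PQI_RESET_TYPE_HARD_RESET	0x3

union pqi_reset_register {
	struct {
		uint32_t reset_type : 3;	/* bits 0-2 */
		uint32_t reserved : 2;		/* bits 3-4 */
		uint32_t reset_action : 3;	/* bits 5-7 */
		uint32_t hold_in_pd1 : 1;	/* bit  8   */
		uint32_t reserved2 : 23;
	} bits;
	uint32_t all_bits;
};

int main(void)
{
	union pqi_reset_register reg = { .all_bits = 0 };
	uint32_t legacy = (PQI_RESET_ACTION_RESET << 5) | PQI_RESET_TYPE_HARD_RESET;

	reg.bits.reset_type = PQI_RESET_TYPE_HARD_RESET;
	reg.bits.reset_action = PQI_RESET_ACTION_RESET;

	/* Both encodings should produce 0x23 on a little-endian build. */
	printf("bitfield=0x%02x legacy=0x%02x match=%d\n",
	       (unsigned)reg.all_bits, (unsigned)legacy, reg.all_bits == legacy);
	return 0;
}
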
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index cb8f886e705c..83bdbd84eb01 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -40,11 +40,11 @@
 #define BUILD_TIMESTAMP
 #endif
 
-#define DRIVER_VERSION		"1.0.4-100"
+#define DRIVER_VERSION		"1.1.2-125"
 #define DRIVER_MAJOR		1
-#define DRIVER_MINOR		0
-#define DRIVER_RELEASE		4
-#define DRIVER_REVISION		100
+#define DRIVER_MINOR		1
+#define DRIVER_RELEASE		2
+#define DRIVER_REVISION		125
 
 #define DRIVER_NAME		"Microsemi PQI Driver (v" \
 				DRIVER_VERSION BUILD_TIMESTAMP ")"
@@ -431,10 +431,10 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
 		cdb[1] = CISS_GET_RAID_MAP;
 		put_unaligned_be32(buffer_length, &cdb[6]);
 		break;
-	case SA_CACHE_FLUSH:
+	case SA_FLUSH_CACHE:
 		request->data_direction = SOP_WRITE_FLAG;
 		cdb[0] = BMIC_WRITE;
-		cdb[6] = BMIC_CACHE_FLUSH;
+		cdb[6] = BMIC_FLUSH_CACHE;
 		put_unaligned_be16(buffer_length, &cdb[7]);
 		break;
 	case BMIC_IDENTIFY_CONTROLLER:
@@ -585,14 +585,13 @@ static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
 	return rc;
 }
 
-#define SA_CACHE_FLUSH_BUFFER_LENGTH	4
-
-static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info)
+static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info,
+	enum bmic_flush_cache_shutdown_event shutdown_event)
 {
 	int rc;
 	struct pqi_raid_path_request request;
 	int pci_direction;
-	u8 *buffer;
+	struct bmic_flush_cache *flush_cache;
 
 	/*
 	 * Don't bother trying to flush the cache if the controller is
@@ -601,13 +600,15 @@ static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info)
 	if (pqi_ctrl_offline(ctrl_info))
 		return -ENXIO;
 
-	buffer = kzalloc(SA_CACHE_FLUSH_BUFFER_LENGTH, GFP_KERNEL);
-	if (!buffer)
+	flush_cache = kzalloc(sizeof(*flush_cache), GFP_KERNEL);
+	if (!flush_cache)
 		return -ENOMEM;
 
+	flush_cache->shutdown_event = shutdown_event;
+
 	rc = pqi_build_raid_path_request(ctrl_info, &request,
-		SA_CACHE_FLUSH, RAID_CTLR_LUNID, buffer,
-		SA_CACHE_FLUSH_BUFFER_LENGTH, 0, &pci_direction);
+		SA_FLUSH_CACHE, RAID_CTLR_LUNID, flush_cache,
+		sizeof(*flush_cache), 0, &pci_direction);
 	if (rc)
 		goto out;
 
@@ -618,7 +619,7 @@ static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info)
 		pci_direction);
 
 out:
-	kfree(buffer);
+	kfree(flush_cache);
 
 	return rc;
 }
@@ -3007,11 +3008,9 @@ static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info,
 			break;
 		case IRQ_MODE_INTX:
 			pqi_configure_legacy_intx(ctrl_info, true);
-			sis_disable_msix(ctrl_info);
 			sis_enable_intx(ctrl_info);
 			break;
 		case IRQ_MODE_NONE:
-			sis_disable_msix(ctrl_info);
 			break;
 		}
 		break;
@@ -3019,14 +3018,12 @@ static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info,
 		switch (new_mode) {
 		case IRQ_MODE_MSIX:
 			pqi_configure_legacy_intx(ctrl_info, false);
-			sis_disable_intx(ctrl_info);
 			sis_enable_msix(ctrl_info);
 			break;
 		case IRQ_MODE_INTX:
 			break;
 		case IRQ_MODE_NONE:
 			pqi_configure_legacy_intx(ctrl_info, false);
-			sis_disable_intx(ctrl_info);
 			break;
 		}
 		break;
@@ -5498,6 +5495,7 @@ static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
 	case XFER_NONE:
 	case XFER_WRITE:
 	case XFER_READ:
+	case XFER_READ | XFER_WRITE:
 		break;
 	default:
 		return -EINVAL;
@@ -5538,6 +5536,9 @@ static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
 	case XFER_READ:
 		request.data_direction = SOP_READ_FLAG;
 		break;
+	case XFER_READ | XFER_WRITE:
+		request.data_direction = SOP_BIDIRECTIONAL;
+		break;
 	}
 
 	request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
@@ -5889,28 +5890,62 @@ static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info)
 	scsi_host_put(shost);
 }
 
-#define PQI_RESET_ACTION_RESET		0x1
+static int pqi_wait_for_pqi_reset_completion(struct pqi_ctrl_info *ctrl_info)
+{
+	int rc = 0;
+	struct pqi_device_registers __iomem *pqi_registers;
+	unsigned long timeout;
+	unsigned int timeout_msecs;
+	union pqi_reset_register reset_reg;
 
-#define PQI_RESET_TYPE_NO_RESET		0x0
-#define PQI_RESET_TYPE_SOFT_RESET	0x1
-#define PQI_RESET_TYPE_FIRM_RESET	0x2
-#define PQI_RESET_TYPE_HARD_RESET	0x3
+	pqi_registers = ctrl_info->pqi_registers;
+	timeout_msecs = readw(&pqi_registers->max_reset_timeout) * 100;
+	timeout = msecs_to_jiffies(timeout_msecs) + jiffies;
+
+	while (1) {
+		msleep(PQI_RESET_POLL_INTERVAL_MSECS);
+		reset_reg.all_bits = readl(&pqi_registers->device_reset);
+		if (reset_reg.bits.reset_action == PQI_RESET_ACTION_COMPLETED)
+			break;
+		pqi_check_ctrl_health(ctrl_info);
+		if (pqi_ctrl_offline(ctrl_info)) {
+			rc = -ENXIO;
+			break;
+		}
+		if (time_after(jiffies, timeout)) {
+			rc = -ETIMEDOUT;
+			break;
+		}
+	}
+
+	return rc;
+}
 
 static int pqi_reset(struct pqi_ctrl_info *ctrl_info)
 {
 	int rc;
-	u32 reset_params;
+	union pqi_reset_register reset_reg;
+
+	if (ctrl_info->pqi_reset_quiesce_supported) {
+		rc = sis_pqi_reset_quiesce(ctrl_info);
+		if (rc) {
+			dev_err(&ctrl_info->pci_dev->dev,
+				"PQI reset failed during quiesce with error %d\n",
+				rc);
+			return rc;
+		}
+	}
 
-	reset_params = (PQI_RESET_ACTION_RESET << 5) |
-		PQI_RESET_TYPE_HARD_RESET;
+	reset_reg.all_bits = 0;
+	reset_reg.bits.reset_type = PQI_RESET_TYPE_HARD_RESET;
+	reset_reg.bits.reset_action = PQI_RESET_ACTION_RESET;
 
-	writel(reset_params,
-		&ctrl_info->pqi_registers->device_reset);
+	writel(reset_reg.all_bits, &ctrl_info->pqi_registers->device_reset);
 
-	rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
+	rc = pqi_wait_for_pqi_reset_completion(ctrl_info);
 	if (rc)
 		dev_err(&ctrl_info->pci_dev->dev,
-			"PQI reset failed\n");
+			"PQI reset failed with error %d\n", rc);
 
 	return rc;
 }
@@ -6007,7 +6042,12 @@ static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info)
 	rc = pqi_reset(ctrl_info);
 	if (rc)
 		return rc;
-	sis_reenable_sis_mode(ctrl_info);
+	rc = sis_reenable_sis_mode(ctrl_info);
+	if (rc) {
+		dev_err(&ctrl_info->pci_dev->dev,
+			"re-enabling SIS mode failed with error %d\n", rc);
+		return rc;
+	}
 	pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
 
 	return 0;
@@ -6659,7 +6699,8 @@ static void pqi_shutdown(struct pci_dev *pci_dev)
 	 * Write all data in the controller's battery-backed cache to
 	 * storage.
 	 */
-	rc = pqi_flush_cache(ctrl_info);
+	rc = pqi_flush_cache(ctrl_info, SHUTDOWN);
+	pqi_reset(ctrl_info);
 	if (rc == 0)
 		return;
 
@@ -6703,7 +6744,7 @@ static __maybe_unused int pqi_suspend(struct pci_dev *pci_dev, pm_message_t stat
 	pqi_cancel_rescan_worker(ctrl_info);
 	pqi_wait_until_scan_finished(ctrl_info);
 	pqi_wait_until_lun_reset_finished(ctrl_info);
-	pqi_flush_cache(ctrl_info);
+	pqi_flush_cache(ctrl_info, SUSPEND);
 	pqi_ctrl_block_requests(ctrl_info);
 	pqi_ctrl_wait_until_quiesced(ctrl_info);
 	pqi_wait_until_inbound_queues_empty(ctrl_info);
@@ -6781,7 +6822,7 @@ static const struct pci_device_id pqi_pci_id_table[] = {
 	},
 	{
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
-			       PCI_VENDOR_ID_ADAPTEC2, 0x0605)
+			       PCI_VENDOR_ID_ADAPTEC2, 0x0608)
 	},
 	{
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
@@ -6813,6 +6854,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
 	},
 	{
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_ADAPTEC2, 0x0807)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 			       PCI_VENDOR_ID_ADAPTEC2, 0x0900)
 	},
 	{
@@ -6849,6 +6894,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
 	},
 	{
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_ADAPTEC2, 0x090a)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 			       PCI_VENDOR_ID_ADAPTEC2, 0x1200)
 	},
 	{
@@ -6881,6 +6930,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
 	},
 	{
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_DELL, 0x1fe0)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 			       PCI_VENDOR_ID_HP, 0x0600)
 	},
 	{
@@ -6897,11 +6950,7 @@ static const struct pci_device_id pqi_pci_id_table[] = {
 	},
 	{
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
-			       PCI_VENDOR_ID_HP, 0x0604)
-	},
-	{
-		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
-			       PCI_VENDOR_ID_HP, 0x0606)
+			       PCI_VENDOR_ID_HP, 0x0609)
 	},
 	{
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
@@ -6929,14 +6978,6 @@ static const struct pci_device_id pqi_pci_id_table[] = {
 	},
 	{
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
-			       PCI_VENDOR_ID_HP, 0x0656)
-	},
-	{
-		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
-			       PCI_VENDOR_ID_HP, 0x0657)
-	},
-	{
-		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 			       PCI_VENDOR_ID_HP, 0x0700)
 	},
 	{
@@ -6957,14 +6998,6 @@ static const struct pci_device_id pqi_pci_id_table[] = {
 	},
 	{
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
-			       PCI_VENDOR_ID_HP, 0x1102)
-	},
-	{
-		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
-			       PCI_VENDOR_ID_HP, 0x1150)
-	},
-	{
-		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 			       PCI_ANY_ID, PCI_ANY_ID)
 	},
 	{ 0 }
diff --git a/drivers/scsi/smartpqi/smartpqi_sas_transport.c b/drivers/scsi/smartpqi/smartpqi_sas_transport.c
index 0d89d3728b43..b209a35e482e 100644
--- a/drivers/scsi/smartpqi/smartpqi_sas_transport.c
+++ b/drivers/scsi/smartpqi/smartpqi_sas_transport.c
@@ -329,14 +329,6 @@ static int pqi_sas_phy_speed(struct sas_phy *phy,
 	return -EINVAL;
 }
 
-/* SMP = Serial Management Protocol */
-
-static int pqi_sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
-	struct request *req)
-{
-	return -EINVAL;
-}
-
 struct sas_function_template pqi_sas_transport_functions = {
 	.get_linkerrors = pqi_sas_get_linkerrors,
 	.get_enclosure_identifier = pqi_sas_get_enclosure_identifier,
@@ -346,5 +338,4 @@ struct sas_function_template pqi_sas_transport_functions = {
 	.phy_setup = pqi_sas_phy_setup,
 	.phy_release = pqi_sas_phy_release,
 	.set_phy_speed = pqi_sas_phy_speed,
-	.smp_handler = pqi_sas_smp_handler,
 };
diff --git a/drivers/scsi/smartpqi/smartpqi_sis.c b/drivers/scsi/smartpqi/smartpqi_sis.c
index e55dfcf200e5..5141bd4c9f06 100644
--- a/drivers/scsi/smartpqi/smartpqi_sis.c
+++ b/drivers/scsi/smartpqi/smartpqi_sis.c
@@ -34,11 +34,13 @@
 #define SIS_REENABLE_SIS_MODE			0x1
 #define SIS_ENABLE_MSIX				0x40
 #define SIS_ENABLE_INTX				0x80
-#define SIS_SOFT_RESET				0x100
-#define SIS_TRIGGER_SHUTDOWN			0x800000
 #define SIS_CMD_READY				0x200
+#define SIS_TRIGGER_SHUTDOWN			0x800000
+#define SIS_PQI_RESET_QUIESCE			0x1000000
+
 #define SIS_CMD_COMPLETE			0x1000
 #define SIS_CLEAR_CTRL_TO_HOST_DOORBELL		0x1000
+
 #define SIS_CMD_STATUS_SUCCESS			0x1
 #define SIS_CMD_COMPLETE_TIMEOUT_SECS		30
 #define SIS_CMD_COMPLETE_POLL_INTERVAL_MSECS	10
@@ -47,6 +49,7 @@
 #define SIS_EXTENDED_PROPERTIES_SUPPORTED	0x800000
 #define SIS_SMARTARRAY_FEATURES_SUPPORTED	0x2
 #define SIS_PQI_MODE_SUPPORTED			0x4
+#define SIS_PQI_RESET_QUIESCE_SUPPORTED		0x8
 #define SIS_REQUIRED_EXTENDED_PROPERTIES	\
 	(SIS_SMARTARRAY_FEATURES_SUPPORTED | SIS_PQI_MODE_SUPPORTED)
 
@@ -258,6 +261,9 @@ int sis_get_ctrl_properties(struct pqi_ctrl_info *ctrl_info)
 		SIS_REQUIRED_EXTENDED_PROPERTIES)
 		return -ENODEV;
 
+	if (extended_properties & SIS_PQI_RESET_QUIESCE_SUPPORTED)
+		ctrl_info->pqi_reset_quiesce_supported = true;
+
 	return 0;
 }
 
@@ -336,9 +342,10 @@ out:
 
 #define SIS_DOORBELL_BIT_CLEAR_TIMEOUT_SECS	30
 
-static void sis_wait_for_doorbell_bit_to_clear(
+static int sis_wait_for_doorbell_bit_to_clear(
 	struct pqi_ctrl_info *ctrl_info, u32 bit)
 {
+	int rc = 0;
 	u32 doorbell_register;
 	unsigned long timeout;
 
@@ -350,78 +357,38 @@ static void sis_wait_for_doorbell_bit_to_clear(
 		if ((doorbell_register & bit) == 0)
 			break;
 		if (readl(&ctrl_info->registers->sis_firmware_status) &
-			SIS_CTRL_KERNEL_PANIC)
+			SIS_CTRL_KERNEL_PANIC) {
+			rc = -ENODEV;
 			break;
+		}
 		if (time_after(jiffies, timeout)) {
 			dev_err(&ctrl_info->pci_dev->dev,
 				"doorbell register bit 0x%x not cleared\n",
 				bit);
+			rc = -ETIMEDOUT;
 			break;
 		}
 		usleep_range(1000, 2000);
 	}
-}
-
-/* Enable MSI-X interrupts on the controller. */
-
-void sis_enable_msix(struct pqi_ctrl_info *ctrl_info)
-{
-	u32 doorbell_register;
-
-	doorbell_register =
-		readl(&ctrl_info->registers->sis_host_to_ctrl_doorbell);
-	doorbell_register |= SIS_ENABLE_MSIX;
-
-	writel(doorbell_register,
-		&ctrl_info->registers->sis_host_to_ctrl_doorbell);
 
-	sis_wait_for_doorbell_bit_to_clear(ctrl_info, SIS_ENABLE_MSIX);
+	return rc;
 }
 
-/* Disable MSI-X interrupts on the controller. */
-
-void sis_disable_msix(struct pqi_ctrl_info *ctrl_info)
+static inline int sis_set_doorbell_bit(struct pqi_ctrl_info *ctrl_info, u32 bit)
 {
-	u32 doorbell_register;
-
-	doorbell_register =
-		readl(&ctrl_info->registers->sis_host_to_ctrl_doorbell);
-	doorbell_register &= ~SIS_ENABLE_MSIX;
+	writel(bit, &ctrl_info->registers->sis_host_to_ctrl_doorbell);
 
-	writel(doorbell_register,
-		&ctrl_info->registers->sis_host_to_ctrl_doorbell);
+	return sis_wait_for_doorbell_bit_to_clear(ctrl_info, bit);
 }
 
-void sis_enable_intx(struct pqi_ctrl_info *ctrl_info)
+void sis_enable_msix(struct pqi_ctrl_info *ctrl_info)
 {
-	u32 doorbell_register;
-
-	doorbell_register =
-		readl(&ctrl_info->registers->sis_host_to_ctrl_doorbell);
-	doorbell_register |= SIS_ENABLE_INTX;
-
-	writel(doorbell_register,
-		&ctrl_info->registers->sis_host_to_ctrl_doorbell);
-
-	sis_wait_for_doorbell_bit_to_clear(ctrl_info, SIS_ENABLE_INTX);
+	sis_set_doorbell_bit(ctrl_info, SIS_ENABLE_MSIX);
 }
 
-void sis_disable_intx(struct pqi_ctrl_info *ctrl_info)
-{
-	u32 doorbell_register;
-
-	doorbell_register =
-		readl(&ctrl_info->registers->sis_host_to_ctrl_doorbell);
-	doorbell_register &= ~SIS_ENABLE_INTX;
-
-	writel(doorbell_register,
-		&ctrl_info->registers->sis_host_to_ctrl_doorbell);
-}
-
-void sis_soft_reset(struct pqi_ctrl_info *ctrl_info)
+void sis_enable_intx(struct pqi_ctrl_info *ctrl_info)
 {
-	writel(SIS_SOFT_RESET,
-		&ctrl_info->registers->sis_host_to_ctrl_doorbell);
+	sis_set_doorbell_bit(ctrl_info, SIS_ENABLE_INTX);
 }
 
 void sis_shutdown_ctrl(struct pqi_ctrl_info *ctrl_info)
@@ -434,38 +401,14 @@ void sis_shutdown_ctrl(struct pqi_ctrl_info *ctrl_info)
 		&ctrl_info->registers->sis_host_to_ctrl_doorbell);
 }
 
-#define SIS_MODE_READY_TIMEOUT_SECS	30
+int sis_pqi_reset_quiesce(struct pqi_ctrl_info *ctrl_info)
+{
+	return sis_set_doorbell_bit(ctrl_info, SIS_PQI_RESET_QUIESCE);
+}
 
 int sis_reenable_sis_mode(struct pqi_ctrl_info *ctrl_info)
 {
-	int rc;
-	unsigned long timeout;
-	struct pqi_ctrl_registers __iomem *registers;
-	u32 doorbell;
-
-	registers = ctrl_info->registers;
-
-	writel(SIS_REENABLE_SIS_MODE,
-		&registers->sis_host_to_ctrl_doorbell);
-
-	rc = 0;
-	timeout = (SIS_MODE_READY_TIMEOUT_SECS * HZ) + jiffies;
-
-	while (1) {
-		doorbell = readl(&registers->sis_ctrl_to_host_doorbell);
-		if ((doorbell & SIS_REENABLE_SIS_MODE) == 0)
-			break;
-		if (time_after(jiffies, timeout)) {
-			rc = -ETIMEDOUT;
-			break;
-		}
-	}
-
-	if (rc)
-		dev_err(&ctrl_info->pci_dev->dev,
-			"re-enabling SIS mode failed\n");
-
-	return rc;
+	return sis_set_doorbell_bit(ctrl_info, SIS_REENABLE_SIS_MODE);
 }
 
 void sis_write_driver_scratch(struct pqi_ctrl_info *ctrl_info, u32 value)
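
The smartpqi_sis.c hunks above collapse several near-identical write-the-doorbell-then-poll functions into a single sis_set_doorbell_bit() helper, and make the wait loop report -ENODEV (firmware panic) or -ETIMEDOUT instead of returning void. A standalone sketch of the same consolidation, using a simulated register and hypothetical names (set_ctrl_bit, wait_bit_clear), might look like:

#include <stdio.h>
#include <stdint.h>
#include <errno.h>
#include <time.h>

static volatile uint32_t fake_doorbell;   /* stands in for the MMIO register */

#define BIT_TIMEOUT_SECS 1

/* Poll until the bit clears, or give up with -ETIMEDOUT. */
static int wait_bit_clear(uint32_t bit)
{
	time_t deadline = time(NULL) + BIT_TIMEOUT_SECS;

	while (fake_doorbell & bit) {
		if (time(NULL) > deadline)
			return -ETIMEDOUT;
		fake_doorbell &= ~bit;   /* simulate the controller acking the bit */
	}
	return 0;
}

/* One helper replaces every copy of "set the bit, then wait for it to clear". */
static int set_ctrl_bit(uint32_t bit)
{
	fake_doorbell |= bit;
	return wait_bit_clear(bit);
}

int main(void)
{
	int rc = set_ctrl_bit(0x40);   /* e.g. an "enable MSI-X"-style request */

	printf("set_ctrl_bit: %d\n", rc);
	return rc ? 1 : 0;
}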
diff --git a/drivers/scsi/smartpqi/smartpqi_sis.h b/drivers/scsi/smartpqi/smartpqi_sis.h
index 983184b69373..2bf889dbf5ab 100644
--- a/drivers/scsi/smartpqi/smartpqi_sis.h
+++ b/drivers/scsi/smartpqi/smartpqi_sis.h
@@ -27,11 +27,9 @@ int sis_get_ctrl_properties(struct pqi_ctrl_info *ctrl_info);
 int sis_get_pqi_capabilities(struct pqi_ctrl_info *ctrl_info);
 int sis_init_base_struct_addr(struct pqi_ctrl_info *ctrl_info);
 void sis_enable_msix(struct pqi_ctrl_info *ctrl_info);
-void sis_disable_msix(struct pqi_ctrl_info *ctrl_info);
 void sis_enable_intx(struct pqi_ctrl_info *ctrl_info);
-void sis_disable_intx(struct pqi_ctrl_info *ctrl_info);
-void sis_soft_reset(struct pqi_ctrl_info *ctrl_info);
 void sis_shutdown_ctrl(struct pqi_ctrl_info *ctrl_info);
+int sis_pqi_reset_quiesce(struct pqi_ctrl_info *ctrl_info);
 int sis_reenable_sis_mode(struct pqi_ctrl_info *ctrl_info);
 void sis_write_driver_scratch(struct pqi_ctrl_info *ctrl_info, u32 value);
 u32 sis_read_driver_scratch(struct pqi_ctrl_info *ctrl_info);
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index a8f630213a1a..9be34d37c356 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -393,7 +393,7 @@ static int sr_init_command(struct scsi_cmnd *SCpnt)
 	ret = scsi_init_io(SCpnt);
 	if (ret != BLKPREP_OK)
 		goto out;
-	SCpnt = rq->special;
+	WARN_ON_ONCE(SCpnt != rq->special);
 	cd = scsi_cd(rq->rq_disk);
 
 	/* from here on until we're complete, any goto out
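
The sr.c change above replaces a redundant SCpnt = rq->special reassignment with a one-shot sanity check: the invariant is asserted rather than silently re-established. A userspace approximation of a WARN_ON_ONCE-style check, with hypothetical names, is:

#include <stdio.h>

/* Fire the warning at most once per call site, like the kernel macro. */
#define WARN_ONCE_IF(cond, msg) do {				\
		static int warned;				\
		if ((cond) && !warned) {			\
			warned = 1;				\
			fprintf(stderr, "WARNING: %s\n", (msg)); \
		}						\
	} while (0)

struct request { void *special; };

static void init_command(struct request *rq, void *cmd)
{
	/* Assert the invariant instead of silently re-deriving cmd from rq. */
	WARN_ONCE_IF(cmd != rq->special, "cmd does not match rq->special");
}

int main(void)
{
	int token = 42;
	struct request rq = { .special = &token };

	init_command(&rq, &token);   /* consistent: no warning */
	init_command(&rq, NULL);     /* inconsistent: warns once */
	init_command(&rq, NULL);     /* already warned: silent */
	return 0;
}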
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 3cc8d67783a1..5e7200f05873 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1640,6 +1640,8 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
 	put_cpu();
 
 	if (ret == -EAGAIN) {
+		if (payload_sz > sizeof(cmd_request->mpb))
+			kfree(payload);
 		/* no more space */
 		return SCSI_MLQUEUE_DEVICE_BUSY;
 	}
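
The storvsc hunk above plugs a leak: when the payload was too large for the embedded buffer and had to be allocated separately, the early -EAGAIN return must free it. A standalone sketch of the small-buffer-inline / large-buffer-allocated pattern, with hypothetical names (send_payload, fake_transport_send), is:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

struct command {
	char inline_buf[64];   /* embedded buffer for the common small case */
};

/* Stand-in for the transport; pretend the ring buffer is full. */
static int fake_transport_send(const char *buf, size_t len)
{
	(void)buf; (void)len;
	return -EAGAIN;
}

static int send_payload(struct command *cmd, const char *data, size_t len)
{
	char *payload = cmd->inline_buf;
	int ret;

	if (len > sizeof(cmd->inline_buf)) {
		payload = malloc(len);             /* large payloads go to the heap */
		if (!payload)
			return -ENOMEM;
	}
	memcpy(payload, data, len);

	ret = fake_transport_send(payload, len);
	if (ret) {
		/* Error path: free only the buffer we allocated ourselves. */
		if (payload != cmd->inline_buf)
			free(payload);
		return ret;
	}
	/* On success the completion path would own and later free the buffer. */
	return 0;
}

int main(void)
{
	struct command cmd;
	char big[256] = { 0 };

	printf("rc = %d\n", send_payload(&cmd, big, sizeof(big)));
	return 0;
}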
diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c
index e64b0c542f95..9492638296c8 100644
--- a/drivers/scsi/sun3_scsi.c
+++ b/drivers/scsi/sun3_scsi.c
@@ -46,7 +46,7 @@
 #define NCR5380_write(reg, value)       out_8(hostdata->io + (reg), value)
 
 #define NCR5380_queue_command           sun3scsi_queue_command
-#define NCR5380_bus_reset               sun3scsi_bus_reset
+#define NCR5380_host_reset              sun3scsi_host_reset
 #define NCR5380_abort                   sun3scsi_abort
 #define NCR5380_info                    sun3scsi_info
 
@@ -495,7 +495,7 @@ static struct scsi_host_template sun3_scsi_template = {
 	.info			= sun3scsi_info,
 	.queuecommand		= sun3scsi_queue_command,
 	.eh_abort_handler	= sun3scsi_abort,
-	.eh_bus_reset_handler	= sun3scsi_bus_reset,
+	.eh_host_reset_handler	= sun3scsi_host_reset,
 	.can_queue		= 16,
 	.this_id		= 7,
 	.sg_tablesize		= SG_NONE,
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c
index 6b349e301869..15ff285a9f8f 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.c
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c
@@ -759,7 +759,7 @@ static int sym_prepare_setting(struct Scsi_Host *shost, struct sym_hcb *np, stru
 	/*
 	 * Maximum synchronous period factor supported by the chip.
 	 */
-	period = (11 * div_10M[np->clock_divn - 1]) / (4 * np->clock_khz);
+	period = div64_ul(11 * div_10M[np->clock_divn - 1], 4 * np->clock_khz);
 	np->maxsync = period > 2540 ? 254 : period / 10;
 
 	/*
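
The sym_hipd.c change above routes the period calculation through div64_ul(), the kernel's 64-bit-dividend division helper, instead of a plain 32-bit divide. As a general illustration of why widening the dividend matters (a sketch of the widen-before-divide idea with made-up clock numbers, not a claim about this patch's exact motivation), consider:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

/* Compute (11 * table_entry) / (4 * clock_khz) with a 64-bit dividend,
 * so the scaled product cannot wrap even when the table entry is large. */
static uint32_t sync_period(uint32_t table_entry, uint32_t clock_khz)
{
	uint64_t dividend = 11ULL * table_entry;   /* widened before multiplying */

	return (uint32_t)(dividend / (4ULL * clock_khz));
}

int main(void)
{
	/* Hypothetical values: a very large divider entry and an 80 MHz clock. */
	uint32_t period = sync_period(429496730U, 80000U);

	printf("period = %" PRIu32 "\n", period);
	printf("32-bit product would wrap to: %u\n", 11U * 429496730U);
	return 0;
}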
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 5bc9dc14e075..794a4600e952 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -438,7 +438,7 @@ static void ufshcd_print_host_state(struct ufs_hba *hba)
 {
 	dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
 	dev_err(hba->dev, "lrb in use=0x%lx, outstanding reqs=0x%lx tasks=0x%lx\n",
-		hba->lrb_in_use, hba->outstanding_tasks, hba->outstanding_reqs);
+		hba->lrb_in_use, hba->outstanding_reqs, hba->outstanding_tasks);
 	dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
 		hba->saved_err, hba->saved_uic_err);
 	dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 9be211d68b15..7c28e8d4955a 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -818,7 +818,6 @@ static struct scsi_host_template virtscsi_host_template_single = {
 	.eh_timed_out = virtscsi_eh_timed_out,
 	.slave_alloc = virtscsi_device_alloc,
 
-	.can_queue = 1024,
 	.dma_boundary = UINT_MAX,
 	.use_clustering = ENABLE_CLUSTERING,
 	.target_alloc = virtscsi_target_alloc,
@@ -839,7 +838,6 @@ static struct scsi_host_template virtscsi_host_template_multi = {
 	.eh_timed_out = virtscsi_eh_timed_out,
 	.slave_alloc = virtscsi_device_alloc,
 
-	.can_queue = 1024,
 	.dma_boundary = UINT_MAX,
 	.use_clustering = ENABLE_CLUSTERING,
 	.target_alloc = virtscsi_target_alloc,
@@ -972,6 +970,8 @@ static int virtscsi_probe(struct virtio_device *vdev)
 	if (err)
 		goto virtscsi_init_failed;
 
+	shost->can_queue = virtqueue_get_vring_size(vscsi->req_vqs[0].vq);
+
 	cmd_per_lun = virtscsi_config_get(vdev, cmd_per_lun) ?: 1;
 	shost->cmd_per_lun = min_t(u32, cmd_per_lun, shost->can_queue);
 	shost->max_sectors = virtscsi_config_get(vdev, max_sectors) ?: 0xFFFF;
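
The virtio_scsi hunk above drops the hard-coded can_queue = 1024 and instead sizes the host queue from the request virtqueue's ring, then clamps cmd_per_lun against it. A minimal sketch of that clamp-to-reported-capacity pattern, with hypothetical names (host_cfg, configure_host), is:

#include <stdio.h>
#include <stdint.h>

struct host_cfg {
	uint32_t can_queue;     /* total outstanding commands for the host */
	uint32_t cmd_per_lun;   /* per-LUN depth, never above can_queue */
};

static uint32_t min_u32(uint32_t a, uint32_t b)
{
	return a < b ? a : b;
}

/* Size the host from what the device actually provides, not a constant. */
static void configure_host(struct host_cfg *cfg, uint32_t ring_size,
			   uint32_t requested_cmd_per_lun)
{
	cfg->can_queue = ring_size;
	cfg->cmd_per_lun = min_u32(requested_cmd_per_lun ? requested_cmd_per_lun : 1,
				   cfg->can_queue);
}

int main(void)
{
	struct host_cfg cfg;

	configure_host(&cfg, 256, 0);      /* small ring, no per-LUN hint */
	printf("can_queue=%u cmd_per_lun=%u\n", cfg.can_queue, cfg.cmd_per_lun);

	configure_host(&cfg, 128, 512);    /* hint larger than the ring: clamped */
	printf("can_queue=%u cmd_per_lun=%u\n", cfg.can_queue, cfg.cmd_per_lun);
	return 0;
}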
diff --git a/drivers/scsi/wd33c93.c b/drivers/scsi/wd33c93.c
index 9e09da412b92..74be04f2357c 100644
--- a/drivers/scsi/wd33c93.c
+++ b/drivers/scsi/wd33c93.c
@@ -1578,6 +1578,7 @@ wd33c93_host_reset(struct scsi_cmnd * SCpnt)
 	int i;
 
 	instance = SCpnt->device->host;
+	spin_lock_irq(instance->host_lock);
 	hostdata = (struct WD33C93_hostdata *) instance->hostdata;
 
 	printk("scsi%d: reset. ", instance->host_no);
@@ -1603,6 +1604,7 @@ wd33c93_host_reset(struct scsi_cmnd * SCpnt)
 	reset_wd33c93(instance);
 	SCpnt->result = DID_RESET << 16;
 	enable_irq(instance->irq);
+	spin_unlock_irq(instance->host_lock);
 	return SUCCESS;
 }
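
The wd33c93 hunk above makes the host-reset handler take and release the host lock itself around the whole reset sequence, presumably because callers no longer hold it. A userspace analogue of pulling the locking into the handler, using a pthread mutex in place of the spinlock (names are hypothetical), is:

#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t host_lock = PTHREAD_MUTEX_INITIALIZER;
static int outstanding_cmds;

/* The handler owns its locking: callers no longer need to hold host_lock. */
static int host_reset(void)
{
	pthread_mutex_lock(&host_lock);

	/* Everything that touches shared host state happens under the lock. */
	printf("resetting host, aborting %d command(s)\n", outstanding_cmds);
	outstanding_cmds = 0;

	pthread_mutex_unlock(&host_lock);
	return 0;   /* stands in for SUCCESS */
}

int main(void)
{
	outstanding_cmds = 3;
	return host_reset();
}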