author    Linus Torvalds <torvalds@linux-foundation.org>  2012-10-13 10:57:01 +0900
committer Linus Torvalds <torvalds@linux-foundation.org>  2012-10-13 10:57:01 +0900
commit    6a5a3d6a4adde0c66f3be29bbd7c0d6ffb7e1a40 (patch)
tree      ae416ffa4458df755f984a05d65ee1c3e220c40b
parent    8bbbfa70549bd84f29ff331d0ac051897ccbbd72 (diff)
parent    5c1b10ab7f93d24f29b5630286e323d1c5802d5c (diff)
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull misc SCSI updates from James Bottomley:
 "This is an assorted set of stragglers into the merge window with
  driver updates for megaraid_sas, lpfc, bfa and mvumi.  It also
  includes some fairly major fixes for virtio-scsi (scatterlist init),
  scsi_debug (off-by-one error), storvsc (use-after-free) and qla2xxx
  (potential deadlock).

  Signed-off-by: James Bottomley <JBottomley@Parallels.com>"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (49 commits)
  [SCSI] storvsc: Account for in-transit packets in the RESET path
  [SCSI] qla2xxx: fix potential deadlock on ha->hardware_lock
  [SCSI] scsi_debug: Fix off-by-one bug when unmapping region
  [SCSI] Shorten the path length of scsi_cmd_to_driver()
  [SCSI] virtio-scsi: support online resizing of disks
  [SCSI] virtio-scsi: fix LUNs greater than 255
  [SCSI] virtio-scsi: initialize scatterlist structure
  [SCSI] megaraid_sas: Version, Changelog, Copyright update
  [SCSI] megaraid_sas: Remove duplicate code
  [SCSI] megaraid_sas: Add SystemPD FastPath support
  [SCSI] megaraid_sas: Add array boundary check for SystemPD
  [SCSI] megaraid_sas: Load io_request DataLength in bytes
  [SCSI] megaraid_sas: Add module param for configurable MSI-X vector count
  [SCSI] megaraid_sas: Remove un-needed completion_lock spinlock calls
  [SCSI] lpfc 8.3.35: Update lpfc version for 8.3.35 driver release
  [SCSI] lpfc 8.3.35: Fixed not reporting logical link speed to SCSI midlayer when QoS not on
  [SCSI] lpfc 8.3.35: Fix error with fabric service parameters causing performance issues
  [SCSI] lpfc 8.3.35: Fixed SCSI host create showing wrong link speed on SLI3 HBA ports
  [SCSI] lpfc 8.3.35: Fixed not checking solicitation in progress bit when verifying FCF record for use
  [SCSI] lpfc 8.3.35: Fixed messages for misconfigured port errors
  ...
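
For context on the virtio-scsi "initialize scatterlist structure" fix
named above: a struct scatterlist embedded inside another object carries
link/end-marker state (and a debug magic under CONFIG_DEBUG_SG), so it
must be initialized before use rather than only pointed at a buffer.
A minimal sketch of the safe pattern -- hypothetical structure and
function names, not the driver's actual change:

	#include <linux/scatterlist.h>

	struct req_buf {			/* hypothetical container */
		struct scatterlist sg;		/* embedded, not from sg_alloc_table() */
		void *data;
		unsigned int len;
	};

	static void req_buf_map(struct req_buf *rb)
	{
		/*
		 * sg_init_one() zeroes the entry, sets the end marker
		 * (and the SG_MAGIC debug cookie), then points it at the
		 * buffer; calling only sg_set_buf() here would leave
		 * stale link/end bits behind on uninitialized memory.
		 */
		sg_init_one(&rb->sg, rb->data, rb->len);
	}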
-rw-r--r--  Documentation/ABI/testing/sysfs-bus-fcoe     |   12
-rw-r--r--  Documentation/scsi/ChangeLog.megaraid_sas    |   13
-rw-r--r--  drivers/scsi/bfa/bfa_core.c                  |   85
-rw-r--r--  drivers/scsi/bfa/bfa_defs.h                  |   61
-rw-r--r--  drivers/scsi/bfa/bfa_defs_svc.h              |  119
-rw-r--r--  drivers/scsi/bfa/bfa_fc.h                    |    5
-rw-r--r--  drivers/scsi/bfa/bfa_fcbuild.c               |    4
-rw-r--r--  drivers/scsi/bfa/bfa_fcpim.c                 |  123
-rw-r--r--  drivers/scsi/bfa/bfa_fcpim.h                 |   13
-rw-r--r--  drivers/scsi/bfa/bfa_fcs.c                   |   64
-rw-r--r--  drivers/scsi/bfa/bfa_fcs.h                   |   23
-rw-r--r--  drivers/scsi/bfa/bfa_fcs_lport.c             |  155
-rw-r--r--  drivers/scsi/bfa/bfa_fcs_rport.c             |  288
-rw-r--r--  drivers/scsi/bfa/bfa_ioc.c                   |  494
-rw-r--r--  drivers/scsi/bfa/bfa_ioc.h                   |   63
-rw-r--r--  drivers/scsi/bfa/bfa_ioc_ct.c                |  236
-rw-r--r--  drivers/scsi/bfa/bfa_modules.h               |    1
-rw-r--r--  drivers/scsi/bfa/bfa_port.c                  |   32
-rw-r--r--  drivers/scsi/bfa/bfa_port.h                  |    3
-rw-r--r--  drivers/scsi/bfa/bfa_svc.c                   |  732
-rw-r--r--  drivers/scsi/bfa/bfa_svc.h                   |   30
-rw-r--r--  drivers/scsi/bfa/bfad.c                      |    6
-rw-r--r--  drivers/scsi/bfa/bfad_bsg.c                  |  375
-rw-r--r--  drivers/scsi/bfa/bfad_bsg.h                  |   63
-rw-r--r--  drivers/scsi/bfa/bfad_drv.h                  |    2
-rw-r--r--  drivers/scsi/bfa/bfi.h                       |   72
-rw-r--r--  drivers/scsi/bfa/bfi_ms.h                    |   14
-rw-r--r--  drivers/scsi/bfa/bfi_reg.h                   |    3
-rw-r--r--  drivers/scsi/fcoe/fcoe.c                     |   14
-rw-r--r--  drivers/scsi/fcoe/fcoe.h                     |    2
-rw-r--r--  drivers/scsi/libfc/fc_fcp.c                  |    3
-rw-r--r--  drivers/scsi/lpfc/lpfc.h                     |    2
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c                |    8
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h                |    1
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c                 |    7
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c             |   11
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw4.h                 |   20
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c                |  256
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c                |   39
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c                 |   96
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli4.h                |    8
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h             |    2
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.h         |    8
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_base.c    |   13
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fp.c      |    2
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fusion.c  |   27
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fusion.h  |    2
-rw-r--r--  drivers/scsi/mvumi.c                         | 1093
-rw-r--r--  drivers/scsi/mvumi.h                         |  235
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c              |    5
-rw-r--r--  drivers/scsi/scsi_debug.c                    |    2
-rw-r--r--  drivers/scsi/scsi_error.c                    |    8
-rw-r--r--  drivers/scsi/storvsc_drv.c                   |    5
-rw-r--r--  drivers/scsi/virtio_scsi.c                   |   39
-rw-r--r--  include/linux/virtio_scsi.h                  |    2
-rw-r--r--  include/scsi/fc/fc_fcp.h                     |    6
-rw-r--r--  include/scsi/libfcoe.h                       |    2
-rw-r--r--  include/scsi/scsi_cmnd.h                     |   12
58 files changed, 4274 insertions, 747 deletions
diff --git a/Documentation/ABI/testing/sysfs-bus-fcoe b/Documentation/ABI/testing/sysfs-bus-fcoe
index 469d09c02f6b..50e2a80ea28f 100644
--- a/Documentation/ABI/testing/sysfs-bus-fcoe
+++ b/Documentation/ABI/testing/sysfs-bus-fcoe
@@ -9,19 +9,19 @@ Attributes:
 			  this value will change the dev_loss_tmo for all
 			  FCFs discovered by this controller.
 
-	lesb_link_fail:   Link Error Status Block (LESB) link failure count.
+	lesb/link_fail:   Link Error Status Block (LESB) link failure count.
 
-	lesb_vlink_fail:  Link Error Status Block (LESB) virtual link
+	lesb/vlink_fail:  Link Error Status Block (LESB) virtual link
 			  failure count.
 
-	lesb_miss_fka:    Link Error Status Block (LESB) missed FCoE
+	lesb/miss_fka:    Link Error Status Block (LESB) missed FCoE
 			  Initialization Protocol (FIP) Keep-Alives (FKA).
 
-	lesb_symb_err:    Link Error Status Block (LESB) symbolic error count.
+	lesb/symb_err:    Link Error Status Block (LESB) symbolic error count.
 
-	lesb_err_block:   Link Error Status Block (LESB) block error count.
+	lesb/err_block:   Link Error Status Block (LESB) block error count.
 
-	lesb_fcs_error:   Link Error Status Block (LESB) Fibre Channel
+	lesb/fcs_error:   Link Error Status Block (LESB) Fibre Channel
 			  Services error count.
 
 Notes: ctlr_X (global increment starting at 0)
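
The change above moves the LESB counters from flat "lesb_*" attribute
names into a "lesb/" subdirectory of each FCoE controller. A hedged
userspace sketch reading one counter under the new layout -- the
"ctlr_0" instance name follows the ctlr_X note above, and the exact
location of the controller directory under /sys/bus/fcoe is an
assumption:

	#include <stdio.h>

	int main(void)
	{
		/* previously .../ctlr_0/lesb_link_fail */
		FILE *f = fopen("/sys/bus/fcoe/devices/ctlr_0/lesb/link_fail", "r");
		unsigned long count;

		if (f && fscanf(f, "%lu", &count) == 1)
			printf("link_fail: %lu\n", count);
		if (f)
			fclose(f);
		return 0;
	}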
diff --git a/Documentation/scsi/ChangeLog.megaraid_sas b/Documentation/scsi/ChangeLog.megaraid_sas
index 3a3079411a3d..da03146c182a 100644
--- a/Documentation/scsi/ChangeLog.megaraid_sas
+++ b/Documentation/scsi/ChangeLog.megaraid_sas
@@ -1,3 +1,16 @@
+Release Date    : Mon. Oct 1, 2012 17:00:00 PST 2012 -
+			(emaild-id:megaraidlinux@lsi.com)
+			Adam Radford
+Current Version : 06.504.01.00-rc1
+Old Version     : 00.00.06.18-rc1
+    1. Removed un-needed completion_lock spinlock calls.
+    2. Add module param for configurable MSI-X vector count.
+    3. Load io_request DataLength in bytes.
+    4. Add array boundary check for SystemPD.
+    5. Add SystemPD FastPath support.
+    6. Remove duplicate code.
+    7. Version, Changelog, Copyright update.
+-------------------------------------------------------------------------------
 Release Date    : Tue. Jun 17, 2012 17:00:00 PST 2012 -
 			(emaild-id:megaraidlinux@lsi.com)
 			Adam Radford/Kashyap Desai
diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c
index b7c326f7a6d0..342d7d9c0997 100644
--- a/drivers/scsi/bfa/bfa_core.c
+++ b/drivers/scsi/bfa/bfa_core.c
@@ -165,6 +165,16 @@ bfa_com_phy_attach(struct bfa_s *bfa, bfa_boolean_t mincfg)
 	bfa_phy_memclaim(phy, phy_dma->kva_curp, phy_dma->dma_curp, mincfg);
 }
 
+static void
+bfa_com_fru_attach(struct bfa_s *bfa, bfa_boolean_t mincfg)
+{
+	struct bfa_fru_s	*fru = BFA_FRU(bfa);
+	struct bfa_mem_dma_s	*fru_dma = BFA_MEM_FRU_DMA(bfa);
+
+	bfa_fru_attach(fru, &bfa->ioc, bfa, bfa->trcmod, mincfg);
+	bfa_fru_memclaim(fru, fru_dma->kva_curp, fru_dma->dma_curp, mincfg);
+}
+
 /*
  * BFA IOC FC related definitions
  */
@@ -274,6 +284,15 @@ bfa_iocfc_sm_initing(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
 	case IOCFC_E_IOC_ENABLED:
 		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_read);
 		break;
+
+	case IOCFC_E_DISABLE:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
+		break;
+
+	case IOCFC_E_STOP:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
+		break;
+
 	case IOCFC_E_IOC_FAILED:
 		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed);
 		break;
@@ -298,6 +317,15 @@ bfa_iocfc_sm_dconf_read(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
 	case IOCFC_E_DCONF_DONE:
 		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_cfg_wait);
 		break;
+
+	case IOCFC_E_DISABLE:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
+		break;
+
+	case IOCFC_E_STOP:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
+		break;
+
 	case IOCFC_E_IOC_FAILED:
 		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed);
 		break;
@@ -322,6 +350,15 @@ bfa_iocfc_sm_init_cfg_wait(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
 	case IOCFC_E_CFG_DONE:
 		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_cfg_done);
 		break;
+
+	case IOCFC_E_DISABLE:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
+		break;
+
+	case IOCFC_E_STOP:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
+		break;
+
 	case IOCFC_E_IOC_FAILED:
 		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed);
 		break;
@@ -433,6 +470,12 @@ bfa_iocfc_sm_stopping(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
 		bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.stop_hcb_qe,
 			     bfa_iocfc_stop_cb, iocfc->bfa);
 		break;
+
+	case IOCFC_E_IOC_ENABLED:
+	case IOCFC_E_DCONF_DONE:
+	case IOCFC_E_CFG_DONE:
+		break;
+
 	default:
 		bfa_sm_fault(iocfc->bfa, event);
 		break;
@@ -454,6 +497,15 @@ bfa_iocfc_sm_enabling(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
 	case IOCFC_E_IOC_ENABLED:
 		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_cfg_wait);
 		break;
+
+	case IOCFC_E_DISABLE:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
+		break;
+
+	case IOCFC_E_STOP:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write);
+		break;
+
 	case IOCFC_E_IOC_FAILED:
 		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed);
 
@@ -493,6 +545,13 @@ bfa_iocfc_sm_cfg_wait(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
 			     bfa_iocfc_enable_cb, iocfc->bfa);
 		iocfc->bfa->iocfc.cb_reqd = BFA_FALSE;
 		break;
+	case IOCFC_E_DISABLE:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
+		break;
+
+	case IOCFC_E_STOP:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write);
+		break;
 	case IOCFC_E_IOC_FAILED:
 		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed);
 		if (iocfc->bfa->iocfc.cb_reqd == BFA_FALSE)
@@ -524,6 +583,10 @@ bfa_iocfc_sm_disabling(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
 	case IOCFC_E_IOC_DISABLED:
 		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabled);
 		break;
+	case IOCFC_E_IOC_ENABLED:
+	case IOCFC_E_DCONF_DONE:
+	case IOCFC_E_CFG_DONE:
+		break;
 	default:
 		bfa_sm_fault(iocfc->bfa, event);
 		break;
@@ -785,19 +848,20 @@ void
 bfa_isr_enable(struct bfa_s *bfa)
 {
 	u32 umsk;
-	int pci_func = bfa_ioc_pcifn(&bfa->ioc);
+	int port_id = bfa_ioc_portid(&bfa->ioc);
 
-	bfa_trc(bfa, pci_func);
+	bfa_trc(bfa, bfa_ioc_pcifn(&bfa->ioc));
+	bfa_trc(bfa, port_id);
 
 	bfa_msix_ctrl_install(bfa);
 
 	if (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)) {
 		umsk = __HFN_INT_ERR_MASK_CT2;
-		umsk |= pci_func == 0 ?
+		umsk |= port_id == 0 ?
 			__HFN_INT_FN0_MASK_CT2 : __HFN_INT_FN1_MASK_CT2;
 	} else {
 		umsk = __HFN_INT_ERR_MASK;
-		umsk |= pci_func == 0 ? __HFN_INT_FN0_MASK : __HFN_INT_FN1_MASK;
+		umsk |= port_id == 0 ? __HFN_INT_FN0_MASK : __HFN_INT_FN1_MASK;
 	}
 
 	writel(umsk, bfa->iocfc.bfa_regs.intr_status);
@@ -930,7 +994,8 @@ bfa_iocfc_send_cfg(void *bfa_arg)
 		cfg_info->single_msix_vec = 1;
 	cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG;
 	cfg_info->num_cqs = cfg->fwcfg.num_cqs;
-	cfg_info->num_ioim_reqs = cpu_to_be16(cfg->fwcfg.num_ioim_reqs);
+	cfg_info->num_ioim_reqs = cpu_to_be16(bfa_fcpim_get_throttle_cfg(bfa,
+					       cfg->fwcfg.num_ioim_reqs));
 	cfg_info->num_fwtio_reqs = cpu_to_be16(cfg->fwcfg.num_fwtio_reqs);
 
 	bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa);
@@ -1192,10 +1257,14 @@ bfa_iocfc_qreg(struct bfa_s *bfa, struct bfi_iocfc_qreg_s *qreg)
 static void
 bfa_iocfc_res_recfg(struct bfa_s *bfa, struct bfa_iocfc_fwcfg_s *fwcfg)
 {
+	struct bfa_iocfc_s	*iocfc   = &bfa->iocfc;
+	struct bfi_iocfc_cfg_s	*cfg_info = iocfc->cfginfo;
+
 	bfa_fcxp_res_recfg(bfa, fwcfg->num_fcxp_reqs);
 	bfa_uf_res_recfg(bfa, fwcfg->num_uf_bufs);
 	bfa_rport_res_recfg(bfa, fwcfg->num_rports);
-	bfa_fcp_res_recfg(bfa, fwcfg->num_ioim_reqs);
+	bfa_fcp_res_recfg(bfa, cpu_to_be16(cfg_info->num_ioim_reqs),
+			  fwcfg->num_ioim_reqs);
 	bfa_tskim_res_recfg(bfa, fwcfg->num_tskim_reqs);
 }
 
@@ -1693,6 +1762,7 @@ bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
 	struct bfa_mem_dma_s *flash_dma = BFA_MEM_FLASH_DMA(bfa);
 	struct bfa_mem_dma_s *diag_dma = BFA_MEM_DIAG_DMA(bfa);
 	struct bfa_mem_dma_s *phy_dma = BFA_MEM_PHY_DMA(bfa);
+	struct bfa_mem_dma_s *fru_dma = BFA_MEM_FRU_DMA(bfa);
 
 	WARN_ON((cfg == NULL) || (meminfo == NULL));
 
@@ -1717,6 +1787,8 @@ bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
 	bfa_mem_dma_setup(meminfo, diag_dma, bfa_diag_meminfo());
 	bfa_mem_dma_setup(meminfo, phy_dma,
 			  bfa_phy_meminfo(cfg->drvcfg.min_cfg));
+	bfa_mem_dma_setup(meminfo, fru_dma,
+			  bfa_fru_meminfo(cfg->drvcfg.min_cfg));
 }
 
 /*
@@ -1789,6 +1861,7 @@ bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
 	bfa_com_flash_attach(bfa, cfg->drvcfg.min_cfg);
 	bfa_com_diag_attach(bfa);
 	bfa_com_phy_attach(bfa, cfg->drvcfg.min_cfg);
+	bfa_com_fru_attach(bfa, cfg->drvcfg.min_cfg);
 }
 
 /*
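
The state-machine additions above all follow the driver's FSM idiom:
each state is a handler function, bfa_fsm_set_state() swaps the handler
pointer, and unhandled events land in bfa_sm_fault(). The new
IOCFC_E_DISABLE/IOCFC_E_STOP arcs exist so a teardown arriving while
initialization is still in flight transitions cleanly instead of
faulting. A condensed sketch of the idiom, not the driver's full state
set:

	static void
	iocfc_sm_example(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
	{
		switch (event) {
		case IOCFC_E_DISABLE:	/* new arc: disable mid-init */
			bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
			break;
		case IOCFC_E_STOP:	/* new arc: stop mid-init */
			bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
			break;
		default:		/* anything else is a bug */
			bfa_sm_fault(iocfc->bfa, event);
			break;
		}
	}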
diff --git a/drivers/scsi/bfa/bfa_defs.h b/drivers/scsi/bfa/bfa_defs.h
index b5a1595cc0a5..0efdf312b42c 100644
--- a/drivers/scsi/bfa/bfa_defs.h
+++ b/drivers/scsi/bfa/bfa_defs.h
@@ -159,10 +159,13 @@ enum bfa_status {
 	BFA_STATUS_BEACON_ON    = 72,   /* Port Beacon already on */
 	BFA_STATUS_ENOFSAVE	= 78,	/*  No saved firmware trace */
 	BFA_STATUS_IOC_DISABLED = 82,   /* IOC is already disabled */
+	BFA_STATUS_ERROR_TRL_ENABLED  = 87,   /* TRL is enabled */
+	BFA_STATUS_ERROR_QOS_ENABLED  = 88,   /* QoS is enabled */
 	BFA_STATUS_NO_SFP_DEV = 89,	/* No SFP device check or replace SFP */
 	BFA_STATUS_MEMTEST_FAILED = 90, /* Memory test failed contact support */
 	BFA_STATUS_LEDTEST_OP = 109, /* LED test is operating */
 	BFA_STATUS_INVALID_MAC  = 134, /*  Invalid MAC address */
+	BFA_STATUS_CMD_NOTSUPP_CNA = 146, /* Command not supported for CNA */
 	BFA_STATUS_PBC		= 154, /*  Operation not allowed for pre-boot
 					*  configuration */
 	BFA_STATUS_BAD_FWCFG = 156,	/* Bad firmware configuration */
@@ -184,6 +187,17 @@ enum bfa_status {
 	BFA_STATUS_FAA_ACQ_ADDR = 200,	/* Acquiring addr */
 	BFA_STATUS_ERROR_TRUNK_ENABLED = 203,	/* Trunk enabled on adapter */
 	BFA_STATUS_MAX_ENTRY_REACHED = 212,	/* MAX entry reached */
+	BFA_STATUS_TOPOLOGY_LOOP = 230, /* Topology is set to Loop */
+	BFA_STATUS_LOOP_UNSUPP_MEZZ = 231, /* Loop topology is not supported
+					    * on mezz cards */
+	BFA_STATUS_INVALID_BW = 233,	/* Invalid bandwidth value */
+	BFA_STATUS_QOS_BW_INVALID = 234,   /* Invalid QOS bandwidth
+					    * configuration */
+	BFA_STATUS_DPORT_ENABLED = 235, /* D-port mode is already enabled */
+	BFA_STATUS_DPORT_DISABLED = 236, /* D-port mode is already disabled */
+	BFA_STATUS_CMD_NOTSUPP_MEZZ = 239, /* Cmd not supported for MEZZ card */
+	BFA_STATUS_FRU_NOT_PRESENT = 240, /* fru module not present */
+	BFA_STATUS_DPORT_ERR = 245,	/* D-port mode is enabled */
 	BFA_STATUS_MAX_VAL		/* Unknown error code */
 };
 #define bfa_status_t enum bfa_status
@@ -249,6 +263,10 @@ struct bfa_adapter_attr_s {
 
 	u8		is_mezz;
 	u8		trunk_capable;
+	u8		mfg_day;	/* manufacturing day */
+	u8		mfg_month;	/* manufacturing month */
+	u16		mfg_year;	/* manufacturing year */
+	u16		rsvd;
 };
 
 /*
@@ -499,6 +517,17 @@ struct bfa_ioc_aen_data_s {
 };
 
 /*
+ *	D-port states
+ *
+*/
+enum bfa_dport_state {
+	BFA_DPORT_ST_DISABLED	= 0,	/* D-port is Disabled */
+	BFA_DPORT_ST_DISABLING	= 1,	/* D-port is Disabling */
+	BFA_DPORT_ST_ENABLING	= 2,	/* D-port is Enabling */
+	BFA_DPORT_ST_ENABLED	= 3,	/* D-port is Enabled */
+};
+
+/*
  * ---------------------- mfg definitions ------------
  */
 
@@ -722,7 +751,8 @@ struct bfa_ablk_cfg_pf_s {
 	u8	rsvd[1];
 	u16	num_qpairs;
 	u16	num_vectors;
-	u32	bw;
+	u16	bw_min;
+	u16	bw_max;
 };
 
 struct bfa_ablk_cfg_port_s {
@@ -889,11 +919,40 @@ struct sfp_diag_ext_s {
 	u8	ext_status_ctl[2];
 };
 
+/*
+ * Diagnostic: Data Fields -- Address A2h
+ * General Use Fields: User Writable Table - Features's Control Registers
+ * Total 32 bytes
+ */
+struct sfp_usr_eeprom_s {
+	u8	rsvd1[2];       /* 128-129 */
+	u8	ewrap;          /* 130 */
+	u8	rsvd2[2];       /*  */
+	u8	owrap;          /* 133 */
+	u8	rsvd3[2];       /*  */
+	u8	prbs;           /* 136: PRBS 7 generator */
+	u8	rsvd4[2];       /*  */
+	u8	tx_eqz_16;      /* 139: TX Equalizer (16xFC) */
+	u8	tx_eqz_8;       /* 140: TX Equalizer (8xFC) */
+	u8	rsvd5[2];       /*  */
+	u8	rx_emp_16;      /* 143: RX Emphasis (16xFC) */
+	u8	rx_emp_8;       /* 144: RX Emphasis (8xFC) */
+	u8	rsvd6[2];       /*  */
+	u8	tx_eye_adj;     /* 147: TX eye Threshold Adjust */
+	u8	rsvd7[3];       /*  */
+	u8	tx_eye_qctl;    /* 151: TX eye Quality Control */
+	u8	tx_eye_qres;    /* 152: TX eye Quality Result */
+	u8	rsvd8[2];       /*  */
+	u8	poh[3];         /* 155-157: Power On Hours */
+	u8	rsvd9[2];       /*  */
+};
+
 struct sfp_mem_s {
 	struct sfp_srlid_base_s	srlid_base;
 	struct sfp_srlid_ext_s	srlid_ext;
 	struct sfp_diag_base_s	diag_base;
 	struct sfp_diag_ext_s	diag_ext;
+	struct sfp_usr_eeprom_s usr_eeprom;
 };
 
 /*
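
The new sfp_usr_eeprom_s mirrors bytes 128-159 of the SFP diagnostic
page (address A2h), and since every member is a u8 or u8 array the
compiler can introduce no padding: the field sizes sum to exactly the
32 bytes the comment promises (2+1+2+1+2+1+2+1+1+2+1+1+2+1+3+1+1+2+3+2).
A hypothetical compile-time guard for that invariant -- not present in
the driver, shown only as a sketch:

	#include <linux/bug.h>

	static inline void sfp_usr_eeprom_layout_check(void)
	{
		/* one byte per A2h offset 128..159 */
		BUILD_BUG_ON(sizeof(struct sfp_usr_eeprom_s) != 32);
	}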
diff --git a/drivers/scsi/bfa/bfa_defs_svc.h b/drivers/scsi/bfa/bfa_defs_svc.h
index 36756ce0e58f..ec03c8cd8dac 100644
--- a/drivers/scsi/bfa/bfa_defs_svc.h
+++ b/drivers/scsi/bfa/bfa_defs_svc.h
@@ -258,6 +258,7 @@ struct bfa_fw_port_lksm_stats_s {
 	u32    hwsm_lrr_rx;        /*  No. of times LRR rx-ed by HWSM      */
 	u32    hwsm_lr_rx;         /*  No. of times LR rx-ed by HWSM       */
 	u32    bbsc_lr;		   /* LKSM LR tx for credit recovery       */
+	u32	rsvd;
 };
 
 struct bfa_fw_port_snsm_stats_s {
@@ -270,6 +271,9 @@ struct bfa_fw_port_snsm_stats_s {
 	u32    sync_lost;          /*  Sync loss count                     */
 	u32    sig_lost;           /*  Signal loss count                   */
 	u32    asn8g_attempts;	   /* SNSM HWSM at 8Gbps attempts	   */
+	u32    adapt_success;	   /* SNSM adaptation success	*/
+	u32    adapt_fails;	   /* SNSM adaptation failures */
+	u32    adapt_ign_fails;	   /* SNSM adaptation failures ignored */
 };
 
 struct bfa_fw_port_physm_stats_s {
@@ -324,12 +328,46 @@ struct bfa_fw_fcoe_port_stats_s {
 	struct bfa_fw_fip_stats_s		fip_stats;
 };
 
+/**
+ * @brief LPSM statistics
+ */
+struct bfa_fw_lpsm_stats_s {
+	u32	cls_rx;		/* LPSM cls_rx			*/
+	u32	cls_tx;		/* LPSM cls_tx			*/
+	u32	arbf0_rx;	/* LPSM abrf0 rcvd		*/
+	u32	arbf0_tx;	/* LPSM abrf0 xmit		*/
+	u32	init_rx;	/* LPSM loop init start		*/
+	u32	unexp_hwst;	/* LPSM unknown hw state	*/
+	u32	unexp_frame;	/* LPSM unknown_frame		*/
+	u32	unexp_prim;	/* LPSM unexpected primitive	*/
+	u32	prev_alpa_unavail; /* LPSM prev alpa unavailable */
+	u32	alpa_unavail;	/* LPSM alpa not available	*/
+	u32	lip_rx;		/* LPSM lip rcvd		*/
+	u32	lip_f7f7_rx;	/* LPSM lip f7f7 rcvd		*/
+	u32	lip_f8_rx;	/* LPSM lip f8 rcvd		*/
+	u32	lip_f8f7_rx;	/* LPSM lip f8f7 rcvd		*/
+	u32	lip_other_rx;	/* LPSM lip other rcvd		*/
+	u32	lip_tx;		/* LPSM lip xmit		*/
+	u32	retry_tov;	/* LPSM retry TOV		*/
+	u32	lip_tov;	/* LPSM LIP wait TOV		*/
+	u32	idle_tov;	/* LPSM idle wait TOV		*/
+	u32	arbf0_tov;	/* LPSM arbfo wait TOV		*/
+	u32	stop_loop_tov;	/* LPSM stop loop wait TOV	*/
+	u32	lixa_tov;	/* LPSM lisa wait TOV		*/
+	u32	lixx_tov;	/* LPSM lilp/lirp wait TOV	*/
+	u32	cls_tov;	/* LPSM cls wait TOV		*/
+	u32	sler;		/* LPSM SLER recvd		*/
+	u32	failed;		/* LPSM failed			*/
+	u32	success;	/* LPSM online			*/
+};
+
 /*
  * IOC firmware FC uport stats
  */
 struct bfa_fw_fc_uport_stats_s {
 	struct bfa_fw_port_snsm_stats_s		snsm_stats;
 	struct bfa_fw_port_lksm_stats_s		lksm_stats;
+	struct bfa_fw_lpsm_stats_s		lpsm_stats;
 };
 
 /*
@@ -357,11 +395,6 @@ struct bfa_fw_fcxchg_stats_s {
 	u32	ua_state_inv;
 };
 
-struct bfa_fw_lpsm_stats_s {
-	u32	cls_rx;
-	u32	cls_tx;
-};
-
 /*
  *  Trunk statistics
  */
@@ -454,7 +487,6 @@ struct bfa_fw_stats_s {
 	struct bfa_fw_io_stats_s	io_stats;
 	struct bfa_fw_port_stats_s	port_stats;
 	struct bfa_fw_fcxchg_stats_s	fcxchg_stats;
-	struct bfa_fw_lpsm_stats_s	lpsm_stats;
 	struct bfa_fw_lps_stats_s	lps_stats;
 	struct bfa_fw_trunk_stats_s	trunk_stats;
 	struct bfa_fw_advsm_stats_s	advsm_stats;
@@ -494,13 +526,23 @@ enum bfa_qos_bw_alloc {
 	BFA_QOS_BW_LOW  =  10,	/*  bandwidth allocation for Low */
 };
 #pragma pack(1)
+
+struct bfa_qos_bw_s {
+	u8	qos_bw_set;
+	u8	high;
+	u8	med;
+	u8	low;
+};
+
 /*
  * QoS attribute returned in QoS Query
  */
 struct bfa_qos_attr_s {
-	u8		state;		/*  QoS current state */
-	u8		rsvd[3];
-	u32  total_bb_cr;		/*  Total BB Credits */
+	u8	state;		/*  QoS current state */
+	u8	rsvd1[3];
+	u32	total_bb_cr;	/*  Total BB Credits */
+	struct bfa_qos_bw_s qos_bw;	/* QOS bw cfg */
+	struct bfa_qos_bw_s qos_bw_op;	/* QOS bw operational */
 };
 
 /*
@@ -692,7 +734,8 @@ enum bfa_port_states {
 	BFA_PORT_ST_FWMISMATCH		= 12,
 	BFA_PORT_ST_PREBOOT_DISABLED	= 13,
 	BFA_PORT_ST_TOGGLING_QWAIT	= 14,
-	BFA_PORT_ST_ACQ_ADDR		= 15,
+	BFA_PORT_ST_FAA_MISCONFIG	= 15,
+	BFA_PORT_ST_DPORT		= 16,
 	BFA_PORT_ST_MAX_STATE,
 };
 
@@ -714,9 +757,11 @@ enum bfa_port_type {
  */
 enum bfa_port_topology {
 	BFA_PORT_TOPOLOGY_NONE = 0,	/*  No valid topology */
-	BFA_PORT_TOPOLOGY_P2P  = 1,	/*  P2P only */
-	BFA_PORT_TOPOLOGY_LOOP = 2,	/*  LOOP topology */
-	BFA_PORT_TOPOLOGY_AUTO = 3,	/*  auto topology selection */
+	BFA_PORT_TOPOLOGY_P2P_OLD_VER = 1, /* P2P def for older ver */
+	BFA_PORT_TOPOLOGY_LOOP = 2,	/* LOOP topology */
+	BFA_PORT_TOPOLOGY_AUTO_OLD_VER = 3, /* auto def for older ver */
+	BFA_PORT_TOPOLOGY_AUTO = 4,	/* auto topology selection */
+	BFA_PORT_TOPOLOGY_P2P = 5,	/* P2P only */
 };
 
 /*
@@ -760,6 +805,7 @@ enum bfa_port_linkstate_rsn {
 	BFA_PORT_LINKSTATE_RSN_LOCAL_FAULT	= 9,
 	BFA_PORT_LINKSTATE_RSN_REMOTE_FAULT	= 10,
 	BFA_PORT_LINKSTATE_RSN_TIMEOUT		= 11,
+	BFA_PORT_LINKSTATE_RSN_FAA_MISCONFIG	= 12,
 
 
 
@@ -833,6 +879,19 @@ struct bfa_lunmask_cfg_s {
 	struct bfa_lun_mask_s	lun_list[MAX_LUN_MASK_CFG];
 };
 
+struct bfa_throttle_cfg_s {
+	u16	is_valid;
+	u16	value;
+	u32	rsvd;
+};
+
+struct bfa_defs_fcpim_throttle_s {
+	u16	max_value;
+	u16	cur_value;
+	u16	cfg_value;
+	u16	rsvd;
+};
+
 /*
  *      Physical port configuration
  */
@@ -851,9 +910,10 @@ struct bfa_port_cfg_s {
 	u8	 bb_scn;	/*  BB_SCN value from FLOGI Exchg */
 	u8	 bb_scn_state;	/*  Config state of BB_SCN */
 	u8	 faa_state;	/*  FAA enabled/disabled        */
-	u8	 rsvd[1];
+	u8	 rsvd1;
 	u16	 path_tov;	/*  device path timeout	*/
 	u16	 q_depth;	/*  SCSI Queue depth		*/
+	struct bfa_qos_bw_s qos_bw;	/* QOS bandwidth	*/
 };
 #pragma pack()
 
@@ -901,7 +961,7 @@ struct bfa_port_attr_s {
 
 	/* FCoE specific  */
 	u16			fcoe_vlan;
-	u8			rsvd1[2];
+	u8			rsvd1[6];
 };
 
 /*
@@ -971,6 +1031,13 @@ struct bfa_trunk_vc_attr_s {
 	u16 vc_credits[8];
 };
 
+struct bfa_fcport_loop_info_s {
+	u8	myalpa;		/* alpa claimed */
+	u8	alpabm_val;	/* alpa bitmap valid or not (1 or 0) */
+	u8	resvd[6];
+	struct fc_alpabm_s alpabm;	/* alpa bitmap */
+};
+
 /*
  *	Link state information
  */
@@ -981,13 +1048,18 @@ struct bfa_port_link_s {
 	u8	 speed;		/*  Link speed (1/2/4/8 G) */
 	u32	 linkstate_opt; /*  Linkstate optional data (debug) */
 	u8	 trunked;	/*  Trunked or not (1 or 0) */
-	u8	 resvd[3];
+	u8	 resvd[7];
 	struct bfa_qos_attr_s  qos_attr;   /* QoS Attributes */
 	union {
-		struct bfa_qos_vc_attr_s qos_vc_attr;  /*  VC info from ELP */
-		struct bfa_trunk_vc_attr_s trunk_vc_attr;
-		struct bfa_fcport_fcf_s fcf; /*  FCF information (for FCoE) */
-	} vc_fcf;
+		struct bfa_fcport_loop_info_s loop_info;
+		union {
+			struct bfa_qos_vc_attr_s qos_vc_attr;
+					/*  VC info from ELP */
+			struct bfa_trunk_vc_attr_s trunk_vc_attr;
+			struct bfa_fcport_fcf_s fcf;
+					/*  FCF information (for FCoE) */
+		} vc_fcf;
+	} attr;
 };
 #pragma pack()
 
@@ -1112,6 +1184,9 @@ struct bfa_port_fc_stats_s {
 	u64     tx_frames;      /*  Tx frames                   */
 	u64     tx_words;       /*  Tx words                    */
 	u64     tx_lip;         /*  Tx LIP                      */
+	u64	tx_lip_f7f7;	/*  Tx LIP_F7F7		*/
+	u64	tx_lip_f8f7;	/*  Tx LIP_F8F7		*/
+	u64	tx_arbf0;	/*  Tx ARB F0			*/
 	u64     tx_nos;         /*  Tx NOS                      */
 	u64     tx_ols;         /*  Tx OLS                      */
 	u64     tx_lr;          /*  Tx LR                       */
@@ -1119,6 +1194,9 @@ struct bfa_port_fc_stats_s {
 	u64     rx_frames;      /*  Rx frames                   */
 	u64     rx_words;       /*  Rx words                    */
 	u64     lip_count;      /*  Rx LIP                      */
+	u64	rx_lip_f7f7;	/*  Rx LIP_F7F7		*/
+	u64	rx_lip_f8f7;	/*  Rx LIP_F8F7		*/
+	u64	rx_arbf0;	/*  Rx ARB F0			*/
 	u64     nos_count;      /*  Rx NOS                      */
 	u64     ols_count;      /*  Rx OLS                      */
 	u64     lr_count;       /*  Rx LR                       */
@@ -1140,6 +1218,7 @@ struct bfa_port_fc_stats_s {
 	u64	bbsc_frames_lost; /* Credit Recovery-Frames Lost  */
 	u64	bbsc_credits_lost; /* Credit Recovery-Credits Lost */
 	u64	bbsc_link_resets; /* Credit Recovery-Link Resets   */
+	u64	loop_timeouts;	/*  Loop timeouts		*/
 };
 
 /*
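
Note the shape change in bfa_port_link_s above: the old top-level
vc_fcf union is now nested one level down inside a new attr union,
alongside the loop_info member, so consumers select the view by
topology. A hedged access sketch -- hypothetical helper, assuming the
caller already knows the configured topology:

	static u8 link_claimed_alpa(const struct bfa_port_link_s *link,
				    enum bfa_port_topology topo)
	{
		if (topo == BFA_PORT_TOPOLOGY_LOOP)
			return link->attr.loop_info.myalpa;
		/* non-loop users now reach the old fields through
		 * link->attr.vc_fcf.* instead of link->vc_fcf.*
		 */
		return 0;
	}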
diff --git a/drivers/scsi/bfa/bfa_fc.h b/drivers/scsi/bfa/bfa_fc.h
index e0beb4d7e264..bea821b98030 100644
--- a/drivers/scsi/bfa/bfa_fc.h
+++ b/drivers/scsi/bfa/bfa_fc.h
@@ -24,6 +24,7 @@ typedef u64 wwn_t;
 
 #define WWN_NULL	(0)
 #define FC_SYMNAME_MAX	256	/*  max name server symbolic name size */
+#define FC_ALPA_MAX	128
 
 #pragma pack(1)
 
@@ -1015,6 +1016,10 @@ struct fc_symname_s {
 	u8         symname[FC_SYMNAME_MAX];
 };
 
+struct fc_alpabm_s {
+	u8	alpa_bm[FC_ALPA_MAX / 8];
+};
+
 /*
  * protocol default timeout values
  */
diff --git a/drivers/scsi/bfa/bfa_fcbuild.c b/drivers/scsi/bfa/bfa_fcbuild.c
index 273cee90b3b4..dce787f6cca2 100644
--- a/drivers/scsi/bfa/bfa_fcbuild.c
+++ b/drivers/scsi/bfa/bfa_fcbuild.c
@@ -228,6 +228,10 @@ fc_plogi_x_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
 
 	memcpy(plogi, &plogi_tmpl, sizeof(struct fc_logi_s));
 
+	/* For FC AL bb_cr is 0 and altbbcred is 1 */
+	if (!bb_cr)
+		plogi->csp.altbbcred = 1;
+
 	plogi->els_cmd.els_code = els_code;
 	if (els_code == FC_ELS_PLOGI)
 		fc_els_req_build(fchs, d_id, s_id, ox_id);
diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
index 1633963c66ca..27b560962357 100644
--- a/drivers/scsi/bfa/bfa_fcpim.c
+++ b/drivers/scsi/bfa/bfa_fcpim.c
@@ -158,6 +158,7 @@ enum bfa_tskim_event {
 	BFA_TSKIM_SM_IOS_DONE	= 7,	/*  IO and sub TM completions	*/
 	BFA_TSKIM_SM_CLEANUP	= 8,	/*  TM cleanup on ITN offline	*/
 	BFA_TSKIM_SM_CLEANUP_DONE = 9,	/*  TM abort completion	*/
+	BFA_TSKIM_SM_UTAG	= 10,	/*  TM completion unknown tag  */
 };
 
 /*
@@ -3036,7 +3037,7 @@ bfa_ioim_abort(struct bfa_ioim_s *ioim)
 static void
 bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
 {
-	bfa_trc(tskim->bfa, event);
+	bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
 
 	switch (event) {
 	case BFA_TSKIM_SM_START:
@@ -3074,7 +3075,7 @@ bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
 static void
 bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
 {
-	bfa_trc(tskim->bfa, event);
+	bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
 
 	switch (event) {
 	case BFA_TSKIM_SM_DONE:
@@ -3110,7 +3111,7 @@ bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
 static void
 bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
 {
-	bfa_trc(tskim->bfa, event);
+	bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
 
 	switch (event) {
 	case BFA_TSKIM_SM_DONE:
@@ -3119,6 +3120,7 @@ bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
 		 */
 		break;
 
+	case BFA_TSKIM_SM_UTAG:
 	case BFA_TSKIM_SM_CLEANUP_DONE:
 		bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
 		bfa_tskim_cleanup_ios(tskim);
@@ -3138,7 +3140,7 @@ bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
 static void
 bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
 {
-	bfa_trc(tskim->bfa, event);
+	bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
 
 	switch (event) {
 	case BFA_TSKIM_SM_IOS_DONE:
@@ -3170,7 +3172,7 @@ bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
 static void
 bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
 {
-	bfa_trc(tskim->bfa, event);
+	bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
 
 	switch (event) {
 	case BFA_TSKIM_SM_QRESUME:
@@ -3207,7 +3209,7 @@ static void
 bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
 		enum bfa_tskim_event event)
 {
-	bfa_trc(tskim->bfa, event);
+	bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
 
 	switch (event) {
 	case BFA_TSKIM_SM_DONE:
@@ -3238,7 +3240,7 @@ bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
 static void
 bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
 {
-	bfa_trc(tskim->bfa, event);
+	bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
 
 	switch (event) {
 	case BFA_TSKIM_SM_HCB:
@@ -3560,6 +3562,8 @@ bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
 	if (rsp->tsk_status == BFI_TSKIM_STS_ABORTED) {
 		bfa_stats(tskim->itnim, tm_cleanup_comps);
 		bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP_DONE);
+	} else if (rsp->tsk_status == BFI_TSKIM_STS_UTAG) {
+		bfa_sm_send_event(tskim, BFA_TSKIM_SM_UTAG);
 	} else {
 		bfa_stats(tskim->itnim, tm_fw_rsps);
 		bfa_sm_send_event(tskim, BFA_TSKIM_SM_DONE);
@@ -3699,6 +3703,7 @@ bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
 	struct bfa_mem_dma_s *seg_ptr;
 	u16	idx, nsegs, num_io_req;
 
+	fcp->max_ioim_reqs = cfg->fwcfg.num_ioim_reqs;
 	fcp->num_ioim_reqs = cfg->fwcfg.num_ioim_reqs;
 	fcp->num_fwtio_reqs  = cfg->fwcfg.num_fwtio_reqs;
 	fcp->num_itns   = cfg->fwcfg.num_rports;
@@ -3721,6 +3726,7 @@ bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
 		bfa_iocfc_set_snsbase(bfa, idx, fcp->snsbase[idx].pa);
 	}
 
+	fcp->throttle_update_required = 1;
 	bfa_fcpim_attach(fcp, bfad, cfg, pcidev);
 
 	bfa_iotag_attach(fcp);
@@ -3759,23 +3765,33 @@ bfa_fcp_iocdisable(struct bfa_s *bfa)
 {
 	struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
 
-	/* Enqueue unused ioim resources to free_q */
-	list_splice_tail_init(&fcp->iotag_unused_q, &fcp->iotag_ioim_free_q);
-
 	bfa_fcpim_iocdisable(fcp);
 }
 
 void
-bfa_fcp_res_recfg(struct bfa_s *bfa, u16 num_ioim_fw)
+bfa_fcp_res_recfg(struct bfa_s *bfa, u16 num_ioim_fw, u16 max_ioim_fw)
 {
 	struct bfa_fcp_mod_s	*mod = BFA_FCP_MOD(bfa);
 	struct list_head	*qe;
 	int	i;
 
+	/* Update io throttle value only once during driver load time */
+	if (!mod->throttle_update_required)
+		return;
+
 	for (i = 0; i < (mod->num_ioim_reqs - num_ioim_fw); i++) {
 		bfa_q_deq_tail(&mod->iotag_ioim_free_q, &qe);
 		list_add_tail(qe, &mod->iotag_unused_q);
 	}
+
+	if (mod->num_ioim_reqs != num_ioim_fw) {
+		bfa_trc(bfa, mod->num_ioim_reqs);
+		bfa_trc(bfa, num_ioim_fw);
+	}
+
+	mod->max_ioim_reqs = max_ioim_fw;
+	mod->num_ioim_reqs = num_ioim_fw;
+	mod->throttle_update_required = 0;
 }
 
 void
@@ -3833,3 +3849,88 @@ bfa_iotag_attach(struct bfa_fcp_mod_s *fcp)
 
 	bfa_mem_kva_curp(fcp) = (u8 *) iotag;
 }
+
+
+/**
+ * To send config req, first try to use throttle value from flash
+ * If 0, then use driver parameter
+ * We need to use min(flash_val, drv_val) because
+ * memory allocation was done based on this cfg'd value
+ */
+u16
+bfa_fcpim_get_throttle_cfg(struct bfa_s *bfa, u16 drv_cfg_param)
+{
+	u16 tmp;
+	struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
+
+	/*
+	 * If throttle value from flash is already in effect after driver is
+	 * loaded then until next load, always return current value instead
+	 * of actual flash value
+	 */
+	if (!fcp->throttle_update_required)
+		return (u16)fcp->num_ioim_reqs;
+
+	tmp = bfa_dconf_read_data_valid(bfa) ? bfa_fcpim_read_throttle(bfa) : 0;
+	if (!tmp || (tmp > drv_cfg_param))
+		tmp = drv_cfg_param;
+
+	return tmp;
+}
+
+bfa_status_t
+bfa_fcpim_write_throttle(struct bfa_s *bfa, u16 value)
+{
+	if (!bfa_dconf_get_min_cfg(bfa)) {
+		BFA_DCONF_MOD(bfa)->dconf->throttle_cfg.value = value;
+		BFA_DCONF_MOD(bfa)->dconf->throttle_cfg.is_valid = 1;
+		return BFA_STATUS_OK;
+	}
+
+	return BFA_STATUS_FAILED;
+}
+
+u16
+bfa_fcpim_read_throttle(struct bfa_s *bfa)
+{
+	struct bfa_throttle_cfg_s *throttle_cfg =
+			&(BFA_DCONF_MOD(bfa)->dconf->throttle_cfg);
+
+	return ((!bfa_dconf_get_min_cfg(bfa)) ?
+	       ((throttle_cfg->is_valid == 1) ? (throttle_cfg->value) : 0) : 0);
+}
+
+bfa_status_t
+bfa_fcpim_throttle_set(struct bfa_s *bfa, u16 value)
+{
+	/* in min cfg no commands should run. */
+	if ((bfa_dconf_get_min_cfg(bfa) == BFA_TRUE) ||
+	    (!bfa_dconf_read_data_valid(bfa)))
+		return BFA_STATUS_FAILED;
+
+	bfa_fcpim_write_throttle(bfa, value);
+
+	return bfa_dconf_update(bfa);
+}
+
+bfa_status_t
+bfa_fcpim_throttle_get(struct bfa_s *bfa, void *buf)
+{
+	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
+	struct bfa_defs_fcpim_throttle_s throttle;
+
+	if ((bfa_dconf_get_min_cfg(bfa) == BFA_TRUE) ||
+	    (!bfa_dconf_read_data_valid(bfa)))
+		return BFA_STATUS_FAILED;
+
+	memset(&throttle, 0, sizeof(struct bfa_defs_fcpim_throttle_s));
+
+	throttle.cur_value = (u16)(fcpim->fcp->num_ioim_reqs);
+	throttle.cfg_value = bfa_fcpim_read_throttle(bfa);
+	if (!throttle.cfg_value)
+		throttle.cfg_value = throttle.cur_value;
+	throttle.max_value = (u16)(fcpim->fcp->max_ioim_reqs);
+	memcpy(buf, &throttle, sizeof(struct bfa_defs_fcpim_throttle_s));
+
+	return BFA_STATUS_OK;
+}
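
The selection rule in bfa_fcpim_get_throttle_cfg() above reduces to
"min(flash, driver) with 0 meaning unset", because memory was already
sized from the driver parameter. Restated as a standalone sketch with
worked cases:

	/*
	 *   flash = 0,   drv = 256  ->  256  (flash value unset)
	 *   flash = 128, drv = 256  ->  128  (flash wins, fits allocation)
	 *   flash = 512, drv = 256  ->  256  (clamped to what was allocated)
	 */
	static u16 throttle_effective(u16 flash_val, u16 drv_val)
	{
		return (!flash_val || flash_val > drv_val) ?
			drv_val : flash_val;
	}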
diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
index 36f26da80f76..e693af6e5930 100644
--- a/drivers/scsi/bfa/bfa_fcpim.h
+++ b/drivers/scsi/bfa/bfa_fcpim.h
@@ -42,7 +42,7 @@ void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
 		void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
 void bfa_itn_isr(struct bfa_s *bfa, struct bfi_msg_s *m);
 void bfa_iotag_attach(struct bfa_fcp_mod_s *fcp);
-void bfa_fcp_res_recfg(struct bfa_s *bfa, u16 num_ioim_fw);
+void bfa_fcp_res_recfg(struct bfa_s *bfa, u16 num_ioim_fw, u16 max_ioim_fw);
 
 #define BFA_FCP_MOD(_hal)	(&(_hal)->modules.fcp_mod)
 #define BFA_MEM_FCP_KVA(__bfa)	(&(BFA_FCP_MOD(__bfa)->kva_seg))
@@ -51,7 +51,9 @@ void bfa_fcp_res_recfg(struct bfa_s *bfa, u16 num_ioim_fw);
 #define BFA_ITN_FROM_TAG(_fcp, _tag)	\
 	((_fcp)->itn_arr + ((_tag) & ((_fcp)->num_itns - 1)))
 #define BFA_SNSINFO_FROM_TAG(_fcp, _tag) \
-	bfa_mem_get_dmabuf_kva(_fcp, _tag, BFI_IOIM_SNSLEN)
+	bfa_mem_get_dmabuf_kva(_fcp, (_tag & BFA_IOIM_IOTAG_MASK),	\
+	BFI_IOIM_SNSLEN)
+
 
 #define BFA_ITNIM_MIN   32
 #define BFA_ITNIM_MAX   1024
@@ -148,6 +150,7 @@ struct bfa_fcp_mod_s {
 	struct list_head	iotag_unused_q;	/* unused IO resources*/
 	struct bfa_iotag_s	*iotag_arr;
 	struct bfa_itn_s	*itn_arr;
+	int			max_ioim_reqs;
 	int			num_ioim_reqs;
 	int			num_fwtio_reqs;
 	int			num_itns;
@@ -155,6 +158,7 @@ struct bfa_fcp_mod_s {
 	struct bfa_fcpim_s	fcpim;
 	struct bfa_mem_dma_s	dma_seg[BFA_FCP_DMA_SEGS];
 	struct bfa_mem_kva_s	kva_seg;
+	int			throttle_update_required;
 };
 
 /*
@@ -416,5 +420,10 @@ bfa_status_t	bfa_fcpim_lunmask_delete(struct bfa_s *bfa, u16 vf_id,
 bfa_status_t	bfa_fcpim_lunmask_add(struct bfa_s *bfa, u16 vf_id,
 				wwn_t *pwwn, wwn_t rpwwn, struct scsi_lun lun);
 bfa_status_t	bfa_fcpim_lunmask_clear(struct bfa_s *bfa);
+u16		bfa_fcpim_read_throttle(struct bfa_s *bfa);
+bfa_status_t	bfa_fcpim_write_throttle(struct bfa_s *bfa, u16 value);
+bfa_status_t	bfa_fcpim_throttle_set(struct bfa_s *bfa, u16 value);
+bfa_status_t	bfa_fcpim_throttle_get(struct bfa_s *bfa, void *buf);
+u16     bfa_fcpim_get_throttle_cfg(struct bfa_s *bfa, u16 drv_cfg_param);
 
 #endif /* __BFA_FCPIM_H__ */
diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
index fd3e84d32bd2..d428808fb37e 100644
--- a/drivers/scsi/bfa/bfa_fcs.c
+++ b/drivers/scsi/bfa/bfa_fcs.c
@@ -303,16 +303,30 @@ static void
 bfa_fcs_fabric_sm_created(struct bfa_fcs_fabric_s *fabric,
 			  enum bfa_fcs_fabric_event event)
 {
+	struct bfa_s	*bfa = fabric->fcs->bfa;
+
 	bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
 	bfa_trc(fabric->fcs, event);
 
 	switch (event) {
 	case BFA_FCS_FABRIC_SM_START:
-		if (bfa_fcport_is_linkup(fabric->fcs->bfa)) {
+		if (!bfa_fcport_is_linkup(fabric->fcs->bfa)) {
+			bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
+			break;
+		}
+		if (bfa_fcport_get_topology(bfa) ==
+				BFA_PORT_TOPOLOGY_LOOP) {
+			fabric->fab_type = BFA_FCS_FABRIC_LOOP;
+			fabric->bport.pid = bfa_fcport_get_myalpa(bfa);
+			fabric->bport.pid = bfa_hton3b(fabric->bport.pid);
+			bfa_sm_set_state(fabric,
+					bfa_fcs_fabric_sm_online);
+			bfa_fcs_fabric_set_opertype(fabric);
+			bfa_fcs_lport_online(&fabric->bport);
+		} else {
 			bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi);
 			bfa_fcs_fabric_login(fabric);
-		} else
-			bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
+		}
 		break;
 
 	case BFA_FCS_FABRIC_SM_LINK_UP:
@@ -337,16 +351,28 @@ static void
 bfa_fcs_fabric_sm_linkdown(struct bfa_fcs_fabric_s *fabric,
 			   enum bfa_fcs_fabric_event event)
 {
+	struct bfa_s	*bfa = fabric->fcs->bfa;
+
 	bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
 	bfa_trc(fabric->fcs, event);
 
 	switch (event) {
 	case BFA_FCS_FABRIC_SM_LINK_UP:
-		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi);
-		bfa_fcs_fabric_login(fabric);
+		if (bfa_fcport_get_topology(bfa) != BFA_PORT_TOPOLOGY_LOOP) {
+			bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi);
+			bfa_fcs_fabric_login(fabric);
+			break;
+		}
+		fabric->fab_type = BFA_FCS_FABRIC_LOOP;
+		fabric->bport.pid = bfa_fcport_get_myalpa(bfa);
+		fabric->bport.pid = bfa_hton3b(fabric->bport.pid);
+		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_online);
+		bfa_fcs_fabric_set_opertype(fabric);
+		bfa_fcs_lport_online(&fabric->bport);
 		break;
 
 	case BFA_FCS_FABRIC_SM_RETRY_OP:
+	case BFA_FCS_FABRIC_SM_LOOPBACK:
 		break;
 
 	case BFA_FCS_FABRIC_SM_DELETE:
@@ -595,14 +621,20 @@ void
 bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric,
 			 enum bfa_fcs_fabric_event event)
 {
+	struct bfa_s	*bfa = fabric->fcs->bfa;
+
 	bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
 	bfa_trc(fabric->fcs, event);
 
 	switch (event) {
 	case BFA_FCS_FABRIC_SM_LINK_DOWN:
 		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
-		bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE);
-		bfa_fcs_fabric_notify_offline(fabric);
+		if (bfa_fcport_get_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP) {
+			bfa_fcs_lport_offline(&fabric->bport);
+		} else {
+			bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE);
+			bfa_fcs_fabric_notify_offline(fabric);
+		}
 		break;
 
 	case BFA_FCS_FABRIC_SM_DELETE:
@@ -719,20 +751,29 @@ static void
 bfa_fcs_fabric_sm_stopping(struct bfa_fcs_fabric_s *fabric,
 			   enum bfa_fcs_fabric_event event)
 {
+	struct bfa_s	*bfa = fabric->fcs->bfa;
+
 	bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
 	bfa_trc(fabric->fcs, event);
 
 	switch (event) {
 	case BFA_FCS_FABRIC_SM_STOPCOMP:
-		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_cleanup);
-		bfa_sm_send_event(fabric->lps, BFA_LPS_SM_LOGOUT);
+		if (bfa_fcport_get_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP) {
+			bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_created);
+		} else {
+			bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_cleanup);
+			bfa_sm_send_event(fabric->lps, BFA_LPS_SM_LOGOUT);
+		}
 		break;
 
 	case BFA_FCS_FABRIC_SM_LINK_UP:
 		break;
 
 	case BFA_FCS_FABRIC_SM_LINK_DOWN:
-		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_cleanup);
+		if (bfa_fcport_get_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP)
+			bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_created);
+		else
+			bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_cleanup);
 		break;
 
 	default:
@@ -975,9 +1016,6 @@ bfa_fcs_fabric_login(struct bfa_fcs_fabric_s *fabric)
 	struct bfa_lport_cfg_s	*pcfg = &fabric->bport.port_cfg;
 	u8			alpa = 0, bb_scn = 0;
 
-	if (bfa_fcport_get_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP)
-		alpa = bfa_fcport_get_myalpa(bfa);
-
 	if (bfa_fcs_fabric_is_bbscn_enabled(fabric) &&
 	    (!fabric->fcs->bbscn_flogi_rjt))
 		bb_scn = BFA_FCS_PORT_DEF_BB_SCN;
diff --git a/drivers/scsi/bfa/bfa_fcs.h b/drivers/scsi/bfa/bfa_fcs.h
index 6c4377cb287f..a449706c6bc0 100644
--- a/drivers/scsi/bfa/bfa_fcs.h
+++ b/drivers/scsi/bfa/bfa_fcs.h
@@ -118,9 +118,9 @@ struct bfa_fcs_lport_fab_s {
 #define	MAX_ALPA_COUNT	127
 
 struct bfa_fcs_lport_loop_s {
-	u8         num_alpa;	/*  Num of ALPA entries in the map */
-	u8         alpa_pos_map[MAX_ALPA_COUNT];	/*  ALPA Positional
-							 *Map */
+	u8	num_alpa;	/*  Num of ALPA entries in the map */
+	u8	alpabm_valid;	/* alpa bitmap valid or not (1 or 0) */
+	u8	alpa_pos_map[MAX_ALPA_COUNT]; /*  ALPA Positional Map */
 	struct bfa_fcs_lport_s *port;	/*  parent port */
 };
 
@@ -175,6 +175,7 @@ enum bfa_fcs_fabric_type {
 	BFA_FCS_FABRIC_UNKNOWN = 0,
 	BFA_FCS_FABRIC_SWITCHED = 1,
 	BFA_FCS_FABRIC_N2N = 2,
+	BFA_FCS_FABRIC_LOOP = 3,
 };
 
 
@@ -350,9 +351,10 @@ void		bfa_fcs_lport_ns_util_send_rspn_id(void *cbarg,
 				struct bfa_fcxp_s *fcxp_alloced);
 void            bfa_fcs_lport_scn_init(struct bfa_fcs_lport_s *vport);
 void            bfa_fcs_lport_scn_offline(struct bfa_fcs_lport_s *vport);
-void            bfa_fcs_lport_scn_online(struct bfa_fcs_lport_s *vport);
+void            bfa_fcs_lport_fab_scn_online(struct bfa_fcs_lport_s *vport);
 void            bfa_fcs_lport_scn_process_rscn(struct bfa_fcs_lport_s *port,
 					      struct fchs_s *rx_frame, u32 len);
+void		bfa_fcs_lport_lip_scn_online(bfa_fcs_lport_t *port);
 
 struct bfa_fcs_vport_s {
 	struct list_head		qe;		/*  queue elem	*/
@@ -453,6 +455,7 @@ struct bfa_fcs_rport_s {
 	struct bfa_rport_stats_s stats;	/*  rport stats */
 	enum bfa_rport_function	scsi_function;  /*  Initiator/Target */
 	struct bfa_fcs_rpf_s rpf;	/* Rport features module */
+	bfa_boolean_t   scn_online;	/* SCN online flag */
 };
 
 static inline struct bfa_rport_s *
@@ -639,9 +642,9 @@ struct bfa_fcs_fdmi_hba_attr_s {
 	u8         model[16];
 	u8         model_desc[256];
 	u8         hw_version[8];
-	u8         driver_version[8];
+	u8         driver_version[BFA_VERSION_LEN];
 	u8         option_rom_ver[BFA_VERSION_LEN];
-	u8         fw_version[8];
+	u8         fw_version[BFA_VERSION_LEN];
 	u8         os_name[256];
 	__be32        max_ct_pyld;
 };
@@ -733,7 +736,7 @@ enum rport_event {
 	RPSM_EVENT_LOGO_IMP     = 5,    /*  implicit logo for SLER      */
 	RPSM_EVENT_FCXP_SENT    = 6,    /*  Frame from has been sent    */
 	RPSM_EVENT_DELETE       = 7,    /*  RPORT delete request        */
-	RPSM_EVENT_SCN          = 8,    /*  state change notification   */
+	RPSM_EVENT_FAB_SCN	= 8,    /*  state change notification   */
 	RPSM_EVENT_ACCEPTED     = 9,    /*  Good response from remote device */
 	RPSM_EVENT_FAILED       = 10,   /*  Request to rport failed.    */
 	RPSM_EVENT_TIMEOUT      = 11,   /*  Rport SM timeout event      */
@@ -744,7 +747,9 @@ enum rport_event {
 	RPSM_EVENT_ADDRESS_DISC = 16,   /*  Need to Discover rport's PID */
 	RPSM_EVENT_PRLO_RCVD   = 17,    /*  PRLO from remote device     */
 	RPSM_EVENT_PLOGI_RETRY = 18,    /*  Retry PLOGI continuously */
-	RPSM_EVENT_FC4_FCS_ONLINE = 19, /*!< FC-4 FCS online complete */
+	RPSM_EVENT_SCN_OFFLINE = 19,	/* loop scn offline		*/
+	RPSM_EVENT_SCN_ONLINE   = 20,	/* loop scn online		*/
+	RPSM_EVENT_FC4_FCS_ONLINE = 21, /* FC-4 FCS online complete */
 };
 
 /*
@@ -763,7 +768,7 @@ enum bfa_fcs_itnim_event {
 	BFA_FCS_ITNIM_SM_DELETE = 10,   /*  delete event from rport */
 	BFA_FCS_ITNIM_SM_PRLO = 11,     /*  delete event from rport */
 	BFA_FCS_ITNIM_SM_RSP_NOT_SUPP = 12, /* cmd not supported rsp */
-	BFA_FCS_ITNIM_SM_HAL_ONLINE = 13, /*!< bfa rport online event */
+	BFA_FCS_ITNIM_SM_HAL_ONLINE = 13, /* bfa rport online event */
 };
 
 /*
diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
index 3b75f6fb2de1..1224d0462a49 100644
--- a/drivers/scsi/bfa/bfa_fcs_lport.c
+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
@@ -23,6 +23,34 @@
 
 BFA_TRC_FILE(FCS, PORT);
 
+/*
+ * ALPA to LIXA bitmap mapping
+ *
+ * ALPA 0x00 (Word 0, Bit 30) is invalid for N_Ports. Also Word 0 Bit 31
+ * is for L_bit (login required) and is filled as ALPA 0x00 here.
+ */
+static const u8 loop_alpa_map[] = {
+	0x00, 0x00, 0x01, 0x02, 0x04, 0x08, 0x0F, 0x10, /* Word 0 Bits 31..24 */
+	0x17, 0x18, 0x1B, 0x1D, 0x1E, 0x1F, 0x23, 0x25, /* Word 0 Bits 23..16 */
+	0x26, 0x27, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, /* Word 0 Bits 15..08 */
+	0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x39, 0x3A, /* Word 0 Bits 07..00 */
+
+	0x3C, 0x43, 0x45, 0x46, 0x47, 0x49, 0x4A, 0x4B, /* Word 1 Bits 31..24 */
+	0x4C, 0x4D, 0x4E, 0x51, 0x52, 0x53, 0x54, 0x55, /* Word 1 Bits 23..16 */
+	0x56, 0x59, 0x5A, 0x5C, 0x63, 0x65, 0x66, 0x67, /* Word 1 Bits 15..08 */
+	0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x71, 0x72, /* Word 1 Bits 07..00 */
+
+	0x73, 0x74, 0x75, 0x76, 0x79, 0x7A, 0x7C, 0x80, /* Word 2 Bits 31..24 */
+	0x81, 0x82, 0x84, 0x88, 0x8F, 0x90, 0x97, 0x98, /* Word 2 Bits 23..16 */
+	0x9B, 0x9D, 0x9E, 0x9F, 0xA3, 0xA5, 0xA6, 0xA7, /* Word 2 Bits 15..08 */
+	0xA9, 0xAA, 0xAB, 0xAC, 0xAD, 0xAE, 0xB1, 0xB2, /* Word 2 Bits 07..00 */
+
+	0xB3, 0xB4, 0xB5, 0xB6, 0xB9, 0xBA, 0xBC, 0xC3, /* Word 3 Bits 31..24 */
+	0xC5, 0xC6, 0xC7, 0xC9, 0xCA, 0xCB, 0xCC, 0xCD, /* Word 3 Bits 23..16 */
+	0xCE, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD9, /* Word 3 Bits 15..08 */
+	0xDA, 0xDC, 0xE0, 0xE1, 0xE2, 0xE4, 0xE8, 0xEF, /* Word 3 Bits 07..00 */
+};
+
 static void     bfa_fcs_lport_send_ls_rjt(struct bfa_fcs_lport_s *port,
 					 struct fchs_s *rx_fchs, u8 reason_code,
 					 u8 reason_code_expl);
@@ -51,6 +79,10 @@ static void	bfa_fcs_lport_n2n_init(struct bfa_fcs_lport_s *port);
 static void	bfa_fcs_lport_n2n_online(struct bfa_fcs_lport_s *port);
 static void	bfa_fcs_lport_n2n_offline(struct bfa_fcs_lport_s *port);
 
+static void	bfa_fcs_lport_loop_init(struct bfa_fcs_lport_s *port);
+static void	bfa_fcs_lport_loop_online(struct bfa_fcs_lport_s *port);
+static void	bfa_fcs_lport_loop_offline(struct bfa_fcs_lport_s *port);
+
 static struct {
 	void		(*init) (struct bfa_fcs_lport_s *port);
 	void		(*online) (struct bfa_fcs_lport_s *port);
@@ -62,7 +94,9 @@ static struct {
 	bfa_fcs_lport_fab_init, bfa_fcs_lport_fab_online,
 			bfa_fcs_lport_fab_offline}, {
 	bfa_fcs_lport_n2n_init, bfa_fcs_lport_n2n_online,
-			bfa_fcs_lport_n2n_offline},
+			bfa_fcs_lport_n2n_offline}, {
+	bfa_fcs_lport_loop_init, bfa_fcs_lport_loop_online,
+			bfa_fcs_lport_loop_offline},
 	};
 
 /*
@@ -1127,7 +1161,7 @@ static void
 bfa_fcs_lport_fab_online(struct bfa_fcs_lport_s *port)
 {
 	bfa_fcs_lport_ns_online(port);
-	bfa_fcs_lport_scn_online(port);
+	bfa_fcs_lport_fab_scn_online(port);
 }
 
 /*
@@ -1221,6 +1255,98 @@ bfa_fcs_lport_n2n_offline(struct bfa_fcs_lport_s *port)
 	n2n_port->reply_oxid = 0;
 }
 
+void
+bfa_fcport_get_loop_attr(struct bfa_fcs_lport_s *port)
+{
+	int i = 0, j = 0, bit = 0, alpa_bit = 0;
+	u8 k = 0;
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(port->fcs->bfa);
+
+	port->port_topo.ploop.alpabm_valid = fcport->alpabm_valid;
+	port->pid = fcport->myalpa;
+	port->pid = bfa_hton3b(port->pid);
+
+	for (i = 0; i < (FC_ALPA_MAX / 8); i++) {
+		for (j = 0, alpa_bit = 0; j < 8; j++, alpa_bit++) {
+			bfa_trc(port->fcs->bfa, fcport->alpabm.alpa_bm[i]);
+			bit = (fcport->alpabm.alpa_bm[i] & (1 << (7 - j)));
+			if (bit) {
+				port->port_topo.ploop.alpa_pos_map[k] =
+					loop_alpa_map[(i * 8) + alpa_bit];
+				k++;
+				bfa_trc(port->fcs->bfa, k);
+				bfa_trc(port->fcs->bfa,
+					 port->port_topo.ploop.alpa_pos_map[k]);
+			}
+		}
+	}
+	port->port_topo.ploop.num_alpa = k;
+}
+
+/*
+ * Called by fcs/port to initialize Loop topology.
+ */
+static void
+bfa_fcs_lport_loop_init(struct bfa_fcs_lport_s *port)
+{
+}
+
+/*
+ * Called by fcs/port to notify transition to online state.
+ */
+static void
+bfa_fcs_lport_loop_online(struct bfa_fcs_lport_s *port)
+{
+	u8 num_alpa = 0, alpabm_valid = 0;
+	struct bfa_fcs_rport_s *rport;
+	u8 *alpa_map = NULL;
+	int i = 0;
+	u32 pid;
+
+	bfa_fcport_get_loop_attr(port);
+
+	num_alpa = port->port_topo.ploop.num_alpa;
+	alpabm_valid = port->port_topo.ploop.alpabm_valid;
+	alpa_map = port->port_topo.ploop.alpa_pos_map;
+
+	bfa_trc(port->fcs->bfa, port->pid);
+	bfa_trc(port->fcs->bfa, num_alpa);
+	if (alpabm_valid == 1) {
+		for (i = 0; i < num_alpa; i++) {
+			bfa_trc(port->fcs->bfa, alpa_map[i]);
+			if (alpa_map[i] != bfa_hton3b(port->pid)) {
+				pid = alpa_map[i];
+				bfa_trc(port->fcs->bfa, pid);
+				rport = bfa_fcs_lport_get_rport_by_pid(port,
+						bfa_hton3b(pid));
+				if (!rport)
+					rport = bfa_fcs_rport_create(port,
+						bfa_hton3b(pid));
+			}
+		}
+	} else {
+		for (i = 0; i < MAX_ALPA_COUNT; i++) {
+			if (alpa_map[i] != port->pid) {
+				pid = loop_alpa_map[i];
+				bfa_trc(port->fcs->bfa, pid);
+				rport = bfa_fcs_lport_get_rport_by_pid(port,
+						bfa_hton3b(pid));
+				if (!rport)
+					rport = bfa_fcs_rport_create(port,
+						bfa_hton3b(pid));
+			}
+		}
+	}
+}
+
+/*
+ * Called by fcs/port to notify transition to offline state.
+ */
+static void
+bfa_fcs_lport_loop_offline(struct bfa_fcs_lport_s *port)
+{
+}
+
 #define BFA_FCS_FDMI_CMD_MAX_RETRIES 2
 
 /*
@@ -1888,13 +2014,10 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
 					 sizeof(templen));
 	}
 
-	/*
-	 * f/w Version = driver version
-	 */
 	attr = (struct fdmi_attr_s *) curr_ptr;
 	attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_FW_VERSION);
-	templen = (u16) strlen(fcs_hba_attr->driver_version);
-	memcpy(attr->value, fcs_hba_attr->driver_version, templen);
+	templen = (u16) strlen(fcs_hba_attr->fw_version);
+	memcpy(attr->value, fcs_hba_attr->fw_version, templen);
 	templen = fc_roundup(templen, sizeof(u32));
 	curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
 	len += templen;
@@ -2296,6 +2419,7 @@ bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi,
 {
 	struct bfa_fcs_lport_s *port = fdmi->ms->port;
 	struct bfa_fcs_driver_info_s  *driver_info = &port->fcs->driver_info;
+	struct bfa_fcs_fdmi_port_attr_s fcs_port_attr;
 
 	memset(hba_attr, 0, sizeof(struct bfa_fcs_fdmi_hba_attr_s));
 
@@ -2331,7 +2455,9 @@ bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi,
 				sizeof(driver_info->host_os_patch));
 	}
 
-	hba_attr->max_ct_pyld = cpu_to_be32(FC_MAX_PDUSZ);
+	/* Retrieve the max frame size from the port attr */
+	bfa_fcs_fdmi_get_portattr(fdmi, &fcs_port_attr);
+	hba_attr->max_ct_pyld = fcs_port_attr.max_frm_size;
 }
 
 static void
@@ -2391,7 +2517,7 @@ bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi,
 	/*
 	 * Max PDU Size.
 	 */
-	port_attr->max_frm_size = cpu_to_be32(FC_MAX_PDUSZ);
+	port_attr->max_frm_size = cpu_to_be32(pport_attr.pport_cfg.maxfrsize);
 
 	/*
 	 * OS device Name
@@ -5199,7 +5325,7 @@ bfa_fcs_lport_scn_offline(struct bfa_fcs_lport_s *port)
 }
 
 void
-bfa_fcs_lport_scn_online(struct bfa_fcs_lport_s *port)
+bfa_fcs_lport_fab_scn_online(struct bfa_fcs_lport_s *port)
 {
 	struct bfa_fcs_lport_scn_s *scn = BFA_FCS_GET_SCN_FROM_PORT(port);
 
@@ -5621,6 +5747,15 @@ bfa_fcs_lport_clear_stats(struct bfa_fcs_lport_s *fcs_port)
 }
 
 /*
+ * Let new loop map create missing rports
+ */
+void
+bfa_fcs_lport_lip_scn_online(struct bfa_fcs_lport_s *port)
+{
+	bfa_fcs_lport_loop_online(port);
+}
+
+/*
  * FCS virtual port state machine
  */
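
The table and the bitmap walk in bfa_fcport_get_loop_attr() above pair
up as follows: byte i, bit (7 - j) of the ALPA bitmap maps to
loop_alpa_map[i * 8 + j], and a set bit means that ALPA was claimed on
the loop. A trimmed standalone restatement of the decode, with a
hypothetical helper name (the caller provides an out array of at least
FC_ALPA_MAX entries):

	static int alpabm_to_alpas(const struct fc_alpabm_s *bm, u8 *out)
	{
		int i, j, n = 0;

		for (i = 0; i < FC_ALPA_MAX / 8; i++)
			for (j = 0; j < 8; j++)
				if (bm->alpa_bm[i] & (1 << (7 - j)))
					out[n++] = loop_alpa_map[i * 8 + j];
		return n;	/* number of claimed ALPAs found */
	}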
 
diff --git a/drivers/scsi/bfa/bfa_fcs_rport.c b/drivers/scsi/bfa/bfa_fcs_rport.c
index cc43b2a58ce3..58ac643ba9f3 100644
--- a/drivers/scsi/bfa/bfa_fcs_rport.c
+++ b/drivers/scsi/bfa/bfa_fcs_rport.c
@@ -106,9 +106,13 @@ static void	bfa_fcs_rport_sm_nsquery_sending(struct bfa_fcs_rport_s *rport,
 						 enum rport_event event);
 static void	bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport,
 					 enum rport_event event);
-static void	bfa_fcs_rport_sm_adisc_sending(struct bfa_fcs_rport_s *rport,
-						enum rport_event event);
-static void	bfa_fcs_rport_sm_adisc(struct bfa_fcs_rport_s *rport,
+static void	bfa_fcs_rport_sm_adisc_online_sending(
+			struct bfa_fcs_rport_s *rport, enum rport_event event);
+static void	bfa_fcs_rport_sm_adisc_online(struct bfa_fcs_rport_s *rport,
+					enum rport_event event);
+static void	bfa_fcs_rport_sm_adisc_offline_sending(struct bfa_fcs_rport_s
+					*rport, enum rport_event event);
+static void	bfa_fcs_rport_sm_adisc_offline(struct bfa_fcs_rport_s *rport,
 					enum rport_event event);
 static void	bfa_fcs_rport_sm_fc4_logorcv(struct bfa_fcs_rport_s *rport,
 						enum rport_event event);
@@ -150,8 +154,10 @@ static struct bfa_sm_table_s rport_sm_table[] = {
 	{BFA_SM(bfa_fcs_rport_sm_online), BFA_RPORT_ONLINE},
 	{BFA_SM(bfa_fcs_rport_sm_nsquery_sending), BFA_RPORT_NSQUERY},
 	{BFA_SM(bfa_fcs_rport_sm_nsquery), BFA_RPORT_NSQUERY},
-	{BFA_SM(bfa_fcs_rport_sm_adisc_sending), BFA_RPORT_ADISC},
-	{BFA_SM(bfa_fcs_rport_sm_adisc), BFA_RPORT_ADISC},
+	{BFA_SM(bfa_fcs_rport_sm_adisc_online_sending), BFA_RPORT_ADISC},
+	{BFA_SM(bfa_fcs_rport_sm_adisc_online), BFA_RPORT_ADISC},
+	{BFA_SM(bfa_fcs_rport_sm_adisc_offline_sending), BFA_RPORT_ADISC},
+	{BFA_SM(bfa_fcs_rport_sm_adisc_offline), BFA_RPORT_ADISC},
 	{BFA_SM(bfa_fcs_rport_sm_fc4_logorcv), BFA_RPORT_LOGORCV},
 	{BFA_SM(bfa_fcs_rport_sm_fc4_logosend), BFA_RPORT_LOGO},
 	{BFA_SM(bfa_fcs_rport_sm_fc4_offline), BFA_RPORT_OFFLINE},
@@ -231,10 +237,19 @@ bfa_fcs_rport_sm_plogi_sending(struct bfa_fcs_rport_s *rport,
 		bfa_fcs_rport_send_plogiacc(rport, NULL);
 		break;
 
+	case RPSM_EVENT_SCN_OFFLINE:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
+		bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
+		bfa_timer_start(rport->fcs->bfa, &rport->timer,
+				bfa_fcs_rport_timeout, rport,
+				bfa_fcs_rport_del_timeout);
+		break;
 	case RPSM_EVENT_ADDRESS_CHANGE:
-	case RPSM_EVENT_SCN:
+	case RPSM_EVENT_FAB_SCN:
 		/* query the NS */
 		bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
+		WARN_ON(!(bfa_fcport_get_topology(rport->port->fcs->bfa) !=
+					BFA_PORT_TOPOLOGY_LOOP));
 		bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending);
 		rport->ns_retries = 0;
 		bfa_fcs_rport_send_nsdisc(rport, NULL);
@@ -280,12 +295,20 @@ bfa_fcs_rport_sm_plogiacc_sending(struct bfa_fcs_rport_s *rport,
 
 	case RPSM_EVENT_PLOGI_RCVD:
 	case RPSM_EVENT_PLOGI_COMP:
-	case RPSM_EVENT_SCN:
+	case RPSM_EVENT_FAB_SCN:
 		/*
 		 * Ignore, SCN is possibly online notification.
 		 */
 		break;
 
+	case RPSM_EVENT_SCN_OFFLINE:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
+		bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
+		bfa_timer_start(rport->fcs->bfa, &rport->timer,
+				bfa_fcs_rport_timeout, rport,
+				bfa_fcs_rport_del_timeout);
+		break;
+
 	case RPSM_EVENT_ADDRESS_CHANGE:
 		bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
 		bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending);
@@ -346,9 +369,19 @@ bfa_fcs_rport_sm_plogi_retry(struct bfa_fcs_rport_s *rport,
 		bfa_fcs_rport_send_plogiacc(rport, NULL);
 		break;
 
+	case RPSM_EVENT_SCN_OFFLINE:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
+		bfa_timer_stop(&rport->timer);
+		bfa_timer_start(rport->fcs->bfa, &rport->timer,
+				bfa_fcs_rport_timeout, rport,
+				bfa_fcs_rport_del_timeout);
+		break;
+
 	case RPSM_EVENT_ADDRESS_CHANGE:
-	case RPSM_EVENT_SCN:
+	case RPSM_EVENT_FAB_SCN:
 		bfa_timer_stop(&rport->timer);
+		WARN_ON(!(bfa_fcport_get_topology(rport->port->fcs->bfa) !=
+					BFA_PORT_TOPOLOGY_LOOP));
 		bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending);
 		rport->ns_retries = 0;
 		bfa_fcs_rport_send_nsdisc(rport, NULL);
@@ -422,7 +455,18 @@ bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport, enum rport_event event)
 		}
 		break;
 
-	case	RPSM_EVENT_PLOGI_RETRY:
+	case RPSM_EVENT_SCN_ONLINE:
+		break;
+
+	case RPSM_EVENT_SCN_OFFLINE:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
+		bfa_fcxp_discard(rport->fcxp);
+		bfa_timer_start(rport->fcs->bfa, &rport->timer,
+				bfa_fcs_rport_timeout, rport,
+				bfa_fcs_rport_del_timeout);
+		break;
+
+	case RPSM_EVENT_PLOGI_RETRY:
 		rport->plogi_retries = 0;
 		bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_retry);
 		bfa_timer_start(rport->fcs->bfa, &rport->timer,
@@ -440,8 +484,10 @@ bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport, enum rport_event event)
 		break;
 
 	case RPSM_EVENT_ADDRESS_CHANGE:
-	case RPSM_EVENT_SCN:
+	case RPSM_EVENT_FAB_SCN:
 		bfa_fcxp_discard(rport->fcxp);
+		WARN_ON(bfa_fcport_get_topology(rport->port->fcs->bfa) ==
+					BFA_PORT_TOPOLOGY_LOOP);
 		bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending);
 		rport->ns_retries = 0;
 		bfa_fcs_rport_send_nsdisc(rport, NULL);
@@ -512,7 +558,8 @@ bfa_fcs_rport_sm_fc4_fcs_online(struct bfa_fcs_rport_s *rport,
 	case RPSM_EVENT_PLOGI_COMP:
 	case RPSM_EVENT_LOGO_IMP:
 	case RPSM_EVENT_ADDRESS_CHANGE:
-	case RPSM_EVENT_SCN:
+	case RPSM_EVENT_FAB_SCN:
+	case RPSM_EVENT_SCN_OFFLINE:
 		bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
 		bfa_fcs_rport_fcs_offline_action(rport);
 		break;
@@ -561,9 +608,10 @@ bfa_fcs_rport_sm_hal_online(struct bfa_fcs_rport_s *rport,
 		bfa_fcs_rport_fcs_offline_action(rport);
 		break;
 
-	case RPSM_EVENT_SCN:
+	case RPSM_EVENT_FAB_SCN:
 	case RPSM_EVENT_LOGO_IMP:
 	case RPSM_EVENT_ADDRESS_CHANGE:
+	case RPSM_EVENT_SCN_OFFLINE:
 		bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
 		bfa_fcs_rport_fcs_offline_action(rport);
 		break;
@@ -595,14 +643,15 @@ bfa_fcs_rport_sm_online(struct bfa_fcs_rport_s *rport, enum rport_event event)
 	bfa_trc(rport->fcs, event);
 
 	switch (event) {
-	case RPSM_EVENT_SCN:
+	case RPSM_EVENT_FAB_SCN:
 		if (bfa_fcs_fabric_is_switched(rport->port->fabric)) {
 			bfa_sm_set_state(rport,
 					 bfa_fcs_rport_sm_nsquery_sending);
 			rport->ns_retries = 0;
 			bfa_fcs_rport_send_nsdisc(rport, NULL);
 		} else {
-			bfa_sm_set_state(rport, bfa_fcs_rport_sm_adisc_sending);
+			bfa_sm_set_state(rport,
+				bfa_fcs_rport_sm_adisc_online_sending);
 			bfa_fcs_rport_send_adisc(rport, NULL);
 		}
 		break;
@@ -610,6 +659,7 @@ bfa_fcs_rport_sm_online(struct bfa_fcs_rport_s *rport, enum rport_event event)
 	case RPSM_EVENT_PLOGI_RCVD:
 	case RPSM_EVENT_LOGO_IMP:
 	case RPSM_EVENT_ADDRESS_CHANGE:
+	case RPSM_EVENT_SCN_OFFLINE:
 		bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
 		bfa_fcs_rport_hal_offline_action(rport);
 		break;
@@ -625,6 +675,7 @@ bfa_fcs_rport_sm_online(struct bfa_fcs_rport_s *rport, enum rport_event event)
 		bfa_fcs_rport_hal_offline_action(rport);
 		break;
 
+	case RPSM_EVENT_SCN_ONLINE:
 	case RPSM_EVENT_PLOGI_COMP:
 		break;
 
@@ -656,7 +707,7 @@ bfa_fcs_rport_sm_nsquery_sending(struct bfa_fcs_rport_s *rport,
 		bfa_fcs_rport_hal_offline_action(rport);
 		break;
 
-	case RPSM_EVENT_SCN:
+	case RPSM_EVENT_FAB_SCN:
 		/*
 		 * ignore SCN, wait for response to query itself
 		 */
@@ -696,7 +747,7 @@ bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport, enum rport_event event)
 
 	switch (event) {
 	case RPSM_EVENT_ACCEPTED:
-		bfa_sm_set_state(rport, bfa_fcs_rport_sm_adisc_sending);
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_adisc_online_sending);
 		bfa_fcs_rport_send_adisc(rport, NULL);
 		break;
 
@@ -718,7 +769,7 @@ bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport, enum rport_event event)
 		bfa_fcs_rport_hal_offline_action(rport);
 		break;
 
-	case RPSM_EVENT_SCN:
+	case RPSM_EVENT_FAB_SCN:
 		break;
 
 	case RPSM_EVENT_LOGO_RCVD:
@@ -747,7 +798,7 @@ bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport, enum rport_event event)
  *	authenticating with rport. FC-4s are paused.
  */
 static void
-bfa_fcs_rport_sm_adisc_sending(struct bfa_fcs_rport_s *rport,
+bfa_fcs_rport_sm_adisc_online_sending(struct bfa_fcs_rport_s *rport,
 	 enum rport_event event)
 {
 	bfa_trc(rport->fcs, rport->pwwn);
@@ -756,7 +807,7 @@ bfa_fcs_rport_sm_adisc_sending(struct bfa_fcs_rport_s *rport,
 
 	switch (event) {
 	case RPSM_EVENT_FCXP_SENT:
-		bfa_sm_set_state(rport, bfa_fcs_rport_sm_adisc);
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_adisc_online);
 		break;
 
 	case RPSM_EVENT_DELETE:
@@ -779,7 +830,7 @@ bfa_fcs_rport_sm_adisc_sending(struct bfa_fcs_rport_s *rport,
 		bfa_fcs_rport_hal_offline_action(rport);
 		break;
 
-	case RPSM_EVENT_SCN:
+	case RPSM_EVENT_FAB_SCN:
 		break;
 
 	case RPSM_EVENT_PLOGI_RCVD:
@@ -798,7 +849,8 @@ bfa_fcs_rport_sm_adisc_sending(struct bfa_fcs_rport_s *rport,
  *		FC-4s are paused.
  */
 static void
-bfa_fcs_rport_sm_adisc(struct bfa_fcs_rport_s *rport, enum rport_event event)
+bfa_fcs_rport_sm_adisc_online(struct bfa_fcs_rport_s *rport,
+				enum rport_event event)
 {
 	bfa_trc(rport->fcs, rport->pwwn);
 	bfa_trc(rport->fcs, rport->pid);
@@ -831,7 +883,7 @@ bfa_fcs_rport_sm_adisc(struct bfa_fcs_rport_s *rport, enum rport_event event)
 		bfa_fcs_rport_hal_offline_action(rport);
 		break;
 
-	case RPSM_EVENT_SCN:
+	case RPSM_EVENT_FAB_SCN:
 		/*
 		 * already processing RSCN
 		 */
@@ -856,7 +908,96 @@ bfa_fcs_rport_sm_adisc(struct bfa_fcs_rport_s *rport, enum rport_event event)
 }
 
 /*
- *		Rport has sent LOGO. Awaiting FC-4 offline completion callback.
+ * An ADISC is being sent to authenticate with the rport.
+ * Offline actions have already been taken.
+ */
+static void
+bfa_fcs_rport_sm_adisc_offline_sending(struct bfa_fcs_rport_s *rport,
+	enum rport_event event)
+{
+	bfa_trc(rport->fcs, rport->pwwn);
+	bfa_trc(rport->fcs, rport->pid);
+	bfa_trc(rport->fcs, event);
+
+	switch (event) {
+	case RPSM_EVENT_FCXP_SENT:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_adisc_offline);
+		break;
+
+	case RPSM_EVENT_DELETE:
+	case RPSM_EVENT_SCN_OFFLINE:
+	case RPSM_EVENT_LOGO_IMP:
+	case RPSM_EVENT_LOGO_RCVD:
+	case RPSM_EVENT_PRLO_RCVD:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
+		bfa_fcxp_walloc_cancel(rport->fcs->bfa,
+			&rport->fcxp_wqe);
+		bfa_timer_start(rport->fcs->bfa, &rport->timer,
+			bfa_fcs_rport_timeout, rport,
+			bfa_fcs_rport_del_timeout);
+		break;
+
+	case RPSM_EVENT_PLOGI_RCVD:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending);
+		bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
+		bfa_fcs_rport_send_plogiacc(rport, NULL);
+		break;
+
+	default:
+		bfa_sm_fault(rport->fcs, event);
+	}
+}
+
+/*
+ * ADISC has been sent to the rport; awaiting the response.
+ * Offline actions have already been taken.
+ */
+static void
+bfa_fcs_rport_sm_adisc_offline(struct bfa_fcs_rport_s *rport,
+			enum rport_event event)
+{
+	bfa_trc(rport->fcs, rport->pwwn);
+	bfa_trc(rport->fcs, rport->pid);
+	bfa_trc(rport->fcs, event);
+
+	switch (event) {
+	case RPSM_EVENT_ACCEPTED:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online);
+		bfa_fcs_rport_hal_online(rport);
+		break;
+
+	case RPSM_EVENT_PLOGI_RCVD:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending);
+		bfa_fcxp_discard(rport->fcxp);
+		bfa_fcs_rport_send_plogiacc(rport, NULL);
+		break;
+
+	case RPSM_EVENT_FAILED:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
+		bfa_timer_start(rport->fcs->bfa, &rport->timer,
+			bfa_fcs_rport_timeout, rport,
+			bfa_fcs_rport_del_timeout);
+		break;
+
+	case RPSM_EVENT_DELETE:
+	case RPSM_EVENT_SCN_OFFLINE:
+	case RPSM_EVENT_LOGO_IMP:
+	case RPSM_EVENT_LOGO_RCVD:
+	case RPSM_EVENT_PRLO_RCVD:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
+		bfa_fcxp_discard(rport->fcxp);
+		bfa_timer_start(rport->fcs->bfa, &rport->timer,
+			bfa_fcs_rport_timeout, rport,
+			bfa_fcs_rport_del_timeout);
+		break;
+
+	default:
+		bfa_sm_fault(rport->fcs, event);
+	}
+}
+
+/*
+ * Rport has sent LOGO. Awaiting FC-4 offline completion callback.
  */
 static void
 bfa_fcs_rport_sm_fc4_logorcv(struct bfa_fcs_rport_s *rport,
@@ -881,6 +1022,8 @@ bfa_fcs_rport_sm_fc4_logorcv(struct bfa_fcs_rport_s *rport,
 		bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_off_delete);
 		break;
 
+	case RPSM_EVENT_SCN_ONLINE:
+	case RPSM_EVENT_SCN_OFFLINE:
 	case RPSM_EVENT_HCB_ONLINE:
 	case RPSM_EVENT_LOGO_RCVD:
 	case RPSM_EVENT_PRLO_RCVD:
@@ -945,6 +1088,8 @@ bfa_fcs_rport_sm_fc4_offline(struct bfa_fcs_rport_s *rport,
 		bfa_fcs_rport_hal_offline(rport);
 		break;
 
+	case RPSM_EVENT_SCN_ONLINE:
+		break;
 	case RPSM_EVENT_LOGO_RCVD:
 		/*
 		 * Rport is going offline. Just ack the logo
@@ -956,8 +1101,9 @@ bfa_fcs_rport_sm_fc4_offline(struct bfa_fcs_rport_s *rport,
 		bfa_fcs_rport_send_prlo_acc(rport);
 		break;
 
+	case RPSM_EVENT_SCN_OFFLINE:
 	case RPSM_EVENT_HCB_ONLINE:
-	case RPSM_EVENT_SCN:
+	case RPSM_EVENT_FAB_SCN:
 	case RPSM_EVENT_LOGO_IMP:
 	case RPSM_EVENT_ADDRESS_CHANGE:
 		/*
@@ -1015,6 +1161,19 @@ bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport,
 				bfa_fcs_rport_sm_nsdisc_sending);
 			rport->ns_retries = 0;
 			bfa_fcs_rport_send_nsdisc(rport, NULL);
+		} else if (bfa_fcport_get_topology(rport->port->fcs->bfa) ==
+					BFA_PORT_TOPOLOGY_LOOP) {
+			if (rport->scn_online) {
+				bfa_sm_set_state(rport,
+					bfa_fcs_rport_sm_adisc_offline_sending);
+				bfa_fcs_rport_send_adisc(rport, NULL);
+			} else {
+				bfa_sm_set_state(rport,
+					bfa_fcs_rport_sm_offline);
+				bfa_timer_start(rport->fcs->bfa, &rport->timer,
+					bfa_fcs_rport_timeout, rport,
+					bfa_fcs_rport_del_timeout);
+			}
 		} else {
 			bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_sending);
 			rport->plogi_retries = 0;
@@ -1027,7 +1186,9 @@ bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport,
 		bfa_fcs_rport_free(rport);
 		break;
 
-	case RPSM_EVENT_SCN:
+	case RPSM_EVENT_SCN_ONLINE:
+	case RPSM_EVENT_SCN_OFFLINE:
+	case RPSM_EVENT_FAB_SCN:
 	case RPSM_EVENT_LOGO_RCVD:
 	case RPSM_EVENT_PRLO_RCVD:
 	case RPSM_EVENT_PLOGI_RCVD:
@@ -1106,6 +1267,8 @@ bfa_fcs_rport_sm_hcb_logorcv(struct bfa_fcs_rport_s *rport,
 		bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_offline);
 		break;
 
+	case RPSM_EVENT_SCN_ONLINE:
+	case RPSM_EVENT_SCN_OFFLINE:
 	case RPSM_EVENT_LOGO_RCVD:
 	case RPSM_EVENT_PRLO_RCVD:
 		/*
@@ -1146,6 +1309,8 @@ bfa_fcs_rport_sm_hcb_logosend(struct bfa_fcs_rport_s *rport,
 		bfa_sm_set_state(rport, bfa_fcs_rport_sm_delete_pending);
 		break;
 
+	case RPSM_EVENT_SCN_ONLINE:
+	case RPSM_EVENT_SCN_OFFLINE:
 	case RPSM_EVENT_ADDRESS_CHANGE:
 		break;
 
@@ -1172,7 +1337,9 @@ bfa_fcs_rport_sm_logo_sending(struct bfa_fcs_rport_s *rport,
 		bfa_fcs_rport_free(rport);
 		break;
 
-	case RPSM_EVENT_SCN:
+	case RPSM_EVENT_SCN_ONLINE:
+	case RPSM_EVENT_SCN_OFFLINE:
+	case RPSM_EVENT_FAB_SCN:
 	case RPSM_EVENT_ADDRESS_CHANGE:
 		break;
 
@@ -1209,10 +1376,12 @@ bfa_fcs_rport_sm_offline(struct bfa_fcs_rport_s *rport, enum rport_event event)
 		bfa_fcs_rport_free(rport);
 		break;
 
-	case RPSM_EVENT_SCN:
+	case RPSM_EVENT_FAB_SCN:
 	case RPSM_EVENT_ADDRESS_CHANGE:
-		bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending);
 		bfa_timer_stop(&rport->timer);
+		WARN_ON(bfa_fcport_get_topology(rport->port->fcs->bfa) ==
+					BFA_PORT_TOPOLOGY_LOOP);
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending);
 		rport->ns_retries = 0;
 		bfa_fcs_rport_send_nsdisc(rport, NULL);
 		break;
@@ -1232,6 +1401,7 @@ bfa_fcs_rport_sm_offline(struct bfa_fcs_rport_s *rport, enum rport_event event)
 	case RPSM_EVENT_LOGO_RCVD:
 	case RPSM_EVENT_PRLO_RCVD:
 	case RPSM_EVENT_LOGO_IMP:
+	case RPSM_EVENT_SCN_OFFLINE:
 		break;
 
 	case RPSM_EVENT_PLOGI_COMP:
@@ -1240,6 +1410,12 @@ bfa_fcs_rport_sm_offline(struct bfa_fcs_rport_s *rport, enum rport_event event)
 		bfa_fcs_rport_fcs_online_action(rport);
 		break;
 
+	case RPSM_EVENT_SCN_ONLINE:
+		bfa_timer_stop(&rport->timer);
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_sending);
+		bfa_fcs_rport_send_plogi(rport, NULL);
+		break;
+
 	case RPSM_EVENT_PLOGI_SEND:
 		bfa_timer_stop(&rport->timer);
 		bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_sending);
@@ -1280,7 +1456,7 @@ bfa_fcs_rport_sm_nsdisc_sending(struct bfa_fcs_rport_s *rport,
 		bfa_fcs_rport_send_plogiacc(rport, NULL);
 		break;
 
-	case RPSM_EVENT_SCN:
+	case RPSM_EVENT_FAB_SCN:
 	case RPSM_EVENT_LOGO_RCVD:
 	case RPSM_EVENT_PRLO_RCVD:
 	case RPSM_EVENT_PLOGI_SEND:
@@ -1326,7 +1502,7 @@ bfa_fcs_rport_sm_nsdisc_retry(struct bfa_fcs_rport_s *rport,
 		bfa_fcs_rport_send_nsdisc(rport, NULL);
 		break;
 
-	case RPSM_EVENT_SCN:
+	case RPSM_EVENT_FAB_SCN:
 	case RPSM_EVENT_ADDRESS_CHANGE:
 		bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending);
 		bfa_timer_stop(&rport->timer);
@@ -1439,7 +1615,7 @@ bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport,
 	case RPSM_EVENT_PRLO_RCVD:
 		bfa_fcs_rport_send_prlo_acc(rport);
 		break;
-	case RPSM_EVENT_SCN:
+	case RPSM_EVENT_FAB_SCN:
 		/*
 		 * ignore, wait for NS query response
 		 */
@@ -2546,7 +2722,7 @@ void
 bfa_fcs_rport_scn(struct bfa_fcs_rport_s *rport)
 {
 	rport->stats.rscns++;
-	bfa_sm_send_event(rport, RPSM_EVENT_SCN);
+	bfa_sm_send_event(rport, RPSM_EVENT_FAB_SCN);
 }
 
 /*
@@ -2621,6 +2797,48 @@ bfa_cb_rport_qos_scn_flowid(void *cbarg,
 	bfa_fcs_rport_aen_post(rport, BFA_RPORT_AEN_QOS_FLOWID, &aen_data);
 }
 
+void
+bfa_cb_rport_scn_online(struct bfa_s *bfa)
+{
+	struct bfa_fcs_s *fcs = &((struct bfad_s *)bfa->bfad)->bfa_fcs;
+	struct bfa_fcs_lport_s *port = bfa_fcs_get_base_port(fcs);
+	struct bfa_fcs_rport_s *rp;
+	struct list_head *qe;
+
+	list_for_each(qe, &port->rport_q) {
+		rp = (struct bfa_fcs_rport_s *) qe;
+		bfa_sm_send_event(rp, RPSM_EVENT_SCN_ONLINE);
+		rp->scn_online = BFA_TRUE;
+	}
+
+	if (bfa_fcs_lport_is_online(port))
+		bfa_fcs_lport_lip_scn_online(port);
+}
+
+void
+bfa_cb_rport_scn_no_dev(void *rport)
+{
+	struct bfa_fcs_rport_s *rp = rport;
+
+	bfa_sm_send_event(rp, RPSM_EVENT_SCN_OFFLINE);
+	rp->scn_online = BFA_FALSE;
+}
+
+void
+bfa_cb_rport_scn_offline(struct bfa_s *bfa)
+{
+	struct bfa_fcs_s *fcs = &((struct bfad_s *)bfa->bfad)->bfa_fcs;
+	struct bfa_fcs_lport_s *port = bfa_fcs_get_base_port(fcs);
+	struct bfa_fcs_rport_s *rp;
+	struct list_head *qe;
+
+	list_for_each(qe, &port->rport_q) {
+		rp = (struct bfa_fcs_rport_s *) qe;
+		bfa_sm_send_event(rp, RPSM_EVENT_SCN_OFFLINE);
+		rp->scn_online = BFA_FALSE;
+	}
+}
+
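Both walkers above cast the bare list_head cursor straight to the rport, which works only because the queue element sits at the start of struct bfa_fcs_rport_s. A sketch of the more explicit equivalent, assuming the embedded list head is named qe (illustrative, not a proposed change):

	list_for_each(qe, &port->rport_q) {
		rp = container_of(qe, struct bfa_fcs_rport_s, qe);
		bfa_sm_send_event(rp, RPSM_EVENT_SCN_OFFLINE);
		rp->scn_online = BFA_FALSE;
	}
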
 /*
  *	brief
  *	This routine is a static BFA callback when there is a QoS priority
@@ -2808,6 +3026,9 @@ bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport,
 	struct bfa_rport_qos_attr_s qos_attr;
 	struct bfa_fcs_lport_s *port = rport->port;
 	bfa_port_speed_t rport_speed = rport->rpf.rpsc_speed;
+	struct bfa_port_attr_s port_attr;
+
+	bfa_fcport_get_attr(rport->fcs->bfa, &port_attr);
 
 	memset(rport_attr, 0, sizeof(struct bfa_rport_attr_s));
 	memset(&qos_attr, 0, sizeof(struct bfa_rport_qos_attr_s));
@@ -2838,7 +3059,8 @@ bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport,
 			rport_speed =
 				bfa_fcport_get_ratelim_speed(rport->fcs->bfa);
 
-		if (rport_speed < bfa_fcs_lport_get_rport_max_speed(port))
+		if ((bfa_fcs_lport_get_rport_max_speed(port) !=
+		    BFA_PORT_SPEED_UNKNOWN) && (rport_speed < port_attr.speed))
 			rport_attr->trl_enforced = BFA_TRUE;
 	}
 }
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index 75ca8752b9f4..0116c1032e25 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -731,8 +731,7 @@ bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf)
 	/*
 	 * Unlock the hw semaphore. Should be here only once per boot.
 	 */
-	readl(iocpf->ioc->ioc_regs.ioc_sem_reg);
-	writel(1, iocpf->ioc->ioc_regs.ioc_sem_reg);
+	bfa_ioc_ownership_reset(iocpf->ioc);
 
 	/*
 	 * unlock init semaphore.
@@ -1751,6 +1750,7 @@ bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
 	attr->card_type     = be32_to_cpu(attr->card_type);
 	attr->maxfrsize	    = be16_to_cpu(attr->maxfrsize);
 	ioc->fcmode	= (attr->port_mode == BFI_PORT_MODE_FC);
+	attr->mfg_year	= be16_to_cpu(attr->mfg_year);
 
 	bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
 }
@@ -2497,6 +2497,9 @@ bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
 	ad_attr->cna_capable = bfa_ioc_is_cna(ioc);
 	ad_attr->trunk_capable = (ad_attr->nports > 1) &&
 				  !bfa_ioc_is_cna(ioc) && !ad_attr->is_mezz;
+	ad_attr->mfg_day = ioc_attr->mfg_day;
+	ad_attr->mfg_month = ioc_attr->mfg_month;
+	ad_attr->mfg_year = ioc_attr->mfg_year;
 }
 
 enum bfa_ioc_type_e
@@ -2923,7 +2926,7 @@ bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc)
 		return;
 	}
 
-	if (ioc->iocpf.poll_time >= BFA_IOC_TOV)
+	if (ioc->iocpf.poll_time >= (3 * BFA_IOC_TOV))
 		bfa_iocpf_timeout(ioc);
 	else {
 		ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
@@ -3016,7 +3019,6 @@ bfa_ablk_config_swap(struct bfa_ablk_cfg_s *cfg)
 	struct bfa_ablk_cfg_inst_s *cfg_inst;
 	int i, j;
 	u16	be16;
-	u32	be32;
 
 	for (i = 0; i < BFA_ABLK_MAX; i++) {
 		cfg_inst = &cfg->inst[i];
@@ -3027,8 +3029,10 @@ bfa_ablk_config_swap(struct bfa_ablk_cfg_s *cfg)
 			cfg_inst->pf_cfg[j].num_qpairs = be16_to_cpu(be16);
 			be16 = cfg_inst->pf_cfg[j].num_vectors;
 			cfg_inst->pf_cfg[j].num_vectors = be16_to_cpu(be16);
-			be32 = cfg_inst->pf_cfg[j].bw;
-			cfg_inst->pf_cfg[j].bw = be16_to_cpu(be32);
+			be16 = cfg_inst->pf_cfg[j].bw_min;
+			cfg_inst->pf_cfg[j].bw_min = be16_to_cpu(be16);
+			be16 = cfg_inst->pf_cfg[j].bw_max;
+			cfg_inst->pf_cfg[j].bw_max = be16_to_cpu(be16);
 		}
 	}
 }
@@ -3170,7 +3174,8 @@ bfa_ablk_query(struct bfa_ablk_s *ablk, struct bfa_ablk_cfg_s *ablk_cfg,
 
 bfa_status_t
 bfa_ablk_pf_create(struct bfa_ablk_s *ablk, u16 *pcifn,
-		u8 port, enum bfi_pcifn_class personality, int bw,
+		u8 port, enum bfi_pcifn_class personality,
+		u16 bw_min, u16 bw_max,
 		bfa_ablk_cbfn_t cbfn, void *cbarg)
 {
 	struct bfi_ablk_h2i_pf_req_s *m;
@@ -3194,7 +3199,8 @@ bfa_ablk_pf_create(struct bfa_ablk_s *ablk, u16 *pcifn,
 	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_CREATE,
 		    bfa_ioc_portid(ablk->ioc));
 	m->pers = cpu_to_be16((u16)personality);
-	m->bw = cpu_to_be32(bw);
+	m->bw_min = cpu_to_be16(bw_min);
+	m->bw_max = cpu_to_be16(bw_max);
 	m->port = port;
 	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
 
@@ -3294,8 +3300,8 @@ bfa_ablk_port_config(struct bfa_ablk_s *ablk, int port, enum bfa_mode_s mode,
 }
 
 bfa_status_t
-bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn, int bw,
-		bfa_ablk_cbfn_t cbfn, void *cbarg)
+bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn, u16 bw_min,
+		   u16 bw_max, bfa_ablk_cbfn_t cbfn, void *cbarg)
 {
 	struct bfi_ablk_h2i_pf_req_s *m;
 
@@ -3317,7 +3323,8 @@ bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn, int bw,
 	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_UPDATE,
 		bfa_ioc_portid(ablk->ioc));
 	m->pcifn = (u8)pcifn;
-	m->bw = cpu_to_be32(bw);
+	m->bw_min = cpu_to_be16(bw_min);
+	m->bw_max = cpu_to_be16(bw_max);
 	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
 
 	return BFA_STATUS_OK;
@@ -4680,22 +4687,25 @@ diag_tempsensor_comp(struct bfa_diag_s *diag, bfi_diag_ts_rsp_t *rsp)
 	diag->tsensor.temp->temp = be16_to_cpu(rsp->temp);
 	diag->tsensor.temp->ts_junc = rsp->ts_junc;
 	diag->tsensor.temp->ts_brd = rsp->ts_brd;
-	diag->tsensor.temp->status = BFA_STATUS_OK;
 
 	if (rsp->ts_brd) {
+		/* tsensor.temp->status is brd_temp status */
+		diag->tsensor.temp->status = rsp->status;
 		if (rsp->status == BFA_STATUS_OK) {
 			diag->tsensor.temp->brd_temp =
 				be16_to_cpu(rsp->brd_temp);
-		} else {
-			bfa_trc(diag, rsp->status);
+		} else
 			diag->tsensor.temp->brd_temp = 0;
-			diag->tsensor.temp->status = BFA_STATUS_DEVBUSY;
-		}
 	}
+
+	bfa_trc(diag, rsp->status);
 	bfa_trc(diag, rsp->ts_junc);
 	bfa_trc(diag, rsp->temp);
 	bfa_trc(diag, rsp->ts_brd);
 	bfa_trc(diag, rsp->brd_temp);
+
+	/* tsensor status is always good because we always have junction temp */
+	diag->tsensor.status = BFA_STATUS_OK;
 	diag->tsensor.cbfn(diag->tsensor.cbarg, diag->tsensor.status);
 	diag->tsensor.lock = 0;
 }
@@ -4924,6 +4934,7 @@ bfa_diag_tsensor_query(struct bfa_diag_s *diag,
 	diag->tsensor.temp = result;
 	diag->tsensor.cbfn = cbfn;
 	diag->tsensor.cbarg = cbarg;
+	diag->tsensor.status = BFA_STATUS_OK;
 
 	/* Send msg to fw */
 	diag_tempsensor_send(diag);
@@ -5615,7 +5626,7 @@ bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
 		}
 		bfa_sm_set_state(dconf, bfa_dconf_sm_flash_read);
 		bfa_timer_start(dconf->bfa, &dconf->timer,
-			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
+			bfa_dconf_timer, dconf, 2 * BFA_DCONF_UPDATE_TOV);
 		bfa_status = bfa_flash_read_part(BFA_FLASH(dconf->bfa),
 					BFA_FLASH_PART_DRV, dconf->instance,
 					dconf->dconf,
@@ -5655,7 +5666,7 @@ bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
 		break;
 	case BFA_DCONF_SM_TIMEOUT:
 		bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
-		bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_IOC_FAILED);
+		bfa_ioc_suspend(&dconf->bfa->ioc);
 		break;
 	case BFA_DCONF_SM_EXIT:
 		bfa_timer_stop(&dconf->timer);
@@ -5853,7 +5864,6 @@ bfa_dconf_init_cb(void *arg, bfa_status_t status)
 	struct bfa_s *bfa = arg;
 	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
 
-	bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
 	if (status == BFA_STATUS_OK) {
 		bfa_dconf_read_data_valid(bfa) = BFA_TRUE;
 		if (dconf->dconf->hdr.signature != BFI_DCONF_SIGNATURE)
@@ -5861,6 +5871,7 @@ bfa_dconf_init_cb(void *arg, bfa_status_t status)
 		if (dconf->dconf->hdr.version != BFI_DCONF_VERSION)
 			dconf->dconf->hdr.version = BFI_DCONF_VERSION;
 	}
+	bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
 	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_DCONF_DONE);
 }
 
@@ -5945,3 +5956,451 @@ bfa_dconf_modexit(struct bfa_s *bfa)
 	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
 	bfa_sm_send_event(dconf, BFA_DCONF_SM_EXIT);
 }
+
+/*
+ * FRU specific functions
+ */
+
+#define BFA_FRU_DMA_BUF_SZ	0x02000		/* 8k dma buffer */
+#define BFA_FRU_CHINOOK_MAX_SIZE 0x10000
+#define BFA_FRU_LIGHTNING_MAX_SIZE 0x200
+
+static void
+bfa_fru_notify(void *cbarg, enum bfa_ioc_event_e event)
+{
+	struct bfa_fru_s *fru = cbarg;
+
+	bfa_trc(fru, event);
+
+	switch (event) {
+	case BFA_IOC_E_DISABLED:
+	case BFA_IOC_E_FAILED:
+		if (fru->op_busy) {
+			fru->status = BFA_STATUS_IOC_FAILURE;
+			fru->cbfn(fru->cbarg, fru->status);
+			fru->op_busy = 0;
+		}
+		break;
+
+	default:
+		break;
+	}
+}
+
+/*
+ * Send fru write request.
+ *
+ * @param[in] cbarg - callback argument (the fru structure)
+ * @param[in] msg_type - fru write request message type
+ */
+static void
+bfa_fru_write_send(void *cbarg, enum bfi_fru_h2i_msgs msg_type)
+{
+	struct bfa_fru_s *fru = cbarg;
+	struct bfi_fru_write_req_s *msg =
+			(struct bfi_fru_write_req_s *) fru->mb.msg;
+	u32 len;
+
+	msg->offset = cpu_to_be32(fru->addr_off + fru->offset);
+	len = (fru->residue < BFA_FRU_DMA_BUF_SZ) ?
+				fru->residue : BFA_FRU_DMA_BUF_SZ;
+	msg->length = cpu_to_be32(len);
+
+	/*
+	 * indicate if it's the last msg of the whole write operation
+	 */
+	msg->last = (len == fru->residue) ? 1 : 0;
+
+	bfi_h2i_set(msg->mh, BFI_MC_FRU, msg_type, bfa_ioc_portid(fru->ioc));
+	bfa_alen_set(&msg->alen, len, fru->dbuf_pa);
+
+	memcpy(fru->dbuf_kva, fru->ubuf + fru->offset, len);
+	bfa_ioc_mbox_queue(fru->ioc, &fru->mb);
+
+	fru->residue -= len;
+	fru->offset += len;
+}
+
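The write path streams the user buffer through the 8k DMA staging buffer one fragment per mailbox exchange, advancing residue/offset each time and flagging the final fragment via msg->last. A minimal sketch of the same chunking pattern (standalone, with illustrative names; BUF_SZ stands in for BFA_FRU_DMA_BUF_SZ):

	#include <stdint.h>

	#define BUF_SZ	0x2000

	static void fru_write_all(const uint8_t *ubuf, uint32_t total,
			void (*send)(const uint8_t *frag, uint32_t len, int last))
	{
		uint32_t residue = total, offset = 0;

		while (residue) {
			uint32_t len = residue < BUF_SZ ? residue : BUF_SZ;

			/* 'last' mirrors msg->last: set on the final fragment */
			send(ubuf + offset, len, len == residue);
			residue -= len;
			offset += len;
		}
	}
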
+/*
+ * Send fru read request.
+ *
+ * @param[in] cbarg - callback argument (the fru structure)
+ * @param[in] msg_type - fru read request message type
+ */
+static void
+bfa_fru_read_send(void *cbarg, enum bfi_fru_h2i_msgs msg_type)
+{
+	struct bfa_fru_s *fru = cbarg;
+	struct bfi_fru_read_req_s *msg =
+			(struct bfi_fru_read_req_s *) fru->mb.msg;
+	u32 len;
+
+	msg->offset = cpu_to_be32(fru->addr_off + fru->offset);
+	len = (fru->residue < BFA_FRU_DMA_BUF_SZ) ?
+				fru->residue : BFA_FRU_DMA_BUF_SZ;
+	msg->length = cpu_to_be32(len);
+	bfi_h2i_set(msg->mh, BFI_MC_FRU, msg_type, bfa_ioc_portid(fru->ioc));
+	bfa_alen_set(&msg->alen, len, fru->dbuf_pa);
+	bfa_ioc_mbox_queue(fru->ioc, &fru->mb);
+}
+
+/*
+ * FRU memory info API.
+ *
+ * @param[in] mincfg - minimal cfg variable
+ */
+u32
+bfa_fru_meminfo(bfa_boolean_t mincfg)
+{
+	/* min driver doesn't need fru */
+	if (mincfg)
+		return 0;
+
+	return BFA_ROUNDUP(BFA_FRU_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
+}
+
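The returned size is the staging buffer rounded up to the DMA alignment. Assuming BFA_ROUNDUP is the usual power-of-two round-up (a sketch, not quoted from the bfa headers):

	#define ROUNDUP_POW2(len, align)	(((len) + (align) - 1) & ~((align) - 1))
	/* e.g. ROUNDUP_POW2(0x2000, 0x100) == 0x2000, ROUNDUP_POW2(0x2001, 0x100) == 0x2100 */
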
+/*
+ * FRU attach API.
+ *
+ * @param[in] fru - fru structure
+ * @param[in] ioc  - ioc structure
+ * @param[in] dev  - device structure
+ * @param[in] trcmod - trace module
+ * @param[in] mincfg - minimal cfg variable
+ */
+void
+bfa_fru_attach(struct bfa_fru_s *fru, struct bfa_ioc_s *ioc, void *dev,
+	struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
+{
+	fru->ioc = ioc;
+	fru->trcmod = trcmod;
+	fru->cbfn = NULL;
+	fru->cbarg = NULL;
+	fru->op_busy = 0;
+
+	bfa_ioc_mbox_regisr(fru->ioc, BFI_MC_FRU, bfa_fru_intr, fru);
+	bfa_q_qe_init(&fru->ioc_notify);
+	bfa_ioc_notify_init(&fru->ioc_notify, bfa_fru_notify, fru);
+	list_add_tail(&fru->ioc_notify.qe, &fru->ioc->notify_q);
+
+	/* min driver doesn't need fru */
+	if (mincfg) {
+		fru->dbuf_kva = NULL;
+		fru->dbuf_pa = 0;
+	}
+}
+
+/*
+ * Claim memory for fru
+ *
+ * @param[in] fru - fru structure
+ * @param[in] dm_kva - pointer to virtual memory address
+ * @param[in] dm_pa - physical memory address
+ * @param[in] mincfg - minimal cfg variable
+ */
+void
+bfa_fru_memclaim(struct bfa_fru_s *fru, u8 *dm_kva, u64 dm_pa,
+	bfa_boolean_t mincfg)
+{
+	if (mincfg)
+		return;
+
+	fru->dbuf_kva = dm_kva;
+	fru->dbuf_pa = dm_pa;
+	memset(fru->dbuf_kva, 0, BFA_FRU_DMA_BUF_SZ);
+	dm_kva += BFA_ROUNDUP(BFA_FRU_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
+	dm_pa += BFA_ROUNDUP(BFA_FRU_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
+}
+
+/*
+ * Update fru vpd image.
+ *
+ * @param[in] fru - fru structure
+ * @param[in] buf - update data buffer
+ * @param[in] len - data buffer length
+ * @param[in] offset - offset relative to starting address
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback argument
+ *
+ * Return status.
+ */
+bfa_status_t
+bfa_fruvpd_update(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset,
+		  bfa_cb_fru_t cbfn, void *cbarg)
+{
+	bfa_trc(fru, BFI_FRUVPD_H2I_WRITE_REQ);
+	bfa_trc(fru, len);
+	bfa_trc(fru, offset);
+
+	if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
+		return BFA_STATUS_FRU_NOT_PRESENT;
+
+	if (fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK)
+		return BFA_STATUS_CMD_NOTSUPP;
+
+	if (!bfa_ioc_is_operational(fru->ioc))
+		return BFA_STATUS_IOC_NON_OP;
+
+	if (fru->op_busy) {
+		bfa_trc(fru, fru->op_busy);
+		return BFA_STATUS_DEVBUSY;
+	}
+
+	fru->op_busy = 1;
+
+	fru->cbfn = cbfn;
+	fru->cbarg = cbarg;
+	fru->residue = len;
+	fru->offset = 0;
+	fru->addr_off = offset;
+	fru->ubuf = buf;
+
+	bfa_fru_write_send(fru, BFI_FRUVPD_H2I_WRITE_REQ);
+
+	return BFA_STATUS_OK;
+}
+
+/*
+ * Read fru vpd image.
+ *
+ * @param[in] fru - fru structure
+ * @param[in] buf - read data buffer
+ * @param[in] len - data buffer length
+ * @param[in] offset - offset relative to starting address
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback argument
+ *
+ * Return status.
+ */
+bfa_status_t
+bfa_fruvpd_read(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset,
+		bfa_cb_fru_t cbfn, void *cbarg)
+{
+	bfa_trc(fru, BFI_FRUVPD_H2I_READ_REQ);
+	bfa_trc(fru, len);
+	bfa_trc(fru, offset);
+
+	if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
+		return BFA_STATUS_FRU_NOT_PRESENT;
+
+	if (fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK)
+		return BFA_STATUS_CMD_NOTSUPP;
+
+	if (!bfa_ioc_is_operational(fru->ioc))
+		return BFA_STATUS_IOC_NON_OP;
+
+	if (fru->op_busy) {
+		bfa_trc(fru, fru->op_busy);
+		return BFA_STATUS_DEVBUSY;
+	}
+
+	fru->op_busy = 1;
+
+	fru->cbfn = cbfn;
+	fru->cbarg = cbarg;
+	fru->residue = len;
+	fru->offset = 0;
+	fru->addr_off = offset;
+	fru->ubuf = buf;
+	bfa_fru_read_send(fru, BFI_FRUVPD_H2I_READ_REQ);
+
+	return BFA_STATUS_OK;
+}
+
+/*
+ * Get maximum size fru vpd image.
+ *
+ * @param[in] fru - fru structure
+ * @param[out] max_size - maximum size of fru vpd data
+ *
+ * Return status.
+ */
+bfa_status_t
+bfa_fruvpd_get_max_size(struct bfa_fru_s *fru, u32 *max_size)
+{
+	if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
+		return BFA_STATUS_FRU_NOT_PRESENT;
+
+	if (!bfa_ioc_is_operational(fru->ioc))
+		return BFA_STATUS_IOC_NON_OP;
+
+	if (fru->ioc->attr->card_type == BFA_MFG_TYPE_CHINOOK)
+		*max_size = BFA_FRU_CHINOOK_MAX_SIZE;
+	else
+		return BFA_STATUS_CMD_NOTSUPP;
+	return BFA_STATUS_OK;
+}
+
+/*
+ * tfru write.
+ *
+ * @param[in] fru - fru structure
+ * @param[in] buf - update data buffer
+ * @param[in] len - data buffer length
+ * @param[in] offset - offset relative to starting address
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback argument
+ *
+ * Return status.
+ */
+bfa_status_t
+bfa_tfru_write(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset,
+	       bfa_cb_fru_t cbfn, void *cbarg)
+{
+	bfa_trc(fru, BFI_TFRU_H2I_WRITE_REQ);
+	bfa_trc(fru, len);
+	bfa_trc(fru, offset);
+	bfa_trc(fru, *((u8 *) buf));
+
+	if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
+		return BFA_STATUS_FRU_NOT_PRESENT;
+
+	if (!bfa_ioc_is_operational(fru->ioc))
+		return BFA_STATUS_IOC_NON_OP;
+
+	if (fru->op_busy) {
+		bfa_trc(fru, fru->op_busy);
+		return BFA_STATUS_DEVBUSY;
+	}
+
+	fru->op_busy = 1;
+
+	fru->cbfn = cbfn;
+	fru->cbarg = cbarg;
+	fru->residue = len;
+	fru->offset = 0;
+	fru->addr_off = offset;
+	fru->ubuf = buf;
+
+	bfa_fru_write_send(fru, BFI_TFRU_H2I_WRITE_REQ);
+
+	return BFA_STATUS_OK;
+}
+
+/*
+ * tfru read.
+ *
+ * @param[in] fru - fru structure
+ * @param[in] buf - read data buffer
+ * @param[in] len - data buffer length
+ * @param[in] offset - offset relative to starting address
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback argument
+ *
+ * Return status.
+ */
+bfa_status_t
+bfa_tfru_read(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset,
+	      bfa_cb_fru_t cbfn, void *cbarg)
+{
+	bfa_trc(fru, BFI_TFRU_H2I_READ_REQ);
+	bfa_trc(fru, len);
+	bfa_trc(fru, offset);
+
+	if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
+		return BFA_STATUS_FRU_NOT_PRESENT;
+
+	if (!bfa_ioc_is_operational(fru->ioc))
+		return BFA_STATUS_IOC_NON_OP;
+
+	if (fru->op_busy) {
+		bfa_trc(fru, fru->op_busy);
+		return BFA_STATUS_DEVBUSY;
+	}
+
+	fru->op_busy = 1;
+
+	fru->cbfn = cbfn;
+	fru->cbarg = cbarg;
+	fru->residue = len;
+	fru->offset = 0;
+	fru->addr_off = offset;
+	fru->ubuf = buf;
+	bfa_fru_read_send(fru, BFI_TFRU_H2I_READ_REQ);
+
+	return BFA_STATUS_OK;
+}
+
+/*
+ * Process fru response messages upon receiving interrupts.
+ *
+ * @param[in] fruarg - fru structure
+ * @param[in] msg - message structure
+ */
+void
+bfa_fru_intr(void *fruarg, struct bfi_mbmsg_s *msg)
+{
+	struct bfa_fru_s *fru = fruarg;
+	struct bfi_fru_rsp_s *rsp = (struct bfi_fru_rsp_s *)msg;
+	u32 status;
+
+	bfa_trc(fru, msg->mh.msg_id);
+
+	if (!fru->op_busy) {
+		/*
+		 * receiving response after ioc failure
+		 */
+		bfa_trc(fru, 0x9999);
+		return;
+	}
+
+	switch (msg->mh.msg_id) {
+	case BFI_FRUVPD_I2H_WRITE_RSP:
+	case BFI_TFRU_I2H_WRITE_RSP:
+		status = be32_to_cpu(rsp->status);
+		bfa_trc(fru, status);
+
+		if (status != BFA_STATUS_OK || fru->residue == 0) {
+			fru->status = status;
+			fru->op_busy = 0;
+			if (fru->cbfn)
+				fru->cbfn(fru->cbarg, fru->status);
+		} else {
+			bfa_trc(fru, fru->offset);
+			if (msg->mh.msg_id == BFI_FRUVPD_I2H_WRITE_RSP)
+				bfa_fru_write_send(fru,
+					BFI_FRUVPD_H2I_WRITE_REQ);
+			else
+				bfa_fru_write_send(fru,
+					BFI_TFRU_H2I_WRITE_REQ);
+		}
+		break;
+	case BFI_FRUVPD_I2H_READ_RSP:
+	case BFI_TFRU_I2H_READ_RSP:
+		status = be32_to_cpu(rsp->status);
+		bfa_trc(fru, status);
+
+		if (status != BFA_STATUS_OK) {
+			fru->status = status;
+			fru->op_busy = 0;
+			if (fru->cbfn)
+				fru->cbfn(fru->cbarg, fru->status);
+		} else {
+			u32 len = be32_to_cpu(rsp->length);
+
+			bfa_trc(fru, fru->offset);
+			bfa_trc(fru, len);
+
+			memcpy(fru->ubuf + fru->offset, fru->dbuf_kva, len);
+			fru->residue -= len;
+			fru->offset += len;
+
+			if (fru->residue == 0) {
+				fru->status = status;
+				fru->op_busy = 0;
+				if (fru->cbfn)
+					fru->cbfn(fru->cbarg, fru->status);
+			} else {
+				if (msg->mh.msg_id == BFI_FRUVPD_I2H_READ_RSP)
+					bfa_fru_read_send(fru,
+						BFI_FRUVPD_H2I_READ_REQ);
+				else
+					bfa_fru_read_send(fru,
+						BFI_TFRU_H2I_READ_REQ);
+			}
+		}
+		break;
+	default:
+		WARN_ON(1);
+	}
+}
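The response handler above re-arms the next fragment until residue hits zero, then fires the caller's callback. A hypothetical caller could bridge the asynchronous read API declared in bfa_ioc.h to a blocking call with a completion; the context struct and helper names below are illustrative, and the wait assumes a sleepable context:

	#include <linux/completion.h>

	struct fru_read_ctx {
		struct completion done;
		bfa_status_t status;
	};

	static void fru_read_done(void *cbarg, bfa_status_t status)
	{
		struct fru_read_ctx *ctx = cbarg;

		ctx->status = status;
		complete(&ctx->done);
	}

	static bfa_status_t read_fru_vpd(struct bfa_s *bfa, void *buf, u32 len)
	{
		struct fru_read_ctx ctx;
		bfa_status_t rc;

		init_completion(&ctx.done);
		rc = bfa_fruvpd_read(BFA_FRU(bfa), buf, len, 0,
				     fru_read_done, &ctx);
		if (rc != BFA_STATUS_OK)
			return rc;	/* DEVBUSY, IOC_NON_OP, CMD_NOTSUPP, ... */

		wait_for_completion(&ctx.done);
		return ctx.status;
	}
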
diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
index b2856f96567c..23a90e7b7107 100644
--- a/drivers/scsi/bfa/bfa_ioc.h
+++ b/drivers/scsi/bfa/bfa_ioc.h
@@ -702,6 +702,55 @@ void bfa_phy_memclaim(struct bfa_phy_s *phy,
 void bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg);
 
 /*
+ * FRU module specific
+ */
+typedef void (*bfa_cb_fru_t) (void *cbarg, bfa_status_t status);
+
+struct bfa_fru_s {
+	struct bfa_ioc_s *ioc;		/* back pointer to ioc */
+	struct bfa_trc_mod_s *trcmod;	/* trace module */
+	u8		op_busy;	/* operation busy flag */
+	u8		rsv[3];
+	u32		residue;	/* residual length */
+	u32		offset;		/* offset */
+	bfa_status_t	status;		/* status */
+	u8		*dbuf_kva;	/* dma buf virtual address */
+	u64		dbuf_pa;	/* dma buf physical address */
+	struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */
+	bfa_cb_fru_t	cbfn;		/* user callback function */
+	void		*cbarg;		/* user callback arg */
+	u8		*ubuf;		/* user supplied buffer */
+	struct bfa_cb_qe_s	hcb_qe;	/* comp: BFA callback qelem */
+	u32		addr_off;	/* fru address offset */
+	struct bfa_mbox_cmd_s mb;	/* mailbox */
+	struct bfa_ioc_notify_s ioc_notify; /* ioc event notify */
+	struct bfa_mem_dma_s	fru_dma;
+};
+
+#define BFA_FRU(__bfa)	(&(__bfa)->modules.fru)
+#define BFA_MEM_FRU_DMA(__bfa)	(&(BFA_FRU(__bfa)->fru_dma))
+
+bfa_status_t bfa_fruvpd_update(struct bfa_fru_s *fru,
+			void *buf, u32 len, u32 offset,
+			bfa_cb_fru_t cbfn, void *cbarg);
+bfa_status_t bfa_fruvpd_read(struct bfa_fru_s *fru,
+			void *buf, u32 len, u32 offset,
+			bfa_cb_fru_t cbfn, void *cbarg);
+bfa_status_t bfa_fruvpd_get_max_size(struct bfa_fru_s *fru, u32 *max_size);
+bfa_status_t bfa_tfru_write(struct bfa_fru_s *fru,
+			void *buf, u32 len, u32 offset,
+			bfa_cb_fru_t cbfn, void *cbarg);
+bfa_status_t bfa_tfru_read(struct bfa_fru_s *fru,
+			void *buf, u32 len, u32 offset,
+			bfa_cb_fru_t cbfn, void *cbarg);
+u32	bfa_fru_meminfo(bfa_boolean_t mincfg);
+void bfa_fru_attach(struct bfa_fru_s *fru, struct bfa_ioc_s *ioc,
+		void *dev, struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg);
+void bfa_fru_memclaim(struct bfa_fru_s *fru,
+		u8 *dm_kva, u64 dm_pa, bfa_boolean_t mincfg);
+void bfa_fru_intr(void *fruarg, struct bfi_mbmsg_s *msg);
+
+/*
  * Driver Config( dconf) specific
  */
 #define BFI_DCONF_SIGNATURE	0xabcdabcd
@@ -716,6 +765,7 @@ struct bfa_dconf_hdr_s {
 struct bfa_dconf_s {
 	struct bfa_dconf_hdr_s		hdr;
 	struct bfa_lunmask_cfg_s	lun_mask;
+	struct bfa_throttle_cfg_s	throttle_cfg;
 };
 #pragma pack()
 
@@ -738,6 +788,8 @@ struct bfa_dconf_mod_s {
 #define bfa_dconf_read_data_valid(__bfa)	\
 	(BFA_DCONF_MOD(__bfa)->read_data_valid)
 #define BFA_DCONF_UPDATE_TOV	5000	/* dconf update timeout in msec */
+#define bfa_dconf_get_min_cfg(__bfa)	\
+	(BFA_DCONF_MOD(__bfa)->min_cfg)
 
 void	bfa_dconf_modinit(struct bfa_s *bfa);
 void	bfa_dconf_modexit(struct bfa_s *bfa);
@@ -761,7 +813,8 @@ bfa_status_t	bfa_dconf_update(struct bfa_s *bfa);
 #define bfa_ioc_maxfrsize(__ioc)	((__ioc)->attr->maxfrsize)
 #define bfa_ioc_rx_bbcredit(__ioc)	((__ioc)->attr->rx_bbcredit)
 #define bfa_ioc_speed_sup(__ioc)	\
-	BFI_ADAPTER_GETP(SPEED, (__ioc)->attr->adapter_prop)
+	((bfa_ioc_is_cna(__ioc)) ? BFA_PORT_SPEED_10GBPS :	\
+	 BFI_ADAPTER_GETP(SPEED, (__ioc)->attr->adapter_prop))
 #define bfa_ioc_get_nports(__ioc)	\
 	BFI_ADAPTER_GETP(NPORTS, (__ioc)->attr->adapter_prop)
 
@@ -885,12 +938,12 @@ bfa_status_t bfa_ablk_port_config(struct bfa_ablk_s *ablk, int port,
 		enum bfa_mode_s mode, int max_pf, int max_vf,
 		bfa_ablk_cbfn_t cbfn, void *cbarg);
 bfa_status_t bfa_ablk_pf_create(struct bfa_ablk_s *ablk, u16 *pcifn,
-		u8 port, enum bfi_pcifn_class personality, int bw,
-		bfa_ablk_cbfn_t cbfn, void *cbarg);
+		u8 port, enum bfi_pcifn_class personality,
+		u16 bw_min, u16 bw_max, bfa_ablk_cbfn_t cbfn, void *cbarg);
 bfa_status_t bfa_ablk_pf_delete(struct bfa_ablk_s *ablk, int pcifn,
 		bfa_ablk_cbfn_t cbfn, void *cbarg);
-bfa_status_t bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn, int bw,
-		bfa_ablk_cbfn_t cbfn, void *cbarg);
+bfa_status_t bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn,
+		u16 bw_min, u16 bw_max, bfa_ablk_cbfn_t cbfn, void *cbarg);
 bfa_status_t bfa_ablk_optrom_en(struct bfa_ablk_s *ablk,
 		bfa_ablk_cbfn_t cbfn, void *cbarg);
 bfa_status_t bfa_ablk_optrom_dis(struct bfa_ablk_s *ablk,
diff --git a/drivers/scsi/bfa/bfa_ioc_ct.c b/drivers/scsi/bfa/bfa_ioc_ct.c
index 2eb0c6a2938d..de4e726a1263 100644
--- a/drivers/scsi/bfa/bfa_ioc_ct.c
+++ b/drivers/scsi/bfa/bfa_ioc_ct.c
@@ -57,13 +57,6 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
 	u32 usecnt;
 	struct bfi_ioc_image_hdr_s fwhdr;
 
-	/*
-	 * If bios boot (flash based) -- do not increment usage count
-	 */
-	if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) <
-						BFA_IOC_FWIMG_MINSZ)
-		return BFA_TRUE;
-
 	bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
 	usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
 
@@ -115,13 +108,6 @@ bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc)
 	u32 usecnt;
 
 	/*
-	 * If bios boot (flash based) -- do not decrement usage count
-	 */
-	if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) <
-						BFA_IOC_FWIMG_MINSZ)
-		return;
-
-	/*
 	 * decrement usage count
 	 */
 	bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
@@ -400,13 +386,12 @@ static void
 bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc)
 {
 
-	if (bfa_ioc_is_cna(ioc)) {
-		bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
-		writel(0, ioc->ioc_regs.ioc_usage_reg);
-		readl(ioc->ioc_regs.ioc_usage_sem_reg);
-		writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
-	}
+	bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
+	writel(0, ioc->ioc_regs.ioc_usage_reg);
+	readl(ioc->ioc_regs.ioc_usage_sem_reg);
+	writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
 
+	writel(0, ioc->ioc_regs.ioc_fail_sync);
 	/*
 	 * Read the hw sem reg to make sure that it is locked
 	 * before we clear it. If it is not locked, writing 1
@@ -759,25 +744,6 @@ bfa_ioc_ct2_mem_init(void __iomem *rb)
 void
 bfa_ioc_ct2_mac_reset(void __iomem *rb)
 {
-	u32	r32;
-
-	bfa_ioc_ct2_sclk_init(rb);
-	bfa_ioc_ct2_lclk_init(rb);
-
-	/*
-	 * release soft reset on s_clk & l_clk
-	 */
-	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
-	writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
-		(rb + CT2_APP_PLL_SCLK_CTL_REG));
-
-	/*
-	 * release soft reset on s_clk & l_clk
-	 */
-	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
-	writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
-		(rb + CT2_APP_PLL_LCLK_CTL_REG));
-
 	/* put port0, port1 MAC & AHB in reset */
 	writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET),
 		rb + CT2_CSI_MAC_CONTROL_REG(0));
@@ -785,8 +751,21 @@ bfa_ioc_ct2_mac_reset(void __iomem *rb)
 		rb + CT2_CSI_MAC_CONTROL_REG(1));
 }
 
+static void
+bfa_ioc_ct2_enable_flash(void __iomem *rb)
+{
+	u32 r32;
+
+	r32 = readl((rb + PSS_GPIO_OUT_REG));
+	writel(r32 & ~1, (rb + PSS_GPIO_OUT_REG));
+	r32 = readl((rb + PSS_GPIO_OE_REG));
+	writel(r32 | 1, (rb + PSS_GPIO_OE_REG));
+}
+
 #define CT2_NFC_MAX_DELAY	1000
-#define CT2_NFC_VER_VALID	0x143
+#define CT2_NFC_PAUSE_MAX_DELAY 4000
+#define CT2_NFC_VER_VALID	0x147
+#define CT2_NFC_STATE_RUNNING   0x20000001
 #define BFA_IOC_PLL_POLL	1000000
 
 static bfa_boolean_t
@@ -802,6 +781,20 @@ bfa_ioc_ct2_nfc_halted(void __iomem *rb)
 }
 
 static void
+bfa_ioc_ct2_nfc_halt(void __iomem *rb)
+{
+	int	i;
+
+	writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_SET_REG);
+	for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
+		if (bfa_ioc_ct2_nfc_halted(rb))
+			break;
+		udelay(1000);
+	}
+	WARN_ON(!bfa_ioc_ct2_nfc_halted(rb));
+}
+
+static void
 bfa_ioc_ct2_nfc_resume(void __iomem *rb)
 {
 	u32	r32;
@@ -817,105 +810,142 @@ bfa_ioc_ct2_nfc_resume(void __iomem *rb)
 	WARN_ON(1);
 }
 
-bfa_status_t
-bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode mode)
+static void
+bfa_ioc_ct2_clk_reset(void __iomem *rb)
 {
-	u32 wgn, r32, nfc_ver, i;
+	u32 r32;
 
-	wgn = readl(rb + CT2_WGN_STATUS);
-	nfc_ver = readl(rb + CT2_RSC_GPR15_REG);
+	bfa_ioc_ct2_sclk_init(rb);
+	bfa_ioc_ct2_lclk_init(rb);
 
-	if ((wgn == (__A2T_AHB_LOAD | __WGN_READY)) &&
-	    (nfc_ver >= CT2_NFC_VER_VALID)) {
-		if (bfa_ioc_ct2_nfc_halted(rb))
-			bfa_ioc_ct2_nfc_resume(rb);
+	/*
+	 * release soft reset on s_clk & l_clk
+	 */
+	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
+	writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
+			(rb + CT2_APP_PLL_SCLK_CTL_REG));
 
-		writel(__RESET_AND_START_SCLK_LCLK_PLLS,
-		       rb + CT2_CSI_FW_CTL_SET_REG);
+	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
+	writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
+			(rb + CT2_APP_PLL_LCLK_CTL_REG));
 
-		for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
-			r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
-			if (r32 & __RESET_AND_START_SCLK_LCLK_PLLS)
-				break;
-		}
+}
 
-		WARN_ON(!(r32 & __RESET_AND_START_SCLK_LCLK_PLLS));
+static void
+bfa_ioc_ct2_nfc_clk_reset(void __iomem *rb)
+{
+	u32 r32, i;
 
-		for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
-			r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
-			if (!(r32 & __RESET_AND_START_SCLK_LCLK_PLLS))
-				break;
-		}
+	r32 = readl((rb + PSS_CTL_REG));
+	r32 |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);
+	writel(r32, (rb + PSS_CTL_REG));
+
+	writel(__RESET_AND_START_SCLK_LCLK_PLLS, rb + CT2_CSI_FW_CTL_SET_REG);
 
-		WARN_ON(r32 & __RESET_AND_START_SCLK_LCLK_PLLS);
+	for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
+		r32 = readl(rb + CT2_NFC_FLASH_STS_REG);
+
+		if ((r32 & __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS))
+			break;
+	}
+	WARN_ON(!(r32 & __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS));
+
+	for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
+		r32 = readl(rb + CT2_NFC_FLASH_STS_REG);
+
+		if (!(r32 & __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS))
+			break;
+	}
+	WARN_ON((r32 & __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS));
+
+	r32 = readl(rb + CT2_CSI_FW_CTL_REG);
+	WARN_ON((r32 & __RESET_AND_START_SCLK_LCLK_PLLS));
+}
+
+static void
+bfa_ioc_ct2_wait_till_nfc_running(void __iomem *rb)
+{
+	u32 r32;
+	int i;
+
+	if (bfa_ioc_ct2_nfc_halted(rb))
+		bfa_ioc_ct2_nfc_resume(rb);
+	for (i = 0; i < CT2_NFC_PAUSE_MAX_DELAY; i++) {
+		r32 = readl(rb + CT2_NFC_STS_REG);
+		if (r32 == CT2_NFC_STATE_RUNNING)
+			return;
 		udelay(1000);
+	}
 
-		r32 = readl(rb + CT2_CSI_FW_CTL_REG);
-		WARN_ON(r32 & __RESET_AND_START_SCLK_LCLK_PLLS);
-	} else {
-		writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_SET_REG);
-		for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
-			r32 = readl(rb + CT2_NFC_CSR_SET_REG);
-			if (r32 & __NFC_CONTROLLER_HALTED)
-				break;
-			udelay(1000);
-		}
+	r32 = readl(rb + CT2_NFC_STS_REG);
+	WARN_ON(r32 != CT2_NFC_STATE_RUNNING);
+}
 
-		bfa_ioc_ct2_mac_reset(rb);
-		bfa_ioc_ct2_sclk_init(rb);
-		bfa_ioc_ct2_lclk_init(rb);
+bfa_status_t
+bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode mode)
+{
+	u32 wgn, r32, nfc_ver;
 
-		/*
-		 * release soft reset on s_clk & l_clk
-		 */
-		r32 = readl(rb + CT2_APP_PLL_SCLK_CTL_REG);
-		writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
-		       (rb + CT2_APP_PLL_SCLK_CTL_REG));
+	wgn = readl(rb + CT2_WGN_STATUS);
 
+	if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) {
 		/*
-		 * release soft reset on s_clk & l_clk
+		 * If flash is corrupted, enable flash explicitly
 		 */
-		r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
-		writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
-		      (rb + CT2_APP_PLL_LCLK_CTL_REG));
-	}
+		bfa_ioc_ct2_clk_reset(rb);
+		bfa_ioc_ct2_enable_flash(rb);
 
-	/*
-	 * Announce flash device presence, if flash was corrupted.
-	 */
-	if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) {
-		r32 = readl(rb + PSS_GPIO_OUT_REG);
-		writel(r32 & ~1, (rb + PSS_GPIO_OUT_REG));
-		r32 = readl(rb + PSS_GPIO_OE_REG);
-		writel(r32 | 1, (rb + PSS_GPIO_OE_REG));
+		bfa_ioc_ct2_mac_reset(rb);
+
+		bfa_ioc_ct2_clk_reset(rb);
+		bfa_ioc_ct2_enable_flash(rb);
+
+	} else {
+		nfc_ver = readl(rb + CT2_RSC_GPR15_REG);
+
+		if ((nfc_ver >= CT2_NFC_VER_VALID) &&
+		    (wgn == (__A2T_AHB_LOAD | __WGN_READY))) {
+
+			bfa_ioc_ct2_wait_till_nfc_running(rb);
+
+			bfa_ioc_ct2_nfc_clk_reset(rb);
+		} else {
+			bfa_ioc_ct2_nfc_halt(rb);
+
+			bfa_ioc_ct2_clk_reset(rb);
+			bfa_ioc_ct2_mac_reset(rb);
+			bfa_ioc_ct2_clk_reset(rb);
+
+		}
 	}
 
 	/*
 	 * Mask the interrupts and clear any
-	 * pending interrupts.
+	 * pending interrupts left by BIOS/EFI
 	 */
+
 	writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK));
 	writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK));
 
 	/* For first time initialization, no need to clear interrupts */
 	r32 = readl(rb + HOST_SEM5_REG);
 	if (r32 & 0x1) {
-		r32 = readl(rb + CT2_LPU0_HOSTFN_CMD_STAT);
+		r32 = readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
 		if (r32 == 1) {
-			writel(1, rb + CT2_LPU0_HOSTFN_CMD_STAT);
+			writel(1, (rb + CT2_LPU0_HOSTFN_CMD_STAT));
 			readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
 		}
-		r32 = readl(rb + CT2_LPU1_HOSTFN_CMD_STAT);
+		r32 = readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
 		if (r32 == 1) {
-			writel(1, rb + CT2_LPU1_HOSTFN_CMD_STAT);
-			readl(rb + CT2_LPU1_HOSTFN_CMD_STAT);
+			writel(1, (rb + CT2_LPU1_HOSTFN_CMD_STAT));
+			readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
 		}
 	}
 
 	bfa_ioc_ct2_mem_init(rb);
 
-	writel(BFI_IOC_UNINIT, rb + CT2_BFA_IOC0_STATE_REG);
-	writel(BFI_IOC_UNINIT, rb + CT2_BFA_IOC1_STATE_REG);
+	writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC0_STATE_REG));
+	writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC1_STATE_REG));
 
 	return BFA_STATUS_OK;
 }
diff --git a/drivers/scsi/bfa/bfa_modules.h b/drivers/scsi/bfa/bfa_modules.h
index 189fff71e3c2..a14c784ff3fc 100644
--- a/drivers/scsi/bfa/bfa_modules.h
+++ b/drivers/scsi/bfa/bfa_modules.h
@@ -45,6 +45,7 @@ struct bfa_modules_s {
 	struct bfa_diag_s	diag_mod;	/*  diagnostics module	*/
 	struct bfa_phy_s	phy;		/*  phy module		*/
 	struct bfa_dconf_mod_s	dconf_mod;	/*  DCONF common module	*/
+	struct bfa_fru_s	fru;		/*  fru module		*/
 };
 
 /*
diff --git a/drivers/scsi/bfa/bfa_port.c b/drivers/scsi/bfa/bfa_port.c
index 95e4ad8759ac..8ea7697deb9b 100644
--- a/drivers/scsi/bfa/bfa_port.c
+++ b/drivers/scsi/bfa/bfa_port.c
@@ -250,6 +250,12 @@ bfa_port_enable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn,
 		return BFA_STATUS_IOC_FAILURE;
 	}
 
+	/* if port is d-port enabled, return error */
+	if (port->dport_enabled) {
+		bfa_trc(port, BFA_STATUS_DPORT_ERR);
+		return BFA_STATUS_DPORT_ERR;
+	}
+
 	if (port->endis_pending) {
 		bfa_trc(port, BFA_STATUS_DEVBUSY);
 		return BFA_STATUS_DEVBUSY;
@@ -300,6 +306,12 @@ bfa_port_disable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn,
 		return BFA_STATUS_IOC_FAILURE;
 	}
 
+	/* if port is d-port enabled, return error */
+	if (port->dport_enabled) {
+		bfa_trc(port, BFA_STATUS_DPORT_ERR);
+		return BFA_STATUS_DPORT_ERR;
+	}
+
 	if (port->endis_pending) {
 		bfa_trc(port, BFA_STATUS_DEVBUSY);
 		return BFA_STATUS_DEVBUSY;
@@ -431,6 +443,10 @@ bfa_port_notify(void *arg, enum bfa_ioc_event_e event)
 			port->endis_cbfn = NULL;
 			port->endis_pending = BFA_FALSE;
 		}
+
+		/* clear D-port mode */
+		if (port->dport_enabled)
+			bfa_port_set_dportenabled(port, BFA_FALSE);
 		break;
 	default:
 		break;
@@ -467,6 +483,7 @@ bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc,
 	port->stats_cbfn = NULL;
 	port->endis_cbfn = NULL;
 	port->pbc_disabled = BFA_FALSE;
+	port->dport_enabled = BFA_FALSE;
 
 	bfa_ioc_mbox_regisr(port->ioc, BFI_MC_PORT, bfa_port_isr, port);
 	bfa_q_qe_init(&port->ioc_notify);
@@ -483,6 +500,21 @@ bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc,
 }
 
 /*
+ * bfa_port_set_dportenabled();
+ *
+ * Port module - set dport enabled flag
+ *
+ * @param[in] port - Pointer to the Port module data structure
+ *
+ * @return void
+ */
+void
+bfa_port_set_dportenabled(struct bfa_port_s *port, bfa_boolean_t enabled)
+{
+	port->dport_enabled = enabled;
+}
+
+/*
  *	CEE module specific definitions
  */
 
diff --git a/drivers/scsi/bfa/bfa_port.h b/drivers/scsi/bfa/bfa_port.h
index 947f897328d6..2fcab6bc6280 100644
--- a/drivers/scsi/bfa/bfa_port.h
+++ b/drivers/scsi/bfa/bfa_port.h
@@ -45,6 +45,7 @@ struct bfa_port_s {
 	bfa_status_t			endis_status;
 	struct bfa_ioc_notify_s		ioc_notify;
 	bfa_boolean_t			pbc_disabled;
+	bfa_boolean_t			dport_enabled;
 	struct bfa_mem_dma_s		port_dma;
 };
 
@@ -66,6 +67,8 @@ bfa_status_t bfa_port_disable(struct bfa_port_s *port,
 u32     bfa_port_meminfo(void);
 void	     bfa_port_mem_claim(struct bfa_port_s *port,
 				 u8 *dma_kva, u64 dma_pa);
+void	bfa_port_set_dportenabled(struct bfa_port_s *port,
+				  bfa_boolean_t enabled);
 
 /*
  * CEE declaration
diff --git a/drivers/scsi/bfa/bfa_svc.c b/drivers/scsi/bfa/bfa_svc.c
index b2538d60db34..299c1c889b33 100644
--- a/drivers/scsi/bfa/bfa_svc.c
+++ b/drivers/scsi/bfa/bfa_svc.c
@@ -67,6 +67,9 @@ enum bfa_fcport_sm_event {
 	BFA_FCPORT_SM_LINKDOWN	= 7,	/*  firmware linkup down	*/
 	BFA_FCPORT_SM_QRESUME	= 8,	/*  CQ space available	*/
 	BFA_FCPORT_SM_HWFAIL	= 9,	/*  IOC h/w failure		*/
+	BFA_FCPORT_SM_DPORTENABLE = 10, /*  enable dport      */
+	BFA_FCPORT_SM_DPORTDISABLE = 11,/*  disable dport     */
+	BFA_FCPORT_SM_FAA_MISCONFIG = 12,	/* FAA misconfiguration */
 };
 
 /*
@@ -197,6 +200,10 @@ static void     bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
 					enum bfa_fcport_sm_event event);
 static void     bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
 					enum bfa_fcport_sm_event event);
+static void	bfa_fcport_sm_dport(struct bfa_fcport_s *fcport,
+					enum bfa_fcport_sm_event event);
+static void	bfa_fcport_sm_faa_misconfig(struct bfa_fcport_s *fcport,
+					enum bfa_fcport_sm_event event);
 
 static void     bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
 					enum bfa_fcport_ln_sm_event event);
@@ -226,6 +233,8 @@ static struct bfa_sm_table_s hal_port_sm_table[] = {
 	{BFA_SM(bfa_fcport_sm_stopped), BFA_PORT_ST_STOPPED},
 	{BFA_SM(bfa_fcport_sm_iocdown), BFA_PORT_ST_IOCDOWN},
 	{BFA_SM(bfa_fcport_sm_iocfail), BFA_PORT_ST_IOCDOWN},
+	{BFA_SM(bfa_fcport_sm_dport), BFA_PORT_ST_DPORT},
+	{BFA_SM(bfa_fcport_sm_faa_misconfig), BFA_PORT_ST_FAA_MISCONFIG},
 };
 
 
@@ -1244,6 +1253,12 @@ bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event)
 		 * Just ignore
 		 */
 		break;
+	case BFA_LPS_SM_SET_N2N_PID:
+		/*
+		 * When topology is set to loop, bfa_lps_set_n2n_pid() sends
+		 * this event. Ignore this event.
+		 */
+		break;
 
 	default:
 		bfa_sm_fault(lps->bfa, event);
@@ -1261,6 +1276,7 @@ bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
 
 	switch (event) {
 	case BFA_LPS_SM_FWRSP:
+	case BFA_LPS_SM_OFFLINE:
 		if (lps->status == BFA_STATUS_OK) {
 			bfa_sm_set_state(lps, bfa_lps_sm_online);
 			if (lps->fdisc)
@@ -1289,7 +1305,6 @@ bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
 		bfa_lps_login_comp(lps);
 		break;
 
-	case BFA_LPS_SM_OFFLINE:
 	case BFA_LPS_SM_DELETE:
 		bfa_sm_set_state(lps, bfa_lps_sm_init);
 		break;
@@ -2169,6 +2184,12 @@ bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
 		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
 		break;
 
+	case BFA_FCPORT_SM_FAA_MISCONFIG:
+		bfa_fcport_reset_linkinfo(fcport);
+		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
+		bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
+		break;
+
 	default:
 		bfa_sm_fault(fcport->bfa, event);
 	}
@@ -2225,6 +2246,12 @@ bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
 		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
 		break;
 
+	case BFA_FCPORT_SM_FAA_MISCONFIG:
+		bfa_fcport_reset_linkinfo(fcport);
+		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
+		bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
+		break;
+
 	default:
 		bfa_sm_fault(fcport->bfa, event);
 	}
@@ -2250,11 +2277,11 @@ bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
 		if (!bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {
 
 			bfa_trc(fcport->bfa,
-				pevent->link_state.vc_fcf.fcf.fipenabled);
+				pevent->link_state.attr.vc_fcf.fcf.fipenabled);
 			bfa_trc(fcport->bfa,
-				pevent->link_state.vc_fcf.fcf.fipfailed);
+				pevent->link_state.attr.vc_fcf.fcf.fipfailed);
 
-			if (pevent->link_state.vc_fcf.fcf.fipfailed)
+			if (pevent->link_state.attr.vc_fcf.fcf.fipfailed)
 				bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
 					BFA_PL_EID_FIP_FCF_DISC, 0,
 					"FIP FCF Discovery Failed");
@@ -2311,6 +2338,12 @@ bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
 		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
 		break;
 
+	case BFA_FCPORT_SM_FAA_MISCONFIG:
+		bfa_fcport_reset_linkinfo(fcport);
+		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
+		bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
+		break;
+
 	default:
 		bfa_sm_fault(fcport->bfa, event);
 	}
@@ -2404,6 +2437,12 @@ bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
 		}
 		break;
 
+	case BFA_FCPORT_SM_FAA_MISCONFIG:
+		bfa_fcport_reset_linkinfo(fcport);
+		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
+		bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
+		break;
+
 	default:
 		bfa_sm_fault(fcport->bfa, event);
 	}
@@ -2449,6 +2488,12 @@ bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
 		bfa_reqq_wcancel(&fcport->reqq_wait);
 		break;
 
+	case BFA_FCPORT_SM_FAA_MISCONFIG:
+		bfa_fcport_reset_linkinfo(fcport);
+		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
+		bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
+		break;
+
 	default:
 		bfa_sm_fault(fcport->bfa, event);
 	}
@@ -2600,6 +2645,10 @@ bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
 		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
 		break;
 
+	case BFA_FCPORT_SM_DPORTENABLE:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_dport);
+		break;
+
 	default:
 		bfa_sm_fault(fcport->bfa, event);
 	}
@@ -2680,6 +2729,81 @@ bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
 	}
 }
 
+static void
+bfa_fcport_sm_dport(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event)
+{
+	bfa_trc(fcport->bfa, event);
+
+	switch (event) {
+	case BFA_FCPORT_SM_DPORTENABLE:
+	case BFA_FCPORT_SM_DISABLE:
+	case BFA_FCPORT_SM_ENABLE:
+	case BFA_FCPORT_SM_START:
+		/*
+		 * Ignore these events while the port is in dport mode
+		 */
+		break;
+
+	case BFA_FCPORT_SM_STOP:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
+		break;
+
+	case BFA_FCPORT_SM_HWFAIL:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
+		break;
+
+	case BFA_FCPORT_SM_DPORTDISABLE:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
+		break;
+
+	default:
+		bfa_sm_fault(fcport->bfa, event);
+	}
+}
+
+static void
+bfa_fcport_sm_faa_misconfig(struct bfa_fcport_s *fcport,
+			    enum bfa_fcport_sm_event event)
+{
+	bfa_trc(fcport->bfa, event);
+
+	switch (event) {
+	case BFA_FCPORT_SM_DPORTENABLE:
+	case BFA_FCPORT_SM_ENABLE:
+	case BFA_FCPORT_SM_START:
+		/*
+		 * Ignore these events while the port has an FAA misconfiguration
+		 */
+		break;
+
+	case BFA_FCPORT_SM_DISABLE:
+		if (bfa_fcport_send_disable(fcport))
+			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
+		else
+			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);
+
+		bfa_fcport_reset_linkinfo(fcport);
+		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
+		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
+			     BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
+		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
+		break;
+
+	case BFA_FCPORT_SM_STOP:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
+		break;
+
+	case BFA_FCPORT_SM_HWFAIL:
+		bfa_fcport_reset_linkinfo(fcport);
+		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
+		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
+		break;
+
+	default:
+		bfa_sm_fault(fcport->bfa, event);
+	}
+}
+
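Both new states follow the function-pointer FSM idiom used throughout this file: the current state is a handler function, bfa_sm_set_state() swaps the pointer, and bfa_sm_send_event() dispatches through it. A minimal standalone sketch of the idiom (names and event numbers are illustrative, not the driver's):

	struct sm_port;
	typedef void (*sm_state_t)(struct sm_port *p, int event);

	struct sm_port {
		sm_state_t state;		/* current state = handler function */
	};

	static void sm_disabled(struct sm_port *p, int event);
	static void sm_dport(struct sm_port *p, int event);

	static void sm_disabled(struct sm_port *p, int event)
	{
		if (event == 10)		/* DPORTENABLE */
			p->state = sm_dport;	/* bfa_sm_set_state() analogue */
	}

	static void sm_dport(struct sm_port *p, int event)
	{
		if (event == 11)		/* DPORTDISABLE */
			p->state = sm_disabled;
	}

	static void sm_send_event(struct sm_port *p, int event)
	{
		p->state(p, event);		/* bfa_sm_send_event() analogue */
	}
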
 /*
  * Link state is down
  */
@@ -2943,6 +3067,7 @@ bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
 	 */
 	do_gettimeofday(&tv);
 	fcport->stats_reset_time = tv.tv_sec;
+	fcport->stats_dma_ready = BFA_FALSE;
 
 	/*
 	 * initialize and set default configuration
@@ -2953,6 +3078,9 @@ bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
 	port_cfg->maxfrsize = 0;
 
 	port_cfg->trl_def_speed = BFA_PORT_SPEED_1GBPS;
+	port_cfg->qos_bw.high = BFA_QOS_BW_HIGH;
+	port_cfg->qos_bw.med = BFA_QOS_BW_MED;
+	port_cfg->qos_bw.low = BFA_QOS_BW_LOW;
 
 	INIT_LIST_HEAD(&fcport->stats_pending_q);
 	INIT_LIST_HEAD(&fcport->statsclr_pending_q);
@@ -2996,6 +3124,21 @@ bfa_fcport_iocdisable(struct bfa_s *bfa)
 	bfa_trunk_iocdisable(bfa);
 }
 
+/*
+ * Update loop info in fcport for SCN online
+ */
+static void
+bfa_fcport_update_loop_info(struct bfa_fcport_s *fcport,
+			struct bfa_fcport_loop_info_s *loop_info)
+{
+	fcport->myalpa = loop_info->myalpa;
+	fcport->alpabm_valid =
+			loop_info->alpabm_val;
+	memcpy(fcport->alpabm.alpa_bm,
+			loop_info->alpabm.alpa_bm,
+			sizeof(struct fc_alpabm_s));
+}
+
 static void
 bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport)
 {
@@ -3005,12 +3148,15 @@ bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport)
 	fcport->speed = pevent->link_state.speed;
 	fcport->topology = pevent->link_state.topology;
 
-	if (fcport->topology == BFA_PORT_TOPOLOGY_LOOP)
-		fcport->myalpa = 0;
+	if (fcport->topology == BFA_PORT_TOPOLOGY_LOOP) {
+		bfa_fcport_update_loop_info(fcport,
+				&pevent->link_state.attr.loop_info);
+		return;
+	}
 
 	/* QoS Details */
 	fcport->qos_attr = pevent->link_state.qos_attr;
-	fcport->qos_vc_attr = pevent->link_state.vc_fcf.qos_vc_attr;
+	fcport->qos_vc_attr = pevent->link_state.attr.vc_fcf.qos_vc_attr;
 
 	/*
 	 * update trunk state if applicable
@@ -3019,7 +3165,8 @@ bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport)
 		trunk->attr.state = BFA_TRUNK_DISABLED;
 
 	/* update FCoE specific */
-	fcport->fcoe_vlan = be16_to_cpu(pevent->link_state.vc_fcf.fcf.vlan);
+	fcport->fcoe_vlan =
+		be16_to_cpu(pevent->link_state.attr.vc_fcf.fcf.vlan);
 
 	bfa_trc(fcport->bfa, fcport->speed);
 	bfa_trc(fcport->bfa, fcport->topology);
@@ -3453,6 +3600,7 @@ bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
 	case BFI_FCPORT_I2H_ENABLE_RSP:
 		if (fcport->msgtag == i2hmsg.penable_rsp->msgtag) {
 
+			fcport->stats_dma_ready = BFA_TRUE;
 			if (fcport->use_flash_cfg) {
 				fcport->cfg = i2hmsg.penable_rsp->port_cfg;
 				fcport->cfg.maxfrsize =
@@ -3468,6 +3616,8 @@ bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
 				else
 					fcport->trunk.attr.state =
 						BFA_TRUNK_DISABLED;
+				fcport->qos_attr.qos_bw =
+					i2hmsg.penable_rsp->port_cfg.qos_bw;
 				fcport->use_flash_cfg = BFA_FALSE;
 			}
 
@@ -3476,6 +3626,9 @@ bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
 			else
 				fcport->qos_attr.state = BFA_QOS_DISABLED;
 
+			fcport->qos_attr.qos_bw_op =
+					i2hmsg.penable_rsp->port_cfg.qos_bw;
+
 			bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
 		}
 		break;
@@ -3488,8 +3641,17 @@ bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
 	case BFI_FCPORT_I2H_EVENT:
 		if (i2hmsg.event->link_state.linkstate == BFA_PORT_LINKUP)
 			bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKUP);
-		else
-			bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKDOWN);
+		else {
+			if (i2hmsg.event->link_state.linkstate_rsn ==
+			    BFA_PORT_LINKSTATE_RSN_FAA_MISCONFIG)
+				bfa_sm_send_event(fcport,
+						  BFA_FCPORT_SM_FAA_MISCONFIG);
+			else
+				bfa_sm_send_event(fcport,
+						  BFA_FCPORT_SM_LINKDOWN);
+		}
+		fcport->qos_attr.qos_bw_op =
+				i2hmsg.event->link_state.qos_attr.qos_bw_op;
 		break;
 
 	case BFI_FCPORT_I2H_TRUNK_SCN:
@@ -3609,6 +3771,9 @@ bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_port_speed speed)
 
 	if (fcport->cfg.trunked == BFA_TRUE)
 		return BFA_STATUS_TRUNK_ENABLED;
+	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
+			(speed == BFA_PORT_SPEED_16GBPS))
+		return BFA_STATUS_UNSUPP_SPEED;
 	if ((speed != BFA_PORT_SPEED_AUTO) && (speed > fcport->speed_sup)) {
 		bfa_trc(bfa, fcport->speed_sup);
 		return BFA_STATUS_UNSUPP_SPEED;
@@ -3663,7 +3828,26 @@ bfa_fcport_cfg_topology(struct bfa_s *bfa, enum bfa_port_topology topology)
 
 	switch (topology) {
 	case BFA_PORT_TOPOLOGY_P2P:
+		break;
+
 	case BFA_PORT_TOPOLOGY_LOOP:
+		if ((bfa_fcport_is_qos_enabled(bfa) != BFA_FALSE) ||
+			(fcport->qos_attr.state != BFA_QOS_DISABLED))
+			return BFA_STATUS_ERROR_QOS_ENABLED;
+		if (fcport->cfg.ratelimit != BFA_FALSE)
+			return BFA_STATUS_ERROR_TRL_ENABLED;
+		if ((bfa_fcport_is_trunk_enabled(bfa) != BFA_FALSE) ||
+			(fcport->trunk.attr.state != BFA_TRUNK_DISABLED))
+			return BFA_STATUS_ERROR_TRUNK_ENABLED;
+		if ((bfa_fcport_get_speed(bfa) == BFA_PORT_SPEED_16GBPS) ||
+			(fcport->cfg.speed == BFA_PORT_SPEED_16GBPS))
+			return BFA_STATUS_UNSUPP_SPEED;
+		if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type))
+			return BFA_STATUS_LOOP_UNSUPP_MEZZ;
+		if (bfa_fcport_is_dport(bfa) != BFA_FALSE)
+			return BFA_STATUS_DPORT_ERR;
+		break;
+
 	case BFA_PORT_TOPOLOGY_AUTO:
 		break;
 
@@ -3686,6 +3870,17 @@ bfa_fcport_get_topology(struct bfa_s *bfa)
 	return fcport->topology;
 }
 
+/*
+ * Get the configured (as opposed to the currently negotiated) topology.
+ */
+enum bfa_port_topology
+bfa_fcport_get_cfg_topology(struct bfa_s *bfa)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+	return fcport->cfg.topology;
+}
+
 bfa_status_t
 bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa)
 {
@@ -3761,9 +3956,11 @@ bfa_fcport_get_maxfrsize(struct bfa_s *bfa)
 u8
 bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa)
 {
-	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+	if (bfa_fcport_get_topology(bfa) != BFA_PORT_TOPOLOGY_LOOP)
+		return (BFA_FCPORT_MOD(bfa))->cfg.rx_bbcredit;
 
-	return fcport->cfg.rx_bbcredit;
+	else
+		return 0;
 }
 
 void
@@ -3850,8 +4047,9 @@ bfa_fcport_get_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb)
 {
 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
 
-	if (bfa_ioc_is_disabled(&bfa->ioc))
-		return BFA_STATUS_IOC_DISABLED;
+	if (!bfa_iocfc_is_operational(bfa) ||
+	    !fcport->stats_dma_ready)
+		return BFA_STATUS_IOC_NON_OP;
 
 	if (!list_empty(&fcport->statsclr_pending_q))
 		return BFA_STATUS_DEVBUSY;
@@ -3876,6 +4074,10 @@ bfa_fcport_clear_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb)
 {
 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
 
+	if (!bfa_iocfc_is_operational(bfa) ||
+	    !fcport->stats_dma_ready)
+		return BFA_STATUS_IOC_NON_OP;
+
 	if (!list_empty(&fcport->stats_pending_q))
 		return BFA_STATUS_DEVBUSY;
 
@@ -3905,6 +4107,40 @@ bfa_fcport_is_disabled(struct bfa_s *bfa)
 }
 
 bfa_boolean_t
+bfa_fcport_is_dport(struct bfa_s *bfa)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+	return (bfa_sm_to_state(hal_port_sm_table, fcport->sm) ==
+		BFA_PORT_ST_DPORT);
+}
+
+bfa_status_t
+bfa_fcport_set_qos_bw(struct bfa_s *bfa, struct bfa_qos_bw_s *qos_bw)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+	enum bfa_ioc_type_e ioc_type = bfa_get_type(bfa);
+
+	bfa_trc(bfa, ioc_type);
+
+	if ((qos_bw->high == 0) || (qos_bw->med == 0) || (qos_bw->low == 0))
+		return BFA_STATUS_QOS_BW_INVALID;
+
+	if ((qos_bw->high + qos_bw->med + qos_bw->low) != 100)
+		return BFA_STATUS_QOS_BW_INVALID;
+
+	if ((qos_bw->med > qos_bw->high) || (qos_bw->low > qos_bw->med) ||
+	    (qos_bw->low > qos_bw->high))
+		return BFA_STATUS_QOS_BW_INVALID;
+
+	if ((ioc_type == BFA_IOC_TYPE_FC) &&
+	    (fcport->cfg.topology != BFA_PORT_TOPOLOGY_LOOP))
+		fcport->cfg.qos_bw = *qos_bw;
+
+	return BFA_STATUS_OK;
+}
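
The three checks above define the accepted QoS bandwidth splits: every class share nonzero, the shares summing to exactly 100, and high >= med >= low. A minimal standalone sketch of the same rule (hypothetical helper, not part of the driver):

#include <stdbool.h>

/* Hypothetical mirror of the qos_bw validation above: all three class
 * shares nonzero, summing to 100, with high >= med >= low. */
static bool qos_bw_split_is_valid(unsigned int high, unsigned int med,
				  unsigned int low)
{
	if (high == 0 || med == 0 || low == 0)
		return false;
	if (high + med + low != 100)
		return false;
	return high >= med && med >= low;
}

/* e.g. (60, 30, 10) is accepted; (50, 50, 0) and (20, 30, 50) are not. */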
+
+bfa_boolean_t
 bfa_fcport_is_ratelim(struct bfa_s *bfa)
 {
 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
@@ -3981,6 +4217,26 @@ bfa_fcport_is_trunk_enabled(struct bfa_s *bfa)
 	return fcport->cfg.trunked;
 }
 
+void
+bfa_fcport_dportenable(struct bfa_s *bfa)
+{
+	/*
+	 * The caller is assumed to have checked that the port is disabled
+	 */
+	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DPORTENABLE);
+	bfa_port_set_dportenabled(&bfa->modules.port, BFA_TRUE);
+}
+
+void
+bfa_fcport_dportdisable(struct bfa_s *bfa)
+{
+	/*
+	 * The caller is assumed to have checked that the port is disabled
+	 */
+	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DPORTDISABLE);
+	bfa_port_set_dportenabled(&bfa->modules.port, BFA_FALSE);
+}
+
 /*
  * Rport State machine functions
  */
@@ -4707,6 +4963,21 @@ bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
 		bfa_sm_send_event(rp, BFA_RPORT_SM_QOS_SCN);
 		break;
 
+	case BFI_RPORT_I2H_LIP_SCN_ONLINE:
+		bfa_fcport_update_loop_info(BFA_FCPORT_MOD(bfa),
+				&msg.lip_scn->loop_info);
+		bfa_cb_rport_scn_online(bfa);
+		break;
+
+	case BFI_RPORT_I2H_LIP_SCN_OFFLINE:
+		bfa_cb_rport_scn_offline(bfa);
+		break;
+
+	case BFI_RPORT_I2H_NO_DEV:
+		rp = BFA_RPORT_FROM_TAG(bfa, msg.lip_scn->bfa_handle);
+		bfa_cb_rport_scn_no_dev(rp->rport_drv);
+		break;
+
 	default:
 		bfa_trc(bfa, m->mhdr.msg_id);
 		WARN_ON(1);
@@ -5348,6 +5619,37 @@ bfa_uf_res_recfg(struct bfa_s *bfa, u16 num_uf_fw)
 }
 
 /*
+ *	Dport forward declaration
+ */
+
+/*
+ * BFA DPORT state machine events
+ */
+enum bfa_dport_sm_event {
+	BFA_DPORT_SM_ENABLE	= 1,	/* dport enable event         */
+	BFA_DPORT_SM_DISABLE    = 2,    /* dport disable event        */
+	BFA_DPORT_SM_FWRSP      = 3,    /* fw enable/disable rsp      */
+	BFA_DPORT_SM_QRESUME    = 4,    /* CQ space available         */
+	BFA_DPORT_SM_HWFAIL     = 5,    /* IOC h/w failure            */
+};
+
+static void bfa_dport_sm_disabled(struct bfa_dport_s *dport,
+				  enum bfa_dport_sm_event event);
+static void bfa_dport_sm_enabling_qwait(struct bfa_dport_s *dport,
+				  enum bfa_dport_sm_event event);
+static void bfa_dport_sm_enabling(struct bfa_dport_s *dport,
+				  enum bfa_dport_sm_event event);
+static void bfa_dport_sm_enabled(struct bfa_dport_s *dport,
+				 enum bfa_dport_sm_event event);
+static void bfa_dport_sm_disabling_qwait(struct bfa_dport_s *dport,
+				 enum bfa_dport_sm_event event);
+static void bfa_dport_sm_disabling(struct bfa_dport_s *dport,
+				   enum bfa_dport_sm_event event);
+static void bfa_dport_qresume(void *cbarg);
+static void bfa_dport_req_comp(struct bfa_dport_s *dport,
+			       bfi_diag_dport_rsp_t *msg);
+
+/*
  *	BFA fcdiag module
  */
 #define BFA_DIAG_QTEST_TOV	1000    /* msec */
@@ -5377,15 +5679,24 @@ bfa_fcdiag_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
 		struct bfa_pcidev_s *pcidev)
 {
 	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
+	struct bfa_dport_s  *dport = &fcdiag->dport;
+
 	fcdiag->bfa             = bfa;
 	fcdiag->trcmod  = bfa->trcmod;
 	/* The common DIAG attach bfa_diag_attach() will do all memory claim */
+	dport->bfa = bfa;
+	bfa_sm_set_state(dport, bfa_dport_sm_disabled);
+	bfa_reqq_winit(&dport->reqq_wait, bfa_dport_qresume, dport);
+	dport->cbfn = NULL;
+	dport->cbarg = NULL;
 }
 
 static void
 bfa_fcdiag_iocdisable(struct bfa_s *bfa)
 {
 	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
+	struct bfa_dport_s *dport = &fcdiag->dport;
+
 	bfa_trc(fcdiag, fcdiag->lb.lock);
 	if (fcdiag->lb.lock) {
 		fcdiag->lb.status = BFA_STATUS_IOC_FAILURE;
@@ -5393,6 +5704,8 @@ bfa_fcdiag_iocdisable(struct bfa_s *bfa)
 		fcdiag->lb.lock = 0;
 		bfa_fcdiag_set_busy_status(fcdiag);
 	}
+
+	bfa_sm_send_event(dport, BFA_DPORT_SM_HWFAIL);
 }
 
 static void
@@ -5577,6 +5890,9 @@ bfa_fcdiag_intr(struct bfa_s *bfa, struct bfi_msg_s *msg)
 	case BFI_DIAG_I2H_QTEST:
 		bfa_fcdiag_queuetest_comp(fcdiag, (bfi_diag_qtest_rsp_t *)msg);
 		break;
+	case BFI_DIAG_I2H_DPORT:
+		bfa_dport_req_comp(&fcdiag->dport, (bfi_diag_dport_rsp_t *)msg);
+		break;
 	default:
 		bfa_trc(fcdiag, msg->mhdr.msg_id);
 		WARN_ON(1);
@@ -5646,12 +5962,18 @@ bfa_fcdiag_loopback(struct bfa_s *bfa, enum bfa_port_opmode opmode,
 		}
 	}
 
+	/*
+	 * For CT2, 1G is not supported
+	 */
+	if ((speed == BFA_PORT_SPEED_1GBPS) &&
+	    (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id))) {
+		bfa_trc(fcdiag, speed);
+		return BFA_STATUS_UNSUPP_SPEED;
+	}
+
 	/* For Mezz card, port speed entered needs to be checked */
 	if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type)) {
 		if (bfa_ioc_get_type(&bfa->ioc) == BFA_IOC_TYPE_FC) {
-			if ((speed == BFA_PORT_SPEED_1GBPS) &&
-			    (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)))
-				return BFA_STATUS_UNSUPP_SPEED;
 			if (!(speed == BFA_PORT_SPEED_1GBPS ||
 			      speed == BFA_PORT_SPEED_2GBPS ||
 			      speed == BFA_PORT_SPEED_4GBPS ||
@@ -5764,3 +6086,379 @@ bfa_fcdiag_lb_is_running(struct bfa_s *bfa)
 	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
 	return fcdiag->lb.lock ?  BFA_STATUS_DIAG_BUSY : BFA_STATUS_OK;
 }
+
+/*
+ *	D-port
+ */
+static bfa_boolean_t bfa_dport_send_req(struct bfa_dport_s *dport,
+					enum bfi_dport_req req);
+static void
+bfa_cb_fcdiag_dport(struct bfa_dport_s *dport, bfa_status_t bfa_status)
+{
+	if (dport->cbfn != NULL) {
+		dport->cbfn(dport->cbarg, bfa_status);
+		dport->cbfn = NULL;
+		dport->cbarg = NULL;
+	}
+}
+
+static void
+bfa_dport_sm_disabled(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
+{
+	bfa_trc(dport->bfa, event);
+
+	switch (event) {
+	case BFA_DPORT_SM_ENABLE:
+		bfa_fcport_dportenable(dport->bfa);
+		if (bfa_dport_send_req(dport, BFI_DPORT_ENABLE))
+			bfa_sm_set_state(dport, bfa_dport_sm_enabling);
+		else
+			bfa_sm_set_state(dport, bfa_dport_sm_enabling_qwait);
+		break;
+
+	case BFA_DPORT_SM_DISABLE:
+		/* Already disabled */
+		break;
+
+	case BFA_DPORT_SM_HWFAIL:
+		/* ignore */
+		break;
+
+	default:
+		bfa_sm_fault(dport->bfa, event);
+	}
+}
+
+static void
+bfa_dport_sm_enabling_qwait(struct bfa_dport_s *dport,
+			    enum bfa_dport_sm_event event)
+{
+	bfa_trc(dport->bfa, event);
+
+	switch (event) {
+	case BFA_DPORT_SM_QRESUME:
+		bfa_sm_set_state(dport, bfa_dport_sm_enabling);
+		bfa_dport_send_req(dport, BFI_DPORT_ENABLE);
+		break;
+
+	case BFA_DPORT_SM_HWFAIL:
+		bfa_reqq_wcancel(&dport->reqq_wait);
+		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
+		bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED);
+		break;
+
+	default:
+		bfa_sm_fault(dport->bfa, event);
+	}
+}
+
+static void
+bfa_dport_sm_enabling(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
+{
+	bfa_trc(dport->bfa, event);
+
+	switch (event) {
+	case BFA_DPORT_SM_FWRSP:
+		bfa_sm_set_state(dport, bfa_dport_sm_enabled);
+		break;
+
+	case BFA_DPORT_SM_HWFAIL:
+		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
+		bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED);
+		break;
+
+	default:
+		bfa_sm_fault(dport->bfa, event);
+	}
+}
+
+static void
+bfa_dport_sm_enabled(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
+{
+	bfa_trc(dport->bfa, event);
+
+	switch (event) {
+	case BFA_DPORT_SM_ENABLE:
+		/* Already enabled */
+		break;
+
+	case BFA_DPORT_SM_DISABLE:
+		bfa_fcport_dportdisable(dport->bfa);
+		if (bfa_dport_send_req(dport, BFI_DPORT_DISABLE))
+			bfa_sm_set_state(dport, bfa_dport_sm_disabling);
+		else
+			bfa_sm_set_state(dport, bfa_dport_sm_disabling_qwait);
+		break;
+
+	case BFA_DPORT_SM_HWFAIL:
+		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
+		break;
+
+	default:
+		bfa_sm_fault(dport->bfa, event);
+	}
+}
+
+static void
+bfa_dport_sm_disabling_qwait(struct bfa_dport_s *dport,
+			     enum bfa_dport_sm_event event)
+{
+	bfa_trc(dport->bfa, event);
+
+	switch (event) {
+	case BFA_DPORT_SM_QRESUME:
+		bfa_sm_set_state(dport, bfa_dport_sm_disabling);
+		bfa_dport_send_req(dport, BFI_DPORT_DISABLE);
+		break;
+
+	case BFA_DPORT_SM_HWFAIL:
+		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
+		bfa_reqq_wcancel(&dport->reqq_wait);
+		bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK);
+		break;
+
+	default:
+		bfa_sm_fault(dport->bfa, event);
+	}
+}
+
+static void
+bfa_dport_sm_disabling(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
+{
+	bfa_trc(dport->bfa, event);
+
+	switch (event) {
+	case BFA_DPORT_SM_FWRSP:
+		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
+		break;
+
+	case BFA_DPORT_SM_HWFAIL:
+		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
+		bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK);
+		break;
+
+	default:
+		bfa_sm_fault(dport->bfa, event);
+	}
+}
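
Read together, the six handlers above implement this transition table (a summary derived from the code; "fault" means bfa_sm_fault() is called, "cb" is the bfa_cb_fcdiag_dport() completion):

  state            ENABLE       DISABLE       QRESUME     FWRSP     HWFAIL
  disabled         enabling(*)  ignored       fault       fault     ignored
  enabling_qwait   fault        fault         enabling    fault     disabled, cb FAILED
  enabling         fault        fault         fault       enabled   disabled, cb FAILED
  enabled          ignored      disabling(*)  fault       fault     disabled
  disabling_qwait  fault        fault         disabling   fault     disabled, cb OK
  disabling        fault        fault         fault       disabled  disabled, cb OK

  (*) falls back to the matching _qwait state when bfa_dport_send_req()
      finds no request-queue entry; the request is retried on QRESUME.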
+
+
+static bfa_boolean_t
+bfa_dport_send_req(struct bfa_dport_s *dport, enum bfi_dport_req req)
+{
+	struct bfi_diag_dport_req_s *m;
+
+	/*
+	 * Increment message tag before queue check, so that responses to old
+	 * requests are discarded.
+	 */
+	dport->msgtag++;
+
+	/*
+	 * check for room in queue to send request now
+	 */
+	m = bfa_reqq_next(dport->bfa, BFA_REQQ_DIAG);
+	if (!m) {
+		bfa_reqq_wait(dport->bfa, BFA_REQQ_DIAG, &dport->reqq_wait);
+		return BFA_FALSE;
+	}
+
+	bfi_h2i_set(m->mh, BFI_MC_DIAG, BFI_DIAG_H2I_DPORT,
+		    bfa_fn_lpu(dport->bfa));
+	m->req  = req;
+	m->msgtag = dport->msgtag;
+
+	/*
+	 * queue I/O message to firmware
+	 */
+	bfa_reqq_produce(dport->bfa, BFA_REQQ_DIAG, m->mh);
+
+	return BFA_TRUE;
+}
+
+static void
+bfa_dport_qresume(void *cbarg)
+{
+	struct bfa_dport_s *dport = cbarg;
+
+	bfa_sm_send_event(dport, BFA_DPORT_SM_QRESUME);
+}
+
+static void
+bfa_dport_req_comp(struct bfa_dport_s *dport, bfi_diag_dport_rsp_t *msg)
+{
+	bfa_sm_send_event(dport, BFA_DPORT_SM_FWRSP);
+	bfa_cb_fcdiag_dport(dport, msg->status);
+}
+
+/*
+ * Dport enable
+ *
+ * @param[in] *bfa            - bfa data struct
+ */
+bfa_status_t
+bfa_dport_enable(struct bfa_s *bfa, bfa_cb_diag_t cbfn, void *cbarg)
+{
+	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
+	struct bfa_dport_s  *dport = &fcdiag->dport;
+
+	/*
+	 * D-port is not supported on MEZZ cards
+	 */
+	if (bfa_mfg_is_mezz(dport->bfa->ioc.attr->card_type)) {
+		bfa_trc(dport->bfa, BFA_STATUS_PBC);
+		return BFA_STATUS_CMD_NOTSUPP_MEZZ;
+	}
+
+	/*
+	 * Check to see if IOC is down
+	 */
+	if (!bfa_iocfc_is_operational(bfa))
+		return BFA_STATUS_IOC_NON_OP;
+
+	/* if port is PBC disabled, return error */
+	if (bfa_fcport_is_pbcdisabled(bfa)) {
+		bfa_trc(dport->bfa, BFA_STATUS_PBC);
+		return BFA_STATUS_PBC;
+	}
+
+	/*
+	 * Check that the IOC is operating in FC mode
+	 */
+	if (bfa_ioc_get_type(&bfa->ioc) != BFA_IOC_TYPE_FC) {
+		bfa_trc(dport->bfa, bfa_ioc_get_type(&bfa->ioc));
+		return BFA_STATUS_CMD_NOTSUPP_CNA;
+	}
+
+	/*
+	 * Check if port is in LOOP mode
+	 */
+	if ((bfa_fcport_get_cfg_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP) ||
+	    (bfa_fcport_get_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP)) {
+		bfa_trc(dport->bfa, 0);
+		return BFA_STATUS_TOPOLOGY_LOOP;
+	}
+
+	/*
+	 * Check if port is in TRUNK mode
+	 */
+	if (bfa_fcport_is_trunk_enabled(bfa)) {
+		bfa_trc(dport->bfa, 0);
+		return BFA_STATUS_ERROR_TRUNK_ENABLED;
+	}
+
+	/*
+	 * Check that the port is disabled or already in dport state
+	 */
+	if ((bfa_fcport_is_disabled(bfa) == BFA_FALSE) &&
+	    (bfa_fcport_is_dport(bfa) == BFA_FALSE)) {
+		bfa_trc(dport->bfa, 0);
+		return BFA_STATUS_PORT_NOT_DISABLED;
+	}
+
+	/*
+	 * Check if dport is busy
+	 */
+	if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabling) ||
+	    bfa_sm_cmp_state(dport, bfa_dport_sm_enabling_qwait) ||
+	    bfa_sm_cmp_state(dport, bfa_dport_sm_disabling) ||
+	    bfa_sm_cmp_state(dport, bfa_dport_sm_disabling_qwait)) {
+		return BFA_STATUS_DEVBUSY;
+	}
+
+	/*
+	 * Check if dport is already enabled
+	 */
+	if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabled)) {
+		bfa_trc(dport->bfa, 0);
+		return BFA_STATUS_DPORT_ENABLED;
+	}
+
+	dport->cbfn = cbfn;
+	dport->cbarg = cbarg;
+
+	bfa_sm_send_event(dport, BFA_DPORT_SM_ENABLE);
+	return BFA_STATUS_OK;
+}
+
+/*
+ *	Dport disable
+ *
+ *	@param[in] *bfa            - bfa data struct
+ */
+bfa_status_t
+bfa_dport_disable(struct bfa_s *bfa, bfa_cb_diag_t cbfn, void *cbarg)
+{
+	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
+	struct bfa_dport_s *dport = &fcdiag->dport;
+
+	if (bfa_ioc_is_disabled(&bfa->ioc))
+		return BFA_STATUS_IOC_DISABLED;
+
+	/* if port is PBC disabled, return error */
+	if (bfa_fcport_is_pbcdisabled(bfa)) {
+		bfa_trc(dport->bfa, BFA_STATUS_PBC);
+		return BFA_STATUS_PBC;
+	}
+
+	/*
+	 * Check that the port is disabled or already in dport state
+	 */
+	if ((bfa_fcport_is_disabled(bfa) == BFA_FALSE) &&
+	    (bfa_fcport_is_dport(bfa) == BFA_FALSE)) {
+		bfa_trc(dport->bfa, 0);
+		return BFA_STATUS_PORT_NOT_DISABLED;
+	}
+
+	/*
+	 * Check if dport is busy
+	 */
+	if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabling) ||
+	    bfa_sm_cmp_state(dport, bfa_dport_sm_enabling_qwait) ||
+	    bfa_sm_cmp_state(dport, bfa_dport_sm_disabling) ||
+	    bfa_sm_cmp_state(dport, bfa_dport_sm_disabling_qwait))
+		return BFA_STATUS_DEVBUSY;
+
+	/*
+	 * Check if dport is already disabled
+	 */
+	if (bfa_sm_cmp_state(dport, bfa_dport_sm_disabled)) {
+		bfa_trc(dport->bfa, 0);
+		return BFA_STATUS_DPORT_DISABLED;
+	}
+
+	dport->cbfn = cbfn;
+	dport->cbarg = cbarg;
+
+	bfa_sm_send_event(dport, BFA_DPORT_SM_DISABLE);
+	return BFA_STATUS_OK;
+}
+
+/*
+ *	Get D-port state
+ *
+ * @param[in] *bfa            - bfa data struct
+ */
+bfa_status_t
+bfa_dport_get_state(struct bfa_s *bfa, enum bfa_dport_state *state)
+{
+	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
+	struct bfa_dport_s *dport = &fcdiag->dport;
+
+	if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabled))
+		*state = BFA_DPORT_ST_ENABLED;
+	else if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabling) ||
+		 bfa_sm_cmp_state(dport, bfa_dport_sm_enabling_qwait))
+		*state = BFA_DPORT_ST_ENABLING;
+	else if (bfa_sm_cmp_state(dport, bfa_dport_sm_disabled))
+		*state = BFA_DPORT_ST_DISABLED;
+	else if (bfa_sm_cmp_state(dport, bfa_dport_sm_disabling) ||
+		 bfa_sm_cmp_state(dport, bfa_dport_sm_disabling_qwait))
+		*state = BFA_DPORT_ST_DISABLING;
+	else {
+		bfa_trc(dport->bfa, BFA_STATUS_EINVAL);
+		return BFA_STATUS_EINVAL;
+	}
+	return BFA_STATUS_OK;
+}
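
For orientation, a minimal sketch of driving this API outside the bsg path, modeled on bfad_iocmd_diag_cfg_dport() in bfad_bsg.c further below; my_enable_dport is a hypothetical wrapper, and the in-tree caller additionally holds bfad_lock around the bfa_dport_enable() call:

/* Hypothetical wrapper: request D-port enable, then block until the
 * firmware-response path runs the completion callback. */
static bfa_status_t my_enable_dport(struct bfad_s *bfad)
{
	struct bfad_hal_comp fcomp;
	bfa_status_t status;

	init_completion(&fcomp.comp);
	status = bfa_dport_enable(&bfad->bfa, bfad_hcb_comp, &fcomp);
	if (status == BFA_STATUS_OK) {
		/* callback fires on BFA_DPORT_SM_FWRSP or _HWFAIL */
		wait_for_completion(&fcomp.comp);
		status = fcomp.status;
	}
	return status;
}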
diff --git a/drivers/scsi/bfa/bfa_svc.h b/drivers/scsi/bfa/bfa_svc.h
index 1abcf7c51661..8d7fbecfcb22 100644
--- a/drivers/scsi/bfa/bfa_svc.h
+++ b/drivers/scsi/bfa/bfa_svc.h
@@ -474,8 +474,10 @@ struct bfa_fcport_s {
 	/*  supported speeds */
 	enum bfa_port_speed speed;	/*  current speed */
 	enum bfa_port_topology topology;	/*  current topology */
-	u8			myalpa;	/*  my ALPA in LOOP topology */
 	u8			rsvd[3];
+	u8			myalpa;	/*  my ALPA in LOOP topology */
+	u8			alpabm_valid; /* alpa bitmap valid or not */
+	struct fc_alpabm_s	alpabm;	/* alpa bitmap */
 	struct bfa_port_cfg_s	cfg;	/*  current port configuration */
 	bfa_boolean_t		use_flash_cfg; /* get port cfg from flash */
 	struct bfa_qos_attr_s  qos_attr;   /* QoS Attributes */
@@ -512,6 +514,7 @@ struct bfa_fcport_s {
 	struct bfa_fcport_trunk_s trunk;
 	u16		fcoe_vlan;
 	struct bfa_mem_dma_s	fcport_dma;
+	bfa_boolean_t		stats_dma_ready;
 };
 
 #define BFA_FCPORT_MOD(__bfa)	(&(__bfa)->modules.fcport)
@@ -534,6 +537,7 @@ enum bfa_port_speed bfa_fcport_get_speed(struct bfa_s *bfa);
 bfa_status_t bfa_fcport_cfg_topology(struct bfa_s *bfa,
 				     enum bfa_port_topology topo);
 enum bfa_port_topology bfa_fcport_get_topology(struct bfa_s *bfa);
+enum bfa_port_topology bfa_fcport_get_cfg_topology(struct bfa_s *bfa);
 bfa_status_t bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa);
 bfa_boolean_t bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa);
 u8 bfa_fcport_get_myalpa(struct bfa_s *bfa);
@@ -547,6 +551,9 @@ void bfa_fcport_event_register(struct bfa_s *bfa,
 			void (*event_cbfn) (void *cbarg,
 			enum bfa_port_linkstate event), void *event_cbarg);
 bfa_boolean_t bfa_fcport_is_disabled(struct bfa_s *bfa);
+bfa_boolean_t bfa_fcport_is_dport(struct bfa_s *bfa);
+bfa_status_t bfa_fcport_set_qos_bw(struct bfa_s *bfa,
+				   struct bfa_qos_bw_s *qos_bw);
 enum bfa_port_speed bfa_fcport_get_ratelim_speed(struct bfa_s *bfa);
 
 void bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit, u8 bb_scn);
@@ -560,6 +567,8 @@ bfa_status_t bfa_fcport_clear_stats(struct bfa_s *bfa,
 			struct bfa_cb_pending_q_s *cb);
 bfa_boolean_t bfa_fcport_is_qos_enabled(struct bfa_s *bfa);
 bfa_boolean_t bfa_fcport_is_trunk_enabled(struct bfa_s *bfa);
+void bfa_fcport_dportenable(struct bfa_s *bfa);
+void bfa_fcport_dportdisable(struct bfa_s *bfa);
 bfa_status_t bfa_fcport_is_pbcdisabled(struct bfa_s *bfa);
 void bfa_fcport_cfg_faa(struct bfa_s *bfa, u8 state);
 
@@ -575,6 +584,9 @@ void bfa_cb_rport_offline(void *rport);
 void bfa_cb_rport_qos_scn_flowid(void *rport,
 				 struct bfa_rport_qos_attr_s old_qos_attr,
 				 struct bfa_rport_qos_attr_s new_qos_attr);
+void bfa_cb_rport_scn_online(struct bfa_s *bfa);
+void bfa_cb_rport_scn_offline(struct bfa_s *bfa);
+void bfa_cb_rport_scn_no_dev(void *rp);
 void bfa_cb_rport_qos_scn_prio(void *rport,
 			       struct bfa_rport_qos_attr_s old_qos_attr,
 			       struct bfa_rport_qos_attr_s new_qos_attr);
@@ -697,11 +709,21 @@ struct bfa_fcdiag_lb_s {
 	u32        status;
 };
 
+struct bfa_dport_s {
+	struct bfa_s	*bfa;		/* Back pointer to BFA	*/
+	bfa_sm_t	sm;		/* finite state machine */
+	u32		msgtag;		/* firmware msg tag for reply */
+	struct bfa_reqq_wait_s reqq_wait;
+	bfa_cb_diag_t	cbfn;
+	void		*cbarg;
+};
+
 struct bfa_fcdiag_s {
 	struct bfa_s    *bfa;           /* Back pointer to BFA */
 	struct bfa_trc_mod_s   *trcmod;
 	struct bfa_fcdiag_lb_s lb;
 	struct bfa_fcdiag_qtest_s qtest;
+	struct bfa_dport_s	dport;
 };
 
 #define BFA_FCDIAG_MOD(__bfa)	(&(__bfa)->modules.fcdiag)
@@ -717,5 +739,11 @@ bfa_status_t	bfa_fcdiag_queuetest(struct bfa_s *bfa, u32 ignore,
 			u32 queue, struct bfa_diag_qtest_result_s *result,
 			bfa_cb_diag_t cbfn, void *cbarg);
 bfa_status_t	bfa_fcdiag_lb_is_running(struct bfa_s *bfa);
+bfa_status_t	bfa_dport_enable(struct bfa_s *bfa, bfa_cb_diag_t cbfn,
+				 void *cbarg);
+bfa_status_t	bfa_dport_disable(struct bfa_s *bfa, bfa_cb_diag_t cbfn,
+				  void *cbarg);
+bfa_status_t	bfa_dport_get_state(struct bfa_s *bfa,
+				    enum bfa_dport_state *state);
 
 #endif /* __BFA_SVC_H__ */
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index c37494916a1a..895b0e516e07 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -63,9 +63,9 @@ int		max_rport_logins = BFA_FCS_MAX_RPORT_LOGINS;
 u32	bfi_image_cb_size, bfi_image_ct_size, bfi_image_ct2_size;
 u32	*bfi_image_cb, *bfi_image_ct, *bfi_image_ct2;
 
-#define BFAD_FW_FILE_CB		"cbfw.bin"
-#define BFAD_FW_FILE_CT		"ctfw.bin"
-#define BFAD_FW_FILE_CT2	"ct2fw.bin"
+#define BFAD_FW_FILE_CB		"cbfw-3.1.0.0.bin"
+#define BFAD_FW_FILE_CT		"ctfw-3.1.0.0.bin"
+#define BFAD_FW_FILE_CT2	"ct2fw-3.1.0.0.bin"
 
 static u32 *bfad_load_fwimg(struct pci_dev *pdev);
 static void bfad_free_fwimg(void);
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index 0afa39076cef..555e7db94a1c 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -33,7 +33,7 @@ bfad_iocmd_ioc_enable(struct bfad_s *bfad, void *cmd)
 	/* If IOC is not in disabled state - return */
 	if (!bfa_ioc_is_disabled(&bfad->bfa.ioc)) {
 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
-		iocmd->status = BFA_STATUS_IOC_FAILURE;
+		iocmd->status = BFA_STATUS_OK;
 		return rc;
 	}
 
@@ -54,6 +54,12 @@ bfad_iocmd_ioc_disable(struct bfad_s *bfad, void *cmd)
 	unsigned long	flags;
 
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	if (bfa_ioc_is_disabled(&bfad->bfa.ioc)) {
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+		iocmd->status = BFA_STATUS_OK;
+		return rc;
+	}
+
 	if (bfad->disable_active) {
 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 		return -EBUSY;
@@ -101,9 +107,10 @@ bfad_iocmd_ioc_get_info(struct bfad_s *bfad, void *cmd)
 
 	/* set adapter hw path */
 	strcpy(iocmd->adapter_hwpath, bfad->pci_name);
-	i = strlen(iocmd->adapter_hwpath) - 1;
-	while (iocmd->adapter_hwpath[i] != '.')
-		i--;
+	for (i = 0; i < BFA_STRING_32 && iocmd->adapter_hwpath[i] != ':'; i++)
+		;
+	for (; ++i < BFA_STRING_32 && iocmd->adapter_hwpath[i] != ':'; )
+		;
 	iocmd->adapter_hwpath[i] = '\0';
 	iocmd->status = BFA_STATUS_OK;
 	return 0;
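
The replacement loops keep everything up to the second ':' of the PCI name (so "0000:0a:00.0" becomes "0000:0a") instead of scanning backwards for the last '.'. A hypothetical equivalent with strchr(), bounds handling simplified:

#include <string.h>

/* Hypothetical equivalent of the two scan loops above: truncate a PCI
 * name such as "0000:0a:00.0" at its second ':'. */
static void truncate_at_second_colon(char *hwpath)
{
	char *p = strchr(hwpath, ':');

	if (p)
		p = strchr(p + 1, ':');
	if (p)
		*p = '\0';
}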
@@ -880,6 +887,19 @@ out:
 }
 
 int
+bfad_iocmd_qos_set_bw(struct bfad_s *bfad, void *pcmd)
+{
+	struct bfa_bsg_qos_bw_s *iocmd = (struct bfa_bsg_qos_bw_s *)pcmd;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_fcport_set_qos_bw(&bfad->bfa, &iocmd->qos_bw);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	return 0;
+}
+
+int
 bfad_iocmd_ratelim(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
 {
 	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
@@ -888,16 +908,22 @@ bfad_iocmd_ratelim(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
 
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
 
-	if (cmd == IOCMD_RATELIM_ENABLE)
-		fcport->cfg.ratelimit = BFA_TRUE;
-	else if (cmd == IOCMD_RATELIM_DISABLE)
-		fcport->cfg.ratelimit = BFA_FALSE;
+	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
+		(fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
+		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
+	else {
+		if (cmd == IOCMD_RATELIM_ENABLE)
+			fcport->cfg.ratelimit = BFA_TRUE;
+		else if (cmd == IOCMD_RATELIM_DISABLE)
+			fcport->cfg.ratelimit = BFA_FALSE;
 
-	if (fcport->cfg.trl_def_speed == BFA_PORT_SPEED_UNKNOWN)
-		fcport->cfg.trl_def_speed = BFA_PORT_SPEED_1GBPS;
+		if (fcport->cfg.trl_def_speed == BFA_PORT_SPEED_UNKNOWN)
+			fcport->cfg.trl_def_speed = BFA_PORT_SPEED_1GBPS;
+
+		iocmd->status = BFA_STATUS_OK;
+	}
 
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
-	iocmd->status = BFA_STATUS_OK;
 
 	return 0;
 }
@@ -919,8 +945,13 @@ bfad_iocmd_ratelim_speed(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
 		return 0;
 	}
 
-	fcport->cfg.trl_def_speed = iocmd->speed;
-	iocmd->status = BFA_STATUS_OK;
+	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
+		(fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
+		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
+	else {
+		fcport->cfg.trl_def_speed = iocmd->speed;
+		iocmd->status = BFA_STATUS_OK;
+	}
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 
 	return 0;
@@ -1167,8 +1198,8 @@ bfad_iocmd_pcifn_create(struct bfad_s *bfad, void *cmd)
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
 	iocmd->status = bfa_ablk_pf_create(&bfad->bfa.modules.ablk,
 				&iocmd->pcifn_id, iocmd->port,
-				iocmd->pcifn_class, iocmd->bandwidth,
-				bfad_hcb_comp, &fcomp);
+				iocmd->pcifn_class, iocmd->bw_min,
+				iocmd->bw_max, bfad_hcb_comp, &fcomp);
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 	if (iocmd->status != BFA_STATUS_OK)
 		goto out;
@@ -1211,8 +1242,8 @@ bfad_iocmd_pcifn_bw(struct bfad_s *bfad, void *cmd)
 	init_completion(&fcomp.comp);
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
 	iocmd->status = bfa_ablk_pf_update(&bfad->bfa.modules.ablk,
-				iocmd->pcifn_id, iocmd->bandwidth,
-				bfad_hcb_comp, &fcomp);
+				iocmd->pcifn_id, iocmd->bw_min,
+				iocmd->bw_max, bfad_hcb_comp, &fcomp);
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 	bfa_trc(bfad, iocmd->status);
 	if (iocmd->status != BFA_STATUS_OK)
@@ -1736,6 +1767,52 @@ bfad_iocmd_diag_lb_stat(struct bfad_s *bfad, void *cmd)
 }
 
 int
+bfad_iocmd_diag_cfg_dport(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
+{
+	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
+	unsigned long	flags;
+	struct bfad_hal_comp fcomp;
+
+	init_completion(&fcomp.comp);
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	if (cmd == IOCMD_DIAG_DPORT_ENABLE)
+		iocmd->status = bfa_dport_enable(&bfad->bfa,
+					bfad_hcb_comp, &fcomp);
+	else if (cmd == IOCMD_DIAG_DPORT_DISABLE)
+		iocmd->status = bfa_dport_disable(&bfad->bfa,
+					bfad_hcb_comp, &fcomp);
+	else {
+		bfa_trc(bfad, 0);
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+		return -EINVAL;
+	}
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	if (iocmd->status != BFA_STATUS_OK)
+		bfa_trc(bfad, iocmd->status);
+	else {
+		wait_for_completion(&fcomp.comp);
+		iocmd->status = fcomp.status;
+	}
+
+	return 0;
+}
+
+int
+bfad_iocmd_diag_dport_get_state(struct bfad_s *bfad, void *pcmd)
+{
+	struct bfa_bsg_diag_dport_get_state_s *iocmd =
+			(struct bfa_bsg_diag_dport_get_state_s *)pcmd;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_dport_get_state(&bfad->bfa, &iocmd->state);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	return 0;
+}
+
+int
 bfad_iocmd_phy_get_attr(struct bfad_s *bfad, void *cmd)
 {
 	struct bfa_bsg_phy_attr_s *iocmd =
@@ -2052,7 +2129,7 @@ bfad_iocmd_boot_cfg(struct bfad_s *bfad, void *cmd)
 	init_completion(&fcomp.comp);
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
 	iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
-			BFA_FLASH_PART_BOOT, PCI_FUNC(bfad->pcidev->devfn),
+			BFA_FLASH_PART_BOOT, bfad->bfa.ioc.port_id,
 			&iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0,
 			bfad_hcb_comp, &fcomp);
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
@@ -2074,7 +2151,7 @@ bfad_iocmd_boot_query(struct bfad_s *bfad, void *cmd)
 	init_completion(&fcomp.comp);
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
 	iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa),
-			BFA_FLASH_PART_BOOT, PCI_FUNC(bfad->pcidev->devfn),
+			BFA_FLASH_PART_BOOT, bfad->bfa.ioc.port_id,
 			&iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0,
 			bfad_hcb_comp, &fcomp);
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
@@ -2161,22 +2238,31 @@ bfad_iocmd_cfg_trunk(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
 
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
 
-	if (v_cmd == IOCMD_TRUNK_ENABLE) {
-		trunk->attr.state = BFA_TRUNK_OFFLINE;
-		bfa_fcport_disable(&bfad->bfa);
-		fcport->cfg.trunked = BFA_TRUE;
-	} else if (v_cmd == IOCMD_TRUNK_DISABLE) {
-		trunk->attr.state = BFA_TRUNK_DISABLED;
-		bfa_fcport_disable(&bfad->bfa);
-		fcport->cfg.trunked = BFA_FALSE;
-	}
+	if (bfa_fcport_is_dport(&bfad->bfa)) {
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+		iocmd->status = BFA_STATUS_DPORT_ERR;
+		return 0;
+	}
 
-	if (!bfa_fcport_is_disabled(&bfad->bfa))
-		bfa_fcport_enable(&bfad->bfa);
+	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) ||
+		(fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
+		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
+	else {
+		if (v_cmd == IOCMD_TRUNK_ENABLE) {
+			trunk->attr.state = BFA_TRUNK_OFFLINE;
+			bfa_fcport_disable(&bfad->bfa);
+			fcport->cfg.trunked = BFA_TRUE;
+		} else if (v_cmd == IOCMD_TRUNK_DISABLE) {
+			trunk->attr.state = BFA_TRUNK_DISABLED;
+			bfa_fcport_disable(&bfad->bfa);
+			fcport->cfg.trunked = BFA_FALSE;
+		}
+
+		if (!bfa_fcport_is_disabled(&bfad->bfa))
+			bfa_fcport_enable(&bfad->bfa);
+
+		iocmd->status = BFA_STATUS_OK;
+	}
 
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 
-	iocmd->status = BFA_STATUS_OK;
 	return 0;
 }
 
@@ -2189,12 +2275,17 @@ bfad_iocmd_trunk_get_attr(struct bfad_s *bfad, void *cmd)
 	unsigned long	flags;
 
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
-	memcpy((void *)&iocmd->attr, (void *)&trunk->attr,
-		sizeof(struct bfa_trunk_attr_s));
-	iocmd->attr.port_id = bfa_lps_get_base_pid(&bfad->bfa);
+	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) ||
+		(fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
+		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
+	else {
+		memcpy((void *)&iocmd->attr, (void *)&trunk->attr,
+			sizeof(struct bfa_trunk_attr_s));
+		iocmd->attr.port_id = bfa_lps_get_base_pid(&bfad->bfa);
+		iocmd->status = BFA_STATUS_OK;
+	}
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 
-	iocmd->status = BFA_STATUS_OK;
 	return 0;
 }
 
@@ -2207,14 +2298,22 @@ bfad_iocmd_qos(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
 
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
 	if (bfa_ioc_get_type(&bfad->bfa.ioc) == BFA_IOC_TYPE_FC) {
-		if (v_cmd == IOCMD_QOS_ENABLE)
-			fcport->cfg.qos_enabled = BFA_TRUE;
-		else if (v_cmd == IOCMD_QOS_DISABLE)
-			fcport->cfg.qos_enabled = BFA_FALSE;
+		if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
+		(fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
+			iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
+		else {
+			if (v_cmd == IOCMD_QOS_ENABLE)
+				fcport->cfg.qos_enabled = BFA_TRUE;
+			else if (v_cmd == IOCMD_QOS_DISABLE) {
+				fcport->cfg.qos_enabled = BFA_FALSE;
+				fcport->cfg.qos_bw.high = BFA_QOS_BW_HIGH;
+				fcport->cfg.qos_bw.med = BFA_QOS_BW_MED;
+				fcport->cfg.qos_bw.low = BFA_QOS_BW_LOW;
+			}
+			iocmd->status = BFA_STATUS_OK;
+		}
 	}
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 
-	iocmd->status = BFA_STATUS_OK;
 	return 0;
 }
 
@@ -2226,11 +2325,21 @@ bfad_iocmd_qos_get_attr(struct bfad_s *bfad, void *cmd)
 	unsigned long	flags;
 
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
-	iocmd->attr.state = fcport->qos_attr.state;
-	iocmd->attr.total_bb_cr = be32_to_cpu(fcport->qos_attr.total_bb_cr);
+	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
+		(fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
+		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
+	else {
+		iocmd->attr.state = fcport->qos_attr.state;
+		iocmd->attr.total_bb_cr =
+			be32_to_cpu(fcport->qos_attr.total_bb_cr);
+		iocmd->attr.qos_bw.high = fcport->cfg.qos_bw.high;
+		iocmd->attr.qos_bw.med = fcport->cfg.qos_bw.med;
+		iocmd->attr.qos_bw.low = fcport->cfg.qos_bw.low;
+		iocmd->attr.qos_bw_op = fcport->qos_attr.qos_bw_op;
+		iocmd->status = BFA_STATUS_OK;
+	}
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 
-	iocmd->status = BFA_STATUS_OK;
 	return 0;
 }
 
@@ -2274,6 +2383,7 @@ bfad_iocmd_qos_get_stats(struct bfad_s *bfad, void *cmd)
 	struct bfad_hal_comp fcomp;
 	unsigned long	flags;
 	struct bfa_cb_pending_q_s cb_qe;
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
 
 	init_completion(&fcomp.comp);
 	bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
@@ -2281,7 +2391,11 @@ bfad_iocmd_qos_get_stats(struct bfad_s *bfad, void *cmd)
 
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
 	WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc));
-	iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe);
+	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
+		(fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
+		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
+	else
+		iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe);
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 	if (iocmd->status != BFA_STATUS_OK) {
 		bfa_trc(bfad, iocmd->status);
@@ -2300,6 +2414,7 @@ bfad_iocmd_qos_reset_stats(struct bfad_s *bfad, void *cmd)
 	struct bfad_hal_comp fcomp;
 	unsigned long	flags;
 	struct bfa_cb_pending_q_s cb_qe;
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
 
 	init_completion(&fcomp.comp);
 	bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
@@ -2307,7 +2422,11 @@ bfad_iocmd_qos_reset_stats(struct bfad_s *bfad, void *cmd)
 
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
 	WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc));
-	iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe);
+	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
+		(fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
+		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
+	else
+		iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe);
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 	if (iocmd->status != BFA_STATUS_OK) {
 		bfa_trc(bfad, iocmd->status);
@@ -2435,6 +2554,139 @@ bfad_iocmd_fcpim_cfg_lunmask(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
 	return 0;
 }
 
+int
+bfad_iocmd_fcpim_throttle_query(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_fcpim_throttle_s *iocmd =
+			(struct bfa_bsg_fcpim_throttle_s *)cmd;
+	unsigned long   flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_fcpim_throttle_get(&bfad->bfa,
+				(void *)&iocmd->throttle);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	return 0;
+}
+
+int
+bfad_iocmd_fcpim_throttle_set(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_fcpim_throttle_s *iocmd =
+			(struct bfa_bsg_fcpim_throttle_s *)cmd;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_fcpim_throttle_set(&bfad->bfa,
+				iocmd->throttle.cfg_value);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	return 0;
+}
+
+int
+bfad_iocmd_tfru_read(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_tfru_s *iocmd =
+			(struct bfa_bsg_tfru_s *)cmd;
+	struct bfad_hal_comp fcomp;
+	unsigned long flags = 0;
+
+	init_completion(&fcomp.comp);
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_tfru_read(BFA_FRU(&bfad->bfa),
+				&iocmd->data, iocmd->len, iocmd->offset,
+				bfad_hcb_comp, &fcomp);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	if (iocmd->status == BFA_STATUS_OK) {
+		wait_for_completion(&fcomp.comp);
+		iocmd->status = fcomp.status;
+	}
+
+	return 0;
+}
+
+int
+bfad_iocmd_tfru_write(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_tfru_s *iocmd =
+			(struct bfa_bsg_tfru_s *)cmd;
+	struct bfad_hal_comp fcomp;
+	unsigned long flags = 0;
+
+	init_completion(&fcomp.comp);
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_tfru_write(BFA_FRU(&bfad->bfa),
+				&iocmd->data, iocmd->len, iocmd->offset,
+				bfad_hcb_comp, &fcomp);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	if (iocmd->status == BFA_STATUS_OK) {
+		wait_for_completion(&fcomp.comp);
+		iocmd->status = fcomp.status;
+	}
+
+	return 0;
+}
+
+int
+bfad_iocmd_fruvpd_read(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_fruvpd_s *iocmd =
+			(struct bfa_bsg_fruvpd_s *)cmd;
+	struct bfad_hal_comp fcomp;
+	unsigned long flags = 0;
+
+	init_completion(&fcomp.comp);
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_fruvpd_read(BFA_FRU(&bfad->bfa),
+				&iocmd->data, iocmd->len, iocmd->offset,
+				bfad_hcb_comp, &fcomp);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	if (iocmd->status == BFA_STATUS_OK) {
+		wait_for_completion(&fcomp.comp);
+		iocmd->status = fcomp.status;
+	}
+
+	return 0;
+}
+
+int
+bfad_iocmd_fruvpd_update(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_fruvpd_s *iocmd =
+			(struct bfa_bsg_fruvpd_s *)cmd;
+	struct bfad_hal_comp fcomp;
+	unsigned long flags = 0;
+
+	init_completion(&fcomp.comp);
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_fruvpd_update(BFA_FRU(&bfad->bfa),
+				&iocmd->data, iocmd->len, iocmd->offset,
+				bfad_hcb_comp, &fcomp);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	if (iocmd->status == BFA_STATUS_OK) {
+		wait_for_completion(&fcomp.comp);
+		iocmd->status = fcomp.status;
+	}
+
+	return 0;
+}
+
+int
+bfad_iocmd_fruvpd_get_max_size(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_fruvpd_max_size_s *iocmd =
+			(struct bfa_bsg_fruvpd_max_size_s *)cmd;
+	unsigned long flags = 0;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_fruvpd_get_max_size(BFA_FRU(&bfad->bfa),
+						&iocmd->max_size);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	return 0;
+}
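
The four tfru/fruvpd handlers above repeat one pattern: take bfad_lock, queue the asynchronous FRU operation, drop the lock, and wait for the completion only if the submit succeeded. A hypothetical factoring (fru_submit_t and fru_run_locked() are illustrative names, not driver API):

/* Hypothetical helper capturing the shared lock/submit/unlock/wait
 * pattern of the FRU handlers above. */
typedef bfa_status_t (*fru_submit_t)(struct bfad_s *bfad,
				     struct bfad_hal_comp *fcomp);

static bfa_status_t
fru_run_locked(struct bfad_s *bfad, fru_submit_t submit)
{
	struct bfad_hal_comp fcomp;
	unsigned long flags;
	bfa_status_t status;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	status = submit(bfad, &fcomp);	/* queues the async operation */
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (status == BFA_STATUS_OK) {
		wait_for_completion(&fcomp.comp);
		status = fcomp.status;
	}
	return status;
}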
+
 static int
 bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
 		unsigned int payload_len)
@@ -2660,6 +2912,13 @@ bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
 	case IOCMD_DIAG_LB_STAT:
 		rc = bfad_iocmd_diag_lb_stat(bfad, iocmd);
 		break;
+	case IOCMD_DIAG_DPORT_ENABLE:
+	case IOCMD_DIAG_DPORT_DISABLE:
+		rc = bfad_iocmd_diag_cfg_dport(bfad, cmd, iocmd);
+		break;
+	case IOCMD_DIAG_DPORT_GET_STATE:
+		rc = bfad_iocmd_diag_dport_get_state(bfad, iocmd);
+		break;
 	case IOCMD_PHY_GET_ATTR:
 		rc = bfad_iocmd_phy_get_attr(bfad, iocmd);
 		break;
@@ -2741,6 +3000,9 @@ bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
 	case IOCMD_QOS_RESET_STATS:
 		rc = bfad_iocmd_qos_reset_stats(bfad, iocmd);
 		break;
+	case IOCMD_QOS_SET_BW:
+		rc = bfad_iocmd_qos_set_bw(bfad, iocmd);
+		break;
 	case IOCMD_VF_GET_STATS:
 		rc = bfad_iocmd_vf_get_stats(bfad, iocmd);
 		break;
@@ -2759,6 +3021,29 @@ bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
 	case IOCMD_FCPIM_LUNMASK_DELETE:
 		rc = bfad_iocmd_fcpim_cfg_lunmask(bfad, iocmd, cmd);
 		break;
+	case IOCMD_FCPIM_THROTTLE_QUERY:
+		rc = bfad_iocmd_fcpim_throttle_query(bfad, iocmd);
+		break;
+	case IOCMD_FCPIM_THROTTLE_SET:
+		rc = bfad_iocmd_fcpim_throttle_set(bfad, iocmd);
+		break;
+	/* TFRU */
+	case IOCMD_TFRU_READ:
+		rc = bfad_iocmd_tfru_read(bfad, iocmd);
+		break;
+	case IOCMD_TFRU_WRITE:
+		rc = bfad_iocmd_tfru_write(bfad, iocmd);
+		break;
+	/* FRU */
+	case IOCMD_FRUVPD_READ:
+		rc = bfad_iocmd_fruvpd_read(bfad, iocmd);
+		break;
+	case IOCMD_FRUVPD_UPDATE:
+		rc = bfad_iocmd_fruvpd_update(bfad, iocmd);
+		break;
+	case IOCMD_FRUVPD_GET_MAX_SIZE:
+		rc = bfad_iocmd_fruvpd_get_max_size(bfad, iocmd);
+		break;
 	default:
 		rc = -EINVAL;
 		break;
diff --git a/drivers/scsi/bfa/bfad_bsg.h b/drivers/scsi/bfa/bfad_bsg.h
index 8c569ddb750d..15e1fc8e796b 100644
--- a/drivers/scsi/bfa/bfad_bsg.h
+++ b/drivers/scsi/bfa/bfad_bsg.h
@@ -141,6 +141,17 @@ enum {
 	IOCMD_FCPIM_LUNMASK_QUERY,
 	IOCMD_FCPIM_LUNMASK_ADD,
 	IOCMD_FCPIM_LUNMASK_DELETE,
+	IOCMD_DIAG_DPORT_ENABLE,
+	IOCMD_DIAG_DPORT_DISABLE,
+	IOCMD_DIAG_DPORT_GET_STATE,
+	IOCMD_QOS_SET_BW,
+	IOCMD_FCPIM_THROTTLE_QUERY,
+	IOCMD_FCPIM_THROTTLE_SET,
+	IOCMD_TFRU_READ,
+	IOCMD_TFRU_WRITE,
+	IOCMD_FRUVPD_READ,
+	IOCMD_FRUVPD_UPDATE,
+	IOCMD_FRUVPD_GET_MAX_SIZE,
 };
 
 struct bfa_bsg_gen_s {
@@ -463,7 +474,8 @@ struct bfa_bsg_pcifn_s {
 	bfa_status_t		status;
 	u16			bfad_num;
 	u16			pcifn_id;
-	u32			bandwidth;
+	u16			bw_min;
+	u16			bw_max;
 	u8			port;
 	enum bfi_pcifn_class	pcifn_class;
 	u8			rsvd[1];
@@ -613,6 +625,13 @@ struct bfa_bsg_diag_lb_stat_s {
 	u16		rsvd;
 };
 
+struct bfa_bsg_diag_dport_get_state_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		rsvd;
+	enum bfa_dport_state state;
+};
+
 struct bfa_bsg_phy_attr_s {
 	bfa_status_t	status;
 	u16	bfad_num;
@@ -694,6 +713,13 @@ struct bfa_bsg_qos_vc_attr_s {
 	struct bfa_qos_vc_attr_s attr;
 };
 
+struct bfa_bsg_qos_bw_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		rsvd;
+	struct bfa_qos_bw_s qos_bw;
+};
+
 struct bfa_bsg_vf_stats_s {
 	bfa_status_t	status;
 	u16		bfad_num;
@@ -722,6 +748,41 @@ struct bfa_bsg_fcpim_lunmask_s {
 	struct scsi_lun	lun;
 };
 
+struct bfa_bsg_fcpim_throttle_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		vf_id;
+	struct bfa_defs_fcpim_throttle_s throttle;
+};
+
+#define BFA_TFRU_DATA_SIZE		64
+#define BFA_MAX_FRUVPD_TRANSFER_SIZE	0x1000
+
+struct bfa_bsg_tfru_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		rsvd;
+	u32		offset;
+	u32		len;
+	u8		data[BFA_TFRU_DATA_SIZE];
+};
+
+struct bfa_bsg_fruvpd_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		rsvd;
+	u32		offset;
+	u32		len;
+	u8		data[BFA_MAX_FRUVPD_TRANSFER_SIZE];
+};
+
+struct bfa_bsg_fruvpd_max_size_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		rsvd;
+	u32		max_size;
+};
+
 struct bfa_bsg_fcpt_s {
 	bfa_status_t    status;
 	u16		vf_id;
diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h
index 1840651ce1d4..0c64a04f01fa 100644
--- a/drivers/scsi/bfa/bfad_drv.h
+++ b/drivers/scsi/bfa/bfad_drv.h
@@ -57,7 +57,7 @@
 #ifdef BFA_DRIVER_VERSION
 #define BFAD_DRIVER_VERSION    BFA_DRIVER_VERSION
 #else
-#define BFAD_DRIVER_VERSION    "3.1.2.0"
+#define BFAD_DRIVER_VERSION    "3.1.2.1"
 #endif
 
 #define BFAD_PROTO_NAME FCPI_NAME
diff --git a/drivers/scsi/bfa/bfi.h b/drivers/scsi/bfa/bfi.h
index b2ba0b2e91b2..57b146bca18c 100644
--- a/drivers/scsi/bfa/bfi.h
+++ b/drivers/scsi/bfa/bfi.h
@@ -210,7 +210,8 @@ enum bfi_mclass {
 	BFI_MC_PORT		= 21,	/*  Physical port		    */
 	BFI_MC_SFP		= 22,	/*  SFP module	*/
 	BFI_MC_PHY		= 25,   /*  External PHY message class	*/
-	BFI_MC_MAX		= 32
+	BFI_MC_FRU		= 34,
+	BFI_MC_MAX		= 35
 };
 
 #define BFI_IOC_MAX_CQS		4
@@ -288,6 +289,9 @@ struct bfi_ioc_attr_s {
 	char		optrom_version[BFA_VERSION_LEN];
 	struct		bfa_mfg_vpd_s	vpd;
 	u32	card_type;	/*  card type			*/
+	u8	mfg_day;	/* manufacturing day */
+	u8	mfg_month;	/* manufacturing month */
+	u16	mfg_year;	/* manufacturing year */
 };
 
 /*
@@ -687,7 +691,8 @@ struct bfi_ablk_h2i_pf_req_s {
 	u8			pcifn;
 	u8			port;
 	u16			pers;
-	u32			bw;
+	u16			bw_min; /* percent BW @ max speed */
+	u16			bw_max; /* percent BW @ max speed */
 };
 
 /* BFI_ABLK_H2I_OPTROM_ENABLE, BFI_ABLK_H2I_OPTROM_DISABLE */
@@ -957,6 +962,7 @@ enum bfi_diag_h2i {
 	BFI_DIAG_H2I_TEMPSENSOR = 4,
 	BFI_DIAG_H2I_LEDTEST = 5,
 	BFI_DIAG_H2I_QTEST      = 6,
+	BFI_DIAG_H2I_DPORT	= 7,
 };
 
 enum bfi_diag_i2h {
@@ -966,6 +972,7 @@ enum bfi_diag_i2h {
 	BFI_DIAG_I2H_TEMPSENSOR = BFA_I2HM(BFI_DIAG_H2I_TEMPSENSOR),
 	BFI_DIAG_I2H_LEDTEST = BFA_I2HM(BFI_DIAG_H2I_LEDTEST),
 	BFI_DIAG_I2H_QTEST      = BFA_I2HM(BFI_DIAG_H2I_QTEST),
+	BFI_DIAG_I2H_DPORT	= BFA_I2HM(BFI_DIAG_H2I_DPORT),
 };
 
 #define BFI_DIAG_MAX_SGES	2
@@ -1052,6 +1059,23 @@ struct bfi_diag_qtest_req_s {
 #define bfi_diag_qtest_rsp_t struct bfi_diag_qtest_req_s
 
 /*
+ *	D-port test
+ */
+enum bfi_dport_req {
+	BFI_DPORT_DISABLE	= 0,	/* disable dport request	*/
+	BFI_DPORT_ENABLE	= 1,	/* enable dport request		*/
+};
+
+struct bfi_diag_dport_req_s {
+	struct bfi_mhdr_s	mh;	/* 4 bytes                      */
+	u8			req;    /* request 1: enable 0: disable */
+	u8			status; /* reply status			*/
+	u8			rsvd[2];
+	u32			msgtag; /* msgtag for reply		*/
+};
+#define bfi_diag_dport_rsp_t struct bfi_diag_dport_req_s
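
Because bfi.h packs these structures and the reply reuses the request layout (the typedef above), the message is 12 bytes on the wire: the 4-byte header plus req, status, two reserved bytes, and the 4-byte msgtag. A hypothetical sanity check, to be placed inside any function:

	/* Hypothetical compile-time check of the shared 12-byte layout,
	 * assuming the 4-byte bfi_mhdr_s noted in the comment above. */
	BUILD_BUG_ON(sizeof(struct bfi_diag_dport_req_s) != 12);
	BUILD_BUG_ON(sizeof(struct bfi_diag_dport_req_s) !=
		     sizeof(bfi_diag_dport_rsp_t));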
+
+/*
  *	PHY module specific
  */
 enum bfi_phy_h2i_msgs_e {
@@ -1147,6 +1171,50 @@ struct bfi_phy_write_rsp_s {
 	u32			length;
 };
 
+enum bfi_fru_h2i_msgs {
+	BFI_FRUVPD_H2I_WRITE_REQ = 1,
+	BFI_FRUVPD_H2I_READ_REQ = 2,
+	BFI_TFRU_H2I_WRITE_REQ = 3,
+	BFI_TFRU_H2I_READ_REQ = 4,
+};
+
+enum bfi_fru_i2h_msgs {
+	BFI_FRUVPD_I2H_WRITE_RSP = BFA_I2HM(1),
+	BFI_FRUVPD_I2H_READ_RSP = BFA_I2HM(2),
+	BFI_TFRU_I2H_WRITE_RSP = BFA_I2HM(3),
+	BFI_TFRU_I2H_READ_RSP = BFA_I2HM(4),
+};
+
+/*
+ * FRU write request
+ */
+struct bfi_fru_write_req_s {
+	struct bfi_mhdr_s	mh;	/* Common msg header */
+	u8			last;
+	u8			rsv[3];
+	u32			offset;
+	u32			length;
+	struct bfi_alen_s	alen;
+};
+
+/*
+ * FRU read request
+ */
+struct bfi_fru_read_req_s {
+	struct bfi_mhdr_s	mh;	/* Common msg header */
+	u32			offset;
+	u32			length;
+	struct bfi_alen_s	alen;
+};
+
+/*
+ * FRU response
+ */
+struct bfi_fru_rsp_s {
+	struct bfi_mhdr_s	mh;	/* Common msg header */
+	u32			status;
+	u32			length;
+};
 #pragma pack()
 
 #endif /* __BFI_H__ */
diff --git a/drivers/scsi/bfa/bfi_ms.h b/drivers/scsi/bfa/bfi_ms.h
index d4220e13cafa..5ae2c167b2c8 100644
--- a/drivers/scsi/bfa/bfi_ms.h
+++ b/drivers/scsi/bfa/bfi_ms.h
@@ -426,6 +426,7 @@ struct bfi_lps_login_req_s {
 	u8		auth_en;
 	u8		lps_role;
 	u8		bb_scn;
+	u32		vvl_flag;
 };
 
 struct bfi_lps_login_rsp_s {
@@ -499,6 +500,9 @@ enum bfi_rport_i2h_msgs {
 	BFI_RPORT_I2H_CREATE_RSP = BFA_I2HM(1),
 	BFI_RPORT_I2H_DELETE_RSP = BFA_I2HM(2),
 	BFI_RPORT_I2H_QOS_SCN    = BFA_I2HM(3),
+	BFI_RPORT_I2H_LIP_SCN_ONLINE =	BFA_I2HM(4),
+	BFI_RPORT_I2H_LIP_SCN_OFFLINE = BFA_I2HM(5),
+	BFI_RPORT_I2H_NO_DEV	= BFA_I2HM(6),
 };
 
 struct bfi_rport_create_req_s {
@@ -551,6 +555,14 @@ struct bfi_rport_qos_scn_s {
 	struct bfa_rport_qos_attr_s new_qos_attr;  /* New QoS Attributes */
 };
 
+struct bfi_rport_lip_scn_s {
+	struct bfi_mhdr_s  mh;		/*!< common msg header	*/
+	u16	bfa_handle;	/*!< host rport handle	*/
+	u8		status;		/*!< scn online status	*/
+	u8		rsvd;
+	struct bfa_fcport_loop_info_s	loop_info;
+};
+
 union bfi_rport_h2i_msg_u {
 	struct bfi_msg_s		*msg;
 	struct bfi_rport_create_req_s	*create_req;
@@ -563,6 +575,7 @@ union bfi_rport_i2h_msg_u {
 	struct bfi_rport_create_rsp_s	*create_rsp;
 	struct bfi_rport_delete_rsp_s	*delete_rsp;
 	struct bfi_rport_qos_scn_s	*qos_scn_evt;
+	struct bfi_rport_lip_scn_s	*lip_scn;
 };
 
 /*
@@ -828,6 +841,7 @@ enum bfi_tskim_status {
 	 */
 	BFI_TSKIM_STS_TIMEOUT  = 10,	/*  TM request timedout	*/
 	BFI_TSKIM_STS_ABORTED  = 11,	/*  Aborted on host request */
+	BFI_TSKIM_STS_UTAG     = 12,	/*  unknown tag for request */
 };
 
 struct bfi_tskim_rsp_s {
diff --git a/drivers/scsi/bfa/bfi_reg.h b/drivers/scsi/bfa/bfi_reg.h
index ed5f159e1867..99133bcf53f9 100644
--- a/drivers/scsi/bfa/bfi_reg.h
+++ b/drivers/scsi/bfa/bfi_reg.h
@@ -338,6 +338,7 @@ enum {
 #define __A2T_AHB_LOAD			0x00000800
 #define __WGN_READY			0x00000400
 #define __GLBL_PF_VF_CFG_RDY		0x00000200
+#define CT2_NFC_STS_REG			0x00027410
 #define CT2_NFC_CSR_CLR_REG		0x00027420
 #define CT2_NFC_CSR_SET_REG		0x00027424
 #define __HALT_NFC_CONTROLLER		0x00000002
@@ -355,6 +356,8 @@ enum {
 	(CT2_CSI_MAC0_CONTROL_REG +	\
 	(__n) * (CT2_CSI_MAC1_CONTROL_REG - CT2_CSI_MAC0_CONTROL_REG))
 
+#define CT2_NFC_FLASH_STS_REG		0x00014834
+#define __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS	0x00000020
 /*
  * Name semaphore registers based on usage
  */
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 078d262ac7cc..666b7ac4475f 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -1643,7 +1643,7 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
 	skb_reset_network_header(skb);
 	skb->mac_len = elen;
 	skb->protocol = htons(ETH_P_FCOE);
-	skb->priority = port->priority;
+	skb->priority = fcoe->priority;
 
 	if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN &&
 	    fcoe->realdev->features & NETIF_F_HW_VLAN_TX) {
@@ -1917,7 +1917,6 @@ static int fcoe_dcb_app_notification(struct notifier_block *notifier,
 	struct fcoe_ctlr *ctlr;
 	struct fcoe_interface *fcoe;
 	struct net_device *netdev;
-	struct fcoe_port *port;
 	int prio;
 
 	if (entry->app.selector != DCB_APP_IDTYPE_ETHTYPE)
@@ -1946,10 +1945,8 @@ static int fcoe_dcb_app_notification(struct notifier_block *notifier,
 	    entry->app.protocol == ETH_P_FCOE)
 		ctlr->priority = prio;
 
-	if (entry->app.protocol == ETH_P_FCOE) {
-		port = lport_priv(ctlr->lp);
-		port->priority = prio;
-	}
+	if (entry->app.protocol == ETH_P_FCOE)
+		fcoe->priority = prio;
 
 	return NOTIFY_OK;
 }
@@ -2180,7 +2177,6 @@ static void fcoe_dcb_create(struct fcoe_interface *fcoe)
 	u8 fup, up;
 	struct net_device *netdev = fcoe->realdev;
 	struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
-	struct fcoe_port *port = lport_priv(ctlr->lp);
 	struct dcb_app app = {
 				.priority = 0,
 				.protocol = ETH_P_FCOE
@@ -2202,8 +2198,8 @@ static void fcoe_dcb_create(struct fcoe_interface *fcoe)
 			fup = dcb_getapp(netdev, &app);
 		}
 
-		port->priority = ffs(up) ? ffs(up) - 1 : 0;
-		ctlr->priority = ffs(fup) ? ffs(fup) - 1 : port->priority;
+		fcoe->priority = ffs(up) ? ffs(up) - 1 : 0;
+		ctlr->priority = ffs(fup) ? ffs(fup) - 1 : fcoe->priority;
 	}
 #endif
 }
diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h
index a624add4f8ec..b42dc32cb5eb 100644
--- a/drivers/scsi/fcoe/fcoe.h
+++ b/drivers/scsi/fcoe/fcoe.h
@@ -71,6 +71,7 @@ do {                                                            	\
  * @oem:	      The offload exchange manager for all local port
  *		      instances associated with this port
  * @removed:	      Indicates fcoe interface removed from net device
+ * @priority:	      Priority for the FCoE packet (DCB)
  * This structure is 1:1 with a net device.
  */
 struct fcoe_interface {
@@ -81,6 +82,7 @@ struct fcoe_interface {
 	struct packet_type fip_packet_type;
 	struct fc_exch_mgr *oem;
 	u8	removed;
+	u8	priority;
 };
 
 #define fcoe_to_ctlr(x)						\
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 14243fa5f8e8..fcb9d0b20ee4 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -851,7 +851,8 @@ static void fc_fcp_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
 			fc_rp_info = (struct fcp_resp_rsp_info *)(rp_ex + 1);
 			if (flags & FCP_RSP_LEN_VAL) {
 				respl = ntohl(rp_ex->fr_rsp_len);
-				if (respl != sizeof(*fc_rp_info))
+				if ((respl != FCP_RESP_RSP_INFO_LEN4) &&
+				    (respl != FCP_RESP_RSP_INFO_LEN8))
 					goto len_err;
 				if (fsp->wait_for_comp) {
 					/* Abuse cdb_status for rsp code */
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index a184c2443a64..69b59935b53f 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -27,6 +27,8 @@
 
 struct lpfc_sli2_slim;
 
+#define ELX_MODEL_NAME_SIZE	80
+
 #define LPFC_PCI_DEV_LP		0x1
 #define LPFC_PCI_DEV_OC		0x2
 
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index b032562aa0d9..ad16e54ac383 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -3935,6 +3935,12 @@ MODULE_PARM_DESC(lpfc_fcp_look_ahead, "Look ahead for completions");
 # 	- Only meaningful if BG is turned on (lpfc_enable_bg=1).
 #	- Allows you to ultimately specify which profiles to use
 #	- Default will result in registering capabilities for all profiles.
+#	- SHOST_DIF_TYPE1_PROTECTION	1
+#		HBA supports T10 DIF Type 1: HBA to Target Type 1 Protection
+#	- SHOST_DIX_TYPE0_PROTECTION	8
+#		HBA supports DIX Type 0: Host to HBA protection only
+#	- SHOST_DIX_TYPE1_PROTECTION	16
+#		HBA supports DIX Type 1: Host to HBA  Type 1 protection
 #
 */
 unsigned int lpfc_prot_mask = SHOST_DIF_TYPE1_PROTECTION |
@@ -3947,7 +3953,7 @@ MODULE_PARM_DESC(lpfc_prot_mask, "host protection mask");
 /*
 # lpfc_prot_guard: i
 #	- Bit mask of protection guard types to register with the SCSI mid-layer
-# 	- Guard types are currently either 1) IP checksum 2) T10-DIF CRC
+#	- Guard types are currently either 1) T10-DIF CRC 2) IP checksum
 #	- Allows you to ultimately specify which profiles to use
 #	- Default will result in registering capabilities for all guard types
 #
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index e470c489de07..4380a44000bc 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -467,3 +467,4 @@ int lpfc_sli4_read_config(struct lpfc_hba *);
 void lpfc_sli4_node_prep(struct lpfc_hba *);
 int lpfc_sli4_xri_sgl_update(struct lpfc_hba *);
 void lpfc_free_sgl_list(struct lpfc_hba *, struct list_head *);
+uint32_t lpfc_sli_port_speed_get(struct lpfc_hba *);
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index cfe533bc9790..f19e9b6f9f13 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -809,6 +809,8 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	phba->fc_ratov = FF_DEF_RATOV;
 	rc = memcmp(&vport->fc_portname, &sp->portName,
 		    sizeof(vport->fc_portname));
+	memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
+
 	if (rc >= 0) {
 		/* This side will initiate the PLOGI */
 		spin_lock_irq(shost->host_lock);
@@ -3160,7 +3162,8 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 				retry = 1;
 				break;
 			}
-			if (cmd == ELS_CMD_PLOGI) {
+			if ((cmd == ELS_CMD_PLOGI) ||
+			    (cmd == ELS_CMD_PRLI)) {
 				delay = 1000;
 				maxretry = lpfc_max_els_tries + 1;
 				retry = 1;
@@ -3305,7 +3308,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 			ndlp->nlp_prev_state = ndlp->nlp_state;
 			if (cmd == ELS_CMD_PRLI)
 				lpfc_nlp_set_state(vport, ndlp,
-					NLP_STE_REG_LOGIN_ISSUE);
+					NLP_STE_PRLI_ISSUE);
 			else
 				lpfc_nlp_set_state(vport, ndlp,
 					NLP_STE_NPR_NODE);
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index e9845d2ecf10..d7096ad94d3f 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1506,9 +1506,10 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
 		}
 	}
 
-	/* If FCF not available return 0 */
+	/* FCF not valid/available or solicitation in progress */
 	if (!bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record) ||
-		!bf_get(lpfc_fcf_record_fcf_valid, new_fcf_record))
+	    !bf_get(lpfc_fcf_record_fcf_valid, new_fcf_record) ||
+	    bf_get(lpfc_fcf_record_fcf_sol, new_fcf_record))
 		return 0;
 
 	if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
@@ -1842,6 +1843,7 @@ lpfc_sli4_log_fcf_record_info(struct lpfc_hba *phba,
 			"\tFCF_Index     : x%x\n"
 			"\tFCF_Avail     : x%x\n"
 			"\tFCF_Valid     : x%x\n"
+			"\tFCF_SOL       : x%x\n"
 			"\tFIP_Priority  : x%x\n"
 			"\tMAC_Provider  : x%x\n"
 			"\tLowest VLANID : x%x\n"
@@ -1852,6 +1854,7 @@ lpfc_sli4_log_fcf_record_info(struct lpfc_hba *phba,
 			bf_get(lpfc_fcf_record_fcf_index, fcf_record),
 			bf_get(lpfc_fcf_record_fcf_avail, fcf_record),
 			bf_get(lpfc_fcf_record_fcf_valid, fcf_record),
+			bf_get(lpfc_fcf_record_fcf_sol, fcf_record),
 			fcf_record->fip_priority,
 			bf_get(lpfc_fcf_record_mac_addr_prov, fcf_record),
 			vlan_id,
@@ -2185,12 +2188,14 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 					       new_fcf_record));
 		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
 				"2781 FCF (x%x) failed connection "
-				"list check: (x%x/x%x)\n",
+				"list check: (x%x/x%x/%x)\n",
 				bf_get(lpfc_fcf_record_fcf_index,
 				       new_fcf_record),
 				bf_get(lpfc_fcf_record_fcf_avail,
 				       new_fcf_record),
 				bf_get(lpfc_fcf_record_fcf_valid,
+				       new_fcf_record),
+				bf_get(lpfc_fcf_record_fcf_sol,
 				       new_fcf_record));
 		if ((phba->fcf.fcf_flag & FCF_IN_USE) &&
 		    lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 834b699cac76..2cdeb5434fb7 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -1305,6 +1305,11 @@ struct lpfc_mbx_mq_create_ext {
 #define lpfc_mbx_mq_create_ext_async_evt_link_SHIFT	LPFC_TRAILER_CODE_LINK
 #define lpfc_mbx_mq_create_ext_async_evt_link_MASK	0x00000001
 #define lpfc_mbx_mq_create_ext_async_evt_link_WORD	async_evt_bmap
+#define LPFC_EVT_CODE_LINK_NO_LINK	0x0
+#define LPFC_EVT_CODE_LINK_10_MBIT	0x1
+#define LPFC_EVT_CODE_LINK_100_MBIT	0x2
+#define LPFC_EVT_CODE_LINK_1_GBIT	0x3
+#define LPFC_EVT_CODE_LINK_10_GBIT	0x4
 #define lpfc_mbx_mq_create_ext_async_evt_fip_SHIFT	LPFC_TRAILER_CODE_FCOE
 #define lpfc_mbx_mq_create_ext_async_evt_fip_MASK	0x00000001
 #define lpfc_mbx_mq_create_ext_async_evt_fip_WORD	async_evt_bmap
@@ -1314,6 +1319,13 @@ struct lpfc_mbx_mq_create_ext {
 #define lpfc_mbx_mq_create_ext_async_evt_fc_SHIFT	LPFC_TRAILER_CODE_FC
 #define lpfc_mbx_mq_create_ext_async_evt_fc_MASK	0x00000001
 #define lpfc_mbx_mq_create_ext_async_evt_fc_WORD	async_evt_bmap
+#define LPFC_EVT_CODE_FC_NO_LINK	0x0
+#define LPFC_EVT_CODE_FC_1_GBAUD	0x1
+#define LPFC_EVT_CODE_FC_2_GBAUD	0x2
+#define LPFC_EVT_CODE_FC_4_GBAUD	0x4
+#define LPFC_EVT_CODE_FC_8_GBAUD	0x8
+#define LPFC_EVT_CODE_FC_10_GBAUD	0xA
+#define LPFC_EVT_CODE_FC_16_GBAUD	0x10
 #define lpfc_mbx_mq_create_ext_async_evt_sli_SHIFT	LPFC_TRAILER_CODE_SLI
 #define lpfc_mbx_mq_create_ext_async_evt_sli_MASK	0x00000001
 #define lpfc_mbx_mq_create_ext_async_evt_sli_WORD	async_evt_bmap
@@ -1695,8 +1707,14 @@ struct fcf_record {
 #define lpfc_fcf_record_fc_map_2_MASK		0x000000FF
 #define lpfc_fcf_record_fc_map_2_WORD		word7
 #define lpfc_fcf_record_fcf_valid_SHIFT		24
-#define lpfc_fcf_record_fcf_valid_MASK		0x000000FF
+#define lpfc_fcf_record_fcf_valid_MASK		0x00000001
 #define lpfc_fcf_record_fcf_valid_WORD		word7
+#define lpfc_fcf_record_fcf_fc_SHIFT		25
+#define lpfc_fcf_record_fcf_fc_MASK		0x00000001
+#define lpfc_fcf_record_fcf_fc_WORD		word7
+#define lpfc_fcf_record_fcf_sol_SHIFT		31
+#define lpfc_fcf_record_fcf_sol_MASK		0x00000001
+#define lpfc_fcf_record_fcf_sol_WORD		word7
 	uint32_t word8;
 #define lpfc_fcf_record_fcf_index_SHIFT		0
 #define lpfc_fcf_record_fcf_index_MASK		0x0000FFFF
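
A user-space sketch of the SHIFT/MASK/WORD convention behind bf_get(), illustrating why the fcf_valid mask shrinks to 0x00000001 above: with the old 0xFF mask, extracting the valid flag would also pick up the new fc (bit 25) and sol (bit 31) flags packed into word7. The macro names below are simplified stand-ins for the driver's.

/* Standalone sketch: value = (word >> SHIFT) & MASK bitfield extraction. */
#include <stdint.h>
#include <stdio.h>

#define FCF_VALID_SHIFT 24
#define FCF_VALID_MASK  0x00000001
#define FCF_SOL_SHIFT   31
#define FCF_SOL_MASK    0x00000001

static uint32_t bf_get_sketch(uint32_t word, int shift, uint32_t mask)
{
	return (word >> shift) & mask;	/* isolate one packed field */
}

int main(void)
{
	uint32_t word7 = (1u << 24) | (1u << 31); /* valid + solicitation */

	printf("valid=%u sol=%u\n",
	       bf_get_sketch(word7, FCF_VALID_SHIFT, FCF_VALID_MASK),
	       bf_get_sketch(word7, FCF_SOL_SHIFT, FCF_SOL_MASK));
	return 0;
}
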
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 8a55a586dd65..7dc4218d9c4c 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1892,8 +1892,10 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
 		max_speed = 4;
 	else if (phba->lmt & LMT_2Gb)
 		max_speed = 2;
-	else
+	else if (phba->lmt & LMT_1Gb)
 		max_speed = 1;
+	else
+		max_speed = 0;
 
 	vp = &phba->vpd;
 
@@ -2078,9 +2080,13 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
 	if (descp && descp[0] == '\0') {
 		if (oneConnect)
 			snprintf(descp, 255,
-				"Emulex OneConnect %s, %s Initiator, Port %s",
+				"Emulex OneConnect %s, %s Initiator %s",
 				m.name, m.function,
 				phba->Port);
+		else if (max_speed == 0)
+			snprintf(descp, 255,
+				"Emulex %s %s %s ",
+				m.name, m.bus, m.function);
 		else
 			snprintf(descp, 255,
 				"Emulex %s %d%s %s %s",
@@ -3502,6 +3508,119 @@ lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba,
 }
 
 /**
+ * lpfc_sli_port_speed_get - Convert the SLI3 link speed code to link speed
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine returns an SLI3 FC port's current link speed in Mbps.
+ *
+ * Return: link speed in Mbps, or 0 if the link is down or the code is unknown.
+ **/
+uint32_t
+lpfc_sli_port_speed_get(struct lpfc_hba *phba)
+{
+	uint32_t link_speed;
+
+	if (!lpfc_is_link_up(phba))
+		return 0;
+
+	switch (phba->fc_linkspeed) {
+	case LPFC_LINK_SPEED_1GHZ:
+		link_speed = 1000;
+		break;
+	case LPFC_LINK_SPEED_2GHZ:
+		link_speed = 2000;
+		break;
+	case LPFC_LINK_SPEED_4GHZ:
+		link_speed = 4000;
+		break;
+	case LPFC_LINK_SPEED_8GHZ:
+		link_speed = 8000;
+		break;
+	case LPFC_LINK_SPEED_10GHZ:
+		link_speed = 10000;
+		break;
+	case LPFC_LINK_SPEED_16GHZ:
+		link_speed = 16000;
+		break;
+	default:
+		link_speed = 0;
+	}
+	return link_speed;
+}
+
+/**
+ * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed
+ * @phba: pointer to lpfc hba data structure.
+ * @evt_code: asynchronous event code.
+ * @speed_code: asynchronous event link speed code.
+ *
+ * This routine parses the given SLI4 async event link speed code into a
+ * link speed value in Mbps.
+ *
+ * Return: link speed in Mbps, or 0 for an unknown code.
+ **/
+static uint32_t
+lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code,
+			   uint8_t speed_code)
+{
+	uint32_t port_speed;
+
+	switch (evt_code) {
+	case LPFC_TRAILER_CODE_LINK:
+		switch (speed_code) {
+		case LPFC_EVT_CODE_LINK_NO_LINK:
+			port_speed = 0;
+			break;
+		case LPFC_EVT_CODE_LINK_10_MBIT:
+			port_speed = 10;
+			break;
+		case LPFC_EVT_CODE_LINK_100_MBIT:
+			port_speed = 100;
+			break;
+		case LPFC_EVT_CODE_LINK_1_GBIT:
+			port_speed = 1000;
+			break;
+		case LPFC_EVT_CODE_LINK_10_GBIT:
+			port_speed = 10000;
+			break;
+		default:
+			port_speed = 0;
+		}
+		break;
+	case LPFC_TRAILER_CODE_FC:
+		switch (speed_code) {
+		case LPFC_EVT_CODE_FC_NO_LINK:
+			port_speed = 0;
+			break;
+		case LPFC_EVT_CODE_FC_1_GBAUD:
+			port_speed = 1000;
+			break;
+		case LPFC_EVT_CODE_FC_2_GBAUD:
+			port_speed = 2000;
+			break;
+		case LPFC_EVT_CODE_FC_4_GBAUD:
+			port_speed = 4000;
+			break;
+		case LPFC_EVT_CODE_FC_8_GBAUD:
+			port_speed = 8000;
+			break;
+		case LPFC_EVT_CODE_FC_10_GBAUD:
+			port_speed = 10000;
+			break;
+		case LPFC_EVT_CODE_FC_16_GBAUD:
+			port_speed = 16000;
+			break;
+		default:
+			port_speed = 0;
+		}
+		break;
+	default:
+		port_speed = 0;
+	}
+	return port_speed;
+}
+
+/**
  * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event
  * @phba: pointer to lpfc hba data structure.
  * @acqe_link: pointer to the async link completion queue entry.
@@ -3558,7 +3677,8 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
 
 	/* Keep the link status for extra SLI4 state machine reference */
 	phba->sli4_hba.link_state.speed =
-				bf_get(lpfc_acqe_link_speed, acqe_link);
+			lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK,
+				bf_get(lpfc_acqe_link_speed, acqe_link));
 	phba->sli4_hba.link_state.duplex =
 				bf_get(lpfc_acqe_link_duplex, acqe_link);
 	phba->sli4_hba.link_state.status =
@@ -3570,7 +3690,8 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
 	phba->sli4_hba.link_state.fault =
 				bf_get(lpfc_acqe_link_fault, acqe_link);
 	phba->sli4_hba.link_state.logical_speed =
-			bf_get(lpfc_acqe_logical_link_speed, acqe_link);
+			bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10;
+
 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
 			"2900 Async FC/FCoE Link event - Speed:%dGBit "
 			"duplex:x%x LA Type:x%x Port Type:%d Port Number:%d "
@@ -3580,7 +3701,7 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
 			phba->sli4_hba.link_state.status,
 			phba->sli4_hba.link_state.type,
 			phba->sli4_hba.link_state.number,
-			phba->sli4_hba.link_state.logical_speed * 10,
+			phba->sli4_hba.link_state.logical_speed,
 			phba->sli4_hba.link_state.fault);
 	/*
 	 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch
@@ -3652,7 +3773,8 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
 	}
 	/* Keep the link status for extra SLI4 state machine reference */
 	phba->sli4_hba.link_state.speed =
-				bf_get(lpfc_acqe_fc_la_speed, acqe_fc);
+			lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
+				bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
 	phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
 	phba->sli4_hba.link_state.topology =
 				bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
@@ -3665,7 +3787,7 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
 	phba->sli4_hba.link_state.fault =
 				bf_get(lpfc_acqe_link_fault, acqe_fc);
 	phba->sli4_hba.link_state.logical_speed =
-				bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc);
+				bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
 			"2896 Async FC event - Speed:%dGBaud Topology:x%x "
 			"LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
@@ -3675,7 +3797,7 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
 			phba->sli4_hba.link_state.status,
 			phba->sli4_hba.link_state.type,
 			phba->sli4_hba.link_state.number,
-			phba->sli4_hba.link_state.logical_speed * 10,
+			phba->sli4_hba.link_state.logical_speed,
 			phba->sli4_hba.link_state.fault);
 	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 	if (!pmb) {
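
A minimal sketch of the unit normalization these hunks perform. Judging from the "* 10" conversions, the ACQE logical-link-speed field is reported in 10 Mbps units; converting once where the value is captured lets the log statements print link_state.logical_speed as plain Mbps with no per-site scaling.

/* Standalone sketch: normalize the hardware field to Mbps at the source. */
#include <stdint.h>
#include <stdio.h>

static uint32_t acqe_units_to_mbps(uint32_t acqe_logical_speed)
{
	return acqe_logical_speed * 10;	/* 10 Mbps units -> Mbps */
}

int main(void)
{
	uint32_t raw = 200;	/* hypothetical ACQE field value */

	printf("Logical speed: %u Mbps\n", acqe_units_to_mbps(raw)); /* 2000 */
	return 0;
}
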
@@ -3783,14 +3905,18 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
 	case LPFC_SLI_EVENT_STATUS_VALID:
 		return; /* no message if the sfp is okay */
 	case LPFC_SLI_EVENT_STATUS_NOT_PRESENT:
-		sprintf(message, "Not installed");
+		sprintf(message, "Optics faulted/incorrectly installed/not "
+				"installed - Reseat optics, if issue not "
+				"resolved, replace.");
 		break;
 	case LPFC_SLI_EVENT_STATUS_WRONG_TYPE:
 		sprintf(message,
-			"Optics of two types installed");
+			"Optics of two types installed - Remove one optic or "
+			"install matching pair of optics.");
 		break;
 	case LPFC_SLI_EVENT_STATUS_UNSUPPORTED:
-		sprintf(message, "Incompatible optics");
+		sprintf(message, "Incompatible optics - Replace with "
+				"compatible optics for card to function.");
 		break;
 	default:
 		/* firmware is reporting a status we don't know about */
@@ -4161,11 +4287,11 @@ lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
 	phba->fcoe_eventtag = acqe_grp5->event_tag;
 	prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
 	phba->sli4_hba.link_state.logical_speed =
-		(bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5));
+		(bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10;
 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
 			"2789 GRP5 Async Event: Updating logical link speed "
-			"from %dMbps to %dMbps\n", (prev_ll_spd * 10),
-			(phba->sli4_hba.link_state.logical_speed*10));
+			"from %dMbps to %dMbps\n", prev_ll_spd,
+			phba->sli4_hba.link_state.logical_speed);
 }
 
 /**
@@ -4947,7 +5073,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 	}
 
 	phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
-				      phba->sli4_hba.cfg_eqn), GFP_KERNEL);
+				      phba->cfg_fcp_io_channel), GFP_KERNEL);
 	if (!phba->sli4_hba.msix_entries) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"2573 Failed allocate memory for msi-x "
@@ -6559,7 +6685,8 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
 		i++;
 	}
 	if (i < cfg_fcp_io_channel) {
-		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+		lpfc_printf_log(phba,
+				KERN_ERR, LOG_INIT,
 				"3188 Reducing IO channels to match number of "
 				"CPUs: from %d to %d\n", cfg_fcp_io_channel, i);
 		cfg_fcp_io_channel = i;
@@ -6567,8 +6694,8 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
 
 	if (cfg_fcp_io_channel >
 	    phba->sli4_hba.max_cfg_param.max_eq) {
-		cfg_fcp_io_channel = phba->sli4_hba.max_cfg_param.max_eq;
-		if (cfg_fcp_io_channel < LPFC_FCP_IO_CHAN_MIN) {
+		if (phba->sli4_hba.max_cfg_param.max_eq <
+		    LPFC_FCP_IO_CHAN_MIN) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 					"2574 Not enough EQs (%d) from the "
 					"pci function for supporting FCP "
@@ -6577,13 +6704,12 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
 					phba->cfg_fcp_io_channel);
 			goto out_error;
 		}
-		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
-				"2575 Not enough EQs (%d) from the pci "
-				"function for supporting the requested "
-				"FCP EQs (%d), the actual FCP EQs can "
-				"be supported: %d\n",
-				phba->sli4_hba.max_cfg_param.max_eq,
-				phba->cfg_fcp_io_channel, cfg_fcp_io_channel);
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2575 Reducing IO channels to match number of "
+				"available EQs: from %d to %d\n",
+				cfg_fcp_io_channel,
+				phba->sli4_hba.max_cfg_param.max_eq);
+		cfg_fcp_io_channel = phba->sli4_hba.max_cfg_param.max_eq;
 	}
 
 	/* Eventually cfg_fcp_eq_count / cfg_fcp_wq_count will be deprecated */
@@ -6592,7 +6718,6 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
 	phba->cfg_fcp_eq_count = cfg_fcp_io_channel;
 	phba->cfg_fcp_wq_count = cfg_fcp_io_channel;
 	phba->cfg_fcp_io_channel = cfg_fcp_io_channel;
-	phba->sli4_hba.cfg_eqn = cfg_fcp_io_channel;
 
 	/* Get EQ depth from module parameter, fake the default for now */
 	phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
@@ -8095,11 +8220,11 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
 	int vectors, rc, index;
 
 	/* Set up MSI-X multi-message vectors */
-	for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
+	for (index = 0; index < phba->cfg_fcp_io_channel; index++)
 		phba->sli4_hba.msix_entries[index].entry = index;
 
 	/* Configure MSI-X capability structure */
-	vectors = phba->sli4_hba.cfg_eqn;
+	vectors = phba->cfg_fcp_io_channel;
 enable_msix_vectors:
 	rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries,
 			     vectors);
@@ -8142,8 +8267,14 @@ enable_msix_vectors:
 			goto cfg_fail_out;
 		}
 	}
-	phba->sli4_hba.msix_vec_nr = vectors;
 
+	if (vectors != phba->cfg_fcp_io_channel) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"3238 Reducing IO channels to match number of "
+				"MSI-X vectors, requested %d got %d\n",
+				phba->cfg_fcp_io_channel, vectors);
+		phba->cfg_fcp_io_channel = vectors;
+	}
 	return rc;
 
 cfg_fail_out:
@@ -8171,7 +8302,7 @@ lpfc_sli4_disable_msix(struct lpfc_hba *phba)
 	int index;
 
 	/* Free up MSI-X multi-message vectors */
-	for (index = 0; index < phba->sli4_hba.msix_vec_nr; index++)
+	for (index = 0; index < phba->cfg_fcp_io_channel; index++)
 		free_irq(phba->sli4_hba.msix_entries[index].vector,
 			 &phba->sli4_hba.fcp_eq_hdl[index]);
 
@@ -9304,23 +9435,28 @@ lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
 
 /**
  * lpfc_write_firmware - attempt to write a firmware image to the port
- * @phba: pointer to lpfc hba data structure.
  * @fw: pointer to firmware image returned from request_firmware.
+ * @phba: pointer to lpfc hba data structure.
  *
- * returns the number of bytes written if write is successful.
- * returns a negative error value if there were errors.
- * returns 0 if firmware matches currently active firmware on port.
  **/
-int
-lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw)
+static void
+lpfc_write_firmware(const struct firmware *fw, void *context)
 {
+	struct lpfc_hba *phba = (struct lpfc_hba *)context;
 	char fwrev[FW_REV_STR_SIZE];
-	struct lpfc_grp_hdr *image = (struct lpfc_grp_hdr *)fw->data;
+	struct lpfc_grp_hdr *image;
 	struct list_head dma_buffer_list;
 	int i, rc = 0;
 	struct lpfc_dmabuf *dmabuf, *next;
 	uint32_t offset = 0, temp_offset = 0;
 
+	/* fw can be NULL if the firmware request failed; sanity check */
+	if (!fw) {
+		rc = -ENXIO;
+		goto out;
+	}
+	image = (struct lpfc_grp_hdr *)fw->data;
+
 	INIT_LIST_HEAD(&dma_buffer_list);
 	if ((be32_to_cpu(image->magic_number) != LPFC_GROUP_OJECT_MAGIC_NUM) ||
 	    (bf_get_be32(lpfc_grp_hdr_file_type, image) !=
@@ -9333,12 +9469,13 @@ lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw)
 				be32_to_cpu(image->magic_number),
 				bf_get_be32(lpfc_grp_hdr_file_type, image),
 				bf_get_be32(lpfc_grp_hdr_id, image));
-		return -EINVAL;
+		rc = -EINVAL;
+		goto release_out;
 	}
 	lpfc_decode_firmware_rev(phba, fwrev, 1);
 	if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"3023 Updating Firmware. Current Version:%s "
+				"3023 Updating Firmware, Current Version:%s "
 				"New Version:%s\n",
 				fwrev, image->revision);
 		for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
@@ -9346,7 +9483,7 @@ lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw)
 					 GFP_KERNEL);
 			if (!dmabuf) {
 				rc = -ENOMEM;
-				goto out;
+				goto release_out;
 			}
 			dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
 							  SLI4_PAGE_SIZE,
@@ -9355,7 +9492,7 @@ lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw)
 			if (!dmabuf->virt) {
 				kfree(dmabuf);
 				rc = -ENOMEM;
-				goto out;
+				goto release_out;
 			}
 			list_add_tail(&dmabuf->list, &dma_buffer_list);
 		}
@@ -9375,23 +9512,24 @@ lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw)
 			}
 			rc = lpfc_wr_object(phba, &dma_buffer_list,
 				    (fw->size - offset), &offset);
-			if (rc) {
-				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-						"3024 Firmware update failed. "
-						"%d\n", rc);
-				goto out;
-			}
+			if (rc)
+				goto release_out;
 		}
 		rc = offset;
 	}
-out:
+
+release_out:
 	list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
 		list_del(&dmabuf->list);
 		dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
 				  dmabuf->virt, dmabuf->phys);
 		kfree(dmabuf);
 	}
-	return rc;
+	release_firmware(fw);
+out:
+	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"3024 Firmware update done: %d.", rc);
+	return;
 }
 
 /**
@@ -9418,12 +9556,11 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
 	struct lpfc_hba   *phba;
 	struct lpfc_vport *vport = NULL;
 	struct Scsi_Host  *shost = NULL;
-	int error;
+	int error, ret;
 	uint32_t cfg_mode, intr_mode;
 	int mcnt;
 	int adjusted_fcp_io_channel;
-	const struct firmware *fw;
-	uint8_t file_name[16];
+	uint8_t file_name[ELX_MODEL_NAME_SIZE];
 
 	/* Allocate memory for HBA structure */
 	phba = lpfc_hba_alloc(pdev);
@@ -9525,9 +9662,6 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
 		/* Default to single EQ for non-MSI-X */
 		if (phba->intr_type != MSIX)
 			adjusted_fcp_io_channel = 1;
-		else if (phba->sli4_hba.msix_vec_nr <
-					phba->cfg_fcp_io_channel)
-			adjusted_fcp_io_channel = phba->sli4_hba.msix_vec_nr;
 		else
 			adjusted_fcp_io_channel = phba->cfg_fcp_io_channel;
 		phba->cfg_fcp_io_channel = adjusted_fcp_io_channel;
@@ -9572,12 +9706,12 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
 	/* check for firmware upgrade or downgrade (if_type 2 only) */
 	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
 	    LPFC_SLI_INTF_IF_TYPE_2) {
-		snprintf(file_name, 16, "%s.grp", phba->ModelName);
-		error = request_firmware(&fw, file_name, &phba->pcidev->dev);
-		if (!error) {
-			lpfc_write_firmware(phba, fw);
-			release_firmware(fw);
-		}
+		snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp",
+			 phba->ModelName);
+		ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
+					file_name, &phba->pcidev->dev,
+					GFP_KERNEL, (void *)phba,
+					lpfc_write_firmware);
 	}
 
 	/* Check if there are static vports to be created. */
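
A minimal sketch of the asynchronous firmware-request pattern this hunk adopts (my_fw_callback/my_probe_step are hypothetical names): probe no longer blocks waiting for user space, and the completion callback owns the possibly-NULL blob and must release it itself, which is why lpfc_write_firmware() above gained the NULL check and the release_firmware() call.

/* Standalone sketch of request_firmware_nowait() usage. */
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/device.h>
#include <linux/gfp.h>

static void my_fw_callback(const struct firmware *fw, void *context)
{
	struct device *dev = context;

	if (!fw) {			/* request failed or timed out */
		dev_err(dev, "firmware not available\n");
		return;
	}
	/* ... consume fw->data / fw->size ... */
	release_firmware(fw);		/* callback owns the blob */
}

static int my_probe_step(struct device *dev)
{
	return request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
				       "model.grp", dev, GFP_KERNEL,
				       dev, my_fw_callback);
}
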
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 64013f3097ad..7f45ac9964a9 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -3829,9 +3829,9 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 	cmd->scsi_done(cmd);
 
 	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
-		spin_lock_irq(&phba->hbalock);
+		spin_lock_irqsave(&phba->hbalock, flags);
 		lpfc_cmd->pCmd = NULL;
-		spin_unlock_irq(&phba->hbalock);
+		spin_unlock_irqrestore(&phba->hbalock, flags);
 
 		/*
 		 * If there is a thread waiting for command completion
@@ -3871,9 +3871,9 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 		}
 	}
 
-	spin_lock_irq(&phba->hbalock);
+	spin_lock_irqsave(&phba->hbalock, flags);
 	lpfc_cmd->pCmd = NULL;
-	spin_unlock_irq(&phba->hbalock);
+	spin_unlock_irqrestore(&phba->hbalock, flags);
 
 	/*
 	 * If there is a thread waiting for command completion
@@ -4163,7 +4163,7 @@ lpfc_info(struct Scsi_Host *host)
 {
 	struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
 	struct lpfc_hba   *phba = vport->phba;
-	int len;
+	int len, link_speed = 0;
 	static char  lpfcinfobuf[384];
 
 	memset(lpfcinfobuf,0,384);
@@ -4184,12 +4184,18 @@ lpfc_info(struct Scsi_Host *host)
 				 phba->Port);
 		}
 		len = strlen(lpfcinfobuf);
-		if (phba->sli4_hba.link_state.logical_speed) {
-			snprintf(lpfcinfobuf + len,
-				 384-len,
-				 " Logical Link Speed: %d Mbps",
-				 phba->sli4_hba.link_state.logical_speed * 10);
+		if (phba->sli_rev <= LPFC_SLI_REV3) {
+			link_speed = lpfc_sli_port_speed_get(phba);
+		} else {
+			if (phba->sli4_hba.link_state.logical_speed)
+				link_speed =
+				      phba->sli4_hba.link_state.logical_speed;
+			else
+				link_speed = phba->sli4_hba.link_state.speed;
 		}
+		if (link_speed != 0)
+			snprintf(lpfcinfobuf + len, 384-len,
+				 " Logical Link Speed: %d Mbps", link_speed);
 	}
 	return lpfcinfobuf;
 }
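
A schematic sketch of the rule behind the locking changes in this file: spin_unlock_irq() unconditionally re-enables interrupts, which is wrong if the caller may already run with interrupts disabled, while the irqsave/irqrestore pair preserves whatever state the caller had.

/* Standalone sketch of the interrupt-state-preserving lock pattern. */
#include <linux/spinlock.h>

static void safe_in_any_context(spinlock_t *lock, int *shared)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);		/* saves current IRQ state */
	*shared = 0;
	spin_unlock_irqrestore(lock, flags);	/* restores it, never forces on */
}
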
@@ -4398,16 +4404,17 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
 	struct lpfc_scsi_buf *lpfc_cmd;
 	IOCB_t *cmd, *icmd;
 	int ret = SUCCESS, status = 0;
+	unsigned long flags;
 	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
 
 	status = fc_block_scsi_eh(cmnd);
 	if (status != 0 && status != SUCCESS)
 		return status;
 
-	spin_lock_irq(&phba->hbalock);
+	spin_lock_irqsave(&phba->hbalock, flags);
 	/* driver queued commands are in process of being flushed */
 	if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
-		spin_unlock_irq(&phba->hbalock);
+		spin_unlock_irqrestore(&phba->hbalock, flags);
 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
 			"3168 SCSI Layer abort requested I/O has been "
 			"flushed by LLD.\n");
@@ -4416,7 +4423,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
 
 	lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
 	if (!lpfc_cmd || !lpfc_cmd->pCmd) {
-		spin_unlock_irq(&phba->hbalock);
+		spin_unlock_irqrestore(&phba->hbalock, flags);
 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
 			 "2873 SCSI Layer I/O Abort Request IO CMPL Status "
 			 "x%x ID %d LUN %d\n",
@@ -4427,7 +4434,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
 	iocb = &lpfc_cmd->cur_iocbq;
 	/* the command is in process of being cancelled */
 	if (!(iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
-		spin_unlock_irq(&phba->hbalock);
+		spin_unlock_irqrestore(&phba->hbalock, flags);
 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
 			"3169 SCSI Layer abort requested I/O has been "
 			"cancelled by LLD.\n");
@@ -4484,7 +4491,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
 	abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
 	abtsiocb->vport = vport;
 	/* no longer need the lock after this point */
-	spin_unlock_irq(&phba->hbalock);
+	spin_unlock_irqrestore(&phba->hbalock, flags);
 
 	if (lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, abtsiocb, 0) ==
 	    IOCB_ERROR) {
@@ -4516,7 +4523,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
 	goto out;
 
 out_unlock:
-	spin_unlock_irq(&phba->hbalock);
+	spin_unlock_irqrestore(&phba->hbalock, flags);
 out:
 	lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
 			 "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 219bf534ef99..d7f3313ef886 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -3964,9 +3964,9 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
 	pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
 			      ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
 
-	/* Perform FCoE PCI function reset */
-	lpfc_sli4_queue_destroy(phba);
+	/* Perform FCoE PCI function reset before freeing queue memory */
 	rc = lpfc_pci_function_reset(phba);
+	lpfc_sli4_queue_destroy(phba);
 
 	/* Restore PCI cmd register */
 	pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
@@ -7072,6 +7072,40 @@ lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
 }
 
 /**
+ * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready
+ * @phba: Pointer to HBA context object.
+ * @mboxq: Pointer to mailbox object.
+ *
+ * The function waits for the bootstrap mailbox register ready bit from
+ * the port for twice the regular mailbox command timeout value.
+ *
+ * Return codes:
+ *      0 - no timeout waiting for the bootstrap mailbox register ready.
+ *      MBXERR_ERROR - wait for the bootstrap mailbox register timed out.
+ **/
+static int
+lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+	uint32_t db_ready;
+	unsigned long timeout;
+	struct lpfc_register bmbx_reg;
+
+	timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
+				   * 1000) + jiffies;
+
+	do {
+		bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
+		db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
+		if (!db_ready)
+			msleep(2);
+
+		if (time_after(jiffies, timeout))
+			return MBXERR_ERROR;
+	} while (!db_ready);
+
+	return 0;
+}
+
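
The new helper factors out a standard jiffies-bounded polling loop. A generic sketch of the idiom follows (simplified return convention, -ETIMEDOUT in place of MBXERR_ERROR):

/* Standalone sketch: poll a ready bit with a wraparound-safe deadline. */
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

static int poll_ready_bit(u32 (*read_ready)(void *), void *ctx,
			  unsigned int timeout_ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

	while (!read_ready(ctx)) {
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;	/* bounded wait */
		msleep(2);			/* back off between reads */
	}
	return 0;
}
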
+/**
  * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
  * @phba: Pointer to HBA context object.
  * @mboxq: Pointer to mailbox object.
@@ -7092,15 +7126,12 @@ lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 {
 	int rc = MBX_SUCCESS;
 	unsigned long iflag;
-	uint32_t db_ready;
 	uint32_t mcqe_status;
 	uint32_t mbx_cmnd;
-	unsigned long timeout;
 	struct lpfc_sli *psli = &phba->sli;
 	struct lpfc_mqe *mb = &mboxq->u.mqe;
 	struct lpfc_bmbx_create *mbox_rgn;
 	struct dma_address *dma_address;
-	struct lpfc_register bmbx_reg;
 
 	/*
 	 * Only one mailbox can be active to the bootstrap mailbox region
@@ -7124,6 +7155,11 @@ lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 	phba->sli.mbox_active = mboxq;
 	spin_unlock_irqrestore(&phba->hbalock, iflag);
 
+	/* wait for the bootstrap mailbox register to become ready */
+	rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
+	if (rc)
+		goto exit;
+
 	/*
 	 * Initialize the bootstrap memory region to avoid stale data areas
 	 * in the mailbox post.  Then copy the caller's mailbox contents to
@@ -7138,35 +7174,18 @@ lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 	dma_address = &phba->sli4_hba.bmbx.dma_address;
 	writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
 
-	timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
-				   * 1000) + jiffies;
-	do {
-		bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
-		db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
-		if (!db_ready)
-			msleep(2);
-
-		if (time_after(jiffies, timeout)) {
-			rc = MBXERR_ERROR;
-			goto exit;
-		}
-	} while (!db_ready);
+	/* wait for the bootstrap mailbox register after the high-address write */
+	rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
+	if (rc)
+		goto exit;
 
 	/* Post the low mailbox dma address to the port. */
 	writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
-	timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
-				   * 1000) + jiffies;
-	do {
-		bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
-		db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
-		if (!db_ready)
-			msleep(2);
 
-		if (time_after(jiffies, timeout)) {
-			rc = MBXERR_ERROR;
-			goto exit;
-		}
-	} while (!db_ready);
+	/* wait for the bootstrap mailbox register after the low-address write */
+	rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
+	if (rc)
+		goto exit;
 
 	/*
 	 * Read the CQ to ensure the mailbox has completed.
@@ -8090,6 +8109,8 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
 		bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
 		       LPFC_WQE_LENLOC_NONE);
 		bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0);
+		bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
+		       iocbq->iocb.ulpFCP2Rcvy);
 		break;
 	case CMD_GEN_REQUEST64_CR:
 		/* For this command calculate the xmit length of the
@@ -12099,6 +12120,7 @@ lpfc_modify_fcp_eq_delay(struct lpfc_hba *phba, uint16_t startq)
 	struct lpfc_queue *eq;
 	int cnt, rc, length, status = 0;
 	uint32_t shdr_status, shdr_add_status;
+	uint32_t result;
 	int fcp_eqidx;
 	union lpfc_sli4_cfg_shdr *shdr;
 	uint16_t dmult;
@@ -12117,8 +12139,11 @@ lpfc_modify_fcp_eq_delay(struct lpfc_hba *phba, uint16_t startq)
 	eq_delay = &mbox->u.mqe.un.eq_delay;
 
 	/* Calculate the delay multiplier from the maximum interrupts per second */
-	dmult = phba->cfg_fcp_imax / phba->cfg_fcp_io_channel;
-	dmult = LPFC_DMULT_CONST/dmult - 1;
+	result = phba->cfg_fcp_imax / phba->cfg_fcp_io_channel;
+	if (result > LPFC_DMULT_CONST)
+		dmult = 0;
+	else
+		dmult = LPFC_DMULT_CONST/result - 1;
 
 	cnt = 0;
 	for (fcp_eqidx = startq; fcp_eqidx < phba->cfg_fcp_io_channel;
@@ -12174,7 +12199,7 @@ lpfc_modify_fcp_eq_delay(struct lpfc_hba *phba, uint16_t startq)
  * fails this function will return -ENXIO.
  **/
 uint32_t
-lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax)
+lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
 {
 	struct lpfc_mbx_eq_create *eq_create;
 	LPFC_MBOXQ_t *mbox;
@@ -12206,7 +12231,10 @@ lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax)
 	       LPFC_EQE_SIZE);
 	bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
 	/* Calculate the delay multiplier from the maximum interrupts per second */
-	dmult = LPFC_DMULT_CONST/imax - 1;
+	if (imax > LPFC_DMULT_CONST)
+		dmult = 0;
+	else
+		dmult = LPFC_DMULT_CONST/imax - 1;
 	bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
 	       dmult);
 	switch (eq->entry_count) {
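
A user-space sketch of the divide guard added in this file. dmult is a small unsigned quantity; without the guard, a per-channel interrupt rate above LPFC_DMULT_CONST makes the integer division yield 0 and the "- 1" wrap to 0xFFFF. The constant below is an illustrative stand-in, and the zero-rate guard is added only to keep the sketch standalone.

/* Standalone sketch of the EQ delay-multiplier guard. */
#include <stdint.h>
#include <stdio.h>

#define DMULT_CONST 651	/* illustrative stand-in for LPFC_DMULT_CONST */

static uint16_t calc_dmult(uint32_t rate)
{
	if (rate == 0 || rate > DMULT_CONST)
		return 0;	/* guard: avoids divide-by-zero and wrap */
	return DMULT_CONST / rate - 1;
}

int main(void)
{
	/* without the guard, calc_dmult(100000) would wrap to 65535 */
	printf("dmult(100)=%u dmult(100000)=%u\n",
	       calc_dmult(100), calc_dmult(100000));
	return 0;
}
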
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index bd4bc4342ae2..f44a06a4c6e7 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -37,7 +37,7 @@
 /* Multi-queue arrangement for FCP EQ/CQ/WQ tuples */
 #define LPFC_FCP_IO_CHAN_DEF       4
 #define LPFC_FCP_IO_CHAN_MIN       1
-#define LPFC_FCP_IO_CHAN_MAX       8
+#define LPFC_FCP_IO_CHAN_MAX       16
 
 /*
  * Provide the default FCF Record attributes used by the driver
@@ -168,7 +168,7 @@ struct lpfc_queue {
 };
 
 struct lpfc_sli4_link {
-	uint8_t speed;
+	uint16_t speed;
 	uint8_t duplex;
 	uint8_t status;
 	uint8_t type;
@@ -490,8 +490,6 @@ struct lpfc_sli4_hba {
 	struct lpfc_pc_sli4_params pc_sli4_params;
 	struct msix_entry *msix_entries;
 	uint8_t handler_name[LPFC_FCP_IO_CHAN_MAX][LPFC_SLI4_HANDLER_NAME_SZ];
-	uint32_t cfg_eqn;
-	uint32_t msix_vec_nr;
 	struct lpfc_fcp_eq_hdl *fcp_eq_hdl; /* FCP per-WQ handle */
 
 	/* Pointers to the constructed SLI4 queues */
@@ -626,7 +624,7 @@ void lpfc_sli4_hba_reset(struct lpfc_hba *);
 struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t,
 			uint32_t);
 void lpfc_sli4_queue_free(struct lpfc_queue *);
-uint32_t lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint16_t);
+uint32_t lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint32_t);
 uint32_t lpfc_modify_fcp_eq_delay(struct lpfc_hba *, uint16_t);
 uint32_t lpfc_cq_create(struct lpfc_hba *, struct lpfc_queue *,
 			struct lpfc_queue *, uint32_t, uint32_t);
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 04265a1c4e52..0c2149189dda 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "8.3.34"
+#define LPFC_DRIVER_VERSION "8.3.35"
 #define LPFC_DRIVER_NAME		"lpfc"
 
 /* Used for SLI 2/3 */
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index fcb005fa4bd1..16b7a72a70c4 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -1,7 +1,7 @@
 /*
  *  Linux MegaRAID driver for SAS based RAID controllers
  *
- *  Copyright (c) 2009-2011  LSI Corporation.
+ *  Copyright (c) 2003-2012  LSI Corporation.
  *
  *  This program is free software; you can redistribute it and/or
  *  modify it under the terms of the GNU General Public License
@@ -33,9 +33,9 @@
 /*
  * MegaRAID SAS Driver meta data
  */
-#define MEGASAS_VERSION				"00.00.06.18-rc1"
-#define MEGASAS_RELDATE				"Jun. 17, 2012"
-#define MEGASAS_EXT_VERSION			"Tue. Jun. 17 17:00:00 PDT 2012"
+#define MEGASAS_VERSION				"06.504.01.00-rc1"
+#define MEGASAS_RELDATE				"Oct. 1, 2012"
+#define MEGASAS_EXT_VERSION			"Mon. Oct. 1 17:00:00 PDT 2012"
 
 /*
  * Device IDs
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 0393ec478cdf..d2c5366aff7f 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -1,7 +1,7 @@
 /*
  *  Linux MegaRAID driver for SAS based RAID controllers
  *
- *  Copyright (c) 2009-2011  LSI Corporation.
+ *  Copyright (c) 2003-2012  LSI Corporation.
  *
  *  This program is free software; you can redistribute it and/or
  *  modify it under the terms of the GNU General Public License
@@ -18,7 +18,7 @@
  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  *
  *  FILE: megaraid_sas_base.c
- *  Version : v00.00.06.18-rc1
+ *  Version : v06.504.01.00-rc1
  *
  *  Authors: LSI Corporation
  *           Sreenivas Bagalkote
@@ -71,6 +71,10 @@ static int msix_disable;
 module_param(msix_disable, int, S_IRUGO);
 MODULE_PARM_DESC(msix_disable, "Disable MSI-X interrupt handling. Default: 0");
 
+static unsigned int msix_vectors;
+module_param(msix_vectors, int, S_IRUGO);
+MODULE_PARM_DESC(msix_vectors, "MSI-X max vector count. Default: Set by FW");
+
 static int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH;
 module_param(throttlequeuedepth, int, S_IRUGO);
 MODULE_PARM_DESC(throttlequeuedepth,
@@ -3520,6 +3524,10 @@ static int megasas_init_fw(struct megasas_instance *instance)
 			instance->msix_vectors = (readl(&instance->reg_set->
 							outbound_scratch_pad_2
 							  ) & 0x1F) + 1;
+			if (msix_vectors)
+				instance->msix_vectors =
+					min(msix_vectors,
+					    instance->msix_vectors);
 		} else
 			instance->msix_vectors = 1;
 		/* Don't bother allocating more MSI-X vectors than cpus */
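
A sketch of the clamping order the new msix_vectors parameter introduces (function and parameter names here are simplified): start from the firmware-advertised count, cap it with the module parameter when set, and never exceed the online CPU count.

/* Standalone sketch of the MSI-X vector-count clamp. */
#include <linux/kernel.h>

static unsigned int pick_msix_count(unsigned int fw_max,
				    unsigned int user_max,
				    unsigned int ncpus)
{
	unsigned int n = fw_max;

	if (user_max)			/* 0 means "let firmware decide" */
		n = min(user_max, n);
	return min(n, ncpus);		/* no more vectors than CPUs */
}
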
@@ -5233,7 +5241,6 @@ megasas_aen_polling(struct work_struct *work)
 
 		case MR_EVT_PD_REMOVED:
 			if (megasas_get_pd_list(instance) == 0) {
-			megasas_get_pd_list(instance);
 			for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
 				for (j = 0;
 				j < MEGASAS_MAX_DEV_PER_CHANNEL;
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index e3d251a2e26a..a11df82474ef 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -1,7 +1,7 @@
 /*
  *  Linux MegaRAID driver for SAS based RAID controllers
  *
- *  Copyright (c) 2009-2011  LSI Corporation.
+ *  Copyright (c) 2009-2012  LSI Corporation.
  *
  *  This program is free software; you can redistribute it and/or
  *  modify it under the terms of the GNU General Public License
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index ddf094e7d0ac..74030aff69ad 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -1,7 +1,7 @@
 /*
  *  Linux MegaRAID driver for SAS based RAID controllers
  *
- *  Copyright (c) 2009-2011  LSI Corporation.
+ *  Copyright (c) 2009-2012  LSI Corporation.
  *
  *  This program is free software; you can redistribute it and/or
  *  modify it under the terms of the GNU General Public License
@@ -1184,8 +1184,6 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
 		io_request->CDB.EEDP32.PrimaryReferenceTag =
 			cpu_to_be32(ref_tag);
 		io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0xffff;
-
-		io_request->DataLength = num_blocks * 512;
 		io_request->IoFlags = 32; /* Specify 32-byte cdb */
 
 		/* Transfer length */
@@ -1329,7 +1327,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
 			  struct megasas_cmd_fusion *cmd)
 {
 	u8 fp_possible;
-	u32 start_lba_lo, start_lba_hi, device_id;
+	u32 start_lba_lo, start_lba_hi, device_id, datalength = 0;
 	struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
 	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
 	struct IO_REQUEST_INFO io_info;
@@ -1355,7 +1353,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
 	 * 6-byte READ(0x08) or WRITE(0x0A) cdb
 	 */
 	if (scp->cmd_len == 6) {
-		io_request->DataLength = (u32) scp->cmnd[4];
+		datalength = (u32) scp->cmnd[4];
 		start_lba_lo = ((u32) scp->cmnd[1] << 16) |
 			((u32) scp->cmnd[2] << 8) | (u32) scp->cmnd[3];
 
@@ -1366,7 +1364,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
 	 * 10-byte READ(0x28) or WRITE(0x2A) cdb
 	 */
 	else if (scp->cmd_len == 10) {
-		io_request->DataLength = (u32) scp->cmnd[8] |
+		datalength = (u32) scp->cmnd[8] |
 			((u32) scp->cmnd[7] << 8);
 		start_lba_lo = ((u32) scp->cmnd[2] << 24) |
 			((u32) scp->cmnd[3] << 16) |
@@ -1377,7 +1375,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
 	 * 12-byte READ(0xA8) or WRITE(0xAA) cdb
 	 */
 	else if (scp->cmd_len == 12) {
-		io_request->DataLength = ((u32) scp->cmnd[6] << 24) |
+		datalength = ((u32) scp->cmnd[6] << 24) |
 			((u32) scp->cmnd[7] << 16) |
 			((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9];
 		start_lba_lo = ((u32) scp->cmnd[2] << 24) |
@@ -1389,7 +1387,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
 	 * 16-byte READ(0x88) or WRITE(0x8A) cdb
 	 */
 	else if (scp->cmd_len == 16) {
-		io_request->DataLength = ((u32) scp->cmnd[10] << 24) |
+		datalength = ((u32) scp->cmnd[10] << 24) |
 			((u32) scp->cmnd[11] << 16) |
 			((u32) scp->cmnd[12] << 8) | (u32) scp->cmnd[13];
 		start_lba_lo = ((u32) scp->cmnd[6] << 24) |
@@ -1403,8 +1401,9 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
 
 	memset(&io_info, 0, sizeof(struct IO_REQUEST_INFO));
 	io_info.ldStartBlock = ((u64)start_lba_hi << 32) | start_lba_lo;
-	io_info.numBlocks = io_request->DataLength;
+	io_info.numBlocks = datalength;
 	io_info.ldTgtId = device_id;
+	io_request->DataLength = scsi_bufflen(scp);
 
 	if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
 		io_info.isRead = 1;
@@ -1431,7 +1430,6 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
 	if (fp_possible) {
 		megasas_set_pd_lba(io_request, scp->cmd_len, &io_info, scp,
 				   local_map_ptr, start_lba_lo);
-		io_request->DataLength = scsi_bufflen(scp);
 		io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
 		cmd->request_desc->SCSIIO.RequestFlags =
 			(MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY
@@ -1510,7 +1508,8 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance,
 	local_map_ptr = fusion->ld_map[(instance->map_id & 1)];
 
 	/* Check if this is a system PD I/O */
-	if (instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) {
+	if (scmd->device->channel < MEGASAS_MAX_PD_CHANNELS &&
+	    instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) {
 		io_request->Function = 0;
 		io_request->DevHandle =
 			local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
@@ -1525,6 +1524,8 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance,
 		cmd->request_desc->SCSIIO.RequestFlags =
 			(MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
 			 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+		cmd->request_desc->SCSIIO.DevHandle =
+			local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
 	} else {
 		io_request->Function  = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
 		io_request->DevHandle = device_id;
@@ -1732,8 +1733,6 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
 	if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
 		return IRQ_NONE;
 
-	d_val.word = desc->Words;
-
 	num_completed = 0;
 
 	while ((d_val.u.low != UINT_MAX) && (d_val.u.high != UINT_MAX)) {
@@ -1855,10 +1854,8 @@ megasas_complete_cmd_dpc_fusion(unsigned long instance_addr)
 	}
 	spin_unlock_irqrestore(&instance->hba_lock, flags);
 
-	spin_lock_irqsave(&instance->completion_lock, flags);
 	for (MSIxIndex = 0 ; MSIxIndex < count; MSIxIndex++)
 		complete_cmd_fusion(instance, MSIxIndex);
-	spin_unlock_irqrestore(&instance->completion_lock, flags);
 }
 
 /**
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
index 088c9f91da95..a7c64f051996 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.h
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
@@ -1,7 +1,7 @@
 /*
  *  Linux MegaRAID driver for SAS based RAID controllers
  *
- *  Copyright (c) 2009-2011  LSI Corporation.
+ *  Copyright (c) 2009-2012  LSI Corporation.
  *
  *  This program is free software; you can redistribute it and/or
  *  modify it under the terms of the GNU General Public License
diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
index 783edc7c6b98..c585a925b3cd 100644
--- a/drivers/scsi/mvumi.c
+++ b/drivers/scsi/mvumi.c
@@ -35,10 +35,12 @@
 #include <linux/io.h>
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_transport.h>
 #include <scsi/scsi_eh.h>
 #include <linux/uaccess.h>
+#include <linux/kthread.h>
 
 #include "mvumi.h"
 
@@ -48,6 +50,7 @@ MODULE_DESCRIPTION("Marvell UMI Driver");
 
 static DEFINE_PCI_DEVICE_TABLE(mvumi_pci_table) = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_2, PCI_DEVICE_ID_MARVELL_MV9143) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_2, PCI_DEVICE_ID_MARVELL_MV9580) },
 	{ 0 }
 };
 
@@ -118,7 +121,7 @@ static int mvumi_map_pci_addr(struct pci_dev *dev, void **addr_array)
 static struct mvumi_res *mvumi_alloc_mem_resource(struct mvumi_hba *mhba,
 				enum resource_type type, unsigned int size)
 {
-	struct mvumi_res *res = kzalloc(sizeof(*res), GFP_KERNEL);
+	struct mvumi_res *res = kzalloc(sizeof(*res), GFP_ATOMIC);
 
 	if (!res) {
 		dev_err(&mhba->pdev->dev,
@@ -128,7 +131,7 @@ static struct mvumi_res *mvumi_alloc_mem_resource(struct mvumi_hba *mhba,
 
 	switch (type) {
 	case RESOURCE_CACHED_MEMORY:
-		res->virt_addr = kzalloc(size, GFP_KERNEL);
+		res->virt_addr = kzalloc(size, GFP_ATOMIC);
 		if (!res->virt_addr) {
 			dev_err(&mhba->pdev->dev,
 				"unable to allocate memory, size = %d.\n", size);
@@ -222,11 +225,11 @@ static int mvumi_make_sgl(struct mvumi_hba *mhba, struct scsi_cmnd *scmd,
 			m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr));
 			m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr));
 			m_sg->flags = 0;
-			m_sg->size = cpu_to_le32(sg_dma_len(&sg[i]));
+			sgd_setsz(mhba, m_sg, cpu_to_le32(sg_dma_len(&sg[i])));
 			if ((i + 1) == *sg_count)
-				m_sg->flags |= SGD_EOT;
+				m_sg->flags |= 1U << mhba->eot_flag;
 
-			m_sg++;
+			sgd_inc(mhba, m_sg);
 		}
 	} else {
 		scmd->SCp.dma_handle = scsi_bufflen(scmd) ?
@@ -237,8 +240,8 @@ static int mvumi_make_sgl(struct mvumi_hba *mhba, struct scsi_cmnd *scmd,
 		busaddr = scmd->SCp.dma_handle;
 		m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr));
 		m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr));
-		m_sg->flags = SGD_EOT;
-		m_sg->size = cpu_to_le32(scsi_bufflen(scmd));
+		m_sg->flags = 1U << mhba->eot_flag;
+		sgd_setsz(mhba, m_sg, cpu_to_le32(scsi_bufflen(scmd)));
 		*sg_count = 1;
 	}
 
@@ -267,8 +270,8 @@ static int mvumi_internal_cmd_sgl(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
 
 	m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(phy_addr));
 	m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(phy_addr));
-	m_sg->flags = SGD_EOT;
-	m_sg->size = cpu_to_le32(size);
+	m_sg->flags = 1U << mhba->eot_flag;
+	sgd_setsz(mhba, m_sg, cpu_to_le32(size));
 
 	return 0;
 }
@@ -285,7 +288,8 @@ static struct mvumi_cmd *mvumi_create_internal_cmd(struct mvumi_hba *mhba,
 	}
 	INIT_LIST_HEAD(&cmd->queue_pointer);
 
-	cmd->frame = kzalloc(mhba->ib_max_size, GFP_KERNEL);
+	cmd->frame = pci_alloc_consistent(mhba->pdev,
+				mhba->ib_max_size, &cmd->frame_phys);
 	if (!cmd->frame) {
 		dev_err(&mhba->pdev->dev, "failed to allocate memory for FW"
 			" frame, size = %d.\n", mhba->ib_max_size);
@@ -297,7 +301,8 @@ static struct mvumi_cmd *mvumi_create_internal_cmd(struct mvumi_hba *mhba,
 		if (mvumi_internal_cmd_sgl(mhba, cmd, buf_size)) {
 			dev_err(&mhba->pdev->dev, "failed to allocate memory"
 						" for internal frame\n");
-			kfree(cmd->frame);
+			pci_free_consistent(mhba->pdev, mhba->ib_max_size,
+					cmd->frame, cmd->frame_phys);
 			kfree(cmd);
 			return NULL;
 		}
@@ -317,7 +322,7 @@ static void mvumi_delete_internal_cmd(struct mvumi_hba *mhba,
 	if (cmd && cmd->frame) {
 		if (cmd->frame->sg_counts) {
 			m_sg = (struct mvumi_sgl *) &cmd->frame->payload[0];
-			size = m_sg->size;
+			sgd_getsz(mhba, m_sg, size);
 
 			phy_addr = (dma_addr_t) m_sg->baseaddr_l |
 				(dma_addr_t) ((m_sg->baseaddr_h << 16) << 16);
@@ -325,7 +330,8 @@ static void mvumi_delete_internal_cmd(struct mvumi_hba *mhba,
 			pci_free_consistent(mhba->pdev, size, cmd->data_buf,
 								phy_addr);
 		}
-		kfree(cmd->frame);
+		pci_free_consistent(mhba->pdev, mhba->ib_max_size,
+				cmd->frame, cmd->frame_phys);
 		kfree(cmd);
 	}
 }
@@ -374,7 +380,8 @@ static void mvumi_free_cmds(struct mvumi_hba *mhba)
 		cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd,
 							queue_pointer);
 		list_del(&cmd->queue_pointer);
-		kfree(cmd->frame);
+		if (!(mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC))
+			kfree(cmd->frame);
 		kfree(cmd);
 	}
 }
@@ -396,7 +403,12 @@ static int mvumi_alloc_cmds(struct mvumi_hba *mhba)
 
 		INIT_LIST_HEAD(&cmd->queue_pointer);
 		list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool);
-		cmd->frame = kzalloc(mhba->ib_max_size, GFP_KERNEL);
+		if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
+			cmd->frame = mhba->ib_frame + i * mhba->ib_max_size;
+			cmd->frame_phys = mhba->ib_frame_phys
+						+ i * mhba->ib_max_size;
+		} else
+			cmd->frame = kzalloc(mhba->ib_max_size, GFP_KERNEL);
 		if (!cmd->frame)
 			goto err_exit;
 	}
@@ -409,48 +421,71 @@ err_exit:
 		cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd,
 						queue_pointer);
 		list_del(&cmd->queue_pointer);
-		kfree(cmd->frame);
+		if (!(mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC))
+			kfree(cmd->frame);
 		kfree(cmd);
 	}
 	return -ENOMEM;
 }
 
-static int mvumi_get_ib_list_entry(struct mvumi_hba *mhba, void **ib_entry)
+static unsigned int mvumi_check_ib_list_9143(struct mvumi_hba *mhba)
 {
-	unsigned int ib_rp_reg, cur_ib_entry;
+	unsigned int ib_rp_reg;
+	struct mvumi_hw_regs *regs = mhba->regs;
+
+	ib_rp_reg = ioread32(mhba->regs->inb_read_pointer);
 
+	if (unlikely(((ib_rp_reg & regs->cl_slot_num_mask) ==
+			(mhba->ib_cur_slot & regs->cl_slot_num_mask)) &&
+			((ib_rp_reg & regs->cl_pointer_toggle)
+			 != (mhba->ib_cur_slot & regs->cl_pointer_toggle)))) {
+		dev_warn(&mhba->pdev->dev, "no free slot to use.\n");
+		return 0;
+	}
 	if (atomic_read(&mhba->fw_outstanding) >= mhba->max_io) {
 		dev_warn(&mhba->pdev->dev, "firmware io overflow.\n");
-		return -1;
+		return 0;
+	} else {
+		return mhba->max_io - atomic_read(&mhba->fw_outstanding);
 	}
-	ib_rp_reg = ioread32(mhba->mmio + CLA_INB_READ_POINTER);
+}
 
-	if (unlikely(((ib_rp_reg & CL_SLOT_NUM_MASK) ==
-			(mhba->ib_cur_slot & CL_SLOT_NUM_MASK)) &&
-			((ib_rp_reg & CL_POINTER_TOGGLE) !=
-			(mhba->ib_cur_slot & CL_POINTER_TOGGLE)))) {
-		dev_warn(&mhba->pdev->dev, "no free slot to use.\n");
-		return -1;
-	}
+static unsigned int mvumi_check_ib_list_9580(struct mvumi_hba *mhba)
+{
+	unsigned int count;
+	if (atomic_read(&mhba->fw_outstanding) >= (mhba->max_io - 1))
+		return 0;
+	count = ioread32(mhba->ib_shadow);
+	if (count == 0xffff)
+		return 0;
+	return count;
+}
+
+static void mvumi_get_ib_list_entry(struct mvumi_hba *mhba, void **ib_entry)
+{
+	unsigned int cur_ib_entry;
 
-	cur_ib_entry = mhba->ib_cur_slot & CL_SLOT_NUM_MASK;
+	cur_ib_entry = mhba->ib_cur_slot & mhba->regs->cl_slot_num_mask;
 	cur_ib_entry++;
 	if (cur_ib_entry >= mhba->list_num_io) {
 		cur_ib_entry -= mhba->list_num_io;
-		mhba->ib_cur_slot ^= CL_POINTER_TOGGLE;
+		mhba->ib_cur_slot ^= mhba->regs->cl_pointer_toggle;
+	}
+	mhba->ib_cur_slot &= ~mhba->regs->cl_slot_num_mask;
+	mhba->ib_cur_slot |= (cur_ib_entry & mhba->regs->cl_slot_num_mask);
+	if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
+		*ib_entry = mhba->ib_list + cur_ib_entry *
+				sizeof(struct mvumi_dyn_list_entry);
+	} else {
+		*ib_entry = mhba->ib_list + cur_ib_entry * mhba->ib_max_size;
 	}
-	mhba->ib_cur_slot &= ~CL_SLOT_NUM_MASK;
-	mhba->ib_cur_slot |= (cur_ib_entry & CL_SLOT_NUM_MASK);
-	*ib_entry = mhba->ib_list + cur_ib_entry * mhba->ib_max_size;
 	atomic_inc(&mhba->fw_outstanding);
-
-	return 0;
 }
 
 static void mvumi_send_ib_list_entry(struct mvumi_hba *mhba)
 {
-	iowrite32(0xfff, mhba->ib_shadow);
-	iowrite32(mhba->ib_cur_slot, mhba->mmio + CLA_INB_WRITE_POINTER);
+	iowrite32(0xffff, mhba->ib_shadow);
+	iowrite32(mhba->ib_cur_slot, mhba->regs->inb_write_pointer);
 }
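
A simplified sketch of the slot/toggle arithmetic used by mvumi_get_ib_list_entry(): the cursor packs a slot index with a wrap "toggle" bit that flips each time the index wraps, letting producer and consumer distinguish a full ring from an empty one. The mask values below are illustrative; the driver reads them from mhba->regs.

/* Standalone sketch of circular-list slot advance with a toggle bit. */
#include <stdint.h>
#include <stdio.h>

#define SLOT_NUM_MASK    0x00000FFF	/* illustrative mask values */
#define POINTER_TOGGLE   0x00004000

static uint32_t advance_slot(uint32_t cur, unsigned int list_num_io)
{
	uint32_t entry = (cur & SLOT_NUM_MASK) + 1;

	if (entry >= list_num_io) {
		entry -= list_num_io;
		cur ^= POINTER_TOGGLE;	/* flip on wraparound */
	}
	return (cur & ~SLOT_NUM_MASK) | (entry & SLOT_NUM_MASK);
}

int main(void)
{
	uint32_t slot = 31;	/* last entry of a 32-deep list */

	slot = advance_slot(slot, 32);
	printf("slot=0x%x (toggle %s)\n", slot,
	       (slot & POINTER_TOGGLE) ? "set" : "clear");
	return 0;
}
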
 
 static char mvumi_check_ob_frame(struct mvumi_hba *mhba,
@@ -480,31 +515,59 @@ static char mvumi_check_ob_frame(struct mvumi_hba *mhba,
 	return 0;
 }
 
-static void mvumi_receive_ob_list_entry(struct mvumi_hba *mhba)
+static int mvumi_check_ob_list_9143(struct mvumi_hba *mhba,
+			unsigned int *cur_obf, unsigned int *assign_obf_end)
 {
-	unsigned int ob_write_reg, ob_write_shadow_reg;
-	unsigned int cur_obf, assign_obf_end, i;
-	struct mvumi_ob_data *ob_data;
-	struct mvumi_rsp_frame *p_outb_frame;
+	unsigned int ob_write, ob_write_shadow;
+	struct mvumi_hw_regs *regs = mhba->regs;
 
 	do {
-		ob_write_reg = ioread32(mhba->mmio + CLA_OUTB_COPY_POINTER);
-		ob_write_shadow_reg = ioread32(mhba->ob_shadow);
-	} while ((ob_write_reg & CL_SLOT_NUM_MASK) != ob_write_shadow_reg);
+		ob_write = ioread32(regs->outb_copy_pointer);
+		ob_write_shadow = ioread32(mhba->ob_shadow);
+	} while ((ob_write & regs->cl_slot_num_mask) != ob_write_shadow);
 
-	cur_obf = mhba->ob_cur_slot & CL_SLOT_NUM_MASK;
-	assign_obf_end = ob_write_reg & CL_SLOT_NUM_MASK;
+	*cur_obf = mhba->ob_cur_slot & mhba->regs->cl_slot_num_mask;
+	*assign_obf_end = ob_write & mhba->regs->cl_slot_num_mask;
 
-	if ((ob_write_reg & CL_POINTER_TOGGLE) !=
-				(mhba->ob_cur_slot & CL_POINTER_TOGGLE)) {
-		assign_obf_end += mhba->list_num_io;
+	if ((ob_write & regs->cl_pointer_toggle) !=
+			(mhba->ob_cur_slot & regs->cl_pointer_toggle)) {
+		*assign_obf_end += mhba->list_num_io;
 	}
+	return 0;
+}
+
+static int mvumi_check_ob_list_9580(struct mvumi_hba *mhba,
+			unsigned int *cur_obf, unsigned int *assign_obf_end)
+{
+	unsigned int ob_write;
+	struct mvumi_hw_regs *regs = mhba->regs;
+
+	ob_write = ioread32(regs->outb_read_pointer);
+	ob_write = ioread32(regs->outb_copy_pointer);
+	*cur_obf = mhba->ob_cur_slot & mhba->regs->cl_slot_num_mask;
+	*assign_obf_end = ob_write & mhba->regs->cl_slot_num_mask;
+	if (*assign_obf_end < *cur_obf)
+		*assign_obf_end += mhba->list_num_io;
+	else if (*assign_obf_end == *cur_obf)
+		return -1;
+	return 0;
+}
+
+static void mvumi_receive_ob_list_entry(struct mvumi_hba *mhba)
+{
+	unsigned int cur_obf, assign_obf_end, i;
+	struct mvumi_ob_data *ob_data;
+	struct mvumi_rsp_frame *p_outb_frame;
+	struct mvumi_hw_regs *regs = mhba->regs;
+
+	if (mhba->instancet->check_ob_list(mhba, &cur_obf, &assign_obf_end))
+		return;
 
 	for (i = (assign_obf_end - cur_obf); i != 0; i--) {
 		cur_obf++;
 		if (cur_obf >= mhba->list_num_io) {
 			cur_obf -= mhba->list_num_io;
-			mhba->ob_cur_slot ^= CL_POINTER_TOGGLE;
+			mhba->ob_cur_slot ^= regs->cl_pointer_toggle;
 		}
 
 		p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size;
@@ -528,7 +591,7 @@ static void mvumi_receive_ob_list_entry(struct mvumi_hba *mhba)
 			ob_data = NULL;
 			if (cur_obf == 0) {
 				cur_obf = mhba->list_num_io - 1;
-				mhba->ob_cur_slot ^= CL_POINTER_TOGGLE;
+				mhba->ob_cur_slot ^= regs->cl_pointer_toggle;
 			} else
 				cur_obf -= 1;
 			break;
@@ -539,18 +602,20 @@ static void mvumi_receive_ob_list_entry(struct mvumi_hba *mhba)
 
 		list_add_tail(&ob_data->list, &mhba->free_ob_list);
 	}
-	mhba->ob_cur_slot &= ~CL_SLOT_NUM_MASK;
-	mhba->ob_cur_slot |= (cur_obf & CL_SLOT_NUM_MASK);
-	iowrite32(mhba->ob_cur_slot, mhba->mmio + CLA_OUTB_READ_POINTER);
+	mhba->ob_cur_slot &= ~regs->cl_slot_num_mask;
+	mhba->ob_cur_slot |= (cur_obf & regs->cl_slot_num_mask);
+	iowrite32(mhba->ob_cur_slot, regs->outb_read_pointer);
 }
 
-static void mvumi_reset(void *regs)
+static void mvumi_reset(struct mvumi_hba *mhba)
 {
-	iowrite32(0, regs + CPU_ENPOINTA_MASK_REG);
-	if (ioread32(regs + CPU_ARM_TO_PCIEA_MSG1) != HANDSHAKE_DONESTATE)
+	struct mvumi_hw_regs *regs = mhba->regs;
+
+	iowrite32(0, regs->enpointa_mask_reg);
+	if (ioread32(regs->arm_to_pciea_msg1) != HANDSHAKE_DONESTATE)
 		return;
 
-	iowrite32(DRBL_SOFT_RESET, regs + CPU_PCIEA_TO_ARM_DRBL_REG);
+	iowrite32(DRBL_SOFT_RESET, regs->pciea_to_arm_drbl_reg);
 }
 
 static unsigned char mvumi_start(struct mvumi_hba *mhba);
@@ -558,7 +623,7 @@ static unsigned char mvumi_start(struct mvumi_hba *mhba);
 static int mvumi_wait_for_outstanding(struct mvumi_hba *mhba)
 {
 	mhba->fw_state = FW_STATE_ABORT;
-	mvumi_reset(mhba->mmio);
+	mvumi_reset(mhba);
 
 	if (mvumi_start(mhba))
 		return FAILED;
@@ -566,6 +631,98 @@ static int mvumi_wait_for_outstanding(struct mvumi_hba *mhba)
 		return SUCCESS;
 }
 
+static int mvumi_wait_for_fw(struct mvumi_hba *mhba)
+{
+	struct mvumi_hw_regs *regs = mhba->regs;
+	u32 tmp;
+	unsigned long before;
+	before = jiffies;
+
+	iowrite32(0, regs->enpointa_mask_reg);
+	tmp = ioread32(regs->arm_to_pciea_msg1);
+	while (tmp != HANDSHAKE_READYSTATE) {
+		iowrite32(DRBL_MU_RESET, regs->pciea_to_arm_drbl_reg);
+		if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
+			dev_err(&mhba->pdev->dev,
+				"FW reset failed [0x%x].\n", tmp);
+			return FAILED;
+		}
+
+		msleep(500);
+		rmb();
+		tmp = ioread32(regs->arm_to_pciea_msg1);
+	}
+
+	return SUCCESS;
+}
+
+static void mvumi_backup_bar_addr(struct mvumi_hba *mhba)
+{
+	unsigned char i;
+
+	for (i = 0; i < MAX_BASE_ADDRESS; i++) {
+		pci_read_config_dword(mhba->pdev, 0x10 + i * 4,
+						&mhba->pci_base[i]);
+	}
+}
+
+static void mvumi_restore_bar_addr(struct mvumi_hba *mhba)
+{
+	unsigned char i;
+
+	for (i = 0; i < MAX_BASE_ADDRESS; i++) {
+		if (mhba->pci_base[i])
+			pci_write_config_dword(mhba->pdev, 0x10 + i * 4,
+						mhba->pci_base[i]);
+	}
+}
+
+static unsigned int mvumi_pci_set_master(struct pci_dev *pdev)
+{
+	unsigned int ret = 0;
+	pci_set_master(pdev);
+
+	if (IS_DMA64) {
+		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
+			ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+	} else
+		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+
+	return ret;
+}
+
+static int mvumi_reset_host_9580(struct mvumi_hba *mhba)
+{
+	mhba->fw_state = FW_STATE_ABORT;
+
+	iowrite32(0, mhba->regs->reset_enable);
+	iowrite32(0xf, mhba->regs->reset_request);
+
+	iowrite32(0x10, mhba->regs->reset_enable);
+	iowrite32(0x10, mhba->regs->reset_request);
+	msleep(100);
+	pci_disable_device(mhba->pdev);
+
+	if (pci_enable_device(mhba->pdev)) {
+		dev_err(&mhba->pdev->dev, "enable device failed\n");
+		return FAILED;
+	}
+	if (mvumi_pci_set_master(mhba->pdev)) {
+		dev_err(&mhba->pdev->dev, "set master failed\n");
+		return FAILED;
+	}
+	mvumi_restore_bar_addr(mhba);
+	if (mvumi_wait_for_fw(mhba) == FAILED)
+		return FAILED;
+
+	return mvumi_wait_for_outstanding(mhba);
+}
+
+static int mvumi_reset_host_9143(struct mvumi_hba *mhba)
+{
+	return mvumi_wait_for_outstanding(mhba);
+}
+
 static int mvumi_host_reset(struct scsi_cmnd *scmd)
 {
 	struct mvumi_hba *mhba;
@@ -575,7 +732,7 @@ static int mvumi_host_reset(struct scsi_cmnd *scmd)
 	scmd_printk(KERN_NOTICE, scmd, "RESET -%ld cmd=%x retries=%x\n",
 			scmd->serial_number, scmd->cmnd[0], scmd->retries);
 
-	return mvumi_wait_for_outstanding(mhba);
+	return mhba->instancet->reset_host(mhba);
 }
 
 static int mvumi_issue_blocked_cmd(struct mvumi_hba *mhba,
@@ -628,7 +785,9 @@ static void mvumi_release_fw(struct mvumi_hba *mhba)
 	mvumi_free_cmds(mhba);
 	mvumi_release_mem_resource(mhba);
 	mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr);
-	kfree(mhba->handshake_page);
+	pci_free_consistent(mhba->pdev, HSP_MAX_SIZE,
+		mhba->handshake_page, mhba->handshake_page_phys);
+	kfree(mhba->regs);
 	pci_release_regions(mhba->pdev);
 }
 
@@ -665,6 +824,7 @@ get_cmd:	cmd = mvumi_create_internal_cmd(mhba, 0);
 		frame->cdb_length = MAX_COMMAND_SIZE;
 		memset(frame->cdb, 0, MAX_COMMAND_SIZE);
 		frame->cdb[0] = SCSI_CMD_MARVELL_SPECIFIC;
+		frame->cdb[1] = CDB_CORE_MODULE;
 		frame->cdb[2] = CDB_CORE_SHUTDOWN;
 
 		mvumi_issue_blocked_cmd(mhba, cmd);
@@ -695,7 +855,7 @@ mvumi_calculate_checksum(struct mvumi_hs_header *p_header,
 	return ret;
 }
 
-void mvumi_hs_build_page(struct mvumi_hba *mhba,
+static void mvumi_hs_build_page(struct mvumi_hba *mhba,
 				struct mvumi_hs_header *hs_header)
 {
 	struct mvumi_hs_page2 *hs_page2;
@@ -710,6 +870,8 @@ void mvumi_hs_build_page(struct mvumi_hba *mhba,
 		hs_header->frame_length = sizeof(*hs_page2) - 4;
 		memset(hs_header->frame_content, 0, hs_header->frame_length);
 		hs_page2->host_type = 3; /* 3 means Linux */
+		if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC)
+			hs_page2->host_cap = 0x08; /* host dynamic source mode */
 		hs_page2->host_ver.ver_major = VER_MAJOR;
 		hs_page2->host_ver.ver_minor = VER_MINOR;
 		hs_page2->host_ver.ver_oem = VER_OEM;
@@ -745,8 +907,18 @@ void mvumi_hs_build_page(struct mvumi_hba *mhba,
 		hs_page4->ob_baseaddr_h = upper_32_bits(mhba->ob_list_phys);
 		hs_page4->ib_entry_size = mhba->ib_max_size_setting;
 		hs_page4->ob_entry_size = mhba->ob_max_size_setting;
-		hs_page4->ob_depth = mhba->list_num_io;
-		hs_page4->ib_depth = mhba->list_num_io;
+		if (mhba->hba_capability
+			& HS_CAPABILITY_NEW_PAGE_IO_DEPTH_DEF) {
+			hs_page4->ob_depth = find_first_bit((unsigned long *)
+							    &mhba->list_num_io,
+							    BITS_PER_LONG);
+			hs_page4->ib_depth = find_first_bit((unsigned long *)
+							    &mhba->list_num_io,
+							    BITS_PER_LONG);
+		} else {
+			hs_page4->ob_depth = (u8) mhba->list_num_io;
+			hs_page4->ib_depth = (u8) mhba->list_num_io;
+		}
 		hs_header->checksum = mvumi_calculate_checksum(hs_header,
 						hs_header->frame_length);
 		break;
@@ -774,8 +946,11 @@ static int mvumi_init_data(struct mvumi_hba *mhba)
 		return 0;
 
 	tmp_size = mhba->ib_max_size * mhba->max_io;
+	if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC)
+		tmp_size += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;
+
 	tmp_size += 128 + mhba->ob_max_size * mhba->max_io;
-	tmp_size += 8 + sizeof(u32) + 16;
+	tmp_size += 8 + sizeof(u32)*2 + 16;
 
 	res_mgnt = mvumi_alloc_mem_resource(mhba,
 					RESOURCE_UNCACHED_MEMORY, tmp_size);
@@ -793,24 +968,41 @@ static int mvumi_init_data(struct mvumi_hba *mhba)
 	v += offset;
 	mhba->ib_list = v;
 	mhba->ib_list_phys = p;
+	if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
+		v += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;
+		p += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;
+		mhba->ib_frame = v;
+		mhba->ib_frame_phys = p;
+	}
 	v += mhba->ib_max_size * mhba->max_io;
 	p += mhba->ib_max_size * mhba->max_io;
+
 	/* ib shadow */
 	offset = round_up(p, 8) - p;
 	p += offset;
 	v += offset;
 	mhba->ib_shadow = v;
 	mhba->ib_shadow_phys = p;
-	p += sizeof(u32);
-	v += sizeof(u32);
+	p += sizeof(u32)*2;
+	v += sizeof(u32)*2;
 	/* ob shadow */
-	offset = round_up(p, 8) - p;
-	p += offset;
-	v += offset;
-	mhba->ob_shadow = v;
-	mhba->ob_shadow_phys = p;
-	p += 8;
-	v += 8;
+	if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) {
+		offset = round_up(p, 8) - p;
+		p += offset;
+		v += offset;
+		mhba->ob_shadow = v;
+		mhba->ob_shadow_phys = p;
+		p += 8;
+		v += 8;
+	} else {
+		offset = round_up(p, 4) - p;
+		p += offset;
+		v += offset;
+		mhba->ob_shadow = v;
+		mhba->ob_shadow_phys = p;
+		p += 4;
+		v += 4;
+	}
 
 	/* ob list */
 	offset = round_up(p, 128) - p;
@@ -902,6 +1094,12 @@ static int mvumi_hs_process_page(struct mvumi_hba *mhba,
 		dev_dbg(&mhba->pdev->dev, "FW version:%d\n",
 						hs_page1->fw_ver.ver_build);
 
+		if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_COMPACT_SG)
+			mhba->eot_flag = 22;
+		else
+			mhba->eot_flag = 27;
+		if (mhba->hba_capability & HS_CAPABILITY_NEW_PAGE_IO_DEPTH_DEF)
+			mhba->list_num_io = 1 << hs_page1->cl_inout_list_depth;
 		break;
 	default:
 		dev_err(&mhba->pdev->dev, "handshake: page code error\n");
@@ -923,12 +1121,12 @@ static int mvumi_handshake(struct mvumi_hba *mhba)
 {
 	unsigned int hs_state, tmp, hs_fun;
 	struct mvumi_hs_header *hs_header;
-	void *regs = mhba->mmio;
+	struct mvumi_hw_regs *regs = mhba->regs;
 
 	if (mhba->fw_state == FW_STATE_STARTING)
 		hs_state = HS_S_START;
 	else {
-		tmp = ioread32(regs + CPU_ARM_TO_PCIEA_MSG0);
+		tmp = ioread32(regs->arm_to_pciea_msg0);
 		hs_state = HS_GET_STATE(tmp);
 		dev_dbg(&mhba->pdev->dev, "handshake state[0x%x].\n", hs_state);
 		if (HS_GET_STATUS(tmp) != HS_STATUS_OK) {
@@ -943,21 +1141,20 @@ static int mvumi_handshake(struct mvumi_hba *mhba)
 		mhba->fw_state = FW_STATE_HANDSHAKING;
 		HS_SET_STATUS(hs_fun, HS_STATUS_OK);
 		HS_SET_STATE(hs_fun, HS_S_RESET);
-		iowrite32(HANDSHAKE_SIGNATURE, regs + CPU_PCIEA_TO_ARM_MSG1);
-		iowrite32(hs_fun, regs + CPU_PCIEA_TO_ARM_MSG0);
-		iowrite32(DRBL_HANDSHAKE, regs + CPU_PCIEA_TO_ARM_DRBL_REG);
+		iowrite32(HANDSHAKE_SIGNATURE, regs->pciea_to_arm_msg1);
+		iowrite32(hs_fun, regs->pciea_to_arm_msg0);
+		iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg);
 		break;
 
 	case HS_S_RESET:
 		iowrite32(lower_32_bits(mhba->handshake_page_phys),
-					regs + CPU_PCIEA_TO_ARM_MSG1);
+					regs->pciea_to_arm_msg1);
 		iowrite32(upper_32_bits(mhba->handshake_page_phys),
-					regs + CPU_ARM_TO_PCIEA_MSG1);
+					regs->arm_to_pciea_msg1);
 		HS_SET_STATUS(hs_fun, HS_STATUS_OK);
 		HS_SET_STATE(hs_fun, HS_S_PAGE_ADDR);
-		iowrite32(hs_fun, regs + CPU_PCIEA_TO_ARM_MSG0);
-		iowrite32(DRBL_HANDSHAKE, regs + CPU_PCIEA_TO_ARM_DRBL_REG);
-
+		iowrite32(hs_fun, regs->pciea_to_arm_msg0);
+		iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg);
 		break;
 
 	case HS_S_PAGE_ADDR:
@@ -997,30 +1194,37 @@ static int mvumi_handshake(struct mvumi_hba *mhba)
 			HS_SET_STATE(hs_fun, HS_S_END);
 
 		HS_SET_STATUS(hs_fun, HS_STATUS_OK);
-		iowrite32(hs_fun, regs + CPU_PCIEA_TO_ARM_MSG0);
-		iowrite32(DRBL_HANDSHAKE, regs + CPU_PCIEA_TO_ARM_DRBL_REG);
+		iowrite32(hs_fun, regs->pciea_to_arm_msg0);
+		iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg);
 		break;
 
 	case HS_S_END:
 		/* Set communication list ISR */
-		tmp = ioread32(regs + CPU_ENPOINTA_MASK_REG);
-		tmp |= INT_MAP_COMAOUT | INT_MAP_COMAERR;
-		iowrite32(tmp, regs + CPU_ENPOINTA_MASK_REG);
+		tmp = ioread32(regs->enpointa_mask_reg);
+		tmp |= regs->int_comaout | regs->int_comaerr;
+		iowrite32(tmp, regs->enpointa_mask_reg);
 		iowrite32(mhba->list_num_io, mhba->ib_shadow);
 		/* Set InBound List Available count shadow */
 		iowrite32(lower_32_bits(mhba->ib_shadow_phys),
-					regs + CLA_INB_AVAL_COUNT_BASEL);
+					regs->inb_aval_count_basel);
 		iowrite32(upper_32_bits(mhba->ib_shadow_phys),
-					regs + CLA_INB_AVAL_COUNT_BASEH);
-
-		/* Set OutBound List Available count shadow */
-		iowrite32((mhba->list_num_io-1) | CL_POINTER_TOGGLE,
-						mhba->ob_shadow);
-		iowrite32(lower_32_bits(mhba->ob_shadow_phys), regs + 0x5B0);
-		iowrite32(upper_32_bits(mhba->ob_shadow_phys), regs + 0x5B4);
+					regs->inb_aval_count_baseh);
+
+		if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143) {
+			/* Set OutBound List Available count shadow */
+			iowrite32((mhba->list_num_io-1) |
+							regs->cl_pointer_toggle,
+							mhba->ob_shadow);
+			iowrite32(lower_32_bits(mhba->ob_shadow_phys),
+							regs->outb_copy_basel);
+			iowrite32(upper_32_bits(mhba->ob_shadow_phys),
+							regs->outb_copy_baseh);
+		}
 
-		mhba->ib_cur_slot = (mhba->list_num_io - 1) | CL_POINTER_TOGGLE;
-		mhba->ob_cur_slot = (mhba->list_num_io - 1) | CL_POINTER_TOGGLE;
+		mhba->ib_cur_slot = (mhba->list_num_io - 1) |
+							regs->cl_pointer_toggle;
+		mhba->ob_cur_slot = (mhba->list_num_io - 1) |
+							regs->cl_pointer_toggle;
 		mhba->fw_state = FW_STATE_STARTED;
 
 		break;
@@ -1040,7 +1244,7 @@ static unsigned char mvumi_handshake_event(struct mvumi_hba *mhba)
 	before = jiffies;
 	mvumi_handshake(mhba);
 	do {
-		isr_status = mhba->instancet->read_fw_status_reg(mhba->mmio);
+		isr_status = mhba->instancet->read_fw_status_reg(mhba);
 
 		if (mhba->fw_state == FW_STATE_STARTED)
 			return 0;
@@ -1062,16 +1266,15 @@ static unsigned char mvumi_handshake_event(struct mvumi_hba *mhba)
 
 static unsigned char mvumi_check_handshake(struct mvumi_hba *mhba)
 {
-	void *regs = mhba->mmio;
 	unsigned int tmp;
 	unsigned long before;
 
 	before = jiffies;
-	tmp = ioread32(regs + CPU_ARM_TO_PCIEA_MSG1);
+	tmp = ioread32(mhba->regs->arm_to_pciea_msg1);
 	while ((tmp != HANDSHAKE_READYSTATE) && (tmp != HANDSHAKE_DONESTATE)) {
 		if (tmp != HANDSHAKE_READYSTATE)
 			iowrite32(DRBL_MU_RESET,
-					regs + CPU_PCIEA_TO_ARM_DRBL_REG);
+					mhba->regs->pciea_to_arm_drbl_reg);
 		if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
 			dev_err(&mhba->pdev->dev,
 				"invalid signature [0x%x].\n", tmp);
@@ -1079,7 +1282,7 @@ static unsigned char mvumi_check_handshake(struct mvumi_hba *mhba)
 		}
 		usleep_range(1000, 2000);
 		rmb();
-		tmp = ioread32(regs + CPU_ARM_TO_PCIEA_MSG1);
+		tmp = ioread32(mhba->regs->arm_to_pciea_msg1);
 	}
 
 	mhba->fw_state = FW_STATE_STARTING;
@@ -1100,15 +1303,17 @@ static unsigned char mvumi_check_handshake(struct mvumi_hba *mhba)
 
 static unsigned char mvumi_start(struct mvumi_hba *mhba)
 {
-	void *regs = mhba->mmio;
 	unsigned int tmp;
+	struct mvumi_hw_regs *regs = mhba->regs;
+
 	/* clear doorbell */
-	tmp = ioread32(regs + CPU_ARM_TO_PCIEA_DRBL_REG);
-	iowrite32(tmp, regs + CPU_ARM_TO_PCIEA_DRBL_REG);
+	tmp = ioread32(regs->arm_to_pciea_drbl_reg);
+	iowrite32(tmp, regs->arm_to_pciea_drbl_reg);
 
-	iowrite32(0x3FFFFFFF, regs + CPU_ARM_TO_PCIEA_MASK_REG);
-	tmp = ioread32(regs + CPU_ENPOINTA_MASK_REG) | INT_MAP_DL_CPU2PCIEA;
-	iowrite32(tmp, regs + CPU_ENPOINTA_MASK_REG);
+	iowrite32(regs->int_drbl_int_mask, regs->arm_to_pciea_mask_reg);
+	tmp = ioread32(regs->enpointa_mask_reg) | regs->int_dl_cpu2pciea;
+	iowrite32(tmp, regs->enpointa_mask_reg);
+	msleep(100);
 	if (mvumi_check_handshake(mhba))
 		return -1;
 
@@ -1166,6 +1371,7 @@ static void mvumi_complete_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
 	cmd->scmd->scsi_done(scmd);
 	mvumi_return_cmd(mhba, cmd);
 }
+
 static void mvumi_complete_internal_cmd(struct mvumi_hba *mhba,
 						struct mvumi_cmd *cmd,
 					struct mvumi_rsp_frame *ob_frame)
@@ -1210,6 +1416,304 @@ static void mvumi_show_event(struct mvumi_hba *mhba,
 	}
 }
 
+static int mvumi_handle_hotplug(struct mvumi_hba *mhba, u16 devid, int status)
+{
+	struct scsi_device *sdev;
+	int ret = -1;
+
+	if (status == DEVICE_OFFLINE) {
+		sdev = scsi_device_lookup(mhba->shost, 0, devid, 0);
+		if (sdev) {
+			dev_dbg(&mhba->pdev->dev, "remove disk %d-%d-%d.\n", 0,
+								sdev->id, 0);
+			scsi_remove_device(sdev);
+			scsi_device_put(sdev);
+			ret = 0;
+		} else
+			dev_err(&mhba->pdev->dev, "no disk[%d] to remove\n",
+									devid);
+	} else if (status == DEVICE_ONLINE) {
+		sdev = scsi_device_lookup(mhba->shost, 0, devid, 0);
+		if (!sdev) {
+			scsi_add_device(mhba->shost, 0, devid, 0);
+			dev_dbg(&mhba->pdev->dev, "add disk %d-%d-%d.\n", 0,
+								devid, 0);
+			ret = 0;
+		} else {
+			dev_err(&mhba->pdev->dev, "don't add disk %d-%d-%d.\n",
+								0, devid, 0);
+			scsi_device_put(sdev);
+		}
+	}
+	return ret;
+}
+
+static u64 mvumi_inquiry(struct mvumi_hba *mhba,
+	unsigned int id, struct mvumi_cmd *cmd)
+{
+	struct mvumi_msg_frame *frame;
+	u64 wwid = 0;
+	int cmd_alloc = 0;
+	int data_buf_len = 64;
+
+	if (!cmd) {
+		cmd = mvumi_create_internal_cmd(mhba, data_buf_len);
+		if (cmd)
+			cmd_alloc = 1;
+		else
+			return 0;
+	} else {
+		memset(cmd->data_buf, 0, data_buf_len);
+	}
+	cmd->scmd = NULL;
+	cmd->cmd_status = REQ_STATUS_PENDING;
+	atomic_set(&cmd->sync_cmd, 0);
+	frame = cmd->frame;
+	frame->device_id = (u16) id;
+	frame->cmd_flag = CMD_FLAG_DATA_IN;
+	frame->req_function = CL_FUN_SCSI_CMD;
+	frame->cdb_length = 6;
+	frame->data_transfer_length = MVUMI_INQUIRY_LENGTH;
+	memset(frame->cdb, 0, frame->cdb_length);
+	frame->cdb[0] = INQUIRY;
+	frame->cdb[4] = frame->data_transfer_length;
+
+	mvumi_issue_blocked_cmd(mhba, cmd);
+
+	if (cmd->cmd_status == SAM_STAT_GOOD) {
+		if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143)
+			wwid = id + 1;
+		else
+			memcpy((void *)&wwid,
+			       (cmd->data_buf + MVUMI_INQUIRY_UUID_OFF),
+			       MVUMI_INQUIRY_UUID_LEN);
+		dev_dbg(&mhba->pdev->dev,
+			"inquiry device(0:%d:0) wwid(%llx)\n", id, wwid);
+	} else {
+		wwid = 0;
+	}
+	if (cmd_alloc)
+		mvumi_delete_internal_cmd(mhba, cmd);
+
+	return wwid;
+}
+
+static void mvumi_detach_devices(struct mvumi_hba *mhba)
+{
+	struct mvumi_device *mv_dev = NULL, *dev_next;
+	struct scsi_device *sdev = NULL;
+
+	mutex_lock(&mhba->device_lock);
+
+	/* detach Hard Disk */
+	list_for_each_entry_safe(mv_dev, dev_next,
+		&mhba->shost_dev_list, list) {
+		mvumi_handle_hotplug(mhba, mv_dev->id, DEVICE_OFFLINE);
+		list_del_init(&mv_dev->list);
+		dev_dbg(&mhba->pdev->dev, "release device(0:%d:0) wwid(%llx)\n",
+			mv_dev->id, mv_dev->wwid);
+		kfree(mv_dev);
+	}
+	list_for_each_entry_safe(mv_dev, dev_next, &mhba->mhba_dev_list, list) {
+		list_del_init(&mv_dev->list);
+		dev_dbg(&mhba->pdev->dev, "release device(0:%d:0) wwid(%llx)\n",
+			mv_dev->id, mv_dev->wwid);
+		kfree(mv_dev);
+	}
+
+	/* detach virtual device */
+	if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580)
+		sdev = scsi_device_lookup(mhba->shost, 0,
+						mhba->max_target_id - 1, 0);
+
+	if (sdev) {
+		scsi_remove_device(sdev);
+		scsi_device_put(sdev);
+	}
+
+	mutex_unlock(&mhba->device_lock);
+}
+
+static void mvumi_rescan_devices(struct mvumi_hba *mhba, int id)
+{
+	struct scsi_device *sdev;
+
+	sdev = scsi_device_lookup(mhba->shost, 0, id, 0);
+	if (sdev) {
+		scsi_rescan_device(&sdev->sdev_gendev);
+		scsi_device_put(sdev);
+	}
+}
+
+static int mvumi_match_devices(struct mvumi_hba *mhba, int id, u64 wwid)
+{
+	struct mvumi_device *mv_dev = NULL;
+
+	list_for_each_entry(mv_dev, &mhba->shost_dev_list, list) {
+		if (mv_dev->wwid == wwid) {
+			if (mv_dev->id != id) {
+				dev_err(&mhba->pdev->dev,
+					"%s has same wwid[%llx],"
+					" but different id[%d %d]\n",
+					__func__, mv_dev->wwid, mv_dev->id, id);
+				return -1;
+			} else {
+				if (mhba->pdev->device ==
+						PCI_DEVICE_ID_MARVELL_MV9143)
+					mvumi_rescan_devices(mhba, id);
+				return 1;
+			}
+		}
+	}
+	return 0;
+}
+
+static void mvumi_remove_devices(struct mvumi_hba *mhba, int id)
+{
+	struct mvumi_device *mv_dev = NULL, *dev_next;
+
+	list_for_each_entry_safe(mv_dev, dev_next,
+				&mhba->shost_dev_list, list) {
+		if (mv_dev->id == id) {
+			dev_dbg(&mhba->pdev->dev,
+				"detach device(0:%d:0) wwid(%llx) from HOST\n",
+				mv_dev->id, mv_dev->wwid);
+			mvumi_handle_hotplug(mhba, mv_dev->id, DEVICE_OFFLINE);
+			list_del_init(&mv_dev->list);
+			kfree(mv_dev);
+		}
+	}
+}
+
+static int mvumi_probe_devices(struct mvumi_hba *mhba)
+{
+	int id, maxid;
+	u64 wwid = 0;
+	struct mvumi_device *mv_dev = NULL;
+	struct mvumi_cmd *cmd = NULL;
+	int found = 0;
+
+	cmd = mvumi_create_internal_cmd(mhba, 64);
+	if (!cmd)
+		return -1;
+
+	if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143)
+		maxid = mhba->max_target_id;
+	else
+		maxid = mhba->max_target_id - 1;
+
+	for (id = 0; id < maxid; id++) {
+		wwid = mvumi_inquiry(mhba, id, cmd);
+		if (!wwid) {
+			/* device did not respond, remove it */
+			mvumi_remove_devices(mhba, id);
+		} else {
+			/* device responded, add it */
+			found = mvumi_match_devices(mhba, id, wwid);
+			if (!found) {
+				mvumi_remove_devices(mhba, id);
+				mv_dev = kzalloc(sizeof(struct mvumi_device),
+								GFP_KERNEL);
+				if (!mv_dev) {
+					dev_err(&mhba->pdev->dev,
+						"%s alloc mv_dev failed\n",
+						__func__);
+					continue;
+				}
+				mv_dev->id = id;
+				mv_dev->wwid = wwid;
+				mv_dev->sdev = NULL;
+				INIT_LIST_HEAD(&mv_dev->list);
+				list_add_tail(&mv_dev->list,
+					      &mhba->mhba_dev_list);
+				dev_dbg(&mhba->pdev->dev,
+					"probed a new device(0:%d:0)"
+					" wwid(%llx)\n", id, mv_dev->wwid);
+			} else if (found == -1)
+				return -1;
+			else
+				continue;
+		}
+	}
+
+	if (cmd)
+		mvumi_delete_internal_cmd(mhba, cmd);
+
+	return 0;
+}
+
+static int mvumi_rescan_bus(void *data)
+{
+	int ret = 0;
+	struct mvumi_hba *mhba = (struct mvumi_hba *) data;
+	struct mvumi_device *mv_dev = NULL, *dev_next;
+
+	while (!kthread_should_stop()) {
+
+		set_current_state(TASK_INTERRUPTIBLE);
+		if (!atomic_read(&mhba->pnp_count))
+			schedule();
+		msleep(1000);
+		atomic_set(&mhba->pnp_count, 0);
+		__set_current_state(TASK_RUNNING);
+
+		mutex_lock(&mhba->device_lock);
+		ret = mvumi_probe_devices(mhba);
+		if (!ret) {
+			list_for_each_entry_safe(mv_dev, dev_next,
+						 &mhba->mhba_dev_list, list) {
+				if (mvumi_handle_hotplug(mhba, mv_dev->id,
+							 DEVICE_ONLINE)) {
+					dev_err(&mhba->pdev->dev,
+						"%s add device(0:%d:0) failed,"
+						" wwid(%llx) already exists\n",
+						__func__,
+						mv_dev->id, mv_dev->wwid);
+					list_del_init(&mv_dev->list);
+					kfree(mv_dev);
+				} else {
+					list_move_tail(&mv_dev->list,
+						       &mhba->shost_dev_list);
+				}
+			}
+		}
+		mutex_unlock(&mhba->device_lock);
+	}
+	return 0;
+}
+
+static void mvumi_proc_msg(struct mvumi_hba *mhba,
+					struct mvumi_hotplug_event *param)
+{
+	u16 size = param->size;
+	const unsigned long *ar_bitmap;
+	const unsigned long *re_bitmap;
+	int index;
+
+	if (mhba->fw_flag & MVUMI_FW_ATTACH) {
+		index = -1;
+		ar_bitmap = (const unsigned long *) param->bitmap;
+		re_bitmap = (const unsigned long *) &param->bitmap[size >> 3];
+
+		mutex_lock(&mhba->sas_discovery_mutex);
+		do {
+			index = find_next_zero_bit(ar_bitmap, size, index + 1);
+			if (index >= size)
+				break;
+			mvumi_handle_hotplug(mhba, index, DEVICE_ONLINE);
+		} while (1);
+
+		index = -1;
+		do {
+			index = find_next_zero_bit(re_bitmap, size, index + 1);
+			if (index >= size)
+				break;
+			mvumi_handle_hotplug(mhba, index, DEVICE_OFFLINE);
+		} while (1);
+		mutex_unlock(&mhba->sas_discovery_mutex);
+	}
+}
+
 static void mvumi_notification(struct mvumi_hba *mhba, u8 msg, void *buffer)
 {
 	if (msg == APICDB1_EVENT_GETEVENT) {
@@ -1227,6 +1731,8 @@ static void mvumi_notification(struct mvumi_hba *mhba, u8 msg, void *buffer)
 			param = &er->events[i];
 			mvumi_show_event(mhba, param);
 		}
+	} else if (msg == APICDB1_HOST_GETEVENT) {
+		mvumi_proc_msg(mhba, buffer);
 	}
 }
 
@@ -1271,17 +1777,27 @@ static void mvumi_scan_events(struct work_struct *work)
 	kfree(mu_ev);
 }
 
-static void mvumi_launch_events(struct mvumi_hba *mhba, u8 msg)
+static void mvumi_launch_events(struct mvumi_hba *mhba, u32 isr_status)
 {
 	struct mvumi_events_wq *mu_ev;
 
-	mu_ev = kzalloc(sizeof(*mu_ev), GFP_ATOMIC);
-	if (mu_ev) {
-		INIT_WORK(&mu_ev->work_q, mvumi_scan_events);
-		mu_ev->mhba = mhba;
-		mu_ev->event = msg;
-		mu_ev->param = NULL;
-		schedule_work(&mu_ev->work_q);
+	while (isr_status & (DRBL_BUS_CHANGE | DRBL_EVENT_NOTIFY)) {
+		if (isr_status & DRBL_BUS_CHANGE) {
+			atomic_inc(&mhba->pnp_count);
+			wake_up_process(mhba->dm_thread);
+			isr_status &= ~(DRBL_BUS_CHANGE);
+			continue;
+		}
+
+		mu_ev = kzalloc(sizeof(*mu_ev), GFP_ATOMIC);
+		if (mu_ev) {
+			INIT_WORK(&mu_ev->work_q, mvumi_scan_events);
+			mu_ev->mhba = mhba;
+			mu_ev->event = APICDB1_EVENT_GETEVENT;
+			isr_status &= ~(DRBL_EVENT_NOTIFY);
+			mu_ev->param = NULL;
+			schedule_work(&mu_ev->work_q);
+		}
 	}
 }
 
@@ -1322,16 +1838,17 @@ static irqreturn_t mvumi_isr_handler(int irq, void *devp)
 		return IRQ_NONE;
 	}
 
-	if (mhba->global_isr & INT_MAP_DL_CPU2PCIEA) {
+	if (mhba->global_isr & mhba->regs->int_dl_cpu2pciea) {
+		if (mhba->isr_status & (DRBL_BUS_CHANGE | DRBL_EVENT_NOTIFY))
+			mvumi_launch_events(mhba, mhba->isr_status);
 		if (mhba->isr_status & DRBL_HANDSHAKE_ISR) {
 			dev_warn(&mhba->pdev->dev, "enter handshake again!\n");
 			mvumi_handshake(mhba);
 		}
-		if (mhba->isr_status & DRBL_EVENT_NOTIFY)
-			mvumi_launch_events(mhba, APICDB1_EVENT_GETEVENT);
+
 	}
 
-	if (mhba->global_isr & INT_MAP_COMAOUT)
+	if (mhba->global_isr & mhba->regs->int_comaout)
 		mvumi_receive_ob_list_entry(mhba);
 
 	mhba->global_isr = 0;
@@ -1358,8 +1875,7 @@ static enum mvumi_qc_result mvumi_send_command(struct mvumi_hba *mhba,
 		dev_dbg(&mhba->pdev->dev, "no free tag.\n");
 		return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE;
 	}
-	if (mvumi_get_ib_list_entry(mhba, &ib_entry))
-		return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE;
+	mvumi_get_ib_list_entry(mhba, &ib_entry);
 
 	cmd->frame->tag = tag_get_one(mhba, &mhba->tag_pool);
 	cmd->frame->request_id = mhba->io_seq++;
@@ -1367,21 +1883,35 @@ static enum mvumi_qc_result mvumi_send_command(struct mvumi_hba *mhba,
 	mhba->tag_cmd[cmd->frame->tag] = cmd;
 	frame_len = sizeof(*ib_frame) - 4 +
 				ib_frame->sg_counts * sizeof(struct mvumi_sgl);
-	memcpy(ib_entry, ib_frame, frame_len);
+	if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
+		struct mvumi_dyn_list_entry *dle;
+		dle = ib_entry;
+		dle->src_low_addr =
+			cpu_to_le32(lower_32_bits(cmd->frame_phys));
+		dle->src_high_addr =
+			cpu_to_le32(upper_32_bits(cmd->frame_phys));
+		dle->if_length = (frame_len >> 2) & 0xFFF;
+	} else {
+		memcpy(ib_entry, ib_frame, frame_len);
+	}
 	return MV_QUEUE_COMMAND_RESULT_SENT;
 }
 
 static void mvumi_fire_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd)
 {
 	unsigned short num_of_cl_sent = 0;
+	unsigned int count;
 	enum mvumi_qc_result result;
 
 	if (cmd)
 		list_add_tail(&cmd->queue_pointer, &mhba->waiting_req_list);
+	count = mhba->instancet->check_ib_list(mhba);
+	if (list_empty(&mhba->waiting_req_list) || !count)
+		return;
 
-	while (!list_empty(&mhba->waiting_req_list)) {
+	do {
 		cmd = list_first_entry(&mhba->waiting_req_list,
-					 struct mvumi_cmd, queue_pointer);
+				       struct mvumi_cmd, queue_pointer);
 		list_del_init(&cmd->queue_pointer);
 		result = mvumi_send_command(mhba, cmd);
 		switch (result) {
@@ -1395,65 +1925,77 @@ static void mvumi_fire_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd)
 
 			return;
 		}
-	}
+	} while (!list_empty(&mhba->waiting_req_list) && count--);
+
 	if (num_of_cl_sent > 0)
 		mvumi_send_ib_list_entry(mhba);
 }
 
 /**
  * mvumi_enable_intr -	Enables interrupts
- * @regs:			FW register set
+ * @mhba:		Adapter soft state
  */
-static void mvumi_enable_intr(void *regs)
+static void mvumi_enable_intr(struct mvumi_hba *mhba)
 {
 	unsigned int mask;
+	struct mvumi_hw_regs *regs = mhba->regs;
 
-	iowrite32(0x3FFFFFFF, regs + CPU_ARM_TO_PCIEA_MASK_REG);
-	mask = ioread32(regs + CPU_ENPOINTA_MASK_REG);
-	mask |= INT_MAP_DL_CPU2PCIEA | INT_MAP_COMAOUT | INT_MAP_COMAERR;
-	iowrite32(mask, regs + CPU_ENPOINTA_MASK_REG);
+	iowrite32(regs->int_drbl_int_mask, regs->arm_to_pciea_mask_reg);
+	mask = ioread32(regs->enpointa_mask_reg);
+	mask |= regs->int_dl_cpu2pciea | regs->int_comaout | regs->int_comaerr;
+	iowrite32(mask, regs->enpointa_mask_reg);
 }
 
 /**
  * mvumi_disable_intr -Disables interrupt
- * @regs:			FW register set
+ * @mhba:		Adapter soft state
  */
-static void mvumi_disable_intr(void *regs)
+static void mvumi_disable_intr(struct mvumi_hba *mhba)
 {
 	unsigned int mask;
+	struct mvumi_hw_regs *regs = mhba->regs;
 
-	iowrite32(0, regs + CPU_ARM_TO_PCIEA_MASK_REG);
-	mask = ioread32(regs + CPU_ENPOINTA_MASK_REG);
-	mask &= ~(INT_MAP_DL_CPU2PCIEA | INT_MAP_COMAOUT | INT_MAP_COMAERR);
-	iowrite32(mask, regs + CPU_ENPOINTA_MASK_REG);
+	iowrite32(0, regs->arm_to_pciea_mask_reg);
+	mask = ioread32(regs->enpointa_mask_reg);
+	mask &= ~(regs->int_dl_cpu2pciea | regs->int_comaout |
+							regs->int_comaerr);
+	iowrite32(mask, regs->enpointa_mask_reg);
 }
 
 static int mvumi_clear_intr(void *extend)
 {
 	struct mvumi_hba *mhba = (struct mvumi_hba *) extend;
 	unsigned int status, isr_status = 0, tmp = 0;
-	void *regs = mhba->mmio;
+	struct mvumi_hw_regs *regs = mhba->regs;
 
-	status = ioread32(regs + CPU_MAIN_INT_CAUSE_REG);
-	if (!(status & INT_MAP_MU) || status == 0xFFFFFFFF)
+	status = ioread32(regs->main_int_cause_reg);
+	if (!(status & regs->int_mu) || status == 0xFFFFFFFF)
 		return 1;
-	if (unlikely(status & INT_MAP_COMAERR)) {
-		tmp = ioread32(regs + CLA_ISR_CAUSE);
-		if (tmp & (CLIC_IN_ERR_IRQ | CLIC_OUT_ERR_IRQ))
-			iowrite32(tmp & (CLIC_IN_ERR_IRQ | CLIC_OUT_ERR_IRQ),
-					regs + CLA_ISR_CAUSE);
-		status ^= INT_MAP_COMAERR;
+	if (unlikely(status & regs->int_comaerr)) {
+		tmp = ioread32(regs->outb_isr_cause);
+		if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) {
+			if (tmp & regs->clic_out_err) {
+				iowrite32(tmp & regs->clic_out_err,
+							regs->outb_isr_cause);
+			}
+		} else {
+			if (tmp & (regs->clic_in_err | regs->clic_out_err))
+				iowrite32(tmp & (regs->clic_in_err |
+						regs->clic_out_err),
+						regs->outb_isr_cause);
+		}
+		status ^= mhba->regs->int_comaerr;
 		/* inbound or outbound parity error, command will timeout */
 	}
-	if (status & INT_MAP_COMAOUT) {
-		tmp = ioread32(regs + CLA_ISR_CAUSE);
-		if (tmp & CLIC_OUT_IRQ)
-			iowrite32(tmp & CLIC_OUT_IRQ, regs + CLA_ISR_CAUSE);
+	if (status & regs->int_comaout) {
+		tmp = ioread32(regs->outb_isr_cause);
+		if (tmp & regs->clic_irq)
+			iowrite32(tmp & regs->clic_irq, regs->outb_isr_cause);
 	}
-	if (status & INT_MAP_DL_CPU2PCIEA) {
-		isr_status = ioread32(regs + CPU_ARM_TO_PCIEA_DRBL_REG);
+	if (status & regs->int_dl_cpu2pciea) {
+		isr_status = ioread32(regs->arm_to_pciea_drbl_reg);
 		if (isr_status)
-			iowrite32(isr_status, regs + CPU_ARM_TO_PCIEA_DRBL_REG);
+			iowrite32(isr_status, regs->arm_to_pciea_drbl_reg);
 	}
 
 	mhba->global_isr = status;
@@ -1464,24 +2006,38 @@ static int mvumi_clear_intr(void *extend)
 
 /**
  * mvumi_read_fw_status_reg - returns the current FW status value
- * @regs:			FW register set
+ * @mhba:		Adapter soft state
  */
-static unsigned int mvumi_read_fw_status_reg(void *regs)
+static unsigned int mvumi_read_fw_status_reg(struct mvumi_hba *mhba)
 {
 	unsigned int status;
 
-	status = ioread32(regs + CPU_ARM_TO_PCIEA_DRBL_REG);
+	status = ioread32(mhba->regs->arm_to_pciea_drbl_reg);
 	if (status)
-		iowrite32(status, regs + CPU_ARM_TO_PCIEA_DRBL_REG);
+		iowrite32(status, mhba->regs->arm_to_pciea_drbl_reg);
 	return status;
 }
 
-static struct mvumi_instance_template mvumi_instance_template = {
+static struct mvumi_instance_template mvumi_instance_9143 = {
 	.fire_cmd = mvumi_fire_cmd,
 	.enable_intr = mvumi_enable_intr,
 	.disable_intr = mvumi_disable_intr,
 	.clear_intr = mvumi_clear_intr,
 	.read_fw_status_reg = mvumi_read_fw_status_reg,
+	.check_ib_list = mvumi_check_ib_list_9143,
+	.check_ob_list = mvumi_check_ob_list_9143,
+	.reset_host = mvumi_reset_host_9143,
+};
+
+static struct mvumi_instance_template mvumi_instance_9580 = {
+	.fire_cmd = mvumi_fire_cmd,
+	.enable_intr = mvumi_enable_intr,
+	.disable_intr = mvumi_disable_intr,
+	.clear_intr = mvumi_clear_intr,
+	.read_fw_status_reg = mvumi_read_fw_status_reg,
+	.check_ib_list = mvumi_check_ib_list_9580,
+	.check_ob_list = mvumi_check_ob_list_9580,
+	.reset_host = mvumi_reset_host_9580,
 };
 
 static int mvumi_slave_configure(struct scsi_device *sdev)
@@ -1681,6 +2237,124 @@ static struct scsi_transport_template mvumi_transport_template = {
 	.eh_timed_out = mvumi_timed_out,
 };
 
+static int mvumi_cfg_hw_reg(struct mvumi_hba *mhba)
+{
+	void *base = NULL;
+	struct mvumi_hw_regs *regs;
+
+	switch (mhba->pdev->device) {
+	case PCI_DEVICE_ID_MARVELL_MV9143:
+		mhba->mmio = mhba->base_addr[0];
+		base = mhba->mmio;
+		if (!mhba->regs) {
+			mhba->regs = kzalloc(sizeof(*regs), GFP_KERNEL);
+			if (mhba->regs == NULL)
+				return -ENOMEM;
+		}
+		regs = mhba->regs;
+
+		/* For Arm */
+		regs->ctrl_sts_reg          = base + 0x20104;
+		regs->rstoutn_mask_reg      = base + 0x20108;
+		regs->sys_soft_rst_reg      = base + 0x2010C;
+		regs->main_int_cause_reg    = base + 0x20200;
+		regs->enpointa_mask_reg     = base + 0x2020C;
+		regs->rstoutn_en_reg        = base + 0xF1400;
+		/* For Doorbell */
+		regs->pciea_to_arm_drbl_reg = base + 0x20400;
+		regs->arm_to_pciea_drbl_reg = base + 0x20408;
+		regs->arm_to_pciea_mask_reg = base + 0x2040C;
+		regs->pciea_to_arm_msg0     = base + 0x20430;
+		regs->pciea_to_arm_msg1     = base + 0x20434;
+		regs->arm_to_pciea_msg0     = base + 0x20438;
+		regs->arm_to_pciea_msg1     = base + 0x2043C;
+
+		/* For Message Unit */
+		regs->inb_aval_count_basel  = base + 0x508;
+		regs->inb_aval_count_baseh  = base + 0x50C;
+		regs->inb_write_pointer     = base + 0x518;
+		regs->inb_read_pointer      = base + 0x51C;
+		regs->outb_copy_basel       = base + 0x5B0;
+		regs->outb_copy_baseh       = base + 0x5B4;
+		regs->outb_copy_pointer     = base + 0x544;
+		regs->outb_read_pointer     = base + 0x548;
+		regs->outb_isr_cause        = base + 0x560;
+		regs->outb_coal_cfg         = base + 0x568;
+		/* Bit setting for HW */
+		regs->int_comaout           = 1 << 8;
+		regs->int_comaerr           = 1 << 6;
+		regs->int_dl_cpu2pciea      = 1 << 1;
+		regs->cl_pointer_toggle     = 1 << 12;
+		regs->clic_irq              = 1 << 1;
+		regs->clic_in_err           = 1 << 8;
+		regs->clic_out_err          = 1 << 12;
+		regs->cl_slot_num_mask      = 0xFFF;
+		regs->int_drbl_int_mask     = 0x3FFFFFFF;
+		regs->int_mu = regs->int_dl_cpu2pciea | regs->int_comaout |
+							regs->int_comaerr;
+		break;
+	case PCI_DEVICE_ID_MARVELL_MV9580:
+		mhba->mmio = mhba->base_addr[2];
+		base = mhba->mmio;
+		if (!mhba->regs) {
+			mhba->regs = kzalloc(sizeof(*regs), GFP_KERNEL);
+			if (mhba->regs == NULL)
+				return -ENOMEM;
+		}
+		regs = mhba->regs;
+		/* For Arm */
+		regs->ctrl_sts_reg          = base + 0x20104;
+		regs->rstoutn_mask_reg      = base + 0x1010C;
+		regs->sys_soft_rst_reg      = base + 0x10108;
+		regs->main_int_cause_reg    = base + 0x10200;
+		regs->enpointa_mask_reg     = base + 0x1020C;
+		regs->rstoutn_en_reg        = base + 0xF1400;
+
+		/* For Doorbell */
+		regs->pciea_to_arm_drbl_reg = base + 0x10460;
+		regs->arm_to_pciea_drbl_reg = base + 0x10480;
+		regs->arm_to_pciea_mask_reg = base + 0x10484;
+		regs->pciea_to_arm_msg0     = base + 0x10400;
+		regs->pciea_to_arm_msg1     = base + 0x10404;
+		regs->arm_to_pciea_msg0     = base + 0x10420;
+		regs->arm_to_pciea_msg1     = base + 0x10424;
+
+		/* For reset */
+		regs->reset_request         = base + 0x10108;
+		regs->reset_enable          = base + 0x1010C;
+
+		/* For Message Unit */
+		regs->inb_aval_count_basel  = base + 0x4008;
+		regs->inb_aval_count_baseh  = base + 0x400C;
+		regs->inb_write_pointer     = base + 0x4018;
+		regs->inb_read_pointer      = base + 0x401C;
+		regs->outb_copy_basel       = base + 0x4058;
+		regs->outb_copy_baseh       = base + 0x405C;
+		regs->outb_copy_pointer     = base + 0x406C;
+		regs->outb_read_pointer     = base + 0x4070;
+		regs->outb_coal_cfg         = base + 0x4080;
+		regs->outb_isr_cause        = base + 0x4088;
+		/* Bit setting for HW */
+		regs->int_comaout           = 1 << 4;
+		regs->int_dl_cpu2pciea      = 1 << 12;
+		regs->int_comaerr           = 1 << 29;
+		regs->cl_pointer_toggle     = 1 << 14;
+		regs->cl_slot_num_mask      = 0x3FFF;
+		regs->clic_irq              = 1 << 0;
+		regs->clic_out_err          = 1 << 1;
+		regs->int_drbl_int_mask     = 0x3FFFFFFF;
+		regs->int_mu = regs->int_dl_cpu2pciea | regs->int_comaout;
+		break;
+	default:
+		return -1;
+	}
+
+	return 0;
+}
+
 /**
  * mvumi_init_fw -	Initializes the FW
  * @mhba:		Adapter soft state
@@ -1699,15 +2373,18 @@ static int mvumi_init_fw(struct mvumi_hba *mhba)
 	if (ret)
 		goto fail_ioremap;
 
-	mhba->mmio = mhba->base_addr[0];
-
 	switch (mhba->pdev->device) {
 	case PCI_DEVICE_ID_MARVELL_MV9143:
-		mhba->instancet = &mvumi_instance_template;
+		mhba->instancet = &mvumi_instance_9143;
 		mhba->io_seq = 0;
 		mhba->max_sge = MVUMI_MAX_SG_ENTRY;
 		mhba->request_id_enabled = 1;
 		break;
+	case PCI_DEVICE_ID_MARVELL_MV9580:
+		mhba->instancet = &mvumi_instance_9580;
+		mhba->io_seq = 0;
+		mhba->max_sge = MVUMI_MAX_SG_ENTRY;
+		break;
 	default:
 		dev_err(&mhba->pdev->dev, "device 0x%x not supported!\n",
 							mhba->pdev->device);
@@ -1717,15 +2394,21 @@ static int mvumi_init_fw(struct mvumi_hba *mhba)
 	}
 	dev_dbg(&mhba->pdev->dev, "device id : %04X is found.\n",
 							mhba->pdev->device);
-
-	mhba->handshake_page = kzalloc(HSP_MAX_SIZE, GFP_KERNEL);
+	ret = mvumi_cfg_hw_reg(mhba);
+	if (ret) {
+		dev_err(&mhba->pdev->dev,
+			"failed to allocate memory for reg\n");
+		ret = -ENOMEM;
+		goto fail_alloc_mem;
+	}
+	mhba->handshake_page = pci_alloc_consistent(mhba->pdev, HSP_MAX_SIZE,
+						&mhba->handshake_page_phys);
 	if (!mhba->handshake_page) {
 		dev_err(&mhba->pdev->dev,
 			"failed to allocate memory for handshake\n");
 		ret = -ENOMEM;
-		goto fail_alloc_mem;
+		goto fail_alloc_page;
 	}
-	mhba->handshake_page_phys = virt_to_phys(mhba->handshake_page);
 
 	if (mvumi_start(mhba)) {
 		ret = -EINVAL;
@@ -1739,7 +2422,10 @@ static int mvumi_init_fw(struct mvumi_hba *mhba)
 
 fail_ready_state:
 	mvumi_release_mem_resource(mhba);
-	kfree(mhba->handshake_page);
+	pci_free_consistent(mhba->pdev, HSP_MAX_SIZE,
+		mhba->handshake_page, mhba->handshake_page_phys);
+fail_alloc_page:
+	kfree(mhba->regs);
 fail_alloc_mem:
 	mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr);
 fail_ioremap:
@@ -1755,6 +2441,7 @@ fail_ioremap:
 static int mvumi_io_attach(struct mvumi_hba *mhba)
 {
 	struct Scsi_Host *host = mhba->shost;
+	struct scsi_device *sdev = NULL;
 	int ret;
 	unsigned int max_sg = (mhba->ib_max_size + 4 -
 		sizeof(struct mvumi_msg_frame)) / sizeof(struct mvumi_sgl);
@@ -1764,7 +2451,7 @@ static int mvumi_io_attach(struct mvumi_hba *mhba)
 	host->can_queue = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1;
 	host->sg_tablesize = mhba->max_sge > max_sg ? max_sg : mhba->max_sge;
 	host->max_sectors = mhba->max_transfer_size / 512;
-	host->cmd_per_lun =  (mhba->max_io - 1) ? (mhba->max_io - 1) : 1;
+	host->cmd_per_lun = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1;
 	host->max_id = mhba->max_target_id;
 	host->max_cmd_len = MAX_COMMAND_SIZE;
 	host->transportt = &mvumi_transport_template;
@@ -1775,9 +2462,43 @@ static int mvumi_io_attach(struct mvumi_hba *mhba)
 		return ret;
 	}
 	mhba->fw_flag |= MVUMI_FW_ATTACH;
-	scsi_scan_host(host);
 
+	mutex_lock(&mhba->sas_discovery_mutex);
+	if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580)
+		ret = scsi_add_device(host, 0, mhba->max_target_id - 1, 0);
+	else
+		ret = 0;
+	if (ret) {
+		dev_err(&mhba->pdev->dev, "add virtual device failed\n");
+		mutex_unlock(&mhba->sas_discovery_mutex);
+		goto fail_add_device;
+	}
+
+	mhba->dm_thread = kthread_create(mvumi_rescan_bus,
+						mhba, "mvumi_scanthread");
+	if (IS_ERR(mhba->dm_thread)) {
+		dev_err(&mhba->pdev->dev,
+			"failed to create device scan thread\n");
+		ret = PTR_ERR(mhba->dm_thread);
+		mutex_unlock(&mhba->sas_discovery_mutex);
+		goto fail_create_thread;
+	}
+	atomic_set(&mhba->pnp_count, 1);
+	wake_up_process(mhba->dm_thread);
+
+	mutex_unlock(&mhba->sas_discovery_mutex);
 	return 0;
+
+fail_create_thread:
+	if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580)
+		sdev = scsi_device_lookup(mhba->shost, 0,
+						mhba->max_target_id - 1, 0);
+	if (sdev) {
+		scsi_remove_device(sdev);
+		scsi_device_put(sdev);
+	}
+fail_add_device:
+	scsi_remove_host(mhba->shost);
+	return ret;
 }
 
 /**
@@ -1828,8 +2549,12 @@ static int __devinit mvumi_probe_one(struct pci_dev *pdev,
 	INIT_LIST_HEAD(&mhba->free_ob_list);
 	INIT_LIST_HEAD(&mhba->res_list);
 	INIT_LIST_HEAD(&mhba->waiting_req_list);
+	mutex_init(&mhba->device_lock);
+	INIT_LIST_HEAD(&mhba->mhba_dev_list);
+	INIT_LIST_HEAD(&mhba->shost_dev_list);
 	atomic_set(&mhba->fw_outstanding, 0);
 	init_waitqueue_head(&mhba->int_cmd_wait_q);
+	mutex_init(&mhba->sas_discovery_mutex);
 
 	mhba->pdev = pdev;
 	mhba->shost = host;
@@ -1845,19 +2570,22 @@ static int __devinit mvumi_probe_one(struct pci_dev *pdev,
 		dev_err(&pdev->dev, "failed to register IRQ\n");
 		goto fail_init_irq;
 	}
-	mhba->instancet->enable_intr(mhba->mmio);
+
+	mhba->instancet->enable_intr(mhba);
 	pci_set_drvdata(pdev, mhba);
 
 	ret = mvumi_io_attach(mhba);
 	if (ret)
 		goto fail_io_attach;
+
+	mvumi_backup_bar_addr(mhba);
 	dev_dbg(&pdev->dev, "probe mvumi driver successfully.\n");
 
 	return 0;
 
 fail_io_attach:
 	pci_set_drvdata(pdev, NULL);
-	mhba->instancet->disable_intr(mhba->mmio);
+	mhba->instancet->disable_intr(mhba);
 	free_irq(mhba->pdev->irq, mhba);
 fail_init_irq:
 	mvumi_release_fw(mhba);
@@ -1877,11 +2605,17 @@ static void mvumi_detach_one(struct pci_dev *pdev)
 	struct mvumi_hba *mhba;
 
 	mhba = pci_get_drvdata(pdev);
+	if (mhba->dm_thread) {
+		kthread_stop(mhba->dm_thread);
+		mhba->dm_thread = NULL;
+	}
+
+	mvumi_detach_devices(mhba);
 	host = mhba->shost;
 	scsi_remove_host(mhba->shost);
 	mvumi_flush_cache(mhba);
 
-	mhba->instancet->disable_intr(mhba->mmio);
+	mhba->instancet->disable_intr(mhba);
 	free_irq(mhba->pdev->irq, mhba);
 	mvumi_release_fw(mhba);
 	scsi_host_put(host);
@@ -1909,7 +2643,7 @@ static int mvumi_suspend(struct pci_dev *pdev, pm_message_t state)
 	mvumi_flush_cache(mhba);
 
 	pci_set_drvdata(pdev, mhba);
-	mhba->instancet->disable_intr(mhba->mmio);
+	mhba->instancet->disable_intr(mhba);
 	free_irq(mhba->pdev->irq, mhba);
 	mvumi_unmap_pci_addr(pdev, mhba->base_addr);
 	pci_release_regions(pdev);
@@ -1956,8 +2690,13 @@ static int mvumi_resume(struct pci_dev *pdev)
 	if (ret)
 		goto release_regions;
 
+	if (mvumi_cfg_hw_reg(mhba)) {
+		ret = -EINVAL;
+		goto unmap_pci_addr;
+	}
+
 	mhba->mmio = mhba->base_addr[0];
-	mvumi_reset(mhba->mmio);
+	mvumi_reset(mhba);
 
 	if (mvumi_start(mhba)) {
 		ret = -EINVAL;
@@ -1970,7 +2709,7 @@ static int mvumi_resume(struct pci_dev *pdev)
 		dev_err(&pdev->dev, "failed to register IRQ\n");
 		goto unmap_pci_addr;
 	}
-	mhba->instancet->enable_intr(mhba->mmio);
+	mhba->instancet->enable_intr(mhba);
 
 	return 0;
 
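
A note on the handshake depth encoding above: when the firmware advertises
HS_CAPABILITY_NEW_PAGE_IO_DEPTH_DEF, handshake page 4 carries the list depth
as a log2 value rather than a raw count. find_first_bit() on a power-of-two
list_num_io yields its exponent, and mvumi_hs_process_page() recovers the
depth as 1 << cl_inout_list_depth. Below is a minimal userspace sketch of
that round trip; it assumes, as the handshake implies, that list_num_io is a
power of two, and the helper name is made up.

#include <assert.h>
#include <stdio.h>

/* index of the lowest set bit, i.e. log2 for a power of two;
 * open-coded only to mirror what find_first_bit() does */
static unsigned int depth_to_log2(unsigned long list_num_io)
{
	unsigned int bit = 0;

	while (bit < sizeof(list_num_io) * 8 && !(list_num_io & (1UL << bit)))
		bit++;
	return bit;
}

int main(void)
{
	unsigned long list_num_io = 32;		/* example queue depth */
	unsigned int encoded = depth_to_log2(list_num_io);

	/* firmware side: list_num_io = 1 << hs_page1->cl_inout_list_depth */
	assert((1UL << encoded) == list_num_io);
	printf("depth %lu is carried in page 4 as %u\n", list_num_io, encoded);
	return 0;
}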
diff --git a/drivers/scsi/mvumi.h b/drivers/scsi/mvumi.h
index 10b9237566f0..e360135fd1bd 100644
--- a/drivers/scsi/mvumi.h
+++ b/drivers/scsi/mvumi.h
@@ -34,51 +34,87 @@
 #define MV_DRIVER_NAME			"mvumi"
 #define PCI_VENDOR_ID_MARVELL_2		0x1b4b
 #define PCI_DEVICE_ID_MARVELL_MV9143	0x9143
+#define PCI_DEVICE_ID_MARVELL_MV9580	0x9580
 
 #define MVUMI_INTERNAL_CMD_WAIT_TIME	45
+#define MVUMI_INQUIRY_LENGTH		44
+#define MVUMI_INQUIRY_UUID_OFF		36
+#define MVUMI_INQUIRY_UUID_LEN		8
 
 #define IS_DMA64			(sizeof(dma_addr_t) == 8)
 
 enum mvumi_qc_result {
-	MV_QUEUE_COMMAND_RESULT_SENT	= 0,
+	MV_QUEUE_COMMAND_RESULT_SENT = 0,
 	MV_QUEUE_COMMAND_RESULT_NO_RESOURCE,
 };
 
-enum {
-	/*******************************************/
-
-	/* ARM Mbus Registers Map	*/
-
-	/*******************************************/
-	CPU_MAIN_INT_CAUSE_REG	= 0x20200,
-	CPU_MAIN_IRQ_MASK_REG	= 0x20204,
-	CPU_MAIN_FIQ_MASK_REG	= 0x20208,
-	CPU_ENPOINTA_MASK_REG	= 0x2020C,
-	CPU_ENPOINTB_MASK_REG	= 0x20210,
-
-	INT_MAP_COMAERR		= 1 << 6,
-	INT_MAP_COMAIN		= 1 << 7,
-	INT_MAP_COMAOUT		= 1 << 8,
-	INT_MAP_COMBERR		= 1 << 9,
-	INT_MAP_COMBIN		= 1 << 10,
-	INT_MAP_COMBOUT		= 1 << 11,
-
-	INT_MAP_COMAINT	= (INT_MAP_COMAOUT | INT_MAP_COMAERR),
-	INT_MAP_COMBINT	= (INT_MAP_COMBOUT | INT_MAP_COMBIN | INT_MAP_COMBERR),
-
-	INT_MAP_DL_PCIEA2CPU	= 1 << 0,
-	INT_MAP_DL_CPU2PCIEA	= 1 << 1,
-
-	/***************************************/
+struct mvumi_hw_regs {
+	/* For CPU */
+	void *main_int_cause_reg;
+	void *enpointa_mask_reg;
+	void *enpointb_mask_reg;
+	void *rstoutn_en_reg;
+	void *ctrl_sts_reg;
+	void *rstoutn_mask_reg;
+	void *sys_soft_rst_reg;
+
+	/* For Doorbell */
+	void *pciea_to_arm_drbl_reg;
+	void *arm_to_pciea_drbl_reg;
+	void *arm_to_pciea_mask_reg;
+	void *pciea_to_arm_msg0;
+	void *pciea_to_arm_msg1;
+	void *arm_to_pciea_msg0;
+	void *arm_to_pciea_msg1;
+
+	/* reset register */
+	void *reset_request;
+	void *reset_enable;
+
+	/* For Message Unit */
+	void *inb_list_basel;
+	void *inb_list_baseh;
+	void *inb_aval_count_basel;
+	void *inb_aval_count_baseh;
+	void *inb_write_pointer;
+	void *inb_read_pointer;
+	void *outb_list_basel;
+	void *outb_list_baseh;
+	void *outb_copy_basel;
+	void *outb_copy_baseh;
+	void *outb_copy_pointer;
+	void *outb_read_pointer;
+	void *inb_isr_cause;
+	void *outb_isr_cause;
+	void *outb_coal_cfg;
+	void *outb_coal_timeout;
+
+	/* Bit setting for HW */
+	u32 int_comaout;
+	u32 int_comaerr;
+	u32 int_dl_cpu2pciea;
+	u32 int_mu;
+	u32 int_drbl_int_mask;
+	u32 int_main_int_mask;
+	u32 cl_pointer_toggle;
+	u32 cl_slot_num_mask;
+	u32 clic_irq;
+	u32 clic_in_err;
+	u32 clic_out_err;
+};
 
-	/* ARM Doorbell Registers Map		*/
+struct mvumi_dyn_list_entry {
+	u32 src_low_addr;
+	u32 src_high_addr;
+	u32 if_length;
+	u32 reserve;
+};
 
-	/***************************************/
-	CPU_PCIEA_TO_ARM_DRBL_REG	= 0x20400,
-	CPU_PCIEA_TO_ARM_MASK_REG	= 0x20404,
-	CPU_ARM_TO_PCIEA_DRBL_REG	= 0x20408,
-	CPU_ARM_TO_PCIEA_MASK_REG	= 0x2040C,
+#define SCSI_CMD_MARVELL_SPECIFIC	0xE1
+#define CDB_CORE_MODULE			0x1
+#define CDB_CORE_SHUTDOWN		0xB
 
+enum {
 	DRBL_HANDSHAKE			= 1 << 0,
 	DRBL_SOFT_RESET			= 1 << 1,
 	DRBL_BUS_CHANGE			= 1 << 2,
@@ -86,46 +122,6 @@ enum {
 	DRBL_MU_RESET			= 1 << 4,
 	DRBL_HANDSHAKE_ISR		= DRBL_HANDSHAKE,
 
-	CPU_PCIEA_TO_ARM_MSG0		= 0x20430,
-	CPU_PCIEA_TO_ARM_MSG1		= 0x20434,
-	CPU_ARM_TO_PCIEA_MSG0		= 0x20438,
-	CPU_ARM_TO_PCIEA_MSG1		= 0x2043C,
-
-	/*******************************************/
-
-	/* ARM Communication List Registers Map    */
-
-	/*******************************************/
-	CLA_INB_LIST_BASEL		= 0x500,
-	CLA_INB_LIST_BASEH		= 0x504,
-	CLA_INB_AVAL_COUNT_BASEL	= 0x508,
-	CLA_INB_AVAL_COUNT_BASEH	= 0x50C,
-	CLA_INB_DESTI_LIST_BASEL	= 0x510,
-	CLA_INB_DESTI_LIST_BASEH	= 0x514,
-	CLA_INB_WRITE_POINTER		= 0x518,
-	CLA_INB_READ_POINTER		= 0x51C,
-
-	CLA_OUTB_LIST_BASEL		= 0x530,
-	CLA_OUTB_LIST_BASEH		= 0x534,
-	CLA_OUTB_SOURCE_LIST_BASEL	= 0x538,
-	CLA_OUTB_SOURCE_LIST_BASEH	= 0x53C,
-	CLA_OUTB_COPY_POINTER		= 0x544,
-	CLA_OUTB_READ_POINTER		= 0x548,
-
-	CLA_ISR_CAUSE			= 0x560,
-	CLA_ISR_MASK			= 0x564,
-
-	INT_MAP_MU		= (INT_MAP_DL_CPU2PCIEA | INT_MAP_COMAINT),
-
-	CL_POINTER_TOGGLE		= 1 << 12,
-
-	CLIC_IN_IRQ			= 1 << 0,
-	CLIC_OUT_IRQ			= 1 << 1,
-	CLIC_IN_ERR_IRQ			= 1 << 8,
-	CLIC_OUT_ERR_IRQ		= 1 << 12,
-
-	CL_SLOT_NUM_MASK		= 0xFFF,
-
 	/*
 	* Command flag is the flag for the CDB command itself
 	*/
@@ -137,15 +133,23 @@ enum {
 	CMD_FLAG_DATA_IN		= 1 << 3,
 	/* 1-host write data */
 	CMD_FLAG_DATA_OUT		= 1 << 4,
-
-	SCSI_CMD_MARVELL_SPECIFIC	= 0xE1,
-	CDB_CORE_SHUTDOWN		= 0xB,
+	CMD_FLAG_PRDT_IN_HOST		= 1 << 5,
 };
 
 #define APICDB0_EVENT			0xF4
 #define APICDB1_EVENT_GETEVENT		0
+#define APICDB1_HOST_GETEVENT		1
 #define MAX_EVENTS_RETURNED		6
 
+#define DEVICE_OFFLINE	0
+#define DEVICE_ONLINE	1
+
+struct mvumi_hotplug_event {
+	u16 size;
+	u8 dummy[2];
+	u8 bitmap[0];
+};
+
 struct mvumi_driver_event {
 	u32	time_stamp;
 	u32	sequence_no;
@@ -172,8 +176,14 @@ struct mvumi_events_wq {
 	void *param;
 };
 
+#define HS_CAPABILITY_SUPPORT_COMPACT_SG	(1U << 4)
+#define HS_CAPABILITY_SUPPORT_PRD_HOST		(1U << 5)
+#define HS_CAPABILITY_SUPPORT_DYN_SRC		(1U << 6)
+#define HS_CAPABILITY_NEW_PAGE_IO_DEPTH_DEF	(1U << 14)
+
 #define MVUMI_MAX_SG_ENTRY	32
 #define SGD_EOT			(1L << 27)
+#define SGD_EOT_CP		(1L << 22)
 
 struct mvumi_sgl {
 	u32	baseaddr_l;
@@ -181,6 +191,39 @@ struct mvumi_sgl {
 	u32	flags;
 	u32	size;
 };
+
+struct mvumi_compact_sgl {
+	u32	baseaddr_l;
+	u32	baseaddr_h;
+	u32	flags;
+};
+
+#define GET_COMPACT_SGD_SIZE(sgd)	\
+	((((struct mvumi_compact_sgl *)(sgd))->flags) & 0x3FFFFFL)
+
+#define SET_COMPACT_SGD_SIZE(sgd, sz) do {			\
+	(((struct mvumi_compact_sgl *)(sgd))->flags) &= ~0x3FFFFFL;	\
+	(((struct mvumi_compact_sgl *)(sgd))->flags) |= (sz);		\
+} while (0)
+#define sgd_getsz(_mhba, sgd, sz) do {				\
+	if (_mhba->hba_capability & HS_CAPABILITY_SUPPORT_COMPACT_SG)	\
+		(sz) = GET_COMPACT_SGD_SIZE(sgd);	\
+	else \
+		(sz) = (sgd)->size;			\
+} while (0)
+
+#define sgd_setsz(_mhba, sgd, sz) do {				\
+	if (_mhba->hba_capability & HS_CAPABILITY_SUPPORT_COMPACT_SG)	\
+		SET_COMPACT_SGD_SIZE(sgd, sz);		\
+	else \
+		(sgd)->size = (sz);			\
+} while (0)
+
+#define sgd_inc(_mhba, sgd) do {	\
+	if (_mhba->hba_capability & HS_CAPABILITY_SUPPORT_COMPACT_SG)	\
+		sgd = (struct mvumi_sgl *)(((unsigned char *) (sgd)) + 12); \
+	else \
+		sgd = (struct mvumi_sgl *)(((unsigned char *) (sgd)) + 16); \
+} while (0)
 
 struct mvumi_res {
 	struct list_head entry;
@@ -197,7 +240,7 @@ enum resource_type {
 };
 
 struct mvumi_sense_data {
-	u8 error_eode:7;
+	u8 error_code:7;
 	u8 valid:1;
 	u8 segment_number;
 	u8 sense_key:4;
@@ -220,6 +263,7 @@ struct mvumi_sense_data {
 struct mvumi_cmd {
 	struct list_head queue_pointer;
 	struct mvumi_msg_frame *frame;
+	dma_addr_t frame_phys;
 	struct scsi_cmnd *scmd;
 	atomic_t sync_cmd;
 	void *data_buf;
@@ -393,7 +437,8 @@ struct mvumi_hs_page2 {
 	u16 frame_length;
 
 	u8 host_type;
-	u8 reserved[3];
+	u8 host_cap;
+	u8 reserved[2];
 	struct version_info host_ver;
 	u32 system_io_bus;
 	u32 slot_number;
@@ -435,8 +480,17 @@ struct mvumi_tag {
 	unsigned short size;
 };
 
+struct mvumi_device {
+	struct list_head list;
+	struct scsi_device *sdev;
+	u64	wwid;
+	u8	dev_type;
+	int	id;
+};
+
 struct mvumi_hba {
 	void *base_addr[MAX_BASE_ADDRESS];
+	u32 pci_base[MAX_BASE_ADDRESS];
 	void *mmio;
 	struct list_head cmd_pool;
 	struct Scsi_Host *shost;
@@ -449,6 +503,9 @@ struct mvumi_hba {
 	void *ib_list;
 	dma_addr_t ib_list_phys;
 
+	void *ib_frame;
+	dma_addr_t ib_frame_phys;
+
 	void *ob_list;
 	dma_addr_t ob_list_phys;
 
@@ -477,12 +534,14 @@ struct mvumi_hba {
 	unsigned char hba_total_pages;
 	unsigned char fw_flag;
 	unsigned char request_id_enabled;
+	unsigned char eot_flag;
 	unsigned short hba_capability;
 	unsigned short io_seq;
 
 	unsigned int ib_cur_slot;
 	unsigned int ob_cur_slot;
 	unsigned int fw_state;
+	struct mutex sas_discovery_mutex;
 
 	struct list_head ob_data_list;
 	struct list_head free_ob_list;
@@ -491,14 +550,24 @@ struct mvumi_hba {
 
 	struct mvumi_tag tag_pool;
 	struct mvumi_cmd **tag_cmd;
+	struct mvumi_hw_regs *regs;
+	struct mutex device_lock;
+	struct list_head mhba_dev_list;
+	struct list_head shost_dev_list;
+	struct task_struct *dm_thread;
+	atomic_t pnp_count;
 };
 
 struct mvumi_instance_template {
-	void (*fire_cmd)(struct mvumi_hba *, struct mvumi_cmd *);
-	void (*enable_intr)(void *) ;
-	void (*disable_intr)(void *);
-	int (*clear_intr)(void *);
-	unsigned int (*read_fw_status_reg)(void *);
+	void (*fire_cmd) (struct mvumi_hba *, struct mvumi_cmd *);
+	void (*enable_intr) (struct mvumi_hba *);
+	void (*disable_intr) (struct mvumi_hba *);
+	int (*clear_intr) (void *);
+	unsigned int (*read_fw_status_reg) (struct mvumi_hba *);
+	unsigned int (*check_ib_list) (struct mvumi_hba *);
+	int (*check_ob_list) (struct mvumi_hba *, unsigned int *,
+			      unsigned int *);
+	int (*reset_host) (struct mvumi_hba *);
 };
 
 extern struct timezone sys_tz;
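
On the compact SG capability declared in this header: with
HS_CAPABILITY_SUPPORT_COMPACT_SG the separate size field disappears and the
byte count is folded into the low 22 bits of flags, shrinking each descriptor
from 16 to 12 bytes; the sgd_getsz/sgd_setsz/sgd_inc macros hide the two
layouts from the I/O path. A userspace sketch of the packing, mirroring
GET/SET_COMPACT_SGD_SIZE:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct compact_sgl {
	uint32_t baseaddr_l;
	uint32_t baseaddr_h;
	uint32_t flags;			/* bits 0-21 carry the size */
};

int main(void)
{
	struct compact_sgl sgd = { 0, 0, 0 };
	uint32_t size = 0x1000;		/* 4 KiB segment */

	sgd.flags |= UINT32_C(1) << 22;	/* SGD_EOT_CP: end of table */
	/* SET_COMPACT_SGD_SIZE: clear bits 0-21, then or in the size */
	sgd.flags = (sgd.flags & ~UINT32_C(0x3FFFFF)) | size;

	/* GET_COMPACT_SGD_SIZE gives the size back */
	assert((sgd.flags & UINT32_C(0x3FFFFF)) == size);
	printf("compact SGD: %zu bytes instead of 16\n", sizeof(sgd));
	return 0;
}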
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 799a58bb9859..48fca47384b7 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -2080,6 +2080,7 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
 	uint8_t       domain;
 	char		connect_type[22];
 	struct qla_hw_data *ha = vha->hw;
+	unsigned long flags;
 
 	/* Get host addresses. */
 	rval = qla2x00_get_adapter_id(vha,
@@ -2154,9 +2155,9 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
 	vha->d_id.b.area = area;
 	vha->d_id.b.al_pa = al_pa;
 
-	spin_lock(&ha->vport_slock);
+	spin_lock_irqsave(&ha->vport_slock, flags);
 	qlt_update_vp_map(vha, SET_AL_PA);
-	spin_unlock(&ha->vport_slock);
+	spin_unlock_irqrestore(&ha->vport_slock, flags);
 
 	if (!vha->flags.init_done)
 		ql_log(ql_log_info, vha, 0x2010,
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 57fbd5a3d4e2..5cda11c07c68 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -2055,7 +2055,7 @@ static void unmap_region(sector_t lba, unsigned int len)
 		block = lba + alignment;
 		rem = do_div(block, granularity);
 
-		if (rem == 0 && lba + granularity <= end && block < map_size) {
+		if (rem == 0 && lba + granularity < end && block < map_size) {
 			clear_bit(block, map_storep);
 			if (scsi_debug_lbprz)
 				memset(fake_storep +
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index de2337f255a7..c1b05a83d403 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -789,7 +789,6 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
 			     int cmnd_size, int timeout, unsigned sense_bytes)
 {
 	struct scsi_device *sdev = scmd->device;
-	struct scsi_driver *sdrv = scsi_cmd_to_driver(scmd);
 	struct Scsi_Host *shost = sdev->host;
 	DECLARE_COMPLETION_ONSTACK(done);
 	unsigned long timeleft;
@@ -845,8 +844,11 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
 
 	scsi_eh_restore_cmnd(scmd, &ses);
 
-	if (sdrv && sdrv->eh_action)
-		rtn = sdrv->eh_action(scmd, cmnd, cmnd_size, rtn);
+	if (scmd->request->cmd_type != REQ_TYPE_BLOCK_PC) {
+		struct scsi_driver *sdrv = scsi_cmd_to_driver(scmd);
+		if (sdrv->eh_action)
+			rtn = sdrv->eh_action(scmd, cmnd, cmnd_size, rtn);
+	}
 
 	return rtn;
 }
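
The scsi_cmd_to_driver() shortening works only because every remaining
caller guarantees a disk-backed request: a pass-through (REQ_TYPE_BLOCK_PC)
command may carry no rq_disk, which is why scsi_send_eh_cmnd() now checks
the request type before chasing the pointer chain. A toy model of that
invariant; all types here are simplified stand-ins, not the kernel
structures:

#include <stdio.h>

enum cmd_type { REQ_TYPE_FS, REQ_TYPE_BLOCK_PC };

struct driver { const char *name; };
struct disk { struct driver *drv; };
struct request { enum cmd_type type; struct disk *rq_disk; };

/* no NULL checks: valid only when rq_disk is known to exist */
static struct driver *cmd_to_driver(struct request *rq)
{
	return rq->rq_disk->drv;
}

int main(void)
{
	struct driver sd = { "sd" };
	struct disk disk = { &sd };
	struct request fs = { REQ_TYPE_FS, &disk };
	struct request pc = { REQ_TYPE_BLOCK_PC, NULL };
	struct request *reqs[] = { &fs, &pc };

	for (int i = 0; i < 2; i++) {
		if (reqs[i]->type != REQ_TYPE_BLOCK_PC)	/* the guard */
			printf("driver: %s\n", cmd_to_driver(reqs[i])->name);
		else
			printf("pass-through: skip the driver hook\n");
	}
	return 0;
}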
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 528d52beaa1c..01440782feb2 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1221,7 +1221,12 @@ static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
 	/*
 	 * At this point, all outstanding requests in the adapter
 	 * should have been flushed out and return to us
+	 * There is a potential race here where the host may be in
+	 * the process of responding when we return from here.
+	 * Just wait for all in-transit packets to be accounted for
+	 * before we return from here.
 	 */
+	storvsc_wait_to_drain(stor_device);
 
 	return SUCCESS;
 }
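
The storvsc change closes a use-after-free window: the host can still be
posting completions for in-transit packets when the reset handler returns,
so the handler must block until the outstanding count drains to zero. A
pthread stand-in for that wait-for-drain pattern follows; the driver itself
uses a kernel waitqueue, and these names are hypothetical:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t drained = PTHREAD_COND_INITIALIZER;
static int num_outstanding = 2;		/* packets still in transit */

static void *host_completes_io(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (num_outstanding > 0) {
		num_outstanding--;	/* host responds to one request */
		if (num_outstanding == 0)
			pthread_cond_signal(&drained);
	}
	pthread_mutex_unlock(&lock);
	return NULL;
}

static void wait_to_drain(void)
{
	pthread_mutex_lock(&lock);
	while (num_outstanding > 0)
		pthread_cond_wait(&drained, &lock);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_t host;

	pthread_create(&host, NULL, host_completes_io, NULL);
	wait_to_drain();	/* only now is it safe to report SUCCESS */
	pthread_join(host, NULL);
	printf("all in-transit packets accounted for\n");
	return 0;
}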
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 3e79a2f00042..595af1ae4421 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -219,7 +219,7 @@ static int virtscsi_kick_event(struct virtio_scsi *vscsi,
 	struct scatterlist sg;
 	unsigned long flags;
 
-	sg_set_buf(&sg, &event_node->event, sizeof(struct virtio_scsi_event));
+	sg_init_one(&sg, &event_node->event, sizeof(struct virtio_scsi_event));
 
 	spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags);
 
@@ -279,6 +279,31 @@ static void virtscsi_handle_transport_reset(struct virtio_scsi *vscsi,
 	}
 }
 
+static void virtscsi_handle_param_change(struct virtio_scsi *vscsi,
+					 struct virtio_scsi_event *event)
+{
+	struct scsi_device *sdev;
+	struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
+	unsigned int target = event->lun[1];
+	unsigned int lun = (event->lun[2] << 8) | event->lun[3];
+	u8 asc = event->reason & 255;
+	u8 ascq = event->reason >> 8;
+
+	sdev = scsi_device_lookup(shost, 0, target, lun);
+	if (!sdev) {
+		pr_err("SCSI device %d 0 %d %d not found\n",
+			shost->host_no, target, lun);
+		return;
+	}
+
+	/*
+	 * Handle "Parameters changed", "Mode parameters changed", and
+	 * "Capacity data has changed".
+	 */
+	if (asc == 0x2a && (ascq == 0x00 || ascq == 0x01 || ascq == 0x09))
+		scsi_rescan_device(&sdev->sdev_gendev);
+
+	scsi_device_put(sdev);
+}
+
 static void virtscsi_handle_event(struct work_struct *work)
 {
 	struct virtio_scsi_event_node *event_node =
@@ -297,6 +322,9 @@ static void virtscsi_handle_event(struct work_struct *work)
 	case VIRTIO_SCSI_T_TRANSPORT_RESET:
 		virtscsi_handle_transport_reset(vscsi, event);
 		break;
+	case VIRTIO_SCSI_T_PARAM_CHANGE:
+		virtscsi_handle_param_change(vscsi, event);
+		break;
 	default:
 		pr_err("Unsupported virtio scsi event %x\n", event->event);
 	}
@@ -677,7 +705,11 @@ static int __devinit virtscsi_probe(struct virtio_device *vdev)
 	cmd_per_lun = virtscsi_config_get(vdev, cmd_per_lun) ?: 1;
 	shost->cmd_per_lun = min_t(u32, cmd_per_lun, shost->can_queue);
 	shost->max_sectors = virtscsi_config_get(vdev, max_sectors) ?: 0xFFFF;
-	shost->max_lun = virtscsi_config_get(vdev, max_lun) + 1;
+
+	/* LUNs greater than 255 are reported with format 1, so they go in
+	 * the range 16640-32767.
+	 */
+	shost->max_lun = virtscsi_config_get(vdev, max_lun) + 1 + 0x4000;
 	shost->max_id = num_targets;
 	shost->max_channel = 0;
 	shost->max_cmd_len = VIRTIO_SCSI_CDB_SIZE;
@@ -733,7 +765,8 @@ static struct virtio_device_id id_table[] = {
 };
 
 static unsigned int features[] = {
-	VIRTIO_SCSI_F_HOTPLUG
+	VIRTIO_SCSI_F_HOTPLUG,
+	VIRTIO_SCSI_F_CHANGE,
 };
 
 static struct virtio_driver virtio_scsi_driver = {
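
The "+ 1 + 0x4000" on max_lun follows from SAM addressing: single-level LUNs
from 256 upward are reported with address format 1, which sets bit 14 of the
16-bit LUN field, so the values seen on the wire land in 16640-32767 and the
midlayer must accept IDs that large. A sketch of the encoding, decoded the
same way virtscsi_handle_param_change() does; it assumes the virtio-scsi
lun[] layout of byte 1 = target and bytes 2-3 = LUN, big-endian:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static void encode_lun(uint8_t lun_field[8], uint8_t target, uint16_t lun)
{
	uint16_t addr = lun;

	if (lun >= 256)
		addr |= 0x4000;		/* format 1: flat space addressing */

	lun_field[0] = 1;		/* virtio-scsi: always 1 */
	lun_field[1] = target;
	lun_field[2] = addr >> 8;
	lun_field[3] = addr & 0xFF;
}

int main(void)
{
	uint8_t f[8] = { 0 };

	encode_lun(f, 3, 300);
	/* decode as the event handler does: no masking of bit 14 */
	uint16_t decoded = (f[2] << 8) | f[3];

	assert(decoded == (300 | 0x4000));	/* lands in 16640..32767 */
	printf("LUN 300 travels as 0x%04x (%u)\n", decoded, decoded);
	return 0;
}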
diff --git a/include/linux/virtio_scsi.h b/include/linux/virtio_scsi.h
index dc8d305b0e05..d6b4440387b7 100644
--- a/include/linux/virtio_scsi.h
+++ b/include/linux/virtio_scsi.h
@@ -72,6 +72,7 @@ struct virtio_scsi_config {
 /* Feature Bits */
 #define VIRTIO_SCSI_F_INOUT                    0
 #define VIRTIO_SCSI_F_HOTPLUG                  1
+#define VIRTIO_SCSI_F_CHANGE                   2
 
 /* Response codes */
 #define VIRTIO_SCSI_S_OK                       0
@@ -108,6 +109,7 @@ struct virtio_scsi_config {
 #define VIRTIO_SCSI_T_NO_EVENT                 0
 #define VIRTIO_SCSI_T_TRANSPORT_RESET          1
 #define VIRTIO_SCSI_T_ASYNC_NOTIFY             2
+#define VIRTIO_SCSI_T_PARAM_CHANGE             3
 
 /* Reasons of transport reset event */
 #define VIRTIO_SCSI_EVT_RESET_HARD             0
diff --git a/include/scsi/fc/fc_fcp.h b/include/scsi/fc/fc_fcp.h
index 0d7d67e96d43..9c8702942b61 100644
--- a/include/scsi/fc/fc_fcp.h
+++ b/include/scsi/fc/fc_fcp.h
@@ -127,6 +127,9 @@ struct fcp_txrdy {
  *
  * All response frames will always contain the fcp_resp template.  Some
  * will also include the fcp_resp_len template.
+ *
+ * From Table 23, the FCP_RSP_INFO can be either 4 bytes or 8 bytes; both
+ * are valid lengths.
  */
 struct fcp_resp {
 	__u8		_fr_resvd[8];	/* reserved */
@@ -156,6 +159,9 @@ struct fcp_resp_rsp_info {
     __u8      _fr_resvd2[4];      /* reserved */
 };
 
+#define FCP_RESP_RSP_INFO_LEN4    4 /* without reserved field */
+#define FCP_RESP_RSP_INFO_LEN8    8 /* with reserved field */
+
 struct fcp_resp_with_ext {
 	struct fcp_resp resp;
 	struct fcp_resp_ext ext;
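
Since FCP_RSP_INFO may legitimately take either length, a parser should
accept both FCP_RESP_RSP_INFO_LEN4 and FCP_RESP_RSP_INFO_LEN8 and reject
anything else. A small sketch of that check, with the surrounding buffer
handling omitted:

#include <stdint.h>
#include <stdio.h>

#define FCP_RESP_RSP_INFO_LEN4	4	/* without reserved field */
#define FCP_RESP_RSP_INFO_LEN8	8	/* with reserved field */

static int rsp_info_len_ok(uint32_t len)
{
	return len == FCP_RESP_RSP_INFO_LEN4 || len == FCP_RESP_RSP_INFO_LEN8;
}

int main(void)
{
	uint32_t lens[] = { 4, 8, 12 };

	for (int i = 0; i < 3; i++)
		printf("rsp_info len %u: %s\n", lens[i],
		       rsp_info_len_ok(lens[i]) ? "valid" : "reject");
	return 0;
}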
diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h
index 22b07cc99808..8742d853a3b8 100644
--- a/include/scsi/libfcoe.h
+++ b/include/scsi/libfcoe.h
@@ -327,7 +327,6 @@ struct fcoe_percpu_s {
  * @lport:		       The associated local port
  * @fcoe_pending_queue:	       The pending Rx queue of skbs
  * @fcoe_pending_queue_active: Indicates if the pending queue is active
- * @priority:		       Packet priority (DCB)
  * @max_queue_depth:	       Max queue depth of pending queue
  * @min_queue_depth:	       Min queue depth of pending queue
  * @timer:		       The queue timer
@@ -343,7 +342,6 @@ struct fcoe_port {
 	struct fc_lport	      *lport;
 	struct sk_buff_head   fcoe_pending_queue;
 	u8		      fcoe_pending_queue_active;
-	u8		      priority;
 	u32		      max_queue_depth;
 	u32		      min_queue_depth;
 	struct timer_list     timer;
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index ac06cc595890..de5f5d8f1f8a 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -132,18 +132,10 @@ struct scsi_cmnd {
 	unsigned char tag;	/* SCSI-II queued command tag */
 };
 
+/* make sure not to use it with REQ_TYPE_BLOCK_PC commands */
 static inline struct scsi_driver *scsi_cmd_to_driver(struct scsi_cmnd *cmd)
 {
-	struct scsi_driver **sdp;
-
-	if (!cmd->request->rq_disk)
-		return NULL;
-
-	sdp = (struct scsi_driver **)cmd->request->rq_disk->private_data;
-	if (!sdp)
-		return NULL;
-
-	return *sdp;
+	return *(struct scsi_driver **)cmd->request->rq_disk->private_data;
 }
 
 extern struct scsi_cmnd *scsi_get_command(struct scsi_device *, gfp_t);