Diffstat (limited to 'drivers/scsi/bfa')
-rw-r--r--  drivers/scsi/bfa/bfa.h            |   51
-rw-r--r--  drivers/scsi/bfa/bfa_core.c       |   60
-rw-r--r--  drivers/scsi/bfa/bfa_defs.h       |  171
-rw-r--r--  drivers/scsi/bfa/bfa_defs_svc.h   |   99
-rw-r--r--  drivers/scsi/bfa/bfa_fc.h         |  155
-rw-r--r--  drivers/scsi/bfa/bfa_fcpim.c      |  736
-rw-r--r--  drivers/scsi/bfa/bfa_fcpim.h      |   45
-rw-r--r--  drivers/scsi/bfa/bfa_fcs.c        |   26
-rw-r--r--  drivers/scsi/bfa/bfa_fcs.h        |    1
-rw-r--r--  drivers/scsi/bfa/bfa_fcs_fcpim.c  |   37
-rw-r--r--  drivers/scsi/bfa/bfa_fcs_lport.c  |   74
-rw-r--r--  drivers/scsi/bfa/bfa_fcs_rport.c  |   49
-rw-r--r--  drivers/scsi/bfa/bfa_hw_cb.c      |   38
-rw-r--r--  drivers/scsi/bfa/bfa_hw_ct.c      |   25
-rw-r--r--  drivers/scsi/bfa/bfa_ioc.c        |  569
-rw-r--r--  drivers/scsi/bfa/bfa_ioc.h        |   48
-rw-r--r--  drivers/scsi/bfa/bfa_modules.h    |    3
-rw-r--r--  drivers/scsi/bfa/bfa_svc.c        |  249
-rw-r--r--  drivers/scsi/bfa/bfa_svc.h        |   29
-rw-r--r--  drivers/scsi/bfa/bfad.c           |    8
-rw-r--r--  drivers/scsi/bfa/bfad_bsg.c       | 1082
-rw-r--r--  drivers/scsi/bfa/bfad_bsg.h       |  237
-rw-r--r--  drivers/scsi/bfa/bfad_drv.h       |    6
-rw-r--r--  drivers/scsi/bfa/bfad_im.c        |   26
-rw-r--r--  drivers/scsi/bfa/bfad_im.h        |   22
-rw-r--r--  drivers/scsi/bfa/bfi.h            |   20
26 files changed, 3723 insertions, 143 deletions
diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
index 3b0af1102bf4..a796de935054 100644
--- a/drivers/scsi/bfa/bfa.h
+++ b/drivers/scsi/bfa/bfa.h
@@ -27,6 +27,7 @@
 struct bfa_s;
 
 typedef void (*bfa_isr_func_t) (struct bfa_s *bfa, struct bfi_msg_s *m);
+typedef void (*bfa_cb_cbfn_status_t) (void *cbarg, bfa_status_t status);
 
 /*
  * Interrupt message handlers
@@ -121,6 +122,7 @@ bfa_reqq_winit(struct bfa_reqq_wait_s *wqe, void (*qresume) (void *cbarg),
 #define bfa_cb_queue(__bfa, __hcb_qe, __cbfn, __cbarg) do {	\
 		(__hcb_qe)->cbfn  = (__cbfn);      \
 		(__hcb_qe)->cbarg = (__cbarg);      \
+		(__hcb_qe)->pre_rmv = BFA_FALSE;		\
 		list_add_tail(&(__hcb_qe)->qe, &(__bfa)->comp_q);      \
 	} while (0)
 
@@ -135,6 +137,11 @@ bfa_reqq_winit(struct bfa_reqq_wait_s *wqe, void (*qresume) (void *cbarg),
 		}							\
 	} while (0)
 
+#define bfa_cb_queue_status(__bfa, __hcb_qe, __status) do {		\
+		(__hcb_qe)->fw_status = (__status);			\
+		list_add_tail(&(__hcb_qe)->qe, &(__bfa)->comp_q);	\
+} while (0)
+
 #define bfa_cb_queue_done(__hcb_qe) do {	\
 		(__hcb_qe)->once = BFA_FALSE;	\
 	} while (0)
@@ -177,7 +184,7 @@ struct bfa_msix_s {
 struct bfa_hwif_s {
 	void (*hw_reginit)(struct bfa_s *bfa);
 	void (*hw_reqq_ack)(struct bfa_s *bfa, int reqq);
-	void (*hw_rspq_ack)(struct bfa_s *bfa, int rspq);
+	void (*hw_rspq_ack)(struct bfa_s *bfa, int rspq, u32 ci);
 	void (*hw_msix_init)(struct bfa_s *bfa, int nvecs);
 	void (*hw_msix_ctrl_install)(struct bfa_s *bfa);
 	void (*hw_msix_queue_install)(struct bfa_s *bfa);
@@ -268,10 +275,8 @@ struct bfa_iocfc_s {
 	((__bfa)->iocfc.hwif.hw_msix_queue_install(__bfa))
 #define bfa_msix_uninstall(__bfa)					\
 	((__bfa)->iocfc.hwif.hw_msix_uninstall(__bfa))
-#define bfa_isr_rspq_ack(__bfa, __queue) do {				\
-	if ((__bfa)->iocfc.hwif.hw_rspq_ack)				\
-		(__bfa)->iocfc.hwif.hw_rspq_ack(__bfa, __queue);	\
-} while (0)
+#define bfa_isr_rspq_ack(__bfa, __queue, __ci)				\
+	((__bfa)->iocfc.hwif.hw_rspq_ack(__bfa, __queue, __ci))
 #define bfa_isr_reqq_ack(__bfa, __queue) do {				\
 	if ((__bfa)->iocfc.hwif.hw_reqq_ack)				\
 		(__bfa)->iocfc.hwif.hw_reqq_ack(__bfa, __queue);	\
@@ -311,7 +316,7 @@ void bfa_msix_rspq(struct bfa_s *bfa, int vec);
 void bfa_msix_lpu_err(struct bfa_s *bfa, int vec);
 
 void bfa_hwcb_reginit(struct bfa_s *bfa);
-void bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq);
+void bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci);
 void bfa_hwcb_msix_init(struct bfa_s *bfa, int nvecs);
 void bfa_hwcb_msix_ctrl_install(struct bfa_s *bfa);
 void bfa_hwcb_msix_queue_install(struct bfa_s *bfa);
@@ -324,7 +329,8 @@ void bfa_hwcb_msix_get_rme_range(struct bfa_s *bfa, u32 *start,
 void bfa_hwct_reginit(struct bfa_s *bfa);
 void bfa_hwct2_reginit(struct bfa_s *bfa);
 void bfa_hwct_reqq_ack(struct bfa_s *bfa, int rspq);
-void bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq);
+void bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci);
+void bfa_hwct2_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci);
 void bfa_hwct_msix_init(struct bfa_s *bfa, int nvecs);
 void bfa_hwct_msix_ctrl_install(struct bfa_s *bfa);
 void bfa_hwct_msix_queue_install(struct bfa_s *bfa);
@@ -376,6 +382,22 @@ int bfa_iocfc_get_pbc_vports(struct bfa_s *bfa,
 #define bfa_get_fw_clock_res(__bfa)		\
 	((__bfa)->iocfc.cfgrsp->fwcfg.fw_tick_res)
 
+/*
+ * lun mask macros return NULL when min cfg is enabled and there is
+ * no memory allocated for lunmask.
+ */
+#define bfa_get_lun_mask(__bfa)					\
+	((&(__bfa)->modules.dconf_mod)->min_cfg) ? NULL :	\
+	 (&(BFA_DCONF_MOD(__bfa)->dconf->lun_mask))
+
+#define bfa_get_lun_mask_list(_bfa)				\
+	((&(_bfa)->modules.dconf_mod)->min_cfg) ? NULL :	\
+	 (bfa_get_lun_mask(_bfa)->lun_list)
+
+#define bfa_get_lun_mask_status(_bfa)				\
+	(((&(_bfa)->modules.dconf_mod)->min_cfg)		\
+	 ? BFA_LUNMASK_MINCFG : ((bfa_get_lun_mask(_bfa))->status))
+
 void bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids);
 void bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg);
 void bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg);
@@ -406,7 +428,22 @@ bfa_status_t bfa_iocfc_israttr_set(struct bfa_s *bfa,
 
 void bfa_iocfc_enable(struct bfa_s *bfa);
 void bfa_iocfc_disable(struct bfa_s *bfa);
+void bfa_iocfc_cb_dconf_modinit(struct bfa_s *bfa, bfa_status_t status);
 #define bfa_timer_start(_bfa, _timer, _timercb, _arg, _timeout)		\
 	bfa_timer_begin(&(_bfa)->timer_mod, _timer, _timercb, _arg, _timeout)
 
+struct bfa_cb_pending_q_s {
+	struct bfa_cb_qe_s	hcb_qe;
+	void			*data;  /* Driver buffer */
+};
+
+/* Common macros to operate on pending stats/attr apis */
+#define bfa_pending_q_init(__qe, __cbfn, __cbarg, __data) do {	\
+	bfa_q_qe_init(&((__qe)->hcb_qe.qe));			\
+	(__qe)->hcb_qe.cbfn = (__cbfn);				\
+	(__qe)->hcb_qe.cbarg = (__cbarg);			\
+	(__qe)->hcb_qe.pre_rmv = BFA_TRUE;			\
+	(__qe)->data = (__data);				\
+} while (0)
+
 #endif /* __BFA_H__ */
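
A minimal sketch of how the pre-removal completion path added above is meant to be used, pieced together from bfa_cb_pending_q_s, bfa_pending_q_init(), bfa_cb_queue_status() and the pre_rmv handling in bfa_comp_process() (bfa_core.c below). The requester function and its callback are hypothetical names, not part of this patch:

	/* hypothetical callback: 'status' is the fw_status stored by
	 * bfa_cb_queue_status() when the firmware response arrives */
	static void
	example_stats_cb(void *cbarg, bfa_status_t status)
	{
	}

	/* hypothetical requester: prepares a pending stats/attr request */
	static void
	example_stats_request(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb_qe,
			      void *drv_buf)
	{
		/* sets hcb_qe->pre_rmv = BFA_TRUE and remembers the buffer */
		bfa_pending_q_init(cb_qe, example_stats_cb, bfa, drv_buf);
		/* ... issue the firmware request here ... */
	}

On the firmware response the service code would complete the element with bfa_cb_queue_status(bfa, &cb_qe->hcb_qe, BFA_STATUS_OK); bfa_comp_process() then dequeues it (because pre_rmv is set) and invokes the callback with the saved fw_status, unlike the regular bfa_cb_queue() path, which passes BFA_TRUE/BFA_FALSE.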
diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c
index c38e589105a5..4bd546bcc240 100644
--- a/drivers/scsi/bfa/bfa_core.c
+++ b/drivers/scsi/bfa/bfa_core.c
@@ -33,6 +33,7 @@ static struct bfa_module_s *hal_mods[] = {
 	&hal_mod_uf,
 	&hal_mod_rport,
 	&hal_mod_fcp,
+	&hal_mod_dconf,
 	NULL
 };
 
@@ -237,8 +238,6 @@ bfa_isr_rspq(struct bfa_s *bfa, int qid)
 	u32	pi, ci;
 	struct list_head *waitq;
 
-	bfa_isr_rspq_ack(bfa, qid);
-
 	ci = bfa_rspq_ci(bfa, qid);
 	pi = bfa_rspq_pi(bfa, qid);
 
@@ -251,11 +250,9 @@ bfa_isr_rspq(struct bfa_s *bfa, int qid)
 	}
 
 	/*
-	 * update CI
+	 * acknowledge RME completions and update CI
 	 */
-	bfa_rspq_ci(bfa, qid) = pi;
-	writel(pi, bfa->iocfc.bfa_regs.rme_q_ci[qid]);
-	mmiowb();
+	bfa_isr_rspq_ack(bfa, qid, ci);
 
 	/*
 	 * Resume any pending requests in the corresponding reqq.
@@ -325,23 +322,19 @@ bfa_intx(struct bfa_s *bfa)
 	int queue;
 
 	intr = readl(bfa->iocfc.bfa_regs.intr_status);
-	if (!intr)
-		return BFA_FALSE;
 
 	qintr = intr & (__HFN_INT_RME_MASK | __HFN_INT_CPE_MASK);
 	if (qintr)
 		writel(qintr, bfa->iocfc.bfa_regs.intr_status);
 
 	/*
-	 * RME completion queue interrupt
+	 * Unconditional RME completion queue interrupt
 	 */
-	qintr = intr & __HFN_INT_RME_MASK;
-	if (qintr && bfa->queue_process) {
+	if (bfa->queue_process) {
 		for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
 			bfa_isr_rspq(bfa, queue);
 	}
 
-	intr &= ~qintr;
 	if (!intr)
 		return BFA_TRUE;
 
@@ -432,7 +425,8 @@ bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
 				   __HFN_INT_MBOX_LPU1_CT2);
 		intr    &= __HFN_INT_ERR_MASK_CT2;
 	} else {
-		halt_isr = intr & __HFN_INT_LL_HALT;
+		halt_isr = bfa_asic_id_ct(bfa->ioc.pcidev.device_id) ?
+					  (intr & __HFN_INT_LL_HALT) : 0;
 		pss_isr  = intr & __HFN_INT_ERR_PSS;
 		lpu_isr  = intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1);
 		intr    &= __HFN_INT_ERR_MASK;
@@ -578,7 +572,7 @@ bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
 	} else {
 		iocfc->hwif.hw_reginit = bfa_hwcb_reginit;
 		iocfc->hwif.hw_reqq_ack = NULL;
-		iocfc->hwif.hw_rspq_ack = NULL;
+		iocfc->hwif.hw_rspq_ack = bfa_hwcb_rspq_ack;
 		iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init;
 		iocfc->hwif.hw_msix_ctrl_install = bfa_hwcb_msix_ctrl_install;
 		iocfc->hwif.hw_msix_queue_install = bfa_hwcb_msix_queue_install;
@@ -595,7 +589,7 @@ bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
 	if (bfa_asic_id_ct2(bfa_ioc_devid(&bfa->ioc))) {
 		iocfc->hwif.hw_reginit = bfa_hwct2_reginit;
 		iocfc->hwif.hw_isr_mode_set = NULL;
-		iocfc->hwif.hw_rspq_ack = NULL;
+		iocfc->hwif.hw_rspq_ack = bfa_hwct2_rspq_ack;
 	}
 
 	iocfc->hwif.hw_reginit(bfa);
@@ -685,7 +679,7 @@ bfa_iocfc_start_submod(struct bfa_s *bfa)
 
 	bfa->queue_process = BFA_TRUE;
 	for (i = 0; i < BFI_IOC_MAX_CQS; i++)
-		bfa_isr_rspq_ack(bfa, i);
+		bfa_isr_rspq_ack(bfa, i, bfa_rspq_ci(bfa, i));
 
 	for (i = 0; hal_mods[i]; i++)
 		hal_mods[i]->start(bfa);
@@ -709,7 +703,7 @@ bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete)
 	struct bfa_s	*bfa = bfa_arg;
 
 	if (complete) {
-		if (bfa->iocfc.cfgdone)
+		if (bfa->iocfc.cfgdone && BFA_DCONF_MOD(bfa)->flashdone)
 			bfa_cb_init(bfa->bfad, BFA_STATUS_OK);
 		else
 			bfa_cb_init(bfa->bfad, BFA_STATUS_FAILED);
@@ -822,9 +816,11 @@ bfa_iocfc_cfgrsp(struct bfa_s *bfa)
 	 */
 	bfa_fcport_init(bfa);
 
-	if (iocfc->action == BFA_IOCFC_ACT_INIT)
-		bfa_cb_queue(bfa, &iocfc->init_hcb_qe, bfa_iocfc_init_cb, bfa);
-	else {
+	if (iocfc->action == BFA_IOCFC_ACT_INIT) {
+		if (BFA_DCONF_MOD(bfa)->flashdone == BFA_TRUE)
+			bfa_cb_queue(bfa, &iocfc->init_hcb_qe,
+				bfa_iocfc_init_cb, bfa);
+	} else {
 		if (bfa->iocfc.action == BFA_IOCFC_ACT_ENABLE)
 			bfa_cb_queue(bfa, &bfa->iocfc.en_hcb_qe,
 					bfa_iocfc_enable_cb, bfa);
@@ -1045,6 +1041,7 @@ bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status)
 	}
 
 	bfa_iocfc_send_cfg(bfa);
+	bfa_dconf_modinit(bfa);
 }
 
 /*
@@ -1207,7 +1204,9 @@ bfa_iocfc_stop(struct bfa_s *bfa)
 	bfa->iocfc.action = BFA_IOCFC_ACT_STOP;
 
 	bfa->queue_process = BFA_FALSE;
-	bfa_ioc_disable(&bfa->ioc);
+	bfa_dconf_modexit(bfa);
+	if (BFA_DCONF_MOD(bfa)->flashdone == BFA_TRUE)
+		bfa_ioc_disable(&bfa->ioc);
 }
 
 void
@@ -1540,10 +1539,17 @@ bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q)
 	struct list_head		*qe;
 	struct list_head		*qen;
 	struct bfa_cb_qe_s	*hcb_qe;
+	bfa_cb_cbfn_status_t	cbfn;
 
 	list_for_each_safe(qe, qen, comp_q) {
 		hcb_qe = (struct bfa_cb_qe_s *) qe;
-		hcb_qe->cbfn(hcb_qe->cbarg, BFA_TRUE);
+		if (hcb_qe->pre_rmv) {
+			/* qe is invalid after return, dequeue before cbfn() */
+			list_del(qe);
+			cbfn = (bfa_cb_cbfn_status_t)(hcb_qe->cbfn);
+			cbfn(hcb_qe->cbarg, hcb_qe->fw_status);
+		} else
+			hcb_qe->cbfn(hcb_qe->cbarg, BFA_TRUE);
 	}
 }
 
@@ -1556,10 +1562,20 @@ bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q)
 	while (!list_empty(comp_q)) {
 		bfa_q_deq(comp_q, &qe);
 		hcb_qe = (struct bfa_cb_qe_s *) qe;
+		WARN_ON(hcb_qe->pre_rmv);
 		hcb_qe->cbfn(hcb_qe->cbarg, BFA_FALSE);
 	}
 }
 
+void
+bfa_iocfc_cb_dconf_modinit(struct bfa_s *bfa, bfa_status_t status)
+{
+	if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT) {
+		if (bfa->iocfc.cfgdone == BFA_TRUE)
+			bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
+				bfa_iocfc_init_cb, bfa);
+	}
+}
 
 /*
  * Return the list of PCI vendor/device id lists supported by this
diff --git a/drivers/scsi/bfa/bfa_defs.h b/drivers/scsi/bfa/bfa_defs.h
index ed8d31b0188b..7b3d235d20b4 100644
--- a/drivers/scsi/bfa/bfa_defs.h
+++ b/drivers/scsi/bfa/bfa_defs.h
@@ -144,6 +144,7 @@ enum bfa_status {
 	BFA_STATUS_INVLD_DFSZ	= 24,	/*  Invalid Max data field size */
 	BFA_STATUS_CMD_NOTSUPP  = 26,   /*  Command/API not supported */
 	BFA_STATUS_FABRIC_RJT	= 29,	/*  Reject from attached fabric */
+	BFA_STATUS_UNKNOWN_VWWN = 30,	/*  VPORT PWWN not found */
 	BFA_STATUS_PORT_OFFLINE = 34,	/*  Port is not online */
 	BFA_STATUS_VPORT_WWN_BP	= 46,	/*  WWN is same as base port's WWN */
 	BFA_STATUS_PORT_NOT_DISABLED = 47, /* Port not disabled disable port */
@@ -164,6 +165,8 @@ enum bfa_status {
 	BFA_STATUS_INVALID_MAC  = 134, /*  Invalid MAC address */
 	BFA_STATUS_PBC		= 154, /*  Operation not allowed for pre-boot
 					*  configuration */
+	BFA_STATUS_BAD_FWCFG = 156,	/* Bad firmware configuration */
+	BFA_STATUS_INVALID_VENDOR = 158, /* Invalid switch vendor */
 	BFA_STATUS_SFP_NOT_READY = 159,	/* SFP info is not ready. Retry */
 	BFA_STATUS_TRUNK_ENABLED = 164, /* Trunk is already enabled on
 					 * this adapter */
@@ -172,11 +175,15 @@ enum bfa_status {
 	BFA_STATUS_IOPROFILE_OFF = 175, /* IO profile OFF */
 	BFA_STATUS_PHY_NOT_PRESENT = 183, /* PHY module not present */
 	BFA_STATUS_FEATURE_NOT_SUPPORTED = 192,	/* Feature not supported */
+	BFA_STATUS_ENTRY_EXISTS = 193,	/* Entry already exists */
+	BFA_STATUS_ENTRY_NOT_EXISTS = 194, /* Entry does not exist */
+	BFA_STATUS_NO_CHANGE = 195,	/* Feature already in that state */
 	BFA_STATUS_FAA_ENABLED = 197,	/* FAA is already enabled */
 	BFA_STATUS_FAA_DISABLED = 198,	/* FAA is already disabled */
 	BFA_STATUS_FAA_ACQUIRED = 199,	/* FAA is already acquired */
 	BFA_STATUS_FAA_ACQ_ADDR = 200,	/* Acquiring addr */
 	BFA_STATUS_ERROR_TRUNK_ENABLED = 203,	/* Trunk enabled on adapter */
+	BFA_STATUS_MAX_ENTRY_REACHED = 212,	/* MAX entry reached */
 	BFA_STATUS_MAX_VAL		/* Unknown error code */
 };
 #define bfa_status_t enum bfa_status
@@ -359,6 +366,139 @@ struct bfa_ioc_attr_s {
 };
 
 /*
+ *			AEN related definitions
+ */
+enum bfa_aen_category {
+	BFA_AEN_CAT_ADAPTER	= 1,
+	BFA_AEN_CAT_PORT	= 2,
+	BFA_AEN_CAT_LPORT	= 3,
+	BFA_AEN_CAT_RPORT	= 4,
+	BFA_AEN_CAT_ITNIM	= 5,
+	BFA_AEN_CAT_AUDIT	= 8,
+	BFA_AEN_CAT_IOC		= 9,
+};
+
+/* BFA adapter level events */
+enum bfa_adapter_aen_event {
+	BFA_ADAPTER_AEN_ADD	= 1,	/* New Adapter found event */
+	BFA_ADAPTER_AEN_REMOVE	= 2,	/* Adapter removed event */
+};
+
+struct bfa_adapter_aen_data_s {
+	char	serial_num[BFA_ADAPTER_SERIAL_NUM_LEN];
+	u32	nports; /* Number of NPorts */
+	wwn_t	pwwn;   /* WWN of one of its physical port */
+};
+
+/* BFA physical port Level events */
+enum bfa_port_aen_event {
+	BFA_PORT_AEN_ONLINE	= 1,    /* Physical Port online event */
+	BFA_PORT_AEN_OFFLINE	= 2,    /* Physical Port offline event */
+	BFA_PORT_AEN_RLIR	= 3,    /* RLIR event, not supported */
+	BFA_PORT_AEN_SFP_INSERT	= 4,    /* SFP inserted event */
+	BFA_PORT_AEN_SFP_REMOVE	= 5,    /* SFP removed event */
+	BFA_PORT_AEN_SFP_POM	= 6,    /* SFP POM event */
+	BFA_PORT_AEN_ENABLE	= 7,    /* Physical Port enable event */
+	BFA_PORT_AEN_DISABLE	= 8,    /* Physical Port disable event */
+	BFA_PORT_AEN_AUTH_ON	= 9,    /* Physical Port auth success event */
+	BFA_PORT_AEN_AUTH_OFF	= 10,   /* Physical Port auth fail event */
+	BFA_PORT_AEN_DISCONNECT	= 11,   /* Physical Port disconnect event */
+	BFA_PORT_AEN_QOS_NEG	= 12,   /* Base Port QOS negotiation event */
+	BFA_PORT_AEN_FABRIC_NAME_CHANGE	= 13, /* Fabric Name/WWN change */
+	BFA_PORT_AEN_SFP_ACCESS_ERROR	= 14, /* SFP read error event */
+	BFA_PORT_AEN_SFP_UNSUPPORT	= 15, /* Unsupported SFP event */
+};
+
+enum bfa_port_aen_sfp_pom {
+	BFA_PORT_AEN_SFP_POM_GREEN = 1, /* Normal */
+	BFA_PORT_AEN_SFP_POM_AMBER = 2, /* Warning */
+	BFA_PORT_AEN_SFP_POM_RED   = 3, /* Critical */
+	BFA_PORT_AEN_SFP_POM_MAX   = BFA_PORT_AEN_SFP_POM_RED
+};
+
+struct bfa_port_aen_data_s {
+	wwn_t		pwwn;		/* WWN of the physical port */
+	wwn_t		fwwn;		/* WWN of the fabric port */
+	u32		phy_port_num;	/* For SFP related events */
+	u16		ioc_type;
+	u16		level;		/* Only transitions will be informed */
+	mac_t		mac;		/* MAC address of the ethernet port */
+	u16		rsvd;
+};
+
+/* BFA AEN logical port events */
+enum bfa_lport_aen_event {
+	BFA_LPORT_AEN_NEW	= 1,		/* LPort created event */
+	BFA_LPORT_AEN_DELETE	= 2,		/* LPort deleted event */
+	BFA_LPORT_AEN_ONLINE	= 3,		/* LPort online event */
+	BFA_LPORT_AEN_OFFLINE	= 4,		/* LPort offline event */
+	BFA_LPORT_AEN_DISCONNECT = 5,		/* LPort disconnect event */
+	BFA_LPORT_AEN_NEW_PROP	= 6,		/* VPort created event */
+	BFA_LPORT_AEN_DELETE_PROP = 7,		/* VPort deleted event */
+	BFA_LPORT_AEN_NEW_STANDARD = 8,		/* VPort created event */
+	BFA_LPORT_AEN_DELETE_STANDARD = 9,	/* VPort deleted event */
+	BFA_LPORT_AEN_NPIV_DUP_WWN = 10,	/* VPort with duplicate WWN */
+	BFA_LPORT_AEN_NPIV_FABRIC_MAX = 11,	/* Max NPIV in fabric/fport */
+	BFA_LPORT_AEN_NPIV_UNKNOWN = 12,	/* Unknown NPIV Error code */
+};
+
+struct bfa_lport_aen_data_s {
+	u16	vf_id;	/* vf_id of this logical port */
+	u16	roles;	/* Logical port mode,IM/TM/IP etc */
+	u32	rsvd;
+	wwn_t	ppwwn;	/* WWN of its physical port */
+	wwn_t	lpwwn;	/* WWN of this logical port */
+};
+
+/* BFA ITNIM events */
+enum bfa_itnim_aen_event {
+	BFA_ITNIM_AEN_ONLINE	 = 1,	/* Target online */
+	BFA_ITNIM_AEN_OFFLINE	 = 2,	/* Target offline */
+	BFA_ITNIM_AEN_DISCONNECT = 3,	/* Target disconnected */
+};
+
+struct bfa_itnim_aen_data_s {
+	u16		vf_id;		/* vf_id of the IT nexus */
+	u16		rsvd[3];
+	wwn_t		ppwwn;		/* WWN of its physical port */
+	wwn_t		lpwwn;		/* WWN of logical port */
+	wwn_t		rpwwn;		/* WWN of remote(target) port */
+};
+
+/* BFA audit events */
+enum bfa_audit_aen_event {
+	BFA_AUDIT_AEN_AUTH_ENABLE	= 1,
+	BFA_AUDIT_AEN_AUTH_DISABLE	= 2,
+	BFA_AUDIT_AEN_FLASH_ERASE	= 3,
+	BFA_AUDIT_AEN_FLASH_UPDATE	= 4,
+};
+
+struct bfa_audit_aen_data_s {
+	wwn_t	pwwn;
+	int	partition_inst;
+	int	partition_type;
+};
+
+/* BFA IOC level events */
+enum bfa_ioc_aen_event {
+	BFA_IOC_AEN_HBGOOD  = 1,	/* Heart Beat restore event	*/
+	BFA_IOC_AEN_HBFAIL  = 2,	/* Heart Beat failure event	*/
+	BFA_IOC_AEN_ENABLE  = 3,	/* IOC enabled event		*/
+	BFA_IOC_AEN_DISABLE = 4,	/* IOC disabled event		*/
+	BFA_IOC_AEN_FWMISMATCH  = 5,	/* IOC firmware mismatch	*/
+	BFA_IOC_AEN_FWCFG_ERROR = 6,	/* IOC firmware config error	*/
+	BFA_IOC_AEN_INVALID_VENDOR = 7,
+	BFA_IOC_AEN_INVALID_NWWN = 8,	/* Zero NWWN			*/
+	BFA_IOC_AEN_INVALID_PWWN = 9	/* Zero PWWN			*/
+};
+
+struct bfa_ioc_aen_data_s {
+	wwn_t	pwwn;
+	u16	ioc_type;
+	mac_t	mac;
+};
+
+/*
  * ---------------------- mfg definitions ------------
  */
 
@@ -520,6 +660,20 @@ struct bfa_boot_bootlun_s {
 /*
  * BOOT boot configuraton
  */
+struct bfa_boot_cfg_s {
+	u8		version;
+	u8		rsvd1;
+	u16		chksum;
+	u8		enable;		/* enable/disable SAN boot */
+	u8		speed;          /* boot speed settings */
+	u8		topology;       /* boot topology setting */
+	u8		bootopt;        /* bfa_boot_bootopt_t */
+	u32		nbluns;         /* number of boot luns */
+	u32		rsvd2;
+	struct bfa_boot_bootlun_s blun[BFA_BOOT_BOOTLUN_MAX];
+	struct bfa_boot_bootlun_s blun_disc[BFA_BOOT_BOOTLUN_MAX];
+};
+
 struct bfa_boot_pbc_s {
 	u8              enable;         /*  enable/disable SAN boot */
 	u8              speed;          /*  boot speed settings */
@@ -529,6 +683,15 @@ struct bfa_boot_pbc_s {
 	struct bfa_boot_bootlun_s pblun[BFA_PREBOOT_BOOTLUN_MAX];
 };
 
+struct bfa_ethboot_cfg_s {
+	u8		version;
+	u8		rsvd1;
+	u16		chksum;
+	u8		enable;	/* enable/disable Eth/PXE boot */
+	u8		rsvd2;
+	u16		vlan;
+};
+
 /*
  * ASIC block configuration related structures
  */
@@ -587,6 +750,14 @@ struct bfa_ablk_cfg_s {
  */
 #define SFP_DIAGMON_SIZE	10 /* num bytes of diag monitor data */
 
+/* SFP state change notification event */
+#define BFA_SFP_SCN_REMOVED	0
+#define BFA_SFP_SCN_INSERTED	1
+#define BFA_SFP_SCN_POM		2
+#define BFA_SFP_SCN_FAILED	3
+#define BFA_SFP_SCN_UNSUPPORT	4
+#define BFA_SFP_SCN_VALID	5
+
 enum bfa_defs_sfp_media_e {
 	BFA_SFP_MEDIA_UNKNOWN	= 0x00,
 	BFA_SFP_MEDIA_CU	= 0x01,
diff --git a/drivers/scsi/bfa/bfa_defs_svc.h b/drivers/scsi/bfa/bfa_defs_svc.h
index 0b97525803fb..863c6ba7d5eb 100644
--- a/drivers/scsi/bfa/bfa_defs_svc.h
+++ b/drivers/scsi/bfa/bfa_defs_svc.h
@@ -268,6 +268,7 @@ struct bfa_fw_port_snsm_stats_s {
     u32    error_resets;       /*  error resets initiated by upsm      */
     u32    sync_lost;          /*  Sync loss count                     */
     u32    sig_lost;           /*  Signal loss count                   */
+	u32	asn8g_attempts;	/* SNSM HWSM at 8Gbps attempts */
 };
 
 struct bfa_fw_port_physm_stats_s {
@@ -468,6 +469,7 @@ struct bfa_fw_stats_s {
  * QoS states
  */
 enum bfa_qos_state {
+	BFA_QOS_DISABLED = 0,		/* QoS is disabled */
 	BFA_QOS_ONLINE = 1,		/*  QoS is online */
 	BFA_QOS_OFFLINE = 2,		/*  QoS is offline */
 };
@@ -670,6 +672,12 @@ struct bfa_itnim_iostats_s {
 	u32	tm_iocdowns;		/*  TM cleaned-up due to IOC down   */
 	u32	tm_cleanups;		/*  TM cleanup requests	*/
 	u32	tm_cleanup_comps;	/*  TM cleanup completions	*/
+	u32	lm_lun_across_sg;	/*  LM lun is across sg data buf */
+	u32	lm_lun_not_sup;		/*  LM lun not supported */
+	u32	lm_rpl_data_changed;	/*  LM report-lun data changed */
+	u32	lm_wire_residue_changed; /* LM report-lun rsp residue changed */
+	u32	lm_small_buf_addresidue; /* LM buf smaller than reported cnt */
+	u32	lm_lun_not_rdy;		/* LM lun not ready */
 };
 
 /* Modify char* port_stt[] in bfal_port.c if a new state was added */
@@ -785,8 +793,51 @@ enum bfa_port_linkstate_rsn {
 	CEE_ISCSI_PRI_PFC_OFF			= 42,
 	CEE_ISCSI_PRI_OVERLAP_FCOE_PRI		= 43
 };
+
+#define MAX_LUN_MASK_CFG 16
+
+/*
+ * Initially the flash content may be all 0xff (uninitialized). The entries
+ * change state when LUN masking is enabled or disabled. While a REPORT LUNS
+ * command is being processed an entry goes from BFA_IOIM_LUN_MASK_ACTIVE to
+ * BFA_IOIM_LUN_MASK_FETCHED and back to BFA_IOIM_LUN_MASK_ACTIVE.
+ */
+enum bfa_ioim_lun_mask_state_s {
+	BFA_IOIM_LUN_MASK_INACTIVE = 0,
+	BFA_IOIM_LUN_MASK_ACTIVE = 1,
+	BFA_IOIM_LUN_MASK_FETCHED = 2,
+};
+
+enum bfa_lunmask_state_s {
+	BFA_LUNMASK_DISABLED = 0x00,
+	BFA_LUNMASK_ENABLED = 0x01,
+	BFA_LUNMASK_MINCFG = 0x02,
+	BFA_LUNMASK_UNINITIALIZED = 0xff,
+};
+
 #pragma pack(1)
 /*
+ * LUN mask configuration
+ */
+struct bfa_lun_mask_s {
+	wwn_t		lp_wwn;
+	wwn_t		rp_wwn;
+	struct scsi_lun	lun;
+	u8		ua;
+	u8		rsvd[3];
+	u16		rp_tag;
+	u8		lp_tag;
+	u8		state;
+};
+
+#define MAX_LUN_MASK_CFG 16
+struct bfa_lunmask_cfg_s {
+	u32	status;
+	u32	rsvd;
+	struct bfa_lun_mask_s	lun_list[MAX_LUN_MASK_CFG];
+};
+
+/*
  *      Physical port configuration
  */
 struct bfa_port_cfg_s {
@@ -1228,4 +1279,52 @@ struct bfa_cee_stats_s {
 
 #pragma pack()
 
+/*
+ *			AEN related definitions
+ */
+#define BFAD_NL_VENDOR_ID (((u64)0x01 << SCSI_NL_VID_TYPE_SHIFT) \
+			   | BFA_PCI_VENDOR_ID_BROCADE)
+
+/* BFA remote port events */
+enum bfa_rport_aen_event {
+	BFA_RPORT_AEN_ONLINE     = 1,   /* RPort online event */
+	BFA_RPORT_AEN_OFFLINE    = 2,   /* RPort offline event */
+	BFA_RPORT_AEN_DISCONNECT = 3,   /* RPort disconnect event */
+	BFA_RPORT_AEN_QOS_PRIO   = 4,   /* QOS priority change event */
+	BFA_RPORT_AEN_QOS_FLOWID = 5,   /* QOS flow Id change event */
+};
+
+struct bfa_rport_aen_data_s {
+	u16             vf_id;  /* vf_id of this logical port */
+	u16             rsvd[3];
+	wwn_t           ppwwn;  /* WWN of its physical port */
+	wwn_t           lpwwn;  /* WWN of this logical port */
+	wwn_t           rpwwn;  /* WWN of this remote port */
+	union {
+		struct bfa_rport_qos_attr_s qos;
+	} priv;
+};
+
+union bfa_aen_data_u {
+	struct bfa_adapter_aen_data_s	adapter;
+	struct bfa_port_aen_data_s	port;
+	struct bfa_lport_aen_data_s	lport;
+	struct bfa_rport_aen_data_s	rport;
+	struct bfa_itnim_aen_data_s	itnim;
+	struct bfa_audit_aen_data_s	audit;
+	struct bfa_ioc_aen_data_s	ioc;
+};
+
+#define BFA_AEN_MAX_ENTRY	512
+
+struct bfa_aen_entry_s {
+	struct list_head	qe;
+	enum bfa_aen_category   aen_category;
+	u32                     aen_type;
+	union bfa_aen_data_u    aen_data;
+	struct timeval          aen_tv;
+	u32                     seq_num;
+	u32                     bfad_num;
+};
+
 #endif /* __BFA_DEFS_SVC_H__ */
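
A minimal sketch of posting an event with the AEN structures defined above, following the same pattern as bfa_fcs_fabric_aen_post() added in the bfa_fcs.c hunk further down. bfad_get_aen_entry() and bfad_im_post_vendor_event() are taken from that hunk (they belong to bfad_im.h, which is in the diffstat but not shown here); the function and its arguments are otherwise hypothetical:

	static void
	example_post_itnim_online(struct bfad_s *bfad, u32 *aen_seq,
				  wwn_t ppwwn, wwn_t lpwwn, wwn_t rpwwn)
	{
		struct bfa_aen_entry_s	*aen_entry;

		bfad_get_aen_entry(bfad, aen_entry);	/* may leave it NULL */
		if (!aen_entry)
			return;

		aen_entry->aen_data.itnim.ppwwn = ppwwn;
		aen_entry->aen_data.itnim.lpwwn = lpwwn;
		aen_entry->aen_data.itnim.rpwwn = rpwwn;

		/* category + event select the member of union bfa_aen_data_u */
		bfad_im_post_vendor_event(aen_entry, bfad, ++(*aen_seq),
					  BFA_AEN_CAT_ITNIM,
					  BFA_ITNIM_AEN_ONLINE);
	}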
diff --git a/drivers/scsi/bfa/bfa_fc.h b/drivers/scsi/bfa/bfa_fc.h
index 8d0b88f67a38..50b6a1c86195 100644
--- a/drivers/scsi/bfa/bfa_fc.h
+++ b/drivers/scsi/bfa/bfa_fc.h
@@ -56,6 +56,161 @@ struct scsi_cdb_s {
 
 #define SCSI_MAX_ALLOC_LEN      0xFF    /* maximum allocarion length */
 
+#define SCSI_SENSE_CUR_ERR	0x70
+#define SCSI_SENSE_DEF_ERR	0x71
+
+/*
+ * SCSI additional sense codes
+ */
+#define SCSI_ASC_LUN_NOT_READY		0x04
+#define SCSI_ASC_LUN_NOT_SUPPORTED	0x25
+#define SCSI_ASC_TOCC			0x3F
+
+/*
+ * SCSI additional sense code qualifiers
+ */
+#define SCSI_ASCQ_MAN_INTR_REQ		0x03	/* manual intervention req */
+#define SCSI_ASCQ_RL_DATA_CHANGED	0x0E	/* report luns data changed */
+
+/*
+ * Methods of reporting informational exceptions
+ */
+#define SCSI_MP_IEC_UNIT_ATTN		0x2	/* generate unit attention */
+
+struct scsi_report_luns_data_s {
+	u32		lun_list_length;	/* length of LUN list, in bytes */
+	u32		reserved;
+	struct scsi_lun	lun[1];			/* first LUN in lun list */
+};
+
+struct scsi_inquiry_vendor_s {
+	u8	vendor_id[8];
+};
+
+struct scsi_inquiry_prodid_s {
+	u8	product_id[16];
+};
+
+struct scsi_inquiry_prodrev_s {
+	u8	product_rev[4];
+};
+
+struct scsi_inquiry_data_s {
+#ifdef __BIG_ENDIAN
+	u8		peripheral_qual:3;	/* peripheral qualifier */
+	u8		device_type:5;		/* peripheral device type */
+	u8		rmb:1;			/* removable medium bit */
+	u8		device_type_mod:7;	/* device type modifier */
+	u8		version;
+	u8		aenc:1;		/* async evt notification capability */
+	u8		trm_iop:1;	/* terminate I/O process */
+	u8		norm_aca:1;	/* normal ACA supported */
+	u8		hi_support:1;	/* SCSI-3: supports REPORT LUNS */
+	u8		rsp_data_format:4;
+	u8		additional_len;
+	u8		sccs:1;
+	u8		reserved1:7;
+	u8		reserved2:1;
+	u8		enc_serv:1;	/* enclosure service component */
+	u8		reserved3:1;
+	u8		multi_port:1;	/* multi-port device */
+	u8		m_chngr:1;	/* device in medium transport element */
+	u8		ack_req_q:1;	/* SIP specific bit */
+	u8		addr32:1;	/* SIP specific bit */
+	u8		addr16:1;	/* SIP specific bit */
+	u8		rel_adr:1;	/* relative address */
+	u8		w_bus32:1;
+	u8		w_bus16:1;
+	u8		synchronous:1;
+	u8		linked_commands:1;
+	u8		trans_dis:1;
+	u8		cmd_queue:1;	/* command queueing supported */
+	u8		soft_reset:1;	/* soft reset alternative (VS) */
+#else
+	u8		device_type:5;	/* peripheral device type */
+	u8		peripheral_qual:3; /* peripheral qualifier */
+	u8		device_type_mod:7; /* device type modifier */
+	u8		rmb:1;		/* removable medium bit */
+	u8		version;
+	u8		rsp_data_format:4;
+	u8		hi_support:1;	/* SCSI-3: supports REPORT LUNS */
+	u8		norm_aca:1;	/* normal ACA supported */
+	u8		terminate_iop:1;/* terminate I/O process */
+	u8		aenc:1;		/* async evt notification capability */
+	u8		additional_len;
+	u8		reserved1:7;
+	u8		sccs:1;
+	u8		addr16:1;	/* SIP specific bit */
+	u8		addr32:1;	/* SIP specific bit */
+	u8		ack_req_q:1;	/* SIP specific bit */
+	u8		m_chngr:1;	/* device in medium transport element */
+	u8		multi_port:1;	/* multi-port device */
+	u8		reserved3:1;	/* TBD - Vendor Specific */
+	u8		enc_serv:1;	/* enclosure service component */
+	u8		reserved2:1;
+	u8		soft_seset:1;	/* soft reset alternative (VS) */
+	u8		cmd_queue:1;	/* command queueing supported */
+	u8		trans_dis:1;
+	u8		linked_commands:1;
+	u8		synchronous:1;
+	u8		w_bus16:1;
+	u8		w_bus32:1;
+	u8		rel_adr:1;	/* relative address */
+#endif
+	struct scsi_inquiry_vendor_s	vendor_id;
+	struct scsi_inquiry_prodid_s	product_id;
+	struct scsi_inquiry_prodrev_s	product_rev;
+	u8		vendor_specific[20];
+	u8		reserved4[40];
+};
+
+/*
+ *	SCSI sense data format
+ */
+struct scsi_sense_s {
+#ifdef __BIG_ENDIAN
+	u8		valid:1;
+	u8		rsp_code:7;
+#else
+	u8		rsp_code:7;
+	u8		valid:1;
+#endif
+	u8		seg_num;
+#ifdef __BIG_ENDIAN
+	u8		file_mark:1;
+	u8		eom:1;		/* end of media */
+	u8		ili:1;		/* incorrect length indicator */
+	u8		reserved:1;
+	u8		sense_key:4;
+#else
+	u8		sense_key:4;
+	u8		reserved:1;
+	u8		ili:1;		/* incorrect length indicator */
+	u8		eom:1;		/* end of media */
+	u8		file_mark:1;
+#endif
+	u8		information[4];	/* device-type or cmd specific info */
+	u8		add_sense_length; /* additional sense length */
+	u8		command_info[4];/* command specific information */
+	u8		asc;		/* additional sense code */
+	u8		ascq;		/* additional sense code qualifier */
+	u8		fru_code;	/* field replaceable unit code */
+#ifdef __BIG_ENDIAN
+	u8		sksv:1;		/* sense key specific valid */
+	u8		c_d:1;		/* command/data bit */
+	u8		res1:2;
+	u8		bpv:1;		/* bit pointer valid */
+	u8		bpointer:3;	/* bit pointer */
+#else
+	u8		bpointer:3;	/* bit pointer */
+	u8		bpv:1;		/* bit pointer valid */
+	u8		res1:2;
+	u8		c_d:1;		/* command/data bit */
+	u8		sksv:1;		/* sense key specific valid */
+#endif
+	u8		fpointer[2];	/* field pointer */
+};
+
 /*
  * Fibre Channel Header Structure (FCHS) definition
  */
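
The sense-data definitions added above are consumed by the LUN-masking completion callbacks introduced in bfa_fcpim.c below. A minimal sketch of filling the fixed-format sense buffer for an ILLEGAL REQUEST / "logical unit not supported" check condition, mirroring __bfa_cb_ioim_lm_lun_not_sup(); the helper name is hypothetical and ILLEGAL_REQUEST comes from the kernel SCSI headers:

	static void
	example_fill_lun_not_sup_sense(struct scsi_sense_s *sns)
	{
		memset(sns, 0, sizeof(*sns));
		sns->rsp_code = SCSI_SENSE_CUR_ERR;	/* 0x70: current error */
		sns->sense_key = ILLEGAL_REQUEST;
		sns->asc = SCSI_ASC_LUN_NOT_SUPPORTED;	/* 0x25 */
		sns->ascq = 0;
		sns->add_sense_length = 0xa;	/* 10 additional sense bytes */
	}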
diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
index a4e7951c6063..e07bd4745d8b 100644
--- a/drivers/scsi/bfa/bfa_fcpim.c
+++ b/drivers/scsi/bfa/bfa_fcpim.c
@@ -24,6 +24,9 @@ BFA_TRC_FILE(HAL, FCPIM);
  *  BFA ITNIM Related definitions
  */
 static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);
+static bfa_boolean_t bfa_ioim_lm_proc_rpl_data(struct bfa_ioim_s *ioim);
+static bfa_boolean_t bfa_ioim_lm_proc_inq_data(struct bfa_ioim_s *ioim);
+static void bfa_ioim_lm_init(struct bfa_s *bfa);
 
 #define BFA_ITNIM_FROM_TAG(_fcpim, _tag)                                \
 	(((_fcpim)->itnim_arr + ((_tag) & ((_fcpim)->num_itnims - 1))))
@@ -57,6 +60,14 @@ static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);
 	}								\
 } while (0)
 
+#define bfa_ioim_rp_wwn(__ioim)						\
+	(((struct bfa_fcs_rport_s *)					\
+	 (__ioim)->itnim->rport->rport_drv)->pwwn)
+
+#define bfa_ioim_lp_wwn(__ioim)						\
+	((BFA_LPS_FROM_TAG(BFA_LPS_MOD((__ioim)->bfa),			\
+	(__ioim)->itnim->rport->rport_info.lp_tag))->pwwn)		\
+
 #define bfa_itnim_sler_cb(__itnim) do {					\
 	if ((__itnim)->bfa->fcs)					\
 		bfa_cb_itnim_sler((__itnim)->ditn);      \
@@ -66,6 +77,18 @@ static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);
 	}								\
 } while (0)
 
+enum bfa_ioim_lm_status {
+	BFA_IOIM_LM_PRESENT = 1,
+	BFA_IOIM_LM_LUN_NOT_SUP = 2,
+	BFA_IOIM_LM_RPL_DATA_CHANGED = 3,
+	BFA_IOIM_LM_LUN_NOT_RDY = 4,
+};
+
+enum bfa_ioim_lm_ua_status {
+	BFA_IOIM_LM_UA_RESET = 0,
+	BFA_IOIM_LM_UA_SET = 1,
+};
+
 /*
  *  itnim state machine event
  */
@@ -122,6 +145,9 @@ enum bfa_ioim_event {
 	BFA_IOIM_SM_TMDONE	= 16,	/*  IO cleanup from tskim */
 	BFA_IOIM_SM_HWFAIL	= 17,	/*  IOC h/w failure event */
 	BFA_IOIM_SM_IOTOV	= 18,	/*  ITN offline TOV */
+	BFA_IOIM_SM_LM_LUN_NOT_SUP = 19,/*  lunmask lun not supported */
+	BFA_IOIM_SM_LM_RPL_DC = 20,	/*  lunmask report-lun data changed */
+	BFA_IOIM_SM_LM_LUN_NOT_RDY = 21,/*  lunmask lun not ready */
 };
 
 
@@ -219,6 +245,9 @@ static void __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete);
 static void __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete);
 static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete);
 static bfa_boolean_t    bfa_ioim_is_abortable(struct bfa_ioim_s *ioim);
+static void __bfa_cb_ioim_lm_lun_not_sup(void *cbarg, bfa_boolean_t complete);
+static void __bfa_cb_ioim_lm_rpl_dc(void *cbarg, bfa_boolean_t complete);
+static void __bfa_cb_ioim_lm_lun_not_rdy(void *cbarg, bfa_boolean_t complete);
 
 /*
  * forward declaration of BFA IO state machine
@@ -416,6 +445,12 @@ bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *lstats,
 	bfa_fcpim_add_iostats(lstats, rstats, output_reqs);
 	bfa_fcpim_add_iostats(lstats, rstats, rd_throughput);
 	bfa_fcpim_add_iostats(lstats, rstats, wr_throughput);
+	bfa_fcpim_add_iostats(lstats, rstats, lm_lun_across_sg);
+	bfa_fcpim_add_iostats(lstats, rstats, lm_lun_not_sup);
+	bfa_fcpim_add_iostats(lstats, rstats, lm_rpl_data_changed);
+	bfa_fcpim_add_iostats(lstats, rstats, lm_wire_residue_changed);
+	bfa_fcpim_add_iostats(lstats, rstats, lm_small_buf_addresidue);
+	bfa_fcpim_add_iostats(lstats, rstats, lm_lun_not_rdy);
 }
 
 bfa_status_t
@@ -437,6 +472,59 @@ bfa_fcpim_port_iostats(struct bfa_s *bfa,
 	return BFA_STATUS_OK;
 }
 
+void
+bfa_ioim_profile_comp(struct bfa_ioim_s *ioim)
+{
+	struct bfa_itnim_latency_s *io_lat =
+			&(ioim->itnim->ioprofile.io_latency);
+	u32 val, idx;
+
+	val = (u32)(jiffies - ioim->start_time);
+	idx = bfa_ioim_get_index(scsi_bufflen((struct scsi_cmnd *)ioim->dio));
+	bfa_itnim_ioprofile_update(ioim->itnim, idx);
+
+	io_lat->count[idx]++;
+	io_lat->min[idx] = (io_lat->min[idx] < val) ? io_lat->min[idx] : val;
+	io_lat->max[idx] = (io_lat->max[idx] > val) ? io_lat->max[idx] : val;
+	io_lat->avg[idx] += val;
+}
+
+void
+bfa_ioim_profile_start(struct bfa_ioim_s *ioim)
+{
+	ioim->start_time = jiffies;
+}
+
+bfa_status_t
+bfa_fcpim_profile_on(struct bfa_s *bfa, u32 time)
+{
+	struct bfa_itnim_s *itnim;
+	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
+	struct list_head *qe, *qen;
+
+	/* accumulate IO stats from itnim */
+	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
+		itnim = (struct bfa_itnim_s *) qe;
+		bfa_itnim_clear_stats(itnim);
+	}
+	fcpim->io_profile = BFA_TRUE;
+	fcpim->io_profile_start_time = time;
+	fcpim->profile_comp = bfa_ioim_profile_comp;
+	fcpim->profile_start = bfa_ioim_profile_start;
+	return BFA_STATUS_OK;
+}
+
+bfa_status_t
+bfa_fcpim_profile_off(struct bfa_s *bfa)
+{
+	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
+	fcpim->io_profile = BFA_FALSE;
+	fcpim->io_profile_start_time = 0;
+	fcpim->profile_comp = NULL;
+	fcpim->profile_start = NULL;
+	return BFA_STATUS_OK;
+}
+
 u16
 bfa_fcpim_qdepth_get(struct bfa_s *bfa)
 {
@@ -1401,6 +1489,26 @@ bfa_itnim_hold_io(struct bfa_itnim_s *itnim)
 		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_iocdisable));
 }
 
+#define bfa_io_lat_clock_res_div	HZ
+#define bfa_io_lat_clock_res_mul	1000
+bfa_status_t
+bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim,
+			struct bfa_itnim_ioprofile_s *ioprofile)
+{
+	struct bfa_fcpim_s *fcpim = BFA_FCPIM(itnim->bfa);
+	if (!fcpim->io_profile)
+		return BFA_STATUS_IOPROFILE_OFF;
+
+	itnim->ioprofile.index = BFA_IOBUCKET_MAX;
+	itnim->ioprofile.io_profile_start_time =
+				bfa_io_profile_start_time(itnim->bfa);
+	itnim->ioprofile.clock_res_mul = bfa_io_lat_clock_res_mul;
+	itnim->ioprofile.clock_res_div = bfa_io_lat_clock_res_div;
+	*ioprofile = itnim->ioprofile;
+
+	return BFA_STATUS_OK;
+}
+
 void
 bfa_itnim_clear_stats(struct bfa_itnim_s *itnim)
 {
@@ -1469,7 +1577,28 @@ bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
 		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
 		WARN_ON(!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim));
 		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
-				__bfa_cb_ioim_abort, ioim);
+			__bfa_cb_ioim_abort, ioim);
+		break;
+
+	case BFA_IOIM_SM_LM_LUN_NOT_SUP:
+		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+		bfa_ioim_move_to_comp_q(ioim);
+		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
+			__bfa_cb_ioim_lm_lun_not_sup, ioim);
+		break;
+
+	case BFA_IOIM_SM_LM_RPL_DC:
+		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+		bfa_ioim_move_to_comp_q(ioim);
+		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
+				__bfa_cb_ioim_lm_rpl_dc, ioim);
+		break;
+
+	case BFA_IOIM_SM_LM_LUN_NOT_RDY:
+		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+		bfa_ioim_move_to_comp_q(ioim);
+		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
+			__bfa_cb_ioim_lm_lun_not_rdy, ioim);
 		break;
 
 	default:
@@ -2009,6 +2138,264 @@ bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
 	}
 }
 
+/*
+ * This is called from bfa_fcp_start() once the flash read done during
+ * bfa_init() has completed. Invalidate the stale LUN mask state:
+ * unit attention, rp tag and lp tag.
+ */
+static void
+bfa_ioim_lm_init(struct bfa_s *bfa)
+{
+	struct bfa_lun_mask_s *lunm_list;
+	int	i;
+
+	if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
+		return;
+
+	lunm_list = bfa_get_lun_mask_list(bfa);
+	for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
+		lunm_list[i].ua = BFA_IOIM_LM_UA_RESET;
+		lunm_list[i].lp_tag = BFA_LP_TAG_INVALID;
+		lunm_list[i].rp_tag = BFA_RPORT_TAG_INVALID;
+	}
+}
+
+/*
+ * Validate LUN for LUN masking
+ */
+static enum bfa_ioim_lm_status
+bfa_ioim_lm_check(struct bfa_ioim_s *ioim, struct bfa_lps_s *lps,
+		struct bfa_rport_s *rp, struct scsi_lun lun)
+{
+	u8 i;
+	struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(ioim->bfa);
+	struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
+	struct scsi_cdb_s *cdb = (struct scsi_cdb_s *)cmnd->cmnd;
+
+	if ((cdb->scsi_cdb[0] == REPORT_LUNS) &&
+	    (scsilun_to_int((struct scsi_lun *)&lun) == 0)) {
+		ioim->proc_rsp_data = bfa_ioim_lm_proc_rpl_data;
+		return BFA_IOIM_LM_PRESENT;
+	}
+
+	for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
+
+		if (lun_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
+			continue;
+
+		if ((scsilun_to_int((struct scsi_lun *)&lun_list[i].lun) ==
+		    scsilun_to_int((struct scsi_lun *)&lun))
+		    && (rp->rport_tag == lun_list[i].rp_tag)
+		    && ((u8)ioim->itnim->rport->rport_info.lp_tag ==
+						lun_list[i].lp_tag)) {
+			bfa_trc(ioim->bfa, lun_list[i].rp_tag);
+			bfa_trc(ioim->bfa, lun_list[i].lp_tag);
+			bfa_trc(ioim->bfa, scsilun_to_int(
+				(struct scsi_lun *)&lun_list[i].lun));
+
+			if ((lun_list[i].ua == BFA_IOIM_LM_UA_SET) &&
+			    ((cdb->scsi_cdb[0] != INQUIRY) &&
+			    (cdb->scsi_cdb[0] != REPORT_LUNS))) {
+				lun_list[i].ua = BFA_IOIM_LM_UA_RESET;
+				return BFA_IOIM_LM_RPL_DATA_CHANGED;
+			}
+
+			if (cdb->scsi_cdb[0] == REPORT_LUNS)
+				ioim->proc_rsp_data = bfa_ioim_lm_proc_rpl_data;
+
+			return BFA_IOIM_LM_PRESENT;
+		}
+	}
+
+	if ((cdb->scsi_cdb[0] == INQUIRY) &&
+	    (scsilun_to_int((struct scsi_lun *)&lun) == 0)) {
+		ioim->proc_rsp_data = bfa_ioim_lm_proc_inq_data;
+		return BFA_IOIM_LM_PRESENT;
+	}
+
+	if (cdb->scsi_cdb[0] == TEST_UNIT_READY)
+		return BFA_IOIM_LM_LUN_NOT_RDY;
+
+	return BFA_IOIM_LM_LUN_NOT_SUP;
+}
+
+static bfa_boolean_t
+bfa_ioim_lm_proc_rsp_data_dummy(struct bfa_ioim_s *ioim)
+{
+	return BFA_TRUE;
+}
+
+static void
+bfa_ioim_lm_fetch_lun(struct bfa_ioim_s *ioim, u8 *rl_data, int offset,
+		int buf_lun_cnt)
+{
+	struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(ioim->bfa);
+	struct scsi_lun *lun_data = (struct scsi_lun *)(rl_data + offset);
+	struct scsi_lun lun;
+	int i, j;
+
+	bfa_trc(ioim->bfa, buf_lun_cnt);
+	for (j = 0; j < buf_lun_cnt; j++) {
+		lun = *((struct scsi_lun *)(lun_data + j));
+		for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
+			if (lun_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
+				continue;
+			if ((lun_list[i].rp_wwn == bfa_ioim_rp_wwn(ioim)) &&
+			    (lun_list[i].lp_wwn == bfa_ioim_lp_wwn(ioim)) &&
+			    (scsilun_to_int((struct scsi_lun *)&lun_list[i].lun)
+				== scsilun_to_int((struct scsi_lun *)&lun))) {
+				lun_list[i].state = BFA_IOIM_LUN_MASK_FETCHED;
+				break;
+			}
+		} /* next lun in mask DB */
+	} /* next lun in buf */
+}
+
+static int
+bfa_ioim_lm_update_lun_sg(struct bfa_ioim_s *ioim, u32 *pgdlen,
+		struct scsi_report_luns_data_s *rl)
+{
+	struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
+	struct scatterlist *sg = scsi_sglist(cmnd);
+	struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(ioim->bfa);
+	struct scsi_lun *prev_rl_data = NULL, *base_rl_data;
+	int i, j, sgeid, lun_fetched_cnt = 0, prev_sg_len = 0, base_count;
+	int lun_across_sg_bytes, bytes_from_next_buf;
+	u64	last_lun = 0, temp_last_lun;
+
+	/* fetch luns from the first sg element */
+	bfa_ioim_lm_fetch_lun(ioim, (u8 *)(rl->lun), 0,
+			(sg_dma_len(sg) / sizeof(struct scsi_lun)) - 1);
+
+	/* fetch luns from multiple sg elements */
+	scsi_for_each_sg(cmnd, sg, scsi_sg_count(cmnd), sgeid) {
+		if (sgeid == 0) {
+			prev_sg_len = sg_dma_len(sg);
+			prev_rl_data = (struct scsi_lun *)
+					phys_to_virt(sg_dma_address(sg));
+			continue;
+		}
+
+		/* if the buf is having more data */
+		lun_across_sg_bytes = prev_sg_len % sizeof(struct scsi_lun);
+		if (lun_across_sg_bytes) {
+			bfa_trc(ioim->bfa, lun_across_sg_bytes);
+			bfa_stats(ioim->itnim, lm_lun_across_sg);
+			bytes_from_next_buf = sizeof(struct scsi_lun) -
+					      lun_across_sg_bytes;
+
+			/* from next buf take higher bytes */
+			temp_last_lun = *((u64 *)
+					  phys_to_virt(sg_dma_address(sg)));
+			last_lun |= temp_last_lun >>
+				    (lun_across_sg_bytes * BITS_PER_BYTE);
+
+			/* from prev buf take higher bytes */
+			temp_last_lun = *((u64 *)(prev_rl_data +
+					  (prev_sg_len - lun_across_sg_bytes)));
+			temp_last_lun >>= bytes_from_next_buf * BITS_PER_BYTE;
+			last_lun = last_lun | (temp_last_lun <<
+				   (bytes_from_next_buf * BITS_PER_BYTE));
+
+			bfa_ioim_lm_fetch_lun(ioim, (u8 *)&last_lun, 0, 1);
+		} else
+			bytes_from_next_buf = 0;
+
+		*pgdlen += sg_dma_len(sg);
+		prev_sg_len = sg_dma_len(sg);
+		prev_rl_data = (struct scsi_lun *)
+				phys_to_virt(sg_dma_address(sg));
+		bfa_ioim_lm_fetch_lun(ioim, (u8 *)prev_rl_data,
+				bytes_from_next_buf,
+				sg_dma_len(sg) / sizeof(struct scsi_lun));
+	}
+
+	/* update the report luns data - based on fetched luns */
+	sg = scsi_sglist(cmnd);
+	base_rl_data = (struct scsi_lun *)rl->lun;
+	base_count = (sg_dma_len(sg) / sizeof(struct scsi_lun)) - 1;
+	for (i = 0, j = 0; i < MAX_LUN_MASK_CFG; i++) {
+		if (lun_list[i].state == BFA_IOIM_LUN_MASK_FETCHED) {
+			base_rl_data[j] = lun_list[i].lun;
+			lun_list[i].state = BFA_IOIM_LUN_MASK_ACTIVE;
+			j++;
+			lun_fetched_cnt++;
+		}
+
+		if (j > base_count) {
+			j = 0;
+			sg = sg_next(sg);
+			base_rl_data = (struct scsi_lun *)
+					phys_to_virt(sg_dma_address(sg));
+			base_count = sg_dma_len(sg) / sizeof(struct scsi_lun);
+		}
+	}
+
+	bfa_trc(ioim->bfa, lun_fetched_cnt);
+	return lun_fetched_cnt;
+}
+
+static bfa_boolean_t
+bfa_ioim_lm_proc_inq_data(struct bfa_ioim_s *ioim)
+{
+	struct scsi_inquiry_data_s *inq;
+	struct scatterlist *sg = scsi_sglist((struct scsi_cmnd *)ioim->dio);
+
+	ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
+	inq = (struct scsi_inquiry_data_s *)phys_to_virt(sg_dma_address(sg));
+
+	bfa_trc(ioim->bfa, inq->device_type);
+	inq->peripheral_qual = SCSI_INQ_PQ_NOT_CON;
+	return 0;
+}
+
+static bfa_boolean_t
+bfa_ioim_lm_proc_rpl_data(struct bfa_ioim_s *ioim)
+{
+	struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
+	struct scatterlist *sg = scsi_sglist(cmnd);
+	struct bfi_ioim_rsp_s *m;
+	struct scsi_report_luns_data_s *rl = NULL;
+	int lun_count = 0, lun_fetched_cnt = 0;
+	u32 residue, pgdlen = 0;
+
+	ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
+	if (bfa_get_lun_mask_status(ioim->bfa) != BFA_LUNMASK_ENABLED)
+		return BFA_TRUE;
+
+	m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg;
+	if (m->scsi_status == SCSI_STATUS_CHECK_CONDITION)
+		return BFA_TRUE;
+
+	pgdlen = sg_dma_len(sg);
+	bfa_trc(ioim->bfa, pgdlen);
+	rl = (struct scsi_report_luns_data_s *)phys_to_virt(sg_dma_address(sg));
+	lun_count = cpu_to_be32(rl->lun_list_length) / sizeof(struct scsi_lun);
+	lun_fetched_cnt = bfa_ioim_lm_update_lun_sg(ioim, &pgdlen, rl);
+
+	if (lun_count == lun_fetched_cnt)
+		return BFA_TRUE;
+
+	bfa_trc(ioim->bfa, lun_count);
+	bfa_trc(ioim->bfa, lun_fetched_cnt);
+	bfa_trc(ioim->bfa, be32_to_cpu(rl->lun_list_length));
+
+	if (be32_to_cpu(rl->lun_list_length) <= pgdlen)
+		rl->lun_list_length = be32_to_cpu(lun_fetched_cnt) *
+				      sizeof(struct scsi_lun);
+	else
+		bfa_stats(ioim->itnim, lm_small_buf_addresidue);
+
+	bfa_trc(ioim->bfa, be32_to_cpu(rl->lun_list_length));
+	bfa_trc(ioim->bfa, be32_to_cpu(m->residue));
+
+	residue = be32_to_cpu(m->residue);
+	residue += (lun_count - lun_fetched_cnt) * sizeof(struct scsi_lun);
+	bfa_stats(ioim->itnim, lm_wire_residue_changed);
+	m->residue = be32_to_cpu(residue);
+	bfa_trc(ioim->bfa, ioim->nsges);
+	return BFA_FALSE;
+}
 
 static void
 __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete)
@@ -2068,6 +2455,299 @@ __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete)
 }
 
 static void
+__bfa_cb_ioim_lm_lun_not_sup(void *cbarg, bfa_boolean_t complete)
+{
+	struct bfa_ioim_s *ioim = cbarg;
+	int sns_len = 0xD;
+	u32 residue = scsi_bufflen((struct scsi_cmnd *)ioim->dio);
+	struct scsi_sense_s *snsinfo;
+
+	if (!complete) {
+		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
+		return;
+	}
+
+	snsinfo = (struct scsi_sense_s *)BFA_SNSINFO_FROM_TAG(
+					ioim->fcpim->fcp, ioim->iotag);
+	snsinfo->rsp_code = SCSI_SENSE_CUR_ERR;
+	snsinfo->add_sense_length = 0xa;
+	snsinfo->asc = SCSI_ASC_LUN_NOT_SUPPORTED;
+	snsinfo->sense_key = ILLEGAL_REQUEST;
+	bfa_trc(ioim->bfa, residue);
+	bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_OK,
+			SCSI_STATUS_CHECK_CONDITION, sns_len,
+			(u8 *)snsinfo, residue);
+}
+
+static void
+__bfa_cb_ioim_lm_rpl_dc(void *cbarg, bfa_boolean_t complete)
+{
+	struct bfa_ioim_s *ioim = cbarg;
+	int sns_len = 0xD;
+	u32 residue = scsi_bufflen((struct scsi_cmnd *)ioim->dio);
+	struct scsi_sense_s *snsinfo;
+
+	if (!complete) {
+		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
+		return;
+	}
+
+	snsinfo = (struct scsi_sense_s *)BFA_SNSINFO_FROM_TAG(ioim->fcpim->fcp,
+						       ioim->iotag);
+	snsinfo->rsp_code = SCSI_SENSE_CUR_ERR;
+	snsinfo->sense_key = SCSI_MP_IEC_UNIT_ATTN;
+	snsinfo->asc = SCSI_ASC_TOCC;
+	snsinfo->add_sense_length = 0x6;
+	snsinfo->ascq = SCSI_ASCQ_RL_DATA_CHANGED;
+	bfa_trc(ioim->bfa, residue);
+	bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_OK,
+			SCSI_STATUS_CHECK_CONDITION, sns_len,
+			(u8 *)snsinfo, residue);
+}
+
+static void
+__bfa_cb_ioim_lm_lun_not_rdy(void *cbarg, bfa_boolean_t complete)
+{
+	struct bfa_ioim_s *ioim = cbarg;
+	int sns_len = 0xD;
+	u32 residue = scsi_bufflen((struct scsi_cmnd *)ioim->dio);
+	struct scsi_sense_s *snsinfo;
+
+	if (!complete) {
+		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
+		return;
+	}
+
+	snsinfo = (struct scsi_sense_s *)BFA_SNSINFO_FROM_TAG(
+					ioim->fcpim->fcp, ioim->iotag);
+	snsinfo->rsp_code = SCSI_SENSE_CUR_ERR;
+	snsinfo->add_sense_length = 0xa;
+	snsinfo->sense_key = NOT_READY;
+	snsinfo->asc = SCSI_ASC_LUN_NOT_READY;
+	snsinfo->ascq = SCSI_ASCQ_MAN_INTR_REQ;
+	bfa_trc(ioim->bfa, residue);
+	bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_OK,
+			SCSI_STATUS_CHECK_CONDITION, sns_len,
+			(u8 *)snsinfo, residue);
+}
+
+void
+bfa_fcpim_lunmask_rp_update(struct bfa_s *bfa, wwn_t lp_wwn, wwn_t rp_wwn,
+			u16 rp_tag, u8 lp_tag)
+{
+	struct bfa_lun_mask_s *lun_list;
+	u8	i;
+
+	if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
+		return;
+
+	lun_list = bfa_get_lun_mask_list(bfa);
+	for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
+		if (lun_list[i].state == BFA_IOIM_LUN_MASK_ACTIVE) {
+			if ((lun_list[i].lp_wwn == lp_wwn) &&
+			    (lun_list[i].rp_wwn == rp_wwn)) {
+				lun_list[i].rp_tag = rp_tag;
+				lun_list[i].lp_tag = lp_tag;
+			}
+		}
+	}
+}
+
+/*
+ * set UA for all active luns in LM DB
+ */
+static void
+bfa_ioim_lm_set_ua(struct bfa_s *bfa)
+{
+	struct bfa_lun_mask_s	*lunm_list;
+	int	i;
+
+	lunm_list = bfa_get_lun_mask_list(bfa);
+	for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
+		if (lunm_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
+			continue;
+		lunm_list[i].ua = BFA_IOIM_LM_UA_SET;
+	}
+}
+
+bfa_status_t
+bfa_fcpim_lunmask_update(struct bfa_s *bfa, u32 update)
+{
+	struct bfa_lunmask_cfg_s	*lun_mask;
+
+	bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
+	if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
+		return BFA_STATUS_FAILED;
+
+	if (bfa_get_lun_mask_status(bfa) == update)
+		return BFA_STATUS_NO_CHANGE;
+
+	lun_mask = bfa_get_lun_mask(bfa);
+	lun_mask->status = update;
+
+	if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_ENABLED)
+		bfa_ioim_lm_set_ua(bfa);
+
+	return  bfa_dconf_update(bfa);
+}
+
+bfa_status_t
+bfa_fcpim_lunmask_clear(struct bfa_s *bfa)
+{
+	int i;
+	struct bfa_lun_mask_s	*lunm_list;
+
+	bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
+	if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
+		return BFA_STATUS_FAILED;
+
+	lunm_list = bfa_get_lun_mask_list(bfa);
+	for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
+		if (lunm_list[i].state == BFA_IOIM_LUN_MASK_ACTIVE) {
+			if (lunm_list[i].rp_tag != BFA_RPORT_TAG_INVALID)
+				bfa_rport_unset_lunmask(bfa,
+				  BFA_RPORT_FROM_TAG(bfa, lunm_list[i].rp_tag));
+		}
+	}
+
+	memset(lunm_list, 0, sizeof(struct bfa_lun_mask_s) * MAX_LUN_MASK_CFG);
+	return bfa_dconf_update(bfa);
+}
+
+bfa_status_t
+bfa_fcpim_lunmask_query(struct bfa_s *bfa, void *buf)
+{
+	struct bfa_lunmask_cfg_s *lun_mask;
+
+	bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
+	if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
+		return BFA_STATUS_FAILED;
+
+	lun_mask = bfa_get_lun_mask(bfa);
+	memcpy(buf, lun_mask, sizeof(struct bfa_lunmask_cfg_s));
+	return BFA_STATUS_OK;
+}
+
+bfa_status_t
+bfa_fcpim_lunmask_add(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn,
+		      wwn_t rpwwn, struct scsi_lun lun)
+{
+	struct bfa_lun_mask_s *lunm_list;
+	struct bfa_rport_s *rp = NULL;
+	int i, free_index = MAX_LUN_MASK_CFG + 1;
+	struct bfa_fcs_lport_s *port = NULL;
+	struct bfa_fcs_rport_s *rp_fcs;
+
+	bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
+	if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
+		return BFA_STATUS_FAILED;
+
+	port = bfa_fcs_lookup_port(&((struct bfad_s *)bfa->bfad)->bfa_fcs,
+				   vf_id, *pwwn);
+	if (port) {
+		*pwwn = port->port_cfg.pwwn;
+		rp_fcs = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn);
+		rp = rp_fcs->bfa_rport;
+	}
+
+	lunm_list = bfa_get_lun_mask_list(bfa);
+	/* if entry exists */
+	for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
+		if (lunm_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
+			free_index = i;
+		if ((lunm_list[i].lp_wwn == *pwwn) &&
+		    (lunm_list[i].rp_wwn == rpwwn) &&
+		    (scsilun_to_int((struct scsi_lun *)&lunm_list[i].lun) ==
+		     scsilun_to_int((struct scsi_lun *)&lun)))
+			return  BFA_STATUS_ENTRY_EXISTS;
+	}
+
+	if (free_index > MAX_LUN_MASK_CFG)
+		return BFA_STATUS_MAX_ENTRY_REACHED;
+
+	if (rp) {
+		lunm_list[free_index].lp_tag = bfa_lps_get_tag_from_pid(bfa,
+						   rp->rport_info.local_pid);
+		lunm_list[free_index].rp_tag = rp->rport_tag;
+	} else {
+		lunm_list[free_index].lp_tag = BFA_LP_TAG_INVALID;
+		lunm_list[free_index].rp_tag = BFA_RPORT_TAG_INVALID;
+	}
+
+	lunm_list[free_index].lp_wwn = *pwwn;
+	lunm_list[free_index].rp_wwn = rpwwn;
+	lunm_list[free_index].lun = lun;
+	lunm_list[free_index].state = BFA_IOIM_LUN_MASK_ACTIVE;
+
+	/* set for all luns in this rp */
+	for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
+		if ((lunm_list[i].lp_wwn == *pwwn) &&
+		    (lunm_list[i].rp_wwn == rpwwn))
+			lunm_list[i].ua = BFA_IOIM_LM_UA_SET;
+	}
+
+	return bfa_dconf_update(bfa);
+}
+
+bfa_status_t
+bfa_fcpim_lunmask_delete(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn,
+			 wwn_t rpwwn, struct scsi_lun lun)
+{
+	struct bfa_lun_mask_s	*lunm_list;
+	struct bfa_rport_s	*rp = NULL;
+	struct bfa_fcs_lport_s *port = NULL;
+	struct bfa_fcs_rport_s *rp_fcs;
+	int	i;
+
+	/* In min cfg lunm_list could be NULL, but no commands should run. */
+	if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
+		return BFA_STATUS_FAILED;
+
+	bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
+	bfa_trc(bfa, *pwwn);
+	bfa_trc(bfa, rpwwn);
+	bfa_trc(bfa, scsilun_to_int((struct scsi_lun *)&lun));
+
+	if (*pwwn == 0) {
+		port = bfa_fcs_lookup_port(
+				&((struct bfad_s *)bfa->bfad)->bfa_fcs,
+				vf_id, *pwwn);
+		if (port) {
+			*pwwn = port->port_cfg.pwwn;
+			rp_fcs = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn);
+			rp = rp_fcs->bfa_rport;
+		}
+	}
+
+	lunm_list = bfa_get_lun_mask_list(bfa);
+	for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
+		if ((lunm_list[i].lp_wwn == *pwwn) &&
+		    (lunm_list[i].rp_wwn == rpwwn) &&
+		    (scsilun_to_int((struct scsi_lun *)&lunm_list[i].lun) ==
+		     scsilun_to_int((struct scsi_lun *)&lun))) {
+			lunm_list[i].lp_wwn = 0;
+			lunm_list[i].rp_wwn = 0;
+			int_to_scsilun(0, &lunm_list[i].lun);
+			lunm_list[i].state = BFA_IOIM_LUN_MASK_INACTIVE;
+			if (lunm_list[i].rp_tag != BFA_RPORT_TAG_INVALID) {
+				lunm_list[i].rp_tag = BFA_RPORT_TAG_INVALID;
+				lunm_list[i].lp_tag = BFA_LP_TAG_INVALID;
+			}
+			return bfa_dconf_update(bfa);
+		}
+	}
+
+	/* set for all luns in this rp */
+	for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
+		if ((lunm_list[i].lp_wwn == *pwwn) &&
+		    (lunm_list[i].rp_wwn == rpwwn))
+			lunm_list[i].ua = BFA_IOIM_LM_UA_SET;
+	}
+
+	return BFA_STATUS_ENTRY_NOT_EXISTS;
+}
+
+static void
 __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete)
 {
 	struct bfa_ioim_s *ioim = cbarg;
@@ -2077,6 +2757,7 @@ __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete)
 		return;
 	}
 
+	ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
 	bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_ABORTED,
 			  0, 0, NULL, 0);
 }
@@ -2092,6 +2773,7 @@ __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete)
 		return;
 	}
 
+	ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
 	bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_PATHTOV,
 			  0, 0, NULL, 0);
 }
@@ -2106,6 +2788,7 @@ __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete)
 		return;
 	}
 
+	ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
 	bfa_cb_ioim_abort(ioim->bfa->bfad, ioim->dio);
 }
 
@@ -2449,6 +3132,7 @@ bfa_ioim_attach(struct bfa_fcpim_s *fcpim)
 		ioim->bfa     = fcpim->bfa;
 		ioim->fcpim   = fcpim;
 		ioim->iosp    = iosp;
+		ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
 		INIT_LIST_HEAD(&ioim->sgpg_q);
 		bfa_reqq_winit(&ioim->iosp->reqq_wait,
 				   bfa_ioim_qresume, ioim);
@@ -2486,6 +3170,7 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
 			evt = BFA_IOIM_SM_DONE;
 		else
 			evt = BFA_IOIM_SM_COMP;
+		ioim->proc_rsp_data(ioim);
 		break;
 
 	case BFI_IOIM_STS_TIMEDOUT:
@@ -2521,6 +3206,7 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
 		if (rsp->abort_tag != ioim->abort_tag) {
 			bfa_trc(ioim->bfa, rsp->abort_tag);
 			bfa_trc(ioim->bfa, ioim->abort_tag);
+			ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
 			return;
 		}
 
@@ -2539,6 +3225,7 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
 		WARN_ON(1);
 	}
 
+	ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
 	bfa_sm_send_event(ioim, evt);
 }
 
@@ -2556,7 +3243,16 @@ bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
 	WARN_ON(BFA_IOIM_TAG_2_ID(ioim->iotag) != iotag);
 
 	bfa_ioim_cb_profile_comp(fcpim, ioim);
-	bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
+
+	if (bfa_get_lun_mask_status(bfa) != BFA_LUNMASK_ENABLED)  {
+		bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
+		return;
+	}
+
+	if (ioim->proc_rsp_data(ioim) == BFA_TRUE)
+		bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
+	else
+		bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP);
 }
 
 /*
@@ -2668,6 +3364,35 @@ bfa_ioim_free(struct bfa_ioim_s *ioim)
 void
 bfa_ioim_start(struct bfa_ioim_s *ioim)
 {
+	struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
+	struct bfa_lps_s	*lps;
+	enum bfa_ioim_lm_status status;
+	struct scsi_lun scsilun;
+
+	if (bfa_get_lun_mask_status(ioim->bfa) == BFA_LUNMASK_ENABLED) {
+		lps = BFA_IOIM_TO_LPS(ioim);
+		int_to_scsilun(cmnd->device->lun, &scsilun);
+		status = bfa_ioim_lm_check(ioim, lps,
+				ioim->itnim->rport, scsilun);
+		if (status == BFA_IOIM_LM_LUN_NOT_RDY) {
+			bfa_sm_send_event(ioim, BFA_IOIM_SM_LM_LUN_NOT_RDY);
+			bfa_stats(ioim->itnim, lm_lun_not_rdy);
+			return;
+		}
+
+		if (status == BFA_IOIM_LM_LUN_NOT_SUP) {
+			bfa_sm_send_event(ioim, BFA_IOIM_SM_LM_LUN_NOT_SUP);
+			bfa_stats(ioim->itnim, lm_lun_not_sup);
+			return;
+		}
+
+		if (status == BFA_IOIM_LM_RPL_DATA_CHANGED) {
+			bfa_sm_send_event(ioim, BFA_IOIM_SM_LM_RPL_DC);
+			bfa_stats(ioim->itnim, lm_rpl_data_changed);
+			return;
+		}
+	}
+
 	bfa_ioim_cb_profile_start(ioim->fcpim, ioim);
 
 	/*
@@ -3411,6 +4136,13 @@ bfa_fcp_detach(struct bfa_s *bfa)
 static void
 bfa_fcp_start(struct bfa_s *bfa)
 {
+	struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
+
+	/*
+	 * The flash read done during bfa_init() is complete. Now invalidate
+	 * the stale LUN mask state: unit attention, rp tag and lp tag.
+	 */
+	bfa_ioim_lm_init(fcp->bfa);
 }
 
 static void
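
A worked example (numbers are illustrative only, byte-order handling omitted) of the REPORT LUNS rewrite done by bfa_ioim_lm_proc_rpl_data() above. lun_list_length counts bytes, one struct scsi_lun (8 bytes) per entry, so if the target reports 4 LUNs and only 2 survive the LUN mask:

	lun_count       = 32 / sizeof(struct scsi_lun);		/* = 4 */
	lun_fetched_cnt = 2;					/* pass the mask */

	/* response data is rewritten to hold the fetched entries only */
	rl->lun_list_length = 2 * sizeof(struct scsi_lun);	/* 32 -> 16 */

	/* and the wire residue grows by the bytes that were dropped */
	residue += (4 - 2) * sizeof(struct scsi_lun);		/* +16 */

The initiator therefore sees a consistent, shorter LUN list, and the dropped entries show up in the lm_wire_residue_changed / lm_small_buf_addresidue counters added to bfa_itnim_iostats_s.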
diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
index 57b695ad4ee5..1080bcb81cb7 100644
--- a/drivers/scsi/bfa/bfa_fcpim.h
+++ b/drivers/scsi/bfa/bfa_fcpim.h
@@ -79,14 +79,22 @@ bfa_ioim_get_index(u32 n) {
 	if (n >= (1UL)<<22)
 		return BFA_IOBUCKET_MAX - 1;
 	n >>= 8;
-	if (n >= (1UL)<<16)
-		n >>= 16; pos += 16;
-	if (n >= 1 << 8)
-		n >>= 8; pos += 8;
-	if (n >= 1 << 4)
-		n >>= 4; pos += 4;
-	if (n >= 1 << 2)
-		n >>= 2; pos += 2;
+	if (n >= (1UL)<<16) {
+		n >>= 16;
+		pos += 16;
+	}
+	if (n >= 1 << 8) {
+		n >>= 8;
+		pos += 8;
+	}
+	if (n >= 1 << 4) {
+		n >>= 4;
+		pos += 4;
+	}
+	if (n >= 1 << 2) {
+		n >>= 2;
+		pos += 2;
+	}
 	if (n >= 1 << 1)
 		pos += 1;
 
@@ -102,6 +110,7 @@ struct bfad_ioim_s;
 struct bfad_tskim_s;
 
 typedef void    (*bfa_fcpim_profile_t) (struct bfa_ioim_s *ioim);
+typedef bfa_boolean_t (*bfa_ioim_lm_proc_rsp_data_t) (struct bfa_ioim_s *ioim);
 
 struct bfa_fcpim_s {
 	struct bfa_s		*bfa;
@@ -115,7 +124,7 @@ struct bfa_fcpim_s {
 	u32			path_tov;
 	u16			q_depth;
 	u8			reqq;		/*  Request queue to be used */
-	u8			rsvd;
+	u8			lun_masking_pending;
 	struct list_head	itnim_q;	/*  queue of active itnim */
 	struct list_head	ioim_resfree_q; /*  IOs waiting for f/w */
 	struct list_head	ioim_comp_q;	/*  IO global comp Q	*/
@@ -170,7 +179,9 @@ struct bfa_ioim_s {
 	bfa_cb_cbfn_t		io_cbfn;	/*  IO completion handler */
 	struct bfa_ioim_sp_s	*iosp;		/*  slow-path IO handling */
 	u8			reqq;		/*  Request queue for I/O */
+	u8			mode;		/*  IO is passthrough or not */
 	u64			start_time;	/*  IO's Profile start val */
+	bfa_ioim_lm_proc_rsp_data_t proc_rsp_data; /* RSP data adjust */
 };
 
 struct bfa_ioim_sp_s {
@@ -250,6 +261,10 @@ struct bfa_itnim_s {
 	(__ioim)->iotag |= k << BFA_IOIM_RETRY_TAG_OFFSET;		\
 } while (0)
 
+#define BFA_IOIM_TO_LPS(__ioim)		\
+	BFA_LPS_FROM_TAG(BFA_LPS_MOD(__ioim->bfa),	\
+		__ioim->itnim->rport->rport_info.lp_tag)
+
 static inline bfa_boolean_t
 bfa_ioim_maxretry_reached(struct bfa_ioim_s *ioim)
 {
@@ -297,6 +312,8 @@ bfa_status_t bfa_fcpim_port_iostats(struct bfa_s *bfa,
 			struct bfa_itnim_iostats_s *stats, u8 lp_tag);
 void bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *fcpim_stats,
 			struct bfa_itnim_iostats_s *itnim_stats);
+bfa_status_t bfa_fcpim_profile_on(struct bfa_s *bfa, u32 time);
+bfa_status_t bfa_fcpim_profile_off(struct bfa_s *bfa);
 
 #define bfa_fcpim_ioredirect_enabled(__bfa)				\
 	(((struct bfa_fcpim_s *)(BFA_FCPIM(__bfa)))->ioredirect)
@@ -397,4 +414,14 @@ void bfa_tskim_start(struct bfa_tskim_s *tskim,
 void bfa_cb_tskim_done(void *bfad, struct bfad_tskim_s *dtsk,
 			enum bfi_tskim_status tsk_status);
 
+void	bfa_fcpim_lunmask_rp_update(struct bfa_s *bfa, wwn_t lp_wwn,
+			wwn_t rp_wwn, u16 rp_tag, u8 lp_tag);
+bfa_status_t	bfa_fcpim_lunmask_update(struct bfa_s *bfa, u32 on_off);
+bfa_status_t	bfa_fcpim_lunmask_query(struct bfa_s *bfa, void *buf);
+bfa_status_t	bfa_fcpim_lunmask_delete(struct bfa_s *bfa, u16 vf_id,
+				wwn_t *pwwn, wwn_t rpwwn, struct scsi_lun lun);
+bfa_status_t	bfa_fcpim_lunmask_add(struct bfa_s *bfa, u16 vf_id,
+				wwn_t *pwwn, wwn_t rpwwn, struct scsi_lun lun);
+bfa_status_t	bfa_fcpim_lunmask_clear(struct bfa_s *bfa);
+
 #endif /* __BFA_FCPIM_H__ */
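The bfa_ioim_get_index() change above only adds the braces the shifted statements always needed; the underlying computation is a shift-and-accumulate floor(log2()). A standalone version of just that computation, with the initial n >>= 8 and the BFA_IOBUCKET_MAX cap left out, is sketched below.

#include <stdio.h>

/* Hedged sketch of the shift-and-accumulate floor(log2(n)): each test
 * halves the remaining width and adds its contribution to the position. */
static unsigned int ilog2_u32(unsigned int n)
{
	unsigned int pos = 0;

	if (n >= 1u << 16) { n >>= 16; pos += 16; }
	if (n >= 1u << 8)  { n >>= 8;  pos += 8;  }
	if (n >= 1u << 4)  { n >>= 4;  pos += 4;  }
	if (n >= 1u << 2)  { n >>= 2;  pos += 2;  }
	if (n >= 1u << 1)  { pos += 1; }
	return pos;
}

int main(void)
{
	/* prints "0 7 16" */
	printf("%u %u %u\n", ilog2_u32(1), ilog2_u32(255), ilog2_u32(65536));
	return 0;
}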
diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
index a9b22bc48bc3..eaac57e1ddec 100644
--- a/drivers/scsi/bfa/bfa_fcs.c
+++ b/drivers/scsi/bfa/bfa_fcs.c
@@ -20,6 +20,7 @@
  */
 
 #include "bfad_drv.h"
+#include "bfad_im.h"
 #include "bfa_fcs.h"
 #include "bfa_fcbuild.h"
 
@@ -1327,6 +1328,29 @@ bfa_fcs_fabric_flogiacc_comp(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
 	bfa_trc(fabric->fcs, status);
 }
 
+
+/*
+ * Send AEN notification
+ */
+static void
+bfa_fcs_fabric_aen_post(struct bfa_fcs_lport_s *port,
+			enum bfa_port_aen_event event)
+{
+	struct bfad_s *bfad = (struct bfad_s *)port->fabric->fcs->bfad;
+	struct bfa_aen_entry_s  *aen_entry;
+
+	bfad_get_aen_entry(bfad, aen_entry);
+	if (!aen_entry)
+		return;
+
+	aen_entry->aen_data.port.pwwn = bfa_fcs_lport_get_pwwn(port);
+	aen_entry->aen_data.port.fwwn = bfa_fcs_lport_get_fabric_name(port);
+
+	/* Send the AEN notification */
+	bfad_im_post_vendor_event(aen_entry, bfad, ++port->fcs->fcs_aen_seq,
+				  BFA_AEN_CAT_PORT, event);
+}
+
 /*
  *
  * @param[in] fabric - fabric
@@ -1358,6 +1382,8 @@ bfa_fcs_fabric_set_fabric_name(struct bfa_fcs_fabric_s *fabric,
 		BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
 			"Base port WWN = %s Fabric WWN = %s\n",
 			pwwn_ptr, fwwn_ptr);
+		bfa_fcs_fabric_aen_post(&fabric->bport,
+				BFA_PORT_AEN_FABRIC_NAME_CHANGE);
 	}
 }
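bfa_fcs_fabric_aen_post() above follows the same pattern every other *_aen_post() in this patch does: take a preallocated entry, fill the event payload, and post it with an incrementing sequence number. A simplified, self-contained sketch of that pattern follows; the pool, the helpers and the payload layout are stand-ins for the bfad structures.

#include <stdio.h>

/* Hedged sketch of the AEN posting pattern (illustrative types only). */
struct aen_entry { int seq; int category; int event; char data[32]; };

static struct aen_entry pool[4];
static int pool_used;
static int aen_seq;

static struct aen_entry *get_aen_entry(void)
{
	return (pool_used < 4) ? &pool[pool_used++] : NULL;	/* pool may be empty */
}

static void post_vendor_event(struct aen_entry *e, int seq, int cat, int event)
{
	e->seq = seq;
	e->category = cat;
	e->event = event;
	printf("AEN #%d cat=%d event=%d %s\n", seq, cat, event, e->data);
}

static void fabric_aen_post(const char *pwwn, int cat, int event)
{
	struct aen_entry *e = get_aen_entry();

	if (!e)			/* no free entry: the event is dropped */
		return;
	snprintf(e->data, sizeof(e->data), "pwwn=%s", pwwn);
	post_vendor_event(e, ++aen_seq, cat, event);
}

int main(void)
{
	fabric_aen_post("20:00:00:05:1e:00:00:01", 1, 3);	/* made-up values */
	return 0;
}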
 
diff --git a/drivers/scsi/bfa/bfa_fcs.h b/drivers/scsi/bfa/bfa_fcs.h
index a5f1faf335a7..e75e07d25915 100644
--- a/drivers/scsi/bfa/bfa_fcs.h
+++ b/drivers/scsi/bfa/bfa_fcs.h
@@ -675,6 +675,7 @@ struct bfa_fcs_s {
 	struct bfa_fcs_fabric_s fabric; /*  base fabric state machine */
 	struct bfa_fcs_stats_s	stats;	/*  FCS statistics */
 	struct bfa_wc_s		wc;	/*  waiting counter */
+	int			fcs_aen_seq;
 };
 
 /*
diff --git a/drivers/scsi/bfa/bfa_fcs_fcpim.c b/drivers/scsi/bfa/bfa_fcs_fcpim.c
index 29b4108be269..9272840a2409 100644
--- a/drivers/scsi/bfa/bfa_fcs_fcpim.c
+++ b/drivers/scsi/bfa/bfa_fcs_fcpim.c
@@ -37,6 +37,8 @@ static void	bfa_fcs_itnim_prli_response(void *fcsarg,
 			 struct bfa_fcxp_s *fcxp, void *cbarg,
 			    bfa_status_t req_status, u32 rsp_len,
 			    u32 resid_len, struct fchs_s *rsp_fchs);
+static void	bfa_fcs_itnim_aen_post(struct bfa_fcs_itnim_s *itnim,
+			enum bfa_itnim_aen_event event);
 
 /*
  *  fcs_itnim_sm FCS itnim state machine events
@@ -269,6 +271,7 @@ bfa_fcs_itnim_sm_hcb_online(struct bfa_fcs_itnim_s *itnim,
 		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
 		"Target (WWN = %s) is online for initiator (WWN = %s)\n",
 		rpwwn_buf, lpwwn_buf);
+		bfa_fcs_itnim_aen_post(itnim, BFA_ITNIM_AEN_ONLINE);
 		break;
 
 	case BFA_FCS_ITNIM_SM_OFFLINE:
@@ -305,14 +308,17 @@ bfa_fcs_itnim_sm_online(struct bfa_fcs_itnim_s *itnim,
 		bfa_itnim_offline(itnim->bfa_itnim);
 		wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(itnim->rport->port));
 		wwn2str(rpwwn_buf, itnim->rport->pwwn);
-		if (bfa_fcs_lport_is_online(itnim->rport->port) == BFA_TRUE)
+		if (bfa_fcs_lport_is_online(itnim->rport->port) == BFA_TRUE) {
 			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
 			"Target (WWN = %s) connectivity lost for "
 			"initiator (WWN = %s)\n", rpwwn_buf, lpwwn_buf);
-		else
+			bfa_fcs_itnim_aen_post(itnim, BFA_ITNIM_AEN_DISCONNECT);
+		} else {
 			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
 			"Target (WWN = %s) offlined by initiator (WWN = %s)\n",
 			rpwwn_buf, lpwwn_buf);
+			bfa_fcs_itnim_aen_post(itnim, BFA_ITNIM_AEN_OFFLINE);
+		}
 		break;
 
 	case BFA_FCS_ITNIM_SM_DELETE:
@@ -382,6 +388,33 @@ bfa_fcs_itnim_sm_initiator(struct bfa_fcs_itnim_s *itnim,
 }
 
 static void
+bfa_fcs_itnim_aen_post(struct bfa_fcs_itnim_s *itnim,
+			enum bfa_itnim_aen_event event)
+{
+	struct bfa_fcs_rport_s *rport = itnim->rport;
+	struct bfad_s *bfad = (struct bfad_s *)itnim->fcs->bfad;
+	struct bfa_aen_entry_s	*aen_entry;
+
+	/* Don't post events for well known addresses */
+	if (BFA_FCS_PID_IS_WKA(rport->pid))
+		return;
+
+	bfad_get_aen_entry(bfad, aen_entry);
+	if (!aen_entry)
+		return;
+
+	aen_entry->aen_data.itnim.vf_id = rport->port->fabric->vf_id;
+	aen_entry->aen_data.itnim.ppwwn = bfa_fcs_lport_get_pwwn(
+					bfa_fcs_get_base_port(itnim->fcs));
+	aen_entry->aen_data.itnim.lpwwn = bfa_fcs_lport_get_pwwn(rport->port);
+	aen_entry->aen_data.itnim.rpwwn = rport->pwwn;
+
+	/* Send the AEN notification */
+	bfad_im_post_vendor_event(aen_entry, bfad, ++rport->fcs->fcs_aen_seq,
+				  BFA_AEN_CAT_ITNIM, event);
+}
+
+static void
 bfa_fcs_itnim_send_prli(void *itnim_cbarg, struct bfa_fcxp_s *fcxp_alloced)
 {
 	struct bfa_fcs_itnim_s *itnim = itnim_cbarg;
diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
index f8251a91ba91..d4f951fe753e 100644
--- a/drivers/scsi/bfa/bfa_fcs_lport.c
+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
@@ -16,6 +16,7 @@
  */
 
 #include "bfad_drv.h"
+#include "bfad_im.h"
 #include "bfa_fcs.h"
 #include "bfa_fcbuild.h"
 #include "bfa_fc.h"
@@ -300,6 +301,31 @@ bfa_fcs_lport_sm_deleting(
  */
 
 /*
+ * Send AEN notification
+ */
+static void
+bfa_fcs_lport_aen_post(struct bfa_fcs_lport_s *port,
+			enum bfa_lport_aen_event event)
+{
+	struct bfad_s *bfad = (struct bfad_s *)port->fabric->fcs->bfad;
+	struct bfa_aen_entry_s  *aen_entry;
+
+	bfad_get_aen_entry(bfad, aen_entry);
+	if (!aen_entry)
+		return;
+
+	aen_entry->aen_data.lport.vf_id = port->fabric->vf_id;
+	aen_entry->aen_data.lport.roles = port->port_cfg.roles;
+	aen_entry->aen_data.lport.ppwwn = bfa_fcs_lport_get_pwwn(
+					bfa_fcs_get_base_port(port->fcs));
+	aen_entry->aen_data.lport.lpwwn = bfa_fcs_lport_get_pwwn(port);
+
+	/* Send the AEN notification */
+	bfad_im_post_vendor_event(aen_entry, bfad, ++port->fcs->fcs_aen_seq,
+				  BFA_AEN_CAT_LPORT, event);
+}
+
+/*
  * Send a LS reject
  */
 static void
@@ -593,6 +619,7 @@ bfa_fcs_lport_online_actions(struct bfa_fcs_lport_s *port)
 	BFA_LOG(KERN_INFO, bfad, bfa_log_level,
 		"Logical port online: WWN = %s Role = %s\n",
 		lpwwn_buf, "Initiator");
+	bfa_fcs_lport_aen_post(port, BFA_LPORT_AEN_ONLINE);
 
 	bfad->bfad_flags |= BFAD_PORT_ONLINE;
 }
@@ -611,14 +638,17 @@ bfa_fcs_lport_offline_actions(struct bfa_fcs_lport_s *port)
 
 	wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
 	if (bfa_sm_cmp_state(port->fabric,
-			bfa_fcs_fabric_sm_online) == BFA_TRUE)
+			bfa_fcs_fabric_sm_online) == BFA_TRUE) {
 		BFA_LOG(KERN_ERR, bfad, bfa_log_level,
 		"Logical port lost fabric connectivity: WWN = %s Role = %s\n",
 		lpwwn_buf, "Initiator");
-	else
+		bfa_fcs_lport_aen_post(port, BFA_LPORT_AEN_DISCONNECT);
+	} else {
 		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
 		"Logical port taken offline: WWN = %s Role = %s\n",
 		lpwwn_buf, "Initiator");
+		bfa_fcs_lport_aen_post(port, BFA_LPORT_AEN_OFFLINE);
+	}
 
 	list_for_each_safe(qe, qen, &port->rport_q) {
 		rport = (struct bfa_fcs_rport_s *) qe;
@@ -676,6 +706,7 @@ bfa_fcs_lport_deleted(struct bfa_fcs_lport_s *port)
 	BFA_LOG(KERN_INFO, bfad, bfa_log_level,
 		"Logical port deleted: WWN = %s Role = %s\n",
 		lpwwn_buf, "Initiator");
+	bfa_fcs_lport_aen_post(port, BFA_LPORT_AEN_DELETE);
 
 	/* Base port will be deleted by the OS driver */
 	if (port->vport) {
@@ -973,6 +1004,7 @@ bfa_fcs_lport_init(struct bfa_fcs_lport_s *lport,
 	BFA_LOG(KERN_INFO, bfad, bfa_log_level,
 		"New logical port created: WWN = %s Role = %s\n",
 		lpwwn_buf, "Initiator");
+	bfa_fcs_lport_aen_post(lport, BFA_LPORT_AEN_NEW);
 
 	bfa_sm_set_state(lport, bfa_fcs_lport_sm_uninit);
 	bfa_sm_send_event(lport, BFA_FCS_PORT_SM_CREATE);
@@ -5559,6 +5591,31 @@ bfa_fcs_vport_sm_logo(struct bfa_fcs_vport_s *vport,
  *  fcs_vport_private FCS virtual port private functions
  */
 /*
+ * Send AEN notification
+ */
+static void
+bfa_fcs_vport_aen_post(struct bfa_fcs_lport_s *port,
+		       enum bfa_lport_aen_event event)
+{
+	struct bfad_s *bfad = (struct bfad_s *)port->fabric->fcs->bfad;
+	struct bfa_aen_entry_s  *aen_entry;
+
+	bfad_get_aen_entry(bfad, aen_entry);
+	if (!aen_entry)
+		return;
+
+	aen_entry->aen_data.lport.vf_id = port->fabric->vf_id;
+	aen_entry->aen_data.lport.roles = port->port_cfg.roles;
+	aen_entry->aen_data.lport.ppwwn = bfa_fcs_lport_get_pwwn(
+					bfa_fcs_get_base_port(port->fcs));
+	aen_entry->aen_data.lport.lpwwn = bfa_fcs_lport_get_pwwn(port);
+
+	/* Send the AEN notification */
+	bfad_im_post_vendor_event(aen_entry, bfad, ++port->fcs->fcs_aen_seq,
+				  BFA_AEN_CAT_LPORT, event);
+}
+
+/*
  * This routine will be called to send a FDISC command.
  */
 static void
@@ -5585,8 +5642,11 @@ bfa_fcs_vport_fdisc_rejected(struct bfa_fcs_vport_s *vport)
 	case FC_LS_RJT_EXP_INVALID_NPORT_ID: /* by Cisco */
 		if (vport->fdisc_retries < BFA_FCS_VPORT_MAX_RETRIES)
 			bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
-		else
+		else {
+			bfa_fcs_vport_aen_post(&vport->lport,
+					BFA_LPORT_AEN_NPIV_DUP_WWN);
 			bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_DUP_WWN);
+		}
 		break;
 
 	case FC_LS_RJT_EXP_INSUFF_RES:
@@ -5596,11 +5656,17 @@ bfa_fcs_vport_fdisc_rejected(struct bfa_fcs_vport_s *vport)
 		 */
 		if (vport->fdisc_retries < BFA_FCS_VPORT_MAX_RETRIES)
 			bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
-		else
+		else {
+			bfa_fcs_vport_aen_post(&vport->lport,
+					BFA_LPORT_AEN_NPIV_FABRIC_MAX);
 			bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_FAILED);
+		}
 		break;
 
 	default:
+		if (vport->fdisc_retries == 0)
+			bfa_fcs_vport_aen_post(&vport->lport,
+					BFA_LPORT_AEN_NPIV_UNKNOWN);
 		bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
 	}
 }
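The FDISC-reject handling above retries a bounded number of times and raises the matching NPIV AEN only once the budget is exhausted. A tiny sketch of that retry-then-escalate shape, with made-up limits and return strings:

#include <stdio.h>

/* Hedged sketch: bounded retry, escalate on exhaustion (illustrative only). */
#define MAX_RETRIES 2

static const char *fdisc_rejected(int *retries)
{
	if (*retries < MAX_RETRIES) {
		(*retries)++;
		return "retry FDISC";
	}
	return "post NPIV AEN and fail the vport";
}

int main(void)
{
	int retries = 0;
	int i;

	for (i = 0; i < 3; i++)
		printf("%s\n", fdisc_rejected(&retries));
	return 0;
}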
diff --git a/drivers/scsi/bfa/bfa_fcs_rport.c b/drivers/scsi/bfa/bfa_fcs_rport.c
index 2c514458a6b4..52628d5d3c9b 100644
--- a/drivers/scsi/bfa/bfa_fcs_rport.c
+++ b/drivers/scsi/bfa/bfa_fcs_rport.c
@@ -20,6 +20,7 @@
  */
 
 #include "bfad_drv.h"
+#include "bfad_im.h"
 #include "bfa_fcs.h"
 #include "bfa_fcbuild.h"
 
@@ -2041,6 +2042,35 @@ bfa_fcs_rport_free(struct bfa_fcs_rport_s *rport)
 }
 
 static void
+bfa_fcs_rport_aen_post(struct bfa_fcs_rport_s *rport,
+			enum bfa_rport_aen_event event,
+			struct bfa_rport_aen_data_s *data)
+{
+	struct bfa_fcs_lport_s *port = rport->port;
+	struct bfad_s *bfad = (struct bfad_s *)port->fcs->bfad;
+	struct bfa_aen_entry_s  *aen_entry;
+
+	bfad_get_aen_entry(bfad, aen_entry);
+	if (!aen_entry)
+		return;
+
+	if (event == BFA_RPORT_AEN_QOS_PRIO)
+		aen_entry->aen_data.rport.priv.qos = data->priv.qos;
+	else if (event == BFA_RPORT_AEN_QOS_FLOWID)
+		aen_entry->aen_data.rport.priv.qos = data->priv.qos;
+
+	aen_entry->aen_data.rport.vf_id = rport->port->fabric->vf_id;
+	aen_entry->aen_data.rport.ppwwn = bfa_fcs_lport_get_pwwn(
+					bfa_fcs_get_base_port(rport->fcs));
+	aen_entry->aen_data.rport.lpwwn = bfa_fcs_lport_get_pwwn(rport->port);
+	aen_entry->aen_data.rport.rpwwn = rport->pwwn;
+
+	/* Send the AEN notification */
+	bfad_im_post_vendor_event(aen_entry, bfad, ++rport->fcs->fcs_aen_seq,
+				  BFA_AEN_CAT_RPORT, event);
+}
+
+static void
 bfa_fcs_rport_online_action(struct bfa_fcs_rport_s *rport)
 {
 	struct bfa_fcs_lport_s *port = rport->port;
@@ -2063,10 +2093,12 @@ bfa_fcs_rport_online_action(struct bfa_fcs_rport_s *rport)
 
 	wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
 	wwn2str(rpwwn_buf, rport->pwwn);
-	if (!BFA_FCS_PID_IS_WKA(rport->pid))
+	if (!BFA_FCS_PID_IS_WKA(rport->pid)) {
 		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
 		"Remote port (WWN = %s) online for logical port (WWN = %s)\n",
 		rpwwn_buf, lpwwn_buf);
+		bfa_fcs_rport_aen_post(rport, BFA_RPORT_AEN_ONLINE, NULL);
+	}
 }
 
 static void
@@ -2083,16 +2115,21 @@ bfa_fcs_rport_offline_action(struct bfa_fcs_rport_s *rport)
 	wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
 	wwn2str(rpwwn_buf, rport->pwwn);
 	if (!BFA_FCS_PID_IS_WKA(rport->pid)) {
-		if (bfa_fcs_lport_is_online(rport->port) == BFA_TRUE)
+		if (bfa_fcs_lport_is_online(rport->port) == BFA_TRUE) {
 			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
 				"Remote port (WWN = %s) connectivity lost for "
 				"logical port (WWN = %s)\n",
 				rpwwn_buf, lpwwn_buf);
-		else
+			bfa_fcs_rport_aen_post(rport,
+				BFA_RPORT_AEN_DISCONNECT, NULL);
+		} else {
 			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
 				"Remote port (WWN = %s) offlined by "
 				"logical port (WWN = %s)\n",
 				rpwwn_buf, lpwwn_buf);
+			bfa_fcs_rport_aen_post(rport,
+				BFA_RPORT_AEN_OFFLINE, NULL);
+		}
 	}
 
 	if (bfa_fcs_lport_is_initiator(port)) {
@@ -2366,8 +2403,11 @@ bfa_cb_rport_qos_scn_flowid(void *cbarg,
 		struct bfa_rport_qos_attr_s new_qos_attr)
 {
 	struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg;
+	struct bfa_rport_aen_data_s aen_data;
 
 	bfa_trc(rport->fcs, rport->pwwn);
+	aen_data.priv.qos = new_qos_attr;
+	bfa_fcs_rport_aen_post(rport, BFA_RPORT_AEN_QOS_FLOWID, &aen_data);
 }
 
 /*
@@ -2390,8 +2430,11 @@ bfa_cb_rport_qos_scn_prio(void *cbarg,
 		struct bfa_rport_qos_attr_s new_qos_attr)
 {
 	struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg;
+	struct bfa_rport_aen_data_s aen_data;
 
 	bfa_trc(rport->fcs, rport->pwwn);
+	aen_data.priv.qos = new_qos_attr;
+	bfa_fcs_rport_aen_post(rport, BFA_RPORT_AEN_QOS_PRIO, &aen_data);
 }
 
 /*
diff --git a/drivers/scsi/bfa/bfa_hw_cb.c b/drivers/scsi/bfa/bfa_hw_cb.c
index e7ffd8205dc7..ea24d4c6e67a 100644
--- a/drivers/scsi/bfa/bfa_hw_cb.c
+++ b/drivers/scsi/bfa/bfa_hw_cb.c
@@ -42,11 +42,36 @@ bfa_hwcb_reqq_ack_msix(struct bfa_s *bfa, int reqq)
 			bfa->iocfc.bfa_regs.intr_status);
 }
 
+/*
+ * Actions taken to respond to an RME interrupt on the Crossbow ASIC:
+ * - Write 1 to Interrupt Status register
+ *              INTX - done in bfa_intx()
+ *              MSIX - done in bfa_hwcb_rspq_ack_msix()
+ * - Update CI (only if new CI)
+ */
 static void
-bfa_hwcb_rspq_ack_msix(struct bfa_s *bfa, int rspq)
+bfa_hwcb_rspq_ack_msix(struct bfa_s *bfa, int rspq, u32 ci)
 {
 	writel(__HFN_INT_RME_Q0 << RME_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), rspq),
-			bfa->iocfc.bfa_regs.intr_status);
+		bfa->iocfc.bfa_regs.intr_status);
+
+	if (bfa_rspq_ci(bfa, rspq) == ci)
+		return;
+
+	bfa_rspq_ci(bfa, rspq) = ci;
+	writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);
+	mmiowb();
+}
+
+void
+bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci)
+{
+	if (bfa_rspq_ci(bfa, rspq) == ci)
+		return;
+
+	bfa_rspq_ci(bfa, rspq) = ci;
+	writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);
+	mmiowb();
 }
 
 void
@@ -149,8 +174,13 @@ bfa_hwcb_msix_uninstall(struct bfa_s *bfa)
 void
 bfa_hwcb_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix)
 {
-	bfa->iocfc.hwif.hw_reqq_ack = bfa_hwcb_reqq_ack_msix;
-	bfa->iocfc.hwif.hw_rspq_ack = bfa_hwcb_rspq_ack_msix;
+	if (msix) {
+		bfa->iocfc.hwif.hw_reqq_ack = bfa_hwcb_reqq_ack_msix;
+		bfa->iocfc.hwif.hw_rspq_ack = bfa_hwcb_rspq_ack_msix;
+	} else {
+		bfa->iocfc.hwif.hw_reqq_ack = NULL;
+		bfa->iocfc.hwif.hw_rspq_ack = bfa_hwcb_rspq_ack;
+	}
 }
 
 void
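The Crossbow acknowledgement above avoids touching the consumer-index register when nothing was consumed, and bfa_hwcb_isr_mode_set() now installs different ack hooks for MSIX and INTx. A small sketch of the skip-if-unchanged CI update; reg_write() stands in for writel()/mmiowb() and the shadow array for bfa_rspq_ci():

#include <stdint.h>
#include <stdio.h>

/* Hedged sketch: update the consumer index only when it actually moved. */
static uint32_t shadow_ci[4];

static void reg_write(int rspq, uint32_t ci)
{
	printf("rspq %d: CI register <- %u\n", rspq, ci);	/* MMIO stand-in */
}

static void rspq_ack(int rspq, uint32_t ci)
{
	if (shadow_ci[rspq] == ci)	/* nothing new consumed, skip the write */
		return;
	shadow_ci[rspq] = ci;
	reg_write(rspq, ci);
}

int main(void)
{
	rspq_ack(0, 5);		/* writes */
	rspq_ack(0, 5);		/* skipped */
	rspq_ack(0, 6);		/* writes */
	return 0;
}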
diff --git a/drivers/scsi/bfa/bfa_hw_ct.c b/drivers/scsi/bfa/bfa_hw_ct.c
index 989bbce9b296..637527f48b40 100644
--- a/drivers/scsi/bfa/bfa_hw_ct.c
+++ b/drivers/scsi/bfa/bfa_hw_ct.c
@@ -64,13 +64,36 @@ bfa_hwct_reqq_ack(struct bfa_s *bfa, int reqq)
 	writel(r32, bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq]);
 }
 
+/*
+ * Actions taken to respond to an RME interrupt on the Catapult ASIC:
+ * - Write 1 to Interrupt Status register (INTx only - done in bfa_intx())
+ * - Acknowledge by writing to RME Queue Control register
+ * - Update CI
+ */
 void
-bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq)
+bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci)
 {
 	u32	r32;
 
 	r32 = readl(bfa->iocfc.bfa_regs.rme_q_ctrl[rspq]);
 	writel(r32, bfa->iocfc.bfa_regs.rme_q_ctrl[rspq]);
+
+	bfa_rspq_ci(bfa, rspq) = ci;
+	writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);
+	mmiowb();
+}
+
+/*
+ * Actions taken to respond to an RME interrupt on the Catapult2 ASIC:
+ * - Write 1 to Interrupt Status register (INTx only - done in bfa_intx())
+ * - Update CI
+ */
+void
+bfa_hwct2_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci)
+{
+	bfa_rspq_ci(bfa, rspq) = ci;
+	writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);
+	mmiowb();
 }
 
 void
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index d6c2bf3865d2..1ac5aecf25a6 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -16,6 +16,7 @@
  */
 
 #include "bfad_drv.h"
+#include "bfad_im.h"
 #include "bfa_ioc.h"
 #include "bfi_reg.h"
 #include "bfa_defs.h"
@@ -458,6 +459,7 @@ bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
 	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
 	bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
 	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC enabled\n");
+	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_ENABLE);
 }
 
 static void
@@ -502,6 +504,7 @@ bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
 	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
 	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
 	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC disabled\n");
+	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_DISABLE);
 }
 
 /*
@@ -1966,6 +1969,7 @@ bfa_ioc_fail_notify(struct bfa_ioc_s *ioc)
 
 	BFA_LOG(KERN_CRIT, bfad, bfa_log_level,
 		"Heart Beat of IOC has failed\n");
+	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_HBFAIL);
 
 }
 
@@ -1980,6 +1984,7 @@ bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc)
 	BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
 		"Running firmware version is incompatible "
 		"with the driver version\n");
+	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_FWMISMATCH);
 }
 
 bfa_status_t
@@ -2679,6 +2684,43 @@ bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc)
 }
 
 /*
+ * Send AEN notification
+ */
+void
+bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event)
+{
+	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
+	struct bfa_aen_entry_s	*aen_entry;
+	enum bfa_ioc_type_e ioc_type;
+
+	bfad_get_aen_entry(bfad, aen_entry);
+	if (!aen_entry)
+		return;
+
+	ioc_type = bfa_ioc_get_type(ioc);
+	switch (ioc_type) {
+	case BFA_IOC_TYPE_FC:
+		aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn;
+		break;
+	case BFA_IOC_TYPE_FCoE:
+		aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn;
+		aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
+		break;
+	case BFA_IOC_TYPE_LL:
+		aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
+		break;
+	default:
+		WARN_ON(ioc_type != BFA_IOC_TYPE_FC);
+		break;
+	}
+
+	/* Send the AEN notification */
+	aen_entry->aen_data.ioc.ioc_type = ioc_type;
+	bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq,
+				  BFA_AEN_CAT_IOC, event);
+}
+
+/*
  * Retrieve saved firmware trace from a prior IOC failure.
  */
 bfa_status_t
@@ -2879,6 +2921,10 @@ bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc)
 {
 	if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
 		return;
+	if (ioc->attr->nwwn == 0)
+		bfa_ioc_aen_post(ioc, BFA_IOC_AEN_INVALID_NWWN);
+	if (ioc->attr->pwwn == 0)
+		bfa_ioc_aen_post(ioc, BFA_IOC_AEN_INVALID_PWWN);
 }
 
 /*
@@ -3443,6 +3489,54 @@ bfa_sfp_notify(void *sfp_arg, enum bfa_ioc_event_e event)
 }
 
 /*
+ * Post the SFP State Change Notification (SCN) as an AEN
+ */
+static void
+bfa_sfp_scn_aen_post(struct bfa_sfp_s *sfp, struct bfi_sfp_scn_s *rsp)
+{
+	struct bfad_s *bfad = (struct bfad_s *)sfp->ioc->bfa->bfad;
+	struct bfa_aen_entry_s  *aen_entry;
+	enum bfa_port_aen_event aen_evt = 0;
+
+	bfa_trc(sfp, (((u64)rsp->pomlvl) << 16) | (((u64)rsp->sfpid) << 8) |
+		      ((u64)rsp->event));
+
+	bfad_get_aen_entry(bfad, aen_entry);
+	if (!aen_entry)
+		return;
+
+	aen_entry->aen_data.port.ioc_type = bfa_ioc_get_type(sfp->ioc);
+	aen_entry->aen_data.port.pwwn = sfp->ioc->attr->pwwn;
+	aen_entry->aen_data.port.mac = bfa_ioc_get_mac(sfp->ioc);
+
+	switch (rsp->event) {
+	case BFA_SFP_SCN_INSERTED:
+		aen_evt = BFA_PORT_AEN_SFP_INSERT;
+		break;
+	case BFA_SFP_SCN_REMOVED:
+		aen_evt = BFA_PORT_AEN_SFP_REMOVE;
+		break;
+	case BFA_SFP_SCN_FAILED:
+		aen_evt = BFA_PORT_AEN_SFP_ACCESS_ERROR;
+		break;
+	case BFA_SFP_SCN_UNSUPPORT:
+		aen_evt = BFA_PORT_AEN_SFP_UNSUPPORT;
+		break;
+	case BFA_SFP_SCN_POM:
+		aen_evt = BFA_PORT_AEN_SFP_POM;
+		aen_entry->aen_data.port.level = rsp->pomlvl;
+		break;
+	default:
+		bfa_trc(sfp, rsp->event);
+		WARN_ON(1);
+	}
+
+	/* Send the AEN notification */
+	bfad_im_post_vendor_event(aen_entry, bfad, ++sfp->ioc->ioc_aen_seq,
+				  BFA_AEN_CAT_PORT, aen_evt);
+}
+
+/*
  *	SFP get data send
  */
 static void
@@ -3482,6 +3576,50 @@ bfa_sfp_getdata(struct bfa_sfp_s *sfp, enum bfi_sfp_mem_e memtype)
 }
 
 /*
+ *	SFP scn handler
+ */
+static void
+bfa_sfp_scn(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
+{
+	struct bfi_sfp_scn_s *rsp = (struct bfi_sfp_scn_s *) msg;
+
+	switch (rsp->event) {
+	case BFA_SFP_SCN_INSERTED:
+		sfp->state = BFA_SFP_STATE_INSERTED;
+		sfp->data_valid = 0;
+		bfa_sfp_scn_aen_post(sfp, rsp);
+		break;
+	case BFA_SFP_SCN_REMOVED:
+		sfp->state = BFA_SFP_STATE_REMOVED;
+		sfp->data_valid = 0;
+		bfa_sfp_scn_aen_post(sfp, rsp);
+		break;
+	case BFA_SFP_SCN_FAILED:
+		sfp->state = BFA_SFP_STATE_FAILED;
+		sfp->data_valid = 0;
+		bfa_sfp_scn_aen_post(sfp, rsp);
+		break;
+	case BFA_SFP_SCN_UNSUPPORT:
+		sfp->state = BFA_SFP_STATE_UNSUPPORT;
+		bfa_sfp_scn_aen_post(sfp, rsp);
+		if (!sfp->lock)
+			bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
+		break;
+	case BFA_SFP_SCN_POM:
+		bfa_sfp_scn_aen_post(sfp, rsp);
+		break;
+	case BFA_SFP_SCN_VALID:
+		sfp->state = BFA_SFP_STATE_VALID;
+		if (!sfp->lock)
+			bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
+		break;
+	default:
+		bfa_trc(sfp, rsp->event);
+		WARN_ON(1);
+	}
+}
+
+/*
  * SFP show complete
  */
 static void
@@ -3645,7 +3783,7 @@ bfa_sfp_intr(void *sfparg, struct bfi_mbmsg_s *msg)
 		break;
 
 	case BFI_SFP_I2H_SCN:
-		bfa_trc(sfp, msg->mh.msg_id);
+		bfa_sfp_scn(sfp, msg);
 		break;
 
 	default:
@@ -3838,6 +3976,26 @@ bfa_sfp_speed(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed,
 	BFA_ROUNDUP(0x010000 + sizeof(struct bfa_mfg_block_s), BFA_FLASH_SEG_SZ)
 
 static void
+bfa_flash_aen_audit_post(struct bfa_ioc_s *ioc, enum bfa_audit_aen_event event,
+			int inst, int type)
+{
+	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
+	struct bfa_aen_entry_s  *aen_entry;
+
+	bfad_get_aen_entry(bfad, aen_entry);
+	if (!aen_entry)
+		return;
+
+	aen_entry->aen_data.audit.pwwn = ioc->attr->pwwn;
+	aen_entry->aen_data.audit.partition_inst = inst;
+	aen_entry->aen_data.audit.partition_type = type;
+
+	/* Send the AEN notification */
+	bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq,
+				  BFA_AEN_CAT_AUDIT, event);
+}
+
+static void
 bfa_flash_cb(struct bfa_flash_s *flash)
 {
 	flash->op_busy = 0;
@@ -3978,6 +4136,7 @@ bfa_flash_intr(void *flasharg, struct bfi_mbmsg_s *msg)
 		struct bfi_flash_erase_rsp_s *erase;
 		struct bfi_flash_write_rsp_s *write;
 		struct bfi_flash_read_rsp_s *read;
+		struct bfi_flash_event_s *event;
 		struct bfi_mbmsg_s   *msg;
 	} m;
 
@@ -4061,8 +4220,19 @@ bfa_flash_intr(void *flasharg, struct bfi_mbmsg_s *msg)
 		}
 		break;
 	case BFI_FLASH_I2H_BOOT_VER_RSP:
+		break;
 	case BFI_FLASH_I2H_EVENT:
-		bfa_trc(flash, msg->mh.msg_id);
+		status = be32_to_cpu(m.event->status);
+		bfa_trc(flash, status);
+		if (status == BFA_STATUS_BAD_FWCFG)
+			bfa_ioc_aen_post(flash->ioc, BFA_IOC_AEN_FWCFG_ERROR);
+		else if (status == BFA_STATUS_INVALID_VENDOR) {
+			u32 param;
+			param = be32_to_cpu(m.event->param);
+			bfa_trc(flash, param);
+			bfa_ioc_aen_post(flash->ioc,
+				BFA_IOC_AEN_INVALID_VENDOR);
+		}
 		break;
 
 	default:
@@ -4204,6 +4374,8 @@ bfa_flash_erase_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
 	flash->instance = instance;
 
 	bfa_flash_erase_send(flash);
+	bfa_flash_aen_audit_post(flash->ioc, BFA_AUDIT_AEN_FLASH_ERASE,
+				instance, type);
 	return BFA_STATUS_OK;
 }
 
@@ -5416,3 +5588,396 @@ bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg)
 		WARN_ON(1);
 	}
 }
+
+/*
+ *	DCONF module specific
+ */
+
+BFA_MODULE(dconf);
+
+/*
+ * DCONF state machine events
+ */
+enum bfa_dconf_event {
+	BFA_DCONF_SM_INIT		= 1,	/* dconf Init */
+	BFA_DCONF_SM_FLASH_COMP		= 2,	/* read/write to flash */
+	BFA_DCONF_SM_WR			= 3,	/* binding change, map */
+	BFA_DCONF_SM_TIMEOUT		= 4,	/* Start timer */
+	BFA_DCONF_SM_EXIT		= 5,	/* exit dconf module */
+	BFA_DCONF_SM_IOCDISABLE		= 6,	/* IOC disable event */
+};
+
+/* forward declaration of DCONF state machine */
+static void bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf,
+				enum bfa_dconf_event event);
+static void bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
+				enum bfa_dconf_event event);
+static void bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf,
+				enum bfa_dconf_event event);
+static void bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf,
+				enum bfa_dconf_event event);
+static void bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf,
+				enum bfa_dconf_event event);
+static void bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
+				enum bfa_dconf_event event);
+static void bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf,
+				enum bfa_dconf_event event);
+
+static void bfa_dconf_cbfn(void *dconf, bfa_status_t status);
+static void bfa_dconf_timer(void *cbarg);
+static bfa_status_t bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf);
+static void bfa_dconf_init_cb(void *arg, bfa_status_t status);
+
+/*
+ * Beginning state of the dconf module. Waiting for an event to start.
+ */
+static void
+bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
+{
+	bfa_status_t bfa_status;
+	bfa_trc(dconf->bfa, event);
+
+	switch (event) {
+	case BFA_DCONF_SM_INIT:
+		if (dconf->min_cfg) {
+			bfa_trc(dconf->bfa, dconf->min_cfg);
+			return;
+		}
+		bfa_sm_set_state(dconf, bfa_dconf_sm_flash_read);
+		dconf->flashdone = BFA_FALSE;
+		bfa_trc(dconf->bfa, dconf->flashdone);
+		bfa_status = bfa_flash_read_part(BFA_FLASH(dconf->bfa),
+					BFA_FLASH_PART_DRV, dconf->instance,
+					dconf->dconf,
+					sizeof(struct bfa_dconf_s), 0,
+					bfa_dconf_init_cb, dconf->bfa);
+		if (bfa_status != BFA_STATUS_OK) {
+			bfa_dconf_init_cb(dconf->bfa, BFA_STATUS_FAILED);
+			bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
+			return;
+		}
+		break;
+	case BFA_DCONF_SM_EXIT:
+		dconf->flashdone = BFA_TRUE;
+	case BFA_DCONF_SM_IOCDISABLE:
+	case BFA_DCONF_SM_WR:
+	case BFA_DCONF_SM_FLASH_COMP:
+		break;
+	default:
+		bfa_sm_fault(dconf->bfa, event);
+	}
+}
+
+/*
+ * Read the dconf entries from flash and call back the driver once done.
+ */
+static void
+bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
+			enum bfa_dconf_event event)
+{
+	bfa_trc(dconf->bfa, event);
+
+	switch (event) {
+	case BFA_DCONF_SM_FLASH_COMP:
+		bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
+		break;
+	case BFA_DCONF_SM_TIMEOUT:
+		bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
+		break;
+	case BFA_DCONF_SM_EXIT:
+		dconf->flashdone = BFA_TRUE;
+		bfa_trc(dconf->bfa, dconf->flashdone);
+	case BFA_DCONF_SM_IOCDISABLE:
+		bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
+		break;
+	default:
+		bfa_sm_fault(dconf->bfa, event);
+	}
+}
+
+/*
+ * The DCONF module is in the ready state; initialization has completed.
+ */
+static void
+bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
+{
+	bfa_trc(dconf->bfa, event);
+
+	switch (event) {
+	case BFA_DCONF_SM_WR:
+		bfa_timer_start(dconf->bfa, &dconf->timer,
+			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
+		bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
+		break;
+	case BFA_DCONF_SM_EXIT:
+		dconf->flashdone = BFA_TRUE;
+		bfa_trc(dconf->bfa, dconf->flashdone);
+		bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
+		break;
+	case BFA_DCONF_SM_INIT:
+	case BFA_DCONF_SM_IOCDISABLE:
+		break;
+	default:
+		bfa_sm_fault(dconf->bfa, event);
+	}
+}
+
+/*
+ * Entries are dirty; write them back to the flash.
+ */
+
+static void
+bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
+{
+	bfa_trc(dconf->bfa, event);
+
+	switch (event) {
+	case BFA_DCONF_SM_TIMEOUT:
+		bfa_sm_set_state(dconf, bfa_dconf_sm_sync);
+		bfa_dconf_flash_write(dconf);
+		break;
+	case BFA_DCONF_SM_WR:
+		bfa_timer_stop(&dconf->timer);
+		bfa_timer_start(dconf->bfa, &dconf->timer,
+			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
+		break;
+	case BFA_DCONF_SM_EXIT:
+		bfa_timer_stop(&dconf->timer);
+		bfa_timer_start(dconf->bfa, &dconf->timer,
+			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
+		bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync);
+		bfa_dconf_flash_write(dconf);
+		break;
+	case BFA_DCONF_SM_FLASH_COMP:
+		break;
+	case BFA_DCONF_SM_IOCDISABLE:
+		bfa_timer_stop(&dconf->timer);
+		bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty);
+		break;
+	default:
+		bfa_sm_fault(dconf->bfa, event);
+	}
+}
+
+/*
+ * Sync the dconf entries to the flash.
+ */
+static void
+bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
+			enum bfa_dconf_event event)
+{
+	bfa_trc(dconf->bfa, event);
+
+	switch (event) {
+	case BFA_DCONF_SM_IOCDISABLE:
+	case BFA_DCONF_SM_FLASH_COMP:
+		bfa_timer_stop(&dconf->timer);
+	case BFA_DCONF_SM_TIMEOUT:
+		bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
+		dconf->flashdone = BFA_TRUE;
+		bfa_trc(dconf->bfa, dconf->flashdone);
+		bfa_ioc_disable(&dconf->bfa->ioc);
+		break;
+	default:
+		bfa_sm_fault(dconf->bfa, event);
+	}
+}
+
+static void
+bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
+{
+	bfa_trc(dconf->bfa, event);
+
+	switch (event) {
+	case BFA_DCONF_SM_FLASH_COMP:
+		bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
+		break;
+	case BFA_DCONF_SM_WR:
+		bfa_timer_start(dconf->bfa, &dconf->timer,
+			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
+		bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
+		break;
+	case BFA_DCONF_SM_EXIT:
+		bfa_timer_start(dconf->bfa, &dconf->timer,
+			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
+		bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync);
+		break;
+	case BFA_DCONF_SM_IOCDISABLE:
+		bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty);
+		break;
+	default:
+		bfa_sm_fault(dconf->bfa, event);
+	}
+}
+
+static void
+bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf,
+			enum bfa_dconf_event event)
+{
+	bfa_trc(dconf->bfa, event);
+
+	switch (event) {
+	case BFA_DCONF_SM_INIT:
+		bfa_timer_start(dconf->bfa, &dconf->timer,
+			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
+		bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
+		break;
+	case BFA_DCONF_SM_EXIT:
+		dconf->flashdone = BFA_TRUE;
+		bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
+		break;
+	case BFA_DCONF_SM_IOCDISABLE:
+		break;
+	default:
+		bfa_sm_fault(dconf->bfa, event);
+	}
+}
+
+/*
+ * Compute and return memory needed by DRV_CFG module.
+ */
+static void
+bfa_dconf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
+		  struct bfa_s *bfa)
+{
+	struct bfa_mem_kva_s *dconf_kva = BFA_MEM_DCONF_KVA(bfa);
+
+	if (cfg->drvcfg.min_cfg)
+		bfa_mem_kva_setup(meminfo, dconf_kva,
+				sizeof(struct bfa_dconf_hdr_s));
+	else
+		bfa_mem_kva_setup(meminfo, dconf_kva,
+				sizeof(struct bfa_dconf_s));
+}
+
+static void
+bfa_dconf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
+		struct bfa_pcidev_s *pcidev)
+{
+	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
+
+	dconf->bfad = bfad;
+	dconf->bfa = bfa;
+	dconf->instance = bfa->ioc.port_id;
+	bfa_trc(bfa, dconf->instance);
+
+	dconf->dconf = (struct bfa_dconf_s *) bfa_mem_kva_curp(dconf);
+	if (cfg->drvcfg.min_cfg) {
+		bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_hdr_s);
+		dconf->min_cfg = BFA_TRUE;
+		/*
+		 * Set the flashdone flag to TRUE explicitly as no flash
+		 * write will happen in min_cfg mode.
+		 */
+		dconf->flashdone = BFA_TRUE;
+	} else {
+		dconf->min_cfg = BFA_FALSE;
+		bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_s);
+	}
+
+	bfa_dconf_read_data_valid(bfa) = BFA_FALSE;
+	bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
+}
+
+static void
+bfa_dconf_init_cb(void *arg, bfa_status_t status)
+{
+	struct bfa_s *bfa = arg;
+	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
+
+	dconf->flashdone = BFA_TRUE;
+	bfa_trc(bfa, dconf->flashdone);
+	bfa_iocfc_cb_dconf_modinit(bfa, status);
+	if (status == BFA_STATUS_OK) {
+		bfa_dconf_read_data_valid(bfa) = BFA_TRUE;
+		if (dconf->dconf->hdr.signature != BFI_DCONF_SIGNATURE)
+			dconf->dconf->hdr.signature = BFI_DCONF_SIGNATURE;
+		if (dconf->dconf->hdr.version != BFI_DCONF_VERSION)
+			dconf->dconf->hdr.version = BFI_DCONF_VERSION;
+	}
+	bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
+}
+
+void
+bfa_dconf_modinit(struct bfa_s *bfa)
+{
+	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
+	bfa_sm_send_event(dconf, BFA_DCONF_SM_INIT);
+}
+static void
+bfa_dconf_start(struct bfa_s *bfa)
+{
+}
+
+static void
+bfa_dconf_stop(struct bfa_s *bfa)
+{
+}
+
+static void bfa_dconf_timer(void *cbarg)
+{
+	struct bfa_dconf_mod_s *dconf = cbarg;
+	bfa_sm_send_event(dconf, BFA_DCONF_SM_TIMEOUT);
+}
+static void
+bfa_dconf_iocdisable(struct bfa_s *bfa)
+{
+	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
+	bfa_sm_send_event(dconf, BFA_DCONF_SM_IOCDISABLE);
+}
+
+static void
+bfa_dconf_detach(struct bfa_s *bfa)
+{
+}
+
+static bfa_status_t
+bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf)
+{
+	bfa_status_t bfa_status;
+	bfa_trc(dconf->bfa, 0);
+
+	bfa_status = bfa_flash_update_part(BFA_FLASH(dconf->bfa),
+				BFA_FLASH_PART_DRV, dconf->instance,
+				dconf->dconf,  sizeof(struct bfa_dconf_s), 0,
+				bfa_dconf_cbfn, dconf);
+	if (bfa_status != BFA_STATUS_OK)
+		WARN_ON(bfa_status);
+	bfa_trc(dconf->bfa, bfa_status);
+
+	return bfa_status;
+}
+
+bfa_status_t
+bfa_dconf_update(struct bfa_s *bfa)
+{
+	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
+	bfa_trc(dconf->bfa, 0);
+	if (bfa_sm_cmp_state(dconf, bfa_dconf_sm_iocdown_dirty))
+		return BFA_STATUS_FAILED;
+
+	if (dconf->min_cfg) {
+		bfa_trc(dconf->bfa, dconf->min_cfg);
+		return BFA_STATUS_FAILED;
+	}
+
+	bfa_sm_send_event(dconf, BFA_DCONF_SM_WR);
+	return BFA_STATUS_OK;
+}
+
+static void
+bfa_dconf_cbfn(void *arg, bfa_status_t status)
+{
+	struct bfa_dconf_mod_s *dconf = arg;
+	WARN_ON(status);
+	bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
+}
+
+void
+bfa_dconf_modexit(struct bfa_s *bfa)
+{
+	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
+	BFA_DCONF_MOD(bfa)->flashdone = BFA_FALSE;
+	bfa_trc(bfa, BFA_DCONF_MOD(bfa)->flashdone);
+	bfa_sm_send_event(dconf, BFA_DCONF_SM_EXIT);
+}
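The dconf module added above is written in the function-pointer state-machine style used throughout bfa: the current state is a function, bfa_sm_set_state() is an assignment and bfa_sm_send_event() a call through it. A compressed sketch of that style (the real module has seven states with flash I/O in between; both are collapsed here and all names are illustrative):

#include <stdio.h>

/* Hedged sketch of a function-pointer state machine (not driver code). */
struct sm;
typedef void (*state_fn)(struct sm *sm, int event);
struct sm { state_fn state; };

enum { EV_INIT = 1, EV_WR, EV_EXIT };

static void sm_ready(struct sm *sm, int event);

static void sm_uninit(struct sm *sm, int event)
{
	if (event == EV_INIT) {
		printf("uninit: issue flash read, go ready\n");
		sm->state = sm_ready;	/* flash completion collapsed away */
	}
}

static void sm_ready(struct sm *sm, int event)
{
	(void)sm;
	if (event == EV_WR)
		printf("ready: start update timer, go dirty\n");
	else if (event == EV_EXIT)
		printf("ready: mark flash done, back to uninit\n");
}

int main(void)
{
	struct sm dconf = { sm_uninit };

	dconf.state(&dconf, EV_INIT);	/* bfa_sm_send_event() equivalent */
	dconf.state(&dconf, EV_WR);
	return 0;
}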
diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
index c5ecd2edc95d..546d46b37101 100644
--- a/drivers/scsi/bfa/bfa_ioc.h
+++ b/drivers/scsi/bfa/bfa_ioc.h
@@ -327,6 +327,7 @@ struct bfa_ioc_s {
 	enum bfa_mode_s		port_mode;
 	u8			ad_cap_bm;	/* adapter cap bit mask */
 	u8			port_mode_cfg;	/* config port mode */
+	int			ioc_aen_seq;
 };
 
 struct bfa_ioc_hwif_s {
@@ -366,6 +367,8 @@ struct bfa_cb_qe_s {
 	struct list_head	qe;
 	bfa_cb_cbfn_t	cbfn;
 	bfa_boolean_t	once;
+	bfa_boolean_t	pre_rmv;	/* set for stack based qe(s) */
+	bfa_status_t	fw_status;	/* to access fw status in comp proc */
 	void		*cbarg;
 };
 
@@ -658,7 +661,6 @@ struct bfa_phy_s {
 	struct bfa_ioc_notify_s	ioc_notify; /* ioc event notify */
 	struct bfa_mem_dma_s	phy_dma;
 };
-
 #define BFA_PHY(__bfa)	(&(__bfa)->modules.phy)
 #define BFA_MEM_PHY_DMA(__bfa)	(&(BFA_PHY(__bfa)->phy_dma))
 
@@ -684,6 +686,49 @@ void bfa_phy_memclaim(struct bfa_phy_s *phy,
 void bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg);
 
 /*
+ * Driver Config (dconf) specific
+ */
+#define BFI_DCONF_SIGNATURE	0xabcdabcd
+#define BFI_DCONF_VERSION	1
+
+#pragma pack(1)
+struct bfa_dconf_hdr_s {
+	u32	signature;
+	u32	version;
+};
+
+struct bfa_dconf_s {
+	struct bfa_dconf_hdr_s		hdr;
+	struct bfa_lunmask_cfg_s	lun_mask;
+};
+#pragma pack()
+
+struct bfa_dconf_mod_s {
+	bfa_sm_t		sm;
+	u8			instance;
+	bfa_boolean_t		flashdone;
+	bfa_boolean_t		read_data_valid;
+	bfa_boolean_t		min_cfg;
+	struct bfa_timer_s	timer;
+	struct bfa_s		*bfa;
+	void			*bfad;
+	void			*trcmod;
+	struct bfa_dconf_s	*dconf;
+	struct bfa_mem_kva_s	kva_seg;
+};
+
+#define BFA_DCONF_MOD(__bfa)	\
+	(&(__bfa)->modules.dconf_mod)
+#define BFA_MEM_DCONF_KVA(__bfa)	(&(BFA_DCONF_MOD(__bfa)->kva_seg))
+#define bfa_dconf_read_data_valid(__bfa)	\
+	(BFA_DCONF_MOD(__bfa)->read_data_valid)
+#define BFA_DCONF_UPDATE_TOV	5000	/* dconf update timeout in msec */
+
+void	bfa_dconf_modinit(struct bfa_s *bfa);
+void	bfa_dconf_modexit(struct bfa_s *bfa);
+bfa_status_t	bfa_dconf_update(struct bfa_s *bfa);
+
+/*
 *	IOC specific macros
  */
 #define bfa_ioc_pcifn(__ioc)		((__ioc)->pcidev.pci_func)
@@ -803,6 +848,7 @@ void bfa_ioc_fwver_get(struct bfa_ioc_s *ioc,
 			struct bfi_ioc_image_hdr_s *fwhdr);
 bfa_boolean_t bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc,
 			struct bfi_ioc_image_hdr_s *fwhdr);
+void bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event);
 bfa_status_t bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats);
 bfa_status_t bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc);
 
diff --git a/drivers/scsi/bfa/bfa_modules.h b/drivers/scsi/bfa/bfa_modules.h
index 1c6efd40a673..2d36e4823835 100644
--- a/drivers/scsi/bfa/bfa_modules.h
+++ b/drivers/scsi/bfa/bfa_modules.h
@@ -44,6 +44,7 @@ struct bfa_modules_s {
 	struct bfa_flash_s	flash;		/*  flash module */
 	struct bfa_diag_s	diag_mod;	/*  diagnostics module	*/
 	struct bfa_phy_s	phy;		/*  phy module		*/
+	struct bfa_dconf_mod_s	dconf_mod;	/*  DCONF common module	*/
 };
 
 /*
@@ -119,6 +120,7 @@ struct bfa_s {
 	struct list_head	reqq_waitq[BFI_IOC_MAX_CQS];
 	bfa_boolean_t		fcs;		/*  FCS is attached to BFA */
 	struct bfa_msix_s	msix;
+	int			bfa_aen_seq;
 };
 
 extern bfa_boolean_t bfa_auto_recover;
@@ -130,5 +132,6 @@ extern struct bfa_module_s hal_mod_lps;
 extern struct bfa_module_s hal_mod_uf;
 extern struct bfa_module_s hal_mod_rport;
 extern struct bfa_module_s hal_mod_fcp;
+extern struct bfa_module_s hal_mod_dconf;
 
 #endif /* __BFA_MODULES_H__ */
diff --git a/drivers/scsi/bfa/bfa_svc.c b/drivers/scsi/bfa/bfa_svc.c
index 21caaefce99f..aa8a0eaf91f9 100644
--- a/drivers/scsi/bfa/bfa_svc.c
+++ b/drivers/scsi/bfa/bfa_svc.c
@@ -16,6 +16,7 @@
  */
 
 #include "bfad_drv.h"
+#include "bfad_im.h"
 #include "bfa_plog.h"
 #include "bfa_cs.h"
 #include "bfa_modules.h"
@@ -2007,6 +2008,24 @@ bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
 	}
 }
 
+static void
+bfa_fcport_aen_post(struct bfa_fcport_s *fcport, enum bfa_port_aen_event event)
+{
+	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
+	struct bfa_aen_entry_s  *aen_entry;
+
+	bfad_get_aen_entry(bfad, aen_entry);
+	if (!aen_entry)
+		return;
+
+	aen_entry->aen_data.port.ioc_type = bfa_get_type(fcport->bfa);
+	aen_entry->aen_data.port.pwwn = fcport->pwwn;
+
+	/* Send the AEN notification */
+	bfad_im_post_vendor_event(aen_entry, bfad, ++fcport->bfa->bfa_aen_seq,
+				  BFA_AEN_CAT_PORT, event);
+}
+
 /*
  * FC PORT state machine functions
  */
@@ -2095,6 +2114,7 @@ bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
 		wwn2str(pwwn_buf, fcport->pwwn);
 		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
 			"Base port disabled: WWN = %s\n", pwwn_buf);
+		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
 		break;
 
 	case BFA_FCPORT_SM_LINKUP:
@@ -2155,6 +2175,7 @@ bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
 		wwn2str(pwwn_buf, fcport->pwwn);
 		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
 			"Base port disabled: WWN = %s\n", pwwn_buf);
+		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
 		break;
 
 	case BFA_FCPORT_SM_STOP:
@@ -2208,6 +2229,12 @@ bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
 		wwn2str(pwwn_buf, fcport->pwwn);
 		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
 			"Base port online: WWN = %s\n", pwwn_buf);
+		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ONLINE);
+
+		/* If QoS is enabled and it is not online, send AEN */
+		if (fcport->cfg.qos_enabled &&
+		    fcport->qos_attr.state != BFA_QOS_ONLINE)
+			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_QOS_NEG);
 		break;
 
 	case BFA_FCPORT_SM_LINKDOWN:
@@ -2234,6 +2261,7 @@ bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
 		wwn2str(pwwn_buf, fcport->pwwn);
 		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
 			"Base port disabled: WWN = %s\n", pwwn_buf);
+		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
 		break;
 
 	case BFA_FCPORT_SM_STOP:
@@ -2279,8 +2307,10 @@ bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
 		wwn2str(pwwn_buf, fcport->pwwn);
 		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
 			"Base port offline: WWN = %s\n", pwwn_buf);
+		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
 		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
 			"Base port disabled: WWN = %s\n", pwwn_buf);
+		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
 		break;
 
 	case BFA_FCPORT_SM_LINKDOWN:
@@ -2290,26 +2320,32 @@ bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
 		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
 				BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown");
 		wwn2str(pwwn_buf, fcport->pwwn);
-		if (BFA_PORT_IS_DISABLED(fcport->bfa))
+		if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
 			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
 				"Base port offline: WWN = %s\n", pwwn_buf);
-		else
+			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
+		} else {
 			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
 				"Base port (WWN = %s) "
 				"lost fabric connectivity\n", pwwn_buf);
+			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
+		}
 		break;
 
 	case BFA_FCPORT_SM_STOP:
 		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
 		bfa_fcport_reset_linkinfo(fcport);
 		wwn2str(pwwn_buf, fcport->pwwn);
-		if (BFA_PORT_IS_DISABLED(fcport->bfa))
+		if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
 			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
 				"Base port offline: WWN = %s\n", pwwn_buf);
-		else
+			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
+		} else {
 			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
 				"Base port (WWN = %s) "
 				"lost fabric connectivity\n", pwwn_buf);
+			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
+		}
 		break;
 
 	case BFA_FCPORT_SM_HWFAIL:
@@ -2317,13 +2353,16 @@ bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
 		bfa_fcport_reset_linkinfo(fcport);
 		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
 		wwn2str(pwwn_buf, fcport->pwwn);
-		if (BFA_PORT_IS_DISABLED(fcport->bfa))
+		if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
 			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
 				"Base port offline: WWN = %s\n", pwwn_buf);
-		else
+			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
+		} else {
 			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
 				"Base port (WWN = %s) "
 				"lost fabric connectivity\n", pwwn_buf);
+			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
+		}
 		break;
 
 	default:
@@ -2454,6 +2493,7 @@ bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
 		wwn2str(pwwn_buf, fcport->pwwn);
 		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
 			"Base port enabled: WWN = %s\n", pwwn_buf);
+		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE);
 		break;
 
 	case BFA_FCPORT_SM_STOP:
@@ -2508,6 +2548,7 @@ bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
 		wwn2str(pwwn_buf, fcport->pwwn);
 		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
 			"Base port enabled: WWN = %s\n", pwwn_buf);
+		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE);
 		break;
 
 	case BFA_FCPORT_SM_DISABLE:
@@ -2874,6 +2915,9 @@ bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
 
 	port_cfg->trl_def_speed = BFA_PORT_SPEED_1GBPS;
 
+	INIT_LIST_HEAD(&fcport->stats_pending_q);
+	INIT_LIST_HEAD(&fcport->statsclr_pending_q);
+
 	bfa_reqq_winit(&fcport->reqq_wait, bfa_fcport_qresume, fcport);
 }
 
@@ -3102,30 +3146,38 @@ bfa_fcport_fcoe_stats_swap(struct bfa_fcoe_stats_s *d,
 static void
 __bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete)
 {
-	struct bfa_fcport_s *fcport = cbarg;
+	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *)cbarg;
+	struct bfa_cb_pending_q_s *cb;
+	struct list_head *qe, *qen;
+	union bfa_fcport_stats_u *ret;
 
 	if (complete) {
-		if (fcport->stats_status == BFA_STATUS_OK) {
-			struct timeval tv;
-
-			/* Swap FC QoS or FCoE stats */
-			if (bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {
-				bfa_fcport_qos_stats_swap(
-					&fcport->stats_ret->fcqos,
-					&fcport->stats->fcqos);
-			} else {
-				bfa_fcport_fcoe_stats_swap(
-					&fcport->stats_ret->fcoe,
-					&fcport->stats->fcoe);
-
-				do_gettimeofday(&tv);
-				fcport->stats_ret->fcoe.secs_reset =
+		struct timeval tv;
+		if (fcport->stats_status == BFA_STATUS_OK)
+			do_gettimeofday(&tv);
+
+		list_for_each_safe(qe, qen, &fcport->stats_pending_q) {
+			bfa_q_deq(&fcport->stats_pending_q, &qe);
+			cb = (struct bfa_cb_pending_q_s *)qe;
+			if (fcport->stats_status == BFA_STATUS_OK) {
+				ret = (union bfa_fcport_stats_u *)cb->data;
+				/* Swap FC QoS or FCoE stats */
+				if (bfa_ioc_get_fcmode(&fcport->bfa->ioc))
+					bfa_fcport_qos_stats_swap(&ret->fcqos,
+							&fcport->stats->fcqos);
+				else {
+					bfa_fcport_fcoe_stats_swap(&ret->fcoe,
+							&fcport->stats->fcoe);
+					ret->fcoe.secs_reset =
 					tv.tv_sec - fcport->stats_reset_time;
+				}
 			}
+			bfa_cb_queue_status(fcport->bfa, &cb->hcb_qe,
+					fcport->stats_status);
 		}
-		fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status);
+		fcport->stats_status = BFA_STATUS_OK;
 	} else {
-		fcport->stats_busy = BFA_FALSE;
+		INIT_LIST_HEAD(&fcport->stats_pending_q);
 		fcport->stats_status = BFA_STATUS_OK;
 	}
 }
@@ -3143,8 +3195,7 @@ bfa_fcport_stats_get_timeout(void *cbarg)
 	}
 
 	fcport->stats_status = BFA_STATUS_ETIMER;
-	bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, __bfa_cb_fcport_stats_get,
-		fcport);
+	__bfa_cb_fcport_stats_get(fcport, BFA_TRUE);
 }
 
 static void
@@ -3174,7 +3225,9 @@ bfa_fcport_send_stats_get(void *cbarg)
 static void
 __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete)
 {
-	struct bfa_fcport_s *fcport = cbarg;
+	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
+	struct bfa_cb_pending_q_s *cb;
+	struct list_head *qe, *qen;
 
 	if (complete) {
 		struct timeval tv;
@@ -3184,10 +3237,15 @@ __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete)
 		 */
 		do_gettimeofday(&tv);
 		fcport->stats_reset_time = tv.tv_sec;
-
-		fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status);
+		list_for_each_safe(qe, qen, &fcport->statsclr_pending_q) {
+			bfa_q_deq(&fcport->statsclr_pending_q, &qe);
+			cb = (struct bfa_cb_pending_q_s *)qe;
+			bfa_cb_queue_status(fcport->bfa, &cb->hcb_qe,
+						fcport->stats_status);
+		}
+		fcport->stats_status = BFA_STATUS_OK;
 	} else {
-		fcport->stats_busy = BFA_FALSE;
+		INIT_LIST_HEAD(&fcport->statsclr_pending_q);
 		fcport->stats_status = BFA_STATUS_OK;
 	}
 }
@@ -3205,8 +3263,7 @@ bfa_fcport_stats_clr_timeout(void *cbarg)
 	}
 
 	fcport->stats_status = BFA_STATUS_ETIMER;
-	bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
-			__bfa_cb_fcport_stats_clr, fcport);
+	__bfa_cb_fcport_stats_clr(fcport, BFA_TRUE);
 }
 
 static void
@@ -3402,6 +3459,11 @@ bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
 				fcport->use_flash_cfg = BFA_FALSE;
 			}
 
+			if (fcport->cfg.qos_enabled)
+				fcport->qos_attr.state = BFA_QOS_OFFLINE;
+			else
+				fcport->qos_attr.state = BFA_QOS_DISABLED;
+
 			bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
 		}
 		break;
@@ -3426,28 +3488,26 @@ bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
 		/*
 		 * check for timer pop before processing the rsp
 		 */
-		if (fcport->stats_busy == BFA_FALSE ||
-		    fcport->stats_status == BFA_STATUS_ETIMER)
+		if (list_empty(&fcport->stats_pending_q) ||
+		    (fcport->stats_status == BFA_STATUS_ETIMER))
 			break;
 
 		bfa_timer_stop(&fcport->timer);
 		fcport->stats_status = i2hmsg.pstatsget_rsp->status;
-		bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
-				__bfa_cb_fcport_stats_get, fcport);
+		__bfa_cb_fcport_stats_get(fcport, BFA_TRUE);
 		break;
 
 	case BFI_FCPORT_I2H_STATS_CLEAR_RSP:
 		/*
 		 * check for timer pop before processing the rsp
 		 */
-		if (fcport->stats_busy == BFA_FALSE ||
-		    fcport->stats_status == BFA_STATUS_ETIMER)
+		if (list_empty(&fcport->statsclr_pending_q) ||
+		    (fcport->stats_status == BFA_STATUS_ETIMER))
 			break;
 
 		bfa_timer_stop(&fcport->timer);
 		fcport->stats_status = BFA_STATUS_OK;
-		bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
-				__bfa_cb_fcport_stats_clr, fcport);
+		__bfa_cb_fcport_stats_clr(fcport, BFA_TRUE);
 		break;
 
 	case BFI_FCPORT_I2H_ENABLE_AEN:
@@ -3779,25 +3839,25 @@ bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr)
  * Fetch port statistics (FCQoS or FCoE).
  */
 bfa_status_t
-bfa_fcport_get_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
-	bfa_cb_port_t cbfn, void *cbarg)
+bfa_fcport_get_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb)
 {
 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
 
-	if (fcport->stats_busy) {
-		bfa_trc(bfa, fcport->stats_busy);
-		return BFA_STATUS_DEVBUSY;
-	}
+	if (bfa_ioc_is_disabled(&bfa->ioc))
+		return BFA_STATUS_IOC_DISABLED;
 
-	fcport->stats_busy  = BFA_TRUE;
-	fcport->stats_ret   = stats;
-	fcport->stats_cbfn  = cbfn;
-	fcport->stats_cbarg = cbarg;
+	if (!list_empty(&fcport->statsclr_pending_q))
+		return BFA_STATUS_DEVBUSY;
 
-	bfa_fcport_send_stats_get(fcport);
+	if (list_empty(&fcport->stats_pending_q)) {
+		list_add_tail(&cb->hcb_qe.qe, &fcport->stats_pending_q);
+		bfa_fcport_send_stats_get(fcport);
+		bfa_timer_start(bfa, &fcport->timer,
+				bfa_fcport_stats_get_timeout,
+				fcport, BFA_FCPORT_STATS_TOV);
+	} else
+		list_add_tail(&cb->hcb_qe.qe, &fcport->stats_pending_q);
 
-	bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_get_timeout,
-			fcport, BFA_FCPORT_STATS_TOV);
 	return BFA_STATUS_OK;
 }
 
@@ -3805,27 +3865,25 @@ bfa_fcport_get_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
  * Reset port statistics (FCQoS or FCoE).
  */
 bfa_status_t
-bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn, void *cbarg)
+bfa_fcport_clear_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb)
 {
 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
 
-	if (fcport->stats_busy) {
-		bfa_trc(bfa, fcport->stats_busy);
+	if (!list_empty(&fcport->stats_pending_q))
 		return BFA_STATUS_DEVBUSY;
-	}
-
-	fcport->stats_busy  = BFA_TRUE;
-	fcport->stats_cbfn  = cbfn;
-	fcport->stats_cbarg = cbarg;
 
-	bfa_fcport_send_stats_clear(fcport);
+	if (list_empty(&fcport->statsclr_pending_q)) {
+		list_add_tail(&cb->hcb_qe.qe, &fcport->statsclr_pending_q);
+		bfa_fcport_send_stats_clear(fcport);
+		bfa_timer_start(bfa, &fcport->timer,
+				bfa_fcport_stats_clr_timeout,
+				fcport, BFA_FCPORT_STATS_TOV);
+	} else
+		list_add_tail(&cb->hcb_qe.qe, &fcport->statsclr_pending_q);
 
-	bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_clr_timeout,
-			fcport, BFA_FCPORT_STATS_TOV);
 	return BFA_STATUS_OK;
 }
 
-
 /*
  * Fetch port attributes.
  */
@@ -4619,6 +4677,7 @@ bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
 		rp = BFA_RPORT_FROM_TAG(bfa, msg.create_rsp->bfa_handle);
 		rp->fw_handle = msg.create_rsp->fw_handle;
 		rp->qos_attr = msg.create_rsp->qos_attr;
+		bfa_rport_set_lunmask(bfa, rp);
 		WARN_ON(msg.create_rsp->status != BFA_STATUS_OK);
 		bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
 		break;
@@ -4626,6 +4685,7 @@ bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
 	case BFI_RPORT_I2H_DELETE_RSP:
 		rp = BFA_RPORT_FROM_TAG(bfa, msg.delete_rsp->bfa_handle);
 		WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK);
+		bfa_rport_unset_lunmask(bfa, rp);
 		bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
 		break;
 
@@ -4706,6 +4766,37 @@ bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed)
 	bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED);
 }
 
+/* Set Rport LUN Mask */
+void
+bfa_rport_set_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp)
+{
+	struct bfa_lps_mod_s	*lps_mod = BFA_LPS_MOD(bfa);
+	wwn_t	lp_wwn, rp_wwn;
+	u8 lp_tag = (u8)rp->rport_info.lp_tag;
+
+	rp_wwn = ((struct bfa_fcs_rport_s *)rp->rport_drv)->pwwn;
+	lp_wwn = (BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag))->pwwn;
+
+	BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag)->lun_mask =
+					rp->lun_mask = BFA_TRUE;
+	bfa_fcpim_lunmask_rp_update(bfa, lp_wwn, rp_wwn, rp->rport_tag, lp_tag);
+}
+
+/* Unset Rport LUN mask */
+void
+bfa_rport_unset_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp)
+{
+	struct bfa_lps_mod_s	*lps_mod = BFA_LPS_MOD(bfa);
+	wwn_t	lp_wwn, rp_wwn;
+
+	rp_wwn = ((struct bfa_fcs_rport_s *)rp->rport_drv)->pwwn;
+	lp_wwn = (BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag))->pwwn;
+
+	BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag)->lun_mask =
+				rp->lun_mask = BFA_FALSE;
+	bfa_fcpim_lunmask_rp_update(bfa, lp_wwn, rp_wwn,
+			BFA_RPORT_TAG_INVALID, BFA_LP_TAG_INVALID);
+}
 
 /*
  * SGPG related functions
@@ -5517,11 +5608,29 @@ bfa_fcdiag_loopback(struct bfa_s *bfa, enum bfa_port_opmode opmode,
 		return BFA_STATUS_PORT_NOT_DISABLED;
 	}
 
-	/* Check if the speed is supported */
-	bfa_fcport_get_attr(bfa, &attr);
-	bfa_trc(fcdiag, attr.speed_supported);
-	if (speed > attr.speed_supported)
-		return BFA_STATUS_UNSUPP_SPEED;
+	/*
+	 * Check if input speed is supported by the port mode
+	 */
+	if (bfa_ioc_get_type(&bfa->ioc) == BFA_IOC_TYPE_FC) {
+		if (!(speed == BFA_PORT_SPEED_1GBPS ||
+		      speed == BFA_PORT_SPEED_2GBPS ||
+		      speed == BFA_PORT_SPEED_4GBPS ||
+		      speed == BFA_PORT_SPEED_8GBPS ||
+		      speed == BFA_PORT_SPEED_16GBPS ||
+		      speed == BFA_PORT_SPEED_AUTO)) {
+			bfa_trc(fcdiag, speed);
+			return BFA_STATUS_UNSUPP_SPEED;
+		}
+		bfa_fcport_get_attr(bfa, &attr);
+		bfa_trc(fcdiag, attr.speed_supported);
+		if (speed > attr.speed_supported)
+			return BFA_STATUS_UNSUPP_SPEED;
+	} else {
+		if (speed != BFA_PORT_SPEED_10GBPS) {
+			bfa_trc(fcdiag, speed);
+			return BFA_STATUS_UNSUPP_SPEED;
+		}
+	}
 
 	/* For Mezz card, port speed entered needs to be checked */
 	if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type)) {
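The statistics rework above replaces the single stats_busy flag with pending queues: any number of callers can queue a request, only the first one sends the mailbox command, and the one firmware response (or timeout) drains the whole queue. A compact sketch of that shape, with a plain array standing in for the list_head plumbing and made-up names throughout:

#include <stdio.h>

#define MAX_PENDING 8

struct stats_req { void (*done)(int status); };

static struct stats_req *pending[MAX_PENDING];
static int npending;

/* Hedged sketch: queue the caller, kick the firmware only once. */
static int stats_get(struct stats_req *req)
{
	if (npending == MAX_PENDING)
		return -1;			/* DEVBUSY equivalent */
	pending[npending++] = req;
	if (npending == 1)
		printf("first requester: send stats-get to firmware\n");
	return 0;
}

static void stats_complete(int status)		/* firmware response or timer pop */
{
	int i;

	for (i = 0; i < npending; i++)
		pending[i]->done(status);
	npending = 0;
}

static void my_done(int status) { printf("stats done, status %d\n", status); }

int main(void)
{
	struct stats_req a = { my_done }, b = { my_done };

	stats_get(&a);
	stats_get(&b);		/* piggybacks on the outstanding request */
	stats_complete(0);	/* both callbacks fire */
	return 0;
}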
diff --git a/drivers/scsi/bfa/bfa_svc.h b/drivers/scsi/bfa/bfa_svc.h
index fbe513a671b5..95adb86d3769 100644
--- a/drivers/scsi/bfa/bfa_svc.h
+++ b/drivers/scsi/bfa/bfa_svc.h
@@ -297,6 +297,7 @@ struct bfa_rport_s {
 	void		*rport_drv;	/*  fcs/driver rport object	    */
 	u16	fw_handle;	/*  firmware rport handle	    */
 	u16	rport_tag;	/*  BFA rport tag		    */
+	u8	lun_mask;	/*  LUN mask flag		    */
 	struct bfa_rport_info_s rport_info; /*  rport info from fcs/driver */
 	struct bfa_reqq_wait_s reqq_wait; /*  to wait for room in reqq     */
 	struct bfa_cb_qe_s hcb_qe;	/*  BFA callback qelem		    */
@@ -404,6 +405,7 @@ struct bfa_lps_s {
 	u8		bb_scn;		/*  local BB_SCN		*/
 	u8		lsrjt_rsn;	/*  LSRJT reason		*/
 	u8		lsrjt_expl;	/*  LSRJT explanation		*/
+	u8		lun_mask;	/*  LUN mask flag		*/
 	wwn_t		pwwn;		/*  port wwn of lport		*/
 	wwn_t		nwwn;		/*  node wwn of lport		*/
 	wwn_t		pr_pwwn;	/*  port wwn of lport peer	*/
@@ -441,7 +443,6 @@ void	bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
  */
 
 #define BFA_FCPORT(_bfa)	(&((_bfa)->modules.port))
-typedef void (*bfa_cb_port_t) (void *cbarg, enum bfa_status status);
 
 /*
  * Link notification data structure
@@ -495,13 +496,11 @@ struct bfa_fcport_s {
 	u8			*stats_kva;
 	u64		stats_pa;
 	union bfa_fcport_stats_u *stats;
-	union bfa_fcport_stats_u *stats_ret; /*  driver stats location */
 	bfa_status_t		stats_status; /*  stats/statsclr status */
-	bfa_boolean_t		stats_busy; /*  outstanding stats/statsclr */
+	struct list_head	stats_pending_q;
+	struct list_head	statsclr_pending_q;
 	bfa_boolean_t		stats_qfull;
 	u32		stats_reset_time; /*  stats reset time stamp */
-	bfa_cb_port_t		stats_cbfn; /*  driver callback function */
-	void			*stats_cbarg; /* *!< user callback arg */
 	bfa_boolean_t		diag_busy; /*  diag busy status */
 	bfa_boolean_t		beacon; /*  port beacon status */
 	bfa_boolean_t		link_e2e_beacon; /*  link beacon status */
@@ -552,10 +551,9 @@ void bfa_fcport_beacon(void *dev, bfa_boolean_t beacon,
 			bfa_boolean_t link_e2e_beacon);
 bfa_boolean_t	bfa_fcport_is_linkup(struct bfa_s *bfa);
 bfa_status_t bfa_fcport_get_stats(struct bfa_s *bfa,
-				  union bfa_fcport_stats_u *stats,
-				  bfa_cb_port_t cbfn, void *cbarg);
-bfa_status_t bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn,
-				    void *cbarg);
+			struct bfa_cb_pending_q_s *cb);
+bfa_status_t bfa_fcport_clear_stats(struct bfa_s *bfa,
+			struct bfa_cb_pending_q_s *cb);
 bfa_boolean_t bfa_fcport_is_qos_enabled(struct bfa_s *bfa);
 bfa_boolean_t bfa_fcport_is_trunk_enabled(struct bfa_s *bfa);
 bfa_status_t bfa_fcport_is_pbcdisabled(struct bfa_s *bfa);
@@ -578,6 +576,19 @@ void bfa_cb_rport_qos_scn_prio(void *rport,
 			       struct bfa_rport_qos_attr_s new_qos_attr);
 
 /*
+ *	Rport LUN masking related
+ */
+#define BFA_RPORT_TAG_INVALID	0xffff
+#define BFA_LP_TAG_INVALID	0xff
+void	bfa_rport_set_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp);
+void	bfa_rport_unset_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp);
+bfa_boolean_t	bfa_rport_lunmask_active(struct bfa_rport_s *rp);
+wwn_t	bfa_rport_get_pwwn(struct bfa_s *bfa, struct bfa_rport_s *rp);
+struct bfa_rport_s *bfa_rport_get_by_wwn(struct bfa_s *bfa, u16 vf_id,
+					 wwn_t *lpwwn, wwn_t rpwwn);
+void *bfa_cb_get_rp_by_wwn(void *arg, u16 vf_id, wwn_t *lpwwn, wwn_t rpwwn);
+
+/*
  * bfa fcxp API functions
  */
 struct bfa_fcxp_s *bfa_fcxp_alloc(void *bfad_fcxp, struct bfa_s *bfa,
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index beb30a748ea5..66fb72531b34 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -1348,7 +1348,7 @@ int
 bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
 {
 	struct bfad_s	*bfad;
-	int		error = -ENODEV, retval;
+	int		error = -ENODEV, retval, i;
 
 	/* For single port cards - only claim function 0 */
 	if ((pdev->device == BFA_PCI_DEVICE_ID_FC_8G1P) &&
@@ -1372,6 +1372,12 @@ bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
 	bfa_trc_init(bfad->trcmod);
 	bfa_trc(bfad, bfad_inst);
 
+	/* AEN INIT */
+	INIT_LIST_HEAD(&bfad->free_aen_q);
+	INIT_LIST_HEAD(&bfad->active_aen_q);
+	for (i = 0; i < BFA_AEN_MAX_ENTRY; i++)
+		list_add_tail(&bfad->aen_list[i].qe, &bfad->free_aen_q);
+
 	if (!(bfad_load_fwimg(pdev))) {
 		kfree(bfad->trcmod);
 		goto out_alloc_trace_failure;
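
The probe path above seeds a fixed pool of AEN entries on free_aen_q; entries migrate to active_aen_q while a notification is in flight and are recycled afterwards (see the bfad_im changes later in this patch). A sketch of that allocation step, using only the list helpers already involved here; the function name is hypothetical, and the driver itself uses the bfad_get_aen_entry() macro for this purpose.

	static struct bfa_aen_entry_s *
	aen_entry_get(struct bfad_s *bfad)
	{
		struct bfa_aen_entry_s *entry = NULL;
		unsigned long flags;

		spin_lock_irqsave(&bfad->bfad_aen_spinlock, flags);
		if (!list_empty(&bfad->free_aen_q)) {
			/* take the oldest free entry, park it on the active list */
			entry = list_first_entry(&bfad->free_aen_q,
						 struct bfa_aen_entry_s, qe);
			list_move_tail(&entry->qe, &bfad->active_aen_q);
		}
		spin_unlock_irqrestore(&bfad->bfad_aen_spinlock, flags);

		return entry;	/* NULL when the pool is exhausted */
	}
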
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index 89f863ed2334..06fc00caeb41 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -56,7 +56,7 @@ bfad_iocmd_ioc_disable(struct bfad_s *bfad, void *cmd)
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
 	if (bfad->disable_active) {
 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
-		return EBUSY;
+		return -EBUSY;
 	}
 
 	bfad->disable_active = BFA_TRUE;
@@ -90,6 +90,7 @@ bfad_iocmd_ioc_get_info(struct bfad_s *bfad, void *cmd)
 	bfa_get_adapter_serial_num(&bfad->bfa, iocmd->serialnum);
 	iocmd->factorynwwn = pattr.factorynwwn;
 	iocmd->factorypwwn = pattr.factorypwwn;
+	iocmd->bfad_num = bfad->inst_no;
 	im_port = bfad->pport.im_port;
 	iocmd->host = im_port->shost->host_no;
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
@@ -178,6 +179,38 @@ out:
 }
 
 int
+bfad_iocmd_ioc_reset_stats(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
+{
+	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+	unsigned long	flags;
+
+	if (v_cmd == IOCMD_IOC_RESET_STATS) {
+		bfa_ioc_clear_stats(&bfad->bfa);
+		iocmd->status = BFA_STATUS_OK;
+	} else if (v_cmd == IOCMD_IOC_RESET_FWSTATS) {
+		spin_lock_irqsave(&bfad->bfad_lock, flags);
+		iocmd->status = bfa_ioc_fw_stats_clear(&bfad->bfa.ioc);
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	}
+
+	return 0;
+}
+
+int
+bfad_iocmd_ioc_set_name(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
+{
+	struct bfa_bsg_ioc_name_s *iocmd = (struct bfa_bsg_ioc_name_s *) cmd;
+
+	if (v_cmd == IOCMD_IOC_SET_ADAPTER_NAME)
+		strcpy(bfad->adapter_name, iocmd->name);
+	else if (v_cmd == IOCMD_IOC_SET_PORT_NAME)
+		strcpy(bfad->port_name, iocmd->name);
+
+	iocmd->status = BFA_STATUS_OK;
+	return 0;
+}
+
+int
 bfad_iocmd_iocfc_get_attr(struct bfad_s *bfad, void *cmd)
 {
 	struct bfa_bsg_iocfc_attr_s *iocmd = (struct bfa_bsg_iocfc_attr_s *)cmd;
@@ -306,6 +339,81 @@ out:
 	return 0;
 }
 
+int
+bfad_iocmd_port_reset_stats(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+	struct bfad_hal_comp fcomp;
+	unsigned long	flags;
+
+	init_completion(&fcomp.comp);
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_port_clear_stats(&bfad->bfa.modules.port,
+					bfad_hcb_comp, &fcomp);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	if (iocmd->status != BFA_STATUS_OK) {
+		bfa_trc(bfad, iocmd->status);
+		return 0;
+	}
+	wait_for_completion(&fcomp.comp);
+	iocmd->status = fcomp.status;
+	return 0;
+}
+
+int
+bfad_iocmd_set_port_cfg(struct bfad_s *bfad, void *iocmd, unsigned int v_cmd)
+{
+	struct bfa_bsg_port_cfg_s *cmd = (struct bfa_bsg_port_cfg_s *)iocmd;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	if (v_cmd == IOCMD_PORT_CFG_TOPO)
+		cmd->status = bfa_fcport_cfg_topology(&bfad->bfa, cmd->param);
+	else if (v_cmd == IOCMD_PORT_CFG_SPEED)
+		cmd->status = bfa_fcport_cfg_speed(&bfad->bfa, cmd->param);
+	else if (v_cmd == IOCMD_PORT_CFG_ALPA)
+		cmd->status = bfa_fcport_cfg_hardalpa(&bfad->bfa, cmd->param);
+	else if (v_cmd == IOCMD_PORT_CLR_ALPA)
+		cmd->status = bfa_fcport_clr_hardalpa(&bfad->bfa);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	return 0;
+}
+
+int
+bfad_iocmd_port_cfg_maxfrsize(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_port_cfg_maxfrsize_s *iocmd =
+				(struct bfa_bsg_port_cfg_maxfrsize_s *)cmd;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_fcport_cfg_maxfrsize(&bfad->bfa, iocmd->maxfrsize);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	return 0;
+}
+
+int
+bfad_iocmd_port_cfg_bbsc(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
+{
+	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	if (bfa_ioc_get_type(&bfad->bfa.ioc) == BFA_IOC_TYPE_FC) {
+		if (v_cmd == IOCMD_PORT_BBSC_ENABLE)
+			fcport->cfg.bb_scn_state = BFA_TRUE;
+		else if (v_cmd == IOCMD_PORT_BBSC_DISABLE)
+			fcport->cfg.bb_scn_state = BFA_FALSE;
+	}
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	iocmd->status = BFA_STATUS_OK;
+	return 0;
+}
+
 static int
 bfad_iocmd_lport_get_attr(struct bfad_s *bfad, void *cmd)
 {
@@ -354,6 +462,40 @@ out:
 }
 
 int
+bfad_iocmd_lport_reset_stats(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_fcs_lport_s *fcs_port;
+	struct bfa_bsg_reset_stats_s *iocmd =
+			(struct bfa_bsg_reset_stats_s *)cmd;
+	struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
+	struct list_head *qe, *qen;
+	struct bfa_itnim_s *itnim;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+				iocmd->vf_id, iocmd->vpwwn);
+	if (fcs_port == NULL) {
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+		goto out;
+	}
+
+	bfa_fcs_lport_clear_stats(fcs_port);
+	/* clear IO stats from all active itnims */
+	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
+		itnim = (struct bfa_itnim_s *) qe;
+		if (itnim->rport->rport_info.lp_tag != fcs_port->lp_tag)
+			continue;
+		bfa_itnim_clear_stats(itnim);
+	}
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	iocmd->status = BFA_STATUS_OK;
+out:
+	return 0;
+}
+
+int
 bfad_iocmd_lport_get_iostats(struct bfad_s *bfad, void *cmd)
 {
 	struct bfa_fcs_lport_s *fcs_port;
@@ -389,7 +531,7 @@ bfad_iocmd_lport_get_rports(struct bfad_s *bfad, void *cmd,
 	void	*iocmd_bufptr;
 
 	if (iocmd->nrports == 0)
-		return EINVAL;
+		return -EINVAL;
 
 	if (bfad_chk_iocmd_sz(payload_len,
 			sizeof(struct bfa_bsg_lport_get_rports_s),
@@ -539,6 +681,152 @@ out:
 	return 0;
 }
 
+int
+bfad_iocmd_rport_clr_stats(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_rport_reset_stats_s *iocmd =
+				(struct bfa_bsg_rport_reset_stats_s *)cmd;
+	struct bfa_fcs_lport_s *fcs_port;
+	struct bfa_fcs_rport_s *fcs_rport;
+	struct bfa_rport_s *rport;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+				iocmd->vf_id, iocmd->pwwn);
+	if (fcs_port == NULL) {
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+		goto out;
+	}
+
+	fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
+	if (fcs_rport == NULL) {
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
+		goto out;
+	}
+
+	memset((char *)&fcs_rport->stats, 0, sizeof(struct bfa_rport_stats_s));
+	rport = bfa_fcs_rport_get_halrport(fcs_rport);
+	memset(&rport->stats, 0, sizeof(rport->stats));
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	iocmd->status = BFA_STATUS_OK;
+out:
+	return 0;
+}
+
+int
+bfad_iocmd_rport_set_speed(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_rport_set_speed_s *iocmd =
+				(struct bfa_bsg_rport_set_speed_s *)cmd;
+	struct bfa_fcs_lport_s *fcs_port;
+	struct bfa_fcs_rport_s *fcs_rport;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+				iocmd->vf_id, iocmd->pwwn);
+	if (fcs_port == NULL) {
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+		goto out;
+	}
+
+	fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
+	if (fcs_rport == NULL) {
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
+		goto out;
+	}
+
+	fcs_rport->rpf.assigned_speed  = iocmd->speed;
+	/* Set this speed in f/w only if the RPSC speed is not available */
+	if (fcs_rport->rpf.rpsc_speed == BFA_PORT_SPEED_UNKNOWN)
+		bfa_rport_speed(fcs_rport->bfa_rport, iocmd->speed);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	iocmd->status = BFA_STATUS_OK;
+out:
+	return 0;
+}
+
+int
+bfad_iocmd_vport_get_attr(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_fcs_vport_s *fcs_vport;
+	struct bfa_bsg_vport_attr_s *iocmd = (struct bfa_bsg_vport_attr_s *)cmd;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs,
+				iocmd->vf_id, iocmd->vpwwn);
+	if (fcs_vport == NULL) {
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+		iocmd->status = BFA_STATUS_UNKNOWN_VWWN;
+		goto out;
+	}
+
+	bfa_fcs_vport_get_attr(fcs_vport, &iocmd->vport_attr);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	iocmd->status = BFA_STATUS_OK;
+out:
+	return 0;
+}
+
+int
+bfad_iocmd_vport_get_stats(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_fcs_vport_s *fcs_vport;
+	struct bfa_bsg_vport_stats_s *iocmd =
+				(struct bfa_bsg_vport_stats_s *)cmd;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs,
+				iocmd->vf_id, iocmd->vpwwn);
+	if (fcs_vport == NULL) {
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+		iocmd->status = BFA_STATUS_UNKNOWN_VWWN;
+		goto out;
+	}
+
+	memcpy((void *)&iocmd->vport_stats, (void *)&fcs_vport->vport_stats,
+		sizeof(struct bfa_vport_stats_s));
+	memcpy((void *)&iocmd->vport_stats.port_stats,
+	       (void *)&fcs_vport->lport.stats,
+		sizeof(struct bfa_lport_stats_s));
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	iocmd->status = BFA_STATUS_OK;
+out:
+	return 0;
+}
+
+int
+bfad_iocmd_vport_clr_stats(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_fcs_vport_s *fcs_vport;
+	struct bfa_bsg_reset_stats_s *iocmd =
+				(struct bfa_bsg_reset_stats_s *)cmd;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs,
+				iocmd->vf_id, iocmd->vpwwn);
+	if (fcs_vport == NULL) {
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+		iocmd->status = BFA_STATUS_UNKNOWN_VWWN;
+		goto out;
+	}
+
+	memset(&fcs_vport->vport_stats, 0, sizeof(struct bfa_vport_stats_s));
+	memset(&fcs_vport->lport.stats, 0, sizeof(struct bfa_lport_stats_s));
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	iocmd->status = BFA_STATUS_OK;
+out:
+	return 0;
+}
+
 static int
 bfad_iocmd_fabric_get_lports(struct bfad_s *bfad, void *cmd,
 			unsigned int payload_len)
@@ -582,6 +870,66 @@ out:
 }
 
 int
+bfad_iocmd_ratelim(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
+{
+	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+
+	if (cmd == IOCMD_RATELIM_ENABLE)
+		fcport->cfg.ratelimit = BFA_TRUE;
+	else if (cmd == IOCMD_RATELIM_DISABLE)
+		fcport->cfg.ratelimit = BFA_FALSE;
+
+	if (fcport->cfg.trl_def_speed == BFA_PORT_SPEED_UNKNOWN)
+		fcport->cfg.trl_def_speed = BFA_PORT_SPEED_1GBPS;
+
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	iocmd->status = BFA_STATUS_OK;
+
+	return 0;
+}
+
+int
+bfad_iocmd_ratelim_speed(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
+{
+	struct bfa_bsg_trl_speed_s *iocmd = (struct bfa_bsg_trl_speed_s *)pcmd;
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+
+	/* Auto, and speeds greater than the supported speed, are invalid */
+	if ((iocmd->speed == BFA_PORT_SPEED_AUTO) ||
+	    (iocmd->speed > fcport->speed_sup)) {
+		iocmd->status = BFA_STATUS_UNSUPP_SPEED;
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+		return 0;
+	}
+
+	fcport->cfg.trl_def_speed = iocmd->speed;
+	iocmd->status = BFA_STATUS_OK;
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	return 0;
+}
+
+int
+bfad_iocmd_cfg_fcpim(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_fcpim_s *iocmd = (struct bfa_bsg_fcpim_s *)cmd;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	bfa_fcpim_path_tov_set(&bfad->bfa, iocmd->param);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	iocmd->status = BFA_STATUS_OK;
+	return 0;
+}
+
+int
 bfad_iocmd_fcpim_get_modstats(struct bfad_s *bfad, void *cmd)
 {
 	struct bfa_bsg_fcpim_modstats_s *iocmd =
@@ -604,6 +952,28 @@ bfad_iocmd_fcpim_get_modstats(struct bfad_s *bfad, void *cmd)
 }
 
 int
+bfad_iocmd_fcpim_clr_modstats(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_fcpim_modstatsclr_s *iocmd =
+				(struct bfa_bsg_fcpim_modstatsclr_s *)cmd;
+	struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
+	struct list_head *qe, *qen;
+	struct bfa_itnim_s *itnim;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
+		itnim = (struct bfa_itnim_s *) qe;
+		bfa_itnim_clear_stats(itnim);
+	}
+	memset(&fcpim->del_itn_stats, 0,
+		sizeof(struct bfa_fcpim_del_itn_stats_s));
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	iocmd->status = BFA_STATUS_OK;
+	return 0;
+}
+
+int
 bfad_iocmd_fcpim_get_del_itn_stats(struct bfad_s *bfad, void *cmd)
 {
 	struct bfa_bsg_fcpim_del_itn_stats_s *iocmd =
@@ -670,6 +1040,35 @@ bfad_iocmd_itnim_get_iostats(struct bfad_s *bfad, void *cmd)
 }
 
 static int
+bfad_iocmd_itnim_reset_stats(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_rport_reset_stats_s *iocmd =
+			(struct bfa_bsg_rport_reset_stats_s *)cmd;
+	struct bfa_fcs_lport_s	*fcs_port;
+	struct bfa_fcs_itnim_s	*itnim;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+				iocmd->vf_id, iocmd->pwwn);
+	if (!fcs_port)
+		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+	else {
+		itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
+		if (itnim == NULL)
+			iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
+		else {
+			iocmd->status = BFA_STATUS_OK;
+			bfa_fcs_itnim_stats_clear(fcs_port, iocmd->rpwwn);
+			bfa_itnim_clear_stats(bfa_fcs_itnim_get_halitn(itnim));
+		}
+	}
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	return 0;
+}
+
+static int
 bfad_iocmd_itnim_get_itnstats(struct bfad_s *bfad, void *cmd)
 {
 	struct bfa_bsg_itnim_itnstats_s *iocmd =
@@ -1511,11 +1910,545 @@ out:
 	return 0;
 }
 
+#define BFA_DEBUG_FW_CORE_CHUNK_SZ	0x4000U /* 16K chunks for FW dump */
+int
+bfad_iocmd_debug_fw_core(struct bfad_s *bfad, void *cmd,
+			unsigned int payload_len)
+{
+	struct bfa_bsg_debug_s *iocmd = (struct bfa_bsg_debug_s *)cmd;
+	void	*iocmd_bufptr;
+	unsigned long	flags;
+
+	if (bfad_chk_iocmd_sz(payload_len, sizeof(struct bfa_bsg_debug_s),
+			BFA_DEBUG_FW_CORE_CHUNK_SZ) != BFA_STATUS_OK) {
+		iocmd->status = BFA_STATUS_VERSION_FAIL;
+		return 0;
+	}
+
+	if (iocmd->bufsz < BFA_DEBUG_FW_CORE_CHUNK_SZ ||
+			!IS_ALIGNED(iocmd->bufsz, sizeof(u16)) ||
+			!IS_ALIGNED(iocmd->offset, sizeof(u32))) {
+		bfa_trc(bfad, BFA_DEBUG_FW_CORE_CHUNK_SZ);
+		iocmd->status = BFA_STATUS_EINVAL;
+		goto out;
+	}
+
+	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_debug_s);
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_ioc_debug_fwcore(&bfad->bfa.ioc, iocmd_bufptr,
+				(u32 *)&iocmd->offset, &iocmd->bufsz);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+out:
+	return 0;
+}
+
+int
+bfad_iocmd_debug_ctl(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
+{
+	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+	unsigned long	flags;
+
+	if (v_cmd == IOCMD_DEBUG_FW_STATE_CLR) {
+		spin_lock_irqsave(&bfad->bfad_lock, flags);
+		bfad->bfa.ioc.dbg_fwsave_once = BFA_TRUE;
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	} else if (v_cmd == IOCMD_DEBUG_PORTLOG_CLR)
+		bfad->plog_buf.head = bfad->plog_buf.tail = 0;
+	else if (v_cmd == IOCMD_DEBUG_START_DTRC)
+		bfa_trc_init(bfad->trcmod);
+	else if (v_cmd == IOCMD_DEBUG_STOP_DTRC)
+		bfa_trc_stop(bfad->trcmod);
+
+	iocmd->status = BFA_STATUS_OK;
+	return 0;
+}
+
+int
+bfad_iocmd_porglog_ctl(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_portlogctl_s *iocmd = (struct bfa_bsg_portlogctl_s *)cmd;
+
+	if (iocmd->ctl == BFA_TRUE)
+		bfad->plog_buf.plog_enabled = 1;
+	else
+		bfad->plog_buf.plog_enabled = 0;
+
+	iocmd->status = BFA_STATUS_OK;
+	return 0;
+}
+
+int
+bfad_iocmd_fcpim_cfg_profile(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
+{
+	struct bfa_bsg_fcpim_profile_s *iocmd =
+				(struct bfa_bsg_fcpim_profile_s *)cmd;
+	struct timeval  tv;
+	unsigned long	flags;
+
+	do_gettimeofday(&tv);
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	if (v_cmd == IOCMD_FCPIM_PROFILE_ON)
+		iocmd->status = bfa_fcpim_profile_on(&bfad->bfa, tv.tv_sec);
+	else if (v_cmd == IOCMD_FCPIM_PROFILE_OFF)
+		iocmd->status = bfa_fcpim_profile_off(&bfad->bfa);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	return 0;
+}
+
+static int
+bfad_iocmd_itnim_get_ioprofile(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_itnim_ioprofile_s *iocmd =
+				(struct bfa_bsg_itnim_ioprofile_s *)cmd;
+	struct bfa_fcs_lport_s *fcs_port;
+	struct bfa_fcs_itnim_s *itnim;
+	unsigned long   flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+				iocmd->vf_id, iocmd->lpwwn);
+	if (!fcs_port)
+		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+	else {
+		itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
+		if (itnim == NULL)
+			iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
+		else
+			iocmd->status = bfa_itnim_get_ioprofile(
+						bfa_fcs_itnim_get_halitn(itnim),
+						&iocmd->ioprofile);
+	}
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	return 0;
+}
+
+int
+bfad_iocmd_fcport_get_stats(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_fcport_stats_s *iocmd =
+				(struct bfa_bsg_fcport_stats_s *)cmd;
+	struct bfad_hal_comp fcomp;
+	unsigned long	flags;
+	struct bfa_cb_pending_q_s cb_qe;
+
+	init_completion(&fcomp.comp);
+	bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
+			   &fcomp, &iocmd->stats);
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	if (iocmd->status != BFA_STATUS_OK) {
+		bfa_trc(bfad, iocmd->status);
+		goto out;
+	}
+	wait_for_completion(&fcomp.comp);
+	iocmd->status = fcomp.status;
+out:
+	return 0;
+}
+
+int
+bfad_iocmd_fcport_reset_stats(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+	struct bfad_hal_comp fcomp;
+	unsigned long	flags;
+	struct bfa_cb_pending_q_s cb_qe;
+
+	init_completion(&fcomp.comp);
+	bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, &fcomp, NULL);
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	if (iocmd->status != BFA_STATUS_OK) {
+		bfa_trc(bfad, iocmd->status);
+		goto out;
+	}
+	wait_for_completion(&fcomp.comp);
+	iocmd->status = fcomp.status;
+out:
+	return 0;
+}
+
+int
+bfad_iocmd_boot_cfg(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_boot_s *iocmd = (struct bfa_bsg_boot_s *)cmd;
+	struct bfad_hal_comp fcomp;
+	unsigned long	flags;
+
+	init_completion(&fcomp.comp);
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
+			BFA_FLASH_PART_BOOT, PCI_FUNC(bfad->pcidev->devfn),
+			&iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0,
+			bfad_hcb_comp, &fcomp);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	if (iocmd->status != BFA_STATUS_OK)
+		goto out;
+	wait_for_completion(&fcomp.comp);
+	iocmd->status = fcomp.status;
+out:
+	return 0;
+}
+
+int
+bfad_iocmd_boot_query(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_boot_s *iocmd = (struct bfa_bsg_boot_s *)cmd;
+	struct bfad_hal_comp fcomp;
+	unsigned long	flags;
+
+	init_completion(&fcomp.comp);
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa),
+			BFA_FLASH_PART_BOOT, PCI_FUNC(bfad->pcidev->devfn),
+			&iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0,
+			bfad_hcb_comp, &fcomp);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	if (iocmd->status != BFA_STATUS_OK)
+		goto out;
+	wait_for_completion(&fcomp.comp);
+	iocmd->status = fcomp.status;
+out:
+	return 0;
+}
+
+int
+bfad_iocmd_preboot_query(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_preboot_s *iocmd = (struct bfa_bsg_preboot_s *)cmd;
+	struct bfi_iocfc_cfgrsp_s *cfgrsp = bfad->bfa.iocfc.cfgrsp;
+	struct bfa_boot_pbc_s *pbcfg = &iocmd->cfg;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	pbcfg->enable = cfgrsp->pbc_cfg.boot_enabled;
+	pbcfg->nbluns = cfgrsp->pbc_cfg.nbluns;
+	pbcfg->speed = cfgrsp->pbc_cfg.port_speed;
+	memcpy(pbcfg->pblun, cfgrsp->pbc_cfg.blun, sizeof(pbcfg->pblun));
+	iocmd->status = BFA_STATUS_OK;
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	return 0;
+}
+
+int
+bfad_iocmd_ethboot_cfg(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_ethboot_s *iocmd = (struct bfa_bsg_ethboot_s *)cmd;
+	struct bfad_hal_comp fcomp;
+	unsigned long	flags;
+
+	init_completion(&fcomp.comp);
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
+				BFA_FLASH_PART_PXECFG,
+				bfad->bfa.ioc.port_id, &iocmd->cfg,
+				sizeof(struct bfa_ethboot_cfg_s), 0,
+				bfad_hcb_comp, &fcomp);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	if (iocmd->status != BFA_STATUS_OK)
+		goto out;
+	wait_for_completion(&fcomp.comp);
+	iocmd->status = fcomp.status;
+out:
+	return 0;
+}
+
+int
+bfad_iocmd_ethboot_query(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_ethboot_s *iocmd = (struct bfa_bsg_ethboot_s *)cmd;
+	struct bfad_hal_comp fcomp;
+	unsigned long	flags;
+
+	init_completion(&fcomp.comp);
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa),
+				BFA_FLASH_PART_PXECFG,
+				bfad->bfa.ioc.port_id, &iocmd->cfg,
+				sizeof(struct bfa_ethboot_cfg_s), 0,
+				bfad_hcb_comp, &fcomp);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	if (iocmd->status != BFA_STATUS_OK)
+		goto out;
+	wait_for_completion(&fcomp.comp);
+	iocmd->status = fcomp.status;
+out:
+	return 0;
+}
+
+int
+bfad_iocmd_cfg_trunk(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
+{
+	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
+	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+
+	if (v_cmd == IOCMD_TRUNK_ENABLE) {
+		trunk->attr.state = BFA_TRUNK_OFFLINE;
+		bfa_fcport_disable(&bfad->bfa);
+		fcport->cfg.trunked = BFA_TRUE;
+	} else if (v_cmd == IOCMD_TRUNK_DISABLE) {
+		trunk->attr.state = BFA_TRUNK_DISABLED;
+		bfa_fcport_disable(&bfad->bfa);
+		fcport->cfg.trunked = BFA_FALSE;
+	}
+
+	if (!bfa_fcport_is_disabled(&bfad->bfa))
+		bfa_fcport_enable(&bfad->bfa);
+
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	iocmd->status = BFA_STATUS_OK;
+	return 0;
+}
+
+int
+bfad_iocmd_trunk_get_attr(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_trunk_attr_s *iocmd = (struct bfa_bsg_trunk_attr_s *)cmd;
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
+	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	memcpy((void *)&iocmd->attr, (void *)&trunk->attr,
+		sizeof(struct bfa_trunk_attr_s));
+	iocmd->attr.port_id = bfa_lps_get_base_pid(&bfad->bfa);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	iocmd->status = BFA_STATUS_OK;
+	return 0;
+}
+
+int
+bfad_iocmd_qos(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
+{
+	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	if (bfa_ioc_get_type(&bfad->bfa.ioc) == BFA_IOC_TYPE_FC) {
+		if (v_cmd == IOCMD_QOS_ENABLE)
+			fcport->cfg.qos_enabled = BFA_TRUE;
+		else if (v_cmd == IOCMD_QOS_DISABLE)
+			fcport->cfg.qos_enabled = BFA_FALSE;
+	}
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	iocmd->status = BFA_STATUS_OK;
+	return 0;
+}
+
+int
+bfad_iocmd_qos_get_attr(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_qos_attr_s *iocmd = (struct bfa_bsg_qos_attr_s *)cmd;
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->attr.state = fcport->qos_attr.state;
+	iocmd->attr.total_bb_cr = be32_to_cpu(fcport->qos_attr.total_bb_cr);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	iocmd->status = BFA_STATUS_OK;
+	return 0;
+}
+
+int
+bfad_iocmd_qos_get_vc_attr(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_qos_vc_attr_s *iocmd =
+				(struct bfa_bsg_qos_vc_attr_s *)cmd;
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
+	struct bfa_qos_vc_attr_s *bfa_vc_attr = &fcport->qos_vc_attr;
+	unsigned long	flags;
+	u32	i = 0;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->attr.total_vc_count = be16_to_cpu(bfa_vc_attr->total_vc_count);
+	iocmd->attr.shared_credit  = be16_to_cpu(bfa_vc_attr->shared_credit);
+	iocmd->attr.elp_opmode_flags  =
+				be32_to_cpu(bfa_vc_attr->elp_opmode_flags);
+
+	/* Individual VC info */
+	while (i < iocmd->attr.total_vc_count) {
+		iocmd->attr.vc_info[i].vc_credit =
+				bfa_vc_attr->vc_info[i].vc_credit;
+		iocmd->attr.vc_info[i].borrow_credit =
+				bfa_vc_attr->vc_info[i].borrow_credit;
+		iocmd->attr.vc_info[i].priority =
+				bfa_vc_attr->vc_info[i].priority;
+		i++;
+	}
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	iocmd->status = BFA_STATUS_OK;
+	return 0;
+}
+
+int
+bfad_iocmd_qos_get_stats(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_fcport_stats_s *iocmd =
+				(struct bfa_bsg_fcport_stats_s *)cmd;
+	struct bfad_hal_comp fcomp;
+	unsigned long	flags;
+	struct bfa_cb_pending_q_s cb_qe;
+
+	init_completion(&fcomp.comp);
+	bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
+			   &fcomp, &iocmd->stats);
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc));
+	iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	if (iocmd->status != BFA_STATUS_OK) {
+		bfa_trc(bfad, iocmd->status);
+		goto out;
+	}
+	wait_for_completion(&fcomp.comp);
+	iocmd->status = fcomp.status;
+out:
+	return 0;
+}
+
+int
+bfad_iocmd_qos_reset_stats(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+	struct bfad_hal_comp fcomp;
+	unsigned long	flags;
+	struct bfa_cb_pending_q_s cb_qe;
+
+	init_completion(&fcomp.comp);
+	bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
+			   &fcomp, NULL);
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc));
+	iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	if (iocmd->status != BFA_STATUS_OK) {
+		bfa_trc(bfad, iocmd->status);
+		goto out;
+	}
+	wait_for_completion(&fcomp.comp);
+	iocmd->status = fcomp.status;
+out:
+	return 0;
+}
+
+int
+bfad_iocmd_vf_get_stats(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_vf_stats_s *iocmd =
+			(struct bfa_bsg_vf_stats_s *)cmd;
+	struct bfa_fcs_fabric_s	*fcs_vf;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
+	if (fcs_vf == NULL) {
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+		iocmd->status = BFA_STATUS_UNKNOWN_VFID;
+		goto out;
+	}
+	memcpy((void *)&iocmd->stats, (void *)&fcs_vf->stats,
+		sizeof(struct bfa_vf_stats_s));
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	iocmd->status = BFA_STATUS_OK;
+out:
+	return 0;
+}
+
+int
+bfad_iocmd_vf_clr_stats(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_vf_reset_stats_s *iocmd =
+			(struct bfa_bsg_vf_reset_stats_s *)cmd;
+	struct bfa_fcs_fabric_s	*fcs_vf;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
+	if (fcs_vf == NULL) {
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+		iocmd->status = BFA_STATUS_UNKNOWN_VFID;
+		goto out;
+	}
+	memset((void *)&fcs_vf->stats, 0, sizeof(struct bfa_vf_stats_s));
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	iocmd->status = BFA_STATUS_OK;
+out:
+	return 0;
+}
+
+int
+bfad_iocmd_lunmask(struct bfad_s *bfad, void *pcmd, unsigned int v_cmd)
+{
+	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	if (v_cmd == IOCMD_FCPIM_LUNMASK_ENABLE)
+		iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_TRUE);
+	else if (v_cmd == IOCMD_FCPIM_LUNMASK_DISABLE)
+		iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_FALSE);
+	else if (v_cmd == IOCMD_FCPIM_LUNMASK_CLEAR)
+		iocmd->status = bfa_fcpim_lunmask_clear(&bfad->bfa);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	return 0;
+}
+
+int
+bfad_iocmd_fcpim_lunmask_query(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_fcpim_lunmask_query_s *iocmd =
+			(struct bfa_bsg_fcpim_lunmask_query_s *)cmd;
+	struct bfa_lunmask_cfg_s *lun_mask = &iocmd->lun_mask;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_fcpim_lunmask_query(&bfad->bfa, lun_mask);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	return 0;
+}
+
+int
+bfad_iocmd_fcpim_cfg_lunmask(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
+{
+	struct bfa_bsg_fcpim_lunmask_s *iocmd =
+				(struct bfa_bsg_fcpim_lunmask_s *)cmd;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	if (v_cmd == IOCMD_FCPIM_LUNMASK_ADD)
+		iocmd->status = bfa_fcpim_lunmask_add(&bfad->bfa, iocmd->vf_id,
+					&iocmd->pwwn, iocmd->rpwwn, iocmd->lun);
+	else if (v_cmd == IOCMD_FCPIM_LUNMASK_DELETE)
+		iocmd->status = bfa_fcpim_lunmask_delete(&bfad->bfa,
+					iocmd->vf_id, &iocmd->pwwn,
+					iocmd->rpwwn, iocmd->lun);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	return 0;
+}
+
 static int
 bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
 		unsigned int payload_len)
 {
-	int rc = EINVAL;
+	int rc = -EINVAL;
 
 	switch (cmd) {
 	case IOCMD_IOC_ENABLE:
@@ -1536,6 +2469,14 @@ bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
 	case IOCMD_IOC_GET_FWSTATS:
 		rc = bfad_iocmd_ioc_get_fwstats(bfad, iocmd, payload_len);
 		break;
+	case IOCMD_IOC_RESET_STATS:
+	case IOCMD_IOC_RESET_FWSTATS:
+		rc = bfad_iocmd_ioc_reset_stats(bfad, iocmd, cmd);
+		break;
+	case IOCMD_IOC_SET_ADAPTER_NAME:
+	case IOCMD_IOC_SET_PORT_NAME:
+		rc = bfad_iocmd_ioc_set_name(bfad, iocmd, cmd);
+		break;
 	case IOCMD_IOCFC_GET_ATTR:
 		rc = bfad_iocmd_iocfc_get_attr(bfad, iocmd);
 		break;
@@ -1554,12 +2495,31 @@ bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
 	case IOCMD_PORT_GET_STATS:
 		rc = bfad_iocmd_port_get_stats(bfad, iocmd, payload_len);
 		break;
+	case IOCMD_PORT_RESET_STATS:
+		rc = bfad_iocmd_port_reset_stats(bfad, iocmd);
+		break;
+	case IOCMD_PORT_CFG_TOPO:
+	case IOCMD_PORT_CFG_SPEED:
+	case IOCMD_PORT_CFG_ALPA:
+	case IOCMD_PORT_CLR_ALPA:
+		rc = bfad_iocmd_set_port_cfg(bfad, iocmd, cmd);
+		break;
+	case IOCMD_PORT_CFG_MAXFRSZ:
+		rc = bfad_iocmd_port_cfg_maxfrsize(bfad, iocmd);
+		break;
+	case IOCMD_PORT_BBSC_ENABLE:
+	case IOCMD_PORT_BBSC_DISABLE:
+		rc = bfad_iocmd_port_cfg_bbsc(bfad, iocmd, cmd);
+		break;
 	case IOCMD_LPORT_GET_ATTR:
 		rc = bfad_iocmd_lport_get_attr(bfad, iocmd);
 		break;
 	case IOCMD_LPORT_GET_STATS:
 		rc = bfad_iocmd_lport_get_stats(bfad, iocmd);
 		break;
+	case IOCMD_LPORT_RESET_STATS:
+		rc = bfad_iocmd_lport_reset_stats(bfad, iocmd);
+		break;
 	case IOCMD_LPORT_GET_IOSTATS:
 		rc = bfad_iocmd_lport_get_iostats(bfad, iocmd);
 		break;
@@ -1575,12 +2535,40 @@ bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
 	case IOCMD_RPORT_GET_STATS:
 		rc = bfad_iocmd_rport_get_stats(bfad, iocmd);
 		break;
+	case IOCMD_RPORT_RESET_STATS:
+		rc = bfad_iocmd_rport_clr_stats(bfad, iocmd);
+		break;
+	case IOCMD_RPORT_SET_SPEED:
+		rc = bfad_iocmd_rport_set_speed(bfad, iocmd);
+		break;
+	case IOCMD_VPORT_GET_ATTR:
+		rc = bfad_iocmd_vport_get_attr(bfad, iocmd);
+		break;
+	case IOCMD_VPORT_GET_STATS:
+		rc = bfad_iocmd_vport_get_stats(bfad, iocmd);
+		break;
+	case IOCMD_VPORT_RESET_STATS:
+		rc = bfad_iocmd_vport_clr_stats(bfad, iocmd);
+		break;
 	case IOCMD_FABRIC_GET_LPORTS:
 		rc = bfad_iocmd_fabric_get_lports(bfad, iocmd, payload_len);
 		break;
+	case IOCMD_RATELIM_ENABLE:
+	case IOCMD_RATELIM_DISABLE:
+		rc = bfad_iocmd_ratelim(bfad, cmd, iocmd);
+		break;
+	case IOCMD_RATELIM_DEF_SPEED:
+		rc = bfad_iocmd_ratelim_speed(bfad, cmd, iocmd);
+		break;
+	case IOCMD_FCPIM_FAILOVER:
+		rc = bfad_iocmd_cfg_fcpim(bfad, iocmd);
+		break;
 	case IOCMD_FCPIM_MODSTATS:
 		rc = bfad_iocmd_fcpim_get_modstats(bfad, iocmd);
 		break;
+	case IOCMD_FCPIM_MODSTATSCLR:
+		rc = bfad_iocmd_fcpim_clr_modstats(bfad, iocmd);
+		break;
 	case IOCMD_FCPIM_DEL_ITN_STATS:
 		rc = bfad_iocmd_fcpim_get_del_itn_stats(bfad, iocmd);
 		break;
@@ -1590,6 +2578,9 @@ bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
 	case IOCMD_ITNIM_GET_IOSTATS:
 		rc = bfad_iocmd_itnim_get_iostats(bfad, iocmd);
 		break;
+	case IOCMD_ITNIM_RESET_STATS:
+		rc = bfad_iocmd_itnim_reset_stats(bfad, iocmd);
+		break;
 	case IOCMD_ITNIM_GET_ITNSTATS:
 		rc = bfad_iocmd_itnim_get_itnstats(bfad, iocmd);
 		break;
@@ -1702,11 +2693,92 @@ bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
 	case IOCMD_DEBUG_PORTLOG:
 		rc = bfad_iocmd_porglog_get(bfad, iocmd);
 		break;
+	case IOCMD_DEBUG_FW_CORE:
+		rc = bfad_iocmd_debug_fw_core(bfad, iocmd, payload_len);
+		break;
+	case IOCMD_DEBUG_FW_STATE_CLR:
+	case IOCMD_DEBUG_PORTLOG_CLR:
+	case IOCMD_DEBUG_START_DTRC:
+	case IOCMD_DEBUG_STOP_DTRC:
+		rc = bfad_iocmd_debug_ctl(bfad, iocmd, cmd);
+		break;
+	case IOCMD_DEBUG_PORTLOG_CTL:
+		rc = bfad_iocmd_porglog_ctl(bfad, iocmd);
+		break;
+	case IOCMD_FCPIM_PROFILE_ON:
+	case IOCMD_FCPIM_PROFILE_OFF:
+		rc = bfad_iocmd_fcpim_cfg_profile(bfad, iocmd, cmd);
+		break;
+	case IOCMD_ITNIM_GET_IOPROFILE:
+		rc = bfad_iocmd_itnim_get_ioprofile(bfad, iocmd);
+		break;
+	case IOCMD_FCPORT_GET_STATS:
+		rc = bfad_iocmd_fcport_get_stats(bfad, iocmd);
+		break;
+	case IOCMD_FCPORT_RESET_STATS:
+		rc = bfad_iocmd_fcport_reset_stats(bfad, iocmd);
+		break;
+	case IOCMD_BOOT_CFG:
+		rc = bfad_iocmd_boot_cfg(bfad, iocmd);
+		break;
+	case IOCMD_BOOT_QUERY:
+		rc = bfad_iocmd_boot_query(bfad, iocmd);
+		break;
+	case IOCMD_PREBOOT_QUERY:
+		rc = bfad_iocmd_preboot_query(bfad, iocmd);
+		break;
+	case IOCMD_ETHBOOT_CFG:
+		rc = bfad_iocmd_ethboot_cfg(bfad, iocmd);
+		break;
+	case IOCMD_ETHBOOT_QUERY:
+		rc = bfad_iocmd_ethboot_query(bfad, iocmd);
+		break;
+	case IOCMD_TRUNK_ENABLE:
+	case IOCMD_TRUNK_DISABLE:
+		rc = bfad_iocmd_cfg_trunk(bfad, iocmd, cmd);
+		break;
+	case IOCMD_TRUNK_GET_ATTR:
+		rc = bfad_iocmd_trunk_get_attr(bfad, iocmd);
+		break;
+	case IOCMD_QOS_ENABLE:
+	case IOCMD_QOS_DISABLE:
+		rc = bfad_iocmd_qos(bfad, iocmd, cmd);
+		break;
+	case IOCMD_QOS_GET_ATTR:
+		rc = bfad_iocmd_qos_get_attr(bfad, iocmd);
+		break;
+	case IOCMD_QOS_GET_VC_ATTR:
+		rc = bfad_iocmd_qos_get_vc_attr(bfad, iocmd);
+		break;
+	case IOCMD_QOS_GET_STATS:
+		rc = bfad_iocmd_qos_get_stats(bfad, iocmd);
+		break;
+	case IOCMD_QOS_RESET_STATS:
+		rc = bfad_iocmd_qos_reset_stats(bfad, iocmd);
+		break;
+	case IOCMD_VF_GET_STATS:
+		rc = bfad_iocmd_vf_get_stats(bfad, iocmd);
+		break;
+	case IOCMD_VF_RESET_STATS:
+		rc = bfad_iocmd_vf_clr_stats(bfad, iocmd);
+		break;
+	case IOCMD_FCPIM_LUNMASK_ENABLE:
+	case IOCMD_FCPIM_LUNMASK_DISABLE:
+	case IOCMD_FCPIM_LUNMASK_CLEAR:
+		rc = bfad_iocmd_lunmask(bfad, iocmd, cmd);
+		break;
+	case IOCMD_FCPIM_LUNMASK_QUERY:
+		rc = bfad_iocmd_fcpim_lunmask_query(bfad, iocmd);
+		break;
+	case IOCMD_FCPIM_LUNMASK_ADD:
+	case IOCMD_FCPIM_LUNMASK_DELETE:
+		rc = bfad_iocmd_fcpim_cfg_lunmask(bfad, iocmd, cmd);
+		break;
 	default:
-		rc = EINVAL;
+		rc = -EINVAL;
 		break;
 	}
-	return -rc;
+	return rc;
 }
 
 static int
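
Several of the iocmds added above (the fcport/qos stats get and clear paths) share one pattern: arm a bfa_cb_pending_q_s with bfad_hcb_comp, issue the request under bfad_lock, then block on the completion until the firmware response arrives. The callback side of that handshake is sketched below under the assumption that bfad_hcb_comp follows the driver's usual completion idiom; the body is not part of this patch.

	void
	bfad_hcb_comp(void *arg, bfa_status_t status)
	{
		struct bfad_hal_comp *fcomp = (struct bfad_hal_comp *)arg;

		/* latch the firmware status and wake the waiting iocmd handler */
		fcomp->status = status;
		complete(&fcomp->comp);
	}
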
diff --git a/drivers/scsi/bfa/bfad_bsg.h b/drivers/scsi/bfa/bfad_bsg.h
index 99b0e8a70c89..e859adb9aa9e 100644
--- a/drivers/scsi/bfa/bfad_bsg.h
+++ b/drivers/scsi/bfa/bfad_bsg.h
@@ -30,24 +30,48 @@ enum {
 	IOCMD_IOC_GET_INFO,
 	IOCMD_IOC_GET_STATS,
 	IOCMD_IOC_GET_FWSTATS,
+	IOCMD_IOC_RESET_STATS,
+	IOCMD_IOC_RESET_FWSTATS,
+	IOCMD_IOC_SET_ADAPTER_NAME,
+	IOCMD_IOC_SET_PORT_NAME,
 	IOCMD_IOCFC_GET_ATTR,
 	IOCMD_IOCFC_SET_INTR,
 	IOCMD_PORT_ENABLE,
 	IOCMD_PORT_DISABLE,
 	IOCMD_PORT_GET_ATTR,
 	IOCMD_PORT_GET_STATS,
+	IOCMD_PORT_RESET_STATS,
+	IOCMD_PORT_CFG_TOPO,
+	IOCMD_PORT_CFG_SPEED,
+	IOCMD_PORT_CFG_ALPA,
+	IOCMD_PORT_CFG_MAXFRSZ,
+	IOCMD_PORT_CLR_ALPA,
+	IOCMD_PORT_BBSC_ENABLE,
+	IOCMD_PORT_BBSC_DISABLE,
 	IOCMD_LPORT_GET_ATTR,
 	IOCMD_LPORT_GET_RPORTS,
 	IOCMD_LPORT_GET_STATS,
+	IOCMD_LPORT_RESET_STATS,
 	IOCMD_LPORT_GET_IOSTATS,
 	IOCMD_RPORT_GET_ATTR,
 	IOCMD_RPORT_GET_ADDR,
 	IOCMD_RPORT_GET_STATS,
+	IOCMD_RPORT_RESET_STATS,
+	IOCMD_RPORT_SET_SPEED,
+	IOCMD_VPORT_GET_ATTR,
+	IOCMD_VPORT_GET_STATS,
+	IOCMD_VPORT_RESET_STATS,
 	IOCMD_FABRIC_GET_LPORTS,
+	IOCMD_RATELIM_ENABLE,
+	IOCMD_RATELIM_DISABLE,
+	IOCMD_RATELIM_DEF_SPEED,
+	IOCMD_FCPIM_FAILOVER,
 	IOCMD_FCPIM_MODSTATS,
+	IOCMD_FCPIM_MODSTATSCLR,
 	IOCMD_FCPIM_DEL_ITN_STATS,
 	IOCMD_ITNIM_GET_ATTR,
 	IOCMD_ITNIM_GET_IOSTATS,
+	IOCMD_ITNIM_RESET_STATS,
 	IOCMD_ITNIM_GET_ITNSTATS,
 	IOCMD_IOC_PCIFN_CFG,
 	IOCMD_FCPORT_ENABLE,
@@ -86,6 +110,39 @@ enum {
 	IOCMD_PHY_READ_FW,
 	IOCMD_VHBA_QUERY,
 	IOCMD_DEBUG_PORTLOG,
+	IOCMD_DEBUG_FW_CORE,
+	IOCMD_DEBUG_FW_STATE_CLR,
+	IOCMD_DEBUG_PORTLOG_CLR,
+	IOCMD_DEBUG_START_DTRC,
+	IOCMD_DEBUG_STOP_DTRC,
+	IOCMD_DEBUG_PORTLOG_CTL,
+	IOCMD_FCPIM_PROFILE_ON,
+	IOCMD_FCPIM_PROFILE_OFF,
+	IOCMD_ITNIM_GET_IOPROFILE,
+	IOCMD_FCPORT_GET_STATS,
+	IOCMD_FCPORT_RESET_STATS,
+	IOCMD_BOOT_CFG,
+	IOCMD_BOOT_QUERY,
+	IOCMD_PREBOOT_QUERY,
+	IOCMD_ETHBOOT_CFG,
+	IOCMD_ETHBOOT_QUERY,
+	IOCMD_TRUNK_ENABLE,
+	IOCMD_TRUNK_DISABLE,
+	IOCMD_TRUNK_GET_ATTR,
+	IOCMD_QOS_ENABLE,
+	IOCMD_QOS_DISABLE,
+	IOCMD_QOS_GET_ATTR,
+	IOCMD_QOS_GET_VC_ATTR,
+	IOCMD_QOS_GET_STATS,
+	IOCMD_QOS_RESET_STATS,
+	IOCMD_VF_GET_STATS,
+	IOCMD_VF_RESET_STATS,
+	IOCMD_FCPIM_LUNMASK_ENABLE,
+	IOCMD_FCPIM_LUNMASK_DISABLE,
+	IOCMD_FCPIM_LUNMASK_CLEAR,
+	IOCMD_FCPIM_LUNMASK_QUERY,
+	IOCMD_FCPIM_LUNMASK_ADD,
+	IOCMD_FCPIM_LUNMASK_DELETE,
 };
 
 struct bfa_bsg_gen_s {
@@ -94,6 +151,43 @@ struct bfa_bsg_gen_s {
 	u16		rsvd;
 };
 
+struct bfa_bsg_portlogctl_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		rsvd;
+	bfa_boolean_t	ctl;
+	int		inst_no;
+};
+
+struct bfa_bsg_fcpim_profile_s {
+	bfa_status_t    status;
+	u16		bfad_num;
+	u16		rsvd;
+};
+
+struct bfa_bsg_itnim_ioprofile_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		vf_id;
+	wwn_t		lpwwn;
+	wwn_t		rpwwn;
+	struct bfa_itnim_ioprofile_s ioprofile;
+};
+
+struct bfa_bsg_fcport_stats_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		rsvd;
+	union bfa_fcport_stats_u stats;
+};
+
+struct bfa_bsg_ioc_name_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		rsvd;
+	char		name[BFA_ADAPTER_SYM_NAME_LEN];
+};
+
 struct bfa_bsg_ioc_info_s {
 	bfa_status_t	status;
 	u16		bfad_num;
@@ -164,6 +258,20 @@ struct bfa_bsg_port_attr_s {
 	struct bfa_port_attr_s	attr;
 };
 
+struct bfa_bsg_port_cfg_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		rsvd;
+	u32		param;
+	u32		rsvd1;
+};
+
+struct bfa_bsg_port_cfg_maxfrsize_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		maxfrsize;
+};
+
 struct bfa_bsg_port_stats_s {
 	bfa_status_t	status;
 	u16		bfad_num;
@@ -237,6 +345,47 @@ struct bfa_bsg_rport_scsi_addr_s {
 	u32		lun;
 };
 
+struct bfa_bsg_rport_reset_stats_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		vf_id;
+	wwn_t		pwwn;
+	wwn_t		rpwwn;
+};
+
+struct bfa_bsg_rport_set_speed_s {
+	bfa_status_t		status;
+	u16			bfad_num;
+	u16			vf_id;
+	enum bfa_port_speed	speed;
+	u32			rsvd;
+	wwn_t			pwwn;
+	wwn_t			rpwwn;
+};
+
+struct bfa_bsg_vport_attr_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		vf_id;
+	wwn_t		vpwwn;
+	struct bfa_vport_attr_s vport_attr;
+};
+
+struct bfa_bsg_vport_stats_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		vf_id;
+	wwn_t		vpwwn;
+	struct bfa_vport_stats_s vport_stats;
+};
+
+struct bfa_bsg_reset_stats_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		vf_id;
+	wwn_t		vpwwn;
+};
+
 struct bfa_bsg_fabric_get_lports_s {
 	bfa_status_t	status;
 	u16		bfad_num;
@@ -246,6 +395,19 @@ struct bfa_bsg_fabric_get_lports_s {
 	u32		rsvd;
 };
 
+struct bfa_bsg_trl_speed_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		rsvd;
+	enum bfa_port_speed speed;
+};
+
+struct bfa_bsg_fcpim_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		param;
+};
+
 struct bfa_bsg_fcpim_modstats_s {
 	bfa_status_t	status;
 	u16		bfad_num;
@@ -258,6 +420,11 @@ struct bfa_bsg_fcpim_del_itn_stats_s {
 	struct bfa_fcpim_del_itn_stats_s modstats;
 };
 
+struct bfa_bsg_fcpim_modstatsclr_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+};
+
 struct bfa_bsg_itnim_attr_s {
 	bfa_status_t	status;
 	u16		bfad_num;
@@ -485,6 +652,76 @@ struct bfa_bsg_vhba_attr_s {
 	struct bfa_vhba_attr_s	attr;
 };
 
+struct bfa_bsg_boot_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		rsvd;
+	struct bfa_boot_cfg_s	cfg;
+};
+
+struct bfa_bsg_preboot_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		rsvd;
+	struct bfa_boot_pbc_s	cfg;
+};
+
+struct bfa_bsg_ethboot_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		rsvd;
+	struct  bfa_ethboot_cfg_s  cfg;
+};
+
+struct bfa_bsg_trunk_attr_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		rsvd;
+	struct bfa_trunk_attr_s attr;
+};
+
+struct bfa_bsg_qos_attr_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		rsvd;
+	struct bfa_qos_attr_s	attr;
+};
+
+struct bfa_bsg_qos_vc_attr_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		rsvd;
+	struct bfa_qos_vc_attr_s attr;
+};
+
+struct bfa_bsg_vf_stats_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		vf_id;
+	struct bfa_vf_stats_s	stats;
+};
+
+struct bfa_bsg_vf_reset_stats_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		vf_id;
+};
+
+struct bfa_bsg_fcpim_lunmask_query_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	struct bfa_lunmask_cfg_s lun_mask;
+};
+
+struct bfa_bsg_fcpim_lunmask_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		vf_id;
+	wwn_t		pwwn;
+	wwn_t		rpwwn;
+	struct scsi_lun	lun;
+};
+
 struct bfa_bsg_fcpt_s {
 	bfa_status_t    status;
 	u16		vf_id;
diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h
index 48661a2726d7..bda999ad9f52 100644
--- a/drivers/scsi/bfa/bfad_drv.h
+++ b/drivers/scsi/bfa/bfad_drv.h
@@ -56,7 +56,7 @@
 #ifdef BFA_DRIVER_VERSION
 #define BFAD_DRIVER_VERSION    BFA_DRIVER_VERSION
 #else
-#define BFAD_DRIVER_VERSION    "3.0.2.1"
+#define BFAD_DRIVER_VERSION    "3.0.2.2"
 #endif
 
 #define BFAD_PROTO_NAME FCPI_NAME
@@ -224,6 +224,10 @@ struct bfad_s {
 	char *regdata;
 	u32 reglen;
 	struct dentry *bfad_dentry_files[5];
+	struct list_head	free_aen_q;
+	struct list_head	active_aen_q;
+	struct bfa_aen_entry_s	aen_list[BFA_AEN_MAX_ENTRY];
+	spinlock_t		bfad_aen_spinlock;
 };
 
 /* BFAD state machine events */
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index f2bf81265ae5..01312381639f 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -656,6 +656,31 @@ bfad_im_port_clean(struct bfad_im_port_s *im_port)
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 }
 
+static void bfad_aen_im_notify_handler(struct work_struct *work)
+{
+	struct bfad_im_s *im =
+		container_of(work, struct bfad_im_s, aen_im_notify_work);
+	struct bfa_aen_entry_s *aen_entry;
+	struct bfad_s *bfad = im->bfad;
+	struct Scsi_Host *shost = bfad->pport.im_port->shost;
+	void *event_data;
+	unsigned long flags;
+
+	while (!list_empty(&bfad->active_aen_q)) {
+		spin_lock_irqsave(&bfad->bfad_aen_spinlock, flags);
+		bfa_q_deq(&bfad->active_aen_q, &aen_entry);
+		spin_unlock_irqrestore(&bfad->bfad_aen_spinlock, flags);
+		event_data = (char *)aen_entry + sizeof(struct list_head);
+		fc_host_post_vendor_event(shost, fc_get_event_number(),
+				sizeof(struct bfa_aen_entry_s) -
+				sizeof(struct list_head),
+				(char *)event_data, BFAD_NL_VENDOR_ID);
+		spin_lock_irqsave(&bfad->bfad_aen_spinlock, flags);
+		list_add_tail(&aen_entry->qe, &bfad->free_aen_q);
+		spin_unlock_irqrestore(&bfad->bfad_aen_spinlock, flags);
+	}
+}
+
 bfa_status_t
 bfad_im_probe(struct bfad_s *bfad)
 {
@@ -676,6 +701,7 @@ bfad_im_probe(struct bfad_s *bfad)
 		rc = BFA_STATUS_FAILED;
 	}
 
+	INIT_WORK(&im->aen_im_notify_work, bfad_aen_im_notify_handler);
 ext:
 	return rc;
 }
diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h
index 4fe34d576b05..004b6cf848d9 100644
--- a/drivers/scsi/bfa/bfad_im.h
+++ b/drivers/scsi/bfa/bfad_im.h
@@ -115,8 +115,30 @@ struct bfad_im_s {
 	struct bfad_s         *bfad;
 	struct workqueue_struct *drv_workq;
 	char            drv_workq_name[KOBJ_NAME_LEN];
+	struct work_struct	aen_im_notify_work;
 };
 
+#define bfad_get_aen_entry(_drv, _entry) do {				\
+	unsigned long	_flags;						\
+	spin_lock_irqsave(&(_drv)->bfad_aen_spinlock, _flags);		\
+	bfa_q_deq(&(_drv)->free_aen_q, &(_entry));			\
+	if (_entry)							\
+		list_add_tail(&(_entry)->qe, &(_drv)->active_aen_q);	\
+	spin_unlock_irqrestore(&(_drv)->bfad_aen_spinlock, _flags);	\
+} while (0)
+
+/* post fc_host vendor event */
+#define bfad_im_post_vendor_event(_entry, _drv, _cnt, _cat, _evt) do {	      \
+	do_gettimeofday(&(_entry)->aen_tv);				      \
+	(_entry)->bfad_num = (_drv)->inst_no;				      \
+	(_entry)->seq_num = (_cnt);					      \
+	(_entry)->aen_category = (_cat);				      \
+	(_entry)->aen_type = (_evt);					      \
+	if ((_drv)->bfad_flags & BFAD_FC4_PROBE_DONE)			      \
+		queue_work((_drv)->im->drv_workq,			      \
+			   &(_drv)->im->aen_im_notify_work);		      \
+} while (0)
+
 struct Scsi_Host *bfad_scsi_host_alloc(struct bfad_im_port_s *im_port,
 				struct bfad_s *);
 bfa_status_t bfad_thread_workq(struct bfad_s *bfad);
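
The two macros above are meant to be used as a pair: bfad_get_aen_entry() pulls an entry off the free pool and parks it on the active queue, and bfad_im_post_vendor_event() stamps it and kicks the workqueue that delivers it through fc_host_post_vendor_event(). A sketch of a producer follows; the function name is hypothetical and the counter/category/event arguments are left as plain parameters, since the real call sites live in the BFA AEN code.

	static void
	example_post_aen(struct bfad_s *bfad, int cnt, int cat, int evt)
	{
		struct bfa_aen_entry_s *aen_entry;

		bfad_get_aen_entry(bfad, aen_entry);
		if (!aen_entry)
			return;		/* free pool exhausted, drop the event */

		/* fill in the event-specific payload of *aen_entry here */

		bfad_im_post_vendor_event(aen_entry, bfad, cnt, cat, evt);
	}
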
diff --git a/drivers/scsi/bfa/bfi.h b/drivers/scsi/bfa/bfi.h
index 1e258d5f8aec..b2ba0b2e91b2 100644
--- a/drivers/scsi/bfa/bfi.h
+++ b/drivers/scsi/bfa/bfi.h
@@ -784,6 +784,17 @@ enum bfi_sfp_i2h_e {
 };
 
 /*
+ *	SFP state change notification
+ */
+struct bfi_sfp_scn_s {
+	struct bfi_mhdr_s mhr;	/* host msg header        */
+	u8	event;
+	u8	sfpid;
+	u8	pomlvl;	/* pom level: normal/warning/alarm */
+	u8	is_elb;	/* e-loopback */
+};
+
+/*
  *	SFP state
  */
 enum bfa_sfp_stat_e {
@@ -926,6 +937,15 @@ struct bfi_flash_erase_rsp_s {
 };
 
 /*
+ * Flash event notification
+ */
+struct bfi_flash_event_s {
+	struct bfi_mhdr_s	mh;	/* Common msg header */
+	bfa_status_t		status;
+	u32			param;
+};
+
+/*
  *----------------------------------------------------------------------
  *				DIAG
  *----------------------------------------------------------------------
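
The two new BFI messages above carry only fixed-width fields, so a consumer can cast the mailbox header and read them directly. A purely illustrative handler for the SFP state-change notification; the function name and the trace call are placeholders, not part of this patch.

	static void
	example_sfp_scn_isr(void *arg, struct bfi_mhdr_s *msg)
	{
		struct bfi_sfp_scn_s *scn = (struct bfi_sfp_scn_s *)msg;

		/* event/sfpid/pomlvl/is_elb are single bytes, no byte swapping needed */
		pr_debug("sfp scn: event %u sfpid %u pom %u elb %u\n",
			 scn->event, scn->sfpid, scn->pomlvl, scn->is_elb);
	}
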