author    Linus Torvalds <torvalds@linux-foundation.org>  2013-04-30 13:16:38 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2013-04-30 13:16:38 -0700
commit    5aa1c98862d3f365d9cf6d0833d5dc127d2a76e7
tree      89cbf0b67634ecc43a863a6ca058ff749df3cce7 /drivers/scsi/lpfc
parent    6da6dc2380c3cfe8d6b59d7c3c55fdd7a521fe6c
parent    9e45dd73234af9a59613dc2989dcc2df2dab847f
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull first round of SCSI updates from James "Jej B" Bottomley:
 "The patch set is mostly driver updates (qla4, qla2 [ISF support
  updates], lpfc, aacraid [dual firmware image support]) and a few bug
  fixes"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (47 commits)
  [SCSI] iscsi_tcp: support PF_MEMALLOC/__GFP_MEMALLOC
  [SCSI] libiscsi: avoid unnecessary multiple NULL assignments
  [SCSI] qla4xxx: Update driver version to 5.03.00-k8
  [SCSI] qla4xxx: Added print statements to display AENs
  [SCSI] qla4xxx: Use correct value for max flash node entries
  [SCSI] qla4xxx: Restrict logout from boot target session using session id
  [SCSI] qla4xxx: Use correct flash ddb offset for ISP40XX
  [SCSI] isci: add CONFIG_PM_SLEEP to suspend/resume functions
  [SCSI] scsi_dh_alua: Add module parameter to allow failover to non preferred path without STPG
  [SCSI] qla2xxx: Update the driver version to 8.05.00.03-k.
  [SCSI] qla2xxx: Obtain loopback iteration count from bsg request.
  [SCSI] qla2xxx: Add clarifying printk to thermal access fail cases.
  [SCSI] qla2xxx: Remove duplicated include form qla_isr.c
  [SCSI] qla2xxx: Enhancements to support ISPFx00.
  [SCSI] qla4xxx: Update driver version to 5.03.00-k7
  [SCSI] qla4xxx: Replace dev type macros with generic portal type macros
  [SCSI] scsi_transport_iscsi: Declare portal type string macros for generic use
  [SCSI] qla4xxx: Add flash node mgmt support
  [SCSI] libiscsi: export function iscsi_switch_str_param
  [SCSI] scsi_transport_iscsi: Add flash node mgmt support
  ...
Diffstat (limited to 'drivers/scsi/lpfc')
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c      |   12
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.c       | 1107
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h      |    1
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c       |   21
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c   |   68
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw4.h       |    3
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c      |  106
-rw-r--r--  drivers/scsi/lpfc/lpfc_mbox.c      |   24
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c |    1
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c      |    8
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c       |  115
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h   |    2
12 files changed, 837 insertions, 631 deletions
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index a364cae9e984..9290713af253 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -692,7 +692,7 @@ lpfc_do_offline(struct lpfc_hba *phba, uint32_t type)
 	 */
 	for (i = 0; i < psli->num_rings; i++) {
 		pring = &psli->ring[i];
-		while (pring->txcmplq_cnt) {
+		while (!list_empty(&pring->txcmplq)) {
 			msleep(10);
 			if (cnt++ > 500) {  /* 5 secs */
 				lpfc_printf_log(phba,
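
This hunk is one instance of a cleanup applied across this merge: instead of trusting the shadow counter txcmplq_cnt, which can drift out of sync with the queue it mirrors, the code asks the list itself. A minimal kernel-style sketch of the polling idiom — the wrapper name here is hypothetical, the loop matches the hunk:

	/* Wait up to ~5 seconds for a completion queue to drain; the
	 * list itself is the authoritative "count" via list_empty().
	 */
	static void lpfc_wait_txcmplq_drain(struct list_head *txcmplq)
	{
		int cnt = 0;

		while (!list_empty(txcmplq)) {
			msleep(10);
			if (cnt++ > 500)	/* 5 secs */
				break;
		}
	}
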
@@ -2302,11 +2302,17 @@ static DEVICE_ATTR(lpfc_enable_npiv, S_IRUGO, lpfc_enable_npiv_show, NULL);
 LPFC_ATTR_R(fcf_failover_policy, 1, 1, 2,
 	"FCF Fast failover=1 Priority failover=2");
 
-int lpfc_enable_rrq;
+int lpfc_enable_rrq = 2;
 module_param(lpfc_enable_rrq, int, S_IRUGO);
 MODULE_PARM_DESC(lpfc_enable_rrq, "Enable RRQ functionality");
 lpfc_param_show(enable_rrq);
-lpfc_param_init(enable_rrq, 0, 0, 1);
+/*
+# lpfc_enable_rrq: Track XRI/OXID reuse after IO failures
+#	0x0 = disabled, XRI/OXID use not tracked.
+#	0x1 = XRI/OXID reuse is timed with ratov, RRQ sent.
+#	0x2 = XRI/OXID reuse is timed with ratov, No RRQ sent.
+*/
+lpfc_param_init(enable_rrq, 2, 0, 2);
 static DEVICE_ATTR(lpfc_enable_rrq, S_IRUGO, lpfc_enable_rrq_show, NULL);
 
 /*
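
For context on the lpfc_enable_rrq change above: lpfc_param_init() expands to a range-checked initializer, so this hunk both widens the accepted range to 0-2 and makes 2 (time XRI/OXID reuse with ratov, no RRQ sent) the new default. A simplified, hedged sketch of what that expansion amounts to — the function body and log text here are illustrative, not the driver's exact code:

	static int
	lpfc_enable_rrq_init(struct lpfc_hba *phba, int val)
	{
		/* accept 0..2 from the module parameter */
		if (val >= 0 && val <= 2) {
			phba->cfg_enable_rrq = val;
			return 0;
		}
		/* out of range: log it and fall back to the default of 2 */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"lpfc_enable_rrq %d out of range, using 2\n",
				val);
		phba->cfg_enable_rrq = 2;
		return -EINVAL;
	}
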
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index f5d106456f1d..888666892004 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -64,18 +64,14 @@ struct lpfc_bsg_event {
 	struct list_head events_to_get;
 	struct list_head events_to_see;
 
-	/* job waiting for this event to finish */
-	struct fc_bsg_job *set_job;
+	/* driver data associated with the job */
+	void *dd_data;
 };
 
 struct lpfc_bsg_iocb {
 	struct lpfc_iocbq *cmdiocbq;
-	struct lpfc_iocbq *rspiocbq;
-	struct lpfc_dmabuf *bmp;
+	struct lpfc_dmabuf *rmp;
 	struct lpfc_nodelist *ndlp;
-
-	/* job waiting for this iocb to finish */
-	struct fc_bsg_job *set_job;
 };
 
 struct lpfc_bsg_mbox {
@@ -86,20 +82,13 @@ struct lpfc_bsg_mbox {
 	uint32_t mbOffset; /* from app */
 	uint32_t inExtWLen; /* from app */
 	uint32_t outExtWLen; /* from app */
-
-	/* job waiting for this mbox command to finish */
-	struct fc_bsg_job *set_job;
 };
 
 #define MENLO_DID 0x0000FC0E
 
 struct lpfc_bsg_menlo {
 	struct lpfc_iocbq *cmdiocbq;
-	struct lpfc_iocbq *rspiocbq;
-	struct lpfc_dmabuf *bmp;
-
-	/* job waiting for this iocb to finish */
-	struct fc_bsg_job *set_job;
+	struct lpfc_dmabuf *rmp;
 };
 
 #define TYPE_EVT 	1
@@ -108,6 +97,7 @@ struct lpfc_bsg_menlo {
 #define TYPE_MENLO	4
 struct bsg_job_data {
 	uint32_t type;
+	struct fc_bsg_job *set_job; /* job waiting for this iocb to finish */
 	union {
 		struct lpfc_bsg_event *evt;
 		struct lpfc_bsg_iocb iocb;
@@ -141,6 +131,138 @@ struct lpfc_dmabufext {
 	uint32_t flag;
 };
 
+static void
+lpfc_free_bsg_buffers(struct lpfc_hba *phba, struct lpfc_dmabuf *mlist)
+{
+	struct lpfc_dmabuf *mlast, *next_mlast;
+
+	if (mlist) {
+		list_for_each_entry_safe(mlast, next_mlast, &mlist->list,
+					 list) {
+			lpfc_mbuf_free(phba, mlast->virt, mlast->phys);
+			list_del(&mlast->list);
+			kfree(mlast);
+		}
+		lpfc_mbuf_free(phba, mlist->virt, mlist->phys);
+		kfree(mlist);
+	}
+	return;
+}
+
+static struct lpfc_dmabuf *
+lpfc_alloc_bsg_buffers(struct lpfc_hba *phba, unsigned int size,
+		       int outbound_buffers, struct ulp_bde64 *bpl,
+		       int *bpl_entries)
+{
+	struct lpfc_dmabuf *mlist = NULL;
+	struct lpfc_dmabuf *mp;
+	unsigned int bytes_left = size;
+
+	/* Verify we can support the size specified */
+	if (!size || (size > (*bpl_entries * LPFC_BPL_SIZE)))
+		return NULL;
+
+	/* Determine the number of dma buffers to allocate */
+	*bpl_entries = (size % LPFC_BPL_SIZE ? size/LPFC_BPL_SIZE + 1 :
+			size/LPFC_BPL_SIZE);
+
+	/* Allocate dma buffer and place in BPL passed */
+	while (bytes_left) {
+		/* Allocate dma buffer  */
+		mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+		if (!mp) {
+			if (mlist)
+				lpfc_free_bsg_buffers(phba, mlist);
+			return NULL;
+		}
+
+		INIT_LIST_HEAD(&mp->list);
+		mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys));
+
+		if (!mp->virt) {
+			kfree(mp);
+			if (mlist)
+				lpfc_free_bsg_buffers(phba, mlist);
+			return NULL;
+		}
+
+		/* Queue it to a linked list */
+		if (!mlist)
+			mlist = mp;
+		else
+			list_add_tail(&mp->list, &mlist->list);
+
+		/* Add buffer to buffer pointer list */
+		if (outbound_buffers)
+			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+		else
+			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
+		bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys));
+		bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys));
+		bpl->tus.f.bdeSize = (uint16_t)
+			(bytes_left >= LPFC_BPL_SIZE ? LPFC_BPL_SIZE :
+			 bytes_left);
+		bytes_left -= bpl->tus.f.bdeSize;
+		bpl->tus.w = le32_to_cpu(bpl->tus.w);
+		bpl++;
+	}
+	return mlist;
+}
+
+static unsigned int
+lpfc_bsg_copy_data(struct lpfc_dmabuf *dma_buffers,
+		   struct fc_bsg_buffer *bsg_buffers,
+		   unsigned int bytes_to_transfer, int to_buffers)
+{
+
+	struct lpfc_dmabuf *mp;
+	unsigned int transfer_bytes, bytes_copied = 0;
+	unsigned int sg_offset, dma_offset;
+	unsigned char *dma_address, *sg_address;
+	struct scatterlist *sgel;
+	LIST_HEAD(temp_list);
+
+
+	list_splice_init(&dma_buffers->list, &temp_list);
+	list_add(&dma_buffers->list, &temp_list);
+	sg_offset = 0;
+	sgel = bsg_buffers->sg_list;
+	list_for_each_entry(mp, &temp_list, list) {
+		dma_offset = 0;
+		while (bytes_to_transfer && sgel &&
+		       (dma_offset < LPFC_BPL_SIZE)) {
+			dma_address = mp->virt + dma_offset;
+			if (sg_offset) {
+				/* Continue previous partial transfer of sg */
+				sg_address = sg_virt(sgel) + sg_offset;
+				transfer_bytes = sgel->length - sg_offset;
+			} else {
+				sg_address = sg_virt(sgel);
+				transfer_bytes = sgel->length;
+			}
+			if (bytes_to_transfer < transfer_bytes)
+				transfer_bytes = bytes_to_transfer;
+			if (transfer_bytes > (LPFC_BPL_SIZE - dma_offset))
+				transfer_bytes = LPFC_BPL_SIZE - dma_offset;
+			if (to_buffers)
+				memcpy(dma_address, sg_address, transfer_bytes);
+			else
+				memcpy(sg_address, dma_address, transfer_bytes);
+			dma_offset += transfer_bytes;
+			sg_offset += transfer_bytes;
+			bytes_to_transfer -= transfer_bytes;
+			bytes_copied += transfer_bytes;
+			if (sg_offset >= sgel->length) {
+				sg_offset = 0;
+				sgel = sg_next(sgel);
+			}
+		}
+	}
+	list_del_init(&dma_buffers->list);
+	list_splice(&temp_list, &dma_buffers->list);
+	return bytes_copied;
+}
+
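
Taken together, these three new helpers replace direct pci_map_sg() of the bsg scatterlists with driver-owned DMA bounce buffers, so a job that times out can be freed without the hardware still DMA-ing into its pages. A hedged sketch of the request-side call sequence, mirroring how lpfc_bsg_send_mgmt_cmd() uses the helpers later in this patch:

	struct ulp_bde64 *bpl = (struct ulp_bde64 *)bmp->virt;
	int entries = LPFC_BPL_SIZE / sizeof(struct ulp_bde64);
	struct lpfc_dmabuf *cmp;

	/* allocate the bounce buffers and fill the BPL in one pass */
	cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
				     1 /* outbound */, bpl, &entries);
	if (!cmp)
		return -ENOMEM;

	/* stage the user payload into the buffers (to_buffers = 1) */
	lpfc_bsg_copy_data(cmp, &job->request_payload,
			   job->request_payload.payload_len, 1);

	/* ... issue the iocb; on completion or failure: */
	lpfc_free_bsg_buffers(phba, cmp);
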
 /**
  * lpfc_bsg_send_mgmt_cmd_cmp - lpfc_bsg_send_mgmt_cmd's completion handler
  * @phba: Pointer to HBA context object.
@@ -166,62 +288,72 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
 	struct bsg_job_data *dd_data;
 	struct fc_bsg_job *job;
 	IOCB_t *rsp;
-	struct lpfc_dmabuf *bmp;
+	struct lpfc_dmabuf *bmp, *cmp, *rmp;
 	struct lpfc_nodelist *ndlp;
 	struct lpfc_bsg_iocb *iocb;
 	unsigned long flags;
+	unsigned int rsp_size;
 	int rc = 0;
 
+	dd_data = cmdiocbq->context1;
+
+	/* Determine if job has been aborted */
 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
-	dd_data = cmdiocbq->context2;
-	if (!dd_data) {
-		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
-		lpfc_sli_release_iocbq(phba, cmdiocbq);
-		return;
+	job = dd_data->set_job;
+	if (job) {
+		/* Prevent timeout handling from trying to abort job */
+		job->dd_data = NULL;
 	}
+	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 
 	iocb = &dd_data->context_un.iocb;
-	job = iocb->set_job;
-	job->dd_data = NULL; /* so timeout handler does not reply */
-
-	bmp = iocb->bmp;
+	ndlp = iocb->ndlp;
+	rmp = iocb->rmp;
+	cmp = cmdiocbq->context2;
+	bmp = cmdiocbq->context3;
 	rsp = &rspiocbq->iocb;
-	ndlp = cmdiocbq->context1;
 
-	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
-		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
-	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
-		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+	/* Copy the completed data or set the error status */
 
-	if (rsp->ulpStatus) {
-		if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
-			switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
-			case IOERR_SEQUENCE_TIMEOUT:
-				rc = -ETIMEDOUT;
-				break;
-			case IOERR_INVALID_RPI:
-				rc = -EFAULT;
-				break;
-			default:
+	if (job) {
+		if (rsp->ulpStatus) {
+			if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
+				switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
+				case IOERR_SEQUENCE_TIMEOUT:
+					rc = -ETIMEDOUT;
+					break;
+				case IOERR_INVALID_RPI:
+					rc = -EFAULT;
+					break;
+				default:
+					rc = -EACCES;
+					break;
+				}
+			} else {
 				rc = -EACCES;
-				break;
 			}
-		} else
-			rc = -EACCES;
-	} else
-		job->reply->reply_payload_rcv_len =
-			rsp->un.genreq64.bdl.bdeSize;
+		} else {
+			rsp_size = rsp->un.genreq64.bdl.bdeSize;
+			job->reply->reply_payload_rcv_len =
+				lpfc_bsg_copy_data(rmp, &job->reply_payload,
+						   rsp_size, 0);
+		}
+	}
 
+	lpfc_free_bsg_buffers(phba, cmp);
+	lpfc_free_bsg_buffers(phba, rmp);
 	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+	kfree(bmp);
 	lpfc_sli_release_iocbq(phba, cmdiocbq);
 	lpfc_nlp_put(ndlp);
-	kfree(bmp);
 	kfree(dd_data);
-	/* make error code available to userspace */
-	job->reply->result = rc;
-	/* complete the job back to userspace */
-	job->job_done(job);
-	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+	/* Complete the job if the job is still active */
+
+	if (job) {
+		job->reply->result = rc;
+		job->job_done(job);
+	}
 	return;
 }
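
The rewritten completion handler above establishes the handshake that every handler in this patch now follows: claim the job once under ct_ev_lock, NULL job->dd_data so the timeout path backs off, tear down resources unconditionally, and complete the job only if this path won the race. Reduced to its skeleton (a sketch, not verbatim driver code):

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	job = dd_data->set_job;
	if (job)
		job->dd_data = NULL;	/* timeout handler now sees "gone" */
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/* resource teardown happens whether or not the job survived */

	if (job) {			/* only the lock winner completes it */
		job->reply->result = rc;
		job->job_done(job);
	}
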
 
@@ -240,12 +372,9 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
 	uint32_t timeout;
 	struct lpfc_iocbq *cmdiocbq = NULL;
 	IOCB_t *cmd;
-	struct lpfc_dmabuf *bmp = NULL;
+	struct lpfc_dmabuf *bmp = NULL, *cmp = NULL, *rmp = NULL;
 	int request_nseg;
 	int reply_nseg;
-	struct scatterlist *sgel = NULL;
-	int numbde;
-	dma_addr_t busaddr;
 	struct bsg_job_data *dd_data;
 	uint32_t creg_val;
 	int rc = 0;
@@ -268,54 +397,50 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
 		goto no_ndlp;
 	}
 
-	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
-	if (!bmp) {
-		rc = -ENOMEM;
-		goto free_ndlp;
-	}
-
 	if (ndlp->nlp_flag & NLP_ELS_SND_MASK) {
 		rc = -ENODEV;
-		goto free_bmp;
+		goto free_ndlp;
 	}
 
 	cmdiocbq = lpfc_sli_get_iocbq(phba);
 	if (!cmdiocbq) {
 		rc = -ENOMEM;
-		goto free_bmp;
+		goto free_ndlp;
 	}
 
 	cmd = &cmdiocbq->iocb;
+
+	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+	if (!bmp) {
+		rc = -ENOMEM;
+		goto free_cmdiocbq;
+	}
 	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
 	if (!bmp->virt) {
 		rc = -ENOMEM;
-		goto free_cmdiocbq;
+		goto free_bmp;
 	}
 
 	INIT_LIST_HEAD(&bmp->list);
+
 	bpl = (struct ulp_bde64 *) bmp->virt;
-	request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
-				  job->request_payload.sg_cnt, DMA_TO_DEVICE);
-	for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
-		busaddr = sg_dma_address(sgel);
-		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
-		bpl->tus.f.bdeSize = sg_dma_len(sgel);
-		bpl->tus.w = cpu_to_le32(bpl->tus.w);
-		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
-		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
-		bpl++;
+	request_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64);
+	cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
+				     1, bpl, &request_nseg);
+	if (!cmp) {
+		rc = -ENOMEM;
+		goto free_bmp;
 	}
+	lpfc_bsg_copy_data(cmp, &job->request_payload,
+			   job->request_payload.payload_len, 1);
 
-	reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
-				job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
-	for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
-		busaddr = sg_dma_address(sgel);
-		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
-		bpl->tus.f.bdeSize = sg_dma_len(sgel);
-		bpl->tus.w = cpu_to_le32(bpl->tus.w);
-		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
-		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
-		bpl++;
+	bpl += request_nseg;
+	reply_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64) - request_nseg;
+	rmp = lpfc_alloc_bsg_buffers(phba, job->reply_payload.payload_len, 0,
+				     bpl, &reply_nseg);
+	if (!rmp) {
+		rc = -ENOMEM;
+		goto free_cmp;
 	}
 
 	cmd->un.genreq64.bdl.ulpIoTag32 = 0;
@@ -343,17 +468,20 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
 	cmd->ulpTimeout = timeout;
 
 	cmdiocbq->iocb_cmpl = lpfc_bsg_send_mgmt_cmd_cmp;
-	cmdiocbq->context1 = ndlp;
-	cmdiocbq->context2 = dd_data;
+	cmdiocbq->context1 = dd_data;
+	cmdiocbq->context2 = cmp;
+	cmdiocbq->context3 = bmp;
 	dd_data->type = TYPE_IOCB;
+	dd_data->set_job = job;
 	dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
-	dd_data->context_un.iocb.set_job = job;
-	dd_data->context_un.iocb.bmp = bmp;
+	dd_data->context_un.iocb.ndlp = ndlp;
+	dd_data->context_un.iocb.rmp = rmp;
+	job->dd_data = dd_data;
 
 	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
 		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
 			rc = -EIO ;
-			goto free_cmdiocbq;
+			goto free_rmp;
 		}
 		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
 		writel(creg_val, phba->HCregaddr);
@@ -368,19 +496,18 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
 	else
 		rc = -EIO;
 
-
 	/* iocb failed so cleanup */
-	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
-		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
-	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
-		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
 
-	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
-
-free_cmdiocbq:
-	lpfc_sli_release_iocbq(phba, cmdiocbq);
+free_rmp:
+	lpfc_free_bsg_buffers(phba, rmp);
+free_cmp:
+	lpfc_free_bsg_buffers(phba, cmp);
 free_bmp:
+	if (bmp->virt)
+		lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
 	kfree(bmp);
+free_cmdiocbq:
+	lpfc_sli_release_iocbq(phba, cmdiocbq);
 free_ndlp:
 	lpfc_nlp_put(ndlp);
 no_ndlp:
@@ -418,67 +545,68 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
 	struct fc_bsg_job *job;
 	IOCB_t *rsp;
 	struct lpfc_nodelist *ndlp;
-	struct lpfc_dmabuf *pbuflist = NULL;
+	struct lpfc_dmabuf *pcmd = NULL, *prsp = NULL;
 	struct fc_bsg_ctels_reply *els_reply;
 	uint8_t *rjt_data;
 	unsigned long flags;
+	unsigned int rsp_size;
 	int rc = 0;
 
-	spin_lock_irqsave(&phba->ct_ev_lock, flags);
 	dd_data = cmdiocbq->context1;
-	/* normal completion and timeout crossed paths, already done */
-	if (!dd_data) {
-		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
-		return;
-	}
+	ndlp = dd_data->context_un.iocb.ndlp;
+	cmdiocbq->context1 = ndlp;
 
-	cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
-	if (cmdiocbq->context2 && rspiocbq)
-		memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
-		       &rspiocbq->iocb, sizeof(IOCB_t));
+	/* Determine if job has been aborted */
+	spin_lock_irqsave(&phba->ct_ev_lock, flags);
+	job = dd_data->set_job;
+	if (job) {
+		/* Prevent timeout handling from trying to abort job  */
+		job->dd_data = NULL;
+	}
+	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 
-	job = dd_data->context_un.iocb.set_job;
-	cmdiocbq = dd_data->context_un.iocb.cmdiocbq;
-	rspiocbq = dd_data->context_un.iocb.rspiocbq;
 	rsp = &rspiocbq->iocb;
-	ndlp = dd_data->context_un.iocb.ndlp;
+	pcmd = (struct lpfc_dmabuf *)cmdiocbq->context2;
+	prsp = (struct lpfc_dmabuf *)pcmd->list.next;
 
-	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
-		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
-	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
-		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+	/* Copy the completed job data or determine the job status if job is
+	 * still active
+	 */
 
-	if (job->reply->result == -EAGAIN)
-		rc = -EAGAIN;
-	else if (rsp->ulpStatus == IOSTAT_SUCCESS)
-		job->reply->reply_payload_rcv_len =
-			rsp->un.elsreq64.bdl.bdeSize;
-	else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
-		job->reply->reply_payload_rcv_len =
-			sizeof(struct fc_bsg_ctels_reply);
-		/* LS_RJT data returned in word 4 */
-		rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
-		els_reply = &job->reply->reply_data.ctels_reply;
-		els_reply->status = FC_CTELS_STATUS_REJECT;
-		els_reply->rjt_data.action = rjt_data[3];
-		els_reply->rjt_data.reason_code = rjt_data[2];
-		els_reply->rjt_data.reason_explanation = rjt_data[1];
-		els_reply->rjt_data.vendor_unique = rjt_data[0];
-	} else
-		rc = -EIO;
+	if (job) {
+		if (rsp->ulpStatus == IOSTAT_SUCCESS) {
+			rsp_size = rsp->un.elsreq64.bdl.bdeSize;
+			job->reply->reply_payload_rcv_len =
+				sg_copy_from_buffer(job->reply_payload.sg_list,
+						    job->reply_payload.sg_cnt,
+						    prsp->virt,
+						    rsp_size);
+		} else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
+			job->reply->reply_payload_rcv_len =
+				sizeof(struct fc_bsg_ctels_reply);
+			/* LS_RJT data returned in word 4 */
+			rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
+			els_reply = &job->reply->reply_data.ctels_reply;
+			els_reply->status = FC_CTELS_STATUS_REJECT;
+			els_reply->rjt_data.action = rjt_data[3];
+			els_reply->rjt_data.reason_code = rjt_data[2];
+			els_reply->rjt_data.reason_explanation = rjt_data[1];
+			els_reply->rjt_data.vendor_unique = rjt_data[0];
+		} else {
+			rc = -EIO;
+		}
+	}
 
-	pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3;
-	lpfc_mbuf_free(phba, pbuflist->virt, pbuflist->phys);
-	lpfc_sli_release_iocbq(phba, rspiocbq);
-	lpfc_sli_release_iocbq(phba, cmdiocbq);
 	lpfc_nlp_put(ndlp);
+	lpfc_els_free_iocb(phba, cmdiocbq);
 	kfree(dd_data);
-	/* make error code available to userspace */
-	job->reply->result = rc;
-	job->dd_data = NULL;
-	/* complete the job back to userspace */
-	job->job_done(job);
-	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+	/* Complete the job if the job is still active */
+
+	if (job) {
+		job->reply->result = rc;
+		job->job_done(job);
+	}
 	return;
 }
 
@@ -496,19 +624,8 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
 	uint32_t elscmd;
 	uint32_t cmdsize;
 	uint32_t rspsize;
-	struct lpfc_iocbq *rspiocbq;
 	struct lpfc_iocbq *cmdiocbq;
-	IOCB_t *rsp;
 	uint16_t rpi = 0;
-	struct lpfc_dmabuf *pcmd;
-	struct lpfc_dmabuf *prsp;
-	struct lpfc_dmabuf *pbuflist = NULL;
-	struct ulp_bde64 *bpl;
-	int request_nseg;
-	int reply_nseg;
-	struct scatterlist *sgel = NULL;
-	int numbde;
-	dma_addr_t busaddr;
 	struct bsg_job_data *dd_data;
 	uint32_t creg_val;
 	int rc = 0;
@@ -516,6 +633,15 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
 	/* in case no data is transferred */
 	job->reply->reply_payload_rcv_len = 0;
 
+	/* verify the els command is not greater than the
+	 * maximum ELS transfer size.
+	 */
+
+	if (job->request_payload.payload_len > FCELSSIZE) {
+		rc = -EINVAL;
+		goto no_dd_data;
+	}
+
 	/* allocate our bsg tracking structure */
 	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
 	if (!dd_data) {
@@ -525,88 +651,51 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
 		goto no_dd_data;
 	}
 
-	if (!lpfc_nlp_get(ndlp)) {
-		rc = -ENODEV;
-		goto free_dd_data;
-	}
-
 	elscmd = job->request->rqst_data.r_els.els_code;
 	cmdsize = job->request_payload.payload_len;
 	rspsize = job->reply_payload.payload_len;
-	rspiocbq = lpfc_sli_get_iocbq(phba);
-	if (!rspiocbq) {
-		lpfc_nlp_put(ndlp);
-		rc = -ENOMEM;
+
+	if (!lpfc_nlp_get(ndlp)) {
+		rc = -ENODEV;
 		goto free_dd_data;
 	}
 
-	rsp = &rspiocbq->iocb;
-	rpi = ndlp->nlp_rpi;
+	/* Use the DMA buffers allocated by lpfc_prep_els_iocb for the command
+	 * and response, to ensure that if the job times out and the request
+	 * is freed, we won't be DMA'ing into memory that is no longer
+	 * allocated to the request.
+	 */
 
 	cmdiocbq = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp,
 				      ndlp->nlp_DID, elscmd);
 	if (!cmdiocbq) {
 		rc = -EIO;
-		goto free_rspiocbq;
+		goto release_ndlp;
 	}
 
-	/* prep els iocb set context1 to the ndlp, context2 to the command
-	 * dmabuf, context3 holds the data dmabuf
-	 */
-	pcmd = (struct lpfc_dmabuf *) cmdiocbq->context2;
-	prsp = (struct lpfc_dmabuf *) pcmd->list.next;
-	lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
-	kfree(pcmd);
-	lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
-	kfree(prsp);
-	cmdiocbq->context2 = NULL;
-
-	pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3;
-	bpl = (struct ulp_bde64 *) pbuflist->virt;
-
-	request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
-				  job->request_payload.sg_cnt, DMA_TO_DEVICE);
-	for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
-		busaddr = sg_dma_address(sgel);
-		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
-		bpl->tus.f.bdeSize = sg_dma_len(sgel);
-		bpl->tus.w = cpu_to_le32(bpl->tus.w);
-		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
-		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
-		bpl++;
-	}
+	rpi = ndlp->nlp_rpi;
+
+	/* Transfer the request payload to allocated command dma buffer */
+
+	sg_copy_to_buffer(job->request_payload.sg_list,
+			  job->request_payload.sg_cnt,
+			  ((struct lpfc_dmabuf *)cmdiocbq->context2)->virt,
+			  cmdsize);
 
-	reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
-				job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
-	for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
-		busaddr = sg_dma_address(sgel);
-		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
-		bpl->tus.f.bdeSize = sg_dma_len(sgel);
-		bpl->tus.w = cpu_to_le32(bpl->tus.w);
-		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
-		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
-		bpl++;
-	}
-	cmdiocbq->iocb.un.elsreq64.bdl.bdeSize =
-		(request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
 	if (phba->sli_rev == LPFC_SLI_REV4)
 		cmdiocbq->iocb.ulpContext = phba->sli4_hba.rpi_ids[rpi];
 	else
 		cmdiocbq->iocb.ulpContext = rpi;
 	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
-	cmdiocbq->context1 = NULL;
-	cmdiocbq->context2 = NULL;
-
-	cmdiocbq->iocb_cmpl = lpfc_bsg_rport_els_cmp;
 	cmdiocbq->context1 = dd_data;
 	cmdiocbq->context_un.ndlp = ndlp;
-	cmdiocbq->context2 = rspiocbq;
+	cmdiocbq->iocb_cmpl = lpfc_bsg_rport_els_cmp;
 	dd_data->type = TYPE_IOCB;
+	dd_data->set_job = job;
 	dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
-	dd_data->context_un.iocb.rspiocbq = rspiocbq;
-	dd_data->context_un.iocb.set_job = job;
-	dd_data->context_un.iocb.bmp = NULL;
 	dd_data->context_un.iocb.ndlp = ndlp;
+	dd_data->context_un.iocb.rmp = NULL;
+	job->dd_data = dd_data;
 
 	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
 		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
@@ -617,8 +706,9 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
 		writel(creg_val, phba->HCregaddr);
 		readl(phba->HCregaddr); /* flush */
 	}
+
 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
-	lpfc_nlp_put(ndlp);
+
 	if (rc == IOCB_SUCCESS)
 		return 0; /* done for now */
 	else if (rc == IOCB_BUSY)
@@ -627,17 +717,12 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
 		rc = -EIO;
 
 linkdown_err:
-	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
-		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
-	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
-		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
 
-	lpfc_mbuf_free(phba, pbuflist->virt, pbuflist->phys);
-
-	lpfc_sli_release_iocbq(phba, cmdiocbq);
+	cmdiocbq->context1 = ndlp;
+	lpfc_els_free_iocb(phba, cmdiocbq);
 
-free_rspiocbq:
-	lpfc_sli_release_iocbq(phba, rspiocbq);
+release_ndlp:
+	lpfc_nlp_put(ndlp);
 
 free_dd_data:
 	kfree(dd_data);
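
Net effect for the ELS pass-through: the command/response DMA buffers that lpfc_prep_els_iocb() already allocates now stay in place, and payloads move through the generic scatterlist copy helpers instead of mapped bsg sg lists. The two copies as the patch wires them up (sketch; rsp_size comes from the completion handler):

	/* submit side: user sg list -> command buffer from prep_els_iocb */
	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt,
			  ((struct lpfc_dmabuf *)cmdiocbq->context2)->virt,
			  cmdsize);

	/* completion side: response buffer (prsp) -> user sg list */
	job->reply->reply_payload_rcv_len =
		sg_copy_from_buffer(job->reply_payload.sg_list,
				    job->reply_payload.sg_cnt,
				    prsp->virt, rsp_size);
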
@@ -680,6 +765,7 @@ lpfc_bsg_event_free(struct kref *kref)
 		kfree(ed);
 	}
 
+	kfree(evt->dd_data);
 	kfree(evt);
 }
 
@@ -723,6 +809,7 @@ lpfc_bsg_event_new(uint32_t ev_mask, int ev_reg_id, uint32_t ev_req_id)
 	evt->req_id = ev_req_id;
 	evt->reg_id = ev_reg_id;
 	evt->wait_time_stamp = jiffies;
+	evt->dd_data = NULL;
 	init_waitqueue_head(&evt->wq);
 	kref_init(&evt->kref);
 	return evt;
@@ -790,6 +877,7 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 	struct lpfc_hbq_entry *hbqe;
 	struct lpfc_sli_ct_request *ct_req;
 	struct fc_bsg_job *job = NULL;
+	struct bsg_job_data *dd_data = NULL;
 	unsigned long flags;
 	int size = 0;
 
@@ -986,10 +1074,11 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 		}
 
 		list_move(evt->events_to_see.prev, &evt->events_to_get);
-		lpfc_bsg_event_unref(evt);
 
-		job = evt->set_job;
-		evt->set_job = NULL;
+		dd_data = (struct bsg_job_data *)evt->dd_data;
+		job = dd_data->set_job;
+		dd_data->set_job = NULL;
+		lpfc_bsg_event_unref(evt);
 		if (job) {
 			job->reply->reply_payload_rcv_len = size;
 			/* make error code available to userspace */
@@ -1078,14 +1167,6 @@ lpfc_bsg_hba_set_event(struct fc_bsg_job *job)
 		goto job_error;
 	}
 
-	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
-	if (dd_data == NULL) {
-		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
-				"2734 Failed allocation of dd_data\n");
-		rc = -ENOMEM;
-		goto job_error;
-	}
-
 	event_req = (struct set_ct_event *)
 		job->request->rqst_data.h_vendor.vendor_cmd;
 	ev_mask = ((uint32_t)(unsigned long)event_req->type_mask &
@@ -1095,6 +1176,7 @@ lpfc_bsg_hba_set_event(struct fc_bsg_job *job)
 		if (evt->reg_id == event_req->ev_reg_id) {
 			lpfc_bsg_event_ref(evt);
 			evt->wait_time_stamp = jiffies;
+			dd_data = (struct bsg_job_data *)evt->dd_data;
 			break;
 		}
 	}
@@ -1102,6 +1184,13 @@ lpfc_bsg_hba_set_event(struct fc_bsg_job *job)
 
 	if (&evt->node == &phba->ct_ev_waiters) {
 		/* no event waiting struct yet - first call */
+		dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
+		if (dd_data == NULL) {
+			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+					"2734 Failed allocation of dd_data\n");
+			rc = -ENOMEM;
+			goto job_error;
+		}
 		evt = lpfc_bsg_event_new(ev_mask, event_req->ev_reg_id,
 					event_req->ev_req_id);
 		if (!evt) {
@@ -1111,7 +1200,10 @@ lpfc_bsg_hba_set_event(struct fc_bsg_job *job)
 			rc = -ENOMEM;
 			goto job_error;
 		}
-
+		dd_data->type = TYPE_EVT;
+		dd_data->set_job = NULL;
+		dd_data->context_un.evt = evt;
+		evt->dd_data = (void *)dd_data;
 		spin_lock_irqsave(&phba->ct_ev_lock, flags);
 		list_add(&evt->node, &phba->ct_ev_waiters);
 		lpfc_bsg_event_ref(evt);
@@ -1121,9 +1213,7 @@ lpfc_bsg_hba_set_event(struct fc_bsg_job *job)
 
 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
 	evt->waiting = 1;
-	dd_data->type = TYPE_EVT;
-	dd_data->context_un.evt = evt;
-	evt->set_job = job; /* for unsolicited command */
+	dd_data->set_job = job; /* for unsolicited command */
 	job->dd_data = dd_data; /* for fc transport timeout callback*/
 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 	return 0; /* call job done later */
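
On the event path the tracking structure now lives as long as the registered event itself: it is allocated on first registration, attached through evt->dd_data, and released by the kfree(evt->dd_data) that this patch adds to the kref release function lpfc_bsg_event_free(). The ownership wiring, sketched:

	/* first registration only: one bsg_job_data per event */
	dd_data->type = TYPE_EVT;
	dd_data->set_job = NULL;	/* set while a job is waiting */
	dd_data->context_un.evt = evt;
	evt->dd_data = dd_data;		/* freed by lpfc_bsg_event_free() */
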
@@ -1252,57 +1342,64 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
 	struct bsg_job_data *dd_data;
 	struct fc_bsg_job *job;
 	IOCB_t *rsp;
-	struct lpfc_dmabuf *bmp;
+	struct lpfc_dmabuf *bmp, *cmp;
 	struct lpfc_nodelist *ndlp;
 	unsigned long flags;
 	int rc = 0;
 
+	dd_data = cmdiocbq->context1;
+
+	/* Determine if job has been aborted */
 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
-	dd_data = cmdiocbq->context2;
-	/* normal completion and timeout crossed paths, already done */
-	if (!dd_data) {
-		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
-		return;
+	job = dd_data->set_job;
+	if (job) {
+		/* Prevent timeout handling from trying to abort job  */
+		job->dd_data = NULL;
 	}
+	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 
-	job = dd_data->context_un.iocb.set_job;
-	bmp = dd_data->context_un.iocb.bmp;
-	rsp = &rspiocbq->iocb;
 	ndlp = dd_data->context_un.iocb.ndlp;
+	cmp = cmdiocbq->context2;
+	bmp = cmdiocbq->context3;
+	rsp = &rspiocbq->iocb;
 
-	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
-		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
+	/* Copy the completed job data or set the error status */
 
-	if (rsp->ulpStatus) {
-		if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
-			switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
-			case IOERR_SEQUENCE_TIMEOUT:
-				rc = -ETIMEDOUT;
-				break;
-			case IOERR_INVALID_RPI:
-				rc = -EFAULT;
-				break;
-			default:
+	if (job) {
+		if (rsp->ulpStatus) {
+			if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
+				switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
+				case IOERR_SEQUENCE_TIMEOUT:
+					rc = -ETIMEDOUT;
+					break;
+				case IOERR_INVALID_RPI:
+					rc = -EFAULT;
+					break;
+				default:
+					rc = -EACCES;
+					break;
+				}
+			} else {
 				rc = -EACCES;
-				break;
 			}
-		} else
-			rc = -EACCES;
-	} else
-		job->reply->reply_payload_rcv_len =
-			rsp->un.genreq64.bdl.bdeSize;
+		} else {
+			job->reply->reply_payload_rcv_len = 0;
+		}
+	}
 
+	lpfc_free_bsg_buffers(phba, cmp);
 	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+	kfree(bmp);
 	lpfc_sli_release_iocbq(phba, cmdiocbq);
 	lpfc_nlp_put(ndlp);
-	kfree(bmp);
 	kfree(dd_data);
-	/* make error code available to userspace */
-	job->reply->result = rc;
-	job->dd_data = NULL;
-	/* complete the job back to userspace */
-	job->job_done(job);
-	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+	/* Complete the job if the job is still active */
+
+	if (job) {
+		job->reply->result = rc;
+		job->job_done(job);
+	}
 	return;
 }
 
@@ -1316,7 +1413,8 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
  **/
 static int
 lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
-		  struct lpfc_dmabuf *bmp, int num_entry)
+		  struct lpfc_dmabuf *cmp, struct lpfc_dmabuf *bmp,
+		  int num_entry)
 {
 	IOCB_t *icmd;
 	struct lpfc_iocbq *ctiocb = NULL;
@@ -1377,7 +1475,7 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
 
 		/* Check if the ndlp is active */
 		if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
-			rc = -IOCB_ERROR;
+			rc = IOCB_ERROR;
 			goto issue_ct_rsp_exit;
 		}
 
@@ -1385,7 +1483,7 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
 		 * we respond
 		 */
 		if (!lpfc_nlp_get(ndlp)) {
-			rc = -IOCB_ERROR;
+			rc = IOCB_ERROR;
 			goto issue_ct_rsp_exit;
 		}
 
@@ -1407,17 +1505,17 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
 	ctiocb->iocb_cmpl = NULL;
 	ctiocb->iocb_flag |= LPFC_IO_LIBDFC;
 	ctiocb->vport = phba->pport;
+	ctiocb->context1 = dd_data;
+	ctiocb->context2 = cmp;
 	ctiocb->context3 = bmp;
-
 	ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp;
-	ctiocb->context2 = dd_data;
-	ctiocb->context1 = ndlp;
+
 	dd_data->type = TYPE_IOCB;
+	dd_data->set_job = job;
 	dd_data->context_un.iocb.cmdiocbq = ctiocb;
-	dd_data->context_un.iocb.rspiocbq = NULL;
-	dd_data->context_un.iocb.set_job = job;
-	dd_data->context_un.iocb.bmp = bmp;
 	dd_data->context_un.iocb.ndlp = ndlp;
+	dd_data->context_un.iocb.rmp = NULL;
+	job->dd_data = dd_data;
 
 	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
 		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
@@ -1454,11 +1552,8 @@ lpfc_bsg_send_mgmt_rsp(struct fc_bsg_job *job)
 	struct send_mgmt_resp *mgmt_resp = (struct send_mgmt_resp *)
 		job->request->rqst_data.h_vendor.vendor_cmd;
 	struct ulp_bde64 *bpl;
-	struct lpfc_dmabuf *bmp = NULL;
-	struct scatterlist *sgel = NULL;
-	int request_nseg;
-	int numbde;
-	dma_addr_t busaddr;
+	struct lpfc_dmabuf *bmp = NULL, *cmp = NULL;
+	int bpl_entries;
 	uint32_t tag = mgmt_resp->tag;
 	unsigned long reqbfrcnt =
 			(unsigned long)job->request_payload.payload_len;
@@ -1486,30 +1581,28 @@ lpfc_bsg_send_mgmt_rsp(struct fc_bsg_job *job)
 
 	INIT_LIST_HEAD(&bmp->list);
 	bpl = (struct ulp_bde64 *) bmp->virt;
-	request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
-				  job->request_payload.sg_cnt, DMA_TO_DEVICE);
-	for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
-		busaddr = sg_dma_address(sgel);
-		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
-		bpl->tus.f.bdeSize = sg_dma_len(sgel);
-		bpl->tus.w = cpu_to_le32(bpl->tus.w);
-		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
-		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
-		bpl++;
+	bpl_entries = (LPFC_BPL_SIZE/sizeof(struct ulp_bde64));
+	cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
+				     1, bpl, &bpl_entries);
+	if (!cmp) {
+		rc = -ENOMEM;
+		goto send_mgmt_rsp_free_bmp;
 	}
+	lpfc_bsg_copy_data(cmp, &job->request_payload,
+			   job->request_payload.payload_len, 1);
 
-	rc = lpfc_issue_ct_rsp(phba, job, tag, bmp, request_nseg);
+	rc = lpfc_issue_ct_rsp(phba, job, tag, cmp, bmp, bpl_entries);
 
 	if (rc == IOCB_SUCCESS)
 		return 0; /* done for now */
 
-	/* TBD need to handle a timeout */
-	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
-			  job->request_payload.sg_cnt, DMA_TO_DEVICE);
 	rc = -EACCES;
-	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+
+	lpfc_free_bsg_buffers(phba, cmp);
 
 send_mgmt_rsp_free_bmp:
+	if (bmp->virt)
+		lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
 	kfree(bmp);
 send_mgmt_rsp_exit:
 	/* make error code available to userspace */
@@ -1559,7 +1652,7 @@ lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba)
 		scsi_block_requests(shost);
 	}
 
-	while (pring->txcmplq_cnt) {
+	while (!list_empty(&pring->txcmplq)) {
 		if (i++ > 500)  /* wait up to 5 seconds */
 			break;
 		msleep(10);
@@ -3193,13 +3286,7 @@ lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
 	unsigned long flags;
 	uint8_t *pmb, *pmb_buf;
 
-	spin_lock_irqsave(&phba->ct_ev_lock, flags);
 	dd_data = pmboxq->context1;
-	/* job already timed out? */
-	if (!dd_data) {
-		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
-		return;
-	}
 
 	/*
 	 * The outgoing buffer is readily referred from the dma buffer,
@@ -3209,29 +3296,33 @@ lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
 	pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
 	memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
 
-	job = dd_data->context_un.mbox.set_job;
+	/* Determine if job has been aborted */
+
+	spin_lock_irqsave(&phba->ct_ev_lock, flags);
+	job = dd_data->set_job;
+	if (job) {
+		/* Prevent timeout handling from trying to abort job  */
+		job->dd_data = NULL;
+	}
+	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+	/* Copy the mailbox data to the job if it is still active */
+
 	if (job) {
 		size = job->reply_payload.payload_len;
 		job->reply->reply_payload_rcv_len =
 			sg_copy_from_buffer(job->reply_payload.sg_list,
 					    job->reply_payload.sg_cnt,
 					    pmb_buf, size);
-		/* need to hold the lock until we set job->dd_data to NULL
-		 * to hold off the timeout handler returning to the mid-layer
-		 * while we are still processing the job.
-		 */
-		job->dd_data = NULL;
-		dd_data->context_un.mbox.set_job = NULL;
-		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
-	} else {
-		dd_data->context_un.mbox.set_job = NULL;
-		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 	}
 
+	dd_data->set_job = NULL;
 	mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool);
 	lpfc_bsg_dma_page_free(phba, dd_data->context_un.mbox.dmabuffers);
 	kfree(dd_data);
 
+	/* Complete the job if the job is still active */
+
 	if (job) {
 		job->reply->result = 0;
 		job->job_done(job);
@@ -3377,19 +3468,22 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
 	struct lpfc_sli_config_mbox *sli_cfg_mbx;
 	uint8_t *pmbx;
 
-	spin_lock_irqsave(&phba->ct_ev_lock, flags);
 	dd_data = pmboxq->context1;
-	/* has the job already timed out? */
-	if (!dd_data) {
-		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
-		job = NULL;
-		goto job_done_out;
+
+	/* Determine if job has been aborted */
+	spin_lock_irqsave(&phba->ct_ev_lock, flags);
+	job = dd_data->set_job;
+	if (job) {
+		/* Prevent timeout handling from trying to abort job  */
+		job->dd_data = NULL;
 	}
+	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 
 	/*
 	 * The outgoing buffer is readily referred from the dma buffer,
 	 * just need to get header part from mailboxq structure.
 	 */
+
 	pmb = (uint8_t *)&pmboxq->u.mb;
 	pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
 	/* Copy the byte swapped response mailbox back to the user */
@@ -3406,21 +3500,18 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
 			sli_cfg_mbx->un.sli_config_emb0_subsys.mse[0].buf_len);
 	}
 
-	job = dd_data->context_un.mbox.set_job;
+	/* Complete the job if the job is still active */
+
 	if (job) {
 		size = job->reply_payload.payload_len;
 		job->reply->reply_payload_rcv_len =
 			sg_copy_from_buffer(job->reply_payload.sg_list,
 					    job->reply_payload.sg_cnt,
 					    pmb_buf, size);
+
 		/* result for successful */
 		job->reply->result = 0;
-		job->dd_data = NULL;
-		/* need to hold the lock util we set job->dd_data to NULL
-		 * to hold off the timeout handler from midlayer to take
-		 * any action.
-		 */
-		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
 		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
 				"2937 SLI_CONFIG ext-buffer maibox command "
 				"(x%x/x%x) complete bsg job done, bsize:%d\n",
@@ -3431,20 +3522,18 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
 					phba->mbox_ext_buf_ctx.mboxType,
 					dma_ebuf, sta_pos_addr,
 					phba->mbox_ext_buf_ctx.mbx_dmabuf, 0);
-	} else
-		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
-
-job_done_out:
-	if (!job)
+	} else {
 		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
 				"2938 SLI_CONFIG ext-buffer maibox "
 				"command (x%x/x%x) failure, rc:x%x\n",
 				phba->mbox_ext_buf_ctx.nembType,
 				phba->mbox_ext_buf_ctx.mboxType, rc);
+	}
+
+
 	/* state change */
 	phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_DONE;
 	kfree(dd_data);
-
 	return job;
 }
 
@@ -3461,8 +3550,10 @@ lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
 {
 	struct fc_bsg_job *job;
 
+	job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
+
 	/* handle the BSG job with mailbox command */
-	if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_ABTS)
+	if (!job)
 		pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
 
 	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
@@ -3470,15 +3561,13 @@ lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
 			"complete, ctxState:x%x, mbxStatus:x%x\n",
 			phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
 
-	job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
-
 	if (pmboxq->u.mb.mbxStatus || phba->mbox_ext_buf_ctx.numBuf == 1)
 		lpfc_bsg_mbox_ext_session_reset(phba);
 
 	/* free base driver mailbox structure memory */
 	mempool_free(pmboxq, phba->mbox_mem_pool);
 
-	/* complete the bsg job if we have it */
+	/* if the job is still active, call job done */
 	if (job)
 		job->job_done(job);
 
@@ -3498,8 +3587,10 @@ lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
 {
 	struct fc_bsg_job *job;
 
+	job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
+
 	/* handle the BSG job with the mailbox command */
-	if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_ABTS)
+	if (!job)
 		pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
 
 	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
@@ -3507,13 +3598,11 @@ lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
 			"complete, ctxState:x%x, mbxStatus:x%x\n",
 			phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
 
-	job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
-
 	/* free all memory, including dma buffers */
 	mempool_free(pmboxq, phba->mbox_mem_pool);
 	lpfc_bsg_mbox_ext_session_reset(phba);
 
-	/* complete the bsg job if we have it */
+	/* if the job is still active, call job done */
 	if (job)
 		job->job_done(job);
 
@@ -3759,9 +3848,9 @@ lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
 	/* context fields to callback function */
 	pmboxq->context1 = dd_data;
 	dd_data->type = TYPE_MBOX;
+	dd_data->set_job = job;
 	dd_data->context_un.mbox.pmboxq = pmboxq;
 	dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
-	dd_data->context_un.mbox.set_job = job;
 	job->dd_data = dd_data;
 
 	/* state change */
@@ -3928,14 +4017,14 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
 		/* context fields to callback function */
 		pmboxq->context1 = dd_data;
 		dd_data->type = TYPE_MBOX;
+		dd_data->set_job = job;
 		dd_data->context_un.mbox.pmboxq = pmboxq;
 		dd_data->context_un.mbox.mb = (MAILBOX_t *)mbx;
-		dd_data->context_un.mbox.set_job = job;
 		job->dd_data = dd_data;
 
 		/* state change */
-		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
 
+		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
 		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
 		if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
 			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
@@ -3951,6 +4040,7 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
 	}
 
	/* wait for additional external buffers */
+
 	job->reply->result = 0;
 	job->job_done(job);
 	return SLI_CONFIG_HANDLED;
@@ -4268,9 +4358,9 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job,
 		/* context fields to callback function */
 		pmboxq->context1 = dd_data;
 		dd_data->type = TYPE_MBOX;
+		dd_data->set_job = job;
 		dd_data->context_un.mbox.pmboxq = pmboxq;
 		dd_data->context_un.mbox.mb = (MAILBOX_t *)pbuf;
-		dd_data->context_un.mbox.set_job = job;
 		job->dd_data = dd_data;
 
 		/* state change */
@@ -4455,7 +4545,6 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
 	uint8_t *from;
 	uint32_t size;
 
-
 	/* in case no data is transferred */
 	job->reply->reply_payload_rcv_len = 0;
 
@@ -4681,9 +4770,9 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
 	/* setup context field to pass wait_queue pointer to wake function */
 	pmboxq->context1 = dd_data;
 	dd_data->type = TYPE_MBOX;
+	dd_data->set_job = job;
 	dd_data->context_un.mbox.pmboxq = pmboxq;
 	dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
-	dd_data->context_un.mbox.set_job = job;
 	dd_data->context_un.mbox.ext = ext;
 	dd_data->context_un.mbox.mbOffset = mbox_req->mbOffset;
 	dd_data->context_un.mbox.inExtWLen = mbox_req->inExtWLen;
@@ -4797,75 +4886,79 @@ lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
 	struct bsg_job_data *dd_data;
 	struct fc_bsg_job *job;
 	IOCB_t *rsp;
-	struct lpfc_dmabuf *bmp;
+	struct lpfc_dmabuf *bmp, *cmp, *rmp;
 	struct lpfc_bsg_menlo *menlo;
 	unsigned long flags;
 	struct menlo_response *menlo_resp;
+	unsigned int rsp_size;
 	int rc = 0;
 
-	spin_lock_irqsave(&phba->ct_ev_lock, flags);
 	dd_data = cmdiocbq->context1;
-	if (!dd_data) {
-		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
-		return;
-	}
-
+	cmp = cmdiocbq->context2;
+	bmp = cmdiocbq->context3;
 	menlo = &dd_data->context_un.menlo;
-	job = menlo->set_job;
-	job->dd_data = NULL; /* so timeout handler does not reply */
-
-	spin_lock(&phba->hbalock);
-	cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
-	if (cmdiocbq->context2 && rspiocbq)
-		memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
-		       &rspiocbq->iocb, sizeof(IOCB_t));
-	spin_unlock(&phba->hbalock);
-
-	bmp = menlo->bmp;
-	rspiocbq = menlo->rspiocbq;
+	rmp = menlo->rmp;
 	rsp = &rspiocbq->iocb;
 
-	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
-		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
-	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
-		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+	/* Determine if job has been aborted */
+	spin_lock_irqsave(&phba->ct_ev_lock, flags);
+	job = dd_data->set_job;
+	if (job) {
+		/* Prevent timeout handling from trying to abort job  */
+		job->dd_data = NULL;
+	}
+	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+	/* Copy the job data or set the failing status for the job */
 
-	/* always return the xri, this would be used in the case
-	 * of a menlo download to allow the data to be sent as a continuation
-	 * of the exchange.
-	 */
-	menlo_resp = (struct menlo_response *)
-		job->reply->reply_data.vendor_reply.vendor_rsp;
-	menlo_resp->xri = rsp->ulpContext;
-	if (rsp->ulpStatus) {
-		if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
-			switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
-			case IOERR_SEQUENCE_TIMEOUT:
-				rc = -ETIMEDOUT;
-				break;
-			case IOERR_INVALID_RPI:
-				rc = -EFAULT;
-				break;
-			default:
+	if (job) {
+		/* always return the xri, this would be used in the case
+		 * of a menlo download to allow the data to be sent as a
+		 * continuation of the exchange.
+		 */
+
+		menlo_resp = (struct menlo_response *)
+			job->reply->reply_data.vendor_reply.vendor_rsp;
+		menlo_resp->xri = rsp->ulpContext;
+		if (rsp->ulpStatus) {
+			if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
+				switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
+				case IOERR_SEQUENCE_TIMEOUT:
+					rc = -ETIMEDOUT;
+					break;
+				case IOERR_INVALID_RPI:
+					rc = -EFAULT;
+					break;
+				default:
+					rc = -EACCES;
+					break;
+				}
+			} else {
 				rc = -EACCES;
-				break;
 			}
-		} else
-			rc = -EACCES;
-	} else
-		job->reply->reply_payload_rcv_len =
-			rsp->un.genreq64.bdl.bdeSize;
+		} else {
+			rsp_size = rsp->un.genreq64.bdl.bdeSize;
+			job->reply->reply_payload_rcv_len =
+				lpfc_bsg_copy_data(rmp, &job->reply_payload,
+						   rsp_size, 0);
+		}
+
+	}
 
-	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
-	lpfc_sli_release_iocbq(phba, rspiocbq);
 	lpfc_sli_release_iocbq(phba, cmdiocbq);
+	lpfc_free_bsg_buffers(phba, cmp);
+	lpfc_free_bsg_buffers(phba, rmp);
+	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
 	kfree(bmp);
 	kfree(dd_data);
-	/* make error code available to userspace */
-	job->reply->result = rc;
-	/* complete the job back to userspace */
-	job->job_done(job);
-	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+	/* Complete the job if active */
+
+	if (job) {
+		job->reply->result = rc;
+		job->job_done(job);
+	}
+
 	return;
 }
 
@@ -4883,17 +4976,14 @@ lpfc_menlo_cmd(struct fc_bsg_job *job)
 {
 	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
 	struct lpfc_hba *phba = vport->phba;
-	struct lpfc_iocbq *cmdiocbq, *rspiocbq;
-	IOCB_t *cmd, *rsp;
+	struct lpfc_iocbq *cmdiocbq;
+	IOCB_t *cmd;
 	int rc = 0;
 	struct menlo_command *menlo_cmd;
 	struct menlo_response *menlo_resp;
-	struct lpfc_dmabuf *bmp = NULL;
+	struct lpfc_dmabuf *bmp = NULL, *cmp = NULL, *rmp = NULL;
 	int request_nseg;
 	int reply_nseg;
-	struct scatterlist *sgel = NULL;
-	int numbde;
-	dma_addr_t busaddr;
 	struct bsg_job_data *dd_data;
 	struct ulp_bde64 *bpl = NULL;
 
@@ -4948,50 +5038,38 @@ lpfc_menlo_cmd(struct fc_bsg_job *job)
 		goto free_dd;
 	}
 
-	cmdiocbq = lpfc_sli_get_iocbq(phba);
-	if (!cmdiocbq) {
+	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
+	if (!bmp->virt) {
 		rc = -ENOMEM;
 		goto free_bmp;
 	}
 
-	rspiocbq = lpfc_sli_get_iocbq(phba);
-	if (!rspiocbq) {
-		rc = -ENOMEM;
-		goto free_cmdiocbq;
-	}
-
-	rsp = &rspiocbq->iocb;
+	INIT_LIST_HEAD(&bmp->list);
 
-	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
-	if (!bmp->virt) {
+	bpl = (struct ulp_bde64 *)bmp->virt;
+	request_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64);
+	cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
+				     1, bpl, &request_nseg);
+	if (!cmp) {
 		rc = -ENOMEM;
-		goto free_rspiocbq;
+		goto free_bmp;
 	}
+	lpfc_bsg_copy_data(cmp, &job->request_payload,
+			   job->request_payload.payload_len, 1);
 
-	INIT_LIST_HEAD(&bmp->list);
-	bpl = (struct ulp_bde64 *) bmp->virt;
-	request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
-				  job->request_payload.sg_cnt, DMA_TO_DEVICE);
-	for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
-		busaddr = sg_dma_address(sgel);
-		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
-		bpl->tus.f.bdeSize = sg_dma_len(sgel);
-		bpl->tus.w = cpu_to_le32(bpl->tus.w);
-		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
-		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
-		bpl++;
+	bpl += request_nseg;
+	reply_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64) - request_nseg;
+	rmp = lpfc_alloc_bsg_buffers(phba, job->reply_payload.payload_len, 0,
+				     bpl, &reply_nseg);
+	if (!rmp) {
+		rc = -ENOMEM;
+		goto free_cmp;
 	}
 
-	reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
-				job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
-	for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
-		busaddr = sg_dma_address(sgel);
-		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
-		bpl->tus.f.bdeSize = sg_dma_len(sgel);
-		bpl->tus.w = cpu_to_le32(bpl->tus.w);
-		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
-		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
-		bpl++;
+	cmdiocbq = lpfc_sli_get_iocbq(phba);
+	if (!cmdiocbq) {
+		rc = -ENOMEM;
+		goto free_rmp;
 	}
 
 	cmd = &cmdiocbq->iocb;
@@ -5013,11 +5091,10 @@ lpfc_menlo_cmd(struct fc_bsg_job *job)
 	cmdiocbq->vport = phba->pport;
 	/* We want the firmware to timeout before we do */
 	cmd->ulpTimeout = MENLO_TIMEOUT - 5;
-	cmdiocbq->context3 = bmp;
-	cmdiocbq->context2 = rspiocbq;
 	cmdiocbq->iocb_cmpl = lpfc_bsg_menlo_cmd_cmp;
 	cmdiocbq->context1 = dd_data;
-	cmdiocbq->context2 = rspiocbq;
+	cmdiocbq->context2 = cmp;
+	cmdiocbq->context3 = bmp;
 	if (menlo_cmd->cmd == LPFC_BSG_VENDOR_MENLO_CMD) {
 		cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
 		cmd->ulpPU = MENLO_PU; /* 3 */
@@ -5031,29 +5108,25 @@ lpfc_menlo_cmd(struct fc_bsg_job *job)
 	}
 
 	dd_data->type = TYPE_MENLO;
+	dd_data->set_job = job;
 	dd_data->context_un.menlo.cmdiocbq = cmdiocbq;
-	dd_data->context_un.menlo.rspiocbq = rspiocbq;
-	dd_data->context_un.menlo.set_job = job;
-	dd_data->context_un.menlo.bmp = bmp;
+	dd_data->context_un.menlo.rmp = rmp;
+	job->dd_data = dd_data;
 
 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
 		MENLO_TIMEOUT - 5);
 	if (rc == IOCB_SUCCESS)
 		return 0; /* done for now */
 
-	/* iocb failed so cleanup */
-	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
-		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
-	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
-		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
-
-	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
-
-free_rspiocbq:
-	lpfc_sli_release_iocbq(phba, rspiocbq);
-free_cmdiocbq:
 	lpfc_sli_release_iocbq(phba, cmdiocbq);
+
+free_rmp:
+	lpfc_free_bsg_buffers(phba, rmp);
+free_cmp:
+	lpfc_free_bsg_buffers(phba, cmp);
 free_bmp:
+	if (bmp->virt)
+		lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
 	kfree(bmp);
 free_dd:
 	kfree(dd_data);
@@ -5162,70 +5235,94 @@ lpfc_bsg_timeout(struct fc_bsg_job *job)
 	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
 	struct lpfc_hba *phba = vport->phba;
 	struct lpfc_iocbq *cmdiocb;
-	struct lpfc_bsg_event *evt;
-	struct lpfc_bsg_iocb *iocb;
-	struct lpfc_bsg_mbox *mbox;
-	struct lpfc_bsg_menlo *menlo;
 	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
 	struct bsg_job_data *dd_data;
 	unsigned long flags;
+	int rc = 0;
+	LIST_HEAD(completions);
+	struct lpfc_iocbq *check_iocb, *next_iocb;
+
+	/* If the job's driver data is NULL, the command completed or is in
+	 * the process of completing.  In this case, return a status to the
+	 * request so the timeout is retried.  This avoids double-completion
+	 * issues and the request will be pulled off the timer queue when the
+	 * command's completion handler executes.  Otherwise, prevent the
+	 * command's completion handler from executing the job done callback
+	 * and continue processing to abort the outstanding command.
+	 */
 
 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
 	dd_data = (struct bsg_job_data *)job->dd_data;
-	/* timeout and completion crossed paths if no dd_data */
-	if (!dd_data) {
+	if (dd_data) {
+		dd_data->set_job = NULL;
+		job->dd_data = NULL;
+	} else {
 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
-		return 0;
+		return -EAGAIN;
 	}
 
 	switch (dd_data->type) {
 	case TYPE_IOCB:
-		iocb = &dd_data->context_un.iocb;
-		cmdiocb = iocb->cmdiocbq;
-		/* hint to completion handler that the job timed out */
-		job->reply->result = -EAGAIN;
-		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
-		/* this will call our completion handler */
+		/* Check to see if IOCB was issued to the port or not. If not,
+		 * remove it from the txq queue and call cancel iocbs.
+		 * Otherwise, call abort iotag
+		 */
+
+		cmdiocb = dd_data->context_un.iocb.cmdiocbq;
 		spin_lock_irq(&phba->hbalock);
-		lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
+		list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq,
+					 list) {
+			if (check_iocb == cmdiocb) {
+				list_move_tail(&check_iocb->list, &completions);
+				break;
+			}
+		}
+		if (list_empty(&completions))
+			lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
 		spin_unlock_irq(&phba->hbalock);
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+		if (!list_empty(&completions)) {
+			lpfc_sli_cancel_iocbs(phba, &completions,
+					      IOSTAT_LOCAL_REJECT,
+					      IOERR_SLI_ABORTED);
+		}
 		break;
+
 	case TYPE_EVT:
-		evt = dd_data->context_un.evt;
-		/* this event has no job anymore */
-		evt->set_job = NULL;
-		job->dd_data = NULL;
-		job->reply->reply_payload_rcv_len = 0;
-		/* Return -EAGAIN which is our way of signallying the
-		 * app to retry.
-		 */
-		job->reply->result = -EAGAIN;
 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
-		job->job_done(job);
 		break;
+
 	case TYPE_MBOX:
-		mbox = &dd_data->context_un.mbox;
-		/* this mbox has no job anymore */
-		mbox->set_job = NULL;
-		job->dd_data = NULL;
-		job->reply->reply_payload_rcv_len = 0;
-		job->reply->result = -EAGAIN;
-		/* the mbox completion handler can now be run */
-		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
-		job->job_done(job);
+		/* Update the ext buf ctx state if needed */
+
 		if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
 			phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 		break;
 	case TYPE_MENLO:
-		menlo = &dd_data->context_un.menlo;
-		cmdiocb = menlo->cmdiocbq;
-		/* hint to completion handler that the job timed out */
-		job->reply->result = -EAGAIN;
-		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
-		/* this will call our completion handler */
+		/* Check to see if IOCB was issued to the port or not. If not,
+		 * remove it from the txq queue and call cancel iocbs.
+		 * Otherwise, call abort iotag.
+		 */
+
+		cmdiocb = dd_data->context_un.menlo.cmdiocbq;
 		spin_lock_irq(&phba->hbalock);
-		lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
+		list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq,
+					 list) {
+			if (check_iocb == cmdiocb) {
+				list_move_tail(&check_iocb->list, &completions);
+				break;
+			}
+		}
+		if (list_empty(&completions))
+			lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
 		spin_unlock_irq(&phba->hbalock);
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+		if (!list_empty(&completions)) {
+			lpfc_sli_cancel_iocbs(phba, &completions,
+					      IOSTAT_LOCAL_REJECT,
+					      IOERR_SLI_ABORTED);
+		}
 		break;
 	default:
 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
@@ -5236,5 +5333,5 @@ lpfc_bsg_timeout(struct fc_bsg_job *job)
 	 * otherwise an error message will be displayed on the console
 	 * so always return success (zero)
 	 */
-	return 0;
+	return rc;
 }
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 76ca65dae781..7631893ae005 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -106,6 +106,7 @@ void lpfc_cleanup_discovery_resources(struct lpfc_vport *);
 void lpfc_cleanup(struct lpfc_vport *);
 void lpfc_disc_timeout(unsigned long);
 
+int lpfc_unregister_fcf_prep(struct lpfc_hba *);
 struct lpfc_nodelist *__lpfc_findnode_rpi(struct lpfc_vport *, uint16_t);
 struct lpfc_nodelist *lpfc_findnode_rpi(struct lpfc_vport *, uint16_t);
 void lpfc_worker_wake_up(struct lpfc_hba *);
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 08d156a9094f..bbed8471bf0b 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -484,6 +484,7 @@ lpfc_issue_reg_vfi(struct lpfc_vport *vport)
 	vport->port_state = LPFC_FABRIC_CFG_LINK;
 	memcpy(dmabuf->virt, &phba->fc_fabparam, sizeof(vport->fc_sparam));
 	lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
+
 	mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi;
 	mboxq->vport = vport;
 	mboxq->context1 = dmabuf;
@@ -700,6 +701,20 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 		}
 	}
 
+	/*
+	 * For FC we need to do some special processing because of the SLI
+	 * Port's default settings of the Common Service Parameters.
+	 */
+	if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC) {
+		/* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
+		if ((phba->sli_rev == LPFC_SLI_REV4) && fabric_param_changed)
+			lpfc_unregister_fcf_prep(phba);
+
+		/* This should just update the VFI CSPs */
+		if (vport->fc_flag & FC_VFI_REGISTERED)
+			lpfc_issue_reg_vfi(vport);
+	}
+
 	if (fabric_param_changed &&
 		!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
 
@@ -6225,7 +6240,7 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
 		spin_unlock_irq(&phba->hbalock);
 	}
 
-	if (phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt)
+	if (!list_empty(&phba->sli.ring[LPFC_ELS_RING].txcmplq))
 		mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
 }
 
@@ -6279,7 +6294,6 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
 			continue;
 
 		list_move_tail(&piocb->list, &completions);
-		pring->txq_cnt--;
 	}
 
 	list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
@@ -6339,7 +6353,6 @@ lpfc_els_flush_all_cmd(struct lpfc_hba  *phba)
 		    cmd->ulpCommand == CMD_ABORT_XRI_CN)
 			continue;
 		list_move_tail(&piocb->list, &completions);
-		pring->txq_cnt--;
 	}
 	list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
 		if (piocb->iocb_flag & LPFC_IO_LIBDFC)
@@ -8065,7 +8078,7 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
 				rxid, 1);
 
 			/* Check if TXQ queue needs to be serviced */
-			if (pring->txq_cnt)
+			if (!list_empty(&pring->txq))
 				lpfc_worker_wake_up(phba);
 			return;
 		}
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index bfda18467ee6..326e05a65a73 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -691,12 +691,15 @@ lpfc_work_done(struct lpfc_hba *phba)
 			/* Set the lpfc data pending flag */
 			set_bit(LPFC_DATA_READY, &phba->data_flags);
 		} else {
-			pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
-			lpfc_sli_handle_slow_ring_event(phba, pring,
-							(status &
-							 HA_RXMASK));
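+			/* Only service the deferred slow-path ring
+			 * event while the link is up.
+			 */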
+			if (phba->link_state >= LPFC_LINK_UP) {
+				pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
+				lpfc_sli_handle_slow_ring_event(phba, pring,
+								(status &
+								HA_RXMASK));
+			}
 		}
-		if ((phba->sli_rev == LPFC_SLI_REV4) && pring->txq_cnt)
+		if ((phba->sli_rev == LPFC_SLI_REV4) &&
+		    !list_empty(&pring->txq))
 			lpfc_drain_txq(phba);
 		/*
 		 * Turn on Ring interrupts
@@ -1792,6 +1795,8 @@ lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
 	virt_addr = mboxq->sge_array->addr[0];
 
 	shdr = (union lpfc_sli4_cfg_shdr *)virt_addr;
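+	/* lpfc_sli_pcimem_bcopy() byte swaps as it copies, so passing the
+	 * same address for source and destination converts the header to
+	 * host endianness in place before the status bits are read.
+	 */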
+	lpfc_sli_pcimem_bcopy(shdr, shdr,
+			      sizeof(union lpfc_sli4_cfg_shdr));
 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
 	if (shdr_status || shdr_add_status) {
@@ -2888,6 +2893,11 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
 		goto out_free_mem;
 	}
+
+	/* If the VFI is already registered, there is nothing else to do */
+	if (vport->fc_flag & FC_VFI_REGISTERED)
+		goto out_free_mem;
+
 	/* The VPI is implicitly registered when the VFI is registered */
 	spin_lock_irq(shost->host_lock);
 	vport->vpi_state |= LPFC_VPI_REGISTERED;
@@ -2980,6 +2990,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
 	struct lpfc_dmabuf *mp;
 	int rc;
 	struct fcf_record *fcf_record;
+	uint32_t fc_flags = 0;
 
 	spin_lock_irq(&phba->hbalock);
 	switch (bf_get(lpfc_mbx_read_top_link_spd, la)) {
@@ -3011,11 +3022,8 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
 				"1309 Link Up Event npiv not supported in loop "
 				"topology\n");
 				/* Get Loop Map information */
-		if (bf_get(lpfc_mbx_read_top_il, la)) {
-			spin_lock(shost->host_lock);
-			vport->fc_flag |= FC_LBIT;
-			spin_unlock(shost->host_lock);
-		}
+		if (bf_get(lpfc_mbx_read_top_il, la))
+			fc_flags |= FC_LBIT;
 
 		vport->fc_myDID = bf_get(lpfc_mbx_read_top_alpa_granted, la);
 		i = la->lilpBde64.tus.f.bdeSize;
@@ -3064,12 +3072,16 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
 				phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
 		}
 		vport->fc_myDID = phba->fc_pref_DID;
-		spin_lock(shost->host_lock);
-		vport->fc_flag |= FC_LBIT;
-		spin_unlock(shost->host_lock);
+		fc_flags |= FC_LBIT;
 	}
 	spin_unlock_irq(&phba->hbalock);
 
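+	/* Apply any accumulated fc_flag bits now that hbalock has been
+	 * dropped; only the host lock is needed to update fc_flag.
+	 */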
+	if (fc_flags) {
+		spin_lock_irq(shost->host_lock);
+		vport->fc_flag |= fc_flags;
+		spin_unlock_irq(shost->host_lock);
+	}
+
 	lpfc_linkup(phba);
 	sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 	if (!sparam_mbox)
@@ -3237,8 +3249,7 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 		vport->fc_flag &= ~FC_BYPASSED_MODE;
 	spin_unlock_irq(shost->host_lock);
 
-	if ((phba->fc_eventTag  < la->eventTag) ||
-	    (phba->fc_eventTag == la->eventTag)) {
+	if (phba->fc_eventTag <= la->eventTag) {
 		phba->fc_stat.LinkMultiEvent++;
 		if (bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP)
 			if (phba->fc_eventTag != 0)
@@ -3246,16 +3257,18 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 	}
 
 	phba->fc_eventTag = la->eventTag;
-	spin_lock_irq(&phba->hbalock);
-	if (bf_get(lpfc_mbx_read_top_mm, la))
-		phba->sli.sli_flag |= LPFC_MENLO_MAINT;
-	else
-		phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;
-	spin_unlock_irq(&phba->hbalock);
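+	/* Menlo maintenance mode is only tracked on pre-SLI-4 ports; the
+	 * read_top_mm bit is ignored on SLI-4.
+	 */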
+	if (phba->sli_rev < LPFC_SLI_REV4) {
+		spin_lock_irq(&phba->hbalock);
+		if (bf_get(lpfc_mbx_read_top_mm, la))
+			phba->sli.sli_flag |= LPFC_MENLO_MAINT;
+		else
+			phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;
+		spin_unlock_irq(&phba->hbalock);
+	}
 
 	phba->link_events++;
 	if ((bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP) &&
-	    (!bf_get(lpfc_mbx_read_top_mm, la))) {
+	    !(phba->sli.sli_flag & LPFC_MENLO_MAINT)) {
 		phba->fc_stat.LinkUp++;
 		if (phba->link_flag & LS_LOOPBACK_MODE) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
@@ -3300,8 +3313,8 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 				bf_get(lpfc_mbx_read_top_fa, la));
 		lpfc_mbx_issue_link_down(phba);
 	}
-	if ((bf_get(lpfc_mbx_read_top_mm, la)) &&
-	    (bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP)) {
+	if ((phba->sli.sli_flag & LPFC_MENLO_MAINT) &&
+	    (bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP)) {
 		if (phba->link_state != LPFC_LINK_DOWN) {
 			phba->fc_stat.LinkDown++;
 			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
@@ -3329,8 +3342,9 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 		}
 	}
 
-	if (bf_get(lpfc_mbx_read_top_fa, la)) {
-		if (bf_get(lpfc_mbx_read_top_mm, la))
+	if ((phba->sli_rev < LPFC_SLI_REV4) &&
+	    bf_get(lpfc_mbx_read_top_fa, la)) {
+		if (phba->sli.sli_flag & LPFC_MENLO_MAINT)
 			lpfc_issue_clear_la(phba, vport);
 		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
 				"1311 fa %d\n",
@@ -4354,7 +4368,6 @@ lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
 					   with an error */
 					list_move_tail(&iocb->list,
 						       &completions);
-					pring->txq_cnt--;
 				}
 			}
 			spin_unlock_irq(&phba->hbalock);
@@ -5055,7 +5068,6 @@ lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
 		    (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {
 
 			list_move_tail(&iocb->list, &completions);
-			pring->txq_cnt--;
 		}
 	}
 
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 6e93b886cd4d..1dd2f6f0a127 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -1958,6 +1958,9 @@ struct lpfc_mbx_init_vfi {
 
 struct lpfc_mbx_reg_vfi {
 	uint32_t word1;
+#define lpfc_reg_vfi_upd_SHIFT		29
+#define lpfc_reg_vfi_upd_MASK		0x00000001
+#define lpfc_reg_vfi_upd_WORD		word1
 #define lpfc_reg_vfi_vp_SHIFT		28
 #define lpfc_reg_vfi_vp_MASK		0x00000001
 #define lpfc_reg_vfi_vp_WORD		word1
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 314b4f61b9e3..5da297290262 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -839,7 +839,6 @@ lpfc_hba_down_post_s3(struct lpfc_hba *phba)
 		 * way, nothing should be on txcmplq as it will NEVER complete.
 		 */
 		list_splice_init(&pring->txcmplq, &completions);
-		pring->txcmplq_cnt = 0;
 		spin_unlock_irq(&phba->hbalock);
 
 		/* Cancel all the IOCBs from the completions list */
@@ -2915,9 +2914,9 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
 			sglq_entry->state = SGL_FREED;
 			list_add_tail(&sglq_entry->list, &els_sgl_list);
 		}
-		spin_lock(&phba->hbalock);
+		spin_lock_irq(&phba->hbalock);
 		list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list);
-		spin_unlock(&phba->hbalock);
+		spin_unlock_irq(&phba->hbalock);
 	} else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
 		/* els xri-sgl shrinked */
 		xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
@@ -3015,9 +3014,9 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
 		psb->cur_iocbq.sli4_lxritag = lxri;
 		psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
 	}
-	spin_lock(&phba->scsi_buf_list_lock);
+	spin_lock_irq(&phba->scsi_buf_list_lock);
 	list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list);
-	spin_unlock(&phba->scsi_buf_list_lock);
+	spin_unlock_irq(&phba->scsi_buf_list_lock);
 
 	return 0;
 
@@ -4004,6 +4003,52 @@ lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
 }
 
 /**
+ * lpfc_sli4_perform_inuse_fcf_recovery - Perform in-use FCF recovery
+ * @phba: pointer to lpfc hba data structure.
+ * @acqe_fip: pointer to the asynchronous FCoE FIP event.
+ *
+ * This routine performs FCF recovery when the in-use FCF is either dead or
+ * has been modified.
+ **/
+static void
+lpfc_sli4_perform_inuse_fcf_recovery(struct lpfc_hba *phba,
+				     struct lpfc_acqe_fip *acqe_fip)
+{
+	int rc;
+
+	spin_lock_irq(&phba->hbalock);
+	/* Mark the fast failover process in progress */
+	phba->fcf.fcf_flag |= FCF_DEAD_DISC;
+	spin_unlock_irq(&phba->hbalock);
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
+			"2771 Start FCF fast failover process due to in-use "
+			"FCF DEAD/MODIFIED event: evt_tag:x%x, index:x%x\n",
+			acqe_fip->event_tag, acqe_fip->index);
+	rc = lpfc_sli4_redisc_fcf_table(phba);
+	if (rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
+				"2772 Issue FCF rediscover mabilbox command "
+				"failed, fail through to FCF dead event\n");
+		spin_lock_irq(&phba->hbalock);
+		phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
+		spin_unlock_irq(&phba->hbalock);
+		/*
+		 * Last resort will fail over by treating this as a link
+		 * down to FCF registration.
+		 */
+		lpfc_sli4_fcf_dead_failthrough(phba);
+	} else {
+		/* Reset FCF roundrobin bmask for new discovery */
+		lpfc_sli4_clear_fcf_rr_bmask(phba);
+		/*
+		 * Handling fast FCF failover to a DEAD FCF event is
+		 * considered equivalent to receiving CVL to all vports.
+		 */
+		lpfc_sli4_perform_all_vport_cvl(phba);
+	}
+}
+
+/**
  * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
  * @phba: pointer to lpfc hba data structure.
  * @acqe_link: pointer to the async fcoe completion queue entry.
@@ -4068,9 +4113,22 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
 			break;
 		}
 
-		/* If the FCF has been in discovered state, do nothing. */
-		if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
+		/* If the FCF has been in discovered state, perform
+		 * rediscovery only when the FCF with the same index as the
+		 * in-use FCF was modified during normal operation.
+		 * Otherwise, do nothing.
+		 */
+		if (phba->pport->port_state > LPFC_FLOGI) {
 			spin_unlock_irq(&phba->hbalock);
+			if (phba->fcf.current_rec.fcf_indx ==
+			    acqe_fip->index) {
+				lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
+						"3300 In-use FCF (%d) "
+						"modified, perform FCF "
+						"rediscovery\n",
+						acqe_fip->index);
+				lpfc_sli4_perform_inuse_fcf_recovery(phba,
+								     acqe_fip);
+			}
 			break;
 		}
 		spin_unlock_irq(&phba->hbalock);
@@ -4123,39 +4181,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
 		 * is no longer valid as we are not in the middle of FCF
 		 * failover process already.
 		 */
-		spin_lock_irq(&phba->hbalock);
-		/* Mark the fast failover process in progress */
-		phba->fcf.fcf_flag |= FCF_DEAD_DISC;
-		spin_unlock_irq(&phba->hbalock);
-
-		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
-				"2771 Start FCF fast failover process due to "
-				"FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
-				"\n", acqe_fip->event_tag, acqe_fip->index);
-		rc = lpfc_sli4_redisc_fcf_table(phba);
-		if (rc) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
-					LOG_DISCOVERY,
-					"2772 Issue FCF rediscover mabilbox "
-					"command failed, fail through to FCF "
-					"dead event\n");
-			spin_lock_irq(&phba->hbalock);
-			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
-			spin_unlock_irq(&phba->hbalock);
-			/*
-			 * Last resort will fail over by treating this
-			 * as a link down to FCF registration.
-			 */
-			lpfc_sli4_fcf_dead_failthrough(phba);
-		} else {
-			/* Reset FCF roundrobin bmask for new discovery */
-			lpfc_sli4_clear_fcf_rr_bmask(phba);
-			/*
-			 * Handling fast FCF failover to a DEAD FCF event is
-			 * considered equalivant to receiving CVL to all vports.
-			 */
-			lpfc_sli4_perform_all_vport_cvl(phba);
-		}
+		lpfc_sli4_perform_inuse_fcf_recovery(phba, acqe_fip);
 		break;
 	case LPFC_FIP_EVENT_TYPE_CVL:
 		phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index efc9cd9def8b..a7a9fa468308 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -2126,32 +2126,40 @@ void
 lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
 {
 	struct lpfc_mbx_reg_vfi *reg_vfi;
+	struct lpfc_hba *phba = vport->phba;
 
 	memset(mbox, 0, sizeof(*mbox));
 	reg_vfi = &mbox->u.mqe.un.reg_vfi;
 	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_VFI);
 	bf_set(lpfc_reg_vfi_vp, reg_vfi, 1);
 	bf_set(lpfc_reg_vfi_vfi, reg_vfi,
-	       vport->phba->sli4_hba.vfi_ids[vport->vfi]);
-	bf_set(lpfc_reg_vfi_fcfi, reg_vfi, vport->phba->fcf.fcfi);
-	bf_set(lpfc_reg_vfi_vpi, reg_vfi, vport->phba->vpi_ids[vport->vpi]);
+	       phba->sli4_hba.vfi_ids[vport->vfi]);
+	bf_set(lpfc_reg_vfi_fcfi, reg_vfi, phba->fcf.fcfi);
+	bf_set(lpfc_reg_vfi_vpi, reg_vfi, phba->vpi_ids[vport->vpi]);
 	memcpy(reg_vfi->wwn, &vport->fc_portname, sizeof(struct lpfc_name));
 	reg_vfi->wwn[0] = cpu_to_le32(reg_vfi->wwn[0]);
 	reg_vfi->wwn[1] = cpu_to_le32(reg_vfi->wwn[1]);
-	reg_vfi->e_d_tov = vport->phba->fc_edtov;
-	reg_vfi->r_a_tov = vport->phba->fc_ratov;
+	reg_vfi->e_d_tov = phba->fc_edtov;
+	reg_vfi->r_a_tov = phba->fc_ratov;
 	reg_vfi->bde.addrHigh = putPaddrHigh(phys);
 	reg_vfi->bde.addrLow = putPaddrLow(phys);
 	reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam);
 	reg_vfi->bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
 	bf_set(lpfc_reg_vfi_nport_id, reg_vfi, vport->fc_myDID);
+
+	/* Only FC supports upd bit */
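+	/* When the VFI is already registered, clear vp and set upd so the
+	 * port updates the existing VFI rather than registering a new one.
+	 */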
+	if ((phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC) &&
+	    (vport->fc_flag & FC_VFI_REGISTERED)) {
+		bf_set(lpfc_reg_vfi_vp, reg_vfi, 0);
+		bf_set(lpfc_reg_vfi_upd, reg_vfi, 1);
+	}
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_MBOX,
 			"3134 Register VFI, mydid:x%x, fcfi:%d, "
 			" vfi:%d, vpi:%d, fc_pname:%x%x\n",
 			vport->fc_myDID,
-			vport->phba->fcf.fcfi,
-			vport->phba->sli4_hba.vfi_ids[vport->vfi],
-			vport->phba->vpi_ids[vport->vpi],
+			phba->fcf.fcfi,
+			phba->sli4_hba.vfi_ids[vport->vfi],
+			phba->vpi_ids[vport->vpi],
 			reg_vfi->wwn[0], reg_vfi->wwn[1]);
 }
 
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 46128c679202..82f4d3542289 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -226,7 +226,6 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
 		if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) {
 			/* It matches, so deque and call compl with anp error */
 			list_move_tail(&iocb->list, &completions);
-			pring->txq_cnt--;
 		}
 	}
 
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 98af07c6e300..74b8710e1e90 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -732,7 +732,7 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
 		psb = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
 		psb->exch_busy = 0;
 		spin_unlock_irqrestore(&phba->hbalock, iflag);
-		if (pring->txq_cnt)
+		if (!list_empty(&pring->txq))
 			lpfc_worker_wake_up(phba);
 		return;
 
@@ -885,9 +885,9 @@ lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
 	int num_posted, rc = 0;
 
 	/* get all SCSI buffers need to repost to a local list */
-	spin_lock(&phba->scsi_buf_list_lock);
+	spin_lock_irq(&phba->scsi_buf_list_lock);
 	list_splice_init(&phba->lpfc_scsi_buf_list, &post_sblist);
-	spin_unlock(&phba->scsi_buf_list_lock);
+	spin_unlock_irq(&phba->scsi_buf_list_lock);
 
 	/* post the list of scsi buffer sgls to port if available */
 	if (!list_empty(&post_sblist)) {
@@ -4246,7 +4246,7 @@ static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
 	unsigned long  poll_tmo_expires =
 		(jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));
 
-	if (phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt)
+	if (!list_empty(&phba->sli.ring[LPFC_FCP_RING].txcmplq))
 		mod_timer(&phba->fcp_poll_timer,
 			  poll_tmo_expires);
 }
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index d43faf34c1e2..35dd17eb0f27 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -873,14 +873,16 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
 				xritag, rxid, ndlp->nlp_DID, send_rrq);
 		return -EINVAL;
 	}
-	rrq->send_rrq = send_rrq;
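+	/* Honor the lpfc_enable_rrq setting: send the RRQ ELS only when it
+	 * is 1; otherwise the XRI is still tracked but no RRQ is sent.
+	 */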
+	if (phba->cfg_enable_rrq == 1)
+		rrq->send_rrq = send_rrq;
+	else
+		rrq->send_rrq = 0;
 	rrq->xritag = xritag;
 	rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1);
 	rrq->ndlp = ndlp;
 	rrq->nlp_DID = ndlp->nlp_DID;
 	rrq->vport = ndlp->vport;
 	rrq->rxid = rxid;
-	rrq->send_rrq = send_rrq;
 	spin_lock_irqsave(&phba->hbalock, iflags);
 	empty = list_empty(&phba->active_rrq_list);
 	list_add_tail(&rrq->list, &phba->active_rrq_list);
@@ -1009,6 +1011,18 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
 	else
 		sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);
 
+	/*
+	 * This should have been removed from the txcmplq before calling
+	 * iocbq_release.  The normal completion path should have already
+	 * done the list_del_init.
+	 */
+	if (unlikely(!list_empty(&iocbq->list))) {
+		if (iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ)
+			iocbq->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
+		list_del_init(&iocbq->list);
+	}
+
 	if (sglq)  {
 		if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
 			(sglq->state != SGL_XRI_ABORTED)) {
@@ -1025,7 +1039,7 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
 				&phba->sli4_hba.lpfc_sgl_list);
 
 			/* Check if TXQ queue needs to be serviced */
-			if (pring->txq_cnt)
+			if (!list_empty(&pring->txq))
 				lpfc_worker_wake_up(phba);
 		}
 	}
@@ -1057,6 +1071,14 @@ __lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
 	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
 
 	/*
+	 * This should have been removed from the txcmplq before calling
+	 * iocbq_release.  The normal completion path should have already
+	 * done the list_del_init.
+	 */
+	if (unlikely(!list_empty(&iocbq->list)))
+		list_del_init(&iocbq->list);
+
+	/*
 	 * Clean all volatile data fields, preserve iotag and node struct.
 	 */
 	memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
@@ -1122,7 +1144,6 @@ lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
 
 	while (!list_empty(iocblist)) {
 		list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
-
 		if (!piocb->iocb_cmpl)
 			lpfc_sli_release_iocbq(phba, piocb);
 		else {
@@ -1310,9 +1331,6 @@ lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 {
 	list_add_tail(&piocb->list, &pring->txcmplq);
 	piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;
-	pring->txcmplq_cnt++;
-	if (pring->txcmplq_cnt > pring->txcmplq_max)
-		pring->txcmplq_max = pring->txcmplq_cnt;
 
 	if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
 	   (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
@@ -1344,8 +1362,6 @@ lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
 	struct lpfc_iocbq *cmd_iocb;
 
 	list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
-	if (cmd_iocb != NULL)
-		pring->txq_cnt--;
 	return cmd_iocb;
 }
 
@@ -1614,8 +1630,9 @@ lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
 	 *  (c) link attention events can be processed (fcp ring only)
 	 *  (d) IOCB processing is not blocked by the outstanding mbox command.
 	 */
-	if (pring->txq_cnt &&
-	    lpfc_is_link_up(phba) &&
+
+	if (lpfc_is_link_up(phba) &&
+	    !list_empty(&pring->txq) &&
 	    (pring->ringno != phba->sli.fcp_ring ||
 	     phba->sli.sli_flag & LPFC_PROCESS_LA)) {
 
@@ -2612,7 +2629,6 @@ lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
 		cmd_iocb = phba->sli.iocbq_lookup[iotag];
 		list_del_init(&cmd_iocb->list);
 		if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
-			pring->txcmplq_cnt--;
 			cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
 		}
 		return cmd_iocb;
@@ -2650,7 +2666,6 @@ lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
 			/* remove from txcmpl queue list */
 			list_del_init(&cmd_iocb->list);
 			cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
-			pring->txcmplq_cnt--;
 			return cmd_iocb;
 		}
 	}
@@ -3499,7 +3514,6 @@ lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
 	 */
 	spin_lock_irq(&phba->hbalock);
 	list_splice_init(&pring->txq, &completions);
-	pring->txq_cnt = 0;
 
 	/* Next issue ABTS for everything on the txcmplq */
 	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
@@ -3536,11 +3550,9 @@ lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
 	spin_lock_irq(&phba->hbalock);
 	/* Retrieve everything on txq */
 	list_splice_init(&pring->txq, &txq);
-	pring->txq_cnt = 0;
 
 	/* Retrieve everything on the txcmplq */
 	list_splice_init(&pring->txcmplq, &txcmplq);
-	pring->txcmplq_cnt = 0;
 
 	/* Indicate the I/O queues are flushed */
 	phba->hba_flag |= HBA_FCP_IOQ_FLUSH;
@@ -5988,9 +6000,9 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
 	LIST_HEAD(post_sgl_list);
 	LIST_HEAD(free_sgl_list);
 
-	spin_lock(&phba->hbalock);
+	spin_lock_irq(&phba->hbalock);
 	list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &allc_sgl_list);
-	spin_unlock(&phba->hbalock);
+	spin_unlock_irq(&phba->hbalock);
 
 	list_for_each_entry_safe(sglq_entry, sglq_entry_next,
 				 &allc_sgl_list, list) {
@@ -6091,10 +6103,10 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
 
 	/* push els sgls posted to the availble list */
 	if (!list_empty(&post_sgl_list)) {
-		spin_lock(&phba->hbalock);
+		spin_lock_irq(&phba->hbalock);
 		list_splice_init(&post_sgl_list,
 				 &phba->sli4_hba.lpfc_sgl_list);
-		spin_unlock(&phba->hbalock);
+		spin_unlock_irq(&phba->hbalock);
 	} else {
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 				"3161 Failure to post els sgl to port.\n");
@@ -7615,7 +7627,6 @@ __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 {
 	/* Insert the caller's iocb in the txq tail for later processing. */
 	list_add_tail(&piocb->list, &pring->txq);
-	pring->txq_cnt++;
 }
 
 /**
@@ -8387,7 +8398,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
 		    piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
 			sglq = NULL;
 		else {
-			if (pring->txq_cnt) {
+			if (!list_empty(&pring->txq)) {
 				if (!(flag & SLI_IOCB_RET_IOCB)) {
 					__lpfc_sli_ringtx_put(phba,
 						pring, piocb);
@@ -9055,7 +9066,6 @@ lpfc_sli_host_down(struct lpfc_vport *vport)
 			if (iocb->vport != vport)
 				continue;
 			list_move_tail(&iocb->list, &completions);
-			pring->txq_cnt--;
 		}
 
 		/* Next issue ABTS for everything on the txcmplq */
@@ -9124,8 +9134,6 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
 		 * given to the FW yet.
 		 */
 		list_splice_init(&pring->txq, &completions);
-		pring->txq_cnt = 0;
-
 	}
 	spin_unlock_irqrestore(&phba->hbalock, flags);
 
@@ -9966,6 +9974,9 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
 	long timeleft, timeout_req = 0;
 	int retval = IOCB_SUCCESS;
 	uint32_t creg_val;
+	struct lpfc_iocbq *iocb;
+	int txq_cnt = 0;
+	int txcmplq_cnt = 0;
 	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
 	/*
 	 * If the caller has provided a response iocbq buffer, then context2
@@ -10013,9 +10024,17 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
 			retval = IOCB_TIMEDOUT;
 		}
 	} else if (retval == IOCB_BUSY) {
-		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
-			"2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
-			phba->iocb_cnt, pring->txq_cnt, pring->txcmplq_cnt);
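+		/* Walk the rings for accurate counts only when SLI logging
+		 * is enabled, keeping the busy path cheap otherwise.
+		 */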
+		if (phba->cfg_log_verbose & LOG_SLI) {
+			list_for_each_entry(iocb, &pring->txq, list)
+				txq_cnt++;
+			list_for_each_entry(iocb, &pring->txcmplq, list)
+				txcmplq_cnt++;
+			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+				"2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
+				phba->iocb_cnt, txq_cnt, txcmplq_cnt);
+		}
 		return retval;
 	} else {
 		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
@@ -11298,16 +11317,25 @@ lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
 	struct lpfc_iocbq *irspiocbq;
 	unsigned long iflags;
 	struct lpfc_sli_ring *pring = cq->pring;
+	int txq_cnt = 0;
+	int txcmplq_cnt = 0;
+	int fcp_txcmplq_cnt = 0;
 
 	/* Get an irspiocbq for later ELS response processing use */
 	irspiocbq = lpfc_sli_get_iocbq(phba);
 	if (!irspiocbq) {
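+		/* These counters only record whether each queue is
+		 * non-empty (0 or 1) for the diagnostic message below.
+		 */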
+		if (!list_empty(&pring->txq))
+			txq_cnt++;
+		if (!list_empty(&pring->txcmplq))
+			txcmplq_cnt++;
+		if (!list_empty(&phba->sli.ring[LPFC_FCP_RING].txcmplq))
+			fcp_txcmplq_cnt++;
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 			"0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
 			"fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n",
-			pring->txq_cnt, phba->iocb_cnt,
-			phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt,
-			phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt);
+			txq_cnt, phba->iocb_cnt,
+			fcp_txcmplq_cnt,
+			txcmplq_cnt);
 		return false;
 	}
 
@@ -15482,11 +15510,18 @@ lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
 			LPFC_SLI4_FCF_TBL_INDX_MAX);
 	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
 			"3060 Last IDX %d\n", last_index);
-	if (list_empty(&phba->fcf.fcf_pri_list)) {
+
+	/* Verify the priority list has 2 or more entries */
+	spin_lock_irq(&phba->hbalock);
+	if (list_empty(&phba->fcf.fcf_pri_list) ||
+	    list_is_singular(&phba->fcf.fcf_pri_list)) {
+		spin_unlock_irq(&phba->hbalock);
 		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
 			"3061 Last IDX %d\n", last_index);
 		return 0; /* Empty rr list */
 	}
+	spin_unlock_irq(&phba->hbalock);
+
 	next_fcf_pri = 0;
 	/*
 	 * Clear the rr_bmask and set all of the bits that are at this
@@ -16245,14 +16280,19 @@ lpfc_drain_txq(struct lpfc_hba *phba)
 	char *fail_msg = NULL;
 	struct lpfc_sglq *sglq;
 	union lpfc_wqe wqe;
+	int txq_cnt = 0;
 
 	spin_lock_irqsave(&phba->hbalock, iflags);
-	if (pring->txq_cnt > pring->txq_max)
-		pring->txq_max = pring->txq_cnt;
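+	/* The txq depth is now derived by walking the list rather than
+	 * from a cached counter.
+	 */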
+	list_for_each_entry(piocbq, &pring->txq, list)
+		txq_cnt++;
+
+	if (txq_cnt > pring->txq_max)
+		pring->txq_max = txq_cnt;
 
 	spin_unlock_irqrestore(&phba->hbalock, iflags);
 
-	while (pring->txq_cnt) {
+	while (!list_empty(&pring->txq)) {
 		spin_lock_irqsave(&phba->hbalock, iflags);
 
 		piocbq = lpfc_sli_ringtx_get(phba, pring);
@@ -16260,7 +16300,7 @@ lpfc_drain_txq(struct lpfc_hba *phba)
 			spin_unlock_irqrestore(&phba->hbalock, iflags);
 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 				"2823 txq empty and txq_cnt is %d\n ",
-				pring->txq_cnt);
+				txq_cnt);
 			break;
 		}
 		sglq = __lpfc_sli_get_sglq(phba, piocbq);
@@ -16269,6 +16309,7 @@ lpfc_drain_txq(struct lpfc_hba *phba)
 			spin_unlock_irqrestore(&phba->hbalock, iflags);
 			break;
 		}
+		txq_cnt--;
 
 		/* The xri and iocb resources secured,
 		 * attempt to issue request
@@ -16300,5 +16341,5 @@ lpfc_drain_txq(struct lpfc_hba *phba)
 	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
 				IOERR_SLI_ABORTED);
 
-	return pring->txq_cnt;
+	return txq_cnt;
 }
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index f3b7795a296b..664cd04f7cd8 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "8.3.37"
+#define LPFC_DRIVER_VERSION "8.3.38"
 #define LPFC_DRIVER_NAME		"lpfc"
 
 /* Used for SLI 2/3 */