author    Linus Torvalds <torvalds@linux-foundation.org>  2008-12-30 17:43:10 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2008-12-30 17:43:10 -0800
commit    590cf28580c999c8ba70dc39b40bab09d69e2630
tree      22b9aa4b148bea8a310b760521d1032eef7d743f
parent    f54a6ec0fd85002d94d05b4bb679508eeb066683
parent    fb5edd020fa0fbe991f4a473611ad530d2237425
Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (104 commits)
  [SCSI] fcoe: fix configuration problems
  [SCSI] cxgb3i: fix select/depend problem
  [SCSI] fcoe: fix incorrect use of struct module
  [SCSI] cxgb3i: remove use of skb->sp
  [SCSI] cxgb3i: Add cxgb3i iSCSI driver.
  [SCSI] zfcp: Remove unnecessary warning message
  [SCSI] zfcp: Add support for unchained FSF requests
  [SCSI] zfcp: Remove busid macro
  [SCSI] zfcp: remove DID_DID flag
  [SCSI] zfcp: Simplify mask lookups for incoming RSCNs
  [SCSI] zfcp: Remove initial device data from zfcp_data
  [SCSI] zfcp: fix compile warning
  [SCSI] zfcp: Remove adapter list
  [SCSI] zfcp: Simplify SBAL allocation to fix sparse warnings
  [SCSI] zfcp: register with SCSI layer on ccw registration
  [SCSI] zfcp: Fix message line break
  [SCSI] qla2xxx: changes in multiq code
  [SCSI] eata: fix the data buffer accessors conversion regression
  [SCSI] ibmvfc: Improve async event handling
  [SCSI] lpfc : correct printk types on PPC compiles
  ...
-rw-r--r--  Documentation/scsi/cxgb3i.txt  85
-rw-r--r--  drivers/ata/libata-scsi.c  4
-rw-r--r--  drivers/infiniband/ulp/iser/iscsi_iser.c  48
-rw-r--r--  drivers/infiniband/ulp/iser/iser_initiator.c  5
-rw-r--r--  drivers/message/fusion/mptbase.c  15
-rw-r--r--  drivers/net/Kconfig  7
-rw-r--r--  drivers/s390/scsi/zfcp_aux.c  110
-rw-r--r--  drivers/s390/scsi/zfcp_ccw.c  6
-rw-r--r--  drivers/s390/scsi/zfcp_cfdc.c  17
-rw-r--r--  drivers/s390/scsi/zfcp_dbf.c  12
-rw-r--r--  drivers/s390/scsi/zfcp_dbf.h  1
-rw-r--r--  drivers/s390/scsi/zfcp_def.h  32
-rw-r--r--  drivers/s390/scsi/zfcp_erp.c  10
-rw-r--r--  drivers/s390/scsi/zfcp_fc.c  88
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.c  77
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.h  2
-rw-r--r--  drivers/s390/scsi/zfcp_qdio.c  4
-rw-r--r--  drivers/scsi/Kconfig  22
-rw-r--r--  drivers/scsi/Makefile  5
-rw-r--r--  drivers/scsi/NCR5380.c  2
-rw-r--r--  drivers/scsi/a100u2w.c  2
-rw-r--r--  drivers/scsi/aacraid/aachba.c  2
-rw-r--r--  drivers/scsi/aacraid/commctrl.c  14
-rw-r--r--  drivers/scsi/aacraid/comminit.c  2
-rw-r--r--  drivers/scsi/aacraid/commsup.c  2
-rw-r--r--  drivers/scsi/aacraid/dpcsup.c  2
-rw-r--r--  drivers/scsi/aacraid/linit.c  2
-rw-r--r--  drivers/scsi/aacraid/rkt.c  2
-rw-r--r--  drivers/scsi/aacraid/rx.c  2
-rw-r--r--  drivers/scsi/aacraid/sa.c  2
-rw-r--r--  drivers/scsi/advansys.c  3
-rw-r--r--  drivers/scsi/aha1740.c  2
-rw-r--r--  drivers/scsi/arcmsr/arcmsr_hba.c  8
-rw-r--r--  drivers/scsi/atp870u.c  4
-rw-r--r--  drivers/scsi/ch.c  2
-rw-r--r--  drivers/scsi/cxgb3i/Kbuild  4
-rw-r--r--  drivers/scsi/cxgb3i/Kconfig  7
-rw-r--r--  drivers/scsi/cxgb3i/cxgb3i.h  139
-rw-r--r--  drivers/scsi/cxgb3i/cxgb3i_ddp.c  770
-rw-r--r--  drivers/scsi/cxgb3i/cxgb3i_ddp.h  306
-rw-r--r--  drivers/scsi/cxgb3i/cxgb3i_init.c  107
-rw-r--r--  drivers/scsi/cxgb3i/cxgb3i_iscsi.c  951
-rw-r--r--  drivers/scsi/cxgb3i/cxgb3i_offload.c  1810
-rw-r--r--  drivers/scsi/cxgb3i/cxgb3i_offload.h  231
-rw-r--r--  drivers/scsi/cxgb3i/cxgb3i_pdu.c  402
-rw-r--r--  drivers/scsi/cxgb3i/cxgb3i_pdu.h  59
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_rdac.c  18
-rw-r--r--  drivers/scsi/eata.c  15
-rw-r--r--  drivers/scsi/eata_pio.c  4
-rw-r--r--  drivers/scsi/esp_scsi.c  6
-rw-r--r--  drivers/scsi/fcoe/Makefile  8
-rw-r--r--  drivers/scsi/fcoe/fc_transport_fcoe.c  446
-rw-r--r--  drivers/scsi/fcoe/fcoe_sw.c  494
-rw-r--r--  drivers/scsi/fcoe/libfcoe.c  1510
-rw-r--r--  drivers/scsi/fdomain.c  2
-rw-r--r--  drivers/scsi/ibmmca.c  2
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.c  293
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.h  32
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.c  4
-rw-r--r--  drivers/scsi/in2000.c  2
-rw-r--r--  drivers/scsi/initio.c  2
-rw-r--r--  drivers/scsi/initio.h  2
-rw-r--r--  drivers/scsi/ipr.c  6
-rw-r--r--  drivers/scsi/ipr.h  2
-rw-r--r--  drivers/scsi/iscsi_tcp.c  1621
-rw-r--r--  drivers/scsi/iscsi_tcp.h  88
-rw-r--r--  drivers/scsi/libfc/Makefile  12
-rw-r--r--  drivers/scsi/libfc/fc_disc.c  845
-rw-r--r--  drivers/scsi/libfc/fc_elsct.c  71
-rw-r--r--  drivers/scsi/libfc/fc_exch.c  1970
-rw-r--r--  drivers/scsi/libfc/fc_fcp.c  2131
-rw-r--r--  drivers/scsi/libfc/fc_frame.c  89
-rw-r--r--  drivers/scsi/libfc/fc_lport.c  1604
-rw-r--r--  drivers/scsi/libfc/fc_rport.c  1291
-rw-r--r--  drivers/scsi/libiscsi.c  236
-rw-r--r--  drivers/scsi/libiscsi_tcp.c  1163
-rw-r--r--  drivers/scsi/lpfc/lpfc.h  22
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c  169
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h  15
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c  17
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.c  164
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.h  2
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c  160
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c  69
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw.h  249
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c  685
-rw-r--r--  drivers/scsi/lpfc/lpfc_logmsg.h  1
-rw-r--r--  drivers/scsi/lpfc/lpfc_mbox.c  54
-rw-r--r--  drivers/scsi/lpfc/lpfc_nl.h  30
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c  2
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c  1235
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.h  2
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c  159
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h  2
-rw-r--r--  drivers/scsi/lpfc/lpfc_vport.c  28
-rw-r--r--  drivers/scsi/mac_esp.c  100
-rw-r--r--  drivers/scsi/nsp32.c  3
-rw-r--r--  drivers/scsi/qla1280.c  3
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c  328
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.c  96
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.h  15
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h  584
-rw-r--r--  drivers/scsi/qla2xxx/qla_dfs.c  21
-rw-r--r--  drivers/scsi/qla2xxx/qla_fw.h  14
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h  56
-rw-r--r--  drivers/scsi/qla2xxx/qla_gs.c  481
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c  1251
-rw-r--r--  drivers/scsi/qla2xxx/qla_inline.h  45
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c  358
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c  840
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c  886
-rw-r--r--  drivers/scsi/qla2xxx/qla_mid.c  516
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c  1471
-rw-r--r--  drivers/scsi/qla2xxx/qla_sup.c  554
-rw-r--r--  drivers/scsi/qla2xxx/qla_version.h  4
-rw-r--r--  drivers/scsi/qlogicfas408.c  2
-rw-r--r--  drivers/scsi/scsi_error.c  6
-rw-r--r--  drivers/scsi/scsi_ioctl.c  6
-rw-r--r--  drivers/scsi/scsi_lib.c  149
-rw-r--r--  drivers/scsi/scsi_scan.c  17
-rw-r--r--  drivers/scsi/scsi_transport_fc.c  13
-rw-r--r--  drivers/scsi/scsi_transport_spi.c  5
-rw-r--r--  drivers/scsi/sd.c  11
-rw-r--r--  drivers/scsi/ses.c  9
-rw-r--r--  drivers/scsi/sr.c  4
-rw-r--r--  drivers/scsi/sr_ioctl.c  2
-rw-r--r--  drivers/scsi/st.c  245
-rw-r--r--  drivers/scsi/stex.c  3
-rw-r--r--  drivers/scsi/sym53c416.c  2
-rw-r--r--  drivers/scsi/tmscsim.c  3
-rw-r--r--  drivers/scsi/u14-34f.c  3
-rw-r--r--  drivers/scsi/wd7000.c  4
-rw-r--r--  include/scsi/fc/fc_els.h  816
-rw-r--r--  include/scsi/fc/fc_encaps.h  138
-rw-r--r--  include/scsi/fc/fc_fc2.h  124
-rw-r--r--  include/scsi/fc/fc_fcoe.h  114
-rw-r--r--  include/scsi/fc/fc_fcp.h  199
-rw-r--r--  include/scsi/fc/fc_fs.h  340
-rw-r--r--  include/scsi/fc/fc_gs.h  93
-rw-r--r--  include/scsi/fc/fc_ns.h  159
-rw-r--r--  include/scsi/fc_encode.h  309
-rw-r--r--  include/scsi/fc_frame.h  242
-rw-r--r--  include/scsi/fc_transport_fcoe.h  54
-rw-r--r--  include/scsi/iscsi_if.h  7
-rw-r--r--  include/scsi/libfc.h  938
-rw-r--r--  include/scsi/libfcoe.h  176
-rw-r--r--  include/scsi/libiscsi.h  39
-rw-r--r--  include/scsi/libiscsi_tcp.h  132
-rw-r--r--  include/scsi/scsi_device.h  7
-rw-r--r--  include/scsi/scsi_transport_iscsi.h  12
150 files changed, 28769 insertions(+), 5663 deletions(-)
diff --git a/Documentation/scsi/cxgb3i.txt b/Documentation/scsi/cxgb3i.txt
new file mode 100644
index 000000000000..8141fa01978e
--- /dev/null
+++ b/Documentation/scsi/cxgb3i.txt
@@ -0,0 +1,85 @@
+Chelsio S3 iSCSI Driver for Linux
+
+Introduction
+============
+
+The Chelsio T3 ASIC based adapters (S310, S320, S302, S304, Mezz cards, and
+other products in the series) support iSCSI acceleration and iSCSI Direct Data
+Placement (DDP), where the hardware handles the expensive byte-touching
+operations, such as CRC computation and verification, and performs direct DMA
+to the final host memory destination:
+
+	- iSCSI PDU digest generation and verification
+
+	  On transmitting, Chelsio S3 h/w computes and inserts the Header and
+	  Data digests into the PDUs.
+	  On receiving, Chelsio S3 h/w computes and verifies the Header and
+	  Data digests of the PDUs.
+
+	- Direct Data Placement (DDP)
+
+	  S3 h/w can directly place the iSCSI Data-In or Data-Out PDU's
+	  payload into pre-posted final destination host-memory buffers based
+	  on the Initiator Task Tag (ITT) in Data-In or Target Task Tag (TTT)
+	  in Data-Out PDUs.
+
+	- PDU Transmit and Recovery
+
+	  On transmitting, S3 h/w accepts the complete PDU (header + data)
+	  from the host driver, computes and inserts the digests, decomposes
+	  the PDU into multiple TCP segments if necessary, and transmits all
+	  the TCP segments onto the wire. It handles TCP retransmission if
+	  needed.
+
+	  On receiving, S3 h/w recovers the iSCSI PDU by reassembling TCP
+	  segments, separating the header and data, calculating and verifying
+	  the digests, then forwards the header to the host. The payload data,
+	  if possible, will be directly placed into the pre-posted host DDP
+	  buffer. Otherwise, the payload data will be sent to the host too.
+
+The cxgb3i driver interfaces with the open-iscsi initiator and provides iSCSI
+acceleration through Chelsio hardware wherever applicable.
+
+Using the cxgb3i Driver
+=======================
+
+The following steps need to be taken to accelerate the open-iscsi initiator:
+
+1. Load the cxgb3i driver: "modprobe cxgb3i"
+
+   The cxgb3i module registers a new transport class "cxgb3i" with open-iscsi.
+
+   * if recompiling the kernel, the cxgb3i option is located at
+	Device Drivers
+		SCSI device support --->
+			[*] SCSI low-level drivers  --->
+				<M>   Chelsio S3xx iSCSI support
+
+2. Create an interface file located under /etc/iscsi/ifaces/ for the new
+   transport class "cxgb3i".
+
+   The content of the file should be in the following format:
+	iface.transport_name = cxgb3i
+	iface.net_ifacename = <ethX>
+	iface.ipaddress = <iscsi ip address>
+
+   * if iface.ipaddress is specified, <iscsi ip address> needs to be either
+	the same as ethX's IP address or an address on the same subnet. Make
+	sure the IP address is unique in the network.
+
+3. Edit /etc/iscsi/iscsid.conf.
+   The default setting for MaxRecvDataSegmentLength (131072) is too big;
+   set "node.conn[0].iscsi.MaxRecvDataSegmentLength" to a value no
+   bigger than 15360 (for example, 8192):
+
+	node.conn[0].iscsi.MaxRecvDataSegmentLength = 8192
+
+   * The login would fail for a normal session if MaxRecvDataSegmentLength is
+	too big. An error message of the form
+	"cxgb3i: ERR! MaxRecvSegmentLength <X> too big. Need to be <= <Y>."
+	would be logged to dmesg.
+
+4. To direct open-iscsi traffic through cxgb3i's accelerated path, the
+   "-I <iface file name>" option needs to be specified with most
+   iscsiadm commands. <iface file name> is the transport interface file created
+   in step 2.
+   in step 2.
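Taken together, the four steps above amount to a short shell session. The
sketch below is illustrative only: the interface file name "cxgb3i.eth0" and
the addresses 10.0.0.101/10.0.0.100 are placeholder values, not part of the
patch.

	# step 1: load the driver, registering the "cxgb3i" transport class
	modprobe cxgb3i

	# step 2: interface file for the cxgb3i transport
	# (<<-EOF strips the leading tabs from the lines below)
	cat > /etc/iscsi/ifaces/cxgb3i.eth0 <<-EOF
	iface.transport_name = cxgb3i
	iface.net_ifacename = eth0
	iface.ipaddress = 10.0.0.101
	EOF

	# step 3: in /etc/iscsi/iscsid.conf, set
	#   node.conn[0].iscsi.MaxRecvDataSegmentLength = 8192

	# step 4: discover and log in through the accelerated path
	iscsiadm -m discovery -t sendtargets -p 10.0.0.100 -I cxgb3i.eth0
	iscsiadm -m node -p 10.0.0.100 -I cxgb3i.eth0 --login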
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 0b2e14f67655..4040d8b53216 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -517,7 +517,7 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
 	/* Good values for timeout and retries?  Values below
 	   from scsi_ioctl_send_command() for default case... */
 	cmd_result = scsi_execute(scsidev, scsi_cmd, data_dir, argbuf, argsize,
-				  sensebuf, (10*HZ), 5, 0);
+				  sensebuf, (10*HZ), 5, 0, NULL);
 
 	if (driver_byte(cmd_result) == DRIVER_SENSE) {/* sense data available */
 		u8 *desc = sensebuf + 8;
@@ -603,7 +603,7 @@ int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg)
 	/* Good values for timeout and retries?  Values below
 	   from scsi_ioctl_send_command() for default case... */
 	cmd_result = scsi_execute(scsidev, scsi_cmd, DMA_NONE, NULL, 0,
-				sensebuf, (10*HZ), 5, 0);
+				sensebuf, (10*HZ), 5, 0, NULL);
 
 	if (driver_byte(cmd_result) == DRIVER_SENSE) {/* sense data available */
 		u8 *desc = sensebuf + 8;
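The two libata hunks above track an API change landing in this merge:
scsi_execute() gained a trailing "int *resid" out-parameter that reports the
data-transfer residual, and callers that do not need it pass NULL. A sketch of
the updated call as libata now makes it, with the argument roles annotated:

	/* last argument is the new int *resid out-parameter */
	cmd_result = scsi_execute(scsidev, scsi_cmd, data_dir, argbuf, argsize,
				  sensebuf, (10*HZ) /* timeout */,
				  5 /* retries */, 0 /* flags */,
				  NULL /* resid: not needed here */);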
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 1e5b6446231d..12876392516e 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -119,6 +119,14 @@ error:
 	iscsi_conn_failure(conn, rc);
 }
 
+static int iscsi_iser_pdu_alloc(struct iscsi_task *task, uint8_t opcode)
+{
+	struct iscsi_iser_task *iser_task = task->dd_data;
+
+	task->hdr = (struct iscsi_hdr *)&iser_task->desc.iscsi_header;
+	task->hdr_max = sizeof(iser_task->desc.iscsi_header);
+	return 0;
+}
 
 /**
  * iscsi_iser_task_init - Initialize task
@@ -180,25 +188,26 @@ static int
 iscsi_iser_task_xmit_unsol_data(struct iscsi_conn *conn,
 				 struct iscsi_task *task)
 {
-	struct iscsi_data  hdr;
+	struct iscsi_r2t_info *r2t = &task->unsol_r2t;
+	struct iscsi_data hdr;
 	int error = 0;
 
 	/* Send data-out PDUs while there's still unsolicited data to send */
-	while (task->unsol_count > 0) {
-		iscsi_prep_unsolicit_data_pdu(task, &hdr);
+	while (iscsi_task_has_unsol_data(task)) {
+		iscsi_prep_data_out_pdu(task, r2t, &hdr);
 		debug_scsi("Sending data-out: itt 0x%x, data count %d\n",
-			   hdr.itt, task->data_count);
+			   hdr.itt, r2t->data_count);
 
 		/* the buffer description has been passed with the command */
 		/* Send the command */
 		error = iser_send_data_out(conn, task, &hdr);
 		if (error) {
-			task->unsol_datasn--;
+			r2t->datasn--;
 			goto iscsi_iser_task_xmit_unsol_data_exit;
 		}
-		task->unsol_count -= task->data_count;
+		r2t->sent += r2t->data_count;
 		debug_scsi("Need to send %d more as data-out PDUs\n",
-			   task->unsol_count);
+			   r2t->data_length - r2t->sent);
 	}
 
 iscsi_iser_task_xmit_unsol_data_exit:
@@ -220,7 +229,7 @@ iscsi_iser_task_xmit(struct iscsi_task *task)
 
 		debug_scsi("cmd [itt %x total %d imm %d unsol_data %d\n",
 			   task->itt, scsi_bufflen(task->sc),
-			   task->imm_count, task->unsol_count);
+			   task->imm_count, task->unsol_r2t.data_length);
 	}
 
 	debug_scsi("task deq [cid %d itt 0x%x]\n",
@@ -235,7 +244,7 @@ iscsi_iser_task_xmit(struct iscsi_task *task)
 	}
 
 	/* Send unsolicited data-out PDU(s) if necessary */
-	if (task->unsol_count)
+	if (iscsi_task_has_unsol_data(task))
 		error = iscsi_iser_task_xmit_unsol_data(conn, task);
 
  iscsi_iser_task_xmit_exit:
@@ -244,13 +253,15 @@ iscsi_iser_task_xmit(struct iscsi_task *task)
 	return error;
 }
 
-static void
-iscsi_iser_cleanup_task(struct iscsi_conn *conn, struct iscsi_task *task)
+static void iscsi_iser_cleanup_task(struct iscsi_task *task)
 {
 	struct iscsi_iser_task *iser_task = task->dd_data;
 
-	/* mgmt tasks do not need special cleanup */
-	if (!task->sc)
+	/*
+	 * mgmt tasks do not need special cleanup and we do not
+	 * allocate anything in the init task callout
+	 */
+	if (!task->sc || task->state == ISCSI_TASK_PENDING)
 		return;
 
 	if (iser_task->status == ISER_TASK_STATUS_STARTED) {
@@ -391,9 +402,6 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
 	struct iscsi_cls_session *cls_session;
 	struct iscsi_session *session;
 	struct Scsi_Host *shost;
-	int i;
-	struct iscsi_task *task;
-	struct iscsi_iser_task *iser_task;
 	struct iser_conn *ib_conn;
 
 	shost = iscsi_host_alloc(&iscsi_iser_sht, 0, ISCSI_MAX_CMD_PER_LUN);
@@ -430,13 +438,6 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
 	session = cls_session->dd_data;
 
 	shost->can_queue = session->scsi_cmds_max;
-	/* libiscsi setup itts, data and pool so just set desc fields */
-	for (i = 0; i < session->cmds_max; i++) {
-		task = session->cmds[i];
-		iser_task = task->dd_data;
-		task->hdr = (struct iscsi_cmd *)&iser_task->desc.iscsi_header;
-		task->hdr_max = sizeof(iser_task->desc.iscsi_header);
-	}
 	return cls_session;
 
 remove_host:
@@ -652,6 +653,7 @@ static struct iscsi_transport iscsi_iser_transport = {
 	.init_task		= iscsi_iser_task_init,
 	.xmit_task		= iscsi_iser_task_xmit,
 	.cleanup_task		= iscsi_iser_cleanup_task,
+	.alloc_pdu		= iscsi_iser_pdu_alloc,
 	/* recovery */
 	.session_recovery_timedout = iscsi_session_recovery_timedout,
 
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index ed1aff21b7ea..e209cb8dd948 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -353,8 +353,7 @@ int iser_send_command(struct iscsi_conn *conn,
 	unsigned long edtl;
 	int err = 0;
 	struct iser_data_buf *data_buf;
-
-	struct iscsi_cmd *hdr =  task->hdr;
+	struct iscsi_cmd *hdr =  (struct iscsi_cmd *)task->hdr;
 	struct scsi_cmnd *sc  =  task->sc;
 
 	if (!iser_conn_state_comp(iser_conn->ib_conn, ISER_CONN_UP)) {
@@ -393,7 +392,7 @@ int iser_send_command(struct iscsi_conn *conn,
 		err = iser_prepare_write_cmd(task,
 					     task->imm_count,
 				             task->imm_count +
-					     task->unsol_count,
+					     task->unsol_r2t.data_length,
 					     edtl);
 		if (err)
 			goto send_command_error;
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index d6a0074b9dc3..c4e8b9aa3827 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -952,7 +952,6 @@ mpt_put_msg_frame_hi_pri(u8 cb_idx, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /**
  *	mpt_free_msg_frame - Place MPT request frame back on FreeQ.
- *	@handle: Handle of registered MPT protocol driver
  *	@ioc: Pointer to MPT adapter structure
  *	@mf: Pointer to MPT request frame
  *
@@ -4563,7 +4562,7 @@ WaitForDoorbellReply(MPT_ADAPTER *ioc, int howlong, int sleepFlag)
 			failcnt++;
 		hword = le16_to_cpu(CHIPREG_READ32(&ioc->chip->Doorbell) & 0x0000FFFF);
 		/* don't overflow our IOC hs_reply[] buffer! */
-		if (u16cnt < sizeof(ioc->hs_reply) / sizeof(ioc->hs_reply[0]))
+		if (u16cnt < ARRAY_SIZE(ioc->hs_reply))
 			hs_reply[u16cnt] = hword;
 		CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
 	}
@@ -5422,7 +5421,6 @@ mpt_raid_phys_disk_pg0(MPT_ADAPTER *ioc, u8 phys_disk_num, pRaidPhysDiskPage0_t
 /**
  *	mpt_findImVolumes - Identify IDs of hidden disks and RAID Volumes
  *	@ioc: Pointer to a Adapter Strucutre
- *	@portnum: IOC port number
  *
  *	Return:
  *	0 on success
@@ -6939,7 +6937,6 @@ mpt_fc_log_info(MPT_ADAPTER *ioc, u32 log_info)
 /**
  *	mpt_spi_log_info - Log information returned from SCSI Parallel IOC.
  *	@ioc: Pointer to MPT_ADAPTER structure
- *	@mr: Pointer to MPT reply frame
  *	@log_info: U32 LogInfo word from the IOC
  *
  *	Refer to lsi/sp_log.h.
@@ -7176,7 +7173,7 @@ union loginfo_type {
 
 	sas_loginfo.loginfo = log_info;
 	if ((sas_loginfo.dw.bus_type != 3 /*SAS*/) &&
-	    (sas_loginfo.dw.originator < sizeof(originator_str)/sizeof(char*)))
+	    (sas_loginfo.dw.originator < ARRAY_SIZE(originator_str)))
 		return;
 
 	originator_desc = originator_str[sas_loginfo.dw.originator];
@@ -7185,21 +7182,21 @@ union loginfo_type {
 
 		case 0:  /* IOP */
 			if (sas_loginfo.dw.code <
-			    sizeof(iop_code_str)/sizeof(char*))
+			    ARRAY_SIZE(iop_code_str))
 				code_desc = iop_code_str[sas_loginfo.dw.code];
 			break;
 		case 1:  /* PL */
 			if (sas_loginfo.dw.code <
-			    sizeof(pl_code_str)/sizeof(char*))
+			    ARRAY_SIZE(pl_code_str))
 				code_desc = pl_code_str[sas_loginfo.dw.code];
 			break;
 		case 2:  /* IR */
 			if (sas_loginfo.dw.code >=
-			    sizeof(ir_code_str)/sizeof(char*))
+			    ARRAY_SIZE(ir_code_str))
 				break;
 			code_desc = ir_code_str[sas_loginfo.dw.code];
 			if (sas_loginfo.dw.subcode >=
-			    sizeof(raid_sub_code_str)/sizeof(char*))
+			    ARRAY_SIZE(raid_sub_code_str))
 			break;
 			if (sas_loginfo.dw.code == 0)
 				sub_code_desc =
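The mptbase.c changes are a mechanical cleanup, replacing open-coded sizeof
arithmetic with the kernel's ARRAY_SIZE() macro. Ignoring the kernel's extra
compile-time check that the argument really is an array, the macro reduces to:

	/* element count of a statically sized array; the in-kernel version
	 * also adds a build-time error if arr is actually a pointer */
	#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))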
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 72a9212da865..9a18270c1081 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2399,9 +2399,14 @@ config CHELSIO_T1_1G
           Enables support for Chelsio's gigabit Ethernet PCI cards.  If you
           are using only 10G cards say 'N' here.
 
+config CHELSIO_T3_DEPENDS
+	tristate
+	depends on PCI && INET
+	default y
+
 config CHELSIO_T3
 	tristate "Chelsio Communications T3 10Gb Ethernet support"
-	depends on PCI && INET
+	depends on CHELSIO_T3_DEPENDS
 	select FW_LOADER
 	select INET_LRO
 	help
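The new CHELSIO_T3_DEPENDS symbol exists because Kconfig's "select" ignores
the selected symbol's dependencies: a driver that selects CHELSIO_T3 must
restate PCI and INET itself, and the helper symbol lets it do that in one
line. A sketch of a consumer (the real entry added by this merge lives in
drivers/scsi/cxgb3i/Kconfig):

	config SCSI_CXGB3_ISCSI
		tristate "Chelsio S3xx iSCSI support"
		depends on CHELSIO_T3_DEPENDS
		select CHELSIO_T3
		select SCSI_ISCSI_ATTRS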
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index e529b55b3ce9..8af7dfbe022c 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -34,13 +34,12 @@
 
 #define ZFCP_BUS_ID_SIZE	20
 
-static char *device;
-
 MODULE_AUTHOR("IBM Deutschland Entwicklung GmbH - linux390@de.ibm.com");
 MODULE_DESCRIPTION("FCP HBA driver");
 MODULE_LICENSE("GPL");
 
-module_param(device, charp, 0400);
+static char *init_device;
+module_param_named(device, init_device, charp, 0400);
 MODULE_PARM_DESC(device, "specify initial device");
 
 static int zfcp_reqlist_alloc(struct zfcp_adapter *adapter)
@@ -73,46 +72,7 @@ int zfcp_reqlist_isempty(struct zfcp_adapter *adapter)
 	return 1;
 }
 
-static int __init zfcp_device_setup(char *devstr)
-{
-	char *token;
-	char *str;
-
-	if (!devstr)
-		return 0;
-
-	/* duplicate devstr and keep the original for sysfs presentation*/
-	str = kmalloc(strlen(devstr) + 1, GFP_KERNEL);
-	if (!str)
-		return 0;
-
-	strcpy(str, devstr);
-
-	token = strsep(&str, ",");
-	if (!token || strlen(token) >= ZFCP_BUS_ID_SIZE)
-		goto err_out;
-	strncpy(zfcp_data.init_busid, token, ZFCP_BUS_ID_SIZE);
-
-	token = strsep(&str, ",");
-	if (!token || strict_strtoull(token, 0,
-				(unsigned long long *) &zfcp_data.init_wwpn))
-		goto err_out;
-
-	token = strsep(&str, ",");
-	if (!token || strict_strtoull(token, 0,
-				(unsigned long long *) &zfcp_data.init_fcp_lun))
-		goto err_out;
-
-	kfree(str);
-	return 1;
-
- err_out:
-	kfree(str);
-	pr_err("%s is not a valid SCSI device\n", devstr);
-	return 0;
-}
-
-static void __init zfcp_init_device_configure(void)
+static void __init zfcp_init_device_configure(char *busid, u64 wwpn, u64 lun)
 {
 	struct zfcp_adapter *adapter;
 	struct zfcp_port *port;
@@ -120,17 +80,17 @@ static void __init zfcp_init_device_configure(void)
 
 	down(&zfcp_data.config_sema);
 	read_lock_irq(&zfcp_data.config_lock);
-	adapter = zfcp_get_adapter_by_busid(zfcp_data.init_busid);
+	adapter = zfcp_get_adapter_by_busid(busid);
 	if (adapter)
 		zfcp_adapter_get(adapter);
 	read_unlock_irq(&zfcp_data.config_lock);
 
 	if (!adapter)
 		goto out_adapter;
-	port = zfcp_port_enqueue(adapter, zfcp_data.init_wwpn, 0, 0);
+	port = zfcp_port_enqueue(adapter, wwpn, 0, 0);
 	if (IS_ERR(port))
 		goto out_port;
-	unit = zfcp_unit_enqueue(port, zfcp_data.init_fcp_lun);
+	unit = zfcp_unit_enqueue(port, lun);
 	if (IS_ERR(unit))
 		goto out_unit;
 	up(&zfcp_data.config_sema);
@@ -160,6 +120,42 @@ static struct kmem_cache *zfcp_cache_create(int size, char *name)
 	return kmem_cache_create(name , size, align, 0, NULL);
 }
 
+static void __init zfcp_init_device_setup(char *devstr)
+{
+	char *token;
+	char *str;
+	char busid[ZFCP_BUS_ID_SIZE];
+	u64 wwpn, lun;
+
+	/* duplicate devstr and keep the original for sysfs presentation*/
+	str = kmalloc(strlen(devstr) + 1, GFP_KERNEL);
+	if (!str)
+		return;
+
+	strcpy(str, devstr);
+
+	token = strsep(&str, ",");
+	if (!token || strlen(token) >= ZFCP_BUS_ID_SIZE)
+		goto err_out;
+	strncpy(busid, token, ZFCP_BUS_ID_SIZE);
+
+	token = strsep(&str, ",");
+	if (!token || strict_strtoull(token, 0, (unsigned long long *) &wwpn))
+		goto err_out;
+
+	token = strsep(&str, ",");
+	if (!token || strict_strtoull(token, 0, (unsigned long long *) &lun))
+		goto err_out;
+
+	kfree(str);
+	zfcp_init_device_configure(busid, wwpn, lun);
+	return;
+
+ err_out:
+	kfree(str);
+	pr_err("%s is not a valid SCSI device\n", devstr);
+}
+
 static int __init zfcp_module_init(void)
 {
 	int retval = -ENOMEM;
@@ -181,7 +177,6 @@ static int __init zfcp_module_init(void)
 
 	zfcp_data.work_queue = create_singlethread_workqueue("zfcp_wq");
 
-	INIT_LIST_HEAD(&zfcp_data.adapter_list_head);
 	sema_init(&zfcp_data.config_sema, 1);
 	rwlock_init(&zfcp_data.config_lock);
 
@@ -203,10 +198,9 @@ static int __init zfcp_module_init(void)
 		goto out_ccw_register;
 	}
 
-	if (zfcp_device_setup(device))
-		zfcp_init_device_configure();
-
-	goto out;
+	if (init_device)
+		zfcp_init_device_setup(init_device);
+	return 0;
 
 out_ccw_register:
 	misc_deregister(&zfcp_cfdc_misc);
@@ -527,14 +521,11 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device)
 			       &zfcp_sysfs_adapter_attrs))
 		goto sysfs_failed;
 
-	write_lock_irq(&zfcp_data.config_lock);
 	atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status);
-	list_add_tail(&adapter->list, &zfcp_data.adapter_list_head);
-	write_unlock_irq(&zfcp_data.config_lock);
-
 	zfcp_fc_nameserver_init(adapter);
 
-	return 0;
+	if (!zfcp_adapter_scsi_register(adapter))
+		return 0;
 
 sysfs_failed:
 	zfcp_adapter_debug_unregister(adapter);
@@ -573,14 +564,7 @@ void zfcp_adapter_dequeue(struct zfcp_adapter *adapter)
 		return;
 
 	zfcp_adapter_debug_unregister(adapter);
-
-	/* remove specified adapter data structure from list */
-	write_lock_irq(&zfcp_data.config_lock);
-	list_del(&adapter->list);
-	write_unlock_irq(&zfcp_data.config_lock);
-
 	zfcp_qdio_free(adapter);
-
 	zfcp_free_low_mem_buffers(adapter);
 	kfree(adapter->req_list);
 	kfree(adapter->fc_stats);
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
index 728147131e1d..285881f07648 100644
--- a/drivers/s390/scsi/zfcp_ccw.c
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -106,10 +106,6 @@ static int zfcp_ccw_set_online(struct ccw_device *ccw_device)
 	if (retval)
 		goto out;
 
-	retval = zfcp_adapter_scsi_register(adapter);
-	if (retval)
-		goto out_scsi_register;
-
 	/* initialize request counter */
 	BUG_ON(!zfcp_reqlist_isempty(adapter));
 	adapter->req_no = 0;
@@ -123,8 +119,6 @@ static int zfcp_ccw_set_online(struct ccw_device *ccw_device)
 	flush_work(&adapter->scan_work);
 	return 0;
 
- out_scsi_register:
-	zfcp_erp_thread_kill(adapter);
  out:
 	up(&zfcp_data.config_sema);
 	return retval;
diff --git a/drivers/s390/scsi/zfcp_cfdc.c b/drivers/s390/scsi/zfcp_cfdc.c
index f1a7518e67ed..10cbfd172a28 100644
--- a/drivers/s390/scsi/zfcp_cfdc.c
+++ b/drivers/s390/scsi/zfcp_cfdc.c
@@ -85,20 +85,9 @@ static int zfcp_cfdc_copy_to_user(void __user  *user_buffer,
 
 static struct zfcp_adapter *zfcp_cfdc_get_adapter(u32 devno)
 {
-	struct zfcp_adapter *adapter = NULL, *cur_adapter;
-	struct ccw_dev_id dev_id;
-
-	read_lock_irq(&zfcp_data.config_lock);
-	list_for_each_entry(cur_adapter, &zfcp_data.adapter_list_head, list) {
-		ccw_device_get_id(cur_adapter->ccw_device, &dev_id);
-		if (dev_id.devno == devno) {
-			adapter = cur_adapter;
-			zfcp_adapter_get(adapter);
-			break;
-		}
-	}
-	read_unlock_irq(&zfcp_data.config_lock);
-	return adapter;
+	char busid[9];
+	snprintf(busid, sizeof(busid), "0.0.%04x", devno);
+	return zfcp_get_adapter_by_busid(busid);
 }
 
 static int zfcp_cfdc_set_fsf(struct zfcp_fsf_cfdc *fsf_cfdc, int command)
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 735d675623f8..cb6df609953e 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -522,7 +522,7 @@ static const char *zfcp_rec_dbf_ids[] = {
 	[29]	= "link down",
 	[30]	= "link up status read",
 	[31]	= "open port failed",
-	[32]	= "open port failed",
+	[32]	= "",
 	[33]	= "close port",
 	[34]	= "open unit failed",
 	[35]	= "exclusive open unit failed",
@@ -936,6 +936,7 @@ void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req)
 	rct->reason_code = hdr->reason_code;
 	rct->expl = hdr->reason_code_expl;
 	rct->vendor_unique = hdr->vendor_unique;
+	rct->max_res_size = hdr->max_res_size;
 	rct->len = min((int)ct->resp->length - (int)sizeof(struct ct_hdr),
 		       ZFCP_DBF_SAN_MAX_PAYLOAD);
 	debug_event(adapter->san_dbf, level, r, sizeof(*r));
@@ -1043,6 +1044,7 @@ static int zfcp_san_dbf_view_format(debug_info_t *id, struct debug_view *view,
 		zfcp_dbf_out(&p, "reason_code", "0x%02x", ct->reason_code);
 		zfcp_dbf_out(&p, "reason_code_expl", "0x%02x", ct->expl);
 		zfcp_dbf_out(&p, "vendor_unique", "0x%02x", ct->vendor_unique);
+		zfcp_dbf_out(&p, "max_res_size", "0x%04x", ct->max_res_size);
 	} else if (strncmp(r->tag, "oels", ZFCP_DBF_TAG_SIZE) == 0 ||
 		   strncmp(r->tag, "rels", ZFCP_DBF_TAG_SIZE) == 0 ||
 		   strncmp(r->tag, "iels", ZFCP_DBF_TAG_SIZE) == 0) {
@@ -1249,7 +1251,7 @@ int zfcp_adapter_debug_register(struct zfcp_adapter *adapter)
 	char dbf_name[DEBUG_MAX_NAME_LEN];
 
 	/* debug feature area which records recovery activity */
-	sprintf(dbf_name, "zfcp_%s_rec", zfcp_get_busid_by_adapter(adapter));
+	sprintf(dbf_name, "zfcp_%s_rec", dev_name(&adapter->ccw_device->dev));
 	adapter->rec_dbf = debug_register(dbf_name, dbfsize, 1,
 					  sizeof(struct zfcp_rec_dbf_record));
 	if (!adapter->rec_dbf)
@@ -1259,7 +1261,7 @@ int zfcp_adapter_debug_register(struct zfcp_adapter *adapter)
 	debug_set_level(adapter->rec_dbf, 3);
 
 	/* debug feature area which records HBA (FSF and QDIO) conditions */
-	sprintf(dbf_name, "zfcp_%s_hba", zfcp_get_busid_by_adapter(adapter));
+	sprintf(dbf_name, "zfcp_%s_hba", dev_name(&adapter->ccw_device->dev));
 	adapter->hba_dbf = debug_register(dbf_name, dbfsize, 1,
 					  sizeof(struct zfcp_hba_dbf_record));
 	if (!adapter->hba_dbf)
@@ -1269,7 +1271,7 @@ int zfcp_adapter_debug_register(struct zfcp_adapter *adapter)
 	debug_set_level(adapter->hba_dbf, 3);
 
 	/* debug feature area which records SAN command failures and recovery */
-	sprintf(dbf_name, "zfcp_%s_san", zfcp_get_busid_by_adapter(adapter));
+	sprintf(dbf_name, "zfcp_%s_san", dev_name(&adapter->ccw_device->dev));
 	adapter->san_dbf = debug_register(dbf_name, dbfsize, 1,
 					  sizeof(struct zfcp_san_dbf_record));
 	if (!adapter->san_dbf)
@@ -1279,7 +1281,7 @@ int zfcp_adapter_debug_register(struct zfcp_adapter *adapter)
 	debug_set_level(adapter->san_dbf, 6);
 
 	/* debug feature area which records SCSI command failures and recovery */
-	sprintf(dbf_name, "zfcp_%s_scsi", zfcp_get_busid_by_adapter(adapter));
+	sprintf(dbf_name, "zfcp_%s_scsi", dev_name(&adapter->ccw_device->dev));
 	adapter->scsi_dbf = debug_register(dbf_name, dbfsize, 1,
 					   sizeof(struct zfcp_scsi_dbf_record));
 	if (!adapter->scsi_dbf)
diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
index 5d6b2dff855b..74998ff88e57 100644
--- a/drivers/s390/scsi/zfcp_dbf.h
+++ b/drivers/s390/scsi/zfcp_dbf.h
@@ -171,6 +171,7 @@ struct zfcp_san_dbf_record_ct_response {
 	u8 reason_code;
 	u8 expl;
 	u8 vendor_unique;
+	u16 max_res_size;
 	u32 len;
 } __attribute__ ((packed));
 
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index e19e46ae4a68..510662783a6f 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -159,20 +159,6 @@ struct fcp_rscn_element {
         u32 nport_did:24;
 } __attribute__((packed));
 
-#define ZFCP_PORT_ADDRESS   0x0
-#define ZFCP_AREA_ADDRESS   0x1
-#define ZFCP_DOMAIN_ADDRESS 0x2
-#define ZFCP_FABRIC_ADDRESS 0x3
-
-#define ZFCP_PORTS_RANGE_PORT   0xFFFFFF
-#define ZFCP_PORTS_RANGE_AREA   0xFFFF00
-#define ZFCP_PORTS_RANGE_DOMAIN 0xFF0000
-#define ZFCP_PORTS_RANGE_FABRIC 0x000000
-
-#define ZFCP_NO_PORTS_PER_AREA    0x100
-#define ZFCP_NO_PORTS_PER_DOMAIN  0x10000
-#define ZFCP_NO_PORTS_PER_FABRIC  0x1000000
-
 /* see fc-ph */
 struct fcp_logo {
         u32 command;
@@ -211,7 +197,6 @@ struct zfcp_ls_adisc {
 #define ZFCP_CT_UNABLE_TO_PERFORM_CMD	0x09
 #define ZFCP_CT_GID_PN			0x0121
 #define ZFCP_CT_GPN_FT			0x0172
-#define ZFCP_CT_MAX_SIZE		0x1020
 #define ZFCP_CT_ACCEPT			0x8002
 #define ZFCP_CT_REJECT			0x8001
 
@@ -258,7 +243,6 @@ struct zfcp_ls_adisc {
 
 /* remote port status */
 #define ZFCP_STATUS_PORT_PHYS_OPEN		0x00000001
-#define ZFCP_STATUS_PORT_DID_DID		0x00000002
 #define ZFCP_STATUS_PORT_PHYS_CLOSING		0x00000004
 #define ZFCP_STATUS_PORT_NO_WWPN		0x00000008
 #define ZFCP_STATUS_PORT_INVALID_WWPN		0x00000020
@@ -340,8 +324,6 @@ struct ct_iu_gid_pn_resp {
  * @wka_port: port where the request is sent to
  * @req: scatter-gather list for request
  * @resp: scatter-gather list for response
- * @req_count: number of elements in request scatter-gather list
- * @resp_count: number of elements in response scatter-gather list
  * @handler: handler function (called for response to the request)
  * @handler_data: data passed to handler function
  * @timeout: FSF timeout for this request
@@ -352,8 +334,6 @@ struct zfcp_send_ct {
 	struct zfcp_wka_port *wka_port;
 	struct scatterlist *req;
 	struct scatterlist *resp;
-	unsigned int req_count;
-	unsigned int resp_count;
 	void (*handler)(unsigned long);
 	unsigned long handler_data;
 	int timeout;
@@ -378,8 +358,6 @@ struct zfcp_gid_pn_data {
  * @d_id: destiniation id of port where request is sent to
  * @req: scatter-gather list for request
  * @resp: scatter-gather list for response
- * @req_count: number of elements in request scatter-gather list
- * @resp_count: number of elements in response scatter-gather list
  * @handler: handler function (called for response to the request)
  * @handler_data: data passed to handler function
  * @completion: completion for synchronization purposes
@@ -392,8 +370,6 @@ struct zfcp_send_els {
 	u32 d_id;
 	struct scatterlist *req;
 	struct scatterlist *resp;
-	unsigned int req_count;
-	unsigned int resp_count;
 	void (*handler)(unsigned long);
 	unsigned long handler_data;
 	struct completion *completion;
@@ -451,7 +427,6 @@ struct zfcp_latencies {
 };
 
 struct zfcp_adapter {
-	struct list_head	list;              /* list of adapters */
 	atomic_t                refcount;          /* reference count */
 	wait_queue_head_t	remove_wq;         /* can be used to wait for
 						      refcount drop to zero */
@@ -593,16 +568,11 @@ struct zfcp_fsf_req {
 struct zfcp_data {
 	struct scsi_host_template scsi_host_template;
 	struct scsi_transport_template *scsi_transport_template;
-	struct list_head	adapter_list_head;  /* head of adapter list */
 	rwlock_t                config_lock;        /* serialises changes
 						       to adapter/port/unit
 						       lists */
 	struct semaphore        config_sema;        /* serialises configuration
 						       changes */
-	atomic_t		loglevel;            /* current loglevel */
-	char			init_busid[20];
-	u64			init_wwpn;
-	u64			init_fcp_lun;
 	struct kmem_cache	*fsf_req_qtcb_cache;
 	struct kmem_cache	*sr_buffer_cache;
 	struct kmem_cache	*gid_pn_cache;
@@ -623,8 +593,6 @@ struct zfcp_fsf_req_qtcb {
 #define ZFCP_SET                0x00000100
 #define ZFCP_CLEAR              0x00000200
 
-#define zfcp_get_busid_by_adapter(adapter) (dev_name(&adapter->ccw_device->dev))
-
 /*
  * Helper functions for request ID management.
  */
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 4ed4950d994b..387a3af528ac 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -840,7 +840,6 @@ static int zfcp_erp_open_ptp_port(struct zfcp_erp_action *act)
 		return ZFCP_ERP_FAILED;
 	}
 	port->d_id = adapter->peer_d_id;
-	atomic_set_mask(ZFCP_STATUS_PORT_DID_DID, &port->status);
 	return zfcp_erp_port_strategy_open_port(act);
 }
 
@@ -871,12 +870,12 @@ static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act)
 	case ZFCP_ERP_STEP_PORT_CLOSING:
 		if (fc_host_port_type(adapter->scsi_host) == FC_PORTTYPE_PTP)
 			return zfcp_erp_open_ptp_port(act);
-		if (!(p_status & ZFCP_STATUS_PORT_DID_DID)) {
+		if (!port->d_id) {
 			queue_work(zfcp_data.work_queue, &port->gid_pn_work);
 			return ZFCP_ERP_CONTINUES;
 		}
 	case ZFCP_ERP_STEP_NAMESERVER_LOOKUP:
-		if (!(p_status & ZFCP_STATUS_PORT_DID_DID)) {
+		if (!port->d_id) {
 			if (p_status & (ZFCP_STATUS_PORT_INVALID_WWPN)) {
 				zfcp_erp_port_failed(port, 26, NULL);
 				return ZFCP_ERP_EXIT;
@@ -888,7 +887,7 @@ static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act)
 	case ZFCP_ERP_STEP_PORT_OPENING:
 		/* D_ID might have changed during open */
 		if (p_status & ZFCP_STATUS_COMMON_OPEN) {
-			if (p_status & ZFCP_STATUS_PORT_DID_DID)
+			if (port->d_id)
 				return ZFCP_ERP_SUCCEEDED;
 			else {
 				act->step = ZFCP_ERP_STEP_PORT_CLOSING;
@@ -1385,6 +1384,7 @@ static int zfcp_erp_thread(void *data)
 	struct list_head *next;
 	struct zfcp_erp_action *act;
 	unsigned long flags;
+	int ignore;
 
 	daemonize("zfcperp%s", dev_name(&adapter->ccw_device->dev));
 	/* Block all signals */
@@ -1407,7 +1407,7 @@ static int zfcp_erp_thread(void *data)
 		}
 
 		zfcp_rec_dbf_event_thread_lock(4, adapter);
-		down_interruptible(&adapter->erp_ready_sem);
+		ignore = down_interruptible(&adapter->erp_ready_sem);
 		zfcp_rec_dbf_event_thread_lock(5, adapter);
 	}
 
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index f009f2a7ec3e..eabdfe24456e 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -11,6 +11,20 @@
 
 #include "zfcp_ext.h"
 
+enum rscn_address_format {
+	RSCN_PORT_ADDRESS	= 0x0,
+	RSCN_AREA_ADDRESS	= 0x1,
+	RSCN_DOMAIN_ADDRESS	= 0x2,
+	RSCN_FABRIC_ADDRESS	= 0x3,
+};
+
+static u32 rscn_range_mask[] = {
+	[RSCN_PORT_ADDRESS]		= 0xFFFFFF,
+	[RSCN_AREA_ADDRESS]		= 0xFFFF00,
+	[RSCN_DOMAIN_ADDRESS]		= 0xFF0000,
+	[RSCN_FABRIC_ADDRESS]		= 0x000000,
+};
+
 struct ct_iu_gpn_ft_req {
 	struct ct_hdr header;
 	u8 flags;
@@ -26,9 +40,12 @@ struct gpn_ft_resp_acc {
 	u64 wwpn;
 } __attribute__ ((packed));
 
-#define ZFCP_GPN_FT_ENTRIES ((PAGE_SIZE - sizeof(struct ct_hdr)) \
-				/ sizeof(struct gpn_ft_resp_acc))
+#define ZFCP_CT_SIZE_ONE_PAGE	(PAGE_SIZE - sizeof(struct ct_hdr))
+#define ZFCP_GPN_FT_ENTRIES	(ZFCP_CT_SIZE_ONE_PAGE \
+					/ sizeof(struct gpn_ft_resp_acc))
 #define ZFCP_GPN_FT_BUFFERS 4
+#define ZFCP_GPN_FT_MAX_SIZE (ZFCP_GPN_FT_BUFFERS * PAGE_SIZE \
+				- sizeof(struct ct_hdr))
 #define ZFCP_GPN_FT_MAX_ENTRIES ZFCP_GPN_FT_BUFFERS * (ZFCP_GPN_FT_ENTRIES + 1)
 
 struct ct_iu_gpn_ft_resp {
@@ -160,22 +177,7 @@ static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
 	for (i = 1; i < no_entries; i++) {
 		/* skip head and start with 1st element */
 		fcp_rscn_element++;
-		switch (fcp_rscn_element->addr_format) {
-		case ZFCP_PORT_ADDRESS:
-			range_mask = ZFCP_PORTS_RANGE_PORT;
-			break;
-		case ZFCP_AREA_ADDRESS:
-			range_mask = ZFCP_PORTS_RANGE_AREA;
-			break;
-		case ZFCP_DOMAIN_ADDRESS:
-			range_mask = ZFCP_PORTS_RANGE_DOMAIN;
-			break;
-		case ZFCP_FABRIC_ADDRESS:
-			range_mask = ZFCP_PORTS_RANGE_FABRIC;
-			break;
-		default:
-			continue;
-		}
+		range_mask = rscn_range_mask[fcp_rscn_element->addr_format];
 		_zfcp_fc_incoming_rscn(fsf_req, range_mask, fcp_rscn_element);
 	}
 	schedule_work(&fsf_req->adapter->scan_work);
@@ -266,7 +268,6 @@ static void zfcp_fc_ns_gid_pn_eval(unsigned long data)
 		return;
 	/* looks like a valid d_id */
 	port->d_id = ct_iu_resp->d_id & ZFCP_DID_MASK;
-	atomic_set_mask(ZFCP_STATUS_PORT_DID_DID, &port->status);
 }
 
 int static zfcp_fc_ns_gid_pn_request(struct zfcp_erp_action *erp_action,
@@ -284,8 +285,6 @@ int static zfcp_fc_ns_gid_pn_request(struct zfcp_erp_action *erp_action,
 	gid_pn->ct.timeout = ZFCP_NS_GID_PN_TIMEOUT;
 	gid_pn->ct.req = &gid_pn->req;
 	gid_pn->ct.resp = &gid_pn->resp;
-	gid_pn->ct.req_count = 1;
-	gid_pn->ct.resp_count = 1;
 	sg_init_one(&gid_pn->req, &gid_pn->ct_iu_req,
 		    sizeof(struct ct_iu_gid_pn_req));
 	sg_init_one(&gid_pn->resp, &gid_pn->ct_iu_resp,
@@ -297,7 +296,7 @@ int static zfcp_fc_ns_gid_pn_request(struct zfcp_erp_action *erp_action,
 	gid_pn->ct_iu_req.header.gs_subtype = ZFCP_CT_NAME_SERVER;
 	gid_pn->ct_iu_req.header.options = ZFCP_CT_SYNCHRONOUS;
 	gid_pn->ct_iu_req.header.cmd_rsp_code = ZFCP_CT_GID_PN;
-	gid_pn->ct_iu_req.header.max_res_size = ZFCP_CT_MAX_SIZE;
+	gid_pn->ct_iu_req.header.max_res_size = ZFCP_CT_SIZE_ONE_PAGE / 4;
 	gid_pn->ct_iu_req.wwpn = erp_action->port->wwpn;
 
 	init_completion(&compl_rec.done);
@@ -407,8 +406,6 @@ static int zfcp_fc_adisc(struct zfcp_port *port)
 	sg_init_one(adisc->els.resp, &adisc->ls_adisc_acc,
 		    sizeof(struct zfcp_ls_adisc));
 
-	adisc->els.req_count = 1;
-	adisc->els.resp_count = 1;
 	adisc->els.adapter = adapter;
 	adisc->els.port = port;
 	adisc->els.d_id = port->d_id;
@@ -448,17 +445,17 @@ void zfcp_test_link(struct zfcp_port *port)
 		zfcp_erp_port_forced_reopen(port, 0, 65, NULL);
 }
 
-static void zfcp_free_sg_env(struct zfcp_gpn_ft *gpn_ft)
+static void zfcp_free_sg_env(struct zfcp_gpn_ft *gpn_ft, int buf_num)
 {
 	struct scatterlist *sg = &gpn_ft->sg_req;
 
 	kfree(sg_virt(sg)); /* free request buffer */
-	zfcp_sg_free_table(gpn_ft->sg_resp, ZFCP_GPN_FT_BUFFERS);
+	zfcp_sg_free_table(gpn_ft->sg_resp, buf_num);
 
 	kfree(gpn_ft);
 }
 
-static struct zfcp_gpn_ft *zfcp_alloc_sg_env(void)
+static struct zfcp_gpn_ft *zfcp_alloc_sg_env(int buf_num)
 {
 	struct zfcp_gpn_ft *gpn_ft;
 	struct ct_iu_gpn_ft_req *req;
@@ -475,8 +472,8 @@ static struct zfcp_gpn_ft *zfcp_alloc_sg_env(void)
 	}
 	sg_init_one(&gpn_ft->sg_req, req, sizeof(*req));
 
-	if (zfcp_sg_setup_table(gpn_ft->sg_resp, ZFCP_GPN_FT_BUFFERS)) {
-		zfcp_free_sg_env(gpn_ft);
+	if (zfcp_sg_setup_table(gpn_ft->sg_resp, buf_num)) {
+		zfcp_free_sg_env(gpn_ft, buf_num);
 		gpn_ft = NULL;
 	}
 out:
@@ -485,7 +482,8 @@ out:
 
 
 static int zfcp_scan_issue_gpn_ft(struct zfcp_gpn_ft *gpn_ft,
-				  struct zfcp_adapter *adapter)
+				  struct zfcp_adapter *adapter,
+				  int max_bytes)
 {
 	struct zfcp_send_ct *ct = &gpn_ft->ct;
 	struct ct_iu_gpn_ft_req *req = sg_virt(&gpn_ft->sg_req);
@@ -498,8 +496,7 @@ static int zfcp_scan_issue_gpn_ft(struct zfcp_gpn_ft *gpn_ft,
 	req->header.gs_subtype = ZFCP_CT_NAME_SERVER;
 	req->header.options = ZFCP_CT_SYNCHRONOUS;
 	req->header.cmd_rsp_code = ZFCP_CT_GPN_FT;
-	req->header.max_res_size = (sizeof(struct gpn_ft_resp_acc) *
-					(ZFCP_GPN_FT_MAX_ENTRIES - 1)) >> 2;
+	req->header.max_res_size = max_bytes / 4;
 	req->flags = 0;
 	req->domain_id_scope = 0;
 	req->area_id_scope = 0;
@@ -512,8 +509,6 @@ static int zfcp_scan_issue_gpn_ft(struct zfcp_gpn_ft *gpn_ft,
 	ct->timeout = 10;
 	ct->req = &gpn_ft->sg_req;
 	ct->resp = gpn_ft->sg_resp;
-	ct->req_count = 1;
-	ct->resp_count = ZFCP_GPN_FT_BUFFERS;
 
 	init_completion(&compl_rec.done);
 	compl_rec.handler = NULL;
@@ -540,7 +535,7 @@ static void zfcp_validate_port(struct zfcp_port *port)
 	zfcp_port_dequeue(port);
 }
 
-static int zfcp_scan_eval_gpn_ft(struct zfcp_gpn_ft *gpn_ft)
+static int zfcp_scan_eval_gpn_ft(struct zfcp_gpn_ft *gpn_ft, int max_entries)
 {
 	struct zfcp_send_ct *ct = &gpn_ft->ct;
 	struct scatterlist *sg = gpn_ft->sg_resp;
@@ -560,13 +555,17 @@ static int zfcp_scan_eval_gpn_ft(struct zfcp_gpn_ft *gpn_ft)
 		return -EIO;
 	}
 
-	if (hdr->max_res_size)
+	if (hdr->max_res_size) {
+		dev_warn(&adapter->ccw_device->dev,
+			 "The name server reported %d words residual data\n",
+			 hdr->max_res_size);
 		return -E2BIG;
+	}
 
 	down(&zfcp_data.config_sema);
 
 	/* first entry is the header */
-	for (x = 1; x < ZFCP_GPN_FT_MAX_ENTRIES && !last; x++) {
+	for (x = 1; x < max_entries && !last; x++) {
 		if (x % (ZFCP_GPN_FT_ENTRIES + 1))
 			acc++;
 		else
@@ -589,7 +588,6 @@ static int zfcp_scan_eval_gpn_ft(struct zfcp_gpn_ft *gpn_ft)
 		}
 
 		port = zfcp_port_enqueue(adapter, acc->wwpn,
-					 ZFCP_STATUS_PORT_DID_DID |
 					 ZFCP_STATUS_COMMON_NOESC, d_id);
 		if (IS_ERR(port))
 			ret = PTR_ERR(port);
@@ -612,6 +610,12 @@ int zfcp_scan_ports(struct zfcp_adapter *adapter)
 {
 	int ret, i;
 	struct zfcp_gpn_ft *gpn_ft;
+	int chain, max_entries, buf_num, max_bytes;
+
+	chain = adapter->adapter_features & FSF_FEATURE_ELS_CT_CHAINED_SBALS;
+	buf_num = chain ? ZFCP_GPN_FT_BUFFERS : 1;
+	max_entries = chain ? ZFCP_GPN_FT_MAX_ENTRIES : ZFCP_GPN_FT_ENTRIES;
+	max_bytes = chain ? ZFCP_GPN_FT_MAX_SIZE : ZFCP_CT_SIZE_ONE_PAGE;
 
 	if (fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPORT)
 		return 0;
@@ -620,23 +624,23 @@ int zfcp_scan_ports(struct zfcp_adapter *adapter)
 	if (ret)
 		return ret;
 
-	gpn_ft = zfcp_alloc_sg_env();
+	gpn_ft = zfcp_alloc_sg_env(buf_num);
 	if (!gpn_ft) {
 		ret = -ENOMEM;
 		goto out;
 	}
 
 	for (i = 0; i < 3; i++) {
-		ret = zfcp_scan_issue_gpn_ft(gpn_ft, adapter);
+		ret = zfcp_scan_issue_gpn_ft(gpn_ft, adapter, max_bytes);
 		if (!ret) {
-			ret = zfcp_scan_eval_gpn_ft(gpn_ft);
+			ret = zfcp_scan_eval_gpn_ft(gpn_ft, max_entries);
 			if (ret == -EAGAIN)
 				ssleep(1);
 			else
 				break;
 		}
 	}
-	zfcp_free_sg_env(gpn_ft);
+	zfcp_free_sg_env(gpn_ft, buf_num);
 out:
 	zfcp_wka_port_put(&adapter->nsp);
 	return ret;
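Because the RSCN address-format values are contiguous, the former switch
statement collapses into the rscn_range_mask[] table lookup introduced above.
A minimal illustration of how such a mask is then applied when checking
whether a port falls inside the address range named by an RSCN entry (the
helper below is illustrative, not zfcp code):

	/* illustrative only: a port is affected by an RSCN entry when its
	 * D_ID matches the entry's D_ID within the format's address range */
	static int rscn_matches(u32 port_d_id, u32 rscn_d_id, u32 range_mask)
	{
		return (port_d_id & range_mask) == (rscn_d_id & range_mask);
	}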
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 9c72e083559d..e6416f8541b0 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -644,38 +644,38 @@ static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
 	}
 }
 
-static int zfcp_fsf_sbal_check(struct zfcp_adapter *adapter)
+static int zfcp_fsf_sbal_available(struct zfcp_adapter *adapter)
 {
-	struct zfcp_qdio_queue *req_q = &adapter->req_q;
-
-	spin_lock_bh(&adapter->req_q_lock);
-	if (atomic_read(&req_q->count))
+	if (atomic_read(&adapter->req_q.count) > 0)
 		return 1;
-	spin_unlock_bh(&adapter->req_q_lock);
+	atomic_inc(&adapter->qdio_outb_full);
 	return 0;
 }
 
-static int zfcp_fsf_sbal_available(struct zfcp_adapter *adapter)
-{
-	unsigned int count = atomic_read(&adapter->req_q.count);
-	if (!count)
-		atomic_inc(&adapter->qdio_outb_full);
-	return count > 0;
-}
-
 static int zfcp_fsf_req_sbal_get(struct zfcp_adapter *adapter)
+	__releases(&adapter->req_q_lock)
+	__acquires(&adapter->req_q_lock)
 {
+	struct zfcp_qdio_queue *req_q = &adapter->req_q;
 	long ret;
 
+	if (atomic_read(&req_q->count) <= -REQUEST_LIST_SIZE)
+		return -EIO;
+	if (atomic_read(&req_q->count) > 0)
+		return 0;
+
+	atomic_dec(&req_q->count);
 	spin_unlock_bh(&adapter->req_q_lock);
 	ret = wait_event_interruptible_timeout(adapter->request_wq,
-					zfcp_fsf_sbal_check(adapter), 5 * HZ);
+					atomic_read(&req_q->count) >= 0,
+					5 * HZ);
+	spin_lock_bh(&adapter->req_q_lock);
+	atomic_inc(&req_q->count);
+
 	if (ret > 0)
 		return 0;
 	if (!ret)
 		atomic_inc(&adapter->qdio_outb_full);
-
-	spin_lock_bh(&adapter->req_q_lock);
 	return -EIO;
 }
 
@@ -1013,12 +1013,29 @@ skip_fsfstatus:
 		send_ct->handler(send_ct->handler_data);
 }
 
-static int zfcp_fsf_setup_sbals(struct zfcp_fsf_req *req,
-				struct scatterlist *sg_req,
-				struct scatterlist *sg_resp, int max_sbals)
+static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
+				       struct scatterlist *sg_req,
+				       struct scatterlist *sg_resp,
+				       int max_sbals)
 {
+	struct qdio_buffer_element *sbale = zfcp_qdio_sbale_req(req);
+	u32 feat = req->adapter->adapter_features;
 	int bytes;
 
+	if (!(feat & FSF_FEATURE_ELS_CT_CHAINED_SBALS)) {
+		if (sg_req->length > PAGE_SIZE || sg_resp->length > PAGE_SIZE ||
+		    !sg_is_last(sg_req) || !sg_is_last(sg_resp))
+			return -EOPNOTSUPP;
+
+		sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE_READ;
+		sbale[2].addr   = sg_virt(sg_req);
+		sbale[2].length = sg_req->length;
+		sbale[3].addr   = sg_virt(sg_resp);
+		sbale[3].length = sg_resp->length;
+		sbale[3].flags |= SBAL_FLAGS_LAST_ENTRY;
+		return 0;
+	}
+
 	bytes = zfcp_qdio_sbals_from_sg(req, SBAL_FLAGS0_TYPE_WRITE_READ,
 					sg_req, max_sbals);
 	if (bytes <= 0)
@@ -1060,8 +1077,8 @@ int zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool,
 		goto out;
 	}
 
-	ret = zfcp_fsf_setup_sbals(req, ct->req, ct->resp,
-				   FSF_MAX_SBALS_PER_REQ);
+	ret = zfcp_fsf_setup_ct_els_sbals(req, ct->req, ct->resp,
+					  FSF_MAX_SBALS_PER_REQ);
 	if (ret)
 		goto failed_send;
 
@@ -1171,7 +1188,7 @@ int zfcp_fsf_send_els(struct zfcp_send_els *els)
 		goto out;
 	}
 
-	ret = zfcp_fsf_setup_sbals(req, els->req, els->resp, 2);
+	ret = zfcp_fsf_setup_ct_els_sbals(req, els->req, els->resp, 2);
 
 	if (ret)
 		goto failed_send;
@@ -1406,13 +1423,7 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
 		switch (header->fsf_status_qual.word[0]) {
 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
-			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-			break;
 		case FSF_SQ_NO_RETRY_POSSIBLE:
-			dev_warn(&req->adapter->ccw_device->dev,
-				 "Remote port 0x%016Lx could not be opened\n",
-				 (unsigned long long)port->wwpn);
-			zfcp_erp_port_failed(port, 32, req);
 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 			break;
 		}
@@ -1440,10 +1451,10 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
 		 * Alternately, an ADISC/PDISC ELS should suffice, as well.
 		 */
 		plogi = (struct fsf_plogi *) req->qtcb->bottom.support.els;
-		if (req->qtcb->bottom.support.els1_length >= sizeof(*plogi)) {
+		if (req->qtcb->bottom.support.els1_length >=
+		    FSF_PLOGI_MIN_LEN) {
 			if (plogi->serv_param.wwpn != port->wwpn)
-				atomic_clear_mask(ZFCP_STATUS_PORT_DID_DID,
-						  &port->status);
+				port->d_id = 0;
 			else {
 				port->wwnn = plogi->serv_param.wwnn;
 				zfcp_fc_plogi_evaluate(port, plogi);
@@ -1907,7 +1918,7 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req)
 				dev_err(&adapter->ccw_device->dev,
 					"Shared read-write access not "
 					"supported (unit 0x%016Lx, port "
-					"0x%016Lx\n)",
+					"0x%016Lx)\n",
 					(unsigned long long)unit->fcp_lun,
 					(unsigned long long)unit->port->wwpn);
 				zfcp_erp_unit_failed(unit, 36, req);
diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
index fa2a31780611..8bb200252347 100644
--- a/drivers/s390/scsi/zfcp_fsf.h
+++ b/drivers/s390/scsi/zfcp_fsf.h
@@ -164,6 +164,7 @@
 #define FSF_FEATURE_LUN_SHARING			0x00000004
 #define FSF_FEATURE_NOTIFICATION_LOST		0x00000008
 #define FSF_FEATURE_HBAAPI_MANAGEMENT           0x00000010
+#define FSF_FEATURE_ELS_CT_CHAINED_SBALS	0x00000020
 #define FSF_FEATURE_UPDATE_ALERT		0x00000100
 #define FSF_FEATURE_MEASUREMENT_DATA		0x00000200
 
@@ -322,6 +323,7 @@ struct fsf_nport_serv_param {
 	u8  vendor_version_level[16];
 } __attribute__ ((packed));
 
+#define FSF_PLOGI_MIN_LEN	112
 struct fsf_plogi {
 	u32    code;
 	struct fsf_nport_serv_param serv_param;
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index d3b55fb66f13..33e0a206a0a4 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -112,7 +112,7 @@ static void zfcp_qdio_reqid_check(struct zfcp_adapter *adapter,
 		 * corruption and must stop the machine immediatly.
 		 */
 		panic("error: unknown request id (%lx) on adapter %s.\n",
-		      req_id, zfcp_get_busid_by_adapter(adapter));
+		      req_id, dev_name(&adapter->ccw_device->dev));
 
 	zfcp_reqlist_remove(adapter, fsf_req);
 	spin_unlock_irqrestore(&adapter->req_list_lock, flags);
@@ -392,7 +392,7 @@ int zfcp_qdio_allocate(struct zfcp_adapter *adapter)
 
 	init_data->cdev = adapter->ccw_device;
 	init_data->q_format = QDIO_ZFCP_QFMT;
-	memcpy(init_data->adapter_name, zfcp_get_busid_by_adapter(adapter), 8);
+	memcpy(init_data->adapter_name, dev_name(&adapter->ccw_device->dev), 8);
 	ASCEBC(init_data->adapter_name, 8);
 	init_data->qib_param_field_format = 0;
 	init_data->qib_param_field = NULL;
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 403ecad48d4b..152d4aa9354f 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -352,6 +352,8 @@ config ISCSI_TCP
 
 	 http://open-iscsi.org
 
+source "drivers/scsi/cxgb3i/Kconfig"
+
 config SGIWD93_SCSI
 	tristate "SGI WD93C93 SCSI Driver"
 	depends on SGI_HAS_WD93 && SCSI
@@ -603,6 +605,19 @@ config SCSI_FLASHPOINT
 	  substantial, so users of MultiMaster Host Adapters may not
 	  wish to include it.
 
+config LIBFC
+	tristate "LibFC module"
+	select SCSI_FC_ATTRS
+	---help---
+	  Fibre Channel library module
+
+config FCOE
+	tristate "FCoE module"
+	depends on PCI
+	select LIBFC
+	---help---
+	  Fibre Channel over Ethernet module
+
 config SCSI_DMX3191D
 	tristate "DMX3191D SCSI support"
 	depends on PCI && SCSI
@@ -1357,6 +1372,13 @@ config SCSI_LPFC
           This lpfc driver supports the Emulex LightPulse
           Family of Fibre Channel PCI host adapters.
 
+config SCSI_LPFC_DEBUG_FS
+	bool "Emulex LightPulse Fibre Channel debugfs Support"
+	depends on SCSI_LPFC && DEBUG_FS
+	help
+	  This makes debugging information from the lpfc driver
+	  available via the debugfs filesystem.
+
 config SCSI_SIM710
 	tristate "Simple 53c710 SCSI support (Compaq, NCR machines)"
 	depends on (EISA || MCA) && SCSI
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 72fd5043cfa1..1410697257cb 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -36,7 +36,9 @@ obj-$(CONFIG_SCSI_SAS_LIBSAS)	+= libsas/
 obj-$(CONFIG_SCSI_SRP_ATTRS)	+= scsi_transport_srp.o
 obj-$(CONFIG_SCSI_DH)		+= device_handler/
 
-obj-$(CONFIG_ISCSI_TCP) 	+= libiscsi.o	iscsi_tcp.o
+obj-$(CONFIG_LIBFC)		+= libfc/
+obj-$(CONFIG_FCOE)		+= fcoe/
+obj-$(CONFIG_ISCSI_TCP) 	+= libiscsi.o	libiscsi_tcp.o iscsi_tcp.o
 obj-$(CONFIG_INFINIBAND_ISER) 	+= libiscsi.o
 obj-$(CONFIG_SCSI_A4000T)	+= 53c700.o	a4000t.o
 obj-$(CONFIG_SCSI_ZORRO7XX)	+= 53c700.o	zorro7xx.o
@@ -124,6 +126,7 @@ obj-$(CONFIG_SCSI_HPTIOP)	+= hptiop.o
 obj-$(CONFIG_SCSI_STEX)		+= stex.o
 obj-$(CONFIG_SCSI_MVSAS)	+= mvsas.o
 obj-$(CONFIG_PS3_ROM)		+= ps3rom.o
+obj-$(CONFIG_SCSI_CXGB3_ISCSI)	+= libiscsi.o libiscsi_tcp.o cxgb3i/
 
 obj-$(CONFIG_ARM)		+= arm/
 
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index eeddbd19eba5..f92da9fd5f20 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -30,7 +30,7 @@
  * $Log: NCR5380.c,v $
 
  * Revision 1.10 1998/9/2	Alan Cox
- *				(alan@redhat.com)
+ *				(alan@lxorguk.ukuu.org.uk)
  * Fixed up the timer lockups reported so far. Things still suck. Looking 
  * forward to 2.3 and per device request queues. Then it'll be possible to
  * SMP thread this beast and improve life no end.
diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c
index 84bb61628372..3c298c7253ee 100644
--- a/drivers/scsi/a100u2w.c
+++ b/drivers/scsi/a100u2w.c
@@ -54,7 +54,7 @@
  * 9/28/04 Christoph Hellwig <hch@lst.de>
  *	    - merge the two source files
  *	    - remove internal queueing code
- * 14/06/07 Alan Cox <alan@redhat.com>
+ * 14/06/07 Alan Cox <alan@lxorguk.ukuu.org.uk>
  *	 - Grand cleanup and Linuxisation
  */
 
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 8abfd06b5a72..90d1d0878cb8 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -1,6 +1,6 @@
 /*
  *	Adaptec AAC series RAID controller driver
- *	(c) Copyright 2001 Red Hat Inc.	<alan@redhat.com>
+ *	(c) Copyright 2001 Red Hat Inc.
  *
  * based on the old aacraid driver that is..
  * Adaptec aacraid device driver for Linux.
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index a7355260cfcf..0391d759dfdb 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -1,6 +1,6 @@
 /*
  *	Adaptec AAC series RAID controller driver
- *	(c) Copyright 2001 Red Hat Inc.	<alan@redhat.com>
+ *	(c) Copyright 2001 Red Hat Inc.
  *
  * based on the old aacraid driver that is..
  * Adaptec aacraid device driver for Linux.
@@ -90,14 +90,24 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
 	if (size < le16_to_cpu(kfib->header.SenderSize))
 		size = le16_to_cpu(kfib->header.SenderSize);
 	if (size > dev->max_fib_size) {
+		dma_addr_t daddr;
+
 		if (size > 2048) {
 			retval = -EINVAL;
 			goto cleanup;
 		}
+
+		kfib = pci_alloc_consistent(dev->pdev, size, &daddr);
+		if (!kfib) {
+			retval = -ENOMEM;
+			goto cleanup;
+		}
+
 		/* Highjack the hw_fib */
 		hw_fib = fibptr->hw_fib_va;
 		hw_fib_pa = fibptr->hw_fib_pa;
-		fibptr->hw_fib_va = kfib = pci_alloc_consistent(dev->pdev, size, &fibptr->hw_fib_pa);
+		fibptr->hw_fib_va = kfib;
+		fibptr->hw_fib_pa = daddr;
 		memset(((char *)kfib) + dev->max_fib_size, 0, size - dev->max_fib_size);
 		memcpy(kfib, hw_fib, dev->max_fib_size);
 	}
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index cbac06355107..16310443b55a 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -1,6 +1,6 @@
 /*
  *	Adaptec AAC series RAID controller driver
- *	(c) Copyright 2001 Red Hat Inc.	<alan@redhat.com>
+ *	(c) Copyright 2001 Red Hat Inc.
  *
  * based on the old aacraid driver that is..
  * Adaptec aacraid device driver for Linux.
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 289304aab690..d24c2670040b 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -1,6 +1,6 @@
 /*
  *	Adaptec AAC series RAID controller driver
- *	(c) Copyright 2001 Red Hat Inc.	<alan@redhat.com>
+ *	(c) Copyright 2001 Red Hat Inc.
  *
  * based on the old aacraid driver that is..
  * Adaptec aacraid device driver for Linux.
diff --git a/drivers/scsi/aacraid/dpcsup.c b/drivers/scsi/aacraid/dpcsup.c
index 933f208eedba..abc9ef5d1b10 100644
--- a/drivers/scsi/aacraid/dpcsup.c
+++ b/drivers/scsi/aacraid/dpcsup.c
@@ -1,6 +1,6 @@
 /*
  *	Adaptec AAC series RAID controller driver
- *	(c) Copyright 2001 Red Hat Inc.	<alan@redhat.com>
+ *	(c) Copyright 2001 Red Hat Inc.
  *
  * based on the old aacraid driver that is..
  * Adaptec aacraid device driver for Linux.
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 94acbeed4e7c..36d8aab97efe 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -1,6 +1,6 @@
 /*
  *	Adaptec AAC series RAID controller driver
- *	(c) Copyright 2001 Red Hat Inc.	<alan@redhat.com>
+ *	(c) Copyright 2001 Red Hat Inc.
  *
  * based on the old aacraid driver that is..
  * Adaptec aacraid device driver for Linux.
diff --git a/drivers/scsi/aacraid/rkt.c b/drivers/scsi/aacraid/rkt.c
index 8cd6588a83e3..16d8db550027 100644
--- a/drivers/scsi/aacraid/rkt.c
+++ b/drivers/scsi/aacraid/rkt.c
@@ -1,6 +1,6 @@
 /*
  *	Adaptec AAC series RAID controller driver
- *	(c) Copyright 2001 Red Hat Inc.	<alan@redhat.com>
+ *	(c) Copyright 2001 Red Hat Inc.
  *
  * based on the old aacraid driver that is..
  * Adaptec aacraid device driver for Linux.
diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c
index 073208b0f622..f70d9f8e79e5 100644
--- a/drivers/scsi/aacraid/rx.c
+++ b/drivers/scsi/aacraid/rx.c
@@ -1,6 +1,6 @@
 /*
  *	Adaptec AAC series RAID controller driver
- *	(c) Copyright 2001 Red Hat Inc.	<alan@redhat.com>
+ *	(c) Copyright 2001 Red Hat Inc.
  *
  * based on the old aacraid driver that is..
  * Adaptec aacraid device driver for Linux.
diff --git a/drivers/scsi/aacraid/sa.c b/drivers/scsi/aacraid/sa.c
index fc1a55796a89..b6a3c5c187b6 100644
--- a/drivers/scsi/aacraid/sa.c
+++ b/drivers/scsi/aacraid/sa.c
@@ -1,6 +1,6 @@
 /*
  *	Adaptec AAC series RAID controller driver
- *	(c) Copyright 2001 Red Hat Inc.	<alan@redhat.com>
+ *	(c) Copyright 2001 Red Hat Inc.
  *
  * based on the old aacraid driver that is..
  * Adaptec aacraid device driver for Linux.
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index 399fe559e4de..2f602720193e 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -13425,8 +13425,7 @@ static int __devinit advansys_board_found(struct Scsi_Host *shost,
 		}
 
 		boardp->asc_n_io_port = pci_resource_len(pdev, 1);
-		boardp->ioremap_addr = ioremap(pci_resource_start(pdev, 1),
-					       boardp->asc_n_io_port);
+		boardp->ioremap_addr = pci_ioremap_bar(pdev, 1);
 		if (!boardp->ioremap_addr) {
 			shost_printk(KERN_ERR, shost, "ioremap(%lx, %d) "
 					"returned NULL\n",
diff --git a/drivers/scsi/aha1740.c b/drivers/scsi/aha1740.c
index 7c45d88a205b..ed0e3e55652a 100644
--- a/drivers/scsi/aha1740.c
+++ b/drivers/scsi/aha1740.c
@@ -22,7 +22,7 @@
  * aha1740_makecode may still need even more work
  * if it doesn't work for your devices, take a look.
  *
- * Reworked for new_eh and new locking by Alan Cox <alan@redhat.com>
+ * Reworked for new_eh and new locking by Alan Cox <alan@lxorguk.ukuu.org.uk>
  *
  * Converted to EISA and generic DMA APIs by Marc Zyngier
  * <maz@wild-wind.fr.eu.org>, 4/2003.
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index f91f79c8007d..106c04d2d793 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -235,7 +235,7 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
 		uint32_t intmask_org;
 		int i, j;
 
-		acb->pmuA = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
+		acb->pmuA = pci_ioremap_bar(pdev, 0);
 		if (!acb->pmuA) {
 			printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n",
 							acb->host->host_no);
@@ -329,13 +329,11 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
 		reg = (struct MessageUnit_B *)(dma_coherent +
 		ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock));
 		acb->pmuB = reg;
-		mem_base0 = ioremap(pci_resource_start(pdev, 0),
-					pci_resource_len(pdev, 0));
+		mem_base0 = pci_ioremap_bar(pdev, 0);
 		if (!mem_base0)
 			goto out;
 
-		mem_base1 = ioremap(pci_resource_start(pdev, 2),
-					pci_resource_len(pdev, 2));
+		mem_base1 = pci_ioremap_bar(pdev, 2);
 		if (!mem_base1) {
 			iounmap(mem_base0);
 			goto out;
diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c
index 7d311541c76c..20ca0a6374b5 100644
--- a/drivers/scsi/atp870u.c
+++ b/drivers/scsi/atp870u.c
@@ -1,8 +1,8 @@
 /* 
  *  Copyright (C) 1997	Wu Ching Chen
  *  2.1.x update (C) 1998  Krzysztof G. Baranowski
- *  2.5.x update (C) 2002  Red Hat <alan@redhat.com>
- *  2.6.x update (C) 2004  Red Hat <alan@redhat.com>
+ *  2.5.x update (C) 2002  Red Hat
+ *  2.6.x update (C) 2004  Red Hat
  *
  * Marcelo Tosatti <marcelo@conectiva.com.br> : SMP fixes
  *
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
index 88ecf94ad979..af9725409f43 100644
--- a/drivers/scsi/ch.c
+++ b/drivers/scsi/ch.c
@@ -190,7 +190,7 @@ ch_do_scsi(scsi_changer *ch, unsigned char *cmd,
 
         result = scsi_execute_req(ch->device, cmd, direction, buffer,
 				  buflength, &sshdr, timeout * HZ,
-				  MAX_RETRIES);
+				  MAX_RETRIES, NULL);
 
 	dprintk("result: 0x%x\n",result);
 	if (driver_byte(result) & DRIVER_SENSE) {
diff --git a/drivers/scsi/cxgb3i/Kbuild b/drivers/scsi/cxgb3i/Kbuild
new file mode 100644
index 000000000000..ee7d6d2f9c3b
--- /dev/null
+++ b/drivers/scsi/cxgb3i/Kbuild
@@ -0,0 +1,4 @@
+EXTRA_CFLAGS += -I$(TOPDIR)/drivers/net/cxgb3
+
+cxgb3i-y := cxgb3i_init.o cxgb3i_iscsi.o cxgb3i_pdu.o cxgb3i_offload.o
+obj-$(CONFIG_SCSI_CXGB3_ISCSI) += cxgb3i_ddp.o cxgb3i.o
diff --git a/drivers/scsi/cxgb3i/Kconfig b/drivers/scsi/cxgb3i/Kconfig
new file mode 100644
index 000000000000..bfdcaf5c9c57
--- /dev/null
+++ b/drivers/scsi/cxgb3i/Kconfig
@@ -0,0 +1,7 @@
+config SCSI_CXGB3_ISCSI
+	tristate "Chelsio S3xx iSCSI support"
+	depends on CHELSIO_T3_DEPENDS
+	select CHELSIO_T3
+	select SCSI_ISCSI_ATTRS
+	---help---
+	This driver supports iSCSI offload for the Chelsio S3 series devices.
diff --git a/drivers/scsi/cxgb3i/cxgb3i.h b/drivers/scsi/cxgb3i/cxgb3i.h
new file mode 100644
index 000000000000..fde6e4c634e7
--- /dev/null
+++ b/drivers/scsi/cxgb3i/cxgb3i.h
@@ -0,0 +1,139 @@
+/*
+ * cxgb3i.h: Chelsio S3xx iSCSI driver.
+ *
+ * Copyright (c) 2008 Chelsio Communications, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Karen Xie (kxie@chelsio.com)
+ */
+
+#ifndef __CXGB3I_H__
+#define __CXGB3I_H__
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/scatterlist.h>
+#include <scsi/libiscsi_tcp.h>
+
+/* from cxgb3 LLD */
+#include "common.h"
+#include "t3_cpl.h"
+#include "t3cdev.h"
+#include "cxgb3_ctl_defs.h"
+#include "cxgb3_offload.h"
+#include "firmware_exports.h"
+
+#include "cxgb3i_offload.h"
+#include "cxgb3i_ddp.h"
+
+#define CXGB3I_SCSI_QDEPTH_DFLT	128
+#define CXGB3I_MAX_TARGET	CXGB3I_MAX_CONN
+#define CXGB3I_MAX_LUN		512
+#define ISCSI_PDU_NONPAYLOAD_MAX \
+	(sizeof(struct iscsi_hdr) + ISCSI_MAX_AHS_SIZE + 2*ISCSI_DIGEST_SIZE)
+
+struct cxgb3i_adapter;
+struct cxgb3i_hba;
+struct cxgb3i_endpoint;
+
+/**
+ * struct cxgb3i_hba - cxgb3i iscsi structure (per port)
+ *
+ * @snic:	cxgb3i adapter containing this port
+ * @ndev:	pointer to netdev structure
+ * @shost:	pointer to scsi host structure
+ */
+struct cxgb3i_hba {
+	struct cxgb3i_adapter *snic;
+	struct net_device *ndev;
+	struct Scsi_Host *shost;
+};
+
+/**
+ * struct cxgb3i_adapter - cxgb3i adapter structure (per pci)
+ *
+ * @listhead:	list head to link elements
+ * @lock:	lock for this structure
+ * @tdev:	pointer to t3cdev used by cxgb3 driver
+ * @pdev:	pointer to pci dev
+ * @hba_cnt:	# of hbas (the same as # of ports)
+ * @hba:	all the hbas on this adapter
+ * @tx_max_size: max. tx packet size supported
+ * @rx_max_size: max. rx packet size supported
+ * @tag_format: ddp tag format settings
+ */
+struct cxgb3i_adapter {
+	struct list_head list_head;
+	spinlock_t lock;
+	struct t3cdev *tdev;
+	struct pci_dev *pdev;
+	unsigned char hba_cnt;
+	struct cxgb3i_hba *hba[MAX_NPORTS];
+
+	unsigned int tx_max_size;
+	unsigned int rx_max_size;
+
+	struct cxgb3i_tag_format tag_format;
+};
+
+/**
+ * struct cxgb3i_conn - cxgb3i iscsi connection
+ *
+ * @listhead:	list head to link elements
+ * @cep:	pointer to iscsi_endpoint structure
+ * @conn:	pointer to iscsi_conn structure
+ * @hba:	pointer to the hba this conn. is going through
+ * @task_idx_bits: # of bits needed for session->cmds_max
+ */
+struct cxgb3i_conn {
+	struct list_head list_head;
+	struct cxgb3i_endpoint *cep;
+	struct iscsi_conn *conn;
+	struct cxgb3i_hba *hba;
+	unsigned int task_idx_bits;
+};
+
+/**
+ * struct cxgb3i_endpoint - iscsi tcp endpoint
+ *
+ * @c3cn:	the h/w tcp connection representation
+ * @hba:	pointer to the hba this conn. is going through
+ * @cconn:	pointer to the associated cxgb3i iscsi connection
+ */
+struct cxgb3i_endpoint {
+	struct s3_conn *c3cn;
+	struct cxgb3i_hba *hba;
+	struct cxgb3i_conn *cconn;
+};
+
+int cxgb3i_iscsi_init(void);
+void cxgb3i_iscsi_cleanup(void);
+
+struct cxgb3i_adapter *cxgb3i_adapter_add(struct t3cdev *);
+void cxgb3i_adapter_remove(struct t3cdev *);
+int cxgb3i_adapter_ulp_init(struct cxgb3i_adapter *);
+void cxgb3i_adapter_ulp_cleanup(struct cxgb3i_adapter *);
+
+struct cxgb3i_hba *cxgb3i_hba_find_by_netdev(struct net_device *);
+struct cxgb3i_hba *cxgb3i_hba_host_add(struct cxgb3i_adapter *,
+				       struct net_device *);
+void cxgb3i_hba_host_remove(struct cxgb3i_hba *);
+
+int cxgb3i_pdu_init(void);
+void cxgb3i_pdu_cleanup(void);
+void cxgb3i_conn_cleanup_task(struct iscsi_task *);
+int cxgb3i_conn_alloc_pdu(struct iscsi_task *, u8);
+int cxgb3i_conn_init_pdu(struct iscsi_task *, unsigned int, unsigned int);
+int cxgb3i_conn_xmit_pdu(struct iscsi_task *);
+
+void cxgb3i_release_itt(struct iscsi_task *task, itt_t hdr_itt);
+int cxgb3i_reserve_itt(struct iscsi_task *task, itt_t *hdr_itt);
+
+#endif
diff --git a/drivers/scsi/cxgb3i/cxgb3i_ddp.c b/drivers/scsi/cxgb3i/cxgb3i_ddp.c
new file mode 100644
index 000000000000..1a41f04264f7
--- /dev/null
+++ b/drivers/scsi/cxgb3i/cxgb3i_ddp.c
@@ -0,0 +1,770 @@
+/*
+ * cxgb3i_ddp.c: Chelsio S3xx iSCSI DDP Manager.
+ *
+ * Copyright (c) 2008 Chelsio Communications, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Karen Xie (kxie@chelsio.com)
+ */
+
+#include <linux/skbuff.h>
+
+/* from cxgb3 LLD */
+#include "common.h"
+#include "t3_cpl.h"
+#include "t3cdev.h"
+#include "cxgb3_ctl_defs.h"
+#include "cxgb3_offload.h"
+#include "firmware_exports.h"
+
+#include "cxgb3i_ddp.h"
+
+#define DRV_MODULE_NAME         "cxgb3i_ddp"
+#define DRV_MODULE_VERSION      "1.0.0"
+#define DRV_MODULE_RELDATE      "Dec. 1, 2008"
+
+static char version[] =
+	"Chelsio S3xx iSCSI DDP " DRV_MODULE_NAME
+	" v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
+
+MODULE_AUTHOR("Karen Xie <kxie@chelsio.com>");
+MODULE_DESCRIPTION("cxgb3i ddp pagepod manager");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+
+#define ddp_log_error(fmt...) printk(KERN_ERR "cxgb3i_ddp: ERR! " fmt)
+#define ddp_log_warn(fmt...)  printk(KERN_WARNING "cxgb3i_ddp: WARN! " fmt)
+#define ddp_log_info(fmt...)  printk(KERN_INFO "cxgb3i_ddp: " fmt)
+
+#ifdef __DEBUG_CXGB3I_DDP__
+#define ddp_log_debug(fmt, args...) \
+	printk(KERN_INFO "cxgb3i_ddp: %s - " fmt, __func__ , ## args)
+#else
+#define ddp_log_debug(fmt...)
+#endif
+
+/*
+ * iSCSI Direct Data Placement
+ *
+ * T3 h/w can directly place the iSCSI Data-In or Data-Out PDU's payload into
+ * pre-posted final destination host-memory buffers based on the Initiator
+ * Task Tag (ITT) in Data-In or Target Task Tag (TTT) in Data-Out PDUs.
+ *
+ * The host memory address is programmed into h/w in the format of pagepod
+ * entries.
+ * The location of the pagepod entry is encoded into the ddp tag, which is
+ * used as, or as the base for, the ITT/TTT.
+ */
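A minimal sketch of the tag composition described above, using the PPOD_*
constants defined in cxgb3i_ddp.h further below; the helper name is
illustrative, not part of the patch:

	/* illustrative only: pagepod index packed above the 6-bit color */
	static u32 example_ddp_tag(u32 ppod_idx, u32 color)
	{
		return (ppod_idx << PPOD_IDX_SHIFT) | (color & PPOD_COLOR_MASK);
	}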
+
+#define DDP_PGIDX_MAX		4
+#define DDP_THRESHOLD	2048
+static unsigned char ddp_page_order[DDP_PGIDX_MAX] = {0, 1, 2, 4};
+static unsigned char ddp_page_shift[DDP_PGIDX_MAX] = {12, 13, 14, 16};
+static unsigned char page_idx = DDP_PGIDX_MAX;
+
+static LIST_HEAD(cxgb3i_ddp_list);
+static DEFINE_RWLOCK(cxgb3i_ddp_rwlock);
+
+/*
+ * functions to program the pagepod in h/w
+ */
+static inline void ulp_mem_io_set_hdr(struct sk_buff *skb, unsigned int addr)
+{
+	struct ulp_mem_io *req = (struct ulp_mem_io *)skb->head;
+
+	req->wr.wr_lo = 0;
+	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_BYPASS));
+	req->cmd_lock_addr = htonl(V_ULP_MEMIO_ADDR(addr >> 5) |
+				   V_ULPTX_CMD(ULP_MEM_WRITE));
+	req->len = htonl(V_ULP_MEMIO_DATA_LEN(PPOD_SIZE >> 5) |
+			 V_ULPTX_NFLITS((PPOD_SIZE >> 3) + 1));
+}
+
+static int set_ddp_map(struct cxgb3i_ddp_info *ddp, struct pagepod_hdr *hdr,
+		       unsigned int idx, unsigned int npods,
+		       struct cxgb3i_gather_list *gl)
+{
+	unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ddp->llimit;
+	int i;
+
+	for (i = 0; i < npods; i++, idx++, pm_addr += PPOD_SIZE) {
+		struct sk_buff *skb = ddp->gl_skb[idx];
+		struct pagepod *ppod;
+		int j, pidx;
+
+		/* hold on to the skb until we clear the ddp mapping */
+		skb_get(skb);
+
+		ulp_mem_io_set_hdr(skb, pm_addr);
+		ppod = (struct pagepod *)
+		       (skb->head + sizeof(struct ulp_mem_io));
+		memcpy(&(ppod->hdr), hdr, sizeof(struct pagepod_hdr));
+		for (pidx = 4 * i, j = 0; j < 5; ++j, ++pidx)
+			ppod->addr[j] = pidx < gl->nelem ?
+				     cpu_to_be64(gl->phys_addr[pidx]) : 0UL;
+
+		skb->priority = CPL_PRIORITY_CONTROL;
+		cxgb3_ofld_send(ddp->tdev, skb);
+	}
+	return 0;
+}
+
+static int clear_ddp_map(struct cxgb3i_ddp_info *ddp, unsigned int idx,
+			 unsigned int npods)
+{
+	unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ddp->llimit;
+	int i;
+
+	for (i = 0; i < npods; i++, idx++, pm_addr += PPOD_SIZE) {
+		struct sk_buff *skb = ddp->gl_skb[idx];
+
+		ddp->gl_skb[idx] = NULL;
+		memset((skb->head + sizeof(struct ulp_mem_io)), 0, PPOD_SIZE);
+		ulp_mem_io_set_hdr(skb, pm_addr);
+		skb->priority = CPL_PRIORITY_CONTROL;
+		cxgb3_ofld_send(ddp->tdev, skb);
+	}
+	return 0;
+}
+
+static inline int ddp_find_unused_entries(struct cxgb3i_ddp_info *ddp,
+					  int start, int max, int count,
+					  struct cxgb3i_gather_list *gl)
+{
+	unsigned int i, j;
+
+	spin_lock(&ddp->map_lock);
+	for (i = start; i <= max;) {
+		for (j = 0; j < count; j++) {
+			if (ddp->gl_map[i + j])
+				break;
+		}
+		if (j == count) {
+			for (j = 0; j < count; j++)
+				ddp->gl_map[i + j] = gl;
+			spin_unlock(&ddp->map_lock);
+			return i;
+		}
+		i += j + 1;
+	}
+	spin_unlock(&ddp->map_lock);
+	return -EBUSY;
+}
+
+static inline void ddp_unmark_entries(struct cxgb3i_ddp_info *ddp,
+				      int start, int count)
+{
+	spin_lock(&ddp->map_lock);
+	memset(&ddp->gl_map[start], 0,
+	       count * sizeof(struct cxgb3i_gather_list *));
+	spin_unlock(&ddp->map_lock);
+}
+
+static inline void ddp_free_gl_skb(struct cxgb3i_ddp_info *ddp,
+				   int idx, int count)
+{
+	int i;
+
+	for (i = 0; i < count; i++, idx++)
+		if (ddp->gl_skb[idx]) {
+			kfree_skb(ddp->gl_skb[idx]);
+			ddp->gl_skb[idx] = NULL;
+		}
+}
+
+static inline int ddp_alloc_gl_skb(struct cxgb3i_ddp_info *ddp, int idx,
+				   int count, gfp_t gfp)
+{
+	int i;
+
+	for (i = 0; i < count; i++) {
+		struct sk_buff *skb = alloc_skb(sizeof(struct ulp_mem_io) +
+						PPOD_SIZE, gfp);
+		if (skb) {
+			ddp->gl_skb[idx + i] = skb;
+			skb_put(skb, sizeof(struct ulp_mem_io) + PPOD_SIZE);
+		} else {
+			ddp_free_gl_skb(ddp, idx, i);
+			return -ENOMEM;
+		}
+	}
+	return 0;
+}
+
+/**
+ * cxgb3i_ddp_find_page_index - return ddp page index for a given page size.
+ * @pgsz: page size
+ * return the ddp page index; if no match is found, return DDP_PGIDX_MAX.
+ */
+int cxgb3i_ddp_find_page_index(unsigned long pgsz)
+{
+	int i;
+
+	for (i = 0; i < DDP_PGIDX_MAX; i++) {
+		if (pgsz == (1UL << ddp_page_shift[i]))
+			return i;
+	}
+	ddp_log_debug("ddp page size 0x%lx not supported.\n", pgsz);
+	return DDP_PGIDX_MAX;
+}
+EXPORT_SYMBOL_GPL(cxgb3i_ddp_find_page_index);
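For example, with ddp_page_shift[] = {12, 13, 14, 16} above, a short usage
sketch (return values shown in the comments):

	/* ddp_page_shift = {12, 13, 14, 16} => 4KB, 8KB, 16KB, 64KB */
	idx = cxgb3i_ddp_find_page_index(4096);		/* -> 0 */
	idx = cxgb3i_ddp_find_page_index(65536);	/* -> 3 */
	idx = cxgb3i_ddp_find_page_index(2048);		/* -> DDP_PGIDX_MAX, no match */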
+
+static inline void ddp_gl_unmap(struct pci_dev *pdev,
+				struct cxgb3i_gather_list *gl)
+{
+	int i;
+
+	for (i = 0; i < gl->nelem; i++)
+		pci_unmap_page(pdev, gl->phys_addr[i], PAGE_SIZE,
+			       PCI_DMA_FROMDEVICE);
+}
+
+static inline int ddp_gl_map(struct pci_dev *pdev,
+			     struct cxgb3i_gather_list *gl)
+{
+	int i;
+
+	for (i = 0; i < gl->nelem; i++) {
+		gl->phys_addr[i] = pci_map_page(pdev, gl->pages[i], 0,
+						PAGE_SIZE,
+						PCI_DMA_FROMDEVICE);
+		if (unlikely(pci_dma_mapping_error(pdev, gl->phys_addr[i])))
+			goto unmap;
+	}
+
+	return i;
+
+unmap:
+	if (i) {
+		unsigned int nelem = gl->nelem;
+
+		gl->nelem = i;
+		ddp_gl_unmap(pdev, gl);
+		gl->nelem = nelem;
+	}
+	return -ENOMEM;
+}
+
+/**
+ * cxgb3i_ddp_make_gl - build ddp page buffer list
+ * @xferlen: total buffer length
+ * @sgl: page buffer scatter-gather list
+ * @sgcnt: # of page buffers
+ * @pdev: pci_dev, used for pci map
+ * @gfp: allocation mode
+ *
+ * Construct a ddp page buffer list from the scsi scatter-gather list:
+ * coalesce buffers as much as possible, and obtain dma addresses for
+ * each page.
+ *
+ * Return the cxgb3i_gather_list constructed from the page buffers if the
+ * memory can be used for ddp. Return NULL otherwise.
+ */
+struct cxgb3i_gather_list *cxgb3i_ddp_make_gl(unsigned int xferlen,
+					      struct scatterlist *sgl,
+					      unsigned int sgcnt,
+					      struct pci_dev *pdev,
+					      gfp_t gfp)
+{
+	struct cxgb3i_gather_list *gl;
+	struct scatterlist *sg = sgl;
+	struct page *sgpage = sg_page(sg);
+	unsigned int sglen = sg->length;
+	unsigned int sgoffset = sg->offset;
+	unsigned int npages = (xferlen + sgoffset + PAGE_SIZE - 1) >>
+			      PAGE_SHIFT;
+	int i = 1, j = 0;
+
+	if (xferlen < DDP_THRESHOLD) {
+		ddp_log_debug("xfer %u < threshold %u, no ddp.\n",
+			      xferlen, DDP_THRESHOLD);
+		return NULL;
+	}
+
+	gl = kzalloc(sizeof(struct cxgb3i_gather_list) +
+		     npages * (sizeof(dma_addr_t) + sizeof(struct page *)),
+		     gfp);
+	if (!gl)
+		return NULL;
+
+	gl->pages = (struct page **)&gl->phys_addr[npages];
+	gl->length = xferlen;
+	gl->offset = sgoffset;
+	gl->pages[0] = sgpage;
+
+	sg = sg_next(sg);
+	while (sg) {
+		struct page *page = sg_page(sg);
+
+		if (sgpage == page && sg->offset == sgoffset + sglen)
+			sglen += sg->length;
+		else {
+			/* make sure the sgl is fit for ddp:
+			 * each has the same page size, and
+			 * all of the middle pages are used completely
+			 */
+			if ((j && sgoffset) ||
+			    ((i != sgcnt - 1) &&
+			     ((sglen + sgoffset) & ~PAGE_MASK)))
+				goto error_out;
+
+			j++;
+			/* bound by the page count, not nelem (still 0 here) */
+			if (j == npages || sg->offset)
+				goto error_out;
+			gl->pages[j] = page;
+			sglen = sg->length;
+			sgoffset = sg->offset;
+			sgpage = page;
+		}
+		i++;
+		sg = sg_next(sg);
+	}
+	gl->nelem = ++j;
+
+	if (ddp_gl_map(pdev, gl) < 0)
+		goto error_out;
+
+	return gl;
+
+error_out:
+	kfree(gl);
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(cxgb3i_ddp_make_gl);
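An illustration of the "fit for ddp" rule enforced above (not from the patch
itself):

	/*
	 * After coalescing physically contiguous entries, the chunks must
	 * tile whole pages: only the first chunk may start at a non-zero
	 * offset and only the last may end short of a page boundary.
	 *
	 *   usable:      [off..4K) [0..4K) [0..4K) [0..len)
	 *   not usable:  [off..4K) [1K..4K) ...  (middle chunk starts mid-page)
	 */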
+
+/**
+ * cxgb3i_ddp_release_gl - release a page buffer list
+ * @gl: a ddp page buffer list
+ * @pdev: pci_dev used for pci_unmap
+ * free a ddp page buffer list returned by cxgb3i_ddp_make_gl().
+ */
+void cxgb3i_ddp_release_gl(struct cxgb3i_gather_list *gl,
+			   struct pci_dev *pdev)
+{
+	ddp_gl_unmap(pdev, gl);
+	kfree(gl);
+}
+EXPORT_SYMBOL_GPL(cxgb3i_ddp_release_gl);
+
+/**
+ * cxgb3i_ddp_tag_reserve - set up ddp for a data transfer
+ * @tdev: t3cdev adapter
+ * @tid: connection id
+ * @tformat: tag format
+ * @tagp: the s/w tag; on successful ddp setup it is updated with the
+ *	  ddp/hw tag
+ * @gl: the page memory list
+ * @gfp: allocation mode
+ *
+ * Set up ddp for the given page buffer list and construct the ddp tag.
+ * Returns 0 on success, < 0 otherwise.
+ */
+int cxgb3i_ddp_tag_reserve(struct t3cdev *tdev, unsigned int tid,
+			   struct cxgb3i_tag_format *tformat, u32 *tagp,
+			   struct cxgb3i_gather_list *gl, gfp_t gfp)
+{
+	struct cxgb3i_ddp_info *ddp = tdev->ulp_iscsi;
+	struct pagepod_hdr hdr;
+	unsigned int npods;
+	int idx = -1, idx_max;
+	int err = -ENOMEM;
+	u32 sw_tag = *tagp;
+	u32 tag;
+
+	if (page_idx >= DDP_PGIDX_MAX || !ddp || !gl || !gl->nelem ||
+		gl->length < DDP_THRESHOLD) {
+		/* gl may be NULL here, don't dereference it in the log */
+		ddp_log_debug("pgidx %u, xfer %u/%u, NO ddp.\n",
+			      page_idx, gl ? gl->length : 0, DDP_THRESHOLD);
+		return -EINVAL;
+	}
+
+	npods = (gl->nelem + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT;
+	idx_max = ddp->nppods - npods + 1;
+
+	if (ddp->idx_last == ddp->nppods)
+		idx = ddp_find_unused_entries(ddp, 0, idx_max, npods, gl);
+	else {
+		idx = ddp_find_unused_entries(ddp, ddp->idx_last + 1,
+					      idx_max, npods, gl);
+		if (idx < 0 && ddp->idx_last >= npods)
+			idx = ddp_find_unused_entries(ddp, 0,
+						      ddp->idx_last - npods + 1,
+						      npods, gl);
+	}
+	if (idx < 0) {
+		ddp_log_debug("xferlen %u, gl %u, npods %u NO DDP.\n",
+			      gl->length, gl->nelem, npods);
+		return idx;
+	}
+
+	err = ddp_alloc_gl_skb(ddp, idx, npods, gfp);
+	if (err < 0)
+		goto unmark_entries;
+
+	tag = cxgb3i_ddp_tag_base(tformat, sw_tag);
+	tag |= idx << PPOD_IDX_SHIFT;
+
+	hdr.rsvd = 0;
+	hdr.vld_tid = htonl(F_PPOD_VALID | V_PPOD_TID(tid));
+	hdr.pgsz_tag_clr = htonl(tag & ddp->rsvd_tag_mask);
+	hdr.maxoffset = htonl(gl->length);
+	hdr.pgoffset = htonl(gl->offset);
+
+	err = set_ddp_map(ddp, &hdr, idx, npods, gl);
+	if (err < 0)
+		goto free_gl_skb;
+
+	ddp->idx_last = idx;
+	ddp_log_debug("xfer %u, gl %u,%u, tid 0x%x, 0x%x -> 0x%x(%u,%u).\n",
+		      gl->length, gl->nelem, gl->offset, tid, sw_tag, tag,
+		      idx, npods);
+	*tagp = tag;
+	return 0;
+
+free_gl_skb:
+	ddp_free_gl_skb(ddp, idx, npods);
+unmark_entries:
+	ddp_unmark_entries(ddp, idx, npods);
+	return err;
+}
+EXPORT_SYMBOL_GPL(cxgb3i_ddp_tag_reserve);
+
+/**
+ * cxgb3i_ddp_tag_release - release a ddp tag
+ * @tdev: t3cdev adapter
+ * @tag: ddp tag
+ * ddp cleanup for a given ddp tag and release all the resources held
+ */
+void cxgb3i_ddp_tag_release(struct t3cdev *tdev, u32 tag)
+{
+	struct cxgb3i_ddp_info *ddp = tdev->ulp_iscsi;
+	u32 idx;
+
+	if (!ddp) {
+		ddp_log_error("release ddp tag 0x%x, ddp NULL.\n", tag);
+		return;
+	}
+
+	idx = (tag >> PPOD_IDX_SHIFT) & ddp->idx_mask;
+	if (idx < ddp->nppods) {
+		struct cxgb3i_gather_list *gl = ddp->gl_map[idx];
+		unsigned int npods;
+
+		if (!gl) {
+			ddp_log_error("release ddp 0x%x, idx 0x%x, gl NULL.\n",
+				      tag, idx);
+			return;
+		}
+		npods = (gl->nelem + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT;
+		ddp_log_debug("ddp tag 0x%x, release idx 0x%x, npods %u.\n",
+			      tag, idx, npods);
+		clear_ddp_map(ddp, idx, npods);
+		ddp_unmark_entries(ddp, idx, npods);
+		cxgb3i_ddp_release_gl(gl, ddp->pdev);
+	} else
+		ddp_log_error("ddp tag 0x%x, idx 0x%x > max 0x%x.\n",
+			      tag, idx, ddp->nppods);
+}
+EXPORT_SYMBOL_GPL(cxgb3i_ddp_tag_release);
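A minimal usage sketch of the reserve/release pairing; the helper and its
caller-supplied arguments are hypothetical:

	static int example_setup_ddp(struct t3cdev *tdev, unsigned int tid,
				     struct cxgb3i_tag_format *tformat,
				     struct cxgb3i_gather_list *gl, u32 sw_tag)
	{
		u32 tag = sw_tag;
		int err = cxgb3i_ddp_tag_reserve(tdev, tid, tformat, &tag,
						 gl, GFP_ATOMIC);
		if (err < 0)
			return err;	/* fall back to non-ddp I/O */

		/* ... use tag as the ITT for this data transfer ... */

		cxgb3i_ddp_tag_release(tdev, tag);	/* also releases gl */
		return 0;
	}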
+
+static int setup_conn_pgidx(struct t3cdev *tdev, unsigned int tid, int pg_idx,
+			    int reply)
+{
+	struct sk_buff *skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
+					GFP_KERNEL);
+	struct cpl_set_tcb_field *req;
+	u64 val = pg_idx < DDP_PGIDX_MAX ? pg_idx : 0;
+
+	if (!skb)
+		return -ENOMEM;
+
+	/* set up ulp submode and page size */
+	req = (struct cpl_set_tcb_field *)skb_put(skb, sizeof(*req));
+	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
+	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
+	req->reply = V_NO_REPLY(reply ? 0 : 1);
+	req->cpu_idx = 0;
+	req->word = htons(31);
+	req->mask = cpu_to_be64(0xF0000000);
+	req->val = cpu_to_be64(val << 28);
+	skb->priority = CPL_PRIORITY_CONTROL;
+
+	cxgb3_ofld_send(tdev, skb);
+	return 0;
+}
+
+/**
+ * cxgb3i_setup_conn_host_pagesize - set up the conn.'s ddp page size
+ * @tdev: t3cdev adapter
+ * @tid: connection id
+ * @reply: request reply from h/w
+ * set up the ddp page size based on the host PAGE_SIZE for a connection
+ * identified by tid
+ */
+int cxgb3i_setup_conn_host_pagesize(struct t3cdev *tdev, unsigned int tid,
+				    int reply)
+{
+	return setup_conn_pgidx(tdev, tid, page_idx, reply);
+}
+EXPORT_SYMBOL_GPL(cxgb3i_setup_conn_host_pagesize);
+
+/**
+ * cxgb3i_setup_conn_pagesize - set up the conn.'s ddp page size
+ * @tdev: t3cdev adapter
+ * @tid: connection id
+ * @reply: request reply from h/w
+ * @pgsz: ddp page size
+ * set up the ddp page size for a connection identified by tid
+ */
+int cxgb3i_setup_conn_pagesize(struct t3cdev *tdev, unsigned int tid,
+				int reply, unsigned long pgsz)
+{
+	int pgidx = cxgb3i_ddp_find_page_index(pgsz);
+
+	return setup_conn_pgidx(tdev, tid, pgidx, reply);
+}
+EXPORT_SYMBOL_GPL(cxgb3i_setup_conn_pagesize);
+
+/**
+ * cxgb3i_setup_conn_digest - set up the conn.'s iscsi digest settings
+ * @tdev: t3cdev adapter
+ * @tid: connection id
+ * @hcrc: header digest enabled
+ * @dcrc: data digest enabled
+ * @reply: request reply from h/w
+ * set up the iscsi digest settings for a connection identified by tid
+ */
+int cxgb3i_setup_conn_digest(struct t3cdev *tdev, unsigned int tid,
+			     int hcrc, int dcrc, int reply)
+{
+	struct sk_buff *skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
+					GFP_KERNEL);
+	struct cpl_set_tcb_field *req;
+	u64 val = (hcrc ? 1 : 0) | (dcrc ? 2 : 0);
+
+	if (!skb)
+		return -ENOMEM;
+
+	/* set up ulp submode and page size */
+	req = (struct cpl_set_tcb_field *)skb_put(skb, sizeof(*req));
+	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
+	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
+	req->reply = V_NO_REPLY(reply ? 0 : 1);
+	req->cpu_idx = 0;
+	req->word = htons(31);
+	req->mask = cpu_to_be64(0x0F000000);
+	req->val = cpu_to_be64(val << 24);
+	skb->priority = CPL_PRIORITY_CONTROL;
+
+	cxgb3_ofld_send(tdev, skb);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(cxgb3i_setup_conn_digest);
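Both helpers above program fields of the same TCB word (word 31); the layout
below is inferred from their masks and shift values:

	/*
	 * TCB word 31, as written via CPL_SET_TCB_FIELD:
	 *   bits 31:28 (mask 0xF0000000) - ddp page size index
	 *   bits 27:24 (mask 0x0F000000) - ulp submode: bit 0 hcrc, bit 1 dcrc
	 */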
+
+static int ddp_init(struct t3cdev *tdev)
+{
+	struct cxgb3i_ddp_info *ddp;
+	struct ulp_iscsi_info uinfo;
+	unsigned int ppmax, bits;
+	int i, err;
+	static int vers_printed;
+
+	if (!vers_printed) {
+		printk(KERN_INFO "%s", version);
+		vers_printed = 1;
+	}
+
+	err = tdev->ctl(tdev, ULP_ISCSI_GET_PARAMS, &uinfo);
+	if (err < 0) {
+		ddp_log_error("%s, failed to get iscsi param err=%d.\n",
+				 tdev->name, err);
+		return err;
+	}
+
+	ppmax = (uinfo.ulimit - uinfo.llimit + 1) >> PPOD_SIZE_SHIFT;
+	bits = __ilog2_u32(ppmax) + 1;
+	if (bits > PPOD_IDX_MAX_SIZE)
+		bits = PPOD_IDX_MAX_SIZE;
+	ppmax = (1 << (bits - 1)) - 1;
+
+	ddp = cxgb3i_alloc_big_mem(sizeof(struct cxgb3i_ddp_info) +
+				   ppmax *
+					(sizeof(struct cxgb3i_gather_list *) +
+					sizeof(struct sk_buff *)),
+				   GFP_KERNEL);
+	if (!ddp) {
+		ddp_log_warn("%s unable to alloc ddp %u, ddp disabled.\n",
+			     tdev->name, ppmax);
+		return 0;
+	}
+	ddp->gl_map = (struct cxgb3i_gather_list **)(ddp + 1);
+	ddp->gl_skb = (struct sk_buff **)(((char *)ddp->gl_map) +
+					  ppmax *
+					  sizeof(struct cxgb3i_gather_list *));
+	spin_lock_init(&ddp->map_lock);
+
+	ddp->tdev = tdev;
+	ddp->pdev = uinfo.pdev;
+	ddp->max_txsz = min_t(unsigned int, uinfo.max_txsz, ULP2_MAX_PKT_SIZE);
+	ddp->max_rxsz = min_t(unsigned int, uinfo.max_rxsz, ULP2_MAX_PKT_SIZE);
+	ddp->llimit = uinfo.llimit;
+	ddp->ulimit = uinfo.ulimit;
+	ddp->nppods = ppmax;
+	ddp->idx_last = ppmax;
+	ddp->idx_bits = bits;
+	ddp->idx_mask = (1 << bits) - 1;
+	ddp->rsvd_tag_mask = (1 << (bits + PPOD_IDX_SHIFT)) - 1;
+
+	uinfo.tagmask = ddp->idx_mask << PPOD_IDX_SHIFT;
+	for (i = 0; i < DDP_PGIDX_MAX; i++)
+		uinfo.pgsz_factor[i] = ddp_page_order[i];
+	uinfo.ulimit = uinfo.llimit + (ppmax << PPOD_SIZE_SHIFT);
+
+	err = tdev->ctl(tdev, ULP_ISCSI_SET_PARAMS, &uinfo);
+	if (err < 0) {
+		ddp_log_warn("%s unable to set iscsi param err=%d, "
+			      "ddp disabled.\n", tdev->name, err);
+		goto free_ddp_map;
+	}
+
+	tdev->ulp_iscsi = ddp;
+
+	/* add to the list */
+	write_lock(&cxgb3i_ddp_rwlock);
+	list_add_tail(&ddp->list, &cxgb3i_ddp_list);
+	write_unlock(&cxgb3i_ddp_rwlock);
+
+	ddp_log_info("nppods %u (0x%x ~ 0x%x), bits %u, mask 0x%x,0x%x "
+			"pkt %u,%u.\n",
+			ppmax, ddp->llimit, ddp->ulimit, ddp->idx_bits,
+			ddp->idx_mask, ddp->rsvd_tag_mask,
+			ddp->max_txsz, ddp->max_rxsz);
+	return 0;
+
+free_ddp_map:
+	cxgb3i_free_big_mem(ddp);
+	return err;
+}
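Worked numbers for the sizing logic above, assuming a hypothetical 4MB
pagepod region:

	/*
	 * ppmax = 4MB >> PPOD_SIZE_SHIFT(6)    = 65536 pods
	 * bits  = ilog2(65536) + 1             = 17  (<= PPOD_IDX_MAX_SIZE)
	 * ppmax = (1 << (17 - 1)) - 1          = 65535 usable entries
	 * idx_mask = (1 << 17) - 1             = 0x1ffff
	 * rsvd_tag_mask = (1 << (17 + 6)) - 1  = 0x7fffff
	 */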
+
+/**
+ * cxgb3i_adapter_ddp_init - initialize the adapter's ddp resource
+ * @tdev: t3cdev adapter
+ * @tformat: tag format
+ * @txsz: max tx pkt size, filled in by this func.
+ * @rxsz: max rx pkt size, filled in by this func.
+ * initialize the ddp pagepod manager for a given adapter if needed and
+ * set up the tag format for a given iscsi entity
+ */
+int cxgb3i_adapter_ddp_init(struct t3cdev *tdev,
+			    struct cxgb3i_tag_format *tformat,
+			    unsigned int *txsz, unsigned int *rxsz)
+{
+	struct cxgb3i_ddp_info *ddp;
+	unsigned char idx_bits;
+
+	if (!tformat)
+		return -EINVAL;
+
+	if (!tdev->ulp_iscsi) {
+		int err = ddp_init(tdev);
+		if (err < 0)
+			return err;
+	}
+	ddp = (struct cxgb3i_ddp_info *)tdev->ulp_iscsi;
+
+	idx_bits = 32 - tformat->sw_bits;
+	tformat->rsvd_bits = ddp->idx_bits;
+	tformat->rsvd_shift = PPOD_IDX_SHIFT;
+	tformat->rsvd_mask = (1 << tformat->rsvd_bits) - 1;
+
+	ddp_log_info("tag format: sw %u, rsvd %u,%u, mask 0x%x.\n",
+		      tformat->sw_bits, tformat->rsvd_bits,
+		      tformat->rsvd_shift, tformat->rsvd_mask);
+
+	*txsz = ddp->max_txsz;
+	*rxsz = ddp->max_rxsz;
+	ddp_log_info("ddp max pkt size: %u, %u.\n",
+		     ddp->max_txsz, ddp->max_rxsz);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(cxgb3i_adapter_ddp_init);
+
+static void ddp_release(struct cxgb3i_ddp_info *ddp)
+{
+	int i = 0;
+	struct t3cdev *tdev = ddp->tdev;
+
+	tdev->ulp_iscsi = NULL;
+	while (i < ddp->nppods) {
+		struct cxgb3i_gather_list *gl = ddp->gl_map[i];
+		if (gl) {
+			int npods = (gl->nelem + PPOD_PAGES_MAX - 1)
+				     >> PPOD_PAGES_SHIFT;
+
+			kfree(gl);
+			ddp_free_gl_skb(ddp, i, npods);
+		} else
+			i++;
+	}
+	cxgb3i_free_big_mem(ddp);
+}
+
+/**
+ * cxgb3i_adapter_ddp_cleanup - release the adapter's ddp resource
+ * @tdev: t3cdev adapter
+ * release all the resources held by the ddp pagepod manager for a given
+ * adapter if needed
+ */
+void cxgb3i_adapter_ddp_cleanup(struct t3cdev *tdev)
+{
+	struct cxgb3i_ddp_info *ddp, *found = NULL;
+
+	/* remove from the list; the list cursor is never NULL after
+	 * list_for_each_entry(), so track the match explicitly */
+	write_lock(&cxgb3i_ddp_rwlock);
+	list_for_each_entry(ddp, &cxgb3i_ddp_list, list) {
+		if (ddp->tdev == tdev) {
+			list_del(&ddp->list);
+			found = ddp;
+			break;
+		}
+	}
+	write_unlock(&cxgb3i_ddp_rwlock);
+
+	if (found)
+		ddp_release(found);
+}
+EXPORT_SYMBOL_GPL(cxgb3i_adapter_ddp_cleanup);
+
+/**
+ * cxgb3i_ddp_init_module - module init entry point
+ * initialize any driver-wide global data structures
+ */
+static int __init cxgb3i_ddp_init_module(void)
+{
+	page_idx = cxgb3i_ddp_find_page_index(PAGE_SIZE);
+	ddp_log_info("system PAGE_SIZE %lu, ddp idx %u.\n",
+		     PAGE_SIZE, page_idx);
+	return 0;
+}
+
+/**
+ * cxgb3i_ddp_exit_module - module cleanup/exit entry point
+ * go through the ddp list and release any resources held.
+ */
+static void __exit cxgb3i_ddp_exit_module(void)
+{
+	struct cxgb3i_ddp_info *ddp, *tmp;
+
+	/* release all ddp managers if there are any; the _safe variant is
+	 * needed because entries are deleted while walking the list */
+	write_lock(&cxgb3i_ddp_rwlock);
+	list_for_each_entry_safe(ddp, tmp, &cxgb3i_ddp_list, list) {
+		list_del(&ddp->list);
+		ddp_release(ddp);
+	}
+	write_unlock(&cxgb3i_ddp_rwlock);
+}
+
+module_init(cxgb3i_ddp_init_module);
+module_exit(cxgb3i_ddp_exit_module);
diff --git a/drivers/scsi/cxgb3i/cxgb3i_ddp.h b/drivers/scsi/cxgb3i/cxgb3i_ddp.h
new file mode 100644
index 000000000000..5c7c4d95c493
--- /dev/null
+++ b/drivers/scsi/cxgb3i/cxgb3i_ddp.h
@@ -0,0 +1,306 @@
+/*
+ * cxgb3i_ddp.h: Chelsio S3xx iSCSI DDP Manager.
+ *
+ * Copyright (c) 2008 Chelsio Communications, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Karen Xie (kxie@chelsio.com)
+ */
+
+#ifndef __CXGB3I_ULP2_DDP_H__
+#define __CXGB3I_ULP2_DDP_H__
+
+/**
+ * struct cxgb3i_tag_format - cxgb3i ulp tag format for an iscsi entity
+ *
+ * @sw_bits:	# of bits used by iscsi software layer
+ * @rsvd_bits:	# of bits used by h/w
+ * @rsvd_shift:	h/w bits shift left
+ * @rsvd_mask:	reserved bit mask
+ */
+struct cxgb3i_tag_format {
+	unsigned char sw_bits;
+	unsigned char rsvd_bits;
+	unsigned char rsvd_shift;
+	unsigned char filler[1];
+	u32 rsvd_mask;
+};
+
+/**
+ * struct cxgb3i_gather_list - cxgb3i direct data placement memory
+ *
+ * @tag:	ddp tag
+ * @length:	total data buffer length
+ * @offset:	initial offset to the 1st page
+ * @nelem:	# of pages
+ * @pages:	page pointers
+ * @phys_addr:	physical address
+ */
+struct cxgb3i_gather_list {
+	u32 tag;
+	unsigned int length;
+	unsigned int offset;
+	unsigned int nelem;
+	struct page **pages;
+	dma_addr_t phys_addr[0];
+};
+
+/**
+ * struct cxgb3i_ddp_info - cxgb3i direct data placement for pdu payload
+ *
+ * @list:	list head to link elements
+ * @tdev:	pointer to t3cdev used by cxgb3 driver
+ * @max_txsz:	max tx packet size for ddp
+ * @max_rxsz:	max rx packet size for ddp
+ * @llimit:	lower bound of the page pod memory
+ * @ulimit:	upper bound of the page pod memory
+ * @nppods:	# of page pod entries
+ * @idx_last:	page pod entry last used
+ * @idx_bits:	# of bits the pagepod index would take
+ * @idx_mask:	pagepod index mask
+ * @rsvd_tag_mask: tag mask
+ * @map_lock:	lock to synchronize access to the page pod map
+ * @gl_map:	ddp memory gather list
+ * @gl_skb:	skb used to program the pagepod
+ */
+struct cxgb3i_ddp_info {
+	struct list_head list;
+	struct t3cdev *tdev;
+	struct pci_dev *pdev;
+	unsigned int max_txsz;
+	unsigned int max_rxsz;
+	unsigned int llimit;
+	unsigned int ulimit;
+	unsigned int nppods;
+	unsigned int idx_last;
+	unsigned char idx_bits;
+	unsigned char filler[3];
+	u32 idx_mask;
+	u32 rsvd_tag_mask;
+	spinlock_t map_lock;
+	struct cxgb3i_gather_list **gl_map;
+	struct sk_buff **gl_skb;
+};
+
+#define ULP2_MAX_PKT_SIZE	16224
+#define ULP2_MAX_PDU_PAYLOAD	(ULP2_MAX_PKT_SIZE - ISCSI_PDU_NONPAYLOAD_MAX)
+#define PPOD_PAGES_MAX		4
+#define PPOD_PAGES_SHIFT	2	/* 4 pages per pod */
+
+/*
+ * struct pagepod_hdr, pagepod - pagepod format
+ */
+struct pagepod_hdr {
+	u32 vld_tid;
+	u32 pgsz_tag_clr;
+	u32 maxoffset;
+	u32 pgoffset;
+	u64 rsvd;
+};
+
+struct pagepod {
+	struct pagepod_hdr hdr;
+	u64 addr[PPOD_PAGES_MAX + 1];
+};
+
+#define PPOD_SIZE		sizeof(struct pagepod)	/* 64 */
+#define PPOD_SIZE_SHIFT		6
+
+#define PPOD_COLOR_SHIFT	0
+#define PPOD_COLOR_SIZE		6
+#define PPOD_COLOR_MASK		((1 << PPOD_COLOR_SIZE) - 1)
+
+#define PPOD_IDX_SHIFT		PPOD_COLOR_SIZE
+#define PPOD_IDX_MAX_SIZE	24
+
+#define S_PPOD_TID    0
+#define M_PPOD_TID    0xFFFFFF
+#define V_PPOD_TID(x) ((x) << S_PPOD_TID)
+
+#define S_PPOD_VALID    24
+#define V_PPOD_VALID(x) ((x) << S_PPOD_VALID)
+#define F_PPOD_VALID    V_PPOD_VALID(1U)
+
+#define S_PPOD_COLOR    0
+#define M_PPOD_COLOR    0x3F
+#define V_PPOD_COLOR(x) ((x) << S_PPOD_COLOR)
+
+#define S_PPOD_TAG    6
+#define M_PPOD_TAG    0xFFFFFF
+#define V_PPOD_TAG(x) ((x) << S_PPOD_TAG)
+
+#define S_PPOD_PGSZ    30
+#define M_PPOD_PGSZ    0x3
+#define V_PPOD_PGSZ(x) ((x) << S_PPOD_PGSZ)
+
+/*
+ * large memory chunk allocation/release
+ * use vmalloc() if kmalloc() fails
+ */
+static inline void *cxgb3i_alloc_big_mem(unsigned int size,
+					 gfp_t gfp)
+{
+	void *p = kmalloc(size, gfp);
+	if (!p)
+		p = vmalloc(size);
+	if (p)
+		memset(p, 0, size);
+	return p;
+}
+
+static inline void cxgb3i_free_big_mem(void *addr)
+{
+	if (is_vmalloc_addr(addr))
+		vfree(addr);
+	else
+		kfree(addr);
+}
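A short usage sketch for the pair above; the caller does not need to know
which allocator backed the memory:

	void *p = cxgb3i_alloc_big_mem(1 << 20, GFP_KERNEL); /* may fall back to vmalloc */
	if (p)
		cxgb3i_free_big_mem(p);	/* picks vfree()/kfree() automatically */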
+
+/*
+ * A cxgb3i ddp tag is 32 bits; it consists of reserved bits used by the h/w
+ * and non-reserved bits that can be used by the iscsi s/w.
+ * The reserved bits are identified by the rsvd_bits and rsvd_shift fields
+ * in struct cxgb3i_tag_format.
+ *
+ * The uppermost reserved bit can be used to check whether a tag is a ddp tag:
+ * 	if the bit is 0, the tag is a valid ddp tag
+ */
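A worked layout under hypothetical numbers (rsvd_bits = 10, rsvd_shift = 6,
so the topmost reserved bit is bit 15):

	/*
	 *   31 .......... 16 | 15 .......... 6 | 5 ....... 0
	 *   upper s/w bits   | h/w ppod index  | low s/w bits
	 *
	 * bit 15 clear => valid ddp tag (cxgb3i_is_ddp_tag())
	 * bit 15 set   => plain s/w tag (cxgb3i_set_non_ddp_tag())
	 */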
+
+/**
+ * cxgb3i_is_ddp_tag - check if a given tag is a hw/ddp tag
+ * @tformat: tag format information
+ * @tag: tag to be checked
+ *
+ * return true if the tag is a ddp tag, false otherwise.
+ */
+static inline int cxgb3i_is_ddp_tag(struct cxgb3i_tag_format *tformat, u32 tag)
+{
+	return !(tag & (1 << (tformat->rsvd_bits + tformat->rsvd_shift - 1)));
+}
+
+/**
+ * cxgb3i_sw_tag_usable - check if a given s/w tag has enough bits left for
+ *			  the reserved/hw bits
+ * @tformat: tag format information
+ * @sw_tag: s/w tag to be checked
+ *
+ * return true if the s/w tag has room left for the reserved/hw bits, false otherwise.
+ */
+static inline int cxgb3i_sw_tag_usable(struct cxgb3i_tag_format *tformat,
+					u32 sw_tag)
+{
+	sw_tag >>= (32 - tformat->rsvd_bits);
+	return !sw_tag;
+}
+
+/**
+ * cxgb3i_set_non_ddp_tag - mark a given s/w tag as an invalid ddp tag
+ * @tformat: tag format information
+ * @sw_tag: s/w tag to be checked
+ *
+ * Set the uppermost reserved bit to mark the tag as a non-ddp (s/w only) tag.
+ */
+static inline u32 cxgb3i_set_non_ddp_tag(struct cxgb3i_tag_format *tformat,
+					 u32 sw_tag)
+{
+	unsigned char shift = tformat->rsvd_bits + tformat->rsvd_shift - 1;
+	u32 mask = (1 << shift) - 1;
+
+	if (sw_tag && (sw_tag & ~mask)) {
+		u32 v1 = sw_tag & ((1 << shift) - 1);
+		u32 v2 = (sw_tag >> (shift - 1)) << shift;
+
+		return v2 | v1 | 1 << shift;
+	}
+	return sw_tag | 1 << shift;
+}
+
+/**
+ * cxgb3i_ddp_tag_base - shift the s/w tag bits so that reserved bits are not
+ *			 used.
+ * @tformat: tag format information
+ * @sw_tag: s/w tag to be checked
+ */
+static inline u32 cxgb3i_ddp_tag_base(struct cxgb3i_tag_format *tformat,
+				      u32 sw_tag)
+{
+	u32 mask = (1 << tformat->rsvd_shift) - 1;
+
+	if (sw_tag && (sw_tag & ~mask)) {
+		u32 v1 = sw_tag & mask;
+		u32 v2 = sw_tag >> tformat->rsvd_shift;
+
+		v2 <<= tformat->rsvd_shift + tformat->rsvd_bits;
+		return v2 | v1;
+	}
+	return sw_tag;
+}
+
+/**
+ * cxgb3i_tag_rsvd_bits - get the reserved bits used by the h/w
+ * @tformat: tag format information
+ * @tag: tag to be checked
+ *
+ * return the reserved bits in the tag
+ */
+static inline u32 cxgb3i_tag_rsvd_bits(struct cxgb3i_tag_format *tformat,
+				       u32 tag)
+{
+	if (cxgb3i_is_ddp_tag(tformat, tag))
+		return (tag >> tformat->rsvd_shift) & tformat->rsvd_mask;
+	return 0;
+}
+
+/**
+ * cxgb3i_tag_nonrsvd_bits - get the non-reserved bits used by the s/w
+ * @tformat: tag format information
+ * @tag: tag to be checked
+ *
+ * return the non-reserved bits in the tag.
+ */
+static inline u32 cxgb3i_tag_nonrsvd_bits(struct cxgb3i_tag_format *tformat,
+					  u32 tag)
+{
+	unsigned char shift = tformat->rsvd_bits + tformat->rsvd_shift - 1;
+	u32 v1, v2;
+
+	if (cxgb3i_is_ddp_tag(tformat, tag)) {
+		v1 = tag & ((1 << tformat->rsvd_shift) - 1);
+		v2 = (tag >> (shift + 1)) << tformat->rsvd_shift;
+	} else {
+		u32 mask = (1 << shift) - 1;
+
+		tag &= ~(1 << shift);
+		v1 = tag & mask;
+		v2 = (tag >> 1) & ~mask;
+	}
+	return v1 | v2;
+}
+
+int cxgb3i_ddp_tag_reserve(struct t3cdev *, unsigned int tid,
+			   struct cxgb3i_tag_format *, u32 *tag,
+			   struct cxgb3i_gather_list *, gfp_t gfp);
+void cxgb3i_ddp_tag_release(struct t3cdev *, u32 tag);
+
+struct cxgb3i_gather_list *cxgb3i_ddp_make_gl(unsigned int xferlen,
+				struct scatterlist *sgl,
+				unsigned int sgcnt,
+				struct pci_dev *pdev,
+				gfp_t gfp);
+void cxgb3i_ddp_release_gl(struct cxgb3i_gather_list *gl,
+				struct pci_dev *pdev);
+
+int cxgb3i_setup_conn_host_pagesize(struct t3cdev *, unsigned int tid,
+				    int reply);
+int cxgb3i_setup_conn_pagesize(struct t3cdev *, unsigned int tid, int reply,
+			       unsigned long pgsz);
+int cxgb3i_setup_conn_digest(struct t3cdev *, unsigned int tid,
+				int hcrc, int dcrc, int reply);
+int cxgb3i_ddp_find_page_index(unsigned long pgsz);
+int cxgb3i_adapter_ddp_init(struct t3cdev *, struct cxgb3i_tag_format *,
+			    unsigned int *txsz, unsigned int *rxsz);
+void cxgb3i_adapter_ddp_cleanup(struct t3cdev *);
+#endif
diff --git a/drivers/scsi/cxgb3i/cxgb3i_init.c b/drivers/scsi/cxgb3i/cxgb3i_init.c
new file mode 100644
index 000000000000..091ecb4d9f3d
--- /dev/null
+++ b/drivers/scsi/cxgb3i/cxgb3i_init.c
@@ -0,0 +1,107 @@
+/* cxgb3i_init.c: Chelsio S3xx iSCSI driver.
+ *
+ * Copyright (c) 2008 Chelsio Communications, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Karen Xie (kxie@chelsio.com)
+ */
+
+#include "cxgb3i.h"
+
+#define DRV_MODULE_NAME         "cxgb3i"
+#define DRV_MODULE_VERSION	"1.0.0"
+#define DRV_MODULE_RELDATE	"Jun. 1, 2008"
+
+static char version[] =
+	"Chelsio S3xx iSCSI Driver " DRV_MODULE_NAME
+	" v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
+
+MODULE_AUTHOR("Karen Xie <kxie@chelsio.com>");
+MODULE_DESCRIPTION("Chelsio S3xx iSCSI Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+
+static void open_s3_dev(struct t3cdev *);
+static void close_s3_dev(struct t3cdev *);
+
+static cxgb3_cpl_handler_func cxgb3i_cpl_handlers[NUM_CPL_CMDS];
+static struct cxgb3_client t3c_client = {
+	.name = "iscsi_cxgb3",
+	.handlers = cxgb3i_cpl_handlers,
+	.add = open_s3_dev,
+	.remove = close_s3_dev,
+};
+
+/**
+ * open_s3_dev - register with cxgb3 LLD
+ * @t3dev:	cxgb3 adapter instance
+ */
+static void open_s3_dev(struct t3cdev *t3dev)
+{
+	static int vers_printed;
+
+	if (!vers_printed) {
+		printk(KERN_INFO "%s", version);
+		vers_printed = 1;
+	}
+
+	cxgb3i_sdev_add(t3dev, &t3c_client);
+	cxgb3i_adapter_add(t3dev);
+}
+
+/**
+ * close_s3_dev - de-register with cxgb3 LLD
+ * @t3dev:	cxgb3 adapter instance
+ */
+static void close_s3_dev(struct t3cdev *t3dev)
+{
+	cxgb3i_adapter_remove(t3dev);
+	cxgb3i_sdev_remove(t3dev);
+}
+
+/**
+ * cxgb3i_init_module - module init entry point
+ *
+ * initialize any driver-wide global data structures and register itself
+ *	with the cxgb3 module
+ */
+static int __init cxgb3i_init_module(void)
+{
+	int err;
+
+	err = cxgb3i_sdev_init(cxgb3i_cpl_handlers);
+	if (err < 0)
+		return err;
+
+	err = cxgb3i_iscsi_init();
+	if (err < 0)
+		goto sdev_cleanup;
+
+	err = cxgb3i_pdu_init();
+	if (err < 0)
+		goto iscsi_cleanup;
+
+	cxgb3_register_client(&t3c_client);
+
+	return 0;
+
+iscsi_cleanup:
+	cxgb3i_iscsi_cleanup();
+sdev_cleanup:
+	cxgb3i_sdev_cleanup();
+	return err;
+}
+
+/**
+ * cxgb3i_exit_module - module cleanup/exit entry point
+ *
+ * go through the driver hba list and, for each hba, release any resources
+ *	held; then unregister the iscsi transport and the cxgb3 client
+ */
+static void __exit cxgb3i_exit_module(void)
+{
+	cxgb3_unregister_client(&t3c_client);
+	cxgb3i_pdu_cleanup();
+	cxgb3i_iscsi_cleanup();
+	cxgb3i_sdev_cleanup();
+}
+
+module_init(cxgb3i_init_module);
+module_exit(cxgb3i_exit_module);
diff --git a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
new file mode 100644
index 000000000000..d83464b9b3f9
--- /dev/null
+++ b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
@@ -0,0 +1,951 @@
+/* cxgb3i_iscsi.c: Chelsio S3xx iSCSI driver.
+ *
+ * Copyright (c) 2008 Chelsio Communications, Inc.
+ * Copyright (c) 2008 Mike Christie
+ * Copyright (c) 2008 Red Hat, Inc.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Karen Xie (kxie@chelsio.com)
+ */
+
+#include <linux/inet.h>
+#include <linux/crypto.h>
+#include <net/tcp.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi.h>
+#include <scsi/iscsi_proto.h>
+#include <scsi/libiscsi.h>
+#include <scsi/scsi_transport_iscsi.h>
+
+#include "cxgb3i.h"
+#include "cxgb3i_pdu.h"
+
+#ifdef __DEBUG_CXGB3I_TAG__
+#define cxgb3i_tag_debug	cxgb3i_log_debug
+#else
+#define cxgb3i_tag_debug(fmt...)
+#endif
+
+#ifdef __DEBUG_CXGB3I_API__
+#define cxgb3i_api_debug	cxgb3i_log_debug
+#else
+#define cxgb3i_api_debug(fmt...)
+#endif
+
+/*
+ * align pdu size to multiple of 512 for better performance
+ */
+#define align_pdu_size(n) do { n = (n) & (~511); } while (0)
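For instance, the macro rounds its argument down to a 512-byte multiple:

	/* n = 8700: 8700 & ~511 = 8192 */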
+
+static struct scsi_transport_template *cxgb3i_scsi_transport;
+static struct scsi_host_template cxgb3i_host_template;
+static struct iscsi_transport cxgb3i_iscsi_transport;
+static unsigned char sw_tag_idx_bits;
+static unsigned char sw_tag_age_bits;
+
+static LIST_HEAD(cxgb3i_snic_list);
+static DEFINE_RWLOCK(cxgb3i_snic_rwlock);
+
+/**
+ * cxgb3i_adapter_add - init a s3 adapter structure and any h/w settings
+ * @t3dev: t3cdev adapter
+ * return the resulting cxgb3i_adapter struct
+ */
+struct cxgb3i_adapter *cxgb3i_adapter_add(struct t3cdev *t3dev)
+{
+	struct cxgb3i_adapter *snic;
+	struct adapter *adapter = tdev2adap(t3dev);
+	int i;
+
+	snic = kzalloc(sizeof(*snic), GFP_KERNEL);
+	if (!snic) {
+		cxgb3i_api_debug("cxgb3 %s, OOM.\n", t3dev->name);
+		return NULL;
+	}
+	spin_lock_init(&snic->lock);
+
+	snic->tdev = t3dev;
+	snic->pdev = adapter->pdev;
+	snic->tag_format.sw_bits = sw_tag_idx_bits + sw_tag_age_bits;
+
+	if (cxgb3i_adapter_ddp_init(t3dev, &snic->tag_format,
+				    &snic->tx_max_size,
+				    &snic->rx_max_size) < 0)
+		goto free_snic;
+
+	for_each_port(adapter, i) {
+		snic->hba[i] = cxgb3i_hba_host_add(snic, adapter->port[i]);
+		if (!snic->hba[i])
+			goto ulp_cleanup;
+	}
+	snic->hba_cnt = adapter->params.nports;
+
+	/* add to the list */
+	write_lock(&cxgb3i_snic_rwlock);
+	list_add_tail(&snic->list_head, &cxgb3i_snic_list);
+	write_unlock(&cxgb3i_snic_rwlock);
+
+	return snic;
+
+ulp_cleanup:
+	cxgb3i_adapter_ddp_cleanup(t3dev);
+free_snic:
+	kfree(snic);
+	return NULL;
+}
+
+/**
+ * cxgb3i_adapter_remove - release all the resources held and cleanup any
+ *	h/w settings
+ * @t3dev: t3cdev adapter
+ */
+void cxgb3i_adapter_remove(struct t3cdev *t3dev)
+{
+	int i;
+	struct cxgb3i_adapter *snic, *found = NULL;
+
+	/* remove from the list; the list cursor is never NULL after
+	 * list_for_each_entry(), so track the match explicitly */
+	write_lock(&cxgb3i_snic_rwlock);
+	list_for_each_entry(snic, &cxgb3i_snic_list, list_head) {
+		if (snic->tdev == t3dev) {
+			list_del(&snic->list_head);
+			found = snic;
+			break;
+		}
+	}
+	write_unlock(&cxgb3i_snic_rwlock);
+
+	snic = found;
+	if (snic) {
+		for (i = 0; i < snic->hba_cnt; i++) {
+			if (snic->hba[i]) {
+				cxgb3i_hba_host_remove(snic->hba[i]);
+				snic->hba[i] = NULL;
+			}
+		}
+
+		/* release ddp resources */
+		cxgb3i_adapter_ddp_cleanup(snic->tdev);
+		kfree(snic);
+	}
+}
+
+/**
+ * cxgb3i_hba_find_by_netdev - find the cxgb3i_hba structure with a given
+ *	net_device
+ * @ndev: the net_device to search for
+ */
+struct cxgb3i_hba *cxgb3i_hba_find_by_netdev(struct net_device *ndev)
+{
+	struct cxgb3i_adapter *snic;
+	int i;
+
+	read_lock(&cxgb3i_snic_rwlock);
+	list_for_each_entry(snic, &cxgb3i_snic_list, list_head) {
+		for (i = 0; i < snic->hba_cnt; i++) {
+			if (snic->hba[i]->ndev == ndev) {
+				read_unlock(&cxgb3i_snic_rwlock);
+				return snic->hba[i];
+			}
+		}
+	}
+	read_unlock(&cxgb3i_snic_rwlock);
+	return NULL;
+}
+
+/**
+ * cxgb3i_hba_host_add - register a new host with scsi/iscsi
+ * @snic: the cxgb3i adapter
+ * @ndev: associated net_device
+ */
+struct cxgb3i_hba *cxgb3i_hba_host_add(struct cxgb3i_adapter *snic,
+				       struct net_device *ndev)
+{
+	struct cxgb3i_hba *hba;
+	struct Scsi_Host *shost;
+	int err;
+
+	shost = iscsi_host_alloc(&cxgb3i_host_template,
+				 sizeof(struct cxgb3i_hba),
+				 CXGB3I_SCSI_QDEPTH_DFLT);
+	if (!shost) {
+		cxgb3i_log_info("iscsi_host_alloc failed.\n");
+		return NULL;
+	}
+
+	shost->transportt = cxgb3i_scsi_transport;
+	shost->max_lun = CXGB3I_MAX_LUN;
+	shost->max_id = CXGB3I_MAX_TARGET;
+	shost->max_channel = 0;
+	shost->max_cmd_len = 16;
+
+	hba = iscsi_host_priv(shost);
+	hba->snic = snic;
+	hba->ndev = ndev;
+	hba->shost = shost;
+
+	pci_dev_get(snic->pdev);
+	err = iscsi_host_add(shost, &snic->pdev->dev);
+	if (err) {
+		cxgb3i_log_info("iscsi_host_add failed.\n");
+		goto pci_dev_put;
+	}
+
+	cxgb3i_api_debug("shost 0x%p, hba 0x%p, no %u.\n",
+			 shost, hba, shost->host_no);
+
+	return hba;
+
+pci_dev_put:
+	pci_dev_put(snic->pdev);
+	scsi_host_put(shost);
+	return NULL;
+}
+
+/**
+ * cxgb3i_hba_host_remove - de-register the host with scsi/iscsi
+ * @hba: the cxgb3i hba
+ */
+void cxgb3i_hba_host_remove(struct cxgb3i_hba *hba)
+{
+	cxgb3i_api_debug("shost 0x%p, hba 0x%p, no %u.\n",
+			 hba->shost, hba, hba->shost->host_no);
+	iscsi_host_remove(hba->shost);
+	pci_dev_put(hba->snic->pdev);
+	iscsi_host_free(hba->shost);
+}
+
+/**
+ * cxgb3i_ep_connect - establish TCP connection to target portal
+ * @dst_addr:		target IP address
+ * @non_blocking:	blocking or non-blocking call
+ *
+ * Initiates a TCP/IP connection to the dst_addr
+ */
+static struct iscsi_endpoint *cxgb3i_ep_connect(struct sockaddr *dst_addr,
+						int non_blocking)
+{
+	struct iscsi_endpoint *ep;
+	struct cxgb3i_endpoint *cep;
+	struct cxgb3i_hba *hba;
+	struct s3_conn *c3cn = NULL;
+	int err = 0;
+
+	c3cn = cxgb3i_c3cn_create();
+	if (!c3cn) {
+		cxgb3i_log_info("ep connect OOM.\n");
+		err = -ENOMEM;
+		goto release_conn;
+	}
+
+	err = cxgb3i_c3cn_connect(c3cn, (struct sockaddr_in *)dst_addr);
+	if (err < 0) {
+		cxgb3i_log_info("ep connect failed.\n");
+		goto release_conn;
+	}
+	hba = cxgb3i_hba_find_by_netdev(c3cn->dst_cache->dev);
+	if (!hba) {
+		err = -ENOSPC;
+		cxgb3i_log_info("NOT going through cxgbi device.\n");
+		goto release_conn;
+	}
+	if (c3cn_is_closing(c3cn)) {
+		err = -ENOSPC;
+		cxgb3i_log_info("ep connect unable to connect.\n");
+		goto release_conn;
+	}
+
+	ep = iscsi_create_endpoint(sizeof(*cep));
+	if (!ep) {
+		err = -ENOMEM;
+		cxgb3i_log_info("iscsi alloc ep, OOM.\n");
+		goto release_conn;
+	}
+	cep = ep->dd_data;
+	cep->c3cn = c3cn;
+	cep->hba = hba;
+
+	cxgb3i_api_debug("ep 0x%p, 0x%p, c3cn 0x%p, hba 0x%p.\n",
+			  ep, cep, c3cn, hba);
+	return ep;
+
+release_conn:
+	cxgb3i_api_debug("conn 0x%p failed, release.\n", c3cn);
+	if (c3cn)
+		cxgb3i_c3cn_release(c3cn);
+	return ERR_PTR(err);
+}
+
+/**
+ * cxgb3i_ep_poll - polls for TCP connection establishment
+ * @ep:		TCP connection (endpoint) handle
+ * @timeout_ms:	timeout value in milliseconds
+ *
+ * polls for TCP connect request to complete
+ */
+static int cxgb3i_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
+{
+	struct cxgb3i_endpoint *cep = ep->dd_data;
+	struct s3_conn *c3cn = cep->c3cn;
+
+	if (!c3cn_is_established(c3cn))
+		return 0;
+	cxgb3i_api_debug("ep 0x%p, c3cn 0x%p established.\n", ep, c3cn);
+	return 1;
+}
+
+/**
+ * cxgb3i_ep_disconnect - teardown TCP connection
+ * @ep:		TCP connection (endpoint) handle
+ *
+ * teardown TCP connection
+ */
+static void cxgb3i_ep_disconnect(struct iscsi_endpoint *ep)
+{
+	struct cxgb3i_endpoint *cep = ep->dd_data;
+	struct cxgb3i_conn *cconn = cep->cconn;
+
+	cxgb3i_api_debug("ep 0x%p, cep 0x%p.\n", ep, cep);
+
+	if (cconn && cconn->conn) {
+		/*
+		 * stop the xmit path so the xmit_pdu function is
+		 * not being called
+		 */
+		iscsi_suspend_tx(cconn->conn);
+
+		write_lock_bh(&cep->c3cn->callback_lock);
+		cep->c3cn->user_data = NULL;
+		cconn->cep = NULL;
+		write_unlock_bh(&cep->c3cn->callback_lock);
+	}
+
+	cxgb3i_api_debug("ep 0x%p, cep 0x%p, release c3cn 0x%p.\n",
+			 ep, cep, cep->c3cn);
+	cxgb3i_c3cn_release(cep->c3cn);
+	iscsi_destroy_endpoint(ep);
+}
+
+/**
+ * cxgb3i_session_create - create a new iscsi session
+ * @ep:			iscsi endpoint the session is bound to
+ * @cmds_max:		max # of commands
+ * @qdepth:		scsi queue depth
+ * @initial_cmdsn:	initial iscsi CMDSN for this session
+ * @host_no:		pointer to return host no
+ *
+ * Creates a new iSCSI session
+ */
+static struct iscsi_cls_session *
+cxgb3i_session_create(struct iscsi_endpoint *ep, u16 cmds_max, u16 qdepth,
+		      u32 initial_cmdsn, u32 *host_no)
+{
+	struct cxgb3i_endpoint *cep;
+	struct cxgb3i_hba *hba;
+	struct Scsi_Host *shost;
+	struct iscsi_cls_session *cls_session;
+	struct iscsi_session *session;
+
+	if (!ep) {
+		cxgb3i_log_error("%s, missing endpoint.\n", __func__);
+		return NULL;
+	}
+
+	cep = ep->dd_data;
+	hba = cep->hba;
+	shost = hba->shost;
+	cxgb3i_api_debug("ep 0x%p, cep 0x%p, hba 0x%p.\n", ep, cep, hba);
+	BUG_ON(hba != iscsi_host_priv(shost));
+
+	*host_no = shost->host_no;
+
+	cls_session = iscsi_session_setup(&cxgb3i_iscsi_transport, shost,
+					  cmds_max,
+					  sizeof(struct iscsi_tcp_task),
+					  initial_cmdsn, ISCSI_MAX_TARGET);
+	if (!cls_session)
+		return NULL;
+	session = cls_session->dd_data;
+	if (iscsi_tcp_r2tpool_alloc(session))
+		goto remove_session;
+
+	return cls_session;
+
+remove_session:
+	iscsi_session_teardown(cls_session);
+	return NULL;
+}
+
+/**
+ * cxgb3i_session_destroy - destroys iscsi session
+ * @cls_session:	pointer to iscsi cls session
+ *
+ * Destroys an iSCSI session instance and releases all the resources held
+ */
+static void cxgb3i_session_destroy(struct iscsi_cls_session *cls_session)
+{
+	cxgb3i_api_debug("sess 0x%p.\n", cls_session);
+	iscsi_tcp_r2tpool_free(cls_session->dd_data);
+	iscsi_session_teardown(cls_session);
+}
+
+/**
+ * cxgb3i_conn_max_xmit_dlength - check the max. xmit pdu segment size and
+ * reduce it to be within the hardware limit if needed
+ * @conn: iscsi connection
+ */
+static inline int cxgb3i_conn_max_xmit_dlength(struct iscsi_conn *conn)
+{
+	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+	struct cxgb3i_conn *cconn = tcp_conn->dd_data;
+	unsigned int max = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
+				 cconn->hba->snic->tx_max_size -
+				 ISCSI_PDU_NONPAYLOAD_MAX);
+
+	if (conn->max_xmit_dlength)
+		conn->max_xmit_dlength = min_t(unsigned int,
+						conn->max_xmit_dlength, max);
+	else
+		conn->max_xmit_dlength = max;
+	align_pdu_size(conn->max_xmit_dlength);
+	cxgb3i_log_info("conn 0x%p, max xmit %u.\n",
+			 conn, conn->max_xmit_dlength);
+	return 0;
+}
+
+/**
+ * cxgb3i_conn_max_recv_dlength - check the max. recv pdu segment size against
+ * the hardware limit
+ * @conn: iscsi connection
+ * return 0 if the value is valid, < 0 otherwise.
+ */
+static inline int cxgb3i_conn_max_recv_dlength(struct iscsi_conn *conn)
+{
+	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+	struct cxgb3i_conn *cconn = tcp_conn->dd_data;
+	unsigned int max = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
+				 cconn->hba->snic->rx_max_size -
+				 ISCSI_PDU_NONPAYLOAD_MAX);
+
+	align_pdu_size(max);
+	if (conn->max_recv_dlength) {
+		if (conn->max_recv_dlength > max) {
+			cxgb3i_log_error("MaxRecvDataSegmentLength %u too big."
+					 " Need to be <= %u.\n",
+					 conn->max_recv_dlength, max);
+			return -EINVAL;
+		}
+		conn->max_recv_dlength = min_t(unsigned int,
+						conn->max_recv_dlength, max);
+		align_pdu_size(conn->max_recv_dlength);
+	} else
+		conn->max_recv_dlength = max;
+	cxgb3i_api_debug("conn 0x%p, max recv %u.\n",
+			 conn, conn->max_recv_dlength);
+	return 0;
+}
+
+/**
+ * cxgb3i_conn_create - create iscsi connection instance
+ * @cls_session:	pointer to iscsi cls session
+ * @cid:		iscsi cid
+ *
+ * Creates a new iSCSI connection instance for a given session
+ */
+static struct iscsi_cls_conn *cxgb3i_conn_create(struct iscsi_cls_session
+						 *cls_session, u32 cid)
+{
+	struct iscsi_cls_conn *cls_conn;
+	struct iscsi_conn *conn;
+	struct iscsi_tcp_conn *tcp_conn;
+	struct cxgb3i_conn *cconn;
+
+	cxgb3i_api_debug("sess 0x%p, cid %u.\n", cls_session, cid);
+
+	cls_conn = iscsi_tcp_conn_setup(cls_session, sizeof(*cconn), cid);
+	if (!cls_conn)
+		return NULL;
+	conn = cls_conn->dd_data;
+	tcp_conn = conn->dd_data;
+	cconn = tcp_conn->dd_data;
+
+	cconn->conn = conn;
+	return cls_conn;
+}
+
+/**
+ * cxgb3i_conn_bind - binds iscsi sess, conn and endpoint together
+ * @cls_session:	pointer to iscsi cls session
+ * @cls_conn:		pointer to iscsi cls conn
+ * @transport_eph:	64-bit EP handle
+ * @is_leading:		leading connection on this session?
+ *
+ * Binds together an iSCSI session, an iSCSI connection and a
+ *	TCP connection. This routine returns an error code if the TCP
+ *	connection does not belong to the device the iSCSI sess/conn is
+ *	bound to.
+ */
+static int cxgb3i_conn_bind(struct iscsi_cls_session *cls_session,
+			    struct iscsi_cls_conn *cls_conn,
+			    u64 transport_eph, int is_leading)
+{
+	struct iscsi_conn *conn = cls_conn->dd_data;
+	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+	struct cxgb3i_conn *cconn = tcp_conn->dd_data;
+	struct cxgb3i_adapter *snic;
+	struct iscsi_endpoint *ep;
+	struct cxgb3i_endpoint *cep;
+	struct s3_conn *c3cn;
+	int err;
+
+	ep = iscsi_lookup_endpoint(transport_eph);
+	if (!ep)
+		return -EINVAL;
+
+	/* setup ddp pagesize */
+	cep = ep->dd_data;
+	c3cn = cep->c3cn;
+	snic = cep->hba->snic;
+	err = cxgb3i_setup_conn_host_pagesize(snic->tdev, c3cn->tid, 0);
+	if (err < 0)
+		return err;
+
+	cxgb3i_api_debug("ep 0x%p, cls sess 0x%p, cls conn 0x%p.\n",
+			 ep, cls_session, cls_conn);
+
+	err = iscsi_conn_bind(cls_session, cls_conn, is_leading);
+	if (err)
+		return -EINVAL;
+
+	/* calculate the tag idx bits needed for this conn based on cmds_max */
+	cconn->task_idx_bits = (__ilog2_u32(conn->session->cmds_max - 1)) + 1;
+	cxgb3i_api_debug("session cmds_max 0x%x, bits %u.\n",
+			 conn->session->cmds_max, cconn->task_idx_bits);
+
+	/* updating user_data requires the writer side of callback_lock */
+	write_lock(&c3cn->callback_lock);
+	c3cn->user_data = conn;
+	cconn->hba = cep->hba;
+	cconn->cep = cep;
+	cep->cconn = cconn;
+	write_unlock(&c3cn->callback_lock);
+
+	cxgb3i_conn_max_xmit_dlength(conn);
+	cxgb3i_conn_max_recv_dlength(conn);
+
+	spin_lock_bh(&conn->session->lock);
+	sprintf(conn->portal_address, NIPQUAD_FMT,
+		NIPQUAD(c3cn->daddr.sin_addr.s_addr));
+	conn->portal_port = ntohs(c3cn->daddr.sin_port);
+	spin_unlock_bh(&conn->session->lock);
+
+	/* init recv engine */
+	iscsi_tcp_hdr_recv_prep(tcp_conn);
+
+	return 0;
+}
+
+/**
+ * cxgb3i_conn_get_param - return iscsi connection parameter to caller
+ * @cls_conn:	pointer to iscsi cls conn
+ * @param:	parameter type identifier
+ * @buf:	buffer pointer
+ *
+ * returns iSCSI connection parameters
+ */
+static int cxgb3i_conn_get_param(struct iscsi_cls_conn *cls_conn,
+				 enum iscsi_param param, char *buf)
+{
+	struct iscsi_conn *conn = cls_conn->dd_data;
+	int len;
+
+	cxgb3i_api_debug("cls_conn 0x%p, param %d.\n", cls_conn, param);
+
+	switch (param) {
+	case ISCSI_PARAM_CONN_PORT:
+		spin_lock_bh(&conn->session->lock);
+		len = sprintf(buf, "%hu\n", conn->portal_port);
+		spin_unlock_bh(&conn->session->lock);
+		break;
+	case ISCSI_PARAM_CONN_ADDRESS:
+		spin_lock_bh(&conn->session->lock);
+		len = sprintf(buf, "%s\n", conn->portal_address);
+		spin_unlock_bh(&conn->session->lock);
+		break;
+	default:
+		return iscsi_conn_get_param(cls_conn, param, buf);
+	}
+
+	return len;
+}
+
+/**
+ * cxgb3i_conn_set_param - set iscsi connection parameter
+ * @cls_conn:	pointer to iscsi cls conn
+ * @param:	parameter type identifier
+ * @buf:	buffer pointer
+ * @buflen:	buffer length
+ *
+ * set iSCSI connection parameters
+ */
+static int cxgb3i_conn_set_param(struct iscsi_cls_conn *cls_conn,
+				 enum iscsi_param param, char *buf, int buflen)
+{
+	struct iscsi_conn *conn = cls_conn->dd_data;
+	struct iscsi_session *session = conn->session;
+	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+	struct cxgb3i_conn *cconn = tcp_conn->dd_data;
+	struct cxgb3i_adapter *snic = cconn->hba->snic;
+	struct s3_conn *c3cn = cconn->cep->c3cn;
+	int value, err = 0;
+
+	switch (param) {
+	case ISCSI_PARAM_HDRDGST_EN:
+		err = iscsi_set_param(cls_conn, param, buf, buflen);
+		if (!err && conn->hdrdgst_en)
+			err = cxgb3i_setup_conn_digest(snic->tdev, c3cn->tid,
+							conn->hdrdgst_en,
+							conn->datadgst_en, 0);
+		break;
+	case ISCSI_PARAM_DATADGST_EN:
+		err = iscsi_set_param(cls_conn, param, buf, buflen);
+		if (!err && conn->datadgst_en)
+			err = cxgb3i_setup_conn_digest(snic->tdev, c3cn->tid,
+							conn->hdrdgst_en,
+							conn->datadgst_en, 0);
+		break;
+	case ISCSI_PARAM_MAX_R2T:
+		sscanf(buf, "%d", &value);
+		if (value <= 0 || !is_power_of_2(value))
+			return -EINVAL;
+		if (session->max_r2t == value)
+			break;
+		iscsi_tcp_r2tpool_free(session);
+		err = iscsi_set_param(cls_conn, param, buf, buflen);
+		if (!err && iscsi_tcp_r2tpool_alloc(session))
+			return -ENOMEM;
+		break;
+	case ISCSI_PARAM_MAX_RECV_DLENGTH:
+		err = iscsi_set_param(cls_conn, param, buf, buflen);
+		if (!err)
+			err = cxgb3i_conn_max_recv_dlength(conn);
+		break;
+	case ISCSI_PARAM_MAX_XMIT_DLENGTH:
+		err = iscsi_set_param(cls_conn, param, buf, buflen);
+		if (!err)
+			err = cxgb3i_conn_max_xmit_dlength(conn);
+		break;
+	default:
+		return iscsi_set_param(cls_conn, param, buf, buflen);
+	}
+	return err;
+}
+
+/**
+ * cxgb3i_host_set_param - configure host (adapter) related parameters
+ * @shost:	scsi host pointer
+ * @param:	parameter type identifier
+ * @buf:	buffer pointer
+ */
+static int cxgb3i_host_set_param(struct Scsi_Host *shost,
+				 enum iscsi_host_param param,
+				 char *buf, int buflen)
+{
+	struct cxgb3i_hba *hba = iscsi_host_priv(shost);
+
+	cxgb3i_api_debug("param %d, buf %s.\n", param, buf);
+
+	switch (param) {
+	case ISCSI_HOST_PARAM_IPADDRESS:
+	{
+		__be32 addr = in_aton(buf);
+		cxgb3i_set_private_ipv4addr(hba->ndev, addr);
+		return 0;
+	}
+	case ISCSI_HOST_PARAM_HWADDRESS:
+	case ISCSI_HOST_PARAM_NETDEV_NAME:
+		/* ignore */
+		return 0;
+	default:
+		return iscsi_host_set_param(shost, param, buf, buflen);
+	}
+}
+
+/**
+ * cxgb3i_host_get_param - returns host (adapter) related parameters
+ * @shost:	scsi host pointer
+ * @param:	parameter type identifier
+ * @buf:	buffer pointer
+ */
+static int cxgb3i_host_get_param(struct Scsi_Host *shost,
+				 enum iscsi_host_param param, char *buf)
+{
+	struct cxgb3i_hba *hba = iscsi_host_priv(shost);
+	int len = 0;
+
+	cxgb3i_api_debug("hba %s, param %d.\n", hba->ndev->name, param);
+
+	switch (param) {
+	case ISCSI_HOST_PARAM_HWADDRESS:
+		len = sysfs_format_mac(buf, hba->ndev->dev_addr, 6);
+		break;
+	case ISCSI_HOST_PARAM_NETDEV_NAME:
+		len = sprintf(buf, "%s\n", hba->ndev->name);
+		break;
+	case ISCSI_HOST_PARAM_IPADDRESS:
+	{
+		__be32 addr;
+
+		addr = cxgb3i_get_private_ipv4addr(hba->ndev);
+		len = sprintf(buf, NIPQUAD_FMT, NIPQUAD(addr));
+		break;
+	}
+	default:
+		return iscsi_host_get_param(shost, param, buf);
+	}
+	return len;
+}
+
+/**
+ * cxgb3i_conn_get_stats - returns iSCSI stats
+ * @cls_conn:	pointer to iscsi cls conn
+ * @stats:	pointer to iscsi statistic struct
+ */
+static void cxgb3i_conn_get_stats(struct iscsi_cls_conn *cls_conn,
+				  struct iscsi_stats *stats)
+{
+	struct iscsi_conn *conn = cls_conn->dd_data;
+
+	stats->txdata_octets = conn->txdata_octets;
+	stats->rxdata_octets = conn->rxdata_octets;
+	stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
+	stats->dataout_pdus = conn->dataout_pdus_cnt;
+	stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
+	stats->datain_pdus = conn->datain_pdus_cnt;
+	stats->r2t_pdus = conn->r2t_pdus_cnt;
+	stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
+	stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
+	stats->digest_err = 0;
+	stats->timeout_err = 0;
+	stats->custom_length = 1;
+	strcpy(stats->custom[0].desc, "eh_abort_cnt");
+	stats->custom[0].value = conn->eh_abort_cnt;
+}
+
+/**
+ * cxgb3i_parse_itt - get the idx and age bits from a given tag
+ * @conn:	iscsi connection
+ * @itt:	itt tag
+ * @idx:	task index, filled in by this function
+ * @age:	session age, filled in by this function
+ */
+static void cxgb3i_parse_itt(struct iscsi_conn *conn, itt_t itt,
+			     int *idx, int *age)
+{
+	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+	struct cxgb3i_conn *cconn = tcp_conn->dd_data;
+	struct cxgb3i_adapter *snic = cconn->hba->snic;
+	u32 tag = ntohl((__force u32) itt);
+	u32 sw_bits;
+
+	sw_bits = cxgb3i_tag_nonrsvd_bits(&snic->tag_format, tag);
+	if (idx)
+		*idx = sw_bits & ((1 << cconn->task_idx_bits) - 1);
+	if (age)
+		*age = (sw_bits >> cconn->task_idx_bits) & ISCSI_AGE_MASK;
+
+	cxgb3i_tag_debug("parse tag 0x%x/0x%x, sw 0x%x, itt 0x%x, age 0x%x.\n",
+			 tag, itt, sw_bits, idx ? *idx : 0xFFFFF,
+			 age ? *age : 0xFF);
+}
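+
+/*
+ * Tag layout example (illustrative): with cmds_max = 128, the bind code
+ * above sets task_idx_bits = ilog2(127) + 1 = 7, so the non-reserved sw
+ * bits decompose as
+ *
+ *	sw_bits = (age << 7) | idx
+ *
+ * e.g. sw_bits 0x18a gives idx = 0x18a & 0x7f = 0x0a and
+ * age = (0x18a >> 7) & ISCSI_AGE_MASK = 3.
+ */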
+
+/**
+ * cxgb3i_reserve_itt - generate tag for a given task
+ * Try to set up ddp for a scsi read task.
+ * @task: iscsi task
+ * @hdr_itt: tag, filled in by this function
+ */
+int cxgb3i_reserve_itt(struct iscsi_task *task, itt_t *hdr_itt)
+{
+	struct scsi_cmnd *sc = task->sc;
+	struct iscsi_conn *conn = task->conn;
+	struct iscsi_session *sess = conn->session;
+	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+	struct cxgb3i_conn *cconn = tcp_conn->dd_data;
+	struct cxgb3i_adapter *snic = cconn->hba->snic;
+	struct cxgb3i_tag_format *tformat = &snic->tag_format;
+	u32 sw_tag = (sess->age << cconn->task_idx_bits) | task->itt;
+	u32 tag;
+	int err = -EINVAL;
+
+	if (sc &&
+	    (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_FROM_DEVICE) &&
+	    cxgb3i_sw_tag_usable(tformat, sw_tag)) {
+		struct s3_conn *c3cn = cconn->cep->c3cn;
+		struct cxgb3i_gather_list *gl;
+
+		gl = cxgb3i_ddp_make_gl(scsi_in(sc)->length,
+					scsi_in(sc)->table.sgl,
+					scsi_in(sc)->table.nents,
+					snic->pdev,
+					GFP_ATOMIC);
+		if (gl) {
+			tag = sw_tag;
+			err = cxgb3i_ddp_tag_reserve(snic->tdev, c3cn->tid,
+						     tformat, &tag,
+						     gl, GFP_ATOMIC);
+			if (err < 0)
+				cxgb3i_ddp_release_gl(gl, snic->pdev);
+		}
+	}
+
+	if (err < 0)
+		tag = cxgb3i_set_non_ddp_tag(tformat, sw_tag);
+	/* the itt needs to be sent in big-endian order */
+	*hdr_itt = (__force itt_t)htonl(tag);
+
+	cxgb3i_tag_debug("new tag 0x%x/0x%x (itt 0x%x, age 0x%x).\n",
+			 tag, *hdr_itt, task->itt, sess->age);
+	return 0;
+}
+
+/**
+ * cxgb3i_release_itt - release the tag for a given task
+ * if the tag is a ddp tag, release the ddp setup
+ * @task:	iscsi task
+ * @hdr_itt:	tag
+ */
+void cxgb3i_release_itt(struct iscsi_task *task, itt_t hdr_itt)
+{
+	struct scsi_cmnd *sc = task->sc;
+	struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data;
+	struct cxgb3i_conn *cconn = tcp_conn->dd_data;
+	struct cxgb3i_adapter *snic = cconn->hba->snic;
+	struct cxgb3i_tag_format *tformat = &snic->tag_format;
+	u32 tag = ntohl((__force u32)hdr_itt);
+
+	cxgb3i_tag_debug("release tag 0x%x.\n", tag);
+
+	if (sc &&
+	    (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_FROM_DEVICE) &&
+	    cxgb3i_is_ddp_tag(tformat, tag))
+		cxgb3i_ddp_tag_release(snic->tdev, tag);
+}
+
+/**
+ * cxgb3i_host_template -- scsi_host_template structure
+ *	used when registering with the scsi mid layer
+ */
+static struct scsi_host_template cxgb3i_host_template = {
+	.module			= THIS_MODULE,
+	.name			= "Chelsio S3xx iSCSI Initiator",
+	.proc_name		= "cxgb3i",
+	.queuecommand		= iscsi_queuecommand,
+	.change_queue_depth	= iscsi_change_queue_depth,
+	.can_queue		= 128 * (ISCSI_DEF_XMIT_CMDS_MAX - 1),
+	.sg_tablesize		= SG_ALL,
+	.max_sectors		= 0xFFFF,
+	.cmd_per_lun		= ISCSI_DEF_CMD_PER_LUN,
+	.eh_abort_handler	= iscsi_eh_abort,
+	.eh_device_reset_handler = iscsi_eh_device_reset,
+	.eh_target_reset_handler = iscsi_eh_target_reset,
+	.use_clustering		= DISABLE_CLUSTERING,
+	.this_id		= -1,
+};
+
+static struct iscsi_transport cxgb3i_iscsi_transport = {
+	.owner			= THIS_MODULE,
+	.name			= "cxgb3i",
+	.caps			= CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST
+				| CAP_DATADGST | CAP_DIGEST_OFFLOAD |
+				CAP_PADDING_OFFLOAD,
+	.param_mask		= ISCSI_MAX_RECV_DLENGTH |
+				ISCSI_MAX_XMIT_DLENGTH |
+				ISCSI_HDRDGST_EN |
+				ISCSI_DATADGST_EN |
+				ISCSI_INITIAL_R2T_EN |
+				ISCSI_MAX_R2T |
+				ISCSI_IMM_DATA_EN |
+				ISCSI_FIRST_BURST |
+				ISCSI_MAX_BURST |
+				ISCSI_PDU_INORDER_EN |
+				ISCSI_DATASEQ_INORDER_EN |
+				ISCSI_ERL |
+				ISCSI_CONN_PORT |
+				ISCSI_CONN_ADDRESS |
+				ISCSI_EXP_STATSN |
+				ISCSI_PERSISTENT_PORT |
+				ISCSI_PERSISTENT_ADDRESS |
+				ISCSI_TARGET_NAME | ISCSI_TPGT |
+				ISCSI_USERNAME | ISCSI_PASSWORD |
+				ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
+				ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
+				ISCSI_LU_RESET_TMO |
+				ISCSI_PING_TMO | ISCSI_RECV_TMO |
+				ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
+	.host_param_mask	= ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
+			ISCSI_HOST_INITIATOR_NAME | ISCSI_HOST_NETDEV_NAME,
+	.get_host_param		= cxgb3i_host_get_param,
+	.set_host_param		= cxgb3i_host_set_param,
+	/* session management */
+	.create_session		= cxgb3i_session_create,
+	.destroy_session	= cxgb3i_session_destroy,
+	.get_session_param	= iscsi_session_get_param,
+	/* connection management */
+	.create_conn		= cxgb3i_conn_create,
+	.bind_conn		= cxgb3i_conn_bind,
+	.destroy_conn		= iscsi_tcp_conn_teardown,
+	.start_conn		= iscsi_conn_start,
+	.stop_conn		= iscsi_conn_stop,
+	.get_conn_param		= cxgb3i_conn_get_param,
+	.set_param		= cxgb3i_conn_set_param,
+	.get_stats		= cxgb3i_conn_get_stats,
+	/* pdu xmit req. from user space */
+	.send_pdu		= iscsi_conn_send_pdu,
+	/* task */
+	.init_task		= iscsi_tcp_task_init,
+	.xmit_task		= iscsi_tcp_task_xmit,
+	.cleanup_task		= cxgb3i_conn_cleanup_task,
+
+	/* pdu */
+	.alloc_pdu		= cxgb3i_conn_alloc_pdu,
+	.init_pdu		= cxgb3i_conn_init_pdu,
+	.xmit_pdu		= cxgb3i_conn_xmit_pdu,
+	.parse_pdu_itt		= cxgb3i_parse_itt,
+
+	/* TCP connect/disconnect */
+	.ep_connect		= cxgb3i_ep_connect,
+	.ep_poll		= cxgb3i_ep_poll,
+	.ep_disconnect		= cxgb3i_ep_disconnect,
+	/* Error recovery timeout call */
+	.session_recovery_timedout = iscsi_session_recovery_timedout,
+};
+
+int cxgb3i_iscsi_init(void)
+{
+	sw_tag_idx_bits = (__ilog2_u32(ISCSI_ITT_MASK)) + 1;
+	sw_tag_age_bits = (__ilog2_u32(ISCSI_AGE_MASK)) + 1;
+	cxgb3i_log_info("tag itt 0x%x, %u bits, age 0x%x, %u bits.\n",
+			ISCSI_ITT_MASK, sw_tag_idx_bits,
+			ISCSI_AGE_MASK, sw_tag_age_bits);
+
+	cxgb3i_scsi_transport =
+	    iscsi_register_transport(&cxgb3i_iscsi_transport);
+	if (!cxgb3i_scsi_transport) {
+		cxgb3i_log_error("Could not register cxgb3i transport.\n");
+		return -ENODEV;
+	}
+	cxgb3i_api_debug("cxgb3i transport 0x%p.\n", cxgb3i_scsi_transport);
+	return 0;
+}
+
+void cxgb3i_iscsi_cleanup(void)
+{
+	if (cxgb3i_scsi_transport) {
+		cxgb3i_api_debug("cxgb3i transport 0x%p.\n",
+				 cxgb3i_scsi_transport);
+		iscsi_unregister_transport(&cxgb3i_iscsi_transport);
+	}
+}
diff --git a/drivers/scsi/cxgb3i/cxgb3i_offload.c b/drivers/scsi/cxgb3i/cxgb3i_offload.c
new file mode 100644
index 000000000000..a865f1fefe8b
--- /dev/null
+++ b/drivers/scsi/cxgb3i/cxgb3i_offload.c
@@ -0,0 +1,1810 @@
+/*
+ * cxgb3i_offload.c: Chelsio S3xx iscsi offloaded tcp connection management
+ *
+ * Copyright (C) 2003-2008 Chelsio Communications.  All rights reserved.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the LICENSE file included in this
+ * release for licensing terms and conditions.
+ *
+ * Written by:	Dimitris Michailidis (dm@chelsio.com)
+ *		Karen Xie (kxie@chelsio.com)
+ */
+
+#include <linux/if_vlan.h>
+#include <linux/version.h>
+
+#include "cxgb3_defs.h"
+#include "cxgb3_ctl_defs.h"
+#include "firmware_exports.h"
+#include "cxgb3i_offload.h"
+#include "cxgb3i_pdu.h"
+#include "cxgb3i_ddp.h"
+
+#ifdef __DEBUG_C3CN_CONN__
+#define c3cn_conn_debug         cxgb3i_log_info
+#else
+#define c3cn_conn_debug(fmt...)
+#endif
+
+#ifdef __DEBUG_C3CN_TX__
+#define c3cn_tx_debug         cxgb3i_log_debug
+#else
+#define c3cn_tx_debug(fmt...)
+#endif
+
+#ifdef __DEBUG_C3CN_RX__
+#define c3cn_rx_debug         cxgb3i_log_debug
+#else
+#define c3cn_rx_debug(fmt...)
+#endif
+
+/*
+ * module parameters related to offloaded iscsi connections
+ */
+static int cxgb3_rcv_win = 256 * 1024;
+module_param(cxgb3_rcv_win, int, 0644);
+MODULE_PARM_DESC(cxgb3_rcv_win, "TCP receive window in bytes (default=256KB)");
+
+static int cxgb3_snd_win = 64 * 1024;
+module_param(cxgb3_snd_win, int, 0644);
+MODULE_PARM_DESC(cxgb3_snd_win, "TCP send window in bytes (default=64KB)");
+
+static int cxgb3_rx_credit_thres = 10 * 1024;
+module_param(cxgb3_rx_credit_thres, int, 0644);
+MODULE_PARM_DESC(cxgb3_rx_credit_thres,
+		 "RX credits return threshold in bytes (default=10KB)");
+
+static unsigned int cxgb3_max_connect = 8 * 1024;
+module_param(cxgb3_max_connect, uint, 0644);
+MODULE_PARM_DESC(cxgb3_max_connect, "Max. # of connections (default=8192)");
+
+static unsigned int cxgb3_sport_base = 20000;
+module_param(cxgb3_sport_base, uint, 0644);
+MODULE_PARM_DESC(cxgb3_sport_base, "starting port number (default=20000)");
+
+/*
+ * cxgb3i tcp connection data(per adapter) list
+ */
+static LIST_HEAD(cdata_list);
+static DEFINE_RWLOCK(cdata_rwlock);
+
+static int c3cn_push_tx_frames(struct s3_conn *c3cn, int req_completion);
+static void c3cn_release_offload_resources(struct s3_conn *c3cn);
+
+/*
+ * iscsi source port management
+ *
+ * Find a free source port in the port allocation map. We use a very simple
+ * rotor scheme to look for the next free port.
+ *
+ * If a source port has been specified make sure that it doesn't collide with
+ * our normal source port allocation map.  If it's outside the range of our
+ * allocation/deallocation scheme just let them use it.
+ *
+ * If the source port is outside our allocation range, the caller is
+ * responsible for keeping track of their port usage.
+ */
+static int c3cn_get_port(struct s3_conn *c3cn, struct cxgb3i_sdev_data *cdata)
+{
+	unsigned int start;
+	int idx;
+
+	if (!cdata)
+		goto error_out;
+
+	if (c3cn->saddr.sin_port != 0) {
+		idx = ntohs(c3cn->saddr.sin_port) - cxgb3_sport_base;
+		if (idx < 0 || idx >= cxgb3_max_connect)
+			return 0;
+		if (test_and_set_bit(idx, cdata->sport_map))
+			return -EADDRINUSE;
+		return 0;
+	}
+
+	/* sport_map_next may not be accurate, but that is okay; sport_map
+	   should be */
+	start = idx = cdata->sport_map_next;
+	do {
+		if (++idx >= cxgb3_max_connect)
+			idx = 0;
+		if (!(test_and_set_bit(idx, cdata->sport_map))) {
+			c3cn->saddr.sin_port = htons(cxgb3_sport_base + idx);
+			cdata->sport_map_next = idx;
+			c3cn_conn_debug("%s reserve port %u.\n",
+					cdata->cdev->name,
+					cxgb3_sport_base + idx);
+			return 0;
+		}
+	} while (idx != start);
+
+error_out:
+	return -EADDRNOTAVAIL;
+}
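+
+/*
+ * Example (with the default module parameters): the rotor above hands out
+ * source ports cxgb3_sport_base + idx, i.e. 20000..28191 for
+ * cxgb3_sport_base = 20000 and cxgb3_max_connect = 8192; bit idx of
+ * sport_map records whether port 20000 + idx is currently in use.
+ */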
+
+static void c3cn_put_port(struct s3_conn *c3cn)
+{
+	struct cxgb3i_sdev_data *cdata = CXGB3_SDEV_DATA(c3cn->cdev);
+
+	if (c3cn->saddr.sin_port) {
+		int idx = ntohs(c3cn->saddr.sin_port) - cxgb3_sport_base;
+
+		c3cn->saddr.sin_port = 0;
+		if (idx < 0 || idx >= cxgb3_max_connect)
+			return;
+		clear_bit(idx, cdata->sport_map);
+		c3cn_conn_debug("%s, release port %u.\n",
+				cdata->cdev->name, cxgb3_sport_base + idx);
+	}
+}
+
+static inline void c3cn_set_flag(struct s3_conn *c3cn, enum c3cn_flags flag)
+{
+	__set_bit(flag, &c3cn->flags);
+	c3cn_conn_debug("c3cn 0x%p, set %d, s %u, f 0x%lx.\n",
+			c3cn, flag, c3cn->state, c3cn->flags);
+}
+
+static inline void c3cn_clear_flag(struct s3_conn *c3cn, enum c3cn_flags flag)
+{
+	__clear_bit(flag, &c3cn->flags);
+	c3cn_conn_debug("c3cn 0x%p, clear %d, s %u, f 0x%lx.\n",
+			c3cn, flag, c3cn->state, c3cn->flags);
+}
+
+static inline int c3cn_flag(struct s3_conn *c3cn, enum c3cn_flags flag)
+{
+	if (c3cn == NULL)
+		return 0;
+	return test_bit(flag, &c3cn->flags);
+}
+
+static void c3cn_set_state(struct s3_conn *c3cn, int state)
+{
+	c3cn_conn_debug("c3cn 0x%p state -> %u.\n", c3cn, state);
+	c3cn->state = state;
+}
+
+static inline void c3cn_hold(struct s3_conn *c3cn)
+{
+	atomic_inc(&c3cn->refcnt);
+}
+
+static inline void c3cn_put(struct s3_conn *c3cn)
+{
+	if (atomic_dec_and_test(&c3cn->refcnt)) {
+		c3cn_conn_debug("free c3cn 0x%p, s %u, f 0x%lx.\n",
+				c3cn, c3cn->state, c3cn->flags);
+		kfree(c3cn);
+	}
+}
+
+static void c3cn_closed(struct s3_conn *c3cn)
+{
+	c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
+			 c3cn, c3cn->state, c3cn->flags);
+
+	c3cn_put_port(c3cn);
+	c3cn_release_offload_resources(c3cn);
+	c3cn_set_state(c3cn, C3CN_STATE_CLOSED);
+	cxgb3i_conn_closing(c3cn);
+}
+
+/*
+ * CPL (Chelsio Protocol Language) defines a message passing interface between
+ * the host driver and T3 asic.
+ * The section below implements the CPLs related to iscsi tcp connection
+ * open/close/abort and data send/receive.
+ */
+
+/*
+ * CPL connection active open request: host ->
+ */
+static unsigned int find_best_mtu(const struct t3c_data *d, unsigned short mtu)
+{
+	int i = 0;
+
+	while (i < d->nmtus - 1 && d->mtus[i + 1] <= mtu)
+		++i;
+	return i;
+}
+
+static unsigned int select_mss(struct s3_conn *c3cn, unsigned int pmtu)
+{
+	unsigned int idx;
+	struct dst_entry *dst = c3cn->dst_cache;
+	struct t3cdev *cdev = c3cn->cdev;
+	const struct t3c_data *td = T3C_DATA(cdev);
+	u16 advmss = dst_metric(dst, RTAX_ADVMSS);
+
+	if (advmss > pmtu - 40)
+		advmss = pmtu - 40;
+	if (advmss < td->mtus[0] - 40)
+		advmss = td->mtus[0] - 40;
+	idx = find_best_mtu(td, advmss + 40);
+	return idx;
+}
+
+static inline int compute_wscale(int win)
+{
+	int wscale = 0;
+	while (wscale < 14 && (65535<<wscale) < win)
+		wscale++;
+	return wscale;
+}
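+
+/*
+ * Worked example: for the default cxgb3_rcv_win of 256KB (262144 bytes),
+ * compute_wscale() returns 3, since 65535 << 2 = 262140 is still smaller
+ * than the window while 65535 << 3 = 524280 covers it.
+ */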
+
+static inline unsigned int calc_opt0h(struct s3_conn *c3cn)
+{
+	int wscale = compute_wscale(cxgb3_rcv_win);
+	return  V_KEEP_ALIVE(1) |
+		F_TCAM_BYPASS |
+		V_WND_SCALE(wscale) |
+		V_MSS_IDX(c3cn->mss_idx);
+}
+
+static inline unsigned int calc_opt0l(struct s3_conn *c3cn)
+{
+	return  V_ULP_MODE(ULP_MODE_ISCSI) |
+		V_RCV_BUFSIZ(cxgb3_rcv_win>>10);
+}
+
+static void make_act_open_req(struct s3_conn *c3cn, struct sk_buff *skb,
+			      unsigned int atid, const struct l2t_entry *e)
+{
+	struct cpl_act_open_req *req;
+
+	c3cn_conn_debug("c3cn 0x%p, atid 0x%x.\n", c3cn, atid);
+
+	skb->priority = CPL_PRIORITY_SETUP;
+	req = (struct cpl_act_open_req *)__skb_put(skb, sizeof(*req));
+	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
+	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, atid));
+	req->local_port = c3cn->saddr.sin_port;
+	req->peer_port = c3cn->daddr.sin_port;
+	req->local_ip = c3cn->saddr.sin_addr.s_addr;
+	req->peer_ip = c3cn->daddr.sin_addr.s_addr;
+	req->opt0h = htonl(calc_opt0h(c3cn) | V_L2T_IDX(e->idx) |
+			   V_TX_CHANNEL(e->smt_idx));
+	req->opt0l = htonl(calc_opt0l(c3cn));
+	req->params = 0;
+}
+
+static void fail_act_open(struct s3_conn *c3cn, int errno)
+{
+	c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
+			c3cn, c3cn->state, c3cn->flags);
+	c3cn->err = errno;
+	c3cn_closed(c3cn);
+}
+
+static void act_open_req_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
+{
+	struct s3_conn *c3cn = (struct s3_conn *)skb->sk;
+
+	c3cn_conn_debug("c3cn 0x%p, state %u.\n", c3cn, c3cn->state);
+
+	c3cn_hold(c3cn);
+	spin_lock_bh(&c3cn->lock);
+	if (c3cn->state == C3CN_STATE_CONNECTING)
+		fail_act_open(c3cn, EHOSTUNREACH);
+	spin_unlock_bh(&c3cn->lock);
+	c3cn_put(c3cn);
+	__kfree_skb(skb);
+}
+
+/*
+ * CPL connection close request: host ->
+ *
+ * Close a connection by sending a CPL_CLOSE_CON_REQ message and queueing it
+ * to the write queue (i.e., after any unsent tx data).
+ */
+static void skb_entail(struct s3_conn *c3cn, struct sk_buff *skb,
+		       int flags)
+{
+	CXGB3_SKB_CB(skb)->seq = c3cn->write_seq;
+	CXGB3_SKB_CB(skb)->flags = flags;
+	__skb_queue_tail(&c3cn->write_queue, skb);
+}
+
+static void send_close_req(struct s3_conn *c3cn)
+{
+	struct sk_buff *skb = c3cn->cpl_close;
+	struct cpl_close_con_req *req = (struct cpl_close_con_req *)skb->head;
+	unsigned int tid = c3cn->tid;
+
+	c3cn_conn_debug("c3cn 0x%p, state 0x%x, flag 0x%lx.\n",
+			c3cn, c3cn->state, c3cn->flags);
+
+	c3cn->cpl_close = NULL;
+
+	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_CLOSE_CON));
+	req->wr.wr_lo = htonl(V_WR_TID(tid));
+	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
+	req->rsvd = htonl(c3cn->write_seq);
+
+	skb_entail(c3cn, skb, C3CB_FLAG_NO_APPEND);
+	if (c3cn->state != C3CN_STATE_CONNECTING)
+		c3cn_push_tx_frames(c3cn, 1);
+}
+
+/*
+ * CPL connection abort request: host ->
+ *
+ * Send an ABORT_REQ message. Makes sure we do not send multiple ABORT_REQs
+ * for the same connection and also that we do not try to send a message
+ * after the connection has closed.
+ */
+static void abort_arp_failure(struct t3cdev *cdev, struct sk_buff *skb)
+{
+	struct cpl_abort_req *req = cplhdr(skb);
+
+	c3cn_conn_debug("tdev 0x%p.\n", cdev);
+
+	req->cmd = CPL_ABORT_NO_RST;
+	cxgb3_ofld_send(cdev, skb);
+}
+
+static inline void c3cn_purge_write_queue(struct s3_conn *c3cn)
+{
+	struct sk_buff *skb;
+
+	while ((skb = __skb_dequeue(&c3cn->write_queue)))
+		__kfree_skb(skb);
+}
+
+static void send_abort_req(struct s3_conn *c3cn)
+{
+	struct sk_buff *skb = c3cn->cpl_abort_req;
+	struct cpl_abort_req *req;
+	unsigned int tid = c3cn->tid;
+
+	if (unlikely(c3cn->state == C3CN_STATE_ABORTING) || !skb ||
+		     !c3cn->cdev)
+		return;
+
+	c3cn_set_state(c3cn, C3CN_STATE_ABORTING);
+
+	c3cn_conn_debug("c3cn 0x%p, flag ABORT_RPL + ABORT_SHUT.\n", c3cn);
+
+	c3cn_set_flag(c3cn, C3CN_ABORT_RPL_PENDING);
+
+	/* Purge the send queue so we don't send anything after an abort. */
+	c3cn_purge_write_queue(c3cn);
+
+	c3cn->cpl_abort_req = NULL;
+	req = (struct cpl_abort_req *)skb->head;
+
+	skb->priority = CPL_PRIORITY_DATA;
+	set_arp_failure_handler(skb, abort_arp_failure);
+
+	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ));
+	req->wr.wr_lo = htonl(V_WR_TID(tid));
+	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, tid));
+	req->rsvd0 = htonl(c3cn->snd_nxt);
+	req->rsvd1 = !c3cn_flag(c3cn, C3CN_TX_DATA_SENT);
+	req->cmd = CPL_ABORT_SEND_RST;
+
+	l2t_send(c3cn->cdev, skb, c3cn->l2t);
+}
+
+/*
+ * CPL connection abort reply: host ->
+ *
+ * Send an ABORT_RPL message in response of the ABORT_REQ received.
+ */
+static void send_abort_rpl(struct s3_conn *c3cn, int rst_status)
+{
+	struct sk_buff *skb = c3cn->cpl_abort_rpl;
+	struct cpl_abort_rpl *rpl = (struct cpl_abort_rpl *)skb->head;
+
+	c3cn->cpl_abort_rpl = NULL;
+
+	skb->priority = CPL_PRIORITY_DATA;
+	rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
+	rpl->wr.wr_lo = htonl(V_WR_TID(c3cn->tid));
+	OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, c3cn->tid));
+	rpl->cmd = rst_status;
+
+	cxgb3_ofld_send(c3cn->cdev, skb);
+}
+
+/*
+ * CPL connection rx data ack: host ->
+ * Send RX credits through an RX_DATA_ACK CPL message. Returns the number of
+ * credits sent.
+ */
+static u32 send_rx_credits(struct s3_conn *c3cn, u32 credits, u32 dack)
+{
+	struct sk_buff *skb;
+	struct cpl_rx_data_ack *req;
+
+	skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
+	if (!skb)
+		return 0;
+
+	req = (struct cpl_rx_data_ack *)__skb_put(skb, sizeof(*req));
+	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
+	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, c3cn->tid));
+	req->credit_dack = htonl(dack | V_RX_CREDITS(credits));
+	skb->priority = CPL_PRIORITY_ACK;
+	cxgb3_ofld_send(c3cn->cdev, skb);
+	return credits;
+}
+
+/*
+ * CPL connection tx data: host ->
+ *
+ * Send iscsi PDUs via TX_DATA CPL messages.
+ * Each TX_DATA consumes work request credits (WRs), so we need to keep track
+ * of how many we've used so far and how many are pending (i.e., not yet
+ * acked by T3).
+ */
+
+/*
+ * For ULP connections the HW may insert digest bytes into the pdu. Those
+ * bytes are not sent by the host but are part of the TCP payload and therefore
+ * consume TCP sequence space.
+ */
+static const unsigned int cxgb3_ulp_extra_len[] = { 0, 4, 4, 8 };
+static inline unsigned int ulp_extra_len(const struct sk_buff *skb)
+{
+	return cxgb3_ulp_extra_len[skb_ulp_mode(skb) & 3];
+}
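+
+/*
+ * Example: the table above is indexed by the low two ULP submode bits of
+ * skb_ulp_mode(); with both header and data digests enabled (index 3) the
+ * HW inserts 4 + 4 = 8 digest bytes, which c3cn_push_tx_frames() below adds
+ * to the length carried in the TX_DATA_WR.
+ */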
+
+static unsigned int wrlen __read_mostly;
+
+/*
+ * The number of WRs needed for an skb depends on the number of fragments
+ * in the skb and whether it has any payload in its main body.  This maps the
+ * length of the gather list represented by an skb into the # of necessary WRs.
+ *
+ * The max. length of an skb is controlled by the max pdu size which is ~16K.
+ * Also, assume the min. fragment length is the sector size (512), then add
+ * extra fragment counts for iscsi bhs and payload padding.
+ */
+#define SKB_WR_LIST_SIZE	(16384/512 + 3)
+static unsigned int skb_wrs[SKB_WR_LIST_SIZE] __read_mostly;
+
+static void s3_init_wr_tab(unsigned int wr_len)
+{
+	int i;
+
+	if (skb_wrs[1])		/* already initialized */
+		return;
+
+	for (i = 1; i < SKB_WR_LIST_SIZE; i++) {
+		int sgl_len = (3 * i) / 2 + (i & 1);
+
+		sgl_len += 3;
+		skb_wrs[i] = (sgl_len <= wr_len
+			      ? 1 : 1 + (sgl_len - 2) / (wr_len - 1));
+	}
+
+	wrlen = wr_len * 8;
+}
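+
+/*
+ * Worked example for the table above (illustrative): an skb whose gather
+ * list has i = 1 entry needs sgl_len = (3 * 1) / 2 + (1 & 1) + 3 = 5
+ * units, so a single WR suffices whenever the adapter's wr_len is at
+ * least 5; longer gather lists spill into 1 + (sgl_len - 2) / (wr_len - 1)
+ * WRs, exactly as skb_wrs[] records.
+ */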
+
+static inline void reset_wr_list(struct s3_conn *c3cn)
+{
+	c3cn->wr_pending_head = NULL;
+}
+
+/*
+ * Add a WR to a connection's list of pending WRs.  This is a singly-linked
+ * list of sk_buffs operating as a FIFO.  The head is kept in wr_pending_head
+ * and the tail in wr_pending_tail.
+ */
+static inline void enqueue_wr(struct s3_conn *c3cn,
+			      struct sk_buff *skb)
+{
+	skb_wr_data(skb) = NULL;
+
+	/*
+	 * We want to take an extra reference since both us and the driver
+	 * need to free the packet before it's really freed. We know there's
+	 * just one user currently so we use atomic_set rather than skb_get
+	 * to avoid the atomic op.
+	 */
+	atomic_set(&skb->users, 2);
+
+	if (!c3cn->wr_pending_head)
+		c3cn->wr_pending_head = skb;
+	else
+		skb_wr_data(c3cn->wr_pending_tail) = skb;
+	c3cn->wr_pending_tail = skb;
+}
+
+static inline struct sk_buff *peek_wr(const struct s3_conn *c3cn)
+{
+	return c3cn->wr_pending_head;
+}
+
+static inline void free_wr_skb(struct sk_buff *skb)
+{
+	kfree_skb(skb);
+}
+
+static inline struct sk_buff *dequeue_wr(struct s3_conn *c3cn)
+{
+	struct sk_buff *skb = c3cn->wr_pending_head;
+
+	if (likely(skb)) {
+		/* Don't bother clearing the tail */
+		c3cn->wr_pending_head = skb_wr_data(skb);
+		skb_wr_data(skb) = NULL;
+	}
+	return skb;
+}
+
+static void purge_wr_queue(struct s3_conn *c3cn)
+{
+	struct sk_buff *skb;
+	while ((skb = dequeue_wr(c3cn)) != NULL)
+		free_wr_skb(skb);
+}
+
+static inline void make_tx_data_wr(struct s3_conn *c3cn, struct sk_buff *skb,
+				   int len)
+{
+	struct tx_data_wr *req;
+
+	skb_reset_transport_header(skb);
+	req = (struct tx_data_wr *)__skb_push(skb, sizeof(*req));
+	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA));
+	req->wr_lo = htonl(V_WR_TID(c3cn->tid));
+	req->sndseq = htonl(c3cn->snd_nxt);
+	/* len includes the length of any HW ULP additions */
+	req->len = htonl(len);
+	req->param = htonl(V_TX_PORT(c3cn->l2t->smt_idx));
+	/* V_TX_ULP_SUBMODE sets both the mode and submode */
+	req->flags = htonl(V_TX_ULP_SUBMODE(skb_ulp_mode(skb)) |
+			   V_TX_SHOVE((skb_peek(&c3cn->write_queue) ? 0 : 1)));
+
+	if (!c3cn_flag(c3cn, C3CN_TX_DATA_SENT)) {
+		req->flags |= htonl(V_TX_ACK_PAGES(2) | F_TX_INIT |
+				    V_TX_CPU_IDX(c3cn->qset));
+		/* Sendbuffer is in units of 32KB. */
+		req->param |= htonl(V_TX_SNDBUF(cxgb3_snd_win >> 15));
+		c3cn_set_flag(c3cn, C3CN_TX_DATA_SENT);
+	}
+}
+
+static void arp_failure_discard(struct t3cdev *cdev, struct sk_buff *skb)
+{
+	kfree_skb(skb);
+}
+
+/**
+ * c3cn_push_tx_frames -- start transmit
+ * @c3cn: the offloaded connection
+ * @req_completion: request wr_ack or not
+ *
+ * Prepends TX_DATA_WR or CPL_CLOSE_CON_REQ headers to buffers waiting in a
+ * connection's send queue and sends them on to T3.  Must be called with the
+ * connection's lock held.  Returns the amount of send buffer space that was
+ * freed as a result of sending queued data to T3.
+ */
+static int c3cn_push_tx_frames(struct s3_conn *c3cn, int req_completion)
+{
+	int total_size = 0;
+	struct sk_buff *skb;
+	struct t3cdev *cdev;
+	struct cxgb3i_sdev_data *cdata;
+
+	if (unlikely(c3cn->state == C3CN_STATE_CONNECTING ||
+		     c3cn->state == C3CN_STATE_CLOSE_WAIT_1 ||
+		     c3cn->state == C3CN_STATE_ABORTING)) {
+		c3cn_tx_debug("c3cn 0x%p, in closing state %u.\n",
+			      c3cn, c3cn->state);
+		return 0;
+	}
+
+	cdev = c3cn->cdev;
+	cdata = CXGB3_SDEV_DATA(cdev);
+
+	while (c3cn->wr_avail
+	       && (skb = skb_peek(&c3cn->write_queue)) != NULL) {
+		int len = skb->len;	/* length before skb_push */
+		int frags = skb_shinfo(skb)->nr_frags + (len != skb->data_len);
+		int wrs_needed = skb_wrs[frags];
+
+		if (wrs_needed > 1 && len + sizeof(struct tx_data_wr) <= wrlen)
+			wrs_needed = 1;
+
+		WARN_ON(frags >= SKB_WR_LIST_SIZE || wrs_needed < 1);
+
+		if (c3cn->wr_avail < wrs_needed) {
+			c3cn_tx_debug("c3cn 0x%p, skb len %u/%u, frag %u, "
+				      "wr %d < %u.\n",
+				      c3cn, skb->len, skb->data_len, frags,
+				      wrs_needed, c3cn->wr_avail);
+			break;
+		}
+
+		__skb_unlink(skb, &c3cn->write_queue);
+		skb->priority = CPL_PRIORITY_DATA;
+		skb->csum = wrs_needed;	/* remember this until the WR_ACK */
+		c3cn->wr_avail -= wrs_needed;
+		c3cn->wr_unacked += wrs_needed;
+		enqueue_wr(c3cn, skb);
+
+		if (likely(CXGB3_SKB_CB(skb)->flags & C3CB_FLAG_NEED_HDR)) {
+			len += ulp_extra_len(skb);
+			make_tx_data_wr(c3cn, skb, len);
+			c3cn->snd_nxt += len;
+			if ((req_completion
+			     && c3cn->wr_unacked == wrs_needed)
+			    || (CXGB3_SKB_CB(skb)->flags & C3CB_FLAG_COMPL)
+			    || c3cn->wr_unacked >= c3cn->wr_max / 2) {
+				struct work_request_hdr *wr = cplhdr(skb);
+
+				wr->wr_hi |= htonl(F_WR_COMPL);
+				c3cn->wr_unacked = 0;
+			}
+			CXGB3_SKB_CB(skb)->flags &= ~C3CB_FLAG_NEED_HDR;
+		}
+
+		total_size += skb->truesize;
+		set_arp_failure_handler(skb, arp_failure_discard);
+		l2t_send(cdev, skb, c3cn->l2t);
+	}
+	return total_size;
+}
+
+/*
+ * process_cpl_msg: -> host
+ * Top-level CPL message processing used by most CPL messages that
+ * pertain to connections.
+ */
+static inline void process_cpl_msg(void (*fn)(struct s3_conn *,
+					      struct sk_buff *),
+				   struct s3_conn *c3cn,
+				   struct sk_buff *skb)
+{
+	spin_lock_bh(&c3cn->lock);
+	fn(c3cn, skb);
+	spin_unlock_bh(&c3cn->lock);
+}
+
+/*
+ * process_cpl_msg_ref: -> host
+ * Similar to process_cpl_msg() but takes an extra connection reference around
+ * the call to the handler.  Should be used if the handler may drop a
+ * connection reference.
+ */
+static inline void process_cpl_msg_ref(void (*fn) (struct s3_conn *,
+						   struct sk_buff *),
+				       struct s3_conn *c3cn,
+				       struct sk_buff *skb)
+{
+	c3cn_hold(c3cn);
+	process_cpl_msg(fn, c3cn, skb);
+	c3cn_put(c3cn);
+}
+
+/*
+ * Process a CPL_ACT_ESTABLISH message: -> host
+ * Updates connection state from an active establish CPL message.  Runs with
+ * the connection lock held.
+ */
+
+static inline void s3_free_atid(struct t3cdev *cdev, unsigned int tid)
+{
+	struct s3_conn *c3cn = cxgb3_free_atid(cdev, tid);
+	if (c3cn)
+		c3cn_put(c3cn);
+}
+
+static void c3cn_established(struct s3_conn *c3cn, u32 snd_isn,
+			     unsigned int opt)
+{
+	c3cn_conn_debug("c3cn 0x%p, state %u.\n", c3cn, c3cn->state);
+
+	c3cn->write_seq = c3cn->snd_nxt = c3cn->snd_una = snd_isn;
+
+	/*
+	 * Causes the first RX_DATA_ACK to supply any Rx credits we couldn't
+	 * pass through opt0.
+	 */
+	if (cxgb3_rcv_win > (M_RCV_BUFSIZ << 10))
+		c3cn->rcv_wup -= cxgb3_rcv_win - (M_RCV_BUFSIZ << 10);
+
+	dst_confirm(c3cn->dst_cache);
+
+	smp_mb();
+
+	c3cn_set_state(c3cn, C3CN_STATE_ESTABLISHED);
+}
+
+static void process_act_establish(struct s3_conn *c3cn, struct sk_buff *skb)
+{
+	struct cpl_act_establish *req = cplhdr(skb);
+	u32 rcv_isn = ntohl(req->rcv_isn);	/* real RCV_ISN + 1 */
+
+	c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
+			c3cn, c3cn->state, c3cn->flags);
+
+	if (unlikely(c3cn->state != C3CN_STATE_CONNECTING))
+		cxgb3i_log_error("TID %u expected SYN_SENT, got EST., s %u\n",
+				 c3cn->tid, c3cn->state);
+
+	c3cn->copied_seq = c3cn->rcv_wup = c3cn->rcv_nxt = rcv_isn;
+	c3cn_established(c3cn, ntohl(req->snd_isn), ntohs(req->tcp_opt));
+
+	__kfree_skb(skb);
+
+	if (unlikely(c3cn_flag(c3cn, C3CN_ACTIVE_CLOSE_NEEDED)))
+		/* upper layer has requested closing */
+		send_abort_req(c3cn);
+	else if (c3cn_push_tx_frames(c3cn, 1))
+		cxgb3i_conn_tx_open(c3cn);
+}
+
+static int do_act_establish(struct t3cdev *cdev, struct sk_buff *skb,
+			    void *ctx)
+{
+	struct cpl_act_establish *req = cplhdr(skb);
+	unsigned int tid = GET_TID(req);
+	unsigned int atid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
+	struct s3_conn *c3cn = ctx;
+	struct cxgb3i_sdev_data *cdata = CXGB3_SDEV_DATA(cdev);
+
+	c3cn_conn_debug("rcv, tid 0x%x, c3cn 0x%p, s %u, f 0x%lx.\n",
+			tid, c3cn, c3cn->state, c3cn->flags);
+
+	c3cn->tid = tid;
+	c3cn_hold(c3cn);
+	cxgb3_insert_tid(cdata->cdev, cdata->client, c3cn, tid);
+	s3_free_atid(cdev, atid);
+
+	c3cn->qset = G_QNUM(ntohl(skb->csum));
+
+	process_cpl_msg(process_act_establish, c3cn, skb);
+	return 0;
+}
+
+/*
+ * Process a CPL_ACT_OPEN_RPL message: -> host
+ * Handle active open failures.
+ */
+static int act_open_rpl_status_to_errno(int status)
+{
+	switch (status) {
+	case CPL_ERR_CONN_RESET:
+		return ECONNREFUSED;
+	case CPL_ERR_ARP_MISS:
+		return EHOSTUNREACH;
+	case CPL_ERR_CONN_TIMEDOUT:
+		return ETIMEDOUT;
+	case CPL_ERR_TCAM_FULL:
+		return ENOMEM;
+	case CPL_ERR_CONN_EXIST:
+		cxgb3i_log_error("ACTIVE_OPEN_RPL: 4-tuple in use\n");
+		return EADDRINUSE;
+	default:
+		return EIO;
+	}
+}
+
+static void act_open_retry_timer(unsigned long data)
+{
+	struct sk_buff *skb;
+	struct s3_conn *c3cn = (struct s3_conn *)data;
+
+	c3cn_conn_debug("c3cn 0x%p, state %u.\n", c3cn, c3cn->state);
+
+	spin_lock_bh(&c3cn->lock);
+	skb = alloc_skb(sizeof(struct cpl_act_open_req), GFP_ATOMIC);
+	if (!skb)
+		fail_act_open(c3cn, ENOMEM);
+	else {
+		skb->sk = (struct sock *)c3cn;
+		set_arp_failure_handler(skb, act_open_req_arp_failure);
+		make_act_open_req(c3cn, skb, c3cn->tid, c3cn->l2t);
+		l2t_send(c3cn->cdev, skb, c3cn->l2t);
+	}
+	spin_unlock_bh(&c3cn->lock);
+	c3cn_put(c3cn);
+}
+
+static void process_act_open_rpl(struct s3_conn *c3cn, struct sk_buff *skb)
+{
+	struct cpl_act_open_rpl *rpl = cplhdr(skb);
+
+	c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
+			c3cn, c3cn->state, c3cn->flags);
+
+	if (rpl->status == CPL_ERR_CONN_EXIST &&
+	    c3cn->retry_timer.function != act_open_retry_timer) {
+		c3cn->retry_timer.function = act_open_retry_timer;
+		if (!mod_timer(&c3cn->retry_timer, jiffies + HZ / 2))
+			c3cn_hold(c3cn);
+	} else
+		fail_act_open(c3cn, act_open_rpl_status_to_errno(rpl->status));
+	__kfree_skb(skb);
+}
+
+static int do_act_open_rpl(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
+{
+	struct s3_conn *c3cn = ctx;
+	struct cpl_act_open_rpl *rpl = cplhdr(skb);
+
+	c3cn_conn_debug("rcv, status 0x%x, c3cn 0x%p, s %u, f 0x%lx.\n",
+			rpl->status, c3cn, c3cn->state, c3cn->flags);
+
+	if (rpl->status != CPL_ERR_TCAM_FULL &&
+	    rpl->status != CPL_ERR_CONN_EXIST &&
+	    rpl->status != CPL_ERR_ARP_MISS)
+		cxgb3_queue_tid_release(cdev, GET_TID(rpl));
+
+	process_cpl_msg_ref(process_act_open_rpl, c3cn, skb);
+	return 0;
+}
+
+/*
+ * Process PEER_CLOSE CPL messages: -> host
+ * Handle peer FIN.
+ */
+static void process_peer_close(struct s3_conn *c3cn, struct sk_buff *skb)
+{
+	c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
+			c3cn, c3cn->state, c3cn->flags);
+
+	if (c3cn_flag(c3cn, C3CN_ABORT_RPL_PENDING))
+		goto out;
+
+	switch (c3cn->state) {
+	case C3CN_STATE_ESTABLISHED:
+		c3cn_set_state(c3cn, C3CN_STATE_PASSIVE_CLOSE);
+		break;
+	case C3CN_STATE_ACTIVE_CLOSE:
+		c3cn_set_state(c3cn, C3CN_STATE_CLOSE_WAIT_2);
+		break;
+	case C3CN_STATE_CLOSE_WAIT_1:
+		c3cn_closed(c3cn);
+		break;
+	case C3CN_STATE_ABORTING:
+		break;
+	default:
+		cxgb3i_log_error("%s: peer close, TID %u in bad state %u\n",
+				 c3cn->cdev->name, c3cn->tid, c3cn->state);
+	}
+
+	cxgb3i_conn_closing(c3cn);
+out:
+	__kfree_skb(skb);
+}
+
+static int do_peer_close(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
+{
+	struct s3_conn *c3cn = ctx;
+
+	c3cn_conn_debug("rcv, c3cn 0x%p, s %u, f 0x%lx.\n",
+			c3cn, c3cn->state, c3cn->flags);
+	process_cpl_msg_ref(process_peer_close, c3cn, skb);
+	return 0;
+}
+
+/*
+ * Process CLOSE_CONN_RPL CPL message: -> host
+ * Process a peer ACK to our FIN.
+ */
+static void process_close_con_rpl(struct s3_conn *c3cn, struct sk_buff *skb)
+{
+	struct cpl_close_con_rpl *rpl = cplhdr(skb);
+
+	c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
+			c3cn, c3cn->state, c3cn->flags);
+
+	c3cn->snd_una = ntohl(rpl->snd_nxt) - 1;	/* exclude FIN */
+
+	if (c3cn_flag(c3cn, C3CN_ABORT_RPL_PENDING))
+		goto out;
+
+	switch (c3cn->state) {
+	case C3CN_STATE_ACTIVE_CLOSE:
+		c3cn_set_state(c3cn, C3CN_STATE_CLOSE_WAIT_1);
+		break;
+	case C3CN_STATE_CLOSE_WAIT_1:
+	case C3CN_STATE_CLOSE_WAIT_2:
+		c3cn_closed(c3cn);
+		break;
+	case C3CN_STATE_ABORTING:
+		break;
+	default:
+		cxgb3i_log_error("%s: close_rpl, TID %u in bad state %u\n",
+				 c3cn->cdev->name, c3cn->tid, c3cn->state);
+	}
+
+out:
+	kfree_skb(skb);
+}
+
+static int do_close_con_rpl(struct t3cdev *cdev, struct sk_buff *skb,
+			    void *ctx)
+{
+	struct s3_conn *c3cn = ctx;
+
+	c3cn_conn_debug("rcv, c3cn 0x%p, s %u, f 0x%lx.\n",
+			 c3cn, c3cn->state, c3cn->flags);
+
+	process_cpl_msg_ref(process_close_con_rpl, c3cn, skb);
+	return 0;
+}
+
+/*
+ * Process ABORT_REQ_RSS CPL message: -> host
+ * Process abort requests.  If we are waiting for an ABORT_RPL we ignore this
+ * request except that we need to reply to it.
+ */
+
+static int abort_status_to_errno(struct s3_conn *c3cn, int abort_reason,
+				 int *need_rst)
+{
+	switch (abort_reason) {
+	case CPL_ERR_BAD_SYN: /* fall through */
+	case CPL_ERR_CONN_RESET:
+		return c3cn->state > C3CN_STATE_ESTABLISHED ?
+			EPIPE : ECONNRESET;
+	case CPL_ERR_XMIT_TIMEDOUT:
+	case CPL_ERR_PERSIST_TIMEDOUT:
+	case CPL_ERR_FINWAIT2_TIMEDOUT:
+	case CPL_ERR_KEEPALIVE_TIMEDOUT:
+		return ETIMEDOUT;
+	default:
+		return EIO;
+	}
+}
+
+static void process_abort_req(struct s3_conn *c3cn, struct sk_buff *skb)
+{
+	int rst_status = CPL_ABORT_NO_RST;
+	const struct cpl_abort_req_rss *req = cplhdr(skb);
+
+	c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
+			c3cn, c3cn->state, c3cn->flags);
+
+	if (!c3cn_flag(c3cn, C3CN_ABORT_REQ_RCVD)) {
+		c3cn_set_flag(c3cn, C3CN_ABORT_REQ_RCVD);
+		c3cn_set_state(c3cn, C3CN_STATE_ABORTING);
+		__kfree_skb(skb);
+		return;
+	}
+
+	c3cn_clear_flag(c3cn, C3CN_ABORT_REQ_RCVD);
+	send_abort_rpl(c3cn, rst_status);
+
+	if (!c3cn_flag(c3cn, C3CN_ABORT_RPL_PENDING)) {
+		c3cn->err =
+		    abort_status_to_errno(c3cn, req->status, &rst_status);
+		c3cn_closed(c3cn);
+	}
+}
+
+static int do_abort_req(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
+{
+	const struct cpl_abort_req_rss *req = cplhdr(skb);
+	struct s3_conn *c3cn = ctx;
+
+	c3cn_conn_debug("rcv, c3cn 0x%p, s 0x%x, f 0x%lx.\n",
+			c3cn, c3cn->state, c3cn->flags);
+
+	if (req->status == CPL_ERR_RTX_NEG_ADVICE ||
+	    req->status == CPL_ERR_PERSIST_NEG_ADVICE) {
+		__kfree_skb(skb);
+		return 0;
+	}
+
+	process_cpl_msg_ref(process_abort_req, c3cn, skb);
+	return 0;
+}
+
+/*
+ * Process ABORT_RPL_RSS CPL message: -> host
+ * Process abort replies.  We only process these messages if we anticipate
+ * them as the coordination between SW and HW in this area is somewhat lacking
+ * and sometimes we get ABORT_RPLs after we are done with the connection that
+ * originated the ABORT_REQ.
+ */
+static void process_abort_rpl(struct s3_conn *c3cn, struct sk_buff *skb)
+{
+	c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
+			c3cn, c3cn->state, c3cn->flags);
+
+	if (c3cn_flag(c3cn, C3CN_ABORT_RPL_PENDING)) {
+		if (!c3cn_flag(c3cn, C3CN_ABORT_RPL_RCVD))
+			c3cn_set_flag(c3cn, C3CN_ABORT_RPL_RCVD);
+		else {
+			c3cn_clear_flag(c3cn, C3CN_ABORT_RPL_RCVD);
+			c3cn_clear_flag(c3cn, C3CN_ABORT_RPL_PENDING);
+			if (c3cn_flag(c3cn, C3CN_ABORT_REQ_RCVD))
+				cxgb3i_log_error("%s tid %u, ABORT_RPL_RSS\n",
+						 c3cn->cdev->name, c3cn->tid);
+			c3cn_closed(c3cn);
+		}
+	}
+	__kfree_skb(skb);
+}
+
+static int do_abort_rpl(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
+{
+	struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
+	struct s3_conn *c3cn = ctx;
+
+	c3cn_conn_debug("rcv, status 0x%x, c3cn 0x%p, s %u, 0x%lx.\n",
+			rpl->status, c3cn, c3cn ? c3cn->state : 0,
+			c3cn ? c3cn->flags : 0UL);
+
+	/*
+	 * Ignore replies to post-close aborts indicating that the abort was
+	 * requested too late.  These connections are terminated when we get
+	 * PEER_CLOSE or CLOSE_CON_RPL and by the time the abort_rpl_rss
+	 * arrives the TID is either no longer used or it has been recycled.
+	 */
+	if (rpl->status == CPL_ERR_ABORT_FAILED)
+		goto discard;
+
+	/*
+	 * Sometimes we've already closed the connection, e.g., a post-close
+	 * abort races with ABORT_REQ_RSS, the latter frees the connection
+	 * expecting the ABORT_REQ will fail with CPL_ERR_ABORT_FAILED,
+	 * but FW turns the ABORT_REQ into a regular one and so we get
+	 * ABORT_RPL_RSS with status 0 and no connection.
+	 */
+	if (!c3cn)
+		goto discard;
+
+	process_cpl_msg_ref(process_abort_rpl, c3cn, skb);
+	return 0;
+
+discard:
+	__kfree_skb(skb);
+	return 0;
+}
+
+/*
+ * Process RX_ISCSI_HDR CPL message: -> host
+ * Handle received PDUs; the payload could be DDP'ed. If not, the payload
+ * follows the bhs.
+ */
+static void process_rx_iscsi_hdr(struct s3_conn *c3cn, struct sk_buff *skb)
+{
+	struct cpl_iscsi_hdr *hdr_cpl = cplhdr(skb);
+	struct cpl_iscsi_hdr_norss data_cpl;
+	struct cpl_rx_data_ddp_norss ddp_cpl;
+	unsigned int hdr_len, data_len, status;
+	unsigned int len;
+	int err;
+
+	if (unlikely(c3cn->state >= C3CN_STATE_PASSIVE_CLOSE)) {
+		if (c3cn->state != C3CN_STATE_ABORTING)
+			send_abort_req(c3cn);
+		__kfree_skb(skb);
+		return;
+	}
+
+	CXGB3_SKB_CB(skb)->seq = ntohl(hdr_cpl->seq);
+	CXGB3_SKB_CB(skb)->flags = 0;
+
+	skb_reset_transport_header(skb);
+	__skb_pull(skb, sizeof(struct cpl_iscsi_hdr));
+
+	len = hdr_len = ntohs(hdr_cpl->len);
+	/* msg coalesce is off or not enough data received */
+	if (skb->len <= hdr_len) {
+		cxgb3i_log_error("%s: TID %u, ISCSI_HDR, skb len %u < %u.\n",
+				 c3cn->cdev->name, c3cn->tid,
+				 skb->len, hdr_len);
+		goto abort_conn;
+	}
+
+	err = skb_copy_bits(skb, skb->len - sizeof(ddp_cpl), &ddp_cpl,
+			    sizeof(ddp_cpl));
+	if (err < 0)
+		goto abort_conn;
+
+	skb_ulp_mode(skb) = ULP2_FLAG_DATA_READY;
+	skb_ulp_pdulen(skb) = ntohs(ddp_cpl.len);
+	skb_ulp_ddigest(skb) = ntohl(ddp_cpl.ulp_crc);
+	status = ntohl(ddp_cpl.ddp_status);
+
+	c3cn_rx_debug("rx skb 0x%p, len %u, pdulen %u, ddp status 0x%x.\n",
+		      skb, skb->len, skb_ulp_pdulen(skb), status);
+
+	if (status & (1 << RX_DDP_STATUS_HCRC_SHIFT))
+		skb_ulp_mode(skb) |= ULP2_FLAG_HCRC_ERROR;
+	if (status & (1 << RX_DDP_STATUS_DCRC_SHIFT))
+		skb_ulp_mode(skb) |= ULP2_FLAG_DCRC_ERROR;
+	if (status & (1 << RX_DDP_STATUS_PAD_SHIFT))
+		skb_ulp_mode(skb) |= ULP2_FLAG_PAD_ERROR;
+
+	if (skb->len > (hdr_len + sizeof(ddp_cpl))) {
+		err = skb_copy_bits(skb, hdr_len, &data_cpl, sizeof(data_cpl));
+		if (err < 0)
+			goto abort_conn;
+		data_len = ntohs(data_cpl.len);
+		len += sizeof(data_cpl) + data_len;
+	} else if (status & (1 << RX_DDP_STATUS_DDP_SHIFT))
+		skb_ulp_mode(skb) |= ULP2_FLAG_DATA_DDPED;
+
+	c3cn->rcv_nxt = ntohl(ddp_cpl.seq) + skb_ulp_pdulen(skb);
+	__pskb_trim(skb, len);
+	__skb_queue_tail(&c3cn->receive_queue, skb);
+	cxgb3i_conn_pdu_ready(c3cn);
+
+	return;
+
+abort_conn:
+	send_abort_req(c3cn);
+	__kfree_skb(skb);
+}
+
+static int do_iscsi_hdr(struct t3cdev *t3dev, struct sk_buff *skb, void *ctx)
+{
+	struct s3_conn *c3cn = ctx;
+
+	process_cpl_msg(process_rx_iscsi_hdr, c3cn, skb);
+	return 0;
+}
+
+/*
+ * Process TX_DATA_ACK CPL messages: -> host
+ * Process an acknowledgment of WR completion.  Advance snd_una and send the
+ * next batch of work requests from the write queue.
+ */
+static void process_wr_ack(struct s3_conn *c3cn, struct sk_buff *skb)
+{
+	struct cpl_wr_ack *hdr = cplhdr(skb);
+	unsigned int credits = ntohs(hdr->credits);
+	u32 snd_una = ntohl(hdr->snd_una);
+
+	c3cn->wr_avail += credits;
+	if (c3cn->wr_unacked > c3cn->wr_max - c3cn->wr_avail)
+		c3cn->wr_unacked = c3cn->wr_max - c3cn->wr_avail;
+
+	while (credits) {
+		struct sk_buff *p = peek_wr(c3cn);
+
+		if (unlikely(!p)) {
+			cxgb3i_log_error("%u WR_ACK credits for TID %u with "
+					 "nothing pending, state %u\n",
+					 credits, c3cn->tid, c3cn->state);
+			break;
+		}
+		if (unlikely(credits < p->csum)) {
+			p->csum -= credits;
+			break;
+		} else {
+			dequeue_wr(c3cn);
+			credits -= p->csum;
+			free_wr_skb(p);
+		}
+	}
+
+	if (unlikely(before(snd_una, c3cn->snd_una)))
+		goto out_free;
+
+	if (c3cn->snd_una != snd_una) {
+		c3cn->snd_una = snd_una;
+		dst_confirm(c3cn->dst_cache);
+	}
+
+	if (skb_queue_len(&c3cn->write_queue) && c3cn_push_tx_frames(c3cn, 0))
+		cxgb3i_conn_tx_open(c3cn);
+out_free:
+	__kfree_skb(skb);
+}
+
+static int do_wr_ack(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
+{
+	struct s3_conn *c3cn = ctx;
+
+	process_cpl_msg(process_wr_ack, c3cn, skb);
+	return 0;
+}
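+
+/*
+ * Example of the WR accounting above (illustrative): each pending skb
+ * remembers its WR cost in skb->csum (set by c3cn_push_tx_frames). A
+ * WR_ACK carrying, say, 3 credits first retires a 2-WR skb completely,
+ * then decrements the next skb's csum by the remaining 1 credit and
+ * leaves it at the head of the pending list.
+ */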
+
+/*
+ * For each connection, pre-allocate the skbs needed for close/abort requests,
+ * so that we can service the request right away.
+ */
+static void c3cn_free_cpl_skbs(struct s3_conn *c3cn)
+{
+	if (c3cn->cpl_close)
+		kfree_skb(c3cn->cpl_close);
+	if (c3cn->cpl_abort_req)
+		kfree_skb(c3cn->cpl_abort_req);
+	if (c3cn->cpl_abort_rpl)
+		kfree_skb(c3cn->cpl_abort_rpl);
+}
+
+static int c3cn_alloc_cpl_skbs(struct s3_conn *c3cn)
+{
+	c3cn->cpl_close = alloc_skb(sizeof(struct cpl_close_con_req),
+				   GFP_KERNEL);
+	if (!c3cn->cpl_close)
+		return -ENOMEM;
+	skb_put(c3cn->cpl_close, sizeof(struct cpl_close_con_req));
+
+	c3cn->cpl_abort_req = alloc_skb(sizeof(struct cpl_abort_req),
+					GFP_KERNEL);
+	if (!c3cn->cpl_abort_req)
+		goto free_cpl_skbs;
+	skb_put(c3cn->cpl_abort_req, sizeof(struct cpl_abort_req));
+
+	c3cn->cpl_abort_rpl = alloc_skb(sizeof(struct cpl_abort_rpl),
+					GFP_KERNEL);
+	if (!c3cn->cpl_abort_rpl)
+		goto free_cpl_skbs;
+	skb_put(c3cn->cpl_abort_rpl, sizeof(struct cpl_abort_rpl));
+
+	return 0;
+
+free_cpl_skbs:
+	c3cn_free_cpl_skbs(c3cn);
+	return -ENOMEM;
+}
+
+/**
+ * c3cn_release_offload_resources - release offload resources
+ * @c3cn: the offloaded iscsi tcp connection.
+ *
+ * Release resources held by an offload connection (TID, L2T entry, etc.)
+ */
+static void c3cn_release_offload_resources(struct s3_conn *c3cn)
+{
+	struct t3cdev *cdev = c3cn->cdev;
+	unsigned int tid = c3cn->tid;
+
+	if (!cdev)
+		return;
+
+	c3cn->qset = 0;
+
+	c3cn_free_cpl_skbs(c3cn);
+
+	if (c3cn->wr_avail != c3cn->wr_max) {
+		purge_wr_queue(c3cn);
+		reset_wr_list(c3cn);
+	}
+
+	if (c3cn->l2t) {
+		l2t_release(L2DATA(cdev), c3cn->l2t);
+		c3cn->l2t = NULL;
+	}
+
+	if (c3cn->state == C3CN_STATE_CONNECTING) /* we have ATID */
+		s3_free_atid(cdev, tid);
+	else {		/* we have TID */
+		cxgb3_remove_tid(cdev, (void *)c3cn, tid);
+		c3cn_put(c3cn);
+	}
+
+	c3cn->cdev = NULL;
+}
+
+/**
+ * cxgb3i_c3cn_create - allocate and initialize an s3_conn structure
+ *
+ * Returns the allocated s3_conn structure, or NULL on failure.
+ */
+struct s3_conn *cxgb3i_c3cn_create(void)
+{
+	struct s3_conn *c3cn;
+
+	c3cn = kzalloc(sizeof(*c3cn), GFP_KERNEL);
+	if (!c3cn)
+		return NULL;
+
+	/* pre-allocate close/abort cpl, so we don't need to wait for memory
+	   when close/abort is requested. */
+	if (c3cn_alloc_cpl_skbs(c3cn) < 0)
+		goto free_c3cn;
+
+	c3cn_conn_debug("alloc c3cn 0x%p.\n", c3cn);
+
+	c3cn->flags = 0;
+	spin_lock_init(&c3cn->lock);
+	atomic_set(&c3cn->refcnt, 1);
+	skb_queue_head_init(&c3cn->receive_queue);
+	skb_queue_head_init(&c3cn->write_queue);
+	setup_timer(&c3cn->retry_timer, NULL, (unsigned long)c3cn);
+	rwlock_init(&c3cn->callback_lock);
+
+	return c3cn;
+
+free_c3cn:
+	kfree(c3cn);
+	return NULL;
+}
+
+static void c3cn_active_close(struct s3_conn *c3cn)
+{
+	int data_lost;
+	int close_req = 0;
+
+	c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
+			 c3cn, c3cn->state, c3cn->flags);
+
+	dst_confirm(c3cn->dst_cache);
+
+	c3cn_hold(c3cn);
+	spin_lock_bh(&c3cn->lock);
+
+	data_lost = skb_queue_len(&c3cn->receive_queue);
+	__skb_queue_purge(&c3cn->receive_queue);
+
+	switch (c3cn->state) {
+	case C3CN_STATE_CLOSED:
+	case C3CN_STATE_ACTIVE_CLOSE:
+	case C3CN_STATE_CLOSE_WAIT_1:
+	case C3CN_STATE_CLOSE_WAIT_2:
+	case C3CN_STATE_ABORTING:
+		/* nothing needs to be done */
+		break;
+	case C3CN_STATE_CONNECTING:
+		/* defer until cpl_act_open_rpl or cpl_act_establish */
+		c3cn_set_flag(c3cn, C3CN_ACTIVE_CLOSE_NEEDED);
+		break;
+	case C3CN_STATE_ESTABLISHED:
+		close_req = 1;
+		c3cn_set_state(c3cn, C3CN_STATE_ACTIVE_CLOSE);
+		break;
+	case C3CN_STATE_PASSIVE_CLOSE:
+		close_req = 1;
+		c3cn_set_state(c3cn, C3CN_STATE_CLOSE_WAIT_2);
+		break;
+	}
+
+	if (close_req) {
+		if (data_lost)
+			/* Unread data was tossed, zap the connection. */
+			send_abort_req(c3cn);
+		else
+			send_close_req(c3cn);
+	}
+
+	spin_unlock_bh(&c3cn->lock);
+	c3cn_put(c3cn);
+}
+
+/**
+ * cxgb3i_c3cn_release - close and release an iscsi tcp connection and any
+ * 	resources held
+ * @c3cn: the iscsi tcp connection
+ */
+void cxgb3i_c3cn_release(struct s3_conn *c3cn)
+{
+	c3cn_conn_debug("c3cn 0x%p, s %u, f 0x%lx.\n",
+			c3cn, c3cn->state, c3cn->flags);
+	if (likely(c3cn->state != C3CN_STATE_CONNECTING))
+		c3cn_active_close(c3cn);
+	else
+		c3cn_set_flag(c3cn, C3CN_ACTIVE_CLOSE_NEEDED);
+	c3cn_put(c3cn);
+}
+
+static int is_cxgb3_dev(struct net_device *dev)
+{
+	struct cxgb3i_sdev_data *cdata;
+
+	write_lock(&cdata_rwlock);
+	list_for_each_entry(cdata, &cdata_list, list) {
+		struct adap_ports *ports = &cdata->ports;
+		int i;
+
+		for (i = 0; i < ports->nports; i++)
+			if (dev == ports->lldevs[i]) {
+				write_unlock(&cdata_rwlock);
+				return 1;
+			}
+	}
+	write_unlock(&cdata_rwlock);
+	return 0;
+}
+
+/**
+ * cxgb3_egress_dev - return the cxgb3 egress device
+ * @root_dev: the root device anchoring the search
+ * @c3cn: the connection used to determine egress port in bonding mode
+ * @context: in bonding mode, indicates a connection set up or failover
+ *
+ * Return egress device or NULL if the egress device isn't one of our ports.
+ */
+static struct net_device *cxgb3_egress_dev(struct net_device *root_dev,
+					   struct s3_conn *c3cn,
+					   int context)
+{
+	while (root_dev) {
+		if (root_dev->priv_flags & IFF_802_1Q_VLAN)
+			root_dev = vlan_dev_real_dev(root_dev);
+		else if (is_cxgb3_dev(root_dev))
+			return root_dev;
+		else
+			return NULL;
+	}
+	return NULL;
+}
+
+static struct rtable *find_route(__be32 saddr, __be32 daddr,
+				 __be16 sport, __be16 dport)
+{
+	struct rtable *rt;
+	struct flowi fl = {
+		.oif = 0,
+		.nl_u = {
+			 .ip4_u = {
+				   .daddr = daddr,
+				   .saddr = saddr,
+				   .tos = 0 } },
+		.proto = IPPROTO_TCP,
+		.uli_u = {
+			  .ports = {
+				    .sport = sport,
+				    .dport = dport } } };
+
+	if (ip_route_output_flow(&init_net, &rt, &fl, NULL, 0))
+		return NULL;
+	return rt;
+}
+
+/*
+ * Assign offload parameters to some connection fields.
+ */
+static void init_offload_conn(struct s3_conn *c3cn,
+			      struct t3cdev *cdev,
+			      struct dst_entry *dst)
+{
+	BUG_ON(c3cn->cdev != cdev);
+	c3cn->wr_max = c3cn->wr_avail = T3C_DATA(cdev)->max_wrs;
+	c3cn->wr_unacked = 0;
+	c3cn->mss_idx = select_mss(c3cn, dst_mtu(dst));
+
+	reset_wr_list(c3cn);
+}
+
+static int initiate_act_open(struct s3_conn *c3cn, struct net_device *dev)
+{
+	struct cxgb3i_sdev_data *cdata = NDEV2CDATA(dev);
+	struct t3cdev *cdev = cdata->cdev;
+	struct dst_entry *dst = c3cn->dst_cache;
+	struct sk_buff *skb;
+
+	c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
+			c3cn, c3cn->state, c3cn->flags);
+	/*
+	 * Initialize connection data.  Note that the flags and ULP mode are
+	 * initialized higher up ...
+	 */
+	c3cn->dev = dev;
+	c3cn->cdev = cdev;
+	c3cn->tid = cxgb3_alloc_atid(cdev, cdata->client, c3cn);
+	if (c3cn->tid < 0)
+		goto out_err;
+
+	c3cn->qset = 0;
+	c3cn->l2t = t3_l2t_get(cdev, dst->neighbour, dev);
+	if (!c3cn->l2t)
+		goto free_tid;
+
+	skb = alloc_skb(sizeof(struct cpl_act_open_req), GFP_KERNEL);
+	if (!skb)
+		goto free_l2t;
+
+	skb->sk = (struct sock *)c3cn;
+	set_arp_failure_handler(skb, act_open_req_arp_failure);
+
+	c3cn_hold(c3cn);
+
+	init_offload_conn(c3cn, cdev, dst);
+	c3cn->err = 0;
+
+	make_act_open_req(c3cn, skb, c3cn->tid, c3cn->l2t);
+	l2t_send(cdev, skb, c3cn->l2t);
+	return 0;
+
+free_l2t:
+	l2t_release(L2DATA(cdev), c3cn->l2t);
+free_tid:
+	s3_free_atid(cdev, c3cn->tid);
+	c3cn->tid = 0;
+out_err:
+	return -1;
+}
+
+
+/**
+ * cxgb3i_c3cn_connect - initiates an iscsi tcp connection to a given address
+ * @c3cn: the iscsi tcp connection
+ * @usin: destination address
+ *
+ * return 0 if active open request is sent, < 0 otherwise.
+ */
+int cxgb3i_c3cn_connect(struct s3_conn *c3cn, struct sockaddr_in *usin)
+{
+	struct rtable *rt;
+	struct net_device *dev;
+	struct cxgb3i_sdev_data *cdata;
+	struct t3cdev *cdev;
+	__be32 sipv4;
+	int err;
+
+	if (usin->sin_family != AF_INET)
+		return -EAFNOSUPPORT;
+
+	c3cn->daddr.sin_port = usin->sin_port;
+	c3cn->daddr.sin_addr.s_addr = usin->sin_addr.s_addr;
+
+	rt = find_route(c3cn->saddr.sin_addr.s_addr,
+			c3cn->daddr.sin_addr.s_addr,
+			c3cn->saddr.sin_port,
+			c3cn->daddr.sin_port);
+	if (rt == NULL) {
+		c3cn_conn_debug("NO route to 0x%x, port %u.\n",
+				c3cn->daddr.sin_addr.s_addr,
+				ntohs(c3cn->daddr.sin_port));
+		return -ENETUNREACH;
+	}
+
+	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
+		c3cn_conn_debug("multi-cast route to 0x%x, port %u.\n",
+				c3cn->daddr.sin_addr.s_addr,
+				ntohs(c3cn->daddr.sin_port));
+		ip_rt_put(rt);
+		return -ENETUNREACH;
+	}
+
+	if (!c3cn->saddr.sin_addr.s_addr)
+		c3cn->saddr.sin_addr.s_addr = rt->rt_src;
+
+	/* now commit destination to connection */
+	c3cn->dst_cache = &rt->u.dst;
+
+	/* try to establish an offloaded connection */
+	dev = cxgb3_egress_dev(c3cn->dst_cache->dev, c3cn, 0);
+	if (dev == NULL) {
+		c3cn_conn_debug("c3cn 0x%p, egress dev NULL.\n", c3cn);
+		return -ENETUNREACH;
+	}
+	cdata = NDEV2CDATA(dev);
+	cdev = cdata->cdev;
+
+	/* get a source port if one hasn't been provided */
+	err = c3cn_get_port(c3cn, cdata);
+	if (err)
+		return err;
+
+	c3cn_conn_debug("c3cn 0x%p get port %u.\n",
+			c3cn, ntohs(c3cn->saddr.sin_port));
+
+	sipv4 = cxgb3i_get_private_ipv4addr(dev);
+	if (!sipv4) {
+		c3cn_conn_debug("c3cn 0x%p, iscsi ip not configured.\n", c3cn);
+		sipv4 = c3cn->saddr.sin_addr.s_addr;
+		cxgb3i_set_private_ipv4addr(dev, sipv4);
+	} else
+		c3cn->saddr.sin_addr.s_addr = sipv4;
+
+	c3cn_conn_debug("c3cn 0x%p, %u.%u.%u.%u,%u-%u.%u.%u.%u,%u SYN_SENT.\n",
+			c3cn, NIPQUAD(c3cn->saddr.sin_addr.s_addr),
+			ntohs(c3cn->saddr.sin_port),
+			NIPQUAD(c3cn->daddr.sin_addr.s_addr),
+			ntohs(c3cn->daddr.sin_port));
+
+	c3cn_set_state(c3cn, C3CN_STATE_CONNECTING);
+	if (!initiate_act_open(c3cn, dev))
+		return 0;
+
+	/*
+	 * If we get here, we don't have an offload connection so simply
+	 * return a failure.
+	 */
+	err = -ENOTSUPP;
+
+	/*
+	 * This trashes the connection and releases the local port,
+	 * if necessary.
+	 */
+	c3cn_conn_debug("c3cn 0x%p -> CLOSED.\n", c3cn);
+	c3cn_set_state(c3cn, C3CN_STATE_CLOSED);
+	ip_rt_put(rt);
+	c3cn_put_port(c3cn);
+	c3cn->daddr.sin_port = 0;
+	return err;
+}
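+/*
+ * Example (illustrative sketch, not part of the driver): the expected calling
+ * sequence from the iscsi glue code, with a hypothetical target address.
+ * cxgb3i_c3cn_release() tears the connection down again on failure.
+ *
+ *	struct s3_conn *c3cn = cxgb3i_c3cn_create();
+ *	struct sockaddr_in dst = {
+ *		.sin_family = AF_INET,
+ *		.sin_port = htons(3260),
+ *		.sin_addr.s_addr = htonl(0x0a000002),
+ *	};
+ *	if (c3cn && cxgb3i_c3cn_connect(c3cn, &dst) < 0)
+ *		cxgb3i_c3cn_release(c3cn);
+ */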
+
+/**
+ * cxgb3i_c3cn_rx_credits - ack received tcp data.
+ * @c3cn: iscsi tcp connection
+ * @copied: # of bytes processed
+ *
+ * Called after some received data has been read.  It returns RX credits
+ * to the HW for the amount of data processed.
+ */
+void cxgb3i_c3cn_rx_credits(struct s3_conn *c3cn, int copied)
+{
+	struct t3cdev *cdev;
+	int must_send;
+	u32 credits, dack = 0;
+
+	if (c3cn->state != C3CN_STATE_ESTABLISHED)
+		return;
+
+	credits = c3cn->copied_seq - c3cn->rcv_wup;
+	if (unlikely(!credits))
+		return;
+
+	cdev = c3cn->cdev;
+
+	if (unlikely(cxgb3_rx_credit_thres == 0))
+		return;
+
+	dack = F_RX_DACK_CHANGE | V_RX_DACK_MODE(1);
+
+	/*
+	 * For coalescing to work effectively ensure the receive window has
+	 * at least 16KB left.
+	 */
+	must_send = credits + 16384 >= cxgb3_rcv_win;
+
+	if (must_send || credits >= cxgb3_rx_credit_thres)
+		c3cn->rcv_wup += send_rx_credits(c3cn, credits, dack);
+}
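+/*
+ * Worked example (numbers hypothetical): with cxgb3_rcv_win = 256KB and
+ * cxgb3_rx_credit_thres = 10KB, processing 20KB of data yields credits =
+ * 20480 >= 10240, so the credits are returned to the HW at once; processing
+ * only 4KB defers the update until the threshold is crossed or fewer than
+ * 16KB of receive window remain.
+ */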
+
+/**
+ * cxgb3i_c3cn_send_pdus - send the skbs containing iscsi pdus
+ * @c3cn: iscsi tcp connection
+ * @skb: skb contains the iscsi pdu
+ *
+ * Add a list of skbs to a connection send queue. The skbs must comply with
+ * the max size limit of the device and have a headroom of at least
+ * TX_HEADER_LEN bytes.
+ * Return # of bytes queued.
+ */
+int cxgb3i_c3cn_send_pdus(struct s3_conn *c3cn, struct sk_buff *skb)
+{
+	struct sk_buff *next;
+	int err, copied = 0;
+
+	spin_lock_bh(&c3cn->lock);
+
+	if (c3cn->state != C3CN_STATE_ESTABLISHED) {
+		c3cn_tx_debug("c3cn 0x%p, not in est. state %u.\n",
+			      c3cn, c3cn->state);
+		err = -EAGAIN;
+		goto out_err;
+	}
+
+	err = -EPIPE;
+	if (c3cn->err) {
+		c3cn_tx_debug("c3cn 0x%p, err %d.\n", c3cn, c3cn->err);
+		goto out_err;
+	}
+
+	while (skb) {
+		int frags = skb_shinfo(skb)->nr_frags +
+				(skb->len != skb->data_len);
+
+		if (unlikely(skb_headroom(skb) < TX_HEADER_LEN)) {
+			c3cn_tx_debug("c3cn 0x%p, skb head.\n", c3cn);
+			err = -EINVAL;
+			goto out_err;
+		}
+
+		if (frags >= SKB_WR_LIST_SIZE) {
+			cxgb3i_log_error("c3cn 0x%p, tx frags %d, len %u,%u.\n",
+					 c3cn, skb_shinfo(skb)->nr_frags,
+					 skb->len, skb->data_len);
+			err = -EINVAL;
+			goto out_err;
+		}
+
+		next = skb->next;
+		skb->next = NULL;
+		skb_entail(c3cn, skb, C3CB_FLAG_NO_APPEND | C3CB_FLAG_NEED_HDR);
+		copied += skb->len;
+		c3cn->write_seq += skb->len + ulp_extra_len(skb);
+		skb = next;
+	}
+done:
+	if (likely(skb_queue_len(&c3cn->write_queue)))
+		c3cn_push_tx_frames(c3cn, 1);
+	spin_unlock_bh(&c3cn->lock);
+	return copied;
+
+out_err:
+	if (copied == 0 && err == -EPIPE)
+		copied = c3cn->err ? c3cn->err : -EPIPE;
+	goto done;
+}
+
+static void sdev_data_cleanup(struct cxgb3i_sdev_data *cdata)
+{
+	struct adap_ports *ports = &cdata->ports;
+	int i;
+
+	for (i = 0; i < ports->nports; i++)
+		NDEV2CDATA(ports->lldevs[i]) = NULL;
+	cxgb3i_free_big_mem(cdata);
+}
+
+void cxgb3i_sdev_cleanup(void)
+{
+	struct cxgb3i_sdev_data *cdata;
+
+	write_lock(&cdata_rwlock);
+	list_for_each_entry(cdata, &cdata_list, list) {
+		list_del(&cdata->list);
+		sdev_data_cleanup(cdata);
+	}
+	write_unlock(&cdata_rwlock);
+}
+
+int cxgb3i_sdev_init(cxgb3_cpl_handler_func *cpl_handlers)
+{
+	cpl_handlers[CPL_ACT_ESTABLISH] = do_act_establish;
+	cpl_handlers[CPL_ACT_OPEN_RPL] = do_act_open_rpl;
+	cpl_handlers[CPL_PEER_CLOSE] = do_peer_close;
+	cpl_handlers[CPL_ABORT_REQ_RSS] = do_abort_req;
+	cpl_handlers[CPL_ABORT_RPL_RSS] = do_abort_rpl;
+	cpl_handlers[CPL_CLOSE_CON_RPL] = do_close_con_rpl;
+	cpl_handlers[CPL_TX_DMA_ACK] = do_wr_ack;
+	cpl_handlers[CPL_ISCSI_HDR] = do_iscsi_hdr;
+
+	if (cxgb3_max_connect > CXGB3I_MAX_CONN)
+		cxgb3_max_connect = CXGB3I_MAX_CONN;
+	return 0;
+}
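+/*
+ * Example (illustrative sketch): the module init code is expected to own the
+ * CPL handler table and hand it in, roughly as below; NUM_CPL_CMDS comes
+ * from the cxgb3 offload headers.
+ *
+ *	static cxgb3_cpl_handler_func cpl_handlers[NUM_CPL_CMDS];
+ *
+ *	cxgb3i_sdev_init(cpl_handlers);
+ */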
+
+/**
+ * cxgb3i_sdev_add - allocate and initialize resources for each adapter found
+ * @cdev:	t3cdev adapter
+ * @client:	cxgb3 driver client
+ */
+void cxgb3i_sdev_add(struct t3cdev *cdev, struct cxgb3_client *client)
+{
+	struct cxgb3i_sdev_data *cdata;
+	struct ofld_page_info rx_page_info;
+	unsigned int wr_len;
+	int mapsize = DIV_ROUND_UP(cxgb3_max_connect,
+				   8 * sizeof(unsigned long));
+	int i;
+
+	cdata =  cxgb3i_alloc_big_mem(sizeof(*cdata) + mapsize, GFP_KERNEL);
+	if (!cdata)
+		return;
+
+	if (cdev->ctl(cdev, GET_WR_LEN, &wr_len) < 0 ||
+	    cdev->ctl(cdev, GET_PORTS, &cdata->ports) < 0 ||
+	    cdev->ctl(cdev, GET_RX_PAGE_INFO, &rx_page_info) < 0)
+		goto free_cdata;
+
+	s3_init_wr_tab(wr_len);
+
+	INIT_LIST_HEAD(&cdata->list);
+	cdata->cdev = cdev;
+	cdata->client = client;
+
+	for (i = 0; i < cdata->ports.nports; i++)
+		NDEV2CDATA(cdata->ports.lldevs[i]) = cdata;
+
+	write_lock(&cdata_rwlock);
+	list_add_tail(&cdata->list, &cdata_list);
+	write_unlock(&cdata_rwlock);
+
+	return;
+
+free_cdata:
+	cxgb3i_free_big_mem(cdata);
+}
+
+/**
+ * cxgb3i_sdev_remove - free the allocated resources for the adapter
+ * @cdev:	t3cdev adapter
+ */
+void cxgb3i_sdev_remove(struct t3cdev *cdev)
+{
+	struct cxgb3i_sdev_data *cdata = CXGB3_SDEV_DATA(cdev);
+
+	write_lock(&cdata_rwlock);
+	list_del(&cdata->list);
+	write_unlock(&cdata_rwlock);
+
+	sdev_data_cleanup(cdata);
+}
diff --git a/drivers/scsi/cxgb3i/cxgb3i_offload.h b/drivers/scsi/cxgb3i/cxgb3i_offload.h
new file mode 100644
index 000000000000..d23156907ffd
--- /dev/null
+++ b/drivers/scsi/cxgb3i/cxgb3i_offload.h
@@ -0,0 +1,231 @@
+/*
+ * cxgb3i_offload.h: Chelsio S3xx iscsi offloaded tcp connection management
+ *
+ * Copyright (C) 2003-2008 Chelsio Communications.  All rights reserved.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the LICENSE file included in this
+ * release for licensing terms and conditions.
+ *
+ * Written by:	Dimitris Michailidis (dm@chelsio.com)
+ *		Karen Xie (kxie@chelsio.com)
+ */
+
+#ifndef _CXGB3I_OFFLOAD_H
+#define _CXGB3I_OFFLOAD_H
+
+#include <linux/skbuff.h>
+#include <net/tcp.h>
+
+#include "common.h"
+#include "adapter.h"
+#include "t3cdev.h"
+#include "cxgb3_offload.h"
+
+#define cxgb3i_log_error(fmt...) printk(KERN_ERR "cxgb3i: ERR! " fmt)
+#define cxgb3i_log_warn(fmt...)	 printk(KERN_WARNING "cxgb3i: WARN! " fmt)
+#define cxgb3i_log_info(fmt...)  printk(KERN_INFO "cxgb3i: " fmt)
+#define cxgb3i_log_debug(fmt, args...) \
+	printk(KERN_INFO "cxgb3i: %s - " fmt, __func__ , ## args)
+
+/**
+ * struct s3_conn - an iscsi tcp connection structure
+ *
+ * @dev:	net device associated with the connection
+ * @cdev:	adapter t3cdev for net device
+ * @flags:	see c3cn_flags below
+ * @tid:	connection id assigned by the h/w
+ * @qset:	queue set used by connection
+ * @mss_idx:	Maximum Segment Size table index
+ * @l2t:	ARP resolution entry for offload packets
+ * @wr_max:	maximum in-flight writes
+ * @wr_avail:	number of writes available
+ * @wr_unacked:	writes since last request for completion notification
+ * @wr_pending_head: head of pending write queue
+ * @wr_pending_tail: tail of pending write queue
+ * @cpl_close:	skb for cpl_close_req
+ * @cpl_abort_req: skb for cpl_abort_req
+ * @cpl_abort_rpl: skb for cpl_abort_rpl
+ * @lock:	connection status lock
+ * @refcnt:	reference count on connection
+ * @state:	connection state
+ * @saddr:	source ip/port address
+ * @daddr:	destination ip/port address
+ * @dst_cache:	reference to destination route
+ * @receive_queue: received PDUs
+ * @write_queue: un-pushed pending writes
+ * @retry_timer: retry timer for various operations
+ * @err:	connection error status
+ * @callback_lock: lock for opaque user context
+ * @user_data:	opaque user context
+ * @rcv_nxt:	next receive seq. #
+ * @copied_seq:	head of yet unread data
+ * @rcv_wup:	rcv_nxt on last window update sent
+ * @snd_nxt:	next sequence we send
+ * @snd_una:	first byte we want an ack for
+ * @write_seq:	tail+1 of data held in send buffer
+ */
+struct s3_conn {
+	struct net_device *dev;
+	struct t3cdev *cdev;
+	unsigned long flags;
+	int tid;
+	int qset;
+	int mss_idx;
+	struct l2t_entry *l2t;
+	int wr_max;
+	int wr_avail;
+	int wr_unacked;
+	struct sk_buff *wr_pending_head;
+	struct sk_buff *wr_pending_tail;
+	struct sk_buff *cpl_close;
+	struct sk_buff *cpl_abort_req;
+	struct sk_buff *cpl_abort_rpl;
+	spinlock_t lock;
+	atomic_t refcnt;
+	volatile unsigned int state;
+	struct sockaddr_in saddr;
+	struct sockaddr_in daddr;
+	struct dst_entry *dst_cache;
+	struct sk_buff_head receive_queue;
+	struct sk_buff_head write_queue;
+	struct timer_list retry_timer;
+	int err;
+	rwlock_t callback_lock;
+	void *user_data;
+
+	u32 rcv_nxt;
+	u32 copied_seq;
+	u32 rcv_wup;
+	u32 snd_nxt;
+	u32 snd_una;
+	u32 write_seq;
+};
+
+/*
+ * connection state
+ */
+enum conn_states {
+	C3CN_STATE_CONNECTING = 1,
+	C3CN_STATE_ESTABLISHED,
+	C3CN_STATE_ACTIVE_CLOSE,
+	C3CN_STATE_PASSIVE_CLOSE,
+	C3CN_STATE_CLOSE_WAIT_1,
+	C3CN_STATE_CLOSE_WAIT_2,
+	C3CN_STATE_ABORTING,
+	C3CN_STATE_CLOSED,
+};
+
+static inline unsigned int c3cn_is_closing(const struct s3_conn *c3cn)
+{
+	return c3cn->state >= C3CN_STATE_ACTIVE_CLOSE;
+}
+static inline unsigned int c3cn_is_established(const struct s3_conn *c3cn)
+{
+	return c3cn->state == C3CN_STATE_ESTABLISHED;
+}
+
+/*
+ * Connection flags -- mostly tracking close-related events.
+ */
+enum c3cn_flags {
+	C3CN_ABORT_RPL_RCVD,	/* received one ABORT_RPL_RSS message */
+	C3CN_ABORT_REQ_RCVD,	/* received one ABORT_REQ_RSS message */
+	C3CN_ABORT_RPL_PENDING,	/* expecting an abort reply */
+	C3CN_TX_DATA_SENT,	/* already sent a TX_DATA WR */
+	C3CN_ACTIVE_CLOSE_NEEDED,	/* need to be closed */
+};
+
+/**
+ * struct cxgb3i_sdev_data - per-adapter data
+ * Linked off of each Ethernet device port on the adapter.
+ * Also available via the t3cdev structure since we have pointers to our port
+ * net_devices there.
+ *
+ * @list:	list head to link elements
+ * @cdev:	t3cdev adapter
+ * @client:	CPL client pointer
+ * @ports:	array of adapter ports
+ * @sport_map_next: next index into the port map
+ * @sport_map:	source port map
+ */
+struct cxgb3i_sdev_data {
+	struct list_head list;
+	struct t3cdev *cdev;
+	struct cxgb3_client *client;
+	struct adap_ports ports;
+	unsigned int sport_map_next;
+	unsigned long sport_map[0];
+};
+#define NDEV2CDATA(ndev) (*(struct cxgb3i_sdev_data **)&(ndev)->ec_ptr)
+#define CXGB3_SDEV_DATA(cdev) NDEV2CDATA((cdev)->lldev)
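+/*
+ * Example (illustrative sketch): resolving the per-adapter data either from
+ * a port net_device or from the t3cdev, as initiate_act_open() and
+ * cxgb3i_sdev_remove() do in cxgb3i_offload.c.
+ *
+ *	struct cxgb3i_sdev_data *cdata = NDEV2CDATA(ndev);
+ *	struct cxgb3i_sdev_data *same = CXGB3_SDEV_DATA(cdata->cdev);
+ */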
+
+void cxgb3i_sdev_cleanup(void);
+int cxgb3i_sdev_init(cxgb3_cpl_handler_func *);
+void cxgb3i_sdev_add(struct t3cdev *, struct cxgb3_client *);
+void cxgb3i_sdev_remove(struct t3cdev *);
+
+struct s3_conn *cxgb3i_c3cn_create(void);
+int cxgb3i_c3cn_connect(struct s3_conn *, struct sockaddr_in *);
+void cxgb3i_c3cn_rx_credits(struct s3_conn *, int);
+int cxgb3i_c3cn_send_pdus(struct s3_conn *, struct sk_buff *);
+void cxgb3i_c3cn_release(struct s3_conn *);
+
+/**
+ * cxgb3_skb_cb - control block for received pdu state and ULP mode management.
+ *
+ * @flags:	see C3CB_FLAG_* below
+ * @ulp_mode:	ULP mode/submode of sk_buff
+ * @seq:	tcp sequence number
+ * @ddigest:	pdu data digest
+ * @pdulen:	recovered pdu length
+ * @wr_data:	scratch area for tx wr
+ */
+struct cxgb3_skb_cb {
+	__u8 flags;
+	__u8 ulp_mode;
+	__u32 seq;
+	__u32 ddigest;
+	__u32 pdulen;
+	struct sk_buff *wr_data;
+};
+
+#define CXGB3_SKB_CB(skb)	((struct cxgb3_skb_cb *)&((skb)->cb[0]))
+
+#define skb_ulp_mode(skb)	(CXGB3_SKB_CB(skb)->ulp_mode)
+#define skb_ulp_ddigest(skb)	(CXGB3_SKB_CB(skb)->ddigest)
+#define skb_ulp_pdulen(skb)	(CXGB3_SKB_CB(skb)->pdulen)
+#define skb_wr_data(skb)	(CXGB3_SKB_CB(skb)->wr_data)
+
+enum c3cb_flags {
+	C3CB_FLAG_NEED_HDR = 1 << 0,	/* packet needs a TX_DATA_WR header */
+	C3CB_FLAG_NO_APPEND = 1 << 1,	/* don't grow this skb */
+	C3CB_FLAG_COMPL = 1 << 2,	/* request WR completion */
+};
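+/*
+ * Example (illustrative sketch): how the cb accessors above are used when
+ * queueing an skb for transmit; compare skb_entail() in cxgb3i_offload.c.
+ *
+ *	CXGB3_SKB_CB(skb)->seq = c3cn->write_seq;
+ *	CXGB3_SKB_CB(skb)->flags = C3CB_FLAG_NO_APPEND | C3CB_FLAG_NEED_HDR;
+ */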
+
+/**
+ * struct sge_opaque_hdr -
+ * Opaque version of the structure the SGE stores at skb->head of TX_DATA
+ * packets and for which we must reserve space.
+ */
+struct sge_opaque_hdr {
+	void *dev;
+	dma_addr_t addr[MAX_SKB_FRAGS + 1];
+};
+
+/* for TX: a skb must have a headroom of at least TX_HEADER_LEN bytes */
+#define TX_HEADER_LEN \
+		(sizeof(struct tx_data_wr) + sizeof(struct sge_opaque_hdr))
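+/*
+ * Example (illustrative sketch): a pdu skb handed to cxgb3i_c3cn_send_pdus()
+ * must reserve this headroom up front, as cxgb3i_conn_alloc_pdu() does in
+ * cxgb3i_pdu.c; payload_room is a placeholder for the caller's pdu size.
+ *
+ *	skb = alloc_skb(TX_HEADER_LEN + payload_room, GFP_ATOMIC);
+ *	if (skb)
+ *		skb_reserve(skb, TX_HEADER_LEN);
+ */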
+
+/*
+ * get and set private ip for iscsi traffic
+ */
+#define cxgb3i_get_private_ipv4addr(ndev) \
+	(((struct port_info *)(netdev_priv(ndev)))->iscsi_ipv4addr)
+#define cxgb3i_set_private_ipv4addr(ndev, addr) \
+	(((struct port_info *)(netdev_priv(ndev)))->iscsi_ipv4addr) = addr
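+/*
+ * Example (illustrative sketch): mirrors the fallback in
+ * cxgb3i_c3cn_connect() -- program the port's private iscsi address if none
+ * has been configured yet; saddr is a placeholder source address.
+ *
+ *	if (!cxgb3i_get_private_ipv4addr(ndev))
+ *		cxgb3i_set_private_ipv4addr(ndev, saddr);
+ */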
+
+/* max. connections per adapter */
+#define CXGB3I_MAX_CONN		16384
+#endif /* _CXGB3I_OFFLOAD_H */
diff --git a/drivers/scsi/cxgb3i/cxgb3i_pdu.c b/drivers/scsi/cxgb3i/cxgb3i_pdu.c
new file mode 100644
index 000000000000..ce7ce8c6094c
--- /dev/null
+++ b/drivers/scsi/cxgb3i/cxgb3i_pdu.c
@@ -0,0 +1,402 @@
+/*
+ * cxgb3i_pdu.c: Chelsio S3xx iSCSI driver.
+ *
+ * Copyright (c) 2008 Chelsio Communications, Inc.
+ * Copyright (c) 2008 Mike Christie
+ * Copyright (c) 2008 Red Hat, Inc.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Karen Xie (kxie@chelsio.com)
+ */
+
+#include <linux/skbuff.h>
+#include <linux/crypto.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_host.h>
+
+#include "cxgb3i.h"
+#include "cxgb3i_pdu.h"
+
+#ifdef __DEBUG_CXGB3I_RX__
+#define cxgb3i_rx_debug		cxgb3i_log_debug
+#else
+#define cxgb3i_rx_debug(fmt...)
+#endif
+
+#ifdef __DEBUG_CXGB3I_TX__
+#define cxgb3i_tx_debug		cxgb3i_log_debug
+#else
+#define cxgb3i_tx_debug(fmt...)
+#endif
+
+static struct page *pad_page;
+
+/*
+ * pdu receive, interact with libiscsi_tcp
+ */
+static inline int read_pdu_skb(struct iscsi_conn *conn, struct sk_buff *skb,
+			       unsigned int offset, int offloaded)
+{
+	int status = 0;
+	int bytes_read;
+
+	bytes_read = iscsi_tcp_recv_skb(conn, skb, offset, offloaded, &status);
+	switch (status) {
+	case ISCSI_TCP_CONN_ERR:
+		return -EIO;
+	case ISCSI_TCP_SUSPENDED:
+		/* no transfer - just have caller flush queue */
+		return bytes_read;
+	case ISCSI_TCP_SKB_DONE:
+		/*
+		 * pdus should always fit in the skb and we should get
+		 * segment done notification.
+		 */
+		iscsi_conn_printk(KERN_ERR, conn, "Invalid pdu or skb.");
+		return -EFAULT;
+	case ISCSI_TCP_SEGMENT_DONE:
+		return bytes_read;
+	default:
+		iscsi_conn_printk(KERN_ERR, conn, "Invalid iscsi_tcp_recv_skb "
+				  "status %d\n", status);
+		return -EINVAL;
+	}
+}
+
+static int cxgb3i_conn_read_pdu_skb(struct iscsi_conn *conn,
+				    struct sk_buff *skb)
+{
+	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+	bool offloaded = false;
+	unsigned int offset;
+	int rc;
+
+	cxgb3i_rx_debug("conn 0x%p, skb 0x%p, len %u, flag 0x%x.\n",
+			conn, skb, skb->len, skb_ulp_mode(skb));
+
+	if (!iscsi_tcp_recv_segment_is_hdr(tcp_conn)) {
+		iscsi_conn_failure(conn, ISCSI_ERR_PROTO);
+		return -EIO;
+	}
+
+	if (conn->hdrdgst_en && (skb_ulp_mode(skb) & ULP2_FLAG_HCRC_ERROR)) {
+		iscsi_conn_failure(conn, ISCSI_ERR_HDR_DGST);
+		return -EIO;
+	}
+
+	if (conn->datadgst_en && (skb_ulp_mode(skb) & ULP2_FLAG_DCRC_ERROR)) {
+		iscsi_conn_failure(conn, ISCSI_ERR_DATA_DGST);
+		return -EIO;
+	}
+
+	/* iscsi hdr */
+	rc = read_pdu_skb(conn, skb, 0, 0);
+	if (rc <= 0)
+		return rc;
+
+	if (iscsi_tcp_recv_segment_is_hdr(tcp_conn))
+		return 0;
+
+	offset = rc;
+	if (conn->hdrdgst_en)
+		offset += ISCSI_DIGEST_SIZE;
+
+	/* iscsi data */
+	if (skb_ulp_mode(skb) & ULP2_FLAG_DATA_DDPED) {
+		cxgb3i_rx_debug("skb 0x%p, opcode 0x%x, data %u, ddp'ed, "
+				"itt 0x%x.\n",
+				skb,
+				tcp_conn->in.hdr->opcode & ISCSI_OPCODE_MASK,
+				tcp_conn->in.datalen,
+				ntohl(tcp_conn->in.hdr->itt));
+		offloaded = true;
+	} else {
+		cxgb3i_rx_debug("skb 0x%p, opcode 0x%x, data %u, NOT ddp'ed, "
+				"itt 0x%x.\n",
+				skb,
+				tcp_conn->in.hdr->opcode & ISCSI_OPCODE_MASK,
+				tcp_conn->in.datalen,
+				ntohl(tcp_conn->in.hdr->itt));
+		offset += sizeof(struct cpl_iscsi_hdr_norss);
+	}
+
+	rc = read_pdu_skb(conn, skb, offset, offloaded);
+	if (rc < 0)
+		return rc;
+	else
+		return 0;
+}
+
+/*
+ * pdu transmit, interact with libiscsi_tcp
+ */
+static inline void tx_skb_setmode(struct sk_buff *skb, int hcrc, int dcrc)
+{
+	u8 submode = 0;
+
+	if (hcrc)
+		submode |= 1;
+	if (dcrc)
+		submode |= 2;
+	skb_ulp_mode(skb) = (ULP_MODE_ISCSI << 4) | submode;
+}
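+/*
+ * For example, with the header digest enabled and the data digest disabled
+ * the submode is 1, so skb_ulp_mode(skb) becomes (ULP_MODE_ISCSI << 4) | 1.
+ */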
+
+void cxgb3i_conn_cleanup_task(struct iscsi_task *task)
+{
+	struct iscsi_tcp_task *tcp_task = task->dd_data;
+
+	/* never reached the xmit task callout */
+	if (tcp_task->dd_data)
+		kfree_skb(tcp_task->dd_data);
+	tcp_task->dd_data = NULL;
+
+	/* MNC - Do we need a check in case this is called but
+	 * cxgb3i_conn_alloc_pdu has never been called on the task? */
+	cxgb3i_release_itt(task, task->hdr_itt);
+	iscsi_tcp_cleanup_task(task);
+}
+
+/*
+ * We do not support ahs yet
+ */
+int cxgb3i_conn_alloc_pdu(struct iscsi_task *task, u8 opcode)
+{
+	struct iscsi_tcp_task *tcp_task = task->dd_data;
+	struct sk_buff *skb;
+
+	task->hdr = NULL;
+	/* always allocate room for AHS */
+	skb = alloc_skb(sizeof(struct iscsi_hdr) + ISCSI_MAX_AHS_SIZE +
+			TX_HEADER_LEN,  GFP_ATOMIC);
+	if (!skb)
+		return -ENOMEM;
+
+	cxgb3i_tx_debug("task 0x%p, opcode 0x%x, skb 0x%p.\n",
+			task, opcode, skb);
+
+	tcp_task->dd_data = skb;
+	skb_reserve(skb, TX_HEADER_LEN);
+	task->hdr = (struct iscsi_hdr *)skb->data;
+	task->hdr_max = sizeof(struct iscsi_hdr);
+
+	/* data_out uses scsi_cmd's itt */
+	if (opcode != ISCSI_OP_SCSI_DATA_OUT)
+		cxgb3i_reserve_itt(task, &task->hdr->itt);
+
+	return 0;
+}
+
+int cxgb3i_conn_init_pdu(struct iscsi_task *task, unsigned int offset,
+			      unsigned int count)
+{
+	struct iscsi_tcp_task *tcp_task = task->dd_data;
+	struct sk_buff *skb = tcp_task->dd_data;
+	struct iscsi_conn *conn = task->conn;
+	struct page *pg;
+	unsigned int datalen = count;
+	int i, padlen = iscsi_padding(count);
+	skb_frag_t *frag;
+
+	cxgb3i_tx_debug("task 0x%p,0x%p, offset %u, count %u, skb 0x%p.\n",
+			task, task->sc, offset, count, skb);
+
+	skb_put(skb, task->hdr_len);
+	tx_skb_setmode(skb, conn->hdrdgst_en, datalen ? conn->datadgst_en : 0);
+	if (!count)
+		return 0;
+
+	if (task->sc) {
+		struct scatterlist *sg;
+		struct scsi_data_buffer *sdb;
+		unsigned int sgoffset = offset;
+		struct page *sgpg;
+		unsigned int sglen;
+
+		sdb = scsi_out(task->sc);
+		sg = sdb->table.sgl;
+
+		for_each_sg(sdb->table.sgl, sg, sdb->table.nents, i) {
+			cxgb3i_tx_debug("sg %d, page 0x%p, len %u offset %u\n",
+					i, sg_page(sg), sg->length, sg->offset);
+
+			if (sgoffset < sg->length)
+				break;
+			sgoffset -= sg->length;
+		}
+		sgpg = sg_page(sg);
+		sglen = sg->length - sgoffset;
+
+		do {
+			int j = skb_shinfo(skb)->nr_frags;
+			unsigned int copy;
+
+			if (!sglen) {
+				sg = sg_next(sg);
+				sgpg = sg_page(sg);
+				sgoffset = 0;
+				sglen = sg->length;
+				++i;
+			}
+			copy = min(sglen, datalen);
+			if (j && skb_can_coalesce(skb, j, sgpg,
+						  sg->offset + sgoffset)) {
+				skb_shinfo(skb)->frags[j - 1].size += copy;
+			} else {
+				get_page(sgpg);
+				skb_fill_page_desc(skb, j, sgpg,
+						   sg->offset + sgoffset, copy);
+			}
+			sgoffset += copy;
+			sglen -= copy;
+			datalen -= copy;
+		} while (datalen);
+	} else {
+		pg = virt_to_page(task->data);
+
+		while (datalen) {
+			i = skb_shinfo(skb)->nr_frags;
+			frag = &skb_shinfo(skb)->frags[i];
+
+			get_page(pg);
+			frag->page = pg;
+			frag->page_offset = 0;
+			frag->size = min((unsigned int)PAGE_SIZE, datalen);
+
+			skb_shinfo(skb)->nr_frags++;
+			datalen -= frag->size;
+			pg++;
+		}
+	}
+
+	if (padlen) {
+		i = skb_shinfo(skb)->nr_frags;
+		frag = &skb_shinfo(skb)->frags[i];
+		frag->page = pad_page;
+		frag->page_offset = 0;
+		frag->size = padlen;
+		skb_shinfo(skb)->nr_frags++;
+	}
+
+	datalen = count + padlen;
+	skb->data_len += datalen;
+	skb->truesize += datalen;
+	skb->len += datalen;
+	return 0;
+}
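+/*
+ * For example, a 14-byte data segment gets padlen = iscsi_padding(14) = 2,
+ * so the skb carries 16 payload bytes, the last two coming from the shared
+ * zeroed pad_page.
+ */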
+
+int cxgb3i_conn_xmit_pdu(struct iscsi_task *task)
+{
+	struct iscsi_tcp_task *tcp_task = task->dd_data;
+	struct sk_buff *skb = tcp_task->dd_data;
+	struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data;
+	struct cxgb3i_conn *cconn = tcp_conn->dd_data;
+	unsigned int datalen;
+	int err;
+
+	if (!skb)
+		return 0;
+
+	datalen = skb->data_len;
+	tcp_task->dd_data = NULL;
+	err = cxgb3i_c3cn_send_pdus(cconn->cep->c3cn, skb);
+	cxgb3i_tx_debug("task 0x%p, skb 0x%p, len %u/%u, rv %d.\n",
+			task, skb, skb->len, skb->data_len, err);
+	if (err > 0) {
+		int pdulen = err;
+
+		if (task->conn->hdrdgst_en)
+			pdulen += ISCSI_DIGEST_SIZE;
+		if (datalen && task->conn->datadgst_en)
+			pdulen += ISCSI_DIGEST_SIZE;
+
+		task->conn->txdata_octets += pdulen;
+		return 0;
+	}
+
+	if (err < 0 && err != -EAGAIN) {
+		kfree_skb(skb);
+		cxgb3i_tx_debug("itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n",
+				task->itt, skb, skb->len, skb->data_len, err);
+		iscsi_conn_printk(KERN_ERR, task->conn, "xmit err %d.\n", err);
+		iscsi_conn_failure(task->conn, ISCSI_ERR_XMIT_FAILED);
+		return err;
+	}
+	/* reset skb to send when we are called again */
+	tcp_task->dd_data = skb;
+	return -EAGAIN;
+}
+
+int cxgb3i_pdu_init(void)
+{
+	pad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+	if (!pad_page)
+		return -ENOMEM;
+	return 0;
+}
+
+void cxgb3i_pdu_cleanup(void)
+{
+	if (pad_page) {
+		__free_page(pad_page);
+		pad_page = NULL;
+	}
+}
+
+void cxgb3i_conn_pdu_ready(struct s3_conn *c3cn)
+{
+	struct sk_buff *skb;
+	unsigned int read = 0;
+	struct iscsi_conn *conn = c3cn->user_data;
+	int err = 0;
+
+	cxgb3i_rx_debug("cn 0x%p.\n", c3cn);
+
+	read_lock(&c3cn->callback_lock);
+	if (unlikely(!conn || conn->suspend_rx)) {
+		cxgb3i_rx_debug("conn 0x%p, id %d, suspend_rx %lu!\n",
+				conn, conn ? conn->id : 0xFF,
+				conn ? conn->suspend_rx : 0xFF);
+		read_unlock(&c3cn->callback_lock);
+		return;
+	}
+	skb = skb_peek(&c3cn->receive_queue);
+	while (!err && skb) {
+		__skb_unlink(skb, &c3cn->receive_queue);
+		read += skb_ulp_pdulen(skb);
+		err = cxgb3i_conn_read_pdu_skb(conn, skb);
+		__kfree_skb(skb);
+		skb = skb_peek(&c3cn->receive_queue);
+	}
+	read_unlock(&c3cn->callback_lock);
+	if (c3cn) {
+		c3cn->copied_seq += read;
+		cxgb3i_c3cn_rx_credits(c3cn, read);
+	}
+	conn->rxdata_octets += read;
+}
+
+void cxgb3i_conn_tx_open(struct s3_conn *c3cn)
+{
+	struct iscsi_conn *conn = c3cn->user_data;
+
+	cxgb3i_tx_debug("cn 0x%p.\n", c3cn);
+	if (conn) {
+		cxgb3i_tx_debug("cn 0x%p, cid %d.\n", c3cn, conn->id);
+		scsi_queue_work(conn->session->host, &conn->xmitwork);
+	}
+}
+
+void cxgb3i_conn_closing(struct s3_conn *c3cn)
+{
+	struct iscsi_conn *conn;
+
+	read_lock(&c3cn->callback_lock);
+	conn = c3cn->user_data;
+	if (conn && c3cn->state != C3CN_STATE_ESTABLISHED)
+		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+	read_unlock(&c3cn->callback_lock);
+}
diff --git a/drivers/scsi/cxgb3i/cxgb3i_pdu.h b/drivers/scsi/cxgb3i/cxgb3i_pdu.h
new file mode 100644
index 000000000000..a3f685cc2362
--- /dev/null
+++ b/drivers/scsi/cxgb3i/cxgb3i_pdu.h
@@ -0,0 +1,59 @@
+/*
+ * cxgb3i_pdu.h: Chelsio S3xx iSCSI driver.
+ *
+ * Copyright (c) 2008 Chelsio Communications, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Karen Xie (kxie@chelsio.com)
+ */
+
+#ifndef __CXGB3I_ULP2_PDU_H__
+#define __CXGB3I_ULP2_PDU_H__
+
+struct cpl_iscsi_hdr_norss {
+	union opcode_tid ot;
+	u16 pdu_len_ddp;
+	u16 len;
+	u32 seq;
+	u16 urg;
+	u8 rsvd;
+	u8 status;
+};
+
+struct cpl_rx_data_ddp_norss {
+	union opcode_tid ot;
+	u16 urg;
+	u16 len;
+	u32 seq;
+	u32 nxt_seq;
+	u32 ulp_crc;
+	u32 ddp_status;
+};
+
+#define RX_DDP_STATUS_IPP_SHIFT		27	/* invalid pagepod */
+#define RX_DDP_STATUS_TID_SHIFT		26	/* tid mismatch */
+#define RX_DDP_STATUS_COLOR_SHIFT	25	/* color mismatch */
+#define RX_DDP_STATUS_OFFSET_SHIFT	24	/* offset mismatch */
+#define RX_DDP_STATUS_ULIMIT_SHIFT	23	/* ulimit error */
+#define RX_DDP_STATUS_TAG_SHIFT		22	/* tag mismatch */
+#define RX_DDP_STATUS_DCRC_SHIFT	21	/* dcrc error */
+#define RX_DDP_STATUS_HCRC_SHIFT	20	/* hcrc error */
+#define RX_DDP_STATUS_PAD_SHIFT		19	/* pad error */
+#define RX_DDP_STATUS_PPP_SHIFT		18	/* pagepod parity error */
+#define RX_DDP_STATUS_LLIMIT_SHIFT	17	/* llimit error */
+#define RX_DDP_STATUS_DDP_SHIFT		16	/* ddp'able */
+#define RX_DDP_STATUS_PMM_SHIFT		15	/* pagepod mismatch */
+
+#define ULP2_FLAG_DATA_READY		0x1
+#define ULP2_FLAG_DATA_DDPED		0x2
+#define ULP2_FLAG_HCRC_ERROR		0x10
+#define ULP2_FLAG_DCRC_ERROR		0x20
+#define ULP2_FLAG_PAD_ERROR		0x40
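+/*
+ * Example (illustrative sketch): the rx path tests these bits in the skb's
+ * ulp mode, as cxgb3i_conn_read_pdu_skb() does in cxgb3i_pdu.c:
+ *
+ *	if (skb_ulp_mode(skb) & ULP2_FLAG_DCRC_ERROR)
+ *		iscsi_conn_failure(conn, ISCSI_ERR_DATA_DGST);
+ */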
+
+void cxgb3i_conn_closing(struct s3_conn *);
+void cxgb3i_conn_pdu_ready(struct s3_conn *c3cn);
+void cxgb3i_conn_tx_open(struct s3_conn *c3cn);
+#endif
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 3d50cabca7ee..53664765570a 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -24,6 +24,7 @@
 #include <scsi/scsi_dh.h>
 
 #define RDAC_NAME "rdac"
+#define RDAC_RETRY_COUNT 5
 
 /*
  * LSI mode page stuff
@@ -386,6 +387,7 @@ static int check_ownership(struct scsi_device *sdev, struct rdac_dh_data *h)
 	struct c9_inquiry *inqp;
 
 	h->lun_state = RDAC_LUN_UNOWNED;
+	h->state = RDAC_STATE_ACTIVE;
 	err = submit_inquiry(sdev, 0xC9, sizeof(struct c9_inquiry), h);
 	if (err == SCSI_DH_OK) {
 		inqp = &h->inq.c9;
@@ -477,21 +479,27 @@ static int send_mode_select(struct scsi_device *sdev, struct rdac_dh_data *h)
 {
 	struct request *rq;
 	struct request_queue *q = sdev->request_queue;
-	int err = SCSI_DH_RES_TEMP_UNAVAIL;
+	int err, retry_cnt = RDAC_RETRY_COUNT;
 
+retry:
+	err = SCSI_DH_RES_TEMP_UNAVAIL;
 	rq = rdac_failover_get(sdev, h);
 	if (!rq)
 		goto done;
 
-	sdev_printk(KERN_INFO, sdev, "queueing MODE_SELECT command.\n");
+	sdev_printk(KERN_INFO, sdev, "%s MODE_SELECT command.\n",
+		(retry_cnt == RDAC_RETRY_COUNT) ? "queueing" : "retrying");
 
 	err = blk_execute_rq(q, NULL, rq, 1);
-	if (err != SCSI_DH_OK)
+	blk_put_request(rq);
+	if (err != SCSI_DH_OK) {
 		err = mode_select_handle_sense(sdev, h->sense);
+		if (err == SCSI_DH_RETRY && retry_cnt--)
+			goto retry;
+	}
 	if (err == SCSI_DH_OK)
 		h->state = RDAC_STATE_ACTIVE;
 
-	blk_put_request(rq);
 done:
 	return err;
 }
@@ -594,6 +602,8 @@ static const struct scsi_dh_devlist rdac_dev_list[] = {
 	{"SUN", "LCSM100_F"},
 	{"DELL", "MD3000"},
 	{"DELL", "MD3000i"},
+	{"LSI", "INF-01-00"},
+	{"ENGENIO", "INF-01-00"},
 	{NULL, NULL},
 };
 
diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
index a73a6bbb1b2b..976cdd5c94ef 100644
--- a/drivers/scsi/eata.c
+++ b/drivers/scsi/eata.c
@@ -1626,8 +1626,15 @@ static void map_dma(unsigned int i, struct hostdata *ha)
 
 	cpp->sense_len = SCSI_SENSE_BUFFERSIZE;
 
-	count = scsi_dma_map(SCpnt);
-	BUG_ON(count < 0);
+	if (!scsi_sg_count(SCpnt)) {
+		cpp->data_len = 0;
+		return;
+	}
+
+	count = pci_map_sg(ha->pdev, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
+			   pci_dir);
+	BUG_ON(!count);
+
 	scsi_for_each_sg(SCpnt, sg, count, k) {
 		cpp->sglist[k].address = H2DEV(sg_dma_address(sg));
 		cpp->sglist[k].num_bytes = H2DEV(sg_dma_len(sg));
@@ -1655,7 +1662,9 @@ static void unmap_dma(unsigned int i, struct hostdata *ha)
 		pci_unmap_single(ha->pdev, DEV2H(cpp->sense_addr),
 				 DEV2H(cpp->sense_len), PCI_DMA_FROMDEVICE);
 
-	scsi_dma_unmap(SCpnt);
+	if (scsi_sg_count(SCpnt))
+		pci_unmap_sg(ha->pdev, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
+			     pci_dir);
 
 	if (!DEV2H(cpp->data_len))
 		pci_dir = PCI_DMA_BIDIRECTIONAL;
diff --git a/drivers/scsi/eata_pio.c b/drivers/scsi/eata_pio.c
index 952505c006df..152dd15db276 100644
--- a/drivers/scsi/eata_pio.c
+++ b/drivers/scsi/eata_pio.c
@@ -14,8 +14,8 @@
  *             neuffer@goofy.zdv.uni-mainz.de               *
  *             a.arnold@kfa-juelich.de                      * 
  *                                                          *
- *  Updated 2002 by Alan Cox <alan@redhat.com> for Linux    *
- *  2.5.x and the newer locking and error handling          *
+ *  Updated 2002 by Alan Cox <alan@lxorguk.ukuu.org.uk> for *
+ *   Linux 2.5.x and the newer locking and error handling   *
  *                                                          *
  *  This program is free software; you can redistribute it  *
  *  and/or modify it under the terms of the GNU General     *
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
index 62a4618530d0..a680e18b5f3b 100644
--- a/drivers/scsi/esp_scsi.c
+++ b/drivers/scsi/esp_scsi.c
@@ -1453,7 +1453,7 @@ static void esp_msgin_sdtr(struct esp *esp, struct esp_target_data *tp)
 		offset = 0;
 
 	if (offset) {
-		int rounded_up, one_clock;
+		int one_clock;
 
 		if (period > esp->max_period) {
 			period = offset = 0;
@@ -1463,9 +1463,7 @@ static void esp_msgin_sdtr(struct esp *esp, struct esp_target_data *tp)
 			goto do_reject;
 
 		one_clock = esp->ccycle / 1000;
-		rounded_up = (period << 2);
-		rounded_up = (rounded_up + one_clock - 1) / one_clock;
-		stp = rounded_up;
+		stp = DIV_ROUND_UP(period << 2, one_clock);
 		if (stp && esp->rev >= FAS236) {
 			if (stp >= 50)
 				stp--;
diff --git a/drivers/scsi/fcoe/Makefile b/drivers/scsi/fcoe/Makefile
new file mode 100644
index 000000000000..b78da06d7c0e
--- /dev/null
+++ b/drivers/scsi/fcoe/Makefile
@@ -0,0 +1,8 @@
+obj-$(CONFIG_FCOE) += fcoe.o
+
+fcoe-y := \
+	libfcoe.o \
+	fcoe_sw.o \
+	fc_transport_fcoe.o
diff --git a/drivers/scsi/fcoe/fc_transport_fcoe.c b/drivers/scsi/fcoe/fc_transport_fcoe.c
new file mode 100644
index 000000000000..bf7fe6fc0820
--- /dev/null
+++ b/drivers/scsi/fcoe/fc_transport_fcoe.c
@@ -0,0 +1,446 @@
+/*
+ * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+#include <linux/pci.h>
+#include <scsi/libfcoe.h>
+#include <scsi/fc_transport_fcoe.h>
+
+/* internal fcoe transport */
+struct fcoe_transport_internal {
+	struct fcoe_transport *t;
+	struct net_device *netdev;
+	struct list_head list;
+};
+
+/* fcoe transports list and its lock */
+static LIST_HEAD(fcoe_transports);
+static DEFINE_MUTEX(fcoe_transports_lock);
+
+/**
+ * fcoe_transport_default - returns ptr to the default transport fcoe_sw
+ **/
+struct fcoe_transport *fcoe_transport_default(void)
+{
+	return &fcoe_sw_transport;
+}
+
+/**
+ * fcoe_transport_pcidev - get the pci dev from a netdev
+ * @netdev: the netdev that the pci dev will be retrieved from
+ *
+ * Returns: NULL or the corresponding pci_dev
+ **/
+struct pci_dev *fcoe_transport_pcidev(const struct net_device *netdev)
+{
+	if (!netdev->dev.parent)
+		return NULL;
+	return to_pci_dev(netdev->dev.parent);
+}
+
+/**
+ * fcoe_transport_device_lookup - check if a netdev is managed by a transport
+ * @t: the fcoe transport to search
+ * @netdev: the netdev to look up
+ *
+ * Walks the transport's device list looking for the given netdev.
+ *
+ * Returns: the matching fcoe_transport_internal entry, or NULL if not found
+ **/
+static struct fcoe_transport_internal *fcoe_transport_device_lookup(
+	struct fcoe_transport *t, struct net_device *netdev)
+{
+	struct fcoe_transport_internal *ti;
+
+	/* check if the device is already assigned to this transport */
+	mutex_lock(&t->devlock);
+	list_for_each_entry(ti, &t->devlist, list) {
+		if (ti->netdev == netdev) {
+			mutex_unlock(&t->devlock);
+			return ti;
+		}
+	}
+	mutex_unlock(&t->devlock);
+	return NULL;
+}
+/**
+ * fcoe_transport_device_add - assign a transport to a device
+ * @t: the fcoe transport
+ * @netdev: the netdev to attach the transport to
+ *
+ * Adds the netdev to the transport's device list, refusing duplicates.
+ *
+ * Returns: 0 for success
+ **/
+static int fcoe_transport_device_add(struct fcoe_transport *t,
+				     struct net_device *netdev)
+{
+	struct fcoe_transport_internal *ti;
+
+	ti = fcoe_transport_device_lookup(t, netdev);
+	if (ti) {
+		printk(KERN_DEBUG "fcoe_transport_device_add:"
+		       "device %s is already added to transport %s\n",
+		       netdev->name, t->name);
+		return -EEXIST;
+	}
+	/* allocate an internal struct to host the netdev and the list */
+	ti = kzalloc(sizeof(*ti), GFP_KERNEL);
+	if (!ti)
+		return -ENOMEM;
+
+	ti->t = t;
+	ti->netdev = netdev;
+	INIT_LIST_HEAD(&ti->list);
+	dev_hold(ti->netdev);
+
+	mutex_lock(&t->devlock);
+	list_add(&ti->list, &t->devlist);
+	mutex_unlock(&t->devlock);
+
+	printk(KERN_DEBUG "fcoe_transport_device_add:"
+		       "device %s added to transport %s\n",
+		       netdev->name, t->name);
+
+	return 0;
+}
+
+/**
+ * fcoe_transport_device_remove - remove a device from its transport
+ * @t: the fcoe transport
+ * @netdev: the netdev to detach
+ *
+ * This removes the device from the transport so the given transport will
+ * not manage it any more.
+ *
+ * Returns: 0 for success
+ **/
+static int fcoe_transport_device_remove(struct fcoe_transport *t,
+					struct net_device *netdev)
+{
+	struct fcoe_transport_internal *ti;
+
+	ti = fcoe_transport_device_lookup(t, netdev);
+	if (!ti) {
+		printk(KERN_DEBUG "fcoe_transport_device_remove:"
+		       "device %s is not managed by transport %s\n",
+		       netdev->name, t->name);
+		return -ENODEV;
+	}
+	mutex_lock(&t->devlock);
+	list_del(&ti->list);
+	mutex_unlock(&t->devlock);
+	printk(KERN_DEBUG "fcoe_transport_device_remove:"
+	       "device %s removed from transport %s\n",
+	       netdev->name, t->name);
+	dev_put(ti->netdev);
+	kfree(ti);
+	return 0;
+}
+
+/**
+ * fcoe_transport_device_remove_all - remove all devices from a transport
+ * @t: the fcoe transport
+ *
+ * This empties the transport's device list; the transport will no longer
+ * manage any of those devices.
+ **/
+static void fcoe_transport_device_remove_all(struct fcoe_transport *t)
+{
+	struct fcoe_transport_internal *ti, *tmp;
+
+	mutex_lock(&t->devlock);
+	list_for_each_entry_safe(ti, tmp, &t->devlist, list) {
+		list_del(&ti->list);
+		kfree(ti);
+	}
+	mutex_unlock(&t->devlock);
+}
+
+/**
+ * fcoe_transport_match - use the bus device match function to match the hw
+ * @t: the fcoe transport
+ * @netdev: the netdev to match against
+ *
+ * This function is used to check if the given transport wants to manage the
+ * input netdev. If the transport implements the match function, it will be
+ * called; otherwise we just compare the pci vendor and device id.
+ *
+ * Returns: true on a match
+ **/
+static bool fcoe_transport_match(struct fcoe_transport *t,
+				struct net_device *netdev)
+{
+	/* match transport by vendor and device id */
+	struct pci_dev *pci;
+
+	pci = fcoe_transport_pcidev(netdev);
+
+	if (pci) {
+		printk(KERN_DEBUG "fcoe_transport_match:"
+		       "%s:%x:%x -- %s:%x:%x\n",
+		       t->name, t->vendor, t->device,
+		       netdev->name, pci->vendor, pci->device);
+
+		/* if transport supports match */
+		if (t->match)
+			return t->match(netdev);
+
+		/* else just compare the vendor and device id: pci only */
+		return (t->vendor == pci->vendor) && (t->device == pci->device);
+	}
+	return false;
+}
+
+/**
+ * fcoe_transport_lookup - find a registered transport for a netdev
+ * @netdev: the netdev to be looked up
+ *
+ * This compares the parent device (pci) vendor and device id.
+ *
+ * Returns: the matching transport, or the default sw transport (fcoe_sw)
+ * if no other transport matches
+ **/
+static struct fcoe_transport *fcoe_transport_lookup(
+	struct net_device *netdev)
+{
+	struct fcoe_transport *t;
+
+	mutex_lock(&fcoe_transports_lock);
+	list_for_each_entry(t, &fcoe_transports, list) {
+		if (fcoe_transport_match(t, netdev)) {
+			mutex_unlock(&fcoe_transports_lock);
+			return t;
+		}
+	}
+	mutex_unlock(&fcoe_transports_lock);
+
+	printk(KERN_DEBUG "fcoe_transport_lookup:"
+	       "use default transport for %s\n", netdev->name);
+	return fcoe_transport_default();
+}
+
+/**
+ * fcoe_transport_register - adds a fcoe transport to the fcoe transports list
+ * @t: ptr to the fcoe transport to be added
+ *
+ * Returns: 0 for success
+ **/
+int fcoe_transport_register(struct fcoe_transport *t)
+{
+	struct fcoe_transport *tt;
+
+	/* TODO - add fcoe_transport specific initialization here */
+	mutex_lock(&fcoe_transports_lock);
+	list_for_each_entry(tt, &fcoe_transports, list) {
+		if (tt == t) {
+			mutex_unlock(&fcoe_transports_lock);
+			return -EEXIST;
+		}
+	}
+	list_add_tail(&t->list, &fcoe_transports);
+	mutex_unlock(&fcoe_transports_lock);
+
+	mutex_init(&t->devlock);
+	INIT_LIST_HEAD(&t->devlist);
+
+	printk(KERN_DEBUG "fcoe_transport_register:%s\n", t->name);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(fcoe_transport_register);
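+
+/*
+ * Example (illustrative sketch): an offload driver registers its transport
+ * much like fcoe_sw does in fcoe_sw.c.  The name, ids and callbacks below
+ * are hypothetical; the ids would normally match the driver's PCI ids.
+ *
+ *	static struct fcoe_transport my_transport = {
+ *		.name = "myfcoe",
+ *		.vendor = 0x1425,
+ *		.device = 0x0030,
+ *		.create = my_create,
+ *		.destroy = my_destroy,
+ *	};
+ *
+ *	fcoe_transport_register(&my_transport);
+ */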
+
+/**
+ * fcoe_transport_unregister - remove the transport from the fcoe transports list
+ * @t: ptr to the fcoe transport to be removed
+ *
+ * Returns: 0 for success
+ **/
+int fcoe_transport_unregister(struct fcoe_transport *t)
+{
+	struct fcoe_transport *tt, *tmp;
+
+	mutex_lock(&fcoe_transports_lock);
+	list_for_each_entry_safe(tt, tmp, &fcoe_transports, list) {
+		if (tt == t) {
+			list_del(&t->list);
+			mutex_unlock(&fcoe_transports_lock);
+			fcoe_transport_device_remove_all(t);
+			printk(KERN_DEBUG "fcoe_transport_unregister:%s\n",
+			       t->name);
+			return 0;
+		}
+	}
+	mutex_unlock(&fcoe_transports_lock);
+	return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(fcoe_transport_unregister);
+
+/**
+ * fcoe_load_transport_driver - load an offload driver by alias name
+ * @netdev: the target net device
+ *
+ * Requests an offload driver module as the fcoe transport; if that fails, it
+ * falls back to the SW HBA (fcoe_sw) as its transport.
+ *
+ * TODO -
+ * 	1. supports only PCI devices
+ * 	2. needs a fix for VLAN and bonding
+ * 	3. a pure hw fcoe hba may not have a netdev
+ *
+ * Returns: 0 for success
+ **/
+int fcoe_load_transport_driver(struct net_device *netdev)
+{
+	struct pci_dev *pci;
+	struct device *dev = netdev->dev.parent;
+
+	if (fcoe_transport_lookup(netdev)) {
+		/* load default transport */
+		printk(KERN_DEBUG "fcoe: already loaded transport for %s\n",
+		       netdev->name);
+		return -EEXIST;
+	}
+
+	if (dev->bus != &pci_bus_type) {
+		printk(KERN_DEBUG "fcoe: only PCI devices are supported\n");
+		return -ENODEV;
+	}
+	pci = to_pci_dev(dev);
+	printk(KERN_DEBUG "fcoe: loading driver fcoe-pci-0x%04x-0x%04x\n",
+	       pci->vendor, pci->device);
+
+	return request_module("fcoe-pci-0x%04x-0x%04x",
+			      pci->vendor, pci->device);
+
+}
+EXPORT_SYMBOL_GPL(fcoe_load_transport_driver);
+
+/**
+ * fcoe_transport_attach - load transport to fcoe
+ * @netdev: the netdev to attach a transport to
+ *
+ * This will look for existing offload driver, if not found, it falls back to
+ * the default sw hba (fcoe_sw) as its fcoe transport.
+ *
+ * Returns: 0 for success
+ **/
+int fcoe_transport_attach(struct net_device *netdev)
+{
+	struct fcoe_transport *t;
+
+	/* find the corresponding transport */
+	t = fcoe_transport_lookup(netdev);
+	if (!t) {
+		printk(KERN_DEBUG "fcoe_transport_attach"
+		       ":no transport for %s\n", netdev->name);
+		return -ENODEV;
+	}
+	/* add to the transport */
+	if (fcoe_transport_device_add(t, netdev)) {
+		printk(KERN_DEBUG "fcoe_transport_attach"
+		       ":failed to add %s to tramsport %s\n",
+		       netdev->name, t->name);
+		return -EIO;
+	}
+	/* transport create function */
+	if (t->create)
+		t->create(netdev);
+
+	printk(KERN_DEBUG "fcoe_transport_attach:transport %s for %s\n",
+	       t->name, netdev->name);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(fcoe_transport_attach);
+
+/**
+ * fcoe_transport_release - unload transport from fcoe
+ * @netdev: the net device on which fcoe is to be released
+ *
+ * Returns: 0 for success
+ **/
+int fcoe_transport_release(struct net_device *netdev)
+{
+	struct fcoe_transport *t;
+
+	/* find the corresponding transport */
+	t = fcoe_transport_lookup(netdev);
+	if (!t) {
+		printk(KERN_DEBUG "fcoe_transport_release:"
+		       "no transport for %s\n", netdev->name);
+		return -ENODEV;
+	}
+	/* remove the device from the transport */
+	if (fcoe_transport_device_remove(t, netdev)) {
+		printk(KERN_DEBUG "fcoe_transport_release:"
+		       "failed to add %s to tramsport %s\n",
+		       netdev->name, t->name);
+		return -EIO;
+	}
+	/* transport destroy function */
+	if (t->destroy)
+		t->destroy(netdev);
+
+	printk(KERN_DEBUG "fcoe_transport_release:"
+	       "device %s dettached from transport %s\n",
+	       netdev->name, t->name);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(fcoe_transport_release);
+
+/**
+ * fcoe_transport_init - initializes fcoe transport layer
+ *
+ * This prepares for the fcoe transport layer
+ *
+ * Returns: 0 for success
+ **/
+int __init fcoe_transport_init(void)
+{
+	INIT_LIST_HEAD(&fcoe_transports);
+	mutex_init(&fcoe_transports_lock);
+	return 0;
+}
+
+/**
+ * fcoe_transport_exit - cleans up the fcoe transport layer
+ * This cleans up the fcoe transport layer, removing every transport on the
+ * list; note that the transport destroy function is not called here.
+ *
+ * Returns: 0 for success
+ **/
+int __exit fcoe_transport_exit(void)
+{
+	struct fcoe_transport *t, *tmp;
+
+	mutex_lock(&fcoe_transports_lock);
+	list_for_each_entry_safe(t, tmp, &fcoe_transports, list) {
+		list_del(&t->list);
+		mutex_unlock(&fcoe_transports_lock);
+		fcoe_transport_device_remove_all(t);
+		mutex_lock(&fcoe_transports_lock);
+	}
+	mutex_unlock(&fcoe_transports_lock);
+	return 0;
+}
diff --git a/drivers/scsi/fcoe/fcoe_sw.c b/drivers/scsi/fcoe/fcoe_sw.c
new file mode 100644
index 000000000000..dc4cd5e25760
--- /dev/null
+++ b/drivers/scsi/fcoe/fcoe_sw.c
@@ -0,0 +1,494 @@
+/*
+ * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <net/rtnetlink.h>
+
+#include <scsi/fc/fc_els.h>
+#include <scsi/fc/fc_encaps.h>
+#include <scsi/fc/fc_fs.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_transport_fc.h>
+
+#include <scsi/libfc.h>
+#include <scsi/libfcoe.h>
+#include <scsi/fc_transport_fcoe.h>
+
+#define FCOE_SW_VERSION	"0.1"
+#define	FCOE_SW_NAME	"fcoesw"
+#define	FCOE_SW_VENDOR	"Open-FCoE.org"
+
+#define FCOE_MAX_LUN		255
+#define FCOE_MAX_FCP_TARGET	256
+
+#define FCOE_MAX_OUTSTANDING_COMMANDS	1024
+
+#define FCOE_MIN_XID		0x0001	/* the min xid supported by fcoe_sw */
+#define FCOE_MAX_XID		0x07ef	/* the max xid supported by fcoe_sw */
+
+static struct scsi_transport_template *scsi_transport_fcoe_sw;
+
+struct fc_function_template fcoe_sw_transport_function = {
+	.show_host_node_name = 1,
+	.show_host_port_name = 1,
+	.show_host_supported_classes = 1,
+	.show_host_supported_fc4s = 1,
+	.show_host_active_fc4s = 1,
+	.show_host_maxframe_size = 1,
+
+	.show_host_port_id = 1,
+	.show_host_supported_speeds = 1,
+	.get_host_speed = fc_get_host_speed,
+	.show_host_speed = 1,
+	.show_host_port_type = 1,
+	.get_host_port_state = fc_get_host_port_state,
+	.show_host_port_state = 1,
+	.show_host_symbolic_name = 1,
+
+	.dd_fcrport_size = sizeof(struct fc_rport_libfc_priv),
+	.show_rport_maxframe_size = 1,
+	.show_rport_supported_classes = 1,
+
+	.show_host_fabric_name = 1,
+	.show_starget_node_name = 1,
+	.show_starget_port_name = 1,
+	.show_starget_port_id = 1,
+	.set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
+	.show_rport_dev_loss_tmo = 1,
+	.get_fc_host_stats = fc_get_host_stats,
+	.issue_fc_host_lip = fcoe_reset,
+
+	.terminate_rport_io = fc_rport_terminate_io,
+};
+
+static struct scsi_host_template fcoe_sw_shost_template = {
+	.module = THIS_MODULE,
+	.name = "FCoE Driver",
+	.proc_name = FCOE_SW_NAME,
+	.queuecommand = fc_queuecommand,
+	.eh_abort_handler = fc_eh_abort,
+	.eh_device_reset_handler = fc_eh_device_reset,
+	.eh_host_reset_handler = fc_eh_host_reset,
+	.slave_alloc = fc_slave_alloc,
+	.change_queue_depth = fc_change_queue_depth,
+	.change_queue_type = fc_change_queue_type,
+	.this_id = -1,
+	.cmd_per_lun = 32,
+	.can_queue = FCOE_MAX_OUTSTANDING_COMMANDS,
+	.use_clustering = ENABLE_CLUSTERING,
+	.sg_tablesize = SG_ALL,
+	.max_sectors = 0xffff,
+};
+
+/*
+ * fcoe_sw_lport_config - sets up the fc_lport
+ * @lp: ptr to the fc_lport
+ *
+ * Returns: 0 for success
+ *
+ */
+static int fcoe_sw_lport_config(struct fc_lport *lp)
+{
+	int i = 0;
+
+	lp->link_status = 0;
+	lp->max_retry_count = 3;
+	lp->e_d_tov = 2 * 1000;	/* FC-FS default */
+	lp->r_a_tov = 2 * 2 * 1000;
+	lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
+			      FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
+
+	/*
+	 * allocate per cpu stats block
+	 */
+	for_each_online_cpu(i)
+		lp->dev_stats[i] = kzalloc(sizeof(struct fcoe_dev_stats),
+					   GFP_KERNEL);
+
+	/* lport fc_lport related configuration */
+	fc_lport_config(lp);
+
+	return 0;
+}
+
+/*
+ * fcoe_sw_netdev_config - sets up fcoe_softc for lport and network
+ * related properties
+ * @lp : ptr to the fc_lport
+ * @netdev : ptr to the associated netdevice struct
+ *
+ * Must be called after fcoe_sw_lport_config() as it will use lport mutex
+ *
+ * Returns : 0 for success
+ *
+ */
+static int fcoe_sw_netdev_config(struct fc_lport *lp, struct net_device *netdev)
+{
+	u32 mfs;
+	u64 wwnn, wwpn;
+	struct fcoe_softc *fc;
+	u8 flogi_maddr[ETH_ALEN];
+
+	/* Setup lport private data to point to fcoe softc */
+	fc = lport_priv(lp);
+	fc->lp = lp;
+	fc->real_dev = netdev;
+	fc->phys_dev = netdev;
+
+	/* Require support for get_pauseparam ethtool op. */
+	if (netdev->priv_flags & IFF_802_1Q_VLAN)
+		fc->phys_dev = vlan_dev_real_dev(netdev);
+
+	/* Bonding devices are not supported */
+	if ((fc->real_dev->priv_flags & IFF_MASTER_ALB) ||
+	    (fc->real_dev->priv_flags & IFF_SLAVE_INACTIVE) ||
+	    (fc->real_dev->priv_flags & IFF_MASTER_8023AD)) {
+		return -EOPNOTSUPP;
+	}
+
+	/*
+	 * Determine max frame size based on underlying device and optional
+	 * user-configured limit.  If the MFS is too low, fcoe_link_ok()
+	 * will return 0, so do this first.
+	 */
+	mfs = fc->real_dev->mtu - (sizeof(struct fcoe_hdr) +
+				   sizeof(struct fcoe_crc_eof));
+	if (fc_set_mfs(lp, mfs))
+		return -EINVAL;
+
+	lp->link_status = ~FC_PAUSE & ~FC_LINK_UP;
+	if (!fcoe_link_ok(lp))
+		lp->link_status |= FC_LINK_UP;
+
+	/* offload features support */
+	if (fc->real_dev->features & NETIF_F_SG)
+		lp->sg_supp = 1;
+
+
+	skb_queue_head_init(&fc->fcoe_pending_queue);
+
+	/* setup Source Mac Address */
+	memcpy(fc->ctl_src_addr, fc->real_dev->dev_addr,
+	       fc->real_dev->addr_len);
+
+	wwnn = fcoe_wwn_from_mac(fc->real_dev->dev_addr, 1, 0);
+	fc_set_wwnn(lp, wwnn);
+	/* XXX - 3rd arg needs to be vlan id */
+	wwpn = fcoe_wwn_from_mac(fc->real_dev->dev_addr, 2, 0);
+	fc_set_wwpn(lp, wwpn);
+
+	/*
+	 * Add FCoE MAC address as second unicast MAC address
+	 * or enter promiscuous mode if not capable of listening
+	 * for multiple unicast MACs.
+	 */
+	rtnl_lock();
+	memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
+	dev_unicast_add(fc->real_dev, flogi_maddr, ETH_ALEN);
+	rtnl_unlock();
+
+	/*
+	 * setup the receive function from ethernet driver
+	 * on the ethertype for the given device
+	 */
+	fc->fcoe_packet_type.func = fcoe_rcv;
+	fc->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE);
+	fc->fcoe_packet_type.dev = fc->real_dev;
+	dev_add_pack(&fc->fcoe_packet_type);
+
+	return 0;
+}
+
+/*
+ * fcoe_sw_shost_config - sets up fc_lport->host
+ * @lp : ptr to the fc_lport
+ * @shost : ptr to the associated scsi host
+ * @dev : device associated to scsi host
+ *
+ * Must be called after fcoe_sw_lport_config() and fcoe_sw_netdev_config()
+ *
+ * Returns : 0 for success
+ *
+ */
+static int fcoe_sw_shost_config(struct fc_lport *lp, struct Scsi_Host *shost,
+				struct device *dev)
+{
+	int rc = 0;
+
+	/* lport scsi host config */
+	lp->host = shost;
+
+	lp->host->max_lun = FCOE_MAX_LUN;
+	lp->host->max_id = FCOE_MAX_FCP_TARGET;
+	lp->host->max_channel = 0;
+	lp->host->transportt = scsi_transport_fcoe_sw;
+
+	/* add the new host to the SCSI-ml */
+	rc = scsi_add_host(lp->host, dev);
+	if (rc) {
+		FC_DBG("fcoe_sw_shost_config:error on scsi_add_host\n");
+		return rc;
+	}
+	sprintf(fc_host_symbolic_name(lp->host), "%s v%s over %s",
+		FCOE_SW_NAME, FCOE_SW_VERSION,
+		fcoe_netdev(lp)->name);
+
+	return 0;
+}
+
+/*
+ * fcoe_sw_em_config - allocates em for this lport
+ * @lp: the port that em is to allocated for
+ *
+ * Returns : 0 on success
+ */
+static inline int fcoe_sw_em_config(struct fc_lport *lp)
+{
+	BUG_ON(lp->emp);
+
+	lp->emp = fc_exch_mgr_alloc(lp, FC_CLASS_3,
+				    FCOE_MIN_XID, FCOE_MAX_XID);
+	if (!lp->emp)
+		return -ENOMEM;
+
+	return 0;
+}
+
+/*
+ * fcoe_sw_destroy - FCoE software HBA tear-down function
+ * @netdev: ptr to the associated net_device
+ *
+ * Returns: 0 for success
+ */
+static int fcoe_sw_destroy(struct net_device *netdev)
+{
+	int cpu;
+	struct fc_lport *lp = NULL;
+	struct fcoe_softc *fc;
+	u8 flogi_maddr[ETH_ALEN];
+
+	BUG_ON(!netdev);
+
+	printk(KERN_DEBUG "fcoe_sw_destroy:interface on %s\n",
+	       netdev->name);
+
+	lp = fcoe_hostlist_lookup(netdev);
+	if (!lp)
+		return -ENODEV;
+
+	fc = fcoe_softc(lp);
+
+	/* Logout of the fabric */
+	fc_fabric_logoff(lp);
+
+	/* Remove the instance from fcoe's list */
+	fcoe_hostlist_remove(lp);
+
+	/* Don't listen for Ethernet packets anymore */
+	dev_remove_pack(&fc->fcoe_packet_type);
+
+	/* Cleanup the fc_lport */
+	fc_lport_destroy(lp);
+	fc_fcp_destroy(lp);
+
+	/* Detach from the scsi-ml */
+	fc_remove_host(lp->host);
+	scsi_remove_host(lp->host);
+
+	/* There are no more rports or I/O, free the EM */
+	if (lp->emp)
+		fc_exch_mgr_free(lp->emp);
+
+	/* Delete secondary MAC addresses */
+	rtnl_lock();
+	memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
+	dev_unicast_delete(fc->real_dev, flogi_maddr, ETH_ALEN);
+	if (compare_ether_addr(fc->data_src_addr, (u8[6]) { 0 }))
+		dev_unicast_delete(fc->real_dev, fc->data_src_addr, ETH_ALEN);
+	rtnl_unlock();
+
+	/* Free the per-CPU receive threads */
+	fcoe_percpu_clean(lp);
+
+	/* Free existing skbs */
+	fcoe_clean_pending_queue(lp);
+
+	/* Free memory used by statistical counters */
+	for_each_online_cpu(cpu)
+		kfree(lp->dev_stats[cpu]);
+
+	/* Release the net_device and Scsi_Host */
+	dev_put(fc->real_dev);
+	scsi_host_put(lp->host);
+
+	return 0;
+}
+
+static struct libfc_function_template fcoe_sw_libfc_fcn_templ = {
+	.frame_send = fcoe_xmit,
+};
+
+/*
+ * fcoe_sw_create - this function creates the fcoe interface
+ * @netdev: pointer the associated netdevice
+ *
+ * Creates fc_lport struct and scsi_host for lport, configures lport
+ * and starts fabric login.
+ *
+ * Returns : 0 on success
+ */
+static int fcoe_sw_create(struct net_device *netdev)
+{
+	int rc;
+	struct fc_lport *lp = NULL;
+	struct fcoe_softc *fc;
+	struct Scsi_Host *shost;
+
+	BUG_ON(!netdev);
+
+	printk(KERN_DEBUG "fcoe_sw_create:interface on %s\n",
+	       netdev->name);
+
+	lp = fcoe_hostlist_lookup(netdev);
+	if (lp)
+		return -EEXIST;
+
+	shost = fcoe_host_alloc(&fcoe_sw_shost_template,
+				sizeof(struct fcoe_softc));
+	if (!shost) {
+		FC_DBG("Could not allocate host structure\n");
+		return -ENOMEM;
+	}
+	lp = shost_priv(shost);
+	fc = lport_priv(lp);
+
+	/* configure fc_lport, e.g., em */
+	rc = fcoe_sw_lport_config(lp);
+	if (rc) {
+		FC_DBG("Could not configure lport\n");
+		goto out_host_put;
+	}
+
+	/* configure lport network properties */
+	rc = fcoe_sw_netdev_config(lp, netdev);
+	if (rc) {
+		FC_DBG("Could not configure netdev for lport\n");
+		goto out_host_put;
+	}
+
+	/* configure lport scsi host properties */
+	rc = fcoe_sw_shost_config(lp, shost, &netdev->dev);
+	if (rc) {
+		FC_DBG("Could not configure shost for lport\n");
+		goto out_host_put;
+	}
+
+	/* lport exch manager allocation */
+	rc = fcoe_sw_em_config(lp);
+	if (rc) {
+		FC_DBG("Could not configure em for lport\n");
+		goto out_host_put;
+	}
+
+	/* Initialize the library */
+	rc = fcoe_libfc_config(lp, &fcoe_sw_libfc_fcn_templ);
+	if (rc) {
+		FC_DBG("Could not configure libfc for lport!\n");
+		goto out_lp_destroy;
+	}
+
+	/* add to lports list */
+	fcoe_hostlist_add(lp);
+
+	lp->boot_time = jiffies;
+
+	fc_fabric_login(lp);
+
+	dev_hold(netdev);
+
+	return rc;
+
+out_lp_destroy:
+	fc_exch_mgr_free(lp->emp); /* Free the EM */
+out_host_put:
+	scsi_host_put(lp->host);
+	return rc;
+}
+
+/*
+ * fcoe_sw_match - the fcoe sw transport match function
+ *
+ * Returns : false always
+ */
+static bool fcoe_sw_match(struct net_device *netdev)
+{
+	/* FIXME - for sw transport, always return false */
+	return false;
+}
+
+/* the sw hba fcoe transport */
+struct fcoe_transport fcoe_sw_transport = {
+	.name = "fcoesw",
+	.create = fcoe_sw_create,
+	.destroy = fcoe_sw_destroy,
+	.match = fcoe_sw_match,
+	.vendor = 0x0,
+	.device = 0xffff,
+};
+
+/*
+ * fcoe_sw_init - registers fcoe_sw_transport
+ *
+ * Returns : 0 on success
+ */
+int __init fcoe_sw_init(void)
+{
+	/* attach to scsi transport */
+	scsi_transport_fcoe_sw =
+		fc_attach_transport(&fcoe_sw_transport_function);
+	if (!scsi_transport_fcoe_sw) {
+		printk(KERN_ERR "fcoe_sw_init:fc_attach_transport() failed\n");
+		return -ENODEV;
+	}
+	/* register sw transport */
+	fcoe_transport_register(&fcoe_sw_transport);
+	return 0;
+}
+
+/*
+ * fcoe_sw_exit - unregisters fcoe_sw_transport
+ *
+ * Returns : 0 on success
+ */
+int __exit fcoe_sw_exit(void)
+{
+	/* detach the transport */
+	fc_release_transport(scsi_transport_fcoe_sw);
+	fcoe_transport_unregister(&fcoe_sw_transport);
+	return 0;
+}
diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c
new file mode 100644
index 000000000000..e419f486cdb3
--- /dev/null
+++ b/drivers/scsi/fcoe/libfcoe.c
@@ -0,0 +1,1510 @@
+/*
+ * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/kthread.h>
+#include <linux/crc32.h>
+#include <linux/cpu.h>
+#include <linux/fs.h>
+#include <linux/sysfs.h>
+#include <linux/ctype.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsicam.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_transport_fc.h>
+#include <net/rtnetlink.h>
+
+#include <scsi/fc/fc_encaps.h>
+
+#include <scsi/libfc.h>
+#include <scsi/fc_frame.h>
+#include <scsi/libfcoe.h>
+#include <scsi/fc_transport_fcoe.h>
+
+static int debug_fcoe;
+
+#define FCOE_MAX_QUEUE_DEPTH  256
+
+/* destination address mode */
+#define FCOE_GW_ADDR_MODE	    0x00
+#define FCOE_FCOUI_ADDR_MODE	    0x01
+
+#define FCOE_WORD_TO_BYTE  4
+
+MODULE_AUTHOR("Open-FCoE.org");
+MODULE_DESCRIPTION("FCoE");
+MODULE_LICENSE("GPL");
+
+/* fcoe host list */
+LIST_HEAD(fcoe_hostlist);
+DEFINE_RWLOCK(fcoe_hostlist_lock);
+DEFINE_TIMER(fcoe_timer, NULL, 0, 0);
+struct fcoe_percpu_s *fcoe_percpu[NR_CPUS];
+
+
+/* Function Prototypes */
+static int fcoe_check_wait_queue(struct fc_lport *);
+static void fcoe_insert_wait_queue_head(struct fc_lport *, struct sk_buff *);
+static void fcoe_insert_wait_queue(struct fc_lport *, struct sk_buff *);
+static void fcoe_recv_flogi(struct fcoe_softc *, struct fc_frame *, u8 *);
+#ifdef CONFIG_HOTPLUG_CPU
+static int fcoe_cpu_callback(struct notifier_block *, ulong, void *);
+#endif /* CONFIG_HOTPLUG_CPU */
+static int fcoe_device_notification(struct notifier_block *, ulong, void *);
+static void fcoe_dev_setup(void);
+static void fcoe_dev_cleanup(void);
+
+/* notification function from net device */
+static struct notifier_block fcoe_notifier = {
+	.notifier_call = fcoe_device_notification,
+};
+
+
+#ifdef CONFIG_HOTPLUG_CPU
+static struct notifier_block fcoe_cpu_notifier = {
+	.notifier_call = fcoe_cpu_callback,
+};
+
+/**
+ * fcoe_create_percpu_data - creates the associated cpu data
+ * @cpu: index for the cpu where fcoe cpu data will be created
+ *
+ * create percpu stats block, called from the cpu online notifier
+ *
+ * Returns: none
+ **/
+static void fcoe_create_percpu_data(int cpu)
+{
+	struct fc_lport *lp;
+	struct fcoe_softc *fc;
+
+	write_lock_bh(&fcoe_hostlist_lock);
+	list_for_each_entry(fc, &fcoe_hostlist, list) {
+		lp = fc->lp;
+		if (lp->dev_stats[cpu] == NULL)
+			lp->dev_stats[cpu] =
+				kzalloc(sizeof(struct fcoe_dev_stats),
+					GFP_KERNEL);
+	}
+	write_unlock_bh(&fcoe_hostlist_lock);
+}
+
+/**
+ * fcoe_destroy_percpu_data - destroys the associated cpu data
+ * @cpu: index for the cpu where fcoe cpu data will be destroyed
+ *
+ * destroy percpu stats block, called from the cpu offline notifier
+ *
+ * Returns: none
+ **/
+static void fcoe_destroy_percpu_data(int cpu)
+{
+	struct fc_lport *lp;
+	struct fcoe_softc *fc;
+
+	write_lock_bh(&fcoe_hostlist_lock);
+	list_for_each_entry(fc, &fcoe_hostlist, list) {
+		lp = fc->lp;
+		kfree(lp->dev_stats[cpu]);
+		lp->dev_stats[cpu] = NULL;
+	}
+	write_unlock_bh(&fcoe_hostlist_lock);
+}
+
+/**
+ * fcoe_cpu_callback - fcoe cpu hotplug event callback
+ * @nfb: callback data block
+ * @action: event triggering the callback
+ * @hcpu: index for the cpu of this event
+ *
+ * this creates or destroys per cpu data for fcoe
+ *
+ * Returns NOTIFY_OK always.
+ **/
+static int fcoe_cpu_callback(struct notifier_block *nfb, unsigned long action,
+			     void *hcpu)
+{
+	unsigned int cpu = (unsigned long)hcpu;
+
+	switch (action) {
+	case CPU_ONLINE:
+		fcoe_create_percpu_data(cpu);
+		break;
+	case CPU_DEAD:
+		fcoe_destroy_percpu_data(cpu);
+		break;
+	default:
+		break;
+	}
+	return NOTIFY_OK;
+}
+#endif /* CONFIG_HOTPLUG_CPU */
+
+/**
+ * fcoe_rcv - this is the fcoe receive function called by NET_RX_SOFTIRQ
+ * @skb: the receive skb
+ * @dev: associated net device
+ * @ptype: context
+ * @olddev: last device
+ *
+ * this function receives the packet, builds an FC frame and queues it
+ * to a per-cpu receive thread
+ *
+ * Returns: 0 for success
+ **/
+int fcoe_rcv(struct sk_buff *skb, struct net_device *dev,
+	     struct packet_type *ptype, struct net_device *olddev)
+{
+	struct fc_lport *lp;
+	struct fcoe_rcv_info *fr;
+	struct fcoe_softc *fc;
+	struct fcoe_dev_stats *stats;
+	struct fc_frame_header *fh;
+	unsigned short oxid;
+	int cpu_idx;
+	struct fcoe_percpu_s *fps;
+
+	fc = container_of(ptype, struct fcoe_softc, fcoe_packet_type);
+	lp = fc->lp;
+	if (unlikely(lp == NULL)) {
+		FC_DBG("cannot find hba structure");
+		goto err2;
+	}
+
+	if (unlikely(debug_fcoe)) {
+		FC_DBG("skb_info: len:%d data_len:%d head:%p data:%p tail:%p "
+		       "end:%p sum:%d dev:%s", skb->len, skb->data_len,
+		       skb->head, skb->data, skb_tail_pointer(skb),
+		       skb_end_pointer(skb), skb->csum,
+		       skb->dev ? skb->dev->name : "<NULL>");
+
+	}
+
+	/* check for FCOE packet type */
+	if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) {
+		FC_DBG("wrong FC type frame");
+		goto err;
+	}
+
+	/*
+	 * Check for minimum frame length, and make sure required FCoE
+	 * and FC headers are pulled into the linear data area.
+	 */
+	if (unlikely((skb->len < FCOE_MIN_FRAME) ||
+	    !pskb_may_pull(skb, FCOE_HEADER_LEN)))
+		goto err;
+
+	skb_set_transport_header(skb, sizeof(struct fcoe_hdr));
+	fh = (struct fc_frame_header *) skb_transport_header(skb);
+
+	oxid = ntohs(fh->fh_ox_id);
+
+	fr = fcoe_dev_from_skb(skb);
+	fr->fr_dev = lp;
+	fr->ptype = ptype;
+	cpu_idx = 0;
+#ifdef CONFIG_SMP
+	/*
+	 * The incoming frame exchange id (oxid) is ANDed with the
+	 * number of online cpus to derive cpu_idx, which selects a
+	 * per-cpu kernel thread from fcoe_percpu. If that cpu is
+	 * offline or has no kernel thread, cpu_idx falls back to the
+	 * first online cpu.
+	 */
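+	/*
+	 * Note: oxid & (count - 1) is a uniform modulo only when the
+	 * online cpu count is a power of two; otherwise some cpu
+	 * indexes are never produced by the mask.
+	 */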
+	cpu_idx = oxid & (num_online_cpus() - 1);
+	if (!fcoe_percpu[cpu_idx] || !cpu_online(cpu_idx))
+		cpu_idx = first_cpu(cpu_online_map);
+#endif
+	fps = fcoe_percpu[cpu_idx];
+
+	spin_lock_bh(&fps->fcoe_rx_list.lock);
+	__skb_queue_tail(&fps->fcoe_rx_list, skb);
+	if (fps->fcoe_rx_list.qlen == 1)
+		wake_up_process(fps->thread);
+
+	spin_unlock_bh(&fps->fcoe_rx_list.lock);
+
+	return 0;
+err:
+#ifdef CONFIG_SMP
+	stats = lp->dev_stats[smp_processor_id()];
+#else
+	stats = lp->dev_stats[0];
+#endif
+	if (stats)
+		stats->ErrorFrames++;
+
+err2:
+	kfree_skb(skb);
+	return -1;
+}
+EXPORT_SYMBOL_GPL(fcoe_rcv);
+
+/**
+ * fcoe_start_io - pass to netdev to start xmit for fcoe
+ * @skb: the skb to be xmitted
+ *
+ * Returns: 0 for success
+ **/
+static inline int fcoe_start_io(struct sk_buff *skb)
+{
+	int rc;
+
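+	/*
+	 * Take an extra reference so the skb survives a failed
+	 * dev_queue_xmit() and can be requeued by the caller; on
+	 * success we drop our extra reference below.
+	 */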
+	skb_get(skb);
+	rc = dev_queue_xmit(skb);
+	if (rc != 0)
+		return rc;
+	kfree_skb(skb);
+	return 0;
+}
+
+/**
+ * fcoe_get_paged_crc_eof - append a paged crc_eof trailer, allocating a page if needed
+ * @skb: the skb to be xmitted
+ * @tlen: trailer length, i.e. sizeof(struct fcoe_crc_eof)
+ *
+ * Returns: 0 for success
+ **/
+static int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen)
+{
+	struct fcoe_percpu_s *fps;
+	struct page *page;
+	int cpu_idx;
+
+	cpu_idx = get_cpu();
+	fps = fcoe_percpu[cpu_idx];
+	page = fps->crc_eof_page;
+	if (!page) {
+		page = alloc_page(GFP_ATOMIC);
+		if (!page) {
+			put_cpu();
+			return -ENOMEM;
+		}
+		fps->crc_eof_page = page;
+		WARN_ON(fps->crc_eof_offset != 0);
+	}
+
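+	/*
+	 * The crc_eof page is shared between frames: each frame takes
+	 * one sizeof(struct fcoe_crc_eof) slice until the page fills,
+	 * at which point the cached reference is dropped below.
+	 */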
+	get_page(page);
+	skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page,
+			   fps->crc_eof_offset, tlen);
+	skb->len += tlen;
+	skb->data_len += tlen;
+	skb->truesize += tlen;
+	fps->crc_eof_offset += sizeof(struct fcoe_crc_eof);
+
+	if (fps->crc_eof_offset >= PAGE_SIZE) {
+		fps->crc_eof_page = NULL;
+		fps->crc_eof_offset = 0;
+		put_page(page);
+	}
+	put_cpu();
+	return 0;
+}
+
+/**
+ * fcoe_fc_crc - calculates FC CRC in this fcoe skb
+ * @fp: the fc_frame containing data to be checksummed
+ *
+ * This uses crc32() to calculate the crc for fc frame
+ * Return   : 32 bit crc
+ *
+ **/
+u32 fcoe_fc_crc(struct fc_frame *fp)
+{
+	struct sk_buff *skb = fp_skb(fp);
+	struct skb_frag_struct *frag;
+	unsigned char *data;
+	unsigned long off, len, clen;
+	u32 crc;
+	unsigned i;
+
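+	/* CRC the linear data first, then walk each paged fragment,
+	 * mapping no more than one page at a time so highmem pages
+	 * are handled safely.
+	 */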
+	crc = crc32(~0, skb->data, skb_headlen(skb));
+
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		frag = &skb_shinfo(skb)->frags[i];
+		off = frag->page_offset;
+		len = frag->size;
+		while (len > 0) {
+			clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK));
+			data = kmap_atomic(frag->page + (off >> PAGE_SHIFT),
+					   KM_SKB_DATA_SOFTIRQ);
+			crc = crc32(crc, data + (off & ~PAGE_MASK), clen);
+			kunmap_atomic(data, KM_SKB_DATA_SOFTIRQ);
+			off += clen;
+			len -= clen;
+		}
+	}
+	return crc;
+}
+EXPORT_SYMBOL_GPL(fcoe_fc_crc);
+
+/**
+ * fcoe_xmit - FCoE frame transmit function
+ * @lp:	the associated local port
+ * @fp: the fc_frame to be transmitted
+ *
+ * Return   : 0 for success
+ *
+ **/
+int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
+{
+	int wlen, rc = 0;
+	u32 crc;
+	struct ethhdr *eh;
+	struct fcoe_crc_eof *cp;
+	struct sk_buff *skb;
+	struct fcoe_dev_stats *stats;
+	struct fc_frame_header *fh;
+	unsigned int hlen;		/* header length implies the version */
+	unsigned int tlen;		/* trailer length */
+	unsigned int elen;		/* eth header, may include vlan */
+	int flogi_in_progress = 0;
+	struct fcoe_softc *fc;
+	u8 sof, eof;
+	struct fcoe_hdr *hp;
+
+	WARN_ON((fr_len(fp) % sizeof(u32)) != 0);
+
+	fc = fcoe_softc(lp);
+	/*
+	 * If this is an FLOGI we need to learn the gateway address
+	 * and our own FCID.
+	 */
+	fh = fc_frame_header_get(fp);
+	if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) {
+		if (fc_frame_payload_op(fp) == ELS_FLOGI) {
+			fc->flogi_oxid = ntohs(fh->fh_ox_id);
+			fc->address_mode = FCOE_FCOUI_ADDR_MODE;
+			fc->flogi_progress = 1;
+			flogi_in_progress = 1;
+		} else if (fc->flogi_progress && ntoh24(fh->fh_s_id) != 0) {
+			/*
+			 * Here we must've gotten an SID by accepting an FLOGI
+			 * from a point-to-point connection.  Switch to using
+			 * the source mac based on the SID.  The destination
+			 * MAC in this case would have been set by receiving the
+			 * FLOGI.
+			 */
+			fc_fcoe_set_mac(fc->data_src_addr, fh->fh_s_id);
+			fc->flogi_progress = 0;
+		}
+	}
+
+	skb = fp_skb(fp);
+	sof = fr_sof(fp);
+	eof = fr_eof(fp);
+
+	elen = (fc->real_dev->priv_flags & IFF_802_1Q_VLAN) ?
+		sizeof(struct vlan_ethhdr) : sizeof(struct ethhdr);
+	hlen = sizeof(struct fcoe_hdr);
+	tlen = sizeof(struct fcoe_crc_eof);
+	wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE;
+
+	/* crc offload */
+	if (likely(lp->crc_offload)) {
+		skb->ip_summed = CHECKSUM_COMPLETE;
+		skb->csum_start = skb_headroom(skb);
+		skb->csum_offset = skb->len;
+		crc = 0;
+	} else {
+		skb->ip_summed = CHECKSUM_NONE;
+		crc = fcoe_fc_crc(fp);
+	}
+
+	/* copy fc crc and eof to the skb buff */
+	if (skb_is_nonlinear(skb)) {
+		skb_frag_t *frag;
+		if (fcoe_get_paged_crc_eof(skb, tlen)) {
+			kfree_skb(skb);
+			return -ENOMEM;
+		}
+		frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
+		cp = kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ)
+			+ frag->page_offset;
+	} else {
+		cp = (struct fcoe_crc_eof *)skb_put(skb, tlen);
+	}
+
+	memset(cp, 0, sizeof(*cp));
+	cp->fcoe_eof = eof;
+	cp->fcoe_crc32 = cpu_to_le32(~crc);
+
+	if (skb_is_nonlinear(skb)) {
+		kunmap_atomic(cp, KM_SKB_DATA_SOFTIRQ);
+		cp = NULL;
+	}
+
+	/* adjust skb network/transport offsets to match mac/fcoe/fc */
+	skb_push(skb, elen + hlen);
+	skb_reset_mac_header(skb);
+	skb_reset_network_header(skb);
+	skb->mac_len = elen;
+	skb->protocol = htons(ETH_P_802_3);
+	skb->dev = fc->real_dev;
+
+	/* fill up mac and fcoe headers */
+	eh = eth_hdr(skb);
+	eh->h_proto = htons(ETH_P_FCOE);
+	if (fc->address_mode == FCOE_FCOUI_ADDR_MODE)
+		fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id);
+	else
+		/* insert GW address */
+		memcpy(eh->h_dest, fc->dest_addr, ETH_ALEN);
+
+	if (unlikely(flogi_in_progress))
+		memcpy(eh->h_source, fc->ctl_src_addr, ETH_ALEN);
+	else
+		memcpy(eh->h_source, fc->data_src_addr, ETH_ALEN);
+
+	hp = (struct fcoe_hdr *)(eh + 1);
+	memset(hp, 0, sizeof(*hp));
+	if (FC_FCOE_VER)
+		FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER);
+	hp->fcoe_sof = sof;
+
+	/* update tx stats: regardless if LLD fails */
+	stats = lp->dev_stats[smp_processor_id()];
+	if (stats) {
+		stats->TxFrames++;
+		stats->TxWords += wlen;
+	}
+
+	/* send down to lld */
+	fr_dev(fp) = lp;
+	if (fc->fcoe_pending_queue.qlen)
+		rc = fcoe_check_wait_queue(lp);
+
+	if (rc == 0)
+		rc = fcoe_start_io(skb);
+
+	if (rc) {
+		fcoe_insert_wait_queue(lp, skb);
+		if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
+			fc_pause(lp);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(fcoe_xmit);
+
+/*
+ * fcoe_percpu_receive_thread - recv thread per cpu
+ * @arg: ptr to the fcoe per cpu struct
+ *
+ * Return: 0 for success
+ *
+ */
+int fcoe_percpu_receive_thread(void *arg)
+{
+	struct fcoe_percpu_s *p = arg;
+	u32 fr_len;
+	struct fc_lport *lp;
+	struct fcoe_rcv_info *fr;
+	struct fcoe_dev_stats *stats;
+	struct fc_frame_header *fh;
+	struct sk_buff *skb;
+	struct fcoe_crc_eof crc_eof;
+	struct fc_frame *fp;
+	u8 *mac = NULL;
+	struct fcoe_softc *fc;
+	struct fcoe_hdr *hp;
+
+	set_user_nice(current, 19);
+
+	while (!kthread_should_stop()) {
+
+		spin_lock_bh(&p->fcoe_rx_list.lock);
+		while ((skb = __skb_dequeue(&p->fcoe_rx_list)) == NULL) {
+			set_current_state(TASK_INTERRUPTIBLE);
+			spin_unlock_bh(&p->fcoe_rx_list.lock);
+			schedule();
+			set_current_state(TASK_RUNNING);
+			if (kthread_should_stop())
+				return 0;
+			spin_lock_bh(&p->fcoe_rx_list.lock);
+		}
+		spin_unlock_bh(&p->fcoe_rx_list.lock);
+		fr = fcoe_dev_from_skb(skb);
+		lp = fr->fr_dev;
+		if (unlikely(lp == NULL)) {
+			FC_DBG("invalid HBA Structure");
+			kfree_skb(skb);
+			continue;
+		}
+
+		stats = lp->dev_stats[smp_processor_id()];
+
+		if (unlikely(debug_fcoe)) {
+			FC_DBG("skb_info: len:%d data_len:%d head:%p data:%p "
+			       "tail:%p end:%p sum:%d dev:%s",
+			       skb->len, skb->data_len,
+			       skb->head, skb->data, skb_tail_pointer(skb),
+			       skb_end_pointer(skb), skb->csum,
+			       skb->dev ? skb->dev->name : "<NULL>");
+		}
+
+		/*
+		 * Save source MAC address before discarding header.
+		 */
+		fc = lport_priv(lp);
+		if (unlikely(fc->flogi_progress))
+			mac = eth_hdr(skb)->h_source;
+
+		if (skb_is_nonlinear(skb))
+			skb_linearize(skb);	/* not ideal */
+
+		/*
+		 * Frame length checks and setting up the header pointers
+		 * was done in fcoe_rcv already.
+		 */
+		hp = (struct fcoe_hdr *) skb_network_header(skb);
+		fh = (struct fc_frame_header *) skb_transport_header(skb);
+
+		if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
+			if (stats) {
+				if (stats->ErrorFrames < 5)
+					FC_DBG("unknown FCoE version %x",
+					       FC_FCOE_DECAPS_VER(hp));
+				stats->ErrorFrames++;
+			}
+			kfree_skb(skb);
+			continue;
+		}
+
+		skb_pull(skb, sizeof(struct fcoe_hdr));
+		fr_len = skb->len - sizeof(struct fcoe_crc_eof);
+
+		if (stats) {
+			stats->RxFrames++;
+			stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
+		}
+
+		fp = (struct fc_frame *)skb;
+		fc_frame_init(fp);
+		fr_dev(fp) = lp;
+		fr_sof(fp) = hp->fcoe_sof;
+
+		/* Copy out the CRC and EOF trailer for access */
+		if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) {
+			kfree_skb(skb);
+			continue;
+		}
+		fr_eof(fp) = crc_eof.fcoe_eof;
+		fr_crc(fp) = crc_eof.fcoe_crc32;
+		if (pskb_trim(skb, fr_len)) {
+			kfree_skb(skb);
+			continue;
+		}
+
+		/*
+		 * We only check the CRC here if no offload is
+		 * available and the frame is not solicited data; for
+		 * solicited data the FCP layer checks the CRC during
+		 * the copy.
+		 */
+		if (lp->crc_offload)
+			fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
+		else
+			fr_flags(fp) |= FCPHF_CRC_UNCHECKED;
+
+		fh = fc_frame_header_get(fp);
+		if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA &&
+		    fh->fh_type == FC_TYPE_FCP) {
+			fc_exch_recv(lp, lp->emp, fp);
+			continue;
+		}
+		if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) {
+			if (le32_to_cpu(fr_crc(fp)) !=
+			    ~crc32(~0, skb->data, fr_len)) {
+				if (debug_fcoe || stats->InvalidCRCCount < 5)
+					printk(KERN_WARNING "fcoe: dropping "
+					       "frame with CRC error\n");
+				stats->InvalidCRCCount++;
+				stats->ErrorFrames++;
+				fc_frame_free(fp);
+				continue;
+			}
+			fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
+		}
+		/* non flogi and non data exchanges are handled here */
+		if (unlikely(fc->flogi_progress))
+			fcoe_recv_flogi(fc, fp, mac);
+		fc_exch_recv(lp, lp->emp, fp);
+	}
+	return 0;
+}
+
+/**
+ * fcoe_recv_flogi - flogi receive function
+ * @fc: associated fcoe_softc
+ * @fp: the received frame
+ * @sa: the source address of this flogi
+ *
+ * This parses the FLOGI response and sets the corresponding MAC
+ * address for the initiator, either OUI-based or GW-based.
+ *
+ * Returns: none
+ **/
+static void fcoe_recv_flogi(struct fcoe_softc *fc, struct fc_frame *fp, u8 *sa)
+{
+	struct fc_frame_header *fh;
+	u8 op;
+
+	fh = fc_frame_header_get(fp);
+	if (fh->fh_type != FC_TYPE_ELS)
+		return;
+	op = fc_frame_payload_op(fp);
+	if (op == ELS_LS_ACC && fh->fh_r_ctl == FC_RCTL_ELS_REP &&
+	    fc->flogi_oxid == ntohs(fh->fh_ox_id)) {
+		/*
+		 * FLOGI accepted.
+		 * If the src mac addr is FC_OUI-based, then we mark the
+		 * address_mode flag to use FC_OUI-based Ethernet DA.
+		 * Otherwise we use the FCoE gateway addr
+		 */
+		if (!compare_ether_addr(sa, (u8[6]) FC_FCOE_FLOGI_MAC)) {
+			fc->address_mode = FCOE_FCOUI_ADDR_MODE;
+		} else {
+			memcpy(fc->dest_addr, sa, ETH_ALEN);
+			fc->address_mode = FCOE_GW_ADDR_MODE;
+		}
+
+		/*
+		 * Remove any previously-set unicast MAC filter.
+		 * Add secondary FCoE MAC address filter for our OUI.
+		 */
+		rtnl_lock();
+		if (compare_ether_addr(fc->data_src_addr, (u8[6]) { 0 }))
+			dev_unicast_delete(fc->real_dev, fc->data_src_addr,
+					   ETH_ALEN);
+		fc_fcoe_set_mac(fc->data_src_addr, fh->fh_d_id);
+		dev_unicast_add(fc->real_dev, fc->data_src_addr, ETH_ALEN);
+		rtnl_unlock();
+
+		fc->flogi_progress = 0;
+	} else if (op == ELS_FLOGI && fh->fh_r_ctl == FC_RCTL_ELS_REQ && sa) {
+		/*
+		 * Save source MAC for point-to-point responses.
+		 */
+		memcpy(fc->dest_addr, sa, ETH_ALEN);
+		fc->address_mode = FCOE_GW_ADDR_MODE;
+	}
+}
+
+/**
+ * fcoe_watchdog - fcoe timer callback
+ * @vp: unused timer callback argument
+ *
+ * For each fc_lport on the fcoe_hostlist, this flushes the pending
+ * xmit queue and un-pauses the lport once its queue has drained
+ * below FCOE_MAX_QUEUE_DEPTH.
+ *
+ * Returns: none
+ **/
+void fcoe_watchdog(ulong vp)
+{
+	struct fc_lport *lp;
+	struct fcoe_softc *fc;
+	int paused = 0;
+
+	read_lock(&fcoe_hostlist_lock);
+	list_for_each_entry(fc, &fcoe_hostlist, list) {
+		lp = fc->lp;
+		if (lp) {
+			if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
+				paused = 1;
+			if (fcoe_check_wait_queue(lp) <	 FCOE_MAX_QUEUE_DEPTH) {
+				if (paused)
+					fc_unpause(lp);
+			}
+		}
+	}
+	read_unlock(&fcoe_hostlist_lock);
+
+	fcoe_timer.expires = jiffies + (1 * HZ);
+	add_timer(&fcoe_timer);
+}
+
+
+/**
+ * fcoe_check_wait_queue - flush the fcoe pending xmit queue
+ * @lp: the fc_lport whose pending queue is flushed
+ *
+ * This dequeues each skb from the head of the pending queue and
+ * calls fcoe_start_io() for it. If a transmit fails, the skb is put
+ * back at the head of the queue to be retried later, either by the
+ * watchdog timer or by the next transmit.
+ *
+ * Returns: the error from a failed fcoe_start_io(), otherwise the
+ * remaining pending queue length
+ **/
+static int fcoe_check_wait_queue(struct fc_lport *lp)
+{
+	int rc, unpause = 0;
+	int paused = 0;
+	struct sk_buff *skb;
+	struct fcoe_softc *fc;
+
+	fc = fcoe_softc(lp);
+	spin_lock_bh(&fc->fcoe_pending_queue.lock);
+
+	/*
+	 * is this interface paused?
+	 */
+	if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
+		paused = 1;
+	if (fc->fcoe_pending_queue.qlen) {
+		while ((skb = __skb_dequeue(&fc->fcoe_pending_queue)) != NULL) {
+			spin_unlock_bh(&fc->fcoe_pending_queue.lock);
+			rc = fcoe_start_io(skb);
+			if (rc) {
+				fcoe_insert_wait_queue_head(lp, skb);
+				return rc;
+			}
+			spin_lock_bh(&fc->fcoe_pending_queue.lock);
+		}
+		if (fc->fcoe_pending_queue.qlen < FCOE_MAX_QUEUE_DEPTH)
+			unpause = 1;
+	}
+	spin_unlock_bh(&fc->fcoe_pending_queue.lock);
+	if ((unpause) && (paused))
+		fc_unpause(lp);
+	return fc->fcoe_pending_queue.qlen;
+}
+
+/**
+ * fcoe_insert_wait_queue_head - puts skb to fcoe pending queue head
+ * @lp: the fc_port for this skb
+ * @skb: the associated skb to be xmitted
+ *
+ * Returns: none
+ **/
+static void fcoe_insert_wait_queue_head(struct fc_lport *lp,
+					struct sk_buff *skb)
+{
+	struct fcoe_softc *fc;
+
+	fc = fcoe_softc(lp);
+	spin_lock_bh(&fc->fcoe_pending_queue.lock);
+	__skb_queue_head(&fc->fcoe_pending_queue, skb);
+	spin_unlock_bh(&fc->fcoe_pending_queue.lock);
+}
+
+/**
+ * fcoe_insert_wait_queue - put the skb into fcoe pending queue tail
+ * @lp: the fc_port for this skb
+ * @skb: the associated skb to be xmitted
+ *
+ * Returns: none
+ **/
+static void fcoe_insert_wait_queue(struct fc_lport *lp,
+				   struct sk_buff *skb)
+{
+	struct fcoe_softc *fc;
+
+	fc = fcoe_softc(lp);
+	spin_lock_bh(&fc->fcoe_pending_queue.lock);
+	__skb_queue_tail(&fc->fcoe_pending_queue, skb);
+	spin_unlock_bh(&fc->fcoe_pending_queue.lock);
+}
+
+/**
+ * fcoe_dev_setup - setup link change notification interface
+ *
+ **/
+static void fcoe_dev_setup(void)
+{
+	/*
+	 * Register a netdevice notifier so link state changes on the
+	 * underlying interface are reported to us.
+	 */
+	register_netdevice_notifier(&fcoe_notifier);
+}
+
+/**
+ * fcoe_dev_cleanup - cleanup link change notification interface
+ **/
+static void fcoe_dev_cleanup(void)
+{
+	unregister_netdevice_notifier(&fcoe_notifier);
+}
+
+/**
+ * fcoe_device_notification - netdev event notification callback
+ * @notifier: context of the notification
+ * @event: type of event
+ * @ptr: ptr to the net_device that changed
+ *
+ * This function is called by the ethernet driver in case of link change event
+ *
+ * Returns: 0 for success
+ **/
+static int fcoe_device_notification(struct notifier_block *notifier,
+				    ulong event, void *ptr)
+{
+	struct fc_lport *lp = NULL;
+	struct net_device *real_dev = ptr;
+	struct fcoe_softc *fc;
+	struct fcoe_dev_stats *stats;
+	u16 new_status;
+	u32 mfs;
+	int rc = NOTIFY_OK;
+
+	read_lock(&fcoe_hostlist_lock);
+	list_for_each_entry(fc, &fcoe_hostlist, list) {
+		if (fc->real_dev == real_dev) {
+			lp = fc->lp;
+			break;
+		}
+	}
+	read_unlock(&fcoe_hostlist_lock);
+	if (lp == NULL) {
+		rc = NOTIFY_DONE;
+		goto out;
+	}
+
+	new_status = lp->link_status;
+	switch (event) {
+	case NETDEV_DOWN:
+	case NETDEV_GOING_DOWN:
+		new_status &= ~FC_LINK_UP;
+		break;
+	case NETDEV_UP:
+	case NETDEV_CHANGE:
+		new_status &= ~FC_LINK_UP;
+		if (!fcoe_link_ok(lp))
+			new_status |= FC_LINK_UP;
+		break;
+	case NETDEV_CHANGEMTU:
+		mfs = fc->real_dev->mtu -
+			(sizeof(struct fcoe_hdr) +
+			 sizeof(struct fcoe_crc_eof));
+		if (mfs >= FC_MIN_MAX_FRAME)
+			fc_set_mfs(lp, mfs);
+		new_status &= ~FC_LINK_UP;
+		if (!fcoe_link_ok(lp))
+			new_status |= FC_LINK_UP;
+		break;
+	case NETDEV_REGISTER:
+		break;
+	default:
+		FC_DBG("unknown event %ld call", event);
+	}
+	if (lp->link_status != new_status) {
+		if ((new_status & FC_LINK_UP) == FC_LINK_UP)
+			fc_linkup(lp);
+		else {
+			stats = lp->dev_stats[smp_processor_id()];
+			if (stats)
+				stats->LinkFailureCount++;
+			fc_linkdown(lp);
+			fcoe_clean_pending_queue(lp);
+		}
+	}
+out:
+	return rc;
+}
+
+/**
+ * fcoe_if_to_netdev - parse a name buffer to get the netdev
+ * @buffer: incoming buffer holding the interface name
+ *
+ * Returns: NULL or ptr to the net_device
+ **/
+static struct net_device *fcoe_if_to_netdev(const char *buffer)
+{
+	char *cp;
+	char ifname[IFNAMSIZ + 2];
+
+	if (buffer) {
+		strlcpy(ifname, buffer, IFNAMSIZ);
+		cp = ifname + strlen(ifname);
+		while (--cp >= ifname && *cp == '\n')
+			*cp = '\0';
+		return dev_get_by_name(&init_net, ifname);
+	}
+	return NULL;
+}
+
+/**
+ * fcoe_netdev_to_module_owner - finds the NIC driver module of the netdev
+ * @netdev: the target netdev
+ *
+ * Returns: ptr to the struct module, NULL for failure
+ **/
+static struct module *fcoe_netdev_to_module_owner(
+	const struct net_device *netdev)
+{
+	struct device *dev;
+
+	if (!netdev)
+		return NULL;
+
+	dev = netdev->dev.parent;
+	if (!dev)
+		return NULL;
+
+	if (!dev->driver)
+		return NULL;
+
+	return dev->driver->owner;
+}
+
+/**
+ * fcoe_ethdrv_get - holds the nic driver module via try_module_get() for
+ * the corresponding netdev.
+ * @netdev: the target netdev
+ *
+ * Returns: nonzero if the module reference was taken, -ENODEV otherwise
+ **/
+static int fcoe_ethdrv_get(const struct net_device *netdev)
+{
+	struct module *owner;
+
+	owner = fcoe_netdev_to_module_owner(netdev);
+	if (owner) {
+		printk(KERN_DEBUG "fcoe:hold driver module %s for %s\n",
+		       module_name(owner), netdev->name);
+		return  try_module_get(owner);
+	}
+	return -ENODEV;
+}
+
+/**
+ * fcoe_ethdrv_put - releases the nic driver module via module_put() for
+ * the corresponding netdev.
+ * @netdev: the target netdev
+ *
+ * Returns: 0 for success
+ **/
+static int fcoe_ethdrv_put(const struct net_device *netdev)
+{
+	struct module *owner;
+
+	owner = fcoe_netdev_to_module_owner(netdev);
+	if (owner) {
+		printk(KERN_DEBUG "fcoe:release driver module %s for %s\n",
+		       module_name(owner), netdev->name);
+		module_put(owner);
+		return 0;
+	}
+	return -ENODEV;
+}
+
+/**
+ * fcoe_destroy - handles the destroy call from sysfs
+ * @buffer: expected to be an Ethernet interface name
+ * @kp: associated kernel param
+ *
+ * Returns: 0 for success
+ **/
+static int fcoe_destroy(const char *buffer, struct kernel_param *kp)
+{
+	int rc;
+	struct net_device *netdev;
+
+	netdev = fcoe_if_to_netdev(buffer);
+	if (!netdev) {
+		rc = -ENODEV;
+		goto out_nodev;
+	}
+	/* look for existing lport */
+	if (!fcoe_hostlist_lookup(netdev)) {
+		rc = -ENODEV;
+		goto out_putdev;
+	}
+	/* pass to transport */
+	rc = fcoe_transport_release(netdev);
+	if (rc) {
+		printk(KERN_ERR "fcoe: fcoe_transport_release(%s) failed\n",
+		       netdev->name);
+		rc = -EIO;
+		goto out_putdev;
+	}
+	fcoe_ethdrv_put(netdev);
+	rc = 0;
+out_putdev:
+	dev_put(netdev);
+out_nodev:
+	return rc;
+}
+
+/**
+ * fcoe_create - handles the create call from sysfs
+ * @buffer: expected to be an Ethernet interface name
+ * @kp: associated kernel param
+ *
+ * Returns: 0 for success
+ **/
+static int fcoe_create(const char *buffer, struct kernel_param *kp)
+{
+	int rc;
+	struct net_device *netdev;
+
+	netdev = fcoe_if_to_netdev(buffer);
+	if (!netdev) {
+		rc = -ENODEV;
+		goto out_nodev;
+	}
+	/* look for existing lport */
+	if (fcoe_hostlist_lookup(netdev)) {
+		rc = -EEXIST;
+		goto out_putdev;
+	}
+	fcoe_ethdrv_get(netdev);
+
+	/* pass to transport */
+	rc = fcoe_transport_attach(netdev);
+	if (rc) {
+		printk(KERN_ERR "fcoe: fcoe_transport_attach(%s) failed\n",
+		       netdev->name);
+		fcoe_ethdrv_put(netdev);
+		rc = -EIO;
+		goto out_putdev;
+	}
+	rc = 0;
+out_putdev:
+	dev_put(netdev);
+out_nodev:
+	return rc;
+}
+
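+/*
+ * Usage sketch (assuming this code is built as the "fcoe" module,
+ * so the parameters appear under /sys/module/fcoe/parameters):
+ *   echo eth0 > /sys/module/fcoe/parameters/create
+ *   echo eth0 > /sys/module/fcoe/parameters/destroy
+ */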
+module_param_call(create, fcoe_create, NULL, NULL, S_IWUSR);
+__MODULE_PARM_TYPE(create, "string");
+MODULE_PARM_DESC(create, "Create fcoe port using net device passed in.");
+module_param_call(destroy, fcoe_destroy, NULL, NULL, S_IWUSR);
+__MODULE_PARM_TYPE(destroy, "string");
+MODULE_PARM_DESC(destroy, "Destroy fcoe port");
+
+/*
+ * fcoe_link_ok - check if link is ok for the fc_lport
+ * @lp: ptr to the fc_lport
+ *
+ * Any permanently-disqualifying conditions have been previously checked.
+ * This also updates the speed setting, which may change with link for 100/1000.
+ *
+ * This function should probably be checking for PAUSE support at some point
+ * in the future. Currently Per-priority-pause is not determinable using
+ * ethtool, so we shouldn't be restrictive until that problem is resolved.
+ *
+ * Returns: 0 if link is OK for use by FCoE.
+ *
+ */
+int fcoe_link_ok(struct fc_lport *lp)
+{
+	struct fcoe_softc *fc = fcoe_softc(lp);
+	struct net_device *dev = fc->real_dev;
+	struct ethtool_cmd ecmd = { ETHTOOL_GSET };
+	int rc = 0;
+
+	if ((dev->flags & IFF_UP) && netif_carrier_ok(dev)) {
+		dev = fc->phys_dev;
+		if (dev->ethtool_ops->get_settings) {
+			dev->ethtool_ops->get_settings(dev, &ecmd);
+			lp->link_supported_speeds &=
+				~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
+			if (ecmd.supported & (SUPPORTED_1000baseT_Half |
+					      SUPPORTED_1000baseT_Full))
+				lp->link_supported_speeds |= FC_PORTSPEED_1GBIT;
+			if (ecmd.supported & SUPPORTED_10000baseT_Full)
+				lp->link_supported_speeds |=
+					FC_PORTSPEED_10GBIT;
+			if (ecmd.speed == SPEED_1000)
+				lp->link_speed = FC_PORTSPEED_1GBIT;
+			if (ecmd.speed == SPEED_10000)
+				lp->link_speed = FC_PORTSPEED_10GBIT;
+		}
+	} else
+		rc = -1;
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(fcoe_link_ok);
+
+/*
+ * fcoe_percpu_clean - frees skb of the corresponding lport from the per
+ * cpu queue.
+ * @lp: the fc_lport
+ */
+void fcoe_percpu_clean(struct fc_lport *lp)
+{
+	int idx;
+	struct fcoe_percpu_s *pp;
+	struct fcoe_rcv_info *fr;
+	struct sk_buff_head *list;
+	struct sk_buff *skb, *next;
+	struct sk_buff *head;
+
+	for (idx = 0; idx < NR_CPUS; idx++) {
+		if (fcoe_percpu[idx]) {
+			pp = fcoe_percpu[idx];
+			spin_lock_bh(&pp->fcoe_rx_list.lock);
+			list = &pp->fcoe_rx_list;
+			head = list->next;
+			for (skb = head; skb != (struct sk_buff *)list;
+			     skb = next) {
+				next = skb->next;
+				fr = fcoe_dev_from_skb(skb);
+				if (fr->fr_dev == lp) {
+					__skb_unlink(skb, list);
+					kfree_skb(skb);
+				}
+			}
+			spin_unlock_bh(&pp->fcoe_rx_list.lock);
+		}
+	}
+}
+EXPORT_SYMBOL_GPL(fcoe_percpu_clean);
+
+/**
+ * fcoe_clean_pending_queue - dequeue skb and free it
+ * @lp: the corresponding fc_lport
+ *
+ * Returns: none
+ **/
+void fcoe_clean_pending_queue(struct fc_lport *lp)
+{
+	struct fcoe_softc  *fc = lport_priv(lp);
+	struct sk_buff *skb;
+
+	spin_lock_bh(&fc->fcoe_pending_queue.lock);
+	while ((skb = __skb_dequeue(&fc->fcoe_pending_queue)) != NULL) {
+		spin_unlock_bh(&fc->fcoe_pending_queue.lock);
+		kfree_skb(skb);
+		spin_lock_bh(&fc->fcoe_pending_queue.lock);
+	}
+	spin_unlock_bh(&fc->fcoe_pending_queue.lock);
+}
+EXPORT_SYMBOL_GPL(fcoe_clean_pending_queue);
+
+/**
+ * libfc_host_alloc - allocate a Scsi_Host with room for the fc_lport
+ * @sht: ptr to the scsi host templ
+ * @priv_size: size of private data after fc_lport
+ *
+ * Returns: ptr to Scsi_Host
+ * TODO - to libfc?
+ */
+static inline struct Scsi_Host *libfc_host_alloc(
+	struct scsi_host_template *sht, int priv_size)
+{
+	return scsi_host_alloc(sht, sizeof(struct fc_lport) + priv_size);
+}
+
+/**
+ * fcoe_host_alloc - allocate a Scsi_Host with room for the fcoe_softc
+ * @sht: ptr to the scsi host templ
+ * @priv_size: size of private data after fc_lport
+ *
+ * Returns: ptr to Scsi_Host
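+ *
+ * The hostdata area is laid out as a struct fc_lport followed by a
+ * struct fcoe_softc and then priv_size bytes for the caller.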
+ */
+struct Scsi_Host *fcoe_host_alloc(struct scsi_host_template *sht, int priv_size)
+{
+	return libfc_host_alloc(sht, sizeof(struct fcoe_softc) + priv_size);
+}
+EXPORT_SYMBOL_GPL(fcoe_host_alloc);
+
+/*
+ * fcoe_reset - resets the fcoe
+ * @shost: shost the reset is from
+ *
+ * Returns: always 0
+ */
+int fcoe_reset(struct Scsi_Host *shost)
+{
+	struct fc_lport *lport = shost_priv(shost);
+	fc_lport_reset(lport);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(fcoe_reset);
+
+/*
+ * fcoe_wwn_from_mac - converts 48-bit IEEE MAC address to 64-bit FC WWN.
+ * @mac: mac address
+ * @scheme: WWN naming scheme: 1 uses the MAC alone, 2 also encodes @port
+ * @port: port number, encoded into the WWN under scheme 2
+ *
+ * Returns: u64 fc world wide name
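+ *
+ * Illustrative example (values assumed): MAC 00:1b:21:01:02:03 with
+ * scheme 1 and port 0 yields 0x1000001b21010203; with scheme 2 and
+ * port 0x123 it yields 0x2123001b21010203.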
+ */
+u64 fcoe_wwn_from_mac(unsigned char mac[MAX_ADDR_LEN],
+		      unsigned int scheme, unsigned int port)
+{
+	u64 wwn;
+	u64 host_mac;
+
+	/* The MAC is in network order; assemble it into the low 48 bits */
+	host_mac = ((u64) mac[0] << 40) |
+		((u64) mac[1] << 32) |
+		((u64) mac[2] << 24) |
+		((u64) mac[3] << 16) |
+		((u64) mac[4] << 8) |
+		(u64) mac[5];
+
+	WARN_ON(host_mac >= (1ULL << 48));
+	wwn = host_mac | ((u64) scheme << 60);
+	switch (scheme) {
+	case 1:
+		WARN_ON(port != 0);
+		break;
+	case 2:
+		WARN_ON(port >= 0xfff);
+		wwn |= (u64) port << 48;
+		break;
+	default:
+		WARN_ON(1);
+		break;
+	}
+
+	return wwn;
+}
+EXPORT_SYMBOL_GPL(fcoe_wwn_from_mac);
+
+/*
+ * fcoe_hostlist_lookup_softc - find the corresponding lport by a given device
+ * @device: this is currently ptr to net_device
+ *
+ * Returns: NULL or the located fcoe_softc
+ */
+static struct fcoe_softc *fcoe_hostlist_lookup_softc(
+	const struct net_device *dev)
+{
+	struct fcoe_softc *fc;
+
+	read_lock(&fcoe_hostlist_lock);
+	list_for_each_entry(fc, &fcoe_hostlist, list) {
+		if (fc->real_dev == dev) {
+			read_unlock(&fcoe_hostlist_lock);
+			return fc;
+		}
+	}
+	read_unlock(&fcoe_hostlist_lock);
+	return NULL;
+}
+
+/*
+ * fcoe_hostlist_lookup - find the corresponding lport by netdev
+ * @netdev: ptr to net_device
+ *
+ * Returns: NULL or the fc_lport for the matching netdev
+ */
+struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev)
+{
+	struct fcoe_softc *fc;
+
+	fc = fcoe_hostlist_lookup_softc(netdev);
+
+	return (fc) ? fc->lp : NULL;
+}
+EXPORT_SYMBOL_GPL(fcoe_hostlist_lookup);
+
+/*
+ * fcoe_hostlist_add - add a lport to lports list
+ * @lp: ptr to the fc_lport to be added
+ *
+ * Returns: 0 for success
+ */
+int fcoe_hostlist_add(const struct fc_lport *lp)
+{
+	struct fcoe_softc *fc;
+
+	fc = fcoe_hostlist_lookup_softc(fcoe_netdev(lp));
+	if (!fc) {
+		fc = fcoe_softc(lp);
+		write_lock_bh(&fcoe_hostlist_lock);
+		list_add_tail(&fc->list, &fcoe_hostlist);
+		write_unlock_bh(&fcoe_hostlist_lock);
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(fcoe_hostlist_add);
+
+/*
+ * fcoe_hostlist_remove - remove a lport from lports list
+ * @lp: ptr to the fc_lport to be removed
+ *
+ * Returns: 0 for success
+ */
+int fcoe_hostlist_remove(const struct fc_lport *lp)
+{
+	struct fcoe_softc *fc;
+
+	fc = fcoe_hostlist_lookup_softc(fcoe_netdev(lp));
+	BUG_ON(!fc);
+	write_lock_bh(&fcoe_hostlist_lock);
+	list_del(&fc->list);
+	write_unlock_bh(&fcoe_hostlist_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(fcoe_hostlist_remove);
+
+/**
+ * fcoe_libfc_config - sets up libfc related properties for lport
+ * @lp: ptr to the fc_lport
+ * @tt: libfc function template
+ *
+ * Returns : 0 for success
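+ *
+ * Note: the whole template is copied in; the fc_*_init() calls
+ * below are expected to install libfc defaults for any entries the
+ * caller left NULL.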
+ **/
+int fcoe_libfc_config(struct fc_lport *lp, struct libfc_function_template *tt)
+{
+	/* Set the function pointers set by the LLDD */
+	memcpy(&lp->tt, tt, sizeof(*tt));
+	if (fc_fcp_init(lp))
+		return -ENOMEM;
+	fc_exch_init(lp);
+	fc_elsct_init(lp);
+	fc_lport_init(lp);
+	fc_rport_init(lp);
+	fc_disc_init(lp);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(fcoe_libfc_config);
+
+/**
+ * fcoe_init - fcoe module loading initialization
+ *
+ * Initialization routine:
+ * 1. creates the fc transport software structure
+ * 2. initializes the list of port information structures
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int __init fcoe_init(void)
+{
+	int cpu;
+	struct fcoe_percpu_s *p;
+
+
+	INIT_LIST_HEAD(&fcoe_hostlist);
+	rwlock_init(&fcoe_hostlist_lock);
+
+#ifdef CONFIG_HOTPLUG_CPU
+	register_cpu_notifier(&fcoe_cpu_notifier);
+#endif /* CONFIG_HOTPLUG_CPU */
+
+	/*
+	 * initialize per CPU interrupt thread
+	 */
+	for_each_online_cpu(cpu) {
+		p = kzalloc(sizeof(struct fcoe_percpu_s), GFP_KERNEL);
+		if (p) {
+			p->thread = kthread_create(fcoe_percpu_receive_thread,
+						   (void *)p,
+						   "fcoethread/%d", cpu);
+
+			/*
+			 * If thread creation succeeded, record the cpu,
+			 * initialize the rx skb queue head and bind the
+			 * thread to the cpu before waking it.
+			 */
+			if (likely(!IS_ERR(p->thread))) {
+				p->cpu = cpu;
+				fcoe_percpu[cpu] = p;
+				skb_queue_head_init(&p->fcoe_rx_list);
+				kthread_bind(p->thread, cpu);
+				wake_up_process(p->thread);
+			} else {
+				fcoe_percpu[cpu] = NULL;
+				kfree(p);
+
+			}
+		}
+	}
+
+	/*
+	 * setup link change notification
+	 */
+	fcoe_dev_setup();
+
+	init_timer(&fcoe_timer);
+	fcoe_timer.data = 0;
+	fcoe_timer.function = fcoe_watchdog;
+	fcoe_timer.expires = (jiffies + (10 * HZ));
+	add_timer(&fcoe_timer);
+
+	/* initialize the fcoe transport */
+	fcoe_transport_init();
+
+	fcoe_sw_init();
+
+	return 0;
+}
+module_init(fcoe_init);
+
+/**
+ * fcoe_exit - fcoe module unloading cleanup
+ *
+ * Returns: none
+ **/
+static void __exit fcoe_exit(void)
+{
+	u32 idx;
+	struct fcoe_softc *fc, *tmp;
+	struct fcoe_percpu_s *p;
+	struct sk_buff *skb;
+
+	/*
+	 * Stop all call back interfaces
+	 */
+#ifdef CONFIG_HOTPLUG_CPU
+	unregister_cpu_notifier(&fcoe_cpu_notifier);
+#endif /* CONFIG_HOTPLUG_CPU */
+	fcoe_dev_cleanup();
+
+	/*
+	 * stop timer
+	 */
+	del_timer_sync(&fcoe_timer);
+
+	/* release the associated fcoe transport for each lport */
+	list_for_each_entry_safe(fc, tmp, &fcoe_hostlist, list)
+		fcoe_transport_release(fc->real_dev);
+
+	for (idx = 0; idx < NR_CPUS; idx++) {
+		if (fcoe_percpu[idx]) {
+			kthread_stop(fcoe_percpu[idx]->thread);
+			p = fcoe_percpu[idx];
+			spin_lock_bh(&p->fcoe_rx_list.lock);
+			while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
+				kfree_skb(skb);
+			spin_unlock_bh(&p->fcoe_rx_list.lock);
+			if (fcoe_percpu[idx]->crc_eof_page)
+				put_page(fcoe_percpu[idx]->crc_eof_page);
+			kfree(fcoe_percpu[idx]);
+		}
+	}
+
+	/* remove sw transport */
+	fcoe_sw_exit();
+
+	/* detach the transport */
+	fcoe_transport_exit();
+}
+module_exit(fcoe_exit);
diff --git a/drivers/scsi/fdomain.c b/drivers/scsi/fdomain.c
index 56f4e6bffc21..32eef66114c7 100644
--- a/drivers/scsi/fdomain.c
+++ b/drivers/scsi/fdomain.c
@@ -3,7 +3,7 @@
  * Revised: Mon Dec 28 21:59:02 1998 by faith@acm.org
  * Author: Rickard E. Faith, faith@cs.unc.edu
  * Copyright 1992-1996, 1998 Rickard E. Faith (faith@acm.org)
- * Shared IRQ supported added 7/7/2001  Alan Cox <alan@redhat.com>
+ * Shared IRQ supported added 7/7/2001  Alan Cox <alan@lxorguk.ukuu.org.uk>
 
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the
diff --git a/drivers/scsi/ibmmca.c b/drivers/scsi/ibmmca.c
index 4d15a62914e9..9c1e6a5b5af0 100644
--- a/drivers/scsi/ibmmca.c
+++ b/drivers/scsi/ibmmca.c
@@ -10,7 +10,7 @@
  See the WWW-page: http://www.uni-mainz.de/~langm000/linux.html for latest
  updates, info and ADF-files for adapters supported by this driver.
 
- Alan Cox <alan@redhat.com>
+ Alan Cox <alan@lxorguk.ukuu.org.uk>
  Updated for Linux 2.5.45 to use the new error handler, cleaned up the
  lock macros and did a few unavoidable locking tweaks, plus one locking
  fix in the irq and completion path.
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 7650707a40de..44f202f33101 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -121,6 +121,7 @@ static const struct {
 	{ IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED, DID_ABORT, 0, 1, "transaction cancelled" },
 	{ IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED_IMPLICIT, DID_ABORT, 0, 1, "transaction cancelled implicit" },
 	{ IBMVFC_VIOS_FAILURE, IBMVFC_INSUFFICIENT_RESOURCE, DID_REQUEUE, 1, 1, "insufficient resources" },
+	{ IBMVFC_VIOS_FAILURE, IBMVFC_PLOGI_REQUIRED, DID_ERROR, 0, 1, "port login required" },
 	{ IBMVFC_VIOS_FAILURE, IBMVFC_COMMAND_FAILED, DID_ERROR, 1, 1, "command failed" },
 
 	{ IBMVFC_FC_FAILURE, IBMVFC_INVALID_ELS_CMD_CODE, DID_ERROR, 0, 1, "invalid ELS command code" },
@@ -278,13 +279,6 @@ static int ibmvfc_get_err_result(struct ibmvfc_cmd *vfc_cmd)
 	     rsp->data.info.rsp_code))
 		return DID_ERROR << 16;
 
-	if (!vfc_cmd->status) {
-		if (rsp->flags & FCP_RESID_OVER)
-			return rsp->scsi_status | (DID_ERROR << 16);
-		else
-			return rsp->scsi_status | (DID_OK << 16);
-	}
-
 	err = ibmvfc_get_err_index(vfc_cmd->status, vfc_cmd->error);
 	if (err >= 0)
 		return rsp->scsi_status | (cmd_status[err].result << 16);
@@ -503,6 +497,7 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
 	case IBMVFC_HOST_ACTION_INIT:
 	case IBMVFC_HOST_ACTION_TGT_DEL:
 	case IBMVFC_HOST_ACTION_QUERY_TGTS:
+	case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
 	case IBMVFC_HOST_ACTION_TGT_ADD:
 	case IBMVFC_HOST_ACTION_NONE:
 	default:
@@ -566,7 +561,7 @@ static void ibmvfc_init_host(struct ibmvfc_host *vhost, int relogin)
 	struct ibmvfc_target *tgt;
 
 	if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
-		if (++vhost->init_retries > IBMVFC_MAX_INIT_RETRIES) {
+		if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) {
 			dev_err(vhost->dev,
 				"Host initialization retries exceeded. Taking adapter offline\n");
 			ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
@@ -765,6 +760,9 @@ static void ibmvfc_scsi_eh_done(struct ibmvfc_event *evt)
 		cmnd->scsi_done(cmnd);
 	}
 
+	if (evt->eh_comp)
+		complete(evt->eh_comp);
+
 	ibmvfc_free_event(evt);
 }
 
@@ -847,11 +845,12 @@ static void ibmvfc_reset_host(struct ibmvfc_host *vhost)
 static void ibmvfc_retry_host_init(struct ibmvfc_host *vhost)
 {
 	if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
-		if (++vhost->init_retries > IBMVFC_MAX_INIT_RETRIES) {
+		vhost->delay_init = 1;
+		if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) {
 			dev_err(vhost->dev,
 				"Host initialization retries exceeded. Taking adapter offline\n");
 			ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
-		} else if (vhost->init_retries == IBMVFC_MAX_INIT_RETRIES)
+		} else if (vhost->init_retries == IBMVFC_MAX_HOST_INIT_RETRIES)
 			__ibmvfc_reset_host(vhost);
 		else
 			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
@@ -1252,6 +1251,7 @@ static void ibmvfc_init_event(struct ibmvfc_event *evt,
 	evt->sync_iu = NULL;
 	evt->crq.format = format;
 	evt->done = done;
+	evt->eh_comp = NULL;
 }
 
 /**
@@ -1381,6 +1381,8 @@ static int ibmvfc_send_event(struct ibmvfc_event *evt,
 		add_timer(&evt->timer);
 	}
 
+	mb();
+
 	if ((rc = ibmvfc_send_crq(vhost, crq_as_u64[0], crq_as_u64[1]))) {
 		list_del(&evt->queue);
 		del_timer(&evt->timer);
@@ -1477,6 +1479,11 @@ static void ibmvfc_scsi_done(struct ibmvfc_event *evt)
 				sense_len = SCSI_SENSE_BUFFERSIZE - rsp_len;
 			if ((rsp->flags & FCP_SNS_LEN_VALID) && rsp->fcp_sense_len && rsp_len <= 8)
 				memcpy(cmnd->sense_buffer, rsp->data.sense + rsp_len, sense_len);
+			if ((vfc_cmd->status & IBMVFC_VIOS_FAILURE) && (vfc_cmd->error == IBMVFC_PLOGI_REQUIRED))
+				ibmvfc_reinit_host(evt->vhost);
+
+			if (!cmnd->result && (!scsi_get_resid(cmnd) || (rsp->flags & FCP_RESID_OVER)))
+				cmnd->result = (DID_ERROR << 16);
 
 			ibmvfc_log_error(evt);
 		}
@@ -1489,6 +1496,9 @@ static void ibmvfc_scsi_done(struct ibmvfc_event *evt)
 		cmnd->scsi_done(cmnd);
 	}
 
+	if (evt->eh_comp)
+		complete(evt->eh_comp);
+
 	ibmvfc_free_event(evt);
 }
 
@@ -1627,7 +1637,7 @@ static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
 	struct ibmvfc_host *vhost = shost_priv(sdev->host);
 	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
 	struct ibmvfc_cmd *tmf;
-	struct ibmvfc_event *evt;
+	struct ibmvfc_event *evt = NULL;
 	union ibmvfc_iu rsp_iu;
 	struct ibmvfc_fcp_rsp *fc_rsp = &rsp_iu.cmd.rsp;
 	int rsp_rc = -EBUSY;
@@ -1789,7 +1799,8 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev)
 static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
 {
 	struct ibmvfc_host *vhost = shost_priv(sdev->host);
-	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
+	struct scsi_target *starget = scsi_target(sdev);
+	struct fc_rport *rport = starget_to_rport(starget);
 	struct ibmvfc_tmf *tmf;
 	struct ibmvfc_event *evt, *found_evt;
 	union ibmvfc_iu rsp;
@@ -1827,7 +1838,7 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
 		int_to_scsilun(sdev->lun, &tmf->lun);
 		tmf->flags = (type | IBMVFC_TMF_LUA_VALID);
 		tmf->cancel_key = (unsigned long)sdev->hostdata;
-		tmf->my_cancel_key = (IBMVFC_TMF_CANCEL_KEY | (unsigned long)sdev->hostdata);
+		tmf->my_cancel_key = (unsigned long)starget->hostdata;
 
 		evt->sync_iu = &rsp;
 		init_completion(&evt->comp);
@@ -1859,6 +1870,91 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
 }
 
 /**
+ * ibmvfc_match_target - Match function for specified target
+ * @evt:	ibmvfc event struct
+ * @device:	device to match (starget)
+ *
+ * Returns:
+ *	1 if event matches starget / 0 if event does not match starget
+ **/
+static int ibmvfc_match_target(struct ibmvfc_event *evt, void *device)
+{
+	if (evt->cmnd && scsi_target(evt->cmnd->device) == device)
+		return 1;
+	return 0;
+}
+
+/**
+ * ibmvfc_match_lun - Match function for specified LUN
+ * @evt:	ibmvfc event struct
+ * @device:	device to match (sdev)
+ *
+ * Returns:
+ *	1 if event matches sdev / 0 if event does not match sdev
+ **/
+static int ibmvfc_match_lun(struct ibmvfc_event *evt, void *device)
+{
+	if (evt->cmnd && evt->cmnd->device == device)
+		return 1;
+	return 0;
+}
+
+/**
+ * ibmvfc_wait_for_ops - Wait for ops to complete
+ * @vhost:	ibmvfc host struct
+ * @device:	device to match (starget or sdev)
+ * @match:	match function
+ *
+ * Returns:
+ *	SUCCESS / FAILED
+ **/
+static int ibmvfc_wait_for_ops(struct ibmvfc_host *vhost, void *device,
+			       int (*match) (struct ibmvfc_event *, void *))
+{
+	struct ibmvfc_event *evt;
+	DECLARE_COMPLETION_ONSTACK(comp);
+	int wait;
+	unsigned long flags;
+	signed long timeout = init_timeout * HZ;
+
+	ENTER;
+	do {
+		wait = 0;
+		spin_lock_irqsave(vhost->host->host_lock, flags);
+		list_for_each_entry(evt, &vhost->sent, queue) {
+			if (match(evt, device)) {
+				evt->eh_comp = &comp;
+				wait++;
+			}
+		}
+		spin_unlock_irqrestore(vhost->host->host_lock, flags);
+
+		if (wait) {
+			timeout = wait_for_completion_timeout(&comp, timeout);
+
+			if (!timeout) {
+				wait = 0;
+				spin_lock_irqsave(vhost->host->host_lock, flags);
+				list_for_each_entry(evt, &vhost->sent, queue) {
+					if (match(evt, device)) {
+						evt->eh_comp = NULL;
+						wait++;
+					}
+				}
+				spin_unlock_irqrestore(vhost->host->host_lock, flags);
+				if (wait)
+					dev_err(vhost->dev, "Timed out waiting for aborted commands\n");
+				LEAVE;
+				return wait ? FAILED : SUCCESS;
+			}
+		}
+	} while (wait);
+
+	LEAVE;
+	return SUCCESS;
+}
+
+/**
  * ibmvfc_eh_abort_handler - Abort a command
  * @cmd:	scsi command to abort
  *
@@ -1867,29 +1963,21 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
  **/
 static int ibmvfc_eh_abort_handler(struct scsi_cmnd *cmd)
 {
-	struct ibmvfc_host *vhost = shost_priv(cmd->device->host);
-	struct ibmvfc_event *evt, *pos;
+	struct scsi_device *sdev = cmd->device;
+	struct ibmvfc_host *vhost = shost_priv(sdev->host);
 	int cancel_rc, abort_rc;
-	unsigned long flags;
+	int rc = FAILED;
 
 	ENTER;
 	ibmvfc_wait_while_resetting(vhost);
-	cancel_rc = ibmvfc_cancel_all(cmd->device, IBMVFC_TMF_ABORT_TASK_SET);
-	abort_rc = ibmvfc_abort_task_set(cmd->device);
+	cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET);
+	abort_rc = ibmvfc_abort_task_set(sdev);
 
-	if (!cancel_rc && !abort_rc) {
-		spin_lock_irqsave(vhost->host->host_lock, flags);
-		list_for_each_entry_safe(evt, pos, &vhost->sent, queue) {
-			if (evt->cmnd && evt->cmnd->device == cmd->device)
-				ibmvfc_fail_request(evt, DID_ABORT);
-		}
-		spin_unlock_irqrestore(vhost->host->host_lock, flags);
-		LEAVE;
-		return SUCCESS;
-	}
+	if (!cancel_rc && !abort_rc)
+		rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);
 
 	LEAVE;
-	return FAILED;
+	return rc;
 }
 
 /**
@@ -1901,29 +1989,21 @@ static int ibmvfc_eh_abort_handler(struct scsi_cmnd *cmd)
  **/
 static int ibmvfc_eh_device_reset_handler(struct scsi_cmnd *cmd)
 {
-	struct ibmvfc_host *vhost = shost_priv(cmd->device->host);
-	struct ibmvfc_event *evt, *pos;
+	struct scsi_device *sdev = cmd->device;
+	struct ibmvfc_host *vhost = shost_priv(sdev->host);
 	int cancel_rc, reset_rc;
-	unsigned long flags;
+	int rc = FAILED;
 
 	ENTER;
 	ibmvfc_wait_while_resetting(vhost);
-	cancel_rc = ibmvfc_cancel_all(cmd->device, IBMVFC_TMF_LUN_RESET);
-	reset_rc = ibmvfc_reset_device(cmd->device, IBMVFC_LUN_RESET, "LUN");
+	cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_LUN_RESET);
+	reset_rc = ibmvfc_reset_device(sdev, IBMVFC_LUN_RESET, "LUN");
 
-	if (!cancel_rc && !reset_rc) {
-		spin_lock_irqsave(vhost->host->host_lock, flags);
-		list_for_each_entry_safe(evt, pos, &vhost->sent, queue) {
-			if (evt->cmnd && evt->cmnd->device == cmd->device)
-				ibmvfc_fail_request(evt, DID_ABORT);
-		}
-		spin_unlock_irqrestore(vhost->host->host_lock, flags);
-		LEAVE;
-		return SUCCESS;
-	}
+	if (!cancel_rc && !reset_rc)
+		rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);
 
 	LEAVE;
-	return FAILED;
+	return rc;
 }
 
 /**
@@ -1959,31 +2039,23 @@ static void ibmvfc_dev_abort_all(struct scsi_device *sdev, void *data)
  **/
 static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd)
 {
-	struct ibmvfc_host *vhost = shost_priv(cmd->device->host);
-	struct scsi_target *starget = scsi_target(cmd->device);
-	struct ibmvfc_event *evt, *pos;
+	struct scsi_device *sdev = cmd->device;
+	struct ibmvfc_host *vhost = shost_priv(sdev->host);
+	struct scsi_target *starget = scsi_target(sdev);
 	int reset_rc;
+	int rc = FAILED;
 	unsigned long cancel_rc = 0;
-	unsigned long flags;
 
 	ENTER;
 	ibmvfc_wait_while_resetting(vhost);
 	starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all);
-	reset_rc = ibmvfc_reset_device(cmd->device, IBMVFC_TARGET_RESET, "target");
+	reset_rc = ibmvfc_reset_device(sdev, IBMVFC_TARGET_RESET, "target");
 
-	if (!cancel_rc && !reset_rc) {
-		spin_lock_irqsave(vhost->host->host_lock, flags);
-		list_for_each_entry_safe(evt, pos, &vhost->sent, queue) {
-			if (evt->cmnd && scsi_target(evt->cmnd->device) == starget)
-				ibmvfc_fail_request(evt, DID_ABORT);
-		}
-		spin_unlock_irqrestore(vhost->host->host_lock, flags);
-		LEAVE;
-		return SUCCESS;
-	}
+	if (!cancel_rc && !reset_rc)
+		rc = ibmvfc_wait_for_ops(vhost, starget, ibmvfc_match_target);
 
 	LEAVE;
-	return FAILED;
+	return rc;
 }
 
 /**
@@ -2013,23 +2085,18 @@ static void ibmvfc_terminate_rport_io(struct fc_rport *rport)
 	struct scsi_target *starget = to_scsi_target(&rport->dev);
 	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
 	struct ibmvfc_host *vhost = shost_priv(shost);
-	struct ibmvfc_event *evt, *pos;
 	unsigned long cancel_rc = 0;
 	unsigned long abort_rc = 0;
-	unsigned long flags;
+	int rc = FAILED;
 
 	ENTER;
 	starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all);
 	starget_for_each_device(starget, &abort_rc, ibmvfc_dev_abort_all);
 
-	if (!cancel_rc && !abort_rc) {
-		spin_lock_irqsave(shost->host_lock, flags);
-		list_for_each_entry_safe(evt, pos, &vhost->sent, queue) {
-			if (evt->cmnd && scsi_target(evt->cmnd->device) == starget)
-				ibmvfc_fail_request(evt, DID_ABORT);
-		}
-		spin_unlock_irqrestore(shost->host_lock, flags);
-	} else
+	if (!cancel_rc && !abort_rc)
+		rc = ibmvfc_wait_for_ops(vhost, starget, ibmvfc_match_target);
+
+	if (rc == FAILED)
 		ibmvfc_issue_fc_host_lip(shost);
 	LEAVE;
 }
@@ -2089,15 +2156,17 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
 	case IBMVFC_AE_LINK_UP:
 	case IBMVFC_AE_RESUME:
 		vhost->events_to_log |= IBMVFC_AE_LINKUP;
-		ibmvfc_init_host(vhost, 1);
+		vhost->delay_init = 1;
+		__ibmvfc_reset_host(vhost);
 		break;
 	case IBMVFC_AE_SCN_FABRIC:
+	case IBMVFC_AE_SCN_DOMAIN:
 		vhost->events_to_log |= IBMVFC_AE_RSCN;
-		ibmvfc_init_host(vhost, 1);
+		vhost->delay_init = 1;
+		__ibmvfc_reset_host(vhost);
 		break;
 	case IBMVFC_AE_SCN_NPORT:
 	case IBMVFC_AE_SCN_GROUP:
-	case IBMVFC_AE_SCN_DOMAIN:
 		vhost->events_to_log |= IBMVFC_AE_RSCN;
 	case IBMVFC_AE_ELS_LOGO:
 	case IBMVFC_AE_ELS_PRLO:
@@ -2263,6 +2332,28 @@ static int ibmvfc_slave_alloc(struct scsi_device *sdev)
 }
 
 /**
+ * ibmvfc_target_alloc - Setup the target's task set value
+ * @starget:	struct scsi_target
+ *
+ * Set the target's task set value so that error handling works as
+ * expected.
+ *
+ * Returns:
+ *	0 on success / -ENXIO if device does not exist
+ **/
+static int ibmvfc_target_alloc(struct scsi_target *starget)
+{
+	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+	struct ibmvfc_host *vhost = shost_priv(shost);
+	unsigned long flags = 0;
+
+	spin_lock_irqsave(shost->host_lock, flags);
+	starget->hostdata = (void *)(unsigned long)vhost->task_set++;
+	spin_unlock_irqrestore(shost->host_lock, flags);
+	return 0;
+}
+
+/**
  * ibmvfc_slave_configure - Configure the device
  * @sdev:	struct scsi_device device to configure
  *
@@ -2541,6 +2632,7 @@ static struct scsi_host_template driver_template = {
 	.eh_host_reset_handler = ibmvfc_eh_host_reset_handler,
 	.slave_alloc = ibmvfc_slave_alloc,
 	.slave_configure = ibmvfc_slave_configure,
+	.target_alloc = ibmvfc_target_alloc,
 	.scan_finished = ibmvfc_scan_finished,
 	.change_queue_depth = ibmvfc_change_queue_depth,
 	.change_queue_type = ibmvfc_change_queue_type,
@@ -2637,7 +2729,7 @@ static irqreturn_t ibmvfc_interrupt(int irq, void *dev_instance)
 		} else if ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
 			vio_disable_interrupts(vdev);
 			ibmvfc_handle_async(async, vhost);
-			crq->valid = 0;
+			async->valid = 0;
 		} else
 			done = 1;
 	}
@@ -2669,7 +2761,7 @@ static void ibmvfc_init_tgt(struct ibmvfc_target *tgt,
 static void ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt,
 				  void (*job_step) (struct ibmvfc_target *))
 {
-	if (++tgt->init_retries > IBMVFC_MAX_INIT_RETRIES) {
+	if (++tgt->init_retries > IBMVFC_MAX_TGT_INIT_RETRIES) {
 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
 		wake_up(&tgt->vhost->work_wait_q);
 	} else
@@ -2708,6 +2800,8 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
 			rsp->status, rsp->error, status);
 		if (ibmvfc_retry_cmd(rsp->status, rsp->error))
 			ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
+		else
+			ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
 		break;
 	};
 
@@ -2802,6 +2896,8 @@ static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
 
 		if (ibmvfc_retry_cmd(rsp->status, rsp->error))
 			ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
+		else
+			ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
 		break;
 	};
 
@@ -3093,6 +3189,8 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
 			ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
 		else if (ibmvfc_retry_cmd(rsp->status, rsp->error))
 			ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
+		else
+			ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
 		break;
 	};
 
@@ -3423,6 +3521,7 @@ static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost)
 	case IBMVFC_HOST_ACTION_ALLOC_TGTS:
 	case IBMVFC_HOST_ACTION_TGT_ADD:
 	case IBMVFC_HOST_ACTION_TGT_DEL:
+	case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
 	case IBMVFC_HOST_ACTION_QUERY:
 	default:
 		break;
@@ -3519,7 +3618,13 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
 		break;
 	case IBMVFC_HOST_ACTION_INIT:
 		BUG_ON(vhost->state != IBMVFC_INITIALIZING);
-		vhost->job_step(vhost);
+		if (vhost->delay_init) {
+			vhost->delay_init = 0;
+			spin_unlock_irqrestore(vhost->host->host_lock, flags);
+			ssleep(15);
+			return;
+		} else
+			vhost->job_step(vhost);
 		break;
 	case IBMVFC_HOST_ACTION_QUERY:
 		list_for_each_entry(tgt, &vhost->targets, queue)
@@ -3538,6 +3643,7 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
 			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL);
 		break;
 	case IBMVFC_HOST_ACTION_TGT_DEL:
+	case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
 		list_for_each_entry(tgt, &vhost->targets, queue) {
 			if (tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
 				tgt_dbg(tgt, "Deleting rport\n");
@@ -3553,8 +3659,17 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
 		}
 
 		if (vhost->state == IBMVFC_INITIALIZING) {
-			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
-			vhost->job_step = ibmvfc_discover_targets;
+			if (vhost->action == IBMVFC_HOST_ACTION_TGT_DEL_FAILED) {
+				ibmvfc_set_host_state(vhost, IBMVFC_ACTIVE);
+				ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_ADD);
+				vhost->init_retries = 0;
+				spin_unlock_irqrestore(vhost->host->host_lock, flags);
+				scsi_unblock_requests(vhost->host);
+				return;
+			} else {
+				ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
+				vhost->job_step = ibmvfc_discover_targets;
+			}
 		} else {
 			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
 			spin_unlock_irqrestore(vhost->host->host_lock, flags);
@@ -3577,14 +3692,8 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
 			}
 		}
 
-		if (!ibmvfc_dev_init_to_do(vhost)) {
-			ibmvfc_set_host_state(vhost, IBMVFC_ACTIVE);
-			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_ADD);
-			vhost->init_retries = 0;
-			spin_unlock_irqrestore(vhost->host->host_lock, flags);
-			scsi_unblock_requests(vhost->host);
-			return;
-		}
+		if (!ibmvfc_dev_init_to_do(vhost))
+			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL_FAILED);
 		break;
 	case IBMVFC_HOST_ACTION_TGT_ADD:
 		list_for_each_entry(tgt, &vhost->targets, queue) {
@@ -3592,16 +3701,6 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
 				spin_unlock_irqrestore(vhost->host->host_lock, flags);
 				ibmvfc_tgt_add_rport(tgt);
 				return;
-			} else if (tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
-				tgt_dbg(tgt, "Deleting rport\n");
-				rport = tgt->rport;
-				tgt->rport = NULL;
-				list_del(&tgt->queue);
-				spin_unlock_irqrestore(vhost->host->host_lock, flags);
-				if (rport)
-					fc_remote_port_delete(rport);
-				kref_put(&tgt->kref, ibmvfc_release_tgt);
-				return;
 			}
 		}
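Editorial note: two things tie the ibmvfc.c hunks above together. First, the new delay_init path follows the standard rule that a work thread must never sleep under a spinlock: it clears the flag, drops host_lock, ssleep()s, and returns so ibmvfc_do_work() re-reads vhost->action on its next pass. A minimal sketch of the idiom, with hypothetical names:

	spin_lock_irqsave(&h->lock, flags);
	if (h->delay) {
		h->delay = 0;
		spin_unlock_irqrestore(&h->lock, flags);
		ssleep(15);	/* sleeping is only legal with the lock dropped */
		return;		/* caller loops and re-reads state under the lock */
	}

Second, the new IBMVFC_HOST_ACTION_TGT_DEL_FAILED state reuses the TGT_DEL rport-deletion loop for targets that failed initialization, then marks the host active and unblocks requests without forcing a full rediscovery — which is why the DEL_RPORT handling could be dropped from the TGT_ADD case just above.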
 
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index fb3177ab6691..babdf3db59df 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -29,11 +29,11 @@
 #include "viosrp.h"
 
 #define IBMVFC_NAME	"ibmvfc"
-#define IBMVFC_DRIVER_VERSION		"1.0.2"
-#define IBMVFC_DRIVER_DATE		"(August 14, 2008)"
+#define IBMVFC_DRIVER_VERSION		"1.0.4"
+#define IBMVFC_DRIVER_DATE		"(November 14, 2008)"
 
 #define IBMVFC_DEFAULT_TIMEOUT	15
-#define IBMVFC_INIT_TIMEOUT		30
+#define IBMVFC_INIT_TIMEOUT		120
 #define IBMVFC_MAX_REQUESTS_DEFAULT	100
 
 #define IBMVFC_DEBUG			0
@@ -43,7 +43,8 @@
 #define IBMVFC_MAX_DISC_THREADS	4
 #define IBMVFC_TGT_MEMPOOL_SZ		64
 #define IBMVFC_MAX_CMDS_PER_LUN	64
-#define IBMVFC_MAX_INIT_RETRIES	3
+#define IBMVFC_MAX_HOST_INIT_RETRIES	6
+#define IBMVFC_MAX_TGT_INIT_RETRIES		3
 #define IBMVFC_DEV_LOSS_TMO		(5 * 60)
 #define IBMVFC_DEFAULT_LOG_LEVEL	2
 #define IBMVFC_MAX_CDB_LEN		16
@@ -109,6 +110,7 @@ enum ibmvfc_vios_errors {
 	IBMVFC_TRANS_CANCELLED			= 0x0006,
 	IBMVFC_TRANS_CANCELLED_IMPLICIT	= 0x0007,
 	IBMVFC_INSUFFICIENT_RESOURCE		= 0x0008,
+	IBMVFC_PLOGI_REQUIRED			= 0x0010,
 	IBMVFC_COMMAND_FAILED			= 0x8000,
 };
 
@@ -337,7 +339,6 @@ struct ibmvfc_tmf {
 #define IBMVFC_TMF_LUA_VALID		0x40
 	u32 cancel_key;
 	u32 my_cancel_key;
-#define IBMVFC_TMF_CANCEL_KEY		0x80000000
 	u32 pad;
 	u64 reserved[2];
 }__attribute__((packed, aligned (8)));
@@ -524,10 +525,10 @@ enum ibmvfc_async_event {
 };
 
 struct ibmvfc_crq {
-	u8 valid;
-	u8 format;
+	volatile u8 valid;
+	volatile u8 format;
 	u8 reserved[6];
-	u64 ioba;
+	volatile u64 ioba;
 }__attribute__((packed, aligned (8)));
 
 struct ibmvfc_crq_queue {
@@ -537,13 +538,13 @@ struct ibmvfc_crq_queue {
 };
 
 struct ibmvfc_async_crq {
-	u8 valid;
+	volatile u8 valid;
 	u8 pad[3];
 	u32 pad2;
-	u64 event;
-	u64 scsi_id;
-	u64 wwpn;
-	u64 node_name;
+	volatile u64 event;
+	volatile u64 scsi_id;
+	volatile u64 wwpn;
+	volatile u64 node_name;
 	u64 reserved;
 }__attribute__((packed, aligned (8)));
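Editorial note: the volatile qualifiers added to the CRQ structures matter because these buffers are written by the hypervisor, not by code the compiler can see; without volatile, a re-poll of crq->valid could legally be satisfied from a stale register. A hedged illustration of the kind of loop this protects (hypothetical, not the driver's exact code):

	/* firmware posts a new entry by setting a bit in crq->valid */
	while (!(crq->valid & 0x80))
		cpu_relax();	/* each test must re-read memory */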
 
@@ -606,6 +607,7 @@ struct ibmvfc_event {
 	struct srp_direct_buf *ext_list;
 	dma_addr_t ext_list_token;
 	struct completion comp;
+	struct completion *eh_comp;
 	struct timer_list timer;
 };
 
@@ -626,6 +628,7 @@ enum ibmvfc_host_action {
 	IBMVFC_HOST_ACTION_TGT_DEL,
 	IBMVFC_HOST_ACTION_ALLOC_TGTS,
 	IBMVFC_HOST_ACTION_TGT_INIT,
+	IBMVFC_HOST_ACTION_TGT_DEL_FAILED,
 	IBMVFC_HOST_ACTION_TGT_ADD,
 };
 
@@ -671,6 +674,7 @@ struct ibmvfc_host {
 	int discovery_threads;
 	int client_migrated;
 	int reinit;
+	int delay_init;
 	int events_to_log;
 #define IBMVFC_AE_LINKUP	0x0001
 #define IBMVFC_AE_LINKDOWN	0x0002
@@ -700,7 +704,7 @@ struct ibmvfc_host {
 
 #define ibmvfc_log(vhost, level, ...) \
 	do { \
-		if (level >= (vhost)->log_level) \
+		if ((vhost)->log_level >= level) \
 			dev_err((vhost)->dev, ##__VA_ARGS__); \
 	} while (0)
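Editorial note: the flipped comparison in ibmvfc_log is a real bug fix, not churn — log_level is a verbosity threshold, so a message should be emitted when the configured threshold is at or above the message's level. A worked example with the default IBMVFC_DEFAULT_LOG_LEVEL of 2:

	int level = 1, log_level = 2;		/* important message, default threshold */
	printed_old = (level >= log_level);	/* 0: message wrongly suppressed */
	printed_new = (log_level >= level);	/* 1: message printed, as intended */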
 
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 6cad1758243a..868d35ea01bb 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -107,7 +107,7 @@ module_param_named(max_channel, max_channel, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(max_channel, "Largest channel value");
 module_param_named(init_timeout, init_timeout, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds");
-module_param_named(max_requests, max_requests, int, S_IRUGO | S_IWUSR);
+module_param_named(max_requests, max_requests, int, S_IRUGO);
 MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter");
 
 /* ------------------------------------------------------------
@@ -1657,7 +1657,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 
 	vdev->dev.driver_data = NULL;
 
-	driver_template.can_queue = max_requests;
+	driver_template.can_queue = max_requests - 2;
 	host = scsi_host_alloc(&driver_template, sizeof(*hostdata));
 	if (!host) {
 		dev_err(&vdev->dev, "couldn't allocate host data\n");
diff --git a/drivers/scsi/in2000.c b/drivers/scsi/in2000.c
index 8053b1e86ccb..52bdc6df6b92 100644
--- a/drivers/scsi/in2000.c
+++ b/drivers/scsi/in2000.c
@@ -107,7 +107,7 @@
  * this thing into as good a shape as possible, and I'm positive
  * there are lots of lurking bugs and "Stupid Places".
  *
- * Updated for Linux 2.5 by Alan Cox <alan@redhat.com>
+ * Updated for Linux 2.5 by Alan Cox <alan@lxorguk.ukuu.org.uk>
  *	- Using new_eh handler
  *	- Hopefully got all the locking right again
  *	See "FIXME" notes for items that could do with more work
diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c
index e3f739776bad..5529518ff2fa 100644
--- a/drivers/scsi/initio.c
+++ b/drivers/scsi/initio.c
@@ -4,7 +4,7 @@
  * Copyright (c) 1994-1998 Initio Corporation
  * Copyright (c) 1998 Bas Vermeulen <bvermeul@blackstar.xs4all.nl>
  * Copyright (c) 2004 Christoph Hellwig <hch@lst.de>
- * Copyright (c) 2007 Red Hat <alan@redhat.com>
+ * Copyright (c) 2007 Red Hat
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
diff --git a/drivers/scsi/initio.h b/drivers/scsi/initio.h
index cb48efa81fe2..e58af9e95506 100644
--- a/drivers/scsi/initio.h
+++ b/drivers/scsi/initio.h
@@ -4,7 +4,7 @@
  * Copyright (c) 1994-1998 Initio Corporation
  * All rights reserved.
  *
- * Cleanups (c) Copyright 2007 Red Hat <alan@redhat.com>
+ * Cleanups (c) Copyright 2007 Red Hat <alan@lxorguk.ukuu.org.uk>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index ded854a6dd35..0edfb1fa63ce 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -5389,9 +5389,9 @@ static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
 	wake_up_all(&ioa_cfg->reset_wait_q);
 
-	spin_unlock_irq(ioa_cfg->host->host_lock);
+	spin_unlock(ioa_cfg->host->host_lock);
 	scsi_unblock_requests(ioa_cfg->host);
-	spin_lock_irq(ioa_cfg->host->host_lock);
+	spin_lock(ioa_cfg->host->host_lock);
 
 	if (!ioa_cfg->allow_cmds)
 		scsi_block_requests(ioa_cfg->host);
@@ -7473,7 +7473,7 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
 		goto out_scsi_host_put;
 	}
 
-	ipr_regs = ioremap(ipr_regs_pci, pci_resource_len(pdev, 0));
+	ipr_regs = pci_ioremap_bar(pdev, 0);
 
 	if (!ipr_regs) {
 		dev_err(&pdev->dev,
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 4871dd1f2582..59459141b437 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -19,7 +19,7 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  *
- * Alan Cox <alan@redhat.com> - Removed several careless u32/dma_addr_t errors
+ * Alan Cox <alan@lxorguk.ukuu.org.uk> - Removed several careless u32/dma_addr_t errors
  *				that broke 64bit platforms.
  */
 
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index e11bce6ab63c..23808dfe22ba 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -27,7 +27,6 @@
  */
 
 #include <linux/types.h>
-#include <linux/list.h>
 #include <linux/inet.h>
 #include <linux/file.h>
 #include <linux/blkdev.h>
@@ -44,12 +43,12 @@
 
 #include "iscsi_tcp.h"
 
-MODULE_AUTHOR("Dmitry Yusupov <dmitry_yus@yahoo.com>, "
+MODULE_AUTHOR("Mike Christie <michaelc@cs.wisc.edu>, "
+	      "Dmitry Yusupov <dmitry_yus@yahoo.com>, "
 	      "Alex Aizman <itn780@yahoo.com>");
 MODULE_DESCRIPTION("iSCSI/TCP data-path");
 MODULE_LICENSE("GPL");
 #undef DEBUG_TCP
-#define DEBUG_ASSERT
 
 #ifdef DEBUG_TCP
 #define debug_tcp(fmt...) printk(KERN_INFO "tcp: " fmt)
@@ -57,934 +56,41 @@ MODULE_LICENSE("GPL");
 #define debug_tcp(fmt...)
 #endif
 
-#ifndef DEBUG_ASSERT
-#ifdef BUG_ON
-#undef BUG_ON
-#endif
-#define BUG_ON(expr)
-#endif
-
-static struct scsi_transport_template *iscsi_tcp_scsi_transport;
-static struct scsi_host_template iscsi_sht;
-static struct iscsi_transport iscsi_tcp_transport;
+static struct scsi_transport_template *iscsi_sw_tcp_scsi_transport;
+static struct scsi_host_template iscsi_sw_tcp_sht;
+static struct iscsi_transport iscsi_sw_tcp_transport;
 
 static unsigned int iscsi_max_lun = 512;
 module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
 
-static int iscsi_tcp_hdr_recv_done(struct iscsi_tcp_conn *tcp_conn,
-				   struct iscsi_segment *segment);
-
-/*
- * Scatterlist handling: inside the iscsi_segment, we
- * remember an index into the scatterlist, and set data/size
- * to the current scatterlist entry. For highmem pages, we
- * kmap as needed.
- *
- * Note that the page is unmapped when we return from
- * TCP's data_ready handler, so we may end up mapping and
- * unmapping the same page repeatedly. The whole reason
- * for this is that we shouldn't keep the page mapped
- * outside the softirq.
- */
-
-/**
- * iscsi_tcp_segment_init_sg - init indicated scatterlist entry
- * @segment: the buffer object
- * @sg: scatterlist
- * @offset: byte offset into that sg entry
- *
- * This function sets up the segment so that subsequent
- * data is copied to the indicated sg entry, at the given
- * offset.
- */
-static inline void
-iscsi_tcp_segment_init_sg(struct iscsi_segment *segment,
-			  struct scatterlist *sg, unsigned int offset)
-{
-	segment->sg = sg;
-	segment->sg_offset = offset;
-	segment->size = min(sg->length - offset,
-			    segment->total_size - segment->total_copied);
-	segment->data = NULL;
-}
-
-/**
- * iscsi_tcp_segment_map - map the current S/G page
- * @segment: iscsi_segment
- * @recv: 1 if called from recv path
- *
- * We only need to possibly kmap data if scatter lists are being used,
- * because the iscsi passthrough and internal IO paths will never use high
- * mem pages.
- */
-static inline void
-iscsi_tcp_segment_map(struct iscsi_segment *segment, int recv)
-{
-	struct scatterlist *sg;
-
-	if (segment->data != NULL || !segment->sg)
-		return;
-
-	sg = segment->sg;
-	BUG_ON(segment->sg_mapped);
-	BUG_ON(sg->length == 0);
-
-	/*
-	 * If the page count is greater than one it is ok to send
-	 * to the network layer's zero copy send path. If not we
-	 * have to go the slow sendmsg path. We always map for the
-	 * recv path.
-	 */
-	if (page_count(sg_page(sg)) >= 1 && !recv)
-		return;
-
-	debug_tcp("iscsi_tcp_segment_map %s %p\n", recv ? "recv" : "xmit",
-		  segment);
-	segment->sg_mapped = kmap_atomic(sg_page(sg), KM_SOFTIRQ0);
-	segment->data = segment->sg_mapped + sg->offset + segment->sg_offset;
-}
-
-static inline void
-iscsi_tcp_segment_unmap(struct iscsi_segment *segment)
-{
-	debug_tcp("iscsi_tcp_segment_unmap %p\n", segment);
-
-	if (segment->sg_mapped) {
-		debug_tcp("iscsi_tcp_segment_unmap valid\n");
-		kunmap_atomic(segment->sg_mapped, KM_SOFTIRQ0);
-		segment->sg_mapped = NULL;
-		segment->data = NULL;
-	}
-}
-
-/*
- * Splice the digest buffer into the buffer
- */
-static inline void
-iscsi_tcp_segment_splice_digest(struct iscsi_segment *segment, void *digest)
-{
-	segment->data = digest;
-	segment->digest_len = ISCSI_DIGEST_SIZE;
-	segment->total_size += ISCSI_DIGEST_SIZE;
-	segment->size = ISCSI_DIGEST_SIZE;
-	segment->copied = 0;
-	segment->sg = NULL;
-	segment->hash = NULL;
-}
-
-/**
- * iscsi_tcp_segment_done - check whether the segment is complete
- * @segment: iscsi segment to check
- * @recv: set to one if this is called from the recv path
- * @copied: number of bytes copied
- *
- * Check if we're done receiving this segment. If the receive
- * buffer is full but we expect more data, move on to the
- * next entry in the scatterlist.
- *
- * If the amount of data we received isn't a multiple of 4,
- * we will transparently receive the pad bytes, too.
- *
- * This function must be re-entrant.
- */
-static inline int
-iscsi_tcp_segment_done(struct iscsi_segment *segment, int recv, unsigned copied)
-{
-	static unsigned char padbuf[ISCSI_PAD_LEN];
-	struct scatterlist sg;
-	unsigned int pad;
-
-	debug_tcp("copied %u %u size %u %s\n", segment->copied, copied,
-		  segment->size, recv ? "recv" : "xmit");
-	if (segment->hash && copied) {
-		/*
-		 * If a segment is kmapd we must unmap it before sending
-		 * to the crypto layer since that will try to kmap it again.
-		 */
-		iscsi_tcp_segment_unmap(segment);
-
-		if (!segment->data) {
-			sg_init_table(&sg, 1);
-			sg_set_page(&sg, sg_page(segment->sg), copied,
-				    segment->copied + segment->sg_offset +
-							segment->sg->offset);
-		} else
-			sg_init_one(&sg, segment->data + segment->copied,
-				    copied);
-		crypto_hash_update(segment->hash, &sg, copied);
-	}
-
-	segment->copied += copied;
-	if (segment->copied < segment->size) {
-		iscsi_tcp_segment_map(segment, recv);
-		return 0;
-	}
-
-	segment->total_copied += segment->copied;
-	segment->copied = 0;
-	segment->size = 0;
-
-	/* Unmap the current scatterlist page, if there is one. */
-	iscsi_tcp_segment_unmap(segment);
-
-	/* Do we have more scatterlist entries? */
-	debug_tcp("total copied %u total size %u\n", segment->total_copied,
-		   segment->total_size);
-	if (segment->total_copied < segment->total_size) {
-		/* Proceed to the next entry in the scatterlist. */
-		iscsi_tcp_segment_init_sg(segment, sg_next(segment->sg),
-					  0);
-		iscsi_tcp_segment_map(segment, recv);
-		BUG_ON(segment->size == 0);
-		return 0;
-	}
-
-	/* Do we need to handle padding? */
-	pad = iscsi_padding(segment->total_copied);
-	if (pad != 0) {
-		debug_tcp("consume %d pad bytes\n", pad);
-		segment->total_size += pad;
-		segment->size = pad;
-		segment->data = padbuf;
-		return 0;
-	}
-
-	/*
-	 * Set us up for transferring the data digest. hdr digest
-	 * is completely handled in hdr done function.
-	 */
-	if (segment->hash) {
-		crypto_hash_final(segment->hash, segment->digest);
-		iscsi_tcp_segment_splice_digest(segment,
-				 recv ? segment->recv_digest : segment->digest);
-		return 0;
-	}
-
-	return 1;
-}
-
-/**
- * iscsi_tcp_xmit_segment - transmit segment
- * @tcp_conn: the iSCSI TCP connection
- * @segment: the buffer to transmit
- *
- * This function transmits as much of the buffer as
- * the network layer will accept, and returns the number of
- * bytes transmitted.
- *
- * If CRC hashing is enabled, the function will compute the
- * hash as it goes. When the entire segment has been transmitted,
- * it will retrieve the hash value and send it as well.
- */
-static int
-iscsi_tcp_xmit_segment(struct iscsi_tcp_conn *tcp_conn,
-		       struct iscsi_segment *segment)
-{
-	struct socket *sk = tcp_conn->sock;
-	unsigned int copied = 0;
-	int r = 0;
-
-	while (!iscsi_tcp_segment_done(segment, 0, r)) {
-		struct scatterlist *sg;
-		unsigned int offset, copy;
-		int flags = 0;
-
-		r = 0;
-		offset = segment->copied;
-		copy = segment->size - offset;
-
-		if (segment->total_copied + segment->size < segment->total_size)
-			flags |= MSG_MORE;
-
-		/* Use sendpage if we can; else fall back to sendmsg */
-		if (!segment->data) {
-			sg = segment->sg;
-			offset += segment->sg_offset + sg->offset;
-			r = tcp_conn->sendpage(sk, sg_page(sg), offset, copy,
-					       flags);
-		} else {
-			struct msghdr msg = { .msg_flags = flags };
-			struct kvec iov = {
-				.iov_base = segment->data + offset,
-				.iov_len = copy
-			};
-
-			r = kernel_sendmsg(sk, &msg, &iov, 1, copy);
-		}
-
-		if (r < 0) {
-			iscsi_tcp_segment_unmap(segment);
-			if (copied || r == -EAGAIN)
-				break;
-			return r;
-		}
-		copied += r;
-	}
-	return copied;
-}
-
-/**
- * iscsi_tcp_segment_recv - copy data to segment
- * @tcp_conn: the iSCSI TCP connection
- * @segment: the buffer to copy to
- * @ptr: data pointer
- * @len: amount of data available
- *
- * This function copies up to @len bytes to the
- * given buffer, and returns the number of bytes
- * consumed, which can actually be less than @len.
- *
- * If hash digest is enabled, the function will update the
- * hash while copying.
- * Combining these two operations doesn't buy us a lot (yet),
- * but in the future we could implement combined copy+crc,
- * just the way we do for network layer checksums.
- */
-static int
-iscsi_tcp_segment_recv(struct iscsi_tcp_conn *tcp_conn,
-		       struct iscsi_segment *segment, const void *ptr,
-		       unsigned int len)
-{
-	unsigned int copy = 0, copied = 0;
-
-	while (!iscsi_tcp_segment_done(segment, 1, copy)) {
-		if (copied == len) {
-			debug_tcp("iscsi_tcp_segment_recv copied %d bytes\n",
-				  len);
-			break;
-		}
-
-		copy = min(len - copied, segment->size - segment->copied);
-		debug_tcp("iscsi_tcp_segment_recv copying %d\n", copy);
-		memcpy(segment->data + segment->copied, ptr + copied, copy);
-		copied += copy;
-	}
-	return copied;
-}
-
-static inline void
-iscsi_tcp_dgst_header(struct hash_desc *hash, const void *hdr, size_t hdrlen,
-		      unsigned char digest[ISCSI_DIGEST_SIZE])
-{
-	struct scatterlist sg;
-
-	sg_init_one(&sg, hdr, hdrlen);
-	crypto_hash_digest(hash, &sg, hdrlen, digest);
-}
-
-static inline int
-iscsi_tcp_dgst_verify(struct iscsi_tcp_conn *tcp_conn,
-		      struct iscsi_segment *segment)
-{
-	if (!segment->digest_len)
-		return 1;
-
-	if (memcmp(segment->recv_digest, segment->digest,
-		   segment->digest_len)) {
-		debug_scsi("digest mismatch\n");
-		return 0;
-	}
-
-	return 1;
-}
-
-/*
- * Helper function to set up segment buffer
- */
-static inline void
-__iscsi_segment_init(struct iscsi_segment *segment, size_t size,
-		     iscsi_segment_done_fn_t *done, struct hash_desc *hash)
-{
-	memset(segment, 0, sizeof(*segment));
-	segment->total_size = size;
-	segment->done = done;
-
-	if (hash) {
-		segment->hash = hash;
-		crypto_hash_init(hash);
-	}
-}
-
-static inline void
-iscsi_segment_init_linear(struct iscsi_segment *segment, void *data,
-			  size_t size, iscsi_segment_done_fn_t *done,
-			  struct hash_desc *hash)
-{
-	__iscsi_segment_init(segment, size, done, hash);
-	segment->data = data;
-	segment->size = size;
-}
-
-static inline int
-iscsi_segment_seek_sg(struct iscsi_segment *segment,
-		      struct scatterlist *sg_list, unsigned int sg_count,
-		      unsigned int offset, size_t size,
-		      iscsi_segment_done_fn_t *done, struct hash_desc *hash)
-{
-	struct scatterlist *sg;
-	unsigned int i;
-
-	debug_scsi("iscsi_segment_seek_sg offset %u size %llu\n",
-		  offset, size);
-	__iscsi_segment_init(segment, size, done, hash);
-	for_each_sg(sg_list, sg, sg_count, i) {
-		debug_scsi("sg %d, len %u offset %u\n", i, sg->length,
-			   sg->offset);
-		if (offset < sg->length) {
-			iscsi_tcp_segment_init_sg(segment, sg, offset);
-			return 0;
-		}
-		offset -= sg->length;
-	}
-
-	return ISCSI_ERR_DATA_OFFSET;
-}
-
-/**
- * iscsi_tcp_hdr_recv_prep - prep segment for hdr reception
- * @tcp_conn: iscsi connection to prep for
- *
- * This function always passes NULL for the hash argument, because when this
- * function is called we do not yet know the final size of the header and want
- * to delay the digest processing until we know that.
- */
-static void
-iscsi_tcp_hdr_recv_prep(struct iscsi_tcp_conn *tcp_conn)
-{
-	debug_tcp("iscsi_tcp_hdr_recv_prep(%p%s)\n", tcp_conn,
-		  tcp_conn->iscsi_conn->hdrdgst_en ? ", digest enabled" : "");
-	iscsi_segment_init_linear(&tcp_conn->in.segment,
-				tcp_conn->in.hdr_buf, sizeof(struct iscsi_hdr),
-				iscsi_tcp_hdr_recv_done, NULL);
-}
-
-/*
- * Handle incoming reply to any other type of command
- */
-static int
-iscsi_tcp_data_recv_done(struct iscsi_tcp_conn *tcp_conn,
-			 struct iscsi_segment *segment)
-{
-	struct iscsi_conn *conn = tcp_conn->iscsi_conn;
-	int rc = 0;
-
-	if (!iscsi_tcp_dgst_verify(tcp_conn, segment))
-		return ISCSI_ERR_DATA_DGST;
-
-	rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr,
-			conn->data, tcp_conn->in.datalen);
-	if (rc)
-		return rc;
-
-	iscsi_tcp_hdr_recv_prep(tcp_conn);
-	return 0;
-}
-
-static void
-iscsi_tcp_data_recv_prep(struct iscsi_tcp_conn *tcp_conn)
-{
-	struct iscsi_conn *conn = tcp_conn->iscsi_conn;
-	struct hash_desc *rx_hash = NULL;
-
-	if (conn->datadgst_en)
-		rx_hash = &tcp_conn->rx_hash;
-
-	iscsi_segment_init_linear(&tcp_conn->in.segment,
-				conn->data, tcp_conn->in.datalen,
-				iscsi_tcp_data_recv_done, rx_hash);
-}
-
-/*
- * must be called with session lock
- */
-static void
-iscsi_tcp_cleanup_task(struct iscsi_conn *conn, struct iscsi_task *task)
-{
-	struct iscsi_tcp_task *tcp_task = task->dd_data;
-	struct iscsi_r2t_info *r2t;
-
-	/* nothing to do for mgmt tasks */
-	if (!task->sc)
-		return;
-
-	/* flush task's r2t queues */
-	while (__kfifo_get(tcp_task->r2tqueue, (void*)&r2t, sizeof(void*))) {
-		__kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
-			    sizeof(void*));
-		debug_scsi("iscsi_tcp_cleanup_task pending r2t dropped\n");
-	}
-
-	r2t = tcp_task->r2t;
-	if (r2t != NULL) {
-		__kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
-			    sizeof(void*));
-		tcp_task->r2t = NULL;
-	}
-}
-
-/**
- * iscsi_data_in - SCSI Data-In Response processing
- * @conn: iscsi connection
- * @task: scsi command task
- **/
-static int
-iscsi_data_in(struct iscsi_conn *conn, struct iscsi_task *task)
-{
-	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
-	struct iscsi_tcp_task *tcp_task = task->dd_data;
-	struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)tcp_conn->in.hdr;
-	int datasn = be32_to_cpu(rhdr->datasn);
-	unsigned total_in_length = scsi_in(task->sc)->length;
-
-	iscsi_update_cmdsn(conn->session, (struct iscsi_nopin*)rhdr);
-	if (tcp_conn->in.datalen == 0)
-		return 0;
-
-	if (tcp_task->exp_datasn != datasn) {
-		debug_tcp("%s: task->exp_datasn(%d) != rhdr->datasn(%d)\n",
-		          __func__, tcp_task->exp_datasn, datasn);
-		return ISCSI_ERR_DATASN;
-	}
-
-	tcp_task->exp_datasn++;
-
-	tcp_task->data_offset = be32_to_cpu(rhdr->offset);
-	if (tcp_task->data_offset + tcp_conn->in.datalen > total_in_length) {
-		debug_tcp("%s: data_offset(%d) + data_len(%d) > total_length_in(%d)\n",
-		          __func__, tcp_task->data_offset,
-		          tcp_conn->in.datalen, total_in_length);
-		return ISCSI_ERR_DATA_OFFSET;
-	}
-
-	conn->datain_pdus_cnt++;
-	return 0;
-}
-
-/**
- * iscsi_solicit_data_init - initialize first Data-Out
- * @conn: iscsi connection
- * @task: scsi command task
- * @r2t: R2T info
- *
- * Notes:
- *	Initialize first Data-Out within this R2T sequence and finds
- *	proper data_offset within this SCSI command.
- *
- *	This function is called with connection lock taken.
- **/
-static void
-iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_task *task,
-			struct iscsi_r2t_info *r2t)
-{
-	struct iscsi_data *hdr;
-
-	hdr = &r2t->dtask.hdr;
-	memset(hdr, 0, sizeof(struct iscsi_data));
-	hdr->ttt = r2t->ttt;
-	hdr->datasn = cpu_to_be32(r2t->solicit_datasn);
-	r2t->solicit_datasn++;
-	hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
-	memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
-	hdr->itt = task->hdr->itt;
-	hdr->exp_statsn = r2t->exp_statsn;
-	hdr->offset = cpu_to_be32(r2t->data_offset);
-	if (r2t->data_length > conn->max_xmit_dlength) {
-		hton24(hdr->dlength, conn->max_xmit_dlength);
-		r2t->data_count = conn->max_xmit_dlength;
-		hdr->flags = 0;
-	} else {
-		hton24(hdr->dlength, r2t->data_length);
-		r2t->data_count = r2t->data_length;
-		hdr->flags = ISCSI_FLAG_CMD_FINAL;
-	}
-	conn->dataout_pdus_cnt++;
-
-	r2t->sent = 0;
-}
-
-/**
- * iscsi_r2t_rsp - iSCSI R2T Response processing
- * @conn: iscsi connection
- * @task: scsi command task
- **/
-static int
-iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
-{
-	struct iscsi_r2t_info *r2t;
-	struct iscsi_session *session = conn->session;
-	struct iscsi_tcp_task *tcp_task = task->dd_data;
-	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
-	struct iscsi_r2t_rsp *rhdr = (struct iscsi_r2t_rsp *)tcp_conn->in.hdr;
-	int r2tsn = be32_to_cpu(rhdr->r2tsn);
-	int rc;
-
-	if (tcp_conn->in.datalen) {
-		iscsi_conn_printk(KERN_ERR, conn,
-				  "invalid R2t with datalen %d\n",
-				  tcp_conn->in.datalen);
-		return ISCSI_ERR_DATALEN;
-	}
-
-	if (tcp_task->exp_datasn != r2tsn){
-		debug_tcp("%s: task->exp_datasn(%d) != rhdr->r2tsn(%d)\n",
-		          __func__, tcp_task->exp_datasn, r2tsn);
-		return ISCSI_ERR_R2TSN;
-	}
-
-	/* fill-in new R2T associated with the task */
-	iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
-
-	if (!task->sc || session->state != ISCSI_STATE_LOGGED_IN) {
-		iscsi_conn_printk(KERN_INFO, conn,
-				  "dropping R2T itt %d in recovery.\n",
-				  task->itt);
-		return 0;
-	}
-
-	rc = __kfifo_get(tcp_task->r2tpool.queue, (void*)&r2t, sizeof(void*));
-	BUG_ON(!rc);
-
-	r2t->exp_statsn = rhdr->statsn;
-	r2t->data_length = be32_to_cpu(rhdr->data_length);
-	if (r2t->data_length == 0) {
-		iscsi_conn_printk(KERN_ERR, conn,
-				  "invalid R2T with zero data len\n");
-		__kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
-			    sizeof(void*));
-		return ISCSI_ERR_DATALEN;
-	}
-
-	if (r2t->data_length > session->max_burst)
-		debug_scsi("invalid R2T with data len %u and max burst %u."
-			   "Attempting to execute request.\n",
-			    r2t->data_length, session->max_burst);
-
-	r2t->data_offset = be32_to_cpu(rhdr->data_offset);
-	if (r2t->data_offset + r2t->data_length > scsi_out(task->sc)->length) {
-		iscsi_conn_printk(KERN_ERR, conn,
-				  "invalid R2T with data len %u at offset %u "
-				  "and total length %d\n", r2t->data_length,
-				  r2t->data_offset, scsi_out(task->sc)->length);
-		__kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
-			    sizeof(void*));
-		return ISCSI_ERR_DATALEN;
-	}
-
-	r2t->ttt = rhdr->ttt; /* no flip */
-	r2t->solicit_datasn = 0;
-
-	iscsi_solicit_data_init(conn, task, r2t);
-
-	tcp_task->exp_datasn = r2tsn + 1;
-	__kfifo_put(tcp_task->r2tqueue, (void*)&r2t, sizeof(void*));
-	conn->r2t_pdus_cnt++;
-
-	iscsi_requeue_task(task);
-	return 0;
-}
-
-/*
- * Handle incoming reply to DataIn command
- */
-static int
-iscsi_tcp_process_data_in(struct iscsi_tcp_conn *tcp_conn,
-			  struct iscsi_segment *segment)
-{
-	struct iscsi_conn *conn = tcp_conn->iscsi_conn;
-	struct iscsi_hdr *hdr = tcp_conn->in.hdr;
-	int rc;
-
-	if (!iscsi_tcp_dgst_verify(tcp_conn, segment))
-		return ISCSI_ERR_DATA_DGST;
-
-	/* check for non-exceptional status */
-	if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
-		rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr, NULL, 0);
-		if (rc)
-			return rc;
-	}
-
-	iscsi_tcp_hdr_recv_prep(tcp_conn);
-	return 0;
-}
-
-/**
- * iscsi_tcp_hdr_dissect - process PDU header
- * @conn: iSCSI connection
- * @hdr: PDU header
- *
- * This function analyzes the header of the PDU received,
- * and performs several sanity checks. If the PDU is accompanied
- * by data, the receive buffer is set up to copy the incoming data
- * to the correct location.
- */
-static int
-iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
-{
-	int rc = 0, opcode, ahslen;
-	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
-	struct iscsi_task *task;
-
-	/* verify PDU length */
-	tcp_conn->in.datalen = ntoh24(hdr->dlength);
-	if (tcp_conn->in.datalen > conn->max_recv_dlength) {
-		iscsi_conn_printk(KERN_ERR, conn,
-				  "iscsi_tcp: datalen %d > %d\n",
-				  tcp_conn->in.datalen, conn->max_recv_dlength);
-		return ISCSI_ERR_DATALEN;
-	}
-
-	/* Additional header segments. So far, we don't
-	 * process additional headers.
-	 */
-	ahslen = hdr->hlength << 2;
-
-	opcode = hdr->opcode & ISCSI_OPCODE_MASK;
-	/* verify itt (itt encoding: age+cid+itt) */
-	rc = iscsi_verify_itt(conn, hdr->itt);
-	if (rc)
-		return rc;
-
-	debug_tcp("opcode 0x%x ahslen %d datalen %d\n",
-		  opcode, ahslen, tcp_conn->in.datalen);
-
-	switch(opcode) {
-	case ISCSI_OP_SCSI_DATA_IN:
-		spin_lock(&conn->session->lock);
-		task = iscsi_itt_to_ctask(conn, hdr->itt);
-		if (!task)
-			rc = ISCSI_ERR_BAD_ITT;
-		else
-			rc = iscsi_data_in(conn, task);
-		if (rc) {
-			spin_unlock(&conn->session->lock);
-			break;
-		}
-
-		if (tcp_conn->in.datalen) {
-			struct iscsi_tcp_task *tcp_task = task->dd_data;
-			struct hash_desc *rx_hash = NULL;
-			struct scsi_data_buffer *sdb = scsi_in(task->sc);
-
-			/*
-			 * Setup copy of Data-In into the Scsi_Cmnd
-			 * Scatterlist case:
-			 * We set up the iscsi_segment to point to the next
-			 * scatterlist entry to copy to. As we go along,
-			 * we move on to the next scatterlist entry and
-			 * update the digest per-entry.
-			 */
-			if (conn->datadgst_en)
-				rx_hash = &tcp_conn->rx_hash;
-
-			debug_tcp("iscsi_tcp_begin_data_in(%p, offset=%d, "
-				  "datalen=%d)\n", tcp_conn,
-				  tcp_task->data_offset,
-				  tcp_conn->in.datalen);
-			rc = iscsi_segment_seek_sg(&tcp_conn->in.segment,
-						   sdb->table.sgl,
-						   sdb->table.nents,
-						   tcp_task->data_offset,
-						   tcp_conn->in.datalen,
-						   iscsi_tcp_process_data_in,
-						   rx_hash);
-			spin_unlock(&conn->session->lock);
-			return rc;
-		}
-		rc = __iscsi_complete_pdu(conn, hdr, NULL, 0);
-		spin_unlock(&conn->session->lock);
-		break;
-	case ISCSI_OP_SCSI_CMD_RSP:
-		if (tcp_conn->in.datalen) {
-			iscsi_tcp_data_recv_prep(tcp_conn);
-			return 0;
-		}
-		rc = iscsi_complete_pdu(conn, hdr, NULL, 0);
-		break;
-	case ISCSI_OP_R2T:
-		spin_lock(&conn->session->lock);
-		task = iscsi_itt_to_ctask(conn, hdr->itt);
-		if (!task)
-			rc = ISCSI_ERR_BAD_ITT;
-		else if (ahslen)
-			rc = ISCSI_ERR_AHSLEN;
-		else if (task->sc->sc_data_direction == DMA_TO_DEVICE)
-			rc = iscsi_r2t_rsp(conn, task);
-		else
-			rc = ISCSI_ERR_PROTO;
-		spin_unlock(&conn->session->lock);
-		break;
-	case ISCSI_OP_LOGIN_RSP:
-	case ISCSI_OP_TEXT_RSP:
-	case ISCSI_OP_REJECT:
-	case ISCSI_OP_ASYNC_EVENT:
-		/*
-		 * It is possible that we could get a PDU with a buffer larger
-		 * than 8K, but there are no targets that currently do this.
-		 * For now we fail until we find a vendor that needs it
-		 */
-		if (ISCSI_DEF_MAX_RECV_SEG_LEN < tcp_conn->in.datalen) {
-			iscsi_conn_printk(KERN_ERR, conn,
-					  "iscsi_tcp: received buffer of "
-					  "len %u but conn buffer is only %u "
-					  "(opcode %0x)\n",
-					  tcp_conn->in.datalen,
-					  ISCSI_DEF_MAX_RECV_SEG_LEN, opcode);
-			rc = ISCSI_ERR_PROTO;
-			break;
-		}
-
-		/* If there's data coming in with the response,
-		 * receive it to the connection's buffer.
-		 */
-		if (tcp_conn->in.datalen) {
-			iscsi_tcp_data_recv_prep(tcp_conn);
-			return 0;
-		}
-	/* fall through */
-	case ISCSI_OP_LOGOUT_RSP:
-	case ISCSI_OP_NOOP_IN:
-	case ISCSI_OP_SCSI_TMFUNC_RSP:
-		rc = iscsi_complete_pdu(conn, hdr, NULL, 0);
-		break;
-	default:
-		rc = ISCSI_ERR_BAD_OPCODE;
-		break;
-	}
-
-	if (rc == 0) {
-		/* Anything that comes with data should have
-		 * been handled above. */
-		if (tcp_conn->in.datalen)
-			return ISCSI_ERR_PROTO;
-		iscsi_tcp_hdr_recv_prep(tcp_conn);
-	}
-
-	return rc;
-}
-
 /**
- * iscsi_tcp_hdr_recv_done - process PDU header
- *
- * This is the callback invoked when the PDU header has
- * been received. If the header is followed by additional
- * header segments, we go back for more data.
- */
-static int
-iscsi_tcp_hdr_recv_done(struct iscsi_tcp_conn *tcp_conn,
-			struct iscsi_segment *segment)
-{
-	struct iscsi_conn *conn = tcp_conn->iscsi_conn;
-	struct iscsi_hdr *hdr;
-
-	/* Check if there are additional header segments
-	 * *prior* to computing the digest, because we
-	 * may need to go back to the caller for more.
-	 */
-	hdr = (struct iscsi_hdr *) tcp_conn->in.hdr_buf;
-	if (segment->copied == sizeof(struct iscsi_hdr) && hdr->hlength) {
-		/* Bump the header length - the caller will
-		 * just loop around and get the AHS for us, and
-		 * call again. */
-		unsigned int ahslen = hdr->hlength << 2;
-
-		/* Make sure we don't overflow */
-		if (sizeof(*hdr) + ahslen > sizeof(tcp_conn->in.hdr_buf))
-			return ISCSI_ERR_AHSLEN;
-
-		segment->total_size += ahslen;
-		segment->size += ahslen;
-		return 0;
-	}
-
-	/* We're done processing the header. See if we're doing
-	 * header digests; if so, set up the recv_digest buffer
-	 * and go back for more. */
-	if (conn->hdrdgst_en) {
-		if (segment->digest_len == 0) {
-			iscsi_tcp_segment_splice_digest(segment,
-							segment->recv_digest);
-			return 0;
-		}
-		iscsi_tcp_dgst_header(&tcp_conn->rx_hash, hdr,
-				      segment->total_copied - ISCSI_DIGEST_SIZE,
-				      segment->digest);
-
-		if (!iscsi_tcp_dgst_verify(tcp_conn, segment))
-			return ISCSI_ERR_HDR_DGST;
-	}
-
-	tcp_conn->in.hdr = hdr;
-	return iscsi_tcp_hdr_dissect(conn, hdr);
-}
-
-/**
- * iscsi_tcp_recv - TCP receive in sendfile fashion
+ * iscsi_sw_tcp_recv - TCP receive in sendfile fashion
  * @rd_desc: read descriptor
  * @skb: socket buffer
  * @offset: offset in skb
  * @len: skb->len - offset
- **/
-static int
-iscsi_tcp_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
-	       unsigned int offset, size_t len)
+ */
+static int iscsi_sw_tcp_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
+			     unsigned int offset, size_t len)
 {
 	struct iscsi_conn *conn = rd_desc->arg.data;
-	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
-	struct iscsi_segment *segment = &tcp_conn->in.segment;
-	struct skb_seq_state seq;
-	unsigned int consumed = 0;
-	int rc = 0;
+	unsigned int consumed, total_consumed = 0;
+	int status;
 
 	debug_tcp("in %d bytes\n", skb->len - offset);
 
-	if (unlikely(conn->suspend_rx)) {
-		debug_tcp("conn %d Rx suspended!\n", conn->id);
-		return 0;
-	}
+	do {
+		status = 0;
+		consumed = iscsi_tcp_recv_skb(conn, skb, offset, 0, &status);
+		offset += consumed;
+		total_consumed += consumed;
+	} while (consumed != 0 && status != ISCSI_TCP_SKB_DONE);
 
-	skb_prepare_seq_read(skb, offset, skb->len, &seq);
-	while (1) {
-		unsigned int avail;
-		const u8 *ptr;
-
-		avail = skb_seq_read(consumed, &ptr, &seq);
-		if (avail == 0) {
-			debug_tcp("no more data avail. Consumed %d\n",
-				  consumed);
-			break;
-		}
-		BUG_ON(segment->copied >= segment->size);
-
-		debug_tcp("skb %p ptr=%p avail=%u\n", skb, ptr, avail);
-		rc = iscsi_tcp_segment_recv(tcp_conn, segment, ptr, avail);
-		BUG_ON(rc == 0);
-		consumed += rc;
-
-		if (segment->total_copied >= segment->total_size) {
-			debug_tcp("segment done\n");
-			rc = segment->done(tcp_conn, segment);
-			if (rc != 0) {
-				skb_abort_seq_read(&seq);
-				goto error;
-			}
-
-			/* The done() functions sets up the
-			 * next segment. */
-		}
-	}
-	skb_abort_seq_read(&seq);
-	conn->rxdata_octets += consumed;
-	return consumed;
-
-error:
-	debug_tcp("Error receiving PDU, errno=%d\n", rc);
-	iscsi_conn_failure(conn, rc);
-	return 0;
+	debug_tcp("read %d bytes status %d\n", skb->len - offset, status);
+	return total_consumed;
 }
 
-static void
-iscsi_tcp_data_ready(struct sock *sk, int flag)
+static void iscsi_sw_tcp_data_ready(struct sock *sk, int flag)
 {
 	struct iscsi_conn *conn = sk->sk_user_data;
 	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
@@ -1000,7 +106,7 @@ iscsi_tcp_data_ready(struct sock *sk, int flag)
 	 */
 	rd_desc.arg.data = conn;
 	rd_desc.count = 1;
-	tcp_read_sock(sk, &rd_desc, iscsi_tcp_recv);
+	tcp_read_sock(sk, &rd_desc, iscsi_sw_tcp_recv);
 
 	read_unlock(&sk->sk_callback_lock);
 
@@ -1009,10 +115,10 @@ iscsi_tcp_data_ready(struct sock *sk, int flag)
 	iscsi_tcp_segment_unmap(&tcp_conn->in.segment);
 }
 
-static void
-iscsi_tcp_state_change(struct sock *sk)
+static void iscsi_sw_tcp_state_change(struct sock *sk)
 {
 	struct iscsi_tcp_conn *tcp_conn;
+	struct iscsi_sw_tcp_conn *tcp_sw_conn;
 	struct iscsi_conn *conn;
 	struct iscsi_session *session;
 	void (*old_state_change)(struct sock *);
@@ -1030,7 +136,8 @@ iscsi_tcp_state_change(struct sock *sk)
 	}
 
 	tcp_conn = conn->dd_data;
-	old_state_change = tcp_conn->old_state_change;
+	tcp_sw_conn = tcp_conn->dd_data;
+	old_state_change = tcp_sw_conn->old_state_change;
 
 	read_unlock(&sk->sk_callback_lock);
 
@@ -1041,63 +148,123 @@ iscsi_tcp_state_change(struct sock *sk)
  * iscsi_write_space - Called when more output buffer space is available
  * @sk: socket space is available for
  **/
-static void
-iscsi_write_space(struct sock *sk)
+static void iscsi_sw_tcp_write_space(struct sock *sk)
 {
 	struct iscsi_conn *conn = (struct iscsi_conn*)sk->sk_user_data;
 	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
 
-	tcp_conn->old_write_space(sk);
+	tcp_sw_conn->old_write_space(sk);
 	debug_tcp("iscsi_write_space: cid %d\n", conn->id);
 	scsi_queue_work(conn->session->host, &conn->xmitwork);
 }
 
-static void
-iscsi_conn_set_callbacks(struct iscsi_conn *conn)
+static void iscsi_sw_tcp_conn_set_callbacks(struct iscsi_conn *conn)
 {
 	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
-	struct sock *sk = tcp_conn->sock->sk;
+	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
+	struct sock *sk = tcp_sw_conn->sock->sk;
 
 	/* assign new callbacks */
 	write_lock_bh(&sk->sk_callback_lock);
 	sk->sk_user_data = conn;
-	tcp_conn->old_data_ready = sk->sk_data_ready;
-	tcp_conn->old_state_change = sk->sk_state_change;
-	tcp_conn->old_write_space = sk->sk_write_space;
-	sk->sk_data_ready = iscsi_tcp_data_ready;
-	sk->sk_state_change = iscsi_tcp_state_change;
-	sk->sk_write_space = iscsi_write_space;
+	tcp_sw_conn->old_data_ready = sk->sk_data_ready;
+	tcp_sw_conn->old_state_change = sk->sk_state_change;
+	tcp_sw_conn->old_write_space = sk->sk_write_space;
+	sk->sk_data_ready = iscsi_sw_tcp_data_ready;
+	sk->sk_state_change = iscsi_sw_tcp_state_change;
+	sk->sk_write_space = iscsi_sw_tcp_write_space;
 	write_unlock_bh(&sk->sk_callback_lock);
 }
 
 static void
-iscsi_conn_restore_callbacks(struct iscsi_tcp_conn *tcp_conn)
+iscsi_sw_tcp_conn_restore_callbacks(struct iscsi_sw_tcp_conn *tcp_sw_conn)
 {
-	struct sock *sk = tcp_conn->sock->sk;
+	struct sock *sk = tcp_sw_conn->sock->sk;
 
 	/* restore socket callbacks, see also: iscsi_conn_set_callbacks() */
 	write_lock_bh(&sk->sk_callback_lock);
 	sk->sk_user_data    = NULL;
-	sk->sk_data_ready   = tcp_conn->old_data_ready;
-	sk->sk_state_change = tcp_conn->old_state_change;
-	sk->sk_write_space  = tcp_conn->old_write_space;
+	sk->sk_data_ready   = tcp_sw_conn->old_data_ready;
+	sk->sk_state_change = tcp_sw_conn->old_state_change;
+	sk->sk_write_space  = tcp_sw_conn->old_write_space;
 	sk->sk_no_check	 = 0;
 	write_unlock_bh(&sk->sk_callback_lock);
 }
 
 /**
- * iscsi_xmit - TCP transmit
+ * iscsi_sw_tcp_xmit_segment - transmit segment
+ * @tcp_conn: the iSCSI TCP connection
+ * @segment: the buffer to transmit
+ *
+ * This function transmits as much of the buffer as
+ * the network layer will accept, and returns the number of
+ * bytes transmitted.
+ *
+ * If CRC hashing is enabled, the function will compute the
+ * hash as it goes. When the entire segment has been transmitted,
+ * it will retrieve the hash value and send it as well.
+ */
+static int iscsi_sw_tcp_xmit_segment(struct iscsi_tcp_conn *tcp_conn,
+				     struct iscsi_segment *segment)
+{
+	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
+	struct socket *sk = tcp_sw_conn->sock;
+	unsigned int copied = 0;
+	int r = 0;
+
+	while (!iscsi_tcp_segment_done(tcp_conn, segment, 0, r)) {
+		struct scatterlist *sg;
+		unsigned int offset, copy;
+		int flags = 0;
+
+		r = 0;
+		offset = segment->copied;
+		copy = segment->size - offset;
+
+		if (segment->total_copied + segment->size < segment->total_size)
+			flags |= MSG_MORE;
+
+		/* Use sendpage if we can; else fall back to sendmsg */
+		if (!segment->data) {
+			sg = segment->sg;
+			offset += segment->sg_offset + sg->offset;
+			r = tcp_sw_conn->sendpage(sk, sg_page(sg), offset,
+						  copy, flags);
+		} else {
+			struct msghdr msg = { .msg_flags = flags };
+			struct kvec iov = {
+				.iov_base = segment->data + offset,
+				.iov_len = copy
+			};
+
+			r = kernel_sendmsg(sk, &msg, &iov, 1, copy);
+		}
+
+		if (r < 0) {
+			iscsi_tcp_segment_unmap(segment);
+			if (copied || r == -EAGAIN)
+				break;
+			return r;
+		}
+		copied += r;
+	}
+	return copied;
+}
+
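Editorial note on iscsi_sw_tcp_xmit_segment above: MSG_MORE is set while more of the PDU remains, telling TCP that further data follows so it can coalesce rather than flushing a short segment per call; sendpage is used only for scatterlist data, with kernel_sendmsg as the linear-buffer fallback. An illustration of the MSG_MORE effect (hypothetical buffers):

	struct msghdr msg = { .msg_flags = MSG_MORE };
	kernel_sendmsg(sock, &msg, &hdr_iov, 1, hdr_len);	/* header: hold back */
	msg.msg_flags = 0;
	kernel_sendmsg(sock, &msg, &data_iov, 1, data_len);	/* payload: flush */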
+/**
+ * iscsi_sw_tcp_xmit - TCP transmit
  **/
-static int
-iscsi_xmit(struct iscsi_conn *conn)
+static int iscsi_sw_tcp_xmit(struct iscsi_conn *conn)
 {
 	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
-	struct iscsi_segment *segment = &tcp_conn->out.segment;
+	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
+	struct iscsi_segment *segment = &tcp_sw_conn->out.segment;
 	unsigned int consumed = 0;
 	int rc = 0;
 
 	while (1) {
-		rc = iscsi_tcp_xmit_segment(tcp_conn, segment);
+		rc = iscsi_sw_tcp_xmit_segment(tcp_conn, segment);
 		if (rc < 0) {
 			rc = ISCSI_ERR_XMIT_FAILED;
 			goto error;
@@ -1132,22 +299,22 @@ error:
 /**
  * iscsi_tcp_xmit_qlen - return the number of bytes queued for xmit
  */
-static inline int
-iscsi_tcp_xmit_qlen(struct iscsi_conn *conn)
+static inline int iscsi_sw_tcp_xmit_qlen(struct iscsi_conn *conn)
 {
 	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
-	struct iscsi_segment *segment = &tcp_conn->out.segment;
+	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
+	struct iscsi_segment *segment = &tcp_sw_conn->out.segment;
 
 	return segment->total_copied - segment->total_size;
 }
 
-static inline int
-iscsi_tcp_flush(struct iscsi_conn *conn)
+static int iscsi_sw_tcp_pdu_xmit(struct iscsi_task *task)
 {
+	struct iscsi_conn *conn = task->conn;
 	int rc;
 
-	while (iscsi_tcp_xmit_qlen(conn)) {
-		rc = iscsi_xmit(conn);
+	while (iscsi_sw_tcp_xmit_qlen(conn)) {
+		rc = iscsi_sw_tcp_xmit(conn);
 		if (rc == 0)
 			return -EAGAIN;
 		if (rc < 0)
@@ -1161,27 +328,31 @@ iscsi_tcp_flush(struct iscsi_conn *conn)
  * This is called when we're done sending the header.
  * Simply copy the data_segment to the send segment, and return.
  */
-static int
-iscsi_tcp_send_hdr_done(struct iscsi_tcp_conn *tcp_conn,
-			struct iscsi_segment *segment)
+static int iscsi_sw_tcp_send_hdr_done(struct iscsi_tcp_conn *tcp_conn,
+				      struct iscsi_segment *segment)
 {
-	tcp_conn->out.segment = tcp_conn->out.data_segment;
+	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
+
+	tcp_sw_conn->out.segment = tcp_sw_conn->out.data_segment;
 	debug_tcp("Header done. Next segment size %u total_size %u\n",
-		  tcp_conn->out.segment.size, tcp_conn->out.segment.total_size);
+		  tcp_sw_conn->out.segment.size,
+		  tcp_sw_conn->out.segment.total_size);
 	return 0;
 }
 
-static void
-iscsi_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr, size_t hdrlen)
+static void iscsi_sw_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr,
+				       size_t hdrlen)
 {
 	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
 
 	debug_tcp("%s(%p%s)\n", __func__, tcp_conn,
 			conn->hdrdgst_en? ", digest enabled" : "");
 
 	/* Clear the data segment - needs to be filled in by the
 	 * caller using iscsi_tcp_send_data_prep() */
-	memset(&tcp_conn->out.data_segment, 0, sizeof(struct iscsi_segment));
+	memset(&tcp_sw_conn->out.data_segment, 0,
+	       sizeof(struct iscsi_segment));
 
 	/* If header digest is enabled, compute the CRC and
 	 * place the digest into the same buffer. We make
@@ -1189,7 +360,7 @@ iscsi_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr, size_t hdrlen)
 	 * sufficient room.
 	 */
 	if (conn->hdrdgst_en) {
-		iscsi_tcp_dgst_header(&tcp_conn->tx_hash, hdr, hdrlen,
+		iscsi_tcp_dgst_header(&tcp_sw_conn->tx_hash, hdr, hdrlen,
 				      hdr + hdrlen);
 		hdrlen += ISCSI_DIGEST_SIZE;
 	}
@@ -1197,10 +368,10 @@ iscsi_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr, size_t hdrlen)
 	/* Remember header pointer for later, when we need
 	 * to decide whether there's a payload to go along
 	 * with the header. */
-	tcp_conn->out.hdr = hdr;
+	tcp_sw_conn->out.hdr = hdr;
 
-	iscsi_segment_init_linear(&tcp_conn->out.segment, hdr, hdrlen,
-				iscsi_tcp_send_hdr_done, NULL);
+	iscsi_segment_init_linear(&tcp_sw_conn->out.segment, hdr, hdrlen,
+				  iscsi_sw_tcp_send_hdr_done, NULL);
 }
 
 /*
@@ -1209,11 +380,12 @@ iscsi_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr, size_t hdrlen)
  * of by the iscsi_segment routines.
  */
 static int
-iscsi_tcp_send_data_prep(struct iscsi_conn *conn, struct scatterlist *sg,
-			 unsigned int count, unsigned int offset,
-			 unsigned int len)
+iscsi_sw_tcp_send_data_prep(struct iscsi_conn *conn, struct scatterlist *sg,
+			    unsigned int count, unsigned int offset,
+			    unsigned int len)
 {
 	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
 	struct hash_desc *tx_hash = NULL;
 	unsigned int hdr_spec_len;
 
@@ -1223,22 +395,23 @@ iscsi_tcp_send_data_prep(struct iscsi_conn *conn, struct scatterlist *sg,
 
 	/* Make sure the datalen matches what the caller
 	   said he would send. */
-	hdr_spec_len = ntoh24(tcp_conn->out.hdr->dlength);
+	hdr_spec_len = ntoh24(tcp_sw_conn->out.hdr->dlength);
 	WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len));
 
 	if (conn->datadgst_en)
-		tx_hash = &tcp_conn->tx_hash;
+		tx_hash = &tcp_sw_conn->tx_hash;
 
-	return iscsi_segment_seek_sg(&tcp_conn->out.data_segment,
-				   sg, count, offset, len,
-				   NULL, tx_hash);
+	return iscsi_segment_seek_sg(&tcp_sw_conn->out.data_segment,
+				     sg, count, offset, len,
+				     NULL, tx_hash);
 }
 
 static void
-iscsi_tcp_send_linear_data_prepare(struct iscsi_conn *conn, void *data,
+iscsi_sw_tcp_send_linear_data_prep(struct iscsi_conn *conn, void *data,
 				   size_t len)
 {
 	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
 	struct hash_desc *tx_hash = NULL;
 	unsigned int hdr_spec_len;
 
@@ -1247,341 +420,160 @@ iscsi_tcp_send_linear_data_prepare(struct iscsi_conn *conn, void *data,
 
 	/* Make sure the datalen matches what the caller
 	   said he would send. */
-	hdr_spec_len = ntoh24(tcp_conn->out.hdr->dlength);
+	hdr_spec_len = ntoh24(tcp_sw_conn->out.hdr->dlength);
 	WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len));
 
 	if (conn->datadgst_en)
-		tx_hash = &tcp_conn->tx_hash;
+		tx_hash = &tcp_sw_conn->tx_hash;
 
-	iscsi_segment_init_linear(&tcp_conn->out.data_segment,
+	iscsi_segment_init_linear(&tcp_sw_conn->out.data_segment,
 				data, len, NULL, tx_hash);
 }
 
-/**
- * iscsi_solicit_data_cont - initialize next Data-Out
- * @conn: iscsi connection
- * @task: scsi command task
- * @r2t: R2T info
- * @left: bytes left to transfer
- *
- * Notes:
- *	Initialize next Data-Out within this R2T sequence and continue
- *	to process next Scatter-Gather element(if any) of this SCSI command.
- *
- *	Called under connection lock.
- **/
-static int
-iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_task *task,
-			struct iscsi_r2t_info *r2t)
+static int iscsi_sw_tcp_pdu_init(struct iscsi_task *task,
+				 unsigned int offset, unsigned int count)
 {
-	struct iscsi_data *hdr;
-	int new_offset, left;
-
-	BUG_ON(r2t->data_length - r2t->sent < 0);
-	left = r2t->data_length - r2t->sent;
-	if (left == 0)
-		return 0;
-
-	hdr = &r2t->dtask.hdr;
-	memset(hdr, 0, sizeof(struct iscsi_data));
-	hdr->ttt = r2t->ttt;
-	hdr->datasn = cpu_to_be32(r2t->solicit_datasn);
-	r2t->solicit_datasn++;
-	hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
-	memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
-	hdr->itt = task->hdr->itt;
-	hdr->exp_statsn = r2t->exp_statsn;
-	new_offset = r2t->data_offset + r2t->sent;
-	hdr->offset = cpu_to_be32(new_offset);
-	if (left > conn->max_xmit_dlength) {
-		hton24(hdr->dlength, conn->max_xmit_dlength);
-		r2t->data_count = conn->max_xmit_dlength;
-	} else {
-		hton24(hdr->dlength, left);
-		r2t->data_count = left;
-		hdr->flags = ISCSI_FLAG_CMD_FINAL;
-	}
-
-	conn->dataout_pdus_cnt++;
-	return 1;
-}
-
-/**
- * iscsi_tcp_task - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
- * @conn: iscsi connection
- * @task: scsi command task
- * @sc: scsi command
- **/
-static int
-iscsi_tcp_task_init(struct iscsi_task *task)
-{
-	struct iscsi_tcp_task *tcp_task = task->dd_data;
 	struct iscsi_conn *conn = task->conn;
-	struct scsi_cmnd *sc = task->sc;
-	int err;
+	int err = 0;
 
-	if (!sc) {
-		/*
-		 * mgmt tasks do not have a scatterlist since they come
-		 * in from the iscsi interface.
-		 */
-		debug_scsi("mtask deq [cid %d itt 0x%x]\n", conn->id,
-			   task->itt);
-
-		/* Prepare PDU, optionally w/ immediate data */
-		iscsi_tcp_send_hdr_prep(conn, task->hdr, sizeof(*task->hdr));
-
-		/* If we have immediate data, attach a payload */
-		if (task->data_count)
-			iscsi_tcp_send_linear_data_prepare(conn, task->data,
-							   task->data_count);
-		return 0;
-	}
+	iscsi_sw_tcp_send_hdr_prep(conn, task->hdr, task->hdr_len);
 
-	BUG_ON(__kfifo_len(tcp_task->r2tqueue));
-	tcp_task->sent = 0;
-	tcp_task->exp_datasn = 0;
+	if (!count)
+		return 0;
 
-	/* Prepare PDU, optionally w/ immediate data */
-	debug_scsi("task deq [cid %d itt 0x%x imm %d unsol %d]\n",
-		    conn->id, task->itt, task->imm_count,
-		    task->unsol_count);
-	iscsi_tcp_send_hdr_prep(conn, task->hdr, task->hdr_len);
+	if (!task->sc)
+		iscsi_sw_tcp_send_linear_data_prep(conn, task->data, count);
+	else {
+		struct scsi_data_buffer *sdb = scsi_out(task->sc);
 
-	if (!task->imm_count)
-		return 0;
+		err = iscsi_sw_tcp_send_data_prep(conn, sdb->table.sgl,
+						  sdb->table.nents, offset,
+						  count);
+	}
 
-	/* If we have immediate data, attach a payload */
-	err = iscsi_tcp_send_data_prep(conn, scsi_out(sc)->table.sgl,
-				       scsi_out(sc)->table.nents,
-				       0, task->imm_count);
-	if (err)
-		return err;
-	tcp_task->sent += task->imm_count;
-	task->imm_count = 0;
+	if (err) {
+		iscsi_conn_failure(conn, err);
+		return -EIO;
+	}
 	return 0;
 }
 
-/*
- * iscsi_tcp_task_xmit - xmit normal PDU task
- * @task: iscsi command task
- *
- * We're expected to return 0 when everything was transmitted successfully,
- * -EAGAIN if there's still data in the queue, or != 0 for any other kind
- * of error.
- */
-static int
-iscsi_tcp_task_xmit(struct iscsi_task *task)
+static int iscsi_sw_tcp_pdu_alloc(struct iscsi_task *task, uint8_t opcode)
 {
-	struct iscsi_conn *conn = task->conn;
 	struct iscsi_tcp_task *tcp_task = task->dd_data;
-	struct scsi_cmnd *sc = task->sc;
-	struct scsi_data_buffer *sdb;
-	int rc = 0;
-
-flush:
-	/* Flush any pending data first. */
-	rc = iscsi_tcp_flush(conn);
-	if (rc < 0)
-		return rc;
-
-	/* mgmt command */
-	if (!sc) {
-		if (task->hdr->itt == RESERVED_ITT)
-			iscsi_put_task(task);
-		return 0;
-	}
-
-	/* Are we done already? */
-	if (sc->sc_data_direction != DMA_TO_DEVICE)
-		return 0;
 
-	sdb = scsi_out(sc);
-	if (task->unsol_count != 0) {
-		struct iscsi_data *hdr = &tcp_task->unsol_dtask.hdr;
-
-		/* Prepare a header for the unsolicited PDU.
-		 * The amount of data we want to send will be
-		 * in task->data_count.
-		 * FIXME: return the data count instead.
-		 */
-		iscsi_prep_unsolicit_data_pdu(task, hdr);
-
-		debug_tcp("unsol dout [itt 0x%x doff %d dlen %d]\n",
-				task->itt, tcp_task->sent, task->data_count);
-
-		iscsi_tcp_send_hdr_prep(conn, hdr, sizeof(*hdr));
-		rc = iscsi_tcp_send_data_prep(conn, sdb->table.sgl,
-					      sdb->table.nents, tcp_task->sent,
-					      task->data_count);
-		if (rc)
-			goto fail;
-		tcp_task->sent += task->data_count;
-		task->unsol_count -= task->data_count;
-		goto flush;
-	} else {
-		struct iscsi_session *session = conn->session;
-		struct iscsi_r2t_info *r2t;
-
-		/* All unsolicited PDUs sent. Check for solicited PDUs.
-		 */
-		spin_lock_bh(&session->lock);
-		r2t = tcp_task->r2t;
-		if (r2t != NULL) {
-			/* Continue with this R2T? */
-			if (!iscsi_solicit_data_cont(conn, task, r2t)) {
-				debug_scsi("  done with r2t %p\n", r2t);
-
-				__kfifo_put(tcp_task->r2tpool.queue,
-					    (void*)&r2t, sizeof(void*));
-				tcp_task->r2t = r2t = NULL;
-			}
-		}
-
-		if (r2t == NULL) {
-			__kfifo_get(tcp_task->r2tqueue, (void*)&tcp_task->r2t,
-				    sizeof(void*));
-			r2t = tcp_task->r2t;
-		}
-		spin_unlock_bh(&session->lock);
-
-		/* Waiting for more R2Ts to arrive. */
-		if (r2t == NULL) {
-			debug_tcp("no R2Ts yet\n");
-			return 0;
-		}
-
-		debug_scsi("sol dout %p [dsn %d itt 0x%x doff %d dlen %d]\n",
-			r2t, r2t->solicit_datasn - 1, task->itt,
-			r2t->data_offset + r2t->sent, r2t->data_count);
-
-		iscsi_tcp_send_hdr_prep(conn, &r2t->dtask.hdr,
-					sizeof(struct iscsi_hdr));
-
-		rc = iscsi_tcp_send_data_prep(conn, sdb->table.sgl,
-					      sdb->table.nents,
-					      r2t->data_offset + r2t->sent,
-					      r2t->data_count);
-		if (rc)
-			goto fail;
-		tcp_task->sent += r2t->data_count;
-		r2t->sent += r2t->data_count;
-		goto flush;
-	}
+	task->hdr = task->dd_data + sizeof(*tcp_task);
+	task->hdr_max = sizeof(struct iscsi_sw_tcp_hdrbuf) - ISCSI_DIGEST_SIZE;
 	return 0;
-fail:
-	iscsi_conn_failure(conn, rc);
-	return -EIO;
 }
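Editorial note: the large deletions in this file are the point of the patch — generic PDU and R2T sequencing moves into the shared libiscsi_tcp code, and this module shrinks to the software-socket transport callouts. The resulting division of labor, sketched from the functions above (the libiscsi side is summarized, not quoted):

	/* per task: iscsi_sw_tcp_pdu_alloc() reserves header space */
	task->hdr = task->dd_data + sizeof(*tcp_task);
	/* per PDU: iscsi_sw_tcp_pdu_init() preps header + data segments */
	iscsi_sw_tcp_send_hdr_prep(conn, task->hdr, task->hdr_len);
	iscsi_sw_tcp_send_data_prep(conn, sgl, nents, offset, count);
	/* then: iscsi_sw_tcp_pdu_xmit() drains the queued segments */
	while (iscsi_sw_tcp_xmit_qlen(conn))
		iscsi_sw_tcp_xmit(conn);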
 
 static struct iscsi_cls_conn *
-iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
+iscsi_sw_tcp_conn_create(struct iscsi_cls_session *cls_session,
+			 uint32_t conn_idx)
 {
 	struct iscsi_conn *conn;
 	struct iscsi_cls_conn *cls_conn;
 	struct iscsi_tcp_conn *tcp_conn;
+	struct iscsi_sw_tcp_conn *tcp_sw_conn;
 
-	cls_conn = iscsi_conn_setup(cls_session, sizeof(*tcp_conn), conn_idx);
+	cls_conn = iscsi_tcp_conn_setup(cls_session, sizeof(*tcp_sw_conn),
+					conn_idx);
 	if (!cls_conn)
 		return NULL;
 	conn = cls_conn->dd_data;
-	/*
-	 * due to strange issues with iser these are not set
-	 * in iscsi_conn_setup
-	 */
-	conn->max_recv_dlength = ISCSI_DEF_MAX_RECV_SEG_LEN;
-
 	tcp_conn = conn->dd_data;
-	tcp_conn->iscsi_conn = conn;
+	tcp_sw_conn = tcp_conn->dd_data;
 
-	tcp_conn->tx_hash.tfm = crypto_alloc_hash("crc32c", 0,
-						  CRYPTO_ALG_ASYNC);
-	tcp_conn->tx_hash.flags = 0;
-	if (IS_ERR(tcp_conn->tx_hash.tfm))
+	tcp_sw_conn->tx_hash.tfm = crypto_alloc_hash("crc32c", 0,
+						     CRYPTO_ALG_ASYNC);
+	tcp_sw_conn->tx_hash.flags = 0;
+	if (IS_ERR(tcp_sw_conn->tx_hash.tfm))
 		goto free_conn;
 
-	tcp_conn->rx_hash.tfm = crypto_alloc_hash("crc32c", 0,
-						  CRYPTO_ALG_ASYNC);
-	tcp_conn->rx_hash.flags = 0;
-	if (IS_ERR(tcp_conn->rx_hash.tfm))
+	tcp_sw_conn->rx_hash.tfm = crypto_alloc_hash("crc32c", 0,
+						     CRYPTO_ALG_ASYNC);
+	tcp_sw_conn->rx_hash.flags = 0;
+	if (IS_ERR(tcp_sw_conn->rx_hash.tfm))
 		goto free_tx_tfm;
+	tcp_conn->rx_hash = &tcp_sw_conn->rx_hash;
 
 	return cls_conn;
 
 free_tx_tfm:
-	crypto_free_hash(tcp_conn->tx_hash.tfm);
+	crypto_free_hash(tcp_sw_conn->tx_hash.tfm);
 free_conn:
 	iscsi_conn_printk(KERN_ERR, conn,
 			  "Could not create connection due to crc32c "
 			  "loading error. Make sure the crc32c "
 			  "module is built as a module or into the "
 			  "kernel\n");
-	iscsi_conn_teardown(cls_conn);
+	iscsi_tcp_conn_teardown(cls_conn);
 	return NULL;
 }
 
-static void
-iscsi_tcp_release_conn(struct iscsi_conn *conn)
+static void iscsi_sw_tcp_release_conn(struct iscsi_conn *conn)
 {
 	struct iscsi_session *session = conn->session;
 	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
-	struct socket *sock = tcp_conn->sock;
+	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
+	struct socket *sock = tcp_sw_conn->sock;
 
 	if (!sock)
 		return;
 
 	sock_hold(sock->sk);
-	iscsi_conn_restore_callbacks(tcp_conn);
+	iscsi_sw_tcp_conn_restore_callbacks(tcp_sw_conn);
 	sock_put(sock->sk);
 
 	spin_lock_bh(&session->lock);
-	tcp_conn->sock = NULL;
+	tcp_sw_conn->sock = NULL;
 	spin_unlock_bh(&session->lock);
 	sockfd_put(sock);
 }
 
-static void
-iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn)
+static void iscsi_sw_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn)
 {
 	struct iscsi_conn *conn = cls_conn->dd_data;
 	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
 
-	iscsi_tcp_release_conn(conn);
+	iscsi_sw_tcp_release_conn(conn);
 
-	if (tcp_conn->tx_hash.tfm)
-		crypto_free_hash(tcp_conn->tx_hash.tfm);
-	if (tcp_conn->rx_hash.tfm)
-		crypto_free_hash(tcp_conn->rx_hash.tfm);
+	if (tcp_sw_conn->tx_hash.tfm)
+		crypto_free_hash(tcp_sw_conn->tx_hash.tfm);
+	if (tcp_sw_conn->rx_hash.tfm)
+		crypto_free_hash(tcp_sw_conn->rx_hash.tfm);
 
-	iscsi_conn_teardown(cls_conn);
+	iscsi_tcp_conn_teardown(cls_conn);
 }
 
-static void
-iscsi_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
+static void iscsi_sw_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
 {
 	struct iscsi_conn *conn = cls_conn->dd_data;
 	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
 
 	/* userspace may have goofed up and not bound us */
-	if (!tcp_conn->sock)
+	if (!tcp_sw_conn->sock)
 		return;
 	/*
 	 * Make sure our recv side is stopped.
 	 * Older tools called conn stop before ep_disconnect
 	 * so IO could still be coming in.
 	 */
-	write_lock_bh(&tcp_conn->sock->sk->sk_callback_lock);
+	write_lock_bh(&tcp_sw_conn->sock->sk->sk_callback_lock);
 	set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
-	write_unlock_bh(&tcp_conn->sock->sk->sk_callback_lock);
+	write_unlock_bh(&tcp_sw_conn->sock->sk->sk_callback_lock);
 
 	iscsi_conn_stop(cls_conn, flag);
-	iscsi_tcp_release_conn(conn);
+	iscsi_sw_tcp_release_conn(conn);
 }
 
-static int iscsi_tcp_get_addr(struct iscsi_conn *conn, struct socket *sock,
-			      char *buf, int *port,
-			      int (*getname)(struct socket *, struct sockaddr *,
-					int *addrlen))
+static int iscsi_sw_tcp_get_addr(struct iscsi_conn *conn, struct socket *sock,
+				 char *buf, int *port,
+				 int (*getname)(struct socket *,
+						struct sockaddr *,
+						int *addrlen))
 {
 	struct sockaddr_storage *addr;
 	struct sockaddr_in6 *sin6;
@@ -1619,14 +611,15 @@ free_addr:
 }
 
 static int
-iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
-		    struct iscsi_cls_conn *cls_conn, uint64_t transport_eph,
-		    int is_leading)
+iscsi_sw_tcp_conn_bind(struct iscsi_cls_session *cls_session,
+		       struct iscsi_cls_conn *cls_conn, uint64_t transport_eph,
+		       int is_leading)
 {
 	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
 	struct iscsi_host *ihost = shost_priv(shost);
 	struct iscsi_conn *conn = cls_conn->dd_data;
 	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
 	struct sock *sk;
 	struct socket *sock;
 	int err;
@@ -1643,13 +636,13 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
 	 * userspace may still want to query the values since we will
 	 * be using them for the reconnect
 	 */
-	err = iscsi_tcp_get_addr(conn, sock, conn->portal_address,
-				 &conn->portal_port, kernel_getpeername);
+	err = iscsi_sw_tcp_get_addr(conn, sock, conn->portal_address,
+				    &conn->portal_port, kernel_getpeername);
 	if (err)
 		goto free_socket;
 
-	err = iscsi_tcp_get_addr(conn, sock, ihost->local_address,
-				&ihost->local_port, kernel_getsockname);
+	err = iscsi_sw_tcp_get_addr(conn, sock, ihost->local_address,
+				    &ihost->local_port, kernel_getsockname);
 	if (err)
 		goto free_socket;
 
@@ -1658,7 +651,7 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
 		goto free_socket;
 
 	/* bind iSCSI connection and socket */
-	tcp_conn->sock = sock;
+	tcp_sw_conn->sock = sock;
 
 	/* setup Socket parameters */
 	sk = sock->sk;
@@ -1666,8 +659,8 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
 	sk->sk_sndtimeo = 15 * HZ; /* FIXME: make it configurable */
 	sk->sk_allocation = GFP_ATOMIC;
 
-	iscsi_conn_set_callbacks(conn);
-	tcp_conn->sendpage = tcp_conn->sock->ops->sendpage;
+	iscsi_sw_tcp_conn_set_callbacks(conn);
+	tcp_sw_conn->sendpage = tcp_sw_conn->sock->ops->sendpage;
 	/*
 	 * set receive state machine into initial state
 	 */
@@ -1679,74 +672,14 @@ free_socket:
 	return err;
 }
 
-static int
-iscsi_r2tpool_alloc(struct iscsi_session *session)
-{
-	int i;
-	int cmd_i;
-
-	/*
-	 * initialize per-task: R2T pool and xmit queue
-	 */
-	for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
-	        struct iscsi_task *task = session->cmds[cmd_i];
-		struct iscsi_tcp_task *tcp_task = task->dd_data;
-
-		/*
-		 * pre-allocated x4 as much r2ts to handle race when
-		 * target acks DataOut faster than we data_xmit() queues
-		 * could replenish r2tqueue.
-		 */
-
-		/* R2T pool */
-		if (iscsi_pool_init(&tcp_task->r2tpool, session->max_r2t * 4, NULL,
-				    sizeof(struct iscsi_r2t_info))) {
-			goto r2t_alloc_fail;
-		}
-
-		/* R2T xmit queue */
-		tcp_task->r2tqueue = kfifo_alloc(
-		      session->max_r2t * 4 * sizeof(void*), GFP_KERNEL, NULL);
-		if (tcp_task->r2tqueue == ERR_PTR(-ENOMEM)) {
-			iscsi_pool_free(&tcp_task->r2tpool);
-			goto r2t_alloc_fail;
-		}
-	}
-
-	return 0;
-
-r2t_alloc_fail:
-	for (i = 0; i < cmd_i; i++) {
-		struct iscsi_task *task = session->cmds[i];
-		struct iscsi_tcp_task *tcp_task = task->dd_data;
-
-		kfifo_free(tcp_task->r2tqueue);
-		iscsi_pool_free(&tcp_task->r2tpool);
-	}
-	return -ENOMEM;
-}
-
-static void
-iscsi_r2tpool_free(struct iscsi_session *session)
-{
-	int i;
-
-	for (i = 0; i < session->cmds_max; i++) {
-		struct iscsi_task *task = session->cmds[i];
-		struct iscsi_tcp_task *tcp_task = task->dd_data;
-
-		kfifo_free(tcp_task->r2tqueue);
-		iscsi_pool_free(&tcp_task->r2tpool);
-	}
-}
-
-static int
-iscsi_conn_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param,
-		     char *buf, int buflen)
+static int iscsi_sw_tcp_conn_set_param(struct iscsi_cls_conn *cls_conn,
+				       enum iscsi_param param, char *buf,
+				       int buflen)
 {
 	struct iscsi_conn *conn = cls_conn->dd_data;
 	struct iscsi_session *session = conn->session;
 	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
 	int value;
 
 	switch(param) {
@@ -1755,8 +688,8 @@ iscsi_conn_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param,
 		break;
 	case ISCSI_PARAM_DATADGST_EN:
 		iscsi_set_param(cls_conn, param, buf, buflen);
-		tcp_conn->sendpage = conn->datadgst_en ?
-			sock_no_sendpage : tcp_conn->sock->ops->sendpage;
+		tcp_sw_conn->sendpage = conn->datadgst_en ?
+			sock_no_sendpage : tcp_sw_conn->sock->ops->sendpage;
 		break;
 	case ISCSI_PARAM_MAX_R2T:
 		sscanf(buf, "%d", &value);
@@ -1764,9 +697,9 @@ iscsi_conn_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param,
 			return -EINVAL;
 		if (session->max_r2t == value)
 			break;
-		iscsi_r2tpool_free(session);
+		iscsi_tcp_r2tpool_free(session);
 		iscsi_set_param(cls_conn, param, buf, buflen);
-		if (iscsi_r2tpool_alloc(session))
+		if (iscsi_tcp_r2tpool_alloc(session))
 			return -ENOMEM;
 		break;
 	default:
@@ -1776,9 +709,8 @@ iscsi_conn_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param,
 	return 0;
 }
 
-static int
-iscsi_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
-			 enum iscsi_param param, char *buf)
+static int iscsi_sw_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
+				       enum iscsi_param param, char *buf)
 {
 	struct iscsi_conn *conn = cls_conn->dd_data;
 	int len;
@@ -1802,48 +734,42 @@ iscsi_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
 }
 
 static void
-iscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats)
+iscsi_sw_tcp_conn_get_stats(struct iscsi_cls_conn *cls_conn,
+			    struct iscsi_stats *stats)
 {
 	struct iscsi_conn *conn = cls_conn->dd_data;
 	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
 
-	stats->txdata_octets = conn->txdata_octets;
-	stats->rxdata_octets = conn->rxdata_octets;
-	stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
-	stats->dataout_pdus = conn->dataout_pdus_cnt;
-	stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
-	stats->datain_pdus = conn->datain_pdus_cnt;
-	stats->r2t_pdus = conn->r2t_pdus_cnt;
-	stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
-	stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
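+	/* generic PDU/octet counters are now filled in by iscsi_tcp_conn_get_stats() below */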
 	stats->custom_length = 3;
 	strcpy(stats->custom[0].desc, "tx_sendpage_failures");
-	stats->custom[0].value = tcp_conn->sendpage_failures_cnt;
+	stats->custom[0].value = tcp_sw_conn->sendpage_failures_cnt;
 	strcpy(stats->custom[1].desc, "rx_discontiguous_hdr");
-	stats->custom[1].value = tcp_conn->discontiguous_hdr_cnt;
+	stats->custom[1].value = tcp_sw_conn->discontiguous_hdr_cnt;
 	strcpy(stats->custom[2].desc, "eh_abort_cnt");
 	stats->custom[2].value = conn->eh_abort_cnt;
+
+	iscsi_tcp_conn_get_stats(cls_conn, stats);
 }
 
 static struct iscsi_cls_session *
-iscsi_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
-			 uint16_t qdepth, uint32_t initial_cmdsn,
-			 uint32_t *hostno)
+iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
+			    uint16_t qdepth, uint32_t initial_cmdsn,
+			    uint32_t *hostno)
 {
 	struct iscsi_cls_session *cls_session;
 	struct iscsi_session *session;
 	struct Scsi_Host *shost;
-	int cmd_i;
 
 	if (ep) {
 		printk(KERN_ERR "iscsi_tcp: invalid ep %p.\n", ep);
 		return NULL;
 	}
 
-	shost = iscsi_host_alloc(&iscsi_sht, 0, qdepth);
+	shost = iscsi_host_alloc(&iscsi_sw_tcp_sht, 0, qdepth);
 	if (!shost)
 		return NULL;
-	shost->transportt = iscsi_tcp_scsi_transport;
+	shost->transportt = iscsi_sw_tcp_scsi_transport;
 	shost->max_lun = iscsi_max_lun;
 	shost->max_id = 0;
 	shost->max_channel = 0;
@@ -1853,23 +779,17 @@ iscsi_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
 		goto free_host;
 	*hostno = shost->host_no;
 
-	cls_session = iscsi_session_setup(&iscsi_tcp_transport, shost, cmds_max,
-					  sizeof(struct iscsi_tcp_task),
+	cls_session = iscsi_session_setup(&iscsi_sw_tcp_transport, shost,
+					  cmds_max,
+					  sizeof(struct iscsi_tcp_task) +
+					  sizeof(struct iscsi_sw_tcp_hdrbuf),
 					  initial_cmdsn, 0);
 	if (!cls_session)
 		goto remove_host;
 	session = cls_session->dd_data;
 
 	shost->can_queue = session->scsi_cmds_max;
-	for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
-		struct iscsi_task *task = session->cmds[cmd_i];
-		struct iscsi_tcp_task *tcp_task = task->dd_data;
-
-		task->hdr = &tcp_task->hdr.cmd_hdr;
-		task->hdr_max = sizeof(tcp_task->hdr) - ISCSI_DIGEST_SIZE;
-	}
-
-	if (iscsi_r2tpool_alloc(session))
+	if (iscsi_tcp_r2tpool_alloc(session))
 		goto remove_session;
 	return cls_session;
 
@@ -1882,25 +802,25 @@ free_host:
 	return NULL;
 }
 
-static void iscsi_tcp_session_destroy(struct iscsi_cls_session *cls_session)
+static void iscsi_sw_tcp_session_destroy(struct iscsi_cls_session *cls_session)
 {
 	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
 
-	iscsi_r2tpool_free(cls_session->dd_data);
+	iscsi_tcp_r2tpool_free(cls_session->dd_data);
 	iscsi_session_teardown(cls_session);
 
 	iscsi_host_remove(shost);
 	iscsi_host_free(shost);
 }
 
-static int iscsi_tcp_slave_configure(struct scsi_device *sdev)
+static int iscsi_sw_tcp_slave_configure(struct scsi_device *sdev)
 {
 	blk_queue_bounce_limit(sdev->request_queue, BLK_BOUNCE_ANY);
 	blk_queue_dma_alignment(sdev->request_queue, 0);
 	return 0;
 }
 
-static struct scsi_host_template iscsi_sht = {
+static struct scsi_host_template iscsi_sw_tcp_sht = {
 	.module			= THIS_MODULE,
 	.name			= "iSCSI Initiator over TCP/IP",
 	.queuecommand           = iscsi_queuecommand,
@@ -1913,12 +833,12 @@ static struct scsi_host_template iscsi_sht = {
 	.eh_device_reset_handler= iscsi_eh_device_reset,
 	.eh_target_reset_handler= iscsi_eh_target_reset,
 	.use_clustering         = DISABLE_CLUSTERING,
-	.slave_configure        = iscsi_tcp_slave_configure,
+	.slave_configure        = iscsi_sw_tcp_slave_configure,
 	.proc_name		= "iscsi_tcp",
 	.this_id		= -1,
 };
 
-static struct iscsi_transport iscsi_tcp_transport = {
+static struct iscsi_transport iscsi_sw_tcp_transport = {
 	.owner			= THIS_MODULE,
 	.name			= "tcp",
 	.caps			= CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST
@@ -1951,32 +871,36 @@ static struct iscsi_transport iscsi_tcp_transport = {
 				  ISCSI_HOST_INITIATOR_NAME |
 				  ISCSI_HOST_NETDEV_NAME,
 	/* session management */
-	.create_session		= iscsi_tcp_session_create,
-	.destroy_session	= iscsi_tcp_session_destroy,
+	.create_session		= iscsi_sw_tcp_session_create,
+	.destroy_session	= iscsi_sw_tcp_session_destroy,
 	/* connection management */
-	.create_conn		= iscsi_tcp_conn_create,
-	.bind_conn		= iscsi_tcp_conn_bind,
-	.destroy_conn		= iscsi_tcp_conn_destroy,
-	.set_param		= iscsi_conn_set_param,
-	.get_conn_param		= iscsi_tcp_conn_get_param,
+	.create_conn		= iscsi_sw_tcp_conn_create,
+	.bind_conn		= iscsi_sw_tcp_conn_bind,
+	.destroy_conn		= iscsi_sw_tcp_conn_destroy,
+	.set_param		= iscsi_sw_tcp_conn_set_param,
+	.get_conn_param		= iscsi_sw_tcp_conn_get_param,
 	.get_session_param	= iscsi_session_get_param,
 	.start_conn		= iscsi_conn_start,
-	.stop_conn		= iscsi_tcp_conn_stop,
+	.stop_conn		= iscsi_sw_tcp_conn_stop,
 	/* iscsi host params */
 	.get_host_param		= iscsi_host_get_param,
 	.set_host_param		= iscsi_host_set_param,
 	/* IO */
 	.send_pdu		= iscsi_conn_send_pdu,
-	.get_stats		= iscsi_conn_get_stats,
+	.get_stats		= iscsi_sw_tcp_conn_get_stats,
+	/* iscsi task/cmd helpers */
 	.init_task		= iscsi_tcp_task_init,
 	.xmit_task		= iscsi_tcp_task_xmit,
 	.cleanup_task		= iscsi_tcp_cleanup_task,
+	/* low level pdu helpers */
+	.xmit_pdu		= iscsi_sw_tcp_pdu_xmit,
+	.init_pdu		= iscsi_sw_tcp_pdu_init,
+	.alloc_pdu		= iscsi_sw_tcp_pdu_alloc,
 	/* recovery */
 	.session_recovery_timedout = iscsi_session_recovery_timedout,
 };
 
-static int __init
-iscsi_tcp_init(void)
+static int __init iscsi_sw_tcp_init(void)
 {
 	if (iscsi_max_lun < 1) {
 		printk(KERN_ERR "iscsi_tcp: Invalid max_lun value of %u\n",
@@ -1984,19 +908,18 @@ iscsi_tcp_init(void)
 		return -EINVAL;
 	}
 
-	iscsi_tcp_scsi_transport = iscsi_register_transport(
-							&iscsi_tcp_transport);
-	if (!iscsi_tcp_scsi_transport)
+	iscsi_sw_tcp_scsi_transport = iscsi_register_transport(
+						&iscsi_sw_tcp_transport);
+	if (!iscsi_sw_tcp_scsi_transport)
 		return -ENODEV;
 
 	return 0;
 }
 
-static void __exit
-iscsi_tcp_exit(void)
+static void __exit iscsi_sw_tcp_exit(void)
 {
-	iscsi_unregister_transport(&iscsi_tcp_transport);
+	iscsi_unregister_transport(&iscsi_sw_tcp_transport);
 }
 
-module_init(iscsi_tcp_init);
-module_exit(iscsi_tcp_exit);
+module_init(iscsi_sw_tcp_init);
+module_exit(iscsi_sw_tcp_exit);
diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h
index 498d8ca39848..ca6b7bc64de0 100644
--- a/drivers/scsi/iscsi_tcp.h
+++ b/drivers/scsi/iscsi_tcp.h
@@ -19,67 +19,27 @@
  * See the file COPYING included with this distribution for more details.
  */
 
-#ifndef ISCSI_TCP_H
-#define ISCSI_TCP_H
+#ifndef ISCSI_SW_TCP_H
+#define ISCSI_SW_TCP_H
 
 #include <scsi/libiscsi.h>
+#include <scsi/libiscsi_tcp.h>
 
-struct crypto_hash;
 struct socket;
 struct iscsi_tcp_conn;
-struct iscsi_segment;
-
-typedef int iscsi_segment_done_fn_t(struct iscsi_tcp_conn *,
-				    struct iscsi_segment *);
-
-struct iscsi_segment {
-	unsigned char		*data;
-	unsigned int		size;
-	unsigned int		copied;
-	unsigned int		total_size;
-	unsigned int		total_copied;
-
-	struct hash_desc	*hash;
-	unsigned char		recv_digest[ISCSI_DIGEST_SIZE];
-	unsigned char		digest[ISCSI_DIGEST_SIZE];
-	unsigned int		digest_len;
-
-	struct scatterlist	*sg;
-	void			*sg_mapped;
-	unsigned int		sg_offset;
-
-	iscsi_segment_done_fn_t	*done;
-};
-
-/* Socket connection recieve helper */
-struct iscsi_tcp_recv {
-	struct iscsi_hdr	*hdr;
-	struct iscsi_segment	segment;
-
-	/* Allocate buffer for BHS + AHS */
-	uint32_t		hdr_buf[64];
-
-	/* copied and flipped values */
-	int			datalen;
-};
 
 /* Socket connection send helper */
-struct iscsi_tcp_send {
+struct iscsi_sw_tcp_send {
 	struct iscsi_hdr	*hdr;
 	struct iscsi_segment	segment;
 	struct iscsi_segment	data_segment;
 };
 
-struct iscsi_tcp_conn {
+struct iscsi_sw_tcp_conn {
 	struct iscsi_conn	*iscsi_conn;
 	struct socket		*sock;
-	int			stop_stage;	/* conn_stop() flag: *
-						 * stop to recover,  *
-						 * stop to terminate */
-	/* control data */
-	struct iscsi_tcp_recv	in;		/* TCP receive context */
-	struct iscsi_tcp_send	out;		/* TCP send context */
 
+	struct iscsi_sw_tcp_send out;
 	/* old values for socket callbacks */
 	void			(*old_data_ready)(struct sock *, int);
 	void			(*old_state_change)(struct sock *);
@@ -93,41 +53,13 @@ struct iscsi_tcp_conn {
 	uint32_t		sendpage_failures_cnt;
 	uint32_t		discontiguous_hdr_cnt;
 
-	int			error;
-
 	ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int);
 };
 
-struct iscsi_data_task {
-	struct iscsi_data	hdr;			/* PDU */
-	char			hdrext[ISCSI_DIGEST_SIZE];/* Header-Digest */
-};
-
-struct iscsi_r2t_info {
-	__be32			ttt;		/* copied from R2T */
-	__be32			exp_statsn;	/* copied from R2T */
-	uint32_t		data_length;	/* copied from R2T */
-	uint32_t		data_offset;	/* copied from R2T */
-	int			sent;		/* R2T sequence progress */
-	int			data_count;	/* DATA-Out payload progress */
-	int			solicit_datasn;
-	struct iscsi_data_task	dtask;		/* Data-Out header buf */
-};
-
-struct iscsi_tcp_task {
-	struct iscsi_hdr_buff {
-		struct iscsi_cmd	cmd_hdr;
-		char			hdrextbuf[ISCSI_MAX_AHS_SIZE +
+struct iscsi_sw_tcp_hdrbuf {
+	struct iscsi_hdr	hdrbuf;
+	char			hdrextbuf[ISCSI_MAX_AHS_SIZE +
 		                                  ISCSI_DIGEST_SIZE];
-	} hdr;
-
-	int			sent;
-	uint32_t		exp_datasn;	/* expected target's R2TSN/DataSN */
-	int			data_offset;
-	struct iscsi_r2t_info	*r2t;		/* in progress R2T    */
-	struct iscsi_pool	r2tpool;
-	struct kfifo		*r2tqueue;
-	struct iscsi_data_task	unsol_dtask;	/* Data-Out header buf */
 };
 
-#endif /* ISCSI_H */
+#endif /* ISCSI_SW_TCP_H */
diff --git a/drivers/scsi/libfc/Makefile b/drivers/scsi/libfc/Makefile
new file mode 100644
index 000000000000..55f982de3a9a
--- /dev/null
+++ b/drivers/scsi/libfc/Makefile
@@ -0,0 +1,12 @@
+# $Id: Makefile
+
+obj-$(CONFIG_LIBFC) += libfc.o
+
+libfc-objs := \
+	fc_disc.o \
+	fc_exch.o \
+	fc_elsct.o \
+	fc_frame.o \
+	fc_lport.o \
+	fc_rport.o \
+	fc_fcp.o
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
new file mode 100644
index 000000000000..dd1564c9e04a
--- /dev/null
+++ b/drivers/scsi/libfc/fc_disc.c
@@ -0,0 +1,845 @@
+/*
+ * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+/*
+ * Target Discovery
+ *
+ * This block discovers all FC-4 remote ports, including FCP initiators. It
+ * also handles RSCN events and re-discovery if necessary.
+ */
+
+/*
+ * DISC LOCKING
+ *
+ * The disc mutex can be held when acquiring rport locks, but may not
+ * be held when acquiring the lport lock. Refer to fc_lport.c for more
+ * details.
+ */
+
+#include <linux/timer.h>
+#include <linux/err.h>
+#include <asm/unaligned.h>
+
+#include <scsi/fc/fc_gs.h>
+
+#include <scsi/libfc.h>
+
+#define FC_DISC_RETRY_LIMIT	3	/* max retries */
+#define FC_DISC_RETRY_DELAY	500UL	/* (msecs) delay */
+
+#define	FC_DISC_DELAY		3
+
+static int fc_disc_debug;
+
+#define FC_DEBUG_DISC(fmt...)			\
+	do {					\
+		if (fc_disc_debug)		\
+			FC_DBG(fmt);		\
+	} while (0)
+
+static void fc_disc_gpn_ft_req(struct fc_disc *);
+static void fc_disc_gpn_ft_resp(struct fc_seq *, struct fc_frame *, void *);
+static int fc_disc_new_target(struct fc_disc *, struct fc_rport *,
+			      struct fc_rport_identifiers *);
+static void fc_disc_del_target(struct fc_disc *, struct fc_rport *);
+static void fc_disc_done(struct fc_disc *);
+static void fc_disc_timeout(struct work_struct *);
+static void fc_disc_single(struct fc_disc *, struct fc_disc_port *);
+static void fc_disc_restart(struct fc_disc *);
+
+/**
+ * fc_disc_lookup_rport - lookup a remote port by port_id
+ * @lport: Fibre Channel host port instance
+ * @port_id: remote port port_id to match
+ */
+struct fc_rport *fc_disc_lookup_rport(const struct fc_lport *lport,
+				      u32 port_id)
+{
+	const struct fc_disc *disc = &lport->disc;
+	struct fc_rport *rport, *found = NULL;
+	struct fc_rport_libfc_priv *rdata;
+
+	list_for_each_entry(rdata, &disc->rports, peers) {
+		rport = PRIV_TO_RPORT(rdata);
+		if (rport->port_id == port_id) {
+			found = rport;
+			break;
+		}
+	}
+
+	return found;
+}
+
+/**
+ * fc_disc_stop_rports - delete all the remote ports associated with the lport
+ * @disc: The discovery job to stop rports on
+ *
+ * Locking Note: This function expects that the lport mutex is locked before
+ * calling it.
+ */
+void fc_disc_stop_rports(struct fc_disc *disc)
+{
+	struct fc_lport *lport;
+	struct fc_rport *rport;
+	struct fc_rport_libfc_priv *rdata, *next;
+
+	lport = disc->lport;
+
+	mutex_lock(&disc->disc_mutex);
+	list_for_each_entry_safe(rdata, next, &disc->rports, peers) {
+		rport = PRIV_TO_RPORT(rdata);
+		list_del(&rdata->peers);
+		lport->tt.rport_logoff(rport);
+	}
+
+	mutex_unlock(&disc->disc_mutex);
+}
+
+/**
+ * fc_disc_rport_callback - Event handler for rport events
+ * @lport: The lport which is receiving the event
+ * @rport: The rport which the event has occurred on
+ * @event: The event that occurred
+ *
+ * Locking Note: The rport lock should not be held when calling
+ *		 this function.
+ */
+static void fc_disc_rport_callback(struct fc_lport *lport,
+				   struct fc_rport *rport,
+				   enum fc_rport_event event)
+{
+	struct fc_rport_libfc_priv *rdata = rport->dd_data;
+	struct fc_disc *disc = &lport->disc;
+	int found = 0;
+
+	FC_DEBUG_DISC("Received a %d event for port (%6x)\n", event,
+		      rport->port_id);
+
+	if (event == RPORT_EV_CREATED) {
+		if (disc) {
+			found = 1;
+			mutex_lock(&disc->disc_mutex);
+			list_add_tail(&rdata->peers, &disc->rports);
+			mutex_unlock(&disc->disc_mutex);
+		}
+	}
+
+	if (!found)
+		FC_DEBUG_DISC("The rport (%6x) is not maintained "
+			      "by the discovery layer\n", rport->port_id);
+}
+
+/**
+ * fc_disc_recv_rscn_req - Handle Registered State Change Notification (RSCN)
+ * @sp: Current sequence of the RSCN exchange
+ * @fp: RSCN Frame
+ * @disc: FC discovery context
+ *
+ * Locking Note: This function expects that the disc_mutex is locked
+ *		 before it is called.
+ */
+static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp,
+				  struct fc_disc *disc)
+{
+	struct fc_lport *lport;
+	struct fc_rport *rport;
+	struct fc_rport_libfc_priv *rdata;
+	struct fc_els_rscn *rp;
+	struct fc_els_rscn_page *pp;
+	struct fc_seq_els_data rjt_data;
+	unsigned int len;
+	int redisc = 0;
+	enum fc_els_rscn_ev_qual ev_qual;
+	enum fc_els_rscn_addr_fmt fmt;
+	LIST_HEAD(disc_ports);
+	struct fc_disc_port *dp, *next;
+
+	lport = disc->lport;
+
+	FC_DEBUG_DISC("Received an RSCN event on port (%6x)\n",
+		      fc_host_port_id(lport->host));
+
+	/* make sure the frame contains an RSCN message */
+	rp = fc_frame_payload_get(fp, sizeof(*rp));
+	if (!rp)
+		goto reject;
+	/* make sure the page length is as expected (4 bytes) */
+	if (rp->rscn_page_len != sizeof(*pp))
+		goto reject;
+	/* get the RSCN payload length */
+	len = ntohs(rp->rscn_plen);
+	if (len < sizeof(*rp))
+		goto reject;
+	/* make sure the frame contains the expected payload */
+	rp = fc_frame_payload_get(fp, len);
+	if (!rp)
+		goto reject;
+	/* payload must be a multiple of the RSCN page size */
+	len -= sizeof(*rp);
+	if (len % sizeof(*pp))
+		goto reject;
+
+	for (pp = (void *)(rp + 1); len > 0; len -= sizeof(*pp), pp++) {
+		ev_qual = pp->rscn_page_flags >> ELS_RSCN_EV_QUAL_BIT;
+		ev_qual &= ELS_RSCN_EV_QUAL_MASK;
+		fmt = pp->rscn_page_flags >> ELS_RSCN_ADDR_FMT_BIT;
+		fmt &= ELS_RSCN_ADDR_FMT_MASK;
+		/*
+		 * if we get an address format other than port
+		 * (area, domain, fabric), then do a full discovery
+		 */
+		switch (fmt) {
+		case ELS_ADDR_FMT_PORT:
+			FC_DEBUG_DISC("Port address format for port (%6x)\n",
+				      ntoh24(pp->rscn_fid));
+			dp = kzalloc(sizeof(*dp), GFP_KERNEL);
+			if (!dp) {
+				redisc = 1;
+				break;
+			}
+			dp->lp = lport;
+			dp->ids.port_id = ntoh24(pp->rscn_fid);
+			dp->ids.port_name = -1;
+			dp->ids.node_name = -1;
+			dp->ids.roles = FC_RPORT_ROLE_UNKNOWN;
+			list_add_tail(&dp->peers, &disc_ports);
+			break;
+		case ELS_ADDR_FMT_AREA:
+		case ELS_ADDR_FMT_DOM:
+		case ELS_ADDR_FMT_FAB:
+		default:
+			FC_DEBUG_DISC("Address format is (%d)\n", fmt);
+			redisc = 1;
+			break;
+		}
+	}
+	lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
+	if (redisc) {
+		FC_DEBUG_DISC("RSCN received: rediscovering\n");
+		fc_disc_restart(disc);
+	} else {
+		FC_DEBUG_DISC("RSCN received: not rediscovering. "
+			      "redisc %d state %d in_prog %d\n",
+			      redisc, lport->state, disc->pending);
+		list_for_each_entry_safe(dp, next, &disc_ports, peers) {
+			list_del(&dp->peers);
+			rport = lport->tt.rport_lookup(lport, dp->ids.port_id);
+			if (rport) {
+				rdata = RPORT_TO_PRIV(rport);
+				list_del(&rdata->peers);
+				lport->tt.rport_logoff(rport);
+			}
+			fc_disc_single(disc, dp);
+		}
+	}
+	fc_frame_free(fp);
+	return;
+reject:
+	FC_DEBUG_DISC("Received a bad RSCN frame\n");
+	rjt_data.fp = NULL;
+	rjt_data.reason = ELS_RJT_LOGIC;
+	rjt_data.explan = ELS_EXPL_NONE;
+	lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
+	fc_frame_free(fp);
+}
+
+/**
+ * fc_disc_recv_req - Handle incoming requests
+ * @sp: Current sequence of the request exchange
+ * @fp: The frame
+ * @lport: The FC local port
+ *
+ * Locking Note: This function is called from the EM and will lock
+ *		 the disc_mutex before calling the handler for the
+ *		 request.
+ */
+static void fc_disc_recv_req(struct fc_seq *sp, struct fc_frame *fp,
+			     struct fc_lport *lport)
+{
+	u8 op;
+	struct fc_disc *disc = &lport->disc;
+
+	op = fc_frame_payload_op(fp);
+	switch (op) {
+	case ELS_RSCN:
+		mutex_lock(&disc->disc_mutex);
+		fc_disc_recv_rscn_req(sp, fp, disc);
+		mutex_unlock(&disc->disc_mutex);
+		break;
+	default:
+		FC_DBG("Received an unsupported request. opcode (%x)\n", op);
+		break;
+	}
+}
+
+/**
+ * fc_disc_restart - Restart discovery
+ * @disc: FC discovery context
+ *
+ * Locking Note: This function expects that the disc mutex
+ *		 is already locked.
+ */
+static void fc_disc_restart(struct fc_disc *disc)
+{
+	struct fc_rport *rport;
+	struct fc_rport_libfc_priv *rdata, *next;
+	struct fc_lport *lport = disc->lport;
+
+	FC_DEBUG_DISC("Restarting discovery for port (%6x)\n",
+		      fc_host_port_id(lport->host));
+
+	list_for_each_entry_safe(rdata, next, &disc->rports, peers) {
+		rport = PRIV_TO_RPORT(rdata);
+		FC_DEBUG_DISC("list_del(%6x)\n", rport->port_id);
+		list_del(&rdata->peers);
+		lport->tt.rport_logoff(rport);
+	}
+
+	disc->requested = 1;
+	if (!disc->pending)
+		fc_disc_gpn_ft_req(disc);
+}
+
+/**
+ * fc_disc_start - Fibre Channel Target discovery
+ * @disc_callback: Callback to invoke when discovery completes
+ * @lport: FC local port
+ *
+ * Sets the request flag and starts discovery unless one is already
+ * pending.
+ */
+static void fc_disc_start(void (*disc_callback)(struct fc_lport *,
+						enum fc_disc_event),
+			  struct fc_lport *lport)
+{
+	struct fc_rport *rport;
+	struct fc_rport_identifiers ids;
+	struct fc_disc *disc = &lport->disc;
+
+	/*
+	 * At this point we may have a new disc job or an existing
+	 * one. Either way, let's lock when we make changes to it
+	 * and send the GPN_FT request.
+	 */
+	mutex_lock(&disc->disc_mutex);
+
+	disc->disc_callback = disc_callback;
+
+	/*
+	 * If not ready, or already running discovery, just set request flag.
+	 */
+	disc->requested = 1;
+
+	if (disc->pending) {
+		mutex_unlock(&disc->disc_mutex);
+		return;
+	}
+
+	/*
+	 * Handle point-to-point mode as a simple discovery
+	 * of the remote port. Yucky, yucky, yuck, yuck!
+	 */
+	rport = disc->lport->ptp_rp;
+	if (rport) {
+		ids.port_id = rport->port_id;
+		ids.port_name = rport->port_name;
+		ids.node_name = rport->node_name;
+		ids.roles = FC_RPORT_ROLE_UNKNOWN;
+		get_device(&rport->dev);
+
+		if (!fc_disc_new_target(disc, rport, &ids)) {
+			disc->event = DISC_EV_SUCCESS;
+			fc_disc_done(disc);
+		}
+		put_device(&rport->dev);
+	} else {
+		fc_disc_gpn_ft_req(disc);	/* get ports by FC-4 type */
+	}
+
+	mutex_unlock(&disc->disc_mutex);
+}
+
+static struct fc_rport_operations fc_disc_rport_ops = {
+	.event_callback = fc_disc_rport_callback,
+};
+
+/**
+ * fc_disc_new_target - Handle new target found by discovery
+ * @disc: FC discovery context
+ * @rport: The previous FC remote port (NULL if new remote port)
+ * @ids: Identifiers for the new FC remote port
+ *
+ * Locking Note: This function expects that the disc_mutex is locked
+ *		 before it is called.
+ */
+static int fc_disc_new_target(struct fc_disc *disc,
+			      struct fc_rport *rport,
+			      struct fc_rport_identifiers *ids)
+{
+	struct fc_lport *lport = disc->lport;
+	struct fc_rport_libfc_priv *rp;
+	int error = 0;
+
+	if (rport && ids->port_name) {
+		if (rport->port_name == -1) {
+			/*
+			 * Set WWN and fall through to notify of create.
+			 */
+			fc_rport_set_name(rport, ids->port_name,
+					  rport->node_name);
+		} else if (rport->port_name != ids->port_name) {
+			/*
+			 * This is a new port with the same FCID as
+			 * a previously-discovered port.  Presumably the old
+			 * port logged out and a new port logged in and was
+			 * assigned the same FCID.  This should be rare.
+			 * Delete the old one and fall thru to re-create.
+			 */
+			fc_disc_del_target(disc, rport);
+			rport = NULL;
+		}
+	}
+	if (((ids->port_name != -1) || (ids->port_id != -1)) &&
+	    ids->port_id != fc_host_port_id(lport->host) &&
+	    ids->port_name != lport->wwpn) {
+		if (!rport) {
+			rport = lport->tt.rport_lookup(lport, ids->port_id);
+			if (!rport) {
+				struct fc_disc_port dp;
+				dp.lp = lport;
+				dp.ids.port_id = ids->port_id;
+				dp.ids.port_name = ids->port_name;
+				dp.ids.node_name = ids->node_name;
+				dp.ids.roles = ids->roles;
+				rport = fc_rport_rogue_create(&dp);
+			}
+			if (!rport)
+				error = -ENOMEM;
+		}
+		if (rport) {
+			rp = rport->dd_data;
+			rp->ops = &fc_disc_rport_ops;
+			rp->rp_state = RPORT_ST_INIT;
+			lport->tt.rport_login(rport);
+		}
+	}
+	return error;
+}
+
+/**
+ * fc_disc_del_target - Delete a target
+ * @disc: FC discovery context
+ * @rport: The remote port to be removed
+ */
+static void fc_disc_del_target(struct fc_disc *disc, struct fc_rport *rport)
+{
+	struct fc_lport *lport = disc->lport;
+	struct fc_rport_libfc_priv *rdata = RPORT_TO_PRIV(rport);
+	list_del(&rdata->peers);
+	lport->tt.rport_logoff(rport);
+}
+
+/**
+ * fc_disc_done - Discovery has been completed
+ * @disc: FC discovery context
+ */
+static void fc_disc_done(struct fc_disc *disc)
+{
+	struct fc_lport *lport = disc->lport;
+
+	FC_DEBUG_DISC("Discovery complete for port (%6x)\n",
+		      fc_host_port_id(lport->host));
+
+	disc->disc_callback(lport, disc->event);
+	disc->event = DISC_EV_NONE;
+
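+	/* restart right away if another discovery was requested while this one ran */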
+	if (disc->requested)
+		fc_disc_gpn_ft_req(disc);
+	else
+		disc->pending = 0;
+}
+
+/**
+ * fc_disc_error - Handle error on dNS request
+ * @disc: FC discovery context
+ * @fp: The frame pointer
+ */
+static void fc_disc_error(struct fc_disc *disc, struct fc_frame *fp)
+{
+	struct fc_lport *lport = disc->lport;
+	unsigned long delay = 0;
+	if (fc_disc_debug)
+		FC_DBG("Error %ld, retries %d/%d\n",
+		       PTR_ERR(fp), disc->retry_count,
+		       FC_DISC_RETRY_LIMIT);
+
+	if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) {
+		/*
+		 * Memory allocation failure, or the exchange timed out,
+		 * retry after delay.
+		 */
+		if (disc->retry_count < FC_DISC_RETRY_LIMIT) {
+			/* go ahead and retry */
+			if (!fp)
+				delay = msecs_to_jiffies(FC_DISC_RETRY_DELAY);
+			else {
+				delay = msecs_to_jiffies(lport->e_d_tov);
+
+				/* timeout faster first time */
+				if (!disc->retry_count)
+					delay /= 4;
+			}
+			disc->retry_count++;
+			schedule_delayed_work(&disc->disc_work, delay);
+		} else {
+			/* exceeded retries */
+			disc->event = DISC_EV_FAILED;
+			fc_disc_done(disc);
+		}
+	}
+}
+
+/**
+ * fc_disc_gpn_ft_req - Send Get Port Names by FC-4 type (GPN_FT) request
+ * @disc: FC discovery context
+ *
+ * Locking Note: This function expects that the disc_mutex is locked
+ *		 before it is called.
+ */
+static void fc_disc_gpn_ft_req(struct fc_disc *disc)
+{
+	struct fc_frame *fp;
+	struct fc_lport *lport = disc->lport;
+
+	WARN_ON(!fc_lport_test_ready(lport));
+
+	disc->pending = 1;
+	disc->requested = 0;
+
+	disc->buf_len = 0;
+	disc->seq_count = 0;
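+	/* the GPN_FT request payload has the same layout as a GID_FT request */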
+	fp = fc_frame_alloc(lport,
+			    sizeof(struct fc_ct_hdr) +
+			    sizeof(struct fc_ns_gid_ft));
+	if (!fp)
+		goto err;
+
+	if (lport->tt.elsct_send(lport, NULL, fp,
+				 FC_NS_GPN_FT,
+				 fc_disc_gpn_ft_resp,
+				 disc, lport->e_d_tov))
+		return;
+err:
+	fc_disc_error(disc, fp);
+}
+
+/**
+ * fc_disc_gpn_ft_parse - Parse the list of IDs and names resulting from a request
+ * @disc: FC discovery context
+ * @buf: GPN_FT response buffer
+ * @len: size of response buffer
+ */
+static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len)
+{
+	struct fc_lport *lport;
+	struct fc_gpn_ft_resp *np;
+	char *bp;
+	size_t plen;
+	size_t tlen;
+	int error = 0;
+	struct fc_disc_port dp;
+	struct fc_rport *rport;
+	struct fc_rport_libfc_priv *rdata;
+
+	lport = disc->lport;
+
+	/*
+	 * Handle partial name record left over from previous call.
+	 */
+	bp = buf;
+	plen = len;
+	np = (struct fc_gpn_ft_resp *)bp;
+	tlen = disc->buf_len;
+	if (tlen) {
+		WARN_ON(tlen >= sizeof(*np));
+		plen = sizeof(*np) - tlen;
+		WARN_ON(plen <= 0);
+		WARN_ON(plen >= sizeof(*np));
+		if (plen > len)
+			plen = len;
+		np = &disc->partial_buf;
+		memcpy((char *)np + tlen, bp, plen);
+
+		/*
+		 * Set bp so that the loop below will advance it to the
+		 * first valid full name element.
+		 */
+		bp -= tlen;
+		len += tlen;
+		plen += tlen;
+		disc->buf_len = (unsigned char) plen;
+		if (plen == sizeof(*np))
+			disc->buf_len = 0;
+	}
+
+	/*
+	 * Handle full name records, including the one filled from above.
+	 * Normally, np == bp and plen == len, but from the partial case above,
+	 * bp, len describe the overall buffer, and np, plen describe the
+	 * partial buffer, which would usually be full by now.
+	 * After the first time through the loop, things return to "normal".
+	 */
+	while (plen >= sizeof(*np)) {
+		dp.lp = lport;
+		dp.ids.port_id = ntoh24(np->fp_fid);
+		dp.ids.port_name = ntohll(np->fp_wwpn);
+		dp.ids.node_name = -1;
+		dp.ids.roles = FC_RPORT_ROLE_UNKNOWN;
+
+		if ((dp.ids.port_id != fc_host_port_id(lport->host)) &&
+		    (dp.ids.port_name != lport->wwpn)) {
+			rport = fc_rport_rogue_create(&dp);
+			if (rport) {
+				rdata = rport->dd_data;
+				rdata->ops = &fc_disc_rport_ops;
+				rdata->local_port = lport;
+				lport->tt.rport_login(rport);
+			} else
+				FC_DBG("Failed to allocate memory for "
+				       "the newly discovered port (%6x)\n",
+				       dp.ids.port_id);
+		}
+
+		if (np->fp_flags & FC_NS_FID_LAST) {
+			disc->event = DISC_EV_SUCCESS;
+			fc_disc_done(disc);
+			len = 0;
+			break;
+		}
+		len -= sizeof(*np);
+		bp += sizeof(*np);
+		np = (struct fc_gpn_ft_resp *)bp;
+		plen = len;
+	}
+
+	/*
+	 * Save any partial record at the end of the buffer for next time.
+	 */
+	if (error == 0 && len > 0 && len < sizeof(*np)) {
+		if (np != &disc->partial_buf) {
+			FC_DEBUG_DISC("Partial buffer remains "
+				      "for discovery by (%6x)\n",
+				      fc_host_port_id(lport->host));
+			memcpy(&disc->partial_buf, np, len);
+		}
+		disc->buf_len = (unsigned char) len;
+	} else {
+		disc->buf_len = 0;
+	}
+	return error;
+}
+
+/*
+ * Delayed-work handler: retry the GPN_FT request if a new discovery
+ * was requested while none is pending (e.g. after an allocation failure).
+ */
+static void fc_disc_timeout(struct work_struct *work)
+{
+	struct fc_disc *disc = container_of(work,
+					    struct fc_disc,
+					    disc_work.work);
+	mutex_lock(&disc->disc_mutex);
+	if (disc->requested && !disc->pending)
+		fc_disc_gpn_ft_req(disc);
+	mutex_unlock(&disc->disc_mutex);
+}
+
+/**
+ * fc_disc_gpn_ft_resp - Handle a response frame from Get Port Names (GPN_FT)
+ * @sp: Current sequence of GPN_FT exchange
+ * @fp: response frame
+ * @disc_arg: FC discovery context
+ *
+ * Locking Note: This function expects that the disc_mutex is locked
+ *		 before it is called.
+ */
+static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp,
+				void *disc_arg)
+{
+	struct fc_disc *disc = disc_arg;
+	struct fc_ct_hdr *cp;
+	struct fc_frame_header *fh;
+	unsigned int seq_cnt;
+	void *buf = NULL;
+	unsigned int len;
+	int error;
+
+	FC_DEBUG_DISC("Received a GPN_FT response on port (%6x)\n",
+		      fc_host_port_id(disc->lport->host));
+
+	if (IS_ERR(fp)) {
+		fc_disc_error(disc, fp);
+		return;
+	}
+
+	WARN_ON(!fc_frame_is_linear(fp));	/* buffer must be contiguous */
+	fh = fc_frame_header_get(fp);
+	len = fr_len(fp) - sizeof(*fh);
+	seq_cnt = ntohs(fh->fh_seq_cnt);
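+	/* only the first frame (initiate SOF, sequence count 0) carries the CT header */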
+	if (fr_sof(fp) == FC_SOF_I3 && seq_cnt == 0 &&
+	    disc->seq_count == 0) {
+		cp = fc_frame_payload_get(fp, sizeof(*cp));
+		if (!cp) {
+			FC_DBG("GPN_FT response too short, len %d\n",
+			       fr_len(fp));
+		} else if (ntohs(cp->ct_cmd) == FC_FS_ACC) {
+
+			/*
+			 * Accepted.  Parse response.
+			 */
+			buf = cp + 1;
+			len -= sizeof(*cp);
+		} else if (ntohs(cp->ct_cmd) == FC_FS_RJT) {
+			FC_DBG("GPN_FT rejected reason %x exp %x "
+			       "(check zoning)\n", cp->ct_reason,
+			       cp->ct_explan);
+			disc->event = DISC_EV_FAILED;
+			fc_disc_done(disc);
+		} else {
+			FC_DBG("GPN_FT unexpected response code %x\n",
+			       ntohs(cp->ct_cmd));
+		}
+	} else if (fr_sof(fp) == FC_SOF_N3 &&
+		   seq_cnt == disc->seq_count) {
+		buf = fh + 1;
+	} else {
+		FC_DBG("GPN_FT unexpected frame - out of sequence? "
+		       "seq_cnt %x expected %x sof %x eof %x\n",
+		       seq_cnt, disc->seq_count, fr_sof(fp), fr_eof(fp));
+	}
+	if (buf) {
+		error = fc_disc_gpn_ft_parse(disc, buf, len);
+		if (error)
+			fc_disc_error(disc, fp);
+		else
+			disc->seq_count++;
+	}
+	fc_frame_free(fp);
+}
+
+/**
+ * fc_disc_single - Discover the directory information for a single target
+ * @disc: FC discovery context
+ * @dp: The port to rediscover
+ *
+ * Locking Note: This function expects that the disc_mutex is locked
+ *		 before it is called.
+ */
+static void fc_disc_single(struct fc_disc *disc, struct fc_disc_port *dp)
+{
+	struct fc_lport *lport;
+	struct fc_rport *rport;
+	struct fc_rport *new_rport;
+	struct fc_rport_libfc_priv *rdata;
+
+	lport = disc->lport;
+
+	if (dp->ids.port_id == fc_host_port_id(lport->host))
+		goto out;
+
+	rport = lport->tt.rport_lookup(lport, dp->ids.port_id);
+	if (rport)
+		fc_disc_del_target(disc, rport);
+
+	new_rport = fc_rport_rogue_create(dp);
+	if (new_rport) {
+		rdata = new_rport->dd_data;
+		rdata->ops = &fc_disc_rport_ops;
+		kfree(dp);
+		lport->tt.rport_login(new_rport);
+	}
+	return;
+out:
+	kfree(dp);
+}
+
+/**
+ * fc_disc_stop - Stop discovery for a given lport
+ * @lport: The lport that discovery should stop for
+ */
+void fc_disc_stop(struct fc_lport *lport)
+{
+	struct fc_disc *disc = &lport->disc;
+
+	if (disc) {
+		cancel_delayed_work_sync(&disc->disc_work);
+		fc_disc_stop_rports(disc);
+	}
+}
+
+/**
+ * fc_disc_stop_final - Stop discovery for a given lport
+ * @lport: The lport that discovery should stop for
+ *
+ * This function will block until discovery has been
+ * completely stopped and all rports have been deleted.
+ */
+void fc_disc_stop_final(struct fc_lport *lport)
+{
+	fc_disc_stop(lport);
+	lport->tt.rport_flush_queue();
+}
+
+/**
+ * fc_disc_init - Initialize the discovery block
+ * @lport: FC local port
+ */
+int fc_disc_init(struct fc_lport *lport)
+{
+	struct fc_disc *disc;
+
+	if (!lport->tt.disc_start)
+		lport->tt.disc_start = fc_disc_start;
+
+	if (!lport->tt.disc_stop)
+		lport->tt.disc_stop = fc_disc_stop;
+
+	if (!lport->tt.disc_stop_final)
+		lport->tt.disc_stop_final = fc_disc_stop_final;
+
+	if (!lport->tt.disc_recv_req)
+		lport->tt.disc_recv_req = fc_disc_recv_req;
+
+	if (!lport->tt.rport_lookup)
+		lport->tt.rport_lookup = fc_disc_lookup_rport;
+
+	disc = &lport->disc;
+	INIT_DELAYED_WORK(&disc->disc_work, fc_disc_timeout);
+	mutex_init(&disc->disc_mutex);
+	INIT_LIST_HEAD(&disc->rports);
+
+	disc->lport = lport;
+	disc->delay = FC_DISC_DELAY;
+	disc->event = DISC_EV_NONE;
+
+	return 0;
+}
+EXPORT_SYMBOL(fc_disc_init);
diff --git a/drivers/scsi/libfc/fc_elsct.c b/drivers/scsi/libfc/fc_elsct.c
new file mode 100644
index 000000000000..dd47fe619d1e
--- /dev/null
+++ b/drivers/scsi/libfc/fc_elsct.c
@@ -0,0 +1,71 @@
+/*
+ * Copyright(c) 2008 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+/*
+ * Provide interface to send ELS/CT FC frames
+ */
+
+#include <asm/unaligned.h>
+#include <scsi/fc/fc_gs.h>
+#include <scsi/fc/fc_ns.h>
+#include <scsi/fc/fc_els.h>
+#include <scsi/libfc.h>
+#include <scsi/fc_encode.h>
+
+/*
+ * fc_elsct_send - sends ELS/CT frame
+ */
+static struct fc_seq *fc_elsct_send(struct fc_lport *lport,
+				    struct fc_rport *rport,
+				    struct fc_frame *fp,
+				    unsigned int op,
+				    void (*resp)(struct fc_seq *,
+						 struct fc_frame *fp,
+						 void *arg),
+				    void *arg, u32 timer_msec)
+{
+	enum fc_rctl r_ctl;
+	u32 did;
+	enum fc_fh_type fh_type;
+	int rc;
+
+	/* ELS requests */
+	if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS))
+		rc = fc_els_fill(lport, rport, fp, op, &r_ctl, &did, &fh_type);
+	else
+		/* CT requests */
+		rc = fc_ct_fill(lport, fp, op, &r_ctl, &did, &fh_type);
+
+	if (rc)
+		return NULL;
+
+	fc_fill_fc_hdr(fp, r_ctl, did, fc_host_port_id(lport->host), fh_type,
+		       FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
+
+	return lport->tt.exch_seq_send(lport, fp, resp, NULL, arg, timer_msec);
+}
+
+int fc_elsct_init(struct fc_lport *lport)
+{
+	if (!lport->tt.elsct_send)
+		lport->tt.elsct_send = fc_elsct_send;
+
+	return 0;
+}
+EXPORT_SYMBOL(fc_elsct_init);
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
new file mode 100644
index 000000000000..66db08a5f27f
--- /dev/null
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -0,0 +1,1970 @@
+/*
+ * Copyright(c) 2007 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 Red Hat, Inc.  All rights reserved.
+ * Copyright(c) 2008 Mike Christie
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+/*
+ * Fibre Channel exchange and sequence handling.
+ */
+
+#include <linux/timer.h>
+#include <linux/gfp.h>
+#include <linux/err.h>
+
+#include <scsi/fc/fc_fc2.h>
+
+#include <scsi/libfc.h>
+#include <scsi/fc_encode.h>
+
+#define	  FC_DEF_R_A_TOV      (10 * 1000) /* resource allocation timeout */
+
+/*
+ * fc_exch_debug can be set in debugger or at compile time to get more logs.
+ */
+static int fc_exch_debug;
+
+#define FC_DEBUG_EXCH(fmt...)			\
+	do {					\
+		if (fc_exch_debug)		\
+			FC_DBG(fmt);		\
+	} while (0)
+
+static struct kmem_cache *fc_em_cachep;	/* cache for exchanges */
+
+/*
+ * Structure and function definitions for managing Fibre Channel Exchanges
+ * and Sequences.
+ *
+ * The three primary structures used here are fc_exch_mgr, fc_exch, and fc_seq.
+ *
+ * fc_exch_mgr holds the exchange state for an N port
+ *
+ * fc_exch holds state for one exchange and links to its active sequence.
+ *
+ * fc_seq holds the state for an individual sequence.
+ */
+
+/*
+ * Exchange manager.
+ *
+ * This structure is the center for creating exchanges and sequences.
+ * It manages the allocation of exchange IDs.
+ */
+struct fc_exch_mgr {
+	enum fc_class	class;		/* default class for sequences */
+	spinlock_t	em_lock;	/* exchange manager lock,
+					   must be taken before ex_lock */
+	u16		last_xid;	/* last allocated exchange ID */
+	u16		min_xid;	/* min exchange ID */
+	u16		max_xid;	/* max exchange ID */
+	u16		max_read;	/* max exchange ID for read */
+	u16		last_read;	/* last xid allocated for read */
+	u32	total_exches;		/* total allocated exchanges */
+	struct list_head	ex_list;	/* allocated exchanges list */
+	struct fc_lport	*lp;		/* fc device instance */
+	mempool_t	*ep_pool;	/* reserve ep's */
+
+	/*
+	 * Exchange manager stats are currently updated but not used.
+	 * They could either be exposed via sysfs or removed
+	 * altogether if unused. XXX
+	 */
+	struct {
+		atomic_t no_free_exch;
+		atomic_t no_free_exch_xid;
+		atomic_t xid_not_found;
+		atomic_t xid_busy;
+		atomic_t seq_not_found;
+		atomic_t non_bls_resp;
+	} stats;
+	struct fc_exch **exches;	/* for exch pointers indexed by xid */
+};
+#define	fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)
+
+static void fc_exch_rrq(struct fc_exch *);
+static void fc_seq_ls_acc(struct fc_seq *);
+static void fc_seq_ls_rjt(struct fc_seq *, enum fc_els_rjt_reason,
+			  enum fc_els_rjt_explan);
+static void fc_exch_els_rec(struct fc_seq *, struct fc_frame *);
+static void fc_exch_els_rrq(struct fc_seq *, struct fc_frame *);
+static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp);
+
+/*
+ * Internal implementation notes.
+ *
+ * There is one exchange manager by default in libfc, but an LLD may
+ * choose to have one per CPU. The sequence manager is one per exchange manager
+ * and currently never separated.
+ *
+ * Section 9.8 in FC-FS-2 specifies:  "The SEQ_ID is a one-byte field
+ * assigned by the Sequence Initiator that shall be unique for a specific
+ * D_ID and S_ID pair while the Sequence is open."   Note that it isn't
+ * qualified by exchange ID, which one might think it would be.
+ * In practice this limits the number of open sequences and exchanges to 256
+ * per session.	 For most targets we could treat this limit as per exchange.
+ *
+ * The exchange and its sequence are freed when the last sequence is received.
+ * It's possible for the remote port to leave an exchange open without
+ * sending any sequences.
+ *
+ * Notes on reference counts:
+ *
+ * Exchanges are reference counted and an exchange is freed when its
+ * reference count becomes zero.
+ *
+ * Timeouts:
+ * Sequences are timed out for E_D_TOV and R_A_TOV.
+ *
+ * Sequence event handling:
+ *
+ * The following events may occur on initiator sequences:
+ *
+ *	Send.
+ *	    For now, the whole thing is sent.
+ *	Receive ACK
+ *	    This applies only to class F.
+ *	    The sequence is marked complete.
+ *	ULP completion.
+ *	    The upper layer calls fc_exch_done() when done
+ *	    with exchange and sequence tuple.
+ *	RX-inferred completion.
+ *	    When we receive the next sequence on the same exchange, we can
+ *	    retire the previous sequence ID.  (XXX not implemented).
+ *	Timeout.
+ *	    R_A_TOV frees the sequence ID.  If we're waiting for ACK,
+ *	    E_D_TOV causes abort and calls upper layer response handler
+ *	    with FC_EX_TIMEOUT error.
+ *	Receive RJT
+ *	    XXX defer.
+ *	Send ABTS
+ *	    On timeout.
+ *
+ * The following events may occur on recipient sequences:
+ *
+ *	Receive
+ *	    Allocate sequence for first frame received.
+ *	    Hold during receive handler.
+ *	    Release when final frame received.
+ *	    Keep status of last N of these for the ELS RES command.  XXX TBD.
+ *	Receive ABTS
+ *	    Deallocate sequence
+ *	Send RJT
+ *	    Deallocate
+ *
+ * For now, we neglect conditions where only part of a sequence was
+ * received or transmitted, or where out-of-order receipt is detected.
+ */
+
+/*
+ * Locking notes:
+ *
+ * The EM code runs in a per-CPU worker thread.
+ *
+ * To protect against concurrency between worker thread code and timers,
+ * sequence allocation and deallocation must be locked.
+ *  - exchange refcnt can be updated atomically without locks.
+ *  - sequence allocation must be locked by exch lock.
+ *  - If the em_lock and ex_lock must be taken at the same time, then the
+ *    em_lock must be taken before the ex_lock.
+ */
+
+/*
+ * opcode names for debugging.
+ */
+static char *fc_exch_rctl_names[] = FC_RCTL_NAMES_INIT;
+
+#define FC_TABLE_SIZE(x)   (sizeof(x) / sizeof(x[0]))
+
+static inline const char *fc_exch_name_lookup(unsigned int op, char **table,
+					      unsigned int max_index)
+{
+	const char *name = NULL;
+
+	if (op < max_index)
+		name = table[op];
+	if (!name)
+		name = "unknown";
+	return name;
+}
+
+static const char *fc_exch_rctl_name(unsigned int op)
+{
+	return fc_exch_name_lookup(op, fc_exch_rctl_names,
+				   FC_TABLE_SIZE(fc_exch_rctl_names));
+}
+
+/*
+ * Hold an exchange - keep it from being freed.
+ */
+static void fc_exch_hold(struct fc_exch *ep)
+{
+	atomic_inc(&ep->ex_refcnt);
+}
+
+/*
+ * Set up the FC header by initializing a few more header fields and sof/eof.
+ * Fields initialized by this function:
+ *	- fh_ox_id, fh_rx_id, fh_seq_id, fh_seq_cnt
+ *	- sof and eof
+ */
+static void fc_exch_setup_hdr(struct fc_exch *ep, struct fc_frame *fp,
+			      u32 f_ctl)
+{
+	struct fc_frame_header *fh = fc_frame_header_get(fp);
+	u16 fill;
+
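+	/* the fc_class value doubles as the initiate SOF code; later frames in the sequence use the normal SOF */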
+	fr_sof(fp) = ep->class;
+	if (ep->seq.cnt)
+		fr_sof(fp) = fc_sof_normal(ep->class);
+
+	if (f_ctl & FC_FC_END_SEQ) {
+		fr_eof(fp) = FC_EOF_T;
+		if (fc_sof_needs_ack(ep->class))
+			fr_eof(fp) = FC_EOF_N;
+		/*
+		 * Form f_ctl.
+		 * The number of fill bytes to make the length a 4-byte
+		 * multiple is the low order 2-bits of the f_ctl.
+		 * The fill itself will have been cleared by the frame
+		 * allocation.
+		 * After this, the length will be even, as expected by
+		 * the transport.
+		 */
+		fill = fr_len(fp) & 3;
+		if (fill) {
+			fill = 4 - fill;
+			/* TODO, this may be a problem with fragmented skb */
+			skb_put(fp_skb(fp), fill);
+			hton24(fh->fh_f_ctl, f_ctl | fill);
+		}
+	} else {
+		WARN_ON(fr_len(fp) % 4 != 0);	/* no pad on a non-last frame */
+		fr_eof(fp) = FC_EOF_N;
+	}
+
+	/*
+	 * Initialize remaining fh fields
+	 * from fc_fill_fc_hdr
+	 */
+	fh->fh_ox_id = htons(ep->oxid);
+	fh->fh_rx_id = htons(ep->rxid);
+	fh->fh_seq_id = ep->seq.id;
+	fh->fh_seq_cnt = htons(ep->seq.cnt);
+}
+
+
+/*
+ * Release a reference to an exchange.
+ * If the refcnt goes to zero and the exchange is complete, it is freed.
+ */
+static void fc_exch_release(struct fc_exch *ep)
+{
+	struct fc_exch_mgr *mp;
+
+	if (atomic_dec_and_test(&ep->ex_refcnt)) {
+		mp = ep->em;
+		if (ep->destructor)
+			ep->destructor(&ep->seq, ep->arg);
+		if (ep->lp->tt.exch_put)
+			ep->lp->tt.exch_put(ep->lp, mp, ep->xid);
+		WARN_ON(!(ep->esb_stat & ESB_ST_COMPLETE));
+		mempool_free(ep, mp->ep_pool);
+	}
+}
+
+static int fc_exch_done_locked(struct fc_exch *ep)
+{
+	int rc = 1;
+
+	/*
+	 * We must check for completion in case there are two threads
+	 * trying to complete this. But the rrq code will reuse the
+	 * ep, and in that case we only clear the resp and set it as
+	 * complete, so it can be reused by the timer to send the rrq.
+	 */
+	ep->resp = NULL;
+	if (ep->state & FC_EX_DONE)
+		return rc;
+	ep->esb_stat |= ESB_ST_COMPLETE;
+
+	if (!(ep->esb_stat & ESB_ST_REC_QUAL)) {
+		ep->state |= FC_EX_DONE;
+		if (cancel_delayed_work(&ep->timeout_work))
+			atomic_dec(&ep->ex_refcnt); /* drop hold for timer */
+		rc = 0;
+	}
+	return rc;
+}
+
+static void fc_exch_mgr_delete_ep(struct fc_exch *ep)
+{
+	struct fc_exch_mgr *mp;
+
+	mp = ep->em;
+	spin_lock_bh(&mp->em_lock);
+	WARN_ON(mp->total_exches <= 0);
+	mp->total_exches--;
+	mp->exches[ep->xid - mp->min_xid] = NULL;
+	list_del(&ep->ex_list);
+	spin_unlock_bh(&mp->em_lock);
+	fc_exch_release(ep);	/* drop hold for exch in mp */
+}
+
+/*
+ * Internal version of fc_exch_timer_set - used with lock held.
+ */
+static inline void fc_exch_timer_set_locked(struct fc_exch *ep,
+					    unsigned int timer_msec)
+{
+	if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
+		return;
+
+	FC_DEBUG_EXCH("Exchange (%4x) timed out, notifying the upper layer\n",
+		      ep->xid);
+	if (schedule_delayed_work(&ep->timeout_work,
+				  msecs_to_jiffies(timer_msec)))
+		fc_exch_hold(ep);		/* hold for timer */
+}
+
+/*
+ * Set timer for an exchange.
+ * The time is a minimum delay in milliseconds until the timer fires.
+ * Used for upper level protocols to time out the exchange.
+ * The timer is cancelled when it fires or when the exchange completes.
+ */
+static void fc_exch_timer_set(struct fc_exch *ep, unsigned int timer_msec)
+{
+	spin_lock_bh(&ep->ex_lock);
+	fc_exch_timer_set_locked(ep, timer_msec);
+	spin_unlock_bh(&ep->ex_lock);
+}
+
+int fc_seq_exch_abort(const struct fc_seq *req_sp, unsigned int timer_msec)
+{
+	struct fc_seq *sp;
+	struct fc_exch *ep;
+	struct fc_frame *fp;
+	int error;
+
+	ep = fc_seq_exch(req_sp);
+
+	spin_lock_bh(&ep->ex_lock);
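+	/* nothing to abort if the exchange already completed or is being torn down */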
+	if (ep->esb_stat & (ESB_ST_COMPLETE | ESB_ST_ABNORMAL) ||
+	    ep->state & (FC_EX_DONE | FC_EX_RST_CLEANUP)) {
+		spin_unlock_bh(&ep->ex_lock);
+		return -ENXIO;
+	}
+
+	/*
+	 * Send the abort on a new sequence if possible.
+	 */
+	sp = fc_seq_start_next_locked(&ep->seq);
+	if (!sp) {
+		spin_unlock_bh(&ep->ex_lock);
+		return -ENOMEM;
+	}
+
+	ep->esb_stat |= ESB_ST_SEQ_INIT | ESB_ST_ABNORMAL;
+	if (timer_msec)
+		fc_exch_timer_set_locked(ep, timer_msec);
+	spin_unlock_bh(&ep->ex_lock);
+
+	/*
+	 * If not logged into the fabric, don't send ABTS but leave
+	 * sequence active until next timeout.
+	 */
+	if (!ep->sid)
+		return 0;
+
+	/*
+	 * Send an abort for the sequence that timed out.
+	 */
+	fp = fc_frame_alloc(ep->lp, 0);
+	if (fp) {
+		fc_fill_fc_hdr(fp, FC_RCTL_BA_ABTS, ep->did, ep->sid,
+			       FC_TYPE_BLS, FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
+		error = fc_seq_send(ep->lp, sp, fp);
+	} else
+		error = -ENOBUFS;
+	return error;
+}
+EXPORT_SYMBOL(fc_seq_exch_abort);
+
+/*
+ * Exchange timeout - handle exchange timer expiration.
+ * The timer will have been cancelled before this is called.
+ */
+static void fc_exch_timeout(struct work_struct *work)
+{
+	struct fc_exch *ep = container_of(work, struct fc_exch,
+					  timeout_work.work);
+	struct fc_seq *sp = &ep->seq;
+	void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
+	void *arg;
+	u32 e_stat;
+	int rc = 1;
+
+	spin_lock_bh(&ep->ex_lock);
+	if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
+		goto unlock;
+
+	e_stat = ep->esb_stat;
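+	/* if the exchange has completed, the timer ran only to release the recovery qualifier via RRQ */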
+	if (e_stat & ESB_ST_COMPLETE) {
+		ep->esb_stat = e_stat & ~ESB_ST_REC_QUAL;
+		if (e_stat & ESB_ST_REC_QUAL)
+			fc_exch_rrq(ep);
+		spin_unlock_bh(&ep->ex_lock);
+		goto done;
+	} else {
+		resp = ep->resp;
+		arg = ep->arg;
+		ep->resp = NULL;
+		if (e_stat & ESB_ST_ABNORMAL)
+			rc = fc_exch_done_locked(ep);
+		spin_unlock_bh(&ep->ex_lock);
+		if (!rc)
+			fc_exch_mgr_delete_ep(ep);
+		if (resp)
+			resp(sp, ERR_PTR(-FC_EX_TIMEOUT), arg);
+		fc_seq_exch_abort(sp, 2 * ep->r_a_tov);
+		goto done;
+	}
+unlock:
+	spin_unlock_bh(&ep->ex_lock);
+done:
+	/*
+	 * This release matches the hold taken when the timer was set.
+	 */
+	fc_exch_release(ep);
+}
+
+/*
+ * Allocate a sequence.
+ *
+ * We don't support multiple originated sequences on the same exchange.
+ * By implication, any previously originated sequence on this exchange
+ * is complete, and we reallocate the same sequence.
+ */
+static struct fc_seq *fc_seq_alloc(struct fc_exch *ep, u8 seq_id)
+{
+	struct fc_seq *sp;
+
+	sp = &ep->seq;
+	sp->ssb_stat = 0;
+	sp->cnt = 0;
+	sp->id = seq_id;
+	return sp;
+}
+
+/*
+ * fc_em_alloc_xid - returns an xid based on request type
+ * @mp : ptr to the exchange manager
+ * @fp : ptr to the associated frame
+ *
+ * Check the associated fc_fcp_pkt to get the SCSI command type and
+ * direction, and decide from which range this exchange ID will be
+ * allocated.
+ *
+ * Returns the allocated xid, or 0 if none is available.
+ */
+static u16 fc_em_alloc_xid(struct fc_exch_mgr *mp, const struct fc_frame *fp)
+{
+	u16 xid, min, max;
+	u16 *plast;
+	struct fc_exch *ep = NULL;
+
+	if (mp->max_read) {
+		if (fc_frame_is_read(fp)) {
+			min = mp->min_xid;
+			max = mp->max_read;
+			plast = &mp->last_read;
+		} else {
+			min = mp->max_read + 1;
+			max = mp->max_xid;
+			plast = &mp->last_xid;
+		}
+	} else {
+		min = mp->min_xid;
+		max = mp->max_xid;
+		plast = &mp->last_xid;
+	}
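+	/*
+	 * Scan circularly from just after the last allocated XID in the
+	 * chosen range; an empty slot in the exches array means that XID
+	 * is free.  Arriving back at *plast means the range is exhausted.
+	 */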
+	xid = *plast;
+	do {
+		xid = (xid == max) ? min : xid + 1;
+		ep = mp->exches[xid - mp->min_xid];
+	} while ((ep != NULL) && (xid != *plast));
+
+	if (unlikely(ep))
+		xid = 0;
+	else
+		*plast = xid;
+
+	return xid;
+}
+
+/*
+ * fc_exch_alloc - allocate an exchange.
+ * @mp : ptr to the exchange manager
+ * @fp : ptr to the frame the exchange is for
+ * @xid: input xid
+ *
+ * If the supplied xid is zero, assign the next free exchange ID
+ * from the exchange manager; otherwise use the supplied xid.
+ * Returns with the exch lock held.
+ */
+ */
+struct fc_exch *fc_exch_alloc(struct fc_exch_mgr *mp,
+			      struct fc_frame *fp, u16 xid)
+{
+	struct fc_exch *ep;
+
+	/* allocate memory for exchange */
+	ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
+	if (!ep) {
+		atomic_inc(&mp->stats.no_free_exch);
+		goto out;
+	}
+	memset(ep, 0, sizeof(*ep));
+
+	spin_lock_bh(&mp->em_lock);
+	/* allocate a new xid if the input xid is 0 */
+	if (!xid) {
+		xid = fc_em_alloc_xid(mp, fp);
+		if (!xid) {
+			printk(KERN_ERR "fc_em_alloc_xid() failed\n");
+			goto err;
+		}
+	}
+
+	fc_exch_hold(ep);	/* hold for exch in mp */
+	spin_lock_init(&ep->ex_lock);
+	/*
+	 * Hold the exch lock for the caller, to prevent fc_exch_reset()
+	 * from releasing the exch while the fc_exch_alloc() caller is
+	 * still working on it.
+	 */
+	spin_lock_bh(&ep->ex_lock);
+
+	mp->exches[xid - mp->min_xid] = ep;
+	list_add_tail(&ep->ex_list, &mp->ex_list);
+	fc_seq_alloc(ep, ep->seq_id++);
+	mp->total_exches++;
+	spin_unlock_bh(&mp->em_lock);
+
+	/*
+	 * Update the exchange fields; the exch lock taken above is
+	 * still held, so this is safe after dropping the em_lock.
+	 */
+	ep->oxid = ep->xid = xid;
+	ep->em = mp;
+	ep->lp = mp->lp;
+	ep->f_ctl = FC_FC_FIRST_SEQ;	/* next seq is first seq */
+	ep->rxid = FC_XID_UNKNOWN;
+	ep->class = mp->class;
+	INIT_DELAYED_WORK(&ep->timeout_work, fc_exch_timeout);
+out:
+	return ep;
+err:
+	spin_unlock_bh(&mp->em_lock);
+	atomic_inc(&mp->stats.no_free_exch_xid);
+	mempool_free(ep, mp->ep_pool);
+	return NULL;
+}
+EXPORT_SYMBOL(fc_exch_alloc);
+
+/*
+ * Lookup and hold an exchange.
+ */
+static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid)
+{
+	struct fc_exch *ep = NULL;
+
+	if ((xid >= mp->min_xid) && (xid <= mp->max_xid)) {
+		spin_lock_bh(&mp->em_lock);
+		ep = mp->exches[xid - mp->min_xid];
+		if (ep) {
+			fc_exch_hold(ep);
+			WARN_ON(ep->xid != xid);
+		}
+		spin_unlock_bh(&mp->em_lock);
+	}
+	return ep;
+}
+
+void fc_exch_done(struct fc_seq *sp)
+{
+	struct fc_exch *ep = fc_seq_exch(sp);
+	int rc;
+
+	spin_lock_bh(&ep->ex_lock);
+	rc = fc_exch_done_locked(ep);
+	spin_unlock_bh(&ep->ex_lock);
+	if (!rc)
+		fc_exch_mgr_delete_ep(ep);
+}
+EXPORT_SYMBOL(fc_exch_done);
+
+/*
+ * Allocate a new exchange as responder.
+ * Sets the responder ID in the frame header.
+ */
+static struct fc_exch *fc_exch_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
+{
+	struct fc_exch *ep;
+	struct fc_frame_header *fh;
+	u16 rxid;
+
+	ep = mp->lp->tt.exch_get(mp->lp, fp);
+	if (ep) {
+		ep->class = fc_frame_class(fp);
+
+		/*
+		 * Set EX_CTX indicating we're responding on this exchange.
+		 */
+		ep->f_ctl |= FC_FC_EX_CTX;	/* we're responding */
+		ep->f_ctl &= ~FC_FC_FIRST_SEQ;	/* not new */
+		fh = fc_frame_header_get(fp);
+		ep->sid = ntoh24(fh->fh_d_id);
+		ep->did = ntoh24(fh->fh_s_id);
+		ep->oid = ep->did;
+
+		/*
+		 * Allocated exchange has placed the XID in the
+		 * originator field. Move it to the responder field,
+		 * and set the originator XID from the frame.
+		 */
+		ep->rxid = ep->xid;
+		ep->oxid = ntohs(fh->fh_ox_id);
+		ep->esb_stat |= ESB_ST_RESP | ESB_ST_SEQ_INIT;
+		if ((ntoh24(fh->fh_f_ctl) & FC_FC_SEQ_INIT) == 0)
+			ep->esb_stat &= ~ESB_ST_SEQ_INIT;
+
+		/*
+		 * Set the responder ID in the frame header.
+		 * The old one should've been 0xffff.
+		 * If it isn't, don't assign one.
+		 * Incoming basic link service frames may specify
+		 * a referenced RX_ID.
+		 */
+		if (fh->fh_type != FC_TYPE_BLS) {
+			rxid = ntohs(fh->fh_rx_id);
+			WARN_ON(rxid != FC_XID_UNKNOWN);
+			fh->fh_rx_id = htons(ep->rxid);
+		}
+		fc_exch_hold(ep);	/* hold for caller */
+		spin_unlock_bh(&ep->ex_lock);	/* lock from exch_get */
+	}
+	return ep;
+}
+
+/*
+ * Find a sequence for a received frame where the remote end originated
+ * the sequence.
+ * If the returned fc_pf_rjt_reason is FC_RJT_NONE then this function
+ * holds the ep, which the caller must release.
+ */
+static enum fc_pf_rjt_reason
+fc_seq_lookup_recip(struct fc_exch_mgr *mp, struct fc_frame *fp)
+{
+	struct fc_frame_header *fh = fc_frame_header_get(fp);
+	struct fc_exch *ep = NULL;
+	struct fc_seq *sp = NULL;
+	enum fc_pf_rjt_reason reject = FC_RJT_NONE;
+	u32 f_ctl;
+	u16 xid;
+
+	f_ctl = ntoh24(fh->fh_f_ctl);
+	WARN_ON((f_ctl & FC_FC_SEQ_CTX) != 0);
+
+	/*
+	 * Lookup or create the exchange if we will be creating the sequence.
+	 */
+	if (f_ctl & FC_FC_EX_CTX) {
+		xid = ntohs(fh->fh_ox_id);	/* we originated exch */
+		ep = fc_exch_find(mp, xid);
+		if (!ep) {
+			atomic_inc(&mp->stats.xid_not_found);
+			reject = FC_RJT_OX_ID;
+			goto out;
+		}
+		if (ep->rxid == FC_XID_UNKNOWN)
+			ep->rxid = ntohs(fh->fh_rx_id);
+		else if (ep->rxid != ntohs(fh->fh_rx_id)) {
+			reject = FC_RJT_OX_ID;
+			goto rel;
+		}
+	} else {
+		xid = ntohs(fh->fh_rx_id);	/* we are the responder */
+
+		/*
+		 * Special case for MDS issuing an ELS TEST with a
+		 * bad rxid of 0.
+		 * XXX take this out once we do the proper reject.
+		 */
+		if (xid == 0 && fh->fh_r_ctl == FC_RCTL_ELS_REQ &&
+		    fc_frame_payload_op(fp) == ELS_TEST) {
+			fh->fh_rx_id = htons(FC_XID_UNKNOWN);
+			xid = FC_XID_UNKNOWN;
+		}
+
+		/*
+		 * new sequence - find the exchange
+		 */
+		ep = fc_exch_find(mp, xid);
+		if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
+			if (ep) {
+				atomic_inc(&mp->stats.xid_busy);
+				reject = FC_RJT_RX_ID;
+				goto rel;
+			}
+			ep = fc_exch_resp(mp, fp);
+			if (!ep) {
+				reject = FC_RJT_EXCH_EST;	/* XXX */
+				goto out;
+			}
+			xid = ep->xid;	/* get our XID */
+		} else if (!ep) {
+			atomic_inc(&mp->stats.xid_not_found);
+			reject = FC_RJT_RX_ID;	/* XID not found */
+			goto out;
+		}
+	}
+
+	/*
+	 * At this point, we have the exchange held.
+	 * Find or create the sequence.
+	 */
+	if (fc_sof_is_init(fr_sof(fp))) {
+		sp = fc_seq_start_next(&ep->seq);
+		if (!sp) {
+			reject = FC_RJT_SEQ_XS;	/* exchange shortage */
+			goto rel;
+		}
+		sp->id = fh->fh_seq_id;
+		sp->ssb_stat |= SSB_ST_RESP;
+	} else {
+		sp = &ep->seq;
+		if (sp->id != fh->fh_seq_id) {
+			atomic_inc(&mp->stats.seq_not_found);
+			reject = FC_RJT_SEQ_ID;	/* sequence/exch should exist */
+			goto rel;
+		}
+	}
+	WARN_ON(ep != fc_seq_exch(sp));
+
+	if (f_ctl & FC_FC_SEQ_INIT)
+		ep->esb_stat |= ESB_ST_SEQ_INIT;
+
+	fr_seq(fp) = sp;
+out:
+	return reject;
+rel:
+	fc_exch_done(&ep->seq);
+	fc_exch_release(ep);	/* hold from fc_exch_find/fc_exch_resp */
+	return reject;
+}
+
+/*
+ * Find the sequence for a frame being received.
+ * We originated the sequence, so it should be found.
+ * We may or may not have originated the exchange.
+ * Does not hold the sequence for the caller.
+ */
+static struct fc_seq *fc_seq_lookup_orig(struct fc_exch_mgr *mp,
+					 struct fc_frame *fp)
+{
+	struct fc_frame_header *fh = fc_frame_header_get(fp);
+	struct fc_exch *ep;
+	struct fc_seq *sp = NULL;
+	u32 f_ctl;
+	u16 xid;
+
+	f_ctl = ntoh24(fh->fh_f_ctl);
+	WARN_ON((f_ctl & FC_FC_SEQ_CTX) != FC_FC_SEQ_CTX);
+	xid = ntohs((f_ctl & FC_FC_EX_CTX) ? fh->fh_ox_id : fh->fh_rx_id);
+	ep = fc_exch_find(mp, xid);
+	if (!ep)
+		return NULL;
+	if (ep->seq.id == fh->fh_seq_id) {
+		/*
+		 * Save the RX_ID if we didn't previously know it.
+		 */
+		sp = &ep->seq;
+		if ((f_ctl & FC_FC_EX_CTX) != 0 &&
+		    ep->rxid == FC_XID_UNKNOWN) {
+			ep->rxid = ntohs(fh->fh_rx_id);
+		}
+	}
+	fc_exch_release(ep);
+	return sp;
+}
+
+/*
+ * Set addresses for an exchange.
+ * Note this must be done before the first sequence of the exchange is sent.
+ */
+static void fc_exch_set_addr(struct fc_exch *ep,
+			     u32 orig_id, u32 resp_id)
+{
+	ep->oid = orig_id;
+	if (ep->esb_stat & ESB_ST_RESP) {
+		ep->sid = resp_id;
+		ep->did = orig_id;
+	} else {
+		ep->sid = orig_id;
+		ep->did = resp_id;
+	}
+}
+
+static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp)
+{
+	struct fc_exch *ep = fc_seq_exch(sp);
+
+	sp = fc_seq_alloc(ep, ep->seq_id++);
+	FC_DEBUG_EXCH("exch %4x f_ctl %6x seq %2x\n",
+		      ep->xid, ep->f_ctl, sp->id);
+	return sp;
+}
+
+/*
+ * Allocate a new sequence on the same exchange as the supplied sequence.
+ * This will never return NULL.
+ */
+struct fc_seq *fc_seq_start_next(struct fc_seq *sp)
+{
+	struct fc_exch *ep = fc_seq_exch(sp);
+
+	spin_lock_bh(&ep->ex_lock);
+	WARN_ON((ep->esb_stat & ESB_ST_COMPLETE) != 0);
+	sp = fc_seq_start_next_locked(sp);
+	spin_unlock_bh(&ep->ex_lock);
+
+	return sp;
+}
+EXPORT_SYMBOL(fc_seq_start_next);
+
+int fc_seq_send(struct fc_lport *lp, struct fc_seq *sp, struct fc_frame *fp)
+{
+	struct fc_exch *ep;
+	struct fc_frame_header *fh = fc_frame_header_get(fp);
+	int error;
+	u32	f_ctl;
+
+	ep = fc_seq_exch(sp);
+	WARN_ON((ep->esb_stat & ESB_ST_SEQ_INIT) != ESB_ST_SEQ_INIT);
+
+	f_ctl = ntoh24(fh->fh_f_ctl);
+	fc_exch_setup_hdr(ep, fp, f_ctl);
+
+	/*
+	 * Update the sequence count.  When sequence offload is enabled
+	 * by the LLD, a single send may carry multiple FC frames' worth
+	 * of payload, so count frames by the maximum payload size.
+	 */
+	if (fr_max_payload(fp))
+		sp->cnt += DIV_ROUND_UP((fr_len(fp) - sizeof(*fh)),
+					fr_max_payload(fp));
+	else
+		sp->cnt++;
+
+	/*
+	 * Send the frame.
+	 */
+	error = lp->tt.frame_send(lp, fp);
+
+	/*
+	 * Update the exchange and sequence flags,
+	 * assuming all frames for the sequence have been sent.
+	 * We can only be called to send once for each sequence.
+	 */
+	spin_lock_bh(&ep->ex_lock);
+	ep->f_ctl = f_ctl & ~FC_FC_FIRST_SEQ;	/* not first seq */
+	if (f_ctl & (FC_FC_END_SEQ | FC_FC_SEQ_INIT))
+		ep->esb_stat &= ~ESB_ST_SEQ_INIT;
+	spin_unlock_bh(&ep->ex_lock);
+	return error;
+}
+EXPORT_SYMBOL(fc_seq_send);
+
+void fc_seq_els_rsp_send(struct fc_seq *sp, enum fc_els_cmd els_cmd,
+			 struct fc_seq_els_data *els_data)
+{
+	switch (els_cmd) {
+	case ELS_LS_RJT:
+		fc_seq_ls_rjt(sp, els_data->reason, els_data->explan);
+		break;
+	case ELS_LS_ACC:
+		fc_seq_ls_acc(sp);
+		break;
+	case ELS_RRQ:
+		fc_exch_els_rrq(sp, els_data->fp);
+		break;
+	case ELS_REC:
+		fc_exch_els_rec(sp, els_data->fp);
+		break;
+	default:
+		FC_DBG("Invalid ELS CMD:%x\n", els_cmd);
+	}
+}
+EXPORT_SYMBOL(fc_seq_els_rsp_send);
+
+/*
+ * Send a sequence, which is also the last sequence in the exchange.
+ */
+static void fc_seq_send_last(struct fc_seq *sp, struct fc_frame *fp,
+			     enum fc_rctl rctl, enum fc_fh_type fh_type)
+{
+	u32 f_ctl;
+	struct fc_exch *ep = fc_seq_exch(sp);
+
+	f_ctl = FC_FC_LAST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT;
+	f_ctl |= ep->f_ctl;
+	fc_fill_fc_hdr(fp, rctl, ep->did, ep->sid, fh_type, f_ctl, 0);
+	fc_seq_send(ep->lp, sp, fp);
+}
+
+/*
+ * Send ACK_1 (or equiv.) indicating we received something.
+ * The frame we're acking is supplied.
+ */
+static void fc_seq_send_ack(struct fc_seq *sp, const struct fc_frame *rx_fp)
+{
+	struct fc_frame *fp;
+	struct fc_frame_header *rx_fh;
+	struct fc_frame_header *fh;
+	struct fc_exch *ep = fc_seq_exch(sp);
+	struct fc_lport *lp = ep->lp;
+	unsigned int f_ctl;
+
+	/*
+	 * Don't send ACKs for class 3.
+	 */
+	if (fc_sof_needs_ack(fr_sof(rx_fp))) {
+		fp = fc_frame_alloc(lp, 0);
+		if (!fp)
+			return;
+
+		fh = fc_frame_header_get(fp);
+		fh->fh_r_ctl = FC_RCTL_ACK_1;
+		fh->fh_type = FC_TYPE_BLS;
+
+		/*
+		 * Form f_ctl by inverting EX_CTX and SEQ_CTX (bits 23, 22).
+		 * Echo FIRST_SEQ, LAST_SEQ, END_SEQ, END_CONN, SEQ_INIT.
+		 * Bits 9-8 are meaningful (retransmitted or unidirectional).
+		 * Last ACK uses bits 7-6 (continue sequence),
+		 * bits 5-4 are meaningful (what kind of ACK to use).
+		 */
+		rx_fh = fc_frame_header_get(rx_fp);
+		f_ctl = ntoh24(rx_fh->fh_f_ctl);
+		f_ctl &= FC_FC_EX_CTX | FC_FC_SEQ_CTX |
+			FC_FC_FIRST_SEQ | FC_FC_LAST_SEQ |
+			FC_FC_END_SEQ | FC_FC_END_CONN | FC_FC_SEQ_INIT |
+			FC_FC_RETX_SEQ | FC_FC_UNI_TX;
+		f_ctl ^= FC_FC_EX_CTX | FC_FC_SEQ_CTX;
+		hton24(fh->fh_f_ctl, f_ctl);
+
+		fc_exch_setup_hdr(ep, fp, f_ctl);
+		fh->fh_seq_id = rx_fh->fh_seq_id;
+		fh->fh_seq_cnt = rx_fh->fh_seq_cnt;
+		fh->fh_parm_offset = htonl(1);	/* ack single frame */
+
+		fr_sof(fp) = fr_sof(rx_fp);
+		if (f_ctl & FC_FC_END_SEQ)
+			fr_eof(fp) = FC_EOF_T;
+		else
+			fr_eof(fp) = FC_EOF_N;
+
+		(void) lp->tt.frame_send(lp, fp);
+	}
+}
+
+/*
+ * Send BLS Reject.
+ * This is for rejecting BA_ABTS only.
+ */
+static void
+fc_exch_send_ba_rjt(struct fc_frame *rx_fp, enum fc_ba_rjt_reason reason,
+		    enum fc_ba_rjt_explan explan)
+{
+	struct fc_frame *fp;
+	struct fc_frame_header *rx_fh;
+	struct fc_frame_header *fh;
+	struct fc_ba_rjt *rp;
+	struct fc_lport *lp;
+	unsigned int f_ctl;
+
+	lp = fr_dev(rx_fp);
+	fp = fc_frame_alloc(lp, sizeof(*rp));
+	if (!fp)
+		return;
+	fh = fc_frame_header_get(fp);
+	rx_fh = fc_frame_header_get(rx_fp);
+
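+	/* zero the new frame's header and BA_RJT payload in one pass */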
+	memset(fh, 0, sizeof(*fh) + sizeof(*rp));
+
+	rp = fc_frame_payload_get(fp, sizeof(*rp));
+	rp->br_reason = reason;
+	rp->br_explan = explan;
+
+	/*
+	 * seq_id, cs_ctl, df_ctl and param/offset are zero.
+	 */
+	memcpy(fh->fh_s_id, rx_fh->fh_d_id, 3);
+	memcpy(fh->fh_d_id, rx_fh->fh_s_id, 3);
+	fh->fh_ox_id = rx_fh->fh_rx_id;
+	fh->fh_rx_id = rx_fh->fh_ox_id;
+	fh->fh_seq_cnt = rx_fh->fh_seq_cnt;
+	fh->fh_r_ctl = FC_RCTL_BA_RJT;
+	fh->fh_type = FC_TYPE_BLS;
+
+	/*
+	 * Form f_ctl by inverting EX_CTX and SEQ_CTX (bits 23, 22).
+	 * Echo FIRST_SEQ, LAST_SEQ, END_SEQ, END_CONN, SEQ_INIT.
+	 * Bits 9-8 are meaningful (retransmitted or unidirectional).
+	 * Last ACK uses bits 7-6 (continue sequence),
+	 * bits 5-4 are meaningful (what kind of ACK to use).
+	 * Always set LAST_SEQ, END_SEQ.
+	 */
+	f_ctl = ntoh24(rx_fh->fh_f_ctl);
+	f_ctl &= FC_FC_EX_CTX | FC_FC_SEQ_CTX |
+		FC_FC_END_CONN | FC_FC_SEQ_INIT |
+		FC_FC_RETX_SEQ | FC_FC_UNI_TX;
+	f_ctl ^= FC_FC_EX_CTX | FC_FC_SEQ_CTX;
+	f_ctl |= FC_FC_LAST_SEQ | FC_FC_END_SEQ;
+	f_ctl &= ~FC_FC_FIRST_SEQ;
+	hton24(fh->fh_f_ctl, f_ctl);
+
+	fr_sof(fp) = fc_sof_class(fr_sof(rx_fp));
+	fr_eof(fp) = FC_EOF_T;
+	if (fc_sof_needs_ack(fr_sof(fp)))
+		fr_eof(fp) = FC_EOF_N;
+
+	(void) lp->tt.frame_send(lp, fp);
+}
+
+/*
+ * Handle an incoming ABTS.  This would be for target mode usually,
+ * but could be due to lost FCP transfer ready, confirm or RRQ.
+ * We always handle this as an exchange abort, ignoring the parameter.
+ */
+static void fc_exch_recv_abts(struct fc_exch *ep, struct fc_frame *rx_fp)
+{
+	struct fc_frame *fp;
+	struct fc_ba_acc *ap;
+	struct fc_frame_header *fh;
+	struct fc_seq *sp;
+
+	if (!ep)
+		goto reject;
+	spin_lock_bh(&ep->ex_lock);
+	if (ep->esb_stat & ESB_ST_COMPLETE) {
+		spin_unlock_bh(&ep->ex_lock);
+		goto reject;
+	}
+	if (!(ep->esb_stat & ESB_ST_REC_QUAL))
+		fc_exch_hold(ep);		/* hold for REC_QUAL */
+	ep->esb_stat |= ESB_ST_ABNORMAL | ESB_ST_REC_QUAL;
+	fc_exch_timer_set_locked(ep, ep->r_a_tov);
+
+	fp = fc_frame_alloc(ep->lp, sizeof(*ap));
+	if (!fp) {
+		spin_unlock_bh(&ep->ex_lock);
+		goto free;
+	}
+	fh = fc_frame_header_get(fp);
+	ap = fc_frame_payload_get(fp, sizeof(*ap));
+	memset(ap, 0, sizeof(*ap));
+	sp = &ep->seq;
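+	/*
+	 * If we were the recipient of the aborted sequence, report its
+	 * SEQ_ID and SEQ_CNT range in the BA_ACC so the originator can
+	 * establish a recovery qualifier.
+	 */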
+	ap->ba_high_seq_cnt = htons(0xffff);
+	if (sp->ssb_stat & SSB_ST_RESP) {
+		ap->ba_seq_id = sp->id;
+		ap->ba_seq_id_val = FC_BA_SEQ_ID_VAL;
+		ap->ba_high_seq_cnt = fh->fh_seq_cnt;
+		ap->ba_low_seq_cnt = htons(sp->cnt);
+	}
+	sp = fc_seq_start_next(sp);
+	spin_unlock_bh(&ep->ex_lock);
+	fc_seq_send_last(sp, fp, FC_RCTL_BA_ACC, FC_TYPE_BLS);
+	fc_frame_free(rx_fp);
+	return;
+
+reject:
+	fc_exch_send_ba_rjt(rx_fp, FC_BA_RJT_UNABLE, FC_BA_RJT_INV_XID);
+free:
+	fc_frame_free(rx_fp);
+}
+
+/*
+ * Handle receive where the other end is originating the sequence.
+ */
+static void fc_exch_recv_req(struct fc_lport *lp, struct fc_exch_mgr *mp,
+			     struct fc_frame *fp)
+{
+	struct fc_frame_header *fh = fc_frame_header_get(fp);
+	struct fc_seq *sp = NULL;
+	struct fc_exch *ep = NULL;
+	enum fc_sof sof;
+	enum fc_eof eof;
+	u32 f_ctl;
+	enum fc_pf_rjt_reason reject;
+
+	fr_seq(fp) = NULL;
+	reject = fc_seq_lookup_recip(mp, fp);
+	if (reject == FC_RJT_NONE) {
+		sp = fr_seq(fp);	/* sequence will be held */
+		ep = fc_seq_exch(sp);
+		sof = fr_sof(fp);
+		eof = fr_eof(fp);
+		f_ctl = ntoh24(fh->fh_f_ctl);
+		fc_seq_send_ack(sp, fp);
+
+		/*
+		 * Call the receive function.
+		 *
+		 * The receive function may allocate a new sequence
+		 * over the old one, so we shouldn't change the
+		 * sequence after this.
+		 *
+		 * The frame will be freed by the receive function.
+		 * If the exchange's resp handler is set, call it;
+		 * otherwise hand the frame to the local port.
+		 */
+		if (ep->resp)
+			ep->resp(sp, fp, ep->arg);
+		else
+			lp->tt.lport_recv(lp, sp, fp);
+		fc_exch_release(ep);	/* release from lookup */
+	} else {
+		FC_DEBUG_EXCH("exch/seq lookup failed: reject %x\n", reject);
+		fc_frame_free(fp);
+	}
+}
+
+/*
+ * Handle receive where the other end is originating the sequence in
+ * response to our exchange.
+ */
+static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
+{
+	struct fc_frame_header *fh = fc_frame_header_get(fp);
+	struct fc_seq *sp;
+	struct fc_exch *ep;
+	enum fc_sof sof;
+	u32 f_ctl;
+	void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
+	void *ex_resp_arg;
+	int rc;
+
+	ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
+	if (!ep) {
+		atomic_inc(&mp->stats.xid_not_found);
+		goto out;
+	}
+	if (ep->rxid == FC_XID_UNKNOWN)
+		ep->rxid = ntohs(fh->fh_rx_id);
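+	/*
+	 * Verify that the frame's addresses match the exchange; FLOGI
+	 * exchanges are exempt, since the response may carry an S_ID we
+	 * don't know yet.
+	 */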
+	if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
+		atomic_inc(&mp->stats.xid_not_found);
+		goto rel;
+	}
+	if (ep->did != ntoh24(fh->fh_s_id) &&
+	    ep->did != FC_FID_FLOGI) {
+		atomic_inc(&mp->stats.xid_not_found);
+		goto rel;
+	}
+	sof = fr_sof(fp);
+	if (fc_sof_is_init(sof)) {
+		sp = fc_seq_start_next(&ep->seq);
+		sp->id = fh->fh_seq_id;
+		sp->ssb_stat |= SSB_ST_RESP;
+	} else {
+		sp = &ep->seq;
+		if (sp->id != fh->fh_seq_id) {
+			atomic_inc(&mp->stats.seq_not_found);
+			goto rel;
+		}
+	}
+	f_ctl = ntoh24(fh->fh_f_ctl);
+	fr_seq(fp) = sp;
+	if (f_ctl & FC_FC_SEQ_INIT)
+		ep->esb_stat |= ESB_ST_SEQ_INIT;
+
+	if (fc_sof_needs_ack(sof))
+		fc_seq_send_ack(sp, fp);
+	resp = ep->resp;
+	ex_resp_arg = ep->arg;
+
+	if (fh->fh_type != FC_TYPE_FCP && fr_eof(fp) == FC_EOF_T &&
+	    (f_ctl & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==
+	    (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) {
+		spin_lock_bh(&ep->ex_lock);
+		rc = fc_exch_done_locked(ep);
+		WARN_ON(fc_seq_exch(sp) != ep);
+		spin_unlock_bh(&ep->ex_lock);
+		if (!rc)
+			fc_exch_mgr_delete_ep(ep);
+	}
+
+	/*
+	 * Call the receive function.
+	 * The sequence is held (has a refcnt) for us,
+	 * but not for the receive function.
+	 *
+	 * The receive function may allocate a new sequence
+	 * over the old one, so we shouldn't change the
+	 * sequence after this.
+	 *
+	 * The frame will be freed by the receive function.
+	 * If the exchange's resp handler is set, call it;
+	 * otherwise free the frame.
+	 */
+	if (resp)
+		resp(sp, fp, ex_resp_arg);
+	else
+		fc_frame_free(fp);
+	fc_exch_release(ep);
+	return;
+rel:
+	fc_exch_release(ep);
+out:
+	fc_frame_free(fp);
+}
+
+/*
+ * Handle receive for a sequence where other end is responding to our sequence.
+ */
+static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
+{
+	struct fc_seq *sp;
+
+	sp = fc_seq_lookup_orig(mp, fp);	/* doesn't hold sequence */
+	if (!sp) {
+		atomic_inc(&mp->stats.xid_not_found);
+		FC_DEBUG_EXCH("seq lookup failed\n");
+	} else {
+		atomic_inc(&mp->stats.non_bls_resp);
+		FC_DEBUG_EXCH("non-BLS response to sequence");
+	}
+	fc_frame_free(fp);
+}
+
+/*
+ * Handle the response to an ABTS for exchange or sequence.
+ * This can be BA_ACC or BA_RJT.
+ */
+static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
+{
+	void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
+	void *ex_resp_arg;
+	struct fc_frame_header *fh;
+	struct fc_ba_acc *ap;
+	struct fc_seq *sp;
+	u16 low;
+	u16 high;
+	int rc = 1, has_rec = 0;
+
+	fh = fc_frame_header_get(fp);
+	FC_DEBUG_EXCH("exch: BLS rctl %x - %s\n",
+		      fh->fh_r_ctl, fc_exch_rctl_name(fh->fh_r_ctl));
+
+	if (cancel_delayed_work_sync(&ep->timeout_work))
+		fc_exch_release(ep);	/* release from pending timer hold */
+
+	spin_lock_bh(&ep->ex_lock);
+	switch (fh->fh_r_ctl) {
+	case FC_RCTL_BA_ACC:
+		ap = fc_frame_payload_get(fp, sizeof(*ap));
+		if (!ap)
+			break;
+
+		/*
+		 * Decide whether to establish a Recovery Qualifier.
+		 * We do this if there is a non-empty SEQ_CNT range and
+		 * SEQ_ID is the same as the one we aborted.
+		 */
+		low = ntohs(ap->ba_low_seq_cnt);
+		high = ntohs(ap->ba_high_seq_cnt);
+		if ((ep->esb_stat & ESB_ST_REC_QUAL) == 0 &&
+		    (ap->ba_seq_id_val != FC_BA_SEQ_ID_VAL ||
+		     ap->ba_seq_id == ep->seq_id) && low != high) {
+			ep->esb_stat |= ESB_ST_REC_QUAL;
+			fc_exch_hold(ep);  /* hold for recovery qualifier */
+			has_rec = 1;
+		}
+		break;
+	case FC_RCTL_BA_RJT:
+		break;
+	default:
+		break;
+	}
+
+	resp = ep->resp;
+	ex_resp_arg = ep->arg;
+
+	/*
+	 * TODO: do we need to do some other checks here?  Can we reuse
+	 * more of fc_exch_recv_seq_resp()?
+	 */
+	sp = &ep->seq;
+	/*
+	 * TODO: do we want to check END_SEQ as well as LAST_SEQ here?
+	 */
+	if (ep->fh_type != FC_TYPE_FCP &&
+	    ntoh24(fh->fh_f_ctl) & FC_FC_LAST_SEQ)
+		rc = fc_exch_done_locked(ep);
+	spin_unlock_bh(&ep->ex_lock);
+	if (!rc)
+		fc_exch_mgr_delete_ep(ep);
+
+	if (resp)
+		resp(sp, fp, ex_resp_arg);
+	else
+		fc_frame_free(fp);
+
+	if (has_rec)
+		fc_exch_timer_set(ep, ep->r_a_tov);
+
+}
+
+/*
+ * Receive BLS sequence.
+ * This is always a sequence initiated by the remote side.
+ * We may be either the originator or recipient of the exchange.
+ */
+static void fc_exch_recv_bls(struct fc_exch_mgr *mp, struct fc_frame *fp)
+{
+	struct fc_frame_header *fh;
+	struct fc_exch *ep;
+	u32 f_ctl;
+
+	fh = fc_frame_header_get(fp);
+	f_ctl = ntoh24(fh->fh_f_ctl);
+	fr_seq(fp) = NULL;
+
+	ep = fc_exch_find(mp, (f_ctl & FC_FC_EX_CTX) ?
+			  ntohs(fh->fh_ox_id) : ntohs(fh->fh_rx_id));
+	if (ep && (f_ctl & FC_FC_SEQ_INIT)) {
+		spin_lock_bh(&ep->ex_lock);
+		ep->esb_stat |= ESB_ST_SEQ_INIT;
+		spin_unlock_bh(&ep->ex_lock);
+	}
+	if (f_ctl & FC_FC_SEQ_CTX) {
+		/*
+		 * A response to a sequence we initiated.
+		 * This should only be ACKs for class 2 or F.
+		 */
+		switch (fh->fh_r_ctl) {
+		case FC_RCTL_ACK_1:
+		case FC_RCTL_ACK_0:
+			break;
+		default:
+			FC_DEBUG_EXCH("BLS rctl %x - %s received",
+				      fh->fh_r_ctl,
+				      fc_exch_rctl_name(fh->fh_r_ctl));
+			break;
+		}
+		fc_frame_free(fp);
+	} else {
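+		/*
+		 * A BLS request or a reply to our ABTS: BA_ACC/BA_RJT
+		 * complete our abort, while an incoming BA_ABTS aborts
+		 * the exchange from the remote end.
+		 */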
+		switch (fh->fh_r_ctl) {
+		case FC_RCTL_BA_RJT:
+		case FC_RCTL_BA_ACC:
+			if (ep)
+				fc_exch_abts_resp(ep, fp);
+			else
+				fc_frame_free(fp);
+			break;
+		case FC_RCTL_BA_ABTS:
+			fc_exch_recv_abts(ep, fp);
+			break;
+		default:			/* ignore junk */
+			fc_frame_free(fp);
+			break;
+		}
+	}
+	if (ep)
+		fc_exch_release(ep);	/* release hold taken by fc_exch_find */
+}
+
+/*
+ * Accept sequence with LS_ACC.
+ * If this fails due to allocation or transmit congestion, assume the
+ * originator will repeat the sequence.
+ */
+static void fc_seq_ls_acc(struct fc_seq *req_sp)
+{
+	struct fc_seq *sp;
+	struct fc_els_ls_acc *acc;
+	struct fc_frame *fp;
+
+	sp = fc_seq_start_next(req_sp);
+	fp = fc_frame_alloc(fc_seq_exch(sp)->lp, sizeof(*acc));
+	if (fp) {
+		acc = fc_frame_payload_get(fp, sizeof(*acc));
+		memset(acc, 0, sizeof(*acc));
+		acc->la_cmd = ELS_LS_ACC;
+		fc_seq_send_last(sp, fp, FC_RCTL_ELS_REP, FC_TYPE_ELS);
+	}
+}
+
+/*
+ * Reject sequence with ELS LS_RJT.
+ * If this fails due to allocation or transmit congestion, assume the
+ * originator will repeat the sequence.
+ */
+static void fc_seq_ls_rjt(struct fc_seq *req_sp, enum fc_els_rjt_reason reason,
+			  enum fc_els_rjt_explan explan)
+{
+	struct fc_seq *sp;
+	struct fc_els_ls_rjt *rjt;
+	struct fc_frame *fp;
+
+	sp = fc_seq_start_next(req_sp);
+	fp = fc_frame_alloc(fc_seq_exch(sp)->lp, sizeof(*rjt));
+	if (fp) {
+		rjt = fc_frame_payload_get(fp, sizeof(*rjt));
+		memset(rjt, 0, sizeof(*rjt));
+		rjt->er_cmd = ELS_LS_RJT;
+		rjt->er_reason = reason;
+		rjt->er_explan = explan;
+		fc_seq_send_last(sp, fp, FC_RCTL_ELS_REP, FC_TYPE_ELS);
+	}
+}
+
+static void fc_exch_reset(struct fc_exch *ep)
+{
+	struct fc_seq *sp;
+	void (*resp)(struct fc_seq *, struct fc_frame *, void *);
+	void *arg;
+	int rc = 1;
+
+	spin_lock_bh(&ep->ex_lock);
+	ep->state |= FC_EX_RST_CLEANUP;
+	/*
+	 * We really want to call del_timer_sync() here, but cannot: the
+	 * lport may call us with the lport lock held, and some resp
+	 * functions also grab the lport lock, which could deadlock.
+	 */
+	if (cancel_delayed_work(&ep->timeout_work))
+		atomic_dec(&ep->ex_refcnt);	/* drop hold for timer */
+	resp = ep->resp;
+	ep->resp = NULL;
+	if (ep->esb_stat & ESB_ST_REC_QUAL)
+		atomic_dec(&ep->ex_refcnt);	/* drop hold for rec_qual */
+	ep->esb_stat &= ~ESB_ST_REC_QUAL;
+	arg = ep->arg;
+	sp = &ep->seq;
+	rc = fc_exch_done_locked(ep);
+	spin_unlock_bh(&ep->ex_lock);
+	if (!rc)
+		fc_exch_mgr_delete_ep(ep);
+
+	if (resp)
+		resp(sp, ERR_PTR(-FC_EX_CLOSED), arg);
+}
+
+/*
+ * Reset an exchange manager, releasing all sequences and exchanges.
+ * If sid is non-zero, reset only exchanges we source from that FID.
+ * If did is non-zero, reset only exchanges destined to that FID.
+ */
+void fc_exch_mgr_reset(struct fc_exch_mgr *mp, u32 sid, u32 did)
+{
+	struct fc_exch *ep;
+	struct fc_exch *next;
+
+	spin_lock_bh(&mp->em_lock);
+restart:
+	list_for_each_entry_safe(ep, next, &mp->ex_list, ex_list) {
+		if ((sid == 0 || sid == ep->sid) &&
+		    (did == 0 || did == ep->did)) {
+			fc_exch_hold(ep);
+			spin_unlock_bh(&mp->em_lock);
+
+			fc_exch_reset(ep);
+
+			fc_exch_release(ep);
+			spin_lock_bh(&mp->em_lock);
+
+			/*
+			 * Must restart the loop in case multiple eps
+			 * were released while the lock was dropped.
+			 */
+			goto restart;
+		}
+	}
+	spin_unlock_bh(&mp->em_lock);
+}
+EXPORT_SYMBOL(fc_exch_mgr_reset);
+
+/*
+ * Handle incoming ELS REC - Read Exchange Concise.
+ * Note that the requesting port may be different from the S_ID in the request.
+ */
+static void fc_exch_els_rec(struct fc_seq *sp, struct fc_frame *rfp)
+{
+	struct fc_frame *fp;
+	struct fc_exch *ep;
+	struct fc_exch_mgr *em;
+	struct fc_els_rec *rp;
+	struct fc_els_rec_acc *acc;
+	enum fc_els_rjt_reason reason = ELS_RJT_LOGIC;
+	enum fc_els_rjt_explan explan;
+	u32 sid;
+	u16 rxid;
+	u16 oxid;
+
+	rp = fc_frame_payload_get(rfp, sizeof(*rp));
+	explan = ELS_EXPL_INV_LEN;
+	if (!rp)
+		goto reject;
+	sid = ntoh24(rp->rec_s_id);
+	rxid = ntohs(rp->rec_rx_id);
+	oxid = ntohs(rp->rec_ox_id);
+
+	/*
+	 * Currently it's hard to find the local S_ID from the exchange
+	 * manager.  This will eventually be fixed, but for now it's easier
+	 * to look up the subject exchange twice, once as if we were
+	 * the initiator, and then again as if we weren't.
+	 */
+	em = fc_seq_exch(sp)->em;
+	ep = fc_exch_find(em, oxid);
+	explan = ELS_EXPL_OXID_RXID;
+	if (ep && ep->oid == sid) {
+		if (ep->rxid != FC_XID_UNKNOWN &&
+		    rxid != FC_XID_UNKNOWN &&
+		    ep->rxid != rxid)
+			goto rel;
+	} else {
+		if (ep)
+			fc_exch_release(ep);
+		ep = NULL;
+		if (rxid != FC_XID_UNKNOWN)
+			ep = fc_exch_find(em, rxid);
+		if (!ep)
+			goto reject;
+	}
+
+	fp = fc_frame_alloc(fc_seq_exch(sp)->lp, sizeof(*acc));
+	if (!fp) {
+		fc_exch_done(sp);
+		goto out;
+	}
+	sp = fc_seq_start_next(sp);
+	acc = fc_frame_payload_get(fp, sizeof(*acc));
+	memset(acc, 0, sizeof(*acc));
+	acc->reca_cmd = ELS_LS_ACC;
+	acc->reca_ox_id = rp->rec_ox_id;
+	memcpy(acc->reca_ofid, rp->rec_s_id, 3);
+	acc->reca_rx_id = htons(ep->rxid);
+	if (ep->sid == ep->oid)
+		hton24(acc->reca_rfid, ep->did);
+	else
+		hton24(acc->reca_rfid, ep->sid);
+	acc->reca_fc4value = htonl(ep->seq.rec_data);
+	acc->reca_e_stat = htonl(ep->esb_stat & (ESB_ST_RESP |
+						 ESB_ST_SEQ_INIT |
+						 ESB_ST_COMPLETE));
+	sp = fc_seq_start_next(sp);
+	fc_seq_send_last(sp, fp, FC_RCTL_ELS_REP, FC_TYPE_ELS);
+out:
+	fc_exch_release(ep);
+	fc_frame_free(rfp);
+	return;
+
+rel:
+	fc_exch_release(ep);
+reject:
+	fc_seq_ls_rjt(sp, reason, explan);
+	fc_frame_free(rfp);
+}
+
+/*
+ * Handle the response to an RRQ.
+ * Not much to do here; errors should eventually be reported.
+ *
+ * TODO: fix the error handler.
+ */
+static void fc_exch_rrq_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg)
+{
+	struct fc_exch *aborted_ep = arg;
+	unsigned int op;
+
+	if (IS_ERR(fp)) {
+		int err = PTR_ERR(fp);
+
+		if (err == -FC_EX_CLOSED)
+			goto cleanup;
+		FC_DBG("Cannot process RRQ, because of frame error %d\n", err);
+		return;
+	}
+
+	op = fc_frame_payload_op(fp);
+	fc_frame_free(fp);
+
+	switch (op) {
+	case ELS_LS_RJT:
+		FC_DBG("LS_RJT for RRQ");
+		/* fall through */
+	case ELS_LS_ACC:
+		goto cleanup;
+	default:
+		FC_DBG("unexpected response op %x for RRQ", op);
+		return;
+	}
+
+cleanup:
+	fc_exch_done(&aborted_ep->seq);
+	/* drop hold for rec qual */
+	fc_exch_release(aborted_ep);
+}
+
+/*
+ * Send ELS RRQ - Reinstate Recovery Qualifier.
+ * This tells the remote port to stop blocking the use of
+ * the exchange and the seq_cnt range.
+ */
+static void fc_exch_rrq(struct fc_exch *ep)
+{
+	struct fc_lport *lp;
+	struct fc_els_rrq *rrq;
+	struct fc_frame *fp;
+	struct fc_seq *rrq_sp;
+	u32 did;
+
+	lp = ep->lp;
+
+	fp = fc_frame_alloc(lp, sizeof(*rrq));
+	if (!fp)
+		return;
+	rrq = fc_frame_payload_get(fp, sizeof(*rrq));
+	memset(rrq, 0, sizeof(*rrq));
+	rrq->rrq_cmd = ELS_RRQ;
+	hton24(rrq->rrq_s_id, ep->sid);
+	rrq->rrq_ox_id = htons(ep->oxid);
+	rrq->rrq_rx_id = htons(ep->rxid);
+
+	did = ep->did;
+	if (ep->esb_stat & ESB_ST_RESP)
+		did = ep->sid;
+
+	fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, did,
+		       fc_host_port_id(lp->host), FC_TYPE_ELS,
+		       FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
+
+	rrq_sp = fc_exch_seq_send(lp, fp, fc_exch_rrq_resp, NULL, ep,
+				  lp->e_d_tov);
+	if (!rrq_sp) {
+		ep->esb_stat |= ESB_ST_REC_QUAL;
+		fc_exch_timer_set_locked(ep, ep->r_a_tov);
+		return;
+	}
+}
+
+/*
+ * Handle incoming ELS RRQ - Reinstate Recovery Qualifier.
+ */
+static void fc_exch_els_rrq(struct fc_seq *sp, struct fc_frame *fp)
+{
+	struct fc_exch *ep;		/* request or subject exchange */
+	struct fc_els_rrq *rp;
+	u32 sid;
+	u16 xid;
+	enum fc_els_rjt_explan explan;
+
+	rp = fc_frame_payload_get(fp, sizeof(*rp));
+	explan = ELS_EXPL_INV_LEN;
+	if (!rp)
+		goto reject;
+
+	/*
+	 * lookup subject exchange.
+	 */
+	ep = fc_seq_exch(sp);
+	sid = ntoh24(rp->rrq_s_id);		/* subject source */
+	xid = ep->did == sid ? ntohs(rp->rrq_ox_id) : ntohs(rp->rrq_rx_id);
+	ep = fc_exch_find(ep->em, xid);
+
+	explan = ELS_EXPL_OXID_RXID;
+	if (!ep)
+		goto reject;
+	spin_lock_bh(&ep->ex_lock);
+	if (ep->oxid != ntohs(rp->rrq_ox_id))
+		goto unlock_reject;
+	if (ep->rxid != ntohs(rp->rrq_rx_id) &&
+	    ep->rxid != FC_XID_UNKNOWN)
+		goto unlock_reject;
+	explan = ELS_EXPL_SID;
+	if (ep->sid != sid)
+		goto unlock_reject;
+
+	/*
+	 * Clear Recovery Qualifier state, and cancel timer if complete.
+	 */
+	if (ep->esb_stat & ESB_ST_REC_QUAL) {
+		ep->esb_stat &= ~ESB_ST_REC_QUAL;
+		atomic_dec(&ep->ex_refcnt);	/* drop hold for rec qual */
+	}
+	if (ep->esb_stat & ESB_ST_COMPLETE) {
+		if (cancel_delayed_work(&ep->timeout_work))
+			atomic_dec(&ep->ex_refcnt);	/* drop timer hold */
+	}
+
+	spin_unlock_bh(&ep->ex_lock);
+
+	/*
+	 * Send LS_ACC.
+	 */
+	fc_seq_ls_acc(sp);
+	fc_frame_free(fp);
+	return;
+
+unlock_reject:
+	spin_unlock_bh(&ep->ex_lock);
+	fc_exch_release(ep);	/* drop hold from fc_exch_find */
+reject:
+	fc_seq_ls_rjt(sp, ELS_RJT_LOGIC, explan);
+	fc_frame_free(fp);
+}
+
+struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp,
+				      enum fc_class class,
+				      u16 min_xid, u16 max_xid)
+{
+	struct fc_exch_mgr *mp;
+	size_t len;
+
+	if (max_xid <= min_xid || min_xid == 0 || max_xid == FC_XID_UNKNOWN) {
+		FC_DBG("Invalid min_xid 0x:%x and max_xid 0x:%x\n",
+		       min_xid, max_xid);
+		return NULL;
+	}
+
+	/*
+	 * Memory needed for the EM, including the trailing array of
+	 * exchange pointers.
+	 */
+#define xid_ok(i, m1, m2) (((i) >= (m1)) && ((i) <= (m2)))
+	len = (max_xid - min_xid + 1) * (sizeof(struct fc_exch *));
+	len += sizeof(struct fc_exch_mgr);
+
+	mp = kzalloc(len, GFP_ATOMIC);
+	if (!mp)
+		return NULL;
+
+	mp->class = class;
+	mp->total_exches = 0;
+	mp->exches = (struct fc_exch **)(mp + 1);
+	mp->lp = lp;
+	/* adjust em exch xid range for offload */
+	mp->min_xid = min_xid;
+	mp->max_xid = max_xid;
+	mp->last_xid = min_xid - 1;
+	mp->max_read = 0;
+	mp->last_read = 0;
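+	/*
+	 * When LRO is enabled, read exchanges get XIDs from the low range
+	 * [min_xid, lro_xid] so the hardware can offload them; all other
+	 * exchanges allocate from the remainder of the range.
+	 */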
+	if (lp->lro_enabled && xid_ok(lp->lro_xid, min_xid, max_xid)) {
+		mp->max_read = lp->lro_xid;
+		mp->last_read = min_xid - 1;
+		mp->last_xid = mp->max_read;
+	} else {
+		/* disable lro if no xid control over read */
+		lp->lro_enabled = 0;
+	}
+
+	INIT_LIST_HEAD(&mp->ex_list);
+	spin_lock_init(&mp->em_lock);
+
+	mp->ep_pool = mempool_create_slab_pool(2, fc_em_cachep);
+	if (!mp->ep_pool)
+		goto free_mp;
+
+	return mp;
+
+free_mp:
+	kfree(mp);
+	return NULL;
+}
+EXPORT_SYMBOL(fc_exch_mgr_alloc);
+
+void fc_exch_mgr_free(struct fc_exch_mgr *mp)
+{
+	WARN_ON(!mp);
+	/*
+	 * The total exch count must be zero
+	 * before freeing exchange manager.
+	 */
+	WARN_ON(mp->total_exches != 0);
+	mempool_destroy(mp->ep_pool);
+	kfree(mp);
+}
+EXPORT_SYMBOL(fc_exch_mgr_free);
+
+struct fc_exch *fc_exch_get(struct fc_lport *lp, struct fc_frame *fp)
+{
+	if (!lp || !lp->emp)
+		return NULL;
+
+	return fc_exch_alloc(lp->emp, fp, 0);
+}
+EXPORT_SYMBOL(fc_exch_get);
+
+struct fc_seq *fc_exch_seq_send(struct fc_lport *lp,
+				struct fc_frame *fp,
+				void (*resp)(struct fc_seq *,
+					     struct fc_frame *fp,
+					     void *arg),
+				void (*destructor)(struct fc_seq *, void *),
+				void *arg, u32 timer_msec)
+{
+	struct fc_exch *ep;
+	struct fc_seq *sp = NULL;
+	struct fc_frame_header *fh;
+	int rc = 1;
+
+	ep = lp->tt.exch_get(lp, fp);
+	if (!ep) {
+		fc_frame_free(fp);
+		return NULL;
+	}
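+	/* exch_get() returns with the exch lock held; it is dropped below */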
+	ep->esb_stat |= ESB_ST_SEQ_INIT;
+	fh = fc_frame_header_get(fp);
+	fc_exch_set_addr(ep, ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id));
+	ep->resp = resp;
+	ep->destructor = destructor;
+	ep->arg = arg;
+	ep->r_a_tov = FC_DEF_R_A_TOV;
+	ep->lp = lp;
+	sp = &ep->seq;
+
+	ep->fh_type = fh->fh_type; /* save for possible timeout handling */
+	ep->f_ctl = ntoh24(fh->fh_f_ctl);
+	fc_exch_setup_hdr(ep, fp, ep->f_ctl);
+	sp->cnt++;
+
+	if (unlikely(lp->tt.frame_send(lp, fp)))
+		goto err;
+
+	if (timer_msec)
+		fc_exch_timer_set_locked(ep, timer_msec);
+	ep->f_ctl &= ~FC_FC_FIRST_SEQ;	/* not first seq */
+
+	if (ep->f_ctl & FC_FC_SEQ_INIT)
+		ep->esb_stat &= ~ESB_ST_SEQ_INIT;
+	spin_unlock_bh(&ep->ex_lock);
+	return sp;
+err:
+	rc = fc_exch_done_locked(ep);
+	spin_unlock_bh(&ep->ex_lock);
+	if (!rc)
+		fc_exch_mgr_delete_ep(ep);
+	return NULL;
+}
+EXPORT_SYMBOL(fc_exch_seq_send);
+
+/*
+ * Receive a frame
+ */
+void fc_exch_recv(struct fc_lport *lp, struct fc_exch_mgr *mp,
+		  struct fc_frame *fp)
+{
+	struct fc_frame_header *fh = fc_frame_header_get(fp);
+	u32 f_ctl;
+
+	/* lport lock ? */
+	if (!lp || !mp || (lp->state == LPORT_ST_NONE)) {
+		FC_DBG("fc_lport or EM is not allocated or configured\n");
+		fc_frame_free(fp);
+		return;
+	}
+
+	/*
+	 * Dispatch the frame by its F_CTL context.
+	 * If the frame is marked invalid, just drop it.
+	 */
+	f_ctl = ntoh24(fh->fh_f_ctl);
+	switch (fr_eof(fp)) {
+	case FC_EOF_T:
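+		/* strip the fill bytes indicated by F_CTL from the payload */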
+		if (f_ctl & FC_FC_END_SEQ)
+			skb_trim(fp_skb(fp), fr_len(fp) - FC_FC_FILL(f_ctl));
+		/* fall through */
+	case FC_EOF_N:
+		if (fh->fh_type == FC_TYPE_BLS)
+			fc_exch_recv_bls(mp, fp);
+		else if ((f_ctl & (FC_FC_EX_CTX | FC_FC_SEQ_CTX)) ==
+			 FC_FC_EX_CTX)
+			fc_exch_recv_seq_resp(mp, fp);
+		else if (f_ctl & FC_FC_SEQ_CTX)
+			fc_exch_recv_resp(mp, fp);
+		else
+			fc_exch_recv_req(lp, mp, fp);
+		break;
+	default:
+		FC_DBG("dropping invalid frame (eof %x)", fr_eof(fp));
+		fc_frame_free(fp);
+		break;
+	}
+}
+EXPORT_SYMBOL(fc_exch_recv);
+
+int fc_exch_init(struct fc_lport *lp)
+{
+	if (!lp->tt.exch_get) {
+		/*
+		 *  exch_put() should be NULL if
+		 *  exch_get() is NULL
+		 */
+		WARN_ON(lp->tt.exch_put);
+		lp->tt.exch_get = fc_exch_get;
+	}
+
+	if (!lp->tt.seq_start_next)
+		lp->tt.seq_start_next = fc_seq_start_next;
+
+	if (!lp->tt.exch_seq_send)
+		lp->tt.exch_seq_send = fc_exch_seq_send;
+
+	if (!lp->tt.seq_send)
+		lp->tt.seq_send = fc_seq_send;
+
+	if (!lp->tt.seq_els_rsp_send)
+		lp->tt.seq_els_rsp_send = fc_seq_els_rsp_send;
+
+	if (!lp->tt.exch_done)
+		lp->tt.exch_done = fc_exch_done;
+
+	if (!lp->tt.exch_mgr_reset)
+		lp->tt.exch_mgr_reset = fc_exch_mgr_reset;
+
+	if (!lp->tt.seq_exch_abort)
+		lp->tt.seq_exch_abort = fc_seq_exch_abort;
+
+	return 0;
+}
+EXPORT_SYMBOL(fc_exch_init);
+
+int fc_setup_exch_mgr(void)
+{
+	fc_em_cachep = kmem_cache_create("libfc_em", sizeof(struct fc_exch),
+					 0, SLAB_HWCACHE_ALIGN, NULL);
+	if (!fc_em_cachep)
+		return -ENOMEM;
+	return 0;
+}
+
+void fc_destroy_exch_mgr(void)
+{
+	kmem_cache_destroy(fc_em_cachep);
+}
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
new file mode 100644
index 000000000000..404e63ff46b8
--- /dev/null
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -0,0 +1,2131 @@
+/*
+ * Copyright(c) 2007 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 Red Hat, Inc.  All rights reserved.
+ * Copyright(c) 2008 Mike Christie
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/scatterlist.h>
+#include <linux/err.h>
+#include <linux/crc32.h>
+
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+
+#include <scsi/fc/fc_fc2.h>
+
+#include <scsi/libfc.h>
+#include <scsi/fc_encode.h>
+
+MODULE_AUTHOR("Open-FCoE.org");
+MODULE_DESCRIPTION("libfc");
+MODULE_LICENSE("GPL");
+
+static int fc_fcp_debug;
+
+#define FC_DEBUG_FCP(fmt...)			\
+	do {					\
+		if (fc_fcp_debug)		\
+			FC_DBG(fmt);		\
+	} while (0)
+
+static struct kmem_cache *scsi_pkt_cachep;
+
+/* SRB state definitions */
+#define FC_SRB_FREE		0		/* cmd is free */
+#define FC_SRB_CMD_SENT		(1 << 0)	/* cmd has been sent */
+#define FC_SRB_RCV_STATUS	(1 << 1)	/* response has arrived */
+#define FC_SRB_ABORT_PENDING	(1 << 2)	/* cmd abort sent to device */
+#define FC_SRB_ABORTED		(1 << 3)	/* abort acknowledged */
+#define FC_SRB_DISCONTIG	(1 << 4)	/* non-sequential data recvd */
+#define FC_SRB_COMPL		(1 << 5)	/* fc_io_compl has been run */
+#define FC_SRB_FCP_PROCESSING_TMO (1 << 6)	/* timer function processing */
+#define FC_SRB_NOMEM		(1 << 7)	/* dropped due to out-of-mem */
+
+#define FC_SRB_READ		(1 << 1)
+#define FC_SRB_WRITE		(1 << 0)
+
+/*
+ * The SCp.ptr should be tested and set under the host lock. NULL indicates
+ * that the command has been returned to the scsi layer.
+ */
+#define CMD_SP(Cmnd)		    ((struct fc_fcp_pkt *)(Cmnd)->SCp.ptr)
+#define CMD_ENTRY_STATUS(Cmnd)	    ((Cmnd)->SCp.have_data_in)
+#define CMD_COMPL_STATUS(Cmnd)	    ((Cmnd)->SCp.this_residual)
+#define CMD_SCSI_STATUS(Cmnd)	    ((Cmnd)->SCp.Status)
+#define CMD_RESID_LEN(Cmnd)	    ((Cmnd)->SCp.buffers_residual)
+
+struct fc_fcp_internal {
+	mempool_t	*scsi_pkt_pool;
+	struct list_head scsi_pkt_queue;
+	u8		throttled;
+};
+
+#define fc_get_scsi_internal(x)	((struct fc_fcp_internal *)(x)->scsi_priv)
+
+/*
+ * function prototypes
+ * FC scsi I/O related functions
+ */
+static void fc_fcp_recv_data(struct fc_fcp_pkt *, struct fc_frame *);
+static void fc_fcp_recv(struct fc_seq *, struct fc_frame *, void *);
+static void fc_fcp_resp(struct fc_fcp_pkt *, struct fc_frame *);
+static void fc_fcp_complete_locked(struct fc_fcp_pkt *);
+static void fc_tm_done(struct fc_seq *, struct fc_frame *, void *);
+static void fc_fcp_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp);
+static void fc_timeout_error(struct fc_fcp_pkt *);
+static void fc_fcp_timeout(unsigned long data);
+static void fc_fcp_rec(struct fc_fcp_pkt *);
+static void fc_fcp_rec_error(struct fc_fcp_pkt *, struct fc_frame *);
+static void fc_fcp_rec_resp(struct fc_seq *, struct fc_frame *, void *);
+static void fc_io_compl(struct fc_fcp_pkt *);
+
+static void fc_fcp_srr(struct fc_fcp_pkt *, enum fc_rctl, u32);
+static void fc_fcp_srr_resp(struct fc_seq *, struct fc_frame *, void *);
+static void fc_fcp_srr_error(struct fc_fcp_pkt *, struct fc_frame *);
+
+/*
+ * command status codes
+ */
+#define FC_COMPLETE		0
+#define FC_CMD_ABORTED		1
+#define FC_CMD_RESET		2
+#define FC_CMD_PLOGO		3
+#define FC_SNS_RCV		4
+#define FC_TRANS_ERR		5
+#define FC_DATA_OVRRUN		6
+#define FC_DATA_UNDRUN		7
+#define FC_ERROR		8
+#define FC_HRD_ERROR		9
+#define FC_CMD_TIME_OUT		10
+
+/*
+ * Error recovery timeout values.
+ */
+#define FC_SCSI_ER_TIMEOUT	(10 * HZ)
+#define FC_SCSI_TM_TOV		(10 * HZ)
+#define FC_SCSI_REC_TOV		(2 * HZ)
+#define FC_HOST_RESET_TIMEOUT	(30 * HZ)
+
+#define FC_MAX_ERROR_CNT	5
+#define FC_MAX_RECOV_RETRY	3
+
+#define FC_FCP_DFLT_QUEUE_DEPTH 32
+
+/**
+ * fc_fcp_pkt_alloc - allocation routine for scsi_pkt packet
+ * @lp:		fc lport struct
+ * @gfp:	gfp flags for allocation
+ *
+ * This is used by the upper layer scsi driver.
+ * Return Value : scsi_pkt structure or NULL on allocation failure.
+ * Context	: call from process context. No locking required.
+ */
+static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lp, gfp_t gfp)
+{
+	struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
+	struct fc_fcp_pkt *fsp;
+
+	fsp = mempool_alloc(si->scsi_pkt_pool, gfp);
+	if (fsp) {
+		memset(fsp, 0, sizeof(*fsp));
+		fsp->lp = lp;
+		atomic_set(&fsp->ref_cnt, 1);
+		init_timer(&fsp->timer);
+		INIT_LIST_HEAD(&fsp->list);
+		spin_lock_init(&fsp->scsi_pkt_lock);
+	}
+	return fsp;
+}
+
+/**
+ * fc_fcp_pkt_release - release hold on scsi_pkt packet
+ * @fsp:	fcp packet struct
+ *
+ * This is used by the upper layer scsi driver.
+ * Context	: call from process and interrupt context.
+ *		  No locking required.
+ */
+static void fc_fcp_pkt_release(struct fc_fcp_pkt *fsp)
+{
+	if (atomic_dec_and_test(&fsp->ref_cnt)) {
+		struct fc_fcp_internal *si = fc_get_scsi_internal(fsp->lp);
+
+		mempool_free(fsp, si->scsi_pkt_pool);
+	}
+}
+
+static void fc_fcp_pkt_hold(struct fc_fcp_pkt *fsp)
+{
+	atomic_inc(&fsp->ref_cnt);
+}
+
+/**
+ * fc_fcp_pkt_destroy - release hold on scsi_pkt packet
+ *
+ * @seq:	exchange sequence
+ * @fsp:	fcp packet struct
+ *
+ * Release the hold that keeps the scsi_pkt around until the EM layer's
+ * exchange resource is freed.
+ * Context	: called from the EM layer.
+ *		  No locking required.
+ */
+static void fc_fcp_pkt_destroy(struct fc_seq *seq, void *fsp)
+{
+	fc_fcp_pkt_release(fsp);
+}
+
+/**
+ * fc_fcp_lock_pkt - lock a packet and get a ref to it.
+ * @fsp:	fcp packet
+ *
+ * We should only return an error if we return a command to scsi-ml before
+ * getting a response. This can happen in cases where we send an abort, but
+ * do not wait for the response, so the abort and command can be passing
+ * each other on the wire/network-layer.
+ *
+ * Note: this function locks the packet and gets a reference to allow
+ * callers to call the completion function while the lock is held and
+ * not have to worry about the packet's refcount.
+ *
+ * TODO: Maybe we should just have callers grab/release the lock and
+ * have a function that they call to verify the fsp and grab a ref if
+ * needed.
+ */
+static inline int fc_fcp_lock_pkt(struct fc_fcp_pkt *fsp)
+{
+	spin_lock_bh(&fsp->scsi_pkt_lock);
+	if (fsp->state & FC_SRB_COMPL) {
+		spin_unlock_bh(&fsp->scsi_pkt_lock);
+		return -EPERM;
+	}
+
+	fc_fcp_pkt_hold(fsp);
+	return 0;
+}
+
+static inline void fc_fcp_unlock_pkt(struct fc_fcp_pkt *fsp)
+{
+	spin_unlock_bh(&fsp->scsi_pkt_lock);
+	fc_fcp_pkt_release(fsp);
+}
+
+static void fc_fcp_timer_set(struct fc_fcp_pkt *fsp, unsigned long delay)
+{
+	if (!(fsp->state & FC_SRB_COMPL))
+		mod_timer(&fsp->timer, jiffies + delay);
+}
+
+static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp)
+{
+	if (!fsp->seq_ptr)
+		return -EINVAL;
+
+	fsp->state |= FC_SRB_ABORT_PENDING;
+	return fsp->lp->tt.seq_exch_abort(fsp->seq_ptr, 0);
+}
+
+/*
+ * Retry command.
+ * An abort isn't needed.
+ */
+static void fc_fcp_retry_cmd(struct fc_fcp_pkt *fsp)
+{
+	if (fsp->seq_ptr) {
+		fsp->lp->tt.exch_done(fsp->seq_ptr);
+		fsp->seq_ptr = NULL;
+	}
+
+	fsp->state &= ~FC_SRB_ABORT_PENDING;
+	fsp->io_status = SUGGEST_RETRY << 24;
+	fsp->status_code = FC_ERROR;
+	fc_fcp_complete_locked(fsp);
+}
+
+/*
+ * Receive SCSI data from target.
+ * Called after receiving solicited data.
+ */
+static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
+{
+	struct scsi_cmnd *sc = fsp->cmd;
+	struct fc_lport *lp = fsp->lp;
+	struct fcoe_dev_stats *stats;
+	struct fc_frame_header *fh;
+	size_t start_offset;
+	size_t offset;
+	u32 crc;
+	u32 copy_len = 0;
+	size_t len;
+	void *buf;
+	struct scatterlist *sg;
+	size_t remaining;
+
+	fh = fc_frame_header_get(fp);
+	offset = ntohl(fh->fh_parm_offset);
+	start_offset = offset;
+	len = fr_len(fp) - sizeof(*fh);
+	buf = fc_frame_payload_get(fp, 0);
+
+	if (offset + len > fsp->data_len) {
+		/*
+		 * this should never happen
+		 */
+		if ((fr_flags(fp) & FCPHF_CRC_UNCHECKED) &&
+		    fc_frame_crc_check(fp))
+			goto crc_err;
+		FC_DEBUG_FCP("data received past end. len %zx offset %zx "
+			     "data_len %x\n", len, offset, fsp->data_len);
+		fc_fcp_retry_cmd(fsp);
+		return;
+	}
+	if (offset != fsp->xfer_len)
+		fsp->state |= FC_SRB_DISCONTIG;
+
+	crc = 0;
+	if (fr_flags(fp) & FCPHF_CRC_UNCHECKED)
+		crc = crc32(~0, (u8 *) fh, sizeof(*fh));
+
+	sg = scsi_sglist(sc);
+	remaining = len;
+
+	while (remaining > 0 && sg) {
+		size_t off;
+		void *page_addr;
+		size_t sg_bytes;
+
+		if (offset >= sg->length) {
+			offset -= sg->length;
+			sg = sg_next(sg);
+			continue;
+		}
+		sg_bytes = min(remaining, sg->length - offset);
+
+		/*
+		 * The scatterlist item may be bigger than PAGE_SIZE,
+		 * but we are limited to mapping PAGE_SIZE at a time.
+		 */
+		off = offset + sg->offset;
+		sg_bytes = min(sg_bytes, (size_t)
+			       (PAGE_SIZE - (off & ~PAGE_MASK)));
+		page_addr = kmap_atomic(sg_page(sg) + (off >> PAGE_SHIFT),
+					KM_SOFTIRQ0);
+		if (!page_addr)
+			break;		/* XXX panic? */
+
+		if (fr_flags(fp) & FCPHF_CRC_UNCHECKED)
+			crc = crc32(crc, buf, sg_bytes);
+		memcpy((char *)page_addr + (off & ~PAGE_MASK), buf,
+		       sg_bytes);
+
+		kunmap_atomic(page_addr, KM_SOFTIRQ0);
+		buf += sg_bytes;
+		offset += sg_bytes;
+		remaining -= sg_bytes;
+		copy_len += sg_bytes;
+	}
+
+	if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) {
+		buf = fc_frame_payload_get(fp, 0);
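+		/*
+		 * The received CRC also covers the fill bytes that pad the
+		 * payload to a 4-byte boundary, so fold them in before the
+		 * final comparison.
+		 */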
+		if (len % 4) {
+			crc = crc32(crc, buf + len, 4 - (len % 4));
+			len += 4 - (len % 4);
+		}
+
+		if (~crc != le32_to_cpu(fr_crc(fp))) {
+crc_err:
+			stats = lp->dev_stats[smp_processor_id()];
+			stats->ErrorFrames++;
+			if (stats->InvalidCRCCount++ < 5)
+				FC_DBG("CRC error on data frame\n");
+			/*
+			 * Assume the frame is total garbage.
+			 * We may have copied it over the good part
+			 * of the buffer.
+			 * If so, we need to retry the entire operation.
+			 * Otherwise, ignore it.
+			 */
+			if (fsp->state & FC_SRB_DISCONTIG)
+				fc_fcp_retry_cmd(fsp);
+			return;
+		}
+	}
+
+	if (fsp->xfer_contig_end == start_offset)
+		fsp->xfer_contig_end += copy_len;
+	fsp->xfer_len += copy_len;
+
+	/*
+	 * In the very rare event that this data arrived after the response
+	 * and completes the transfer, call the completion handler.
+	 */
+	if (unlikely(fsp->state & FC_SRB_RCV_STATUS) &&
+	    fsp->xfer_len == fsp->data_len - fsp->scsi_resid)
+		fc_fcp_complete_locked(fsp);
+}
+
+/*
+ * fc_fcp_send_data - Send SCSI data to target.
+ * @fsp: ptr to fc_fcp_pkt
+ * @seq: ptr to this sequence
+ * @offset: starting offset for this data request
+ * @seq_blen: the burst length for this data request
+ *
+ * Called after receiving a Transfer Ready data descriptor.
+ * If the LLD is capable of sequence offload, send down seq_blen
+ * bytes of data in a single frame; otherwise send multiple FC
+ * frames of the max frame payload supported by the target port.
+ *
+ * Returns 0 for success.
+ */
+static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
+			    size_t offset, size_t seq_blen)
+{
+	struct fc_exch *ep;
+	struct scsi_cmnd *sc;
+	struct scatterlist *sg;
+	struct fc_frame *fp = NULL;
+	struct fc_lport *lp = fsp->lp;
+	size_t remaining;
+	size_t t_blen;
+	size_t tlen;
+	size_t sg_bytes;
+	size_t frame_offset, fh_parm_offset;
+	int error;
+	void *data = NULL;
+	void *page_addr;
+	int using_sg = lp->sg_supp;
+	u32 f_ctl;
+
+	WARN_ON(seq_blen <= 0);
+	if (unlikely(offset + seq_blen > fsp->data_len)) {
+		/* this should never happen */
+		FC_DEBUG_FCP("xfer-ready past end. seq_blen %zx offset %zx\n",
+			     seq_blen, offset);
+		fc_fcp_send_abort(fsp);
+		return 0;
+	} else if (offset != fsp->xfer_len) {
+		/* Out of Order Data Request - no problem, but unexpected. */
+		FC_DEBUG_FCP("xfer-ready non-contiguous. "
+			     "seq_blen %zx offset %zx\n", seq_blen, offset);
+	}
+
+	/*
+	 * if LLD is capable of seq_offload then set transport
+	 * burst length (t_blen) to seq_blen, otherwise set t_blen
+	 * to max FC frame payload previously set in fsp->max_payload.
+	 */
+	t_blen = lp->seq_offload ? seq_blen : fsp->max_payload;
+	WARN_ON(t_blen < FC_MIN_MAX_PAYLOAD);
+	if (t_blen > 512)
+		t_blen &= ~(512 - 1);	/* round down to block size */
+	WARN_ON(t_blen < FC_MIN_MAX_PAYLOAD);	/* won't go below 256 */
+	sc = fsp->cmd;
+
+	remaining = seq_blen;
+	fh_parm_offset = frame_offset = offset;
+	tlen = 0;
+	seq = lp->tt.seq_start_next(seq);
+	f_ctl = FC_FC_REL_OFF;
+	WARN_ON(!seq);
+
+	/*
+	 * If a get_page()/put_page() will fail, don't use sg lists
+	 * in the fc_frame structure.
+	 *
+	 * The put_page() may be long after the I/O has completed
+	 * in the case of FCoE, since the network driver does it
+	 * via free_skb().  See the test in free_pages_check().
+	 *
+	 * Test this case with 'dd </dev/zero >/dev/st0 bs=64k'.
+	 */
+	if (using_sg) {
+		for (sg = scsi_sglist(sc); sg; sg = sg_next(sg)) {
+			if (page_count(sg_page(sg)) == 0 ||
+			    (sg_page(sg)->flags & (1 << PG_lru |
+						   1 << PG_private |
+						   1 << PG_locked |
+						   1 << PG_active |
+						   1 << PG_slab |
+						   1 << PG_swapcache |
+						   1 << PG_writeback |
+						   1 << PG_reserved |
+						   1 << PG_buddy))) {
+				using_sg = 0;
+				break;
+			}
+		}
+	}
+	sg = scsi_sglist(sc);
+
+	while (remaining > 0 && sg) {
+		if (offset >= sg->length) {
+			offset -= sg->length;
+			sg = sg_next(sg);
+			continue;
+		}
+		if (!fp) {
+			tlen = min(t_blen, remaining);
+
+			/*
+			 * TODO: temporary workaround.  fc_seq_send() can't
+			 * handle odd lengths in non-linear skbs; this can
+			 * only be the final fragment.
+			 */
+			if (tlen % 4)
+				using_sg = 0;
+			if (using_sg) {
+				fp = _fc_frame_alloc(lp, 0);
+				if (!fp)
+					return -ENOMEM;
+			} else {
+				fp = fc_frame_alloc(lp, tlen);
+				if (!fp)
+					return -ENOMEM;
+
+				data = (void *)(fr_hdr(fp)) +
+					sizeof(struct fc_frame_header);
+			}
+			fh_parm_offset = frame_offset;
+			fr_max_payload(fp) = fsp->max_payload;
+		}
+		sg_bytes = min(tlen, sg->length - offset);
+		if (using_sg) {
+			WARN_ON(skb_shinfo(fp_skb(fp))->nr_frags >
+				FC_FRAME_SG_LEN);
+			get_page(sg_page(sg));
+			skb_fill_page_desc(fp_skb(fp),
+					   skb_shinfo(fp_skb(fp))->nr_frags,
+					   sg_page(sg), sg->offset + offset,
+					   sg_bytes);
+			fp_skb(fp)->data_len += sg_bytes;
+			fr_len(fp) += sg_bytes;
+			fp_skb(fp)->truesize += PAGE_SIZE;
+		} else {
+			size_t off = offset + sg->offset;
+
+			/*
+			 * The scatterlist item may be bigger than PAGE_SIZE,
+			 * but we must not cross pages inside the kmap.
+			 */
+			sg_bytes = min(sg_bytes, (size_t) (PAGE_SIZE -
+							   (off & ~PAGE_MASK)));
+			page_addr = kmap_atomic(sg_page(sg) +
+						(off >> PAGE_SHIFT),
+						KM_SOFTIRQ0);
+			memcpy(data, (char *)page_addr + (off & ~PAGE_MASK),
+			       sg_bytes);
+			kunmap_atomic(page_addr, KM_SOFTIRQ0);
+			data += sg_bytes;
+		}
+		offset += sg_bytes;
+		frame_offset += sg_bytes;
+		tlen -= sg_bytes;
+		remaining -= sg_bytes;
+
+		if (tlen)
+			continue;
+
+		/*
+		 * Send sequence with transfer sequence initiative in case
+		 * this is last FCP frame of the sequence.
+		 */
+		if (remaining == 0)
+			f_ctl |= FC_FC_SEQ_INIT | FC_FC_END_SEQ;
+
+		ep = fc_seq_exch(seq);
+		fc_fill_fc_hdr(fp, FC_RCTL_DD_SOL_DATA, ep->did, ep->sid,
+			       FC_TYPE_FCP, f_ctl, fh_parm_offset);
+
+		/*
+		 * Send this fragment of the sequence.
+		 */
+		error = lp->tt.seq_send(lp, seq, fp);
+		if (error) {
+			WARN_ON(1);		/* send error should be rare */
+			fc_fcp_retry_cmd(fsp);
+			return 0;
+		}
+		fp = NULL;
+	}
+	fsp->xfer_len += seq_blen;	/* premature count? */
+	return 0;
+}
+
+static void fc_fcp_abts_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
+{
+	int ba_done = 1;
+	struct fc_ba_rjt *brp;
+	struct fc_frame_header *fh;
+
+	fh = fc_frame_header_get(fp);
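+	/*
+	 * A BA_ACC completes the abort, as does a BA_RJT with reason
+	 * "logical error", since the target no longer knows the exchange.
+	 */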
+	switch (fh->fh_r_ctl) {
+	case FC_RCTL_BA_ACC:
+		break;
+	case FC_RCTL_BA_RJT:
+		brp = fc_frame_payload_get(fp, sizeof(*brp));
+		if (brp && brp->br_reason == FC_BA_RJT_LOG_ERR)
+			break;
+		/* fall thru */
+	default:
+		/*
+		 * we will let the command timeout
+		 * and scsi-ml recover in this case,
+		 * therefore cleared the ba_done flag.
+		 */
+		ba_done = 0;
+	}
+
+	if (ba_done) {
+		fsp->state |= FC_SRB_ABORTED;
+		fsp->state &= ~FC_SRB_ABORT_PENDING;
+
+		if (fsp->wait_for_comp)
+			complete(&fsp->tm_done);
+		else
+			fc_fcp_complete_locked(fsp);
+	}
+}
+
+/*
+ * fc_fcp_reduce_can_queue - drop can_queue
+ * @lp: lport to drop queueing for
+ *
+ * If we are getting memory allocation failures, then we may
+ * be trying to execute too many commands. We let the running
+ * commands complete or timeout, then try again with a reduced
+ * can_queue. Eventually we will hit the point where we run
+ * on all reserved structs.
+ */
+static void fc_fcp_reduce_can_queue(struct fc_lport *lp)
+{
+	struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
+	unsigned long flags;
+	int can_queue;
+
+	spin_lock_irqsave(lp->host->host_lock, flags);
+	if (si->throttled)
+		goto done;
+	si->throttled = 1;
+
+	can_queue = lp->host->can_queue;
+	can_queue >>= 1;
+	if (!can_queue)
+		can_queue = 1;
+	lp->host->can_queue = can_queue;
+	shost_printk(KERN_ERR, lp->host, "Could not allocate frame.\n"
+		     "Reducing can_queue to %d.\n", can_queue);
+done:
+	spin_unlock_irqrestore(lp->host->host_lock, flags);
+}
+
+/*
+ * The exch mgr calls this routine to process scsi
+ * exchanges.
+ *
+ * Return   : None
+ * Context  : called from Soft IRQ context
+ *	      cannot be called while holding the list lock
+ */
+static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg)
+{
+	struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)arg;
+	struct fc_lport *lp;
+	struct fc_frame_header *fh;
+	struct fcp_txrdy *dd;
+	u8 r_ctl;
+	int rc = 0;
+
+	if (IS_ERR(fp))
+		goto errout;
+
+	fh = fc_frame_header_get(fp);
+	r_ctl = fh->fh_r_ctl;
+	lp = fsp->lp;
+
+	if (!(lp->state & LPORT_ST_READY))
+		goto out;
+	if (fc_fcp_lock_pkt(fsp))
+		goto out;
+	fsp->last_pkt_time = jiffies;
+
+	if (fh->fh_type == FC_TYPE_BLS) {
+		fc_fcp_abts_resp(fsp, fp);
+		goto unlock;
+	}
+
+	if (fsp->state & (FC_SRB_ABORTED | FC_SRB_ABORT_PENDING))
+		goto unlock;
+
+	if (r_ctl == FC_RCTL_DD_DATA_DESC) {
+		/*
+		 * received XFER RDY from the target
+		 * need to send data to the target
+		 */
+		WARN_ON(fr_flags(fp) & FCPHF_CRC_UNCHECKED);
+		dd = fc_frame_payload_get(fp, sizeof(*dd));
+		WARN_ON(!dd);
+
+		rc = fc_fcp_send_data(fsp, seq,
+				      (size_t) ntohl(dd->ft_data_ro),
+				      (size_t) ntohl(dd->ft_burst_len));
+		if (!rc)
+			seq->rec_data = fsp->xfer_len;
+		else if (rc == -ENOMEM)
+			fsp->state |= FC_SRB_NOMEM;
+	} else if (r_ctl == FC_RCTL_DD_SOL_DATA) {
+		/*
+		 * received a DATA frame
+		 * next we will copy the data to the system buffer
+		 */
+		WARN_ON(fr_len(fp) < sizeof(*fh));	/* len may be 0 */
+		fc_fcp_recv_data(fsp, fp);
+		seq->rec_data = fsp->xfer_contig_end;
+	} else if (r_ctl == FC_RCTL_DD_CMD_STATUS) {
+		WARN_ON(fr_flags(fp) & FCPHF_CRC_UNCHECKED);
+
+		fc_fcp_resp(fsp, fp);
+	} else {
+		FC_DBG("unexpected frame.  r_ctl %x\n", r_ctl);
+	}
+unlock:
+	fc_fcp_unlock_pkt(fsp);
+out:
+	fc_frame_free(fp);
+errout:
+	if (IS_ERR(fp))
+		fc_fcp_error(fsp, fp);
+	else if (rc == -ENOMEM)
+		fc_fcp_reduce_can_queue(lp);
+}
+
+static void fc_fcp_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
+{
+	struct fc_frame_header *fh;
+	struct fcp_resp *fc_rp;
+	struct fcp_resp_ext *rp_ex;
+	struct fcp_resp_rsp_info *fc_rp_info;
+	u32 plen;
+	u32 expected_len;
+	u32 respl = 0;
+	u32 snsl = 0;
+	u8 flags = 0;
+
+	plen = fr_len(fp);
+	fh = (struct fc_frame_header *)fr_hdr(fp);
+	if (unlikely(plen < sizeof(*fh) + sizeof(*fc_rp)))
+		goto len_err;
+	plen -= sizeof(*fh);
+	fc_rp = (struct fcp_resp *)(fh + 1);
+	fsp->cdb_status = fc_rp->fr_status;
+	flags = fc_rp->fr_flags;
+	fsp->scsi_comp_flags = flags;
+	expected_len = fsp->data_len;
+
+	if (unlikely((flags & ~FCP_CONF_REQ) || fc_rp->fr_status)) {
+		rp_ex = (void *)(fc_rp + 1);
+		if (flags & (FCP_RSP_LEN_VAL | FCP_SNS_LEN_VAL)) {
+			if (plen < sizeof(*fc_rp) + sizeof(*rp_ex))
+				goto len_err;
+			fc_rp_info = (struct fcp_resp_rsp_info *)(rp_ex + 1);
+			if (flags & FCP_RSP_LEN_VAL) {
+				respl = ntohl(rp_ex->fr_rsp_len);
+				if (respl != sizeof(*fc_rp_info))
+					goto len_err;
+				if (fsp->wait_for_comp) {
+					/* Abuse cdb_status for rsp code */
+					fsp->cdb_status = fc_rp_info->rsp_code;
+					complete(&fsp->tm_done);
+					/*
+					 * tmfs will not have any scsi cmd so
+					 * exit here
+					 */
+					return;
+				} else
+					goto err;
+			}
+			if (flags & FCP_SNS_LEN_VAL) {
+				snsl = ntohl(rp_ex->fr_sns_len);
+				if (snsl > SCSI_SENSE_BUFFERSIZE)
+					snsl = SCSI_SENSE_BUFFERSIZE;
+				memcpy(fsp->cmd->sense_buffer,
+				       (char *)fc_rp_info + respl, snsl);
+			}
+		}
+		if (flags & (FCP_RESID_UNDER | FCP_RESID_OVER)) {
+			if (plen < sizeof(*fc_rp) + sizeof(rp_ex->fr_resid))
+				goto len_err;
+			if (flags & FCP_RESID_UNDER) {
+				fsp->scsi_resid = ntohl(rp_ex->fr_resid);
+				/*
+				 * The cmnd->underflow is the minimum number of
+				 * bytes that must be transferred for this
+				 * command.  Provided a sense condition is not
+				 * present, make sure the actual amount
+				 * transferred is at least the underflow value
+				 * or fail.
+				 */
+				if (!(flags & FCP_SNS_LEN_VAL) &&
+				    (fc_rp->fr_status == 0) &&
+				    (scsi_bufflen(fsp->cmd) -
+				     fsp->scsi_resid) < fsp->cmd->underflow)
+					goto err;
+				expected_len -= fsp->scsi_resid;
+			} else {
+				fsp->status_code = FC_ERROR;
+			}
+		}
+	}
+	fsp->state |= FC_SRB_RCV_STATUS;
+
+	/*
+	 * Check for missing or extra data frames.
+	 */
+	if (unlikely(fsp->xfer_len != expected_len)) {
+		if (fsp->xfer_len < expected_len) {
+			/*
+			 * Some data may be queued locally.
+			 * Wait at least one jiffy to see if it is delivered.
+			 * If this expires without data, we may do an SRR.
+			 */
+			fc_fcp_timer_set(fsp, 2);
+			return;
+		}
+		fsp->status_code = FC_DATA_OVRRUN;
+		FC_DBG("tgt %6x xfer len %zx greater than expected len %x. "
+		       "data len %x\n",
+		       fsp->rport->port_id,
+		       fsp->xfer_len, expected_len, fsp->data_len);
+	}
+	fc_fcp_complete_locked(fsp);
+	return;
+
+len_err:
+	FC_DBG("short FCP response. flags 0x%x len %u respl %u snsl %u\n",
+	       flags, fr_len(fp), respl, snsl);
+err:
+	fsp->status_code = FC_ERROR;
+	fc_fcp_complete_locked(fsp);
+}
+
+/**
+ * fc_fcp_complete_locked - complete processing of a fcp packet
+ * @fsp:	fcp packet
+ *
+ * This function may sleep if a timer is pending. The packet lock must be
+ * held, and the host lock must not be held.
+ */
+static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp)
+{
+	struct fc_lport *lp = fsp->lp;
+	struct fc_seq *seq;
+	struct fc_exch *ep;
+	u32 f_ctl;
+
+	if (fsp->state & FC_SRB_ABORT_PENDING)
+		return;
+
+	if (fsp->state & FC_SRB_ABORTED) {
+		if (!fsp->status_code)
+			fsp->status_code = FC_CMD_ABORTED;
+	} else {
+		/*
+		 * Test for transport underrun, independent of response
+		 * underrun status.
+		 */
+		if (fsp->xfer_len < fsp->data_len && !fsp->io_status &&
+		    (!(fsp->scsi_comp_flags & FCP_RESID_UNDER) ||
+		     fsp->xfer_len < fsp->data_len - fsp->scsi_resid)) {
+			fsp->status_code = FC_DATA_UNDRUN;
+			fsp->io_status = SUGGEST_RETRY << 24;
+		}
+	}
+
+	seq = fsp->seq_ptr;
+	if (seq) {
+		fsp->seq_ptr = NULL;
+		if (unlikely(fsp->scsi_comp_flags & FCP_CONF_REQ)) {
+			struct fc_frame *conf_frame;
+			struct fc_seq *csp;
+
+			csp = lp->tt.seq_start_next(seq);
+			conf_frame = fc_frame_alloc(fsp->lp, 0);
+			if (conf_frame) {
+				f_ctl = FC_FC_SEQ_INIT;
+				f_ctl |= FC_FC_LAST_SEQ | FC_FC_END_SEQ;
+				ep = fc_seq_exch(seq);
+				fc_fill_fc_hdr(conf_frame, FC_RCTL_DD_SOL_CTL,
+					       ep->did, ep->sid,
+					       FC_TYPE_FCP, f_ctl, 0);
+				lp->tt.seq_send(lp, csp, conf_frame);
+			}
+		}
+		lp->tt.exch_done(seq);
+	}
+	fc_io_compl(fsp);
+}
+
+static void fc_fcp_cleanup_cmd(struct fc_fcp_pkt *fsp, int error)
+{
+	struct fc_lport *lp = fsp->lp;
+
+	if (fsp->seq_ptr) {
+		lp->tt.exch_done(fsp->seq_ptr);
+		fsp->seq_ptr = NULL;
+	}
+	fsp->status_code = error;
+}
+
+/**
+ * fc_fcp_cleanup_each_cmd - cleanup each matching active command
+ * @lp:		logical port
+ * @id:		target id
+ * @lun:	lun
+ * @error:	fsp status code
+ *
+ * If lun or id is -1, they are ignored.
+ */
+static void fc_fcp_cleanup_each_cmd(struct fc_lport *lp, unsigned int id,
+				    unsigned int lun, int error)
+{
+	struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
+	struct fc_fcp_pkt *fsp;
+	struct scsi_cmnd *sc_cmd;
+	unsigned long flags;
+
+	spin_lock_irqsave(lp->host->host_lock, flags);
+restart:
+	list_for_each_entry(fsp, &si->scsi_pkt_queue, list) {
+		sc_cmd = fsp->cmd;
+		if (id != -1 && scmd_id(sc_cmd) != id)
+			continue;
+
+		if (lun != -1 && sc_cmd->device->lun != lun)
+			continue;
+
+		fc_fcp_pkt_hold(fsp);
+		spin_unlock_irqrestore(lp->host->host_lock, flags);
+
+		if (!fc_fcp_lock_pkt(fsp)) {
+			fc_fcp_cleanup_cmd(fsp, error);
+			fc_io_compl(fsp);
+			fc_fcp_unlock_pkt(fsp);
+		}
+
+		fc_fcp_pkt_release(fsp);
+		spin_lock_irqsave(lp->host->host_lock, flags);
+		/*
+		 * while we dropped the lock multiple pkts could
+		 * have been released, so we have to start over.
+		 */
+		goto restart;
+	}
+	spin_unlock_irqrestore(lp->host->host_lock, flags);
+}
+
+static void fc_fcp_abort_io(struct fc_lport *lp)
+{
+	fc_fcp_cleanup_each_cmd(lp, -1, -1, FC_HRD_ERROR);
+}
+
+/**
+ * fc_fcp_pkt_send - send a fcp packet to the lower level.
+ * @lp:		fc lport
+ * @fsp:	fc packet.
+ *
+ * This is called by upper layer protocol.
+ * Return   : zero for success and -1 for failure
+ * Context  : called from queuecommand which can be called from process
+ *	      or scsi soft irq.
+ * Locks    : called with the host lock and irqs disabled.
+ */
+static int fc_fcp_pkt_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp)
+{
+	struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
+	int rc;
+
+	fsp->cmd->SCp.ptr = (char *)fsp;
+	fsp->cdb_cmd.fc_dl = htonl(fsp->data_len);
+	fsp->cdb_cmd.fc_flags = fsp->req_flags & ~FCP_CFL_LEN_MASK;
+
+	int_to_scsilun(fsp->cmd->device->lun,
+		       (struct scsi_lun *)fsp->cdb_cmd.fc_lun);
+	memcpy(fsp->cdb_cmd.fc_cdb, fsp->cmd->cmnd, fsp->cmd->cmd_len);
+	list_add_tail(&fsp->list, &si->scsi_pkt_queue);
+
+	spin_unlock_irq(lp->host->host_lock);
+	rc = lp->tt.fcp_cmd_send(lp, fsp, fc_fcp_recv);
+	spin_lock_irq(lp->host->host_lock);
+	if (rc)
+		list_del(&fsp->list);
+
+	return rc;
+}
+
+static int fc_fcp_cmd_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp,
+			   void (*resp)(struct fc_seq *,
+					struct fc_frame *fp,
+					void *arg))
+{
+	struct fc_frame *fp;
+	struct fc_seq *seq;
+	struct fc_rport *rport;
+	struct fc_rport_libfc_priv *rp;
+	const size_t len = sizeof(fsp->cdb_cmd);
+	int rc = 0;
+
+	if (fc_fcp_lock_pkt(fsp))
+		return 0;
+
+	fp = fc_frame_alloc(lp, sizeof(fsp->cdb_cmd));
+	if (!fp) {
+		rc = -1;
+		goto unlock;
+	}
+
+	memcpy(fc_frame_payload_get(fp, len), &fsp->cdb_cmd, len);
+	fr_cmd(fp) = fsp->cmd;
+	rport = fsp->rport;
+	fsp->max_payload = rport->maxframe_size;
+	rp = rport->dd_data;
+
+	fc_fill_fc_hdr(fp, FC_RCTL_DD_UNSOL_CMD, rport->port_id,
+		       fc_host_port_id(rp->local_port->host), FC_TYPE_FCP,
+		       FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
+
+	seq = lp->tt.exch_seq_send(lp, fp, resp, fc_fcp_pkt_destroy, fsp, 0);
+	if (!seq) {
+		fc_frame_free(fp);
+		rc = -1;
+		goto unlock;
+	}
+	fsp->last_pkt_time = jiffies;
+	fsp->seq_ptr = seq;
+	fc_fcp_pkt_hold(fsp);	/* hold for fc_fcp_pkt_destroy */
+
+	setup_timer(&fsp->timer, fc_fcp_timeout, (unsigned long)fsp);
+	fc_fcp_timer_set(fsp,
+			 (fsp->tgt_flags & FC_RP_FLAGS_REC_SUPPORTED) ?
+			 FC_SCSI_REC_TOV : FC_SCSI_ER_TIMEOUT);
+unlock:
+	fc_fcp_unlock_pkt(fsp);
+	return rc;
+}
+
+/*
+ * transport error handler
+ */
+static void fc_fcp_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
+{
+	int error = PTR_ERR(fp);
+
+	if (fc_fcp_lock_pkt(fsp))
+		return;
+
+	switch (error) {
+	case -FC_EX_CLOSED:
+		fc_fcp_retry_cmd(fsp);
+		goto unlock;
+	default:
+		FC_DBG("unknown error %ld\n", PTR_ERR(fp));
+	}
+	/*
+	 * clear abort pending, because the lower layer
+	 * decided to force completion.
+	 */
+	fsp->state &= ~FC_SRB_ABORT_PENDING;
+	fsp->status_code = FC_CMD_PLOGO;
+	fc_fcp_complete_locked(fsp);
+unlock:
+	fc_fcp_unlock_pkt(fsp);
+}
+
+/*
+ * SCSI abort handler: sends an abort
+ * and then waits for abort completion
+ */
+static int fc_fcp_pkt_abort(struct fc_lport *lp, struct fc_fcp_pkt *fsp)
+{
+	int rc = FAILED;
+
+	if (fc_fcp_send_abort(fsp))
+		return FAILED;
+
+	init_completion(&fsp->tm_done);
+	fsp->wait_for_comp = 1;
+
+	spin_unlock_bh(&fsp->scsi_pkt_lock);
+	rc = wait_for_completion_timeout(&fsp->tm_done, FC_SCSI_TM_TOV);
+	spin_lock_bh(&fsp->scsi_pkt_lock);
+	fsp->wait_for_comp = 0;
+
+	if (!rc) {
+		FC_DBG("target abort cmd  failed\n");
+		rc = FAILED;
+	} else if (fsp->state & FC_SRB_ABORTED) {
+		FC_DBG("target abort cmd  passed\n");
+		rc = SUCCESS;
+		fc_fcp_complete_locked(fsp);
+	}
+
+	return rc;
+}
+
+/*
+ * Retry LUN reset after resource allocation failed.
+ */
+static void fc_lun_reset_send(unsigned long data)
+{
+	struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data;
+	struct fc_lport *lp = fsp->lp;
+	if (lp->tt.fcp_cmd_send(lp, fsp, fc_tm_done)) {
+		if (fsp->recov_retry++ >= FC_MAX_RECOV_RETRY)
+			return;
+		if (fc_fcp_lock_pkt(fsp))
+			return;
+		setup_timer(&fsp->timer, fc_lun_reset_send, (unsigned long)fsp);
+		fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
+		fc_fcp_unlock_pkt(fsp);
+	}
+}
+
+/*
+ * SCSI device reset handler: sends a LUN RESET to the device
+ * and waits for the reset reply
+ */
+static int fc_lun_reset(struct fc_lport *lp, struct fc_fcp_pkt *fsp,
+			unsigned int id, unsigned int lun)
+{
+	int rc;
+
+	fsp->cdb_cmd.fc_dl = htonl(fsp->data_len);
+	fsp->cdb_cmd.fc_tm_flags = FCP_TMF_LUN_RESET;
+	int_to_scsilun(lun, (struct scsi_lun *)fsp->cdb_cmd.fc_lun);
+
+	fsp->wait_for_comp = 1;
+	init_completion(&fsp->tm_done);
+
+	fc_lun_reset_send((unsigned long)fsp);
+
+	/*
+	 * wait for completion of reset
+	 * after that make sure all commands are terminated
+	 */
+	rc = wait_for_completion_timeout(&fsp->tm_done, FC_SCSI_TM_TOV);
+
+	spin_lock_bh(&fsp->scsi_pkt_lock);
+	fsp->state |= FC_SRB_COMPL;
+	spin_unlock_bh(&fsp->scsi_pkt_lock);
+
+	del_timer_sync(&fsp->timer);
+
+	spin_lock_bh(&fsp->scsi_pkt_lock);
+	if (fsp->seq_ptr) {
+		lp->tt.exch_done(fsp->seq_ptr);
+		fsp->seq_ptr = NULL;
+	}
+	fsp->wait_for_comp = 0;
+	spin_unlock_bh(&fsp->scsi_pkt_lock);
+
+	if (!rc) {
+		FC_DBG("lun reset failed\n");
+		return FAILED;
+	}
+
+	/* cdb_status holds the tmf's rsp code */
+	if (fsp->cdb_status != FCP_TMF_CMPL)
+		return FAILED;
+
+	FC_DBG("lun reset to lun %u completed\n", lun);
+	fc_fcp_cleanup_each_cmd(lp, id, lun, FC_CMD_ABORTED);
+	return SUCCESS;
+}
+
+/*
+ * Task Management response handler
+ */
+static void fc_tm_done(struct fc_seq *seq, struct fc_frame *fp, void *arg)
+{
+	struct fc_fcp_pkt *fsp = arg;
+	struct fc_frame_header *fh;
+
+	if (IS_ERR(fp)) {
+		/*
+		 * If there is an error just let it time out or wait
+		 * for the TMF to be aborted if it timed out.
+		 *
+		 * scsi-eh will escalate when either happens.
+		 */
+		return;
+	}
+
+	if (fc_fcp_lock_pkt(fsp))
+		return;
+
+	/*
+	 * raced with eh timeout handler.
+	 */
+	if (!fsp->seq_ptr || !fsp->wait_for_comp) {
+		spin_unlock_bh(&fsp->scsi_pkt_lock);
+		return;
+	}
+
+	fh = fc_frame_header_get(fp);
+	if (fh->fh_type != FC_TYPE_BLS)
+		fc_fcp_resp(fsp, fp);
+	fsp->seq_ptr = NULL;
+	fsp->lp->tt.exch_done(seq);
+	fc_frame_free(fp);
+	fc_fcp_unlock_pkt(fsp);
+}
+
+static void fc_fcp_cleanup(struct fc_lport *lp)
+{
+	fc_fcp_cleanup_each_cmd(lp, -1, -1, FC_ERROR);
+}
+
+/*
+ * fc_fcp_timeout: called by OS timer function.
+ *
+ * The timer has been inactivated and must be reactivated if desired
+ * using fc_fcp_timer_set().
+ *
+ * Algorithm:
+ *
+ * If REC is supported, just issue it, and return.  The REC exchange will
+ * complete or time out, and recovery can continue at that point.
+ *
+ * Otherwise, if the response has been received without all the data,
+ * it has been ER_TIMEOUT since the response was received.
+ *
+ * If the response has not been received,
+ * we see if data was received recently.  If it has been, we continue waiting,
+ * otherwise, we abort the command.
+ */
+static void fc_fcp_timeout(unsigned long data)
+{
+	struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data;
+	struct fc_rport *rport = fsp->rport;
+	struct fc_rport_libfc_priv *rp = rport->dd_data;
+
+	if (fc_fcp_lock_pkt(fsp))
+		return;
+
+	if (fsp->cdb_cmd.fc_tm_flags)
+		goto unlock;
+
+	fsp->state |= FC_SRB_FCP_PROCESSING_TMO;
+
+	if (rp->flags & FC_RP_FLAGS_REC_SUPPORTED)
+		fc_fcp_rec(fsp);
+	else if (time_after_eq(fsp->last_pkt_time + (FC_SCSI_ER_TIMEOUT / 2),
+			       jiffies))
+		fc_fcp_timer_set(fsp, FC_SCSI_ER_TIMEOUT);
+	else if (fsp->state & FC_SRB_RCV_STATUS)
+		fc_fcp_complete_locked(fsp);
+	else
+		fc_timeout_error(fsp);
+	fsp->state &= ~FC_SRB_FCP_PROCESSING_TMO;
+unlock:
+	fc_fcp_unlock_pkt(fsp);
+}
+
+/*
+ * Send a REC ELS request
+ */
+static void fc_fcp_rec(struct fc_fcp_pkt *fsp)
+{
+	struct fc_lport *lp;
+	struct fc_frame *fp;
+	struct fc_rport *rport;
+	struct fc_rport_libfc_priv *rp;
+
+	lp = fsp->lp;
+	rport = fsp->rport;
+	rp = rport->dd_data;
+	if (!fsp->seq_ptr || rp->rp_state != RPORT_ST_READY) {
+		fsp->status_code = FC_HRD_ERROR;
+		fsp->io_status = SUGGEST_RETRY << 24;
+		fc_fcp_complete_locked(fsp);
+		return;
+	}
+	fp = fc_frame_alloc(lp, sizeof(struct fc_els_rec));
+	if (!fp)
+		goto retry;
+
+	fr_seq(fp) = fsp->seq_ptr;
+	fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, rport->port_id,
+		       fc_host_port_id(rp->local_port->host), FC_TYPE_ELS,
+		       FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
+	if (lp->tt.elsct_send(lp, rport, fp, ELS_REC, fc_fcp_rec_resp,
+			      fsp, jiffies_to_msecs(FC_SCSI_REC_TOV))) {
+		fc_fcp_pkt_hold(fsp);		/* hold while REC outstanding */
+		return;
+	}
+	fc_frame_free(fp);
+retry:
+	if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
+		fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
+	else
+		fc_timeout_error(fsp);
+}
+
+/*
+ * Receive handler for REC ELS frame.
+ * If it is a reject, let the scsi layer handle
+ * the timeout. If it is an LS_ACC and the I/O is not yet complete,
+ * set the timer and return; otherwise complete the exchange
+ * and tell the scsi layer to restart the I/O.
+ */
+static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
+{
+	struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)arg;
+	struct fc_els_rec_acc *recp;
+	struct fc_els_ls_rjt *rjt;
+	u32 e_stat;
+	u8 opcode;
+	u32 offset;
+	enum dma_data_direction data_dir;
+	enum fc_rctl r_ctl;
+	struct fc_rport_libfc_priv *rp;
+
+	if (IS_ERR(fp)) {
+		fc_fcp_rec_error(fsp, fp);
+		return;
+	}
+
+	if (fc_fcp_lock_pkt(fsp))
+		goto out;
+
+	fsp->recov_retry = 0;
+	opcode = fc_frame_payload_op(fp);
+	if (opcode == ELS_LS_RJT) {
+		rjt = fc_frame_payload_get(fp, sizeof(*rjt));
+		switch (rjt->er_reason) {
+		default:
+			FC_DEBUG_FCP("device %x unexpected REC reject "
+				     "reason %d expl %d\n",
+				     fsp->rport->port_id, rjt->er_reason,
+				     rjt->er_explan);
+			/* fall through */
+		case ELS_RJT_UNSUP:
+			FC_DEBUG_FCP("device does not support REC\n");
+			rp = fsp->rport->dd_data;
+			/*
+			 * If we do not support RECs or got some bogus
+			 * reason, then re-arm the timer so we keep checking
+			 * for progress.
+			 */
+			rp->flags &= ~FC_RP_FLAGS_REC_SUPPORTED;
+			fc_fcp_timer_set(fsp, FC_SCSI_ER_TIMEOUT);
+			break;
+		case ELS_RJT_LOGIC:
+		case ELS_RJT_UNAB:
+			/*
+			 * If no data transfer, the command frame got dropped
+			 * so we just retry.  If data was transferred, we
+			 * lost the response but the target has no record,
+			 * so we abort and retry.
+			 */
+			if (rjt->er_explan == ELS_EXPL_OXID_RXID &&
+			    fsp->xfer_len == 0) {
+				fc_fcp_retry_cmd(fsp);
+				break;
+			}
+			fc_timeout_error(fsp);
+			break;
+		}
+	} else if (opcode == ELS_LS_ACC) {
+		if (fsp->state & FC_SRB_ABORTED)
+			goto unlock_out;
+
+		data_dir = fsp->cmd->sc_data_direction;
+		recp = fc_frame_payload_get(fp, sizeof(*recp));
+		offset = ntohl(recp->reca_fc4value);
+		e_stat = ntohl(recp->reca_e_stat);
+
+		if (e_stat & ESB_ST_COMPLETE) {
+
+			/*
+			 * The exchange is complete.
+			 *
+			 * For output, we must've lost the response.
+			 * For input, all data must've been sent.
+			 * We may have lost the response
+			 * (and a confirmation was requested) and maybe
+			 * some data.
+			 *
+			 * If all data received, send SRR
+			 * asking for response.	 If partial data received,
+			 * or gaps, SRR requests data at start of gap.
+			 * Recovery via SRR relies on in-order-delivery.
+			 */
+			if (data_dir == DMA_TO_DEVICE) {
+				r_ctl = FC_RCTL_DD_CMD_STATUS;
+			} else if (fsp->xfer_contig_end == offset) {
+				r_ctl = FC_RCTL_DD_CMD_STATUS;
+			} else {
+				offset = fsp->xfer_contig_end;
+				r_ctl = FC_RCTL_DD_SOL_DATA;
+			}
+			fc_fcp_srr(fsp, r_ctl, offset);
+		} else if (e_stat & ESB_ST_SEQ_INIT) {
+
+			/*
+			 * The remote port has the initiative, so just
+			 * keep waiting for it to complete.
+			 */
+			fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
+		} else {
+
+			/*
+			 * The exchange is incomplete, we have seq. initiative.
+			 * Lost response with requested confirmation,
+			 * lost confirmation, lost transfer ready or
+			 * lost write data.
+			 *
+			 * For output, if not all data was received, ask
+			 * for transfer ready to be repeated.
+			 *
+			 * If we received or sent all the data, send SRR to
+			 * request response.
+			 *
+			 * If we lost a response, we may have lost some read
+			 * data as well.
+			 */
+			r_ctl = FC_RCTL_DD_SOL_DATA;
+			if (data_dir == DMA_TO_DEVICE) {
+				r_ctl = FC_RCTL_DD_CMD_STATUS;
+				if (offset < fsp->data_len)
+					r_ctl = FC_RCTL_DD_DATA_DESC;
+			} else if (offset == fsp->xfer_contig_end) {
+				r_ctl = FC_RCTL_DD_CMD_STATUS;
+			} else if (fsp->xfer_contig_end < offset) {
+				offset = fsp->xfer_contig_end;
+			}
+			fc_fcp_srr(fsp, r_ctl, offset);
+		}
+	}
+unlock_out:
+	fc_fcp_unlock_pkt(fsp);
+out:
+	fc_fcp_pkt_release(fsp);	/* drop hold for outstanding REC */
+	fc_frame_free(fp);
+}
+
+/*
+ * Handle error response or timeout for REC exchange.
+ */
+static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
+{
+	int error = PTR_ERR(fp);
+
+	if (fc_fcp_lock_pkt(fsp))
+		goto out;
+
+	switch (error) {
+	case -FC_EX_CLOSED:
+		fc_fcp_retry_cmd(fsp);
+		break;
+
+	default:
+		FC_DBG("REC %p fid %x error unexpected error %d\n",
+		       fsp, fsp->rport->port_id, error);
+		fsp->status_code = FC_CMD_PLOGO;
+		/* fall through */
+
+	case -FC_EX_TIMEOUT:
+		/*
+		 * Assume REC or LS_ACC was lost.
+		 * The exchange manager will have aborted REC, so retry.
+		 */
+		FC_DBG("REC fid %x error error %d retry %d/%d\n",
+		       fsp->rport->port_id, error, fsp->recov_retry,
+		       FC_MAX_RECOV_RETRY);
+		if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
+			fc_fcp_rec(fsp);
+		else
+			fc_timeout_error(fsp);
+		break;
+	}
+	fc_fcp_unlock_pkt(fsp);
+out:
+	fc_fcp_pkt_release(fsp);	/* drop hold for outstanding REC */
+}
+
+/*
+ * Timeout error routine:
+ * aborts the I/O, closes the exchange and
+ * sends a completion notification to the scsi layer
+ */
+static void fc_timeout_error(struct fc_fcp_pkt *fsp)
+{
+	fsp->status_code = FC_CMD_TIME_OUT;
+	fsp->cdb_status = 0;
+	fsp->io_status = 0;
+	/*
+	 * if this fails then we let the scsi command timer fire and
+	 * scsi-ml escalate.
+	 */
+	fc_fcp_send_abort(fsp);
+}
+
+/*
+ * Sequence retransmission request.
+ * This is called after receiving status but insufficient data, or
+ * when expecting status but the request has timed out.
+ */
+static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset)
+{
+	struct fc_lport *lp = fsp->lp;
+	struct fc_rport *rport;
+	struct fc_rport_libfc_priv *rp;
+	struct fc_exch *ep = fc_seq_exch(fsp->seq_ptr);
+	struct fc_seq *seq;
+	struct fcp_srr *srr;
+	struct fc_frame *fp;
+	u8 cdb_op;
+
+	rport = fsp->rport;
+	rp = rport->dd_data;
+	cdb_op = fsp->cdb_cmd.fc_cdb[0];
+
+	if (!(rp->flags & FC_RP_FLAGS_RETRY) || rp->rp_state != RPORT_ST_READY)
+		goto retry;			/* shouldn't happen */
+	fp = fc_frame_alloc(lp, sizeof(*srr));
+	if (!fp)
+		goto retry;
+
+	srr = fc_frame_payload_get(fp, sizeof(*srr));
+	memset(srr, 0, sizeof(*srr));
+	srr->srr_op = ELS_SRR;
+	srr->srr_ox_id = htons(ep->oxid);
+	srr->srr_rx_id = htons(ep->rxid);
+	srr->srr_r_ctl = r_ctl;
+	srr->srr_rel_off = htonl(offset);
+
+	fc_fill_fc_hdr(fp, FC_RCTL_ELS4_REQ, rport->port_id,
+		       fc_host_port_id(rp->local_port->host), FC_TYPE_FCP,
+		       FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
+
+	seq = lp->tt.exch_seq_send(lp, fp, fc_fcp_srr_resp, NULL,
+				   fsp, jiffies_to_msecs(FC_SCSI_REC_TOV));
+	if (!seq) {
+		fc_frame_free(fp);
+		goto retry;
+	}
+	fsp->recov_seq = seq;
+	fsp->xfer_len = offset;
+	fsp->xfer_contig_end = offset;
+	fsp->state &= ~FC_SRB_RCV_STATUS;
+	fc_fcp_pkt_hold(fsp);		/* hold for outstanding SRR */
+	return;
+retry:
+	fc_fcp_retry_cmd(fsp);
+}
+
+/*
+ * Handle response from SRR.
+ */
+static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
+{
+	struct fc_fcp_pkt *fsp = arg;
+	struct fc_frame_header *fh;
+
+	if (IS_ERR(fp)) {
+		fc_fcp_srr_error(fsp, fp);
+		return;
+	}
+
+	if (fc_fcp_lock_pkt(fsp))
+		goto out;
+
+	fh = fc_frame_header_get(fp);
+	/*
+	 * BUG? fc_fcp_srr_error calls exch_done which would release
+	 * the ep. But if fc_fcp_srr_error had got -FC_EX_TIMEOUT,
+	 * then fc_exch_timeout would be sending an abort. The exch_done
+	 * call by fc_fcp_srr_error would prevent fc_exch.c from seeing
+	 * an abort response though.
+	 */
+	if (fh->fh_type == FC_TYPE_BLS) {
+		fc_fcp_unlock_pkt(fsp);
+		return;
+	}
+
+	fsp->recov_seq = NULL;
+	switch (fc_frame_payload_op(fp)) {
+	case ELS_LS_ACC:
+		fsp->recov_retry = 0;
+		fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
+		break;
+	case ELS_LS_RJT:
+	default:
+		fc_timeout_error(fsp);
+		break;
+	}
+	fc_fcp_unlock_pkt(fsp);
+	fsp->lp->tt.exch_done(seq);
+out:
+	fc_frame_free(fp);
+	fc_fcp_pkt_release(fsp);	/* drop hold for outstanding SRR */
+}
+
+static void fc_fcp_srr_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
+{
+	if (fc_fcp_lock_pkt(fsp))
+		goto out;
+	fsp->lp->tt.exch_done(fsp->recov_seq);
+	fsp->recov_seq = NULL;
+	switch (PTR_ERR(fp)) {
+	case -FC_EX_TIMEOUT:
+		if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
+			fc_fcp_rec(fsp);
+		else
+			fc_timeout_error(fsp);
+		break;
+	case -FC_EX_CLOSED:			/* e.g., link failure */
+		/* fall through */
+	default:
+		fc_fcp_retry_cmd(fsp);
+		break;
+	}
+	fc_fcp_unlock_pkt(fsp);
+out:
+	fc_fcp_pkt_release(fsp);	/* drop hold for outstanding SRR */
+}
+
+static inline int fc_fcp_lport_queue_ready(struct fc_lport *lp)
+{
+	/* lock ? */
+	return (lp->state == LPORT_ST_READY) && (lp->link_status & FC_LINK_UP);
+}
+
+/**
+ * fc_queuecommand - The queuecommand function of the scsi template
+ * @cmd:	struct scsi_cmnd to be executed
+ * @done:	Callback function to be called when cmd is completed
+ *
+ * This is the I/O strategy routine, called by the scsi layer.
+ * It is called while holding the host_lock.
+ */
+int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *))
+{
+	struct fc_lport *lp;
+	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
+	struct fc_fcp_pkt *fsp;
+	struct fc_rport_libfc_priv *rp;
+	int rval;
+	int rc = 0;
+	struct fcoe_dev_stats *stats;
+
+	lp = shost_priv(sc_cmd->device->host);
+
+	rval = fc_remote_port_chkready(rport);
+	if (rval) {
+		sc_cmd->result = rval;
+		done(sc_cmd);
+		goto out;
+	}
+
+	if (!*(struct fc_remote_port **)rport->dd_data) {
+		/*
+		 * rport is transitioning from blocked/deleted to
+		 * online
+		 */
+		sc_cmd->result = DID_IMM_RETRY << 16;
+		done(sc_cmd);
+		goto out;
+	}
+
+	rp = rport->dd_data;
+
+	if (!fc_fcp_lport_queue_ready(lp)) {
+		rc = SCSI_MLQUEUE_HOST_BUSY;
+		goto out;
+	}
+
+	fsp = fc_fcp_pkt_alloc(lp, GFP_ATOMIC);
+	if (fsp == NULL) {
+		rc = SCSI_MLQUEUE_HOST_BUSY;
+		goto out;
+	}
+
+	/*
+	 * build the libfc request pkt
+	 */
+	fsp->cmd = sc_cmd;	/* save the cmd */
+	fsp->lp = lp;		/* save the softc ptr */
+	fsp->rport = rport;	/* set the remote port ptr */
+	sc_cmd->scsi_done = done;
+
+	/*
+	 * set up the transfer length
+	 */
+	fsp->data_len = scsi_bufflen(sc_cmd);
+	fsp->xfer_len = 0;
+
+	/*
+	 * setup the data direction
+	 */
+	stats = lp->dev_stats[smp_processor_id()];
+	if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
+		fsp->req_flags = FC_SRB_READ;
+		stats->InputRequests++;
+		stats->InputMegabytes = fsp->data_len;
+	} else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
+		fsp->req_flags = FC_SRB_WRITE;
+		stats->OutputRequests++;
+		stats->OutputMegabytes = fsp->data_len;
+	} else {
+		fsp->req_flags = 0;
+		stats->ControlRequests++;
+	}
+
+	fsp->tgt_flags = rp->flags;
+
+	init_timer(&fsp->timer);
+	fsp->timer.data = (unsigned long)fsp;
+
+	/*
+	 * Send it to the lower layer.
+	 * If we get a -1 return, report host busy so the midlayer
+	 * requeues the request.
+	 */
+	rval = fc_fcp_pkt_send(lp, fsp);
+	if (rval != 0) {
+		fsp->state = FC_SRB_FREE;
+		fc_fcp_pkt_release(fsp);
+		rc = SCSI_MLQUEUE_HOST_BUSY;
+	}
+out:
+	return rc;
+}
+EXPORT_SYMBOL(fc_queuecommand);
+
+/**
+ * fc_io_compl -  Handle responses for completed commands
+ * @fsp:	scsi packet
+ *
+ * Translates an error to a Linux SCSI error.
+ *
+ * The fcp packet lock must be held when calling.
+ */
+static void fc_io_compl(struct fc_fcp_pkt *fsp)
+{
+	struct fc_fcp_internal *si;
+	struct scsi_cmnd *sc_cmd;
+	struct fc_lport *lp;
+	unsigned long flags;
+
+	fsp->state |= FC_SRB_COMPL;
+	if (!(fsp->state & FC_SRB_FCP_PROCESSING_TMO)) {
+		spin_unlock_bh(&fsp->scsi_pkt_lock);
+		del_timer_sync(&fsp->timer);
+		spin_lock_bh(&fsp->scsi_pkt_lock);
+	}
+
+	lp = fsp->lp;
+	si = fc_get_scsi_internal(lp);
+	spin_lock_irqsave(lp->host->host_lock, flags);
+	if (!fsp->cmd) {
+		spin_unlock_irqrestore(lp->host->host_lock, flags);
+		return;
+	}
+
+	/*
+	 * If a command timed out while we had to try and throttle IO
+	 * and it is now getting cleaned up, then we are about to
+	 * try again, so clear the throttled flag in case we get more
+	 * timeouts.
+	 */
+	if (si->throttled && fsp->state & FC_SRB_NOMEM)
+		si->throttled = 0;
+
+	sc_cmd = fsp->cmd;
+	fsp->cmd = NULL;
+
+	if (!sc_cmd->SCp.ptr) {
+		spin_unlock_irqrestore(lp->host->host_lock, flags);
+		return;
+	}
+
+	CMD_SCSI_STATUS(sc_cmd) = fsp->cdb_status;
+	switch (fsp->status_code) {
+	case FC_COMPLETE:
+		if (fsp->cdb_status == 0) {
+			/*
+			 * good I/O status
+			 */
+			sc_cmd->result = DID_OK << 16;
+			if (fsp->scsi_resid)
+				CMD_RESID_LEN(sc_cmd) = fsp->scsi_resid;
+		} else if (fsp->cdb_status == QUEUE_FULL) {
+			struct scsi_device *tmp_sdev;
+			struct scsi_device *sdev = sc_cmd->device;
+
+			shost_for_each_device(tmp_sdev, sdev->host) {
+				if (tmp_sdev->id != sdev->id)
+					continue;
+
+				if (tmp_sdev->queue_depth > 1) {
+					scsi_track_queue_full(tmp_sdev,
+							      tmp_sdev->
+							      queue_depth - 1);
+				}
+			}
+			sc_cmd->result = (DID_OK << 16) | fsp->cdb_status;
+		} else {
+			/*
+			 * transport level I/O was ok but scsi
+			 * has non zero status
+			 */
+			sc_cmd->result = (DID_OK << 16) | fsp->cdb_status;
+		}
+		break;
+	case FC_ERROR:
+		sc_cmd->result = DID_ERROR << 16;
+		break;
+	case FC_DATA_UNDRUN:
+		if (fsp->cdb_status == 0) {
+			/*
+			 * scsi status is good but transport level
+			 * underrun. for read it should be an error??
+			 */
+			sc_cmd->result = (DID_OK << 16) | fsp->cdb_status;
+		} else {
+			/*
+			 * scsi got underrun, this is an error
+			 */
+			CMD_RESID_LEN(sc_cmd) = fsp->scsi_resid;
+			sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status;
+		}
+		break;
+	case FC_DATA_OVRRUN:
+		/*
+		 * overrun is an error
+		 */
+		sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status;
+		break;
+	case FC_CMD_ABORTED:
+		sc_cmd->result = (DID_ABORT << 16) | fsp->io_status;
+		break;
+	case FC_CMD_TIME_OUT:
+		sc_cmd->result = (DID_BUS_BUSY << 16) | fsp->io_status;
+		break;
+	case FC_CMD_RESET:
+		sc_cmd->result = (DID_RESET << 16);
+		break;
+	case FC_HRD_ERROR:
+		sc_cmd->result = (DID_NO_CONNECT << 16);
+		break;
+	default:
+		sc_cmd->result = (DID_ERROR << 16);
+		break;
+	}
+
+	list_del(&fsp->list);
+	sc_cmd->SCp.ptr = NULL;
+	sc_cmd->scsi_done(sc_cmd);
+	spin_unlock_irqrestore(lp->host->host_lock, flags);
+
+	/* release ref from initial allocation in queue command */
+	fc_fcp_pkt_release(fsp);
+}
+
+/**
+ * fc_fcp_complete - complete processing of a fcp packet
+ * @fsp:	fcp packet
+ *
+ * This function may sleep if a fsp timer is pending.
+ * The host lock must not be held by caller.
+ */
+void fc_fcp_complete(struct fc_fcp_pkt *fsp)
+{
+	if (fc_fcp_lock_pkt(fsp))
+		return;
+
+	fc_fcp_complete_locked(fsp);
+	fc_fcp_unlock_pkt(fsp);
+}
+EXPORT_SYMBOL(fc_fcp_complete);
+
+/**
+ * fc_eh_abort - Abort a command...from scsi host template
+ * @sc_cmd:	scsi command to abort
+ *
+ * Sends an ABTS to the target device and waits for the response.
+ * sc_cmd is the pointer to the command to be aborted.
+ */
+int fc_eh_abort(struct scsi_cmnd *sc_cmd)
+{
+	struct fc_fcp_pkt *fsp;
+	struct fc_lport *lp;
+	int rc = FAILED;
+	unsigned long flags;
+
+	lp = shost_priv(sc_cmd->device->host);
+	if (lp->state != LPORT_ST_READY)
+		return rc;
+	else if (!(lp->link_status & FC_LINK_UP))
+		return rc;
+
+	spin_lock_irqsave(lp->host->host_lock, flags);
+	fsp = CMD_SP(sc_cmd);
+	if (!fsp) {
+		/* command completed while scsi eh was setting up */
+		spin_unlock_irqrestore(lp->host->host_lock, flags);
+		return SUCCESS;
+	}
+	/* grab a ref so the fsp and sc_cmd cannot be released from under us */
+	fc_fcp_pkt_hold(fsp);
+	spin_unlock_irqrestore(lp->host->host_lock, flags);
+
+	if (fc_fcp_lock_pkt(fsp)) {
+		/* completed while we were waiting for timer to be deleted */
+		rc = SUCCESS;
+		goto release_pkt;
+	}
+
+	rc = fc_fcp_pkt_abort(lp, fsp);
+	fc_fcp_unlock_pkt(fsp);
+
+release_pkt:
+	fc_fcp_pkt_release(fsp);
+	return rc;
+}
+EXPORT_SYMBOL(fc_eh_abort);
+
+/**
+ * fc_eh_device_reset - Reset a single LUN
+ * @sc_cmd:	scsi command
+ *
+ * Set from scsi host template to send tm cmd to the target and wait for the
+ * response.
+ */
+int fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
+{
+	struct fc_lport *lp;
+	struct fc_fcp_pkt *fsp;
+	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
+	int rc = FAILED;
+	struct fc_rport_libfc_priv *rp;
+	int rval;
+
+	rval = fc_remote_port_chkready(rport);
+	if (rval)
+		goto out;
+
+	rp = rport->dd_data;
+	lp = shost_priv(sc_cmd->device->host);
+
+	if (lp->state != LPORT_ST_READY)
+		return rc;
+
+	fsp = fc_fcp_pkt_alloc(lp, GFP_NOIO);
+	if (fsp == NULL) {
+		FC_DBG("could not allocate scsi_pkt\n");
+		sc_cmd->result = DID_NO_CONNECT << 16;
+		goto out;
+	}
+
+	/*
+	 * Build the libfc request pkt. Do not set the scsi cmnd, because
+	 * the sc passed in is not set up for execution like when sent
+	 * through the queuecommand callout.
+	 */
+	fsp->lp = lp;		/* save the softc ptr */
+	fsp->rport = rport;	/* set the remote port ptr */
+
+	/*
+	 * flush outstanding commands
+	 */
+	rc = fc_lun_reset(lp, fsp, scmd_id(sc_cmd), sc_cmd->device->lun);
+	fsp->state = FC_SRB_FREE;
+	fc_fcp_pkt_release(fsp);
+
+out:
+	return rc;
+}
+EXPORT_SYMBOL(fc_eh_device_reset);
+
+/**
+ * fc_eh_host_reset - The reset function will reset the ports on the host.
+ * @sc_cmd:	scsi command
+ */
+int fc_eh_host_reset(struct scsi_cmnd *sc_cmd)
+{
+	struct Scsi_Host *shost = sc_cmd->device->host;
+	struct fc_lport *lp = shost_priv(shost);
+	unsigned long wait_tmo;
+
+	lp->tt.lport_reset(lp);
+	wait_tmo = jiffies + FC_HOST_RESET_TIMEOUT;
+	while (!fc_fcp_lport_queue_ready(lp) && time_before(jiffies, wait_tmo))
+		msleep(1000);
+
+	if (fc_fcp_lport_queue_ready(lp)) {
+		shost_printk(KERN_INFO, shost, "Host reset succeeded.\n");
+		return SUCCESS;
+	} else {
+		shost_printk(KERN_INFO, shost, "Host reset failed. "
+			     "lport not ready.\n");
+		return FAILED;
+	}
+}
+EXPORT_SYMBOL(fc_eh_host_reset);
+
+/**
+ * fc_slave_alloc - configure queue depth
+ * @sdev:	scsi device
+ *
+ * Configures queue depth based on the host's cmd_per_lun. If not set
+ * then we use the libfc default.
+ */
+int fc_slave_alloc(struct scsi_device *sdev)
+{
+	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
+	int queue_depth;
+
+	if (!rport || fc_remote_port_chkready(rport))
+		return -ENXIO;
+
+	if (sdev->tagged_supported) {
+		if (sdev->host->hostt->cmd_per_lun)
+			queue_depth = sdev->host->hostt->cmd_per_lun;
+		else
+			queue_depth = FC_FCP_DFLT_QUEUE_DEPTH;
+		scsi_activate_tcq(sdev, queue_depth);
+	}
+	return 0;
+}
+EXPORT_SYMBOL(fc_slave_alloc);
+
+int fc_change_queue_depth(struct scsi_device *sdev, int qdepth)
+{
+	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
+	return sdev->queue_depth;
+}
+EXPORT_SYMBOL(fc_change_queue_depth);
+
+int fc_change_queue_type(struct scsi_device *sdev, int tag_type)
+{
+	if (sdev->tagged_supported) {
+		scsi_set_tag_type(sdev, tag_type);
+		if (tag_type)
+			scsi_activate_tcq(sdev, sdev->queue_depth);
+		else
+			scsi_deactivate_tcq(sdev, sdev->queue_depth);
+	} else
+		tag_type = 0;
+
+	return tag_type;
+}
+EXPORT_SYMBOL(fc_change_queue_type);
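+
+/*
+ * Wiring sketch for a hypothetical LLD's scsi_host_template; the
+ * handlers exported above are meant to be plugged in along these
+ * lines:
+ *
+ *	static struct scsi_host_template example_fc_sht = {
+ *		.queuecommand		 = fc_queuecommand,
+ *		.eh_abort_handler	 = fc_eh_abort,
+ *		.eh_device_reset_handler = fc_eh_device_reset,
+ *		.eh_host_reset_handler	 = fc_eh_host_reset,
+ *		.slave_alloc		 = fc_slave_alloc,
+ *		.change_queue_depth	 = fc_change_queue_depth,
+ *		.change_queue_type	 = fc_change_queue_type,
+ *	};
+ */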
+
+void fc_fcp_destroy(struct fc_lport *lp)
+{
+	struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
+
+	if (!list_empty(&si->scsi_pkt_queue))
+		printk(KERN_ERR "Leaked scsi packets.\n");
+
+	mempool_destroy(si->scsi_pkt_pool);
+	kfree(si);
+	lp->scsi_priv = NULL;
+}
+EXPORT_SYMBOL(fc_fcp_destroy);
+
+int fc_fcp_init(struct fc_lport *lp)
+{
+	int rc;
+	struct fc_fcp_internal *si;
+
+	if (!lp->tt.fcp_cmd_send)
+		lp->tt.fcp_cmd_send = fc_fcp_cmd_send;
+
+	if (!lp->tt.fcp_cleanup)
+		lp->tt.fcp_cleanup = fc_fcp_cleanup;
+
+	if (!lp->tt.fcp_abort_io)
+		lp->tt.fcp_abort_io = fc_fcp_abort_io;
+
+	si = kzalloc(sizeof(struct fc_fcp_internal), GFP_KERNEL);
+	if (!si)
+		return -ENOMEM;
+	lp->scsi_priv = si;
+	INIT_LIST_HEAD(&si->scsi_pkt_queue);
+
+	si->scsi_pkt_pool = mempool_create_slab_pool(2, scsi_pkt_cachep);
+	if (!si->scsi_pkt_pool) {
+		rc = -ENOMEM;
+		goto free_internal;
+	}
+	return 0;
+
+free_internal:
+	kfree(si);
+	return rc;
+}
+EXPORT_SYMBOL(fc_fcp_init);
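+
+/*
+ * Initialization sketch for a hypothetical LLD: any transport template
+ * hooks left NULL before calling fc_fcp_init() are filled in with the
+ * libfc defaults above (fc_fcp_cmd_send, fc_fcp_cleanup,
+ * fc_fcp_abort_io):
+ *
+ *	lp->tt.fcp_cmd_send = NULL;	use the libfc default
+ *	rc = fc_fcp_init(lp);		-ENOMEM if pool creation fails
+ *	if (rc)
+ *		goto err;
+ */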
+
+static int __init libfc_init(void)
+{
+	int rc;
+
+	scsi_pkt_cachep = kmem_cache_create("libfc_fcp_pkt",
+					    sizeof(struct fc_fcp_pkt),
+					    0, SLAB_HWCACHE_ALIGN, NULL);
+	if (scsi_pkt_cachep == NULL) {
+		FC_DBG("Unable to allocate SRB cache...module load failed!");
+		return -ENOMEM;
+	}
+
+	rc = fc_setup_exch_mgr();
+	if (rc)
+		goto destroy_pkt_cache;
+
+	rc = fc_setup_rport();
+	if (rc)
+		goto destroy_em;
+
+	return rc;
+destroy_em:
+	fc_destroy_exch_mgr();
+destroy_pkt_cache:
+	kmem_cache_destroy(scsi_pkt_cachep);
+	return rc;
+}
+
+static void __exit libfc_exit(void)
+{
+	kmem_cache_destroy(scsi_pkt_cachep);
+	fc_destroy_exch_mgr();
+	fc_destroy_rport();
+}
+
+module_init(libfc_init);
+module_exit(libfc_exit);
diff --git a/drivers/scsi/libfc/fc_frame.c b/drivers/scsi/libfc/fc_frame.c
new file mode 100644
index 000000000000..63fe00cfe667
--- /dev/null
+++ b/drivers/scsi/libfc/fc_frame.c
@@ -0,0 +1,89 @@
+/*
+ * Copyright(c) 2007 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+/*
+ * Frame allocation.
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/crc32.h>
+
+#include <scsi/fc_frame.h>
+
+/*
+ * Check the CRC in a frame.
+ */
+u32 fc_frame_crc_check(struct fc_frame *fp)
+{
+	u32 crc;
+	u32 error;
+	const u8 *bp;
+	unsigned int len;
+
+	WARN_ON(!fc_frame_is_linear(fp));
+	fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
+	len = (fr_len(fp) + 3) & ~3;	/* round up length to include fill */
+	bp = (const u8 *) fr_hdr(fp);
+	crc = ~crc32(~0, bp, len);
+	error = crc ^ fr_crc(fp);
+	return error;
+}
+EXPORT_SYMBOL(fc_frame_crc_check);
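+
+/*
+ * Usage sketch, assuming a receive path where the driver left the CRC
+ * unverified and flagged the frame with FCPHF_CRC_UNCHECKED. The frame
+ * must be linear (see the WARN_ON above); a non-zero return means the
+ * CRC did not match and the frame should be dropped:
+ *
+ *	if ((fr_flags(fp) & FCPHF_CRC_UNCHECKED) &&
+ *	    fc_frame_crc_check(fp)) {
+ *		fc_frame_free(fp);
+ *		return;
+ *	}
+ */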
+
+/*
+ * Allocate a frame intended to be sent via fcoe_xmit.
+ * Get an sk_buff for the frame and set the length.
+ */
+struct fc_frame *__fc_frame_alloc(size_t len)
+{
+	struct fc_frame *fp;
+	struct sk_buff *skb;
+
+	WARN_ON((len % sizeof(u32)) != 0);
+	len += sizeof(struct fc_frame_header);
+	skb = dev_alloc_skb(len + FC_FRAME_HEADROOM + FC_FRAME_TAILROOM);
+	if (!skb)
+		return NULL;
+	fp = (struct fc_frame *) skb;
+	fc_frame_init(fp);
+	skb_reserve(skb, FC_FRAME_HEADROOM);
+	skb_put(skb, len);
+	return fp;
+}
+EXPORT_SYMBOL(__fc_frame_alloc);
+
+
+struct fc_frame *fc_frame_alloc_fill(struct fc_lport *lp, size_t payload_len)
+{
+	struct fc_frame *fp;
+	size_t fill;
+
+	fill = payload_len % 4;
+	if (fill != 0)
+		fill = 4 - fill;
+	fp = __fc_frame_alloc(payload_len + fill);
+	if (fp) {
+		memset((char *) fr_hdr(fp) + payload_len, 0, fill);
+		/* trim is OK, we just allocated it so there are no fragments */
+		skb_trim(fp_skb(fp),
+			 payload_len + sizeof(struct fc_frame_header));
+	}
+	return fp;
+}
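+
+/*
+ * Usage sketch: fc_frame_alloc_fill() rounds the payload up to a
+ * 4-byte boundary, zeroes the fill bytes and trims the skb back to the
+ * real payload length. E.g. for a hypothetical 13-byte payload (buf is
+ * an assumed source buffer):
+ *
+ *	fp = fc_frame_alloc_fill(lport, 13);	allocates 16, trims to 13
+ *	if (fp)
+ *		memcpy(fc_frame_payload_get(fp, 13), buf, 13);
+ */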
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
new file mode 100644
index 000000000000..0b9bdb1fb807
--- /dev/null
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -0,0 +1,1604 @@
+/*
+ * Copyright(c) 2007 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+/*
+ * PORT LOCKING NOTES
+ *
+ * These comments only apply to the 'port code' which consists of the lport,
+ * disc and rport blocks.
+ *
+ * MOTIVATION
+ *
+ * The lport, disc and rport blocks all have mutexes that are used to protect
+ * those objects. The main motivation for these locks is to prevent
+ * an lport reset just before we send a frame. In that scenario the
+ * lport's FID would get set to zero and then we'd send a frame with an
+ * invalid SID. We also need to ensure that states don't change unexpectedly
+ * while processing another state.
+ *
+ * HIERARCHY
+ *
+ * The following hierarchy defines the locking rules. A greater lock
+ * may be held before acquiring a lesser lock, but a lesser lock should never
+ * be held while attempting to acquire a greater lock. Here is the hierarchy:
+ *
+ * lport > disc, lport > rport, disc > rport
+ *
+ * CALLBACKS
+ *
+ * The callbacks cause complications with this scheme. There is a callback
+ * from the rport (to either lport or disc) and a callback from disc
+ * (to the lport).
+ *
+ * As rports exit the rport state machine a callback is made to the owner of
+ * the rport to notify success or failure. Since the callback is likely to
+ * cause the lport or disc to grab its lock we cannot hold the rport lock
+ * while making the callback. To ensure that the rport is not freed while
+ * processing the callback, the rport callbacks are serialized through a
+ * single-threaded workqueue. An rport would never be freed while in a
+ * callback handler because no other rport work in this queue can be executed
+ * at the same time.
+ *
+ * When discovery succeeds or fails a callback is made to the lport as
+ * notification. Currently, successful discovery causes the lport to take no
+ * action. A failure will cause the lport to reset. There is likely a circular
+ * locking problem with this implementation.
+ */
+
+/*
+ * LPORT LOCKING
+ *
+ * The critical sections protected by the lport's mutex are quite broad and
+ * may be improved upon in the future. The lport code and its locking don't
+ * influence the I/O path, so excessive locking doesn't penalize I/O
+ * performance.
+ *
+ * The strategy is to lock whenever processing a request or response. Note
+ * that every _enter_* function corresponds to a state change. They generally
+ * change the lport's state and then send a request out on the wire. We lock
+ * before calling any of these functions to protect that state change. This
+ * means that the entry points into the lport block manage the locks while
+ * the state machine can transition between states (i.e. _enter_* functions)
+ * while always staying protected.
+ *
+ * When handling responses we also hold the lport mutex broadly. When the
+ * lport receives the response frame it locks the mutex and then calls the
+ * appropriate handler for the particular response. Generally a response will
+ * trigger a state change and so the lock must already be held.
+ *
+ * Retries also have to consider the locking. The retries occur from a work
+ * context and the work function will lock the lport and then retry the state
+ * (i.e. _enter_* function).
+ */
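+
+/*
+ * Lock-ordering sketch (hypothetical caller; disc_mutex and rp_mutex
+ * stand in for the disc and rport mutexes). A greater lock is always
+ * taken before a lesser one:
+ *
+ *	mutex_lock(&lport->lp_mutex);
+ *	mutex_lock(&disc->disc_mutex);
+ *	mutex_lock(&rdata->rp_mutex);
+ *	...
+ *	mutex_unlock(&rdata->rp_mutex);
+ *	mutex_unlock(&disc->disc_mutex);
+ *	mutex_unlock(&lport->lp_mutex);
+ *
+ * Taking rp_mutex first and then attempting lp_mutex would invert this
+ * hierarchy and risk deadlock.
+ */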
+
+#include <linux/timer.h>
+#include <asm/unaligned.h>
+
+#include <scsi/fc/fc_gs.h>
+
+#include <scsi/libfc.h>
+#include <scsi/fc_encode.h>
+
+/* Fabric IDs to use for point-to-point mode, chosen on whims. */
+#define FC_LOCAL_PTP_FID_LO   0x010101
+#define FC_LOCAL_PTP_FID_HI   0x010102
+
+#define	DNS_DELAY	      3 /* Discovery delay after RSCN (in seconds)*/
+
+static int fc_lport_debug;
+
+#define FC_DEBUG_LPORT(fmt...)			\
+	do {					\
+		if (fc_lport_debug)		\
+			FC_DBG(fmt);		\
+	} while (0)
+
+static void fc_lport_error(struct fc_lport *, struct fc_frame *);
+
+static void fc_lport_enter_reset(struct fc_lport *);
+static void fc_lport_enter_flogi(struct fc_lport *);
+static void fc_lport_enter_dns(struct fc_lport *);
+static void fc_lport_enter_rpn_id(struct fc_lport *);
+static void fc_lport_enter_rft_id(struct fc_lport *);
+static void fc_lport_enter_scr(struct fc_lport *);
+static void fc_lport_enter_ready(struct fc_lport *);
+static void fc_lport_enter_logo(struct fc_lport *);
+
+static const char *fc_lport_state_names[] = {
+	[LPORT_ST_NONE] =     "none",
+	[LPORT_ST_FLOGI] =    "FLOGI",
+	[LPORT_ST_DNS] =      "dNS",
+	[LPORT_ST_RPN_ID] =   "RPN_ID",
+	[LPORT_ST_RFT_ID] =   "RFT_ID",
+	[LPORT_ST_SCR] =      "SCR",
+	[LPORT_ST_READY] =    "Ready",
+	[LPORT_ST_LOGO] =     "LOGO",
+	[LPORT_ST_RESET] =    "reset",
+};
+
+static int fc_frame_drop(struct fc_lport *lport, struct fc_frame *fp)
+{
+	fc_frame_free(fp);
+	return 0;
+}
+
+/**
+ * fc_lport_rport_callback - Event handler for rport events
+ * @lport: The lport which is receiving the event
+ * @rport: The rport which the event has occurred on
+ * @event: The event that occurred
+ *
+ * Locking Note: The rport lock should not be held when calling
+ *		 this function.
+ */
+static void fc_lport_rport_callback(struct fc_lport *lport,
+				    struct fc_rport *rport,
+				    enum fc_rport_event event)
+{
+	FC_DEBUG_LPORT("Received a %d event for port (%6x)\n", event,
+		       rport->port_id);
+
+	switch (event) {
+	case RPORT_EV_CREATED:
+		if (rport->port_id == FC_FID_DIR_SERV) {
+			mutex_lock(&lport->lp_mutex);
+			if (lport->state == LPORT_ST_DNS) {
+				lport->dns_rp = rport;
+				fc_lport_enter_rpn_id(lport);
+			} else {
+				FC_DEBUG_LPORT("Received a CREATED event on "
+					       "port (%6x) for the directory "
+					       "server, but the lport is not "
+					       "in the DNS state, it's in the "
+					       "%d state", rport->port_id,
+					       lport->state);
+				lport->tt.rport_logoff(rport);
+			}
+			mutex_unlock(&lport->lp_mutex);
+		} else
+			FC_DEBUG_LPORT("Received an event for port (%6x) "
+				       "which is not the directory server\n",
+				       rport->port_id);
+		break;
+	case RPORT_EV_LOGO:
+	case RPORT_EV_FAILED:
+	case RPORT_EV_STOP:
+		if (rport->port_id == FC_FID_DIR_SERV) {
+			mutex_lock(&lport->lp_mutex);
+			lport->dns_rp = NULL;
+			mutex_unlock(&lport->lp_mutex);
+
+		} else
+			FC_DEBUG_LPORT("Received an event for port (%6x) "
+				       "which is not the directory server\n",
+				       rport->port_id);
+		break;
+	case RPORT_EV_NONE:
+		break;
+	}
+}
+
+/**
+ * fc_lport_state - Return a string which represents the lport's state
+ * @lport: The lport whose state is to converted to a string
+ */
+static const char *fc_lport_state(struct fc_lport *lport)
+{
+	const char *cp;
+
+	cp = fc_lport_state_names[lport->state];
+	if (!cp)
+		cp = "unknown";
+	return cp;
+}
+
+/**
+ * fc_lport_ptp_setup - Create an rport for point-to-point mode
+ * @lport: The lport to attach the ptp rport to
+ * @fid: The FID of the ptp rport
+ * @remote_wwpn: The WWPN of the ptp rport
+ * @remote_wwnn: The WWNN of the ptp rport
+ */
+static void fc_lport_ptp_setup(struct fc_lport *lport,
+			       u32 remote_fid, u64 remote_wwpn,
+			       u64 remote_wwnn)
+{
+	struct fc_disc_port dp;
+
+	dp.lp = lport;
+	dp.ids.port_id = remote_fid;
+	dp.ids.port_name = remote_wwpn;
+	dp.ids.node_name = remote_wwnn;
+	dp.ids.roles = FC_RPORT_ROLE_UNKNOWN;
+
+	if (lport->ptp_rp) {
+		lport->tt.rport_logoff(lport->ptp_rp);
+		lport->ptp_rp = NULL;
+	}
+
+	lport->ptp_rp = fc_rport_rogue_create(&dp);
+
+	lport->tt.rport_login(lport->ptp_rp);
+
+	fc_lport_enter_ready(lport);
+}
+
+void fc_get_host_port_type(struct Scsi_Host *shost)
+{
+	/* TODO - currently just NPORT */
+	fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
+}
+EXPORT_SYMBOL(fc_get_host_port_type);
+
+void fc_get_host_port_state(struct Scsi_Host *shost)
+{
+	struct fc_lport *lp = shost_priv(shost);
+
+	if ((lp->link_status & FC_LINK_UP) == FC_LINK_UP)
+		fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
+	else
+		fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
+}
+EXPORT_SYMBOL(fc_get_host_port_state);
+
+void fc_get_host_speed(struct Scsi_Host *shost)
+{
+	struct fc_lport *lport = shost_priv(shost);
+
+	fc_host_speed(shost) = lport->link_speed;
+}
+EXPORT_SYMBOL(fc_get_host_speed);
+
+struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost)
+{
+	int i;
+	struct fc_host_statistics *fcoe_stats;
+	struct fc_lport *lp = shost_priv(shost);
+	struct timespec v0, v1;
+
+	fcoe_stats = &lp->host_stats;
+	memset(fcoe_stats, 0, sizeof(struct fc_host_statistics));
+
+	jiffies_to_timespec(jiffies, &v0);
+	jiffies_to_timespec(lp->boot_time, &v1);
+	fcoe_stats->seconds_since_last_reset = (v0.tv_sec - v1.tv_sec);
+
+	for_each_online_cpu(i) {
+		struct fcoe_dev_stats *stats = lp->dev_stats[i];
+		if (stats == NULL)
+			continue;
+		fcoe_stats->tx_frames += stats->TxFrames;
+		fcoe_stats->tx_words += stats->TxWords;
+		fcoe_stats->rx_frames += stats->RxFrames;
+		fcoe_stats->rx_words += stats->RxWords;
+		fcoe_stats->error_frames += stats->ErrorFrames;
+		fcoe_stats->invalid_crc_count += stats->InvalidCRCCount;
+		fcoe_stats->fcp_input_requests += stats->InputRequests;
+		fcoe_stats->fcp_output_requests += stats->OutputRequests;
+		fcoe_stats->fcp_control_requests += stats->ControlRequests;
+		fcoe_stats->fcp_input_megabytes += stats->InputMegabytes;
+		fcoe_stats->fcp_output_megabytes += stats->OutputMegabytes;
+		fcoe_stats->link_failure_count += stats->LinkFailureCount;
+	}
+	fcoe_stats->lip_count = -1;
+	fcoe_stats->nos_count = -1;
+	fcoe_stats->loss_of_sync_count = -1;
+	fcoe_stats->loss_of_signal_count = -1;
+	fcoe_stats->prim_seq_protocol_err_count = -1;
+	fcoe_stats->dumped_frames = -1;
+	return fcoe_stats;
+}
+EXPORT_SYMBOL(fc_get_host_stats);
+
+/*
+ * Fill in FLOGI command for request.
+ */
+static void
+fc_lport_flogi_fill(struct fc_lport *lport, struct fc_els_flogi *flogi,
+		    unsigned int op)
+{
+	struct fc_els_csp *sp;
+	struct fc_els_cssp *cp;
+
+	memset(flogi, 0, sizeof(*flogi));
+	flogi->fl_cmd = (u8) op;
+	put_unaligned_be64(lport->wwpn, &flogi->fl_wwpn);
+	put_unaligned_be64(lport->wwnn, &flogi->fl_wwnn);
+	sp = &flogi->fl_csp;
+	sp->sp_hi_ver = 0x20;
+	sp->sp_lo_ver = 0x20;
+	sp->sp_bb_cred = htons(10);	/* this gets set by gateway */
+	sp->sp_bb_data = htons((u16) lport->mfs);
+	cp = &flogi->fl_cssp[3 - 1];	/* class 3 parameters */
+	cp->cp_class = htons(FC_CPC_VALID | FC_CPC_SEQ);
+	if (op != ELS_FLOGI) {
+		sp->sp_features = htons(FC_SP_FT_CIRO);
+		sp->sp_tot_seq = htons(255);	/* seq. we accept */
+		sp->sp_rel_off = htons(0x1f);
+		sp->sp_e_d_tov = htonl(lport->e_d_tov);
+
+		cp->cp_rdfs = htons((u16) lport->mfs);
+		cp->cp_con_seq = htons(255);
+		cp->cp_open_seq = 1;
+	}
+}
+
+/*
+ * Add a supported FC-4 type.
+ */
+static void fc_lport_add_fc4_type(struct fc_lport *lport, enum fc_fh_type type)
+{
+	__be32 *mp;
+
+	mp = &lport->fcts.ff_type_map[type / FC_NS_BPW];
+	*mp = htonl(ntohl(*mp) | 1UL << (type % FC_NS_BPW));
+}
+
+/**
+ * fc_lport_recv_rlir_req - Handle received Registered Link Incident Report.
+ * @lport: Fibre Channel local port receiving the RLIR
+ * @sp: current sequence in the RLIR exchange
+ * @fp: RLIR request frame
+ *
+ * Locking Note: The lport lock is expected to be held before calling
+ * this function.
+ */
+static void fc_lport_recv_rlir_req(struct fc_seq *sp, struct fc_frame *fp,
+				   struct fc_lport *lport)
+{
+	FC_DEBUG_LPORT("Received RLIR request while in state %s\n",
+		       fc_lport_state(lport));
+
+	lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
+	fc_frame_free(fp);
+}
+
+/**
+ * fc_lport_recv_echo_req - Handle received ECHO request
+ * @lport: Fibre Channel local port receiving the ECHO
+ * @sp: current sequence in the ECHO exchange
+ * @fp: ECHO request frame
+ *
+ * Locking Note: The lport lock is expected to be held before calling
+ * this function.
+ */
+static void fc_lport_recv_echo_req(struct fc_seq *sp, struct fc_frame *in_fp,
+				   struct fc_lport *lport)
+{
+	struct fc_frame *fp;
+	struct fc_exch *ep = fc_seq_exch(sp);
+	unsigned int len;
+	void *pp;
+	void *dp;
+	u32 f_ctl;
+
+	FC_DEBUG_LPORT("Received ECHO request while in state %s\n",
+		       fc_lport_state(lport));
+
+	len = fr_len(in_fp) - sizeof(struct fc_frame_header);
+	pp = fc_frame_payload_get(in_fp, len);
+
+	if (len < sizeof(__be32))
+		len = sizeof(__be32);
+
+	fp = fc_frame_alloc(lport, len);
+	if (fp) {
+		dp = fc_frame_payload_get(fp, len);
+		memcpy(dp, pp, len);
+		*((u32 *)dp) = htonl(ELS_LS_ACC << 24);
+		sp = lport->tt.seq_start_next(sp);
+		f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ;
+		fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
+			       FC_TYPE_ELS, f_ctl, 0);
+		lport->tt.seq_send(lport, sp, fp);
+	}
+	fc_frame_free(in_fp);
+}
+
+/**
+ * fc_lport_recv_rnid_req - Handle received Request Node ID data request
+ * @lport: Fibre Channel local port receiving the RNID
+ * @sp: current sequence in the RNID exchange
+ * @fp: RNID request frame
+ *
+ * Locking Note: The lport lock is expected to be held before calling
+ * this function.
+ */
+static void fc_lport_recv_rnid_req(struct fc_seq *sp, struct fc_frame *in_fp,
+				   struct fc_lport *lport)
+{
+	struct fc_frame *fp;
+	struct fc_exch *ep = fc_seq_exch(sp);
+	struct fc_els_rnid *req;
+	struct {
+		struct fc_els_rnid_resp rnid;
+		struct fc_els_rnid_cid cid;
+		struct fc_els_rnid_gen gen;
+	} *rp;
+	struct fc_seq_els_data rjt_data;
+	u8 fmt;
+	size_t len;
+	u32 f_ctl;
+
+	FC_DEBUG_LPORT("Received RNID request while in state %s\n",
+		       fc_lport_state(lport));
+
+	req = fc_frame_payload_get(in_fp, sizeof(*req));
+	if (!req) {
+		rjt_data.fp = NULL;
+		rjt_data.reason = ELS_RJT_LOGIC;
+		rjt_data.explan = ELS_EXPL_NONE;
+		lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
+	} else {
+		fmt = req->rnid_fmt;
+		len = sizeof(*rp);
+		if (fmt != ELS_RNIDF_GEN ||
+		    ntohl(lport->rnid_gen.rnid_atype) == 0) {
+			fmt = ELS_RNIDF_NONE;	/* nothing to provide */
+			len -= sizeof(rp->gen);
+		}
+		fp = fc_frame_alloc(lport, len);
+		if (fp) {
+			rp = fc_frame_payload_get(fp, len);
+			memset(rp, 0, len);
+			rp->rnid.rnid_cmd = ELS_LS_ACC;
+			rp->rnid.rnid_fmt = fmt;
+			rp->rnid.rnid_cid_len = sizeof(rp->cid);
+			rp->cid.rnid_wwpn = htonll(lport->wwpn);
+			rp->cid.rnid_wwnn = htonll(lport->wwnn);
+			if (fmt == ELS_RNIDF_GEN) {
+				rp->rnid.rnid_sid_len = sizeof(rp->gen);
+				memcpy(&rp->gen, &lport->rnid_gen,
+				       sizeof(rp->gen));
+			}
+			sp = lport->tt.seq_start_next(sp);
+			f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ;
+			f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
+			fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
+				       FC_TYPE_ELS, f_ctl, 0);
+			lport->tt.seq_send(lport, sp, fp);
+		}
+	}
+	fc_frame_free(in_fp);
+}
+
+/**
+ * fc_lport_recv_adisc_req - Handle received Address Discovery Request
+ * @lport: Fibre Channel local port receiving the ADISC
+ * @sp: current sequence in the ADISC exchange
+ * @fp: ADISC request frame
+ *
+ * Locking Note: The lport lock is expected to be held before calling
+ * this function.
+ */
+static void fc_lport_recv_adisc_req(struct fc_seq *sp, struct fc_frame *in_fp,
+				    struct fc_lport *lport)
+{
+	struct fc_frame *fp;
+	struct fc_exch *ep = fc_seq_exch(sp);
+	struct fc_els_adisc *req, *rp;
+	struct fc_seq_els_data rjt_data;
+	size_t len;
+	u32 f_ctl;
+
+	FC_DEBUG_LPORT("Received ADISC request while in state %s\n",
+		       fc_lport_state(lport));
+
+	req = fc_frame_payload_get(in_fp, sizeof(*req));
+	if (!req) {
+		rjt_data.fp = NULL;
+		rjt_data.reason = ELS_RJT_LOGIC;
+		rjt_data.explan = ELS_EXPL_NONE;
+		lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
+	} else {
+		len = sizeof(*rp);
+		fp = fc_frame_alloc(lport, len);
+		if (fp) {
+			rp = fc_frame_payload_get(fp, len);
+			memset(rp, 0, len);
+			rp->adisc_cmd = ELS_LS_ACC;
+			rp->adisc_wwpn = htonll(lport->wwpn);
+			rp->adisc_wwnn = htonll(lport->wwnn);
+			hton24(rp->adisc_port_id,
+			       fc_host_port_id(lport->host));
+			sp = lport->tt.seq_start_next(sp);
+			f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ;
+			f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
+			fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
+				       FC_TYPE_ELS, f_ctl, 0);
+			lport->tt.seq_send(lport, sp, fp);
+		}
+	}
+	fc_frame_free(in_fp);
+}
+
+/**
+ * fc_lport_recv_logo_req - Handle received fabric LOGO request
+ * @lport: Fibre Channel local port receiving the LOGO
+ * @sp: current sequence in the LOGO exchange
+ * @fp: LOGO request frame
+ *
+ * Locking Note: The lport lock is expected to be held before calling
+ * this function.
+ */
+static void fc_lport_recv_logo_req(struct fc_seq *sp, struct fc_frame *fp,
+				   struct fc_lport *lport)
+{
+	lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
+	fc_lport_enter_reset(lport);
+	fc_frame_free(fp);
+}
+
+/**
+ * fc_fabric_login - Start the lport state machine
+ * @lport: The lport that should log into the fabric
+ *
+ * Locking Note: This function should not be called
+ *		 with the lport lock held.
+ */
+int fc_fabric_login(struct fc_lport *lport)
+{
+	int rc = -1;
+
+	mutex_lock(&lport->lp_mutex);
+	if (lport->state == LPORT_ST_NONE) {
+		fc_lport_enter_reset(lport);
+		rc = 0;
+	}
+	mutex_unlock(&lport->lp_mutex);
+
+	return rc;
+}
+EXPORT_SYMBOL(fc_fabric_login);
+
+/**
+ * fc_linkup - Handler for transport linkup events
+ * @lport: The lport whose link is up
+ */
+void fc_linkup(struct fc_lport *lport)
+{
+	FC_DEBUG_LPORT("Link is up for port (%6x)\n",
+		       fc_host_port_id(lport->host));
+
+	mutex_lock(&lport->lp_mutex);
+	if ((lport->link_status & FC_LINK_UP) != FC_LINK_UP) {
+		lport->link_status |= FC_LINK_UP;
+
+		if (lport->state == LPORT_ST_RESET)
+			fc_lport_enter_flogi(lport);
+	}
+	mutex_unlock(&lport->lp_mutex);
+}
+EXPORT_SYMBOL(fc_linkup);
+
+/**
+ * fc_linkdown - Handler for transport linkdown events
+ * @lport: The lport whose link is down
+ */
+void fc_linkdown(struct fc_lport *lport)
+{
+	mutex_lock(&lport->lp_mutex);
+	FC_DEBUG_LPORT("Link is down for port (%6x)\n",
+		       fc_host_port_id(lport->host));
+
+	if ((lport->link_status & FC_LINK_UP) == FC_LINK_UP) {
+		lport->link_status &= ~(FC_LINK_UP);
+		fc_lport_enter_reset(lport);
+		lport->tt.fcp_cleanup(lport);
+	}
+	mutex_unlock(&lport->lp_mutex);
+}
+EXPORT_SYMBOL(fc_linkdown);
+
+/**
+ * fc_pause - Pause the flow of frames
+ * @lport: The lport to be paused
+ */
+void fc_pause(struct fc_lport *lport)
+{
+	mutex_lock(&lport->lp_mutex);
+	lport->link_status |= FC_PAUSE;
+	mutex_unlock(&lport->lp_mutex);
+}
+EXPORT_SYMBOL(fc_pause);
+
+/**
+ * fc_unpause - Unpause the flow of frames
+ * @lport: The lport to be unpaused
+ */
+void fc_unpause(struct fc_lport *lport)
+{
+	mutex_lock(&lport->lp_mutex);
+	lport->link_status &= ~(FC_PAUSE);
+	mutex_unlock(&lport->lp_mutex);
+}
+EXPORT_SYMBOL(fc_unpause);
+
+/**
+ * fc_fabric_logoff - Logout of the fabric
+ * @lport:	      fc_lport pointer to logoff the fabric
+ *
+ * Return value:
+ *	0 for success
+ **/
+int fc_fabric_logoff(struct fc_lport *lport)
+{
+	lport->tt.disc_stop_final(lport);
+	mutex_lock(&lport->lp_mutex);
+	fc_lport_enter_logo(lport);
+	mutex_unlock(&lport->lp_mutex);
+	return 0;
+}
+EXPORT_SYMBOL(fc_fabric_logoff);
+
+/**
+ * fc_lport_destroy - unregister a fc_lport
+ * @lport:	      fc_lport pointer to unregister
+ *
+ * Return value:
+ *	0 for success
+ * Note:
+ * Exit routine for an fc_lport instance; cleans up all allocated
+ * memory and frees up other system resources.
+ *
+ **/
+int fc_lport_destroy(struct fc_lport *lport)
+{
+	lport->tt.frame_send = fc_frame_drop;
+	lport->tt.fcp_abort_io(lport);
+	lport->tt.exch_mgr_reset(lport->emp, 0, 0);
+	return 0;
+}
+EXPORT_SYMBOL(fc_lport_destroy);
+
+/**
+ * fc_set_mfs - sets up the mfs for the corresponding fc_lport
+ * @lport: fc_lport pointer to set the mfs on
+ * @mfs: the new mfs for fc_lport
+ *
+ * Set mfs for the given fc_lport to the new mfs.
+ *
+ * Return: 0 for success, -EINVAL if @mfs is below FC_MIN_MAX_FRAME
+ *
+ **/
+int fc_set_mfs(struct fc_lport *lport, u32 mfs)
+{
+	unsigned int old_mfs;
+	int rc = -EINVAL;
+
+	mutex_lock(&lport->lp_mutex);
+
+	old_mfs = lport->mfs;
+
+	if (mfs >= FC_MIN_MAX_FRAME) {
+		mfs &= ~3;
+		if (mfs > FC_MAX_FRAME)
+			mfs = FC_MAX_FRAME;
+		mfs -= sizeof(struct fc_frame_header);
+		lport->mfs = mfs;
+		rc = 0;
+	}
+
+	if (!rc && mfs < old_mfs)
+		fc_lport_enter_reset(lport);
+
+	mutex_unlock(&lport->lp_mutex);
+
+	return rc;
+}
+EXPORT_SYMBOL(fc_set_mfs);
+
+/**
+ * fc_lport_disc_callback - Callback for discovery events
+ * @lport: FC local port
+ * @event: The discovery event
+ */
+void fc_lport_disc_callback(struct fc_lport *lport, enum fc_disc_event event)
+{
+	switch (event) {
+	case DISC_EV_SUCCESS:
+		FC_DEBUG_LPORT("Got a SUCCESS event for port (%6x)\n",
+			       fc_host_port_id(lport->host));
+		break;
+	case DISC_EV_FAILED:
+		FC_DEBUG_LPORT("Got a FAILED event for port (%6x)\n",
+			       fc_host_port_id(lport->host));
+		mutex_lock(&lport->lp_mutex);
+		fc_lport_enter_reset(lport);
+		mutex_unlock(&lport->lp_mutex);
+		break;
+	case DISC_EV_NONE:
+		WARN_ON(1);
+		break;
+	}
+}
+
+/**
+ * fc_lport_enter_ready - Enter the ready state and start discovery
+ * @lport: Fibre Channel local port that is ready
+ *
+ * Locking Note: The lport lock is expected to be held before calling
+ * this routine.
+ */
+static void fc_lport_enter_ready(struct fc_lport *lport)
+{
+	FC_DEBUG_LPORT("Port (%6x) entered Ready from state %s\n",
+		       fc_host_port_id(lport->host), fc_lport_state(lport));
+
+	fc_lport_state_enter(lport, LPORT_ST_READY);
+
+	lport->tt.disc_start(fc_lport_disc_callback, lport);
+}
+
+/**
+ * fc_lport_recv_flogi_req - Receive a FLOGI request
+ * @sp_in: The sequence the FLOGI is on
+ * @rx_fp: The frame the FLOGI is in
+ * @lport: The lport that received the request
+ *
+ * A received FLOGI request indicates a point-to-point connection.
+ * Accept it with the common service parameters indicating our N port.
+ * Set up to do a PLOGI if we have the higher-number WWPN.
+ *
+ * Locking Note: The lport lock is expected to be held before calling
+ * this function.
+ */
+static void fc_lport_recv_flogi_req(struct fc_seq *sp_in,
+				    struct fc_frame *rx_fp,
+				    struct fc_lport *lport)
+{
+	struct fc_frame *fp;
+	struct fc_frame_header *fh;
+	struct fc_seq *sp;
+	struct fc_exch *ep;
+	struct fc_els_flogi *flp;
+	struct fc_els_flogi *new_flp;
+	u64 remote_wwpn;
+	u32 remote_fid;
+	u32 local_fid;
+	u32 f_ctl;
+
+	FC_DEBUG_LPORT("Received FLOGI request while in state %s\n",
+		       fc_lport_state(lport));
+
+	fh = fc_frame_header_get(rx_fp);
+	remote_fid = ntoh24(fh->fh_s_id);
+	flp = fc_frame_payload_get(rx_fp, sizeof(*flp));
+	if (!flp)
+		goto out;
+	remote_wwpn = get_unaligned_be64(&flp->fl_wwpn);
+	if (remote_wwpn == lport->wwpn) {
+		FC_DBG("FLOGI from port with same WWPN %llx "
+		       "possible configuration error\n", remote_wwpn);
+		goto out;
+	}
+	FC_DBG("FLOGI from port WWPN %llx\n", remote_wwpn);
+
+	/*
+	 * XXX what is the right thing to do for FIDs?
+	 * The originator might expect our S_ID to be 0xfffffe.
+	 * But if so, both of us could end up with the same FID.
+	 */
+	local_fid = FC_LOCAL_PTP_FID_LO;
+	if (remote_wwpn < lport->wwpn) {
+		local_fid = FC_LOCAL_PTP_FID_HI;
+		if (!remote_fid || remote_fid == local_fid)
+			remote_fid = FC_LOCAL_PTP_FID_LO;
+	} else if (!remote_fid) {
+		remote_fid = FC_LOCAL_PTP_FID_HI;
+	}
+
+	fc_host_port_id(lport->host) = local_fid;
+
+	fp = fc_frame_alloc(lport, sizeof(*flp));
+	if (fp) {
+		sp = lport->tt.seq_start_next(fr_seq(rx_fp));
+		new_flp = fc_frame_payload_get(fp, sizeof(*flp));
+		fc_lport_flogi_fill(lport, new_flp, ELS_FLOGI);
+		new_flp->fl_cmd = (u8) ELS_LS_ACC;
+
+		/*
+		 * Send the response.  If this fails, the originator should
+		 * repeat the sequence.
+		 */
+		f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ;
+		ep = fc_seq_exch(sp);
+		fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
+			       FC_TYPE_ELS, f_ctl, 0);
+		lport->tt.seq_send(lport, sp, fp);
+
+	} else {
+		fc_lport_error(lport, fp);
+	}
+	fc_lport_ptp_setup(lport, remote_fid, remote_wwpn,
+			   get_unaligned_be64(&flp->fl_wwnn));
+
+	lport->tt.disc_start(fc_lport_disc_callback, lport);
+
+out:
+	fc_frame_free(rx_fp);
+}
+
+/**
+ * fc_lport_recv_req - The generic lport request handler
+ * @lport: The lport that received the request
+ * @sp: The sequence the request is on
+ * @fp: The frame the request is in
+ *
+ * This function will see if the lport handles the request or
+ * if an rport should handle the request.
+ *
+ * Locking Note: This function should not be called with the lport
+ *		 lock held because it will grab the lock.
+ */
+static void fc_lport_recv_req(struct fc_lport *lport, struct fc_seq *sp,
+			      struct fc_frame *fp)
+{
+	struct fc_frame_header *fh = fc_frame_header_get(fp);
+	void (*recv) (struct fc_seq *, struct fc_frame *, struct fc_lport *);
+	struct fc_rport *rport;
+	u32 s_id;
+	u32 d_id;
+	struct fc_seq_els_data rjt_data;
+
+	mutex_lock(&lport->lp_mutex);
+
+	/*
+	 * Handle special ELS cases like FLOGI, LOGO, and
+	 * RSCN here.  These don't require a session.
+	 * Even if we had a session, it might not be ready.
+	 */
+	if (fh->fh_type == FC_TYPE_ELS && fh->fh_r_ctl == FC_RCTL_ELS_REQ) {
+		/*
+		 * Check opcode.
+		 */
+		recv = NULL;
+		switch (fc_frame_payload_op(fp)) {
+		case ELS_FLOGI:
+			recv = fc_lport_recv_flogi_req;
+			break;
+		case ELS_LOGO:
+			fh = fc_frame_header_get(fp);
+			if (ntoh24(fh->fh_s_id) == FC_FID_FLOGI)
+				recv = fc_lport_recv_logo_req;
+			break;
+		case ELS_RSCN:
+			recv = lport->tt.disc_recv_req;
+			break;
+		case ELS_ECHO:
+			recv = fc_lport_recv_echo_req;
+			break;
+		case ELS_RLIR:
+			recv = fc_lport_recv_rlir_req;
+			break;
+		case ELS_RNID:
+			recv = fc_lport_recv_rnid_req;
+			break;
+		case ELS_ADISC:
+			recv = fc_lport_recv_adisc_req;
+			break;
+		}
+
+		if (recv)
+			recv(sp, fp, lport);
+		else {
+			/*
+			 * Find session.
+			 * If this is a new incoming PLOGI, we won't find it.
+			 */
+			s_id = ntoh24(fh->fh_s_id);
+			d_id = ntoh24(fh->fh_d_id);
+
+			rport = lport->tt.rport_lookup(lport, s_id);
+			if (rport)
+				lport->tt.rport_recv_req(sp, fp, rport);
+			else {
+				rjt_data.fp = NULL;
+				rjt_data.reason = ELS_RJT_UNAB;
+				rjt_data.explan = ELS_EXPL_NONE;
+				lport->tt.seq_els_rsp_send(sp,
+							   ELS_LS_RJT,
+							   &rjt_data);
+				fc_frame_free(fp);
+			}
+		}
+	} else {
+		FC_DBG("dropping invalid frame (eof %x)\n", fr_eof(fp));
+		fc_frame_free(fp);
+	}
+	mutex_unlock(&lport->lp_mutex);
+
+	/*
+	 *  The common exch_done for all requests may not be good
+	 *  if any request requires a longer hold on the exchange. XXX
+	 */
+	lport->tt.exch_done(sp);
+}
+
+/**
+ * fc_lport_reset - Reset an lport
+ * @lport: The lport which should be reset
+ *
+ * Locking Note: This functions should not be called with the
+ *		 lport lock held.
+ */
+int fc_lport_reset(struct fc_lport *lport)
+{
+	mutex_lock(&lport->lp_mutex);
+	fc_lport_enter_reset(lport);
+	mutex_unlock(&lport->lp_mutex);
+	return 0;
+}
+EXPORT_SYMBOL(fc_lport_reset);
+
+/**
+ * fc_lport_enter_reset - Reset the local port
+ * @lport: Fibre Channel local port to be reset
+ *
+ * Locking Note: The lport lock is expected to be held before calling
+ * this routine.
+ */
+static void fc_lport_enter_reset(struct fc_lport *lport)
+{
+	FC_DEBUG_LPORT("Port (%6x) entered RESET state from %s state\n",
+		       fc_host_port_id(lport->host), fc_lport_state(lport));
+
+	fc_lport_state_enter(lport, LPORT_ST_RESET);
+
+	if (lport->dns_rp)
+		lport->tt.rport_logoff(lport->dns_rp);
+
+	if (lport->ptp_rp) {
+		lport->tt.rport_logoff(lport->ptp_rp);
+		lport->ptp_rp = NULL;
+	}
+
+	lport->tt.disc_stop(lport);
+
+	lport->tt.exch_mgr_reset(lport->emp, 0, 0);
+	fc_host_fabric_name(lport->host) = 0;
+	fc_host_port_id(lport->host) = 0;
+
+	if ((lport->link_status & FC_LINK_UP) == FC_LINK_UP)
+		fc_lport_enter_flogi(lport);
+}
+
+/**
+ * fc_lport_error - Handler for any errors
+ * @lport: The fc_lport object
+ * @fp: The frame pointer
+ *
+ * If the error was caused by a resource allocation failure
+ * then wait for half a second and retry, otherwise retry
+ * after the e_d_tov time.
+ */
+static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp)
+{
+	unsigned long delay = 0;
+	FC_DEBUG_LPORT("Error %ld in state %s, retries %d\n",
+		       PTR_ERR(fp), fc_lport_state(lport),
+		       lport->retry_count);
+
+	if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) {
+		/*
+		 * Memory allocation failure, or the exchange timed out.
+		 *  Retry after delay
+		 */
+		if (lport->retry_count < lport->max_retry_count) {
+			lport->retry_count++;
+			if (!fp)
+				delay = msecs_to_jiffies(500);
+			else
+				delay =	msecs_to_jiffies(lport->e_d_tov);
+
+			schedule_delayed_work(&lport->retry_work, delay);
+		} else {
+			switch (lport->state) {
+			case LPORT_ST_NONE:
+			case LPORT_ST_READY:
+			case LPORT_ST_RESET:
+			case LPORT_ST_RPN_ID:
+			case LPORT_ST_RFT_ID:
+			case LPORT_ST_SCR:
+			case LPORT_ST_DNS:
+			case LPORT_ST_FLOGI:
+			case LPORT_ST_LOGO:
+				fc_lport_enter_reset(lport);
+				break;
+			}
+		}
+	}
+}
+
+/**
+ * fc_lport_rft_id_resp - Handle response to Register Fibre
+ *			  Channel Types by ID (RFT_ID) request
+ * @sp: current sequence in RFT_ID exchange
+ * @fp: response frame
+ * @lp_arg: Fibre Channel host port instance
+ *
+ * Locking Note: This function will be called without the lport lock
+ * held, but it will lock, call an _enter_* function or fc_lport_error
+ * and then unlock the lport.
+ */
+static void fc_lport_rft_id_resp(struct fc_seq *sp, struct fc_frame *fp,
+				 void *lp_arg)
+{
+	struct fc_lport *lport = lp_arg;
+	struct fc_frame_header *fh;
+	struct fc_ct_hdr *ct;
+
+	if (fp == ERR_PTR(-FC_EX_CLOSED))
+		return;
+
+	mutex_lock(&lport->lp_mutex);
+
+	FC_DEBUG_LPORT("Received a RFT_ID response\n");
+
+	if (lport->state != LPORT_ST_RFT_ID) {
+		FC_DBG("Received a RFT_ID response, but in state %s\n",
+		       fc_lport_state(lport));
+		goto out;
+	}
+
+	if (IS_ERR(fp)) {
+		fc_lport_error(lport, fp);
+		goto err;
+	}
+
+	fh = fc_frame_header_get(fp);
+	ct = fc_frame_payload_get(fp, sizeof(*ct));
+
+	if (fh && ct && fh->fh_type == FC_TYPE_CT &&
+	    ct->ct_fs_type == FC_FST_DIR &&
+	    ct->ct_fs_subtype == FC_NS_SUBTYPE &&
+	    ntohs(ct->ct_cmd) == FC_FS_ACC)
+		fc_lport_enter_scr(lport);
+	else
+		fc_lport_error(lport, fp);
+out:
+	fc_frame_free(fp);
+err:
+	mutex_unlock(&lport->lp_mutex);
+}
+
+/**
+ * fc_lport_rpn_id_resp - Handle response to Register Port
+ *			  Name by ID (RPN_ID) request
+ * @sp: current sequence in RPN_ID exchange
+ * @fp: response frame
+ * @lp_arg: Fibre Channel host port instance
+ *
+ * Locking Note: This function will be called without the lport lock
+ * held, but it will lock, call an _enter_* function or fc_lport_error
+ * and then unlock the lport.
+ */
+static void fc_lport_rpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
+				 void *lp_arg)
+{
+	struct fc_lport *lport = lp_arg;
+	struct fc_frame_header *fh;
+	struct fc_ct_hdr *ct;
+
+	if (fp == ERR_PTR(-FC_EX_CLOSED))
+		return;
+
+	mutex_lock(&lport->lp_mutex);
+
+	FC_DEBUG_LPORT("Received a RPN_ID response\n");
+
+	if (lport->state != LPORT_ST_RPN_ID) {
+		FC_DBG("Received a RPN_ID response, but in state %s\n",
+		       fc_lport_state(lport));
+		goto out;
+	}
+
+	if (IS_ERR(fp)) {
+		fc_lport_error(lport, fp);
+		goto err;
+	}
+
+	fh = fc_frame_header_get(fp);
+	ct = fc_frame_payload_get(fp, sizeof(*ct));
+	if (fh && ct && fh->fh_type == FC_TYPE_CT &&
+	    ct->ct_fs_type == FC_FST_DIR &&
+	    ct->ct_fs_subtype == FC_NS_SUBTYPE &&
+	    ntohs(ct->ct_cmd) == FC_FS_ACC)
+		fc_lport_enter_rft_id(lport);
+	else
+		fc_lport_error(lport, fp);
+
+out:
+	fc_frame_free(fp);
+err:
+	mutex_unlock(&lport->lp_mutex);
+}
+
+/**
+ * fc_lport_scr_resp - Handle response to State Change Register (SCR) request
+ * @sp: current sequence in SCR exchange
+ * @fp: response frame
+ * @lp_arg: Fibre Channel local port instance that sent the registration request
+ *
+ * Locking Note: This function will be called without the lport lock
+ * held, but it will lock, call an _enter_* function or fc_lport_error
+ * and then unlock the lport.
+ */
+static void fc_lport_scr_resp(struct fc_seq *sp, struct fc_frame *fp,
+			      void *lp_arg)
+{
+	struct fc_lport *lport = lp_arg;
+	u8 op;
+
+	if (fp == ERR_PTR(-FC_EX_CLOSED))
+		return;
+
+	mutex_lock(&lport->lp_mutex);
+
+	FC_DEBUG_LPORT("Received a SCR response\n");
+
+	if (lport->state != LPORT_ST_SCR) {
+		FC_DBG("Received a SCR response, but in state %s\n",
+		       fc_lport_state(lport));
+		goto out;
+	}
+
+	if (IS_ERR(fp)) {
+		fc_lport_error(lport, fp);
+		goto err;
+	}
+
+	op = fc_frame_payload_op(fp);
+	if (op == ELS_LS_ACC)
+		fc_lport_enter_ready(lport);
+	else
+		fc_lport_error(lport, fp);
+
+out:
+	fc_frame_free(fp);
+err:
+	mutex_unlock(&lport->lp_mutex);
+}
+
+/**
+ * fc_lport_enter_scr - Send a State Change Register (SCR) request
+ * @lport: Fibre Channel local port to register for state changes
+ *
+ * Locking Note: The lport lock is expected to be held before calling
+ * this routine.
+ */
+static void fc_lport_enter_scr(struct fc_lport *lport)
+{
+	struct fc_frame *fp;
+
+	FC_DEBUG_LPORT("Port (%6x) entered SCR state from %s state\n",
+		       fc_host_port_id(lport->host), fc_lport_state(lport));
+
+	fc_lport_state_enter(lport, LPORT_ST_SCR);
+
+	fp = fc_frame_alloc(lport, sizeof(struct fc_els_scr));
+	if (!fp) {
+		fc_lport_error(lport, fp);
+		return;
+	}
+
+	if (!lport->tt.elsct_send(lport, NULL, fp, ELS_SCR,
+				  fc_lport_scr_resp, lport, lport->e_d_tov))
+		fc_lport_error(lport, fp);
+}
+
+/**
+ * fc_lport_enter_rft_id - Register FC4-types with the name server
+ * @lport: Fibre Channel local port to register
+ *
+ * Locking Note: The lport lock is expected to be held before calling
+ * this routine.
+ */
+static void fc_lport_enter_rft_id(struct fc_lport *lport)
+{
+	struct fc_frame *fp;
+	struct fc_ns_fts *lps;
+	int i;
+
+	FC_DEBUG_LPORT("Port (%6x) entered RFT_ID state from %s state\n",
+		       fc_host_port_id(lport->host), fc_lport_state(lport));
+
+	fc_lport_state_enter(lport, LPORT_ST_RFT_ID);
+
+	lps = &lport->fcts;
+	i = sizeof(lps->ff_type_map) / sizeof(lps->ff_type_map[0]);
+	while (--i >= 0)
+		if (ntohl(lps->ff_type_map[i]) != 0)
+			break;
+	if (i < 0) {
+		/* nothing to register, move on to SCR */
+		fc_lport_enter_scr(lport);
+		return;
+	}
+
+	fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) +
+			    sizeof(struct fc_ns_rft));
+	if (!fp) {
+		fc_lport_error(lport, fp);
+		return;
+	}
+
+	if (!lport->tt.elsct_send(lport, NULL, fp, FC_NS_RFT_ID,
+				  fc_lport_rft_id_resp,
+				  lport, lport->e_d_tov))
+		fc_lport_error(lport, fp);
+}
+
+/**
+ * fc_lport_enter_rpn_id - Register port name with the name server
+ * @lport: Fibre Channel local port to register
+ *
+ * Locking Note: The lport lock is expected to be held before calling
+ * this routine.
+ */
+static void fc_lport_enter_rpn_id(struct fc_lport *lport)
+{
+	struct fc_frame *fp;
+
+	FC_DEBUG_LPORT("Port (%6x) entered RPN_ID state from %s state\n",
+		       fc_host_port_id(lport->host), fc_lport_state(lport));
+
+	fc_lport_state_enter(lport, LPORT_ST_RPN_ID);
+
+	fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) +
+			    sizeof(struct fc_ns_rn_id));
+	if (!fp) {
+		fc_lport_error(lport, fp);
+		return;
+	}
+
+	if (!lport->tt.elsct_send(lport, NULL, fp, FC_NS_RPN_ID,
+				  fc_lport_rpn_id_resp,
+				  lport, lport->e_d_tov))
+		fc_lport_error(lport, fp);
+}
+
+static struct fc_rport_operations fc_lport_rport_ops = {
+	.event_callback = fc_lport_rport_callback,
+};
+
+/**
+ * fc_lport_enter_dns - Create a rport to the name server
+ * @lport: Fibre Channel local port requesting a rport for the name server
+ *
+ * Locking Note: The lport lock is expected to be held before calling
+ * this routine.
+ */
+static void fc_lport_enter_dns(struct fc_lport *lport)
+{
+	struct fc_rport *rport;
+	struct fc_rport_libfc_priv *rdata;
+	struct fc_disc_port dp;
+
+	dp.ids.port_id = FC_FID_DIR_SERV;
+	dp.ids.port_name = -1;
+	dp.ids.node_name = -1;
+	dp.ids.roles = FC_RPORT_ROLE_UNKNOWN;
+	dp.lp = lport;
+
+	FC_DEBUG_LPORT("Port (%6x) entered DNS state from %s state\n",
+		       fc_host_port_id(lport->host), fc_lport_state(lport));
+
+	fc_lport_state_enter(lport, LPORT_ST_DNS);
+
+	rport = fc_rport_rogue_create(&dp);
+	if (!rport)
+		goto err;
+
+	rdata = rport->dd_data;
+	rdata->ops = &fc_lport_rport_ops;
+	lport->tt.rport_login(rport);
+	return;
+
+err:
+	fc_lport_error(lport, NULL);
+}
+
+/**
+ * fc_lport_timeout - Handler for the retry_work timer.
+ * @work: The work struct of the fc_lport
+ */
+static void fc_lport_timeout(struct work_struct *work)
+{
+	struct fc_lport *lport =
+		container_of(work, struct fc_lport,
+			     retry_work.work);
+
+	mutex_lock(&lport->lp_mutex);
+
+	switch (lport->state) {
+	case LPORT_ST_NONE:
+	case LPORT_ST_READY:
+	case LPORT_ST_RESET:
+		WARN_ON(1);
+		break;
+	case LPORT_ST_FLOGI:
+		fc_lport_enter_flogi(lport);
+		break;
+	case LPORT_ST_DNS:
+		fc_lport_enter_dns(lport);
+		break;
+	case LPORT_ST_RPN_ID:
+		fc_lport_enter_rpn_id(lport);
+		break;
+	case LPORT_ST_RFT_ID:
+		fc_lport_enter_rft_id(lport);
+		break;
+	case LPORT_ST_SCR:
+		fc_lport_enter_scr(lport);
+		break;
+	case LPORT_ST_LOGO:
+		fc_lport_enter_logo(lport);
+		break;
+	}
+
+	mutex_unlock(&lport->lp_mutex);
+}
+
+/**
+ * fc_lport_logo_resp - Handle response to LOGO request
+ * @sp: current sequence in LOGO exchange
+ * @fp: response frame
+ * @lp_arg: Fibre Channel local port instance that sent the LOGO request
+ *
+ * Locking Note: This function will be called without the lport lock
+ * held, but it will lock, call an _enter_* function or fc_lport_error
+ * and then unlock the lport.
+ */
+static void fc_lport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
+			       void *lp_arg)
+{
+	struct fc_lport *lport = lp_arg;
+	u8 op;
+
+	if (fp == ERR_PTR(-FC_EX_CLOSED))
+		return;
+
+	mutex_lock(&lport->lp_mutex);
+
+	FC_DEBUG_LPORT("Received a LOGO response\n");
+
+	if (lport->state != LPORT_ST_LOGO) {
+		FC_DBG("Received a LOGO response, but in state %s\n",
+		       fc_lport_state(lport));
+		goto out;
+	}
+
+	if (IS_ERR(fp)) {
+		fc_lport_error(lport, fp);
+		goto err;
+	}
+
+	op = fc_frame_payload_op(fp);
+	if (op == ELS_LS_ACC)
+		fc_lport_enter_reset(lport);
+	else
+		fc_lport_error(lport, fp);
+
+out:
+	fc_frame_free(fp);
+err:
+	mutex_unlock(&lport->lp_mutex);
+}
+
+/**
+ * fc_lport_enter_logo - Logout of the fabric
+ * @lport: Fibre Channel local port to be logged out
+ *
+ * Locking Note: The lport lock is expected to be held before calling
+ * this routine.
+ */
+static void fc_lport_enter_logo(struct fc_lport *lport)
+{
+	struct fc_frame *fp;
+	struct fc_els_logo *logo;
+
+	FC_DEBUG_LPORT("Port (%6x) entered LOGO state from %s state\n",
+		       fc_host_port_id(lport->host), fc_lport_state(lport));
+
+	fc_lport_state_enter(lport, LPORT_ST_LOGO);
+
+	/* DNS session should be closed so we can release it here */
+	if (lport->dns_rp)
+		lport->tt.rport_logoff(lport->dns_rp);
+
+	fp = fc_frame_alloc(lport, sizeof(*logo));
+	if (!fp) {
+		fc_lport_error(lport, fp);
+		return;
+	}
+
+	if (!lport->tt.elsct_send(lport, NULL, fp, ELS_LOGO, fc_lport_logo_resp,
+				  lport, lport->e_d_tov))
+		fc_lport_error(lport, fp);
+}
+
+/**
+ * fc_lport_flogi_resp - Handle response to FLOGI request
+ * @sp: current sequence in FLOGI exchange
+ * @fp: response frame
+ * @lp_arg: Fibre Channel local port instance that sent the FLOGI request
+ *
+ * Locking Note: This function will be called without the lport lock
+ * held, but it will lock, call an _enter_* function or fc_lport_error
+ * and then unlock the lport.
+ */
+static void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
+				void *lp_arg)
+{
+	struct fc_lport *lport = lp_arg;
+	struct fc_frame_header *fh;
+	struct fc_els_flogi *flp;
+	u32 did;
+	u16 csp_flags;
+	unsigned int r_a_tov;
+	unsigned int e_d_tov;
+	u16 mfs;
+
+	if (fp == ERR_PTR(-FC_EX_CLOSED))
+		return;
+
+	mutex_lock(&lport->lp_mutex);
+
+	FC_DEBUG_LPORT("Received a FLOGI response\n");
+
+	if (lport->state != LPORT_ST_FLOGI) {
+		FC_DBG("Received a FLOGI response, but in state %s\n",
+		       fc_lport_state(lport));
+		goto out;
+	}
+
+	if (IS_ERR(fp)) {
+		fc_lport_error(lport, fp);
+		goto err;
+	}
+
+	fh = fc_frame_header_get(fp);
+	did = ntoh24(fh->fh_d_id);
+	if (fc_frame_payload_op(fp) == ELS_LS_ACC && did != 0) {
+
+		FC_DEBUG_LPORT("Assigned fid %x\n", did);
+		fc_host_port_id(lport->host) = did;
+
+		flp = fc_frame_payload_get(fp, sizeof(*flp));
+		if (flp) {
+			mfs = ntohs(flp->fl_csp.sp_bb_data) &
+				FC_SP_BB_DATA_MASK;
+			if (mfs >= FC_SP_MIN_MAX_PAYLOAD &&
+			    mfs < lport->mfs)
+				lport->mfs = mfs;
+			csp_flags = ntohs(flp->fl_csp.sp_features);
+			r_a_tov = ntohl(flp->fl_csp.sp_r_a_tov);
+			e_d_tov = ntohl(flp->fl_csp.sp_e_d_tov);
+			if (csp_flags & FC_SP_FT_EDTR)
+				e_d_tov /= 1000000;
+			if ((csp_flags & FC_SP_FT_FPORT) == 0) {
+				if (e_d_tov > lport->e_d_tov)
+					lport->e_d_tov = e_d_tov;
+				lport->r_a_tov = 2 * e_d_tov;
+				FC_DBG("Point-to-Point mode\n");
+				fc_lport_ptp_setup(lport, ntoh24(fh->fh_s_id),
+						   get_unaligned_be64(
+							   &flp->fl_wwpn),
+						   get_unaligned_be64(
+							   &flp->fl_wwnn));
+			} else {
+				lport->e_d_tov = e_d_tov;
+				lport->r_a_tov = r_a_tov;
+				fc_host_fabric_name(lport->host) =
+					get_unaligned_be64(&flp->fl_wwnn);
+				fc_lport_enter_dns(lport);
+			}
+		}
+
+		if (flp) {
+			csp_flags = ntohs(flp->fl_csp.sp_features);
+			if ((csp_flags & FC_SP_FT_FPORT) == 0) {
+				lport->tt.disc_start(fc_lport_disc_callback,
+						     lport);
+			}
+		}
+	} else {
+		FC_DBG("bad FLOGI response\n");
+	}
+
+out:
+	fc_frame_free(fp);
+err:
+	mutex_unlock(&lport->lp_mutex);
+}
+
+/**
+ * fc_lport_enter_flogi - Send a FLOGI request to the fabric manager
+ * @lport: Fibre Channel local port to be logged in to the fabric
+ *
+ * Locking Note: The lport lock is expected to be held before calling
+ * this routine.
+ */
+void fc_lport_enter_flogi(struct fc_lport *lport)
+{
+	struct fc_frame *fp;
+
+	FC_DEBUG_LPORT("Processing FLOGI state\n");
+
+	fc_lport_state_enter(lport, LPORT_ST_FLOGI);
+
+	fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
+	if (!fp)
+		return fc_lport_error(lport, fp);
+
+	if (!lport->tt.elsct_send(lport, NULL, fp, ELS_FLOGI,
+				  fc_lport_flogi_resp, lport, lport->e_d_tov))
+		fc_lport_error(lport, fp);
+}
+
+/**
+ * fc_lport_config - Configure a fc_lport
+ * @lport: The lport to be configured
+ */
+int fc_lport_config(struct fc_lport *lport)
+{
+	INIT_DELAYED_WORK(&lport->retry_work, fc_lport_timeout);
+	mutex_init(&lport->lp_mutex);
+
+	fc_lport_state_enter(lport, LPORT_ST_NONE);
+
+	fc_lport_add_fc4_type(lport, FC_TYPE_FCP);
+	fc_lport_add_fc4_type(lport, FC_TYPE_CT);
+
+	return 0;
+}
+EXPORT_SYMBOL(fc_lport_config);
+
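+/**
+ * fc_lport_init - Initialize the lport layer for a local port
+ * @lport: The lport to initialize
+ *
+ * Fills in any unset lport template handlers and publishes the
+ * port's static attributes to the FC transport class.
+ */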
+int fc_lport_init(struct fc_lport *lport)
+{
+	if (!lport->tt.lport_recv)
+		lport->tt.lport_recv = fc_lport_recv_req;
+
+	if (!lport->tt.lport_reset)
+		lport->tt.lport_reset = fc_lport_reset;
+
+	fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
+	fc_host_node_name(lport->host) = lport->wwnn;
+	fc_host_port_name(lport->host) = lport->wwpn;
+	fc_host_supported_classes(lport->host) = FC_COS_CLASS3;
+	memset(fc_host_supported_fc4s(lport->host), 0,
+	       sizeof(fc_host_supported_fc4s(lport->host)));
+	fc_host_supported_fc4s(lport->host)[2] = 1;
+	fc_host_supported_fc4s(lport->host)[7] = 1;
+
+	/* This value is also unchanging */
+	memset(fc_host_active_fc4s(lport->host), 0,
+	       sizeof(fc_host_active_fc4s(lport->host)));
+	fc_host_active_fc4s(lport->host)[2] = 1;
+	fc_host_active_fc4s(lport->host)[7] = 1;
+	fc_host_maxframe_size(lport->host) = lport->mfs;
+	fc_host_supported_speeds(lport->host) = 0;
+	if (lport->link_supported_speeds & FC_PORTSPEED_1GBIT)
+		fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_1GBIT;
+	if (lport->link_supported_speeds & FC_PORTSPEED_10GBIT)
+		fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_10GBIT;
+
+	return 0;
+}
+EXPORT_SYMBOL(fc_lport_init);
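+
+/*
+ * Typical bring-up order for a lower-level driver, sketched here only
+ * as a guide (the exact sequence is up to the individual LLD):
+ *
+ *	fc_lport_config(lport);
+ *	fc_lport_init(lport);
+ *	fc_fabric_login(lport);
+ *	fc_linkup(lport);
+ *
+ * with fc_linkdown(), fc_fabric_logoff() and fc_lport_destroy() used
+ * during teardown.
+ */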
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
new file mode 100644
index 000000000000..e780d8caf70e
--- /dev/null
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -0,0 +1,1291 @@
+/*
+ * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+/*
+ * RPORT GENERAL INFO
+ *
+ * This file contains all processing regarding fc_rports. It contains the
+ * rport state machine and does all rport interaction with the transport class.
+ * There should be no other places in libfc that interact directly with the
+ * transport class in regards to adding and deleting rports.
+ *
+ * fc_rports represent N_Ports within the fabric.
+ */
+
+/*
+ * RPORT LOCKING
+ *
+ * The rport should never hold the rport mutex and then attempt to acquire
+ * either the lport or disc mutexes. The rport's mutex is considered lesser
+ * than both the lport's mutex and the disc mutex. Refer to fc_lport.c for
+ * more comments on the hierarchy.
+ *
+ * The locking strategy is similar to the lport's strategy. The lock protects
+ * the rport's states and is held and released by the entry points to the rport
+ * block. All _enter_* functions correspond to rport states and expect the rport
+ * mutex to be locked before calling them. This means that rports only handle
+ * one request or response at a time; since they're not critical for the I/O
+ * path, this potential over-use of the mutex is acceptable.
+ */
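+
+/*
+ * For illustration, a sketch of the required ordering when both locks
+ * are needed (not a path taken in this file): take the greater lport
+ * mutex first, then the rport mutex:
+ *
+ *	mutex_lock(&lport->lp_mutex);
+ *	mutex_lock(&rdata->rp_mutex);
+ *	...
+ *	mutex_unlock(&rdata->rp_mutex);
+ *	mutex_unlock(&lport->lp_mutex);
+ *
+ * Acquiring them in the opposite order risks an AB-BA deadlock.
+ */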
+
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/rcupdate.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+#include <asm/unaligned.h>
+
+#include <scsi/libfc.h>
+#include <scsi/fc_encode.h>
+
+static int fc_rport_debug;
+
+#define FC_DEBUG_RPORT(fmt...)			\
+	do {					\
+		if (fc_rport_debug)		\
+			FC_DBG(fmt);		\
+	} while (0)
+
+struct workqueue_struct *rport_event_queue;
+
+static void fc_rport_enter_plogi(struct fc_rport *);
+static void fc_rport_enter_prli(struct fc_rport *);
+static void fc_rport_enter_rtv(struct fc_rport *);
+static void fc_rport_enter_ready(struct fc_rport *);
+static void fc_rport_enter_logo(struct fc_rport *);
+
+static void fc_rport_recv_plogi_req(struct fc_rport *,
+				    struct fc_seq *, struct fc_frame *);
+static void fc_rport_recv_prli_req(struct fc_rport *,
+				   struct fc_seq *, struct fc_frame *);
+static void fc_rport_recv_prlo_req(struct fc_rport *,
+				   struct fc_seq *, struct fc_frame *);
+static void fc_rport_recv_logo_req(struct fc_rport *,
+				   struct fc_seq *, struct fc_frame *);
+static void fc_rport_timeout(struct work_struct *);
+static void fc_rport_error(struct fc_rport *, struct fc_frame *);
+static void fc_rport_work(struct work_struct *);
+
+static const char *fc_rport_state_names[] = {
+	[RPORT_ST_NONE] = "None",
+	[RPORT_ST_INIT] = "Init",
+	[RPORT_ST_PLOGI] = "PLOGI",
+	[RPORT_ST_PRLI] = "PRLI",
+	[RPORT_ST_RTV] = "RTV",
+	[RPORT_ST_READY] = "Ready",
+	[RPORT_ST_LOGO] = "LOGO",
+};
+
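+/**
+ * fc_rport_rogue_destroy - Free a rogue rport
+ * @dev: The device embedded in the rport being released
+ */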
+static void fc_rport_rogue_destroy(struct device *dev)
+{
+	struct fc_rport *rport = dev_to_rport(dev);
+	FC_DEBUG_RPORT("Destroying rogue rport (%6x)\n", rport->port_id);
+	kfree(rport);
+}
+
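+/**
+ * fc_rport_rogue_create - Create a temporary rport for a discovered port
+ * @dp: The discovered port's IDs and owning local port
+ *
+ * The returned rport is not yet known to the FC transport class; it is
+ * switched for a real rport in fc_rport_work() once login completes.
+ */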
+struct fc_rport *fc_rport_rogue_create(struct fc_disc_port *dp)
+{
+	struct fc_rport *rport;
+	struct fc_rport_libfc_priv *rdata;
+	rport = kzalloc(sizeof(*rport) + sizeof(*rdata), GFP_KERNEL);
+
+	if (!rport)
+		return NULL;
+
+	rdata = RPORT_TO_PRIV(rport);
+
+	rport->dd_data = rdata;
+	rport->port_id = dp->ids.port_id;
+	rport->port_name = dp->ids.port_name;
+	rport->node_name = dp->ids.node_name;
+	rport->roles = dp->ids.roles;
+	rport->maxframe_size = FC_MIN_MAX_PAYLOAD;
+	/*
+	 * Note: all this libfc rogue rport code will be removed for
+	 * upstream, so it's fine that this is really ugly and hacky right now.
+	 */
+	device_initialize(&rport->dev);
+	rport->dev.release = fc_rport_rogue_destroy;
+
+	mutex_init(&rdata->rp_mutex);
+	rdata->local_port = dp->lp;
+	rdata->trans_state = FC_PORTSTATE_ROGUE;
+	rdata->rp_state = RPORT_ST_INIT;
+	rdata->event = RPORT_EV_NONE;
+	rdata->flags = FC_RP_FLAGS_REC_SUPPORTED;
+	rdata->ops = NULL;
+	rdata->e_d_tov = dp->lp->e_d_tov;
+	rdata->r_a_tov = dp->lp->r_a_tov;
+	INIT_DELAYED_WORK(&rdata->retry_work, fc_rport_timeout);
+	INIT_WORK(&rdata->event_work, fc_rport_work);
+	/*
+	 * For good measure, but not necessary as we should only
+	 * add REAL rport to the lport list.
+	 */
+	INIT_LIST_HEAD(&rdata->peers);
+
+	return rport;
+}
+
+/**
+ * fc_rport_state - return a string for the state the rport is in
+ * @rport: The rport whose state we want to get a string for
+ */
+static const char *fc_rport_state(struct fc_rport *rport)
+{
+	const char *cp;
+	struct fc_rport_libfc_priv *rdata = rport->dd_data;
+
+	cp = fc_rport_state_names[rdata->rp_state];
+	if (!cp)
+		cp = "Unknown";
+	return cp;
+}
+
+/**
+ * fc_set_rport_loss_tmo - Set the remote port loss timeout in seconds.
+ * @rport: Pointer to Fibre Channel remote port structure
+ * @timeout: timeout in seconds
+ */
+void fc_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout)
+{
+	if (timeout)
+		rport->dev_loss_tmo = timeout + 5;
+	else
+		rport->dev_loss_tmo = 30;
+}
+EXPORT_SYMBOL(fc_set_rport_loss_tmo);
+
+/**
+ * fc_plogi_get_maxframe - Get max payload from the common service parameters
+ * @flp: FLOGI payload structure
+ * @maxval: upper limit, may be less than what is in the service parameters
+ */
+static unsigned int
+fc_plogi_get_maxframe(struct fc_els_flogi *flp, unsigned int maxval)
+{
+	unsigned int mfs;
+
+	/*
+	 * Get max payload from the common service parameters and the
+	 * class 3 receive data field size.
+	 */
+	mfs = ntohs(flp->fl_csp.sp_bb_data) & FC_SP_BB_DATA_MASK;
+	if (mfs >= FC_SP_MIN_MAX_PAYLOAD && mfs < maxval)
+		maxval = mfs;
+	mfs = ntohs(flp->fl_cssp[3 - 1].cp_rdfs);
+	if (mfs >= FC_SP_MIN_MAX_PAYLOAD && mfs < maxval)
+		maxval = mfs;
+	return maxval;
+}
+
+/**
+ * fc_rport_state_enter - Change the rport's state
+ * @rport: The rport whose state should change
+ * @new: The new state of the rport
+ *
+ * Locking Note: Called with the rport lock held
+ */
+static void fc_rport_state_enter(struct fc_rport *rport,
+				 enum fc_rport_state new)
+{
+	struct fc_rport_libfc_priv *rdata = rport->dd_data;
+	if (rdata->rp_state != new)
+		rdata->retries = 0;
+	rdata->rp_state = new;
+}
+
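+/**
+ * fc_rport_work - Handler for rport events queued on rport_event_queue
+ * @work: The event_work struct of the fc_rport_libfc_priv
+ *
+ * Locking Note: Called without the rport lock held; it takes and
+ * releases the lock internally.
+ */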
+static void fc_rport_work(struct work_struct *work)
+{
+	struct fc_rport_libfc_priv *rdata =
+		container_of(work, struct fc_rport_libfc_priv, event_work);
+	enum fc_rport_event event;
+	enum fc_rport_trans_state trans_state;
+	struct fc_lport *lport = rdata->local_port;
+	struct fc_rport_operations *rport_ops;
+	struct fc_rport *rport = PRIV_TO_RPORT(rdata);
+
+	mutex_lock(&rdata->rp_mutex);
+	event = rdata->event;
+	rport_ops = rdata->ops;
+
+	if (event == RPORT_EV_CREATED) {
+		struct fc_rport *new_rport;
+		struct fc_rport_libfc_priv *new_rdata;
+		struct fc_rport_identifiers ids;
+
+		ids.port_id = rport->port_id;
+		ids.roles = rport->roles;
+		ids.port_name = rport->port_name;
+		ids.node_name = rport->node_name;
+
+		mutex_unlock(&rdata->rp_mutex);
+
+		new_rport = fc_remote_port_add(lport->host, 0, &ids);
+		if (new_rport) {
+			/*
+			 * Switch from the rogue rport to the rport
+			 * returned by the FC class.
+			 */
+			new_rport->maxframe_size = rport->maxframe_size;
+
+			new_rdata = new_rport->dd_data;
+			new_rdata->e_d_tov = rdata->e_d_tov;
+			new_rdata->r_a_tov = rdata->r_a_tov;
+			new_rdata->ops = rdata->ops;
+			new_rdata->local_port = rdata->local_port;
+			new_rdata->flags = FC_RP_FLAGS_REC_SUPPORTED;
+			new_rdata->trans_state = FC_PORTSTATE_REAL;
+			mutex_init(&new_rdata->rp_mutex);
+			INIT_DELAYED_WORK(&new_rdata->retry_work,
+					  fc_rport_timeout);
+			INIT_LIST_HEAD(&new_rdata->peers);
+			INIT_WORK(&new_rdata->event_work, fc_rport_work);
+
+			fc_rport_state_enter(new_rport, RPORT_ST_READY);
+		} else {
+			FC_DBG("Failed to create the rport for port "
+			       "(%6x).\n", ids.port_id);
+			event = RPORT_EV_FAILED;
+		}
+		put_device(&rport->dev);
+		/* switch to the real rport only if one was created */
+		if (new_rport) {
+			rport = new_rport;
+			rdata = new_rport->dd_data;
+		}
+		if (rport_ops->event_callback)
+			rport_ops->event_callback(lport, rport, event);
+	} else if ((event == RPORT_EV_FAILED) ||
+		   (event == RPORT_EV_LOGO) ||
+		   (event == RPORT_EV_STOP)) {
+		trans_state = rdata->trans_state;
+		mutex_unlock(&rdata->rp_mutex);
+		if (rport_ops->event_callback)
+			rport_ops->event_callback(lport, rport, event);
+		if (trans_state == FC_PORTSTATE_ROGUE)
+			put_device(&rport->dev);
+		else
+			fc_remote_port_delete(rport);
+	} else
+		mutex_unlock(&rdata->rp_mutex);
+}
+
+/**
+ * fc_rport_login - Start the remote port login state machine
+ * @rport: Fibre Channel remote port
+ *
+ * Locking Note: Called without the rport lock held. This
+ * function will hold the rport lock, call an _enter_*
+ * function and then unlock the rport.
+ */
+int fc_rport_login(struct fc_rport *rport)
+{
+	struct fc_rport_libfc_priv *rdata = rport->dd_data;
+
+	mutex_lock(&rdata->rp_mutex);
+
+	FC_DEBUG_RPORT("Login to port (%6x)\n", rport->port_id);
+
+	fc_rport_enter_plogi(rport);
+
+	mutex_unlock(&rdata->rp_mutex);
+
+	return 0;
+}
+
+/**
+ * fc_rport_logoff - Logoff and remove an rport
+ * @rport: Fibre Channel remote port to be removed
+ *
+ * Locking Note: Called without the rport lock held. This
+ * function will hold the rport lock, call an _enter_*
+ * function and then unlock the rport.
+ */
+int fc_rport_logoff(struct fc_rport *rport)
+{
+	struct fc_rport_libfc_priv *rdata = rport->dd_data;
+
+	mutex_lock(&rdata->rp_mutex);
+
+	FC_DEBUG_RPORT("Remove port (%6x)\n", rport->port_id);
+
+	fc_rport_enter_logo(rport);
+
+	/*
+	 * Change the state to NONE so that we discard
+	 * the response.
+	 */
+	fc_rport_state_enter(rport, RPORT_ST_NONE);
+
+	mutex_unlock(&rdata->rp_mutex);
+
+	cancel_delayed_work_sync(&rdata->retry_work);
+
+	mutex_lock(&rdata->rp_mutex);
+
+	rdata->event = RPORT_EV_STOP;
+	queue_work(rport_event_queue, &rdata->event_work);
+
+	mutex_unlock(&rdata->rp_mutex);
+
+	return 0;
+}
+
+/**
+ * fc_rport_enter_ready - The rport is ready
+ * @rport: Fibre Channel remote port that is ready
+ *
+ * Locking Note: The rport lock is expected to be held before calling
+ * this routine.
+ */
+static void fc_rport_enter_ready(struct fc_rport *rport)
+{
+	struct fc_rport_libfc_priv *rdata = rport->dd_data;
+
+	fc_rport_state_enter(rport, RPORT_ST_READY);
+
+	FC_DEBUG_RPORT("Port (%6x) is Ready\n", rport->port_id);
+
+	rdata->event = RPORT_EV_CREATED;
+	queue_work(rport_event_queue, &rdata->event_work);
+}
+
+/**
+ * fc_rport_timeout - Handler for the retry_work timer.
+ * @work: The work struct of the fc_rport_libfc_priv
+ *
+ * Locking Note: Called without the rport lock held. This
+ * function will hold the rport lock, call an _enter_*
+ * function and then unlock the rport.
+ */
+static void fc_rport_timeout(struct work_struct *work)
+{
+	struct fc_rport_libfc_priv *rdata =
+		container_of(work, struct fc_rport_libfc_priv, retry_work.work);
+	struct fc_rport *rport = PRIV_TO_RPORT(rdata);
+
+	mutex_lock(&rdata->rp_mutex);
+
+	switch (rdata->rp_state) {
+	case RPORT_ST_PLOGI:
+		fc_rport_enter_plogi(rport);
+		break;
+	case RPORT_ST_PRLI:
+		fc_rport_enter_prli(rport);
+		break;
+	case RPORT_ST_RTV:
+		fc_rport_enter_rtv(rport);
+		break;
+	case RPORT_ST_LOGO:
+		fc_rport_enter_logo(rport);
+		break;
+	case RPORT_ST_READY:
+	case RPORT_ST_INIT:
+	case RPORT_ST_NONE:
+		break;
+	}
+
+	mutex_unlock(&rdata->rp_mutex);
+	put_device(&rport->dev);
+}
+
+/**
+ * fc_rport_error - Handler for any errors
+ * @rport: The fc_rport object
+ * @fp: The frame pointer
+ *
+ * If the error was caused by a resource allocation failure
+ * then wait for half a second and retry, otherwise retry
+ * immediately.
+ *
+ * Locking Note: The rport lock is expected to be held before
+ * calling this routine
+ */
+static void fc_rport_error(struct fc_rport *rport, struct fc_frame *fp)
+{
+	struct fc_rport_libfc_priv *rdata = rport->dd_data;
+	unsigned long delay = 0;
+
+	FC_DEBUG_RPORT("Error %ld in state %s, retries %d\n",
+		       PTR_ERR(fp), fc_rport_state(rport), rdata->retries);
+
+	if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) {
+		/*
+		 * Memory allocation failure, or the exchange timed out.
+		 *  Retry after delay
+		 */
+		if (rdata->retries < rdata->local_port->max_retry_count) {
+			rdata->retries++;
+			if (!fp)
+				delay = msecs_to_jiffies(500);
+			get_device(&rport->dev);
+			schedule_delayed_work(&rdata->retry_work, delay);
+		} else {
+			switch (rdata->rp_state) {
+			case RPORT_ST_PLOGI:
+			case RPORT_ST_PRLI:
+			case RPORT_ST_LOGO:
+				rdata->event = RPORT_EV_FAILED;
+				queue_work(rport_event_queue,
+					   &rdata->event_work);
+				break;
+			case RPORT_ST_RTV:
+				fc_rport_enter_ready(rport);
+				break;
+			case RPORT_ST_NONE:
+			case RPORT_ST_READY:
+			case RPORT_ST_INIT:
+				break;
+			}
+		}
+	}
+}
+
+/**
+ * fc_rport_plogi_resp - Handle incoming ELS PLOGI response
+ * @sp: current sequence in the PLOGI exchange
+ * @fp: response frame
+ * @rp_arg: Fibre Channel remote port
+ *
+ * Locking Note: This function will be called without the rport lock
+ * held, but it will lock, call an _enter_* function or fc_rport_error
+ * and then unlock the rport.
+ */
+static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
+				void *rp_arg)
+{
+	struct fc_rport *rport = rp_arg;
+	struct fc_rport_libfc_priv *rdata = rport->dd_data;
+	struct fc_lport *lport = rdata->local_port;
+	struct fc_els_flogi *plp;
+	unsigned int tov;
+	u16 csp_seq;
+	u16 cssp_seq;
+	u8 op;
+
+	mutex_lock(&rdata->rp_mutex);
+
+	FC_DEBUG_RPORT("Received a PLOGI response from port (%6x)\n",
+		       rport->port_id);
+
+	if (rdata->rp_state != RPORT_ST_PLOGI) {
+		FC_DBG("Received a PLOGI response, but in state %s\n",
+		       fc_rport_state(rport));
+		goto out;
+	}
+
+	if (IS_ERR(fp)) {
+		fc_rport_error(rport, fp);
+		goto err;
+	}
+
+	op = fc_frame_payload_op(fp);
+	if (op == ELS_LS_ACC &&
+	    (plp = fc_frame_payload_get(fp, sizeof(*plp))) != NULL) {
+		rport->port_name = get_unaligned_be64(&plp->fl_wwpn);
+		rport->node_name = get_unaligned_be64(&plp->fl_wwnn);
+
+		tov = ntohl(plp->fl_csp.sp_e_d_tov);
+		if (ntohs(plp->fl_csp.sp_features) & FC_SP_FT_EDTR)
+			tov /= 1000;
+		if (tov > rdata->e_d_tov)
+			rdata->e_d_tov = tov;
+		csp_seq = ntohs(plp->fl_csp.sp_tot_seq);
+		cssp_seq = ntohs(plp->fl_cssp[3 - 1].cp_con_seq);
+		if (cssp_seq < csp_seq)
+			csp_seq = cssp_seq;
+		rdata->max_seq = csp_seq;
+		rport->maxframe_size =
+			fc_plogi_get_maxframe(plp, lport->mfs);
+
+		/*
+		 * If the rport is one of the well known addresses
+		 * we skip PRLI and RTV and go straight to READY.
+		 */
+		if (rport->port_id >= FC_FID_DOM_MGR)
+			fc_rport_enter_ready(rport);
+		else
+			fc_rport_enter_prli(rport);
+	} else
+		fc_rport_error(rport, fp);
+
+out:
+	fc_frame_free(fp);
+err:
+	mutex_unlock(&rdata->rp_mutex);
+	put_device(&rport->dev);
+}
+
+/**
+ * fc_rport_enter_plogi - Send Port Login (PLOGI) request to peer
+ * @rport: Fibre Channel remote port to send PLOGI to
+ *
+ * Locking Note: The rport lock is expected to be held before calling
+ * this routine.
+ */
+static void fc_rport_enter_plogi(struct fc_rport *rport)
+{
+	struct fc_rport_libfc_priv *rdata = rport->dd_data;
+	struct fc_lport *lport = rdata->local_port;
+	struct fc_frame *fp;
+
+	FC_DEBUG_RPORT("Port (%6x) entered PLOGI state from %s state\n",
+		       rport->port_id, fc_rport_state(rport));
+
+	fc_rport_state_enter(rport, RPORT_ST_PLOGI);
+
+	rport->maxframe_size = FC_MIN_MAX_PAYLOAD;
+	fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
+	if (!fp) {
+		fc_rport_error(rport, fp);
+		return;
+	}
+	rdata->e_d_tov = lport->e_d_tov;
+
+	if (!lport->tt.elsct_send(lport, rport, fp, ELS_PLOGI,
+				  fc_rport_plogi_resp, rport, lport->e_d_tov))
+		fc_rport_error(rport, fp);
+	else
+		get_device(&rport->dev);
+}
+
+/**
+ * fc_rport_prli_resp - Process Login (PRLI) response handler
+ * @sp: current sequence in the PRLI exchange
+ * @fp: response frame
+ * @rp_arg: Fibre Channel remote port
+ *
+ * Locking Note: This function will be called without the rport lock
+ * held, but it will lock, call an _enter_* function or fc_rport_error
+ * and then unlock the rport.
+ */
+static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
+			       void *rp_arg)
+{
+	struct fc_rport *rport = rp_arg;
+	struct fc_rport_libfc_priv *rdata = rport->dd_data;
+	struct {
+		struct fc_els_prli prli;
+		struct fc_els_spp spp;
+	} *pp;
+	u32 roles = FC_RPORT_ROLE_UNKNOWN;
+	u32 fcp_parm = 0;
+	u8 op;
+
+	mutex_lock(&rdata->rp_mutex);
+
+	FC_DEBUG_RPORT("Received a PRLI response from port (%6x)\n",
+		       rport->port_id);
+
+	if (rdata->rp_state != RPORT_ST_PRLI) {
+		FC_DBG("Received a PRLI response, but in state %s\n",
+		       fc_rport_state(rport));
+		goto out;
+	}
+
+	if (IS_ERR(fp)) {
+		fc_rport_error(rport, fp);
+		goto err;
+	}
+
+	op = fc_frame_payload_op(fp);
+	if (op == ELS_LS_ACC) {
+		pp = fc_frame_payload_get(fp, sizeof(*pp));
+		if (pp && pp->prli.prli_spp_len >= sizeof(pp->spp)) {
+			fcp_parm = ntohl(pp->spp.spp_params);
+			if (fcp_parm & FCP_SPPF_RETRY)
+				rdata->flags |= FC_RP_FLAGS_RETRY;
+		}
+
+		rport->supported_classes = FC_COS_CLASS3;
+		if (fcp_parm & FCP_SPPF_INIT_FCN)
+			roles |= FC_RPORT_ROLE_FCP_INITIATOR;
+		if (fcp_parm & FCP_SPPF_TARG_FCN)
+			roles |= FC_RPORT_ROLE_FCP_TARGET;
+
+		rport->roles = roles;
+		fc_rport_enter_rtv(rport);
+
+	} else {
+		FC_DBG("Bad ELS response\n");
+		rdata->event = RPORT_EV_FAILED;
+		queue_work(rport_event_queue, &rdata->event_work);
+	}
+
+out:
+	fc_frame_free(fp);
+err:
+	mutex_unlock(&rdata->rp_mutex);
+	put_device(&rport->dev);
+}
+
+/**
+ * fc_rport_logo_resp - Logout (LOGO) response handler
+ * @sp: current sequence in the LOGO exchange
+ * @fp: response frame
+ * @rp_arg: Fibre Channel remote port
+ *
+ * Locking Note: This function will be called without the rport lock
+ * held, but it will lock, call an _enter_* function or fc_rport_error
+ * and then unlock the rport.
+ */
+static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
+			       void *rp_arg)
+{
+	struct fc_rport *rport = rp_arg;
+	struct fc_rport_libfc_priv *rdata = rport->dd_data;
+	u8 op;
+
+	mutex_lock(&rdata->rp_mutex);
+
+	FC_DEBUG_RPORT("Received a LOGO response from port (%6x)\n",
+		       rport->port_id);
+
+	if (IS_ERR(fp)) {
+		fc_rport_error(rport, fp);
+		goto err;
+	}
+
+	if (rdata->rp_state != RPORT_ST_LOGO) {
+		FC_DEBUG_RPORT("Received a LOGO response, but in state %s\n",
+			       fc_rport_state(rport));
+		goto out;
+	}
+
+	op = fc_frame_payload_op(fp);
+	if (op == ELS_LS_ACC) {
+		fc_rport_enter_rtv(rport);
+	} else {
+		FC_DBG("Bad ELS response\n");
+		rdata->event = RPORT_EV_LOGO;
+		queue_work(rport_event_queue, &rdata->event_work);
+	}
+
+out:
+	fc_frame_free(fp);
+err:
+	mutex_unlock(&rdata->rp_mutex);
+	put_device(&rport->dev);
+}
+
+/**
+ * fc_rport_enter_prli - Send Process Login (PRLI) request to peer
+ * @rport: Fibre Channel remote port to send PRLI to
+ *
+ * Locking Note: The rport lock is expected to be held before calling
+ * this routine.
+ */
+static void fc_rport_enter_prli(struct fc_rport *rport)
+{
+	struct fc_rport_libfc_priv *rdata = rport->dd_data;
+	struct fc_lport *lport = rdata->local_port;
+	struct {
+		struct fc_els_prli prli;
+		struct fc_els_spp spp;
+	} *pp;
+	struct fc_frame *fp;
+
+	FC_DEBUG_RPORT("Port (%6x) entered PRLI state from %s state\n",
+		       rport->port_id, fc_rport_state(rport));
+
+	fc_rport_state_enter(rport, RPORT_ST_PRLI);
+
+	fp = fc_frame_alloc(lport, sizeof(*pp));
+	if (!fp) {
+		fc_rport_error(rport, fp);
+		return;
+	}
+
+	if (!lport->tt.elsct_send(lport, rport, fp, ELS_PRLI,
+				  fc_rport_prli_resp, rport, lport->e_d_tov))
+		fc_rport_error(rport, fp);
+	else
+		get_device(&rport->dev);
+}
+
+/**
+ * fc_rport_els_rtv_resp - Request Timeout Value response handler
+ * @sp: current sequence in the RTV exchange
+ * @fp: response frame
+ * @rp_arg: Fibre Channel remote port
+ *
+ * Many targets don't seem to support this.
+ *
+ * Locking Note: This function will be called without the rport lock
+ * held, but it will lock, call an _enter_* function or fc_rport_error
+ * and then unlock the rport.
+ */
+static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp,
+			      void *rp_arg)
+{
+	struct fc_rport *rport = rp_arg;
+	struct fc_rport_libfc_priv *rdata = rport->dd_data;
+	u8 op;
+
+	mutex_lock(&rdata->rp_mutex);
+
+	FC_DEBUG_RPORT("Received a RTV response from port (%6x)\n",
+		       rport->port_id);
+
+	if (rdata->rp_state != RPORT_ST_RTV) {
+		FC_DBG("Received a RTV response, but in state %s\n",
+		       fc_rport_state(rport));
+		goto out;
+	}
+
+	if (IS_ERR(fp)) {
+		fc_rport_error(rport, fp);
+		goto err;
+	}
+
+	op = fc_frame_payload_op(fp);
+	if (op == ELS_LS_ACC) {
+		struct fc_els_rtv_acc *rtv;
+		u32 toq;
+		u32 tov;
+
+		rtv = fc_frame_payload_get(fp, sizeof(*rtv));
+		if (rtv) {
+			toq = ntohl(rtv->rtv_toq);
+			tov = ntohl(rtv->rtv_r_a_tov);
+			if (tov == 0)
+				tov = 1;
+			rdata->r_a_tov = tov;
+			tov = ntohl(rtv->rtv_e_d_tov);
+			if (toq & FC_ELS_RTV_EDRES)
+				tov /= 1000000;
+			if (tov == 0)
+				tov = 1;
+			rdata->e_d_tov = tov;
+		}
+	}
+
+	fc_rport_enter_ready(rport);
+
+out:
+	fc_frame_free(fp);
+err:
+	mutex_unlock(&rdata->rp_mutex);
+	put_device(&rport->dev);
+}
+
+/**
+ * fc_rport_enter_rtv - Send Request Timeout Value (RTV) request to peer
+ * @rport: Fibre Channel remote port to send RTV to
+ *
+ * Locking Note: The rport lock is expected to be held before calling
+ * this routine.
+ */
+static void fc_rport_enter_rtv(struct fc_rport *rport)
+{
+	struct fc_frame *fp;
+	struct fc_rport_libfc_priv *rdata = rport->dd_data;
+	struct fc_lport *lport = rdata->local_port;
+
+	FC_DEBUG_RPORT("Port (%6x) entered RTV state from %s state\n",
+		       rport->port_id, fc_rport_state(rport));
+
+	fc_rport_state_enter(rport, RPORT_ST_RTV);
+
+	fp = fc_frame_alloc(lport, sizeof(struct fc_els_rtv));
+	if (!fp) {
+		fc_rport_error(rport, fp);
+		return;
+	}
+
+	if (!lport->tt.elsct_send(lport, rport, fp, ELS_RTV,
+				     fc_rport_rtv_resp, rport, lport->e_d_tov))
+		fc_rport_error(rport, fp);
+	else
+		get_device(&rport->dev);
+}
+
+/**
+ * fc_rport_enter_logo - Send Logout (LOGO) request to peer
+ * @rport: Fibre Channel remote port to send LOGO to
+ *
+ * Locking Note: The rport lock is expected to be held before calling
+ * this routine.
+ */
+static void fc_rport_enter_logo(struct fc_rport *rport)
+{
+	struct fc_rport_libfc_priv *rdata = rport->dd_data;
+	struct fc_lport *lport = rdata->local_port;
+	struct fc_frame *fp;
+
+	FC_DEBUG_RPORT("Port (%6x) entered LOGO state from %s state\n",
+		       rport->port_id, fc_rport_state(rport));
+
+	fc_rport_state_enter(rport, RPORT_ST_LOGO);
+
+	fp = fc_frame_alloc(lport, sizeof(struct fc_els_logo));
+	if (!fp) {
+		fc_rport_error(rport, fp);
+		return;
+	}
+
+	if (!lport->tt.elsct_send(lport, rport, fp, ELS_LOGO,
+				  fc_rport_logo_resp, rport, lport->e_d_tov))
+		fc_rport_error(rport, fp);
+	else
+		get_device(&rport->dev);
+}
+
+
+/**
+ * fc_rport_recv_req - Receive a request from a rport
+ * @sp: current sequence in the request exchange
+ * @fp: request frame
+ * @rport: Fibre Channel remote port
+ *
+ * Locking Note: Called without the rport lock held. This
+ * function will hold the rport lock, call an _enter_*
+ * function and then unlock the rport.
+ */
+void fc_rport_recv_req(struct fc_seq *sp, struct fc_frame *fp,
+		       struct fc_rport *rport)
+{
+	struct fc_rport_libfc_priv *rdata = rport->dd_data;
+	struct fc_lport *lport = rdata->local_port;
+
+	struct fc_frame_header *fh;
+	struct fc_seq_els_data els_data;
+	u8 op;
+
+	mutex_lock(&rdata->rp_mutex);
+
+	els_data.fp = NULL;
+	els_data.explan = ELS_EXPL_NONE;
+	els_data.reason = ELS_RJT_NONE;
+
+	fh = fc_frame_header_get(fp);
+
+	if (fh->fh_r_ctl == FC_RCTL_ELS_REQ && fh->fh_type == FC_TYPE_ELS) {
+		op = fc_frame_payload_op(fp);
+		switch (op) {
+		case ELS_PLOGI:
+			fc_rport_recv_plogi_req(rport, sp, fp);
+			break;
+		case ELS_PRLI:
+			fc_rport_recv_prli_req(rport, sp, fp);
+			break;
+		case ELS_PRLO:
+			fc_rport_recv_prlo_req(rport, sp, fp);
+			break;
+		case ELS_LOGO:
+			fc_rport_recv_logo_req(rport, sp, fp);
+			break;
+		case ELS_RRQ:
+			els_data.fp = fp;
+			lport->tt.seq_els_rsp_send(sp, ELS_RRQ, &els_data);
+			break;
+		case ELS_REC:
+			els_data.fp = fp;
+			lport->tt.seq_els_rsp_send(sp, ELS_REC, &els_data);
+			break;
+		default:
+			els_data.reason = ELS_RJT_UNSUP;
+			lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &els_data);
+			break;
+		}
+	}
+
+	mutex_unlock(&rdata->rp_mutex);
+}
+
+/**
+ * fc_rport_recv_plogi_req - Handle incoming Port Login (PLOGI) request
+ * @rport: Fibre Channel remote port that initiated PLOGI
+ * @sp: current sequence in the PLOGI exchange
+ * @rx_fp: PLOGI request frame
+ *
+ * Locking Note: The rport lock is expected to be held before calling
+ * this function.
+ */
+static void fc_rport_recv_plogi_req(struct fc_rport *rport,
+				    struct fc_seq *sp, struct fc_frame *rx_fp)
+{
+	struct fc_rport_libfc_priv *rdata = rport->dd_data;
+	struct fc_lport *lport = rdata->local_port;
+	struct fc_frame *fp = rx_fp;
+	struct fc_exch *ep;
+	struct fc_frame_header *fh;
+	struct fc_els_flogi *pl;
+	struct fc_seq_els_data rjt_data;
+	u32 sid;
+	u64 wwpn;
+	u64 wwnn;
+	enum fc_els_rjt_reason reject = 0;
+	u32 f_ctl;
+	rjt_data.fp = NULL;
+
+	fh = fc_frame_header_get(fp);
+
+	FC_DEBUG_RPORT("Received PLOGI request from port (%6x) "
+		       "while in state %s\n", ntoh24(fh->fh_s_id),
+		       fc_rport_state(rport));
+
+	sid = ntoh24(fh->fh_s_id);
+	pl = fc_frame_payload_get(fp, sizeof(*pl));
+	if (!pl) {
+		FC_DBG("incoming PLOGI from %x too short\n", sid);
+		WARN_ON(1);
+		/* XXX TBD: send reject? */
+		fc_frame_free(fp);
+		return;
+	}
+	wwpn = get_unaligned_be64(&pl->fl_wwpn);
+	wwnn = get_unaligned_be64(&pl->fl_wwnn);
+
+	/*
+	 * If the session was just created, possibly due to the incoming PLOGI,
+	 * set the state appropriately and accept the PLOGI.
+	 *
+	 * If we had also sent a PLOGI, and if the received PLOGI is from a
+	 * higher WWPN, we accept it, otherwise an LS_RJT is sent with reason
+	 * "command already in progress".
+	 *
+	 * XXX TBD: If the session was ready before, the PLOGI should result in
+	 * all outstanding exchanges being reset.
+	 */
+	switch (rdata->rp_state) {
+	case RPORT_ST_INIT:
+		FC_DEBUG_RPORT("incoming PLOGI from %6x wwpn %llx state INIT "
+			       "- reject\n", sid, wwpn);
+		reject = ELS_RJT_UNSUP;
+		break;
+	case RPORT_ST_PLOGI:
+		FC_DEBUG_RPORT("incoming PLOGI from %x in PLOGI state %d\n",
+			       sid, rdata->rp_state);
+		if (wwpn < lport->wwpn)
+			reject = ELS_RJT_INPROG;
+		break;
+	case RPORT_ST_PRLI:
+	case RPORT_ST_READY:
+		FC_DEBUG_RPORT("incoming PLOGI from %x in logged-in state %d "
+			       "- ignored for now\n", sid, rdata->rp_state);
+		/* XXX TBD - should reset */
+		break;
+	case RPORT_ST_NONE:
+	default:
+		FC_DEBUG_RPORT("incoming PLOGI from %x in unexpected "
+			       "state %d\n", sid, rdata->rp_state);
+		break;
+	}
+
+	if (reject) {
+		rjt_data.reason = reject;
+		rjt_data.explan = ELS_EXPL_NONE;
+		lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
+		fc_frame_free(fp);
+	} else {
+		fp = fc_frame_alloc(lport, sizeof(*pl));
+		if (fp == NULL) {
+			fp = rx_fp;
+			rjt_data.reason = ELS_RJT_UNAB;
+			rjt_data.explan = ELS_EXPL_NONE;
+			lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
+			fc_frame_free(fp);
+		} else {
+			sp = lport->tt.seq_start_next(sp);
+			WARN_ON(!sp);
+			fc_rport_set_name(rport, wwpn, wwnn);
+
+			/*
+			 * Get session payload size from incoming PLOGI.
+			 */
+			rport->maxframe_size =
+				fc_plogi_get_maxframe(pl, lport->mfs);
+			fc_frame_free(rx_fp);
+			fc_plogi_fill(lport, fp, ELS_LS_ACC);
+
+			/*
+			 * Send LS_ACC. If this fails,
+			 * the originator should retry.
+			 */
+			f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ;
+			f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
+			ep = fc_seq_exch(sp);
+			fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
+				       FC_TYPE_ELS, f_ctl, 0);
+			lport->tt.seq_send(lport, sp, fp);
+			if (rdata->rp_state == RPORT_ST_PLOGI)
+				fc_rport_enter_prli(rport);
+		}
+	}
+}
+
+/**
+ * fc_rport_recv_prli_req - Handle incoming Process Login (PRLI) request
+ * @rport: Fibre Channel remote port that initiated PRLI
+ * @sp: current sequence in the PRLI exchange
+ * @rx_fp: PRLI request frame
+ *
+ * Locking Note: The rport lock is expected to be held before calling
+ * this function.
+ */
+static void fc_rport_recv_prli_req(struct fc_rport *rport,
+				   struct fc_seq *sp, struct fc_frame *rx_fp)
+{
+	struct fc_rport_libfc_priv *rdata = rport->dd_data;
+	struct fc_lport *lport = rdata->local_port;
+	struct fc_exch *ep;
+	struct fc_frame *fp;
+	struct fc_frame_header *fh;
+	struct {
+		struct fc_els_prli prli;
+		struct fc_els_spp spp;
+	} *pp;
+	struct fc_els_spp *rspp;	/* request service param page */
+	struct fc_els_spp *spp;	/* response spp */
+	unsigned int len;
+	unsigned int plen;
+	enum fc_els_rjt_reason reason = ELS_RJT_UNAB;
+	enum fc_els_rjt_explan explan = ELS_EXPL_NONE;
+	enum fc_els_spp_resp resp;
+	struct fc_seq_els_data rjt_data;
+	u32 f_ctl;
+	u32 fcp_parm;
+	u32 roles = FC_RPORT_ROLE_UNKNOWN;
+	rjt_data.fp = NULL;
+
+	fh = fc_frame_header_get(rx_fp);
+
+	FC_DEBUG_RPORT("Received PRLI request from port (%6x) "
+		       "while in state %s\n", ntoh24(fh->fh_s_id),
+		       fc_rport_state(rport));
+
+	switch (rdata->rp_state) {
+	case RPORT_ST_PRLI:
+	case RPORT_ST_READY:
+		reason = ELS_RJT_NONE;
+		break;
+	default:
+		break;
+	}
+	len = fr_len(rx_fp) - sizeof(*fh);
+	pp = fc_frame_payload_get(rx_fp, sizeof(*pp));
+	if (pp == NULL) {
+		reason = ELS_RJT_PROT;
+		explan = ELS_EXPL_INV_LEN;
+	} else {
+		plen = ntohs(pp->prli.prli_len);
+		if ((plen % 4) != 0 || plen > len) {
+			reason = ELS_RJT_PROT;
+			explan = ELS_EXPL_INV_LEN;
+		} else if (plen < len) {
+			len = plen;
+		}
+		plen = pp->prli.prli_spp_len;
+		if ((plen % 4) != 0 || plen < sizeof(*spp) ||
+		    plen > len || len < sizeof(*pp)) {
+			reason = ELS_RJT_PROT;
+			explan = ELS_EXPL_INV_LEN;
+		}
+		rspp = &pp->spp;
+	}
+	if (reason != ELS_RJT_NONE ||
+	    (fp = fc_frame_alloc(lport, len)) == NULL) {
+		rjt_data.reason = reason;
+		rjt_data.explan = explan;
+		lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
+	} else {
+		sp = lport->tt.seq_start_next(sp);
+		WARN_ON(!sp);
+		pp = fc_frame_payload_get(fp, len);
+		WARN_ON(!pp);
+		memset(pp, 0, len);
+		pp->prli.prli_cmd = ELS_LS_ACC;
+		pp->prli.prli_spp_len = plen;
+		pp->prli.prli_len = htons(len);
+		len -= sizeof(struct fc_els_prli);
+
+		/*
+		 * Go through all the service parameter pages and build
+		 * response.  If plen indicates longer SPP than standard,
+		 * use that.  The entire response has been pre-cleared above.
+		 */
+		spp = &pp->spp;
+		while (len >= plen) {
+			spp->spp_type = rspp->spp_type;
+			spp->spp_type_ext = rspp->spp_type_ext;
+			spp->spp_flags = rspp->spp_flags & FC_SPP_EST_IMG_PAIR;
+			resp = FC_SPP_RESP_ACK;
+			if (rspp->spp_flags & FC_SPP_RPA_VAL)
+				resp = FC_SPP_RESP_NO_PA;
+			switch (rspp->spp_type) {
+			case 0:	/* common to all FC-4 types */
+				break;
+			case FC_TYPE_FCP:
+				fcp_parm = ntohl(rspp->spp_params);
+				if (fcp_parm & FCP_SPPF_RETRY)
+					rdata->flags |= FC_RP_FLAGS_RETRY;
+				rport->supported_classes = FC_COS_CLASS3;
+				if (fcp_parm & FCP_SPPF_INIT_FCN)
+					roles |= FC_RPORT_ROLE_FCP_INITIATOR;
+				if (fcp_parm & FCP_SPPF_TARG_FCN)
+					roles |= FC_RPORT_ROLE_FCP_TARGET;
+				rport->roles = roles;
+
+				spp->spp_params =
+					htonl(lport->service_params);
+				break;
+			default:
+				resp = FC_SPP_RESP_INVL;
+				break;
+			}
+			spp->spp_flags |= resp;
+			len -= plen;
+			rspp = (struct fc_els_spp *)((char *)rspp + plen);
+			spp = (struct fc_els_spp *)((char *)spp + plen);
+		}
+
+		/*
+		 * Send LS_ACC. If this fails, the originator should retry.
+		 */
+		f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ;
+		f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
+		ep = fc_seq_exch(sp);
+		fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
+			       FC_TYPE_ELS, f_ctl, 0);
+		lport->tt.seq_send(lport, sp, fp);
+
+		/*
+		 * Get lock and re-check state.
+		 */
+		switch (rdata->rp_state) {
+		case RPORT_ST_PRLI:
+			fc_rport_enter_ready(rport);
+			break;
+		case RPORT_ST_READY:
+			break;
+		default:
+			break;
+		}
+	}
+	fc_frame_free(rx_fp);
+}
+
+/**
+ * fc_rport_recv_prlo_req - Handle incoming Process Logout (PRLO) request
+ * @rport: Fibre Channel remote port that initiated PRLO
+ * @sp: current sequence in the PRLO exchange
+ * @fp: PRLO request frame
+ *
+ * Locking Note: The rport lock is expected to be held before calling
+ * this function.
+ */
+static void fc_rport_recv_prlo_req(struct fc_rport *rport, struct fc_seq *sp,
+				   struct fc_frame *fp)
+{
+	struct fc_rport_libfc_priv *rdata = rport->dd_data;
+	struct fc_lport *lport = rdata->local_port;
+
+	struct fc_frame_header *fh;
+	struct fc_seq_els_data rjt_data;
+
+	fh = fc_frame_header_get(fp);
+
+	FC_DEBUG_RPORT("Received PRLO request from port (%6x) "
+		       "while in state %s\n", ntoh24(fh->fh_s_id),
+		       fc_rport_state(rport));
+
+	rjt_data.fp = NULL;
+	rjt_data.reason = ELS_RJT_UNAB;
+	rjt_data.explan = ELS_EXPL_NONE;
+	lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
+	fc_frame_free(fp);
+}
+
+/**
+ * fc_rport_recv_logo_req - Handle incoming Logout (LOGO) request
+ * @rport: Fibre Channel remote port that initiated LOGO
+ * @sp: current sequence in the LOGO exchange
+ * @fp: LOGO request frame
+ *
+ * Locking Note: The rport lock is expected to be held before calling
+ * this function.
+ */
+static void fc_rport_recv_logo_req(struct fc_rport *rport, struct fc_seq *sp,
+				   struct fc_frame *fp)
+{
+	struct fc_frame_header *fh;
+	struct fc_rport_libfc_priv *rdata = rport->dd_data;
+	struct fc_lport *lport = rdata->local_port;
+
+	fh = fc_frame_header_get(fp);
+
+	FC_DEBUG_RPORT("Received LOGO request from port (%6x) "
+		       "while in state %s\n", ntoh24(fh->fh_s_id),
+		       fc_rport_state(rport));
+
+	rdata->event = RPORT_EV_LOGO;
+	queue_work(rport_event_queue, &rdata->event_work);
+
+	lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
+	fc_frame_free(fp);
+}
+
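+/**
+ * fc_rport_flush_queue - flush the rport_event_queue
+ *
+ * Waits until all queued rport event work has finished executing.
+ */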
+static void fc_rport_flush_queue(void)
+{
+	flush_workqueue(rport_event_queue);
+}
+
+
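+/**
+ * fc_rport_init - Fill in default rport handlers in a transport template
+ * @lport: local port whose template is being initialized
+ *
+ * Only handlers the LLD has left NULL are set; anything the driver
+ * already installed is preserved.
+ */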
+int fc_rport_init(struct fc_lport *lport)
+{
+	if (!lport->tt.rport_login)
+		lport->tt.rport_login = fc_rport_login;
+
+	if (!lport->tt.rport_logoff)
+		lport->tt.rport_logoff = fc_rport_logoff;
+
+	if (!lport->tt.rport_recv_req)
+		lport->tt.rport_recv_req = fc_rport_recv_req;
+
+	if (!lport->tt.rport_flush_queue)
+		lport->tt.rport_flush_queue = fc_rport_flush_queue;
+
+	return 0;
+}
+EXPORT_SYMBOL(fc_rport_init);
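+
+/*
+ * Illustrative sketch (an assumption, not part of this patch): an LLD
+ * that wants the default remote port handling would simply call
+ * fc_rport_init() while configuring its local port, e.g.:
+ *
+ *	static int my_lport_config(struct fc_lport *lport)
+ *	{
+ *		return fc_rport_init(lport);
+ *	}
+ *
+ * where my_lport_config() is a hypothetical driver helper.
+ */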
+
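+/**
+ * fc_setup_rport - Allocate the rport event workqueue
+ *
+ * Returns -ENOMEM if the single-threaded workqueue cannot be created.
+ */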
+int fc_setup_rport(void)
+{
+	rport_event_queue = create_singlethread_workqueue("fc_rport_eq");
+	if (!rport_event_queue)
+		return -ENOMEM;
+	return 0;
+}
+EXPORT_SYMBOL(fc_setup_rport);
+
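+/**
+ * fc_destroy_rport - Destroy the rport event workqueue
+ */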
+void fc_destroy_rport(void)
+{
+	destroy_workqueue(rport_event_queue);
+}
+EXPORT_SYMBOL(fc_destroy_rport);
+
+void fc_rport_terminate_io(struct fc_rport *rport)
+{
+	struct fc_rport_libfc_priv *rdata = rport->dd_data;
+	struct fc_lport *lport = rdata->local_port;
+
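+	/*
+	 * Reset exchanges in both directions so that no I/O involving
+	 * this remote port remains outstanding.
+	 */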
+	lport->tt.exch_mgr_reset(lport->emp, 0, rport->port_id);
+	lport->tt.exch_mgr_reset(lport->emp, rport->port_id, 0);
+}
+EXPORT_SYMBOL(fc_rport_terminate_io);
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 3fdee7370ccc..7225b6e2029e 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -88,34 +88,47 @@ iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
 }
 EXPORT_SYMBOL_GPL(iscsi_update_cmdsn);
 
-void iscsi_prep_unsolicit_data_pdu(struct iscsi_task *task,
-				   struct iscsi_data *hdr)
+/**
+ * iscsi_prep_data_out_pdu - initialize Data-Out
+ * @task: scsi command task
+ * @r2t: R2T info
+ * @hdr: iscsi data pdu to be initialized
+ *
+ * Notes:
+ *	Initializes the Data-Out PDU within this R2T sequence and finds
+ *	the proper data_offset within this SCSI command.
+ *
+ *	This function is called with connection lock taken.
+ **/
+void iscsi_prep_data_out_pdu(struct iscsi_task *task, struct iscsi_r2t_info *r2t,
+			   struct iscsi_data *hdr)
 {
 	struct iscsi_conn *conn = task->conn;
+	unsigned int left = r2t->data_length - r2t->sent;
+
+	task->hdr_len = sizeof(struct iscsi_data);
 
 	memset(hdr, 0, sizeof(struct iscsi_data));
-	hdr->ttt = cpu_to_be32(ISCSI_RESERVED_TAG);
-	hdr->datasn = cpu_to_be32(task->unsol_datasn);
-	task->unsol_datasn++;
+	hdr->ttt = r2t->ttt;
+	hdr->datasn = cpu_to_be32(r2t->datasn);
+	r2t->datasn++;
 	hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
-	memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
-
-	hdr->itt = task->hdr->itt;
-	hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
-	hdr->offset = cpu_to_be32(task->unsol_offset);
-
-	if (task->unsol_count > conn->max_xmit_dlength) {
+	memcpy(hdr->lun, task->lun, sizeof(hdr->lun));
+	hdr->itt = task->hdr_itt;
+	hdr->exp_statsn = r2t->exp_statsn;
+	hdr->offset = cpu_to_be32(r2t->data_offset + r2t->sent);
+	if (left > conn->max_xmit_dlength) {
 		hton24(hdr->dlength, conn->max_xmit_dlength);
-		task->data_count = conn->max_xmit_dlength;
-		task->unsol_offset += task->data_count;
+		r2t->data_count = conn->max_xmit_dlength;
 		hdr->flags = 0;
 	} else {
-		hton24(hdr->dlength, task->unsol_count);
-		task->data_count = task->unsol_count;
+		hton24(hdr->dlength, left);
+		r2t->data_count = left;
 		hdr->flags = ISCSI_FLAG_CMD_FINAL;
 	}
+	conn->dataout_pdus_cnt++;
 }
-EXPORT_SYMBOL_GPL(iscsi_prep_unsolicit_data_pdu);
+EXPORT_SYMBOL_GPL(iscsi_prep_data_out_pdu);
 
 static int iscsi_add_hdr(struct iscsi_task *task, unsigned len)
 {
@@ -206,11 +219,24 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
 {
 	struct iscsi_conn *conn = task->conn;
 	struct iscsi_session *session = conn->session;
-	struct iscsi_cmd *hdr = task->hdr;
 	struct scsi_cmnd *sc = task->sc;
+	struct iscsi_cmd *hdr;
 	unsigned hdrlength, cmd_len;
+	itt_t itt;
 	int rc;
 
+	rc = conn->session->tt->alloc_pdu(task, ISCSI_OP_SCSI_CMD);
+	if (rc)
+		return rc;
+	hdr = (struct iscsi_cmd *) task->hdr;
+	itt = hdr->itt;
+	memset(hdr, 0, sizeof(*hdr));
+
+	if (session->tt->parse_pdu_itt)
+		hdr->itt = task->hdr_itt = itt;
+	else
+		hdr->itt = task->hdr_itt = build_itt(task->itt,
+						     task->conn->session->age);
 	task->hdr_len = 0;
 	rc = iscsi_add_hdr(task, sizeof(*hdr));
 	if (rc)
@@ -218,8 +244,8 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
 	hdr->opcode = ISCSI_OP_SCSI_CMD;
 	hdr->flags = ISCSI_ATTR_SIMPLE;
 	int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun);
-	hdr->itt = build_itt(task->itt, session->age);
-	hdr->cmdsn = cpu_to_be32(session->cmdsn);
+	memcpy(task->lun, hdr->lun, sizeof(task->lun));
+	hdr->cmdsn = task->cmdsn = cpu_to_be32(session->cmdsn);
 	session->cmdsn++;
 	hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
 	cmd_len = sc->cmd_len;
@@ -242,6 +268,8 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
 	}
 	if (sc->sc_data_direction == DMA_TO_DEVICE) {
 		unsigned out_len = scsi_out(sc)->length;
+		struct iscsi_r2t_info *r2t = &task->unsol_r2t;
+
 		hdr->data_length = cpu_to_be32(out_len);
 		hdr->flags |= ISCSI_FLAG_CMD_WRITE;
 		/*
@@ -254,13 +282,11 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
 		 *			without	R2T ack right after
 		 *			immediate data
 		 *
-		 *	r2t_data_count	bytes to be sent via R2T ack's
+		 *	r2t data_length bytes to be sent via R2T ack's
 		 *
 		 *      pad_count       bytes to be sent as zero-padding
 		 */
-		task->unsol_count = 0;
-		task->unsol_offset = 0;
-		task->unsol_datasn = 0;
+		memset(r2t, 0, sizeof(*r2t));
 
 		if (session->imm_data_en) {
 			if (out_len >= session->first_burst)
@@ -274,12 +300,14 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
 			zero_data(hdr->dlength);
 
 		if (!session->initial_r2t_en) {
-			task->unsol_count = min(session->first_burst, out_len)
-							     - task->imm_count;
-			task->unsol_offset = task->imm_count;
+			r2t->data_length = min(session->first_burst, out_len) -
+					       task->imm_count;
+			r2t->data_offset = task->imm_count;
+			r2t->ttt = cpu_to_be32(ISCSI_RESERVED_TAG);
+			r2t->exp_statsn = cpu_to_be32(conn->exp_statsn);
 		}
 
-		if (!task->unsol_count)
+		if (!task->unsol_r2t.data_length)
 			/* No unsolicited Data-Out's */
 			hdr->flags |= ISCSI_FLAG_CMD_FINAL;
 	} else {
@@ -300,8 +328,7 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
 	WARN_ON(hdrlength >= 256);
 	hdr->hlength = hdrlength & 0xFF;
 
-	if (conn->session->tt->init_task &&
-	    conn->session->tt->init_task(task))
+	if (session->tt->init_task && session->tt->init_task(task))
 		return -EIO;
 
 	task->state = ISCSI_TASK_RUNNING;
@@ -332,6 +359,7 @@ static void iscsi_complete_command(struct iscsi_task *task)
 	struct iscsi_session *session = conn->session;
 	struct scsi_cmnd *sc = task->sc;
 
+	session->tt->cleanup_task(task);
 	list_del_init(&task->running);
 	task->state = ISCSI_TASK_COMPLETED;
 	task->sc = NULL;
@@ -402,8 +430,6 @@ static void fail_command(struct iscsi_conn *conn, struct iscsi_task *task,
 		 * the cmd in the sequencing
 		 */
 		conn->session->queued_cmdsn--;
-	else
-		conn->session->tt->cleanup_task(conn, task);
 
 	sc->result = err;
 	if (!scsi_bidi_cmnd(sc))
@@ -423,7 +449,7 @@ static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
 				struct iscsi_task *task)
 {
 	struct iscsi_session *session = conn->session;
-	struct iscsi_hdr *hdr = (struct iscsi_hdr *)task->hdr;
+	struct iscsi_hdr *hdr = task->hdr;
 	struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr;
 
 	if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
@@ -437,7 +463,6 @@ static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
 	 */
 	nop->cmdsn = cpu_to_be32(session->cmdsn);
 	if (hdr->itt != RESERVED_ITT) {
-		hdr->itt = build_itt(task->itt, session->age);
 		/*
 		 * TODO: We always use immediate, so we never hit this.
 		 * If we start to send tmfs or nops as non-immediate then
@@ -450,12 +475,13 @@ static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
 		}
 	}
 
-	if (session->tt->init_task)
-		session->tt->init_task(task);
+	if (session->tt->init_task && session->tt->init_task(task))
+		return -EIO;
 
 	if ((hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
 		session->state = ISCSI_STATE_LOGGING_OUT;
 
+	task->state = ISCSI_TASK_RUNNING;
 	list_move_tail(&task->running, &conn->mgmt_run_list);
 	debug_scsi("mgmtpdu [op 0x%x hdr->itt 0x%x datalen %d]\n",
 		   hdr->opcode & ISCSI_OPCODE_MASK, hdr->itt,
@@ -469,6 +495,7 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
 {
 	struct iscsi_session *session = conn->session;
 	struct iscsi_task *task;
+	itt_t itt;
 
 	if (session->state == ISCSI_STATE_TERMINATE)
 		return NULL;
@@ -505,23 +532,47 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
 	} else
 		task->data_count = 0;
 
+	if (conn->session->tt->alloc_pdu(task, hdr->opcode)) {
+		iscsi_conn_printk(KERN_ERR, conn, "Could not allocate "
+				 "pdu for mgmt task.\n");
+		goto requeue_task;
+	}
+	itt = task->hdr->itt;
+	task->hdr_len = sizeof(struct iscsi_hdr);
 	memcpy(task->hdr, hdr, sizeof(struct iscsi_hdr));
+
+	if (hdr->itt != RESERVED_ITT) {
+		if (session->tt->parse_pdu_itt)
+			task->hdr->itt = itt;
+		else
+			task->hdr->itt = build_itt(task->itt,
+						   task->conn->session->age);
+	}
+
 	INIT_LIST_HEAD(&task->running);
 	list_add_tail(&task->running, &conn->mgmtqueue);
 
 	if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) {
-		if (iscsi_prep_mgmt_task(conn, task)) {
-			__iscsi_put_task(task);
-			return NULL;
-		}
+		if (iscsi_prep_mgmt_task(conn, task))
+			goto free_task;
 
 		if (session->tt->xmit_task(task))
-			task = NULL;
+			goto free_task;
 
 	} else
 		scsi_queue_work(conn->session->host, &conn->xmitwork);
 
 	return task;
+
+free_task:
+	__iscsi_put_task(task);
+	return NULL;
+
+requeue_task:
+	if (task != conn->login_task)
+		__kfifo_put(session->cmdpool.queue, (void*)&task,
+			    sizeof(void*));
+	return NULL;
 }
 
 int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
@@ -709,7 +760,6 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
 {
 	struct iscsi_reject *reject = (struct iscsi_reject *)hdr;
 	struct iscsi_hdr rejected_pdu;
-	uint32_t itt;
 
 	conn->exp_statsn = be32_to_cpu(reject->statsn) + 1;
 
@@ -719,10 +769,9 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
 
 		if (ntoh24(reject->dlength) >= sizeof(struct iscsi_hdr)) {
 			memcpy(&rejected_pdu, data, sizeof(struct iscsi_hdr));
-			itt = get_itt(rejected_pdu.itt);
 			iscsi_conn_printk(KERN_ERR, conn,
-					  "itt 0x%x had pdu (op 0x%x) rejected "
-					  "due to DataDigest error.\n", itt,
+					  "pdu (op 0x%x) rejected "
+					  "due to DataDigest error.\n",
 					  rejected_pdu.opcode);
 		}
 	}
@@ -742,12 +791,15 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
 static struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt)
 {
 	struct iscsi_session *session = conn->session;
-	uint32_t i;
+	int i;
 
 	if (itt == RESERVED_ITT)
 		return NULL;
 
-	i = get_itt(itt);
+	if (session->tt->parse_pdu_itt)
+		session->tt->parse_pdu_itt(conn, itt, &i, NULL);
+	else
+		i = get_itt(itt);
 	if (i >= session->cmds_max)
 		return NULL;
 
@@ -922,20 +974,25 @@ EXPORT_SYMBOL_GPL(iscsi_complete_pdu);
 int iscsi_verify_itt(struct iscsi_conn *conn, itt_t itt)
 {
 	struct iscsi_session *session = conn->session;
-	uint32_t i;
+	int age = 0, i = 0;
 
 	if (itt == RESERVED_ITT)
 		return 0;
 
-	if (((__force u32)itt & ISCSI_AGE_MASK) !=
-	    (session->age << ISCSI_AGE_SHIFT)) {
+	if (session->tt->parse_pdu_itt)
+		session->tt->parse_pdu_itt(conn, itt, &i, &age);
+	else {
+		i = get_itt(itt);
+		age = ((__force u32)itt >> ISCSI_AGE_SHIFT) & ISCSI_AGE_MASK;
+	}
+
+	if (age != session->age) {
 		iscsi_conn_printk(KERN_ERR, conn,
 				  "received itt %x expected session age (%x)\n",
 				  (__force u32)itt, session->age);
 		return ISCSI_ERR_BAD_ITT;
 	}
 
-	i = get_itt(itt);
 	if (i >= session->cmds_max) {
 		iscsi_conn_printk(KERN_ERR, conn,
 				  "received invalid itt index %u (max cmds "
@@ -1136,8 +1193,13 @@ check_mgmt:
 			fail_command(conn, conn->task, DID_IMM_RETRY << 16);
 			continue;
 		}
-		if (iscsi_prep_scsi_cmd_pdu(conn->task)) {
-			fail_command(conn, conn->task, DID_ABORT << 16);
+		rc = iscsi_prep_scsi_cmd_pdu(conn->task);
+		if (rc) {
+			if (rc == -ENOMEM) {
+				conn->task = NULL;
+				goto again;
+			} else
+				fail_command(conn, conn->task, DID_ABORT << 16);
 			continue;
 		}
 		rc = iscsi_xmit_task(conn);
@@ -1195,6 +1257,26 @@ static void iscsi_xmitworker(struct work_struct *work)
 	} while (rc >= 0 || rc == -EAGAIN);
 }
 
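+/**
+ * iscsi_alloc_task - take a task off the session command pool
+ * @conn: iscsi connection the command arrived on
+ * @sc: scsi command to bind to the task
+ *
+ * Returns NULL when the pool is empty; otherwise does the basic
+ * task setup and hands the task back for queueing.
+ */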
+static inline struct iscsi_task *iscsi_alloc_task(struct iscsi_conn *conn,
+						  struct scsi_cmnd *sc)
+{
+	struct iscsi_task *task;
+
+	if (!__kfifo_get(conn->session->cmdpool.queue,
+			 (void *) &task, sizeof(void *)))
+		return NULL;
+
+	sc->SCp.phase = conn->session->age;
+	sc->SCp.ptr = (char *) task;
+
+	atomic_set(&task->refcount, 1);
+	task->state = ISCSI_TASK_PENDING;
+	task->conn = conn;
+	task->sc = sc;
+	INIT_LIST_HEAD(&task->running);
+	return task;
+}
+
 enum {
 	FAILURE_BAD_HOST = 1,
 	FAILURE_SESSION_FAILED,
@@ -1281,33 +1363,27 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
 		goto reject;
 	}
 
-	if (!__kfifo_get(session->cmdpool.queue, (void*)&task,
-			 sizeof(void*))) {
+	task = iscsi_alloc_task(conn, sc);
+	if (!task) {
 		reason = FAILURE_OOM;
 		goto reject;
 	}
-	sc->SCp.phase = session->age;
-	sc->SCp.ptr = (char *)task;
-
-	atomic_set(&task->refcount, 1);
-	task->state = ISCSI_TASK_PENDING;
-	task->conn = conn;
-	task->sc = sc;
-	INIT_LIST_HEAD(&task->running);
 	list_add_tail(&task->running, &conn->xmitqueue);
 
 	if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) {
-		if (iscsi_prep_scsi_cmd_pdu(task)) {
-			sc->result = DID_ABORT << 16;
-			sc->scsi_done = NULL;
-			iscsi_complete_command(task);
-			goto fault;
+		reason = iscsi_prep_scsi_cmd_pdu(task);
+		if (reason) {
+			if (reason == -ENOMEM) {
+				reason = FAILURE_OOM;
+				goto prepd_reject;
+			} else {
+				sc->result = DID_ABORT << 16;
+				goto prepd_fault;
+			}
 		}
 		if (session->tt->xmit_task(task)) {
-			sc->scsi_done = NULL;
-			iscsi_complete_command(task);
 			reason = FAILURE_SESSION_NOT_READY;
-			goto reject;
+			goto prepd_reject;
 		}
 	} else
 		scsi_queue_work(session->host, &conn->xmitwork);
@@ -1317,12 +1393,18 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
 	spin_lock(host->host_lock);
 	return 0;
 
+prepd_reject:
+	sc->scsi_done = NULL;
+	iscsi_complete_command(task);
 reject:
 	spin_unlock(&session->lock);
 	debug_scsi("cmd 0x%x rejected (%d)\n", sc->cmnd[0], reason);
 	spin_lock(host->host_lock);
 	return SCSI_MLQUEUE_TARGET_BUSY;
 
+prepd_fault:
+	sc->scsi_done = NULL;
+	iscsi_complete_command(task);
 fault:
 	spin_unlock(&session->lock);
 	debug_scsi("iscsi: cmd 0x%x is not queued (%d)\n", sc->cmnd[0], reason);
@@ -1634,9 +1716,9 @@ static void iscsi_prep_abort_task_pdu(struct iscsi_task *task,
 	hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
 	hdr->flags = ISCSI_TM_FUNC_ABORT_TASK & ISCSI_FLAG_TM_FUNC_MASK;
 	hdr->flags |= ISCSI_FLAG_CMD_FINAL;
-	memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
-	hdr->rtt = task->hdr->itt;
-	hdr->refcmdsn = task->hdr->cmdsn;
+	memcpy(hdr->lun, task->lun, sizeof(hdr->lun));
+	hdr->rtt = task->hdr_itt;
+	hdr->refcmdsn = task->cmdsn;
 }
 
 int iscsi_eh_abort(struct scsi_cmnd *sc)
@@ -2223,7 +2305,8 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
 	}
 	spin_unlock_bh(&session->lock);
 
-	data = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN, GFP_KERNEL);
+	data = (char *) __get_free_pages(GFP_KERNEL,
+					 get_order(ISCSI_DEF_MAX_RECV_SEG_LEN));
 	if (!data)
 		goto login_task_data_alloc_fail;
 	conn->login_task->data = conn->data = data;
@@ -2294,7 +2377,8 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
 	iscsi_suspend_tx(conn);
 
 	spin_lock_bh(&session->lock);
-	kfree(conn->data);
+	free_pages((unsigned long) conn->data,
+		   get_order(ISCSI_DEF_MAX_RECV_SEG_LEN));
 	kfree(conn->persistent_address);
 	__kfifo_put(session->cmdpool.queue, (void*)&conn->login_task,
 		    sizeof(void*));
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
new file mode 100644
index 000000000000..a745f91d2928
--- /dev/null
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -0,0 +1,1163 @@
+/*
+ * iSCSI over TCP/IP Data-Path lib
+ *
+ * Copyright (C) 2004 Dmitry Yusupov
+ * Copyright (C) 2004 Alex Aizman
+ * Copyright (C) 2005 - 2006 Mike Christie
+ * Copyright (C) 2006 Red Hat, Inc.  All rights reserved.
+ * maintained by open-iscsi@googlegroups.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * See the file COPYING included with this distribution for more details.
+ *
+ * Credits:
+ *	Christoph Hellwig
+ *	FUJITA Tomonori
+ *	Arne Redlich
+ *	Zhenyu Wang
+ */
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/inet.h>
+#include <linux/file.h>
+#include <linux/blkdev.h>
+#include <linux/crypto.h>
+#include <linux/delay.h>
+#include <linux/kfifo.h>
+#include <linux/scatterlist.h>
+#include <net/tcp.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_transport_iscsi.h>
+
+#include "iscsi_tcp.h"
+
+MODULE_AUTHOR("Mike Christie <michaelc@cs.wisc.edu>, "
+	      "Dmitry Yusupov <dmitry_yus@yahoo.com>, "
+	      "Alex Aizman <itn780@yahoo.com>");
+MODULE_DESCRIPTION("iSCSI/TCP data-path");
+MODULE_LICENSE("GPL");
+#undef DEBUG_TCP
+
+#ifdef DEBUG_TCP
+#define debug_tcp(fmt...) printk(KERN_INFO "tcp: " fmt)
+#else
+#define debug_tcp(fmt...)
+#endif
+
+static int iscsi_tcp_hdr_recv_done(struct iscsi_tcp_conn *tcp_conn,
+				   struct iscsi_segment *segment);
+
+/*
+ * Scatterlist handling: inside the iscsi_segment, we
+ * remember an index into the scatterlist, and set data/size
+ * to the current scatterlist entry. For highmem pages, we
+ * kmap as needed.
+ *
+ * Note that the page is unmapped when we return from
+ * TCP's data_ready handler, so we may end up mapping and
+ * unmapping the same page repeatedly. The whole reason
+ * for this is that we shouldn't keep the page mapped
+ * outside the softirq.
+ */
+
+/**
+ * iscsi_tcp_segment_init_sg - init indicated scatterlist entry
+ * @segment: the buffer object
+ * @sg: scatterlist
+ * @offset: byte offset into that sg entry
+ *
+ * This function sets up the segment so that subsequent
+ * data is copied to the indicated sg entry, at the given
+ * offset.
+ */
+static inline void
+iscsi_tcp_segment_init_sg(struct iscsi_segment *segment,
+			  struct scatterlist *sg, unsigned int offset)
+{
+	segment->sg = sg;
+	segment->sg_offset = offset;
+	segment->size = min(sg->length - offset,
+			    segment->total_size - segment->total_copied);
+	segment->data = NULL;
+}
+
+/**
+ * iscsi_tcp_segment_map - map the current S/G page
+ * @segment: iscsi_segment
+ * @recv: 1 if called from recv path
+ *
+ * We only need to possibly kmap data if scatter lists are being used,
+ * because the iscsi passthrough and internal IO paths will never use high
+ * mem pages.
+ */
+static void iscsi_tcp_segment_map(struct iscsi_segment *segment, int recv)
+{
+	struct scatterlist *sg;
+
+	if (segment->data != NULL || !segment->sg)
+		return;
+
+	sg = segment->sg;
+	BUG_ON(segment->sg_mapped);
+	BUG_ON(sg->length == 0);
+
+	/*
+	 * If the page count is greater than one it is ok to send
+	 * to the network layer's zero copy send path. If not, we
+	 * have to use the slow sendmsg path. We always map for the
+	 * recv path.
+	 */
+	if (page_count(sg_page(sg)) >= 1 && !recv)
+		return;
+
+	debug_tcp("iscsi_tcp_segment_map %s %p\n", recv ? "recv" : "xmit",
+		  segment);
+	segment->sg_mapped = kmap_atomic(sg_page(sg), KM_SOFTIRQ0);
+	segment->data = segment->sg_mapped + sg->offset + segment->sg_offset;
+}
+
+void iscsi_tcp_segment_unmap(struct iscsi_segment *segment)
+{
+	debug_tcp("iscsi_tcp_segment_unmap %p\n", segment);
+
+	if (segment->sg_mapped) {
+		debug_tcp("iscsi_tcp_segment_unmap valid\n");
+		kunmap_atomic(segment->sg_mapped, KM_SOFTIRQ0);
+		segment->sg_mapped = NULL;
+		segment->data = NULL;
+	}
+}
+EXPORT_SYMBOL_GPL(iscsi_tcp_segment_unmap);
+
+/*
+ * Splice the digest buffer into the buffer
+ */
+static inline void
+iscsi_tcp_segment_splice_digest(struct iscsi_segment *segment, void *digest)
+{
+	segment->data = digest;
+	segment->digest_len = ISCSI_DIGEST_SIZE;
+	segment->total_size += ISCSI_DIGEST_SIZE;
+	segment->size = ISCSI_DIGEST_SIZE;
+	segment->copied = 0;
+	segment->sg = NULL;
+	segment->hash = NULL;
+}
+
+/**
+ * iscsi_tcp_segment_done - check whether the segment is complete
+ * @tcp_conn: iscsi tcp connection
+ * @segment: iscsi segment to check
+ * @recv: set to one if this is called from the recv path
+ * @copied: number of bytes copied
+ *
+ * Check if we're done receiving this segment. If the receive
+ * buffer is full but we expect more data, move on to the
+ * next entry in the scatterlist.
+ *
+ * If the amount of data we received isn't a multiple of 4,
+ * we will transparently receive the pad bytes, too.
+ *
+ * This function must be re-entrant.
+ */
+int iscsi_tcp_segment_done(struct iscsi_tcp_conn *tcp_conn,
+			   struct iscsi_segment *segment, int recv,
+			   unsigned copied)
+{
+	static unsigned char padbuf[ISCSI_PAD_LEN];
+	struct scatterlist sg;
+	unsigned int pad;
+
+	debug_tcp("copied %u %u size %u %s\n", segment->copied, copied,
+		  segment->size, recv ? "recv" : "xmit");
+	if (segment->hash && copied) {
+		/*
+		 * If a segment is kmapped we must unmap it before sending
+		 * to the crypto layer since that will try to kmap it again.
+		 */
+		iscsi_tcp_segment_unmap(segment);
+
+		if (!segment->data) {
+			sg_init_table(&sg, 1);
+			sg_set_page(&sg, sg_page(segment->sg), copied,
+				    segment->copied + segment->sg_offset +
+							segment->sg->offset);
+		} else
+			sg_init_one(&sg, segment->data + segment->copied,
+				    copied);
+		crypto_hash_update(segment->hash, &sg, copied);
+	}
+
+	segment->copied += copied;
+	if (segment->copied < segment->size) {
+		iscsi_tcp_segment_map(segment, recv);
+		return 0;
+	}
+
+	segment->total_copied += segment->copied;
+	segment->copied = 0;
+	segment->size = 0;
+
+	/* Unmap the current scatterlist page, if there is one. */
+	iscsi_tcp_segment_unmap(segment);
+
+	/* Do we have more scatterlist entries? */
+	debug_tcp("total copied %u total size %u\n", segment->total_copied,
+		   segment->total_size);
+	if (segment->total_copied < segment->total_size) {
+		/* Proceed to the next entry in the scatterlist. */
+		iscsi_tcp_segment_init_sg(segment, sg_next(segment->sg),
+					  0);
+		iscsi_tcp_segment_map(segment, recv);
+		BUG_ON(segment->size == 0);
+		return 0;
+	}
+
+	/* Do we need to handle padding? */
+	if (!(tcp_conn->iscsi_conn->session->tt->caps & CAP_PADDING_OFFLOAD)) {
+		pad = iscsi_padding(segment->total_copied);
+		if (pad != 0) {
+			debug_tcp("consume %d pad bytes\n", pad);
+			segment->total_size += pad;
+			segment->size = pad;
+			segment->data = padbuf;
+			return 0;
+		}
+	}
+
+	/*
+	 * Set us up for transferring the data digest. hdr digest
+	 * is completely handled in hdr done function.
+	 */
+	if (segment->hash) {
+		crypto_hash_final(segment->hash, segment->digest);
+		iscsi_tcp_segment_splice_digest(segment,
+				 recv ? segment->recv_digest : segment->digest);
+		return 0;
+	}
+
+	return 1;
+}
+EXPORT_SYMBOL_GPL(iscsi_tcp_segment_done);
+
+/**
+ * iscsi_tcp_segment_recv - copy data to segment
+ * @tcp_conn: the iSCSI TCP connection
+ * @segment: the buffer to copy to
+ * @ptr: data pointer
+ * @len: amount of data available
+ *
+ * This function copies up to @len bytes to the
+ * given buffer, and returns the number of bytes
+ * consumed, which can actually be less than @len.
+ *
+ * If hash digest is enabled, the function will update the
+ * hash while copying.
+ * Combining these two operations doesn't buy us a lot (yet),
+ * but in the future we could implement combined copy+crc,
+ * just the way we do for network layer checksums.
+ */
+static int
+iscsi_tcp_segment_recv(struct iscsi_tcp_conn *tcp_conn,
+		       struct iscsi_segment *segment, const void *ptr,
+		       unsigned int len)
+{
+	unsigned int copy = 0, copied = 0;
+
+	while (!iscsi_tcp_segment_done(tcp_conn, segment, 1, copy)) {
+		if (copied == len) {
+			debug_tcp("iscsi_tcp_segment_recv copied %d bytes\n",
+				  len);
+			break;
+		}
+
+		copy = min(len - copied, segment->size - segment->copied);
+		debug_tcp("iscsi_tcp_segment_recv copying %d\n", copy);
+		memcpy(segment->data + segment->copied, ptr + copied, copy);
+		copied += copy;
+	}
+	return copied;
+}
+
+inline void
+iscsi_tcp_dgst_header(struct hash_desc *hash, const void *hdr, size_t hdrlen,
+		      unsigned char digest[ISCSI_DIGEST_SIZE])
+{
+	struct scatterlist sg;
+
+	sg_init_one(&sg, hdr, hdrlen);
+	crypto_hash_digest(hash, &sg, hdrlen, digest);
+}
+EXPORT_SYMBOL_GPL(iscsi_tcp_dgst_header);
+
+static inline int
+iscsi_tcp_dgst_verify(struct iscsi_tcp_conn *tcp_conn,
+		      struct iscsi_segment *segment)
+{
+	if (!segment->digest_len)
+		return 1;
+
+	if (memcmp(segment->recv_digest, segment->digest,
+		   segment->digest_len)) {
+		debug_scsi("digest mismatch\n");
+		return 0;
+	}
+
+	return 1;
+}
+
+/*
+ * Helper function to set up segment buffer
+ */
+static inline void
+__iscsi_segment_init(struct iscsi_segment *segment, size_t size,
+		     iscsi_segment_done_fn_t *done, struct hash_desc *hash)
+{
+	memset(segment, 0, sizeof(*segment));
+	segment->total_size = size;
+	segment->done = done;
+
+	if (hash) {
+		segment->hash = hash;
+		crypto_hash_init(hash);
+	}
+}
+
+inline void
+iscsi_segment_init_linear(struct iscsi_segment *segment, void *data,
+			  size_t size, iscsi_segment_done_fn_t *done,
+			  struct hash_desc *hash)
+{
+	__iscsi_segment_init(segment, size, done, hash);
+	segment->data = data;
+	segment->size = size;
+}
+EXPORT_SYMBOL_GPL(iscsi_segment_init_linear);
+
+inline int
+iscsi_segment_seek_sg(struct iscsi_segment *segment,
+		      struct scatterlist *sg_list, unsigned int sg_count,
+		      unsigned int offset, size_t size,
+		      iscsi_segment_done_fn_t *done, struct hash_desc *hash)
+{
+	struct scatterlist *sg;
+	unsigned int i;
+
+	debug_scsi("iscsi_segment_seek_sg offset %u size %llu\n",
+		  offset, size);
+	__iscsi_segment_init(segment, size, done, hash);
+	for_each_sg(sg_list, sg, sg_count, i) {
+		debug_scsi("sg %d, len %u offset %u\n", i, sg->length,
+			   sg->offset);
+		if (offset < sg->length) {
+			iscsi_tcp_segment_init_sg(segment, sg, offset);
+			return 0;
+		}
+		offset -= sg->length;
+	}
+
+	return ISCSI_ERR_DATA_OFFSET;
+}
+EXPORT_SYMBOL_GPL(iscsi_segment_seek_sg);
+
+/**
+ * iscsi_tcp_hdr_recv_prep - prep segment for hdr reception
+ * @tcp_conn: iscsi connection to prep for
+ *
+ * This function always passes NULL for the hash argument, because when this
+ * function is called we do not yet know the final size of the header and want
+ * to delay the digest processing until we know that.
+ */
+void iscsi_tcp_hdr_recv_prep(struct iscsi_tcp_conn *tcp_conn)
+{
+	debug_tcp("iscsi_tcp_hdr_recv_prep(%p%s)\n", tcp_conn,
+		  tcp_conn->iscsi_conn->hdrdgst_en ? ", digest enabled" : "");
+	iscsi_segment_init_linear(&tcp_conn->in.segment,
+				tcp_conn->in.hdr_buf, sizeof(struct iscsi_hdr),
+				iscsi_tcp_hdr_recv_done, NULL);
+}
+EXPORT_SYMBOL_GPL(iscsi_tcp_hdr_recv_prep);
+
+/*
+ * Handle incoming reply to any other type of command
+ */
+static int
+iscsi_tcp_data_recv_done(struct iscsi_tcp_conn *tcp_conn,
+			 struct iscsi_segment *segment)
+{
+	struct iscsi_conn *conn = tcp_conn->iscsi_conn;
+	int rc = 0;
+
+	if (!iscsi_tcp_dgst_verify(tcp_conn, segment))
+		return ISCSI_ERR_DATA_DGST;
+
+	rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr,
+			conn->data, tcp_conn->in.datalen);
+	if (rc)
+		return rc;
+
+	iscsi_tcp_hdr_recv_prep(tcp_conn);
+	return 0;
+}
+
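+/**
+ * iscsi_tcp_data_recv_prep - prep segment to receive data into conn buffer
+ * @tcp_conn: iscsi tcp connection
+ *
+ * Points the receive segment at the connection's own data buffer,
+ * hashing on the fly when data digests are enabled and not offloaded.
+ */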
+static void
+iscsi_tcp_data_recv_prep(struct iscsi_tcp_conn *tcp_conn)
+{
+	struct iscsi_conn *conn = tcp_conn->iscsi_conn;
+	struct hash_desc *rx_hash = NULL;
+
+	if (conn->datadgst_en &&
+	    !(conn->session->tt->caps & CAP_DIGEST_OFFLOAD))
+		rx_hash = tcp_conn->rx_hash;
+
+	iscsi_segment_init_linear(&tcp_conn->in.segment,
+				conn->data, tcp_conn->in.datalen,
+				iscsi_tcp_data_recv_done, rx_hash);
+}
+
+/**
+ * iscsi_tcp_cleanup_task - free tcp_task resources
+ * @task: iscsi task
+ *
+ * Must be called with the session lock held.
+ */
+void iscsi_tcp_cleanup_task(struct iscsi_task *task)
+{
+	struct iscsi_tcp_task *tcp_task = task->dd_data;
+	struct iscsi_r2t_info *r2t;
+
+	/* nothing to do for mgmt or pending tasks */
+	if (!task->sc || task->state == ISCSI_TASK_PENDING)
+		return;
+
+	/* flush task's r2t queues */
+	while (__kfifo_get(tcp_task->r2tqueue, (void*)&r2t, sizeof(void*))) {
+		__kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
+			    sizeof(void*));
+		debug_scsi("iscsi_tcp_cleanup_task pending r2t dropped\n");
+	}
+
+	r2t = tcp_task->r2t;
+	if (r2t != NULL) {
+		__kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
+			    sizeof(void*));
+		tcp_task->r2t = NULL;
+	}
+}
+EXPORT_SYMBOL_GPL(iscsi_tcp_cleanup_task);
+
+/**
+ * iscsi_tcp_data_in - SCSI Data-In Response processing
+ * @conn: iscsi connection
+ * @task: scsi command task
+ */
+static int iscsi_tcp_data_in(struct iscsi_conn *conn, struct iscsi_task *task)
+{
+	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+	struct iscsi_tcp_task *tcp_task = task->dd_data;
+	struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)tcp_conn->in.hdr;
+	int datasn = be32_to_cpu(rhdr->datasn);
+	unsigned total_in_length = scsi_in(task->sc)->length;
+
+	iscsi_update_cmdsn(conn->session, (struct iscsi_nopin*)rhdr);
+	if (tcp_conn->in.datalen == 0)
+		return 0;
+
+	if (tcp_task->exp_datasn != datasn) {
+		debug_tcp("%s: task->exp_datasn(%d) != rhdr->datasn(%d)\n",
+		          __func__, tcp_task->exp_datasn, datasn);
+		return ISCSI_ERR_DATASN;
+	}
+
+	tcp_task->exp_datasn++;
+
+	tcp_task->data_offset = be32_to_cpu(rhdr->offset);
+	if (tcp_task->data_offset + tcp_conn->in.datalen > total_in_length) {
+		debug_tcp("%s: data_offset(%d) + data_len(%d) > total_length_in(%d)\n",
+		          __func__, tcp_task->data_offset,
+		          tcp_conn->in.datalen, total_in_length);
+		return ISCSI_ERR_DATA_OFFSET;
+	}
+
+	conn->datain_pdus_cnt++;
+	return 0;
+}
+
+/**
+ * iscsi_tcp_r2t_rsp - iSCSI R2T Response processing
+ * @conn: iscsi connection
+ * @task: scsi command task
+ */
+static int iscsi_tcp_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
+{
+	struct iscsi_session *session = conn->session;
+	struct iscsi_tcp_task *tcp_task = task->dd_data;
+	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+	struct iscsi_r2t_rsp *rhdr = (struct iscsi_r2t_rsp *)tcp_conn->in.hdr;
+	struct iscsi_r2t_info *r2t;
+	int r2tsn = be32_to_cpu(rhdr->r2tsn);
+	int rc;
+
+	if (tcp_conn->in.datalen) {
+		iscsi_conn_printk(KERN_ERR, conn,
+				  "invalid R2t with datalen %d\n",
+				  tcp_conn->in.datalen);
+		return ISCSI_ERR_DATALEN;
+	}
+
+	if (tcp_task->exp_datasn != r2tsn) {
+		debug_tcp("%s: task->exp_datasn(%d) != rhdr->r2tsn(%d)\n",
+		          __func__, tcp_task->exp_datasn, r2tsn);
+		return ISCSI_ERR_R2TSN;
+	}
+
+	/* fill-in new R2T associated with the task */
+	iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
+
+	if (!task->sc || session->state != ISCSI_STATE_LOGGED_IN) {
+		iscsi_conn_printk(KERN_INFO, conn,
+				  "dropping R2T itt %d in recovery.\n",
+				  task->itt);
+		return 0;
+	}
+
+	rc = __kfifo_get(tcp_task->r2tpool.queue, (void*)&r2t, sizeof(void*));
+	if (!rc) {
+		iscsi_conn_printk(KERN_ERR, conn, "Could not allocate R2T. "
+				  "Target has sent more R2Ts than it "
+				  "negotiated for or driver has has leaked.\n");
+		return ISCSI_ERR_PROTO;
+	}
+
+	r2t->exp_statsn = rhdr->statsn;
+	r2t->data_length = be32_to_cpu(rhdr->data_length);
+	if (r2t->data_length == 0) {
+		iscsi_conn_printk(KERN_ERR, conn,
+				  "invalid R2T with zero data len\n");
+		__kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
+			    sizeof(void*));
+		return ISCSI_ERR_DATALEN;
+	}
+
+	if (r2t->data_length > session->max_burst)
+		debug_scsi("invalid R2T with data len %u and max burst %u."
+			   "Attempting to execute request.\n",
+			    r2t->data_length, session->max_burst);
+
+	r2t->data_offset = be32_to_cpu(rhdr->data_offset);
+	if (r2t->data_offset + r2t->data_length > scsi_out(task->sc)->length) {
+		iscsi_conn_printk(KERN_ERR, conn,
+				  "invalid R2T with data len %u at offset %u "
+				  "and total length %d\n", r2t->data_length,
+				  r2t->data_offset, scsi_out(task->sc)->length);
+		__kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
+			    sizeof(void*));
+		return ISCSI_ERR_DATALEN;
+	}
+
+	r2t->ttt = rhdr->ttt; /* no flip */
+	r2t->datasn = 0;
+	r2t->sent = 0;
+
+	tcp_task->exp_datasn = r2tsn + 1;
+	__kfifo_put(tcp_task->r2tqueue, (void*)&r2t, sizeof(void*));
+	conn->r2t_pdus_cnt++;
+
+	iscsi_requeue_task(task);
+	return 0;
+}
+
+/*
+ * Handle incoming reply to DataIn command
+ */
+static int
+iscsi_tcp_process_data_in(struct iscsi_tcp_conn *tcp_conn,
+			  struct iscsi_segment *segment)
+{
+	struct iscsi_conn *conn = tcp_conn->iscsi_conn;
+	struct iscsi_hdr *hdr = tcp_conn->in.hdr;
+	int rc;
+
+	if (!iscsi_tcp_dgst_verify(tcp_conn, segment))
+		return ISCSI_ERR_DATA_DGST;
+
+	/* check for non-exceptional status */
+	if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
+		rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr, NULL, 0);
+		if (rc)
+			return rc;
+	}
+
+	iscsi_tcp_hdr_recv_prep(tcp_conn);
+	return 0;
+}
+
+/**
+ * iscsi_tcp_hdr_dissect - process PDU header
+ * @conn: iSCSI connection
+ * @hdr: PDU header
+ *
+ * This function analyzes the header of the PDU received,
+ * and performs several sanity checks. If the PDU is accompanied
+ * by data, the receive buffer is set up to copy the incoming data
+ * to the correct location.
+ */
+static int
+iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
+{
+	int rc = 0, opcode, ahslen;
+	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+	struct iscsi_task *task;
+
+	/* verify PDU length */
+	tcp_conn->in.datalen = ntoh24(hdr->dlength);
+	if (tcp_conn->in.datalen > conn->max_recv_dlength) {
+		iscsi_conn_printk(KERN_ERR, conn,
+				  "iscsi_tcp: datalen %d > %d\n",
+				  tcp_conn->in.datalen, conn->max_recv_dlength);
+		return ISCSI_ERR_DATALEN;
+	}
+
+	/* Additional header segments. So far, we don't
+	 * process additional headers.
+	 */
+	ahslen = hdr->hlength << 2;
+
+	opcode = hdr->opcode & ISCSI_OPCODE_MASK;
+	/* verify itt (itt encoding: age+cid+itt) */
+	rc = iscsi_verify_itt(conn, hdr->itt);
+	if (rc)
+		return rc;
+
+	debug_tcp("opcode 0x%x ahslen %d datalen %d\n",
+		  opcode, ahslen, tcp_conn->in.datalen);
+
+	switch (opcode) {
+	case ISCSI_OP_SCSI_DATA_IN:
+		spin_lock(&conn->session->lock);
+		task = iscsi_itt_to_ctask(conn, hdr->itt);
+		if (!task)
+			rc = ISCSI_ERR_BAD_ITT;
+		else
+			rc = iscsi_tcp_data_in(conn, task);
+		if (rc) {
+			spin_unlock(&conn->session->lock);
+			break;
+		}
+
+		if (tcp_conn->in.datalen) {
+			struct iscsi_tcp_task *tcp_task = task->dd_data;
+			struct hash_desc *rx_hash = NULL;
+			struct scsi_data_buffer *sdb = scsi_in(task->sc);
+
+			/*
+			 * Setup copy of Data-In into the Scsi_Cmnd
+			 * Scatterlist case:
+			 * We set up the iscsi_segment to point to the next
+			 * scatterlist entry to copy to. As we go along,
+			 * we move on to the next scatterlist entry and
+			 * update the digest per-entry.
+			 */
+			if (conn->datadgst_en &&
+			    !(conn->session->tt->caps & CAP_DIGEST_OFFLOAD))
+				rx_hash = tcp_conn->rx_hash;
+
+			debug_tcp("iscsi_tcp_begin_data_in(%p, offset=%d, "
+				  "datalen=%d)\n", tcp_conn,
+				  tcp_task->data_offset,
+				  tcp_conn->in.datalen);
+			rc = iscsi_segment_seek_sg(&tcp_conn->in.segment,
+						   sdb->table.sgl,
+						   sdb->table.nents,
+						   tcp_task->data_offset,
+						   tcp_conn->in.datalen,
+						   iscsi_tcp_process_data_in,
+						   rx_hash);
+			spin_unlock(&conn->session->lock);
+			return rc;
+		}
+		rc = __iscsi_complete_pdu(conn, hdr, NULL, 0);
+		spin_unlock(&conn->session->lock);
+		break;
+	case ISCSI_OP_SCSI_CMD_RSP:
+		if (tcp_conn->in.datalen) {
+			iscsi_tcp_data_recv_prep(tcp_conn);
+			return 0;
+		}
+		rc = iscsi_complete_pdu(conn, hdr, NULL, 0);
+		break;
+	case ISCSI_OP_R2T:
+		spin_lock(&conn->session->lock);
+		task = iscsi_itt_to_ctask(conn, hdr->itt);
+		if (!task)
+			rc = ISCSI_ERR_BAD_ITT;
+		else if (ahslen)
+			rc = ISCSI_ERR_AHSLEN;
+		else if (task->sc->sc_data_direction == DMA_TO_DEVICE)
+			rc = iscsi_tcp_r2t_rsp(conn, task);
+		else
+			rc = ISCSI_ERR_PROTO;
+		spin_unlock(&conn->session->lock);
+		break;
+	case ISCSI_OP_LOGIN_RSP:
+	case ISCSI_OP_TEXT_RSP:
+	case ISCSI_OP_REJECT:
+	case ISCSI_OP_ASYNC_EVENT:
+		/*
+		 * It is possible that we could get a PDU with a buffer larger
+		 * than 8K, but there are no targets that currently do this.
+		 * For now we fail until we find a vendor that needs it
+		 */
+		if (ISCSI_DEF_MAX_RECV_SEG_LEN < tcp_conn->in.datalen) {
+			iscsi_conn_printk(KERN_ERR, conn,
+					  "iscsi_tcp: received buffer of "
+					  "len %u but conn buffer is only %u "
+					  "(opcode %0x)\n",
+					  tcp_conn->in.datalen,
+					  ISCSI_DEF_MAX_RECV_SEG_LEN, opcode);
+			rc = ISCSI_ERR_PROTO;
+			break;
+		}
+
+		/* If there's data coming in with the response,
+		 * receive it to the connection's buffer.
+		 */
+		if (tcp_conn->in.datalen) {
+			iscsi_tcp_data_recv_prep(tcp_conn);
+			return 0;
+		}
+	/* fall through */
+	case ISCSI_OP_LOGOUT_RSP:
+	case ISCSI_OP_NOOP_IN:
+	case ISCSI_OP_SCSI_TMFUNC_RSP:
+		rc = iscsi_complete_pdu(conn, hdr, NULL, 0);
+		break;
+	default:
+		rc = ISCSI_ERR_BAD_OPCODE;
+		break;
+	}
+
+	if (rc == 0) {
+		/* Anything that comes with data should have
+		 * been handled above. */
+		if (tcp_conn->in.datalen)
+			return ISCSI_ERR_PROTO;
+		iscsi_tcp_hdr_recv_prep(tcp_conn);
+	}
+
+	return rc;
+}
+
+/**
+ * iscsi_tcp_hdr_recv_done - process PDU header
+ * @tcp_conn: iscsi tcp connection
+ * @segment: segment that received the header
+ *
+ * This is the callback invoked when the PDU header has
+ * been received. If the header is followed by additional
+ * header segments, we go back for more data.
+ */
+static int
+iscsi_tcp_hdr_recv_done(struct iscsi_tcp_conn *tcp_conn,
+			struct iscsi_segment *segment)
+{
+	struct iscsi_conn *conn = tcp_conn->iscsi_conn;
+	struct iscsi_hdr *hdr;
+
+	/* Check if there are additional header segments
+	 * *prior* to computing the digest, because we
+	 * may need to go back to the caller for more.
+	 */
+	hdr = (struct iscsi_hdr *) tcp_conn->in.hdr_buf;
+	if (segment->copied == sizeof(struct iscsi_hdr) && hdr->hlength) {
+		/* Bump the header length - the caller will
+		 * just loop around and get the AHS for us, and
+		 * call again. */
+		unsigned int ahslen = hdr->hlength << 2;
+
+		/* Make sure we don't overflow */
+		if (sizeof(*hdr) + ahslen > sizeof(tcp_conn->in.hdr_buf))
+			return ISCSI_ERR_AHSLEN;
+
+		segment->total_size += ahslen;
+		segment->size += ahslen;
+		return 0;
+	}
+
+	/* We're done processing the header. See if we're doing
+	 * header digests; if so, set up the recv_digest buffer
+	 * and go back for more. */
+	if (conn->hdrdgst_en &&
+	    !(conn->session->tt->caps & CAP_DIGEST_OFFLOAD)) {
+		if (segment->digest_len == 0) {
+			/*
+			 * Even if we offload the digest processing we
+			 * splice it in so we can increment the skb/segment
+			 * counters in preparation for the data segment.
+			 */
+			iscsi_tcp_segment_splice_digest(segment,
+							segment->recv_digest);
+			return 0;
+		}
+
+		iscsi_tcp_dgst_header(tcp_conn->rx_hash, hdr,
+				      segment->total_copied - ISCSI_DIGEST_SIZE,
+				      segment->digest);
+
+		if (!iscsi_tcp_dgst_verify(tcp_conn, segment))
+			return ISCSI_ERR_HDR_DGST;
+	}
+
+	tcp_conn->in.hdr = hdr;
+	return iscsi_tcp_hdr_dissect(conn, hdr);
+}
+
+/**
+ * iscsi_tcp_recv_segment_is_hdr - tests if we are reading in a header
+ * @tcp_conn: iscsi tcp conn
+ *
+ * Returns non-zero if we are currently processing or set up to process
+ * a header.
+ */
+inline int iscsi_tcp_recv_segment_is_hdr(struct iscsi_tcp_conn *tcp_conn)
+{
+	return tcp_conn->in.segment.done == iscsi_tcp_hdr_recv_done;
+}
+EXPORT_SYMBOL_GPL(iscsi_tcp_recv_segment_is_hdr);
+
+/**
+ * iscsi_tcp_recv_skb - Process skb
+ * @conn: iscsi connection
+ * @skb: network buffer with header and/or data segment
+ * @offset: offset in skb
+ * @offloaded: bool indicating if the transfer was offloaded
+ * @status: iscsi TCP transfer status, set on return
+ *
+ * Returns the number of bytes consumed from the skb and reports the
+ * state of the transfer through @status.
+ */
+int iscsi_tcp_recv_skb(struct iscsi_conn *conn, struct sk_buff *skb,
+		       unsigned int offset, bool offloaded, int *status)
+{
+	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+	struct iscsi_segment *segment = &tcp_conn->in.segment;
+	struct skb_seq_state seq;
+	unsigned int consumed = 0;
+	int rc = 0;
+
+	debug_tcp("in %d bytes\n", skb->len - offset);
+
+	if (unlikely(conn->suspend_rx)) {
+		debug_tcp("conn %d Rx suspended!\n", conn->id);
+		*status = ISCSI_TCP_SUSPENDED;
+		return 0;
+	}
+
+	if (offloaded) {
+		segment->total_copied = segment->total_size;
+		goto segment_done;
+	}
+
+	skb_prepare_seq_read(skb, offset, skb->len, &seq);
+	while (1) {
+		unsigned int avail;
+		const u8 *ptr;
+
+		avail = skb_seq_read(consumed, &ptr, &seq);
+		if (avail == 0) {
+			debug_tcp("no more data avail. Consumed %d\n",
+				  consumed);
+			*status = ISCSI_TCP_SKB_DONE;
+			skb_abort_seq_read(&seq);
+			goto skb_done;
+		}
+		BUG_ON(segment->copied >= segment->size);
+
+		debug_tcp("skb %p ptr=%p avail=%u\n", skb, ptr, avail);
+		rc = iscsi_tcp_segment_recv(tcp_conn, segment, ptr, avail);
+		BUG_ON(rc == 0);
+		consumed += rc;
+
+		if (segment->total_copied >= segment->total_size) {
+			skb_abort_seq_read(&seq);
+			goto segment_done;
+		}
+	}
+
+segment_done:
+	*status = ISCSI_TCP_SEGMENT_DONE;
+	debug_tcp("segment done\n");
+	rc = segment->done(tcp_conn, segment);
+	if (rc != 0) {
+		*status = ISCSI_TCP_CONN_ERR;
+		debug_tcp("Error receiving PDU, errno=%d\n", rc);
+		iscsi_conn_failure(conn, rc);
+		return 0;
+	}
+	/* The done() functions sets up the next segment. */
+
+skb_done:
+	conn->rxdata_octets += consumed;
+	return consumed;
+}
+EXPORT_SYMBOL_GPL(iscsi_tcp_recv_skb);
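+
+/*
+ * Illustrative sketch (an assumption, not part of this patch): a
+ * software iSCSI transport would typically drive iscsi_tcp_recv_skb()
+ * from its tcp data_ready callback, roughly:
+ *
+ *	int status;
+ *	unsigned int consumed;
+ *
+ *	consumed = iscsi_tcp_recv_skb(conn, skb, offset, false, &status);
+ *	if (status == ISCSI_TCP_CONN_ERR)
+ *		return;
+ *
+ * with conn, skb and offset provided by the hypothetical caller; on
+ * ISCSI_TCP_CONN_ERR the lib has already failed the connection.
+ */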
+
+/**
+ * iscsi_tcp_task_init - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
+ * @conn: iscsi connection
+ * @task: scsi command task
+ * @sc: scsi command
+ */
+int iscsi_tcp_task_init(struct iscsi_task *task)
+{
+	struct iscsi_tcp_task *tcp_task = task->dd_data;
+	struct iscsi_conn *conn = task->conn;
+	struct scsi_cmnd *sc = task->sc;
+	int err;
+
+	if (!sc) {
+		/*
+		 * mgmt tasks do not have a scatterlist since they come
+		 * in from the iscsi interface.
+		 */
+		debug_scsi("mtask deq [cid %d itt 0x%x]\n", conn->id,
+			   task->itt);
+
+		return conn->session->tt->init_pdu(task, 0, task->data_count);
+	}
+
+	BUG_ON(__kfifo_len(tcp_task->r2tqueue));
+	tcp_task->exp_datasn = 0;
+
+	/* Prepare PDU, optionally w/ immediate data */
+	debug_scsi("task deq [cid %d itt 0x%x imm %d unsol %d]\n",
+		    conn->id, task->itt, task->imm_count,
+		    task->unsol_r2t.data_length);
+
+	err = conn->session->tt->init_pdu(task, 0, task->imm_count);
+	if (err)
+		return err;
+	task->imm_count = 0;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(iscsi_tcp_task_init);
+
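+/**
+ * iscsi_tcp_get_curr_r2t - pick the R2T to service next
+ * @task: iscsi task with outstanding write data
+ *
+ * Returns the unsolicited R2T while unsolicited data is still owed,
+ * otherwise the current solicited R2T, fetching the next one from
+ * the r2tqueue once the previous one has been sent in full. Returns
+ * NULL when there is nothing to send yet.
+ */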
+static struct iscsi_r2t_info *iscsi_tcp_get_curr_r2t(struct iscsi_task *task)
+{
+	struct iscsi_session *session = task->conn->session;
+	struct iscsi_tcp_task *tcp_task = task->dd_data;
+	struct iscsi_r2t_info *r2t = NULL;
+
+	if (iscsi_task_has_unsol_data(task))
+		r2t = &task->unsol_r2t;
+	else {
+		spin_lock_bh(&session->lock);
+		if (tcp_task->r2t) {
+			r2t = tcp_task->r2t;
+			/* Continue with this R2T? */
+			if (r2t->data_length <= r2t->sent) {
+				debug_scsi("  done with r2t %p\n", r2t);
+				__kfifo_put(tcp_task->r2tpool.queue,
+					    (void *)&tcp_task->r2t,
+					    sizeof(void *));
+				tcp_task->r2t = r2t = NULL;
+			}
+		}
+
+		if (r2t == NULL) {
+			__kfifo_get(tcp_task->r2tqueue,
+				    (void *)&tcp_task->r2t, sizeof(void *));
+			r2t = tcp_task->r2t;
+		}
+		spin_unlock_bh(&session->lock);
+	}
+
+	return r2t;
+}
+
+/**
+ * iscsi_tcp_task_xmit - xmit normal PDU task
+ * @task: iscsi command task
+ *
+ * We're expected to return 0 when everything was transmitted successfully,
+ * -EAGAIN if there's still data in the queue, or != 0 for any other kind
+ * of error.
+ */
+int iscsi_tcp_task_xmit(struct iscsi_task *task)
+{
+	struct iscsi_conn *conn = task->conn;
+	struct iscsi_session *session = conn->session;
+	struct iscsi_r2t_info *r2t;
+	int rc = 0;
+
+flush:
+	/* Flush any pending data first. */
+	rc = session->tt->xmit_pdu(task);
+	if (rc < 0)
+		return rc;
+
+	/* mgmt command */
+	if (!task->sc) {
+		if (task->hdr->itt == RESERVED_ITT)
+			iscsi_put_task(task);
+		return 0;
+	}
+
+	/* Are we done already? */
+	if (task->sc->sc_data_direction != DMA_TO_DEVICE)
+		return 0;
+
+	r2t = iscsi_tcp_get_curr_r2t(task);
+	if (r2t == NULL) {
+		/* Waiting for more R2Ts to arrive. */
+		debug_tcp("no R2Ts yet\n");
+		return 0;
+	}
+
+	rc = conn->session->tt->alloc_pdu(task, ISCSI_OP_SCSI_DATA_OUT);
+	if (rc)
+		return rc;
+	iscsi_prep_data_out_pdu(task, r2t, (struct iscsi_data *) task->hdr);
+
+	debug_scsi("sol dout %p [dsn %d itt 0x%x doff %d dlen %d]\n",
+		   r2t, r2t->datasn - 1, task->hdr->itt,
+		   r2t->data_offset + r2t->sent, r2t->data_count);
+
+	rc = conn->session->tt->init_pdu(task, r2t->data_offset + r2t->sent,
+					 r2t->data_count);
+	if (rc)
+		return rc;
+	r2t->sent += r2t->data_count;
+	goto flush;
+}
+EXPORT_SYMBOL_GPL(iscsi_tcp_task_xmit);
+
+struct iscsi_cls_conn *
+iscsi_tcp_conn_setup(struct iscsi_cls_session *cls_session, int dd_data_size,
+		      uint32_t conn_idx)
+
+{
+	struct iscsi_conn *conn;
+	struct iscsi_cls_conn *cls_conn;
+	struct iscsi_tcp_conn *tcp_conn;
+
+	cls_conn = iscsi_conn_setup(cls_session, sizeof(*tcp_conn), conn_idx);
+	if (!cls_conn)
+		return NULL;
+	conn = cls_conn->dd_data;
+	/*
+	 * due to strange issues with iser these are not set
+	 * in iscsi_conn_setup
+	 */
+	conn->max_recv_dlength = ISCSI_DEF_MAX_RECV_SEG_LEN;
+
+	tcp_conn = conn->dd_data;
+	tcp_conn->iscsi_conn = conn;
+
+	tcp_conn->dd_data = kzalloc(dd_data_size, GFP_KERNEL);
+	if (!tcp_conn->dd_data) {
+		iscsi_conn_teardown(cls_conn);
+		return NULL;
+	}
+	return cls_conn;
+}
+EXPORT_SYMBOL_GPL(iscsi_tcp_conn_setup);
+
+void iscsi_tcp_conn_teardown(struct iscsi_cls_conn *cls_conn)
+{
+	struct iscsi_conn *conn = cls_conn->dd_data;
+	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+
+	kfree(tcp_conn->dd_data);
+	iscsi_conn_teardown(cls_conn);
+}
+EXPORT_SYMBOL_GPL(iscsi_tcp_conn_teardown);
+
+int iscsi_tcp_r2tpool_alloc(struct iscsi_session *session)
+{
+	int i;
+	int cmd_i;
+
+	/*
+	 * initialize per-task: R2T pool and xmit queue
+	 */
+	for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
+		struct iscsi_task *task = session->cmds[cmd_i];
+		struct iscsi_tcp_task *tcp_task = task->dd_data;
+
+		/*
+		 * pre-allocated x2 as much r2ts to handle race when
+		 * target acks DataOut faster than we data_xmit() queues
+		 * could replenish r2tqueue.
+		 */
+
+		/* R2T pool */
+		if (iscsi_pool_init(&tcp_task->r2tpool,
+				    session->max_r2t * 2, NULL,
+				    sizeof(struct iscsi_r2t_info))) {
+			goto r2t_alloc_fail;
+		}
+
+		/* R2T xmit queue */
+		tcp_task->r2tqueue = kfifo_alloc(
+		      session->max_r2t * 4 * sizeof(void*), GFP_KERNEL, NULL);
+		if (tcp_task->r2tqueue == ERR_PTR(-ENOMEM)) {
+			iscsi_pool_free(&tcp_task->r2tpool);
+			goto r2t_alloc_fail;
+		}
+	}
+
+	return 0;
+
+r2t_alloc_fail:
+	for (i = 0; i < cmd_i; i++) {
+		struct iscsi_task *task = session->cmds[i];
+		struct iscsi_tcp_task *tcp_task = task->dd_data;
+
+		kfifo_free(tcp_task->r2tqueue);
+		iscsi_pool_free(&tcp_task->r2tpool);
+	}
+	return -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(iscsi_tcp_r2tpool_alloc);
+
+void iscsi_tcp_r2tpool_free(struct iscsi_session *session)
+{
+	int i;
+
+	for (i = 0; i < session->cmds_max; i++) {
+		struct iscsi_task *task = session->cmds[i];
+		struct iscsi_tcp_task *tcp_task = task->dd_data;
+
+		kfifo_free(tcp_task->r2tqueue);
+		iscsi_pool_free(&tcp_task->r2tpool);
+	}
+}
+EXPORT_SYMBOL_GPL(iscsi_tcp_r2tpool_free);
+
+void iscsi_tcp_conn_get_stats(struct iscsi_cls_conn *cls_conn,
+			      struct iscsi_stats *stats)
+{
+	struct iscsi_conn *conn = cls_conn->dd_data;
+
+	stats->txdata_octets = conn->txdata_octets;
+	stats->rxdata_octets = conn->rxdata_octets;
+	stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
+	stats->dataout_pdus = conn->dataout_pdus_cnt;
+	stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
+	stats->datain_pdus = conn->datain_pdus_cnt;
+	stats->r2t_pdus = conn->r2t_pdus_cnt;
+	stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
+	stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
+}
+EXPORT_SYMBOL_GPL(iscsi_tcp_conn_get_stats);
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 60a9e6e9384b..dcba267db711 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -29,8 +29,10 @@ struct lpfc_sli2_slim;
 #define LPFC_MAX_NS_RETRY	3	/* Number of retry attempts to contact
 					   the NameServer  before giving up. */
 #define LPFC_CMD_PER_LUN	3	/* max outstanding cmds per lun */
-#define LPFC_DEFAULT_SG_SEG_CNT	64	/* sg element count per scsi cmnd */
-#define LPFC_MAX_SG_SEG_CNT	256	/* sg element count per scsi cmnd */
+#define LPFC_DEFAULT_SG_SEG_CNT 64	/* sg element count per scsi cmnd */
+#define LPFC_DEFAULT_PROT_SG_SEG_CNT 4096 /* sg protection elements count */
+#define LPFC_MAX_SG_SEG_CNT	4096	/* sg element count per scsi cmnd */
+#define LPFC_MAX_PROT_SG_SEG_CNT 4096	/* prot sg element count per scsi cmd*/
 #define LPFC_IOCB_LIST_CNT	2250	/* list of IOCBs for fast-path usage. */
 #define LPFC_Q_RAMP_UP_INTERVAL 120     /* lun q_depth ramp up interval */
 #define LPFC_VNAME_LEN		100	/* vport symbolic name length */
@@ -354,8 +356,6 @@ struct lpfc_vport {
 	uint8_t load_flag;
 #define FC_LOADING		0x1	/* HBA in process of loading drvr */
 #define FC_UNLOADING		0x2	/* HBA in process of unloading drvr */
-	char  *vname;		        /* Application assigned name */
-
 	/* Vport Config Parameters */
 	uint32_t cfg_scan_down;
 	uint32_t cfg_lun_queue_depth;
@@ -376,7 +376,7 @@ struct lpfc_vport {
 
 	struct fc_vport *fc_vport;
 
-#ifdef CONFIG_LPFC_DEBUG_FS
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 	struct dentry *debug_disc_trc;
 	struct dentry *debug_nodelist;
 	struct dentry *vport_debugfs_root;
@@ -428,6 +428,7 @@ struct lpfc_hba {
 #define LPFC_SLI3_VPORT_TEARDOWN	0x04
 #define LPFC_SLI3_CRP_ENABLED		0x08
 #define LPFC_SLI3_INB_ENABLED		0x10
+#define LPFC_SLI3_BG_ENABLED		0x20
 	uint32_t iocb_cmd_size;
 	uint32_t iocb_rsp_size;
 
@@ -501,12 +502,14 @@ struct lpfc_hba {
 	uint32_t cfg_poll_tmo;
 	uint32_t cfg_use_msi;
 	uint32_t cfg_sg_seg_cnt;
+	uint32_t cfg_prot_sg_seg_cnt;
 	uint32_t cfg_sg_dma_buf_size;
 	uint64_t cfg_soft_wwnn;
 	uint64_t cfg_soft_wwpn;
 	uint32_t cfg_hba_queue_depth;
 	uint32_t cfg_enable_hba_reset;
 	uint32_t cfg_enable_hba_heartbeat;
+	uint32_t cfg_enable_bg;
 
 	lpfc_vpd_t vpd;		/* vital product data */
 
@@ -572,6 +575,9 @@ struct lpfc_hba {
 	uint64_t fc4InputRequests;
 	uint64_t fc4OutputRequests;
 	uint64_t fc4ControlRequests;
+	uint64_t bg_guard_err_cnt;
+	uint64_t bg_apptag_err_cnt;
+	uint64_t bg_reftag_err_cnt;
 
 	struct lpfc_sysfs_mbox sysfs_mbox;
 
@@ -594,6 +600,8 @@ struct lpfc_hba {
 
 	struct fc_host_statistics link_stats;
 	enum intr_type_t intr_type;
+	uint32_t intr_mode;
+#define LPFC_INTR_ERROR	0xFFFFFFFF
 	struct msix_entry msix_entries[LPFC_MSIX_VECTORS];
 
 	struct list_head port_list;
@@ -613,12 +621,14 @@ struct lpfc_hba {
 	unsigned long last_rsrc_error_time;
 	unsigned long last_ramp_down_time;
 	unsigned long last_ramp_up_time;
-#ifdef CONFIG_LPFC_DEBUG_FS
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 	struct dentry *hba_debugfs_root;
 	atomic_t debugfs_vport_count;
 	struct dentry *debug_hbqinfo;
 	struct dentry *debug_dumpHostSlim;
 	struct dentry *debug_dumpHBASlim;
+	struct dentry *debug_dumpData;   /* BlockGuard BPL*/
+	struct dentry *debug_dumpDif;    /* BlockGuard BPL*/
 	struct dentry *debug_slow_ring_trc;
 	struct lpfc_debugfs_trc *slow_ring_trc;
 	atomic_t slow_ring_trc_cnt;
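The enlarged LPFC_MAX_SG_SEG_CNT and the new LPFC_*_PROT_SG_SEG_CNT defines feed the DMA buffer sizing done in lpfc_get_cfgparam() further down: each pool element must hold the FCP command, the FCP response, and one BDE per data segment plus two, with the protection BDEs added on top when BlockGuard is enabled. A standalone sketch of that arithmetic, using placeholder sizes rather than the real struct layouts:

#include <stdio.h>

/* Placeholder sizes; the driver uses sizeof(struct fcp_cmnd) etc. */
#define FCP_CMND_SZ   32
#define FCP_RSP_SZ    24
#define ULP_BDE64_SZ  12

static unsigned int sg_dma_buf_size(unsigned int sg_seg_cnt,
				    unsigned int prot_sg_seg_cnt,
				    int enable_bg)
{
	unsigned int sz = FCP_CMND_SZ + FCP_RSP_SZ +
			  (sg_seg_cnt + 2) * ULP_BDE64_SZ;

	if (enable_bg)  /* protection BDEs share the same buffer */
		sz += prot_sg_seg_cnt * ULP_BDE64_SZ;
	return sz;
}

int main(void)
{
	printf("%u bytes per element\n", sg_dma_buf_size(64, 4096, 1));
	return 0;
}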
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index aa3d6277581d..40cf0f4f327f 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -96,6 +96,61 @@ lpfc_drvr_version_show(struct device *dev, struct device_attribute *attr,
 	return snprintf(buf, PAGE_SIZE, LPFC_MODULE_DESC "\n");
 }
 
+static ssize_t
+lpfc_bg_info_show(struct device *dev, struct device_attribute *attr,
+		  char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+
+	if (phba->cfg_enable_bg)
+		if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
+			return snprintf(buf, PAGE_SIZE, "BlockGuard Enabled\n");
+		else
+			return snprintf(buf, PAGE_SIZE,
+					"BlockGuard Not Supported\n");
+	else
+		return snprintf(buf, PAGE_SIZE,
+				"BlockGuard Disabled\n");
+}
+
+static ssize_t
+lpfc_bg_guard_err_show(struct device *dev, struct device_attribute *attr,
+		       char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+
+	return snprintf(buf, PAGE_SIZE, "%llu\n",
+			(unsigned long long)phba->bg_guard_err_cnt);
+}
+
+static ssize_t
+lpfc_bg_apptag_err_show(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+
+	return snprintf(buf, PAGE_SIZE, "%llu\n",
+			(unsigned long long)phba->bg_apptag_err_cnt);
+}
+
+static ssize_t
+lpfc_bg_reftag_err_show(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+
+	return snprintf(buf, PAGE_SIZE, "%llu\n",
+			(unsigned long long)phba->bg_reftag_err_cnt);
+}
+
 /**
  * lpfc_info_show: Return some pci info about the host in ascii.
  * @dev: class converted to a Scsi_host structure.
@@ -1485,6 +1540,10 @@ lpfc_vport_param_store(name)\
 static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
 		   lpfc_##name##_show, lpfc_##name##_store)
 
+static DEVICE_ATTR(bg_info, S_IRUGO, lpfc_bg_info_show, NULL);
+static DEVICE_ATTR(bg_guard_err, S_IRUGO, lpfc_bg_guard_err_show, NULL);
+static DEVICE_ATTR(bg_apptag_err, S_IRUGO, lpfc_bg_apptag_err_show, NULL);
+static DEVICE_ATTR(bg_reftag_err, S_IRUGO, lpfc_bg_reftag_err_show, NULL);
 static DEVICE_ATTR(info, S_IRUGO, lpfc_info_show, NULL);
 static DEVICE_ATTR(serialnum, S_IRUGO, lpfc_serialnum_show, NULL);
 static DEVICE_ATTR(modeldesc, S_IRUGO, lpfc_modeldesc_show, NULL);
@@ -1970,6 +2029,7 @@ static DEVICE_ATTR(lpfc_devloss_tmo, S_IRUGO | S_IWUSR,
 # LOG_LINK_EVENT                0x10       Link events
 # LOG_FCP                       0x40       FCP traffic history
 # LOG_NODE                      0x80       Node table events
+# LOG_BG                        0x200      BlockGuard events
 # LOG_MISC                      0x400      Miscellaneous events
 # LOG_SLI                       0x800      SLI events
 # LOG_FCP_ERROR                 0x1000     Only log FCP errors
@@ -2769,6 +2829,42 @@ LPFC_ATTR_R(enable_hba_reset, 1, 0, 1, "Enable HBA resets from the driver.");
 LPFC_ATTR_R(enable_hba_heartbeat, 1, 0, 1, "Enable HBA Heartbeat.");
 
 /*
+# lpfc_enable_bg: Enable BlockGuard (Emulex's Implementation of T10-DIF)
+#       0  = BlockGuard disabled (default)
+#       1  = BlockGuard enabled
+# Value range is [0,1]. Default value is 0.
+*/
+LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support");
+
+
+/*
+# lpfc_prot_mask:
+#	- Bit mask of host protection capabilities used to register with the
+#	  SCSI mid-layer
+# 	- Only meaningful if BG is turned on (lpfc_enable_bg=1).
+#	- Allows you to ultimately specify which profiles to use
+#	- Default will result in registering capabilities for all profiles.
+#
+*/
+unsigned int lpfc_prot_mask = SHOST_DIX_TYPE0_PROTECTION;
+
+module_param(lpfc_prot_mask, uint, 0);
+MODULE_PARM_DESC(lpfc_prot_mask, "host protection mask");
+
+/*
+# lpfc_prot_guard:
+#	- Bit mask of protection guard types to register with the SCSI mid-layer
+# 	- Guard types are currently either 1) IP checksum 2) T10-DIF CRC
+#	- Allows you to ultimately specify which profiles to use
+#	- Default will result in registering capabilities for all guard types
+#
+*/
+unsigned char lpfc_prot_guard = SHOST_DIX_GUARD_IP;
+module_param(lpfc_prot_guard, byte, 0);
+MODULE_PARM_DESC(lpfc_prot_guard, "host protection guard type");
+
+
+/*
  * lpfc_sg_seg_cnt: Initial Maximum DMA Segment Count
  * This value can be set to values between 64 and 256. The default value is
  * 64, but may be increased to allow for larger Max I/O sizes. The scsi layer
@@ -2777,7 +2873,15 @@ LPFC_ATTR_R(enable_hba_heartbeat, 1, 0, 1, "Enable HBA Heartbeat.");
 LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_DEFAULT_SG_SEG_CNT,
 	    LPFC_MAX_SG_SEG_CNT, "Max Scatter Gather Segment Count");
 
+LPFC_ATTR_R(prot_sg_seg_cnt, LPFC_DEFAULT_PROT_SG_SEG_CNT,
+		LPFC_DEFAULT_PROT_SG_SEG_CNT, LPFC_MAX_PROT_SG_SEG_CNT,
+		"Max Protection Scatter Gather Segment Count");
+
 struct device_attribute *lpfc_hba_attrs[] = {
+	&dev_attr_bg_info,
+	&dev_attr_bg_guard_err,
+	&dev_attr_bg_apptag_err,
+	&dev_attr_bg_reftag_err,
 	&dev_attr_info,
 	&dev_attr_serialnum,
 	&dev_attr_modeldesc,
@@ -2825,6 +2929,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
 	&dev_attr_lpfc_poll,
 	&dev_attr_lpfc_poll_tmo,
 	&dev_attr_lpfc_use_msi,
+	&dev_attr_lpfc_enable_bg,
 	&dev_attr_lpfc_soft_wwnn,
 	&dev_attr_lpfc_soft_wwpn,
 	&dev_attr_lpfc_soft_wwn_enable,
@@ -2833,6 +2938,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
 	&dev_attr_lpfc_sg_seg_cnt,
 	&dev_attr_lpfc_max_scsicmpl_time,
 	&dev_attr_lpfc_stat_data_ctrl,
+	&dev_attr_lpfc_prot_sg_seg_cnt,
 	NULL,
 };
 
@@ -3282,25 +3388,28 @@ lpfc_alloc_sysfs_attr(struct lpfc_vport *vport)
 	int error;
 
 	error = sysfs_create_bin_file(&shost->shost_dev.kobj,
-				      &sysfs_ctlreg_attr);
-	if (error)
+				      &sysfs_drvr_stat_data_attr);
+
+	/* Virtual ports do not need ctrl_reg and mbox */
+	if (error || vport->port_type == LPFC_NPIV_PORT)
 		goto out;
 
 	error = sysfs_create_bin_file(&shost->shost_dev.kobj,
-				      &sysfs_mbox_attr);
+				      &sysfs_ctlreg_attr);
 	if (error)
-		goto out_remove_ctlreg_attr;
+		goto out_remove_stat_attr;
 
 	error = sysfs_create_bin_file(&shost->shost_dev.kobj,
-				      &sysfs_drvr_stat_data_attr);
+				      &sysfs_mbox_attr);
 	if (error)
-		goto out_remove_mbox_attr;
+		goto out_remove_ctlreg_attr;
 
 	return 0;
-out_remove_mbox_attr:
-	sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_mbox_attr);
 out_remove_ctlreg_attr:
 	sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr);
+out_remove_stat_attr:
+	sysfs_remove_bin_file(&shost->shost_dev.kobj,
+			&sysfs_drvr_stat_data_attr);
 out:
 	return error;
 }
@@ -3315,6 +3424,9 @@ lpfc_free_sysfs_attr(struct lpfc_vport *vport)
 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 	sysfs_remove_bin_file(&shost->shost_dev.kobj,
 		&sysfs_drvr_stat_data_attr);
+	/* Virtual ports do not need ctrl_reg and mbox */
+	if (vport->port_type == LPFC_NPIV_PORT)
+		return;
 	sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_mbox_attr);
 	sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr);
 }
@@ -3792,6 +3904,23 @@ lpfc_show_rport_##field (struct device *dev,				\
 	lpfc_rport_show_function(field, format_string, sz, )		\
 static FC_RPORT_ATTR(field, S_IRUGO, lpfc_show_rport_##field, NULL)
 
+/**
+ * lpfc_set_vport_symbolic_name: Set the vport's symbolic name.
+ * @fc_vport: The fc_vport whose symbolic name has been changed.
+ *
+ * Description:
+ * This function is called by the transport after the @fc_vport's symbolic name
+ * has been changed. This function re-registers the symbolic name with the
+ * switch to propagate the change into the fabric if the vport is active.
+ **/
+static void
+lpfc_set_vport_symbolic_name(struct fc_vport *fc_vport)
+{
+	struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
+
+	if (vport->port_state == LPFC_VPORT_READY)
+		lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
+}
 
 struct fc_function_template lpfc_transport_functions = {
 	/* fixed attributes the driver supports */
@@ -3801,6 +3930,7 @@ struct fc_function_template lpfc_transport_functions = {
 	.show_host_supported_fc4s = 1,
 	.show_host_supported_speeds = 1,
 	.show_host_maxframe_size = 1,
+	.show_host_symbolic_name = 1,
 
 	/* dynamic attributes the driver supports */
 	.get_host_port_id = lpfc_get_host_port_id,
@@ -3850,6 +3980,10 @@ struct fc_function_template lpfc_transport_functions = {
 	.terminate_rport_io = lpfc_terminate_rport_io,
 
 	.dd_fcvport_size = sizeof(struct lpfc_vport *),
+
+	.vport_disable = lpfc_vport_disable,
+
+	.set_vport_symbolic_name = lpfc_set_vport_symbolic_name,
 };
 
 struct fc_function_template lpfc_vport_transport_functions = {
@@ -3860,6 +3994,7 @@ struct fc_function_template lpfc_vport_transport_functions = {
 	.show_host_supported_fc4s = 1,
 	.show_host_supported_speeds = 1,
 	.show_host_maxframe_size = 1,
+	.show_host_symbolic_name = 1,
 
 	/* dynamic attributes the driver supports */
 	.get_host_port_id = lpfc_get_host_port_id,
@@ -3908,6 +4043,8 @@ struct fc_function_template lpfc_vport_transport_functions = {
 	.terminate_rport_io = lpfc_terminate_rport_io,
 
 	.vport_disable = lpfc_vport_disable,
+
+	.set_vport_symbolic_name = lpfc_set_vport_symbolic_name,
 };
 
 /**
@@ -3930,13 +4067,12 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
 	lpfc_use_msi_init(phba, lpfc_use_msi);
 	lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset);
 	lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat);
+	lpfc_enable_bg_init(phba, lpfc_enable_bg);
 	phba->cfg_poll = lpfc_poll;
 	phba->cfg_soft_wwnn = 0L;
 	phba->cfg_soft_wwpn = 0L;
 	lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt);
-	/* Also reinitialize the host templates with new values. */
-	lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
-	lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
+	lpfc_prot_sg_seg_cnt_init(phba, lpfc_prot_sg_seg_cnt);
 	/*
 	 * Since the sg_tablesize is module parameter, the sg_dma_buf_size
 	 * used to create the sg_dma_buf_pool must be dynamically calculated.
@@ -3945,6 +4081,17 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
 	phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
 			sizeof(struct fcp_rsp) +
 			((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
+
+	if (phba->cfg_enable_bg) {
+		phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT;
+		phba->cfg_sg_dma_buf_size +=
+			phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
+	}
+
+	/* Also reinitialize the host templates with new values. */
+	lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
+	lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
+
 	lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
 	return;
 }
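The four bg_* attributes added above surface as plain read-only sysfs files once the host registers. A small user-space reader; the host0 path is an assumption and must be adjusted to the actual lpfc host number:

#include <stdio.h>

/* host0 is an assumption; substitute the real lpfc host number. */
static void show_attr(const char *name)
{
	char path[128], line[64];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/class/scsi_host/host0/%s", name);
	f = fopen(path, "r");
	if (!f)
		return;
	if (fgets(line, sizeof(line), f))
		printf("%s: %s", name, line);
	fclose(f);
}

int main(void)
{
	show_attr("bg_info");
	show_attr("bg_guard_err");
	show_attr("bg_apptag_err");
	show_attr("bg_reftag_err");
	return 0;
}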
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 044ef4057d28..07f4976319a5 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -22,6 +22,7 @@ typedef int (*node_filter)(struct lpfc_nodelist *, void *);
 
 struct fc_rport;
 void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
+void lpfc_dump_wakeup_param(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_config_async(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
 
@@ -284,12 +285,24 @@ extern void lpfc_debugfs_slow_ring_trc(struct lpfc_hba *, char *, uint32_t,
 	uint32_t, uint32_t);
 extern struct lpfc_hbq_init *lpfc_hbq_defs[];
 
+/* externs BlockGuard */
+extern char *_dump_buf_data;
+extern unsigned long _dump_buf_data_order;
+extern char *_dump_buf_dif;
+extern unsigned long _dump_buf_dif_order;
+extern spinlock_t _dump_buf_lock;
+extern int _dump_buf_done;
+extern spinlock_t pgcnt_lock;
+extern unsigned int pgcnt;
+extern unsigned int lpfc_prot_mask;
+extern unsigned char lpfc_prot_guard;
+
 /* Interface exported by fabric iocb scheduler */
 void lpfc_fabric_abort_nport(struct lpfc_nodelist *);
 void lpfc_fabric_abort_hba(struct lpfc_hba *);
 void lpfc_fabric_block_timeout(unsigned long);
 void lpfc_unblock_fabric_iocbs(struct lpfc_hba *);
-void lpfc_adjust_queue_depth(struct lpfc_hba *);
+void lpfc_rampdown_queue_depth(struct lpfc_hba *);
 void lpfc_ramp_down_queue_handler(struct lpfc_hba *);
 void lpfc_ramp_up_queue_handler(struct lpfc_hba *);
 void lpfc_scsi_dev_block(struct lpfc_hba *);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 26dae8bae2d1..896c7b0351e5 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -560,18 +560,25 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 		irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_ns_retry);
 
 	/* Don't bother processing response if vport is being torn down. */
-	if (vport->load_flag & FC_UNLOADING)
+	if (vport->load_flag & FC_UNLOADING) {
+		if (vport->fc_flag & FC_RSCN_MODE)
+			lpfc_els_flush_rscn(vport);
 		goto out;
+	}
 
 	if (lpfc_els_chk_latt(vport)) {
 		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
 				 "0216 Link event during NS query\n");
+		if (vport->fc_flag & FC_RSCN_MODE)
+			lpfc_els_flush_rscn(vport);
 		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
 		goto out;
 	}
 	if (lpfc_error_lost_link(irsp)) {
 		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
 				 "0226 NS query failed due to link event\n");
+		if (vport->fc_flag & FC_RSCN_MODE)
+			lpfc_els_flush_rscn(vport);
 		goto out;
 	}
 	if (irsp->ulpStatus) {
@@ -587,6 +594,8 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 			if (rc == 0)
 				goto out;
 		}
+		if (vport->fc_flag & FC_RSCN_MODE)
+			lpfc_els_flush_rscn(vport);
 		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
 				 "0257 GID_FT Query error: 0x%x 0x%x\n",
@@ -1008,8 +1017,10 @@ lpfc_vport_symbolic_port_name(struct lpfc_vport *vport, char *symbol,
 	if (n < size)
 		n += snprintf(symbol + n, size - n, " VPort-%d", vport->vpi);
 
-	if (n < size && vport->vname)
-		n += snprintf(symbol + n, size - n, " VName-%s", vport->vname);
+	if (n < size &&
+	    strlen(vport->fc_vport->symbolic_name))
+		n += snprintf(symbol + n, size - n, " VName-%s",
+			      vport->fc_vport->symbolic_name);
 	return n;
 }
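The symbolic-name fix above leans on the guarded snprintf append idiom: each call writes at symbol + n with size - n bytes remaining, and the n < size check keeps later appends from running past the buffer even after truncation. A self-contained sketch of that idiom (the base string here is a placeholder, not the driver's real port name):

#include <stdio.h>
#include <string.h>

/* Builds "<base> VPort-N VName-S", truncating safely on overflow. */
static int build_symbol(char *buf, size_t size, int vpi, const char *vname)
{
	int n = snprintf(buf, size, "Example Port Name");

	if (n < (int)size)
		n += snprintf(buf + n, size - n, " VPort-%d", vpi);
	if (n < (int)size && strlen(vname))
		n += snprintf(buf + n, size - n, " VName-%s", vname);
	return n;  /* like snprintf, may exceed size when truncated */
}

int main(void)
{
	char sym[64];

	build_symbol(sym, sizeof(sym), 3, "backup");
	puts(sym);
	return 0;
}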
 
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 771920bdde44..b615eda361d5 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -46,7 +46,7 @@
 #include "lpfc_compat.h"
 #include "lpfc_debugfs.h"
 
-#ifdef CONFIG_LPFC_DEBUG_FS
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 /**
  * debugfs interface
  *
@@ -618,7 +618,7 @@ inline void
 lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
 	uint32_t data1, uint32_t data2, uint32_t data3)
 {
-#ifdef CONFIG_LPFC_DEBUG_FS
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 	struct lpfc_debugfs_trc *dtp;
 	int index;
 
@@ -659,7 +659,7 @@ inline void
 lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
 	uint32_t data1, uint32_t data2, uint32_t data3)
 {
-#ifdef CONFIG_LPFC_DEBUG_FS
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 	struct lpfc_debugfs_trc *dtp;
 	int index;
 
@@ -680,7 +680,7 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
 	return;
 }
 
-#ifdef CONFIG_LPFC_DEBUG_FS
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 /**
  * lpfc_debugfs_disc_trc_open - Open the discovery trace log.
  * @inode: The inode pointer that contains a vport pointer.
@@ -907,6 +907,91 @@ out:
 	return rc;
 }
 
+static int
+lpfc_debugfs_dumpData_open(struct inode *inode, struct file *file)
+{
+	struct lpfc_debug *debug;
+	int rc = -ENOMEM;
+
+	if (!_dump_buf_data)
+		return -EBUSY;
+
+	debug = kmalloc(sizeof(*debug), GFP_KERNEL);
+	if (!debug)
+		goto out;
+
+	/* Round to page boundary */
+	printk(KERN_ERR "BLKGRD %s: _dump_buf_data=0x%p\n",
+			__func__, _dump_buf_data);
+	debug->buffer = _dump_buf_data;
+	if (!debug->buffer) {
+		kfree(debug);
+		goto out;
+	}
+
+	debug->len = (1 << _dump_buf_data_order) << PAGE_SHIFT;
+	file->private_data = debug;
+
+	rc = 0;
+out:
+	return rc;
+}
+
+static int
+lpfc_debugfs_dumpDif_open(struct inode *inode, struct file *file)
+{
+	struct lpfc_debug *debug;
+	int rc = -ENOMEM;
+
+	if (!_dump_buf_dif)
+		return -EBUSY;
+
+	debug = kmalloc(sizeof(*debug), GFP_KERNEL);
+	if (!debug)
+		goto out;
+
+	/* Round to page boundary */
+	printk(KERN_ERR "BLKGRD %s: _dump_buf_dif=0x%p file=%s\n", __func__,
+	       _dump_buf_dif, file->f_dentry->d_name.name);
+	debug->buffer = _dump_buf_dif;
+	if (!debug->buffer) {
+		kfree(debug);
+		goto out;
+	}
+
+	debug->len = (1 << _dump_buf_dif_order) << PAGE_SHIFT;
+	file->private_data = debug;
+
+	rc = 0;
+out:
+	return rc;
+}
+
+static ssize_t
+lpfc_debugfs_dumpDataDif_write(struct file *file, const char __user *buf,
+		  size_t nbytes, loff_t *ppos)
+{
+	/*
+	 * The Data/DIF buffers only save one failing I/O.
+	 * The write op is used as a reset mechanism after an I/O has
+	 * already been saved, so that the next one can be saved.
+	 */
+	spin_lock(&_dump_buf_lock);
+
+	memset((void *)_dump_buf_data, 0,
+			((1 << PAGE_SHIFT) << _dump_buf_data_order));
+	memset((void *)_dump_buf_dif, 0,
+			((1 << PAGE_SHIFT) << _dump_buf_dif_order));
+
+	_dump_buf_done = 0;
+
+	spin_unlock(&_dump_buf_lock);
+
+	return nbytes;
+}
+
 /**
  * lpfc_debugfs_nodelist_open - Open the nodelist debugfs file.
  * @inode: The inode pointer that contains a vport pointer.
@@ -1035,6 +1120,17 @@ lpfc_debugfs_release(struct inode *inode, struct file *file)
 	return 0;
 }
 
+static int
+lpfc_debugfs_dumpDataDif_release(struct inode *inode, struct file *file)
+{
+	struct lpfc_debug *debug = file->private_data;
+
+	debug->buffer = NULL;
+	kfree(debug);
+
+	return 0;
+}
+
 #undef lpfc_debugfs_op_disc_trc
 static struct file_operations lpfc_debugfs_op_disc_trc = {
 	.owner =        THIS_MODULE,
@@ -1080,6 +1176,26 @@ static struct file_operations lpfc_debugfs_op_dumpHostSlim = {
 	.release =      lpfc_debugfs_release,
 };
 
+#undef lpfc_debugfs_op_dumpData
+static struct file_operations lpfc_debugfs_op_dumpData = {
+	.owner =        THIS_MODULE,
+	.open =         lpfc_debugfs_dumpData_open,
+	.llseek =       lpfc_debugfs_lseek,
+	.read =         lpfc_debugfs_read,
+	.write =	lpfc_debugfs_dumpDataDif_write,
+	.release =      lpfc_debugfs_dumpDataDif_release,
+};
+
+#undef lpfc_debugfs_op_dumpDif
+static struct file_operations lpfc_debugfs_op_dumpDif = {
+	.owner =        THIS_MODULE,
+	.open =         lpfc_debugfs_dumpDif_open,
+	.llseek =       lpfc_debugfs_lseek,
+	.read =         lpfc_debugfs_read,
+	.write =	lpfc_debugfs_dumpDataDif_write,
+	.release =      lpfc_debugfs_dumpDataDif_release,
+};
+
 #undef lpfc_debugfs_op_slow_ring_trc
 static struct file_operations lpfc_debugfs_op_slow_ring_trc = {
 	.owner =        THIS_MODULE,
@@ -1106,7 +1222,7 @@ static atomic_t lpfc_debugfs_hba_count;
 inline void
 lpfc_debugfs_initialize(struct lpfc_vport *vport)
 {
-#ifdef CONFIG_LPFC_DEBUG_FS
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 	struct lpfc_hba   *phba = vport->phba;
 	char name[64];
 	uint32_t num, i;
@@ -1176,6 +1292,32 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
 			goto debug_failed;
 		}
 
+		/* Setup dumpData */
+		snprintf(name, sizeof(name), "dumpData");
+		phba->debug_dumpData =
+			debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+				 phba->hba_debugfs_root,
+				 phba, &lpfc_debugfs_op_dumpData);
+		if (!phba->debug_dumpData) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+				"0800 Cannot create debugfs dumpData\n");
+			goto debug_failed;
+		}
+
+		/* Setup dumpDif */
+		snprintf(name, sizeof(name), "dumpDif");
+		phba->debug_dumpDif =
+			debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+				 phba->hba_debugfs_root,
+				 phba, &lpfc_debugfs_op_dumpDif);
+		if (!phba->debug_dumpDif) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+				"0801 Cannot create debugfs dumpDif\n");
+			goto debug_failed;
+		}
+
 		/* Setup slow ring trace */
 		if (lpfc_debugfs_max_slow_ring_trc) {
 			num = lpfc_debugfs_max_slow_ring_trc - 1;
@@ -1305,7 +1447,7 @@ debug_failed:
 inline void
 lpfc_debugfs_terminate(struct lpfc_vport *vport)
 {
-#ifdef CONFIG_LPFC_DEBUG_FS
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 	struct lpfc_hba   *phba = vport->phba;
 
 	if (vport->disc_trc) {
@@ -1340,6 +1482,16 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
 			debugfs_remove(phba->debug_dumpHostSlim); /* HostSlim */
 			phba->debug_dumpHostSlim = NULL;
 		}
+		if (phba->debug_dumpData) {
+			debugfs_remove(phba->debug_dumpData); /* dumpData */
+			phba->debug_dumpData = NULL;
+		}
+
+		if (phba->debug_dumpDif) {
+			debugfs_remove(phba->debug_dumpDif); /* dumpDif */
+			phba->debug_dumpDif = NULL;
+		}
+
 		if (phba->slow_ring_trc) {
 			kfree(phba->slow_ring_trc);
 			phba->slow_ring_trc = NULL;
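From user space the new dumpData and dumpDif files behave as one-shot capture buffers: reading returns the saved failing I/O, and any write clears both buffers so the next failure can be captured. A hypothetical consumer; the debugfs mount point and the per-HBA directory name are assumptions:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Both path components are assumptions about the deployment. */
	int fd = open("/sys/kernel/debug/lpfc/fc0/dumpData", O_RDWR);
	char buf[4096];
	ssize_t n;

	if (fd < 0)
		return 1;
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);      /* saved data buffer */
	if (write(fd, "1", 1) != 1)             /* any write re-arms capture */
		perror("write");
	close(fd);
	return 0;
}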
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h
index 31e86a55391d..03c7313a1012 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.h
+++ b/drivers/scsi/lpfc/lpfc_debugfs.h
@@ -21,7 +21,7 @@
 #ifndef _H_LPFC_DEBUG_FS
 #define _H_LPFC_DEBUG_FS
 
-#ifdef CONFIG_LPFC_DEBUG_FS
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 struct lpfc_debugfs_trc {
 	char *fmt;
 	uint32_t data1;
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 630bd28fb997..a8f30bdaff69 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -221,7 +221,11 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
 		/* For ELS_REQUEST64_CR, use the VPI by default */
 		icmd->ulpContext = vport->vpi;
 		icmd->ulpCt_h = 0;
-		icmd->ulpCt_l = 1;
+		/* The CT field must be 0=INVALID_RPI for the ECHO cmd */
+		if (elscmd == ELS_CMD_ECHO)
+			icmd->ulpCt_l = 0; /* context = invalid RPI */
+		else
+			icmd->ulpCt_l = 1; /* context = VPI */
 	}
 
 	bpl = (struct ulp_bde64 *) pbuflist->virt;
@@ -271,7 +275,8 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
 	return elsiocb;
 
 els_iocb_free_pbuf_exit:
-	lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
+	if (expectRsp)
+		lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
 	kfree(pbuflist);
 
 els_iocb_free_prsp_exit:
@@ -2468,6 +2473,15 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 	case IOSTAT_LOCAL_REJECT:
 		switch ((irsp->un.ulpWord[4] & 0xff)) {
 		case IOERR_LOOP_OPEN_FAILURE:
+			if (cmd == ELS_CMD_FLOGI) {
+				if (PCI_DEVICE_ID_HORNET ==
+					phba->pcidev->device) {
+					phba->fc_topology = TOPOLOGY_LOOP;
+					phba->pport->fc_myDID = 0;
+					phba->alpa_map[0] = 0;
+					phba->alpa_map[1] = 0;
+				}
+			}
 			if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0)
 				delay = 1000;
 			retry = 1;
@@ -3823,27 +3837,21 @@ lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did)
 		while (payload_len) {
 			rscn_did.un.word = be32_to_cpu(*lp++);
 			payload_len -= sizeof(uint32_t);
-			switch (rscn_did.un.b.resv) {
-			case 0:	/* Single N_Port ID effected */
+			switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) {
+			case RSCN_ADDRESS_FORMAT_PORT:
 				if (ns_did.un.word == rscn_did.un.word)
 					goto return_did_out;
 				break;
-			case 1:	/* Whole N_Port Area effected */
+			case RSCN_ADDRESS_FORMAT_AREA:
 				if ((ns_did.un.b.domain == rscn_did.un.b.domain)
 				    && (ns_did.un.b.area == rscn_did.un.b.area))
 					goto return_did_out;
 				break;
-			case 2:	/* Whole N_Port Domain effected */
+			case RSCN_ADDRESS_FORMAT_DOMAIN:
 				if (ns_did.un.b.domain == rscn_did.un.b.domain)
 					goto return_did_out;
 				break;
-			default:
-				/* Unknown Identifier in RSCN node */
-				lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
-						 "0217 Unknown Identifier in "
-						 "RSCN payload Data: x%x\n",
-						 rscn_did.un.word);
-			case 3:	/* Whole Fabric effected */
+			case RSCN_ADDRESS_FORMAT_FABRIC:
 				goto return_did_out;
 			}
 		}
@@ -3887,6 +3895,49 @@ lpfc_rscn_recovery_check(struct lpfc_vport *vport)
 }
 
 /**
+ * lpfc_send_rscn_event: Send an RSCN event to management application.
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ *
+ * lpfc_send_rscn_event sends an RSCN netlink event to management
+ * applications.
+ */
+static void
+lpfc_send_rscn_event(struct lpfc_vport *vport,
+		struct lpfc_iocbq *cmdiocb)
+{
+	struct lpfc_dmabuf *pcmd;
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	uint32_t *payload_ptr;
+	uint32_t payload_len;
+	struct lpfc_rscn_event_header *rscn_event_data;
+
+	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+	payload_ptr = (uint32_t *) pcmd->virt;
+	payload_len = be32_to_cpu(*payload_ptr & ~ELS_CMD_MASK);
+
+	rscn_event_data = kmalloc(sizeof(struct lpfc_rscn_event_header) +
+		payload_len, GFP_KERNEL);
+	if (!rscn_event_data) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+			"0147 Failed to allocate memory for RSCN event\n");
+		return;
+	}
+	rscn_event_data->event_type = FC_REG_RSCN_EVENT;
+	rscn_event_data->payload_length = payload_len;
+	memcpy(rscn_event_data->rscn_payload, payload_ptr,
+		payload_len);
+
+	fc_host_post_vendor_event(shost,
+		fc_get_event_number(),
+		sizeof(struct lpfc_els_event_header) + payload_len,
+		(char *)rscn_event_data,
+		LPFC_NL_VENDOR_ID);
+
+	kfree(rscn_event_data);
+}
+
+/**
  * lpfc_els_rcv_rscn: Process an unsolicited rscn iocb.
  * @vport: pointer to a host virtual N_Port data structure.
  * @cmdiocb: pointer to lpfc command iocb data structure.
@@ -3933,6 +3984,10 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
 			 "0214 RSCN received Data: x%x x%x x%x x%x\n",
 			 vport->fc_flag, payload_len, *lp,
 			 vport->fc_rscn_id_cnt);
+
+	/* Send an RSCN event to the management application */
+	lpfc_send_rscn_event(vport, cmdiocb);
+
 	for (i = 0; i < payload_len/sizeof(uint32_t); i++)
 		fc_host_post_event(shost, fc_get_event_number(),
 			FCH_EVT_RSCN, lp[i]);
@@ -4884,10 +4939,6 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
 	uint32_t timeout;
 	uint32_t remote_ID = 0xffffffff;
 
-	/* If the timer is already canceled do nothing */
-	if ((vport->work_port_events & WORKER_ELS_TMO) == 0) {
-		return;
-	}
 	spin_lock_irq(&phba->hbalock);
 	timeout = (uint32_t)(phba->fc_ratov << 1);
 
@@ -5128,7 +5179,7 @@ lpfc_send_els_failure_event(struct lpfc_hba *phba,
 			fc_get_event_number(),
 			sizeof(lsrjt_event),
 			(char *)&lsrjt_event,
-			SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
+			LPFC_NL_VENDOR_ID);
 		return;
 	}
 	if ((rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY) ||
@@ -5146,7 +5197,7 @@ lpfc_send_els_failure_event(struct lpfc_hba *phba,
 			fc_get_event_number(),
 			sizeof(fabric_event),
 			(char *)&fabric_event,
-			SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
+			LPFC_NL_VENDOR_ID);
 		return;
 	}
 
@@ -5164,32 +5215,68 @@ lpfc_send_els_failure_event(struct lpfc_hba *phba,
 static void
 lpfc_send_els_event(struct lpfc_vport *vport,
 		    struct lpfc_nodelist *ndlp,
-		    uint32_t cmd)
+		    uint32_t *payload)
 {
-	struct lpfc_els_event_header els_data;
+	struct lpfc_els_event_header *els_data = NULL;
+	struct lpfc_logo_event *logo_data = NULL;
 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 
-	els_data.event_type = FC_REG_ELS_EVENT;
-	switch (cmd) {
+	if (*payload == ELS_CMD_LOGO) {
+		logo_data = kmalloc(sizeof(struct lpfc_logo_event), GFP_KERNEL);
+		if (!logo_data) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				"0148 Failed to allocate memory "
+				"for LOGO event\n");
+			return;
+		}
+		els_data = &logo_data->header;
+	} else {
+		els_data = kmalloc(sizeof(struct lpfc_els_event_header),
+			GFP_KERNEL);
+		if (!els_data) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				"0149 Failed to allocate memory "
+				"for ELS event\n");
+			return;
+		}
+	}
+	els_data->event_type = FC_REG_ELS_EVENT;
+	switch (*payload) {
 	case ELS_CMD_PLOGI:
-		els_data.subcategory = LPFC_EVENT_PLOGI_RCV;
+		els_data->subcategory = LPFC_EVENT_PLOGI_RCV;
 		break;
 	case ELS_CMD_PRLO:
-		els_data.subcategory = LPFC_EVENT_PRLO_RCV;
+		els_data->subcategory = LPFC_EVENT_PRLO_RCV;
 		break;
 	case ELS_CMD_ADISC:
-		els_data.subcategory = LPFC_EVENT_ADISC_RCV;
+		els_data->subcategory = LPFC_EVENT_ADISC_RCV;
+		break;
+	case ELS_CMD_LOGO:
+		els_data->subcategory = LPFC_EVENT_LOGO_RCV;
+		/* Copy the WWPN in the LOGO payload */
+		memcpy(logo_data->logo_wwpn, &payload[2],
+			sizeof(struct lpfc_name));
 		break;
 	default:
 		return;
 	}
-	memcpy(els_data.wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name));
-	memcpy(els_data.wwnn, &ndlp->nlp_nodename, sizeof(struct lpfc_name));
-	fc_host_post_vendor_event(shost,
-		fc_get_event_number(),
-		sizeof(els_data),
-		(char *)&els_data,
-		SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
+	memcpy(els_data->wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name));
+	memcpy(els_data->wwnn, &ndlp->nlp_nodename, sizeof(struct lpfc_name));
+	if (*payload == ELS_CMD_LOGO) {
+		fc_host_post_vendor_event(shost,
+			fc_get_event_number(),
+			sizeof(struct lpfc_logo_event),
+			(char *)logo_data,
+			LPFC_NL_VENDOR_ID);
+		kfree(logo_data);
+	} else {
+		fc_host_post_vendor_event(shost,
+			fc_get_event_number(),
+			sizeof(struct lpfc_els_event_header),
+			(char *)els_data,
+			LPFC_NL_VENDOR_ID);
+		kfree(els_data);
+	}
 
 	return;
 }
@@ -5296,7 +5383,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 		phba->fc_stat.elsRcvPLOGI++;
 		ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp);
 
-		lpfc_send_els_event(vport, ndlp, cmd);
+		lpfc_send_els_event(vport, ndlp, payload);
 		if (vport->port_state < LPFC_DISC_AUTH) {
 			if (!(phba->pport->fc_flag & FC_PT2PT) ||
 				(phba->pport->fc_flag & FC_PT2PT_PLOGI)) {
@@ -5334,6 +5421,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 			did, vport->port_state, ndlp->nlp_flag);
 
 		phba->fc_stat.elsRcvLOGO++;
+		lpfc_send_els_event(vport, ndlp, payload);
 		if (vport->port_state < LPFC_DISC_AUTH) {
 			rjt_err = LSRJT_UNABLE_TPC;
 			break;
@@ -5346,7 +5434,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 			did, vport->port_state, ndlp->nlp_flag);
 
 		phba->fc_stat.elsRcvPRLO++;
-		lpfc_send_els_event(vport, ndlp, cmd);
+		lpfc_send_els_event(vport, ndlp, payload);
 		if (vport->port_state < LPFC_DISC_AUTH) {
 			rjt_err = LSRJT_UNABLE_TPC;
 			break;
@@ -5364,7 +5452,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 			"RCV ADISC:       did:x%x/ste:x%x flg:x%x",
 			did, vport->port_state, ndlp->nlp_flag);
 
-		lpfc_send_els_event(vport, ndlp, cmd);
+		lpfc_send_els_event(vport, ndlp, payload);
 		phba->fc_stat.elsRcvADISC++;
 		if (vport->port_state < LPFC_DISC_AUTH) {
 			rjt_err = LSRJT_UNABLE_TPC;
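The RSCN and LOGO event paths added above share one shape: allocate a header-plus-payload blob sized at run time, fill the header, copy the payload behind it, post the vendor event, and free. A compact standalone version of that pattern using a flexible array member; the struct below is illustrative, not the driver's real lpfc_rscn_event_header:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative only; not the driver's lpfc_rscn_event_header layout. */
struct event_header {
	uint32_t event_type;
	uint32_t payload_length;
	uint8_t  payload[];     /* flexible array member */
};

static struct event_header *build_event(uint32_t type, const void *data,
					uint32_t len)
{
	struct event_header *ev = malloc(sizeof(*ev) + len);

	if (!ev)                /* caller logs and bails, as the driver does */
		return NULL;
	ev->event_type = type;
	ev->payload_length = len;
	memcpy(ev->payload, data, len);
	return ev;              /* post the event, then free() it */
}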
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index a1a70d9ffc2a..8c64494444bf 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -350,7 +350,7 @@ lpfc_send_fastpath_evt(struct lpfc_hba *phba,
 			evt_data_size = sizeof(fast_evt_data->un.
 				read_check_error);
 		} else if ((evt_sub_category == LPFC_EVENT_FABRIC_BUSY) ||
-			(evt_sub_category == IOSTAT_NPORT_BSY)) {
+			(evt_sub_category == LPFC_EVENT_PORT_BUSY)) {
 			evt_data = (char *) &fast_evt_data->un.fabric_evt;
 			evt_data_size = sizeof(fast_evt_data->un.fabric_evt);
 		} else {
@@ -387,7 +387,7 @@ lpfc_send_fastpath_evt(struct lpfc_hba *phba,
 		fc_get_event_number(),
 		evt_data_size,
 		evt_data,
-		SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
+		LPFC_NL_VENDOR_ID);
 
 	lpfc_free_fast_evt(phba, fast_evt_data);
 	return;
@@ -585,20 +585,25 @@ lpfc_do_work(void *p)
 	set_user_nice(current, -20);
 	phba->data_flags = 0;
 
-	while (1) {
+	while (!kthread_should_stop()) {
 		/* wait and check worker queue activities */
 		rc = wait_event_interruptible(phba->work_waitq,
 					(test_and_clear_bit(LPFC_DATA_READY,
 							    &phba->data_flags)
 					 || kthread_should_stop()));
-		BUG_ON(rc);
-
-		if (kthread_should_stop())
+		/* Signal wakeup shall terminate the worker thread */
+		if (rc) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+					"0433 Wakeup on signal: rc=x%x\n", rc);
 			break;
+		}
 
 		/* Attend pending lpfc data processing */
 		lpfc_work_done(phba);
 	}
+	phba->worker_thread = NULL;
+	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+			"0432 Worker thread stopped.\n");
 	return 0;
 }
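The reworked lpfc_do_work() above replaces the old BUG_ON(rc) with a clean exit, so a signal-interrupted wait now terminates the worker thread instead of crashing the kernel. The resulting loop shape, reduced to a minimal kthread sketch where the wait queue and work flag are placeholders:

#include <linux/kthread.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(work_waitq);     /* placeholder queue */
static unsigned long data_flags;
#define DATA_READY 0

static int worker_fn(void *arg)
{
	int rc;

	while (!kthread_should_stop()) {
		rc = wait_event_interruptible(work_waitq,
			test_and_clear_bit(DATA_READY, &data_flags) ||
			kthread_should_stop());
		if (rc)         /* signal wakeup: exit, don't BUG_ON() */
			break;
		/* attend pending work here */
	}
	return 0;
}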
 
@@ -1852,6 +1857,32 @@ lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 	lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
 				NLP_STE_UNUSED_NODE);
 }
+/**
+ * lpfc_initialize_node: Initialize all fields of node object.
+ * @vport: Pointer to Virtual Port object.
+ * @ndlp: Pointer to FC node object.
+ * @did: FC_ID of the node.
+ *	This function is always called when node object need to
+ *
+ * This function is always called when a node object needs to
+ * be initialized. It initializes all the fields of the node
+ * object.
+static inline void
+lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+	uint32_t did)
+{
+	INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
+	INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
+	init_timer(&ndlp->nlp_delayfunc);
+	ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
+	ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
+	ndlp->nlp_DID = did;
+	ndlp->vport = vport;
+	ndlp->nlp_sid = NLP_NO_SID;
+	kref_init(&ndlp->kref);
+	NLP_INT_NODE_ACT(ndlp);
+	atomic_set(&ndlp->cmd_pending, 0);
+	ndlp->cmd_qdepth = LPFC_MAX_TGT_QDEPTH;
+}
 
 struct lpfc_nodelist *
 lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
@@ -1892,17 +1923,7 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	/* re-initialize ndlp except of ndlp linked list pointer */
 	memset((((char *)ndlp) + sizeof (struct list_head)), 0,
 		sizeof (struct lpfc_nodelist) - sizeof (struct list_head));
-	INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
-	INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
-	init_timer(&ndlp->nlp_delayfunc);
-	ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
-	ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
-	ndlp->nlp_DID = did;
-	ndlp->vport = vport;
-	ndlp->nlp_sid = NLP_NO_SID;
-	/* ndlp management re-initialize */
-	kref_init(&ndlp->kref);
-	NLP_INT_NODE_ACT(ndlp);
+	lpfc_initialize_node(vport, ndlp, did);
 
 	spin_unlock_irqrestore(&phba->ndlp_lock, flags);
 
@@ -3116,19 +3137,9 @@ lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	      uint32_t did)
 {
 	memset(ndlp, 0, sizeof (struct lpfc_nodelist));
-	INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
-	INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
-	init_timer(&ndlp->nlp_delayfunc);
-	ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
-	ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
-	ndlp->nlp_DID = did;
-	ndlp->vport = vport;
-	ndlp->nlp_sid = NLP_NO_SID;
+
+	lpfc_initialize_node(vport, ndlp, did);
 	INIT_LIST_HEAD(&ndlp->nlp_listp);
-	kref_init(&ndlp->kref);
-	NLP_INT_NODE_ACT(ndlp);
-	atomic_set(&ndlp->cmd_pending, 0);
-	ndlp->cmd_qdepth = LPFC_MAX_TGT_QDEPTH;
 
 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
 		"node init:       did:x%x",
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 5de5dabbbee6..4168c7b498b8 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -65,6 +65,9 @@
 #define SLI3_IOCB_RSP_SIZE	64
 
 
+/* vendor ID used in SCSI netlink calls */
+#define LPFC_NL_VENDOR_ID (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX)
+
 /* Common Transport structures and definitions */
 
 union CtRevisionId {
@@ -866,6 +869,12 @@ typedef struct _D_ID {		/* Structure is in Big Endian format */
 	} un;
 } D_ID;
 
+#define RSCN_ADDRESS_FORMAT_PORT	0x0
+#define RSCN_ADDRESS_FORMAT_AREA	0x1
+#define RSCN_ADDRESS_FORMAT_DOMAIN	0x2
+#define RSCN_ADDRESS_FORMAT_FABRIC	0x3
+#define RSCN_ADDRESS_FORMAT_MASK	0x3
+
 /*
  *  Structure to define all ELS Payload types
  */
@@ -1535,6 +1544,108 @@ typedef struct ULP_BDL {	/* SLI-2 */
 	uint32_t ulpIoTag32;	/* Can be used for 32 bit I/O Tag */
 } ULP_BDL;
 
+/*
+ * BlockGuard Definitions
+ */
+
+enum lpfc_protgrp_type {
+	LPFC_PG_TYPE_INVALID = 0, /* used to indicate errors                  */
+	LPFC_PG_TYPE_NO_DIF,	  /* no DIF data pointed to by prot grp       */
+	LPFC_PG_TYPE_EMBD_DIF,	  /* DIF is embedded (inline) with data       */
+	LPFC_PG_TYPE_DIF_BUF	  /* DIF has its own scatter/gather list      */
+};
+
+/* PDE Descriptors */
+#define LPFC_PDE1_DESCRIPTOR		0x81
+#define LPFC_PDE2_DESCRIPTOR		0x82
+#define LPFC_PDE3_DESCRIPTOR		0x83
+
+/* BlockGuard Profiles */
+enum lpfc_bg_prof_codes {
+	LPFC_PROF_INVALID,
+	LPFC_PROF_A1	= 128,	/* Full Protection			      */
+	LPFC_PROF_A2,		/* Disabled Protection Checks:A2~A4           */
+	LPFC_PROF_A3,
+	LPFC_PROF_A4,
+	LPFC_PROF_B1,		/* Embedded DIFs: B1~B3	                      */
+	LPFC_PROF_B2,
+	LPFC_PROF_B3,
+	LPFC_PROF_C1,		/* Separate DIFs: C1~C3                       */
+	LPFC_PROF_C2,
+	LPFC_PROF_C3,
+	LPFC_PROF_D1,		/* Full Protection                            */
+	LPFC_PROF_D2,		/* Partial Protection & Check Disabling       */
+	LPFC_PROF_D3,
+	LPFC_PROF_E1,		/* E1~E4:out - check-only, in - update apptag */
+	LPFC_PROF_E2,
+	LPFC_PROF_E3,
+	LPFC_PROF_E4,
+	LPFC_PROF_F1,		/* Full Translation - F1 Prot Descriptor      */
+				/* F1 Translation BDE                         */
+	LPFC_PROF_ANT1,		/* TCP checksum, DIF inline with data buffers */
+	LPFC_PROF_AST1,		/* TCP checksum, DIF split from data buffer   */
+	LPFC_PROF_ANT2,
+	LPFC_PROF_AST2
+};
+
+/* BlockGuard error-control defines */
+#define BG_EC_STOP_ERR			0x00
+#define BG_EC_CONT_ERR			0x01
+#define BG_EC_IGN_UNINIT_STOP_ERR	0x10
+#define BG_EC_IGN_UNINIT_CONT_ERR	0x11
+
+/* PDE (Protection Descriptor Entry) word 0 bit masks and shifts */
+#define PDE_DESC_TYPE_MASK		0xff000000
+#define PDE_DESC_TYPE_SHIFT		24
+#define PDE_BG_PROFILE_MASK		0x00ff0000
+#define PDE_BG_PROFILE_SHIFT		16
+#define PDE_BLOCK_LEN_MASK		0x0000fffc
+#define PDE_BLOCK_LEN_SHIFT		2
+#define PDE_ERR_CTRL_MASK		0x00000003
+#define PDE_ERR_CTRL_SHIFT		0
+/* PDE word 1 bit masks and shifts */
+#define PDE_APPTAG_MASK_MASK		0xffff0000
+#define PDE_APPTAG_MASK_SHIFT		16
+#define PDE_APPTAG_VAL_MASK		0x0000ffff
+#define PDE_APPTAG_VAL_SHIFT		0
+struct lpfc_pde {
+	uint32_t parms;     /* bitfields of descriptor, prof, len, and ec */
+	uint32_t apptag;    /* bitfields of app tag mask and app tag value */
+	uint32_t reftag;    /* reference tag occupying all 32 bits        */
+};
+
+/* inline function to set fields in parms of PDE */
+static inline void
+lpfc_pde_set_bg_parms(struct lpfc_pde *p, u8 desc, u8 prof, u16 len, u8 ec)
+{
+	uint32_t *wp = &p->parms;
+
+	/* spec indicates that adapter appends two 0's to length field */
+	len = len >> 2;
+
+	*wp &= 0;
+	*wp |= ((desc << PDE_DESC_TYPE_SHIFT) & PDE_DESC_TYPE_MASK);
+	*wp |= ((prof << PDE_BG_PROFILE_SHIFT) & PDE_BG_PROFILE_MASK);
+	*wp |= ((len << PDE_BLOCK_LEN_SHIFT) & PDE_BLOCK_LEN_MASK);
+	*wp |= ((ec << PDE_ERR_CTRL_SHIFT) & PDE_ERR_CTRL_MASK);
+	*wp = le32_to_cpu(*wp);
+}
+
+/* inline function to set apptag and reftag fields of PDE */
+static inline void
+lpfc_pde_set_dif_parms(struct lpfc_pde *p, u16 apptagmask, u16 apptagval,
+		u32 reftag)
+{
+	uint32_t *wp = &p->apptag;
+	*wp &= 0;
+	*wp |= ((apptagmask << PDE_APPTAG_MASK_SHIFT) & PDE_APPTAG_MASK_MASK);
+	*wp |= ((apptagval << PDE_APPTAG_VAL_SHIFT) & PDE_APPTAG_VAL_MASK);
+	*wp = le32_to_cpu(*wp);
+	wp = &p->reftag;
+	*wp = le32_to_cpu(reftag);
+}
+
+
 /* Structure for MB Command LOAD_SM and DOWN_LOAD */
 
 typedef struct {
@@ -2359,6 +2470,30 @@ typedef struct {
 #define  DMP_RSP_OFFSET          0x14   /* word 5 contains first word of rsp */
 #define  DMP_RSP_SIZE            0x6C   /* maximum of 27 words of rsp data */
 
+#define  WAKE_UP_PARMS_REGION_ID    4
+#define  WAKE_UP_PARMS_WORD_SIZE   15
+
+/* Option rom version structure */
+struct prog_id {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint8_t  type;
+	uint8_t  id;
+	uint32_t ver:4;  /* Major Version */
+	uint32_t rev:4;  /* Revision */
+	uint32_t lev:2;  /* Level */
+	uint32_t dist:2; /* Dist Type */
+	uint32_t num:4;  /* number after dist type */
+#else /*  __LITTLE_ENDIAN_BITFIELD */
+	uint32_t num:4;  /* number after dist type */
+	uint32_t dist:2; /* Dist Type */
+	uint32_t lev:2;  /* Level */
+	uint32_t rev:4;  /* Revision */
+	uint32_t ver:4;  /* Major Version */
+	uint8_t  id;
+	uint8_t  type;
+#endif
+};
+
 /* Structure for MB Command UPDATE_CFG (0x1B) */
 
 struct update_cfg_var {
@@ -2552,11 +2687,19 @@ typedef struct {
 
 	uint32_t pcbLow;       /* bit 31:0  of memory based port config block */
 	uint32_t pcbHigh;      /* bit 63:32 of memory based port config block */
-	uint32_t hbainit[6];
+	uint32_t hbainit[5];
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t hps	   :  1; /* bit 31 word9 Host Pointer in slim */
+	uint32_t rsvd	   : 31; /* least significant 31 bits of word 9 */
+#else   /*  __LITTLE_ENDIAN */
+	uint32_t rsvd      : 31; /* least significant 31 bits of word 9 */
+	uint32_t hps	   :  1; /* bit 31 word9 Host Pointer in slim */
+#endif
 
 #ifdef __BIG_ENDIAN_BITFIELD
-	uint32_t rsvd      : 24;  /* Reserved                             */
-	uint32_t cmv	   :  1;  /* Configure Max VPIs                   */
+	uint32_t rsvd1     : 23;  /* Reserved                             */
+	uint32_t cbg       :  1;  /* Configure BlockGuard                 */
+	uint32_t cmv       :  1;  /* Configure Max VPIs                   */
 	uint32_t ccrp      :  1;  /* Config Command Ring Polling          */
 	uint32_t csah      :  1;  /* Configure Synchronous Abort Handling */
 	uint32_t chbs      :  1;  /* Cofigure Host Backing store          */
@@ -2573,10 +2716,12 @@ typedef struct {
 	uint32_t csah      :  1;  /* Configure Synchronous Abort Handling */
 	uint32_t ccrp      :  1;  /* Config Command Ring Polling          */
 	uint32_t cmv	   :  1;  /* Configure Max VPIs                   */
-	uint32_t rsvd      : 24;  /* Reserved                             */
+	uint32_t cbg       :  1;  /* Configure BlockGuard                 */
+	uint32_t rsvd1     : 23;  /* Reserved                             */
 #endif
 #ifdef __BIG_ENDIAN_BITFIELD
-	uint32_t rsvd2     : 24;  /* Reserved                             */
+	uint32_t rsvd2     : 23;  /* Reserved                             */
+	uint32_t gbg       :  1;  /* Grant BlockGuard                     */
 	uint32_t gmv	   :  1;  /* Grant Max VPIs                       */
 	uint32_t gcrp	   :  1;  /* Grant Command Ring Polling           */
 	uint32_t gsah	   :  1;  /* Grant Synchronous Abort Handling     */
@@ -2594,7 +2739,8 @@ typedef struct {
 	uint32_t gsah	   :  1;  /* Grant Synchronous Abort Handling     */
 	uint32_t gcrp	   :  1;  /* Grant Command Ring Polling           */
 	uint32_t gmv	   :  1;  /* Grant Max VPIs                       */
-	uint32_t rsvd2     : 24;  /* Reserved                             */
+	uint32_t gbg       :  1;  /* Grant BlockGuard                     */
+	uint32_t rsvd2     : 23;  /* Reserved                             */
 #endif
 
 #ifdef __BIG_ENDIAN_BITFIELD
@@ -3214,6 +3360,94 @@ struct que_xri64cx_ext_fields {
 	struct lpfc_hbq_entry	buff[5];
 };
 
+struct sli3_bg_fields {
+	uint32_t filler[6];	/* word 8-13 in IOCB */
+	uint32_t bghm;		/* word 14 - BlockGuard High Water Mark */
+/* Bitfields for bgstat (BlockGuard Status - word 15 of IOCB) */
+#define BGS_BIDIR_BG_PROF_MASK		0xff000000
+#define BGS_BIDIR_BG_PROF_SHIFT		24
+#define BGS_BIDIR_ERR_COND_FLAGS_MASK	0x003f0000
+#define BGS_BIDIR_ERR_COND_SHIFT	16
+#define BGS_BG_PROFILE_MASK		0x0000ff00
+#define BGS_BG_PROFILE_SHIFT		8
+#define BGS_INVALID_PROF_MASK		0x00000020
+#define BGS_INVALID_PROF_SHIFT		5
+#define BGS_UNINIT_DIF_BLOCK_MASK	0x00000010
+#define BGS_UNINIT_DIF_BLOCK_SHIFT	4
+#define BGS_HI_WATER_MARK_PRESENT_MASK	0x00000008
+#define BGS_HI_WATER_MARK_PRESENT_SHIFT	3
+#define BGS_REFTAG_ERR_MASK		0x00000004
+#define BGS_REFTAG_ERR_SHIFT		2
+#define BGS_APPTAG_ERR_MASK		0x00000002
+#define BGS_APPTAG_ERR_SHIFT		1
+#define BGS_GUARD_ERR_MASK		0x00000001
+#define BGS_GUARD_ERR_SHIFT		0
+	uint32_t bgstat;	/* word 15 - BlockGuard Status */
+};
+
+static inline uint32_t
+lpfc_bgs_get_bidir_bg_prof(uint32_t bgstat)
+{
+	return (le32_to_cpu(bgstat) & BGS_BIDIR_BG_PROF_MASK) >>
+				BGS_BIDIR_BG_PROF_SHIFT;
+}
+
+static inline uint32_t
+lpfc_bgs_get_bidir_err_cond(uint32_t bgstat)
+{
+	return (le32_to_cpu(bgstat) & BGS_BIDIR_ERR_COND_FLAGS_MASK) >>
+				BGS_BIDIR_ERR_COND_SHIFT;
+}
+
+static inline uint32_t
+lpfc_bgs_get_bg_prof(uint32_t bgstat)
+{
+	return (le32_to_cpu(bgstat) & BGS_BG_PROFILE_MASK) >>
+				BGS_BG_PROFILE_SHIFT;
+}
+
+static inline uint32_t
+lpfc_bgs_get_invalid_prof(uint32_t bgstat)
+{
+	return (le32_to_cpu(bgstat) & BGS_INVALID_PROF_MASK) >>
+				BGS_INVALID_PROF_SHIFT;
+}
+
+static inline uint32_t
+lpfc_bgs_get_uninit_dif_block(uint32_t bgstat)
+{
+	return (le32_to_cpu(bgstat) & BGS_UNINIT_DIF_BLOCK_MASK) >>
+				BGS_UNINIT_DIF_BLOCK_SHIFT;
+}
+
+static inline uint32_t
+lpfc_bgs_get_hi_water_mark_present(uint32_t bgstat)
+{
+	return (le32_to_cpu(bgstat) & BGS_HI_WATER_MARK_PRESENT_MASK) >>
+				BGS_HI_WATER_MARK_PRESENT_SHIFT;
+}
+
+static inline uint32_t
+lpfc_bgs_get_reftag_err(uint32_t bgstat)
+{
+	return (le32_to_cpu(bgstat) & BGS_REFTAG_ERR_MASK) >>
+				BGS_REFTAG_ERR_SHIFT;
+}
+
+static inline uint32_t
+lpfc_bgs_get_apptag_err(uint32_t bgstat)
+{
+	return (le32_to_cpu(bgstat) & BGS_APPTAG_ERR_MASK) >>
+				BGS_APPTAG_ERR_SHIFT;
+}
+
+static inline uint32_t
+lpfc_bgs_get_guard_err(uint32_t bgstat)
+{
+	return (le32_to_cpu(bgstat) & BGS_GUARD_ERR_MASK) >>
+				BGS_GUARD_ERR_SHIFT;
+}
+
 #define LPFC_EXT_DATA_BDE_COUNT 3
 struct fcp_irw_ext {
 	uint32_t	io_tag64_low;
@@ -3322,6 +3556,9 @@ typedef struct _IOCB {	/* IOCB structure */
 		struct que_xri64cx_ext_fields que_xri64cx_ext_words;
 		struct fcp_irw_ext fcp_ext;
 		uint32_t sli3Words[24]; /* 96 extra bytes for SLI-3 */
+
+		/* words 8-15 for BlockGuard */
+		struct sli3_bg_fields sli3_bg;
 	} unsli3;
 
 #define ulpCt_h ulpXS
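The PDE setters and the lpfc_bgs_get_*() accessors above all use the same mask-and-shift packing into a 32-bit word. A freestanding round-trip check of that encoding, reusing the same masks in user space and ignoring the endian conversion the driver applies:

#include <assert.h>
#include <stdint.h>

#define PDE_DESC_TYPE_MASK      0xff000000
#define PDE_DESC_TYPE_SHIFT     24
#define PDE_BG_PROFILE_MASK     0x00ff0000
#define PDE_BG_PROFILE_SHIFT    16
#define PDE_BLOCK_LEN_MASK      0x0000fffc
#define PDE_BLOCK_LEN_SHIFT     2
#define PDE_ERR_CTRL_MASK       0x00000003

static uint32_t pack_parms(uint8_t desc, uint8_t prof, uint16_t len, uint8_t ec)
{
	len >>= 2;      /* the adapter appends two 0 bits to the length */
	return (((uint32_t)desc << PDE_DESC_TYPE_SHIFT) & PDE_DESC_TYPE_MASK) |
	       (((uint32_t)prof << PDE_BG_PROFILE_SHIFT) & PDE_BG_PROFILE_MASK) |
	       (((uint32_t)len << PDE_BLOCK_LEN_SHIFT) & PDE_BLOCK_LEN_MASK) |
	       ((uint32_t)ec & PDE_ERR_CTRL_MASK);
}

int main(void)
{
	uint32_t w = pack_parms(0x81, 128, 512, 0); /* PDE1, profile A1 */

	assert(((w & PDE_DESC_TYPE_MASK) >> PDE_DESC_TYPE_SHIFT) == 0x81);
	assert(((w & PDE_BG_PROFILE_MASK) >> PDE_BG_PROFILE_SHIFT) == 128);
	assert(((w & PDE_BLOCK_LEN_MASK) >> PDE_BLOCK_LEN_SHIFT) == (512 >> 2));
	return 0;
}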
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 909be3301bba..4c77038c8f1c 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -45,6 +45,12 @@
 #include "lpfc_vport.h"
 #include "lpfc_version.h"
 
+char *_dump_buf_data;
+unsigned long _dump_buf_data_order;
+char *_dump_buf_dif;
+unsigned long _dump_buf_dif_order;
+spinlock_t _dump_buf_lock;
+
 static int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int);
 static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
 static int lpfc_post_rcv_buf(struct lpfc_hba *);
@@ -236,6 +242,51 @@ lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
 }
 
 /**
+ * lpfc_dump_wakeup_param_cmpl: Completion handler for dump memory mailbox
+ *     command used for getting wake up parameters.
+ * @phba: pointer to lpfc hba data structure.
+ * @pmboxq: pointer to the driver internal queue element for mailbox command.
+ *
+ * This is the completion handler for the dump mailbox command used to get
+ * wake up parameters. When this command completes, the response contains the
+ * Option ROM version of the HBA. This function translates the version number
+ * into a human readable string and stores it in OptionROMVersion.
+ **/
+static void
+lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
+{
+	struct prog_id *prg;
+	uint32_t prog_id_word;
+	char dist = ' ';
+	/* character array used for decoding dist type. */
+	char dist_char[] = "nabx";
+
+	if (pmboxq->mb.mbxStatus != MBX_SUCCESS) {
+		mempool_free(pmboxq, phba->mbox_mem_pool);
+		return;
+	}
+
+	prg = (struct prog_id *) &prog_id_word;
+
+	/* word 7 contains the Option ROM version */
+	prog_id_word = pmboxq->mb.un.varWords[7];
+
+	/* Decode the Option rom version word to a readable string */
+	if (prg->dist < 4)
+		dist = dist_char[prg->dist];
+
+	if ((prg->dist == 3) && (prg->num == 0))
+		sprintf(phba->OptionROMVersion, "%d.%d%d",
+			prg->ver, prg->rev, prg->lev);
+	else
+		sprintf(phba->OptionROMVersion, "%d.%d%d%c%d",
+			prg->ver, prg->rev, prg->lev,
+			dist, prg->num);
+	mempool_free(pmboxq, phba->mbox_mem_pool);
+	return;
+}
+
+/**
  * lpfc_config_port_post: Perform lpfc initialization after config port.
  * @phba: pointer to lpfc hba data structure.
  *
@@ -482,6 +533,20 @@ lpfc_config_port_post(struct lpfc_hba *phba)
 				rc);
 		mempool_free(pmb, phba->mbox_mem_pool);
 	}
+
+	/* Get Option rom version */
+	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	lpfc_dump_wakeup_param(phba, pmb);
+	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
+	pmb->vport = phba->pport;
+	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
+
+	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
+				"to get Option ROM version status x%x\n", rc);
+		mempool_free(pmb, phba->mbox_mem_pool);
+	}
+
 	return 0;
 }
 
@@ -686,11 +751,6 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
 		return;
 
 	spin_lock_irq(&phba->pport->work_port_lock);
-	/* If the timer is already canceled do nothing */
-	if (!(phba->pport->work_port_events & WORKER_HB_TMO)) {
-		spin_unlock_irq(&phba->pport->work_port_lock);
-		return;
-	}
 
 	if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ,
 		jiffies)) {
@@ -833,8 +893,7 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
 	fc_host_post_vendor_event(shost, fc_get_event_number(),
 				  sizeof(board_event),
 				  (char *) &board_event,
-				  SCSI_NL_VID_TYPE_PCI
-				  | PCI_VENDOR_ID_EMULEX);
+				  LPFC_NL_VENDOR_ID);
 
 	if (phba->work_hs & HS_FFER6) {
 		/* Re-establishing Link */
@@ -1984,6 +2043,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
 	shost->max_lun = vport->cfg_max_luns;
 	shost->this_id = -1;
 	shost->max_cmd_len = 16;
+
 	/*
 	 * Set initial can_queue value since 0 is no longer supported and
 	 * scsi_add_host will fail. This will be adjusted later based on the
@@ -2042,8 +2102,6 @@ destroy_port(struct lpfc_vport *vport)
 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 	struct lpfc_hba  *phba = vport->phba;
 
-	kfree(vport->vname);
-
 	lpfc_debugfs_terminate(vport);
 	fc_remove_host(shost);
 	scsi_remove_host(shost);
@@ -2226,8 +2284,7 @@ lpfc_enable_msix(struct lpfc_hba *phba)
 				ARRAY_SIZE(phba->msix_entries));
 	if (rc) {
 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-				"0420 Enable MSI-X failed (%d), continuing "
-				"with MSI\n", rc);
+				"0420 PCI enable MSI-X failed (%d)\n", rc);
 		goto msi_fail_out;
 	} else
 		for (i = 0; i < LPFC_MSIX_VECTORS; i++)
@@ -2244,9 +2301,9 @@ lpfc_enable_msix(struct lpfc_hba *phba)
 	rc = request_irq(phba->msix_entries[0].vector, &lpfc_sp_intr_handler,
 			 IRQF_SHARED, LPFC_SP_DRIVER_HANDLER_NAME, phba);
 	if (rc) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
 				"0421 MSI-X slow-path request_irq failed "
-				"(%d), continuing with MSI\n", rc);
+				"(%d)\n", rc);
 		goto msi_fail_out;
 	}
 
@@ -2255,9 +2312,9 @@ lpfc_enable_msix(struct lpfc_hba *phba)
 			 IRQF_SHARED, LPFC_FP_DRIVER_HANDLER_NAME, phba);
 
 	if (rc) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
 				"0429 MSI-X fast-path request_irq failed "
-				"(%d), continuing with MSI\n", rc);
+				"(%d)\n", rc);
 		goto irq_fail_out;
 	}
 
@@ -2278,7 +2335,7 @@ lpfc_enable_msix(struct lpfc_hba *phba)
 		goto mbx_fail_out;
 	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
 	if (rc != MBX_SUCCESS) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
 				"0351 Config MSI mailbox command failed, "
 				"mbxCmd x%x, mbxStatus x%x\n",
 				pmb->mb.mbxCommand, pmb->mb.mbxStatus);
@@ -2327,6 +2384,195 @@ lpfc_disable_msix(struct lpfc_hba *phba)
 }
 
 /**
+ * lpfc_enable_msi: Enable MSI interrupt mode.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to enable the MSI interrupt mode. The kernel
+ * function pci_enable_msi() is called to enable the MSI vector. The
+ * device driver is responsible for calling request_irq() to register the
+ * MSI vector with an interrupt handler, which is done in this function.
+ *
+ * Return codes
+ * 	0 - successful
+ * 	other values - error
+ */
+static int
+lpfc_enable_msi(struct lpfc_hba *phba)
+{
+	int rc;
+
+	rc = pci_enable_msi(phba->pcidev);
+	if (!rc)
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"0462 PCI enable MSI mode success.\n");
+	else {
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"0471 PCI enable MSI mode failed (%d)\n", rc);
+		return rc;
+	}
+
+	rc = request_irq(phba->pcidev->irq, lpfc_intr_handler,
+			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
+	if (rc) {
+		pci_disable_msi(phba->pcidev);
+		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+				"0478 MSI request_irq failed (%d)\n", rc);
+	}
+	return rc;
+}
+
+/**
+ * lpfc_disable_msi: Disable MSI interrupt mode.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to disable the MSI interrupt mode. The driver
+ * calls free_irq() on the MSI vector it has done request_irq() on before
+ * calling pci_disable_msi(). Failure to do so results in a BUG_ON() and
+ * the device is left with MSI enabled, leaking its vector.
+ */
+
+static void
+lpfc_disable_msi(struct lpfc_hba *phba)
+{
+	free_irq(phba->pcidev->irq, phba);
+	pci_disable_msi(phba->pcidev);
+	return;
+}
+
+/**
+ * lpfc_log_intr_mode: Log the active interrupt mode
+ * @phba: pointer to lpfc hba data structure.
+ * @intr_mode: active interrupt mode adopted.
+ *
+ * This routine is invoked to log the currently used active interrupt mode
+ * to the device.
+ */
+static void
+lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
+{
+	switch (intr_mode) {
+	case 0:
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"0470 Enable INTx interrupt mode.\n");
+		break;
+	case 1:
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"0481 Enabled MSI interrupt mode.\n");
+		break;
+	case 2:
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"0480 Enabled MSI-X interrupt mode.\n");
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0482 Illegal interrupt mode.\n");
+		break;
+	}
+	return;
+}
+
+static void
+lpfc_stop_port(struct lpfc_hba *phba)
+{
+	/* Clear all interrupt enable conditions */
+	writel(0, phba->HCregaddr);
+	readl(phba->HCregaddr); /* flush */
+	/* Clear all pending interrupts */
+	writel(0xffffffff, phba->HAregaddr);
+	readl(phba->HAregaddr); /* flush */
+
+	/* Reset some HBA SLI setup states */
+	lpfc_stop_phba_timers(phba);
+	phba->pport->work_port_events = 0;
+
+	return;
+}
+
+/**
+ * lpfc_enable_intr: Enable device interrupt.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to enable the device interrupt and associate the
+ * driver's interrupt handler(s) with interrupt vector(s). Depending on the
+ * interrupt mode configured for the driver, it will fall back from the
+ * configured interrupt mode to an interrupt mode supported by the
+ * platform, kernel, and device, in the order MSI-X -> MSI -> IRQ.
+ *
+ * Return codes
+ *   0 - successful
+ *   other values - error
+ **/
+static uint32_t
+lpfc_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
+{
+	uint32_t intr_mode = LPFC_INTR_ERROR;
+	int retval;
+
+	if (cfg_mode == 2) {
+		/* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
+		retval = lpfc_sli_config_port(phba, 3);
+		if (!retval) {
+			/* Now, try to enable MSI-X interrupt mode */
+			retval = lpfc_enable_msix(phba);
+			if (!retval) {
+				/* Indicate initialization to MSI-X mode */
+				phba->intr_type = MSIX;
+				intr_mode = 2;
+			}
+		}
+	}
+
+	/* Fall back to MSI if MSI-X initialization failed */
+	if (cfg_mode >= 1 && phba->intr_type == NONE) {
+		retval = lpfc_enable_msi(phba);
+		if (!retval) {
+			/* Indicate initialization to MSI mode */
+			phba->intr_type = MSI;
+			intr_mode = 1;
+		}
+	}
+
+	/* Fall back to INTx if both MSI-X and MSI initialization failed */
+	if (phba->intr_type == NONE) {
+		retval = request_irq(phba->pcidev->irq, lpfc_intr_handler,
+				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
+		if (!retval) {
+			/* Indicate initialization to INTx mode */
+			phba->intr_type = INTx;
+			intr_mode = 0;
+		}
+	}
+	return intr_mode;
+}
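+
+/*
+ * Usage sketch (illustrative only): a caller starts from the user-configured
+ * mode and records whichever mode was actually brought up:
+ *
+ *	uint32_t intr_mode = lpfc_enable_intr(phba, phba->cfg_use_msi);
+ *	if (intr_mode == LPFC_INTR_ERROR)
+ *		return -ENODEV;
+ *	phba->intr_mode = intr_mode;	(2 = MSI-X, 1 = MSI, 0 = INTx)
+ */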
+
+/**
+ * lpfc_disable_intr: Disable device interrupt.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to disable device interrupt and disassociate the
+ * driver's interrupt handler(s) from interrupt vector(s). Depending on the
+ * interrupt mode, the driver will release the interrupt vector(s) for the
+ * message signaled interrupt.
+ **/
+static void
+lpfc_disable_intr(struct lpfc_hba *phba)
+{
+	/* Disable the currently initialized interrupt mode */
+	if (phba->intr_type == MSIX)
+		lpfc_disable_msix(phba);
+	else if (phba->intr_type == MSI)
+		lpfc_disable_msi(phba);
+	else if (phba->intr_type == INTx)
+		free_irq(phba->pcidev->irq, phba);
+
+	/* Reset interrupt management states */
+	phba->intr_type = NONE;
+	phba->sli.slistat.sli_intr = 0;
+
+	return;
+}
+
+/**
  * lpfc_pci_probe_one: lpfc PCI probe func to register device to PCI subsystem.
  * @pdev: pointer to PCI device
  * @pid: pointer to PCI device identifier
@@ -2356,6 +2602,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
 	int error = -ENODEV, retval;
 	int  i, hbq_count;
 	uint16_t iotag;
+	uint32_t cfg_mode, intr_mode;
 	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
 	struct lpfc_adapter_event_header adapter_event;
 
@@ -2409,6 +2656,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
 	phba->eratt_poll.data = (unsigned long) phba;
 
 	pci_set_master(pdev);
+	pci_save_state(pdev);
 	pci_try_set_mwi(pdev);
 
 	if (pci_set_dma_mask(phba->pcidev, DMA_64BIT_MASK) != 0)
@@ -2557,7 +2805,6 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
 	lpfc_debugfs_initialize(vport);
 
 	pci_set_drvdata(pdev, shost);
-	phba->intr_type = NONE;
 
 	phba->MBslimaddr = phba->slim_memmap_p;
 	phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
@@ -2565,63 +2812,58 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
 	phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
 	phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
 
-	/* Configure and enable interrupt */
-	if (phba->cfg_use_msi == 2) {
-		/* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
-		error = lpfc_sli_config_port(phba, 3);
-		if (error)
-			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-				"0427 Firmware not capable of SLI 3 mode.\n");
-		else {
-			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-				"0426 Firmware capable of SLI 3 mode.\n");
-			/* Now, try to enable MSI-X interrupt mode */
-			error = lpfc_enable_msix(phba);
-			if (!error) {
-				phba->intr_type = MSIX;
-				lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-						"0430 enable MSI-X mode.\n");
-			}
-		}
-	}
-
-	/* Fallback to MSI if MSI-X initialization failed */
-	if (phba->cfg_use_msi >= 1 && phba->intr_type == NONE) {
-		retval = pci_enable_msi(phba->pcidev);
-		if (!retval) {
-			phba->intr_type = MSI;
-			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-					"0473 enable MSI mode.\n");
-		} else
-			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-					"0452 enable IRQ mode.\n");
-	}
-
-	/* MSI-X is the only case the doesn't need to call request_irq */
-	if (phba->intr_type != MSIX) {
-		retval = request_irq(phba->pcidev->irq, lpfc_intr_handler,
-				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
-		if (retval) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0451 Enable "
-					"interrupt handler failed\n");
-			error = retval;
-			goto out_disable_msi;
-		} else if (phba->intr_type != MSI)
-			phba->intr_type = INTx;
-	}
-
+	/* Configure sysfs attributes */
 	if (lpfc_alloc_sysfs_attr(vport)) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"1476 Failed to allocate sysfs attr\n");
 		error = -ENOMEM;
-		goto out_free_irq;
+		goto out_destroy_port;
 	}
 
-	if (lpfc_sli_hba_setup(phba)) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"1477 Failed to set up hba\n");
-		error = -ENODEV;
-		goto out_remove_device;
+	cfg_mode = phba->cfg_use_msi;
+	while (true) {
+		/* Configure and enable interrupt */
+		intr_mode = lpfc_enable_intr(phba, cfg_mode);
+		if (intr_mode == LPFC_INTR_ERROR) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"0426 Failed to enable interrupt.\n");
+			goto out_free_sysfs_attr;
+		}
+		/* HBA SLI setup */
+		if (lpfc_sli_hba_setup(phba)) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"1477 Failed to set up hba\n");
+			error = -ENODEV;
+			goto out_remove_device;
+		}
+
+		/* Wait 50ms for the interrupts of previous mailbox commands */
+		msleep(50);
+		/* Check active interrupts received */
+		if (phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
+			/* Log the current active interrupt mode */
+			phba->intr_mode = intr_mode;
+			lpfc_log_intr_mode(phba, intr_mode);
+			break;
+		} else {
+			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+					"0451 Configure interrupt mode (%d) "
+					"failed active interrupt test.\n",
+					intr_mode);
+			if (intr_mode == 0) {
+				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+						"0479 Failed to enable "
+						"interrupt.\n");
+				error = -ENODEV;
+				goto out_remove_device;
+			}
+			/* Stop HBA SLI setups */
+			lpfc_stop_port(phba);
+			/* Disable the current interrupt mode */
+			lpfc_disable_intr(phba);
+			/* Try next level of interrupt mode */
+			cfg_mode = --intr_mode;
+		}
 	}
 
 	/*
@@ -2629,6 +2871,75 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
 	 * the value of can_queue.
 	 */
 	shost->can_queue = phba->cfg_hba_queue_depth - 10;
+	if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
+
+		if (lpfc_prot_mask && lpfc_prot_guard) {
+			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+					"1478 Registering BlockGuard with the "
+					"SCSI layer\n");
+
+			scsi_host_set_prot(shost, lpfc_prot_mask);
+			scsi_host_set_guard(shost, lpfc_prot_guard);
+		}
+	}
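+
+	/*
+	 * For reference (assumed values, not set by this patch): lpfc_prot_mask
+	 * would typically combine flags such as SHOST_DIF_TYPE1_PROTECTION |
+	 * SHOST_DIX_TYPE1_PROTECTION, and lpfc_prot_guard would be
+	 * SHOST_DIX_GUARD_IP or SHOST_DIX_GUARD_CRC, matching the guard types
+	 * handled in lpfc_sc_to_sli_prof() in lpfc_scsi.c.
+	 */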
+
+	if (!_dump_buf_data) {
+		int pagecnt = 10;
+		while (pagecnt) {
+			spin_lock_init(&_dump_buf_lock);
+			_dump_buf_data =
+				(char *) __get_free_pages(GFP_KERNEL, pagecnt);
+			if (_dump_buf_data) {
+				printk(KERN_ERR "BLKGRD allocated %d pages for "
+						"_dump_buf_data at 0x%p\n",
+						(1 << pagecnt), _dump_buf_data);
+				_dump_buf_data_order = pagecnt;
+				memset(_dump_buf_data, 0, ((1 << PAGE_SHIFT)
+							   << pagecnt));
+				break;
+			} else {
+				--pagecnt;
+			}
+
+		}
+
+		if (!_dump_buf_data_order)
+			printk(KERN_ERR "BLKGRD ERROR unable to allocate "
+					"memory for hexdump\n");
+
+	} else {
+		printk(KERN_ERR "BLKGRD already allocated _dump_buf_data=0x%p"
+		       "\n", _dump_buf_data);
+	}
+
+	if (!_dump_buf_dif) {
+		int pagecnt = 10;
+		while (pagecnt) {
+			_dump_buf_dif =
+				(char *) __get_free_pages(GFP_KERNEL, pagecnt);
+			if (_dump_buf_dif) {
+				printk(KERN_ERR "BLKGRD allocated %d pages for "
+						"_dump_buf_dif at 0x%p\n",
+						(1 << pagecnt), _dump_buf_dif);
+				_dump_buf_dif_order = pagecnt;
+				memset(_dump_buf_dif, 0, ((1 << PAGE_SHIFT)
+							  << pagecnt));
+				break;
+			} else {
+				--pagecnt;
+			}
+
+		}
+
+		if (!_dump_buf_dif_order)
+			printk(KERN_ERR "BLKGRD ERROR unable to allocate "
+					"memory for hexdump\n");
+
+	} else {
+		printk(KERN_ERR "BLKGRD already allocated _dump_buf_dif=0x%p\n",
+				_dump_buf_dif);
+	}
 
 	lpfc_host_attrib_init(shost);
 
@@ -2646,29 +2957,22 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
 	fc_host_post_vendor_event(shost, fc_get_event_number(),
 		sizeof(adapter_event),
 		(char *) &adapter_event,
-		SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
-
-	scsi_scan_host(shost);
+		LPFC_NL_VENDOR_ID);
 
 	return 0;
 
 out_remove_device:
-	lpfc_free_sysfs_attr(vport);
 	spin_lock_irq(shost->host_lock);
 	vport->load_flag |= FC_UNLOADING;
 	spin_unlock_irq(shost->host_lock);
-out_free_irq:
 	lpfc_stop_phba_timers(phba);
 	phba->pport->work_port_events = 0;
-
-	if (phba->intr_type == MSIX)
-		lpfc_disable_msix(phba);
-	else
-		free_irq(phba->pcidev->irq, phba);
-
-out_disable_msi:
-	if (phba->intr_type == MSI)
-		pci_disable_msi(phba->pcidev);
+	lpfc_disable_intr(phba);
+	lpfc_sli_hba_down(phba);
+	lpfc_sli_brdrestart(phba);
+out_free_sysfs_attr:
+	lpfc_free_sysfs_attr(vport);
+out_destroy_port:
 	destroy_port(vport);
 out_kthread_stop:
 	kthread_stop(phba->worker_thread);
@@ -2709,7 +3013,7 @@ out:
  * @pdev: pointer to PCI device
  *
  * This routine is to be registered to the kernel's PCI subsystem. When an
- * Emulex HBA is removed from PCI bus. It perform all the necessary cleanup
+ * Emulex HBA is removed from the PCI bus, it performs all the necessary
  * for the HBA device to be removed from the PCI subsystem properly.
  **/
 static void __devexit
@@ -2717,18 +3021,27 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
 {
 	struct Scsi_Host  *shost = pci_get_drvdata(pdev);
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_vport **vports;
 	struct lpfc_hba   *phba = vport->phba;
+	int i;
 	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
 
 	spin_lock_irq(&phba->hbalock);
 	vport->load_flag |= FC_UNLOADING;
 	spin_unlock_irq(&phba->hbalock);
 
-	kfree(vport->vname);
 	lpfc_free_sysfs_attr(vport);
 
 	kthread_stop(phba->worker_thread);
 
+	/* Release all the vports against this physical port */
+	vports = lpfc_create_vport_work_array(phba);
+	if (vports != NULL)
+		for (i = 1; i <= phba->max_vpi && vports[i] != NULL; i++)
+			fc_vport_terminate(vports[i]->fc_vport);
+	lpfc_destroy_vport_work_array(phba, vports);
+
+	/* Remove FC host and then SCSI host with the physical port */
 	fc_remove_host(shost);
 	scsi_remove_host(shost);
 	lpfc_cleanup(vport);
@@ -2748,13 +3061,8 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
 
 	lpfc_debugfs_terminate(vport);
 
-	if (phba->intr_type == MSIX)
-		lpfc_disable_msix(phba);
-	else {
-		free_irq(phba->pcidev->irq, phba);
-		if (phba->intr_type == MSI)
-			pci_disable_msi(phba->pcidev);
-	}
+	/* Disable interrupt */
+	lpfc_disable_intr(phba);
 
 	pci_set_drvdata(pdev, NULL);
 	scsi_host_put(shost);
@@ -2786,6 +3094,115 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
 }
 
 /**
+ * lpfc_pci_suspend_one: lpfc PCI func to suspend device for power management.
+ * @pdev: pointer to PCI device
+ * @msg: power management message
+ *
+ * This routine is to be registered to the kernel's PCI subsystem to support
+ * system Power Management (PM). When PM invokes this method, it quiesces the
+ * device by stopping the driver's worker thread for the device, turning off
+ * the device's interrupts and DMA, and bringing the device offline. Note
+ * that the driver implements only the minimum PM requirements of a
+ * power-aware driver: all possible PM messages (SUSPEND, HIBERNATE,
+ * FREEZE) passed to the suspend() method are treated as SUSPEND, and the
+ * driver fully reinitializes its device in the resume() method.
+ * Consequently, the driver sets the device to the PCI_D3hot state in PCI
+ * config space instead of setting it according to the @msg provided by PM.
+ *
+ * Return code
+ *   0 - driver suspended the device
+ *   Error otherwise
+ **/
+static int
+lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
+{
+	struct Scsi_Host *shost = pci_get_drvdata(pdev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+			"0473 PCI device Power Management suspend.\n");
+
+	/* Bring down the device */
+	lpfc_offline_prep(phba);
+	lpfc_offline(phba);
+	kthread_stop(phba->worker_thread);
+
+	/* Disable interrupt from device */
+	lpfc_disable_intr(phba);
+
+	/* Save device state to PCI config space */
+	pci_save_state(pdev);
+	pci_set_power_state(pdev, PCI_D3hot);
+
+	return 0;
+}
+
+/**
+ * lpfc_pci_resume_one: lpfc PCI func to resume device for power management.
+ * @pdev: pointer to PCI device
+ *
+ * This routine is to be registered to the kernel's PCI subsystem to support
+ * system Power Management (PM). When PM invokes this method, it restores
+ * the device's PCI config space state and fully reinitializes the device
+ * and brings it online. Note that the driver implements only the minimum
+ * PM requirements of a power-aware driver: all possible PM messages
+ * (SUSPEND, HIBERNATE, FREEZE) passed to the suspend() method are treated
+ * as SUSPEND, and the driver fully reinitializes its device in the
+ * resume() method. Consequently, the device is set to PCI_D0 directly in
+ * PCI config space before its state is restored.
+ *
+ * Return code
+ *   0 - driver resumed the device
+ *   Error otherwise
+ **/
+static int
+lpfc_pci_resume_one(struct pci_dev *pdev)
+{
+	struct Scsi_Host *shost = pci_get_drvdata(pdev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+	uint32_t intr_mode;
+	int error;
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+			"0452 PCI device Power Management resume.\n");
+
+	/* Restore device state from PCI config space */
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+	if (pdev->is_busmaster)
+		pci_set_master(pdev);
+
+	/* Startup the kernel thread for this host adapter. */
+	phba->worker_thread = kthread_run(lpfc_do_work, phba,
+					"lpfc_worker_%d", phba->brd_no);
+	if (IS_ERR(phba->worker_thread)) {
+		error = PTR_ERR(phba->worker_thread);
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0434 PM resume failed to start worker "
+				"thread: error=x%x.\n", error);
+		return error;
+	}
+
+	/* Configure and enable interrupt */
+	intr_mode = lpfc_enable_intr(phba, phba->intr_mode);
+	if (intr_mode == LPFC_INTR_ERROR) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0430 PM resume Failed to enable interrupt\n");
+		return -EIO;
+	} else
+		phba->intr_mode = intr_mode;
+
+	/* Restart HBA and bring it online */
+	lpfc_sli_brdrestart(phba);
+	lpfc_online(phba);
+
+	/* Log the current active interrupt mode */
+	lpfc_log_intr_mode(phba, phba->intr_mode);
+
+	return 0;
+}
+
+/**
  * lpfc_io_error_detected: Driver method for handling PCI I/O error detected.
  * @pdev: pointer to PCI device.
  * @state: the current PCI connection state.
@@ -2828,13 +3245,8 @@ static pci_ers_result_t lpfc_io_error_detected(struct pci_dev *pdev,
 	pring = &psli->ring[psli->fcp_ring];
 	lpfc_sli_abort_iocb_ring(phba, pring);
 
-	if (phba->intr_type == MSIX)
-		lpfc_disable_msix(phba);
-	else {
-		free_irq(phba->pcidev->irq, phba);
-		if (phba->intr_type == MSI)
-			pci_disable_msi(phba->pcidev);
-	}
+	/* Disable interrupt */
+	lpfc_disable_intr(phba);
 
 	/* Request a slot reset. */
 	return PCI_ERS_RESULT_NEED_RESET;
@@ -2862,7 +3274,7 @@ static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev)
 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
 	struct lpfc_sli *psli = &phba->sli;
-	int error, retval;
+	uint32_t intr_mode;
 
 	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
 	if (pci_enable_device_mem(pdev)) {
@@ -2871,61 +3283,31 @@ static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev)
 		return PCI_ERS_RESULT_DISCONNECT;
 	}
 
-	pci_set_master(pdev);
+	pci_restore_state(pdev);
+	if (pdev->is_busmaster)
+		pci_set_master(pdev);
 
 	spin_lock_irq(&phba->hbalock);
 	psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
 	spin_unlock_irq(&phba->hbalock);
 
-	/* Enable configured interrupt method */
-	phba->intr_type = NONE;
-	if (phba->cfg_use_msi == 2) {
-		/* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
-		error = lpfc_sli_config_port(phba, 3);
-		if (error)
-			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-				"0478 Firmware not capable of SLI 3 mode.\n");
-		else {
-			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-				"0479 Firmware capable of SLI 3 mode.\n");
-			/* Now, try to enable MSI-X interrupt mode */
-			error = lpfc_enable_msix(phba);
-			if (!error) {
-				phba->intr_type = MSIX;
-				lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-						"0480 enable MSI-X mode.\n");
-			}
-		}
-	}
-
-	/* Fallback to MSI if MSI-X initialization failed */
-	if (phba->cfg_use_msi >= 1 && phba->intr_type == NONE) {
-		retval = pci_enable_msi(phba->pcidev);
-		if (!retval) {
-			phba->intr_type = MSI;
-			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-					"0481 enable MSI mode.\n");
-		} else
-			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-					"0470 enable IRQ mode.\n");
-	}
-
-	/* MSI-X is the only case the doesn't need to call request_irq */
-	if (phba->intr_type != MSIX) {
-		retval = request_irq(phba->pcidev->irq, lpfc_intr_handler,
-				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
-		if (retval) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-					"0471 Enable interrupt handler "
-					"failed\n");
-		} else if (phba->intr_type != MSI)
-			phba->intr_type = INTx;
-	}
+	/* Configure and enable interrupt */
+	intr_mode = lpfc_enable_intr(phba, phba->intr_mode);
+	if (intr_mode == LPFC_INTR_ERROR) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0427 Cannot re-enable interrupt after "
+				"slot reset.\n");
+		return PCI_ERS_RESULT_DISCONNECT;
+	} else
+		phba->intr_mode = intr_mode;
 
 	/* Take device offline; this will perform cleanup */
 	lpfc_offline(phba);
 	lpfc_sli_brdrestart(phba);
 
+	/* Log the current active interrupt mode */
+	lpfc_log_intr_mode(phba, phba->intr_mode);
+
 	return PCI_ERS_RESULT_RECOVERED;
 }
 
@@ -3037,6 +3419,8 @@ static struct pci_driver lpfc_driver = {
 	.id_table	= lpfc_id_table,
 	.probe		= lpfc_pci_probe_one,
 	.remove		= __devexit_p(lpfc_pci_remove_one),
+	.suspend        = lpfc_pci_suspend_one,
+	.resume         = lpfc_pci_resume_one,
 	.err_handler    = &lpfc_err_handler,
 };
 
@@ -3100,6 +3484,19 @@ lpfc_exit(void)
 	fc_release_transport(lpfc_transport_template);
 	if (lpfc_enable_npiv)
 		fc_release_transport(lpfc_vport_transport_template);
+	if (_dump_buf_data) {
+		printk(KERN_ERR "BLKGRD freeing %lu pages for _dump_buf_data "
+				"at 0x%p\n",
+				(1L << _dump_buf_data_order), _dump_buf_data);
+		free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order);
+	}
+
+	if (_dump_buf_dif) {
+		printk(KERN_ERR "BLKGRD freeing %lu pages for _dump_buf_dif "
+				"at 0x%p\n",
+				(1L << _dump_buf_dif_order), _dump_buf_dif);
+		free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
+	}
 }
 
 module_init(lpfc_init);
diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h
index 39fd2b843bec..a85b7c196bbc 100644
--- a/drivers/scsi/lpfc/lpfc_logmsg.h
+++ b/drivers/scsi/lpfc/lpfc_logmsg.h
@@ -27,6 +27,7 @@
 #define LOG_FCP                       0x40	/* FCP traffic history */
 #define LOG_NODE                      0x80	/* Node table events */
 #define LOG_TEMP                      0x100	/* Temperature sensor events */
+#define LOG_BG			      0x200	/* BlockGuard events */
 #define LOG_MISC                      0x400	/* Miscellaneous events */
 #define LOG_SLI                       0x800	/* SLI events */
 #define LOG_FCP_ERROR                 0x1000	/* log errors, not underruns */
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 7465fe746fe9..34eeb086a667 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -77,6 +77,38 @@ lpfc_dump_mem(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, uint16_t offset)
 }
 
 /**
+ * lpfc_dump_wakeup_param: Prepare a mailbox command for retrieving wakeup params.
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * This function creates a dump memory mailbox command to dump the wake-up
+ * parameters.
+ */
+void
+lpfc_dump_wakeup_param(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+	MAILBOX_t *mb;
+	void *ctx;
+
+	mb = &pmb->mb;
+	/* Save context so that we can restore after memset */
+	ctx = pmb->context2;
+
+	/* Set up to dump the wake-up parameter region */
+	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
+	mb->mbxCommand = MBX_DUMP_MEMORY;
+	mb->mbxOwner = OWN_HOST;
+	mb->un.varDmp.cv = 1;
+	mb->un.varDmp.type = DMP_NV_PARAMS;
+	mb->un.varDmp.entry_index = 0;
+	mb->un.varDmp.region_id = WAKE_UP_PARMS_REGION_ID;
+	mb->un.varDmp.word_cnt = WAKE_UP_PARMS_WORD_SIZE;
+	mb->un.varDmp.co = 0;
+	mb->un.varDmp.resp_offset = 0;
+	pmb->context2 = ctx;
+	return;
+}
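+
+/*
+ * Usage sketch (illustrative, not part of this patch): the prepared mailbox
+ * is issued like any other dump command, e.g. in polling mode:
+ *
+ *	LPFC_MBOXQ_t *pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ *	if (pmb) {
+ *		lpfc_dump_wakeup_param(phba, pmb);
+ *		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
+ *	}
+ */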
+
+/**
  * lpfc_read_nv: Prepare a mailbox command for reading HBA's NVRAM param.
  * @phba: pointer to lpfc hba data structure.
  * @pmb: pointer to the driver internal queue element for mailbox command.
@@ -1061,9 +1093,14 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 	mb->un.varCfgPort.pcbLow = putPaddrLow(pdma_addr);
 	mb->un.varCfgPort.pcbHigh = putPaddrHigh(pdma_addr);
 
+	/* The Host Group Pointer is always in SLIM */
+	mb->un.varCfgPort.hps = 1;
+
 	/* If HBA supports SLI=3 ask for it */
 
 	if (phba->sli_rev == 3 && phba->vpd.sli3Feat.cerbm) {
+		if (phba->cfg_enable_bg)
+			mb->un.varCfgPort.cbg = 1; /* configure BlockGuard */
 		mb->un.varCfgPort.cerbm = 1; /* Request HBQs */
 		mb->un.varCfgPort.ccrp = 1; /* Command Ring Polling */
 		mb->un.varCfgPort.cinb = 1; /* Interrupt Notification Block */
@@ -1163,16 +1200,11 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 				    sizeof(*phba->host_gp));
 	}
 
-	/* Setup Port Group ring pointer */
-	if (phba->sli3_options & LPFC_SLI3_INB_ENABLED) {
-		pgp_offset = offsetof(struct lpfc_sli2_slim,
-				      mbx.us.s3_inb_pgp.port);
-		phba->hbq_get = phba->mbox->us.s3_inb_pgp.hbq_get;
-	} else if (phba->sli_rev == 3) {
+	/* Setup Port Group offset */
+	if (phba->sli_rev == 3)
 		pgp_offset = offsetof(struct lpfc_sli2_slim,
 				      mbx.us.s3_pgp.port);
-		phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
-	} else
+	else
 		pgp_offset = offsetof(struct lpfc_sli2_slim, mbx.us.s2.port);
 	pdma_addr = phba->slim2p.phys + pgp_offset;
 	phba->pcb->pgpAddrHigh = putPaddrHigh(pdma_addr);
@@ -1285,10 +1317,12 @@ lpfc_mbox_get(struct lpfc_hba * phba)
 void
 lpfc_mbox_cmpl_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq)
 {
+	unsigned long iflag;
+
 	/* This function expects to be called from interrupt context */
-	spin_lock(&phba->hbalock);
+	spin_lock_irqsave(&phba->hbalock, iflag);
 	list_add_tail(&mbq->list, &phba->sli.mboxq_cmpl);
-	spin_unlock(&phba->hbalock);
+	spin_unlock_irqrestore(&phba->hbalock, iflag);
 	return;
 }
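+
+/*
+ * Editorial note on the irqsave change above (a sketch): plain spin_lock()
+ * was only safe while every caller ran in interrupt context; saving and
+ * restoring the IRQ flags lets the completion be queued from process
+ * context as well without deadlocking against the interrupt path:
+ *
+ *	unsigned long iflag;
+ *
+ *	spin_lock_irqsave(&phba->hbalock, iflag);
+ *	list_add_tail(&mbq->list, &phba->sli.mboxq_cmpl);
+ *	spin_unlock_irqrestore(&phba->hbalock, iflag);
+ */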
 
diff --git a/drivers/scsi/lpfc/lpfc_nl.h b/drivers/scsi/lpfc/lpfc_nl.h
index 1accb5a9f4e6..27d1a88a98fe 100644
--- a/drivers/scsi/lpfc/lpfc_nl.h
+++ b/drivers/scsi/lpfc/lpfc_nl.h
@@ -22,18 +22,20 @@
 #define FC_REG_LINK_EVENT		0x0001	/* link up / down events */
 #define FC_REG_RSCN_EVENT		0x0002	/* RSCN events */
 #define FC_REG_CT_EVENT			0x0004	/* CT request events */
-#define FC_REG_DUMP_EVENT		0x0008	/* Dump events */
-#define FC_REG_TEMPERATURE_EVENT	0x0010	/* temperature events */
-#define FC_REG_ELS_EVENT		0x0020	/* lpfc els events */
-#define FC_REG_FABRIC_EVENT		0x0040	/* lpfc fabric events */
-#define FC_REG_SCSI_EVENT		0x0080	/* lpfc scsi events */
-#define FC_REG_BOARD_EVENT		0x0100	/* lpfc board events */
-#define FC_REG_ADAPTER_EVENT		0x0200	/* lpfc adapter events */
+#define FC_REG_DUMP_EVENT		0x0010	/* Dump events */
+#define FC_REG_TEMPERATURE_EVENT	0x0020	/* temperature events */
+#define FC_REG_VPORTRSCN_EVENT		0x0040	/* Vport RSCN events */
+#define FC_REG_ELS_EVENT		0x0080	/* lpfc els events */
+#define FC_REG_FABRIC_EVENT		0x0100	/* lpfc fabric events */
+#define FC_REG_SCSI_EVENT		0x0200	/* lpfc scsi events */
+#define FC_REG_BOARD_EVENT		0x0400	/* lpfc board events */
+#define FC_REG_ADAPTER_EVENT		0x0800	/* lpfc adapter events */
 #define FC_REG_EVENT_MASK		(FC_REG_LINK_EVENT | \
 						FC_REG_RSCN_EVENT | \
 						FC_REG_CT_EVENT | \
 						FC_REG_DUMP_EVENT | \
 						FC_REG_TEMPERATURE_EVENT | \
+						FC_REG_VPORTRSCN_EVENT | \
 						FC_REG_ELS_EVENT | \
 						FC_REG_FABRIC_EVENT | \
 						FC_REG_SCSI_EVENT | \
@@ -52,6 +54,13 @@
  * The payload sent via the fc transport is one-way driver->application.
  */
 
+/* RSCN event header */
+struct lpfc_rscn_event_header {
+	uint32_t event_type;
+	uint32_t payload_length; /* RSCN data length in bytes */
+	uint32_t rscn_payload[];
+};
+
 /* els event header */
 struct lpfc_els_event_header {
 	uint32_t event_type;
@@ -65,6 +74,7 @@ struct lpfc_els_event_header {
 #define LPFC_EVENT_PRLO_RCV		0x02
 #define LPFC_EVENT_ADISC_RCV		0x04
 #define LPFC_EVENT_LSRJT_RCV		0x08
+#define LPFC_EVENT_LOGO_RCV		0x10
 
 /* special els lsrjt event */
 struct lpfc_lsrjt_event {
@@ -74,6 +84,11 @@ struct lpfc_lsrjt_event {
 	uint32_t explanation;
 };
 
+/* special els logo event */
+struct lpfc_logo_event {
+	struct lpfc_els_event_header header;
+	uint8_t logo_wwpn[8];
+};
 
 /* fabric event header */
 struct lpfc_fabric_event_header {
@@ -125,6 +140,7 @@ struct lpfc_scsi_varqueuedepth_event {
 /* special case scsi check condition event */
 struct lpfc_scsi_check_condition_event {
 	struct lpfc_scsi_event_header scsi_event;
+	uint8_t opcode;
 	uint8_t sense_key;
 	uint8_t asc;
 	uint8_t ascq;
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 0c25d97acb42..8f548adae9cc 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -1929,10 +1929,10 @@ lpfc_device_recov_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	if (vport->fc_flag & FC_RSCN_DEFERRED)
 		return ndlp->nlp_state;
 
+	lpfc_cancel_retry_delay_tmo(vport, ndlp);
 	spin_lock_irq(shost->host_lock);
 	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
 	spin_unlock_irq(shost->host_lock);
-	lpfc_cancel_retry_delay_tmo(vport, ndlp);
 	return ndlp->nlp_state;
 }
 
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index bd1867411821..b103b6ed4970 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -18,13 +18,14 @@
  * more details, a copy of which can be found in the file COPYING  *
  * included with this package.                                     *
  *******************************************************************/
-
 #include <linux/pci.h>
 #include <linux/interrupt.h>
 #include <linux/delay.h>
+#include <asm/unaligned.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_tcq.h>
 #include <scsi/scsi_transport_fc.h>
@@ -43,6 +44,73 @@
 #define LPFC_RESET_WAIT  2
 #define LPFC_ABORT_WAIT  2
 
+int _dump_buf_done;
+
+static char *dif_op_str[] = {
+	"SCSI_PROT_NORMAL",
+	"SCSI_PROT_READ_INSERT",
+	"SCSI_PROT_WRITE_STRIP",
+	"SCSI_PROT_READ_STRIP",
+	"SCSI_PROT_WRITE_INSERT",
+	"SCSI_PROT_READ_PASS",
+	"SCSI_PROT_WRITE_PASS",
+	"SCSI_PROT_READ_CONVERT",
+	"SCSI_PROT_WRITE_CONVERT"
+};
+
+static void
+lpfc_debug_save_data(struct scsi_cmnd *cmnd)
+{
+	void *src, *dst;
+	struct scatterlist *sgde = scsi_sglist(cmnd);
+
+	if (!_dump_buf_data) {
+		printk(KERN_ERR "BLKGRD ERROR %s _dump_buf_data is NULL\n",
+				__func__);
+		return;
+	}
+
+	if (!sgde) {
+		printk(KERN_ERR "BLKGRD ERROR: data scatterlist is null\n");
+		return;
+	}
+
+	dst = (void *) _dump_buf_data;
+	while (sgde) {
+		src = sg_virt(sgde);
+		memcpy(dst, src, sgde->length);
+		dst += sgde->length;
+		sgde = sg_next(sgde);
+	}
+}
+
+static void
+lpfc_debug_save_dif(struct scsi_cmnd *cmnd)
+{
+	void *src, *dst;
+	struct scatterlist *sgde = scsi_prot_sglist(cmnd);
+
+	if (!_dump_buf_dif) {
+		printk(KERN_ERR "BLKGRD ERROR %s _dump_buf_dif is NULL\n",
+				__func__);
+		return;
+	}
+
+	if (!sgde) {
+		printk(KERN_ERR "BLKGRD ERROR: prot scatterlist is null\n");
+		return;
+	}
+
+	dst = _dump_buf_dif;
+	while (sgde) {
+		src = sg_virt(sgde);
+		memcpy(dst, src, sgde->length);
+		dst += sgde->length;
+		sgde = sg_next(sgde);
+	}
+}
+
 /**
  * lpfc_update_stats: Update statistical data for the command completion.
  * @phba: Pointer to HBA object.
@@ -66,6 +134,8 @@ lpfc_update_stats(struct lpfc_hba *phba, struct  lpfc_scsi_buf *lpfc_cmd)
 	if (cmd->result)
 		return;
 
+	latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);
+
 	spin_lock_irqsave(shost->host_lock, flags);
 	if (!vport->stat_data_enabled ||
 		vport->stat_data_blocked ||
@@ -74,13 +144,15 @@ lpfc_update_stats(struct lpfc_hba *phba, struct  lpfc_scsi_buf *lpfc_cmd)
 		spin_unlock_irqrestore(shost->host_lock, flags);
 		return;
 	}
-	latency = jiffies_to_msecs(jiffies - lpfc_cmd->start_time);
 
 	if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
 		i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
 			phba->bucket_step;
-		if (i >= LPFC_MAX_BUCKET_COUNT)
-			i = LPFC_MAX_BUCKET_COUNT;
+		/* check array subscript bounds */
+		if (i < 0)
+			i = 0;
+		else if (i >= LPFC_MAX_BUCKET_COUNT)
+			i = LPFC_MAX_BUCKET_COUNT - 1;
 	} else {
 		for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
 			if (latency <= (phba->bucket_base +
@@ -92,7 +164,6 @@ lpfc_update_stats(struct lpfc_hba *phba, struct  lpfc_scsi_buf *lpfc_cmd)
 	spin_unlock_irqrestore(shost->host_lock, flags);
 }
 
-
 /**
  * lpfc_send_sdev_queuedepth_change_event: Posts a queuedepth change
  *                   event.
@@ -148,12 +219,19 @@ lpfc_send_sdev_queuedepth_change_event(struct lpfc_hba *phba,
 	return;
 }
 
-/*
- * This function is called with no lock held when there is a resource
- * error in driver or in firmware.
- */
+/**
+ * lpfc_rampdown_queue_depth: Post RAMP_DOWN_QUEUE event to worker thread.
+ * @phba: The Hba for which this call is being executed.
+ *
+ * This routine is called when there is a resource error in the driver or
+ * firmware. It posts a WORKER_RAMP_DOWN_QUEUE event for @phba, at most one
+ * event per second, and wakes up the worker thread of @phba to process the
+ * event.
+ *
+ * This routine should be called with no lock held.
+ **/
 void
-lpfc_adjust_queue_depth(struct lpfc_hba *phba)
+lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
 {
 	unsigned long flags;
 	uint32_t evt_posted;
@@ -182,10 +260,17 @@ lpfc_adjust_queue_depth(struct lpfc_hba *phba)
 	return;
 }
 
-/*
- * This function is called with no lock held when there is a successful
- * SCSI command completion.
- */
+/**
+ * lpfc_rampup_queue_depth: Post RAMP_UP_QUEUE event for worker thread.
+ * @phba: The Hba for which this call is being executed.
+ *
+ * This routine posts a WORKER_RAMP_UP_QUEUE event for @phba, at most one
+ * event every 5 minutes after last_ramp_up_time or last_rsrc_error_time,
+ * and wakes up the worker thread of @phba to process the event.
+ *
+ * This routine should be called with no lock held.
+ **/
 static inline void
 lpfc_rampup_queue_depth(struct lpfc_vport  *vport,
 			struct scsi_device *sdev)
@@ -217,6 +302,14 @@ lpfc_rampup_queue_depth(struct lpfc_vport  *vport,
 	return;
 }
 
+/**
+ * lpfc_ramp_down_queue_handler: WORKER_RAMP_DOWN_QUEUE event handler.
+ * @phba: The Hba for which this call is being executed.
+ *
+ * This routine is called by the worker thread to process the
+ * WORKER_RAMP_DOWN_QUEUE event. It reduces the queue depth of every scsi
+ * device on each vport associated with @phba.
+ **/
 void
 lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
 {
@@ -267,6 +360,15 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
 	atomic_set(&phba->num_cmd_success, 0);
 }
 
+/**
+ * lpfc_ramp_up_queue_handler: WORKER_RAMP_UP_QUEUE event handler.
+ * @phba: The Hba for which this call is being executed.
+ *
+ * This routine is called by the worker thread to process the
+ * WORKER_RAMP_UP_QUEUE event. It increases the queue depth of every scsi
+ * device on each vport associated with @phba by 1, and resets the @phba
+ * num_rsrc_err and num_cmd_success counters to zero.
+ **/
 void
 lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
 {
@@ -336,14 +438,21 @@ lpfc_scsi_dev_block(struct lpfc_hba *phba)
 	lpfc_destroy_vport_work_array(phba, vports);
 }
 
-/*
+/**
+ * lpfc_new_scsi_buf: Scsi buffer allocator.
+ * @vport: The virtual port for which this call is being executed.
+ *
  * This routine allocates a scsi buffer, which contains all the necessary
  * information needed to initiate a SCSI I/O.  The non-DMAable buffer region
  * contains information to build the IOCB.  The DMAable region contains
- * memory for the FCP CMND, FCP RSP, and the inital BPL.  In addition to
- * allocating memeory, the FCP CMND and FCP RSP BDEs are setup in the BPL
+ * memory for the FCP CMND, FCP RSP, and the initial BPL.  In addition to
+ * allocating memory, the FCP CMND and FCP RSP BDEs are setup in the BPL
  * and the BPL BDE is setup in the IOCB.
- */
+ *
+ * Return codes:
+ *   NULL - Error
+ *   Pointer to lpfc_scsi_buf data structure - Success
+ **/
 static struct lpfc_scsi_buf *
 lpfc_new_scsi_buf(struct lpfc_vport *vport)
 {
@@ -407,14 +516,14 @@ lpfc_new_scsi_buf(struct lpfc_vport *vport)
 	bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
 	bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
 	bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
-	bpl[0].tus.w = le32_to_cpu(bpl->tus.w);
+	bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);
 
 	/* Setup the physical region for the FCP RSP */
 	bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
 	bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
 	bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
 	bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
-	bpl[1].tus.w = le32_to_cpu(bpl->tus.w);
+	bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);
 
 	/*
 	 * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf,
@@ -422,7 +531,8 @@ lpfc_new_scsi_buf(struct lpfc_vport *vport)
 	 */
 	iocb = &psb->cur_iocbq.iocb;
 	iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
-	if (phba->sli_rev == 3) {
+	if ((phba->sli_rev == 3) &&
+	    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
 		/* fill in immediate fcp command BDE */
 		iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
 		iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
@@ -452,6 +562,17 @@ lpfc_new_scsi_buf(struct lpfc_vport *vport)
 	return psb;
 }
 
+/**
+ * lpfc_get_scsi_buf: Get a scsi buffer from the HBA's lpfc_scsi_buf_list.
+ * @phba: The Hba for which this call is being executed.
+ *
+ * This routine removes a scsi buffer from the head of the @phba
+ * lpfc_scsi_buf_list and returns it to the caller.
+ *
+ * Return codes:
+ *   NULL - Error
+ *   Pointer to lpfc_scsi_buf - Success
+ **/
 static struct lpfc_scsi_buf*
 lpfc_get_scsi_buf(struct lpfc_hba * phba)
 {
@@ -464,11 +585,20 @@ lpfc_get_scsi_buf(struct lpfc_hba * phba)
 	if (lpfc_cmd) {
 		lpfc_cmd->seg_cnt = 0;
 		lpfc_cmd->nonsg_phys = 0;
+		lpfc_cmd->prot_seg_cnt = 0;
 	}
 	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
 	return  lpfc_cmd;
 }
 
+/**
+ * lpfc_release_scsi_buf: Return a scsi buffer back to the HBA's lpfc_scsi_buf_list.
+ * @phba: The Hba for which this call is being executed.
+ * @psb: The scsi buffer which is being released.
+ *
+ * This routine releases the @psb scsi buffer by adding it to the tail of
+ * the @phba lpfc_scsi_buf_list.
+ **/
 static void
 lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
 {
@@ -480,6 +610,20 @@ lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
 	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
 }
 
+/**
+ * lpfc_scsi_prep_dma_buf: Routine to do DMA mapping for scsi buffer.
+ * @phba: The Hba for which this call is being executed.
+ * @lpfc_cmd: The scsi buffer which is going to be mapped.
+ *
+ * This routine does the pci dma mapping for the scatter-gather list of the
+ * scsi cmnd field of @lpfc_cmd. It scans through the sg elements and formats
+ * the BDEs, and also initializes all IOCB fields which are dependent on the
+ * scsi command request buffer.
+ *
+ * Return codes:
+ *   1 - Error
+ *   0 - Success
+ **/
 static int
 lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
 {
@@ -516,7 +660,7 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
 		lpfc_cmd->seg_cnt = nseg;
 		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
 			printk(KERN_ERR "%s: Too many sg segments from "
-			       "dma_map_sg.  Config %d, seg_cnt %d",
+			       "dma_map_sg.  Config %d, seg_cnt %d\n",
 			       __func__, phba->cfg_sg_seg_cnt,
 			       lpfc_cmd->seg_cnt);
 			scsi_dma_unmap(scsi_cmnd);
@@ -535,6 +679,7 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
 		scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
 			physaddr = sg_dma_address(sgel);
 			if (phba->sli_rev == 3 &&
+			    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
 			    nseg <= LPFC_EXT_DATA_BDE_COUNT) {
 				data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
 				data_bde->tus.f.bdeSize = sg_dma_len(sgel);
@@ -560,7 +705,8 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
 	 * explicitly reinitialized and for SLI-3 the extended bde count is
 	 * explicitly reinitialized since all iocb memory resources are reused.
 	 */
-	if (phba->sli_rev == 3) {
+	if (phba->sli_rev == 3 &&
+	    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
 		if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
 			/*
 			 * The extended IOCB format can only fit 3 BDE or a BPL.
@@ -587,7 +733,683 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
 			((num_bde + 2) * sizeof(struct ulp_bde64));
 	}
 	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
+
+	/*
+	 * Due to difference in data length between DIF/non-DIF paths,
+	 * we need to set word 4 of IOCB here
+	 */
+	iocb_cmd->un.fcpi.fcpi_parm = le32_to_cpu(scsi_bufflen(scsi_cmnd));
+	return 0;
+}
+
+/*
+ * Given a scsi cmnd, determine the BlockGuard profile to be used
+ * with the cmd
+ */
+static int
+lpfc_sc_to_sli_prof(struct scsi_cmnd *sc)
+{
+	uint8_t guard_type = scsi_host_get_guard(sc->device->host);
+	uint8_t ret_prof = LPFC_PROF_INVALID;
+
+	if (guard_type == SHOST_DIX_GUARD_IP) {
+		switch (scsi_get_prot_op(sc)) {
+		case SCSI_PROT_READ_INSERT:
+		case SCSI_PROT_WRITE_STRIP:
+			ret_prof = LPFC_PROF_AST2;
+			break;
+
+		case SCSI_PROT_READ_STRIP:
+		case SCSI_PROT_WRITE_INSERT:
+			ret_prof = LPFC_PROF_A1;
+			break;
+
+		case SCSI_PROT_READ_CONVERT:
+		case SCSI_PROT_WRITE_CONVERT:
+			ret_prof = LPFC_PROF_AST1;
+			break;
+
+		case SCSI_PROT_READ_PASS:
+		case SCSI_PROT_WRITE_PASS:
+		case SCSI_PROT_NORMAL:
+		default:
+			printk(KERN_ERR "Bad op/guard:%d/%d combination\n",
+					scsi_get_prot_op(sc), guard_type);
+			break;
+
+		}
+	} else if (guard_type == SHOST_DIX_GUARD_CRC) {
+		switch (scsi_get_prot_op(sc)) {
+		case SCSI_PROT_READ_STRIP:
+		case SCSI_PROT_WRITE_INSERT:
+			ret_prof = LPFC_PROF_A1;
+			break;
+
+		case SCSI_PROT_READ_PASS:
+		case SCSI_PROT_WRITE_PASS:
+			ret_prof = LPFC_PROF_C1;
+			break;
+
+		case SCSI_PROT_READ_CONVERT:
+		case SCSI_PROT_WRITE_CONVERT:
+		case SCSI_PROT_READ_INSERT:
+		case SCSI_PROT_WRITE_STRIP:
+		case SCSI_PROT_NORMAL:
+		default:
+			printk(KERN_ERR "Bad op/guard:%d/%d combination\n",
+					scsi_get_prot_op(sc), guard_type);
+			break;
+		}
+	} else {
+		/* unsupported format */
+		BUG();
+	}
+
+	return ret_prof;
+}
+
+struct scsi_dif_tuple {
+	__be16 guard_tag;       /* Checksum */
+	__be16 app_tag;         /* Opaque storage */
+	__be32 ref_tag;         /* Target LBA or indirect LBA */
+};
+
+static inline unsigned
+lpfc_cmd_blksize(struct scsi_cmnd *sc)
+{
+	return sc->device->sector_size;
+}
+
+/**
+ * lpfc_get_cmd_dif_parms: Extract DIF parameters from SCSI command.
+ * @sc:             in: SCSI command
+ * @apptagmask:     out: app tag mask
+ * @apptagval:      out: app tag value
+ * @reftag:         out: ref tag (reference tag)
+ *
+ * Description:
+ *   Extract DIF parameters from the command if possible.  Otherwise,
+ *   use default parameters.
+ *
+ **/
+static inline void
+lpfc_get_cmd_dif_parms(struct scsi_cmnd *sc, uint16_t *apptagmask,
+		uint16_t *apptagval, uint32_t *reftag)
+{
+	struct  scsi_dif_tuple *spt;
+	unsigned char op = scsi_get_prot_op(sc);
+	unsigned int protcnt = scsi_prot_sg_count(sc);
+	static int cnt;
+
+	if (protcnt && (op == SCSI_PROT_WRITE_STRIP ||
+				op == SCSI_PROT_WRITE_PASS ||
+				op == SCSI_PROT_WRITE_CONVERT)) {
+
+		cnt++;
+		spt = page_address(sg_page(scsi_prot_sglist(sc))) +
+			scsi_prot_sglist(sc)[0].offset;
+		*apptagmask = 0;
+		*apptagval = 0;
+		*reftag = cpu_to_be32(spt->ref_tag);
+
+	} else {
+		/* SBC defines the ref tag to be the lower 32 bits of the LBA */
+		*reftag = (uint32_t) (0xffffffff & scsi_get_lba(sc));
+		*apptagmask = 0;
+		*apptagval = 0;
+	}
+}
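+
+/*
+ * Worked example (illustrative): for a Type 1 write with no protection
+ * buffer attached, the defaults apply: apptagmask = apptagval = 0 and
+ * reftag = the low 32 bits of the starting LBA, e.g. LBA 0x112345678
+ * yields reftag 0x12345678, per the SBC rule noted above.
+ */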
+
+/*
+ * This function sets up buffer list for protection groups of
+ * type LPFC_PG_TYPE_NO_DIF
+ *
+ * This is usually used when the HBA is instructed to generate
+ * DIFs and insert them into data stream (or strip DIF from
+ * incoming data stream)
+ *
+ * The buffer list consists of just one protection group described
+ * below:
+ *                                +-------------------------+
+ *   start of prot group  -->     |          PDE_1          |
+ *                                +-------------------------+
+ *                                |         Data BDE        |
+ *                                +-------------------------+
+ *                                |more Data BDE's ... (opt)|
+ *                                +-------------------------+
+ *
+ * @sc: pointer to scsi command we're working on
+ * @bpl: pointer to buffer list for protection groups
+ * @datacnt: number of segments of data that have been dma mapped
+ *
+ * Note: Data s/g buffers have been dma mapped
+ */
+static int
+lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
+		struct ulp_bde64 *bpl, int datasegcnt)
+{
+	struct scatterlist *sgde = NULL; /* s/g data entry */
+	struct lpfc_pde *pde1 = NULL;
+	dma_addr_t physaddr;
+	int i = 0, num_bde = 0;
+	int datadir = sc->sc_data_direction;
+	int prof = LPFC_PROF_INVALID;
+	unsigned blksize;
+	uint32_t reftag;
+	uint16_t apptagmask, apptagval;
+
+	pde1 = (struct lpfc_pde *) bpl;
+	prof = lpfc_sc_to_sli_prof(sc);
+
+	if (prof == LPFC_PROF_INVALID)
+		goto out;
+
+	/* extract some info from the scsi command for PDE1*/
+	blksize = lpfc_cmd_blksize(sc);
+	lpfc_get_cmd_dif_parms(sc, &apptagmask, &apptagval, &reftag);
+
+	/* setup PDE1 with what we have */
+	lpfc_pde_set_bg_parms(pde1, LPFC_PDE1_DESCRIPTOR, prof, blksize,
+			BG_EC_STOP_ERR);
+	lpfc_pde_set_dif_parms(pde1, apptagmask, apptagval, reftag);
+
+	num_bde++;
+	bpl++;
+
+	/* assumption: caller has already run dma_map_sg on command data */
+	scsi_for_each_sg(sc, sgde, datasegcnt, i) {
+		physaddr = sg_dma_address(sgde);
+		bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
+		bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
+		bpl->tus.f.bdeSize = sg_dma_len(sgde);
+		if (datadir == DMA_TO_DEVICE)
+			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+		else
+			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
+		bpl->tus.w = le32_to_cpu(bpl->tus.w);
+		bpl++;
+		num_bde++;
+	}
+
+out:
+	return num_bde;
+}
+
+/*
+ * This function sets up buffer list for protection groups of
+ * type LPFC_PG_TYPE_DIF_BUF
+ *
+ * This is usually used when DIFs are in their own buffers,
+ * separate from the data. The HBA can then by instructed
+ * to place the DIFs in the outgoing stream.  For read operations,
+ * The HBA could extract the DIFs and place it in DIF buffers.
+ *
+ * The buffer list for this type consists of one or more of the
+ * protection groups described below:
+ *                                    +-------------------------+
+ *   start of first prot group  -->   |          PDE_1          |
+ *                                    +-------------------------+
+ *                                    |      PDE_3 (Prot BDE)   |
+ *                                    +-------------------------+
+ *                                    |        Data BDE         |
+ *                                    +-------------------------+
+ *                                    |more Data BDE's ... (opt)|
+ *                                    +-------------------------+
+ *   start of new  prot group  -->    |          PDE_1          |
+ *                                    +-------------------------+
+ *                                    |          ...            |
+ *                                    +-------------------------+
+ *
+ * @sc: pointer to scsi command we're working on
+ * @bpl: pointer to buffer list for protection groups
+ * @datacnt: number of segments of data that have been dma mapped
+ * @protcnt: number of segment of protection data that have been dma mapped
+ *
+ * Note: It is assumed that both data and protection s/g buffers have been
+ *       mapped for DMA
+ */
+static int
+lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
+		struct ulp_bde64 *bpl, int datacnt, int protcnt)
+{
+	struct scatterlist *sgde = NULL; /* s/g data entry */
+	struct scatterlist *sgpe = NULL; /* s/g prot entry */
+	struct lpfc_pde *pde1 = NULL;
+	struct ulp_bde64 *prot_bde = NULL;
+	dma_addr_t dataphysaddr, protphysaddr;
+	unsigned short curr_data = 0, curr_prot = 0;
+	unsigned int split_offset, protgroup_len;
+	unsigned int protgrp_blks, protgrp_bytes;
+	unsigned int remainder, subtotal;
+	int prof = LPFC_PROF_INVALID;
+	int datadir = sc->sc_data_direction;
+	unsigned char pgdone = 0, alldone = 0;
+	unsigned blksize;
+	uint32_t reftag;
+	uint16_t apptagmask, apptagval;
+	int num_bde = 0;
+
+	sgpe = scsi_prot_sglist(sc);
+	sgde = scsi_sglist(sc);
+
+	if (!sgpe || !sgde) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+				"9020 Invalid s/g entry: data=0x%p prot=0x%p\n",
+				sgpe, sgde);
+		return 0;
+	}
+
+	prof = lpfc_sc_to_sli_prof(sc);
+	if (prof == LPFC_PROF_INVALID)
+		goto out;
+
+	/* extract some info from the scsi command for PDE1*/
+	blksize = lpfc_cmd_blksize(sc);
+	lpfc_get_cmd_dif_parms(sc, &apptagmask, &apptagval, &reftag);
+
+	split_offset = 0;
+	do {
+		/* setup the first PDE_1 */
+		pde1 = (struct lpfc_pde *) bpl;
+
+		lpfc_pde_set_bg_parms(pde1, LPFC_PDE1_DESCRIPTOR, prof, blksize,
+				BG_EC_STOP_ERR);
+		lpfc_pde_set_dif_parms(pde1, apptagmask, apptagval, reftag);
+
+		num_bde++;
+		bpl++;
+
+		/* setup the first BDE that points to protection buffer */
+		prot_bde = (struct ulp_bde64 *) bpl;
+		protphysaddr = sg_dma_address(sgpe);
+		prot_bde->addrLow = le32_to_cpu(putPaddrLow(protphysaddr));
+		prot_bde->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr));
+		protgroup_len = sg_dma_len(sgpe);
+
+		/* protection data must be a multiple of the 8-byte DIF tuple */
+		BUG_ON(protgroup_len % 8);
+
+		protgrp_blks = protgroup_len / 8;
+		protgrp_bytes = protgrp_blks * blksize;
+
+		prot_bde->tus.f.bdeSize = protgroup_len;
+		if (datadir == DMA_TO_DEVICE)
+			prot_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+		else
+			prot_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
+		prot_bde->tus.w = le32_to_cpu(bpl->tus.w);
+
+		curr_prot++;
+		num_bde++;
+
+		/* setup BDE's for data blocks associated with DIF data */
+		pgdone = 0;
+		subtotal = 0; /* total bytes processed for current prot grp */
+		while (!pgdone) {
+			if (!sgde) {
+				printk(KERN_ERR "%s Invalid data segment\n",
+						__func__);
+				return 0;
+			}
+			bpl++;
+			dataphysaddr = sg_dma_address(sgde) + split_offset;
+			bpl->addrLow = le32_to_cpu(putPaddrLow(dataphysaddr));
+			bpl->addrHigh = le32_to_cpu(putPaddrHigh(dataphysaddr));
+
+			remainder = sg_dma_len(sgde) - split_offset;
+
+			if ((subtotal + remainder) <= protgrp_bytes) {
+				/* we can use this whole buffer */
+				bpl->tus.f.bdeSize = remainder;
+				split_offset = 0;
+
+				if ((subtotal + remainder) == protgrp_bytes)
+					pgdone = 1;
+			} else {
+				/* must split this buffer with next prot grp */
+				bpl->tus.f.bdeSize = protgrp_bytes - subtotal;
+				split_offset += bpl->tus.f.bdeSize;
+			}
+
+			subtotal += bpl->tus.f.bdeSize;
+
+			if (datadir == DMA_TO_DEVICE)
+				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+			else
+				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
+			bpl->tus.w = le32_to_cpu(bpl->tus.w);
+
+			num_bde++;
+			curr_data++;
+
+			if (split_offset)
+				break;
+
+			/* Move to the next s/g segment if possible */
+			sgde = sg_next(sgde);
+		}
+
+		/* are we done ? */
+		if (curr_prot == protcnt) {
+			alldone = 1;
+		} else if (curr_prot < protcnt) {
+			/* advance to next prot buffer */
+			sgpe = sg_next(sgpe);
+			bpl++;
+
+			/* update the reference tag */
+			reftag += protgrp_blks;
+		} else {
+			/* if we're here, we have a bug */
+			printk(KERN_ERR "BLKGRD: bug in %s\n", __func__);
+		}
+
+	} while (!alldone);
+
+out:
+	return num_bde;
+}
+/*
+ * Given a SCSI command that supports DIF, determine the composition of the
+ * protection groups involved in setting up buffer lists
+ *
+ * Returns:
+ *   LPFC_PG_TYPE_NO_DIF  - the HBA generates or strips the DIF itself
+ *   LPFC_PG_TYPE_DIF_BUF - protection data is passed in separate buffers
+ *   LPFC_PG_TYPE_INVALID - unsupported protection operation
+ */
+static int
+lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
+{
+	int ret = LPFC_PG_TYPE_INVALID;
+	unsigned char op = scsi_get_prot_op(sc);
+
+	switch (op) {
+	case SCSI_PROT_READ_STRIP:
+	case SCSI_PROT_WRITE_INSERT:
+		ret = LPFC_PG_TYPE_NO_DIF;
+		break;
+	case SCSI_PROT_READ_INSERT:
+	case SCSI_PROT_WRITE_STRIP:
+	case SCSI_PROT_READ_PASS:
+	case SCSI_PROT_WRITE_PASS:
+	case SCSI_PROT_WRITE_CONVERT:
+	case SCSI_PROT_READ_CONVERT:
+		ret = LPFC_PG_TYPE_DIF_BUF;
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+				"9021 Unsupported protection op:%d\n", op);
+		break;
+	}
+
+	return ret;
+}
+
+/*
+ * This is the protection/DIF aware version of
+ * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
+ * two functions eventually, but for now, it's here
+ */
+static int
+lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba,
+		struct lpfc_scsi_buf *lpfc_cmd)
+{
+	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
+	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
+	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
+	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
+	uint32_t num_bde = 0;
+	int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
+	int prot_group_type = 0;
+	int diflen, fcpdl;
+	unsigned blksize;
+
+	/*
+	 * Start the lpfc command prep by bumping the bpl beyond the fcp_cmnd
+	 * and fcp_rsp regions to the first data bde entry
+	 */
+	bpl += 2;
+	if (scsi_sg_count(scsi_cmnd)) {
+		/*
+		 * The driver stores the segment count returned from pci_map_sg
+		 * because this is a count of dma-mappings used to map the use_sg
+		 * pages.  They are not guaranteed to be the same for those
+		 * architectures that implement an IOMMU.
+		 */
+		datasegcnt = dma_map_sg(&phba->pcidev->dev,
+					scsi_sglist(scsi_cmnd),
+					scsi_sg_count(scsi_cmnd), datadir);
+		if (unlikely(!datasegcnt))
+			return 1;
+
+		lpfc_cmd->seg_cnt = datasegcnt;
+		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
+			printk(KERN_ERR "%s: Too many sg segments from "
+					"dma_map_sg.  Config %d, seg_cnt %d\n",
+					__func__, phba->cfg_sg_seg_cnt,
+					lpfc_cmd->seg_cnt);
+			scsi_dma_unmap(scsi_cmnd);
+			return 1;
+		}
+
+		prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
+
+		switch (prot_group_type) {
+		case LPFC_PG_TYPE_NO_DIF:
+			num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
+					datasegcnt);
+			/* we should have 2 or more entries in buffer list */
+			if (num_bde < 2)
+				goto err;
+			break;
+		case LPFC_PG_TYPE_DIF_BUF:{
+			/*
+			 * This type indicates that protection buffers are
+			 * passed to the driver, so they need to be prepared
+			 * for DMA
+			 */
+			protsegcnt = dma_map_sg(&phba->pcidev->dev,
+					scsi_prot_sglist(scsi_cmnd),
+					scsi_prot_sg_count(scsi_cmnd), datadir);
+			if (unlikely(!protsegcnt)) {
+				scsi_dma_unmap(scsi_cmnd);
+				return 1;
+			}
+
+			lpfc_cmd->prot_seg_cnt = protsegcnt;
+			if (lpfc_cmd->prot_seg_cnt
+			    > phba->cfg_prot_sg_seg_cnt) {
+				printk(KERN_ERR "%s: Too many prot sg segments "
+						"from dma_map_sg.  Config %d,"
+						"prot_seg_cnt %d\n", __func__,
+						phba->cfg_prot_sg_seg_cnt,
+						lpfc_cmd->prot_seg_cnt);
+				dma_unmap_sg(&phba->pcidev->dev,
+					     scsi_prot_sglist(scsi_cmnd),
+					     scsi_prot_sg_count(scsi_cmnd),
+					     datadir);
+				scsi_dma_unmap(scsi_cmnd);
+				return 1;
+			}
+
+			num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
+					datasegcnt, protsegcnt);
+			/* we should have 3 or more entries in buffer list */
+			if (num_bde < 3)
+				goto err;
+			break;
+		}
+		case LPFC_PG_TYPE_INVALID:
+		default:
+			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+					"9022 Unexpected protection group %i\n",
+					prot_group_type);
+			return 1;
+		}
+	}
+
+	/*
+	 * Finish initializing those IOCB fields that are dependent on the
+	 * scsi_cmnd request_buffer.  Note that the bdeSize is explicitly
+	 * reinitialized since all iocb memory resources are used many times
+	 * for transmit, receive, and continuation bpl's.
+	 */
+	iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
+	iocb_cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof(struct ulp_bde64));
+	iocb_cmd->ulpBdeCount = 1;
+	iocb_cmd->ulpLe = 1;
+
+	fcpdl = scsi_bufflen(scsi_cmnd);
+
+	if (scsi_get_prot_type(scsi_cmnd) == SCSI_PROT_DIF_TYPE1) {
+		/*
+		 * We are in DIF Type 1 mode.
+		 * Every data block has an 8-byte DIF (trailer)
+		 * attached to it, so the FCP data length must be adjusted.
+		 */
+		blksize = lpfc_cmd_blksize(scsi_cmnd);
+		diflen = (fcpdl / blksize) * 8;
+		fcpdl += diflen;
+	}
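+	/*
+	 * Worked example (illustrative): a 4096-byte I/O on 512-byte
+	 * sectors spans 8 blocks, so diflen = (4096 / 512) * 8 = 64 and
+	 * the wire-level fcpdl becomes 4096 + 64 = 4160 bytes.
+	 */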
+	fcp_cmnd->fcpDl = cpu_to_be32(fcpdl);
+
+	/*
+	 * Due to difference in data length between DIF/non-DIF paths,
+	 * we need to set word 4 of IOCB here
+	 */
+	iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
+
 	return 0;
+err:
+	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+			"9023 Could not setup all needed BDE's"
+			"prot_group_type=%d, num_bde=%d\n",
+			prot_group_type, num_bde);
+	return 1;
+}
+
+/*
+ * This function checks for BlockGuard errors detected by
+ * the HBA.  In case of errors, the ASC/ASCQ fields in the
+ * sense buffer will be set accordingly, paired with
+ * ILLEGAL_REQUEST to signal to the kernel that the HBA
+ * detected corruption.
+ *
+ * Returns:
+ *  0 - No error found
+ *  1 - BlockGuard error found
+ * -1 - Internal error (bad profile, ...etc)
+ */
+static int
+lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
+			struct lpfc_iocbq *pIocbOut)
+{
+	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
+	struct sli3_bg_fields *bgf = &pIocbOut->iocb.unsli3.sli3_bg;
+	int ret = 0;
+	uint32_t bghm = bgf->bghm;
+	uint32_t bgstat = bgf->bgstat;
+	uint64_t failing_sector = 0;
+
+	printk(KERN_ERR "BG ERROR in cmd 0x%x lba 0x%llx blk cnt 0x%lx "
+			"bgstat=0x%x bghm=0x%x\n",
+			cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd),
+			cmd->request->nr_sectors, bgstat, bghm);
+
+	spin_lock(&_dump_buf_lock);
+	if (!_dump_buf_done) {
+		printk(KERN_ERR "Saving Data for %u blocks to debugfs\n",
+				(cmd->cmnd[7] << 8 | cmd->cmnd[8]));
+		lpfc_debug_save_data(cmd);
+
+		/* If we have a prot sgl, save the DIF buffer */
+		if (lpfc_prot_group_type(phba, cmd) ==
+				LPFC_PG_TYPE_DIF_BUF) {
+			printk(KERN_ERR "Saving DIF for %u blocks to debugfs\n",
+					(cmd->cmnd[7] << 8 | cmd->cmnd[8]));
+			lpfc_debug_save_dif(cmd);
+		}
+
+		_dump_buf_done = 1;
+	}
+	spin_unlock(&_dump_buf_lock);
+
+	if (lpfc_bgs_get_invalid_prof(bgstat)) {
+		cmd->result = ScsiResult(DID_ERROR, 0);
+		printk(KERN_ERR "Invalid BlockGuard profile. bgstat:0x%x\n",
+				bgstat);
+		ret = (-1);
+		goto out;
+	}
+
+	if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
+		cmd->result = ScsiResult(DID_ERROR, 0);
+		printk(KERN_ERR "Invalid BlockGuard DIF Block. bgstat:0x%x\n",
+				bgstat);
+		ret = (-1);
+		goto out;
+	}
+
+	if (lpfc_bgs_get_guard_err(bgstat)) {
+		ret = 1;
+
+		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
+				0x10, 0x1);
+		cmd->result = (DRIVER_SENSE|SUGGEST_DIE) << 24
+			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
+		phba->bg_guard_err_cnt++;
+		printk(KERN_ERR "BLKGRD: guard_tag error\n");
+	}
+
+	if (lpfc_bgs_get_reftag_err(bgstat)) {
+		ret = 1;
+
+		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
+				0x10, 0x3);
+		cmd->result = (DRIVER_SENSE|SUGGEST_DIE) << 24
+			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
+
+		phba->bg_reftag_err_cnt++;
+		printk(KERN_ERR "BLKGRD: ref_tag error\n");
+	}
+
+	if (lpfc_bgs_get_apptag_err(bgstat)) {
+		ret = 1;
+
+		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
+				0x10, 0x2);
+		cmd->result = (DRIVER_SENSE|SUGGEST_DIE) << 24
+			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
+
+		phba->bg_apptag_err_cnt++;
+		printk(KERN_ERR "BLKGRD: app_tag error\n");
+	}
+
+	if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
+		/*
+		 * setup sense data descriptor 0 per SPC-4 as an information
+		 * field, and put the failing LBA in it
+		 */
+		cmd->sense_buffer[8] = 0;     /* Information */
+		cmd->sense_buffer[9] = 0xa;   /* Add. length */
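+		/* convert bghm from bytes to sectors before adding it to the LBA */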
+		do_div(bghm, cmd->device->sector_size);
+
+		failing_sector = scsi_get_lba(cmd);
+		failing_sector += bghm;
+
+		put_unaligned_be64(failing_sector, &cmd->sense_buffer[10]);
+	}
+
+	if (!ret) {
+		/* No error was reported - problem in FW? */
+		cmd->result = ScsiResult(DID_ERROR, 0);
+		printk(KERN_ERR "BLKGRD: no errors reported!\n");
+	}
+
+out:
+	return ret;
 }
 
 /**
@@ -681,6 +1503,15 @@ lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
 	lpfc_worker_wake_up(phba);
 	return;
 }
+
+/**
+ * lpfc_scsi_unprep_dma_buf: Routine to un-map DMA mapping of scatter gather.
+ * @phba: The Hba for which this call is being executed.
+ * @psb: The scsi buffer which is going to be un-mapped.
+ *
+ * This routine does DMA un-mapping of the scatter gather list of the scsi
+ * command held in @psb.
+ **/
 static void
 lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
 {
@@ -692,8 +1523,22 @@ lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
 	 */
 	if (psb->seg_cnt > 0)
 		scsi_dma_unmap(psb->pCmd);
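+	/* also unmap the protection/DIF scatter list if one was mapped */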
+	if (psb->prot_seg_cnt > 0)
+		dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd),
+				scsi_prot_sg_count(psb->pCmd),
+				psb->pCmd->sc_data_direction);
 }
 
+/**
+ * lpfc_handle_fcp_err: FCP response handler.
+ * @vport: The virtual port for which this call is being executed.
+ * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
+ * @rsp_iocb: The response IOCB which contains FCP error.
+ *
+ * This routine is called to process a response IOCB with status field
+ * IOSTAT_FCP_RSP_ERROR. It sets the result field of the scsi command
+ * based upon the SCSI and FCP error.
+ **/
 static void
 lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 		    struct lpfc_iocbq *rsp_iocb)
@@ -735,7 +1580,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 		logit = LOG_FCP;
 
 	lpfc_printf_vlog(vport, KERN_WARNING, logit,
-			 "0730 FCP command x%x failed: x%x SNS x%x x%x "
+			 "9024 FCP command x%x failed: x%x SNS x%x x%x "
 			 "Data: x%x x%x x%x x%x x%x\n",
 			 cmnd->cmnd[0], scsi_status,
 			 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
@@ -758,7 +1603,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 		scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));
 
 		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
-				 "0716 FCP Read Underrun, expected %d, "
+				 "9025 FCP Read Underrun, expected %d, "
 				 "residual %d Data: x%x x%x x%x\n",
 				 be32_to_cpu(fcpcmd->fcpDl),
 				 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
@@ -774,7 +1619,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 			(scsi_get_resid(cmnd) != fcpi_parm)) {
 			lpfc_printf_vlog(vport, KERN_WARNING,
 					 LOG_FCP | LOG_FCP_ERROR,
-					 "0735 FCP Read Check Error "
+					 "9026 FCP Read Check Error "
 					 "and Underrun Data: x%x x%x x%x x%x\n",
 					 be32_to_cpu(fcpcmd->fcpDl),
 					 scsi_get_resid(cmnd), fcpi_parm,
@@ -793,7 +1638,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 		    (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
 		     < cmnd->underflow)) {
 			lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
-					 "0717 FCP command x%x residual "
+					 "9027 FCP command x%x residual "
 					 "underrun converted to error "
 					 "Data: x%x x%x x%x\n",
 					 cmnd->cmnd[0], scsi_bufflen(cmnd),
@@ -802,7 +1647,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 		}
 	} else if (resp_info & RESID_OVER) {
 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
-				 "0720 FCP command x%x residual overrun error. "
+				 "9028 FCP command x%x residual overrun error. "
 				 "Data: x%x x%x \n", cmnd->cmnd[0],
 				 scsi_bufflen(cmnd), scsi_get_resid(cmnd));
 		host_status = DID_ERROR;
@@ -814,7 +1659,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 	} else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm &&
 			(cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
-				 "0734 FCP Read Check Error Data: "
+				 "9029 FCP Read Check Error Data: "
 				 "x%x x%x x%x x%x\n",
 				 be32_to_cpu(fcpcmd->fcpDl),
 				 be32_to_cpu(fcprsp->rspResId),
@@ -828,6 +1673,16 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 	lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb);
 }
 
+/**
+ * lpfc_scsi_cmd_iocb_cmpl: Scsi cmnd IOCB completion routine.
+ * @phba: The Hba for which this call is being executed.
+ * @pIocbIn: The command IOCBQ for the scsi cmnd.
+ * @pIocbOut: The response IOCBQ for the scsi cmnd.
+ *
+ * This routine assigns the scsi command result by examining the response
+ * IOCB status field. It also handles the QUEUE FULL condition by ramping
+ * down the device queue depth.
+ **/
 static void
 lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 			struct lpfc_iocbq *pIocbOut)
@@ -846,7 +1701,8 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 
 	lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
 	lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
-	atomic_dec(&pnode->cmd_pending);
+	if (pnode && NLP_CHK_NODE_ACT(pnode))
+		atomic_dec(&pnode->cmd_pending);
 
 	if (lpfc_cmd->status) {
 		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
@@ -856,7 +1712,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 			lpfc_cmd->status = IOSTAT_DEFAULT;
 
 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
-				 "0729 FCP cmd x%x failed <%d/%d> "
+				 "9030 FCP cmd x%x failed <%d/%d> "
 				 "status: x%x result: x%x Data: x%x x%x\n",
 				 cmd->cmnd[0],
 				 cmd->device ? cmd->device->id : 0xffff,
@@ -904,7 +1760,28 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 			    lpfc_cmd->result == IOERR_ABORT_REQUESTED) {
 				cmd->result = ScsiResult(DID_REQUEUE, 0);
 				break;
-			} /* else: fall through */
+			}
+
+			if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
+			     lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
+			     pIocbOut->iocb.unsli3.sli3_bg.bgstat) {
+				if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
+					/*
+					 * This is a response for a BG enabled
+					 * cmd. Parse BG error
+					 */
+					lpfc_parse_bg_err(phba, lpfc_cmd,
+							pIocbOut);
+					break;
+				} else {
+					lpfc_printf_vlog(vport, KERN_WARNING,
+							LOG_BG,
+							"9031 non-zero BGSTAT "
+							"on unprotected cmd\n");
+				}
+			}
+
+			/* else: fall through */
 		default:
 			cmd->result = ScsiResult(DID_ERROR, 0);
 			break;
@@ -936,23 +1813,31 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 	   time_after(jiffies, lpfc_cmd->start_time +
 		msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
 		spin_lock_irqsave(sdev->host->host_lock, flags);
-		if ((pnode->cmd_qdepth > atomic_read(&pnode->cmd_pending) &&
-		    (atomic_read(&pnode->cmd_pending) > LPFC_MIN_TGT_QDEPTH) &&
-		    ((cmd->cmnd[0] == READ_10) || (cmd->cmnd[0] == WRITE_10))))
-			pnode->cmd_qdepth = atomic_read(&pnode->cmd_pending);
-
-		pnode->last_change_time = jiffies;
+		if (pnode && NLP_CHK_NODE_ACT(pnode)) {
+			if (pnode->cmd_qdepth >
+				atomic_read(&pnode->cmd_pending) &&
+				(atomic_read(&pnode->cmd_pending) >
+				LPFC_MIN_TGT_QDEPTH) &&
+				((cmd->cmnd[0] == READ_10) ||
+				(cmd->cmnd[0] == WRITE_10)))
+				pnode->cmd_qdepth =
+					atomic_read(&pnode->cmd_pending);
+
+			pnode->last_change_time = jiffies;
+		}
 		spin_unlock_irqrestore(sdev->host->host_lock, flags);
-	} else if ((pnode->cmd_qdepth < LPFC_MAX_TGT_QDEPTH) &&
+	} else if (pnode && NLP_CHK_NODE_ACT(pnode)) {
+		if ((pnode->cmd_qdepth < LPFC_MAX_TGT_QDEPTH) &&
 		   time_after(jiffies, pnode->last_change_time +
-			msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) {
-		spin_lock_irqsave(sdev->host->host_lock, flags);
-		pnode->cmd_qdepth += pnode->cmd_qdepth *
-			LPFC_TGTQ_RAMPUP_PCENT / 100;
-		if (pnode->cmd_qdepth > LPFC_MAX_TGT_QDEPTH)
-			pnode->cmd_qdepth = LPFC_MAX_TGT_QDEPTH;
-		pnode->last_change_time = jiffies;
-		spin_unlock_irqrestore(sdev->host->host_lock, flags);
+			      msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) {
+			spin_lock_irqsave(sdev->host->host_lock, flags);
+			pnode->cmd_qdepth += pnode->cmd_qdepth *
+				LPFC_TGTQ_RAMPUP_PCENT / 100;
+			if (pnode->cmd_qdepth > LPFC_MAX_TGT_QDEPTH)
+				pnode->cmd_qdepth = LPFC_MAX_TGT_QDEPTH;
+			pnode->last_change_time = jiffies;
+			spin_unlock_irqrestore(sdev->host->host_lock, flags);
+		}
 	}
 
 	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
@@ -1067,6 +1952,15 @@ lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
 	}
 }
 
+/**
+ * lpfc_scsi_prep_cmnd: Routine to convert scsi cmnd to FCP information unit.
+ * @vport: The virtual port for which this call is being executed.
+ * @lpfc_cmd: The scsi command which needs to be sent.
+ * @pnode: Pointer to lpfc_nodelist.
+ *
+ * This routine initializes the fcp_cmnd and iocb data structures from the
+ * scsi command to be transferred.
+ **/
 static void
 lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 		    struct lpfc_nodelist *pnode)
@@ -1122,7 +2016,6 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 		} else {
 			iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
 			iocb_cmd->ulpPU = PARM_READ_CHECK;
-			iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
 			fcp_cmnd->fcpCntl3 = READ_DATA;
 			phba->fc4InputRequests++;
 		}
@@ -1133,7 +2026,8 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 		fcp_cmnd->fcpCntl3 = 0;
 		phba->fc4ControlRequests++;
 	}
-	if (phba->sli_rev == 3)
+	if (phba->sli_rev == 3 &&
+	    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED))
 		lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
 	/*
 	 * Finish initializing those IOCB fields that are independent
@@ -1152,6 +2046,19 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 	piocbq->vport = vport;
 }
 
+/**
+ * lpfc_scsi_prep_task_mgmt_cmd: Convert scsi TM cmnd to FCP information unit.
+ * @vport: The virtual port for which this call is being executed.
+ * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
+ * @lun: Logical unit number.
+ * @task_mgmt_cmd: SCSI task management command.
+ *
+ * This routine creates FCP information unit corresponding to @task_mgmt_cmd.
+ *
+ * Return codes:
+ *   0 - Error
+ *   1 - Success
+ **/
 static int
 lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
 			     struct lpfc_scsi_buf *lpfc_cmd,
@@ -1178,7 +2085,8 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
 	memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
 	int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
 	fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
-	if (vport->phba->sli_rev == 3)
+	if (vport->phba->sli_rev == 3 &&
+	    !(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED))
 		lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
 	piocb->ulpCommand = CMD_FCP_ICMND64_CR;
 	piocb->ulpContext = ndlp->nlp_rpi;
@@ -1201,6 +2109,15 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
 	return 1;
 }
 
+/**
+ * lpfc_tskmgmt_def_cmpl: IOCB completion routine for task management command.
+ * @phba: The Hba for which this call is being executed.
+ * @cmdiocbq: Pointer to lpfc_iocbq data structure.
+ * @rspiocbq: Pointer to lpfc_iocbq data structure.
+ *
+ * This routine is the IOCB completion routine for device reset and target
+ * reset. It releases the scsi buffer associated with lpfc_cmd.
+ **/
 static void
 lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
 			struct lpfc_iocbq *cmdiocbq,
@@ -1213,6 +2130,20 @@ lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
 	return;
 }
 
+/**
+ * lpfc_scsi_tgt_reset: Target reset handler.
+ * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure
+ * @vport: The virtual port for which this call is being executed.
+ * @tgt_id: Target ID.
+ * @lun: Lun number.
+ * @rdata: Pointer to lpfc_rport_data.
+ *
+ * This routine issues a TARGET RESET iocb to reset the target identified by
+ * @tgt_id.
+ *
+ * Return Code:
+ *   0x2003 - Error
+ *   0x2002 - Success.
+ **/
 static int
 lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
 		    unsigned  tgt_id, unsigned int lun,
@@ -1266,6 +2197,15 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
 	return ret;
 }
 
+/**
+ * lpfc_info: Info entry point of scsi_host_template data structure.
+ * @host: The scsi host for which this call is being executed.
+ *
+ * This routine provides module information about the hba.
+ *
+ * Return code:
+ *   Pointer to char - Success.
+ **/
 const char *
 lpfc_info(struct Scsi_Host *host)
 {
@@ -1295,6 +2235,13 @@ lpfc_info(struct Scsi_Host *host)
 	return lpfcinfobuf;
 }
 
+/**
+ * lpfc_poll_rearm_timer: Routine to rearm the fcp_poll timer of the hba.
+ * @phba: The Hba for which this call is being executed.
+ *
+ * This routine rearms the fcp_poll_timer field of @phba using cfg_poll_tmo.
+ * The default value of cfg_poll_tmo is 10 milliseconds.
+ **/
 static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
 {
 	unsigned long  poll_tmo_expires =
@@ -1305,11 +2252,25 @@ static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
 			  poll_tmo_expires);
 }
 
+/**
+ * lpfc_poll_start_timer: Routine to start fcp_poll_timer of HBA.
+ * @phba: The Hba for which this call is being executed.
+ *
+ * This routine starts the fcp_poll_timer of @phba.
+ **/
 void lpfc_poll_start_timer(struct lpfc_hba * phba)
 {
 	lpfc_poll_rearm_timer(phba);
 }
 
+/**
+ * lpfc_poll_timeout: Restart polling timer.
+ * @ptr: Pointer to the lpfc_hba data structure, cast to unsigned long.
+ *
+ * This routine restarts the fcp_poll timer when FCP ring polling is enabled
+ * and the FCP ring interrupt is disabled.
+ **/
 void lpfc_poll_timeout(unsigned long ptr)
 {
 	struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
@@ -1321,6 +2282,20 @@ void lpfc_poll_timeout(unsigned long ptr)
 	}
 }
 
+/**
+ * lpfc_queuecommand: Queuecommand entry point of Scsi Host Template data
+ * structure.
+ * @cmnd: Pointer to scsi_cmnd data structure.
+ * @done: Pointer to done routine.
+ *
+ * The driver registers this routine with the scsi midlayer to submit a @cmnd
+ * for processing. This routine prepares an IOCB from the scsi command and
+ * provides it to the firmware. The @done callback is invoked after the driver
+ * finishes processing the command.
+ *
+ * Return value:
+ *   0 - Success
+ *   SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily.
+ **/
 static int
 lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
 {
@@ -1340,6 +2315,17 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
 		goto out_fail_command;
 	}
 
+	if (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
+		scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
+
+		printk(KERN_ERR "BLKGRD ERROR: rcvd protected cmd:%02x op:%02x "
+				"str=%s without registering for BlockGuard - "
+				"Rejecting command\n",
+				cmnd->cmnd[0], scsi_get_prot_op(cmnd),
+				dif_op_str[scsi_get_prot_op(cmnd)]);
+		goto out_fail_command;
+	}
+
 	/*
 	 * Catch race where our node has transitioned, but the
 	 * transport is still transitioning.
@@ -1348,12 +2334,13 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
 		cmnd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
 		goto out_fail_command;
 	}
-	if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth)
+	if (vport->cfg_max_scsicmpl_time &&
+		(atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth))
 		goto out_host_busy;
 
 	lpfc_cmd = lpfc_get_scsi_buf(phba);
 	if (lpfc_cmd == NULL) {
-		lpfc_adjust_queue_depth(phba);
+		lpfc_rampdown_queue_depth(phba);
 
 		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
 				 "0707 driver's buffer pool is empty, "
@@ -1361,7 +2348,6 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
 		goto out_host_busy;
 	}
 
-	lpfc_cmd->start_time = jiffies;
 	/*
 	 * Store the midlayer's command structure for the completion phase
 	 * and complete the command initialization.
@@ -1373,7 +2359,65 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
 	cmnd->host_scribble = (unsigned char *)lpfc_cmd;
 	cmnd->scsi_done = done;
 
-	err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
+	if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
+		lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
+				"9033 BLKGRD: rcvd protected cmd:%02x op:%02x "
+				"str=%s\n",
+				cmnd->cmnd[0], scsi_get_prot_op(cmnd),
+				dif_op_str[scsi_get_prot_op(cmnd)]);
+		lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
+				"9034 BLKGRD: CDB: %02x %02x %02x %02x %02x "
+				"%02x %02x %02x %02x %02x \n",
+				cmnd->cmnd[0], cmnd->cmnd[1], cmnd->cmnd[2],
+				cmnd->cmnd[3], cmnd->cmnd[4], cmnd->cmnd[5],
+				cmnd->cmnd[6], cmnd->cmnd[7], cmnd->cmnd[8],
+				cmnd->cmnd[9]);
+		if (cmnd->cmnd[0] == READ_10)
+			lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
+					"9035 BLKGRD: READ @ sector %llu, "
+					 "count %lu\n",
+					 (unsigned long long)scsi_get_lba(cmnd),
+					cmnd->request->nr_sectors);
+		else if (cmnd->cmnd[0] == WRITE_10)
+			lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
+					"9036 BLKGRD: WRITE @ sector %llu, "
+					"count %lu cmd=%p\n",
+					(unsigned long long)scsi_get_lba(cmnd),
+					cmnd->request->nr_sectors,
+					cmnd);
+
+		err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
+	} else {
+		lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
+				"9038 BLKGRD: rcvd unprotected cmd:%02x op:%02x"
+				" str=%s\n",
+				cmnd->cmnd[0], scsi_get_prot_op(cmnd),
+				dif_op_str[scsi_get_prot_op(cmnd)]);
+		lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
+				 "9039 BLKGRD: CDB: %02x %02x %02x %02x %02x "
+				 "%02x %02x %02x %02x %02x \n",
+				 cmnd->cmnd[0], cmnd->cmnd[1], cmnd->cmnd[2],
+				 cmnd->cmnd[3], cmnd->cmnd[4], cmnd->cmnd[5],
+				 cmnd->cmnd[6], cmnd->cmnd[7], cmnd->cmnd[8],
+				 cmnd->cmnd[9]);
+		if (cmnd->cmnd[0] == READ_10)
+			lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
+					 "9040 dbg: READ @ sector %llu, "
+					 "count %lu\n",
+					 (unsigned long long)scsi_get_lba(cmnd),
+					 cmnd->request->nr_sectors);
+		else if (cmnd->cmnd[0] == WRITE_10)
+			lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
+					 "9041 dbg: WRITE @ sector %llu, "
+					 "count %lu cmd=%p\n",
+					 (unsigned long long)scsi_get_lba(cmnd),
+					 cmnd->request->nr_sectors, cmnd);
+		else
+			lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
+					 "9042 dbg: parser not implemented\n");
+		err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
+	}
+
 	if (err)
 		goto out_host_busy_free_buf;
 
@@ -1382,9 +2426,10 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
 	atomic_inc(&ndlp->cmd_pending);
 	err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring],
 				  &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
-	if (err)
+	if (err) {
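+		/* the IOCB was not issued; undo the cmd_pending increment */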
+		atomic_dec(&ndlp->cmd_pending);
 		goto out_host_busy_free_buf;
-
+	}
 	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
 		lpfc_sli_poll_fcp_ring(phba);
 		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
@@ -1394,7 +2439,6 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
 	return 0;
 
  out_host_busy_free_buf:
-	atomic_dec(&ndlp->cmd_pending);
 	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
 	lpfc_release_scsi_buf(phba, lpfc_cmd);
  out_host_busy:
@@ -1405,6 +2449,12 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
 	return 0;
 }
 
+/**
+ * lpfc_block_error_handler: Routine to block the error handler.
+ * @cmnd: Pointer to scsi_cmnd data structure.
+ *
+ * This routine blocks execution until the fc_rport state is no longer
+ * FC_PORTSTATE_BLOCKED.
+ **/
 static void
 lpfc_block_error_handler(struct scsi_cmnd *cmnd)
 {
@@ -1421,6 +2471,17 @@ lpfc_block_error_handler(struct scsi_cmnd *cmnd)
 	return;
 }
 
+/**
+ * lpfc_abort_handler: eh_abort_handler entry point of Scsi Host Template data
+ * structure.
+ * @cmnd: Pointer to scsi_cmnd data structure.
+ *
+ * This routine aborts the @cmnd pending in the base driver.
+ *
+ * Return code:
+ *   0x2003 - Error
+ *   0x2002 - Success
+ **/
 static int
 lpfc_abort_handler(struct scsi_cmnd *cmnd)
 {
@@ -1516,6 +2577,18 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
 	return ret;
 }
 
+/**
+ * lpfc_device_reset_handler: eh_device_reset entry point of Scsi Host Template
+ * data structure.
+ * @cmnd: Pointer to scsi_cmnd data structure.
+ *
+ * This routine does a device reset by sending a TARGET_RESET task management
+ * command.
+ *
+ * Return code:
+ *  0x2003 - Error
+ *  0x2002 - Success
+ **/
 static int
 lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
 {
@@ -1560,7 +2633,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
 		fc_get_event_number(),
 		sizeof(scsi_event),
 		(char *)&scsi_event,
-		SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
+		LPFC_NL_VENDOR_ID);
 
 	if (!rdata || pnode->nlp_state != NLP_STE_MAPPED_NODE) {
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
@@ -1633,6 +2706,17 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
 	return ret;
 }
 
+/**
+ * lpfc_bus_reset_handler: eh_bus_reset_handler entry point of Scsi Host
+ * Template data structure.
+ * @cmnd: Pointer to scsi_cmnd data structure.
+ *
+ * This routine does a target reset for every target on @cmnd->device->host.
+ *
+ * Return Code:
+ *   0x2003 - Error
+ *   0x2002 - Success
+ **/
 static int
 lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
 {
@@ -1657,7 +2741,7 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
 		fc_get_event_number(),
 		sizeof(scsi_event),
 		(char *)&scsi_event,
-		SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
+		LPFC_NL_VENDOR_ID);
 
 	lpfc_block_error_handler(cmnd);
 	/*
@@ -1723,6 +2807,20 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
 	return ret;
 }
 
+/**
+ * lpfc_slave_alloc: slave_alloc entry point of Scsi Host Template data
+ * structure.
+ * @sdev: Pointer to scsi_device.
+ *
+ * This routine populates cmds_per_lun + 2 scsi_bufs into this host's globally
+ * available list of scsi buffers, and makes sure no more scsi buffers are
+ * allocated than the HBA limit conveyed to the midlayer. This list of scsi
+ * buffers exists for the lifetime of the driver.
+ *
+ * Return codes:
+ *   non-0 - Error
+ *   0 - Success
+ **/
 static int
 lpfc_slave_alloc(struct scsi_device *sdev)
 {
@@ -1784,6 +2882,19 @@ lpfc_slave_alloc(struct scsi_device *sdev)
 	return 0;
 }
 
+/**
+ * lpfc_slave_configure: slave_configure entry point of Scsi Host Template data
+ * structure.
+ * @sdev: Pointer to scsi_device.
+ *
+ * This routine configures the following items
+ *   - Tag command queuing support for @sdev if supported.
+ *   - Dev loss time out value of fc_rport.
+ *   - Enable SLI polling for fcp ring if ENABLE_FCP_RING_POLLING flag is set.
+ *
+ * Return codes:
+ *   0 - Success
+ **/
 static int
 lpfc_slave_configure(struct scsi_device *sdev)
 {
@@ -1813,6 +2924,12 @@ lpfc_slave_configure(struct scsi_device *sdev)
 	return 0;
 }
 
+/**
+ * lpfc_slave_destroy: slave_destroy entry point of SHT data structure.
+ * @sdev: Pointer to scsi_device.
+ *
+ * This routine sets the @sdev hostdata field to null.
+ **/
 static void
 lpfc_slave_destroy(struct scsi_device *sdev)
 {
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index 437f182e2322..c7c440d5fa29 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -124,6 +124,8 @@ struct lpfc_scsi_buf {
 	uint32_t   seg_cnt;	/* Number of scatter-gather segments returned by
 				 * dma_map_sg.  The driver needs this for calls
 				 * to dma_unmap_sg. */
+	uint32_t prot_seg_cnt;  /* seg_cnt's counterpart for protection data */
+
 	dma_addr_t nonsg_phys;	/* Non scatter-gather physical address. */
 
 	/*
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 8ab5babdeebc..01dfdc8696f8 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -542,6 +542,7 @@ lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 	 */
 	nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;
 
+
 	if (pring->ringno == LPFC_ELS_RING) {
 		lpfc_debugfs_slow_ring_trc(phba,
 			"IOCB cmd ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
@@ -1259,68 +1260,6 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
 }
 
 /**
- * lpfc_sli_replace_hbqbuff: Replace the HBQ buffer with a new buffer.
- * @phba: Pointer to HBA context object.
- * @tag: Tag for the HBQ buffer.
- *
- * This function is called from unsolicited event handler code path to get the
- * HBQ buffer associated with an unsolicited iocb. This function is called with
- * no lock held. It returns the buffer associated with the given tag and posts
- * another buffer to the firmware. Note that the new buffer must be allocated
- * before taking the hbalock and that the hba lock must be held until it is
- * finished with the hbq entry swap.
- **/
-static struct lpfc_dmabuf *
-lpfc_sli_replace_hbqbuff(struct lpfc_hba *phba, uint32_t tag)
-{
-	struct hbq_dmabuf *hbq_entry, *new_hbq_entry;
-	uint32_t hbqno;
-	void *virt;		/* virtual address ptr */
-	dma_addr_t phys;	/* mapped address */
-	unsigned long flags;
-
-	hbqno = tag >> 16;
-	new_hbq_entry = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
-	/* Check whether HBQ is still in use */
-	spin_lock_irqsave(&phba->hbalock, flags);
-	if (!phba->hbq_in_use) {
-		if (new_hbq_entry)
-			(phba->hbqs[hbqno].hbq_free_buffer)(phba,
-							    new_hbq_entry);
-		spin_unlock_irqrestore(&phba->hbalock, flags);
-		return NULL;
-	}
-
-	hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
-	if (hbq_entry == NULL) {
-		if (new_hbq_entry)
-			(phba->hbqs[hbqno].hbq_free_buffer)(phba,
-							    new_hbq_entry);
-		spin_unlock_irqrestore(&phba->hbalock, flags);
-		return NULL;
-	}
-	list_del(&hbq_entry->dbuf.list);
-
-	if (new_hbq_entry == NULL) {
-		list_add_tail(&hbq_entry->dbuf.list, &phba->hbqbuf_in_list);
-		spin_unlock_irqrestore(&phba->hbalock, flags);
-		return &hbq_entry->dbuf;
-	}
-	new_hbq_entry->tag = -1;
-	phys = new_hbq_entry->dbuf.phys;
-	virt = new_hbq_entry->dbuf.virt;
-	new_hbq_entry->dbuf.phys = hbq_entry->dbuf.phys;
-	new_hbq_entry->dbuf.virt = hbq_entry->dbuf.virt;
-	hbq_entry->dbuf.phys = phys;
-	hbq_entry->dbuf.virt = virt;
-	lpfc_sli_free_hbq(phba, hbq_entry);
-	list_add_tail(&new_hbq_entry->dbuf.list, &phba->hbqbuf_in_list);
-	spin_unlock_irqrestore(&phba->hbalock, flags);
-
-	return &new_hbq_entry->dbuf;
-}
-
-/**
  * lpfc_sli_get_buff: Get the buffer associated with the buffer tag.
  * @phba: Pointer to HBA context object.
  * @pring: Pointer to driver SLI ring object.
@@ -1334,13 +1273,17 @@ lpfc_sli_replace_hbqbuff(struct lpfc_hba *phba, uint32_t tag)
  **/
 static struct lpfc_dmabuf *
 lpfc_sli_get_buff(struct lpfc_hba *phba,
-			struct lpfc_sli_ring *pring,
-			uint32_t tag)
+		  struct lpfc_sli_ring *pring,
+		  uint32_t tag)
 {
+	struct hbq_dmabuf *hbq_entry;
+
 	if (tag & QUE_BUFTAG_BIT)
 		return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
-	else
-		return lpfc_sli_replace_hbqbuff(phba, tag);
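+	/* otherwise the tag identifies an HBQ buffer; look it up directly */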
+	hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
+	if (!hbq_entry)
+		return NULL;
+	return &hbq_entry->dbuf;
 }
 
 
@@ -1372,8 +1315,6 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 	match = 0;
 	irsp = &(saveq->iocb);
 
-	if (irsp->ulpStatus == IOSTAT_NEED_BUFFER)
-		return 1;
 	if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
 		if (pring->lpfc_sli_rcv_async_status)
 			pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
@@ -1982,7 +1923,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
 			if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
 				(irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
 				spin_unlock_irqrestore(&phba->hbalock, iflag);
-				lpfc_adjust_queue_depth(phba);
+				lpfc_rampdown_queue_depth(phba);
 				spin_lock_irqsave(&phba->hbalock, iflag);
 			}
 
@@ -2225,7 +2166,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
 			if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
 			     (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
 				spin_unlock_irqrestore(&phba->hbalock, iflag);
-				lpfc_adjust_queue_depth(phba);
+				lpfc_rampdown_queue_depth(phba);
 				spin_lock_irqsave(&phba->hbalock, iflag);
 			}
 
@@ -2790,7 +2731,6 @@ lpfc_sli_brdrestart(struct lpfc_hba *phba)
 {
 	MAILBOX_t *mb;
 	struct lpfc_sli *psli;
-	uint16_t skip_post;
 	volatile uint32_t word0;
 	void __iomem *to_slim;
 
@@ -2815,13 +2755,10 @@ lpfc_sli_brdrestart(struct lpfc_hba *phba)
 	readl(to_slim); /* flush */
 
 	/* Only skip post after fc_ffinit is completed */
-	if (phba->pport->port_state) {
-		skip_post = 1;
+	if (phba->pport->port_state)
 		word0 = 1;	/* This is really setting up word1 */
-	} else {
-		skip_post = 0;
+	else
 		word0 = 0;	/* This is really setting up word1 */
-	}
 	to_slim = phba->MBslimaddr + sizeof (uint32_t);
 	writel(*(uint32_t *) mb, to_slim);
 	readl(to_slim); /* flush */
@@ -2835,10 +2772,8 @@ lpfc_sli_brdrestart(struct lpfc_hba *phba)
 	memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
 	psli->stats_start = get_seconds();
 
-	if (skip_post)
-		mdelay(100);
-	else
-		mdelay(2000);
+	/* Give the INITFF and Post time to settle. */
+	mdelay(100);
 
 	lpfc_hba_down_post(phba);
 
@@ -3084,7 +3019,6 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
 		spin_unlock_irq(&phba->hbalock);
 		phba->pport->port_state = LPFC_VPORT_UNKNOWN;
 		lpfc_sli_brdrestart(phba);
-		msleep(2500);
 		rc = lpfc_sli_chipset_init(phba);
 		if (rc)
 			break;
@@ -3111,7 +3045,8 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
 		phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
 					LPFC_SLI3_HBQ_ENABLED |
 					LPFC_SLI3_CRP_ENABLED |
-					LPFC_SLI3_INB_ENABLED);
+					LPFC_SLI3_INB_ENABLED |
+					LPFC_SLI3_BG_ENABLED);
 		if (rc != MBX_SUCCESS) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"0442 Adapter failed to init, mbxCmd x%x "
@@ -3144,17 +3079,29 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
 			phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
 		if (pmb->mb.un.varCfgPort.ginb) {
 			phba->sli3_options |= LPFC_SLI3_INB_ENABLED;
+			phba->hbq_get = phba->mbox->us.s3_inb_pgp.hbq_get;
 			phba->port_gp = phba->mbox->us.s3_inb_pgp.port;
 			phba->inb_ha_copy = &phba->mbox->us.s3_inb_pgp.ha_copy;
 			phba->inb_counter = &phba->mbox->us.s3_inb_pgp.counter;
 			phba->inb_last_counter =
 					phba->mbox->us.s3_inb_pgp.counter;
 		} else {
+			phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
 			phba->port_gp = phba->mbox->us.s3_pgp.port;
 			phba->inb_ha_copy = NULL;
 			phba->inb_counter = NULL;
 		}
+
+		if (phba->cfg_enable_bg) {
+			if (pmb->mb.un.varCfgPort.gbg)
+				phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
+			else
+				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+						"0443 Adapter did not grant "
+						"BlockGuard\n");
+		}
 	} else {
+		phba->hbq_get = NULL;
 		phba->port_gp = phba->mbox->us.s2.port;
 		phba->inb_ha_copy = NULL;
 		phba->inb_counter = NULL;
@@ -3305,10 +3252,6 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
 	struct lpfc_sli *psli = &phba->sli;
 	struct lpfc_sli_ring *pring;
 
-	if (!(phba->pport->work_port_events & WORKER_MBOX_TMO)) {
-		return;
-	}
-
 	/* Mbox cmd <mbxCommand> timeout */
 	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
 			"0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
@@ -4005,7 +3948,7 @@ lpfc_sli_async_event_handler(struct lpfc_hba * phba,
 	shost = lpfc_shost_from_vport(phba->pport);
 	fc_host_post_vendor_event(shost, fc_get_event_number(),
 		sizeof(temp_event_data), (char *) &temp_event_data,
-		SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
+		LPFC_NL_VENDOR_ID);
 
 }
 
@@ -5184,6 +5127,10 @@ lpfc_sli_check_eratt(struct lpfc_hba *phba)
 {
 	uint32_t ha_copy;
 
+	/* If PCI channel is offline, don't process it */
+	if (unlikely(pci_channel_offline(phba->pcidev)))
+		return 0;
+
 	/* If somebody is waiting to handle an eratt, don't process it
 	 * here. The brdkill function will do this.
 	 */
@@ -5242,6 +5189,7 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
 	uint32_t ha_copy;
 	uint32_t work_ha_copy;
 	unsigned long status;
+	unsigned long iflag;
 	uint32_t control;
 
 	MAILBOX_t *mbox, *pmbox;
@@ -5274,7 +5222,7 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
 		if (unlikely(phba->link_state < LPFC_LINK_DOWN))
 			return IRQ_NONE;
 		/* Need to read HA REG for slow-path events */
-		spin_lock(&phba->hbalock);
+		spin_lock_irqsave(&phba->hbalock, iflag);
 		ha_copy = readl(phba->HAregaddr);
 		/* If somebody is waiting to handle an eratt don't process it
 		 * here. The brdkill function will do this.
@@ -5294,7 +5242,7 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
 		writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
 			phba->HAregaddr);
 		readl(phba->HAregaddr); /* flush */
-		spin_unlock(&phba->hbalock);
+		spin_unlock_irqrestore(&phba->hbalock, iflag);
 	} else
 		ha_copy = phba->ha_copy;
 
@@ -5307,13 +5255,13 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
 				 * Turn off Link Attention interrupts
 				 * until CLEAR_LA done
 				 */
-				spin_lock(&phba->hbalock);
+				spin_lock_irqsave(&phba->hbalock, iflag);
 				phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
 				control = readl(phba->HCregaddr);
 				control &= ~HC_LAINT_ENA;
 				writel(control, phba->HCregaddr);
 				readl(phba->HCregaddr); /* flush */
-				spin_unlock(&phba->hbalock);
+				spin_unlock_irqrestore(&phba->hbalock, iflag);
 			}
 			else
 				work_ha_copy &= ~HA_LATT;
@@ -5328,7 +5276,7 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
 				(HA_RXMASK  << (4*LPFC_ELS_RING)));
 			status >>= (4*LPFC_ELS_RING);
 			if (status & HA_RXMASK) {
-				spin_lock(&phba->hbalock);
+				spin_lock_irqsave(&phba->hbalock, iflag);
 				control = readl(phba->HCregaddr);
 
 				lpfc_debugfs_slow_ring_trc(phba,
@@ -5357,10 +5305,10 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
 						(uint32_t)((unsigned long)
 						&phba->work_waitq));
 				}
-				spin_unlock(&phba->hbalock);
+				spin_unlock_irqrestore(&phba->hbalock, iflag);
 			}
 		}
-		spin_lock(&phba->hbalock);
+		spin_lock_irqsave(&phba->hbalock, iflag);
 		if (work_ha_copy & HA_ERATT)
 			lpfc_sli_read_hs(phba);
 		if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
@@ -5372,7 +5320,7 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
 			/* First check out the status word */
 			lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
 			if (pmbox->mbxOwner != OWN_HOST) {
-				spin_unlock(&phba->hbalock);
+				spin_unlock_irqrestore(&phba->hbalock, iflag);
 				/*
 				 * Stray Mailbox Interrupt, mbxCommand <cmd>
 				 * mbxStatus <status>
@@ -5389,7 +5337,7 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
 				work_ha_copy &= ~HA_MBATT;
 			} else {
 				phba->sli.mbox_active = NULL;
-				spin_unlock(&phba->hbalock);
+				spin_unlock_irqrestore(&phba->hbalock, iflag);
 				phba->last_completion_time = jiffies;
 				del_timer(&phba->sli.mbox_tmo);
 				if (pmb->mbox_cmpl) {
@@ -5438,14 +5386,18 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
 						goto send_current_mbox;
 					}
 				}
-				spin_lock(&phba->pport->work_port_lock);
+				spin_lock_irqsave(
+						&phba->pport->work_port_lock,
+						iflag);
 				phba->pport->work_port_events &=
 					~WORKER_MBOX_TMO;
-				spin_unlock(&phba->pport->work_port_lock);
+				spin_unlock_irqrestore(
+						&phba->pport->work_port_lock,
+						iflag);
 				lpfc_mbox_cmpl_put(phba, pmb);
 			}
 		} else
-			spin_unlock(&phba->hbalock);
+			spin_unlock_irqrestore(&phba->hbalock, iflag);
 
 		if ((work_ha_copy & HA_MBATT) &&
 		    (phba->sli.mbox_active == NULL)) {
@@ -5461,9 +5413,9 @@ send_current_mbox:
 						"MBX_SUCCESS");
 		}
 
-		spin_lock(&phba->hbalock);
+		spin_lock_irqsave(&phba->hbalock, iflag);
 		phba->work_ha |= work_ha_copy;
-		spin_unlock(&phba->hbalock);
+		spin_unlock_irqrestore(&phba->hbalock, iflag);
 		lpfc_worker_wake_up(phba);
 	}
 	return IRQ_HANDLED;
@@ -5495,6 +5447,7 @@ lpfc_fp_intr_handler(int irq, void *dev_id)
 	struct lpfc_hba  *phba;
 	uint32_t ha_copy;
 	unsigned long status;
+	unsigned long iflag;
 
 	/* Get the driver's phba structure from the dev_id and
 	 * assume the HBA is not interrupting.
@@ -5520,11 +5473,11 @@ lpfc_fp_intr_handler(int irq, void *dev_id)
 		/* Need to read HA REG for FCP ring and other ring events */
 		ha_copy = readl(phba->HAregaddr);
 		/* Clear up only attention source related to fast-path */
-		spin_lock(&phba->hbalock);
+		spin_lock_irqsave(&phba->hbalock, iflag);
 		writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
 			phba->HAregaddr);
 		readl(phba->HAregaddr); /* flush */
-		spin_unlock(&phba->hbalock);
+		spin_unlock_irqrestore(&phba->hbalock, iflag);
 	} else
 		ha_copy = phba->ha_copy;
 
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index cc43e9de22cc..7e32e95c5392 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "8.2.8"
+#define LPFC_DRIVER_VERSION "8.3.0"
 
 #define LPFC_DRIVER_NAME		"lpfc"
 #define LPFC_SP_DRIVER_HANDLER_NAME	"lpfc:sp"
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index a7de1cc02b40..63b54c66756c 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -288,10 +288,8 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
 	int vpi;
 	int rc = VPORT_ERROR;
 	int status;
-	int size;
 
-	if ((phba->sli_rev < 3) ||
-		!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
+	if ((phba->sli_rev < 3) || !(phba->cfg_enable_npiv)) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
 				"1808 Create VPORT failed: "
 				"NPIV is not enabled: SLImode:%d\n",
@@ -351,20 +349,6 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
 
 	memcpy(vport->fc_portname.u.wwn, vport->fc_sparam.portName.u.wwn, 8);
 	memcpy(vport->fc_nodename.u.wwn, vport->fc_sparam.nodeName.u.wwn, 8);
-	size = strnlen(fc_vport->symbolic_name, LPFC_VNAME_LEN);
-	if (size) {
-		vport->vname = kzalloc(size+1, GFP_KERNEL);
-		if (!vport->vname) {
-			lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
-					 "1814 Create VPORT failed. "
-					 "vname allocation failed.\n");
-			rc = VPORT_ERROR;
-			lpfc_free_vpi(phba, vpi);
-			destroy_port(vport);
-			goto error_out;
-		}
-		memcpy(vport->vname, fc_vport->symbolic_name, size+1);
-	}
 	if (fc_vport->node_name != 0)
 		u64_to_wwn(fc_vport->node_name, vport->fc_nodename.u.wwn);
 	if (fc_vport->port_name != 0)
@@ -394,6 +378,9 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
 		goto error_out;
 	}
 
+	/* Create binary sysfs attribute for vport */
+	lpfc_alloc_sysfs_attr(vport);
+
 	*(struct lpfc_vport **)fc_vport->dd_data = vport;
 	vport->fc_vport = fc_vport;
 
@@ -405,6 +392,7 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
 	}
 
 	if (disable) {
+		lpfc_vport_set_state(vport, FC_VPORT_DISABLED);
 		rc = VPORT_OK;
 		goto out;
 	}
@@ -587,8 +575,12 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
 	spin_lock_irq(&phba->hbalock);
 	vport->load_flag |= FC_UNLOADING;
 	spin_unlock_irq(&phba->hbalock);
-	kfree(vport->vname);
+
+	lpfc_free_sysfs_attr(vport);
+
 	lpfc_debugfs_terminate(vport);
+
+	/* Remove FC host and then SCSI host with the vport */
 	fc_remove_host(lpfc_shost_from_vport(vport));
 	scsi_remove_host(lpfc_shost_from_vport(vport));
 
diff --git a/drivers/scsi/mac_esp.c b/drivers/scsi/mac_esp.c
index 887682a24e36..c24e86f07804 100644
--- a/drivers/scsi/mac_esp.c
+++ b/drivers/scsi/mac_esp.c
@@ -53,7 +53,8 @@ struct mac_esp_priv {
 	void __iomem *pdma_io;
 	int error;
 };
-static struct platform_device *internal_esp, *external_esp;
+static struct platform_device *internal_pdev, *external_pdev;
+static struct esp *esp_chips[2];
 
 #define MAC_ESP_GET_PRIV(esp) ((struct mac_esp_priv *) \
 			       platform_get_drvdata((struct platform_device *) \
@@ -170,7 +171,7 @@ static inline int mac_esp_wait_for_dreq(struct esp *esp)
 
 #define MAC_ESP_PDMA_LOOP(operands) \
 	asm volatile ( \
-	     "       tstw %2                   \n" \
+	     "       tstw %1                   \n" \
 	     "       jbeq 20f                  \n" \
 	     "1:     movew " operands "        \n" \
 	     "2:     movew " operands "        \n" \
@@ -188,14 +189,14 @@ static inline int mac_esp_wait_for_dreq(struct esp *esp)
 	     "14:    movew " operands "        \n" \
 	     "15:    movew " operands "        \n" \
 	     "16:    movew " operands "        \n" \
-	     "       subqw #1,%2               \n" \
+	     "       subqw #1,%1               \n" \
 	     "       jbne 1b                   \n" \
-	     "20:    tstw %3                   \n" \
+	     "20:    tstw %2                   \n" \
 	     "       jbeq 30f                  \n" \
 	     "21:    movew " operands "        \n" \
-	     "       subqw #1,%3               \n" \
+	     "       subqw #1,%2               \n" \
 	     "       jbne 21b                  \n" \
-	     "30:    tstw %4                   \n" \
+	     "30:    tstw %3                   \n" \
 	     "       jbeq 40f                  \n" \
 	     "31:    moveb " operands "        \n" \
 	     "32:    nop                       \n" \
@@ -223,8 +224,8 @@ static inline int mac_esp_wait_for_dreq(struct esp *esp)
 	     "       .long  31b,40b            \n" \
 	     "       .long  32b,40b            \n" \
 	     "       .previous                 \n" \
-	     : "+a" (addr) \
-	     : "a" (mep->pdma_io), "r" (count32), "r" (count2), "g" (esp_count))
+	     : "+a" (addr), "+r" (count32), "+r" (count2) \
+	     : "g" (count1), "a" (mep->pdma_io))
 
 static void mac_esp_send_pdma_cmd(struct esp *esp, u32 addr, u32 esp_count,
 				  u32 dma_count, int write, u8 cmd)
@@ -247,19 +248,20 @@ static void mac_esp_send_pdma_cmd(struct esp *esp, u32 addr, u32 esp_count,
 	do {
 		unsigned int count32 = esp_count >> 5;
 		unsigned int count2 = (esp_count & 0x1F) >> 1;
+		unsigned int count1 = esp_count & 1;
 		unsigned int start_addr = addr;
 
 		if (mac_esp_wait_for_dreq(esp))
 			break;
 
 		if (write) {
-			MAC_ESP_PDMA_LOOP("%1@,%0@+");
+			MAC_ESP_PDMA_LOOP("%4@,%0@+");
 
 			esp_count -= addr - start_addr;
 		} else {
 			unsigned int n;
 
-			MAC_ESP_PDMA_LOOP("%0@+,%1@");
+			MAC_ESP_PDMA_LOOP("%0@+,%4@");
 
 			if (mac_esp_wait_for_empty_fifo(esp))
 				break;
@@ -442,6 +444,32 @@ static u32 mac_esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len)
 	return dma_len > 0xFFFF ? 0xFFFF : dma_len;
 }
 
+static irqreturn_t mac_scsi_esp_intr(int irq, void *dev_id)
+{
+	int got_intr;
+
+	/*
+	 * This is an edge triggered IRQ, so we have to be careful to
+	 * avoid missing a transition when it is shared by two ESP devices.
+	 */
+
+	do {
+		got_intr = 0;
+		if (esp_chips[0] &&
+		    (mac_esp_read8(esp_chips[0], ESP_STATUS) & ESP_STAT_INTR)) {
+			(void)scsi_esp_intr(irq, esp_chips[0]);
+			got_intr = 1;
+		}
+		if (esp_chips[1] &&
+		    (mac_esp_read8(esp_chips[1], ESP_STATUS) & ESP_STAT_INTR)) {
+			(void)scsi_esp_intr(irq, esp_chips[1]);
+			got_intr = 1;
+		}
+	} while (got_intr);
+
+	return IRQ_HANDLED;
+}
+
 static struct esp_driver_ops mac_esp_ops = {
 	.esp_write8       = mac_esp_write8,
 	.esp_read8        = mac_esp_read8,
@@ -556,10 +584,16 @@ static int __devinit esp_mac_probe(struct platform_device *dev)
 	}
 
 	host->irq = IRQ_MAC_SCSI;
-	err = request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "Mac ESP",
-			  esp);
-	if (err < 0)
-		goto fail_free_priv;
+	esp_chips[dev->id] = esp;
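+	/* publish the esp_chips[] update before the shared IRQ can fire */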
+	mb();
+	if (esp_chips[!dev->id] == NULL) {
+		err = request_irq(host->irq, mac_scsi_esp_intr, 0,
+		                  "Mac ESP", NULL);
+		if (err < 0) {
+			esp_chips[dev->id] = NULL;
+			goto fail_free_priv;
+		}
+	}
 
 	err = scsi_esp_register(esp, &dev->dev);
 	if (err)
@@ -568,7 +602,8 @@ static int __devinit esp_mac_probe(struct platform_device *dev)
 	return 0;
 
 fail_free_irq:
-	free_irq(host->irq, esp);
+	if (esp_chips[!dev->id] == NULL)
+		free_irq(host->irq, esp);
 fail_free_priv:
 	kfree(mep);
 fail_free_command_block:
@@ -587,7 +622,9 @@ static int __devexit esp_mac_remove(struct platform_device *dev)
 
 	scsi_esp_unregister(esp);
 
-	free_irq(irq, esp);
+	esp_chips[dev->id] = NULL;
+	if (!(esp_chips[0] || esp_chips[1]))
+		free_irq(irq, NULL);
 
 	kfree(mep);
 
@@ -614,19 +651,18 @@ static int __init mac_esp_init(void)
 	if (err)
 		return err;
 
-	internal_esp = platform_device_alloc(DRV_MODULE_NAME, 0);
-	if (internal_esp && platform_device_add(internal_esp)) {
-		platform_device_put(internal_esp);
-		internal_esp = NULL;
+	internal_pdev = platform_device_alloc(DRV_MODULE_NAME, 0);
+	if (internal_pdev && platform_device_add(internal_pdev)) {
+		platform_device_put(internal_pdev);
+		internal_pdev = NULL;
 	}
-
-	external_esp = platform_device_alloc(DRV_MODULE_NAME, 1);
-	if (external_esp && platform_device_add(external_esp)) {
-		platform_device_put(external_esp);
-		external_esp = NULL;
+	external_pdev = platform_device_alloc(DRV_MODULE_NAME, 1);
+	if (external_pdev && platform_device_add(external_pdev)) {
+		platform_device_put(external_pdev);
+		external_pdev = NULL;
 	}
 
-	if (internal_esp || external_esp) {
+	if (internal_pdev || external_pdev) {
 		return 0;
 	} else {
 		platform_driver_unregister(&esp_mac_driver);
@@ -638,13 +674,13 @@ static void __exit mac_esp_exit(void)
 {
 	platform_driver_unregister(&esp_mac_driver);
 
-	if (internal_esp) {
-		platform_device_unregister(internal_esp);
-		internal_esp = NULL;
+	if (internal_pdev) {
+		platform_device_unregister(internal_pdev);
+		internal_pdev = NULL;
 	}
-	if (external_esp) {
-		platform_device_unregister(external_esp);
-		external_esp = NULL;
+	if (external_pdev) {
+		platform_device_unregister(external_pdev);
+		external_pdev = NULL;
 	}
 }
 
diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c
index 22052bb7becb..d06ec5aa6924 100644
--- a/drivers/scsi/nsp32.c
+++ b/drivers/scsi/nsp32.c
@@ -3401,8 +3401,7 @@ static int __devinit nsp32_probe(struct pci_dev *pdev, const struct pci_device_i
 	data->IrqNumber   = pdev->irq;
 	data->BaseAddress = pci_resource_start(pdev, 0);
 	data->NumAddress  = pci_resource_len  (pdev, 0);
-	data->MmioAddress = ioremap_nocache(pci_resource_start(pdev, 1),
-					       pci_resource_len  (pdev, 1));
+	data->MmioAddress = pci_ioremap_bar(pdev, 1);
 	data->MmioLength  = pci_resource_len  (pdev, 1);
 
 	pci_set_master(pdev);
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index b6cd12b2e996..8cb9240596ab 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -4294,8 +4294,7 @@ qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	error = -ENODEV;
 
 #if MEMORY_MAPPED_IO
-	ha->mmpbase = ioremap(pci_resource_start(ha->pdev, 1),
-			      pci_resource_len(ha->pdev, 1));
+	ha->mmpbase = pci_ioremap_bar(ha->pdev, 1);
 	if (!ha->mmpbase) {
 		printk(KERN_INFO "qla1280: Unable to map I/O memory\n");
 		goto error_free_response_ring;
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index ed731968f15f..cd53627cc761 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -19,8 +19,9 @@ qla2x00_sysfs_read_fw_dump(struct kobject *kobj,
 			   struct bin_attribute *bin_attr,
 			   char *buf, loff_t off, size_t count)
 {
-	struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj,
+	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
 	    struct device, kobj)));
+	struct qla_hw_data *ha = vha->hw;
 
 	if (ha->fw_dump_reading == 0)
 		return 0;
@@ -34,8 +35,9 @@ qla2x00_sysfs_write_fw_dump(struct kobject *kobj,
 			    struct bin_attribute *bin_attr,
 			    char *buf, loff_t off, size_t count)
 {
-	struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj,
+	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
 	    struct device, kobj)));
+	struct qla_hw_data *ha = vha->hw;
 	int reading;
 
 	if (off != 0)
@@ -48,7 +50,7 @@ qla2x00_sysfs_write_fw_dump(struct kobject *kobj,
 			break;
 
 		qla_printk(KERN_INFO, ha,
-		    "Firmware dump cleared on (%ld).\n", ha->host_no);
+		    "Firmware dump cleared on (%ld).\n", vha->host_no);
 
 		ha->fw_dump_reading = 0;
 		ha->fw_dumped = 0;
@@ -59,14 +61,14 @@ qla2x00_sysfs_write_fw_dump(struct kobject *kobj,
 
 			qla_printk(KERN_INFO, ha,
 			    "Raw firmware dump ready for read on (%ld).\n",
-			    ha->host_no);
+			    vha->host_no);
 		}
 		break;
 	case 2:
-		qla2x00_alloc_fw_dump(ha);
+		qla2x00_alloc_fw_dump(vha);
 		break;
 	case 3:
-		qla2x00_system_error(ha);
+		qla2x00_system_error(vha);
 		break;
 	}
 	return (count);
@@ -87,8 +89,9 @@ qla2x00_sysfs_read_nvram(struct kobject *kobj,
 			 struct bin_attribute *bin_attr,
 			 char *buf, loff_t off, size_t count)
 {
-	struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj,
+	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
 	    struct device, kobj)));
+	struct qla_hw_data *ha = vha->hw;
 
 	if (!capable(CAP_SYS_ADMIN))
 		return 0;
@@ -103,8 +106,9 @@ qla2x00_sysfs_write_nvram(struct kobject *kobj,
 			  struct bin_attribute *bin_attr,
 			  char *buf, loff_t off, size_t count)
 {
-	struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj,
+	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
 	    struct device, kobj)));
+	struct qla_hw_data *ha = vha->hw;
 	uint16_t	cnt;
 
 	if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->nvram_size)
@@ -134,11 +138,11 @@ qla2x00_sysfs_write_nvram(struct kobject *kobj,
 	}
 
 	/* Write NVRAM. */
-	ha->isp_ops->write_nvram(ha, (uint8_t *)buf, ha->nvram_base, count);
-	ha->isp_ops->read_nvram(ha, (uint8_t *)ha->nvram, ha->nvram_base,
+	ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->nvram_base, count);
+	ha->isp_ops->read_nvram(vha, (uint8_t *)ha->nvram, ha->nvram_base,
 	    count);
 
-	set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
+	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
 
 	return (count);
 }
@@ -158,8 +162,9 @@ qla2x00_sysfs_read_optrom(struct kobject *kobj,
 			  struct bin_attribute *bin_attr,
 			  char *buf, loff_t off, size_t count)
 {
-	struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj,
+	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
 	    struct device, kobj)));
+	struct qla_hw_data *ha = vha->hw;
 
 	if (ha->optrom_state != QLA_SREADING)
 		return 0;
@@ -173,8 +178,9 @@ qla2x00_sysfs_write_optrom(struct kobject *kobj,
 			   struct bin_attribute *bin_attr,
 			   char *buf, loff_t off, size_t count)
 {
-	struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj,
+	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
 	    struct device, kobj)));
+	struct qla_hw_data *ha = vha->hw;
 
 	if (ha->optrom_state != QLA_SWRITING)
 		return -EINVAL;
@@ -203,8 +209,10 @@ qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj,
 			       struct bin_attribute *bin_attr,
 			       char *buf, loff_t off, size_t count)
 {
-	struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj,
+	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
 	    struct device, kobj)));
+	struct qla_hw_data *ha = vha->hw;
+
 	uint32_t start = 0;
 	uint32_t size = ha->optrom_size;
 	int val, valid;
@@ -262,7 +270,7 @@ qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj,
 		    ha->optrom_region_start, ha->optrom_region_size));
 
 		memset(ha->optrom_buffer, 0, ha->optrom_region_size);
-		ha->isp_ops->read_optrom(ha, ha->optrom_buffer,
+		ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
 		    ha->optrom_region_start, ha->optrom_region_size);
 		break;
 	case 2:
@@ -333,7 +341,7 @@ qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj,
 		    "Writing flash region -- 0x%x/0x%x.\n",
 		    ha->optrom_region_start, ha->optrom_region_size));
 
-		ha->isp_ops->write_optrom(ha, ha->optrom_buffer,
+		ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
 		    ha->optrom_region_start, ha->optrom_region_size);
 		break;
 	default:
@@ -356,8 +364,9 @@ qla2x00_sysfs_read_vpd(struct kobject *kobj,
 		       struct bin_attribute *bin_attr,
 		       char *buf, loff_t off, size_t count)
 {
-	struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj,
+	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
 	    struct device, kobj)));
+	struct qla_hw_data *ha = vha->hw;
 
 	if (!capable(CAP_SYS_ADMIN))
 		return 0;
@@ -371,15 +380,16 @@ qla2x00_sysfs_write_vpd(struct kobject *kobj,
 			struct bin_attribute *bin_attr,
 			char *buf, loff_t off, size_t count)
 {
-	struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj,
+	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
 	    struct device, kobj)));
+	struct qla_hw_data *ha = vha->hw;
 
 	if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size)
 		return 0;
 
 	/* Write NVRAM. */
-	ha->isp_ops->write_nvram(ha, (uint8_t *)buf, ha->vpd_base, count);
-	ha->isp_ops->read_nvram(ha, (uint8_t *)ha->vpd, ha->vpd_base, count);
+	ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->vpd_base, count);
+	ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd, ha->vpd_base, count);
 
 	return count;
 }
@@ -399,8 +409,9 @@ qla2x00_sysfs_read_sfp(struct kobject *kobj,
 		       struct bin_attribute *bin_attr,
 		       char *buf, loff_t off, size_t count)
 {
-	struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj,
+	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
 	    struct device, kobj)));
+	struct qla_hw_data *ha = vha->hw;
 	uint16_t iter, addr, offset;
 	int rval;
 
@@ -429,7 +440,7 @@ do_read:
 			offset = 0;
 		}
 
-		rval = qla2x00_read_sfp(ha, ha->sfp_data_dma, addr, offset,
+		rval = qla2x00_read_sfp(vha, ha->sfp_data_dma, addr, offset,
 		    SFP_BLOCK_SIZE);
 		if (rval != QLA_SUCCESS) {
 			qla_printk(KERN_WARNING, ha,
@@ -469,30 +480,31 @@ static struct sysfs_entry {
 };
 
 void
-qla2x00_alloc_sysfs_attr(scsi_qla_host_t *ha)
+qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
 {
-	struct Scsi_Host *host = ha->host;
+	struct Scsi_Host *host = vha->host;
 	struct sysfs_entry *iter;
 	int ret;
 
 	for (iter = bin_file_entries; iter->name; iter++) {
-		if (iter->is4GBp_only && !IS_FWI2_CAPABLE(ha))
+		if (iter->is4GBp_only && !IS_FWI2_CAPABLE(vha->hw))
 			continue;
 
 		ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
 		    iter->attr);
 		if (ret)
-			qla_printk(KERN_INFO, ha,
+			qla_printk(KERN_INFO, vha->hw,
 			    "Unable to create sysfs %s binary attribute "
 			    "(%d).\n", iter->name, ret);
 	}
 }
 
 void
-qla2x00_free_sysfs_attr(scsi_qla_host_t *ha)
+qla2x00_free_sysfs_attr(scsi_qla_host_t *vha)
 {
-	struct Scsi_Host *host = ha->host;
+	struct Scsi_Host *host = vha->host;
 	struct sysfs_entry *iter;
+	struct qla_hw_data *ha = vha->hw;
 
 	for (iter = bin_file_entries; iter->name; iter++) {
 		if (iter->is4GBp_only && !IS_FWI2_CAPABLE(ha))
@@ -503,7 +515,7 @@ qla2x00_free_sysfs_attr(scsi_qla_host_t *ha)
 	}
 
 	if (ha->beacon_blink_led == 1)
-		ha->isp_ops->beacon_off(ha);
+		ha->isp_ops->beacon_off(vha);
 }
 
 /* Scsi_Host attributes. */
@@ -519,22 +531,24 @@ static ssize_t
 qla2x00_fw_version_show(struct device *dev,
 			struct device_attribute *attr, char *buf)
 {
-	scsi_qla_host_t *ha = shost_priv(class_to_shost(dev));
-	char fw_str[30];
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+	struct qla_hw_data *ha = vha->hw;
+	char fw_str[128];
 
 	return snprintf(buf, PAGE_SIZE, "%s\n",
-	    ha->isp_ops->fw_version_str(ha, fw_str));
+	    ha->isp_ops->fw_version_str(vha, fw_str));
 }
 
 static ssize_t
 qla2x00_serial_num_show(struct device *dev, struct device_attribute *attr,
 			char *buf)
 {
-	scsi_qla_host_t *ha = shost_priv(class_to_shost(dev));
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+	struct qla_hw_data *ha = vha->hw;
 	uint32_t sn;
 
 	if (IS_FWI2_CAPABLE(ha)) {
-		qla2xxx_get_vpd_field(ha, "SN", buf, PAGE_SIZE);
+		qla2xxx_get_vpd_field(vha, "SN", buf, PAGE_SIZE);
 		return snprintf(buf, PAGE_SIZE, "%s\n", buf);
 	}
 
@@ -547,15 +561,16 @@ static ssize_t
 qla2x00_isp_name_show(struct device *dev, struct device_attribute *attr,
 		      char *buf)
 {
-	scsi_qla_host_t *ha = shost_priv(class_to_shost(dev));
-	return snprintf(buf, PAGE_SIZE, "ISP%04X\n", ha->pdev->device);
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+	return snprintf(buf, PAGE_SIZE, "ISP%04X\n", vha->hw->pdev->device);
 }
 
 static ssize_t
 qla2x00_isp_id_show(struct device *dev, struct device_attribute *attr,
 		    char *buf)
 {
-	scsi_qla_host_t *ha = shost_priv(class_to_shost(dev));
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+	struct qla_hw_data *ha = vha->hw;
 	return snprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n",
 	    ha->product_id[0], ha->product_id[1], ha->product_id[2],
 	    ha->product_id[3]);
@@ -565,43 +580,44 @@ static ssize_t
 qla2x00_model_name_show(struct device *dev, struct device_attribute *attr,
 			char *buf)
 {
-	scsi_qla_host_t *ha = shost_priv(class_to_shost(dev));
-	return snprintf(buf, PAGE_SIZE, "%s\n", ha->model_number);
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+	return snprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_number);
 }
 
 static ssize_t
 qla2x00_model_desc_show(struct device *dev, struct device_attribute *attr,
 			char *buf)
 {
-	scsi_qla_host_t *ha = shost_priv(class_to_shost(dev));
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
 	return snprintf(buf, PAGE_SIZE, "%s\n",
-	    ha->model_desc ? ha->model_desc: "");
+	    vha->hw->model_desc ? vha->hw->model_desc : "");
 }
 
 static ssize_t
 qla2x00_pci_info_show(struct device *dev, struct device_attribute *attr,
 		      char *buf)
 {
-	scsi_qla_host_t *ha = shost_priv(class_to_shost(dev));
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
 	char pci_info[30];
 
 	return snprintf(buf, PAGE_SIZE, "%s\n",
-	    ha->isp_ops->pci_info_str(ha, pci_info));
+	    vha->hw->isp_ops->pci_info_str(vha, pci_info));
 }
 
 static ssize_t
 qla2x00_link_state_show(struct device *dev, struct device_attribute *attr,
 			char *buf)
 {
-	scsi_qla_host_t *ha = shost_priv(class_to_shost(dev));
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+	struct qla_hw_data *ha = vha->hw;
 	int len = 0;
 
-	if (atomic_read(&ha->loop_state) == LOOP_DOWN ||
-	    atomic_read(&ha->loop_state) == LOOP_DEAD)
+	if (atomic_read(&vha->loop_state) == LOOP_DOWN ||
+	    atomic_read(&vha->loop_state) == LOOP_DEAD)
 		len = snprintf(buf, PAGE_SIZE, "Link Down\n");
-	else if (atomic_read(&ha->loop_state) != LOOP_READY ||
-	    test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags) ||
-	    test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags))
+	else if (atomic_read(&vha->loop_state) != LOOP_READY ||
+	    test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
+	    test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
 		len = snprintf(buf, PAGE_SIZE, "Unknown Link State\n");
 	else {
 		len = snprintf(buf, PAGE_SIZE, "Link Up - ");
@@ -632,10 +648,10 @@ static ssize_t
 qla2x00_zio_show(struct device *dev, struct device_attribute *attr,
 		 char *buf)
 {
-	scsi_qla_host_t *ha = shost_priv(class_to_shost(dev));
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
 	int len = 0;
 
-	switch (ha->zio_mode) {
+	switch (vha->hw->zio_mode) {
 	case QLA_ZIO_MODE_6:
 		len += snprintf(buf + len, PAGE_SIZE-len, "Mode 6\n");
 		break;
@@ -650,7 +666,8 @@ static ssize_t
 qla2x00_zio_store(struct device *dev, struct device_attribute *attr,
 		  const char *buf, size_t count)
 {
-	scsi_qla_host_t *ha = shost_priv(class_to_shost(dev));
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+	struct qla_hw_data *ha = vha->hw;
 	int val = 0;
 	uint16_t zio_mode;
 
@@ -668,7 +685,7 @@ qla2x00_zio_store(struct device *dev, struct device_attribute *attr,
 	/* Update per-hba values and queue a reset. */
 	if (zio_mode != QLA_ZIO_DISABLED || ha->zio_mode != QLA_ZIO_DISABLED) {
 		ha->zio_mode = zio_mode;
-		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
+		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
 	}
 	return strlen(buf);
 }
@@ -677,16 +694,16 @@ static ssize_t
 qla2x00_zio_timer_show(struct device *dev, struct device_attribute *attr,
 		       char *buf)
 {
-	scsi_qla_host_t *ha = shost_priv(class_to_shost(dev));
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
 
-	return snprintf(buf, PAGE_SIZE, "%d us\n", ha->zio_timer * 100);
+	return snprintf(buf, PAGE_SIZE, "%d us\n", vha->hw->zio_timer * 100);
 }
 
 static ssize_t
 qla2x00_zio_timer_store(struct device *dev, struct device_attribute *attr,
 			const char *buf, size_t count)
 {
-	scsi_qla_host_t *ha = shost_priv(class_to_shost(dev));
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
 	int val = 0;
 	uint16_t zio_timer;
 
@@ -696,7 +713,7 @@ qla2x00_zio_timer_store(struct device *dev, struct device_attribute *attr,
 		return -ERANGE;
 
 	zio_timer = (uint16_t)(val / 100);
-	ha->zio_timer = zio_timer;
+	vha->hw->zio_timer = zio_timer;
 
 	return strlen(buf);
 }
@@ -705,10 +722,10 @@ static ssize_t
 qla2x00_beacon_show(struct device *dev, struct device_attribute *attr,
 		    char *buf)
 {
-	scsi_qla_host_t *ha = shost_priv(class_to_shost(dev));
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
 	int len = 0;
 
-	if (ha->beacon_blink_led)
+	if (vha->hw->beacon_blink_led)
 		len += snprintf(buf + len, PAGE_SIZE-len, "Enabled\n");
 	else
 		len += snprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
@@ -719,14 +736,15 @@ static ssize_t
 qla2x00_beacon_store(struct device *dev, struct device_attribute *attr,
 		     const char *buf, size_t count)
 {
-	scsi_qla_host_t *ha = shost_priv(class_to_shost(dev));
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+	struct qla_hw_data *ha = vha->hw;
 	int val = 0;
 	int rval;
 
 	if (IS_QLA2100(ha) || IS_QLA2200(ha))
 		return -EPERM;
 
-	if (test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags)) {
+	if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
 		qla_printk(KERN_WARNING, ha,
 		    "Abort ISP active -- ignoring beacon request.\n");
 		return -EBUSY;
@@ -736,9 +754,9 @@ qla2x00_beacon_store(struct device *dev, struct device_attribute *attr,
 		return -EINVAL;
 
 	if (val)
-		rval = ha->isp_ops->beacon_on(ha);
+		rval = ha->isp_ops->beacon_on(vha);
 	else
-		rval = ha->isp_ops->beacon_off(ha);
+		rval = ha->isp_ops->beacon_off(vha);
 
 	if (rval != QLA_SUCCESS)
 		count = 0;
@@ -750,8 +768,8 @@ static ssize_t
 qla2x00_optrom_bios_version_show(struct device *dev,
 				 struct device_attribute *attr, char *buf)
 {
-	scsi_qla_host_t *ha = shost_priv(class_to_shost(dev));
-
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+	struct qla_hw_data *ha = vha->hw;
 	return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->bios_revision[1],
 	    ha->bios_revision[0]);
 }
@@ -760,8 +778,8 @@ static ssize_t
 qla2x00_optrom_efi_version_show(struct device *dev,
 				struct device_attribute *attr, char *buf)
 {
-	scsi_qla_host_t *ha = shost_priv(class_to_shost(dev));
-
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+	struct qla_hw_data *ha = vha->hw;
 	return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->efi_revision[1],
 	    ha->efi_revision[0]);
 }
@@ -770,8 +788,8 @@ static ssize_t
 qla2x00_optrom_fcode_version_show(struct device *dev,
 				  struct device_attribute *attr, char *buf)
 {
-	scsi_qla_host_t *ha = shost_priv(class_to_shost(dev));
-
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+	struct qla_hw_data *ha = vha->hw;
 	return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->fcode_revision[1],
 	    ha->fcode_revision[0]);
 }
@@ -780,8 +798,8 @@ static ssize_t
 qla2x00_optrom_fw_version_show(struct device *dev,
 			       struct device_attribute *attr, char *buf)
 {
-	scsi_qla_host_t *ha = shost_priv(class_to_shost(dev));
-
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+	struct qla_hw_data *ha = vha->hw;
 	return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d %d\n",
 	    ha->fw_revision[0], ha->fw_revision[1], ha->fw_revision[2],
 	    ha->fw_revision[3]);
@@ -791,8 +809,8 @@ static ssize_t
 qla2x00_total_isp_aborts_show(struct device *dev,
 			      struct device_attribute *attr, char *buf)
 {
-	scsi_qla_host_t *ha = shost_priv(class_to_shost(dev));
-
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+	struct qla_hw_data *ha = vha->hw;
 	return snprintf(buf, PAGE_SIZE, "%d\n",
 	    ha->qla_stats.total_isp_aborts);
 }
@@ -848,16 +866,17 @@ struct device_attribute *qla2x00_host_attrs[] = {
 static void
 qla2x00_get_host_port_id(struct Scsi_Host *shost)
 {
-	scsi_qla_host_t *ha = shost_priv(shost);
+	scsi_qla_host_t *vha = shost_priv(shost);
 
-	fc_host_port_id(shost) = ha->d_id.b.domain << 16 |
-	    ha->d_id.b.area << 8 | ha->d_id.b.al_pa;
+	fc_host_port_id(shost) = vha->d_id.b.domain << 16 |
+	    vha->d_id.b.area << 8 | vha->d_id.b.al_pa;
 }
 
 static void
 qla2x00_get_host_speed(struct Scsi_Host *shost)
 {
-	scsi_qla_host_t *ha = to_qla_parent(shost_priv(shost));
+	struct qla_hw_data *ha = ((struct scsi_qla_host *)
+					(shost_priv(shost)))->hw;
 	u32 speed = FC_PORTSPEED_UNKNOWN;
 
 	switch (ha->link_data_rate) {
@@ -880,14 +899,14 @@ qla2x00_get_host_speed(struct Scsi_Host *shost)
 static void
 qla2x00_get_host_port_type(struct Scsi_Host *shost)
 {
-	scsi_qla_host_t *ha = shost_priv(shost);
+	scsi_qla_host_t *vha = shost_priv(shost);
 	uint32_t port_type = FC_PORTTYPE_UNKNOWN;
 
-	if (ha->parent) {
+	if (vha->vp_idx) {
 		fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
 		return;
 	}
-	switch (ha->current_topology) {
+	switch (vha->hw->current_topology) {
 	case ISP_CFG_NL:
 		port_type = FC_PORTTYPE_LPORT;
 		break;
@@ -908,11 +927,11 @@ static void
 qla2x00_get_starget_node_name(struct scsi_target *starget)
 {
 	struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
-	scsi_qla_host_t *ha = shost_priv(host);
+	scsi_qla_host_t *vha = shost_priv(host);
 	fc_port_t *fcport;
 	u64 node_name = 0;
 
-	list_for_each_entry(fcport, &ha->fcports, list) {
+	list_for_each_entry(fcport, &vha->vp_fcports, list) {
 		if (fcport->rport &&
 		    starget->id == fcport->rport->scsi_target_id) {
 			node_name = wwn_to_u64(fcport->node_name);
@@ -927,11 +946,11 @@ static void
 qla2x00_get_starget_port_name(struct scsi_target *starget)
 {
 	struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
-	scsi_qla_host_t *ha = shost_priv(host);
+	scsi_qla_host_t *vha = shost_priv(host);
 	fc_port_t *fcport;
 	u64 port_name = 0;
 
-	list_for_each_entry(fcport, &ha->fcports, list) {
+	list_for_each_entry(fcport, &vha->vp_fcports, list) {
 		if (fcport->rport &&
 		    starget->id == fcport->rport->scsi_target_id) {
 			port_name = wwn_to_u64(fcport->port_name);
@@ -946,11 +965,11 @@ static void
 qla2x00_get_starget_port_id(struct scsi_target *starget)
 {
 	struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
-	scsi_qla_host_t *ha = shost_priv(host);
+	scsi_qla_host_t *vha = shost_priv(host);
 	fc_port_t *fcport;
 	uint32_t port_id = ~0U;
 
-	list_for_each_entry(fcport, &ha->fcports, list) {
+	list_for_each_entry(fcport, &vha->vp_fcports, list) {
 		if (fcport->rport &&
 		    starget->id == fcport->rport->scsi_target_id) {
 			port_id = fcport->d_id.b.domain << 16 |
@@ -999,9 +1018,9 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
 	 * final cleanup of firmware resources (PCBs and XCBs).
 	 */
 	if (fcport->loop_id != FC_NO_LOOP_ID) {
-		fcport->ha->isp_ops->fabric_logout(fcport->ha, fcport->loop_id,
-		    fcport->d_id.b.domain, fcport->d_id.b.area,
-		    fcport->d_id.b.al_pa);
+		fcport->vha->hw->isp_ops->fabric_logout(fcport->vha,
+			fcport->loop_id, fcport->d_id.b.domain,
+			fcport->d_id.b.area, fcport->d_id.b.al_pa);
 		fcport->loop_id = FC_NO_LOOP_ID;
 	}
 
@@ -1011,16 +1030,18 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
 static int
 qla2x00_issue_lip(struct Scsi_Host *shost)
 {
-	scsi_qla_host_t *ha = shost_priv(shost);
+	scsi_qla_host_t *vha = shost_priv(shost);
 
-	qla2x00_loop_reset(ha);
+	qla2x00_loop_reset(vha);
 	return 0;
 }
 
 static struct fc_host_statistics *
 qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
 {
-	scsi_qla_host_t *ha = to_qla_parent(shost_priv(shost));
+	scsi_qla_host_t *vha = shost_priv(shost);
+	struct qla_hw_data *ha = vha->hw;
+	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
 	int rval;
 	struct link_statistics *stats;
 	dma_addr_t stats_dma;
@@ -1032,21 +1053,21 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
 	stats = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &stats_dma);
 	if (stats == NULL) {
 		DEBUG2_3_11(printk("%s(%ld): Failed to allocate memory.\n",
-		    __func__, ha->host_no));
+		    __func__, base_vha->host_no));
 		goto done;
 	}
 	memset(stats, 0, DMA_POOL_SIZE);
 
 	rval = QLA_FUNCTION_FAILED;
 	if (IS_FWI2_CAPABLE(ha)) {
-		rval = qla24xx_get_isp_stats(ha, stats, stats_dma);
-	} else if (atomic_read(&ha->loop_state) == LOOP_READY &&
-		    !test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags) &&
-		    !test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) &&
+		rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma);
+	} else if (atomic_read(&base_vha->loop_state) == LOOP_READY &&
+		    !test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) &&
+		    !test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
 		    !ha->dpc_active) {
 		/* Must be in a 'READY' state for statistics retrieval. */
-		rval = qla2x00_get_link_status(ha, ha->loop_id, stats,
-		    stats_dma);
+		rval = qla2x00_get_link_status(base_vha, base_vha->loop_id,
+						stats, stats_dma);
 	}
 
 	if (rval != QLA_SUCCESS)
@@ -1077,29 +1098,29 @@ done:
 static void
 qla2x00_get_host_symbolic_name(struct Scsi_Host *shost)
 {
-	scsi_qla_host_t *ha = shost_priv(shost);
+	scsi_qla_host_t *vha = shost_priv(shost);
 
-	qla2x00_get_sym_node_name(ha, fc_host_symbolic_name(shost));
+	qla2x00_get_sym_node_name(vha, fc_host_symbolic_name(shost));
 }
 
 static void
 qla2x00_set_host_system_hostname(struct Scsi_Host *shost)
 {
-	scsi_qla_host_t *ha = shost_priv(shost);
+	scsi_qla_host_t *vha = shost_priv(shost);
 
-	set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags);
+	set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
 }
 
 static void
 qla2x00_get_host_fabric_name(struct Scsi_Host *shost)
 {
-	scsi_qla_host_t *ha = shost_priv(shost);
+	scsi_qla_host_t *vha = shost_priv(shost);
 	u64 node_name;
 
-	if (ha->device_flags & SWITCH_FOUND)
-		node_name = wwn_to_u64(ha->fabric_node_name);
+	if (vha->device_flags & SWITCH_FOUND)
+		node_name = wwn_to_u64(vha->fabric_node_name);
 	else
-		node_name = wwn_to_u64(ha->node_name);
+		node_name = wwn_to_u64(vha->node_name);
 
 	fc_host_fabric_name(shost) = node_name;
 }
@@ -1107,11 +1128,12 @@ qla2x00_get_host_fabric_name(struct Scsi_Host *shost)
 static void
 qla2x00_get_host_port_state(struct Scsi_Host *shost)
 {
-	scsi_qla_host_t *ha = to_qla_parent(shost_priv(shost));
+	scsi_qla_host_t *vha = shost_priv(shost);
+	struct scsi_qla_host *base_vha = pci_get_drvdata(vha->hw->pdev);
 
-	if (!ha->flags.online)
+	if (!base_vha->flags.online)
 		fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
-	else if (atomic_read(&ha->loop_state) == LOOP_TIMEOUT)
+	else if (atomic_read(&base_vha->loop_state) == LOOP_TIMEOUT)
 		fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
 	else
 		fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
@@ -1121,8 +1143,11 @@ static int
 qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
 {
 	int	ret = 0;
-	scsi_qla_host_t *ha = shost_priv(fc_vport->shost);
-	scsi_qla_host_t *vha;
+	int	cnt = 0;
+	uint8_t	qos = QLA_DEFAULT_QUE_QOS;
+	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
+	scsi_qla_host_t *vha = NULL;
+	struct qla_hw_data *ha = base_vha->hw;
 
 	ret = qla24xx_vport_create_req_sanity_check(fc_vport);
 	if (ret) {
@@ -1144,18 +1169,19 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
 		atomic_set(&vha->vp_state, VP_FAILED);
 
 	/* ready to create vport */
-	qla_printk(KERN_INFO, vha, "VP entry id %d assigned.\n", vha->vp_idx);
+	qla_printk(KERN_INFO, vha->hw, "VP entry id %d assigned.\n",
+							vha->vp_idx);
 
 	/* initialized vport states */
 	atomic_set(&vha->loop_state, LOOP_DOWN);
 	vha->vp_err_state=  VP_ERR_PORTDWN;
 	vha->vp_prev_err_state=  VP_ERR_UNKWN;
 	/* Check if physical ha port is Up */
-	if (atomic_read(&ha->loop_state) == LOOP_DOWN ||
-	    atomic_read(&ha->loop_state) == LOOP_DEAD) {
+	if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
+	    atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
 		/* Don't retry or attempt login of this virtual port */
 		DEBUG15(printk ("scsi(%ld): pport loop_state is not UP.\n",
-		    vha->host_no));
+		    base_vha->host_no));
 		atomic_set(&vha->loop_state, LOOP_DEAD);
 		if (!disable)
 			fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
@@ -1171,18 +1197,32 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
 	fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
 	fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
 	fc_host_supported_classes(vha->host) =
-		fc_host_supported_classes(ha->host);
+		fc_host_supported_classes(base_vha->host);
 	fc_host_supported_speeds(vha->host) =
-		fc_host_supported_speeds(ha->host);
+		fc_host_supported_speeds(base_vha->host);
 
 	qla24xx_vport_disable(fc_vport, disable);
 
+	/* Create a queue pair for the vport */
+	if (ha->mqenable) {
+		if (ha->npiv_info) {
+			for (; cnt < ha->nvram_npiv_size; cnt++) {
+				if (ha->npiv_info[cnt].port_name ==
+					vha->port_name &&
+					ha->npiv_info[cnt].node_name ==
+					vha->node_name) {
+					qos = ha->npiv_info[cnt].q_qos;
+					break;
+				}
+			}
+		}
+		qla25xx_create_queues(vha, qos);
+	}
+
 	return 0;
 vport_create_failed_2:
 	qla24xx_disable_vp(vha);
 	qla24xx_deallocate_vp_id(vha);
-	kfree(vha->port_name);
-	kfree(vha->node_name);
 	scsi_host_put(vha->host);
 	return FC_VPORT_FAILED;
 }
@@ -1191,17 +1231,34 @@ static int
 qla24xx_vport_delete(struct fc_vport *fc_vport)
 {
 	scsi_qla_host_t *vha = fc_vport->dd_data;
-	scsi_qla_host_t *pha = to_qla_parent(vha);
+	fc_port_t *fcport, *tfcport;
+	struct qla_hw_data *ha = vha->hw;
+	uint16_t id = vha->vp_idx;
 
 	while (test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags) ||
-	    test_bit(FCPORT_UPDATE_NEEDED, &pha->dpc_flags))
+	    test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags))
 		msleep(1000);
 
+	if (ha->mqenable) {
+		if (qla25xx_delete_queues(vha, 0) != QLA_SUCCESS)
+			qla_printk(KERN_WARNING, ha,
+				"Queue delete failed.\n");
+		vha->req_ques[0] = ha->req_q_map[0]->id;
+	}
+
 	qla24xx_disable_vp(vha);
-	qla24xx_deallocate_vp_id(vha);
 
-	kfree(vha->node_name);
-	kfree(vha->port_name);
+	fc_remove_host(vha->host);
+
+	scsi_remove_host(vha->host);
+
+	list_for_each_entry_safe(fcport, tfcport, &vha->vp_fcports, list) {
+		list_del(&fcport->list);
+		kfree(fcport);
+		fcport = NULL;
+	}
+
+	qla24xx_deallocate_vp_id(vha);
 
 	if (vha->timer_active) {
 		qla2x00_vp_stop_timer(vha);
@@ -1210,12 +1267,8 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
 		    vha->host_no, vha->vp_idx, vha));
         }
 
-	fc_remove_host(vha->host);
-
-	scsi_remove_host(vha->host);
-
 	scsi_host_put(vha->host);
-
+	qla_printk(KERN_INFO, ha, "vport %d deleted\n", id);
 	return 0;
 }
 
@@ -1318,15 +1371,16 @@ struct fc_function_template qla2xxx_transport_vport_functions = {
 };
 
 void
-qla2x00_init_host_attr(scsi_qla_host_t *ha)
+qla2x00_init_host_attr(scsi_qla_host_t *vha)
 {
+	struct qla_hw_data *ha = vha->hw;
 	u32 speed = FC_PORTSPEED_UNKNOWN;
 
-	fc_host_node_name(ha->host) = wwn_to_u64(ha->node_name);
-	fc_host_port_name(ha->host) = wwn_to_u64(ha->port_name);
-	fc_host_supported_classes(ha->host) = FC_COS_CLASS3;
-	fc_host_max_npiv_vports(ha->host) = ha->max_npiv_vports;;
-	fc_host_npiv_vports_inuse(ha->host) = ha->cur_vport_count;
+	fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
+	fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
+	fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
+	fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports;
+	fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;
 
 	if (IS_QLA25XX(ha))
 		speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
@@ -1338,5 +1392,5 @@ qla2x00_init_host_attr(scsi_qla_host_t *ha)
 		speed = FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
 	else
 		speed = FC_PORTSPEED_1GBIT;
-	fc_host_supported_speeds(ha->host) = speed;
+	fc_host_supported_speeds(vha->host) = speed;
 }
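
Every qla_attr.c handler converted above follows the same access pattern: resolve the per-port scsi_qla_host with shost_priv(), then reach shared adapter state through the new vha->hw back-pointer. A minimal sketch of that split, assuming driver context; the attribute itself is hypothetical, but shost_priv(), class_to_shost() and the vha->hw indirection are exactly what the converted handlers use:

	static ssize_t
	qla2x00_example_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
	{
		scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
		struct qla_hw_data *ha = vha->hw;  /* shared, one per PCI function */

		/* Port-level fields stay on vha; hardware fields live on ha. */
		return snprintf(buf, PAGE_SIZE, "%lu %d\n",
		    vha->host_no, ha->zio_mode);
	}
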
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 510ba64bc286..1cf77772623b 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -9,7 +9,7 @@
 #include <linux/delay.h>
 
 static inline void
-qla2xxx_prep_dump(scsi_qla_host_t *ha, struct qla2xxx_fw_dump *fw_dump)
+qla2xxx_prep_dump(struct qla_hw_data *ha, struct qla2xxx_fw_dump *fw_dump)
 {
 	fw_dump->fw_major_version = htonl(ha->fw_major_version);
 	fw_dump->fw_minor_version = htonl(ha->fw_minor_version);
@@ -23,22 +23,24 @@ qla2xxx_prep_dump(scsi_qla_host_t *ha, struct qla2xxx_fw_dump *fw_dump)
 }
 
 static inline void *
-qla2xxx_copy_queues(scsi_qla_host_t *ha, void *ptr)
+qla2xxx_copy_queues(struct qla_hw_data *ha, void *ptr)
 {
+	struct req_que *req = ha->req_q_map[0];
+	struct rsp_que *rsp = ha->rsp_q_map[0];
 	/* Request queue. */
-	memcpy(ptr, ha->request_ring, ha->request_q_length *
+	memcpy(ptr, req->ring, req->length *
 	    sizeof(request_t));
 
 	/* Response queue. */
-	ptr += ha->request_q_length * sizeof(request_t);
-	memcpy(ptr, ha->response_ring, ha->response_q_length  *
+	ptr += req->length * sizeof(request_t);
+	memcpy(ptr, rsp->ring, rsp->length  *
 	    sizeof(response_t));
 
-	return ptr + (ha->response_q_length * sizeof(response_t));
+	return ptr + (rsp->length * sizeof(response_t));
 }
 
 static int
-qla24xx_dump_ram(scsi_qla_host_t *ha, uint32_t addr, uint32_t *ram,
+qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
     uint32_t ram_dwords, void **nxt)
 {
 	int rval;
@@ -112,7 +114,7 @@ qla24xx_dump_ram(scsi_qla_host_t *ha, uint32_t addr, uint32_t *ram,
 }
 
 static int
-qla24xx_dump_memory(scsi_qla_host_t *ha, uint32_t *code_ram,
+qla24xx_dump_memory(struct qla_hw_data *ha, uint32_t *code_ram,
     uint32_t cram_size, void **nxt)
 {
 	int rval;
@@ -163,7 +165,7 @@ qla24xx_pause_risc(struct device_reg_24xx __iomem *reg)
 }
 
 static int
-qla24xx_soft_reset(scsi_qla_host_t *ha)
+qla24xx_soft_reset(struct qla_hw_data *ha)
 {
 	int rval = QLA_SUCCESS;
 	uint32_t cnt;
@@ -215,8 +217,8 @@ qla24xx_soft_reset(scsi_qla_host_t *ha)
 }
 
 static int
-qla2xxx_dump_ram(scsi_qla_host_t *ha, uint32_t addr, uint16_t *ram,
-    uint32_t ram_words, void **nxt)
+qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint16_t *ram,
+    uint16_t ram_words, void **nxt)
 {
 	int rval;
 	uint32_t cnt, stat, timer, words, idx;
@@ -314,16 +316,17 @@ qla2xxx_read_window(struct device_reg_2xxx __iomem *reg, uint32_t count,
  * @hardware_locked: Called with the hardware_lock
  */
 void
-qla2300_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
+qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
 {
 	int		rval;
 	uint32_t	cnt;
-
+	struct qla_hw_data *ha = vha->hw;
 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
 	uint16_t __iomem *dmp_reg;
 	unsigned long	flags;
 	struct qla2300_fw_dump	*fw;
 	void		*nxt;
+	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
 
 	flags = 0;
 
@@ -468,7 +471,7 @@ qla2300_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
 	} else {
 		qla_printk(KERN_INFO, ha,
 		    "Firmware dump saved to temp buffer (%ld/%p).\n",
-		    ha->host_no, ha->fw_dump);
+		    base_vha->host_no, ha->fw_dump);
 		ha->fw_dumped = 1;
 	}
 
@@ -483,16 +486,18 @@ qla2300_fw_dump_failed:
  * @hardware_locked: Called with the hardware_lock
  */
 void
-qla2100_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
+qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
 {
 	int		rval;
 	uint32_t	cnt, timer;
 	uint16_t	risc_address;
 	uint16_t	mb0, mb2;
+	struct qla_hw_data *ha = vha->hw;
 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
 	uint16_t __iomem *dmp_reg;
 	unsigned long	flags;
 	struct qla2100_fw_dump	*fw;
+	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
 
 	risc_address = 0;
 	mb0 = mb2 = 0;
@@ -673,7 +678,7 @@ qla2100_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
 	} else {
 		qla_printk(KERN_INFO, ha,
 		    "Firmware dump saved to temp buffer (%ld/%p).\n",
-		    ha->host_no, ha->fw_dump);
+		    base_vha->host_no, ha->fw_dump);
 		ha->fw_dumped = 1;
 	}
 
@@ -683,12 +688,12 @@ qla2100_fw_dump_failed:
 }
 
 void
-qla24xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
+qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
 {
 	int		rval;
 	uint32_t	cnt;
 	uint32_t	risc_address;
-
+	struct qla_hw_data *ha = vha->hw;
 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
 	uint32_t __iomem *dmp_reg;
 	uint32_t	*iter_reg;
@@ -697,6 +702,7 @@ qla24xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
 	struct qla24xx_fw_dump *fw;
 	uint32_t	ext_mem_cnt;
 	void		*nxt;
+	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
 
 	risc_address = ext_mem_cnt = 0;
 	flags = 0;
@@ -919,7 +925,7 @@ qla24xx_fw_dump_failed_0:
 	} else {
 		qla_printk(KERN_INFO, ha,
 		    "Firmware dump saved to temp buffer (%ld/%p).\n",
-		    ha->host_no, ha->fw_dump);
+		    base_vha->host_no, ha->fw_dump);
 		ha->fw_dumped = 1;
 	}
 
@@ -929,13 +935,14 @@ qla24xx_fw_dump_failed:
 }
 
 void
-qla25xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
+qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
 {
 	int		rval;
 	uint32_t	cnt;
 	uint32_t	risc_address;
-
+	struct qla_hw_data *ha = vha->hw;
 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+	struct device_reg_25xxmq __iomem *reg25;
 	uint32_t __iomem *dmp_reg;
 	uint32_t	*iter_reg;
 	uint16_t __iomem *mbx_reg;
@@ -944,6 +951,11 @@ qla25xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
 	uint32_t	ext_mem_cnt;
 	void		*nxt;
 	struct qla2xxx_fce_chain *fcec;
+	struct qla2xxx_mq_chain *mq = NULL;
+	uint32_t	qreg_size;
+	uint8_t		req_cnt, rsp_cnt, que_cnt;
+	uint32_t	que_idx;
+	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
 
 	risc_address = ext_mem_cnt = 0;
 	flags = 0;
@@ -988,6 +1000,29 @@ qla25xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
 	fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg++));
 	fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
 	fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));
+
+	/* Multi queue registers */
+	if (ha->mqenable) {
+		qreg_size = sizeof(struct qla2xxx_mq_chain);
+		mq = kzalloc(qreg_size, GFP_KERNEL);
+		if (!mq)
+			goto qla25xx_fw_dump_failed_0;
+		req_cnt = find_first_zero_bit(ha->req_qid_map, ha->max_queues);
+		rsp_cnt = find_first_zero_bit(ha->rsp_qid_map, ha->max_queues);
+		que_cnt = req_cnt > rsp_cnt ? req_cnt : rsp_cnt;
+		mq->count = htonl(que_cnt);
+		mq->chain_size = htonl(qreg_size);
+		mq->type = __constant_htonl(DUMP_CHAIN_MQ);
+		for (cnt = 0; cnt < que_cnt; cnt++) {
+			reg25 = (struct device_reg_25xxmq *) ((void *)
+				ha->mqiobase + cnt * QLA_QUE_PAGE);
+			que_idx = cnt * 4;
+			mq->qregs[que_idx] = htonl(reg25->req_q_in);
+			mq->qregs[que_idx+1] = htonl(reg25->req_q_out);
+			mq->qregs[que_idx+2] = htonl(reg25->rsp_q_in);
+			mq->qregs[que_idx+3] = htonl(reg25->rsp_q_out);
+		}
+	}
 	WRT_REG_DWORD(&reg->iobase_window, 0x00);
 	RD_REG_DWORD(&reg->iobase_window);
 
@@ -1225,7 +1260,14 @@ qla25xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
 
 	ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
 
-	fcec = nxt + ntohl(ha->fw_dump->eft_size);
+	if (ha->mqenable) {
+		nxt = nxt + ntohl(ha->fw_dump->eft_size);
+		memcpy(nxt, mq, qreg_size);
+		kfree(mq);
+		fcec = nxt + qreg_size;
+	} else {
+		fcec = nxt + ntohl(ha->fw_dump->eft_size);
+	}
 	fcec->type = __constant_htonl(DUMP_CHAIN_FCE | DUMP_CHAIN_LAST);
 	fcec->chain_size = htonl(sizeof(struct qla2xxx_fce_chain) +
 	    fce_calc_size(ha->fce_bufs));
@@ -1248,7 +1290,7 @@ qla25xx_fw_dump_failed_0:
 	} else {
 		qla_printk(KERN_INFO, ha,
 		    "Firmware dump saved to temp buffer (%ld/%p).\n",
-		    ha->host_no, ha->fw_dump);
+		    base_vha->host_no, ha->fw_dump);
 		ha->fw_dumped = 1;
 	}
 
@@ -1256,15 +1298,15 @@ qla25xx_fw_dump_failed:
 	if (!hardware_locked)
 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
 }
-
 /****************************************************************************/
 /*                         Driver Debug Functions.                          */
 /****************************************************************************/
 
 void
-qla2x00_dump_regs(scsi_qla_host_t *ha)
+qla2x00_dump_regs(scsi_qla_host_t *vha)
 {
 	int i;
+	struct qla_hw_data *ha = vha->hw;
 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
 	struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
 	uint16_t __iomem *mbx_reg;
@@ -1274,7 +1316,7 @@ qla2x00_dump_regs(scsi_qla_host_t *ha)
 
 	printk("Mailbox registers:\n");
 	for (i = 0; i < 6; i++)
-		printk("scsi(%ld): mbox %d 0x%04x \n", ha->host_no, i,
+		printk("scsi(%ld): mbox %d 0x%04x \n", vha->host_no, i,
 		    RD_REG_WORD(mbx_reg++));
 }
 
@@ -1302,3 +1344,5 @@ qla2x00_dump_buffer(uint8_t * b, uint32_t size)
 	if (cnt % 16)
 		printk("\n");
 }
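
With the DUMP_CHAIN_VARIANT bit set, qla25xx_fw_dump() above appends variable-length records after the EFT data: an optional multiqueue chain, then the FCE chain tagged DUMP_CHAIN_LAST. A hypothetical userspace walk over those records, assuming they are packed back-to-back exactly as the writer lays them out (struct chain_hdr and walk_dump_chains() are illustrative, not driver code):

	#include <stdint.h>
	#include <arpa/inet.h>			/* ntohl() */

	#define DUMP_CHAIN_LAST	0x80000000	/* value from qla_dbg.h */

	struct chain_hdr {			/* common prefix of each record */
		uint32_t type;			/* DUMP_CHAIN_MQ, DUMP_CHAIN_FCE, ... */
		uint32_t chain_size;		/* total bytes, header included */
	};

	static void walk_dump_chains(void *p, void (*cb)(uint32_t, void *))
	{
		struct chain_hdr *hdr = p;
		uint32_t type;

		do {
			type = ntohl(hdr->type);
			cb(type & ~DUMP_CHAIN_LAST, hdr);	/* strip LAST bit */
			hdr = (void *)((char *)hdr + ntohl(hdr->chain_size));
		} while (!(type & DUMP_CHAIN_LAST));
	}
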
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index 2e9c0c097f5e..c1794a70a45f 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -4,6 +4,9 @@
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
+
+#include "qla_def.h"
+
 /*
  * Driver debug definitions.
  */
@@ -23,6 +26,7 @@
 /* #define QL_DEBUG_LEVEL_14 */ /* Output RSCN trace msgs */
 /* #define QL_DEBUG_LEVEL_15 */ /* Output NPIV trace msgs */
 /* #define QL_DEBUG_LEVEL_16 */ /* Output ISP84XX trace msgs */
+/* #define QL_DEBUG_LEVEL_17 */ /* Output MULTI-Q trace messages */
 
 /*
 * Macros use for debugging the driver.
@@ -43,6 +47,7 @@
 #define DEBUG2_11(x)	do { if (ql2xextended_error_logging) { x; } } while (0)
 #define DEBUG2_13(x)	do { if (ql2xextended_error_logging) { x; } } while (0)
 #define DEBUG2_16(x)	do { if (ql2xextended_error_logging) { x; } } while (0)
+#define DEBUG2_17(x) 	do { if (ql2xextended_error_logging) { x; } } while (0)
 
 #if defined(QL_DEBUG_LEVEL_3)
 #define DEBUG3(x)	do {x;} while (0)
@@ -127,7 +132,6 @@
 #else
 #define DEBUG16(x)	do {} while (0)
 #endif
-
 /*
  * Firmware Dump structure definition
  */
@@ -266,8 +270,17 @@ struct qla2xxx_fce_chain {
 	uint32_t eregs[8];
 };
 
+struct qla2xxx_mq_chain {
+	uint32_t type;
+	uint32_t chain_size;
+
+	uint32_t count;
+	uint32_t qregs[4 * QLA_MQ_SIZE];
+};
+
 #define DUMP_CHAIN_VARIANT	0x80000000
 #define DUMP_CHAIN_FCE		0x7FFFFAF0
+#define DUMP_CHAIN_MQ		0x7FFFFAF1
 #define DUMP_CHAIN_LAST		0x80000000
 
 struct qla2xxx_fw_dump {
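
The qregs[] array in qla2xxx_mq_chain above is sized for the worst case, QLA_MQ_SIZE (32) queue pairs with four saved doorbell dwords each, while count records how many pairs actually carry data. A hypothetical decoder for one such record, assuming the driver headers are in scope (qla_dbg.h now pulls in qla_def.h, which defines QLA_MQ_SIZE):

	#include <stdio.h>
	#include <arpa/inet.h>
	#include "qla_dbg.h"	/* struct qla2xxx_mq_chain; includes qla_def.h */

	static void print_mq_chain(const struct qla2xxx_mq_chain *mq)
	{
		uint32_t i, n = ntohl(mq->count);

		for (i = 0; i < n && i < QLA_MQ_SIZE; i++)
			printf("que %u: req in/out %x/%x, rsp in/out %x/%x\n",
			    i, ntohl(mq->qregs[i * 4]),
			    ntohl(mq->qregs[i * 4 + 1]),
			    ntohl(mq->qregs[i * 4 + 2]),
			    ntohl(mq->qregs[i * 4 + 3]));
	}
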
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index b97194096d8e..a29c95204975 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -181,11 +181,14 @@
 #define RESPONSE_ENTRY_CNT_2100		64	/* Number of response entries.*/
 #define RESPONSE_ENTRY_CNT_2300		512	/* Number of response entries.*/
 
+struct req_que;
+
 /*
  * SCSI Request Block
  */
 typedef struct srb {
-	struct scsi_qla_host *ha;	/* HA the SP is queued on */
+	struct scsi_qla_host *vha;	/* HA the SP is queued on */
+	struct req_que *que;
 	struct fc_port *fcport;
 
 	struct scsi_cmnd *cmd;		/* Linux SCSI command pkt */
@@ -369,9 +372,17 @@ struct device_reg_2xxx {
 	} u_end;
 };
 
+struct device_reg_25xxmq {
+	volatile uint32_t req_q_in;
+	volatile uint32_t req_q_out;
+	volatile uint32_t rsp_q_in;
+	volatile uint32_t rsp_q_out;
+};
+
 typedef union {
 		struct device_reg_2xxx isp;
 		struct device_reg_24xx isp24;
+		struct device_reg_25xxmq isp25mq;
 } device_reg_t;
 
 #define ISP_REQ_Q_IN(ha, reg) \
@@ -1524,7 +1535,7 @@ typedef struct {
  */
 typedef struct fc_port {
 	struct list_head list;
-	struct scsi_qla_host *ha;
+	struct scsi_qla_host *vha;
 
 	uint8_t node_name[WWN_SIZE];
 	uint8_t port_name[WWN_SIZE];
@@ -1550,7 +1561,6 @@ typedef struct fc_port {
 	unsigned long last_queue_full;
 	unsigned long last_ramp_up;
 
-	struct list_head vp_fcport;
 	uint16_t vp_idx;
 } fc_port_t;
 
@@ -2037,6 +2047,8 @@ typedef struct vport_params {
 #define VP_RET_CODE_NO_MEM		5
 #define VP_RET_CODE_NOT_FOUND		6
 
+struct qla_hw_data;
+
 /*
  * ISP operations
  */
@@ -2055,10 +2067,11 @@ struct isp_operations {
 	char * (*fw_version_str) (struct scsi_qla_host *, char *);
 
 	irq_handler_t intr_handler;
-	void (*enable_intrs) (struct scsi_qla_host *);
-	void (*disable_intrs) (struct scsi_qla_host *);
+	void (*enable_intrs) (struct qla_hw_data *);
+	void (*disable_intrs) (struct qla_hw_data *);
 
-	int (*abort_command) (struct scsi_qla_host *, srb_t *);
+	int (*abort_command) (struct scsi_qla_host *, srb_t *,
+		struct req_que *);
 	int (*target_reset) (struct fc_port *, unsigned int);
 	int (*lun_reset) (struct fc_port *, unsigned int);
 	int (*fabric_login) (struct scsi_qla_host *, uint16_t, uint8_t,
@@ -2089,6 +2102,10 @@ struct isp_operations {
 		uint32_t);
 
 	int (*get_flash_version) (struct scsi_qla_host *, void *);
+	int (*start_scsi) (srb_t *);
+	void (*wrt_req_reg) (struct qla_hw_data *, uint16_t, uint16_t);
+	void (*wrt_rsp_reg) (struct qla_hw_data *, uint16_t, uint16_t);
+	uint16_t (*rd_req_reg) (struct qla_hw_data *, uint16_t);
 };
 
 /* MSI-X Support *************************************************************/
@@ -2100,16 +2117,18 @@ struct isp_operations {
 #define QLA_MSIX_DEFAULT	0x00
 #define QLA_MSIX_RSP_Q		0x01
 
-#define QLA_MSIX_ENTRIES	2
 #define QLA_MIDX_DEFAULT	0
 #define QLA_MIDX_RSP_Q		1
+#define QLA_PCI_MSIX_CONTROL	0xa2
 
 struct scsi_qla_host;
+struct rsp_que;
 
 struct qla_msix_entry {
 	int have_irq;
-	uint32_t msix_vector;
-	uint16_t msix_entry;
+	uint32_t vector;
+	uint16_t entry;
+	struct rsp_que *rsp;
 };
 
 #define	WATCH_INTERVAL		1       /* number of seconds */
@@ -2160,208 +2179,137 @@ struct qla_statistics {
 	uint64_t output_bytes;
 };
 
-/*
- * Linux Host Adapter structure
- */
-typedef struct scsi_qla_host {
-	struct list_head list;
+/* Multi queue support */
+#define MBC_INITIALIZE_MULTIQ 0x1f
+#define QLA_QUE_PAGE 0x1000
+#define QLA_MQ_SIZE 32
+#define QLA_MAX_HOST_QUES 16
+#define QLA_MAX_QUEUES 256
+#define ISP_QUE_REG(ha, id) \
+	((ha->mqenable) ? \
+	((void *)(ha->mqiobase) +\
+	(QLA_QUE_PAGE * id)) :\
+	((void *)(ha->iobase)))
+#define QLA_REQ_QUE_ID(tag) \
+	((tag < QLA_MAX_QUEUES && tag > 0) ? tag : 0)
+#define QLA_DEFAULT_QUE_QOS 5
+#define QLA_PRECONFIG_VPORTS 32
+#define QLA_MAX_VPORTS_QLA24XX	128
+#define QLA_MAX_VPORTS_QLA25XX	256
+/* Response queue data structure */
+struct rsp_que {
+	dma_addr_t  dma;
+	response_t *ring;
+	response_t *ring_ptr;
+	uint16_t  ring_index;
+	uint16_t  out_ptr;
+	uint16_t  length;
+	uint16_t  options;
+	uint16_t  rid;
+	uint16_t  id;
+	uint16_t  vp_idx;
+	struct qla_hw_data *hw;
+	struct qla_msix_entry *msix;
+	struct req_que *req;
+};
 
-	/* Commonly used flags and state information. */
-	struct Scsi_Host *host;
-	struct pci_dev	*pdev;
+/* Request queue data structure */
+struct req_que {
+	dma_addr_t  dma;
+	request_t *ring;
+	request_t *ring_ptr;
+	uint16_t  ring_index;
+	uint16_t  in_ptr;
+	uint16_t  cnt;
+	uint16_t  length;
+	uint16_t  options;
+	uint16_t  rid;
+	uint16_t  id;
+	uint16_t  qos;
+	uint16_t  vp_idx;
+	struct rsp_que *rsp;
+	srb_t *outstanding_cmds[MAX_OUTSTANDING_COMMANDS];
+	uint32_t current_outstanding_cmd;
+	int max_q_depth;
+};
 
-	unsigned long	host_no;
+/*
+ * Qlogic host adapter specific data structure.
+ */
+struct qla_hw_data {
+	struct pci_dev  *pdev;
+	/* SRB cache. */
+#define SRB_MIN_REQ     128
+	mempool_t       *srb_mempool;
 
 	volatile struct {
-		uint32_t	init_done		:1;
-		uint32_t	online			:1;
 		uint32_t	mbox_int		:1;
 		uint32_t	mbox_busy		:1;
-		uint32_t	rscn_queue_overflow	:1;
-		uint32_t	reset_active		:1;
-
-		uint32_t	management_server_logged_in :1;
-                uint32_t	process_response_queue	:1;
 
 		uint32_t	disable_risc_code_load	:1;
 		uint32_t	enable_64bit_addressing	:1;
 		uint32_t	enable_lip_reset	:1;
-		uint32_t	enable_lip_full_login	:1;
 		uint32_t	enable_target_reset	:1;
+		uint32_t	enable_lip_full_login	:1;
 		uint32_t	enable_led_scheme	:1;
 		uint32_t	inta_enabled		:1;
 		uint32_t	msi_enabled		:1;
 		uint32_t	msix_enabled		:1;
 		uint32_t	disable_serdes		:1;
 		uint32_t	gpsc_supported		:1;
-		uint32_t        vsan_enabled            :1;
+		uint32_t	vsan_enabled		:1;
 		uint32_t	npiv_supported		:1;
 		uint32_t	fce_enabled		:1;
-		uint32_t	hw_event_marker_found	:1;
+		uint32_t	hw_event_marker_found:1;
 	} flags;
 
-	atomic_t	loop_state;
-#define LOOP_TIMEOUT	1
-#define LOOP_DOWN	2
-#define LOOP_UP		3
-#define LOOP_UPDATE	4
-#define LOOP_READY	5
-#define LOOP_DEAD	6
-
-	unsigned long   dpc_flags;
-#define	RESET_MARKER_NEEDED	0	/* Send marker to ISP. */
-#define	RESET_ACTIVE		1
-#define	ISP_ABORT_NEEDED	2	/* Initiate ISP abort. */
-#define	ABORT_ISP_ACTIVE	3	/* ISP abort in progress. */
-#define	LOOP_RESYNC_NEEDED	4	/* Device Resync needed. */
-#define	LOOP_RESYNC_ACTIVE	5
-#define LOCAL_LOOP_UPDATE       6	/* Perform a local loop update. */
-#define RSCN_UPDATE             7	/* Perform an RSCN update. */
-#define MAILBOX_RETRY           8
-#define ISP_RESET_NEEDED        9	/* Initiate a ISP reset. */
-#define FAILOVER_EVENT_NEEDED   10
-#define FAILOVER_EVENT		11
-#define FAILOVER_NEEDED   	12
-#define SCSI_RESTART_NEEDED	13	/* Processes SCSI retry queue. */
-#define PORT_RESTART_NEEDED	14	/* Processes Retry queue. */
-#define RESTART_QUEUES_NEEDED	15	/* Restarts the Lun queue. */
-#define ABORT_QUEUES_NEEDED	16
-#define RELOGIN_NEEDED	        17
-#define LOGIN_RETRY_NEEDED	18	/* Initiate required fabric logins. */
-#define REGISTER_FC4_NEEDED	19	/* SNS FC4 registration required. */
-#define ISP_ABORT_RETRY         20      /* ISP aborted. */
-#define FCPORT_RESCAN_NEEDED	21      /* IO descriptor processing needed */
-#define IODESC_PROCESS_NEEDED	22      /* IO descriptor processing needed */
-#define IOCTL_ERROR_RECOVERY	23
-#define LOOP_RESET_NEEDED	24
-#define BEACON_BLINK_NEEDED	25
-#define REGISTER_FDMI_NEEDED	26
-#define FCPORT_UPDATE_NEEDED	27
-#define VP_DPC_NEEDED		28	/* wake up for VP dpc handling */
-#define UNLOADING		29
-#define NPIV_CONFIG_NEEDED	30
-
-	uint32_t	device_flags;
-#define DFLG_LOCAL_DEVICES		BIT_0
-#define DFLG_RETRY_LOCAL_DEVICES	BIT_1
-#define DFLG_FABRIC_DEVICES		BIT_2
-#define	SWITCH_FOUND			BIT_3
-#define	DFLG_NO_CABLE			BIT_4
-
-#define PCI_DEVICE_ID_QLOGIC_ISP2532	0x2532
-#define PCI_DEVICE_ID_QLOGIC_ISP8432	0x8432
-	uint32_t	device_type;
-#define DT_ISP2100			BIT_0
-#define DT_ISP2200			BIT_1
-#define DT_ISP2300			BIT_2
-#define DT_ISP2312			BIT_3
-#define DT_ISP2322			BIT_4
-#define DT_ISP6312			BIT_5
-#define DT_ISP6322			BIT_6
-#define DT_ISP2422			BIT_7
-#define DT_ISP2432			BIT_8
-#define DT_ISP5422			BIT_9
-#define DT_ISP5432			BIT_10
-#define DT_ISP2532			BIT_11
-#define DT_ISP8432			BIT_12
-#define DT_ISP_LAST			(DT_ISP8432 << 1)
-
-#define DT_IIDMA			BIT_26
-#define DT_FWI2				BIT_27
-#define DT_ZIO_SUPPORTED		BIT_28
-#define DT_OEM_001			BIT_29
-#define DT_ISP2200A			BIT_30
-#define DT_EXTENDED_IDS			BIT_31
-
-#define DT_MASK(ha)	((ha)->device_type & (DT_ISP_LAST - 1))
-#define IS_QLA2100(ha)	(DT_MASK(ha) & DT_ISP2100)
-#define IS_QLA2200(ha)	(DT_MASK(ha) & DT_ISP2200)
-#define IS_QLA2300(ha)	(DT_MASK(ha) & DT_ISP2300)
-#define IS_QLA2312(ha)	(DT_MASK(ha) & DT_ISP2312)
-#define IS_QLA2322(ha)	(DT_MASK(ha) & DT_ISP2322)
-#define IS_QLA6312(ha)	(DT_MASK(ha) & DT_ISP6312)
-#define IS_QLA6322(ha)	(DT_MASK(ha) & DT_ISP6322)
-#define IS_QLA2422(ha)	(DT_MASK(ha) & DT_ISP2422)
-#define IS_QLA2432(ha)	(DT_MASK(ha) & DT_ISP2432)
-#define IS_QLA5422(ha)	(DT_MASK(ha) & DT_ISP5422)
-#define IS_QLA5432(ha)	(DT_MASK(ha) & DT_ISP5432)
-#define IS_QLA2532(ha)	(DT_MASK(ha) & DT_ISP2532)
-#define IS_QLA8432(ha)	(DT_MASK(ha) & DT_ISP8432)
-
-#define IS_QLA23XX(ha)	(IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA2322(ha) || \
-    			 IS_QLA6312(ha) || IS_QLA6322(ha))
-#define IS_QLA24XX(ha)	(IS_QLA2422(ha) || IS_QLA2432(ha))
-#define IS_QLA54XX(ha)	(IS_QLA5422(ha) || IS_QLA5432(ha))
-#define IS_QLA25XX(ha)	(IS_QLA2532(ha))
-#define IS_QLA84XX(ha)	(IS_QLA8432(ha))
-#define IS_QLA24XX_TYPE(ha)	(IS_QLA24XX(ha) || IS_QLA54XX(ha) || \
-			IS_QLA84XX(ha))
-
-#define IS_IIDMA_CAPABLE(ha)	((ha)->device_type & DT_IIDMA)
-#define IS_FWI2_CAPABLE(ha)	((ha)->device_type & DT_FWI2)
-#define IS_ZIO_SUPPORTED(ha)	((ha)->device_type & DT_ZIO_SUPPORTED)
-#define IS_OEM_001(ha)		((ha)->device_type & DT_OEM_001)
-#define HAS_EXTENDED_IDS(ha)	((ha)->device_type & DT_EXTENDED_IDS)
-
-	/* SRB cache. */
-#define SRB_MIN_REQ	128
-	mempool_t	*srb_mempool;
-
 	/* This spinlock is used to protect "io transactions", you must
-	 * acquire it before doing any IO to the card, eg with RD_REG*() and
-	 * WRT_REG*() for the duration of your entire commandtransaction.
-	 *
-	 * This spinlock is of lower priority than the io request lock.
-	 */
-
-	spinlock_t		hardware_lock ____cacheline_aligned;
+	* acquire it before doing any IO to the card, e.g. with RD_REG*() and
+	* WRT_REG*() for the duration of your entire command transaction.
+	*
+	* This spinlock is of lower priority than the io request lock.
+	*/
 
+	spinlock_t	hardware_lock ____cacheline_aligned;
 	int		bars;
 	int		mem_only;
-	device_reg_t __iomem *iobase;		/* Base I/O address */
+	device_reg_t __iomem *iobase;           /* Base I/O address */
 	resource_size_t pio_address;
-#define MIN_IOBASE_LEN		0x100
-
-	/* ISP ring lock, rings, and indexes */
-	dma_addr_t	request_dma;        /* Physical address. */
-	request_t       *request_ring;      /* Base virtual address */
-	request_t       *request_ring_ptr;  /* Current address. */
-	uint16_t        req_ring_index;     /* Current index. */
-	uint16_t        req_q_cnt;          /* Number of available entries. */
-	uint16_t	request_q_length;
-
-	dma_addr_t	response_dma;       /* Physical address. */
-	response_t      *response_ring;     /* Base virtual address */
-	response_t      *response_ring_ptr; /* Current address. */
-	uint16_t        rsp_ring_index;     /* Current index. */
-	uint16_t	response_q_length;
-
-	struct isp_operations *isp_ops;
 
-	/* Outstandings ISP commands. */
-	srb_t		*outstanding_cmds[MAX_OUTSTANDING_COMMANDS];
-	uint32_t	current_outstanding_cmd;
-	srb_t		*status_srb;	/* Status continuation entry. */
+#define MIN_IOBASE_LEN          0x100
+/* Multi queue data structs */
+	device_reg_t    *mqiobase;
+	uint16_t        msix_count;
+	uint8_t         mqenable;
+	struct req_que **req_q_map;
+	struct rsp_que **rsp_q_map;
+	unsigned long req_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)];
+	unsigned long rsp_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)];
+	uint16_t 	max_queues;
+	struct qla_npiv_entry *npiv_info;
+	uint16_t	nvram_npiv_size;
+
+	uint16_t        switch_cap;
+#define FLOGI_SEQ_DEL           BIT_8
+#define FLOGI_MID_SUPPORT       BIT_10
+#define FLOGI_VSAN_SUPPORT      BIT_12
+#define FLOGI_SP_SUPPORT        BIT_13
+	/* Timeout timers. */
+	uint8_t 	loop_down_abort_time;    /* port down timer */
+	atomic_t	loop_down_timer;         /* loop down timer */
+	uint8_t		link_down_timeout;       /* link down timeout */
+	uint16_t	max_loop_id;
 
-	/* ISP configuration data. */
-	uint16_t	loop_id;		/* Host adapter loop id */
-	uint16_t	switch_cap;
-#define FLOGI_SEQ_DEL		BIT_8
-#define FLOGI_MID_SUPPORT	BIT_10
-#define FLOGI_VSAN_SUPPORT	BIT_12
-#define FLOGI_SP_SUPPORT	BIT_13
 	uint16_t	fb_rev;
-
-	port_id_t	d_id;			/* Host adapter port id */
 	uint16_t	max_public_loop_ids;
-	uint16_t	min_external_loopid;	/* First external loop Id */
+	uint16_t	min_external_loopid;    /* First external loop Id */
 
 #define PORT_SPEED_UNKNOWN 0xFFFF
-#define PORT_SPEED_1GB	0x00
-#define PORT_SPEED_2GB	0x01
-#define PORT_SPEED_4GB	0x03
-#define PORT_SPEED_8GB	0x04
-	uint16_t	link_data_rate;		/* F/W operating speed */
+#define PORT_SPEED_1GB  0x00
+#define PORT_SPEED_2GB  0x01
+#define PORT_SPEED_4GB  0x03
+#define PORT_SPEED_8GB  0x04
+	uint16_t	link_data_rate;         /* F/W operating speed */
 
 	uint8_t		current_topology;
 	uint8_t		prev_topology;
@@ -2370,15 +2318,69 @@ typedef struct scsi_qla_host {
 #define ISP_CFG_FL	4
 #define ISP_CFG_F	8
 
-	uint8_t		operating_mode;		/* F/W operating mode */
+	uint8_t		operating_mode;         /* F/W operating mode */
 #define LOOP      0
 #define P2P       1
 #define LOOP_P2P  2
 #define P2P_LOOP  3
-
-        uint8_t		marker_needed;
-
 	uint8_t		interrupts_on;
+	uint32_t	isp_abort_cnt;
+
+#define PCI_DEVICE_ID_QLOGIC_ISP2532    0x2532
+#define PCI_DEVICE_ID_QLOGIC_ISP8432    0x8432
+	uint32_t	device_type;
+#define DT_ISP2100                      BIT_0
+#define DT_ISP2200                      BIT_1
+#define DT_ISP2300                      BIT_2
+#define DT_ISP2312                      BIT_3
+#define DT_ISP2322                      BIT_4
+#define DT_ISP6312                      BIT_5
+#define DT_ISP6322                      BIT_6
+#define DT_ISP2422                      BIT_7
+#define DT_ISP2432                      BIT_8
+#define DT_ISP5422                      BIT_9
+#define DT_ISP5432                      BIT_10
+#define DT_ISP2532                      BIT_11
+#define DT_ISP8432                      BIT_12
+#define DT_ISP_LAST                     (DT_ISP8432 << 1)
+
+#define DT_IIDMA                        BIT_26
+#define DT_FWI2                         BIT_27
+#define DT_ZIO_SUPPORTED                BIT_28
+#define DT_OEM_001                      BIT_29
+#define DT_ISP2200A                     BIT_30
+#define DT_EXTENDED_IDS                 BIT_31
+#define DT_MASK(ha)     ((ha)->device_type & (DT_ISP_LAST - 1))
+#define IS_QLA2100(ha)  (DT_MASK(ha) & DT_ISP2100)
+#define IS_QLA2200(ha)  (DT_MASK(ha) & DT_ISP2200)
+#define IS_QLA2300(ha)  (DT_MASK(ha) & DT_ISP2300)
+#define IS_QLA2312(ha)  (DT_MASK(ha) & DT_ISP2312)
+#define IS_QLA2322(ha)  (DT_MASK(ha) & DT_ISP2322)
+#define IS_QLA6312(ha)  (DT_MASK(ha) & DT_ISP6312)
+#define IS_QLA6322(ha)  (DT_MASK(ha) & DT_ISP6322)
+#define IS_QLA2422(ha)  (DT_MASK(ha) & DT_ISP2422)
+#define IS_QLA2432(ha)  (DT_MASK(ha) & DT_ISP2432)
+#define IS_QLA5422(ha)  (DT_MASK(ha) & DT_ISP5422)
+#define IS_QLA5432(ha)  (DT_MASK(ha) & DT_ISP5432)
+#define IS_QLA2532(ha)  (DT_MASK(ha) & DT_ISP2532)
+#define IS_QLA8432(ha)  (DT_MASK(ha) & DT_ISP8432)
+
+#define IS_QLA23XX(ha)  (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA2322(ha) || \
+			IS_QLA6312(ha) || IS_QLA6322(ha))
+#define IS_QLA24XX(ha)  (IS_QLA2422(ha) || IS_QLA2432(ha))
+#define IS_QLA54XX(ha)  (IS_QLA5422(ha) || IS_QLA5432(ha))
+#define IS_QLA25XX(ha)  (IS_QLA2532(ha))
+#define IS_QLA84XX(ha)  (IS_QLA8432(ha))
+#define IS_QLA24XX_TYPE(ha)     (IS_QLA24XX(ha) || IS_QLA54XX(ha) || \
+				IS_QLA84XX(ha))
+#define IS_QLA2XXX_MIDTYPE(ha)	(IS_QLA24XX(ha) || IS_QLA84XX(ha) || \
+				IS_QLA25XX(ha))
+
+#define IS_IIDMA_CAPABLE(ha)    ((ha)->device_type & DT_IIDMA)
+#define IS_FWI2_CAPABLE(ha)     ((ha)->device_type & DT_FWI2)
+#define IS_ZIO_SUPPORTED(ha)    ((ha)->device_type & DT_ZIO_SUPPORTED)
+#define IS_OEM_001(ha)          ((ha)->device_type & DT_OEM_001)
+#define HAS_EXTENDED_IDS(ha)    ((ha)->device_type & DT_EXTENDED_IDS)
 
 	/* HBA serial number */
 	uint8_t		serial0;
@@ -2386,8 +2388,8 @@ typedef struct scsi_qla_host {
 	uint8_t		serial2;
 
 	/* NVRAM configuration data */
-#define MAX_NVRAM_SIZE	4096
-#define VPD_OFFSET	MAX_NVRAM_SIZE / 2
+#define MAX_NVRAM_SIZE  4096
+#define VPD_OFFSET      MAX_NVRAM_SIZE / 2
 	uint16_t	nvram_size;
 	uint16_t	nvram_base;
 	void		*nvram;
@@ -2401,22 +2403,8 @@ typedef struct scsi_qla_host {
 	uint16_t	r_a_tov;
 	int		port_down_retry_count;
 	uint8_t		mbx_count;
-	uint16_t	last_loop_id;
-	uint16_t	mgmt_svr_loop_id;
-
-        uint32_t	login_retry_count;
-	int		max_q_depth;
-
-	struct list_head	work_list;
-
-	/* Fibre Channel Device List. */
-	struct list_head	fcports;
-
-	/* RSCN queue. */
-	uint32_t rscn_queue[MAX_RSCN_COUNT];
-	uint8_t rscn_in_ptr;
-	uint8_t rscn_out_ptr;
 
+	uint32_t	login_retry_count;
 	/* SNS command interfaces. */
 	ms_iocb_entry_t		*ms_iocb;
 	dma_addr_t		ms_iocb_dma;
@@ -2426,28 +2414,20 @@ typedef struct scsi_qla_host {
 	struct sns_cmd_pkt	*sns_cmd;
 	dma_addr_t		sns_cmd_dma;
 
-#define SFP_DEV_SIZE	256
-#define SFP_BLOCK_SIZE	64
-	void			*sfp_data;
-	dma_addr_t		sfp_data_dma;
+#define SFP_DEV_SIZE    256
+#define SFP_BLOCK_SIZE  64
+	void		*sfp_data;
+	dma_addr_t	sfp_data_dma;
 
 	struct task_struct	*dpc_thread;
 	uint8_t dpc_active;                  /* DPC routine is active */
 
-	/* Timeout timers. */
-	uint8_t         loop_down_abort_time;    /* port down timer */
-	atomic_t        loop_down_timer;         /* loop down timer */
-	uint8_t         link_down_timeout;       /* link down timeout */
-
-	uint32_t        timer_active;
-	struct timer_list        timer;
-
 	dma_addr_t	gid_list_dma;
 	struct gid_list_info *gid_list;
 	int		gid_list_info_size;
 
 	/* Small DMA pool allocations -- maximum 256 bytes in length. */
-#define DMA_POOL_SIZE	256
+#define DMA_POOL_SIZE   256
 	struct dma_pool *s_dma_pool;
 
 	dma_addr_t	init_cb_dma;
@@ -2459,17 +2439,17 @@ typedef struct scsi_qla_host {
 
 	mbx_cmd_t	*mcp;
 	unsigned long	mbx_cmd_flags;
-#define MBX_INTERRUPT	1
-#define MBX_INTR_WAIT	2
+#define MBX_INTERRUPT		1
+#define MBX_INTR_WAIT		2
 #define MBX_UPDATE_FLASH_ACTIVE	3
 
-	struct mutex vport_lock;	/* Virtual port synchronization */
-	struct completion mbx_cmd_comp;	/* Serialize mbx access */
+	struct mutex vport_lock;        /* Virtual port synchronization */
+	struct completion mbx_cmd_comp; /* Serialize mbx access */
 	struct completion mbx_intr_comp;  /* Used for completion notification */
 
 	uint32_t	mbx_flags;
 #define  MBX_IN_PROGRESS	BIT_0
-#define  MBX_BUSY		BIT_1	/* Got the Access */
+#define  MBX_BUSY		BIT_1   /* Got the Access */
 #define  MBX_SLEEPING_ON_SEM	BIT_2
 #define  MBX_POLLING_FOR_COMP	BIT_3
 #define  MBX_COMPLETED		BIT_4
@@ -2488,7 +2468,7 @@ typedef struct scsi_qla_host {
 #define RISC_START_ADDRESS_2300 0x800
 #define RISC_START_ADDRESS_2400 0x100000
 
-	uint16_t	fw_options[16];		/* slots: 1,2,3,10,11 */
+	uint16_t	fw_options[16];         /* slots: 1,2,3,10,11 */
 	uint8_t		fw_seriallink_options[4];
 	uint16_t	fw_seriallink_options24[4];
 
@@ -2509,10 +2489,10 @@ typedef struct scsi_qla_host {
 	uint64_t	fce_wr, fce_rd;
 	struct mutex	fce_mutex;
 
+	uint32_t	hw_event_start;
 	uint32_t	hw_event_ptr;
 	uint32_t	hw_event_pause_errors;
 
-	uint8_t		host_str[16];
 	uint32_t	pci_attr;
 	uint16_t	chip_revision;
 
@@ -2523,11 +2503,6 @@ typedef struct scsi_qla_host {
 	char		model_desc[80];
 	uint8_t		adapter_id[16+1];
 
-	uint8_t		*node_name;
-	uint8_t		*port_name;
-	uint8_t		fabric_node_name[WWN_SIZE];
-	uint32_t    isp_abort_cnt;
-
 	/* Option ROM information. */
 	char		*optrom_buffer;
 	uint32_t	optrom_size;
@@ -2538,13 +2513,13 @@ typedef struct scsi_qla_host {
 	uint32_t	optrom_region_start;
 	uint32_t	optrom_region_size;
 
-        /* PCI expansion ROM image information. */
+	/* PCI expansion ROM image information. */
 #define ROM_CODE_TYPE_BIOS	0
 #define ROM_CODE_TYPE_FCODE	1
 #define ROM_CODE_TYPE_EFI	3
-	uint8_t		bios_revision[2];
-	uint8_t		efi_revision[2];
-	uint8_t		fcode_revision[16];
+	uint8_t 	bios_revision[2];
+	uint8_t 	efi_revision[2];
+	uint8_t 	fcode_revision[16];
 	uint32_t	fw_revision[4];
 
 	uint32_t	fdt_wrt_disable;
@@ -2553,39 +2528,144 @@ typedef struct scsi_qla_host {
 	uint32_t	fdt_unprotect_sec_cmd;
 	uint32_t	fdt_protect_sec_cmd;
 
-	uint32_t	flt_region_flt;
-	uint32_t	flt_region_fdt;
-	uint32_t	flt_region_boot;
-	uint32_t	flt_region_fw;
-	uint32_t	flt_region_vpd_nvram;
-	uint32_t	flt_region_hw_event;
-	uint32_t	flt_region_npiv_conf;
+	uint32_t        flt_region_flt;
+	uint32_t        flt_region_fdt;
+	uint32_t        flt_region_boot;
+	uint32_t        flt_region_fw;
+	uint32_t        flt_region_vpd_nvram;
+	uint32_t        flt_region_hw_event;
+	uint32_t        flt_region_npiv_conf;
 
 	/* Needed for BEACON */
-	uint16_t	beacon_blink_led;
-	uint8_t		beacon_color_state;
+	uint16_t        beacon_blink_led;
+	uint8_t         beacon_color_state;
 #define QLA_LED_GRN_ON		0x01
 #define QLA_LED_YLW_ON		0x02
 #define QLA_LED_ABR_ON		0x04
 #define QLA_LED_ALL_ON		0x07	/* yellow, green, amber. */
 					/* ISP2322: red, green, amber. */
-
-	uint16_t	zio_mode;
-	uint16_t	zio_timer;
+	uint16_t        zio_mode;
+	uint16_t        zio_timer;
 	struct fc_host_statistics fc_host_stat;
 
-	struct qla_msix_entry msix_entries[QLA_MSIX_ENTRIES];
+	struct qla_msix_entry *msix_entries;
+
+	struct list_head        vp_list;        /* list of VP */
+	unsigned long   vp_idx_map[(MAX_MULTI_ID_FABRIC / 8) /
+			sizeof(unsigned long)];
+	uint16_t        num_vhosts;     /* number of vports created */
+	uint16_t        num_vsans;      /* number of vsan created */
+	uint16_t        max_npiv_vports;        /* 63 or 125 per topology */
+	int             cur_vport_count;
+
+	struct qla_chip_state_84xx *cs84xx;
+	struct qla_statistics qla_stats;
+	struct isp_operations *isp_ops;
+};
+
+/*
+ * Qlogic scsi host structure
+ */
+typedef struct scsi_qla_host {
+	struct list_head list;
+	struct list_head vp_fcports;	/* list of fcports */
+	struct list_head work_list;
+	/* Commonly used flags and state information. */
+	struct Scsi_Host *host;
+	unsigned long	host_no;
+	uint8_t		host_str[16];
+
+	volatile struct {
+		uint32_t	init_done		:1;
+		uint32_t	online			:1;
+		uint32_t	rscn_queue_overflow	:1;
+		uint32_t	reset_active		:1;
+
+		uint32_t	management_server_logged_in :1;
+		uint32_t	process_response_queue	:1;
+	} flags;
+
+	atomic_t	loop_state;
+#define LOOP_TIMEOUT	1
+#define LOOP_DOWN	2
+#define LOOP_UP		3
+#define LOOP_UPDATE	4
+#define LOOP_READY	5
+#define LOOP_DEAD	6
+
+	unsigned long   dpc_flags;
+#define RESET_MARKER_NEEDED	0	/* Send marker to ISP. */
+#define RESET_ACTIVE		1
+#define ISP_ABORT_NEEDED	2	/* Initiate ISP abort. */
+#define ABORT_ISP_ACTIVE	3	/* ISP abort in progress. */
+#define LOOP_RESYNC_NEEDED	4	/* Device Resync needed. */
+#define LOOP_RESYNC_ACTIVE	5
+#define LOCAL_LOOP_UPDATE	6	/* Perform a local loop update. */
+#define RSCN_UPDATE		7	/* Perform an RSCN update. */
+#define MAILBOX_RETRY		8
+#define ISP_RESET_NEEDED	9	/* Initiate a ISP reset. */
+#define FAILOVER_EVENT_NEEDED	10
+#define FAILOVER_EVENT		11
+#define FAILOVER_NEEDED		12
+#define SCSI_RESTART_NEEDED	13	/* Processes SCSI retry queue. */
+#define PORT_RESTART_NEEDED	14	/* Processes Retry queue. */
+#define RESTART_QUEUES_NEEDED	15	/* Restarts the Lun queue. */
+#define ABORT_QUEUES_NEEDED	16
+#define RELOGIN_NEEDED		17
+#define LOGIN_RETRY_NEEDED	18	/* Initiate required fabric logins. */
+#define REGISTER_FC4_NEEDED	19	/* SNS FC4 registration required. */
+#define ISP_ABORT_RETRY		20	/* ISP aborted. */
+#define FCPORT_RESCAN_NEEDED	21	/* IO descriptor processing needed */
+#define IODESC_PROCESS_NEEDED	22	/* IO descriptor processing needed */
+#define IOCTL_ERROR_RECOVERY	23
+#define LOOP_RESET_NEEDED	24
+#define BEACON_BLINK_NEEDED	25
+#define REGISTER_FDMI_NEEDED	26
+#define FCPORT_UPDATE_NEEDED	27
+#define VP_DPC_NEEDED		28	/* wake up for VP dpc handling */
+#define UNLOADING		29
+#define NPIV_CONFIG_NEEDED	30
+
+	uint32_t	device_flags;
+#define DFLG_LOCAL_DEVICES		BIT_0
+#define DFLG_RETRY_LOCAL_DEVICES	BIT_1
+#define DFLG_FABRIC_DEVICES		BIT_2
+#define SWITCH_FOUND			BIT_3
+#define DFLG_NO_CABLE			BIT_4
+
+	srb_t		*status_srb;	/* Status continuation entry. */
+
+	/* ISP configuration data. */
+	uint16_t	loop_id;		/* Host adapter loop id */
+
+	port_id_t	d_id;			/* Host adapter port id */
+	uint8_t		marker_needed;
+	uint16_t	mgmt_svr_loop_id;
+
+	/* RSCN queue. */
+	uint32_t rscn_queue[MAX_RSCN_COUNT];
+	uint8_t rscn_in_ptr;
+	uint8_t rscn_out_ptr;
+
+	/* Timeout timers. */
+	uint8_t         loop_down_abort_time;    /* port down timer */
+	atomic_t        loop_down_timer;         /* loop down timer */
+	uint8_t         link_down_timeout;       /* link down timeout */
+
+	uint32_t        timer_active;
+	struct timer_list        timer;
+
+	uint8_t		node_name[WWN_SIZE];
+	uint8_t		port_name[WWN_SIZE];
+	uint8_t		fabric_node_name[WWN_SIZE];
+	uint32_t	vp_abort_cnt;
 
-	struct list_head	vp_list;	/* list of VP */
 	struct fc_vport	*fc_vport;	/* holds fc_vport * for each vport */
-	unsigned long	vp_idx_map[(MAX_MULTI_ID_FABRIC / 8) / sizeof(unsigned long)];
-	uint16_t        num_vhosts;	/* number of vports created */
-	uint16_t        num_vsans;	/* number of vsan created */
 	uint16_t        vp_idx;		/* vport ID */
 
-	struct scsi_qla_host	*parent;	/* holds pport */
 	unsigned long		vp_flags;
-	struct list_head	vp_fcports;	/* list of fcports */
 #define VP_IDX_ACQUIRED		0	/* bit no 0 */
 #define VP_CREATE_NEEDED	1
 #define VP_BIND_NEEDED		2
@@ -2604,14 +2684,10 @@ typedef struct scsi_qla_host {
 #define VP_ERR_FAB_NORESOURCES	3
 #define VP_ERR_FAB_LOGOUT	4
 #define VP_ERR_ADAP_NORESOURCES	5
-	uint16_t	max_npiv_vports;	/* 63 or 125 per topoloty */
-	int		cur_vport_count;
-
-	struct qla_chip_state_84xx *cs84xx;
-	struct qla_statistics qla_stats;
+	struct qla_hw_data *hw;
+	int	req_ques[QLA_MAX_HOST_QUES];
 } scsi_qla_host_t;
 
-
 /*
  * Macros to help code, maintain, etc.
  */
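
The struct split above moves hardware-wide state into qla_hw_data and leaves
per-port state (flags, dpc_flags, the RSCN queue, timers) in scsi_qla_host.
A minimal sketch of how the dpc_flags bit numbers and the vp_idx_map sizing
are meant to be used with the kernel bitops; the function name and the
MAX_MULTI_ID_FABRIC value (64) are assumptions for illustration only:

	/* 64 vport IDs -> 64 bits -> 8 bytes -> 8/sizeof(long) longs. */
	#define MAX_MULTI_ID_FABRIC	64	/* assumed value */

	static void qla2x00_dpc_flags_sketch(scsi_qla_host_t *vha)
	{
		DECLARE_BITMAP(vp_map, MAX_MULTI_ID_FABRIC);

		bitmap_zero(vp_map, MAX_MULTI_ID_FABRIC);
		set_bit(1, vp_map);	/* claim vp_idx 1, as vp_idx_map does */

		/* dpc_flags bits are set atomically, then the DPC thread runs. */
		set_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags);
		if (test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags))
			qla2xxx_wake_dpc(vha);
	}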
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index 561a4411719d..0e366a1b44b3 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -15,10 +15,11 @@ static atomic_t qla2x00_dfs_root_count;
 static int
 qla2x00_dfs_fce_show(struct seq_file *s, void *unused)
 {
-	scsi_qla_host_t *ha = s->private;
+	scsi_qla_host_t *vha = s->private;
 	uint32_t cnt;
 	uint32_t *fce;
 	uint64_t fce_start;
+	struct qla_hw_data *ha = vha->hw;
 
 	mutex_lock(&ha->fce_mutex);
 
@@ -51,7 +52,8 @@ qla2x00_dfs_fce_show(struct seq_file *s, void *unused)
 static int
 qla2x00_dfs_fce_open(struct inode *inode, struct file *file)
 {
-	scsi_qla_host_t *ha = inode->i_private;
+	scsi_qla_host_t *vha = inode->i_private;
+	struct qla_hw_data *ha = vha->hw;
 	int rval;
 
 	if (!ha->flags.fce_enabled)
@@ -60,7 +62,7 @@ qla2x00_dfs_fce_open(struct inode *inode, struct file *file)
 	mutex_lock(&ha->fce_mutex);
 
 	/* Pause tracing to flush FCE buffers. */
-	rval = qla2x00_disable_fce_trace(ha, &ha->fce_wr, &ha->fce_rd);
+	rval = qla2x00_disable_fce_trace(vha, &ha->fce_wr, &ha->fce_rd);
 	if (rval)
 		qla_printk(KERN_WARNING, ha,
 		    "DebugFS: Unable to disable FCE (%d).\n", rval);
@@ -75,7 +77,8 @@ out:
 static int
 qla2x00_dfs_fce_release(struct inode *inode, struct file *file)
 {
-	scsi_qla_host_t *ha = inode->i_private;
+	scsi_qla_host_t *vha = inode->i_private;
+	struct qla_hw_data *ha = vha->hw;
 	int rval;
 
 	if (ha->flags.fce_enabled)
@@ -86,7 +89,7 @@ qla2x00_dfs_fce_release(struct inode *inode, struct file *file)
 	/* Re-enable FCE tracing. */
 	ha->flags.fce_enabled = 1;
 	memset(ha->fce, 0, fce_calc_size(ha->fce_bufs));
-	rval = qla2x00_enable_fce_trace(ha, ha->fce_dma, ha->fce_bufs,
+	rval = qla2x00_enable_fce_trace(vha, ha->fce_dma, ha->fce_bufs,
 	    ha->fce_mb, &ha->fce_bufs);
 	if (rval) {
 		qla_printk(KERN_WARNING, ha,
@@ -107,8 +110,9 @@ static const struct file_operations dfs_fce_ops = {
 };
 
 int
-qla2x00_dfs_setup(scsi_qla_host_t *ha)
+qla2x00_dfs_setup(scsi_qla_host_t *vha)
 {
+	struct qla_hw_data *ha = vha->hw;
 	if (!IS_QLA25XX(ha))
 		goto out;
 	if (!ha->fce)
@@ -130,7 +134,7 @@ create_dir:
 		goto create_nodes;
 
 	mutex_init(&ha->fce_mutex);
-	ha->dfs_dir = debugfs_create_dir(ha->host_str, qla2x00_dfs_root);
+	ha->dfs_dir = debugfs_create_dir(vha->host_str, qla2x00_dfs_root);
 	if (!ha->dfs_dir) {
 		qla_printk(KERN_NOTICE, ha,
 		    "DebugFS: Unable to create ha directory.\n");
@@ -152,8 +156,9 @@ out:
 }
 
 int
-qla2x00_dfs_remove(scsi_qla_host_t *ha)
+qla2x00_dfs_remove(scsi_qla_host_t *vha)
 {
+	struct qla_hw_data *ha = vha->hw;
 	if (ha->dfs_fce) {
 		debugfs_remove(ha->dfs_fce);
 		ha->dfs_fce = NULL;
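
Every conversion in this file follows the same two-line idiom: the debugfs
entry points now take the per-port scsi_qla_host and immediately derive the
shared qla_hw_data from it. A minimal sketch of the pattern, with a
hypothetical function name:

	static int qla2x00_dfs_sketch(scsi_qla_host_t *vha)
	{
		struct qla_hw_data *ha = vha->hw;	/* shared adapter state */

		/* Port-scoped data (host_no, host_str) stays on vha; */
		/* adapter-scoped data (fce, fce_mutex) lives on ha. */
		mutex_lock(&ha->fce_mutex);
		/* ... operate on ha->fce ... */
		mutex_unlock(&ha->fce_mutex);
		return 0;
	}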
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index d1d14202575a..ee1f1e794c2d 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -299,7 +299,8 @@ struct init_cb_24xx {
 	uint32_t response_q_address[2];
 	uint32_t prio_request_q_address[2];
 
-	uint8_t reserved_2[8];
+	uint16_t msix;
+	uint8_t reserved_2[6];
 
 	uint16_t atio_q_inpointer;
 	uint16_t atio_q_length;
@@ -372,8 +373,9 @@ struct init_cb_24xx {
 	 * BIT 17-31 = Reserved
 	 */
 	uint32_t firmware_options_3;
-
-	uint8_t  reserved_3[24];
+	uint16_t qos;
+	uint16_t rid;
+	uint8_t  reserved_3[20];
 };
 
 /*
@@ -754,7 +756,8 @@ struct abort_entry_24xx {
 
 	uint32_t handle_to_abort;	/* System handle to abort. */
 
-	uint8_t reserved_1[32];
+	uint16_t req_que_no;
+	uint8_t reserved_1[30];
 
 	uint8_t port_id[3];		/* PortID of destination port. */
 	uint8_t vp_index;
@@ -1258,7 +1261,8 @@ struct qla_npiv_header {
 struct qla_npiv_entry {
 	uint16_t flags;
 	uint16_t vf_id;
-	uint16_t qos;
+	uint8_t q_qos;
+	uint8_t f_qos;
 	uint16_t unused1;
 	uint8_t port_name[WWN_SIZE];
 	uint8_t node_name[WWN_SIZE];
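
Each new firmware field above is carved out of an adjacent reserved_* array
so the overall structure layout stays fixed: 8 reserved bytes become the
2-byte msix plus reserved_2[6], 24 become qos + rid plus reserved_3[20],
32 become req_que_no plus reserved_1[30], and the 2-byte qos splits into
q_qos + f_qos. A compile-time sanity check in that spirit (the helper is
illustrative, not part of the patch):

	static inline void qla_fw_layout_check(void)
	{
		BUILD_BUG_ON(sizeof(uint16_t) + 6 != 8);	/* msix + reserved_2 */
		BUILD_BUG_ON(2 * sizeof(uint16_t) + 20 != 24);	/* qos + rid + reserved_3 */
		BUILD_BUG_ON(sizeof(uint16_t) + 30 != 32);	/* req_que_no + reserved_1 */
	}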
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 753dbe6cce6e..0011e31205db 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -63,6 +63,7 @@ extern int ql2xallocfwdump;
 extern int ql2xextended_error_logging;
 extern int ql2xqfullrampup;
 extern int ql2xiidmaenable;
+extern int ql2xmaxqueues;
 
 extern int qla2x00_loop_reset(scsi_qla_host_t *);
 extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@@ -72,7 +73,10 @@ extern int qla2x00_post_hwe_work(struct scsi_qla_host *, uint16_t , uint16_t,
     uint16_t, uint16_t);
 
 extern void qla2x00_abort_fcport_cmds(fc_port_t *);
-
+extern struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *,
+	struct qla_hw_data *);
+extern void qla2x00_free_host(struct scsi_qla_host *);
+extern void qla2x00_relogin(struct scsi_qla_host *);
 /*
  * Global Functions in qla_mid.c source file.
  */
@@ -94,7 +98,7 @@ extern void qla2x00_do_dpc_all_vps(scsi_qla_host_t *);
 extern int qla24xx_vport_create_req_sanity_check(struct fc_vport *);
 extern scsi_qla_host_t * qla24xx_create_vhost(struct fc_vport *);
 
-extern void qla2x00_sp_compl(scsi_qla_host_t *, srb_t *);
+extern void qla2x00_sp_compl(struct qla_hw_data *, srb_t *);
 
 extern char *qla2x00_get_fw_version_str(struct scsi_qla_host *, char *);
 
@@ -105,10 +109,11 @@ extern struct fw_blob *qla2x00_request_firmware(scsi_qla_host_t *);
 
 extern int qla2x00_wait_for_hba_online(scsi_qla_host_t *);
 
-extern void qla2xxx_wake_dpc(scsi_qla_host_t *);
-extern void qla2x00_alert_all_vps(scsi_qla_host_t *, uint16_t *);
-extern void qla2x00_async_event(scsi_qla_host_t *, uint16_t *);
-extern void qla2x00_vp_abort_isp(scsi_qla_host_t *);
+extern void qla2xxx_wake_dpc(struct scsi_qla_host *);
+extern void qla2x00_alert_all_vps(struct rsp_que *, uint16_t *);
+extern void qla2x00_async_event(scsi_qla_host_t *, struct rsp_que *,
+	uint16_t *);
+extern int  qla2x00_vp_abort_isp(scsi_qla_host_t *);
 
 /*
  * Global Function Prototypes in qla_iocb.c source file.
@@ -119,8 +124,10 @@ extern void qla2x00_build_scsi_iocbs_32(srb_t *, cmd_entry_t *, uint16_t);
 extern void qla2x00_build_scsi_iocbs_64(srb_t *, cmd_entry_t *, uint16_t);
 extern int qla2x00_start_scsi(srb_t *sp);
 extern int qla24xx_start_scsi(srb_t *sp);
-int qla2x00_marker(scsi_qla_host_t *, uint16_t, uint16_t, uint8_t);
-int __qla2x00_marker(scsi_qla_host_t *, uint16_t, uint16_t, uint8_t);
+int qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *,
+						uint16_t, uint16_t, uint8_t);
+int __qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *,
+						uint16_t, uint16_t, uint8_t);
 
 /*
  * Global Function Prototypes in qla_mbx.c source file.
@@ -154,7 +161,7 @@ extern int
 qla2x00_issue_iocb(scsi_qla_host_t *, void *, dma_addr_t, size_t);
 
 extern int
-qla2x00_abort_command(scsi_qla_host_t *, srb_t *);
+qla2x00_abort_command(scsi_qla_host_t *, srb_t *, struct req_que *);
 
 extern int
 qla2x00_abort_target(struct fc_port *, unsigned int);
@@ -225,7 +232,7 @@ extern int
 qla24xx_get_isp_stats(scsi_qla_host_t *, struct link_statistics *,
     dma_addr_t);
 
-extern int qla24xx_abort_command(scsi_qla_host_t *, srb_t *);
+extern int qla24xx_abort_command(scsi_qla_host_t *, srb_t *, struct req_que *);
 extern int qla24xx_abort_target(struct fc_port *, unsigned int);
 extern int qla24xx_lun_reset(struct fc_port *, unsigned int);
 
@@ -264,10 +271,10 @@ extern int qla84xx_verify_chip(struct scsi_qla_host *, uint16_t *);
 extern irqreturn_t qla2100_intr_handler(int, void *);
 extern irqreturn_t qla2300_intr_handler(int, void *);
 extern irqreturn_t qla24xx_intr_handler(int, void *);
-extern void qla2x00_process_response_queue(struct scsi_qla_host *);
-extern void qla24xx_process_response_queue(struct scsi_qla_host *);
+extern void qla2x00_process_response_queue(struct rsp_que *);
+extern void qla24xx_process_response_queue(struct rsp_que *);
 
-extern int qla2x00_request_irqs(scsi_qla_host_t *);
+extern int qla2x00_request_irqs(struct qla_hw_data *, struct rsp_que *);
 extern void qla2x00_free_irqs(scsi_qla_host_t *);
 
 /*
@@ -367,4 +374,27 @@ extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *);
  */
 extern int qla2x00_dfs_setup(scsi_qla_host_t *);
 extern int qla2x00_dfs_remove(scsi_qla_host_t *);
+
+/* Global function prototypes for multi-q */
+extern int qla25xx_request_irq(struct rsp_que *);
+extern int qla25xx_init_req_que(struct scsi_qla_host *, struct req_que *,
+	uint8_t);
+extern int qla25xx_init_rsp_que(struct scsi_qla_host *, struct rsp_que *,
+	uint8_t);
+extern int qla25xx_create_req_que(struct qla_hw_data *, uint16_t, uint8_t,
+	uint16_t, uint8_t, uint8_t);
+extern int qla25xx_create_rsp_que(struct qla_hw_data *, uint16_t, uint8_t,
+	uint16_t);
+extern int qla25xx_update_req_que(struct scsi_qla_host *, uint8_t, uint8_t);
+extern void qla2x00_init_response_q_entries(struct rsp_que *);
+extern int qla25xx_delete_req_que(struct scsi_qla_host *, struct req_que *);
+extern int qla25xx_delete_rsp_que(struct scsi_qla_host *, struct rsp_que *);
+extern int qla25xx_create_queues(struct scsi_qla_host *, uint8_t);
+extern int qla25xx_delete_queues(struct scsi_qla_host *, uint8_t);
+extern uint16_t qla24xx_rd_req_reg(struct qla_hw_data *, uint16_t);
+extern uint16_t qla25xx_rd_req_reg(struct qla_hw_data *, uint16_t);
+extern void qla24xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t);
+extern void qla25xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t);
+extern void qla25xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
+extern void qla24xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
 #endif /* _QLA_GBL_H */
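
The chip-specific queue-pointer accessors declared above differ only in which
ISP register bank they touch, so multi-queue code can hide the distinction
behind a small ops table instead of testing the ISP type on every in/out
pointer update. A sketch of that dispatch; the qla_que_reg_ops structure is
hypothetical, not part of the patch:

	struct qla_que_reg_ops {
		uint16_t (*rd_req)(struct qla_hw_data *, uint16_t);
		void (*wrt_req)(struct qla_hw_data *, uint16_t, uint16_t);
		void (*wrt_rsp)(struct qla_hw_data *, uint16_t, uint16_t);
	};

	static const struct qla_que_reg_ops qla24xx_que_ops = {
		.rd_req		= qla24xx_rd_req_reg,
		.wrt_req	= qla24xx_wrt_req_reg,
		.wrt_rsp	= qla24xx_wrt_rsp_reg,
	};

	static const struct qla_que_reg_ops qla25xx_que_ops = {
		.rd_req		= qla25xx_rd_req_reg,
		.wrt_req	= qla25xx_wrt_req_reg,
		.wrt_rsp	= qla25xx_wrt_rsp_reg,
	};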
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index c2a4bfbcb05b..0a6f72973996 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -22,8 +22,9 @@ static int qla2x00_sns_rnn_id(scsi_qla_host_t *);
  * Returns a pointer to the @ha's ms_iocb.
  */
 void *
-qla2x00_prep_ms_iocb(scsi_qla_host_t *ha, uint32_t req_size, uint32_t rsp_size)
+qla2x00_prep_ms_iocb(scsi_qla_host_t *vha, uint32_t req_size, uint32_t rsp_size)
 {
+	struct qla_hw_data *ha = vha->hw;
 	ms_iocb_entry_t *ms_pkt;
 
 	ms_pkt = ha->ms_iocb;
@@ -59,8 +60,9 @@ qla2x00_prep_ms_iocb(scsi_qla_host_t *ha, uint32_t req_size, uint32_t rsp_size)
  * Returns a pointer to the @ha's ms_iocb.
  */
 void *
-qla24xx_prep_ms_iocb(scsi_qla_host_t *ha, uint32_t req_size, uint32_t rsp_size)
+qla24xx_prep_ms_iocb(scsi_qla_host_t *vha, uint32_t req_size, uint32_t rsp_size)
 {
+	struct qla_hw_data *ha = vha->hw;
 	struct ct_entry_24xx *ct_pkt;
 
 	ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;
@@ -82,7 +84,7 @@ qla24xx_prep_ms_iocb(scsi_qla_host_t *ha, uint32_t req_size, uint32_t rsp_size)
 	ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
 	ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
 	ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count;
-	ct_pkt->vp_index = ha->vp_idx;
+	ct_pkt->vp_index = vha->vp_idx;
 
 	return (ct_pkt);
 }
@@ -110,16 +112,17 @@ qla2x00_prep_ct_req(struct ct_sns_req *ct_req, uint16_t cmd, uint16_t rsp_size)
 }
 
 static int
-qla2x00_chk_ms_status(scsi_qla_host_t *ha, ms_iocb_entry_t *ms_pkt,
+qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
     struct ct_sns_rsp *ct_rsp, const char *routine)
 {
 	int rval;
 	uint16_t comp_status;
+	struct qla_hw_data *ha = vha->hw;
 
 	rval = QLA_FUNCTION_FAILED;
 	if (ms_pkt->entry_status != 0) {
 		DEBUG2_3(printk("scsi(%ld): %s failed, error status (%x).\n",
-		    ha->host_no, routine, ms_pkt->entry_status));
+		    vha->host_no, routine, ms_pkt->entry_status));
 	} else {
 		if (IS_FWI2_CAPABLE(ha))
 			comp_status = le16_to_cpu(
@@ -133,7 +136,7 @@ qla2x00_chk_ms_status(scsi_qla_host_t *ha, ms_iocb_entry_t *ms_pkt,
 			if (ct_rsp->header.response !=
 			    __constant_cpu_to_be16(CT_ACCEPT_RESPONSE)) {
 				DEBUG2_3(printk("scsi(%ld): %s failed, "
-				    "rejected request:\n", ha->host_no,
+				    "rejected request:\n", vha->host_no,
 				    routine));
 				DEBUG2_3(qla2x00_dump_buffer(
 				    (uint8_t *)&ct_rsp->header,
@@ -144,7 +147,7 @@ qla2x00_chk_ms_status(scsi_qla_host_t *ha, ms_iocb_entry_t *ms_pkt,
 			break;
 		default:
 			DEBUG2_3(printk("scsi(%ld): %s failed, completion "
-			    "status (%x).\n", ha->host_no, routine,
+			    "status (%x).\n", vha->host_no, routine,
 			    comp_status));
 			break;
 		}
@@ -160,21 +163,21 @@ qla2x00_chk_ms_status(scsi_qla_host_t *ha, ms_iocb_entry_t *ms_pkt,
  * Returns 0 on success.
  */
 int
-qla2x00_ga_nxt(scsi_qla_host_t *ha, fc_port_t *fcport)
+qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
 {
 	int		rval;
 
 	ms_iocb_entry_t	*ms_pkt;
 	struct ct_sns_req	*ct_req;
 	struct ct_sns_rsp	*ct_rsp;
+	struct qla_hw_data *ha = vha->hw;
 
-	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
-		return (qla2x00_sns_ga_nxt(ha, fcport));
-	}
+	if (IS_QLA2100(ha) || IS_QLA2200(ha))
+		return qla2x00_sns_ga_nxt(vha, fcport);
 
 	/* Issue GA_NXT */
 	/* Prepare common MS IOCB */
-	ms_pkt = ha->isp_ops->prep_ms_iocb(ha, GA_NXT_REQ_SIZE,
+	ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GA_NXT_REQ_SIZE,
 	    GA_NXT_RSP_SIZE);
 
 	/* Prepare CT request */
@@ -188,13 +191,13 @@ qla2x00_ga_nxt(scsi_qla_host_t *ha, fc_port_t *fcport)
 	ct_req->req.port_id.port_id[2] = fcport->d_id.b.al_pa;
 
 	/* Execute MS IOCB */
-	rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma,
+	rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
 	    sizeof(ms_iocb_entry_t));
 	if (rval != QLA_SUCCESS) {
 		/*EMPTY*/
 		DEBUG2_3(printk("scsi(%ld): GA_NXT issue IOCB failed (%d).\n",
-		    ha->host_no, rval));
-	} else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp, "GA_NXT") !=
+		    vha->host_no, rval));
+	} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GA_NXT") !=
 	    QLA_SUCCESS) {
 		rval = QLA_FUNCTION_FAILED;
 	} else {
@@ -216,7 +219,7 @@ qla2x00_ga_nxt(scsi_qla_host_t *ha, fc_port_t *fcport)
 		    "nn %02x%02x%02x%02x%02x%02x%02x%02x "
 		    "pn %02x%02x%02x%02x%02x%02x%02x%02x "
 		    "portid=%02x%02x%02x.\n",
-		    ha->host_no,
+		    vha->host_no,
 		    fcport->node_name[0], fcport->node_name[1],
 		    fcport->node_name[2], fcport->node_name[3],
 		    fcport->node_name[4], fcport->node_name[5],
@@ -242,7 +245,7 @@ qla2x00_ga_nxt(scsi_qla_host_t *ha, fc_port_t *fcport)
  * Returns 0 on success.
  */
 int
-qla2x00_gid_pt(scsi_qla_host_t *ha, sw_info_t *list)
+qla2x00_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
 {
 	int		rval;
 	uint16_t	i;
@@ -252,16 +255,16 @@ qla2x00_gid_pt(scsi_qla_host_t *ha, sw_info_t *list)
 	struct ct_sns_rsp	*ct_rsp;
 
 	struct ct_sns_gid_pt_data *gid_data;
+	struct qla_hw_data *ha = vha->hw;
 
-	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
-		return (qla2x00_sns_gid_pt(ha, list));
-	}
+	if (IS_QLA2100(ha) || IS_QLA2200(ha))
+		return qla2x00_sns_gid_pt(vha, list);
 
 	gid_data = NULL;
 
 	/* Issue GID_PT */
 	/* Prepare common MS IOCB */
-	ms_pkt = ha->isp_ops->prep_ms_iocb(ha, GID_PT_REQ_SIZE,
+	ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GID_PT_REQ_SIZE,
 	    GID_PT_RSP_SIZE);
 
 	/* Prepare CT request */
@@ -273,13 +276,13 @@ qla2x00_gid_pt(scsi_qla_host_t *ha, sw_info_t *list)
 	ct_req->req.gid_pt.port_type = NS_NX_PORT_TYPE;
 
 	/* Execute MS IOCB */
-	rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma,
+	rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
 	    sizeof(ms_iocb_entry_t));
 	if (rval != QLA_SUCCESS) {
 		/*EMPTY*/
 		DEBUG2_3(printk("scsi(%ld): GID_PT issue IOCB failed (%d).\n",
-		    ha->host_no, rval));
-	} else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp, "GID_PT") !=
+		    vha->host_no, rval));
+	} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GID_PT") !=
 	    QLA_SUCCESS) {
 		rval = QLA_FUNCTION_FAILED;
 	} else {
@@ -320,7 +323,7 @@ qla2x00_gid_pt(scsi_qla_host_t *ha, sw_info_t *list)
  * Returns 0 on success.
  */
 int
-qla2x00_gpn_id(scsi_qla_host_t *ha, sw_info_t *list)
+qla2x00_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
 {
 	int		rval;
 	uint16_t	i;
@@ -328,15 +331,15 @@ qla2x00_gpn_id(scsi_qla_host_t *ha, sw_info_t *list)
 	ms_iocb_entry_t	*ms_pkt;
 	struct ct_sns_req	*ct_req;
 	struct ct_sns_rsp	*ct_rsp;
+	struct qla_hw_data *ha = vha->hw;
 
-	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
-		return (qla2x00_sns_gpn_id(ha, list));
-	}
+	if (IS_QLA2100(ha) || IS_QLA2200(ha))
+		return qla2x00_sns_gpn_id(vha, list);
 
 	for (i = 0; i < MAX_FIBRE_DEVICES; i++) {
 		/* Issue GPN_ID */
 		/* Prepare common MS IOCB */
-		ms_pkt = ha->isp_ops->prep_ms_iocb(ha, GPN_ID_REQ_SIZE,
+		ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GPN_ID_REQ_SIZE,
 		    GPN_ID_RSP_SIZE);
 
 		/* Prepare CT request */
@@ -350,13 +353,13 @@ qla2x00_gpn_id(scsi_qla_host_t *ha, sw_info_t *list)
 		ct_req->req.port_id.port_id[2] = list[i].d_id.b.al_pa;
 
 		/* Execute MS IOCB */
-		rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma,
+		rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
 		    sizeof(ms_iocb_entry_t));
 		if (rval != QLA_SUCCESS) {
 			/*EMPTY*/
 			DEBUG2_3(printk("scsi(%ld): GPN_ID issue IOCB failed "
-			    "(%d).\n", ha->host_no, rval));
-		} else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp,
+			    "(%d).\n", vha->host_no, rval));
+		} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
 		    "GPN_ID") != QLA_SUCCESS) {
 			rval = QLA_FUNCTION_FAILED;
 		} else {
@@ -381,23 +384,22 @@ qla2x00_gpn_id(scsi_qla_host_t *ha, sw_info_t *list)
  * Returns 0 on success.
  */
 int
-qla2x00_gnn_id(scsi_qla_host_t *ha, sw_info_t *list)
+qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
 {
 	int		rval;
 	uint16_t	i;
-
+	struct qla_hw_data *ha = vha->hw;
 	ms_iocb_entry_t	*ms_pkt;
 	struct ct_sns_req	*ct_req;
 	struct ct_sns_rsp	*ct_rsp;
 
-	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
-		return (qla2x00_sns_gnn_id(ha, list));
-	}
+	if (IS_QLA2100(ha) || IS_QLA2200(ha))
+		return qla2x00_sns_gnn_id(vha, list);
 
 	for (i = 0; i < MAX_FIBRE_DEVICES; i++) {
 		/* Issue GNN_ID */
 		/* Prepare common MS IOCB */
-		ms_pkt = ha->isp_ops->prep_ms_iocb(ha, GNN_ID_REQ_SIZE,
+		ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GNN_ID_REQ_SIZE,
 		    GNN_ID_RSP_SIZE);
 
 		/* Prepare CT request */
@@ -411,13 +413,13 @@ qla2x00_gnn_id(scsi_qla_host_t *ha, sw_info_t *list)
 		ct_req->req.port_id.port_id[2] = list[i].d_id.b.al_pa;
 
 		/* Execute MS IOCB */
-		rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma,
+		rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
 		    sizeof(ms_iocb_entry_t));
 		if (rval != QLA_SUCCESS) {
 			/*EMPTY*/
 			DEBUG2_3(printk("scsi(%ld): GNN_ID issue IOCB failed "
-			    "(%d).\n", ha->host_no, rval));
-		} else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp,
+			    "(%d).\n", vha->host_no, rval));
+		} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
 		    "GNN_ID") != QLA_SUCCESS) {
 			rval = QLA_FUNCTION_FAILED;
 		} else {
@@ -429,7 +431,7 @@ qla2x00_gnn_id(scsi_qla_host_t *ha, sw_info_t *list)
 			    "nn %02x%02x%02x%02x%02x%02x%02x%02x "
 			    "pn %02x%02x%02x%02x%02x%02x%02x%02x "
 			    "portid=%02x%02x%02x.\n",
-			    ha->host_no,
+			    vha->host_no,
 			    list[i].node_name[0], list[i].node_name[1],
 			    list[i].node_name[2], list[i].node_name[3],
 			    list[i].node_name[4], list[i].node_name[5],
@@ -457,21 +459,20 @@ qla2x00_gnn_id(scsi_qla_host_t *ha, sw_info_t *list)
  * Returns 0 on success.
  */
 int
-qla2x00_rft_id(scsi_qla_host_t *ha)
+qla2x00_rft_id(scsi_qla_host_t *vha)
 {
 	int		rval;
-
+	struct qla_hw_data *ha = vha->hw;
 	ms_iocb_entry_t	*ms_pkt;
 	struct ct_sns_req	*ct_req;
 	struct ct_sns_rsp	*ct_rsp;
 
-	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
-		return (qla2x00_sns_rft_id(ha));
-	}
+	if (IS_QLA2100(ha) || IS_QLA2200(ha))
+		return qla2x00_sns_rft_id(vha);
 
 	/* Issue RFT_ID */
 	/* Prepare common MS IOCB */
-	ms_pkt = ha->isp_ops->prep_ms_iocb(ha, RFT_ID_REQ_SIZE,
+	ms_pkt = ha->isp_ops->prep_ms_iocb(vha, RFT_ID_REQ_SIZE,
 	    RFT_ID_RSP_SIZE);
 
 	/* Prepare CT request */
@@ -480,25 +481,25 @@ qla2x00_rft_id(scsi_qla_host_t *ha)
 	ct_rsp = &ha->ct_sns->p.rsp;
 
 	/* Prepare CT arguments -- port_id, FC-4 types */
-	ct_req->req.rft_id.port_id[0] = ha->d_id.b.domain;
-	ct_req->req.rft_id.port_id[1] = ha->d_id.b.area;
-	ct_req->req.rft_id.port_id[2] = ha->d_id.b.al_pa;
+	ct_req->req.rft_id.port_id[0] = vha->d_id.b.domain;
+	ct_req->req.rft_id.port_id[1] = vha->d_id.b.area;
+	ct_req->req.rft_id.port_id[2] = vha->d_id.b.al_pa;
 
 	ct_req->req.rft_id.fc4_types[2] = 0x01;		/* FCP-3 */
 
 	/* Execute MS IOCB */
-	rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma,
+	rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
 	    sizeof(ms_iocb_entry_t));
 	if (rval != QLA_SUCCESS) {
 		/*EMPTY*/
 		DEBUG2_3(printk("scsi(%ld): RFT_ID issue IOCB failed (%d).\n",
-		    ha->host_no, rval));
-	} else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp, "RFT_ID") !=
+		    vha->host_no, rval));
+	} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RFT_ID") !=
 	    QLA_SUCCESS) {
 		rval = QLA_FUNCTION_FAILED;
 	} else {
 		DEBUG2(printk("scsi(%ld): RFT_ID exiting normally.\n",
-		    ha->host_no));
+		    vha->host_no));
 	}
 
 	return (rval);
@@ -511,23 +512,23 @@ qla2x00_rft_id(scsi_qla_host_t *ha)
  * Returns 0 on success.
  */
 int
-qla2x00_rff_id(scsi_qla_host_t *ha)
+qla2x00_rff_id(scsi_qla_host_t *vha)
 {
 	int		rval;
-
+	struct qla_hw_data *ha = vha->hw;
 	ms_iocb_entry_t	*ms_pkt;
 	struct ct_sns_req	*ct_req;
 	struct ct_sns_rsp	*ct_rsp;
 
 	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
 		DEBUG2(printk("scsi(%ld): RFF_ID call unsupported on "
-		    "ISP2100/ISP2200.\n", ha->host_no));
+		    "ISP2100/ISP2200.\n", vha->host_no));
 		return (QLA_SUCCESS);
 	}
 
 	/* Issue RFF_ID */
 	/* Prepare common MS IOCB */
-	ms_pkt = ha->isp_ops->prep_ms_iocb(ha, RFF_ID_REQ_SIZE,
+	ms_pkt = ha->isp_ops->prep_ms_iocb(vha, RFF_ID_REQ_SIZE,
 	    RFF_ID_RSP_SIZE);
 
 	/* Prepare CT request */
@@ -536,26 +537,26 @@ qla2x00_rff_id(scsi_qla_host_t *ha)
 	ct_rsp = &ha->ct_sns->p.rsp;
 
 	/* Prepare CT arguments -- port_id, FC-4 feature, FC-4 type */
-	ct_req->req.rff_id.port_id[0] = ha->d_id.b.domain;
-	ct_req->req.rff_id.port_id[1] = ha->d_id.b.area;
-	ct_req->req.rff_id.port_id[2] = ha->d_id.b.al_pa;
+	ct_req->req.rff_id.port_id[0] = vha->d_id.b.domain;
+	ct_req->req.rff_id.port_id[1] = vha->d_id.b.area;
+	ct_req->req.rff_id.port_id[2] = vha->d_id.b.al_pa;
 
 	ct_req->req.rff_id.fc4_feature = BIT_1;
 	ct_req->req.rff_id.fc4_type = 0x08;		/* SCSI - FCP */
 
 	/* Execute MS IOCB */
-	rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma,
+	rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
 	    sizeof(ms_iocb_entry_t));
 	if (rval != QLA_SUCCESS) {
 		/*EMPTY*/
 		DEBUG2_3(printk("scsi(%ld): RFF_ID issue IOCB failed (%d).\n",
-		    ha->host_no, rval));
-	} else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp, "RFF_ID") !=
+		    vha->host_no, rval));
+	} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RFF_ID") !=
 	    QLA_SUCCESS) {
 		rval = QLA_FUNCTION_FAILED;
 	} else {
 		DEBUG2(printk("scsi(%ld): RFF_ID exiting normally.\n",
-		    ha->host_no));
+		    vha->host_no));
 	}
 
 	return (rval);
@@ -568,21 +569,20 @@ qla2x00_rff_id(scsi_qla_host_t *ha)
  * Returns 0 on success.
  */
 int
-qla2x00_rnn_id(scsi_qla_host_t *ha)
+qla2x00_rnn_id(scsi_qla_host_t *vha)
 {
 	int		rval;
-
+	struct qla_hw_data *ha = vha->hw;
 	ms_iocb_entry_t	*ms_pkt;
 	struct ct_sns_req	*ct_req;
 	struct ct_sns_rsp	*ct_rsp;
 
-	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
-		return (qla2x00_sns_rnn_id(ha));
-	}
+	if (IS_QLA2100(ha) || IS_QLA2200(ha))
+		return qla2x00_sns_rnn_id(vha);
 
 	/* Issue RNN_ID */
 	/* Prepare common MS IOCB */
-	ms_pkt = ha->isp_ops->prep_ms_iocb(ha, RNN_ID_REQ_SIZE,
+	ms_pkt = ha->isp_ops->prep_ms_iocb(vha, RNN_ID_REQ_SIZE,
 	    RNN_ID_RSP_SIZE);
 
 	/* Prepare CT request */
@@ -591,33 +591,34 @@ qla2x00_rnn_id(scsi_qla_host_t *ha)
 	ct_rsp = &ha->ct_sns->p.rsp;
 
 	/* Prepare CT arguments -- port_id, node_name */
-	ct_req->req.rnn_id.port_id[0] = ha->d_id.b.domain;
-	ct_req->req.rnn_id.port_id[1] = ha->d_id.b.area;
-	ct_req->req.rnn_id.port_id[2] = ha->d_id.b.al_pa;
+	ct_req->req.rnn_id.port_id[0] = vha->d_id.b.domain;
+	ct_req->req.rnn_id.port_id[1] = vha->d_id.b.area;
+	ct_req->req.rnn_id.port_id[2] = vha->d_id.b.al_pa;
 
-	memcpy(ct_req->req.rnn_id.node_name, ha->node_name, WWN_SIZE);
+	memcpy(ct_req->req.rnn_id.node_name, vha->node_name, WWN_SIZE);
 
 	/* Execute MS IOCB */
-	rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma,
+	rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
 	    sizeof(ms_iocb_entry_t));
 	if (rval != QLA_SUCCESS) {
 		/*EMPTY*/
 		DEBUG2_3(printk("scsi(%ld): RNN_ID issue IOCB failed (%d).\n",
-		    ha->host_no, rval));
-	} else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp, "RNN_ID") !=
+		    vha->host_no, rval));
+	} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RNN_ID") !=
 	    QLA_SUCCESS) {
 		rval = QLA_FUNCTION_FAILED;
 	} else {
 		DEBUG2(printk("scsi(%ld): RNN_ID exiting normally.\n",
-		    ha->host_no));
+		    vha->host_no));
 	}
 
 	return (rval);
 }
 
 void
-qla2x00_get_sym_node_name(scsi_qla_host_t *ha, uint8_t *snn)
+qla2x00_get_sym_node_name(scsi_qla_host_t *vha, uint8_t *snn)
 {
+	struct qla_hw_data *ha = vha->hw;
 	sprintf(snn, "%s FW:v%d.%02d.%02d DVR:v%s", ha->model_number,
 	    ha->fw_major_version, ha->fw_minor_version,
 	    ha->fw_subminor_version, qla2x00_version_str);
@@ -630,23 +631,24 @@ qla2x00_get_sym_node_name(scsi_qla_host_t *ha, uint8_t *snn)
  * Returns 0 on success.
  */
 int
-qla2x00_rsnn_nn(scsi_qla_host_t *ha)
+qla2x00_rsnn_nn(scsi_qla_host_t *vha)
 {
 	int		rval;
+	struct qla_hw_data *ha = vha->hw;
 	ms_iocb_entry_t	*ms_pkt;
 	struct ct_sns_req	*ct_req;
 	struct ct_sns_rsp	*ct_rsp;
 
 	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
 		DEBUG2(printk("scsi(%ld): RSNN_ID call unsupported on "
-		    "ISP2100/ISP2200.\n", ha->host_no));
+		    "ISP2100/ISP2200.\n", vha->host_no));
 		return (QLA_SUCCESS);
 	}
 
 	/* Issue RSNN_NN */
 	/* Prepare common MS IOCB */
 	/*   Request size adjusted after CT preparation */
-	ms_pkt = ha->isp_ops->prep_ms_iocb(ha, 0, RSNN_NN_RSP_SIZE);
+	ms_pkt = ha->isp_ops->prep_ms_iocb(vha, 0, RSNN_NN_RSP_SIZE);
 
 	/* Prepare CT request */
 	ct_req = qla2x00_prep_ct_req(&ha->ct_sns->p.req, RSNN_NN_CMD,
@@ -654,10 +656,10 @@ qla2x00_rsnn_nn(scsi_qla_host_t *ha)
 	ct_rsp = &ha->ct_sns->p.rsp;
 
 	/* Prepare CT arguments -- node_name, symbolic node_name, size */
-	memcpy(ct_req->req.rsnn_nn.node_name, ha->node_name, WWN_SIZE);
+	memcpy(ct_req->req.rsnn_nn.node_name, vha->node_name, WWN_SIZE);
 
 	/* Prepare the Symbolic Node Name */
-	qla2x00_get_sym_node_name(ha, ct_req->req.rsnn_nn.sym_node_name);
+	qla2x00_get_sym_node_name(vha, ct_req->req.rsnn_nn.sym_node_name);
 
 	/* Calculate SNN length */
 	ct_req->req.rsnn_nn.name_len =
@@ -669,18 +671,18 @@ qla2x00_rsnn_nn(scsi_qla_host_t *ha)
 	ms_pkt->dseg_req_length = ms_pkt->req_bytecount;
 
 	/* Execute MS IOCB */
-	rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma,
+	rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
 	    sizeof(ms_iocb_entry_t));
 	if (rval != QLA_SUCCESS) {
 		/*EMPTY*/
 		DEBUG2_3(printk("scsi(%ld): RSNN_NN issue IOCB failed (%d).\n",
-		    ha->host_no, rval));
-	} else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp, "RSNN_NN") !=
+		    vha->host_no, rval));
+	} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RSNN_NN") !=
 	    QLA_SUCCESS) {
 		rval = QLA_FUNCTION_FAILED;
 	} else {
 		DEBUG2(printk("scsi(%ld): RSNN_NN exiting normally.\n",
-		    ha->host_no));
+		    vha->host_no));
 	}
 
 	return (rval);
@@ -696,11 +698,12 @@ qla2x00_rsnn_nn(scsi_qla_host_t *ha)
  * Returns a pointer to the @ha's sns_cmd.
  */
 static inline struct sns_cmd_pkt *
-qla2x00_prep_sns_cmd(scsi_qla_host_t *ha, uint16_t cmd, uint16_t scmd_len,
+qla2x00_prep_sns_cmd(scsi_qla_host_t *vha, uint16_t cmd, uint16_t scmd_len,
     uint16_t data_size)
 {
 	uint16_t		wc;
 	struct sns_cmd_pkt	*sns_cmd;
+	struct qla_hw_data *ha = vha->hw;
 
 	sns_cmd = ha->sns_cmd;
 	memset(sns_cmd, 0, sizeof(struct sns_cmd_pkt));
@@ -726,15 +729,15 @@ qla2x00_prep_sns_cmd(scsi_qla_host_t *ha, uint16_t cmd, uint16_t scmd_len,
  * Returns 0 on success.
  */
 static int
-qla2x00_sns_ga_nxt(scsi_qla_host_t *ha, fc_port_t *fcport)
+qla2x00_sns_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
 {
 	int		rval;
-
+	struct qla_hw_data *ha = vha->hw;
 	struct sns_cmd_pkt	*sns_cmd;
 
 	/* Issue GA_NXT. */
 	/* Prepare SNS command request. */
-	sns_cmd = qla2x00_prep_sns_cmd(ha, GA_NXT_CMD, GA_NXT_SNS_SCMD_LEN,
+	sns_cmd = qla2x00_prep_sns_cmd(vha, GA_NXT_CMD, GA_NXT_SNS_SCMD_LEN,
 	    GA_NXT_SNS_DATA_SIZE);
 
 	/* Prepare SNS command arguments -- port_id. */
@@ -743,16 +746,16 @@ qla2x00_sns_ga_nxt(scsi_qla_host_t *ha, fc_port_t *fcport)
 	sns_cmd->p.cmd.param[2] = fcport->d_id.b.domain;
 
 	/* Execute SNS command. */
-	rval = qla2x00_send_sns(ha, ha->sns_cmd_dma, GA_NXT_SNS_CMD_SIZE / 2,
+	rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, GA_NXT_SNS_CMD_SIZE / 2,
 	    sizeof(struct sns_cmd_pkt));
 	if (rval != QLA_SUCCESS) {
 		/*EMPTY*/
 		DEBUG2_3(printk("scsi(%ld): GA_NXT Send SNS failed (%d).\n",
-		    ha->host_no, rval));
+		    vha->host_no, rval));
 	} else if (sns_cmd->p.gan_data[8] != 0x80 ||
 	    sns_cmd->p.gan_data[9] != 0x02) {
 		DEBUG2_3(printk("scsi(%ld): GA_NXT failed, rejected request, "
-		    "ga_nxt_rsp:\n", ha->host_no));
+		    "ga_nxt_rsp:\n", vha->host_no));
 		DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.gan_data, 16));
 		rval = QLA_FUNCTION_FAILED;
 	} else {
@@ -772,7 +775,7 @@ qla2x00_sns_ga_nxt(scsi_qla_host_t *ha, fc_port_t *fcport)
 		    "nn %02x%02x%02x%02x%02x%02x%02x%02x "
 		    "pn %02x%02x%02x%02x%02x%02x%02x%02x "
 		    "portid=%02x%02x%02x.\n",
-		    ha->host_no,
+		    vha->host_no,
 		    fcport->node_name[0], fcport->node_name[1],
 		    fcport->node_name[2], fcport->node_name[3],
 		    fcport->node_name[4], fcport->node_name[5],
@@ -800,33 +803,33 @@ qla2x00_sns_ga_nxt(scsi_qla_host_t *ha, fc_port_t *fcport)
  * Returns 0 on success.
  */
 static int
-qla2x00_sns_gid_pt(scsi_qla_host_t *ha, sw_info_t *list)
+qla2x00_sns_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
 {
 	int		rval;
-
+	struct qla_hw_data *ha = vha->hw;
 	uint16_t	i;
 	uint8_t		*entry;
 	struct sns_cmd_pkt	*sns_cmd;
 
 	/* Issue GID_PT. */
 	/* Prepare SNS command request. */
-	sns_cmd = qla2x00_prep_sns_cmd(ha, GID_PT_CMD, GID_PT_SNS_SCMD_LEN,
+	sns_cmd = qla2x00_prep_sns_cmd(vha, GID_PT_CMD, GID_PT_SNS_SCMD_LEN,
 	    GID_PT_SNS_DATA_SIZE);
 
 	/* Prepare SNS command arguments -- port_type. */
 	sns_cmd->p.cmd.param[0] = NS_NX_PORT_TYPE;
 
 	/* Execute SNS command. */
-	rval = qla2x00_send_sns(ha, ha->sns_cmd_dma, GID_PT_SNS_CMD_SIZE / 2,
+	rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, GID_PT_SNS_CMD_SIZE / 2,
 	    sizeof(struct sns_cmd_pkt));
 	if (rval != QLA_SUCCESS) {
 		/*EMPTY*/
 		DEBUG2_3(printk("scsi(%ld): GID_PT Send SNS failed (%d).\n",
-		    ha->host_no, rval));
+		    vha->host_no, rval));
 	} else if (sns_cmd->p.gid_data[8] != 0x80 ||
 	    sns_cmd->p.gid_data[9] != 0x02) {
 		DEBUG2_3(printk("scsi(%ld): GID_PT failed, rejected request, "
-		    "gid_rsp:\n", ha->host_no));
+		    "gid_rsp:\n", vha->host_no));
 		DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.gid_data, 16));
 		rval = QLA_FUNCTION_FAILED;
 	} else {
@@ -867,17 +870,17 @@ qla2x00_sns_gid_pt(scsi_qla_host_t *ha, sw_info_t *list)
  * Returns 0 on success.
  */
 static int
-qla2x00_sns_gpn_id(scsi_qla_host_t *ha, sw_info_t *list)
+qla2x00_sns_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
 {
 	int		rval;
-
+	struct qla_hw_data *ha = vha->hw;
 	uint16_t	i;
 	struct sns_cmd_pkt	*sns_cmd;
 
 	for (i = 0; i < MAX_FIBRE_DEVICES; i++) {
 		/* Issue GPN_ID */
 		/* Prepare SNS command request. */
-		sns_cmd = qla2x00_prep_sns_cmd(ha, GPN_ID_CMD,
+		sns_cmd = qla2x00_prep_sns_cmd(vha, GPN_ID_CMD,
 		    GPN_ID_SNS_SCMD_LEN, GPN_ID_SNS_DATA_SIZE);
 
 		/* Prepare SNS command arguments -- port_id. */
@@ -886,16 +889,16 @@ qla2x00_sns_gpn_id(scsi_qla_host_t *ha, sw_info_t *list)
 		sns_cmd->p.cmd.param[2] = list[i].d_id.b.domain;
 
 		/* Execute SNS command. */
-		rval = qla2x00_send_sns(ha, ha->sns_cmd_dma,
+		rval = qla2x00_send_sns(vha, ha->sns_cmd_dma,
 		    GPN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt));
 		if (rval != QLA_SUCCESS) {
 			/*EMPTY*/
 			DEBUG2_3(printk("scsi(%ld): GPN_ID Send SNS failed "
-			    "(%d).\n", ha->host_no, rval));
+			    "(%d).\n", vha->host_no, rval));
 		} else if (sns_cmd->p.gpn_data[8] != 0x80 ||
 		    sns_cmd->p.gpn_data[9] != 0x02) {
 			DEBUG2_3(printk("scsi(%ld): GPN_ID failed, rejected "
-			    "request, gpn_rsp:\n", ha->host_no));
+			    "request, gpn_rsp:\n", vha->host_no));
 			DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.gpn_data, 16));
 			rval = QLA_FUNCTION_FAILED;
 		} else {
@@ -922,17 +925,17 @@ qla2x00_sns_gpn_id(scsi_qla_host_t *ha, sw_info_t *list)
  * Returns 0 on success.
  */
 static int
-qla2x00_sns_gnn_id(scsi_qla_host_t *ha, sw_info_t *list)
+qla2x00_sns_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
 {
 	int		rval;
-
+	struct qla_hw_data *ha = vha->hw;
 	uint16_t	i;
 	struct sns_cmd_pkt	*sns_cmd;
 
 	for (i = 0; i < MAX_FIBRE_DEVICES; i++) {
 		/* Issue GNN_ID */
 		/* Prepare SNS command request. */
-		sns_cmd = qla2x00_prep_sns_cmd(ha, GNN_ID_CMD,
+		sns_cmd = qla2x00_prep_sns_cmd(vha, GNN_ID_CMD,
 		    GNN_ID_SNS_SCMD_LEN, GNN_ID_SNS_DATA_SIZE);
 
 		/* Prepare SNS command arguments -- port_id. */
@@ -941,16 +944,16 @@ qla2x00_sns_gnn_id(scsi_qla_host_t *ha, sw_info_t *list)
 		sns_cmd->p.cmd.param[2] = list[i].d_id.b.domain;
 
 		/* Execute SNS command. */
-		rval = qla2x00_send_sns(ha, ha->sns_cmd_dma,
+		rval = qla2x00_send_sns(vha, ha->sns_cmd_dma,
 		    GNN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt));
 		if (rval != QLA_SUCCESS) {
 			/*EMPTY*/
 			DEBUG2_3(printk("scsi(%ld): GNN_ID Send SNS failed "
-			    "(%d).\n", ha->host_no, rval));
+			    "(%d).\n", vha->host_no, rval));
 		} else if (sns_cmd->p.gnn_data[8] != 0x80 ||
 		    sns_cmd->p.gnn_data[9] != 0x02) {
 			DEBUG2_3(printk("scsi(%ld): GNN_ID failed, rejected "
-			    "request, gnn_rsp:\n", ha->host_no));
+			    "request, gnn_rsp:\n", vha->host_no));
 			DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.gnn_data, 16));
 			rval = QLA_FUNCTION_FAILED;
 		} else {
@@ -962,7 +965,7 @@ qla2x00_sns_gnn_id(scsi_qla_host_t *ha, sw_info_t *list)
 			    "nn %02x%02x%02x%02x%02x%02x%02x%02x "
 			    "pn %02x%02x%02x%02x%02x%02x%02x%02x "
 			    "portid=%02x%02x%02x.\n",
-			    ha->host_no,
+			    vha->host_no,
 			    list[i].node_name[0], list[i].node_name[1],
 			    list[i].node_name[2], list[i].node_name[3],
 			    list[i].node_name[4], list[i].node_name[5],
@@ -992,40 +995,40 @@ qla2x00_sns_gnn_id(scsi_qla_host_t *ha, sw_info_t *list)
  * Returns 0 on success.
  */
 static int
-qla2x00_sns_rft_id(scsi_qla_host_t *ha)
+qla2x00_sns_rft_id(scsi_qla_host_t *vha)
 {
 	int		rval;
-
+	struct qla_hw_data *ha = vha->hw;
 	struct sns_cmd_pkt	*sns_cmd;
 
 	/* Issue RFT_ID. */
 	/* Prepare SNS command request. */
-	sns_cmd = qla2x00_prep_sns_cmd(ha, RFT_ID_CMD, RFT_ID_SNS_SCMD_LEN,
+	sns_cmd = qla2x00_prep_sns_cmd(vha, RFT_ID_CMD, RFT_ID_SNS_SCMD_LEN,
 	    RFT_ID_SNS_DATA_SIZE);
 
 	/* Prepare SNS command arguments -- port_id, FC-4 types */
-	sns_cmd->p.cmd.param[0] = ha->d_id.b.al_pa;
-	sns_cmd->p.cmd.param[1] = ha->d_id.b.area;
-	sns_cmd->p.cmd.param[2] = ha->d_id.b.domain;
+	sns_cmd->p.cmd.param[0] = vha->d_id.b.al_pa;
+	sns_cmd->p.cmd.param[1] = vha->d_id.b.area;
+	sns_cmd->p.cmd.param[2] = vha->d_id.b.domain;
 
 	sns_cmd->p.cmd.param[5] = 0x01;			/* FCP-3 */
 
 	/* Execute SNS command. */
-	rval = qla2x00_send_sns(ha, ha->sns_cmd_dma, RFT_ID_SNS_CMD_SIZE / 2,
+	rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, RFT_ID_SNS_CMD_SIZE / 2,
 	    sizeof(struct sns_cmd_pkt));
 	if (rval != QLA_SUCCESS) {
 		/*EMPTY*/
 		DEBUG2_3(printk("scsi(%ld): RFT_ID Send SNS failed (%d).\n",
-		    ha->host_no, rval));
+		    vha->host_no, rval));
 	} else if (sns_cmd->p.rft_data[8] != 0x80 ||
 	    sns_cmd->p.rft_data[9] != 0x02) {
 		DEBUG2_3(printk("scsi(%ld): RFT_ID failed, rejected request, "
-		    "rft_rsp:\n", ha->host_no));
+		    "rft_rsp:\n", vha->host_no));
 		DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.rft_data, 16));
 		rval = QLA_FUNCTION_FAILED;
 	} else {
 		DEBUG2(printk("scsi(%ld): RFT_ID exiting normally.\n",
-		    ha->host_no));
+		    vha->host_no));
 	}
 
 	return (rval);
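
The byte-wise test repeated in these SNS routines (data[8] == 0x80 and
data[9] == 0x02) checks the big-endian CT accept response code, 0x8002; it is
the same CT_ACCEPT_RESPONSE the MS path compares against. A standalone
equivalent with an illustrative helper name:

	/* SNS payloads carry the CT response code big-endian at offset 8. */
	static bool sns_rsp_accepted(const uint8_t *data)
	{
		uint16_t code = ((uint16_t)data[8] << 8) | data[9];

		return code == CT_ACCEPT_RESPONSE;	/* 0x8002 */
	}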
@@ -1041,47 +1044,47 @@ qla2x00_sns_rft_id(scsi_qla_host_t *ha)
  * Returns 0 on success.
  */
 static int
-qla2x00_sns_rnn_id(scsi_qla_host_t *ha)
+qla2x00_sns_rnn_id(scsi_qla_host_t *vha)
 {
 	int		rval;
-
+	struct qla_hw_data *ha = vha->hw;
 	struct sns_cmd_pkt	*sns_cmd;
 
 	/* Issue RNN_ID. */
 	/* Prepare SNS command request. */
-	sns_cmd = qla2x00_prep_sns_cmd(ha, RNN_ID_CMD, RNN_ID_SNS_SCMD_LEN,
+	sns_cmd = qla2x00_prep_sns_cmd(vha, RNN_ID_CMD, RNN_ID_SNS_SCMD_LEN,
 	    RNN_ID_SNS_DATA_SIZE);
 
 	/* Prepare SNS command arguments -- port_id, nodename. */
-	sns_cmd->p.cmd.param[0] = ha->d_id.b.al_pa;
-	sns_cmd->p.cmd.param[1] = ha->d_id.b.area;
-	sns_cmd->p.cmd.param[2] = ha->d_id.b.domain;
-
-	sns_cmd->p.cmd.param[4] = ha->node_name[7];
-	sns_cmd->p.cmd.param[5] = ha->node_name[6];
-	sns_cmd->p.cmd.param[6] = ha->node_name[5];
-	sns_cmd->p.cmd.param[7] = ha->node_name[4];
-	sns_cmd->p.cmd.param[8] = ha->node_name[3];
-	sns_cmd->p.cmd.param[9] = ha->node_name[2];
-	sns_cmd->p.cmd.param[10] = ha->node_name[1];
-	sns_cmd->p.cmd.param[11] = ha->node_name[0];
+	sns_cmd->p.cmd.param[0] = vha->d_id.b.al_pa;
+	sns_cmd->p.cmd.param[1] = vha->d_id.b.area;
+	sns_cmd->p.cmd.param[2] = vha->d_id.b.domain;
+
+	sns_cmd->p.cmd.param[4] = vha->node_name[7];
+	sns_cmd->p.cmd.param[5] = vha->node_name[6];
+	sns_cmd->p.cmd.param[6] = vha->node_name[5];
+	sns_cmd->p.cmd.param[7] = vha->node_name[4];
+	sns_cmd->p.cmd.param[8] = vha->node_name[3];
+	sns_cmd->p.cmd.param[9] = vha->node_name[2];
+	sns_cmd->p.cmd.param[10] = vha->node_name[1];
+	sns_cmd->p.cmd.param[11] = vha->node_name[0];
 
 	/* Execute SNS command. */
-	rval = qla2x00_send_sns(ha, ha->sns_cmd_dma, RNN_ID_SNS_CMD_SIZE / 2,
+	rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, RNN_ID_SNS_CMD_SIZE / 2,
 	    sizeof(struct sns_cmd_pkt));
 	if (rval != QLA_SUCCESS) {
 		/*EMPTY*/
 		DEBUG2_3(printk("scsi(%ld): RNN_ID Send SNS failed (%d).\n",
-		    ha->host_no, rval));
+		    vha->host_no, rval));
 	} else if (sns_cmd->p.rnn_data[8] != 0x80 ||
 	    sns_cmd->p.rnn_data[9] != 0x02) {
 		DEBUG2_3(printk("scsi(%ld): RNN_ID failed, rejected request, "
-		    "rnn_rsp:\n", ha->host_no));
+		    "rnn_rsp:\n", vha->host_no));
 		DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.rnn_data, 16));
 		rval = QLA_FUNCTION_FAILED;
 	} else {
 		DEBUG2(printk("scsi(%ld): RNN_ID exiting normally.\n",
-		    ha->host_no));
+		    vha->host_no));
 	}
 
 	return (rval);
@@ -1094,25 +1097,25 @@ qla2x00_sns_rnn_id(scsi_qla_host_t *ha)
  * Returns 0 on success.
  */
 static int
-qla2x00_mgmt_svr_login(scsi_qla_host_t *ha)
+qla2x00_mgmt_svr_login(scsi_qla_host_t *vha)
 {
 	int ret;
 	uint16_t mb[MAILBOX_REGISTER_COUNT];
-
+	struct qla_hw_data *ha = vha->hw;
 	ret = QLA_SUCCESS;
-	if (ha->flags.management_server_logged_in)
+	if (vha->flags.management_server_logged_in)
 		return ret;
 
-	ha->isp_ops->fabric_login(ha, ha->mgmt_svr_loop_id, 0xff, 0xff, 0xfa,
+	ha->isp_ops->fabric_login(vha, vha->mgmt_svr_loop_id, 0xff, 0xff, 0xfa,
 	    mb, BIT_1);
 	if (mb[0] != MBS_COMMAND_COMPLETE) {
 		DEBUG2_13(printk("%s(%ld): Failed MANAGEMENT_SERVER login: "
 		    "loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x\n",
-		    __func__, ha->host_no, ha->mgmt_svr_loop_id, mb[0], mb[1],
+		    __func__, vha->host_no, vha->mgmt_svr_loop_id, mb[0], mb[1],
 		    mb[2], mb[6], mb[7]));
 		ret = QLA_FUNCTION_FAILED;
 	} else
-		ha->flags.management_server_logged_in = 1;
+		vha->flags.management_server_logged_in = 1;
 
 	return ret;
 }
@@ -1126,17 +1129,17 @@ qla2x00_mgmt_svr_login(scsi_qla_host_t *ha)
  * Returns a pointer to the @ha's ms_iocb.
  */
 void *
-qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *ha, uint32_t req_size,
+qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size,
     uint32_t rsp_size)
 {
 	ms_iocb_entry_t *ms_pkt;
-
+	struct qla_hw_data *ha = vha->hw;
 	ms_pkt = ha->ms_iocb;
 	memset(ms_pkt, 0, sizeof(ms_iocb_entry_t));
 
 	ms_pkt->entry_type = MS_IOCB_TYPE;
 	ms_pkt->entry_count = 1;
-	SET_TARGET_ID(ha, ms_pkt->loop_id, ha->mgmt_svr_loop_id);
+	SET_TARGET_ID(ha, ms_pkt->loop_id, vha->mgmt_svr_loop_id);
 	ms_pkt->control_flags = __constant_cpu_to_le16(CF_READ | CF_HEAD_TAG);
 	ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
 	ms_pkt->cmd_dsd_count = __constant_cpu_to_le16(1);
@@ -1164,17 +1167,18 @@ qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *ha, uint32_t req_size,
  * Returns a pointer to the @ha's ms_iocb.
  */
 void *
-qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *ha, uint32_t req_size,
+qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size,
     uint32_t rsp_size)
 {
 	struct ct_entry_24xx *ct_pkt;
+	struct qla_hw_data *ha = vha->hw;
 
 	ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;
 	memset(ct_pkt, 0, sizeof(struct ct_entry_24xx));
 
 	ct_pkt->entry_type = CT_IOCB_TYPE;
 	ct_pkt->entry_count = 1;
-	ct_pkt->nport_handle = cpu_to_le16(ha->mgmt_svr_loop_id);
+	ct_pkt->nport_handle = cpu_to_le16(vha->mgmt_svr_loop_id);
 	ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
 	ct_pkt->cmd_dsd_count = __constant_cpu_to_le16(1);
 	ct_pkt->rsp_dsd_count = __constant_cpu_to_le16(1);
@@ -1188,14 +1192,15 @@ qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *ha, uint32_t req_size,
 	ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
 	ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
 	ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count;
-	ct_pkt->vp_index = ha->vp_idx;
+	ct_pkt->vp_index = vha->vp_idx;
 
 	return ct_pkt;
 }
 
 static inline ms_iocb_entry_t *
-qla2x00_update_ms_fdmi_iocb(scsi_qla_host_t *ha, uint32_t req_size)
+qla2x00_update_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size)
 {
+	struct qla_hw_data *ha = vha->hw;
 	ms_iocb_entry_t *ms_pkt = ha->ms_iocb;
 	struct ct_entry_24xx *ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;
 
@@ -1240,7 +1245,7 @@ qla2x00_prep_ct_fdmi_req(struct ct_sns_req *ct_req, uint16_t cmd,
  * Returns 0 on success.
  */
 static int
-qla2x00_fdmi_rhba(scsi_qla_host_t *ha)
+qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
 {
 	int rval, alen;
 	uint32_t size, sn;
@@ -1250,11 +1255,12 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *ha)
 	struct ct_sns_rsp *ct_rsp;
 	uint8_t *entries;
 	struct ct_fdmi_hba_attr *eiter;
+	struct qla_hw_data *ha = vha->hw;
 
 	/* Issue RHBA */
 	/* Prepare common MS IOCB */
 	/*   Request size adjusted after CT preparation */
-	ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(ha, 0, RHBA_RSP_SIZE);
+	ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RHBA_RSP_SIZE);
 
 	/* Prepare CT request */
 	ct_req = qla2x00_prep_ct_fdmi_req(&ha->ct_sns->p.req, RHBA_CMD,
@@ -1262,9 +1268,9 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *ha)
 	ct_rsp = &ha->ct_sns->p.rsp;
 
 	/* Prepare FDMI command arguments -- attribute block, attributes. */
-	memcpy(ct_req->req.rhba.hba_identifier, ha->port_name, WWN_SIZE);
+	memcpy(ct_req->req.rhba.hba_identifier, vha->port_name, WWN_SIZE);
 	ct_req->req.rhba.entry_count = __constant_cpu_to_be32(1);
-	memcpy(ct_req->req.rhba.port_name, ha->port_name, WWN_SIZE);
+	memcpy(ct_req->req.rhba.port_name, vha->port_name, WWN_SIZE);
 	size = 2 * WWN_SIZE + 4 + 4;
 
 	/* Attributes */
@@ -1276,11 +1282,11 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *ha)
 	eiter = (struct ct_fdmi_hba_attr *) (entries + size);
 	eiter->type = __constant_cpu_to_be16(FDMI_HBA_NODE_NAME);
 	eiter->len = __constant_cpu_to_be16(4 + WWN_SIZE);
-	memcpy(eiter->a.node_name, ha->node_name, WWN_SIZE);
+	memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE);
 	size += 4 + WWN_SIZE;
 
 	DEBUG13(printk("%s(%ld): NODENAME=%02x%02x%02x%02x%02x%02x%02x%02x.\n",
-	    __func__, ha->host_no,
+	    __func__, vha->host_no,
 	    eiter->a.node_name[0], eiter->a.node_name[1], eiter->a.node_name[2],
 	    eiter->a.node_name[3], eiter->a.node_name[4], eiter->a.node_name[5],
 	    eiter->a.node_name[6], eiter->a.node_name[7]));
@@ -1294,7 +1300,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *ha)
 	eiter->len = cpu_to_be16(4 + alen);
 	size += 4 + alen;
 
-	DEBUG13(printk("%s(%ld): MANUFACTURER=%s.\n", __func__, ha->host_no,
+	DEBUG13(printk("%s(%ld): MANUFACTURER=%s.\n", __func__, vha->host_no,
 	    eiter->a.manufacturer));
 
 	/* Serial number. */
@@ -1307,7 +1313,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *ha)
 	eiter->len = cpu_to_be16(4 + alen);
 	size += 4 + alen;
 
-	DEBUG13(printk("%s(%ld): SERIALNO=%s.\n", __func__, ha->host_no,
+	DEBUG13(printk("%s(%ld): SERIALNO=%s.\n", __func__, vha->host_no,
 	    eiter->a.serial_num));
 
 	/* Model name. */
@@ -1319,7 +1325,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *ha)
 	eiter->len = cpu_to_be16(4 + alen);
 	size += 4 + alen;
 
-	DEBUG13(printk("%s(%ld): MODEL_NAME=%s.\n", __func__, ha->host_no,
+	DEBUG13(printk("%s(%ld): MODEL_NAME=%s.\n", __func__, vha->host_no,
 	    eiter->a.model));
 
 	/* Model description. */
@@ -1332,7 +1338,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *ha)
 	eiter->len = cpu_to_be16(4 + alen);
 	size += 4 + alen;
 
-	DEBUG13(printk("%s(%ld): MODEL_DESC=%s.\n", __func__, ha->host_no,
+	DEBUG13(printk("%s(%ld): MODEL_DESC=%s.\n", __func__, vha->host_no,
 	    eiter->a.model_desc));
 
 	/* Hardware version. */
@@ -1344,7 +1350,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *ha)
 	eiter->len = cpu_to_be16(4 + alen);
 	size += 4 + alen;
 
-	DEBUG13(printk("%s(%ld): HARDWAREVER=%s.\n", __func__, ha->host_no,
+	DEBUG13(printk("%s(%ld): HARDWAREVER=%s.\n", __func__, vha->host_no,
 	    eiter->a.hw_version));
 
 	/* Driver version. */
@@ -1356,7 +1362,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *ha)
 	eiter->len = cpu_to_be16(4 + alen);
 	size += 4 + alen;
 
-	DEBUG13(printk("%s(%ld): DRIVERVER=%s.\n", __func__, ha->host_no,
+	DEBUG13(printk("%s(%ld): DRIVERVER=%s.\n", __func__, vha->host_no,
 	    eiter->a.driver_version));
 
 	/* Option ROM version. */
@@ -1368,27 +1374,27 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *ha)
 	eiter->len = cpu_to_be16(4 + alen);
 	size += 4 + alen;
 
-	DEBUG13(printk("%s(%ld): OPTROMVER=%s.\n", __func__, ha->host_no,
+	DEBUG13(printk("%s(%ld): OPTROMVER=%s.\n", __func__, vha->host_no,
 	    eiter->a.orom_version));
 
 	/* Firmware version */
 	eiter = (struct ct_fdmi_hba_attr *) (entries + size);
 	eiter->type = __constant_cpu_to_be16(FDMI_HBA_FIRMWARE_VERSION);
-	ha->isp_ops->fw_version_str(ha, eiter->a.fw_version);
+	ha->isp_ops->fw_version_str(vha, eiter->a.fw_version);
 	alen = strlen(eiter->a.fw_version);
 	alen += (alen & 3) ? (4 - (alen & 3)) : 4;
 	eiter->len = cpu_to_be16(4 + alen);
 	size += 4 + alen;
 
-	DEBUG13(printk("%s(%ld): FIRMWAREVER=%s.\n", __func__, ha->host_no,
+	DEBUG13(printk("%s(%ld): FIRMWAREVER=%s.\n", __func__, vha->host_no,
 	    eiter->a.fw_version));
 
 	/* Update MS request size. */
-	qla2x00_update_ms_fdmi_iocb(ha, size + 16);
+	qla2x00_update_ms_fdmi_iocb(vha, size + 16);
 
 	DEBUG13(printk("%s(%ld): RHBA identifier="
 	    "%02x%02x%02x%02x%02x%02x%02x%02x size=%d.\n", __func__,
-	    ha->host_no, ct_req->req.rhba.hba_identifier[0],
+	    vha->host_no, ct_req->req.rhba.hba_identifier[0],
 	    ct_req->req.rhba.hba_identifier[1],
 	    ct_req->req.rhba.hba_identifier[2],
 	    ct_req->req.rhba.hba_identifier[3],
@@ -1399,25 +1405,25 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *ha)
 	DEBUG13(qla2x00_dump_buffer(entries, size));
 
 	/* Execute MS IOCB */
-	rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma,
+	rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
 	    sizeof(ms_iocb_entry_t));
 	if (rval != QLA_SUCCESS) {
 		/*EMPTY*/
 		DEBUG2_3(printk("scsi(%ld): RHBA issue IOCB failed (%d).\n",
-		    ha->host_no, rval));
-	} else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp, "RHBA") !=
+		    vha->host_no, rval));
+	} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RHBA") !=
 	    QLA_SUCCESS) {
 		rval = QLA_FUNCTION_FAILED;
 		if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
 		    ct_rsp->header.explanation_code ==
 		    CT_EXPL_ALREADY_REGISTERED) {
 			DEBUG2_13(printk("%s(%ld): HBA already registered.\n",
-			    __func__, ha->host_no));
+			    __func__, vha->host_no));
 			rval = QLA_ALREADY_REGISTERED;
 		}
 	} else {
 		DEBUG2(printk("scsi(%ld): RHBA exiting normally.\n",
-		    ha->host_no));
+		    vha->host_no));
 	}
 
 	return rval;
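
The attribute-length arithmetic repeated throughout this routine rounds each
string up to the next 4-byte boundary and always reserves at least one full
pad word, so an already-aligned 8-byte string still grows to 12. A standalone
sketch of the same computation (helper name illustrative):

	/* Pad an FDMI attribute payload: round up to 4, minimum one pad word. */
	static uint16_t fdmi_attr_len(uint16_t alen)
	{
		alen += (alen & 3) ? (4 - (alen & 3)) : 4;
		return 4 + alen;	/* plus the 4-byte type/length header */
	}
	/* fdmi_attr_len(5) == 12, fdmi_attr_len(8) == 16 */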
@@ -1430,17 +1436,17 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *ha)
  * Returns 0 on success.
  */
 static int
-qla2x00_fdmi_dhba(scsi_qla_host_t *ha)
+qla2x00_fdmi_dhba(scsi_qla_host_t *vha)
 {
 	int rval;
-
+	struct qla_hw_data *ha = vha->hw;
 	ms_iocb_entry_t *ms_pkt;
 	struct ct_sns_req *ct_req;
 	struct ct_sns_rsp *ct_rsp;
 
 	/* Issue RPA */
 	/* Prepare common MS IOCB */
-	ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(ha, DHBA_REQ_SIZE,
+	ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, DHBA_REQ_SIZE,
 	    DHBA_RSP_SIZE);
 
 	/* Prepare CT request */
@@ -1449,28 +1455,28 @@ qla2x00_fdmi_dhba(scsi_qla_host_t *ha)
 	ct_rsp = &ha->ct_sns->p.rsp;
 
 	/* Prepare FDMI command arguments -- portname. */
-	memcpy(ct_req->req.dhba.port_name, ha->port_name, WWN_SIZE);
+	memcpy(ct_req->req.dhba.port_name, vha->port_name, WWN_SIZE);
 
 	DEBUG13(printk("%s(%ld): DHBA portname="
-	    "%02x%02x%02x%02x%02x%02x%02x%02x.\n", __func__, ha->host_no,
+	    "%02x%02x%02x%02x%02x%02x%02x%02x.\n", __func__, vha->host_no,
 	    ct_req->req.dhba.port_name[0], ct_req->req.dhba.port_name[1],
 	    ct_req->req.dhba.port_name[2], ct_req->req.dhba.port_name[3],
 	    ct_req->req.dhba.port_name[4], ct_req->req.dhba.port_name[5],
 	    ct_req->req.dhba.port_name[6], ct_req->req.dhba.port_name[7]));
 
 	/* Execute MS IOCB */
-	rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma,
+	rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
 	    sizeof(ms_iocb_entry_t));
 	if (rval != QLA_SUCCESS) {
 		/*EMPTY*/
 		DEBUG2_3(printk("scsi(%ld): DHBA issue IOCB failed (%d).\n",
-		    ha->host_no, rval));
-	} else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp, "DHBA") !=
+		    vha->host_no, rval));
+	} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "DHBA") !=
 	    QLA_SUCCESS) {
 		rval = QLA_FUNCTION_FAILED;
 	} else {
 		DEBUG2(printk("scsi(%ld): DHBA exiting normally.\n",
-		    ha->host_no));
+		    vha->host_no));
 	}
 
 	return rval;
@@ -1483,11 +1489,11 @@ qla2x00_fdmi_dhba(scsi_qla_host_t *ha)
  * Returns 0 on success.
  */
 static int
-qla2x00_fdmi_rpa(scsi_qla_host_t *ha)
+qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
 {
 	int rval, alen;
 	uint32_t size, max_frame_size;
-
+	struct qla_hw_data *ha = vha->hw;
 	ms_iocb_entry_t *ms_pkt;
 	struct ct_sns_req *ct_req;
 	struct ct_sns_rsp *ct_rsp;
@@ -1498,7 +1504,7 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *ha)
 	/* Issue RPA */
 	/* Prepare common MS IOCB */
 	/*   Request size adjusted after CT preparation */
-	ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(ha, 0, RPA_RSP_SIZE);
+	ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RPA_RSP_SIZE);
 
 	/* Prepare CT request */
 	ct_req = qla2x00_prep_ct_fdmi_req(&ha->ct_sns->p.req, RPA_CMD,
@@ -1506,7 +1512,7 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *ha)
 	ct_rsp = &ha->ct_sns->p.rsp;
 
 	/* Prepare FDMI command arguments -- attribute block, attributes. */
-	memcpy(ct_req->req.rpa.port_name, ha->port_name, WWN_SIZE);
+	memcpy(ct_req->req.rpa.port_name, vha->port_name, WWN_SIZE);
 	size = WWN_SIZE + 4;
 
 	/* Attributes */
@@ -1521,8 +1527,9 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *ha)
 	eiter->a.fc4_types[2] = 0x01;
 	size += 4 + 32;
 
-	DEBUG13(printk("%s(%ld): FC4_TYPES=%02x %02x.\n", __func__, ha->host_no,
-	    eiter->a.fc4_types[2], eiter->a.fc4_types[1]));
+	DEBUG13(printk("%s(%ld): FC4_TYPES=%02x %02x.\n", __func__,
+		vha->host_no, eiter->a.fc4_types[2],
+		eiter->a.fc4_types[1]));
 
 	/* Supported speed. */
 	eiter = (struct ct_fdmi_port_attr *) (entries + size);
@@ -1544,7 +1551,7 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *ha)
 		    FDMI_PORT_SPEED_1GB);
 	size += 4 + 4;
 
-	DEBUG13(printk("%s(%ld): SUPPORTED_SPEED=%x.\n", __func__, ha->host_no,
+	DEBUG13(printk("%s(%ld): SUPPORTED_SPEED=%x.\n", __func__, vha->host_no,
 	    eiter->a.sup_speed));
 
 	/* Current speed. */
@@ -1575,7 +1582,7 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *ha)
 	}
 	size += 4 + 4;
 
-	DEBUG13(printk("%s(%ld): CURRENT_SPEED=%x.\n", __func__, ha->host_no,
+	DEBUG13(printk("%s(%ld): CURRENT_SPEED=%x.\n", __func__, vha->host_no,
 	    eiter->a.cur_speed));
 
 	/* Max frame size. */
@@ -1588,7 +1595,7 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *ha)
 	eiter->a.max_frame_size = cpu_to_be32(max_frame_size);
 	size += 4 + 4;
 
-	DEBUG13(printk("%s(%ld): MAX_FRAME_SIZE=%x.\n", __func__, ha->host_no,
+	DEBUG13(printk("%s(%ld): MAX_FRAME_SIZE=%x.\n", __func__, vha->host_no,
 	    eiter->a.max_frame_size));
 
 	/* OS device name. */
@@ -1600,32 +1607,32 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *ha)
 	eiter->len = cpu_to_be16(4 + alen);
 	size += 4 + alen;
 
-	DEBUG13(printk("%s(%ld): OS_DEVICE_NAME=%s.\n", __func__, ha->host_no,
+	DEBUG13(printk("%s(%ld): OS_DEVICE_NAME=%s.\n", __func__, vha->host_no,
 	    eiter->a.os_dev_name));
 
 	/* Hostname. */
-	if (strlen(fc_host_system_hostname(ha->host))) {
+	if (strlen(fc_host_system_hostname(vha->host))) {
 		ct_req->req.rpa.attrs.count =
 		    __constant_cpu_to_be32(FDMI_PORT_ATTR_COUNT);
 		eiter = (struct ct_fdmi_port_attr *) (entries + size);
 		eiter->type = __constant_cpu_to_be16(FDMI_PORT_HOST_NAME);
 		snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
-		    "%s", fc_host_system_hostname(ha->host));
+		    "%s", fc_host_system_hostname(vha->host));
 		alen = strlen(eiter->a.host_name);
 		alen += (alen & 3) ? (4 - (alen & 3)) : 4;
 		eiter->len = cpu_to_be16(4 + alen);
 		size += 4 + alen;
 
 		DEBUG13(printk("%s(%ld): HOSTNAME=%s.\n", __func__,
-		    ha->host_no, eiter->a.host_name));
+		    vha->host_no, eiter->a.host_name));
 	}
 
 	/* Update MS request size. */
-	qla2x00_update_ms_fdmi_iocb(ha, size + 16);
+	qla2x00_update_ms_fdmi_iocb(vha, size + 16);
 
 	DEBUG13(printk("%s(%ld): RPA portname="
 	    "%02x%02x%02x%02x%02x%02x%02x%02x size=%d.\n", __func__,
-	    ha->host_no, ct_req->req.rpa.port_name[0],
+	    vha->host_no, ct_req->req.rpa.port_name[0],
 	    ct_req->req.rpa.port_name[1], ct_req->req.rpa.port_name[2],
 	    ct_req->req.rpa.port_name[3], ct_req->req.rpa.port_name[4],
 	    ct_req->req.rpa.port_name[5], ct_req->req.rpa.port_name[6],
@@ -1633,18 +1640,18 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *ha)
 	DEBUG13(qla2x00_dump_buffer(entries, size));
 
 	/* Execute MS IOCB */
-	rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma,
+	rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
 	    sizeof(ms_iocb_entry_t));
 	if (rval != QLA_SUCCESS) {
 		/*EMPTY*/
 		DEBUG2_3(printk("scsi(%ld): RPA issue IOCB failed (%d).\n",
-		    ha->host_no, rval));
-	} else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp, "RPA") !=
+		    vha->host_no, rval));
+	} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPA") !=
 	    QLA_SUCCESS) {
 		rval = QLA_FUNCTION_FAILED;
 	} else {
 		DEBUG2(printk("scsi(%ld): RPA exiting normally.\n",
-		    ha->host_no));
+		    vha->host_no));
 	}
 
 	return rval;
@@ -1657,34 +1664,28 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *ha)
  * Returns 0 on success.
  */
 int
-qla2x00_fdmi_register(scsi_qla_host_t *ha)
+qla2x00_fdmi_register(scsi_qla_host_t *vha)
 {
 	int rval;
 
-	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
-		DEBUG2(printk("scsi(%ld): FDMI unsupported on "
-		    "ISP2100/ISP2200.\n", ha->host_no));
-		return QLA_SUCCESS;
-	}
-
-	rval = qla2x00_mgmt_svr_login(ha);
+	rval = qla2x00_mgmt_svr_login(vha);
 	if (rval)
 		return rval;
 
-	rval = qla2x00_fdmi_rhba(ha);
+	rval = qla2x00_fdmi_rhba(vha);
 	if (rval) {
 		if (rval != QLA_ALREADY_REGISTERED)
 			return rval;
 
-		rval = qla2x00_fdmi_dhba(ha);
+		rval = qla2x00_fdmi_dhba(vha);
 		if (rval)
 			return rval;
 
-		rval = qla2x00_fdmi_rhba(ha);
+		rval = qla2x00_fdmi_rhba(vha);
 		if (rval)
 			return rval;
 	}
-	rval = qla2x00_fdmi_rpa(ha);
+	rval = qla2x00_fdmi_rpa(vha);
 
 	return rval;
 }
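
Editor's note: the hunk above encodes a small retry protocol — log in to the management server, attempt RHBA, and if the fabric reports the HBA as already registered (e.g., a stale entry from an unclean restart), deregister with DHBA and register afresh before issuing RPA. It also drops the ISP2100/ISP2200 early return; the chip gating presumably moves to the caller. A compressed, self-contained sketch of that control flow (the qla2x00_* calls are stand-in stubs, and the QLA_ALREADY_REGISTERED value is invented for the demo):

#include <stdio.h>

enum { QLA_SUCCESS = 0, QLA_ALREADY_REGISTERED = 0x10 };	/* stand-in values */

static int mgmt_svr_login(void) { return QLA_SUCCESS; }
/* Fail once with "already registered" to exercise the retry path. */
static int fdmi_rhba(void) { static int calls; return calls++ ? QLA_SUCCESS : QLA_ALREADY_REGISTERED; }
static int fdmi_dhba(void) { return QLA_SUCCESS; }
static int fdmi_rpa(void)  { return QLA_SUCCESS; }

static int fdmi_register(void)
{
	int rval = mgmt_svr_login();
	if (rval)
		return rval;

	rval = fdmi_rhba();
	if (rval == QLA_ALREADY_REGISTERED) {
		rval = fdmi_dhba();		/* drop the stale registration */
		if (!rval)
			rval = fdmi_rhba();	/* and register afresh */
	}
	return rval ? rval : fdmi_rpa();
}

int main(void)
{
	printf("fdmi_register -> %d\n", fdmi_register());
	return 0;
}
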
@@ -1697,11 +1698,11 @@ qla2x00_fdmi_register(scsi_qla_host_t *ha)
  * Returns 0 on success.
  */
 int
-qla2x00_gfpn_id(scsi_qla_host_t *ha, sw_info_t *list)
+qla2x00_gfpn_id(scsi_qla_host_t *vha, sw_info_t *list)
 {
 	int		rval;
 	uint16_t	i;
-
+	struct qla_hw_data *ha = vha->hw;
 	ms_iocb_entry_t	*ms_pkt;
 	struct ct_sns_req	*ct_req;
 	struct ct_sns_rsp	*ct_rsp;
@@ -1712,7 +1713,7 @@ qla2x00_gfpn_id(scsi_qla_host_t *ha, sw_info_t *list)
 	for (i = 0; i < MAX_FIBRE_DEVICES; i++) {
 		/* Issue GFPN_ID */
 		/* Prepare common MS IOCB */
-		ms_pkt = ha->isp_ops->prep_ms_iocb(ha, GFPN_ID_REQ_SIZE,
+		ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GFPN_ID_REQ_SIZE,
 		    GFPN_ID_RSP_SIZE);
 
 		/* Prepare CT request */
@@ -1726,13 +1727,13 @@ qla2x00_gfpn_id(scsi_qla_host_t *ha, sw_info_t *list)
 		ct_req->req.port_id.port_id[2] = list[i].d_id.b.al_pa;
 
 		/* Execute MS IOCB */
-		rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma,
+		rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
 		    sizeof(ms_iocb_entry_t));
 		if (rval != QLA_SUCCESS) {
 			/*EMPTY*/
 			DEBUG2_3(printk("scsi(%ld): GFPN_ID issue IOCB "
-			    "failed (%d).\n", ha->host_no, rval));
-		} else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp,
+			    "failed (%d).\n", vha->host_no, rval));
+		} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
 		    "GFPN_ID") != QLA_SUCCESS) {
 			rval = QLA_FUNCTION_FAILED;
 		} else {
@@ -1750,17 +1751,17 @@ qla2x00_gfpn_id(scsi_qla_host_t *ha, sw_info_t *list)
 }
 
 static inline void *
-qla24xx_prep_ms_fm_iocb(scsi_qla_host_t *ha, uint32_t req_size,
+qla24xx_prep_ms_fm_iocb(scsi_qla_host_t *vha, uint32_t req_size,
     uint32_t rsp_size)
 {
 	struct ct_entry_24xx *ct_pkt;
-
+	struct qla_hw_data *ha = vha->hw;
 	ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;
 	memset(ct_pkt, 0, sizeof(struct ct_entry_24xx));
 
 	ct_pkt->entry_type = CT_IOCB_TYPE;
 	ct_pkt->entry_count = 1;
-	ct_pkt->nport_handle = cpu_to_le16(ha->mgmt_svr_loop_id);
+	ct_pkt->nport_handle = cpu_to_le16(vha->mgmt_svr_loop_id);
 	ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
 	ct_pkt->cmd_dsd_count = __constant_cpu_to_le16(1);
 	ct_pkt->rsp_dsd_count = __constant_cpu_to_le16(1);
@@ -1774,7 +1775,7 @@ qla24xx_prep_ms_fm_iocb(scsi_qla_host_t *ha, uint32_t req_size,
 	ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
 	ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
 	ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count;
-	ct_pkt->vp_index = ha->vp_idx;
+	ct_pkt->vp_index = vha->vp_idx;
 
 	return ct_pkt;
 }
@@ -1803,11 +1804,11 @@ qla24xx_prep_ct_fm_req(struct ct_sns_req *ct_req, uint16_t cmd,
  * Returns 0 on success.
  */
 int
-qla2x00_gpsc(scsi_qla_host_t *ha, sw_info_t *list)
+qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
 {
 	int		rval;
 	uint16_t	i;
-
+	struct qla_hw_data *ha = vha->hw;
 	ms_iocb_entry_t	*ms_pkt;
 	struct ct_sns_req	*ct_req;
 	struct ct_sns_rsp	*ct_rsp;
@@ -1817,14 +1818,14 @@ qla2x00_gpsc(scsi_qla_host_t *ha, sw_info_t *list)
 	if (!ha->flags.gpsc_supported)
 		return QLA_FUNCTION_FAILED;
 
-	rval = qla2x00_mgmt_svr_login(ha);
+	rval = qla2x00_mgmt_svr_login(vha);
 	if (rval)
 		return rval;
 
 	for (i = 0; i < MAX_FIBRE_DEVICES; i++) {
 		/* Issue GFPN_ID */
 		/* Prepare common MS IOCB */
-		ms_pkt = qla24xx_prep_ms_fm_iocb(ha, GPSC_REQ_SIZE,
+		ms_pkt = qla24xx_prep_ms_fm_iocb(vha, GPSC_REQ_SIZE,
 		    GPSC_RSP_SIZE);
 
 		/* Prepare CT request */
@@ -1837,13 +1838,13 @@ qla2x00_gpsc(scsi_qla_host_t *ha, sw_info_t *list)
 		    WWN_SIZE);
 
 		/* Execute MS IOCB */
-		rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma,
+		rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
 		    sizeof(ms_iocb_entry_t));
 		if (rval != QLA_SUCCESS) {
 			/*EMPTY*/
 			DEBUG2_3(printk("scsi(%ld): GPSC issue IOCB "
-			    "failed (%d).\n", ha->host_no, rval));
-		} else if ((rval = qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp,
+			    "failed (%d).\n", vha->host_no, rval));
+		} else if ((rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
 		    "GPSC")) != QLA_SUCCESS) {
 			/* FM command unsupported? */
 			if (rval == QLA_INVALID_COMMAND &&
@@ -1853,7 +1854,7 @@ qla2x00_gpsc(scsi_qla_host_t *ha, sw_info_t *list)
 				CT_REASON_COMMAND_UNSUPPORTED)) {
 				DEBUG2(printk("scsi(%ld): GPSC command "
 				    "unsupported, disabling query...\n",
-				    ha->host_no));
+				    vha->host_no));
 				ha->flags.gpsc_supported = 0;
 				rval = QLA_FUNCTION_FAILED;
 				break;
@@ -1878,7 +1879,7 @@ qla2x00_gpsc(scsi_qla_host_t *ha, sw_info_t *list)
 
 			DEBUG2_3(printk("scsi(%ld): GPSC ext entry - "
 			    "fpn %02x%02x%02x%02x%02x%02x%02x%02x speeds=%04x "
-			    "speed=%04x.\n", ha->host_no,
+			    "speed=%04x.\n", vha->host_no,
 			    list[i].fabric_port_name[0],
 			    list[i].fabric_port_name[1],
 			    list[i].fabric_port_name[2],
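
Editor's note: every hunk in qla_gs.c above follows the same mechanical conversion that the rest of this series applies driver-wide. Functions now take the (virtual) port, scsi_qla_host_t *vha; per-port fields (host_no, d_id, dpc_flags, ...) live on it, while hardware-wide state (mailboxes, DMA buffers, isp_ops) stays in the new struct qla_hw_data, reached through a "struct qla_hw_data *ha = vha->hw" back-pointer so NPIV vports can share one physical HBA. A minimal, self-contained sketch of the split (illustrative field names only, not the driver's actual definitions):

#include <stdio.h>

struct qla_hw_data {		/* shared, one per physical HBA function */
	int mbx_flags;
	int isp_abort_cnt;
};

typedef struct scsi_qla_host {	/* one instance per (virtual) port */
	long host_no;
	int vp_idx;
	struct qla_hw_data *hw;	/* back-pointer to the shared state */
} scsi_qla_host_t;

static void init_port(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;	/* the idiom each hunk adds */

	ha->mbx_flags = 0;	/* hardware-wide, touched once per HBA */
	printf("scsi(%ld): vp %d ready\n", vha->host_no, vha->vp_idx);
}

int main(void)
{
	struct qla_hw_data hw = { 0, 0 };
	scsi_qla_host_t base = { 0, 0, &hw }, vport = { 0, 1, &hw };

	init_port(&base);
	init_port(&vport);	/* an NPIV vport shares the same hw */
	return 0;
}

With the shared state factored out, debug messages key on vha->host_no while DMA and mailbox resources (ha->ms_iocb, ha->ct_sns) remain per-HBA — exactly the substitution visible in each hunk.
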
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 4218f20f5ed5..52ed56ecf195 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -5,6 +5,7 @@
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
 #include "qla_def.h"
+#include "qla_gbl.h"
 
 #include <linux/delay.h>
 #include <linux/vmalloc.h>
@@ -21,7 +22,6 @@
 static int qla2x00_isp_firmware(scsi_qla_host_t *);
 static void qla2x00_resize_request_q(scsi_qla_host_t *);
 static int qla2x00_setup_chip(scsi_qla_host_t *);
-static void qla2x00_init_response_q_entries(scsi_qla_host_t *);
 static int qla2x00_init_rings(scsi_qla_host_t *);
 static int qla2x00_fw_ready(scsi_qla_host_t *);
 static int qla2x00_configure_hba(scsi_qla_host_t *);
@@ -35,10 +35,11 @@ static int qla2x00_fabric_dev_login(scsi_qla_host_t *, fc_port_t *,
 
 static int qla2x00_restart_isp(scsi_qla_host_t *);
 
-static int qla2x00_find_new_loop_id(scsi_qla_host_t *ha, fc_port_t *dev);
+static int qla2x00_find_new_loop_id(scsi_qla_host_t *, fc_port_t *);
 
 static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *);
 static int qla84xx_init_chip(scsi_qla_host_t *);
+static int qla25xx_init_queues(struct qla_hw_data *);
 
 /****************************************************************************/
 /*                QLogic ISP2x00 Hardware Support Functions.                */
@@ -55,77 +56,81 @@ static int qla84xx_init_chip(scsi_qla_host_t *);
 *      0 = success
 */
 int
-qla2x00_initialize_adapter(scsi_qla_host_t *ha)
+qla2x00_initialize_adapter(scsi_qla_host_t *vha)
 {
 	int	rval;
-
+	struct qla_hw_data *ha = vha->hw;
+	struct req_que *req = ha->req_q_map[0];
 	/* Clear adapter flags. */
-	ha->flags.online = 0;
-	ha->flags.reset_active = 0;
-	atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
-	atomic_set(&ha->loop_state, LOOP_DOWN);
-	ha->device_flags = DFLG_NO_CABLE;
-	ha->dpc_flags = 0;
-	ha->flags.management_server_logged_in = 0;
-	ha->marker_needed = 0;
+	vha->flags.online = 0;
+	vha->flags.reset_active = 0;
+	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
+	atomic_set(&vha->loop_state, LOOP_DOWN);
+	vha->device_flags = DFLG_NO_CABLE;
+	vha->dpc_flags = 0;
+	vha->flags.management_server_logged_in = 0;
+	vha->marker_needed = 0;
 	ha->mbx_flags = 0;
 	ha->isp_abort_cnt = 0;
 	ha->beacon_blink_led = 0;
-	set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags);
+	set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
+
+	set_bit(0, ha->req_qid_map);
+	set_bit(0, ha->rsp_qid_map);
 
 	qla_printk(KERN_INFO, ha, "Configuring PCI space...\n");
-	rval = ha->isp_ops->pci_config(ha);
+	rval = ha->isp_ops->pci_config(vha);
 	if (rval) {
 		DEBUG2(printk("scsi(%ld): Unable to configure PCI space.\n",
-		    ha->host_no));
+		    vha->host_no));
 		return (rval);
 	}
 
-	ha->isp_ops->reset_chip(ha);
+	ha->isp_ops->reset_chip(vha);
 
-	rval = qla2xxx_get_flash_info(ha);
+	rval = qla2xxx_get_flash_info(vha);
 	if (rval) {
 		DEBUG2(printk("scsi(%ld): Unable to validate FLASH data.\n",
-		    ha->host_no));
+		    vha->host_no));
 		return (rval);
 	}
 
-	ha->isp_ops->get_flash_version(ha, ha->request_ring);
+	ha->isp_ops->get_flash_version(vha, req->ring);
 
 	qla_printk(KERN_INFO, ha, "Configure NVRAM parameters...\n");
 
-	ha->isp_ops->nvram_config(ha);
+	ha->isp_ops->nvram_config(vha);
 
 	if (ha->flags.disable_serdes) {
 		/* Mask HBA via NVRAM settings? */
 		qla_printk(KERN_INFO, ha, "Masking HBA WWPN "
 		    "%02x%02x%02x%02x%02x%02x%02x%02x (via NVRAM).\n",
-		    ha->port_name[0], ha->port_name[1],
-		    ha->port_name[2], ha->port_name[3],
-		    ha->port_name[4], ha->port_name[5],
-		    ha->port_name[6], ha->port_name[7]);
+		    vha->port_name[0], vha->port_name[1],
+		    vha->port_name[2], vha->port_name[3],
+		    vha->port_name[4], vha->port_name[5],
+		    vha->port_name[6], vha->port_name[7]);
 		return QLA_FUNCTION_FAILED;
 	}
 
 	qla_printk(KERN_INFO, ha, "Verifying loaded RISC code...\n");
 
-	if (qla2x00_isp_firmware(ha) != QLA_SUCCESS) {
-		rval = ha->isp_ops->chip_diag(ha);
+	if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) {
+		rval = ha->isp_ops->chip_diag(vha);
 		if (rval)
 			return (rval);
-		rval = qla2x00_setup_chip(ha);
+		rval = qla2x00_setup_chip(vha);
 		if (rval)
 			return (rval);
 	}
 	if (IS_QLA84XX(ha)) {
-		ha->cs84xx = qla84xx_get_chip(ha);
+		ha->cs84xx = qla84xx_get_chip(vha);
 		if (!ha->cs84xx) {
 			qla_printk(KERN_ERR, ha,
 			    "Unable to configure ISP84XX.\n");
 			return QLA_FUNCTION_FAILED;
 		}
 	}
-	rval = qla2x00_init_rings(ha);
+	rval = qla2x00_init_rings(vha);
 
 	return (rval);
 }
@@ -137,10 +142,11 @@ qla2x00_initialize_adapter(scsi_qla_host_t *ha)
  * Returns 0 on success.
  */
 int
-qla2100_pci_config(scsi_qla_host_t *ha)
+qla2100_pci_config(scsi_qla_host_t *vha)
 {
 	uint16_t w;
 	unsigned long flags;
+	struct qla_hw_data *ha = vha->hw;
 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
 
 	pci_set_master(ha->pdev);
@@ -167,11 +173,12 @@ qla2100_pci_config(scsi_qla_host_t *ha)
  * Returns 0 on success.
  */
 int
-qla2300_pci_config(scsi_qla_host_t *ha)
+qla2300_pci_config(scsi_qla_host_t *vha)
 {
 	uint16_t	w;
 	unsigned long   flags = 0;
 	uint32_t	cnt;
+	struct qla_hw_data *ha = vha->hw;
 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
 
 	pci_set_master(ha->pdev);
@@ -248,10 +255,11 @@ qla2300_pci_config(scsi_qla_host_t *ha)
  * Returns 0 on success.
  */
 int
-qla24xx_pci_config(scsi_qla_host_t *ha)
+qla24xx_pci_config(scsi_qla_host_t *vha)
 {
 	uint16_t w;
 	unsigned long flags = 0;
+	struct qla_hw_data *ha = vha->hw;
 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
 
 	pci_set_master(ha->pdev);
@@ -291,9 +299,10 @@ qla24xx_pci_config(scsi_qla_host_t *ha)
  * Returns 0 on success.
  */
 int
-qla25xx_pci_config(scsi_qla_host_t *ha)
+qla25xx_pci_config(scsi_qla_host_t *vha)
 {
 	uint16_t w;
+	struct qla_hw_data *ha = vha->hw;
 
 	pci_set_master(ha->pdev);
 	pci_try_set_mwi(ha->pdev);
@@ -321,32 +330,33 @@ qla25xx_pci_config(scsi_qla_host_t *ha)
  * Returns 0 on success.
  */
 static int
-qla2x00_isp_firmware(scsi_qla_host_t *ha)
+qla2x00_isp_firmware(scsi_qla_host_t *vha)
 {
 	int  rval;
 	uint16_t loop_id, topo, sw_cap;
 	uint8_t domain, area, al_pa;
+	struct qla_hw_data *ha = vha->hw;
 
 	/* Assume loading risc code */
 	rval = QLA_FUNCTION_FAILED;
 
 	if (ha->flags.disable_risc_code_load) {
 		DEBUG2(printk("scsi(%ld): RISC CODE NOT loaded\n",
-		    ha->host_no));
+		    vha->host_no));
 		qla_printk(KERN_INFO, ha, "RISC CODE NOT loaded\n");
 
 		/* Verify checksum of loaded RISC code. */
-		rval = qla2x00_verify_checksum(ha, ha->fw_srisc_address);
+		rval = qla2x00_verify_checksum(vha, ha->fw_srisc_address);
 		if (rval == QLA_SUCCESS) {
 			/* And, verify we are not in ROM code. */
-			rval = qla2x00_get_adapter_id(ha, &loop_id, &al_pa,
+			rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
 			    &area, &domain, &topo, &sw_cap);
 		}
 	}
 
 	if (rval) {
 		DEBUG2_3(printk("scsi(%ld): **** Load RISC code ****\n",
-		    ha->host_no));
+		    vha->host_no));
 	}
 
 	return (rval);
@@ -359,9 +369,10 @@ qla2x00_isp_firmware(scsi_qla_host_t *ha)
  * Returns 0 on success.
  */
 void
-qla2x00_reset_chip(scsi_qla_host_t *ha)
+qla2x00_reset_chip(scsi_qla_host_t *vha)
 {
 	unsigned long   flags = 0;
+	struct qla_hw_data *ha = vha->hw;
 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
 	uint32_t	cnt;
 	uint16_t	cmd;
@@ -499,10 +510,11 @@ qla2x00_reset_chip(scsi_qla_host_t *ha)
  * Returns 0 on success.
  */
 static inline void
-qla24xx_reset_risc(scsi_qla_host_t *ha)
+qla24xx_reset_risc(scsi_qla_host_t *vha)
 {
 	int hw_evt = 0;
 	unsigned long flags = 0;
+	struct qla_hw_data *ha = vha->hw;
 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
 	uint32_t cnt, d2;
 	uint16_t wd;
@@ -541,7 +553,7 @@ qla24xx_reset_risc(scsi_qla_host_t *ha)
 		barrier();
 	}
 	if (cnt == 0 || hw_evt)
-		qla2xxx_hw_event_log(ha, HW_EVENT_RESET_ERR,
+		qla2xxx_hw_event_log(vha, HW_EVENT_RESET_ERR,
 		    RD_REG_WORD(&reg->mailbox1), RD_REG_WORD(&reg->mailbox2),
 		    RD_REG_WORD(&reg->mailbox3));
 
@@ -571,12 +583,13 @@ qla24xx_reset_risc(scsi_qla_host_t *ha)
  * Returns 0 on success.
  */
 void
-qla24xx_reset_chip(scsi_qla_host_t *ha)
+qla24xx_reset_chip(scsi_qla_host_t *vha)
 {
+	struct qla_hw_data *ha = vha->hw;
 	ha->isp_ops->disable_intrs(ha);
 
 	/* Perform RISC reset. */
-	qla24xx_reset_risc(ha);
+	qla24xx_reset_risc(vha);
 }
 
 /**
@@ -586,20 +599,22 @@ qla24xx_reset_chip(scsi_qla_host_t *ha)
  * Returns 0 on success.
  */
 int
-qla2x00_chip_diag(scsi_qla_host_t *ha)
+qla2x00_chip_diag(scsi_qla_host_t *vha)
 {
 	int		rval;
+	struct qla_hw_data *ha = vha->hw;
 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
 	unsigned long	flags = 0;
 	uint16_t	data;
 	uint32_t	cnt;
 	uint16_t	mb[5];
+	struct req_que *req = ha->req_q_map[0];
 
 	/* Assume a failed state */
 	rval = QLA_FUNCTION_FAILED;
 
 	DEBUG3(printk("scsi(%ld): Testing device at %lx.\n",
-	    ha->host_no, (u_long)&reg->flash_address));
+	    vha->host_no, (u_long)&reg->flash_address));
 
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 
@@ -662,17 +677,17 @@ qla2x00_chip_diag(scsi_qla_host_t *ha)
 	ha->product_id[3] = mb[4];
 
 	/* Adjust fw RISC transfer size */
-	if (ha->request_q_length > 1024)
+	if (req->length > 1024)
 		ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 1024;
 	else
 		ha->fw_transfer_size = REQUEST_ENTRY_SIZE *
-		    ha->request_q_length;
+		    req->length;
 
 	if (IS_QLA2200(ha) &&
 	    RD_MAILBOX_REG(ha, reg, 7) == QLA2200A_RISC_ROM_VER) {
 		/* Limit firmware transfer size with a 2200A */
 		DEBUG3(printk("scsi(%ld): Found QLA2200A chip.\n",
-		    ha->host_no));
+		    vha->host_no));
 
 		ha->device_type |= DT_ISP2200A;
 		ha->fw_transfer_size = 128;
@@ -681,11 +696,11 @@ qla2x00_chip_diag(scsi_qla_host_t *ha)
 	/* Wrap Incoming Mailboxes Test. */
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
-	DEBUG3(printk("scsi(%ld): Checking mailboxes.\n", ha->host_no));
-	rval = qla2x00_mbx_reg_test(ha);
+	DEBUG3(printk("scsi(%ld): Checking mailboxes.\n", vha->host_no));
+	rval = qla2x00_mbx_reg_test(vha);
 	if (rval) {
 		DEBUG(printk("scsi(%ld): Failed mailbox send register test\n",
-		    ha->host_no));
+		    vha->host_no));
 		qla_printk(KERN_WARNING, ha,
 		    "Failed mailbox send register test\n");
 	}
@@ -698,7 +713,7 @@ qla2x00_chip_diag(scsi_qla_host_t *ha)
 chip_diag_failed:
 	if (rval)
 		DEBUG2_3(printk("scsi(%ld): Chip diagnostics **** FAILED "
-		    "****\n", ha->host_no));
+		    "****\n", vha->host_no));
 
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
@@ -712,19 +727,21 @@ chip_diag_failed:
  * Returns 0 on success.
  */
 int
-qla24xx_chip_diag(scsi_qla_host_t *ha)
+qla24xx_chip_diag(scsi_qla_host_t *vha)
 {
 	int rval;
+	struct qla_hw_data *ha = vha->hw;
+	struct req_que *req = ha->req_q_map[0];
 
 	/* Perform RISC reset. */
-	qla24xx_reset_risc(ha);
+	qla24xx_reset_risc(vha);
 
-	ha->fw_transfer_size = REQUEST_ENTRY_SIZE * ha->request_q_length;
+	ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;
 
-	rval = qla2x00_mbx_reg_test(ha);
+	rval = qla2x00_mbx_reg_test(vha);
 	if (rval) {
 		DEBUG(printk("scsi(%ld): Failed mailbox send register test\n",
-		    ha->host_no));
+		    vha->host_no));
 		qla_printk(KERN_WARNING, ha,
 		    "Failed mailbox send register test\n");
 	} else {
@@ -736,13 +753,16 @@ qla24xx_chip_diag(scsi_qla_host_t *ha)
 }
 
 void
-qla2x00_alloc_fw_dump(scsi_qla_host_t *ha)
+qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
 {
 	int rval;
 	uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size,
-	    eft_size, fce_size;
+	    eft_size, fce_size, mq_size;
 	dma_addr_t tc_dma;
 	void *tc;
+	struct qla_hw_data *ha = vha->hw;
+	struct req_que *req = ha->req_q_map[0];
+	struct rsp_que *rsp = ha->rsp_q_map[0];
 
 	if (ha->fw_dump) {
 		qla_printk(KERN_WARNING, ha,
@@ -751,7 +771,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *ha)
 	}
 
 	ha->fw_dumped = 0;
-	fixed_size = mem_size = eft_size = fce_size = 0;
+	fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
 	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
 		fixed_size = sizeof(struct qla2100_fw_dump);
 	} else if (IS_QLA23XX(ha)) {
@@ -760,10 +780,12 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *ha)
 		    sizeof(uint16_t);
 	} else if (IS_FWI2_CAPABLE(ha)) {
 		fixed_size = IS_QLA25XX(ha) ?
-		    offsetof(struct qla25xx_fw_dump, ext_mem):
-		    offsetof(struct qla24xx_fw_dump, ext_mem);
+			offsetof(struct qla25xx_fw_dump, ext_mem) :
+			offsetof(struct qla24xx_fw_dump, ext_mem);
 		mem_size = (ha->fw_memory_size - 0x100000 + 1) *
 		    sizeof(uint32_t);
+		if (ha->mqenable)
+			mq_size = sizeof(struct qla2xxx_mq_chain);
 
 		/* Allocate memory for Fibre Channel Event Buffer. */
 		if (!IS_QLA25XX(ha))
@@ -778,7 +800,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *ha)
 		}
 
 		memset(tc, 0, FCE_SIZE);
-		rval = qla2x00_enable_fce_trace(ha, tc_dma, FCE_NUM_BUFFERS,
+		rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS,
 		    ha->fce_mb, &ha->fce_bufs);
 		if (rval) {
 			qla_printk(KERN_WARNING, ha, "Unable to initialize "
@@ -807,7 +829,7 @@ try_eft:
 		}
 
 		memset(tc, 0, EFT_SIZE);
-		rval = qla2x00_enable_eft_trace(ha, tc_dma, EFT_NUM_BUFFERS);
+		rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
 		if (rval) {
 			qla_printk(KERN_WARNING, ha, "Unable to initialize "
 			    "EFT (%d).\n", rval);
@@ -824,12 +846,12 @@ try_eft:
 		ha->eft = tc;
 	}
 cont_alloc:
-	req_q_size = ha->request_q_length * sizeof(request_t);
-	rsp_q_size = ha->response_q_length * sizeof(response_t);
+	req_q_size = req->length * sizeof(request_t);
+	rsp_q_size = rsp->length * sizeof(response_t);
 
 	dump_size = offsetof(struct qla2xxx_fw_dump, isp);
 	dump_size += fixed_size + mem_size + req_q_size + rsp_q_size +
-	    eft_size + fce_size;
+	    mq_size + eft_size + fce_size;
 
 	ha->fw_dump = vmalloc(dump_size);
 	if (!ha->fw_dump) {
@@ -844,7 +866,6 @@ cont_alloc:
 		}
 		return;
 	}
-
 	qla_printk(KERN_INFO, ha, "Allocated (%d KB) for firmware dump...\n",
 	    dump_size / 1024);
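
Editor's note: the sizing above treats the new multiqueue chain as just one more summand in dump_size, alongside the request/response rings now measured through req->length and rsp->length. A hedged sketch of the accumulation (struct layout and sizes invented for the demo; the real values come from the chip type and firmware memory probe):

#include <stddef.h>
#include <stdio.h>

struct qla2xxx_fw_dump { int hdr[8]; char isp[1]; };	/* stand-in layout */

int main(void)
{
	size_t fixed_size = 4096, mem_size = 8192;
	size_t req_q_size = 2048, rsp_q_size = 1024;
	size_t mq_size = 64;		/* stays 0 unless ha->mqenable */
	size_t eft_size = 0, fce_size = 0;

	size_t dump_size = offsetof(struct qla2xxx_fw_dump, isp)
	    + fixed_size + mem_size + req_q_size + rsp_q_size
	    + mq_size + eft_size + fce_size;

	printf("Allocated (%zu KB) for firmware dump...\n", dump_size / 1024);
	return 0;
}
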
 
@@ -875,27 +896,29 @@ cont_alloc:
  * Returns 0 on success.
  */
 static void
-qla2x00_resize_request_q(scsi_qla_host_t *ha)
+qla2x00_resize_request_q(scsi_qla_host_t *vha)
 {
 	int rval;
 	uint16_t fw_iocb_cnt = 0;
 	uint16_t request_q_length = REQUEST_ENTRY_CNT_2XXX_EXT_MEM;
 	dma_addr_t request_dma;
 	request_t *request_ring;
+	struct qla_hw_data *ha = vha->hw;
+	struct req_que *req = ha->req_q_map[0];
 
 	/* Valid only on recent ISPs. */
 	if (IS_QLA2100(ha) || IS_QLA2200(ha))
 		return;
 
 	/* Retrieve IOCB counts available to the firmware. */
-	rval = qla2x00_get_resource_cnts(ha, NULL, NULL, NULL, &fw_iocb_cnt,
-	    &ha->max_npiv_vports);
+	rval = qla2x00_get_resource_cnts(vha, NULL, NULL, NULL, &fw_iocb_cnt,
+					&ha->max_npiv_vports);
 	if (rval)
 		return;
 	/* No point in continuing if current settings are sufficient. */
 	if (fw_iocb_cnt < 1024)
 		return;
-	if (ha->request_q_length >= request_q_length)
+	if (req->length >= request_q_length)
 		return;
 
 	/* Attempt to claim larger area for request queue. */
@@ -909,17 +932,17 @@ qla2x00_resize_request_q(scsi_qla_host_t *ha)
 	qla_printk(KERN_INFO, ha, "Extended memory detected (%d KB)...\n",
 	    (ha->fw_memory_size + 1) / 1024);
 	qla_printk(KERN_INFO, ha, "Resizing request queue depth "
-	    "(%d -> %d)...\n", ha->request_q_length, request_q_length);
+	    "(%d -> %d)...\n", req->length, request_q_length);
 
 	/* Clear old allocations. */
 	dma_free_coherent(&ha->pdev->dev,
-	    (ha->request_q_length + 1) * sizeof(request_t), ha->request_ring,
-	    ha->request_dma);
+	    (req->length + 1) * sizeof(request_t), req->ring,
+	    req->dma);
 
 	/* Begin using larger queue. */
-	ha->request_q_length = request_q_length;
-	ha->request_ring = request_ring;
-	ha->request_dma = request_dma;
+	req->length = request_q_length;
+	req->ring = request_ring;
+	req->dma = request_dma;
 }
 
 /**
@@ -929,10 +952,11 @@ qla2x00_resize_request_q(scsi_qla_host_t *ha)
  * Returns 0 on success.
  */
 static int
-qla2x00_setup_chip(scsi_qla_host_t *ha)
+qla2x00_setup_chip(scsi_qla_host_t *vha)
 {
 	int rval;
 	uint32_t srisc_address = 0;
+	struct qla_hw_data *ha = vha->hw;
 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
 	unsigned long flags;
 
@@ -945,28 +969,27 @@ qla2x00_setup_chip(scsi_qla_host_t *ha)
 	}
 
 	/* Load firmware sequences */
-	rval = ha->isp_ops->load_risc(ha, &srisc_address);
+	rval = ha->isp_ops->load_risc(vha, &srisc_address);
 	if (rval == QLA_SUCCESS) {
 		DEBUG(printk("scsi(%ld): Verifying Checksum of loaded RISC "
-		    "code.\n", ha->host_no));
+		    "code.\n", vha->host_no));
 
-		rval = qla2x00_verify_checksum(ha, srisc_address);
+		rval = qla2x00_verify_checksum(vha, srisc_address);
 		if (rval == QLA_SUCCESS) {
 			/* Start firmware execution. */
 			DEBUG(printk("scsi(%ld): Checksum OK, start "
-			    "firmware.\n", ha->host_no));
+			    "firmware.\n", vha->host_no));
 
-			rval = qla2x00_execute_fw(ha, srisc_address);
+			rval = qla2x00_execute_fw(vha, srisc_address);
 			/* Retrieve firmware information. */
 			if (rval == QLA_SUCCESS && ha->fw_major_version == 0) {
-				qla2x00_get_fw_version(ha,
+				qla2x00_get_fw_version(vha,
 				    &ha->fw_major_version,
 				    &ha->fw_minor_version,
 				    &ha->fw_subminor_version,
 				    &ha->fw_attributes, &ha->fw_memory_size);
 				ha->flags.npiv_supported = 0;
-				if ((IS_QLA24XX(ha) || IS_QLA25XX(ha) ||
-				     IS_QLA84XX(ha)) &&
+				if (IS_QLA2XXX_MIDTYPE(ha) &&
 					 (ha->fw_attributes & BIT_2)) {
 					ha->flags.npiv_supported = 1;
 					if ((!ha->max_npiv_vports) ||
@@ -975,15 +998,15 @@ qla2x00_setup_chip(scsi_qla_host_t *ha)
 						ha->max_npiv_vports =
 						    MIN_MULTI_ID_FABRIC - 1;
 				}
-				qla2x00_resize_request_q(ha);
+				qla2x00_resize_request_q(vha);
 
 				if (ql2xallocfwdump)
-					qla2x00_alloc_fw_dump(ha);
+					qla2x00_alloc_fw_dump(vha);
 			}
 		} else {
 			DEBUG2(printk(KERN_INFO
 			    "scsi(%ld): ISP Firmware failed checksum.\n",
-			    ha->host_no));
+			    vha->host_no));
 		}
 	}
 
@@ -1002,7 +1025,7 @@ qla2x00_setup_chip(scsi_qla_host_t *ha)
 
 	if (rval) {
 		DEBUG2_3(printk("scsi(%ld): Setup chip **** FAILED ****.\n",
-		    ha->host_no));
+		    vha->host_no));
 	}
 
 	return (rval);
@@ -1017,14 +1040,14 @@ qla2x00_setup_chip(scsi_qla_host_t *ha)
  *
  * Returns 0 on success.
  */
-static void
-qla2x00_init_response_q_entries(scsi_qla_host_t *ha)
+void
+qla2x00_init_response_q_entries(struct rsp_que *rsp)
 {
 	uint16_t cnt;
 	response_t *pkt;
 
-	pkt = ha->response_ring_ptr;
-	for (cnt = 0; cnt < ha->response_q_length; cnt++) {
+	pkt = rsp->ring_ptr;
+	for (cnt = 0; cnt < rsp->length; cnt++) {
 		pkt->signature = RESPONSE_PROCESSED;
 		pkt++;
 	}
@@ -1038,19 +1061,20 @@ qla2x00_init_response_q_entries(scsi_qla_host_t *ha)
  * Returns 0 on success.
  */
 void
-qla2x00_update_fw_options(scsi_qla_host_t *ha)
+qla2x00_update_fw_options(scsi_qla_host_t *vha)
 {
 	uint16_t swing, emphasis, tx_sens, rx_sens;
+	struct qla_hw_data *ha = vha->hw;
 
 	memset(ha->fw_options, 0, sizeof(ha->fw_options));
-	qla2x00_get_fw_options(ha, ha->fw_options);
+	qla2x00_get_fw_options(vha, ha->fw_options);
 
 	if (IS_QLA2100(ha) || IS_QLA2200(ha))
 		return;
 
 	/* Serial Link options. */
 	DEBUG3(printk("scsi(%ld): Serial link options:\n",
-	    ha->host_no));
+	    vha->host_no));
 	DEBUG3(qla2x00_dump_buffer((uint8_t *)&ha->fw_seriallink_options,
 	    sizeof(ha->fw_seriallink_options)));
 
@@ -1108,19 +1132,20 @@ qla2x00_update_fw_options(scsi_qla_host_t *ha)
 		ha->fw_options[2] |= BIT_13;
 
 	/* Update firmware options. */
-	qla2x00_set_fw_options(ha, ha->fw_options);
+	qla2x00_set_fw_options(vha, ha->fw_options);
 }
 
 void
-qla24xx_update_fw_options(scsi_qla_host_t *ha)
+qla24xx_update_fw_options(scsi_qla_host_t *vha)
 {
 	int rval;
+	struct qla_hw_data *ha = vha->hw;
 
 	/* Update Serial Link options. */
 	if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0)
 		return;
 
-	rval = qla2x00_set_serdes_params(ha,
+	rval = qla2x00_set_serdes_params(vha,
 	    le16_to_cpu(ha->fw_seriallink_options24[1]),
 	    le16_to_cpu(ha->fw_seriallink_options24[2]),
 	    le16_to_cpu(ha->fw_seriallink_options24[3]));
@@ -1131,19 +1156,22 @@ qla24xx_update_fw_options(scsi_qla_host_t *ha)
 }
 
 void
-qla2x00_config_rings(struct scsi_qla_host *ha)
+qla2x00_config_rings(struct scsi_qla_host *vha)
 {
+	struct qla_hw_data *ha = vha->hw;
 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+	struct req_que *req = ha->req_q_map[0];
+	struct rsp_que *rsp = ha->rsp_q_map[0];
 
 	/* Setup ring parameters in initialization control block. */
 	ha->init_cb->request_q_outpointer = __constant_cpu_to_le16(0);
 	ha->init_cb->response_q_inpointer = __constant_cpu_to_le16(0);
-	ha->init_cb->request_q_length = cpu_to_le16(ha->request_q_length);
-	ha->init_cb->response_q_length = cpu_to_le16(ha->response_q_length);
-	ha->init_cb->request_q_address[0] = cpu_to_le32(LSD(ha->request_dma));
-	ha->init_cb->request_q_address[1] = cpu_to_le32(MSD(ha->request_dma));
-	ha->init_cb->response_q_address[0] = cpu_to_le32(LSD(ha->response_dma));
-	ha->init_cb->response_q_address[1] = cpu_to_le32(MSD(ha->response_dma));
+	ha->init_cb->request_q_length = cpu_to_le16(req->length);
+	ha->init_cb->response_q_length = cpu_to_le16(rsp->length);
+	ha->init_cb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
+	ha->init_cb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
+	ha->init_cb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
+	ha->init_cb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
 
 	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), 0);
 	WRT_REG_WORD(ISP_REQ_Q_OUT(ha, reg), 0);
@@ -1153,27 +1181,62 @@ qla2x00_config_rings(struct scsi_qla_host *ha)
 }
 
 void
-qla24xx_config_rings(struct scsi_qla_host *ha)
+qla24xx_config_rings(struct scsi_qla_host *vha)
 {
-	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+	struct qla_hw_data *ha = vha->hw;
+	device_reg_t __iomem *reg = ISP_QUE_REG(ha, 0);
+	struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
+	struct qla_msix_entry *msix;
 	struct init_cb_24xx *icb;
+	uint16_t rid = 0;
+	struct req_que *req = ha->req_q_map[0];
+	struct rsp_que *rsp = ha->rsp_q_map[0];
 
-	/* Setup ring parameters in initialization control block. */
+	/* Setup ring parameters in initialization control block. */
 	icb = (struct init_cb_24xx *)ha->init_cb;
 	icb->request_q_outpointer = __constant_cpu_to_le16(0);
 	icb->response_q_inpointer = __constant_cpu_to_le16(0);
-	icb->request_q_length = cpu_to_le16(ha->request_q_length);
-	icb->response_q_length = cpu_to_le16(ha->response_q_length);
-	icb->request_q_address[0] = cpu_to_le32(LSD(ha->request_dma));
-	icb->request_q_address[1] = cpu_to_le32(MSD(ha->request_dma));
-	icb->response_q_address[0] = cpu_to_le32(LSD(ha->response_dma));
-	icb->response_q_address[1] = cpu_to_le32(MSD(ha->response_dma));
-
-	WRT_REG_DWORD(&reg->req_q_in, 0);
-	WRT_REG_DWORD(&reg->req_q_out, 0);
-	WRT_REG_DWORD(&reg->rsp_q_in, 0);
-	WRT_REG_DWORD(&reg->rsp_q_out, 0);
-	RD_REG_DWORD(&reg->rsp_q_out);
+	icb->request_q_length = cpu_to_le16(req->length);
+	icb->response_q_length = cpu_to_le16(rsp->length);
+	icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
+	icb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
+	icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
+	icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
+
+	if (ha->mqenable) {
+		icb->qos = __constant_cpu_to_le16(QLA_DEFAULT_QUE_QOS);
+		icb->rid = __constant_cpu_to_le16(rid);
+		if (ha->flags.msix_enabled) {
+			msix = &ha->msix_entries[1];
+			DEBUG2_17(printk(KERN_INFO
+			"Reistering vector 0x%x for base que\n", msix->entry));
+			icb->msix = cpu_to_le16(msix->entry);
+		}
+		/* Use alternate PCI bus number */
+		if (MSB(rid))
+			icb->firmware_options_2 |=
+				__constant_cpu_to_le32(BIT_19);
+		/* Use alternate PCI devfn */
+		if (LSB(rid))
+			icb->firmware_options_2 |=
+				__constant_cpu_to_le32(BIT_18);
+
+		icb->firmware_options_2 |= __constant_cpu_to_le32(BIT_22);
+		icb->firmware_options_2 |= __constant_cpu_to_le32(BIT_23);
+		ha->rsp_q_map[0]->options = icb->firmware_options_2;
+
+		WRT_REG_DWORD(&reg->isp25mq.req_q_in, 0);
+		WRT_REG_DWORD(&reg->isp25mq.req_q_out, 0);
+		WRT_REG_DWORD(&reg->isp25mq.rsp_q_in, 0);
+		WRT_REG_DWORD(&reg->isp25mq.rsp_q_out, 0);
+	} else {
+		WRT_REG_DWORD(&reg->isp24.req_q_in, 0);
+		WRT_REG_DWORD(&reg->isp24.req_q_out, 0);
+		WRT_REG_DWORD(&reg->isp24.rsp_q_in, 0);
+		WRT_REG_DWORD(&reg->isp24.rsp_q_out, 0);
+	}
+	/* PCI posting */
+	RD_REG_DWORD(&ioreg->hccr);
 }
 
 /**
@@ -1186,11 +1249,14 @@ qla24xx_config_rings(struct scsi_qla_host *ha)
  * Returns 0 on success.
  */
 static int
-qla2x00_init_rings(scsi_qla_host_t *ha)
+qla2x00_init_rings(scsi_qla_host_t *vha)
 {
 	int	rval;
 	unsigned long flags = 0;
 	int cnt;
+	struct qla_hw_data *ha = vha->hw;
+	struct req_que *req = ha->req_q_map[0];
+	struct rsp_que *rsp = ha->rsp_q_map[0];
 	struct mid_init_cb_24xx *mid_init_cb =
 	    (struct mid_init_cb_24xx *) ha->init_cb;
 
@@ -1198,45 +1264,45 @@ qla2x00_init_rings(scsi_qla_host_t *ha)
 
 	/* Clear outstanding commands array. */
 	for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++)
-		ha->outstanding_cmds[cnt] = NULL;
+		req->outstanding_cmds[cnt] = NULL;
 
-	ha->current_outstanding_cmd = 0;
+	req->current_outstanding_cmd = 0;
 
 	/* Clear RSCN queue. */
-	ha->rscn_in_ptr = 0;
-	ha->rscn_out_ptr = 0;
+	vha->rscn_in_ptr = 0;
+	vha->rscn_out_ptr = 0;
 
 	/* Initialize firmware. */
-	ha->request_ring_ptr  = ha->request_ring;
-	ha->req_ring_index    = 0;
-	ha->req_q_cnt         = ha->request_q_length;
-	ha->response_ring_ptr = ha->response_ring;
-	ha->rsp_ring_index    = 0;
+	req->ring_ptr = req->ring;
+	req->ring_index = 0;
+	req->cnt = req->length;
+	rsp->ring_ptr = rsp->ring;
+	rsp->ring_index = 0;
 
 	/* Initialize response queue entries */
-	qla2x00_init_response_q_entries(ha);
+	qla2x00_init_response_q_entries(rsp);
 
-	ha->isp_ops->config_rings(ha);
+	ha->isp_ops->config_rings(vha);
 
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
 	/* Update any ISP specific firmware options before initialization. */
-	ha->isp_ops->update_fw_options(ha);
+	ha->isp_ops->update_fw_options(vha);
 
-	DEBUG(printk("scsi(%ld): Issue init firmware.\n", ha->host_no));
+	DEBUG(printk("scsi(%ld): Issue init firmware.\n", vha->host_no));
 
 	if (ha->flags.npiv_supported)
 		mid_init_cb->count = cpu_to_le16(ha->max_npiv_vports);
 
 	mid_init_cb->options = __constant_cpu_to_le16(BIT_1);
 
-	rval = qla2x00_init_firmware(ha, ha->init_cb_size);
+	rval = qla2x00_init_firmware(vha, ha->init_cb_size);
 	if (rval) {
 		DEBUG2_3(printk("scsi(%ld): Init firmware **** FAILED ****.\n",
-		    ha->host_no));
+		    vha->host_no));
 	} else {
 		DEBUG3(printk("scsi(%ld): Init firmware -- success.\n",
-		    ha->host_no));
+		    vha->host_no));
 	}
 
 	return (rval);
@@ -1249,13 +1315,14 @@ qla2x00_init_rings(scsi_qla_host_t *ha)
  * Returns 0 on success.
  */
 static int
-qla2x00_fw_ready(scsi_qla_host_t *ha)
+qla2x00_fw_ready(scsi_qla_host_t *vha)
 {
 	int		rval;
 	unsigned long	wtime, mtime, cs84xx_time;
 	uint16_t	min_wait;	/* Minimum wait time if loop is down */
 	uint16_t	wait_time;	/* Wait time if loop is coming ready */
 	uint16_t	state[3];
+	struct qla_hw_data *ha = vha->hw;
 
 	rval = QLA_SUCCESS;
 
@@ -1277,29 +1344,29 @@ qla2x00_fw_ready(scsi_qla_host_t *ha)
 	wtime = jiffies + (wait_time * HZ);
 
 	/* Wait for ISP to finish LIP */
-	if (!ha->flags.init_done)
+	if (!vha->flags.init_done)
  		qla_printk(KERN_INFO, ha, "Waiting for LIP to complete...\n");
 
 	DEBUG3(printk("scsi(%ld): Waiting for LIP to complete...\n",
-	    ha->host_no));
+	    vha->host_no));
 
 	do {
-		rval = qla2x00_get_firmware_state(ha, state);
+		rval = qla2x00_get_firmware_state(vha, state);
 		if (rval == QLA_SUCCESS) {
 			if (state[0] < FSTATE_LOSS_OF_SYNC) {
-				ha->device_flags &= ~DFLG_NO_CABLE;
+				vha->device_flags &= ~DFLG_NO_CABLE;
 			}
 			if (IS_QLA84XX(ha) && state[0] != FSTATE_READY) {
 				DEBUG16(printk("scsi(%ld): fw_state=%x "
-				    "84xx=%x.\n", ha->host_no, state[0],
+				    "84xx=%x.\n", vha->host_no, state[0],
 				    state[2]));
 				if ((state[2] & FSTATE_LOGGED_IN) &&
 				     (state[2] & FSTATE_WAITING_FOR_VERIFY)) {
 					DEBUG16(printk("scsi(%ld): Sending "
-					    "verify iocb.\n", ha->host_no));
+					    "verify iocb.\n", vha->host_no));
 
 					cs84xx_time = jiffies;
-					rval = qla84xx_init_chip(ha);
+					rval = qla84xx_init_chip(vha);
 					if (rval != QLA_SUCCESS)
 						break;
 
@@ -1309,13 +1376,13 @@ qla2x00_fw_ready(scsi_qla_host_t *ha)
 					mtime += cs84xx_time;
 					DEBUG16(printk("scsi(%ld): Increasing "
 					    "wait time by %ld. New time %ld\n",
-					    ha->host_no, cs84xx_time, wtime));
+					    vha->host_no, cs84xx_time, wtime));
 				}
 			} else if (state[0] == FSTATE_READY) {
 				DEBUG(printk("scsi(%ld): F/W Ready - OK \n",
-				    ha->host_no));
+				    vha->host_no));
 
-				qla2x00_get_retry_cnt(ha, &ha->retry_count,
+				qla2x00_get_retry_cnt(vha, &ha->retry_count,
 				    &ha->login_timeout, &ha->r_a_tov);
 
 				rval = QLA_SUCCESS;
@@ -1324,7 +1391,7 @@ qla2x00_fw_ready(scsi_qla_host_t *ha)
 
 			rval = QLA_FUNCTION_FAILED;
 
-			if (atomic_read(&ha->loop_down_timer) &&
+			if (atomic_read(&vha->loop_down_timer) &&
 			    state[0] != FSTATE_READY) {
 				/* Loop down. Timeout on min_wait for states
 				 * other than Wait for Login.
@@ -1333,7 +1400,7 @@ qla2x00_fw_ready(scsi_qla_host_t *ha)
 					qla_printk(KERN_INFO, ha,
 					    "Cable is unplugged...\n");
 
-					ha->device_flags |= DFLG_NO_CABLE;
+					vha->device_flags |= DFLG_NO_CABLE;
 					break;
 				}
 			}
@@ -1350,15 +1417,15 @@ qla2x00_fw_ready(scsi_qla_host_t *ha)
 		msleep(500);
 
 		DEBUG3(printk("scsi(%ld): fw_state=%x curr time=%lx.\n",
-		    ha->host_no, state[0], jiffies));
+		    vha->host_no, state[0], jiffies));
 	} while (1);
 
 	DEBUG(printk("scsi(%ld): fw_state=%x curr time=%lx.\n",
-	    ha->host_no, state[0], jiffies));
+	    vha->host_no, state[0], jiffies));
 
 	if (rval) {
 		DEBUG2_3(printk("scsi(%ld): Firmware ready **** FAILED ****.\n",
-		    ha->host_no));
+		    vha->host_no));
 	}
 
 	return (rval);
@@ -1378,7 +1445,7 @@ qla2x00_fw_ready(scsi_qla_host_t *ha)
 *      Kernel context.
 */
 static int
-qla2x00_configure_hba(scsi_qla_host_t *ha)
+qla2x00_configure_hba(scsi_qla_host_t *vha)
 {
 	int       rval;
 	uint16_t      loop_id;
@@ -1388,19 +1455,20 @@ qla2x00_configure_hba(scsi_qla_host_t *ha)
 	uint8_t       area;
 	uint8_t       domain;
 	char		connect_type[22];
+	struct qla_hw_data *ha = vha->hw;
 
 	/* Get host addresses. */
-	rval = qla2x00_get_adapter_id(ha,
+	rval = qla2x00_get_adapter_id(vha,
 	    &loop_id, &al_pa, &area, &domain, &topo, &sw_cap);
 	if (rval != QLA_SUCCESS) {
-		if (LOOP_TRANSITION(ha) || atomic_read(&ha->loop_down_timer) ||
+		if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) ||
 		    (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) {
 			DEBUG2(printk("%s(%ld) Loop is in a transition state\n",
-			    __func__, ha->host_no));
+			    __func__, vha->host_no));
 		} else {
 			qla_printk(KERN_WARNING, ha,
 			    "ERROR -- Unable to get host loop ID.\n");
-			set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
+			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
 		}
 		return (rval);
 	}
@@ -1411,7 +1479,7 @@ qla2x00_configure_hba(scsi_qla_host_t *ha)
 		return (QLA_FUNCTION_FAILED);
 	}
 
-	ha->loop_id = loop_id;
+	vha->loop_id = loop_id;
 
 	/* initialize */
 	ha->min_external_loopid = SNS_FIRST_LOOP_ID;
@@ -1421,14 +1489,14 @@ qla2x00_configure_hba(scsi_qla_host_t *ha)
 	switch (topo) {
 	case 0:
 		DEBUG3(printk("scsi(%ld): HBA in NL topology.\n",
-		    ha->host_no));
+		    vha->host_no));
 		ha->current_topology = ISP_CFG_NL;
 		strcpy(connect_type, "(Loop)");
 		break;
 
 	case 1:
 		DEBUG3(printk("scsi(%ld): HBA in FL topology.\n",
-		    ha->host_no));
+		    vha->host_no));
 		ha->switch_cap = sw_cap;
 		ha->current_topology = ISP_CFG_FL;
 		strcpy(connect_type, "(FL_Port)");
@@ -1436,7 +1504,7 @@ qla2x00_configure_hba(scsi_qla_host_t *ha)
 
 	case 2:
 		DEBUG3(printk("scsi(%ld): HBA in N P2P topology.\n",
-		    ha->host_no));
+		    vha->host_no));
 		ha->operating_mode = P2P;
 		ha->current_topology = ISP_CFG_N;
 		strcpy(connect_type, "(N_Port-to-N_Port)");
@@ -1444,7 +1512,7 @@ qla2x00_configure_hba(scsi_qla_host_t *ha)
 
 	case 3:
 		DEBUG3(printk("scsi(%ld): HBA in F P2P topology.\n",
-		    ha->host_no));
+		    vha->host_no));
 		ha->switch_cap = sw_cap;
 		ha->operating_mode = P2P;
 		ha->current_topology = ISP_CFG_F;
@@ -1454,7 +1522,7 @@ qla2x00_configure_hba(scsi_qla_host_t *ha)
 	default:
 		DEBUG3(printk("scsi(%ld): HBA in unknown topology %x. "
 		    "Using NL.\n",
-		    ha->host_no, topo));
+		    vha->host_no, topo));
 		ha->current_topology = ISP_CFG_NL;
 		strcpy(connect_type, "(Loop)");
 		break;
@@ -1462,29 +1530,31 @@ qla2x00_configure_hba(scsi_qla_host_t *ha)
 
 	/* Save Host port and loop ID. */
 	/* byte order - Big Endian */
-	ha->d_id.b.domain = domain;
-	ha->d_id.b.area = area;
-	ha->d_id.b.al_pa = al_pa;
+	vha->d_id.b.domain = domain;
+	vha->d_id.b.area = area;
+	vha->d_id.b.al_pa = al_pa;
 
-	if (!ha->flags.init_done)
+	if (!vha->flags.init_done)
  		qla_printk(KERN_INFO, ha,
 		    "Topology - %s, Host Loop address 0x%x\n",
- 		    connect_type, ha->loop_id);
+		    connect_type, vha->loop_id);
 
 	if (rval) {
-		DEBUG2_3(printk("scsi(%ld): FAILED.\n", ha->host_no));
+		DEBUG2_3(printk("scsi(%ld): FAILED.\n", vha->host_no));
 	} else {
-		DEBUG3(printk("scsi(%ld): exiting normally.\n", ha->host_no));
+		DEBUG3(printk("scsi(%ld): exiting normally.\n", vha->host_no));
 	}
 
 	return(rval);
 }
 
 static inline void
-qla2x00_set_model_info(scsi_qla_host_t *ha, uint8_t *model, size_t len, char *def)
+qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
+	char *def)
 {
 	char *st, *en;
 	uint16_t index;
+	struct qla_hw_data *ha = vha->hw;
 
 	if (memcmp(model, BINZERO, len) != 0) {
 		strncpy(ha->model_number, model, len);
@@ -1516,16 +1586,17 @@ qla2x00_set_model_info(scsi_qla_host_t *ha, uint8_t *model, size_t len, char *de
 		}
 	}
 	if (IS_FWI2_CAPABLE(ha))
-		qla2xxx_get_vpd_field(ha, "\x82", ha->model_desc,
+		qla2xxx_get_vpd_field(vha, "\x82", ha->model_desc,
 		    sizeof(ha->model_desc));
 }
 
 /* On sparc systems, obtain port and node WWN from firmware
  * properties.
  */
-static void qla2xxx_nvram_wwn_from_ofw(scsi_qla_host_t *ha, nvram_t *nv)
+static void qla2xxx_nvram_wwn_from_ofw(scsi_qla_host_t *vha, nvram_t *nv)
 {
 #ifdef CONFIG_SPARC
+	struct qla_hw_data *ha = vha->hw;
 	struct pci_dev *pdev = ha->pdev;
 	struct device_node *dp = pci_device_to_OF_node(pdev);
 	const u8 *val;
@@ -1555,12 +1626,13 @@ static void qla2xxx_nvram_wwn_from_ofw(scsi_qla_host_t *ha, nvram_t *nv)
 *      0 = success.
 */
 int
-qla2x00_nvram_config(scsi_qla_host_t *ha)
+qla2x00_nvram_config(scsi_qla_host_t *vha)
 {
 	int             rval;
 	uint8_t         chksum = 0;
 	uint16_t        cnt;
 	uint8_t         *dptr1, *dptr2;
+	struct qla_hw_data *ha = vha->hw;
 	init_cb_t       *icb = ha->init_cb;
 	nvram_t         *nv = ha->nvram;
 	uint8_t         *ptr = ha->nvram;
@@ -1576,11 +1648,11 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
 			ha->nvram_base = 0x80;
 
 	/* Get NVRAM data and calculate checksum. */
-	ha->isp_ops->read_nvram(ha, ptr, ha->nvram_base, ha->nvram_size);
+	ha->isp_ops->read_nvram(vha, ptr, ha->nvram_base, ha->nvram_size);
 	for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++)
 		chksum += *ptr++;
 
-	DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", ha->host_no));
+	DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no));
 	DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size));
 
 	/* Bad NVRAM data, set defaults parameters. */
@@ -1594,7 +1666,7 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
 		    "invalid -- WWPN) defaults.\n");
 
 		if (chksum)
-			qla2xxx_hw_event_log(ha, HW_EVENT_NVRAM_CHKSUM_ERR, 0,
+			qla2xxx_hw_event_log(vha, HW_EVENT_NVRAM_CHKSUM_ERR, 0,
 			    MSW(chksum), LSW(chksum));
 
 		/*
@@ -1631,7 +1703,7 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
 		nv->port_name[3] = 224;
 		nv->port_name[4] = 139;
 
-		qla2xxx_nvram_wwn_from_ofw(ha, nv);
+		qla2xxx_nvram_wwn_from_ofw(vha, nv);
 
 		nv->login_timeout = 4;
 
@@ -1684,7 +1756,7 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
 				strcpy(ha->model_number, "QLA2300");
 			}
 		} else {
-			qla2x00_set_model_info(ha, nv->model_number,
+			qla2x00_set_model_info(vha, nv->model_number,
 			    sizeof(nv->model_number), "QLA23xx");
 		}
 	} else if (IS_QLA2200(ha)) {
@@ -1760,8 +1832,8 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
 	ha->serial0 = icb->port_name[5];
 	ha->serial1 = icb->port_name[6];
 	ha->serial2 = icb->port_name[7];
-	ha->node_name = icb->node_name;
-	ha->port_name = icb->port_name;
+	memcpy(vha->node_name, icb->node_name, WWN_SIZE);
+	memcpy(vha->port_name, icb->port_name, WWN_SIZE);
 
 	icb->execution_throttle = __constant_cpu_to_le16(0xFFFF);
 
@@ -1829,10 +1901,10 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
 		icb->response_accumulation_timer = 3;
 		icb->interrupt_delay_timer = 5;
 
-		ha->flags.process_response_queue = 1;
+		vha->flags.process_response_queue = 1;
 	} else {
 		/* Enable ZIO. */
-		if (!ha->flags.init_done) {
+		if (!vha->flags.init_done) {
 			ha->zio_mode = icb->add_firmware_options[0] &
 			    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
 			ha->zio_timer = icb->interrupt_delay_timer ?
@@ -1840,12 +1912,12 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
 		}
 		icb->add_firmware_options[0] &=
 		    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
-		ha->flags.process_response_queue = 0;
+		vha->flags.process_response_queue = 0;
 		if (ha->zio_mode != QLA_ZIO_DISABLED) {
 			ha->zio_mode = QLA_ZIO_MODE_6;
 
 			DEBUG2(printk("scsi(%ld): ZIO mode %d enabled; timer "
-			    "delay (%d us).\n", ha->host_no, ha->zio_mode,
+			    "delay (%d us).\n", vha->host_no, ha->zio_mode,
 			    ha->zio_timer * 100));
 			qla_printk(KERN_INFO, ha,
 			    "ZIO mode %d enabled; timer delay (%d us).\n",
@@ -1853,13 +1925,13 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
 
 			icb->add_firmware_options[0] |= (uint8_t)ha->zio_mode;
 			icb->interrupt_delay_timer = (uint8_t)ha->zio_timer;
-			ha->flags.process_response_queue = 1;
+			vha->flags.process_response_queue = 1;
 		}
 	}
 
 	if (rval) {
 		DEBUG2_3(printk(KERN_WARNING
-		    "scsi(%ld): NVRAM configuration failed!\n", ha->host_no));
+		    "scsi(%ld): NVRAM configuration failed!\n", vha->host_no));
 	}
 	return (rval);
 }
@@ -1870,10 +1942,10 @@ qla2x00_rport_del(void *data)
 	fc_port_t *fcport = data;
 	struct fc_rport *rport;
 
-	spin_lock_irq(fcport->ha->host->host_lock);
+	spin_lock_irq(fcport->vha->host->host_lock);
 	rport = fcport->drport;
 	fcport->drport = NULL;
-	spin_unlock_irq(fcport->ha->host->host_lock);
+	spin_unlock_irq(fcport->vha->host->host_lock);
 	if (rport)
 		fc_remote_port_delete(rport);
 }
@@ -1886,7 +1958,7 @@ qla2x00_rport_del(void *data)
  * Returns a pointer to the allocated fcport, or NULL, if none available.
  */
 static fc_port_t *
-qla2x00_alloc_fcport(scsi_qla_host_t *ha, gfp_t flags)
+qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
 {
 	fc_port_t *fcport;
 
@@ -1895,8 +1967,8 @@ qla2x00_alloc_fcport(scsi_qla_host_t *ha, gfp_t flags)
 		return NULL;
 
 	/* Setup fcport template structure. */
-	fcport->ha = ha;
-	fcport->vp_idx = ha->vp_idx;
+	fcport->vha = vha;
+	fcport->vp_idx = vha->vp_idx;
 	fcport->port_type = FCT_UNKNOWN;
 	fcport->loop_id = FC_NO_LOOP_ID;
 	atomic_set(&fcport->state, FCS_UNCONFIGURED);
@@ -1919,101 +1991,97 @@ qla2x00_alloc_fcport(scsi_qla_host_t *ha, gfp_t flags)
  *      2 = database was full and device was not configured.
  */
 static int
-qla2x00_configure_loop(scsi_qla_host_t *ha)
+qla2x00_configure_loop(scsi_qla_host_t *vha)
 {
 	int  rval;
 	unsigned long flags, save_flags;
-
+	struct qla_hw_data *ha = vha->hw;
 	rval = QLA_SUCCESS;
 
 	/* Get Initiator ID */
-	if (test_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags)) {
-		rval = qla2x00_configure_hba(ha);
+	if (test_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags)) {
+		rval = qla2x00_configure_hba(vha);
 		if (rval != QLA_SUCCESS) {
 			DEBUG(printk("scsi(%ld): Unable to configure HBA.\n",
-			    ha->host_no));
+			    vha->host_no));
 			return (rval);
 		}
 	}
 
-	save_flags = flags = ha->dpc_flags;
+	save_flags = flags = vha->dpc_flags;
 	DEBUG(printk("scsi(%ld): Configure loop -- dpc flags =0x%lx\n",
-	    ha->host_no, flags));
+	    vha->host_no, flags));
 
 	/*
 	 * If we have both an RSCN and PORT UPDATE pending then handle them
 	 * both at the same time.
 	 */
-	clear_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
-	clear_bit(RSCN_UPDATE, &ha->dpc_flags);
+	clear_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+	clear_bit(RSCN_UPDATE, &vha->dpc_flags);
 
 	/* Determine what we need to do */
 	if (ha->current_topology == ISP_CFG_FL &&
 	    (test_bit(LOCAL_LOOP_UPDATE, &flags))) {
 
-		ha->flags.rscn_queue_overflow = 1;
+		vha->flags.rscn_queue_overflow = 1;
 		set_bit(RSCN_UPDATE, &flags);
 
 	} else if (ha->current_topology == ISP_CFG_F &&
 	    (test_bit(LOCAL_LOOP_UPDATE, &flags))) {
 
-		ha->flags.rscn_queue_overflow = 1;
+		vha->flags.rscn_queue_overflow = 1;
 		set_bit(RSCN_UPDATE, &flags);
 		clear_bit(LOCAL_LOOP_UPDATE, &flags);
 
 	} else if (ha->current_topology == ISP_CFG_N) {
 		clear_bit(RSCN_UPDATE, &flags);
 
-	} else if (!ha->flags.online ||
+	} else if (!vha->flags.online ||
 	    (test_bit(ABORT_ISP_ACTIVE, &flags))) {
 
-		ha->flags.rscn_queue_overflow = 1;
+		vha->flags.rscn_queue_overflow = 1;
 		set_bit(RSCN_UPDATE, &flags);
 		set_bit(LOCAL_LOOP_UPDATE, &flags);
 	}
 
 	if (test_bit(LOCAL_LOOP_UPDATE, &flags)) {
-		if (test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)) {
+		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
 			rval = QLA_FUNCTION_FAILED;
-		} else {
-			rval = qla2x00_configure_local_loop(ha);
-		}
+		else
+			rval = qla2x00_configure_local_loop(vha);
 	}
 
 	if (rval == QLA_SUCCESS && test_bit(RSCN_UPDATE, &flags)) {
-		if (LOOP_TRANSITION(ha)) {
+		if (LOOP_TRANSITION(vha))
 			rval = QLA_FUNCTION_FAILED;
-		} else {
-			rval = qla2x00_configure_fabric(ha);
-		}
+		else
+			rval = qla2x00_configure_fabric(vha);
 	}
 
 	if (rval == QLA_SUCCESS) {
-		if (atomic_read(&ha->loop_down_timer) ||
-		    test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)) {
+		if (atomic_read(&vha->loop_down_timer) ||
+		    test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
 			rval = QLA_FUNCTION_FAILED;
 		} else {
-			atomic_set(&ha->loop_state, LOOP_READY);
+			atomic_set(&vha->loop_state, LOOP_READY);
 
-			DEBUG(printk("scsi(%ld): LOOP READY\n", ha->host_no));
+			DEBUG(printk("scsi(%ld): LOOP READY\n", vha->host_no));
 		}
 	}
 
 	if (rval) {
 		DEBUG2_3(printk("%s(%ld): *** FAILED ***\n",
-		    __func__, ha->host_no));
+		    __func__, vha->host_no));
 	} else {
 		DEBUG3(printk("%s: exiting normally\n", __func__));
 	}
 
 	/* Restore state if a resync event occurred during processing */
-	if (test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)) {
+	if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
 		if (test_bit(LOCAL_LOOP_UPDATE, &save_flags))
-			set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
-		if (test_bit(RSCN_UPDATE, &save_flags)) {
-			ha->flags.rscn_queue_overflow = 1;
-			set_bit(RSCN_UPDATE, &ha->dpc_flags);
-		}
+			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+		if (test_bit(RSCN_UPDATE, &save_flags))
+			set_bit(RSCN_UPDATE, &vha->dpc_flags);
 	}
 
 	return (rval);
@@ -2032,7 +2100,7 @@ qla2x00_configure_loop(scsi_qla_host_t *ha)
  *	0 = success.
  */
 static int
-qla2x00_configure_local_loop(scsi_qla_host_t *ha)
+qla2x00_configure_local_loop(scsi_qla_host_t *vha)
 {
 	int		rval, rval2;
 	int		found_devs;
@@ -2044,18 +2112,18 @@ qla2x00_configure_local_loop(scsi_qla_host_t *ha)
 	char		*id_iter;
 	uint16_t	loop_id;
 	uint8_t		domain, area, al_pa;
-	scsi_qla_host_t *pha = to_qla_parent(ha);
+	struct qla_hw_data *ha = vha->hw;
 
 	found_devs = 0;
 	new_fcport = NULL;
 	entries = MAX_FIBRE_DEVICES;
 
-	DEBUG3(printk("scsi(%ld): Getting FCAL position map\n", ha->host_no));
-	DEBUG3(qla2x00_get_fcal_position_map(ha, NULL));
+	DEBUG3(printk("scsi(%ld): Getting FCAL position map\n", vha->host_no));
+	DEBUG3(qla2x00_get_fcal_position_map(vha, NULL));
 
 	/* Get list of logged in devices. */
 	memset(ha->gid_list, 0, GID_LIST_SIZE);
-	rval = qla2x00_get_id_list(ha, ha->gid_list, ha->gid_list_dma,
+	rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma,
 	    &entries);
 	if (rval != QLA_SUCCESS)
 		goto cleanup_allocation;
@@ -2066,7 +2134,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *ha)
 	    entries * sizeof(struct gid_list_info)));
 
 	/* Allocate temporary fcport for any new fcports discovered. */
-	new_fcport = qla2x00_alloc_fcport(ha, GFP_KERNEL);
+	new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
 	if (new_fcport == NULL) {
 		rval = QLA_MEMORY_ALLOC_FAILED;
 		goto cleanup_allocation;
@@ -2076,17 +2144,14 @@ qla2x00_configure_local_loop(scsi_qla_host_t *ha)
 	/*
 	 * Mark local devices that were present with FCF_DEVICE_LOST for now.
 	 */
-	list_for_each_entry(fcport, &pha->fcports, list) {
-		if (fcport->vp_idx != ha->vp_idx)
-			continue;
-
+	list_for_each_entry(fcport, &vha->vp_fcports, list) {
 		if (atomic_read(&fcport->state) == FCS_ONLINE &&
 		    fcport->port_type != FCT_BROADCAST &&
 		    (fcport->flags & FCF_FABRIC_DEVICE) == 0) {
 
 			DEBUG(printk("scsi(%ld): Marking port lost, "
 			    "loop_id=0x%04x\n",
-			    ha->host_no, fcport->loop_id));
+			    vha->host_no, fcport->loop_id));
 
 			atomic_set(&fcport->state, FCS_DEVICE_LOST);
 			fcport->flags &= ~FCF_FARP_DONE;
@@ -2113,7 +2178,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *ha)
 
 		/* Bypass if not same domain and area of adapter. */
 		if (area && domain &&
-		    (area != ha->d_id.b.area || domain != ha->d_id.b.domain))
+		    (area != vha->d_id.b.area || domain != vha->d_id.b.domain))
 			continue;
 
 		/* Bypass invalid local loop ID. */
@@ -2125,26 +2190,23 @@ qla2x00_configure_local_loop(scsi_qla_host_t *ha)
 		new_fcport->d_id.b.area = area;
 		new_fcport->d_id.b.al_pa = al_pa;
 		new_fcport->loop_id = loop_id;
-		new_fcport->vp_idx = ha->vp_idx;
-		rval2 = qla2x00_get_port_database(ha, new_fcport, 0);
+		new_fcport->vp_idx = vha->vp_idx;
+		rval2 = qla2x00_get_port_database(vha, new_fcport, 0);
 		if (rval2 != QLA_SUCCESS) {
 			DEBUG2(printk("scsi(%ld): Failed to retrieve fcport "
 			    "information -- get_port_database=%x, "
 			    "loop_id=0x%04x\n",
-			    ha->host_no, rval2, new_fcport->loop_id));
+			    vha->host_no, rval2, new_fcport->loop_id));
 			DEBUG2(printk("scsi(%ld): Scheduling resync...\n",
-			    ha->host_no));
-			set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
+			    vha->host_no));
+			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
 			continue;
 		}
 
 		/* Check for matching device in port list. */
 		found = 0;
 		fcport = NULL;
-		list_for_each_entry(fcport, &pha->fcports, list) {
-			if (fcport->vp_idx != ha->vp_idx)
-				continue;
-
+		list_for_each_entry(fcport, &vha->vp_fcports, list) {
 			if (memcmp(new_fcport->port_name, fcport->port_name,
 			    WWN_SIZE))
 				continue;
@@ -2164,17 +2226,15 @@ qla2x00_configure_local_loop(scsi_qla_host_t *ha)
 		if (!found) {
 			/* New device, add to fcports list. */
 			new_fcport->flags &= ~FCF_PERSISTENT_BOUND;
-			if (ha->parent) {
-				new_fcport->ha = ha;
-				new_fcport->vp_idx = ha->vp_idx;
-				list_add_tail(&new_fcport->vp_fcport,
-				    &ha->vp_fcports);
+			if (vha->vp_idx) {
+				new_fcport->vha = vha;
+				new_fcport->vp_idx = vha->vp_idx;
 			}
-			list_add_tail(&new_fcport->list, &pha->fcports);
+			list_add_tail(&new_fcport->list, &vha->vp_fcports);
 
 			/* Allocate a new replacement fcport. */
 			fcport = new_fcport;
-			new_fcport = qla2x00_alloc_fcport(ha, GFP_KERNEL);
+			new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
 			if (new_fcport == NULL) {
 				rval = QLA_MEMORY_ALLOC_FAILED;
 				goto cleanup_allocation;
@@ -2185,7 +2245,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *ha)
 		/* Base iIDMA settings on HBA port speed. */
 		fcport->fp_speed = ha->link_data_rate;
 
-		qla2x00_update_fcport(ha, fcport);
+		qla2x00_update_fcport(vha, fcport);
 
 		found_devs++;
 	}
@@ -2195,24 +2255,25 @@ cleanup_allocation:
 
 	if (rval != QLA_SUCCESS) {
 		DEBUG2(printk("scsi(%ld): Configure local loop error exit: "
-		    "rval=%x\n", ha->host_no, rval));
+		    "rval=%x\n", vha->host_no, rval));
 	}
 
 	if (found_devs) {
-		ha->device_flags |= DFLG_LOCAL_DEVICES;
-		ha->device_flags &= ~DFLG_RETRY_LOCAL_DEVICES;
+		vha->device_flags |= DFLG_LOCAL_DEVICES;
+		vha->device_flags &= ~DFLG_RETRY_LOCAL_DEVICES;
 	}
 
 	return (rval);
 }
 
 static void
-qla2x00_iidma_fcport(scsi_qla_host_t *ha, fc_port_t *fcport)
+qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
 {
 #define LS_UNKNOWN      2
 	static char *link_speeds[5] = { "1", "2", "?", "4", "8" };
 	int rval;
 	uint16_t mb[6];
+	struct qla_hw_data *ha = vha->hw;
 
 	if (!IS_IIDMA_CAPABLE(ha))
 		return;
@@ -2221,12 +2282,12 @@ qla2x00_iidma_fcport(scsi_qla_host_t *ha, fc_port_t *fcport)
 	    fcport->fp_speed > ha->link_data_rate)
 		return;
 
-	rval = qla2x00_set_idma_speed(ha, fcport->loop_id, fcport->fp_speed,
+	rval = qla2x00_set_idma_speed(vha, fcport->loop_id, fcport->fp_speed,
 	    mb);
 	if (rval != QLA_SUCCESS) {
 		DEBUG2(printk("scsi(%ld): Unable to adjust iIDMA "
 		    "%02x%02x%02x%02x%02x%02x%02x%02x -- %04x %x %04x %04x.\n",
-		    ha->host_no, fcport->port_name[0], fcport->port_name[1],
+		    vha->host_no, fcport->port_name[0], fcport->port_name[1],
 		    fcport->port_name[2], fcport->port_name[3],
 		    fcport->port_name[4], fcport->port_name[5],
 		    fcport->port_name[6], fcport->port_name[7], rval,
@@ -2244,10 +2305,11 @@ qla2x00_iidma_fcport(scsi_qla_host_t *ha, fc_port_t *fcport)
 }
 
 static void
-qla2x00_reg_remote_port(scsi_qla_host_t *ha, fc_port_t *fcport)
+qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
 {
 	struct fc_rport_identifiers rport_ids;
 	struct fc_rport *rport;
+	struct qla_hw_data *ha = vha->hw;
 
 	if (fcport->drport)
 		qla2x00_rport_del(fcport);
@@ -2257,15 +2319,15 @@ qla2x00_reg_remote_port(scsi_qla_host_t *ha, fc_port_t *fcport)
 	rport_ids.port_id = fcport->d_id.b.domain << 16 |
 	    fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
 	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
-	fcport->rport = rport = fc_remote_port_add(ha->host, 0, &rport_ids);
+	fcport->rport = rport = fc_remote_port_add(vha->host, 0, &rport_ids);
 	if (!rport) {
 		qla_printk(KERN_WARNING, ha,
 		    "Unable to allocate fc remote port!\n");
 		return;
 	}
-	spin_lock_irq(fcport->ha->host->host_lock);
+	spin_lock_irq(fcport->vha->host->host_lock);
 	*((fc_port_t **)rport->dd_data) = fcport;
-	spin_unlock_irq(fcport->ha->host->host_lock);
+	spin_unlock_irq(fcport->vha->host->host_lock);
 
 	rport->supported_classes = fcport->supported_classes;
 
@@ -2293,23 +2355,23 @@ qla2x00_reg_remote_port(scsi_qla_host_t *ha, fc_port_t *fcport)
  *	Kernel context.
  */
 void
-qla2x00_update_fcport(scsi_qla_host_t *ha, fc_port_t *fcport)
+qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
 {
-	scsi_qla_host_t *pha = to_qla_parent(ha);
+	struct qla_hw_data *ha = vha->hw;
 
-	fcport->ha = ha;
+	fcport->vha = vha;
 	fcport->login_retry = 0;
-	fcport->port_login_retry_count = pha->port_down_retry_count *
+	fcport->port_login_retry_count = ha->port_down_retry_count *
 	    PORT_RETRY_TIME;
-	atomic_set(&fcport->port_down_timer, pha->port_down_retry_count *
+	atomic_set(&fcport->port_down_timer, ha->port_down_retry_count *
 	    PORT_RETRY_TIME);
 	fcport->flags &= ~FCF_LOGIN_NEEDED;
 
-	qla2x00_iidma_fcport(ha, fcport);
+	qla2x00_iidma_fcport(vha, fcport);
 
 	atomic_set(&fcport->state, FCS_ONLINE);
 
-	qla2x00_reg_remote_port(ha, fcport);
+	qla2x00_reg_remote_port(vha, fcport);
 }
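
[Annotation] qla2x00_update_fcport() above is the patch's central refactor in miniature: the parent-pointer chase through to_qla_parent() is gone, replaced by a split between per-port state (scsi_qla_host) and hardware-wide state (qla_hw_data) reached through vha->hw. A hedged, compilable sketch of that ownership split, with the field set reduced to one policy value:

    #include <stdio.h>

    /* Shared per-PCI-function state: one instance per physical adapter. */
    struct qla_hw_data_sketch {
    	int port_down_retry_count;      /* policy common to all vports */
    };

    /* Per-(v)port state: every NPIV vport gets its own instance. */
    struct scsi_qla_host_sketch {
    	long host_no;
    	struct qla_hw_data_sketch *hw;  /* back-pointer to shared hw */
    };

    #define PORT_RETRY_TIME 2

    /* Mirrors the rewritten update path: vha for identity, vha->hw for policy. */
    static int login_retry_budget(struct scsi_qla_host_sketch *vha)
    {
    	struct qla_hw_data_sketch *ha = vha->hw;

    	return ha->port_down_retry_count * PORT_RETRY_TIME;
    }

    int main(void)
    {
    	struct qla_hw_data_sketch hw = { .port_down_retry_count = 30 };
    	struct scsi_qla_host_sketch vha = { .host_no = 0, .hw = &hw };

    	printf("scsi(%ld): retry budget %d\n", vha.host_no,
    	       login_retry_budget(&vha));
    	return 0;
    }

Retry policy lives once per physical function; identity (host_no, flags, dpc bits) stays per-vport.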
 
 /*
@@ -2324,7 +2386,7 @@ qla2x00_update_fcport(scsi_qla_host_t *ha, fc_port_t *fcport)
  *      BIT_0 = error
  */
 static int
-qla2x00_configure_fabric(scsi_qla_host_t *ha)
+qla2x00_configure_fabric(scsi_qla_host_t *vha)
 {
 	int	rval, rval2;
 	fc_port_t	*fcport, *fcptemp;
@@ -2332,25 +2394,26 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
 	uint16_t	mb[MAILBOX_REGISTER_COUNT];
 	uint16_t	loop_id;
 	LIST_HEAD(new_fcports);
-	scsi_qla_host_t *pha = to_qla_parent(ha);
+	struct qla_hw_data *ha = vha->hw;
+	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
 
 	/* If FL port exists, then SNS is present */
 	if (IS_FWI2_CAPABLE(ha))
 		loop_id = NPH_F_PORT;
 	else
 		loop_id = SNS_FL_PORT;
-	rval = qla2x00_get_port_name(ha, loop_id, ha->fabric_node_name, 1);
+	rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_node_name, 1);
 	if (rval != QLA_SUCCESS) {
 		DEBUG2(printk("scsi(%ld): MBC_GET_PORT_NAME Failed, No FL "
-		    "Port\n", ha->host_no));
+		    "Port\n", vha->host_no));
 
-		ha->device_flags &= ~SWITCH_FOUND;
+		vha->device_flags &= ~SWITCH_FOUND;
 		return (QLA_SUCCESS);
 	}
-	ha->device_flags |= SWITCH_FOUND;
+	vha->device_flags |= SWITCH_FOUND;
 
 	/* Mark devices that need re-synchronization. */
-	rval2 = qla2x00_device_resync(ha);
+	rval2 = qla2x00_device_resync(vha);
 	if (rval2 == QLA_RSCNS_HANDLED) {
 		/* No point doing the scan, just continue. */
 		return (QLA_SUCCESS);
@@ -2358,15 +2421,15 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
 	do {
 		/* FDMI support. */
 		if (ql2xfdmienable &&
-		    test_and_clear_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags))
-			qla2x00_fdmi_register(ha);
+		    test_and_clear_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags))
+			qla2x00_fdmi_register(vha);
 
 		/* Ensure we are logged into the SNS. */
 		if (IS_FWI2_CAPABLE(ha))
 			loop_id = NPH_SNS;
 		else
 			loop_id = SIMPLE_NAME_SERVER;
-		ha->isp_ops->fabric_login(ha, loop_id, 0xff, 0xff,
+		ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff,
 		    0xfc, mb, BIT_1 | BIT_0);
 		if (mb[0] != MBS_COMMAND_COMPLETE) {
 			DEBUG2(qla_printk(KERN_INFO, ha,
@@ -2376,29 +2439,29 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
 			return (QLA_SUCCESS);
 		}
 
-		if (test_and_clear_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags)) {
-			if (qla2x00_rft_id(ha)) {
+		if (test_and_clear_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags)) {
+			if (qla2x00_rft_id(vha)) {
 				/* EMPTY */
 				DEBUG2(printk("scsi(%ld): Register FC-4 "
-				    "TYPE failed.\n", ha->host_no));
+				    "TYPE failed.\n", vha->host_no));
 			}
-			if (qla2x00_rff_id(ha)) {
+			if (qla2x00_rff_id(vha)) {
 				/* EMPTY */
 				DEBUG2(printk("scsi(%ld): Register FC-4 "
-				    "Features failed.\n", ha->host_no));
+				    "Features failed.\n", vha->host_no));
 			}
-			if (qla2x00_rnn_id(ha)) {
+			if (qla2x00_rnn_id(vha)) {
 				/* EMPTY */
 				DEBUG2(printk("scsi(%ld): Register Node Name "
-				    "failed.\n", ha->host_no));
-			} else if (qla2x00_rsnn_nn(ha)) {
+				    "failed.\n", vha->host_no));
+			} else if (qla2x00_rsnn_nn(vha)) {
 				/* EMPTY */
 				DEBUG2(printk("scsi(%ld): Register Symbolic "
-				    "Node Name failed.\n", ha->host_no));
+				    "Node Name failed.\n", vha->host_no));
 			}
 		}
 
-		rval = qla2x00_find_all_fabric_devs(ha, &new_fcports);
+		rval = qla2x00_find_all_fabric_devs(vha, &new_fcports);
 		if (rval != QLA_SUCCESS)
 			break;
 
@@ -2406,24 +2469,21 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
 		 * Logout all previous fabric devices marked lost, except
 		 * tape devices.
 		 */
-		list_for_each_entry(fcport, &pha->fcports, list) {
-			if (fcport->vp_idx !=ha->vp_idx)
-				continue;
-
-			if (test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags))
+		list_for_each_entry(fcport, &vha->vp_fcports, list) {
+			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
 				break;
 
 			if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
 				continue;
 
 			if (atomic_read(&fcport->state) == FCS_DEVICE_LOST) {
-				qla2x00_mark_device_lost(ha, fcport,
+				qla2x00_mark_device_lost(vha, fcport,
 				    ql2xplogiabsentdevice, 0);
 				if (fcport->loop_id != FC_NO_LOOP_ID &&
 				    (fcport->flags & FCF_TAPE_PRESENT) == 0 &&
 				    fcport->port_type != FCT_INITIATOR &&
 				    fcport->port_type != FCT_BROADCAST) {
-					ha->isp_ops->fabric_logout(ha,
+					ha->isp_ops->fabric_logout(vha,
 					    fcport->loop_id,
 					    fcport->d_id.b.domain,
 					    fcport->d_id.b.area,
@@ -2434,18 +2494,15 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
 		}
 
 		/* Starting free loop ID. */
-		next_loopid = pha->min_external_loopid;
+		next_loopid = ha->min_external_loopid;
 
 		/*
 		 * Scan through our port list and login entries that need to be
 		 * logged in.
 		 */
-		list_for_each_entry(fcport, &pha->fcports, list) {
-			if (fcport->vp_idx != ha->vp_idx)
-				continue;
-
-			if (atomic_read(&ha->loop_down_timer) ||
-			    test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags))
+		list_for_each_entry(fcport, &vha->vp_fcports, list) {
+			if (atomic_read(&vha->loop_down_timer) ||
+			    test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
 				break;
 
 			if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
@@ -2455,14 +2512,14 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
 			if (fcport->loop_id == FC_NO_LOOP_ID) {
 				fcport->loop_id = next_loopid;
 				rval = qla2x00_find_new_loop_id(
-				    to_qla_parent(ha), fcport);
+				    base_vha, fcport);
 				if (rval != QLA_SUCCESS) {
 					/* Ran out of IDs to use */
 					break;
 				}
 			}
 			/* Login and update database */
-			qla2x00_fabric_dev_login(ha, fcport, &next_loopid);
+			qla2x00_fabric_dev_login(vha, fcport, &next_loopid);
 		}
 
 		/* Exit if out of loop IDs. */
@@ -2474,31 +2531,26 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
 		 * Login and add the new devices to our port list.
 		 */
 		list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) {
-			if (atomic_read(&ha->loop_down_timer) ||
-			    test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags))
+			if (atomic_read(&vha->loop_down_timer) ||
+			    test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
 				break;
 
 			/* Find a new loop ID to use. */
 			fcport->loop_id = next_loopid;
-			rval = qla2x00_find_new_loop_id(to_qla_parent(ha),
-			    fcport);
+			rval = qla2x00_find_new_loop_id(base_vha, fcport);
 			if (rval != QLA_SUCCESS) {
 				/* Ran out of IDs to use */
 				break;
 			}
 
 			/* Login and update database */
-			qla2x00_fabric_dev_login(ha, fcport, &next_loopid);
-
-			if (ha->parent) {
-				fcport->ha = ha;
-				fcport->vp_idx = ha->vp_idx;
-				list_add_tail(&fcport->vp_fcport,
-				    &ha->vp_fcports);
-				list_move_tail(&fcport->list,
-				    &ha->parent->fcports);
-			} else
-				list_move_tail(&fcport->list, &ha->fcports);
+			qla2x00_fabric_dev_login(vha, fcport, &next_loopid);
+
+			if (vha->vp_idx) {
+				fcport->vha = vha;
+				fcport->vp_idx = vha->vp_idx;
+			}
+			list_move_tail(&fcport->list, &vha->vp_fcports);
 		}
 	} while (0);
 
@@ -2510,7 +2562,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
 
 	if (rval) {
 		DEBUG2(printk("scsi(%ld): Configure fabric error exit: "
-		    "rval=%d\n", ha->host_no, rval));
+		    "rval=%d\n", vha->host_no, rval));
 	}
 
 	return (rval);
@@ -2531,7 +2583,8 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
  *	Kernel context.
  */
 static int
-qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
+qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
+	struct list_head *new_fcports)
 {
 	int		rval;
 	uint16_t	loop_id;
@@ -2542,11 +2595,8 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
 	int		swl_idx;
 	int		first_dev, last_dev;
 	port_id_t	wrap, nxt_d_id;
-	int 		vp_index;
-	int		empty_vp_index;
-	int		found_vp;
-	scsi_qla_host_t *vha;
-	scsi_qla_host_t *pha = to_qla_parent(ha);
+	struct qla_hw_data *ha = vha->hw;
+	struct scsi_qla_host *vp, *base_vha = pci_get_drvdata(ha->pdev);
 
 	rval = QLA_SUCCESS;
 
@@ -2555,43 +2605,42 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
 	if (!swl) {
 		/*EMPTY*/
 		DEBUG2(printk("scsi(%ld): GID_PT allocations failed, fallback "
-		    "on GA_NXT\n", ha->host_no));
+		    "on GA_NXT\n", vha->host_no));
 	} else {
-		if (qla2x00_gid_pt(ha, swl) != QLA_SUCCESS) {
+		if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) {
 			kfree(swl);
 			swl = NULL;
-		} else if (qla2x00_gpn_id(ha, swl) != QLA_SUCCESS) {
+		} else if (qla2x00_gpn_id(vha, swl) != QLA_SUCCESS) {
 			kfree(swl);
 			swl = NULL;
-		} else if (qla2x00_gnn_id(ha, swl) != QLA_SUCCESS) {
+		} else if (qla2x00_gnn_id(vha, swl) != QLA_SUCCESS) {
 			kfree(swl);
 			swl = NULL;
 		} else if (ql2xiidmaenable &&
-		    qla2x00_gfpn_id(ha, swl) == QLA_SUCCESS) {
-			qla2x00_gpsc(ha, swl);
+		    qla2x00_gfpn_id(vha, swl) == QLA_SUCCESS) {
+			qla2x00_gpsc(vha, swl);
 		}
 	}
 	swl_idx = 0;
 
 	/* Allocate temporary fcport for any new fcports discovered. */
-	new_fcport = qla2x00_alloc_fcport(ha, GFP_KERNEL);
+	new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
 	if (new_fcport == NULL) {
 		kfree(swl);
 		return (QLA_MEMORY_ALLOC_FAILED);
 	}
 	new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
-	new_fcport->vp_idx = ha->vp_idx;
 	/* Set start port ID scan at adapter ID. */
 	first_dev = 1;
 	last_dev = 0;
 
 	/* Starting free loop ID. */
-	loop_id = pha->min_external_loopid;
-	for (; loop_id <= ha->last_loop_id; loop_id++) {
-		if (qla2x00_is_reserved_id(ha, loop_id))
+	loop_id = ha->min_external_loopid;
+	for (; loop_id <= ha->max_loop_id; loop_id++) {
+		if (qla2x00_is_reserved_id(vha, loop_id))
 			continue;
 
-		if (atomic_read(&ha->loop_down_timer) || LOOP_TRANSITION(ha))
+		if (atomic_read(&vha->loop_down_timer) || LOOP_TRANSITION(vha))
 			break;
 
 		if (swl != NULL) {
@@ -2614,7 +2663,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
 			}
 		} else {
 			/* Send GA_NXT to the switch */
-			rval = qla2x00_ga_nxt(ha, new_fcport);
+			rval = qla2x00_ga_nxt(vha, new_fcport);
 			if (rval != QLA_SUCCESS) {
 				qla_printk(KERN_WARNING, ha,
 				    "SNS scan failed -- assuming zero-entry "
@@ -2635,44 +2684,31 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
 			first_dev = 0;
 		} else if (new_fcport->d_id.b24 == wrap.b24) {
 			DEBUG2(printk("scsi(%ld): device wrap (%02x%02x%02x)\n",
-			    ha->host_no, new_fcport->d_id.b.domain,
+			    vha->host_no, new_fcport->d_id.b.domain,
 			    new_fcport->d_id.b.area, new_fcport->d_id.b.al_pa));
 			break;
 		}
 
 		/* Bypass if same physical adapter. */
-		if (new_fcport->d_id.b24 == pha->d_id.b24)
+		if (new_fcport->d_id.b24 == base_vha->d_id.b24)
 			continue;
 
 		/* Bypass virtual ports of the same host. */
-		if (pha->num_vhosts) {
-			for_each_mapped_vp_idx(pha, vp_index) {
-				empty_vp_index = 1;
-				found_vp = 0;
-				list_for_each_entry(vha, &pha->vp_list,
-				    vp_list) {
-					if (vp_index == vha->vp_idx) {
-						empty_vp_index = 0;
-						found_vp = 1;
-						break;
-					}
-				}
-
-				if (empty_vp_index)
-					continue;
-
-				if (found_vp &&
-				    new_fcport->d_id.b24 == vha->d_id.b24)
+		found = 0;
+		if (ha->num_vhosts) {
+			list_for_each_entry(vp, &ha->vp_list, list) {
+				if (new_fcport->d_id.b24 == vp->d_id.b24) {
+					found = 1;
 					break;
+				}
 			}
-
-			if (vp_index <= pha->max_npiv_vports)
+			if (found)
 				continue;
 		}
 
 		/* Bypass if same domain and area of adapter. */
 		if (((new_fcport->d_id.b24 & 0xffff00) ==
-		    (ha->d_id.b24 & 0xffff00)) && ha->current_topology ==
+		    (vha->d_id.b24 & 0xffff00)) && ha->current_topology ==
 			ISP_CFG_FL)
 			    continue;
 
@@ -2682,9 +2718,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
 
 		/* Locate matching device in database. */
 		found = 0;
-		list_for_each_entry(fcport, &pha->fcports, list) {
-			if (new_fcport->vp_idx != fcport->vp_idx)
-				continue;
+		list_for_each_entry(fcport, &vha->vp_fcports, list) {
 			if (memcmp(new_fcport->port_name, fcport->port_name,
 			    WWN_SIZE))
 				continue;
@@ -2728,7 +2762,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
 			    (fcport->flags & FCF_TAPE_PRESENT) == 0 &&
 			    fcport->port_type != FCT_INITIATOR &&
 			    fcport->port_type != FCT_BROADCAST) {
-				ha->isp_ops->fabric_logout(ha, fcport->loop_id,
+				ha->isp_ops->fabric_logout(vha, fcport->loop_id,
 				    fcport->d_id.b.domain, fcport->d_id.b.area,
 				    fcport->d_id.b.al_pa);
 				fcport->loop_id = FC_NO_LOOP_ID;
@@ -2739,27 +2773,25 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
 
 		if (found)
 			continue;
-
 		/* If device was not in our fcports list, then add it. */
 		list_add_tail(&new_fcport->list, new_fcports);
 
 		/* Allocate a new replacement fcport. */
 		nxt_d_id.b24 = new_fcport->d_id.b24;
-		new_fcport = qla2x00_alloc_fcport(ha, GFP_KERNEL);
+		new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
 		if (new_fcport == NULL) {
 			kfree(swl);
 			return (QLA_MEMORY_ALLOC_FAILED);
 		}
 		new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
 		new_fcport->d_id.b24 = nxt_d_id.b24;
-		new_fcport->vp_idx = ha->vp_idx;
 	}
 
 	kfree(swl);
 	kfree(new_fcport);
 
 	if (!list_empty(new_fcports))
-		ha->device_flags |= DFLG_FABRIC_DEVICES;
+		vha->device_flags |= DFLG_FABRIC_DEVICES;
 
 	return (rval);
 }
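
[Annotation] The deleted for_each_mapped_vp_idx block re-derived each vport from an index map before it could compare port IDs; the replacement walks ha->vp_list directly. A small sketch of the simplified own-vport bypass — the list is hand-rolled here and d_id is flattened to its 24-bit form:

    #include <stdbool.h>
    #include <stdio.h>

    struct vp {
    	unsigned d_id_b24;       /* 24-bit FC port ID */
    	struct vp *next;         /* link on the shared hw's vp_list */
    };

    /* Skip fabric entries that are really our own virtual ports. */
    static bool is_own_vport(const struct vp *vp_list, unsigned d_id_b24)
    {
    	for (const struct vp *vp = vp_list; vp; vp = vp->next)
    		if (vp->d_id_b24 == d_id_b24)
    			return true;
    	return false;
    }

    int main(void)
    {
    	struct vp v2 = { 0x0a0200, NULL }, v1 = { 0x0a0100, &v2 };

    	printf("%06x own vport? %d\n", 0x0a0200, is_own_vport(&v1, 0x0a0200));
    	printf("%06x own vport? %d\n", 0x0b0000, is_own_vport(&v1, 0x0b0000));
    	return 0;
    }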
@@ -2779,13 +2811,14 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
  *	Kernel context.
  */
 static int
-qla2x00_find_new_loop_id(scsi_qla_host_t *ha, fc_port_t *dev)
+qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
 {
 	int	rval;
 	int	found;
 	fc_port_t *fcport;
 	uint16_t first_loop_id;
-	scsi_qla_host_t *pha = to_qla_parent(ha);
+	struct qla_hw_data *ha = vha->hw;
+	struct scsi_qla_host *vp;
 
 	rval = QLA_SUCCESS;
 
@@ -2794,17 +2827,15 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *ha, fc_port_t *dev)
 
 	for (;;) {
 		/* Skip loop ID if already used by adapter. */
-		if (dev->loop_id == ha->loop_id) {
+		if (dev->loop_id == vha->loop_id)
 			dev->loop_id++;
-		}
 
 		/* Skip reserved loop IDs. */
-		while (qla2x00_is_reserved_id(ha, dev->loop_id)) {
+		while (qla2x00_is_reserved_id(vha, dev->loop_id))
 			dev->loop_id++;
-		}
 
 		/* Reset loop ID if passed the end. */
-		if (dev->loop_id > ha->last_loop_id) {
+		if (dev->loop_id > ha->max_loop_id) {
 			/* first loop ID. */
 			dev->loop_id = ha->min_external_loopid;
 		}
@@ -2812,12 +2843,17 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *ha, fc_port_t *dev)
 		/* Check for loop ID being already in use. */
 		found = 0;
 		fcport = NULL;
-		list_for_each_entry(fcport, &pha->fcports, list) {
-			if (fcport->loop_id == dev->loop_id && fcport != dev) {
-				/* ID possibly in use */
-				found++;
-				break;
+		list_for_each_entry(vp, &ha->vp_list, list) {
+			list_for_each_entry(fcport, &vp->vp_fcports, list) {
+				if (fcport->loop_id == dev->loop_id &&
+								fcport != dev) {
+					/* ID possibly in use */
+					found++;
+					break;
+				}
 			}
+			if (found)
+				break;
 		}
 
 		/* If not in use then it is free to use. */
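
[Annotation] qla2x00_find_new_loop_id() keeps its wrap-around search but now probes every vhost's port list for collisions. The search itself is a plain cyclic scan; a self-contained sketch, with the reserved/in-use predicates as stand-ins for qla2x00_is_reserved_id() and the vp_list walk:

    #include <stdbool.h>
    #include <stdio.h>

    #define MIN_EXTERNAL_LOOPID 0x81
    #define MAX_LOOP_ID         0xff

    static bool id_reserved(int id) { return id == 0xf0; }            /* stand-in */
    static bool id_in_use(int id)   { return id == 0x81 || id == 0x82; }

    /* Cyclic search for a free loop ID, wrapping at the end of the range. */
    static int find_new_loop_id(int start)
    {
    	int first = -1, id = start;

    	for (;;) {
    		while (id_reserved(id))
    			id++;
    		if (id > MAX_LOOP_ID)
    			id = MIN_EXTERNAL_LOOPID;
    		if (first == id)
    			return -1;       /* wrapped around: range exhausted */
    		if (first < 0)
    			first = id;      /* remember where the scan began */
    		if (!id_in_use(id))
    			return id;
    		id++;
    	}
    }

    int main(void)
    {
    	printf("free loop id: 0x%x\n", find_new_loop_id(0x81)); /* 0x83 */
    	return 0;
    }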
@@ -2850,7 +2886,7 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *ha, fc_port_t *dev)
  *	Kernel context.
  */
 static int
-qla2x00_device_resync(scsi_qla_host_t *ha)
+qla2x00_device_resync(scsi_qla_host_t *vha)
 {
 	int	rval;
 	uint32_t mask;
@@ -2859,14 +2895,13 @@ qla2x00_device_resync(scsi_qla_host_t *ha)
 	uint8_t rscn_out_iter;
 	uint8_t format;
 	port_id_t d_id;
-	scsi_qla_host_t *pha = to_qla_parent(ha);
 
 	rval = QLA_RSCNS_HANDLED;
 
-	while (ha->rscn_out_ptr != ha->rscn_in_ptr ||
-	    ha->flags.rscn_queue_overflow) {
+	while (vha->rscn_out_ptr != vha->rscn_in_ptr ||
+	    vha->flags.rscn_queue_overflow) {
 
-		rscn_entry = ha->rscn_queue[ha->rscn_out_ptr];
+		rscn_entry = vha->rscn_queue[vha->rscn_out_ptr];
 		format = MSB(MSW(rscn_entry));
 		d_id.b.domain = LSB(MSW(rscn_entry));
 		d_id.b.area = MSB(LSW(rscn_entry));
@@ -2874,37 +2909,37 @@ qla2x00_device_resync(scsi_qla_host_t *ha)
 
 		DEBUG(printk("scsi(%ld): RSCN queue entry[%d] = "
 		    "[%02x/%02x%02x%02x].\n",
-		    ha->host_no, ha->rscn_out_ptr, format, d_id.b.domain,
+		    vha->host_no, vha->rscn_out_ptr, format, d_id.b.domain,
 		    d_id.b.area, d_id.b.al_pa));
 
-		ha->rscn_out_ptr++;
-		if (ha->rscn_out_ptr == MAX_RSCN_COUNT)
-			ha->rscn_out_ptr = 0;
+		vha->rscn_out_ptr++;
+		if (vha->rscn_out_ptr == MAX_RSCN_COUNT)
+			vha->rscn_out_ptr = 0;
 
 		/* Skip duplicate entries. */
-		for (rscn_out_iter = ha->rscn_out_ptr;
-		    !ha->flags.rscn_queue_overflow &&
-		    rscn_out_iter != ha->rscn_in_ptr;
+		for (rscn_out_iter = vha->rscn_out_ptr;
+		    !vha->flags.rscn_queue_overflow &&
+		    rscn_out_iter != vha->rscn_in_ptr;
 		    rscn_out_iter = (rscn_out_iter ==
 			(MAX_RSCN_COUNT - 1)) ? 0: rscn_out_iter + 1) {
 
-			if (rscn_entry != ha->rscn_queue[rscn_out_iter])
+			if (rscn_entry != vha->rscn_queue[rscn_out_iter])
 				break;
 
 			DEBUG(printk("scsi(%ld): Skipping duplicate RSCN queue "
-			    "entry found at [%d].\n", ha->host_no,
+			    "entry found at [%d].\n", vha->host_no,
 			    rscn_out_iter));
 
-			ha->rscn_out_ptr = rscn_out_iter;
+			vha->rscn_out_ptr = rscn_out_iter;
 		}
 
 		/* Queue overflow, set switch default case. */
-		if (ha->flags.rscn_queue_overflow) {
+		if (vha->flags.rscn_queue_overflow) {
 			DEBUG(printk("scsi(%ld): device_resync: rscn "
-			    "overflow.\n", ha->host_no));
+			    "overflow.\n", vha->host_no));
 
 			format = 3;
-			ha->flags.rscn_queue_overflow = 0;
+			vha->flags.rscn_queue_overflow = 0;
 		}
 
 		switch (format) {
@@ -2920,16 +2955,13 @@ qla2x00_device_resync(scsi_qla_host_t *ha)
 		default:
 			mask = 0x0;
 			d_id.b24 = 0;
-			ha->rscn_out_ptr = ha->rscn_in_ptr;
+			vha->rscn_out_ptr = vha->rscn_in_ptr;
 			break;
 		}
 
 		rval = QLA_SUCCESS;
 
-		list_for_each_entry(fcport, &pha->fcports, list) {
-			if (fcport->vp_idx != ha->vp_idx)
-				continue;
-
+		list_for_each_entry(fcport, &vha->vp_fcports, list) {
 			if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
 			    (fcport->d_id.b24 & mask) != d_id.b24 ||
 			    fcport->port_type == FCT_BROADCAST)
@@ -2938,7 +2970,7 @@ qla2x00_device_resync(scsi_qla_host_t *ha)
 			if (atomic_read(&fcport->state) == FCS_ONLINE) {
 				if (format != 3 ||
 				    fcport->port_type != FCT_INITIATOR) {
-					qla2x00_mark_device_lost(ha, fcport,
+					qla2x00_mark_device_lost(vha, fcport,
 					    0, 0);
 				}
 			}
@@ -2965,30 +2997,31 @@ qla2x00_device_resync(scsi_qla_host_t *ha)
  *	Kernel context.
  */
 static int
-qla2x00_fabric_dev_login(scsi_qla_host_t *ha, fc_port_t *fcport,
+qla2x00_fabric_dev_login(scsi_qla_host_t *vha, fc_port_t *fcport,
     uint16_t *next_loopid)
 {
 	int	rval;
 	int	retry;
 	uint8_t opts;
+	struct qla_hw_data *ha = vha->hw;
 
 	rval = QLA_SUCCESS;
 	retry = 0;
 
-	rval = qla2x00_fabric_login(ha, fcport, next_loopid);
+	rval = qla2x00_fabric_login(vha, fcport, next_loopid);
 	if (rval == QLA_SUCCESS) {
 		/* Send an ADISC to tape devices.*/
 		opts = 0;
 		if (fcport->flags & FCF_TAPE_PRESENT)
 			opts |= BIT_1;
-		rval = qla2x00_get_port_database(ha, fcport, opts);
+		rval = qla2x00_get_port_database(vha, fcport, opts);
 		if (rval != QLA_SUCCESS) {
-			ha->isp_ops->fabric_logout(ha, fcport->loop_id,
+			ha->isp_ops->fabric_logout(vha, fcport->loop_id,
 			    fcport->d_id.b.domain, fcport->d_id.b.area,
 			    fcport->d_id.b.al_pa);
-			qla2x00_mark_device_lost(ha, fcport, 1, 0);
+			qla2x00_mark_device_lost(vha, fcport, 1, 0);
 		} else {
-			qla2x00_update_fcport(ha, fcport);
+			qla2x00_update_fcport(vha, fcport);
 		}
 	}
 
@@ -3010,13 +3043,14 @@ qla2x00_fabric_dev_login(scsi_qla_host_t *ha, fc_port_t *fcport,
  *      3 - Fatal error
  */
 int
-qla2x00_fabric_login(scsi_qla_host_t *ha, fc_port_t *fcport,
+qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
     uint16_t *next_loopid)
 {
 	int	rval;
 	int	retry;
 	uint16_t tmp_loopid;
 	uint16_t mb[MAILBOX_REGISTER_COUNT];
+	struct qla_hw_data *ha = vha->hw;
 
 	retry = 0;
 	tmp_loopid = 0;
@@ -3024,11 +3058,11 @@ qla2x00_fabric_login(scsi_qla_host_t *ha, fc_port_t *fcport,
 	for (;;) {
 		DEBUG(printk("scsi(%ld): Trying Fabric Login w/loop id 0x%04x "
  		    "for port %02x%02x%02x.\n",
- 		    ha->host_no, fcport->loop_id, fcport->d_id.b.domain,
+		    vha->host_no, fcport->loop_id, fcport->d_id.b.domain,
 		    fcport->d_id.b.area, fcport->d_id.b.al_pa));
 
 		/* Login fcport on switch. */
-		ha->isp_ops->fabric_login(ha, fcport->loop_id,
+		ha->isp_ops->fabric_login(vha, fcport->loop_id,
 		    fcport->d_id.b.domain, fcport->d_id.b.area,
 		    fcport->d_id.b.al_pa, mb, BIT_0);
 		if (mb[0] == MBS_PORT_ID_USED) {
@@ -3084,7 +3118,7 @@ qla2x00_fabric_login(scsi_qla_host_t *ha, fc_port_t *fcport,
 			 * Loop ID already used, try next loop ID.
 			 */
 			fcport->loop_id++;
-			rval = qla2x00_find_new_loop_id(ha, fcport);
+			rval = qla2x00_find_new_loop_id(vha, fcport);
 			if (rval != QLA_SUCCESS) {
 				/* Ran out of loop IDs to use */
 				break;
@@ -3096,10 +3130,10 @@ qla2x00_fabric_login(scsi_qla_host_t *ha, fc_port_t *fcport,
 			 * dead.
 			 */
 			*next_loopid = fcport->loop_id;
-			ha->isp_ops->fabric_logout(ha, fcport->loop_id,
+			ha->isp_ops->fabric_logout(vha, fcport->loop_id,
 			    fcport->d_id.b.domain, fcport->d_id.b.area,
 			    fcport->d_id.b.al_pa);
-			qla2x00_mark_device_lost(ha, fcport, 1, 0);
+			qla2x00_mark_device_lost(vha, fcport, 1, 0);
 
 			rval = 1;
 			break;
@@ -3109,12 +3143,12 @@ qla2x00_fabric_login(scsi_qla_host_t *ha, fc_port_t *fcport,
 			 */
 			DEBUG2(printk("%s(%ld): failed=%x port_id=%02x%02x%02x "
  			    "loop_id=%x jiffies=%lx.\n",
- 			    __func__, ha->host_no, mb[0],
+			    __func__, vha->host_no, mb[0],
 			    fcport->d_id.b.domain, fcport->d_id.b.area,
 			    fcport->d_id.b.al_pa, fcport->loop_id, jiffies));
 
 			*next_loopid = fcport->loop_id;
-			ha->isp_ops->fabric_logout(ha, fcport->loop_id,
+			ha->isp_ops->fabric_logout(vha, fcport->loop_id,
 			    fcport->d_id.b.domain, fcport->d_id.b.area,
 			    fcport->d_id.b.al_pa);
 			fcport->loop_id = FC_NO_LOOP_ID;
@@ -3142,13 +3176,13 @@ qla2x00_fabric_login(scsi_qla_host_t *ha, fc_port_t *fcport,
  *      3 - Fatal error
  */
 int
-qla2x00_local_device_login(scsi_qla_host_t *ha, fc_port_t *fcport)
+qla2x00_local_device_login(scsi_qla_host_t *vha, fc_port_t *fcport)
 {
 	int		rval;
 	uint16_t	mb[MAILBOX_REGISTER_COUNT];
 
 	memset(mb, 0, sizeof(mb));
-	rval = qla2x00_login_local_device(ha, fcport, mb, BIT_0);
+	rval = qla2x00_login_local_device(vha, fcport, mb, BIT_0);
 	if (rval == QLA_SUCCESS) {
 		/* Interrogate mailbox registers for any errors */
 		if (mb[0] == MBS_COMMAND_ERROR)
@@ -3172,57 +3206,57 @@ qla2x00_local_device_login(scsi_qla_host_t *ha, fc_port_t *fcport)
  *      0 = success
  */
 int
-qla2x00_loop_resync(scsi_qla_host_t *ha)
+qla2x00_loop_resync(scsi_qla_host_t *vha)
 {
-	int   rval;
+	int rval = QLA_SUCCESS;
 	uint32_t wait_time;
-
-	rval = QLA_SUCCESS;
-
-	atomic_set(&ha->loop_state, LOOP_UPDATE);
-	clear_bit(ISP_ABORT_RETRY, &ha->dpc_flags);
-	if (ha->flags.online) {
-		if (!(rval = qla2x00_fw_ready(ha))) {
+	struct qla_hw_data *ha = vha->hw;
+	struct req_que *req = ha->req_q_map[0];
+	struct rsp_que *rsp = ha->rsp_q_map[0];
+
+	atomic_set(&vha->loop_state, LOOP_UPDATE);
+	clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
+	if (vha->flags.online) {
+		if (!(rval = qla2x00_fw_ready(vha))) {
 			/* Wait at most MAX_TARGET RSCNs for a stable link. */
 			wait_time = 256;
 			do {
-				atomic_set(&ha->loop_state, LOOP_UPDATE);
+				atomic_set(&vha->loop_state, LOOP_UPDATE);
 
 				/* Issue a marker after FW becomes ready. */
-				qla2x00_marker(ha, 0, 0, MK_SYNC_ALL);
-				ha->marker_needed = 0;
+				qla2x00_marker(vha, req, rsp, 0, 0,
+					MK_SYNC_ALL);
+				vha->marker_needed = 0;
 
 				/* Remap devices on Loop. */
-				clear_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
+				clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
 
-				qla2x00_configure_loop(ha);
+				qla2x00_configure_loop(vha);
 				wait_time--;
-			} while (!atomic_read(&ha->loop_down_timer) &&
-				!(test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags)) &&
-				wait_time &&
-				(test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)));
+			} while (!atomic_read(&vha->loop_down_timer) &&
+				!(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
+				&& wait_time && (test_bit(LOOP_RESYNC_NEEDED,
+				&vha->dpc_flags)));
 		}
 	}
 
-	if (test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags)) {
+	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
 		return (QLA_FUNCTION_FAILED);
-	}
 
-	if (rval) {
+	if (rval)
 		DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__));
-	}
 
 	return (rval);
 }
 
 void
-qla2x00_update_fcports(scsi_qla_host_t *ha)
+qla2x00_update_fcports(scsi_qla_host_t *vha)
 {
 	fc_port_t *fcport;
 
 	/* Go with deferred removal of rport references. */
-	list_for_each_entry(fcport, &ha->fcports, list)
-		if (fcport->drport &&
+	list_for_each_entry(fcport, &vha->vp_fcports, list)
+		if (fcport && fcport->drport &&
 		    atomic_read(&fcport->state) != FCS_UNCONFIGURED)
 			qla2x00_rport_del(fcport);
 }
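
[Annotation] qla2x00_loop_resync() above also shows the other recurring signature change: IOCB helpers such as qla2x00_marker() now name their queues explicitly, with the defaults fetched from the hw-wide maps (ha->req_q_map[0], ha->rsp_q_map[0]) instead of being implied by the host. A sketch of that indirection, with the map size and types illustrative:

    #include <stdio.h>

    struct req_que { int id; };
    struct rsp_que { int id; };

    struct hw {
    	struct req_que *req_q_map[4];   /* queue 0 = default/base queue */
    	struct rsp_que *rsp_q_map[4];
    };

    /* Queue-aware marker: the caller names the rings, nothing is implied. */
    static int send_marker(struct req_que *req, struct rsp_que *rsp)
    {
    	printf("marker on req %d / rsp %d\n", req->id, rsp->id);
    	return 0;
    }

    int main(void)
    {
    	struct req_que r0 = { 0 };
    	struct rsp_que s0 = { 0 };
    	struct hw ha = { .req_q_map = { &r0 }, .rsp_q_map = { &s0 } };

    	return send_marker(ha.req_q_map[0], ha.rsp_q_map[0]);
    }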
@@ -3238,63 +3272,65 @@ qla2x00_update_fcports(scsi_qla_host_t *ha)
 *      0 = success
 */
 int
-qla2x00_abort_isp(scsi_qla_host_t *ha)
+qla2x00_abort_isp(scsi_qla_host_t *vha)
 {
 	int rval;
 	uint8_t        status = 0;
-	scsi_qla_host_t *vha;
+	struct qla_hw_data *ha = vha->hw;
+	struct scsi_qla_host *vp;
+	struct req_que *req = ha->req_q_map[0];
 
-	if (ha->flags.online) {
-		ha->flags.online = 0;
-		clear_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
+	if (vha->flags.online) {
+		vha->flags.online = 0;
+		clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
 		ha->qla_stats.total_isp_aborts++;
 
 		qla_printk(KERN_INFO, ha,
 		    "Performing ISP error recovery - ha= %p.\n", ha);
-		ha->isp_ops->reset_chip(ha);
-
-		atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
-		if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
-			atomic_set(&ha->loop_state, LOOP_DOWN);
-			qla2x00_mark_all_devices_lost(ha, 0);
-			list_for_each_entry(vha, &ha->vp_list, vp_list)
-			       qla2x00_mark_all_devices_lost(vha, 0);
+		ha->isp_ops->reset_chip(vha);
+
+		atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
+		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
+			atomic_set(&vha->loop_state, LOOP_DOWN);
+			qla2x00_mark_all_devices_lost(vha, 0);
+			list_for_each_entry(vp, &ha->vp_list, list)
+			       qla2x00_mark_all_devices_lost(vp, 0);
 		} else {
-			if (!atomic_read(&ha->loop_down_timer))
-				atomic_set(&ha->loop_down_timer,
+			if (!atomic_read(&vha->loop_down_timer))
+				atomic_set(&vha->loop_down_timer,
 				    LOOP_DOWN_TIME);
 		}
 
 		/* Requeue all commands in outstanding command list. */
-		qla2x00_abort_all_cmds(ha, DID_RESET << 16);
+		qla2x00_abort_all_cmds(vha, DID_RESET << 16);
 
-		ha->isp_ops->get_flash_version(ha, ha->request_ring);
+		ha->isp_ops->get_flash_version(vha, req->ring);
 
-		ha->isp_ops->nvram_config(ha);
+		ha->isp_ops->nvram_config(vha);
 
-		if (!qla2x00_restart_isp(ha)) {
-			clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
+		if (!qla2x00_restart_isp(vha)) {
+			clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
 
-			if (!atomic_read(&ha->loop_down_timer)) {
+			if (!atomic_read(&vha->loop_down_timer)) {
 				/*
 				 * Issue marker command only when we are going
 				 * to start the I/O .
 				 */
-				ha->marker_needed = 1;
+				vha->marker_needed = 1;
 			}
 
-			ha->flags.online = 1;
+			vha->flags.online = 1;
 
 			ha->isp_ops->enable_intrs(ha);
 
 			ha->isp_abort_cnt = 0;
-			clear_bit(ISP_ABORT_RETRY, &ha->dpc_flags);
+			clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
 
 			if (ha->fce) {
 				ha->flags.fce_enabled = 1;
 				memset(ha->fce, 0,
 				    fce_calc_size(ha->fce_bufs));
-				rval = qla2x00_enable_fce_trace(ha,
+				rval = qla2x00_enable_fce_trace(vha,
 				    ha->fce_dma, ha->fce_bufs, ha->fce_mb,
 				    &ha->fce_bufs);
 				if (rval) {
@@ -3307,7 +3343,7 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
 
 			if (ha->eft) {
 				memset(ha->eft, 0, EFT_SIZE);
-				rval = qla2x00_enable_eft_trace(ha,
+				rval = qla2x00_enable_eft_trace(vha,
 				    ha->eft_dma, EFT_NUM_BUFFERS);
 				if (rval) {
 					qla_printk(KERN_WARNING, ha,
@@ -3316,8 +3352,8 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
 				}
 			}
 		} else {	/* failed the ISP abort */
-			ha->flags.online = 1;
-			if (test_bit(ISP_ABORT_RETRY, &ha->dpc_flags)) {
+			vha->flags.online = 1;
+			if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
 				if (ha->isp_abort_cnt == 0) {
  					qla_printk(KERN_WARNING, ha,
 					    "ISP error recovery failed - "
@@ -3326,37 +3362,41 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
 					 * The next call disables the board
 					 * completely.
 					 */
-					ha->isp_ops->reset_adapter(ha);
-					ha->flags.online = 0;
+					ha->isp_ops->reset_adapter(vha);
+					vha->flags.online = 0;
 					clear_bit(ISP_ABORT_RETRY,
-					    &ha->dpc_flags);
+					    &vha->dpc_flags);
 					status = 0;
 				} else { /* schedule another ISP abort */
 					ha->isp_abort_cnt--;
 					DEBUG(printk("qla%ld: ISP abort - "
 					    "retry remaining %d\n",
-					    ha->host_no, ha->isp_abort_cnt));
+					    vha->host_no, ha->isp_abort_cnt));
 					status = 1;
 				}
 			} else {
 				ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
 				DEBUG(printk("qla2x00(%ld): ISP error recovery "
 				    "- retrying (%d) more times\n",
-				    ha->host_no, ha->isp_abort_cnt));
-				set_bit(ISP_ABORT_RETRY, &ha->dpc_flags);
+				    vha->host_no, ha->isp_abort_cnt));
+				set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
 				status = 1;
 			}
 		}
 
 	}
 
-	if (status) {
+	if (!status) {
+		DEBUG(printk(KERN_INFO
+				"qla2x00_abort_isp(%ld): succeeded.\n",
+				vha->host_no));
+		list_for_each_entry(vp, &ha->vp_list, list) {
+			if (vp->vp_idx)
+				qla2x00_vp_abort_isp(vp);
+		}
+	} else {
 		qla_printk(KERN_INFO, ha,
 			"qla2x00_abort_isp: **** FAILED ****\n");
-	} else {
-		DEBUG(printk(KERN_INFO
-				"qla2x00_abort_isp(%ld): exiting.\n",
-				ha->host_no));
 	}
 
 	return(status);
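
[Annotation] The rewritten success path of qla2x00_abort_isp() fans recovery out to the NPIV vports: once the base port restarts, each entry on ha->vp_list with a non-zero vp_idx gets its own qla2x00_vp_abort_isp() pass. A sketch of the fan-out, list hand-rolled and recovery stubbed:

    #include <stdio.h>

    struct vport {
    	int vp_idx;              /* 0 = base (physical) port */
    	struct vport *next;
    };

    static void vp_abort_isp(struct vport *vp)
    {
    	printf("recovering vport %d\n", vp->vp_idx);
    }

    /* After base-port recovery succeeds, recover each virtual port too. */
    static void recover_vports(struct vport *vp_list)
    {
    	for (struct vport *vp = vp_list; vp; vp = vp->next)
    		if (vp->vp_idx)          /* skip the base port itself */
    			vp_abort_isp(vp);
    }

    int main(void)
    {
    	struct vport v2 = { 2, NULL }, v1 = { 1, &v2 }, base = { 0, &v1 };

    	recover_vports(&base);
    	return 0;
    }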
@@ -3373,42 +3413,50 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
 *      0 = success
 */
 static int
-qla2x00_restart_isp(scsi_qla_host_t *ha)
+qla2x00_restart_isp(scsi_qla_host_t *vha)
 {
 	uint8_t		status = 0;
 	uint32_t wait_time;
+	struct qla_hw_data *ha = vha->hw;
+	struct req_que *req = ha->req_q_map[0];
+	struct rsp_que *rsp = ha->rsp_q_map[0];
 
 	/* If firmware needs to be loaded */
-	if (qla2x00_isp_firmware(ha)) {
-		ha->flags.online = 0;
-		if (!(status = ha->isp_ops->chip_diag(ha)))
-			status = qla2x00_setup_chip(ha);
+	if (qla2x00_isp_firmware(vha)) {
+		vha->flags.online = 0;
+		status = ha->isp_ops->chip_diag(vha);
+		if (!status)
+			status = qla2x00_setup_chip(vha);
 	}
 
-	if (!status && !(status = qla2x00_init_rings(ha))) {
-		clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
-		if (!(status = qla2x00_fw_ready(ha))) {
+	if (!status && !(status = qla2x00_init_rings(vha))) {
+		clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
+		/* Initialize the queues in use */
+		qla25xx_init_queues(ha);
+
+		status = qla2x00_fw_ready(vha);
+		if (!status) {
 			DEBUG(printk("%s(): Start configure loop, "
 			    "status = %d\n", __func__, status));
 
 			/* Issue a marker after FW becomes ready. */
-			qla2x00_marker(ha, 0, 0, MK_SYNC_ALL);
+			qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
 
-			ha->flags.online = 1;
+			vha->flags.online = 1;
 			/* Wait at most MAX_TARGET RSCNs for a stable link. */
 			wait_time = 256;
 			do {
-				clear_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
-				qla2x00_configure_loop(ha);
+				clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+				qla2x00_configure_loop(vha);
 				wait_time--;
-			} while (!atomic_read(&ha->loop_down_timer) &&
-				!(test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags)) &&
-				wait_time &&
-				(test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)));
+			} while (!atomic_read(&vha->loop_down_timer) &&
+				!(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
+				&& wait_time && (test_bit(LOOP_RESYNC_NEEDED,
+				&vha->dpc_flags)));
 		}
 
 		/* if no cable then assume it's good */
-		if ((ha->device_flags & DFLG_NO_CABLE))
+		if ((vha->device_flags & DFLG_NO_CABLE))
 			status = 0;
 
 		DEBUG(printk("%s(): Configure loop done, status = 0x%x\n",
@@ -3418,6 +3466,46 @@ qla2x00_restart_isp(scsi_qla_host_t *ha)
 	return (status);
 }
 
+static int
+qla25xx_init_queues(struct qla_hw_data *ha)
+{
+	struct rsp_que *rsp = NULL;
+	struct req_que *req = NULL;
+	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+	int ret = -1;
+	int i;
+
+	for (i = 1; i < ha->max_queues; i++) {
+		rsp = ha->rsp_q_map[i];
+		if (rsp) {
+			rsp->options &= ~BIT_0;
+			ret = qla25xx_init_rsp_que(base_vha, rsp, rsp->options);
+			if (ret != QLA_SUCCESS)
+				DEBUG2_17(printk(KERN_WARNING
+					"%s Rsp que:%d init failed\n", __func__,
+						rsp->id));
+			else
+				DEBUG2_17(printk(KERN_INFO
+					"%s Rsp que:%d inited\n", __func__,
+						rsp->id));
+		}
+		req = ha->req_q_map[i];
+		if (req) {
+			req->options &= ~BIT_0;
+			ret = qla25xx_init_req_que(base_vha, req, req->options);
+			if (ret != QLA_SUCCESS)
+				DEBUG2_17(printk(KERN_WARNING
+					"%s Req que:%d init failed\n", __func__,
+						req->id));
+			else
+				DEBUG2_17(printk(KERN_WARNING
+					"%s Rsp que:%d inited\n", __func__,
+						req->id));
+		}
+	}
+	return ret;
+}
+
 /*
 * qla2x00_reset_adapter
 *      Reset adapter.
@@ -3426,12 +3514,13 @@ qla2x00_restart_isp(scsi_qla_host_t *ha)
 *      ha = adapter block pointer.
 */
 void
-qla2x00_reset_adapter(scsi_qla_host_t *ha)
+qla2x00_reset_adapter(scsi_qla_host_t *vha)
 {
 	unsigned long flags = 0;
+	struct qla_hw_data *ha = vha->hw;
 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
 
-	ha->flags.online = 0;
+	vha->flags.online = 0;
 	ha->isp_ops->disable_intrs(ha);
 
 	spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -3443,12 +3532,13 @@ qla2x00_reset_adapter(scsi_qla_host_t *ha)
 }
 
 void
-qla24xx_reset_adapter(scsi_qla_host_t *ha)
+qla24xx_reset_adapter(scsi_qla_host_t *vha)
 {
 	unsigned long flags = 0;
+	struct qla_hw_data *ha = vha->hw;
 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
 
-	ha->flags.online = 0;
+	vha->flags.online = 0;
 	ha->isp_ops->disable_intrs(ha);
 
 	spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -3462,9 +3552,11 @@ qla24xx_reset_adapter(scsi_qla_host_t *ha)
 /* On sparc systems, obtain port and node WWN from firmware
  * properties.
  */
-static void qla24xx_nvram_wwn_from_ofw(scsi_qla_host_t *ha, struct nvram_24xx *nv)
+static void qla24xx_nvram_wwn_from_ofw(scsi_qla_host_t *vha,
+	struct nvram_24xx *nv)
 {
 #ifdef CONFIG_SPARC
+	struct qla_hw_data *ha = vha->hw;
 	struct pci_dev *pdev = ha->pdev;
 	struct device_node *dp = pci_device_to_OF_node(pdev);
 	const u8 *val;
@@ -3481,7 +3573,7 @@ static void qla24xx_nvram_wwn_from_ofw(scsi_qla_host_t *ha, struct nvram_24xx *n
 }
 
 int
-qla24xx_nvram_config(scsi_qla_host_t *ha)
+qla24xx_nvram_config(scsi_qla_host_t *vha)
 {
 	int   rval;
 	struct init_cb_24xx *icb;
@@ -3490,6 +3582,7 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
 	uint8_t  *dptr1, *dptr2;
 	uint32_t chksum;
 	uint16_t cnt;
+	struct qla_hw_data *ha = vha->hw;
 
 	rval = QLA_SUCCESS;
 	icb = (struct init_cb_24xx *)ha->init_cb;
@@ -3507,12 +3600,12 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
 
 	/* Get VPD data into cache */
 	ha->vpd = ha->nvram + VPD_OFFSET;
-	ha->isp_ops->read_nvram(ha, (uint8_t *)ha->vpd,
+	ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd,
 	    ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4);
 
 	/* Get NVRAM data into cache and calculate checksum. */
 	dptr = (uint32_t *)nv;
-	ha->isp_ops->read_nvram(ha, (uint8_t *)dptr, ha->nvram_base,
+	ha->isp_ops->read_nvram(vha, (uint8_t *)dptr, ha->nvram_base,
 	    ha->nvram_size);
 	for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++)
 		chksum += le32_to_cpu(*dptr++);
@@ -3557,7 +3650,7 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
 		nv->node_name[5] = 0x1c;
 		nv->node_name[6] = 0x55;
 		nv->node_name[7] = 0x86;
-		qla24xx_nvram_wwn_from_ofw(ha, nv);
+		qla24xx_nvram_wwn_from_ofw(vha, nv);
 		nv->login_retry_count = __constant_cpu_to_le16(8);
 		nv->interrupt_delay_timer = __constant_cpu_to_le16(0);
 		nv->login_timeout = __constant_cpu_to_le16(0);
@@ -3577,7 +3670,7 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
 	}
 
 	/* Reset Initialization control block */
-	memset(icb, 0, sizeof(struct init_cb_24xx));
+	memset(icb, 0, ha->init_cb_size);
 
 	/* Copy 1st segment. */
 	dptr1 = (uint8_t *)icb;
@@ -3600,7 +3693,7 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
 	/*
 	 * Setup driver NVRAM options.
 	 */
-	qla2x00_set_model_info(ha, nv->model_name, sizeof(nv->model_name),
+	qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
 	    "QLA2462");
 
 	/* Use alternate WWN? */
@@ -3639,8 +3732,8 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
 	ha->serial0 = icb->port_name[5];
 	ha->serial1 = icb->port_name[6];
 	ha->serial2 = icb->port_name[7];
-	ha->node_name = icb->node_name;
-	ha->port_name = icb->port_name;
+	memcpy(vha->node_name, icb->node_name, WWN_SIZE);
+	memcpy(vha->port_name, icb->port_name, WWN_SIZE);
 
 	icb->execution_throttle = __constant_cpu_to_le16(0xFFFF);
 
@@ -3695,7 +3788,7 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
 		ha->login_retry_count = ql2xloginretrycount;
 
 	/* Enable ZIO. */
-	if (!ha->flags.init_done) {
+	if (!vha->flags.init_done) {
 		ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
 		    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
 		ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
@@ -3703,12 +3796,12 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
 	}
 	icb->firmware_options_2 &= __constant_cpu_to_le32(
 	    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
-	ha->flags.process_response_queue = 0;
+	vha->flags.process_response_queue = 0;
 	if (ha->zio_mode != QLA_ZIO_DISABLED) {
 		ha->zio_mode = QLA_ZIO_MODE_6;
 
 		DEBUG2(printk("scsi(%ld): ZIO mode %d enabled; timer delay "
-		    "(%d us).\n", ha->host_no, ha->zio_mode,
+		    "(%d us).\n", vha->host_no, ha->zio_mode,
 		    ha->zio_timer * 100));
 		qla_printk(KERN_INFO, ha,
 		    "ZIO mode %d enabled; timer delay (%d us).\n",
@@ -3717,36 +3810,37 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
 		icb->firmware_options_2 |= cpu_to_le32(
 		    (uint32_t)ha->zio_mode);
 		icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
-		ha->flags.process_response_queue = 1;
+		vha->flags.process_response_queue = 1;
 	}
 
 	if (rval) {
 		DEBUG2_3(printk(KERN_WARNING
-		    "scsi(%ld): NVRAM configuration failed!\n", ha->host_no));
+		    "scsi(%ld): NVRAM configuration failed!\n", vha->host_no));
 	}
 	return (rval);
 }
 
 static int
-qla24xx_load_risc_flash(scsi_qla_host_t *ha, uint32_t *srisc_addr)
+qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr)
 {
-	int	rval;
+	int	rval = QLA_SUCCESS;
 	int	segments, fragment;
 	uint32_t faddr;
 	uint32_t *dcode, dlen;
 	uint32_t risc_addr;
 	uint32_t risc_size;
 	uint32_t i;
-
+	struct qla_hw_data *ha = vha->hw;
+	struct req_que *req = ha->req_q_map[0];
 	rval = QLA_SUCCESS;
 
 	segments = FA_RISC_CODE_SEGMENTS;
 	faddr = ha->flt_region_fw;
-	dcode = (uint32_t *)ha->request_ring;
+	dcode = (uint32_t *)req->ring;
 	*srisc_addr = 0;
 
 	/* Validate firmware image by checking version. */
-	qla24xx_read_flash_data(ha, dcode, faddr + 4, 4);
+	qla24xx_read_flash_data(vha, dcode, faddr + 4, 4);
 	for (i = 0; i < 4; i++)
 		dcode[i] = be32_to_cpu(dcode[i]);
 	if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff &&
@@ -3764,7 +3858,7 @@ qla24xx_load_risc_flash(scsi_qla_host_t *ha, uint32_t *srisc_addr)
 
 	while (segments && rval == QLA_SUCCESS) {
 		/* Read segment's load information. */
-		qla24xx_read_flash_data(ha, dcode, faddr, 4);
+		qla24xx_read_flash_data(vha, dcode, faddr, 4);
 
 		risc_addr = be32_to_cpu(dcode[2]);
 		*srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr;
@@ -3778,17 +3872,17 @@ qla24xx_load_risc_flash(scsi_qla_host_t *ha, uint32_t *srisc_addr)
 
 			DEBUG7(printk("scsi(%ld): Loading risc segment@ risc "
 			    "addr %x, number of dwords 0x%x, offset 0x%x.\n",
-			    ha->host_no, risc_addr, dlen, faddr));
+			    vha->host_no, risc_addr, dlen, faddr));
 
-			qla24xx_read_flash_data(ha, dcode, faddr, dlen);
+			qla24xx_read_flash_data(vha, dcode, faddr, dlen);
 			for (i = 0; i < dlen; i++)
 				dcode[i] = swab32(dcode[i]);
 
-			rval = qla2x00_load_ram(ha, ha->request_dma, risc_addr,
+			rval = qla2x00_load_ram(vha, req->dma, risc_addr,
 			    dlen);
 			if (rval) {
 				DEBUG(printk("scsi(%ld):[ERROR] Failed to load "
-				    "segment %d of firmware\n", ha->host_no,
+				    "segment %d of firmware\n", vha->host_no,
 				    fragment));
 				qla_printk(KERN_WARNING, ha,
 				    "[ERROR] Failed to load segment %d of "
@@ -3812,16 +3906,18 @@ qla24xx_load_risc_flash(scsi_qla_host_t *ha, uint32_t *srisc_addr)
 #define QLA_FW_URL "ftp://ftp.qlogic.com/outgoing/linux/firmware/"
 
 int
-qla2x00_load_risc(scsi_qla_host_t *ha, uint32_t *srisc_addr)
+qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
 {
 	int	rval;
 	int	i, fragment;
 	uint16_t *wcode, *fwcode;
 	uint32_t risc_addr, risc_size, fwclen, wlen, *seg;
 	struct fw_blob *blob;
+	struct qla_hw_data *ha = vha->hw;
+	struct req_que *req = ha->req_q_map[0];
 
 	/* Load firmware blob. */
-	blob = qla2x00_request_firmware(ha);
+	blob = qla2x00_request_firmware(vha);
 	if (!blob) {
 		qla_printk(KERN_ERR, ha, "Firmware image unavailable.\n");
 		qla_printk(KERN_ERR, ha, "Firmware images can be retrieved "
@@ -3831,7 +3927,7 @@ qla2x00_load_risc(scsi_qla_host_t *ha, uint32_t *srisc_addr)
 
 	rval = QLA_SUCCESS;
 
-	wcode = (uint16_t *)ha->request_ring;
+	wcode = (uint16_t *)req->ring;
 	*srisc_addr = 0;
 	fwcode = (uint16_t *)blob->fw->data;
 	fwclen = 0;
@@ -3878,17 +3974,17 @@ qla2x00_load_risc(scsi_qla_host_t *ha, uint32_t *srisc_addr)
 				wlen = risc_size;
 
 			DEBUG7(printk("scsi(%ld): Loading risc segment@ risc "
-			    "addr %x, number of words 0x%x.\n", ha->host_no,
+			    "addr %x, number of words 0x%x.\n", vha->host_no,
 			    risc_addr, wlen));
 
 			for (i = 0; i < wlen; i++)
 				wcode[i] = swab16(fwcode[i]);
 
-			rval = qla2x00_load_ram(ha, ha->request_dma, risc_addr,
+			rval = qla2x00_load_ram(vha, req->dma, risc_addr,
 			    wlen);
 			if (rval) {
 				DEBUG(printk("scsi(%ld):[ERROR] Failed to load "
-				    "segment %d of firmware\n", ha->host_no,
+				    "segment %d of firmware\n", vha->host_no,
 				    fragment));
 				qla_printk(KERN_WARNING, ha,
 				    "[ERROR] Failed to load segment %d of "
@@ -3912,7 +4008,7 @@ fail_fw_integrity:
 }
 
 int
-qla24xx_load_risc(scsi_qla_host_t *ha, uint32_t *srisc_addr)
+qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
 {
 	int	rval;
 	int	segments, fragment;
@@ -3922,9 +4018,11 @@ qla24xx_load_risc(scsi_qla_host_t *ha, uint32_t *srisc_addr)
 	uint32_t i;
 	struct fw_blob *blob;
 	uint32_t *fwcode, fwclen;
+	struct qla_hw_data *ha = vha->hw;
+	struct req_que *req = ha->req_q_map[0];
 
 	/* Load firmware blob. */
-	blob = qla2x00_request_firmware(ha);
+	blob = qla2x00_request_firmware(vha);
 	if (!blob) {
 		qla_printk(KERN_ERR, ha, "Firmware image unavailable.\n");
 		qla_printk(KERN_ERR, ha, "Firmware images can be retrieved "
@@ -3933,13 +4031,13 @@ qla24xx_load_risc(scsi_qla_host_t *ha, uint32_t *srisc_addr)
 		/* Try to load RISC code from flash. */
 		qla_printk(KERN_ERR, ha, "Attempting to load (potentially "
 		    "outdated) firmware from flash.\n");
-		return qla24xx_load_risc_flash(ha, srisc_addr);
+		return qla24xx_load_risc_flash(vha, srisc_addr);
 	}
 
 	rval = QLA_SUCCESS;
 
 	segments = FA_RISC_CODE_SEGMENTS;
-	dcode = (uint32_t *)ha->request_ring;
+	dcode = (uint32_t *)req->ring;
 	*srisc_addr = 0;
 	fwcode = (uint32_t *)blob->fw->data;
 	fwclen = 0;
@@ -3987,17 +4085,17 @@ qla24xx_load_risc(scsi_qla_host_t *ha, uint32_t *srisc_addr)
 				dlen = risc_size;
 
 			DEBUG7(printk("scsi(%ld): Loading risc segment@ risc "
-			    "addr %x, number of dwords 0x%x.\n", ha->host_no,
+			    "addr %x, number of dwords 0x%x.\n", vha->host_no,
 			    risc_addr, dlen));
 
 			for (i = 0; i < dlen; i++)
 				dcode[i] = swab32(fwcode[i]);
 
-			rval = qla2x00_load_ram(ha, ha->request_dma, risc_addr,
+			rval = qla2x00_load_ram(vha, req->dma, risc_addr,
 			    dlen);
 			if (rval) {
 				DEBUG(printk("scsi(%ld):[ERROR] Failed to load "
-				    "segment %d of firmware\n", ha->host_no,
+				    "segment %d of firmware\n", vha->host_no,
 				    fragment));
 				qla_printk(KERN_WARNING, ha,
 				    "[ERROR] Failed to load segment %d of "
@@ -4021,49 +4119,53 @@ fail_fw_integrity:
 }
 
 void
-qla2x00_try_to_stop_firmware(scsi_qla_host_t *ha)
+qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
 {
 	int ret, retries;
+	struct qla_hw_data *ha = vha->hw;
 
 	if (!IS_FWI2_CAPABLE(ha))
 		return;
 	if (!ha->fw_major_version)
 		return;
 
-	ret = qla2x00_stop_firmware(ha);
+	ret = qla2x00_stop_firmware(vha);
 	for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT &&
 	    retries ; retries--) {
-		ha->isp_ops->reset_chip(ha);
-		if (ha->isp_ops->chip_diag(ha) != QLA_SUCCESS)
+		ha->isp_ops->reset_chip(vha);
+		if (ha->isp_ops->chip_diag(vha) != QLA_SUCCESS)
 			continue;
-		if (qla2x00_setup_chip(ha) != QLA_SUCCESS)
+		if (qla2x00_setup_chip(vha) != QLA_SUCCESS)
 			continue;
 		qla_printk(KERN_INFO, ha,
 		    "Attempting retry of stop-firmware command...\n");
-		ret = qla2x00_stop_firmware(ha);
+		ret = qla2x00_stop_firmware(vha);
 	}
 }
 
 int
-qla24xx_configure_vhba(scsi_qla_host_t *ha)
+qla24xx_configure_vhba(scsi_qla_host_t *vha)
 {
 	int rval = QLA_SUCCESS;
 	uint16_t mb[MAILBOX_REGISTER_COUNT];
+	struct qla_hw_data *ha = vha->hw;
+	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+	struct req_que *req = ha->req_q_map[0];
+	struct rsp_que *rsp = ha->rsp_q_map[0];
 
-	if (!ha->parent)
+	if (!vha->vp_idx)
 		return -EINVAL;
 
-	rval = qla2x00_fw_ready(ha->parent);
+	rval = qla2x00_fw_ready(base_vha);
 	if (rval == QLA_SUCCESS) {
-		clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
-		qla2x00_marker(ha, 0, 0, MK_SYNC_ALL);
+		clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
+		qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
 	}
 
-	ha->flags.management_server_logged_in = 0;
+	vha->flags.management_server_logged_in = 0;
 
 	/* Login to SNS first */
-	qla24xx_login_fabric(ha->parent, NPH_SNS, 0xff, 0xff, 0xfc,
-	    mb, BIT_1);
+	ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb, BIT_1);
 	if (mb[0] != MBS_COMMAND_COMPLETE) {
 		DEBUG15(qla_printk(KERN_INFO, ha,
 		    "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x "
@@ -4072,11 +4174,11 @@ qla24xx_configure_vhba(scsi_qla_host_t *ha)
 		return (QLA_FUNCTION_FAILED);
 	}
 
-	atomic_set(&ha->loop_down_timer, 0);
-	atomic_set(&ha->loop_state, LOOP_UP);
-	set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
-	set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
-	rval = qla2x00_loop_resync(ha->parent);
+	atomic_set(&vha->loop_down_timer, 0);
+	atomic_set(&vha->loop_state, LOOP_UP);
+	set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+	set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+	rval = qla2x00_loop_resync(base_vha);
 
 	return rval;
 }
@@ -4087,9 +4189,10 @@ static LIST_HEAD(qla_cs84xx_list);
 static DEFINE_MUTEX(qla_cs84xx_mutex);
 
 static struct qla_chip_state_84xx *
-qla84xx_get_chip(struct scsi_qla_host *ha)
+qla84xx_get_chip(struct scsi_qla_host *vha)
 {
 	struct qla_chip_state_84xx *cs84xx;
+	struct qla_hw_data *ha = vha->hw;
 
 	mutex_lock(&qla_cs84xx_mutex);
 
@@ -4129,21 +4232,23 @@ __qla84xx_chip_release(struct kref *kref)
 }
 
 void
-qla84xx_put_chip(struct scsi_qla_host *ha)
+qla84xx_put_chip(struct scsi_qla_host *vha)
 {
+	struct qla_hw_data *ha = vha->hw;
 	if (ha->cs84xx)
 		kref_put(&ha->cs84xx->kref, __qla84xx_chip_release);
 }
 
 static int
-qla84xx_init_chip(scsi_qla_host_t *ha)
+qla84xx_init_chip(scsi_qla_host_t *vha)
 {
 	int rval;
 	uint16_t status[2];
+	struct qla_hw_data *ha = vha->hw;
 
 	mutex_lock(&ha->cs84xx->fw_update_mutex);
 
-	rval = qla84xx_verify_chip(ha, status);
+	rval = qla84xx_verify_chip(vha, status);
 
 	mutex_unlock(&ha->cs84xx->fw_update_mutex);
 
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index e90afad120ee..5e0a7095c9f2 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -32,47 +32,15 @@ qla2x00_debounce_register(volatile uint16_t __iomem *addr)
 }
 
 static inline void
-qla2x00_poll(scsi_qla_host_t *ha)
+qla2x00_poll(struct rsp_que *rsp)
 {
 	unsigned long flags;
-
+	struct qla_hw_data *ha = rsp->hw;
 	local_irq_save(flags);
-	ha->isp_ops->intr_handler(0, ha);
+	ha->isp_ops->intr_handler(0, rsp);
 	local_irq_restore(flags);
 }
 
-static __inline__ scsi_qla_host_t *
-to_qla_parent(scsi_qla_host_t *ha)
-{
-	return ha->parent ? ha->parent : ha;
-}
-
-/**
- * qla2x00_issue_marker() - Issue a Marker IOCB if necessary.
- * @ha: HA context
- * @ha_locked: is function called with the hardware lock
- *
- * Returns non-zero if a failure occurred, else zero.
- */
-static inline int
-qla2x00_issue_marker(scsi_qla_host_t *ha, int ha_locked)
-{
-	/* Send marker if required */
-	if (ha->marker_needed != 0) {
-		if (ha_locked) {
-			if (__qla2x00_marker(ha, 0, 0, MK_SYNC_ALL) !=
-			    QLA_SUCCESS)
-				return (QLA_FUNCTION_FAILED);
-		} else {
-			if (qla2x00_marker(ha, 0, 0, MK_SYNC_ALL) !=
-			    QLA_SUCCESS)
-				return (QLA_FUNCTION_FAILED);
-		}
-		ha->marker_needed = 0;
-	}
-	return (QLA_SUCCESS);
-}
-
 static inline uint8_t *
 host_to_fcp_swap(uint8_t *fcp, uint32_t bsize)
 {
@@ -87,11 +55,12 @@ host_to_fcp_swap(uint8_t *fcp, uint32_t bsize)
 }
 
 static inline int
-qla2x00_is_reserved_id(scsi_qla_host_t *ha, uint16_t loop_id)
+qla2x00_is_reserved_id(scsi_qla_host_t *vha, uint16_t loop_id)
 {
+	struct qla_hw_data *ha = vha->hw;
 	if (IS_FWI2_CAPABLE(ha))
 		return (loop_id > NPH_LAST_HANDLE);
 
-	return ((loop_id > ha->last_loop_id && loop_id < SNS_FIRST_LOOP_ID) ||
+	return ((loop_id > ha->max_loop_id && loop_id < SNS_FIRST_LOOP_ID) ||
 	    loop_id == MANAGEMENT_SERVER || loop_id == BROADCAST);
-};
+}
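
[Annotation] Two things fall out of the qla_inline.h hunk: qla2x00_poll() is now keyed off a response queue rather than a host, and to_qla_parent() disappears because a queue carries its own back-pointer to the shared hardware state, so the interrupt path never needs a host argument. A sketch of the back-pointer pattern, types reduced to the essentials:

    #include <stdio.h>

    struct hw;                           /* forward declaration */

    struct rsp_que {
    	int id;
    	struct hw *hw;               /* queue -> shared hw back-pointer */
    };

    struct hw {
    	void (*intr_handler)(int irq, struct rsp_que *rsp);
    };

    static void fake_intr(int irq, struct rsp_que *rsp)
    {
    	printf("irq %d serviced on rsp queue %d\n", irq, rsp->id);
    }

    /* Poll one response queue; the hw is reachable from the queue itself. */
    static void poll(struct rsp_que *rsp)
    {
    	rsp->hw->intr_handler(0, rsp);
    }

    int main(void)
    {
    	struct hw ha = { .intr_handler = fake_intr };
    	struct rsp_que rsp = { .id = 0, .hw = &ha };

    	poll(&rsp);
    	return 0;
    }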
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 85bc0a48598b..5bedc9d05942 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -11,8 +11,9 @@
 
 #include <scsi/scsi_tcq.h>
 
-static request_t *qla2x00_req_pkt(scsi_qla_host_t *ha);
-static void qla2x00_isp_cmd(scsi_qla_host_t *ha);
+static request_t *qla2x00_req_pkt(struct scsi_qla_host *, struct req_que *,
+							struct rsp_que *rsp);
+static void qla2x00_isp_cmd(struct scsi_qla_host *, struct req_que *);
 
 /**
  * qla2x00_get_cmd_direction() - Determine control_flag data direction.
@@ -30,11 +31,11 @@ qla2x00_get_cmd_direction(srb_t *sp)
 	/* Set transfer direction */
 	if (sp->cmd->sc_data_direction == DMA_TO_DEVICE) {
 		cflags = CF_WRITE;
-		sp->fcport->ha->qla_stats.output_bytes +=
+		sp->fcport->vha->hw->qla_stats.output_bytes +=
 		    scsi_bufflen(sp->cmd);
 	} else if (sp->cmd->sc_data_direction == DMA_FROM_DEVICE) {
 		cflags = CF_READ;
-		sp->fcport->ha->qla_stats.input_bytes +=
+		sp->fcport->vha->hw->qla_stats.input_bytes +=
 		    scsi_bufflen(sp->cmd);
 	}
 	return (cflags);
@@ -91,20 +92,19 @@ qla2x00_calc_iocbs_64(uint16_t dsds)
  * Returns a pointer to the Continuation Type 0 IOCB packet.
  */
 static inline cont_entry_t *
-qla2x00_prep_cont_type0_iocb(scsi_qla_host_t *ha)
+qla2x00_prep_cont_type0_iocb(struct req_que *req, struct scsi_qla_host *vha)
 {
 	cont_entry_t *cont_pkt;
-
 	/* Adjust ring index. */
-	ha->req_ring_index++;
-	if (ha->req_ring_index == ha->request_q_length) {
-		ha->req_ring_index = 0;
-		ha->request_ring_ptr = ha->request_ring;
+	req->ring_index++;
+	if (req->ring_index == req->length) {
+		req->ring_index = 0;
+		req->ring_ptr = req->ring;
 	} else {
-		ha->request_ring_ptr++;
+		req->ring_ptr++;
 	}
 
-	cont_pkt = (cont_entry_t *)ha->request_ring_ptr;
+	cont_pkt = (cont_entry_t *)req->ring_ptr;
 
 	/* Load packet defaults. */
 	*((uint32_t *)(&cont_pkt->entry_type)) =
@@ -120,20 +120,20 @@ qla2x00_prep_cont_type0_iocb(scsi_qla_host_t *ha)
  * Returns a pointer to the continuation type 1 IOCB packet.
  */
 static inline cont_a64_entry_t *
-qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *ha)
+qla2x00_prep_cont_type1_iocb(struct req_que *req, scsi_qla_host_t *vha)
 {
 	cont_a64_entry_t *cont_pkt;
 
 	/* Adjust ring index. */
-	ha->req_ring_index++;
-	if (ha->req_ring_index == ha->request_q_length) {
-		ha->req_ring_index = 0;
-		ha->request_ring_ptr = ha->request_ring;
+	req->ring_index++;
+	if (req->ring_index == req->length) {
+		req->ring_index = 0;
+		req->ring_ptr = req->ring;
 	} else {
-		ha->request_ring_ptr++;
+		req->ring_ptr++;
 	}
 
-	cont_pkt = (cont_a64_entry_t *)ha->request_ring_ptr;
+	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
 
 	/* Load packet defaults. */
 	*((uint32_t *)(&cont_pkt->entry_type)) =
@@ -155,10 +155,11 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
 {
 	uint16_t	avail_dsds;
 	uint32_t	*cur_dsd;
-	scsi_qla_host_t	*ha;
+	scsi_qla_host_t	*vha;
 	struct scsi_cmnd *cmd;
 	struct scatterlist *sg;
 	int i;
+	struct req_que *req;
 
 	cmd = sp->cmd;
 
@@ -172,7 +173,8 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
 		return;
 	}
 
-	ha = sp->ha;
+	vha = sp->vha;
+	req = sp->que;
 
 	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
 
@@ -190,7 +192,7 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
 			 * Seven DSDs are available in the Continuation
 			 * Type 0 IOCB.
 			 */
-			cont_pkt = qla2x00_prep_cont_type0_iocb(ha);
+			cont_pkt = qla2x00_prep_cont_type0_iocb(req, vha);
 			cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
 			avail_dsds = 7;
 		}
@@ -214,10 +216,11 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
 {
 	uint16_t	avail_dsds;
 	uint32_t	*cur_dsd;
-	scsi_qla_host_t	*ha;
+	scsi_qla_host_t	*vha;
 	struct scsi_cmnd *cmd;
 	struct scatterlist *sg;
 	int i;
+	struct req_que *req;
 
 	cmd = sp->cmd;
 
@@ -231,7 +234,8 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
 		return;
 	}
 
-	ha = sp->ha;
+	vha = sp->vha;
+	req = sp->que;
 
 	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
 
@@ -250,7 +254,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
 			 * Five DSDs are available in the Continuation
 			 * Type 1 IOCB.
 			 */
-			cont_pkt = qla2x00_prep_cont_type1_iocb(ha);
+			cont_pkt = qla2x00_prep_cont_type1_iocb(req, vha);
 			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
 			avail_dsds = 5;
 		}
@@ -274,7 +278,7 @@ qla2x00_start_scsi(srb_t *sp)
 {
 	int		ret, nseg;
 	unsigned long   flags;
-	scsi_qla_host_t	*ha;
+	scsi_qla_host_t	*vha;
 	struct scsi_cmnd *cmd;
 	uint32_t	*clr_ptr;
 	uint32_t        index;
@@ -284,33 +288,39 @@ qla2x00_start_scsi(srb_t *sp)
 	uint16_t	req_cnt;
 	uint16_t	tot_dsds;
 	struct device_reg_2xxx __iomem *reg;
+	struct qla_hw_data *ha;
+	struct req_que *req;
+	struct rsp_que *rsp;
 
 	/* Setup device pointers. */
 	ret = 0;
-	ha = sp->ha;
+	vha = sp->vha;
+	ha = vha->hw;
 	reg = &ha->iobase->isp;
 	cmd = sp->cmd;
+	req = ha->req_q_map[0];
+	rsp = ha->rsp_q_map[0];
 	/* So we know we haven't pci_map'ed anything yet */
 	tot_dsds = 0;
 
 	/* Send marker if required */
-	if (ha->marker_needed != 0) {
-		if (qla2x00_marker(ha, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
+	if (vha->marker_needed != 0) {
+		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL)
+							!= QLA_SUCCESS)
 			return (QLA_FUNCTION_FAILED);
-		}
-		ha->marker_needed = 0;
+		vha->marker_needed = 0;
 	}
 
 	/* Acquire ring specific lock */
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 
 	/* Check for room in outstanding command list. */
-	handle = ha->current_outstanding_cmd;
+	handle = req->current_outstanding_cmd;
 	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
 		handle++;
 		if (handle == MAX_OUTSTANDING_COMMANDS)
 			handle = 1;
-		if (!ha->outstanding_cmds[handle])
+		if (!req->outstanding_cmds[handle])
 			break;
 	}
 	if (index == MAX_OUTSTANDING_COMMANDS)
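
The loop above hunts for a free slot in the fixed outstanding-command
table, resuming just past the last handle issued; slot 0 is never
handed out. Reduced sketch (MAX_SLOTS and the table are hypothetical):

#define MAX_SLOTS 1024

static unsigned int find_free_handle(void *slots[MAX_SLOTS],
				     unsigned int last_used)
{
	unsigned int i, handle = last_used;

	for (i = 1; i < MAX_SLOTS; i++) {
		if (++handle == MAX_SLOTS)
			handle = 1;		/* skip reserved slot 0 */
		if (!slots[handle])
			return handle;		/* first free slot */
	}
	return 0;				/* table exhausted */
}
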
@@ -329,25 +339,26 @@ qla2x00_start_scsi(srb_t *sp)
 
 	/* Calculate the number of request entries needed. */
 	req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
-	if (ha->req_q_cnt < (req_cnt + 2)) {
+	if (req->cnt < (req_cnt + 2)) {
 		cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
-		if (ha->req_ring_index < cnt)
-			ha->req_q_cnt = cnt - ha->req_ring_index;
+		if (req->ring_index < cnt)
+			req->cnt = cnt - req->ring_index;
 		else
-			ha->req_q_cnt = ha->request_q_length -
-			    (ha->req_ring_index - cnt);
+			req->cnt = req->length -
+			    (req->ring_index - cnt);
 	}
-	if (ha->req_q_cnt < (req_cnt + 2))
+	if (req->cnt < (req_cnt + 2))
 		goto queuing_error;
 
 	/* Build command packet */
-	ha->current_outstanding_cmd = handle;
-	ha->outstanding_cmds[handle] = sp;
-	sp->ha = ha;
+	req->current_outstanding_cmd = handle;
+	req->outstanding_cmds[handle] = sp;
+	sp->vha = vha;
+	sp->que = req;
 	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
-	ha->req_q_cnt -= req_cnt;
+	req->cnt -= req_cnt;
 
-	cmd_pkt = (cmd_entry_t *)ha->request_ring_ptr;
+	cmd_pkt = (cmd_entry_t *)req->ring_ptr;
 	cmd_pkt->handle = handle;
 	/* Zero out remaining portion of packet. */
 	clr_ptr = (uint32_t *)cmd_pkt + 2;
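
The req->cnt refill a few lines up recomputes the free ring entries
from the chip's consumer index and the driver's producer index, and the
caller keeps a two-entry safety margin on top (the req_cnt + 2 checks).
The arithmetic in isolation:

static unsigned int ring_free_entries(unsigned int in, unsigned int out,
				      unsigned int length)
{
	/* "in" is the driver's producer index, "out" the chip's consumer. */
	return (in < out) ? out - in : length - (in - out);
}
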
@@ -373,23 +384,23 @@ qla2x00_start_scsi(srb_t *sp)
 	wmb();
 
 	/* Adjust ring index. */
-	ha->req_ring_index++;
-	if (ha->req_ring_index == ha->request_q_length) {
-		ha->req_ring_index = 0;
-		ha->request_ring_ptr = ha->request_ring;
+	req->ring_index++;
+	if (req->ring_index == req->length) {
+		req->ring_index = 0;
+		req->ring_ptr = req->ring;
 	} else
-		ha->request_ring_ptr++;
+		req->ring_ptr++;
 
 	sp->flags |= SRB_DMA_VALID;
 
 	/* Set chip new ring index. */
-	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), ha->req_ring_index);
+	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
 	RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */
 
 	/* Manage unprocessed RIO/ZIO commands in response queue. */
-	if (ha->flags.process_response_queue &&
-	    ha->response_ring_ptr->signature != RESPONSE_PROCESSED)
-		qla2x00_process_response_queue(ha);
+	if (vha->flags.process_response_queue &&
+	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
+		qla2x00_process_response_queue(rsp);
 
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 	return (QLA_SUCCESS);
@@ -415,18 +426,20 @@ queuing_error:
  * Returns non-zero if a failure occurred, else zero.
  */
 int
-__qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
-    uint8_t type)
+__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
+			struct rsp_que *rsp, uint16_t loop_id,
+			uint16_t lun, uint8_t type)
 {
 	mrk_entry_t *mrk;
 	struct mrk_entry_24xx *mrk24;
-	scsi_qla_host_t *pha = to_qla_parent(ha);
+	struct qla_hw_data *ha = vha->hw;
+	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
 
 	mrk24 = NULL;
-	mrk = (mrk_entry_t *)qla2x00_req_pkt(pha);
+	mrk = (mrk_entry_t *)qla2x00_req_pkt(vha, req, rsp);
 	if (mrk == NULL) {
 		DEBUG2_3(printk("%s(%ld): failed to allocate Marker IOCB.\n",
-		    __func__, ha->host_no));
+		    __func__, base_vha->host_no));
 
 		return (QLA_FUNCTION_FAILED);
 	}
@@ -440,7 +453,7 @@ __qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
 			mrk24->lun[1] = LSB(lun);
 			mrk24->lun[2] = MSB(lun);
 			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
-			mrk24->vp_index = ha->vp_idx;
+			mrk24->vp_index = vha->vp_idx;
 		} else {
 			SET_TARGET_ID(ha, mrk->target, loop_id);
 			mrk->lun = cpu_to_le16(lun);
@@ -448,22 +461,22 @@ __qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
 	}
 	wmb();
 
-	qla2x00_isp_cmd(pha);
+	qla2x00_isp_cmd(vha, req);
 
 	return (QLA_SUCCESS);
 }
 
 int
-qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
-    uint8_t type)
+qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
+		struct rsp_que *rsp, uint16_t loop_id, uint16_t lun,
+		uint8_t type)
 {
 	int ret;
 	unsigned long flags = 0;
-	scsi_qla_host_t *pha = to_qla_parent(ha);
 
-	spin_lock_irqsave(&pha->hardware_lock, flags);
-	ret = __qla2x00_marker(ha, loop_id, lun, type);
-	spin_unlock_irqrestore(&pha->hardware_lock, flags);
+	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
+	ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
+	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
 
 	return (ret);
 }
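
The marker pair above follows the usual __helper/wrapper split:
__qla2x00_marker() assumes hardware_lock is held, while qla2x00_marker()
takes and drops it around the call. The shape, sketched (kernel context
assumed; struct ctx and the op are hypothetical):

struct ctx {
	spinlock_t lock;
	/* ... hardware state ... */
};

static int __do_op(struct ctx *c)
{
	/* Caller must hold c->lock; safe to touch the hardware here. */
	return 0;
}

static int do_op(struct ctx *c)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&c->lock, flags);
	ret = __do_op(c);
	spin_unlock_irqrestore(&c->lock, flags);
	return ret;
}
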
@@ -477,9 +490,11 @@ qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
  * Returns NULL if function failed, else, a pointer to the request packet.
  */
 static request_t *
-qla2x00_req_pkt(scsi_qla_host_t *ha)
+qla2x00_req_pkt(struct scsi_qla_host *vha, struct req_que *req,
+		struct rsp_que *rsp)
 {
-	device_reg_t __iomem *reg = ha->iobase;
+	struct qla_hw_data *ha = vha->hw;
+	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
 	request_t	*pkt = NULL;
 	uint16_t	cnt;
 	uint32_t	*dword_ptr;
@@ -488,24 +503,29 @@ qla2x00_req_pkt(scsi_qla_host_t *ha)
 
 	/* Wait 1 second for slot. */
 	for (timer = HZ; timer; timer--) {
-		if ((req_cnt + 2) >= ha->req_q_cnt) {
+		if ((req_cnt + 2) >= req->cnt) {
 			/* Calculate number of free request entries. */
-			if (IS_FWI2_CAPABLE(ha))
-				cnt = (uint16_t)RD_REG_DWORD(
-				    &reg->isp24.req_q_out);
-			else
-				cnt = qla2x00_debounce_register(
-				    ISP_REQ_Q_OUT(ha, &reg->isp));
-			if  (ha->req_ring_index < cnt)
-				ha->req_q_cnt = cnt - ha->req_ring_index;
+			if (ha->mqenable)
+				cnt = (uint16_t)
+					RD_REG_DWORD(&reg->isp25mq.req_q_out);
+			else if (IS_FWI2_CAPABLE(ha))
+				cnt = (uint16_t)RD_REG_DWORD(
+					&reg->isp24.req_q_out);
+			else
+				cnt = qla2x00_debounce_register(
+					ISP_REQ_Q_OUT(ha, &reg->isp));
+			if (req->ring_index < cnt)
+				req->cnt = cnt - req->ring_index;
 			else
-				ha->req_q_cnt = ha->request_q_length -
-				    (ha->req_ring_index - cnt);
+				req->cnt = req->length -
+				    (req->ring_index - cnt);
 		}
 		/* If room for request in request ring. */
-		if ((req_cnt + 2) < ha->req_q_cnt) {
-			ha->req_q_cnt--;
-			pkt = ha->request_ring_ptr;
+		if ((req_cnt + 2) < req->cnt) {
+			req->cnt--;
+			pkt = req->ring_ptr;
 
 			/* Zero out packet. */
 			dword_ptr = (uint32_t *)pkt;
@@ -513,7 +533,7 @@ qla2x00_req_pkt(scsi_qla_host_t *ha)
 				*dword_ptr++ = 0;
 
 			/* Set system defined field. */
-			pkt->sys_define = (uint8_t)ha->req_ring_index;
+			pkt->sys_define = (uint8_t)req->ring_index;
 
 			/* Set entry count. */
 			pkt->entry_count = 1;
@@ -522,15 +542,14 @@ qla2x00_req_pkt(scsi_qla_host_t *ha)
 		}
 
 		/* Release ring specific lock */
-		spin_unlock(&ha->hardware_lock);
+		spin_unlock_irq(&ha->hardware_lock);
 
 		udelay(2);   /* 2 us */
 
 		/* Check for pending interrupts. */
 		/* During init we issue marker directly */
-		if (!ha->marker_needed && !ha->flags.init_done)
-			qla2x00_poll(ha);
-
+		if (!vha->marker_needed && !vha->flags.init_done)
+			qla2x00_poll(rsp);
 		spin_lock_irq(&ha->hardware_lock);
 	}
 	if (!pkt) {
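
qla2x00_req_pkt() above waits up to a second for ring space, dropping
the hardware lock between probes so completions can drain entries; note
the unlock now pairs with the spin_lock_irq() used to re-take the lock.
The loop's shape, reduced (kernel context; have_room(), claim_entry()
and poll_completions() are hypothetical stand-ins):

static void *wait_for_slot(spinlock_t *lock)
{
	void *pkt = NULL;
	int timer;

	for (timer = HZ; timer; timer--) {
		if (have_room()) {
			pkt = claim_entry();	/* still under the lock */
			break;
		}
		spin_unlock_irq(lock);		/* let completions land */
		udelay(2);
		poll_completions();		/* drain the response ring */
		spin_lock_irq(lock);
	}
	return pkt;				/* NULL on timeout */
}
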
@@ -547,29 +566,38 @@ qla2x00_req_pkt(scsi_qla_host_t *ha)
  * Note: The caller must hold the hardware lock before calling this routine.
  */
 static void
-qla2x00_isp_cmd(scsi_qla_host_t *ha)
+qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req)
 {
-	device_reg_t __iomem *reg = ha->iobase;
+	struct qla_hw_data *ha = vha->hw;
+	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
+	struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
 
 	DEBUG5(printk("%s(): IOCB data:\n", __func__));
 	DEBUG5(qla2x00_dump_buffer(
-	    (uint8_t *)ha->request_ring_ptr, REQUEST_ENTRY_SIZE));
+	    (uint8_t *)req->ring_ptr, REQUEST_ENTRY_SIZE));
 
 	/* Adjust ring index. */
-	ha->req_ring_index++;
-	if (ha->req_ring_index == ha->request_q_length) {
-		ha->req_ring_index = 0;
-		ha->request_ring_ptr = ha->request_ring;
+	req->ring_index++;
+	if (req->ring_index == req->length) {
+		req->ring_index = 0;
+		req->ring_ptr = req->ring;
 	} else
-		ha->request_ring_ptr++;
+		req->ring_ptr++;
 
 	/* Set chip new ring index. */
-	if (IS_FWI2_CAPABLE(ha)) {
-		WRT_REG_DWORD(&reg->isp24.req_q_in, ha->req_ring_index);
-		RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
-	} else {
-		WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp), ha->req_ring_index);
-		RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
+	if (ha->mqenable) {
+		WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
+		RD_REG_DWORD(&ioreg->hccr);
+	} else if (IS_FWI2_CAPABLE(ha)) {
+		WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
+		RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
+	} else {
+		WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
+			req->ring_index);
+		RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
 	}
 
 }
@@ -610,10 +638,11 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
 {
 	uint16_t	avail_dsds;
 	uint32_t	*cur_dsd;
-	scsi_qla_host_t	*ha;
+	scsi_qla_host_t	*vha;
 	struct scsi_cmnd *cmd;
 	struct scatterlist *sg;
 	int i;
+	struct req_que *req;
 
 	cmd = sp->cmd;
 
@@ -627,18 +656,19 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
 		return;
 	}
 
-	ha = sp->ha;
+	vha = sp->vha;
+	req = sp->que;
 
 	/* Set transfer direction */
 	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
 		cmd_pkt->task_mgmt_flags =
 		    __constant_cpu_to_le16(TMF_WRITE_DATA);
-		sp->fcport->ha->qla_stats.output_bytes +=
+		sp->fcport->vha->hw->qla_stats.output_bytes +=
 		    scsi_bufflen(sp->cmd);
 	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
 		cmd_pkt->task_mgmt_flags =
 		    __constant_cpu_to_le16(TMF_READ_DATA);
-		sp->fcport->ha->qla_stats.input_bytes +=
+		sp->fcport->vha->hw->qla_stats.input_bytes +=
 		    scsi_bufflen(sp->cmd);
 	}
 
@@ -658,7 +688,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
 			 * Five DSDs are available in the Continuation
 			 * Type 1 IOCB.
 			 */
-			cont_pkt = qla2x00_prep_cont_type1_iocb(ha);
+			cont_pkt = qla2x00_prep_cont_type1_iocb(req, vha);
 			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
 			avail_dsds = 5;
 		}
@@ -683,8 +713,6 @@ qla24xx_start_scsi(srb_t *sp)
 {
 	int		ret, nseg;
 	unsigned long   flags;
-	scsi_qla_host_t	*ha, *pha;
-	struct scsi_cmnd *cmd;
 	uint32_t	*clr_ptr;
 	uint32_t        index;
 	uint32_t	handle;
@@ -692,35 +720,45 @@ qla24xx_start_scsi(srb_t *sp)
 	uint16_t	cnt;
 	uint16_t	req_cnt;
 	uint16_t	tot_dsds;
-	struct device_reg_24xx __iomem *reg;
+	struct req_que *req = NULL;
+	struct rsp_que *rsp = NULL;
+	struct scsi_cmnd *cmd = sp->cmd;
+	struct scsi_qla_host *vha = sp->vha;
+	struct qla_hw_data *ha = vha->hw;
+	uint16_t que_id;
 
 	/* Setup device pointers. */
 	ret = 0;
-	ha = sp->ha;
-	pha = to_qla_parent(ha);
-	reg = &ha->iobase->isp24;
-	cmd = sp->cmd;
+	que_id = vha->req_ques[0];
+
+	req = ha->req_q_map[que_id];
+	sp->que = req;
+
+	if (req->rsp)
+		rsp = req->rsp;
+	else
+		rsp = ha->rsp_q_map[que_id];
 	/* So we know we haven't pci_map'ed anything yet */
 	tot_dsds = 0;
 
 	/* Send marker if required */
-	if (ha->marker_needed != 0) {
-		if (qla2x00_marker(ha, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
+	if (vha->marker_needed != 0) {
+		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL)
+							!= QLA_SUCCESS)
 			return QLA_FUNCTION_FAILED;
-		}
-		ha->marker_needed = 0;
+		vha->marker_needed = 0;
 	}
 
 	/* Acquire ring specific lock */
-	spin_lock_irqsave(&pha->hardware_lock, flags);
+	spin_lock_irqsave(&ha->hardware_lock, flags);
 
 	/* Check for room in outstanding command list. */
-	handle = ha->current_outstanding_cmd;
+	handle = req->current_outstanding_cmd;
 	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
 		handle++;
 		if (handle == MAX_OUTSTANDING_COMMANDS)
 			handle = 1;
-		if (!ha->outstanding_cmds[handle])
+		if (!req->outstanding_cmds[handle])
 			break;
 	}
 	if (index == MAX_OUTSTANDING_COMMANDS)
@@ -738,25 +776,26 @@ qla24xx_start_scsi(srb_t *sp)
 	tot_dsds = nseg;
 
 	req_cnt = qla24xx_calc_iocbs(tot_dsds);
-	if (ha->req_q_cnt < (req_cnt + 2)) {
-		cnt = (uint16_t)RD_REG_DWORD_RELAXED(&reg->req_q_out);
-		if (ha->req_ring_index < cnt)
-			ha->req_q_cnt = cnt - ha->req_ring_index;
+	if (req->cnt < (req_cnt + 2)) {
+		cnt = ha->isp_ops->rd_req_reg(ha, req->id);
+
+		if (req->ring_index < cnt)
+			req->cnt = cnt - req->ring_index;
 		else
-			ha->req_q_cnt = ha->request_q_length -
-				(ha->req_ring_index - cnt);
+			req->cnt = req->length -
+				(req->ring_index - cnt);
 	}
-	if (ha->req_q_cnt < (req_cnt + 2))
+	if (req->cnt < (req_cnt + 2))
 		goto queuing_error;
 
 	/* Build command packet. */
-	ha->current_outstanding_cmd = handle;
-	ha->outstanding_cmds[handle] = sp;
-	sp->ha = ha;
+	req->current_outstanding_cmd = handle;
+	req->outstanding_cmds[handle] = sp;
+	sp->vha = vha;
 	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
-	ha->req_q_cnt -= req_cnt;
+	req->cnt -= req_cnt;
 
-	cmd_pkt = (struct cmd_type_7 *)ha->request_ring_ptr;
+	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
 	cmd_pkt->handle = handle;
 
 	/* Zero out remaining portion of packet. */
@@ -789,32 +828,63 @@ qla24xx_start_scsi(srb_t *sp)
 	wmb();
 
 	/* Adjust ring index. */
-	ha->req_ring_index++;
-	if (ha->req_ring_index == ha->request_q_length) {
-		ha->req_ring_index = 0;
-		ha->request_ring_ptr = ha->request_ring;
+	req->ring_index++;
+	if (req->ring_index == req->length) {
+		req->ring_index = 0;
+		req->ring_ptr = req->ring;
 	} else
-		ha->request_ring_ptr++;
+		req->ring_ptr++;
 
 	sp->flags |= SRB_DMA_VALID;
 
 	/* Set chip new ring index. */
-	WRT_REG_DWORD(&reg->req_q_in, ha->req_ring_index);
-	RD_REG_DWORD_RELAXED(&reg->req_q_in);		/* PCI Posting. */
+	ha->isp_ops->wrt_req_reg(ha, req->id, req->ring_index);
 
 	/* Manage unprocessed RIO/ZIO commands in response queue. */
-	if (ha->flags.process_response_queue &&
-	    ha->response_ring_ptr->signature != RESPONSE_PROCESSED)
-		qla24xx_process_response_queue(ha);
+	if (vha->flags.process_response_queue &&
+		rsp->ring_ptr->signature != RESPONSE_PROCESSED)
+		qla24xx_process_response_queue(rsp);
 
-	spin_unlock_irqrestore(&pha->hardware_lock, flags);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 	return QLA_SUCCESS;
 
 queuing_error:
 	if (tot_dsds)
 		scsi_dma_unmap(cmd);
 
-	spin_unlock_irqrestore(&pha->hardware_lock, flags);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
 	return QLA_FUNCTION_FAILED;
 }
+
+uint16_t
+qla24xx_rd_req_reg(struct qla_hw_data *ha, uint16_t id)
+{
+	device_reg_t __iomem *reg = (void *) ha->iobase;
+	return RD_REG_DWORD_RELAXED(&reg->isp24.req_q_out);
+}
+
+uint16_t
+qla25xx_rd_req_reg(struct qla_hw_data *ha, uint16_t id)
+{
+	device_reg_t __iomem *reg = (void *) ha->mqiobase + QLA_QUE_PAGE * id;
+	return RD_REG_DWORD_RELAXED(&reg->isp25mq.req_q_out);
+}
+
+void
+qla24xx_wrt_req_reg(struct qla_hw_data *ha, uint16_t id, uint16_t index)
+{
+	device_reg_t __iomem *reg = (void *) ha->iobase;
+	WRT_REG_DWORD(&reg->isp24.req_q_in, index);
+	RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
+}
+
+void
+qla25xx_wrt_req_reg(struct qla_hw_data *ha, uint16_t id, uint16_t index)
+{
+	device_reg_t __iomem *reg = (void *) ha->mqiobase + QLA_QUE_PAGE * id;
+	struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
+	WRT_REG_DWORD(&reg->isp25mq.req_q_in, index);
+	RD_REG_DWORD(&ioreg->hccr); /* PCI posting */
+}
+
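
The four accessors just added are reached through ha->isp_ops (see the
rd_req_reg/wrt_req_reg calls in qla24xx_start_scsi() above), so the fast
path never tests the chip type; the 25xx variants address a per-queue
register page at mqiobase + QLA_QUE_PAGE * id. The dispatch pattern in
isolation (structs hypothetical, only the shape matches the driver):

#include <stdint.h>

struct hw;					/* opaque adapter state */

struct hw_ops {
	uint16_t (*rd_req_reg)(struct hw *ha, uint16_t qid);
	void (*wrt_req_reg)(struct hw *ha, uint16_t qid, uint16_t idx);
};

struct hw {
	const struct hw_ops *ops;
};

static inline void ring_doorbell(struct hw *ha, uint16_t qid, uint16_t idx)
{
	ha->ops->wrt_req_reg(ha, qid, idx);	/* 24xx or 25xx variant */
}
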
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index a76efd99d007..d5fb79a88001 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -10,10 +10,13 @@
 #include <scsi/scsi_tcq.h>
 
 static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
-static void qla2x00_process_completed_request(struct scsi_qla_host *, uint32_t);
-static void qla2x00_status_entry(scsi_qla_host_t *, void *);
+static void qla2x00_process_completed_request(struct scsi_qla_host *,
+	struct req_que *, uint32_t);
+static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
 static void qla2x00_status_cont_entry(scsi_qla_host_t *, sts_cont_entry_t *);
-static void qla2x00_error_entry(scsi_qla_host_t *, sts_entry_t *);
+static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
+	sts_entry_t *);
+static struct scsi_qla_host *qla2x00_get_rsp_host(struct rsp_que *);
 
 /**
  * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
@@ -27,24 +30,28 @@ static void qla2x00_error_entry(scsi_qla_host_t *, sts_entry_t *);
 irqreturn_t
 qla2100_intr_handler(int irq, void *dev_id)
 {
-	scsi_qla_host_t	*ha;
+	scsi_qla_host_t	*vha;
+	struct qla_hw_data *ha;
 	struct device_reg_2xxx __iomem *reg;
 	int		status;
 	unsigned long	iter;
 	uint16_t	hccr;
 	uint16_t	mb[4];
+	struct rsp_que *rsp;
 
-	ha = (scsi_qla_host_t *) dev_id;
-	if (!ha) {
+	rsp = (struct rsp_que *) dev_id;
+	if (!rsp) {
 		printk(KERN_INFO
-		    "%s(): NULL host pointer\n", __func__);
+		    "%s(): NULL response queue pointer\n", __func__);
 		return (IRQ_NONE);
 	}
 
+	ha = rsp->hw;
 	reg = &ha->iobase->isp;
 	status = 0;
 
 	spin_lock(&ha->hardware_lock);
+	vha = qla2x00_get_rsp_host(rsp);
 	for (iter = 50; iter--; ) {
 		hccr = RD_REG_WORD(&reg->hccr);
 		if (hccr & HCCR_RISC_PAUSE) {
@@ -59,8 +66,8 @@ qla2100_intr_handler(int irq, void *dev_id)
 			WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
 			RD_REG_WORD(&reg->hccr);
 
-			ha->isp_ops->fw_dump(ha, 1);
-			set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
+			ha->isp_ops->fw_dump(vha, 1);
+			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
 			break;
 		} else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
 			break;
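
The interrupt handlers in this file now receive a struct rsp_que as
dev_id instead of the host, so a handler can be attached per response
queue and derive the hw and host from it rather than the other way
around. Registration sketch (kernel context; my_queue and the helpers
are hypothetical, request_irq()/irqreturn_t are the real API):

static irqreturn_t my_intr(int irq, void *dev_id)
{
	struct my_queue *q = dev_id;	/* per-vector context, no lookup */

	process_queue(q);
	return IRQ_HANDLED;
}

static int setup_queue_irq(struct my_queue *q, unsigned int vector)
{
	/* dev_id is the queue itself; the handler gets it back. */
	return request_irq(vector, my_intr, 0, "drv-rspq", q);
}
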
@@ -72,24 +79,24 @@ qla2100_intr_handler(int irq, void *dev_id)
 			/* Get mailbox data. */
 			mb[0] = RD_MAILBOX_REG(ha, reg, 0);
 			if (mb[0] > 0x3fff && mb[0] < 0x8000) {
-				qla2x00_mbx_completion(ha, mb[0]);
+				qla2x00_mbx_completion(vha, mb[0]);
 				status |= MBX_INTERRUPT;
 			} else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
 				mb[1] = RD_MAILBOX_REG(ha, reg, 1);
 				mb[2] = RD_MAILBOX_REG(ha, reg, 2);
 				mb[3] = RD_MAILBOX_REG(ha, reg, 3);
-				qla2x00_async_event(ha, mb);
+				qla2x00_async_event(vha, rsp, mb);
 			} else {
 				/*EMPTY*/
 				DEBUG2(printk("scsi(%ld): Unrecognized "
 				    "interrupt type (%d).\n",
-				    ha->host_no, mb[0]));
+				    vha->host_no, mb[0]));
 			}
 			/* Release mailbox registers. */
 			WRT_REG_WORD(&reg->semaphore, 0);
 			RD_REG_WORD(&reg->semaphore);
 		} else {
-			qla2x00_process_response_queue(ha);
+			qla2x00_process_response_queue(rsp);
 
 			WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
 			RD_REG_WORD(&reg->hccr);
@@ -118,25 +125,29 @@ qla2100_intr_handler(int irq, void *dev_id)
 irqreturn_t
 qla2300_intr_handler(int irq, void *dev_id)
 {
-	scsi_qla_host_t	*ha;
+	scsi_qla_host_t	*vha;
 	struct device_reg_2xxx __iomem *reg;
 	int		status;
 	unsigned long	iter;
 	uint32_t	stat;
 	uint16_t	hccr;
 	uint16_t	mb[4];
+	struct rsp_que *rsp;
+	struct qla_hw_data *ha;
 
-	ha = (scsi_qla_host_t *) dev_id;
-	if (!ha) {
+	rsp = (struct rsp_que *) dev_id;
+	if (!rsp) {
 		printk(KERN_INFO
-		    "%s(): NULL host pointer\n", __func__);
+		    "%s(): NULL response queue pointer\n", __func__);
 		return (IRQ_NONE);
 	}
 
+	ha = rsp->hw;
 	reg = &ha->iobase->isp;
 	status = 0;
 
 	spin_lock(&ha->hardware_lock);
+	vha = qla2x00_get_rsp_host(rsp);
 	for (iter = 50; iter--; ) {
 		stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
 		if (stat & HSR_RISC_PAUSED) {
@@ -159,8 +170,8 @@ qla2300_intr_handler(int irq, void *dev_id)
 			WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
 			RD_REG_WORD(&reg->hccr);
 
-			ha->isp_ops->fw_dump(ha, 1);
-			set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
+			ha->isp_ops->fw_dump(vha, 1);
+			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
 			break;
 		} else if ((stat & HSR_RISC_INT) == 0)
 			break;
@@ -170,7 +181,7 @@ qla2300_intr_handler(int irq, void *dev_id)
 		case 0x2:
 		case 0x10:
 		case 0x11:
-			qla2x00_mbx_completion(ha, MSW(stat));
+			qla2x00_mbx_completion(vha, MSW(stat));
 			status |= MBX_INTERRUPT;
 
 			/* Release mailbox registers. */
@@ -181,26 +192,26 @@ qla2300_intr_handler(int irq, void *dev_id)
 			mb[1] = RD_MAILBOX_REG(ha, reg, 1);
 			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
 			mb[3] = RD_MAILBOX_REG(ha, reg, 3);
-			qla2x00_async_event(ha, mb);
+			qla2x00_async_event(vha, rsp, mb);
 			break;
 		case 0x13:
-			qla2x00_process_response_queue(ha);
+			qla2x00_process_response_queue(rsp);
 			break;
 		case 0x15:
 			mb[0] = MBA_CMPLT_1_16BIT;
 			mb[1] = MSW(stat);
-			qla2x00_async_event(ha, mb);
+			qla2x00_async_event(vha, rsp, mb);
 			break;
 		case 0x16:
 			mb[0] = MBA_SCSI_COMPLETION;
 			mb[1] = MSW(stat);
 			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
-			qla2x00_async_event(ha, mb);
+			qla2x00_async_event(vha, rsp, mb);
 			break;
 		default:
 			DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
 			    "(%d).\n",
-			    ha->host_no, stat & 0xff));
+			    vha->host_no, stat & 0xff));
 			break;
 		}
 		WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
@@ -223,10 +234,11 @@ qla2300_intr_handler(int irq, void *dev_id)
  * @mb0: Mailbox0 register
  */
 static void
-qla2x00_mbx_completion(scsi_qla_host_t *ha, uint16_t mb0)
+qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
 {
 	uint16_t	cnt;
 	uint16_t __iomem *wptr;
+	struct qla_hw_data *ha = vha->hw;
 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
 
 	/* Load return mailbox registers. */
@@ -247,10 +259,10 @@ qla2x00_mbx_completion(scsi_qla_host_t *ha, uint16_t mb0)
 
 	if (ha->mcp) {
 		DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n",
-		    __func__, ha->host_no, ha->mcp->mb[0]));
+		    __func__, vha->host_no, ha->mcp->mb[0]));
 	} else {
 		DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n",
-		    __func__, ha->host_no));
+		    __func__, vha->host_no));
 	}
 }
 
@@ -260,7 +272,7 @@ qla2x00_mbx_completion(scsi_qla_host_t *ha, uint16_t mb0)
  * @mb: Mailbox registers (0 - 3)
  */
 void
-qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
+qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
 {
 #define LS_UNKNOWN	2
 	static char	*link_speeds[5] = { "1", "2", "?", "4", "8" };
@@ -268,6 +280,7 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
 	uint16_t	handle_cnt;
 	uint16_t	cnt;
 	uint32_t	handles[5];
+	struct qla_hw_data *ha = vha->hw;
 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
 	uint32_t	rscn_entry, host_pid;
 	uint8_t		rscn_queue_index;
@@ -329,17 +342,19 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
 
 	switch (mb[0]) {
 	case MBA_SCSI_COMPLETION:	/* Fast Post */
-		if (!ha->flags.online)
+		if (!vha->flags.online)
 			break;
 
 		for (cnt = 0; cnt < handle_cnt; cnt++)
-			qla2x00_process_completed_request(ha, handles[cnt]);
+			qla2x00_process_completed_request(vha, rsp->req,
+				handles[cnt]);
 		break;
 
 	case MBA_RESET:			/* Reset */
-		DEBUG2(printk("scsi(%ld): Asynchronous RESET.\n", ha->host_no));
+		DEBUG2(printk("scsi(%ld): Asynchronous RESET.\n",
+			vha->host_no));
 
-		set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
+		set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
 		break;
 
 	case MBA_SYSTEM_ERR:		/* System Error */
@@ -347,70 +362,70 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
 		    "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n",
 		    mb[1], mb[2], mb[3]);
 
-		qla2x00_post_hwe_work(ha, mb[0], mb[1], mb[2], mb[3]);
-		ha->isp_ops->fw_dump(ha, 1);
+		qla2x00_post_hwe_work(vha, mb[0], mb[1], mb[2], mb[3]);
+		ha->isp_ops->fw_dump(vha, 1);
 
 		if (IS_FWI2_CAPABLE(ha)) {
 			if (mb[1] == 0 && mb[2] == 0) {
 				qla_printk(KERN_ERR, ha,
 				    "Unrecoverable Hardware Error: adapter "
 				    "marked OFFLINE!\n");
-				ha->flags.online = 0;
+				vha->flags.online = 0;
 			} else
-				set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
+				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
 		} else if (mb[1] == 0) {
 			qla_printk(KERN_INFO, ha,
 			    "Unrecoverable Hardware Error: adapter marked "
 			    "OFFLINE!\n");
-			ha->flags.online = 0;
+			vha->flags.online = 0;
 		} else
-			set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
+			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
 		break;
 
 	case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
 		DEBUG2(printk("scsi(%ld): ISP Request Transfer Error.\n",
-		    ha->host_no));
+		    vha->host_no));
 		qla_printk(KERN_WARNING, ha, "ISP Request Transfer Error.\n");
 
-		qla2x00_post_hwe_work(ha, mb[0], mb[1], mb[2], mb[3]);
-		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
+		qla2x00_post_hwe_work(vha, mb[0], mb[1], mb[2], mb[3]);
+		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
 		break;
 
 	case MBA_RSP_TRANSFER_ERR:	/* Response Transfer Error */
 		DEBUG2(printk("scsi(%ld): ISP Response Transfer Error.\n",
-		    ha->host_no));
+		    vha->host_no));
 		qla_printk(KERN_WARNING, ha, "ISP Response Transfer Error.\n");
 
-		qla2x00_post_hwe_work(ha, mb[0], mb[1], mb[2], mb[3]);
-		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
+		qla2x00_post_hwe_work(vha, mb[0], mb[1], mb[2], mb[3]);
+		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
 		break;
 
 	case MBA_WAKEUP_THRES:		/* Request Queue Wake-up */
 		DEBUG2(printk("scsi(%ld): Asynchronous WAKEUP_THRES.\n",
-		    ha->host_no));
+		    vha->host_no));
 		break;
 
 	case MBA_LIP_OCCURRED:		/* Loop Initialization Procedure */
-		DEBUG2(printk("scsi(%ld): LIP occurred (%x).\n", ha->host_no,
+		DEBUG2(printk("scsi(%ld): LIP occurred (%x).\n", vha->host_no,
 		    mb[1]));
 		qla_printk(KERN_INFO, ha, "LIP occurred (%x).\n", mb[1]);
 
-		if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
-			atomic_set(&ha->loop_state, LOOP_DOWN);
-			atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
-			qla2x00_mark_all_devices_lost(ha, 1);
+		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
+			atomic_set(&vha->loop_state, LOOP_DOWN);
+			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
+			qla2x00_mark_all_devices_lost(vha, 1);
 		}
 
-		if (ha->parent) {
-			atomic_set(&ha->vp_state, VP_FAILED);
-			fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED);
+		if (vha->vp_idx) {
+			atomic_set(&vha->vp_state, VP_FAILED);
+			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
 		}
 
-		set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);
-		set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags);
+		set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
+		set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
 
-		ha->flags.management_server_logged_in = 0;
-		qla2x00_post_aen_work(ha, FCH_EVT_LIP, mb[1]);
+		vha->flags.management_server_logged_in = 0;
+		qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
 		break;
 
 	case MBA_LOOP_UP:		/* Loop Up Event */
@@ -425,59 +440,59 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
 		}
 
 		DEBUG2(printk("scsi(%ld): Asynchronous LOOP UP (%s Gbps).\n",
-		    ha->host_no, link_speed));
+		    vha->host_no, link_speed));
 		qla_printk(KERN_INFO, ha, "LOOP UP detected (%s Gbps).\n",
 		    link_speed);
 
-		ha->flags.management_server_logged_in = 0;
-		qla2x00_post_aen_work(ha, FCH_EVT_LINKUP, ha->link_data_rate);
+		vha->flags.management_server_logged_in = 0;
+		qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
 		break;
 
 	case MBA_LOOP_DOWN:		/* Loop Down Event */
 		DEBUG2(printk("scsi(%ld): Asynchronous LOOP DOWN "
-		    "(%x %x %x).\n", ha->host_no, mb[1], mb[2], mb[3]));
+		    "(%x %x %x).\n", vha->host_no, mb[1], mb[2], mb[3]));
 		qla_printk(KERN_INFO, ha, "LOOP DOWN detected (%x %x %x).\n",
 		    mb[1], mb[2], mb[3]);
 
-		if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
-			atomic_set(&ha->loop_state, LOOP_DOWN);
-			atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
-			ha->device_flags |= DFLG_NO_CABLE;
-			qla2x00_mark_all_devices_lost(ha, 1);
+		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
+			atomic_set(&vha->loop_state, LOOP_DOWN);
+			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
+			vha->device_flags |= DFLG_NO_CABLE;
+			qla2x00_mark_all_devices_lost(vha, 1);
 		}
 
-		if (ha->parent) {
-			atomic_set(&ha->vp_state, VP_FAILED);
-			fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED);
+		if (vha->vp_idx) {
+			atomic_set(&vha->vp_state, VP_FAILED);
+			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
 		}
 
-		ha->flags.management_server_logged_in = 0;
+		vha->flags.management_server_logged_in = 0;
 		ha->link_data_rate = PORT_SPEED_UNKNOWN;
-		qla2x00_post_aen_work(ha, FCH_EVT_LINKDOWN, 0);
+		qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
 		break;
 
 	case MBA_LIP_RESET:		/* LIP reset occurred */
 		DEBUG2(printk("scsi(%ld): Asynchronous LIP RESET (%x).\n",
-		    ha->host_no, mb[1]));
+		    vha->host_no, mb[1]));
 		qla_printk(KERN_INFO, ha,
 		    "LIP reset occurred (%x).\n", mb[1]);
 
-		if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
-			atomic_set(&ha->loop_state, LOOP_DOWN);
-			atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
-			qla2x00_mark_all_devices_lost(ha, 1);
+		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
+			atomic_set(&vha->loop_state, LOOP_DOWN);
+			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
+			qla2x00_mark_all_devices_lost(vha, 1);
 		}
 
-		if (ha->parent) {
-			atomic_set(&ha->vp_state, VP_FAILED);
-			fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED);
+		if (vha->vp_idx) {
+			atomic_set(&vha->vp_state, VP_FAILED);
+			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
 		}
 
-		set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
+		set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
 
 		ha->operating_mode = LOOP;
-		ha->flags.management_server_logged_in = 0;
-		qla2x00_post_aen_work(ha, FCH_EVT_LIPRESET, mb[1]);
+		vha->flags.management_server_logged_in = 0;
+		qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
 		break;
 
 	case MBA_POINT_TO_POINT:	/* Point-to-Point */
@@ -485,33 +500,33 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
 			break;
 
 		DEBUG2(printk("scsi(%ld): Asynchronous P2P MODE received.\n",
-		    ha->host_no));
+		    vha->host_no));
 
 		/*
 		 * Until there's a transition from loop down to loop up, treat
 		 * this as loop down only.
 		 */
-		if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
-			atomic_set(&ha->loop_state, LOOP_DOWN);
-			if (!atomic_read(&ha->loop_down_timer))
-				atomic_set(&ha->loop_down_timer,
+		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
+			atomic_set(&vha->loop_state, LOOP_DOWN);
+			if (!atomic_read(&vha->loop_down_timer))
+				atomic_set(&vha->loop_down_timer,
 				    LOOP_DOWN_TIME);
-			qla2x00_mark_all_devices_lost(ha, 1);
+			qla2x00_mark_all_devices_lost(vha, 1);
 		}
 
-		if (ha->parent) {
-			atomic_set(&ha->vp_state, VP_FAILED);
-			fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED);
+		if (vha->vp_idx) {
+			atomic_set(&vha->vp_state, VP_FAILED);
+			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
 		}
 
-		if (!(test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags))) {
-			set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
-		}
-		set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);
-		set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags);
+		if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
+			set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
+
+		set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
+		set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
 
 		ha->flags.gpsc_supported = 1;
-		ha->flags.management_server_logged_in = 0;
+		vha->flags.management_server_logged_in = 0;
 		break;
 
 	case MBA_CHG_IN_CONNECTION:	/* Change in connection mode */
@@ -520,134 +535,137 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
 
 		DEBUG2(printk("scsi(%ld): Asynchronous Change In Connection "
 		    "received.\n",
-		    ha->host_no));
+		    vha->host_no));
 		qla_printk(KERN_INFO, ha,
 		    "Configuration change detected: value=%x.\n", mb[1]);
 
-		if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
-			atomic_set(&ha->loop_state, LOOP_DOWN);
-			if (!atomic_read(&ha->loop_down_timer))
-				atomic_set(&ha->loop_down_timer,
+		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
+			atomic_set(&vha->loop_state, LOOP_DOWN);
+			if (!atomic_read(&vha->loop_down_timer))
+				atomic_set(&vha->loop_down_timer,
 				    LOOP_DOWN_TIME);
-			qla2x00_mark_all_devices_lost(ha, 1);
+			qla2x00_mark_all_devices_lost(vha, 1);
 		}
 
-		if (ha->parent) {
-			atomic_set(&ha->vp_state, VP_FAILED);
-			fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED);
+		if (vha->vp_idx) {
+			atomic_set(&vha->vp_state, VP_FAILED);
+			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
 		}
 
-		set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
-		set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
+		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
 		break;
 
 	case MBA_PORT_UPDATE:		/* Port database update */
+		/* Only handle SCNs for our Vport index. */
+		if (vha->vp_idx && vha->vp_idx != (mb[3] & 0xff))
+			break;
+
 		/*
 		 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
 		 * event etc. earlier indicating loop is down) then process
 		 * it.  Otherwise ignore it and Wait for RSCN to come in.
 		 */
-		atomic_set(&ha->loop_down_timer, 0);
-		if (atomic_read(&ha->loop_state) != LOOP_DOWN &&
-		    atomic_read(&ha->loop_state) != LOOP_DEAD) {
+		atomic_set(&vha->loop_down_timer, 0);
+		if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
+		    atomic_read(&vha->loop_state) != LOOP_DEAD) {
 			DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE "
-			    "ignored %04x/%04x/%04x.\n", ha->host_no, mb[1],
+			    "ignored %04x/%04x/%04x.\n", vha->host_no, mb[1],
 			    mb[2], mb[3]));
 			break;
 		}
 
 		DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE.\n",
-		    ha->host_no));
+		    vha->host_no));
 		DEBUG(printk(KERN_INFO
 		    "scsi(%ld): Port database changed %04x %04x %04x.\n",
-		    ha->host_no, mb[1], mb[2], mb[3]));
+		    vha->host_no, mb[1], mb[2], mb[3]));
 
 		/*
 		 * Mark all devices as missing so we will login again.
 		 */
-		atomic_set(&ha->loop_state, LOOP_UP);
+		atomic_set(&vha->loop_state, LOOP_UP);
 
-		qla2x00_mark_all_devices_lost(ha, 1);
+		qla2x00_mark_all_devices_lost(vha, 1);
 
-		ha->flags.rscn_queue_overflow = 1;
+		vha->flags.rscn_queue_overflow = 1;
 
-		set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
-		set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
+		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
 		break;
 
 	case MBA_RSCN_UPDATE:		/* State Change Registration */
 		/* Check if the Vport has issued a SCR */
-		if (ha->parent && test_bit(VP_SCR_NEEDED, &ha->vp_flags))
+		if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
 			break;
 		/* Only handle SCNs for our Vport index. */
-		if (ha->parent && ha->vp_idx != (mb[3] & 0xff))
+		if (vha->vp_idx && vha->vp_idx != (mb[3] & 0xff))
 			break;
-
 		DEBUG2(printk("scsi(%ld): Asynchronous RSCR UPDATE.\n",
-		    ha->host_no));
+		    vha->host_no));
 		DEBUG(printk(KERN_INFO
 		    "scsi(%ld): RSCN database changed -- %04x %04x %04x.\n",
-		    ha->host_no, mb[1], mb[2], mb[3]));
+		    vha->host_no, mb[1], mb[2], mb[3]));
 
 		rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
-		host_pid = (ha->d_id.b.domain << 16) | (ha->d_id.b.area << 8) |
-		    ha->d_id.b.al_pa;
+		host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
+				| vha->d_id.b.al_pa;
 		if (rscn_entry == host_pid) {
 			DEBUG(printk(KERN_INFO
 			    "scsi(%ld): Ignoring RSCN update to local host "
 			    "port ID (%06x)\n",
-			    ha->host_no, host_pid));
+			    vha->host_no, host_pid));
 			break;
 		}
 
 		/* Ignore reserved bits from RSCN-payload. */
 		rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
-		rscn_queue_index = ha->rscn_in_ptr + 1;
+		rscn_queue_index = vha->rscn_in_ptr + 1;
 		if (rscn_queue_index == MAX_RSCN_COUNT)
 			rscn_queue_index = 0;
-		if (rscn_queue_index != ha->rscn_out_ptr) {
-			ha->rscn_queue[ha->rscn_in_ptr] = rscn_entry;
-			ha->rscn_in_ptr = rscn_queue_index;
+		if (rscn_queue_index != vha->rscn_out_ptr) {
+			vha->rscn_queue[vha->rscn_in_ptr] = rscn_entry;
+			vha->rscn_in_ptr = rscn_queue_index;
 		} else {
-			ha->flags.rscn_queue_overflow = 1;
+			vha->flags.rscn_queue_overflow = 1;
 		}
 
-		atomic_set(&ha->loop_state, LOOP_UPDATE);
-		atomic_set(&ha->loop_down_timer, 0);
-		ha->flags.management_server_logged_in = 0;
+		atomic_set(&vha->loop_state, LOOP_UPDATE);
+		atomic_set(&vha->loop_down_timer, 0);
+		vha->flags.management_server_logged_in = 0;
 
-		set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
-		set_bit(RSCN_UPDATE, &ha->dpc_flags);
-		qla2x00_post_aen_work(ha, FCH_EVT_RSCN, rscn_entry);
+		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+		set_bit(RSCN_UPDATE, &vha->dpc_flags);
+		qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
 		break;
 
 	/* case MBA_RIO_RESPONSE: */
 	case MBA_ZIO_RESPONSE:
 		DEBUG2(printk("scsi(%ld): [R|Z]IO update completion.\n",
-		    ha->host_no));
+		    vha->host_no));
 		DEBUG(printk(KERN_INFO
 		    "scsi(%ld): [R|Z]IO update completion.\n",
-		    ha->host_no));
+		    vha->host_no));
 
 		if (IS_FWI2_CAPABLE(ha))
-			qla24xx_process_response_queue(ha);
+			qla24xx_process_response_queue(rsp);
 		else
-			qla2x00_process_response_queue(ha);
+			qla2x00_process_response_queue(rsp);
 		break;
 
 	case MBA_DISCARD_RND_FRAME:
 		DEBUG2(printk("scsi(%ld): Discard RND Frame -- %04x %04x "
-		    "%04x.\n", ha->host_no, mb[1], mb[2], mb[3]));
+		    "%04x.\n", vha->host_no, mb[1], mb[2], mb[3]));
 		break;
 
 	case MBA_TRACE_NOTIFICATION:
 		DEBUG2(printk("scsi(%ld): Trace Notification -- %04x %04x.\n",
-		ha->host_no, mb[1], mb[2]));
+		vha->host_no, mb[1], mb[2]));
 		break;
 
 	case MBA_ISP84XX_ALERT:
 		DEBUG2(printk("scsi(%ld): ISP84XX Alert Notification -- "
-		    "%04x %04x %04x\n", ha->host_no, mb[1], mb[2], mb[3]));
+		    "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3]));
 
 		spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
 		switch (mb[1]) {
@@ -682,16 +700,22 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
 		break;
 	}
 
-	if (!ha->parent && ha->num_vhosts)
-		qla2x00_alert_all_vps(ha, mb);
+	if (!vha->vp_idx && ha->num_vhosts)
+		qla2x00_alert_all_vps(rsp, mb);
 }
 
 static void
 qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, void *data)
 {
 	fc_port_t *fcport = data;
+	struct scsi_qla_host *vha = fcport->vha;
+	struct qla_hw_data *ha = vha->hw;
+	struct req_que *req = NULL;
 
-	if (fcport->ha->max_q_depth <= sdev->queue_depth)
+	req = ha->req_q_map[vha->req_ques[0]];
+	if (!req)
+		return;
+	if (req->max_q_depth <= sdev->queue_depth)
 		return;
 
 	if (sdev->ordered_tags)
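
qla2x00_adjust_sdev_qdepth_up() above now caps the ramp at the
per-request-queue max_q_depth rather than a per-HA value; depth still
climbs one step per clean completion and is cut on QUEUE FULL via
scsi_track_queue_full(). The up-ramp, sketched (max_depth stands in for
the cap; scsi_adjust_queue_depth() is the midlayer call of this era):

static void ramp_up_one_lun(struct scsi_device *sdev, int max_depth)
{
	if (sdev->queue_depth >= max_depth)
		return;				/* already at the cap */

	scsi_adjust_queue_depth(sdev,
	    sdev->ordered_tags ? MSG_ORDERED_TAG : MSG_SIMPLE_TAG,
	    sdev->queue_depth + 1);		/* one step at a time */
}
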
@@ -703,9 +727,9 @@ qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, void *data)
 
 	fcport->last_ramp_up = jiffies;
 
-	DEBUG2(qla_printk(KERN_INFO, fcport->ha,
+	DEBUG2(qla_printk(KERN_INFO, ha,
 	    "scsi(%ld:%d:%d:%d): Queue depth adjusted-up to %d.\n",
-	    fcport->ha->host_no, sdev->channel, sdev->id, sdev->lun,
+	    fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
 	    sdev->queue_depth));
 }
 
@@ -717,20 +741,21 @@ qla2x00_adjust_sdev_qdepth_down(struct scsi_device *sdev, void *data)
 	if (!scsi_track_queue_full(sdev, sdev->queue_depth - 1))
 		return;
 
-	DEBUG2(qla_printk(KERN_INFO, fcport->ha,
+	DEBUG2(qla_printk(KERN_INFO, fcport->vha->hw,
 	    "scsi(%ld:%d:%d:%d): Queue depth adjusted-down to %d.\n",
-	    fcport->ha->host_no, sdev->channel, sdev->id, sdev->lun,
+	    fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
 	    sdev->queue_depth));
 }
 
 static inline void
-qla2x00_ramp_up_queue_depth(scsi_qla_host_t *ha, srb_t *sp)
+qla2x00_ramp_up_queue_depth(scsi_qla_host_t *vha, struct req_que *req,
+								srb_t *sp)
 {
 	fc_port_t *fcport;
 	struct scsi_device *sdev;
 
 	sdev = sp->cmd->device;
-	if (sdev->queue_depth >= ha->max_q_depth)
+	if (sdev->queue_depth >= req->max_q_depth)
 		return;
 
 	fcport = sp->fcport;
@@ -751,25 +776,27 @@ qla2x00_ramp_up_queue_depth(scsi_qla_host_t *ha, srb_t *sp)
  * @index: SRB index
  */
 static void
-qla2x00_process_completed_request(struct scsi_qla_host *ha, uint32_t index)
+qla2x00_process_completed_request(struct scsi_qla_host *vha,
+				struct req_que *req, uint32_t index)
 {
 	srb_t *sp;
+	struct qla_hw_data *ha = vha->hw;
 
 	/* Validate handle. */
 	if (index >= MAX_OUTSTANDING_COMMANDS) {
 		DEBUG2(printk("scsi(%ld): Invalid SCSI completion handle %d.\n",
-		    ha->host_no, index));
+		    vha->host_no, index));
 		qla_printk(KERN_WARNING, ha,
 		    "Invalid SCSI completion handle %d.\n", index);
 
-		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
+		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
 		return;
 	}
 
-	sp = ha->outstanding_cmds[index];
+	sp = req->outstanding_cmds[index];
 	if (sp) {
 		/* Free outstanding command slot. */
-		ha->outstanding_cmds[index] = NULL;
+		req->outstanding_cmds[index] = NULL;
 
 		CMD_COMPL_STATUS(sp->cmd) = 0L;
 		CMD_SCSI_STATUS(sp->cmd) = 0L;
@@ -777,15 +804,15 @@ qla2x00_process_completed_request(struct scsi_qla_host *ha, uint32_t index)
 		/* Save ISP completion status */
 		sp->cmd->result = DID_OK << 16;
 
-		qla2x00_ramp_up_queue_depth(ha, sp);
+		qla2x00_ramp_up_queue_depth(vha, req, sp);
 		qla2x00_sp_compl(ha, sp);
 	} else {
 		DEBUG2(printk("scsi(%ld): Invalid ISP SCSI completion handle\n",
-		    ha->host_no));
+		    vha->host_no));
 		qla_printk(KERN_WARNING, ha,
 		    "Invalid ISP SCSI completion handle\n");
 
-		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
+		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
 	}
 }
 
@@ -794,32 +821,36 @@ qla2x00_process_completed_request(struct scsi_qla_host *ha, uint32_t index)
  * @ha: SCSI driver HA context
  */
 void
-qla2x00_process_response_queue(struct scsi_qla_host *ha)
+qla2x00_process_response_queue(struct rsp_que *rsp)
 {
+	struct scsi_qla_host *vha;
+	struct qla_hw_data *ha = rsp->hw;
 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
 	sts_entry_t	*pkt;
 	uint16_t        handle_cnt;
 	uint16_t        cnt;
 
-	if (!ha->flags.online)
+	vha = qla2x00_get_rsp_host(rsp);
+
+	if (!vha->flags.online)
 		return;
 
-	while (ha->response_ring_ptr->signature != RESPONSE_PROCESSED) {
-		pkt = (sts_entry_t *)ha->response_ring_ptr;
+	while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
+		pkt = (sts_entry_t *)rsp->ring_ptr;
 
-		ha->rsp_ring_index++;
-		if (ha->rsp_ring_index == ha->response_q_length) {
-			ha->rsp_ring_index = 0;
-			ha->response_ring_ptr = ha->response_ring;
+		rsp->ring_index++;
+		if (rsp->ring_index == rsp->length) {
+			rsp->ring_index = 0;
+			rsp->ring_ptr = rsp->ring;
 		} else {
-			ha->response_ring_ptr++;
+			rsp->ring_ptr++;
 		}
 
 		if (pkt->entry_status != 0) {
 			DEBUG3(printk(KERN_INFO
-			    "scsi(%ld): Process error entry.\n", ha->host_no));
+			    "scsi(%ld): Process error entry.\n", vha->host_no));
 
-			qla2x00_error_entry(ha, pkt);
+			qla2x00_error_entry(vha, rsp, pkt);
 			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
 			wmb();
 			continue;
@@ -827,31 +858,31 @@ qla2x00_process_response_queue(struct scsi_qla_host *ha)
 
 		switch (pkt->entry_type) {
 		case STATUS_TYPE:
-			qla2x00_status_entry(ha, pkt);
+			qla2x00_status_entry(vha, rsp, pkt);
 			break;
 		case STATUS_TYPE_21:
 			handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
 			for (cnt = 0; cnt < handle_cnt; cnt++) {
-				qla2x00_process_completed_request(ha,
+				qla2x00_process_completed_request(vha, rsp->req,
 				    ((sts21_entry_t *)pkt)->handle[cnt]);
 			}
 			break;
 		case STATUS_TYPE_22:
 			handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
 			for (cnt = 0; cnt < handle_cnt; cnt++) {
-				qla2x00_process_completed_request(ha,
+				qla2x00_process_completed_request(vha, rsp->req,
 				    ((sts22_entry_t *)pkt)->handle[cnt]);
 			}
 			break;
 		case STATUS_CONT_TYPE:
-			qla2x00_status_cont_entry(ha, (sts_cont_entry_t *)pkt);
+			qla2x00_status_cont_entry(vha, (sts_cont_entry_t *)pkt);
 			break;
 		default:
 			/* Type Not Supported. */
 			DEBUG4(printk(KERN_WARNING
 			    "scsi(%ld): Received unknown response pkt type %x "
 			    "entry status=%x.\n",
-			    ha->host_no, pkt->entry_type, pkt->entry_status));
+			    vha->host_no, pkt->entry_type, pkt->entry_status));
 			break;
 		}
 		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
@@ -859,7 +890,7 @@ qla2x00_process_response_queue(struct scsi_qla_host *ha)
 	}
 
 	/* Adjust ring index */
-	WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), ha->rsp_ring_index);
+	WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
 }
 
 static inline void
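
qla2x00_process_response_queue() drains entries until it reaches one
still stamped RESPONSE_PROCESSED, re-stamping each consumed entry so it
can never be replayed, and posts the out-index to the chip once at the
end. In isolation (sentinel, entry layout and helpers hypothetical;
ring_advance() as in the earlier sketch):

#define PROCESSED 0xDEADDEAD

struct entry {
	unsigned int signature;
	/* ... payload ... */
};

static void drain_responses(struct ring *r)
{
	struct entry *e;

	while (((struct entry *)r->ptr)->signature != PROCESSED) {
		e = (struct entry *)r->ptr;
		ring_advance(r);		/* step past it first */
		handle_entry(e);
		e->signature = PROCESSED;	/* mark consumed */
		wmb();				/* publish before next */
	}
	post_out_index(r->index);		/* one doorbell at the end */
}
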
@@ -881,10 +912,10 @@ qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len)
 	sp->request_sense_ptr += sense_len;
 	sp->request_sense_length -= sense_len;
 	if (sp->request_sense_length != 0)
-		sp->fcport->ha->status_srb = sp;
+		sp->fcport->vha->status_srb = sp;
 
 	DEBUG5(printk("%s(): Check condition Sense data, scsi(%ld:%d:%d:%d) "
-	    "cmd=%p pid=%ld\n", __func__, sp->fcport->ha->host_no,
+	    "cmd=%p pid=%ld\n", __func__, sp->fcport->vha->host_no,
 	    cp->device->channel, cp->device->id, cp->device->lun, cp,
 	    cp->serial_number));
 	if (sense_len)
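
When sense data overflows the status entry, the driver parks the SRB on
status_srb above and lets qla2x00_status_cont_entry() (further down)
copy one chunk per continuation entry until request_sense_length hits
zero. The consuming step, sketched (CHUNK and struct partial are
hypothetical):

#include <string.h>

#define CHUNK 60	/* sense bytes carried per continuation entry */

struct partial {
	unsigned char *dst;	/* next byte of the sense buffer */
	unsigned int remaining;	/* bytes still expected */
};

static int copy_sense_chunk(struct partial *p, const unsigned char *data)
{
	unsigned int n = p->remaining < CHUNK ? p->remaining : CHUNK;

	memcpy(p->dst, data, n);
	p->dst += n;
	p->remaining -= n;
	return p->remaining == 0;	/* done: complete the command */
}
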
@@ -898,7 +929,7 @@ qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len)
  * @pkt: Entry pointer
  */
 static void
-qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
+qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
 {
 	srb_t		*sp;
 	fc_port_t	*fcport;
@@ -911,6 +942,8 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
 	int32_t		resid;
 	uint32_t	sense_len, rsp_info_len, resid_len, fw_resid_len;
 	uint8_t		*rsp_info, *sense_data;
+	struct qla_hw_data *ha = vha->hw;
+	struct req_que *req = rsp->req;
 
 	sts = (sts_entry_t *) pkt;
 	sts24 = (struct sts_entry_24xx *) pkt;
@@ -924,31 +957,31 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
 
 	/* Fast path completion. */
 	if (comp_status == CS_COMPLETE && scsi_status == 0) {
-		qla2x00_process_completed_request(ha, sts->handle);
+		qla2x00_process_completed_request(vha, req, sts->handle);
 
 		return;
 	}
 
 	/* Validate handle. */
 	if (sts->handle < MAX_OUTSTANDING_COMMANDS) {
-		sp = ha->outstanding_cmds[sts->handle];
-		ha->outstanding_cmds[sts->handle] = NULL;
+		sp = req->outstanding_cmds[sts->handle];
+		req->outstanding_cmds[sts->handle] = NULL;
 	} else
 		sp = NULL;
 
 	if (sp == NULL) {
 		DEBUG2(printk("scsi(%ld): Status Entry invalid handle.\n",
-		    ha->host_no));
+		    vha->host_no));
 		qla_printk(KERN_WARNING, ha, "Status Entry invalid handle.\n");
 
-		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
-		qla2xxx_wake_dpc(ha);
+		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+		qla2xxx_wake_dpc(vha);
 		return;
 	}
 	cp = sp->cmd;
 	if (cp == NULL) {
 		DEBUG2(printk("scsi(%ld): Command already returned back to OS "
-		    "pkt->handle=%d sp=%p.\n", ha->host_no, sts->handle, sp));
+		    "pkt->handle=%d sp=%p.\n", vha->host_no, sts->handle, sp));
 		qla_printk(KERN_WARNING, ha,
 		    "Command is NULL: already returned to OS (sp=%p)\n", sp);
 
@@ -987,7 +1020,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
 		if (rsp_info_len > 3 && rsp_info[3]) {
 			DEBUG2(printk("scsi(%ld:%d:%d:%d) FCP I/O protocol "
 			    "failure (%x/%02x%02x%02x%02x%02x%02x%02x%02x)..."
-			    "retrying command\n", ha->host_no,
+			    "retrying command\n", vha->host_no,
 			    cp->device->channel, cp->device->id,
 			    cp->device->lun, rsp_info_len, rsp_info[0],
 			    rsp_info[1], rsp_info[2], rsp_info[3], rsp_info[4],
@@ -1025,7 +1058,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
 				qla_printk(KERN_INFO, ha,
 					   "scsi(%ld:%d:%d:%d): Mid-layer underflow "
 					   "detected (%x of %x bytes)...returning "
-					   "error status.\n", ha->host_no,
+					   "error status.\n", vha->host_no,
 					   cp->device->channel, cp->device->id,
 					   cp->device->lun, resid,
 					   scsi_bufflen(cp));
@@ -1039,7 +1072,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
 		if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
 			DEBUG2(printk(KERN_INFO
 			    "scsi(%ld): QUEUE FULL status detected "
-			    "0x%x-0x%x.\n", ha->host_no, comp_status,
+			    "0x%x-0x%x.\n", vha->host_no, comp_status,
 			    scsi_status));
 
 			/* Adjust queue depth for all luns on the port. */
@@ -1078,7 +1111,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
 			DEBUG2(printk(KERN_INFO
 			    "scsi(%ld:%d:%d) UNDERRUN status detected "
 			    "0x%x-0x%x. resid=0x%x fw_resid=0x%x cdb=0x%x "
-			    "os_underflow=0x%x\n", ha->host_no,
+			    "os_underflow=0x%x\n", vha->host_no,
 			    cp->device->id, cp->device->lun, comp_status,
 			    scsi_status, resid_len, resid, cp->cmnd[0],
 			    cp->underflow));
@@ -1095,7 +1128,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
 			if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
 				DEBUG2(printk(KERN_INFO
 				    "scsi(%ld): QUEUE FULL status detected "
-				    "0x%x-0x%x.\n", ha->host_no, comp_status,
+				    "0x%x-0x%x.\n", vha->host_no, comp_status,
 				    scsi_status));
 
 				/*
@@ -1125,10 +1158,10 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
 			if (!(scsi_status & SS_RESIDUAL_UNDER)) {
 				DEBUG2(printk("scsi(%ld:%d:%d:%d) Dropped "
 					      "frame(s) detected (%x of %x bytes)..."
-					      "retrying command.\n", ha->host_no,
-					      cp->device->channel, cp->device->id,
-					      cp->device->lun, resid,
-					      scsi_bufflen(cp)));
+					      "retrying command.\n",
+					vha->host_no, cp->device->channel,
+					cp->device->id, cp->device->lun, resid,
+					scsi_bufflen(cp)));
 
 				cp->result = DID_BUS_BUSY << 16;
 				break;
@@ -1140,7 +1173,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
 				qla_printk(KERN_INFO, ha,
 					   "scsi(%ld:%d:%d:%d): Mid-layer underflow "
 					   "detected (%x of %x bytes)...returning "
-					   "error status.\n", ha->host_no,
+					   "error status.\n", vha->host_no,
 					   cp->device->channel, cp->device->id,
 					   cp->device->lun, resid,
 					   scsi_bufflen(cp));
@@ -1157,7 +1190,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
 	case CS_DATA_OVERRUN:
 		DEBUG2(printk(KERN_INFO
 		    "scsi(%ld:%d:%d): OVERRUN status detected 0x%x-0x%x\n",
-		    ha->host_no, cp->device->id, cp->device->lun, comp_status,
+		    vha->host_no, cp->device->id, cp->device->lun, comp_status,
 		    scsi_status));
 		DEBUG2(printk(KERN_INFO
 		    "CDB: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
@@ -1183,7 +1216,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
 		 */
 		DEBUG2(printk("scsi(%ld:%d:%d): status_entry: Port Down "
 		    "pid=%ld, compl status=0x%x, port state=0x%x\n",
-		    ha->host_no, cp->device->id, cp->device->lun,
+		    vha->host_no, cp->device->id, cp->device->lun,
 		    cp->serial_number, comp_status,
 		    atomic_read(&fcport->state)));
 
@@ -1194,13 +1227,13 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
 		 */
 		cp->result = DID_TRANSPORT_DISRUPTED << 16;
 		if (atomic_read(&fcport->state) == FCS_ONLINE)
-			qla2x00_mark_device_lost(fcport->ha, fcport, 1, 1);
+			qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
 		break;
 
 	case CS_RESET:
 		DEBUG2(printk(KERN_INFO
 		    "scsi(%ld): RESET status detected 0x%x-0x%x.\n",
-		    ha->host_no, comp_status, scsi_status));
+		    vha->host_no, comp_status, scsi_status));
 
 		cp->result = DID_RESET << 16;
 		break;
@@ -1213,7 +1246,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
 		 */
 		DEBUG2(printk(KERN_INFO
 		    "scsi(%ld): ABORT status detected 0x%x-0x%x.\n",
-		    ha->host_no, comp_status, scsi_status));
+		    vha->host_no, comp_status, scsi_status));
 
 		cp->result = DID_RESET << 16;
 		break;
@@ -1229,25 +1262,25 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
 		if (IS_FWI2_CAPABLE(ha)) {
 			DEBUG2(printk(KERN_INFO
 			    "scsi(%ld:%d:%d:%d): TIMEOUT status detected "
-			    "0x%x-0x%x\n", ha->host_no, cp->device->channel,
+			    "0x%x-0x%x\n", vha->host_no, cp->device->channel,
 			    cp->device->id, cp->device->lun, comp_status,
 			    scsi_status));
 			break;
 		}
 		DEBUG2(printk(KERN_INFO
 		    "scsi(%ld:%d:%d:%d): TIMEOUT status detected 0x%x-0x%x "
-		    "sflags=%x.\n", ha->host_no, cp->device->channel,
+		    "sflags=%x.\n", vha->host_no, cp->device->channel,
 		    cp->device->id, cp->device->lun, comp_status, scsi_status,
 		    le16_to_cpu(sts->status_flags)));
 
 		/* Check to see if logout occurred. */
 		if ((le16_to_cpu(sts->status_flags) & SF_LOGOUT_SENT))
-			qla2x00_mark_device_lost(fcport->ha, fcport, 1, 1);
+			qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
 		break;
 
 	default:
 		DEBUG3(printk("scsi(%ld): Error detected (unknown status) "
-		    "0x%x-0x%x.\n", ha->host_no, comp_status, scsi_status));
+		    "0x%x-0x%x.\n", vha->host_no, comp_status, scsi_status));
 		qla_printk(KERN_INFO, ha,
 		    "Unknown status detected 0x%x-0x%x.\n",
 		    comp_status, scsi_status);
@@ -1257,7 +1290,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
 	}
 
 	/* Place command on done queue. */
-	if (ha->status_srb == NULL)
+	if (vha->status_srb == NULL)
 		qla2x00_sp_compl(ha, sp);
 }
 
@@ -1269,10 +1302,11 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
  * Extended sense data.
  */
 static void
-qla2x00_status_cont_entry(scsi_qla_host_t *ha, sts_cont_entry_t *pkt)
+qla2x00_status_cont_entry(scsi_qla_host_t *vha, sts_cont_entry_t *pkt)
 {
 	uint8_t		sense_sz = 0;
-	srb_t		*sp = ha->status_srb;
+	struct qla_hw_data *ha = vha->hw;
+	srb_t		*sp = vha->status_srb;
 	struct scsi_cmnd *cp;
 
 	if (sp != NULL && sp->request_sense_length != 0) {
@@ -1284,7 +1318,7 @@ qla2x00_status_cont_entry(scsi_qla_host_t *ha, sts_cont_entry_t *pkt)
 			    "cmd is NULL: already returned to OS (sp=%p)\n",
 			    sp);
 
-			ha->status_srb = NULL;
+			vha->status_srb = NULL;
 			return;
 		}
 
@@ -1305,7 +1339,7 @@ qla2x00_status_cont_entry(scsi_qla_host_t *ha, sts_cont_entry_t *pkt)
 
 		/* Place command on done queue. */
 		if (sp->request_sense_length == 0) {
-			ha->status_srb = NULL;
+			vha->status_srb = NULL;
 			qla2x00_sp_compl(ha, sp);
 		}
 	}
@@ -1317,10 +1351,11 @@ qla2x00_status_cont_entry(scsi_qla_host_t *ha, sts_cont_entry_t *pkt)
  * @pkt: Entry pointer
  */
 static void
-qla2x00_error_entry(scsi_qla_host_t *ha, sts_entry_t *pkt)
+qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
 {
 	srb_t *sp;
-
+	struct qla_hw_data *ha = vha->hw;
+	struct req_que *req = rsp->req;
 #if defined(QL_DEBUG_LEVEL_2)
 	if (pkt->entry_status & RF_INV_E_ORDER)
 		qla_printk(KERN_ERR, ha, "%s: Invalid Entry Order\n", __func__);
@@ -1339,13 +1374,13 @@ qla2x00_error_entry(scsi_qla_host_t *ha, sts_entry_t *pkt)
 
 	/* Validate handle. */
 	if (pkt->handle < MAX_OUTSTANDING_COMMANDS)
-		sp = ha->outstanding_cmds[pkt->handle];
+		sp = req->outstanding_cmds[pkt->handle];
 	else
 		sp = NULL;
 
 	if (sp) {
 		/* Free outstanding command slot. */
-		ha->outstanding_cmds[pkt->handle] = NULL;
+		req->outstanding_cmds[pkt->handle] = NULL;
 
 		/* Bad payload or header */
 		if (pkt->entry_status &
@@ -1362,12 +1397,12 @@ qla2x00_error_entry(scsi_qla_host_t *ha, sts_entry_t *pkt)
 	} else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type ==
 	    COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7) {
 		DEBUG2(printk("scsi(%ld): Error entry - invalid handle\n",
-		    ha->host_no));
+		    vha->host_no));
 		qla_printk(KERN_WARNING, ha,
 		    "Error entry - invalid handle\n");
 
-		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
-		qla2xxx_wake_dpc(ha);
+		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+		qla2xxx_wake_dpc(vha);
 	}
 }
 
@@ -1377,10 +1412,11 @@ qla2x00_error_entry(scsi_qla_host_t *ha, sts_entry_t *pkt)
  * @mb0: Mailbox0 register
  */
 static void
-qla24xx_mbx_completion(scsi_qla_host_t *ha, uint16_t mb0)
+qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
 {
 	uint16_t	cnt;
 	uint16_t __iomem *wptr;
+	struct qla_hw_data *ha = vha->hw;
 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
 
 	/* Load return mailbox registers. */
@@ -1395,10 +1431,10 @@ qla24xx_mbx_completion(scsi_qla_host_t *ha, uint16_t mb0)
 
 	if (ha->mcp) {
 		DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n",
-		    __func__, ha->host_no, ha->mcp->mb[0]));
+		    __func__, vha->host_no, ha->mcp->mb[0]));
 	} else {
 		DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n",
-		    __func__, ha->host_no));
+		    __func__, vha->host_no));
 	}
 }
 
@@ -1407,30 +1443,33 @@ qla24xx_mbx_completion(scsi_qla_host_t *ha, uint16_t mb0)
  * @ha: SCSI driver HA context
  */
 void
-qla24xx_process_response_queue(struct scsi_qla_host *ha)
+qla24xx_process_response_queue(struct rsp_que *rsp)
 {
-	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+	struct qla_hw_data *ha = rsp->hw;
 	struct sts_entry_24xx *pkt;
+	struct scsi_qla_host *vha;
+
+	vha = qla2x00_get_rsp_host(rsp);
 
-	if (!ha->flags.online)
+	if (!vha->flags.online)
 		return;
 
-	while (ha->response_ring_ptr->signature != RESPONSE_PROCESSED) {
-		pkt = (struct sts_entry_24xx *)ha->response_ring_ptr;
+	while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
+		pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
 
-		ha->rsp_ring_index++;
-		if (ha->rsp_ring_index == ha->response_q_length) {
-			ha->rsp_ring_index = 0;
-			ha->response_ring_ptr = ha->response_ring;
+		rsp->ring_index++;
+		if (rsp->ring_index == rsp->length) {
+			rsp->ring_index = 0;
+			rsp->ring_ptr = rsp->ring;
 		} else {
-			ha->response_ring_ptr++;
+			rsp->ring_ptr++;
 		}
 
 		if (pkt->entry_status != 0) {
 			DEBUG3(printk(KERN_INFO
-			    "scsi(%ld): Process error entry.\n", ha->host_no));
+			    "scsi(%ld): Process error entry.\n", vha->host_no));
 
-			qla2x00_error_entry(ha, (sts_entry_t *) pkt);
+			qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
 			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
 			wmb();
 			continue;
@@ -1438,13 +1477,13 @@ qla24xx_process_response_queue(struct scsi_qla_host *ha)
 
 		switch (pkt->entry_type) {
 		case STATUS_TYPE:
-			qla2x00_status_entry(ha, pkt);
+			qla2x00_status_entry(vha, rsp, pkt);
 			break;
 		case STATUS_CONT_TYPE:
-			qla2x00_status_cont_entry(ha, (sts_cont_entry_t *)pkt);
+			qla2x00_status_cont_entry(vha, (sts_cont_entry_t *)pkt);
 			break;
 		case VP_RPT_ID_IOCB_TYPE:
-			qla24xx_report_id_acquisition(ha,
+			qla24xx_report_id_acquisition(vha,
 			    (struct vp_rpt_id_entry_24xx *)pkt);
 			break;
 		default:
@@ -1452,7 +1491,7 @@ qla24xx_process_response_queue(struct scsi_qla_host *ha)
 			DEBUG4(printk(KERN_WARNING
 			    "scsi(%ld): Received unknown response pkt type %x "
 			    "entry status=%x.\n",
-			    ha->host_no, pkt->entry_type, pkt->entry_status));
+			    vha->host_no, pkt->entry_type, pkt->entry_status));
 			break;
 		}
 		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
@@ -1460,14 +1499,15 @@ qla24xx_process_response_queue(struct scsi_qla_host *ha)
 	}
 
 	/* Adjust ring index */
-	WRT_REG_DWORD(&reg->rsp_q_out, ha->rsp_ring_index);
+	ha->isp_ops->wrt_rsp_reg(ha, rsp->id, rsp->ring_index);
 }
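
The response-queue walk above is a single-consumer ring: firmware produces entries, and the driver consumes until it reaches one still stamped RESPONSE_PROCESSED, re-stamping each slot as it hands it back (the driver adds a wmb() after each stamp and rings the out-pointer doorbell once at the end). A compact userspace model of the wrap-and-stamp loop, with an assumed sentinel value:

    #define RESPONSE_PROCESSED 0xDEADDEAD         /* assumed sentinel value */

    struct rsp_entry { unsigned int signature; /* IOCB payload ... */ };

    struct rsp_ring {
            struct rsp_entry *ring, *ring_ptr;    /* base and cursor */
            unsigned int length, ring_index;      /* entries and position */
    };

    static void consume_ring(struct rsp_ring *rsp,
                             void (*handle)(struct rsp_entry *))
    {
            while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
                    struct rsp_entry *pkt = rsp->ring_ptr;

                    /* Advance first, wrapping at the end of the ring. */
                    if (++rsp->ring_index == rsp->length) {
                            rsp->ring_index = 0;
                            rsp->ring_ptr = rsp->ring;
                    } else {
                            rsp->ring_ptr++;
                    }

                    handle(pkt);
                    pkt->signature = RESPONSE_PROCESSED;  /* return the slot */
            }
    }
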
 
 static void
-qla2xxx_check_risc_status(scsi_qla_host_t *ha)
+qla2xxx_check_risc_status(scsi_qla_host_t *vha)
 {
 	int rval;
 	uint32_t cnt;
+	struct qla_hw_data *ha = vha->hw;
 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
 
 	if (!IS_QLA25XX(ha))
@@ -1521,25 +1561,29 @@ done:
 irqreturn_t
 qla24xx_intr_handler(int irq, void *dev_id)
 {
-	scsi_qla_host_t	*ha;
+	scsi_qla_host_t	*vha;
+	struct qla_hw_data *ha;
 	struct device_reg_24xx __iomem *reg;
 	int		status;
 	unsigned long	iter;
 	uint32_t	stat;
 	uint32_t	hccr;
 	uint16_t	mb[4];
+	struct rsp_que *rsp;
 
-	ha = (scsi_qla_host_t *) dev_id;
-	if (!ha) {
+	rsp = (struct rsp_que *) dev_id;
+	if (!rsp) {
 		printk(KERN_INFO
-		    "%s(): NULL host pointer\n", __func__);
+		    "%s(): NULL response queue pointer\n", __func__);
 		return IRQ_NONE;
 	}
 
+	ha = rsp->hw;
 	reg = &ha->iobase->isp24;
 	status = 0;
 
 	spin_lock(&ha->hardware_lock);
+	vha = qla2x00_get_rsp_host(rsp);
 	for (iter = 50; iter--; ) {
 		stat = RD_REG_DWORD(&reg->host_status);
 		if (stat & HSRX_RISC_PAUSED) {
@@ -1547,7 +1591,7 @@ qla24xx_intr_handler(int irq, void *dev_id)
 				break;
 
 			if (ha->hw_event_pause_errors == 0)
-				qla2x00_post_hwe_work(ha, HW_EVENT_PARITY_ERR,
+				qla2x00_post_hwe_work(vha, HW_EVENT_PARITY_ERR,
 				    0, MSW(stat), LSW(stat));
 			else if (ha->hw_event_pause_errors < 0xffffffff)
 				ha->hw_event_pause_errors++;
@@ -1557,10 +1601,10 @@ qla24xx_intr_handler(int irq, void *dev_id)
 			qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
 			    "Dumping firmware!\n", hccr);
 
-			qla2xxx_check_risc_status(ha);
+			qla2xxx_check_risc_status(vha);
 
-			ha->isp_ops->fw_dump(ha, 1);
-			set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
+			ha->isp_ops->fw_dump(vha, 1);
+			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
 			break;
 		} else if ((stat & HSRX_RISC_INT) == 0)
 			break;
@@ -1570,7 +1614,7 @@ qla24xx_intr_handler(int irq, void *dev_id)
 		case 0x2:
 		case 0x10:
 		case 0x11:
-			qla24xx_mbx_completion(ha, MSW(stat));
+			qla24xx_mbx_completion(vha, MSW(stat));
 			status |= MBX_INTERRUPT;
 
 			break;
@@ -1579,15 +1623,16 @@ qla24xx_intr_handler(int irq, void *dev_id)
 			mb[1] = RD_REG_WORD(&reg->mailbox1);
 			mb[2] = RD_REG_WORD(&reg->mailbox2);
 			mb[3] = RD_REG_WORD(&reg->mailbox3);
-			qla2x00_async_event(ha, mb);
+			qla2x00_async_event(vha, rsp, mb);
 			break;
 		case 0x13:
-			qla24xx_process_response_queue(ha);
+		case 0x14:
+			qla24xx_process_response_queue(rsp);
 			break;
 		default:
 			DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
 			    "(%d).\n",
-			    ha->host_no, stat & 0xff));
+			    vha->host_no, stat & 0xff));
 			break;
 		}
 		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
@@ -1607,15 +1652,22 @@ qla24xx_intr_handler(int irq, void *dev_id)
 static irqreturn_t
 qla24xx_msix_rsp_q(int irq, void *dev_id)
 {
-	scsi_qla_host_t	*ha;
+	struct qla_hw_data *ha;
+	struct rsp_que *rsp;
 	struct device_reg_24xx __iomem *reg;
 
-	ha = dev_id;
+	rsp = (struct rsp_que *) dev_id;
+	if (!rsp) {
+		printk(KERN_INFO
+		"%s(): NULL response queue pointer\n", __func__);
+		return IRQ_NONE;
+	}
+	ha = rsp->hw;
 	reg = &ha->iobase->isp24;
 
 	spin_lock_irq(&ha->hardware_lock);
 
-	qla24xx_process_response_queue(ha);
+	qla24xx_process_response_queue(rsp);
 	WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
 
 	spin_unlock_irq(&ha->hardware_lock);
@@ -1624,20 +1676,64 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
 }
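
Every handler in the series now opens with the same guard: a NULL dev_id means the vector was wired up wrong, and returning IRQ_NONE lets the core's spurious-interrupt accounting see the stray rather than crashing in the handler. The common shape, with local stand-ins for the kernel types:

    typedef enum { IRQ_NONE, IRQ_HANDLED } irqreturn_t;  /* local stand-ins */

    struct rsp_que;

    static irqreturn_t handler_shape(int irq, void *dev_id)
    {
            struct rsp_que *rsp = dev_id;

            (void)irq;
            if (!rsp)                /* mis-wired vector: let the core's  */
                    return IRQ_NONE; /* spurious-IRQ accounting flag it   */

            /* ... lock, service rsp, clear HCCR, unlock ... */
            return IRQ_HANDLED;
    }
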
 
 static irqreturn_t
+qla25xx_msix_rsp_q(int irq, void *dev_id)
+{
+	struct qla_hw_data *ha;
+	struct rsp_que *rsp;
+	struct device_reg_24xx __iomem *reg;
+	uint16_t msix_disabled_hccr = 0;
+
+	rsp = (struct rsp_que *) dev_id;
+	if (!rsp) {
+		printk(KERN_INFO
+			"%s(): NULL response queue pointer\n", __func__);
+		return IRQ_NONE;
+	}
+	ha = rsp->hw;
+	reg = &ha->iobase->isp24;
+
+	spin_lock_irq(&ha->hardware_lock);
+
+	msix_disabled_hccr = rsp->options;
+	if (!rsp->id)
+		msix_disabled_hccr &= __constant_cpu_to_le32(BIT_22);
+	else
+		msix_disabled_hccr &= __constant_cpu_to_le32(BIT_6);
+
+	qla24xx_process_response_queue(rsp);
+
+	if (!msix_disabled_hccr)
+		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
+
+	spin_unlock_irq(&ha->hardware_lock);
+
+	return IRQ_HANDLED;
+}
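
Here rsp->options carries the queue-option bits handed to firmware at queue-creation time; BIT_22 (base queue) or BIT_6 (added queues) marks a queue created with interrupt handshaking disabled, in which case the handler skips the HCCR clear. As written, though, the uint16_t accumulator cannot represent BIT_22, so the base-queue mask appears to always collapse to zero (and the le32 conversion on what looks like a CPU-order field is equally suspect). A hypothetical, width-correct sketch of what the test seems intended to compute, not the driver's code:

    #include <stdint.h>

    #define BIT_6  (1u << 6)
    #define BIT_22 (1u << 22)

    /* The accumulator must be at least 32 bits for BIT_22 to survive. */
    static int msix_handshake_disabled(uint32_t options, uint16_t queue_id)
    {
            return (options & (queue_id ? BIT_6 : BIT_22)) != 0;
    }
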
+
+static irqreturn_t
 qla24xx_msix_default(int irq, void *dev_id)
 {
-	scsi_qla_host_t	*ha;
+	scsi_qla_host_t	*vha;
+	struct qla_hw_data *ha;
+	struct rsp_que *rsp;
 	struct device_reg_24xx __iomem *reg;
 	int		status;
 	uint32_t	stat;
 	uint32_t	hccr;
 	uint16_t	mb[4];
 
-	ha = dev_id;
+	rsp = (struct rsp_que *) dev_id;
+	if (!rsp) {
+		DEBUG(printk(
+		"%s(): NULL response queue pointer\n", __func__));
+		return IRQ_NONE;
+	}
+	ha = rsp->hw;
 	reg = &ha->iobase->isp24;
 	status = 0;
 
 	spin_lock_irq(&ha->hardware_lock);
+	vha = qla2x00_get_rsp_host(rsp);
 	do {
 		stat = RD_REG_DWORD(&reg->host_status);
 		if (stat & HSRX_RISC_PAUSED) {
@@ -1645,7 +1741,7 @@ qla24xx_msix_default(int irq, void *dev_id)
 				break;
 
 			if (ha->hw_event_pause_errors == 0)
-				qla2x00_post_hwe_work(ha, HW_EVENT_PARITY_ERR,
+				qla2x00_post_hwe_work(vha, HW_EVENT_PARITY_ERR,
 				    0, MSW(stat), LSW(stat));
 			else if (ha->hw_event_pause_errors < 0xffffffff)
 				ha->hw_event_pause_errors++;
@@ -1655,10 +1751,10 @@ qla24xx_msix_default(int irq, void *dev_id)
 			qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
 			    "Dumping firmware!\n", hccr);
 
-			qla2xxx_check_risc_status(ha);
+			qla2xxx_check_risc_status(vha);
 
-			ha->isp_ops->fw_dump(ha, 1);
-			set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
+			ha->isp_ops->fw_dump(vha, 1);
+			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
 			break;
 		} else if ((stat & HSRX_RISC_INT) == 0)
 			break;
@@ -1668,7 +1764,7 @@ qla24xx_msix_default(int irq, void *dev_id)
 		case 0x2:
 		case 0x10:
 		case 0x11:
-			qla24xx_mbx_completion(ha, MSW(stat));
+			qla24xx_mbx_completion(vha, MSW(stat));
 			status |= MBX_INTERRUPT;
 
 			break;
@@ -1677,15 +1773,16 @@ qla24xx_msix_default(int irq, void *dev_id)
 			mb[1] = RD_REG_WORD(&reg->mailbox1);
 			mb[2] = RD_REG_WORD(&reg->mailbox2);
 			mb[3] = RD_REG_WORD(&reg->mailbox3);
-			qla2x00_async_event(ha, mb);
+			qla2x00_async_event(vha, rsp, mb);
 			break;
 		case 0x13:
-			qla24xx_process_response_queue(ha);
+		case 0x14:
+			qla24xx_process_response_queue(rsp);
 			break;
 		default:
 			DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
 			    "(%d).\n",
-			    ha->host_no, stat & 0xff));
+			    vha->host_no, stat & 0xff));
 			break;
 		}
 		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
@@ -1710,70 +1807,138 @@ struct qla_init_msix_entry {
 	irq_handler_t handler;
 };
 
-static struct qla_init_msix_entry imsix_entries[QLA_MSIX_ENTRIES] = {
-	{ QLA_MSIX_DEFAULT, QLA_MIDX_DEFAULT,
-		"qla2xxx (default)", qla24xx_msix_default },
+static struct qla_init_msix_entry base_queue = {
+	.entry = 0,
+	.index = 0,
+	.name = "qla2xxx (default)",
+	.handler = qla24xx_msix_default,
+};
+
+static struct qla_init_msix_entry base_rsp_queue = {
+	.entry = 1,
+	.index = 1,
+	.name = "qla2xxx (rsp_q)",
+	.handler = qla24xx_msix_rsp_q,
+};
 
-	{ QLA_MSIX_RSP_Q, QLA_MIDX_RSP_Q,
-		"qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
+static struct qla_init_msix_entry multi_rsp_queue = {
+	.entry = 1,
+	.index = 1,
+	.name = "qla2xxx (multi_q)",
+	.handler = qla25xx_msix_rsp_q,
 };
 
 static void
-qla24xx_disable_msix(scsi_qla_host_t *ha)
+qla24xx_disable_msix(struct qla_hw_data *ha)
 {
 	int i;
 	struct qla_msix_entry *qentry;
 
-	for (i = 0; i < QLA_MSIX_ENTRIES; i++) {
-		qentry = &ha->msix_entries[imsix_entries[i].index];
+	for (i = 0; i < ha->msix_count; i++) {
+		qentry = &ha->msix_entries[i];
 		if (qentry->have_irq)
-			free_irq(qentry->msix_vector, ha);
+			free_irq(qentry->vector, qentry->rsp);
 	}
 	pci_disable_msix(ha->pdev);
+	kfree(ha->msix_entries);
+	ha->msix_entries = NULL;
+	ha->flags.msix_enabled = 0;
 }
 
 static int
-qla24xx_enable_msix(scsi_qla_host_t *ha)
+qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
 {
 	int i, ret;
-	struct msix_entry entries[QLA_MSIX_ENTRIES];
+	struct msix_entry *entries;
 	struct qla_msix_entry *qentry;
+	struct qla_init_msix_entry *msix_queue;
 
-	for (i = 0; i < QLA_MSIX_ENTRIES; i++)
-		entries[i].entry = imsix_entries[i].entry;
+	entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
+					GFP_KERNEL);
+	if (!entries)
+		return -ENOMEM;
 
-	ret = pci_enable_msix(ha->pdev, entries, ARRAY_SIZE(entries));
+	for (i = 0; i < ha->msix_count; i++)
+		entries[i].entry = i;
+
+	ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
 	if (ret) {
 		qla_printk(KERN_WARNING, ha,
-		    "MSI-X: Failed to enable support -- %d/%d\n",
-		    QLA_MSIX_ENTRIES, ret);
+			"MSI-X: Failed to enable support -- %d/%d\n"
+			"Retrying with %d vectors\n", ha->msix_count, ret, ret);
+		ha->msix_count = ret;
+		ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
+		if (ret) {
+			qla_printk(KERN_WARNING, ha, "MSI-X: Failed to enable"
+				" support, giving up -- %d/%d\n",
+				ha->msix_count, ret);
+			goto msix_out;
+		}
+		ha->max_queues = ha->msix_count - 1;
+	}
+	ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
+				ha->msix_count, GFP_KERNEL);
+	if (!ha->msix_entries) {
+		ret = -ENOMEM;
 		goto msix_out;
 	}
 	ha->flags.msix_enabled = 1;
 
-	for (i = 0; i < QLA_MSIX_ENTRIES; i++) {
-		qentry = &ha->msix_entries[imsix_entries[i].index];
-		qentry->msix_vector = entries[i].vector;
-		qentry->msix_entry = entries[i].entry;
+	for (i = 0; i < ha->msix_count; i++) {
+		qentry = &ha->msix_entries[i];
+		qentry->vector = entries[i].vector;
+		qentry->entry = entries[i].entry;
 		qentry->have_irq = 0;
-		ret = request_irq(qentry->msix_vector,
-		    imsix_entries[i].handler, 0, imsix_entries[i].name, ha);
-		if (ret) {
-			qla_printk(KERN_WARNING, ha,
-			    "MSI-X: Unable to register handler -- %x/%d.\n",
-			    imsix_entries[i].index, ret);
-			qla24xx_disable_msix(ha);
-			goto msix_out;
-		}
-		qentry->have_irq = 1;
+		qentry->rsp = NULL;
+	}
+
+	/* Enable MSI-X for AENs for queue 0 */
+	qentry = &ha->msix_entries[0];
+	ret = request_irq(qentry->vector, base_queue.handler, 0,
+					base_queue.name, rsp);
+	if (ret) {
+		qla_printk(KERN_WARNING, ha,
+			"MSI-X: Unable to register handler -- %x/%d.\n",
+			qentry->vector, ret);
+		qla24xx_disable_msix(ha);
+		goto msix_out;
 	}
+	qentry->have_irq = 1;
+	qentry->rsp = rsp;
+
+	/* Enable MSI-X vector for response queue update for queue 0 */
+	if (ha->max_queues > 1 && ha->mqiobase) {
+		ha->mqenable = 1;
+		msix_queue = &multi_rsp_queue;
+		qla_printk(KERN_INFO, ha,
+				"MQ enabled, Number of Queue Resources: %d\n",
+				ha->max_queues);
+	} else {
+		ha->mqenable = 0;
+		msix_queue = &base_rsp_queue;
+	}
+
+	qentry = &ha->msix_entries[1];
+	ret = request_irq(qentry->vector, msix_queue->handler, 0,
+						msix_queue->name, rsp);
+	if (ret) {
+		qla_printk(KERN_WARNING, ha,
+			"MSI-X: Unable to register handler -- %x/%d.\n",
+			qentry->vector, ret);
+		qla24xx_disable_msix(ha);
+		ha->mqenable = 0;
+		goto msix_out;
+	}
+	qentry->have_irq = 1;
+	qentry->rsp = rsp;
 
 msix_out:
+	kfree(entries);
 	return ret;
 }
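
pci_enable_msix() in this era returned a positive count when fewer vectors were available than requested; the code above retries exactly once with that count and shrinks max_queues to fit. A standalone model of the negotiate-down pattern, with a stub in place of the PCI call:

    #include <stdio.h>

    /* Stand-in for the old pci_enable_msix(): 0 on success, a positive
     * count of available vectors if the request was too large, or a
     * negative errno otherwise. */
    static int fake_enable_msix(int requested, int available)
    {
            return (requested <= available) ? 0 : available;
    }

    /* Negotiate down exactly once, as the driver does; returns the vector
     * count obtained, or -1 on failure. */
    static int negotiate_vectors(int wanted, int available)
    {
            int ret = fake_enable_msix(wanted, available);

            if (ret > 0) {
                    printf("wanted %d, retrying with %d vectors\n", wanted, ret);
                    wanted = ret;
                    ret = fake_enable_msix(wanted, available);
            }
            return ret ? -1 : wanted;
    }
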
 
 int
-qla2x00_request_irqs(scsi_qla_host_t *ha)
+qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
 {
 	int ret;
 	device_reg_t __iomem *reg = ha->iobase;
@@ -1782,11 +1947,11 @@ qla2x00_request_irqs(scsi_qla_host_t *ha)
 	if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha))
 		goto skip_msix;
 
-        if (IS_QLA2432(ha) && (ha->chip_revision < QLA_MSIX_CHIP_REV_24XX ||
-	    !QLA_MSIX_FW_MODE_1(ha->fw_attributes))) {
+	if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX ||
+		!QLA_MSIX_FW_MODE_1(ha->fw_attributes))) {
 		DEBUG2(qla_printk(KERN_WARNING, ha,
-		    "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n",
-		    ha->chip_revision, ha->fw_attributes));
+		"MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n",
+			ha->pdev->revision, ha->fw_attributes));
 
 		goto skip_msix;
 	}
@@ -1803,7 +1968,7 @@ qla2x00_request_irqs(scsi_qla_host_t *ha)
 		goto skip_msi;
 	}
 
-	ret = qla24xx_enable_msix(ha);
+	ret = qla24xx_enable_msix(ha, rsp);
 	if (!ret) {
 		DEBUG2(qla_printk(KERN_INFO, ha,
 		    "MSI-X: Enabled (0x%X, 0x%X).\n", ha->chip_revision,
@@ -1825,7 +1990,7 @@ skip_msix:
 skip_msi:
 
 	ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
-	    IRQF_DISABLED|IRQF_SHARED, QLA2XXX_DRIVER_NAME, ha);
+	    IRQF_DISABLED|IRQF_SHARED, QLA2XXX_DRIVER_NAME, rsp);
 	if (ret) {
 		qla_printk(KERN_WARNING, ha,
 		    "Failed to reserve interrupt %d already in use.\n",
@@ -1833,10 +1998,8 @@ skip_msi:
 		goto fail;
 	}
 	ha->flags.inta_enabled = 1;
-	ha->host->irq = ha->pdev->irq;
 clear_risc_ints:
 
-	ha->isp_ops->disable_intrs(ha);
 	spin_lock_irq(&ha->hardware_lock);
 	if (IS_FWI2_CAPABLE(ha)) {
 		WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT);
@@ -1853,13 +2016,74 @@ fail:
 }
 
 void
-qla2x00_free_irqs(scsi_qla_host_t *ha)
+qla2x00_free_irqs(scsi_qla_host_t *vha)
 {
+	struct qla_hw_data *ha = vha->hw;
+	struct rsp_que *rsp = ha->rsp_q_map[0];
 
 	if (ha->flags.msix_enabled)
 		qla24xx_disable_msix(ha);
 	else if (ha->flags.inta_enabled) {
-		free_irq(ha->host->irq, ha);
+		free_irq(ha->pdev->irq, rsp);
 		pci_disable_msi(ha->pdev);
 	}
 }
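
Teardown mirrors setup: free_irq() must receive the same dev_id cookie that request_irq() registered, which is why both the request path above and this function now pass the response-queue object rather than the adapter. A toy registry that makes the pairing explicit (in the kernel, a mismatched cookie warns and frees nothing):

    #include <stddef.h>

    struct irq_slot { void *dev_id; int in_use; };

    static int request_slot(struct irq_slot *s, void *dev_id)
    {
            if (s->in_use)
                    return -1;
            s->dev_id = dev_id;
            s->in_use = 1;
            return 0;
    }

    static int free_slot(struct irq_slot *s, void *dev_id)
    {
            if (!s->in_use || s->dev_id != dev_id)
                    return -1;             /* mismatched cookie */
            s->dev_id = NULL;
            s->in_use = 0;
            return 0;
    }
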
+
+static struct scsi_qla_host *
+qla2x00_get_rsp_host(struct rsp_que *rsp)
+{
+	srb_t *sp;
+	struct qla_hw_data *ha = rsp->hw;
+	struct scsi_qla_host *vha = NULL;
+	struct sts_entry_24xx *pkt;
+	struct req_que *req;
+
+	if (rsp->id) {
+		pkt = (struct sts_entry_24xx *) rsp->ring_ptr;
+		req = rsp->req;
+		if (pkt && pkt->handle < MAX_OUTSTANDING_COMMANDS) {
+			sp = req->outstanding_cmds[pkt->handle];
+			if (sp)
+				vha = sp->vha;
+		}
+	}
+	if (!vha)
+		/* handle it in base queue */
+		vha = pci_get_drvdata(ha->pdev);
+
+	return vha;
+}
+
+int qla25xx_request_irq(struct rsp_que *rsp)
+{
+	struct qla_hw_data *ha = rsp->hw;
+	struct qla_init_msix_entry *intr = &multi_rsp_queue;
+	struct qla_msix_entry *msix = rsp->msix;
+	int ret;
+
+	ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp);
+	if (ret) {
+		qla_printk(KERN_WARNING, ha,
+			"MSI-X: Unable to register handler -- %x/%d.\n",
+			msix->vector, ret);
+		return ret;
+	}
+	msix->have_irq = 1;
+	msix->rsp = rsp;
+	return ret;
+}
+
+void
+qla25xx_wrt_rsp_reg(struct qla_hw_data *ha, uint16_t id, uint16_t index)
+{
+	device_reg_t __iomem *reg = (void *) ha->mqiobase + QLA_QUE_PAGE * id;
+	WRT_REG_DWORD(&reg->isp25mq.rsp_q_out, index);
+}
+
+void
+qla24xx_wrt_rsp_reg(struct qla_hw_data *ha, uint16_t id, uint16_t index)
+{
+	device_reg_t __iomem *reg = (void *) ha->iobase;
+	WRT_REG_DWORD(&reg->isp24.rsp_q_out, index);
+}
+
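
These two writers are the concrete implementations behind the ha->isp_ops->wrt_rsp_reg indirection used from qla24xx_process_response_queue(): ISP24xx has a single register block, while ISP25xx multiqueue parts expose one register page per queue at QLA_QUE_PAGE intervals from mqiobase. A sketch of the per-queue address computation, with the stride value assumed:

    #include <stdint.h>

    #define QLA_QUE_PAGE 0x1000u   /* assumed stride between queue pages */

    /* 25xx-style multiqueue: queue N's registers sit N pages past queue 0,
     * so the out-pointer address is a function of the queue id. */
    static inline volatile uint32_t *
    rsp_q_out_reg(uint8_t *mqiobase, uint16_t id, uint32_t out_offset)
    {
            return (volatile uint32_t *)
                    (mqiobase + (uint32_t)id * QLA_QUE_PAGE + out_offset);
    }
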
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 3402746ec128..a99976f5fabd 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -29,7 +29,7 @@
  *	Kernel context.
  */
 static int
-qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
+qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
 {
 	int		rval;
 	unsigned long    flags = 0;
@@ -42,15 +42,16 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
 	uint32_t	cnt;
 	uint32_t	mboxes;
 	unsigned long	wait_time;
-	scsi_qla_host_t *ha = to_qla_parent(pvha);
+	struct qla_hw_data *ha = vha->hw;
+	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
 
 	reg = ha->iobase;
-	io_lock_on = ha->flags.init_done;
+	io_lock_on = base_vha->flags.init_done;
 
 	rval = QLA_SUCCESS;
-	abort_active = test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags);
+	abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
 
-	DEBUG11(printk("%s(%ld): entered.\n", __func__, pvha->host_no));
+	DEBUG11(printk("%s(%ld): entered.\n", __func__, base_vha->host_no));
 
 	/*
 	 * Wait for active mailbox commands to finish by waiting at most tov
@@ -62,7 +63,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
 		    mcp->tov * HZ)) {
 			/* Timeout occurred. Return error. */
 			DEBUG2_3_11(printk("%s(%ld): cmd access timeout. "
-			    "Exiting.\n", __func__, ha->host_no));
+			    "Exiting.\n", __func__, base_vha->host_no));
 			return QLA_FUNCTION_TIMEOUT;
 		}
 	}
@@ -72,7 +73,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
 	ha->mcp = mcp;
 
 	DEBUG11(printk("scsi(%ld): prepare to issue mbox cmd=0x%x.\n",
-	    ha->host_no, mcp->mb[0]));
+	    base_vha->host_no, mcp->mb[0]));
 
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 
@@ -100,15 +101,16 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
 
 #if defined(QL_DEBUG_LEVEL_1)
 	printk("%s(%ld): Loaded MBX registers (displayed in bytes) = \n",
-	    __func__, ha->host_no);
+	    __func__, base_vha->host_no);
 	qla2x00_dump_buffer((uint8_t *)mcp->mb, 16);
 	printk("\n");
 	qla2x00_dump_buffer(((uint8_t *)mcp->mb + 0x10), 16);
 	printk("\n");
 	qla2x00_dump_buffer(((uint8_t *)mcp->mb + 0x20), 8);
 	printk("\n");
-	printk("%s(%ld): I/O address = %p.\n", __func__, ha->host_no, optr);
-	qla2x00_dump_regs(ha);
+	printk("%s(%ld): I/O address = %p.\n", __func__, base_vha->host_no,
+		optr);
+	qla2x00_dump_regs(base_vha);
 #endif
 
 	/* Issue set host interrupt command to send cmd out. */
@@ -117,7 +119,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
 
 	/* Unlock mbx registers and wait for interrupt */
 	DEBUG11(printk("%s(%ld): going to unlock irq & waiting for interrupt. "
-	    "jiffies=%lx.\n", __func__, ha->host_no, jiffies));
+	    "jiffies=%lx.\n", __func__, base_vha->host_no, jiffies));
 
 	/* Wait for mbx cmd completion until timeout */
 
@@ -137,7 +139,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
 
 	} else {
 		DEBUG3_11(printk("%s(%ld): cmd=%x POLLING MODE.\n", __func__,
-		    ha->host_no, command));
+		    base_vha->host_no, command));
 
 		if (IS_FWI2_CAPABLE(ha))
 			WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
@@ -151,7 +153,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
 				break;
 
 			/* Check for pending interrupts. */
-			qla2x00_poll(ha);
+			qla2x00_poll(ha->rsp_q_map[0]);
 
 			if (command != MBC_LOAD_RISC_RAM_EXTENDED &&
 			    !ha->flags.mbox_int)
@@ -164,7 +166,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
 		uint16_t *iptr2;
 
 		DEBUG3_11(printk("%s(%ld): cmd %x completed.\n", __func__,
-		    ha->host_no, command));
+		    base_vha->host_no, command));
 
 		/* Got interrupt. Clear the flag. */
 		ha->flags.mbox_int = 0;
@@ -200,12 +202,12 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
 			ictrl = RD_REG_WORD(&reg->isp.ictrl);
 		}
 		printk("%s(%ld): **** MB Command Timeout for cmd %x ****\n",
-		    __func__, ha->host_no, command);
+		    __func__, base_vha->host_no, command);
 		printk("%s(%ld): icontrol=%x jiffies=%lx\n", __func__,
-		    ha->host_no, ictrl, jiffies);
+		    base_vha->host_no, ictrl, jiffies);
 		printk("%s(%ld): *** mailbox[0] = 0x%x ***\n", __func__,
-		    ha->host_no, mb0);
-		qla2x00_dump_regs(ha);
+		    base_vha->host_no, mb0);
+		qla2x00_dump_regs(base_vha);
 #endif
 
 		rval = QLA_FUNCTION_TIMEOUT;
@@ -218,10 +220,10 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
 
 	if (abort_active || !io_lock_on) {
 		DEBUG11(printk("%s(%ld): checking for additional resp "
-		    "interrupt.\n", __func__, ha->host_no));
+		    "interrupt.\n", __func__, base_vha->host_no));
 
 		/* polling mode for non isp_abort commands. */
-		qla2x00_poll(ha);
+		qla2x00_poll(ha->rsp_q_map[0]);
 	}
 
 	if (rval == QLA_FUNCTION_TIMEOUT &&
@@ -229,35 +231,37 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
 		if (!io_lock_on || (mcp->flags & IOCTL_CMD)) {
 			/* not in dpc. schedule it for dpc to take over. */
 			DEBUG(printk("%s(%ld): timeout schedule "
-			    "isp_abort_needed.\n", __func__, ha->host_no));
+			"isp_abort_needed.\n", __func__,
+			base_vha->host_no));
 			DEBUG2_3_11(printk("%s(%ld): timeout schedule "
-			    "isp_abort_needed.\n", __func__, ha->host_no));
+			"isp_abort_needed.\n", __func__,
+			base_vha->host_no));
 			qla_printk(KERN_WARNING, ha,
 			    "Mailbox command timeout occurred. Scheduling ISP "
 			    "abort.\n");
-			set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
-			qla2xxx_wake_dpc(ha);
+			set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
+			qla2xxx_wake_dpc(vha);
 		} else if (!abort_active) {
 			/* call abort directly since we are in the DPC thread */
 			DEBUG(printk("%s(%ld): timeout calling abort_isp\n",
-			    __func__, ha->host_no));
+			    __func__, base_vha->host_no));
 			DEBUG2_3_11(printk("%s(%ld): timeout calling "
-			    "abort_isp\n", __func__, ha->host_no));
+			    "abort_isp\n", __func__, base_vha->host_no));
 			qla_printk(KERN_WARNING, ha,
 			    "Mailbox command timeout occurred. Issuing ISP "
 			    "abort.\n");
 
-			set_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags);
-			clear_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
-			if (qla2x00_abort_isp(ha)) {
+			set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
+			clear_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
+			if (qla2x00_abort_isp(base_vha)) {
 				/* Failed. retry later. */
-				set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
+				set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
 			}
-			clear_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags);
+			clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
 			DEBUG(printk("%s(%ld): finished abort_isp\n", __func__,
-			    ha->host_no));
+			    base_vha->host_no));
 			DEBUG2_3_11(printk("%s(%ld): finished abort_isp\n",
-			    __func__, ha->host_no));
+			    __func__, base_vha->host_no));
 		}
 	}
 
@@ -267,24 +271,26 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
 
 	if (rval) {
 		DEBUG2_3_11(printk("%s(%ld): **** FAILED. mbx0=%x, mbx1=%x, "
-		    "mbx2=%x, cmd=%x ****\n", __func__, ha->host_no,
+		    "mbx2=%x, cmd=%x ****\n", __func__, base_vha->host_no,
 		    mcp->mb[0], mcp->mb[1], mcp->mb[2], command));
 	} else {
-		DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
+		DEBUG11(printk("%s(%ld): done.\n", __func__,
+		base_vha->host_no));
 	}
 
 	return rval;
 }
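
Two structural changes in this function: mailbox bookkeeping (mcp, mbox flags, hardware_lock) now lives in qla_hw_data, with the physical port recovered via pci_get_drvdata(ha->pdev) for init_done/dpc_flags and logging; and polled completion now names the base response queue, ha->rsp_q_map[0], explicitly. A toy model of the polled wait loop's shape, with placeholder names rather than driver API:

    #include <stdbool.h>
    #include <time.h>

    struct mbox_state { volatile bool mbox_int; };  /* set on completion */

    static void drain_base_response_queue(struct mbox_state *mb)
    {
            (void)mb;  /* stands in for qla2x00_poll(ha->rsp_q_map[0]) */
    }

    /* Keep servicing the base queue until the completion flag flips or
     * the timeout expires. */
    static bool poll_for_mailbox(struct mbox_state *mb, time_t deadline)
    {
            while (time(NULL) < deadline) {
                    if (mb->mbox_int)
                            return true;
                    drain_base_response_queue(mb);
            }
            return false;
    }
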
 
 int
-qla2x00_load_ram(scsi_qla_host_t *ha, dma_addr_t req_dma, uint32_t risc_addr,
+qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
     uint32_t risc_code_size)
 {
 	int rval;
+	struct qla_hw_data *ha = vha->hw;
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+	DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
 
 	if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) {
 		mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED;
@@ -312,13 +318,13 @@ qla2x00_load_ram(scsi_qla_host_t *ha, dma_addr_t req_dma, uint32_t risc_addr,
 	mcp->in_mb = MBX_0;
 	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
-	rval = qla2x00_mailbox_command(ha, mcp);
+	rval = qla2x00_mailbox_command(vha, mcp);
 
 	if (rval != QLA_SUCCESS) {
 		DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__,
-		    ha->host_no, rval, mcp->mb[0]));
+		    vha->host_no, rval, mcp->mb[0]));
 	} else {
-		DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
+		DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
 	}
 
 	return rval;
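
Every wrapper in this file follows the same convention: fill mcp->mb[], set the out_mb/in_mb bitmaps naming which mailbox registers the command consumes and produces, and let qla2x00_mailbox_command() do the register traffic. A minimal model of how such a bitmap drives the outbound register copy:

    #include <stdint.h>

    struct mbx_cmd {
            uint16_t mb[32];           /* register values, in and out */
            uint32_t out_mb, in_mb;    /* bit i => register i is used */
    };

    /* Write only the registers the command declares in out_mb. */
    static void load_out_regs(const struct mbx_cmd *mcp,
                              volatile uint16_t *regs, int nregs)
    {
            for (int i = 0; i < nregs; i++)
                    if (mcp->out_mb & (1u << i))
                            regs[i] = mcp->mb[i];
    }
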
@@ -340,13 +346,14 @@ qla2x00_load_ram(scsi_qla_host_t *ha, dma_addr_t req_dma, uint32_t risc_addr,
  *     Kernel context.
  */
 int
-qla2x00_execute_fw(scsi_qla_host_t *ha, uint32_t risc_addr)
+qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
 {
 	int rval;
+	struct qla_hw_data *ha = vha->hw;
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+	DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
 
 	mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
 	mcp->out_mb = MBX_0;
@@ -369,18 +376,18 @@ qla2x00_execute_fw(scsi_qla_host_t *ha, uint32_t risc_addr)
 
 	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
-	rval = qla2x00_mailbox_command(ha, mcp);
+	rval = qla2x00_mailbox_command(vha, mcp);
 
 	if (rval != QLA_SUCCESS) {
 		DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__,
-		    ha->host_no, rval, mcp->mb[0]));
+		    vha->host_no, rval, mcp->mb[0]));
 	} else {
 		if (IS_FWI2_CAPABLE(ha)) {
 			DEBUG11(printk("%s(%ld): done exchanges=%x.\n",
-			    __func__, ha->host_no, mcp->mb[1]));
+			    __func__, vha->host_no, mcp->mb[1]));
 		} else {
 			DEBUG11(printk("%s(%ld): done.\n", __func__,
-			    ha->host_no));
+			    vha->host_no));
 		}
 	}
 
@@ -404,28 +411,28 @@ qla2x00_execute_fw(scsi_qla_host_t *ha, uint32_t risc_addr)
  *	Kernel context.
  */
 void
-qla2x00_get_fw_version(scsi_qla_host_t *ha, uint16_t *major, uint16_t *minor,
+qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor,
     uint16_t *subminor, uint16_t *attributes, uint32_t *memory)
 {
 	int		rval;
 	mbx_cmd_t	mc;
 	mbx_cmd_t	*mcp = &mc;
 
-	DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+	DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
 
 	mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
 	mcp->out_mb = MBX_0;
 	mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
 	mcp->flags = 0;
 	mcp->tov = MBX_TOV_SECONDS;
-	rval = qla2x00_mailbox_command(ha, mcp);
+	rval = qla2x00_mailbox_command(vha, mcp);
 
 	/* Return mailbox data. */
 	*major = mcp->mb[1];
 	*minor = mcp->mb[2];
 	*subminor = mcp->mb[3];
 	*attributes = mcp->mb[6];
-	if (IS_QLA2100(ha) || IS_QLA2200(ha))
+	if (IS_QLA2100(vha->hw) || IS_QLA2200(vha->hw))
 		*memory = 0x1FFFF;			/* Defaults to 128KB. */
 	else
 		*memory = (mcp->mb[5] << 16) | mcp->mb[4];
@@ -433,10 +440,10 @@ qla2x00_get_fw_version(scsi_qla_host_t *ha, uint16_t *major, uint16_t *minor,
 	if (rval != QLA_SUCCESS) {
 		/*EMPTY*/
 		DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
-		    ha->host_no, rval));
+		    vha->host_no, rval));
 	} else {
 		/*EMPTY*/
-		DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
+		DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
 	}
 }
 
@@ -455,32 +462,32 @@ qla2x00_get_fw_version(scsi_qla_host_t *ha, uint16_t *major, uint16_t *minor,
  *	Kernel context.
  */
 int
-qla2x00_get_fw_options(scsi_qla_host_t *ha, uint16_t *fwopts)
+qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
 {
 	int rval;
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+	DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
 
 	mcp->mb[0] = MBC_GET_FIRMWARE_OPTION;
 	mcp->out_mb = MBX_0;
 	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
 	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
-	rval = qla2x00_mailbox_command(ha, mcp);
+	rval = qla2x00_mailbox_command(vha, mcp);
 
 	if (rval != QLA_SUCCESS) {
 		/*EMPTY*/
 		DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
-		    ha->host_no, rval));
+		    vha->host_no, rval));
 	} else {
 		fwopts[0] = mcp->mb[0];
 		fwopts[1] = mcp->mb[1];
 		fwopts[2] = mcp->mb[2];
 		fwopts[3] = mcp->mb[3];
 
-		DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
+		DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
 	}
 
 	return rval;
@@ -502,13 +509,13 @@ qla2x00_get_fw_options(scsi_qla_host_t *ha, uint16_t *fwopts)
  *	Kernel context.
  */
 int
-qla2x00_set_fw_options(scsi_qla_host_t *ha, uint16_t *fwopts)
+qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
 {
 	int rval;
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+	DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
 
 	mcp->mb[0] = MBC_SET_FIRMWARE_OPTION;
 	mcp->mb[1] = fwopts[1];
@@ -516,7 +523,7 @@ qla2x00_set_fw_options(scsi_qla_host_t *ha, uint16_t *fwopts)
 	mcp->mb[3] = fwopts[3];
 	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
 	mcp->in_mb = MBX_0;
-	if (IS_FWI2_CAPABLE(ha)) {
+	if (IS_FWI2_CAPABLE(vha->hw)) {
 		mcp->in_mb |= MBX_1;
 	} else {
 		mcp->mb[10] = fwopts[10];
@@ -526,17 +533,17 @@ qla2x00_set_fw_options(scsi_qla_host_t *ha, uint16_t *fwopts)
 	}
 	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
-	rval = qla2x00_mailbox_command(ha, mcp);
+	rval = qla2x00_mailbox_command(vha, mcp);
 
 	fwopts[0] = mcp->mb[0];
 
 	if (rval != QLA_SUCCESS) {
 		/*EMPTY*/
 		DEBUG2_3_11(printk("%s(%ld): failed=%x (%x/%x).\n", __func__,
-		    ha->host_no, rval, mcp->mb[0], mcp->mb[1]));
+		    vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
 	} else {
 		/*EMPTY*/
-		DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
+		DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
 	}
 
 	return rval;
@@ -558,13 +565,14 @@ qla2x00_set_fw_options(scsi_qla_host_t *ha, uint16_t *fwopts)
  *	Kernel context.
  */
 int
-qla2x00_mbx_reg_test(scsi_qla_host_t *ha)
+qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
 {
 	int rval;
+	struct qla_hw_data *ha = vha->hw;
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	DEBUG11(printk("qla2x00_mbx_reg_test(%ld): entered.\n", ha->host_no));
+	DEBUG11(printk("qla2x00_mbx_reg_test(%ld): entered.\n", vha->host_no));
 
 	mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
 	mcp->mb[1] = 0xAAAA;
@@ -578,7 +586,7 @@ qla2x00_mbx_reg_test(scsi_qla_host_t *ha)
 	mcp->in_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
 	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
-	rval = qla2x00_mailbox_command(ha, mcp);
+	rval = qla2x00_mailbox_command(vha, mcp);
 
 	if (rval == QLA_SUCCESS) {
 		if (mcp->mb[1] != 0xAAAA || mcp->mb[2] != 0x5555 ||
@@ -591,7 +599,7 @@ qla2x00_mbx_reg_test(scsi_qla_host_t *ha)
 			struct device_reg_24xx __iomem *reg =
 			    &ha->iobase->isp24;
 
-			qla2xxx_hw_event_log(ha, HW_EVENT_ISP_ERR, 0,
+			qla2xxx_hw_event_log(vha, HW_EVENT_ISP_ERR, 0,
 			    LSW(RD_REG_DWORD(&reg->hccr)),
 			    LSW(RD_REG_DWORD(&reg->istatus)));
 		}
@@ -600,11 +608,11 @@ qla2x00_mbx_reg_test(scsi_qla_host_t *ha)
 	if (rval != QLA_SUCCESS) {
 		/*EMPTY*/
 		DEBUG2_3_11(printk("qla2x00_mbx_reg_test(%ld): failed=%x.\n",
-		    ha->host_no, rval));
+		    vha->host_no, rval));
 	} else {
 		/*EMPTY*/
 		DEBUG11(printk("qla2x00_mbx_reg_test(%ld): done.\n",
-		    ha->host_no));
+		    vha->host_no));
 	}
 
 	return rval;
@@ -626,18 +634,18 @@ qla2x00_mbx_reg_test(scsi_qla_host_t *ha)
  *	Kernel context.
  */
 int
-qla2x00_verify_checksum(scsi_qla_host_t *ha, uint32_t risc_addr)
+qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
 {
 	int rval;
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+	DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
 
 	mcp->mb[0] = MBC_VERIFY_CHECKSUM;
 	mcp->out_mb = MBX_0;
 	mcp->in_mb = MBX_0;
-	if (IS_FWI2_CAPABLE(ha)) {
+	if (IS_FWI2_CAPABLE(vha->hw)) {
 		mcp->mb[1] = MSW(risc_addr);
 		mcp->mb[2] = LSW(risc_addr);
 		mcp->out_mb |= MBX_2|MBX_1;
@@ -650,14 +658,14 @@ qla2x00_verify_checksum(scsi_qla_host_t *ha, uint32_t risc_addr)
 
 	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
-	rval = qla2x00_mailbox_command(ha, mcp);
+	rval = qla2x00_mailbox_command(vha, mcp);
 
 	if (rval != QLA_SUCCESS) {
 		DEBUG2_3_11(printk("%s(%ld): failed=%x chk sum=%x.\n", __func__,
-		    ha->host_no, rval, IS_FWI2_CAPABLE(ha) ?
+		    vha->host_no, rval, IS_FWI2_CAPABLE(vha->hw) ?
 		    (mcp->mb[2] << 16) | mcp->mb[1]: mcp->mb[1]));
 	} else {
-		DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
+		DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
 	}
 
 	return rval;
@@ -682,7 +690,7 @@ qla2x00_verify_checksum(scsi_qla_host_t *ha, uint32_t risc_addr)
  *	Kernel context.
  */
 static int
-qla2x00_issue_iocb_timeout(scsi_qla_host_t *ha, void *buffer,
+qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
     dma_addr_t phys_addr, size_t size, uint32_t tov)
 {
 	int		rval;
@@ -699,30 +707,28 @@ qla2x00_issue_iocb_timeout(scsi_qla_host_t *ha, void *buffer,
 	mcp->in_mb = MBX_2|MBX_0;
 	mcp->tov = tov;
 	mcp->flags = 0;
-	rval = qla2x00_mailbox_command(ha, mcp);
+	rval = qla2x00_mailbox_command(vha, mcp);
 
 	if (rval != QLA_SUCCESS) {
 		/*EMPTY*/
 		DEBUG(printk("qla2x00_issue_iocb(%ld): failed rval 0x%x\n",
-		    ha->host_no, rval));
-		DEBUG2(printk("qla2x00_issue_iocb(%ld): failed rval 0x%x\n",
-		    ha->host_no, rval));
+		    vha->host_no, rval));
 	} else {
 		sts_entry_t *sts_entry = (sts_entry_t *) buffer;
 
 		/* Mask reserved bits. */
 		sts_entry->entry_status &=
-		    IS_FWI2_CAPABLE(ha) ? RF_MASK_24XX :RF_MASK;
+		    IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK;
 	}
 
 	return rval;
 }
 
 int
-qla2x00_issue_iocb(scsi_qla_host_t *ha, void *buffer, dma_addr_t phys_addr,
+qla2x00_issue_iocb(scsi_qla_host_t *vha, void *buffer, dma_addr_t phys_addr,
     size_t size)
 {
-	return qla2x00_issue_iocb_timeout(ha, buffer, phys_addr, size,
+	return qla2x00_issue_iocb_timeout(vha, buffer, phys_addr, size,
 	    MBX_TOV_SECONDS);
 }
 
@@ -741,22 +747,23 @@ qla2x00_issue_iocb(scsi_qla_host_t *ha, void *buffer, dma_addr_t phys_addr,
  *	Kernel context.
  */
 int
-qla2x00_abort_command(scsi_qla_host_t *ha, srb_t *sp)
+qla2x00_abort_command(scsi_qla_host_t *vha, srb_t *sp, struct req_que *req)
 {
 	unsigned long   flags = 0;
 	fc_port_t	*fcport;
 	int		rval;
-	uint32_t	handle;
+	uint32_t	handle = 0;
 	mbx_cmd_t	mc;
 	mbx_cmd_t	*mcp = &mc;
+	struct qla_hw_data *ha = vha->hw;
 
-	DEBUG11(printk("qla2x00_abort_command(%ld): entered.\n", ha->host_no));
+	DEBUG11(printk("qla2x00_abort_command(%ld): entered.\n", vha->host_no));
 
 	fcport = sp->fcport;
 
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 	for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
-		if (ha->outstanding_cmds[handle] == sp)
+		if (req->outstanding_cmds[handle] == sp)
 			break;
 	}
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -778,14 +785,14 @@ qla2x00_abort_command(scsi_qla_host_t *ha, srb_t *sp)
 	mcp->in_mb = MBX_0;
 	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
-	rval = qla2x00_mailbox_command(ha, mcp);
+	rval = qla2x00_mailbox_command(vha, mcp);
 
 	if (rval != QLA_SUCCESS) {
 		DEBUG2_3_11(printk("qla2x00_abort_command(%ld): failed=%x.\n",
-		    ha->host_no, rval));
+		    vha->host_no, rval));
 	} else {
 		DEBUG11(printk("qla2x00_abort_command(%ld): done.\n",
-		    ha->host_no));
+		    vha->host_no));
 	}
 
 	return rval;
@@ -797,40 +804,45 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l)
 	int rval, rval2;
 	mbx_cmd_t  mc;
 	mbx_cmd_t  *mcp = &mc;
-	scsi_qla_host_t *ha;
+	scsi_qla_host_t *vha;
+	struct req_que *req;
+	struct rsp_que *rsp;
 
-	DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->ha->host_no));
+	DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->vha->host_no));
 
 	l = l;
-	ha = fcport->ha;
+	vha = fcport->vha;
+	req = vha->hw->req_q_map[0];
+	rsp = vha->hw->rsp_q_map[0];
 	mcp->mb[0] = MBC_ABORT_TARGET;
 	mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0;
-	if (HAS_EXTENDED_IDS(ha)) {
+	if (HAS_EXTENDED_IDS(vha->hw)) {
 		mcp->mb[1] = fcport->loop_id;
 		mcp->mb[10] = 0;
 		mcp->out_mb |= MBX_10;
 	} else {
 		mcp->mb[1] = fcport->loop_id << 8;
 	}
-	mcp->mb[2] = ha->loop_reset_delay;
-	mcp->mb[9] = ha->vp_idx;
+	mcp->mb[2] = vha->hw->loop_reset_delay;
+	mcp->mb[9] = vha->vp_idx;
 
 	mcp->in_mb = MBX_0;
 	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
-	rval = qla2x00_mailbox_command(ha, mcp);
+	rval = qla2x00_mailbox_command(vha, mcp);
 	if (rval != QLA_SUCCESS) {
 		DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
-		    ha->host_no, rval));
+		    vha->host_no, rval));
 	}
 
 	/* Issue marker IOCB. */
-	rval2 = qla2x00_marker(ha, fcport->loop_id, 0, MK_SYNC_ID);
+	rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, 0,
+							MK_SYNC_ID);
 	if (rval2 != QLA_SUCCESS) {
 		DEBUG2_3_11(printk("%s(%ld): failed to issue Marker IOCB "
-		    "(%x).\n", __func__, ha->host_no, rval2));
+		    "(%x).\n", __func__, vha->host_no, rval2));
 	} else {
-		DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
+		DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
 	}
 
 	return rval;
@@ -842,37 +854,42 @@ qla2x00_lun_reset(struct fc_port *fcport, unsigned int l)
 	int rval, rval2;
 	mbx_cmd_t  mc;
 	mbx_cmd_t  *mcp = &mc;
-	scsi_qla_host_t *ha;
+	scsi_qla_host_t *vha;
+	struct req_que *req;
+	struct rsp_que *rsp;
 
-	DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->ha->host_no));
+	DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->vha->host_no));
 
-	ha = fcport->ha;
+	vha = fcport->vha;
+	req = vha->hw->req_q_map[0];
+	rsp = vha->hw->rsp_q_map[0];
 	mcp->mb[0] = MBC_LUN_RESET;
 	mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
-	if (HAS_EXTENDED_IDS(ha))
+	if (HAS_EXTENDED_IDS(vha->hw))
 		mcp->mb[1] = fcport->loop_id;
 	else
 		mcp->mb[1] = fcport->loop_id << 8;
 	mcp->mb[2] = l;
 	mcp->mb[3] = 0;
-	mcp->mb[9] = ha->vp_idx;
+	mcp->mb[9] = vha->vp_idx;
 
 	mcp->in_mb = MBX_0;
 	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
-	rval = qla2x00_mailbox_command(ha, mcp);
+	rval = qla2x00_mailbox_command(vha, mcp);
 	if (rval != QLA_SUCCESS) {
 		DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
-		    ha->host_no, rval));
+		    vha->host_no, rval));
 	}
 
 	/* Issue marker IOCB. */
-	rval2 = qla2x00_marker(ha, fcport->loop_id, l, MK_SYNC_ID_LUN);
+	rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l,
+								MK_SYNC_ID_LUN);
 	if (rval2 != QLA_SUCCESS) {
 		DEBUG2_3_11(printk("%s(%ld): failed to issue Marker IOCB "
-		    "(%x).\n", __func__, ha->host_no, rval2));
+		    "(%x).\n", __func__, vha->host_no, rval2));
 	} else {
-		DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
+		DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
 	}
 
 	return rval;
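
Both reset paths now resolve the base queue pair explicitly (req_q_map[0]/rsp_q_map[0]) because qla2x00_marker() must be told which request queue receives the marker IOCB once multiple queues exist. A toy illustration of why the marker becomes queue-scoped (hypothetical types mirroring the patch's shape):

    #include <stdio.h>

    struct queue_pair { int id; };   /* toy (req, rsp) pair */

    /* With multiple queue pairs, "the" queue is no longer implied by the
     * adapter, so reset paths name the pair the marker belongs to. */
    static int send_marker(struct queue_pair *qp, unsigned int loop_id,
                           unsigned int lun)
    {
            printf("marker IOCB on queue %d (loop_id %u, lun %u)\n",
                   qp->id, loop_id, lun);
            return 0;
    }
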
@@ -899,7 +916,7 @@ qla2x00_lun_reset(struct fc_port *fcport, unsigned int l)
  *	Kernel context.
  */
 int
-qla2x00_get_adapter_id(scsi_qla_host_t *ha, uint16_t *id, uint8_t *al_pa,
+qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
     uint8_t *area, uint8_t *domain, uint16_t *top, uint16_t *sw_cap)
 {
 	int rval;
@@ -907,15 +924,15 @@ qla2x00_get_adapter_id(scsi_qla_host_t *ha, uint16_t *id, uint8_t *al_pa,
 	mbx_cmd_t *mcp = &mc;
 
 	DEBUG11(printk("qla2x00_get_adapter_id(%ld): entered.\n",
-	    ha->host_no));
+	    vha->host_no));
 
 	mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID;
-	mcp->mb[9] = ha->vp_idx;
+	mcp->mb[9] = vha->vp_idx;
 	mcp->out_mb = MBX_9|MBX_0;
 	mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
 	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
-	rval = qla2x00_mailbox_command(ha, mcp);
+	rval = qla2x00_mailbox_command(vha, mcp);
 	if (mcp->mb[0] == MBS_COMMAND_ERROR)
 		rval = QLA_COMMAND_ERROR;
 	else if (mcp->mb[0] == MBS_INVALID_COMMAND)
@@ -932,11 +949,11 @@ qla2x00_get_adapter_id(scsi_qla_host_t *ha, uint16_t *id, uint8_t *al_pa,
 	if (rval != QLA_SUCCESS) {
 		/*EMPTY*/
 		DEBUG2_3_11(printk("qla2x00_get_adapter_id(%ld): failed=%x.\n",
-		    ha->host_no, rval));
+		    vha->host_no, rval));
 	} else {
 		/*EMPTY*/
 		DEBUG11(printk("qla2x00_get_adapter_id(%ld): done.\n",
-		    ha->host_no));
+		    vha->host_no));
 	}
 
 	return rval;
@@ -958,7 +975,7 @@ qla2x00_get_adapter_id(scsi_qla_host_t *ha, uint16_t *id, uint8_t *al_pa,
  *	Kernel context.
  */
 int
-qla2x00_get_retry_cnt(scsi_qla_host_t *ha, uint8_t *retry_cnt, uint8_t *tov,
+qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
     uint16_t *r_a_tov)
 {
 	int rval;
@@ -967,19 +984,19 @@ qla2x00_get_retry_cnt(scsi_qla_host_t *ha, uint8_t *retry_cnt, uint8_t *tov,
 	mbx_cmd_t *mcp = &mc;
 
 	DEBUG11(printk("qla2x00_get_retry_cnt(%ld): entered.\n",
-			ha->host_no));
+			vha->host_no));
 
 	mcp->mb[0] = MBC_GET_RETRY_COUNT;
 	mcp->out_mb = MBX_0;
 	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
 	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
-	rval = qla2x00_mailbox_command(ha, mcp);
+	rval = qla2x00_mailbox_command(vha, mcp);
 
 	if (rval != QLA_SUCCESS) {
 		/*EMPTY*/
 		DEBUG2_3_11(printk("qla2x00_get_retry_cnt(%ld): failed = %x.\n",
-		    ha->host_no, mcp->mb[0]));
+		    vha->host_no, mcp->mb[0]));
 	} else {
 		/* Convert returned data and check our values. */
 		*r_a_tov = mcp->mb[3] / 2;
@@ -991,7 +1008,7 @@ qla2x00_get_retry_cnt(scsi_qla_host_t *ha, uint8_t *retry_cnt, uint8_t *tov,
 		}
 
 		DEBUG11(printk("qla2x00_get_retry_cnt(%ld): done. mb3=%d "
-		    "ratov=%d.\n", ha->host_no, mcp->mb[3], ratov));
+		    "ratov=%d.\n", vha->host_no, mcp->mb[3], ratov));
 	}
 
 	return rval;
@@ -1015,14 +1032,15 @@ qla2x00_get_retry_cnt(scsi_qla_host_t *ha, uint8_t *retry_cnt, uint8_t *tov,
  *	Kernel context.
  */
 int
-qla2x00_init_firmware(scsi_qla_host_t *ha, uint16_t size)
+qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
 {
 	int rval;
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
+	struct qla_hw_data *ha = vha->hw;
 
 	DEBUG11(printk("qla2x00_init_firmware(%ld): entered.\n",
-	    ha->host_no));
+	    vha->host_no));
 
 	if (ha->flags.npiv_supported)
 		mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE;
@@ -1040,17 +1058,17 @@ qla2x00_init_firmware(scsi_qla_host_t *ha, uint16_t size)
 	mcp->buf_size = size;
 	mcp->flags = MBX_DMA_OUT;
 	mcp->tov = MBX_TOV_SECONDS;
-	rval = qla2x00_mailbox_command(ha, mcp);
+	rval = qla2x00_mailbox_command(vha, mcp);
 
 	if (rval != QLA_SUCCESS) {
 		/*EMPTY*/
 		DEBUG2_3_11(printk("qla2x00_init_firmware(%ld): failed=%x "
 		    "mb0=%x.\n",
-		    ha->host_no, rval, mcp->mb[0]));
+		    vha->host_no, rval, mcp->mb[0]));
 	} else {
 		/*EMPTY*/
 		DEBUG11(printk("qla2x00_init_firmware(%ld): done.\n",
-		    ha->host_no));
+		    vha->host_no));
 	}
 
 	return rval;
@@ -1073,7 +1091,7 @@ qla2x00_init_firmware(scsi_qla_host_t *ha, uint16_t size)
  *	Kernel context.
  */
 int
-qla2x00_get_port_database(scsi_qla_host_t *ha, fc_port_t *fcport, uint8_t opt)
+qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
 {
 	int rval;
 	mbx_cmd_t mc;
@@ -1081,14 +1099,15 @@ qla2x00_get_port_database(scsi_qla_host_t *ha, fc_port_t *fcport, uint8_t opt)
 	port_database_t *pd;
 	struct port_database_24xx *pd24;
 	dma_addr_t pd_dma;
+	struct qla_hw_data *ha = vha->hw;
 
-	DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+	DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
 
 	pd24 = NULL;
 	pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
 	if (pd  == NULL) {
 		DEBUG2_3(printk("%s(%ld): failed to allocate Port Database "
-		    "structure.\n", __func__, ha->host_no));
+		    "structure.\n", __func__, vha->host_no));
 		return QLA_MEMORY_ALLOC_FAILED;
 	}
 	memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE));
@@ -1100,7 +1119,7 @@ qla2x00_get_port_database(scsi_qla_host_t *ha, fc_port_t *fcport, uint8_t opt)
 	mcp->mb[3] = LSW(pd_dma);
 	mcp->mb[6] = MSW(MSD(pd_dma));
 	mcp->mb[7] = LSW(MSD(pd_dma));
-	mcp->mb[9] = ha->vp_idx;
+	mcp->mb[9] = vha->vp_idx;
 	mcp->out_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
 	mcp->in_mb = MBX_0;
 	if (IS_FWI2_CAPABLE(ha)) {
@@ -1120,7 +1139,7 @@ qla2x00_get_port_database(scsi_qla_host_t *ha, fc_port_t *fcport, uint8_t opt)
 	    PORT_DATABASE_24XX_SIZE : PORT_DATABASE_SIZE;
 	mcp->flags = MBX_DMA_IN;
 	mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
-	rval = qla2x00_mailbox_command(ha, mcp);
+	rval = qla2x00_mailbox_command(vha, mcp);
 	if (rval != QLA_SUCCESS)
 		goto gpd_error_out;
 
@@ -1132,7 +1151,7 @@ qla2x00_get_port_database(scsi_qla_host_t *ha, fc_port_t *fcport, uint8_t opt)
 		    pd24->last_login_state != PDS_PRLI_COMPLETE) {
 			DEBUG2(printk("%s(%ld): Unable to verify "
 			    "login-state (%x/%x) for loop_id %x\n",
-			    __func__, ha->host_no,
+			    __func__, vha->host_no,
 			    pd24->current_login_state,
 			    pd24->last_login_state, fcport->loop_id));
 			rval = QLA_FUNCTION_FAILED;
@@ -1192,9 +1211,9 @@ gpd_error_out:
 
 	if (rval != QLA_SUCCESS) {
 		DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n",
-		    __func__, ha->host_no, rval, mcp->mb[0], mcp->mb[1]));
+		    __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
 	} else {
-		DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
+		DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
 	}
 
 	return rval;
@@ -1217,21 +1236,21 @@ gpd_error_out:
  *	Kernel context.
  */
 int
-qla2x00_get_firmware_state(scsi_qla_host_t *ha, uint16_t *states)
+qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
 {
 	int rval;
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
 	DEBUG11(printk("qla2x00_get_firmware_state(%ld): entered.\n",
-	    ha->host_no));
+	    vha->host_no));
 
 	mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
 	mcp->out_mb = MBX_0;
 	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
 	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
-	rval = qla2x00_mailbox_command(ha, mcp);
+	rval = qla2x00_mailbox_command(vha, mcp);
 
 	/* Return firmware states. */
 	states[0] = mcp->mb[1];
@@ -1241,11 +1260,11 @@ qla2x00_get_firmware_state(scsi_qla_host_t *ha, uint16_t *states)
 	if (rval != QLA_SUCCESS) {
 		/*EMPTY*/
 		DEBUG2_3_11(printk("qla2x00_get_firmware_state(%ld): "
-		    "failed=%x.\n", ha->host_no, rval));
+		    "failed=%x.\n", vha->host_no, rval));
 	} else {
 		/*EMPTY*/
 		DEBUG11(printk("qla2x00_get_firmware_state(%ld): done.\n",
-		    ha->host_no));
+		    vha->host_no));
 	}
 
 	return rval;
@@ -1270,7 +1289,7 @@ qla2x00_get_firmware_state(scsi_qla_host_t *ha, uint16_t *states)
  *	Kernel context.
  */
 int
-qla2x00_get_port_name(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t *name,
+qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
     uint8_t opt)
 {
 	int rval;
@@ -1278,12 +1297,12 @@ qla2x00_get_port_name(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t *name,
 	mbx_cmd_t *mcp = &mc;
 
 	DEBUG11(printk("qla2x00_get_port_name(%ld): entered.\n",
-	    ha->host_no));
+	    vha->host_no));
 
 	mcp->mb[0] = MBC_GET_PORT_NAME;
-	mcp->mb[9] = ha->vp_idx;
+	mcp->mb[9] = vha->vp_idx;
 	mcp->out_mb = MBX_9|MBX_1|MBX_0;
-	if (HAS_EXTENDED_IDS(ha)) {
+	if (HAS_EXTENDED_IDS(vha->hw)) {
 		mcp->mb[1] = loop_id;
 		mcp->mb[10] = opt;
 		mcp->out_mb |= MBX_10;
@@ -1294,12 +1313,12 @@ qla2x00_get_port_name(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t *name,
 	mcp->in_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
 	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
-	rval = qla2x00_mailbox_command(ha, mcp);
+	rval = qla2x00_mailbox_command(vha, mcp);
 
 	if (rval != QLA_SUCCESS) {
 		/*EMPTY*/
 		DEBUG2_3_11(printk("qla2x00_get_port_name(%ld): failed=%x.\n",
-		    ha->host_no, rval));
+		    vha->host_no, rval));
 	} else {
 		if (name != NULL) {
 			/* This function returns name in big endian. */
@@ -1314,7 +1333,7 @@ qla2x00_get_port_name(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t *name,
 		}
 
 		DEBUG11(printk("qla2x00_get_port_name(%ld): done.\n",
-		    ha->host_no));
+		    vha->host_no));
 	}
 
 	return rval;
@@ -1336,45 +1355,45 @@ qla2x00_get_port_name(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t *name,
  *	Kernel context.
  */
 int
-qla2x00_lip_reset(scsi_qla_host_t *ha)
+qla2x00_lip_reset(scsi_qla_host_t *vha)
 {
 	int rval;
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+	DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
 
-	if (IS_FWI2_CAPABLE(ha)) {
+	if (IS_FWI2_CAPABLE(vha->hw)) {
 		mcp->mb[0] = MBC_LIP_FULL_LOGIN;
 		mcp->mb[1] = BIT_6;
 		mcp->mb[2] = 0;
-		mcp->mb[3] = ha->loop_reset_delay;
+		mcp->mb[3] = vha->hw->loop_reset_delay;
 		mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
 	} else {
 		mcp->mb[0] = MBC_LIP_RESET;
 		mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
-		if (HAS_EXTENDED_IDS(ha)) {
+		if (HAS_EXTENDED_IDS(vha->hw)) {
 			mcp->mb[1] = 0x00ff;
 			mcp->mb[10] = 0;
 			mcp->out_mb |= MBX_10;
 		} else {
 			mcp->mb[1] = 0xff00;
 		}
-		mcp->mb[2] = ha->loop_reset_delay;
+		mcp->mb[2] = vha->hw->loop_reset_delay;
 		mcp->mb[3] = 0;
 	}
 	mcp->in_mb = MBX_0;
 	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
-	rval = qla2x00_mailbox_command(ha, mcp);
+	rval = qla2x00_mailbox_command(vha, mcp);
 
 	if (rval != QLA_SUCCESS) {
 		/*EMPTY*/
 		DEBUG2_3_11(printk("%s(%ld): failed=%x.\n",
-		    __func__, ha->host_no, rval));
+		    __func__, vha->host_no, rval));
 	} else {
 		/*EMPTY*/
-		DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
+		DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
 	}
 
 	return rval;
@@ -1399,7 +1418,7 @@ qla2x00_lip_reset(scsi_qla_host_t *ha)
  *	Kernel context.
  */
 int
-qla2x00_send_sns(scsi_qla_host_t *ha, dma_addr_t sns_phys_address,
+qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address,
     uint16_t cmd_size, size_t buf_size)
 {
 	int rval;
@@ -1407,10 +1426,11 @@ qla2x00_send_sns(scsi_qla_host_t *ha, dma_addr_t sns_phys_address,
 	mbx_cmd_t *mcp = &mc;
 
 	DEBUG11(printk("qla2x00_send_sns(%ld): entered.\n",
-	    ha->host_no));
+	    vha->host_no));
 
 	DEBUG11(printk("qla2x00_send_sns: retry cnt=%d ratov=%d total "
-	    "tov=%d.\n", ha->retry_count, ha->login_timeout, mcp->tov));
+		"tov=%d.\n", vha->hw->retry_count, vha->hw->login_timeout,
+		mcp->tov));
 
 	mcp->mb[0] = MBC_SEND_SNS_COMMAND;
 	mcp->mb[1] = cmd_size;
@@ -1422,25 +1442,25 @@ qla2x00_send_sns(scsi_qla_host_t *ha, dma_addr_t sns_phys_address,
 	mcp->in_mb = MBX_0|MBX_1;
 	mcp->buf_size = buf_size;
 	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN;
-	mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
-	rval = qla2x00_mailbox_command(ha, mcp);
+	mcp->tov = (vha->hw->login_timeout * 2) + (vha->hw->login_timeout / 2);
+	rval = qla2x00_mailbox_command(vha, mcp);
 
 	if (rval != QLA_SUCCESS) {
 		/*EMPTY*/
 		DEBUG(printk("qla2x00_send_sns(%ld): failed=%x mb[0]=%x "
-		    "mb[1]=%x.\n", ha->host_no, rval, mcp->mb[0], mcp->mb[1]));
+		    "mb[1]=%x.\n", vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
 		DEBUG2_3_11(printk("qla2x00_send_sns(%ld): failed=%x mb[0]=%x "
-		    "mb[1]=%x.\n", ha->host_no, rval, mcp->mb[0], mcp->mb[1]));
+		    "mb[1]=%x.\n", vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
 	} else {
 		/*EMPTY*/
-		DEBUG11(printk("qla2x00_send_sns(%ld): done.\n", ha->host_no));
+		DEBUG11(printk("qla2x00_send_sns(%ld): done.\n", vha->host_no));
 	}
 
 	return rval;
 }
 
 int
-qla24xx_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
+qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
     uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
 {
 	int		rval;
@@ -1448,13 +1468,14 @@ qla24xx_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
 	struct logio_entry_24xx *lg;
 	dma_addr_t	lg_dma;
 	uint32_t	iop[2];
+	struct qla_hw_data *ha = vha->hw;
 
-	DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+	DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
 
 	lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
 	if (lg == NULL) {
 		DEBUG2_3(printk("%s(%ld): failed to allocate Login IOCB.\n",
-		    __func__, ha->host_no));
+		    __func__, vha->host_no));
 		return QLA_MEMORY_ALLOC_FAILED;
 	}
 	memset(lg, 0, sizeof(struct logio_entry_24xx));
@@ -1470,14 +1491,14 @@ qla24xx_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
 	lg->port_id[0] = al_pa;
 	lg->port_id[1] = area;
 	lg->port_id[2] = domain;
-	lg->vp_index = ha->vp_idx;
-	rval = qla2x00_issue_iocb(ha, lg, lg_dma, 0);
+	lg->vp_index = vha->vp_idx;
+	rval = qla2x00_issue_iocb(vha, lg, lg_dma, 0);
 	if (rval != QLA_SUCCESS) {
 		DEBUG2_3_11(printk("%s(%ld): failed to issue Login IOCB "
-		    "(%x).\n", __func__, ha->host_no, rval));
+		    "(%x).\n", __func__, vha->host_no, rval));
 	} else if (lg->entry_status != 0) {
 		DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
-		    "-- error status (%x).\n", __func__, ha->host_no,
+		    "-- error status (%x).\n", __func__, vha->host_no,
 		    lg->entry_status));
 		rval = QLA_FUNCTION_FAILED;
 	} else if (lg->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
@@ -1486,7 +1507,7 @@ qla24xx_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
 
 		DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
 		    "-- completion status (%x)  ioparam=%x/%x.\n", __func__,
-		    ha->host_no, le16_to_cpu(lg->comp_status), iop[0],
+		    vha->host_no, le16_to_cpu(lg->comp_status), iop[0],
 		    iop[1]));
 
 		switch (iop[0]) {
@@ -1515,7 +1536,7 @@ qla24xx_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
 			break;
 		}
 	} else {
-		DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
+		DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
 
 		iop[0] = le32_to_cpu(lg->io_parameter[0]);
 
@@ -1562,14 +1583,15 @@ qla24xx_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
  *	Kernel context.
  */
 int
-qla2x00_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
+qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
     uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
 {
 	int rval;
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
+	struct qla_hw_data *ha = vha->hw;
 
-	DEBUG11(printk("qla2x00_login_fabric(%ld): entered.\n", ha->host_no));
+	DEBUG11(printk("qla2x00_login_fabric(%ld): entered.\n", vha->host_no));
 
 	mcp->mb[0] = MBC_LOGIN_FABRIC_PORT;
 	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
@@ -1586,7 +1608,7 @@ qla2x00_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
 	mcp->in_mb = MBX_7|MBX_6|MBX_2|MBX_1|MBX_0;
 	mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
 	mcp->flags = 0;
-	rval = qla2x00_mailbox_command(ha, mcp);
+	rval = qla2x00_mailbox_command(vha, mcp);
 
 	/* Return mailbox statuses. */
 	if (mb != NULL) {
@@ -1613,12 +1635,12 @@ qla2x00_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
 
 		/*EMPTY*/
 		DEBUG2_3_11(printk("qla2x00_login_fabric(%ld): failed=%x "
-		    "mb[0]=%x mb[1]=%x mb[2]=%x.\n", ha->host_no, rval,
+		    "mb[0]=%x mb[1]=%x mb[2]=%x.\n", vha->host_no, rval,
 		    mcp->mb[0], mcp->mb[1], mcp->mb[2]));
 	} else {
 		/*EMPTY*/
 		DEBUG11(printk("qla2x00_login_fabric(%ld): done.\n",
-		    ha->host_no));
+		    vha->host_no));
 	}
 
 	return rval;
@@ -1641,19 +1663,20 @@ qla2x00_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
  *
  */
 int
-qla2x00_login_local_device(scsi_qla_host_t *ha, fc_port_t *fcport,
+qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport,
     uint16_t *mb_ret, uint8_t opt)
 {
 	int rval;
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
+	struct qla_hw_data *ha = vha->hw;
 
 	if (IS_FWI2_CAPABLE(ha))
-		return qla24xx_login_fabric(ha, fcport->loop_id,
+		return qla24xx_login_fabric(vha, fcport->loop_id,
 		    fcport->d_id.b.domain, fcport->d_id.b.area,
 		    fcport->d_id.b.al_pa, mb_ret, opt);
 
-	DEBUG3(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+	DEBUG3(printk("%s(%ld): entered.\n", __func__, vha->host_no));
 
 	mcp->mb[0] = MBC_LOGIN_LOOP_PORT;
 	if (HAS_EXTENDED_IDS(ha))
@@ -1665,7 +1688,7 @@ qla2x00_login_local_device(scsi_qla_host_t *ha, fc_port_t *fcport,
  	mcp->in_mb = MBX_7|MBX_6|MBX_1|MBX_0;
 	mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
 	mcp->flags = 0;
-	rval = qla2x00_mailbox_command(ha, mcp);
+	rval = qla2x00_mailbox_command(vha, mcp);
 
  	/* Return mailbox statuses. */
  	if (mb_ret != NULL) {
@@ -1686,33 +1709,34 @@ qla2x00_login_local_device(scsi_qla_host_t *ha, fc_port_t *fcport,
  			rval = QLA_SUCCESS;
 
 		DEBUG(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x "
-		    "mb[6]=%x mb[7]=%x.\n", __func__, ha->host_no, rval,
+		    "mb[6]=%x mb[7]=%x.\n", __func__, vha->host_no, rval,
 		    mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]));
 		DEBUG2_3(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x "
-		    "mb[6]=%x mb[7]=%x.\n", __func__, ha->host_no, rval,
+		    "mb[6]=%x mb[7]=%x.\n", __func__, vha->host_no, rval,
 		    mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]));
 	} else {
 		/*EMPTY*/
-		DEBUG3(printk("%s(%ld): done.\n", __func__, ha->host_no));
+		DEBUG3(printk("%s(%ld): done.\n", __func__, vha->host_no));
 	}
 
 	return (rval);
 }
 
 int
-qla24xx_fabric_logout(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
+qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
     uint8_t area, uint8_t al_pa)
 {
 	int		rval;
 	struct logio_entry_24xx *lg;
 	dma_addr_t	lg_dma;
+	struct qla_hw_data *ha = vha->hw;
 
-	DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+	DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
 
 	lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
 	if (lg == NULL) {
 		DEBUG2_3(printk("%s(%ld): failed to allocate Logout IOCB.\n",
-		    __func__, ha->host_no));
+		    __func__, vha->host_no));
 		return QLA_MEMORY_ALLOC_FAILED;
 	}
 	memset(lg, 0, sizeof(struct logio_entry_24xx));
@@ -1725,25 +1749,26 @@ qla24xx_fabric_logout(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
 	lg->port_id[0] = al_pa;
 	lg->port_id[1] = area;
 	lg->port_id[2] = domain;
-	lg->vp_index = ha->vp_idx;
-	rval = qla2x00_issue_iocb(ha, lg, lg_dma, 0);
+	lg->vp_index = vha->vp_idx;
+
+	rval = qla2x00_issue_iocb(vha, lg, lg_dma, 0);
 	if (rval != QLA_SUCCESS) {
 		DEBUG2_3_11(printk("%s(%ld): failed to issue Logout IOCB "
-		    "(%x).\n", __func__, ha->host_no, rval));
+		    "(%x).\n", __func__, vha->host_no, rval));
 	} else if (lg->entry_status != 0) {
 		DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
-		    "-- error status (%x).\n", __func__, ha->host_no,
+		    "-- error status (%x).\n", __func__, vha->host_no,
 		    lg->entry_status));
 		rval = QLA_FUNCTION_FAILED;
 	} else if (lg->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
-		DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
+		DEBUG2_3_11(printk("%s(%ld %d): failed to complete IOCB "
 		    "-- completion status (%x)  ioparam=%x/%x.\n", __func__,
-		    ha->host_no, le16_to_cpu(lg->comp_status),
+		    vha->host_no, vha->vp_idx, le16_to_cpu(lg->comp_status),
 		    le32_to_cpu(lg->io_parameter[0]),
 		    le32_to_cpu(lg->io_parameter[1])));
 	} else {
 		/*EMPTY*/
-		DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
+		DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
 	}
 
 	dma_pool_free(ha->s_dma_pool, lg, lg_dma);
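
[Editor's note -- illustrative sketch, not part of the patch. The change
running through every hunk above is a data-model split: per-port state
stays in scsi_qla_host_t ("vha") while shared adapter state moves to
struct qla_hw_data ("ha"), reached via vha->hw. A minimal, compilable
toy model of that relationship; all types below are made-up stand-ins,
not the driver's real structures:]

    #include <stdio.h>

    struct toy_hw_data {                /* one per PCI function */
            int s_dma_pool;             /* stands in for ha->s_dma_pool */
    };

    struct toy_qla_host {               /* one per (virtual) port */
            long host_no;
            int vp_idx;
            struct toy_hw_data *hw;     /* shared hardware state */
    };

    static void toy_logout(struct toy_qla_host *vha)
    {
            struct toy_hw_data *ha = vha->hw;   /* the recurring idiom */

            printf("port %ld (vp %d) uses pool %d\n",
                vha->host_no, vha->vp_idx, ha->s_dma_pool);
    }

    int main(void)
    {
            struct toy_hw_data hw = { .s_dma_pool = 1 };
            struct toy_qla_host base = { 0, 0, &hw }, vport = { 1, 2, &hw };

            toy_logout(&base);          /* physical port, vp_idx 0 */
            toy_logout(&vport);         /* NPIV vport, same hw */
            return 0;
    }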
@@ -1768,7 +1793,7 @@ qla24xx_fabric_logout(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
  *	Kernel context.
  */
 int
-qla2x00_fabric_logout(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
+qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
     uint8_t area, uint8_t al_pa)
 {
 	int rval;
@@ -1776,11 +1801,11 @@ qla2x00_fabric_logout(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
 	mbx_cmd_t *mcp = &mc;
 
 	DEBUG11(printk("qla2x00_fabric_logout(%ld): entered.\n",
-	    ha->host_no));
+	    vha->host_no));
 
 	mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT;
 	mcp->out_mb = MBX_1|MBX_0;
-	if (HAS_EXTENDED_IDS(ha)) {
+	if (HAS_EXTENDED_IDS(vha->hw)) {
 		mcp->mb[1] = loop_id;
 		mcp->mb[10] = 0;
 		mcp->out_mb |= MBX_10;
@@ -1791,16 +1816,16 @@ qla2x00_fabric_logout(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
 	mcp->in_mb = MBX_1|MBX_0;
 	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
-	rval = qla2x00_mailbox_command(ha, mcp);
+	rval = qla2x00_mailbox_command(vha, mcp);
 
 	if (rval != QLA_SUCCESS) {
 		/*EMPTY*/
 		DEBUG2_3_11(printk("qla2x00_fabric_logout(%ld): failed=%x "
-		    "mbx1=%x.\n", ha->host_no, rval, mcp->mb[1]));
+		    "mbx1=%x.\n", vha->host_no, rval, mcp->mb[1]));
 	} else {
 		/*EMPTY*/
 		DEBUG11(printk("qla2x00_fabric_logout(%ld): done.\n",
-		    ha->host_no));
+		    vha->host_no));
 	}
 
 	return rval;
@@ -1822,33 +1847,33 @@ qla2x00_fabric_logout(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
  *	Kernel context.
  */
 int
-qla2x00_full_login_lip(scsi_qla_host_t *ha)
+qla2x00_full_login_lip(scsi_qla_host_t *vha)
 {
 	int rval;
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
 	DEBUG11(printk("qla2x00_full_login_lip(%ld): entered.\n",
-	    ha->host_no));
+	    vha->host_no));
 
 	mcp->mb[0] = MBC_LIP_FULL_LOGIN;
-	mcp->mb[1] = IS_FWI2_CAPABLE(ha) ? BIT_3: 0;
+	mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_3 : 0;
 	mcp->mb[2] = 0;
 	mcp->mb[3] = 0;
 	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
 	mcp->in_mb = MBX_0;
 	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
-	rval = qla2x00_mailbox_command(ha, mcp);
+	rval = qla2x00_mailbox_command(vha, mcp);
 
 	if (rval != QLA_SUCCESS) {
 		/*EMPTY*/
 		DEBUG2_3_11(printk("qla2x00_full_login_lip(%ld): failed=%x.\n",
-		    ha->host_no, rval));
+		    vha->host_no, rval));
 	} else {
 		/*EMPTY*/
 		DEBUG11(printk("qla2x00_full_login_lip(%ld): done.\n",
-		    ha->host_no));
+		    vha->host_no));
 	}
 
 	return rval;
@@ -1867,7 +1892,7 @@ qla2x00_full_login_lip(scsi_qla_host_t *ha)
  *	Kernel context.
  */
 int
-qla2x00_get_id_list(scsi_qla_host_t *ha, void *id_list, dma_addr_t id_list_dma,
+qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma,
     uint16_t *entries)
 {
 	int rval;
@@ -1875,20 +1900,20 @@ qla2x00_get_id_list(scsi_qla_host_t *ha, void *id_list, dma_addr_t id_list_dma,
 	mbx_cmd_t *mcp = &mc;
 
 	DEBUG11(printk("qla2x00_get_id_list(%ld): entered.\n",
-	    ha->host_no));
+	    vha->host_no));
 
 	if (id_list == NULL)
 		return QLA_FUNCTION_FAILED;
 
 	mcp->mb[0] = MBC_GET_ID_LIST;
 	mcp->out_mb = MBX_0;
-	if (IS_FWI2_CAPABLE(ha)) {
+	if (IS_FWI2_CAPABLE(vha->hw)) {
 		mcp->mb[2] = MSW(id_list_dma);
 		mcp->mb[3] = LSW(id_list_dma);
 		mcp->mb[6] = MSW(MSD(id_list_dma));
 		mcp->mb[7] = LSW(MSD(id_list_dma));
 		mcp->mb[8] = 0;
-		mcp->mb[9] = ha->vp_idx;
+		mcp->mb[9] = vha->vp_idx;
 		mcp->out_mb |= MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2;
 	} else {
 		mcp->mb[1] = MSW(id_list_dma);
@@ -1900,16 +1925,16 @@ qla2x00_get_id_list(scsi_qla_host_t *ha, void *id_list, dma_addr_t id_list_dma,
 	mcp->in_mb = MBX_1|MBX_0;
 	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
-	rval = qla2x00_mailbox_command(ha, mcp);
+	rval = qla2x00_mailbox_command(vha, mcp);
 
 	if (rval != QLA_SUCCESS) {
 		/*EMPTY*/
 		DEBUG2_3_11(printk("qla2x00_get_id_list(%ld): failed=%x.\n",
-		    ha->host_no, rval));
+		    vha->host_no, rval));
 	} else {
 		*entries = mcp->mb[1];
 		DEBUG11(printk("qla2x00_get_id_list(%ld): done.\n",
-		    ha->host_no));
+		    vha->host_no));
 	}
 
 	return rval;
@@ -1929,7 +1954,7 @@ qla2x00_get_id_list(scsi_qla_host_t *ha, void *id_list, dma_addr_t id_list_dma,
  *	Kernel context.
  */
 int
-qla2x00_get_resource_cnts(scsi_qla_host_t *ha, uint16_t *cur_xchg_cnt,
+qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
     uint16_t *orig_xchg_cnt, uint16_t *cur_iocb_cnt,
     uint16_t *orig_iocb_cnt, uint16_t *max_npiv_vports)
 {
@@ -1937,22 +1962,22 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *ha, uint16_t *cur_xchg_cnt,
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+	DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
 
 	mcp->mb[0] = MBC_GET_RESOURCE_COUNTS;
 	mcp->out_mb = MBX_0;
 	mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
 	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
-	rval = qla2x00_mailbox_command(ha, mcp);
+	rval = qla2x00_mailbox_command(vha, mcp);
 
 	if (rval != QLA_SUCCESS) {
 		/*EMPTY*/
 		DEBUG2_3_11(printk("%s(%ld): failed = %x.\n", __func__,
-		    ha->host_no, mcp->mb[0]));
+		    vha->host_no, mcp->mb[0]));
 	} else {
 		DEBUG11(printk("%s(%ld): done. mb1=%x mb2=%x mb3=%x mb6=%x "
-		    "mb7=%x mb10=%x mb11=%x.\n", __func__, ha->host_no,
+		    "mb7=%x mb10=%x mb11=%x.\n", __func__, vha->host_no,
 		    mcp->mb[1], mcp->mb[2], mcp->mb[3], mcp->mb[6], mcp->mb[7],
 		    mcp->mb[10], mcp->mb[11]));
 
@@ -1964,7 +1989,7 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *ha, uint16_t *cur_xchg_cnt,
 			*cur_iocb_cnt = mcp->mb[7];
 		if (orig_iocb_cnt)
 			*orig_iocb_cnt = mcp->mb[10];
-		if (ha->flags.npiv_supported && max_npiv_vports)
+		if (vha->hw->flags.npiv_supported && max_npiv_vports)
 			*max_npiv_vports = mcp->mb[11];
 	}
 
@@ -1987,18 +2012,19 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *ha, uint16_t *cur_xchg_cnt,
  *	Kernel context.
  */
 int
-qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map)
+qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
 {
 	int rval;
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 	char *pmap;
 	dma_addr_t pmap_dma;
+	struct qla_hw_data *ha = vha->hw;
 
 	pmap = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma);
 	if (pmap  == NULL) {
 		DEBUG2_3_11(printk("%s(%ld): **** Mem Alloc Failed ****",
-		    __func__, ha->host_no));
+		    __func__, vha->host_no));
 		return QLA_MEMORY_ALLOC_FAILED;
 	}
 	memset(pmap, 0, FCAL_MAP_SIZE);
@@ -2013,11 +2039,11 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map)
 	mcp->buf_size = FCAL_MAP_SIZE;
 	mcp->flags = MBX_DMA_IN;
 	mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
-	rval = qla2x00_mailbox_command(ha, mcp);
+	rval = qla2x00_mailbox_command(vha, mcp);
 
 	if (rval == QLA_SUCCESS) {
 		DEBUG11(printk("%s(%ld): (mb0=%x/mb1=%x) FC/AL Position Map "
-		    "size (%x)\n", __func__, ha->host_no, mcp->mb[0],
+		    "size (%x)\n", __func__, vha->host_no, mcp->mb[0],
 		    mcp->mb[1], (unsigned)pmap[0]));
 		DEBUG11(qla2x00_dump_buffer(pmap, pmap[0] + 1));
 
@@ -2028,9 +2054,9 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map)
 
 	if (rval != QLA_SUCCESS) {
 		DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
-		    ha->host_no, rval));
+		    vha->host_no, rval));
 	} else {
-		DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
+		DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
 	}
 
 	return rval;
@@ -2051,15 +2077,16 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map)
  *	BIT_1 = mailbox error.
  */
 int
-qla2x00_get_link_status(scsi_qla_host_t *ha, uint16_t loop_id,
+qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
     struct link_statistics *stats, dma_addr_t stats_dma)
 {
 	int rval;
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 	uint32_t *siter, *diter, dwords;
+	struct qla_hw_data *ha = vha->hw;
 
-	DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+	DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
 
 	mcp->mb[0] = MBC_GET_LINK_STATUS;
 	mcp->mb[2] = MSW(stats_dma);
@@ -2084,12 +2111,12 @@ qla2x00_get_link_status(scsi_qla_host_t *ha, uint16_t loop_id,
 	}
 	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = IOCTL_CMD;
-	rval = qla2x00_mailbox_command(ha, mcp);
+	rval = qla2x00_mailbox_command(vha, mcp);
 
 	if (rval == QLA_SUCCESS) {
 		if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
 			DEBUG2_3_11(printk("%s(%ld): cmd failed. mbx0=%x.\n",
-			    __func__, ha->host_no, mcp->mb[0]));
+			    __func__, vha->host_no, mcp->mb[0]));
 			rval = QLA_FUNCTION_FAILED;
 		} else {
 			/* Copy over data -- firmware data is LE. */
@@ -2101,14 +2128,14 @@ qla2x00_get_link_status(scsi_qla_host_t *ha, uint16_t loop_id,
 	} else {
 		/* Failed. */
 		DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
-		    ha->host_no, rval));
+		    vha->host_no, rval));
 	}
 
 	return rval;
 }
 
 int
-qla24xx_get_isp_stats(scsi_qla_host_t *ha, struct link_statistics *stats,
+qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
     dma_addr_t stats_dma)
 {
 	int rval;
@@ -2116,7 +2143,7 @@ qla24xx_get_isp_stats(scsi_qla_host_t *ha, struct link_statistics *stats,
 	mbx_cmd_t *mcp = &mc;
 	uint32_t *siter, *diter, dwords;
 
-	DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+	DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
 
 	mcp->mb[0] = MBC_GET_LINK_PRIV_STATS;
 	mcp->mb[2] = MSW(stats_dma);
@@ -2124,18 +2151,18 @@ qla24xx_get_isp_stats(scsi_qla_host_t *ha, struct link_statistics *stats,
 	mcp->mb[6] = MSW(MSD(stats_dma));
 	mcp->mb[7] = LSW(MSD(stats_dma));
 	mcp->mb[8] = sizeof(struct link_statistics) / 4;
-	mcp->mb[9] = ha->vp_idx;
+	mcp->mb[9] = vha->vp_idx;
 	mcp->mb[10] = 0;
 	mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
 	mcp->in_mb = MBX_2|MBX_1|MBX_0;
 	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = IOCTL_CMD;
-	rval = qla2x00_mailbox_command(ha, mcp);
+	rval = qla2x00_mailbox_command(vha, mcp);
 
 	if (rval == QLA_SUCCESS) {
 		if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
 			DEBUG2_3_11(printk("%s(%ld): cmd failed. mbx0=%x.\n",
-			    __func__, ha->host_no, mcp->mb[0]));
+			    __func__, vha->host_no, mcp->mb[0]));
 			rval = QLA_FUNCTION_FAILED;
 		} else {
 			/* Copy over data -- firmware data is LE. */
@@ -2147,14 +2174,14 @@ qla24xx_get_isp_stats(scsi_qla_host_t *ha, struct link_statistics *stats,
 	} else {
 		/* Failed. */
 		DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
-		    ha->host_no, rval));
+		    vha->host_no, rval));
 	}
 
 	return rval;
 }
 
 int
-qla24xx_abort_command(scsi_qla_host_t *ha, srb_t *sp)
+qla24xx_abort_command(scsi_qla_host_t *vha, srb_t *sp, struct req_que *req)
 {
 	int		rval;
 	fc_port_t	*fcport;
@@ -2163,18 +2190,18 @@ qla24xx_abort_command(scsi_qla_host_t *ha, srb_t *sp)
 	struct abort_entry_24xx *abt;
 	dma_addr_t	abt_dma;
 	uint32_t	handle;
-	scsi_qla_host_t *pha = to_qla_parent(ha);
+	struct qla_hw_data *ha = vha->hw;
 
-	DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+	DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
 
 	fcport = sp->fcport;
 
-	spin_lock_irqsave(&pha->hardware_lock, flags);
+	spin_lock_irqsave(&ha->hardware_lock, flags);
 	for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
-		if (pha->outstanding_cmds[handle] == sp)
+		if (req->outstanding_cmds[handle] == sp)
 			break;
 	}
-	spin_unlock_irqrestore(&pha->hardware_lock, flags);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 	if (handle == MAX_OUTSTANDING_COMMANDS) {
 		/* Command not found. */
 		return QLA_FUNCTION_FAILED;
@@ -2183,7 +2210,7 @@ qla24xx_abort_command(scsi_qla_host_t *ha, srb_t *sp)
 	abt = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma);
 	if (abt == NULL) {
 		DEBUG2_3(printk("%s(%ld): failed to allocate Abort IOCB.\n",
-		    __func__, ha->host_no));
+		    __func__, vha->host_no));
 		return QLA_MEMORY_ALLOC_FAILED;
 	}
 	memset(abt, 0, sizeof(struct abort_entry_24xx));
@@ -2196,22 +2223,25 @@ qla24xx_abort_command(scsi_qla_host_t *ha, srb_t *sp)
 	abt->port_id[1] = fcport->d_id.b.area;
 	abt->port_id[2] = fcport->d_id.b.domain;
 	abt->vp_index = fcport->vp_idx;
-	rval = qla2x00_issue_iocb(ha, abt, abt_dma, 0);
+
+	abt->req_que_no = cpu_to_le16(req->id);
+
+	rval = qla2x00_issue_iocb(vha, abt, abt_dma, 0);
 	if (rval != QLA_SUCCESS) {
 		DEBUG2_3_11(printk("%s(%ld): failed to issue IOCB (%x).\n",
-		    __func__, ha->host_no, rval));
+		    __func__, vha->host_no, rval));
 	} else if (abt->entry_status != 0) {
 		DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
-		    "-- error status (%x).\n", __func__, ha->host_no,
+		    "-- error status (%x).\n", __func__, vha->host_no,
 		    abt->entry_status));
 		rval = QLA_FUNCTION_FAILED;
 	} else if (abt->nport_handle != __constant_cpu_to_le16(0)) {
 		DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
-		    "-- completion status (%x).\n", __func__, ha->host_no,
+		    "-- completion status (%x).\n", __func__, vha->host_no,
 		    le16_to_cpu(abt->nport_handle)));
 		rval = QLA_FUNCTION_FAILED;
 	} else {
-		DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
+		DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
 	}
 
 	dma_pool_free(ha->s_dma_pool, abt, abt_dma);
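
[Editor's note -- illustrative sketch, not part of the patch. With
multi-queue support, qla24xx_abort_command() above now scans the
outstanding_cmds[] array of the request queue it is handed ("req")
rather than a single per-adapter array. A self-contained toy of that
handle lookup; the queue size and types here are made up:]

    #include <stdio.h>

    #define TOY_MAX_OUTSTANDING 8

    struct toy_req_que {
            void *outstanding_cmds[TOY_MAX_OUTSTANDING];
    };

    /* Handle 0 is treated as reserved, so the scan starts at 1,
     * mirroring the driver's loop; 0 means "not found" here. */
    static unsigned toy_find_handle(struct toy_req_que *req, void *sp)
    {
            unsigned handle;

            for (handle = 1; handle < TOY_MAX_OUTSTANDING; handle++)
                    if (req->outstanding_cmds[handle] == sp)
                            return handle;
            return 0;
    }

    int main(void)
    {
            struct toy_req_que req = { { 0 } };
            int cmd;

            req.outstanding_cmds[3] = &cmd;
            printf("handle=%u\n", toy_find_handle(&req, &cmd)); /* 3 */
            return 0;
    }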
@@ -2233,16 +2263,21 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
 	int		rval, rval2;
 	struct tsk_mgmt_cmd *tsk;
 	dma_addr_t	tsk_dma;
-	scsi_qla_host_t *ha, *pha;
+	scsi_qla_host_t *vha;
+	struct qla_hw_data *ha;
+	struct req_que *req;
+	struct rsp_que *rsp;
 
-	DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->ha->host_no));
+	DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->vha->host_no));
 
-	ha = fcport->ha;
-	pha = to_qla_parent(ha);
-	tsk = dma_pool_alloc(pha->s_dma_pool, GFP_KERNEL, &tsk_dma);
+	vha = fcport->vha;
+	ha = vha->hw;
+	req = ha->req_q_map[0];
+	rsp = ha->rsp_q_map[0];
+	tsk = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
 	if (tsk == NULL) {
 		DEBUG2_3(printk("%s(%ld): failed to allocate Task Management "
-		    "IOCB.\n", __func__, ha->host_no));
+		    "IOCB.\n", __func__, vha->host_no));
 		return QLA_MEMORY_ALLOC_FAILED;
 	}
 	memset(tsk, 0, sizeof(struct tsk_mgmt_cmd));
@@ -2262,34 +2297,34 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
 		    sizeof(tsk->p.tsk.lun));
 	}
 
-	rval = qla2x00_issue_iocb(ha, tsk, tsk_dma, 0);
+	rval = qla2x00_issue_iocb(vha, tsk, tsk_dma, 0);
 	if (rval != QLA_SUCCESS) {
 		DEBUG2_3_11(printk("%s(%ld): failed to issue %s Reset IOCB "
-		    "(%x).\n", __func__, ha->host_no, name, rval));
+		    "(%x).\n", __func__, vha->host_no, name, rval));
 	} else if (tsk->p.sts.entry_status != 0) {
 		DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
-		    "-- error status (%x).\n", __func__, ha->host_no,
+		    "-- error status (%x).\n", __func__, vha->host_no,
 		    tsk->p.sts.entry_status));
 		rval = QLA_FUNCTION_FAILED;
 	} else if (tsk->p.sts.comp_status !=
 	    __constant_cpu_to_le16(CS_COMPLETE)) {
 		DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
 		    "-- completion status (%x).\n", __func__,
-		    ha->host_no, le16_to_cpu(tsk->p.sts.comp_status)));
+		    vha->host_no, le16_to_cpu(tsk->p.sts.comp_status)));
 		rval = QLA_FUNCTION_FAILED;
 	}
 
 	/* Issue marker IOCB. */
-	rval2 = qla2x00_marker(ha, fcport->loop_id, l,
+	rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l,
 	    type == TCF_LUN_RESET ? MK_SYNC_ID_LUN: MK_SYNC_ID);
 	if (rval2 != QLA_SUCCESS) {
 		DEBUG2_3_11(printk("%s(%ld): failed to issue Marker IOCB "
-		    "(%x).\n", __func__, ha->host_no, rval2));
+		    "(%x).\n", __func__, vha->host_no, rval2));
 	} else {
-		DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
+		DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
 	}
 
-	dma_pool_free(pha->s_dma_pool, tsk, tsk_dma);
+	dma_pool_free(ha->s_dma_pool, tsk, tsk_dma);
 
 	return rval;
 }
@@ -2307,29 +2342,30 @@ qla24xx_lun_reset(struct fc_port *fcport, unsigned int l)
 }
 
 int
-qla2x00_system_error(scsi_qla_host_t *ha)
+qla2x00_system_error(scsi_qla_host_t *vha)
 {
 	int rval;
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
+	struct qla_hw_data *ha = vha->hw;
 
 	if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha))
 		return QLA_FUNCTION_FAILED;
 
-	DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+	DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
 
 	mcp->mb[0] = MBC_GEN_SYSTEM_ERROR;
 	mcp->out_mb = MBX_0;
 	mcp->in_mb = MBX_0;
 	mcp->tov = 5;
 	mcp->flags = 0;
-	rval = qla2x00_mailbox_command(ha, mcp);
+	rval = qla2x00_mailbox_command(vha, mcp);
 
 	if (rval != QLA_SUCCESS) {
 		DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
-		    ha->host_no, rval));
+		    vha->host_no, rval));
 	} else {
-		DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
+		DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
 	}
 
 	return rval;
@@ -2342,14 +2378,14 @@ qla2x00_system_error(scsi_qla_host_t *ha)
  * Returns
  */
 int
-qla2x00_set_serdes_params(scsi_qla_host_t *ha, uint16_t sw_em_1g,
+qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g,
     uint16_t sw_em_2g, uint16_t sw_em_4g)
 {
 	int rval;
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+	DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
 
 	mcp->mb[0] = MBC_SERDES_PARAMS;
 	mcp->mb[1] = BIT_0;
@@ -2360,61 +2396,61 @@ qla2x00_set_serdes_params(scsi_qla_host_t *ha, uint16_t sw_em_1g,
 	mcp->in_mb = MBX_0;
 	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
-	rval = qla2x00_mailbox_command(ha, mcp);
+	rval = qla2x00_mailbox_command(vha, mcp);
 
 	if (rval != QLA_SUCCESS) {
 		/*EMPTY*/
 		DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__,
-		    ha->host_no, rval, mcp->mb[0]));
+		    vha->host_no, rval, mcp->mb[0]));
 	} else {
 		/*EMPTY*/
-		DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
+		DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
 	}
 
 	return rval;
 }
 
 int
-qla2x00_stop_firmware(scsi_qla_host_t *ha)
+qla2x00_stop_firmware(scsi_qla_host_t *vha)
 {
 	int rval;
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	if (!IS_FWI2_CAPABLE(ha))
+	if (!IS_FWI2_CAPABLE(vha->hw))
 		return QLA_FUNCTION_FAILED;
 
-	DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+	DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
 
 	mcp->mb[0] = MBC_STOP_FIRMWARE;
 	mcp->out_mb = MBX_0;
 	mcp->in_mb = MBX_0;
 	mcp->tov = 5;
 	mcp->flags = 0;
-	rval = qla2x00_mailbox_command(ha, mcp);
+	rval = qla2x00_mailbox_command(vha, mcp);
 
 	if (rval != QLA_SUCCESS) {
 		DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
-		    ha->host_no, rval));
+		    vha->host_no, rval));
 	} else {
-		DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
+		DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
 	}
 
 	return rval;
 }
 
 int
-qla2x00_enable_eft_trace(scsi_qla_host_t *ha, dma_addr_t eft_dma,
+qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
     uint16_t buffers)
 {
 	int rval;
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	if (!IS_FWI2_CAPABLE(ha))
+	if (!IS_FWI2_CAPABLE(vha->hw))
 		return QLA_FUNCTION_FAILED;
 
-	DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+	DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
 
 	mcp->mb[0] = MBC_TRACE_CONTROL;
 	mcp->mb[1] = TC_EFT_ENABLE;
@@ -2428,28 +2464,28 @@ qla2x00_enable_eft_trace(scsi_qla_host_t *ha, dma_addr_t eft_dma,
 	mcp->in_mb = MBX_1|MBX_0;
 	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
-	rval = qla2x00_mailbox_command(ha, mcp);
+	rval = qla2x00_mailbox_command(vha, mcp);
 	if (rval != QLA_SUCCESS) {
 		DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n",
-		    __func__, ha->host_no, rval, mcp->mb[0], mcp->mb[1]));
+		    __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
 	} else {
-		DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
+		DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
 	}
 
 	return rval;
 }
 
 int
-qla2x00_disable_eft_trace(scsi_qla_host_t *ha)
+qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
 {
 	int rval;
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	if (!IS_FWI2_CAPABLE(ha))
+	if (!IS_FWI2_CAPABLE(vha->hw))
 		return QLA_FUNCTION_FAILED;
 
-	DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+	DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
 
 	mcp->mb[0] = MBC_TRACE_CONTROL;
 	mcp->mb[1] = TC_EFT_DISABLE;
@@ -2457,29 +2493,29 @@ qla2x00_disable_eft_trace(scsi_qla_host_t *ha)
 	mcp->in_mb = MBX_1|MBX_0;
 	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
-	rval = qla2x00_mailbox_command(ha, mcp);
+	rval = qla2x00_mailbox_command(vha, mcp);
 	if (rval != QLA_SUCCESS) {
 		DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n",
-		    __func__, ha->host_no, rval, mcp->mb[0], mcp->mb[1]));
+		    __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
 	} else {
-		DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
+		DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
 	}
 
 	return rval;
 }
 
 int
-qla2x00_enable_fce_trace(scsi_qla_host_t *ha, dma_addr_t fce_dma,
+qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
     uint16_t buffers, uint16_t *mb, uint32_t *dwords)
 {
 	int rval;
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	if (!IS_QLA25XX(ha))
+	if (!IS_QLA25XX(vha->hw))
 		return QLA_FUNCTION_FAILED;
 
-	DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+	DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
 
 	mcp->mb[0] = MBC_TRACE_CONTROL;
 	mcp->mb[1] = TC_FCE_ENABLE;
@@ -2497,12 +2533,12 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *ha, dma_addr_t fce_dma,
 	mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
 	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
-	rval = qla2x00_mailbox_command(ha, mcp);
+	rval = qla2x00_mailbox_command(vha, mcp);
 	if (rval != QLA_SUCCESS) {
 		DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n",
-		    __func__, ha->host_no, rval, mcp->mb[0], mcp->mb[1]));
+		    __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
 	} else {
-		DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
+		DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
 
 		if (mb)
 			memcpy(mb, mcp->mb, 8 * sizeof(*mb));
@@ -2514,16 +2550,16 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *ha, dma_addr_t fce_dma,
 }
 
 int
-qla2x00_disable_fce_trace(scsi_qla_host_t *ha, uint64_t *wr, uint64_t *rd)
+qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
 {
 	int rval;
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	if (!IS_FWI2_CAPABLE(ha))
+	if (!IS_FWI2_CAPABLE(vha->hw))
 		return QLA_FUNCTION_FAILED;
 
-	DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+	DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
 
 	mcp->mb[0] = MBC_TRACE_CONTROL;
 	mcp->mb[1] = TC_FCE_DISABLE;
@@ -2533,12 +2569,12 @@ qla2x00_disable_fce_trace(scsi_qla_host_t *ha, uint64_t *wr, uint64_t *rd)
 	    MBX_1|MBX_0;
 	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
-	rval = qla2x00_mailbox_command(ha, mcp);
+	rval = qla2x00_mailbox_command(vha, mcp);
 	if (rval != QLA_SUCCESS) {
 		DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n",
-		    __func__, ha->host_no, rval, mcp->mb[0], mcp->mb[1]));
+		    __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
 	} else {
-		DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
+		DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
 
 		if (wr)
 			*wr = (uint64_t) mcp->mb[5] << 48 |
@@ -2556,17 +2592,17 @@ qla2x00_disable_fce_trace(scsi_qla_host_t *ha, uint64_t *wr, uint64_t *rd)
 }
 
 int
-qla2x00_read_sfp(scsi_qla_host_t *ha, dma_addr_t sfp_dma, uint16_t addr,
+qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint16_t addr,
     uint16_t off, uint16_t count)
 {
 	int rval;
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	if (!IS_FWI2_CAPABLE(ha))
+	if (!IS_FWI2_CAPABLE(vha->hw))
 		return QLA_FUNCTION_FAILED;
 
-	DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+	DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
 
 	mcp->mb[0] = MBC_READ_SFP;
 	mcp->mb[1] = addr;
@@ -2581,30 +2617,30 @@ qla2x00_read_sfp(scsi_qla_host_t *ha, dma_addr_t sfp_dma, uint16_t addr,
 	mcp->in_mb = MBX_0;
 	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
-	rval = qla2x00_mailbox_command(ha, mcp);
+	rval = qla2x00_mailbox_command(vha, mcp);
 
 	if (rval != QLA_SUCCESS) {
 		DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__,
-		    ha->host_no, rval, mcp->mb[0]));
+		    vha->host_no, rval, mcp->mb[0]));
 	} else {
-		DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
+		DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
 	}
 
 	return rval;
 }
 
 int
-qla2x00_set_idma_speed(scsi_qla_host_t *ha, uint16_t loop_id,
+qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
     uint16_t port_speed, uint16_t *mb)
 {
 	int rval;
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	if (!IS_IIDMA_CAPABLE(ha))
+	if (!IS_IIDMA_CAPABLE(vha->hw))
 		return QLA_FUNCTION_FAILED;
 
-	DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+	DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
 
 	mcp->mb[0] = MBC_PORT_PARAMS;
 	mcp->mb[1] = loop_id;
@@ -2615,7 +2651,7 @@ qla2x00_set_idma_speed(scsi_qla_host_t *ha, uint16_t loop_id,
 	mcp->in_mb = MBX_5|MBX_4|MBX_3|MBX_1|MBX_0;
 	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
-	rval = qla2x00_mailbox_command(ha, mcp);
+	rval = qla2x00_mailbox_command(vha, mcp);
 
 	/* Return mailbox statuses. */
 	if (mb != NULL) {
@@ -2628,28 +2664,29 @@ qla2x00_set_idma_speed(scsi_qla_host_t *ha, uint16_t loop_id,
 
 	if (rval != QLA_SUCCESS) {
 		DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
-		    ha->host_no, rval));
+		    vha->host_no, rval));
 	} else {
-		DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
+		DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
 	}
 
 	return rval;
 }
 
 void
-qla24xx_report_id_acquisition(scsi_qla_host_t *ha,
+qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
 	struct vp_rpt_id_entry_24xx *rptid_entry)
 {
 	uint8_t vp_idx;
 	uint16_t stat = le16_to_cpu(rptid_entry->vp_idx);
-	scsi_qla_host_t *vha;
+	struct qla_hw_data *ha = vha->hw;
+	scsi_qla_host_t *vp;
 
 	if (rptid_entry->entry_status != 0)
 		return;
 
 	if (rptid_entry->format == 0) {
 		DEBUG15(printk("%s:format 0 : scsi(%ld) number of VPs setup %d,"
-			" number of VPs acquired %d\n", __func__, ha->host_no,
+			" number of VPs acquired %d\n", __func__, vha->host_no,
 			MSB(rptid_entry->vp_count), LSB(rptid_entry->vp_count)));
 		DEBUG15(printk("%s primary port id %02x%02x%02x\n", __func__,
 			rptid_entry->port_id[2], rptid_entry->port_id[1],
@@ -2658,7 +2695,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *ha,
 		vp_idx = LSB(stat);
 		DEBUG15(printk("%s:format 1: scsi(%ld): VP[%d] enabled "
 		    "- status %d - "
-		    "with port id %02x%02x%02x\n",__func__,ha->host_no,
+		    "with port id %02x%02x%02x\n", __func__, vha->host_no,
 		    vp_idx, MSB(stat),
 		    rptid_entry->port_id[2], rptid_entry->port_id[1],
 		    rptid_entry->port_id[0]));
@@ -2668,25 +2705,24 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *ha,
 		if (MSB(stat) == 1)
 			return;
 
-		list_for_each_entry(vha, &ha->vp_list, vp_list)
-			if (vp_idx == vha->vp_idx)
+		list_for_each_entry(vp, &ha->vp_list, list)
+			if (vp_idx == vp->vp_idx)
 				break;
-
-		if (!vha)
+		if (!vp)
 			return;
 
-		vha->d_id.b.domain = rptid_entry->port_id[2];
-		vha->d_id.b.area =  rptid_entry->port_id[1];
-		vha->d_id.b.al_pa = rptid_entry->port_id[0];
+		vp->d_id.b.domain = rptid_entry->port_id[2];
+		vp->d_id.b.area =  rptid_entry->port_id[1];
+		vp->d_id.b.al_pa = rptid_entry->port_id[0];
 
 		/*
 		 * Cannot configure here as we are still sitting on the
 		 * response queue. Handle it in dpc context.
 		 */
-		set_bit(VP_IDX_ACQUIRED, &vha->vp_flags);
-		set_bit(VP_DPC_NEEDED, &ha->dpc_flags);
+		set_bit(VP_IDX_ACQUIRED, &vp->vp_flags);
+		set_bit(VP_DPC_NEEDED, &vha->dpc_flags);
 
-		qla2xxx_wake_dpc(ha);
+		qla2xxx_wake_dpc(vha);
 	}
 }
 
@@ -2709,15 +2745,15 @@ qla24xx_modify_vp_config(scsi_qla_host_t *vha)
 	int		rval;
 	struct vp_config_entry_24xx *vpmod;
 	dma_addr_t	vpmod_dma;
-	scsi_qla_host_t *pha;
+	struct qla_hw_data *ha = vha->hw;
+	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
 
 	/* This can be called by the parent */
-	pha = to_qla_parent(vha);
 
-	vpmod = dma_pool_alloc(pha->s_dma_pool, GFP_KERNEL, &vpmod_dma);
+	vpmod = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma);
 	if (!vpmod) {
 		DEBUG2_3(printk("%s(%ld): failed to allocate Modify VP "
-		    "IOCB.\n", __func__, pha->host_no));
+		    "IOCB.\n", __func__, vha->host_no));
 		return QLA_MEMORY_ALLOC_FAILED;
 	}
 
@@ -2732,26 +2768,27 @@ qla24xx_modify_vp_config(scsi_qla_host_t *vha)
 	memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE);
 	vpmod->entry_count = 1;
 
-	rval = qla2x00_issue_iocb(pha, vpmod, vpmod_dma, 0);
+	rval = qla2x00_issue_iocb(base_vha, vpmod, vpmod_dma, 0);
 	if (rval != QLA_SUCCESS) {
 		DEBUG2_3_11(printk("%s(%ld): failed to issue VP config IOCB"
-			"(%x).\n", __func__, pha->host_no, rval));
+			"(%x).\n", __func__, base_vha->host_no, rval));
 	} else if (vpmod->comp_status != 0) {
 		DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
-			"-- error status (%x).\n", __func__, pha->host_no,
+			"-- error status (%x).\n", __func__, base_vha->host_no,
 			vpmod->comp_status));
 		rval = QLA_FUNCTION_FAILED;
 	} else if (vpmod->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
 		DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
-		    "-- completion status (%x).\n", __func__, pha->host_no,
+		    "-- completion status (%x).\n", __func__, base_vha->host_no,
 		    le16_to_cpu(vpmod->comp_status)));
 		rval = QLA_FUNCTION_FAILED;
 	} else {
 		/* EMPTY */
-		DEBUG11(printk("%s(%ld): done.\n", __func__, pha->host_no));
+		DEBUG11(printk("%s(%ld): done.\n", __func__,
+							base_vha->host_no));
 		fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING);
 	}
-	dma_pool_free(pha->s_dma_pool, vpmod, vpmod_dma);
+	dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma);
 
 	return rval;
 }
@@ -2778,11 +2815,12 @@ qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
 	int		map, pos;
 	struct vp_ctrl_entry_24xx   *vce;
 	dma_addr_t	vce_dma;
-	scsi_qla_host_t *ha = vha->parent;
+	struct qla_hw_data *ha = vha->hw;
 	int	vp_index = vha->vp_idx;
+	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
 
 	DEBUG11(printk("%s(%ld): entered. Enabling index %d\n", __func__,
-	    ha->host_no, vp_index));
+	    vha->host_no, vp_index));
 
 	if (vp_index == 0 || vp_index >= ha->max_npiv_vports)
 		return QLA_PARAMETER_ERROR;
@@ -2791,7 +2829,7 @@ qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
 	if (!vce) {
 		DEBUG2_3(printk("%s(%ld): "
 		    "failed to allocate VP Control IOCB.\n", __func__,
-		    ha->host_no));
+		    base_vha->host_no));
 		return QLA_MEMORY_ALLOC_FAILED;
 	}
 	memset(vce, 0, sizeof(struct vp_ctrl_entry_24xx));
@@ -2810,30 +2848,30 @@ qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
 	vce->vp_idx_map[map] |= 1 << pos;
 	mutex_unlock(&ha->vport_lock);
 
-	rval = qla2x00_issue_iocb(ha, vce, vce_dma, 0);
+	rval = qla2x00_issue_iocb(base_vha, vce, vce_dma, 0);
 	if (rval != QLA_SUCCESS) {
 		DEBUG2_3_11(printk("%s(%ld): failed to issue VP control IOCB"
-		    "(%x).\n", __func__, ha->host_no, rval));
+		    "(%x).\n", __func__, base_vha->host_no, rval));
 		printk("%s(%ld): failed to issue VP control IOCB"
-		    "(%x).\n", __func__, ha->host_no, rval);
+		    "(%x).\n", __func__, base_vha->host_no, rval);
 	} else if (vce->entry_status != 0) {
 		DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
-		    "-- error status (%x).\n", __func__, ha->host_no,
+		    "-- error status (%x).\n", __func__, base_vha->host_no,
 		    vce->entry_status));
 		printk("%s(%ld): failed to complete IOCB "
-		    "-- error status (%x).\n", __func__, ha->host_no,
+		    "-- error status (%x).\n", __func__, base_vha->host_no,
 		    vce->entry_status);
 		rval = QLA_FUNCTION_FAILED;
 	} else if (vce->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
 		DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
-		    "-- completion status (%x).\n", __func__, ha->host_no,
+		    "-- completion status (%x).\n", __func__, base_vha->host_no,
 		    le16_to_cpu(vce->comp_status)));
 		printk("%s(%ld): failed to complete IOCB "
-		    "-- completion status (%x).\n", __func__, ha->host_no,
+		    "-- completion status (%x).\n", __func__, base_vha->host_no,
 		    le16_to_cpu(vce->comp_status));
 		rval = QLA_FUNCTION_FAILED;
 	} else {
-		DEBUG2(printk("%s(%ld): done.\n", __func__, ha->host_no));
+		DEBUG2(printk("%s(%ld): done.\n", __func__, base_vha->host_no));
 	}
 
 	dma_pool_free(ha->s_dma_pool, vce, vce_dma);
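
[Editor's note -- illustrative sketch, not part of the patch. The hunk
above sets a per-VP bit with vce->vp_idx_map[map] |= 1 << pos; the
map/pos arithmetic itself lies outside the hunk, so the computation
below is an inference from the byte-array usage, not quoted driver
code:]

    #include <stdio.h>

    /* Set bit (vp_index - 1) in a byte-array bitmap. */
    static void toy_set_vp_bit(unsigned char *bitmap, int vp_index)
    {
            int map = (vp_index - 1) / 8;   /* which byte (assumed) */
            int pos = (vp_index - 1) & 7;   /* which bit  (assumed) */

            bitmap[map] |= 1 << pos;
    }

    int main(void)
    {
            unsigned char vp_idx_map[16] = { 0 };

            toy_set_vp_bit(vp_idx_map, 3);      /* vp 3 -> byte 0, bit 2 */
            printf("%#x\n", vp_idx_map[0]);     /* prints 0x4 */
            return 0;
    }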
@@ -2863,7 +2901,7 @@ qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
  */
 
 int
-qla2x00_send_change_request(scsi_qla_host_t *ha, uint16_t format,
+qla2x00_send_change_request(scsi_qla_host_t *vha, uint16_t format,
 			    uint16_t vp_idx)
 {
 	int rval;
@@ -2884,7 +2922,7 @@ qla2x00_send_change_request(scsi_qla_host_t *ha, uint16_t format,
 	mcp->in_mb = MBX_0|MBX_1;
 	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
-	rval = qla2x00_mailbox_command(ha, mcp);
+	rval = qla2x00_mailbox_command(vha, mcp);
 
 	if (rval == QLA_SUCCESS) {
 		if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
@@ -2897,16 +2935,16 @@ qla2x00_send_change_request(scsi_qla_host_t *ha, uint16_t format,
 }
 
 int
-qla2x00_dump_ram(scsi_qla_host_t *ha, dma_addr_t req_dma, uint32_t addr,
+qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
     uint32_t size)
 {
 	int rval;
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+	DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
 
-	if (MSW(addr) || IS_FWI2_CAPABLE(ha)) {
+	if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) {
 		mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
 		mcp->mb[8] = MSW(addr);
 		mcp->out_mb = MBX_8|MBX_0;
@@ -2920,7 +2958,7 @@ qla2x00_dump_ram(scsi_qla_host_t *ha, dma_addr_t req_dma, uint32_t addr,
 	mcp->mb[6] = MSW(MSD(req_dma));
 	mcp->mb[7] = LSW(MSD(req_dma));
 	mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
-	if (IS_FWI2_CAPABLE(ha)) {
+	if (IS_FWI2_CAPABLE(vha->hw)) {
 		mcp->mb[4] = MSW(size);
 		mcp->mb[5] = LSW(size);
 		mcp->out_mb |= MBX_5|MBX_4;
@@ -2932,13 +2970,13 @@ qla2x00_dump_ram(scsi_qla_host_t *ha, dma_addr_t req_dma, uint32_t addr,
 	mcp->in_mb = MBX_0;
 	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
-	rval = qla2x00_mailbox_command(ha, mcp);
+	rval = qla2x00_mailbox_command(vha, mcp);
 
 	if (rval != QLA_SUCCESS) {
 		DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__,
-		    ha->host_no, rval, mcp->mb[0]));
+		    vha->host_no, rval, mcp->mb[0]));
 	} else {
-		DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
+		DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
 	}
 
 	return rval;
@@ -2954,20 +2992,21 @@ struct cs84xx_mgmt_cmd {
 };
 
 int
-qla84xx_verify_chip(struct scsi_qla_host *ha, uint16_t *status)
+qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
 {
 	int rval, retry;
 	struct cs84xx_mgmt_cmd *mn;
 	dma_addr_t mn_dma;
 	uint16_t options;
 	unsigned long flags;
+	struct qla_hw_data *ha = vha->hw;
 
-	DEBUG16(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+	DEBUG16(printk("%s(%ld): entered.\n", __func__, vha->host_no));
 
 	mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
 	if (mn == NULL) {
 		DEBUG2_3(printk("%s(%ld): failed to allocate Verify ISP84XX "
-		    "IOCB.\n", __func__, ha->host_no));
+		    "IOCB.\n", __func__, vha->host_no));
 		return QLA_MEMORY_ALLOC_FAILED;
 	}
 
@@ -2986,19 +3025,19 @@ qla84xx_verify_chip(struct scsi_qla_host *ha, uint16_t *status)
 		mn->p.req.options = cpu_to_le16(options);
 
 		DEBUG16(printk("%s(%ld): Dump of Verify Request.\n", __func__,
-		    ha->host_no));
+		    vha->host_no));
 		DEBUG16(qla2x00_dump_buffer((uint8_t *)mn,
 		    sizeof(*mn)));
 
-		rval = qla2x00_issue_iocb_timeout(ha, mn, mn_dma, 0, 120);
+		rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
 		if (rval != QLA_SUCCESS) {
 			DEBUG2_16(printk("%s(%ld): failed to issue Verify "
-			    "IOCB (%x).\n", __func__, ha->host_no, rval));
+			    "IOCB (%x).\n", __func__, vha->host_no, rval));
 			goto verify_done;
 		}
 
 		DEBUG16(printk("%s(%ld): Dump of Verify Response.\n", __func__,
-		    ha->host_no));
+		    vha->host_no));
 		DEBUG16(qla2x00_dump_buffer((uint8_t *)mn,
 		    sizeof(*mn)));
 
@@ -3006,21 +3045,21 @@ qla84xx_verify_chip(struct scsi_qla_host *ha, uint16_t *status)
 		status[1] = status[0] == CS_VCS_CHIP_FAILURE ?
 		    le16_to_cpu(mn->p.rsp.failure_code) : 0;
 		DEBUG2_16(printk("%s(%ld): cs=%x fc=%x\n", __func__,
-		    ha->host_no, status[0], status[1]));
+		    vha->host_no, status[0], status[1]));
 
 		if (status[0] != CS_COMPLETE) {
 			rval = QLA_FUNCTION_FAILED;
 			if (!(options & VCO_DONT_UPDATE_FW)) {
 				DEBUG2_16(printk("%s(%ld): Firmware update "
 				    "failed. Retrying without update "
-				    "firmware.\n", __func__, ha->host_no));
+				    "firmware.\n", __func__, vha->host_no));
 				options |= VCO_DONT_UPDATE_FW;
 				options &= ~VCO_FORCE_UPDATE;
 				retry = 1;
 			}
 		} else {
 			DEBUG2_16(printk("%s(%ld): firmware updated to %x.\n",
-			    __func__, ha->host_no,
+			    __func__, vha->host_no,
 			    le32_to_cpu(mn->p.rsp.fw_ver)));
 
 			/* NOTE: we only update OP firmware. */
@@ -3037,10 +3076,115 @@ verify_done:
 
 	if (rval != QLA_SUCCESS) {
 		DEBUG2_16(printk("%s(%ld): failed=%x.\n", __func__,
-		    ha->host_no, rval));
+		    vha->host_no, rval));
 	} else {
-		DEBUG16(printk("%s(%ld): done.\n", __func__, ha->host_no));
+		DEBUG16(printk("%s(%ld): done.\n", __func__, vha->host_no));
+	}
+
+	return rval;
+}
+
+int
+qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req,
+	uint8_t options)
+{
+	int rval;
+	unsigned long flags;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+	struct device_reg_25xxmq __iomem *reg;
+	struct qla_hw_data *ha = vha->hw;
+
+	mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
+	mcp->mb[1] = options;
+	mcp->mb[2] = MSW(LSD(req->dma));
+	mcp->mb[3] = LSW(LSD(req->dma));
+	mcp->mb[6] = MSW(MSD(req->dma));
+	mcp->mb[7] = LSW(MSD(req->dma));
+	mcp->mb[5] = req->length;
+	if (req->rsp)
+		mcp->mb[10] = req->rsp->id;
+	mcp->mb[12] = req->qos;
+	mcp->mb[11] = req->vp_idx;
+	mcp->mb[13] = req->rid;
+
+	reg = (struct device_reg_25xxmq *)((void *)(ha->mqiobase) +
+		QLA_QUE_PAGE * req->id);
+
+	mcp->mb[4] = req->id;
+	/* que in ptr index */
+	mcp->mb[8] = 0;
+	/* que out ptr index */
+	mcp->mb[9] = 0;
+	mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7|
+			MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+	mcp->in_mb = MBX_0;
+	mcp->flags = MBX_DMA_OUT;
+	mcp->tov = 60;
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	if (!(options & BIT_0)) {
+		WRT_REG_DWORD(&reg->req_q_in, 0);
+		WRT_REG_DWORD(&reg->req_q_out, 0);
+	}
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	rval = qla2x00_mailbox_command(vha, mcp);
+	if (rval != QLA_SUCCESS)
+		DEBUG2_3_11(printk(KERN_WARNING "%s(%ld): failed=%x mb0=%x.\n",
+			__func__, vha->host_no, rval, mcp->mb[0]));
+	return rval;
+}
+
+int
+qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp,
+	uint8_t options)
+{
+	int rval;
+	unsigned long flags;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+	struct device_reg_25xxmq __iomem *reg;
+	struct qla_hw_data *ha = vha->hw;
+
+	mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
+	mcp->mb[1] = options;
+	mcp->mb[2] = MSW(LSD(rsp->dma));
+	mcp->mb[3] = LSW(LSD(rsp->dma));
+	mcp->mb[6] = MSW(MSD(rsp->dma));
+	mcp->mb[7] = LSW(MSD(rsp->dma));
+	mcp->mb[5] = rsp->length;
+	mcp->mb[11] = rsp->vp_idx;
+	mcp->mb[14] = rsp->msix->vector;
+	mcp->mb[13] = rsp->rid;
+
+	reg = (struct device_reg_25xxmq *)((void *)(ha->mqiobase) +
+		QLA_QUE_PAGE * rsp->id);
+
+	mcp->mb[4] = rsp->id;
+	/* que in ptr index */
+	mcp->mb[8] = 0;
+	/* que out ptr index */
+	mcp->mb[9] = 0;
+	mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7
+			|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+	mcp->in_mb = MBX_0;
+	mcp->flags = MBX_DMA_OUT;
+	mcp->tov = 60;
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	if (!(options & BIT_0)) {
+		WRT_REG_DWORD(&reg->rsp_q_out, 0);
+		WRT_REG_DWORD(&reg->rsp_q_in, 0);
 	}
 
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	rval = qla2x00_mailbox_command(vha, mcp);
+	if (rval != QLA_SUCCESS)
+		DEBUG2_3_11(printk(KERN_WARNING "%s(%ld): failed=%x "
+			"mb0=%x.\n", __func__,
+			vha->host_no, rval, mcp->mb[0]));
 	return rval;
 }
+
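
[Editor's note -- illustrative sketch, not part of the patch. The new
qla25xx_init_req_que()/qla25xx_init_rsp_que() above pack a 64-bit queue
DMA address into 16-bit mailbox registers using the driver's
MSW/LSW/MSD/LSD word-splitting macros. A self-contained toy of that
packing, with macro definitions assumed to match their conventional
meaning:]

    #include <stdint.h>
    #include <stdio.h>

    #define LSW(x)  ((uint16_t)(x))
    #define MSW(x)  ((uint16_t)((uint32_t)(x) >> 16))
    #define LSD(x)  ((uint32_t)(x))
    #define MSD(x)  ((uint32_t)((uint64_t)(x) >> 32))

    int main(void)
    {
            uint64_t dma = 0x1122334455667788ULL;   /* made-up address */
            uint16_t mb[8] = { 0 };

            /* same layout qla25xx_init_req_que() uses for req->dma */
            mb[2] = MSW(LSD(dma));
            mb[3] = LSW(LSD(dma));
            mb[6] = MSW(MSD(dma));
            mb[7] = LSW(MSD(dma));

            printf("mb2=%04x mb3=%04x mb6=%04x mb7=%04x\n",
                mb[2], mb[3], mb[6], mb[7]);
            /* -> mb2=5566 mb3=7788 mb6=1122 mb7=3344 */
            return 0;
    }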
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 93560cd72784..386ffeae5b5a 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -5,6 +5,7 @@
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
 #include "qla_def.h"
+#include "qla_gbl.h"
 
 #include <linux/moduleparam.h>
 #include <linux/vmalloc.h>
@@ -18,7 +19,7 @@
 void
 qla2x00_vp_stop_timer(scsi_qla_host_t *vha)
 {
-	if (vha->parent && vha->timer_active) {
+	if (vha->vp_idx && vha->timer_active) {
 		del_timer_sync(&vha->timer);
 		vha->timer_active = 0;
 	}
@@ -28,7 +29,7 @@ static uint32_t
 qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
 {
 	uint32_t vp_id;
-	scsi_qla_host_t *ha = vha->parent;
+	struct qla_hw_data *ha = vha->hw;
 
 	/* Find an empty slot and assign an vp_id */
 	mutex_lock(&ha->vport_lock);
@@ -44,7 +45,7 @@ qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
 	ha->num_vhosts++;
 	ha->cur_vport_count++;
 	vha->vp_idx = vp_id;
-	list_add_tail(&vha->vp_list, &ha->vp_list);
+	list_add_tail(&vha->list, &ha->vp_list);
 	mutex_unlock(&ha->vport_lock);
 	return vp_id;
 }
@@ -53,24 +54,24 @@ void
 qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
 {
 	uint16_t vp_id;
-	scsi_qla_host_t *ha = vha->parent;
+	struct qla_hw_data *ha = vha->hw;
 
 	mutex_lock(&ha->vport_lock);
 	vp_id = vha->vp_idx;
 	ha->num_vhosts--;
 	ha->cur_vport_count--;
 	clear_bit(vp_id, ha->vp_idx_map);
-	list_del(&vha->vp_list);
+	list_del(&vha->list);
 	mutex_unlock(&ha->vport_lock);
 }
 
 static scsi_qla_host_t *
-qla24xx_find_vhost_by_name(scsi_qla_host_t *ha, uint8_t *port_name)
+qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name)
 {
 	scsi_qla_host_t *vha;
 
 	/* Locate matching device in database. */
-	list_for_each_entry(vha, &ha->vp_list, vp_list) {
+	list_for_each_entry(vha, &ha->vp_list, list) {
 		if (!memcmp(port_name, vha->port_name, WWN_SIZE))
 			return vha;
 	}
@@ -94,16 +95,13 @@ static void
 qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
 {
 	fc_port_t *fcport;
-	scsi_qla_host_t *pha = to_qla_parent(vha);
-
-	list_for_each_entry(fcport, &pha->fcports, list) {
-		if (fcport->vp_idx != vha->vp_idx)
-			continue;
 
+	list_for_each_entry(fcport, &vha->vp_fcports, list) {
 		DEBUG15(printk("scsi(%ld): Marking port dead, "
 		    "loop_id=0x%04x :%x\n",
 		    vha->host_no, fcport->loop_id, fcport->vp_idx));
 
+		atomic_set(&fcport->state, FCS_DEVICE_DEAD);
 		qla2x00_mark_device_lost(vha, fcport, 0, 0);
 		atomic_set(&fcport->state, FCS_UNCONFIGURED);
 	}
@@ -118,7 +116,6 @@ qla24xx_disable_vp(scsi_qla_host_t *vha)
 	atomic_set(&vha->loop_state, LOOP_DOWN);
 	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
 
-	/* Delete all vp's fcports from parent's list */
 	qla2x00_mark_vp_devices_dead(vha);
 	atomic_set(&vha->vp_state, VP_FAILED);
 	vha->flags.management_server_logged_in = 0;
@@ -135,11 +132,12 @@ int
 qla24xx_enable_vp(scsi_qla_host_t *vha)
 {
 	int ret;
-	scsi_qla_host_t *ha = vha->parent;
+	struct qla_hw_data *ha = vha->hw;
+	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
 
 	/* Check if physical ha port is Up */
-	if (atomic_read(&ha->loop_state) == LOOP_DOWN  ||
-		atomic_read(&ha->loop_state) == LOOP_DEAD ) {
+	if (atomic_read(&base_vha->loop_state) == LOOP_DOWN  ||
+		atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
 		vha->vp_err_state =  VP_ERR_PORTDWN;
 		fc_vport_set_state(vha->fc_vport, FC_VPORT_LINKDOWN);
 		goto enable_failed;
@@ -177,8 +175,8 @@ qla24xx_configure_vp(scsi_qla_host_t *vha)
 	    vha->host_no, __func__));
 	ret = qla2x00_send_change_request(vha, 0x3, vha->vp_idx);
 	if (ret != QLA_SUCCESS) {
-		DEBUG15(qla_printk(KERN_ERR, vha, "Failed to enable receiving"
-		    " of RSCN requests: 0x%x\n", ret));
+		DEBUG15(qla_printk(KERN_ERR, vha->hw, "Failed to enable "
+		    "receiving of RSCN requests: 0x%x\n", ret));
 		return;
 	} else {
 		/* Corresponds to SCR enabled */
@@ -194,25 +192,14 @@ qla24xx_configure_vp(scsi_qla_host_t *vha)
 }
 
 void
-qla2x00_alert_all_vps(scsi_qla_host_t *ha, uint16_t *mb)
+qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
 {
-	int i, vp_idx_matched;
 	scsi_qla_host_t *vha;
+	struct qla_hw_data *ha = rsp->hw;
+	int i = 0;
 
-	if (ha->parent)
-		return;
-
-	for_each_mapped_vp_idx(ha, i) {
-		vp_idx_matched = 0;
-
-		list_for_each_entry(vha, &ha->vp_list, vp_list) {
-			if (i == vha->vp_idx) {
-				vp_idx_matched = 1;
-				break;
-			}
-		}
-
-		if (vp_idx_matched) {
+	list_for_each_entry(vha, &ha->vp_list, list) {
+		if (vha->vp_idx) {
 			switch (mb[0]) {
 			case MBA_LIP_OCCURRED:
 			case MBA_LOOP_UP:
@@ -223,16 +210,17 @@ qla2x00_alert_all_vps(scsi_qla_host_t *ha, uint16_t *mb)
 			case MBA_PORT_UPDATE:
 			case MBA_RSCN_UPDATE:
 				DEBUG15(printk("scsi(%ld)%s: Async_event for"
-				    " VP[%d], mb = 0x%x, vha=%p\n",
-				    vha->host_no, __func__,i, *mb, vha));
-				qla2x00_async_event(vha, mb);
+				" VP[%d], mb = 0x%x, vha=%p\n",
+				vha->host_no, __func__, i, *mb, vha));
+				qla2x00_async_event(vha, rsp, mb);
 				break;
 			}
 		}
+		i++;
 	}
 }
 
-void
+int
 qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
 {
 	/*
@@ -247,38 +235,56 @@ qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
 			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
 	}
 
+	/* To exclusively reset vport, we need to log it out first.*/
+	if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
+		qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
+
 	DEBUG15(printk("scsi(%ld): Scheduling enable of Vport %d...\n",
 	    vha->host_no, vha->vp_idx));
-	qla24xx_enable_vp(vha);
+	return qla24xx_enable_vp(vha);
 }
 
 static int
 qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
 {
-	scsi_qla_host_t *ha = vha->parent;
+	struct qla_hw_data *ha = vha->hw;
+	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
 
 	if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
 		/* VP acquired. complete port configuration */
-		if (atomic_read(&ha->loop_state) == LOOP_READY) {
+		if (atomic_read(&base_vha->loop_state) == LOOP_READY) {
 			qla24xx_configure_vp(vha);
 		} else {
 			set_bit(VP_IDX_ACQUIRED, &vha->vp_flags);
-			set_bit(VP_DPC_NEEDED, &ha->dpc_flags);
+			set_bit(VP_DPC_NEEDED, &base_vha->dpc_flags);
 		}
 
 		return 0;
 	}
 
-	if (test_and_clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
-		qla2x00_vp_abort_isp(vha);
+	if (test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) {
+		qla2x00_update_fcports(vha);
+		clear_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags);
+	}
+
+	if ((test_and_clear_bit(RELOGIN_NEEDED, &vha->dpc_flags)) &&
+		!test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
+		atomic_read(&vha->loop_state) != LOOP_DOWN) {
+
+		DEBUG(printk("scsi(%ld): qla2x00_port_login()\n",
+						vha->host_no));
+		qla2x00_relogin(vha);
+
+		DEBUG(printk("scsi(%ld): qla2x00_port_login - end\n",
+							vha->host_no));
+	}
 
 	if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) &&
 	    (!(test_and_set_bit(RESET_ACTIVE, &vha->dpc_flags)))) {
 		clear_bit(RESET_ACTIVE, &vha->dpc_flags);
 	}
 
-	if (atomic_read(&vha->vp_state) == VP_ACTIVE &&
-	    test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
+	if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
 		if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) {
 			qla2x00_loop_resync(vha);
 			clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags);
@@ -289,38 +295,30 @@ qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
 }
 
 void
-qla2x00_do_dpc_all_vps(scsi_qla_host_t *ha)
+qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
 {
 	int ret;
-	int i, vp_idx_matched;
-	scsi_qla_host_t *vha;
+	struct qla_hw_data *ha = vha->hw;
+	scsi_qla_host_t *vp;
 
-	if (ha->parent)
+	if (vha->vp_idx)
 		return;
 	if (list_empty(&ha->vp_list))
 		return;
 
-	clear_bit(VP_DPC_NEEDED, &ha->dpc_flags);
-
-	for_each_mapped_vp_idx(ha, i) {
-		vp_idx_matched = 0;
-
-		list_for_each_entry(vha, &ha->vp_list, vp_list) {
-			if (i == vha->vp_idx) {
-				vp_idx_matched = 1;
-				break;
-			}
-		}
+	clear_bit(VP_DPC_NEEDED, &vha->dpc_flags);
 
-		if (vp_idx_matched)
-			ret = qla2x00_do_dpc_vp(vha);
+	list_for_each_entry(vp, &ha->vp_list, list) {
+		if (vp->vp_idx)
+			ret = qla2x00_do_dpc_vp(vp);
 	}
 }
 
 int
 qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
 {
-	scsi_qla_host_t *ha = shost_priv(fc_vport->shost);
+	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
+	struct qla_hw_data *ha = base_vha->hw;
 	scsi_qla_host_t *vha;
 	uint8_t port_name[WWN_SIZE];
 
@@ -337,7 +335,7 @@ qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
 
 	/* Check up unique WWPN */
 	u64_to_wwn(fc_vport->port_name, port_name);
-	if (!memcmp(port_name, ha->port_name, WWN_SIZE))
+	if (!memcmp(port_name, base_vha->port_name, WWN_SIZE))
 		return VPCERR_BAD_WWN;
 	vha = qla24xx_find_vhost_by_name(ha, port_name);
 	if (vha)
@@ -346,7 +344,7 @@ qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
 	/* Check up max-npiv-supports */
 	if (ha->num_vhosts > ha->max_npiv_vports) {
 		DEBUG15(printk("scsi(%ld): num_vhosts %ud is bigger than "
-		    "max_npv_vports %ud.\n", ha->host_no,
+		    "max_npv_vports %ud.\n", base_vha->host_no,
 		    ha->num_vhosts, ha->max_npiv_vports));
 		return VPCERR_UNSUPPORTED;
 	}
@@ -356,59 +354,34 @@ qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
 scsi_qla_host_t *
 qla24xx_create_vhost(struct fc_vport *fc_vport)
 {
-	scsi_qla_host_t *ha = shost_priv(fc_vport->shost);
+	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
+	struct qla_hw_data *ha = base_vha->hw;
 	scsi_qla_host_t *vha;
+	struct scsi_host_template *sht = &qla24xx_driver_template;
 	struct Scsi_Host *host;
 
-	host = scsi_host_alloc(&qla24xx_driver_template,
-	    sizeof(scsi_qla_host_t));
-	if (!host) {
-		printk(KERN_WARNING
-		    "qla2xxx: scsi_host_alloc() failed for vport\n");
+	vha = qla2x00_create_host(sht, ha);
+	if (!vha) {
+		DEBUG(printk("qla2xxx: scsi_host_alloc() failed for vport\n"));
 		return(NULL);
 	}
 
-	vha = shost_priv(host);
-
-	/* clone the parent hba */
-	memcpy(vha, ha, sizeof (scsi_qla_host_t));
-
+	host = vha->host;
 	fc_vport->dd_data = vha;
-
-	vha->node_name = kmalloc(WWN_SIZE * sizeof(char), GFP_KERNEL);
-	if (!vha->node_name)
-		goto create_vhost_failed_1;
-
-	vha->port_name = kmalloc(WWN_SIZE * sizeof(char), GFP_KERNEL);
-	if (!vha->port_name)
-		goto create_vhost_failed_2;
-
 	/* New host info */
 	u64_to_wwn(fc_vport->node_name, vha->node_name);
 	u64_to_wwn(fc_vport->port_name, vha->port_name);
 
-	vha->host = host;
-	vha->host_no = host->host_no;
-	vha->parent = ha;
 	vha->fc_vport = fc_vport;
 	vha->device_flags = 0;
 	vha->vp_idx = qla24xx_allocate_vp_id(vha);
 	if (vha->vp_idx > ha->max_npiv_vports) {
 		DEBUG15(printk("scsi(%ld): Couldn't allocate vp_id.\n",
 			vha->host_no));
-		goto create_vhost_failed_3;
+		goto create_vhost_failed;
 	}
 	vha->mgmt_svr_loop_id = 10 + vha->vp_idx;
 
-	init_completion(&vha->mbx_cmd_comp);
-	complete(&vha->mbx_cmd_comp);
-	init_completion(&vha->mbx_intr_comp);
-
-	INIT_LIST_HEAD(&vha->list);
-	INIT_LIST_HEAD(&vha->fcports);
-	INIT_LIST_HEAD(&vha->vp_fcports);
-	INIT_LIST_HEAD(&vha->work_list);
-
 	vha->dpc_flags = 0L;
 	set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
 	set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
@@ -423,7 +396,9 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
 
 	qla2x00_start_timer(vha, qla2x00_timer, WATCH_INTERVAL);
 
-	host->can_queue = vha->request_q_length + 128;
+	memset(vha->req_ques, 0, sizeof(vha->req_ques) * QLA_MAX_HOST_QUES);
+	vha->req_ques[0] = ha->req_q_map[0]->id;
+	host->can_queue = ha->req_q_map[0]->length + 128;
 	host->this_id = 255;
 	host->cmd_per_lun = 3;
 	host->max_cmd_len = MAX_CMDSZ;
@@ -440,12 +415,341 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
 
 	return vha;
 
-create_vhost_failed_3:
-	kfree(vha->port_name);
+create_vhost_failed:
+	return NULL;
+}
 
-create_vhost_failed_2:
-	kfree(vha->node_name);
+static void
+qla25xx_free_req_que(struct scsi_qla_host *vha, struct req_que *req)
+{
+	struct qla_hw_data *ha = vha->hw;
+	uint16_t que_id = req->id;
+
+	dma_free_coherent(&ha->pdev->dev, (req->length + 1) *
+		sizeof(request_t), req->ring, req->dma);
+	req->ring = NULL;
+	req->dma = 0;
+	if (que_id) {
+		ha->req_q_map[que_id] = NULL;
+		mutex_lock(&ha->vport_lock);
+		clear_bit(que_id, ha->req_qid_map);
+		mutex_unlock(&ha->vport_lock);
+	}
+	kfree(req);
+	req = NULL;
+}
 
-create_vhost_failed_1:
-	return NULL;
+static void
+qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
+{
+	struct qla_hw_data *ha = vha->hw;
+	uint16_t que_id = rsp->id;
+
+	if (rsp->msix && rsp->msix->have_irq) {
+		free_irq(rsp->msix->vector, rsp);
+		rsp->msix->have_irq = 0;
+		rsp->msix->rsp = NULL;
+	}
+	dma_free_coherent(&ha->pdev->dev, (rsp->length + 1) *
+		sizeof(response_t), rsp->ring, rsp->dma);
+	rsp->ring = NULL;
+	rsp->dma = 0;
+	if (que_id) {
+		ha->rsp_q_map[que_id] = NULL;
+		mutex_lock(&ha->vport_lock);
+		clear_bit(que_id, ha->rsp_qid_map);
+		mutex_unlock(&ha->vport_lock);
+	}
+	kfree(rsp);
+	rsp = NULL;
+}
+
+int
+qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
+{
+	int ret = -1;
+
+	if (req) {
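+		/* BIT_0 in the options presumably asks the firmware's
+		 * init-queue command to tear the queue down; only free
+		 * the queue after a successful teardown. */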
+		req->options |= BIT_0;
+		ret = qla25xx_init_req_que(vha, req, req->options);
+	}
+	if (ret == QLA_SUCCESS)
+		qla25xx_free_req_que(vha, req);
+
+	return ret;
+}
+
+int
+qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
+{
+	int ret = -1;
+
+	if (rsp) {
+		rsp->options |= BIT_0;
+		ret = qla25xx_init_rsp_que(vha, rsp, rsp->options);
+	}
+	if (ret == QLA_SUCCESS)
+		qla25xx_free_rsp_que(vha, rsp);
+
+	return ret;
+}
+
+int qla25xx_update_req_que(struct scsi_qla_host *vha, uint8_t que, uint8_t qos)
+{
+	int ret = 0;
+	struct qla_hw_data *ha = vha->hw;
+	struct req_que *req = ha->req_q_map[que];
+
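+	/* BIT_3 presumably flags a QoS-only update to the firmware's
+	 * init-queue command. */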
+	req->options |= BIT_3;
+	req->qos = qos;
+	ret = qla25xx_init_req_que(vha, req, req->options);
+	if (ret != QLA_SUCCESS)
+		DEBUG2_17(printk(KERN_WARNING "%s failed\n", __func__));
+	/* restore options bit */
+	req->options &= ~BIT_3;
+	return ret;
+}
+
+
+/* Delete all queues for a given vhost */
+int
+qla25xx_delete_queues(struct scsi_qla_host *vha, uint8_t que_no)
+{
+	int cnt, ret = 0;
+	struct req_que *req = NULL;
+	struct rsp_que *rsp = NULL;
+	struct qla_hw_data *ha = vha->hw;
+
+	if (que_no) {
+		/* Delete the specified request queue */
+		req = ha->req_q_map[que_no];
+		if (req) {
+			rsp = req->rsp;
+			ret = qla25xx_delete_req_que(vha, req);
+			if (ret != QLA_SUCCESS) {
+				qla_printk(KERN_WARNING, ha,
+				"Couldn't delete req que %d\n", req->id);
+				return ret;
+			}
+			/* Delete associated response queue */
+			if (rsp) {
+				ret = qla25xx_delete_rsp_que(vha, rsp);
+				if (ret != QLA_SUCCESS) {
+					qla_printk(KERN_WARNING, ha,
+						"Couldn't delete rsp que %d\n",
+						rsp->id);
+					return ret;
+				}
+			}
+		}
+	} else {  /* delete all queues of this host */
+		for (cnt = 0; cnt < QLA_MAX_HOST_QUES; cnt++) {
+			/* Delete request queues */
+			req = ha->req_q_map[vha->req_ques[cnt]];
+			if (req && req->id) {
+				rsp = req->rsp;
+				ret = qla25xx_delete_req_que(vha, req);
+				if (ret != QLA_SUCCESS) {
+					qla_printk(KERN_WARNING, ha,
+						"Couldn't delete req que %d\n",
+						vha->req_ques[cnt]);
+					return ret;
+				}
+				vha->req_ques[cnt] = ha->req_q_map[0]->id;
+				/* Delete associated response queue */
+				if (rsp && rsp->id) {
+					ret = qla25xx_delete_rsp_que(vha, rsp);
+					if (ret != QLA_SUCCESS) {
+						qla_printk(KERN_WARNING, ha,
+						"Couldn't delete rsp que %d\n",
+						rsp->id);
+						return ret;
+					}
+				}
+			}
+		}
+	}
+	qla_printk(KERN_INFO, ha, "Queues deleted for vport:%d\n",
+		vha->vp_idx);
+	return ret;
+}
+
+int
+qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
+	uint8_t vp_idx, uint16_t rid, uint8_t rsp_que, uint8_t qos)
+{
+	int ret = 0;
+	struct req_que *req = NULL;
+	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+	uint16_t que_id = 0;
+
+	req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
+	if (req == NULL) {
+		qla_printk(KERN_WARNING, ha, "could not allocate memory "
+			"for request que\n");
+		goto que_failed;
+	}
+
+	req->length = REQUEST_ENTRY_CNT_24XX;
+	req->ring = dma_alloc_coherent(&ha->pdev->dev,
+			(req->length + 1) * sizeof(request_t),
+			&req->dma, GFP_KERNEL);
+	if (req->ring == NULL) {
+		qla_printk(KERN_WARNING, ha,
+		"Memory Allocation failed - request_ring\n");
+		goto que_failed;
+	}
+
+	mutex_lock(&ha->vport_lock);
+	que_id = find_first_zero_bit(ha->req_qid_map, ha->max_queues);
+	if (que_id >= ha->max_queues) {
+		mutex_unlock(&ha->vport_lock);
+		qla_printk(KERN_INFO, ha, "No resources to create "
+			 "additional request queue\n");
+		goto que_failed;
+	}
+	set_bit(que_id, ha->req_qid_map);
+	ha->req_q_map[que_id] = req;
+	req->rid = rid;
+	req->vp_idx = vp_idx;
+	req->qos = qos;
+
+	if (ha->rsp_q_map[rsp_que])
+		req->rsp = ha->rsp_q_map[rsp_que];
+	/* Use alternate PCI bus number */
+	if (MSB(req->rid))
+		options |= BIT_4;
+	/* Use alternate PCI devfn */
+	if (LSB(req->rid))
+		options |= BIT_5;
+	req->options = options;
+	req->ring_ptr = req->ring;
+	req->ring_index = 0;
+	req->cnt = req->length;
+	req->id = que_id;
+	mutex_unlock(&ha->vport_lock);
+
+	ret = qla25xx_init_req_que(base_vha, req, options);
+	if (ret != QLA_SUCCESS) {
+		qla_printk(KERN_WARNING, ha, "%s failed\n", __func__);
+		mutex_lock(&ha->vport_lock);
+		clear_bit(que_id, ha->req_qid_map);
+		mutex_unlock(&ha->vport_lock);
+		goto que_failed;
+	}
+
+	return req->id;
+
+que_failed:
+	qla25xx_free_req_que(base_vha, req);
+	return 0;
+}
+
+/* create response queue */
+int
+qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
+	uint8_t vp_idx, uint16_t rid)
+{
+	int ret = 0;
+	struct rsp_que *rsp = NULL;
+	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+	uint16_t que_id = 0;
+
+	rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
+	if (rsp == NULL) {
+		qla_printk(KERN_WARNING, ha, "could not allocate memory for"
+				" response que\n");
+		goto que_failed;
+	}
+
+	rsp->length = RESPONSE_ENTRY_CNT_2300;
+	rsp->ring = dma_alloc_coherent(&ha->pdev->dev,
+			(rsp->length + 1) * sizeof(response_t),
+			&rsp->dma, GFP_KERNEL);
+	if (rsp->ring == NULL) {
+		qla_printk(KERN_WARNING, ha,
+		"Memory Allocation failed - response_ring\n");
+		goto que_failed;
+	}
+
+	mutex_lock(&ha->vport_lock);
+	que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_queues);
+	if (que_id >= ha->max_queues) {
+		mutex_unlock(&ha->vport_lock);
+		qla_printk(KERN_INFO, ha, "No resources to create "
+			 "additional response queue\n");
+		goto que_failed;
+	}
+	set_bit(que_id, ha->rsp_qid_map);
+
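+	/* Queue 0 consumes the first two MSI-X vectors, so response
+	 * queue N takes vector entry N + 1. */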
+	if (ha->flags.msix_enabled)
+		rsp->msix = &ha->msix_entries[que_id + 1];
+	else
+		qla_printk(KERN_WARNING, ha, "msix not enabled\n");
+
+	ha->rsp_q_map[que_id] = rsp;
+	rsp->rid = rid;
+	rsp->vp_idx = vp_idx;
+	rsp->hw = ha;
+	/* Use alternate PCI bus number */
+	if (MSB(rsp->rid))
+		options |= BIT_4;
+	/* Use alternate PCI devfn */
+	if (LSB(rsp->rid))
+		options |= BIT_5;
+	rsp->options = options;
+	rsp->ring_ptr = rsp->ring;
+	rsp->ring_index = 0;
+	rsp->id = que_id;
+	mutex_unlock(&ha->vport_lock);
+
+	ret = qla25xx_request_irq(rsp);
+	if (ret)
+		goto que_failed;
+
+	ret = qla25xx_init_rsp_que(base_vha, rsp, options);
+	if (ret != QLA_SUCCESS) {
+		qla_printk(KERN_WARNING, ha, "%s failed\n", __func__);
+		mutex_lock(&ha->vport_lock);
+		clear_bit(que_id, ha->rsp_qid_map);
+		mutex_unlock(&ha->vport_lock);
+		goto que_failed;
+	}
+
+	qla2x00_init_response_q_entries(rsp);
+
+	return rsp->id;
+
+que_failed:
+	qla25xx_free_rsp_que(base_vha, rsp);
+	return 0;
+}
+
+int
+qla25xx_create_queues(struct scsi_qla_host *vha, uint8_t qos)
+{
+	uint16_t options = 0;
+	uint8_t ret = 0;
+	struct qla_hw_data *ha = vha->hw;
+
+	options |= BIT_1;
+	ret = qla25xx_create_rsp_que(ha, options, vha->vp_idx, 0);
+	if (!ret) {
+		qla_printk(KERN_WARNING, ha, "Response Que create failed\n");
+		return ret;
+	} else
+		qla_printk(KERN_INFO, ha, "Response Que:%d created.\n", ret);
+
+	options = 0;
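+	/* The top bit of the caller's qos value is carried into the
+	 * request-queue options as BIT_8; the remaining bits are the
+	 * QoS level itself. */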
+	if (qos & BIT_7)
+		options |= BIT_8;
+	ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, ret,
+					qos & ~BIT_7);
+	if (ret) {
+		vha->req_ques[0] = ret;
+		qla_printk(KERN_INFO, ha, "Request Que:%d created.\n", ret);
+	} else
+		qla_printk(KERN_WARNING, ha, "Request Que create failed\n");
+
+	return ret;
 }
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 35567203ef61..8ea927788b3f 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -92,7 +92,12 @@ MODULE_PARM_DESC(ql2xiidmaenable,
 		"Enables iIDMA settings "
 		"Default is 1 - perform iIDMA. 0 - no iIDMA.");
 
-
+int ql2xmaxqueues = 1;
+module_param(ql2xmaxqueues, int, S_IRUGO|S_IRUSR);
+MODULE_PARM_DESC(ql2xmaxqueues,
+		"Enables MQ settings. "
+		"Default is 1 for single queue. "
+		"Set it to the number of queues in MQ mode.");
 /*
  * SCSI host template entry points
  */
@@ -183,42 +188,108 @@ struct scsi_transport_template *qla2xxx_transport_vport_template = NULL;
  */
 
 __inline__ void
-qla2x00_start_timer(scsi_qla_host_t *ha, void *func, unsigned long interval)
+qla2x00_start_timer(scsi_qla_host_t *vha, void *func, unsigned long interval)
 {
-	init_timer(&ha->timer);
-	ha->timer.expires = jiffies + interval * HZ;
-	ha->timer.data = (unsigned long)ha;
-	ha->timer.function = (void (*)(unsigned long))func;
-	add_timer(&ha->timer);
-	ha->timer_active = 1;
+	init_timer(&vha->timer);
+	vha->timer.expires = jiffies + interval * HZ;
+	vha->timer.data = (unsigned long)vha;
+	vha->timer.function = (void (*)(unsigned long))func;
+	add_timer(&vha->timer);
+	vha->timer_active = 1;
 }
 
 static inline void
-qla2x00_restart_timer(scsi_qla_host_t *ha, unsigned long interval)
+qla2x00_restart_timer(scsi_qla_host_t *vha, unsigned long interval)
 {
-	mod_timer(&ha->timer, jiffies + interval * HZ);
+	mod_timer(&vha->timer, jiffies + interval * HZ);
 }
 
 static __inline__ void
-qla2x00_stop_timer(scsi_qla_host_t *ha)
+qla2x00_stop_timer(scsi_qla_host_t *vha)
 {
-	del_timer_sync(&ha->timer);
-	ha->timer_active = 0;
+	del_timer_sync(&vha->timer);
+	vha->timer_active = 0;
 }
 
 static int qla2x00_do_dpc(void *data);
 
 static void qla2x00_rst_aen(scsi_qla_host_t *);
 
-static int qla2x00_mem_alloc(scsi_qla_host_t *);
-static void qla2x00_mem_free(scsi_qla_host_t *ha);
-static void qla2x00_sp_free_dma(scsi_qla_host_t *, srb_t *);
+static int qla2x00_mem_alloc(struct qla_hw_data *, uint16_t, uint16_t,
+	struct req_que **, struct rsp_que **);
+static void qla2x00_mem_free(struct qla_hw_data *);
+static void qla2x00_sp_free_dma(srb_t *);
 
 /* -------------------------------------------------------------------------- */
+static int qla2x00_alloc_queues(struct qla_hw_data *ha)
+{
+	ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_queues,
+				GFP_KERNEL);
+	if (!ha->req_q_map) {
+		qla_printk(KERN_WARNING, ha,
+			"Unable to allocate memory for request queue ptrs\n");
+		goto fail_req_map;
+	}
+
+	ha->rsp_q_map = kzalloc(sizeof(struct rsp_que *) * ha->max_queues,
+				GFP_KERNEL);
+	if (!ha->rsp_q_map) {
+		qla_printk(KERN_WARNING, ha,
+			"Unable to allocate memory for response queue ptrs\n");
+		goto fail_rsp_map;
+	}
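+	/* Queue id 0 is the base request/response pair set up at
+	 * probe time, so reserve it in both bitmaps. */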
+	set_bit(0, ha->rsp_qid_map);
+	set_bit(0, ha->req_qid_map);
+	return 1;
+
+fail_rsp_map:
+	kfree(ha->req_q_map);
+	ha->req_q_map = NULL;
+fail_req_map:
+	return 0;
+}
+
+static void qla2x00_free_que(struct qla_hw_data *ha, struct req_que *req,
+	struct rsp_que *rsp)
+{
+	if (rsp && rsp->ring)
+		dma_free_coherent(&ha->pdev->dev,
+		(rsp->length + 1) * sizeof(response_t),
+		rsp->ring, rsp->dma);
+
+	kfree(rsp);
+	rsp = NULL;
+	if (req && req->ring)
+		dma_free_coherent(&ha->pdev->dev,
+		(req->length + 1) * sizeof(request_t),
+		req->ring, req->dma);
+
+	kfree(req);
+	req = NULL;
+}
+
+static void qla2x00_free_queues(struct qla_hw_data *ha)
+{
+	struct req_que *req;
+	struct rsp_que *rsp;
+	int cnt;
+
+	for (cnt = 0; cnt < ha->max_queues; cnt++) {
+		rsp = ha->rsp_q_map[cnt];
+		req = ha->req_q_map[cnt];
+		qla2x00_free_que(ha, req, rsp);
+	}
+	kfree(ha->rsp_q_map);
+	ha->rsp_q_map = NULL;
+
+	kfree(ha->req_q_map);
+	ha->req_q_map = NULL;
+}
 
 static char *
-qla2x00_pci_info_str(struct scsi_qla_host *ha, char *str)
+qla2x00_pci_info_str(struct scsi_qla_host *vha, char *str)
 {
+	struct qla_hw_data *ha = vha->hw;
 	static char *pci_bus_modes[] = {
 		"33", "66", "100", "133",
 	};
@@ -240,9 +311,10 @@ qla2x00_pci_info_str(struct scsi_qla_host *ha, char *str)
 }
 
 static char *
-qla24xx_pci_info_str(struct scsi_qla_host *ha, char *str)
+qla24xx_pci_info_str(struct scsi_qla_host *vha, char *str)
 {
 	static char *pci_bus_modes[] = { "33", "66", "100", "133", };
+	struct qla_hw_data *ha = vha->hw;
 	uint32_t pci_bus;
 	int pcie_reg;
 
@@ -290,9 +362,10 @@ qla24xx_pci_info_str(struct scsi_qla_host *ha, char *str)
 }
 
 static char *
-qla2x00_fw_version_str(struct scsi_qla_host *ha, char *str)
+qla2x00_fw_version_str(struct scsi_qla_host *vha, char *str)
 {
 	char un_str[10];
+	struct qla_hw_data *ha = vha->hw;
 
 	sprintf(str, "%d.%02d.%02d ", ha->fw_major_version,
 	    ha->fw_minor_version,
@@ -328,8 +401,9 @@ qla2x00_fw_version_str(struct scsi_qla_host *ha, char *str)
 }
 
 static char *
-qla24xx_fw_version_str(struct scsi_qla_host *ha, char *str)
+qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str)
 {
+	struct qla_hw_data *ha = vha->hw;
 	sprintf(str, "%d.%02d.%02d ", ha->fw_major_version,
 	    ha->fw_minor_version,
 	    ha->fw_subminor_version);
@@ -354,18 +428,20 @@ qla24xx_fw_version_str(struct scsi_qla_host *ha, char *str)
 }
 
 static inline srb_t *
-qla2x00_get_new_sp(scsi_qla_host_t *ha, fc_port_t *fcport,
+qla2x00_get_new_sp(scsi_qla_host_t *vha, fc_port_t *fcport,
     struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
 {
 	srb_t *sp;
+	struct qla_hw_data *ha = vha->hw;
 
 	sp = mempool_alloc(ha->srb_mempool, GFP_ATOMIC);
 	if (!sp)
 		return sp;
 
-	sp->ha = ha;
+	sp->vha = vha;
 	sp->fcport = fcport;
 	sp->cmd = cmd;
+	sp->que = ha->req_q_map[0];
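+	/* Record the request queue in the srb so completion and the
+	 * error handlers can find the command's ring. */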
 	sp->flags = 0;
 	CMD_SP(cmd) = (void *)sp;
 	cmd->scsi_done = done;
@@ -376,9 +452,10 @@ qla2x00_get_new_sp(scsi_qla_host_t *ha, fc_port_t *fcport,
 static int
 qla2x00_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
 {
-	scsi_qla_host_t *ha = shost_priv(cmd->device->host);
+	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
 	fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
 	struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device));
+	struct qla_hw_data *ha = vha->hw;
 	srb_t *sp;
 	int rval;
 
@@ -399,33 +476,33 @@ qla2x00_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
 
 	if (atomic_read(&fcport->state) != FCS_ONLINE) {
 		if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
-		    atomic_read(&ha->loop_state) == LOOP_DEAD) {
+		    atomic_read(&vha->loop_state) == LOOP_DEAD) {
 			cmd->result = DID_NO_CONNECT << 16;
 			goto qc_fail_command;
 		}
 		goto qc_target_busy;
 	}
 
-	spin_unlock_irq(ha->host->host_lock);
+	spin_unlock_irq(vha->host->host_lock);
 
-	sp = qla2x00_get_new_sp(ha, fcport, cmd, done);
+	sp = qla2x00_get_new_sp(vha, fcport, cmd, done);
 	if (!sp)
 		goto qc_host_busy_lock;
 
-	rval = qla2x00_start_scsi(sp);
+	rval = ha->isp_ops->start_scsi(sp);
 	if (rval != QLA_SUCCESS)
 		goto qc_host_busy_free_sp;
 
-	spin_lock_irq(ha->host->host_lock);
+	spin_lock_irq(vha->host->host_lock);
 
 	return 0;
 
 qc_host_busy_free_sp:
-	qla2x00_sp_free_dma(ha, sp);
+	qla2x00_sp_free_dma(sp);
 	mempool_free(sp, ha->srb_mempool);
 
 qc_host_busy_lock:
-	spin_lock_irq(ha->host->host_lock);
+	spin_lock_irq(vha->host->host_lock);
 	return SCSI_MLQUEUE_HOST_BUSY;
 
 qc_target_busy:
@@ -441,14 +518,15 @@ qc_fail_command:
 static int
 qla24xx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
 {
-	scsi_qla_host_t *ha = shost_priv(cmd->device->host);
+	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
 	fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
 	struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device));
+	struct qla_hw_data *ha = vha->hw;
+	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
 	srb_t *sp;
 	int rval;
-	scsi_qla_host_t *pha = to_qla_parent(ha);
 
-	if (unlikely(pci_channel_offline(pha->pdev))) {
+	if (unlikely(pci_channel_offline(ha->pdev))) {
 		cmd->result = DID_REQUEUE << 16;
 		goto qc24_fail_command;
 	}
@@ -465,33 +543,33 @@ qla24xx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
 
 	if (atomic_read(&fcport->state) != FCS_ONLINE) {
 		if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
-		    atomic_read(&pha->loop_state) == LOOP_DEAD) {
+		    atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
 			cmd->result = DID_NO_CONNECT << 16;
 			goto qc24_fail_command;
 		}
 		goto qc24_target_busy;
 	}
 
-	spin_unlock_irq(ha->host->host_lock);
+	spin_unlock_irq(vha->host->host_lock);
 
-	sp = qla2x00_get_new_sp(pha, fcport, cmd, done);
+	sp = qla2x00_get_new_sp(base_vha, fcport, cmd, done);
 	if (!sp)
 		goto qc24_host_busy_lock;
 
-	rval = qla24xx_start_scsi(sp);
+	rval = ha->isp_ops->start_scsi(sp);
 	if (rval != QLA_SUCCESS)
 		goto qc24_host_busy_free_sp;
 
-	spin_lock_irq(ha->host->host_lock);
+	spin_lock_irq(vha->host->host_lock);
 
 	return 0;
 
 qc24_host_busy_free_sp:
-	qla2x00_sp_free_dma(pha, sp);
-	mempool_free(sp, pha->srb_mempool);
+	qla2x00_sp_free_dma(sp);
+	mempool_free(sp, ha->srb_mempool);
 
 qc24_host_busy_lock:
-	spin_lock_irq(ha->host->host_lock);
+	spin_lock_irq(vha->host->host_lock);
 	return SCSI_MLQUEUE_HOST_BUSY;
 
 qc24_target_busy:
@@ -510,17 +588,14 @@ qc24_fail_command:
  *    max time.
  *
  * Input:
- *    ha = actual ha whose done queue will contain the command
- *	      returned by firmware.
  *    cmd = Scsi Command to wait on.
- *    flag = Abort/Reset(Bus or Device Reset)
  *
  * Return:
  *    Not Found : 0
  *    Found : 1
  */
 static int
-qla2x00_eh_wait_on_command(scsi_qla_host_t *ha, struct scsi_cmnd *cmd)
+qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd)
 {
 #define ABORT_POLLING_PERIOD	1000
 #define ABORT_WAIT_ITER		((10 * 1000) / (ABORT_POLLING_PERIOD))
@@ -557,21 +632,22 @@ qla2x00_eh_wait_on_command(scsi_qla_host_t *ha, struct scsi_cmnd *cmd)
  *    Failed  (Adapter is offline/disabled) : 1
  */
 int
-qla2x00_wait_for_hba_online(scsi_qla_host_t *ha)
+qla2x00_wait_for_hba_online(scsi_qla_host_t *vha)
 {
 	int		return_status;
 	unsigned long	wait_online;
-	scsi_qla_host_t *pha = to_qla_parent(ha);
+	struct qla_hw_data *ha = vha->hw;
+	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
 
 	wait_online = jiffies + (MAX_LOOP_TIMEOUT * HZ);
-	while (((test_bit(ISP_ABORT_NEEDED, &pha->dpc_flags)) ||
-	    test_bit(ABORT_ISP_ACTIVE, &pha->dpc_flags) ||
-	    test_bit(ISP_ABORT_RETRY, &pha->dpc_flags) ||
-	    pha->dpc_active) && time_before(jiffies, wait_online)) {
+	while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) ||
+	    test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
+	    test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
+	    ha->dpc_active) && time_before(jiffies, wait_online)) {
 
 		msleep(1000);
 	}
-	if (pha->flags.online)
+	if (base_vha->flags.online)
 		return_status = QLA_SUCCESS;
 	else
 		return_status = QLA_FUNCTION_FAILED;
@@ -596,19 +672,20 @@ qla2x00_wait_for_hba_online(scsi_qla_host_t *ha)
  *    Failed  (LOOP_NOT_READY) : 1
  */
 static inline int
-qla2x00_wait_for_loop_ready(scsi_qla_host_t *ha)
+qla2x00_wait_for_loop_ready(scsi_qla_host_t *vha)
 {
 	int 	 return_status = QLA_SUCCESS;
 	unsigned long loop_timeout ;
-	scsi_qla_host_t *pha = to_qla_parent(ha);
+	struct qla_hw_data *ha = vha->hw;
+	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
 
 	/* wait for 5 min at the max for loop to be ready */
 	loop_timeout = jiffies + (MAX_LOOP_TIMEOUT * HZ);
 
-	while ((!atomic_read(&pha->loop_down_timer) &&
-	    atomic_read(&pha->loop_state) == LOOP_DOWN) ||
-	    atomic_read(&pha->loop_state) != LOOP_READY) {
-		if (atomic_read(&pha->loop_state) == LOOP_DEAD) {
+	while ((!atomic_read(&base_vha->loop_down_timer) &&
+	    atomic_read(&base_vha->loop_state) == LOOP_DOWN) ||
+	    atomic_read(&base_vha->loop_state) != LOOP_READY) {
+		if (atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
 			return_status = QLA_FUNCTION_FAILED;
 			break;
 		}
@@ -624,35 +701,42 @@ qla2x00_wait_for_loop_ready(scsi_qla_host_t *ha)
 void
 qla2x00_abort_fcport_cmds(fc_port_t *fcport)
 {
-	int cnt;
+	int cnt, que, id;
 	unsigned long flags;
 	srb_t *sp;
-	scsi_qla_host_t *ha = fcport->ha;
-	scsi_qla_host_t *pha = to_qla_parent(ha);
+	scsi_qla_host_t *vha = fcport->vha;
+	struct qla_hw_data *ha = vha->hw;
+	struct req_que *req;
 
-	spin_lock_irqsave(&pha->hardware_lock, flags);
-	for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
-		sp = pha->outstanding_cmds[cnt];
-		if (!sp)
-			continue;
-		if (sp->fcport != fcport)
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	for (que = 0; que < QLA_MAX_HOST_QUES; que++) {
+		id = vha->req_ques[que];
+		req = ha->req_q_map[id];
+		if (!req)
 			continue;
+		for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
+			sp = req->outstanding_cmds[cnt];
+			if (!sp)
+				continue;
+			if (sp->fcport != fcport)
+				continue;
 
-		spin_unlock_irqrestore(&pha->hardware_lock, flags);
-		if (ha->isp_ops->abort_command(ha, sp)) {
-			DEBUG2(qla_printk(KERN_WARNING, ha,
-			    "Abort failed --  %lx\n", sp->cmd->serial_number));
-		} else {
-			if (qla2x00_eh_wait_on_command(ha, sp->cmd) !=
-			    QLA_SUCCESS)
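+			/* The abort is a mailbox command and can sleep,
+			 * so drop the hardware lock across it. */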
+			spin_unlock_irqrestore(&ha->hardware_lock, flags);
+			if (ha->isp_ops->abort_command(vha, sp, req)) {
 				DEBUG2(qla_printk(KERN_WARNING, ha,
-				    "Abort failed while waiting --  %lx\n",
-				    sp->cmd->serial_number));
-
+				"Abort failed --  %lx\n",
+				sp->cmd->serial_number));
+			} else {
+				if (qla2x00_eh_wait_on_command(sp->cmd) !=
+					QLA_SUCCESS)
+					DEBUG2(qla_printk(KERN_WARNING, ha,
+					"Abort failed while waiting --  %lx\n",
+					sp->cmd->serial_number));
+			}
+			spin_lock_irqsave(&ha->hardware_lock, flags);
 		}
-		spin_lock_irqsave(&pha->hardware_lock, flags);
 	}
-	spin_unlock_irqrestore(&pha->hardware_lock, flags);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 }
 
 static void
@@ -690,14 +774,16 @@ qla2x00_block_error_handler(struct scsi_cmnd *cmnd)
 static int
 qla2xxx_eh_abort(struct scsi_cmnd *cmd)
 {
-	scsi_qla_host_t *ha = shost_priv(cmd->device->host);
+	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
 	srb_t *sp;
 	int ret, i;
 	unsigned int id, lun;
 	unsigned long serial;
 	unsigned long flags;
 	int wait = 0;
-	scsi_qla_host_t *pha = to_qla_parent(ha);
+	struct qla_hw_data *ha = vha->hw;
+	struct req_que *req;
+	srb_t *spt;
 
 	qla2x00_block_error_handler(cmd);
 
@@ -709,11 +795,15 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
 	id = cmd->device->id;
 	lun = cmd->device->lun;
 	serial = cmd->serial_number;
+	spt = (srb_t *) CMD_SP(cmd);
+	if (!spt)
+		return SUCCESS;
+	req = spt->que;
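+	/* With multiple queues the command sits on the request queue
+	 * recorded in its srb, not necessarily the base queue. */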
 
 	/* Check active list for the command. */
-	spin_lock_irqsave(&pha->hardware_lock, flags);
+	spin_lock_irqsave(&ha->hardware_lock, flags);
 	for (i = 1; i < MAX_OUTSTANDING_COMMANDS; i++) {
-		sp = pha->outstanding_cmds[i];
+		sp = req->outstanding_cmds[i];
 
 		if (sp == NULL)
 			continue;
@@ -721,38 +811,36 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
 		if (sp->cmd != cmd)
 			continue;
 
-		DEBUG2(printk("%s(%ld): aborting sp %p from RISC. pid=%ld.\n",
-		    __func__, ha->host_no, sp, serial));
+		DEBUG2(printk("%s(%ld): aborting sp %p from RISC."
+		" pid=%ld.\n", __func__, vha->host_no, sp, serial));
 
-		spin_unlock_irqrestore(&pha->hardware_lock, flags);
-		if (ha->isp_ops->abort_command(ha, sp)) {
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+		if (ha->isp_ops->abort_command(vha, sp, req)) {
 			DEBUG2(printk("%s(%ld): abort_command "
-			    "mbx failed.\n", __func__, ha->host_no));
-			ret = FAILED;
+			"mbx failed.\n", __func__, vha->host_no));
 		} else {
 			DEBUG3(printk("%s(%ld): abort_command "
-			    "mbx success.\n", __func__, ha->host_no));
+			"mbx success.\n", __func__, vha->host_no));
 			wait = 1;
 		}
-		spin_lock_irqsave(&pha->hardware_lock, flags);
-
+		spin_lock_irqsave(&ha->hardware_lock, flags);
 		break;
 	}
-	spin_unlock_irqrestore(&pha->hardware_lock, flags);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
 	/* Wait for the command to be returned. */
 	if (wait) {
-		if (qla2x00_eh_wait_on_command(ha, cmd) != QLA_SUCCESS) {
+		if (qla2x00_eh_wait_on_command(cmd) != QLA_SUCCESS) {
 			qla_printk(KERN_ERR, ha,
 			    "scsi(%ld:%d:%d): Abort handler timed out -- %lx "
-			    "%x.\n", ha->host_no, id, lun, serial, ret);
+			    "%x.\n", vha->host_no, id, lun, serial, ret);
 			ret = FAILED;
 		}
 	}
 
 	qla_printk(KERN_INFO, ha,
 	    "scsi(%ld:%d:%d): Abort command issued -- %d %lx %x.\n",
-	    ha->host_no, id, lun, wait, serial, ret);
+	    vha->host_no, id, lun, wait, serial, ret);
 
 	return ret;
 }
@@ -764,23 +852,27 @@ enum nexus_wait_type {
 };
 
 static int
-qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *ha, unsigned int t,
-    unsigned int l, enum nexus_wait_type type)
+qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
+	unsigned int l, srb_t *sp, enum nexus_wait_type type)
 {
 	int cnt, match, status;
-	srb_t *sp;
 	unsigned long flags;
-	scsi_qla_host_t *pha = to_qla_parent(ha);
+	struct qla_hw_data *ha = vha->hw;
+	struct req_que *req;
 
 	status = QLA_SUCCESS;
-	spin_lock_irqsave(&pha->hardware_lock, flags);
-	for (cnt = 1; status == QLA_SUCCESS && cnt < MAX_OUTSTANDING_COMMANDS;
-	    cnt++) {
-		sp = pha->outstanding_cmds[cnt];
+	if (!sp)
+		return status;
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	req = sp->que;
+	for (cnt = 1; status == QLA_SUCCESS &&
+		cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
+		sp = req->outstanding_cmds[cnt];
 		if (!sp)
 			continue;
 
-		if (ha->vp_idx != sp->fcport->ha->vp_idx)
+		if (vha->vp_idx != sp->fcport->vha->vp_idx)
 			continue;
 		match = 0;
 		switch (type) {
@@ -792,17 +884,17 @@ qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *ha, unsigned int t,
 			break;
 		case WAIT_LUN:
 			match = (sp->cmd->device->id == t &&
-			    sp->cmd->device->lun == l);
+				sp->cmd->device->lun == l);
 			break;
 		}
 		if (!match)
 			continue;
 
-		spin_unlock_irqrestore(&pha->hardware_lock, flags);
-		status = qla2x00_eh_wait_on_command(ha, sp->cmd);
-		spin_lock_irqsave(&pha->hardware_lock, flags);
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+		status = qla2x00_eh_wait_on_command(sp->cmd);
+		spin_lock_irqsave(&ha->hardware_lock, flags);
 	}
-	spin_unlock_irqrestore(&pha->hardware_lock, flags);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
 	return status;
 }
@@ -818,7 +910,7 @@ static int
 __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
     struct scsi_cmnd *cmd, int (*do_reset)(struct fc_port *, unsigned int))
 {
-	scsi_qla_host_t *ha = shost_priv(cmd->device->host);
+	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
 	fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
 	int err;
 
@@ -827,31 +919,31 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
 	if (!fcport)
 		return FAILED;
 
-	qla_printk(KERN_INFO, ha, "scsi(%ld:%d:%d): %s RESET ISSUED.\n",
-	    ha->host_no, cmd->device->id, cmd->device->lun, name);
+	qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET ISSUED.\n",
+	    vha->host_no, cmd->device->id, cmd->device->lun, name);
 
 	err = 0;
-	if (qla2x00_wait_for_hba_online(ha) != QLA_SUCCESS)
+	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS)
 		goto eh_reset_failed;
 	err = 1;
-	if (qla2x00_wait_for_loop_ready(ha) != QLA_SUCCESS)
+	if (qla2x00_wait_for_loop_ready(vha) != QLA_SUCCESS)
 		goto eh_reset_failed;
 	err = 2;
 	if (do_reset(fcport, cmd->device->lun) != QLA_SUCCESS)
 		goto eh_reset_failed;
 	err = 3;
-	if (qla2x00_eh_wait_for_pending_commands(ha, cmd->device->id,
-	    cmd->device->lun, type) != QLA_SUCCESS)
+	if (qla2x00_eh_wait_for_pending_commands(vha, cmd->device->id,
+	    cmd->device->lun, (srb_t *) CMD_SP(cmd), type) != QLA_SUCCESS)
 		goto eh_reset_failed;
 
-	qla_printk(KERN_INFO, ha, "scsi(%ld:%d:%d): %s RESET SUCCEEDED.\n",
-	    ha->host_no, cmd->device->id, cmd->device->lun, name);
+	qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET SUCCEEDED.\n",
+	    vha->host_no, cmd->device->id, cmd->device->lun, name);
 
 	return SUCCESS;
 
  eh_reset_failed:
-	qla_printk(KERN_INFO, ha, "scsi(%ld:%d:%d): %s RESET FAILED: %s.\n",
-	    ha->host_no, cmd->device->id, cmd->device->lun, name,
+	qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET FAILED: %s.\n",
+	    vha->host_no, cmd->device->id, cmd->device->lun, name,
 	    reset_errors[err]);
 	return FAILED;
 }
@@ -859,7 +951,8 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
 static int
 qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
 {
-	scsi_qla_host_t *ha = shost_priv(cmd->device->host);
+	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
+	struct qla_hw_data *ha = vha->hw;
 
 	return __qla2xxx_eh_generic_reset("DEVICE", WAIT_LUN, cmd,
 	    ha->isp_ops->lun_reset);
@@ -868,7 +961,8 @@ qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
 static int
 qla2xxx_eh_target_reset(struct scsi_cmnd *cmd)
 {
-	scsi_qla_host_t *ha = shost_priv(cmd->device->host);
+	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
+	struct qla_hw_data *ha = vha->hw;
 
 	return __qla2xxx_eh_generic_reset("TARGET", WAIT_TARGET, cmd,
 	    ha->isp_ops->target_reset);
@@ -892,12 +986,12 @@ qla2xxx_eh_target_reset(struct scsi_cmnd *cmd)
 static int
 qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
 {
-	scsi_qla_host_t *ha = shost_priv(cmd->device->host);
-	scsi_qla_host_t *pha = to_qla_parent(ha);
+	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
 	fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
 	int ret = FAILED;
 	unsigned int id, lun;
 	unsigned long serial;
+	srb_t *sp = (srb_t *) CMD_SP(cmd);
 
 	qla2x00_block_error_handler(cmd);
 
@@ -908,28 +1002,28 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
 	if (!fcport)
 		return ret;
 
-	qla_printk(KERN_INFO, ha,
-	    "scsi(%ld:%d:%d): LOOP RESET ISSUED.\n", ha->host_no, id, lun);
+	qla_printk(KERN_INFO, vha->hw,
+	    "scsi(%ld:%d:%d): BUS RESET ISSUED.\n", vha->host_no, id, lun);
 
-	if (qla2x00_wait_for_hba_online(ha) != QLA_SUCCESS) {
+	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
 		DEBUG2(printk("%s failed:board disabled\n",__func__));
 		goto eh_bus_reset_done;
 	}
 
-	if (qla2x00_wait_for_loop_ready(ha) == QLA_SUCCESS) {
-		if (qla2x00_loop_reset(ha) == QLA_SUCCESS)
+	if (qla2x00_wait_for_loop_ready(vha) == QLA_SUCCESS) {
+		if (qla2x00_loop_reset(vha) == QLA_SUCCESS)
 			ret = SUCCESS;
 	}
 	if (ret == FAILED)
 		goto eh_bus_reset_done;
 
 	/* Flush outstanding commands. */
-	if (qla2x00_eh_wait_for_pending_commands(pha, 0, 0, WAIT_HOST) !=
+	if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, sp, WAIT_HOST) !=
 	    QLA_SUCCESS)
 		ret = FAILED;
 
 eh_bus_reset_done:
-	qla_printk(KERN_INFO, ha, "%s: reset %s\n", __func__,
+	qla_printk(KERN_INFO, vha->hw, "%s: reset %s\n", __func__,
 	    (ret == FAILED) ? "failed" : "succeeded");
 
 	return ret;
@@ -953,12 +1047,14 @@ eh_bus_reset_done:
 static int
 qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
 {
-	scsi_qla_host_t *ha = shost_priv(cmd->device->host);
+	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
 	fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
+	struct qla_hw_data *ha = vha->hw;
 	int ret = FAILED;
 	unsigned int id, lun;
 	unsigned long serial;
-	scsi_qla_host_t *pha = to_qla_parent(ha);
+	srb_t *sp = (srb_t *) CMD_SP(cmd);
+	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
 
 	qla2x00_block_error_handler(cmd);
 
@@ -970,9 +1066,9 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
 		return ret;
 
 	qla_printk(KERN_INFO, ha,
-	    "scsi(%ld:%d:%d): ADAPTER RESET ISSUED.\n", ha->host_no, id, lun);
+	    "scsi(%ld:%d:%d): ADAPTER RESET ISSUED.\n", vha->host_no, id, lun);
 
-	if (qla2x00_wait_for_hba_online(ha) != QLA_SUCCESS)
+	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS)
 		goto eh_host_reset_lock;
 
 	/*
@@ -983,26 +1079,28 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
 	 * devices as lost kicking of the port_down_timer
 	 * while dpc is stuck for the mailbox to complete.
 	 */
-	qla2x00_wait_for_loop_ready(ha);
-	set_bit(ABORT_ISP_ACTIVE, &pha->dpc_flags);
-	if (qla2x00_abort_isp(pha)) {
-		clear_bit(ABORT_ISP_ACTIVE, &pha->dpc_flags);
-		/* failed. schedule dpc to try */
-		set_bit(ISP_ABORT_NEEDED, &pha->dpc_flags);
-
-		if (qla2x00_wait_for_hba_online(ha) != QLA_SUCCESS)
+	qla2x00_wait_for_loop_ready(vha);
+	if (vha != base_vha) {
+		if (qla2x00_vp_abort_isp(vha))
 			goto eh_host_reset_lock;
+	} else {
+		set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
+		if (qla2x00_abort_isp(base_vha)) {
+			clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
+			/* failed. schedule dpc to try */
+			set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
+
+			if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS)
+				goto eh_host_reset_lock;
+		}
+		clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
 	}
-	clear_bit(ABORT_ISP_ACTIVE, &pha->dpc_flags);
 
-	/* Waiting for our command in done_queue to be returned to OS.*/
-	if (qla2x00_eh_wait_for_pending_commands(pha, 0, 0, WAIT_HOST) ==
-	    QLA_SUCCESS)
+	/* Waiting for command to be returned to OS.*/
+	if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, sp, WAIT_HOST) ==
+		QLA_SUCCESS)
 		ret = SUCCESS;
 
-	if (ha->parent)
-		qla2x00_vp_abort_isp(ha);
-
 eh_host_reset_lock:
 	qla_printk(KERN_INFO, ha, "%s: reset %s\n", __func__,
 	    (ret == FAILED) ? "failed" : "succeeded");
@@ -1021,35 +1119,36 @@ eh_host_reset_lock:
 *      0 = success
 */
 int
-qla2x00_loop_reset(scsi_qla_host_t *ha)
+qla2x00_loop_reset(scsi_qla_host_t *vha)
 {
 	int ret;
 	struct fc_port *fcport;
+	struct qla_hw_data *ha = vha->hw;
 
-	if (ha->flags.enable_lip_full_login) {
-		ret = qla2x00_full_login_lip(ha);
+	if (ha->flags.enable_lip_full_login && !vha->vp_idx) {
+		ret = qla2x00_full_login_lip(vha);
 		if (ret != QLA_SUCCESS) {
-			DEBUG2_3(printk("%s(%ld): bus_reset failed: "
-			    "full_login_lip=%d.\n", __func__, ha->host_no,
+			DEBUG2_3(printk("%s(%ld): failed: "
+			    "full_login_lip=%d.\n", __func__, vha->host_no,
 			    ret));
 		}
-		atomic_set(&ha->loop_state, LOOP_DOWN);
-		atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
-		qla2x00_mark_all_devices_lost(ha, 0);
-		qla2x00_wait_for_loop_ready(ha);
+		atomic_set(&vha->loop_state, LOOP_DOWN);
+		atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
+		qla2x00_mark_all_devices_lost(vha, 0);
+		qla2x00_wait_for_loop_ready(vha);
 	}
 
-	if (ha->flags.enable_lip_reset) {
-		ret = qla2x00_lip_reset(ha);
+	if (ha->flags.enable_lip_reset && !vha->vp_idx) {
+		ret = qla2x00_lip_reset(vha);
 		if (ret != QLA_SUCCESS) {
-			DEBUG2_3(printk("%s(%ld): bus_reset failed: "
-			    "lip_reset=%d.\n", __func__, ha->host_no, ret));
-		}
-		qla2x00_wait_for_loop_ready(ha);
+			DEBUG2_3(printk("%s(%ld): failed: "
+			    "lip_reset=%d.\n", __func__, vha->host_no, ret));
+		} else
+			qla2x00_wait_for_loop_ready(vha);
 	}
 
 	if (ha->flags.enable_target_reset) {
-		list_for_each_entry(fcport, &ha->fcports, list) {
+		list_for_each_entry(fcport, &vha->vp_fcports, list) {
 			if (fcport->port_type != FCT_TARGET)
 				continue;
 
@@ -1057,31 +1156,37 @@ qla2x00_loop_reset(scsi_qla_host_t *ha)
 			if (ret != QLA_SUCCESS) {
 				DEBUG2_3(printk("%s(%ld): bus_reset failed: "
 				    "target_reset=%d d_id=%x.\n", __func__,
-				    ha->host_no, ret, fcport->d_id.b24));
+				    vha->host_no, ret, fcport->d_id.b24));
 			}
 		}
 	}
-
 	/* Issue marker command only when we are going to start the I/O */
-	ha->marker_needed = 1;
+	vha->marker_needed = 1;
 
 	return QLA_SUCCESS;
 }
 
 void
-qla2x00_abort_all_cmds(scsi_qla_host_t *ha, int res)
+qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
 {
-	int cnt;
+	int que, cnt;
 	unsigned long flags;
 	srb_t *sp;
+	struct qla_hw_data *ha = vha->hw;
+	struct req_que *req;
 
 	spin_lock_irqsave(&ha->hardware_lock, flags);
-	for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
-		sp = ha->outstanding_cmds[cnt];
-		if (sp) {
-			ha->outstanding_cmds[cnt] = NULL;
-			sp->cmd->result = res;
-			qla2x00_sp_compl(ha, sp);
+	for (que = 0; que < QLA_MAX_HOST_QUES; que++) {
+		req = ha->req_q_map[vha->req_ques[que]];
+		if (!req)
+			continue;
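+		/* Queues can be shared between vports, so match each
+		 * command's vha before completing it. */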
+		for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
+			sp = req->outstanding_cmds[cnt];
+			if (sp && sp->vha == vha) {
+				req->outstanding_cmds[cnt] = NULL;
+				sp->cmd->result = res;
+				qla2x00_sp_compl(ha, sp);
+			}
 		}
 	}
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -1103,13 +1208,15 @@ qla2xxx_slave_alloc(struct scsi_device *sdev)
 static int
 qla2xxx_slave_configure(struct scsi_device *sdev)
 {
-	scsi_qla_host_t *ha = shost_priv(sdev->host);
+	scsi_qla_host_t *vha = shost_priv(sdev->host);
+	struct qla_hw_data *ha = vha->hw;
 	struct fc_rport *rport = starget_to_rport(sdev->sdev_target);
+	struct req_que *req = ha->req_q_map[0];
 
 	if (sdev->tagged_supported)
-		scsi_activate_tcq(sdev, ha->max_q_depth);
+		scsi_activate_tcq(sdev, req->max_q_depth);
 	else
-		scsi_deactivate_tcq(sdev, ha->max_q_depth);
+		scsi_deactivate_tcq(sdev, req->max_q_depth);
 
 	rport->dev_loss_tmo = ha->port_down_retry_count;
 
@@ -1152,8 +1259,9 @@ qla2x00_change_queue_type(struct scsi_device *sdev, int tag_type)
  * supported addressing method.
  */
 static void
-qla2x00_config_dma_addressing(scsi_qla_host_t *ha)
+qla2x00_config_dma_addressing(scsi_qla_host_t *vha)
 {
+	struct qla_hw_data *ha = vha->hw;
 	/* Assume a 32bit DMA mask. */
 	ha->flags.enable_64bit_addressing = 0;
 
@@ -1174,7 +1282,7 @@ qla2x00_config_dma_addressing(scsi_qla_host_t *ha)
 }
 
 static void
-qla2x00_enable_intrs(scsi_qla_host_t *ha)
+qla2x00_enable_intrs(struct qla_hw_data *ha)
 {
 	unsigned long flags = 0;
 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
@@ -1189,7 +1297,7 @@ qla2x00_enable_intrs(scsi_qla_host_t *ha)
 }
 
 static void
-qla2x00_disable_intrs(scsi_qla_host_t *ha)
+qla2x00_disable_intrs(struct qla_hw_data *ha)
 {
 	unsigned long flags = 0;
 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
@@ -1203,7 +1311,7 @@ qla2x00_disable_intrs(scsi_qla_host_t *ha)
 }
 
 static void
-qla24xx_enable_intrs(scsi_qla_host_t *ha)
+qla24xx_enable_intrs(struct qla_hw_data *ha)
 {
 	unsigned long flags = 0;
 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
@@ -1216,7 +1324,7 @@ qla24xx_enable_intrs(scsi_qla_host_t *ha)
 }
 
 static void
-qla24xx_disable_intrs(scsi_qla_host_t *ha)
+qla24xx_disable_intrs(struct qla_hw_data *ha)
 {
 	unsigned long flags = 0;
 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
@@ -1260,6 +1368,10 @@ static struct isp_operations qla2100_isp_ops = {
 	.read_optrom		= qla2x00_read_optrom_data,
 	.write_optrom		= qla2x00_write_optrom_data,
 	.get_flash_version	= qla2x00_get_flash_version,
+	.start_scsi		= qla2x00_start_scsi,
+	.wrt_req_reg		= NULL,
+	.wrt_rsp_reg		= NULL,
+	.rd_req_reg		= NULL,
 };
 
 static struct isp_operations qla2300_isp_ops = {
@@ -1294,6 +1406,10 @@ static struct isp_operations qla2300_isp_ops = {
 	.read_optrom		= qla2x00_read_optrom_data,
 	.write_optrom		= qla2x00_write_optrom_data,
 	.get_flash_version	= qla2x00_get_flash_version,
+	.start_scsi		= qla2x00_start_scsi,
+	.wrt_req_reg		= NULL,
+	.wrt_rsp_reg		= NULL,
+	.rd_req_reg		= NULL,
 };
 
 static struct isp_operations qla24xx_isp_ops = {
@@ -1328,6 +1444,10 @@ static struct isp_operations qla24xx_isp_ops = {
 	.read_optrom		= qla24xx_read_optrom_data,
 	.write_optrom		= qla24xx_write_optrom_data,
 	.get_flash_version	= qla24xx_get_flash_version,
+	.start_scsi		= qla24xx_start_scsi,
+	.wrt_req_reg		= qla24xx_wrt_req_reg,
+	.wrt_rsp_reg		= qla24xx_wrt_rsp_reg,
+	.rd_req_reg		= qla24xx_rd_req_reg,
 };
 
 static struct isp_operations qla25xx_isp_ops = {
@@ -1362,10 +1482,14 @@ static struct isp_operations qla25xx_isp_ops = {
 	.read_optrom		= qla25xx_read_optrom_data,
 	.write_optrom		= qla24xx_write_optrom_data,
 	.get_flash_version	= qla24xx_get_flash_version,
+	.start_scsi		= qla24xx_start_scsi,
+	.wrt_req_reg		= qla24xx_wrt_req_reg,
+	.wrt_rsp_reg		= qla24xx_wrt_rsp_reg,
+	.rd_req_reg		= qla24xx_rd_req_reg,
 };
 
 static inline void
-qla2x00_set_isp_flags(scsi_qla_host_t *ha)
+qla2x00_set_isp_flags(struct qla_hw_data *ha)
 {
 	ha->device_type = DT_EXTENDED_IDS;
 	switch (ha->pdev->device) {
@@ -1447,9 +1571,10 @@ qla2x00_set_isp_flags(scsi_qla_host_t *ha)
 }
 
 static int
-qla2x00_iospace_config(scsi_qla_host_t *ha)
+qla2x00_iospace_config(struct qla_hw_data *ha)
 {
 	resource_size_t pio;
+	uint16_t msix;
 
 	if (pci_request_selected_regions(ha->pdev, ha->bars,
 	    QLA2XXX_DRIVER_NAME)) {
@@ -1502,6 +1627,30 @@ skip_pio:
 		goto iospace_error_exit;
 	}
 
+	/* Determine queue resources */
+	ha->max_queues = 1;
+	if (ql2xmaxqueues <= 1 || !IS_QLA25XX(ha))
+		goto mqiobase_exit;
+	ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
+			pci_resource_len(ha->pdev, 3));
+	if (ha->mqiobase) {
+		/* Read MSIX vector size of the board */
+		pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix);
+		ha->msix_count = msix;
+		/* Max queues are bounded by available msix vectors */
+		/* queue 0 uses two msix vectors */
+		if (ha->msix_count - 1 < ql2xmaxqueues)
+			ha->max_queues = ha->msix_count - 1;
+		else if (ql2xmaxqueues > QLA_MQ_SIZE)
+			ha->max_queues = QLA_MQ_SIZE;
+		else
+			ha->max_queues = ql2xmaxqueues;
+		qla_printk(KERN_INFO, ha,
+			"MSI-X vector count: %d\n", msix);
+	}
+
+mqiobase_exit:
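+	/* One MSI-X vector per queue, plus the extra vector consumed
+	 * by queue 0. */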
+	ha->msix_count = ha->max_queues + 1;
 	return (0);
 
 iospace_error_exit:
@@ -1511,25 +1660,25 @@ iospace_error_exit:
 static void
 qla2xxx_scan_start(struct Scsi_Host *shost)
 {
-	scsi_qla_host_t *ha = shost_priv(shost);
+	scsi_qla_host_t *vha = shost_priv(shost);
 
-	set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
-	set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
-	set_bit(RSCN_UPDATE, &ha->dpc_flags);
-	set_bit(NPIV_CONFIG_NEEDED, &ha->dpc_flags);
+	set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+	set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+	set_bit(RSCN_UPDATE, &vha->dpc_flags);
+	set_bit(NPIV_CONFIG_NEEDED, &vha->dpc_flags);
 }
 
 static int
 qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time)
 {
-	scsi_qla_host_t *ha = shost_priv(shost);
+	scsi_qla_host_t *vha = shost_priv(shost);
 
-	if (!ha->host)
+	if (!vha->host)
 		return 1;
-	if (time > ha->loop_reset_delay * HZ)
+	if (time > vha->hw->loop_reset_delay * HZ)
 		return 1;
 
-	return atomic_read(&ha->loop_state) == LOOP_READY;
+	return atomic_read(&vha->loop_state) == LOOP_READY;
 }
 
 /*
@@ -1540,11 +1689,15 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 {
 	int	ret = -ENODEV;
 	struct Scsi_Host *host;
-	scsi_qla_host_t *ha;
+	scsi_qla_host_t *base_vha = NULL;
+	struct qla_hw_data *ha;
 	char pci_info[30];
 	char fw_str[30];
 	struct scsi_host_template *sht;
-	int bars, mem_only = 0;
+	int bars, max_id, mem_only = 0;
+	uint16_t req_length = 0, rsp_length = 0;
+	struct req_que *req = NULL;
+	struct rsp_que *rsp = NULL;
 
 	bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
 	sht = &qla2x00_driver_template;
@@ -1570,33 +1723,24 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	/* This may fail but that's ok */
 	pci_enable_pcie_error_reporting(pdev);
 
-	host = scsi_host_alloc(sht, sizeof(scsi_qla_host_t));
-	if (host == NULL) {
-		printk(KERN_WARNING
-		    "qla2xxx: Couldn't allocate host from scsi layer!\n");
-		goto probe_disable_device;
+	ha = kzalloc(sizeof(struct qla_hw_data), GFP_KERNEL);
+	if (!ha) {
+		DEBUG(printk("Unable to allocate memory for ha\n"));
+		goto probe_out;
 	}
+	ha->pdev = pdev;
 
 	/* Clear our data area */
-	ha = shost_priv(host);
-	memset(ha, 0, sizeof(scsi_qla_host_t));
-
-	ha->pdev = pdev;
-	ha->host = host;
-	ha->host_no = host->host_no;
-	sprintf(ha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, ha->host_no);
-	ha->parent = NULL;
 	ha->bars = bars;
 	ha->mem_only = mem_only;
 	spin_lock_init(&ha->hardware_lock);
 
 	/* Set ISP-type information. */
 	qla2x00_set_isp_flags(ha);
-
 	/* Configure PCI I/O space */
 	ret = qla2x00_iospace_config(ha);
 	if (ret)
-		goto probe_failed;
+		goto probe_hw_failed;
 
 	qla_printk(KERN_INFO, ha,
 	    "Found an ISP%04X, irq %d, iobase 0x%p\n", pdev->device, pdev->irq,
@@ -1604,95 +1748,137 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	ha->prev_topology = 0;
 	ha->init_cb_size = sizeof(init_cb_t);
-	ha->mgmt_svr_loop_id = MANAGEMENT_SERVER + ha->vp_idx;
 	ha->link_data_rate = PORT_SPEED_UNKNOWN;
 	ha->optrom_size = OPTROM_SIZE_2300;
 
-	ha->max_q_depth = MAX_Q_DEPTH;
-	if (ql2xmaxqdepth != 0 && ql2xmaxqdepth <= 0xffffU)
-		ha->max_q_depth = ql2xmaxqdepth;
-
 	/* Assign ISP specific operations. */
+	max_id = MAX_TARGETS_2200;
 	if (IS_QLA2100(ha)) {
-		host->max_id = MAX_TARGETS_2100;
+		max_id = MAX_TARGETS_2100;
 		ha->mbx_count = MAILBOX_REGISTER_COUNT_2100;
-		ha->request_q_length = REQUEST_ENTRY_CNT_2100;
-		ha->response_q_length = RESPONSE_ENTRY_CNT_2100;
-		ha->last_loop_id = SNS_LAST_LOOP_ID_2100;
-		host->sg_tablesize = 32;
+		req_length = REQUEST_ENTRY_CNT_2100;
+		rsp_length = RESPONSE_ENTRY_CNT_2100;
+		ha->max_loop_id = SNS_LAST_LOOP_ID_2100;
 		ha->gid_list_info_size = 4;
 		ha->isp_ops = &qla2100_isp_ops;
 	} else if (IS_QLA2200(ha)) {
-		host->max_id = MAX_TARGETS_2200;
 		ha->mbx_count = MAILBOX_REGISTER_COUNT;
-		ha->request_q_length = REQUEST_ENTRY_CNT_2200;
-		ha->response_q_length = RESPONSE_ENTRY_CNT_2100;
-		ha->last_loop_id = SNS_LAST_LOOP_ID_2100;
+		req_length = REQUEST_ENTRY_CNT_2200;
+		rsp_length = RESPONSE_ENTRY_CNT_2100;
+		ha->max_loop_id = SNS_LAST_LOOP_ID_2100;
 		ha->gid_list_info_size = 4;
 		ha->isp_ops = &qla2100_isp_ops;
 	} else if (IS_QLA23XX(ha)) {
-		host->max_id = MAX_TARGETS_2200;
 		ha->mbx_count = MAILBOX_REGISTER_COUNT;
-		ha->request_q_length = REQUEST_ENTRY_CNT_2200;
-		ha->response_q_length = RESPONSE_ENTRY_CNT_2300;
-		ha->last_loop_id = SNS_LAST_LOOP_ID_2300;
+		req_length = REQUEST_ENTRY_CNT_2200;
+		rsp_length = RESPONSE_ENTRY_CNT_2300;
+		ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
 		ha->gid_list_info_size = 6;
 		if (IS_QLA2322(ha) || IS_QLA6322(ha))
 			ha->optrom_size = OPTROM_SIZE_2322;
 		ha->isp_ops = &qla2300_isp_ops;
 	} else if (IS_QLA24XX_TYPE(ha)) {
-		host->max_id = MAX_TARGETS_2200;
 		ha->mbx_count = MAILBOX_REGISTER_COUNT;
-		ha->request_q_length = REQUEST_ENTRY_CNT_24XX;
-		ha->response_q_length = RESPONSE_ENTRY_CNT_2300;
-		ha->last_loop_id = SNS_LAST_LOOP_ID_2300;
+		req_length = REQUEST_ENTRY_CNT_24XX;
+		rsp_length = RESPONSE_ENTRY_CNT_2300;
+		ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
 		ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
-		ha->mgmt_svr_loop_id = 10 + ha->vp_idx;
 		ha->gid_list_info_size = 8;
 		ha->optrom_size = OPTROM_SIZE_24XX;
+		ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA24XX;
 		ha->isp_ops = &qla24xx_isp_ops;
 	} else if (IS_QLA25XX(ha)) {
-		host->max_id = MAX_TARGETS_2200;
 		ha->mbx_count = MAILBOX_REGISTER_COUNT;
-		ha->request_q_length = REQUEST_ENTRY_CNT_24XX;
-		ha->response_q_length = RESPONSE_ENTRY_CNT_2300;
-		ha->last_loop_id = SNS_LAST_LOOP_ID_2300;
+		req_length = REQUEST_ENTRY_CNT_24XX;
+		rsp_length = RESPONSE_ENTRY_CNT_2300;
+		ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
 		ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
-		ha->mgmt_svr_loop_id = 10 + ha->vp_idx;
 		ha->gid_list_info_size = 8;
 		ha->optrom_size = OPTROM_SIZE_25XX;
+		ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
 		ha->isp_ops = &qla25xx_isp_ops;
 	}
-	host->can_queue = ha->request_q_length + 128;
 
 	mutex_init(&ha->vport_lock);
 	init_completion(&ha->mbx_cmd_comp);
 	complete(&ha->mbx_cmd_comp);
 	init_completion(&ha->mbx_intr_comp);
 
-	INIT_LIST_HEAD(&ha->list);
-	INIT_LIST_HEAD(&ha->fcports);
-	INIT_LIST_HEAD(&ha->vp_list);
-	INIT_LIST_HEAD(&ha->work_list);
-
 	set_bit(0, (unsigned long *) ha->vp_idx_map);
 
-	qla2x00_config_dma_addressing(ha);
-	if (qla2x00_mem_alloc(ha)) {
+	ret = qla2x00_mem_alloc(ha, req_length, rsp_length, &req, &rsp);
+	if (!ret) {
 		qla_printk(KERN_WARNING, ha,
 		    "[ERROR] Failed to allocate memory for adapter\n");
 
+		goto probe_hw_failed;
+	}
+
+	req->max_q_depth = MAX_Q_DEPTH;
+	if (ql2xmaxqdepth != 0 && ql2xmaxqdepth <= 0xffffU)
+		req->max_q_depth = ql2xmaxqdepth;
+
+	base_vha = qla2x00_create_host(sht, ha);
+	if (!base_vha) {
+		qla_printk(KERN_WARNING, ha,
+		    "[ERROR] Failed to allocate memory for scsi_host\n");
+
 		ret = -ENOMEM;
+		goto probe_hw_failed;
+	}
+
+	pci_set_drvdata(pdev, base_vha);
+
+	qla2x00_config_dma_addressing(base_vha);
+
+	host = base_vha->host;
+	base_vha->req_ques[0] = req->id;
+	host->can_queue = req->length + 128;
+	if (IS_QLA2XXX_MIDTYPE(ha))
+		base_vha->mgmt_svr_loop_id = 10 + base_vha->vp_idx;
+	else
+		base_vha->mgmt_svr_loop_id = MANAGEMENT_SERVER +
+						base_vha->vp_idx;
+	if (IS_QLA2100(ha))
+		host->sg_tablesize = 32;
+	host->max_id = max_id;
+	host->this_id = 255;
+	host->cmd_per_lun = 3;
+	host->unique_id = host->host_no;
+	host->max_cmd_len = MAX_CMDSZ;
+	host->max_channel = MAX_BUSES - 1;
+	host->max_lun = MAX_LUNS;
+	host->transportt = qla2xxx_transport_template;
+
+	/* Set up the irqs */
+	ret = qla2x00_request_irqs(ha, rsp);
+	if (ret)
+		goto probe_failed;
+
+	/* Alloc arrays of request and response ring ptrs */
+	if (!qla2x00_alloc_queues(ha)) {
+		qla_printk(KERN_WARNING, ha,
+		"[ERROR] Failed to allocate memory for queue"
+		" pointers\n");
 		goto probe_failed;
 	}
+	ha->rsp_q_map[0] = rsp;
+	ha->req_q_map[0] = req;
 
-	if (qla2x00_initialize_adapter(ha)) {
+	if (ha->mqenable) {
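+		/* Multi-queue hardware uses the 25xx accessors for the
+		 * per-queue request/response index registers. */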
+		ha->isp_ops->wrt_req_reg = qla25xx_wrt_req_reg;
+		ha->isp_ops->wrt_rsp_reg = qla25xx_wrt_rsp_reg;
+		ha->isp_ops->rd_req_reg = qla25xx_rd_req_reg;
+	}
+
+	if (qla2x00_initialize_adapter(base_vha)) {
 		qla_printk(KERN_WARNING, ha,
 		    "Failed to initialize adapter\n");
 
 		DEBUG2(printk("scsi(%ld): Failed to initialize adapter - "
 		    "Adapter flags %x.\n",
-		    ha->host_no, ha->device_flags));
+		    base_vha->host_no, base_vha->device_flags));
 
 		ret = -ENODEV;
 		goto probe_failed;
@@ -1702,7 +1888,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	 * Startup the kernel thread for this host adapter
 	 */
 	ha->dpc_thread = kthread_create(qla2x00_do_dpc, ha,
-			"%s_dpc", ha->host_str);
+			"%s_dpc", base_vha->host_str);
 	if (IS_ERR(ha->dpc_thread)) {
 		qla_printk(KERN_WARNING, ha,
 		    "Unable to start DPC thread!\n");
@@ -1710,28 +1896,17 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 		goto probe_failed;
 	}
 
-	host->this_id = 255;
-	host->cmd_per_lun = 3;
-	host->unique_id = host->host_no;
-	host->max_cmd_len = MAX_CMDSZ;
-	host->max_channel = MAX_BUSES - 1;
-	host->max_lun = MAX_LUNS;
-	host->transportt = qla2xxx_transport_template;
-
-	ret = qla2x00_request_irqs(ha);
-	if (ret)
-		goto probe_failed;
+	list_add_tail(&base_vha->list, &ha->vp_list);
+	base_vha->host->irq = ha->pdev->irq;
 
 	/* Initialized the timer */
-	qla2x00_start_timer(ha, qla2x00_timer, WATCH_INTERVAL);
+	qla2x00_start_timer(base_vha, qla2x00_timer, WATCH_INTERVAL);
 
 	DEBUG2(printk("DEBUG: detect hba %ld at address = %p\n",
-	    ha->host_no, ha));
+	    base_vha->host_no, ha));
 
-	pci_set_drvdata(pdev, ha);
-
-	ha->flags.init_done = 1;
-	ha->flags.online = 1;
+	base_vha->flags.init_done = 1;
+	base_vha->flags.online = 1;
 
 	ret = scsi_add_host(host, &pdev->dev);
 	if (ret)
@@ -1741,76 +1916,98 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	scsi_scan_host(host);
 
-	qla2x00_alloc_sysfs_attr(ha);
+	qla2x00_alloc_sysfs_attr(base_vha);
 
-	qla2x00_init_host_attr(ha);
+	qla2x00_init_host_attr(base_vha);
 
-	qla2x00_dfs_setup(ha);
+	qla2x00_dfs_setup(base_vha);
 
 	qla_printk(KERN_INFO, ha, "\n"
 	    " QLogic Fibre Channel HBA Driver: %s\n"
 	    "  QLogic %s - %s\n"
 	    "  ISP%04X: %s @ %s hdma%c, host#=%ld, fw=%s\n",
 	    qla2x00_version_str, ha->model_number,
-	    ha->model_desc ? ha->model_desc: "", pdev->device,
-	    ha->isp_ops->pci_info_str(ha, pci_info), pci_name(pdev),
-	    ha->flags.enable_64bit_addressing ? '+': '-', ha->host_no,
-	    ha->isp_ops->fw_version_str(ha, fw_str));
+	    ha->model_desc ? ha->model_desc : "", pdev->device,
+	    ha->isp_ops->pci_info_str(base_vha, pci_info), pci_name(pdev),
+	    ha->flags.enable_64bit_addressing ? '+' : '-', base_vha->host_no,
+	    ha->isp_ops->fw_version_str(base_vha, fw_str));
 
 	return 0;
 
 probe_failed:
-	qla2x00_free_device(ha);
+	qla2x00_free_que(ha, req, rsp);
+	qla2x00_free_device(base_vha);
 
-	scsi_host_put(host);
+	scsi_host_put(base_vha->host);
 
-probe_disable_device:
-	pci_disable_device(pdev);
+probe_hw_failed:
+	if (ha->iobase)
+		iounmap(ha->iobase);
+
+	pci_release_selected_regions(ha->pdev, ha->bars);
+	kfree(ha);
+	ha = NULL;
 
 probe_out:
+	pci_disable_device(pdev);
 	return ret;
 }
 
 static void
 qla2x00_remove_one(struct pci_dev *pdev)
 {
-	scsi_qla_host_t *ha, *vha, *temp;
+	scsi_qla_host_t *base_vha, *vha, *temp;
+	struct qla_hw_data  *ha;
+
+	base_vha = pci_get_drvdata(pdev);
+	ha = base_vha->hw;
+
+	list_for_each_entry_safe(vha, temp, &ha->vp_list, list) {
+		if (vha && vha->fc_vport)
+			fc_vport_terminate(vha->fc_vport);
+	}
 
-	ha = pci_get_drvdata(pdev);
+	set_bit(UNLOADING, &base_vha->dpc_flags);
 
-	list_for_each_entry_safe(vha, temp, &ha->vp_list, vp_list)
-		fc_vport_terminate(vha->fc_vport);
+	qla2x00_dfs_remove(base_vha);
 
-	set_bit(UNLOADING, &ha->dpc_flags);
+	qla84xx_put_chip(base_vha);
 
-	qla2x00_dfs_remove(ha);
+	qla2x00_free_sysfs_attr(base_vha);
 
-	qla84xx_put_chip(ha);
+	fc_remove_host(base_vha->host);
 
-	qla2x00_free_sysfs_attr(ha);
+	scsi_remove_host(base_vha->host);
 
-	fc_remove_host(ha->host);
+	qla2x00_free_device(base_vha);
 
-	scsi_remove_host(ha->host);
+	scsi_host_put(base_vha->host);
 
-	qla2x00_free_device(ha);
+	if (ha->iobase)
+		iounmap(ha->iobase);
 
-	scsi_host_put(ha->host);
+	if (ha->mqiobase)
+		iounmap(ha->mqiobase);
+
+	pci_release_selected_regions(ha->pdev, ha->bars);
+	kfree(ha);
+	ha = NULL;
 
 	pci_disable_device(pdev);
 	pci_set_drvdata(pdev, NULL);
 }
 
 static void
-qla2x00_free_device(scsi_qla_host_t *ha)
+qla2x00_free_device(scsi_qla_host_t *vha)
 {
-	qla2x00_abort_all_cmds(ha, DID_NO_CONNECT << 16);
+	struct qla_hw_data *ha = vha->hw;
+	qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
 
 	/* Disable timer */
-	if (ha->timer_active)
-		qla2x00_stop_timer(ha);
+	if (vha->timer_active)
+		qla2x00_stop_timer(vha);
 
-	ha->flags.online = 0;
+	vha->flags.online = 0;
 
 	/* Kill the kernel thread for this host */
 	if (ha->dpc_thread) {
@@ -1825,45 +2022,41 @@ qla2x00_free_device(scsi_qla_host_t *ha)
 	}
 
 	if (ha->flags.fce_enabled)
-		qla2x00_disable_fce_trace(ha, NULL, NULL);
+		qla2x00_disable_fce_trace(vha, NULL, NULL);
 
 	if (ha->eft)
-		qla2x00_disable_eft_trace(ha);
+		qla2x00_disable_eft_trace(vha);
 
 	/* Stop currently executing firmware. */
-	qla2x00_try_to_stop_firmware(ha);
+	qla2x00_try_to_stop_firmware(vha);
 
 	/* turn-off interrupts on the card */
 	if (ha->interrupts_on)
 		ha->isp_ops->disable_intrs(ha);
 
-	qla2x00_mem_free(ha);
+	qla2x00_free_irqs(vha);
 
-	qla2x00_free_irqs(ha);
+	qla2x00_mem_free(ha);
 
-	/* release io space registers  */
-	if (ha->iobase)
-		iounmap(ha->iobase);
-	pci_release_selected_regions(ha->pdev, ha->bars);
+	qla2x00_free_queues(ha);
 }
 
 static inline void
-qla2x00_schedule_rport_del(struct scsi_qla_host *ha, fc_port_t *fcport,
+qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
     int defer)
 {
 	struct fc_rport *rport;
-	scsi_qla_host_t *pha = to_qla_parent(ha);
 
 	if (!fcport->rport)
 		return;
 
 	rport = fcport->rport;
 	if (defer) {
-		spin_lock_irq(ha->host->host_lock);
+		spin_lock_irq(vha->host->host_lock);
 		fcport->drport = rport;
-		spin_unlock_irq(ha->host->host_lock);
-		set_bit(FCPORT_UPDATE_NEEDED, &pha->dpc_flags);
-		qla2xxx_wake_dpc(pha);
+		spin_unlock_irq(vha->host->host_lock);
+		set_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags);
+		qla2xxx_wake_dpc(vha);
 	} else
 		fc_remote_port_delete(rport);
 }
@@ -1877,13 +2070,14 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *ha, fc_port_t *fcport,
  *
  * Context:
  */
-void qla2x00_mark_device_lost(scsi_qla_host_t *ha, fc_port_t *fcport,
+void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
     int do_login, int defer)
 {
 	if (atomic_read(&fcport->state) == FCS_ONLINE &&
-	    ha->vp_idx == fcport->vp_idx)
-		qla2x00_schedule_rport_del(ha, fcport, defer);
-
+	    vha->vp_idx == fcport->vp_idx) {
+		atomic_set(&fcport->state, FCS_DEVICE_LOST);
+		qla2x00_schedule_rport_del(vha, fcport, defer);
+	}
 	/*
 	 * We may need to retry the login, so don't change the state of the
 	 * port but do the retries.
@@ -1895,13 +2089,13 @@ void qla2x00_mark_device_lost(scsi_qla_host_t *ha, fc_port_t *fcport,
 		return;
 
 	if (fcport->login_retry == 0) {
-		fcport->login_retry = ha->login_retry_count;
-		set_bit(RELOGIN_NEEDED, &ha->dpc_flags);
+		fcport->login_retry = vha->hw->login_retry_count;
+		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
 
 		DEBUG(printk("scsi(%ld): Port login retry: "
 		    "%02x%02x%02x%02x%02x%02x%02x%02x, "
 		    "id = 0x%04x retry cnt=%d\n",
-		    ha->host_no,
+		    vha->host_no,
 		    fcport->port_name[0],
 		    fcport->port_name[1],
 		    fcport->port_name[2],
@@ -1929,13 +2123,12 @@ void qla2x00_mark_device_lost(scsi_qla_host_t *ha, fc_port_t *fcport,
  * Context:
  */
 void
-qla2x00_mark_all_devices_lost(scsi_qla_host_t *ha, int defer)
+qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer)
 {
 	fc_port_t *fcport;
-	scsi_qla_host_t *pha = to_qla_parent(ha);
 
-	list_for_each_entry(fcport, &pha->fcports, list) {
-		if (ha->vp_idx != fcport->vp_idx)
+	list_for_each_entry(fcport, &vha->vp_fcports, list) {
+		if (vha->vp_idx != fcport->vp_idx)
 			continue;
 		/*
 		 * No point in marking the device as lost, if the device is
@@ -1943,9 +2136,11 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *ha, int defer)
 		 */
 		if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD)
 			continue;
-		if (atomic_read(&fcport->state) == FCS_ONLINE)
-			qla2x00_schedule_rport_del(ha, fcport, defer);
-		atomic_set(&fcport->state, FCS_DEVICE_LOST);
+		if (atomic_read(&fcport->state) == FCS_ONLINE) {
+			atomic_set(&fcport->state, FCS_DEVICE_LOST);
+			qla2x00_schedule_rport_del(vha, fcport, defer);
+		} else
+			atomic_set(&fcport->state, FCS_DEVICE_LOST);
 	}
 }
 
@@ -1958,105 +2153,153 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *ha, int defer)
 *      !0  = failure.
 */
 static int
-qla2x00_mem_alloc(scsi_qla_host_t *ha)
+qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
+	struct req_que **req, struct rsp_que **rsp)
 {
 	char	name[16];
 
-	ha->request_ring = dma_alloc_coherent(&ha->pdev->dev,
-	    (ha->request_q_length + 1) * sizeof(request_t), &ha->request_dma,
-	    GFP_KERNEL);
-	if (!ha->request_ring)
-		goto fail;
-
-	ha->response_ring = dma_alloc_coherent(&ha->pdev->dev,
-	    (ha->response_q_length + 1) * sizeof(response_t),
-	    &ha->response_dma, GFP_KERNEL);
-	if (!ha->response_ring)
-		goto fail_free_request_ring;
-
-	ha->gid_list = dma_alloc_coherent(&ha->pdev->dev, GID_LIST_SIZE,
-	    &ha->gid_list_dma, GFP_KERNEL);
-	if (!ha->gid_list)
-		goto fail_free_response_ring;
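+	/*
+	 * Size the init control block up front; MID-capable ISPs use the
+	 * larger 24xx multi-ID format.
+	 */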
+	ha->init_cb_size = sizeof(init_cb_t);
+	if (IS_QLA2XXX_MIDTYPE(ha))
+		ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
 
 	ha->init_cb = dma_alloc_coherent(&ha->pdev->dev, ha->init_cb_size,
-	    &ha->init_cb_dma, GFP_KERNEL);
+		&ha->init_cb_dma, GFP_KERNEL);
 	if (!ha->init_cb)
-		goto fail_free_gid_list;
+		goto fail;
 
-	snprintf(name, sizeof(name), "%s_%ld", QLA2XXX_DRIVER_NAME,
-	    ha->host_no);
-	ha->s_dma_pool = dma_pool_create(name, &ha->pdev->dev,
-	    DMA_POOL_SIZE, 8, 0);
-	if (!ha->s_dma_pool)
+	ha->gid_list = dma_alloc_coherent(&ha->pdev->dev, GID_LIST_SIZE,
+		&ha->gid_list_dma, GFP_KERNEL);
+	if (!ha->gid_list)
 		goto fail_free_init_cb;
 
 	ha->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
 	if (!ha->srb_mempool)
-		goto fail_free_s_dma_pool;
+		goto fail_free_gid_list;
 
 	/* Get memory for cached NVRAM */
 	ha->nvram = kzalloc(MAX_NVRAM_SIZE, GFP_KERNEL);
 	if (!ha->nvram)
 		goto fail_free_srb_mempool;
 
+	snprintf(name, sizeof(name), "%s_%d", QLA2XXX_DRIVER_NAME,
+		ha->pdev->device);
+	ha->s_dma_pool = dma_pool_create(name, &ha->pdev->dev,
+		DMA_POOL_SIZE, 8, 0);
+	if (!ha->s_dma_pool)
+		goto fail_free_nvram;
+
 	/* Allocate memory for SNS commands */
 	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
 		/* Get consistent memory allocated for SNS commands */
 		ha->sns_cmd = dma_alloc_coherent(&ha->pdev->dev,
 		    sizeof(struct sns_cmd_pkt), &ha->sns_cmd_dma, GFP_KERNEL);
 		if (!ha->sns_cmd)
-			goto fail_free_nvram;
+			goto fail_dma_pool;
 	} else {
 		/* Get consistent memory allocated for MS IOCB */
 		ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
 		    &ha->ms_iocb_dma);
 		if (!ha->ms_iocb)
-			goto fail_free_nvram;
+			goto fail_dma_pool;
 
 		/* Get consistent memory allocated for CT SNS commands */
 		ha->ct_sns = dma_alloc_coherent(&ha->pdev->dev,
 		    sizeof(struct ct_sns_pkt), &ha->ct_sns_dma, GFP_KERNEL);
 		if (!ha->ct_sns)
 			goto fail_free_ms_iocb;
 	}
 
-	return 0;
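+	/*
+	 * Rings now live in dedicated req_que/rsp_que structures rather
+	 * than in the host structure, so additional queue pairs can be
+	 * created for multiqueue operation.
+	 */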
+	/* Allocate memory for request ring */
+	*req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
+	if (!*req) {
+		DEBUG(printk("Unable to allocate memory for req\n"));
+		goto fail_req;
+	}
+	(*req)->length = req_len;
+	(*req)->ring = dma_alloc_coherent(&ha->pdev->dev,
+		((*req)->length + 1) * sizeof(request_t),
+		&(*req)->dma, GFP_KERNEL);
+	if (!(*req)->ring) {
+		DEBUG(printk("Unable to allocate memory for req_ring\n"));
+		goto fail_req_ring;
+	}
+	/* Allocate memory for response ring */
+	*rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
+	if (!*rsp) {
+		qla_printk(KERN_WARNING, ha,
+			"Unable to allocate memory for rsp\n");
+		goto fail_rsp;
+	}
+	(*rsp)->hw = ha;
+	(*rsp)->length = rsp_len;
+	(*rsp)->ring = dma_alloc_coherent(&ha->pdev->dev,
+		((*rsp)->length + 1) * sizeof(response_t),
+		&(*rsp)->dma, GFP_KERNEL);
+	if (!(*rsp)->ring) {
+		qla_printk(KERN_WARNING, ha,
+			"Unable to allocate memory for rsp_ring\n");
+		goto fail_rsp_ring;
+	}
+	(*req)->rsp = *rsp;
+	(*rsp)->req = *req;
+	/* Allocate memory for NVRAM data for vports */
+	if (ha->nvram_npiv_size) {
+		ha->npiv_info = kzalloc(sizeof(struct qla_npiv_entry) *
+					ha->nvram_npiv_size, GFP_KERNEL);
+		if (!ha->npiv_info) {
+			qla_printk(KERN_WARNING, ha,
+				"Unable to allocate memory for npiv info\n");
+			goto fail_npiv_info;
+		}
+	} else
+		ha->npiv_info = NULL;
 
+	INIT_LIST_HEAD(&ha->vp_list);
+	return 1;
+
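+	/* Failure labels below unwind the allocations in reverse order. */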
+fail_npiv_info:
+	dma_free_coherent(&ha->pdev->dev, ((*rsp)->length + 1) *
+		sizeof(response_t), (*rsp)->ring, (*rsp)->dma);
+	(*rsp)->ring = NULL;
+	(*rsp)->dma = 0;
+fail_rsp_ring:
+	kfree(*rsp);
+fail_rsp:
+	dma_free_coherent(&ha->pdev->dev, ((*req)->length + 1) *
+		sizeof(request_t), (*req)->ring, (*req)->dma);
+	(*req)->ring = NULL;
+	(*req)->dma = 0;
+fail_req_ring:
+	kfree(*req);
+fail_req:
+	dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt),
+		ha->ct_sns, ha->ct_sns_dma);
+	ha->ct_sns = NULL;
+	ha->ct_sns_dma = 0;
 fail_free_ms_iocb:
 	dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
 	ha->ms_iocb = NULL;
 	ha->ms_iocb_dma = 0;
+fail_dma_pool:
+	dma_pool_destroy(ha->s_dma_pool);
+	ha->s_dma_pool = NULL;
 fail_free_nvram:
 	kfree(ha->nvram);
 	ha->nvram = NULL;
 fail_free_srb_mempool:
 	mempool_destroy(ha->srb_mempool);
 	ha->srb_mempool = NULL;
-fail_free_s_dma_pool:
-	dma_pool_destroy(ha->s_dma_pool);
-	ha->s_dma_pool = NULL;
-fail_free_init_cb:
-	dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb,
-	    ha->init_cb_dma);
-	ha->init_cb = NULL;
-	ha->init_cb_dma = 0;
 fail_free_gid_list:
 	dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, ha->gid_list,
 	    ha->gid_list_dma);
 	ha->gid_list = NULL;
 	ha->gid_list_dma = 0;
-fail_free_response_ring:
-	dma_free_coherent(&ha->pdev->dev, (ha->response_q_length + 1) *
-	    sizeof(response_t), ha->response_ring, ha->response_dma);
-	ha->response_ring = NULL;
-	ha->response_dma = 0;
-fail_free_request_ring:
-	dma_free_coherent(&ha->pdev->dev, (ha->request_q_length + 1) *
-	    sizeof(request_t), ha->request_ring, ha->request_dma);
-	ha->request_ring = NULL;
-	ha->request_dma = 0;
+fail_free_init_cb:
+	dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb,
+	    ha->init_cb_dma);
+	ha->init_cb = NULL;
+	ha->init_cb_dma = 0;
 fail:
+	DEBUG(printk("%s: Memory allocation failure\n", __func__));
 	return -ENOMEM;
 }
 
@@ -2068,32 +2311,29 @@ fail:
 *      ha = adapter block pointer.
 */
 static void
-qla2x00_mem_free(scsi_qla_host_t *ha)
+qla2x00_mem_free(struct qla_hw_data *ha)
 {
-	struct list_head	*fcpl, *fcptemp;
-	fc_port_t	*fcport;
-
 	if (ha->srb_mempool)
 		mempool_destroy(ha->srb_mempool);
 
 	if (ha->fce)
 		dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce,
 		    ha->fce_dma);
 
 	if (ha->fw_dump) {
 		if (ha->eft)
 			dma_free_coherent(&ha->pdev->dev,
 			    ntohl(ha->fw_dump->eft_size), ha->eft, ha->eft_dma);
 		vfree(ha->fw_dump);
 	}
 
 	if (ha->sns_cmd)
 		dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
 		    ha->sns_cmd, ha->sns_cmd_dma);
 
 	if (ha->ct_sns)
 		dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt),
 		    ha->ct_sns, ha->ct_sns_dma);
 
 	if (ha->sfp_data)
 		dma_pool_free(ha->s_dma_pool, ha->sfp_data, ha->sfp_data_dma);
@@ -2104,23 +2344,18 @@ qla2x00_mem_free(scsi_qla_host_t *ha)
 	if (ha->s_dma_pool)
 		dma_pool_destroy(ha->s_dma_pool);
 
-	if (ha->init_cb)
-		dma_free_coherent(&ha->pdev->dev, ha->init_cb_size,
-		    ha->init_cb, ha->init_cb_dma);
 
 	if (ha->gid_list)
 		dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, ha->gid_list,
 		    ha->gid_list_dma);
 
-	if (ha->response_ring)
-		dma_free_coherent(&ha->pdev->dev,
-		    (ha->response_q_length + 1) * sizeof(response_t),
-		    ha->response_ring, ha->response_dma);
 
-	if (ha->request_ring)
-		dma_free_coherent(&ha->pdev->dev,
-		    (ha->request_q_length + 1) * sizeof(request_t),
-		    ha->request_ring, ha->request_dma);
+	if (ha->init_cb)
+		dma_free_coherent(&ha->pdev->dev, ha->init_cb_size,
+		ha->init_cb, ha->init_cb_dma);
+	vfree(ha->optrom_buffer);
+	kfree(ha->nvram);
+	kfree(ha->npiv_info);
 
 	ha->srb_mempool = NULL;
 	ha->eft = NULL;
@@ -2139,30 +2374,45 @@ qla2x00_mem_free(scsi_qla_host_t *ha)
 	ha->gid_list = NULL;
 	ha->gid_list_dma = 0;
 
-	ha->response_ring = NULL;
-	ha->response_dma = 0;
-	ha->request_ring = NULL;
-	ha->request_dma = 0;
+	ha->fw_dump = NULL;
+	ha->fw_dumped = 0;
+	ha->fw_dump_reading = 0;
+}
 
-	list_for_each_safe(fcpl, fcptemp, &ha->fcports) {
-		fcport = list_entry(fcpl, fc_port_t, list);
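+/*
+ * Allocate a Scsi_Host with an embedded scsi_qla_host ("vha") and bind
+ * it to the shared qla_hw_data.
+ */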
+struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
+						struct qla_hw_data *ha)
+{
+	struct Scsi_Host *host;
+	struct scsi_qla_host *vha = NULL;
 
-		/* fc ports */
-		list_del_init(&fcport->list);
-		kfree(fcport);
+	host = scsi_host_alloc(sht, sizeof(scsi_qla_host_t));
+	if (host == NULL) {
+		printk(KERN_WARNING
+		"qla2xxx: Couldn't allocate host from scsi layer!\n");
+		goto fail;
 	}
-	INIT_LIST_HEAD(&ha->fcports);
 
-	ha->fw_dump = NULL;
-	ha->fw_dumped = 0;
-	ha->fw_dump_reading = 0;
+	/* Clear our data area */
+	vha = shost_priv(host);
+	memset(vha, 0, sizeof(scsi_qla_host_t));
 
-	vfree(ha->optrom_buffer);
-	kfree(ha->nvram);
+	vha->host = host;
+	vha->host_no = host->host_no;
+	vha->hw = ha;
+
+	INIT_LIST_HEAD(&vha->vp_fcports);
+	INIT_LIST_HEAD(&vha->work_list);
+	INIT_LIST_HEAD(&vha->list);
+
+	sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no);
+	return vha;
+
+fail:
+	return vha;
 }
 
 static struct qla_work_evt *
-qla2x00_alloc_work(struct scsi_qla_host *ha, enum qla_work_type type,
+qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type,
     int locked)
 {
 	struct qla_work_evt *e;
@@ -2179,42 +2429,42 @@ qla2x00_alloc_work(struct scsi_qla_host *ha, enum qla_work_type type,
 }
 
 static int
-qla2x00_post_work(struct scsi_qla_host *ha, struct qla_work_evt *e, int locked)
+qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e, int locked)
 {
 	unsigned long uninitialized_var(flags);
-	scsi_qla_host_t *pha = to_qla_parent(ha);
+	struct qla_hw_data *ha = vha->hw;
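+	/* vha->work_list is serialized by the adapter-wide hardware_lock. */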
 
 	if (!locked)
-		spin_lock_irqsave(&pha->hardware_lock, flags);
-	list_add_tail(&e->list, &ha->work_list);
-	qla2xxx_wake_dpc(ha);
+		spin_lock_irqsave(&ha->hardware_lock, flags);
+	list_add_tail(&e->list, &vha->work_list);
+	qla2xxx_wake_dpc(vha);
 	if (!locked)
-		spin_unlock_irqrestore(&pha->hardware_lock, flags);
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
 	return QLA_SUCCESS;
 }
 
 int
-qla2x00_post_aen_work(struct scsi_qla_host *ha, enum fc_host_event_code code,
+qla2x00_post_aen_work(struct scsi_qla_host *vha, enum fc_host_event_code code,
     u32 data)
 {
 	struct qla_work_evt *e;
 
-	e = qla2x00_alloc_work(ha, QLA_EVT_AEN, 1);
+	e = qla2x00_alloc_work(vha, QLA_EVT_AEN, 1);
 	if (!e)
 		return QLA_FUNCTION_FAILED;
 
 	e->u.aen.code = code;
 	e->u.aen.data = data;
-	return qla2x00_post_work(ha, e, 1);
+	return qla2x00_post_work(vha, e, 1);
 }
 
 int
-qla2x00_post_hwe_work(struct scsi_qla_host *ha, uint16_t code, uint16_t d1,
+qla2x00_post_hwe_work(struct scsi_qla_host *vha, uint16_t code, uint16_t d1,
     uint16_t d2, uint16_t d3)
 {
 	struct qla_work_evt *e;
 
-	e = qla2x00_alloc_work(ha, QLA_EVT_HWE_LOG, 1);
+	e = qla2x00_alloc_work(vha, QLA_EVT_HWE_LOG, 1);
 	if (!e)
 		return QLA_FUNCTION_FAILED;
 
@@ -2222,36 +2472,95 @@ qla2x00_post_hwe_work(struct scsi_qla_host *ha, uint16_t code, uint16_t d1,
 	e->u.hwe.d1 = d1;
 	e->u.hwe.d2 = d2;
 	e->u.hwe.d3 = d3;
-	return qla2x00_post_work(ha, e, 1);
+	return qla2x00_post_work(vha, e, 1);
 }
 
 static void
-qla2x00_do_work(struct scsi_qla_host *ha)
+qla2x00_do_work(struct scsi_qla_host *vha)
 {
 	struct qla_work_evt *e;
-	scsi_qla_host_t *pha = to_qla_parent(ha);
+	struct qla_hw_data *ha = vha->hw;
 
-	spin_lock_irq(&pha->hardware_lock);
-	while (!list_empty(&ha->work_list)) {
-		e = list_entry(ha->work_list.next, struct qla_work_evt, list);
+	spin_lock_irq(&ha->hardware_lock);
+	while (!list_empty(&vha->work_list)) {
+		e = list_entry(vha->work_list.next, struct qla_work_evt, list);
 		list_del_init(&e->list);
-		spin_unlock_irq(&pha->hardware_lock);
+		spin_unlock_irq(&ha->hardware_lock);
 
 		switch (e->type) {
 		case QLA_EVT_AEN:
-			fc_host_post_event(ha->host, fc_get_event_number(),
+			fc_host_post_event(vha->host, fc_get_event_number(),
 			    e->u.aen.code, e->u.aen.data);
 			break;
 		case QLA_EVT_HWE_LOG:
-			qla2xxx_hw_event_log(ha, e->u.hwe.code, e->u.hwe.d1,
+			qla2xxx_hw_event_log(vha, e->u.hwe.code, e->u.hwe.d1,
 			    e->u.hwe.d2, e->u.hwe.d3);
 			break;
 		}
 		if (e->flags & QLA_EVT_FLAG_FREE)
 			kfree(e);
-		spin_lock_irq(&pha->hardware_lock);
+		spin_lock_irq(&ha->hardware_lock);
+	}
+	spin_unlock_irq(&ha->hardware_lock);
+}
+
+/*
+ * Relogin all the fcports of a vport.
+ * Context: DPC thread.
+ */
+void qla2x00_relogin(struct scsi_qla_host *vha)
+{
+	fc_port_t       *fcport;
+	uint8_t         status;
+	uint16_t        next_loopid = 0;
+	struct qla_hw_data *ha = vha->hw;
+
+	list_for_each_entry(fcport, &vha->vp_fcports, list) {
+		/*
+		 * If the port is not ONLINE then try to login
+		 * to it if we haven't run out of retries.
+		 */
+		if (atomic_read(&fcport->state) != FCS_ONLINE &&
+		    fcport->login_retry) {
+
+			if (fcport->flags & FCF_FABRIC_DEVICE) {
+				if (fcport->flags & FCF_TAPE_PRESENT)
+					ha->isp_ops->fabric_logout(vha,
+							fcport->loop_id,
+							fcport->d_id.b.domain,
+							fcport->d_id.b.area,
+							fcport->d_id.b.al_pa);
+
+				status = qla2x00_fabric_login(vha, fcport,
+							&next_loopid);
+			} else
+				status = qla2x00_local_device_login(vha,
+								fcport);
+
+			fcport->login_retry--;
+			if (status == QLA_SUCCESS) {
+				fcport->old_loop_id = fcport->loop_id;
+
+				DEBUG(printk("scsi(%ld): port login OK: logged "
+				"in ID 0x%x\n", vha->host_no, fcport->loop_id));
+
+				qla2x00_update_fcport(vha, fcport);
+
+			} else if (status == 1) {
+				set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+				/* retry the login again */
+				DEBUG(printk("scsi(%ld): Retrying"
+				" %d login again loop_id 0x%x\n",
+				vha->host_no, fcport->login_retry,
+						fcport->loop_id));
+			} else {
+				fcport->login_retry = 0;
+			}
+
+			if (fcport->login_retry == 0 && status != QLA_SUCCESS)
+				fcport->loop_id = FC_NO_LOOP_ID;
+		}
+		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
+			break;
 	}
-	spin_unlock_irq(&pha->hardware_lock);
 }
 
 /**************************************************************************
@@ -2271,15 +2580,11 @@ static int
 qla2x00_do_dpc(void *data)
 {
 	int		rval;
-	scsi_qla_host_t *ha;
-	fc_port_t	*fcport;
-	uint8_t		status;
-	uint16_t	next_loopid;
-	struct scsi_qla_host *vha;
-	int             i;
+	scsi_qla_host_t *base_vha;
+	struct qla_hw_data *ha;
 
-
-	ha = (scsi_qla_host_t *)data;
+	ha = (struct qla_hw_data *)data;
+	base_vha = pci_get_drvdata(ha->pdev);
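+	/*
+	 * A single DPC thread now serves the whole adapter; per-vport work
+	 * is dispatched via qla2x00_do_dpc_all_vps() below.
+	 */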
 
 	set_user_nice(current, -20);
 
@@ -2293,10 +2598,10 @@ qla2x00_do_dpc(void *data)
 		DEBUG3(printk("qla2x00: DPC handler waking up\n"));
 
 		/* Initialization not yet finished. Don't do anything yet. */
-		if (!ha->flags.init_done)
+		if (!base_vha->flags.init_done)
 			continue;
 
-		DEBUG3(printk("scsi(%ld): DPC handler\n", ha->host_no));
+		DEBUG3(printk("scsi(%ld): DPC handler\n", base_vha->host_no));
 
 		ha->dpc_active = 1;
 
@@ -2305,149 +2610,98 @@ qla2x00_do_dpc(void *data)
 			continue;
 		}
 
-		qla2x00_do_work(ha);
+		qla2x00_do_work(base_vha);
 
-		if (test_and_clear_bit(ISP_ABORT_NEEDED, &ha->dpc_flags)) {
+		if (test_and_clear_bit(ISP_ABORT_NEEDED,
+						&base_vha->dpc_flags)) {
 
 			DEBUG(printk("scsi(%ld): dpc: sched "
 			    "qla2x00_abort_isp ha = %p\n",
-			    ha->host_no, ha));
+			    base_vha->host_no, ha));
 			if (!(test_and_set_bit(ABORT_ISP_ACTIVE,
-			    &ha->dpc_flags))) {
+			    &base_vha->dpc_flags))) {
 
-				if (qla2x00_abort_isp(ha)) {
+				if (qla2x00_abort_isp(base_vha)) {
 					/* failed. retry later */
 					set_bit(ISP_ABORT_NEEDED,
-					    &ha->dpc_flags);
-				}
-				clear_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags);
-			}
-
-			for_each_mapped_vp_idx(ha, i) {
-				list_for_each_entry(vha, &ha->vp_list,
-				    vp_list) {
-					if (i == vha->vp_idx) {
-						set_bit(ISP_ABORT_NEEDED,
-						    &vha->dpc_flags);
-						break;
-					}
+					    &base_vha->dpc_flags);
 				}
+				clear_bit(ABORT_ISP_ACTIVE,
+						&base_vha->dpc_flags);
 			}
 
 			DEBUG(printk("scsi(%ld): dpc: qla2x00_abort_isp end\n",
-			    ha->host_no));
+			    base_vha->host_no));
 		}
 
-		if (test_bit(FCPORT_UPDATE_NEEDED, &ha->dpc_flags)) {
-			qla2x00_update_fcports(ha);
-			clear_bit(FCPORT_UPDATE_NEEDED, &ha->dpc_flags);
+		if (test_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags)) {
+			qla2x00_update_fcports(base_vha);
+			clear_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
 		}
 
-		if (test_and_clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags) &&
-		    (!(test_and_set_bit(RESET_ACTIVE, &ha->dpc_flags)))) {
+		if (test_and_clear_bit(RESET_MARKER_NEEDED,
+							&base_vha->dpc_flags) &&
+		    (!(test_and_set_bit(RESET_ACTIVE, &base_vha->dpc_flags)))) {
 
 			DEBUG(printk("scsi(%ld): qla2x00_reset_marker()\n",
-			    ha->host_no));
+			    base_vha->host_no));
 
-			qla2x00_rst_aen(ha);
-			clear_bit(RESET_ACTIVE, &ha->dpc_flags);
+			qla2x00_rst_aen(base_vha);
+			clear_bit(RESET_ACTIVE, &base_vha->dpc_flags);
 		}
 
 		/* Retry each device up to login retry count */
-		if ((test_and_clear_bit(RELOGIN_NEEDED, &ha->dpc_flags)) &&
-		    !test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) &&
-		    atomic_read(&ha->loop_state) != LOOP_DOWN) {
+		if ((test_and_clear_bit(RELOGIN_NEEDED,
+						&base_vha->dpc_flags)) &&
+		    !test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags) &&
+		    atomic_read(&base_vha->loop_state) != LOOP_DOWN) {
 
 			DEBUG(printk("scsi(%ld): qla2x00_port_login()\n",
-			    ha->host_no));
-
-			next_loopid = 0;
-			list_for_each_entry(fcport, &ha->fcports, list) {
-				/*
-				 * If the port is not ONLINE then try to login
-				 * to it if we haven't run out of retries.
-				 */
-				if (atomic_read(&fcport->state) != FCS_ONLINE &&
-				    fcport->login_retry) {
-
-					if (fcport->flags & FCF_FABRIC_DEVICE) {
-						if (fcport->flags &
-						    FCF_TAPE_PRESENT)
-							ha->isp_ops->fabric_logout(
-							    ha, fcport->loop_id,
-							    fcport->d_id.b.domain,
-							    fcport->d_id.b.area,
-							    fcport->d_id.b.al_pa);
-						status = qla2x00_fabric_login(
-						    ha, fcport, &next_loopid);
-					} else
-						status =
-						    qla2x00_local_device_login(
-							ha, fcport);
-
-					fcport->login_retry--;
-					if (status == QLA_SUCCESS) {
-						fcport->old_loop_id = fcport->loop_id;
-
-						DEBUG(printk("scsi(%ld): port login OK: logged in ID 0x%x\n",
-						    ha->host_no, fcport->loop_id));
-
-						qla2x00_update_fcport(ha,
-						    fcport);
-					} else if (status == 1) {
-						set_bit(RELOGIN_NEEDED, &ha->dpc_flags);
-						/* retry the login again */
-						DEBUG(printk("scsi(%ld): Retrying %d login again loop_id 0x%x\n",
-						    ha->host_no,
-						    fcport->login_retry, fcport->loop_id));
-					} else {
-						fcport->login_retry = 0;
-					}
-					if (fcport->login_retry == 0 && status != QLA_SUCCESS)
-						fcport->loop_id = FC_NO_LOOP_ID;
-				}
-				if (test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags))
-					break;
-			}
+					base_vha->host_no));
+			qla2x00_relogin(base_vha);
+
 			DEBUG(printk("scsi(%ld): qla2x00_port_login - end\n",
-			    ha->host_no));
+			    base_vha->host_no));
 		}
 
-		if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)) {
+		if (test_and_clear_bit(LOOP_RESYNC_NEEDED,
+							&base_vha->dpc_flags)) {
 
 			DEBUG(printk("scsi(%ld): qla2x00_loop_resync()\n",
-			    ha->host_no));
+				base_vha->host_no));
 
 			if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE,
-			    &ha->dpc_flags))) {
+			    &base_vha->dpc_flags))) {
 
-				rval = qla2x00_loop_resync(ha);
+				rval = qla2x00_loop_resync(base_vha);
 
-				clear_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags);
+				clear_bit(LOOP_RESYNC_ACTIVE,
+						&base_vha->dpc_flags);
 			}
 
 			DEBUG(printk("scsi(%ld): qla2x00_loop_resync - end\n",
-			    ha->host_no));
+			    base_vha->host_no));
 		}
 
-		if (test_bit(NPIV_CONFIG_NEEDED, &ha->dpc_flags) &&
-		    atomic_read(&ha->loop_state) == LOOP_READY) {
-			clear_bit(NPIV_CONFIG_NEEDED, &ha->dpc_flags);
-			qla2xxx_flash_npiv_conf(ha);
+		if (test_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags) &&
+		    atomic_read(&base_vha->loop_state) == LOOP_READY) {
+			clear_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags);
+			qla2xxx_flash_npiv_conf(base_vha);
 		}
 
 		if (!ha->interrupts_on)
 			ha->isp_ops->enable_intrs(ha);
 
-		if (test_and_clear_bit(BEACON_BLINK_NEEDED, &ha->dpc_flags))
-			ha->isp_ops->beacon_blink(ha);
+		if (test_and_clear_bit(BEACON_BLINK_NEEDED,
+					&base_vha->dpc_flags))
+			ha->isp_ops->beacon_blink(base_vha);
 
-		qla2x00_do_dpc_all_vps(ha);
+		qla2x00_do_dpc_all_vps(base_vha);
 
 		ha->dpc_active = 0;
 	} /* End of while(1) */
 
-	DEBUG(printk("scsi(%ld): DPC handler exiting\n", ha->host_no));
+	DEBUG(printk("scsi(%ld): DPC handler exiting\n", base_vha->host_no));
 
 	/*
 	 * Make sure that nobody tries to wake us up again.
@@ -2458,11 +2712,12 @@ qla2x00_do_dpc(void *data)
 }
 
 void
-qla2xxx_wake_dpc(scsi_qla_host_t *ha)
+qla2xxx_wake_dpc(struct scsi_qla_host *vha)
 {
+	struct qla_hw_data *ha = vha->hw;
 	struct task_struct *t = ha->dpc_thread;
 
-	if (!test_bit(UNLOADING, &ha->dpc_flags) && t)
+	if (!test_bit(UNLOADING, &vha->dpc_flags) && t)
 		wake_up_process(t);
 }
 
@@ -2474,26 +2729,26 @@ qla2xxx_wake_dpc(scsi_qla_host_t *ha)
 *      ha  = adapter block pointer.
 */
 static void
-qla2x00_rst_aen(scsi_qla_host_t *ha)
+qla2x00_rst_aen(scsi_qla_host_t *vha)
 {
-	if (ha->flags.online && !ha->flags.reset_active &&
-	    !atomic_read(&ha->loop_down_timer) &&
-	    !(test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags))) {
+	if (vha->flags.online && !vha->flags.reset_active &&
+	    !atomic_read(&vha->loop_down_timer) &&
+	    !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))) {
 		do {
-			clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
+			clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
 
 			/*
 			 * Issue marker command only when we are going to start
 			 * the I/O.
 			 */
-			ha->marker_needed = 1;
-		} while (!atomic_read(&ha->loop_down_timer) &&
-		    (test_bit(RESET_MARKER_NEEDED, &ha->dpc_flags)));
+			vha->marker_needed = 1;
+		} while (!atomic_read(&vha->loop_down_timer) &&
+		    (test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags)));
 	}
 }
 
 static void
-qla2x00_sp_free_dma(scsi_qla_host_t *ha, srb_t *sp)
+qla2x00_sp_free_dma(srb_t *sp)
 {
 	struct scsi_cmnd *cmd = sp->cmd;
 
@@ -2505,11 +2760,11 @@ qla2x00_sp_free_dma(scsi_qla_host_t *ha, srb_t *sp)
 }
 
 void
-qla2x00_sp_compl(scsi_qla_host_t *ha, srb_t *sp)
+qla2x00_sp_compl(struct qla_hw_data *ha, srb_t *sp)
 {
 	struct scsi_cmnd *cmd = sp->cmd;
 
-	qla2x00_sp_free_dma(ha, sp);
+	qla2x00_sp_free_dma(sp);
 
 	mempool_free(sp, ha->srb_mempool);
 
@@ -2525,7 +2780,7 @@ qla2x00_sp_compl(scsi_qla_host_t *ha, srb_t *sp)
 * Context: Interrupt
 ***************************************************************************/
 void
-qla2x00_timer(scsi_qla_host_t *ha)
+qla2x00_timer(scsi_qla_host_t *vha)
 {
 	unsigned long	cpu_flags = 0;
 	fc_port_t	*fcport;
@@ -2533,8 +2788,8 @@ qla2x00_timer(scsi_qla_host_t *ha)
 	int		index;
 	srb_t		*sp;
 	int		t;
-	scsi_qla_host_t *pha = to_qla_parent(ha);
+	struct qla_hw_data *ha = vha->hw;
+	struct req_que *req;
 
 	/*
 	 * Ports - Port down timer.
 	 *
@@ -2543,7 +2798,7 @@ qla2x00_timer(scsi_qla_host_t *ha)
 	 * the port is marked DEAD.
 	 */
 	t = 0;
-	list_for_each_entry(fcport, &ha->fcports, list) {
+	list_for_each_entry(fcport, &vha->vp_fcports, list) {
 		if (fcport->port_type != FCT_TARGET)
 			continue;
 
@@ -2557,7 +2812,7 @@ qla2x00_timer(scsi_qla_host_t *ha)
 
 			DEBUG(printk("scsi(%ld): fcport-%d - port retry count: "
 			    "%d remaining\n",
-			    ha->host_no,
+			    vha->host_no,
 			    t, atomic_read(&fcport->port_down_timer)));
 		}
 		t++;
@@ -2565,30 +2820,32 @@ qla2x00_timer(scsi_qla_host_t *ha)
 
 
 	/* Loop down handler. */
-	if (atomic_read(&ha->loop_down_timer) > 0 &&
-	    !(test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags)) && ha->flags.online) {
+	if (atomic_read(&vha->loop_down_timer) > 0 &&
+	    !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) &&
+	    vha->flags.online) {
 
-		if (atomic_read(&ha->loop_down_timer) ==
-		    ha->loop_down_abort_time) {
+		if (atomic_read(&vha->loop_down_timer) ==
+		    vha->loop_down_abort_time) {
 
 			DEBUG(printk("scsi(%ld): Loop Down - aborting the "
 			    "queues before time expire\n",
-			    ha->host_no));
+			    vha->host_no));
 
-			if (!IS_QLA2100(ha) && ha->link_down_timeout)
-				atomic_set(&ha->loop_state, LOOP_DEAD);
+			if (!IS_QLA2100(ha) && vha->link_down_timeout)
+				atomic_set(&vha->loop_state, LOOP_DEAD);
 
 			/* Schedule an ISP abort to return any tape commands. */
 			/* NPIV - scan physical port only */
-			if (!ha->parent) {
+			if (!vha->vp_idx) {
 				spin_lock_irqsave(&ha->hardware_lock,
 				    cpu_flags);
+				req = ha->req_q_map[0];
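+				/*
+				 * Outstanding commands are now tracked per
+				 * request queue; scan the base queue.
+				 */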
 				for (index = 1;
 				    index < MAX_OUTSTANDING_COMMANDS;
 				    index++) {
 					fc_port_t *sfcp;
 
-					sp = ha->outstanding_cmds[index];
+					sp = req->outstanding_cmds[index];
 					if (!sp)
 						continue;
 					sfcp = sp->fcport;
@@ -2596,63 +2853,63 @@ qla2x00_timer(scsi_qla_host_t *ha)
 						continue;
 
 					set_bit(ISP_ABORT_NEEDED,
-					    &ha->dpc_flags);
+					    &vha->dpc_flags);
 					break;
 				}
 				spin_unlock_irqrestore(&ha->hardware_lock,
 				    cpu_flags);
 			}
-			set_bit(ABORT_QUEUES_NEEDED, &ha->dpc_flags);
+			set_bit(ABORT_QUEUES_NEEDED, &vha->dpc_flags);
 			start_dpc++;
 		}
 
 		/* if the loop has been down for 4 minutes, reinit adapter */
-		if (atomic_dec_and_test(&ha->loop_down_timer) != 0) {
+		if (atomic_dec_and_test(&vha->loop_down_timer) != 0) {
 			DEBUG(printk("scsi(%ld): Loop down exceed 4 mins - "
 			    "restarting queues.\n",
-			    ha->host_no));
+			    vha->host_no));
 
-			set_bit(RESTART_QUEUES_NEEDED, &ha->dpc_flags);
+			set_bit(RESTART_QUEUES_NEEDED, &vha->dpc_flags);
 			start_dpc++;
 
-			if (!(ha->device_flags & DFLG_NO_CABLE) &&
-			    !ha->parent) {
+			if (!(vha->device_flags & DFLG_NO_CABLE) &&
+			    !vha->vp_idx) {
 				DEBUG(printk("scsi(%ld): Loop down - "
 				    "aborting ISP.\n",
-				    ha->host_no));
+				    vha->host_no));
 				qla_printk(KERN_WARNING, ha,
 				    "Loop down - aborting ISP.\n");
 
-				set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
+				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
 			}
 		}
 		DEBUG3(printk("scsi(%ld): Loop Down - seconds remaining %d\n",
-		    ha->host_no,
-		    atomic_read(&ha->loop_down_timer)));
+		    vha->host_no,
+		    atomic_read(&vha->loop_down_timer)));
 	}
 
 	/* Check if beacon LED needs to be blinked */
 	if (ha->beacon_blink_led == 1) {
-		set_bit(BEACON_BLINK_NEEDED, &ha->dpc_flags);
+		set_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags);
 		start_dpc++;
 	}
 
 	/* Process any deferred work. */
-	if (!list_empty(&ha->work_list))
+	if (!list_empty(&vha->work_list))
 		start_dpc++;
 
 	/* Schedule the DPC routine if needed */
-	if ((test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) ||
-	    test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) ||
-	    test_bit(FCPORT_UPDATE_NEEDED, &ha->dpc_flags) ||
+	if ((test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
+	    test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) ||
+	    test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags) ||
 	    start_dpc ||
-	    test_bit(RESET_MARKER_NEEDED, &ha->dpc_flags) ||
-	    test_bit(BEACON_BLINK_NEEDED, &ha->dpc_flags) ||
-	    test_bit(VP_DPC_NEEDED, &ha->dpc_flags) ||
-	    test_bit(RELOGIN_NEEDED, &ha->dpc_flags)))
-		qla2xxx_wake_dpc(pha);
+	    test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) ||
+	    test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags) ||
+	    test_bit(VP_DPC_NEEDED, &vha->dpc_flags) ||
+	    test_bit(RELOGIN_NEEDED, &vha->dpc_flags)))
+		qla2xxx_wake_dpc(vha);
 
-	qla2x00_restart_timer(ha, WATCH_INTERVAL);
+	qla2x00_restart_timer(vha, WATCH_INTERVAL);
 }
 
 /* Firmware interface routines. */
@@ -2684,8 +2941,9 @@ static struct fw_blob qla_fw_blobs[FW_BLOBS] = {
 };
 
 struct fw_blob *
-qla2x00_request_firmware(scsi_qla_host_t *ha)
+qla2x00_request_firmware(scsi_qla_host_t *vha)
 {
+	struct qla_hw_data *ha = vha->hw;
 	struct fw_blob *blob;
 
 	blob = NULL;
@@ -2709,7 +2967,7 @@ qla2x00_request_firmware(scsi_qla_host_t *ha)
 
 	if (request_firmware(&blob->fw, blob->name, &ha->pdev->dev)) {
 		DEBUG2(printk("scsi(%ld): Failed to load firmware image "
-		    "(%s).\n", ha->host_no, blob->name));
+		    "(%s).\n", vha->host_no, blob->name));
 		blob->fw = NULL;
 		blob = NULL;
 		goto out;
@@ -2754,7 +3012,8 @@ qla2xxx_pci_mmio_enabled(struct pci_dev *pdev)
 	int risc_paused = 0;
 	uint32_t stat;
 	unsigned long flags;
-	scsi_qla_host_t *ha = pci_get_drvdata(pdev);
+	scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
+	struct qla_hw_data *ha = base_vha->hw;
 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
 	struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
 
@@ -2777,7 +3036,7 @@ qla2xxx_pci_mmio_enabled(struct pci_dev *pdev)
 	if (risc_paused) {
 		qla_printk(KERN_INFO, ha, "RISC paused -- mmio_enabled, "
 		    "Dumping firmware!\n");
-		ha->isp_ops->fw_dump(ha, 0);
+		ha->isp_ops->fw_dump(base_vha, 0);
 
 		return PCI_ERS_RESULT_NEED_RESET;
 	} else
@@ -2788,7 +3047,8 @@ static pci_ers_result_t
 qla2xxx_pci_slot_reset(struct pci_dev *pdev)
 {
 	pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT;
-	scsi_qla_host_t *ha = pci_get_drvdata(pdev);
+	scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
+	struct qla_hw_data *ha = base_vha->hw;
 	int rc;
 
 	if (ha->mem_only)
@@ -2804,13 +3064,13 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
 	}
 	pci_set_master(pdev);
 
-	if (ha->isp_ops->pci_config(ha))
+	if (ha->isp_ops->pci_config(base_vha))
 		return ret;
 
-	set_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags);
-	if (qla2x00_abort_isp(ha)== QLA_SUCCESS)
+	set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
+	if (qla2x00_abort_isp(base_vha) == QLA_SUCCESS)
 		ret =  PCI_ERS_RESULT_RECOVERED;
-	clear_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags);
+	clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
 
 	return ret;
 }
@@ -2818,10 +3078,11 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
 static void
 qla2xxx_pci_resume(struct pci_dev *pdev)
 {
-	scsi_qla_host_t *ha = pci_get_drvdata(pdev);
+	scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
+	struct qla_hw_data *ha = base_vha->hw;
 	int ret;
 
-	ret = qla2x00_wait_for_hba_online(ha);
+	ret = qla2x00_wait_for_hba_online(base_vha);
 	if (ret != QLA_SUCCESS) {
 		qla_printk(KERN_ERR, ha,
 		    "the device failed to resume I/O "
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index e4af678eb2d6..c538ee1b1a31 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -10,10 +10,6 @@
 #include <linux/vmalloc.h>
 #include <asm/uaccess.h>
 
-static uint16_t qla2x00_nvram_request(scsi_qla_host_t *, uint32_t);
-static void qla2x00_nv_deselect(scsi_qla_host_t *);
-static void qla2x00_nv_write(scsi_qla_host_t *, uint16_t);
-
 /*
  * NVRAM support routines
  */
@@ -23,7 +19,7 @@ static void qla2x00_nv_write(scsi_qla_host_t *, uint16_t);
  * @ha: HA context
  */
 static void
-qla2x00_lock_nvram_access(scsi_qla_host_t *ha)
+qla2x00_lock_nvram_access(struct qla_hw_data *ha)
 {
 	uint16_t data;
 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
@@ -56,7 +52,7 @@ qla2x00_lock_nvram_access(scsi_qla_host_t *ha)
  * @ha: HA context
  */
 static void
-qla2x00_unlock_nvram_access(scsi_qla_host_t *ha)
+qla2x00_unlock_nvram_access(struct qla_hw_data *ha)
 {
 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
 
@@ -67,6 +63,84 @@ qla2x00_unlock_nvram_access(scsi_qla_host_t *ha)
 }
 
 /**
+ * qla2x00_nv_write() - Prepare for NVRAM read/write operation.
+ * @ha: HA context
+ * @data: Serial interface selector
+ */
+static void
+qla2x00_nv_write(struct qla_hw_data *ha, uint16_t data)
+{
+	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+
+	WRT_REG_WORD(&reg->nvram, data | NVR_SELECT | NVR_WRT_ENABLE);
+	RD_REG_WORD(&reg->nvram);		/* PCI Posting. */
+	NVRAM_DELAY();
+	WRT_REG_WORD(&reg->nvram, data | NVR_SELECT | NVR_CLOCK |
+	    NVR_WRT_ENABLE);
+	RD_REG_WORD(&reg->nvram);		/* PCI Posting. */
+	NVRAM_DELAY();
+	WRT_REG_WORD(&reg->nvram, data | NVR_SELECT | NVR_WRT_ENABLE);
+	RD_REG_WORD(&reg->nvram);		/* PCI Posting. */
+	NVRAM_DELAY();
+}
+
+/**
+ * qla2x00_nvram_request() - Sends read command to NVRAM and gets data from
+ *	NVRAM.
+ * @ha: HA context
+ * @nv_cmd: NVRAM command
+ *
+ * Bit definitions for NVRAM command:
+ *
+ *	Bit 26     = start bit
+ *	Bit 25, 24 = opcode
+ *	Bit 23-16  = address
+ *	Bit 15-0   = write data
+ *
+ * Returns the word read from nvram @addr.
+ */
+static uint16_t
+qla2x00_nvram_request(struct qla_hw_data *ha, uint32_t nv_cmd)
+{
+	uint8_t		cnt;
+	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+	uint16_t	data = 0;
+	uint16_t	reg_data;
+
+	/* Send command to NVRAM. */
+	nv_cmd <<= 5;
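+	/*
+	 * Left-justify the command word: bit 26 (the start bit) moves to
+	 * BIT_31 so the loop below can clock out the 11 command bits
+	 * (start, opcode, address) MSB first.
+	 */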
+	for (cnt = 0; cnt < 11; cnt++) {
+		if (nv_cmd & BIT_31)
+			qla2x00_nv_write(ha, NVR_DATA_OUT);
+		else
+			qla2x00_nv_write(ha, 0);
+		nv_cmd <<= 1;
+	}
+
+	/* Read data from NVRAM. */
+	for (cnt = 0; cnt < 16; cnt++) {
+		WRT_REG_WORD(&reg->nvram, NVR_SELECT | NVR_CLOCK);
+		RD_REG_WORD(&reg->nvram);	/* PCI Posting. */
+		NVRAM_DELAY();
+		data <<= 1;
+		reg_data = RD_REG_WORD(&reg->nvram);
+		if (reg_data & NVR_DATA_IN)
+			data |= BIT_0;
+		WRT_REG_WORD(&reg->nvram, NVR_SELECT);
+		RD_REG_WORD(&reg->nvram);	/* PCI Posting. */
+		NVRAM_DELAY();
+	}
+
+	/* Deselect chip. */
+	WRT_REG_WORD(&reg->nvram, NVR_DESELECT);
+	RD_REG_WORD(&reg->nvram);		/* PCI Posting. */
+	NVRAM_DELAY();
+
+	return data;
+}
+
+
+/**
  * qla2x00_get_nvram_word() - Calculates word position in NVRAM and calls the
  *	request routine to get the word from NVRAM.
  * @ha: HA context
@@ -75,7 +149,7 @@ qla2x00_unlock_nvram_access(scsi_qla_host_t *ha)
  * Returns the word read from nvram @addr.
  */
 static uint16_t
-qla2x00_get_nvram_word(scsi_qla_host_t *ha, uint32_t addr)
+qla2x00_get_nvram_word(struct qla_hw_data *ha, uint32_t addr)
 {
 	uint16_t	data;
 	uint32_t	nv_cmd;
@@ -88,13 +162,27 @@ qla2x00_get_nvram_word(scsi_qla_host_t *ha, uint32_t addr)
 }
 
 /**
+ * qla2x00_nv_deselect() - Deselect NVRAM operations.
+ * @ha: HA context
+ */
+static void
+qla2x00_nv_deselect(struct qla_hw_data *ha)
+{
+	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+
+	WRT_REG_WORD(&reg->nvram, NVR_DESELECT);
+	RD_REG_WORD(&reg->nvram);		/* PCI Posting. */
+	NVRAM_DELAY();
+}
+
+/**
  * qla2x00_write_nvram_word() - Write NVRAM data.
  * @ha: HA context
  * @addr: Address in NVRAM to write
  * @data: word to program
  */
 static void
-qla2x00_write_nvram_word(scsi_qla_host_t *ha, uint32_t addr, uint16_t data)
+qla2x00_write_nvram_word(struct qla_hw_data *ha, uint32_t addr, uint16_t data)
 {
 	int count;
 	uint16_t word;
@@ -132,7 +220,7 @@ qla2x00_write_nvram_word(scsi_qla_host_t *ha, uint32_t addr, uint16_t data)
 	do {
 		if (!--wait_cnt) {
 			DEBUG9_10(printk("%s(%ld): NVRAM didn't go ready...\n",
-			    __func__, ha->host_no));
+			    __func__, vha->host_no));
 			break;
 		}
 		NVRAM_DELAY();
@@ -150,8 +238,8 @@ qla2x00_write_nvram_word(scsi_qla_host_t *ha, uint32_t addr, uint16_t data)
 }
 
 static int
-qla2x00_write_nvram_word_tmo(scsi_qla_host_t *ha, uint32_t addr, uint16_t data,
-    uint32_t tmo)
+qla2x00_write_nvram_word_tmo(struct qla_hw_data *ha, uint32_t addr,
+	uint16_t data, uint32_t tmo)
 {
 	int ret, count;
 	uint16_t word;
@@ -209,102 +297,11 @@ qla2x00_write_nvram_word_tmo(scsi_qla_host_t *ha, uint32_t addr, uint16_t data,
 }
 
 /**
- * qla2x00_nvram_request() - Sends read command to NVRAM and gets data from
- *	NVRAM.
- * @ha: HA context
- * @nv_cmd: NVRAM command
- *
- * Bit definitions for NVRAM command:
- *
- *	Bit 26     = start bit
- *	Bit 25, 24 = opcode
- *	Bit 23-16  = address
- *	Bit 15-0   = write data
- *
- * Returns the word read from nvram @addr.
- */
-static uint16_t
-qla2x00_nvram_request(scsi_qla_host_t *ha, uint32_t nv_cmd)
-{
-	uint8_t		cnt;
-	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
-	uint16_t	data = 0;
-	uint16_t	reg_data;
-
-	/* Send command to NVRAM. */
-	nv_cmd <<= 5;
-	for (cnt = 0; cnt < 11; cnt++) {
-		if (nv_cmd & BIT_31)
-			qla2x00_nv_write(ha, NVR_DATA_OUT);
-		else
-			qla2x00_nv_write(ha, 0);
-		nv_cmd <<= 1;
-	}
-
-	/* Read data from NVRAM. */
-	for (cnt = 0; cnt < 16; cnt++) {
-		WRT_REG_WORD(&reg->nvram, NVR_SELECT | NVR_CLOCK);
-		RD_REG_WORD(&reg->nvram);	/* PCI Posting. */
-		NVRAM_DELAY();
-		data <<= 1;
-		reg_data = RD_REG_WORD(&reg->nvram);
-		if (reg_data & NVR_DATA_IN)
-			data |= BIT_0;
-		WRT_REG_WORD(&reg->nvram, NVR_SELECT);
-		RD_REG_WORD(&reg->nvram);	/* PCI Posting. */
-		NVRAM_DELAY();
-	}
-
-	/* Deselect chip. */
-	WRT_REG_WORD(&reg->nvram, NVR_DESELECT);
-	RD_REG_WORD(&reg->nvram);		/* PCI Posting. */
-	NVRAM_DELAY();
-
-	return (data);
-}
-
-/**
- * qla2x00_nv_write() - Clean NVRAM operations.
- * @ha: HA context
- */
-static void
-qla2x00_nv_deselect(scsi_qla_host_t *ha)
-{
-	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
-
-	WRT_REG_WORD(&reg->nvram, NVR_DESELECT);
-	RD_REG_WORD(&reg->nvram);		/* PCI Posting. */
-	NVRAM_DELAY();
-}
-
-/**
- * qla2x00_nv_write() - Prepare for NVRAM read/write operation.
- * @ha: HA context
- * @data: Serial interface selector
- */
-static void
-qla2x00_nv_write(scsi_qla_host_t *ha, uint16_t data)
-{
-	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
-
-	WRT_REG_WORD(&reg->nvram, data | NVR_SELECT | NVR_WRT_ENABLE);
-	RD_REG_WORD(&reg->nvram);		/* PCI Posting. */
-	NVRAM_DELAY();
-	WRT_REG_WORD(&reg->nvram, data | NVR_SELECT| NVR_CLOCK |
-	    NVR_WRT_ENABLE);
-	RD_REG_WORD(&reg->nvram);		/* PCI Posting. */
-	NVRAM_DELAY();
-	WRT_REG_WORD(&reg->nvram, data | NVR_SELECT | NVR_WRT_ENABLE);
-	RD_REG_WORD(&reg->nvram);		/* PCI Posting. */
-	NVRAM_DELAY();
-}
-
-/**
  * qla2x00_clear_nvram_protection() -
  * @ha: HA context
  */
 static int
-qla2x00_clear_nvram_protection(scsi_qla_host_t *ha)
+qla2x00_clear_nvram_protection(struct qla_hw_data *ha)
 {
 	int ret, stat;
 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
@@ -352,9 +349,8 @@ qla2x00_clear_nvram_protection(scsi_qla_host_t *ha)
 		wait_cnt = NVR_WAIT_CNT;
 		do {
 			if (!--wait_cnt) {
-				DEBUG9_10(printk("%s(%ld): NVRAM didn't go "
-				    "ready...\n", __func__,
-				    ha->host_no));
+				DEBUG9_10(qla_printk(KERN_WARNING, ha,
+				    "NVRAM didn't go ready...\n"));
 				break;
 			}
 			NVRAM_DELAY();
@@ -370,7 +366,7 @@ qla2x00_clear_nvram_protection(scsi_qla_host_t *ha)
 }
 
 static void
-qla2x00_set_nvram_protection(scsi_qla_host_t *ha, int stat)
+qla2x00_set_nvram_protection(struct qla_hw_data *ha, int stat)
 {
 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
 	uint32_t word, wait_cnt;
@@ -412,8 +408,7 @@ qla2x00_set_nvram_protection(scsi_qla_host_t *ha, int stat)
 	wait_cnt = NVR_WAIT_CNT;
 	do {
 		if (!--wait_cnt) {
-			DEBUG9_10(printk("%s(%ld): NVRAM didn't go ready...\n",
-			    __func__, ha->host_no));
+			DEBUG9_10(qla_printk("NVRAM didn't go ready...\n"));
 			break;
 		}
 		NVRAM_DELAY();
@@ -454,7 +449,7 @@ nvram_data_to_access_addr(uint32_t naddr)
 }
 
 static uint32_t
-qla24xx_read_flash_dword(scsi_qla_host_t *ha, uint32_t addr)
+qla24xx_read_flash_dword(struct qla_hw_data *ha, uint32_t addr)
 {
 	int rval;
 	uint32_t cnt, data;
@@ -482,21 +477,20 @@ qla24xx_read_flash_dword(scsi_qla_host_t *ha, uint32_t addr)
 }
 
 uint32_t *
-qla24xx_read_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr,
+qla24xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
     uint32_t dwords)
 {
 	uint32_t i;
-
 	/* Dword reads to flash. */
 	for (i = 0; i < dwords; i++, faddr++)
-		dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(ha,
+		dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(vha->hw,
 		    flash_data_to_access_addr(faddr)));
 
 	return dwptr;
 }
 
 static int
-qla24xx_write_flash_dword(scsi_qla_host_t *ha, uint32_t addr, uint32_t data)
+qla24xx_write_flash_dword(struct qla_hw_data *ha, uint32_t addr, uint32_t data)
 {
 	int rval;
 	uint32_t cnt;
@@ -519,7 +513,7 @@ qla24xx_write_flash_dword(scsi_qla_host_t *ha, uint32_t addr, uint32_t data)
 }
 
 static void
-qla24xx_get_flash_manufacturer(scsi_qla_host_t *ha, uint8_t *man_id,
+qla24xx_get_flash_manufacturer(struct qla_hw_data *ha, uint8_t *man_id,
     uint8_t *flash_id)
 {
 	uint32_t ids;
@@ -544,7 +538,7 @@ qla24xx_get_flash_manufacturer(scsi_qla_host_t *ha, uint8_t *man_id,
 }
 
 static int
-qla2xxx_find_flt_start(scsi_qla_host_t *ha, uint32_t *start)
+qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start)
 {
 	const char *loc, *locations[] = { "DEF", "PCI" };
 	uint32_t pcihdr, pcids;
@@ -552,6 +546,8 @@ qla2xxx_find_flt_start(scsi_qla_host_t *ha, uint32_t *start)
 	uint8_t *buf, *bcode, last_image;
 	uint16_t cnt, chksum, *wptr;
 	struct qla_flt_location *fltl;
+	struct qla_hw_data *ha = vha->hw;
+	struct req_que *req = ha->req_q_map[0];
 
 	/*
 	 * FLT-location structure resides after the last PCI region.
@@ -563,20 +559,20 @@ qla2xxx_find_flt_start(scsi_qla_host_t *ha, uint32_t *start)
 	    FA_FLASH_LAYOUT_ADDR;
 
 	/* Begin with first PCI expansion ROM header. */
-	buf = (uint8_t *)ha->request_ring;
-	dcode = (uint32_t *)ha->request_ring;
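+	/* The base request ring doubles as a scratch buffer for these reads. */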
+	buf = (uint8_t *)req->ring;
+	dcode = (uint32_t *)req->ring;
 	pcihdr = 0;
 	last_image = 1;
 	do {
 		/* Verify PCI expansion ROM header. */
-		qla24xx_read_flash_data(ha, dcode, pcihdr >> 2, 0x20);
+		qla24xx_read_flash_data(vha, dcode, pcihdr >> 2, 0x20);
 		bcode = buf + (pcihdr % 4);
 		if (bcode[0x0] != 0x55 || bcode[0x1] != 0xaa)
 			goto end;
 
 		/* Locate PCI data structure. */
 		pcids = pcihdr + ((bcode[0x19] << 8) | bcode[0x18]);
-		qla24xx_read_flash_data(ha, dcode, pcids >> 2, 0x20);
+		qla24xx_read_flash_data(vha, dcode, pcids >> 2, 0x20);
 		bcode = buf + (pcihdr % 4);
 
 		/* Validate signature of PCI data structure. */
@@ -591,14 +587,14 @@ qla2xxx_find_flt_start(scsi_qla_host_t *ha, uint32_t *start)
 	} while (!last_image);
 
 	/* Now verify FLT-location structure. */
-	fltl = (struct qla_flt_location *)ha->request_ring;
-	qla24xx_read_flash_data(ha, dcode, pcihdr >> 2,
+	fltl = (struct qla_flt_location *)req->ring;
+	qla24xx_read_flash_data(vha, dcode, pcihdr >> 2,
 	    sizeof(struct qla_flt_location) >> 2);
 	if (fltl->sig[0] != 'Q' || fltl->sig[1] != 'F' ||
 	    fltl->sig[2] != 'L' || fltl->sig[3] != 'T')
 		goto end;
 
-	wptr = (uint16_t *)ha->request_ring;
+	wptr = (uint16_t *)req->ring;
 	cnt = sizeof(struct qla_flt_location) >> 1;
 	for (chksum = 0; cnt; cnt--)
 		chksum += le16_to_cpu(*wptr++);
@@ -619,7 +615,7 @@ end:
 }
 
 static void
-qla2xxx_get_flt_info(scsi_qla_host_t *ha, uint32_t flt_addr)
+qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
 {
 	const char *loc, *locations[] = { "DEF", "FLT" };
 	uint16_t *wptr;
@@ -627,12 +623,14 @@ qla2xxx_get_flt_info(scsi_qla_host_t *ha, uint32_t flt_addr)
 	uint32_t start;
 	struct qla_flt_header *flt;
 	struct qla_flt_region *region;
+	struct qla_hw_data *ha = vha->hw;
+	struct req_que *req = ha->req_q_map[0];
 
 	ha->flt_region_flt = flt_addr;
-	wptr = (uint16_t *)ha->request_ring;
-	flt = (struct qla_flt_header *)ha->request_ring;
+	wptr = (uint16_t *)req->ring;
+	flt = (struct qla_flt_header *)req->ring;
 	region = (struct qla_flt_region *)&flt[1];
-	ha->isp_ops->read_optrom(ha, (uint8_t *)ha->request_ring,
+	ha->isp_ops->read_optrom(vha, (uint8_t *)req->ring,
 	    flt_addr << 2, OPTROM_BURST_SIZE);
 	if (*wptr == __constant_cpu_to_le16(0xffff))
 		goto no_flash_data;
@@ -720,7 +718,7 @@ done:
 }
 
 static void
-qla2xxx_get_fdt_info(scsi_qla_host_t *ha)
+qla2xxx_get_fdt_info(scsi_qla_host_t *vha)
 {
 #define FLASH_BLK_SIZE_4K	0x1000
 #define FLASH_BLK_SIZE_32K	0x8000
@@ -731,10 +729,12 @@ qla2xxx_get_fdt_info(scsi_qla_host_t *ha)
 	struct qla_fdt_layout *fdt;
 	uint8_t	man_id, flash_id;
 	uint16_t mid, fid;
+	struct qla_hw_data *ha = vha->hw;
+	struct req_que *req = ha->req_q_map[0];
 
-	wptr = (uint16_t *)ha->request_ring;
-	fdt = (struct qla_fdt_layout *)ha->request_ring;
-	ha->isp_ops->read_optrom(ha, (uint8_t *)ha->request_ring,
+	wptr = (uint16_t *)req->ring;
+	fdt = (struct qla_fdt_layout *)req->ring;
+	ha->isp_ops->read_optrom(vha, (uint8_t *)req->ring,
 	    ha->flt_region_fdt << 2, OPTROM_BURST_SIZE);
 	if (*wptr == __constant_cpu_to_le16(0xffff))
 		goto no_flash_data;
@@ -807,38 +807,41 @@ done:
 }
 
 int
-qla2xxx_get_flash_info(scsi_qla_host_t *ha)
+qla2xxx_get_flash_info(scsi_qla_host_t *vha)
 {
 	int ret;
 	uint32_t flt_addr;
+	struct qla_hw_data *ha = vha->hw;
 
 	if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
 		return QLA_SUCCESS;
 
-	ret = qla2xxx_find_flt_start(ha, &flt_addr);
+	ret = qla2xxx_find_flt_start(vha, &flt_addr);
 	if (ret != QLA_SUCCESS)
 		return ret;
 
-	qla2xxx_get_flt_info(ha, flt_addr);
-	qla2xxx_get_fdt_info(ha);
+	qla2xxx_get_flt_info(vha, flt_addr);
+	qla2xxx_get_fdt_info(vha);
 
 	return QLA_SUCCESS;
 }
 
 void
-qla2xxx_flash_npiv_conf(scsi_qla_host_t *ha)
+qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
 {
 #define NPIV_CONFIG_SIZE	(16*1024)
 	void *data;
 	uint16_t *wptr;
 	uint16_t cnt, chksum;
+	int i;
 	struct qla_npiv_header hdr;
 	struct qla_npiv_entry *entry;
+	struct qla_hw_data *ha = vha->hw;
 
 	if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
 		return;
 
-	ha->isp_ops->read_optrom(ha, (uint8_t *)&hdr,
+	ha->isp_ops->read_optrom(vha, (uint8_t *)&hdr,
 	    ha->flt_region_npiv_conf << 2, sizeof(struct qla_npiv_header));
 	if (hdr.version == __constant_cpu_to_le16(0xffff))
 		return;
@@ -857,7 +860,7 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *ha)
 		return;
 	}
 
-	ha->isp_ops->read_optrom(ha, (uint8_t *)data,
+	ha->isp_ops->read_optrom(vha, (uint8_t *)data,
 	    ha->flt_region_npiv_conf << 2, NPIV_CONFIG_SIZE);
 
 	cnt = (sizeof(struct qla_npiv_header) + le16_to_cpu(hdr.entries) *
@@ -874,7 +877,7 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *ha)
 
 	entry = data + sizeof(struct qla_npiv_header);
 	cnt = le16_to_cpu(hdr.entries);
-	for ( ; cnt; cnt--, entry++) {
+	for (i = 0; cnt; cnt--, entry++, i++) {
 		uint16_t flags;
 		struct fc_vport_identifiers vid;
 		struct fc_vport *vport;
@@ -892,25 +895,29 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *ha)
 		vid.port_name = wwn_to_u64(entry->port_name);
 		vid.node_name = wwn_to_u64(entry->node_name);
 
+		memcpy(&ha->npiv_info[i], entry, sizeof(struct qla_npiv_entry));
+
 		DEBUG2(qla_printk(KERN_DEBUG, ha, "NPIV[%02x]: wwpn=%llx "
-		    "wwnn=%llx vf_id=0x%x qos=0x%x.\n", cnt,
-		    (unsigned long long)vid.port_name,
-		    (unsigned long long)vid.node_name,
-		    le16_to_cpu(entry->vf_id), le16_to_cpu(entry->qos)));
-
-		vport = fc_vport_create(ha->host, 0, &vid);
-		if (!vport)
-			qla_printk(KERN_INFO, ha, "NPIV-Config: Failed to "
-			    "create vport [%02x]: wwpn=%llx wwnn=%llx.\n", cnt,
-			    (unsigned long long)vid.port_name,
-			    (unsigned long long)vid.node_name);
+			"wwnn=%llx vf_id=0x%x Q_qos=0x%x F_qos=0x%x.\n", cnt,
+			(unsigned long long)vid.port_name,
+			(unsigned long long)vid.node_name,
+			le16_to_cpu(entry->vf_id),
+			entry->q_qos, entry->f_qos));
+
+		if (i < QLA_PRECONFIG_VPORTS) {
+			vport = fc_vport_create(vha->host, 0, &vid);
+			if (!vport)
+				qla_printk(KERN_INFO, ha,
+				"NPIV-Config: Failed to create vport [%02x]: "
+				"wwpn=%llx wwnn=%llx.\n", cnt,
+				(unsigned long long)vid.port_name,
+				(unsigned long long)vid.node_name);
+		}
 	}
 done:
 	kfree(data);
 }
 
 static void
-qla24xx_unprotect_flash(scsi_qla_host_t *ha)
+qla24xx_unprotect_flash(struct qla_hw_data *ha)
 {
 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
 
@@ -929,7 +936,7 @@ qla24xx_unprotect_flash(scsi_qla_host_t *ha)
 }
 
 static void
-qla24xx_protect_flash(scsi_qla_host_t *ha)
+qla24xx_protect_flash(struct qla_hw_data *ha)
 {
 	uint32_t cnt;
 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
@@ -955,7 +962,7 @@ skip_wrt_protect:
 }
 
 static int
-qla24xx_write_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr,
+qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
     uint32_t dwords)
 {
 	int ret;
@@ -965,6 +972,7 @@ qla24xx_write_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr,
 	dma_addr_t optrom_dma;
 	void *optrom = NULL;
 	uint32_t *s, *d;
+	struct qla_hw_data *ha = vha->hw;
 
 	ret = QLA_SUCCESS;
 
@@ -1002,9 +1010,8 @@ qla24xx_write_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr,
 			    (fdata & 0xff00) |((fdata << 16) &
 			    0xff0000) | ((fdata >> 16) & 0xff));
 			if (ret != QLA_SUCCESS) {
-				DEBUG9(printk("%s(%ld) Unable to flash "
-				    "sector: address=%x.\n", __func__,
-				    ha->host_no, faddr));
+				DEBUG9(qla_printk("Unable to flash sector: "
+				    "address=%x.\n", faddr));
 				break;
 			}
 		}
@@ -1016,7 +1023,7 @@ qla24xx_write_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr,
 			    miter < OPTROM_BURST_DWORDS; miter++, s++, d++)
 				*s = cpu_to_le32(*d);
 
-			ret = qla2x00_load_ram(ha, optrom_dma,
+			ret = qla2x00_load_ram(vha, optrom_dma,
 			    flash_data_to_access_addr(faddr),
 			    OPTROM_BURST_DWORDS);
 			if (ret != QLA_SUCCESS) {
@@ -1044,7 +1051,7 @@ qla24xx_write_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr,
 		if (ret != QLA_SUCCESS) {
 			DEBUG9(printk("%s(%ld) Unable to program flash "
 			    "address=%x data=%x.\n", __func__,
-			    ha->host_no, faddr, *dwptr));
+			    vha->host_no, faddr, *dwptr));
 			break;
 		}
 
@@ -1067,11 +1074,12 @@ qla24xx_write_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr,
 }
 
 uint8_t *
-qla2x00_read_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
+qla2x00_read_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
     uint32_t bytes)
 {
 	uint32_t i;
 	uint16_t *wptr;
+	struct qla_hw_data *ha = vha->hw;
 
 	/* Word reads to NVRAM via registers. */
 	wptr = (uint16_t *)buf;
@@ -1085,7 +1093,7 @@ qla2x00_read_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
 }
 
 uint8_t *
-qla24xx_read_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
+qla24xx_read_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
     uint32_t bytes)
 {
 	uint32_t i;
@@ -1094,20 +1102,21 @@ qla24xx_read_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
 	/* Dword reads to flash. */
 	dwptr = (uint32_t *)buf;
 	for (i = 0; i < bytes >> 2; i++, naddr++)
-		dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(ha,
+		dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(vha->hw,
 		    nvram_data_to_access_addr(naddr)));
 
 	return buf;
 }
 
 int
-qla2x00_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
+qla2x00_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
     uint32_t bytes)
 {
 	int ret, stat;
 	uint32_t i;
 	uint16_t *wptr;
 	unsigned long flags;
+	struct qla_hw_data *ha = vha->hw;
 
 	ret = QLA_SUCCESS;
 
@@ -1134,12 +1143,13 @@ qla2x00_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
 }
 
 int
-qla24xx_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
+qla24xx_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
     uint32_t bytes)
 {
 	int ret;
 	uint32_t i;
 	uint32_t *dwptr;
+	struct qla_hw_data *ha = vha->hw;
 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
 
 	ret = QLA_SUCCESS;
@@ -1162,9 +1172,8 @@ qla24xx_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
 		    nvram_data_to_access_addr(naddr),
 		    cpu_to_le32(*dwptr));
 		if (ret != QLA_SUCCESS) {
-			DEBUG9(printk("%s(%ld) Unable to program "
-			    "nvram address=%x data=%x.\n", __func__,
-			    ha->host_no, naddr, *dwptr));
+			DEBUG9(qla_printk("Unable to program nvram address=%x "
+			    "data=%x.\n", naddr, *dwptr));
 			break;
 		}
 	}
@@ -1182,11 +1191,12 @@ qla24xx_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
 }
 
 uint8_t *
-qla25xx_read_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
+qla25xx_read_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
     uint32_t bytes)
 {
 	uint32_t i;
 	uint32_t *dwptr;
+	struct qla_hw_data *ha = vha->hw;
 
 	/* Dword reads to flash. */
 	dwptr = (uint32_t *)buf;
@@ -1199,19 +1209,20 @@ qla25xx_read_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
 }
 
 int
-qla25xx_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
+qla25xx_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
     uint32_t bytes)
 {
+	struct qla_hw_data *ha = vha->hw;
 #define RMW_BUFFER_SIZE	(64 * 1024)
 	uint8_t *dbuf;
 
 	dbuf = vmalloc(RMW_BUFFER_SIZE);
 	if (!dbuf)
 		return QLA_MEMORY_ALLOC_FAILED;
-	ha->isp_ops->read_optrom(ha, dbuf, ha->flt_region_vpd_nvram << 2,
+	ha->isp_ops->read_optrom(vha, dbuf, ha->flt_region_vpd_nvram << 2,
 	    RMW_BUFFER_SIZE);
 	memcpy(dbuf + (naddr << 2), buf, bytes);
-	ha->isp_ops->write_optrom(ha, dbuf, ha->flt_region_vpd_nvram << 2,
+	ha->isp_ops->write_optrom(vha, dbuf, ha->flt_region_vpd_nvram << 2,
 	    RMW_BUFFER_SIZE);
 	vfree(dbuf);
 
@@ -1219,7 +1230,7 @@ qla25xx_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
 }
 
 static inline void
-qla2x00_flip_colors(scsi_qla_host_t *ha, uint16_t *pflags)
+qla2x00_flip_colors(struct qla_hw_data *ha, uint16_t *pflags)
 {
 	if (IS_QLA2322(ha)) {
 		/* Flip all colors. */
@@ -1249,12 +1260,13 @@ qla2x00_flip_colors(scsi_qla_host_t *ha, uint16_t *pflags)
 #define PIO_REG(h, r) ((h)->pio_address + offsetof(struct device_reg_2xxx, r))
 
 void
-qla2x00_beacon_blink(struct scsi_qla_host *ha)
+qla2x00_beacon_blink(struct scsi_qla_host *vha)
 {
 	uint16_t gpio_enable;
 	uint16_t gpio_data;
 	uint16_t led_color = 0;
 	unsigned long flags;
+	struct qla_hw_data *ha = vha->hw;
 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
 
 	spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -1298,17 +1310,18 @@ qla2x00_beacon_blink(struct scsi_qla_host *ha)
 }
 
 int
-qla2x00_beacon_on(struct scsi_qla_host *ha)
+qla2x00_beacon_on(struct scsi_qla_host *vha)
 {
 	uint16_t gpio_enable;
 	uint16_t gpio_data;
 	unsigned long flags;
+	struct qla_hw_data *ha = vha->hw;
 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
 
 	ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
 	ha->fw_options[1] |= FO1_DISABLE_GPIO6_7;
 
-	if (qla2x00_set_fw_options(ha, ha->fw_options) != QLA_SUCCESS) {
+	if (qla2x00_set_fw_options(vha, ha->fw_options) != QLA_SUCCESS) {
 		qla_printk(KERN_WARNING, ha,
 		    "Unable to update fw options (beacon on).\n");
 		return QLA_FUNCTION_FAILED;
@@ -1354,9 +1367,10 @@ qla2x00_beacon_on(struct scsi_qla_host *ha)
 }
 
 int
-qla2x00_beacon_off(struct scsi_qla_host *ha)
+qla2x00_beacon_off(struct scsi_qla_host *vha)
 {
 	int rval = QLA_SUCCESS;
+	struct qla_hw_data *ha = vha->hw;
 
 	ha->beacon_blink_led = 0;
 
@@ -1366,12 +1380,12 @@ qla2x00_beacon_off(struct scsi_qla_host *ha)
 	else
 		ha->beacon_color_state = QLA_LED_GRN_ON;
 
-	ha->isp_ops->beacon_blink(ha);	/* This turns green LED off */
+	ha->isp_ops->beacon_blink(vha);	/* This turns green LED off */
 
 	ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
 	ha->fw_options[1] &= ~FO1_DISABLE_GPIO6_7;
 
-	rval = qla2x00_set_fw_options(ha, ha->fw_options);
+	rval = qla2x00_set_fw_options(vha, ha->fw_options);
 	if (rval != QLA_SUCCESS)
 		qla_printk(KERN_WARNING, ha,
 		    "Unable to update fw options (beacon off).\n");
@@ -1380,7 +1394,7 @@ qla2x00_beacon_off(struct scsi_qla_host *ha)
 
 
 static inline void
-qla24xx_flip_colors(scsi_qla_host_t *ha, uint16_t *pflags)
+qla24xx_flip_colors(struct qla_hw_data *ha, uint16_t *pflags)
 {
 	/* Flip all colors. */
 	if (ha->beacon_color_state == QLA_LED_ALL_ON) {
@@ -1395,11 +1409,12 @@ qla24xx_flip_colors(scsi_qla_host_t *ha, uint16_t *pflags)
 }
 
 void
-qla24xx_beacon_blink(struct scsi_qla_host *ha)
+qla24xx_beacon_blink(struct scsi_qla_host *vha)
 {
 	uint16_t led_color = 0;
 	uint32_t gpio_data;
 	unsigned long flags;
+	struct qla_hw_data *ha = vha->hw;
 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
 
 	/* Save the Original GPIOD. */
@@ -1428,20 +1443,21 @@ qla24xx_beacon_blink(struct scsi_qla_host *ha)
 }
 
 int
-qla24xx_beacon_on(struct scsi_qla_host *ha)
+qla24xx_beacon_on(struct scsi_qla_host *vha)
 {
 	uint32_t gpio_data;
 	unsigned long flags;
+	struct qla_hw_data *ha = vha->hw;
 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
 
 	if (ha->beacon_blink_led == 0) {
 		/* Enable firmware for update */
 		ha->fw_options[1] |= ADD_FO1_DISABLE_GPIO_LED_CTRL;
 
-		if (qla2x00_set_fw_options(ha, ha->fw_options) != QLA_SUCCESS)
+		if (qla2x00_set_fw_options(vha, ha->fw_options) != QLA_SUCCESS)
 			return QLA_FUNCTION_FAILED;
 
-		if (qla2x00_get_fw_options(ha, ha->fw_options) !=
+		if (qla2x00_get_fw_options(vha, ha->fw_options) !=
 		    QLA_SUCCESS) {
 			qla_printk(KERN_WARNING, ha,
 			    "Unable to update fw options (beacon on).\n");
@@ -1469,16 +1485,17 @@ qla24xx_beacon_on(struct scsi_qla_host *ha)
 }
 
 int
-qla24xx_beacon_off(struct scsi_qla_host *ha)
+qla24xx_beacon_off(struct scsi_qla_host *vha)
 {
 	uint32_t gpio_data;
 	unsigned long flags;
+	struct qla_hw_data *ha = vha->hw;
 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
 
 	ha->beacon_blink_led = 0;
 	ha->beacon_color_state = QLA_LED_ALL_ON;
 
-	ha->isp_ops->beacon_blink(ha);	/* Will flip to all off. */
+	ha->isp_ops->beacon_blink(vha);	/* Will flip to all off. */
 
 	/* Give control back to firmware. */
 	spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -1492,13 +1509,13 @@ qla24xx_beacon_off(struct scsi_qla_host *ha)
 
 	ha->fw_options[1] &= ~ADD_FO1_DISABLE_GPIO_LED_CTRL;
 
-	if (qla2x00_set_fw_options(ha, ha->fw_options) != QLA_SUCCESS) {
+	if (qla2x00_set_fw_options(vha, ha->fw_options) != QLA_SUCCESS) {
 		qla_printk(KERN_WARNING, ha,
 		    "Unable to update fw options (beacon off).\n");
 		return QLA_FUNCTION_FAILED;
 	}
 
-	if (qla2x00_get_fw_options(ha, ha->fw_options) != QLA_SUCCESS) {
+	if (qla2x00_get_fw_options(vha, ha->fw_options) != QLA_SUCCESS) {
 		qla_printk(KERN_WARNING, ha,
 		    "Unable to get fw options (beacon off).\n");
 		return QLA_FUNCTION_FAILED;
@@ -1517,7 +1534,7 @@ qla24xx_beacon_off(struct scsi_qla_host *ha)
  * @ha: HA context
  */
 static void
-qla2x00_flash_enable(scsi_qla_host_t *ha)
+qla2x00_flash_enable(struct qla_hw_data *ha)
 {
 	uint16_t data;
 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
@@ -1533,7 +1550,7 @@ qla2x00_flash_enable(scsi_qla_host_t *ha)
  * @ha: HA context
  */
 static void
-qla2x00_flash_disable(scsi_qla_host_t *ha)
+qla2x00_flash_disable(struct qla_hw_data *ha)
 {
 	uint16_t data;
 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
@@ -1554,7 +1571,7 @@ qla2x00_flash_disable(scsi_qla_host_t *ha)
  * Returns the byte read from flash @addr.
  */
 static uint8_t
-qla2x00_read_flash_byte(scsi_qla_host_t *ha, uint32_t addr)
+qla2x00_read_flash_byte(struct qla_hw_data *ha, uint32_t addr)
 {
 	uint16_t data;
 	uint16_t bank_select;
@@ -1615,7 +1632,7 @@ qla2x00_read_flash_byte(scsi_qla_host_t *ha, uint32_t addr)
  * @data: Data to write
  */
 static void
-qla2x00_write_flash_byte(scsi_qla_host_t *ha, uint32_t addr, uint8_t data)
+qla2x00_write_flash_byte(struct qla_hw_data *ha, uint32_t addr, uint8_t data)
 {
 	uint16_t bank_select;
 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
@@ -1678,7 +1695,7 @@ qla2x00_write_flash_byte(scsi_qla_host_t *ha, uint32_t addr, uint8_t data)
  * Returns 0 on success, else non-zero.
  */
 static int
-qla2x00_poll_flash(scsi_qla_host_t *ha, uint32_t addr, uint8_t poll_data,
+qla2x00_poll_flash(struct qla_hw_data *ha, uint32_t addr, uint8_t poll_data,
     uint8_t man_id, uint8_t flash_id)
 {
 	int status;
@@ -1718,8 +1735,8 @@ qla2x00_poll_flash(scsi_qla_host_t *ha, uint32_t addr, uint8_t poll_data,
  * Returns 0 on success, else non-zero.
  */
 static int
-qla2x00_program_flash_address(scsi_qla_host_t *ha, uint32_t addr, uint8_t data,
-    uint8_t man_id, uint8_t flash_id)
+qla2x00_program_flash_address(struct qla_hw_data *ha, uint32_t addr,
+    uint8_t data, uint8_t man_id, uint8_t flash_id)
 {
 	/* Write Program Command Sequence. */
 	if (IS_OEM_001(ha)) {
@@ -1755,7 +1772,7 @@ qla2x00_program_flash_address(scsi_qla_host_t *ha, uint32_t addr, uint8_t data,
  * Returns 0 on success, else non-zero.
  */
 static int
-qla2x00_erase_flash(scsi_qla_host_t *ha, uint8_t man_id, uint8_t flash_id)
+qla2x00_erase_flash(struct qla_hw_data *ha, uint8_t man_id, uint8_t flash_id)
 {
 	/* Individual Sector Erase Command Sequence */
 	if (IS_OEM_001(ha)) {
@@ -1791,7 +1808,7 @@ qla2x00_erase_flash(scsi_qla_host_t *ha, uint8_t man_id, uint8_t flash_id)
  * Returns 0 on success, else non-zero.
  */
 static int
-qla2x00_erase_flash_sector(scsi_qla_host_t *ha, uint32_t addr,
+qla2x00_erase_flash_sector(struct qla_hw_data *ha, uint32_t addr,
     uint32_t sec_mask, uint8_t man_id, uint8_t flash_id)
 {
 	/* Individual Sector Erase Command Sequence */
@@ -1817,7 +1834,7 @@ qla2x00_erase_flash_sector(scsi_qla_host_t *ha, uint32_t addr,
  * @flash_id: Flash ID
  */
 static void
-qla2x00_get_flash_manufacturer(scsi_qla_host_t *ha, uint8_t *man_id,
+qla2x00_get_flash_manufacturer(struct qla_hw_data *ha, uint8_t *man_id,
     uint8_t *flash_id)
 {
 	qla2x00_write_flash_byte(ha, 0x5555, 0xaa);
@@ -1831,8 +1848,8 @@ qla2x00_get_flash_manufacturer(scsi_qla_host_t *ha, uint8_t *man_id,
 }
 
 static void
-qla2x00_read_flash_data(scsi_qla_host_t *ha, uint8_t *tmp_buf, uint32_t saddr,
-        uint32_t length)
+qla2x00_read_flash_data(struct qla_hw_data *ha, uint8_t *tmp_buf,
+	uint32_t saddr, uint32_t length)
 {
 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
 	uint32_t midpoint, ilength;
@@ -1856,14 +1873,15 @@ qla2x00_read_flash_data(scsi_qla_host_t *ha, uint8_t *tmp_buf, uint32_t saddr,
 }
 
 static inline void
-qla2x00_suspend_hba(struct scsi_qla_host *ha)
+qla2x00_suspend_hba(struct scsi_qla_host *vha)
 {
 	int cnt;
 	unsigned long flags;
+	struct qla_hw_data *ha = vha->hw;
 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
 
 	/* Suspend HBA. */
-	scsi_block_requests(ha->host);
+	scsi_block_requests(vha->host);
 	ha->isp_ops->disable_intrs(ha);
 	set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
 
@@ -1884,26 +1902,29 @@ qla2x00_suspend_hba(struct scsi_qla_host *ha)
 }
 
 static inline void
-qla2x00_resume_hba(struct scsi_qla_host *ha)
+qla2x00_resume_hba(struct scsi_qla_host *vha)
 {
+	struct qla_hw_data *ha = vha->hw;
+
 	/* Resume HBA. */
 	clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
-	set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
-	qla2xxx_wake_dpc(ha);
-	qla2x00_wait_for_hba_online(ha);
-	scsi_unblock_requests(ha->host);
+	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+	qla2xxx_wake_dpc(vha);
+	qla2x00_wait_for_hba_online(vha);
+	scsi_unblock_requests(vha->host);
 }
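
Every flash and option-ROM writer in this file now brackets the update with
the same pair of helpers: qla2x00_suspend_hba() blocks new SCSI requests,
disables interrupts, and flags the mailbox path, while qla2x00_resume_hba()
clears the flag, forces an ISP abort (so the RISC is reset after the flash
was touched), and unblocks the queue. A condensed sketch of the resulting
control flow; the flash programming itself is elided:

    static int example_update_flash(struct scsi_qla_host *vha)
    {
            int rval = QLA_SUCCESS;

            qla2x00_suspend_hba(vha);       /* quiesce the HBA */

            /* ... program flash sectors here, setting rval on error ... */

            qla2x00_resume_hba(vha);        /* schedule ISP abort, resume I/O */
            return rval;
    }
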
 
 uint8_t *
-qla2x00_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
+qla2x00_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
     uint32_t offset, uint32_t length)
 {
 	uint32_t addr, midpoint;
 	uint8_t *data;
+	struct qla_hw_data *ha = vha->hw;
 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
 
 	/* Suspend HBA. */
-	qla2x00_suspend_hba(ha);
+	qla2x00_suspend_hba(vha);
 
 	/* Go with read. */
 	midpoint = ha->optrom_size / 2;
@@ -1922,13 +1943,13 @@ qla2x00_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
 	qla2x00_flash_disable(ha);
 
 	/* Resume HBA. */
-	qla2x00_resume_hba(ha);
+	qla2x00_resume_hba(vha);
 
 	return buf;
 }
 
 int
-qla2x00_write_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
+qla2x00_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
     uint32_t offset, uint32_t length)
 {
 
@@ -1936,10 +1957,11 @@ qla2x00_write_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
 	uint8_t man_id, flash_id, sec_number, data;
 	uint16_t wd;
 	uint32_t addr, liter, sec_mask, rest_addr;
+	struct qla_hw_data *ha = vha->hw;
 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
 
 	/* Suspend HBA. */
-	qla2x00_suspend_hba(ha);
+	qla2x00_suspend_hba(vha);
 
 	rval = QLA_SUCCESS;
 	sec_number = 0;
@@ -2139,55 +2161,58 @@ update_flash:
 	qla2x00_flash_disable(ha);
 
 	/* Resume HBA. */
-	qla2x00_resume_hba(ha);
+	qla2x00_resume_hba(vha);
 
 	return rval;
 }
 
 uint8_t *
-qla24xx_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
+qla24xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
     uint32_t offset, uint32_t length)
 {
+	struct qla_hw_data *ha = vha->hw;
+
 	/* Suspend HBA. */
-	scsi_block_requests(ha->host);
+	scsi_block_requests(vha->host);
 	set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
 
 	/* Go with read. */
-	qla24xx_read_flash_data(ha, (uint32_t *)buf, offset >> 2, length >> 2);
+	qla24xx_read_flash_data(vha, (uint32_t *)buf, offset >> 2, length >> 2);
 
 	/* Resume HBA. */
 	clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
-	scsi_unblock_requests(ha->host);
+	scsi_unblock_requests(vha->host);
 
 	return buf;
 }
 
 int
-qla24xx_write_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
+qla24xx_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
     uint32_t offset, uint32_t length)
 {
 	int rval;
+	struct qla_hw_data *ha = vha->hw;
 
 	/* Suspend HBA. */
-	scsi_block_requests(ha->host);
+	scsi_block_requests(vha->host);
 	set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
 
 	/* Go with write. */
-	rval = qla24xx_write_flash_data(ha, (uint32_t *)buf, offset >> 2,
+	rval = qla24xx_write_flash_data(vha, (uint32_t *)buf, offset >> 2,
 	    length >> 2);
 
 	/* Resume HBA -- RISC reset needed. */
 	clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
-	set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
-	qla2xxx_wake_dpc(ha);
-	qla2x00_wait_for_hba_online(ha);
-	scsi_unblock_requests(ha->host);
+	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+	qla2xxx_wake_dpc(vha);
+	qla2x00_wait_for_hba_online(vha);
+	scsi_unblock_requests(vha->host);
 
 	return rval;
 }
 
 uint8_t *
-qla25xx_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
+qla25xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
     uint32_t offset, uint32_t length)
 {
 	int rval;
@@ -2195,6 +2220,7 @@ qla25xx_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
 	void *optrom;
 	uint8_t *pbuf;
 	uint32_t faddr, left, burst;
+	struct qla_hw_data *ha = vha->hw;
 
 	if (offset & 0xfff)
 		goto slow_read;
@@ -2219,7 +2245,7 @@ qla25xx_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
 		if (burst > left)
 			burst = left;
 
-		rval = qla2x00_dump_ram(ha, optrom_dma,
+		rval = qla2x00_dump_ram(vha, optrom_dma,
 		    flash_data_to_access_addr(faddr), burst);
 		if (rval) {
 			qla_printk(KERN_WARNING, ha,
@@ -2248,7 +2274,7 @@ qla25xx_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
 	return buf;
 
 slow_read:
-    return qla24xx_read_optrom_data(ha, buf, offset, length);
+    return qla24xx_read_optrom_data(vha, buf, offset, length);
 }
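
qla25xx_read_optrom_data() is the one reader with a fast path: aligned
requests are fetched with qla2x00_dump_ram() in DMA bursts and copied out of
a bounce buffer, and an unaligned offset or a failed burst drops to the
register-based qla24xx routine. A trimmed sketch of the loop's shape —
counts here are in dwords, and the error reporting is elided:

    left = length >> 2;                     /* dwords remaining */
    while (left > 0) {
            if (burst > left)
                    burst = left;
            if (qla2x00_dump_ram(vha, optrom_dma,
                flash_data_to_access_addr(faddr), burst))
                    goto slow_read;         /* fall back on any error */
            memcpy(pbuf, optrom, burst << 2);
            pbuf  += burst << 2;
            faddr += burst;
            left  -= burst;
    }
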
 
 /**
@@ -2270,7 +2296,7 @@ slow_read:
  * Returns QLA_SUCCESS on successful retrieval of version.
  */
 static void
-qla2x00_get_fcode_version(scsi_qla_host_t *ha, uint32_t pcids)
+qla2x00_get_fcode_version(struct qla_hw_data *ha, uint32_t pcids)
 {
 	int ret = QLA_FUNCTION_FAILED;
 	uint32_t istart, iend, iter, vend;
@@ -2344,13 +2370,14 @@ qla2x00_get_fcode_version(scsi_qla_host_t *ha, uint32_t pcids)
 }
 
 int
-qla2x00_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
+qla2x00_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
 {
 	int ret = QLA_SUCCESS;
 	uint8_t code_type, last_image;
 	uint32_t pcihdr, pcids;
 	uint8_t *dbyte;
 	uint16_t *dcode;
+	struct qla_hw_data *ha = vha->hw;
 
 	if (!ha->pio_address || !mbuf)
 		return QLA_FUNCTION_FAILED;
@@ -2370,8 +2397,8 @@ qla2x00_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
 		if (qla2x00_read_flash_byte(ha, pcihdr) != 0x55 ||
 		    qla2x00_read_flash_byte(ha, pcihdr + 0x01) != 0xaa) {
 			/* No signature */
-			DEBUG2(printk("scsi(%ld): No matching ROM "
-			    "signature.\n", ha->host_no));
+			DEBUG2(qla_printk(KERN_DEBUG, ha, "No matching ROM "
+			    "signature.\n"));
 			ret = QLA_FUNCTION_FAILED;
 			break;
 		}
@@ -2387,8 +2414,8 @@ qla2x00_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
 		    qla2x00_read_flash_byte(ha, pcids + 0x2) != 'I' ||
 		    qla2x00_read_flash_byte(ha, pcids + 0x3) != 'R') {
 			/* Incorrect header. */
-			DEBUG2(printk("%s(): PCI data struct not found "
-			    "pcir_adr=%x.\n", __func__, pcids));
+			DEBUG2(qla_printk(KERN_INFO, ha, "PCI data struct not "
+			    "found pcir_adr=%x.\n", pcids));
 			ret = QLA_FUNCTION_FAILED;
 			break;
 		}
@@ -2402,7 +2429,7 @@ qla2x00_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
 			    qla2x00_read_flash_byte(ha, pcids + 0x12);
 			ha->bios_revision[1] =
 			    qla2x00_read_flash_byte(ha, pcids + 0x13);
-			DEBUG3(printk("%s(): read BIOS %d.%d.\n", __func__,
+			DEBUG3(qla_printk(KERN_DEBUG, ha, "read BIOS %d.%d.\n",
 			    ha->bios_revision[1], ha->bios_revision[0]));
 			break;
 		case ROM_CODE_TYPE_FCODE:
@@ -2416,12 +2443,12 @@ qla2x00_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
 			    qla2x00_read_flash_byte(ha, pcids + 0x12);
 			ha->efi_revision[1] =
 			    qla2x00_read_flash_byte(ha, pcids + 0x13);
-			DEBUG3(printk("%s(): read EFI %d.%d.\n", __func__,
+			DEBUG3(qla_printk(KERN_DEBUG, ha, "read EFI %d.%d.\n",
 			    ha->efi_revision[1], ha->efi_revision[0]));
 			break;
 		default:
-			DEBUG2(printk("%s(): Unrecognized code type %x at "
-			    "pcids %x.\n", __func__, code_type, pcids));
+			DEBUG2(qla_printk(KERN_INFO, ha, "Unrecognized code "
+			    "type %x at pcids %x.\n", code_type, pcids));
 			break;
 		}
 
@@ -2441,16 +2468,16 @@ qla2x00_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
 
 		qla2x00_read_flash_data(ha, dbyte, ha->flt_region_fw * 4 + 10,
 		    8);
-		DEBUG3(printk("%s(%ld): dumping fw ver from flash:\n",
-		    __func__, ha->host_no));
+		DEBUG3(qla_printk(KERN_DEBUG, ha, "dumping fw ver from "
+		    "flash:\n"));
 		DEBUG3(qla2x00_dump_buffer((uint8_t *)dbyte, 8));
 
 		if ((dcode[0] == 0xffff && dcode[1] == 0xffff &&
 		    dcode[2] == 0xffff && dcode[3] == 0xffff) ||
 		    (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
 		    dcode[3] == 0)) {
-			DEBUG2(printk("%s(): Unrecognized fw revision at "
-			    "%x.\n", __func__, ha->flt_region_fw * 4));
+			DEBUG2(qla_printk(KERN_INFO, ha, "Unrecognized fw "
+			    "revision at %x.\n", ha->flt_region_fw * 4));
 		} else {
 			/* values are in big endian */
 			ha->fw_revision[0] = dbyte[0] << 16 | dbyte[1];
@@ -2465,7 +2492,7 @@ qla2x00_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
 }
 
 int
-qla24xx_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
+qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
 {
 	int ret = QLA_SUCCESS;
 	uint32_t pcihdr, pcids;
@@ -2473,6 +2500,7 @@ qla24xx_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
 	uint8_t *bcode;
 	uint8_t code_type, last_image;
 	int i;
+	struct qla_hw_data *ha = vha->hw;
 
 	if (!mbuf)
 		return QLA_FUNCTION_FAILED;
@@ -2489,12 +2517,12 @@ qla24xx_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
 	last_image = 1;
 	do {
 		/* Verify PCI expansion ROM header. */
-		qla24xx_read_flash_data(ha, dcode, pcihdr >> 2, 0x20);
+		qla24xx_read_flash_data(vha, dcode, pcihdr >> 2, 0x20);
 		bcode = mbuf + (pcihdr % 4);
 		if (bcode[0x0] != 0x55 || bcode[0x1] != 0xaa) {
 			/* No signature */
-			DEBUG2(printk("scsi(%ld): No matching ROM "
-			    "signature.\n", ha->host_no));
+			DEBUG2(qla_printk(KERN_DEBUG, ha, "No matching ROM "
+			    "signature.\n"));
 			ret = QLA_FUNCTION_FAILED;
 			break;
 		}
@@ -2502,15 +2530,15 @@ qla24xx_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
 		/* Locate PCI data structure. */
 		pcids = pcihdr + ((bcode[0x19] << 8) | bcode[0x18]);
 
-		qla24xx_read_flash_data(ha, dcode, pcids >> 2, 0x20);
+		qla24xx_read_flash_data(vha, dcode, pcids >> 2, 0x20);
 		bcode = mbuf + (pcihdr % 4);
 
 		/* Validate signature of PCI data structure. */
 		if (bcode[0x0] != 'P' || bcode[0x1] != 'C' ||
 		    bcode[0x2] != 'I' || bcode[0x3] != 'R') {
 			/* Incorrect header. */
-			DEBUG2(printk("%s(): PCI data struct not found "
-			    "pcir_adr=%x.\n", __func__, pcids));
+			DEBUG2(qla_printk(KERN_INFO, ha, "PCI data struct not "
+			    "found pcir_adr=%x.\n", pcids));
 			ret = QLA_FUNCTION_FAILED;
 			break;
 		}
@@ -2522,26 +2550,26 @@ qla24xx_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
 			/* Intel x86, PC-AT compatible. */
 			ha->bios_revision[0] = bcode[0x12];
 			ha->bios_revision[1] = bcode[0x13];
-			DEBUG3(printk("%s(): read BIOS %d.%d.\n", __func__,
+			DEBUG3(qla_printk(KERN_DEBUG, ha, "read BIOS %d.%d.\n",
 			    ha->bios_revision[1], ha->bios_revision[0]));
 			break;
 		case ROM_CODE_TYPE_FCODE:
 			/* Open Firmware standard for PCI (FCode). */
 			ha->fcode_revision[0] = bcode[0x12];
 			ha->fcode_revision[1] = bcode[0x13];
-			DEBUG3(printk("%s(): read FCODE %d.%d.\n", __func__,
+			DEBUG3(qla_printk(KERN_DEBUG, ha, "read FCODE %d.%d.\n",
 			    ha->fcode_revision[1], ha->fcode_revision[0]));
 			break;
 		case ROM_CODE_TYPE_EFI:
 			/* Extensible Firmware Interface (EFI). */
 			ha->efi_revision[0] = bcode[0x12];
 			ha->efi_revision[1] = bcode[0x13];
-			DEBUG3(printk("%s(): read EFI %d.%d.\n", __func__,
+			DEBUG3(qla_printk(KERN_DEBUG, ha, "read EFI %d.%d.\n",
 			    ha->efi_revision[1], ha->efi_revision[0]));
 			break;
 		default:
-			DEBUG2(printk("%s(): Unrecognized code type %x at "
-			    "pcids %x.\n", __func__, code_type, pcids));
+			DEBUG2(qla_printk(KERN_INFO, ha, "Unrecognized code "
+			    "type %x at pcids %x.\n", code_type, pcids));
 			break;
 		}
 
@@ -2555,7 +2583,7 @@ qla24xx_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
 	memset(ha->fw_revision, 0, sizeof(ha->fw_revision));
 	dcode = mbuf;
 
-	qla24xx_read_flash_data(ha, dcode, ha->flt_region_fw + 4, 4);
+	qla24xx_read_flash_data(vha, dcode, ha->flt_region_fw + 4, 4);
 	for (i = 0; i < 4; i++)
 		dcode[i] = be32_to_cpu(dcode[i]);
 
@@ -2563,8 +2591,8 @@ qla24xx_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
 	    dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
 	    (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
 	    dcode[3] == 0)) {
-		DEBUG2(printk("%s(): Unrecognized fw version at %x.\n",
-		    __func__, ha->flt_region_fw));
+		DEBUG2(qla_printk(KERN_INFO, ha, "Unrecognized fw "
+		    "revision at %x.\n", ha->flt_region_fw * 4));
 	} else {
 		ha->fw_revision[0] = dcode[0];
 		ha->fw_revision[1] = dcode[1];
@@ -2593,8 +2621,9 @@ qla2xxx_is_vpd_valid(uint8_t *pos, uint8_t *end)
 }
 
 int
-qla2xxx_get_vpd_field(scsi_qla_host_t *ha, char *key, char *str, size_t size)
+qla2xxx_get_vpd_field(scsi_qla_host_t *vha, char *key, char *str, size_t size)
 {
+	struct qla_hw_data *ha = vha->hw;
 	uint8_t *pos = ha->vpd;
 	uint8_t *end = pos + ha->vpd_size;
 	int len = 0;
@@ -2621,9 +2650,10 @@ qla2xxx_get_vpd_field(scsi_qla_host_t *ha, char *key, char *str, size_t size)
 }
 
 static int
-qla2xxx_hw_event_store(scsi_qla_host_t *ha, uint32_t *fdata)
+qla2xxx_hw_event_store(scsi_qla_host_t *vha, uint32_t *fdata)
 {
 	uint32_t d[2], faddr;
+	struct qla_hw_data *ha = vha->hw;
 
 	/* Locate first empty entry. */
 	for (;;) {
@@ -2634,7 +2664,7 @@ qla2xxx_hw_event_store(scsi_qla_host_t *ha, uint32_t *fdata)
 			return QLA_MEMORY_ALLOC_FAILED;
 		}
 
-		qla24xx_read_flash_data(ha, d, ha->hw_event_ptr, 2);
+		qla24xx_read_flash_data(vha, d, ha->hw_event_ptr, 2);
 		faddr = flash_data_to_access_addr(ha->hw_event_ptr);
 		ha->hw_event_ptr += FA_HW_EVENT_ENTRY_SIZE;
 		if (d[0] == __constant_cpu_to_le32(0xffffffff) &&
@@ -2655,12 +2685,12 @@ qla2xxx_hw_event_store(scsi_qla_host_t *ha, uint32_t *fdata)
 }
 
 int
-qla2xxx_hw_event_log(scsi_qla_host_t *ha, uint16_t code, uint16_t d1,
+qla2xxx_hw_event_log(scsi_qla_host_t *vha, uint16_t code, uint16_t d1,
     uint16_t d2, uint16_t d3)
 {
 #define QMARK(a, b, c, d) \
     cpu_to_le32(LSB(a) << 24 | LSB(b) << 16 | LSB(c) << 8 | LSB(d))
-
+	struct qla_hw_data *ha = vha->hw;
 	int rval;
 	uint32_t marker[2], fdata[4];
 
@@ -2681,7 +2711,7 @@ qla2xxx_hw_event_log(scsi_qla_host_t *ha, uint16_t code, uint16_t d1,
 		/* Locate marker. */
 		ha->hw_event_ptr = ha->flt_region_hw_event;
 		for (;;) {
-			qla24xx_read_flash_data(ha, fdata, ha->hw_event_ptr,
+			qla24xx_read_flash_data(vha, fdata, ha->hw_event_ptr,
 			    4);
 			if (fdata[0] == __constant_cpu_to_le32(0xffffffff) &&
 			    fdata[1] == __constant_cpu_to_le32(0xffffffff))
@@ -2700,7 +2730,7 @@ qla2xxx_hw_event_log(scsi_qla_host_t *ha, uint16_t code, uint16_t d1,
 		}
 		/* No marker, write it. */
 		if (!ha->flags.hw_event_marker_found) {
-			rval = qla2xxx_hw_event_store(ha, marker);
+			rval = qla2xxx_hw_event_store(vha, marker);
 			if (rval != QLA_SUCCESS) {
 				DEBUG2(qla_printk(KERN_WARNING, ha,
 				    "HW event -- Failed marker write=%x.!\n",
@@ -2714,7 +2744,7 @@ qla2xxx_hw_event_log(scsi_qla_host_t *ha, uint16_t code, uint16_t d1,
 	/* Store error.  */
 	fdata[0] = cpu_to_le32(code << 16 | d1);
 	fdata[1] = cpu_to_le32(d2 << 16 | d3);
-	rval = qla2xxx_hw_event_store(ha, fdata);
+	rval = qla2xxx_hw_event_store(vha, fdata);
 	if (rval != QLA_SUCCESS) {
 		DEBUG2(qla_printk(KERN_WARNING, ha,
 		    "HW event -- Failed error write=%x.!\n",
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index eea6720adf16..be22f3a09f8d 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,9 +7,9 @@
 /*
  * Driver version
  */
-#define QLA2XXX_VERSION      "8.02.01-k9"
+#define QLA2XXX_VERSION      "8.02.03-k1"
 
 #define QLA_DRIVER_MAJOR_VER	8
 #define QLA_DRIVER_MINOR_VER	2
-#define QLA_DRIVER_PATCH_VER	1
+#define QLA_DRIVER_PATCH_VER	3
 #define QLA_DRIVER_BETA_VER	0
diff --git a/drivers/scsi/qlogicfas408.c b/drivers/scsi/qlogicfas408.c
index de7b3bc2cbc9..1ad51552d6b1 100644
--- a/drivers/scsi/qlogicfas408.c
+++ b/drivers/scsi/qlogicfas408.c
@@ -23,7 +23,7 @@
    Functions as standalone, loadable, and PCMCIA driver, the latter from
    Dave Hinds' PCMCIA package.
    
-   Cleaned up 26/10/2002 by Alan Cox <alan@redhat.com> as part of the 2.5
+   Cleaned up 26/10/2002 by Alan Cox <alan@lxorguk.ukuu.org.uk> as part of the 2.5
    SCSI driver cleanup and audit. This driver still needs work on the
    following
    	-	Non terminating hardware waits
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index edfaf241c5ba..381838ebd460 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -136,7 +136,7 @@ enum blk_eh_timer_return scsi_times_out(struct request *req)
 	else
 		eh_timed_out = NULL;
 
-	if (eh_timed_out)
+	if (eh_timed_out) {
 		rtn = eh_timed_out(scmd);
 		switch (rtn) {
 		case BLK_EH_NOT_HANDLED:
@@ -144,6 +144,7 @@ enum blk_eh_timer_return scsi_times_out(struct request *req)
 		default:
 			return rtn;
 		}
+	}
 
 	if (unlikely(!scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD))) {
 		scmd->result |= DID_TIME_OUT << 16;
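
The braces added above fix a dangling-statement bug: only the assignment was
guarded by `if (eh_timed_out)`, so the switch on rtn executed even when no
transport timeout handler existed and rtn was never initialized. A
standalone miniature of the broken shape (not the driver code itself):

    enum blk_eh { BLK_EH_NOT_HANDLED, BLK_EH_HANDLED };

    static enum blk_eh times_out(enum blk_eh (*eh_timed_out)(void))
    {
            enum blk_eh rtn;

            if (eh_timed_out)
                    rtn = eh_timed_out();
            switch (rtn) {          /* BUG: reads rtn even when unset */
            case BLK_EH_NOT_HANDLED:
                    break;
            default:
                    return rtn;
            }
            return BLK_EH_NOT_HANDLED;
    }

Wrapping both the call and the switch in one block, as the patch does, keeps
the whole decision inside the guard.
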
@@ -1405,8 +1406,9 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
 		return ADD_TO_MLQUEUE;
 	case GOOD:
 	case COMMAND_TERMINATED:
-	case TASK_ABORTED:
 		return SUCCESS;
+	case TASK_ABORTED:
+		goto maybe_retry;
 	case CHECK_CONDITION:
 		rtn = scsi_check_sense(scmd);
 		if (rtn == NEEDS_RETRY)
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
index dc1cfb2fd76b..2ae4f8fc5831 100644
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -94,7 +94,7 @@ static int ioctl_internal_command(struct scsi_device *sdev, char *cmd,
 	SCSI_LOG_IOCTL(1, printk("Trying ioctl with scsi command %d\n", *cmd));
 
 	result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0,
-				  &sshdr, timeout, retries);
+				  &sshdr, timeout, retries, NULL);
 
 	SCSI_LOG_IOCTL(2, printk("Ioctl returned  0x%x\n", result));
 
@@ -270,11 +270,11 @@ int scsi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
 EXPORT_SYMBOL(scsi_ioctl);
 
 /**
- * scsi_nonblock_ioctl() - Handle SG_SCSI_RESET
+ * scsi_nonblockable_ioctl() - Handle SG_SCSI_RESET
  * @sdev: scsi device receiving ioctl
  * @cmd: Must be SG_SCSI_RESET
  * @arg: pointer to int containing SG_SCSI_RESET_{DEVICE,BUS,HOST}
- * @filp: either NULL or a &struct file which must have the O_NONBLOCK flag.
+ * @ndelay: file mode O_NDELAY flag
  */
 int scsi_nonblockable_ioctl(struct scsi_device *sdev, int cmd,
 			    void __user *arg, int ndelay)
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 148d3af92aef..f2f51e0333eb 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -183,13 +183,15 @@ int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
  * @timeout:	request timeout in seconds
  * @retries:	number of times to retry request
  * @flags:	flags to OR into the request flags
+ * @resid:	optional residual length
  *
  * returns the req->errors value which is the scsi_cmnd result
  * field.
  */
 int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
 		 int data_direction, void *buffer, unsigned bufflen,
-		 unsigned char *sense, int timeout, int retries, int flags)
+		 unsigned char *sense, int timeout, int retries, int flags,
+		 int *resid)
 {
 	struct request *req;
 	int write = (data_direction == DMA_TO_DEVICE);
@@ -224,6 +226,8 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
 	if (unlikely(req->data_len > 0 && req->data_len <= bufflen))
 		memset(buffer + (bufflen - req->data_len), 0, req->data_len);
 
+	if (resid)
+		*resid = req->data_len;
 	ret = req->errors;
  out:
 	blk_put_request(req);
@@ -235,7 +239,8 @@ EXPORT_SYMBOL(scsi_execute);
 
 int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
 		     int data_direction, void *buffer, unsigned bufflen,
-		     struct scsi_sense_hdr *sshdr, int timeout, int retries)
+		     struct scsi_sense_hdr *sshdr, int timeout, int retries,
+		     int *resid)
 {
 	char *sense = NULL;
 	int result;
@@ -246,7 +251,7 @@ int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
 			return DRIVER_ERROR << 24;
 	}
 	result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
-			      sense, timeout, retries, 0);
+			      sense, timeout, retries, 0, resid);
 	if (sshdr)
 		scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);
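
scsi_execute() and scsi_execute_req() both grow a trailing `int *resid`
out-parameter here; passing NULL preserves the old behavior, which is why
most call sites in the rest of this series change mechanically. When a
caller supplies it, it receives req->data_len, the number of bytes the
device did not transfer — used roughly like this:

    int resid = 0;

    result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
                              &sshdr, timeout, retries, &resid);
    if (result == 0 && resid == len)
            ; /* command succeeded but nothing was transferred */
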
 
@@ -875,16 +880,24 @@ static void scsi_end_bidi_request(struct scsi_cmnd *cmd)
  *              (the normal case for most drivers), we don't need
  *              the logic to deal with cleaning up afterwards.
  *
- *		We must do one of several things here:
+ *		We must call scsi_end_request().  This will finish off
+ *		the specified number of sectors.  If we are done, the
+ *		command block will be released and the queue function
+ *		will be goosed.  If we are not done then we have to
+ *		figure out what to do next:
  *
- *		a) Call scsi_end_request.  This will finish off the
- *		   specified number of sectors.  If we are done, the
- *		   command block will be released, and the queue
- *		   function will be goosed.  If we are not done, then
- *		   scsi_end_request will directly goose the queue.
+ *		a) We can call scsi_requeue_command().  The request
+ *		   will be unprepared and put back on the queue.  Then
+ *		   a new command will be created for it.  This should
+ *		   be used if we made forward progress, or if we want
+ *		   to switch from READ(10) to READ(6) for example.
  *
- *		b) We can just use scsi_requeue_command() here.  This would
- *		   be used if we just wanted to retry, for example.
+ *		b) We can call scsi_queue_insert().  The request will
+ *		   be put back on the queue and retried using the same
+ *		   command as before, possibly after a delay.
+ *
+ *		c) We can call blk_end_request() with -EIO to fail
+ *		   the remainder of the request.
  */
 void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 {
@@ -896,6 +909,9 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 	struct scsi_sense_hdr sshdr;
 	int sense_valid = 0;
 	int sense_deferred = 0;
+	enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
+	      ACTION_DELAYED_RETRY} action;
+	char *description = NULL;
 
 	if (result) {
 		sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
@@ -947,10 +963,13 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 		return;
 	this_count = blk_rq_bytes(req);
 
-	/* good_bytes = 0, or (inclusive) there were leftovers and
-	 * result = 0, so scsi_end_request couldn't retry.
-	 */
-	if (sense_valid && !sense_deferred) {
+	if (host_byte(result) == DID_RESET) {
+		/* Third party bus reset or reset for error recovery
+		 * reasons.  Just retry the command and see what
+		 * happens.
+		 */
+		action = ACTION_RETRY;
+	} else if (sense_valid && !sense_deferred) {
 		switch (sshdr.sense_key) {
 		case UNIT_ATTENTION:
 			if (cmd->device->removable) {
@@ -958,16 +977,15 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 				 * and quietly refuse further access.
 				 */
 				cmd->device->changed = 1;
-				scsi_end_request(cmd, -EIO, this_count, 1);
-				return;
+				description = "Media Changed";
+				action = ACTION_FAIL;
 			} else {
 				/* Must have been a power glitch, or a
 				 * bus reset.  Could not have been a
 				 * media change, so we just retry the
-				 * request and see what happens.
+				 * command and see what happens.
 				 */
-				scsi_requeue_command(q, cmd);
-				return;
+				action = ACTION_RETRY;
 			}
 			break;
 		case ILLEGAL_REQUEST:
@@ -983,21 +1001,18 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 			    sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
 			    (cmd->cmnd[0] == READ_10 ||
 			     cmd->cmnd[0] == WRITE_10)) {
+				/* This will issue a new 6-byte command. */
 				cmd->device->use_10_for_rw = 0;
-				/* This will cause a retry with a
-				 * 6-byte command.
-				 */
-				scsi_requeue_command(q, cmd);
-			} else if (sshdr.asc == 0x10) /* DIX */
-				scsi_end_request(cmd, -EIO, this_count, 0);
-			else
-				scsi_end_request(cmd, -EIO, this_count, 1);
-			return;
+				action = ACTION_REPREP;
+			} else
+				action = ACTION_FAIL;
+			break;
 		case ABORTED_COMMAND:
 			if (sshdr.asc == 0x10) { /* DIF */
-				scsi_end_request(cmd, -EIO, this_count, 0);
-				return;
-			}
+				action = ACTION_FAIL;
+				description = "Data Integrity Failure";
+			} else
+				action = ACTION_RETRY;
 			break;
 		case NOT_READY:
 			/* If the device is in the process of becoming
@@ -1012,49 +1027,57 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 				case 0x07: /* operation in progress */
 				case 0x08: /* Long write in progress */
 				case 0x09: /* self test in progress */
-					scsi_requeue_command(q, cmd);
-					return;
-				default:
+					action = ACTION_DELAYED_RETRY;
 					break;
 				}
+			} else {
+				description = "Device not ready";
+				action = ACTION_FAIL;
 			}
-			if (!(req->cmd_flags & REQ_QUIET))
-				scsi_cmd_print_sense_hdr(cmd,
-							 "Device not ready",
-							 &sshdr);
-
-			scsi_end_request(cmd, -EIO, this_count, 1);
-			return;
+			break;
 		case VOLUME_OVERFLOW:
-			if (!(req->cmd_flags & REQ_QUIET)) {
-				scmd_printk(KERN_INFO, cmd,
-					    "Volume overflow, CDB: ");
-				__scsi_print_command(cmd->cmnd);
-				scsi_print_sense("", cmd);
-			}
 			/* See SSC3rXX or current. */
-			scsi_end_request(cmd, -EIO, this_count, 1);
-			return;
+			action = ACTION_FAIL;
+			break;
 		default:
+			description = "Unhandled sense code";
+			action = ACTION_FAIL;
 			break;
 		}
+	} else {
+		description = "Unhandled error code";
+		action = ACTION_FAIL;
 	}
-	if (host_byte(result) == DID_RESET) {
-		/* Third party bus reset or reset for error recovery
-		 * reasons.  Just retry the request and see what
-		 * happens.
-		 */
-		scsi_requeue_command(q, cmd);
-		return;
-	}
-	if (result) {
+
+	switch (action) {
+	case ACTION_FAIL:
+		/* Give up and fail the remainder of the request */
 		if (!(req->cmd_flags & REQ_QUIET)) {
+			if (description)
+				scmd_printk(KERN_INFO, cmd, "%s\n",
+					    description);
 			scsi_print_result(cmd);
 			if (driver_byte(result) & DRIVER_SENSE)
 				scsi_print_sense("", cmd);
 		}
+		blk_end_request(req, -EIO, blk_rq_bytes(req));
+		scsi_next_command(cmd);
+		break;
+	case ACTION_REPREP:
+		/* Unprep the request and put it back at the head of the queue.
+		 * A new command will be prepared and issued.
+		 */
+		scsi_requeue_command(q, cmd);
+		break;
+	case ACTION_RETRY:
+		/* Retry the same command immediately */
+		scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
+		break;
+	case ACTION_DELAYED_RETRY:
+		/* Retry the same command after a delay */
+		scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
+		break;
 	}
-	scsi_end_request(cmd, -EIO, this_count, !result);
 }
 
 static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
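
The scsi_io_completion() rewrite above replaces scattered early returns with
classify-then-dispatch: every error path now merely selects an action (plus
an optional description for logging), and one switch at the end carries it
out. Schematically, with the sense analysis cut down to a single key:

    enum { ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
           ACTION_DELAYED_RETRY } action;

    if (host_byte(result) == DID_RESET)
            action = ACTION_RETRY;      /* third-party reset: just retry */
    else if (sense_valid && !sense_deferred)
            switch (sshdr.sense_key) {
            case UNIT_ATTENTION:
                    action = cmd->device->removable ?
                            ACTION_FAIL : ACTION_RETRY;
                    break;
            /* ... other sense keys ... */
            default:
                    action = ACTION_FAIL;
            }
    else
            action = ACTION_FAIL;

    switch (action) {
    case ACTION_FAIL:           /* blk_end_request(req, -EIO, ...) */
    case ACTION_REPREP:         /* scsi_requeue_command(q, cmd) */
    case ACTION_RETRY:          /* scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY) */
    case ACTION_DELAYED_RETRY:  /* scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY) */
            break;
    }
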
@@ -1998,7 +2021,7 @@ scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage,
 	}
 
 	ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len,
-			       sshdr, timeout, retries);
+			       sshdr, timeout, retries, NULL);
 	kfree(real_buffer);
 	return ret;
 }
@@ -2063,7 +2086,7 @@ scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
 	memset(buffer, 0, len);
 
 	result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
-				  sshdr, timeout, retries);
+				  sshdr, timeout, retries, NULL);
 
 	/* This code looks awful: what it's doing is making sure an
 	 * ILLEGAL REQUEST sense return identifies the actual command
@@ -2145,7 +2168,7 @@ scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
 	/* try to eat the UNIT_ATTENTION if there are enough retries */
 	do {
 		result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr,
-					  timeout, retries);
+					  timeout, retries, NULL);
 		if (sdev->removable && scsi_sense_valid(sshdr) &&
 		    sshdr->sense_key == UNIT_ATTENTION)
 			sdev->changed = 1;
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index b14dc02c3ded..18486b51668d 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -216,7 +216,7 @@ static void scsi_unlock_floptical(struct scsi_device *sdev,
 	scsi_cmd[4] = 0x2a;     /* size */
 	scsi_cmd[5] = 0;
 	scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE, result, 0x2a, NULL,
-			 SCSI_TIMEOUT, 3);
+			 SCSI_TIMEOUT, 3, NULL);
 }
 
 /**
@@ -573,6 +573,8 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
 
 	/* Each pass gets up to three chances to ignore Unit Attention */
 	for (count = 0; count < 3; ++count) {
+		int resid;
+
 		memset(scsi_cmd, 0, 6);
 		scsi_cmd[0] = INQUIRY;
 		scsi_cmd[4] = (unsigned char) try_inquiry_len;
@@ -581,7 +583,8 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
 
 		result = scsi_execute_req(sdev,  scsi_cmd, DMA_FROM_DEVICE,
 					  inq_result, try_inquiry_len, &sshdr,
-					  HZ / 2 + HZ * scsi_inq_timeout, 3);
+					  HZ / 2 + HZ * scsi_inq_timeout, 3,
+					  &resid);
 
 		SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO "scsi scan: INQUIRY %s "
 				"with code 0x%x\n",
@@ -602,6 +605,14 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
 				    (sshdr.ascq == 0))
 					continue;
 			}
+		} else {
+			/*
+			 * If nothing was transferred, try again;
+			 * this is a workaround for some USB
+			 * devices.
+			 */
+			if (resid == try_inquiry_len)
+				continue;
 		}
 		break;
 	}
@@ -1390,7 +1401,7 @@ static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
 
 		result = scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE,
 					  lun_data, length, &sshdr,
-					  SCSI_TIMEOUT + 4 * HZ, 3);
+					  SCSI_TIMEOUT + 4 * HZ, 3, NULL);
 
 		SCSI_LOG_SCAN_BUS(3, printk (KERN_INFO "scsi scan: REPORT LUNS"
 				" %s (try %d) result 0x%x\n", result
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 1e71abf0607a..062304de4854 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -3012,6 +3012,16 @@ fc_timeout_deleted_rport(struct work_struct *work)
 	rport->port_state = FC_PORTSTATE_NOTPRESENT;
 	rport->flags &= ~FC_RPORT_FAST_FAIL_TIMEDOUT;
 
+	/*
+	 * Pre-emptively kill I/O rather than waiting for the work queue
+	 * item to tear down the starget. (The FCoE libFC folks prefer
+	 * this, and prefer rport_port_id to still be set when it runs.)
+	 */
+	spin_unlock_irqrestore(shost->host_lock, flags);
+	fc_terminate_rport_io(rport);
+
+	BUG_ON(rport->port_state != FC_PORTSTATE_NOTPRESENT);
+
 	/* remove the identifiers that aren't used in the existing binding */
 	switch (fc_host->tgtid_bind_type) {
 	case FC_TGTID_BIND_BY_WWPN:
@@ -3035,9 +3045,6 @@ fc_timeout_deleted_rport(struct work_struct *work)
 	 * went away and didn't come back - we'll remove
 	 * all attached scsi devices.
 	 */
-	spin_unlock_irqrestore(shost->host_lock, flags);
-
-	scsi_target_unblock(&rport->dev);
 	fc_queue_work(shost, &rport->stgt_delete_work);
 }
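
The reordering above has a locking side to it: fc_terminate_rport_io() calls
back into the LLDD, so host_lock is dropped first, and the rport identifiers
are only cleared afterwards so the driver still sees rport_port_id while
killing I/O. The new ordering, in outline:

    rport->port_state = FC_PORTSTATE_NOTPRESENT;    /* under host_lock */
    spin_unlock_irqrestore(shost->host_lock, flags);

    fc_terminate_rport_io(rport);   /* LLDD callback, outside host_lock;
                                     * identifiers are still intact here */

    /* ... only now clear the unused tgtid binding identifiers ... */
    fc_queue_work(shost, &rport->stgt_delete_work);
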
 
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index 7c2d28924d2a..f49f55c6bfc8 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -111,8 +111,9 @@ static int spi_execute(struct scsi_device *sdev, const void *cmd,
 				      sense, DV_TIMEOUT, /* retries */ 1,
 				      REQ_FAILFAST_DEV |
 				      REQ_FAILFAST_TRANSPORT |
-				      REQ_FAILFAST_DRIVER);
-		if (result & DRIVER_SENSE) {
+				      REQ_FAILFAST_DRIVER,
+				      NULL);
+		if (driver_byte(result) & DRIVER_SENSE) {
 			struct scsi_sense_hdr sshdr_tmp;
 			if (!sshdr)
 				sshdr = &sshdr_tmp;
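
The spi_execute() fix is subtle: scsi_execute() returns a packed result
word, and DRIVER_SENSE lives in the driver byte (bits 24-31), so the old
`result & DRIVER_SENSE` test inspected the wrong bits. The accessors from
<scsi/scsi.h>, for reference:

    #define status_byte(result) (((result) >> 1) & 0x7f)
    #define msg_byte(result)    (((result) >> 8) & 0xff)
    #define host_byte(result)   (((result) >> 16) & 0xff)
    #define driver_byte(result) (((result) >> 24) & 0xff)

    /* hence the corrected test: */
    if (driver_byte(result) & DRIVER_SENSE)
            ; /* valid sense data accompanies the result */
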
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 5081b3981d3c..62b28d58e65e 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -884,7 +884,7 @@ static int sd_sync_cache(struct scsi_disk *sdkp)
 		 * flush everything.
 		 */
 		res = scsi_execute_req(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
-				       SD_TIMEOUT, SD_MAX_RETRIES);
+				       SD_TIMEOUT, SD_MAX_RETRIES, NULL);
 		if (res == 0)
 			break;
 	}
@@ -1134,7 +1134,7 @@ sd_spinup_disk(struct scsi_disk *sdkp)
 			the_result = scsi_execute_req(sdkp->device, cmd,
 						      DMA_NONE, NULL, 0,
 						      &sshdr, SD_TIMEOUT,
-						      SD_MAX_RETRIES);
+						      SD_MAX_RETRIES, NULL);
 
 			/*
 			 * If the drive has indicated to us that it
@@ -1192,7 +1192,8 @@ sd_spinup_disk(struct scsi_disk *sdkp)
 					cmd[4] |= 1 << 4;
 				scsi_execute_req(sdkp->device, cmd, DMA_NONE,
 						 NULL, 0, &sshdr,
-						 SD_TIMEOUT, SD_MAX_RETRIES);
+						 SD_TIMEOUT, SD_MAX_RETRIES,
+						 NULL);
 				spintime_expire = jiffies + 100 * HZ;
 				spintime = 1;
 			}
@@ -1306,7 +1307,7 @@ repeat:
 		
 		the_result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE,
 					      buffer, longrc ? 13 : 8, &sshdr,
-					      SD_TIMEOUT, SD_MAX_RETRIES);
+					      SD_TIMEOUT, SD_MAX_RETRIES, NULL);
 
 		if (media_not_present(sdkp, &sshdr))
 			return;
@@ -1986,7 +1987,7 @@ static int sd_start_stop_device(struct scsi_disk *sdkp, int start)
 		return -ENODEV;
 
 	res = scsi_execute_req(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
-			       SD_TIMEOUT, SD_MAX_RETRIES);
+			       SD_TIMEOUT, SD_MAX_RETRIES, NULL);
 	if (res) {
 		sd_printk(KERN_WARNING, sdkp, "START_STOP FAILED\n");
 		sd_print_result(sdkp, res);
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index 1bcf3c33d7ff..7f0df29f3a64 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -77,7 +77,7 @@ static int ses_recv_diag(struct scsi_device *sdev, int page_code,
 	};
 
 	return scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen,
-				NULL, SES_TIMEOUT, SES_RETRIES);
+				NULL, SES_TIMEOUT, SES_RETRIES, NULL);
 }
 
 static int ses_send_diag(struct scsi_device *sdev, int page_code,
@@ -95,7 +95,7 @@ static int ses_send_diag(struct scsi_device *sdev, int page_code,
 	};
 
 	result = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, buf, bufflen,
-				  NULL, SES_TIMEOUT, SES_RETRIES);
+				  NULL, SES_TIMEOUT, SES_RETRIES, NULL);
 	if (result)
 		sdev_printk(KERN_ERR, sdev, "SEND DIAGNOSTIC result: %8x\n",
 			    result);
@@ -369,7 +369,8 @@ static void ses_match_to_enclosure(struct enclosure_device *edev,
 		return;
 
 	if (scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf,
-			     VPD_INQUIRY_SIZE, NULL, SES_TIMEOUT, SES_RETRIES))
+			     VPD_INQUIRY_SIZE, NULL, SES_TIMEOUT, SES_RETRIES,
+			     NULL))
 		goto free;
 
 	vpd_len = (buf[2] << 8) + buf[3];
@@ -380,7 +381,7 @@ static void ses_match_to_enclosure(struct enclosure_device *edev,
 	cmd[3] = vpd_len >> 8;
 	cmd[4] = vpd_len & 0xff;
 	if (scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf,
-			     vpd_len, NULL, SES_TIMEOUT, SES_RETRIES))
+			     vpd_len, NULL, SES_TIMEOUT, SES_RETRIES, NULL))
 		goto free;
 
 	desc = buf + 4;
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 45b66b98a516..e7fa3caead79 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -177,7 +177,7 @@ int sr_test_unit_ready(struct scsi_device *sdev, struct scsi_sense_hdr *sshdr)
 	do {
 		the_result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL,
 					      0, sshdr, SR_TIMEOUT,
-					      retries--);
+					      retries--, NULL);
 		if (scsi_sense_valid(sshdr) &&
 		    sshdr->sense_key == UNIT_ATTENTION)
 			sdev->changed = 1;
@@ -681,7 +681,7 @@ static void get_sectorsize(struct scsi_cd *cd)
 		/* Do the command and wait.. */
 		the_result = scsi_execute_req(cd->device, cmd, DMA_FROM_DEVICE,
 					      buffer, sizeof(buffer), NULL,
-					      SR_TIMEOUT, MAX_RETRIES);
+					      SR_TIMEOUT, MAX_RETRIES, NULL);
 
 		retries--;
 
diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c
index ae87d08df588..d92ff512d213 100644
--- a/drivers/scsi/sr_ioctl.c
+++ b/drivers/scsi/sr_ioctl.c
@@ -207,7 +207,7 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc)
 	memset(sense, 0, sizeof(*sense));
 	result = scsi_execute(SDev, cgc->cmd, cgc->data_direction,
 			      cgc->buffer, cgc->buflen, (char *)sense,
-			      cgc->timeout, IOCTL_RETRIES, 0);
+			      cgc->timeout, IOCTL_RETRIES, 0, NULL);
 
 	scsi_normalize_sense((char *)sense, sizeof(*sense), &sshdr);
 
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index c959bdc55f4f..7f3f317ee6ca 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -451,9 +451,23 @@ static void st_sleep_done(void *data, char *sense, int result, int resid)
 		complete(SRpnt->waiting);
 }
 
-static struct st_request *st_allocate_request(void)
+static struct st_request *st_allocate_request(struct scsi_tape *stp)
 {
-	return kzalloc(sizeof(struct st_request), GFP_KERNEL);
+	struct st_request *streq;
+
+	streq = kzalloc(sizeof(*streq), GFP_KERNEL);
+	if (streq)
+		streq->stp = stp;
+	else {
+		DEBC(printk(KERN_ERR "%s: Can't get SCSI request.\n",
+			    tape_name(stp)););
+		if (signal_pending(current))
+			stp->buffer->syscall_result = -EINTR;
+		else
+			stp->buffer->syscall_result = -EBUSY;
+	}
+
+	return streq;
 }
 
 static void st_release_request(struct st_request *streq)
@@ -481,18 +495,10 @@ st_do_scsi(struct st_request * SRpnt, struct scsi_tape * STp, unsigned char *cmd
 		return NULL;
 	}
 
-	if (SRpnt == NULL) {
-		SRpnt = st_allocate_request();
-		if (SRpnt == NULL) {
-			DEBC( printk(KERN_ERR "%s: Can't get SCSI request.\n",
-				     tape_name(STp)); );
-			if (signal_pending(current))
-				(STp->buffer)->syscall_result = (-EINTR);
-			else
-				(STp->buffer)->syscall_result = (-EBUSY);
+	if (!SRpnt) {
+		SRpnt = st_allocate_request(STp);
+		if (!SRpnt)
 			return NULL;
-		}
-		SRpnt->stp = STp;
 	}
 
 	/* If async IO, set last_SRpnt. This ptr tells write_behind_check
@@ -527,6 +533,28 @@ st_do_scsi(struct st_request * SRpnt, struct scsi_tape * STp, unsigned char *cmd
 	return SRpnt;
 }
 
+static int st_scsi_kern_execute(struct st_request *streq,
+				const unsigned char *cmd, int data_direction,
+				void *buffer, unsigned bufflen, int timeout,
+				int retries)
+{
+	struct scsi_tape *stp = streq->stp;
+	int ret, resid;
+
+	stp->buffer->cmdstat.have_sense = 0;
+	memcpy(streq->cmd, cmd, sizeof(streq->cmd));
+
+	ret = scsi_execute(stp->device, cmd, data_direction, buffer, bufflen,
+			   streq->sense, timeout, retries, 0, &resid);
+	if (driver_byte(ret) & DRIVER_ERROR)
+		return -EBUSY;
+
+	stp->buffer->cmdstat.midlevel_result = streq->result = ret;
+	stp->buffer->cmdstat.residual = resid;
+	stp->buffer->syscall_result = st_chk_result(stp, streq);
+
+	return 0;
+}
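
st_scsi_kern_execute() becomes the synchronous issue path: it copies the
CDB, runs the command through scsi_execute() (with the new residual
out-parameter), stores the midlevel result and residual into the tape's
cmdstat, computes syscall_result via st_chk_result(), and returns nonzero
only when the command could not be issued at all (DRIVER_ERROR mapping to
-EBUSY). Converted callers therefore check in two steps, as cross_eof()
below does — roughly:

    ret = st_scsi_kern_execute(SRpnt, cmd, DMA_NONE, NULL, 0,
                               STp->device->request_queue->rq_timeout,
                               MAX_RETRIES);
    if (ret)                                /* could not issue at all */
            goto out;

    ret = STp->buffer->syscall_result;      /* per-command outcome */
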
 
 /* Handle the write-behind checking (waits for completion). Returns -ENOSPC if
    write has been correct but EOM early warning reached, -EIO if write ended in
@@ -599,6 +627,7 @@ static int cross_eof(struct scsi_tape * STp, int forward)
 {
 	struct st_request *SRpnt;
 	unsigned char cmd[MAX_COMMAND_SIZE];
+	int ret;
 
 	cmd[0] = SPACE;
 	cmd[1] = 0x01;		/* Space FileMarks */
@@ -612,19 +641,26 @@ static int cross_eof(struct scsi_tape * STp, int forward)
         DEBC(printk(ST_DEB_MSG "%s: Stepping over filemark %s.\n",
 		   tape_name(STp), forward ? "forward" : "backward"));
 
-	SRpnt = st_do_scsi(NULL, STp, cmd, 0, DMA_NONE,
-			   STp->device->timeout, MAX_RETRIES, 1);
+	SRpnt = st_allocate_request(STp);
 	if (!SRpnt)
-		return (STp->buffer)->syscall_result;
+		return STp->buffer->syscall_result;
 
-	st_release_request(SRpnt);
-	SRpnt = NULL;
+	ret = st_scsi_kern_execute(SRpnt, cmd, DMA_NONE, NULL, 0,
+				   STp->device->request_queue->rq_timeout,
+				   MAX_RETRIES);
+	if (ret)
+		goto out;
+
+	ret = STp->buffer->syscall_result;
 
 	if ((STp->buffer)->cmdstat.midlevel_result != 0)
 		printk(KERN_ERR "%s: Stepping over filemark %s failed.\n",
 		   tape_name(STp), forward ? "forward" : "backward");
 
-	return (STp->buffer)->syscall_result;
+out:
+	st_release_request(SRpnt);
+
+	return ret;
 }
 
 
@@ -657,7 +693,8 @@ static int st_flush_write_buffer(struct scsi_tape * STp)
 		cmd[4] = blks;
 
 		SRpnt = st_do_scsi(NULL, STp, cmd, transfer, DMA_TO_DEVICE,
-				   STp->device->timeout, MAX_WRITE_RETRIES, 1);
+				   STp->device->request_queue->rq_timeout,
+				   MAX_WRITE_RETRIES, 1);
 		if (!SRpnt)
 			return (STp->buffer)->syscall_result;
 
@@ -844,21 +881,24 @@ static int test_ready(struct scsi_tape *STp, int do_wait)
 	int attentions, waits, max_wait, scode;
 	int retval = CHKRES_READY, new_session = 0;
 	unsigned char cmd[MAX_COMMAND_SIZE];
-	struct st_request *SRpnt = NULL;
+	struct st_request *SRpnt;
 	struct st_cmdstatus *cmdstatp = &STp->buffer->cmdstat;
 
+	SRpnt = st_allocate_request(STp);
+	if (!SRpnt)
+		return STp->buffer->syscall_result;
+
 	max_wait = do_wait ? ST_BLOCK_SECONDS : 0;
 
 	for (attentions=waits=0; ; ) {
 		memset((void *) &cmd[0], 0, MAX_COMMAND_SIZE);
 		cmd[0] = TEST_UNIT_READY;
-		SRpnt = st_do_scsi(SRpnt, STp, cmd, 0, DMA_NONE,
-				   STp->long_timeout, MAX_READY_RETRIES, 1);
 
-		if (!SRpnt) {
-			retval = (STp->buffer)->syscall_result;
+		retval = st_scsi_kern_execute(SRpnt, cmd, DMA_NONE, NULL, 0,
+					      STp->long_timeout,
+					      MAX_READY_RETRIES);
+		if (retval)
 			break;
-		}
 
 		if (cmdstatp->have_sense) {
 
@@ -902,8 +942,8 @@ static int test_ready(struct scsi_tape *STp, int do_wait)
 		break;
 	}
 
-	if (SRpnt != NULL)
-		st_release_request(SRpnt);
+	st_release_request(SRpnt);
+
 	return retval;
 }
 
@@ -980,16 +1020,24 @@ static int check_tape(struct scsi_tape *STp, struct file *filp)
 		}
 	}
 
+	SRpnt = st_allocate_request(STp);
+	if (!SRpnt) {
+		retval = STp->buffer->syscall_result;
+		goto err_out;
+	}
+
 	if (STp->omit_blklims)
 		STp->min_block = STp->max_block = (-1);
 	else {
 		memset((void *) &cmd[0], 0, MAX_COMMAND_SIZE);
 		cmd[0] = READ_BLOCK_LIMITS;
 
-		SRpnt = st_do_scsi(SRpnt, STp, cmd, 6, DMA_FROM_DEVICE,
-				   STp->device->timeout, MAX_READY_RETRIES, 1);
-		if (!SRpnt) {
-			retval = (STp->buffer)->syscall_result;
+		retval = st_scsi_kern_execute(SRpnt, cmd, DMA_FROM_DEVICE,
+					      STp->buffer->b_data, 6,
+					      STp->device->request_queue->rq_timeout,
+					      MAX_READY_RETRIES);
+		if (retval) {
+			st_release_request(SRpnt);
 			goto err_out;
 		}
 
@@ -1013,10 +1061,12 @@ static int check_tape(struct scsi_tape *STp, struct file *filp)
 	cmd[0] = MODE_SENSE;
 	cmd[4] = 12;
 
-	SRpnt = st_do_scsi(SRpnt, STp, cmd, 12, DMA_FROM_DEVICE,
-			STp->device->timeout, MAX_READY_RETRIES, 1);
-	if (!SRpnt) {
-		retval = (STp->buffer)->syscall_result;
+	retval = st_scsi_kern_execute(SRpnt, cmd, DMA_FROM_DEVICE,
+				      STp->buffer->b_data, 12,
+				      STp->device->request_queue->rq_timeout,
+				      MAX_READY_RETRIES);
+	if (retval) {
+		st_release_request(SRpnt);
 		goto err_out;
 	}
 
@@ -1246,10 +1296,17 @@ static int st_flush(struct file *filp, fl_owner_t id)
 		cmd[0] = WRITE_FILEMARKS;
 		cmd[4] = 1 + STp->two_fm;
 
-		SRpnt = st_do_scsi(NULL, STp, cmd, 0, DMA_NONE,
-				   STp->device->timeout, MAX_WRITE_RETRIES, 1);
+		SRpnt = st_allocate_request(STp);
 		if (!SRpnt) {
-			result = (STp->buffer)->syscall_result;
+			result = STp->buffer->syscall_result;
+			goto out;
+		}
+
+		result = st_scsi_kern_execute(SRpnt, cmd, DMA_NONE, NULL, 0,
+					      STp->device->request_queue->rq_timeout,
+					      MAX_WRITE_RETRIES);
+		if (result) {
+			st_release_request(SRpnt);
 			goto out;
 		}
 
@@ -1634,7 +1691,8 @@ st_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
 		cmd[4] = blks;
 
 		SRpnt = st_do_scsi(SRpnt, STp, cmd, transfer, DMA_TO_DEVICE,
-				   STp->device->timeout, MAX_WRITE_RETRIES, !async_write);
+				   STp->device->request_queue->rq_timeout,
+				   MAX_WRITE_RETRIES, !async_write);
 		if (!SRpnt) {
 			retval = STbp->syscall_result;
 			goto out;
@@ -1804,7 +1862,8 @@ static long read_tape(struct scsi_tape *STp, long count,
 
 	SRpnt = *aSRpnt;
 	SRpnt = st_do_scsi(SRpnt, STp, cmd, bytes, DMA_FROM_DEVICE,
-			   STp->device->timeout, MAX_RETRIES, 1);
+			   STp->device->request_queue->rq_timeout,
+			   MAX_RETRIES, 1);
 	release_buffering(STp, 1);
 	*aSRpnt = SRpnt;
 	if (!SRpnt)
@@ -2213,7 +2272,8 @@ static int st_set_options(struct scsi_tape *STp, long options)
 			DEBC( printk(KERN_INFO "%s: Long timeout set to %d seconds.\n", name,
 			       (value & ~MT_ST_SET_LONG_TIMEOUT)));
 		} else {
-			STp->device->timeout = value * HZ;
+			blk_queue_rq_timeout(STp->device->request_queue,
+					     value * HZ);
 			DEBC( printk(KERN_INFO "%s: Normal timeout set to %d seconds.\n",
 				name, value) );
 		}
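
The remaining st.c hunks are one mechanical migration: the old per-device
STp->device->timeout field is gone, and the timeout now lives on the request
queue — written through the block layer's blk_queue_rq_timeout() and read
back as request_queue->rq_timeout. Both halves in isolation:

    /* set (the MT_ST_SET_TIMEOUT path above) */
    blk_queue_rq_timeout(STp->device->request_queue, value * HZ);

    /* get (every converted st_do_scsi/st_scsi_kern_execute call) */
    timeout = STp->device->request_queue->rq_timeout;
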
@@ -2311,7 +2371,8 @@ static int st_set_options(struct scsi_tape *STp, long options)
 static int read_mode_page(struct scsi_tape *STp, int page, int omit_block_descs)
 {
 	unsigned char cmd[MAX_COMMAND_SIZE];
-	struct st_request *SRpnt = NULL;
+	struct st_request *SRpnt;
+	int ret;
 
 	memset(cmd, 0, MAX_COMMAND_SIZE);
 	cmd[0] = MODE_SENSE;
@@ -2320,14 +2381,17 @@ static int read_mode_page(struct scsi_tape *STp, int page, int omit_block_descs)
 	cmd[2] = page;
 	cmd[4] = 255;
 
-	SRpnt = st_do_scsi(SRpnt, STp, cmd, cmd[4], DMA_FROM_DEVICE,
-			   STp->device->timeout, 0, 1);
-	if (SRpnt == NULL)
-		return (STp->buffer)->syscall_result;
+	SRpnt = st_allocate_request(STp);
+	if (!SRpnt)
+		return STp->buffer->syscall_result;
 
+	ret = st_scsi_kern_execute(SRpnt, cmd, DMA_FROM_DEVICE,
+				   STp->buffer->b_data, cmd[4],
+				   STp->device->request_queue->rq_timeout,
+				   MAX_RETRIES);
 	st_release_request(SRpnt);
 
-	return (STp->buffer)->syscall_result;
+	return ret ? : STp->buffer->syscall_result;
 }
 
 
@@ -2335,9 +2399,9 @@ static int read_mode_page(struct scsi_tape *STp, int page, int omit_block_descs)
    in the buffer is correctly formatted. The long timeout is used if slow is non-zero. */
 static int write_mode_page(struct scsi_tape *STp, int page, int slow)
 {
-	int pgo;
+	int pgo, timeout, ret = 0;
 	unsigned char cmd[MAX_COMMAND_SIZE];
-	struct st_request *SRpnt = NULL;
+	struct st_request *SRpnt;
 
 	memset(cmd, 0, MAX_COMMAND_SIZE);
 	cmd[0] = MODE_SELECT;
@@ -2351,14 +2415,21 @@ static int write_mode_page(struct scsi_tape *STp, int page, int slow)
 	(STp->buffer)->b_data[MH_OFF_DEV_SPECIFIC] &= ~MH_BIT_WP;
 	(STp->buffer)->b_data[pgo + MP_OFF_PAGE_NBR] &= MP_MSK_PAGE_NBR;
 
-	SRpnt = st_do_scsi(SRpnt, STp, cmd, cmd[4], DMA_TO_DEVICE,
-			   (slow ? STp->long_timeout : STp->device->timeout), 0, 1);
-	if (SRpnt == NULL)
-		return (STp->buffer)->syscall_result;
+	SRpnt = st_allocate_request(STp);
+	if (!SRpnt)
+		return ret;
+
+	timeout = slow ? STp->long_timeout :
+		STp->device->request_queue->rq_timeout;
+
+	ret = st_scsi_kern_execute(SRpnt, cmd, DMA_TO_DEVICE,
+				   STp->buffer->b_data, cmd[4], timeout, 0);
+	if (!ret)
+		ret = STp->buffer->syscall_result;
 
 	st_release_request(SRpnt);
 
-	return (STp->buffer)->syscall_result;
+	return ret;
 }
 
 
@@ -2464,7 +2535,7 @@ static int do_load_unload(struct scsi_tape *STp, struct file *filp, int load_cod
 	}
 	if (STp->immediate) {
 		cmd[1] = 1;	/* Don't wait for completion */
-		timeout = STp->device->timeout;
+		timeout = STp->device->request_queue->rq_timeout;
 	}
 	else
 		timeout = STp->long_timeout;
@@ -2476,13 +2547,16 @@ static int do_load_unload(struct scsi_tape *STp, struct file *filp, int load_cod
 		printk(ST_DEB_MSG "%s: Loading tape.\n", name);
 		);
 
-	SRpnt = st_do_scsi(NULL, STp, cmd, 0, DMA_NONE,
-			   timeout, MAX_RETRIES, 1);
+	SRpnt = st_allocate_request(STp);
 	if (!SRpnt)
-		return (STp->buffer)->syscall_result;
+		return STp->buffer->syscall_result;
+
+	retval = st_scsi_kern_execute(SRpnt, cmd, DMA_NONE, NULL, 0, timeout,
+				      MAX_RETRIES);
+	if (retval)
+		goto out;
 
 	retval = (STp->buffer)->syscall_result;
-	st_release_request(SRpnt);
 
 	if (!retval) {	/* SCSI command successful */
 
@@ -2501,6 +2575,8 @@ static int do_load_unload(struct scsi_tape *STp, struct file *filp, int load_cod
 		STps = &(STp->ps[STp->partition]);
 		STps->drv_file = STps->drv_block = (-1);
 	}
+out:
+	st_release_request(SRpnt);
 
 	return retval;
 }
@@ -2638,7 +2714,7 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
 		cmd[2] = (arg >> 16);
 		cmd[3] = (arg >> 8);
 		cmd[4] = arg;
-		timeout = STp->device->timeout;
+		timeout = STp->device->request_queue->rq_timeout;
                 DEBC(
                      if (cmd_in == MTWEOF)
                                printk(ST_DEB_MSG "%s: Writing %d filemarks.\n", name,
@@ -2656,7 +2732,7 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
 		cmd[0] = REZERO_UNIT;
 		if (STp->immediate) {
 			cmd[1] = 1;	/* Don't wait for completion */
-			timeout = STp->device->timeout;
+			timeout = STp->device->request_queue->rq_timeout;
 		}
                 DEBC(printk(ST_DEB_MSG "%s: Rewinding tape.\n", name));
 		fileno = blkno = at_sm = 0;
@@ -2669,7 +2745,7 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
 		cmd[0] = START_STOP;
 		if (STp->immediate) {
 			cmd[1] = 1;	/* Don't wait for completion */
-			timeout = STp->device->timeout;
+			timeout = STp->device->request_queue->rq_timeout;
 		}
 		cmd[4] = 3;
                 DEBC(printk(ST_DEB_MSG "%s: Retensioning tape.\n", name));
@@ -2702,7 +2778,7 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
 		cmd[1] = (arg ? 1 : 0);	/* Long erase with non-zero argument */
 		if (STp->immediate) {
 			cmd[1] |= 2;	/* Don't wait for completion */
-			timeout = STp->device->timeout;
+			timeout = STp->device->request_queue->rq_timeout;
 		}
 		else
 			timeout = STp->long_timeout * 8;
@@ -2754,7 +2830,7 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
 		(STp->buffer)->b_data[9] = (ltmp >> 16);
 		(STp->buffer)->b_data[10] = (ltmp >> 8);
 		(STp->buffer)->b_data[11] = ltmp;
-		timeout = STp->device->timeout;
+		timeout = STp->device->request_queue->rq_timeout;
                 DEBC(
 			if (cmd_in == MTSETBLK || cmd_in == SET_DENS_AND_BLK)
 				printk(ST_DEB_MSG
@@ -2776,12 +2852,15 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
 		return (-ENOSYS);
 	}
 
-	SRpnt = st_do_scsi(NULL, STp, cmd, datalen, direction,
-			   timeout, MAX_RETRIES, 1);
+	SRpnt = st_allocate_request(STp);
 	if (!SRpnt)
 		return (STp->buffer)->syscall_result;
 
-	ioctl_result = (STp->buffer)->syscall_result;
+	ioctl_result = st_scsi_kern_execute(SRpnt, cmd, direction,
+					    STp->buffer->b_data, datalen,
+					    timeout, MAX_RETRIES);
+	if (!ioctl_result)
+		ioctl_result = (STp->buffer)->syscall_result;
 
 	if (!ioctl_result) {	/* SCSI command successful */
 		st_release_request(SRpnt);
@@ -2943,10 +3022,17 @@ static int get_location(struct scsi_tape *STp, unsigned int *block, int *partiti
 		if (!logical && !STp->scsi2_logical)
 			scmd[1] = 1;
 	}
-	SRpnt = st_do_scsi(NULL, STp, scmd, 20, DMA_FROM_DEVICE,
-			STp->device->timeout, MAX_READY_RETRIES, 1);
+
+	SRpnt = st_allocate_request(STp);
 	if (!SRpnt)
-		return (STp->buffer)->syscall_result;
+		return STp->buffer->syscall_result;
+
+	result = st_scsi_kern_execute(SRpnt, scmd, DMA_FROM_DEVICE,
+				      STp->buffer->b_data, 20,
+				      STp->device->request_queue->rq_timeout,
+				      MAX_READY_RETRIES);
+	if (result)
+		goto out;
 
 	if ((STp->buffer)->syscall_result != 0 ||
 	    (STp->device->scsi_level >= SCSI_2 &&
@@ -2974,6 +3060,7 @@ static int get_location(struct scsi_tape *STp, unsigned int *block, int *partiti
                 DEBC(printk(ST_DEB_MSG "%s: Got tape pos. blk %d part %d.\n", name,
                             *block, *partition));
 	}
+out:
 	st_release_request(SRpnt);
 	SRpnt = NULL;
 
@@ -3045,13 +3132,17 @@ static int set_location(struct scsi_tape *STp, unsigned int block, int partition
 	}
 	if (STp->immediate) {
 		scmd[1] |= 1;		/* Don't wait for completion */
-		timeout = STp->device->timeout;
+		timeout = STp->device->request_queue->rq_timeout;
 	}
 
-	SRpnt = st_do_scsi(NULL, STp, scmd, 0, DMA_NONE,
-			   timeout, MAX_READY_RETRIES, 1);
+	SRpnt = st_allocate_request(STp);
 	if (!SRpnt)
-		return (STp->buffer)->syscall_result;
+		return STp->buffer->syscall_result;
+
+	result = st_scsi_kern_execute(SRpnt, scmd, DMA_NONE, NULL, 0,
+				      timeout, MAX_READY_RETRIES);
+	if (result)
+		goto out;
 
 	STps->drv_block = STps->drv_file = (-1);
 	STps->eof = ST_NOEOF;
@@ -3076,7 +3167,7 @@ static int set_location(struct scsi_tape *STp, unsigned int block, int partition
 			STps->drv_block = STps->drv_file = 0;
 		result = 0;
 	}
-
+out:
 	st_release_request(SRpnt);
 	SRpnt = NULL;
 
@@ -4029,7 +4120,7 @@ static int st_probe(struct device *dev)
 	tpnt->partition = 0;
 	tpnt->new_partition = 0;
 	tpnt->nbr_partitions = 0;
-	tpnt->device->timeout = ST_TIMEOUT;
+	blk_queue_rq_timeout(tpnt->device->request_queue, ST_TIMEOUT);
 	tpnt->long_timeout = ST_LONG_TIMEOUT;
 	tpnt->try_dio = try_direct_io && !SDp->host->unchecked_isa_dma;
 
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index 2fa830c0be27..a3a18ad73125 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -1108,8 +1108,7 @@ stex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		goto out_scsi_host_put;
 	}
 
-	hba->mmio_base = ioremap_nocache(pci_resource_start(pdev, 0),
-		pci_resource_len(pdev, 0));
+	hba->mmio_base = pci_ioremap_bar(pdev, 0);
 	if ( !hba->mmio_base) {
 		printk(KERN_ERR DRV_NAME "(%s): memory map failed\n",
 			pci_name(pdev));
diff --git a/drivers/scsi/sym53c416.c b/drivers/scsi/sym53c416.c
index f7d279542fa5..e5c369bb568f 100644
--- a/drivers/scsi/sym53c416.c
+++ b/drivers/scsi/sym53c416.c
@@ -6,7 +6,7 @@
  *  Changes : 
  * 
  *  Marcelo Tosatti <marcelo@conectiva.com.br> : Added io_request_lock locking
- *  Alan Cox <alan@redhat.com> : Cleaned up code formatting
+ *  Alan Cox <alan@lxorguk.ukuu.org.uk> : Cleaned up code formatting
  *				 Fixed an irq locking bug
  *				 Added ISAPnP support
  *  Bjoern A. Zeeb <bzeeb@zabbadoz.net> : Initial irq locking updates
diff --git a/drivers/scsi/tmscsim.c b/drivers/scsi/tmscsim.c
index 69ac6e590f1d..9a4273445c0d 100644
--- a/drivers/scsi/tmscsim.c
+++ b/drivers/scsi/tmscsim.c
@@ -2572,9 +2572,10 @@ static struct pci_driver dc390_driver = {
 
 static int __init dc390_module_init(void)
 {
-	if (!disable_clustering)
+	if (!disable_clustering) {
 		printk(KERN_INFO "DC390: clustering now enabled by default. If you get problems load\n");
 		printk(KERN_INFO "       with \"disable_clustering=1\" and report to maintainers\n");
+	}
 
 	if (tmscsim[0] == -1 || tmscsim[0] > 15) {
 		tmscsim[0] = 7;
diff --git a/drivers/scsi/u14-34f.c b/drivers/scsi/u14-34f.c
index 329eb8780e74..601e95141cbe 100644
--- a/drivers/scsi/u14-34f.c
+++ b/drivers/scsi/u14-34f.c
@@ -1111,7 +1111,8 @@ static int u14_34f_detect(struct scsi_host_template *tpnt) {
 
 static void map_dma(unsigned int i, unsigned int j) {
    unsigned int data_len = 0;
-   unsigned int k, count, pci_dir;
+   unsigned int k, pci_dir;
+   int count;
    struct scatterlist *sg;
    struct mscp *cpp;
    struct scsi_cmnd *SCpnt;
diff --git a/drivers/scsi/wd7000.c b/drivers/scsi/wd7000.c
index d4c13561f4a6..093610bcfcce 100644
--- a/drivers/scsi/wd7000.c
+++ b/drivers/scsi/wd7000.c
@@ -146,13 +146,13 @@
  *
  * use host->host_lock, not io_request_lock, cleanups
  *
- * 2002/10/04 - Alan Cox <alan@redhat.com>
+ * 2002/10/04 - Alan Cox <alan@lxorguk.ukuu.org.uk>
  *
  * Use dev_id for interrupts, kill __func__ pasting
  * Add a lock for the scb pool, clean up all other cli/sti usage stuff
  * Use the adapter lock for the other places we had the cli's
  *
- * 2002/10/06 - Alan Cox <alan@redhat.com>
+ * 2002/10/06 - Alan Cox <alan@lxorguk.ukuu.org.uk>
  *
  * Switch to new style error handling
  * Clean up delay to udelay, and yielding sleeps
diff --git a/include/scsi/fc/fc_els.h b/include/scsi/fc/fc_els.h
new file mode 100644
index 000000000000..195ca014d3ce
--- /dev/null
+++ b/include/scsi/fc/fc_els.h
@@ -0,0 +1,816 @@
+/*
+ * Copyright(c) 2007 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+#ifndef _FC_ELS_H_
+#define	_FC_ELS_H_
+
+/*
+ * Fibre Channel Switch - Enhanced Link Services definitions.
+ * From T11 FC-LS Rev 1.2 June 7, 2005.
+ */
+
+/*
+ * ELS Command codes - byte 0 of the frame payload
+ */
+enum fc_els_cmd {
+	ELS_LS_RJT =	0x01,	/* ELS reject */
+	ELS_LS_ACC =	0x02,	/* ELS Accept */
+	ELS_PLOGI =	0x03,	/* N_Port login */
+	ELS_FLOGI =	0x04,	/* F_Port login */
+	ELS_LOGO =	0x05,	/* Logout */
+	ELS_ABTX =	0x06,	/* Abort exchange - obsolete */
+	ELS_RCS =	0x07,	/* read connection status */
+	ELS_RES =	0x08,	/* read exchange status block */
+	ELS_RSS =	0x09,	/* read sequence status block */
+	ELS_RSI =	0x0a,	/* read sequence initiative */
+	ELS_ESTS =	0x0b,	/* establish streaming */
+	ELS_ESTC =	0x0c,	/* estimate credit */
+	ELS_ADVC =	0x0d,	/* advise credit */
+	ELS_RTV =	0x0e,	/* read timeout value */
+	ELS_RLS =	0x0f,	/* read link error status block */
+	ELS_ECHO =	0x10,	/* echo */
+	ELS_TEST =	0x11,	/* test */
+	ELS_RRQ =	0x12,	/* reinstate recovery qualifier */
+	ELS_REC =	0x13,	/* read exchange concise */
+	ELS_SRR =	0x14,	/* sequence retransmission request */
+	ELS_PRLI =	0x20,	/* process login */
+	ELS_PRLO =	0x21,	/* process logout */
+	ELS_SCN =	0x22,	/* state change notification */
+	ELS_TPLS =	0x23,	/* test process login state */
+	ELS_TPRLO =	0x24,	/* third party process logout */
+	ELS_LCLM =	0x25,	/* login control list mgmt (obs) */
+	ELS_GAID =	0x30,	/* get alias_ID */
+	ELS_FACT =	0x31,	/* fabric activate alias_id */
+	ELS_FDACDT =	0x32,	/* fabric deactivate alias_id */
+	ELS_NACT =	0x33,	/* N-port activate alias_id */
+	ELS_NDACT =	0x34,	/* N-port deactivate alias_id */
+	ELS_QOSR =	0x40,	/* quality of service request */
+	ELS_RVCS =	0x41,	/* read virtual circuit status */
+	ELS_PDISC =	0x50,	/* discover N_port service params */
+	ELS_FDISC =	0x51,	/* discover F_port service params */
+	ELS_ADISC =	0x52,	/* discover address */
+	ELS_RNC =	0x53,	/* report node cap (obs) */
+	ELS_FARP_REQ =	0x54,	/* FC ARP request */
+	ELS_FARP_REPL =	0x55,	/* FC ARP reply */
+	ELS_RPS =	0x56,	/* read port status block */
+	ELS_RPL =	0x57,	/* read port list */
+	ELS_RPBC =	0x58,	/* read port buffer condition */
+	ELS_FAN =	0x60,	/* fabric address notification */
+	ELS_RSCN =	0x61,	/* registered state change notification */
+	ELS_SCR =	0x62,	/* state change registration */
+	ELS_RNFT =	0x63,	/* report node FC-4 types */
+	ELS_CSR =	0x68,	/* clock synch. request */
+	ELS_CSU =	0x69,	/* clock synch. update */
+	ELS_LINIT =	0x70,	/* loop initialize */
+	ELS_LSTS =	0x72,	/* loop status */
+	ELS_RNID =	0x78,	/* request node ID data */
+	ELS_RLIR =	0x79,	/* registered link incident report */
+	ELS_LIRR =	0x7a,	/* link incident record registration */
+	ELS_SRL =	0x7b,	/* scan remote loop */
+	ELS_SBRP =	0x7c,	/* set bit-error reporting params */
+	ELS_RPSC =	0x7d,	/* report speed capabilities */
+	ELS_QSA =	0x7e,	/* query security attributes */
+	ELS_EVFP =	0x7f,	/* exchange virt. fabrics params */
+	ELS_LKA =	0x80,	/* link keep-alive */
+	ELS_AUTH_ELS =	0x90,	/* authentication ELS */
+};
+
+/*
+ * Initializer useful for decoding table.
+ * Please keep this in sync with the above definitions.
+ */
+#define	FC_ELS_CMDS_INIT {			\
+	[ELS_LS_RJT] =	"LS_RJT",		\
+	[ELS_LS_ACC] =	"LS_ACC",		\
+	[ELS_PLOGI] =	"PLOGI",		\
+	[ELS_FLOGI] =	"FLOGI",		\
+	[ELS_LOGO] =	"LOGO",			\
+	[ELS_ABTX] =	"ABTX",			\
+	[ELS_RCS] =	"RCS",			\
+	[ELS_RES] =	"RES",			\
+	[ELS_RSS] =	"RSS",			\
+	[ELS_RSI] =	"RSI",			\
+	[ELS_ESTS] =	"ESTS",			\
+	[ELS_ESTC] =	"ESTC",			\
+	[ELS_ADVC] =	"ADVC",			\
+	[ELS_RTV] =	"RTV",			\
+	[ELS_RLS] =	"RLS",			\
+	[ELS_ECHO] =	"ECHO",			\
+	[ELS_TEST] =	"TEST",			\
+	[ELS_RRQ] =	"RRQ",			\
+	[ELS_REC] =	"REC",			\
+	[ELS_SRR] =	"SRR",			\
+	[ELS_PRLI] =	"PRLI",			\
+	[ELS_PRLO] =	"PRLO",			\
+	[ELS_SCN] =	"SCN",			\
+	[ELS_TPLS] =	"TPLS",			\
+	[ELS_TPRLO] =	"TPRLO",		\
+	[ELS_LCLM] =	"LCLM",			\
+	[ELS_GAID] =	"GAID",			\
+	[ELS_FACT] =	"FACT",			\
+	[ELS_FDACDT] =	"FDACDT",		\
+	[ELS_NACT] =	"NACT",			\
+	[ELS_NDACT] =	"NDACT",		\
+	[ELS_QOSR] =	"QOSR",			\
+	[ELS_RVCS] =	"RVCS",			\
+	[ELS_PDISC] =	"PDISC",		\
+	[ELS_FDISC] =	"FDISC",		\
+	[ELS_ADISC] =	"ADISC",		\
+	[ELS_RNC] =	"RNC",			\
+	[ELS_FARP_REQ] = "FARP_REQ",		\
+	[ELS_FARP_REPL] =  "FARP_REPL",		\
+	[ELS_RPS] =	"RPS",			\
+	[ELS_RPL] =	"RPL",			\
+	[ELS_RPBC] =	"RPBC",			\
+	[ELS_FAN] =	"FAN",			\
+	[ELS_RSCN] =	"RSCN",			\
+	[ELS_SCR] =	"SCR",			\
+	[ELS_RNFT] =	"RNFT",			\
+	[ELS_CSR] =	"CSR",			\
+	[ELS_CSU] =	"CSU",			\
+	[ELS_LINIT] =	"LINIT",		\
+	[ELS_LSTS] =	"LSTS",			\
+	[ELS_RNID] =	"RNID",			\
+	[ELS_RLIR] =	"RLIR",			\
+	[ELS_LIRR] =	"LIRR",			\
+	[ELS_SRL] =	"SRL",			\
+	[ELS_SBRP] =	"SBRP",			\
+	[ELS_RPSC] =	"RPSC",			\
+	[ELS_QSA] =	"QSA",			\
+	[ELS_EVFP] =	"EVFP",			\
+	[ELS_LKA] =	"LKA",			\
+	[ELS_AUTH_ELS] = "AUTH_ELS",		\
+}
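+
+/*
+ * Usage sketch (illustrative, not part of this header): a name lookup
+ * built on the initializer above.  The fc_els_cmd_name() helper and its
+ * "unknown" fallback are assumptions, kept under #if 0 so that nothing
+ * here is compiled.
+ */
+#if 0
+static const char *fc_els_cmd_names[] = FC_ELS_CMDS_INIT;
+
+static inline const char *fc_els_cmd_name(enum fc_els_cmd op)
+{
+	if (op < ARRAY_SIZE(fc_els_cmd_names) && fc_els_cmd_names[op])
+		return fc_els_cmd_names[op];
+	return "unknown";
+}
+#endif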
+
+/*
+ * LS_ACC payload.
+ */
+struct fc_els_ls_acc {
+	__u8          la_cmd;		/* command code ELS_LS_ACC */
+	__u8          la_resv[3];	/* reserved */
+};
+
+/*
+ * ELS reject payload.
+ */
+struct fc_els_ls_rjt {
+	__u8	er_cmd;		/* command code ELS_LS_RJT */
+	__u8	er_resv[4];	/* reserved, must be zero */
+	__u8	er_reason;	/* reason (enum fc_els_rjt_reason below) */
+	__u8	er_explan;	/* explanation (enum fc_els_rjt_explan below) */
+	__u8	er_vendor;	/* vendor specific code */
+};
+
+/*
+ * ELS reject reason codes (er_reason).
+ */
+enum fc_els_rjt_reason {
+	ELS_RJT_NONE =		0,	/* no reject - not to be sent */
+	ELS_RJT_INVAL =		0x01,	/* invalid ELS command code */
+	ELS_RJT_LOGIC =		0x03,	/* logical error */
+	ELS_RJT_BUSY =		0x05,	/* logical busy */
+	ELS_RJT_PROT =		0x07,	/* protocol error */
+	ELS_RJT_UNAB =		0x09,	/* unable to perform command request */
+	ELS_RJT_UNSUP =		0x0b,	/* command not supported */
+	ELS_RJT_INPROG =	0x0e,	/* command already in progress */
+	ELS_RJT_VENDOR =	0xff,	/* vendor specific error */
+};
+
+
+/*
+ * reason code explanation (er_explan).
+ */
+enum fc_els_rjt_explan {
+	ELS_EXPL_NONE =		0x00,	/* No additional explanation */
+	ELS_EXPL_SPP_OPT_ERR =	0x01,	/* service parameter error - options */
+	ELS_EXPL_SPP_ICTL_ERR =	0x03,	/* service parm error - initiator ctl */
+	ELS_EXPL_AH =		0x11,	/* invalid association header */
+	ELS_EXPL_AH_REQ =	0x13,	/* association_header required */
+	ELS_EXPL_SID =		0x15,	/* invalid originator S_ID */
+	ELS_EXPL_OXID_RXID =	0x17,	/* invalid OX_ID-RX_ID combination */
+	ELS_EXPL_INPROG =	0x19,	/* Request already in progress */
+	ELS_EXPL_PLOGI_REQD =	0x1e,	/* N_Port login required */
+	ELS_EXPL_INSUF_RES =	0x29,	/* insufficient resources */
+	ELS_EXPL_UNAB_DATA =	0x2a,	/* unable to supply requested data */
+	ELS_EXPL_UNSUPR =	0x2c,	/* Request not supported */
+	ELS_EXPL_INV_LEN =	0x2d,	/* Invalid payload length */
+	/* TBD - above definitions incomplete */
+};
+
+/*
+ * Common service parameters (N ports).
+ */
+struct fc_els_csp {
+	__u8		sp_hi_ver;	/* highest version supported (obs.) */
+	__u8		sp_lo_ver;	/* lowest version supported (obs.) */
+	__be16		sp_bb_cred;	/* buffer-to-buffer credits */
+	__be16		sp_features;	/* common feature flags */
+	__be16		sp_bb_data;	/* b-b state number and data field sz */
+	union {
+		struct {
+			__be16	_sp_tot_seq; /* total concurrent sequences */
+			__be16	_sp_rel_off; /* rel. offset by info cat */
+		} sp_plogi;
+		struct {
+			__be32	_sp_r_a_tov; /* resource alloc. timeout msec */
+		} sp_flogi_acc;
+	} sp_u;
+	__be32		sp_e_d_tov;	/* error detect timeout value */
+};
+#define	sp_tot_seq	sp_u.sp_plogi._sp_tot_seq
+#define	sp_rel_off	sp_u.sp_plogi._sp_rel_off
+#define	sp_r_a_tov	sp_u.sp_flogi_acc._sp_r_a_tov
+
+#define	FC_SP_BB_DATA_MASK 0xfff /* mask for data field size in sp_bb_data */
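+
+/*
+ * Usage sketch (illustrative): the receive data field size is the low
+ * 12 bits of sp_bb_data, e.g.
+ *
+ *	mfs = ntohs(csp->sp_bb_data) & FC_SP_BB_DATA_MASK;
+ *
+ * where csp is assumed to point at the struct fc_els_csp of a received
+ * login payload.
+ */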
+
+/*
+ * Minimum and maximum values for max data field size in service parameters.
+ */
+#define	FC_SP_MIN_MAX_PAYLOAD	FC_MIN_MAX_PAYLOAD
+#define	FC_SP_MAX_MAX_PAYLOAD	FC_MAX_PAYLOAD
+
+/*
+ * sp_features
+ */
+#define	FC_SP_FT_CIRO	0x8000	/* continuously increasing rel. off. */
+#define	FC_SP_FT_CLAD	0x8000	/* clean address (in FLOGI LS_ACC) */
+#define	FC_SP_FT_RAND	0x4000	/* random relative offset */
+#define	FC_SP_FT_VAL	0x2000	/* valid vendor version level */
+#define	FC_SP_FT_FPORT	0x1000	/* F port (1) vs. N port (0) */
+#define	FC_SP_FT_ABB	0x0800	/* alternate BB_credit management */
+#define	FC_SP_FT_EDTR	0x0400	/* E_D_TOV Resolution is nanoseconds */
+#define	FC_SP_FT_MCAST	0x0200	/* multicast */
+#define	FC_SP_FT_BCAST	0x0100	/* broadcast */
+#define	FC_SP_FT_HUNT	0x0080	/* hunt group */
+#define	FC_SP_FT_SIMP	0x0040	/* dedicated simplex */
+#define	FC_SP_FT_SEC	0x0020	/* reserved for security */
+#define	FC_SP_FT_CSYN	0x0010	/* clock synch. supported */
+#define	FC_SP_FT_RTTOV	0x0008	/* R_T_TOV value 100 uS, else 100 mS */
+#define	FC_SP_FT_HALF	0x0004	/* dynamic half duplex */
+#define	FC_SP_FT_SEQC	0x0002	/* SEQ_CNT */
+#define	FC_SP_FT_PAYL	0x0001	/* FLOGI payload length 256, else 116 */
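+
+/*
+ * Usage sketch (illustrative): whether the login responder is a fabric
+ * F port can be read from the F/N port bit above, e.g.
+ *
+ *	if (ntohs(csp->sp_features) & FC_SP_FT_FPORT)
+ *		... responder is an F port ...
+ */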
+
+/*
+ * Class-specific service parameters.
+ */
+struct fc_els_cssp {
+	__be16		cp_class;	/* class flags */
+	__be16		cp_init;	/* initiator flags */
+	__be16		cp_recip;	/* recipient flags */
+	__be16		cp_rdfs;	/* receive data field size */
+	__be16		cp_con_seq;	/* concurrent sequences */
+	__be16		cp_ee_cred;	/* N-port end-to-end credit */
+	__u8		cp_resv1;	/* reserved */
+	__u8		cp_open_seq;	/* open sequences per exchange */
+	__u8		_cp_resv2[2];	/* reserved */
+};
+
+/*
+ * cp_class flags.
+ */
+#define	FC_CPC_VALID	0x8000		/* class valid */
+#define	FC_CPC_IMIX	0x4000		/* intermix mode */
+#define	FC_CPC_SEQ	0x0800		/* sequential delivery */
+#define	FC_CPC_CAMP	0x0200		/* camp-on */
+#define	FC_CPC_PRI	0x0080		/* priority */
+
+/*
+ * cp_init flags.
+ * (TBD: not all flags defined here).
+ */
+#define	FC_CPI_CSYN	0x0010		/* clock synch. capable */
+
+/*
+ * cp_recip flags.
+ */
+#define	FC_CPR_CSYN	0x0008		/* clock synch. capable */
+
+/*
+ * NFC_ELS_FLOGI: Fabric login request.
+ * NFC_ELS_PLOGI: Port login request (same format).
+ */
+struct fc_els_flogi {
+	__u8		fl_cmd;		/* command */
+	__u8		_fl_resvd[3];	/* must be zero */
+	struct fc_els_csp fl_csp;	/* common service parameters */
+	__be64		fl_wwpn;	/* port name */
+	__be64		fl_wwnn;	/* node name */
+	struct fc_els_cssp fl_cssp[4];	/* class 1-4 service parameters */
+	__u8		fl_vend[16];	/* vendor version level */
+} __attribute__((__packed__));
+
+/*
+ * Process login service parameter page.
+ */
+struct fc_els_spp {
+	__u8		spp_type;	/* type code or common service params */
+	__u8		spp_type_ext;	/* type code extension */
+	__u8		spp_flags;
+	__u8		_spp_resvd;
+	__be32		spp_orig_pa;	/* originator process associator */
+	__be32		spp_resp_pa;	/* responder process associator */
+	__be32		spp_params;	/* service parameters */
+};
+
+/*
+ * spp_flags.
+ */
+#define	FC_SPP_OPA_VAL	    0x80	/* originator proc. assoc. valid */
+#define	FC_SPP_RPA_VAL	    0x40	/* responder proc. assoc. valid */
+#define	FC_SPP_EST_IMG_PAIR 0x20	/* establish image pair */
+#define	FC_SPP_RESP_MASK    0x0f	/* mask for response code (below) */
+
+/*
+ * SPP response code in spp_flags - lower 4 bits.
+ */
+enum fc_els_spp_resp {
+	FC_SPP_RESP_ACK	=	1,	/* request executed */
+	FC_SPP_RESP_RES =	2,	/* unable due to lack of resources */
+	FC_SPP_RESP_INIT =	3,	/* initialization not complete */
+	FC_SPP_RESP_NO_PA = 	4,	/* unknown process associator */
+	FC_SPP_RESP_CONF = 	5,	/* configuration precludes image pair */
+	FC_SPP_RESP_COND = 	6,	/* request completed conditionally */
+	FC_SPP_RESP_MULT = 	7,	/* unable to handle multiple SPPs */
+	FC_SPP_RESP_INVL = 	8,	/* SPP is invalid */
+};
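+
+/*
+ * Decoding sketch (illustrative): the response code shares spp_flags
+ * with the flag bits above, so mask before comparing, e.g.
+ *
+ *	if ((spp->spp_flags & FC_SPP_RESP_MASK) == FC_SPP_RESP_ACK)
+ *		... PRLI page was accepted ...
+ */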
+
+/*
+ * ELS_RRQ - Reinstate Recovery Qualifier
+ */
+struct fc_els_rrq {
+	__u8		rrq_cmd;	/* command (0x12) */
+	__u8		rrq_zero[3];	/* specified as zero - part of cmd */
+	__u8		rrq_resvd;	/* reserved */
+	__u8		rrq_s_id[3];	/* originator FID */
+	__be16		rrq_ox_id;	/* originator exchange ID */
+	__be16		rrq_rx_id;	/* responders exchange ID */
+};
+
+/*
+ * ELS_REC - Read exchange concise.
+ */
+struct fc_els_rec {
+	__u8		rec_cmd;	/* command (0x13) */
+	__u8		rec_zero[3];	/* specified as zero - part of cmd */
+	__u8		rec_resvd;	/* reserved */
+	__u8		rec_s_id[3];	/* originator FID */
+	__be16		rec_ox_id;	/* originator exchange ID */
+	__be16		rec_rx_id;	/* responders exchange ID */
+};
+
+/*
+ * ELS_REC LS_ACC payload.
+ */
+struct fc_els_rec_acc {
+	__u8		reca_cmd;	/* accept (0x02) */
+	__u8		reca_zero[3];	/* specified as zero - part of cmd */
+	__be16		reca_ox_id;	/* originator exchange ID */
+	__be16		reca_rx_id;	/* responders exchange ID */
+	__u8		reca_resvd1;	/* reserved */
+	__u8		reca_ofid[3];	/* originator FID */
+	__u8		reca_resvd2;	/* reserved */
+	__u8		reca_rfid[3];	/* responder FID */
+	__be32		reca_fc4value;	/* FC4 value */
+	__be32		reca_e_stat;	/* ESB (exchange status block) status */
+};
+
+/*
+ * ELS_PRLI - Process login request and response.
+ */
+struct fc_els_prli {
+	__u8		prli_cmd;	/* command */
+	__u8		prli_spp_len;	/* length of each serv. parm. page */
+	__be16		prli_len;	/* length of entire payload */
+	/* service parameter pages follow */
+};
+
+/*
+ * ELS_ADISC payload
+ */
+struct fc_els_adisc {
+	__u8		adisc_cmd;
+	__u8		adisc_resv[3];
+	__u8            adisc_resv1;
+	__u8            adisc_hard_addr[3];
+	__be64          adisc_wwpn;
+	__be64          adisc_wwnn;
+	__u8            adisc_resv2;
+	__u8            adisc_port_id[3];
+} __attribute__((__packed__));
+
+/*
+ * ELS_LOGO - process or fabric logout.
+ */
+struct fc_els_logo {
+	__u8		fl_cmd;		/* command code */
+	__u8		fl_zero[3];	/* specified as zero - part of cmd */
+	__u8		fl_resvd;	/* reserved */
+	__u8		fl_n_port_id[3];/* N port ID */
+	__be64		fl_n_port_wwn;	/* port name */
+};
+
+/*
+ * ELS_RTV - read timeout value.
+ */
+struct fc_els_rtv {
+	__u8		rtv_cmd;	/* command code 0x0e */
+	__u8		rtv_zero[3];	/* specified as zero - part of cmd */
+};
+
+/*
+ * LS_ACC for ELS_RTV - read timeout value.
+ */
+struct fc_els_rtv_acc {
+	__u8		rtv_cmd;	/* command code 0x02 */
+	__u8		rtv_zero[3];	/* specified as zero - part of cmd */
+	__be32		rtv_r_a_tov;	/* resource allocation timeout value */
+	__be32		rtv_e_d_tov;	/* error detection timeout value */
+	__be32		rtv_toq;	/* timeout qualifier (see below) */
+};
+
+/*
+ * rtv_toq bits.
+ */
+#define	FC_ELS_RTV_EDRES (1 << 26)	/* E_D_TOV resolution is nS else mS */
+#define	FC_ELS_RTV_RTTOV (1 << 19)	/* R_T_TOV is 100 uS else 100 mS */
+
+/*
+ * ELS_SCR - state change registration payload.
+ */
+struct fc_els_scr {
+	__u8		scr_cmd;	/* command code */
+	__u8		scr_resv[6];	/* reserved */
+	__u8		scr_reg_func;	/* registration function (see below) */
+};
+
+enum fc_els_scr_func {
+	ELS_SCRF_FAB =	1,	/* fabric-detected registration */
+	ELS_SCRF_NPORT = 2,	/* Nx_Port-detected registration */
+	ELS_SCRF_FULL =	3,	/* full registration */
+	ELS_SCRF_CLEAR = 255,	/* remove any current registrations */
+};
+
+/*
+ * ELS_RSCN - registered state change notification payload.
+ */
+struct fc_els_rscn {
+	__u8		rscn_cmd;	/* RSCN opcode (0x61) */
+	__u8		rscn_page_len;	/* page length (4) */
+	__be16		rscn_plen;	/* payload length including this word */
+
+	/* followed by 4-byte generic affected Port_ID pages */
+};
+
+struct fc_els_rscn_page {
+	__u8		rscn_page_flags; /* event and address format */
+	__u8		rscn_fid[3];	/* fabric ID */
+};
+
+#define	ELS_RSCN_EV_QUAL_BIT	2	/* shift count for event qualifier */
+#define	ELS_RSCN_EV_QUAL_MASK	0xf	/* mask for event qualifier */
+#define	ELS_RSCN_ADDR_FMT_BIT	0	/* shift count for address format */
+#define	ELS_RSCN_ADDR_FMT_MASK	0x3	/* mask for address format */
+
+enum fc_els_rscn_ev_qual {
+	ELS_EV_QUAL_NONE = 0,		/* unspecified */
+	ELS_EV_QUAL_NS_OBJ = 1,		/* changed name server object */
+	ELS_EV_QUAL_PORT_ATTR = 2,	/* changed port attribute */
+	ELS_EV_QUAL_SERV_OBJ = 3,	/* changed service object */
+	ELS_EV_QUAL_SW_CONFIG = 4,	/* changed switch configuration */
+	ELS_EV_QUAL_REM_OBJ = 5,	/* removed object */
+};
+
+enum fc_els_rscn_addr_fmt {
+	ELS_ADDR_FMT_PORT = 0,	/* rscn_fid is a port address */
+	ELS_ADDR_FMT_AREA = 1,	/* rscn_fid is an area address */
+	ELS_ADDR_FMT_DOM = 2,	/* rscn_fid is a domain address */
+	ELS_ADDR_FMT_FAB = 3,	/* anything on fabric may have changed */
+};
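+
+/*
+ * Decoding sketch (illustrative): both subfields live in
+ * rscn_page_flags and use the shift/mask pairs above (pp is assumed to
+ * point at a struct fc_els_rscn_page):
+ *
+ *	ev_qual = (pp->rscn_page_flags >> ELS_RSCN_EV_QUAL_BIT) &
+ *		  ELS_RSCN_EV_QUAL_MASK;
+ *	fmt = (pp->rscn_page_flags >> ELS_RSCN_ADDR_FMT_BIT) &
+ *	      ELS_RSCN_ADDR_FMT_MASK;
+ */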
+
+/*
+ * ELS_RNID - request Node ID.
+ */
+struct fc_els_rnid {
+	__u8		rnid_cmd;	/* RNID opcode (0x78) */
+	__u8		rnid_resv[3];	/* reserved */
+	__u8		rnid_fmt;	/* data format */
+	__u8		rnid_resv2[3];	/* reserved */
+};
+
+/*
+ * Node Identification Data formats (rnid_fmt)
+ */
+enum fc_els_rnid_fmt {
+	ELS_RNIDF_NONE = 0,		/* no specific identification data */
+	ELS_RNIDF_GEN = 0xdf,		/* general topology discovery format */
+};
+
+/*
+ * ELS_RNID response.
+ */
+struct fc_els_rnid_resp {
+	__u8		rnid_cmd;	/* response code (LS_ACC) */
+	__u8		rnid_resv[3];	/* reserved */
+	__u8		rnid_fmt;	/* data format */
+	__u8		rnid_cid_len;	/* common ID data length */
+	__u8		rnid_resv2;	/* reserved */
+	__u8		rnid_sid_len;	/* specific ID data length */
+};
+
+struct fc_els_rnid_cid {
+	__be64		rnid_wwpn;	/* N port name */
+	__be64		rnid_wwnn;	/* node name */
+};
+
+struct fc_els_rnid_gen {
+	__u8		rnid_vend_id[16]; /* vendor-unique ID */
+	__be32		rnid_atype;	/* associated type (see below) */
+	__be32		rnid_phys_port;	/* physical port number */
+	__be32		rnid_att_nodes;	/* number of attached nodes */
+	__u8		rnid_node_mgmt;	/* node management (see below) */
+	__u8		rnid_ip_ver;	/* IP version (see below) */
+	__be16		rnid_prot_port;	/* UDP / TCP port number */
+	__be32		rnid_ip_addr[4]; /* IP address */
+	__u8		rnid_resvd[2];	/* reserved */
+	__be16		rnid_vend_spec;	/* vendor-specific field */
+};
+
+enum fc_els_rnid_atype {
+	ELS_RNIDA_UNK =		0x01,	/* unknown */
+	ELS_RNIDA_OTHER =	0x02,	/* none of the following */
+	ELS_RNIDA_HUB =		0x03,
+	ELS_RNIDA_SWITCH =	0x04,
+	ELS_RNIDA_GATEWAY =	0x05,
+	ELS_RNIDA_CONV =	0x06,   /* Obsolete, do not use this value */
+	ELS_RNIDA_HBA =	        0x07,   /* Obsolete, do not use this value */
+	ELS_RNIDA_PROXY =       0x08,   /* Obsolete, do not use this value */
+	ELS_RNIDA_STORAGE =	0x09,
+	ELS_RNIDA_HOST =	0x0a,
+	ELS_RNIDA_SUBSYS =	0x0b,	/* storage subsystem (e.g., RAID) */
+	ELS_RNIDA_ACCESS =	0x0e,	/* access device (e.g. media changer) */
+	ELS_RNIDA_NAS =		0x11,	/* NAS server */
+	ELS_RNIDA_BRIDGE =	0x12,	/* bridge */
+	ELS_RNIDA_VIRT =	0x13,	/* virtualization device */
+	ELS_RNIDA_MF =		0xff,	/* multifunction device (bits below) */
+	ELS_RNIDA_MF_HUB =	1UL << 31, 	/* hub */
+	ELS_RNIDA_MF_SW =	1UL << 30, 	/* switch */
+	ELS_RNIDA_MF_GW =	1UL << 29,	/* gateway */
+	ELS_RNIDA_MF_ST =	1UL << 28,	/* storage */
+	ELS_RNIDA_MF_HOST =	1UL << 27,	/* host */
+	ELS_RNIDA_MF_SUB =	1UL << 26,	/* storage subsystem */
+	ELS_RNIDA_MF_ACC =	1UL << 25,	/* storage access dev */
+	ELS_RNIDA_MF_WDM =	1UL << 24,	/* wavelength division mux */
+	ELS_RNIDA_MF_NAS =	1UL << 23,	/* NAS server */
+	ELS_RNIDA_MF_BR =	1UL << 22,	/* bridge */
+	ELS_RNIDA_MF_VIRT =	1UL << 21,	/* virtualization device */
+};
+
+enum fc_els_rnid_mgmt {
+	ELS_RNIDM_SNMP =	0,
+	ELS_RNIDM_TELNET =	1,
+	ELS_RNIDM_HTTP =	2,
+	ELS_RNIDM_HTTPS =	3,
+	ELS_RNIDM_XML =		4,	/* HTTP + XML */
+};
+
+enum fc_els_rnid_ipver {
+	ELS_RNIDIP_NONE =	0,	/* no IP support or node mgmt. */
+	ELS_RNIDIP_V4 =		1,	/* IPv4 */
+	ELS_RNIDIP_V6 =		2,	/* IPv6 */
+};
+
+/*
+ * ELS RPL - Read Port List.
+ */
+struct fc_els_rpl {
+	__u8		rpl_cmd;	/* command */
+	__u8		rpl_resv[5];	/* reserved - must be zero */
+	__be16		rpl_max_size;	/* maximum response size or zero */
+	__u8		rpl_resv1;	/* reserved - must be zero */
+	__u8		rpl_index[3];	/* starting index */
+};
+
+/*
+ * Port number block in RPL response.
+ */
+struct fc_els_pnb {
+	__be32		pnb_phys_pn;	/* physical port number */
+	__u8		pnb_resv;	/* reserved */
+	__u8		pnb_port_id[3];	/* port ID */
+	__be64		pnb_wwpn;	/* port name */
+};
+
+/*
+ * RPL LS_ACC response.
+ */
+struct fc_els_rpl_resp {
+	__u8		rpl_cmd;	/* ELS_LS_ACC */
+	__u8		rpl_resv1;	/* reserved - must be zero */
+	__be16		rpl_plen;	/* payload length */
+	__u8		rpl_resv2;	/* reserved - must be zero */
+	__u8		rpl_llen[3];	/* list length */
+	__u8		rpl_resv3;	/* reserved - must be zero */
+	__u8		rpl_index[3];	/* starting index */
+	struct fc_els_pnb rpl_pnb[1];	/* variable number of PNBs */
+};
+
+/*
+ * Link Error Status Block.
+ */
+struct fc_els_lesb {
+	__be32		lesb_link_fail;	/* link failure count */
+	__be32		lesb_sync_loss;	/* loss of synchronization count */
+	__be32		lesb_sig_loss;	/* loss of signal count */
+	__be32		lesb_prim_err;	/* primitive sequence error count */
+	__be32		lesb_inv_word;	/* invalid transmission word count */
+	__be32		lesb_inv_crc;	/* invalid CRC count */
+};
+
+/*
+ * ELS RPS - Read Port Status Block request.
+ */
+struct fc_els_rps {
+	__u8		rps_cmd;	/* command */
+	__u8		rps_resv[2];	/* reserved - must be zero */
+	__u8		rps_flag;	/* flag - see below */
+	__be64		rps_port_spec;	/* port selection */
+};
+
+enum fc_els_rps_flag {
+	FC_ELS_RPS_DID =	0x00,	/* port identified by D_ID of req. */
+	FC_ELS_RPS_PPN =	0x01,	/* port_spec is physical port number */
+	FC_ELS_RPS_WWPN =	0x02,	/* port_spec is port WWN */
+};
+
+/*
+ * ELS RPS LS_ACC response.
+ */
+struct fc_els_rps_resp {
+	__u8		rps_cmd;	/* command - LS_ACC */
+	__u8		rps_resv[2];	/* reserved - must be zero */
+	__u8		rps_flag;	/* flag - see below */
+	__u8		rps_resv2[2];	/* reserved */
+	__be16		rps_status;	/* port status - see below */
+	struct fc_els_lesb rps_lesb;	/* link error status block */
+};
+
+enum fc_els_rps_resp_flag {
+	FC_ELS_RPS_LPEV =	0x01,	/* L_port extension valid */
+};
+
+enum fc_els_rps_resp_status {
+	FC_ELS_RPS_PTP =	1 << 5,	/* point-to-point connection */
+	FC_ELS_RPS_LOOP =	1 << 4,	/* loop mode */
+	FC_ELS_RPS_FAB =	1 << 3,	/* fabric present */
+	FC_ELS_RPS_NO_SIG =	1 << 2,	/* loss of signal */
+	FC_ELS_RPS_NO_SYNC =	1 << 1,	/* loss of synchronization */
+	FC_ELS_RPS_RESET =	1 << 0,	/* in link reset protocol */
+};
+
+/*
+ * ELS LIRR - Link Incident Record Registration request.
+ */
+struct fc_els_lirr {
+	__u8		lirr_cmd;	/* command */
+	__u8		lirr_resv[3];	/* reserved - must be zero */
+	__u8		lirr_func;	/* registration function */
+	__u8		lirr_fmt;	/* FC-4 type of RLIR requested */
+	__u8		lirr_resv2[2];	/* reserved - must be zero */
+};
+
+enum fc_els_lirr_func {
+	ELS_LIRR_SET_COND = 	0x01,	/* set - conditionally receive */
+	ELS_LIRR_SET_UNCOND = 	0x02,	/* set - unconditionally receive */
+	ELS_LIRR_CLEAR = 	0xff	/* clear registration */
+};
+
+/*
+ * ELS SRL - Scan Remote Loop request.
+ */
+struct fc_els_srl {
+	__u8		srl_cmd;	/* command */
+	__u8		srl_resv[3];	/* reserved - must be zero */
+	__u8		srl_flag;	/* flag - see below */
+	__u8		srl_flag_param[3];	/* flag parameter */
+};
+
+enum fc_els_srl_flag {
+	FC_ELS_SRL_ALL =	0x00,	/* scan all FL ports */
+	FC_ELS_SRL_ONE =	0x01,	/* scan specified loop */
+	FC_ELS_SRL_EN_PER =	0x02,	/* enable periodic scanning (param) */
+	FC_ELS_SRL_DIS_PER =	0x03,	/* disable periodic scanning */
+};
+
+/*
+ * ELS RLS - Read Link Error Status Block request.
+ */
+struct fc_els_rls {
+	__u8		rls_cmd;	/* command */
+	__u8		rls_resv[4];	/* reserved - must be zero */
+	__u8		rls_port_id[3];	/* port ID */
+};
+
+/*
+ * ELS RLS LS_ACC Response.
+ */
+struct fc_els_rls_resp {
+	__u8		rls_cmd;	/* ELS_LS_ACC */
+	__u8		rls_resv[3];	/* reserved - must be zero */
+	struct fc_els_lesb rls_lesb;	/* link error status block */
+};
+
+/*
+ * ELS RLIR - Registered Link Incident Report.
+ * This is followed by the CLIR and the CLID, described below.
+ */
+struct fc_els_rlir {
+	__u8		rlir_cmd;	/* command */
+	__u8		rlir_resv[3];	/* reserved - must be zero */
+	__u8		rlir_fmt;	/* format (FC4-type if type specific) */
+	__u8		rlir_clr_len;	/* common link incident record length */
+	__u8		rlir_cld_len;	/* common link incident desc. length */
+	__u8		rlir_slr_len;	/* spec. link incident record length */
+};
+
+/*
+ * CLIR - Common Link Incident Record Data. - Sent via RLIR.
+ */
+struct fc_els_clir {
+	__be64		clir_wwpn;	/* incident port name */
+	__be64		clir_wwnn;	/* incident port node name */
+	__u8		clir_port_type;	/* incident port type */
+	__u8		clir_port_id[3];	/* incident port ID */
+
+	__be64		clir_conn_wwpn;	/* connected port name */
+	__be64		clir_conn_wwnn;	/* connected node name */
+	__be64		clir_fab_name;	/* fabric name */
+	__be32		clir_phys_port;	/* physical port number */
+	__be32		clir_trans_id;	/* transaction ID */
+	__u8		clir_resv[3];	/* reserved */
+	__u8		clir_ts_fmt;	/* time stamp format */
+	__be64		clir_timestamp;	/* time stamp */
+};
+
+/*
+ * CLIR clir_ts_fmt - time stamp format values.
+ */
+enum fc_els_clir_ts_fmt {
+	ELS_CLIR_TS_UNKNOWN = 	0,	/* time stamp field unknown */
+	ELS_CLIR_TS_SEC_FRAC = 	1,	/* time in seconds and fractions */
+	ELS_CLIR_TS_CSU =	2,	/* time in clock synch update format */
+};
+
+/*
+ * Common Link Incident Descriptor - sent via RLIR.
+ */
+struct fc_els_clid {
+	__u8		clid_iq;	/* incident qualifier flags */
+	__u8		clid_ic;	/* incident code */
+	__be16		clid_epai;	/* domain/area of ISL */
+};
+
+/*
+ * CLID incident qualifier flags.
+ */
+enum fc_els_clid_iq {
+	ELS_CLID_SWITCH =	0x20,	/* incident port is a switch node */
+	ELS_CLID_E_PORT =	0x10,	/* incident is an ISL (E) port */
+	ELS_CLID_SEV_MASK =	0x0c,	/* severity 2-bit field mask */
+	ELS_CLID_SEV_INFO =	0x00,	/* report is informational */
+	ELS_CLID_SEV_INOP =	0x08,	/* link not operational */
+	ELS_CLID_SEV_DEG =	0x04,	/* link degraded but operational */
+	ELS_CLID_LASER =	0x02,	/* subassembly is a laser */
+	ELS_CLID_FRU =		0x01,	/* format can identify a FRU */
+};
+
+/*
+ * CLID incident code.
+ */
+enum fc_els_clid_ic {
+	ELS_CLID_IC_IMPL =	1,	/* implicit incident */
+	ELS_CLID_IC_BER =	2,	/* bit-error-rate threshold exceeded */
+	ELS_CLID_IC_LOS =	3,	/* loss of synch or signal */
+	ELS_CLID_IC_NOS =	4,	/* non-operational primitive sequence */
+	ELS_CLID_IC_PST =	5,	/* primitive sequence timeout */
+	ELS_CLID_IC_INVAL =	6,	/* invalid primitive sequence */
+	ELS_CLID_IC_LOOP_TO =	7,	/* loop initialization time out */
+	ELS_CLID_IC_LIP =	8,	/* receiving LIP */
+};
+
+#endif /* _FC_ELS_H_ */
diff --git a/include/scsi/fc/fc_encaps.h b/include/scsi/fc/fc_encaps.h
new file mode 100644
index 000000000000..f180c3e16220
--- /dev/null
+++ b/include/scsi/fc/fc_encaps.h
@@ -0,0 +1,138 @@
+/*
+ * Copyright(c) 2007 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+#ifndef _FC_ENCAPS_H_
+#define _FC_ENCAPS_H_
+
+/*
+ * Protocol definitions from RFC 3643 - Fibre Channel Frame Encapsulation.
+ *
+ * Note:  The frame length field is the number of 32-bit words in
+ * the encapsulation including the fcip_encaps_header, CRC and EOF words.
+ * The minimum frame length value in bytes is (32 + 24 + 4 + 4) = 64.
+ * The maximum frame length value in bytes is (32 + 24 + 2112 + 4 + 4) = 2176.
+ */
+#define FC_ENCAPS_MIN_FRAME_LEN 64	/* min frame len (bytes) (see above) */
+#define FC_ENCAPS_MAX_FRAME_LEN (FC_ENCAPS_MIN_FRAME_LEN + FC_MAX_PAYLOAD)
+
+#define FC_ENCAPS_VER       1           /* current version number */
+
+struct fc_encaps_hdr {
+	__u8	fc_proto;	/* protocol number */
+	__u8	fc_ver;		/* version of encapsulation */
+	__u8	fc_proto_n;	/* ones complement of protocol */
+	__u8	fc_ver_n;	/* ones complement of version */
+
+	unsigned char fc_proto_data[8]; /* protocol specific data */
+
+	__be16	fc_len_flags;	/* 10-bit length/4 w/ 6 flag bits */
+	__be16	fc_len_flags_n;	/* ones complement of length / flags */
+
+	/*
+	 * Offset 0x10
+	 */
+	__be32	fc_time[2];	/* time stamp: seconds and fraction */
+	__be32	fc_crc;		/* CRC */
+	__be32	fc_sof;		/* start of frame (see FC_SOF below) */
+
+	/* 0x20 - FC frame content followed by EOF word */
+};
+
+#define FCIP_ENCAPS_HDR_LEN 0x20	/* expected length for asserts */
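+
+/*
+ * Worked example (illustrative): a minimum-length encapsulated frame is
+ * FC_ENCAPS_MIN_FRAME_LEN = 64 bytes, i.e. 16 32-bit words, so the
+ * length portion of fc_len_flags carries 16 and fc_len_flags_n its
+ * ones complement.
+ */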
+
+/*
+ * Macros for making redundant copies of EOF and SOF.
+ */
+#define FC_XY(x, y)		((((x) & 0xff) << 8) | ((y) & 0xff))
+#define FC_XYXY(x, y)		((FC_XY(x, y) << 16) | FC_XY(x, y))
+#define FC_XYNN(x, y)		(FC_XYXY(x, y) ^ 0xffff)
+
+#define FC_SOF_ENCODE(n)	FC_XYNN(n, n)
+#define FC_EOF_ENCODE(n)	FC_XYNN(n, n)
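+
+/*
+ * Worked example (illustrative): FC_SOF_ENCODE(FC_SOF_I3) is
+ * FC_XYNN(0x2e, 0x2e) = 0x2e2e2e2e ^ 0xffff = 0x2e2ed1d1, i.e. two
+ * copies of the byte pair followed by their ones complement, so the
+ * receiver can check the word for corruption.
+ */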
+
+/*
+ * SOF / EOF bytes.
+ */
+enum fc_sof {
+	FC_SOF_F =	0x28,	/* fabric */
+	FC_SOF_I4 =	0x29,	/* initiate class 4 */
+	FC_SOF_I2 =	0x2d,	/* initiate class 2 */
+	FC_SOF_I3 =	0x2e,	/* initiate class 3 */
+	FC_SOF_N4 =	0x31,	/* normal class 4 */
+	FC_SOF_N2 =	0x35,	/* normal class 2 */
+	FC_SOF_N3 =	0x36,	/* normal class 3 */
+	FC_SOF_C4 =	0x39,	/* activate class 4 */
+} __attribute__((packed));
+
+enum fc_eof {
+	FC_EOF_N =	0x41,	/* normal (not last frame of seq) */
+	FC_EOF_T =	0x42,	/* terminate (last frame of sequence) */
+	FC_EOF_RT =	0x44,
+	FC_EOF_DT =	0x46,	/* disconnect-terminate class-1 */
+	FC_EOF_NI =	0x49,	/* normal-invalid */
+	FC_EOF_DTI =	0x4e,	/* disconnect-terminate-invalid */
+	FC_EOF_RTI =	0x4f,
+	FC_EOF_A =	0x50,	/* abort */
+} __attribute__((packed));
+
+#define FC_SOF_CLASS_MASK 0x06	/* mask for class of service in SOF */
+
+/*
+ * Define classes in terms of the SOF code (initial).
+ */
+enum fc_class {
+	FC_CLASS_NONE = 0,	/* software value indicating no class */
+	FC_CLASS_2 =	FC_SOF_I2,
+	FC_CLASS_3 =	FC_SOF_I3,
+	FC_CLASS_4 =	FC_SOF_I4,
+	FC_CLASS_F =	FC_SOF_F,
+};
+
+/*
+ * Determine whether SOF code indicates the need for a BLS ACK.
+ */
+static inline int fc_sof_needs_ack(enum fc_sof sof)
+{
+	return (~sof) & 0x02;	/* true for class 1, 2, 4, 6, or F */
+}
+
+/*
+ * Given an fc_class, return the normal (non-initial) SOF value.
+ */
+static inline enum fc_sof fc_sof_normal(enum fc_class class)
+{
+	return class + FC_SOF_N3 - FC_SOF_I3;	/* diff is always 8 */
+}
+
+/*
+ * Compute class from SOF value.
+ */
+static inline enum fc_class fc_sof_class(enum fc_sof sof)
+{
+	return (sof & 0x7) | FC_SOF_F;
+}
+
+/*
+ * Determine whether SOF is for the initial frame of a sequence.
+ */
+static inline int fc_sof_is_init(enum fc_sof sof)
+{
+	return sof < 0x30;
+}
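+
+/*
+ * Worked example (illustrative): for class 3, fc_sof_is_init(FC_SOF_I3)
+ * is true (0x2e < 0x30) while fc_sof_is_init(FC_SOF_N3) is false, and
+ * fc_sof_class() maps both 0x2e and 0x36 back to FC_CLASS_3.
+ */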
+
+#endif /* _FC_ENCAPS_H_ */
diff --git a/include/scsi/fc/fc_fc2.h b/include/scsi/fc/fc_fc2.h
new file mode 100644
index 000000000000..cff8a8c22f50
--- /dev/null
+++ b/include/scsi/fc/fc_fc2.h
@@ -0,0 +1,124 @@
+/*
+ * Copyright(c) 2007 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+#ifndef _FC_FC2_H_
+#define _FC_FC2_H_
+
+/*
+ * Fibre Channel Exchanges and Sequences.
+ */
+#ifndef PACKED
+#define PACKED  __attribute__ ((__packed__))
+#endif /* PACKED */
+
+
+/*
+ * Sequence Status Block.
+ * This format is set by the FC-FS standard and is sent over the wire.
+ * Note that the fields aren't all naturally aligned.
+ */
+struct fc_ssb {
+	__u8	ssb_seq_id;		/* sequence ID */
+	__u8	_ssb_resvd;
+	__be16	ssb_low_seq_cnt;	/* lowest SEQ_CNT */
+
+	__be16	ssb_high_seq_cnt;	/* highest SEQ_CNT */
+	__be16	ssb_s_stat;		/* sequence status flags */
+
+	__be16	ssb_err_seq_cnt;	/* error SEQ_CNT */
+	__u8	ssb_fh_cs_ctl;		/* frame header CS_CTL */
+	__be16	ssb_fh_ox_id;		/* frame header OX_ID */
+	__be16	ssb_rx_id;		/* responder's exchange ID */
+	__u8	_ssb_resvd2[2];
+} PACKED;
+
+/*
+ * The SSB should be 17 bytes.  Since its layout is somewhat strange,
+ * we define the size here so that code can ASSERT that the size comes out
+ * correct.
+ */
+#define FC_SSB_SIZE         17          /* length of fc_ssb for assert */
+
+/*
+ * ssb_s_stat - flags from FC-FS-2 T11/1619-D Rev 0.90.
+ */
+#define SSB_ST_RESP         (1 << 15)   /* sequence responder */
+#define SSB_ST_ACTIVE       (1 << 14)   /* sequence is active */
+#define SSB_ST_ABNORMAL     (1 << 12)   /* abnormal ending condition */
+
+#define SSB_ST_REQ_MASK     (3 << 10)   /* ACK, abort sequence condition */
+#define SSB_ST_REQ_CONT     (0 << 10)
+#define SSB_ST_REQ_ABORT    (1 << 10)
+#define SSB_ST_REQ_STOP     (2 << 10)
+#define SSB_ST_REQ_RETRANS  (3 << 10)
+
+#define SSB_ST_ABTS         (1 << 9)    /* ABTS protocol completed */
+#define SSB_ST_RETRANS      (1 << 8)    /* retransmission completed */
+#define SSB_ST_TIMEOUT      (1 << 7)    /* sequence timed out by recipient */
+#define SSB_ST_P_RJT        (1 << 6)    /* P_RJT transmitted */
+
+#define SSB_ST_CLASS_BIT    4           /* class of service field LSB */
+#define SSB_ST_CLASS_MASK   3           /* class of service mask */
+#define SSB_ST_ACK          (1 << 3)    /* ACK (EOFt or EOFdt) transmitted */
+
+/*
+ * Exchange Status Block.
+ * This format is set by the FC-FS standard and is sent over the wire.
+ * Note that the fields aren't all naturally aligned.
+ */
+struct fc_esb {
+	__u8	esb_cs_ctl;		/* CS_CTL for frame header */
+	__be16	esb_ox_id;		/* originator exchange ID */
+	__be16	esb_rx_id;		/* responder exchange ID */
+	__be32	esb_orig_fid;		/* fabric ID of originator */
+	__be32	esb_resp_fid;		/* fabric ID of responder */
+	__be32	esb_e_stat;		/* status */
+	__u8	_esb_resvd[4];
+	__u8	esb_service_params[112]; /* TBD */
+	__u8	esb_seq_status[8];	/* sequence statuses, 8 bytes each */
+} __attribute__((packed));
+
+
+/*
+ * Define expected size for ASSERTs.
+ * See comments on FC_SSB_SIZE.
+ */
+#define FC_ESB_SIZE         (1 + 5*4 + 112 + 8)     /* expected size */
+
+/*
+ * esb_e_stat - flags from FC-FS-2 T11/1619-D Rev 0.90.
+ */
+#define ESB_ST_RESP         (1 << 31)   /* responder to exchange */
+#define ESB_ST_SEQ_INIT     (1 << 30)   /* port holds sequence initiative */
+#define ESB_ST_COMPLETE     (1 << 29)   /* exchange is complete */
+#define ESB_ST_ABNORMAL     (1 << 28)   /* abnormal ending condition */
+#define ESB_ST_REC_QUAL     (1 << 26)   /* recovery qualifier active */
+
+#define ESB_ST_ERRP_BIT     24          /* LSB for error policy */
+#define ESB_ST_ERRP_MASK    (3 << 24)   /* mask for error policy */
+#define ESB_ST_ERRP_MULT    (0 << 24)   /* abort, discard multiple sequences */
+#define ESB_ST_ERRP_SING    (1 << 24)   /* abort, discard single sequence */
+#define ESB_ST_ERRP_INF     (2 << 24)   /* process with infinite buffers */
+#define ESB_ST_ERRP_IMM     (3 << 24)   /* discard mult. with immed. retran. */
+
+#define ESB_ST_OX_ID_INVL   (1 << 23)   /* originator XID invalid */
+#define ESB_ST_RX_ID_INVL   (1 << 22)   /* responder XID invalid */
+#define ESB_ST_PRI_INUSE    (1 << 21)   /* priority / preemption in use */
+
+#endif /* _FC_FC2_H_ */
diff --git a/include/scsi/fc/fc_fcoe.h b/include/scsi/fc/fc_fcoe.h
new file mode 100644
index 000000000000..57aaa8f0d613
--- /dev/null
+++ b/include/scsi/fc/fc_fcoe.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright(c) 2007 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+#ifndef _FC_FCOE_H_
+#define	_FC_FCOE_H_
+
+/*
+ * FCoE - Fibre Channel over Ethernet.
+ */
+
+/*
+ * The FCoE ethertype eventually goes in net/if_ether.h.
+ */
+#ifndef ETH_P_FCOE
+#define	ETH_P_FCOE	0x8906		/* FCOE ether type */
+#endif
+
+#ifndef ETH_P_8021Q
+#define	ETH_P_8021Q	0x8100
+#endif
+
+/*
+ * FC_FCOE_OUI hasn't been standardized yet.   XXX TBD.
+ */
+#ifndef FC_FCOE_OUI
+#define	FC_FCOE_OUI	0x0efc00	/* upper 24 bits of FCOE dest MAC TBD */
+#endif
+
+/*
+ * The destination MAC address for the fabric login may get a different OUI.
+ * This isn't standardized yet.
+ */
+#ifndef FC_FCOE_FLOGI_MAC
+/* gateway MAC - TBD */
+#define	FC_FCOE_FLOGI_MAC { 0x0e, 0xfc, 0x00, 0xff, 0xff, 0xfe }
+#endif
+
+#define	FC_FCOE_VER	0			/* version */
+
+/*
+ * Ethernet Addresses based on FC S_ID and D_ID.
+ * Generated by FC_FCOE_OUI | S_ID/D_ID
+ */
+#define	FC_FCOE_ENCAPS_ID(n)	(((u64) FC_FCOE_OUI << 24) | (n))
+#define	FC_FCOE_DECAPS_ID(n)	((n) >> 24)
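+
+/*
+ * Worked example (illustrative): with the default OUI above, a D_ID of
+ * 0x123456 encapsulates as FC_FCOE_ENCAPS_ID(0x123456) =
+ * 0x0efc00123456, i.e. MAC address 0e:fc:00:12:34:56.
+ */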
+
+/*
+ * FCoE frame header - 14 bytes
+ *
+ * This is the August 2007 version of the FCoE header as defined by T11.
+ * This follows the VLAN header, which includes the ethertype.
+ */
+struct fcoe_hdr {
+	__u8		fcoe_ver;	/* version field - upper 4 bits */
+	__u8		fcoe_resvd[12];	/* reserved - send zero and ignore */
+	__u8		fcoe_sof;	/* start of frame per RFC 3643 */
+};
+
+#define FC_FCOE_DECAPS_VER(hp)	    ((hp)->fcoe_ver >> 4)
+#define FC_FCOE_ENCAPS_VER(hp, ver) ((hp)->fcoe_ver = (ver) << 4)
+
+/*
+ * FCoE CRC & EOF - 8 bytes.
+ */
+struct fcoe_crc_eof {
+	__le32		fcoe_crc32;	/* CRC for FC packet */
+	__u8		fcoe_eof;	/* EOF from RFC 3643 */
+	__u8		fcoe_resvd[3];	/* reserved - send zero and ignore */
+} __attribute__((packed));
+
+/*
+ * Minimum FCoE + FC header length
+ * 14 bytes FCoE header + 24 byte FC header = 38 bytes
+ */
+#define FCOE_HEADER_LEN 38
+
+/*
+ * Minimum FCoE frame size
+ * 14 bytes FCoE header + 24 byte FC header + 8 byte FCoE trailer = 46 bytes
+ */
+#define FCOE_MIN_FRAME 46
+
+/*
+ * fc_fcoe_set_mac - Store OUI + DID into MAC address field.
+ * @mac: mac address to be set
+ * @did: fc dest id to use
+ */
+static inline void fc_fcoe_set_mac(u8 *mac, u8 *did)
+{
+	mac[0] = (u8) (FC_FCOE_OUI >> 16);
+	mac[1] = (u8) (FC_FCOE_OUI >> 8);
+	mac[2] = (u8) FC_FCOE_OUI;
+	mac[3] = did[0];
+	mac[4] = did[1];
+	mac[5] = did[2];
+}
+
+#endif /* _FC_FCOE_H_ */
diff --git a/include/scsi/fc/fc_fcp.h b/include/scsi/fc/fc_fcp.h
new file mode 100644
index 000000000000..5d38f1989f37
--- /dev/null
+++ b/include/scsi/fc/fc_fcp.h
@@ -0,0 +1,199 @@
+/*
+ * Copyright(c) 2007 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+#ifndef _FC_FCP_H_
+#define	_FC_FCP_H_
+
+/*
+ * Fibre Channel Protocol for SCSI.
+ * From T10 FCP-3, T10 project 1560-D Rev 4, Sept. 13, 2005.
+ */
+
+/*
+ * fc/fs.h defines FC_TYPE_FCP.
+ */
+
+/*
+ * Service parameter page parameters (word 3 bits) for Process Login.
+ */
+#define	FCP_SPPF_TASK_RETRY_ID	0x0200	/* task retry ID requested */
+#define	FCP_SPPF_RETRY		0x0100	/* retry supported */
+#define	FCP_SPPF_CONF_COMPL	0x0080	/* confirmed completion allowed */
+#define	FCP_SPPF_OVLY_ALLOW	0x0040	/* data overlay allowed */
+#define	FCP_SPPF_INIT_FCN	0x0020	/* initiator function */
+#define	FCP_SPPF_TARG_FCN	0x0010	/* target function */
+#define	FCP_SPPF_RD_XRDY_DIS	0x0002	/* disable XFER_RDY for reads */
+#define	FCP_SPPF_WR_XRDY_DIS	0x0001	/* disable XFER_RDY for writes */
+
+/*
+ * FCP_CMND IU Payload.
+ */
+struct fcp_cmnd {
+	__u8		fc_lun[8];	/* logical unit number */
+	__u8		fc_cmdref;	/* command reference number */
+	__u8		fc_pri_ta;	/* priority and task attribute */
+	__u8		fc_tm_flags;	/* task management flags */
+	__u8		fc_flags;	/* additional len & flags */
+	__u8		fc_cdb[16];	/* base CDB */
+	__be32		fc_dl;		/* data length (must follow fc_cdb) */
+};
+
+#define	FCP_CMND_LEN	32	/* expected length of structure */
+
+struct fcp_cmnd32 {
+	__u8		fc_lun[8];	/* logical unit number */
+	__u8		fc_cmdref;	/* command reference number */
+	__u8		fc_pri_ta;	/* priority and task attribute */
+	__u8		fc_tm_flags;	/* task management flags */
+	__u8		fc_flags;	/* additional len & flags */
+	__u8		fc_cdb[32];	/* base CDB */
+	__be32		fc_dl;		/* data length (must follow fc_cdb) */
+};
+
+#define	FCP_CMND32_LEN	    48	/* expected length of structure */
+#define	FCP_CMND32_ADD_LEN  (16 / 4)	/* Additional cdb length */
+
+/*
+ * fc_pri_ta.
+ */
+#define	FCP_PTA_SIMPLE	    0	/* simple task attribute */
+#define	FCP_PTA_HEADQ	    1	/* head of queue task attribute */
+#define	FCP_PTA_ORDERED     2	/* ordered task attribute */
+#define	FCP_PTA_ACA	    4	/* auto. contingent allegiance */
+#define	FCP_PRI_SHIFT	    3	/* priority field starts in bit 3 */
+#define	FCP_PRI_RESVD_MASK  0x80	/* reserved bits in priority field */
+
+/*
+ * fc_tm_flags - task management flags field.
+ */
+#define	FCP_TMF_CLR_ACA		0x40	/* clear ACA condition */
+#define	FCP_TMF_LUN_RESET	0x10	/* logical unit reset task management */
+#define	FCP_TMF_CLR_TASK_SET	0x04	/* clear task set */
+#define	FCP_TMF_ABT_TASK_SET	0x02	/* abort task set */
+
+/*
+ * fc_flags.
+ *  Bits 7:2 are the additional FCP_CDB length / 4.
+ */
+#define	FCP_CFL_LEN_MASK	0xfc	/* mask for additional length */
+#define	FCP_CFL_LEN_SHIFT	2	/* shift bits for additional length */
+#define	FCP_CFL_RDDATA		0x02	/* read data */
+#define	FCP_CFL_WRDATA		0x01	/* write data */
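+
+/*
+ * Worked example (illustrative): the 32-byte CDB form above carries 16
+ * extra CDB bytes, so its additional length is FCP_CMND32_ADD_LEN = 4
+ * words, and a write command would set fc_flags to
+ * (FCP_CMND32_ADD_LEN << FCP_CFL_LEN_SHIFT) | FCP_CFL_WRDATA.
+ */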
+
+/*
+ * FCP_TXRDY IU - transfer ready payload.
+ */
+struct fcp_txrdy {
+	__be32		ft_data_ro;	/* data relative offset */
+	__be32		ft_burst_len;	/* burst length */
+	__u8		_ft_resvd[4];	/* reserved */
+};
+
+#define	FCP_TXRDY_LEN	12	/* expected length of structure */
+
+/*
+ * FCP_RESP IU - response payload.
+ *
+ * The response payload comes in three parts: the flags/status, the
+ * sense/response lengths and the sense data/response info section.
+ *
+ * From FCP3r04, note 6 of section 9.5.13:
+ *
+ * Some early implementations presented the FCP_RSP IU without the FCP_RESID,
+ * FCP_SNS_LEN, and FCP_RSP_LEN fields if the FCP_RESID_UNDER, FCP_RESID_OVER,
+ * FCP_SNS_LEN_VALID, and FCP_RSP_LEN_VALID bits were all set to zero. This
+ * non-standard behavior should be tolerated.
+ *
+ * All response frames will always contain the fcp_resp template.  Some
+ * will also include the fcp_resp_len template.
+ */
+struct fcp_resp {
+	__u8		_fr_resvd[8];	/* reserved */
+	__be16		fr_retry_delay;	/* retry delay timer */
+	__u8		fr_flags;	/* flags */
+	__u8		fr_status;	/* SCSI status code */
+};
+
+#define	FCP_RESP_LEN	12	/* expected length of structure */
+
+struct fcp_resp_ext {
+	__be32		fr_resid;	/* Residual value */
+	__be32		fr_sns_len;	/* SCSI Sense length */
+	__be32		fr_rsp_len;	/* Response Info length */
+
+	/*
+	 * Optionally followed by RSP info and/or SNS info and/or
+	 * bidirectional read residual length, if any.
+	 */
+};
+
+#define FCP_RESP_EXT_LEN    12  /* expected length of the structure */
+
+struct fcp_resp_rsp_info {
+    __u8      _fr_resvd[3];       /* reserved */
+    __u8      rsp_code;           /* Response Info Code */
+    __u8      _fr_resvd2[4];      /* reserved */
+};
+
+struct fcp_resp_with_ext {
+	struct fcp_resp resp;
+	struct fcp_resp_ext ext;
+};
+
+#define	FCP_RESP_WITH_EXT   (FCP_RESP_LEN + FCP_RESP_EXT_LEN)
+
+/*
+ * fr_flags.
+ */
+#define	FCP_BIDI_RSP	    0x80	/* bidirectional read response */
+#define	FCP_BIDI_READ_UNDER 0x40	/* bidir. read less than requested */
+#define	FCP_BIDI_READ_OVER  0x20	/* DL insufficient for full transfer */
+#define	FCP_CONF_REQ	    0x10	/* confirmation requested */
+#define	FCP_RESID_UNDER     0x08	/* transfer shorter than expected */
+#define	FCP_RESID_OVER	    0x04	/* DL insufficient for full transfer */
+#define	FCP_SNS_LEN_VAL     0x02	/* SNS_LEN field is valid */
+#define	FCP_RSP_LEN_VAL     0x01	/* RSP_LEN field is valid */
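+
+/*
+ * Parsing sketch (illustrative): per the note above, the extension is
+ * only safe to read when one of the residual/validity bits is set, e.g.
+ *
+ *	if (resp->fr_flags & (FCP_RESID_UNDER | FCP_RESID_OVER |
+ *			      FCP_SNS_LEN_VAL | FCP_RSP_LEN_VAL))
+ *		... struct fcp_resp_ext follows struct fcp_resp ...
+ */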
+
+/*
+ * rsp_codes
+ */
+enum fcp_resp_rsp_codes {
+	FCP_TMF_CMPL = 0,
+	FCP_DATA_LEN_INVALID = 1,
+	FCP_CMND_FIELDS_INVALID = 2,
+	FCP_DATA_PARAM_MISMATCH = 3,
+	FCP_TMF_REJECTED = 4,
+	FCP_TMF_FAILED = 5,
+	FCP_TMF_INVALID_LUN = 9,
+};
+
+/*
+ * FCP SRR Link Service request - Sequence Retransmission Request.
+ */
+struct fcp_srr {
+	__u8		srr_op;		/* opcode ELS_SRR */
+	__u8		srr_resvd[3];	/* opcode / reserved - must be zero */
+	__be16		srr_ox_id;	/* OX_ID of failed command */
+	__be16		srr_rx_id;	/* RX_ID of failed command */
+	__be32		srr_rel_off;	/* relative offset */
+	__u8		srr_r_ctl;	/* r_ctl for the information unit */
+	__u8		srr_resvd2[3];	/* reserved */
+};
+
+#endif /* _FC_FCP_H_ */
diff --git a/include/scsi/fc/fc_fs.h b/include/scsi/fc/fc_fs.h
new file mode 100644
index 000000000000..3e4801d2bdbb
--- /dev/null
+++ b/include/scsi/fc/fc_fs.h
@@ -0,0 +1,340 @@
+/*
+ * Copyright(c) 2007 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+#ifndef _FC_FS_H_
+#define _FC_FS_H_
+
+/*
+ * Fibre Channel Framing and Signalling definitions.
+ * From T11 FC-FS-2 Rev 0.90 - 9 August 2005.
+ */
+
+/*
+ * Frame header
+ */
+struct fc_frame_header {
+	__u8          fh_r_ctl;	/* routing control */
+	__u8          fh_d_id[3];	/* Destination ID */
+
+	__u8          fh_cs_ctl;	/* class of service control / pri */
+	__u8          fh_s_id[3];	/* Source ID */
+
+	__u8          fh_type;		/* see enum fc_fh_type below */
+	__u8          fh_f_ctl[3];	/* frame control */
+
+	__u8          fh_seq_id;	/* sequence ID */
+	__u8          fh_df_ctl;	/* data field control */
+	__be16        fh_seq_cnt;	/* sequence count */
+
+	__be16        fh_ox_id;		/* originator exchange ID */
+	__be16        fh_rx_id;		/* responder exchange ID */
+	__be32        fh_parm_offset;	/* parameter or relative offset */
+};
+
+#define FC_FRAME_HEADER_LEN 24	/* expected length of structure */
+
+#define FC_MAX_PAYLOAD  2112U		/* max payload length in bytes */
+#define FC_MIN_MAX_PAYLOAD  256U 	/* lower limit on max payload */
+
+#define FC_MAX_FRAME	(FC_MAX_PAYLOAD + FC_FRAME_HEADER_LEN)
+#define FC_MIN_MAX_FRAME (FC_MIN_MAX_PAYLOAD + FC_FRAME_HEADER_LEN)
+
+/*
+ * fh_r_ctl - Routing control definitions.
+ */
+    /*
+     * FC-4 device_data.
+     */
+enum fc_rctl {
+	FC_RCTL_DD_UNCAT = 0x00,	/* uncategorized information */
+	FC_RCTL_DD_SOL_DATA = 0x01,	/* solicited data */
+	FC_RCTL_DD_UNSOL_CTL = 0x02,	/* unsolicited control */
+	FC_RCTL_DD_SOL_CTL = 0x03,	/* solicited control or reply */
+	FC_RCTL_DD_UNSOL_DATA = 0x04,	/* unsolicited data */
+	FC_RCTL_DD_DATA_DESC = 0x05,	/* data descriptor */
+	FC_RCTL_DD_UNSOL_CMD = 0x06,	/* unsolicited command */
+	FC_RCTL_DD_CMD_STATUS = 0x07,	/* command status */
+
+#define FC_RCTL_ILS_REQ FC_RCTL_DD_UNSOL_CTL	/* ILS request */
+#define FC_RCTL_ILS_REP FC_RCTL_DD_SOL_CTL	/* ILS reply */
+
+	/*
+	 * Extended Link_Data
+	 */
+	FC_RCTL_ELS_REQ = 0x22,	/* extended link services request */
+	FC_RCTL_ELS_REP = 0x23,	/* extended link services reply */
+	FC_RCTL_ELS4_REQ = 0x32, /* FC-4 ELS request */
+	FC_RCTL_ELS4_REP = 0x33, /* FC-4 ELS reply */
+	/*
+	 * Optional Extended Headers
+	 */
+	FC_RCTL_VFTH = 0x50,	/* virtual fabric tagging header */
+	FC_RCTL_IFRH = 0x51,	/* inter-fabric routing header */
+	FC_RCTL_ENCH = 0x52,	/* encapsulation header */
+	/*
+	 * Basic Link Services fh_r_ctl values.
+	 */
+	FC_RCTL_BA_NOP = 0x80,	/* basic link service NOP */
+	FC_RCTL_BA_ABTS = 0x81,	/* basic link service abort */
+	FC_RCTL_BA_RMC = 0x82,	/* remove connection */
+	FC_RCTL_BA_ACC = 0x84,	/* basic accept */
+	FC_RCTL_BA_RJT = 0x85,	/* basic reject */
+	FC_RCTL_BA_PRMT = 0x86,	/* dedicated connection preempted */
+	/*
+	 * Link Control Information.
+	 */
+	FC_RCTL_ACK_1 = 0xc0,	/* acknowledge_1 */
+	FC_RCTL_ACK_0 = 0xc1,	/* acknowledge_0 */
+	FC_RCTL_P_RJT = 0xc2,	/* port reject */
+	FC_RCTL_F_RJT = 0xc3,	/* fabric reject */
+	FC_RCTL_P_BSY = 0xc4,	/* port busy */
+	FC_RCTL_F_BSY = 0xc5,	/* fabric busy to data frame */
+	FC_RCTL_F_BSYL = 0xc6,	/* fabric busy to link control frame */
+	FC_RCTL_LCR = 0xc7,	/* link credit reset */
+	FC_RCTL_END = 0xc9,	/* end */
+};
+/* incomplete list of definitions */
+
+/*
+ * R_CTL names initializer.
+ * Please keep this matching the above definitions.
+ */
+#define FC_RCTL_NAMES_INIT { \
+	[FC_RCTL_DD_UNCAT] =		"uncat",			\
+	[FC_RCTL_DD_SOL_DATA] =		"sol data",			\
+	[FC_RCTL_DD_UNSOL_CTL] =	"unsol ctl",			\
+	[FC_RCTL_DD_SOL_CTL] =		"sol ctl/reply",		\
+	[FC_RCTL_DD_UNSOL_DATA] =	"unsol data",			\
+	[FC_RCTL_DD_DATA_DESC] =	"data desc",			\
+	[FC_RCTL_DD_UNSOL_CMD] =	"unsol cmd",			\
+	[FC_RCTL_DD_CMD_STATUS] =	"cmd status",			\
+	[FC_RCTL_ELS_REQ] =		"ELS req",			\
+	[FC_RCTL_ELS_REP] =		"ELS rep",			\
+	[FC_RCTL_ELS4_REQ] =		"FC-4 ELS req",			\
+	[FC_RCTL_ELS4_REP] =		"FC-4 ELS rep",			\
+	[FC_RCTL_BA_NOP] =		"BLS NOP",			\
+	[FC_RCTL_BA_ABTS] =		"BLS abort",			\
+	[FC_RCTL_BA_RMC] =		"BLS remove connection",	\
+	[FC_RCTL_BA_ACC] =		"BLS accept",			\
+	[FC_RCTL_BA_RJT] =		"BLS reject",			\
+	[FC_RCTL_BA_PRMT] =		"BLS dedicated connection preempted", \
+	[FC_RCTL_ACK_1] =		"LC ACK_1",			\
+	[FC_RCTL_ACK_0] =		"LC ACK_0",			\
+	[FC_RCTL_P_RJT] =		"LC port reject",		\
+	[FC_RCTL_F_RJT] =		"LC fabric reject",		\
+	[FC_RCTL_P_BSY] =		"LC port busy",			\
+	[FC_RCTL_F_BSY] =		"LC fabric busy to data frame",	\
+	[FC_RCTL_F_BSYL] =		"LC fabric busy to link control frame",\
+	[FC_RCTL_LCR] =			"LC link credit reset",		\
+	[FC_RCTL_END] =			"LC end",			\
+}
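
/*
 * Illustrative sketch (not part of this patch): FC_RCTL_NAMES_INIT is meant
 * for a designated-initializer table indexed by enum fc_rctl; unlisted slots
 * stay NULL. The table and helper names are hypothetical; ARRAY_SIZE comes
 * from <linux/kernel.h>.
 */
static const char *fc_rctl_names[] = FC_RCTL_NAMES_INIT;

static inline const char *fc_rctl_name(enum fc_rctl rctl)
{
	if ((size_t)rctl < ARRAY_SIZE(fc_rctl_names) && fc_rctl_names[rctl])
		return fc_rctl_names[rctl];
	return "unknown";
}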
+
+/*
+ * Well-known fabric addresses.
+ */
+enum fc_well_known_fid {
+	FC_FID_BCAST =		0xffffff,	/* broadcast */
+	FC_FID_FLOGI =		0xfffffe,	/* fabric login */
+	FC_FID_FCTRL =		0xfffffd,	/* fabric controller */
+	FC_FID_DIR_SERV =	0xfffffc,	/* directory server */
+	FC_FID_TIME_SERV =	0xfffffb,	/* time server */
+	FC_FID_MGMT_SERV =	0xfffffa,	/* management server */
+	FC_FID_QOS =		0xfffff9,	/* QoS Facilitator */
+	FC_FID_ALIASES =	0xfffff8,	/* alias server (FC-PH2) */
+	FC_FID_SEC_KEY =	0xfffff7,	/* Security key dist. server */
+	FC_FID_CLOCK =		0xfffff6,	/* clock synch server */
+	FC_FID_MCAST_SERV =	0xfffff5,	/* multicast server */
+};
+
+#define	FC_FID_WELL_KNOWN_MAX	0xffffff /* highest well-known fabric ID */
+#define	FC_FID_WELL_KNOWN_BASE	0xfffff5 /* start of well-known fabric ID */
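
/*
 * Illustrative sketch (not part of this patch): a predicate built from the
 * two bounds above. The helper name is hypothetical.
 */
static inline int fc_fid_is_well_known(u32 fid)
{
	return fid >= FC_FID_WELL_KNOWN_BASE && fid <= FC_FID_WELL_KNOWN_MAX;
}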
+
+/*
+ * Other well-known addresses, outside the above contiguous range.
+ */
+#define	FC_FID_DOM_MGR		0xfffc00	/* domain manager base */
+
+/*
+ * Fabric ID bytes.
+ */
+#define	FC_FID_DOMAIN		0
+#define	FC_FID_PORT		1
+#define	FC_FID_LINK		2
+
+/*
+ * fh_type codes
+ */
+enum fc_fh_type {
+	FC_TYPE_BLS =	0x00,	/* basic link service */
+	FC_TYPE_ELS =	0x01,	/* extended link service */
+	FC_TYPE_IP =	0x05,	/* IP over FC, RFC 4338 */
+	FC_TYPE_FCP =	0x08,	/* SCSI FCP */
+	FC_TYPE_CT =	0x20,	/* Fibre Channel Services (FC-CT) */
+	FC_TYPE_ILS =	0x22,	/* internal link service */
+};
+
+/*
+ * FC_TYPE names initializer.
+ * Please keep this matching the above definitions.
+ */
+#define FC_TYPE_NAMES_INIT {				\
+	[FC_TYPE_BLS] =		"BLS",			\
+	[FC_TYPE_ELS] =		"ELS",			\
+	[FC_TYPE_IP] =		"IP",			\
+	[FC_TYPE_FCP] =		"FCP",			\
+	[FC_TYPE_CT] =		"CT",			\
+	[FC_TYPE_ILS] =		"ILS",			\
+}
+
+/*
+ * Exchange IDs.
+ */
+#define FC_XID_UNKNOWN  0xffff	/* unknown exchange ID */
+#define FC_XID_MIN	0x0	/* supported min exchange ID */
+#define FC_XID_MAX	0xfffe	/* supported max exchange ID */
+
+/*
+ * fh_f_ctl - Frame control flags.
+ */
+#define	FC_FC_EX_CTX	(1 << 23)	/* sent by responder to exchange */
+#define	FC_FC_SEQ_CTX	(1 << 22)	/* sent by responder to sequence */
+#define	FC_FC_FIRST_SEQ (1 << 21)	/* first sequence of this exchange */
+#define	FC_FC_LAST_SEQ	(1 << 20)	/* last sequence of this exchange */
+#define	FC_FC_END_SEQ	(1 << 19)	/* last frame of sequence */
+#define	FC_FC_END_CONN	(1 << 18)	/* end of class 1 connection pending */
+#define	FC_FC_RES_B17	(1 << 17)	/* reserved */
+#define	FC_FC_SEQ_INIT	(1 << 16)	/* transfer of sequence initiative */
+#define	FC_FC_X_ID_REASS (1 << 15)	/* exchange ID has been changed */
+#define	FC_FC_X_ID_INVAL (1 << 14)	/* exchange ID invalidated */
+
+#define	FC_FC_ACK_1	(1 << 12)	/* 13:12 = 1: ACK_1 expected */
+#define	FC_FC_ACK_N	(2 << 12)	/* 13:12 = 2: ACK_N expected */
+#define	FC_FC_ACK_0	(3 << 12)	/* 13:12 = 3: ACK_0 expected */
+
+#define	FC_FC_RES_B11	(1 << 11)	/* reserved */
+#define	FC_FC_RES_B10	(1 << 10)	/* reserved */
+#define	FC_FC_RETX_SEQ	(1 << 9)	/* retransmitted sequence */
+#define	FC_FC_UNI_TX	(1 << 8)	/* unidirectional transmit (class 1) */
+#define	FC_FC_CONT_SEQ(i) ((i) << 6)
+#define	FC_FC_ABT_SEQ(i) ((i) << 4)
+#define	FC_FC_REL_OFF	(1 << 3)	/* parameter is relative offset */
+#define	FC_FC_RES2	(1 << 2)	/* reserved */
+#define	FC_FC_FILL(i)	((i) & 3)	/* 1:0: bytes of trailing fill */
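
/*
 * Illustrative sketch (not part of this patch): a typical F_CTL value for a
 * request sent as the only sequence of a new exchange, transferring sequence
 * initiative to the responder. The macro name is hypothetical.
 */
#define FC_FCTL_REQ_EXAMPLE	(FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)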
+
+/*
+ * BA_ACC payload.
+ */
+struct fc_ba_acc {
+	__u8		ba_seq_id_val;	/* SEQ_ID validity */
+#define FC_BA_SEQ_ID_VAL 0x80
+	__u8		ba_seq_id;	/* SEQ_ID of seq last deliverable */
+	__u8		ba_resvd[2];	/* reserved */
+	__be16		ba_ox_id;	/* OX_ID for aborted seq or exch */
+	__be16		ba_rx_id;	/* RX_ID for aborted seq or exch */
+	__be16		ba_low_seq_cnt;	/* low SEQ_CNT of aborted seq */
+	__be16		ba_high_seq_cnt; /* high SEQ_CNT of aborted seq */
+};
+
+/*
+ * BA_RJT: Basic Reject payload.
+ */
+struct fc_ba_rjt {
+	__u8		br_resvd;	/* reserved */
+	__u8		br_reason;	/* reason code */
+	__u8		br_explan;	/* reason explanation */
+	__u8		br_vendor;	/* vendor unique code */
+};
+
+/*
+ * BA_RJT reason codes.
+ * From FS-2.
+ */
+enum fc_ba_rjt_reason {
+	FC_BA_RJT_NONE =	0,	/* in software this means no reject */
+	FC_BA_RJT_INVL_CMD =	0x01,	/* invalid command code */
+	FC_BA_RJT_LOG_ERR =	0x03,	/* logical error */
+	FC_BA_RJT_LOG_BUSY =	0x05,	/* logical busy */
+	FC_BA_RJT_PROTO_ERR =	0x07,	/* protocol error */
+	FC_BA_RJT_UNABLE =	0x09,	/* unable to perform request */
+	FC_BA_RJT_VENDOR =	0xff,	/* vendor-specific (see br_vendor) */
+};
+
+/*
+ * BA_RJT reason code explanations.
+ */
+enum fc_ba_rjt_explan {
+	FC_BA_RJT_EXP_NONE =	0x00,	/* no additional explanation */
+	FC_BA_RJT_INV_XID =	0x03,	/* invalid OX_ID-RX_ID combination */
+	FC_BA_RJT_ABT =		0x05,	/* sequence aborted, no seq info */
+};
+
+/*
+ * P_RJT or F_RJT: Port Reject or Fabric Reject parameter field.
+ */
+struct fc_pf_rjt {
+	__u8		rj_action;	/* reserved */
+	__u8		rj_reason;	/* reason code */
+	__u8		rj_resvd;	/* reserved */
+	__u8		rj_vendor;	/* vendor unique code */
+};
+
+/*
+ * P_RJT and F_RJT reject reason codes.
+ */
+enum fc_pf_rjt_reason {
+	FC_RJT_NONE =		0,	/* non-reject (reserved by standard) */
+	FC_RJT_INVL_DID =	0x01,	/* invalid destination ID */
+	FC_RJT_INVL_SID =	0x02,	/* invalid source ID */
+	FC_RJT_P_UNAV_T =	0x03,	/* port unavailable, temporary */
+	FC_RJT_P_UNAV =		0x04,	/* port unavailable, permanent */
+	FC_RJT_CLS_UNSUP =	0x05,	/* class not supported */
+	FC_RJT_DEL_USAGE =	0x06,	/* delimiter usage error */
+	FC_RJT_TYPE_UNSUP =	0x07,	/* type not supported */
+	FC_RJT_LINK_CTL =	0x08,	/* invalid link control */
+	FC_RJT_R_CTL =		0x09,	/* invalid R_CTL field */
+	FC_RJT_F_CTL =		0x0a,	/* invalid F_CTL field */
+	FC_RJT_OX_ID =		0x0b,	/* invalid originator exchange ID */
+	FC_RJT_RX_ID =		0x0c,	/* invalid responder exchange ID */
+	FC_RJT_SEQ_ID =		0x0d,	/* invalid sequence ID */
+	FC_RJT_DF_CTL =		0x0e,	/* invalid DF_CTL field */
+	FC_RJT_SEQ_CNT =	0x0f,	/* invalid SEQ_CNT field */
+	FC_RJT_PARAM =		0x10,	/* invalid parameter field */
+	FC_RJT_EXCH_ERR =	0x11,	/* exchange error */
+	FC_RJT_PROTO =		0x12,	/* protocol error */
+	FC_RJT_LEN =		0x13,	/* incorrect length */
+	FC_RJT_UNEXP_ACK =	0x14,	/* unexpected ACK */
+	FC_RJT_FAB_CLASS =	0x15,	/* class unsupported by fabric entity */
+	FC_RJT_LOGI_REQ =	0x16,	/* login required */
+	FC_RJT_SEQ_XS =		0x17,	/* excessive sequences attempted */
+	FC_RJT_EXCH_EST =	0x18,	/* unable to establish exchange */
+	FC_RJT_FAB_UNAV =	0x1a,	/* fabric unavailable */
+	FC_RJT_VC_ID =		0x1b,	/* invalid VC_ID (class 4) */
+	FC_RJT_CS_CTL =		0x1c,	/* invalid CS_CTL field */
+	FC_RJT_INSUF_RES =	0x1d,	/* insuff. resources for VC (Class 4) */
+	FC_RJT_INVL_CLS =	0x1f,	/* invalid class of service */
+	FC_RJT_PREEMT_RJT =	0x20,	/* preemption request rejected */
+	FC_RJT_PREEMT_DIS =	0x21,	/* preemption not enabled */
+	FC_RJT_MCAST_ERR =	0x22,	/* multicast error */
+	FC_RJT_MCAST_ET =	0x23,	/* multicast error terminate */
+	FC_RJT_PRLI_REQ =	0x24,	/* process login required */
+	FC_RJT_INVL_ATT =	0x25,	/* invalid attachment */
+	FC_RJT_VENDOR =		0xff,	/* vendor specific reject */
+};
+
+#endif /* _FC_FS_H_ */
diff --git a/include/scsi/fc/fc_gs.h b/include/scsi/fc/fc_gs.h
new file mode 100644
index 000000000000..ffab0272c65a
--- /dev/null
+++ b/include/scsi/fc/fc_gs.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright(c) 2007 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+#ifndef _FC_GS_H_
+#define	_FC_GS_H_
+
+/*
+ * Fibre Channel Services - Common Transport.
+ * From T11.org FC-GS-2 Rev 5.3 November 1998.
+ */
+
+struct fc_ct_hdr {
+	__u8		ct_rev;		/* revision */
+	__u8		ct_in_id[3];	/* N_Port ID of original requestor */
+	__u8		ct_fs_type;	/* type of fibre channel service */
+	__u8		ct_fs_subtype;	/* subtype */
+	__u8		ct_options;
+	__u8		_ct_resvd1;
+	__be16		ct_cmd;		/* command / response code */
+	__be16		ct_mr_size;	/* maximum / residual size */
+	__u8		_ct_resvd2;
+	__u8		ct_reason;	/* reject reason */
+	__u8		ct_explan;	/* reason code explanation */
+	__u8		ct_vendor;	/* vendor unique data */
+};
+
+#define	FC_CT_HDR_LEN	16	/* expected sizeof (struct fc_ct_hdr) */
+
+enum fc_ct_rev {
+	FC_CT_REV = 1		/* common transport revision */
+};
+
+/*
+ * ct_fs_type values.
+ */
+enum fc_ct_fs_type {
+	FC_FST_ALIAS =	0xf8,	/* alias service */
+	FC_FST_MGMT =	0xfa,	/* management service */
+	FC_FST_TIME =	0xfb,	/* time service */
+	FC_FST_DIR =	0xfc,	/* directory service */
+};
+
+/*
+ * ct_cmd: Command / response codes
+ */
+enum fc_ct_cmd {
+	FC_FS_RJT =	0x8001,	/* reject */
+	FC_FS_ACC =	0x8002,	/* accept */
+};
+
+/*
+ * FS_RJT reason codes.
+ */
+enum fc_ct_reason {
+	FC_FS_RJT_CMD =		0x01,	/* invalid command code */
+	FC_FS_RJT_VER =		0x02,	/* invalid version level */
+	FC_FS_RJT_LOG =		0x03,	/* logical error */
+	FC_FS_RJT_IUSIZ =	0x04,	/* invalid IU size */
+	FC_FS_RJT_BSY =		0x05,	/* logical busy */
+	FC_FS_RJT_PROTO =	0x07,	/* protocol error */
+	FC_FS_RJT_UNABL =	0x09,	/* unable to perform command request */
+	FC_FS_RJT_UNSUP =	0x0b,	/* command not supported */
+};
+
+/*
+ * FS_RJT reason code explanations.
+ */
+enum fc_ct_explan {
+	FC_FS_EXP_NONE =	0x00,	/* no additional explanation */
+	FC_FS_EXP_PID =		0x01,	/* port ID not registered */
+	FC_FS_EXP_PNAM =	0x02,	/* port name not registered */
+	FC_FS_EXP_NNAM =	0x03,	/* node name not registered */
+	FC_FS_EXP_COS =		0x04,	/* class of service not registered */
+	/* definitions not complete */
+};
+
+#endif /* _FC_GS_H_ */
diff --git a/include/scsi/fc/fc_ns.h b/include/scsi/fc/fc_ns.h
new file mode 100644
index 000000000000..790d7b97d4bc
--- /dev/null
+++ b/include/scsi/fc/fc_ns.h
@@ -0,0 +1,159 @@
+/*
+ * Copyright(c) 2007 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+#ifndef _FC_NS_H_
+#define	_FC_NS_H_
+
+/*
+ * Fibre Channel Services - Name Service (dNS)
+ * From T11.org FC-GS-2 Rev 5.3 November 1998.
+ */
+
+/*
+ * Common-transport sub-type for Name Server.
+ */
+#define	FC_NS_SUBTYPE	    2	/* fc_ct_hdr.ct_fs_subtype */
+
+/*
+ * Name server Requests.
+ * Note:  this is an incomplete list, some unused requests are omitted.
+ */
+enum fc_ns_req {
+	FC_NS_GA_NXT =	0x0100,		/* get all next */
+	FC_NS_GI_A =	0x0101,		/* get identifiers - scope */
+	FC_NS_GPN_ID =	0x0112,		/* get port name by ID */
+	FC_NS_GNN_ID =	0x0113,		/* get node name by ID */
+	FC_NS_GID_PN =	0x0121,		/* get ID for port name */
+	FC_NS_GID_NN =	0x0131,		/* get IDs for node name */
+	FC_NS_GID_FT =	0x0171,		/* get IDs by FC4 type */
+	FC_NS_GPN_FT =	0x0172,		/* get port names by FC4 type */
+	FC_NS_GID_PT =	0x01a1,		/* get IDs by port type */
+	FC_NS_RFT_ID =	0x0217,		/* reg FC4 type for ID */
+	FC_NS_RPN_ID =	0x0212,		/* reg port name for ID */
+	FC_NS_RNN_ID =	0x0213,		/* reg node name for ID */
+};
+
+/*
+ * Port type values.
+ */
+enum fc_ns_pt {
+	FC_NS_UNID_PORT = 0x00,	/* unidentified */
+	FC_NS_N_PORT =	0x01,	/* N port */
+	FC_NS_NL_PORT =	0x02,	/* NL port */
+	FC_NS_FNL_PORT = 0x03,	/* F/NL port */
+	FC_NS_NX_PORT =	0x7f,	/* Nx port */
+	FC_NS_F_PORT =	0x81,	/* F port */
+	FC_NS_FL_PORT =	0x82,	/* FL port */
+	FC_NS_E_PORT =	0x84,	/* E port */
+	FC_NS_B_PORT =	0x85,	/* B port */
+};
+
+/*
+ * Port type object.
+ */
+struct fc_ns_pt_obj {
+	__u8		pt_type;
+};
+
+/*
+ * Port ID object
+ */
+struct fc_ns_fid {
+	__u8		fp_flags;	/* flags for responses only */
+	__u8		fp_fid[3];
+};
+
+/*
+ * fp_flags in port ID object, for responses only.
+ */
+#define	FC_NS_FID_LAST	0x80		/* last object */
+
+/*
+ * FC4-types object.
+ */
+#define	FC_NS_TYPES	256	/* number of possible FC-4 types */
+#define	FC_NS_BPW	32	/* bits per word in bitmap */
+
+struct fc_ns_fts {
+	__be32	ff_type_map[FC_NS_TYPES / FC_NS_BPW]; /* bitmap of FC-4 types */
+};
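
/*
 * Illustrative sketch (not part of this patch): set one FC-4 type bit in the
 * big-endian type map, as a registrant would before sending RFT_ID. The
 * helper name is hypothetical.
 */
static inline void fc_ns_fts_set(struct fc_ns_fts *fts, u8 type)
{
	fts->ff_type_map[type / FC_NS_BPW] |= htonl(1U << (type % FC_NS_BPW));
}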
+
+/*
+ * GID_PT request.
+ */
+struct fc_ns_gid_pt {
+	__u8		fn_pt_type;
+	__u8		fn_domain_id_scope;
+	__u8		fn_area_id_scope;
+	__u8		fn_resvd;
+};
+
+/*
+ * GID_FT or GPN_FT request.
+ */
+struct fc_ns_gid_ft {
+	__u8		fn_resvd;
+	__u8		fn_domain_id_scope;
+	__u8		fn_area_id_scope;
+	__u8		fn_fc4_type;
+};
+
+/*
+ * GPN_FT response.
+ */
+struct fc_gpn_ft_resp {
+	__u8		fp_flags;	/* see fp_flags definitions above */
+	__u8		fp_fid[3];	/* port ID */
+	__be32		fp_resvd;
+	__be64		fp_wwpn;	/* port name */
+};
+
+/*
+ * GID_PN request
+ */
+struct fc_ns_gid_pn {
+	__be64     fn_wwpn;    /* port name */
+};
+
+/*
+ * GID_PN response
+ */
+struct fc_gid_pn_resp {
+	__u8      fp_resvd;
+	__u8      fp_fid[3];     /* port ID */
+};
+
+/*
+ * RFT_ID request - register FC-4 types for ID.
+ */
+struct fc_ns_rft_id {
+	struct fc_ns_fid fr_fid;	/* port ID object */
+	struct fc_ns_fts fr_fts;	/* FC-4 types object */
+};
+
+/*
+ * RPN_ID request - register port name for ID.
+ * RNN_ID request - register node name for ID.
+ */
+struct fc_ns_rn_id {
+	struct fc_ns_fid fr_fid;	/* port ID object */
+	__be64		fr_wwn;		/* node name or port name */
+} __attribute__((__packed__));
+
+#endif /* _FC_NS_H_ */
diff --git a/include/scsi/fc_encode.h b/include/scsi/fc_encode.h
new file mode 100644
index 000000000000..6300f556bce5
--- /dev/null
+++ b/include/scsi/fc_encode.h
@@ -0,0 +1,309 @@
+/*
+ * Copyright(c) 2008 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+#ifndef _FC_ENCODE_H_
+#define _FC_ENCODE_H_
+#include <asm/unaligned.h>
+
+struct fc_ns_rft {
+	struct fc_ns_fid fid;	/* port ID object */
+	struct fc_ns_fts fts;	/* FC4-types object */
+};
+
+struct fc_ct_req {
+	struct fc_ct_hdr hdr;
+	union {
+		struct fc_ns_gid_ft gid;
+		struct fc_ns_rn_id  rn;
+		struct fc_ns_rft rft;
+	} payload;
+};
+
+/**
+ * fc_fill_fc_hdr - fill FC header fields in the specified fc_frame
+ */
+static inline void fc_fill_fc_hdr(struct fc_frame *fp, enum fc_rctl r_ctl,
+				  u32 did, u32 sid, enum fc_fh_type type,
+				  u32 f_ctl, u32 parm_offset)
+{
+	struct fc_frame_header *fh;
+
+	fh = fc_frame_header_get(fp);
+	WARN_ON(r_ctl == 0);
+	fh->fh_r_ctl = r_ctl;
+	hton24(fh->fh_d_id, did);
+	hton24(fh->fh_s_id, sid);
+	fh->fh_type = type;
+	hton24(fh->fh_f_ctl, f_ctl);
+	fh->fh_cs_ctl = 0;
+	fh->fh_df_ctl = 0;
+	fh->fh_parm_offset = htonl(parm_offset);
+}
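
/*
 * Illustrative usage sketch (not part of this patch): fill the header for an
 * unsolicited FCP command frame on a new exchange; the wrapper name and the
 * did/sid arguments are placeholders.
 */
static inline void fc_hdr_fill_example(struct fc_frame *fp, u32 did, u32 sid)
{
	fc_fill_fc_hdr(fp, FC_RCTL_DD_UNSOL_CMD, did, sid, FC_TYPE_FCP,
		       FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
}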
+
+/**
+ * fc_ct_hdr_fill - fills the CT header and resets the CT payload
+ * returns a pointer to the CT request.
+ */
+static inline struct fc_ct_req *fc_ct_hdr_fill(const struct fc_frame *fp,
+					       unsigned int op, size_t req_size)
+{
+	struct fc_ct_req *ct;
+	size_t ct_plen;
+
+	ct_plen  = sizeof(struct fc_ct_hdr) + req_size;
+	ct = fc_frame_payload_get(fp, ct_plen);
+	memset(ct, 0, ct_plen);
+	ct->hdr.ct_rev = FC_CT_REV;
+	ct->hdr.ct_fs_type = FC_FST_DIR;
+	ct->hdr.ct_fs_subtype = FC_NS_SUBTYPE;
+	ct->hdr.ct_cmd = htons((u16) op);
+	return ct;
+}
+
+/**
+ * fc_ct_fill - Fill in a name service request frame
+ */
+static inline int fc_ct_fill(struct fc_lport *lport, struct fc_frame *fp,
+		      unsigned int op, enum fc_rctl *r_ctl, u32 *did,
+		      enum fc_fh_type *fh_type)
+{
+	struct fc_ct_req *ct;
+
+	switch (op) {
+	case FC_NS_GPN_FT:
+		ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_gid_ft));
+		ct->payload.gid.fn_fc4_type = FC_TYPE_FCP;
+		break;
+
+	case FC_NS_RFT_ID:
+		ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rft));
+		hton24(ct->payload.rft.fid.fp_fid,
+		       fc_host_port_id(lport->host));
+		ct->payload.rft.fts = lport->fcts;
+		break;
+
+	case FC_NS_RPN_ID:
+		ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rn_id));
+		hton24(ct->payload.rn.fr_fid.fp_fid,
+		       fc_host_port_id(lport->host));
+		ct->payload.rft.fts = lport->fcts;
+		put_unaligned_be64(lport->wwpn, &ct->payload.rn.fr_wwn);
+		break;
+
+	default:
+		FC_DBG("Invalid op code %x\n", op);
+		return -EINVAL;
+	}
+	*r_ctl = FC_RCTL_DD_UNSOL_CTL;
+	*did = FC_FID_DIR_SERV;
+	*fh_type = FC_TYPE_CT;
+	return 0;
+}
+
+/**
+ * fc_plogi_fill - Fill in plogi request frame
+ */
+static inline void fc_plogi_fill(struct fc_lport *lport, struct fc_frame *fp,
+				 unsigned int op)
+{
+	struct fc_els_flogi *plogi;
+	struct fc_els_csp *csp;
+	struct fc_els_cssp *cp;
+
+	plogi = fc_frame_payload_get(fp, sizeof(*plogi));
+	memset(plogi, 0, sizeof(*plogi));
+	plogi->fl_cmd = (u8) op;
+	put_unaligned_be64(lport->wwpn, &plogi->fl_wwpn);
+	put_unaligned_be64(lport->wwnn, &plogi->fl_wwnn);
+
+	csp = &plogi->fl_csp;
+	csp->sp_hi_ver = 0x20;
+	csp->sp_lo_ver = 0x20;
+	csp->sp_bb_cred = htons(10);	/* this gets set by gateway */
+	csp->sp_bb_data = htons((u16) lport->mfs);
+	cp = &plogi->fl_cssp[3 - 1];	/* class 3 parameters */
+	cp->cp_class = htons(FC_CPC_VALID | FC_CPC_SEQ);
+	csp->sp_features = htons(FC_SP_FT_CIRO);
+	csp->sp_tot_seq = htons(255);	/* seq. we accept */
+	csp->sp_rel_off = htons(0x1f);
+	csp->sp_e_d_tov = htonl(lport->e_d_tov);
+
+	cp->cp_rdfs = htons((u16) lport->mfs);
+	cp->cp_con_seq = htons(255);
+	cp->cp_open_seq = 1;
+}
+
+/**
+ * fc_flogi_fill - Fill in a flogi request frame.
+ */
+static inline void fc_flogi_fill(struct fc_lport *lport, struct fc_frame *fp)
+{
+	struct fc_els_csp *sp;
+	struct fc_els_cssp *cp;
+	struct fc_els_flogi *flogi;
+
+	flogi = fc_frame_payload_get(fp, sizeof(*flogi));
+	memset(flogi, 0, sizeof(*flogi));
+	flogi->fl_cmd = (u8) ELS_FLOGI;
+	put_unaligned_be64(lport->wwpn, &flogi->fl_wwpn);
+	put_unaligned_be64(lport->wwnn, &flogi->fl_wwnn);
+	sp = &flogi->fl_csp;
+	sp->sp_hi_ver = 0x20;
+	sp->sp_lo_ver = 0x20;
+	sp->sp_bb_cred = htons(10);	/* this gets set by gateway */
+	sp->sp_bb_data = htons((u16) lport->mfs);
+	cp = &flogi->fl_cssp[3 - 1];	/* class 3 parameters */
+	cp->cp_class = htons(FC_CPC_VALID | FC_CPC_SEQ);
+}
+
+/**
+ * fc_logo_fill - Fill in a logo request frame.
+ */
+static inline void fc_logo_fill(struct fc_lport *lport, struct fc_frame *fp)
+{
+	struct fc_els_logo *logo;
+
+	logo = fc_frame_payload_get(fp, sizeof(*logo));
+	memset(logo, 0, sizeof(*logo));
+	logo->fl_cmd = ELS_LOGO;
+	hton24(logo->fl_n_port_id, fc_host_port_id(lport->host));
+	logo->fl_n_port_wwn = htonll(lport->wwpn);
+}
+
+/**
+ * fc_rtv_fill - Fill in RTV (read timeout value) request frame.
+ */
+static inline void fc_rtv_fill(struct fc_lport *lport, struct fc_frame *fp)
+{
+	struct fc_els_rtv *rtv;
+
+	rtv = fc_frame_payload_get(fp, sizeof(*rtv));
+	memset(rtv, 0, sizeof(*rtv));
+	rtv->rtv_cmd = ELS_RTV;
+}
+
+/**
+ * fc_rec_fill - Fill in rec request frame
+ */
+static inline void fc_rec_fill(struct fc_lport *lport, struct fc_frame *fp)
+{
+	struct fc_els_rec *rec;
+	struct fc_exch *ep = fc_seq_exch(fr_seq(fp));
+
+	rec = fc_frame_payload_get(fp, sizeof(*rec));
+	memset(rec, 0, sizeof(*rec));
+	rec->rec_cmd = ELS_REC;
+	hton24(rec->rec_s_id, fc_host_port_id(lport->host));
+	rec->rec_ox_id = htons(ep->oxid);
+	rec->rec_rx_id = htons(ep->rxid);
+}
+
+/**
+ * fc_prli_fill - Fill in prli request frame
+ */
+static inline void fc_prli_fill(struct fc_lport *lport, struct fc_frame *fp)
+{
+	struct {
+		struct fc_els_prli prli;
+		struct fc_els_spp spp;
+	} *pp;
+
+	pp = fc_frame_payload_get(fp, sizeof(*pp));
+	memset(pp, 0, sizeof(*pp));
+	pp->prli.prli_cmd = ELS_PRLI;
+	pp->prli.prli_spp_len = sizeof(struct fc_els_spp);
+	pp->prli.prli_len = htons(sizeof(*pp));
+	pp->spp.spp_type = FC_TYPE_FCP;
+	pp->spp.spp_flags = FC_SPP_EST_IMG_PAIR;
+	pp->spp.spp_params = htonl(lport->service_params);
+}
+
+/**
+ * fc_scr_fill - Fill in a scr request frame.
+ */
+static inline void fc_scr_fill(struct fc_lport *lport, struct fc_frame *fp)
+{
+	struct fc_els_scr *scr;
+
+	scr = fc_frame_payload_get(fp, sizeof(*scr));
+	memset(scr, 0, sizeof(*scr));
+	scr->scr_cmd = ELS_SCR;
+	scr->scr_reg_func = ELS_SCRF_FULL;
+}
+
+/**
+ * fc_els_fill - Fill in an ELS  request frame
+ */
+static inline int fc_els_fill(struct fc_lport *lport, struct fc_rport *rport,
+		       struct fc_frame *fp, unsigned int op,
+		       enum fc_rctl *r_ctl, u32 *did, enum fc_fh_type *fh_type)
+{
+	switch (op) {
+	case ELS_PLOGI:
+		fc_plogi_fill(lport, fp, ELS_PLOGI);
+		*did = rport->port_id;
+		break;
+
+	case ELS_FLOGI:
+		fc_flogi_fill(lport, fp);
+		*did = FC_FID_FLOGI;
+		break;
+
+	case ELS_LOGO:
+		fc_logo_fill(lport, fp);
+		*did = FC_FID_FLOGI;
+		/*
+		 * If rport is valid then this is a port
+		 * LOGO, so set did to the rport ID.
+		 */
+		if (rport)
+			*did = rport->port_id;
+		break;
+
+	case ELS_RTV:
+		fc_rtv_fill(lport, fp);
+		*did = rport->port_id;
+		break;
+
+	case ELS_REC:
+		fc_rec_fill(lport, fp);
+		*did = rport->port_id;
+		break;
+
+	case ELS_PRLI:
+		fc_prli_fill(lport, fp);
+		*did = rport->port_id;
+		break;
+
+	case ELS_SCR:
+		fc_scr_fill(lport, fp);
+		*did = FC_FID_FCTRL;
+		break;
+
+	default:
+		FC_DBG("Invalid op code %x\n", op);
+		return -EINVAL;
+	}
+
+	*r_ctl = FC_RCTL_ELS_REQ;
+	*fh_type = FC_TYPE_ELS;
+	return 0;
+}
+#endif /* _FC_ENCODE_H_ */
diff --git a/include/scsi/fc_frame.h b/include/scsi/fc_frame.h
new file mode 100644
index 000000000000..04d34a71355f
--- /dev/null
+++ b/include/scsi/fc_frame.h
@@ -0,0 +1,242 @@
+/*
+ * Copyright(c) 2007 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+#ifndef _FC_FRAME_H_
+#define _FC_FRAME_H_
+
+#include <linux/scatterlist.h>
+#include <linux/skbuff.h>
+#include <scsi/scsi_cmnd.h>
+
+#include <scsi/fc/fc_fs.h>
+#include <scsi/fc/fc_fcp.h>
+#include <scsi/fc/fc_encaps.h>
+
+/*
+ * The fc_frame interface is used to pass frame data between functions.
+ * The frame includes the data buffer, length, and SOF / EOF delimiter types.
+ * A pointer to the port structure of the receiving port is also includeded.
+ */
+
+#define	FC_FRAME_HEADROOM	32	/* headroom for VLAN + FCoE headers */
+#define	FC_FRAME_TAILROOM	8	/* trailer space for FCoE */
+
+/*
+ * Information about an individual fibre channel frame received or to be sent.
+ * The buffer may be in up to 4 additional non-contiguous sections,
+ * but the linear section must hold the frame header.
+ */
+#define FC_FRAME_SG_LEN		4	/* scatter/gather list maximum length */
+
+#define fp_skb(fp)	(&((fp)->skb))
+#define fr_hdr(fp)	((fp)->skb.data)
+#define fr_len(fp)	((fp)->skb.len)
+#define fr_cb(fp)	((struct fcoe_rcv_info *)&((fp)->skb.cb[0]))
+#define fr_dev(fp)	(fr_cb(fp)->fr_dev)
+#define fr_seq(fp)	(fr_cb(fp)->fr_seq)
+#define fr_sof(fp)	(fr_cb(fp)->fr_sof)
+#define fr_eof(fp)	(fr_cb(fp)->fr_eof)
+#define fr_flags(fp)	(fr_cb(fp)->fr_flags)
+#define fr_max_payload(fp)	(fr_cb(fp)->fr_max_payload)
+#define fr_cmd(fp)	(fr_cb(fp)->fr_cmd)
+#define fr_dir(fp)	(fr_cmd(fp)->sc_data_direction)
+#define fr_crc(fp)	(fr_cb(fp)->fr_crc)
+
+struct fc_frame {
+	struct sk_buff skb;
+};
+
+struct fcoe_rcv_info {
+	struct packet_type  *ptype;
+	struct fc_lport	*fr_dev;	/* transport layer private pointer */
+	struct fc_seq	*fr_seq;	/* for use with exchange manager */
+	struct scsi_cmnd *fr_cmd;	/* for use of scsi command */
+	u32		fr_crc;
+	u16		fr_max_payload;	/* max FC payload */
+	enum fc_sof	fr_sof;		/* start of frame delimiter */
+	enum fc_eof	fr_eof;		/* end of frame delimiter */
+	u8		fr_flags;	/* flags - see below */
+};
+
+
+/*
+ * Get fc_frame pointer for an skb that's already been imported.
+ */
+static inline struct fcoe_rcv_info *fcoe_dev_from_skb(const struct sk_buff *skb)
+{
+	BUILD_BUG_ON(sizeof(struct fcoe_rcv_info) > sizeof(skb->cb));
+	return (struct fcoe_rcv_info *) skb->cb;
+}
+
+/*
+ * fr_flags.
+ */
+#define	FCPHF_CRC_UNCHECKED	0x01	/* CRC not computed, still appended */
+
+/*
+ * Initialize a frame.
+ * We don't do a complete memset here for performance reasons.
+ * The caller must set fr_free, fr_hdr, fr_len, fr_sof, and fr_eof eventually.
+ */
+static inline void fc_frame_init(struct fc_frame *fp)
+{
+	fr_dev(fp) = NULL;
+	fr_seq(fp) = NULL;
+	fr_flags(fp) = 0;
+}
+
+struct fc_frame *fc_frame_alloc_fill(struct fc_lport *, size_t payload_len);
+
+struct fc_frame *__fc_frame_alloc(size_t payload_len);
+
+/*
+ * Get frame for sending via port.
+ */
+static inline struct fc_frame *_fc_frame_alloc(struct fc_lport *dev,
+					       size_t payload_len)
+{
+	return __fc_frame_alloc(payload_len);
+}
+
+/*
+ * Allocate fc_frame structure and buffer.  Set the initial length to
+ * payload_size + sizeof (struct fc_frame_header).
+ */
+static inline struct fc_frame *fc_frame_alloc(struct fc_lport *dev, size_t len)
+{
+	struct fc_frame *fp;
+
+	/*
+	 * Note: Since len will often be a constant multiple of 4,
+	 * this check will usually be evaluated and eliminated at compile time.
+	 */
+	if ((len % 4) != 0)
+		fp = fc_frame_alloc_fill(dev, len);
+	else
+		fp = _fc_frame_alloc(dev, len);
+	return fp;
+}
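
/*
 * Illustrative usage sketch (not part of this patch): allocate a frame sized
 * for an ELS RTV payload, locate the payload area, and release the frame.
 * ELS_RTV and struct fc_els_rtv are assumed from <scsi/fc/fc_els.h>; the
 * function name is hypothetical.
 */
static inline void fc_frame_alloc_example(struct fc_lport *lport)
{
	struct fc_frame *fp;
	struct fc_els_rtv *rtv;

	fp = fc_frame_alloc(lport, sizeof(*rtv));
	if (!fp)
		return;
	rtv = fc_frame_payload_get(fp, sizeof(*rtv));
	rtv->rtv_cmd = ELS_RTV;
	/* normally fp would now be handed to the transport; on error paths: */
	fc_frame_free(fp);
}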
+
+/*
+ * Free the fc_frame structure and buffer.
+ */
+static inline void fc_frame_free(struct fc_frame *fp)
+{
+	kfree_skb(fp_skb(fp));
+}
+
+static inline int fc_frame_is_linear(struct fc_frame *fp)
+{
+	return !skb_is_nonlinear(fp_skb(fp));
+}
+
+/*
+ * Get frame header from message in fc_frame structure.
+ * This hides a cast and provides a place to add some checking.
+ */
+static inline
+struct fc_frame_header *fc_frame_header_get(const struct fc_frame *fp)
+{
+	WARN_ON(fr_len(fp) < sizeof(struct fc_frame_header));
+	return (struct fc_frame_header *) fr_hdr(fp);
+}
+
+/*
+ * Get frame payload from message in fc_frame structure.
+ * This hides a cast and provides a place to add some checking.
+ * The len parameter is the minimum length for the payload portion.
+ * Returns NULL if the frame is too short.
+ *
+ * This assumes the interesting part of the payload is in the first part
+ * of the buffer for received data.  This may not be appropriate to use for
+ * buffers being transmitted.
+ */
+static inline void *fc_frame_payload_get(const struct fc_frame *fp,
+					 size_t len)
+{
+	void *pp = NULL;
+
+	if (fr_len(fp) >= sizeof(struct fc_frame_header) + len)
+		pp = fc_frame_header_get(fp) + 1;
+	return pp;
+}
+
+/*
+ * Get frame payload opcode (first byte) from message in fc_frame structure.
+ * This hides a cast and provides a place to add some checking. Return 0
+ * if the frame has no payload.
+ */
+static inline u8 fc_frame_payload_op(const struct fc_frame *fp)
+{
+	u8 *cp;
+
+	cp = fc_frame_payload_get(fp, sizeof(u8));
+	if (!cp)
+		return 0;
+	return *cp;
+
+}
+
+/*
+ * Get FC class from frame.
+ */
+static inline enum fc_class fc_frame_class(const struct fc_frame *fp)
+{
+	return fc_sof_class(fr_sof(fp));
+}
+
+/*
+ * Check the CRC in a frame.
+ * The CRC immediately follows the last data item *AFTER* the length.
+ * The return value is zero if the CRC matches.
+ */
+u32 fc_frame_crc_check(struct fc_frame *);
+
+static inline u8 fc_frame_rctl(const struct fc_frame *fp)
+{
+	return fc_frame_header_get(fp)->fh_r_ctl;
+}
+
+static inline bool fc_frame_is_cmd(const struct fc_frame *fp)
+{
+	return fc_frame_rctl(fp) == FC_RCTL_DD_UNSOL_CMD;
+}
+
+static inline bool fc_frame_is_read(const struct fc_frame *fp)
+{
+	if (fc_frame_is_cmd(fp) && fr_cmd(fp))
+		return fr_dir(fp) == DMA_FROM_DEVICE;
+	return false;
+}
+
+static inline bool fc_frame_is_write(const struct fc_frame *fp)
+{
+	if (fc_frame_is_cmd(fp) && fr_cmd(fp))
+		return fr_dir(fp) == DMA_TO_DEVICE;
+	return false;
+}
+
+/*
+ * Check for leaks.
+ * Print the frame header of any currently allocated frame, assuming there
+ * should be none at this point.
+ */
+void fc_frame_leak_check(void);
+
+#endif /* _FC_FRAME_H_ */
diff --git a/include/scsi/fc_transport_fcoe.h b/include/scsi/fc_transport_fcoe.h
new file mode 100644
index 000000000000..8dca2af14ffc
--- /dev/null
+++ b/include/scsi/fc_transport_fcoe.h
@@ -0,0 +1,54 @@
+#ifndef FC_TRANSPORT_FCOE_H
+#define FC_TRANSPORT_FCOE_H
+
+#include <linux/device.h>
+#include <linux/netdevice.h>
+#include <scsi/scsi_host.h>
+#include <scsi/libfc.h>
+
+/**
+ * struct fcoe_transport - FCoE transport struct for generic transport
+ * for Ethernet devices as well as pure HBAs
+ *
+ * @name: name for this transport
+ * @vendor: PCI vendor ID, for PCI-based transports
+ * @device: PCI device ID, for PCI-based transports
+ * @bus: physical bus type (pci_bus_type)
+ * @driver: physical bus driver for network device
+ * @create: entry create function
+ * @destroy: exit destroy function
+ * @match: tests whether this transport handles a given net_device
+ * @list: list of transports
+ * @devlist: list of devices bound to this transport
+ * @devlock: mutex protecting @devlist
+ */
+struct fcoe_transport {
+	char *name;
+	unsigned short vendor;
+	unsigned short device;
+	struct bus_type *bus;
+	struct device_driver *driver;
+	int (*create)(struct net_device *device);
+	int (*destroy)(struct net_device *device);
+	bool (*match)(struct net_device *device);
+	struct list_head list;
+	struct list_head devlist;
+	struct mutex devlock;
+};
+
+/**
+ * MODULE_ALIAS_FCOE_PCI
+ *
+ * Some care must be taken with this: vendor and device MUST be hex values
+ * preceded by 0x, with letters in lower case (0x12ab, not 0x12AB or 12AB).
+ */
+#define MODULE_ALIAS_FCOE_PCI(vendor, device) \
+	MODULE_ALIAS("fcoe-pci-" __stringify(vendor) "-" __stringify(device))
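
/*
 * Illustrative usage sketch (not part of this patch): a transport module for
 * a hypothetical PCI ID 0x1234:0xabcd would declare, in lower-case hex:
 */
MODULE_ALIAS_FCOE_PCI(0x1234, 0xabcd);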
+
+/* exported funcs */
+int fcoe_transport_attach(struct net_device *netdev);
+int fcoe_transport_release(struct net_device *netdev);
+int fcoe_transport_register(struct fcoe_transport *t);
+int fcoe_transport_unregister(struct fcoe_transport *t);
+int fcoe_load_transport_driver(struct net_device *netdev);
+int __init fcoe_transport_init(void);
+int __exit fcoe_transport_exit(void);
+
+/* fcoe_sw is the default transport */
+extern struct fcoe_transport fcoe_sw_transport;
+#endif /* FC_TRANSPORT_FCOE_H */
diff --git a/include/scsi/iscsi_if.h b/include/scsi/iscsi_if.h
index 0c9514de5df7..d0ed5226f8c4 100644
--- a/include/scsi/iscsi_if.h
+++ b/include/scsi/iscsi_if.h
@@ -333,8 +333,11 @@ enum iscsi_host_param {
 #define CAP_TEXT_NEGO		0x80
 #define CAP_MARKERS		0x100
 #define CAP_FW_DB		0x200
-#define CAP_SENDTARGETS_OFFLOAD	0x400
-#define CAP_DATA_PATH_OFFLOAD	0x800
+#define CAP_SENDTARGETS_OFFLOAD	0x400	/* offload discovery process */
+#define CAP_DATA_PATH_OFFLOAD	0x800	/* offload entire IO path */
+#define CAP_DIGEST_OFFLOAD	0x1000	/* offload hdr and data digests */
+#define CAP_PADDING_OFFLOAD	0x2000	/* offload padding insertion,
+					 * removal, and verification */
 
 /*
  * These flags describes reason of stop_conn() call
diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
new file mode 100644
index 000000000000..9f2876397dda
--- /dev/null
+++ b/include/scsi/libfc.h
@@ -0,0 +1,938 @@
+/*
+ * Copyright(c) 2007 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+#ifndef _LIBFC_H_
+#define _LIBFC_H_
+
+#include <linux/timer.h>
+#include <linux/if.h>
+
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_transport_fc.h>
+
+#include <scsi/fc/fc_fcp.h>
+#include <scsi/fc/fc_ns.h>
+#include <scsi/fc/fc_els.h>
+#include <scsi/fc/fc_gs.h>
+
+#include <scsi/fc_frame.h>
+
+#define LIBFC_DEBUG
+
+#ifdef LIBFC_DEBUG
+/* Log messages */
+#define FC_DBG(fmt, args...)						\
+	do {								\
+		printk(KERN_INFO "%s " fmt, __func__, ##args);		\
+	} while (0)
+#else
+#define FC_DBG(fmt, args...)
+#endif
+
+/*
+ * libfc error codes
+ */
+#define	FC_NO_ERR	0	/* no error */
+#define	FC_EX_TIMEOUT	1	/* Exchange timeout */
+#define	FC_EX_CLOSED	2	/* Exchange closed */
+
+/* some helpful macros */
+
+#define ntohll(x) be64_to_cpu(x)
+#define htonll(x) cpu_to_be64(x)
+
+#define ntoh24(p) (((p)[0] << 16) | ((p)[1] << 8) | ((p)[2]))
+
+#define hton24(p, v)	do {			\
+		p[0] = (((v) >> 16) & 0xFF);	\
+		p[1] = (((v) >> 8) & 0xFF);	\
+		p[2] = ((v) & 0xFF);		\
+	} while (0)
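
/*
 * Illustrative sketch (not part of this patch): round-trip a 24-bit FC_ID
 * through the helpers above. The function name is hypothetical.
 */
static inline void fc_fid_pack_example(void)
{
	u8 buf[3];

	hton24(buf, FC_FID_DIR_SERV);	/* packs 0xfffffc, MSB first */
	WARN_ON(ntoh24(buf) != FC_FID_DIR_SERV);
}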
+
+/*
+ * FC HBA status
+ */
+#define FC_PAUSE		    (1 << 1)
+#define FC_LINK_UP		    (1 << 0)
+
+enum fc_lport_state {
+	LPORT_ST_NONE = 0,
+	LPORT_ST_FLOGI,
+	LPORT_ST_DNS,
+	LPORT_ST_RPN_ID,
+	LPORT_ST_RFT_ID,
+	LPORT_ST_SCR,
+	LPORT_ST_READY,
+	LPORT_ST_LOGO,
+	LPORT_ST_RESET
+};
+
+enum fc_disc_event {
+	DISC_EV_NONE = 0,
+	DISC_EV_SUCCESS,
+	DISC_EV_FAILED
+};
+
+enum fc_rport_state {
+	RPORT_ST_NONE = 0,
+	RPORT_ST_INIT,		/* initialized */
+	RPORT_ST_PLOGI,		/* waiting for PLOGI completion */
+	RPORT_ST_PRLI,		/* waiting for PRLI completion */
+	RPORT_ST_RTV,		/* waiting for RTV completion */
+	RPORT_ST_READY,		/* ready for use */
+	RPORT_ST_LOGO,		/* port logout sent */
+};
+
+enum fc_rport_trans_state {
+	FC_PORTSTATE_ROGUE,
+	FC_PORTSTATE_REAL,
+};
+
+/**
+ * struct fc_disc_port - temporary discovery port to hold rport identifiers
+ * @lp: Fibre Channel host port instance
+ * @peers: node for list management during discovery and RSCN processing
+ * @ids: identifiers structure to pass to fc_remote_port_add()
+ * @rport_work: work struct for starting the rport state machine
+ */
+struct fc_disc_port {
+	struct fc_lport             *lp;
+	struct list_head            peers;
+	struct fc_rport_identifiers ids;
+	struct work_struct	    rport_work;
+};
+
+enum fc_rport_event {
+	RPORT_EV_NONE = 0,
+	RPORT_EV_CREATED,
+	RPORT_EV_FAILED,
+	RPORT_EV_STOP,
+	RPORT_EV_LOGO
+};
+
+struct fc_rport_operations {
+	void (*event_callback)(struct fc_lport *, struct fc_rport *,
+			       enum fc_rport_event);
+};
+
+/**
+ * struct fc_rport_libfc_priv - libfc internal information about a remote port
+ * @local_port: Fibre Channel host port instance
+ * @rp_state: state tracks progress of PLOGI, PRLI, and RTV exchanges
+ * @flags: REC and RETRY supported flags
+ * @max_seq: maximum number of concurrent sequences
+ * @retries: retry count in current state
+ * @e_d_tov: error detect timeout value (in msec)
+ * @r_a_tov: resource allocation timeout value (in msec)
+ * @rp_mutex: mutex protects rport
+ * @retry_work: delayed work used to retry the current state after an error
+ * @event_callback: Callback for rport READY, FAILED or LOGO
+ */
+struct fc_rport_libfc_priv {
+	struct fc_lport		   *local_port;
+	enum fc_rport_state        rp_state;
+	u16			   flags;
+	#define FC_RP_FLAGS_REC_SUPPORTED	(1 << 0)
+	#define FC_RP_FLAGS_RETRY		(1 << 1)
+	u16		           max_seq;
+	unsigned int	           retries;
+	unsigned int	           e_d_tov;
+	unsigned int	           r_a_tov;
+	enum fc_rport_trans_state  trans_state;
+	struct mutex               rp_mutex;
+	struct delayed_work	   retry_work;
+	enum fc_rport_event        event;
+	struct fc_rport_operations *ops;
+	struct list_head           peers;
+	struct work_struct         event_work;
+};
+
+#define PRIV_TO_RPORT(x)						\
+	((struct fc_rport *)((void *)(x) - sizeof(struct fc_rport)))
+#define RPORT_TO_PRIV(x)						\
+	((struct fc_rport_libfc_priv *)((void *)(x) + sizeof(struct fc_rport)))
+
+struct fc_rport *fc_rport_rogue_create(struct fc_disc_port *);
+
+static inline void fc_rport_set_name(struct fc_rport *rport, u64 wwpn, u64 wwnn)
+{
+	rport->node_name = wwnn;
+	rport->port_name = wwpn;
+}
+
+/*
+ * fcoe stats structure
+ */
+struct fcoe_dev_stats {
+	u64		SecondsSinceLastReset;
+	u64		TxFrames;
+	u64		TxWords;
+	u64		RxFrames;
+	u64		RxWords;
+	u64		ErrorFrames;
+	u64		DumpedFrames;
+	u64		LinkFailureCount;
+	u64		LossOfSignalCount;
+	u64		InvalidTxWordCount;
+	u64		InvalidCRCCount;
+	u64		InputRequests;
+	u64		OutputRequests;
+	u64		ControlRequests;
+	u64		InputMegabytes;
+	u64		OutputMegabytes;
+};
+
+/*
+ * The ELS data is used for passing ELS response-specific
+ * data when sending an ELS response, mainly using information
+ * in the exchange and sequence in the EM layer.
+ */
+struct fc_seq_els_data {
+	struct fc_frame *fp;
+	enum fc_els_rjt_reason reason;
+	enum fc_els_rjt_explan explan;
+};
+
+/*
+ * FCP request structure, one for each scsi cmd request
+ */
+struct fc_fcp_pkt {
+	/*
+	 * housekeeping stuff
+	 */
+	struct fc_lport *lp;	/* handle to hba struct */
+	u16		state;		/* scsi_pkt state */
+	u16		tgt_flags;	/* target flags	 */
+	atomic_t	ref_cnt;	/* fcp pkt ref count */
+	spinlock_t	scsi_pkt_lock;	/* Must be taken before the host lock
+					 * if both are held at the same time */
+	/*
+	 * SCSI I/O related stuff
+	 */
+	struct scsi_cmnd *cmd;		/* scsi command pointer. set/clear
+					 * under host lock */
+	struct list_head list;		/* tracks queued commands. access under
+					 * host lock */
+	/*
+	 * timeout related stuff
+	 */
+	struct timer_list timer;	/* command timer */
+	struct completion tm_done;
+	int	wait_for_comp;
+	unsigned long	start_time;	/* start time in jiffies */
+	unsigned long	end_time;	/* end time in jiffies */
+	unsigned long	last_pkt_time;	 /* jiffies of last frame received */
+
+	/*
+	 * scsi cmd and data transfer information
+	 */
+	u32		data_len;
+	/*
+	 * transport-related variables
+	 */
+	struct fcp_cmnd cdb_cmd;
+	size_t		xfer_len;
+	u32		xfer_contig_end; /* offset of end of contiguous xfer */
+	u16		max_payload;	/* max payload size in bytes */
+
+	/*
+	 * scsi/fcp return status
+	 */
+	u32		io_status;	/* SCSI result upper 24 bits */
+	u8		cdb_status;
+	u8		status_code;	/* FCP I/O status */
+	/* bit 3: underrun; bit 2: overrun */
+	u8		scsi_comp_flags;
+	u32		req_flags;	/* bit 0: read; bit 1: write */
+	u32		scsi_resid;	/* residual length */
+
+	struct fc_rport	*rport;		/* remote port pointer */
+	struct fc_seq	*seq_ptr;	/* current sequence pointer */
+	/*
+	 * Error Processing
+	 */
+	u8		recov_retry;	/* count of recovery retries */
+	struct fc_seq	*recov_seq;	/* sequence for REC or SRR */
+};
+
+/*
+ * Structure and function definitions for managing Fibre Channel Exchanges
+ * and Sequences
+ *
+ * fc_exch holds state for one exchange and links to its active sequence.
+ *
+ * fc_seq holds the state for an individual sequence.
+ */
+
+struct fc_exch_mgr;
+
+/*
+ * Sequence.
+ */
+struct fc_seq {
+	u8	id;		/* seq ID */
+	u16	ssb_stat;	/* status flags for sequence status block */
+	u16	cnt;		/* frames sent so far on sequence */
+	u32	rec_data;	/* FC-4 value for REC */
+};
+
+#define FC_EX_DONE		(1 << 0) /* ep is completed */
+#define FC_EX_RST_CLEANUP	(1 << 1) /* reset is forcing completion */
+
+/*
+ * Exchange.
+ *
+ * Locking notes: The ex_lock protects following items:
+ *	state, esb_stat, f_ctl, seq.ssb_stat
+ *	seq_id
+ *	sequence allocation
+ */
+struct fc_exch {
+	struct fc_exch_mgr *em;		/* exchange manager */
+	u32		state;		/* internal driver state */
+	u16		xid;		/* our exchange ID */
+	struct list_head	ex_list;	/* free or busy list linkage */
+	spinlock_t	ex_lock;	/* lock covering exchange state */
+	atomic_t	ex_refcnt;	/* reference counter */
+	struct delayed_work timeout_work; /* timer for upper level protocols */
+	struct fc_lport	*lp;		/* fc device instance */
+	u16		oxid;		/* originator's exchange ID */
+	u16		rxid;		/* responder's exchange ID */
+	u32		oid;		/* originator's FCID */
+	u32		sid;		/* source FCID */
+	u32		did;		/* destination FCID */
+	u32		esb_stat;	/* exchange status for ESB */
+	u32		r_a_tov;	/* r_a_tov from rport (msec) */
+	u8		seq_id;		/* next sequence ID to use */
+	u32		f_ctl;		/* F_CTL flags for sequences */
+	u8		fh_type;	/* frame type */
+	enum fc_class	class;		/* class of service */
+	struct fc_seq	seq;		/* single sequence */
+	/*
+	 * Handler for responses to this current exchange.
+	 */
+	void		(*resp)(struct fc_seq *, struct fc_frame *, void *);
+	void		(*destructor)(struct fc_seq *, void *);
+	/*
+	 * arg is passed as void pointer to exchange
+	 * resp and destructor handlers
+	 */
+	void		*arg;
+};
+#define	fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)
+
+struct libfc_function_template {
+
+	/**
+	 * Mandatory Fields
+	 *
+	 * These handlers must be implemented by the LLD.
+	 */
+
+	/*
+	 * Interface to send a FC frame
+	 */
+	int (*frame_send)(struct fc_lport *lp, struct fc_frame *fp);
+
+	/**
+	 * Optional Fields
+	 *
+	 * The LLD may choose to implement any of the following handlers.
+	 * If the LLD doesn't specify a handler and leaves its pointer NULL then
+	 * the default libfc function will be used for that handler.
+	 */
+
+	/**
+	 * ELS/CT interfaces
+	 */
+
+	/*
+	 * elsct_send - sends ELS/CT frame
+	 */
+	struct fc_seq *(*elsct_send)(struct fc_lport *lport,
+				     struct fc_rport *rport,
+				     struct fc_frame *fp,
+				     unsigned int op,
+				     void (*resp)(struct fc_seq *,
+					     struct fc_frame *fp,
+					     void *arg),
+				     void *arg, u32 timer_msec);
+	/**
+	 * Exchange Manager interfaces
+	 */
+
+	/*
+	 * Send the FC frame payload using a new exchange and sequence.
+	 *
+	 * The frame pointer with some of the header's fields must be
+	 * filled in before calling exch_seq_send(); those fields are:
+	 *
+	 * - routing control
+	 * - FC port did
+	 * - FC port sid
+	 * - FC header type
+	 * - frame control
+	 * - parameter or relative offset
+	 *
+	 * The exchange response handler is set in this routine to resp()
+	 * function pointer. It can be called in two scenarios: if a timeout
+	 * occurs or if a response frame is received for the exchange. The
+	 * fc_frame pointer in the response handler will also indicate a
+	 * timeout as an error using the IS_ERR related macros.
+	 *
+	 * The exchange destructor handler is also set in this routine.
+	 * The destructor handler is invoked by the EM layer when the exchange
+	 * is about to be freed; this can be used by the caller to free its
+	 * resources along with the exchange.
+	 *
+	 * The arg is passed back to resp and destructor handler.
+	 *
+	 * The timeout value (in msec) for an exchange is set if a non-zero
+	 * timer_msec argument is specified. The timer is canceled when
+	 * it fires or when the exchange is done. The exchange timeout handler
+	 * is registered by EM layer.
+	 */
+	struct fc_seq *(*exch_seq_send)(struct fc_lport *lp,
+					struct fc_frame *fp,
+					void (*resp)(struct fc_seq *sp,
+						     struct fc_frame *fp,
+						     void *arg),
+					void (*destructor)(struct fc_seq *sp,
+							   void *arg),
+					void *arg, unsigned int timer_msec);
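
	/*
	 * Illustrative usage sketch (not part of this patch). The caller's
	 * resp handler must distinguish a real frame from a timeout or
	 * teardown error; my_resp, my_arg, and the 2-second timeout are
	 * placeholders:
	 *
	 *	static void my_resp(struct fc_seq *sp, struct fc_frame *fp,
	 *			    void *arg)
	 *	{
	 *		if (IS_ERR(fp))
	 *			return;		(timeout or reset)
	 *		... process the response, then call exch_done() ...
	 *	}
	 *
	 *	sp = lport->tt.exch_seq_send(lport, fp, my_resp, NULL,
	 *				     my_arg, 2 * 1000);
	 */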
+
+	/*
+	 * send a frame using existing sequence and exchange.
+	 */
+	int (*seq_send)(struct fc_lport *lp, struct fc_seq *sp,
+			struct fc_frame *fp);
+
+	/*
+	 * Send an ELS response using mainly information
+	 * in the exchange and sequence in the EM layer.
+	 */
+	void (*seq_els_rsp_send)(struct fc_seq *sp, enum fc_els_cmd els_cmd,
+				 struct fc_seq_els_data *els_data);
+
+	/*
+	 * Abort an exchange and sequence. Generally called because of an
+	 * exchange timeout or an abort from the upper layer.
+	 *
+	 * A timer_msec can be specified as the abort timeout; if a non-zero
+	 * timer_msec value is specified then the exchange resp handler
+	 * will be called with a timeout error if there is no response to
+	 * the abort.
+	 */
+	int (*seq_exch_abort)(const struct fc_seq *req_sp,
+			      unsigned int timer_msec);
+
+	/*
+	 * Indicate that an exchange/sequence tuple is complete and the memory
+	 * allocated for the related objects may be freed.
+	 */
+	void (*exch_done)(struct fc_seq *sp);
+
+	/*
+	 * Assigns an EM and a free XID for a new exchange and then
+	 * allocates a new exchange and sequence pair.
+	 * The fp can be used to determine the free XID.
+	 */
+	struct fc_exch *(*exch_get)(struct fc_lport *lp, struct fc_frame *fp);
+
+	/*
+	 * Release previously assigned XID by exch_get API.
+	 * The LLD may implement this if XID is assigned by LLD
+	 * in exch_get().
+	 */
+	void (*exch_put)(struct fc_lport *lp, struct fc_exch_mgr *mp,
+			 u16 ex_id);
+
+	/*
+	 * Start a new sequence on the same exchange/sequence tuple.
+	 */
+	struct fc_seq *(*seq_start_next)(struct fc_seq *sp);
+
+	/*
+	 * Reset an exchange manager, completing all sequences and exchanges.
+	 * If s_id is non-zero, reset only exchanges originating from that FID.
+	 * If d_id is non-zero, reset only exchanges sending to that FID.
+	 */
+	void (*exch_mgr_reset)(struct fc_exch_mgr *,
+			       u32 s_id, u32 d_id);
+
+	void (*rport_flush_queue)(void);
+	/**
+	 * Local Port interfaces
+	 */
+
+	/*
+	 * Receive a frame to a local port.
+	 */
+	void (*lport_recv)(struct fc_lport *lp, struct fc_seq *sp,
+			   struct fc_frame *fp);
+
+	int (*lport_reset)(struct fc_lport *);
+
+	/**
+	 * Remote Port interfaces
+	 */
+
+	/*
+	 * Initiates the RP state machine. It is called from the LP module.
+	 * This function will issue the following commands to the N_Port
+	 * identified by the FC ID provided.
+	 *
+	 * - PLOGI
+	 * - PRLI
+	 * - RTV
+	 */
+	int (*rport_login)(struct fc_rport *rport);
+
+	/*
+	 * Logoff, and remove the rport from the transport if
+	 * it had been added. This will send a LOGO to the target.
+	 */
+	int (*rport_logoff)(struct fc_rport *rport);
+
+	/*
+	 * Receive a request from a remote port.
+	 */
+	void (*rport_recv_req)(struct fc_seq *, struct fc_frame *,
+			       struct fc_rport *);
+
+	struct fc_rport *(*rport_lookup)(const struct fc_lport *, u32);
+
+	/**
+	 * FCP interfaces
+	 */
+
+	/*
+	 * Send an FCP cmd from an fsp pkt.
+	 * Called with the SCSI host lock unlocked and irqs disabled.
+	 *
+	 * The resp handler is called when the FCP_RSP is received.
+	 */
+	int (*fcp_cmd_send)(struct fc_lport *lp, struct fc_fcp_pkt *fsp,
+			    void (*resp)(struct fc_seq *, struct fc_frame *fp,
+					 void *arg));
+
+	/*
+	 * Used at least during linkdown and reset
+	 */
+	void (*fcp_cleanup)(struct fc_lport *lp);
+
+	/*
+	 * Abort all I/O on a local port
+	 */
+	void (*fcp_abort_io)(struct fc_lport *lp);
+
+	/**
+	 * Discovery interfaces
+	 */
+
+	void (*disc_recv_req)(struct fc_seq *,
+			      struct fc_frame *, struct fc_lport *);
+
+	/*
+	 * Start discovery for a local port.
+	 */
+	void (*disc_start)(void (*disc_callback)(struct fc_lport *,
+						 enum fc_disc_event),
+			   struct fc_lport *);
+
+	/*
+	 * Stop discovery for a given lport. This will remove
+	 * all discovered rports
+	 */
+	void (*disc_stop) (struct fc_lport *);
+
+	/*
+	 * Stop discovery for a given lport. This will block
+	 * until all discovered rports are deleted from the
+	 * FC transport class
+	 */
+	void (*disc_stop_final) (struct fc_lport *);
+};
+
+/* information used by the discovery layer */
+struct fc_disc {
+	unsigned char		retry_count;
+	unsigned char		delay;
+	unsigned char		pending;
+	unsigned char		requested;
+	unsigned short		seq_count;
+	unsigned char		buf_len;
+	enum fc_disc_event	event;
+
+	void (*disc_callback)(struct fc_lport *,
+			      enum fc_disc_event);
+
+	struct list_head	 rports;
+	struct fc_lport		*lport;
+	struct mutex		disc_mutex;
+	struct fc_gpn_ft_resp	partial_buf;	/* partial name buffer */
+	struct delayed_work	disc_work;
+};
+
+struct fc_lport {
+	struct list_head list;
+
+	/* Associations */
+	struct Scsi_Host	*host;
+	struct fc_exch_mgr	*emp;
+	struct fc_rport		*dns_rp;
+	struct fc_rport		*ptp_rp;
+	void			*scsi_priv;
+	struct fc_disc          disc;
+
+	/* Operational Information */
+	struct libfc_function_template tt;
+	u16			link_status;
+	enum fc_lport_state	state;
+	unsigned long		boot_time;
+
+	struct fc_host_statistics host_stats;
+	struct fcoe_dev_stats	*dev_stats[NR_CPUS];
+	u64			wwpn;
+	u64			wwnn;
+	u8			retry_count;
+
+	/* Capabilities */
+	u32			sg_supp:1;	/* scatter gather supported */
+	u32			seq_offload:1;	/* seq offload supported */
+	u32			crc_offload:1;	/* crc offload supported */
+	u32			lro_enabled:1;	/* large receive offload */
+	u32			mfs;	        /* max FC payload size */
+	unsigned int		service_params;
+	unsigned int		e_d_tov;
+	unsigned int		r_a_tov;
+	u8			max_retry_count;
+	u16			link_speed;
+	u16			link_supported_speeds;
+	u16			lro_xid;	/* max xid for fcoe lro */
+	struct fc_ns_fts	fcts;	        /* FC-4 type masks */
+	struct fc_els_rnid_gen	rnid_gen;	/* RNID information */
+
+	/* Semaphores */
+	struct mutex lp_mutex;
+
+	/* Miscellaneous */
+	struct delayed_work	retry_work;
+	struct delayed_work	disc_work;
+};
+
+/**
+ * FC_LPORT HELPER FUNCTIONS
+ *****************************/
+static inline void *lport_priv(const struct fc_lport *lp)
+{
+	return (void *)(lp + 1);
+}
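
/*
 * Illustrative sketch (not part of this patch): an LLD reserves its private
 * area right behind struct fc_lport when allocating the Scsi_Host, e.g.
 * scsi_host_alloc(&my_sht, sizeof(struct fc_lport) + my_priv_size), and then
 * recovers it via lport_priv(). The names here are placeholders.
 */
static inline void *my_lld_priv(struct Scsi_Host *shost)
{
	struct fc_lport *lport = shost_priv(shost);

	return lport_priv(lport);
}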
+
+static inline int fc_lport_test_ready(struct fc_lport *lp)
+{
+	return lp->state == LPORT_ST_READY;
+}
+
+static inline void fc_set_wwnn(struct fc_lport *lp, u64 wwnn)
+{
+	lp->wwnn = wwnn;
+}
+
+static inline void fc_set_wwpn(struct fc_lport *lp, u64 wwnn)
+{
+	lp->wwpn = wwnn;
+}
+
+static inline void fc_lport_state_enter(struct fc_lport *lp,
+					enum fc_lport_state state)
+{
+	if (state != lp->state)
+		lp->retry_count = 0;
+	lp->state = state;
+}
+
+
+/**
+ * LOCAL PORT LAYER
+ *****************************/
+int fc_lport_init(struct fc_lport *lp);
+
+/*
+ * Destroy the specified local port by finding and freeing all
+ * fc_rports associated with it and then by freeing the fc_lport
+ * itself.
+ */
+int fc_lport_destroy(struct fc_lport *lp);
+
+/*
+ * Logout the specified local port from the fabric
+ */
+int fc_fabric_logoff(struct fc_lport *lp);
+
+/*
+ * Initiate the LP state machine. This handler will use fc_host_attr
+ * to store the FLOGI service parameters, so fc_host_attr must be
+ * initialized before calling this handler.
+ */
+int fc_fabric_login(struct fc_lport *lp);
+
+/*
+ * The link is up for the given local port.
+ */
+void fc_linkup(struct fc_lport *);
+
+/*
+ * Link is down for the given local port.
+ */
+void fc_linkdown(struct fc_lport *);
+
+/*
+ * Pause and unpause traffic.
+ */
+void fc_pause(struct fc_lport *);
+void fc_unpause(struct fc_lport *);
+
+/*
+ * Configure the local port.
+ */
+int fc_lport_config(struct fc_lport *);
+
+/*
+ * Reset the local port.
+ */
+int fc_lport_reset(struct fc_lport *);
+
+/*
+ * Set the mfs or reset
+ */
+int fc_set_mfs(struct fc_lport *lp, u32 mfs);
+
+
+/**
+ * REMOTE PORT LAYER
+ *****************************/
+int fc_rport_init(struct fc_lport *lp);
+void fc_rport_terminate_io(struct fc_rport *rp);
+
+/**
+ * DISCOVERY LAYER
+ *****************************/
+int fc_disc_init(struct fc_lport *lp);
+
+
+/**
+ * SCSI LAYER
+ *****************************/
+/*
+ * Initialize the SCSI block of libfc
+ */
+int fc_fcp_init(struct fc_lport *);
+
+/*
+ * This section provides an API which allows direct interaction
+ * with the SCSI-ml. Each of these functions satisfies a function
+ * pointer defined in Scsi_Host and therefore is always called
+ * directly from the SCSI-ml.
+ */
+int fc_queuecommand(struct scsi_cmnd *sc_cmd,
+		    void (*done)(struct scsi_cmnd *));
+
+/*
+ * complete processing of a fcp packet
+ *
+ * This function may sleep if an fsp timer is pending.
+ * The host lock must not be held by caller.
+ */
+void fc_fcp_complete(struct fc_fcp_pkt *fsp);
+
+/*
+ * Send an ABTS frame to the target device. The sc_cmd argument
+ * is a pointer to the SCSI command to be aborted.
+ */
+int fc_eh_abort(struct scsi_cmnd *sc_cmd);
+
+/*
+ * Reset a LUN by sending the TM cmd to the target.
+ */
+int fc_eh_device_reset(struct scsi_cmnd *sc_cmd);
+
+/*
+ * Reset the host adapter.
+ */
+int fc_eh_host_reset(struct scsi_cmnd *sc_cmd);
+
+/*
+ * Check rport status.
+ */
+int fc_slave_alloc(struct scsi_device *sdev);
+
+/*
+ * Adjust the queue depth.
+ */
+int fc_change_queue_depth(struct scsi_device *sdev, int qdepth);
+
+/*
+ * Change the tag type.
+ */
+int fc_change_queue_type(struct scsi_device *sdev, int tag_type);
+
+/*
+ * Free memory pools used by the FCP layer.
+ */
+void fc_fcp_destroy(struct fc_lport *);
+
+/**
+ * ELS/CT interface
+ *****************************/
+/*
+ * Initializes ELS/CT interface
+ */
+int fc_elsct_init(struct fc_lport *lp);
+
+
+/**
+ * EXCHANGE MANAGER LAYER
+ *****************************/
+/*
+ * Initializes Exchange Manager related
+ * function pointers in struct libfc_function_template.
+ */
+int fc_exch_init(struct fc_lport *lp);
+
+/*
+ * Allocates an Exchange Manager (EM).
+ *
+ * The EM manages exchange allocation and freeing,
+ * and also allows exchange lookup for a received
+ * frame.
+ *
+ * The class is used to initialize the FC class of
+ * exchanges allocated from the EM.
+ *
+ * The min_xid and max_xid limit the exchange IDs
+ * (XIDs) assigned to new exchanges to this range.
+ * The LLD may choose to have multiple EMs,
+ * e.g. one EM instance per CPU receive thread in the LLD.
+ * The LLD can use exch_get() of struct libfc_function_template
+ * to specify the XID for a new exchange within
+ * a specified EM instance.
+ *
+ * The em_idx uniquely identifies an EM instance.
+ */
+struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp,
+				      enum fc_class class,
+				      u16 min_xid,
+				      u16 max_xid);
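
A plausible allocation, sketched under the assumption of a single EM with an
illustrative XID range (FC_CLASS_3 comes from the FC headers added in this
series):

	struct fc_exch_mgr *em;

	em = fc_exch_mgr_alloc(lp, FC_CLASS_3, 0, 0x0fff);	/* XID range is illustrative */
	if (!em)
		return -ENOMEM;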
+
+/*
+ * Free an exchange manager.
+ */
+void fc_exch_mgr_free(struct fc_exch_mgr *mp);
+
+/*
+ * Receive a frame on the specified local port and exchange manager.
+ */
+void fc_exch_recv(struct fc_lport *lp, struct fc_exch_mgr *mp,
+		  struct fc_frame *fp);
+
+/*
+ * This function implements the exch_seq_send function pointer in
+ * struct libfc_function_template; see the comment block on
+ * exch_seq_send for a description of this function.
+ */
+struct fc_seq *fc_exch_seq_send(struct fc_lport *lp,
+				struct fc_frame *fp,
+				void (*resp)(struct fc_seq *sp,
+					     struct fc_frame *fp,
+					     void *arg),
+				void (*destructor)(struct fc_seq *sp,
+						   void *arg),
+				void *arg, u32 timer_msec);
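
The resp callback receives either a response frame or an error pointer; a
hedged sketch of its shape (the function name and handling are assumptions):

	static void my_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg)
	{
		if (IS_ERR(fp))
			return;	/* e.g. exchange timed out or was aborted */

		/* ... process the response payload ... */
		fc_frame_free(fp);
	}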
+
+/*
+ * Send a frame using an existing sequence and exchange.
+ */
+int fc_seq_send(struct fc_lport *lp, struct fc_seq *sp, struct fc_frame *fp);
+
+/*
+ * Send an ELS response, using information held in the
+ * exchange and sequence by the EM layer.
+ */
+void fc_seq_els_rsp_send(struct fc_seq *sp, enum fc_els_cmd els_cmd,
+			 struct fc_seq_els_data *els_data);
+
+/*
+ * This function implements the seq_exch_abort function pointer in
+ * struct libfc_function_template; see the comment block on
+ * seq_exch_abort for a description of this function.
+ */
+int fc_seq_exch_abort(const struct fc_seq *req_sp, unsigned int timer_msec);
+
+/*
+ * Indicate that an exchange/sequence tuple is complete and the memory
+ * allocated for the related objects may be freed.
+ */
+void fc_exch_done(struct fc_seq *sp);
+
+/*
+ * Assign an EM and XID for a frame, then allocate
+ * a new exchange and sequence pair.
+ * The frame (fp) may be used to determine a free XID.
+ */
+struct fc_exch *fc_exch_get(struct fc_lport *lp, struct fc_frame *fp);
+
+/*
+ * Allocate a new exchange and sequence pair.
+ * If ex_id is zero, the next free exchange ID
+ * from the specified exchange manager (mp) is assigned.
+ */
+struct fc_exch *fc_exch_alloc(struct fc_exch_mgr *mp,
+			      struct fc_frame *fp, u16 ex_id);
+/*
+ * Start a new sequence on the same exchange as the supplied sequence.
+ */
+struct fc_seq *fc_seq_start_next(struct fc_seq *sp);
+
+/*
+ * Reset an exchange manager, completing all sequences and exchanges.
+ * If s_id is non-zero, reset only exchanges originating from that FID.
+ * If d_id is non-zero, reset only exchanges sending to that FID.
+ */
+void fc_exch_mgr_reset(struct fc_exch_mgr *, u32 s_id, u32 d_id);
+
+/*
+ * Functions for struct fc_function_template
+ */
+void fc_get_host_speed(struct Scsi_Host *shost);
+void fc_get_host_port_type(struct Scsi_Host *shost);
+void fc_get_host_port_state(struct Scsi_Host *shost);
+void fc_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout);
+struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *);
+
+/*
+ * module setup functions.
+ */
+int fc_setup_exch_mgr(void);
+void fc_destroy_exch_mgr(void);
+int fc_setup_rport(void);
+void fc_destroy_rport(void);
+
+#endif /* _LIBFC_H_ */
diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h
new file mode 100644
index 000000000000..89fdbb9a6a1b
--- /dev/null
+++ b/include/scsi/libfcoe.h
@@ -0,0 +1,176 @@
+/*
+ * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+#ifndef _LIBFCOE_H
+#define _LIBFCOE_H
+
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <scsi/fc/fc_fcoe.h>
+#include <scsi/libfc.h>
+
+/*
+ * Per-CPU receive thread context for FCoE.
+ */
+struct fcoe_percpu_s {
+	int		cpu;
+	struct task_struct *thread;
+	struct sk_buff_head fcoe_rx_list;
+	struct page *crc_eof_page;
+	int crc_eof_offset;
+};
+
+/*
+ * The FCoE software transport's private data.
+ */
+struct fcoe_softc {
+	struct list_head list;
+	struct fc_lport *lp;
+	struct net_device *real_dev;
+	struct net_device *phys_dev;		/* device with ethtool_ops */
+	struct packet_type  fcoe_packet_type;
+	struct sk_buff_head fcoe_pending_queue;
+
+	u8 dest_addr[ETH_ALEN];
+	u8 ctl_src_addr[ETH_ALEN];
+	u8 data_src_addr[ETH_ALEN];
+	/*
+	 * fcoe protocol address learning related stuff
+	 */
+	u16 flogi_oxid;
+	u8 flogi_progress;
+	u8 address_mode;
+};
+
+static inline struct fcoe_softc *fcoe_softc(
+	const struct fc_lport *lp)
+{
+	return (struct fcoe_softc *)lport_priv(lp);
+}
+
+static inline struct net_device *fcoe_netdev(
+	const struct fc_lport *lp)
+{
+	return fcoe_softc(lp)->real_dev;
+}
+
+static inline struct fcoe_hdr *skb_fcoe_header(const struct sk_buff *skb)
+{
+	return (struct fcoe_hdr *)skb_network_header(skb);
+}
+
+static inline int skb_fcoe_offset(const struct sk_buff *skb)
+{
+	return skb_network_offset(skb);
+}
+
+static inline struct fc_frame_header *skb_fc_header(const struct sk_buff *skb)
+{
+	return (struct fc_frame_header *)skb_transport_header(skb);
+}
+
+static inline int skb_fc_offset(const struct sk_buff *skb)
+{
+	return skb_transport_offset(skb);
+}
+
+static inline void skb_reset_fc_header(struct sk_buff *skb)
+{
+	skb_reset_network_header(skb);
+	skb_set_transport_header(skb, skb_network_offset(skb) +
+				 sizeof(struct fcoe_hdr));
+}
+
+static inline bool skb_fc_is_data(const struct sk_buff *skb)
+{
+	return skb_fc_header(skb)->fh_r_ctl == FC_RCTL_DD_SOL_DATA;
+}
+
+static inline bool skb_fc_is_cmd(const struct sk_buff *skb)
+{
+	return skb_fc_header(skb)->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD;
+}
+
+static inline bool skb_fc_has_exthdr(const struct sk_buff *skb)
+{
+	return (skb_fc_header(skb)->fh_r_ctl == FC_RCTL_VFTH) ||
+	    (skb_fc_header(skb)->fh_r_ctl == FC_RCTL_IFRH) ||
+	    (skb_fc_header(skb)->fh_r_ctl == FC_RCTL_ENCH);
+}
+
+static inline bool skb_fc_is_roff(const struct sk_buff *skb)
+{
+	return skb_fc_header(skb)->fh_f_ctl[2] & FC_FC_REL_OFF;
+}
+
+static inline u16 skb_fc_oxid(const struct sk_buff *skb)
+{
+	return be16_to_cpu(skb_fc_header(skb)->fh_ox_id);
+}
+
+static inline u16 skb_fc_rxid(const struct sk_buff *skb)
+{
+	return be16_to_cpu(skb_fc_header(skb)->fh_rx_id);
+}
+
+/* FIXME - DMA_BIDIRECTIONAL ? */
+#define skb_cb(skb)	((struct fcoe_rcv_info *)&((skb)->cb[0]))
+#define skb_cmd(skb)	(skb_cb(skb)->fr_cmd)
+#define skb_dir(skb)	(skb_cmd(skb)->sc_data_direction)
+static inline bool skb_fc_is_read(const struct sk_buff *skb)
+{
+	if (skb_fc_is_cmd(skb) && skb_cmd(skb))
+		return skb_dir(skb) == DMA_FROM_DEVICE;
+	return false;
+}
+
+static inline bool skb_fc_is_write(const struct sk_buff *skb)
+{
+	if (skb_fc_is_cmd(skb) && skb_cmd(skb))
+		return skb_dir(skb) == DMA_TO_DEVICE;
+	return false;
+}
+
+/* libfcoe funcs */
+int fcoe_reset(struct Scsi_Host *shost);
+u64 fcoe_wwn_from_mac(unsigned char mac[MAX_ADDR_LEN],
+		      unsigned int scheme, unsigned int port);
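
As a hedged example of deriving local WWNs from a netdev MAC address (the
scheme and port values here are assumptions, not mandated by this header):

	u64 wwnn, wwpn;

	wwnn = fcoe_wwn_from_mac(netdev->dev_addr, 1, 0);	/* node name */
	wwpn = fcoe_wwn_from_mac(netdev->dev_addr, 2, 0);	/* port name */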
+
+u32 fcoe_fc_crc(struct fc_frame *fp);
+int fcoe_xmit(struct fc_lport *, struct fc_frame *);
+int fcoe_rcv(struct sk_buff *, struct net_device *,
+	     struct packet_type *, struct net_device *);
+
+int fcoe_percpu_receive_thread(void *arg);
+void fcoe_clean_pending_queue(struct fc_lport *lp);
+void fcoe_percpu_clean(struct fc_lport *lp);
+void fcoe_watchdog(ulong vp);
+int fcoe_link_ok(struct fc_lport *lp);
+
+struct fc_lport *fcoe_hostlist_lookup(const struct net_device *);
+int fcoe_hostlist_add(const struct fc_lport *);
+int fcoe_hostlist_remove(const struct fc_lport *);
+
+struct Scsi_Host *fcoe_host_alloc(struct scsi_host_template *, int);
+int fcoe_libfc_config(struct fc_lport *, struct libfc_function_template *);
+
+/* fcoe sw hba */
+int __init fcoe_sw_init(void);
+int __exit fcoe_sw_exit(void);
+#endif /* _LIBFCOE_H */
diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
index 61e53f14f7e1..7360e1916e75 100644
--- a/include/scsi/libiscsi.h
+++ b/include/scsi/libiscsi.h
@@ -30,6 +30,7 @@
 #include <linux/workqueue.h>
 #include <scsi/iscsi_proto.h>
 #include <scsi/iscsi_if.h>
+#include <scsi/scsi_transport_iscsi.h>
 
 struct scsi_transport_template;
 struct scsi_host_template;
@@ -70,12 +71,12 @@ enum {
 /* Connection suspend "bit" */
 #define ISCSI_SUSPEND_BIT		1
 
-#define ISCSI_ITT_MASK			(0x1fff)
+#define ISCSI_ITT_MASK			0x1fff
 #define ISCSI_TOTAL_CMDS_MAX		4096
 /* this must be a power of two greater than ISCSI_MGMT_CMDS_MAX */
 #define ISCSI_TOTAL_CMDS_MIN		16
 #define ISCSI_AGE_SHIFT			28
-#define ISCSI_AGE_MASK			(0xf << ISCSI_AGE_SHIFT)
+#define ISCSI_AGE_MASK			0xf
 
 #define ISCSI_ADDRESS_BUF_LEN		64
 
@@ -93,24 +94,38 @@ enum {
 	ISCSI_TASK_RUNNING,
 };
 
+struct iscsi_r2t_info {
+	__be32			ttt;		/* copied from R2T */
+	__be32			exp_statsn;	/* copied from R2T */
+	uint32_t		data_length;	/* copied from R2T */
+	uint32_t		data_offset;	/* copied from R2T */
+	int			data_count;	/* DATA-Out payload progress */
+	int			datasn;
+	/* LLDs should set/update these values */
+	int			sent;		/* R2T sequence progress */
+};
+
 struct iscsi_task {
 	/*
 	 * Because LLDs allocate their hdr differently, this is a pointer
 	 * and length to that storage. It must be setup at session
 	 * creation time.
 	 */
-	struct iscsi_cmd	*hdr;
+	struct iscsi_hdr	*hdr;
 	unsigned short		hdr_max;
 	unsigned short		hdr_len;	/* accumulated size of hdr used */
+	/* copied values in case we need to send tmfs */
+	itt_t			hdr_itt;
+	__be32			cmdsn;
+	uint8_t			lun[8];
+
 	int			itt;		/* this ITT */
 
-	uint32_t		unsol_datasn;
 	unsigned		imm_count;	/* imm-data (bytes)   */
-	unsigned		unsol_count;	/* unsolicited (bytes)*/
 	/* offset in unsolicited stream (bytes); */
-	unsigned		unsol_offset;
-	unsigned		data_count;	/* remaining Data-Out */
+	struct iscsi_r2t_info	unsol_r2t;
 	char			*data;		/* mgmt payload */
+	unsigned		data_count;
 	struct scsi_cmnd	*sc;		/* associated SCSI cmd*/
 	struct iscsi_conn	*conn;		/* used connection    */
 
@@ -121,6 +136,11 @@ struct iscsi_task {
 	void			*dd_data;	/* driver/transport data */
 };
 
+static inline int iscsi_task_has_unsol_data(struct iscsi_task *task)
+{
+	return task->unsol_r2t.data_length > task->unsol_r2t.sent;
+}
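
With unsolicited data tracked as an iscsi_r2t_info, a transmit path can drive
unsolicited Data-Out PDUs through the same helper used for solicited R2Ts; a
hedged fragment (the queueing step is an assumption):

	if (iscsi_task_has_unsol_data(task)) {
		struct iscsi_data hdr;

		iscsi_prep_data_out_pdu(task, &task->unsol_r2t, &hdr);
		/* ... build and queue the DATA-OUT PDU described by &hdr ... */
	}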
+
 static inline void* iscsi_next_hdr(struct iscsi_task *task)
 {
 	return (void*)task->hdr + task->hdr_len;
@@ -376,8 +396,9 @@ extern void iscsi_suspend_tx(struct iscsi_conn *conn);
  * pdu and task processing
  */
 extern void iscsi_update_cmdsn(struct iscsi_session *, struct iscsi_nopin *);
-extern void iscsi_prep_unsolicit_data_pdu(struct iscsi_task *,
-					struct iscsi_data *hdr);
+extern void iscsi_prep_data_out_pdu(struct iscsi_task *task,
+				    struct iscsi_r2t_info *r2t,
+				    struct iscsi_data *hdr);
 extern int iscsi_conn_send_pdu(struct iscsi_cls_conn *, struct iscsi_hdr *,
 				char *, uint32_t);
 extern int iscsi_complete_pdu(struct iscsi_conn *, struct iscsi_hdr *,
diff --git a/include/scsi/libiscsi_tcp.h b/include/scsi/libiscsi_tcp.h
new file mode 100644
index 000000000000..83e32f6d7859
--- /dev/null
+++ b/include/scsi/libiscsi_tcp.h
@@ -0,0 +1,132 @@
+/*
+ * iSCSI over TCP/IP Data-Path lib
+ *
+ * Copyright (C) 2008 Mike Christie
+ * Copyright (C) 2008 Red Hat, Inc.  All rights reserved.
+ * maintained by open-iscsi@googlegroups.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * See the file COPYING included with this distribution for more details.
+ */
+
+#ifndef LIBISCSI_TCP_H
+#define LIBISCSI_TCP_H
+
+#include <scsi/libiscsi.h>
+
+struct iscsi_tcp_conn;
+struct iscsi_segment;
+struct sk_buff;
+struct hash_desc;
+
+typedef int iscsi_segment_done_fn_t(struct iscsi_tcp_conn *,
+				    struct iscsi_segment *);
+
+struct iscsi_segment {
+	unsigned char		*data;
+	unsigned int		size;
+	unsigned int		copied;
+	unsigned int		total_size;
+	unsigned int		total_copied;
+
+	struct hash_desc	*hash;
+	unsigned char		recv_digest[ISCSI_DIGEST_SIZE];
+	unsigned char		digest[ISCSI_DIGEST_SIZE];
+	unsigned int		digest_len;
+
+	struct scatterlist	*sg;
+	void			*sg_mapped;
+	unsigned int		sg_offset;
+
+	iscsi_segment_done_fn_t	*done;
+};
+
+/* Socket connection receive helper */
+struct iscsi_tcp_recv {
+	struct iscsi_hdr	*hdr;
+	struct iscsi_segment	segment;
+
+	/* buffer for the BHS + AHS */
+	uint32_t		hdr_buf[64];
+
+	/* copied and flipped values */
+	int			datalen;
+};
+
+struct iscsi_tcp_conn {
+	struct iscsi_conn	*iscsi_conn;
+	void			*dd_data;
+	int			stop_stage;	/* conn_stop() flag: *
+						 * stop to recover,  *
+						 * stop to terminate */
+	/* control data */
+	struct iscsi_tcp_recv	in;		/* TCP receive context */
+	/* CRC32C (Rx): the LLD should set this if it does not offload digests */
+	struct hash_desc	*rx_hash;
+};
+
+struct iscsi_tcp_task {
+	uint32_t		exp_datasn;	/* expected target's R2TSN/DataSN */
+	int			data_offset;
+	struct iscsi_r2t_info	*r2t;		/* in-progress solicited R2T */
+	struct iscsi_pool	r2tpool;
+	struct kfifo		*r2tqueue;
+	void			*dd_data;
+};
+
+enum {
+	ISCSI_TCP_SEGMENT_DONE,		/* curr seg has been processed */
+	ISCSI_TCP_SKB_DONE,		/* skb is out of data */
+	ISCSI_TCP_CONN_ERR,		/* iscsi layer has fired a conn err */
+	ISCSI_TCP_SUSPENDED,		/* conn is suspended */
+};
+
+extern void iscsi_tcp_hdr_recv_prep(struct iscsi_tcp_conn *tcp_conn);
+extern int iscsi_tcp_recv_skb(struct iscsi_conn *conn, struct sk_buff *skb,
+			      unsigned int offset, bool offloaded, int *status);
+extern void iscsi_tcp_cleanup_task(struct iscsi_task *task);
+extern int iscsi_tcp_task_init(struct iscsi_task *task);
+extern int iscsi_tcp_task_xmit(struct iscsi_task *task);
+
+/* segment helpers */
+extern int iscsi_tcp_recv_segment_is_hdr(struct iscsi_tcp_conn *tcp_conn);
+extern int iscsi_tcp_segment_done(struct iscsi_tcp_conn *tcp_conn,
+				  struct iscsi_segment *segment, int recv,
+				  unsigned copied);
+extern void iscsi_tcp_segment_unmap(struct iscsi_segment *segment);
+
+extern void iscsi_segment_init_linear(struct iscsi_segment *segment,
+				      void *data, size_t size,
+				      iscsi_segment_done_fn_t *done,
+				      struct hash_desc *hash);
+extern int
+iscsi_segment_seek_sg(struct iscsi_segment *segment,
+		      struct scatterlist *sg_list, unsigned int sg_count,
+		      unsigned int offset, size_t size,
+		      iscsi_segment_done_fn_t *done, struct hash_desc *hash);
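
A hedged sketch of pointing the receive segment at a command's scatterlist
for a Data-In PDU; the done callback and the offset/count variables are
assumptions:

	int err;

	err = iscsi_segment_seek_sg(&tcp_conn->in.segment,
				    scsi_sglist(task->sc),
				    scsi_sg_count(task->sc),
				    offset, count,
				    my_data_done, tcp_conn->rx_hash);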
+
+/* digest helpers */
+extern void iscsi_tcp_dgst_header(struct hash_desc *hash, const void *hdr,
+				  size_t hdrlen,
+				  unsigned char digest[ISCSI_DIGEST_SIZE]);
+extern struct iscsi_cls_conn *
+iscsi_tcp_conn_setup(struct iscsi_cls_session *cls_session, int dd_data_size,
+		     uint32_t conn_idx);
+extern void iscsi_tcp_conn_teardown(struct iscsi_cls_conn *cls_conn);
+
+/* misc helpers */
+extern int iscsi_tcp_r2tpool_alloc(struct iscsi_session *session);
+extern void iscsi_tcp_r2tpool_free(struct iscsi_session *session);
+
+extern void iscsi_tcp_conn_get_stats(struct iscsi_cls_conn *cls_conn,
+				     struct iscsi_stats *stats);
+#endif /* LIBISCSI_TCP_H */
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index a37a8148a310..01a4c58f8bad 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -159,8 +159,6 @@ struct scsi_device {
 	atomic_t iodone_cnt;
 	atomic_t ioerr_cnt;
 
-	int timeout;
-
 	struct device		sdev_gendev,
 				sdev_dev;
 
@@ -367,10 +365,11 @@ extern int scsi_is_target_device(const struct device *);
 extern int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
 			int data_direction, void *buffer, unsigned bufflen,
 			unsigned char *sense, int timeout, int retries,
-			int flag);
+			int flag, int *resid);
 extern int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
 			    int data_direction, void *buffer, unsigned bufflen,
-			    struct scsi_sense_hdr *, int timeout, int retries);
+			    struct scsi_sense_hdr *, int timeout, int retries,
+			    int *resid);
 extern int scsi_execute_async(struct scsi_device *sdev,
 			      const unsigned char *cmd, int cmd_len, int data_direction,
 			      void *buffer, unsigned bufflen, int use_sg,
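
Both helpers grow a resid out-parameter for the transfer residual; existing
callers that do not need it can pass NULL, as in this hedged sketch of an
updated call site:

	struct scsi_sense_hdr sshdr;
	int result;

	result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, len,
				  &sshdr, timeout, retries, NULL);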
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
index c667cc396545..b50aabe2861e 100644
--- a/include/scsi/scsi_transport_iscsi.h
+++ b/include/scsi/scsi_transport_iscsi.h
@@ -113,10 +113,18 @@ struct iscsi_transport {
 			 char *data, uint32_t data_size);
 	void (*get_stats) (struct iscsi_cls_conn *conn,
 			   struct iscsi_stats *stats);
+
 	int (*init_task) (struct iscsi_task *task);
 	int (*xmit_task) (struct iscsi_task *task);
-	void (*cleanup_task) (struct iscsi_conn *conn,
-				  struct iscsi_task *task);
+	void (*cleanup_task) (struct iscsi_task *task);
+
+	int (*alloc_pdu) (struct iscsi_task *task, uint8_t opcode);
+	int (*xmit_pdu) (struct iscsi_task *task);
+	int (*init_pdu) (struct iscsi_task *task, unsigned int offset,
+			 unsigned int count);
+	void (*parse_pdu_itt) (struct iscsi_conn *conn, itt_t itt,
+			       int *index, int *age);
+
 	void (*session_recovery_timedout) (struct iscsi_cls_session *session);
 	struct iscsi_endpoint *(*ep_connect) (struct sockaddr *dst_addr,
 					      int non_blocking);