author		David S. Miller <davem@davemloft.net>	2008-09-19 15:51:35 -0700
committer	David S. Miller <davem@davemloft.net>	2008-09-19 15:51:35 -0700
commit		79b6f7ecdac7a37df72a5f354816c0dd0b6ac592 (patch)
tree		cb709af3ca7425768a596df97ccafbd6b8397d1d
parent		02a1416478b70cd49bd74827438c8ba797784728 (diff)
parent		c4e84bde1d595d857d3c74b49b9c45cc770df792 (diff)
download	linux-79b6f7ecdac7a37df72a5f354816c0dd0b6ac592.tar.gz
Merge branch 'new-drivers' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6
-rw-r--r--	Documentation/networking/LICENSE.qlge	46
-rw-r--r--	MAINTAINERS	20
-rw-r--r--	drivers/net/Kconfig	39
-rw-r--r--	drivers/net/Makefile	4
-rw-r--r--	drivers/net/atlx/Makefile	2
-rw-r--r--	drivers/net/atlx/atl2.c	3127
-rw-r--r--	drivers/net/atlx/atl2.h	530
-rw-r--r--	drivers/net/enic/Makefile	5
-rw-r--r--	drivers/net/enic/cq_desc.h	79
-rw-r--r--	drivers/net/enic/cq_enet_desc.h	169
-rw-r--r--	drivers/net/enic/enic.h	115
-rw-r--r--	drivers/net/enic/enic_main.c	1949
-rw-r--r--	drivers/net/enic/enic_res.c	370
-rw-r--r--	drivers/net/enic/enic_res.h	151
-rw-r--r--	drivers/net/enic/rq_enet_desc.h	60
-rw-r--r--	drivers/net/enic/vnic_cq.c	89
-rw-r--r--	drivers/net/enic/vnic_cq.h	113
-rw-r--r--	drivers/net/enic/vnic_dev.c	674
-rw-r--r--	drivers/net/enic/vnic_dev.h	106
-rw-r--r--	drivers/net/enic/vnic_devcmd.h	282
-rw-r--r--	drivers/net/enic/vnic_enet.h	47
-rw-r--r--	drivers/net/enic/vnic_intr.c	62
-rw-r--r--	drivers/net/enic/vnic_intr.h	92
-rw-r--r--	drivers/net/enic/vnic_nic.h	65
-rw-r--r--	drivers/net/enic/vnic_resource.h	63
-rw-r--r--	drivers/net/enic/vnic_rq.c	199
-rw-r--r--	drivers/net/enic/vnic_rq.h	204
-rw-r--r--	drivers/net/enic/vnic_rss.h	32
-rw-r--r--	drivers/net/enic/vnic_stats.h	70
-rw-r--r--	drivers/net/enic/vnic_wq.c	184
-rw-r--r--	drivers/net/enic/vnic_wq.h	154
-rw-r--r--	drivers/net/enic/wq_enet_desc.h	98
-rw-r--r--	drivers/net/jme.c	3019
-rw-r--r--	drivers/net/jme.h	1199
-rw-r--r--	drivers/net/qlge/Makefile	7
-rw-r--r--	drivers/net/qlge/qlge.h	1593
-rw-r--r--	drivers/net/qlge/qlge_dbg.c	858
-rw-r--r--	drivers/net/qlge/qlge_ethtool.c	415
-rw-r--r--	drivers/net/qlge/qlge_main.c	3954
-rw-r--r--	drivers/net/qlge/qlge_mpi.c	150
-rw-r--r--	include/linux/pci_ids.h	3
41 files changed, 20398 insertions, 0 deletions
diff --git a/Documentation/networking/LICENSE.qlge b/Documentation/networking/LICENSE.qlge
new file mode 100644
index 000000000000..123b6edd7f18
--- /dev/null
+++ b/Documentation/networking/LICENSE.qlge
@@ -0,0 +1,46 @@
+Copyright (c)  2003-2008 QLogic Corporation
+QLogic Linux Networking HBA Driver
+
+This program includes a device driver for Linux 2.6 that may be
+distributed with QLogic hardware specific firmware binary file.
+You may modify and redistribute the device driver code under the
+GNU General Public License as published by the Free Software
+Foundation (version 2 or a later version).
+
+You may redistribute the hardware specific firmware binary file
+under the following terms:
+
+	1. Redistribution of source code (only if applicable),
+	   must retain the above copyright notice, this list of
+	   conditions and the following disclaimer.
+
+	2. Redistribution in binary form must reproduce the above
+	   copyright notice, this list of conditions and the
+	   following disclaimer in the documentation and/or other
+	   materials provided with the distribution.
+
+	3. The name of QLogic Corporation may not be used to
+	   endorse or promote products derived from this software
+	   without specific prior written permission
+
+REGARDLESS OF WHAT LICENSING MECHANISM IS USED OR APPLICABLE,
+THIS PROGRAM IS PROVIDED BY QLOGIC CORPORATION "AS IS" AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR
+BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+USER ACKNOWLEDGES AND AGREES THAT USE OF THIS PROGRAM WILL NOT
+CREATE OR GIVE GROUNDS FOR A LICENSE BY IMPLICATION, ESTOPPEL, OR
+OTHERWISE IN ANY INTELLECTUAL PROPERTY RIGHTS (PATENT, COPYRIGHT,
+TRADE SECRET, MASK WORK, OR OTHER PROPRIETARY RIGHT) EMBODIED IN
+ANY OTHER QLOGIC HARDWARE OR SOFTWARE EITHER SOLELY OR IN
+COMBINATION WITH THIS PROGRAM.
+
diff --git a/MAINTAINERS b/MAINTAINERS
index b3e92fbe336c..106684e45e15 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1046,6 +1046,13 @@ L:	cbe-oss-dev@ozlabs.org
 W:	http://www.ibm.com/developerworks/power/cell/
 S:	Supported
 
+CISCO 10G ETHERNET DRIVER
+P:	Scott Feldman
+M:	scofeldm@cisco.com
+P:	Joe Eykholt
+M:	jeykholt@cisco.com
+S:	Supported
+
 CFAG12864B LCD DRIVER
 P:	Miguel Ojeda Sandonis
 M:	miguel.ojeda.sandonis@gmail.com
@@ -2319,6 +2326,12 @@ L:	video4linux-list@redhat.com
 W:	http://www.ivtvdriver.org
 S:	Maintained
 
+JME NETWORK DRIVER
+P:	Guo-Fu Tseng
+M:	cooldavid@cooldavid.org
+L:	netdev@vger.kernel.org
+S:	Maintained
+
 JOURNALLING FLASH FILE SYSTEM V2 (JFFS2)
 P:	David Woodhouse
 M:	dwmw2@infradead.org
@@ -3383,6 +3396,13 @@ M:	linux-driver@qlogic.com
 L:	netdev@vger.kernel.org
 S:	Supported
 
+QLOGIC QLGE 10Gb ETHERNET DRIVER
+P:	Ron Mercer
+M:	linux-driver@qlogic.com
+M:	ron.mercer@qlogic.com
+L:	netdev@vger.kernel.org
+S:	Supported
+
 QNX4 FILESYSTEM
 P:	Anders Larsen
 M:	al@alarsen.net
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 4a11296a9514..69c81da48ebc 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1840,6 +1840,17 @@ config NE_H8300
 	  Say Y here if you want to use the NE2000 compatible
 	  controller on the Renesas H8/300 processor.
 
+config ATL2
+	tristate "Atheros L2 Fast Ethernet support"
+	depends on PCI
+	select CRC32
+	select MII
+	help
+	  This driver supports the Atheros L2 fast ethernet adapter.
+
+	  To compile this driver as a module, choose M here.  The module
+	  will be called atl2.
+
 source "drivers/net/fs_enet/Kconfig"
 
 endif # NET_ETHERNET
@@ -2302,6 +2313,18 @@ config ATL1E
 	  To compile this driver as a module, choose M here.  The module
 	  will be called atl1e.
 
+config JME
+	tristate "JMicron(R) PCI-Express Gigabit Ethernet support"
+	depends on PCI
+	select CRC32
+	select MII
+	---help---
+	  This driver supports the PCI-Express gigabit ethernet adapters
+	  based on JMicron JMC250 chipset.
+
+	  To compile this driver as a module, choose M here. The module
+	  will be called jme.
+
 endif # NETDEV_1000
 
 #
@@ -2377,6 +2400,13 @@ config EHEA
 	  To compile the driver as a module, choose M here. The module
 	  will be called ehea.
 
+config ENIC
+	tristate "E, the Cisco 10G Ethernet NIC"
+	depends on PCI && INET
+	select INET_LRO
+	help
+	  This enables the support for the Cisco 10G Ethernet card.
+
 config IXGBE
 	tristate "Intel(R) 10GbE PCI Express adapters support"
 	depends on PCI && INET
@@ -2496,6 +2526,15 @@ config BNX2X
 	  To compile this driver as a module, choose M here: the module
 	  will be called bnx2x.  This is recommended.
 
+config QLGE
+	tristate "QLogic QLGE 10Gb Ethernet Driver Support"
+	depends on PCI
+	help
+	  This driver supports QLogic ISP8XXX 10Gb Ethernet cards.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called qlge.
+
 source "drivers/net/sfc/Kconfig"
 
 endif # NETDEV_10000
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index f66b79bd3b89..fa2510b2e609 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -15,9 +15,12 @@ obj-$(CONFIG_EHEA) += ehea/
 obj-$(CONFIG_CAN) += can/
 obj-$(CONFIG_BONDING) += bonding/
 obj-$(CONFIG_ATL1) += atlx/
+obj-$(CONFIG_ATL2) += atlx/
 obj-$(CONFIG_ATL1E) += atl1e/
 obj-$(CONFIG_GIANFAR) += gianfar_driver.o
 obj-$(CONFIG_TEHUTI) += tehuti.o
+obj-$(CONFIG_ENIC) += enic/
+obj-$(CONFIG_JME) += jme.o
 
 gianfar_driver-objs := gianfar.o \
 		gianfar_ethtool.o \
@@ -128,6 +131,7 @@ obj-$(CONFIG_AX88796) += ax88796.o
 obj-$(CONFIG_TSI108_ETH) += tsi108_eth.o
 obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o
 obj-$(CONFIG_QLA3XXX) += qla3xxx.o
+obj-$(CONFIG_QLGE) += qlge/
 
 obj-$(CONFIG_PPP) += ppp_generic.o
 obj-$(CONFIG_PPP_ASYNC) += ppp_async.o
diff --git a/drivers/net/atlx/Makefile b/drivers/net/atlx/Makefile
index ca45553a040d..e4f6022ca552 100644
--- a/drivers/net/atlx/Makefile
+++ b/drivers/net/atlx/Makefile
@@ -1 +1,3 @@
 obj-$(CONFIG_ATL1)	+= atl1.o
+obj-$(CONFIG_ATL2)	+= atl2.o
+
diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
new file mode 100644
index 000000000000..d548a67da1e8
--- /dev/null
+++ b/drivers/net/atlx/atl2.c
@@ -0,0 +1,3127 @@
+/*
+ * Copyright(c) 2006 - 2007 Atheros Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2008 Chris Snook <csnook@redhat.com>
+ *
+ * Derived from Intel e1000 driver
+ * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ */
+
+#include <asm/atomic.h>
+#include <linux/crc32.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/hardirq.h>
+#include <linux/if_vlan.h>
+#include <linux/in.h>
+#include <linux/interrupt.h>
+#include <linux/ip.h>
+#include <linux/irqflags.h>
+#include <linux/irqreturn.h>
+#include <linux/mii.h>
+#include <linux/net.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/pm.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/tcp.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include "atl2.h"
+
+#define ATL2_DRV_VERSION "2.2.3"
+
+static char atl2_driver_name[] = "atl2";
+static const char atl2_driver_string[] = "Atheros(R) L2 Ethernet Driver";
+static char atl2_copyright[] = "Copyright (c) 2007 Atheros Corporation.";
+static char atl2_driver_version[] = ATL2_DRV_VERSION;
+
+MODULE_AUTHOR("Atheros Corporation <xiong.huang@atheros.com>, Chris Snook <csnook@redhat.com>");
+MODULE_DESCRIPTION("Atheros Fast Ethernet Network Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(ATL2_DRV_VERSION);
+
+/*
+ * atl2_pci_tbl - PCI Device ID Table
+ */
+static struct pci_device_id atl2_pci_tbl[] = {
+	{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L2)},
+	/* required last entry */
+	{0,}
+};
+MODULE_DEVICE_TABLE(pci, atl2_pci_tbl);
+
+static void atl2_set_ethtool_ops(struct net_device *netdev);
+
+static void atl2_check_options(struct atl2_adapter *adapter);
+
+/*
+ * atl2_sw_init - Initialize general software structures (struct atl2_adapter)
+ * @adapter: board private structure to initialize
+ *
+ * atl2_sw_init initializes the Adapter private data structure.
+ * Fields are initialized based on PCI device information and
+ * OS network device settings (MTU size).
+ */
+static int __devinit atl2_sw_init(struct atl2_adapter *adapter)
+{
+	struct atl2_hw *hw = &adapter->hw;
+	struct pci_dev *pdev = adapter->pdev;
+
+	/* PCI config space info */
+	hw->vendor_id = pdev->vendor;
+	hw->device_id = pdev->device;
+	hw->subsystem_vendor_id = pdev->subsystem_vendor;
+	hw->subsystem_id = pdev->subsystem_device;
+
+	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
+	pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
+
+	adapter->wol = 0;
+	adapter->ict = 50000;  /* ~100ms */
+	adapter->link_speed = SPEED_0;   /* hardware init */
+	adapter->link_duplex = FULL_DUPLEX;
+
+	hw->phy_configured = false;
+	hw->preamble_len = 7;
+	hw->ipgt = 0x60;
+	hw->min_ifg = 0x50;
+	hw->ipgr1 = 0x40;
+	hw->ipgr2 = 0x60;
+	hw->retry_buf = 2;
+	hw->max_retry = 0xf;
+	hw->lcol = 0x37;
+	hw->jam_ipg = 7;
+	hw->fc_rxd_hi = 0;
+	hw->fc_rxd_lo = 0;
+	hw->max_frame_size = adapter->netdev->mtu;
+
+	spin_lock_init(&adapter->stats_lock);
+	spin_lock_init(&adapter->tx_lock);
+
+	set_bit(__ATL2_DOWN, &adapter->flags);
+
+	return 0;
+}
+
+/*
+ * atl2_set_multi - Multicast and Promiscuous mode set
+ * @netdev: network interface device structure
+ *
+ * The set_multi entry point is called whenever the multicast address
+ * list or the network interface flags are updated.  This routine is
+ * responsible for configuring the hardware for proper multicast,
+ * promiscuous mode, and all-multi behavior.
+ */
+static void atl2_set_multi(struct net_device *netdev)
+{
+	struct atl2_adapter *adapter = netdev_priv(netdev);
+	struct atl2_hw *hw = &adapter->hw;
+	struct dev_mc_list *mc_ptr;
+	u32 rctl;
+	u32 hash_value;
+
+	/* Check for Promiscuous and All Multicast modes */
+	rctl = ATL2_READ_REG(hw, REG_MAC_CTRL);
+
+	if (netdev->flags & IFF_PROMISC) {
+		rctl |= MAC_CTRL_PROMIS_EN;
+	} else if (netdev->flags & IFF_ALLMULTI) {
+		rctl |= MAC_CTRL_MC_ALL_EN;
+		rctl &= ~MAC_CTRL_PROMIS_EN;
+	} else
+		rctl &= ~(MAC_CTRL_PROMIS_EN | MAC_CTRL_MC_ALL_EN);
+
+	ATL2_WRITE_REG(hw, REG_MAC_CTRL, rctl);
+
+	/* clear the old settings from the multicast hash table */
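+	/* (the 64-bit hash table spans two 32-bit registers, hence the
+	 *  two writes) */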
+	ATL2_WRITE_REG(hw, REG_RX_HASH_TABLE, 0);
+	ATL2_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0);
+
+	/* compute each multicast address's hash value and set it in the hash table */
+	for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
+		hash_value = atl2_hash_mc_addr(hw, mc_ptr->dmi_addr);
+		atl2_hash_set(hw, hash_value);
+	}
+}
+
+static void init_ring_ptrs(struct atl2_adapter *adapter)
+{
+	/* Read / Write Ptr Initialize: */
+	adapter->txd_write_ptr = 0;
+	atomic_set(&adapter->txd_read_ptr, 0);
+
+	adapter->rxd_read_ptr = 0;
+	adapter->rxd_write_ptr = 0;
+
+	atomic_set(&adapter->txs_write_ptr, 0);
+	adapter->txs_next_clear = 0;
+}
+
+/*
+ * atl2_configure - Configure Transmit&Receive Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Tx/Rx unit of the MAC after a reset.
+ */
+static int atl2_configure(struct atl2_adapter *adapter)
+{
+	struct atl2_hw *hw = &adapter->hw;
+	u32 value;
+
+	/* clear interrupt status */
+	ATL2_WRITE_REG(&adapter->hw, REG_ISR, 0xffffffff);
+
+	/* set MAC Address */
+	value = (((u32)hw->mac_addr[2]) << 24) |
+		(((u32)hw->mac_addr[3]) << 16) |
+		(((u32)hw->mac_addr[4]) << 8) |
+		(((u32)hw->mac_addr[5]));
+	ATL2_WRITE_REG(hw, REG_MAC_STA_ADDR, value);
+	value = (((u32)hw->mac_addr[0]) << 8) |
+		(((u32)hw->mac_addr[1]));
+	ATL2_WRITE_REG(hw, (REG_MAC_STA_ADDR+4), value);
+
+	/* HI base address */
+	ATL2_WRITE_REG(hw, REG_DESC_BASE_ADDR_HI,
+		(u32)((adapter->ring_dma & 0xffffffff00000000ULL) >> 32));
+
+	/* LO base address */
+	ATL2_WRITE_REG(hw, REG_TXD_BASE_ADDR_LO,
+		(u32)(adapter->txd_dma & 0x00000000ffffffffULL));
+	ATL2_WRITE_REG(hw, REG_TXS_BASE_ADDR_LO,
+		(u32)(adapter->txs_dma & 0x00000000ffffffffULL));
+	ATL2_WRITE_REG(hw, REG_RXD_BASE_ADDR_LO,
+		(u32)(adapter->rxd_dma & 0x00000000ffffffffULL));
+
+	/* element count */
+	ATL2_WRITE_REGW(hw, REG_TXD_MEM_SIZE, (u16)(adapter->txd_ring_size/4));
+	ATL2_WRITE_REGW(hw, REG_TXS_MEM_SIZE, (u16)adapter->txs_ring_size);
+	ATL2_WRITE_REGW(hw, REG_RXD_BUF_NUM,  (u16)adapter->rxd_ring_size);
+
+	/* config Internal SRAM */
+/*
+    ATL2_WRITE_REGW(hw, REG_SRAM_TXRAM_END, sram_tx_end);
+    ATL2_WRITE_REGW(hw, REG_SRAM_TXRAM_END, sram_rx_end);
+*/
+
+	/* config IPG/IFG */
+	value = (((u32)hw->ipgt & MAC_IPG_IFG_IPGT_MASK) <<
+		MAC_IPG_IFG_IPGT_SHIFT) |
+		(((u32)hw->min_ifg & MAC_IPG_IFG_MIFG_MASK) <<
+		MAC_IPG_IFG_MIFG_SHIFT) |
+		(((u32)hw->ipgr1 & MAC_IPG_IFG_IPGR1_MASK) <<
+		MAC_IPG_IFG_IPGR1_SHIFT)|
+		(((u32)hw->ipgr2 & MAC_IPG_IFG_IPGR2_MASK) <<
+		MAC_IPG_IFG_IPGR2_SHIFT);
+	ATL2_WRITE_REG(hw, REG_MAC_IPG_IFG, value);
+
+	/* config  Half-Duplex Control */
+	value = ((u32)hw->lcol & MAC_HALF_DUPLX_CTRL_LCOL_MASK) |
+		(((u32)hw->max_retry & MAC_HALF_DUPLX_CTRL_RETRY_MASK) <<
+		MAC_HALF_DUPLX_CTRL_RETRY_SHIFT) |
+		MAC_HALF_DUPLX_CTRL_EXC_DEF_EN |
+		(0xa << MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT) |
+		(((u32)hw->jam_ipg & MAC_HALF_DUPLX_CTRL_JAMIPG_MASK) <<
+		MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT);
+	ATL2_WRITE_REG(hw, REG_MAC_HALF_DUPLX_CTRL, value);
+
+	/* set Interrupt Moderator Timer */
+	ATL2_WRITE_REGW(hw, REG_IRQ_MODU_TIMER_INIT, adapter->imt);
+	ATL2_WRITE_REG(hw, REG_MASTER_CTRL, MASTER_CTRL_ITIMER_EN);
+
+	/* set Interrupt Clear Timer */
+	ATL2_WRITE_REGW(hw, REG_CMBDISDMA_TIMER, adapter->ict);
+
+	/* set MTU: max frame = MTU + Ethernet header + VLAN tag + FCS */
+	ATL2_WRITE_REG(hw, REG_MTU, adapter->netdev->mtu +
+		ENET_HEADER_SIZE + VLAN_SIZE + ETHERNET_FCS_SIZE);
+
+	/* 1590 */
+	ATL2_WRITE_REG(hw, REG_TX_CUT_THRESH, 0x177);
+
+	/* flow control */
+	ATL2_WRITE_REGW(hw, REG_PAUSE_ON_TH, hw->fc_rxd_hi);
+	ATL2_WRITE_REGW(hw, REG_PAUSE_OFF_TH, hw->fc_rxd_lo);
+
+	/* Init mailbox */
+	ATL2_WRITE_REGW(hw, REG_MB_TXD_WR_IDX, (u16)adapter->txd_write_ptr);
+	ATL2_WRITE_REGW(hw, REG_MB_RXD_RD_IDX, (u16)adapter->rxd_read_ptr);
+
+	/* enable DMA read/write */
+	ATL2_WRITE_REGB(hw, REG_DMAR, DMAR_EN);
+	ATL2_WRITE_REGB(hw, REG_DMAW, DMAW_EN);
+
+	value = ATL2_READ_REG(&adapter->hw, REG_ISR);
+	if ((value & ISR_PHY_LINKDOWN) != 0)
+		value = 1; /* config failed */
+	else
+		value = 0;
+
+	/* clear all interrupt status */
+	ATL2_WRITE_REG(&adapter->hw, REG_ISR, 0x3fffffff);
+	ATL2_WRITE_REG(&adapter->hw, REG_ISR, 0);
+	return value;
+}
+
+/*
+ * atl2_setup_ring_resources - allocate Tx / RX descriptor resources
+ * @adapter: board private structure
+ *
+ * Return 0 on success, negative on failure
+ */
+static s32 atl2_setup_ring_resources(struct atl2_adapter *adapter)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	int size;
+	u8 offset = 0;
+
+	/* real ring DMA buffer */
+	adapter->ring_size = size =
+		adapter->txd_ring_size * 1 + 7 +	/* dword align */
+		adapter->txs_ring_size * 4 + 7 +	/* dword align */
+		adapter->rxd_ring_size * 1536 + 127;	/* 128bytes align */
+
+	adapter->ring_vir_addr = pci_alloc_consistent(pdev, size,
+		&adapter->ring_dma);
+	if (!adapter->ring_vir_addr)
+		return -ENOMEM;
+	memset(adapter->ring_vir_addr, 0, adapter->ring_size);
+
+	/* Init TXD Ring */
+	adapter->txd_dma = adapter->ring_dma;
+	offset = (adapter->txd_dma & 0x7) ? (8 - (adapter->txd_dma & 0x7)) : 0;
+	adapter->txd_dma += offset;
+	adapter->txd_ring = (struct tx_pkt_header *) (adapter->ring_vir_addr +
+		offset);
+
+	/* Init TXS Ring */
+	adapter->txs_dma = adapter->txd_dma + adapter->txd_ring_size;
+	offset = (adapter->txs_dma & 0x7) ? (8 - (adapter->txs_dma & 0x7)) : 0;
+	adapter->txs_dma += offset;
+	adapter->txs_ring = (struct tx_pkt_status *)
+		(((u8 *)adapter->txd_ring) + (adapter->txd_ring_size + offset));
+
+	/* Init RXD Ring */
+	adapter->rxd_dma = adapter->txs_dma + adapter->txs_ring_size * 4;
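+	/* place the RX ring 8 bytes before a 128-byte boundary, presumably
+	 * so the packet data that follows each descriptor's 8-byte header
+	 * lands on a 128-byte boundary */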
+	offset = (adapter->rxd_dma & 127) ?
+		(128 - (adapter->rxd_dma & 127)) : 0;
+	if (offset > 7)
+		offset -= 8;
+	else
+		offset += (128 - 8);
+
+	adapter->rxd_dma += offset;
+	adapter->rxd_ring = (struct rx_desc *) (((u8 *)adapter->txs_ring) +
+		(adapter->txs_ring_size * 4 + offset));
+
+/*
+ * Read / Write Ptr Initialize:
+ *      init_ring_ptrs(adapter);
+ */
+	return 0;
+}
+
+/*
+ * atl2_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
+ */
+static inline void atl2_irq_enable(struct atl2_adapter *adapter)
+{
+	ATL2_WRITE_REG(&adapter->hw, REG_IMR, IMR_NORMAL_MASK);
+	ATL2_WRITE_FLUSH(&adapter->hw);
+}
+
+/*
+ * atl2_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
+ */
+static inline void atl2_irq_disable(struct atl2_adapter *adapter)
+{
+	ATL2_WRITE_REG(&adapter->hw, REG_IMR, 0);
+	ATL2_WRITE_FLUSH(&adapter->hw);
+	synchronize_irq(adapter->pdev->irq);
+}
+
+#ifdef NETIF_F_HW_VLAN_TX
+static void atl2_vlan_rx_register(struct net_device *netdev,
+	struct vlan_group *grp)
+{
+	struct atl2_adapter *adapter = netdev_priv(netdev);
+	u32 ctrl;
+
+	atl2_irq_disable(adapter);
+	adapter->vlgrp = grp;
+
+	if (grp) {
+		/* enable VLAN tag insert/strip */
+		ctrl = ATL2_READ_REG(&adapter->hw, REG_MAC_CTRL);
+		ctrl |= MAC_CTRL_RMV_VLAN;
+		ATL2_WRITE_REG(&adapter->hw, REG_MAC_CTRL, ctrl);
+	} else {
+		/* disable VLAN tag insert/strip */
+		ctrl = ATL2_READ_REG(&adapter->hw, REG_MAC_CTRL);
+		ctrl &= ~MAC_CTRL_RMV_VLAN;
+		ATL2_WRITE_REG(&adapter->hw, REG_MAC_CTRL, ctrl);
+	}
+
+	atl2_irq_enable(adapter);
+}
+
+static void atl2_restore_vlan(struct atl2_adapter *adapter)
+{
+	atl2_vlan_rx_register(adapter->netdev, adapter->vlgrp);
+}
+#endif
+
+static void atl2_intr_rx(struct atl2_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	struct rx_desc *rxd;
+	struct sk_buff *skb;
+
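+	/* drain every RX descriptor the hardware has flagged as updated */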
+	do {
+		rxd = adapter->rxd_ring+adapter->rxd_write_ptr;
+		if (!rxd->status.update)
+			break; /* no more updated RX descriptors */
+
+		/* clear this flag at once */
+		rxd->status.update = 0;
+
+		if (rxd->status.ok && rxd->status.pkt_size >= 60) {
+			int rx_size = (int)(rxd->status.pkt_size - 4);
+			/* alloc new buffer */
+			skb = netdev_alloc_skb(netdev, rx_size + NET_IP_ALIGN);
+			if (NULL == skb) {
+				printk(KERN_WARNING
+					"%s: Mem squeeze, deferring packet.\n",
+					netdev->name);
+				/*
+				 * Check that some rx space is free. If not,
+				 * free one and mark stats->rx_dropped++.
+				 */
+				adapter->net_stats.rx_dropped++;
+				break;
+			}
+			skb_reserve(skb, NET_IP_ALIGN);
+			skb->dev = netdev;
+			memcpy(skb->data, rxd->packet, rx_size);
+			skb_put(skb, rx_size);
+			skb->protocol = eth_type_trans(skb, netdev);
+#ifdef NETIF_F_HW_VLAN_TX
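+			/* the hardware delivers the VLAN tag with a rotated
+			 * bit layout; undo it to recover the standard TCI */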
+			if (adapter->vlgrp && (rxd->status.vlan)) {
+				u16 vlan_tag = (rxd->status.vtag>>4) |
+					((rxd->status.vtag&7) << 13) |
+					((rxd->status.vtag&8) << 9);
+				vlan_hwaccel_rx(skb, adapter->vlgrp, vlan_tag);
+			} else
+#endif
+			netif_rx(skb);
+			adapter->net_stats.rx_bytes += rx_size;
+			adapter->net_stats.rx_packets++;
+			netdev->last_rx = jiffies;
+		} else {
+			adapter->net_stats.rx_errors++;
+
+			if (rxd->status.ok && rxd->status.pkt_size <= 60)
+				adapter->net_stats.rx_length_errors++;
+			if (rxd->status.mcast)
+				adapter->net_stats.multicast++;
+			if (rxd->status.crc)
+				adapter->net_stats.rx_crc_errors++;
+			if (rxd->status.align)
+				adapter->net_stats.rx_frame_errors++;
+		}
+
+		/* advance write ptr */
+		if (++adapter->rxd_write_ptr == adapter->rxd_ring_size)
+			adapter->rxd_write_ptr = 0;
+	} while (1);
+
+	/* update mailbox? */
+	adapter->rxd_read_ptr = adapter->rxd_write_ptr;
+	ATL2_WRITE_REGW(&adapter->hw, REG_MB_RXD_RD_IDX, adapter->rxd_read_ptr);
+}
+
+static void atl2_intr_tx(struct atl2_adapter *adapter)
+{
+	u32 txd_read_ptr;
+	u32 txs_write_ptr;
+	struct tx_pkt_status *txs;
+	struct tx_pkt_header *txph;
+	int free_hole = 0;
+
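+	/* retire completed packets: walk the TX status ring and advance
+	 * the descriptor read pointer past each acknowledged packet */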
+	do {
+		txs_write_ptr = (u32) atomic_read(&adapter->txs_write_ptr);
+		txs = adapter->txs_ring + txs_write_ptr;
+		if (!txs->update)
+			break; /* tx stop here */
+
+		free_hole = 1;
+		txs->update = 0;
+
+		if (++txs_write_ptr == adapter->txs_ring_size)
+			txs_write_ptr = 0;
+		atomic_set(&adapter->txs_write_ptr, (int)txs_write_ptr);
+
+		txd_read_ptr = (u32) atomic_read(&adapter->txd_read_ptr);
+		txph = (struct tx_pkt_header *)
+			(((u8 *)adapter->txd_ring) + txd_read_ptr);
+
+		if (txph->pkt_size != txs->pkt_size) {
+			struct tx_pkt_status *old_txs = txs;
+			printk(KERN_WARNING
+				"%s: txs packet size not consistent with txd"
+				" txd_:0x%08x, txs_:0x%08x!\n",
+				adapter->netdev->name,
+				*(u32 *)txph, *(u32 *)txs);
+			printk(KERN_WARNING
+				"txd read ptr: 0x%x\n",
+				txd_read_ptr);
+			txs = adapter->txs_ring + txs_write_ptr;
+			printk(KERN_WARNING
+				"txs-behind:0x%08x\n",
+				*(u32 *)txs);
+			if (txs_write_ptr < 2) {
+				txs = adapter->txs_ring +
+					(adapter->txs_ring_size +
+					txs_write_ptr - 2);
+			} else {
+				txs = adapter->txs_ring + (txs_write_ptr - 2);
+			}
+			printk(KERN_WARNING
+				"txs-before:0x%08x\n",
+				*(u32 *)txs);
+			txs = old_txs;
+		}
+
+		/* skip the packet data plus the 4-byte TX packet header,
+		 * keeping the read pointer dword aligned */
+		txd_read_ptr += (((u32)(txph->pkt_size) + 7) & ~3);
+		if (txd_read_ptr >= adapter->txd_ring_size)
+			txd_read_ptr -= adapter->txd_ring_size;
+
+		atomic_set(&adapter->txd_read_ptr, (int)txd_read_ptr);
+
+		/* tx statistics: */
+		if (txs->ok)
+			adapter->net_stats.tx_packets++;
+		else
+			adapter->net_stats.tx_errors++;
+
+		if (txs->defer)
+			adapter->net_stats.collisions++;
+		if (txs->abort_col)
+			adapter->net_stats.tx_aborted_errors++;
+		if (txs->late_col)
+			adapter->net_stats.tx_window_errors++;
+		if (txs->underun)
+			adapter->net_stats.tx_fifo_errors++;
+	} while (1);
+
+	if (free_hole) {
+		if (netif_queue_stopped(adapter->netdev) &&
+			netif_carrier_ok(adapter->netdev))
+			netif_wake_queue(adapter->netdev);
+	}
+}
+
+static void atl2_check_for_link(struct atl2_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	u16 phy_data = 0;
+
+	spin_lock(&adapter->stats_lock);
+	atl2_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
+	atl2_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
+	spin_unlock(&adapter->stats_lock);
+
+	/* notify upper layer link down ASAP */
+	if (!(phy_data & BMSR_LSTATUS)) { /* Link Down */
+		if (netif_carrier_ok(netdev)) { /* old link state: Up */
+			printk(KERN_INFO "%s: %s NIC Link is Down\n",
+				atl2_driver_name, netdev->name);
+			adapter->link_speed = SPEED_0;
+			netif_carrier_off(netdev);
+			netif_stop_queue(netdev);
+		}
+	}
+	schedule_work(&adapter->link_chg_task);
+}
+
+static inline void atl2_clear_phy_int(struct atl2_adapter *adapter)
+{
+	u16 phy_data;
+	spin_lock(&adapter->stats_lock);
+	atl2_read_phy_reg(&adapter->hw, 19, &phy_data);
+	spin_unlock(&adapter->stats_lock);
+}
+
+/*
+ * atl2_intr - Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
+ * @pt_regs: CPU registers structure
+ */
+static irqreturn_t atl2_intr(int irq, void *data)
+{
+	struct atl2_adapter *adapter = netdev_priv(data);
+	struct atl2_hw *hw = &adapter->hw;
+	u32 status;
+
+	status = ATL2_READ_REG(hw, REG_ISR);
+	if (0 == status)
+		return IRQ_NONE;
+
+	/* link event */
+	if (status & ISR_PHY)
+		atl2_clear_phy_int(adapter);
+
+	/* clear ISR status, and Enable CMB DMA/Disable Interrupt */
+	ATL2_WRITE_REG(hw, REG_ISR, status | ISR_DIS_INT);
+
+	/* check if PCIE PHY Link down */
+	if (status & ISR_PHY_LINKDOWN) {
+		if (netif_running(adapter->netdev)) { /* reset MAC */
+			ATL2_WRITE_REG(hw, REG_ISR, 0);
+			ATL2_WRITE_REG(hw, REG_IMR, 0);
+			ATL2_WRITE_FLUSH(hw);
+			schedule_work(&adapter->reset_task);
+			return IRQ_HANDLED;
+		}
+	}
+
+	/* check if DMA read/write error? */
+	if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) {
+		ATL2_WRITE_REG(hw, REG_ISR, 0);
+		ATL2_WRITE_REG(hw, REG_IMR, 0);
+		ATL2_WRITE_FLUSH(hw);
+		schedule_work(&adapter->reset_task);
+		return IRQ_HANDLED;
+	}
+
+	/* link event */
+	if (status & (ISR_PHY | ISR_MANUAL)) {
+		adapter->net_stats.tx_carrier_errors++;
+		atl2_check_for_link(adapter);
+	}
+
+	/* transmit event */
+	if (status & ISR_TX_EVENT)
+		atl2_intr_tx(adapter);
+
+	/* rx exception */
+	if (status & ISR_RX_EVENT)
+		atl2_intr_rx(adapter);
+
+	/* re-enable Interrupt */
+	ATL2_WRITE_REG(&adapter->hw, REG_ISR, 0);
+	return IRQ_HANDLED;
+}
+
+static int atl2_request_irq(struct atl2_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	int flags, err = 0;
+
+	flags = IRQF_SHARED;
+#ifdef CONFIG_PCI_MSI
+	adapter->have_msi = true;
+	err = pci_enable_msi(adapter->pdev);
+	if (err)
+		adapter->have_msi = false;
+
+	if (adapter->have_msi)
+		flags &= ~IRQF_SHARED;
+#endif
+
+	return request_irq(adapter->pdev->irq, &atl2_intr, flags, netdev->name,
+		netdev);
+}
+
+/*
+ * atl2_free_ring_resources - Free Tx / RX descriptor Resources
+ * @adapter: board private structure
+ *
+ * Free the single DMA region that backs both the Tx and Rx rings
+ */
+static void atl2_free_ring_resources(struct atl2_adapter *adapter)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	pci_free_consistent(pdev, adapter->ring_size, adapter->ring_vir_addr,
+		adapter->ring_dma);
+}
+
+/*
+ * atl2_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ *
+ * Returns 0 on success, negative value on failure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP).  At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the watchdog timer is started,
+ * and the stack is notified that the interface is ready.
+ */
+static int atl2_open(struct net_device *netdev)
+{
+	struct atl2_adapter *adapter = netdev_priv(netdev);
+	int err;
+	u32 val;
+
+	/* disallow open during test */
+	if (test_bit(__ATL2_TESTING, &adapter->flags))
+		return -EBUSY;
+
+	/* allocate transmit descriptors */
+	err = atl2_setup_ring_resources(adapter);
+	if (err)
+		return err;
+
+	err = atl2_init_hw(&adapter->hw);
+	if (err) {
+		err = -EIO;
+		goto err_init_hw;
+	}
+
+	/* hardware has been reset, we need to reload some things */
+	atl2_set_multi(netdev);
+	init_ring_ptrs(adapter);
+
+#ifdef NETIF_F_HW_VLAN_TX
+	atl2_restore_vlan(adapter);
+#endif
+
+	if (atl2_configure(adapter)) {
+		err = -EIO;
+		goto err_config;
+	}
+
+	err = atl2_request_irq(adapter);
+	if (err)
+		goto err_req_irq;
+
+	clear_bit(__ATL2_DOWN, &adapter->flags);
+
+	mod_timer(&adapter->watchdog_timer, jiffies + 4*HZ);
+
+	val = ATL2_READ_REG(&adapter->hw, REG_MASTER_CTRL);
+	ATL2_WRITE_REG(&adapter->hw, REG_MASTER_CTRL,
+		val | MASTER_CTRL_MANUAL_INT);
+
+	atl2_irq_enable(adapter);
+
+	return 0;
+
+err_init_hw:
+err_req_irq:
+err_config:
+	atl2_free_ring_resources(adapter);
+	atl2_reset_hw(&adapter->hw);
+
+	return err;
+}
+
+static void atl2_down(struct atl2_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+
+	/* signal that we're down so the interrupt handler does not
+	 * reschedule our watchdog timer */
+	set_bit(__ATL2_DOWN, &adapter->flags);
+
+#ifdef NETIF_F_LLTX
+	netif_stop_queue(netdev);
+#else
+	netif_tx_disable(netdev);
+#endif
+
+	/* reset MAC to disable all RX/TX */
+	atl2_reset_hw(&adapter->hw);
+	msleep(1);
+
+	atl2_irq_disable(adapter);
+
+	del_timer_sync(&adapter->watchdog_timer);
+	del_timer_sync(&adapter->phy_config_timer);
+	clear_bit(0, &adapter->cfg_phy);
+
+	netif_carrier_off(netdev);
+	adapter->link_speed = SPEED_0;
+	adapter->link_duplex = -1;
+}
+
+static void atl2_free_irq(struct atl2_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+
+	free_irq(adapter->pdev->irq, netdev);
+
+#ifdef CONFIG_PCI_MSI
+	if (adapter->have_msi)
+		pci_disable_msi(adapter->pdev);
+#endif
+}
+
+/*
+ * atl2_close - Disables a network interface
+ * @netdev: network interface device structure
+ *
+ * Returns 0, this is not allowed to fail
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS.  The hardware is still under the drivers control, but
+ * needs to be disabled.  A global MAC reset is issued to stop the
+ * hardware, and all transmit and receive resources are freed.
+ */
+static int atl2_close(struct net_device *netdev)
+{
+	struct atl2_adapter *adapter = netdev_priv(netdev);
+
+	WARN_ON(test_bit(__ATL2_RESETTING, &adapter->flags));
+
+	atl2_down(adapter);
+	atl2_free_irq(adapter);
+	atl2_free_ring_resources(adapter);
+
+	return 0;
+}
+
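+/* free units left in the TX status ring; one unit is always kept unused
+ * so a full ring can be distinguished from an empty one */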
+static inline int TxsFreeUnit(struct atl2_adapter *adapter)
+{
+	u32 txs_write_ptr = (u32) atomic_read(&adapter->txs_write_ptr);
+
+	return (adapter->txs_next_clear >= txs_write_ptr) ?
+		(int) (adapter->txs_ring_size - adapter->txs_next_clear +
+		txs_write_ptr - 1) :
+		(int) (txs_write_ptr - adapter->txs_next_clear - 1);
+}
+
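+/* same circular arithmetic for the free bytes in the TX descriptor ring */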
+static inline int TxdFreeBytes(struct atl2_adapter *adapter)
+{
+	u32 txd_read_ptr = (u32)atomic_read(&adapter->txd_read_ptr);
+
+	return (adapter->txd_write_ptr >= txd_read_ptr) ?
+		(int) (adapter->txd_ring_size - adapter->txd_write_ptr +
+		txd_read_ptr - 1) :
+		(int) (txd_read_ptr - adapter->txd_write_ptr - 1);
+}
+
+static int atl2_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+	struct atl2_adapter *adapter = netdev_priv(netdev);
+	unsigned long flags;
+	struct tx_pkt_header *txph;
+	u32 offset, copy_len;
+	int txs_unused;
+	int txbuf_unused;
+
+	if (test_bit(__ATL2_DOWN, &adapter->flags)) {
+		dev_kfree_skb_any(skb);
+		return NETDEV_TX_OK;
+	}
+
+	if (unlikely(skb->len <= 0)) {
+		dev_kfree_skb_any(skb);
+		return NETDEV_TX_OK;
+	}
+
+#ifdef NETIF_F_LLTX
+	local_irq_save(flags);
+	if (!spin_trylock(&adapter->tx_lock)) {
+		/* Collision - tell upper layer to requeue */
+		local_irq_restore(flags);
+		return NETDEV_TX_LOCKED;
+	}
+#else
+	spin_lock_irqsave(&adapter->tx_lock, flags);
+#endif
+	txs_unused = TxsFreeUnit(adapter);
+	txbuf_unused = TxdFreeBytes(adapter);
+
+	if (skb->len + sizeof(struct tx_pkt_header) + 4 > txbuf_unused ||
+		txs_unused < 1) {
+		/* not enough resources */
+		netif_stop_queue(netdev);
+		spin_unlock_irqrestore(&adapter->tx_lock, flags);
+		return NETDEV_TX_BUSY;
+	}
+
+	offset = adapter->txd_write_ptr;
+
+	txph = (struct tx_pkt_header *) (((u8 *)adapter->txd_ring) + offset);
+
+	*(u32 *)txph = 0;
+	txph->pkt_size = skb->len;
+
+	offset += 4;
+	if (offset >= adapter->txd_ring_size)
+		offset -= adapter->txd_ring_size;
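+	/* copy the packet into the ring, splitting the copy if it wraps
+	 * past the end of the ring buffer */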
+	copy_len = adapter->txd_ring_size - offset;
+	if (copy_len >= skb->len) {
+		memcpy(((u8 *)adapter->txd_ring) + offset, skb->data, skb->len);
+		offset += ((u32)(skb->len + 3) & ~3);
+	} else {
+		memcpy(((u8 *)adapter->txd_ring)+offset, skb->data, copy_len);
+		memcpy((u8 *)adapter->txd_ring, skb->data+copy_len,
+			skb->len-copy_len);
+		offset = ((u32)(skb->len-copy_len + 3) & ~3);
+	}
+#ifdef NETIF_F_HW_VLAN_TX
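+	/* rotate the VLAN TCI into the hardware's expected bit layout
+	 * (the inverse of the unrotation done on receive) */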
+	if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
+		u16 vlan_tag = vlan_tx_tag_get(skb);
+		vlan_tag = (vlan_tag << 4) |
+			(vlan_tag >> 13) |
+			((vlan_tag >> 9) & 0x8);
+		txph->ins_vlan = 1;
+		txph->vlan = vlan_tag;
+	}
+#endif
+	if (offset >= adapter->txd_ring_size)
+		offset -= adapter->txd_ring_size;
+	adapter->txd_write_ptr = offset;
+
+	/* clear txs before send */
+	adapter->txs_ring[adapter->txs_next_clear].update = 0;
+	if (++adapter->txs_next_clear == adapter->txs_ring_size)
+		adapter->txs_next_clear = 0;
+
+	ATL2_WRITE_REGW(&adapter->hw, REG_MB_TXD_WR_IDX,
+		(adapter->txd_write_ptr >> 2));
+
+	spin_unlock_irqrestore(&adapter->tx_lock, flags);
+
+	netdev->trans_start = jiffies;
+	dev_kfree_skb_any(skb);
+	return NETDEV_TX_OK;
+}
+
+/*
+ * atl2_get_stats - Get System Network Statistics
+ * @netdev: network interface device structure
+ *
+ * Returns the address of the device statistics structure.
+ * The statistics are actually updated from the timer callback.
+ */
+static struct net_device_stats *atl2_get_stats(struct net_device *netdev)
+{
+	struct atl2_adapter *adapter = netdev_priv(netdev);
+	return &adapter->net_stats;
+}
+
+/*
+ * atl2_change_mtu - Change the Maximum Transfer Unit
+ * @netdev: network interface device structure
+ * @new_mtu: new value for maximum frame size
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int atl2_change_mtu(struct net_device *netdev, int new_mtu)
+{
+	struct atl2_adapter *adapter = netdev_priv(netdev);
+	struct atl2_hw *hw = &adapter->hw;
+
+	if ((new_mtu < 40) || (new_mtu > (ETH_DATA_LEN + VLAN_SIZE)))
+		return -EINVAL;
+
+	/* set MTU */
+	if (hw->max_frame_size != new_mtu) {
+		netdev->mtu = new_mtu;
+		ATL2_WRITE_REG(hw, REG_MTU, new_mtu + ENET_HEADER_SIZE +
+			VLAN_SIZE + ETHERNET_FCS_SIZE);
+	}
+
+	return 0;
+}
+
+/*
+ * atl2_set_mac - Change the Ethernet Address of the NIC
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int atl2_set_mac(struct net_device *netdev, void *p)
+{
+	struct atl2_adapter *adapter = netdev_priv(netdev);
+	struct sockaddr *addr = p;
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	if (netif_running(netdev))
+		return -EBUSY;
+
+	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+	memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
+
+	atl2_set_mac_addr(&adapter->hw);
+
+	return 0;
+}
+
+/*
+ * atl2_mii_ioctl -
+ * @netdev:
+ * @ifreq:
+ * @cmd:
+ */
+static int atl2_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+	struct atl2_adapter *adapter = netdev_priv(netdev);
+	struct mii_ioctl_data *data = if_mii(ifr);
+	unsigned long flags;
+
+	switch (cmd) {
+	case SIOCGMIIPHY:
+		data->phy_id = 0;
+		break;
+	case SIOCGMIIREG:
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
+		spin_lock_irqsave(&adapter->stats_lock, flags);
+		if (atl2_read_phy_reg(&adapter->hw,
+			data->reg_num & 0x1F, &data->val_out)) {
+			spin_unlock_irqrestore(&adapter->stats_lock, flags);
+			return -EIO;
+		}
+		spin_unlock_irqrestore(&adapter->stats_lock, flags);
+		break;
+	case SIOCSMIIREG:
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
+		if (data->reg_num & ~(0x1F))
+			return -EFAULT;
+		spin_lock_irqsave(&adapter->stats_lock, flags);
+		if (atl2_write_phy_reg(&adapter->hw, data->reg_num,
+			data->val_in)) {
+			spin_unlock_irqrestore(&adapter->stats_lock, flags);
+			return -EIO;
+		}
+		spin_unlock_irqrestore(&adapter->stats_lock, flags);
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+	return 0;
+}
+
+/*
+ * atl2_ioctl -
+ * @netdev:
+ * @ifreq:
+ * @cmd:
+ */
+static int atl2_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+	switch (cmd) {
+	case SIOCGMIIPHY:
+	case SIOCGMIIREG:
+	case SIOCSMIIREG:
+		return atl2_mii_ioctl(netdev, ifr, cmd);
+#ifdef ETHTOOL_OPS_COMPAT
+	case SIOCETHTOOL:
+		return ethtool_ioctl(ifr);
+#endif
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+/*
+ * atl2_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ */
+static void atl2_tx_timeout(struct net_device *netdev)
+{
+	struct atl2_adapter *adapter = netdev_priv(netdev);
+
+	/* Do the reset outside of interrupt context */
+	schedule_work(&adapter->reset_task);
+}
+
+/*
+ * atl2_watchdog - Timer Call-back
+ * @data: pointer to netdev cast into an unsigned long
+ */
+static void atl2_watchdog(unsigned long data)
+{
+	struct atl2_adapter *adapter = (struct atl2_adapter *) data;
+	u32 drop_rxd, drop_rxs;
+	unsigned long flags;
+
+	if (!test_bit(__ATL2_DOWN, &adapter->flags)) {
+		spin_lock_irqsave(&adapter->stats_lock, flags);
+		drop_rxd = ATL2_READ_REG(&adapter->hw, REG_STS_RXD_OV);
+		drop_rxs = ATL2_READ_REG(&adapter->hw, REG_STS_RXS_OV);
+		adapter->net_stats.rx_over_errors += (drop_rxd+drop_rxs);
+		spin_unlock_irqrestore(&adapter->stats_lock, flags);
+
+		/* Reset the timer */
+		mod_timer(&adapter->watchdog_timer, jiffies + 4 * HZ);
+	}
+}
+
+/*
+ * atl2_phy_config - Timer Call-back
+ * @data: pointer to netdev cast into an unsigned long
+ */
+static void atl2_phy_config(unsigned long data)
+{
+	struct atl2_adapter *adapter = (struct atl2_adapter *) data;
+	struct atl2_hw *hw = &adapter->hw;
+	unsigned long flags;
+
+	spin_lock_irqsave(&adapter->stats_lock, flags);
+	atl2_write_phy_reg(hw, MII_ADVERTISE, hw->mii_autoneg_adv_reg);
+	atl2_write_phy_reg(hw, MII_BMCR, MII_CR_RESET | MII_CR_AUTO_NEG_EN |
+		MII_CR_RESTART_AUTO_NEG);
+	spin_unlock_irqrestore(&adapter->stats_lock, flags);
+	clear_bit(0, &adapter->cfg_phy);
+}
+
+static int atl2_up(struct atl2_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	int err = 0;
+	u32 val;
+
+	/* hardware has been reset, we need to reload some things */
+
+	err = atl2_init_hw(&adapter->hw);
+	if (err) {
+		err = -EIO;
+		return err;
+	}
+
+	atl2_set_multi(netdev);
+	init_ring_ptrs(adapter);
+
+#ifdef NETIF_F_HW_VLAN_TX
+	atl2_restore_vlan(adapter);
+#endif
+
+	if (atl2_configure(adapter)) {
+		err = -EIO;
+		goto err_up;
+	}
+
+	clear_bit(__ATL2_DOWN, &adapter->flags);
+
+	val = ATL2_READ_REG(&adapter->hw, REG_MASTER_CTRL);
+	ATL2_WRITE_REG(&adapter->hw, REG_MASTER_CTRL, val |
+		MASTER_CTRL_MANUAL_INT);
+
+	atl2_irq_enable(adapter);
+
+err_up:
+	return err;
+}
+
+static void atl2_reinit_locked(struct atl2_adapter *adapter)
+{
+	WARN_ON(in_interrupt());
+	while (test_and_set_bit(__ATL2_RESETTING, &adapter->flags))
+		msleep(1);
+	atl2_down(adapter);
+	atl2_up(adapter);
+	clear_bit(__ATL2_RESETTING, &adapter->flags);
+}
+
+static void atl2_reset_task(struct work_struct *work)
+{
+	struct atl2_adapter *adapter;
+	adapter = container_of(work, struct atl2_adapter, reset_task);
+
+	atl2_reinit_locked(adapter);
+}
+
+static void atl2_setup_mac_ctrl(struct atl2_adapter *adapter)
+{
+	u32 value;
+	struct atl2_hw *hw = &adapter->hw;
+	struct net_device *netdev = adapter->netdev;
+
+	/* Config MAC CTRL Register */
+	value = MAC_CTRL_TX_EN | MAC_CTRL_RX_EN | MAC_CTRL_MACLP_CLK_PHY;
+
+	/* duplex */
+	if (FULL_DUPLEX == adapter->link_duplex)
+		value |= MAC_CTRL_DUPLX;
+
+	/* flow control */
+	value |= (MAC_CTRL_TX_FLOW | MAC_CTRL_RX_FLOW);
+
+	/* PAD & CRC */
+	value |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD);
+
+	/* preamble length */
+	value |= (((u32)adapter->hw.preamble_len & MAC_CTRL_PRMLEN_MASK) <<
+		MAC_CTRL_PRMLEN_SHIFT);
+
+	/* vlan */
+	if (adapter->vlgrp)
+		value |= MAC_CTRL_RMV_VLAN;
+
+	/* filter mode */
+	value |= MAC_CTRL_BC_EN;
+	if (netdev->flags & IFF_PROMISC)
+		value |= MAC_CTRL_PROMIS_EN;
+	else if (netdev->flags & IFF_ALLMULTI)
+		value |= MAC_CTRL_MC_ALL_EN;
+
+	/* half retry buffer */
+	value |= (((u32)(adapter->hw.retry_buf &
+		MAC_CTRL_HALF_LEFT_BUF_MASK)) << MAC_CTRL_HALF_LEFT_BUF_SHIFT);
+
+	ATL2_WRITE_REG(hw, REG_MAC_CTRL, value);
+}
+
+static int atl2_check_link(struct atl2_adapter *adapter)
+{
+	struct atl2_hw *hw = &adapter->hw;
+	struct net_device *netdev = adapter->netdev;
+	int ret_val;
+	u16 speed, duplex, phy_data;
+	int reconfig = 0;
+
+	/* MII_BMSR is latched and must be read twice for the current status */
+	atl2_read_phy_reg(hw, MII_BMSR, &phy_data);
+	atl2_read_phy_reg(hw, MII_BMSR, &phy_data);
+	if (!(phy_data&BMSR_LSTATUS)) { /* link down */
+		if (netif_carrier_ok(netdev)) { /* old link state: Up */
+			u32 value;
+			/* disable rx */
+			value = ATL2_READ_REG(hw, REG_MAC_CTRL);
+			value &= ~MAC_CTRL_RX_EN;
+			ATL2_WRITE_REG(hw, REG_MAC_CTRL, value);
+			adapter->link_speed = SPEED_0;
+			netif_carrier_off(netdev);
+			netif_stop_queue(netdev);
+		}
+		return 0;
+	}
+
+	/* Link Up */
+	ret_val = atl2_get_speed_and_duplex(hw, &speed, &duplex);
+	if (ret_val)
+		return ret_val;
+	switch (hw->MediaType) {
+	case MEDIA_TYPE_100M_FULL:
+		if (speed != SPEED_100 || duplex != FULL_DUPLEX)
+			reconfig = 1;
+		break;
+	case MEDIA_TYPE_100M_HALF:
+		if (speed != SPEED_100 || duplex != HALF_DUPLEX)
+			reconfig = 1;
+		break;
+	case MEDIA_TYPE_10M_FULL:
+		if (speed != SPEED_10 || duplex != FULL_DUPLEX)
+			reconfig = 1;
+		break;
+	case MEDIA_TYPE_10M_HALF:
+		if (speed != SPEED_10 || duplex != HALF_DUPLEX)
+			reconfig = 1;
+		break;
+	}
+	/* link result is our setting */
+	if (reconfig == 0) {
+		if (adapter->link_speed != speed ||
+			adapter->link_duplex != duplex) {
+			adapter->link_speed = speed;
+			adapter->link_duplex = duplex;
+			atl2_setup_mac_ctrl(adapter);
+			printk(KERN_INFO "%s: %s NIC Link is Up<%d Mbps %s>\n",
+				atl2_driver_name, netdev->name,
+				adapter->link_speed,
+				adapter->link_duplex == FULL_DUPLEX ?
+					"Full Duplex" : "Half Duplex");
+		}
+
+		if (!netif_carrier_ok(netdev)) { /* Link down -> Up */
+			netif_carrier_on(netdev);
+			netif_wake_queue(netdev);
+		}
+		return 0;
+	}
+
+	/* change original link status */
+	if (netif_carrier_ok(netdev)) {
+		u32 value;
+		/* disable rx */
+		value = ATL2_READ_REG(hw, REG_MAC_CTRL);
+		value &= ~MAC_CTRL_RX_EN;
+		ATL2_WRITE_REG(hw, REG_MAC_CTRL, value);
+
+		adapter->link_speed = SPEED_0;
+		netif_carrier_off(netdev);
+		netif_stop_queue(netdev);
+	}
+
+	/* auto-neg in progress: arm a timer to re-configure the PHY
+	 * (if this fires more often than every 5 seconds, something
+	 * strange is going on) */
+	if (!test_bit(__ATL2_DOWN, &adapter->flags)) {
+		if (!test_and_set_bit(0, &adapter->cfg_phy))
+			mod_timer(&adapter->phy_config_timer, jiffies + 5 * HZ);
+	}
+
+	return 0;
+}
+
+/*
+ * atl2_link_chg_task - deal with link change event Out of interrupt context
+ * @netdev: network interface device structure
+ */
+static void atl2_link_chg_task(struct work_struct *work)
+{
+	struct atl2_adapter *adapter;
+	unsigned long flags;
+
+	adapter = container_of(work, struct atl2_adapter, link_chg_task);
+
+	spin_lock_irqsave(&adapter->stats_lock, flags);
+	atl2_check_link(adapter);
+	spin_unlock_irqrestore(&adapter->stats_lock, flags);
+}
+
+static void atl2_setup_pcicmd(struct pci_dev *pdev)
+{
+	u16 cmd;
+
+	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
+
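+	/* enable INTx, memory space and bus mastering; I/O space
+	 * decoding is turned off */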
+	if (cmd & PCI_COMMAND_INTX_DISABLE)
+		cmd &= ~PCI_COMMAND_INTX_DISABLE;
+	if (cmd & PCI_COMMAND_IO)
+		cmd &= ~PCI_COMMAND_IO;
+	if (0 == (cmd & PCI_COMMAND_MEMORY))
+		cmd |= PCI_COMMAND_MEMORY;
+	if (0 == (cmd & PCI_COMMAND_MASTER))
+		cmd |= PCI_COMMAND_MASTER;
+	pci_write_config_word(pdev, PCI_COMMAND, cmd);
+
+	/*
+	 * Some motherboard BIOS (PXE/EFI) drivers may leave PME set
+	 * when they hand control to the OS (Windows/Linux), so clear
+	 * it here; otherwise the NIC may not work normally.
+	 */
+	pci_write_config_dword(pdev, REG_PM_CTRLSTAT, 0);
+}
+
+/*
+ * atl2_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in atl2_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ *
+ * atl2_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
+ */
+static int __devinit atl2_probe(struct pci_dev *pdev,
+	const struct pci_device_id *ent)
+{
+	struct net_device *netdev;
+	struct atl2_adapter *adapter;
+	static int cards_found;
+	unsigned long mmio_start;
+	int mmio_len;
+	int err;
+
+	cards_found = 0;
+
+	err = pci_enable_device(pdev);
+	if (err)
+		return err;
+
+	/*
+	 * atl2 is a shared-high-32-bit device, so we're stuck with 32-bit DMA
+	 * until the kernel has the proper infrastructure to support 64-bit DMA
+	 * on these devices.
+	 */
+	if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) &&
+		pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)) {
+		printk(KERN_ERR "atl2: No usable DMA configuration, aborting\n");
+		goto err_dma;
+	}
+
+	/* Mark all PCI regions associated with PCI device
+	 * pdev as being reserved by owner atl2_driver_name */
+	err = pci_request_regions(pdev, atl2_driver_name);
+	if (err)
+		goto err_pci_reg;
+
+	/* Enables bus-mastering on the device and calls
+	 * pcibios_set_master to do the needed arch specific settings */
+	pci_set_master(pdev);
+
+	err = -ENOMEM;
+	netdev = alloc_etherdev(sizeof(struct atl2_adapter));
+	if (!netdev)
+		goto err_alloc_etherdev;
+
+	SET_NETDEV_DEV(netdev, &pdev->dev);
+
+	pci_set_drvdata(pdev, netdev);
+	adapter = netdev_priv(netdev);
+	adapter->netdev = netdev;
+	adapter->pdev = pdev;
+	adapter->hw.back = adapter;
+
+	mmio_start = pci_resource_start(pdev, 0x0);
+	mmio_len = pci_resource_len(pdev, 0x0);
+
+	adapter->hw.mem_rang = (u32)mmio_len;
+	adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
+	if (!adapter->hw.hw_addr) {
+		err = -EIO;
+		goto err_ioremap;
+	}
+
+	atl2_setup_pcicmd(pdev);
+
+	netdev->open = &atl2_open;
+	netdev->stop = &atl2_close;
+	netdev->hard_start_xmit = &atl2_xmit_frame;
+	netdev->get_stats = &atl2_get_stats;
+	netdev->set_multicast_list = &atl2_set_multi;
+	netdev->set_mac_address = &atl2_set_mac;
+	netdev->change_mtu = &atl2_change_mtu;
+	netdev->do_ioctl = &atl2_ioctl;
+	atl2_set_ethtool_ops(netdev);
+
+#ifdef HAVE_TX_TIMEOUT
+	netdev->tx_timeout = &atl2_tx_timeout;
+	netdev->watchdog_timeo = 5 * HZ;
+#endif
+#ifdef NETIF_F_HW_VLAN_TX
+	netdev->vlan_rx_register = atl2_vlan_rx_register;
+#endif
+	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
+
+	netdev->mem_start = mmio_start;
+	netdev->mem_end = mmio_start + mmio_len;
+	adapter->bd_number = cards_found;
+	adapter->pci_using_64 = false;
+
+	/* setup the private structure */
+	err = atl2_sw_init(adapter);
+	if (err)
+		goto err_sw_init;
+
+	err = -EIO;
+
+#ifdef NETIF_F_HW_VLAN_TX
+	netdev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
+#endif
+
+#ifdef NETIF_F_LLTX
+	netdev->features |= NETIF_F_LLTX;
+#endif
+
+	/* Init PHY as early as possible due to power saving issue  */
+	atl2_phy_init(&adapter->hw);
+
+	/* reset the controller to
+	 * put the device in a known good starting state */
+
+	if (atl2_reset_hw(&adapter->hw)) {
+		err = -EIO;
+		goto err_reset;
+	}
+
+	/* copy the MAC address out of the EEPROM */
+	atl2_read_mac_addr(&adapter->hw);
+	memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
+/* FIXME: do we still need this? */
+#ifdef ETHTOOL_GPERMADDR
+	memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len);
+
+	if (!is_valid_ether_addr(netdev->perm_addr)) {
+#else
+	if (!is_valid_ether_addr(netdev->dev_addr)) {
+#endif
+		err = -EIO;
+		goto err_eeprom;
+	}
+
+	atl2_check_options(adapter);
+
+	init_timer(&adapter->watchdog_timer);
+	adapter->watchdog_timer.function = &atl2_watchdog;
+	adapter->watchdog_timer.data = (unsigned long) adapter;
+
+	init_timer(&adapter->phy_config_timer);
+	adapter->phy_config_timer.function = &atl2_phy_config;
+	adapter->phy_config_timer.data = (unsigned long) adapter;
+
+	INIT_WORK(&adapter->reset_task, atl2_reset_task);
+	INIT_WORK(&adapter->link_chg_task, atl2_link_chg_task);
+
+	strcpy(netdev->name, "eth%d"); /* ?? */
+	err = register_netdev(netdev);
+	if (err)
+		goto err_register;
+
+	/* assume we have no link for now */
+	netif_carrier_off(netdev);
+	netif_stop_queue(netdev);
+
+	cards_found++;
+
+	return 0;
+
+err_reset:
+err_register:
+err_sw_init:
+err_eeprom:
+	iounmap(adapter->hw.hw_addr);
+err_ioremap:
+	free_netdev(netdev);
+err_alloc_etherdev:
+	pci_release_regions(pdev);
+err_pci_reg:
+err_dma:
+	pci_disable_device(pdev);
+	return err;
+}
+
+/*
+ * atl2_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * atl2_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device.  The could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ */
+/* FIXME: write the original MAC address back in case it was changed from a
+ * BIOS-set value, as in atl1 -- CHS */
+static void __devexit atl2_remove(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct atl2_adapter *adapter = netdev_priv(netdev);
+
+	/* flush_scheduled_work() may reschedule our watchdog task, so
+	 * explicitly disable the watchdog from being rescheduled */
+	set_bit(__ATL2_DOWN, &adapter->flags);
+
+	del_timer_sync(&adapter->watchdog_timer);
+	del_timer_sync(&adapter->phy_config_timer);
+
+	flush_scheduled_work();
+
+	unregister_netdev(netdev);
+
+	atl2_force_ps(&adapter->hw);
+
+	iounmap(adapter->hw.hw_addr);
+	pci_release_regions(pdev);
+
+	free_netdev(netdev);
+
+	pci_disable_device(pdev);
+}
+
+static int atl2_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct atl2_adapter *adapter = netdev_priv(netdev);
+	struct atl2_hw *hw = &adapter->hw;
+	u16 speed, duplex;
+	u32 ctrl = 0;
+	u32 wufc = adapter->wol;
+
+#ifdef CONFIG_PM
+	int retval = 0;
+#endif
+
+	netif_device_detach(netdev);
+
+	if (netif_running(netdev)) {
+		WARN_ON(test_bit(__ATL2_RESETTING, &adapter->flags));
+		atl2_down(adapter);
+	}
+
+#ifdef CONFIG_PM
+	retval = pci_save_state(pdev);
+	if (retval)
+		return retval;
+#endif
+
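+	/* BMSR latches link-down events; read it twice to get the
+	 * current link status */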
+	atl2_read_phy_reg(hw, MII_BMSR, (u16 *)&ctrl);
+	atl2_read_phy_reg(hw, MII_BMSR, (u16 *)&ctrl);
+	if (ctrl & BMSR_LSTATUS)
+		wufc &= ~ATLX_WUFC_LNKC;
+
+	if (0 != (ctrl & BMSR_LSTATUS) && 0 != wufc) {
+		u32 ret_val;
+		/* get current link speed & duplex */
+		ret_val = atl2_get_speed_and_duplex(hw, &speed, &duplex);
+		if (ret_val) {
+			printk(KERN_DEBUG
+				"%s: failed to get speed/duplex while suspending\n",
+				atl2_driver_name);
+			goto wol_dis;
+		}
+
+		ctrl = 0;
+
+		/* turn on magic packet wol */
+		if (wufc & ATLX_WUFC_MAG)
+			ctrl |= (WOL_MAGIC_EN | WOL_MAGIC_PME_EN);
+
+		/* ignore Link Chg event when Link is up */
+		ATL2_WRITE_REG(hw, REG_WOL_CTRL, ctrl);
+
+		/* Config MAC CTRL Register */
+		ctrl = MAC_CTRL_RX_EN | MAC_CTRL_MACLP_CLK_PHY;
+		if (FULL_DUPLEX == adapter->link_duplex)
+			ctrl |= MAC_CTRL_DUPLX;
+		ctrl |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD);
+		ctrl |= (((u32)adapter->hw.preamble_len &
+			MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT);
+		ctrl |= (((u32)(adapter->hw.retry_buf &
+			MAC_CTRL_HALF_LEFT_BUF_MASK)) <<
+			MAC_CTRL_HALF_LEFT_BUF_SHIFT);
+		if (wufc & ATLX_WUFC_MAG) {
+			/* a magic packet may arrive as broadcast, multicast or unicast */
+			ctrl |= MAC_CTRL_BC_EN;
+		}
+
+		ATL2_WRITE_REG(hw, REG_MAC_CTRL, ctrl);
+
+		/* pcie patch */
+		ctrl = ATL2_READ_REG(hw, REG_PCIE_PHYMISC);
+		ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
+		ATL2_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl);
+		ctrl = ATL2_READ_REG(hw, REG_PCIE_DLL_TX_CTRL1);
+		ctrl |= PCIE_DLL_TX_CTRL1_SEL_NOR_CLK;
+		ATL2_WRITE_REG(hw, REG_PCIE_DLL_TX_CTRL1, ctrl);
+
+		pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
+		goto suspend_exit;
+	}
+
+	if (0 == (ctrl&BMSR_LSTATUS) && 0 != (wufc&ATLX_WUFC_LNKC)) {
+		/* link is down, so only LINK CHG WOL event enable */
+		ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN);
+		ATL2_WRITE_REG(hw, REG_WOL_CTRL, ctrl);
+		ATL2_WRITE_REG(hw, REG_MAC_CTRL, 0);
+
+		/* pcie patch */
+		ctrl = ATL2_READ_REG(hw, REG_PCIE_PHYMISC);
+		ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
+		ATL2_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl);
+		ctrl = ATL2_READ_REG(hw, REG_PCIE_DLL_TX_CTRL1);
+		ctrl |= PCIE_DLL_TX_CTRL1_SEL_NOR_CLK;
+		ATL2_WRITE_REG(hw, REG_PCIE_DLL_TX_CTRL1, ctrl);
+
+		hw->phy_configured = false; /* re-init PHY when resume */
+
+		pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
+
+		goto suspend_exit;
+	}
+
+wol_dis:
+	/* WOL disabled */
+	ATL2_WRITE_REG(hw, REG_WOL_CTRL, 0);
+
+	/* pcie patch */
+	ctrl = ATL2_READ_REG(hw, REG_PCIE_PHYMISC);
+	ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
+	ATL2_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl);
+	ctrl = ATL2_READ_REG(hw, REG_PCIE_DLL_TX_CTRL1);
+	ctrl |= PCIE_DLL_TX_CTRL1_SEL_NOR_CLK;
+	ATL2_WRITE_REG(hw, REG_PCIE_DLL_TX_CTRL1, ctrl);
+
+	atl2_force_ps(hw);
+	hw->phy_configured = false; /* re-init PHY when resume */
+
+	pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
+
+suspend_exit:
+	if (netif_running(netdev))
+		atl2_free_irq(adapter);
+
+	pci_disable_device(pdev);
+
+	pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int atl2_resume(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct atl2_adapter *adapter = netdev_priv(netdev);
+	int err;
+
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+
+	err = pci_enable_device(pdev);
+	if (err) {
+		printk(KERN_ERR
+			"atl2: Cannot enable PCI device from suspend\n");
+		return err;
+	}
+
+	pci_set_master(pdev);
+
+	ATL2_READ_REG(&adapter->hw, REG_WOL_CTRL); /* clear WOL status */
+
+	pci_enable_wake(pdev, PCI_D3hot, 0);
+	pci_enable_wake(pdev, PCI_D3cold, 0);
+
+	ATL2_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0);
+
+	err = atl2_request_irq(adapter);
+	if (netif_running(netdev) && err)
+		return err;
+
+	atl2_reset_hw(&adapter->hw);
+
+	if (netif_running(netdev))
+		atl2_up(adapter);
+
+	netif_device_attach(netdev);
+
+	return 0;
+}
+#endif
+
+static void atl2_shutdown(struct pci_dev *pdev)
+{
+	atl2_suspend(pdev, PMSG_SUSPEND);
+}
+
+static struct pci_driver atl2_driver = {
+	.name     = atl2_driver_name,
+	.id_table = atl2_pci_tbl,
+	.probe    = atl2_probe,
+	.remove   = __devexit_p(atl2_remove),
+	/* Power Management Hooks */
+	.suspend  = atl2_suspend,
+#ifdef CONFIG_PM
+	.resume   = atl2_resume,
+#endif
+	.shutdown = atl2_shutdown,
+};
+
+/*
+ * atl2_init_module - Driver Registration Routine
+ *
+ * atl2_init_module is the first routine called when the driver is
+ * loaded. All it does is register with the PCI subsystem.
+ */
+static int __init atl2_init_module(void)
+{
+	printk(KERN_INFO "%s - version %s\n", atl2_driver_string,
+		atl2_driver_version);
+	printk(KERN_INFO "%s\n", atl2_copyright);
+	return pci_register_driver(&atl2_driver);
+}
+module_init(atl2_init_module);
+
+/*
+ * atl2_exit_module - Driver Exit Cleanup Routine
+ *
+ * atl2_exit_module is called just before the driver is removed
+ * from memory.
+ */
+static void __exit atl2_exit_module(void)
+{
+	pci_unregister_driver(&atl2_driver);
+}
+module_exit(atl2_exit_module);
+
+static void atl2_read_pci_cfg(struct atl2_hw *hw, u32 reg, u16 *value)
+{
+	struct atl2_adapter *adapter = hw->back;
+	pci_read_config_word(adapter->pdev, reg, value);
+}
+
+static void atl2_write_pci_cfg(struct atl2_hw *hw, u32 reg, u16 *value)
+{
+	struct atl2_adapter *adapter = hw->back;
+	pci_write_config_word(adapter->pdev, reg, *value);
+}
+
+static int atl2_get_settings(struct net_device *netdev,
+	struct ethtool_cmd *ecmd)
+{
+	struct atl2_adapter *adapter = netdev_priv(netdev);
+	struct atl2_hw *hw = &adapter->hw;
+
+	ecmd->supported = (SUPPORTED_10baseT_Half |
+		SUPPORTED_10baseT_Full |
+		SUPPORTED_100baseT_Half |
+		SUPPORTED_100baseT_Full |
+		SUPPORTED_Autoneg |
+		SUPPORTED_TP);
+	ecmd->advertising = ADVERTISED_TP;
+
+	ecmd->advertising |= ADVERTISED_Autoneg;
+	ecmd->advertising |= hw->autoneg_advertised;
+
+	ecmd->port = PORT_TP;
+	ecmd->phy_address = 0;
+	ecmd->transceiver = XCVR_INTERNAL;
+
+	if (adapter->link_speed != SPEED_0) {
+		ecmd->speed = adapter->link_speed;
+		if (adapter->link_duplex == FULL_DUPLEX)
+			ecmd->duplex = DUPLEX_FULL;
+		else
+			ecmd->duplex = DUPLEX_HALF;
+	} else {
+		ecmd->speed = -1;
+		ecmd->duplex = -1;
+	}
+
+	ecmd->autoneg = AUTONEG_ENABLE;
+	return 0;
+}
+
+static int atl2_set_settings(struct net_device *netdev,
+	struct ethtool_cmd *ecmd)
+{
+	struct atl2_adapter *adapter = netdev_priv(netdev);
+	struct atl2_hw *hw = &adapter->hw;
+
+	while (test_and_set_bit(__ATL2_RESETTING, &adapter->flags))
+		msleep(1);
+
+	if (ecmd->autoneg == AUTONEG_ENABLE) {
+#define MY_ADV_MASK	(ADVERTISE_10_HALF | \
+			 ADVERTISE_10_FULL | \
+			 ADVERTISE_100_HALF | \
+			 ADVERTISE_100_FULL)
+
+		if ((ecmd->advertising & MY_ADV_MASK) == MY_ADV_MASK) {
+			hw->MediaType = MEDIA_TYPE_AUTO_SENSOR;
+			hw->autoneg_advertised =  MY_ADV_MASK;
+		} else if ((ecmd->advertising & MY_ADV_MASK) ==
+				ADVERTISE_100_FULL) {
+			hw->MediaType = MEDIA_TYPE_100M_FULL;
+			hw->autoneg_advertised = ADVERTISE_100_FULL;
+		} else if ((ecmd->advertising & MY_ADV_MASK) ==
+				ADVERTISE_100_HALF) {
+			hw->MediaType = MEDIA_TYPE_100M_HALF;
+			hw->autoneg_advertised = ADVERTISE_100_HALF;
+		} else if ((ecmd->advertising & MY_ADV_MASK) ==
+				ADVERTISE_10_FULL) {
+			hw->MediaType = MEDIA_TYPE_10M_FULL;
+			hw->autoneg_advertised = ADVERTISE_10_FULL;
+		}  else if ((ecmd->advertising & MY_ADV_MASK) ==
+				ADVERTISE_10_HALF) {
+			hw->MediaType = MEDIA_TYPE_10M_HALF;
+			hw->autoneg_advertised = ADVERTISE_10_HALF;
+		} else {
+			clear_bit(__ATL2_RESETTING, &adapter->flags);
+			return -EINVAL;
+		}
+		ecmd->advertising = hw->autoneg_advertised |
+			ADVERTISED_TP | ADVERTISED_Autoneg;
+	} else {
+		clear_bit(__ATL2_RESETTING, &adapter->flags);
+		return -EINVAL;
+	}
+
+	/* reset the link */
+	if (netif_running(adapter->netdev)) {
+		atl2_down(adapter);
+		atl2_up(adapter);
+	} else
+		atl2_reset_hw(&adapter->hw);
+
+	clear_bit(__ATL2_RESETTING, &adapter->flags);
+	return 0;
+}
+
+static u32 atl2_get_tx_csum(struct net_device *netdev)
+{
+	return (netdev->features & NETIF_F_HW_CSUM) != 0;
+}
+
+static u32 atl2_get_msglevel(struct net_device *netdev)
+{
+	return 0;
+}
+
+/*
+ * It's sane for this to be empty, but we might want to take advantage of this.
+ */
+static void atl2_set_msglevel(struct net_device *netdev, u32 data)
+{
+}
+
+static int atl2_get_regs_len(struct net_device *netdev)
+{
+#define ATL2_REGS_LEN 42
+	return sizeof(u32) * ATL2_REGS_LEN;
+}
+
+static void atl2_get_regs(struct net_device *netdev,
+	struct ethtool_regs *regs, void *p)
+{
+	struct atl2_adapter *adapter = netdev_priv(netdev);
+	struct atl2_hw *hw = &adapter->hw;
+	u32 *regs_buff = p;
+	u16 phy_data;
+
+	memset(p, 0, sizeof(u32) * ATL2_REGS_LEN);
+
+	regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id;
+
+	regs_buff[0]  = ATL2_READ_REG(hw, REG_VPD_CAP);
+	regs_buff[1]  = ATL2_READ_REG(hw, REG_SPI_FLASH_CTRL);
+	regs_buff[2]  = ATL2_READ_REG(hw, REG_SPI_FLASH_CONFIG);
+	regs_buff[3]  = ATL2_READ_REG(hw, REG_TWSI_CTRL);
+	regs_buff[4]  = ATL2_READ_REG(hw, REG_PCIE_DEV_MISC_CTRL);
+	regs_buff[5]  = ATL2_READ_REG(hw, REG_MASTER_CTRL);
+	regs_buff[6]  = ATL2_READ_REG(hw, REG_MANUAL_TIMER_INIT);
+	regs_buff[7]  = ATL2_READ_REG(hw, REG_IRQ_MODU_TIMER_INIT);
+	regs_buff[8]  = ATL2_READ_REG(hw, REG_PHY_ENABLE);
+	regs_buff[9]  = ATL2_READ_REG(hw, REG_CMBDISDMA_TIMER);
+	regs_buff[10] = ATL2_READ_REG(hw, REG_IDLE_STATUS);
+	regs_buff[11] = ATL2_READ_REG(hw, REG_MDIO_CTRL);
+	regs_buff[12] = ATL2_READ_REG(hw, REG_SERDES_LOCK);
+	regs_buff[13] = ATL2_READ_REG(hw, REG_MAC_CTRL);
+	regs_buff[14] = ATL2_READ_REG(hw, REG_MAC_IPG_IFG);
+	regs_buff[15] = ATL2_READ_REG(hw, REG_MAC_STA_ADDR);
+	regs_buff[16] = ATL2_READ_REG(hw, REG_MAC_STA_ADDR+4);
+	regs_buff[17] = ATL2_READ_REG(hw, REG_RX_HASH_TABLE);
+	regs_buff[18] = ATL2_READ_REG(hw, REG_RX_HASH_TABLE+4);
+	regs_buff[19] = ATL2_READ_REG(hw, REG_MAC_HALF_DUPLX_CTRL);
+	regs_buff[20] = ATL2_READ_REG(hw, REG_MTU);
+	regs_buff[21] = ATL2_READ_REG(hw, REG_WOL_CTRL);
+	regs_buff[22] = ATL2_READ_REG(hw, REG_SRAM_TXRAM_END);
+	regs_buff[23] = ATL2_READ_REG(hw, REG_DESC_BASE_ADDR_HI);
+	regs_buff[24] = ATL2_READ_REG(hw, REG_TXD_BASE_ADDR_LO);
+	regs_buff[25] = ATL2_READ_REG(hw, REG_TXD_MEM_SIZE);
+	regs_buff[26] = ATL2_READ_REG(hw, REG_TXS_BASE_ADDR_LO);
+	regs_buff[27] = ATL2_READ_REG(hw, REG_TXS_MEM_SIZE);
+	regs_buff[28] = ATL2_READ_REG(hw, REG_RXD_BASE_ADDR_LO);
+	regs_buff[29] = ATL2_READ_REG(hw, REG_RXD_BUF_NUM);
+	regs_buff[30] = ATL2_READ_REG(hw, REG_DMAR);
+	regs_buff[31] = ATL2_READ_REG(hw, REG_TX_CUT_THRESH);
+	regs_buff[32] = ATL2_READ_REG(hw, REG_DMAW);
+	regs_buff[33] = ATL2_READ_REG(hw, REG_PAUSE_ON_TH);
+	regs_buff[34] = ATL2_READ_REG(hw, REG_PAUSE_OFF_TH);
+	regs_buff[35] = ATL2_READ_REG(hw, REG_MB_TXD_WR_IDX);
+	regs_buff[36] = ATL2_READ_REG(hw, REG_MB_RXD_RD_IDX);
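+	/* note: regs_buff[37] is skipped and stays zero from the
+	 * memset above */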
+	regs_buff[38] = ATL2_READ_REG(hw, REG_ISR);
+	regs_buff[39] = ATL2_READ_REG(hw, REG_IMR);
+
+	atl2_read_phy_reg(hw, MII_BMCR, &phy_data);
+	regs_buff[40] = (u32)phy_data;
+	atl2_read_phy_reg(hw, MII_BMSR, &phy_data);
+	regs_buff[41] = (u32)phy_data;
+}
+
+static int atl2_get_eeprom_len(struct net_device *netdev)
+{
+	struct atl2_adapter *adapter = netdev_priv(netdev);
+
+	if (!atl2_check_eeprom_exist(&adapter->hw))
+		return 512;
+	else
+		return 0;
+}
+
+static int atl2_get_eeprom(struct net_device *netdev,
+	struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+	struct atl2_adapter *adapter = netdev_priv(netdev);
+	struct atl2_hw *hw = &adapter->hw;
+	u32 *eeprom_buff;
+	int first_dword, last_dword;
+	int ret_val = 0;
+	int i;
+
+	if (eeprom->len == 0)
+		return -EINVAL;
+
+	if (atl2_check_eeprom_exist(hw))
+		return -EINVAL;
+
+	eeprom->magic = hw->vendor_id | (hw->device_id << 16);
+
+	first_dword = eeprom->offset >> 2;
+	last_dword = (eeprom->offset + eeprom->len - 1) >> 2;
+
+	eeprom_buff = kmalloc(sizeof(u32) * (last_dword - first_dword + 1),
+		GFP_KERNEL);
+	if (!eeprom_buff)
+		return -ENOMEM;
+
+	/* read through the last dword covering the request; free the
+	 * buffer on error rather than leaking it */
+	for (i = first_dword; i <= last_dword; i++) {
+		if (!atl2_read_eeprom(hw, i*4, &(eeprom_buff[i-first_dword]))) {
+			kfree(eeprom_buff);
+			return -EIO;
+		}
+	}
+
+	memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 3),
+		eeprom->len);
+	kfree(eeprom_buff);
+
+	return ret_val;
+}
+
+static int atl2_set_eeprom(struct net_device *netdev,
+	struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+	struct atl2_adapter *adapter = netdev_priv(netdev);
+	struct atl2_hw *hw = &adapter->hw;
+	u32 *eeprom_buff;
+	u32 *ptr;
+	int max_len, first_dword, last_dword, ret_val = 0;
+	int i;
+
+	if (eeprom->len == 0)
+		return -EOPNOTSUPP;
+
+	if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
+		return -EFAULT;
+
+	max_len = 512;
+
+	first_dword = eeprom->offset >> 2;
+	last_dword = (eeprom->offset + eeprom->len - 1) >> 2;
+	eeprom_buff = kmalloc(max_len, GFP_KERNEL);
+	if (!eeprom_buff)
+		return -ENOMEM;
+
+	ptr = (u32 *)eeprom_buff;
+
+	if (eeprom->offset & 3) {
+		/* need read/modify/write of first changed EEPROM word */
+		/* only the second byte of the word is being modified */
+		if (!atl2_read_eeprom(hw, first_dword*4, &(eeprom_buff[0]))) {
+			kfree(eeprom_buff);
+			return -EIO;
+		}
+		ptr++;
+	}
+	if (((eeprom->offset + eeprom->len) & 3)) {
+		/*
+		 * need read/modify/write of last changed EEPROM word
+		 * only the first byte of the word is being modified
+		 */
+		if (!atl2_read_eeprom(hw, last_dword * 4,
+			&(eeprom_buff[last_dword - first_dword]))) {
+			kfree(eeprom_buff);
+			return -EIO;
+		}
+	}
+
+	/* Device's eeprom is always little-endian, word addressable */
+	memcpy(ptr, bytes, eeprom->len);
+
+	for (i = 0; i < last_dword - first_dword + 1; i++) {
+		if (!atl2_write_eeprom(hw, ((first_dword+i)*4),
+				eeprom_buff[i])) {
+			kfree(eeprom_buff);
+			return -EIO;
+		}
+	}
+
+	kfree(eeprom_buff);
+	return ret_val;
+}
+
+static void atl2_get_drvinfo(struct net_device *netdev,
+	struct ethtool_drvinfo *drvinfo)
+{
+	struct atl2_adapter *adapter = netdev_priv(netdev);
+
+	strncpy(drvinfo->driver,  atl2_driver_name, 32);
+	strncpy(drvinfo->version, atl2_driver_version, 32);
+	strncpy(drvinfo->fw_version, "L2", 32);
+	strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+	drvinfo->n_stats = 0;
+	drvinfo->testinfo_len = 0;
+	drvinfo->regdump_len = atl2_get_regs_len(netdev);
+	drvinfo->eedump_len = atl2_get_eeprom_len(netdev);
+}
+
+static void atl2_get_wol(struct net_device *netdev,
+	struct ethtool_wolinfo *wol)
+{
+	struct atl2_adapter *adapter = netdev_priv(netdev);
+
+	wol->supported = WAKE_MAGIC;
+	wol->wolopts = 0;
+
+	if (adapter->wol & ATLX_WUFC_EX)
+		wol->wolopts |= WAKE_UCAST;
+	if (adapter->wol & ATLX_WUFC_MC)
+		wol->wolopts |= WAKE_MCAST;
+	if (adapter->wol & ATLX_WUFC_BC)
+		wol->wolopts |= WAKE_BCAST;
+	if (adapter->wol & ATLX_WUFC_MAG)
+		wol->wolopts |= WAKE_MAGIC;
+	if (adapter->wol & ATLX_WUFC_LNKC)
+		wol->wolopts |= WAKE_PHY;
+}
+
+static int atl2_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+	struct atl2_adapter *adapter = netdev_priv(netdev);
+
+	if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE))
+		return -EOPNOTSUPP;
+
+	if (wol->wolopts & (WAKE_UCAST | WAKE_MCAST | WAKE_BCAST))
+		return -EOPNOTSUPP;
+
+	/* these settings will always override what we currently have */
+	adapter->wol = 0;
+
+	if (wol->wolopts & WAKE_MAGIC)
+		adapter->wol |= ATLX_WUFC_MAG;
+	if (wol->wolopts & WAKE_PHY)
+		adapter->wol |= ATLX_WUFC_LNKC;
+
+	return 0;
+}
+
+static int atl2_nway_reset(struct net_device *netdev)
+{
+	struct atl2_adapter *adapter = netdev_priv(netdev);
+	if (netif_running(netdev))
+		atl2_reinit_locked(adapter);
+	return 0;
+}
+
+static struct ethtool_ops atl2_ethtool_ops = {
+	.get_settings		= atl2_get_settings,
+	.set_settings		= atl2_set_settings,
+	.get_drvinfo		= atl2_get_drvinfo,
+	.get_regs_len		= atl2_get_regs_len,
+	.get_regs		= atl2_get_regs,
+	.get_wol		= atl2_get_wol,
+	.set_wol		= atl2_set_wol,
+	.get_msglevel		= atl2_get_msglevel,
+	.set_msglevel		= atl2_set_msglevel,
+	.nway_reset		= atl2_nway_reset,
+	.get_link		= ethtool_op_get_link,
+	.get_eeprom_len		= atl2_get_eeprom_len,
+	.get_eeprom		= atl2_get_eeprom,
+	.set_eeprom		= atl2_set_eeprom,
+	.get_tx_csum		= atl2_get_tx_csum,
+	.get_sg			= ethtool_op_get_sg,
+	.set_sg			= ethtool_op_set_sg,
+#ifdef NETIF_F_TSO
+	.get_tso		= ethtool_op_get_tso,
+#endif
+};
+
+static void atl2_set_ethtool_ops(struct net_device *netdev)
+{
+	SET_ETHTOOL_OPS(netdev, &atl2_ethtool_ops);
+}
+
+#define LBYTESWAP(a)  ((((a) & 0x00ff00ff) << 8) | \
+	(((a) & 0xff00ff00) >> 8))
+#define LONGSWAP(a)   ((LBYTESWAP(a) << 16) | (LBYTESWAP(a) >> 16))
+#define SHORTSWAP(a)  (((a) << 8) | ((a) >> 8))
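+/* Worked example: LONGSWAP(0x11223344) == 0x44332211, a full byte
+ * (endianness) reversal.  SHORTSWAP(0x1122) evaluates to 0x112211 as
+ * an int and becomes 0x2211 once truncated back to 16 bits at the
+ * point of use, as in get_permanent_address() below.
+ */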
+
+/*
+ * Reset the transmit and receive units; mask and clear all interrupts.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * return: 0 on success, or the non-zero idle status on error
+ */
+static s32 atl2_reset_hw(struct atl2_hw *hw)
+{
+	u32 icr;
+	u16 pci_cfg_cmd_word;
+	int i;
+
+	/* Workaround for PCI problem when BIOS sets MMRBC incorrectly. */
+	atl2_read_pci_cfg(hw, PCI_REG_COMMAND, &pci_cfg_cmd_word);
+	if ((pci_cfg_cmd_word &
+		(CMD_IO_SPACE|CMD_MEMORY_SPACE|CMD_BUS_MASTER)) !=
+		(CMD_IO_SPACE|CMD_MEMORY_SPACE|CMD_BUS_MASTER)) {
+		pci_cfg_cmd_word |=
+			(CMD_IO_SPACE|CMD_MEMORY_SPACE|CMD_BUS_MASTER);
+		atl2_write_pci_cfg(hw, PCI_REG_COMMAND, &pci_cfg_cmd_word);
+	}
+
+	/* Clear Interrupt mask to stop board from generating
+	 * interrupts & Clear any pending interrupt events
+	 */
+	/* FIXME */
+	/* ATL2_WRITE_REG(hw, REG_IMR, 0); */
+	/* ATL2_WRITE_REG(hw, REG_ISR, 0xffffffff); */
+
+	/* Issue Soft Reset to the MAC.  This will reset the chip's
+	 * transmit, receive, DMA.  It will not affect
+	 * the current PCI configuration.  The global reset bit is self-
+	 * clearing, and should clear within a microsecond.
+	 */
+	ATL2_WRITE_REG(hw, REG_MASTER_CTRL, MASTER_CTRL_SOFT_RST);
+	wmb();
+	msleep(1); /* delay about 1ms */
+
+	/* Wait at least 10ms for all modules to be idle */
+	for (i = 0; i < 10; i++) {
+		icr = ATL2_READ_REG(hw, REG_IDLE_STATUS);
+		if (!icr)
+			break;
+		msleep(1); /* delay 1 ms */
+		cpu_relax();
+	}
+
+	if (icr)
+		return icr;
+
+	return 0;
+}
+
+#define CUSTOM_SPI_CS_SETUP        2
+#define CUSTOM_SPI_CLK_HI          2
+#define CUSTOM_SPI_CLK_LO          2
+#define CUSTOM_SPI_CS_HOLD         2
+#define CUSTOM_SPI_CS_HI           3
+
+static struct atl2_spi_flash_dev flash_table[] = {
+/* MFR    WRSR  READ  PROGRAM WREN  WRDI  RDSR  RDID  SECTOR_ERASE CHIP_ERASE */
+{"Atmel", 0x0,  0x03, 0x02,   0x06, 0x04, 0x05, 0x15, 0x52,        0x62 },
+{"SST",   0x01, 0x03, 0x02,   0x06, 0x04, 0x05, 0x90, 0x20,        0x60 },
+{"ST",    0x01, 0x03, 0x02,   0x06, 0x04, 0x05, 0xAB, 0xD8,        0xC7 },
+};
+
+static bool atl2_spi_read(struct atl2_hw *hw, u32 addr, u32 *buf)
+{
+	int i;
+	u32 value;
+
+	ATL2_WRITE_REG(hw, REG_SPI_DATA, 0);
+	ATL2_WRITE_REG(hw, REG_SPI_ADDR, addr);
+
+	value = SPI_FLASH_CTRL_WAIT_READY |
+		(CUSTOM_SPI_CS_SETUP & SPI_FLASH_CTRL_CS_SETUP_MASK) <<
+			SPI_FLASH_CTRL_CS_SETUP_SHIFT |
+		(CUSTOM_SPI_CLK_HI & SPI_FLASH_CTRL_CLK_HI_MASK) <<
+			SPI_FLASH_CTRL_CLK_HI_SHIFT |
+		(CUSTOM_SPI_CLK_LO & SPI_FLASH_CTRL_CLK_LO_MASK) <<
+			SPI_FLASH_CTRL_CLK_LO_SHIFT |
+		(CUSTOM_SPI_CS_HOLD & SPI_FLASH_CTRL_CS_HOLD_MASK) <<
+			SPI_FLASH_CTRL_CS_HOLD_SHIFT |
+		(CUSTOM_SPI_CS_HI & SPI_FLASH_CTRL_CS_HI_MASK) <<
+			SPI_FLASH_CTRL_CS_HI_SHIFT |
+		(0x1 & SPI_FLASH_CTRL_INS_MASK) << SPI_FLASH_CTRL_INS_SHIFT;
+
+	ATL2_WRITE_REG(hw, REG_SPI_FLASH_CTRL, value);
+
+	value |= SPI_FLASH_CTRL_START;
+
+	ATL2_WRITE_REG(hw, REG_SPI_FLASH_CTRL, value);
+
+	for (i = 0; i < 10; i++) {
+		msleep(1);
+		value = ATL2_READ_REG(hw, REG_SPI_FLASH_CTRL);
+		if (!(value & SPI_FLASH_CTRL_START))
+			break;
+	}
+
+	if (value & SPI_FLASH_CTRL_START)
+		return false;
+
+	*buf = ATL2_READ_REG(hw, REG_SPI_DATA);
+
+	return true;
+}
+
+/*
+ * get_permanent_address
+ * Returns 0 if a valid MAC address was found.
+ */
+static int get_permanent_address(struct atl2_hw *hw)
+{
+	u32 Addr[2];
+	u32 i, Control;
+	u16 Register;
+	u8  EthAddr[NODE_ADDRESS_SIZE];
+	bool KeyValid;
+
+	if (is_valid_ether_addr(hw->perm_mac_addr))
+		return 0;
+
+	Addr[0] = 0;
+	Addr[1] = 0;
+
+	if (!atl2_check_eeprom_exist(hw)) { /* eeprom exists */
+		Register = 0;
+		KeyValid = false;
+
+		/* Read out all EEPROM content */
+		i = 0;
+		while (1) {
+			if (atl2_read_eeprom(hw, i + 0x100, &Control)) {
+				if (KeyValid) {
+					if (Register == REG_MAC_STA_ADDR)
+						Addr[0] = Control;
+					else if (Register ==
+						(REG_MAC_STA_ADDR + 4))
+						Addr[1] = Control;
+					KeyValid = false;
+				} else if ((Control & 0xff) == 0x5A) {
+					KeyValid = true;
+					Register = (u16) (Control >> 16);
+				} else {
+					/* assume end of data when an
+					 * invalid keyword is encountered */
+					break;
+				}
+			} else {
+				break; /* read error */
+			}
+			i += 4;
+		}
+
+		*(u32 *) &EthAddr[2] = LONGSWAP(Addr[0]);
+		*(u16 *) &EthAddr[0] = SHORTSWAP(*(u16 *) &Addr[1]);
+
+		if (is_valid_ether_addr(EthAddr)) {
+			memcpy(hw->perm_mac_addr, EthAddr, NODE_ADDRESS_SIZE);
+			return 0;
+		}
+		return 1;
+	}
+
+	/* otherwise, see if an SPI flash exists */
+	Addr[0] = 0;
+	Addr[1] = 0;
+	Register = 0;
+	KeyValid = false;
+	i = 0;
+	while (1) {
+		if (atl2_spi_read(hw, i + 0x1f000, &Control)) {
+			if (KeyValid) {
+				if (Register == REG_MAC_STA_ADDR)
+					Addr[0] = Control;
+				else if (Register == (REG_MAC_STA_ADDR + 4))
+					Addr[1] = Control;
+				KeyValid = false;
+			} else if ((Control & 0xff) == 0x5A) {
+				KeyValid = true;
+				Register = (u16) (Control >> 16);
+			} else {
+				break; /* data end */
+			}
+		} else {
+			break; /* read error */
+		}
+		i += 4;
+	}
+
+	*(u32 *) &EthAddr[2] = LONGSWAP(Addr[0]);
+	*(u16 *) &EthAddr[0] = SHORTSWAP(*(u16 *)&Addr[1]);
+	if (is_valid_ether_addr(EthAddr)) {
+		memcpy(hw->perm_mac_addr, EthAddr, NODE_ADDRESS_SIZE);
+		return 0;
+	}
+	/* finally, the MAC address may have been set by the BIOS */
+	Addr[0] = ATL2_READ_REG(hw, REG_MAC_STA_ADDR);
+	Addr[1] = ATL2_READ_REG(hw, REG_MAC_STA_ADDR + 4);
+	*(u32 *) &EthAddr[2] = LONGSWAP(Addr[0]);
+	*(u16 *) &EthAddr[0] = SHORTSWAP(*(u16 *) &Addr[1]);
+
+	if (is_valid_ether_addr(EthAddr)) {
+		memcpy(hw->perm_mac_addr, EthAddr, NODE_ADDRESS_SIZE);
+		return 0;
+	}
+
+	return 1;
+}
+
+/*
+ * Reads the adapter's MAC address from the EEPROM
+ *
+ * hw - Struct containing variables accessed by shared code
+ */
+static s32 atl2_read_mac_addr(struct atl2_hw *hw)
+{
+	u16 i;
+
+	if (get_permanent_address(hw)) {
+		/* for test */
+		/* FIXME: shouldn't we use random_ether_addr() here? */
+		hw->perm_mac_addr[0] = 0x00;
+		hw->perm_mac_addr[1] = 0x13;
+		hw->perm_mac_addr[2] = 0x74;
+		hw->perm_mac_addr[3] = 0x00;
+		hw->perm_mac_addr[4] = 0x5c;
+		hw->perm_mac_addr[5] = 0x38;
+	}
+
+	for (i = 0; i < NODE_ADDRESS_SIZE; i++)
+		hw->mac_addr[i] = hw->perm_mac_addr[i];
+
+	return 0;
+}
+
+/*
+ * Hashes an address to determine its location in the multicast table
+ *
+ * hw - Struct containing variables accessed by shared code
+ * mc_addr - the multicast address to hash
+ *
+ * The hash is computed in two steps:
+ *     1. calculate the 32-bit CRC of the multicast address
+ *     2. reverse the bit order of the CRC (MSB becomes LSB)
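+ *
+ * For example, a CRC of 0x00000001 hashes to 0x80000000: bit 0 of the
+ * CRC becomes bit 31 of the returned value.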
+ */
+static u32 atl2_hash_mc_addr(struct atl2_hw *hw, u8 *mc_addr)
+{
+	u32 crc32, value;
+	int i;
+
+	value = 0;
+	crc32 = ether_crc_le(6, mc_addr);
+
+	for (i = 0; i < 32; i++)
+		value |= (((crc32 >> i) & 1) << (31 - i));
+
+	return value;
+}
+
+/*
+ * Sets the bit in the multicast table corresponding to the hash value.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * hash_value - Multicast address hash value
+ */
+static void atl2_hash_set(struct atl2_hw *hw, u32 hash_value)
+{
+	u32 hash_bit, hash_reg;
+	u32 mta;
+
+	/* The HASH Table is a register array of 2 32-bit registers.
+	 * It is treated like an array of 64 bits.  We want to set
+	 * bit BitArray[hash_value]. So we figure out what register
+	 * the bit is in, read it, OR in the new bit, then write
+	 * back the new value.  The register is determined by the
+	 * top bit of the hash value, and the bit within that
+	 * register is determined by the next 5 bits of the value.
+	 */
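+	/* e.g. hash_value 0x8C000000: bit 31 selects register 1, and
+	 * bits 30:26 (binary 00011) select bit 3 of that register. */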
+	hash_reg = (hash_value >> 31) & 0x1;
+	hash_bit = (hash_value >> 26) & 0x1F;
+
+	mta = ATL2_READ_REG_ARRAY(hw, REG_RX_HASH_TABLE, hash_reg);
+
+	mta |= (1 << hash_bit);
+
+	ATL2_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, hash_reg, mta);
+}
+
+/*
+ * atl2_init_pcie - init PCIE module
+ */
+static void atl2_init_pcie(struct atl2_hw *hw)
+{
+	u32 value;
+
+	value = LTSSM_TEST_MODE_DEF;
+	ATL2_WRITE_REG(hw, REG_LTSSM_TEST_MODE, value);
+
+	value = PCIE_DLL_TX_CTRL1_DEF;
+	ATL2_WRITE_REG(hw, REG_PCIE_DLL_TX_CTRL1, value);
+}
+
+static void atl2_init_flash_opcode(struct atl2_hw *hw)
+{
+	if (hw->flash_vendor >= ARRAY_SIZE(flash_table))
+		hw->flash_vendor = 0; /* ATMEL */
+
+	/* Init OP table */
+	ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_PROGRAM,
+		flash_table[hw->flash_vendor].cmdPROGRAM);
+	ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_SC_ERASE,
+		flash_table[hw->flash_vendor].cmdSECTOR_ERASE);
+	ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_CHIP_ERASE,
+		flash_table[hw->flash_vendor].cmdCHIP_ERASE);
+	ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_RDID,
+		flash_table[hw->flash_vendor].cmdRDID);
+	ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_WREN,
+		flash_table[hw->flash_vendor].cmdWREN);
+	ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_RDSR,
+		flash_table[hw->flash_vendor].cmdRDSR);
+	ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_WRSR,
+		flash_table[hw->flash_vendor].cmdWRSR);
+	ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_READ,
+		flash_table[hw->flash_vendor].cmdREAD);
+}
+
+/********************************************************************
+* Performs basic configuration of the adapter.
+*
+* hw - Struct containing variables accessed by shared code
+* Assumes that the controller has previously been reset and is in a
+* post-reset uninitialized state. Initializes the multicast table
+* and calls routines to set up the link.
+* Leaves the transmit and receive units disabled and uninitialized.
+********************************************************************/
+static s32 atl2_init_hw(struct atl2_hw *hw)
+{
+	u32 ret_val = 0;
+
+	atl2_init_pcie(hw);
+
+	/* Zero out the Multicast HASH table */
+	/* clear the old settings from the multicast hash table */
+	ATL2_WRITE_REG(hw, REG_RX_HASH_TABLE, 0);
+	ATL2_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0);
+
+	atl2_init_flash_opcode(hw);
+
+	ret_val = atl2_phy_init(hw);
+
+	return ret_val;
+}
+
+/*
+ * Detects the current speed and duplex settings of the hardware.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * speed - Speed of the connection
+ * duplex - Duplex setting of the connection
+ */
+static s32 atl2_get_speed_and_duplex(struct atl2_hw *hw, u16 *speed,
+	u16 *duplex)
+{
+	s32 ret_val;
+	u16 phy_data;
+
+	/* Read PHY Specific Status Register (17) */
+	ret_val = atl2_read_phy_reg(hw, MII_ATLX_PSSR, &phy_data);
+	if (ret_val)
+		return ret_val;
+
+	if (!(phy_data & MII_ATLX_PSSR_SPD_DPLX_RESOLVED))
+		return ATLX_ERR_PHY_RES;
+
+	switch (phy_data & MII_ATLX_PSSR_SPEED) {
+	case MII_ATLX_PSSR_100MBS:
+		*speed = SPEED_100;
+		break;
+	case MII_ATLX_PSSR_10MBS:
+		*speed = SPEED_10;
+		break;
+	default:
+		return ATLX_ERR_PHY_SPEED;
+	}
+
+	if (phy_data & MII_ATLX_PSSR_DPLX)
+		*duplex = FULL_DUPLEX;
+	else
+		*duplex = HALF_DUPLEX;
+
+	return 0;
+}
+
+/*
+ * Reads the value from a PHY register
+ * hw - Struct containing variables accessed by shared code
+ * reg_addr - address of the PHY register to read
+ */
+static s32 atl2_read_phy_reg(struct atl2_hw *hw, u16 reg_addr, u16 *phy_data)
+{
+	u32 val;
+	int i;
+
+	val = ((u32)(reg_addr & MDIO_REG_ADDR_MASK)) << MDIO_REG_ADDR_SHIFT |
+		MDIO_START |
+		MDIO_SUP_PREAMBLE |
+		MDIO_RW |
+		MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT;
+	ATL2_WRITE_REG(hw, REG_MDIO_CTRL, val);
+
+	wmb();
+
+	for (i = 0; i < MDIO_WAIT_TIMES; i++) {
+		udelay(2);
+		val = ATL2_READ_REG(hw, REG_MDIO_CTRL);
+		if (!(val & (MDIO_START | MDIO_BUSY)))
+			break;
+		wmb();
+	}
+	if (!(val & (MDIO_START | MDIO_BUSY))) {
+		*phy_data = (u16)val;
+		return 0;
+	}
+
+	return ATLX_ERR_PHY;
+}
+
+/*
+ * Writes a value to a PHY register
+ * hw - Struct containing variables accessed by shared code
+ * reg_addr - address of the PHY register to write
+ * data - data to write to the PHY
+ */
+static s32 atl2_write_phy_reg(struct atl2_hw *hw, u32 reg_addr, u16 phy_data)
+{
+	int i;
+	u32 val;
+
+	val = ((u32)(phy_data & MDIO_DATA_MASK)) << MDIO_DATA_SHIFT |
+		(reg_addr & MDIO_REG_ADDR_MASK) << MDIO_REG_ADDR_SHIFT |
+		MDIO_SUP_PREAMBLE |
+		MDIO_START |
+		MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT;
+	ATL2_WRITE_REG(hw, REG_MDIO_CTRL, val);
+
+	wmb();
+
+	for (i = 0; i < MDIO_WAIT_TIMES; i++) {
+		udelay(2);
+		val = ATL2_READ_REG(hw, REG_MDIO_CTRL);
+		if (!(val & (MDIO_START | MDIO_BUSY)))
+			break;
+
+		wmb();
+	}
+
+	if (!(val & (MDIO_START | MDIO_BUSY)))
+		return 0;
+
+	return ATLX_ERR_PHY;
+}
+
+/*
+ * Configures PHY autoneg and flow control advertisement settings
+ *
+ * hw - Struct containing variables accessed by shared code
+ */
+static s32 atl2_phy_setup_autoneg_adv(struct atl2_hw *hw)
+{
+	s32 ret_val;
+	s16 mii_autoneg_adv_reg;
+
+	/* Read the MII Auto-Neg Advertisement Register (Address 4). */
+	mii_autoneg_adv_reg = MII_AR_DEFAULT_CAP_MASK;
+
+	/* Need to parse autoneg_advertised and set up
+	 * the appropriate PHY registers.  First we will parse for
+	 * autoneg_advertised software override.  Since we can advertise
+	 * a plethora of combinations, we need to check each bit
+	 * individually.
+	 */
+
+	/* First we clear all the 10/100 Mb speed bits in the Auto-Neg
+	 * Advertisement Register (Address 4); this 10/100 device has no
+	 * 1000Base-T Control Register to worry about. */
+	mii_autoneg_adv_reg &= ~MII_AR_SPEED_MASK;
+
+	/* Need to parse MediaType and set up the
+	 * appropriate PHY registers. */
+	switch (hw->MediaType) {
+	case MEDIA_TYPE_AUTO_SENSOR:
+		mii_autoneg_adv_reg |=
+			(MII_AR_10T_HD_CAPS |
+			MII_AR_10T_FD_CAPS |
+			MII_AR_100TX_HD_CAPS |
+			MII_AR_100TX_FD_CAPS);
+		hw->autoneg_advertised =
+			ADVERTISE_10_HALF |
+			ADVERTISE_10_FULL |
+			ADVERTISE_100_HALF |
+			ADVERTISE_100_FULL;
+		break;
+	case MEDIA_TYPE_100M_FULL:
+		mii_autoneg_adv_reg |= MII_AR_100TX_FD_CAPS;
+		hw->autoneg_advertised = ADVERTISE_100_FULL;
+		break;
+	case MEDIA_TYPE_100M_HALF:
+		mii_autoneg_adv_reg |= MII_AR_100TX_HD_CAPS;
+		hw->autoneg_advertised = ADVERTISE_100_HALF;
+		break;
+	case MEDIA_TYPE_10M_FULL:
+		mii_autoneg_adv_reg |= MII_AR_10T_FD_CAPS;
+		hw->autoneg_advertised = ADVERTISE_10_FULL;
+		break;
+	default:
+		mii_autoneg_adv_reg |= MII_AR_10T_HD_CAPS;
+		hw->autoneg_advertised = ADVERTISE_10_HALF;
+		break;
+	}
+
+	/* flow control fixed to enable all */
+	mii_autoneg_adv_reg |= (MII_AR_ASM_DIR | MII_AR_PAUSE);
+
+	hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg;
+
+	ret_val = atl2_write_phy_reg(hw, MII_ADVERTISE, mii_autoneg_adv_reg);
+
+	if (ret_val)
+		return ret_val;
+
+	return 0;
+}
+
+/*
+ * Resets the PHY and makes the new configuration take effect
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Sets bits 15 and 12 of the MII Control register (for the F001 bug)
+ */
+static s32 atl2_phy_commit(struct atl2_hw *hw)
+{
+	s32 ret_val;
+	u16 phy_data;
+
+	phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG;
+	ret_val = atl2_write_phy_reg(hw, MII_BMCR, phy_data);
+	if (ret_val) {
+		u32 val;
+		int i;
+		/* pcie serdes link may be down ! */
+		for (i = 0; i < 25; i++) {
+			msleep(1);
+			val = ATL2_READ_REG(hw, REG_MDIO_CTRL);
+			if (!(val & (MDIO_START | MDIO_BUSY)))
+				break;
+		}
+
+		if (val & (MDIO_START | MDIO_BUSY)) {
+			printk(KERN_ERR
+				"atl2: PCIe link down for at least 25ms!\n");
+			return ret_val;
+		}
+	}
+	return 0;
+}
+
+static s32 atl2_phy_init(struct atl2_hw *hw)
+{
+	s32 ret_val;
+	u16 phy_val;
+
+	if (hw->phy_configured)
+		return 0;
+
+	/* Enable PHY */
+	ATL2_WRITE_REGW(hw, REG_PHY_ENABLE, 1);
+	ATL2_WRITE_FLUSH(hw);
+	msleep(1);
+
+	/* check if the PHY is in powersaving mode */
+	atl2_write_phy_reg(hw, MII_DBG_ADDR, 0);
+	atl2_read_phy_reg(hw, MII_DBG_DATA, &phy_val);
+
+	/* 024E / 124E or 0274 / 1274 ? */
+	if (phy_val & 0x1000) {
+		phy_val &= ~0x1000;
+		atl2_write_phy_reg(hw, MII_DBG_DATA, phy_val);
+	}
+
+	msleep(1);
+
+	/* Enable PHY LinkChange Interrupt */
+	ret_val = atl2_write_phy_reg(hw, 18, 0xC00);
+	if (ret_val)
+		return ret_val;
+
+	/* setup AutoNeg parameters */
+	ret_val = atl2_phy_setup_autoneg_adv(hw);
+	if (ret_val)
+		return ret_val;
+
+	/* SW.Reset & En-Auto-Neg to restart Auto-Neg */
+	ret_val = atl2_phy_commit(hw);
+	if (ret_val)
+		return ret_val;
+
+	hw->phy_configured = true;
+
+	return ret_val;
+}
+
+static void atl2_set_mac_addr(struct atl2_hw *hw)
+{
+	u32 value;
+	/* e.g. MAC 00-0B-6A-F6-00-DC is stored as
+	 * reg 0: 6AF600DC, reg 1: 000B */
+	/* low dword */
+	value = (((u32)hw->mac_addr[2]) << 24) |
+		(((u32)hw->mac_addr[3]) << 16) |
+		(((u32)hw->mac_addr[4]) << 8)  |
+		(((u32)hw->mac_addr[5]));
+	ATL2_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 0, value);
+	/* high dword */
+	value = (((u32)hw->mac_addr[0]) << 8) |
+		(((u32)hw->mac_addr[1]));
+	ATL2_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 1, value);
+}
+
+/*
+ * check_eeprom_exist
+ * Returns 0 if an EEPROM is present.
+ */
+static int atl2_check_eeprom_exist(struct atl2_hw *hw)
+{
+	u32 value;
+
+	value = ATL2_READ_REG(hw, REG_SPI_FLASH_CTRL);
+	if (value & SPI_FLASH_CTRL_EN_VPD) {
+		value &= ~SPI_FLASH_CTRL_EN_VPD;
+		ATL2_WRITE_REG(hw, REG_SPI_FLASH_CTRL, value);
+	}
+	value = ATL2_READ_REGW(hw, REG_PCIE_CAP_LIST);
+	return ((value & 0xFF00) == 0x6C00) ? 0 : 1;
+}
+
+/* FIXME: This doesn't look right. -- CHS */
+static bool atl2_write_eeprom(struct atl2_hw *hw, u32 offset, u32 value)
+{
+	return true;
+}
+
+static bool atl2_read_eeprom(struct atl2_hw *hw, u32 Offset, u32 *pValue)
+{
+	int i;
+	u32    Control;
+
+	if (Offset & 0x3)
+		return false; /* address is not dword-aligned */
+
+	ATL2_WRITE_REG(hw, REG_VPD_DATA, 0);
+	Control = (Offset & VPD_CAP_VPD_ADDR_MASK) << VPD_CAP_VPD_ADDR_SHIFT;
+	ATL2_WRITE_REG(hw, REG_VPD_CAP, Control);
+
+	for (i = 0; i < 10; i++) {
+		msleep(2);
+		Control = ATL2_READ_REG(hw, REG_VPD_CAP);
+		if (Control & VPD_CAP_VPD_FLAG)
+			break;
+	}
+
+	if (Control & VPD_CAP_VPD_FLAG) {
+		*pValue = ATL2_READ_REG(hw, REG_VPD_DATA);
+		return true;
+	}
+	return false; /* timeout */
+}
+
+static void atl2_force_ps(struct atl2_hw *hw)
+{
+	u16 phy_val;
+
+	atl2_write_phy_reg(hw, MII_DBG_ADDR, 0);
+	atl2_read_phy_reg(hw, MII_DBG_DATA, &phy_val);
+	atl2_write_phy_reg(hw, MII_DBG_DATA, phy_val | 0x1000);
+
+	atl2_write_phy_reg(hw, MII_DBG_ADDR, 2);
+	atl2_write_phy_reg(hw, MII_DBG_DATA, 0x3000);
+	atl2_write_phy_reg(hw, MII_DBG_ADDR, 3);
+	atl2_write_phy_reg(hw, MII_DBG_DATA, 0);
+}
+
+/* This is the only thing that needs to be changed to adjust the
+ * maximum number of ports that the driver can manage.
+ */
+#define ATL2_MAX_NIC 4
+
+#define OPTION_UNSET    -1
+#define OPTION_DISABLED 0
+#define OPTION_ENABLED  1
+
+/* All parameters are treated the same, as an integer array of values.
+ * This macro just reduces the need to repeat the same declaration code
+ * over and over (plus this helps to avoid typo bugs).
+ */
+#define ATL2_PARAM_INIT {[0 ... ATL2_MAX_NIC] = OPTION_UNSET}
+#ifndef module_param_array
+/* Module Parameters are always initialized to -1, so that the driver
+ * can tell the difference between no user specified value or the
+ * user asking for the default value.
+ * The true default values are loaded in when atl2_check_options is called.
+ *
+ * This is a GCC extension to ANSI C.
+ * See the item "Labeled Elements in Initializers" in the section
+ * "Extensions to the C Language Family" of the GCC documentation.
+ */
+
+#define ATL2_PARAM(X, desc) \
+	static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
+	MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
+	MODULE_PARM_DESC(X, desc);
+#else
+#define ATL2_PARAM(X, desc) \
+	static int __devinitdata X[ATL2_MAX_NIC+1] = ATL2_PARAM_INIT; \
+	static int num_##X = 0; \
+	module_param_array_named(X, X, int, &num_##X, 0); \
+	MODULE_PARM_DESC(X, desc);
+#endif
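+
+/* Each parameter is a per-board array; e.g. a hypothetical
+ * "modprobe atl2 MediaType=0,2" would auto-negotiate on the first NIC
+ * and force 100Mbps half duplex on the second (see the MediaType
+ * values documented below). */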
+
+/*
+ * Transmit Memory Size (in KB)
+ * Valid Range: 4-64
+ * Default Value: 8
+ */
+#define ATL2_MIN_TX_MEMSIZE		4	/* 4KB */
+#define ATL2_MAX_TX_MEMSIZE		64	/* 64KB */
+#define ATL2_DEFAULT_TX_MEMSIZE		8	/* 8KB */
+ATL2_PARAM(TxMemSize, "Bytes of Transmit Memory");
+
+/*
+ * Receive Memory Block Count
+ * Valid Range: 16-512
+ * Default Value: 64
+ */
+#define ATL2_MIN_RXD_COUNT		16
+#define ATL2_MAX_RXD_COUNT		512
+#define ATL2_DEFAULT_RXD_COUNT		64
+ATL2_PARAM(RxMemBlock, "Number of receive memory blocks");
+
+/*
+ * User Specified MediaType Override
+ *
+ * Valid Range: 0-4
+ *  - 0    - auto-negotiate at all supported speeds
+ *  - 1    - only link at 100Mbps Full Duplex
+ *  - 2    - only link at 100Mbps Half Duplex
+ *  - 3    - only link at 10Mbps Full Duplex
+ *  - 4    - only link at 10Mbps Half Duplex
+ * Default Value: 0
+ */
+ATL2_PARAM(MediaType, "MediaType Select");
+
+/*
+ * Interrupt Moderate Timer in units of 2048 ns (~2 us)
+ * Valid Range: 50-65000
+ * Default Value: 100 (~200us)
+ */
+#define INT_MOD_DEFAULT_CNT	100 /* 200us */
+#define INT_MOD_MAX_CNT		65000
+#define INT_MOD_MIN_CNT		50
+ATL2_PARAM(IntModTimer, "Interrupt Moderator Timer");
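+
+/* e.g. IntModTimer=100 gives 100 * ~2us = ~200us between moderated
+ * interrupts, matching INT_MOD_DEFAULT_CNT above. */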
+
+/*
+ * FlashVendor
+ * Valid Range: 0-2
+ * 0 - Atmel
+ * 1 - SST
+ * 2 - ST
+ */
+ATL2_PARAM(FlashVendor, "SPI Flash Vendor");
+
+#define AUTONEG_ADV_DEFAULT	0x2F
+#define AUTONEG_ADV_MASK	0x2F
+#define FLOW_CONTROL_DEFAULT	FLOW_CONTROL_FULL
+
+#define FLASH_VENDOR_DEFAULT	0
+#define FLASH_VENDOR_MIN	0
+#define FLASH_VENDOR_MAX	2
+
+struct atl2_option {
+	enum { enable_option, range_option, list_option } type;
+	char *name;
+	char *err;
+	int  def;
+	union {
+		struct { /* range_option info */
+			int min;
+			int max;
+		} r;
+		struct { /* list_option info */
+			int nr;
+			struct atl2_opt_list { int i; char *str; } *p;
+		} l;
+	} arg;
+};
+
+static int __devinit atl2_validate_option(int *value, struct atl2_option *opt)
+{
+	int i;
+	struct atl2_opt_list *ent;
+
+	if (*value == OPTION_UNSET) {
+		*value = opt->def;
+		return 0;
+	}
+
+	switch (opt->type) {
+	case enable_option:
+		switch (*value) {
+		case OPTION_ENABLED:
+			printk(KERN_INFO "%s Enabled\n", opt->name);
+			return 0;
+		case OPTION_DISABLED:
+			printk(KERN_INFO "%s Disabled\n", opt->name);
+			return 0;
+		}
+		break;
+	case range_option:
+		if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
+			printk(KERN_INFO "%s set to %i\n", opt->name, *value);
+			return 0;
+		}
+		break;
+	case list_option:
+		for (i = 0; i < opt->arg.l.nr; i++) {
+			ent = &opt->arg.l.p[i];
+			if (*value == ent->i) {
+				if (ent->str[0] != '\0')
+					printk(KERN_INFO "%s\n", ent->str);
+				return 0;
+			}
+		}
+		break;
+	default:
+		BUG();
+	}
+
+	printk(KERN_INFO "Invalid %s specified (%i) %s\n",
+		opt->name, *value, opt->err);
+	*value = opt->def;
+	return -1;
+}
+
+/*
+ * atl2_check_options - Range Checking for Command Line Parameters
+ * @adapter: board private structure
+ *
+ * This routine checks all command line parameters for valid user
+ * input.  If an invalid value is given, or if no user specified
+ * value exists, a default value is used.  The final value is stored
+ * in a variable in the adapter structure.
+ */
+static void __devinit atl2_check_options(struct atl2_adapter *adapter)
+{
+	int val;
+	struct atl2_option opt;
+	int bd = adapter->bd_number;
+	if (bd >= ATL2_MAX_NIC) {
+		printk(KERN_NOTICE "Warning: no configuration for board #%i\n",
+			bd);
+		printk(KERN_NOTICE "Using defaults for all values\n");
+#ifndef module_param_array
+		bd = ATL2_MAX_NIC;
+#endif
+	}
+
+	/* Bytes of Transmit Memory */
+	opt.type = range_option;
+	opt.name = "Bytes of Transmit Memory";
+	opt.err = "using default of " __MODULE_STRING(ATL2_DEFAULT_TX_MEMSIZE);
+	opt.def = ATL2_DEFAULT_TX_MEMSIZE;
+	opt.arg.r.min = ATL2_MIN_TX_MEMSIZE;
+	opt.arg.r.max = ATL2_MAX_TX_MEMSIZE;
+#ifdef module_param_array
+	if (num_TxMemSize > bd) {
+#endif
+		val = TxMemSize[bd];
+		atl2_validate_option(&val, &opt);
+		adapter->txd_ring_size = ((u32) val) * 1024;
+#ifdef module_param_array
+	} else
+		adapter->txd_ring_size = ((u32)opt.def) * 1024;
+#endif
+	/* txs ring size: */
+	adapter->txs_ring_size = adapter->txd_ring_size / 128;
+	if (adapter->txs_ring_size > 160)
+		adapter->txs_ring_size = 160;
+
+	/* Receive Memory Block Count */
+	opt.type = range_option;
+	opt.name = "Number of receive memory block";
+	opt.err = "using default of " __MODULE_STRING(ATL2_DEFAULT_RXD_COUNT);
+	opt.def = ATL2_DEFAULT_RXD_COUNT;
+	opt.arg.r.min = ATL2_MIN_RXD_COUNT;
+	opt.arg.r.max = ATL2_MAX_RXD_COUNT;
+#ifdef module_param_array
+	if (num_RxMemBlock > bd) {
+#endif
+		val = RxMemBlock[bd];
+		atl2_validate_option(&val, &opt);
+		adapter->rxd_ring_size = (u32)val;
+		/* FIXME */
+		/* ((u16)val)&~1; */	/* even number */
+#ifdef module_param_array
+	} else
+		adapter->rxd_ring_size = (u32)opt.def;
+#endif
+	/* init RXD Flow control value */
+	adapter->hw.fc_rxd_hi = (adapter->rxd_ring_size / 8) * 7;
+	adapter->hw.fc_rxd_lo = (ATL2_MIN_RXD_COUNT / 8) >
+		(adapter->rxd_ring_size / 12) ? (ATL2_MIN_RXD_COUNT / 8) :
+		(adapter->rxd_ring_size / 12);
+
+	/* Interrupt Moderate Timer */
+	opt.type = range_option;
+	opt.name = "Interrupt Moderate Timer";
+	opt.err = "using default of " __MODULE_STRING(INT_MOD_DEFAULT_CNT);
+	opt.def = INT_MOD_DEFAULT_CNT;
+	opt.arg.r.min = INT_MOD_MIN_CNT;
+	opt.arg.r.max = INT_MOD_MAX_CNT;
+#ifdef module_param_array
+	if (num_IntModTimer > bd) {
+#endif
+		val = IntModTimer[bd];
+		atl2_validate_option(&val, &opt);
+		adapter->imt = (u16) val;
+#ifdef module_param_array
+	} else
+		adapter->imt = (u16)(opt.def);
+#endif
+	/* Flash Vendor */
+	opt.type = range_option;
+	opt.name = "SPI Flash Vendor";
+	opt.err = "using default of " __MODULE_STRING(FLASH_VENDOR_DEFAULT);
+	opt.def = FLASH_VENDOR_DEFAULT;
+	opt.arg.r.min = FLASH_VENDOR_MIN;
+	opt.arg.r.max = FLASH_VENDOR_MAX;
+#ifdef module_param_array
+	if (num_FlashVendor > bd) {
+#endif
+		val = FlashVendor[bd];
+		atl2_validate_option(&val, &opt);
+		adapter->hw.flash_vendor = (u8) val;
+#ifdef module_param_array
+	} else
+		adapter->hw.flash_vendor = (u8)(opt.def);
+#endif
+	/* MediaType */
+	opt.type = range_option;
+	opt.name = "Speed/Duplex Selection";
+	opt.err = "using default of " __MODULE_STRING(MEDIA_TYPE_AUTO_SENSOR);
+	opt.def = MEDIA_TYPE_AUTO_SENSOR;
+	opt.arg.r.min = MEDIA_TYPE_AUTO_SENSOR;
+	opt.arg.r.max = MEDIA_TYPE_10M_HALF;
+#ifdef module_param_array
+	if (num_MediaType > bd) {
+#endif
+		val = MediaType[bd];
+		atl2_validate_option(&val, &opt);
+		adapter->hw.MediaType = (u16) val;
+#ifdef module_param_array
+	} else
+		adapter->hw.MediaType = (u16)(opt.def);
+#endif
+}
diff --git a/drivers/net/atlx/atl2.h b/drivers/net/atlx/atl2.h
new file mode 100644
index 000000000000..6e1f28ff227b
--- /dev/null
+++ b/drivers/net/atlx/atl2.h
@@ -0,0 +1,530 @@
+/* atl2.h -- atl2 driver definitions
+ *
+ * Copyright(c) 2007 Atheros Corporation. All rights reserved.
+ * Copyright(c) 2006 xiong huang <xiong.huang@atheros.com>
+ * Copyright(c) 2007 Chris Snook <csnook@redhat.com>
+ *
+ * Derived from Intel e1000 driver
+ * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ */
+
+#ifndef _ATL2_H_
+#define _ATL2_H_
+
+#include <asm/atomic.h>
+#include <linux/netdevice.h>
+
+#ifndef _ATL2_HW_H_
+#define _ATL2_HW_H_
+
+#ifndef _ATL2_OSDEP_H_
+#define _ATL2_OSDEP_H_
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/if_ether.h>
+
+#include "atlx.h"
+
+#ifdef ETHTOOL_OPS_COMPAT
+extern int ethtool_ioctl(struct ifreq *ifr);
+#endif
+
+#define PCI_COMMAND_REGISTER	PCI_COMMAND
+#define CMD_MEM_WRT_INVALIDATE	PCI_COMMAND_INVALIDATE
+#define ETH_ADDR_LEN		ETH_ALEN
+
+#define ATL2_WRITE_REG(a, reg, value) (iowrite32((value), \
+	((a)->hw_addr + (reg))))
+
+#define ATL2_WRITE_FLUSH(a) (ioread32((a)->hw_addr))
+
+#define ATL2_READ_REG(a, reg) (ioread32((a)->hw_addr + (reg)))
+
+#define ATL2_WRITE_REGB(a, reg, value) (iowrite8((value), \
+	((a)->hw_addr + (reg))))
+
+#define ATL2_READ_REGB(a, reg) (ioread8((a)->hw_addr + (reg)))
+
+#define ATL2_WRITE_REGW(a, reg, value) (iowrite16((value), \
+	((a)->hw_addr + (reg))))
+
+#define ATL2_READ_REGW(a, reg) (ioread16((a)->hw_addr + (reg)))
+
+#define ATL2_WRITE_REG_ARRAY(a, reg, offset, value) \
+	(iowrite32((value), (((a)->hw_addr + (reg)) + ((offset) << 2))))
+
+#define ATL2_READ_REG_ARRAY(a, reg, offset) \
+	(ioread32(((a)->hw_addr + (reg)) + ((offset) << 2)))
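+
+/* e.g. ATL2_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0) writes the
+ * second 32-bit word of the hash table, i.e. REG_RX_HASH_TABLE + 4.
+ */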
+
+#endif /* _ATL2_OSDEP_H_ */
+
+struct atl2_adapter;
+struct atl2_hw;
+
+/* function prototype */
+static s32 atl2_reset_hw(struct atl2_hw *hw);
+static s32 atl2_read_mac_addr(struct atl2_hw *hw);
+static s32 atl2_init_hw(struct atl2_hw *hw);
+static s32 atl2_get_speed_and_duplex(struct atl2_hw *hw, u16 *speed,
+	u16 *duplex);
+static u32 atl2_hash_mc_addr(struct atl2_hw *hw, u8 *mc_addr);
+static void atl2_hash_set(struct atl2_hw *hw, u32 hash_value);
+static s32 atl2_read_phy_reg(struct atl2_hw *hw, u16 reg_addr, u16 *phy_data);
+static s32 atl2_write_phy_reg(struct atl2_hw *hw, u32 reg_addr, u16 phy_data);
+static void atl2_read_pci_cfg(struct atl2_hw *hw, u32 reg, u16 *value);
+static void atl2_write_pci_cfg(struct atl2_hw *hw, u32 reg, u16 *value);
+static void atl2_set_mac_addr(struct atl2_hw *hw);
+static bool atl2_read_eeprom(struct atl2_hw *hw, u32 Offset, u32 *pValue);
+static bool atl2_write_eeprom(struct atl2_hw *hw, u32 offset, u32 value);
+static s32 atl2_phy_init(struct atl2_hw *hw);
+static int atl2_check_eeprom_exist(struct atl2_hw *hw);
+static void atl2_force_ps(struct atl2_hw *hw);
+
+/* register definition */
+
+/* Block IDLE Status Register */
+#define IDLE_STATUS_RXMAC	1	/* 1: RXMAC is non-IDLE */
+#define IDLE_STATUS_TXMAC	2	/* 1: TXMAC is non-IDLE */
+#define IDLE_STATUS_DMAR	8	/* 1: DMAR is non-IDLE */
+#define IDLE_STATUS_DMAW	4	/* 1: DMAW is non-IDLE */
+
+/* MDIO Control Register */
+#define MDIO_WAIT_TIMES		10
+
+/* MAC Control Register */
+#define MAC_CTRL_DBG_TX_BKPRESURE	0x100000	/* 1: TX max backoff */
+#define MAC_CTRL_MACLP_CLK_PHY		0x8000000	/* 1: 25MHz from phy */
+#define MAC_CTRL_HALF_LEFT_BUF_SHIFT	28
+#define MAC_CTRL_HALF_LEFT_BUF_MASK	0xF		/* MAC retry buf x32B */
+
+/* Internal SRAM Partition Register */
+#define REG_SRAM_TXRAM_END	0x1500	/* Internal tail address of TXRAM
+					 * default: 2byte*1024 */
+#define REG_SRAM_RXRAM_END	0x1502	/* Internal tail address of RXRAM
+					 * default: 2byte*1024 */
+
+/* Descriptor Control register */
+#define REG_TXD_BASE_ADDR_LO	0x1544	/* The base address of the Transmit
+					 * Data Mem low 32-bit(dword align) */
+#define REG_TXD_MEM_SIZE	0x1548	/* Transmit Data Memory size(by
+					 * double word , max 256KB) */
+#define REG_TXS_BASE_ADDR_LO	0x154C	/* The base address of the Transmit
+					 * Status Memory low 32-bit(dword word
+					 * align) */
+#define REG_TXS_MEM_SIZE	0x1550	/* double word unit, max 4*2047
+					 * bytes. */
+#define REG_RXD_BASE_ADDR_LO	0x1554	/* The base address of the Receive
+					 * Data & Status Memory low
+					 * 32-bit(unit 8 bytes) */
+#define REG_RXD_BUF_NUM		0x1558	/* Receive Data & Status Memory buffer
+					 * number (unit 1536bytes, max
+					 * 1536*2047) */
+
+/* DMAR Control Register */
+#define REG_DMAR	0x1580
+#define     DMAR_EN	0x1	/* 1: Enable DMAR */
+
+/* TX Cut-Through (early tx threshold) Control Register */
+#define REG_TX_CUT_THRESH	0x1590	/* TxMac begin transmit packet
+					 * threshold(unit word) */
+
+/* DMAW Control Register */
+#define REG_DMAW	0x15A0
+#define     DMAW_EN	0x1
+
+/* Flow control register */
+#define REG_PAUSE_ON_TH		0x15A8	/* RXD high watermark of overflow
+					 * threshold configuration register */
+#define REG_PAUSE_OFF_TH	0x15AA	/* RXD lower watermark of overflow
+					 * threshold configuration register */
+
+/* Mailbox Register */
+#define REG_MB_TXD_WR_IDX	0x15f0	/* double word align */
+#define REG_MB_RXD_RD_IDX	0x15F4	/* RXD Read index (unit: 1536 bytes) */
+
+/* Interrupt Status Register */
+#define ISR_TIMER	1	/* Interrupt when Timer counts down to zero */
+#define ISR_MANUAL	2	/* Software manual interrupt, for debug. Set
+				 * when SW_MAN_INT_EN is set in Table 51
+				 * Selene Master Control Register
+				 * (Offset 0x1400). */
+#define ISR_RXF_OV	4	/* RXF overflow interrupt */
+#define ISR_TXF_UR	8	/* TXF underrun interrupt */
+#define ISR_TXS_OV	0x10	/* Internal transmit status buffer full
+				 * interrupt */
+#define ISR_RXS_OV	0x20	/* Internal receive status buffer full
+				 * interrupt */
+#define ISR_LINK_CHG	0x40	/* Link Status Change Interrupt */
+#define ISR_HOST_TXD_UR	0x80
+#define ISR_HOST_RXD_OV	0x100	/* Host rx data memory full , one pulse */
+#define ISR_DMAR_TO_RST	0x200	/* DMAR op timeout interrupt. SW should
+				 * do Reset */
+#define ISR_DMAW_TO_RST	0x400
+#define ISR_PHY		0x800	/* phy interrupt */
+#define ISR_TS_UPDATE	0x10000	/* interrupt after new tx pkt status written
+				 * to host */
+#define ISR_RS_UPDATE	0x20000	/* interrupt after new rx pkt status written
+				 * to host. */
+#define ISR_TX_EARLY	0x40000	/* interrupt when txmac begin transmit one
+				 * packet */
+
+#define ISR_TX_EVENT (ISR_TXF_UR | ISR_TXS_OV | ISR_HOST_TXD_UR |\
+	ISR_TS_UPDATE | ISR_TX_EARLY)
+#define ISR_RX_EVENT (ISR_RXF_OV | ISR_RXS_OV | ISR_HOST_RXD_OV |\
+	 ISR_RS_UPDATE)
+
+#define IMR_NORMAL_MASK		(\
+	/*ISR_LINK_CHG		|*/\
+	ISR_MANUAL		|\
+	ISR_DMAR_TO_RST		|\
+	ISR_DMAW_TO_RST		|\
+	ISR_PHY			|\
+	ISR_PHY_LINKDOWN	|\
+	ISR_TS_UPDATE		|\
+	ISR_RS_UPDATE)
+
+/* Receive MAC Statistics Registers */
+#define REG_STS_RX_PAUSE	0x1700	/* Num pause packets received */
+#define REG_STS_RXD_OV		0x1704	/* Num frames dropped due to RX
+					 * FIFO overflow */
+#define REG_STS_RXS_OV		0x1708	/* Num frames dropped due to RX
+					 * Status Buffer Overflow */
+#define REG_STS_RX_FILTER	0x170C	/* Num packets dropped due to
+					 * address filtering */
+
+/* MII definitions */
+
+/* PHY Common Register */
+#define MII_SMARTSPEED	0x14
+#define MII_DBG_ADDR	0x1D
+#define MII_DBG_DATA	0x1E
+
+/* PCI Command Register Bit Definitions */
+#define PCI_REG_COMMAND		0x04
+#define CMD_IO_SPACE		0x0001
+#define CMD_MEMORY_SPACE	0x0002
+#define CMD_BUS_MASTER		0x0004
+
+#define MEDIA_TYPE_100M_FULL	1
+#define MEDIA_TYPE_100M_HALF	2
+#define MEDIA_TYPE_10M_FULL	3
+#define MEDIA_TYPE_10M_HALF	4
+
+#define AUTONEG_ADVERTISE_SPEED_DEFAULT	0x000F	/* Everything */
+
+/* The size (in bytes) of an Ethernet packet */
+#define ENET_HEADER_SIZE		14
+#define MAXIMUM_ETHERNET_FRAME_SIZE	1518	/* with FCS */
+#define MINIMUM_ETHERNET_FRAME_SIZE	64	/* with FCS */
+#define ETHERNET_FCS_SIZE		4
+#define MAX_JUMBO_FRAME_SIZE		0x2000
+#define VLAN_SIZE			4
+
+struct tx_pkt_header {
+	unsigned pkt_size:11;
+	unsigned:4;			/* reserved */
+	unsigned ins_vlan:1;		/* txmac should insert vlan */
+	unsigned short vlan;		/* vlan tag */
+};
+/* FIXME: replace above bitfields with MASK/SHIFT defines below */
+#define TX_PKT_HEADER_SIZE_MASK		0x7FF
+#define TX_PKT_HEADER_SIZE_SHIFT	0
+#define TX_PKT_HEADER_INS_VLAN_MASK	0x1
+#define TX_PKT_HEADER_INS_VLAN_SHIFT	15
+#define TX_PKT_HEADER_VLAN_TAG_MASK	0xFFFF
+#define TX_PKT_HEADER_VLAN_TAG_SHIFT	16
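+
+/* Illustration of the MASK/SHIFT style the FIXME above asks for,
+ * packing and unpacking fields of a plain 32-bit word:
+ *
+ *	word |= (size & TX_PKT_HEADER_SIZE_MASK) << TX_PKT_HEADER_SIZE_SHIFT;
+ *	vlan = (word >> TX_PKT_HEADER_VLAN_TAG_SHIFT) &
+ *			TX_PKT_HEADER_VLAN_TAG_MASK;
+ */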
+
+struct tx_pkt_status {
+	unsigned pkt_size:11;
+	unsigned:5;		/* reserved */
+	unsigned ok:1;		/* current packet transmitted without error */
+	unsigned bcast:1;	/* broadcast packet */
+	unsigned mcast:1;	/* multicast packet */
+	unsigned pause:1;	/* transmitted a pause frame */
+	unsigned ctrl:1;
+	unsigned defer:1;	/* current packet was transmitted with defer */
+	unsigned exc_defer:1;
+	unsigned single_col:1;
+	unsigned multi_col:1;
+	unsigned late_col:1;
+	unsigned abort_col:1;
+	unsigned underun:1;	/* current packet is aborted
+				 * due to txram underrun */
+	unsigned:3;		/* reserved */
+	unsigned update:1;	/* always 1'b1 in tx_status_buf */
+};
+/* FIXME: replace above bitfields with MASK/SHIFT defines below */
+#define TX_PKT_STATUS_SIZE_MASK		0x7FF
+#define TX_PKT_STATUS_SIZE_SHIFT	0
+#define TX_PKT_STATUS_OK_MASK		0x1
+#define TX_PKT_STATUS_OK_SHIFT		16
+#define TX_PKT_STATUS_BCAST_MASK	0x1
+#define TX_PKT_STATUS_BCAST_SHIFT	17
+#define TX_PKT_STATUS_MCAST_MASK	0x1
+#define TX_PKT_STATUS_MCAST_SHIFT	18
+#define TX_PKT_STATUS_PAUSE_MASK	0x1
+#define TX_PKT_STATUS_PAUSE_SHIFT	19
+#define TX_PKT_STATUS_CTRL_MASK		0x1
+#define TX_PKT_STATUS_CTRL_SHIFT	20
+#define TX_PKT_STATUS_DEFER_MASK	0x1
+#define TX_PKT_STATUS_DEFER_SHIFT	21
+#define TX_PKT_STATUS_EXC_DEFER_MASK	0x1
+#define TX_PKT_STATUS_EXC_DEFER_SHIFT	22
+#define TX_PKT_STATUS_SINGLE_COL_MASK	0x1
+#define TX_PKT_STATUS_SINGLE_COL_SHIFT	23
+#define TX_PKT_STATUS_MULTI_COL_MASK	0x1
+#define TX_PKT_STATUS_MULTI_COL_SHIFT	24
+#define TX_PKT_STATUS_LATE_COL_MASK	0x1
+#define TX_PKT_STATUS_LATE_COL_SHIFT	25
+#define TX_PKT_STATUS_ABORT_COL_MASK	0x1
+#define TX_PKT_STATUS_ABORT_COL_SHIFT	26
+#define TX_PKT_STATUS_UNDERRUN_MASK	0x1
+#define TX_PKT_STATUS_UNDERRUN_SHIFT	27
+#define TX_PKT_STATUS_UPDATE_MASK	0x1
+#define TX_PKT_STATUS_UPDATE_SHIFT	31
+
+struct rx_pkt_status {
+	unsigned pkt_size:11;	/* packet size, max 2047 bytes */
+	unsigned:5;		/* reserved */
+	unsigned ok:1;		/* current packet received ok without error */
+	unsigned bcast:1;	/* current packet is broadcast */
+	unsigned mcast:1;	/* current packet is multicast */
+	unsigned pause:1;
+	unsigned ctrl:1;
+	unsigned crc:1;		/* received a packet with crc error */
+	unsigned code:1;	/* received a packet with code error */
+	unsigned runt:1;	/* received a packet less than 64 bytes
+				 * with good crc */
+	unsigned frag:1;	/* received a packet less than 64 bytes
+				 * with bad crc */
+	unsigned trunc:1;	/* current frame truncated due to rxram full */
+	unsigned align:1;	/* this packet has an alignment error */
+	unsigned vlan:1;	/* this packet has a vlan tag */
+	unsigned:3;		/* reserved */
+	unsigned update:1;
+	unsigned short vtag;	/* vlan tag */
+	unsigned:16;
+};
+/* FIXME: replace above bitfields with MASK/SHIFT defines below */
+#define RX_PKT_STATUS_SIZE_MASK		0x7FF
+#define RX_PKT_STATUS_SIZE_SHIFT	0
+#define RX_PKT_STATUS_OK_MASK		0x1
+#define RX_PKT_STATUS_OK_SHIFT		16
+#define RX_PKT_STATUS_BCAST_MASK	0x1
+#define RX_PKT_STATUS_BCAST_SHIFT	17
+#define RX_PKT_STATUS_MCAST_MASK	0x1
+#define RX_PKT_STATUS_MCAST_SHIFT	18
+#define RX_PKT_STATUS_PAUSE_MASK	0x1
+#define RX_PKT_STATUS_PAUSE_SHIFT	19
+#define RX_PKT_STATUS_CTRL_MASK		0x1
+#define RX_PKT_STATUS_CTRL_SHIFT	20
+#define RX_PKT_STATUS_CRC_MASK		0x1
+#define RX_PKT_STATUS_CRC_SHIFT		21
+#define RX_PKT_STATUS_CODE_MASK		0x1
+#define RX_PKT_STATUS_CODE_SHIFT	22
+#define RX_PKT_STATUS_RUNT_MASK		0x1
+#define RX_PKT_STATUS_RUNT_SHIFT	23
+#define RX_PKT_STATUS_FRAG_MASK		0x1
+#define RX_PKT_STATUS_FRAG_SHIFT	24
+#define RX_PKT_STATUS_TRUNK_MASK	0x1
+#define RX_PKT_STATUS_TRUNK_SHIFT	25
+#define RX_PKT_STATUS_ALIGN_MASK	0x1
+#define RX_PKT_STATUS_ALIGN_SHIFT	26
+#define RX_PKT_STATUS_VLAN_MASK		0x1
+#define RX_PKT_STATUS_VLAN_SHIFT	27
+#define RX_PKT_STATUS_UPDATE_MASK	0x1
+#define RX_PKT_STATUS_UPDATE_SHIFT	31
+#define RX_PKT_STATUS_VLAN_TAG_MASK	0xFFFF
+#define RX_PKT_STATUS_VLAN_TAG_SHIFT	32
+
+struct rx_desc {
+	struct rx_pkt_status	status;
+	unsigned char	packet[1536 - sizeof(struct rx_pkt_status)];
+};
+
+enum atl2_speed_duplex {
+	atl2_10_half = 0,
+	atl2_10_full = 1,
+	atl2_100_half = 2,
+	atl2_100_full = 3
+};
+
+struct atl2_spi_flash_dev {
+	const char *manu_name;	/* manufacturer id */
+	/* op-code */
+	u8 cmdWRSR;
+	u8 cmdREAD;
+	u8 cmdPROGRAM;
+	u8 cmdWREN;
+	u8 cmdWRDI;
+	u8 cmdRDSR;
+	u8 cmdRDID;
+	u8 cmdSECTOR_ERASE;
+	u8 cmdCHIP_ERASE;
+};
+
+/* Structure containing variables used by the shared code (atl2_hw.c) */
+struct atl2_hw {
+	u8 __iomem *hw_addr;
+	void *back;
+
+	u8 preamble_len;
+	u8 max_retry;          /* Retransmission maximum, afterwards the
+				* packet will be discarded. */
+	u8 jam_ipg;            /* IPG to start JAM for collision based flow
+				* control in half-duplex mode. In unit of
+				* 8-bit time. */
+	u8 ipgt;               /* Desired back to back inter-packet gap. The
+				* default is 96-bit time. */
+	u8 min_ifg;            /* Minimum number of IFG to enforce in between
+				* RX frames. Frames with a gap below this
+				* IFG are dropped. */
+	u8 ipgr1;              /* 64bit Carrier-Sense window */
+	u8 ipgr2;              /* 96-bit IPG window */
+	u8 retry_buf;          /* In half-duplex mode, some bytes must be
+				* held for mac retry. (8*4 bytes unit) */
+
+	u16 fc_rxd_hi;
+	u16 fc_rxd_lo;
+	u16 lcol;              /* Collision Window */
+	u16 max_frame_size;
+
+	u16 MediaType;
+	u16 autoneg_advertised;
+	u16 pci_cmd_word;
+
+	u16 mii_autoneg_adv_reg;
+
+	u32 mem_rang;
+	u32 txcw;
+	u32 mc_filter_type;
+	u32 num_mc_addrs;
+	u32 collision_delta;
+	u32 tx_packet_delta;
+	u16 phy_spd_default;
+
+	u16 device_id;
+	u16 vendor_id;
+	u16 subsystem_id;
+	u16 subsystem_vendor_id;
+	u8 revision_id;
+
+	/* spi flash */
+	u8 flash_vendor;
+
+	u8 dma_fairness;
+	u8 mac_addr[NODE_ADDRESS_SIZE];
+	u8 perm_mac_addr[NODE_ADDRESS_SIZE];
+
+	/* FIXME */
+	/* bool phy_preamble_sup; */
+	bool phy_configured;
+};
+
+#endif /* _ATL2_HW_H_ */
+
+struct atl2_ring_header {
+	/* pointer to the descriptor ring memory */
+	void *desc;
+	/* physical address of the descriptor ring */
+	dma_addr_t dma;
+	/* length of descriptor ring in bytes */
+	unsigned int size;
+};
+
+/* board specific private data structure */
+struct atl2_adapter {
+	/* OS defined structs */
+	struct net_device *netdev;
+	struct pci_dev *pdev;
+	struct net_device_stats net_stats;
+#ifdef NETIF_F_HW_VLAN_TX
+	struct vlan_group *vlgrp;
+#endif
+	u32 wol;
+	u16 link_speed;
+	u16 link_duplex;
+
+	spinlock_t stats_lock;
+	spinlock_t tx_lock;
+
+	struct work_struct reset_task;
+	struct work_struct link_chg_task;
+	struct timer_list watchdog_timer;
+	struct timer_list phy_config_timer;
+
+	unsigned long cfg_phy;
+	bool mac_disabled;
+
+	/* All Descriptor memory */
+	dma_addr_t	ring_dma;
+	void		*ring_vir_addr;
+	int		ring_size;
+
+	struct tx_pkt_header	*txd_ring;
+	dma_addr_t	txd_dma;
+
+	struct tx_pkt_status	*txs_ring;
+	dma_addr_t	txs_dma;
+
+	struct rx_desc	*rxd_ring;
+	dma_addr_t	rxd_dma;
+
+	u32 txd_ring_size;         /* bytes per unit */
+	u32 txs_ring_size;         /* dwords per unit */
+	u32 rxd_ring_size;         /* 1536 bytes per unit */
+
+	/* read /write ptr: */
+	/* host */
+	u32 txd_write_ptr;
+	u32 txs_next_clear;
+	u32 rxd_read_ptr;
+
+	/* nic */
+	atomic_t txd_read_ptr;
+	atomic_t txs_write_ptr;
+	u32 rxd_write_ptr;
+
+	/* Interrupt Moderator timer (2us resolution) */
+	u16 imt;
+	/* Interrupt Clear timer (2us resolution) */
+	u16 ict;
+
+	unsigned long flags;
+	/* structs defined in atl2_hw.h */
+	u32 bd_number;     /* board number */
+	bool pci_using_64;
+	bool have_msi;
+	struct atl2_hw hw;
+
+	u32 usr_cmd;
+	/* FIXME */
+	/* u32 regs_buff[ATL2_REGS_LEN]; */
+	u32 pci_state[16];
+
+	u32 *config_space;
+};
+
+enum atl2_state_t {
+	__ATL2_TESTING,
+	__ATL2_RESETTING,
+	__ATL2_DOWN
+};
+
+#endif /* _ATL2_H_ */
diff --git a/drivers/net/enic/Makefile b/drivers/net/enic/Makefile
new file mode 100644
index 000000000000..391c3bce5b79
--- /dev/null
+++ b/drivers/net/enic/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_ENIC) := enic.o
+
+enic-y := enic_main.o vnic_cq.o vnic_intr.o vnic_wq.o \
+	enic_res.o vnic_dev.o vnic_rq.o
+
diff --git a/drivers/net/enic/cq_desc.h b/drivers/net/enic/cq_desc.h
new file mode 100644
index 000000000000..c036a8bfd043
--- /dev/null
+++ b/drivers/net/enic/cq_desc.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _CQ_DESC_H_
+#define _CQ_DESC_H_
+
+/*
+ * Completion queue descriptor types
+ */
+enum cq_desc_types {
+	CQ_DESC_TYPE_WQ_ENET = 0,
+	CQ_DESC_TYPE_DESC_COPY = 1,
+	CQ_DESC_TYPE_WQ_EXCH = 2,
+	CQ_DESC_TYPE_RQ_ENET = 3,
+	CQ_DESC_TYPE_RQ_FCP = 4,
+};
+
+/* Completion queue descriptor: 16B
+ *
+ * All completion queues have this basic layout.  The
+ * type_specific area is unique for each completion
+ * queue type.
+ */
+struct cq_desc {
+	__le16 completed_index;
+	__le16 q_number;
+	u8 type_specific[11];
+	u8 type_color;
+};
+
+#define CQ_DESC_TYPE_BITS        7
+#define CQ_DESC_TYPE_MASK        ((1 << CQ_DESC_TYPE_BITS) - 1)
+#define CQ_DESC_COLOR_MASK       1
+#define CQ_DESC_Q_NUM_BITS       10
+#define CQ_DESC_Q_NUM_MASK       ((1 << CQ_DESC_Q_NUM_BITS) - 1)
+#define CQ_DESC_COMP_NDX_BITS    12
+#define CQ_DESC_COMP_NDX_MASK    ((1 << CQ_DESC_COMP_NDX_BITS) - 1)
+
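+/* Illustrative field packing implied by the masks above (a reading
+ * aid derived from these definitions, not additional hardware spec):
+ *
+ *	type_color:       bit 7 = color, bits 6:0 = type
+ *	q_number:         bits 9:0 = queue number
+ *	completed_index:  bits 11:0 = completed index
+ */
+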
+static inline void cq_desc_dec(const struct cq_desc *desc_arg,
+	u8 *type, u8 *color, u16 *q_number, u16 *completed_index)
+{
+	const struct cq_desc *desc = desc_arg;
+	const u8 type_color = desc->type_color;
+
+	*color = (type_color >> CQ_DESC_TYPE_BITS) & CQ_DESC_COLOR_MASK;
+
+	/*
+	 * Make sure color bit is read from desc *before* other fields
+	 * are read from desc.  Hardware guarantees color bit is last
+	 * bit (byte) written.  Adding the rmb() prevents the compiler
+	 * and/or CPU from reordering the reads which would potentially
+	 * result in reading stale values.
+	 */
+
+	rmb();
+
+	*type = type_color & CQ_DESC_TYPE_MASK;
+	*q_number = le16_to_cpu(desc->q_number) & CQ_DESC_Q_NUM_MASK;
+	*completed_index = le16_to_cpu(desc->completed_index) &
+		CQ_DESC_COMP_NDX_MASK;
+}
+
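+/* Illustrative consumer pattern (a sketch, not code from this file):
+ * the caller keeps a per-ring generation flag, here called last_color
+ * for illustration, uses the decoded color to detect descriptors newly
+ * written by hardware, and toggles the flag on each ring wrap:
+ *
+ *	cq_desc_dec(desc, &type, &color, &q_number, &completed_index);
+ *	if (color != cq->last_color)
+ *		break;	(hardware has not written this entry yet)
+ */
+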
+#endif /* _CQ_DESC_H_ */
diff --git a/drivers/net/enic/cq_enet_desc.h b/drivers/net/enic/cq_enet_desc.h
new file mode 100644
index 000000000000..03dce9ed612c
--- /dev/null
+++ b/drivers/net/enic/cq_enet_desc.h
@@ -0,0 +1,169 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _CQ_ENET_DESC_H_
+#define _CQ_ENET_DESC_H_
+
+#include "cq_desc.h"
+
+/* Ethernet completion queue descriptor: 16B */
+struct cq_enet_wq_desc {
+	__le16 completed_index;
+	__le16 q_number;
+	u8 reserved[11];
+	u8 type_color;
+};
+
+static inline void cq_enet_wq_desc_dec(struct cq_enet_wq_desc *desc,
+	u8 *type, u8 *color, u16 *q_number, u16 *completed_index)
+{
+	cq_desc_dec((struct cq_desc *)desc, type,
+		color, q_number, completed_index);
+}
+
+/* Completion queue descriptor: Ethernet receive queue, 16B */
+struct cq_enet_rq_desc {
+	__le16 completed_index_flags;
+	__le16 q_number_rss_type_flags;
+	__le32 rss_hash;
+	__le16 bytes_written_flags;
+	__le16 vlan;
+	__le16 checksum_fcoe;
+	u8 flags;
+	u8 type_color;
+};
+
+#define CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT          (0x1 << 12)
+#define CQ_ENET_RQ_DESC_FLAGS_FCOE                  (0x1 << 13)
+#define CQ_ENET_RQ_DESC_FLAGS_EOP                   (0x1 << 14)
+#define CQ_ENET_RQ_DESC_FLAGS_SOP                   (0x1 << 15)
+
+#define CQ_ENET_RQ_DESC_RSS_TYPE_BITS               4
+#define CQ_ENET_RQ_DESC_RSS_TYPE_MASK \
+	((1 << CQ_ENET_RQ_DESC_RSS_TYPE_BITS) - 1)
+#define CQ_ENET_RQ_DESC_RSS_TYPE_NONE               0
+#define CQ_ENET_RQ_DESC_RSS_TYPE_IPv4               1
+#define CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv4           2
+#define CQ_ENET_RQ_DESC_RSS_TYPE_IPv6               3
+#define CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6           4
+#define CQ_ENET_RQ_DESC_RSS_TYPE_IPv6_EX            5
+#define CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6_EX        6
+
+#define CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC         (0x1 << 14)
+
+#define CQ_ENET_RQ_DESC_BYTES_WRITTEN_BITS          14
+#define CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK \
+	((1 << CQ_ENET_RQ_DESC_BYTES_WRITTEN_BITS) - 1)
+#define CQ_ENET_RQ_DESC_FLAGS_TRUNCATED             (0x1 << 14)
+#define CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED         (0x1 << 15)
+
+#define CQ_ENET_RQ_DESC_FCOE_SOF_BITS               4
+#define CQ_ENET_RQ_DESC_FCOE_SOF_MASK \
+	((1 << CQ_ENET_RQ_DESC_FCOE_SOF_BITS) - 1)
+#define CQ_ENET_RQ_DESC_FCOE_EOF_BITS               8
+#define CQ_ENET_RQ_DESC_FCOE_EOF_MASK \
+	((1 << CQ_ENET_RQ_DESC_FCOE_EOF_BITS) - 1)
+#define CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT              8
+
+#define CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK       (0x1 << 0)
+#define CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK              (0x1 << 0)
+#define CQ_ENET_RQ_DESC_FLAGS_UDP                   (0x1 << 1)
+#define CQ_ENET_RQ_DESC_FCOE_ENC_ERROR              (0x1 << 1)
+#define CQ_ENET_RQ_DESC_FLAGS_TCP                   (0x1 << 2)
+#define CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK          (0x1 << 3)
+#define CQ_ENET_RQ_DESC_FLAGS_IPV6                  (0x1 << 4)
+#define CQ_ENET_RQ_DESC_FLAGS_IPV4                  (0x1 << 5)
+#define CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT         (0x1 << 6)
+#define CQ_ENET_RQ_DESC_FLAGS_FCS_OK                (0x1 << 7)
+
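+/* How the flag bits above pack into struct cq_enet_rq_desc fields
+ * (a summary derived from the definitions, not additional spec):
+ *
+ *	completed_index_flags:   bits 15:12 = SOP, EOP, FCoE, ingress port
+ *	q_number_rss_type_flags: bits 13:10 = RSS type, bit 14 = csum not calc
+ *	bytes_written_flags:     bits 13:0 = bytes, 14 = truncated, 15 = VLAN
+ *	flags:                   checksum/protocol/FCS (or FCoE) status bits
+ */
+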
+static inline void cq_enet_rq_desc_dec(struct cq_enet_rq_desc *desc,
+	u8 *type, u8 *color, u16 *q_number, u16 *completed_index,
+	u8 *ingress_port, u8 *fcoe, u8 *eop, u8 *sop, u8 *rss_type,
+	u8 *csum_not_calc, u32 *rss_hash, u16 *bytes_written, u8 *packet_error,
+	u8 *vlan_stripped, u16 *vlan, u16 *checksum, u8 *fcoe_sof,
+	u8 *fcoe_fc_crc_ok, u8 *fcoe_enc_error, u8 *fcoe_eof,
+	u8 *tcp_udp_csum_ok, u8 *udp, u8 *tcp, u8 *ipv4_csum_ok,
+	u8 *ipv6, u8 *ipv4, u8 *ipv4_fragment, u8 *fcs_ok)
+{
+	u16 completed_index_flags = le16_to_cpu(desc->completed_index_flags);
+	u16 q_number_rss_type_flags =
+		le16_to_cpu(desc->q_number_rss_type_flags);
+	u16 bytes_written_flags = le16_to_cpu(desc->bytes_written_flags);
+
+	cq_desc_dec((struct cq_desc *)desc, type,
+		color, q_number, completed_index);
+
+	*ingress_port = (completed_index_flags &
+		CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT) ? 1 : 0;
+	*fcoe = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_FCOE) ?
+		1 : 0;
+	*eop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_EOP) ?
+		1 : 0;
+	*sop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_SOP) ?
+		1 : 0;
+
+	*rss_type = (u8)((q_number_rss_type_flags >> CQ_DESC_Q_NUM_BITS) &
+		CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
+	*csum_not_calc = (q_number_rss_type_flags &
+		CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ? 1 : 0;
+
+	*rss_hash = le32_to_cpu(desc->rss_hash);
+
+	*bytes_written = bytes_written_flags &
+		CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
+	*packet_error = (bytes_written_flags &
+		CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ? 1 : 0;
+	*vlan_stripped = (bytes_written_flags &
+		CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) ? 1 : 0;
+
+	*vlan = le16_to_cpu(desc->vlan);
+
+	if (*fcoe) {
+		*fcoe_sof = (u8)(le16_to_cpu(desc->checksum_fcoe) &
+			CQ_ENET_RQ_DESC_FCOE_SOF_MASK);
+		*fcoe_fc_crc_ok = (desc->flags &
+			CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK) ? 1 : 0;
+		*fcoe_enc_error = (desc->flags &
+			CQ_ENET_RQ_DESC_FCOE_ENC_ERROR) ? 1 : 0;
+		*fcoe_eof = (u8)((desc->checksum_fcoe >>
+			CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT) &
+			CQ_ENET_RQ_DESC_FCOE_EOF_MASK);
+		*checksum = 0;
+	} else {
+		*fcoe_sof = 0;
+		*fcoe_fc_crc_ok = 0;
+		*fcoe_enc_error = 0;
+		*fcoe_eof = 0;
+		*checksum = le16_to_cpu(desc->checksum_fcoe);
+	}
+
+	*tcp_udp_csum_ok =
+		(desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ? 1 : 0;
+	*udp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_UDP) ? 1 : 0;
+	*tcp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP) ? 1 : 0;
+	*ipv4_csum_ok =
+		(desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ? 1 : 0;
+	*ipv6 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV6) ? 1 : 0;
+	*ipv4 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4) ? 1 : 0;
+	*ipv4_fragment =
+		(desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT) ? 1 : 0;
+	*fcs_ok = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_FCS_OK) ? 1 : 0;
+}
+
+#endif /* _CQ_ENET_DESC_H_ */
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
new file mode 100644
index 000000000000..fb83c926da58
--- /dev/null
+++ b/drivers/net/enic/enic.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _ENIC_H_
+#define _ENIC_H_
+
+#include <linux/inet_lro.h>
+
+#include "vnic_enet.h"
+#include "vnic_dev.h"
+#include "vnic_wq.h"
+#include "vnic_rq.h"
+#include "vnic_cq.h"
+#include "vnic_intr.h"
+#include "vnic_stats.h"
+#include "vnic_rss.h"
+
+#define DRV_NAME		"enic"
+#define DRV_DESCRIPTION		"Cisco 10G Ethernet Driver"
+#define DRV_VERSION		"0.0.1.18163.472"
+#define DRV_COPYRIGHT		"Copyright 2008 Cisco Systems, Inc"
+#define PFX			DRV_NAME ": "
+
+#define ENIC_LRO_MAX_DESC	8
+#define ENIC_LRO_MAX_AGGR	64
+
+enum enic_cq_index {
+	ENIC_CQ_RQ,
+	ENIC_CQ_WQ,
+	ENIC_CQ_MAX,
+};
+
+enum enic_intx_intr_index {
+	ENIC_INTX_WQ_RQ,
+	ENIC_INTX_ERR,
+	ENIC_INTX_NOTIFY,
+	ENIC_INTX_MAX,
+};
+
+enum enic_msix_intr_index {
+	ENIC_MSIX_RQ,
+	ENIC_MSIX_WQ,
+	ENIC_MSIX_ERR,
+	ENIC_MSIX_NOTIFY,
+	ENIC_MSIX_MAX,
+};
+
+struct enic_msix_entry {
+	int requested;
+	char devname[IFNAMSIZ];
+	irqreturn_t (*isr)(int, void *);
+	void *devid;
+};
+
+/* Per-instance private data structure */
+struct enic {
+	struct net_device *netdev;
+	struct pci_dev *pdev;
+	struct vnic_enet_config config;
+	struct vnic_dev_bar bar0;
+	struct vnic_dev *vdev;
+	struct net_device_stats net_stats;
+	struct timer_list notify_timer;
+	struct work_struct reset;
+	struct msix_entry msix_entry[ENIC_MSIX_MAX];
+	struct enic_msix_entry msix[ENIC_MSIX_MAX];
+	u32 msg_enable;
+	spinlock_t devcmd_lock;
+	u8 mac_addr[ETH_ALEN];
+	u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
+	unsigned int mc_count;
+	int csum_rx_enabled;
+	u32 port_mtu;
+
+	/* work queue cache line section */
+	____cacheline_aligned struct vnic_wq wq[1];
+	spinlock_t wq_lock[1];
+	unsigned int wq_count;
+	struct vlan_group *vlan_group;
+
+	/* receive queue cache line section */
+	____cacheline_aligned struct vnic_rq rq[1];
+	unsigned int rq_count;
+	int (*rq_alloc_buf)(struct vnic_rq *rq);
+	struct napi_struct napi;
+	struct net_lro_mgr lro_mgr;
+	struct net_lro_desc lro_desc[ENIC_LRO_MAX_DESC];
+
+	/* interrupt resource cache line section */
+	____cacheline_aligned struct vnic_intr intr[ENIC_MSIX_MAX];
+	unsigned int intr_count;
+	u32 __iomem *legacy_pba;		/* memory-mapped */
+
+	/* completion queue cache line section */
+	____cacheline_aligned struct vnic_cq cq[ENIC_CQ_MAX];
+	unsigned int cq_count;
+};
+
+#endif /* _ENIC_H_ */
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
new file mode 100644
index 000000000000..4cf5ec76c993
--- /dev/null
+++ b/drivers/net/enic/enic_main.c
@@ -0,0 +1,1949 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/workqueue.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/ethtool.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+
+#include "cq_enet_desc.h"
+#include "vnic_dev.h"
+#include "vnic_intr.h"
+#include "vnic_stats.h"
+#include "enic_res.h"
+#include "enic.h"
+
+#define ENIC_NOTIFY_TIMER_PERIOD	(2 * HZ)
+#define ENIC_JUMBO_FIRST_BUF_SIZE	256
+
+/* Supported devices */
+static struct pci_device_id enic_id_table[] = {
+	{ PCI_VDEVICE(CISCO, 0x0043) },
+	{ 0, }	/* end of table */
+};
+
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_AUTHOR("Scott Feldman <scofeldm@cisco.com>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, enic_id_table);
+
+struct enic_stat {
+	char name[ETH_GSTRING_LEN];
+	unsigned int offset;
+};
+
+#define ENIC_TX_STAT(stat)	\
+	{ .name = #stat, .offset = offsetof(struct vnic_tx_stats, stat) / 8 }
+#define ENIC_RX_STAT(stat)	\
+	{ .name = #stat, .offset = offsetof(struct vnic_rx_stats, stat) / 8 }
+
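+/* .offset is in units of u64 words (offsetof / 8) because
+ * enic_get_ethtool_stats() below reads the tx/rx stats blocks
+ * back as arrays of u64.
+ */
+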
+static const struct enic_stat enic_tx_stats[] = {
+	ENIC_TX_STAT(tx_frames_ok),
+	ENIC_TX_STAT(tx_unicast_frames_ok),
+	ENIC_TX_STAT(tx_multicast_frames_ok),
+	ENIC_TX_STAT(tx_broadcast_frames_ok),
+	ENIC_TX_STAT(tx_bytes_ok),
+	ENIC_TX_STAT(tx_unicast_bytes_ok),
+	ENIC_TX_STAT(tx_multicast_bytes_ok),
+	ENIC_TX_STAT(tx_broadcast_bytes_ok),
+	ENIC_TX_STAT(tx_drops),
+	ENIC_TX_STAT(tx_errors),
+	ENIC_TX_STAT(tx_tso),
+};
+
+static const struct enic_stat enic_rx_stats[] = {
+	ENIC_RX_STAT(rx_frames_ok),
+	ENIC_RX_STAT(rx_frames_total),
+	ENIC_RX_STAT(rx_unicast_frames_ok),
+	ENIC_RX_STAT(rx_multicast_frames_ok),
+	ENIC_RX_STAT(rx_broadcast_frames_ok),
+	ENIC_RX_STAT(rx_bytes_ok),
+	ENIC_RX_STAT(rx_unicast_bytes_ok),
+	ENIC_RX_STAT(rx_multicast_bytes_ok),
+	ENIC_RX_STAT(rx_broadcast_bytes_ok),
+	ENIC_RX_STAT(rx_drop),
+	ENIC_RX_STAT(rx_no_bufs),
+	ENIC_RX_STAT(rx_errors),
+	ENIC_RX_STAT(rx_rss),
+	ENIC_RX_STAT(rx_crc_errors),
+	ENIC_RX_STAT(rx_frames_64),
+	ENIC_RX_STAT(rx_frames_127),
+	ENIC_RX_STAT(rx_frames_255),
+	ENIC_RX_STAT(rx_frames_511),
+	ENIC_RX_STAT(rx_frames_1023),
+	ENIC_RX_STAT(rx_frames_1518),
+	ENIC_RX_STAT(rx_frames_to_max),
+};
+
+static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
+static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);
+
+static int enic_get_settings(struct net_device *netdev,
+	struct ethtool_cmd *ecmd)
+{
+	struct enic *enic = netdev_priv(netdev);
+
+	ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
+	ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
+	ecmd->port = PORT_FIBRE;
+	ecmd->transceiver = XCVR_EXTERNAL;
+
+	if (netif_carrier_ok(netdev)) {
+		ecmd->speed = vnic_dev_port_speed(enic->vdev);
+		ecmd->duplex = DUPLEX_FULL;
+	} else {
+		ecmd->speed = -1;
+		ecmd->duplex = -1;
+	}
+
+	ecmd->autoneg = AUTONEG_DISABLE;
+
+	return 0;
+}
+
+static void enic_get_drvinfo(struct net_device *netdev,
+	struct ethtool_drvinfo *drvinfo)
+{
+	struct enic *enic = netdev_priv(netdev);
+	struct vnic_devcmd_fw_info *fw_info;
+
+	spin_lock(&enic->devcmd_lock);
+	vnic_dev_fw_info(enic->vdev, &fw_info);
+	spin_unlock(&enic->devcmd_lock);
+
+	strncpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+	strncpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
+	strncpy(drvinfo->fw_version, fw_info->fw_version,
+		sizeof(drvinfo->fw_version));
+	strncpy(drvinfo->bus_info, pci_name(enic->pdev),
+		sizeof(drvinfo->bus_info));
+}
+
+static void enic_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
+{
+	unsigned int i;
+
+	switch (stringset) {
+	case ETH_SS_STATS:
+		for (i = 0; i < enic_n_tx_stats; i++) {
+			memcpy(data, enic_tx_stats[i].name, ETH_GSTRING_LEN);
+			data += ETH_GSTRING_LEN;
+		}
+		for (i = 0; i < enic_n_rx_stats; i++) {
+			memcpy(data, enic_rx_stats[i].name, ETH_GSTRING_LEN);
+			data += ETH_GSTRING_LEN;
+		}
+		break;
+	}
+}
+
+static int enic_get_stats_count(struct net_device *netdev)
+{
+	return enic_n_tx_stats + enic_n_rx_stats;
+}
+
+static void enic_get_ethtool_stats(struct net_device *netdev,
+	struct ethtool_stats *stats, u64 *data)
+{
+	struct enic *enic = netdev_priv(netdev);
+	struct vnic_stats *vstats;
+	unsigned int i;
+
+	spin_lock(&enic->devcmd_lock);
+	vnic_dev_stats_dump(enic->vdev, &vstats);
+	spin_unlock(&enic->devcmd_lock);
+
+	for (i = 0; i < enic_n_tx_stats; i++)
+		*(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].offset];
+	for (i = 0; i < enic_n_rx_stats; i++)
+		*(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].offset];
+}
+
+static u32 enic_get_rx_csum(struct net_device *netdev)
+{
+	struct enic *enic = netdev_priv(netdev);
+	return enic->csum_rx_enabled;
+}
+
+static int enic_set_rx_csum(struct net_device *netdev, u32 data)
+{
+	struct enic *enic = netdev_priv(netdev);
+
+	enic->csum_rx_enabled =
+		(data && ENIC_SETTING(enic, RXCSUM)) ? 1 : 0;
+
+	return 0;
+}
+
+static int enic_set_tx_csum(struct net_device *netdev, u32 data)
+{
+	struct enic *enic = netdev_priv(netdev);
+
+	if (data && ENIC_SETTING(enic, TXCSUM))
+		netdev->features |= NETIF_F_HW_CSUM;
+	else
+		netdev->features &= ~NETIF_F_HW_CSUM;
+
+	return 0;
+}
+
+static int enic_set_tso(struct net_device *netdev, u32 data)
+{
+	struct enic *enic = netdev_priv(netdev);
+
+	if (data && ENIC_SETTING(enic, TSO))
+		netdev->features |=
+			NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN;
+	else
+		netdev->features &=
+			~(NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN);
+
+	return 0;
+}
+
+static u32 enic_get_msglevel(struct net_device *netdev)
+{
+	struct enic *enic = netdev_priv(netdev);
+	return enic->msg_enable;
+}
+
+static void enic_set_msglevel(struct net_device *netdev, u32 value)
+{
+	struct enic *enic = netdev_priv(netdev);
+	enic->msg_enable = value;
+}
+
+static struct ethtool_ops enic_ethtool_ops = {
+	.get_settings = enic_get_settings,
+	.get_drvinfo = enic_get_drvinfo,
+	.get_msglevel = enic_get_msglevel,
+	.set_msglevel = enic_set_msglevel,
+	.get_link = ethtool_op_get_link,
+	.get_strings = enic_get_strings,
+	.get_stats_count = enic_get_stats_count,
+	.get_ethtool_stats = enic_get_ethtool_stats,
+	.get_rx_csum = enic_get_rx_csum,
+	.set_rx_csum = enic_set_rx_csum,
+	.get_tx_csum = ethtool_op_get_tx_csum,
+	.set_tx_csum = enic_set_tx_csum,
+	.get_sg = ethtool_op_get_sg,
+	.set_sg = ethtool_op_set_sg,
+	.get_tso = ethtool_op_get_tso,
+	.set_tso = enic_set_tso,
+};
+
+static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
+{
+	struct enic *enic = vnic_dev_priv(wq->vdev);
+
+	if (buf->sop)
+		pci_unmap_single(enic->pdev, buf->dma_addr,
+			buf->len, PCI_DMA_TODEVICE);
+	else
+		pci_unmap_page(enic->pdev, buf->dma_addr,
+			buf->len, PCI_DMA_TODEVICE);
+
+	if (buf->os_buf)
+		dev_kfree_skb_any(buf->os_buf);
+}
+
+static void enic_wq_free_buf(struct vnic_wq *wq,
+	struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque)
+{
+	enic_free_wq_buf(wq, buf);
+}
+
+static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
+	u8 type, u16 q_number, u16 completed_index, void *opaque)
+{
+	struct enic *enic = vnic_dev_priv(vdev);
+
+	spin_lock(&enic->wq_lock[q_number]);
+
+	vnic_wq_service(&enic->wq[q_number], cq_desc,
+		completed_index, enic_wq_free_buf,
+		opaque);
+
+	if (netif_queue_stopped(enic->netdev) &&
+	    vnic_wq_desc_avail(&enic->wq[q_number]) >= MAX_SKB_FRAGS + 1)
+		netif_wake_queue(enic->netdev);
+
+	spin_unlock(&enic->wq_lock[q_number]);
+
+	return 0;
+}
+
+static void enic_log_q_error(struct enic *enic)
+{
+	unsigned int i;
+	u32 error_status;
+
+	for (i = 0; i < enic->wq_count; i++) {
+		error_status = vnic_wq_error_status(&enic->wq[i]);
+		if (error_status)
+			printk(KERN_ERR PFX "%s: WQ[%d] error_status %d\n",
+				enic->netdev->name, i, error_status);
+	}
+
+	for (i = 0; i < enic->rq_count; i++) {
+		error_status = vnic_rq_error_status(&enic->rq[i]);
+		if (error_status)
+			printk(KERN_ERR PFX "%s: RQ[%d] error_status %d\n",
+				enic->netdev->name, i, error_status);
+	}
+}
+
+static void enic_link_check(struct enic *enic)
+{
+	int link_status = vnic_dev_link_status(enic->vdev);
+	int carrier_ok = netif_carrier_ok(enic->netdev);
+
+	if (link_status && !carrier_ok) {
+		printk(KERN_INFO PFX "%s: Link UP\n", enic->netdev->name);
+		netif_carrier_on(enic->netdev);
+	} else if (!link_status && carrier_ok) {
+		printk(KERN_INFO PFX "%s: Link DOWN\n", enic->netdev->name);
+		netif_carrier_off(enic->netdev);
+	}
+}
+
+static void enic_mtu_check(struct enic *enic)
+{
+	u32 mtu = vnic_dev_mtu(enic->vdev);
+
+	if (mtu != enic->port_mtu) {
+		if (mtu < enic->netdev->mtu)
+			printk(KERN_WARNING PFX
+				"%s: interface MTU (%d) set higher "
+				"than switch port MTU (%d)\n",
+				enic->netdev->name, enic->netdev->mtu, mtu);
+		enic->port_mtu = mtu;
+	}
+}
+
+static void enic_msglvl_check(struct enic *enic)
+{
+	u32 msg_enable = vnic_dev_msg_lvl(enic->vdev);
+
+	if (msg_enable != enic->msg_enable) {
+		printk(KERN_INFO PFX "%s: msg lvl changed from 0x%x to 0x%x\n",
+			enic->netdev->name, enic->msg_enable, msg_enable);
+		enic->msg_enable = msg_enable;
+	}
+}
+
+static void enic_notify_check(struct enic *enic)
+{
+	enic_msglvl_check(enic);
+	enic_mtu_check(enic);
+	enic_link_check(enic);
+}
+
+#define ENIC_TEST_INTR(pba, i) (pba & (1 << i))
+
+static irqreturn_t enic_isr_legacy(int irq, void *data)
+{
+	struct net_device *netdev = data;
+	struct enic *enic = netdev_priv(netdev);
+	u32 pba;
+
+	vnic_intr_mask(&enic->intr[ENIC_INTX_WQ_RQ]);
+
+	pba = vnic_intr_legacy_pba(enic->legacy_pba);
+	if (!pba) {
+		vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]);
+		return IRQ_NONE;	/* not our interrupt */
+	}
+
+	if (ENIC_TEST_INTR(pba, ENIC_INTX_NOTIFY))
+		enic_notify_check(enic);
+
+	if (ENIC_TEST_INTR(pba, ENIC_INTX_ERR)) {
+		enic_log_q_error(enic);
+		/* schedule recovery from WQ/RQ error */
+		schedule_work(&enic->reset);
+		return IRQ_HANDLED;
+	}
+
+	if (ENIC_TEST_INTR(pba, ENIC_INTX_WQ_RQ)) {
+		if (netif_rx_schedule_prep(netdev, &enic->napi))
+			__netif_rx_schedule(netdev, &enic->napi);
+	} else {
+		vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t enic_isr_msi(int irq, void *data)
+{
+	struct enic *enic = data;
+
+	/* With MSI, there is no sharing of interrupts, so this is
+	 * our interrupt and there is no need to ack it.  The device
+	 * is not providing per-vector masking, so the OS will not
+	 * write to PCI config space to mask/unmask the interrupt.
+	 * We're using mask_on_assertion for MSI, so the device
+	 * automatically masks the interrupt when the interrupt is
+	 * generated.  Later, when exiting polling, the interrupt
+	 * will be unmasked (see enic_poll).
+	 *
+	 * Also, the device uses the same PCIe Traffic Class (TC)
+	 * for Memory Write data and MSI, so there are no ordering
+	 * issues; the MSI will always arrive at the Root Complex
+	 * _after_ corresponding Memory Writes (i.e. descriptor
+	 * writes).
+	 */
+
+	netif_rx_schedule(enic->netdev, &enic->napi);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t enic_isr_msix_rq(int irq, void *data)
+{
+	struct enic *enic = data;
+
+	/* schedule NAPI polling for RQ cleanup */
+	netif_rx_schedule(enic->netdev, &enic->napi);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t enic_isr_msix_wq(int irq, void *data)
+{
+	struct enic *enic = data;
+	unsigned int wq_work_to_do = -1; /* no limit */
+	unsigned int wq_work_done;
+
+	wq_work_done = vnic_cq_service(&enic->cq[ENIC_CQ_WQ],
+		wq_work_to_do, enic_wq_service, NULL);
+
+	vnic_intr_return_credits(&enic->intr[ENIC_MSIX_WQ],
+		wq_work_done,
+		1 /* unmask intr */,
+		1 /* reset intr timer */);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t enic_isr_msix_err(int irq, void *data)
+{
+	struct enic *enic = data;
+
+	enic_log_q_error(enic);
+
+	/* schedule recovery from WQ/RQ error */
+	schedule_work(&enic->reset);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t enic_isr_msix_notify(int irq, void *data)
+{
+	struct enic *enic = data;
+
+	enic_notify_check(enic);
+	vnic_intr_unmask(&enic->intr[ENIC_MSIX_NOTIFY]);
+
+	return IRQ_HANDLED;
+}
+
+static inline void enic_queue_wq_skb_cont(struct enic *enic,
+	struct vnic_wq *wq, struct sk_buff *skb,
+	unsigned int len_left)
+{
+	skb_frag_t *frag;
+
+	/* Queue additional data fragments */
+	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
+		len_left -= frag->size;
+		enic_queue_wq_desc_cont(wq, skb,
+			pci_map_page(enic->pdev, frag->page,
+				frag->page_offset, frag->size,
+				PCI_DMA_TODEVICE),
+			frag->size,
+			(len_left == 0));	/* EOP? */
+	}
+}
+
+static inline void enic_queue_wq_skb_vlan(struct enic *enic,
+	struct vnic_wq *wq, struct sk_buff *skb,
+	int vlan_tag_insert, unsigned int vlan_tag)
+{
+	unsigned int head_len = skb_headlen(skb);
+	unsigned int len_left = skb->len - head_len;
+	int eop = (len_left == 0);
+
+	/* Queue the main skb fragment */
+	enic_queue_wq_desc(wq, skb,
+		pci_map_single(enic->pdev, skb->data,
+			head_len, PCI_DMA_TODEVICE),
+		head_len,
+		vlan_tag_insert, vlan_tag,
+		eop);
+
+	if (!eop)
+		enic_queue_wq_skb_cont(enic, wq, skb, len_left);
+}
+
+static inline void enic_queue_wq_skb_csum_l4(struct enic *enic,
+	struct vnic_wq *wq, struct sk_buff *skb,
+	int vlan_tag_insert, unsigned int vlan_tag)
+{
+	unsigned int head_len = skb_headlen(skb);
+	unsigned int len_left = skb->len - head_len;
+	unsigned int hdr_len = skb_transport_offset(skb);
+	unsigned int csum_offset = hdr_len + skb->csum_offset;
+	int eop = (len_left == 0);
+
+	/* Queue the main skb fragment */
+	enic_queue_wq_desc_csum_l4(wq, skb,
+		pci_map_single(enic->pdev, skb->data,
+			head_len, PCI_DMA_TODEVICE),
+		head_len,
+		csum_offset,
+		hdr_len,
+		vlan_tag_insert, vlan_tag,
+		eop);
+
+	if (!eop)
+		enic_queue_wq_skb_cont(enic, wq, skb, len_left);
+}
+
+static inline void enic_queue_wq_skb_tso(struct enic *enic,
+	struct vnic_wq *wq, struct sk_buff *skb, unsigned int mss,
+	int vlan_tag_insert, unsigned int vlan_tag)
+{
+	unsigned int head_len = skb_headlen(skb);
+	unsigned int len_left = skb->len - head_len;
+	unsigned int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+	int eop = (len_left == 0);
+
+	/* Preload TCP csum field with IP pseudo hdr calculated
+	 * with IP length set to zero.  HW will later add in length
+	 * to each TCP segment resulting from the TSO.
+	 */
+
+	if (skb->protocol == __constant_htons(ETH_P_IP)) {
+		ip_hdr(skb)->check = 0;
+		tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
+			ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
+	} else if (skb->protocol == __constant_htons(ETH_P_IPV6)) {
+		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+			&ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
+	}
+
+	/* Queue the main skb fragment */
+	enic_queue_wq_desc_tso(wq, skb,
+		pci_map_single(enic->pdev, skb->data,
+			head_len, PCI_DMA_TODEVICE),
+		head_len,
+		mss, hdr_len,
+		vlan_tag_insert, vlan_tag,
+		eop);
+
+	if (!eop)
+		enic_queue_wq_skb_cont(enic, wq, skb, len_left);
+}
+
+static inline void enic_queue_wq_skb(struct enic *enic,
+	struct vnic_wq *wq, struct sk_buff *skb)
+{
+	unsigned int mss = skb_shinfo(skb)->gso_size;
+	unsigned int vlan_tag = 0;
+	int vlan_tag_insert = 0;
+
+	if (enic->vlan_group && vlan_tx_tag_present(skb)) {
+		/* VLAN tag from trunking driver */
+		vlan_tag_insert = 1;
+		vlan_tag = vlan_tx_tag_get(skb);
+	}
+
+	if (mss)
+		enic_queue_wq_skb_tso(enic, wq, skb, mss,
+			vlan_tag_insert, vlan_tag);
+	else if	(skb->ip_summed == CHECKSUM_PARTIAL)
+		enic_queue_wq_skb_csum_l4(enic, wq, skb,
+			vlan_tag_insert, vlan_tag);
+	else
+		enic_queue_wq_skb_vlan(enic, wq, skb,
+			vlan_tag_insert, vlan_tag);
+}
+
+/* netif_tx_lock held, process context with BHs disabled */
+static int enic_hard_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+	struct enic *enic = netdev_priv(netdev);
+	struct vnic_wq *wq = &enic->wq[0];
+	unsigned long flags;
+
+	if (skb->len <= 0) {
+		dev_kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
+
+	/* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs,
+	 * which is very likely.  In the off chance it's going to take
+	 * more than ENIC_NON_TSO_MAX_DESC, linearize the skb.
+	 */
+
+	if (skb_shinfo(skb)->gso_size == 0 &&
+	    skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC &&
+	    skb_linearize(skb)) {
+		dev_kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
+
+	spin_lock_irqsave(&enic->wq_lock[0], flags);
+
+	if (vnic_wq_desc_avail(wq) < skb_shinfo(skb)->nr_frags + 1) {
+		netif_stop_queue(netdev);
+		/* This is a hard error, log it */
+		printk(KERN_ERR PFX "%s: BUG! Tx ring full when "
+			"queue awake!\n", netdev->name);
+		spin_unlock_irqrestore(&enic->wq_lock[0], flags);
+		return NETDEV_TX_BUSY;
+	}
+
+	enic_queue_wq_skb(enic, wq, skb);
+
+	if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + 1)
+		netif_stop_queue(netdev);
+
+	netdev->trans_start = jiffies;
+
+	spin_unlock_irqrestore(&enic->wq_lock[0], flags);
+
+	return NETDEV_TX_OK;
+}
+
+/* dev_base_lock rwlock held, nominally process context */
+static struct net_device_stats *enic_get_stats(struct net_device *netdev)
+{
+	struct enic *enic = netdev_priv(netdev);
+	struct vnic_stats *stats;
+
+	spin_lock(&enic->devcmd_lock);
+	vnic_dev_stats_dump(enic->vdev, &stats);
+	spin_unlock(&enic->devcmd_lock);
+
+	enic->net_stats.tx_packets = stats->tx.tx_frames_ok;
+	enic->net_stats.tx_bytes = stats->tx.tx_bytes_ok;
+	enic->net_stats.tx_errors = stats->tx.tx_errors;
+	enic->net_stats.tx_dropped = stats->tx.tx_drops;
+
+	enic->net_stats.rx_packets = stats->rx.rx_frames_ok;
+	enic->net_stats.rx_bytes = stats->rx.rx_bytes_ok;
+	enic->net_stats.rx_errors = stats->rx.rx_errors;
+	enic->net_stats.multicast = stats->rx.rx_multicast_frames_ok;
+	enic->net_stats.rx_crc_errors = stats->rx.rx_crc_errors;
+	enic->net_stats.rx_dropped = stats->rx.rx_no_bufs;
+
+	return &enic->net_stats;
+}
+
+static void enic_reset_mcaddrs(struct enic *enic)
+{
+	enic->mc_count = 0;
+}
+
+static int enic_set_mac_addr(struct net_device *netdev, char *addr)
+{
+	if (!is_valid_ether_addr(addr))
+		return -EADDRNOTAVAIL;
+
+	memcpy(netdev->dev_addr, addr, netdev->addr_len);
+
+	return 0;
+}
+
+/* netif_tx_lock held, BHs disabled */
+static void enic_set_multicast_list(struct net_device *netdev)
+{
+	struct enic *enic = netdev_priv(netdev);
+	struct dev_mc_list *list = netdev->mc_list;
+	int directed = 1;
+	int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0;
+	int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0;
+	int promisc = (netdev->flags & IFF_PROMISC) ? 1 : 0;
+	int allmulti = (netdev->flags & IFF_ALLMULTI) ||
+	    (netdev->mc_count > ENIC_MULTICAST_PERFECT_FILTERS);
+	u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
+	unsigned int mc_count = netdev->mc_count;
+	unsigned int i, j;
+
+	if (mc_count > ENIC_MULTICAST_PERFECT_FILTERS)
+		mc_count = ENIC_MULTICAST_PERFECT_FILTERS;
+
+	spin_lock(&enic->devcmd_lock);
+
+	vnic_dev_packet_filter(enic->vdev, directed,
+		multicast, broadcast, promisc, allmulti);
+
+	/* Is there an easier way?  Trying to minimize the
+	 * calls to add/del multicast addrs.  We keep the
+	 * addrs from the last call in enic->mc_addr and
+	 * look for changes to add/del.
+	 */
+
+	for (i = 0; list && i < mc_count; i++) {
+		memcpy(mc_addr[i], list->dmi_addr, ETH_ALEN);
+		list = list->next;
+	}
+
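+	/* Two set differences: the first loop deletes addrs that were
+	 * in the previous list but are not in the new one; the second
+	 * adds addrs that are in the new list but were not in the
+	 * previous one.
+	 */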
+	for (i = 0; i < enic->mc_count; i++) {
+		for (j = 0; j < mc_count; j++)
+			if (compare_ether_addr(enic->mc_addr[i],
+				mc_addr[j]) == 0)
+				break;
+		if (j == mc_count)
+			enic_del_multicast_addr(enic, enic->mc_addr[i]);
+	}
+
+	for (i = 0; i < mc_count; i++) {
+		for (j = 0; j < enic->mc_count; j++)
+			if (compare_ether_addr(mc_addr[i],
+				enic->mc_addr[j]) == 0)
+				break;
+		if (j == enic->mc_count)
+			enic_add_multicast_addr(enic, mc_addr[i]);
+	}
+
+	/* Save the list to compare against next time
+	 */
+
+	for (i = 0; i < mc_count; i++)
+		memcpy(enic->mc_addr[i], mc_addr[i], ETH_ALEN);
+
+	enic->mc_count = mc_count;
+
+	spin_unlock(&enic->devcmd_lock);
+}
+
+/* rtnl lock is held */
+static void enic_vlan_rx_register(struct net_device *netdev,
+	struct vlan_group *vlan_group)
+{
+	struct enic *enic = netdev_priv(netdev);
+	enic->vlan_group = vlan_group;
+}
+
+/* rtnl lock is held */
+static void enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+{
+	struct enic *enic = netdev_priv(netdev);
+
+	spin_lock(&enic->devcmd_lock);
+	enic_add_vlan(enic, vid);
+	spin_unlock(&enic->devcmd_lock);
+}
+
+/* rtnl lock is held */
+static void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+{
+	struct enic *enic = netdev_priv(netdev);
+
+	spin_lock(&enic->devcmd_lock);
+	enic_del_vlan(enic, vid);
+	spin_unlock(&enic->devcmd_lock);
+}
+
+/* netif_tx_lock held, BHs disabled */
+static void enic_tx_timeout(struct net_device *netdev)
+{
+	struct enic *enic = netdev_priv(netdev);
+	schedule_work(&enic->reset);
+}
+
+static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
+{
+	struct enic *enic = vnic_dev_priv(rq->vdev);
+
+	if (!buf->os_buf)
+		return;
+
+	pci_unmap_single(enic->pdev, buf->dma_addr,
+		buf->len, PCI_DMA_FROMDEVICE);
+	dev_kfree_skb_any(buf->os_buf);
+}
+
+static inline struct sk_buff *enic_rq_alloc_skb(unsigned int size)
+{
+	struct sk_buff *skb;
+
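+	/* Reserving NET_IP_ALIGN (typically 2) bytes shifts the
+	 * 14-byte Ethernet header so the IP header that follows
+	 * lands 4-byte aligned.
+	 */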
+	skb = dev_alloc_skb(size + NET_IP_ALIGN);
+
+	if (skb)
+		skb_reserve(skb, NET_IP_ALIGN);
+
+	return skb;
+}
+
+static int enic_rq_alloc_buf(struct vnic_rq *rq)
+{
+	struct enic *enic = vnic_dev_priv(rq->vdev);
+	struct sk_buff *skb;
+	unsigned int len = enic->netdev->mtu + ETH_HLEN;
+	unsigned int os_buf_index = 0;
+	dma_addr_t dma_addr;
+
+	skb = enic_rq_alloc_skb(len);
+	if (!skb)
+		return -ENOMEM;
+
+	dma_addr = pci_map_single(enic->pdev, skb->data,
+		len, PCI_DMA_FROMDEVICE);
+
+	enic_queue_rq_desc(rq, skb, os_buf_index,
+		dma_addr, len);
+
+	return 0;
+}
+
+static int enic_get_skb_header(struct sk_buff *skb, void **iphdr,
+	void **tcph, u64 *hdr_flags, void *priv)
+{
+	struct cq_enet_rq_desc *cq_desc = priv;
+	unsigned int ip_len;
+	struct iphdr *iph;
+
+	u8 type, color, eop, sop, ingress_port, vlan_stripped;
+	u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
+	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
+	u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
+	u8 packet_error;
+	u16 q_number, completed_index, bytes_written, vlan, checksum;
+	u32 rss_hash;
+
+	cq_enet_rq_desc_dec(cq_desc,
+		&type, &color, &q_number, &completed_index,
+		&ingress_port, &fcoe, &eop, &sop, &rss_type,
+		&csum_not_calc, &rss_hash, &bytes_written,
+		&packet_error, &vlan_stripped, &vlan, &checksum,
+		&fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
+		&fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
+		&ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
+		&fcs_ok);
+
+	if (!(ipv4 && tcp && !ipv4_fragment))
+		return -1;
+
+	skb_reset_network_header(skb);
+	iph = ip_hdr(skb);
+
+	ip_len = ip_hdrlen(skb);
+	skb_set_transport_header(skb, ip_len);
+
+	/* check if ip header and tcp header are complete */
+	if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb))
+		return -1;
+
+	*hdr_flags = LRO_IPV4 | LRO_TCP;
+	*tcph = tcp_hdr(skb);
+	*iphdr = iph;
+
+	return 0;
+}
+
+static void enic_rq_indicate_buf(struct vnic_rq *rq,
+	struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
+	int skipped, void *opaque)
+{
+	struct enic *enic = vnic_dev_priv(rq->vdev);
+	struct sk_buff *skb;
+
+	u8 type, color, eop, sop, ingress_port, vlan_stripped;
+	u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
+	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
+	u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
+	u8 packet_error;
+	u16 q_number, completed_index, bytes_written, vlan, checksum;
+	u32 rss_hash;
+
+	if (skipped)
+		return;
+
+	skb = buf->os_buf;
+	prefetch(skb->data - NET_IP_ALIGN);
+	pci_unmap_single(enic->pdev, buf->dma_addr,
+		buf->len, PCI_DMA_FROMDEVICE);
+
+	cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
+		&type, &color, &q_number, &completed_index,
+		&ingress_port, &fcoe, &eop, &sop, &rss_type,
+		&csum_not_calc, &rss_hash, &bytes_written,
+		&packet_error, &vlan_stripped, &vlan, &checksum,
+		&fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
+		&fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
+		&ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
+		&fcs_ok);
+
+	if (packet_error) {
+
+		if (bytes_written > 0 && !fcs_ok) {
+			if (net_ratelimit())
+				printk(KERN_ERR PFX
+					"%s: packet error: bad FCS\n",
+					enic->netdev->name);
+		}
+
+		dev_kfree_skb_any(skb);
+
+		return;
+	}
+
+	if (eop && bytes_written > 0) {
+
+		/* Good receive
+		 */
+
+		skb_put(skb, bytes_written);
+		skb->protocol = eth_type_trans(skb, enic->netdev);
+
+		if (enic->csum_rx_enabled && !csum_not_calc) {
+			skb->csum = htons(checksum);
+			skb->ip_summed = CHECKSUM_COMPLETE;
+		}
+
+		skb->dev = enic->netdev;
+		enic->netdev->last_rx = jiffies;
+
+		if (enic->vlan_group && vlan_stripped) {
+
+			if (ENIC_SETTING(enic, LRO))
+				lro_vlan_hwaccel_receive_skb(&enic->lro_mgr,
+					skb, enic->vlan_group,
+					vlan, cq_desc);
+			else
+				vlan_hwaccel_receive_skb(skb,
+					enic->vlan_group, vlan);
+
+		} else {
+
+			if (ENIC_SETTING(enic, LRO))
+				lro_receive_skb(&enic->lro_mgr, skb, cq_desc);
+			else
+				netif_receive_skb(skb);
+
+		}
+
+	} else {
+
+		/* Buffer overflow
+		 */
+
+		dev_kfree_skb_any(skb);
+	}
+}
+
+static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
+	u8 type, u16 q_number, u16 completed_index, void *opaque)
+{
+	struct enic *enic = vnic_dev_priv(vdev);
+
+	vnic_rq_service(&enic->rq[q_number], cq_desc,
+		completed_index, VNIC_RQ_RETURN_DESC,
+		enic_rq_indicate_buf, opaque);
+
+	return 0;
+}
+
+static void enic_rq_drop_buf(struct vnic_rq *rq,
+	struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
+	int skipped, void *opaque)
+{
+	struct enic *enic = vnic_dev_priv(rq->vdev);
+	struct sk_buff *skb = buf->os_buf;
+
+	if (skipped)
+		return;
+
+	pci_unmap_single(enic->pdev, buf->dma_addr,
+		buf->len, PCI_DMA_FROMDEVICE);
+
+	dev_kfree_skb_any(skb);
+}
+
+static int enic_rq_service_drop(struct vnic_dev *vdev, struct cq_desc *cq_desc,
+	u8 type, u16 q_number, u16 completed_index, void *opaque)
+{
+	struct enic *enic = vnic_dev_priv(vdev);
+
+	vnic_rq_service(&enic->rq[q_number], cq_desc,
+		completed_index, VNIC_RQ_RETURN_DESC,
+		enic_rq_drop_buf, opaque);
+
+	return 0;
+}
+
+static int enic_poll(struct napi_struct *napi, int budget)
+{
+	struct enic *enic = container_of(napi, struct enic, napi);
+	struct net_device *netdev = enic->netdev;
+	unsigned int rq_work_to_do = budget;
+	unsigned int wq_work_to_do = -1; /* no limit */
+	unsigned int work_done, rq_work_done, wq_work_done;
+
+	/* Service RQ (first) and WQ
+	 */
+
+	rq_work_done = vnic_cq_service(&enic->cq[ENIC_CQ_RQ],
+		rq_work_to_do, enic_rq_service, NULL);
+
+	wq_work_done = vnic_cq_service(&enic->cq[ENIC_CQ_WQ],
+		wq_work_to_do, enic_wq_service, NULL);
+
+	/* Accumulate intr event credits for this polling
+	 * cycle.  An intr event is the completion of
+	 * a WQ or RQ packet.
+	 */
+
+	work_done = rq_work_done + wq_work_done;
+
+	if (work_done > 0)
+		vnic_intr_return_credits(&enic->intr[ENIC_INTX_WQ_RQ],
+			work_done,
+			0 /* don't unmask intr */,
+			0 /* don't reset intr timer */);
+
+	if (rq_work_done > 0) {
+
+		/* Replenish RQ
+		 */
+
+		vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
+
+	} else {
+
+		/* If no work done, flush all LROs and exit polling
+		 */
+
+		if (ENIC_SETTING(enic, LRO))
+			lro_flush_all(&enic->lro_mgr);
+
+		netif_rx_complete(netdev, napi);
+		vnic_intr_unmask(&enic->intr[ENIC_MSIX_RQ]);
+	}
+
+	return rq_work_done;
+}
+
+static int enic_poll_msix(struct napi_struct *napi, int budget)
+{
+	struct enic *enic = container_of(napi, struct enic, napi);
+	struct net_device *netdev = enic->netdev;
+	unsigned int work_to_do = budget;
+	unsigned int work_done;
+
+	/* Service RQ
+	 */
+
+	work_done = vnic_cq_service(&enic->cq[ENIC_CQ_RQ],
+		work_to_do, enic_rq_service, NULL);
+
+	if (work_done > 0) {
+
+		/* Replenish RQ
+		 */
+
+		vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
+
+		/* Accumulate intr event credits for this polling
+		 * cycle.  An intr event is the completion of
+		 * a WQ or RQ packet.
+		 */
+
+		vnic_intr_return_credits(&enic->intr[ENIC_MSIX_RQ],
+			work_done,
+			0 /* don't unmask intr */,
+			0 /* don't reset intr timer */);
+	} else {
+
+		/* If no work done, flush all LROs and exit polling
+		 */
+
+		if (ENIC_SETTING(enic, LRO))
+			lro_flush_all(&enic->lro_mgr);
+
+		netif_rx_complete(netdev, napi);
+		vnic_intr_unmask(&enic->intr[ENIC_MSIX_RQ]);
+	}
+
+	return work_done;
+}
+
+static void enic_notify_timer(unsigned long data)
+{
+	struct enic *enic = (struct enic *)data;
+
+	enic_notify_check(enic);
+
+	mod_timer(&enic->notify_timer,
+		round_jiffies(jiffies + ENIC_NOTIFY_TIMER_PERIOD));
+}
+
+static void enic_free_intr(struct enic *enic)
+{
+	struct net_device *netdev = enic->netdev;
+	unsigned int i;
+
+	switch (vnic_dev_get_intr_mode(enic->vdev)) {
+	case VNIC_DEV_INTR_MODE_INTX:
+	case VNIC_DEV_INTR_MODE_MSI:
+		free_irq(enic->pdev->irq, netdev);
+		break;
+	case VNIC_DEV_INTR_MODE_MSIX:
+		for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
+			if (enic->msix[i].requested)
+				free_irq(enic->msix_entry[i].vector,
+					enic->msix[i].devid);
+		break;
+	default:
+		break;
+	}
+}
+
+static int enic_request_intr(struct enic *enic)
+{
+	struct net_device *netdev = enic->netdev;
+	unsigned int i;
+	int err = 0;
+
+	switch (vnic_dev_get_intr_mode(enic->vdev)) {
+
+	case VNIC_DEV_INTR_MODE_INTX:
+
+		err = request_irq(enic->pdev->irq, enic_isr_legacy,
+			IRQF_SHARED, netdev->name, netdev);
+		break;
+
+	case VNIC_DEV_INTR_MODE_MSI:
+
+		err = request_irq(enic->pdev->irq, enic_isr_msi,
+			0, netdev->name, enic);
+		break;
+
+	case VNIC_DEV_INTR_MODE_MSIX:
+
+		sprintf(enic->msix[ENIC_MSIX_RQ].devname,
+			"%.11s-rx", netdev->name);
+		enic->msix[ENIC_MSIX_RQ].isr = enic_isr_msix_rq;
+		enic->msix[ENIC_MSIX_RQ].devid = enic;
+
+		sprintf(enic->msix[ENIC_MSIX_WQ].devname,
+			"%.11s-tx", netdev->name);
+		enic->msix[ENIC_MSIX_WQ].isr = enic_isr_msix_wq;
+		enic->msix[ENIC_MSIX_WQ].devid = enic;
+
+		sprintf(enic->msix[ENIC_MSIX_ERR].devname,
+			"%.11s-err", netdev->name);
+		enic->msix[ENIC_MSIX_ERR].isr = enic_isr_msix_err;
+		enic->msix[ENIC_MSIX_ERR].devid = enic;
+
+		sprintf(enic->msix[ENIC_MSIX_NOTIFY].devname,
+			"%.11s-notify", netdev->name);
+		enic->msix[ENIC_MSIX_NOTIFY].isr = enic_isr_msix_notify;
+		enic->msix[ENIC_MSIX_NOTIFY].devid = enic;
+
+		for (i = 0; i < ARRAY_SIZE(enic->msix); i++) {
+			err = request_irq(enic->msix_entry[i].vector,
+				enic->msix[i].isr, 0,
+				enic->msix[i].devname,
+				enic->msix[i].devid);
+			if (err) {
+				enic_free_intr(enic);
+				break;
+			}
+			enic->msix[i].requested = 1;
+		}
+
+		break;
+
+	default:
+		break;
+	}
+
+	return err;
+}
+
+static int enic_notify_set(struct enic *enic)
+{
+	int err;
+
+	switch (vnic_dev_get_intr_mode(enic->vdev)) {
+	case VNIC_DEV_INTR_MODE_INTX:
+		err = vnic_dev_notify_set(enic->vdev, ENIC_INTX_NOTIFY);
+		break;
+	case VNIC_DEV_INTR_MODE_MSIX:
+		err = vnic_dev_notify_set(enic->vdev, ENIC_MSIX_NOTIFY);
+		break;
+	default:
+		err = vnic_dev_notify_set(enic->vdev, -1 /* no intr */);
+		break;
+	}
+
+	return err;
+}
+
+static void enic_notify_timer_start(struct enic *enic)
+{
+	switch (vnic_dev_get_intr_mode(enic->vdev)) {
+	case VNIC_DEV_INTR_MODE_MSI:
+		mod_timer(&enic->notify_timer, jiffies);
+		break;
+	default:
+		/* Using intr for notification for INTx/MSI-X */
+		break;
+	}
+}
+
+/* rtnl lock is held, process context */
+static int enic_open(struct net_device *netdev)
+{
+	struct enic *enic = netdev_priv(netdev);
+	unsigned int i;
+	int err;
+
+	for (i = 0; i < enic->rq_count; i++) {
+		err = vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf);
+		if (err) {
+			printk(KERN_ERR PFX
+				"%s: Unable to alloc receive buffers.\n",
+				netdev->name);
+			return err;
+		}
+	}
+
+	for (i = 0; i < enic->wq_count; i++)
+		vnic_wq_enable(&enic->wq[i]);
+	for (i = 0; i < enic->rq_count; i++)
+		vnic_rq_enable(&enic->rq[i]);
+
+	enic_add_station_addr(enic);
+	enic_set_multicast_list(netdev);
+
+	netif_wake_queue(netdev);
+	napi_enable(&enic->napi);
+	vnic_dev_enable(enic->vdev);
+
+	for (i = 0; i < enic->intr_count; i++)
+		vnic_intr_unmask(&enic->intr[i]);
+
+	enic_notify_timer_start(enic);
+
+	return 0;
+}
+
+/* rtnl lock is held, process context */
+static int enic_stop(struct net_device *netdev)
+{
+	struct enic *enic = netdev_priv(netdev);
+	unsigned int i;
+	int err;
+
+	del_timer_sync(&enic->notify_timer);
+
+	vnic_dev_disable(enic->vdev);
+	napi_disable(&enic->napi);
+	netif_stop_queue(netdev);
+
+	for (i = 0; i < enic->intr_count; i++)
+		vnic_intr_mask(&enic->intr[i]);
+
+	for (i = 0; i < enic->wq_count; i++) {
+		err = vnic_wq_disable(&enic->wq[i]);
+		if (err)
+			return err;
+	}
+	for (i = 0; i < enic->rq_count; i++) {
+		err = vnic_rq_disable(&enic->rq[i]);
+		if (err)
+			return err;
+	}
+
+	(void)vnic_cq_service(&enic->cq[ENIC_CQ_RQ],
+		-1, enic_rq_service_drop, NULL);
+	(void)vnic_cq_service(&enic->cq[ENIC_CQ_WQ],
+		-1, enic_wq_service, NULL);
+
+	for (i = 0; i < enic->wq_count; i++)
+		vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);
+	for (i = 0; i < enic->rq_count; i++)
+		vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
+	for (i = 0; i < enic->cq_count; i++)
+		vnic_cq_clean(&enic->cq[i]);
+	for (i = 0; i < enic->intr_count; i++)
+		vnic_intr_clean(&enic->intr[i]);
+
+	return 0;
+}
+
+static int enic_change_mtu(struct net_device *netdev, int new_mtu)
+{
+	struct enic *enic = netdev_priv(netdev);
+	int running = netif_running(netdev);
+
+	if (running)
+		enic_stop(netdev);
+
+	if (new_mtu < ENIC_MIN_MTU)
+		new_mtu = ENIC_MIN_MTU;
+	if (new_mtu > ENIC_MAX_MTU)
+		new_mtu = ENIC_MAX_MTU;
+
+	netdev->mtu = new_mtu;
+
+	if (netdev->mtu > enic->port_mtu)
+		printk(KERN_WARNING PFX
+			"%s: interface MTU (%d) set higher "
+			"than port MTU (%d)\n",
+			netdev->name, netdev->mtu, enic->port_mtu);
+
+	if (running)
+		enic_open(netdev);
+
+	return 0;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void enic_poll_controller(struct net_device *netdev)
+{
+	struct enic *enic = netdev_priv(netdev);
+	struct vnic_dev *vdev = enic->vdev;
+
+	switch (vnic_dev_get_intr_mode(vdev)) {
+	case VNIC_DEV_INTR_MODE_MSIX:
+		enic_isr_msix_rq(enic->pdev->irq, enic);
+		enic_isr_msix_wq(enic->pdev->irq, enic);
+		break;
+	case VNIC_DEV_INTR_MODE_MSI:
+		enic_isr_msi(enic->pdev->irq, enic);
+		break;
+	case VNIC_DEV_INTR_MODE_INTX:
+		enic_isr_legacy(enic->pdev->irq, netdev);
+		break;
+	default:
+		break;
+	}
+}
+#endif
+
+static int enic_dev_wait(struct vnic_dev *vdev,
+	int (*start)(struct vnic_dev *, int),
+	int (*finished)(struct vnic_dev *, int *),
+	int arg)
+{
+	unsigned long time;
+	int done;
+	int err;
+
+	BUG_ON(in_interrupt());
+
+	err = start(vdev, arg);
+	if (err)
+		return err;
+
+	/* Wait for func to complete...2 seconds max
+	 */
+
+	time = jiffies + (HZ * 2);
+	do {
+
+		err = finished(vdev, &done);
+		if (err)
+			return err;
+
+		if (done)
+			return 0;
+
+		schedule_timeout_uninterruptible(HZ / 10);
+
+	} while (time_after(time, jiffies));
+
+	return -ETIMEDOUT;
+}
+
+static int enic_dev_open(struct enic *enic)
+{
+	int err;
+
+	err = enic_dev_wait(enic->vdev, vnic_dev_open,
+		vnic_dev_open_done, 0);
+	if (err)
+		printk(KERN_ERR PFX
+			"vNIC device open failed, err %d.\n", err);
+
+	return err;
+}
+
+static int enic_dev_soft_reset(struct enic *enic)
+{
+	int err;
+
+	err = enic_dev_wait(enic->vdev, vnic_dev_soft_reset,
+		vnic_dev_soft_reset_done, 0);
+	if (err)
+		printk(KERN_ERR PFX
+			"vNIC soft reset failed, err %d.\n", err);
+
+	return err;
+}
+
+static void enic_reset(struct work_struct *work)
+{
+	struct enic *enic = container_of(work, struct enic, reset);
+
+	if (!netif_running(enic->netdev))
+		return;
+
+	rtnl_lock();
+
+	spin_lock(&enic->devcmd_lock);
+	vnic_dev_hang_notify(enic->vdev);
+	spin_unlock(&enic->devcmd_lock);
+
+	enic_stop(enic->netdev);
+	enic_dev_soft_reset(enic);
+	enic_reset_mcaddrs(enic);
+	enic_init_vnic_resources(enic);
+	enic_open(enic->netdev);
+
+	rtnl_unlock();
+}
+
+static int enic_set_intr_mode(struct enic *enic)
+{
+	unsigned int n = ARRAY_SIZE(enic->rq);
+	unsigned int m = ARRAY_SIZE(enic->wq);
+	unsigned int i;
+
+	/* Set interrupt mode (INTx, MSI, MSI-X) depending on
+	 * system capabilities.
+	 *
+	 * Try MSI-X first
+	 *
+	 * We need n RQs, m WQs, n+m CQs, and n+m+2 INTRs
+	 * (the second to last INTR is used for WQ/RQ errors)
+	 * (the last INTR is used for notifications)
+	 */
+
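+	/* For example, with n = 1 RQ and m = 1 WQ (the sizes above),
+	 * MSI-X needs n + m + 2 = 4 vectors, which matches
+	 * ENIC_MSIX_MAX (rx, tx, err, notify).
+	 */
+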
+	BUG_ON(ARRAY_SIZE(enic->msix_entry) < n + m + 2);
+	for (i = 0; i < n + m + 2; i++)
+		enic->msix_entry[i].entry = i;
+
+	if (enic->config.intr_mode < 1 &&
+	    enic->rq_count >= n &&
+	    enic->wq_count >= m &&
+	    enic->cq_count >= n + m &&
+	    enic->intr_count >= n + m + 2 &&
+	    !pci_enable_msix(enic->pdev, enic->msix_entry, n + m + 2)) {
+
+		enic->rq_count = n;
+		enic->wq_count = m;
+		enic->cq_count = n + m;
+		enic->intr_count = n + m + 2;
+
+		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSIX);
+
+		return 0;
+	}
+
+	/* Next try MSI
+	 *
+	 * We need 1 RQ, 1 WQ, 2 CQs, and 1 INTR
+	 */
+
+	if (enic->config.intr_mode < 2 &&
+	    enic->rq_count >= 1 &&
+	    enic->wq_count >= 1 &&
+	    enic->cq_count >= 2 &&
+	    enic->intr_count >= 1 &&
+	    !pci_enable_msi(enic->pdev)) {
+
+		enic->rq_count = 1;
+		enic->wq_count = 1;
+		enic->cq_count = 2;
+		enic->intr_count = 1;
+
+		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSI);
+
+		return 0;
+	}
+
+	/* Next try INTx
+	 *
+	 * We need 1 RQ, 1 WQ, 2 CQs, and 3 INTRs
+	 * (the first INTR is used for WQ/RQ)
+	 * (the second INTR is used for WQ/RQ errors)
+	 * (the last INTR is used for notifications)
+	 */
+
+	if (enic->config.intr_mode < 3 &&
+	    enic->rq_count >= 1 &&
+	    enic->wq_count >= 1 &&
+	    enic->cq_count >= 2 &&
+	    enic->intr_count >= 3) {
+
+		enic->rq_count = 1;
+		enic->wq_count = 1;
+		enic->cq_count = 2;
+		enic->intr_count = 3;
+
+		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_INTX);
+
+		return 0;
+	}
+
+	vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
+
+	return -EINVAL;
+}
+
+static void enic_clear_intr_mode(struct enic *enic)
+{
+	switch (vnic_dev_get_intr_mode(enic->vdev)) {
+	case VNIC_DEV_INTR_MODE_MSIX:
+		pci_disable_msix(enic->pdev);
+		break;
+	case VNIC_DEV_INTR_MODE_MSI:
+		pci_disable_msi(enic->pdev);
+		break;
+	default:
+		break;
+	}
+
+	vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
+}
+
+static void enic_iounmap(struct enic *enic)
+{
+	if (enic->bar0.vaddr)
+		iounmap(enic->bar0.vaddr);
+}
+
+static int __devinit enic_probe(struct pci_dev *pdev,
+	const struct pci_device_id *ent)
+{
+	struct net_device *netdev;
+	struct enic *enic;
+	int using_dac = 0;
+	unsigned int i;
+	int err;
+
+	const u8 rss_default_cpu = 0;
+	const u8 rss_hash_type = 0;
+	const u8 rss_hash_bits = 0;
+	const u8 rss_base_cpu = 0;
+	const u8 rss_enable = 0;
+	const u8 tso_ipid_split_en = 0;
+	const u8 ig_vlan_strip_en = 1;
+
+	/* Allocate net device structure and initialize.  Private
+	 * instance data is initialized to zero.
+	 */
+
+	netdev = alloc_etherdev(sizeof(struct enic));
+	if (!netdev) {
+		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
+		return -ENOMEM;
+	}
+
+	/* Set the netdev name early so intr vectors are properly
+	 * named and any error msgs can include netdev->name
+	 */
+
+	rtnl_lock();
+	err = dev_alloc_name(netdev, netdev->name);
+	rtnl_unlock();
+	if (err < 0) {
+		printk(KERN_ERR PFX "Unable to allocate netdev name.\n");
+		goto err_out_free_netdev;
+	}
+
+	pci_set_drvdata(pdev, netdev);
+
+	SET_NETDEV_DEV(netdev, &pdev->dev);
+
+	enic = netdev_priv(netdev);
+	enic->netdev = netdev;
+	enic->pdev = pdev;
+
+	/* Setup PCI resources
+	 */
+
+	err = pci_enable_device(pdev);
+	if (err) {
+		printk(KERN_ERR PFX
+			"%s: Cannot enable PCI device, aborting.\n",
+			netdev->name);
+		goto err_out_free_netdev;
+	}
+
+	err = pci_request_regions(pdev, DRV_NAME);
+	if (err) {
+		printk(KERN_ERR PFX
+			"%s: Cannot request PCI regions, aborting.\n",
+			netdev->name);
+		goto err_out_disable_device;
+	}
+
+	pci_set_master(pdev);
+
+	/* Query PCI controller on system for DMA addressing
+	 * limitation for the device.  Try 40-bit first, and
+	 * fall back to 32-bit.
+	 */
+
+	err = pci_set_dma_mask(pdev, DMA_40BIT_MASK);
+	if (err) {
+		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+		if (err) {
+			printk(KERN_ERR PFX
+				"%s: No usable DMA configuration, aborting.\n",
+				netdev->name);
+			goto err_out_release_regions;
+		}
+		err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+		if (err) {
+			printk(KERN_ERR PFX
+				"%s: Unable to obtain 32-bit DMA "
+				"for consistent allocations, aborting.\n",
+				netdev->name);
+			goto err_out_release_regions;
+		}
+	} else {
+		err = pci_set_consistent_dma_mask(pdev, DMA_40BIT_MASK);
+		if (err) {
+			printk(KERN_ERR PFX
+				"%s: Unable to obtain 40-bit DMA "
+				"for consistent allocations, aborting.\n",
+				netdev->name);
+			goto err_out_release_regions;
+		}
+		using_dac = 1;
+	}
+
+	/* Map vNIC resources from BAR0
+	 */
+
+	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+		printk(KERN_ERR PFX
+			"%s: BAR0 not memory-map'able, aborting.\n",
+			netdev->name);
+		err = -ENODEV;
+		goto err_out_release_regions;
+	}
+
+	enic->bar0.bus_addr = pci_resource_start(pdev, 0);
+	enic->bar0.len = pci_resource_len(pdev, 0);
+	enic->bar0.vaddr = pci_iomap(pdev, 0, enic->bar0.len);
+
+	if (!enic->bar0.vaddr) {
+		printk(KERN_ERR PFX
+			"%s: Cannot memory-map BAR0 res hdr, aborting.\n",
+			netdev->name);
+		err = -ENODEV;
+		goto err_out_release_regions;
+	}
+
+	/* Register vNIC device
+	 */
+
+	enic->vdev = vnic_dev_register(NULL, enic, pdev, &enic->bar0);
+	if (!enic->vdev) {
+		printk(KERN_ERR PFX
+			"%s: vNIC registration failed, aborting.\n",
+			netdev->name);
+		err = -ENODEV;
+		goto err_out_iounmap;
+	}
+
+	/* Issue device open to get device in known state
+	 */
+
+	err = enic_dev_open(enic);
+	if (err) {
+		printk(KERN_ERR PFX
+			"%s: vNIC dev open failed, aborting.\n",
+			netdev->name);
+		goto err_out_vnic_unregister;
+	}
+
+	/* Issue device init to initialize the vnic-to-switch link.
+	 * We'll start with carrier off and wait for link UP
+	 * notification later to turn on carrier.  We don't need
+	 * to wait here for the vnic-to-switch link initialization
+	 * to complete; link UP notification is the indication that
+	 * the process is complete.
+	 */
+
+	netif_carrier_off(netdev);
+
+	err = vnic_dev_init(enic->vdev, 0);
+	if (err) {
+		printk(KERN_ERR PFX
+			"%s: vNIC dev init failed, aborting.\n",
+			netdev->name);
+		goto err_out_dev_close;
+	}
+
+	/* Get vNIC configuration
+	 */
+
+	err = enic_get_vnic_config(enic);
+	if (err) {
+		printk(KERN_ERR PFX
+			"%s: Get vNIC configuration failed, aborting.\n",
+			netdev->name);
+		goto err_out_dev_close;
+	}
+
+	/* Get available resource counts
+	 */
+
+	enic_get_res_counts(enic);
+
+	/* Set interrupt mode based on resource counts and system
+	 * capabilities
+	 */
+
+	err = enic_set_intr_mode(enic);
+	if (err) {
+		printk(KERN_ERR PFX
+			"%s: Failed to set intr mode, aborting.\n",
+			netdev->name);
+		goto err_out_dev_close;
+	}
+
+	/* Request interrupt vector(s)
+	 */
+
+	err = enic_request_intr(enic);
+	if (err) {
+		printk(KERN_ERR PFX "%s: Unable to request irq.\n",
+			netdev->name);
+		goto err_out_dev_close;
+	}
+
+	/* Allocate and configure vNIC resources
+	 */
+
+	err = enic_alloc_vnic_resources(enic);
+	if (err) {
+		printk(KERN_ERR PFX
+			"%s: Failed to alloc vNIC resources, aborting.\n",
+			netdev->name);
+		goto err_out_free_vnic_resources;
+	}
+
+	enic_init_vnic_resources(enic);
+
+	/* Enable VLAN tag stripping.  RSS not enabled (yet).
+	 */
+
+	err = enic_set_nic_cfg(enic,
+		rss_default_cpu, rss_hash_type,
+		rss_hash_bits, rss_base_cpu,
+		rss_enable, tso_ipid_split_en,
+		ig_vlan_strip_en);
+	if (err) {
+		printk(KERN_ERR PFX
+			"%s: Failed to config nic, aborting.\n",
+			netdev->name);
+		goto err_out_free_vnic_resources;
+	}
+
+	/* Setup notification buffer area
+	 */
+
+	err = enic_notify_set(enic);
+	if (err) {
+		printk(KERN_ERR PFX
+			"%s: Failed to alloc notify buffer, aborting.\n",
+			netdev->name);
+		goto err_out_free_vnic_resources;
+	}
+
+	/* Setup notification timer, HW reset task, and locks
+	 */
+
+	init_timer(&enic->notify_timer);
+	enic->notify_timer.function = enic_notify_timer;
+	enic->notify_timer.data = (unsigned long)enic;
+
+	INIT_WORK(&enic->reset, enic_reset);
+
+	for (i = 0; i < enic->wq_count; i++)
+		spin_lock_init(&enic->wq_lock[i]);
+
+	spin_lock_init(&enic->devcmd_lock);
+
+	/* Register net device
+	 */
+
+	enic->port_mtu = enic->config.mtu;
+	(void)enic_change_mtu(netdev, enic->port_mtu);
+
+	err = enic_set_mac_addr(netdev, enic->mac_addr);
+	if (err) {
+		printk(KERN_ERR PFX
+			"%s: Invalid MAC address, aborting.\n",
+			netdev->name);
+		goto err_out_notify_unset;
+	}
+
+	netdev->open = enic_open;
+	netdev->stop = enic_stop;
+	netdev->hard_start_xmit = enic_hard_start_xmit;
+	netdev->get_stats = enic_get_stats;
+	netdev->set_multicast_list = enic_set_multicast_list;
+	netdev->change_mtu = enic_change_mtu;
+	netdev->vlan_rx_register = enic_vlan_rx_register;
+	netdev->vlan_rx_add_vid = enic_vlan_rx_add_vid;
+	netdev->vlan_rx_kill_vid = enic_vlan_rx_kill_vid;
+	netdev->tx_timeout = enic_tx_timeout;
+	netdev->watchdog_timeo = 2 * HZ;
+	netdev->ethtool_ops = &enic_ethtool_ops;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	netdev->poll_controller = enic_poll_controller;
+#endif
+
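+	/* INTx and MSI share enic_poll; MSI-X gets enic_poll_msix */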
+	switch (vnic_dev_get_intr_mode(enic->vdev)) {
+	default:
+		netif_napi_add(netdev, &enic->napi, enic_poll, 64);
+		break;
+	case VNIC_DEV_INTR_MODE_MSIX:
+		netif_napi_add(netdev, &enic->napi, enic_poll_msix, 64);
+		break;
+	}
+
+	netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+	if (ENIC_SETTING(enic, TXCSUM))
+		netdev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
+	if (ENIC_SETTING(enic, TSO))
+		netdev->features |= NETIF_F_TSO |
+			NETIF_F_TSO6 | NETIF_F_TSO_ECN;
+	if (using_dac)
+		netdev->features |= NETIF_F_HIGHDMA;
+
+	enic->csum_rx_enabled = ENIC_SETTING(enic, RXCSUM);
+
+	if (ENIC_SETTING(enic, LRO)) {
+		enic->lro_mgr.max_aggr = ENIC_LRO_MAX_AGGR;
+		enic->lro_mgr.max_desc = ENIC_LRO_MAX_DESC;
+		enic->lro_mgr.lro_arr = enic->lro_desc;
+		enic->lro_mgr.get_skb_header = enic_get_skb_header;
+		enic->lro_mgr.features	= LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID;
+		enic->lro_mgr.dev = netdev;
+		enic->lro_mgr.ip_summed = CHECKSUM_COMPLETE;
+		enic->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
+	}
+
+	err = register_netdev(netdev);
+	if (err) {
+		printk(KERN_ERR PFX
+			"%s: Cannot register net device, aborting.\n",
+			netdev->name);
+		goto err_out_notify_unset;
+	}
+
+	return 0;
+
+err_out_notify_unset:
+	vnic_dev_notify_unset(enic->vdev);
+err_out_free_vnic_resources:
+	enic_free_vnic_resources(enic);
+	enic_free_intr(enic);
+err_out_dev_close:
+	vnic_dev_close(enic->vdev);
+err_out_vnic_unregister:
+	enic_clear_intr_mode(enic);
+	vnic_dev_unregister(enic->vdev);
+err_out_iounmap:
+	enic_iounmap(enic);
+err_out_release_regions:
+	pci_release_regions(pdev);
+err_out_disable_device:
+	pci_disable_device(pdev);
+err_out_free_netdev:
+	pci_set_drvdata(pdev, NULL);
+	free_netdev(netdev);
+
+	return err;
+}
+
+static void __devexit enic_remove(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+
+	if (netdev) {
+		struct enic *enic = netdev_priv(netdev);
+
+		flush_scheduled_work();
+		unregister_netdev(netdev);
+		vnic_dev_notify_unset(enic->vdev);
+		enic_free_vnic_resources(enic);
+		enic_free_intr(enic);
+		vnic_dev_close(enic->vdev);
+		enic_clear_intr_mode(enic);
+		vnic_dev_unregister(enic->vdev);
+		enic_iounmap(enic);
+		pci_release_regions(pdev);
+		pci_disable_device(pdev);
+		pci_set_drvdata(pdev, NULL);
+		free_netdev(netdev);
+	}
+}
+
+static struct pci_driver enic_driver = {
+	.name = DRV_NAME,
+	.id_table = enic_id_table,
+	.probe = enic_probe,
+	.remove = __devexit_p(enic_remove),
+};
+
+static int __init enic_init_module(void)
+{
+	printk(KERN_INFO PFX "%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);
+
+	return pci_register_driver(&enic_driver);
+}
+
+static void __exit enic_cleanup_module(void)
+{
+	pci_unregister_driver(&enic_driver);
+}
+
+module_init(enic_init_module);
+module_exit(enic_cleanup_module);
diff --git a/drivers/net/enic/enic_res.c b/drivers/net/enic/enic_res.c
new file mode 100644
index 000000000000..95184b9108ef
--- /dev/null
+++ b/drivers/net/enic/enic_res.c
@@ -0,0 +1,370 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+
+#include "wq_enet_desc.h"
+#include "rq_enet_desc.h"
+#include "cq_enet_desc.h"
+#include "vnic_resource.h"
+#include "vnic_enet.h"
+#include "vnic_dev.h"
+#include "vnic_wq.h"
+#include "vnic_rq.h"
+#include "vnic_cq.h"
+#include "vnic_intr.h"
+#include "vnic_stats.h"
+#include "vnic_nic.h"
+#include "vnic_rss.h"
+#include "enic_res.h"
+#include "enic.h"
+
+int enic_get_vnic_config(struct enic *enic)
+{
+	struct vnic_enet_config *c = &enic->config;
+	int err;
+
+	err = vnic_dev_mac_addr(enic->vdev, enic->mac_addr);
+	if (err) {
+		printk(KERN_ERR PFX "Error getting MAC addr, %d\n", err);
+		return err;
+	}
+
+#define GET_CONFIG(m) \
+	do { \
+		err = vnic_dev_spec(enic->vdev, \
+			offsetof(struct vnic_enet_config, m), \
+			sizeof(c->m), &c->m); \
+		if (err) { \
+			printk(KERN_ERR PFX \
+				"Error getting %s, %d\n", #m, err); \
+			return err; \
+		} \
+	} while (0)
+
+	GET_CONFIG(flags);
+	GET_CONFIG(wq_desc_count);
+	GET_CONFIG(rq_desc_count);
+	GET_CONFIG(mtu);
+	GET_CONFIG(intr_timer);
+	GET_CONFIG(intr_timer_type);
+	GET_CONFIG(intr_mode);
+
+	c->wq_desc_count =
+		min_t(u32, ENIC_MAX_WQ_DESCS,
+		max_t(u32, ENIC_MIN_WQ_DESCS,
+		c->wq_desc_count));
+	c->wq_desc_count &= 0xfffffff0; /* must be aligned to groups of 16 */
+
+	c->rq_desc_count =
+		min_t(u32, ENIC_MAX_RQ_DESCS,
+		max_t(u32, ENIC_MIN_RQ_DESCS,
+		c->rq_desc_count));
+	c->rq_desc_count &= 0xfffffff0; /* must be aligned to groups of 16 */
+
+	if (c->mtu == 0)
+		c->mtu = 1500;
+	c->mtu = min_t(u16, ENIC_MAX_MTU,
+		max_t(u16, ENIC_MIN_MTU,
+		c->mtu));
+
+	c->intr_timer = min_t(u16, VNIC_INTR_TIMER_MAX, c->intr_timer);
+
+	printk(KERN_INFO PFX "vNIC MAC addr %02x:%02x:%02x:%02x:%02x:%02x "
+		"wq/rq %d/%d\n",
+		enic->mac_addr[0], enic->mac_addr[1], enic->mac_addr[2],
+		enic->mac_addr[3], enic->mac_addr[4], enic->mac_addr[5],
+		c->wq_desc_count, c->rq_desc_count);
+	printk(KERN_INFO PFX "vNIC mtu %d csum tx/rx %d/%d tso/lro %d/%d "
+		"intr timer %d\n",
+		c->mtu, ENIC_SETTING(enic, TXCSUM),
+		ENIC_SETTING(enic, RXCSUM), ENIC_SETTING(enic, TSO),
+		ENIC_SETTING(enic, LRO), c->intr_timer);
+
+	return 0;
+}
+
+void enic_add_station_addr(struct enic *enic)
+{
+	vnic_dev_add_addr(enic->vdev, enic->mac_addr);
+}
+
+void enic_add_multicast_addr(struct enic *enic, u8 *addr)
+{
+	vnic_dev_add_addr(enic->vdev, addr);
+}
+
+void enic_del_multicast_addr(struct enic *enic, u8 *addr)
+{
+	vnic_dev_del_addr(enic->vdev, addr);
+}
+
+void enic_add_vlan(struct enic *enic, u16 vlanid)
+{
+	u64 a0 = vlanid, a1 = 0;
+	int wait = 1000;
+	int err;
+
+	err = vnic_dev_cmd(enic->vdev, CMD_VLAN_ADD, &a0, &a1, wait);
+	if (err)
+		printk(KERN_ERR PFX "Can't add vlan id, %d\n", err);
+}
+
+void enic_del_vlan(struct enic *enic, u16 vlanid)
+{
+	u64 a0 = vlanid, a1 = 0;
+	int wait = 1000;
+	int err;
+
+	err = vnic_dev_cmd(enic->vdev, CMD_VLAN_DEL, &a0, &a1, wait);
+	if (err)
+		printk(KERN_ERR PFX "Can't delete vlan id, %d\n", err);
+}
+
+int enic_set_nic_cfg(struct enic *enic, u8 rss_default_cpu, u8 rss_hash_type,
+	u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable, u8 tso_ipid_split_en,
+	u8 ig_vlan_strip_en)
+{
+	u64 a0, a1;
+	u32 nic_cfg;
+	int wait = 1000;
+
+	vnic_set_nic_cfg(&nic_cfg, rss_default_cpu,
+		rss_hash_type, rss_hash_bits, rss_base_cpu,
+		rss_enable, tso_ipid_split_en, ig_vlan_strip_en);
+
+	a0 = nic_cfg;
+	a1 = 0;
+
+	return vnic_dev_cmd(enic->vdev, CMD_NIC_CFG, &a0, &a1, wait);
+}
+
+void enic_free_vnic_resources(struct enic *enic)
+{
+	unsigned int i;
+
+	for (i = 0; i < enic->wq_count; i++)
+		vnic_wq_free(&enic->wq[i]);
+	for (i = 0; i < enic->rq_count; i++)
+		vnic_rq_free(&enic->rq[i]);
+	for (i = 0; i < enic->cq_count; i++)
+		vnic_cq_free(&enic->cq[i]);
+	for (i = 0; i < enic->intr_count; i++)
+		vnic_intr_free(&enic->intr[i]);
+}
+
+void enic_get_res_counts(struct enic *enic)
+{
+	enic->wq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_WQ);
+	enic->rq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_RQ);
+	enic->cq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_CQ);
+	enic->intr_count = vnic_dev_get_res_count(enic->vdev,
+		RES_TYPE_INTR_CTRL);
+
+	printk(KERN_INFO PFX "vNIC resources avail: "
+		"wq %d rq %d cq %d intr %d\n",
+		enic->wq_count, enic->rq_count,
+		enic->cq_count, enic->intr_count);
+}
+
+void enic_init_vnic_resources(struct enic *enic)
+{
+	enum vnic_dev_intr_mode intr_mode;
+	unsigned int mask_on_assertion;
+	unsigned int interrupt_offset;
+	unsigned int error_interrupt_enable;
+	unsigned int error_interrupt_offset;
+	unsigned int cq_index;
+	unsigned int i;
+
+	intr_mode = vnic_dev_get_intr_mode(enic->vdev);
+
+	/* Init RQ/WQ resources.
+	 *
+	 * RQ[0 - n-1] point to CQ[0 - n-1]
+	 * WQ[0 - m-1] point to CQ[n - n+m-1]
+	 *
+	 * Error interrupt is not enabled for MSI.
+	 */
+
+	switch (intr_mode) {
+	case VNIC_DEV_INTR_MODE_INTX:
+	case VNIC_DEV_INTR_MODE_MSIX:
+		error_interrupt_enable = 1;
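+		/* error intr is second-to-last; the last is notify */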
+		error_interrupt_offset = enic->intr_count - 2;
+		break;
+	default:
+		error_interrupt_enable = 0;
+		error_interrupt_offset = 0;
+		break;
+	}
+
+	for (i = 0; i < enic->rq_count; i++) {
+		cq_index = i;
+		vnic_rq_init(&enic->rq[i],
+			cq_index,
+			error_interrupt_enable,
+			error_interrupt_offset);
+	}
+
+	for (i = 0; i < enic->wq_count; i++) {
+		cq_index = enic->rq_count + i;
+		vnic_wq_init(&enic->wq[i],
+			cq_index,
+			error_interrupt_enable,
+			error_interrupt_offset);
+	}
+
+	/* Init CQ resources
+	 *
+	 * CQ[0 - n+m-1] point to INTR[0] for INTx, MSI
+	 * CQ[0 - n+m-1] point to INTR[0 - n+m-1] for MSI-X
+	 */
+
+	for (i = 0; i < enic->cq_count; i++) {
+
+		switch (intr_mode) {
+		case VNIC_DEV_INTR_MODE_MSIX:
+			interrupt_offset = i;
+			break;
+		default:
+			interrupt_offset = 0;
+			break;
+		}
+
+		vnic_cq_init(&enic->cq[i],
+			0 /* flow_control_enable */,
+			1 /* color_enable */,
+			0 /* cq_head */,
+			0 /* cq_tail */,
+			1 /* cq_tail_color */,
+			1 /* interrupt_enable */,
+			1 /* cq_entry_enable */,
+			0 /* cq_message_enable */,
+			interrupt_offset,
+			0 /* cq_message_addr */);
+	}
+
+	/* Init INTR resources
+	 *
+	 * mask_on_assertion is not used for INTx due to the level-
+	 * triggered nature of INTx
+	 */
+
+	switch (intr_mode) {
+	case VNIC_DEV_INTR_MODE_MSI:
+	case VNIC_DEV_INTR_MODE_MSIX:
+		mask_on_assertion = 1;
+		break;
+	default:
+		mask_on_assertion = 0;
+		break;
+	}
+
+	for (i = 0; i < enic->intr_count; i++) {
+		vnic_intr_init(&enic->intr[i],
+			enic->config.intr_timer,
+			enic->config.intr_timer_type,
+			mask_on_assertion);
+	}
+
+	/* Clear LIF stats
+	 */
+
+	vnic_dev_stats_clear(enic->vdev);
+}
+
+int enic_alloc_vnic_resources(struct enic *enic)
+{
+	enum vnic_dev_intr_mode intr_mode;
+	unsigned int i;
+	int err;
+
+	intr_mode = vnic_dev_get_intr_mode(enic->vdev);
+
+	printk(KERN_INFO PFX "vNIC resources used:  "
+		"wq %d rq %d cq %d intr %d intr mode %s\n",
+		enic->wq_count, enic->rq_count,
+		enic->cq_count, enic->intr_count,
+		intr_mode == VNIC_DEV_INTR_MODE_INTX ? "legacy PCI INTx" :
+		intr_mode == VNIC_DEV_INTR_MODE_MSI ? "MSI" :
+		intr_mode == VNIC_DEV_INTR_MODE_MSIX ? "MSI-X" :
+		"unknown"
+		);
+
+	/* Allocate queue resources
+	 */
+
+	for (i = 0; i < enic->wq_count; i++) {
+		err = vnic_wq_alloc(enic->vdev, &enic->wq[i], i,
+			enic->config.wq_desc_count,
+			sizeof(struct wq_enet_desc));
+		if (err)
+			goto err_out_cleanup;
+	}
+
+	for (i = 0; i < enic->rq_count; i++) {
+		err = vnic_rq_alloc(enic->vdev, &enic->rq[i], i,
+			enic->config.rq_desc_count,
+			sizeof(struct rq_enet_desc));
+		if (err)
+			goto err_out_cleanup;
+	}
+
+	for (i = 0; i < enic->cq_count; i++) {
+		if (i < enic->rq_count)
+			err = vnic_cq_alloc(enic->vdev, &enic->cq[i], i,
+				enic->config.rq_desc_count,
+				sizeof(struct cq_enet_rq_desc));
+		else
+			err = vnic_cq_alloc(enic->vdev, &enic->cq[i], i,
+				enic->config.wq_desc_count,
+				sizeof(struct cq_enet_wq_desc));
+		if (err)
+			goto err_out_cleanup;
+	}
+
+	for (i = 0; i < enic->intr_count; i++) {
+		err = vnic_intr_alloc(enic->vdev, &enic->intr[i], i);
+		if (err)
+			goto err_out_cleanup;
+	}
+
+	/* Hook remaining resource
+	 */
+
+	enic->legacy_pba = vnic_dev_get_res(enic->vdev,
+		RES_TYPE_INTR_PBA_LEGACY, 0);
+	if (!enic->legacy_pba && intr_mode == VNIC_DEV_INTR_MODE_INTX) {
+		printk(KERN_ERR PFX "Failed to hook legacy pba resource\n");
+		err = -ENODEV;
+		goto err_out_cleanup;
+	}
+
+	return 0;
+
+err_out_cleanup:
+	enic_free_vnic_resources(enic);
+
+	return err;
+}
diff --git a/drivers/net/enic/enic_res.h b/drivers/net/enic/enic_res.h
new file mode 100644
index 000000000000..68534a29b7ac
--- /dev/null
+++ b/drivers/net/enic/enic_res.h
@@ -0,0 +1,151 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _ENIC_RES_H_
+#define _ENIC_RES_H_
+
+#include "wq_enet_desc.h"
+#include "rq_enet_desc.h"
+#include "vnic_wq.h"
+#include "vnic_rq.h"
+
+#define ENIC_MIN_WQ_DESCS		64
+#define ENIC_MAX_WQ_DESCS		4096
+#define ENIC_MIN_RQ_DESCS		64
+#define ENIC_MAX_RQ_DESCS		4096
+
+#define ENIC_MIN_MTU			576  /* minimum for IPv4 */
+#define ENIC_MAX_MTU			9000
+
+#define ENIC_MULTICAST_PERFECT_FILTERS	32
+
+#define ENIC_NON_TSO_MAX_DESC		16
+
+#define ENIC_SETTING(enic, f) ((enic->config.flags & VENETF_##f) ? 1 : 0)
+
+static inline void enic_queue_wq_desc_ex(struct vnic_wq *wq,
+	void *os_buf, dma_addr_t dma_addr, unsigned int len,
+	unsigned int mss_or_csum_offset, unsigned int hdr_len,
+	int vlan_tag_insert, unsigned int vlan_tag,
+	int offload_mode, int cq_entry, int sop, int eop)
+{
+	struct wq_enet_desc *desc = vnic_wq_next_desc(wq);
+
+	wq_enet_desc_enc(desc,
+		(u64)dma_addr | VNIC_PADDR_TARGET,
+		(u16)len,
+		(u16)mss_or_csum_offset,
+		(u16)hdr_len, (u8)offload_mode,
+		(u8)eop, (u8)cq_entry,
+		0, /* fcoe_encap */
+		(u8)vlan_tag_insert,
+		(u16)vlan_tag,
+		0 /* loopback */);
+
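+	/* descriptor must be written before it is posted to hw */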
+	wmb();
+
+	vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop);
+}
+
+static inline void enic_queue_wq_desc_cont(struct vnic_wq *wq,
+	void *os_buf, dma_addr_t dma_addr, unsigned int len, int eop)
+{
+	enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
+		0, 0, 0, 0, 0,
+		eop, 0 /* !SOP */, eop);
+}
+
+static inline void enic_queue_wq_desc(struct vnic_wq *wq, void *os_buf,
+	dma_addr_t dma_addr, unsigned int len, int vlan_tag_insert,
+	unsigned int vlan_tag, int eop)
+{
+	enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
+		0, 0, vlan_tag_insert, vlan_tag,
+		WQ_ENET_OFFLOAD_MODE_CSUM,
+		eop, 1 /* SOP */, eop);
+}
+
+static inline void enic_queue_wq_desc_csum(struct vnic_wq *wq,
+	void *os_buf, dma_addr_t dma_addr, unsigned int len,
+	int ip_csum, int tcpudp_csum, int vlan_tag_insert,
+	unsigned int vlan_tag, int eop)
+{
+	enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
+		(ip_csum ? 1 : 0) + (tcpudp_csum ? 2 : 0),
+		0, vlan_tag_insert, vlan_tag,
+		WQ_ENET_OFFLOAD_MODE_CSUM,
+		eop, 1 /* SOP */, eop);
+}
+
+static inline void enic_queue_wq_desc_csum_l4(struct vnic_wq *wq,
+	void *os_buf, dma_addr_t dma_addr, unsigned int len,
+	unsigned int csum_offset, unsigned int hdr_len,
+	int vlan_tag_insert, unsigned int vlan_tag, int eop)
+{
+	enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
+		csum_offset, hdr_len, vlan_tag_insert, vlan_tag,
+		WQ_ENET_OFFLOAD_MODE_CSUM_L4,
+		eop, 1 /* SOP */, eop);
+}
+
+static inline void enic_queue_wq_desc_tso(struct vnic_wq *wq,
+	void *os_buf, dma_addr_t dma_addr, unsigned int len,
+	unsigned int mss, unsigned int hdr_len, int vlan_tag_insert,
+	unsigned int vlan_tag, int eop)
+{
+	enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
+		mss, hdr_len, vlan_tag_insert, vlan_tag,
+		WQ_ENET_OFFLOAD_MODE_TSO,
+		eop, 1 /* SOP */, eop);
+}
+
+static inline void enic_queue_rq_desc(struct vnic_rq *rq,
+	void *os_buf, unsigned int os_buf_index,
+	dma_addr_t dma_addr, unsigned int len)
+{
+	struct rq_enet_desc *desc = vnic_rq_next_desc(rq);
+	u8 type = os_buf_index ?
+		RQ_ENET_TYPE_NOT_SOP : RQ_ENET_TYPE_ONLY_SOP;
+
+	rq_enet_desc_enc(desc,
+		(u64)dma_addr | VNIC_PADDR_TARGET,
+		type, (u16)len);
+
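+	/* descriptor write must land before the posted index update */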
+	wmb();
+
+	vnic_rq_post(rq, os_buf, os_buf_index, dma_addr, len);
+}
+
+struct enic;
+
+int enic_get_vnic_config(struct enic *enic);
+void enic_add_station_addr(struct enic *enic);
+void enic_add_multicast_addr(struct enic *enic, u8 *addr);
+void enic_del_multicast_addr(struct enic *enic, u8 *addr);
+void enic_add_vlan(struct enic *enic, u16 vlanid);
+void enic_del_vlan(struct enic *enic, u16 vlanid);
+int enic_set_nic_cfg(struct enic *enic, u8 rss_default_cpu, u8 rss_hash_type,
+	u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable, u8 tso_ipid_split_en,
+	u8 ig_vlan_strip_en);
+void enic_get_res_counts(struct enic *enic);
+void enic_init_vnic_resources(struct enic *enic);
+int enic_alloc_vnic_resources(struct enic *enic);
+void enic_free_vnic_resources(struct enic *enic);
+
+#endif /* _ENIC_RES_H_ */
diff --git a/drivers/net/enic/rq_enet_desc.h b/drivers/net/enic/rq_enet_desc.h
new file mode 100644
index 000000000000..a06e649010ce
--- /dev/null
+++ b/drivers/net/enic/rq_enet_desc.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _RQ_ENET_DESC_H_
+#define _RQ_ENET_DESC_H_
+
+/* Ethernet receive queue descriptor: 16B */
+struct rq_enet_desc {
+	__le64 address;
+	__le16 length_type;
+	u8 reserved[6];
+};
+
+enum rq_enet_type_types {
+	RQ_ENET_TYPE_ONLY_SOP = 0,
+	RQ_ENET_TYPE_NOT_SOP = 1,
+	RQ_ENET_TYPE_RESV2 = 2,
+	RQ_ENET_TYPE_RESV3 = 3,
+};
+
+#define RQ_ENET_ADDR_BITS		64
+#define RQ_ENET_LEN_BITS		14
+#define RQ_ENET_LEN_MASK		((1 << RQ_ENET_LEN_BITS) - 1)
+#define RQ_ENET_TYPE_BITS		2
+#define RQ_ENET_TYPE_MASK		((1 << RQ_ENET_TYPE_BITS) - 1)
+
+static inline void rq_enet_desc_enc(struct rq_enet_desc *desc,
+	u64 address, u8 type, u16 length)
+{
+	desc->address = cpu_to_le64(address);
+	desc->length_type = cpu_to_le16((length & RQ_ENET_LEN_MASK) |
+		((type & RQ_ENET_TYPE_MASK) << RQ_ENET_LEN_BITS));
+}
+
+static inline void rq_enet_desc_dec(struct rq_enet_desc *desc,
+	u64 *address, u8 *type, u16 *length)
+{
+	*address = le64_to_cpu(desc->address);
+	*length = le16_to_cpu(desc->length_type) & RQ_ENET_LEN_MASK;
+	*type = (u8)((le16_to_cpu(desc->length_type) >> RQ_ENET_LEN_BITS) &
+		RQ_ENET_TYPE_MASK);
+}
+
+#endif /* _RQ_ENET_DESC_H_ */
diff --git a/drivers/net/enic/vnic_cq.c b/drivers/net/enic/vnic_cq.c
new file mode 100644
index 000000000000..020ae6c3f3d9
--- /dev/null
+++ b/drivers/net/enic/vnic_cq.c
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+
+#include "vnic_dev.h"
+#include "vnic_cq.h"
+
+void vnic_cq_free(struct vnic_cq *cq)
+{
+	vnic_dev_free_desc_ring(cq->vdev, &cq->ring);
+
+	cq->ctrl = NULL;
+}
+
+int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index,
+	unsigned int desc_count, unsigned int desc_size)
+{
+	int err;
+
+	cq->index = index;
+	cq->vdev = vdev;
+
+	cq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_CQ, index);
+	if (!cq->ctrl) {
+		printk(KERN_ERR "Failed to hook CQ[%d] resource\n", index);
+		return -EINVAL;
+	}
+
+	err = vnic_dev_alloc_desc_ring(vdev, &cq->ring, desc_count, desc_size);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,
+	unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,
+	unsigned int cq_tail_color, unsigned int interrupt_enable,
+	unsigned int cq_entry_enable, unsigned int cq_message_enable,
+	unsigned int interrupt_offset, u64 cq_message_addr)
+{
+	u64 paddr;
+
+	paddr = (u64)cq->ring.base_addr | VNIC_PADDR_TARGET;
+	writeq(paddr, &cq->ctrl->ring_base);
+	iowrite32(cq->ring.desc_count, &cq->ctrl->ring_size);
+	iowrite32(flow_control_enable, &cq->ctrl->flow_control_enable);
+	iowrite32(color_enable, &cq->ctrl->color_enable);
+	iowrite32(cq_head, &cq->ctrl->cq_head);
+	iowrite32(cq_tail, &cq->ctrl->cq_tail);
+	iowrite32(cq_tail_color, &cq->ctrl->cq_tail_color);
+	iowrite32(interrupt_enable, &cq->ctrl->interrupt_enable);
+	iowrite32(cq_entry_enable, &cq->ctrl->cq_entry_enable);
+	iowrite32(cq_message_enable, &cq->ctrl->cq_message_enable);
+	iowrite32(interrupt_offset, &cq->ctrl->interrupt_offset);
+	writeq(cq_message_addr, &cq->ctrl->cq_message_addr);
+}
+
+void vnic_cq_clean(struct vnic_cq *cq)
+{
+	cq->to_clean = 0;
+	cq->last_color = 0;
+
+	iowrite32(0, &cq->ctrl->cq_head);
+	iowrite32(0, &cq->ctrl->cq_tail);
+	iowrite32(1, &cq->ctrl->cq_tail_color);
+
+	vnic_dev_clear_desc_ring(&cq->ring);
+}
diff --git a/drivers/net/enic/vnic_cq.h b/drivers/net/enic/vnic_cq.h
new file mode 100644
index 000000000000..114763cbc2f8
--- /dev/null
+++ b/drivers/net/enic/vnic_cq.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _VNIC_CQ_H_
+#define _VNIC_CQ_H_
+
+#include "cq_desc.h"
+#include "vnic_dev.h"
+
+/* Completion queue control */
+struct vnic_cq_ctrl {
+	u64 ring_base;			/* 0x00 */
+	u32 ring_size;			/* 0x08 */
+	u32 pad0;
+	u32 flow_control_enable;	/* 0x10 */
+	u32 pad1;
+	u32 color_enable;		/* 0x18 */
+	u32 pad2;
+	u32 cq_head;			/* 0x20 */
+	u32 pad3;
+	u32 cq_tail;			/* 0x28 */
+	u32 pad4;
+	u32 cq_tail_color;		/* 0x30 */
+	u32 pad5;
+	u32 interrupt_enable;		/* 0x38 */
+	u32 pad6;
+	u32 cq_entry_enable;		/* 0x40 */
+	u32 pad7;
+	u32 cq_message_enable;		/* 0x48 */
+	u32 pad8;
+	u32 interrupt_offset;		/* 0x50 */
+	u32 pad9;
+	u64 cq_message_addr;		/* 0x58 */
+	u32 pad10;
+};
+
+struct vnic_cq {
+	unsigned int index;
+	struct vnic_dev *vdev;
+	struct vnic_cq_ctrl __iomem *ctrl;              /* memory-mapped */
+	struct vnic_dev_ring ring;
+	unsigned int to_clean;
+	unsigned int last_color;
+};
+
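+/* Service completions: hw flips each entry's color bit on every pass
+ * around the ring, so an entry whose color differs from last_color is
+ * a fresh completion; last_color toggles whenever to_clean wraps.
+ */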
+static inline unsigned int vnic_cq_service(struct vnic_cq *cq,
+	unsigned int work_to_do,
+	int (*q_service)(struct vnic_dev *vdev, struct cq_desc *cq_desc,
+	u8 type, u16 q_number, u16 completed_index, void *opaque),
+	void *opaque)
+{
+	struct cq_desc *cq_desc;
+	unsigned int work_done = 0;
+	u16 q_number, completed_index;
+	u8 type, color;
+
+	cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
+		cq->ring.desc_size * cq->to_clean);
+	cq_desc_dec(cq_desc, &type, &color,
+		&q_number, &completed_index);
+
+	while (color != cq->last_color) {
+
+		if ((*q_service)(cq->vdev, cq_desc, type,
+			q_number, completed_index, opaque))
+			break;
+
+		cq->to_clean++;
+		if (cq->to_clean == cq->ring.desc_count) {
+			cq->to_clean = 0;
+			cq->last_color = cq->last_color ? 0 : 1;
+		}
+
+		cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
+			cq->ring.desc_size * cq->to_clean);
+		cq_desc_dec(cq_desc, &type, &color,
+			&q_number, &completed_index);
+
+		work_done++;
+		if (work_done >= work_to_do)
+			break;
+	}
+
+	return work_done;
+}
+
+void vnic_cq_free(struct vnic_cq *cq);
+int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index,
+	unsigned int desc_count, unsigned int desc_size);
+void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,
+	unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,
+	unsigned int cq_tail_color, unsigned int interrupt_enable,
+	unsigned int cq_entry_enable, unsigned int cq_message_enable,
+	unsigned int interrupt_offset, u64 cq_message_addr);
+void vnic_cq_clean(struct vnic_cq *cq);
+
+#endif /* _VNIC_CQ_H_ */
diff --git a/drivers/net/enic/vnic_dev.c b/drivers/net/enic/vnic_dev.c
new file mode 100644
index 000000000000..4d104f5c30f9
--- /dev/null
+++ b/drivers/net/enic/vnic_dev.c
@@ -0,0 +1,674 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/if_ether.h>
+
+#include "vnic_resource.h"
+#include "vnic_devcmd.h"
+#include "vnic_dev.h"
+#include "vnic_stats.h"
+
+struct vnic_res {
+	void __iomem *vaddr;
+	unsigned int count;
+};
+
+struct vnic_dev {
+	void *priv;
+	struct pci_dev *pdev;
+	struct vnic_res res[RES_TYPE_MAX];
+	enum vnic_dev_intr_mode intr_mode;
+	struct vnic_devcmd __iomem *devcmd;
+	struct vnic_devcmd_notify *notify;
+	struct vnic_devcmd_notify notify_copy;
+	dma_addr_t notify_pa;
+	u32 *linkstatus;
+	dma_addr_t linkstatus_pa;
+	struct vnic_stats *stats;
+	dma_addr_t stats_pa;
+	struct vnic_devcmd_fw_info *fw_info;
+	dma_addr_t fw_info_pa;
+};
+
+#define VNIC_MAX_RES_HDR_SIZE \
+	(sizeof(struct vnic_resource_header) + \
+	sizeof(struct vnic_resource) * RES_TYPE_MAX)
+#define VNIC_RES_STRIDE	128
+
+void *vnic_dev_priv(struct vnic_dev *vdev)
+{
+	return vdev->priv;
+}
+
+static int vnic_dev_discover_res(struct vnic_dev *vdev,
+	struct vnic_dev_bar *bar)
+{
+	struct vnic_resource_header __iomem *rh;
+	struct vnic_resource __iomem *r;
+	u8 type;
+
+	if (bar->len < VNIC_MAX_RES_HDR_SIZE) {
+		printk(KERN_ERR "vNIC BAR0 res hdr length error\n");
+		return -EINVAL;
+	}
+
+	rh = bar->vaddr;
+	if (!rh) {
+		printk(KERN_ERR "vNIC BAR0 res hdr not mem-mapped\n");
+		return -EINVAL;
+	}
+
+	if (ioread32(&rh->magic) != VNIC_RES_MAGIC ||
+	    ioread32(&rh->version) != VNIC_RES_VERSION) {
+		printk(KERN_ERR "vNIC BAR0 res magic/version error "
+			"exp (%lx/%lx) curr (%x/%x)\n",
+			VNIC_RES_MAGIC, VNIC_RES_VERSION,
+			ioread32(&rh->magic), ioread32(&rh->version));
+		return -EINVAL;
+	}
+
+	r = (struct vnic_resource __iomem *)(rh + 1);
+
+	while ((type = ioread8(&r->type)) != RES_TYPE_EOL) {
+
+		u8 bar_num = ioread8(&r->bar);
+		u32 bar_offset = ioread32(&r->bar_offset);
+		u32 count = ioread32(&r->count);
+		u32 len;
+
+		r++;
+
+		if (bar_num != 0)  /* only mapping in BAR0 resources */
+			continue;
+
+		switch (type) {
+		case RES_TYPE_WQ:
+		case RES_TYPE_RQ:
+		case RES_TYPE_CQ:
+		case RES_TYPE_INTR_CTRL:
+			/* each count is stride bytes long */
+			len = count * VNIC_RES_STRIDE;
+			if (len + bar_offset > bar->len) {
+				printk(KERN_ERR "vNIC BAR0 resource %d "
+					"out-of-bounds, offset 0x%x + "
+					"size 0x%x > bar len 0x%lx\n",
+					type, bar_offset,
+					len,
+					bar->len);
+				return -EINVAL;
+			}
+			break;
+		case RES_TYPE_INTR_PBA_LEGACY:
+		case RES_TYPE_DEVCMD:
+			len = count;
+			break;
+		default:
+			continue;
+		}
+
+		vdev->res[type].count = count;
+		vdev->res[type].vaddr = (char __iomem *)bar->vaddr + bar_offset;
+	}
+
+	return 0;
+}
+
+unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
+	enum vnic_res_type type)
+{
+	return vdev->res[type].count;
+}
+
+void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
+	unsigned int index)
+{
+	if (!vdev->res[type].vaddr)
+		return NULL;
+
+	switch (type) {
+	case RES_TYPE_WQ:
+	case RES_TYPE_RQ:
+	case RES_TYPE_CQ:
+	case RES_TYPE_INTR_CTRL:
+		return (char __iomem *)vdev->res[type].vaddr +
+			index * VNIC_RES_STRIDE;
+	default:
+		return (char __iomem *)vdev->res[type].vaddr;
+	}
+}
+
+unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
+	unsigned int desc_count, unsigned int desc_size)
+{
+	/* The base address of the desc rings must be 512 byte aligned.
+	 * Descriptor count is aligned to groups of 32 descriptors.  A
+	 * count of 0 means the maximum 4096 descriptors.  Descriptor
+	 * size is aligned to 16 bytes.
+	 */
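+	/* e.g. desc_count=100, desc_size=10 becomes 128 descriptors of
+	 * 16 bytes: size = 2048, size_unaligned = 2048 + 512 = 2560
+	 */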
+
+	unsigned int count_align = 32;
+	unsigned int desc_align = 16;
+
+	ring->base_align = 512;
+
+	if (desc_count == 0)
+		desc_count = 4096;
+
+	ring->desc_count = ALIGN(desc_count, count_align);
+
+	ring->desc_size = ALIGN(desc_size, desc_align);
+
+	ring->size = ring->desc_count * ring->desc_size;
+	ring->size_unaligned = ring->size + ring->base_align;
+
+	return ring->size_unaligned;
+}
+
+void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring)
+{
+	memset(ring->descs, 0, ring->size);
+}
+
+int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
+	unsigned int desc_count, unsigned int desc_size)
+{
+	vnic_dev_desc_ring_size(ring, desc_count, desc_size);
+
+	ring->descs_unaligned = pci_alloc_consistent(vdev->pdev,
+		ring->size_unaligned,
+		&ring->base_addr_unaligned);
+
+	if (!ring->descs_unaligned) {
+		printk(KERN_ERR
+			"Failed to allocate ring (size=%d), aborting\n",
+			(int)ring->size);
+		return -ENOMEM;
+	}
+
+	ring->base_addr = ALIGN(ring->base_addr_unaligned,
+		ring->base_align);
+	ring->descs = (u8 *)ring->descs_unaligned +
+		(ring->base_addr - ring->base_addr_unaligned);
+
+	vnic_dev_clear_desc_ring(ring);
+
+	ring->desc_avail = ring->desc_count - 1;
+
+	return 0;
+}
+
+void vnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
+{
+	if (ring->descs) {
+		pci_free_consistent(vdev->pdev,
+			ring->size_unaligned,
+			ring->descs_unaligned,
+			ring->base_addr_unaligned);
+		ring->descs = NULL;
+	}
+}
+
+int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
+	u64 *a0, u64 *a1, int wait)
+{
+	struct vnic_devcmd __iomem *devcmd = vdev->devcmd;
+	int delay;
+	u32 status;
+	int dev_cmd_err[] = {
+		/* convert from fw's version of error.h to host's version */
+		0,	/* ERR_SUCCESS */
+		EINVAL,	/* ERR_EINVAL */
+		EFAULT,	/* ERR_EFAULT */
+		EPERM,	/* ERR_EPERM */
+		EBUSY,  /* ERR_EBUSY */
+	};
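+	/* note: the table above covers fw codes ERR_SUCCESS..ERR_EBUSY only */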
+	int err;
+
+	status = ioread32(&devcmd->status);
+	if (status & STAT_BUSY) {
+		printk(KERN_ERR "Busy devcmd %d\n", _CMD_N(cmd));
+		return -EBUSY;
+	}
+
+	if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
+		writeq(*a0, &devcmd->args[0]);
+		writeq(*a1, &devcmd->args[1]);
+		wmb();
+	}
+
+	iowrite32(cmd, &devcmd->cmd);
+
+	if (_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT)
+		return 0;
+
+	for (delay = 0; delay < wait; delay++) {
+
+		udelay(100);
+
+		status = ioread32(&devcmd->status);
+		if (!(status & STAT_BUSY)) {
+
+			if (status & STAT_ERROR) {
+				err = dev_cmd_err[(int)readq(&devcmd->args[0])];
+				printk(KERN_ERR "Error %d devcmd %d\n",
+					err, _CMD_N(cmd));
+				return -err;
+			}
+
+			if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
+				rmb();
+				*a0 = readq(&devcmd->args[0]);
+				*a1 = readq(&devcmd->args[1]);
+			}
+
+			return 0;
+		}
+	}
+
+	printk(KERN_ERR "Timedout devcmd %d\n", _CMD_N(cmd));
+	return -ETIMEDOUT;
+}
+
+int vnic_dev_fw_info(struct vnic_dev *vdev,
+	struct vnic_devcmd_fw_info **fw_info)
+{
+	u64 a0, a1 = 0;
+	int wait = 1000;
+	int err = 0;
+
+	if (!vdev->fw_info) {
+		vdev->fw_info = pci_alloc_consistent(vdev->pdev,
+			sizeof(struct vnic_devcmd_fw_info),
+			&vdev->fw_info_pa);
+		if (!vdev->fw_info)
+			return -ENOMEM;
+
+		a0 = vdev->fw_info_pa;
+
+		/* only get fw_info once and cache it */
+		err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO, &a0, &a1, wait);
+	}
+
+	*fw_info = vdev->fw_info;
+
+	return err;
+}
+
+int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
+	void *value)
+{
+	u64 a0, a1;
+	int wait = 1000;
+	int err;
+
+	a0 = offset;
+	a1 = size;
+
+	err = vnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait);
+
+	switch (size) {
+	case 1: *(u8 *)value = (u8)a0; break;
+	case 2: *(u16 *)value = (u16)a0; break;
+	case 4: *(u32 *)value = (u32)a0; break;
+	case 8: *(u64 *)value = a0; break;
+	default: BUG(); break;
+	}
+
+	return err;
+}
+
+int vnic_dev_stats_clear(struct vnic_dev *vdev)
+{
+	u64 a0 = 0, a1 = 0;
+	int wait = 1000;
+	return vnic_dev_cmd(vdev, CMD_STATS_CLEAR, &a0, &a1, wait);
+}
+
+int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
+{
+	u64 a0, a1;
+	int wait = 1000;
+
+	if (!vdev->stats) {
+		vdev->stats = pci_alloc_consistent(vdev->pdev,
+			sizeof(struct vnic_stats), &vdev->stats_pa);
+		if (!vdev->stats)
+			return -ENOMEM;
+	}
+
+	*stats = vdev->stats;
+	a0 = vdev->stats_pa;
+	a1 = sizeof(struct vnic_stats);
+
+	return vnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait);
+}
+
+int vnic_dev_close(struct vnic_dev *vdev)
+{
+	u64 a0 = 0, a1 = 0;
+	int wait = 1000;
+	return vnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait);
+}
+
+int vnic_dev_enable(struct vnic_dev *vdev)
+{
+	u64 a0 = 0, a1 = 0;
+	int wait = 1000;
+	return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);
+}
+
+int vnic_dev_disable(struct vnic_dev *vdev)
+{
+	u64 a0 = 0, a1 = 0;
+	int wait = 1000;
+	return vnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait);
+}
+
+int vnic_dev_open(struct vnic_dev *vdev, int arg)
+{
+	u64 a0 = (u32)arg, a1 = 0;
+	int wait = 1000;
+	return vnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait);
+}
+
+int vnic_dev_open_done(struct vnic_dev *vdev, int *done)
+{
+	u64 a0 = 0, a1 = 0;
+	int wait = 1000;
+	int err;
+
+	*done = 0;
+
+	err = vnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait);
+	if (err)
+		return err;
+
+	*done = (a0 == 0);
+
+	return 0;
+}
+
+int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg)
+{
+	u64 a0 = (u32)arg, a1 = 0;
+	int wait = 1000;
+	return vnic_dev_cmd(vdev, CMD_SOFT_RESET, &a0, &a1, wait);
+}
+
+int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done)
+{
+	u64 a0 = 0, a1 = 0;
+	int wait = 1000;
+	int err;
+
+	*done = 0;
+
+	err = vnic_dev_cmd(vdev, CMD_SOFT_RESET_STATUS, &a0, &a1, wait);
+	if (err)
+		return err;
+
+	*done = (a0 == 0);
+
+	return 0;
+}
+
+int vnic_dev_hang_notify(struct vnic_dev *vdev)
+{
+	u64 a0 = 0, a1 = 0;
+	int wait = 1000;
+	return vnic_dev_cmd(vdev, CMD_HANG_NOTIFY, &a0, &a1, wait);
+}
+
+int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
+{
+	u64 a0, a1;
+	int wait = 1000;
+	int err, i;
+
+	for (i = 0; i < ETH_ALEN; i++)
+		mac_addr[i] = 0;
+
+	err = vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a0, &a1, wait);
+	if (err)
+		return err;
+
+	for (i = 0; i < ETH_ALEN; i++)
+		mac_addr[i] = ((u8 *)&a0)[i];
+
+	return 0;
+}
+
+void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
+	int broadcast, int promisc, int allmulti)
+{
+	u64 a0, a1 = 0;
+	int wait = 1000;
+	int err;
+
+	a0 = (directed ? CMD_PFILTER_DIRECTED : 0) |
+	     (multicast ? CMD_PFILTER_MULTICAST : 0) |
+	     (broadcast ? CMD_PFILTER_BROADCAST : 0) |
+	     (promisc ? CMD_PFILTER_PROMISCUOUS : 0) |
+	     (allmulti ? CMD_PFILTER_ALL_MULTICAST : 0);
+
+	err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait);
+	if (err)
+		printk(KERN_ERR "Can't set packet filter\n");
+}
+
+void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
+{
+	u64 a0 = 0, a1 = 0;
+	int wait = 1000;
+	int err;
+	int i;
+
+	for (i = 0; i < ETH_ALEN; i++)
+		((u8 *)&a0)[i] = addr[i];
+
+	err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
+	if (err)
+		printk(KERN_ERR
+			"Can't add addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
+			addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
+			err);
+}
+
+void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
+{
+	u64 a0 = 0, a1 = 0;
+	int wait = 1000;
+	int err;
+	int i;
+
+	for (i = 0; i < ETH_ALEN; i++)
+		((u8 *)&a0)[i] = addr[i];
+
+	err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
+	if (err)
+		printk(KERN_ERR
+			"Can't del addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
+			addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
+			err);
+}
+
+int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
+{
+	u64 a0, a1;
+	int wait = 1000;
+
+	if (!vdev->notify) {
+		vdev->notify = pci_alloc_consistent(vdev->pdev,
+			sizeof(struct vnic_devcmd_notify),
+			&vdev->notify_pa);
+		if (!vdev->notify)
+			return -ENOMEM;
+	}
+
+	a0 = vdev->notify_pa;
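+	/* intr number rides in bits 47:32 of a1; size in the low 32 bits */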
+	a1 = ((u64)intr << 32) & 0x0000ffff00000000ULL;
+	a1 += sizeof(struct vnic_devcmd_notify);
+
+	return vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
+}
+
+void vnic_dev_notify_unset(struct vnic_dev *vdev)
+{
+	u64 a0, a1;
+	int wait = 1000;
+
+	a0 = 0;  /* paddr = 0 to unset notify buffer */
+	a1 = 0x0000ffff00000000ULL; /* intr num = -1 to unreg for intr */
+	a1 += sizeof(struct vnic_devcmd_notify);
+
+	vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
+}
+
+static int vnic_dev_notify_ready(struct vnic_dev *vdev)
+{
+	u32 *words;
+	unsigned int nwords = sizeof(struct vnic_devcmd_notify) / 4;
+	unsigned int i;
+	u32 csum;
+
+	if (!vdev->notify)
+		return 0;
+
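+	/* re-copy until the sum of words 1..n matches the csum word, so
+	 * a buffer caught mid-update by fw is never trusted
+	 */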
+	do {
+		csum = 0;
+		memcpy(&vdev->notify_copy, vdev->notify,
+			sizeof(struct vnic_devcmd_notify));
+		words = (u32 *)&vdev->notify_copy;
+		for (i = 1; i < nwords; i++)
+			csum += words[i];
+	} while (csum != words[0]);
+
+	return 1;
+}
+
+int vnic_dev_init(struct vnic_dev *vdev, int arg)
+{
+	u64 a0 = (u32)arg, a1 = 0;
+	int wait = 1000;
+	return vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
+}
+
+int vnic_dev_link_status(struct vnic_dev *vdev)
+{
+	if (vdev->linkstatus)
+		return *vdev->linkstatus;
+
+	if (!vnic_dev_notify_ready(vdev))
+		return 0;
+
+	return vdev->notify_copy.link_state;
+}
+
+u32 vnic_dev_port_speed(struct vnic_dev *vdev)
+{
+	if (!vnic_dev_notify_ready(vdev))
+		return 0;
+
+	return vdev->notify_copy.port_speed;
+}
+
+u32 vnic_dev_msg_lvl(struct vnic_dev *vdev)
+{
+	if (!vnic_dev_notify_ready(vdev))
+		return 0;
+
+	return vdev->notify_copy.msglvl;
+}
+
+u32 vnic_dev_mtu(struct vnic_dev *vdev)
+{
+	if (!vnic_dev_notify_ready(vdev))
+		return 0;
+
+	return vdev->notify_copy.mtu;
+}
+
+void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
+	enum vnic_dev_intr_mode intr_mode)
+{
+	vdev->intr_mode = intr_mode;
+}
+
+enum vnic_dev_intr_mode vnic_dev_get_intr_mode(
+	struct vnic_dev *vdev)
+{
+	return vdev->intr_mode;
+}
+
+void vnic_dev_unregister(struct vnic_dev *vdev)
+{
+	if (vdev) {
+		if (vdev->notify)
+			pci_free_consistent(vdev->pdev,
+				sizeof(struct vnic_devcmd_notify),
+				vdev->notify,
+				vdev->notify_pa);
+		if (vdev->linkstatus)
+			pci_free_consistent(vdev->pdev,
+				sizeof(u32),
+				vdev->linkstatus,
+				vdev->linkstatus_pa);
+		if (vdev->stats)
+			pci_free_consistent(vdev->pdev,
+				sizeof(struct vnic_stats),
+				vdev->stats, vdev->stats_pa);
+		if (vdev->fw_info)
+			pci_free_consistent(vdev->pdev,
+				sizeof(struct vnic_devcmd_fw_info),
+				vdev->fw_info, vdev->fw_info_pa);
+		kfree(vdev);
+	}
+}
+
+struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
+	void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar)
+{
+	if (!vdev) {
+		vdev = kzalloc(sizeof(struct vnic_dev), GFP_ATOMIC);
+		if (!vdev)
+			return NULL;
+	}
+
+	vdev->priv = priv;
+	vdev->pdev = pdev;
+
+	if (vnic_dev_discover_res(vdev, bar))
+		goto err_out;
+
+	vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
+	if (!vdev->devcmd)
+		goto err_out;
+
+	return vdev;
+
+err_out:
+	vnic_dev_unregister(vdev);
+	return NULL;
+}
diff --git a/drivers/net/enic/vnic_dev.h b/drivers/net/enic/vnic_dev.h
new file mode 100644
index 000000000000..2dcffd3a24bd
--- /dev/null
+++ b/drivers/net/enic/vnic_dev.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _VNIC_DEV_H_
+#define _VNIC_DEV_H_
+
+#include "vnic_resource.h"
+#include "vnic_devcmd.h"
+
+#ifndef VNIC_PADDR_TARGET
+#define VNIC_PADDR_TARGET	0x0000000000000000ULL
+#endif
+
+enum vnic_dev_intr_mode {
+	VNIC_DEV_INTR_MODE_UNKNOWN,
+	VNIC_DEV_INTR_MODE_INTX,
+	VNIC_DEV_INTR_MODE_MSI,
+	VNIC_DEV_INTR_MODE_MSIX,
+};
+
+struct vnic_dev_bar {
+	void __iomem *vaddr;
+	dma_addr_t bus_addr;
+	unsigned long len;
+};
+
+struct vnic_dev_ring {
+	void *descs;
+	size_t size;
+	dma_addr_t base_addr;
+	size_t base_align;
+	void *descs_unaligned;
+	size_t size_unaligned;
+	dma_addr_t base_addr_unaligned;
+	unsigned int desc_size;
+	unsigned int desc_count;
+	unsigned int desc_avail;
+};
+
+struct vnic_dev;
+struct vnic_stats;
+
+void *vnic_dev_priv(struct vnic_dev *vdev);
+unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
+	enum vnic_res_type type);
+void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
+	unsigned int index);
+unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
+	unsigned int desc_count, unsigned int desc_size);
+void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring);
+int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
+	unsigned int desc_count, unsigned int desc_size);
+void vnic_dev_free_desc_ring(struct vnic_dev *vdev,
+	struct vnic_dev_ring *ring);
+int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
+	u64 *a0, u64 *a1, int wait);
+int vnic_dev_fw_info(struct vnic_dev *vdev,
+	struct vnic_devcmd_fw_info **fw_info);
+int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
+	void *value);
+int vnic_dev_stats_clear(struct vnic_dev *vdev);
+int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats);
+int vnic_dev_hang_notify(struct vnic_dev *vdev);
+void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
+	int broadcast, int promisc, int allmulti);
+void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr);
+void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr);
+int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr);
+int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr);
+void vnic_dev_notify_unset(struct vnic_dev *vdev);
+int vnic_dev_link_status(struct vnic_dev *vdev);
+u32 vnic_dev_port_speed(struct vnic_dev *vdev);
+u32 vnic_dev_msg_lvl(struct vnic_dev *vdev);
+u32 vnic_dev_mtu(struct vnic_dev *vdev);
+int vnic_dev_close(struct vnic_dev *vdev);
+int vnic_dev_enable(struct vnic_dev *vdev);
+int vnic_dev_disable(struct vnic_dev *vdev);
+int vnic_dev_open(struct vnic_dev *vdev, int arg);
+int vnic_dev_open_done(struct vnic_dev *vdev, int *done);
+int vnic_dev_init(struct vnic_dev *vdev, int arg);
+int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg);
+int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done);
+void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
+	enum vnic_dev_intr_mode intr_mode);
+enum vnic_dev_intr_mode vnic_dev_get_intr_mode(struct vnic_dev *vdev);
+void vnic_dev_unregister(struct vnic_dev *vdev);
+struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
+	void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar);
+
+#endif /* _VNIC_DEV_H_ */
diff --git a/drivers/net/enic/vnic_devcmd.h b/drivers/net/enic/vnic_devcmd.h
new file mode 100644
index 000000000000..d8617a3373b1
--- /dev/null
+++ b/drivers/net/enic/vnic_devcmd.h
@@ -0,0 +1,282 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _VNIC_DEVCMD_H_
+#define _VNIC_DEVCMD_H_
+
+#define _CMD_NBITS      14
+#define _CMD_VTYPEBITS	10
+#define _CMD_FLAGSBITS  6
+#define _CMD_DIRBITS	2
+
+#define _CMD_NMASK      ((1 << _CMD_NBITS)-1)
+#define _CMD_VTYPEMASK  ((1 << _CMD_VTYPEBITS)-1)
+#define _CMD_FLAGSMASK  ((1 << _CMD_FLAGSBITS)-1)
+#define _CMD_DIRMASK    ((1 << _CMD_DIRBITS)-1)
+
+#define _CMD_NSHIFT     0
+#define _CMD_VTYPESHIFT (_CMD_NSHIFT+_CMD_NBITS)
+#define _CMD_FLAGSSHIFT (_CMD_VTYPESHIFT+_CMD_VTYPEBITS)
+#define _CMD_DIRSHIFT   (_CMD_FLAGSSHIFT+_CMD_FLAGSBITS)
+
+/*
+ * Direction bits (from host perspective).
+ */
+#define _CMD_DIR_NONE   0U
+#define _CMD_DIR_WRITE  1U
+#define _CMD_DIR_READ   2U
+#define _CMD_DIR_RW     (_CMD_DIR_WRITE | _CMD_DIR_READ)
+
+/*
+ * Flag bits.
+ */
+#define _CMD_FLAGS_NONE 0U
+#define _CMD_FLAGS_NOWAIT 1U
+
+/*
+ * vNIC type bits.
+ */
+#define _CMD_VTYPE_NONE  0U
+#define _CMD_VTYPE_ENET  1U
+#define _CMD_VTYPE_FC    2U
+#define _CMD_VTYPE_SCSI  4U
+#define _CMD_VTYPE_ALL   (_CMD_VTYPE_ENET | _CMD_VTYPE_FC | _CMD_VTYPE_SCSI)
+
+/*
+ * Used to create cmds.
+ */
+#define _CMDCF(dir, flags, vtype, nr)  \
+	(((dir)   << _CMD_DIRSHIFT) | \
+	((flags) << _CMD_FLAGSSHIFT) | \
+	((vtype) << _CMD_VTYPESHIFT) | \
+	((nr)    << _CMD_NSHIFT))
+#define _CMDC(dir, vtype, nr)    _CMDCF(dir, 0, vtype, nr)
+#define _CMDCNW(dir, vtype, nr)  _CMDCF(dir, _CMD_FLAGS_NOWAIT, vtype, nr)
+
+/*
+ * Used to decode cmds.
+ */
+#define _CMD_DIR(cmd)            (((cmd) >> _CMD_DIRSHIFT) & _CMD_DIRMASK)
+#define _CMD_FLAGS(cmd)          (((cmd) >> _CMD_FLAGSSHIFT) & _CMD_FLAGSMASK)
+#define _CMD_VTYPE(cmd)          (((cmd) >> _CMD_VTYPESHIFT) & _CMD_VTYPEMASK)
+#define _CMD_N(cmd)              (((cmd) >> _CMD_NSHIFT) & _CMD_NMASK)
+
+enum vnic_devcmd_cmd {
+	CMD_NONE                = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_NONE, 0),
+
+	/* mcpu fw info in mem: (u64)a0=paddr to struct vnic_devcmd_fw_info */
+	CMD_MCPU_FW_INFO        = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 1),
+
+	/* dev-specific block member:
+	 *    in: (u16)a0=offset,(u8)a1=size
+	 *    out: a0=value */
+	CMD_DEV_SPEC            = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 2),
+
+	/* stats clear */
+	CMD_STATS_CLEAR         = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 3),
+
+	/* stats dump in mem: (u64)a0=paddr to stats area,
+	 *                    (u16)a1=sizeof stats area */
+	CMD_STATS_DUMP          = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 4),
+
+	/* set Rx packet filter: (u32)a0=filters (see CMD_PFILTER_*) */
+	CMD_PACKET_FILTER	= _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 7),
+
+	/* hang detection notification */
+	CMD_HANG_NOTIFY         = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 8),
+
+	/* MAC address in (u48)a0 */
+	CMD_MAC_ADDR            = _CMDC(_CMD_DIR_READ,
+					_CMD_VTYPE_ENET | _CMD_VTYPE_FC, 9),
+
+	/* disable/enable promisc mode: (u8)a0=0/1 */
+/***** XXX DEPRECATED *****/
+	CMD_PROMISC_MODE        = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 10),
+
+	/* disable/enable all-multi mode: (u8)a0=0/1 */
+/***** XXX DEPRECATED *****/
+	CMD_ALLMULTI_MODE       = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 11),
+
+	/* add addr from (u48)a0 */
+	CMD_ADDR_ADD            = _CMDCNW(_CMD_DIR_WRITE,
+					_CMD_VTYPE_ENET | _CMD_VTYPE_FC, 12),
+
+	/* del addr from (u48)a0 */
+	CMD_ADDR_DEL            = _CMDCNW(_CMD_DIR_WRITE,
+					_CMD_VTYPE_ENET | _CMD_VTYPE_FC, 13),
+
+	/* add VLAN id in (u16)a0 */
+	CMD_VLAN_ADD            = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 14),
+
+	/* del VLAN id in (u16)a0 */
+	CMD_VLAN_DEL            = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 15),
+
+	/* nic_cfg in (u32)a0 */
+	CMD_NIC_CFG             = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 16),
+
+	/* union vnic_rss_key in mem: (u64)a0=paddr, (u16)a1=len */
+	CMD_RSS_KEY             = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 17),
+
+	/* union vnic_rss_cpu in mem: (u64)a0=paddr, (u16)a1=len */
+	CMD_RSS_CPU             = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 18),
+
+	/* initiate softreset */
+	CMD_SOFT_RESET          = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 19),
+
+	/* softreset status:
+	 *    out: a0=0 reset complete, a0=1 reset in progress */
+	CMD_SOFT_RESET_STATUS   = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 20),
+
+	/* set struct vnic_devcmd_notify buffer in mem:
+	 * in:
+	 *   (u64)a0=paddr to notify (set paddr=0 to unset)
+	 *   (u32)a1 & 0x00000000ffffffff=sizeof(struct vnic_devcmd_notify)
+	 *   (u16)a1 & 0x0000ffff00000000=intr num (-1 for no intr)
+	 * out:
+	 *   (u32)a1 = effective size
+	 */
+	CMD_NOTIFY              = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 21),
+
+	/* UNDI API: (u64)a0=paddr to s_PXENV_UNDI_ struct,
+	 *           (u8)a1=PXENV_UNDI_xxx */
+	CMD_UNDI                = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 22),
+
+	/* initiate open sequence (u32)a0=flags (see CMD_OPENF_*) */
+	CMD_OPEN		= _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 23),
+
+	/* open status:
+	 *    out: a0=0 open complete, a0=1 open in progress */
+	CMD_OPEN_STATUS		= _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 24),
+
+	/* close vnic */
+	CMD_CLOSE		= _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 25),
+
+	/* initialize virtual link: (u32)a0=flags (see CMD_INITF_*) */
+	CMD_INIT		= _CMDCNW(_CMD_DIR_READ, _CMD_VTYPE_ALL, 26),
+
+	/* variant of CMD_INIT, with provisioning info
+	 *     (u64)a0=paddr of vnic_devcmd_provinfo
+	 *     (u32)a1=sizeof provision info */
+	CMD_INIT_PROV_INFO	= _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 27),
+
+	/* enable virtual link */
+	CMD_ENABLE		= _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 28),
+
+	/* disable virtual link */
+	CMD_DISABLE		= _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 29),
+
+	/* stats dump all vnics on uplink in mem: (u64)a0=paddr (u32)a1=uif */
+	CMD_STATS_DUMP_ALL	= _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 30),
+
+	/* init status:
+	 *    out: a0=0 init complete, a0=1 init in progress
+	 *         if a0=0, a1=errno */
+	CMD_INIT_STATUS		= _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 31),
+
+	/* INT13 API: (u64)a0=paddr to vnic_int13_params struct
+	 *            (u8)a1=INT13_CMD_xxx */
+	CMD_INT13               = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_FC, 32),
+
+	/* logical uplink enable/disable: (u64)a0: 0/1=disable/enable */
+	CMD_LOGICAL_UPLINK      = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 33),
+
+	/* undo initialize of virtual link */
+	CMD_DEINIT		= _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 34),
+};
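+/* example: CMD_OPEN above encodes dir=WRITE, flags=NOWAIT, vtype=ALL,
+ * nr=23, so _CMD_N(CMD_OPEN) == 23 and _CMD_FLAGS(CMD_OPEN) == NOWAIT
+ */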
+
+/* flags for CMD_OPEN */
+#define CMD_OPENF_OPROM		0x1	/* open coming from option rom */
+
+/* flags for CMD_INIT */
+#define CMD_INITF_DEFAULT_MAC	0x1	/* init with default mac addr */
+
+/* flags for CMD_PACKET_FILTER */
+#define CMD_PFILTER_DIRECTED		0x01
+#define CMD_PFILTER_MULTICAST		0x02
+#define CMD_PFILTER_BROADCAST		0x04
+#define CMD_PFILTER_PROMISCUOUS		0x08
+#define CMD_PFILTER_ALL_MULTICAST	0x10
+
+enum vnic_devcmd_status {
+	STAT_NONE = 0,
+	STAT_BUSY = 1 << 0,	/* cmd in progress */
+	STAT_ERROR = 1 << 1,	/* last cmd caused error (code in a0) */
+};
+
+enum vnic_devcmd_error {
+	ERR_SUCCESS = 0,
+	ERR_EINVAL = 1,
+	ERR_EFAULT = 2,
+	ERR_EPERM = 3,
+	ERR_EBUSY = 4,
+	ERR_ECMDUNKNOWN = 5,
+	ERR_EBADSTATE = 6,
+	ERR_ENOMEM = 7,
+	ERR_ETIMEDOUT = 8,
+	ERR_ELINKDOWN = 9,
+};
+
+struct vnic_devcmd_fw_info {
+	char fw_version[32];
+	char fw_build[32];
+	char hw_version[32];
+	char hw_serial_number[32];
+};
+
+struct vnic_devcmd_notify {
+	u32 csum;		/* checksum over following words */
+
+	u32 link_state;		/* link up == 1 */
+	u32 port_speed;		/* effective port speed (rate limit) */
+	u32 mtu;		/* MTU */
+	u32 msglvl;		/* requested driver msg lvl */
+	u32 uif;		/* uplink interface */
+	u32 status;		/* status bits (see VNIC_STF_*) */
+	u32 error;		/* error code (see ERR_*) for first ERR */
+};
+#define VNIC_STF_FATAL_ERR	0x0001	/* fatal fw error */
+
+struct vnic_devcmd_provinfo {
+	u8 oui[3];
+	u8 type;
+	u8 data[0];
+};
+
+/*
+ * Writing cmd register causes STAT_BUSY to get set in status register.
+ * When cmd completes, STAT_BUSY will be cleared.
+ *
+ * If cmd completed successfully STAT_ERROR will be clear
+ * and args registers contain cmd-specific results.
+ *
+ * If cmd error, STAT_ERROR will be set and args[0] contains error code.
+ *
+ * status register is read-only.  While STAT_BUSY is set,
+ * all other register contents are read-only.
+ */
+
+/* Make sizeof(vnic_devcmd) a power-of-2 for I/O BAR. */
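+/* status (4) + cmd (4) + 15 x 8-byte args (120) = 128 bytes */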
+#define VNIC_DEVCMD_NARGS 15
+struct vnic_devcmd {
+	u32 status;			/* RO */
+	u32 cmd;			/* RW */
+	u64 args[VNIC_DEVCMD_NARGS];	/* RW cmd args (little-endian) */
+};
+
+#endif /* _VNIC_DEVCMD_H_ */
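
The handshake described in the comment block above reduces to a post-and-poll loop. A minimal sketch, assuming an ioremap()ed struct vnic_devcmd and a single u64 argument; the devcmd_to_fw name and the wait parameter are illustrative, and writeq()/readq() presume a 64-bit build or an explicit 32-bit fallback:

static int devcmd_to_fw(struct vnic_devcmd __iomem *devcmd,
	enum vnic_devcmd_cmd cmd, u64 *a0, unsigned int wait_usec)
{
	unsigned int i;
	u32 status;

	writeq(*a0, &devcmd->args[0]);		/* stage input argument */
	wmb();
	iowrite32(cmd, &devcmd->cmd);		/* hardware raises STAT_BUSY */

	for (i = 0; i < wait_usec; i++) {
		udelay(1);
		status = ioread32(&devcmd->status);
		if (status & STAT_BUSY)
			continue;
		if (status & STAT_ERROR)
			return -EIO;	/* ERR_* code is left in args[0] */
		*a0 = readq(&devcmd->args[0]);	/* cmd-specific result */
		return 0;
	}

	return -ETIMEDOUT;
}
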
diff --git a/drivers/net/enic/vnic_enet.h b/drivers/net/enic/vnic_enet.h
new file mode 100644
index 000000000000..6332ac9391b8
--- /dev/null
+++ b/drivers/net/enic/vnic_enet.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _VNIC_ENIC_H_
+#define _VNIC_ENIC_H_
+
+/* Device-specific region: enet configuration */
+struct vnic_enet_config {
+	u32 flags;
+	u32 wq_desc_count;
+	u32 rq_desc_count;
+	u16 mtu;
+	u16 intr_timer;
+	u8 intr_timer_type;
+	u8 intr_mode;
+	char devname[16];
+};
+
+#define VENETF_TSO		0x1	/* TSO enabled */
+#define VENETF_LRO		0x2	/* LRO enabled */
+#define VENETF_RXCSUM		0x4	/* RX csum enabled */
+#define VENETF_TXCSUM		0x8	/* TX csum enabled */
+#define VENETF_RSS		0x10	/* RSS enabled */
+#define VENETF_RSSHASH_IPV4	0x20	/* Hash on IPv4 fields */
+#define VENETF_RSSHASH_TCPIPV4	0x40	/* Hash on TCP + IPv4 fields */
+#define VENETF_RSSHASH_IPV6	0x80	/* Hash on IPv6 fields */
+#define VENETF_RSSHASH_TCPIPV6	0x100	/* Hash on TCP + IPv6 fields */
+#define VENETF_RSSHASH_IPV6_EX	0x200	/* Hash on IPv6 extended fields */
+#define VENETF_RSSHASH_TCPIPV6_EX 0x400	/* Hash on TCP + IPv6 ext. fields */
+
+#endif /* _VNIC_ENIC_H_ */
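
The VENETF_* bits tell the driver which offloads the vNIC was provisioned with. A minimal sketch of consuming them, assuming a fetched struct vnic_enet_config; the function name and the feature mapping are illustrative:

static void example_apply_enet_config(struct net_device *netdev,
	struct vnic_enet_config *c)
{
	if (c->flags & VENETF_TXCSUM)
		netdev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
	if (c->flags & VENETF_TSO)
		netdev->features |= NETIF_F_TSO;

	netdev->mtu = c->mtu;		/* provisioned MTU */
}
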
diff --git a/drivers/net/enic/vnic_intr.c b/drivers/net/enic/vnic_intr.c
new file mode 100644
index 000000000000..ddc38f8f4656
--- /dev/null
+++ b/drivers/net/enic/vnic_intr.c
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+
+#include "vnic_dev.h"
+#include "vnic_intr.h"
+
+void vnic_intr_free(struct vnic_intr *intr)
+{
+	intr->ctrl = NULL;
+}
+
+int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr,
+	unsigned int index)
+{
+	intr->index = index;
+	intr->vdev = vdev;
+
+	intr->ctrl = vnic_dev_get_res(vdev, RES_TYPE_INTR_CTRL, index);
+	if (!intr->ctrl) {
+		printk(KERN_ERR "Failed to hook INTR[%d].ctrl resource\n",
+			index);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+void vnic_intr_init(struct vnic_intr *intr, unsigned int coalescing_timer,
+	unsigned int coalescing_type, unsigned int mask_on_assertion)
+{
+	iowrite32(coalescing_timer, &intr->ctrl->coalescing_timer);
+	iowrite32(coalescing_type, &intr->ctrl->coalescing_type);
+	iowrite32(mask_on_assertion, &intr->ctrl->mask_on_assertion);
+	iowrite32(0, &intr->ctrl->int_credits);
+}
+
+void vnic_intr_clean(struct vnic_intr *intr)
+{
+	iowrite32(0, &intr->ctrl->int_credits);
+}
diff --git a/drivers/net/enic/vnic_intr.h b/drivers/net/enic/vnic_intr.h
new file mode 100644
index 000000000000..ccc408116af8
--- /dev/null
+++ b/drivers/net/enic/vnic_intr.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _VNIC_INTR_H_
+#define _VNIC_INTR_H_
+
+#include <linux/pci.h>
+
+#include "vnic_dev.h"
+
+#define VNIC_INTR_TIMER_MAX		0xffff
+
+#define VNIC_INTR_TIMER_TYPE_ABS	0
+#define VNIC_INTR_TIMER_TYPE_QUIET	1
+
+/* Interrupt control */
+struct vnic_intr_ctrl {
+	u32 coalescing_timer;		/* 0x00 */
+	u32 pad0;
+	u32 coalescing_value;		/* 0x08 */
+	u32 pad1;
+	u32 coalescing_type;		/* 0x10 */
+	u32 pad2;
+	u32 mask_on_assertion;		/* 0x18 */
+	u32 pad3;
+	u32 mask;			/* 0x20 */
+	u32 pad4;
+	u32 int_credits;		/* 0x28 */
+	u32 pad5;
+	u32 int_credit_return;		/* 0x30 */
+	u32 pad6;
+};
+
+struct vnic_intr {
+	unsigned int index;
+	struct vnic_dev *vdev;
+	struct vnic_intr_ctrl __iomem *ctrl;		/* memory-mapped */
+};
+
+static inline void vnic_intr_unmask(struct vnic_intr *intr)
+{
+	iowrite32(0, &intr->ctrl->mask);
+}
+
+static inline void vnic_intr_mask(struct vnic_intr *intr)
+{
+	iowrite32(1, &intr->ctrl->mask);
+}
+
+static inline void vnic_intr_return_credits(struct vnic_intr *intr,
+	unsigned int credits, int unmask, int reset_timer)
+{
+#define VNIC_INTR_UNMASK_SHIFT		16
+#define VNIC_INTR_RESET_TIMER_SHIFT	17
+
+	u32 int_credit_return = (credits & 0xffff) |
+		(unmask ? (1 << VNIC_INTR_UNMASK_SHIFT) : 0) |
+		(reset_timer ? (1 << VNIC_INTR_RESET_TIMER_SHIFT) : 0);
+
+	iowrite32(int_credit_return, &intr->ctrl->int_credit_return);
+}
+
+static inline u32 vnic_intr_legacy_pba(u32 __iomem *legacy_pba)
+{
+	/* get and ack interrupt in one read (clear-and-ack-on-read) */
+	return ioread32(legacy_pba);
+}
+
+void vnic_intr_free(struct vnic_intr *intr);
+int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr,
+	unsigned int index);
+void vnic_intr_init(struct vnic_intr *intr, unsigned int coalescing_timer,
+	unsigned int coalescing_type, unsigned int mask_on_assertion);
+void vnic_intr_clean(struct vnic_intr *intr);
+
+#endif /* _VNIC_INTR_H_ */
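
vnic_intr_return_credits() packs the serviced-completion count and the unmask/reset-timer requests into a single doorbell write, which is what makes it cheap to call once per poll cycle. A minimal sketch of the usual end-of-poll sequence; the function name and work_done are illustrative:

static void example_poll_done(struct vnic_intr *intr, unsigned int work_done)
{
	/* Return credits and, in the same 32-bit write, ask the
	 * hardware to unmask the interrupt and restart its
	 * coalescing timer. */
	vnic_intr_return_credits(intr, work_done, 1, 1);
}
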
diff --git a/drivers/net/enic/vnic_nic.h b/drivers/net/enic/vnic_nic.h
new file mode 100644
index 000000000000..dadf26fae69a
--- /dev/null
+++ b/drivers/net/enic/vnic_nic.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _VNIC_NIC_H_
+#define _VNIC_NIC_H_
+
+#define NIC_CFG_RSS_DEFAULT_CPU_MASK_FIELD	0xffUL
+#define NIC_CFG_RSS_DEFAULT_CPU_SHIFT		0
+#define NIC_CFG_RSS_HASH_TYPE			(0xffUL << 8)
+#define NIC_CFG_RSS_HASH_TYPE_MASK_FIELD	0xffUL
+#define NIC_CFG_RSS_HASH_TYPE_SHIFT		8
+#define NIC_CFG_RSS_HASH_BITS			(7UL << 16)
+#define NIC_CFG_RSS_HASH_BITS_MASK_FIELD	7UL
+#define NIC_CFG_RSS_HASH_BITS_SHIFT		16
+#define NIC_CFG_RSS_BASE_CPU			(7UL << 19)
+#define NIC_CFG_RSS_BASE_CPU_MASK_FIELD		7UL
+#define NIC_CFG_RSS_BASE_CPU_SHIFT		19
+#define NIC_CFG_RSS_ENABLE			(1UL << 22)
+#define NIC_CFG_RSS_ENABLE_MASK_FIELD		1UL
+#define NIC_CFG_RSS_ENABLE_SHIFT		22
+#define NIC_CFG_TSO_IPID_SPLIT_EN		(1UL << 23)
+#define NIC_CFG_TSO_IPID_SPLIT_EN_MASK_FIELD	1UL
+#define NIC_CFG_TSO_IPID_SPLIT_EN_SHIFT		23
+#define NIC_CFG_IG_VLAN_STRIP_EN		(1UL << 24)
+#define NIC_CFG_IG_VLAN_STRIP_EN_MASK_FIELD	1UL
+#define NIC_CFG_IG_VLAN_STRIP_EN_SHIFT		24
+
+static inline void vnic_set_nic_cfg(u32 *nic_cfg,
+	u8 rss_default_cpu, u8 rss_hash_type,
+	u8 rss_hash_bits, u8 rss_base_cpu,
+	u8 rss_enable, u8 tso_ipid_split_en,
+	u8 ig_vlan_strip_en)
+{
+	*nic_cfg = (rss_default_cpu & NIC_CFG_RSS_DEFAULT_CPU_MASK_FIELD) |
+		((rss_hash_type & NIC_CFG_RSS_HASH_TYPE_MASK_FIELD)
+			<< NIC_CFG_RSS_HASH_TYPE_SHIFT) |
+		((rss_hash_bits & NIC_CFG_RSS_HASH_BITS_MASK_FIELD)
+			<< NIC_CFG_RSS_HASH_BITS_SHIFT) |
+		((rss_base_cpu & NIC_CFG_RSS_BASE_CPU_MASK_FIELD)
+			<< NIC_CFG_RSS_BASE_CPU_SHIFT) |
+		((rss_enable & NIC_CFG_RSS_ENABLE_MASK_FIELD)
+			<< NIC_CFG_RSS_ENABLE_SHIFT) |
+		((tso_ipid_split_en & NIC_CFG_TSO_IPID_SPLIT_EN_MASK_FIELD)
+			<< NIC_CFG_TSO_IPID_SPLIT_EN_SHIFT) |
+		((ig_vlan_strip_en & NIC_CFG_IG_VLAN_STRIP_EN_MASK_FIELD)
+			<< NIC_CFG_IG_VLAN_STRIP_EN_SHIFT);
+}
+
+#endif /* _VNIC_NIC_H_ */
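
All seven fields compose into one u32 via the mask/shift pairs above. A minimal sketch that enables RSS across eight indirection entries with ingress VLAN stripping; the individual values are illustrative:

static u32 example_nic_cfg(void)
{
	u32 nic_cfg;

	vnic_set_nic_cfg(&nic_cfg,
		0,	/* rss_default_cpu */
		0xff,	/* rss_hash_type (encoding not shown here) */
		3,	/* rss_hash_bits: 2^3 = 8 indirection entries */
		0,	/* rss_base_cpu */
		1,	/* rss_enable */
		0,	/* tso_ipid_split_en */
		1);	/* ig_vlan_strip_en */

	return nic_cfg;
}
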
diff --git a/drivers/net/enic/vnic_resource.h b/drivers/net/enic/vnic_resource.h
new file mode 100644
index 000000000000..144d2812f081
--- /dev/null
+++ b/drivers/net/enic/vnic_resource.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _VNIC_RESOURCE_H_
+#define _VNIC_RESOURCE_H_
+
+#define VNIC_RES_MAGIC		0x766E6963L	/* 'vnic' */
+#define VNIC_RES_VERSION	0x00000000L
+
+/* vNIC resource types */
+enum vnic_res_type {
+	RES_TYPE_EOL,			/* End-of-list */
+	RES_TYPE_WQ,			/* Work queues */
+	RES_TYPE_RQ,			/* Receive queues */
+	RES_TYPE_CQ,			/* Completion queues */
+	RES_TYPE_RSVD1,
+	RES_TYPE_NIC_CFG,		/* Enet NIC config registers */
+	RES_TYPE_RSVD2,
+	RES_TYPE_RSVD3,
+	RES_TYPE_RSVD4,
+	RES_TYPE_RSVD5,
+	RES_TYPE_INTR_CTRL,		/* Interrupt ctrl table */
+	RES_TYPE_INTR_TABLE,		/* MSI/MSI-X Interrupt table */
+	RES_TYPE_INTR_PBA,		/* MSI/MSI-X PBA table */
+	RES_TYPE_INTR_PBA_LEGACY,	/* Legacy intr status, r2c */
+	RES_TYPE_RSVD6,
+	RES_TYPE_RSVD7,
+	RES_TYPE_DEVCMD,		/* Device command region */
+	RES_TYPE_PASS_THRU_PAGE,	/* Pass-thru page */
+
+	RES_TYPE_MAX,			/* Count of resource types */
+};
+
+struct vnic_resource_header {
+	u32 magic;
+	u32 version;
+};
+
+struct vnic_resource {
+	u8 type;
+	u8 bar;
+	u8 pad[2];
+	u32 bar_offset;
+	u32 count;
+};
+
+#endif /* _VNIC_RESOURCE_H_ */
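
Resource discovery is a linear scan: validate the header's magic and version, then walk vnic_resource entries until RES_TYPE_EOL. A minimal sketch, assuming the table has already been ioremap()ed; vnic_dev_get_res() elsewhere in this patch is the real consumer, and find_res here is illustrative:

static struct vnic_resource __iomem *
find_res(struct vnic_resource_header __iomem *rh, u8 type)
{
	struct vnic_resource __iomem *r =
		(struct vnic_resource __iomem *)(rh + 1);

	if (ioread32(&rh->magic) != VNIC_RES_MAGIC ||
	    ioread32(&rh->version) != VNIC_RES_VERSION)
		return NULL;

	for (; ioread8(&r->type) != RES_TYPE_EOL; r++)
		if (ioread8(&r->type) == type)
			return r;	/* caller maps BAR + bar_offset */

	return NULL;
}
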
diff --git a/drivers/net/enic/vnic_rq.c b/drivers/net/enic/vnic_rq.c
new file mode 100644
index 000000000000..9365e63e821a
--- /dev/null
+++ b/drivers/net/enic/vnic_rq.c
@@ -0,0 +1,199 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+
+#include "vnic_dev.h"
+#include "vnic_rq.h"
+
+static int vnic_rq_alloc_bufs(struct vnic_rq *rq)
+{
+	struct vnic_rq_buf *buf;
+	struct vnic_dev *vdev;
+	unsigned int i, j, count = rq->ring.desc_count;
+	unsigned int blks = VNIC_RQ_BUF_BLKS_NEEDED(count);
+
+	vdev = rq->vdev;
+
+	for (i = 0; i < blks; i++) {
+		rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ, GFP_ATOMIC);
+		if (!rq->bufs[i]) {
+			printk(KERN_ERR "Failed to alloc rq_bufs\n");
+			return -ENOMEM;
+		}
+	}
+
+	for (i = 0; i < blks; i++) {
+		buf = rq->bufs[i];
+		for (j = 0; j < VNIC_RQ_BUF_BLK_ENTRIES; j++) {
+			buf->index = i * VNIC_RQ_BUF_BLK_ENTRIES + j;
+			buf->desc = (u8 *)rq->ring.descs +
+				rq->ring.desc_size * buf->index;
+			if (buf->index + 1 == count) {
+				buf->next = rq->bufs[0];
+				break;
+			} else if (j + 1 == VNIC_RQ_BUF_BLK_ENTRIES) {
+				buf->next = rq->bufs[i + 1];
+			} else {
+				buf->next = buf + 1;
+				buf++;
+			}
+		}
+	}
+
+	rq->to_use = rq->to_clean = rq->bufs[0];
+	rq->buf_index = 0;
+
+	return 0;
+}
+
+void vnic_rq_free(struct vnic_rq *rq)
+{
+	struct vnic_dev *vdev;
+	unsigned int i;
+
+	vdev = rq->vdev;
+
+	vnic_dev_free_desc_ring(vdev, &rq->ring);
+
+	for (i = 0; i < VNIC_RQ_BUF_BLKS_MAX; i++) {
+		kfree(rq->bufs[i]);
+		rq->bufs[i] = NULL;
+	}
+
+	rq->ctrl = NULL;
+}
+
+int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
+	unsigned int desc_count, unsigned int desc_size)
+{
+	int err;
+
+	rq->index = index;
+	rq->vdev = vdev;
+
+	rq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_RQ, index);
+	if (!rq->ctrl) {
+		printk(KERN_ERR "Failed to hook RQ[%d] resource\n", index);
+		return -EINVAL;
+	}
+
+	vnic_rq_disable(rq);
+
+	err = vnic_dev_alloc_desc_ring(vdev, &rq->ring, desc_count, desc_size);
+	if (err)
+		return err;
+
+	err = vnic_rq_alloc_bufs(rq);
+	if (err) {
+		vnic_rq_free(rq);
+		return err;
+	}
+
+	return 0;
+}
+
+void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
+	unsigned int error_interrupt_enable,
+	unsigned int error_interrupt_offset)
+{
+	u64 paddr;
+	u32 fetch_index;
+
+	paddr = (u64)rq->ring.base_addr | VNIC_PADDR_TARGET;
+	writeq(paddr, &rq->ctrl->ring_base);
+	iowrite32(rq->ring.desc_count, &rq->ctrl->ring_size);
+	iowrite32(cq_index, &rq->ctrl->cq_index);
+	iowrite32(error_interrupt_enable, &rq->ctrl->error_interrupt_enable);
+	iowrite32(error_interrupt_offset, &rq->ctrl->error_interrupt_offset);
+	iowrite32(0, &rq->ctrl->dropped_packet_count);
+	iowrite32(0, &rq->ctrl->error_status);
+
+	/* Use current fetch_index as the ring starting point */
+	fetch_index = ioread32(&rq->ctrl->fetch_index);
+	rq->to_use = rq->to_clean =
+		&rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES]
+			[fetch_index % VNIC_RQ_BUF_BLK_ENTRIES];
+	iowrite32(fetch_index, &rq->ctrl->posted_index);
+
+	rq->buf_index = 0;
+}
+
+unsigned int vnic_rq_error_status(struct vnic_rq *rq)
+{
+	return ioread32(&rq->ctrl->error_status);
+}
+
+void vnic_rq_enable(struct vnic_rq *rq)
+{
+	iowrite32(1, &rq->ctrl->enable);
+}
+
+int vnic_rq_disable(struct vnic_rq *rq)
+{
+	unsigned int wait;
+
+	iowrite32(0, &rq->ctrl->enable);
+
+	/* Wait for HW to ACK disable request */
+	for (wait = 0; wait < 100; wait++) {
+		if (!(ioread32(&rq->ctrl->running)))
+			return 0;
+		udelay(1);
+	}
+
+	printk(KERN_ERR "Failed to disable RQ[%d]\n", rq->index);
+
+	return -ETIMEDOUT;
+}
+
+void vnic_rq_clean(struct vnic_rq *rq,
+	void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf))
+{
+	struct vnic_rq_buf *buf;
+	u32 fetch_index;
+
+	BUG_ON(ioread32(&rq->ctrl->enable));
+
+	buf = rq->to_clean;
+
+	while (vnic_rq_desc_used(rq) > 0) {
+
+		(*buf_clean)(rq, buf);
+
+		buf = rq->to_clean = buf->next;
+		rq->ring.desc_avail++;
+	}
+
+	/* Use current fetch_index as the ring starting point */
+	fetch_index = ioread32(&rq->ctrl->fetch_index);
+	rq->to_use = rq->to_clean =
+		&rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES]
+			[fetch_index % VNIC_RQ_BUF_BLK_ENTRIES];
+	iowrite32(fetch_index, &rq->ctrl->posted_index);
+
+	rq->buf_index = 0;
+
+	vnic_dev_clear_desc_ring(&rq->ring);
+}
diff --git a/drivers/net/enic/vnic_rq.h b/drivers/net/enic/vnic_rq.h
new file mode 100644
index 000000000000..82bfca67cc4d
--- /dev/null
+++ b/drivers/net/enic/vnic_rq.h
@@ -0,0 +1,204 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _VNIC_RQ_H_
+#define _VNIC_RQ_H_
+
+#include <linux/pci.h>
+
+#include "vnic_dev.h"
+#include "vnic_cq.h"
+
+/* Receive queue control */
+struct vnic_rq_ctrl {
+	u64 ring_base;			/* 0x00 */
+	u32 ring_size;			/* 0x08 */
+	u32 pad0;
+	u32 posted_index;		/* 0x10 */
+	u32 pad1;
+	u32 cq_index;			/* 0x18 */
+	u32 pad2;
+	u32 enable;			/* 0x20 */
+	u32 pad3;
+	u32 running;			/* 0x28 */
+	u32 pad4;
+	u32 fetch_index;		/* 0x30 */
+	u32 pad5;
+	u32 error_interrupt_enable;	/* 0x38 */
+	u32 pad6;
+	u32 error_interrupt_offset;	/* 0x40 */
+	u32 pad7;
+	u32 error_status;		/* 0x48 */
+	u32 pad8;
+	u32 dropped_packet_count;	/* 0x50 */
+	u32 pad9;
+	u32 dropped_packet_count_rc;	/* 0x58 */
+	u32 pad10;
+};
+
+/* Break the vnic_rq_buf allocations into blocks of 64 entries */
+#define VNIC_RQ_BUF_BLK_ENTRIES 64
+#define VNIC_RQ_BUF_BLK_SZ \
+	(VNIC_RQ_BUF_BLK_ENTRIES * sizeof(struct vnic_rq_buf))
+#define VNIC_RQ_BUF_BLKS_NEEDED(entries) \
+	DIV_ROUND_UP(entries, VNIC_RQ_BUF_BLK_ENTRIES)
+#define VNIC_RQ_BUF_BLKS_MAX VNIC_RQ_BUF_BLKS_NEEDED(4096)
+
+struct vnic_rq_buf {
+	struct vnic_rq_buf *next;
+	dma_addr_t dma_addr;
+	void *os_buf;
+	unsigned int os_buf_index;
+	unsigned int len;
+	unsigned int index;
+	void *desc;
+};
+
+struct vnic_rq {
+	unsigned int index;
+	struct vnic_dev *vdev;
+	struct vnic_rq_ctrl __iomem *ctrl;              /* memory-mapped */
+	struct vnic_dev_ring ring;
+	struct vnic_rq_buf *bufs[VNIC_RQ_BUF_BLKS_MAX];
+	struct vnic_rq_buf *to_use;
+	struct vnic_rq_buf *to_clean;
+	void *os_buf_head;
+	unsigned int buf_index;
+	unsigned int pkts_outstanding;
+};
+
+static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)
+{
+	/* how many does SW own? */
+	return rq->ring.desc_avail;
+}
+
+static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq)
+{
+	/* how many does HW own? */
+	return rq->ring.desc_count - rq->ring.desc_avail - 1;
+}
+
+static inline void *vnic_rq_next_desc(struct vnic_rq *rq)
+{
+	return rq->to_use->desc;
+}
+
+static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq)
+{
+	return rq->to_use->index;
+}
+
+static inline unsigned int vnic_rq_next_buf_index(struct vnic_rq *rq)
+{
+	return rq->buf_index++;
+}
+
+static inline void vnic_rq_post(struct vnic_rq *rq,
+	void *os_buf, unsigned int os_buf_index,
+	dma_addr_t dma_addr, unsigned int len)
+{
+	struct vnic_rq_buf *buf = rq->to_use;
+
+	buf->os_buf = os_buf;
+	buf->os_buf_index = os_buf_index;
+	buf->dma_addr = dma_addr;
+	buf->len = len;
+
+	buf = buf->next;
+	rq->to_use = buf;
+	rq->ring.desc_avail--;
+
+	/* Move the posted_index every nth descriptor */
+
+#ifndef VNIC_RQ_RETURN_RATE
+#define VNIC_RQ_RETURN_RATE		0xf	/* keep 2^n - 1 */
+#endif
+
+	if ((buf->index & VNIC_RQ_RETURN_RATE) == 0)
+		iowrite32(buf->index, &rq->ctrl->posted_index);
+}
+
+static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count)
+{
+	rq->ring.desc_avail += count;
+}
+
+enum desc_return_options {
+	VNIC_RQ_RETURN_DESC,
+	VNIC_RQ_DEFER_RETURN_DESC,
+};
+
+static inline void vnic_rq_service(struct vnic_rq *rq,
+	struct cq_desc *cq_desc, u16 completed_index,
+	int desc_return, void (*buf_service)(struct vnic_rq *rq,
+	struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
+	int skipped, void *opaque), void *opaque)
+{
+	struct vnic_rq_buf *buf;
+	int skipped;
+
+	buf = rq->to_clean;
+	while (1) {
+
+		skipped = (buf->index != completed_index);
+
+		(*buf_service)(rq, cq_desc, buf, skipped, opaque);
+
+		if (desc_return == VNIC_RQ_RETURN_DESC)
+			rq->ring.desc_avail++;
+
+		rq->to_clean = buf->next;
+
+		if (!skipped)
+			break;
+
+		buf = rq->to_clean;
+	}
+}
+
+static inline int vnic_rq_fill(struct vnic_rq *rq,
+	int (*buf_fill)(struct vnic_rq *rq))
+{
+	int err;
+
+	while (vnic_rq_desc_avail(rq) > 1) {
+
+		err = (*buf_fill)(rq);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+void vnic_rq_free(struct vnic_rq *rq);
+int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
+	unsigned int desc_count, unsigned int desc_size);
+void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
+	unsigned int error_interrupt_enable,
+	unsigned int error_interrupt_offset);
+unsigned int vnic_rq_error_status(struct vnic_rq *rq);
+void vnic_rq_enable(struct vnic_rq *rq);
+int vnic_rq_disable(struct vnic_rq *rq);
+void vnic_rq_clean(struct vnic_rq *rq,
+	void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf));
+
+#endif /* _VNIC_RQ_H_ */
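
On the producer side, vnic_rq_fill() keeps the ring topped up (down to the one reserved descriptor) by invoking a driver callback that allocates a buffer and posts it. A minimal sketch of such a callback, with the buffer size fixed and the DMA mapping elided; names are illustrative:

static int example_rq_alloc_buf(struct vnic_rq *rq)
{
	unsigned int len = 1514;		/* illustrative frame size */
	struct sk_buff *skb = dev_alloc_skb(len);
	dma_addr_t dma = 0;			/* pci_map_single() elided */

	if (!skb)
		return -ENOMEM;

	vnic_rq_post(rq, skb, vnic_rq_next_buf_index(rq), dma, len);
	return 0;
}

A driver would then call vnic_rq_fill(rq, example_rq_alloc_buf) after vnic_rq_init() and again as completions drain the ring.
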
diff --git a/drivers/net/enic/vnic_rss.h b/drivers/net/enic/vnic_rss.h
new file mode 100644
index 000000000000..e325d65d7c34
--- /dev/null
+++ b/drivers/net/enic/vnic_rss.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ */
+
+#ifndef _VNIC_RSS_H_
+#define _VNIC_RSS_H_
+
+/* RSS key array */
+union vnic_rss_key {
+	struct {
+		u8 b[10];
+		u8 b_pad[6];
+	} key[4];
+	u64 raw[8];
+};
+
+/* RSS cpu array */
+union vnic_rss_cpu {
+	struct {
+		u8 b[4];
+		u8 b_pad[4];
+	} cpu[32];
+	u64 raw[32];
+};
+
+void vnic_set_rss_key(union vnic_rss_key *rss_key, u8 *key);
+void vnic_set_rss_cpu(union vnic_rss_cpu *rss_cpu, u8 *cpu);
+void vnic_get_rss_key(union vnic_rss_key *rss_key, u8 *key);
+void vnic_get_rss_cpu(union vnic_rss_cpu *rss_cpu, u8 *cpu);
+
+#endif /* _VNIC_RSS_H_ */
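
Each union pads its logical elements out to u64 slots: the 40-byte RSS key is stored as four 10-byte chunks, each padded to 16 bytes, and each CPU entry as 4 bytes padded to 8. The setters declared here are not defined in this excerpt; a plausible body for the key setter, shown only to make the layout concrete:

void vnic_set_rss_key(union vnic_rss_key *rss_key, u8 *key)
{
	int i, j;

	/* scatter the packed 40-byte key into the padded layout */
	for (i = 0; i < 4; i++)
		for (j = 0; j < 10; j++)
			rss_key->key[i].b[j] = key[i * 10 + j];
}
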
diff --git a/drivers/net/enic/vnic_stats.h b/drivers/net/enic/vnic_stats.h
new file mode 100644
index 000000000000..9ff9614d89b1
--- /dev/null
+++ b/drivers/net/enic/vnic_stats.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _VNIC_STATS_H_
+#define _VNIC_STATS_H_
+
+/* Tx statistics */
+struct vnic_tx_stats {
+	u64 tx_frames_ok;
+	u64 tx_unicast_frames_ok;
+	u64 tx_multicast_frames_ok;
+	u64 tx_broadcast_frames_ok;
+	u64 tx_bytes_ok;
+	u64 tx_unicast_bytes_ok;
+	u64 tx_multicast_bytes_ok;
+	u64 tx_broadcast_bytes_ok;
+	u64 tx_drops;
+	u64 tx_errors;
+	u64 tx_tso;
+	u64 rsvd[16];
+};
+
+/* Rx statistics */
+struct vnic_rx_stats {
+	u64 rx_frames_ok;
+	u64 rx_frames_total;
+	u64 rx_unicast_frames_ok;
+	u64 rx_multicast_frames_ok;
+	u64 rx_broadcast_frames_ok;
+	u64 rx_bytes_ok;
+	u64 rx_unicast_bytes_ok;
+	u64 rx_multicast_bytes_ok;
+	u64 rx_broadcast_bytes_ok;
+	u64 rx_drop;
+	u64 rx_no_bufs;
+	u64 rx_errors;
+	u64 rx_rss;
+	u64 rx_crc_errors;
+	u64 rx_frames_64;
+	u64 rx_frames_127;
+	u64 rx_frames_255;
+	u64 rx_frames_511;
+	u64 rx_frames_1023;
+	u64 rx_frames_1518;
+	u64 rx_frames_to_max;
+	u64 rsvd[16];
+};
+
+struct vnic_stats {
+	struct vnic_tx_stats tx;
+	struct vnic_rx_stats rx;
+};
+
+#endif /* _VNIC_STATS_H_ */
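
These layouts are the DMA targets of the stats-dump devcmds (e.g. CMD_STATS_DUMP_ALL above); the driver's job is simply to copy fields out of the dumped buffer. A minimal sketch of mapping them onto netdev counters; the function name and field mapping are illustrative:

static void example_copy_stats(struct net_device *netdev,
	struct vnic_stats *vs)
{
	struct net_device_stats *ns = &netdev->stats;

	ns->tx_packets = vs->tx.tx_frames_ok;
	ns->tx_bytes   = vs->tx.tx_bytes_ok;
	ns->tx_errors  = vs->tx.tx_errors;
	ns->rx_packets = vs->rx.rx_frames_ok;
	ns->rx_bytes   = vs->rx.rx_bytes_ok;
	ns->rx_errors  = vs->rx.rx_errors;
	ns->rx_dropped = vs->rx.rx_no_bufs;
}
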
diff --git a/drivers/net/enic/vnic_wq.c b/drivers/net/enic/vnic_wq.c
new file mode 100644
index 000000000000..a576d04708ef
--- /dev/null
+++ b/drivers/net/enic/vnic_wq.c
@@ -0,0 +1,184 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+
+#include "vnic_dev.h"
+#include "vnic_wq.h"
+
+static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
+{
+	struct vnic_wq_buf *buf;
+	struct vnic_dev *vdev;
+	unsigned int i, j, count = wq->ring.desc_count;
+	unsigned int blks = VNIC_WQ_BUF_BLKS_NEEDED(count);
+
+	vdev = wq->vdev;
+
+	for (i = 0; i < blks; i++) {
+		wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ, GFP_ATOMIC);
+		if (!wq->bufs[i]) {
+			printk(KERN_ERR "Failed to alloc wq_bufs\n");
+			return -ENOMEM;
+		}
+	}
+
+	for (i = 0; i < blks; i++) {
+		buf = wq->bufs[i];
+		for (j = 0; j < VNIC_WQ_BUF_BLK_ENTRIES; j++) {
+			buf->index = i * VNIC_WQ_BUF_BLK_ENTRIES + j;
+			buf->desc = (u8 *)wq->ring.descs +
+				wq->ring.desc_size * buf->index;
+			if (buf->index + 1 == count) {
+				buf->next = wq->bufs[0];
+				break;
+			} else if (j + 1 == VNIC_WQ_BUF_BLK_ENTRIES) {
+				buf->next = wq->bufs[i + 1];
+			} else {
+				buf->next = buf + 1;
+				buf++;
+			}
+		}
+	}
+
+	wq->to_use = wq->to_clean = wq->bufs[0];
+
+	return 0;
+}
+
+void vnic_wq_free(struct vnic_wq *wq)
+{
+	struct vnic_dev *vdev;
+	unsigned int i;
+
+	vdev = wq->vdev;
+
+	vnic_dev_free_desc_ring(vdev, &wq->ring);
+
+	for (i = 0; i < VNIC_WQ_BUF_BLKS_MAX; i++) {
+		kfree(wq->bufs[i]);
+		wq->bufs[i] = NULL;
+	}
+
+	wq->ctrl = NULL;
+}
+
+int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
+	unsigned int desc_count, unsigned int desc_size)
+{
+	int err;
+
+	wq->index = index;
+	wq->vdev = vdev;
+
+	wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_WQ, index);
+	if (!wq->ctrl) {
+		printk(KERN_ERR "Failed to hook WQ[%d] resource\n", index);
+		return -EINVAL;
+	}
+
+	vnic_wq_disable(wq);
+
+	err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size);
+	if (err)
+		return err;
+
+	err = vnic_wq_alloc_bufs(wq);
+	if (err) {
+		vnic_wq_free(wq);
+		return err;
+	}
+
+	return 0;
+}
+
+void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
+	unsigned int error_interrupt_enable,
+	unsigned int error_interrupt_offset)
+{
+	u64 paddr;
+
+	paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET;
+	writeq(paddr, &wq->ctrl->ring_base);
+	iowrite32(wq->ring.desc_count, &wq->ctrl->ring_size);
+	iowrite32(0, &wq->ctrl->fetch_index);
+	iowrite32(0, &wq->ctrl->posted_index);
+	iowrite32(cq_index, &wq->ctrl->cq_index);
+	iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable);
+	iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset);
+	iowrite32(0, &wq->ctrl->error_status);
+}
+
+unsigned int vnic_wq_error_status(struct vnic_wq *wq)
+{
+	return ioread32(&wq->ctrl->error_status);
+}
+
+void vnic_wq_enable(struct vnic_wq *wq)
+{
+	iowrite32(1, &wq->ctrl->enable);
+}
+
+int vnic_wq_disable(struct vnic_wq *wq)
+{
+	unsigned int wait;
+
+	iowrite32(0, &wq->ctrl->enable);
+
+	/* Wait for HW to ACK disable request */
+	for (wait = 0; wait < 100; wait++) {
+		if (!(ioread32(&wq->ctrl->running)))
+			return 0;
+		udelay(1);
+	}
+
+	printk(KERN_ERR "Failed to disable WQ[%d]\n", wq->index);
+
+	return -ETIMEDOUT;
+}
+
+void vnic_wq_clean(struct vnic_wq *wq,
+	void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf))
+{
+	struct vnic_wq_buf *buf;
+
+	BUG_ON(ioread32(&wq->ctrl->enable));
+
+	buf = wq->to_clean;
+
+	while (vnic_wq_desc_used(wq) > 0) {
+
+		(*buf_clean)(wq, buf);
+
+		buf = wq->to_clean = buf->next;
+		wq->ring.desc_avail++;
+	}
+
+	wq->to_use = wq->to_clean = wq->bufs[0];
+
+	iowrite32(0, &wq->ctrl->fetch_index);
+	iowrite32(0, &wq->ctrl->posted_index);
+	iowrite32(0, &wq->ctrl->error_status);
+
+	vnic_dev_clear_desc_ring(&wq->ring);
+}
diff --git a/drivers/net/enic/vnic_wq.h b/drivers/net/enic/vnic_wq.h
new file mode 100644
index 000000000000..7081828d8a42
--- /dev/null
+++ b/drivers/net/enic/vnic_wq.h
@@ -0,0 +1,154 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _VNIC_WQ_H_
+#define _VNIC_WQ_H_
+
+#include <linux/pci.h>
+
+#include "vnic_dev.h"
+#include "vnic_cq.h"
+
+/* Work queue control */
+struct vnic_wq_ctrl {
+	u64 ring_base;			/* 0x00 */
+	u32 ring_size;			/* 0x08 */
+	u32 pad0;
+	u32 posted_index;		/* 0x10 */
+	u32 pad1;
+	u32 cq_index;			/* 0x18 */
+	u32 pad2;
+	u32 enable;			/* 0x20 */
+	u32 pad3;
+	u32 running;			/* 0x28 */
+	u32 pad4;
+	u32 fetch_index;		/* 0x30 */
+	u32 pad5;
+	u32 dca_value;			/* 0x38 */
+	u32 pad6;
+	u32 error_interrupt_enable;	/* 0x40 */
+	u32 pad7;
+	u32 error_interrupt_offset;	/* 0x48 */
+	u32 pad8;
+	u32 error_status;		/* 0x50 */
+	u32 pad9;
+};
+
+struct vnic_wq_buf {
+	struct vnic_wq_buf *next;
+	dma_addr_t dma_addr;
+	void *os_buf;
+	unsigned int len;
+	unsigned int index;
+	int sop;
+	void *desc;
+};
+
+/* Break the vnic_wq_buf allocations into blocks of 64 entries */
+#define VNIC_WQ_BUF_BLK_ENTRIES 64
+#define VNIC_WQ_BUF_BLK_SZ \
+	(VNIC_WQ_BUF_BLK_ENTRIES * sizeof(struct vnic_wq_buf))
+#define VNIC_WQ_BUF_BLKS_NEEDED(entries) \
+	DIV_ROUND_UP(entries, VNIC_WQ_BUF_BLK_ENTRIES)
+#define VNIC_WQ_BUF_BLKS_MAX VNIC_WQ_BUF_BLKS_NEEDED(4096)
+
+struct vnic_wq {
+	unsigned int index;
+	struct vnic_dev *vdev;
+	struct vnic_wq_ctrl __iomem *ctrl;              /* memory-mapped */
+	struct vnic_dev_ring ring;
+	struct vnic_wq_buf *bufs[VNIC_WQ_BUF_BLKS_MAX];
+	struct vnic_wq_buf *to_use;
+	struct vnic_wq_buf *to_clean;
+	unsigned int pkts_outstanding;
+};
+
+static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq)
+{
+	/* how many does SW own? */
+	return wq->ring.desc_avail;
+}
+
+static inline unsigned int vnic_wq_desc_used(struct vnic_wq *wq)
+{
+	/* how many does HW own? */
+	return wq->ring.desc_count - wq->ring.desc_avail - 1;
+}
+
+static inline void *vnic_wq_next_desc(struct vnic_wq *wq)
+{
+	return wq->to_use->desc;
+}
+
+static inline void vnic_wq_post(struct vnic_wq *wq,
+	void *os_buf, dma_addr_t dma_addr,
+	unsigned int len, int sop, int eop)
+{
+	struct vnic_wq_buf *buf = wq->to_use;
+
+	buf->sop = sop;
+	buf->os_buf = eop ? os_buf : NULL;
+	buf->dma_addr = dma_addr;
+	buf->len = len;
+
+	buf = buf->next;
+	if (eop)
+		iowrite32(buf->index, &wq->ctrl->posted_index);
+	wq->to_use = buf;
+
+	wq->ring.desc_avail--;
+}
+
+static inline void vnic_wq_service(struct vnic_wq *wq,
+	struct cq_desc *cq_desc, u16 completed_index,
+	void (*buf_service)(struct vnic_wq *wq,
+	struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque),
+	void *opaque)
+{
+	struct vnic_wq_buf *buf;
+
+	buf = wq->to_clean;
+	while (1) {
+
+		(*buf_service)(wq, cq_desc, buf, opaque);
+
+		wq->ring.desc_avail++;
+
+		wq->to_clean = buf->next;
+
+		if (buf->index == completed_index)
+			break;
+
+		buf = wq->to_clean;
+	}
+}
+
+void vnic_wq_free(struct vnic_wq *wq);
+int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
+	unsigned int desc_count, unsigned int desc_size);
+void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
+	unsigned int error_interrupt_enable,
+	unsigned int error_interrupt_offset);
+unsigned int vnic_wq_error_status(struct vnic_wq *wq);
+void vnic_wq_enable(struct vnic_wq *wq);
+int vnic_wq_disable(struct vnic_wq *wq);
+void vnic_wq_clean(struct vnic_wq *wq,
+	void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf));
+
+#endif /* _VNIC_WQ_H_ */
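
Completion handling mirrors the RQ side: vnic_wq_service() walks buffers from to_clean through completed_index, invoking a driver callback for each. A minimal sketch of the callback and its invocation; the names are illustrative and the unmap/free bodies are elided:

static void example_tx_done(struct vnic_wq *wq, struct cq_desc *cq_desc,
	struct vnic_wq_buf *buf, void *opaque)
{
	/* pci_unmap_*() buf->dma_addr, then free buf->os_buf
	 * (non-NULL only on end-of-packet buffers) */
}

Then, on receipt of a TX completion for descriptor completed_index:

	vnic_wq_service(wq, cq_desc, completed_index, example_tx_done, NULL);
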
diff --git a/drivers/net/enic/wq_enet_desc.h b/drivers/net/enic/wq_enet_desc.h
new file mode 100644
index 000000000000..483596c2d8bf
--- /dev/null
+++ b/drivers/net/enic/wq_enet_desc.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _WQ_ENET_DESC_H_
+#define _WQ_ENET_DESC_H_
+
+/* Ethernet work queue descriptor: 16B */
+struct wq_enet_desc {
+	__le64 address;
+	__le16 length;
+	__le16 mss_loopback;
+	__le16 header_length_flags;
+	__le16 vlan_tag;
+};
+
+#define WQ_ENET_ADDR_BITS		64
+#define WQ_ENET_LEN_BITS		14
+#define WQ_ENET_LEN_MASK		((1 << WQ_ENET_LEN_BITS) - 1)
+#define WQ_ENET_MSS_BITS		14
+#define WQ_ENET_MSS_MASK		((1 << WQ_ENET_MSS_BITS) - 1)
+#define WQ_ENET_MSS_SHIFT		2
+#define WQ_ENET_LOOPBACK_SHIFT		1
+#define WQ_ENET_HDRLEN_BITS		10
+#define WQ_ENET_HDRLEN_MASK		((1 << WQ_ENET_HDRLEN_BITS) - 1)
+#define WQ_ENET_FLAGS_OM_BITS		2
+#define WQ_ENET_FLAGS_OM_MASK		((1 << WQ_ENET_FLAGS_OM_BITS) - 1)
+#define WQ_ENET_FLAGS_EOP_SHIFT		12
+#define WQ_ENET_FLAGS_CQ_ENTRY_SHIFT	13
+#define WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT	14
+#define WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT	15
+
+#define WQ_ENET_OFFLOAD_MODE_CSUM	0
+#define WQ_ENET_OFFLOAD_MODE_RESERVED	1
+#define WQ_ENET_OFFLOAD_MODE_CSUM_L4	2
+#define WQ_ENET_OFFLOAD_MODE_TSO	3
+
+static inline void wq_enet_desc_enc(struct wq_enet_desc *desc,
+	u64 address, u16 length, u16 mss, u16 header_length,
+	u8 offload_mode, u8 eop, u8 cq_entry, u8 fcoe_encap,
+	u8 vlan_tag_insert, u16 vlan_tag, u8 loopback)
+{
+	desc->address = cpu_to_le64(address);
+	desc->length = cpu_to_le16(length & WQ_ENET_LEN_MASK);
+	desc->mss_loopback = cpu_to_le16((mss & WQ_ENET_MSS_MASK) <<
+		WQ_ENET_MSS_SHIFT | (loopback & 1) << WQ_ENET_LOOPBACK_SHIFT);
+	desc->header_length_flags = cpu_to_le16(
+		(header_length & WQ_ENET_HDRLEN_MASK) |
+		(offload_mode & WQ_ENET_FLAGS_OM_MASK) << WQ_ENET_HDRLEN_BITS |
+		(eop & 1) << WQ_ENET_FLAGS_EOP_SHIFT |
+		(cq_entry & 1) << WQ_ENET_FLAGS_CQ_ENTRY_SHIFT |
+		(fcoe_encap & 1) << WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT |
+		(vlan_tag_insert & 1) << WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT);
+	desc->vlan_tag = cpu_to_le16(vlan_tag);
+}
+
+static inline void wq_enet_desc_dec(struct wq_enet_desc *desc,
+	u64 *address, u16 *length, u16 *mss, u16 *header_length,
+	u8 *offload_mode, u8 *eop, u8 *cq_entry, u8 *fcoe_encap,
+	u8 *vlan_tag_insert, u16 *vlan_tag, u8 *loopback)
+{
+	*address = le64_to_cpu(desc->address);
+	*length = le16_to_cpu(desc->length) & WQ_ENET_LEN_MASK;
+	*mss = (le16_to_cpu(desc->mss_loopback) >> WQ_ENET_MSS_SHIFT) &
+		WQ_ENET_MSS_MASK;
+	*loopback = (u8)((le16_to_cpu(desc->mss_loopback) >>
+		WQ_ENET_LOOPBACK_SHIFT) & 1);
+	*header_length = le16_to_cpu(desc->header_length_flags) &
+		WQ_ENET_HDRLEN_MASK;
+	*offload_mode = (u8)((le16_to_cpu(desc->header_length_flags) >>
+		WQ_ENET_HDRLEN_BITS) & WQ_ENET_FLAGS_OM_MASK);
+	*eop = (u8)((le16_to_cpu(desc->header_length_flags) >>
+		WQ_ENET_FLAGS_EOP_SHIFT) & 1);
+	*cq_entry = (u8)((le16_to_cpu(desc->header_length_flags) >>
+		WQ_ENET_FLAGS_CQ_ENTRY_SHIFT) & 1);
+	*fcoe_encap = (u8)((le16_to_cpu(desc->header_length_flags) >>
+		WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT) & 1);
+	*vlan_tag_insert = (u8)((le16_to_cpu(desc->header_length_flags) >>
+		WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT) & 1);
+	*vlan_tag = le16_to_cpu(desc->vlan_tag);
+}
+
+#endif /* _WQ_ENET_DESC_H_ */
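
Tying the last two files together, a transmit post is: take the next slot with vnic_wq_next_desc(), encode it in place, then vnic_wq_post() to record the buffer and ring the doorbell on EOP. A minimal single-fragment, checksum-offload sketch; the function name, skb, dma_addr and len are illustrative:

static void example_tx_post(struct vnic_wq *wq, struct sk_buff *skb,
	dma_addr_t dma_addr, unsigned int len)
{
	struct wq_enet_desc *desc = vnic_wq_next_desc(wq);

	wq_enet_desc_enc(desc,
		(u64)dma_addr, len,
		0,			/* mss */
		0,			/* header_length */
		WQ_ENET_OFFLOAD_MODE_CSUM,
		1,			/* eop */
		1,			/* cq_entry: request a completion */
		0,			/* fcoe_encap */
		0, 0,			/* no VLAN tag insert */
		0);			/* loopback */

	vnic_wq_post(wq, skb, dma_addr, len, 1 /* sop */, 1 /* eop */);
}
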
diff --git a/drivers/net/jme.c b/drivers/net/jme.c
new file mode 100644
index 000000000000..f292df557544
--- /dev/null
+++ b/drivers/net/jme.c
@@ -0,0 +1,3019 @@
+/*
+ * JMicron JMC2x0 series PCIe Ethernet Linux Device Driver
+ *
+ * Copyright 2008 JMicron Technology Corporation
+ * http://www.jmicron.com/
+ *
+ * Author: Guo-Fu Tseng <cooldavid@cooldavid.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/crc32.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/if_vlan.h>
+#include "jme.h"
+
+static int force_pseudohp = -1;
+static int no_pseudohp = -1;
+static int no_extplug = -1;
+module_param(force_pseudohp, int, 0);
+MODULE_PARM_DESC(force_pseudohp,
+	"Manually enable the pseudo hot-plug feature instead of relying on BIOS.");
+module_param(no_pseudohp, int, 0);
+MODULE_PARM_DESC(no_pseudohp, "Disable pseudo hot-plug feature.");
+module_param(no_extplug, int, 0);
+MODULE_PARM_DESC(no_extplug,
+	"Do not use external plug signal for pseudo hot-plug.");
+
+static int
+jme_mdio_read(struct net_device *netdev, int phy, int reg)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+	int i, val, again = (reg == MII_BMSR) ? 1 : 0;
+
+read_again:
+	jwrite32(jme, JME_SMI, SMI_OP_REQ |
+				smi_phy_addr(phy) |
+				smi_reg_addr(reg));
+
+	wmb();
+	for (i = JME_PHY_TIMEOUT * 50 ; i > 0 ; --i) {
+		udelay(20);
+		val = jread32(jme, JME_SMI);
+		if ((val & SMI_OP_REQ) == 0)
+			break;
+	}
+
+	if (i == 0) {
+		jeprintk(jme->pdev, "phy(%d) read timeout : %d\n", phy, reg);
+		return 0;
+	}
+
+	if (again--)
+		goto read_again;
+
+	return (val & SMI_DATA_MASK) >> SMI_DATA_SHIFT;
+}
+
+static void
+jme_mdio_write(struct net_device *netdev,
+				int phy, int reg, int val)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+	int i;
+
+	jwrite32(jme, JME_SMI, SMI_OP_WRITE | SMI_OP_REQ |
+		((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
+		smi_phy_addr(phy) | smi_reg_addr(reg));
+
+	wmb();
+	for (i = JME_PHY_TIMEOUT * 50 ; i > 0 ; --i) {
+		udelay(20);
+		if ((jread32(jme, JME_SMI) & SMI_OP_REQ) == 0)
+			break;
+	}
+
+	if (i == 0)
+		jeprintk(jme->pdev, "phy(%d) write timeout : %d\n", phy, reg);
+
+	return;
+}
+
+static inline void
+jme_reset_phy_processor(struct jme_adapter *jme)
+{
+	u32 val;
+
+	jme_mdio_write(jme->dev,
+			jme->mii_if.phy_id,
+			MII_ADVERTISE, ADVERTISE_ALL |
+			ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
+
+	if (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250)
+		jme_mdio_write(jme->dev,
+				jme->mii_if.phy_id,
+				MII_CTRL1000,
+				ADVERTISE_1000FULL | ADVERTISE_1000HALF);
+
+	val = jme_mdio_read(jme->dev,
+				jme->mii_if.phy_id,
+				MII_BMCR);
+
+	jme_mdio_write(jme->dev,
+			jme->mii_if.phy_id,
+			MII_BMCR, val | BMCR_RESET);
+}
+
+static void
+jme_setup_wakeup_frame(struct jme_adapter *jme,
+		u32 *mask, u32 crc, int fnr)
+{
+	int i;
+
+	/*
+	 * Setup CRC pattern
+	 */
+	jwrite32(jme, JME_WFOI, WFOI_CRC_SEL | (fnr & WFOI_FRAME_SEL));
+	wmb();
+	jwrite32(jme, JME_WFODP, crc);
+	wmb();
+
+	/*
+	 * Setup Mask
+	 */
+	for (i = 0 ; i < WAKEUP_FRAME_MASK_DWNR ; ++i) {
+		jwrite32(jme, JME_WFOI,
+				((i << WFOI_MASK_SHIFT) & WFOI_MASK_SEL) |
+				(fnr & WFOI_FRAME_SEL));
+		wmb();
+		jwrite32(jme, JME_WFODP, mask[i]);
+		wmb();
+	}
+}
+
+static inline void
+jme_reset_mac_processor(struct jme_adapter *jme)
+{
+	u32 mask[WAKEUP_FRAME_MASK_DWNR] = {0, 0, 0, 0};
+	u32 crc = 0xCDCDCDCD;
+	u32 gpreg0;
+	int i;
+
+	jwrite32(jme, JME_GHC, jme->reg_ghc | GHC_SWRST);
+	udelay(2);
+	jwrite32(jme, JME_GHC, jme->reg_ghc);
+
+	jwrite32(jme, JME_RXDBA_LO, 0x00000000);
+	jwrite32(jme, JME_RXDBA_HI, 0x00000000);
+	jwrite32(jme, JME_RXQDC, 0x00000000);
+	jwrite32(jme, JME_RXNDA, 0x00000000);
+	jwrite32(jme, JME_TXDBA_LO, 0x00000000);
+	jwrite32(jme, JME_TXDBA_HI, 0x00000000);
+	jwrite32(jme, JME_TXQDC, 0x00000000);
+	jwrite32(jme, JME_TXNDA, 0x00000000);
+
+	jwrite32(jme, JME_RXMCHT_LO, 0x00000000);
+	jwrite32(jme, JME_RXMCHT_HI, 0x00000000);
+	for (i = 0 ; i < WAKEUP_FRAME_NR ; ++i)
+		jme_setup_wakeup_frame(jme, mask, crc, i);
+	if (jme->fpgaver)
+		gpreg0 = GPREG0_DEFAULT | GPREG0_LNKINTPOLL;
+	else
+		gpreg0 = GPREG0_DEFAULT;
+	jwrite32(jme, JME_GPREG0, gpreg0);
+	jwrite32(jme, JME_GPREG1, 0);
+}
+
+static inline void
+jme_reset_ghc_speed(struct jme_adapter *jme)
+{
+	jme->reg_ghc &= ~(GHC_SPEED_1000M | GHC_DPX);
+	jwrite32(jme, JME_GHC, jme->reg_ghc);
+}
+
+static inline void
+jme_clear_pm(struct jme_adapter *jme)
+{
+	jwrite32(jme, JME_PMCS, 0xFFFF0000 | jme->reg_pmcs);
+	pci_set_power_state(jme->pdev, PCI_D0);
+	pci_enable_wake(jme->pdev, PCI_D0, false);
+}
+
+static int
+jme_reload_eeprom(struct jme_adapter *jme)
+{
+	u32 val;
+	int i;
+
+	val = jread32(jme, JME_SMBCSR);
+
+	if (val & SMBCSR_EEPROMD) {
+		val |= SMBCSR_CNACK;
+		jwrite32(jme, JME_SMBCSR, val);
+		val |= SMBCSR_RELOAD;
+		jwrite32(jme, JME_SMBCSR, val);
+		mdelay(12);
+
+		for (i = JME_EEPROM_RELOAD_TIMEOUT; i > 0; --i) {
+			mdelay(1);
+			if ((jread32(jme, JME_SMBCSR) & SMBCSR_RELOAD) == 0)
+				break;
+		}
+
+		if (i == 0) {
+			jeprintk(jme->pdev, "eeprom reload timeout\n");
+			return -EIO;
+		}
+	}
+
+	return 0;
+}
+
+static void
+jme_load_macaddr(struct net_device *netdev)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+	unsigned char macaddr[6];
+	u32 val;
+
+	spin_lock_bh(&jme->macaddr_lock);
+	val = jread32(jme, JME_RXUMA_LO);
+	macaddr[0] = (val >>  0) & 0xFF;
+	macaddr[1] = (val >>  8) & 0xFF;
+	macaddr[2] = (val >> 16) & 0xFF;
+	macaddr[3] = (val >> 24) & 0xFF;
+	val = jread32(jme, JME_RXUMA_HI);
+	macaddr[4] = (val >>  0) & 0xFF;
+	macaddr[5] = (val >>  8) & 0xFF;
+	memcpy(netdev->dev_addr, macaddr, 6);
+	spin_unlock_bh(&jme->macaddr_lock);
+}
+
+static inline void
+jme_set_rx_pcc(struct jme_adapter *jme, int p)
+{
+	switch (p) {
+	case PCC_OFF:
+		jwrite32(jme, JME_PCCRX0,
+			((PCC_OFF_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
+			((PCC_OFF_CNT << PCCRX_SHIFT) & PCCRX_MASK));
+		break;
+	case PCC_P1:
+		jwrite32(jme, JME_PCCRX0,
+			((PCC_P1_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
+			((PCC_P1_CNT << PCCRX_SHIFT) & PCCRX_MASK));
+		break;
+	case PCC_P2:
+		jwrite32(jme, JME_PCCRX0,
+			((PCC_P2_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
+			((PCC_P2_CNT << PCCRX_SHIFT) & PCCRX_MASK));
+		break;
+	case PCC_P3:
+		jwrite32(jme, JME_PCCRX0,
+			((PCC_P3_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
+			((PCC_P3_CNT << PCCRX_SHIFT) & PCCRX_MASK));
+		break;
+	default:
+		break;
+	}
+	wmb();
+
+	if (!(test_bit(JME_FLAG_POLL, &jme->flags)))
+		msg_rx_status(jme, "Switched to PCC_P%d\n", p);
+}
+
+static void
+jme_start_irq(struct jme_adapter *jme)
+{
+	register struct dynpcc_info *dpi = &(jme->dpi);
+
+	jme_set_rx_pcc(jme, PCC_P1);
+	dpi->cur		= PCC_P1;
+	dpi->attempt		= PCC_P1;
+	dpi->cnt		= 0;
+
+	jwrite32(jme, JME_PCCTX,
+			((PCC_TX_TO << PCCTXTO_SHIFT) & PCCTXTO_MASK) |
+			((PCC_TX_CNT << PCCTX_SHIFT) & PCCTX_MASK) |
+			PCCTXQ0_EN
+		);
+
+	/*
+	 * Enable Interrupts
+	 */
+	jwrite32(jme, JME_IENS, INTR_ENABLE);
+}
+
+static inline void
+jme_stop_irq(struct jme_adapter *jme)
+{
+	/*
+	 * Disable Interrupts
+	 */
+	jwrite32f(jme, JME_IENC, INTR_ENABLE);
+}
+
+static inline void
+jme_enable_shadow(struct jme_adapter *jme)
+{
+	jwrite32(jme,
+		 JME_SHBA_LO,
+		 ((u32)jme->shadow_dma & ~((u32)0x1F)) | SHBA_POSTEN);
+}
+
+static inline void
+jme_disable_shadow(struct jme_adapter *jme)
+{
+	jwrite32(jme, JME_SHBA_LO, 0x0);
+}
+
+static u32
+jme_linkstat_from_phy(struct jme_adapter *jme)
+{
+	u32 phylink, bmsr;
+
+	phylink = jme_mdio_read(jme->dev, jme->mii_if.phy_id, 17);
+	bmsr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMSR);
+	if (bmsr & BMSR_ANCOMP)
+		phylink |= PHY_LINK_AUTONEG_COMPLETE;
+
+	return phylink;
+}
+
+static inline void
+jme_set_phyfifoa(struct jme_adapter *jme)
+{
+	jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0004);
+}
+
+static inline void
+jme_set_phyfifob(struct jme_adapter *jme)
+{
+	jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0000);
+}
+
+static int
+jme_check_link(struct net_device *netdev, int testonly)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+	u32 phylink, ghc, cnt = JME_SPDRSV_TIMEOUT, bmcr;
+	char linkmsg[64];
+	int rc = 0;
+
+	linkmsg[0] = '\0';
+
+	if (jme->fpgaver)
+		phylink = jme_linkstat_from_phy(jme);
+	else
+		phylink = jread32(jme, JME_PHY_LINK);
+
+	if (phylink & PHY_LINK_UP) {
+		if (!(phylink & PHY_LINK_AUTONEG_COMPLETE)) {
+			/*
+			 * If AN was not enabled, the speed/duplex
+			 * info must be obtained from the SMI registers.
+			 */
+			phylink = PHY_LINK_UP;
+
+			bmcr = jme_mdio_read(jme->dev,
+						jme->mii_if.phy_id,
+						MII_BMCR);
+
+			phylink |= ((bmcr & BMCR_SPEED1000) &&
+					(bmcr & BMCR_SPEED100) == 0) ?
+					PHY_LINK_SPEED_1000M :
+					(bmcr & BMCR_SPEED100) ?
+					PHY_LINK_SPEED_100M :
+					PHY_LINK_SPEED_10M;
+
+			phylink |= (bmcr & BMCR_FULLDPLX) ?
+					 PHY_LINK_DUPLEX : 0;
+
+			strcat(linkmsg, "Forced: ");
+		} else {
+			/*
+			 * Keep polling until speed/duplex resolution
+			 * completes
+			 */
+			while (!(phylink & PHY_LINK_SPEEDDPU_RESOLVED) &&
+				--cnt) {
+
+				udelay(1);
+
+				if (jme->fpgaver)
+					phylink = jme_linkstat_from_phy(jme);
+				else
+					phylink = jread32(jme, JME_PHY_LINK);
+			}
+			if (!cnt)
+				jeprintk(jme->pdev,
+					"Waiting speed resolve timeout.\n");
+
+			strcat(linkmsg, "ANed: ");
+		}
+
+		if (jme->phylink == phylink) {
+			rc = 1;
+			goto out;
+		}
+		if (testonly)
+			goto out;
+
+		jme->phylink = phylink;
+
+		ghc = jme->reg_ghc & ~(GHC_SPEED_10M |
+					GHC_SPEED_100M |
+					GHC_SPEED_1000M |
+					GHC_DPX);
+		switch (phylink & PHY_LINK_SPEED_MASK) {
+		case PHY_LINK_SPEED_10M:
+			ghc |= GHC_SPEED_10M;
+			strcat(linkmsg, "10 Mbps, ");
+			if (is_buggy250(jme->pdev->device, jme->chiprev))
+				jme_set_phyfifoa(jme);
+			break;
+		case PHY_LINK_SPEED_100M:
+			ghc |= GHC_SPEED_100M;
+			strcat(linkmsg, "100 Mbps, ");
+			if (is_buggy250(jme->pdev->device, jme->chiprev))
+				jme_set_phyfifob(jme);
+			break;
+		case PHY_LINK_SPEED_1000M:
+			ghc |= GHC_SPEED_1000M;
+			strcat(linkmsg, "1000 Mbps, ");
+			if (is_buggy250(jme->pdev->device, jme->chiprev))
+				jme_set_phyfifoa(jme);
+			break;
+		default:
+			break;
+		}
+		ghc |= (phylink & PHY_LINK_DUPLEX) ? GHC_DPX : 0;
+
+		strcat(linkmsg, (phylink & PHY_LINK_DUPLEX) ?
+					"Full-Duplex, " :
+					"Half-Duplex, ");
+
+		if (phylink & PHY_LINK_MDI_STAT)
+			strcat(linkmsg, "MDI-X");
+		else
+			strcat(linkmsg, "MDI");
+
+		if (phylink & PHY_LINK_DUPLEX) {
+			jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT);
+		} else {
+			jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT |
+						TXMCS_BACKOFF |
+						TXMCS_CARRIERSENSE |
+						TXMCS_COLLISION);
+			jwrite32(jme, JME_TXTRHD, TXTRHD_TXPEN |
+				((0x2000 << TXTRHD_TXP_SHIFT) & TXTRHD_TXP) |
+				TXTRHD_TXREN |
+				((8 << TXTRHD_TXRL_SHIFT) & TXTRHD_TXRL));
+		}
+
+		jme->reg_ghc = ghc;
+		jwrite32(jme, JME_GHC, ghc);
+
+		msg_link(jme, "Link is up at %s.\n", linkmsg);
+		netif_carrier_on(netdev);
+	} else {
+		if (testonly)
+			goto out;
+
+		msg_link(jme, "Link is down.\n");
+		jme->phylink = 0;
+		netif_carrier_off(netdev);
+	}
+
+out:
+	return rc;
+}
+
+static int
+jme_setup_tx_resources(struct jme_adapter *jme)
+{
+	struct jme_ring *txring = &(jme->txring[0]);
+
+	txring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
+				   TX_RING_ALLOC_SIZE(jme->tx_ring_size),
+				   &(txring->dmaalloc),
+				   GFP_ATOMIC);
+
+	if (!txring->alloc) {
+		txring->desc = NULL;
+		txring->dmaalloc = 0;
+		txring->dma = 0;
+		return -ENOMEM;
+	}
+
+	/*
+	 * 16-byte alignment
+	 */
+	txring->desc		= (void *)ALIGN((unsigned long)(txring->alloc),
+						RING_DESC_ALIGN);
+	txring->dma		= ALIGN(txring->dmaalloc, RING_DESC_ALIGN);
+	txring->next_to_use	= 0;
+	atomic_set(&txring->next_to_clean, 0);
+	atomic_set(&txring->nr_free, jme->tx_ring_size);
+
+	/*
+	 * Initialize Transmit Descriptors
+	 */
+	memset(txring->alloc, 0, TX_RING_ALLOC_SIZE(jme->tx_ring_size));
+	memset(txring->bufinf, 0,
+		sizeof(struct jme_buffer_info) * jme->tx_ring_size);
+
+	return 0;
+}
+
+static void
+jme_free_tx_resources(struct jme_adapter *jme)
+{
+	int i;
+	struct jme_ring *txring = &(jme->txring[0]);
+	struct jme_buffer_info *txbi = txring->bufinf;
+
+	if (txring->alloc) {
+		for (i = 0 ; i < jme->tx_ring_size ; ++i) {
+			txbi = txring->bufinf + i;
+			if (txbi->skb) {
+				dev_kfree_skb(txbi->skb);
+				txbi->skb = NULL;
+			}
+			txbi->mapping		= 0;
+			txbi->len		= 0;
+			txbi->nr_desc		= 0;
+			txbi->start_xmit	= 0;
+		}
+
+		dma_free_coherent(&(jme->pdev->dev),
+				  TX_RING_ALLOC_SIZE(jme->tx_ring_size),
+				  txring->alloc,
+				  txring->dmaalloc);
+
+		txring->alloc		= NULL;
+		txring->desc		= NULL;
+		txring->dmaalloc	= 0;
+		txring->dma		= 0;
+	}
+	txring->next_to_use	= 0;
+	atomic_set(&txring->next_to_clean, 0);
+	atomic_set(&txring->nr_free, 0);
+}
+
+static inline void
+jme_enable_tx_engine(struct jme_adapter *jme)
+{
+	/*
+	 * Select Queue 0
+	 */
+	jwrite32(jme, JME_TXCS, TXCS_DEFAULT | TXCS_SELECT_QUEUE0);
+	wmb();
+
+	/*
+	 * Setup TX Queue 0 DMA Base Address
+	 */
+	jwrite32(jme, JME_TXDBA_LO, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL);
+	jwrite32(jme, JME_TXDBA_HI, (__u64)(jme->txring[0].dma) >> 32);
+	jwrite32(jme, JME_TXNDA, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL);
+
+	/*
+	 * Setup TX Descriptor Count
+	 */
+	jwrite32(jme, JME_TXQDC, jme->tx_ring_size);
+
+	/*
+	 * Enable TX Engine
+	 */
+	wmb();
+	jwrite32(jme, JME_TXCS, jme->reg_txcs |
+				TXCS_SELECT_QUEUE0 |
+				TXCS_ENABLE);
+}
+
+static inline void
+jme_restart_tx_engine(struct jme_adapter *jme)
+{
+	/*
+	 * Restart TX Engine
+	 */
+	jwrite32(jme, JME_TXCS, jme->reg_txcs |
+				TXCS_SELECT_QUEUE0 |
+				TXCS_ENABLE);
+}
+
+static inline void
+jme_disable_tx_engine(struct jme_adapter *jme)
+{
+	int i;
+	u32 val;
+
+	/*
+	 * Disable TX Engine
+	 */
+	jwrite32(jme, JME_TXCS, jme->reg_txcs | TXCS_SELECT_QUEUE0);
+	wmb();
+
+	val = jread32(jme, JME_TXCS);
+	for (i = JME_TX_DISABLE_TIMEOUT ; (val & TXCS_ENABLE) && i > 0 ; --i) {
+		mdelay(1);
+		val = jread32(jme, JME_TXCS);
+		rmb();
+	}
+
+	if (!i)
+		jeprintk(jme->pdev, "Disable TX engine timeout.\n");
+}
+
+static void
+jme_set_clean_rxdesc(struct jme_adapter *jme, int i)
+{
+	struct jme_ring *rxring = jme->rxring;
+	register struct rxdesc *rxdesc = rxring->desc;
+	struct jme_buffer_info *rxbi = rxring->bufinf;
+	rxdesc += i;
+	rxbi += i;
+
+	rxdesc->dw[0] = 0;
+	rxdesc->dw[1] = 0;
+	rxdesc->desc1.bufaddrh	= cpu_to_le32((__u64)rxbi->mapping >> 32);
+	rxdesc->desc1.bufaddrl	= cpu_to_le32(
+					(__u64)rxbi->mapping & 0xFFFFFFFFUL);
+	rxdesc->desc1.datalen	= cpu_to_le16(rxbi->len);
+	if (jme->dev->features & NETIF_F_HIGHDMA)
+		rxdesc->desc1.flags = RXFLAG_64BIT;
+	wmb();
+	rxdesc->desc1.flags	|= RXFLAG_OWN | RXFLAG_INT;
+}
+
+static int
+jme_make_new_rx_buf(struct jme_adapter *jme, int i)
+{
+	struct jme_ring *rxring = &(jme->rxring[0]);
+	struct jme_buffer_info *rxbi = rxring->bufinf + i;
+	struct sk_buff *skb;
+
+	skb = netdev_alloc_skb(jme->dev,
+		jme->dev->mtu + RX_EXTRA_LEN);
+	if (unlikely(!skb))
+		return -ENOMEM;
+
+	rxbi->skb = skb;
+	rxbi->len = skb_tailroom(skb);
+	rxbi->mapping = pci_map_page(jme->pdev,
+					virt_to_page(skb->data),
+					offset_in_page(skb->data),
+					rxbi->len,
+					PCI_DMA_FROMDEVICE);
+
+	return 0;
+}
+
+static void
+jme_free_rx_buf(struct jme_adapter *jme, int i)
+{
+	struct jme_ring *rxring = &(jme->rxring[0]);
+	struct jme_buffer_info *rxbi = rxring->bufinf;
+	rxbi += i;
+
+	if (rxbi->skb) {
+		pci_unmap_page(jme->pdev,
+				 rxbi->mapping,
+				 rxbi->len,
+				 PCI_DMA_FROMDEVICE);
+		dev_kfree_skb(rxbi->skb);
+		rxbi->skb = NULL;
+		rxbi->mapping = 0;
+		rxbi->len = 0;
+	}
+}
+
+static void
+jme_free_rx_resources(struct jme_adapter *jme)
+{
+	int i;
+	struct jme_ring *rxring = &(jme->rxring[0]);
+
+	if (rxring->alloc) {
+		for (i = 0 ; i < jme->rx_ring_size ; ++i)
+			jme_free_rx_buf(jme, i);
+
+		dma_free_coherent(&(jme->pdev->dev),
+				  RX_RING_ALLOC_SIZE(jme->rx_ring_size),
+				  rxring->alloc,
+				  rxring->dmaalloc);
+		rxring->alloc    = NULL;
+		rxring->desc     = NULL;
+		rxring->dmaalloc = 0;
+		rxring->dma      = 0;
+	}
+	rxring->next_to_use   = 0;
+	atomic_set(&rxring->next_to_clean, 0);
+}
+
+static int
+jme_setup_rx_resources(struct jme_adapter *jme)
+{
+	int i;
+	struct jme_ring *rxring = &(jme->rxring[0]);
+
+	rxring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
+				   RX_RING_ALLOC_SIZE(jme->rx_ring_size),
+				   &(rxring->dmaalloc),
+				   GFP_ATOMIC);
+	if (!rxring->alloc) {
+		rxring->desc = NULL;
+		rxring->dmaalloc = 0;
+		rxring->dma = 0;
+		return -ENOMEM;
+	}
+
+	/*
+	 * 16-byte alignment
+	 */
+	rxring->desc		= (void *)ALIGN((unsigned long)(rxring->alloc),
+						RING_DESC_ALIGN);
+	rxring->dma		= ALIGN(rxring->dmaalloc, RING_DESC_ALIGN);
+	rxring->next_to_use	= 0;
+	atomic_set(&rxring->next_to_clean, 0);
+
+	/*
+	 * Initialize Receive Descriptors
+	 */
+	for (i = 0 ; i < jme->rx_ring_size ; ++i) {
+		if (unlikely(jme_make_new_rx_buf(jme, i))) {
+			jme_free_rx_resources(jme);
+			return -ENOMEM;
+		}
+
+		jme_set_clean_rxdesc(jme, i);
+	}
+
+	return 0;
+}
+
+static inline void
+jme_enable_rx_engine(struct jme_adapter *jme)
+{
+	/*
+	 * Select Queue 0
+	 */
+	jwrite32(jme, JME_RXCS, jme->reg_rxcs |
+				RXCS_QUEUESEL_Q0);
+	wmb();
+
+	/*
+	 * Set up RX DMA Base Address
+	 */
+	jwrite32(jme, JME_RXDBA_LO, (__u64)jme->rxring[0].dma & 0xFFFFFFFFUL);
+	jwrite32(jme, JME_RXDBA_HI, (__u64)(jme->rxring[0].dma) >> 32);
+	jwrite32(jme, JME_RXNDA, (__u64)jme->rxring[0].dma & 0xFFFFFFFFUL);
+
+	/*
+	 * Setup RX Descriptor Count
+	 */
+	jwrite32(jme, JME_RXQDC, jme->rx_ring_size);
+
+	/*
+	 * Setup Unicast Filter
+	 */
+	jme_set_multi(jme->dev);
+
+	/*
+	 * Enable RX Engine
+	 */
+	wmb();
+	jwrite32(jme, JME_RXCS, jme->reg_rxcs |
+				RXCS_QUEUESEL_Q0 |
+				RXCS_ENABLE |
+				RXCS_QST);
+}
+
+static inline void
+jme_restart_rx_engine(struct jme_adapter *jme)
+{
+	/*
+	 * Start RX Engine
+	 */
+	jwrite32(jme, JME_RXCS, jme->reg_rxcs |
+				RXCS_QUEUESEL_Q0 |
+				RXCS_ENABLE |
+				RXCS_QST);
+}
+
+static inline void
+jme_disable_rx_engine(struct jme_adapter *jme)
+{
+	int i;
+	u32 val;
+
+	/*
+	 * Disable RX Engine
+	 */
+	jwrite32(jme, JME_RXCS, jme->reg_rxcs);
+	wmb();
+
+	val = jread32(jme, JME_RXCS);
+	for (i = JME_RX_DISABLE_TIMEOUT ; (val & RXCS_ENABLE) && i > 0 ; --i) {
+		mdelay(1);
+		val = jread32(jme, JME_RXCS);
+		rmb();
+	}
+
+	if (!i)
+		jeprintk(jme->pdev, "Disabling RX engine timed out.\n");
+}
+
+static int
+jme_rxsum_ok(struct jme_adapter *jme, u16 flags)
+{
+	if (!(flags & (RXWBFLAG_TCPON | RXWBFLAG_UDPON | RXWBFLAG_IPV4)))
+		return false;
+
+	if (unlikely(!(flags & RXWBFLAG_MF) &&
+	(flags & RXWBFLAG_TCPON) && !(flags & RXWBFLAG_TCPCS))) {
+		msg_rx_err(jme, "TCP Checksum error.\n");
+		goto out_sumerr;
+	}
+
+	if (unlikely(!(flags & RXWBFLAG_MF) &&
+	(flags & RXWBFLAG_UDPON) && !(flags & RXWBFLAG_UDPCS))) {
+		msg_rx_err(jme, "UDP Checksum error.\n");
+		goto out_sumerr;
+	}
+
+	if (unlikely((flags & RXWBFLAG_IPV4) && !(flags & RXWBFLAG_IPCS))) {
+		msg_rx_err(jme, "IPv4 Checksum error.\n");
+		goto out_sumerr;
+	}
+
+	return true;
+
+out_sumerr:
+	return false;
+}
+
+static void
+jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx)
+{
+	struct jme_ring *rxring = &(jme->rxring[0]);
+	struct rxdesc *rxdesc = rxring->desc;
+	struct jme_buffer_info *rxbi = rxring->bufinf;
+	struct sk_buff *skb;
+	int framesize;
+
+	rxdesc += idx;
+	rxbi += idx;
+
+	skb = rxbi->skb;
+	pci_dma_sync_single_for_cpu(jme->pdev,
+					rxbi->mapping,
+					rxbi->len,
+					PCI_DMA_FROMDEVICE);
+
+	if (unlikely(jme_make_new_rx_buf(jme, idx))) {
+		pci_dma_sync_single_for_device(jme->pdev,
+						rxbi->mapping,
+						rxbi->len,
+						PCI_DMA_FROMDEVICE);
+
+		++(NET_STAT(jme).rx_dropped);
+	} else {
+		framesize = le16_to_cpu(rxdesc->descwb.framesize)
+				- RX_PREPAD_SIZE;
+
+		skb_reserve(skb, RX_PREPAD_SIZE);
+		skb_put(skb, framesize);
+		skb->protocol = eth_type_trans(skb, jme->dev);
+
+		if (jme_rxsum_ok(jme, rxdesc->descwb.flags))
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+		else
+			skb->ip_summed = CHECKSUM_NONE;
+
+		if (rxdesc->descwb.flags & RXWBFLAG_TAGON) {
+			if (jme->vlgrp) {
+				jme->jme_vlan_rx(skb, jme->vlgrp,
+					le32_to_cpu(rxdesc->descwb.vlan));
+				NET_STAT(jme).rx_bytes += 4;
+			}
+		} else {
+			jme->jme_rx(skb);
+		}
+
+		if ((le16_to_cpu(rxdesc->descwb.flags) & RXWBFLAG_DEST) ==
+				RXWBFLAG_DEST_MUL)
+			++(NET_STAT(jme).multicast);
+
+		jme->dev->last_rx = jiffies;
+		NET_STAT(jme).rx_bytes += framesize;
+		++(NET_STAT(jme).rx_packets);
+	}
+
+	jme_set_clean_rxdesc(jme, idx);
+}
+
+static int
+jme_process_receive(struct jme_adapter *jme, int limit)
+{
+	struct jme_ring *rxring = &(jme->rxring[0]);
+	struct rxdesc *rxdesc = rxring->desc;
+	int i, j, ccnt, desccnt, mask = jme->rx_ring_mask;
+
+	if (unlikely(!atomic_dec_and_test(&jme->rx_cleaning)))
+		goto out_inc;
+
+	if (unlikely(atomic_read(&jme->link_changing) != 1))
+		goto out_inc;
+
+	if (unlikely(!netif_carrier_ok(jme->dev)))
+		goto out_inc;
+
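+	/*
+	 * Walk the ring from next_to_clean, stopping at the first
+	 * descriptor still owned by the hardware or not yet completely
+	 * written back.
+	 */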
+	i = atomic_read(&rxring->next_to_clean);
+	while (limit-- > 0) {
+		rxdesc = rxring->desc;
+		rxdesc += i;
+
+		if ((rxdesc->descwb.flags & RXWBFLAG_OWN) ||
+		!(rxdesc->descwb.desccnt & RXWBDCNT_WBCPL))
+			goto out;
+
+		desccnt = rxdesc->descwb.desccnt & RXWBDCNT_DCNT;
+
+		if (unlikely(desccnt > 1 ||
+		rxdesc->descwb.errstat & RXWBERR_ALLERR)) {
+
+			if (rxdesc->descwb.errstat & RXWBERR_CRCERR)
+				++(NET_STAT(jme).rx_crc_errors);
+			else if (rxdesc->descwb.errstat & RXWBERR_OVERUN)
+				++(NET_STAT(jme).rx_fifo_errors);
+			else
+				++(NET_STAT(jme).rx_errors);
+
+			if (desccnt > 1)
+				limit -= desccnt - 1;
+
+			for (j = i, ccnt = desccnt ; ccnt-- ; ) {
+				jme_set_clean_rxdesc(jme, j);
+				j = (j + 1) & (mask);
+			}
+
+		} else {
+			jme_alloc_and_feed_skb(jme, i);
+		}
+
+		i = (i + desccnt) & (mask);
+	}
+
+out:
+	atomic_set(&rxring->next_to_clean, i);
+
+out_inc:
+	atomic_inc(&jme->rx_cleaning);
+
+	return limit > 0 ? limit : 0;
+}
+
+static void
+jme_attempt_pcc(struct dynpcc_info *dpi, int atmp)
+{
+	if (likely(atmp == dpi->cur)) {
+		dpi->cnt = 0;
+		return;
+	}
+
+	if (dpi->attempt == atmp) {
+		++(dpi->cnt);
+	} else {
+		dpi->attempt = atmp;
+		dpi->cnt = 0;
+	}
+}
+
+static void
+jme_dynamic_pcc(struct jme_adapter *jme)
+{
+	register struct dynpcc_info *dpi = &(jme->dpi);
+
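+	/*
+	 * Pick a coalescing level from the traffic seen since the last
+	 * timer tick: a heavy byte rate selects P3, a high packet or
+	 * interrupt rate selects P2, anything else P1.  Only switch
+	 * after the same candidate has won several ticks in a row.
+	 */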
+	if ((NET_STAT(jme).rx_bytes - dpi->last_bytes) > PCC_P3_THRESHOLD)
+		jme_attempt_pcc(dpi, PCC_P3);
+	else if ((NET_STAT(jme).rx_packets - dpi->last_pkts) > PCC_P2_THRESHOLD
+	|| dpi->intr_cnt > PCC_INTR_THRESHOLD)
+		jme_attempt_pcc(dpi, PCC_P2);
+	else
+		jme_attempt_pcc(dpi, PCC_P1);
+
+	if (unlikely(dpi->attempt != dpi->cur && dpi->cnt > 5)) {
+		if (dpi->attempt < dpi->cur)
+			tasklet_schedule(&jme->rxclean_task);
+		jme_set_rx_pcc(jme, dpi->attempt);
+		dpi->cur = dpi->attempt;
+		dpi->cnt = 0;
+	}
+}
+
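+/*
+ * Snapshot the RX counters and arm the hardware timer (which appears
+ * to tick once per microsecond, counting up through TMCSR_CNT) so it
+ * expires after PCC_INTERVAL_US, when jme_pcc_tasklet re-evaluates
+ * the coalescing level.
+ */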
+static void
+jme_start_pcc_timer(struct jme_adapter *jme)
+{
+	struct dynpcc_info *dpi = &(jme->dpi);
+	dpi->last_bytes		= NET_STAT(jme).rx_bytes;
+	dpi->last_pkts		= NET_STAT(jme).rx_packets;
+	dpi->intr_cnt		= 0;
+	jwrite32(jme, JME_TMCSR,
+		TMCSR_EN | ((0xFFFFFF - PCC_INTERVAL_US) & TMCSR_CNT));
+}
+
+static inline void
+jme_stop_pcc_timer(struct jme_adapter *jme)
+{
+	jwrite32(jme, JME_TMCSR, 0);
+}
+
+static void
+jme_shutdown_nic(struct jme_adapter *jme)
+{
+	u32 phylink;
+
+	phylink = jme_linkstat_from_phy(jme);
+
+	if (!(phylink & PHY_LINK_UP)) {
+		/*
+		 * Disable all interrupts before arming the timer
+		 */
+		jme_stop_irq(jme);
+		jwrite32(jme, JME_TIMER2, TMCSR_EN | 0xFFFFFE);
+	}
+}
+
+static void
+jme_pcc_tasklet(unsigned long arg)
+{
+	struct jme_adapter *jme = (struct jme_adapter *)arg;
+	struct net_device *netdev = jme->dev;
+
+	if (unlikely(test_bit(JME_FLAG_SHUTDOWN, &jme->flags))) {
+		jme_shutdown_nic(jme);
+		return;
+	}
+
+	if (unlikely(!netif_carrier_ok(netdev) ||
+		(atomic_read(&jme->link_changing) != 1)
+	)) {
+		jme_stop_pcc_timer(jme);
+		return;
+	}
+
+	if (!(test_bit(JME_FLAG_POLL, &jme->flags)))
+		jme_dynamic_pcc(jme);
+
+	jme_start_pcc_timer(jme);
+}
+
+static inline void
+jme_polling_mode(struct jme_adapter *jme)
+{
+	jme_set_rx_pcc(jme, PCC_OFF);
+}
+
+static inline void
+jme_interrupt_mode(struct jme_adapter *jme)
+{
+	jme_set_rx_pcc(jme, PCC_P1);
+}
+
+static inline int
+jme_pseudo_hotplug_enabled(struct jme_adapter *jme)
+{
+	u32 apmc;
+	apmc = jread32(jme, JME_APMC);
+	return apmc & JME_APMC_PSEUDO_HP_EN;
+}
+
+static void
+jme_start_shutdown_timer(struct jme_adapter *jme)
+{
+	u32 apmc;
+
+	apmc = jread32(jme, JME_APMC) | JME_APMC_PCIE_SD_EN;
+	apmc &= ~JME_APMC_EPIEN_CTRL;
+	if (!no_extplug) {
+		jwrite32f(jme, JME_APMC, apmc | JME_APMC_EPIEN_CTRL_EN);
+		wmb();
+	}
+	jwrite32f(jme, JME_APMC, apmc);
+
+	jwrite32f(jme, JME_TIMER2, 0);
+	set_bit(JME_FLAG_SHUTDOWN, &jme->flags);
+	jwrite32(jme, JME_TMCSR,
+		TMCSR_EN | ((0xFFFFFF - APMC_PHP_SHUTDOWN_DELAY) & TMCSR_CNT));
+}
+
+static void
+jme_stop_shutdown_timer(struct jme_adapter *jme)
+{
+	u32 apmc;
+
+	jwrite32f(jme, JME_TMCSR, 0);
+	jwrite32f(jme, JME_TIMER2, 0);
+	clear_bit(JME_FLAG_SHUTDOWN, &jme->flags);
+
+	apmc = jread32(jme, JME_APMC);
+	apmc &= ~(JME_APMC_PCIE_SD_EN | JME_APMC_EPIEN_CTRL);
+	jwrite32f(jme, JME_APMC, apmc | JME_APMC_EPIEN_CTRL_DIS);
+	wmb();
+	jwrite32f(jme, JME_APMC, apmc);
+}
+
+static void
+jme_link_change_tasklet(unsigned long arg)
+{
+	struct jme_adapter *jme = (struct jme_adapter *)arg;
+	struct net_device *netdev = jme->dev;
+	int rc;
+
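+	/*
+	 * link_changing is an atomic used as a try-lock against the
+	 * RX/TX cleaning paths; busy-wait here until we own it.
+	 */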
+	while (!atomic_dec_and_test(&jme->link_changing)) {
+		atomic_inc(&jme->link_changing);
+		msg_intr(jme, "Get link change lock failed.\n");
+		while (atomic_read(&jme->link_changing) != 1)
+			msg_intr(jme, "Waiting link change lock.\n");
+	}
+
+	if (jme_check_link(netdev, 1) && jme->old_mtu == netdev->mtu)
+		goto out;
+
+	jme->old_mtu = netdev->mtu;
+	netif_stop_queue(netdev);
+	if (jme_pseudo_hotplug_enabled(jme))
+		jme_stop_shutdown_timer(jme);
+
+	jme_stop_pcc_timer(jme);
+	tasklet_disable(&jme->txclean_task);
+	tasklet_disable(&jme->rxclean_task);
+	tasklet_disable(&jme->rxempty_task);
+
+	if (netif_carrier_ok(netdev)) {
+		jme_reset_ghc_speed(jme);
+		jme_disable_rx_engine(jme);
+		jme_disable_tx_engine(jme);
+		jme_reset_mac_processor(jme);
+		jme_free_rx_resources(jme);
+		jme_free_tx_resources(jme);
+
+		if (test_bit(JME_FLAG_POLL, &jme->flags))
+			jme_polling_mode(jme);
+
+		netif_carrier_off(netdev);
+	}
+
+	jme_check_link(netdev, 0);
+	if (netif_carrier_ok(netdev)) {
+		rc = jme_setup_rx_resources(jme);
+		if (rc) {
+			jeprintk(jme->pdev, "Allocating resources for RX error"
+				", Device STOPPED!\n");
+			goto out_enable_tasklet;
+		}
+
+		rc = jme_setup_tx_resources(jme);
+		if (rc) {
+			jeprintk(jme->pdev, "Allocating resources for TX error"
+				", Device STOPPED!\n");
+			goto err_out_free_rx_resources;
+		}
+
+		jme_enable_rx_engine(jme);
+		jme_enable_tx_engine(jme);
+
+		netif_start_queue(netdev);
+
+		if (test_bit(JME_FLAG_POLL, &jme->flags))
+			jme_interrupt_mode(jme);
+
+		jme_start_pcc_timer(jme);
+	} else if (jme_pseudo_hotplug_enabled(jme)) {
+		jme_start_shutdown_timer(jme);
+	}
+
+	goto out_enable_tasklet;
+
+err_out_free_rx_resources:
+	jme_free_rx_resources(jme);
+out_enable_tasklet:
+	tasklet_enable(&jme->txclean_task);
+	tasklet_hi_enable(&jme->rxclean_task);
+	tasklet_hi_enable(&jme->rxempty_task);
+out:
+	atomic_inc(&jme->link_changing);
+}
+
+static void
+jme_rx_clean_tasklet(unsigned long arg)
+{
+	struct jme_adapter *jme = (struct jme_adapter *)arg;
+	struct dynpcc_info *dpi = &(jme->dpi);
+
+	jme_process_receive(jme, jme->rx_ring_size);
+	++(dpi->intr_cnt);
+}
+
+static int
+jme_poll(JME_NAPI_HOLDER(holder), JME_NAPI_WEIGHT(budget))
+{
+	struct jme_adapter *jme = jme_napi_priv(holder);
+	struct net_device *netdev = jme->dev;
+	int rest;
+
+	rest = jme_process_receive(jme, JME_NAPI_WEIGHT_VAL(budget));
+
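+	/*
+	 * If the ring ran empty while interrupts were off, the RX
+	 * engine has stalled: count each empty event as a drop and
+	 * restart the engine.
+	 */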
+	while (atomic_read(&jme->rx_empty) > 0) {
+		atomic_dec(&jme->rx_empty);
+		++(NET_STAT(jme).rx_dropped);
+		jme_restart_rx_engine(jme);
+	}
+	atomic_inc(&jme->rx_empty);
+
+	if (rest) {
+		JME_RX_COMPLETE(netdev, holder);
+		jme_interrupt_mode(jme);
+	}
+
+	JME_NAPI_WEIGHT_SET(budget, rest);
+	return JME_NAPI_WEIGHT_VAL(budget) - rest;
+}
+
+static void
+jme_rx_empty_tasklet(unsigned long arg)
+{
+	struct jme_adapter *jme = (struct jme_adapter *)arg;
+
+	if (unlikely(atomic_read(&jme->link_changing) != 1))
+		return;
+
+	if (unlikely(!netif_carrier_ok(jme->dev)))
+		return;
+
+	msg_rx_status(jme, "RX Queue Full!\n");
+
+	jme_rx_clean_tasklet(arg);
+
+	while (atomic_read(&jme->rx_empty) > 0) {
+		atomic_dec(&jme->rx_empty);
+		++(NET_STAT(jme).rx_dropped);
+		jme_restart_rx_engine(jme);
+	}
+	atomic_inc(&jme->rx_empty);
+}
+
+static void
+jme_wake_queue_if_stopped(struct jme_adapter *jme)
+{
+	struct jme_ring *txring = jme->txring;
+
+	smp_wmb();
+	if (unlikely(netif_queue_stopped(jme->dev) &&
+	atomic_read(&txring->nr_free) >= (jme->tx_wake_threshold))) {
+		msg_tx_done(jme, "TX Queue Woken.\n");
+		netif_wake_queue(jme->dev);
+	}
+}
+
+static void
+jme_tx_clean_tasklet(unsigned long arg)
+{
+	struct jme_adapter *jme = (struct jme_adapter *)arg;
+	struct jme_ring *txring = &(jme->txring[0]);
+	struct txdesc *txdesc = txring->desc;
+	struct jme_buffer_info *txbi = txring->bufinf, *ctxbi, *ttxbi;
+	int i, j, cnt = 0, max, err, mask;
+
+	tx_dbg(jme, "Into txclean.\n");
+
+	if (unlikely(!atomic_dec_and_test(&jme->tx_cleaning)))
+		goto out;
+
+	if (unlikely(atomic_read(&jme->link_changing) != 1))
+		goto out;
+
+	if (unlikely(!netif_carrier_ok(jme->dev)))
+		goto out;
+
+	max = jme->tx_ring_size - atomic_read(&txring->nr_free);
+	mask = jme->tx_ring_mask;
+
+	for (i = atomic_read(&txring->next_to_clean) ; cnt < max ; ) {
+
+		ctxbi = txbi + i;
+
+		if (likely(ctxbi->skb &&
+		!(txdesc[i].descwb.flags & TXWBFLAG_OWN))) {
+
+			tx_dbg(jme, "txclean: %d+%d@%lu\n",
+					i, ctxbi->nr_desc, jiffies);
+
+			err = txdesc[i].descwb.flags & TXWBFLAG_ALLERR;
+
+			for (j = 1 ; j < ctxbi->nr_desc ; ++j) {
+				ttxbi = txbi + ((i + j) & (mask));
+				txdesc[(i + j) & (mask)].dw[0] = 0;
+
+				pci_unmap_page(jme->pdev,
+						 ttxbi->mapping,
+						 ttxbi->len,
+						 PCI_DMA_TODEVICE);
+
+				ttxbi->mapping = 0;
+				ttxbi->len = 0;
+			}
+
+			dev_kfree_skb(ctxbi->skb);
+
+			cnt += ctxbi->nr_desc;
+
+			if (unlikely(err)) {
+				++(NET_STAT(jme).tx_carrier_errors);
+			} else {
+				++(NET_STAT(jme).tx_packets);
+				NET_STAT(jme).tx_bytes += ctxbi->len;
+			}
+
+			ctxbi->skb = NULL;
+			ctxbi->len = 0;
+			ctxbi->start_xmit = 0;
+
+		} else {
+			break;
+		}
+
+		i = (i + ctxbi->nr_desc) & mask;
+
+		ctxbi->nr_desc = 0;
+	}
+
+	tx_dbg(jme, "txclean: done %d@%lu.\n", i, jiffies);
+	atomic_set(&txring->next_to_clean, i);
+	atomic_add(cnt, &txring->nr_free);
+
+	jme_wake_queue_if_stopped(jme);
+
+out:
+	atomic_inc(&jme->tx_cleaning);
+}
+
+static void
+jme_intr_msi(struct jme_adapter *jme, u32 intrstat)
+{
+	/*
+	 * Disable interrupt
+	 */
+	jwrite32f(jme, JME_IENC, INTR_ENABLE);
+
+	if (intrstat & (INTR_LINKCH | INTR_SWINTR)) {
+		/*
+		 * Link change event is critical
+		 * all other events are ignored
+		 */
+		jwrite32(jme, JME_IEVE, intrstat);
+		tasklet_schedule(&jme->linkch_task);
+		goto out_reenable;
+	}
+
+	if (intrstat & INTR_TMINTR) {
+		jwrite32(jme, JME_IEVE, INTR_TMINTR);
+		tasklet_schedule(&jme->pcc_task);
+	}
+
+	if (intrstat & (INTR_PCCTXTO | INTR_PCCTX)) {
+		jwrite32(jme, JME_IEVE, INTR_PCCTXTO | INTR_PCCTX | INTR_TX0);
+		tasklet_schedule(&jme->txclean_task);
+	}
+
+	if ((intrstat & (INTR_PCCRX0TO | INTR_PCCRX0 | INTR_RX0EMP))) {
+		jwrite32(jme, JME_IEVE, (intrstat & (INTR_PCCRX0TO |
+						     INTR_PCCRX0 |
+						     INTR_RX0EMP)) |
+					INTR_RX0);
+	}
+
+	if (test_bit(JME_FLAG_POLL, &jme->flags)) {
+		if (intrstat & INTR_RX0EMP)
+			atomic_inc(&jme->rx_empty);
+
+		if ((intrstat & (INTR_PCCRX0TO | INTR_PCCRX0 | INTR_RX0EMP))) {
+			if (likely(JME_RX_SCHEDULE_PREP(jme))) {
+				jme_polling_mode(jme);
+				JME_RX_SCHEDULE(jme);
+			}
+		}
+	} else {
+		if (intrstat & INTR_RX0EMP) {
+			atomic_inc(&jme->rx_empty);
+			tasklet_hi_schedule(&jme->rxempty_task);
+		} else if (intrstat & (INTR_PCCRX0TO | INTR_PCCRX0)) {
+			tasklet_hi_schedule(&jme->rxclean_task);
+		}
+	}
+
+out_reenable:
+	/*
+	 * Re-enable interrupt
+	 */
+	jwrite32f(jme, JME_IENS, INTR_ENABLE);
+}
+
+static irqreturn_t
+jme_intr(int irq, void *dev_id)
+{
+	struct net_device *netdev = dev_id;
+	struct jme_adapter *jme = netdev_priv(netdev);
+	u32 intrstat;
+
+	intrstat = jread32(jme, JME_IEVE);
+
+	/*
+	 * Check if it's really an interrupt for us
+	 */
+	if (unlikely(intrstat == 0))
+		return IRQ_NONE;
+
+	/*
+	 * Check if the device still exists
+	 */
+	if (unlikely(intrstat == ~((typeof(intrstat))0)))
+		return IRQ_NONE;
+
+	jme_intr_msi(jme, intrstat);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t
+jme_msi(int irq, void *dev_id)
+{
+	struct net_device *netdev = dev_id;
+	struct jme_adapter *jme = netdev_priv(netdev);
+	u32 intrstat;
+
+	pci_dma_sync_single_for_cpu(jme->pdev,
+				    jme->shadow_dma,
+				    sizeof(u32) * SHADOW_REG_NR,
+				    PCI_DMA_FROMDEVICE);
+	intrstat = jme->shadow_regs[SHADOW_IEVE];
+	jme->shadow_regs[SHADOW_IEVE] = 0;
+
+	jme_intr_msi(jme, intrstat);
+
+	return IRQ_HANDLED;
+}
+
+static void
+jme_reset_link(struct jme_adapter *jme)
+{
+	jwrite32(jme, JME_TMCSR, TMCSR_SWIT);
+}
+
+static void
+jme_restart_an(struct jme_adapter *jme)
+{
+	u32 bmcr;
+
+	spin_lock_bh(&jme->phy_lock);
+	bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
+	bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
+	jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
+	spin_unlock_bh(&jme->phy_lock);
+}
+
+static int
+jme_request_irq(struct jme_adapter *jme)
+{
+	int rc;
+	struct net_device *netdev = jme->dev;
+	irq_handler_t handler = jme_intr;
+	int irq_flags = IRQF_SHARED;
+
+	if (!pci_enable_msi(jme->pdev)) {
+		set_bit(JME_FLAG_MSI, &jme->flags);
+		handler = jme_msi;
+		irq_flags = 0;
+	}
+
+	rc = request_irq(jme->pdev->irq, handler, irq_flags, netdev->name,
+			  netdev);
+	if (rc) {
+		jeprintk(jme->pdev,
+			"Unable to request %s interrupt (return: %d)\n",
+			test_bit(JME_FLAG_MSI, &jme->flags) ? "MSI" : "INTx",
+			rc);
+
+		if (test_bit(JME_FLAG_MSI, &jme->flags)) {
+			pci_disable_msi(jme->pdev);
+			clear_bit(JME_FLAG_MSI, &jme->flags);
+		}
+	} else {
+		netdev->irq = jme->pdev->irq;
+	}
+
+	return rc;
+}
+
+static void
+jme_free_irq(struct jme_adapter *jme)
+{
+	free_irq(jme->pdev->irq, jme->dev);
+	if (test_bit(JME_FLAG_MSI, &jme->flags)) {
+		pci_disable_msi(jme->pdev);
+		clear_bit(JME_FLAG_MSI, &jme->flags);
+		jme->dev->irq = jme->pdev->irq;
+	}
+}
+
+static int
+jme_open(struct net_device *netdev)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+	int rc;
+
+	jme_clear_pm(jme);
+	JME_NAPI_ENABLE(jme);
+
+	tasklet_enable(&jme->txclean_task);
+	tasklet_hi_enable(&jme->rxclean_task);
+	tasklet_hi_enable(&jme->rxempty_task);
+
+	rc = jme_request_irq(jme);
+	if (rc)
+		goto err_out;
+
+	jme_enable_shadow(jme);
+	jme_start_irq(jme);
+
+	if (test_bit(JME_FLAG_SSET, &jme->flags))
+		jme_set_settings(netdev, &jme->old_ecmd);
+	else
+		jme_reset_phy_processor(jme);
+
+	jme_reset_link(jme);
+
+	return 0;
+
+err_out:
+	netif_stop_queue(netdev);
+	netif_carrier_off(netdev);
+	return rc;
+}
+
+static void
+jme_set_100m_half(struct jme_adapter *jme)
+{
+	u32 bmcr, tmp;
+
+	bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
+	tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 |
+		       BMCR_SPEED1000 | BMCR_FULLDPLX);
+	tmp |= BMCR_SPEED100;
+
+	if (bmcr != tmp)
+		jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, tmp);
+
+	if (jme->fpgaver)
+		jwrite32(jme, JME_GHC, GHC_SPEED_100M | GHC_LINK_POLL);
+	else
+		jwrite32(jme, JME_GHC, GHC_SPEED_100M);
+}
+
+#define JME_WAIT_LINK_TIME 2000 /* 2000ms */
+static void
+jme_wait_link(struct jme_adapter *jme)
+{
+	u32 phylink, to = JME_WAIT_LINK_TIME;
+
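+	/*
+	 * Give the PHY a second to renegotiate, then poll the link
+	 * state for up to JME_WAIT_LINK_TIME ms.
+	 */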
+	mdelay(1000);
+	phylink = jme_linkstat_from_phy(jme);
+	while (!(phylink & PHY_LINK_UP) && (to -= 10) > 0) {
+		mdelay(10);
+		phylink = jme_linkstat_from_phy(jme);
+	}
+}
+
+static inline void
+jme_phy_off(struct jme_adapter *jme)
+{
+	jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, BMCR_PDOWN);
+}
+
+static int
+jme_close(struct net_device *netdev)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+
+	netif_stop_queue(netdev);
+	netif_carrier_off(netdev);
+
+	jme_stop_irq(jme);
+	jme_disable_shadow(jme);
+	jme_free_irq(jme);
+
+	JME_NAPI_DISABLE(jme);
+
+	tasklet_kill(&jme->linkch_task);
+	tasklet_kill(&jme->txclean_task);
+	tasklet_kill(&jme->rxclean_task);
+	tasklet_kill(&jme->rxempty_task);
+
+	jme_reset_ghc_speed(jme);
+	jme_disable_rx_engine(jme);
+	jme_disable_tx_engine(jme);
+	jme_reset_mac_processor(jme);
+	jme_free_rx_resources(jme);
+	jme_free_tx_resources(jme);
+	jme->phylink = 0;
+	jme_phy_off(jme);
+
+	return 0;
+}
+
+static int
+jme_alloc_txdesc(struct jme_adapter *jme,
+			struct sk_buff *skb)
+{
+	struct jme_ring *txring = jme->txring;
+	int idx, nr_alloc, mask = jme->tx_ring_mask;
+
+	idx = txring->next_to_use;
+	nr_alloc = skb_shinfo(skb)->nr_frags + 2;
+
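+	/*
+	 * Two extra descriptors: one for the header/format descriptor
+	 * and one for the linear part of the skb.
+	 */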
+	if (unlikely(atomic_read(&txring->nr_free) < nr_alloc))
+		return -1;
+
+	atomic_sub(nr_alloc, &txring->nr_free);
+
+	txring->next_to_use = (txring->next_to_use + nr_alloc) & mask;
+
+	return idx;
+}
+
+static void
+jme_fill_tx_map(struct pci_dev *pdev,
+		struct txdesc *txdesc,
+		struct jme_buffer_info *txbi,
+		struct page *page,
+		u32 page_offset,
+		u32 len,
+		u8 hidma)
+{
+	dma_addr_t dmaaddr;
+
+	dmaaddr = pci_map_page(pdev,
+				page,
+				page_offset,
+				len,
+				PCI_DMA_TODEVICE);
+
+	pci_dma_sync_single_for_device(pdev,
+				       dmaaddr,
+				       len,
+				       PCI_DMA_TODEVICE);
+
+	txdesc->dw[0] = 0;
+	txdesc->dw[1] = 0;
+	txdesc->desc2.flags	= TXFLAG_OWN;
+	txdesc->desc2.flags	|= (hidma) ? TXFLAG_64BIT : 0;
+	txdesc->desc2.datalen	= cpu_to_le16(len);
+	txdesc->desc2.bufaddrh	= cpu_to_le32((__u64)dmaaddr >> 32);
+	txdesc->desc2.bufaddrl	= cpu_to_le32(
+					(__u64)dmaaddr & 0xFFFFFFFFUL);
+
+	txbi->mapping = dmaaddr;
+	txbi->len = len;
+}
+
+static void
+jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
+{
+	struct jme_ring *txring = jme->txring;
+	struct txdesc *txdesc = txring->desc, *ctxdesc;
+	struct jme_buffer_info *txbi = txring->bufinf, *ctxbi;
+	u8 hidma = jme->dev->features & NETIF_F_HIGHDMA;
+	int i, nr_frags = skb_shinfo(skb)->nr_frags;
+	int mask = jme->tx_ring_mask;
+	struct skb_frag_struct *frag;
+	u32 len;
+
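+	/*
+	 * Descriptor layout: idx is the header descriptor (filled by
+	 * jme_fill_first_tx_desc()), idx + 1 holds the linear part of
+	 * the skb, and idx + 2 onward hold the page fragments.
+	 */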
+	for (i = 0 ; i < nr_frags ; ++i) {
+		frag = &skb_shinfo(skb)->frags[i];
+		ctxdesc = txdesc + ((idx + i + 2) & (mask));
+		ctxbi = txbi + ((idx + i + 2) & (mask));
+
+		jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, frag->page,
+				 frag->page_offset, frag->size, hidma);
+	}
+
+	len = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
+	ctxdesc = txdesc + ((idx + 1) & (mask));
+	ctxbi = txbi + ((idx + 1) & (mask));
+	jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data),
+			offset_in_page(skb->data), len, hidma);
+}
+
+static int
+jme_expand_header(struct jme_adapter *jme, struct sk_buff *skb)
+{
+	if (unlikely(skb_shinfo(skb)->gso_size &&
+			skb_header_cloned(skb) &&
+			pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) {
+		dev_kfree_skb(skb);
+		return -1;
+	}
+
+	return 0;
+}
+
+static int
+jme_tx_tso(struct sk_buff *skb,
+		u16 *mss, u8 *flags)
+{
+	*mss = skb_shinfo(skb)->gso_size << TXDESC_MSS_SHIFT;
+	if (*mss) {
+		*flags |= TXFLAG_LSEN;
+
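+		/*
+		 * The hardware fills in the TCP checksum for large-send
+		 * segments; seed it with the pseudo-header checksum,
+		 * computed with a zero length field.
+		 */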
+		if (skb->protocol == htons(ETH_P_IP)) {
+			struct iphdr *iph = ip_hdr(skb);
+
+			iph->check = 0;
+			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+								iph->daddr, 0,
+								IPPROTO_TCP,
+								0);
+		} else {
+			struct ipv6hdr *ip6h = ipv6_hdr(skb);
+
+			tcp_hdr(skb)->check = ~csum_ipv6_magic(&ip6h->saddr,
+								&ip6h->daddr, 0,
+								IPPROTO_TCP,
+								0);
+		}
+
+		return 0;
+	}
+
+	return 1;
+}
+
+static void
+jme_tx_csum(struct jme_adapter *jme, struct sk_buff *skb, u8 *flags)
+{
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		u8 ip_proto;
+
+		switch (skb->protocol) {
+		case htons(ETH_P_IP):
+			ip_proto = ip_hdr(skb)->protocol;
+			break;
+		case htons(ETH_P_IPV6):
+			ip_proto = ipv6_hdr(skb)->nexthdr;
+			break;
+		default:
+			ip_proto = 0;
+			break;
+		}
+
+		switch (ip_proto) {
+		case IPPROTO_TCP:
+			*flags |= TXFLAG_TCPCS;
+			break;
+		case IPPROTO_UDP:
+			*flags |= TXFLAG_UDPCS;
+			break;
+		default:
+			msg_tx_err(jme, "Unsupported upper-layer protocol.\n");
+			break;
+		}
+	}
+}
+
+static inline void
+jme_tx_vlan(struct sk_buff *skb, u16 *vlan, u8 *flags)
+{
+	if (vlan_tx_tag_present(skb)) {
+		*flags |= TXFLAG_TAGON;
+		*vlan = vlan_tx_tag_get(skb);
+	}
+}
+
+static int
+jme_fill_first_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx)
+{
+	struct jme_ring *txring = jme->txring;
+	struct txdesc *txdesc;
+	struct jme_buffer_info *txbi;
+	u8 flags;
+
+	txdesc = (struct txdesc *)txring->desc + idx;
+	txbi = txring->bufinf + idx;
+
+	txdesc->dw[0] = 0;
+	txdesc->dw[1] = 0;
+	txdesc->dw[2] = 0;
+	txdesc->dw[3] = 0;
+	txdesc->desc1.pktsize = cpu_to_le16(skb->len);
+	/*
+	 * Set the OWN bit last.  The kernel may queue packets faster
+	 * than the NIC sends them, and the NIC could try to fetch this
+	 * descriptor before we tell it to start sending on this TX
+	 * queue.  All other fields are already filled in correctly.
+	 */
+	wmb();
+	flags = TXFLAG_OWN | TXFLAG_INT;
+	/*
+	 * Set checksum flags when not doing TSO
+	 */
+	if (jme_tx_tso(skb, &txdesc->desc1.mss, &flags))
+		jme_tx_csum(jme, skb, &flags);
+	jme_tx_vlan(skb, &txdesc->desc1.vlan, &flags);
+	txdesc->desc1.flags = flags;
+	/*
+	 * Set the TX buffer info after telling the NIC to send,
+	 * for better tx_clean timing.
+	 */
+	wmb();
+	txbi->nr_desc = skb_shinfo(skb)->nr_frags + 2;
+	txbi->skb = skb;
+	txbi->len = skb->len;
+	txbi->start_xmit = jiffies;
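+	/* jiffies may legitimately be 0; keep start_xmit nonzero, since 0 means "not queued" */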
+	if (!txbi->start_xmit)
+		txbi->start_xmit = (0UL-1);
+
+	return 0;
+}
+
+static void
+jme_stop_queue_if_full(struct jme_adapter *jme)
+{
+	struct jme_ring *txring = jme->txring;
+	struct jme_buffer_info *txbi = txring->bufinf;
+	int idx = atomic_read(&txring->next_to_clean);
+
+	txbi += idx;
+
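+	/*
+	 * Stop the queue, then re-check nr_free: the barriers are meant
+	 * to pair with jme_wake_queue_if_stopped() so that a TX
+	 * completion racing with the stop still wakes the queue.
+	 */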
+	smp_wmb();
+	if (unlikely(atomic_read(&txring->nr_free) < (MAX_SKB_FRAGS+2))) {
+		netif_stop_queue(jme->dev);
+		msg_tx_queued(jme, "TX Queue Paused.\n");
+		smp_wmb();
+		if (atomic_read(&txring->nr_free)
+			>= (jme->tx_wake_threshold)) {
+			netif_wake_queue(jme->dev);
+			msg_tx_queued(jme, "TX Queue Fast Woken.\n");
+		}
+	}
+
+	if (unlikely(txbi->start_xmit &&
+			(jiffies - txbi->start_xmit) >= TX_TIMEOUT &&
+			txbi->skb)) {
+		netif_stop_queue(jme->dev);
+		msg_tx_queued(jme, "TX Queue Stopped %d@%lu.\n", idx, jiffies);
+	}
+}
+
+/*
+ * This function is already protected by netif_tx_lock()
+ */
+
+static int
+jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+	int idx;
+
+	if (unlikely(jme_expand_header(jme, skb))) {
+		++(NET_STAT(jme).tx_dropped);
+		return NETDEV_TX_OK;
+	}
+
+	idx = jme_alloc_txdesc(jme, skb);
+
+	if (unlikely(idx < 0)) {
+		netif_stop_queue(netdev);
+		msg_tx_err(jme, "BUG! Tx ring full when queue awake!\n");
+
+		return NETDEV_TX_BUSY;
+	}
+
+	jme_map_tx_skb(jme, skb, idx);
+	jme_fill_first_tx_desc(jme, skb, idx);
+
+	jwrite32(jme, JME_TXCS, jme->reg_txcs |
+				TXCS_SELECT_QUEUE0 |
+				TXCS_QUEUE0S |
+				TXCS_ENABLE);
+	netdev->trans_start = jiffies;
+
+	tx_dbg(jme, "xmit: %d+%d@%lu\n", idx,
+			skb_shinfo(skb)->nr_frags + 2,
+			jiffies);
+	jme_stop_queue_if_full(jme);
+
+	return NETDEV_TX_OK;
+}
+
+static int
+jme_set_macaddr(struct net_device *netdev, void *p)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+	struct sockaddr *addr = p;
+	u32 val;
+
+	if (netif_running(netdev))
+		return -EBUSY;
+
+	spin_lock_bh(&jme->macaddr_lock);
+	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+
+	val = (addr->sa_data[3] & 0xff) << 24 |
+	      (addr->sa_data[2] & 0xff) << 16 |
+	      (addr->sa_data[1] & 0xff) <<  8 |
+	      (addr->sa_data[0] & 0xff);
+	jwrite32(jme, JME_RXUMA_LO, val);
+	val = (addr->sa_data[5] & 0xff) << 8 |
+	      (addr->sa_data[4] & 0xff);
+	jwrite32(jme, JME_RXUMA_HI, val);
+	spin_unlock_bh(&jme->macaddr_lock);
+
+	return 0;
+}
+
+static void
+jme_set_multi(struct net_device *netdev)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+	u32 mc_hash[2] = {};
+	int i;
+
+	spin_lock_bh(&jme->rxmcs_lock);
+
+	jme->reg_rxmcs |= RXMCS_BRDFRAME | RXMCS_UNIFRAME;
+
+	if (netdev->flags & IFF_PROMISC) {
+		jme->reg_rxmcs |= RXMCS_ALLFRAME;
+	} else if (netdev->flags & IFF_ALLMULTI) {
+		jme->reg_rxmcs |= RXMCS_ALLMULFRAME;
+	} else if (netdev->flags & IFF_MULTICAST) {
+		struct dev_mc_list *mclist;
+		int bit_nr;
+
+		jme->reg_rxmcs |= RXMCS_MULFRAME | RXMCS_MULFILTERED;
+		for (i = 0, mclist = netdev->mc_list;
+			mclist && i < netdev->mc_count;
+			++i, mclist = mclist->next) {
+
+			bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3F;
+			mc_hash[bit_nr >> 5] |= 1 << (bit_nr & 0x1F);
+		}
+
+		jwrite32(jme, JME_RXMCHT_LO, mc_hash[0]);
+		jwrite32(jme, JME_RXMCHT_HI, mc_hash[1]);
+	}
+
+	wmb();
+	jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
+
+	spin_unlock_bh(&jme->rxmcs_lock);
+}
+
+static int
+jme_change_mtu(struct net_device *netdev, int new_mtu)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+
+	if (new_mtu == jme->old_mtu)
+		return 0;
+
+	if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
+		((new_mtu) < IPV6_MIN_MTU))
+		return -EINVAL;
+
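+	/*
+	 * Retune the RX FIFO threshold for the new frame size and
+	 * restart the RX engine so the change takes effect.
+	 */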
+	if (new_mtu > 4000) {
+		jme->reg_rxcs &= ~RXCS_FIFOTHNP;
+		jme->reg_rxcs |= RXCS_FIFOTHNP_64QW;
+		jme_restart_rx_engine(jme);
+	} else {
+		jme->reg_rxcs &= ~RXCS_FIFOTHNP;
+		jme->reg_rxcs |= RXCS_FIFOTHNP_128QW;
+		jme_restart_rx_engine(jme);
+	}
+
+	if (new_mtu > 1900) {
+		netdev->features &= ~(NETIF_F_HW_CSUM |
+				NETIF_F_TSO |
+				NETIF_F_TSO6);
+	} else {
+		if (test_bit(JME_FLAG_TXCSUM, &jme->flags))
+			netdev->features |= NETIF_F_HW_CSUM;
+		if (test_bit(JME_FLAG_TSO, &jme->flags))
+			netdev->features |= NETIF_F_TSO | NETIF_F_TSO6;
+	}
+
+	netdev->mtu = new_mtu;
+	jme_reset_link(jme);
+
+	return 0;
+}
+
+static void
+jme_tx_timeout(struct net_device *netdev)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+
+	jme->phylink = 0;
+	jme_reset_phy_processor(jme);
+	if (test_bit(JME_FLAG_SSET, &jme->flags))
+		jme_set_settings(netdev, &jme->old_ecmd);
+
+	/*
+	 * Force to Reset the link again
+	 */
+	jme_reset_link(jme);
+}
+
+static void
+jme_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+
+	jme->vlgrp = grp;
+}
+
+static void
+jme_get_drvinfo(struct net_device *netdev,
+		     struct ethtool_drvinfo *info)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+
+	strcpy(info->driver, DRV_NAME);
+	strcpy(info->version, DRV_VERSION);
+	strcpy(info->bus_info, pci_name(jme->pdev));
+}
+
+static int
+jme_get_regs_len(struct net_device *netdev)
+{
+	return JME_REG_LEN;
+}
+
+static void
+mmapio_memcpy(struct jme_adapter *jme, u32 *p, u32 reg, int len)
+{
+	int i;
+
+	for (i = 0 ; i < len ; i += 4)
+		p[i >> 2] = jread32(jme, reg + i);
+}
+
+static void
+mdio_memcpy(struct jme_adapter *jme, u32 *p, int reg_nr)
+{
+	int i;
+	u16 *p16 = (u16 *)p;
+
+	for (i = 0 ; i < reg_nr ; ++i)
+		p16[i] = jme_mdio_read(jme->dev, jme->mii_if.phy_id, i);
+}
+
+static void
+jme_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+	u32 *p32 = (u32 *)p;
+
+	memset(p, 0xFF, JME_REG_LEN);
+
+	regs->version = 1;
+	mmapio_memcpy(jme, p32, JME_MAC, JME_MAC_LEN);
+
+	p32 += 0x100 >> 2;
+	mmapio_memcpy(jme, p32, JME_PHY, JME_PHY_LEN);
+
+	p32 += 0x100 >> 2;
+	mmapio_memcpy(jme, p32, JME_MISC, JME_MISC_LEN);
+
+	p32 += 0x100 >> 2;
+	mmapio_memcpy(jme, p32, JME_RSS, JME_RSS_LEN);
+
+	p32 += 0x100 >> 2;
+	mdio_memcpy(jme, p32, JME_PHY_REG_NR);
+}
+
+static int
+jme_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+
+	ecmd->tx_coalesce_usecs = PCC_TX_TO;
+	ecmd->tx_max_coalesced_frames = PCC_TX_CNT;
+
+	if (test_bit(JME_FLAG_POLL, &jme->flags)) {
+		ecmd->use_adaptive_rx_coalesce = false;
+		ecmd->rx_coalesce_usecs = 0;
+		ecmd->rx_max_coalesced_frames = 0;
+		return 0;
+	}
+
+	ecmd->use_adaptive_rx_coalesce = true;
+
+	switch (jme->dpi.cur) {
+	case PCC_P1:
+		ecmd->rx_coalesce_usecs = PCC_P1_TO;
+		ecmd->rx_max_coalesced_frames = PCC_P1_CNT;
+		break;
+	case PCC_P2:
+		ecmd->rx_coalesce_usecs = PCC_P2_TO;
+		ecmd->rx_max_coalesced_frames = PCC_P2_CNT;
+		break;
+	case PCC_P3:
+		ecmd->rx_coalesce_usecs = PCC_P3_TO;
+		ecmd->rx_max_coalesced_frames = PCC_P3_CNT;
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int
+jme_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+	struct dynpcc_info *dpi = &(jme->dpi);
+
+	if (netif_running(netdev))
+		return -EBUSY;
+
+	if (ecmd->use_adaptive_rx_coalesce
+	&& test_bit(JME_FLAG_POLL, &jme->flags)) {
+		clear_bit(JME_FLAG_POLL, &jme->flags);
+		jme->jme_rx = netif_rx;
+		jme->jme_vlan_rx = vlan_hwaccel_rx;
+		dpi->cur		= PCC_P1;
+		dpi->attempt		= PCC_P1;
+		dpi->cnt		= 0;
+		jme_set_rx_pcc(jme, PCC_P1);
+		jme_interrupt_mode(jme);
+	} else if (!(ecmd->use_adaptive_rx_coalesce)
+	&& !(test_bit(JME_FLAG_POLL, &jme->flags))) {
+		set_bit(JME_FLAG_POLL, &jme->flags);
+		jme->jme_rx = netif_receive_skb;
+		jme->jme_vlan_rx = vlan_hwaccel_receive_skb;
+		jme_interrupt_mode(jme);
+	}
+
+	return 0;
+}
+
+static void
+jme_get_pauseparam(struct net_device *netdev,
+			struct ethtool_pauseparam *ecmd)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+	u32 val;
+
+	ecmd->tx_pause = (jme->reg_txpfc & TXPFC_PF_EN) != 0;
+	ecmd->rx_pause = (jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0;
+
+	spin_lock_bh(&jme->phy_lock);
+	val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE);
+	spin_unlock_bh(&jme->phy_lock);
+
+	ecmd->autoneg =
+		(val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0;
+}
+
+static int
+jme_set_pauseparam(struct net_device *netdev,
+			struct ethtool_pauseparam *ecmd)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+	u32 val;
+
+	if (((jme->reg_txpfc & TXPFC_PF_EN) != 0) ^
+		(ecmd->tx_pause != 0)) {
+
+		if (ecmd->tx_pause)
+			jme->reg_txpfc |= TXPFC_PF_EN;
+		else
+			jme->reg_txpfc &= ~TXPFC_PF_EN;
+
+		jwrite32(jme, JME_TXPFC, jme->reg_txpfc);
+	}
+
+	spin_lock_bh(&jme->rxmcs_lock);
+	if (((jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0) ^
+		(ecmd->rx_pause != 0)) {
+
+		if (ecmd->rx_pause)
+			jme->reg_rxmcs |= RXMCS_FLOWCTRL;
+		else
+			jme->reg_rxmcs &= ~RXMCS_FLOWCTRL;
+
+		jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
+	}
+	spin_unlock_bh(&jme->rxmcs_lock);
+
+	spin_lock_bh(&jme->phy_lock);
+	val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE);
+	if (((val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0) ^
+		(ecmd->autoneg != 0)) {
+
+		if (ecmd->autoneg)
+			val |= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
+		else
+			val &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
+
+		jme_mdio_write(jme->dev, jme->mii_if.phy_id,
+				MII_ADVERTISE, val);
+	}
+	spin_unlock_bh(&jme->phy_lock);
+
+	return 0;
+}
+
+static void
+jme_get_wol(struct net_device *netdev,
+		struct ethtool_wolinfo *wol)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+
+	wol->supported = WAKE_MAGIC | WAKE_PHY;
+
+	wol->wolopts = 0;
+
+	if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
+		wol->wolopts |= WAKE_PHY;
+
+	if (jme->reg_pmcs & PMCS_MFEN)
+		wol->wolopts |= WAKE_MAGIC;
+}
+
+static int
+jme_set_wol(struct net_device *netdev,
+		struct ethtool_wolinfo *wol)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+
+	if (wol->wolopts & (WAKE_MAGICSECURE |
+				WAKE_UCAST |
+				WAKE_MCAST |
+				WAKE_BCAST |
+				WAKE_ARP))
+		return -EOPNOTSUPP;
+
+	jme->reg_pmcs = 0;
+
+	if (wol->wolopts & WAKE_PHY)
+		jme->reg_pmcs |= PMCS_LFEN | PMCS_LREN;
+
+	if (wol->wolopts & WAKE_MAGIC)
+		jme->reg_pmcs |= PMCS_MFEN;
+
+	jwrite32(jme, JME_PMCS, jme->reg_pmcs);
+
+	return 0;
+}
+
+static int
+jme_get_settings(struct net_device *netdev,
+		     struct ethtool_cmd *ecmd)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+	int rc;
+
+	spin_lock_bh(&jme->phy_lock);
+	rc = mii_ethtool_gset(&(jme->mii_if), ecmd);
+	spin_unlock_bh(&jme->phy_lock);
+	return rc;
+}
+
+static int
+jme_set_settings(struct net_device *netdev,
+		     struct ethtool_cmd *ecmd)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+	int rc, fdc = 0;
+
+	if (ecmd->speed == SPEED_1000 && ecmd->autoneg != AUTONEG_ENABLE)
+		return -EINVAL;
+
+	if (jme->mii_if.force_media &&
+	ecmd->autoneg != AUTONEG_ENABLE &&
+	(jme->mii_if.full_duplex != ecmd->duplex))
+		fdc = 1;
+
+	spin_lock_bh(&jme->phy_lock);
+	rc = mii_ethtool_sset(&(jme->mii_if), ecmd);
+	spin_unlock_bh(&jme->phy_lock);
+
+	if (!rc && fdc)
+		jme_reset_link(jme);
+
+	if (!rc) {
+		set_bit(JME_FLAG_SSET, &jme->flags);
+		jme->old_ecmd = *ecmd;
+	}
+
+	return rc;
+}
+
+static u32
+jme_get_link(struct net_device *netdev)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+	return jread32(jme, JME_PHY_LINK) & PHY_LINK_UP;
+}
+
+static u32
+jme_get_msglevel(struct net_device *netdev)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+	return jme->msg_enable;
+}
+
+static void
+jme_set_msglevel(struct net_device *netdev, u32 value)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+	jme->msg_enable = value;
+}
+
+static u32
+jme_get_rx_csum(struct net_device *netdev)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+	return jme->reg_rxmcs & RXMCS_CHECKSUM;
+}
+
+static int
+jme_set_rx_csum(struct net_device *netdev, u32 on)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+
+	spin_lock_bh(&jme->rxmcs_lock);
+	if (on)
+		jme->reg_rxmcs |= RXMCS_CHECKSUM;
+	else
+		jme->reg_rxmcs &= ~RXMCS_CHECKSUM;
+	jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
+	spin_unlock_bh(&jme->rxmcs_lock);
+
+	return 0;
+}
+
+static int
+jme_set_tx_csum(struct net_device *netdev, u32 on)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+
+	if (on) {
+		set_bit(JME_FLAG_TXCSUM, &jme->flags);
+		if (netdev->mtu <= 1900)
+			netdev->features |= NETIF_F_HW_CSUM;
+	} else {
+		clear_bit(JME_FLAG_TXCSUM, &jme->flags);
+		netdev->features &= ~NETIF_F_HW_CSUM;
+	}
+
+	return 0;
+}
+
+static int
+jme_set_tso(struct net_device *netdev, u32 on)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+
+	if (on) {
+		set_bit(JME_FLAG_TSO, &jme->flags);
+		if (netdev->mtu <= 1900)
+			netdev->features |= NETIF_F_TSO | NETIF_F_TSO6;
+	} else {
+		clear_bit(JME_FLAG_TSO, &jme->flags);
+		netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
+	}
+
+	return 0;
+}
+
+static int
+jme_nway_reset(struct net_device *netdev)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+	jme_restart_an(jme);
+	return 0;
+}
+
+static u8
+jme_smb_read(struct jme_adapter *jme, unsigned int addr)
+{
+	u32 val;
+	int to;
+
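+	/*
+	 * Wait for the SMBus to go idle, issue the read command, then
+	 * poll until the hardware clears the command bit.
+	 */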
+	val = jread32(jme, JME_SMBCSR);
+	to = JME_SMB_BUSY_TIMEOUT;
+	while ((val & SMBCSR_BUSY) && --to) {
+		msleep(1);
+		val = jread32(jme, JME_SMBCSR);
+	}
+	if (!to) {
+		msg_hw(jme, "SMB Bus Busy.\n");
+		return 0xFF;
+	}
+
+	jwrite32(jme, JME_SMBINTF,
+		((addr << SMBINTF_HWADDR_SHIFT) & SMBINTF_HWADDR) |
+		SMBINTF_HWRWN_READ |
+		SMBINTF_HWCMD);
+
+	val = jread32(jme, JME_SMBINTF);
+	to = JME_SMB_BUSY_TIMEOUT;
+	while ((val & SMBINTF_HWCMD) && --to) {
+		msleep(1);
+		val = jread32(jme, JME_SMBINTF);
+	}
+	if (!to) {
+		msg_hw(jme, "SMB Bus Busy.\n");
+		return 0xFF;
+	}
+
+	return (val & SMBINTF_HWDATR) >> SMBINTF_HWDATR_SHIFT;
+}
+
+static void
+jme_smb_write(struct jme_adapter *jme, unsigned int addr, u8 data)
+{
+	u32 val;
+	int to;
+
+	val = jread32(jme, JME_SMBCSR);
+	to = JME_SMB_BUSY_TIMEOUT;
+	while ((val & SMBCSR_BUSY) && --to) {
+		msleep(1);
+		val = jread32(jme, JME_SMBCSR);
+	}
+	if (!to) {
+		msg_hw(jme, "SMB Bus Busy.\n");
+		return;
+	}
+
+	jwrite32(jme, JME_SMBINTF,
+		((data << SMBINTF_HWDATW_SHIFT) & SMBINTF_HWDATW) |
+		((addr << SMBINTF_HWADDR_SHIFT) & SMBINTF_HWADDR) |
+		SMBINTF_HWRWN_WRITE |
+		SMBINTF_HWCMD);
+
+	val = jread32(jme, JME_SMBINTF);
+	to = JME_SMB_BUSY_TIMEOUT;
+	while ((val & SMBINTF_HWCMD) && --to) {
+		msleep(1);
+		val = jread32(jme, JME_SMBINTF);
+	}
+	if (!to) {
+		msg_hw(jme, "SMB Bus Busy.\n");
+		return;
+	}
+
+	mdelay(2);
+}
+
+static int
+jme_get_eeprom_len(struct net_device *netdev)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+	u32 val;
+	val = jread32(jme, JME_SMBCSR);
+	return (val & SMBCSR_EEPROMD) ? JME_SMB_LEN : 0;
+}
+
+static int
+jme_get_eeprom(struct net_device *netdev,
+		struct ethtool_eeprom *eeprom, u8 *data)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+	int i, offset = eeprom->offset, len = eeprom->len;
+
+	/*
+	 * ethtool will check the boundary for us
+	 */
+	eeprom->magic = JME_EEPROM_MAGIC;
+	for (i = 0 ; i < len ; ++i)
+		data[i] = jme_smb_read(jme, i + offset);
+
+	return 0;
+}
+
+static int
+jme_set_eeprom(struct net_device *netdev,
+		struct ethtool_eeprom *eeprom, u8 *data)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+	int i, offset = eeprom->offset, len = eeprom->len;
+
+	if (eeprom->magic != JME_EEPROM_MAGIC)
+		return -EINVAL;
+
+	/*
+	 * ethtool will check the boundary for us
+	 */
+	for (i = 0 ; i < len ; ++i)
+		jme_smb_write(jme, i + offset, data[i]);
+
+	return 0;
+}
+
+static const struct ethtool_ops jme_ethtool_ops = {
+	.get_drvinfo            = jme_get_drvinfo,
+	.get_regs_len		= jme_get_regs_len,
+	.get_regs		= jme_get_regs,
+	.get_coalesce		= jme_get_coalesce,
+	.set_coalesce		= jme_set_coalesce,
+	.get_pauseparam		= jme_get_pauseparam,
+	.set_pauseparam		= jme_set_pauseparam,
+	.get_wol		= jme_get_wol,
+	.set_wol		= jme_set_wol,
+	.get_settings		= jme_get_settings,
+	.set_settings		= jme_set_settings,
+	.get_link		= jme_get_link,
+	.get_msglevel           = jme_get_msglevel,
+	.set_msglevel           = jme_set_msglevel,
+	.get_rx_csum		= jme_get_rx_csum,
+	.set_rx_csum		= jme_set_rx_csum,
+	.set_tx_csum		= jme_set_tx_csum,
+	.set_tso		= jme_set_tso,
+	.set_sg			= ethtool_op_set_sg,
+	.nway_reset             = jme_nway_reset,
+	.get_eeprom_len		= jme_get_eeprom_len,
+	.get_eeprom		= jme_get_eeprom,
+	.set_eeprom		= jme_set_eeprom,
+};
+
+static int
+jme_pci_dma64(struct pci_dev *pdev)
+{
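+	/*
+	 * Try 64-, 40- and 32-bit DMA masks in turn.  Returns 1 if the
+	 * device can address high memory (caller enables
+	 * NETIF_F_HIGHDMA), 0 for 32-bit-only DMA, or -1 if no mask
+	 * could be set.
+	 */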
+	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK))
+		if (!pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
+			return 1;
+
+	if (!pci_set_dma_mask(pdev, DMA_40BIT_MASK))
+		if (!pci_set_consistent_dma_mask(pdev, DMA_40BIT_MASK))
+			return 1;
+
+	if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK))
+		if (!pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))
+			return 0;
+
+	return -1;
+}
+
+static inline void
+jme_phy_init(struct jme_adapter *jme)
+{
+	u16 reg26;
+
+	reg26 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, 26);
+	jme_mdio_write(jme->dev, jme->mii_if.phy_id, 26, reg26 | 0x1000);
+}
+
+static inline void
+jme_check_hw_ver(struct jme_adapter *jme)
+{
+	u32 chipmode;
+
+	chipmode = jread32(jme, JME_CHIPMODE);
+
+	jme->fpgaver = (chipmode & CM_FPGAVER_MASK) >> CM_FPGAVER_SHIFT;
+	jme->chiprev = (chipmode & CM_CHIPREV_MASK) >> CM_CHIPREV_SHIFT;
+}
+
+static int __devinit
+jme_init_one(struct pci_dev *pdev,
+	     const struct pci_device_id *ent)
+{
+	int rc = 0, using_dac, i;
+	struct net_device *netdev;
+	struct jme_adapter *jme;
+	u16 bmcr, bmsr;
+	u32 apmc;
+
+	/*
+	 * set up PCI device basics
+	 */
+	rc = pci_enable_device(pdev);
+	if (rc) {
+		jeprintk(pdev, "Cannot enable PCI device.\n");
+		goto err_out;
+	}
+
+	using_dac = jme_pci_dma64(pdev);
+	if (using_dac < 0) {
+		jeprintk(pdev, "Cannot set PCI DMA Mask.\n");
+		rc = -EIO;
+		goto err_out_disable_pdev;
+	}
+
+	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+		jeprintk(pdev, "No PCI resource region found.\n");
+		rc = -ENOMEM;
+		goto err_out_disable_pdev;
+	}
+
+	rc = pci_request_regions(pdev, DRV_NAME);
+	if (rc) {
+		jeprintk(pdev, "Cannot obtain PCI resource region.\n");
+		goto err_out_disable_pdev;
+	}
+
+	pci_set_master(pdev);
+
+	/*
+	 * alloc and init net device
+	 */
+	netdev = alloc_etherdev(sizeof(*jme));
+	if (!netdev) {
+		jeprintk(pdev, "Cannot allocate netdev structure.\n");
+		rc = -ENOMEM;
+		goto err_out_release_regions;
+	}
+	netdev->open			= jme_open;
+	netdev->stop			= jme_close;
+	netdev->hard_start_xmit		= jme_start_xmit;
+	netdev->set_mac_address		= jme_set_macaddr;
+	netdev->set_multicast_list	= jme_set_multi;
+	netdev->change_mtu		= jme_change_mtu;
+	netdev->ethtool_ops		= &jme_ethtool_ops;
+	netdev->tx_timeout		= jme_tx_timeout;
+	netdev->watchdog_timeo		= TX_TIMEOUT;
+	netdev->vlan_rx_register	= jme_vlan_rx_register;
+	NETDEV_GET_STATS(netdev, &jme_get_stats);
+	netdev->features		=	NETIF_F_HW_CSUM |
+						NETIF_F_SG |
+						NETIF_F_TSO |
+						NETIF_F_TSO6 |
+						NETIF_F_HW_VLAN_TX |
+						NETIF_F_HW_VLAN_RX;
+	if (using_dac)
+		netdev->features	|=	NETIF_F_HIGHDMA;
+
+	SET_NETDEV_DEV(netdev, &pdev->dev);
+	pci_set_drvdata(pdev, netdev);
+
+	/*
+	 * init adapter info
+	 */
+	jme = netdev_priv(netdev);
+	jme->pdev = pdev;
+	jme->dev = netdev;
+	jme->jme_rx = netif_rx;
+	jme->jme_vlan_rx = vlan_hwaccel_rx;
+	jme->old_mtu = netdev->mtu = 1500;
+	jme->phylink = 0;
+	jme->tx_ring_size = 1 << 10;
+	jme->tx_ring_mask = jme->tx_ring_size - 1;
+	jme->tx_wake_threshold = 1 << 9;
+	jme->rx_ring_size = 1 << 9;
+	jme->rx_ring_mask = jme->rx_ring_size - 1;
+	jme->msg_enable = JME_DEF_MSG_ENABLE;
+	jme->regs = ioremap(pci_resource_start(pdev, 0),
+			     pci_resource_len(pdev, 0));
+	if (!(jme->regs)) {
+		jeprintk(pdev, "Mapping PCI resource region error.\n");
+		rc = -ENOMEM;
+		goto err_out_free_netdev;
+	}
+	jme->shadow_regs = pci_alloc_consistent(pdev,
+						sizeof(u32) * SHADOW_REG_NR,
+						&(jme->shadow_dma));
+	if (!(jme->shadow_regs)) {
+		jeprintk(pdev, "Allocating shadow register mapping error.\n");
+		rc = -ENOMEM;
+		goto err_out_unmap;
+	}
+
+	if (no_pseudohp) {
+		apmc = jread32(jme, JME_APMC) & ~JME_APMC_PSEUDO_HP_EN;
+		jwrite32(jme, JME_APMC, apmc);
+	} else if (force_pseudohp) {
+		apmc = jread32(jme, JME_APMC) | JME_APMC_PSEUDO_HP_EN;
+		jwrite32(jme, JME_APMC, apmc);
+	}
+
+	NETIF_NAPI_SET(netdev, &jme->napi, jme_poll, jme->rx_ring_size >> 2)
+
+	spin_lock_init(&jme->phy_lock);
+	spin_lock_init(&jme->macaddr_lock);
+	spin_lock_init(&jme->rxmcs_lock);
+
+	atomic_set(&jme->link_changing, 1);
+	atomic_set(&jme->rx_cleaning, 1);
+	atomic_set(&jme->tx_cleaning, 1);
+	atomic_set(&jme->rx_empty, 1);
+
+	tasklet_init(&jme->pcc_task,
+		     &jme_pcc_tasklet,
+		     (unsigned long) jme);
+	tasklet_init(&jme->linkch_task,
+		     &jme_link_change_tasklet,
+		     (unsigned long) jme);
+	tasklet_init(&jme->txclean_task,
+		     &jme_tx_clean_tasklet,
+		     (unsigned long) jme);
+	tasklet_init(&jme->rxclean_task,
+		     &jme_rx_clean_tasklet,
+		     (unsigned long) jme);
+	tasklet_init(&jme->rxempty_task,
+		     &jme_rx_empty_tasklet,
+		     (unsigned long) jme);
+	tasklet_disable_nosync(&jme->txclean_task);
+	tasklet_disable_nosync(&jme->rxclean_task);
+	tasklet_disable_nosync(&jme->rxempty_task);
+	jme->dpi.cur = PCC_P1;
+
+	jme->reg_ghc = 0;
+	jme->reg_rxcs = RXCS_DEFAULT;
+	jme->reg_rxmcs = RXMCS_DEFAULT;
+	jme->reg_txpfc = 0;
+	jme->reg_pmcs = PMCS_MFEN;
+	set_bit(JME_FLAG_TXCSUM, &jme->flags);
+	set_bit(JME_FLAG_TSO, &jme->flags);
+
+	/*
+	 * Get Max Read Req Size from PCI Config Space
+	 */
+	pci_read_config_byte(pdev, PCI_DCSR_MRRS, &jme->mrrs);
+	jme->mrrs &= PCI_DCSR_MRRS_MASK;
+	switch (jme->mrrs) {
+	case MRRS_128B:
+		jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_128B;
+		break;
+	case MRRS_256B:
+		jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_256B;
+		break;
+	default:
+		jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_512B;
+		break;
+	}
+
+	/*
+	 * Must check before reset_mac_processor
+	 */
+	jme_check_hw_ver(jme);
+	jme->mii_if.dev = netdev;
+	if (jme->fpgaver) {
+		jme->mii_if.phy_id = 0;
+		for (i = 1 ; i < 32 ; ++i) {
+			bmcr = jme_mdio_read(netdev, i, MII_BMCR);
+			bmsr = jme_mdio_read(netdev, i, MII_BMSR);
+			if (bmcr != 0xFFFFU && (bmcr != 0 || bmsr != 0)) {
+				jme->mii_if.phy_id = i;
+				break;
+			}
+		}
+
+		if (!jme->mii_if.phy_id) {
+			rc = -EIO;
+			jeprintk(pdev, "Can not find phy_id.\n");
+			 goto err_out_free_shadow;
+		}
+
+		jme->reg_ghc |= GHC_LINK_POLL;
+	} else {
+		jme->mii_if.phy_id = 1;
+	}
+	if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250)
+		jme->mii_if.supports_gmii = true;
+	else
+		jme->mii_if.supports_gmii = false;
+	jme->mii_if.mdio_read = jme_mdio_read;
+	jme->mii_if.mdio_write = jme_mdio_write;
+
+	jme_clear_pm(jme);
+	jme_set_phyfifoa(jme);
+	pci_read_config_byte(pdev, PCI_REVISION_ID, &jme->rev);
+	if (!jme->fpgaver)
+		jme_phy_init(jme);
+	jme_phy_off(jme);
+
+	/*
+	 * Reset MAC processor and reload EEPROM for MAC Address
+	 */
+	jme_reset_mac_processor(jme);
+	rc = jme_reload_eeprom(jme);
+	if (rc) {
+		jeprintk(pdev,
+			"Failed to reload EEPROM for reading the MAC address.\n");
+		goto err_out_free_shadow;
+	}
+	jme_load_macaddr(netdev);
+
+	/*
+	 * Tell stack that we are not ready to work until open()
+	 */
+	netif_carrier_off(netdev);
+	netif_stop_queue(netdev);
+
+	/*
+	 * Register netdev
+	 */
+	rc = register_netdev(netdev);
+	if (rc) {
+		jeprintk(pdev, "Cannot register net device.\n");
+		goto err_out_free_shadow;
+	}
+
+	msg_probe(jme,
+		"JMC250 gigabit%s ver:%x rev:%x "
+		"macaddr:%02x:%02x:%02x:%02x:%02x:%02x\n",
+		(jme->fpgaver != 0) ? " (FPGA)" : "",
+		(jme->fpgaver != 0) ? jme->fpgaver : jme->chiprev,
+		jme->rev,
+		netdev->dev_addr[0],
+		netdev->dev_addr[1],
+		netdev->dev_addr[2],
+		netdev->dev_addr[3],
+		netdev->dev_addr[4],
+		netdev->dev_addr[5]);
+
+	return 0;
+
+err_out_free_shadow:
+	pci_free_consistent(pdev,
+			    sizeof(u32) * SHADOW_REG_NR,
+			    jme->shadow_regs,
+			    jme->shadow_dma);
+err_out_unmap:
+	iounmap(jme->regs);
+err_out_free_netdev:
+	pci_set_drvdata(pdev, NULL);
+	free_netdev(netdev);
+err_out_release_regions:
+	pci_release_regions(pdev);
+err_out_disable_pdev:
+	pci_disable_device(pdev);
+err_out:
+	return rc;
+}
+
+static void __devexit
+jme_remove_one(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct jme_adapter *jme = netdev_priv(netdev);
+
+	unregister_netdev(netdev);
+	pci_free_consistent(pdev,
+			    sizeof(u32) * SHADOW_REG_NR,
+			    jme->shadow_regs,
+			    jme->shadow_dma);
+	iounmap(jme->regs);
+	pci_set_drvdata(pdev, NULL);
+	free_netdev(netdev);
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+}
+
+static int
+jme_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct jme_adapter *jme = netdev_priv(netdev);
+
+	atomic_dec(&jme->link_changing);
+
+	netif_device_detach(netdev);
+	netif_stop_queue(netdev);
+	jme_stop_irq(jme);
+
+	tasklet_disable(&jme->txclean_task);
+	tasklet_disable(&jme->rxclean_task);
+	tasklet_disable(&jme->rxempty_task);
+
+	jme_disable_shadow(jme);
+
+	if (netif_carrier_ok(netdev)) {
+		if (test_bit(JME_FLAG_POLL, &jme->flags))
+			jme_polling_mode(jme);
+
+		jme_stop_pcc_timer(jme);
+		jme_reset_ghc_speed(jme);
+		jme_disable_rx_engine(jme);
+		jme_disable_tx_engine(jme);
+		jme_reset_mac_processor(jme);
+		jme_free_rx_resources(jme);
+		jme_free_tx_resources(jme);
+		netif_carrier_off(netdev);
+		jme->phylink = 0;
+	}
+
+	tasklet_enable(&jme->txclean_task);
+	tasklet_hi_enable(&jme->rxclean_task);
+	tasklet_hi_enable(&jme->rxempty_task);
+
+	pci_save_state(pdev);
+	if (jme->reg_pmcs) {
+		jme_set_100m_half(jme);
+
+		if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
+			jme_wait_link(jme);
+
+		jwrite32(jme, JME_PMCS, jme->reg_pmcs);
+
+		pci_enable_wake(pdev, PCI_D3cold, true);
+	} else {
+		jme_phy_off(jme);
+	}
+	pci_set_power_state(pdev, PCI_D3cold);
+
+	return 0;
+}
+
+static int
+jme_resume(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct jme_adapter *jme = netdev_priv(netdev);
+
+	jme_clear_pm(jme);
+	pci_restore_state(pdev);
+
+	if (test_bit(JME_FLAG_SSET, &jme->flags))
+		jme_set_settings(netdev, &jme->old_ecmd);
+	else
+		jme_reset_phy_processor(jme);
+
+	jme_enable_shadow(jme);
+	jme_start_irq(jme);
+	netif_device_attach(netdev);
+
+	atomic_inc(&jme->link_changing);
+
+	jme_reset_link(jme);
+
+	return 0;
+}
+
+static struct pci_device_id jme_pci_tbl[] = {
+	{ PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC250) },
+	{ PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC260) },
+	{ }
+};
+
+static struct pci_driver jme_driver = {
+	.name           = DRV_NAME,
+	.id_table       = jme_pci_tbl,
+	.probe          = jme_init_one,
+	.remove         = __devexit_p(jme_remove_one),
+#ifdef CONFIG_PM
+	.suspend        = jme_suspend,
+	.resume         = jme_resume,
+#endif /* CONFIG_PM */
+};
+
+static int __init
+jme_init_module(void)
+{
+	printk(KERN_INFO PFX "JMicron JMC250 gigabit ethernet "
+	       "driver version %s\n", DRV_VERSION);
+	return pci_register_driver(&jme_driver);
+}
+
+static void __exit
+jme_cleanup_module(void)
+{
+	pci_unregister_driver(&jme_driver);
+}
+
+module_init(jme_init_module);
+module_exit(jme_cleanup_module);
+
+MODULE_AUTHOR("Guo-Fu Tseng <cooldavid@cooldavid.org>");
+MODULE_DESCRIPTION("JMicron JMC2x0 PCI Express Ethernet driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, jme_pci_tbl);
+
diff --git a/drivers/net/jme.h b/drivers/net/jme.h
new file mode 100644
index 000000000000..b29688431a6d
--- /dev/null
+++ b/drivers/net/jme.h
@@ -0,0 +1,1199 @@
+/*
+ * JMicron JMC2x0 series PCIe Ethernet Linux Device Driver
+ *
+ * Copyright 2008 JMicron Technology Corporation
+ * http://www.jmicron.com/
+ *
+ * Author: Guo-Fu Tseng <cooldavid@cooldavid.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __JME_H_INCLUDED__
+#define __JME_H_INCLUDED__
+
+#define DRV_NAME	"jme"
+#define DRV_VERSION	"1.0.2"
+#define PFX		DRV_NAME ": "
+
+#define PCI_DEVICE_ID_JMICRON_JMC250	0x0250
+#define PCI_DEVICE_ID_JMICRON_JMC260	0x0260
+
+/*
+ * Message related definitions
+ */
+#define JME_DEF_MSG_ENABLE \
+	(NETIF_MSG_PROBE | \
+	NETIF_MSG_LINK | \
+	NETIF_MSG_RX_ERR | \
+	NETIF_MSG_TX_ERR | \
+	NETIF_MSG_HW)
+
+#define jeprintk(pdev, fmt, args...) \
+	printk(KERN_ERR PFX fmt, ## args)
+
+#ifdef TX_DEBUG
+#define tx_dbg(priv, fmt, args...) \
+	printk(KERN_DEBUG "%s: " fmt, (priv)->dev->name, ## args)
+#else
+#define tx_dbg(priv, fmt, args...)
+#endif
+
+#define jme_msg(msglvl, type, priv, fmt, args...) \
+	if (netif_msg_##type(priv)) \
+		printk(msglvl "%s: " fmt, (priv)->dev->name, ## args)
+
+#define msg_probe(priv, fmt, args...) \
+	jme_msg(KERN_INFO, probe, priv, fmt, ## args)
+
+#define msg_link(priv, fmt, args...) \
+	jme_msg(KERN_INFO, link, priv, fmt, ## args)
+
+#define msg_intr(priv, fmt, args...) \
+	jme_msg(KERN_INFO, intr, priv, fmt, ## args)
+
+#define msg_rx_err(priv, fmt, args...) \
+	jme_msg(KERN_ERR, rx_err, priv, fmt, ## args)
+
+#define msg_rx_status(priv, fmt, args...) \
+	jme_msg(KERN_INFO, rx_status, priv, fmt, ## args)
+
+#define msg_tx_err(priv, fmt, args...) \
+	jme_msg(KERN_ERR, tx_err, priv, fmt, ## args)
+
+#define msg_tx_done(priv, fmt, args...) \
+	jme_msg(KERN_INFO, tx_done, priv, fmt, ## args)
+
+#define msg_tx_queued(priv, fmt, args...) \
+	jme_msg(KERN_INFO, tx_queued, priv, fmt, ## args)
+
+#define msg_hw(priv, fmt, args...) \
+	jme_msg(KERN_ERR, hw, priv, fmt, ## args)
+
+/*
+ * Extra PCI Configuration space interface
+ */
+#define PCI_DCSR_MRRS		0x59
+#define PCI_DCSR_MRRS_MASK	0x70
+
+enum pci_dcsr_mrrs_vals {
+	MRRS_128B	= 0x00,
+	MRRS_256B	= 0x10,
+	MRRS_512B	= 0x20,
+	MRRS_1024B	= 0x30,
+	MRRS_2048B	= 0x40,
+	MRRS_4096B	= 0x50,
+};
+
+#define PCI_SPI			0xB0
+
+enum pci_spi_bits {
+	SPI_EN		= 0x10,
+	SPI_MISO	= 0x08,
+	SPI_MOSI	= 0x04,
+	SPI_SCLK	= 0x02,
+	SPI_CS		= 0x01,
+};
+
+struct jme_spi_op {
+	void __user *uwbuf;
+	void __user *urbuf;
+	__u8	wn;	/* Number of write actions */
+	__u8	rn;	/* Number of read actions */
+	__u8	bitn;	/* Number of bits per action */
+	__u8	spd;	/* The maximum acceptable speed of the controller, in MHz. */
+	__u8	mode;	/* CPOL, CPHA, and Duplex mode of SPI */
+
+	/* Internal use only */
+	u8	*kwbuf;
+	u8	*krbuf;
+	u8	sr;
+	u16	halfclk; /* Half of clock cycle calculated from spd, in ns */
+};
+
+enum jme_spi_op_bits {
+	SPI_MODE_CPHA	= 0x01,
+	SPI_MODE_CPOL	= 0x02,
+	SPI_MODE_DUP	= 0x80,
+};
+
+#define HALF_US 500	/* 500 ns */
+#define JMESPIIOCTL	SIOCDEVPRIVATE
+
+/*
+ * Dynamic(adaptive)/Static PCC values
+ */
+enum dynamic_pcc_values {
+	PCC_OFF		= 0,
+	PCC_P1		= 1,
+	PCC_P2		= 2,
+	PCC_P3		= 3,
+
+	PCC_OFF_TO	= 0,
+	PCC_P1_TO	= 1,
+	PCC_P2_TO	= 64,
+	PCC_P3_TO	= 128,
+
+	PCC_OFF_CNT	= 0,
+	PCC_P1_CNT	= 1,
+	PCC_P2_CNT	= 16,
+	PCC_P3_CNT	= 32,
+};
+struct dynpcc_info {
+	unsigned long	last_bytes;
+	unsigned long	last_pkts;
+	unsigned long	intr_cnt;
+	unsigned char	cur;
+	unsigned char	attempt;
+	unsigned char	cnt;
+};
+#define PCC_INTERVAL_US	100000
+#define PCC_INTERVAL (HZ / (1000000 / PCC_INTERVAL_US))
+#define PCC_P3_THRESHOLD (2 * 1024 * 1024)
+#define PCC_P2_THRESHOLD 800
+#define PCC_INTR_THRESHOLD 800
+#define PCC_TX_TO 1000
+#define PCC_TX_CNT 8
+
+/*
+ * TX/RX Descriptors
+ *
+ * TX/RX ring descriptor count must be a multiple of 16 and <= 1024
+ */
+#define RING_DESC_ALIGN		16	/* Descriptor alignment */
+#define TX_DESC_SIZE		16
+#define TX_RING_NR		8
+#define TX_RING_ALLOC_SIZE(s)	(((s) * TX_DESC_SIZE) + RING_DESC_ALIGN)
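+/*
+ * e.g. TX_RING_ALLOC_SIZE(1024) = 1024 * 16 + 16 bytes; the extra
+ * RING_DESC_ALIGN leaves slack to align the ring start address.
+ */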
+
+struct txdesc {
+	union {
+		__u8	all[16];
+		__le32	dw[4];
+		struct {
+			/* DW0 */
+			__le16	vlan;
+			__u8	rsv1;
+			__u8	flags;
+
+			/* DW1 */
+			__le16	datalen;
+			__le16	mss;
+
+			/* DW2 */
+			__le16	pktsize;
+			__le16	rsv2;
+
+			/* DW3 */
+			__le32	bufaddr;
+		} desc1;
+		struct {
+			/* DW0 */
+			__le16	rsv1;
+			__u8	rsv2;
+			__u8	flags;
+
+			/* DW1 */
+			__le16	datalen;
+			__le16	rsv3;
+
+			/* DW2 */
+			__le32	bufaddrh;
+
+			/* DW3 */
+			__le32	bufaddrl;
+		} desc2;
+		struct {
+			/* DW0 */
+			__u8	ehdrsz;
+			__u8	rsv1;
+			__u8	rsv2;
+			__u8	flags;
+
+			/* DW1 */
+			__le16	trycnt;
+			__le16	segcnt;
+
+			/* DW2 */
+			__le16	pktsz;
+			__le16	rsv3;
+
+			/* DW3 */
+			__le32	bufaddrl;
+		} descwb;
+	};
+};
+
+enum jme_txdesc_flags_bits {
+	TXFLAG_OWN	= 0x80,
+	TXFLAG_INT	= 0x40,
+	TXFLAG_64BIT	= 0x20,
+	TXFLAG_TCPCS	= 0x10,
+	TXFLAG_UDPCS	= 0x08,
+	TXFLAG_IPCS	= 0x04,
+	TXFLAG_LSEN	= 0x02,
+	TXFLAG_TAGON	= 0x01,
+};
+
+#define TXDESC_MSS_SHIFT	2
+enum jme_txwbdesc_flags_bits {
+	TXWBFLAG_OWN	= 0x80,
+	TXWBFLAG_INT	= 0x40,
+	TXWBFLAG_TMOUT	= 0x20,
+	TXWBFLAG_TRYOUT	= 0x10,
+	TXWBFLAG_COL	= 0x08,
+
+	TXWBFLAG_ALLERR	= TXWBFLAG_TMOUT |
+			  TXWBFLAG_TRYOUT |
+			  TXWBFLAG_COL,
+};
+
+#define RX_DESC_SIZE		16
+#define RX_RING_NR		4
+#define RX_RING_ALLOC_SIZE(s)	(((s) * RX_DESC_SIZE) + RING_DESC_ALIGN)
+#define RX_BUF_DMA_ALIGN	8
+#define RX_PREPAD_SIZE		10
+#define ETH_CRC_LEN		2
+#define RX_VLANHDR_LEN		2
+#define RX_EXTRA_LEN		(RX_PREPAD_SIZE + \
+				ETH_HLEN + \
+				ETH_CRC_LEN + \
+				RX_VLANHDR_LEN + \
+				RX_BUF_DMA_ALIGN)
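+/* With ETH_HLEN = 14, the extra room per receive buffer sums to 36 bytes. */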
+
+struct rxdesc {
+	union {
+		__u8	all[16];
+		__le32	dw[4];
+		struct {
+			/* DW0 */
+			__le16	rsv2;
+			__u8	rsv1;
+			__u8	flags;
+
+			/* DW1 */
+			__le16	datalen;
+			__le16	wbcpl;
+
+			/* DW2 */
+			__le32	bufaddrh;
+
+			/* DW3 */
+			__le32	bufaddrl;
+		} desc1;
+		struct {
+			/* DW0 */
+			__le16	vlan;
+			__le16	flags;
+
+			/* DW1 */
+			__le16	framesize;
+			__u8	errstat;
+			__u8	desccnt;
+
+			/* DW2 */
+			__le32	rsshash;
+
+			/* DW3 */
+			__u8	hashfun;
+			__u8	hashtype;
+			__le16	resrv;
+		} descwb;
+	};
+};
+
+enum jme_rxdesc_flags_bits {
+	RXFLAG_OWN	= 0x80,
+	RXFLAG_INT	= 0x40,
+	RXFLAG_64BIT	= 0x20,
+};
+
+enum jme_rxwbdesc_flags_bits {
+	RXWBFLAG_OWN		= 0x8000,
+	RXWBFLAG_INT		= 0x4000,
+	RXWBFLAG_MF		= 0x2000,
+	RXWBFLAG_64BIT		= 0x2000,
+	RXWBFLAG_TCPON		= 0x1000,
+	RXWBFLAG_UDPON		= 0x0800,
+	RXWBFLAG_IPCS		= 0x0400,
+	RXWBFLAG_TCPCS		= 0x0200,
+	RXWBFLAG_UDPCS		= 0x0100,
+	RXWBFLAG_TAGON		= 0x0080,
+	RXWBFLAG_IPV4		= 0x0040,
+	RXWBFLAG_IPV6		= 0x0020,
+	RXWBFLAG_PAUSE		= 0x0010,
+	RXWBFLAG_MAGIC		= 0x0008,
+	RXWBFLAG_WAKEUP		= 0x0004,
+	RXWBFLAG_DEST		= 0x0003,
+	RXWBFLAG_DEST_UNI	= 0x0001,
+	RXWBFLAG_DEST_MUL	= 0x0002,
+	RXWBFLAG_DEST_BRO	= 0x0003,
+};
+
+enum jme_rxwbdesc_desccnt_mask {
+	RXWBDCNT_WBCPL	= 0x80,
+	RXWBDCNT_DCNT	= 0x7F,
+};
+
+enum jme_rxwbdesc_errstat_bits {
+	RXWBERR_LIMIT	= 0x80,
+	RXWBERR_MIIER	= 0x40,
+	RXWBERR_NIBON	= 0x20,
+	RXWBERR_COLON	= 0x10,
+	RXWBERR_ABORT	= 0x08,
+	RXWBERR_SHORT	= 0x04,
+	RXWBERR_OVERUN	= 0x02,
+	RXWBERR_CRCERR	= 0x01,
+	RXWBERR_ALLERR	= 0xFF,
+};
+
+/*
+ * Buffer information corresponding to ring descriptors.
+ */
+struct jme_buffer_info {
+	struct sk_buff *skb;
+	dma_addr_t mapping;
+	int len;
+	int nr_desc;
+	unsigned long start_xmit;
+};
+
+/*
+ * The structure holding buffer information and ring descriptors all together.
+ */
+#define MAX_RING_DESC_NR	1024
+struct jme_ring {
+	void *alloc;		/* pointer to allocated memory */
+	void *desc;		/* pointer to ring memory  */
+	dma_addr_t dmaalloc;	/* phys address of ring alloc */
+	dma_addr_t dma;		/* phys address for ring dma */
+
+	/* Buffer information corresponding to each descriptor */
+	struct jme_buffer_info bufinf[MAX_RING_DESC_NR];
+
+	int next_to_use;
+	atomic_t next_to_clean;
+	atomic_t nr_free;
+};
+
+#define NET_STAT(priv) (priv->dev->stats)
+#define NETDEV_GET_STATS(netdev, fun_ptr)
+#define DECLARE_NET_DEVICE_STATS
+
+#define DECLARE_NAPI_STRUCT struct napi_struct napi;
+#define NETIF_NAPI_SET(dev, napis, pollfn, q) \
+	netif_napi_add(dev, napis, pollfn, q);
+#define JME_NAPI_HOLDER(holder) struct napi_struct *holder
+#define JME_NAPI_WEIGHT(w) int w
+#define JME_NAPI_WEIGHT_VAL(w) w
+#define JME_NAPI_WEIGHT_SET(w, r)
+#define JME_RX_COMPLETE(dev, napis) netif_rx_complete(dev, napis)
+#define JME_NAPI_ENABLE(priv) napi_enable(&priv->napi);
+#define JME_NAPI_DISABLE(priv) \
+	if (!napi_disable_pending(&priv->napi)) \
+		napi_disable(&priv->napi);
+#define JME_RX_SCHEDULE_PREP(priv) \
+	netif_rx_schedule_prep(priv->dev, &priv->napi)
+#define JME_RX_SCHEDULE(priv) \
+	__netif_rx_schedule(priv->dev, &priv->napi);
+
+/*
+ * Jmac Adapter Private data
+ */
+#define SHADOW_REG_NR 8
+struct jme_adapter {
+	struct pci_dev          *pdev;
+	struct net_device       *dev;
+	void __iomem            *regs;
+	dma_addr_t		shadow_dma;
+	u32			*shadow_regs;
+	struct mii_if_info	mii_if;
+	struct jme_ring		rxring[RX_RING_NR];
+	struct jme_ring		txring[TX_RING_NR];
+	spinlock_t		phy_lock;
+	spinlock_t		macaddr_lock;
+	spinlock_t		rxmcs_lock;
+	struct tasklet_struct	rxempty_task;
+	struct tasklet_struct	rxclean_task;
+	struct tasklet_struct	txclean_task;
+	struct tasklet_struct	linkch_task;
+	struct tasklet_struct	pcc_task;
+	unsigned long		flags;
+	u32			reg_txcs;
+	u32			reg_txpfc;
+	u32			reg_rxcs;
+	u32			reg_rxmcs;
+	u32			reg_ghc;
+	u32			reg_pmcs;
+	u32			phylink;
+	u32			tx_ring_size;
+	u32			tx_ring_mask;
+	u32			tx_wake_threshold;
+	u32			rx_ring_size;
+	u32			rx_ring_mask;
+	u8			mrrs;
+	unsigned int		fpgaver;
+	unsigned int		chiprev;
+	u8			rev;
+	u32			msg_enable;
+	struct ethtool_cmd	old_ecmd;
+	unsigned int		old_mtu;
+	struct vlan_group	*vlgrp;
+	struct dynpcc_info	dpi;
+	atomic_t		intr_sem;
+	atomic_t		link_changing;
+	atomic_t		tx_cleaning;
+	atomic_t		rx_cleaning;
+	atomic_t		rx_empty;
+	int			(*jme_rx)(struct sk_buff *skb);
+	int			(*jme_vlan_rx)(struct sk_buff *skb,
+					  struct vlan_group *grp,
+					  unsigned short vlan_tag);
+	DECLARE_NAPI_STRUCT
+	DECLARE_NET_DEVICE_STATS
+};
+
+enum shadow_reg_val {
+	SHADOW_IEVE = 0,
+};
+
+enum jme_flags_bits {
+	JME_FLAG_MSI		= 1,
+	JME_FLAG_SSET		= 2,
+	JME_FLAG_TXCSUM		= 3,
+	JME_FLAG_TSO		= 4,
+	JME_FLAG_POLL		= 5,
+	JME_FLAG_SHUTDOWN	= 6,
+};
+
+#define TX_TIMEOUT		(5 * HZ)
+#define JME_REG_LEN		0x500
+#define MAX_ETHERNET_JUMBO_PACKET_SIZE 9216
+
+static inline struct jme_adapter*
+jme_napi_priv(struct napi_struct *napi)
+{
+	struct jme_adapter *jme;
+	jme = container_of(napi, struct jme_adapter, napi);
+	return jme;
+}
+
+/*
+ * Memory-mapped I/O registers
+ */
+enum jme_iomap_offsets {
+	JME_MAC		= 0x0000,
+	JME_PHY		= 0x0400,
+	JME_MISC	= 0x0800,
+	JME_RSS		= 0x0C00,
+};
+
+enum jme_iomap_lens {
+	JME_MAC_LEN	= 0x80,
+	JME_PHY_LEN	= 0x58,
+	JME_MISC_LEN	= 0x98,
+	JME_RSS_LEN	= 0xFF,
+};
+
+enum jme_iomap_regs {
+	JME_TXCS	= JME_MAC | 0x00, /* Transmit Control and Status */
+	JME_TXDBA_LO	= JME_MAC | 0x04, /* Transmit Queue Desc Base Addr */
+	JME_TXDBA_HI	= JME_MAC | 0x08, /* Transmit Queue Desc Base Addr */
+	JME_TXQDC	= JME_MAC | 0x0C, /* Transmit Queue Desc Count */
+	JME_TXNDA	= JME_MAC | 0x10, /* Transmit Queue Next Desc Addr */
+	JME_TXMCS	= JME_MAC | 0x14, /* Transmit MAC Control Status */
+	JME_TXPFC	= JME_MAC | 0x18, /* Transmit Pause Frame Control */
+	JME_TXTRHD	= JME_MAC | 0x1C, /* Transmit Timer/Retry@Half-Dup */
+
+	JME_RXCS	= JME_MAC | 0x20, /* Receive Control and Status */
+	JME_RXDBA_LO	= JME_MAC | 0x24, /* Receive Queue Desc Base Addr */
+	JME_RXDBA_HI	= JME_MAC | 0x28, /* Receive Queue Desc Base Addr */
+	JME_RXQDC	= JME_MAC | 0x2C, /* Receive Queue Desc Count */
+	JME_RXNDA	= JME_MAC | 0x30, /* Receive Queue Next Desc Addr */
+	JME_RXMCS	= JME_MAC | 0x34, /* Receive MAC Control Status */
+	JME_RXUMA_LO	= JME_MAC | 0x38, /* Receive Unicast MAC Address */
+	JME_RXUMA_HI	= JME_MAC | 0x3C, /* Receive Unicast MAC Address */
+	JME_RXMCHT_LO	= JME_MAC | 0x40, /* Recv Multicast Addr HashTable */
+	JME_RXMCHT_HI	= JME_MAC | 0x44, /* Recv Multicast Addr HashTable */
+	JME_WFODP	= JME_MAC | 0x48, /* Wakeup Frame Output Data Port */
+	JME_WFOI	= JME_MAC | 0x4C, /* Wakeup Frame Output Interface */
+
+	JME_SMI		= JME_MAC | 0x50, /* Station Management Interface */
+	JME_GHC		= JME_MAC | 0x54, /* Global Host Control */
+	JME_PMCS	= JME_MAC | 0x60, /* Power Management Control/Stat */
+
+
+	JME_PHY_CS	= JME_PHY | 0x28, /* PHY Ctrl and Status Register */
+	JME_PHY_LINK	= JME_PHY | 0x30, /* PHY Link Status Register */
+	JME_SMBCSR	= JME_PHY | 0x40, /* SMB Control and Status */
+	JME_SMBINTF	= JME_PHY | 0x44, /* SMB Interface */
+
+
+	JME_TMCSR	= JME_MISC | 0x00, /* Timer Control/Status Register */
+	JME_GPREG0	= JME_MISC | 0x08, /* General purpose REG-0 */
+	JME_GPREG1	= JME_MISC | 0x0C, /* General purpose REG-1 */
+	JME_IEVE	= JME_MISC | 0x20, /* Interrupt Event Status */
+	JME_IREQ	= JME_MISC | 0x24, /* Intr Req Status (for debug) */
+	JME_IENS	= JME_MISC | 0x28, /* Intr Enable - Setting Port */
+	JME_IENC	= JME_MISC | 0x2C, /* Interrupt Enable - Clear Port */
+	JME_PCCRX0	= JME_MISC | 0x30, /* PCC Control for RX Queue 0 */
+	JME_PCCTX	= JME_MISC | 0x40, /* PCC Control for TX Queues */
+	JME_CHIPMODE	= JME_MISC | 0x44, /* Identify FPGA Version */
+	JME_SHBA_HI	= JME_MISC | 0x48, /* Shadow Register Base HI */
+	JME_SHBA_LO	= JME_MISC | 0x4C, /* Shadow Register Base LO */
+	JME_TIMER1	= JME_MISC | 0x70, /* Timer1 */
+	JME_TIMER2	= JME_MISC | 0x74, /* Timer2 */
+	JME_APMC	= JME_MISC | 0x7C, /* Aggressive Power Mode Control */
+	JME_PCCSRX0	= JME_MISC | 0x80, /* PCC Status of RX0 */
+};
+
+/*
+ * TX Control/Status Bits
+ */
+enum jme_txcs_bits {
+	TXCS_QUEUE7S	= 0x00008000,
+	TXCS_QUEUE6S	= 0x00004000,
+	TXCS_QUEUE5S	= 0x00002000,
+	TXCS_QUEUE4S	= 0x00001000,
+	TXCS_QUEUE3S	= 0x00000800,
+	TXCS_QUEUE2S	= 0x00000400,
+	TXCS_QUEUE1S	= 0x00000200,
+	TXCS_QUEUE0S	= 0x00000100,
+	TXCS_FIFOTH	= 0x000000C0,
+	TXCS_DMASIZE	= 0x00000030,
+	TXCS_BURST	= 0x00000004,
+	TXCS_ENABLE	= 0x00000001,
+};
+
+enum jme_txcs_value {
+	TXCS_FIFOTH_16QW	= 0x000000C0,
+	TXCS_FIFOTH_12QW	= 0x00000080,
+	TXCS_FIFOTH_8QW		= 0x00000040,
+	TXCS_FIFOTH_4QW		= 0x00000000,
+
+	TXCS_DMASIZE_64B	= 0x00000000,
+	TXCS_DMASIZE_128B	= 0x00000010,
+	TXCS_DMASIZE_256B	= 0x00000020,
+	TXCS_DMASIZE_512B	= 0x00000030,
+
+	TXCS_SELECT_QUEUE0	= 0x00000000,
+	TXCS_SELECT_QUEUE1	= 0x00010000,
+	TXCS_SELECT_QUEUE2	= 0x00020000,
+	TXCS_SELECT_QUEUE3	= 0x00030000,
+	TXCS_SELECT_QUEUE4	= 0x00040000,
+	TXCS_SELECT_QUEUE5	= 0x00050000,
+	TXCS_SELECT_QUEUE6	= 0x00060000,
+	TXCS_SELECT_QUEUE7	= 0x00070000,
+
+	TXCS_DEFAULT		= TXCS_FIFOTH_4QW |
+				  TXCS_BURST,
+};
+
+#define JME_TX_DISABLE_TIMEOUT 10 /* 10 msec */
+
+/*
+ * TX MAC Control/Status Bits
+ */
+enum jme_txmcs_bit_masks {
+	TXMCS_IFG2		= 0xC0000000,
+	TXMCS_IFG1		= 0x30000000,
+	TXMCS_TTHOLD		= 0x00000300,
+	TXMCS_FBURST		= 0x00000080,
+	TXMCS_CARRIEREXT	= 0x00000040,
+	TXMCS_DEFER		= 0x00000020,
+	TXMCS_BACKOFF		= 0x00000010,
+	TXMCS_CARRIERSENSE	= 0x00000008,
+	TXMCS_COLLISION		= 0x00000004,
+	TXMCS_CRC		= 0x00000002,
+	TXMCS_PADDING		= 0x00000001,
+};
+
+enum jme_txmcs_values {
+	TXMCS_IFG2_6_4		= 0x00000000,
+	TXMCS_IFG2_8_5		= 0x40000000,
+	TXMCS_IFG2_10_6		= 0x80000000,
+	TXMCS_IFG2_12_7		= 0xC0000000,
+
+	TXMCS_IFG1_8_4		= 0x00000000,
+	TXMCS_IFG1_12_6		= 0x10000000,
+	TXMCS_IFG1_16_8		= 0x20000000,
+	TXMCS_IFG1_20_10	= 0x30000000,
+
+	TXMCS_TTHOLD_1_8	= 0x00000000,
+	TXMCS_TTHOLD_1_4	= 0x00000100,
+	TXMCS_TTHOLD_1_2	= 0x00000200,
+	TXMCS_TTHOLD_FULL	= 0x00000300,
+
+	TXMCS_DEFAULT		= TXMCS_IFG2_8_5 |
+				  TXMCS_IFG1_16_8 |
+				  TXMCS_TTHOLD_FULL |
+				  TXMCS_DEFER |
+				  TXMCS_CRC |
+				  TXMCS_PADDING,
+};
+
+enum jme_txpfc_bits_masks {
+	TXPFC_VLAN_TAG		= 0xFFFF0000,
+	TXPFC_VLAN_EN		= 0x00008000,
+	TXPFC_PF_EN		= 0x00000001,
+};
+
+enum jme_txtrhd_bits_masks {
+	TXTRHD_TXPEN		= 0x80000000,
+	TXTRHD_TXP		= 0x7FFFFF00,
+	TXTRHD_TXREN		= 0x00000080,
+	TXTRHD_TXRL		= 0x0000007F,
+};
+
+enum jme_txtrhd_shifts {
+	TXTRHD_TXP_SHIFT	= 8,
+	TXTRHD_TXRL_SHIFT	= 0,
+};
+
+/*
+ * RX Control/Status Bits
+ */
+enum jme_rxcs_bit_masks {
+	/* FIFO full threshold for transmitting Tx Pause Packet */
+	RXCS_FIFOTHTP	= 0x30000000,
+	/* FIFO threshold for processing next packet */
+	RXCS_FIFOTHNP	= 0x0C000000,
+	RXCS_DMAREQSZ	= 0x03000000, /* DMA Request Size */
+	RXCS_QUEUESEL	= 0x00030000, /* Queue selection */
+	RXCS_RETRYGAP	= 0x0000F000, /* RX Desc full retry gap */
+	RXCS_RETRYCNT	= 0x00000F00, /* RX Desc full retry counter */
+	RXCS_WAKEUP	= 0x00000040, /* Enable receive wakeup packet */
+	RXCS_MAGIC	= 0x00000020, /* Enable receive magic packet */
+	RXCS_SHORT	= 0x00000010, /* Enable receive short packet */
+	RXCS_ABORT	= 0x00000008, /* Enable receive error packet */
+	RXCS_QST	= 0x00000004, /* Receive queue start */
+	RXCS_SUSPEND	= 0x00000002,
+	RXCS_ENABLE	= 0x00000001,
+};
+
+enum jme_rxcs_values {
+	RXCS_FIFOTHTP_16T	= 0x00000000,
+	RXCS_FIFOTHTP_32T	= 0x10000000,
+	RXCS_FIFOTHTP_64T	= 0x20000000,
+	RXCS_FIFOTHTP_128T	= 0x30000000,
+
+	RXCS_FIFOTHNP_16QW	= 0x00000000,
+	RXCS_FIFOTHNP_32QW	= 0x04000000,
+	RXCS_FIFOTHNP_64QW	= 0x08000000,
+	RXCS_FIFOTHNP_128QW	= 0x0C000000,
+
+	RXCS_DMAREQSZ_16B	= 0x00000000,
+	RXCS_DMAREQSZ_32B	= 0x01000000,
+	RXCS_DMAREQSZ_64B	= 0x02000000,
+	RXCS_DMAREQSZ_128B	= 0x03000000,
+
+	RXCS_QUEUESEL_Q0	= 0x00000000,
+	RXCS_QUEUESEL_Q1	= 0x00010000,
+	RXCS_QUEUESEL_Q2	= 0x00020000,
+	RXCS_QUEUESEL_Q3	= 0x00030000,
+
+	RXCS_RETRYGAP_256ns	= 0x00000000,
+	RXCS_RETRYGAP_512ns	= 0x00001000,
+	RXCS_RETRYGAP_1024ns	= 0x00002000,
+	RXCS_RETRYGAP_2048ns	= 0x00003000,
+	RXCS_RETRYGAP_4096ns	= 0x00004000,
+	RXCS_RETRYGAP_8192ns	= 0x00005000,
+	RXCS_RETRYGAP_16384ns	= 0x00006000,
+	RXCS_RETRYGAP_32768ns	= 0x00007000,
+
+	RXCS_RETRYCNT_0		= 0x00000000,
+	RXCS_RETRYCNT_4		= 0x00000100,
+	RXCS_RETRYCNT_8		= 0x00000200,
+	RXCS_RETRYCNT_12	= 0x00000300,
+	RXCS_RETRYCNT_16	= 0x00000400,
+	RXCS_RETRYCNT_20	= 0x00000500,
+	RXCS_RETRYCNT_24	= 0x00000600,
+	RXCS_RETRYCNT_28	= 0x00000700,
+	RXCS_RETRYCNT_32	= 0x00000800,
+	RXCS_RETRYCNT_36	= 0x00000900,
+	RXCS_RETRYCNT_40	= 0x00000A00,
+	RXCS_RETRYCNT_44	= 0x00000B00,
+	RXCS_RETRYCNT_48	= 0x00000C00,
+	RXCS_RETRYCNT_52	= 0x00000D00,
+	RXCS_RETRYCNT_56	= 0x00000E00,
+	RXCS_RETRYCNT_60	= 0x00000F00,
+
+	RXCS_DEFAULT		= RXCS_FIFOTHTP_128T |
+				  RXCS_FIFOTHNP_128QW |
+				  RXCS_DMAREQSZ_128B |
+				  RXCS_RETRYGAP_256ns |
+				  RXCS_RETRYCNT_32,
+};
+
+#define JME_RX_DISABLE_TIMEOUT 10 /* 10 msec */
+
+/*
+ * RX MAC Control/Status Bits
+ */
+enum jme_rxmcs_bits {
+	RXMCS_ALLFRAME		= 0x00000800,
+	RXMCS_BRDFRAME		= 0x00000400,
+	RXMCS_MULFRAME		= 0x00000200,
+	RXMCS_UNIFRAME		= 0x00000100,
+	RXMCS_ALLMULFRAME	= 0x00000080,
+	RXMCS_MULFILTERED	= 0x00000040,
+	RXMCS_RXCOLLDEC		= 0x00000020,
+	RXMCS_FLOWCTRL		= 0x00000008,
+	RXMCS_VTAGRM		= 0x00000004,
+	RXMCS_PREPAD		= 0x00000002,
+	RXMCS_CHECKSUM		= 0x00000001,
+
+	RXMCS_DEFAULT		= RXMCS_VTAGRM |
+				  RXMCS_PREPAD |
+				  RXMCS_FLOWCTRL |
+				  RXMCS_CHECKSUM,
+};
+
+/*
+ * Wakeup Frame setup interface registers
+ */
+#define WAKEUP_FRAME_NR	8
+#define WAKEUP_FRAME_MASK_DWNR	4
+
+enum jme_wfoi_bit_masks {
+	WFOI_MASK_SEL		= 0x00000070,
+	WFOI_CRC_SEL		= 0x00000008,
+	WFOI_FRAME_SEL		= 0x00000007,
+};
+
+enum jme_wfoi_shifts {
+	WFOI_MASK_SHIFT		= 4,
+};
+
+/*
+ * SMI Related definitions
+ */
+enum jme_smi_bit_mask {
+	SMI_DATA_MASK		= 0xFFFF0000,
+	SMI_REG_ADDR_MASK	= 0x0000F800,
+	SMI_PHY_ADDR_MASK	= 0x000007C0,
+	SMI_OP_WRITE		= 0x00000020,
+	/* Set to 1 to issue a request; cleared back to 0 when done */
+	SMI_OP_REQ		= 0x00000010,
+	SMI_OP_MDIO		= 0x00000008, /* Software access In/Out */
+	SMI_OP_MDOE		= 0x00000004, /* Software Output Enable */
+	SMI_OP_MDC		= 0x00000002, /* Software CLK Control */
+	SMI_OP_MDEN		= 0x00000001, /* Software access Enable */
+};
+
+enum jme_smi_bit_shift {
+	SMI_DATA_SHIFT		= 16,
+	SMI_REG_ADDR_SHIFT	= 11,
+	SMI_PHY_ADDR_SHIFT	= 6,
+};
+
+static inline u32 smi_reg_addr(int x)
+{
+	return (x << SMI_REG_ADDR_SHIFT) & SMI_REG_ADDR_MASK;
+}
+
+static inline u32 smi_phy_addr(int x)
+{
+	return (x << SMI_PHY_ADDR_SHIFT) & SMI_PHY_ADDR_MASK;
+}
+
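+/*
+ * Illustrative sketch (not driver code): an SMI read of a PHY register
+ * would be composed from the helpers above roughly as
+ *
+ *	jwrite32(jme, JME_SMI, smi_phy_addr(phy) | smi_reg_addr(reg) |
+ *			       SMI_OP_REQ);
+ *	... poll until hardware clears SMI_OP_REQ, then ...
+ *	val = (jread32(jme, JME_SMI) & SMI_DATA_MASK) >> SMI_DATA_SHIFT;
+ */
+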
+#define JME_PHY_TIMEOUT 100 /* 100 msec */
+#define JME_PHY_REG_NR 32
+
+/*
+ * Global Host Control
+ */
+enum jme_ghc_bit_mask {
+	GHC_SWRST	= 0x40000000,
+	GHC_DPX		= 0x00000040,
+	GHC_SPEED	= 0x00000030,
+	GHC_LINK_POLL	= 0x00000001,
+};
+
+enum jme_ghc_speed_val {
+	GHC_SPEED_10M	= 0x00000010,
+	GHC_SPEED_100M	= 0x00000020,
+	GHC_SPEED_1000M	= 0x00000030,
+};
+
+/*
+ * Power management control and status register
+ */
+enum jme_pmcs_bit_masks {
+	PMCS_WF7DET	= 0x80000000,
+	PMCS_WF6DET	= 0x40000000,
+	PMCS_WF5DET	= 0x20000000,
+	PMCS_WF4DET	= 0x10000000,
+	PMCS_WF3DET	= 0x08000000,
+	PMCS_WF2DET	= 0x04000000,
+	PMCS_WF1DET	= 0x02000000,
+	PMCS_WF0DET	= 0x01000000,
+	PMCS_LFDET	= 0x00040000,
+	PMCS_LRDET	= 0x00020000,
+	PMCS_MFDET	= 0x00010000,
+	PMCS_WF7EN	= 0x00008000,
+	PMCS_WF6EN	= 0x00004000,
+	PMCS_WF5EN	= 0x00002000,
+	PMCS_WF4EN	= 0x00001000,
+	PMCS_WF3EN	= 0x00000800,
+	PMCS_WF2EN	= 0x00000400,
+	PMCS_WF1EN	= 0x00000200,
+	PMCS_WF0EN	= 0x00000100,
+	PMCS_LFEN	= 0x00000004,
+	PMCS_LREN	= 0x00000002,
+	PMCS_MFEN	= 0x00000001,
+};
+
+/*
+ * Giga PHY Status Registers
+ */
+enum jme_phy_link_bit_mask {
+	PHY_LINK_SPEED_MASK		= 0x0000C000,
+	PHY_LINK_DUPLEX			= 0x00002000,
+	PHY_LINK_SPEEDDPU_RESOLVED	= 0x00000800,
+	PHY_LINK_UP			= 0x00000400,
+	PHY_LINK_AUTONEG_COMPLETE	= 0x00000200,
+	PHY_LINK_MDI_STAT		= 0x00000040,
+};
+
+enum jme_phy_link_speed_val {
+	PHY_LINK_SPEED_10M		= 0x00000000,
+	PHY_LINK_SPEED_100M		= 0x00004000,
+	PHY_LINK_SPEED_1000M		= 0x00008000,
+};
+
+#define JME_SPDRSV_TIMEOUT	500	/* 500 us */
+
+/*
+ * SMB Control and Status
+ */
+enum jme_smbcsr_bit_mask {
+	SMBCSR_CNACK	= 0x00020000,
+	SMBCSR_RELOAD	= 0x00010000,
+	SMBCSR_EEPROMD	= 0x00000020,
+	SMBCSR_INITDONE	= 0x00000010,
+	SMBCSR_BUSY	= 0x0000000F,
+};
+
+enum jme_smbintf_bit_mask {
+	SMBINTF_HWDATR	= 0xFF000000,
+	SMBINTF_HWDATW	= 0x00FF0000,
+	SMBINTF_HWADDR	= 0x0000FF00,
+	SMBINTF_HWRWN	= 0x00000020,
+	SMBINTF_HWCMD	= 0x00000010,
+	SMBINTF_FASTM	= 0x00000008,
+	SMBINTF_GPIOSCL	= 0x00000004,
+	SMBINTF_GPIOSDA	= 0x00000002,
+	SMBINTF_GPIOEN	= 0x00000001,
+};
+
+enum jme_smbintf_vals {
+	SMBINTF_HWRWN_READ	= 0x00000020,
+	SMBINTF_HWRWN_WRITE	= 0x00000000,
+};
+
+enum jme_smbintf_shifts {
+	SMBINTF_HWDATR_SHIFT	= 24,
+	SMBINTF_HWDATW_SHIFT	= 16,
+	SMBINTF_HWADDR_SHIFT	= 8,
+};
+
+#define JME_EEPROM_RELOAD_TIMEOUT 2000 /* 2000 msec */
+#define JME_SMB_BUSY_TIMEOUT 20 /* 20 msec */
+#define JME_SMB_LEN 256
+#define JME_EEPROM_MAGIC 0x250
+
+/*
+ * Timer Control/Status Register
+ */
+enum jme_tmcsr_bit_masks {
+	TMCSR_SWIT	= 0x80000000,
+	TMCSR_EN	= 0x01000000,
+	TMCSR_CNT	= 0x00FFFFFF,
+};
+
+/*
+ * General Purpose REG-0
+ */
+enum jme_gpreg0_masks {
+	GPREG0_DISSH		= 0xFF000000,
+	GPREG0_PCIRLMT		= 0x00300000,
+	GPREG0_PCCNOMUTCLR	= 0x00040000,
+	GPREG0_LNKINTPOLL	= 0x00001000,
+	GPREG0_PCCTMR		= 0x00000300,
+	GPREG0_PHYADDR		= 0x0000001F,
+};
+
+enum jme_gpreg0_vals {
+	GPREG0_DISSH_DW7	= 0x80000000,
+	GPREG0_DISSH_DW6	= 0x40000000,
+	GPREG0_DISSH_DW5	= 0x20000000,
+	GPREG0_DISSH_DW4	= 0x10000000,
+	GPREG0_DISSH_DW3	= 0x08000000,
+	GPREG0_DISSH_DW2	= 0x04000000,
+	GPREG0_DISSH_DW1	= 0x02000000,
+	GPREG0_DISSH_DW0	= 0x01000000,
+	GPREG0_DISSH_ALL	= 0xFF000000,
+
+	GPREG0_PCIRLMT_8	= 0x00000000,
+	GPREG0_PCIRLMT_6	= 0x00100000,
+	GPREG0_PCIRLMT_5	= 0x00200000,
+	GPREG0_PCIRLMT_4	= 0x00300000,
+
+	GPREG0_PCCTMR_16ns	= 0x00000000,
+	GPREG0_PCCTMR_256ns	= 0x00000100,
+	GPREG0_PCCTMR_1us	= 0x00000200,
+	GPREG0_PCCTMR_1ms	= 0x00000300,
+
+	GPREG0_PHYADDR_1	= 0x00000001,
+
+	GPREG0_DEFAULT		= GPREG0_PCIRLMT_4 |
+				  GPREG0_PCCTMR_1us |
+				  GPREG0_PHYADDR_1,
+};
+
+/*
+ * Interrupt Status Bits
+ */
+enum jme_interrupt_bits {
+	INTR_SWINTR	= 0x80000000,
+	INTR_TMINTR	= 0x40000000,
+	INTR_LINKCH	= 0x20000000,
+	INTR_PAUSERCV	= 0x10000000,
+	INTR_MAGICRCV	= 0x08000000,
+	INTR_WAKERCV	= 0x04000000,
+	INTR_PCCRX0TO	= 0x02000000,
+	INTR_PCCRX1TO	= 0x01000000,
+	INTR_PCCRX2TO	= 0x00800000,
+	INTR_PCCRX3TO	= 0x00400000,
+	INTR_PCCTXTO	= 0x00200000,
+	INTR_PCCRX0	= 0x00100000,
+	INTR_PCCRX1	= 0x00080000,
+	INTR_PCCRX2	= 0x00040000,
+	INTR_PCCRX3	= 0x00020000,
+	INTR_PCCTX	= 0x00010000,
+	INTR_RX3EMP	= 0x00008000,
+	INTR_RX2EMP	= 0x00004000,
+	INTR_RX1EMP	= 0x00002000,
+	INTR_RX0EMP	= 0x00001000,
+	INTR_RX3	= 0x00000800,
+	INTR_RX2	= 0x00000400,
+	INTR_RX1	= 0x00000200,
+	INTR_RX0	= 0x00000100,
+	INTR_TX7	= 0x00000080,
+	INTR_TX6	= 0x00000040,
+	INTR_TX5	= 0x00000020,
+	INTR_TX4	= 0x00000010,
+	INTR_TX3	= 0x00000008,
+	INTR_TX2	= 0x00000004,
+	INTR_TX1	= 0x00000002,
+	INTR_TX0	= 0x00000001,
+};
+
+static const u32 INTR_ENABLE = INTR_SWINTR |
+				 INTR_TMINTR |
+				 INTR_LINKCH |
+				 INTR_PCCRX0TO |
+				 INTR_PCCRX0 |
+				 INTR_PCCTXTO |
+				 INTR_PCCTX |
+				 INTR_RX0EMP;
+
+/*
+ * PCC Control Registers
+ */
+enum jme_pccrx_masks {
+	PCCRXTO_MASK	= 0xFFFF0000,
+	PCCRX_MASK	= 0x0000FF00,
+};
+
+enum jme_pcctx_masks {
+	PCCTXTO_MASK	= 0xFFFF0000,
+	PCCTX_MASK	= 0x0000FF00,
+	PCCTX_QS_MASK	= 0x000000FF,
+};
+
+enum jme_pccrx_shifts {
+	PCCRXTO_SHIFT	= 16,
+	PCCRX_SHIFT	= 8,
+};
+
+enum jme_pcctx_shifts {
+	PCCTXTO_SHIFT	= 16,
+	PCCTX_SHIFT	= 8,
+};
+
+enum jme_pcctx_bits {
+	PCCTXQ0_EN	= 0x00000001,
+	PCCTXQ1_EN	= 0x00000002,
+	PCCTXQ2_EN	= 0x00000004,
+	PCCTXQ3_EN	= 0x00000008,
+	PCCTXQ4_EN	= 0x00000010,
+	PCCTXQ5_EN	= 0x00000020,
+	PCCTXQ6_EN	= 0x00000040,
+	PCCTXQ7_EN	= 0x00000080,
+};
+
+/*
+ * Chip Mode Register
+ */
+enum jme_chipmode_bit_masks {
+	CM_FPGAVER_MASK		= 0xFFFF0000,
+	CM_CHIPREV_MASK		= 0x0000FF00,
+	CM_CHIPMODE_MASK	= 0x0000000F,
+};
+
+enum jme_chipmode_shifts {
+	CM_FPGAVER_SHIFT	= 16,
+	CM_CHIPREV_SHIFT	= 8,
+};
+
+/*
+ * Shadow base address register bits
+ */
+enum jme_shadow_base_address_bits {
+	SHBA_POSTEN	= 0x1,
+};
+
+/*
+ * Aggressive Power Mode Control
+ */
+enum jme_apmc_bits {
+	JME_APMC_PCIE_SD_EN	= 0x40000000,
+	JME_APMC_PSEUDO_HP_EN	= 0x20000000,
+	JME_APMC_EPIEN		= 0x04000000,
+	JME_APMC_EPIEN_CTRL	= 0x03000000,
+};
+
+enum jme_apmc_values {
+	JME_APMC_EPIEN_CTRL_EN	= 0x02000000,
+	JME_APMC_EPIEN_CTRL_DIS	= 0x01000000,
+};
+
+#define APMC_PHP_SHUTDOWN_DELAY	(10 * 1000 * 1000)
+
+#ifdef REG_DEBUG
+static char *MAC_REG_NAME[] = {
+	"JME_TXCS",      "JME_TXDBA_LO",  "JME_TXDBA_HI", "JME_TXQDC",
+	"JME_TXNDA",     "JME_TXMCS",     "JME_TXPFC",    "JME_TXTRHD",
+	"JME_RXCS",      "JME_RXDBA_LO",  "JME_RXDBA_HI", "JME_RXQDC",
+	"JME_RXNDA",     "JME_RXMCS",     "JME_RXUMA_LO", "JME_RXUMA_HI",
+	"JME_RXMCHT_LO", "JME_RXMCHT_HI", "JME_WFODP",    "JME_WFOI",
+	"JME_SMI",       "JME_GHC",       "UNKNOWN",      "UNKNOWN",
+	"JME_PMCS"};
+
+static char *PE_REG_NAME[] = {
+	"UNKNOWN",      "UNKNOWN",     "UNKNOWN",    "UNKNOWN",
+	"UNKNOWN",      "UNKNOWN",     "UNKNOWN",    "UNKNOWN",
+	"UNKNOWN",      "UNKNOWN",     "JME_PHY_CS", "UNKNOWN",
+	"JME_PHY_LINK", "UNKNOWN",     "UNKNOWN",    "UNKNOWN",
+	"JME_SMBCSR",   "JME_SMBINTF"};
+
+static char *MISC_REG_NAME[] = {
+	"JME_TMCSR",  "JME_GPIO",     "JME_GPREG0",  "JME_GPREG1",
+	"JME_IEVE",   "JME_IREQ",     "JME_IENS",    "JME_IENC",
+	"JME_PCCRX0", "JME_PCCRX1",   "JME_PCCRX2",  "JME_PCCRX3",
+	"JME_PCCTX0", "JME_CHIPMODE", "JME_SHBA_HI", "JME_SHBA_LO",
+	"UNKNOWN",    "UNKNOWN",      "UNKNOWN",     "UNKNOWN",
+	"UNKNOWN",    "UNKNOWN",      "UNKNOWN",     "UNKNOWN",
+	"UNKNOWN",    "UNKNOWN",      "UNKNOWN",     "UNKNOWN",
+	"JME_TIMER1", "JME_TIMER2",   "UNKNOWN",     "JME_APMC",
+	"JME_PCCSRX0"};
+
+static inline void reg_dbg(const struct jme_adapter *jme,
+		const char *msg, u32 val, u32 reg)
+{
+	const char *regname;
+	switch (reg & 0xF00) {
+	case 0x000:
+		regname = MAC_REG_NAME[(reg & 0xFF) >> 2];
+		break;
+	case 0x400:
+		regname = PE_REG_NAME[(reg & 0xFF) >> 2];
+		break;
+	case 0x800:
+		regname = MISC_REG_NAME[(reg & 0xFF) >> 2];
+		break;
+	default:
+		regname = PE_REG_NAME[0];
+	}
+	printk(KERN_DEBUG "%s: %-20s %08x@%s\n", jme->dev->name,
+			msg, val, regname);
+}
+#else
+static inline void reg_dbg(const struct jme_adapter *jme,
+		const char *msg, u32 val, u32 reg) {}
+#endif
+
+/*
+ * Read/write memory-mapped I/O registers
+ */
+static inline u32 jread32(struct jme_adapter *jme, u32 reg)
+{
+	return readl(jme->regs + reg);
+}
+
+static inline void jwrite32(struct jme_adapter *jme, u32 reg, u32 val)
+{
+	reg_dbg(jme, "REG WRITE", val, reg);
+	writel(val, jme->regs + reg);
+	reg_dbg(jme, "VAL AFTER WRITE", readl(jme->regs + reg), reg);
+}
+
+static inline void jwrite32f(struct jme_adapter *jme, u32 reg, u32 val)
+{
+	/*
+	 * Read after write should cause flush
+	 */
+	reg_dbg(jme, "REG WRITE FLUSH", val, reg);
+	writel(val, jme->regs + reg);
+	readl(jme->regs + reg);
+	reg_dbg(jme, "VAL AFTER WRITE", readl(jme->regs + reg), reg);
+}
+
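+/*
+ * Hypothetical caller pattern for the accessors above -- a flushed
+ * read-modify-write of a control register:
+ *
+ *	u32 rxcs = jread32(jme, JME_RXCS);
+ *	jwrite32f(jme, JME_RXCS, rxcs & ~RXCS_ENABLE);
+ */
+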
+/*
+ * PHY Regs
+ */
+enum jme_phy_reg17_bit_masks {
+	PREG17_SPEED		= 0xC000,
+	PREG17_DUPLEX		= 0x2000,
+	PREG17_SPDRSV		= 0x0800,
+	PREG17_LNKUP		= 0x0400,
+	PREG17_MDI		= 0x0040,
+};
+
+enum jme_phy_reg17_vals {
+	PREG17_SPEED_10M	= 0x0000,
+	PREG17_SPEED_100M	= 0x4000,
+	PREG17_SPEED_1000M	= 0x8000,
+};
+
+#define BMSR_ANCOMP               0x0020
+
+/*
+ * Workaround
+ */
+static inline int is_buggy250(unsigned short device, unsigned int chiprev)
+{
+	return device == PCI_DEVICE_ID_JMICRON_JMC250 && chiprev == 0x11;
+}
+
+/*
+ * Function prototypes
+ */
+static int jme_set_settings(struct net_device *netdev,
+				struct ethtool_cmd *ecmd);
+static void jme_set_multi(struct net_device *netdev);
+
+#endif
diff --git a/drivers/net/qlge/Makefile b/drivers/net/qlge/Makefile
new file mode 100644
index 000000000000..8a197658d76f
--- /dev/null
+++ b/drivers/net/qlge/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the Qlogic 10GbE PCI Express ethernet driver
+#
+
+obj-$(CONFIG_QLGE) += qlge.o
+
+qlge-objs := qlge_main.o qlge_dbg.o qlge_mpi.o qlge_ethtool.o
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
new file mode 100644
index 000000000000..c37ea436c918
--- /dev/null
+++ b/drivers/net/qlge/qlge.h
@@ -0,0 +1,1593 @@
+/*
+ * QLogic QLA41xx NIC HBA Driver
+ * Copyright (c)  2003-2006 QLogic Corporation
+ *
+ * See LICENSE.qlge for copyright and licensing details.
+ */
+#ifndef _QLGE_H_
+#define _QLGE_H_
+
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+
+/*
+ * General definitions...
+ */
+#define DRV_NAME  	"qlge"
+#define DRV_STRING 	"QLogic 10 Gigabit PCI-E Ethernet Driver "
+#define DRV_VERSION	"v1.00.00-b3"
+
+#define PFX "qlge: "
+#define QPRINTK(qdev, nlevel, klevel, fmt, args...)     \
+       do {       \
+	if (!((qdev)->msg_enable & NETIF_MSG_##nlevel))		\
+		;						\
+	else							\
+		dev_printk(KERN_##klevel, &((qdev)->pdev->dev),	\
+			   "%s: " fmt, __func__, ##args);  \
+       } while (0)
+
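+/*
+ * Example (illustrative):
+ *	QPRINTK(qdev, IFUP, ERR, "bringup failed\n");
+ * prints at KERN_ERR only when NETIF_MSG_IFUP is set in qdev->msg_enable.
+ */
+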
+#define QLGE_VENDOR_ID    0x1077
+#define QLGE_DEVICE_ID1    0x8012
+#define QLGE_DEVICE_ID   0x8000
+
+#define MAX_RX_RINGS 128
+#define MAX_TX_RINGS 128
+
+#define NUM_TX_RING_ENTRIES	256
+#define NUM_RX_RING_ENTRIES	256
+
+#define NUM_SMALL_BUFFERS   512
+#define NUM_LARGE_BUFFERS   512
+
+#define SMALL_BUFFER_SIZE 256
+#define LARGE_BUFFER_SIZE	PAGE_SIZE
+#define MAX_SPLIT_SIZE 1023
+#define QLGE_SB_PAD 32
+
+#define DFLT_COALESCE_WAIT 100	/* 100 usec wait for coalescing */
+#define MAX_INTER_FRAME_WAIT 10	/* 10 usec max interframe-wait for coalescing */
+#define DFLT_INTER_FRAME_WAIT (MAX_INTER_FRAME_WAIT/2)
+#define UDELAY_COUNT 3
+#define UDELAY_DELAY 10
+
+
+#define TX_DESC_PER_IOCB 8
+/* The maximum number of frags we handle is based
+ * on PAGE_SIZE...
+ */
+#if (PAGE_SHIFT == 12) || (PAGE_SHIFT == 13)	/* 4k & 8k pages */
+#define TX_DESC_PER_OAL ((MAX_SKB_FRAGS - TX_DESC_PER_IOCB) + 2)
+#elif (PAGE_SHIFT == 16)	/* 64k pages */
+#define TX_DESC_PER_OAL 0
+#endif
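+
+/* For 4 KB pages, MAX_SKB_FRAGS is (65536 / PAGE_SIZE) + 2 = 18, so
+ * TX_DESC_PER_OAL = (18 - 8) + 2 = 12 overflow-area descriptors. */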
+
+#define DB_PAGE_SIZE 4096
+
+/*
+ * Processor Address Register (PROC_ADDR) bit definitions.
+ */
+enum {
+
+	/* Misc. stuff */
+	MAILBOX_COUNT = 16,
+
+	PROC_ADDR_RDY = (1 << 31),
+	PROC_ADDR_R = (1 << 30),
+	PROC_ADDR_ERR = (1 << 29),
+	PROC_ADDR_DA = (1 << 28),
+	PROC_ADDR_FUNC0_MBI = 0x00001180,
+	PROC_ADDR_FUNC0_MBO = (PROC_ADDR_FUNC0_MBI + MAILBOX_COUNT),
+	PROC_ADDR_FUNC0_CTL = 0x000011a1,
+	PROC_ADDR_FUNC2_MBI = 0x00001280,
+	PROC_ADDR_FUNC2_MBO = (PROC_ADDR_FUNC2_MBI + MAILBOX_COUNT),
+	PROC_ADDR_FUNC2_CTL = 0x000012a1,
+	PROC_ADDR_MPI_RISC = 0x00000000,
+	PROC_ADDR_MDE = 0x00010000,
+	PROC_ADDR_REGBLOCK = 0x00020000,
+	PROC_ADDR_RISC_REG = 0x00030000,
+};
+
+/*
+ * System Register (SYS) bit definitions.
+ */
+enum {
+	SYS_EFE = (1 << 0),
+	SYS_FAE = (1 << 1),
+	SYS_MDC = (1 << 2),
+	SYS_DST = (1 << 3),
+	SYS_DWC = (1 << 4),
+	SYS_EVW = (1 << 5),
+	SYS_OMP_DLY_MASK = 0x3f000000,
+	/*
+	 * There are no values defined as of edit #15.
+	 */
+	SYS_ODI = (1 << 14),
+};
+
+/*
+ *  Reset/Failover Register (RST_FO) bit definitions.
+ */
+enum {
+	RST_FO_TFO = (1 << 0),
+	RST_FO_RR_MASK = 0x00060000,
+	RST_FO_RR_CQ_CAM = 0x00000000,
+	RST_FO_RR_DROP = 0x00000001,
+	RST_FO_RR_DQ = 0x00000002,
+	RST_FO_RR_RCV_FUNC_CQ = 0x00000003,
+	RST_FO_FRB = (1 << 12),
+	RST_FO_MOP = (1 << 13),
+	RST_FO_REG = (1 << 14),
+	RST_FO_FR = (1 << 15),
+};
+
+/*
+ * Function Specific Control Register (FSC) bit definitions.
+ */
+enum {
+	FSC_DBRST_MASK = 0x00070000,
+	FSC_DBRST_256 = 0x00000000,
+	FSC_DBRST_512 = 0x00000001,
+	FSC_DBRST_768 = 0x00000002,
+	FSC_DBRST_1024 = 0x00000003,
+	FSC_DBL_MASK = 0x00180000,
+	FSC_DBL_DBRST = 0x00000000,
+	FSC_DBL_MAX_PLD = 0x00000008,
+	FSC_DBL_MAX_BRST = 0x00000010,
+	FSC_DBL_128_BYTES = 0x00000018,
+	FSC_EC = (1 << 5),
+	FSC_EPC_MASK = 0x00c00000,
+	FSC_EPC_INBOUND = (1 << 6),
+	FSC_EPC_OUTBOUND = (1 << 7),
+	FSC_VM_PAGESIZE_MASK = 0x07000000,
+	FSC_VM_PAGE_2K = 0x00000100,
+	FSC_VM_PAGE_4K = 0x00000200,
+	FSC_VM_PAGE_8K = 0x00000300,
+	FSC_VM_PAGE_64K = 0x00000600,
+	FSC_SH = (1 << 11),
+	FSC_DSB = (1 << 12),
+	FSC_STE = (1 << 13),
+	FSC_FE = (1 << 15),
+};
+
+/*
+ *  Host Command Status Register (CSR) bit definitions.
+ */
+enum {
+	CSR_ERR_STS_MASK = 0x0000003f,
+	/*
+	 * There are no values defined as of edit #15.
+	 */
+	CSR_RR = (1 << 8),
+	CSR_HRI = (1 << 9),
+	CSR_RP = (1 << 10),
+	CSR_CMD_PARM_SHIFT = 22,
+	CSR_CMD_NOP = 0x00000000,
+	CSR_CMD_SET_RST = 0x10000000,
+	CSR_CMD_CLR_RST = 0x20000000,
+	CSR_CMD_SET_PAUSE = 0x30000000,
+	CSR_CMD_CLR_PAUSE = 0x40000000,
+	CSR_CMD_SET_H2R_INT = 0x50000000,
+	CSR_CMD_CLR_H2R_INT = 0x60000000,
+	CSR_CMD_PAR_EN = 0x70000000,
+	CSR_CMD_SET_BAD_PAR = 0x80000000,
+	CSR_CMD_CLR_BAD_PAR = 0x90000000,
+	CSR_CMD_CLR_R2PCI_INT = 0xa0000000,
+};
+
+/*
+ *  Configuration Register (CFG) bit definitions.
+ */
+enum {
+	CFG_LRQ = (1 << 0),
+	CFG_DRQ = (1 << 1),
+	CFG_LR = (1 << 2),
+	CFG_DR = (1 << 3),
+	CFG_LE = (1 << 5),
+	CFG_LCQ = (1 << 6),
+	CFG_DCQ = (1 << 7),
+	CFG_Q_SHIFT = 8,
+	CFG_Q_MASK = 0x7f000000,
+};
+
+/*
+ *  Status Register (STS) bit definitions.
+ */
+enum {
+	STS_FE = (1 << 0),
+	STS_PI = (1 << 1),
+	STS_PL0 = (1 << 2),
+	STS_PL1 = (1 << 3),
+	STS_PI0 = (1 << 4),
+	STS_PI1 = (1 << 5),
+	STS_FUNC_ID_MASK = 0x000000c0,
+	STS_FUNC_ID_SHIFT = 6,
+	STS_F0E = (1 << 8),
+	STS_F1E = (1 << 9),
+	STS_F2E = (1 << 10),
+	STS_F3E = (1 << 11),
+	STS_NFE = (1 << 12),
+};
+
+/*
+ * Interrupt Enable Register (INTR_EN) bit definitions.
+ */
+enum {
+	INTR_EN_INTR_MASK = 0x007f0000,
+	INTR_EN_TYPE_MASK = 0x03000000,
+	INTR_EN_TYPE_ENABLE = 0x00000100,
+	INTR_EN_TYPE_DISABLE = 0x00000200,
+	INTR_EN_TYPE_READ = 0x00000300,
+	INTR_EN_IHD = (1 << 13),
+	INTR_EN_IHD_MASK = (INTR_EN_IHD << 16),
+	INTR_EN_EI = (1 << 14),
+	INTR_EN_EN = (1 << 15),
+};
+
+/*
+ * Interrupt Mask Register (INTR_MASK) bit definitions.
+ */
+enum {
+	INTR_MASK_PI = (1 << 0),
+	INTR_MASK_HL0 = (1 << 1),
+	INTR_MASK_LH0 = (1 << 2),
+	INTR_MASK_HL1 = (1 << 3),
+	INTR_MASK_LH1 = (1 << 4),
+	INTR_MASK_SE = (1 << 5),
+	INTR_MASK_LSC = (1 << 6),
+	INTR_MASK_MC = (1 << 7),
+	INTR_MASK_LINK_IRQS = INTR_MASK_LSC | INTR_MASK_SE | INTR_MASK_MC,
+};
+
+/*
+ *  Register (REV_ID) bit definitions.
+ */
+enum {
+	REV_ID_MASK = 0x0000000f,
+	REV_ID_NICROLL_SHIFT = 0,
+	REV_ID_NICREV_SHIFT = 4,
+	REV_ID_XGROLL_SHIFT = 8,
+	REV_ID_XGREV_SHIFT = 12,
+	REV_ID_CHIPREV_SHIFT = 28,
+};
+
+/*
+ *  Force ECC Error Register (FRC_ECC_ERR) bit definitions.
+ */
+enum {
+	FRC_ECC_ERR_VW = (1 << 12),
+	FRC_ECC_ERR_VB = (1 << 13),
+	FRC_ECC_ERR_NI = (1 << 14),
+	FRC_ECC_ERR_NO = (1 << 15),
+	FRC_ECC_PFE_SHIFT = 16,
+	FRC_ECC_ERR_DO = (1 << 18),
+	FRC_ECC_P14 = (1 << 19),
+};
+
+/*
+ *  Error Status Register (ERR_STS) bit definitions.
+ */
+enum {
+	ERR_STS_NOF = (1 << 0),
+	ERR_STS_NIF = (1 << 1),
+	ERR_STS_DRP = (1 << 2),
+	ERR_STS_XGP = (1 << 3),
+	ERR_STS_FOU = (1 << 4),
+	ERR_STS_FOC = (1 << 5),
+	ERR_STS_FOF = (1 << 6),
+	ERR_STS_FIU = (1 << 7),
+	ERR_STS_FIC = (1 << 8),
+	ERR_STS_FIF = (1 << 9),
+	ERR_STS_MOF = (1 << 10),
+	ERR_STS_TA = (1 << 11),
+	ERR_STS_MA = (1 << 12),
+	ERR_STS_MPE = (1 << 13),
+	ERR_STS_SCE = (1 << 14),
+	ERR_STS_STE = (1 << 15),
+	ERR_STS_FOW = (1 << 16),
+	ERR_STS_UE = (1 << 17),
+	ERR_STS_MCH = (1 << 26),
+	ERR_STS_LOC_SHIFT = 27,
+};
+
+/*
+ *  RAM Debug Address Register (RAM_DBG_ADDR) bit definitions.
+ */
+enum {
+	RAM_DBG_ADDR_FW = (1 << 30),
+	RAM_DBG_ADDR_FR = (1 << 31),
+};
+
+/*
+ * Semaphore Register (SEM) bit definitions.
+ */
+enum {
+	/*
+	 * Example:
+	 * reg = SEM_XGMAC0_MASK | (SEM_SET << SEM_XGMAC0_SHIFT)
+	 */
+	SEM_CLEAR = 0,
+	SEM_SET = 1,
+	SEM_FORCE = 3,
+	SEM_XGMAC0_SHIFT = 0,
+	SEM_XGMAC1_SHIFT = 2,
+	SEM_ICB_SHIFT = 4,
+	SEM_MAC_ADDR_SHIFT = 6,
+	SEM_FLASH_SHIFT = 8,
+	SEM_PROBE_SHIFT = 10,
+	SEM_RT_IDX_SHIFT = 12,
+	SEM_PROC_REG_SHIFT = 14,
+	SEM_XGMAC0_MASK = 0x00030000,
+	SEM_XGMAC1_MASK = 0x000c0000,
+	SEM_ICB_MASK = 0x00300000,
+	SEM_MAC_ADDR_MASK = 0x00c00000,
+	SEM_FLASH_MASK = 0x03000000,
+	SEM_PROBE_MASK = 0x0c000000,
+	SEM_RT_IDX_MASK = 0x30000000,
+	SEM_PROC_REG_MASK = 0xc0000000,
+};
+
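+/*
+ * Illustrative sketch only (the I/O helper usage is hypothetical):
+ *
+ *	// acquire the flash semaphore
+ *	writel(SEM_FLASH_MASK | (SEM_SET << SEM_FLASH_SHIFT), base + SEM);
+ *	// ... access FLASH_ADDR / FLASH_DATA ...
+ *	// release it
+ *	writel(SEM_FLASH_MASK | (SEM_CLEAR << SEM_FLASH_SHIFT), base + SEM);
+ */
+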
+/*
+ *  10G MAC Address  Register (XGMAC_ADDR) bit definitions.
+ */
+enum {
+	XGMAC_ADDR_RDY = (1 << 31),
+	XGMAC_ADDR_R = (1 << 30),
+	XGMAC_ADDR_XME = (1 << 29),
+
+	/* XGMAC control registers */
+	PAUSE_SRC_LO = 0x00000100,
+	PAUSE_SRC_HI = 0x00000104,
+	GLOBAL_CFG = 0x00000108,
+	GLOBAL_CFG_RESET = (1 << 0),
+	GLOBAL_CFG_JUMBO = (1 << 6),
+	GLOBAL_CFG_TX_STAT_EN = (1 << 10),
+	GLOBAL_CFG_RX_STAT_EN = (1 << 11),
+	TX_CFG = 0x0000010c,
+	TX_CFG_RESET = (1 << 0),
+	TX_CFG_EN = (1 << 1),
+	TX_CFG_PREAM = (1 << 2),
+	RX_CFG = 0x00000110,
+	RX_CFG_RESET = (1 << 0),
+	RX_CFG_EN = (1 << 1),
+	RX_CFG_PREAM = (1 << 2),
+	FLOW_CTL = 0x0000011c,
+	PAUSE_OPCODE = 0x00000120,
+	PAUSE_TIMER = 0x00000124,
+	PAUSE_FRM_DEST_LO = 0x00000128,
+	PAUSE_FRM_DEST_HI = 0x0000012c,
+	MAC_TX_PARAMS = 0x00000134,
+	MAC_TX_PARAMS_JUMBO = (1 << 31),
+	MAC_TX_PARAMS_SIZE_SHIFT = 16,
+	MAC_RX_PARAMS = 0x00000138,
+	MAC_SYS_INT = 0x00000144,
+	MAC_SYS_INT_MASK = 0x00000148,
+	MAC_MGMT_INT = 0x0000014c,
+	MAC_MGMT_IN_MASK = 0x00000150,
+	EXT_ARB_MODE = 0x000001fc,
+
+	/* XGMAC TX statistics  registers */
+	TX_PKTS = 0x00000200,
+	TX_BYTES = 0x00000208,
+	TX_MCAST_PKTS = 0x00000210,
+	TX_BCAST_PKTS = 0x00000218,
+	TX_UCAST_PKTS = 0x00000220,
+	TX_CTL_PKTS = 0x00000228,
+	TX_PAUSE_PKTS = 0x00000230,
+	TX_64_PKT = 0x00000238,
+	TX_65_TO_127_PKT = 0x00000240,
+	TX_128_TO_255_PKT = 0x00000248,
+	TX_256_511_PKT = 0x00000250,
+	TX_512_TO_1023_PKT = 0x00000258,
+	TX_1024_TO_1518_PKT = 0x00000260,
+	TX_1519_TO_MAX_PKT = 0x00000268,
+	TX_UNDERSIZE_PKT = 0x00000270,
+	TX_OVERSIZE_PKT = 0x00000278,
+
+	/* XGMAC statistics control registers */
+	RX_HALF_FULL_DET = 0x000002a0,
+	TX_HALF_FULL_DET = 0x000002a4,
+	RX_OVERFLOW_DET = 0x000002a8,
+	TX_OVERFLOW_DET = 0x000002ac,
+	RX_HALF_FULL_MASK = 0x000002b0,
+	TX_HALF_FULL_MASK = 0x000002b4,
+	RX_OVERFLOW_MASK = 0x000002b8,
+	TX_OVERFLOW_MASK = 0x000002bc,
+	STAT_CNT_CTL = 0x000002c0,
+	STAT_CNT_CTL_CLEAR_TX = (1 << 0),
+	STAT_CNT_CTL_CLEAR_RX = (1 << 1),
+	AUX_RX_HALF_FULL_DET = 0x000002d0,
+	AUX_TX_HALF_FULL_DET = 0x000002d4,
+	AUX_RX_OVERFLOW_DET = 0x000002d8,
+	AUX_TX_OVERFLOW_DET = 0x000002dc,
+	AUX_RX_HALF_FULL_MASK = 0x000002f0,
+	AUX_TX_HALF_FULL_MASK = 0x000002f4,
+	AUX_RX_OVERFLOW_MASK = 0x000002f8,
+	AUX_TX_OVERFLOW_MASK = 0x000002fc,
+
+	/* XGMAC RX statistics  registers */
+	RX_BYTES = 0x00000300,
+	RX_BYTES_OK = 0x00000308,
+	RX_PKTS = 0x00000310,
+	RX_PKTS_OK = 0x00000318,
+	RX_BCAST_PKTS = 0x00000320,
+	RX_MCAST_PKTS = 0x00000328,
+	RX_UCAST_PKTS = 0x00000330,
+	RX_UNDERSIZE_PKTS = 0x00000338,
+	RX_OVERSIZE_PKTS = 0x00000340,
+	RX_JABBER_PKTS = 0x00000348,
+	RX_UNDERSIZE_FCERR_PKTS = 0x00000350,
+	RX_DROP_EVENTS = 0x00000358,
+	RX_FCERR_PKTS = 0x00000360,
+	RX_ALIGN_ERR = 0x00000368,
+	RX_SYMBOL_ERR = 0x00000370,
+	RX_MAC_ERR = 0x00000378,
+	RX_CTL_PKTS = 0x00000380,
+	RX_PAUSE_PKTS = 0x00000384,
+	RX_64_PKTS = 0x00000390,
+	RX_65_TO_127_PKTS = 0x00000398,
+	RX_128_255_PKTS = 0x000003a0,
+	RX_256_511_PKTS = 0x000003a8,
+	RX_512_TO_1023_PKTS = 0x000003b0,
+	RX_1024_TO_1518_PKTS = 0x000003b8,
+	RX_1519_TO_MAX_PKTS = 0x000003c0,
+	RX_LEN_ERR_PKTS = 0x000003c8,
+
+	/* XGMAC MDIO control registers */
+	MDIO_TX_DATA = 0x00000400,
+	MDIO_RX_DATA = 0x00000410,
+	MDIO_CMD = 0x00000420,
+	MDIO_PHY_ADDR = 0x00000430,
+	MDIO_PORT = 0x00000440,
+	MDIO_STATUS = 0x00000450,
+
+	/* XGMAC AUX statistics  registers */
+};
+
+/*
+ *  Enhanced Transmission Schedule Registers (NIC_ETS, CNA_ETS) bit definitions.
+ */
+enum {
+	ETS_QUEUE_SHIFT = 29,
+	ETS_REF = (1 << 26),
+	ETS_RS = (1 << 27),
+	ETS_P = (1 << 28),
+	ETS_FC_COS_SHIFT = 23,
+};
+
+/*
+ *  Flash Address Register (FLASH_ADDR) bit definitions.
+ */
+enum {
+	FLASH_ADDR_RDY = (1 << 31),
+	FLASH_ADDR_R = (1 << 30),
+	FLASH_ADDR_ERR = (1 << 29),
+};
+
+/*
+ *  Stop CQ Processing Register (CQ_STOP) bit definitions.
+ */
+enum {
+	CQ_STOP_QUEUE_MASK = (0x007f0000),
+	CQ_STOP_TYPE_MASK = (0x03000000),
+	CQ_STOP_TYPE_START = 0x00000100,
+	CQ_STOP_TYPE_STOP = 0x00000200,
+	CQ_STOP_TYPE_READ = 0x00000300,
+	CQ_STOP_EN = (1 << 15),
+};
+
+/*
+ *  MAC Protocol Address Index Register (MAC_ADDR_IDX) bit definitions.
+ */
+enum {
+	MAC_ADDR_IDX_SHIFT = 4,
+	MAC_ADDR_TYPE_SHIFT = 16,
+	MAC_ADDR_TYPE_MASK = 0x000f0000,
+	MAC_ADDR_TYPE_CAM_MAC = 0x00000000,
+	MAC_ADDR_TYPE_MULTI_MAC = 0x00010000,
+	MAC_ADDR_TYPE_VLAN = 0x00020000,
+	MAC_ADDR_TYPE_MULTI_FLTR = 0x00030000,
+	MAC_ADDR_TYPE_FC_MAC = 0x00040000,
+	MAC_ADDR_TYPE_MGMT_MAC = 0x00050000,
+	MAC_ADDR_TYPE_MGMT_VLAN = 0x00060000,
+	MAC_ADDR_TYPE_MGMT_V4 = 0x00070000,
+	MAC_ADDR_TYPE_MGMT_V6 = 0x00080000,
+	MAC_ADDR_TYPE_MGMT_TU_DP = 0x00090000,
+	MAC_ADDR_ADR = (1 << 25),
+	MAC_ADDR_RS = (1 << 26),
+	MAC_ADDR_E = (1 << 27),
+	MAC_ADDR_MR = (1 << 30),
+	MAC_ADDR_MW = (1 << 31),
+	MAX_MULTICAST_ENTRIES = 32,
+};
+
+/*
+ *  MAC Protocol Address Index Register (SPLT_HDR) bit definitions.
+ */
+enum {
+	SPLT_HDR_EP = (1 << 31),
+};
+
+/*
+ *  FCoE Receive Configuration Register (FC_RCV_CFG) bit definitions.
+ */
+enum {
+	FC_RCV_CFG_ECT = (1 << 15),
+	FC_RCV_CFG_DFH = (1 << 20),
+	FC_RCV_CFG_DVF = (1 << 21),
+	FC_RCV_CFG_RCE = (1 << 27),
+	FC_RCV_CFG_RFE = (1 << 28),
+	FC_RCV_CFG_TEE = (1 << 29),
+	FC_RCV_CFG_TCE = (1 << 30),
+	FC_RCV_CFG_TFE = (1 << 31),
+};
+
+/*
+ *  NIC Receive Configuration Register (NIC_RCV_CFG) bit definitions.
+ */
+enum {
+	NIC_RCV_CFG_PPE = (1 << 0),
+	NIC_RCV_CFG_VLAN_MASK = 0x00060000,
+	NIC_RCV_CFG_VLAN_ALL = 0x00000000,
+	NIC_RCV_CFG_VLAN_MATCH_ONLY = 0x00000002,
+	NIC_RCV_CFG_VLAN_MATCH_AND_NON = 0x00000004,
+	NIC_RCV_CFG_VLAN_NONE_AND_NON = 0x00000006,
+	NIC_RCV_CFG_RV = (1 << 3),
+	NIC_RCV_CFG_DFQ_MASK = (0x7f000000),
+	NIC_RCV_CFG_DFQ_SHIFT = 8,
+	NIC_RCV_CFG_DFQ = 0,	/* HARDCODE default queue to 0. */
+};
+
+/*
+ *   Mgmt Receive Configuration Register (MGMT_RCV_CFG) bit definitions.
+ */
+enum {
+	MGMT_RCV_CFG_ARP = (1 << 0),
+	MGMT_RCV_CFG_DHC = (1 << 1),
+	MGMT_RCV_CFG_DHS = (1 << 2),
+	MGMT_RCV_CFG_NP = (1 << 3),
+	MGMT_RCV_CFG_I6N = (1 << 4),
+	MGMT_RCV_CFG_I6R = (1 << 5),
+	MGMT_RCV_CFG_DH6 = (1 << 6),
+	MGMT_RCV_CFG_UD1 = (1 << 7),
+	MGMT_RCV_CFG_UD0 = (1 << 8),
+	MGMT_RCV_CFG_BCT = (1 << 9),
+	MGMT_RCV_CFG_MCT = (1 << 10),
+	MGMT_RCV_CFG_DM = (1 << 11),
+	MGMT_RCV_CFG_RM = (1 << 12),
+	MGMT_RCV_CFG_STL = (1 << 13),
+	MGMT_RCV_CFG_VLAN_MASK = 0xc0000000,
+	MGMT_RCV_CFG_VLAN_ALL = 0x00000000,
+	MGMT_RCV_CFG_VLAN_MATCH_ONLY = 0x00004000,
+	MGMT_RCV_CFG_VLAN_MATCH_AND_NON = 0x00008000,
+	MGMT_RCV_CFG_VLAN_NONE_AND_NON = 0x0000c000,
+};
+
+/*
+ *  Routing Index Register (RT_IDX) bit definitions.
+ */
+enum {
+	RT_IDX_IDX_SHIFT = 8,
+	RT_IDX_TYPE_MASK = 0x000f0000,
+	RT_IDX_TYPE_RT = 0x00000000,
+	RT_IDX_TYPE_RT_INV = 0x00010000,
+	RT_IDX_TYPE_NICQ = 0x00020000,
+	RT_IDX_TYPE_NICQ_INV = 0x00030000,
+	RT_IDX_DST_MASK = 0x00700000,
+	RT_IDX_DST_RSS = 0x00000000,
+	RT_IDX_DST_CAM_Q = 0x00100000,
+	RT_IDX_DST_COS_Q = 0x00200000,
+	RT_IDX_DST_DFLT_Q = 0x00300000,
+	RT_IDX_DST_DEST_Q = 0x00400000,
+	RT_IDX_RS = (1 << 26),
+	RT_IDX_E = (1 << 27),
+	RT_IDX_MR = (1 << 30),
+	RT_IDX_MW = (1 << 31),
+
+	/* Nic Queue format - type 2 bits */
+	RT_IDX_BCAST = (1 << 0),
+	RT_IDX_MCAST = (1 << 1),
+	RT_IDX_MCAST_MATCH = (1 << 2),
+	RT_IDX_MCAST_REG_MATCH = (1 << 3),
+	RT_IDX_MCAST_HASH_MATCH = (1 << 4),
+	RT_IDX_FC_MACH = (1 << 5),
+	RT_IDX_ETH_FCOE = (1 << 6),
+	RT_IDX_CAM_HIT = (1 << 7),
+	RT_IDX_CAM_BIT0 = (1 << 8),
+	RT_IDX_CAM_BIT1 = (1 << 9),
+	RT_IDX_VLAN_TAG = (1 << 10),
+	RT_IDX_VLAN_MATCH = (1 << 11),
+	RT_IDX_VLAN_FILTER = (1 << 12),
+	RT_IDX_ETH_SKIP1 = (1 << 13),
+	RT_IDX_ETH_SKIP2 = (1 << 14),
+	RT_IDX_BCAST_MCAST_MATCH = (1 << 15),
+	RT_IDX_802_3 = (1 << 16),
+	RT_IDX_LLDP = (1 << 17),
+	RT_IDX_UNUSED018 = (1 << 18),
+	RT_IDX_UNUSED019 = (1 << 19),
+	RT_IDX_UNUSED20 = (1 << 20),
+	RT_IDX_UNUSED21 = (1 << 21),
+	RT_IDX_ERR = (1 << 22),
+	RT_IDX_VALID = (1 << 23),
+	RT_IDX_TU_CSUM_ERR = (1 << 24),
+	RT_IDX_IP_CSUM_ERR = (1 << 25),
+	RT_IDX_MAC_ERR = (1 << 26),
+	RT_IDX_RSS_TCP6 = (1 << 27),
+	RT_IDX_RSS_TCP4 = (1 << 28),
+	RT_IDX_RSS_IPV6 = (1 << 29),
+	RT_IDX_RSS_IPV4 = (1 << 30),
+	RT_IDX_RSS_MATCH = (1 << 31),
+
+	/* Hierarchy for the NIC Queue Mask */
+	RT_IDX_ALL_ERR_SLOT = 0,
+	RT_IDX_MAC_ERR_SLOT = 0,
+	RT_IDX_IP_CSUM_ERR_SLOT = 1,
+	RT_IDX_TCP_UDP_CSUM_ERR_SLOT = 2,
+	RT_IDX_BCAST_SLOT = 3,
+	RT_IDX_MCAST_MATCH_SLOT = 4,
+	RT_IDX_ALLMULTI_SLOT = 5,
+	RT_IDX_UNUSED6_SLOT = 6,
+	RT_IDX_UNUSED7_SLOT = 7,
+	RT_IDX_RSS_MATCH_SLOT = 8,
+	RT_IDX_RSS_IPV4_SLOT = 8,
+	RT_IDX_RSS_IPV6_SLOT = 9,
+	RT_IDX_RSS_TCP4_SLOT = 10,
+	RT_IDX_RSS_TCP6_SLOT = 11,
+	RT_IDX_CAM_HIT_SLOT = 12,
+	RT_IDX_UNUSED013 = 13,
+	RT_IDX_UNUSED014 = 14,
+	RT_IDX_PROMISCUOUS_SLOT = 15,
+	RT_IDX_MAX_SLOTS = 16,
+};
+
+/*
+ * Control Register Set Map
+ */
+enum {
+	PROC_ADDR = 0,		/* Use semaphore */
+	PROC_DATA = 0x04,	/* Use semaphore */
+	SYS = 0x08,
+	RST_FO = 0x0c,
+	FSC = 0x10,
+	CSR = 0x14,
+	LED = 0x18,
+	ICB_RID = 0x1c,		/* Use semaphore */
+	ICB_L = 0x20,		/* Use semaphore */
+	ICB_H = 0x24,		/* Use semaphore */
+	CFG = 0x28,
+	BIOS_ADDR = 0x2c,
+	STS = 0x30,
+	INTR_EN = 0x34,
+	INTR_MASK = 0x38,
+	ISR1 = 0x3c,
+	ISR2 = 0x40,
+	ISR3 = 0x44,
+	ISR4 = 0x48,
+	REV_ID = 0x4c,
+	FRC_ECC_ERR = 0x50,
+	ERR_STS = 0x54,
+	RAM_DBG_ADDR = 0x58,
+	RAM_DBG_DATA = 0x5c,
+	ECC_ERR_CNT = 0x60,
+	SEM = 0x64,
+	GPIO_1 = 0x68,		/* Use semaphore */
+	GPIO_2 = 0x6c,		/* Use semaphore */
+	GPIO_3 = 0x70,		/* Use semaphore */
+	RSVD2 = 0x74,
+	XGMAC_ADDR = 0x78,	/* Use semaphore */
+	XGMAC_DATA = 0x7c,	/* Use semaphore */
+	NIC_ETS = 0x80,
+	CNA_ETS = 0x84,
+	FLASH_ADDR = 0x88,	/* Use semaphore */
+	FLASH_DATA = 0x8c,	/* Use semaphore */
+	CQ_STOP = 0x90,
+	PAGE_TBL_RID = 0x94,
+	WQ_PAGE_TBL_LO = 0x98,
+	WQ_PAGE_TBL_HI = 0x9c,
+	CQ_PAGE_TBL_LO = 0xa0,
+	CQ_PAGE_TBL_HI = 0xa4,
+	MAC_ADDR_IDX = 0xa8,	/* Use semaphore */
+	MAC_ADDR_DATA = 0xac,	/* Use semaphore */
+	COS_DFLT_CQ1 = 0xb0,
+	COS_DFLT_CQ2 = 0xb4,
+	ETYPE_SKIP1 = 0xb8,
+	ETYPE_SKIP2 = 0xbc,
+	SPLT_HDR = 0xc0,
+	FC_PAUSE_THRES = 0xc4,
+	NIC_PAUSE_THRES = 0xc8,
+	FC_ETHERTYPE = 0xcc,
+	FC_RCV_CFG = 0xd0,
+	NIC_RCV_CFG = 0xd4,
+	FC_COS_TAGS = 0xd8,
+	NIC_COS_TAGS = 0xdc,
+	MGMT_RCV_CFG = 0xe0,
+	RT_IDX = 0xe4,
+	RT_DATA = 0xe8,
+	RSVD7 = 0xec,
+	XG_SERDES_ADDR = 0xf0,
+	XG_SERDES_DATA = 0xf4,
+	PRB_MX_ADDR = 0xf8,	/* Use semaphore */
+	PRB_MX_DATA = 0xfc,	/* Use semaphore */
+};
+
+/*
+ * CAM output format.
+ */
+enum {
+	CAM_OUT_ROUTE_FC = 0,
+	CAM_OUT_ROUTE_NIC = 1,
+	CAM_OUT_FUNC_SHIFT = 2,
+	CAM_OUT_RV = (1 << 4),
+	CAM_OUT_SH = (1 << 15),
+	CAM_OUT_CQ_ID_SHIFT = 5,
+};
+
+/*
+ * Mailbox  definitions
+ */
+enum {
+	/* Asynchronous Event Notifications */
+	AEN_SYS_ERR = 0x00008002,
+	AEN_LINK_UP = 0x00008011,
+	AEN_LINK_DOWN = 0x00008012,
+	AEN_IDC_CMPLT = 0x00008100,
+	AEN_IDC_REQ = 0x00008101,
+	AEN_FW_INIT_DONE = 0x00008400,
+	AEN_FW_INIT_FAIL = 0x00008401,
+
+	/* Mailbox Command Opcodes. */
+	MB_CMD_NOP = 0x00000000,
+	MB_CMD_EX_FW = 0x00000002,
+	MB_CMD_MB_TEST = 0x00000006,
+	MB_CMD_CSUM_TEST = 0x00000007,	/* Verify Checksum */
+	MB_CMD_ABOUT_FW = 0x00000008,
+	MB_CMD_LOAD_RISC_RAM = 0x0000000b,
+	MB_CMD_DUMP_RISC_RAM = 0x0000000c,
+	MB_CMD_WRITE_RAM = 0x0000000d,
+	MB_CMD_READ_RAM = 0x0000000f,
+	MB_CMD_STOP_FW = 0x00000014,
+	MB_CMD_MAKE_SYS_ERR = 0x0000002a,
+	MB_CMD_INIT_FW = 0x00000060,
+	MB_CMD_GET_INIT_CB = 0x00000061,
+	MB_CMD_GET_FW_STATE = 0x00000069,
+	MB_CMD_IDC_REQ = 0x00000100,	/* Inter-Driver Communication */
+	MB_CMD_IDC_ACK = 0x00000101,	/* Inter-Driver Communication */
+	MB_CMD_SET_WOL_MODE = 0x00000110,	/* Wake On Lan */
+	MB_WOL_DISABLE = 0x00000000,
+	MB_WOL_MAGIC_PKT = 0x00000001,
+	MB_WOL_FLTR = 0x00000002,
+	MB_WOL_UCAST = 0x00000004,
+	MB_WOL_MCAST = 0x00000008,
+	MB_WOL_BCAST = 0x00000010,
+	MB_WOL_LINK_UP = 0x00000020,
+	MB_WOL_LINK_DOWN = 0x00000040,
+	MB_CMD_SET_WOL_FLTR = 0x00000111,	/* Wake On Lan Filter */
+	MB_CMD_CLEAR_WOL_FLTR = 0x00000112,	/* Wake On Lan Filter */
+	MB_CMD_SET_WOL_MAGIC = 0x00000113,	/* Wake On Lan Magic Packet */
+	MB_CMD_CLEAR_WOL_MAGIC = 0x00000114,	/* Wake On Lan Magic Packet */
+	MB_CMD_PORT_RESET = 0x00000120,
+	MB_CMD_SET_PORT_CFG = 0x00000122,
+	MB_CMD_GET_PORT_CFG = 0x00000123,
+	MB_CMD_SET_ASIC_VOLTS = 0x00000130,
+	MB_CMD_GET_SNS_DATA = 0x00000131,	/* Temp and Volt Sense data. */
+
+	/* Mailbox Command Status. */
+	MB_CMD_STS_GOOD = 0x00004000,	/* Success. */
+	MB_CMD_STS_INTRMDT = 0x00001000,	/* Intermediate Complete. */
+	MB_CMD_STS_ERR = 0x00004005,	/* Error. */
+};
+
+struct mbox_params {
+	u32 mbox_in[MAILBOX_COUNT];
+	u32 mbox_out[MAILBOX_COUNT];
+	int in_count;
+	int out_count;
+};
+
+struct flash_params {
+	u8 dev_id_str[4];
+	u16 size;
+	u16 csum;
+	u16 ver;
+	u16 sub_dev_id;
+	u8 mac_addr[6];
+	u16 res;
+};
+
+
+/*
+ * doorbell space for the rx ring context
+ */
+struct rx_doorbell_context {
+	u32 cnsmr_idx;		/* 0x00 */
+	u32 valid;		/* 0x04 */
+	u32 reserved[4];	/* 0x08-0x14 */
+	u32 lbq_prod_idx;	/* 0x18 */
+	u32 sbq_prod_idx;	/* 0x1c */
+};
+
+/*
+ * doorbell space for the tx ring context
+ */
+struct tx_doorbell_context {
+	u32 prod_idx;		/* 0x00 */
+	u32 valid;		/* 0x04 */
+	u32 reserved[4];	/* 0x08-0x14 */
+	u32 lbq_prod_idx;	/* 0x18 */
+	u32 sbq_prod_idx;	/* 0x1c */
+};
+
+/* DATA STRUCTURES SHARED WITH HARDWARE. */
+
+struct bq_element {
+	u32 addr_lo;
+#define BQ_END	0x00000001
+#define BQ_CONT	0x00000002
+#define BQ_MASK	0x00000003
+	u32 addr_hi;
+} __attribute((packed));
+
+struct tx_buf_desc {
+	__le64 addr;
+	__le32 len;
+#define TX_DESC_LEN_MASK	0x000fffff
+#define TX_DESC_C	0x40000000
+#define TX_DESC_E	0x80000000
+} __attribute((packed));
+
+/*
+ * IOCB Definitions...
+ */
+
+#define OPCODE_OB_MAC_IOCB 			0x01
+#define OPCODE_OB_MAC_TSO_IOCB		0x02
+#define OPCODE_IB_MAC_IOCB			0x20
+#define OPCODE_IB_MPI_IOCB			0x21
+#define OPCODE_IB_AE_IOCB			0x3f
+
+struct ob_mac_iocb_req {
+	u8 opcode;
+	u8 flags1;
+#define OB_MAC_IOCB_REQ_OI	0x01
+#define OB_MAC_IOCB_REQ_I	0x02
+#define OB_MAC_IOCB_REQ_D	0x08
+#define OB_MAC_IOCB_REQ_F	0x10
+	u8 flags2;
+	u8 flags3;
+#define OB_MAC_IOCB_DFP	0x02
+#define OB_MAC_IOCB_V	0x04
+	__le32 reserved1[2];
+	__le16 frame_len;
+#define OB_MAC_IOCB_LEN_MASK 0x3ffff
+	__le16 reserved2;
+	__le32 tid;
+	__le32 txq_idx;
+	__le32 reserved3;
+	__le16 vlan_tci;
+	__le16 reserved4;
+	struct tx_buf_desc tbd[TX_DESC_PER_IOCB];
+} __attribute((packed));
+
+struct ob_mac_iocb_rsp {
+	u8 opcode;		/* */
+	u8 flags1;		/* */
+#define OB_MAC_IOCB_RSP_OI	0x01	/* */
+#define OB_MAC_IOCB_RSP_I	0x02	/* */
+#define OB_MAC_IOCB_RSP_E	0x08	/* */
+#define OB_MAC_IOCB_RSP_S	0x10	/* too Short */
+#define OB_MAC_IOCB_RSP_L	0x20	/* too Large */
+#define OB_MAC_IOCB_RSP_P	0x40	/* Padded */
+	u8 flags2;		/* */
+	u8 flags3;		/* */
+#define OB_MAC_IOCB_RSP_B	0x80	/* */
+	__le32 tid;
+	__le32 txq_idx;
+	__le32 reserved[13];
+} __attribute((packed));
+
+struct ob_mac_tso_iocb_req {
+	u8 opcode;
+	u8 flags1;
+#define OB_MAC_TSO_IOCB_OI	0x01
+#define OB_MAC_TSO_IOCB_I	0x02
+#define OB_MAC_TSO_IOCB_D	0x08
+#define OB_MAC_TSO_IOCB_IP4	0x40
+#define OB_MAC_TSO_IOCB_IP6	0x80
+	u8 flags2;
+#define OB_MAC_TSO_IOCB_LSO	0x20
+#define OB_MAC_TSO_IOCB_UC	0x40
+#define OB_MAC_TSO_IOCB_TC	0x80
+	u8 flags3;
+#define OB_MAC_TSO_IOCB_IC	0x01
+#define OB_MAC_TSO_IOCB_DFP	0x02
+#define OB_MAC_TSO_IOCB_V	0x04
+	__le32 reserved1[2];
+	__le32 frame_len;
+	__le32 tid;
+	__le32 txq_idx;
+	__le16 total_hdrs_len;
+	__le16 net_trans_offset;
+#define OB_MAC_TRANSPORT_HDR_SHIFT 6
+	__le16 vlan_tci;
+	__le16 mss;
+	struct tx_buf_desc tbd[TX_DESC_PER_IOCB];
+} __attribute((packed));
+
+struct ob_mac_tso_iocb_rsp {
+	u8 opcode;
+	u8 flags1;
+#define OB_MAC_TSO_IOCB_RSP_OI	0x01
+#define OB_MAC_TSO_IOCB_RSP_I	0x02
+#define OB_MAC_TSO_IOCB_RSP_E	0x08
+#define OB_MAC_TSO_IOCB_RSP_S	0x10
+#define OB_MAC_TSO_IOCB_RSP_L	0x20
+#define OB_MAC_TSO_IOCB_RSP_P	0x40
+	u8 flags2;		/* */
+	u8 flags3;		/* */
+#define OB_MAC_TSO_IOCB_RSP_B	0x8000
+	__le32 tid;
+	__le32 txq_idx;
+	__le32 reserved2[13];
+} __attribute((packed));
+
+struct ib_mac_iocb_rsp {
+	u8 opcode;		/* 0x20 */
+	u8 flags1;
+#define IB_MAC_IOCB_RSP_OI	0x01	/* Override intr delay */
+#define IB_MAC_IOCB_RSP_I	0x02	/* Disable Intr Generation */
+#define IB_MAC_IOCB_RSP_TE	0x04	/* Checksum error */
+#define IB_MAC_IOCB_RSP_NU	0x08	/* No checksum rcvd */
+#define IB_MAC_IOCB_RSP_IE	0x10	/* IPv4 checksum error */
+#define IB_MAC_IOCB_RSP_M_MASK	0x60	/* Multicast info */
+#define IB_MAC_IOCB_RSP_M_NONE	0x00	/* Not mcast frame */
+#define IB_MAC_IOCB_RSP_M_HASH	0x20	/* HASH mcast frame */
+#define IB_MAC_IOCB_RSP_M_REG 	0x40	/* Registered mcast frame */
+#define IB_MAC_IOCB_RSP_M_PROM 	0x60	/* Promiscuous mcast frame */
+#define IB_MAC_IOCB_RSP_B	0x80	/* Broadcast frame */
+	u8 flags2;
+#define IB_MAC_IOCB_RSP_P	0x01	/* Promiscuous frame */
+#define IB_MAC_IOCB_RSP_V	0x02	/* Vlan tag present */
+#define IB_MAC_IOCB_RSP_ERR_MASK	0x1c	/*  */
+#define IB_MAC_IOCB_RSP_ERR_CODE_ERR	0x04
+#define IB_MAC_IOCB_RSP_ERR_OVERSIZE	0x08
+#define IB_MAC_IOCB_RSP_ERR_UNDERSIZE	0x10
+#define IB_MAC_IOCB_RSP_ERR_PREAMBLE	0x14
+#define IB_MAC_IOCB_RSP_ERR_FRAME_LEN	0x18
+#define IB_MAC_IOCB_RSP_ERR_CRC		0x1c
+#define IB_MAC_IOCB_RSP_U	0x20	/* UDP packet */
+#define IB_MAC_IOCB_RSP_T	0x40	/* TCP packet */
+#define IB_MAC_IOCB_RSP_FO	0x80	/* Failover port */
+	u8 flags3;
+#define IB_MAC_IOCB_RSP_RSS_MASK	0x07	/* RSS mask */
+#define IB_MAC_IOCB_RSP_M_NONE	0x00	/* No RSS match */
+#define IB_MAC_IOCB_RSP_M_IPV4	0x04	/* IPv4 RSS match */
+#define IB_MAC_IOCB_RSP_M_IPV6	0x02	/* IPv6 RSS match */
+#define IB_MAC_IOCB_RSP_M_TCP_V4 	0x05	/* TCP with IPv4 */
+#define IB_MAC_IOCB_RSP_M_TCP_V6 	0x03	/* TCP with IPv6 */
+#define IB_MAC_IOCB_RSP_V4	0x08	/* IPV4 */
+#define IB_MAC_IOCB_RSP_V6	0x10	/* IPV6 */
+#define IB_MAC_IOCB_RSP_IH	0x20	/* Split after IP header */
+#define IB_MAC_IOCB_RSP_DS	0x40	/* data is in small buffer */
+#define IB_MAC_IOCB_RSP_DL	0x80	/* data is in large buffer */
+	__le32 data_len;	/* */
+	__le32 data_addr_lo;	/* */
+	__le32 data_addr_hi;	/* */
+	__le32 rss;		/* */
+	__le16 vlan_id;		/* 12 bits */
+#define IB_MAC_IOCB_RSP_C	0x1000	/* VLAN CFI bit */
+#define IB_MAC_IOCB_RSP_COS_SHIFT	12	/* class of service value */
+
+	__le16 reserved1;
+	__le32 reserved2[6];
+	__le32 flags4;
+#define IB_MAC_IOCB_RSP_HV	0x20000000	/* */
+#define IB_MAC_IOCB_RSP_HS	0x40000000	/* */
+#define IB_MAC_IOCB_RSP_HL	0x80000000	/* */
+	__le32 hdr_len;		/* */
+	__le32 hdr_addr_lo;	/* */
+	__le32 hdr_addr_hi;	/* */
+} __attribute((packed));
+
+struct ib_ae_iocb_rsp {
+	u8 opcode;
+	u8 flags1;
+#define IB_AE_IOCB_RSP_OI		0x01
+#define IB_AE_IOCB_RSP_I		0x02
+	u8 event;
+#define LINK_UP_EVENT              0x00
+#define LINK_DOWN_EVENT            0x01
+#define CAM_LOOKUP_ERR_EVENT       0x06
+#define SOFT_ECC_ERROR_EVENT       0x07
+#define MGMT_ERR_EVENT             0x08
+#define TEN_GIG_MAC_EVENT          0x09
+#define GPI0_H2L_EVENT       	0x10
+#define GPI0_L2H_EVENT       	0x20
+#define GPI1_H2L_EVENT       	0x11
+#define GPI1_L2H_EVENT       	0x21
+#define PCI_ERR_ANON_BUF_RD        0x40
+	u8 q_id;
+	__le32 reserved[15];
+} __attribute((packed));
+
+/*
+ * These three structures are for generic
+ * handling of ib and ob iocbs.
+ */
+struct ql_net_rsp_iocb {
+	u8 opcode;
+	u8 flags0;
+	__le16 length;
+	__le32 tid;
+	__le32 reserved[14];
+} __attribute((packed));
+
+struct net_req_iocb {
+	u8 opcode;
+	u8 flags0;
+	__le16 flags1;
+	__le32 tid;
+	__le32 reserved1[30];
+} __attribute((packed));
+
+/*
+ * tx ring initialization control block for chip.
+ * It is defined as:
+ * "Work Queue Initialization Control Block"
+ */
+struct wqicb {
+	__le16 len;
+#define Q_LEN_V		(1 << 4)
+#define Q_LEN_CPP_CONT	0x0000
+#define Q_LEN_CPP_16	0x0001
+#define Q_LEN_CPP_32	0x0002
+#define Q_LEN_CPP_64	0x0003
+	__le16 flags;
+#define Q_PRI_SHIFT	1
+#define Q_FLAGS_LC	0x1000
+#define Q_FLAGS_LB	0x2000
+#define Q_FLAGS_LI	0x4000
+#define Q_FLAGS_LO	0x8000
+	__le16 cq_id_rss;
+#define Q_CQ_ID_RSS_RV 0x8000
+	__le16 rid;
+	__le32 addr_lo;
+	__le32 addr_hi;
+	__le32 cnsmr_idx_addr_lo;
+	__le32 cnsmr_idx_addr_hi;
+} __attribute((packed));
+
+/*
+ * rx ring initialization control block for chip.
+ * It is defined as:
+ * "Completion Queue Initialization Control Block"
+ */
+struct cqicb {
+	u8 msix_vect;
+	u8 reserved1;
+	u8 reserved2;
+	u8 flags;
+#define FLAGS_LV	0x08
+#define FLAGS_LS	0x10
+#define FLAGS_LL	0x20
+#define FLAGS_LI	0x40
+#define FLAGS_LC	0x80
+	__le16 len;
+#define LEN_V		(1 << 4)
+#define LEN_CPP_CONT	0x0000
+#define LEN_CPP_32	0x0001
+#define LEN_CPP_64	0x0002
+#define LEN_CPP_128	0x0003
+	__le16 rid;
+	__le32 addr_lo;
+	__le32 addr_hi;
+	__le32 prod_idx_addr_lo;
+	__le32 prod_idx_addr_hi;
+	__le16 pkt_delay;
+	__le16 irq_delay;
+	__le32 lbq_addr_lo;
+	__le32 lbq_addr_hi;
+	__le16 lbq_buf_size;
+	__le16 lbq_len;		/* entry count */
+	__le32 sbq_addr_lo;
+	__le32 sbq_addr_hi;
+	__le16 sbq_buf_size;
+	__le16 sbq_len;		/* entry count */
+} __attribute((packed));
+
+struct ricb {
+	u8 base_cq;
+#define RSS_L4K 0x80
+	u8 flags;
+#define RSS_L6K 0x01
+#define RSS_LI  0x02
+#define RSS_LB  0x04
+#define RSS_LM  0x08
+#define RSS_RI4 0x10
+#define RSS_RT4 0x20
+#define RSS_RI6 0x40
+#define RSS_RT6 0x80
+	__le16 mask;
+	__le32 hash_cq_id[256];
+	__le32 ipv6_hash_key[10];
+	__le32 ipv4_hash_key[4];
+} __attribute((packed));
+
+/* SOFTWARE/DRIVER DATA STRUCTURES. */
+
+struct oal {
+	struct tx_buf_desc oal[TX_DESC_PER_OAL];
+};
+
+struct map_list {
+	DECLARE_PCI_UNMAP_ADDR(mapaddr);
+	DECLARE_PCI_UNMAP_LEN(maplen);
+};
+
+struct tx_ring_desc {
+	struct sk_buff *skb;
+	struct ob_mac_iocb_req *queue_entry;
+	int index;
+	struct oal oal;
+	struct map_list map[MAX_SKB_FRAGS + 1];
+	int map_cnt;
+	struct tx_ring_desc *next;
+};
+
+struct bq_desc {
+	union {
+		struct page *lbq_page;
+		struct sk_buff *skb;
+	} p;
+	struct bq_element *bq;
+	int index;
+	DECLARE_PCI_UNMAP_ADDR(mapaddr);
+	DECLARE_PCI_UNMAP_LEN(maplen);
+};
+
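+/* Picks a tx ring for the frame by mapping the submitting CPU onto the
+ * ring count; note the skb argument is currently unused. */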
+#define QL_TXQ_IDX(qdev, skb) (smp_processor_id()%(qdev->tx_ring_count))
+
+struct tx_ring {
+	/*
+	 * queue info.
+	 */
+	struct wqicb wqicb;	/* structure used to inform chip of new queue */
+	void *wq_base;		/* pci_alloc:virtual addr for tx */
+	dma_addr_t wq_base_dma;	/* pci_alloc:dma addr for tx */
+	u32 *cnsmr_idx_sh_reg;	/* shadow copy of consumer idx */
+	dma_addr_t cnsmr_idx_sh_reg_dma;	/* dma-shadow copy of consumer */
+	u32 wq_size;		/* size in bytes of queue area */
+	u32 wq_len;		/* number of entries in queue */
+	void __iomem *prod_idx_db_reg;	/* doorbell area index reg at offset 0x00 */
+	void __iomem *valid_db_reg;	/* doorbell area valid reg at offset 0x04 */
+	u16 prod_idx;		/* current value for prod idx */
+	u16 cq_id;		/* completion (rx) queue for tx completions */
+	u8 wq_id;		/* queue id for this entry */
+	u8 reserved1[3];
+	struct tx_ring_desc *q;	/* descriptor list for the queue */
+	spinlock_t lock;
+	atomic_t tx_count;	/* counts down for every outstanding IO */
+	atomic_t queue_stopped;	/* Turns queue off when full. */
+	struct delayed_work tx_work;
+	struct ql_adapter *qdev;
+};
+
+/*
+ * Type of inbound queue.
+ */
+enum {
+	DEFAULT_Q = 2,		/* Handles slow queue and chip/MPI events. */
+	TX_Q = 3,		/* Handles outbound completions. */
+	RX_Q = 4,		/* Handles inbound completions. */
+};
+
+struct rx_ring {
+	struct cqicb cqicb;	/* The chip's completion queue init control block. */
+
+	/* Completion queue elements. */
+	void *cq_base;
+	dma_addr_t cq_base_dma;
+	u32 cq_size;
+	u32 cq_len;
+	u16 cq_id;
+	u32 *prod_idx_sh_reg;	/* Shadowed producer register. */
+	dma_addr_t prod_idx_sh_reg_dma;
+	void __iomem *cnsmr_idx_db_reg;	/* PCI doorbell mem area + 0 */
+	u32 cnsmr_idx;		/* current sw idx */
+	struct ql_net_rsp_iocb *curr_entry;	/* next entry on queue */
+	void __iomem *valid_db_reg;	/* PCI doorbell mem area + 0x04 */
+
+	/* Large buffer queue elements. */
+	u32 lbq_len;		/* entry count */
+	u32 lbq_size;		/* size in bytes of queue */
+	u32 lbq_buf_size;
+	void *lbq_base;
+	dma_addr_t lbq_base_dma;
+	void *lbq_base_indirect;
+	dma_addr_t lbq_base_indirect_dma;
+	struct bq_desc *lbq;	/* array of control blocks */
+	void __iomem *lbq_prod_idx_db_reg;	/* PCI doorbell mem area + 0x18 */
+	u32 lbq_prod_idx;	/* current sw prod idx */
+	u32 lbq_curr_idx;	/* next entry we expect */
+	u32 lbq_clean_idx;	/* beginning of new descs */
+	u32 lbq_free_cnt;	/* free buffer desc cnt */
+
+	/* Small buffer queue elements. */
+	u32 sbq_len;		/* entry count */
+	u32 sbq_size;		/* size in bytes of queue */
+	u32 sbq_buf_size;
+	void *sbq_base;
+	dma_addr_t sbq_base_dma;
+	void *sbq_base_indirect;
+	dma_addr_t sbq_base_indirect_dma;
+	struct bq_desc *sbq;	/* array of control blocks */
+	void __iomem *sbq_prod_idx_db_reg; /* PCI doorbell mem area + 0x1c */
+	u32 sbq_prod_idx;	/* current sw prod idx */
+	u32 sbq_curr_idx;	/* next entry we expect */
+	u32 sbq_clean_idx;	/* beginning of new descs */
+	u32 sbq_free_cnt;	/* free buffer desc cnt */
+
+	/* Misc. handler elements. */
+	u32 type;		/* Type of queue, tx, rx, or default. */
+	u32 irq;		/* Which vector this ring is assigned. */
+	u32 cpu;		/* Which CPU this should run on. */
+	char name[IFNAMSIZ + 5];
+	struct napi_struct napi;
+	struct delayed_work rx_work;
+	u8 reserved;
+	struct ql_adapter *qdev;
+};
+
+struct hash_id {
+	u8 value[4];
+};
+
+struct nic_stats {
+	/*
+	 * These stats come from offset 200h to 278h
+	 * in the XGMAC register.
+	 */
+	u64 tx_pkts;
+	u64 tx_bytes;
+	u64 tx_mcast_pkts;
+	u64 tx_bcast_pkts;
+	u64 tx_ucast_pkts;
+	u64 tx_ctl_pkts;
+	u64 tx_pause_pkts;
+	u64 tx_64_pkt;
+	u64 tx_65_to_127_pkt;
+	u64 tx_128_to_255_pkt;
+	u64 tx_256_511_pkt;
+	u64 tx_512_to_1023_pkt;
+	u64 tx_1024_to_1518_pkt;
+	u64 tx_1519_to_max_pkt;
+	u64 tx_undersize_pkt;
+	u64 tx_oversize_pkt;
+
+	/*
+	 * These stats come from offset 300h to 3C8h
+	 * in the XGMAC register.
+	 */
+	u64 rx_bytes;
+	u64 rx_bytes_ok;
+	u64 rx_pkts;
+	u64 rx_pkts_ok;
+	u64 rx_bcast_pkts;
+	u64 rx_mcast_pkts;
+	u64 rx_ucast_pkts;
+	u64 rx_undersize_pkts;
+	u64 rx_oversize_pkts;
+	u64 rx_jabber_pkts;
+	u64 rx_undersize_fcerr_pkts;
+	u64 rx_drop_events;
+	u64 rx_fcerr_pkts;
+	u64 rx_align_err;
+	u64 rx_symbol_err;
+	u64 rx_mac_err;
+	u64 rx_ctl_pkts;
+	u64 rx_pause_pkts;
+	u64 rx_64_pkts;
+	u64 rx_65_to_127_pkts;
+	u64 rx_128_255_pkts;
+	u64 rx_256_511_pkts;
+	u64 rx_512_to_1023_pkts;
+	u64 rx_1024_to_1518_pkts;
+	u64 rx_1519_to_max_pkts;
+	u64 rx_len_err_pkts;
+};
+
+/*
+ * intr_context structure is used during initialization
+ * to hook the interrupts.  It is also used in a single
+ * irq environment as a context to the ISR.
+ */
+struct intr_context {
+	struct ql_adapter *qdev;
+	u32 intr;
+	u32 hooked;
+	u32 intr_en_mask;	/* value/mask used to enable this intr */
+	u32 intr_dis_mask;	/* value/mask used to disable this intr */
+	u32 intr_read_mask;	/* value/mask used to read this intr */
+	char name[IFNAMSIZ * 2];
+	atomic_t irq_cnt;	/* irq_cnt is used in single vector
+				 * environment.  It's incremented for each
+				 * irq handler that is scheduled.  When each
+				 * handler finishes it decrements irq_cnt and
+				 * enables interrupts if it's zero. */
+	irq_handler_t handler;
+};
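+
+/*
+ * Illustrative sketch (not the driver's code) of the single-vector
+ * accounting described above: each scheduled handler bumps irq_cnt,
+ * and the last handler to finish re-enables the interrupt:
+ *
+ *	atomic_inc(&ctx->irq_cnt);
+ *	... run the handler ...
+ *	if (atomic_dec_and_test(&ctx->irq_cnt))
+ *		ql_enable_completion_interrupt(ctx->qdev, ctx->intr);
+ */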
+
+/* adapter flags definitions. */
+enum {
+	QL_ADAPTER_UP = (1 << 0),	/* Adapter has been brought up. */
+	QL_LEGACY_ENABLED = (1 << 2),	/* Legacy INTx interrupts in use. */
+	QL_MSI_ENABLED = (1 << 3),	/* MSI interrupts in use. */
+	QL_MSIX_ENABLED = (1 << 4),
+	QL_DMA64 = (1 << 5),
+	QL_PROMISCUOUS = (1 << 6),
+	QL_ALLMULTI = (1 << 7),
+};
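+
+/*
+ * Example (illustrative): these flags are mask-tested against
+ * qdev->flags, e.g.
+ *
+ *	if (qdev->flags & QL_MSIX_ENABLED)
+ *		free_irq(qdev->msi_x_entry[i].vector,
+ *			 &qdev->intr_context[i]);
+ */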
+
+/* link_status bit definitions */
+enum {
+	LOOPBACK_MASK = 0x00000700,
+	LOOPBACK_PCS = 0x00000100,
+	LOOPBACK_HSS = 0x00000200,
+	LOOPBACK_EXT = 0x00000300,
+	PAUSE_MASK = 0x000000c0,
+	PAUSE_STD = 0x00000040,
+	PAUSE_PRI = 0x00000080,
+	SPEED_MASK = 0x00000038,
+	SPEED_100Mb = 0x00000000,
+	SPEED_1Gb = 0x00000008,
+	SPEED_10Gb = 0x00000010,
+	LINK_TYPE_MASK = 0x00000007,
+	LINK_TYPE_XFI = 0x00000001,
+	LINK_TYPE_XAUI = 0x00000002,
+	LINK_TYPE_XFI_BP = 0x00000003,
+	LINK_TYPE_XAUI_BP = 0x00000004,
+	LINK_TYPE_10GBASET = 0x00000005,
+};
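+
+/*
+ * Example (illustrative sketch): decoding the link type from
+ * link_status with the masks above:
+ *
+ *	switch (qdev->link_status & LINK_TYPE_MASK) {
+ *	case LINK_TYPE_10GBASET:
+ *		... copper port, autoneg capable ...
+ *		break;
+ *	default:
+ *		... treat as fibre ...
+ *		break;
+ *	}
+ */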
+
+/*
+ * The main Adapter structure definition.
+ * This structure has all fields relevant to the hardware.
+ */
+struct ql_adapter {
+	struct ricb ricb;
+	unsigned long flags;
+	u32 wol;
+
+	struct nic_stats nic_stats;
+
+	struct vlan_group *vlgrp;
+
+	/* PCI Configuration information for this device */
+	struct pci_dev *pdev;
+	struct net_device *ndev;	/* Parent NET device */
+
+	/* Hardware information */
+	u32 chip_rev_id;
+	u32 func;		/* PCI function for this adapter */
+
+	spinlock_t adapter_lock;
+	spinlock_t hw_lock;
+	spinlock_t stats_lock;
+	spinlock_t legacy_lock;	/* used for maintaining legacy intr sync */
+
+	/* PCI Bus Relative Register Addresses */
+	void __iomem *reg_base;
+	void __iomem *doorbell_area;
+	u32 doorbell_area_size;
+
+	u32 msg_enable;
+
+	/* Page for Shadow Registers */
+	void *rx_ring_shadow_reg_area;
+	dma_addr_t rx_ring_shadow_reg_dma;
+	void *tx_ring_shadow_reg_area;
+	dma_addr_t tx_ring_shadow_reg_dma;
+
+	u32 mailbox_in;
+	u32 mailbox_out;
+
+	int tx_ring_size;
+	int rx_ring_size;
+	u32 intr_count;
+	struct msix_entry *msi_x_entry;
+	struct intr_context intr_context[MAX_RX_RINGS];
+
+	int (*legacy_check) (struct ql_adapter *);
+
+	int tx_ring_count;	/* One per online CPU. */
+	u32 rss_ring_first_cq_id;/* index of first inbound (rss) rx_ring */
+	u32 rss_ring_count;	/* One per online CPU.  */
+	/*
+	 * rx_ring_count =
+	 *  one default queue +
+	 *  (CPU count * outbound completion rx_ring) +
+	 *  (CPU count * inbound (RSS) completion rx_ring)
+	 * e.g. with 4 online CPUs: 1 + 4 + 4 = 9 rx_rings.
+	 */
+	int rx_ring_count;
+	int ring_mem_size;
+	void *ring_mem;
+	struct rx_ring *rx_ring;
+	int rx_csum;
+	struct tx_ring *tx_ring;
+	u32 default_rx_queue;
+
+	u16 rx_coalesce_usecs;	/* cqicb->int_delay */
+	u16 rx_max_coalesced_frames;	/* cqicb->pkt_int_delay */
+	u16 tx_coalesce_usecs;	/* cqicb->int_delay */
+	u16 tx_max_coalesced_frames;	/* cqicb->pkt_int_delay */
+
+	u32 xg_sem_mask;
+	u32 port_link_up;
+	u32 port_init;
+	u32 link_status;
+
+	struct flash_params flash;
+
+	struct net_device_stats stats;
+	struct workqueue_struct *q_workqueue;
+	struct workqueue_struct *workqueue;
+	struct delayed_work asic_reset_work;
+	struct delayed_work mpi_reset_work;
+	struct delayed_work mpi_work;
+};
+
+/*
+ * Typical Register accessor for memory mapped device.
+ */
+static inline u32 ql_read32(const struct ql_adapter *qdev, int reg)
+{
+	return readl(qdev->reg_base + reg);
+}
+
+/*
+ * Typical Register accessor for memory mapped device.
+ */
+static inline void ql_write32(const struct ql_adapter *qdev, int reg, u32 val)
+{
+	writel(val, qdev->reg_base + reg);
+}
+
+/*
+ * Doorbell Registers:
+ * Doorbell registers are virtual registers in the PCI memory space.
+ * The space is allocated by the chip during PCI initialization.  The
+ * device driver finds the doorbell address in BAR 3 in PCI config space.
+ * The registers are used to control outbound and inbound queues. For
+ * example, the producer index for an outbound queue.  Each queue uses
+ * 1 4k chunk of memory.  The lower half of the space is for outbound
+ * queues. The upper half is for inbound queues.
+ */
+static inline void ql_write_db_reg(u32 val, void __iomem *addr)
+{
+	writel(val, addr);
+	mmiowb();
+}
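+
+/*
+ * Example (illustrative, mirroring the description above): bumping a
+ * TX ring's producer index and ringing its doorbell might look like:
+ *
+ *	tx_ring->prod_idx++;
+ *	if (tx_ring->prod_idx == tx_ring->wq_len)
+ *		tx_ring->prod_idx = 0;
+ *	ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
+ */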
+
+/*
+ * Shadow Registers:
+ * Outbound queues have a consumer index that is maintained by the chip.
+ * Inbound queues have a producer index that is maintained by the chip.
+ * For lower overhead, these registers are "shadowed" to host memory
+ * which allows the device driver to track the queue progress without
+ * PCI reads. When an entry is placed on an inbound queue, the chip will
+ * update the relevant index register and then copy the value to the
+ * shadow register in host memory.
+ */
+static inline unsigned int ql_read_sh_reg(const volatile void *addr)
+{
+	return *(volatile unsigned int __force *)addr;
+}
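+
+/*
+ * Example (illustrative): servicing a completion queue can poll the
+ * shadow register instead of issuing a PCI read:
+ *
+ *	while (rx_ring->cnsmr_idx !=
+ *	       ql_read_sh_reg(rx_ring->prod_idx_sh_reg)) {
+ *		... handle rx_ring->curr_entry ...
+ *	}
+ */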
+
+extern char qlge_driver_name[];
+extern const char qlge_driver_version[];
+extern const struct ethtool_ops qlge_ethtool_ops;
+
+extern int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask);
+extern void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask);
+extern int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data);
+extern int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
+			       u32 *value);
+extern int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value);
+extern int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
+			u16 q_id);
+void ql_queue_fw_error(struct ql_adapter *qdev);
+void ql_mpi_work(struct work_struct *work);
+void ql_mpi_reset_work(struct work_struct *work);
+int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 ebit);
+void ql_queue_asic_error(struct ql_adapter *qdev);
+void ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr);
+void ql_set_ethtool_ops(struct net_device *ndev);
+int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data);
+
+#if 1
+#define QL_ALL_DUMP
+#define QL_REG_DUMP
+#define QL_DEV_DUMP
+#define QL_CB_DUMP
+/* #define QL_IB_DUMP */
+/* #define QL_OB_DUMP */
+#endif
+
+#ifdef QL_REG_DUMP
+extern void ql_dump_xgmac_control_regs(struct ql_adapter *qdev);
+extern void ql_dump_routing_entries(struct ql_adapter *qdev);
+extern void ql_dump_regs(struct ql_adapter *qdev);
+#define QL_DUMP_REGS(qdev) ql_dump_regs(qdev)
+#define QL_DUMP_ROUTE(qdev) ql_dump_routing_entries(qdev)
+#define QL_DUMP_XGMAC_CONTROL_REGS(qdev) ql_dump_xgmac_control_regs(qdev)
+#else
+#define QL_DUMP_REGS(qdev)
+#define QL_DUMP_ROUTE(qdev)
+#define QL_DUMP_XGMAC_CONTROL_REGS(qdev)
+#endif
+
+#ifdef QL_STAT_DUMP
+extern void ql_dump_stat(struct ql_adapter *qdev);
+#define QL_DUMP_STAT(qdev) ql_dump_stat(qdev)
+#else
+#define QL_DUMP_STAT(qdev)
+#endif
+
+#ifdef QL_DEV_DUMP
+extern void ql_dump_qdev(struct ql_adapter *qdev);
+#define QL_DUMP_QDEV(qdev) ql_dump_qdev(qdev)
+#else
+#define QL_DUMP_QDEV(qdev)
+#endif
+
+#ifdef QL_CB_DUMP
+extern void ql_dump_wqicb(struct wqicb *wqicb);
+extern void ql_dump_tx_ring(struct tx_ring *tx_ring);
+extern void ql_dump_ricb(struct ricb *ricb);
+extern void ql_dump_cqicb(struct cqicb *cqicb);
+extern void ql_dump_rx_ring(struct rx_ring *rx_ring);
+extern void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id);
+#define QL_DUMP_RICB(ricb) ql_dump_ricb(ricb)
+#define QL_DUMP_WQICB(wqicb) ql_dump_wqicb(wqicb)
+#define QL_DUMP_TX_RING(tx_ring) ql_dump_tx_ring(tx_ring)
+#define QL_DUMP_CQICB(cqicb) ql_dump_cqicb(cqicb)
+#define QL_DUMP_RX_RING(rx_ring) ql_dump_rx_ring(rx_ring)
+#define QL_DUMP_HW_CB(qdev, size, bit, q_id) \
+		ql_dump_hw_cb(qdev, size, bit, q_id)
+#else
+#define QL_DUMP_RICB(ricb)
+#define QL_DUMP_WQICB(wqicb)
+#define QL_DUMP_TX_RING(tx_ring)
+#define QL_DUMP_CQICB(cqicb)
+#define QL_DUMP_RX_RING(rx_ring)
+#define QL_DUMP_HW_CB(qdev, size, bit, q_id)
+#endif
+
+#ifdef QL_OB_DUMP
+extern void ql_dump_tx_desc(struct tx_buf_desc *tbd);
+extern void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb);
+extern void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp);
+#define QL_DUMP_OB_MAC_IOCB(ob_mac_iocb) ql_dump_ob_mac_iocb(ob_mac_iocb)
+#define QL_DUMP_OB_MAC_RSP(ob_mac_rsp) ql_dump_ob_mac_rsp(ob_mac_rsp)
+#else
+#define QL_DUMP_OB_MAC_IOCB(ob_mac_iocb)
+#define QL_DUMP_OB_MAC_RSP(ob_mac_rsp)
+#endif
+
+#ifdef QL_IB_DUMP
+extern void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp);
+#define QL_DUMP_IB_MAC_RSP(ib_mac_rsp) ql_dump_ib_mac_rsp(ib_mac_rsp)
+#else
+#define QL_DUMP_IB_MAC_RSP(ib_mac_rsp)
+#endif
+
+#ifdef	QL_ALL_DUMP
+extern void ql_dump_all(struct ql_adapter *qdev);
+#define QL_DUMP_ALL(qdev) ql_dump_all(qdev)
+#else
+#define QL_DUMP_ALL(qdev)
+#endif
+
+#endif /* _QLGE_H_ */
diff --git a/drivers/net/qlge/qlge_dbg.c b/drivers/net/qlge/qlge_dbg.c
new file mode 100644
index 000000000000..f0392b166170
--- /dev/null
+++ b/drivers/net/qlge/qlge_dbg.c
@@ -0,0 +1,858 @@
+#include "qlge.h"
+
+#ifdef QL_REG_DUMP
+static void ql_dump_intr_states(struct ql_adapter *qdev)
+{
+	int i;
+	u32 value;
+	for (i = 0; i < qdev->intr_count; i++) {
+		ql_write32(qdev, INTR_EN, qdev->intr_context[i].intr_read_mask);
+		value = ql_read32(qdev, INTR_EN);
+		printk(KERN_ERR PFX
+		       "%s: Interrupt %d is %s.\n",
+		       qdev->ndev->name, i,
+		       (value & INTR_EN_EN ? "enabled" : "disabled"));
+	}
+}
+
+void ql_dump_xgmac_control_regs(struct ql_adapter *qdev)
+{
+	u32 data;
+	if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) {
+		printk(KERN_ERR "%s: Couldn't get xgmac sem.\n", __func__);
+		return;
+	}
+	ql_read_xgmac_reg(qdev, PAUSE_SRC_LO, &data);
+	printk(KERN_ERR PFX "%s: PAUSE_SRC_LO = 0x%.08x.\n", qdev->ndev->name,
+	       data);
+	ql_read_xgmac_reg(qdev, PAUSE_SRC_HI, &data);
+	printk(KERN_ERR PFX "%s: PAUSE_SRC_HI = 0x%.08x.\n", qdev->ndev->name,
+	       data);
+	ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
+	printk(KERN_ERR PFX "%s: GLOBAL_CFG = 0x%.08x.\n", qdev->ndev->name,
+	       data);
+	ql_read_xgmac_reg(qdev, TX_CFG, &data);
+	printk(KERN_ERR PFX "%s: TX_CFG = 0x%.08x.\n", qdev->ndev->name, data);
+	ql_read_xgmac_reg(qdev, RX_CFG, &data);
+	printk(KERN_ERR PFX "%s: RX_CFG = 0x%.08x.\n", qdev->ndev->name, data);
+	ql_read_xgmac_reg(qdev, FLOW_CTL, &data);
+	printk(KERN_ERR PFX "%s: FLOW_CTL = 0x%.08x.\n", qdev->ndev->name,
+	       data);
+	ql_read_xgmac_reg(qdev, PAUSE_OPCODE, &data);
+	printk(KERN_ERR PFX "%s: PAUSE_OPCODE = 0x%.08x.\n", qdev->ndev->name,
+	       data);
+	ql_read_xgmac_reg(qdev, PAUSE_TIMER, &data);
+	printk(KERN_ERR PFX "%s: PAUSE_TIMER = 0x%.08x.\n", qdev->ndev->name,
+	       data);
+	ql_read_xgmac_reg(qdev, PAUSE_FRM_DEST_LO, &data);
+	printk(KERN_ERR PFX "%s: PAUSE_FRM_DEST_LO = 0x%.08x.\n",
+	       qdev->ndev->name, data);
+	ql_read_xgmac_reg(qdev, PAUSE_FRM_DEST_HI, &data);
+	printk(KERN_ERR PFX "%s: PAUSE_FRM_DEST_HI = 0x%.08x.\n",
+	       qdev->ndev->name, data);
+	ql_read_xgmac_reg(qdev, MAC_TX_PARAMS, &data);
+	printk(KERN_ERR PFX "%s: MAC_TX_PARAMS = 0x%.08x.\n", qdev->ndev->name,
+	       data);
+	ql_read_xgmac_reg(qdev, MAC_RX_PARAMS, &data);
+	printk(KERN_ERR PFX "%s: MAC_RX_PARAMS = 0x%.08x.\n", qdev->ndev->name,
+	       data);
+	ql_read_xgmac_reg(qdev, MAC_SYS_INT, &data);
+	printk(KERN_ERR PFX "%s: MAC_SYS_INT = 0x%.08x.\n", qdev->ndev->name,
+	       data);
+	ql_read_xgmac_reg(qdev, MAC_SYS_INT_MASK, &data);
+	printk(KERN_ERR PFX "%s: MAC_SYS_INT_MASK = 0x%.08x.\n",
+	       qdev->ndev->name, data);
+	ql_read_xgmac_reg(qdev, MAC_MGMT_INT, &data);
+	printk(KERN_ERR PFX "%s: MAC_MGMT_INT = 0x%.08x.\n", qdev->ndev->name,
+	       data);
+	ql_read_xgmac_reg(qdev, MAC_MGMT_IN_MASK, &data);
+	printk(KERN_ERR PFX "%s: MAC_MGMT_IN_MASK = 0x%.08x.\n",
+	       qdev->ndev->name, data);
+	ql_read_xgmac_reg(qdev, EXT_ARB_MODE, &data);
+	printk(KERN_ERR PFX "%s: EXT_ARB_MODE = 0x%.08x.\n", qdev->ndev->name,
+	       data);
+	ql_sem_unlock(qdev, qdev->xg_sem_mask);
+}
+
+static void ql_dump_ets_regs(struct ql_adapter *qdev)
+{
+}
+
+static void ql_dump_cam_entries(struct ql_adapter *qdev)
+{
+	int i;
+	u32 value[3];
+	for (i = 0; i < 4; i++) {
+		if (ql_get_mac_addr_reg(qdev, MAC_ADDR_TYPE_CAM_MAC, i, value)) {
+			printk(KERN_ERR PFX
+			       "%s: Failed read of mac index register.\n",
+			       __func__);
+			return;
+		} else {
+			if (value[0])
+				printk(KERN_ERR PFX
+				       "%s: CAM index %d CAM Lookup Lower = 0x%.08x:%.08x, Output = 0x%.08x.\n",
+				       qdev->ndev->name, i, value[1], value[0],
+				       value[2]);
+		}
+	}
+	for (i = 0; i < 32; i++) {
+		if (ql_get_mac_addr_reg
+		    (qdev, MAC_ADDR_TYPE_MULTI_MAC, i, value)) {
+			printk(KERN_ERR PFX
+			       "%s: Failed read of mac index register.\n",
+			       __func__);
+			return;
+		} else {
+			if (value[0])
+				printk(KERN_ERR PFX
+				       "%s: MCAST index %d CAM Lookup Lower = 0x%.08x:%.08x.\n",
+				       qdev->ndev->name, i, value[1], value[0]);
+		}
+	}
+}
+
+void ql_dump_routing_entries(struct ql_adapter *qdev)
+{
+	int i;
+	u32 value;
+	for (i = 0; i < 16; i++) {
+		value = 0;
+		if (ql_get_routing_reg(qdev, i, &value)) {
+			printk(KERN_ERR PFX
+			       "%s: Failed read of routing index register.\n",
+			       __func__);
+			return;
+		} else {
+			if (value)
+				printk(KERN_ERR PFX
+				       "%s: Routing Mask %d = 0x%.08x.\n",
+				       qdev->ndev->name, i, value);
+		}
+	}
+}
+
+void ql_dump_regs(struct ql_adapter *qdev)
+{
+	printk(KERN_ERR PFX "reg dump for function #%d.\n", qdev->func);
+	printk(KERN_ERR PFX "SYS	 			= 0x%x.\n",
+	       ql_read32(qdev, SYS));
+	printk(KERN_ERR PFX "RST_FO 			= 0x%x.\n",
+	       ql_read32(qdev, RST_FO));
+	printk(KERN_ERR PFX "FSC 				= 0x%x.\n",
+	       ql_read32(qdev, FSC));
+	printk(KERN_ERR PFX "CSR 				= 0x%x.\n",
+	       ql_read32(qdev, CSR));
+	printk(KERN_ERR PFX "ICB_RID 			= 0x%x.\n",
+	       ql_read32(qdev, ICB_RID));
+	printk(KERN_ERR PFX "ICB_L 				= 0x%x.\n",
+	       ql_read32(qdev, ICB_L));
+	printk(KERN_ERR PFX "ICB_H 				= 0x%x.\n",
+	       ql_read32(qdev, ICB_H));
+	printk(KERN_ERR PFX "CFG 				= 0x%x.\n",
+	       ql_read32(qdev, CFG));
+	printk(KERN_ERR PFX "BIOS_ADDR 			= 0x%x.\n",
+	       ql_read32(qdev, BIOS_ADDR));
+	printk(KERN_ERR PFX "STS 				= 0x%x.\n",
+	       ql_read32(qdev, STS));
+	printk(KERN_ERR PFX "INTR_EN			= 0x%x.\n",
+	       ql_read32(qdev, INTR_EN));
+	printk(KERN_ERR PFX "INTR_MASK 			= 0x%x.\n",
+	       ql_read32(qdev, INTR_MASK));
+	printk(KERN_ERR PFX "ISR1 				= 0x%x.\n",
+	       ql_read32(qdev, ISR1));
+	printk(KERN_ERR PFX "ISR2 				= 0x%x.\n",
+	       ql_read32(qdev, ISR2));
+	printk(KERN_ERR PFX "ISR3 				= 0x%x.\n",
+	       ql_read32(qdev, ISR3));
+	printk(KERN_ERR PFX "ISR4 				= 0x%x.\n",
+	       ql_read32(qdev, ISR4));
+	printk(KERN_ERR PFX "REV_ID 			= 0x%x.\n",
+	       ql_read32(qdev, REV_ID));
+	printk(KERN_ERR PFX "FRC_ECC_ERR 			= 0x%x.\n",
+	       ql_read32(qdev, FRC_ECC_ERR));
+	printk(KERN_ERR PFX "ERR_STS 			= 0x%x.\n",
+	       ql_read32(qdev, ERR_STS));
+	printk(KERN_ERR PFX "RAM_DBG_ADDR 			= 0x%x.\n",
+	       ql_read32(qdev, RAM_DBG_ADDR));
+	printk(KERN_ERR PFX "RAM_DBG_DATA 			= 0x%x.\n",
+	       ql_read32(qdev, RAM_DBG_DATA));
+	printk(KERN_ERR PFX "ECC_ERR_CNT 			= 0x%x.\n",
+	       ql_read32(qdev, ECC_ERR_CNT));
+	printk(KERN_ERR PFX "SEM 				= 0x%x.\n",
+	       ql_read32(qdev, SEM));
+	printk(KERN_ERR PFX "GPIO_1 			= 0x%x.\n",
+	       ql_read32(qdev, GPIO_1));
+	printk(KERN_ERR PFX "GPIO_2 			= 0x%x.\n",
+	       ql_read32(qdev, GPIO_2));
+	printk(KERN_ERR PFX "GPIO_3 			= 0x%x.\n",
+	       ql_read32(qdev, GPIO_3));
+	printk(KERN_ERR PFX "XGMAC_ADDR 			= 0x%x.\n",
+	       ql_read32(qdev, XGMAC_ADDR));
+	printk(KERN_ERR PFX "XGMAC_DATA 			= 0x%x.\n",
+	       ql_read32(qdev, XGMAC_DATA));
+	printk(KERN_ERR PFX "NIC_ETS 			= 0x%x.\n",
+	       ql_read32(qdev, NIC_ETS));
+	printk(KERN_ERR PFX "CNA_ETS 			= 0x%x.\n",
+	       ql_read32(qdev, CNA_ETS));
+	printk(KERN_ERR PFX "FLASH_ADDR 			= 0x%x.\n",
+	       ql_read32(qdev, FLASH_ADDR));
+	printk(KERN_ERR PFX "FLASH_DATA 			= 0x%x.\n",
+	       ql_read32(qdev, FLASH_DATA));
+	printk(KERN_ERR PFX "CQ_STOP 			= 0x%x.\n",
+	       ql_read32(qdev, CQ_STOP));
+	printk(KERN_ERR PFX "PAGE_TBL_RID 			= 0x%x.\n",
+	       ql_read32(qdev, PAGE_TBL_RID));
+	printk(KERN_ERR PFX "WQ_PAGE_TBL_LO 		= 0x%x.\n",
+	       ql_read32(qdev, WQ_PAGE_TBL_LO));
+	printk(KERN_ERR PFX "WQ_PAGE_TBL_HI 		= 0x%x.\n",
+	       ql_read32(qdev, WQ_PAGE_TBL_HI));
+	printk(KERN_ERR PFX "CQ_PAGE_TBL_LO 		= 0x%x.\n",
+	       ql_read32(qdev, CQ_PAGE_TBL_LO));
+	printk(KERN_ERR PFX "CQ_PAGE_TBL_HI 		= 0x%x.\n",
+	       ql_read32(qdev, CQ_PAGE_TBL_HI));
+	printk(KERN_ERR PFX "COS_DFLT_CQ1 			= 0x%x.\n",
+	       ql_read32(qdev, COS_DFLT_CQ1));
+	printk(KERN_ERR PFX "COS_DFLT_CQ2 			= 0x%x.\n",
+	       ql_read32(qdev, COS_DFLT_CQ2));
+	printk(KERN_ERR PFX "SPLT_HDR 			= 0x%x.\n",
+	       ql_read32(qdev, SPLT_HDR));
+	printk(KERN_ERR PFX "FC_PAUSE_THRES 		= 0x%x.\n",
+	       ql_read32(qdev, FC_PAUSE_THRES));
+	printk(KERN_ERR PFX "NIC_PAUSE_THRES 		= 0x%x.\n",
+	       ql_read32(qdev, NIC_PAUSE_THRES));
+	printk(KERN_ERR PFX "FC_ETHERTYPE 			= 0x%x.\n",
+	       ql_read32(qdev, FC_ETHERTYPE));
+	printk(KERN_ERR PFX "FC_RCV_CFG 			= 0x%x.\n",
+	       ql_read32(qdev, FC_RCV_CFG));
+	printk(KERN_ERR PFX "NIC_RCV_CFG 			= 0x%x.\n",
+	       ql_read32(qdev, NIC_RCV_CFG));
+	printk(KERN_ERR PFX "FC_COS_TAGS 			= 0x%x.\n",
+	       ql_read32(qdev, FC_COS_TAGS));
+	printk(KERN_ERR PFX "NIC_COS_TAGS 			= 0x%x.\n",
+	       ql_read32(qdev, NIC_COS_TAGS));
+	printk(KERN_ERR PFX "MGMT_RCV_CFG 			= 0x%x.\n",
+	       ql_read32(qdev, MGMT_RCV_CFG));
+	printk(KERN_ERR PFX "XG_SERDES_ADDR 		= 0x%x.\n",
+	       ql_read32(qdev, XG_SERDES_ADDR));
+	printk(KERN_ERR PFX "XG_SERDES_DATA 		= 0x%x.\n",
+	       ql_read32(qdev, XG_SERDES_DATA));
+	printk(KERN_ERR PFX "PRB_MX_ADDR 			= 0x%x.\n",
+	       ql_read32(qdev, PRB_MX_ADDR));
+	printk(KERN_ERR PFX "PRB_MX_DATA 			= 0x%x.\n",
+	       ql_read32(qdev, PRB_MX_DATA));
+	ql_dump_intr_states(qdev);
+	ql_dump_xgmac_control_regs(qdev);
+	ql_dump_ets_regs(qdev);
+	ql_dump_cam_entries(qdev);
+	ql_dump_routing_entries(qdev);
+}
+#endif
+
+#ifdef QL_STAT_DUMP
+void ql_dump_stat(struct ql_adapter *qdev)
+{
+	printk(KERN_ERR "%s: Enter.\n", __func__);
+	printk(KERN_ERR "tx_pkts = %ld\n",
+	       (unsigned long)qdev->nic_stats.tx_pkts);
+	printk(KERN_ERR "tx_bytes = %ld\n",
+	       (unsigned long)qdev->nic_stats.tx_bytes);
+	printk(KERN_ERR "tx_mcast_pkts = %ld.\n",
+	       (unsigned long)qdev->nic_stats.tx_mcast_pkts);
+	printk(KERN_ERR "tx_bcast_pkts = %ld.\n",
+	       (unsigned long)qdev->nic_stats.tx_bcast_pkts);
+	printk(KERN_ERR "tx_ucast_pkts = %ld.\n",
+	       (unsigned long)qdev->nic_stats.tx_ucast_pkts);
+	printk(KERN_ERR "tx_ctl_pkts = %ld.\n",
+	       (unsigned long)qdev->nic_stats.tx_ctl_pkts);
+	printk(KERN_ERR "tx_pause_pkts = %ld.\n",
+	       (unsigned long)qdev->nic_stats.tx_pause_pkts);
+	printk(KERN_ERR "tx_64_pkt = %ld.\n",
+	       (unsigned long)qdev->nic_stats.tx_64_pkt);
+	printk(KERN_ERR "tx_65_to_127_pkt = %ld.\n",
+	       (unsigned long)qdev->nic_stats.tx_65_to_127_pkt);
+	printk(KERN_ERR "tx_128_to_255_pkt = %ld.\n",
+	       (unsigned long)qdev->nic_stats.tx_128_to_255_pkt);
+	printk(KERN_ERR "tx_256_511_pkt = %ld.\n",
+	       (unsigned long)qdev->nic_stats.tx_256_511_pkt);
+	printk(KERN_ERR "tx_512_to_1023_pkt = %ld.\n",
+	       (unsigned long)qdev->nic_stats.tx_512_to_1023_pkt);
+	printk(KERN_ERR "tx_1024_to_1518_pkt = %ld.\n",
+	       (unsigned long)qdev->nic_stats.tx_1024_to_1518_pkt);
+	printk(KERN_ERR "tx_1519_to_max_pkt = %ld.\n",
+	       (unsigned long)qdev->nic_stats.tx_1519_to_max_pkt);
+	printk(KERN_ERR "tx_undersize_pkt = %ld.\n",
+	       (unsigned long)qdev->nic_stats.tx_undersize_pkt);
+	printk(KERN_ERR "tx_oversize_pkt = %ld.\n",
+	       (unsigned long)qdev->nic_stats.tx_oversize_pkt);
+	printk(KERN_ERR "rx_bytes = %ld.\n",
+	       (unsigned long)qdev->nic_stats.rx_bytes);
+	printk(KERN_ERR "rx_bytes_ok = %ld.\n",
+	       (unsigned long)qdev->nic_stats.rx_bytes_ok);
+	printk(KERN_ERR "rx_pkts = %ld.\n",
+	       (unsigned long)qdev->nic_stats.rx_pkts);
+	printk(KERN_ERR "rx_pkts_ok = %ld.\n",
+	       (unsigned long)qdev->nic_stats.rx_pkts_ok);
+	printk(KERN_ERR "rx_bcast_pkts = %ld.\n",
+	       (unsigned long)qdev->nic_stats.rx_bcast_pkts);
+	printk(KERN_ERR "rx_mcast_pkts = %ld.\n",
+	       (unsigned long)qdev->nic_stats.rx_mcast_pkts);
+	printk(KERN_ERR "rx_ucast_pkts = %ld.\n",
+	       (unsigned long)qdev->nic_stats.rx_ucast_pkts);
+	printk(KERN_ERR "rx_undersize_pkts = %ld.\n",
+	       (unsigned long)qdev->nic_stats.rx_undersize_pkts);
+	printk(KERN_ERR "rx_oversize_pkts = %ld.\n",
+	       (unsigned long)qdev->nic_stats.rx_oversize_pkts);
+	printk(KERN_ERR "rx_jabber_pkts = %ld.\n",
+	       (unsigned long)qdev->nic_stats.rx_jabber_pkts);
+	printk(KERN_ERR "rx_undersize_fcerr_pkts = %ld.\n",
+	       (unsigned long)qdev->nic_stats.rx_undersize_fcerr_pkts);
+	printk(KERN_ERR "rx_drop_events = %ld.\n",
+	       (unsigned long)qdev->nic_stats.rx_drop_events);
+	printk(KERN_ERR "rx_fcerr_pkts = %ld.\n",
+	       (unsigned long)qdev->nic_stats.rx_fcerr_pkts);
+	printk(KERN_ERR "rx_align_err = %ld.\n",
+	       (unsigned long)qdev->nic_stats.rx_align_err);
+	printk(KERN_ERR "rx_symbol_err = %ld.\n",
+	       (unsigned long)qdev->nic_stats.rx_symbol_err);
+	printk(KERN_ERR "rx_mac_err = %ld.\n",
+	       (unsigned long)qdev->nic_stats.rx_mac_err);
+	printk(KERN_ERR "rx_ctl_pkts = %ld.\n",
+	       (unsigned long)qdev->nic_stats.rx_ctl_pkts);
+	printk(KERN_ERR "rx_pause_pkts = %ld.\n",
+	       (unsigned long)qdev->nic_stats.rx_pause_pkts);
+	printk(KERN_ERR "rx_64_pkts = %ld.\n",
+	       (unsigned long)qdev->nic_stats.rx_64_pkts);
+	printk(KERN_ERR "rx_65_to_127_pkts = %ld.\n",
+	       (unsigned long)qdev->nic_stats.rx_65_to_127_pkts);
+	printk(KERN_ERR "rx_128_255_pkts = %ld.\n",
+	       (unsigned long)qdev->nic_stats.rx_128_255_pkts);
+	printk(KERN_ERR "rx_256_511_pkts = %ld.\n",
+	       (unsigned long)qdev->nic_stats.rx_256_511_pkts);
+	printk(KERN_ERR "rx_512_to_1023_pkts = %ld.\n",
+	       (unsigned long)qdev->nic_stats.rx_512_to_1023_pkts);
+	printk(KERN_ERR "rx_1024_to_1518_pkts = %ld.\n",
+	       (unsigned long)qdev->nic_stats.rx_1024_to_1518_pkts);
+	printk(KERN_ERR "rx_1519_to_max_pkts = %ld.\n",
+	       (unsigned long)qdev->nic_stats.rx_1519_to_max_pkts);
+	printk(KERN_ERR "rx_len_err_pkts = %ld.\n",
+	       (unsigned long)qdev->nic_stats.rx_len_err_pkts);
+}
+#endif
+
+#ifdef QL_DEV_DUMP
+void ql_dump_qdev(struct ql_adapter *qdev)
+{
+	int i;
+	printk(KERN_ERR PFX "qdev->flags 			= %lx.\n",
+	       qdev->flags);
+	printk(KERN_ERR PFX "qdev->vlgrp 			= %p.\n",
+	       qdev->vlgrp);
+	printk(KERN_ERR PFX "qdev->pdev 			= %p.\n",
+	       qdev->pdev);
+	printk(KERN_ERR PFX "qdev->ndev 			= %p.\n",
+	       qdev->ndev);
+	printk(KERN_ERR PFX "qdev->chip_rev_id 		= %d.\n",
+	       qdev->chip_rev_id);
+	printk(KERN_ERR PFX "qdev->reg_base 		= %p.\n",
+	       qdev->reg_base);
+	printk(KERN_ERR PFX "qdev->doorbell_area 	= %p.\n",
+	       qdev->doorbell_area);
+	printk(KERN_ERR PFX "qdev->doorbell_area_size 	= %d.\n",
+	       qdev->doorbell_area_size);
+	printk(KERN_ERR PFX "msg_enable 		= %x.\n",
+	       qdev->msg_enable);
+	printk(KERN_ERR PFX "qdev->rx_ring_shadow_reg_area	= %p.\n",
+	       qdev->rx_ring_shadow_reg_area);
+	printk(KERN_ERR PFX "qdev->rx_ring_shadow_reg_dma 	= %p.\n",
+	       (void *)qdev->rx_ring_shadow_reg_dma);
+	printk(KERN_ERR PFX "qdev->tx_ring_shadow_reg_area	= %p.\n",
+	       qdev->tx_ring_shadow_reg_area);
+	printk(KERN_ERR PFX "qdev->tx_ring_shadow_reg_dma	= %p.\n",
+	       (void *)qdev->tx_ring_shadow_reg_dma);
+	printk(KERN_ERR PFX "qdev->intr_count 		= %d.\n",
+	       qdev->intr_count);
+	if (qdev->msi_x_entry)
+		for (i = 0; i < qdev->intr_count; i++) {
+			printk(KERN_ERR PFX
+			       "msi_x_entry.[%d]vector	= %d.\n", i,
+			       qdev->msi_x_entry[i].vector);
+			printk(KERN_ERR PFX
+			       "msi_x_entry.[%d]entry	= %d.\n", i,
+			       qdev->msi_x_entry[i].entry);
+		}
+	for (i = 0; i < qdev->intr_count; i++) {
+		printk(KERN_ERR PFX
+		       "intr_context[%d].qdev		= %p.\n", i,
+		       qdev->intr_context[i].qdev);
+		printk(KERN_ERR PFX
+		       "intr_context[%d].intr		= %d.\n", i,
+		       qdev->intr_context[i].intr);
+		printk(KERN_ERR PFX
+		       "intr_context[%d].hooked		= %d.\n", i,
+		       qdev->intr_context[i].hooked);
+		printk(KERN_ERR PFX
+		       "intr_context[%d].intr_en_mask	= 0x%08x.\n", i,
+		       qdev->intr_context[i].intr_en_mask);
+		printk(KERN_ERR PFX
+		       "intr_context[%d].intr_dis_mask	= 0x%08x.\n", i,
+		       qdev->intr_context[i].intr_dis_mask);
+		printk(KERN_ERR PFX
+		       "intr_context[%d].intr_read_mask	= 0x%08x.\n", i,
+		       qdev->intr_context[i].intr_read_mask);
+	}
+	printk(KERN_ERR PFX "qdev->tx_ring_count = %d.\n", qdev->tx_ring_count);
+	printk(KERN_ERR PFX "qdev->rx_ring_count = %d.\n", qdev->rx_ring_count);
+	printk(KERN_ERR PFX "qdev->ring_mem_size = %d.\n", qdev->ring_mem_size);
+	printk(KERN_ERR PFX "qdev->ring_mem 	= %p.\n", qdev->ring_mem);
+	printk(KERN_ERR PFX "qdev->intr_count 	= %d.\n", qdev->intr_count);
+	printk(KERN_ERR PFX "qdev->tx_ring		= %p.\n",
+	       qdev->tx_ring);
+	printk(KERN_ERR PFX "qdev->rss_ring_first_cq_id 	= %d.\n",
+	       qdev->rss_ring_first_cq_id);
+	printk(KERN_ERR PFX "qdev->rss_ring_count 	= %d.\n",
+	       qdev->rss_ring_count);
+	printk(KERN_ERR PFX "qdev->rx_ring	= %p.\n", qdev->rx_ring);
+	printk(KERN_ERR PFX "qdev->default_rx_queue	= %d.\n",
+	       qdev->default_rx_queue);
+	printk(KERN_ERR PFX "qdev->xg_sem_mask		= 0x%08x.\n",
+	       qdev->xg_sem_mask);
+	printk(KERN_ERR PFX "qdev->port_link_up		= 0x%08x.\n",
+	       qdev->port_link_up);
+	printk(KERN_ERR PFX "qdev->port_init		= 0x%08x.\n",
+	       qdev->port_init);
+}
+#endif
+
+#ifdef QL_CB_DUMP
+void ql_dump_wqicb(struct wqicb *wqicb)
+{
+	printk(KERN_ERR PFX "Dumping wqicb stuff...\n");
+	printk(KERN_ERR PFX "wqicb->len = 0x%x.\n", le16_to_cpu(wqicb->len));
+	printk(KERN_ERR PFX "wqicb->flags = %x.\n", le16_to_cpu(wqicb->flags));
+	printk(KERN_ERR PFX "wqicb->cq_id_rss = %d.\n",
+	       le16_to_cpu(wqicb->cq_id_rss));
+	printk(KERN_ERR PFX "wqicb->rid = 0x%x.\n", le16_to_cpu(wqicb->rid));
+	printk(KERN_ERR PFX "wqicb->wq_addr_lo = 0x%.08x.\n",
+	       le32_to_cpu(wqicb->addr_lo));
+	printk(KERN_ERR PFX "wqicb->wq_addr_hi = 0x%.08x.\n",
+	       le32_to_cpu(wqicb->addr_hi));
+	printk(KERN_ERR PFX "wqicb->wq_cnsmr_idx_addr_lo = 0x%.08x.\n",
+	       le32_to_cpu(wqicb->cnsmr_idx_addr_lo));
+	printk(KERN_ERR PFX "wqicb->wq_cnsmr_idx_addr_hi = 0x%.08x.\n",
+	       le32_to_cpu(wqicb->cnsmr_idx_addr_hi));
+}
+
+void ql_dump_tx_ring(struct tx_ring *tx_ring)
+{
+	if (tx_ring == NULL)
+		return;
+	printk(KERN_ERR PFX
+	       "===================== Dumping tx_ring %d ===============.\n",
+	       tx_ring->wq_id);
+	printk(KERN_ERR PFX "tx_ring->base = %p.\n", tx_ring->wq_base);
+	printk(KERN_ERR PFX "tx_ring->base_dma = 0x%llx.\n",
+	       (u64) tx_ring->wq_base_dma);
+	printk(KERN_ERR PFX "tx_ring->cnsmr_idx_sh_reg = %p.\n",
+	       tx_ring->cnsmr_idx_sh_reg);
+	printk(KERN_ERR PFX "tx_ring->cnsmr_idx_sh_reg_dma = 0x%llx.\n",
+	       (u64) tx_ring->cnsmr_idx_sh_reg_dma);
+	printk(KERN_ERR PFX "tx_ring->size = %d.\n", tx_ring->wq_size);
+	printk(KERN_ERR PFX "tx_ring->len = %d.\n", tx_ring->wq_len);
+	printk(KERN_ERR PFX "tx_ring->prod_idx_db_reg = %p.\n",
+	       tx_ring->prod_idx_db_reg);
+	printk(KERN_ERR PFX "tx_ring->valid_db_reg = %p.\n",
+	       tx_ring->valid_db_reg);
+	printk(KERN_ERR PFX "tx_ring->prod_idx = %d.\n", tx_ring->prod_idx);
+	printk(KERN_ERR PFX "tx_ring->cq_id = %d.\n", tx_ring->cq_id);
+	printk(KERN_ERR PFX "tx_ring->wq_id = %d.\n", tx_ring->wq_id);
+	printk(KERN_ERR PFX "tx_ring->q = %p.\n", tx_ring->q);
+	printk(KERN_ERR PFX "tx_ring->tx_count = %d.\n",
+	       atomic_read(&tx_ring->tx_count));
+}
+
+void ql_dump_ricb(struct ricb *ricb)
+{
+	int i;
+	printk(KERN_ERR PFX
+	       "===================== Dumping ricb ===============.\n");
+	printk(KERN_ERR PFX "Dumping ricb stuff...\n");
+
+	printk(KERN_ERR PFX "ricb->base_cq = %d.\n", ricb->base_cq & 0x1f);
+	printk(KERN_ERR PFX "ricb->flags = %s%s%s%s%s%s%s%s%s.\n",
+	       ricb->base_cq & RSS_L4K ? "RSS_L4K " : "",
+	       ricb->flags & RSS_L6K ? "RSS_L6K " : "",
+	       ricb->flags & RSS_LI ? "RSS_LI " : "",
+	       ricb->flags & RSS_LB ? "RSS_LB " : "",
+	       ricb->flags & RSS_LM ? "RSS_LM " : "",
+	       ricb->flags & RSS_RI4 ? "RSS_RI4 " : "",
+	       ricb->flags & RSS_RT4 ? "RSS_RT4 " : "",
+	       ricb->flags & RSS_RI6 ? "RSS_RI6 " : "",
+	       ricb->flags & RSS_RT6 ? "RSS_RT6 " : "");
+	printk(KERN_ERR PFX "ricb->mask = 0x%.04x.\n", le16_to_cpu(ricb->mask));
+	for (i = 0; i < 16; i++)
+		printk(KERN_ERR PFX "ricb->hash_cq_id[%d] = 0x%.08x.\n", i,
+		       le32_to_cpu(ricb->hash_cq_id[i]));
+	for (i = 0; i < 10; i++)
+		printk(KERN_ERR PFX "ricb->ipv6_hash_key[%d] = 0x%.08x.\n", i,
+		       le32_to_cpu(ricb->ipv6_hash_key[i]));
+	for (i = 0; i < 4; i++)
+		printk(KERN_ERR PFX "ricb->ipv4_hash_key[%d] = 0x%.08x.\n", i,
+		       le32_to_cpu(ricb->ipv4_hash_key[i]));
+}
+
+void ql_dump_cqicb(struct cqicb *cqicb)
+{
+	printk(KERN_ERR PFX "Dumping cqicb stuff...\n");
+
+	printk(KERN_ERR PFX "cqicb->msix_vect = %d.\n", cqicb->msix_vect);
+	printk(KERN_ERR PFX "cqicb->flags = %x.\n", cqicb->flags);
+	printk(KERN_ERR PFX "cqicb->len = %d.\n", le16_to_cpu(cqicb->len));
+	printk(KERN_ERR PFX "cqicb->addr_lo = %x.\n",
+	       le32_to_cpu(cqicb->addr_lo));
+	printk(KERN_ERR PFX "cqicb->addr_hi = %x.\n",
+	       le32_to_cpu(cqicb->addr_hi));
+	printk(KERN_ERR PFX "cqicb->prod_idx_addr_lo = %x.\n",
+	       le32_to_cpu(cqicb->prod_idx_addr_lo));
+	printk(KERN_ERR PFX "cqicb->prod_idx_addr_hi = %x.\n",
+	       le32_to_cpu(cqicb->prod_idx_addr_hi));
+	printk(KERN_ERR PFX "cqicb->pkt_delay = 0x%.04x.\n",
+	       le16_to_cpu(cqicb->pkt_delay));
+	printk(KERN_ERR PFX "cqicb->irq_delay = 0x%.04x.\n",
+	       le16_to_cpu(cqicb->irq_delay));
+	printk(KERN_ERR PFX "cqicb->lbq_addr_lo = %x.\n",
+	       le32_to_cpu(cqicb->lbq_addr_lo));
+	printk(KERN_ERR PFX "cqicb->lbq_addr_hi = %x.\n",
+	       le32_to_cpu(cqicb->lbq_addr_hi));
+	printk(KERN_ERR PFX "cqicb->lbq_buf_size = 0x%.04x.\n",
+	       le16_to_cpu(cqicb->lbq_buf_size));
+	printk(KERN_ERR PFX "cqicb->lbq_len = 0x%.04x.\n",
+	       le16_to_cpu(cqicb->lbq_len));
+	printk(KERN_ERR PFX "cqicb->sbq_addr_lo = %x.\n",
+	       le32_to_cpu(cqicb->sbq_addr_lo));
+	printk(KERN_ERR PFX "cqicb->sbq_addr_hi = %x.\n",
+	       le32_to_cpu(cqicb->sbq_addr_hi));
+	printk(KERN_ERR PFX "cqicb->sbq_buf_size = 0x%.04x.\n",
+	       le16_to_cpu(cqicb->sbq_buf_size));
+	printk(KERN_ERR PFX "cqicb->sbq_len = 0x%.04x.\n",
+	       le16_to_cpu(cqicb->sbq_len));
+}
+
+void ql_dump_rx_ring(struct rx_ring *rx_ring)
+{
+	if (rx_ring == NULL)
+		return;
+	printk(KERN_ERR PFX
+	       "===================== Dumping rx_ring %d ===============.\n",
+	       rx_ring->cq_id);
+	printk(KERN_ERR PFX "Dumping rx_ring %d, type = %s%s%s.\n",
+	       rx_ring->cq_id, rx_ring->type == DEFAULT_Q ? "DEFAULT" : "",
+	       rx_ring->type == TX_Q ? "OUTBOUND COMPLETIONS" : "",
+	       rx_ring->type == RX_Q ? "INBOUND_COMPLETIONS" : "");
+	printk(KERN_ERR PFX "rx_ring->cqicb = %p.\n", &rx_ring->cqicb);
+	printk(KERN_ERR PFX "rx_ring->cq_base = %p.\n", rx_ring->cq_base);
+	printk(KERN_ERR PFX "rx_ring->cq_base_dma = %llx.\n",
+	       (u64) rx_ring->cq_base_dma);
+	printk(KERN_ERR PFX "rx_ring->cq_size = %d.\n", rx_ring->cq_size);
+	printk(KERN_ERR PFX "rx_ring->cq_len = %d.\n", rx_ring->cq_len);
+	printk(KERN_ERR PFX
+	       "rx_ring->prod_idx_sh_reg, addr = %p, value = %d.\n",
+	       rx_ring->prod_idx_sh_reg,
+	       rx_ring->prod_idx_sh_reg ? *(rx_ring->prod_idx_sh_reg) : 0);
+	printk(KERN_ERR PFX "rx_ring->prod_idx_sh_reg_dma = %llx.\n",
+	       (u64) rx_ring->prod_idx_sh_reg_dma);
+	printk(KERN_ERR PFX "rx_ring->cnsmr_idx_db_reg = %p.\n",
+	       rx_ring->cnsmr_idx_db_reg);
+	printk(KERN_ERR PFX "rx_ring->cnsmr_idx = %d.\n", rx_ring->cnsmr_idx);
+	printk(KERN_ERR PFX "rx_ring->curr_entry = %p.\n", rx_ring->curr_entry);
+	printk(KERN_ERR PFX "rx_ring->valid_db_reg = %p.\n",
+	       rx_ring->valid_db_reg);
+
+	printk(KERN_ERR PFX "rx_ring->lbq_base = %p.\n", rx_ring->lbq_base);
+	printk(KERN_ERR PFX "rx_ring->lbq_base_dma = %llx.\n",
+	       (u64) rx_ring->lbq_base_dma);
+	printk(KERN_ERR PFX "rx_ring->lbq_base_indirect = %p.\n",
+	       rx_ring->lbq_base_indirect);
+	printk(KERN_ERR PFX "rx_ring->lbq_base_indirect_dma = %llx.\n",
+	       (u64) rx_ring->lbq_base_indirect_dma);
+	printk(KERN_ERR PFX "rx_ring->lbq = %p.\n", rx_ring->lbq);
+	printk(KERN_ERR PFX "rx_ring->lbq_len = %d.\n", rx_ring->lbq_len);
+	printk(KERN_ERR PFX "rx_ring->lbq_size = %d.\n", rx_ring->lbq_size);
+	printk(KERN_ERR PFX "rx_ring->lbq_prod_idx_db_reg = %p.\n",
+	       rx_ring->lbq_prod_idx_db_reg);
+	printk(KERN_ERR PFX "rx_ring->lbq_prod_idx = %d.\n",
+	       rx_ring->lbq_prod_idx);
+	printk(KERN_ERR PFX "rx_ring->lbq_curr_idx = %d.\n",
+	       rx_ring->lbq_curr_idx);
+	printk(KERN_ERR PFX "rx_ring->lbq_clean_idx = %d.\n",
+	       rx_ring->lbq_clean_idx);
+	printk(KERN_ERR PFX "rx_ring->lbq_free_cnt = %d.\n",
+	       rx_ring->lbq_free_cnt);
+	printk(KERN_ERR PFX "rx_ring->lbq_buf_size = %d.\n",
+	       rx_ring->lbq_buf_size);
+
+	printk(KERN_ERR PFX "rx_ring->sbq_base = %p.\n", rx_ring->sbq_base);
+	printk(KERN_ERR PFX "rx_ring->sbq_base_dma = %llx.\n",
+	       (u64) rx_ring->sbq_base_dma);
+	printk(KERN_ERR PFX "rx_ring->sbq_base_indirect = %p.\n",
+	       rx_ring->sbq_base_indirect);
+	printk(KERN_ERR PFX "rx_ring->sbq_base_indirect_dma = %llx.\n",
+	       (u64) rx_ring->sbq_base_indirect_dma);
+	printk(KERN_ERR PFX "rx_ring->sbq = %p.\n", rx_ring->sbq);
+	printk(KERN_ERR PFX "rx_ring->sbq_len = %d.\n", rx_ring->sbq_len);
+	printk(KERN_ERR PFX "rx_ring->sbq_size = %d.\n", rx_ring->sbq_size);
+	printk(KERN_ERR PFX "rx_ring->sbq_prod_idx_db_reg addr = %p.\n",
+	       rx_ring->sbq_prod_idx_db_reg);
+	printk(KERN_ERR PFX "rx_ring->sbq_prod_idx = %d.\n",
+	       rx_ring->sbq_prod_idx);
+	printk(KERN_ERR PFX "rx_ring->sbq_curr_idx = %d.\n",
+	       rx_ring->sbq_curr_idx);
+	printk(KERN_ERR PFX "rx_ring->sbq_clean_idx = %d.\n",
+	       rx_ring->sbq_clean_idx);
+	printk(KERN_ERR PFX "rx_ring->sbq_free_cnt = %d.\n",
+	       rx_ring->sbq_free_cnt);
+	printk(KERN_ERR PFX "rx_ring->sbq_buf_size = %d.\n",
+	       rx_ring->sbq_buf_size);
+	printk(KERN_ERR PFX "rx_ring->cq_id = %d.\n", rx_ring->cq_id);
+	printk(KERN_ERR PFX "rx_ring->irq = %d.\n", rx_ring->irq);
+	printk(KERN_ERR PFX "rx_ring->cpu = %d.\n", rx_ring->cpu);
+	printk(KERN_ERR PFX "rx_ring->qdev = %p.\n", rx_ring->qdev);
+}
+
+void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id)
+{
+	void *ptr;
+
+	printk(KERN_ERR PFX "%s: Enter.\n", __func__);
+
+	ptr = kmalloc(size, GFP_ATOMIC);
+	if (ptr == NULL) {
+		printk(KERN_ERR PFX "%s: Couldn't allocate a buffer.\n",
+		       __func__);
+		return;
+	}
+
+	if (ql_write_cfg(qdev, ptr, size, bit, q_id)) {
+		printk(KERN_ERR "%s: Failed to upload control block!\n",
+		       __func__);
+		goto fail_it;
+	}
+	switch (bit) {
+	case CFG_DRQ:
+		ql_dump_wqicb((struct wqicb *)ptr);
+		break;
+	case CFG_DCQ:
+		ql_dump_cqicb((struct cqicb *)ptr);
+		break;
+	case CFG_DR:
+		ql_dump_ricb((struct ricb *)ptr);
+		break;
+	default:
+		printk(KERN_ERR PFX "%s: Invalid bit value = %x.\n",
+		       __func__, bit);
+		break;
+	}
+fail_it:
+	kfree(ptr);
+}
+#endif
+
+#ifdef QL_OB_DUMP
+void ql_dump_tx_desc(struct tx_buf_desc *tbd)
+{
+	int i;
+
+	/* Dump the first three buffer descriptors of the IOCB. */
+	for (i = 0; i < 3; i++, tbd++) {
+		u32 len = le32_to_cpu(tbd->len);
+
+		printk(KERN_ERR PFX "tbd->addr  = 0x%llx\n",
+		       (unsigned long long)le64_to_cpu(tbd->addr));
+		printk(KERN_ERR PFX "tbd->len   = %d\n",
+		       len & TX_DESC_LEN_MASK);
+		printk(KERN_ERR PFX "tbd->flags = %s %s\n",
+		       len & TX_DESC_C ? "C" : ".",
+		       len & TX_DESC_E ? "E" : ".");
+	}
+}
+
+void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb)
+{
+	struct ob_mac_tso_iocb_req *ob_mac_tso_iocb =
+	    (struct ob_mac_tso_iocb_req *)ob_mac_iocb;
+	struct tx_buf_desc *tbd;
+	u16 frame_len;
+
+	printk(KERN_ERR PFX "%s\n", __func__);
+	printk(KERN_ERR PFX "opcode         = %s\n",
+	       (ob_mac_iocb->opcode == OPCODE_OB_MAC_IOCB) ? "MAC" : "TSO");
+	printk(KERN_ERR PFX "flags1          = %s %s %s %s %s\n",
+	       ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_OI ? "OI" : "",
+	       ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_I ? "I" : "",
+	       ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_D ? "D" : "",
+	       ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP4 ? "IP4" : "",
+	       ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP6 ? "IP6" : "");
+	printk(KERN_ERR PFX "flags2          = %s %s %s\n",
+	       ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_LSO ? "LSO" : "",
+	       ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_UC ? "UC" : "",
+	       ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_TC ? "TC" : "");
+	printk(KERN_ERR PFX "flags3          = %s %s %s \n",
+	       ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_IC ? "IC" : "",
+	       ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_DFP ? "DFP" : "",
+	       ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_V ? "V" : "");
+	printk(KERN_ERR PFX "tid = %x\n", ob_mac_iocb->tid);
+	printk(KERN_ERR PFX "txq_idx = %d\n", ob_mac_iocb->txq_idx);
+	printk(KERN_ERR PFX "vlan_tci      = %x\n", ob_mac_tso_iocb->vlan_tci);
+	if (ob_mac_iocb->opcode == OPCODE_OB_MAC_TSO_IOCB) {
+		printk(KERN_ERR PFX "frame_len      = %d\n",
+		       le32_to_cpu(ob_mac_tso_iocb->frame_len));
+		printk(KERN_ERR PFX "mss      = %d\n",
+		       le16_to_cpu(ob_mac_tso_iocb->mss));
+		printk(KERN_ERR PFX "prot_hdr_len   = %d\n",
+		       le16_to_cpu(ob_mac_tso_iocb->total_hdrs_len));
+		printk(KERN_ERR PFX "hdr_offset     = 0x%.04x\n",
+		       le16_to_cpu(ob_mac_tso_iocb->net_trans_offset));
+		frame_len = le32_to_cpu(ob_mac_tso_iocb->frame_len);
+	} else {
+		printk(KERN_ERR PFX "frame_len      = %d\n",
+		       le16_to_cpu(ob_mac_iocb->frame_len));
+		frame_len = le16_to_cpu(ob_mac_iocb->frame_len);
+	}
+	tbd = &ob_mac_iocb->tbd[0];
+	ql_dump_tx_desc(tbd);
+}
+
+void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp)
+{
+	printk(KERN_ERR PFX "%s\n", __func__);
+	printk(KERN_ERR PFX "opcode         = %d\n", ob_mac_rsp->opcode);
+	printk(KERN_ERR PFX "flags          = %s %s %s %s %s %s %s\n",
+	       ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_OI ? "OI" : ".",
+	       ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_I ? "I" : ".",
+	       ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_E ? "E" : ".",
+	       ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_S ? "S" : ".",
+	       ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_L ? "L" : ".",
+	       ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_P ? "P" : ".",
+	       ob_mac_rsp->flags2 & OB_MAC_IOCB_RSP_B ? "B" : ".");
+	printk(KERN_ERR PFX "tid = %x\n", ob_mac_rsp->tid);
+}
+#endif
+
+#ifdef QL_IB_DUMP
+void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp)
+{
+	printk(KERN_ERR PFX "%s\n", __func__);
+	printk(KERN_ERR PFX "opcode         = 0x%x\n", ib_mac_rsp->opcode);
+	printk(KERN_ERR PFX "flags1 = %s%s%s%s%s%s\n",
+	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_OI ? "OI " : "",
+	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_I ? "I " : "",
+	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_TE ? "TE " : "",
+	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_NU ? "NU " : "",
+	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_IE ? "IE " : "",
+	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_B ? "B " : "");
+
+	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK)
+		printk(KERN_ERR PFX "%s%s%s Multicast.\n",
+		       (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+		       IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
+		       (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+		       IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
+		       (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+		       IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
+
+	printk(KERN_ERR PFX "flags2 = %s%s%s%s%s\n",
+	       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) ? "P " : "",
+	       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ? "V " : "",
+	       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) ? "U " : "",
+	       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) ? "T " : "",
+	       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_FO) ? "FO " : "");
+
+	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK)
+		printk(KERN_ERR PFX "%s%s%s%s%s error.\n",
+		       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
+		       IB_MAC_IOCB_RSP_ERR_OVERSIZE ? "oversize" : "",
+		       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
+		       IB_MAC_IOCB_RSP_ERR_UNDERSIZE ? "undersize" : "",
+		       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
+		       IB_MAC_IOCB_RSP_ERR_PREAMBLE ? "preamble" : "",
+		       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
+		       IB_MAC_IOCB_RSP_ERR_FRAME_LEN ? "frame length" : "",
+		       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
+		       IB_MAC_IOCB_RSP_ERR_CRC ? "CRC" : "");
+
+	printk(KERN_ERR PFX "flags3 = %s%s.\n",
+	       ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS ? "DS " : "",
+	       ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL ? "DL " : "");
+
+	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK)
+		printk(KERN_ERR PFX "RSS flags = %s%s%s%s.\n",
+		       ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
+			IB_MAC_IOCB_RSP_M_IPV4) ? "IPv4 RSS" : "",
+		       ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
+			IB_MAC_IOCB_RSP_M_IPV6) ? "IPv6 RSS " : "",
+		       ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
+			IB_MAC_IOCB_RSP_M_TCP_V4) ? "TCP/IPv4 RSS" : "",
+		       ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
+			IB_MAC_IOCB_RSP_M_TCP_V6) ? "TCP/IPv6 RSS" : "");
+
+	printk(KERN_ERR PFX "data_len	= %d\n",
+	       le32_to_cpu(ib_mac_rsp->data_len));
+	printk(KERN_ERR PFX "data_addr_hi    = 0x%x\n",
+	       le32_to_cpu(ib_mac_rsp->data_addr_hi));
+	printk(KERN_ERR PFX "data_addr_lo    = 0x%x\n",
+	       le32_to_cpu(ib_mac_rsp->data_addr_lo));
+	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK)
+		printk(KERN_ERR PFX "rss    = %x\n",
+		       le32_to_cpu(ib_mac_rsp->rss));
+	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V)
+		printk(KERN_ERR PFX "vlan_id    = %x\n",
+		       le16_to_cpu(ib_mac_rsp->vlan_id));
+
+	printk(KERN_ERR PFX "flags4 = %s%s%s.\n",
+	       le32_to_cpu(ib_mac_rsp->
+			   flags4) & IB_MAC_IOCB_RSP_HV ? "HV " : "",
+	       le32_to_cpu(ib_mac_rsp->
+			   flags4) & IB_MAC_IOCB_RSP_HS ? "HS " : "",
+	       le32_to_cpu(ib_mac_rsp->
+			   flags4) & IB_MAC_IOCB_RSP_HL ? "HL " : "");
+
+	if (le32_to_cpu(ib_mac_rsp->flags4) & IB_MAC_IOCB_RSP_HV) {
+		printk(KERN_ERR PFX "hdr length	= %d.\n",
+		       le32_to_cpu(ib_mac_rsp->hdr_len));
+		printk(KERN_ERR PFX "hdr addr_hi    = 0x%x.\n",
+		       le32_to_cpu(ib_mac_rsp->hdr_addr_hi));
+		printk(KERN_ERR PFX "hdr addr_lo    = 0x%x.\n",
+		       le32_to_cpu(ib_mac_rsp->hdr_addr_lo));
+	}
+}
+#endif
+
+#ifdef QL_ALL_DUMP
+void ql_dump_all(struct ql_adapter *qdev)
+{
+	int i;
+
+	QL_DUMP_REGS(qdev);
+	QL_DUMP_QDEV(qdev);
+	for (i = 0; i < qdev->tx_ring_count; i++) {
+		QL_DUMP_TX_RING(&qdev->tx_ring[i]);
+		QL_DUMP_WQICB((struct wqicb *)&qdev->tx_ring[i]);
+	}
+	for (i = 0; i < qdev->rx_ring_count; i++) {
+		QL_DUMP_RX_RING(&qdev->rx_ring[i]);
+		QL_DUMP_CQICB((struct cqicb *)&qdev->rx_ring[i]);
+	}
+}
+#endif
diff --git a/drivers/net/qlge/qlge_ethtool.c b/drivers/net/qlge/qlge_ethtool.c
new file mode 100644
index 000000000000..6457f8c4fdaa
--- /dev/null
+++ b/drivers/net/qlge/qlge_ethtool.c
@@ -0,0 +1,415 @@
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/pagemap.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/dmapool.h>
+#include <linux/mempool.h>
+#include <linux/spinlock.h>
+#include <linux/kthread.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <net/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/if_arp.h>
+#include <linux/if_ether.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/skbuff.h>
+#include <linux/rtnetlink.h>
+#include <linux/if_vlan.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+
+#include <linux/version.h>
+
+#include "qlge.h"
+
+static int ql_update_ring_coalescing(struct ql_adapter *qdev)
+{
+	int i, status = 0;
+	struct rx_ring *rx_ring;
+	struct cqicb *cqicb;
+
+	if (!netif_running(qdev->ndev))
+		return status;
+
+	spin_lock(&qdev->hw_lock);
+	/* Skip the default queue, and update the outbound handler
+	 * queues if they changed.
+	 */
+	cqicb = (struct cqicb *)&qdev->rx_ring[1];
+	if (le16_to_cpu(cqicb->irq_delay) != qdev->tx_coalesce_usecs ||
+	    le16_to_cpu(cqicb->pkt_delay) != qdev->tx_max_coalesced_frames) {
+		for (i = 1; i < qdev->rss_ring_first_cq_id; i++) {
+			rx_ring = &qdev->rx_ring[i];
+			cqicb = (struct cqicb *)rx_ring;
+			cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
+			cqicb->pkt_delay =
+			    cpu_to_le16(qdev->tx_max_coalesced_frames);
+			cqicb->flags = FLAGS_LI;
+			status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb),
+						CFG_LCQ, rx_ring->cq_id);
+			if (status) {
+				QPRINTK(qdev, IFUP, ERR,
+					"Failed to load CQICB.\n");
+				goto exit;
+			}
+		}
+	}
+
+	/* Update the inbound (RSS) handler queues if they changed. */
+	cqicb = (struct cqicb *)&qdev->rx_ring[qdev->rss_ring_first_cq_id];
+	if (le16_to_cpu(cqicb->irq_delay) != qdev->rx_coalesce_usecs ||
+	    le16_to_cpu(cqicb->pkt_delay) != qdev->rx_max_coalesced_frames) {
+		for (i = qdev->rss_ring_first_cq_id;
+		     i < qdev->rss_ring_first_cq_id + qdev->rss_ring_count;
+		     i++) {
+			rx_ring = &qdev->rx_ring[i];
+			cqicb = (struct cqicb *)rx_ring;
+			cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
+			cqicb->pkt_delay =
+			    cpu_to_le16(qdev->rx_max_coalesced_frames);
+			cqicb->flags = FLAGS_LI;
+			status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb),
+						CFG_LCQ, rx_ring->cq_id);
+			if (status) {
+				QPRINTK(qdev, IFUP, ERR,
+					"Failed to load CQICB.\n");
+				goto exit;
+			}
+		}
+	}
+exit:
+	spin_unlock(&qdev->hw_lock);
+	return status;
+}
+
+void ql_update_stats(struct ql_adapter *qdev)
+{
+	u32 i;
+	u64 data;
+	u64 *iter = &qdev->nic_stats.tx_pkts;
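+	/* Note: this walk assumes the fields of struct nic_stats are
+	 * declared in exactly the order the XGMAC exposes them at
+	 * 0x200-0x278 (TX) and 0x300-0x3c8 (RX).
+	 */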
+
+	spin_lock(&qdev->stats_lock);
+	if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) {
+		QPRINTK(qdev, DRV, ERR,
+			"Couldn't get xgmac sem.\n");
+		goto quit;
+	}
+	/*
+	 * Get TX statistics.
+	 */
+	for (i = 0x200; i < 0x280; i += 8) {
+		if (ql_read_xgmac_reg64(qdev, i, &data)) {
+			QPRINTK(qdev, DRV, ERR,
+				"Error reading status register 0x%.04x.\n", i);
+			goto end;
+		} else
+			*iter = data;
+		iter++;
+	}
+
+	/*
+	 * Get RX statistics.
+	 */
+	for (i = 0x300; i < 0x3d0; i += 8) {
+		if (ql_read_xgmac_reg64(qdev, i, &data)) {
+			QPRINTK(qdev, DRV, ERR,
+				"Error reading status register 0x%.04x.\n", i);
+			goto end;
+		} else
+			*iter = data;
+		iter++;
+	}
+
+end:
+	ql_sem_unlock(qdev, qdev->xg_sem_mask);
+quit:
+	spin_unlock(&qdev->stats_lock);
+
+	QL_DUMP_STAT(qdev);
+
+	return;
+}
+
+static char ql_stats_str_arr[][ETH_GSTRING_LEN] = {
+	{"tx_pkts"},
+	{"tx_bytes"},
+	{"tx_mcast_pkts"},
+	{"tx_bcast_pkts"},
+	{"tx_ucast_pkts"},
+	{"tx_ctl_pkts"},
+	{"tx_pause_pkts"},
+	{"tx_64_pkts"},
+	{"tx_65_to_127_pkts"},
+	{"tx_128_to_255_pkts"},
+	{"tx_256_511_pkts"},
+	{"tx_512_to_1023_pkts"},
+	{"tx_1024_to_1518_pkts"},
+	{"tx_1519_to_max_pkts"},
+	{"tx_undersize_pkts"},
+	{"tx_oversize_pkts"},
+	{"rx_bytes"},
+	{"rx_bytes_ok"},
+	{"rx_pkts"},
+	{"rx_pkts_ok"},
+	{"rx_bcast_pkts"},
+	{"rx_mcast_pkts"},
+	{"rx_ucast_pkts"},
+	{"rx_undersize_pkts"},
+	{"rx_oversize_pkts"},
+	{"rx_jabber_pkts"},
+	{"rx_undersize_fcerr_pkts"},
+	{"rx_drop_events"},
+	{"rx_fcerr_pkts"},
+	{"rx_align_err"},
+	{"rx_symbol_err"},
+	{"rx_mac_err"},
+	{"rx_ctl_pkts"},
+	{"rx_pause_pkts"},
+	{"rx_64_pkts"},
+	{"rx_65_to_127_pkts"},
+	{"rx_128_255_pkts"},
+	{"rx_256_511_pkts"},
+	{"rx_512_to_1023_pkts"},
+	{"rx_1024_to_1518_pkts"},
+	{"rx_1519_to_max_pkts"},
+	{"rx_len_err_pkts"},
+};
+
+static void ql_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
+{
+	switch (stringset) {
+	case ETH_SS_STATS:
+		memcpy(buf, ql_stats_str_arr, sizeof(ql_stats_str_arr));
+		break;
+	}
+}
+
+static int ql_get_sset_count(struct net_device *dev, int sset)
+{
+	switch (sset) {
+	case ETH_SS_STATS:
+		return ARRAY_SIZE(ql_stats_str_arr);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static void
+ql_get_ethtool_stats(struct net_device *ndev,
+		     struct ethtool_stats *stats, u64 *data)
+{
+	struct ql_adapter *qdev = netdev_priv(ndev);
+	struct nic_stats *s = &qdev->nic_stats;
+
+	ql_update_stats(qdev);
+
+	*data++ = s->tx_pkts;
+	*data++ = s->tx_bytes;
+	*data++ = s->tx_mcast_pkts;
+	*data++ = s->tx_bcast_pkts;
+	*data++ = s->tx_ucast_pkts;
+	*data++ = s->tx_ctl_pkts;
+	*data++ = s->tx_pause_pkts;
+	*data++ = s->tx_64_pkt;
+	*data++ = s->tx_65_to_127_pkt;
+	*data++ = s->tx_128_to_255_pkt;
+	*data++ = s->tx_256_511_pkt;
+	*data++ = s->tx_512_to_1023_pkt;
+	*data++ = s->tx_1024_to_1518_pkt;
+	*data++ = s->tx_1519_to_max_pkt;
+	*data++ = s->tx_undersize_pkt;
+	*data++ = s->tx_oversize_pkt;
+	*data++ = s->rx_bytes;
+	*data++ = s->rx_bytes_ok;
+	*data++ = s->rx_pkts;
+	*data++ = s->rx_pkts_ok;
+	*data++ = s->rx_bcast_pkts;
+	*data++ = s->rx_mcast_pkts;
+	*data++ = s->rx_ucast_pkts;
+	*data++ = s->rx_undersize_pkts;
+	*data++ = s->rx_oversize_pkts;
+	*data++ = s->rx_jabber_pkts;
+	*data++ = s->rx_undersize_fcerr_pkts;
+	*data++ = s->rx_drop_events;
+	*data++ = s->rx_fcerr_pkts;
+	*data++ = s->rx_align_err;
+	*data++ = s->rx_symbol_err;
+	*data++ = s->rx_mac_err;
+	*data++ = s->rx_ctl_pkts;
+	*data++ = s->rx_pause_pkts;
+	*data++ = s->rx_64_pkts;
+	*data++ = s->rx_65_to_127_pkts;
+	*data++ = s->rx_128_255_pkts;
+	*data++ = s->rx_256_511_pkts;
+	*data++ = s->rx_512_to_1023_pkts;
+	*data++ = s->rx_1024_to_1518_pkts;
+	*data++ = s->rx_1519_to_max_pkts;
+	*data++ = s->rx_len_err_pkts;
+}
+
+static int ql_get_settings(struct net_device *ndev,
+			      struct ethtool_cmd *ecmd)
+{
+	struct ql_adapter *qdev = netdev_priv(ndev);
+
+	ecmd->supported = SUPPORTED_10000baseT_Full;
+	ecmd->advertising = ADVERTISED_10000baseT_Full;
+	ecmd->autoneg = AUTONEG_ENABLE;
+	ecmd->transceiver = XCVR_EXTERNAL;
+	if ((qdev->link_status & LINK_TYPE_MASK) == LINK_TYPE_10GBASET) {
+		ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg);
+		ecmd->advertising |= (ADVERTISED_TP | ADVERTISED_Autoneg);
+		ecmd->port = PORT_TP;
+	} else {
+		ecmd->supported |= SUPPORTED_FIBRE;
+		ecmd->advertising |= ADVERTISED_FIBRE;
+		ecmd->port = PORT_FIBRE;
+	}
+
+	ecmd->speed = SPEED_10000;
+	ecmd->duplex = DUPLEX_FULL;
+
+	return 0;
+}
+
+static void ql_get_drvinfo(struct net_device *ndev,
+			   struct ethtool_drvinfo *drvinfo)
+{
+	struct ql_adapter *qdev = netdev_priv(ndev);
+	strncpy(drvinfo->driver, qlge_driver_name, 32);
+	strncpy(drvinfo->version, qlge_driver_version, 32);
+	strncpy(drvinfo->fw_version, "N/A", 32);
+	strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32);
+	drvinfo->n_stats = 0;
+	drvinfo->testinfo_len = 0;
+	drvinfo->regdump_len = 0;
+	drvinfo->eedump_len = 0;
+}
+
+static int ql_get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
+{
+	struct ql_adapter *qdev = netdev_priv(dev);
+
+	c->rx_coalesce_usecs = qdev->rx_coalesce_usecs;
+	c->tx_coalesce_usecs = qdev->tx_coalesce_usecs;
+
+	/* This chip coalesces as follows:
+	 * If a packet arrives, hold off interrupts until
+	 * cqicb->int_delay expires, but if no other packets arrive don't
+	 * wait longer than cqicb->pkt_int_delay. But ethtool doesn't use a
+	 * timer to coalesce on a frame basis.  So, we have to take ethtool's
+	 * max_coalesced_frames value and convert it to a delay in microseconds.
+	 * We do this by assuming a basic throughput of 1,000,000 frames per
+	 * second at 1024 bytes each, which works out to one frame per usec,
+	 * so the conversion is a simple one-to-one ratio.
+	 */
+	c->rx_max_coalesced_frames = qdev->rx_max_coalesced_frames;
+	c->tx_max_coalesced_frames = qdev->tx_max_coalesced_frames;
+
+	return 0;
+}
+
+static int ql_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *c)
+{
+	struct ql_adapter *qdev = netdev_priv(ndev);
+
+	/* Validate user parameters. */
+	if (c->rx_coalesce_usecs > qdev->rx_ring_size / 2)
+		return -EINVAL;
+	/* Don't wait more than 10 usec. */
+	if (c->rx_max_coalesced_frames > MAX_INTER_FRAME_WAIT)
+		return -EINVAL;
+	if (c->tx_coalesce_usecs > qdev->tx_ring_size / 2)
+		return -EINVAL;
+	if (c->tx_max_coalesced_frames > MAX_INTER_FRAME_WAIT)
+		return -EINVAL;
+
+	/* Verify a change took place before updating the hardware. */
+	if (qdev->rx_coalesce_usecs == c->rx_coalesce_usecs &&
+	    qdev->tx_coalesce_usecs == c->tx_coalesce_usecs &&
+	    qdev->rx_max_coalesced_frames == c->rx_max_coalesced_frames &&
+	    qdev->tx_max_coalesced_frames == c->tx_max_coalesced_frames)
+		return 0;
+
+	qdev->rx_coalesce_usecs = c->rx_coalesce_usecs;
+	qdev->tx_coalesce_usecs = c->tx_coalesce_usecs;
+	qdev->rx_max_coalesced_frames = c->rx_max_coalesced_frames;
+	qdev->tx_max_coalesced_frames = c->tx_max_coalesced_frames;
+
+	return ql_update_ring_coalescing(qdev);
+}
+
+static u32 ql_get_rx_csum(struct net_device *netdev)
+{
+	struct ql_adapter *qdev = netdev_priv(netdev);
+	return qdev->rx_csum;
+}
+
+static int ql_set_rx_csum(struct net_device *netdev, uint32_t data)
+{
+	struct ql_adapter *qdev = netdev_priv(netdev);
+	qdev->rx_csum = data;
+	return 0;
+}
+
+static int ql_set_tso(struct net_device *ndev, uint32_t data)
+{
+	if (data) {
+		ndev->features |= NETIF_F_TSO;
+		ndev->features |= NETIF_F_TSO6;
+	} else {
+		ndev->features &= ~NETIF_F_TSO;
+		ndev->features &= ~NETIF_F_TSO6;
+	}
+	return 0;
+}
+
+static u32 ql_get_msglevel(struct net_device *ndev)
+{
+	struct ql_adapter *qdev = netdev_priv(ndev);
+	return qdev->msg_enable;
+}
+
+static void ql_set_msglevel(struct net_device *ndev, u32 value)
+{
+	struct ql_adapter *qdev = netdev_priv(ndev);
+	qdev->msg_enable = value;
+}
+
+const struct ethtool_ops qlge_ethtool_ops = {
+	.get_settings = ql_get_settings,
+	.get_drvinfo = ql_get_drvinfo,
+	.get_msglevel = ql_get_msglevel,
+	.set_msglevel = ql_set_msglevel,
+	.get_link = ethtool_op_get_link,
+	.get_rx_csum = ql_get_rx_csum,
+	.set_rx_csum = ql_set_rx_csum,
+	.get_tx_csum = ethtool_op_get_tx_csum,
+	.get_sg = ethtool_op_get_sg,
+	.set_sg = ethtool_op_set_sg,
+	.get_tso = ethtool_op_get_tso,
+	.set_tso = ql_set_tso,
+	.get_coalesce = ql_get_coalesce,
+	.set_coalesce = ql_set_coalesce,
+	.get_sset_count = ql_get_sset_count,
+	.get_strings = ql_get_strings,
+	.get_ethtool_stats = ql_get_ethtool_stats,
+};
+
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
new file mode 100644
index 000000000000..ad878e2b9ded
--- /dev/null
+++ b/drivers/net/qlge/qlge_main.c
@@ -0,0 +1,3954 @@
+/*
+ * QLogic qlge NIC HBA Driver
+ * Copyright (c)  2003-2008 QLogic Corporation
+ * See LICENSE.qlge for copyright and licensing details.
+ * Author:     Linux qlge network device driver by
+ *                      Ron Mercer <ron.mercer@qlogic.com>
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/pagemap.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/dmapool.h>
+#include <linux/mempool.h>
+#include <linux/spinlock.h>
+#include <linux/kthread.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <net/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/if_arp.h>
+#include <linux/if_ether.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/skbuff.h>
+#include <linux/rtnetlink.h>
+#include <linux/if_vlan.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+
+#include "qlge.h"
+
+char qlge_driver_name[] = DRV_NAME;
+const char qlge_driver_version[] = DRV_VERSION;
+
+MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
+MODULE_DESCRIPTION(DRV_STRING " ");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+static const u32 default_msg =
+    NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
+/* NETIF_MSG_TIMER |	*/
+    NETIF_MSG_IFDOWN |
+    NETIF_MSG_IFUP |
+    NETIF_MSG_RX_ERR |
+    NETIF_MSG_TX_ERR |
+    NETIF_MSG_TX_QUEUED |
+    NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS |
+/* NETIF_MSG_PKTDATA | */
+    NETIF_MSG_HW | NETIF_MSG_WOL | 0;
+
+static int debug = 0x00007fff;	/* defaults above */
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
+#define MSIX_IRQ 0
+#define MSI_IRQ 1
+#define LEG_IRQ 2
+static int irq_type = MSIX_IRQ;
+module_param(irq_type, int, 0);
+MODULE_PARM_DESC(irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
+
+static struct pci_device_id qlge_pci_tbl[] __devinitdata = {
+	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID)},
+	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID1)},
+	/* required last entry */
+	{0,}
+};
+
+MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
+
+/* This hardware semaphore causes exclusive access to
+ * resources shared between the NIC driver, MPI firmware,
+ * FCOE firmware and the FC driver.
+ */
+static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
+{
+	u32 sem_bits = 0;
+
+	switch (sem_mask) {
+	case SEM_XGMAC0_MASK:
+		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
+		break;
+	case SEM_XGMAC1_MASK:
+		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
+		break;
+	case SEM_ICB_MASK:
+		sem_bits = SEM_SET << SEM_ICB_SHIFT;
+		break;
+	case SEM_MAC_ADDR_MASK:
+		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
+		break;
+	case SEM_FLASH_MASK:
+		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
+		break;
+	case SEM_PROBE_MASK:
+		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
+		break;
+	case SEM_RT_IDX_MASK:
+		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
+		break;
+	case SEM_PROC_REG_MASK:
+		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
+		break;
+	default:
+		QPRINTK(qdev, PROBE, ALERT, "Bad Semaphore mask!\n");
+		return -EINVAL;
+	}
+
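+	/* Request ownership by writing the set bits.  The chip leaves
+	 * the bits set in the read-back only if we acquired the
+	 * semaphore, so a return of 0 means success.
+	 */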
+	ql_write32(qdev, SEM, sem_bits | sem_mask);
+	return !(ql_read32(qdev, SEM) & sem_bits);
+}
+
+int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
+{
+	unsigned int seconds = 3;
+	do {
+		if (!ql_sem_trylock(qdev, sem_mask))
+			return 0;
+		ssleep(1);
+	} while (--seconds);
+	return -ETIMEDOUT;
+}
+
+void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
+{
+	ql_write32(qdev, SEM, sem_mask);
+	ql_read32(qdev, SEM);	/* flush */
+}
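+
+/* Typical usage of the hardware semaphore (see ql_get_flash_params):
+ *
+ *	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
+ *		return -ETIMEDOUT;
+ *	...access the shared resource...
+ *	ql_sem_unlock(qdev, SEM_FLASH_MASK);
+ */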
+
+/* This function waits for a specific bit to come ready
+ * in a given register.  It is used mostly during the initialization
+ * process, but is also used by kernel thread APIs such as
+ * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
+ */
+int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
+{
+	u32 temp;
+	int count = UDELAY_COUNT;
+
+	while (count) {
+		temp = ql_read32(qdev, reg);
+
+		/* check for errors */
+		if (temp & err_bit) {
+			QPRINTK(qdev, PROBE, ALERT,
+				"register 0x%.08x access error, value = 0x%.08x!\n",
+				reg, temp);
+			return -EIO;
+		} else if (temp & bit)
+			return 0;
+		udelay(UDELAY_DELAY);
+		count--;
+	}
+	QPRINTK(qdev, PROBE, ALERT,
+		"Timed out waiting for reg %x to come ready.\n", reg);
+	return -ETIMEDOUT;
+}
+
+/* The CFG register is used to download TX and RX control blocks
+ * to the chip. This function waits for an operation to complete.
+ */
+static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
+{
+	int count = UDELAY_COUNT;
+	u32 temp;
+
+	while (count) {
+		temp = ql_read32(qdev, CFG);
+		if (temp & CFG_LE)
+			return -EIO;
+		if (!(temp & bit))
+			return 0;
+		udelay(UDELAY_DELAY);
+		count--;
+	}
+	return -ETIMEDOUT;
+}
+
+/* Used to issue init control blocks to hw. Maps control block,
+ * sets address, triggers download, waits for completion.
+ */
+int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
+		 u16 q_id)
+{
+	u64 map;
+	int status = 0;
+	int direction;
+	u32 mask;
+	u32 value;
+
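+	/* Loads (CFG_LRQ/LR/LCQ) move the control block to the chip;
+	 * anything else is a dump coming back from the chip.
+	 */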
+	direction =
+	    (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
+	    PCI_DMA_FROMDEVICE;
+
+	map = pci_map_single(qdev->pdev, ptr, size, direction);
+	if (pci_dma_mapping_error(qdev->pdev, map)) {
+		QPRINTK(qdev, IFUP, ERR, "Couldn't map DMA area.\n");
+		return -ENOMEM;
+	}
+
+	status = ql_wait_cfg(qdev, bit);
+	if (status) {
+		QPRINTK(qdev, IFUP, ERR,
+			"Timed out waiting for CFG to come ready.\n");
+		goto exit;
+	}
+
+	status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
+	if (status)
+		goto exit;
+	ql_write32(qdev, ICB_L, (u32) map);
+	ql_write32(qdev, ICB_H, (u32) (map >> 32));
+	ql_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */
+
+	mask = CFG_Q_MASK | (bit << 16);
+	value = bit | (q_id << CFG_Q_SHIFT);
+	ql_write32(qdev, CFG, (mask | value));
+
+	/*
+	 * Wait for the bit to clear after signaling hw.
+	 */
+	status = ql_wait_cfg(qdev, bit);
+exit:
+	pci_unmap_single(qdev->pdev, map, size, direction);
+	return status;
+}
+
+/* Get a specific MAC address from the CAM.  Used for debug and reg dump. */
+int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
+			u32 *value)
+{
+	u32 offset = 0;
+	int status;
+
+	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
+	if (status)
+		return status;
+	switch (type) {
+	case MAC_ADDR_TYPE_MULTI_MAC:
+	case MAC_ADDR_TYPE_CAM_MAC:
+		{
+			status =
+			    ql_wait_reg_rdy(qdev,
+				MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
+			if (status)
+				goto exit;
+			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
+				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
+				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
+			status =
+			    ql_wait_reg_rdy(qdev,
+				MAC_ADDR_IDX, MAC_ADDR_MR, MAC_ADDR_E);
+			if (status)
+				goto exit;
+			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
+			status =
+			    ql_wait_reg_rdy(qdev,
+				MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
+			if (status)
+				goto exit;
+			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
+				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
+				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
+			status =
+			    ql_wait_reg_rdy(qdev,
+				MAC_ADDR_IDX, MAC_ADDR_MR, MAC_ADDR_E);
+			if (status)
+				goto exit;
+			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
+			if (type == MAC_ADDR_TYPE_CAM_MAC) {
+				status =
+				    ql_wait_reg_rdy(qdev,
+					MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
+				if (status)
+					goto exit;
+				ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
+					   (index << MAC_ADDR_IDX_SHIFT) | /* index */
+					   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
+				status =
+				    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
+						    MAC_ADDR_MR, MAC_ADDR_E);
+				if (status)
+					goto exit;
+				*value++ = ql_read32(qdev, MAC_ADDR_DATA);
+			}
+			break;
+		}
+	case MAC_ADDR_TYPE_VLAN:
+	case MAC_ADDR_TYPE_MULTI_FLTR:
+	default:
+		QPRINTK(qdev, IFUP, CRIT,
+			"Address type %d not yet supported.\n", type);
+		status = -EPERM;
+	}
+exit:
+	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+	return status;
+}
+
+/* Set up a MAC, multicast or VLAN address for the
+ * inbound frame matching.
+ */
+static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
+			       u16 index)
+{
+	u32 offset = 0;
+	int status = 0;
+
+	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
+	if (status)
+		return status;
+	switch (type) {
+	case MAC_ADDR_TYPE_MULTI_MAC:
+	case MAC_ADDR_TYPE_CAM_MAC:
+		{
+			u32 cam_output;
+			u32 upper = (addr[0] << 8) | addr[1];
+			u32 lower =
+			    (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
+			    (addr[5]);
+
+			QPRINTK(qdev, IFUP, INFO,
+				"Adding %s address %02x:%02x:%02x:%02x:%02x:%02x"
+				" at index %d in the CAM.\n",
+				((type ==
+				  MAC_ADDR_TYPE_MULTI_MAC) ? "MULTICAST" :
+				 "UNICAST"), addr[0], addr[1], addr[2], addr[3],
+				addr[4], addr[5], index);
+
+			status =
+			    ql_wait_reg_rdy(qdev,
+				MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
+			if (status)
+				goto exit;
+			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
+				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
+				   type);	/* type */
+			ql_write32(qdev, MAC_ADDR_DATA, lower);
+			status =
+			    ql_wait_reg_rdy(qdev,
+				MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
+			if (status)
+				goto exit;
+			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
+				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
+				   type);	/* type */
+			ql_write32(qdev, MAC_ADDR_DATA, upper);
+			status =
+			    ql_wait_reg_rdy(qdev,
+				MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
+			if (status)
+				goto exit;
+			ql_write32(qdev, MAC_ADDR_IDX, (offset) |	/* offset */
+				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
+				   type);	/* type */
+			/* This field should also include the queue id
+			 * and possibly the function id.  Right now we
+			 * hardcode the route field to NIC core.
+			 */
+			if (type == MAC_ADDR_TYPE_CAM_MAC) {
+				cam_output = (CAM_OUT_ROUTE_NIC |
+					      (qdev->
+					       func << CAM_OUT_FUNC_SHIFT) |
+					      (qdev->
+					       rss_ring_first_cq_id <<
+					       CAM_OUT_CQ_ID_SHIFT));
+				if (qdev->vlgrp)
+					cam_output |= CAM_OUT_RV;
+				/* route to NIC core */
+				ql_write32(qdev, MAC_ADDR_DATA, cam_output);
+			}
+			break;
+		}
+	case MAC_ADDR_TYPE_VLAN:
+		{
+			u32 enable_bit = *((u32 *) &addr[0]);
+			/* For VLAN, the addr actually holds a bit that
+			 * either enables or disables the vlan id we are
+			 * addressing. It's either MAC_ADDR_E on or off.
+			 * That's bit-27 we're talking about.
+			 */
+			QPRINTK(qdev, IFUP, INFO, "%s VLAN ID %d %s the CAM.\n",
+				(enable_bit ? "Adding" : "Removing"),
+				index, (enable_bit ? "to" : "from"));
+
+			status =
+			    ql_wait_reg_rdy(qdev,
+				MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
+			if (status)
+				goto exit;
+			ql_write32(qdev, MAC_ADDR_IDX, offset |	/* offset */
+				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
+				   type |	/* type */
+				   enable_bit);	/* enable/disable */
+			break;
+		}
+	case MAC_ADDR_TYPE_MULTI_FLTR:
+	default:
+		QPRINTK(qdev, IFUP, CRIT,
+			"Address type %d not yet supported.\n", type);
+		status = -EPERM;
+	}
+exit:
+	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+	return status;
+}
+
+/* Get a specific frame routing value from the CAM.
+ * Used for debug and reg dump.
+ */
+int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
+{
+	int status = 0;
+
+	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
+	if (status)
+		goto exit;
+
+	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, RT_IDX_E);
+	if (status)
+		goto exit;
+
+	ql_write32(qdev, RT_IDX,
+		   RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
+	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, RT_IDX_E);
+	if (status)
+		goto exit;
+	*value = ql_read32(qdev, RT_DATA);
+exit:
+	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
+	return status;
+}
+
+/* The NIC function for this chip has 16 routing indexes.  Each one can be used
+ * to route different frame types to various inbound queues.  We send broadcast/
+ * multicast/error frames to the default queue for slow handling,
+ * and CAM hit/RSS frames to the fast handling queues.
+ */
+static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
+			      int enable)
+{
+	int status;
+	u32 value = 0;
+
+	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
+	if (status)
+		return status;
+
+	QPRINTK(qdev, IFUP, DEBUG,
+		"%s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s mask %s the routing reg.\n",
+		(enable ? "Adding" : "Removing"),
+		((index == RT_IDX_ALL_ERR_SLOT) ? "MAC ERROR/ALL ERROR" : ""),
+		((index == RT_IDX_IP_CSUM_ERR_SLOT) ? "IP CSUM ERROR" : ""),
+		((index ==
+		  RT_IDX_TCP_UDP_CSUM_ERR_SLOT) ? "TCP/UDP CSUM ERROR" : ""),
+		((index == RT_IDX_BCAST_SLOT) ? "BROADCAST" : ""),
+		((index == RT_IDX_MCAST_MATCH_SLOT) ? "MULTICAST MATCH" : ""),
+		((index == RT_IDX_ALLMULTI_SLOT) ? "ALL MULTICAST MATCH" : ""),
+		((index == RT_IDX_UNUSED6_SLOT) ? "UNUSED6" : ""),
+		((index == RT_IDX_UNUSED7_SLOT) ? "UNUSED7" : ""),
+		((index == RT_IDX_RSS_MATCH_SLOT) ? "RSS ALL/IPV4 MATCH" : ""),
+		((index == RT_IDX_RSS_IPV6_SLOT) ? "RSS IPV6" : ""),
+		((index == RT_IDX_RSS_TCP4_SLOT) ? "RSS TCP4" : ""),
+		((index == RT_IDX_RSS_TCP6_SLOT) ? "RSS TCP6" : ""),
+		((index == RT_IDX_CAM_HIT_SLOT) ? "CAM HIT" : ""),
+		((index == RT_IDX_UNUSED013) ? "UNUSED13" : ""),
+		((index == RT_IDX_UNUSED014) ? "UNUSED14" : ""),
+		((index == RT_IDX_PROMISCUOUS_SLOT) ? "PROMISCUOUS" : ""),
+		(enable ? "to" : "from"));
+
+	switch (mask) {
+	case RT_IDX_CAM_HIT:
+		{
+			value = RT_IDX_DST_CAM_Q |	/* dest */
+			    RT_IDX_TYPE_NICQ |	/* type */
+			    (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
+			break;
+		}
+	case RT_IDX_VALID:	/* Promiscuous Mode frames. */
+		{
+			value = RT_IDX_DST_DFLT_Q |	/* dest */
+			    RT_IDX_TYPE_NICQ |	/* type */
+			    (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
+			break;
+		}
+	case RT_IDX_ERR:	/* Pass up MAC,IP,TCP/UDP error frames. */
+		{
+			value = RT_IDX_DST_DFLT_Q |	/* dest */
+			    RT_IDX_TYPE_NICQ |	/* type */
+			    (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
+			break;
+		}
+	case RT_IDX_BCAST:	/* Pass up Broadcast frames to default Q. */
+		{
+			value = RT_IDX_DST_DFLT_Q |	/* dest */
+			    RT_IDX_TYPE_NICQ |	/* type */
+			    (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
+			break;
+		}
+	case RT_IDX_MCAST:	/* Pass up All Multicast frames. */
+		{
+			value = RT_IDX_DST_CAM_Q |	/* dest */
+			    RT_IDX_TYPE_NICQ |	/* type */
+			    (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
+			break;
+		}
+	case RT_IDX_MCAST_MATCH:	/* Pass up matched Multicast frames. */
+		{
+			value = RT_IDX_DST_CAM_Q |	/* dest */
+			    RT_IDX_TYPE_NICQ |	/* type */
+			    (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
+			break;
+		}
+	case RT_IDX_RSS_MATCH:	/* Pass up matched RSS frames. */
+		{
+			value = RT_IDX_DST_RSS |	/* dest */
+			    RT_IDX_TYPE_NICQ |	/* type */
+			    (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
+			break;
+		}
+	case 0:		/* Clear the E-bit on an entry. */
+		{
+			value = RT_IDX_DST_DFLT_Q |	/* dest */
+			    RT_IDX_TYPE_NICQ |	/* type */
+			    (index << RT_IDX_IDX_SHIFT);/* index */
+			break;
+		}
+	default:
+		QPRINTK(qdev, IFUP, ERR, "Mask type %d not yet supported.\n",
+			mask);
+		status = -EPERM;
+		goto exit;
+	}
+
+	if (value) {
+		status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
+		if (status)
+			goto exit;
+		value |= (enable ? RT_IDX_E : 0);
+		ql_write32(qdev, RT_IDX, value);
+		ql_write32(qdev, RT_DATA, enable ? mask : 0);
+	}
+exit:
+	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
+	return status;
+}
+
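+/* INTR_EN, like CFG, is a masked register: the upper 16 bits of a
+ * write select which of the lower 16 bits actually change.
+ */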
+static void ql_enable_interrupts(struct ql_adapter *qdev)
+{
+	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
+}
+
+static void ql_disable_interrupts(struct ql_adapter *qdev)
+{
+	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
+}
+
+/* If we're running with multiple MSI-X vectors then we enable on the fly.
+ * Otherwise, we may have multiple outstanding workers and don't want to
+ * enable until the last one finishes. In this case, the irq_cnt gets
+ * incremented every time we queue a worker and decremented every time
+ * a worker finishes.  Once it hits zero we enable the interrupt.
+ */
+void ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
+{
+	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags)))
+		ql_write32(qdev, INTR_EN,
+			   qdev->intr_context[intr].intr_en_mask);
+	else {
+		if (qdev->legacy_check)
+			spin_lock(&qdev->legacy_lock);
+		if (atomic_dec_and_test(&qdev->intr_context[intr].irq_cnt)) {
+			QPRINTK(qdev, INTR, ERR, "Enabling interrupt %d.\n",
+				intr);
+			ql_write32(qdev, INTR_EN,
+				   qdev->intr_context[intr].intr_en_mask);
+		} else {
+			QPRINTK(qdev, INTR, ERR,
+				"Skip enable, other queue(s) are active.\n");
+		}
+		if (qdev->legacy_check)
+			spin_unlock(&qdev->legacy_lock);
+	}
+}
+
+static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
+{
+	u32 var = 0;
+
+	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags)))
+		goto exit;
+	else if (!atomic_read(&qdev->intr_context[intr].irq_cnt)) {
+		ql_write32(qdev, INTR_EN,
+			   qdev->intr_context[intr].intr_dis_mask);
+		var = ql_read32(qdev, STS);
+	}
+	atomic_inc(&qdev->intr_context[intr].irq_cnt);
+exit:
+	return var;
+}
+
+static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
+{
+	int i;
+	for (i = 0; i < qdev->intr_count; i++) {
+		/* The enable call does an atomic_dec_and_test
+		 * and enables only if the result is zero.
+		 * So we precharge it here.
+		 */
+		atomic_set(&qdev->intr_context[i].irq_cnt, 1);
+		ql_enable_completion_interrupt(qdev, i);
+	}
+}
+
+int ql_read_flash_word(struct ql_adapter *qdev, int offset, u32 *data)
+{
+	int status = 0;
+	/* wait for reg to come ready */
+	status = ql_wait_reg_rdy(qdev,
+			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
+	if (status)
+		goto exit;
+	/* set up for reg read */
+	ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
+	/* wait for reg to come ready */
+	status = ql_wait_reg_rdy(qdev,
+			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
+	if (status)
+		goto exit;
+	/* get the data */
+	*data = ql_read32(qdev, FLASH_DATA);
+exit:
+	return status;
+}
+
+static int ql_get_flash_params(struct ql_adapter *qdev)
+{
+	int i;
+	int status;
+	u32 *p = (u32 *)&qdev->flash;
+
+	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
+		return -ETIMEDOUT;
+
+	for (i = 0; i < sizeof(qdev->flash) / sizeof(u32); i++, p++) {
+		status = ql_read_flash_word(qdev, i, p);
+		if (status) {
+			QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
+			goto exit;
+		}
+	}
+exit:
+	ql_sem_unlock(qdev, SEM_FLASH_MASK);
+	return status;
+}
+
+/* xgmac registers are located behind the xgmac_addr and xgmac_data
+ * register pair.  Each read/write requires us to wait for the ready
+ * bit before reading/writing the data.
+ */
+static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
+{
+	int status;
+	/* wait for reg to come ready */
+	status = ql_wait_reg_rdy(qdev,
+			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
+	if (status)
+		return status;
+	/* write the data to the data reg */
+	ql_write32(qdev, XGMAC_DATA, data);
+	/* trigger the write */
+	ql_write32(qdev, XGMAC_ADDR, reg);
+	return status;
+}
+
+/* xgmac registers are located behind the xgmac_addr and xgmac_data
+ * register pair.  Each read/write requires us to wait for the ready
+ * bit before reading/writing the data.
+ */
+int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
+{
+	int status = 0;
+	/* wait for reg to come ready */
+	status = ql_wait_reg_rdy(qdev,
+			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
+	if (status)
+		goto exit;
+	/* set up for reg read */
+	ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
+	/* wait for reg to come ready */
+	status = ql_wait_reg_rdy(qdev,
+			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
+	if (status)
+		goto exit;
+	/* get the data */
+	*data = ql_read32(qdev, XGMAC_DATA);
+exit:
+	return status;
+}
+
+/* This is used for reading the 64-bit statistics regs. */
+int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
+{
+	int status = 0;
+	u32 hi = 0;
+	u32 lo = 0;
+
+	status = ql_read_xgmac_reg(qdev, reg, &lo);
+	if (status)
+		goto exit;
+
+	status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
+	if (status)
+		goto exit;
+
+	*data = (u64) lo | ((u64) hi << 32);
+
+exit:
+	return status;
+}
+
+/* Take the MAC Core out of reset.
+ * Enable statistics counting.
+ * Take the transmitter/receiver out of reset.
+ * This functionality may be done in the MPI firmware at a
+ * later date.
+ */
+static int ql_port_initialize(struct ql_adapter *qdev)
+{
+	int status = 0;
+	u32 data;
+
+	if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
+		/* Another function has the semaphore, so
+		 * wait for the port init bit to come ready.
+		 */
+		QPRINTK(qdev, LINK, INFO,
+			"Another function has the semaphore, so wait for the port init bit to come ready.\n");
+		status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
+		if (status) {
+			QPRINTK(qdev, LINK, CRIT,
+				"Port initialize timed out.\n");
+		}
+		return status;
+	}
+
+	QPRINTK(qdev, LINK, INFO, "Got xgmac semaphore!\n");
+	/* Set the core reset. */
+	status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
+	if (status)
+		goto end;
+	data |= GLOBAL_CFG_RESET;
+	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
+	if (status)
+		goto end;
+
+	/* Clear the core reset and turn on jumbo for receiver. */
+	data &= ~GLOBAL_CFG_RESET;	/* Clear core reset. */
+	data |= GLOBAL_CFG_JUMBO;	/* Turn on jumbo. */
+	data |= GLOBAL_CFG_TX_STAT_EN;
+	data |= GLOBAL_CFG_RX_STAT_EN;
+	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
+	if (status)
+		goto end;
+
+	/* Enable transmitter, and clear its reset. */
+	status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
+	if (status)
+		goto end;
+	data &= ~TX_CFG_RESET;	/* Clear the TX MAC reset. */
+	data |= TX_CFG_EN;	/* Enable the transmitter. */
+	status = ql_write_xgmac_reg(qdev, TX_CFG, data);
+	if (status)
+		goto end;
+
+	/* Enable receiver and clear its reset. */
+	status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
+	if (status)
+		goto end;
+	data &= ~RX_CFG_RESET;	/* Clear the RX MAC reset. */
+	data |= RX_CFG_EN;	/* Enable the receiver. */
+	status = ql_write_xgmac_reg(qdev, RX_CFG, data);
+	if (status)
+		goto end;
+
+	/* Turn on jumbo. */
+	status =
+	    ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
+	if (status)
+		goto end;
+	status =
+	    ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
+	if (status)
+		goto end;
+
+	/* Signal to the world that the port is enabled.        */
+	ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
+end:
+	ql_sem_unlock(qdev, qdev->xg_sem_mask);
+	return status;
+}
+
+/* Get the next large buffer. */
+struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
+{
+	struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
+	rx_ring->lbq_curr_idx++;
+	if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
+		rx_ring->lbq_curr_idx = 0;
+	rx_ring->lbq_free_cnt++;
+	return lbq_desc;
+}
+
+/* Get the next small buffer. */
+struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
+{
+	struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
+	rx_ring->sbq_curr_idx++;
+	if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
+		rx_ring->sbq_curr_idx = 0;
+	rx_ring->sbq_free_cnt++;
+	return sbq_desc;
+}
+
+/* Update an rx ring index. */
+static void ql_update_cq(struct rx_ring *rx_ring)
+{
+	rx_ring->cnsmr_idx++;
+	rx_ring->curr_entry++;
+	if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
+		rx_ring->cnsmr_idx = 0;
+		rx_ring->curr_entry = rx_ring->cq_base;
+	}
+}
+
+static void ql_write_cq_idx(struct rx_ring *rx_ring)
+{
+	ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
+}
+
+/* Process (refill) a large buffer queue. */
+static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
+{
+	int clean_idx = rx_ring->lbq_clean_idx;
+	struct bq_desc *lbq_desc;
+	struct bq_element *bq;
+	u64 map;
+	int i;
+
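+	/* Refill in chunks of 16 buffers, ringing the producer-index
+	 * doorbell once per chunk.
+	 */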
+	while (rx_ring->lbq_free_cnt > 16) {
+		for (i = 0; i < 16; i++) {
+			QPRINTK(qdev, RX_STATUS, DEBUG,
+				"lbq: try cleaning clean_idx = %d.\n",
+				clean_idx);
+			lbq_desc = &rx_ring->lbq[clean_idx];
+			bq = lbq_desc->bq;
+			if (lbq_desc->p.lbq_page == NULL) {
+				QPRINTK(qdev, RX_STATUS, DEBUG,
+					"lbq: getting new page for index %d.\n",
+					lbq_desc->index);
+				lbq_desc->p.lbq_page = alloc_page(GFP_ATOMIC);
+				if (lbq_desc->p.lbq_page == NULL) {
+					QPRINTK(qdev, RX_STATUS, ERR,
+						"Couldn't get a page.\n");
+					return;
+				}
+				map = pci_map_page(qdev->pdev,
+						   lbq_desc->p.lbq_page,
+						   0, PAGE_SIZE,
+						   PCI_DMA_FROMDEVICE);
+				if (pci_dma_mapping_error(qdev->pdev, map)) {
+					QPRINTK(qdev, RX_STATUS, ERR,
+						"PCI mapping failed.\n");
+					put_page(lbq_desc->p.lbq_page);
+					lbq_desc->p.lbq_page = NULL;
+					return;
+				}
+				pci_unmap_addr_set(lbq_desc, mapaddr, map);
+				pci_unmap_len_set(lbq_desc, maplen, PAGE_SIZE);
+				bq->addr_lo =	/*lbq_desc->addr_lo = */
+				    cpu_to_le32(map);
+				bq->addr_hi =	/*lbq_desc->addr_hi = */
+				    cpu_to_le32(map >> 32);
+			}
+			clean_idx++;
+			if (clean_idx == rx_ring->lbq_len)
+				clean_idx = 0;
+		}
+
+		rx_ring->lbq_clean_idx = clean_idx;
+		rx_ring->lbq_prod_idx += 16;
+		if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
+			rx_ring->lbq_prod_idx = 0;
+		QPRINTK(qdev, RX_STATUS, DEBUG,
+			"lbq: updating prod idx = %d.\n",
+			rx_ring->lbq_prod_idx);
+		ql_write_db_reg(rx_ring->lbq_prod_idx,
+				rx_ring->lbq_prod_idx_db_reg);
+		rx_ring->lbq_free_cnt -= 16;
+	}
+}
+
+/* Process (refill) a small buffer queue. */
+static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
+{
+	int clean_idx = rx_ring->sbq_clean_idx;
+	struct bq_desc *sbq_desc;
+	struct bq_element *bq;
+	u64 map;
+	int i;
+
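+	/* Same chunked refill as the large buffer queue: 16 skbs per
+	 * doorbell write.
+	 */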
+	while (rx_ring->sbq_free_cnt > 16) {
+		for (i = 0; i < 16; i++) {
+			sbq_desc = &rx_ring->sbq[clean_idx];
+			QPRINTK(qdev, RX_STATUS, DEBUG,
+				"sbq: try cleaning clean_idx = %d.\n",
+				clean_idx);
+			bq = sbq_desc->bq;
+			if (sbq_desc->p.skb == NULL) {
+				QPRINTK(qdev, RX_STATUS, DEBUG,
+					"sbq: getting new skb for index %d.\n",
+					sbq_desc->index);
+				sbq_desc->p.skb =
+				    netdev_alloc_skb(qdev->ndev,
+						     rx_ring->sbq_buf_size);
+				if (sbq_desc->p.skb == NULL) {
+					QPRINTK(qdev, PROBE, ERR,
+						"Couldn't get an skb.\n");
+					rx_ring->sbq_clean_idx = clean_idx;
+					return;
+				}
+				skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
+				map = pci_map_single(qdev->pdev,
+						     sbq_desc->p.skb->data,
+						     rx_ring->sbq_buf_size /
+						     2, PCI_DMA_FROMDEVICE);
+				if (pci_dma_mapping_error(qdev->pdev, map)) {
+					QPRINTK(qdev, IFUP, ERR,
+						"PCI mapping failed.\n");
+					dev_kfree_skb_any(sbq_desc->p.skb);
+					sbq_desc->p.skb = NULL;
+					rx_ring->sbq_clean_idx = clean_idx;
+					return;
+				}
+				pci_unmap_addr_set(sbq_desc, mapaddr, map);
+				pci_unmap_len_set(sbq_desc, maplen,
+						  rx_ring->sbq_buf_size / 2);
+				bq->addr_lo = cpu_to_le32(map);
+				bq->addr_hi = cpu_to_le32(map >> 32);
+			}
+
+			clean_idx++;
+			if (clean_idx == rx_ring->sbq_len)
+				clean_idx = 0;
+		}
+		rx_ring->sbq_clean_idx = clean_idx;
+		rx_ring->sbq_prod_idx += 16;
+		if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
+			rx_ring->sbq_prod_idx = 0;
+		QPRINTK(qdev, RX_STATUS, DEBUG,
+			"sbq: updating prod idx = %d.\n",
+			rx_ring->sbq_prod_idx);
+		ql_write_db_reg(rx_ring->sbq_prod_idx,
+				rx_ring->sbq_prod_idx_db_reg);
+
+		rx_ring->sbq_free_cnt -= 16;
+	}
+}
+
+static void ql_update_buffer_queues(struct ql_adapter *qdev,
+				    struct rx_ring *rx_ring)
+{
+	ql_update_sbq(qdev, rx_ring);
+	ql_update_lbq(qdev, rx_ring);
+}
+
+/* Unmaps tx buffers.  Can be called from send() if a pci mapping
+ * fails at some stage, or from the interrupt when a tx completes.
+ */
+static void ql_unmap_send(struct ql_adapter *qdev,
+			  struct tx_ring_desc *tx_ring_desc, int mapped)
+{
+	int i;
+	for (i = 0; i < mapped; i++) {
+		if (i == 0 || (i == 7 && mapped > 7)) {
+			/*
+			 * Unmap the skb->data area, or the
+			 * external sglist (AKA the Outbound
+			 * Address List (OAL)).
+			 * If it's the zeroth element, then it's
+			 * the skb->data area.  If it's the 7th
+			 * element and there are more than 6 frags,
+			 * then it's an OAL.
+			 */
+			if (i == 7) {
+				QPRINTK(qdev, TX_DONE, DEBUG,
+					"unmapping OAL area.\n");
+			}
+			pci_unmap_single(qdev->pdev,
+					 pci_unmap_addr(&tx_ring_desc->map[i],
+							mapaddr),
+					 pci_unmap_len(&tx_ring_desc->map[i],
+						       maplen),
+					 PCI_DMA_TODEVICE);
+		} else {
+			QPRINTK(qdev, TX_DONE, DEBUG, "unmapping frag %d.\n",
+				i);
+			pci_unmap_page(qdev->pdev,
+				       pci_unmap_addr(&tx_ring_desc->map[i],
+						      mapaddr),
+				       pci_unmap_len(&tx_ring_desc->map[i],
+						     maplen), PCI_DMA_TODEVICE);
+		}
+	}
+}
+
+/* Map the buffers for this transmit.  This will return
+ * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
+ */
+static int ql_map_send(struct ql_adapter *qdev,
+		       struct ob_mac_iocb_req *mac_iocb_ptr,
+		       struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
+{
+	int len = skb_headlen(skb);
+	dma_addr_t map;
+	int frag_idx, err, map_idx = 0;
+	struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
+	int frag_cnt = skb_shinfo(skb)->nr_frags;
+
+	if (frag_cnt) {
+		QPRINTK(qdev, TX_QUEUED, DEBUG, "frag_cnt = %d.\n", frag_cnt);
+	}
+	/*
+	 * Map the skb buffer first.
+	 */
+	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
+
+	err = pci_dma_mapping_error(qdev->pdev, map);
+	if (err) {
+		QPRINTK(qdev, TX_QUEUED, ERR,
+			"PCI mapping failed with error: %d\n", err);
+
+		return NETDEV_TX_BUSY;
+	}
+
+	tbd->len = cpu_to_le32(len);
+	tbd->addr = cpu_to_le64(map);
+	pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
+	pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
+	map_idx++;
+
+	/*
+	 * This loop fills the remainder of the 8 address descriptors
+	 * in the IOCB.  If there are more than 7 fragments, then the
+	 * eighth address desc will point to an external list (OAL).
+	 * When this happens, the remainder of the frags will be stored
+	 * in this list.
+	 */
+	for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
+		tbd++;
+		if (frag_idx == 6 && frag_cnt > 7) {
+			/* Let's tack on an sglist.
+			 * Our control block will now
+			 * look like this:
+			 * iocb->seg[0] = skb->data
+			 * iocb->seg[1] = frag[0]
+			 * iocb->seg[2] = frag[1]
+			 * iocb->seg[3] = frag[2]
+			 * iocb->seg[4] = frag[3]
+			 * iocb->seg[5] = frag[4]
+			 * iocb->seg[6] = frag[5]
+			 * iocb->seg[7] = ptr to OAL (external sglist)
+			 * oal->seg[0] = frag[6]
+			 * oal->seg[1] = frag[7]
+			 * oal->seg[2] = frag[8]
+			 * oal->seg[3] = frag[9]
+			 * oal->seg[4] = frag[10]
+			 *      etc...
+			 */
+			/* Tack on the OAL in the eighth segment of IOCB. */
+			map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
+					     sizeof(struct oal),
+					     PCI_DMA_TODEVICE);
+			err = pci_dma_mapping_error(qdev->pdev, map);
+			if (err) {
+				QPRINTK(qdev, TX_QUEUED, ERR,
+					"PCI mapping outbound address list with error: %d\n",
+					err);
+				goto map_error;
+			}
+
+			tbd->addr = cpu_to_le64(map);
+			/*
+			 * The length is the number of fragments
+			 * that remain to be mapped times the length
+			 * of our sglist (OAL).
+			 */
+			tbd->len =
+			    cpu_to_le32((sizeof(struct tx_buf_desc) *
+					 (frag_cnt - frag_idx)) | TX_DESC_C);
+			pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
+					   map);
+			pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
+					  sizeof(struct oal));
+			tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
+			map_idx++;
+		}
+
+		map =
+		    pci_map_page(qdev->pdev, frag->page,
+				 frag->page_offset, frag->size,
+				 PCI_DMA_TODEVICE);
+
+		err = pci_dma_mapping_error(qdev->pdev, map);
+		if (err) {
+			QPRINTK(qdev, TX_QUEUED, ERR,
+				"PCI mapping frags failed with error: %d.\n",
+				err);
+			goto map_error;
+		}
+
+		tbd->addr = cpu_to_le64(map);
+		tbd->len = cpu_to_le32(frag->size);
+		pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
+		pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
+				  frag->size);
+	}
+	/* Save the number of segments we've mapped. */
+	tx_ring_desc->map_cnt = map_idx;
+	/* Terminate the last segment. */
+	tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
+	return NETDEV_TX_OK;
+
+map_error:
+	/*
+	 * map_idx holds the number of segments that were mapped
+	 * successfully (the skb->data area, any frags, and possibly
+	 * the OAL), so ql_unmap_send() unmaps exactly those.
+	 */
+	ql_unmap_send(qdev, tx_ring_desc, map_idx);
+	return NETDEV_TX_BUSY;
+}
+
+void ql_realign_skb(struct sk_buff *skb, int len)
+{
+	void *temp_addr = skb->data;
+
+	/* Undo the skb_reserve(skb,32) we did before
+	 * giving to hardware, and realign data on
+	 * a 2-byte boundary.
+	 */
+	skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
+	skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
+	skb_copy_to_linear_data(skb, temp_addr,
+		(unsigned int)len);
+}
+
+/*
+ * This function builds an skb for the given inbound
+ * completion.  It will be rewritten for readability in the near
+ * future, but for now it works well.
+ */
+static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
+				       struct rx_ring *rx_ring,
+				       struct ib_mac_iocb_rsp *ib_mac_rsp)
+{
+	struct bq_desc *lbq_desc;
+	struct bq_desc *sbq_desc;
+	struct sk_buff *skb = NULL;
+	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
+	u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
+
+	/*
+	 * Handle the header buffer if present.
+	 */
+	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
+	    ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
+		QPRINTK(qdev, RX_STATUS, DEBUG, "Header of %d bytes in small buffer.\n", hdr_len);
+		/*
+		 * Headers fit nicely into a small buffer.
+		 */
+		sbq_desc = ql_get_curr_sbuf(rx_ring);
+		pci_unmap_single(qdev->pdev,
+				pci_unmap_addr(sbq_desc, mapaddr),
+				pci_unmap_len(sbq_desc, maplen),
+				PCI_DMA_FROMDEVICE);
+		skb = sbq_desc->p.skb;
+		ql_realign_skb(skb, hdr_len);
+		skb_put(skb, hdr_len);
+		sbq_desc->p.skb = NULL;
+	}
+
+	/*
+	 * Handle the data buffer(s).
+	 */
+	if (unlikely(!length)) {	/* Is there data too? */
+		QPRINTK(qdev, RX_STATUS, DEBUG,
+			"No Data buffer in this packet.\n");
+		return skb;
+	}
+
+	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
+		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
+			QPRINTK(qdev, RX_STATUS, DEBUG,
+				"Headers in small, data of %d bytes in small, combine them.\n", length);
+			/*
+			 * Data is less than small buffer size so it's
+			 * stuffed in a small buffer.
+			 * For this case we append the data
+			 * from the "data" small buffer to the "header" small
+			 * buffer.
+			 */
+			sbq_desc = ql_get_curr_sbuf(rx_ring);
+			pci_dma_sync_single_for_cpu(qdev->pdev,
+						    pci_unmap_addr
+						    (sbq_desc, mapaddr),
+						    pci_unmap_len
+						    (sbq_desc, maplen),
+						    PCI_DMA_FROMDEVICE);
+			memcpy(skb_put(skb, length),
+			       sbq_desc->p.skb->data, length);
+			pci_dma_sync_single_for_device(qdev->pdev,
+						       pci_unmap_addr
+						       (sbq_desc,
+							mapaddr),
+						       pci_unmap_len
+						       (sbq_desc,
+							maplen),
+						       PCI_DMA_FROMDEVICE);
+		} else {
+			QPRINTK(qdev, RX_STATUS, DEBUG,
+				"%d bytes in a single small buffer.\n", length);
+			sbq_desc = ql_get_curr_sbuf(rx_ring);
+			skb = sbq_desc->p.skb;
+			ql_realign_skb(skb, length);
+			skb_put(skb, length);
+			pci_unmap_single(qdev->pdev,
+					 pci_unmap_addr(sbq_desc,
+							mapaddr),
+					 pci_unmap_len(sbq_desc,
+						       maplen),
+					 PCI_DMA_FROMDEVICE);
+			sbq_desc->p.skb = NULL;
+		}
+	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
+		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
+			QPRINTK(qdev, RX_STATUS, DEBUG,
+				"Header in small, %d bytes in large. Chain large to small!\n", length);
+			/*
+			 * The data is in a single large buffer.  We
+			 * chain it to the header buffer's skb and let
+			 * it rip.
+			 */
+			lbq_desc = ql_get_curr_lbuf(rx_ring);
+			pci_unmap_page(qdev->pdev,
+				       pci_unmap_addr(lbq_desc,
+						      mapaddr),
+				       pci_unmap_len(lbq_desc, maplen),
+				       PCI_DMA_FROMDEVICE);
+			QPRINTK(qdev, RX_STATUS, DEBUG,
+				"Chaining page to skb.\n");
+			skb_fill_page_desc(skb, 0, lbq_desc->p.lbq_page,
+					   0, length);
+			skb->len += length;
+			skb->data_len += length;
+			skb->truesize += length;
+			lbq_desc->p.lbq_page = NULL;
+		} else {
+			/*
+			 * The headers and data are in a single large buffer. We
+			 * copy it to a new skb and let it go. This can happen with
+			 * jumbo mtu on a non-TCP/UDP frame.
+			 */
+			lbq_desc = ql_get_curr_lbuf(rx_ring);
+			skb = netdev_alloc_skb(qdev->ndev, length);
+			if (skb == NULL) {
+				QPRINTK(qdev, PROBE, DEBUG,
+					"No skb available, drop the packet.\n");
+				return NULL;
+			}
+			skb_reserve(skb, NET_IP_ALIGN);
+			QPRINTK(qdev, RX_STATUS, DEBUG,
+				"%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", length);
+			skb_fill_page_desc(skb, 0, lbq_desc->p.lbq_page,
+					   0, length);
+			skb->len += length;
+			skb->data_len += length;
+			skb->truesize += length;
+			length -= length;
+			lbq_desc->p.lbq_page = NULL;
+			__pskb_pull_tail(skb,
+				(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
+				VLAN_ETH_HLEN : ETH_HLEN);
+		}
+	} else {
+		/*
+		 * The data is in a chain of large buffers
+		 * pointed to by a small buffer.  We loop
+		 * through and chain them to our small header
+		 * buffer's skb.
+		 * frags:  There are 18 frags max and our small
+		 *         buffer will hold 32 of them.  In practice
+		 *         we'll use at most 3 for our 9000 byte jumbo
+		 *         frames, but if the MTU goes up we could
+		 *         eventually be in trouble.
+		 */
+		int size, offset, i = 0;
+		struct bq_element *bq, bq_array[8];
+		sbq_desc = ql_get_curr_sbuf(rx_ring);
+		pci_unmap_single(qdev->pdev,
+				 pci_unmap_addr(sbq_desc, mapaddr),
+				 pci_unmap_len(sbq_desc, maplen),
+				 PCI_DMA_FROMDEVICE);
+		if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
+			/*
+			 * This is a non-TCP/UDP IP frame, so
+			 * the headers aren't split into a small
+			 * buffer.  We have to use the small buffer
+			 * that contains our sg list as our skb to
+			 * send upstairs. Copy the sg list here to
+			 * a local buffer and use it to find the
+			 * pages to chain.
+			 */
+			QPRINTK(qdev, RX_STATUS, DEBUG,
+				"%d bytes of headers & data in chain of large.\n", length);
+			skb = sbq_desc->p.skb;
+			bq = &bq_array[0];
+			memcpy(bq, skb->data, sizeof(bq_array));
+			sbq_desc->p.skb = NULL;
+			skb_reserve(skb, NET_IP_ALIGN);
+		} else {
+			QPRINTK(qdev, RX_STATUS, DEBUG,
+				"Headers in small, %d bytes of data in chain of large.\n", length);
+			bq = (struct bq_element *)sbq_desc->p.skb->data;
+		}
+		while (length > 0) {
+			lbq_desc = ql_get_curr_lbuf(rx_ring);
+			if ((bq->addr_lo & ~BQ_MASK) != lbq_desc->bq->addr_lo) {
+				QPRINTK(qdev, RX_STATUS, ERR,
+					"Panic!!! bad large buffer address, expected 0x%.08x, got 0x%.08x.\n",
+					lbq_desc->bq->addr_lo, bq->addr_lo);
+				return NULL;
+			}
+			pci_unmap_page(qdev->pdev,
+				       pci_unmap_addr(lbq_desc,
+						      mapaddr),
+				       pci_unmap_len(lbq_desc,
+						     maplen),
+				       PCI_DMA_FROMDEVICE);
+			size = (length < PAGE_SIZE) ? length : PAGE_SIZE;
+			offset = 0;
+
+			QPRINTK(qdev, RX_STATUS, DEBUG,
+				"Adding page %d to skb for %d bytes.\n",
+				i, size);
+			skb_fill_page_desc(skb, i, lbq_desc->p.lbq_page,
+					   offset, size);
+			skb->len += size;
+			skb->data_len += size;
+			skb->truesize += size;
+			length -= size;
+			lbq_desc->p.lbq_page = NULL;
+			bq++;
+			i++;
+		}
+		__pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
+				VLAN_ETH_HLEN : ETH_HLEN);
+	}
+	return skb;
+}
+
+/* Process an inbound completion from an rx ring. */
+static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
+				   struct rx_ring *rx_ring,
+				   struct ib_mac_iocb_rsp *ib_mac_rsp)
+{
+	struct net_device *ndev = qdev->ndev;
+	struct sk_buff *skb = NULL;
+
+	QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
+
+	skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
+	if (unlikely(!skb)) {
+		QPRINTK(qdev, RX_STATUS, DEBUG,
+			"No skb available, drop packet.\n");
+		return;
+	}
+
+	prefetch(skb->data);
+	skb->dev = ndev;
+	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
+		QPRINTK(qdev, RX_STATUS, DEBUG, "%s%s%s Multicast.\n",
+			(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+			IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
+			(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+			IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
+			(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+			IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
+	}
+	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
+		QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n");
+	}
+	if (ib_mac_rsp->flags1 & (IB_MAC_IOCB_RSP_IE | IB_MAC_IOCB_RSP_TE)) {
+		QPRINTK(qdev, RX_STATUS, ERR,
+			"Bad checksum for this %s packet.\n",
+			((ib_mac_rsp->
+			  flags2 & IB_MAC_IOCB_RSP_T) ? "TCP" : "UDP"));
+		skb->ip_summed = CHECKSUM_NONE;
+	} else if (qdev->rx_csum &&
+		   ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) ||
+		    ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
+		     !(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_NU)))) {
+		QPRINTK(qdev, RX_STATUS, DEBUG, "RX checksum done!\n");
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+	}
+	qdev->stats.rx_packets++;
+	qdev->stats.rx_bytes += skb->len;
+	skb->protocol = eth_type_trans(skb, ndev);
+	if (qdev->vlgrp && (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V)) {
+		QPRINTK(qdev, RX_STATUS, DEBUG,
+			"Passing a VLAN packet upstream.\n");
+		vlan_hwaccel_rx(skb, qdev->vlgrp,
+				le16_to_cpu(ib_mac_rsp->vlan_id));
+	} else {
+		QPRINTK(qdev, RX_STATUS, DEBUG,
+			"Passing a normal packet upstream.\n");
+		netif_rx(skb);
+	}
+	ndev->last_rx = jiffies;
+}
+
+/* Process an outbound completion from an rx ring. */
+static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
+				   struct ob_mac_iocb_rsp *mac_rsp)
+{
+	struct tx_ring *tx_ring;
+	struct tx_ring_desc *tx_ring_desc;
+
+	QL_DUMP_OB_MAC_RSP(mac_rsp);
+	tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
+	tx_ring_desc = &tx_ring->q[mac_rsp->tid];
+	ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
+	qdev->stats.tx_bytes += tx_ring_desc->skb->len;
+	qdev->stats.tx_packets++;
+	dev_kfree_skb(tx_ring_desc->skb);
+	tx_ring_desc->skb = NULL;
+
+	if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
+					OB_MAC_IOCB_RSP_S |
+					OB_MAC_IOCB_RSP_L |
+					OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
+		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
+			QPRINTK(qdev, TX_DONE, WARNING,
+				"Total descriptor length did not match transfer length.\n");
+		}
+		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
+			QPRINTK(qdev, TX_DONE, WARNING,
+				"Frame too short to be legal, not sent.\n");
+		}
+		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
+			QPRINTK(qdev, TX_DONE, WARNING,
+				"Frame too long, but sent anyway.\n");
+		}
+		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
+			QPRINTK(qdev, TX_DONE, WARNING,
+				"PCI backplane error. Frame not sent.\n");
+		}
+	}
+	atomic_inc(&tx_ring->tx_count);
+}
+
+/* Fire up a handler to reset the MPI processor. */
+void ql_queue_fw_error(struct ql_adapter *qdev)
+{
+	netif_stop_queue(qdev->ndev);
+	netif_carrier_off(qdev->ndev);
+	queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
+}
+
+void ql_queue_asic_error(struct ql_adapter *qdev)
+{
+	netif_stop_queue(qdev->ndev);
+	netif_carrier_off(qdev->ndev);
+	ql_disable_interrupts(qdev);
+	queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
+}
+
+static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
+				    struct ib_ae_iocb_rsp *ib_ae_rsp)
+{
+	switch (ib_ae_rsp->event) {
+	case MGMT_ERR_EVENT:
+		QPRINTK(qdev, RX_ERR, ERR,
+			"Management Processor Fatal Error.\n");
+		ql_queue_fw_error(qdev);
+		return;
+
+	case CAM_LOOKUP_ERR_EVENT:
+		QPRINTK(qdev, LINK, ERR,
+			"Multiple CAM hits lookup occurred.\n");
+		QPRINTK(qdev, DRV, ERR, "This event shouldn't occur.\n");
+		ql_queue_asic_error(qdev);
+		return;
+
+	case SOFT_ECC_ERROR_EVENT:
+		QPRINTK(qdev, RX_ERR, ERR, "Soft ECC error detected.\n");
+		ql_queue_asic_error(qdev);
+		break;
+
+	case PCI_ERR_ANON_BUF_RD:
+		QPRINTK(qdev, RX_ERR, ERR,
+			"PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
+			ib_ae_rsp->q_id);
+		ql_queue_asic_error(qdev);
+		break;
+
+	default:
+		QPRINTK(qdev, DRV, ERR, "Unexpected event %d.\n",
+			ib_ae_rsp->event);
+		ql_queue_asic_error(qdev);
+		break;
+	}
+}
+
+static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
+{
+	struct ql_adapter *qdev = rx_ring->qdev;
+	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
+	struct ob_mac_iocb_rsp *net_rsp = NULL;
+	int count = 0;
+
+	/* While there are entries in the completion queue. */
+	while (prod != rx_ring->cnsmr_idx) {
+
+		QPRINTK(qdev, RX_STATUS, DEBUG,
+			"cq_id = %d, prod = %d, cnsmr = %d.\n", rx_ring->cq_id,
+			prod, rx_ring->cnsmr_idx);
+
+		net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
+		rmb();
+		switch (net_rsp->opcode) {
+
+		case OPCODE_OB_MAC_TSO_IOCB:
+		case OPCODE_OB_MAC_IOCB:
+			ql_process_mac_tx_intr(qdev, net_rsp);
+			break;
+		default:
+			QPRINTK(qdev, RX_STATUS, DEBUG,
+				"Hit default case, not handled! Dropping the packet, opcode = %x.\n",
+				net_rsp->opcode);
+		}
+		count++;
+		ql_update_cq(rx_ring);
+		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
+	}
+	ql_write_cq_idx(rx_ring);
+	if (netif_queue_stopped(qdev->ndev) && net_rsp != NULL) {
+		struct tx_ring *tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
+		if (atomic_read(&tx_ring->queue_stopped) &&
+		    (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
+			/*
+			 * The queue got stopped because the tx_ring was full.
+			 * Wake it up, because it's now at least 25% empty.
+			 */
+			netif_wake_queue(qdev->ndev);
+	}
+
+	return count;
+}
+
+static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
+{
+	struct ql_adapter *qdev = rx_ring->qdev;
+	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
+	struct ql_net_rsp_iocb *net_rsp;
+	int count = 0;
+
+	/* While there are entries in the completion queue. */
+	while (prod != rx_ring->cnsmr_idx) {
+
+		QPRINTK(qdev, RX_STATUS, DEBUG,
+			"cq_id = %d, prod = %d, cnsmr = %d.\n", rx_ring->cq_id,
+			prod, rx_ring->cnsmr_idx);
+
+		net_rsp = rx_ring->curr_entry;
+		rmb();
+		switch (net_rsp->opcode) {
+		case OPCODE_IB_MAC_IOCB:
+			ql_process_mac_rx_intr(qdev, rx_ring,
+					       (struct ib_mac_iocb_rsp *)
+					       net_rsp);
+			break;
+
+		case OPCODE_IB_AE_IOCB:
+			ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
+						net_rsp);
+			break;
+		default:
+			{
+				QPRINTK(qdev, RX_STATUS, DEBUG,
+					"Hit default case, not handled! Dropping the packet, opcode = %x.\n",
+					net_rsp->opcode);
+			}
+		}
+		count++;
+		ql_update_cq(rx_ring);
+		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
+		if (count == budget)
+			break;
+	}
+	ql_update_buffer_queues(qdev, rx_ring);
+	ql_write_cq_idx(rx_ring);
+	return count;
+}
+
+static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
+{
+	struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
+	struct ql_adapter *qdev = rx_ring->qdev;
+	int work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
+
+	QPRINTK(qdev, RX_STATUS, DEBUG, "Enter, NAPI POLL cq_id = %d.\n",
+		rx_ring->cq_id);
+
+	if (work_done < budget) {
+		__netif_rx_complete(qdev->ndev, napi);
+		ql_enable_completion_interrupt(qdev, rx_ring->irq);
+	}
+	return work_done;
+}
+
+static void ql_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
+{
+	struct ql_adapter *qdev = netdev_priv(ndev);
+
+	qdev->vlgrp = grp;
+	if (grp) {
+		QPRINTK(qdev, IFUP, DEBUG, "Turning on VLAN in NIC_RCV_CFG.\n");
+		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
+			   NIC_RCV_CFG_VLAN_MATCH_AND_NON);
+	} else {
+		QPRINTK(qdev, IFUP, DEBUG,
+			"Turning off VLAN in NIC_RCV_CFG.\n");
+		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
+	}
+}
+
+static void ql_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
+{
+	struct ql_adapter *qdev = netdev_priv(ndev);
+	u32 enable_bit = MAC_ADDR_E;
+
+	spin_lock(&qdev->hw_lock);
+	if (ql_set_mac_addr_reg
+	    (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
+		QPRINTK(qdev, IFUP, ERR, "Failed to init vlan address.\n");
+	}
+	spin_unlock(&qdev->hw_lock);
+}
+
+static void ql_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
+{
+	struct ql_adapter *qdev = netdev_priv(ndev);
+	u32 enable_bit = 0;
+
+	spin_lock(&qdev->hw_lock);
+	if (ql_set_mac_addr_reg
+	    (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
+		QPRINTK(qdev, IFUP, ERR, "Failed to clear vlan address.\n");
+	}
+	spin_unlock(&qdev->hw_lock);
+}
+
+/* Worker thread to process a given rx_ring that is dedicated
+ * to outbound completions.
+ */
+static void ql_tx_clean(struct work_struct *work)
+{
+	struct rx_ring *rx_ring =
+	    container_of(work, struct rx_ring, rx_work.work);
+	ql_clean_outbound_rx_ring(rx_ring);
+	ql_enable_completion_interrupt(rx_ring->qdev, rx_ring->irq);
+}
+
+/* Worker thread to process a given rx_ring that is dedicated
+ * to inbound completions.
+ */
+static void ql_rx_clean(struct work_struct *work)
+{
+	struct rx_ring *rx_ring =
+	    container_of(work, struct rx_ring, rx_work.work);
+	ql_clean_inbound_rx_ring(rx_ring, 64);
+	ql_enable_completion_interrupt(rx_ring->qdev, rx_ring->irq);
+}
+
+/* MSI-X Multiple Vector Interrupt Handler for outbound completions. */
+static irqreturn_t qlge_msix_tx_isr(int irq, void *dev_id)
+{
+	struct rx_ring *rx_ring = dev_id;
+	queue_delayed_work_on(rx_ring->cpu, rx_ring->qdev->q_workqueue,
+			      &rx_ring->rx_work, 0);
+	return IRQ_HANDLED;
+}
+
+/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
+static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
+{
+	struct rx_ring *rx_ring = dev_id;
+	struct ql_adapter *qdev = rx_ring->qdev;
+	netif_rx_schedule(qdev->ndev, &rx_ring->napi);
+	return IRQ_HANDLED;
+}
+
+/* We check here to see if we're already handling a legacy
+ * interrupt.  If we are, then it must belong to another
+ * chip with which we're sharing the interrupt line.
+ */
+int ql_legacy_check(struct ql_adapter *qdev)
+{
+	int err;
+	spin_lock(&qdev->legacy_lock);
+	err = atomic_read(&qdev->intr_context[0].irq_cnt);
+	spin_unlock(&qdev->legacy_lock);
+	return err;
+}
+
+/* This handles a fatal error, MPI activity, and the default
+ * rx_ring in an MSI-X multiple vector environment.
+ * In an MSI/Legacy environment it also processes the rest of
+ * the rx_rings.
+ */
+static irqreturn_t qlge_isr(int irq, void *dev_id)
+{
+	struct rx_ring *rx_ring = dev_id;
+	struct ql_adapter *qdev = rx_ring->qdev;
+	struct intr_context *intr_context = &qdev->intr_context[0];
+	u32 var;
+	int i;
+	int work_done = 0;
+
+	if (qdev->legacy_check && qdev->legacy_check(qdev)) {
+		QPRINTK(qdev, INTR, INFO, "Already busy, not our interrupt.\n");
+		return IRQ_NONE;	/* Not our interrupt */
+	}
+
+	var = ql_read32(qdev, STS);
+
+	/*
+	 * Check for fatal error.
+	 */
+	if (var & STS_FE) {
+		ql_queue_asic_error(qdev);
+		QPRINTK(qdev, INTR, ERR, "Got fatal error, STS = %x.\n", var);
+		var = ql_read32(qdev, ERR_STS);
+		QPRINTK(qdev, INTR, ERR,
+			"Resetting chip. Error Status Register = 0x%x\n", var);
+		return IRQ_HANDLED;
+	}
+
+	/*
+	 * Check MPI processor activity.
+	 */
+	if (var & STS_PI) {
+		/*
+		 * We've got an async event or mailbox completion.
+		 * Handle it and clear the source of the interrupt.
+		 */
+		QPRINTK(qdev, INTR, ERR, "Got MPI processor interrupt.\n");
+		ql_disable_completion_interrupt(qdev, intr_context->intr);
+		queue_delayed_work_on(smp_processor_id(), qdev->workqueue,
+				      &qdev->mpi_work, 0);
+		work_done++;
+	}
+
+	/*
+	 * Check the default queue and wake handler if active.
+	 */
+	rx_ring = &qdev->rx_ring[0];
+	if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) != rx_ring->cnsmr_idx) {
+		QPRINTK(qdev, INTR, INFO, "Waking handler for rx_ring[0].\n");
+		ql_disable_completion_interrupt(qdev, intr_context->intr);
+		queue_delayed_work_on(smp_processor_id(), qdev->q_workqueue,
+				      &rx_ring->rx_work, 0);
+		work_done++;
+	}
+
+	if (!test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
+		/*
+		 * Start the DPC for each active queue.
+		 */
+		for (i = 1; i < qdev->rx_ring_count; i++) {
+			rx_ring = &qdev->rx_ring[i];
+			if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
+			    rx_ring->cnsmr_idx) {
+				QPRINTK(qdev, INTR, INFO,
+					"Waking handler for rx_ring[%d].\n", i);
+				ql_disable_completion_interrupt(qdev,
+								intr_context->
+								intr);
+				if (i < qdev->rss_ring_first_cq_id)
+					queue_delayed_work_on(rx_ring->cpu,
+							      qdev->q_workqueue,
+							      &rx_ring->rx_work,
+							      0);
+				else
+					netif_rx_schedule(qdev->ndev,
+							  &rx_ring->napi);
+				work_done++;
+			}
+		}
+	}
+	return work_done ? IRQ_HANDLED : IRQ_NONE;
+}
+
+static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
+{
+
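+	/* For GSO frames, build a TSO IOCB and seed the TCP checksum
+	 * field with the pseudo-header sum so the hardware can finish
+	 * the checksum of each segment it generates.
+	 */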
+	if (skb_is_gso(skb)) {
+		int err;
+		if (skb_header_cloned(skb)) {
+			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+			if (err)
+				return err;
+		}
+
+		mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
+		mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
+		mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
+		mac_iocb_ptr->total_hdrs_len =
+		    cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
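+		/* net_trans_offset packs the network header offset in
+		 * the low bits and the transport header offset above
+		 * OB_MAC_TRANSPORT_HDR_SHIFT.
+		 */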
+		mac_iocb_ptr->net_trans_offset =
+		    cpu_to_le16(skb_network_offset(skb) |
+				skb_transport_offset(skb)
+				<< OB_MAC_TRANSPORT_HDR_SHIFT);
+		mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
+		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
+		if (likely(skb->protocol == htons(ETH_P_IP))) {
+			struct iphdr *iph = ip_hdr(skb);
+			iph->check = 0;
+			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
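+			/* Seed the TCP checksum with the pseudo-header
+			 * sum (zero length) so the hardware can finish
+			 * the checksum for each segment.
+			 */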
+			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+								 iph->daddr, 0,
+								 IPPROTO_TCP,
+								 0);
+		} else if (skb->protocol == htons(ETH_P_IPV6)) {
+			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
+			tcp_hdr(skb)->check =
+			    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+					     &ipv6_hdr(skb)->daddr,
+					     0, IPPROTO_TCP, 0);
+		}
+		return 1;
+	}
+	return 0;
+}
+
+static void ql_hw_csum_setup(struct sk_buff *skb,
+			     struct ob_mac_tso_iocb_req *mac_iocb_ptr)
+{
+	int len;
+	struct iphdr *iph = ip_hdr(skb);
+	u16 *check;
+	mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
+	mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
+	mac_iocb_ptr->net_trans_offset =
+		cpu_to_le16(skb_network_offset(skb) |
+		skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
+
+	mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
+	len = (ntohs(iph->tot_len) - (iph->ihl << 2));
+	if (likely(iph->protocol == IPPROTO_TCP)) {
+		check = &(tcp_hdr(skb)->check);
+		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
+		mac_iocb_ptr->total_hdrs_len =
+		    cpu_to_le16(skb_transport_offset(skb) +
+				(tcp_hdr(skb)->doff << 2));
+	} else {
+		check = &(udp_hdr(skb)->check);
+		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
+		mac_iocb_ptr->total_hdrs_len =
+		    cpu_to_le16(skb_transport_offset(skb) +
+				sizeof(struct udphdr));
+	}
+	*check = ~csum_tcpudp_magic(iph->saddr,
+				    iph->daddr, len, iph->protocol, 0);
+}
+
+static int qlge_send(struct sk_buff *skb, struct net_device *ndev)
+{
+	struct tx_ring_desc *tx_ring_desc;
+	struct ob_mac_iocb_req *mac_iocb_ptr;
+	struct ql_adapter *qdev = netdev_priv(ndev);
+	int tso;
+	struct tx_ring *tx_ring;
+	u32 tx_ring_idx = (u32) QL_TXQ_IDX(qdev, skb);
+
+	tx_ring = &qdev->tx_ring[tx_ring_idx];
+
+	if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
+		QPRINTK(qdev, TX_QUEUED, INFO,
+			"%s: shutting down tx queue %d due to lack of resources.\n",
+			__func__, tx_ring_idx);
+		netif_stop_queue(ndev);
+		atomic_inc(&tx_ring->queue_stopped);
+		return NETDEV_TX_BUSY;
+	}
+	tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
+	mac_iocb_ptr = tx_ring_desc->queue_entry;
+	memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
+	if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) != NETDEV_TX_OK) {
+		QPRINTK(qdev, TX_QUEUED, ERR, "Could not map the segments.\n");
+		return NETDEV_TX_BUSY;
+	}
+
+	mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
+	mac_iocb_ptr->tid = tx_ring_desc->index;
+	/* We use the upper 32-bits to store the tx queue for this IO.
+	 * When we get the completion we can use it to establish the context.
+	 */
+	mac_iocb_ptr->txq_idx = tx_ring_idx;
+	tx_ring_desc->skb = skb;
+
+	mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
+
+	if (qdev->vlgrp && vlan_tx_tag_present(skb)) {
+		QPRINTK(qdev, TX_QUEUED, DEBUG, "Adding a vlan tag %d.\n",
+			vlan_tx_tag_get(skb));
+		mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
+		mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
+	}
+	tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
+	if (tso < 0) {
+		dev_kfree_skb_any(skb);
+		return NETDEV_TX_OK;
+	} else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
+		ql_hw_csum_setup(skb,
+				 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
+	}
+	QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
+	tx_ring->prod_idx++;
+	if (tx_ring->prod_idx == tx_ring->wq_len)
+		tx_ring->prod_idx = 0;
+	wmb();
+
+	ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
+	ndev->trans_start = jiffies;
+	QPRINTK(qdev, TX_QUEUED, DEBUG, "tx queued, slot %d, len %d\n",
+		tx_ring->prod_idx, skb->len);
+
+	atomic_dec(&tx_ring->tx_count);
+	return NETDEV_TX_OK;
+}
+
+static void ql_free_shadow_space(struct ql_adapter *qdev)
+{
+	if (qdev->rx_ring_shadow_reg_area) {
+		pci_free_consistent(qdev->pdev,
+				    PAGE_SIZE,
+				    qdev->rx_ring_shadow_reg_area,
+				    qdev->rx_ring_shadow_reg_dma);
+		qdev->rx_ring_shadow_reg_area = NULL;
+	}
+	if (qdev->tx_ring_shadow_reg_area) {
+		pci_free_consistent(qdev->pdev,
+				    PAGE_SIZE,
+				    qdev->tx_ring_shadow_reg_area,
+				    qdev->tx_ring_shadow_reg_dma);
+		qdev->tx_ring_shadow_reg_area = NULL;
+	}
+}
+
+static int ql_alloc_shadow_space(struct ql_adapter *qdev)
+{
+	qdev->rx_ring_shadow_reg_area =
+	    pci_alloc_consistent(qdev->pdev,
+				 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
+	if (qdev->rx_ring_shadow_reg_area == NULL) {
+		QPRINTK(qdev, IFUP, ERR,
+			"Allocation of RX shadow space failed.\n");
+		return -ENOMEM;
+	}
+	qdev->tx_ring_shadow_reg_area =
+	    pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
+				 &qdev->tx_ring_shadow_reg_dma);
+	if (qdev->tx_ring_shadow_reg_area == NULL) {
+		QPRINTK(qdev, IFUP, ERR,
+			"Allocation of TX shadow space failed.\n");
+		goto err_wqp_sh_area;
+	}
+	return 0;
+
+err_wqp_sh_area:
+	pci_free_consistent(qdev->pdev,
+			    PAGE_SIZE,
+			    qdev->rx_ring_shadow_reg_area,
+			    qdev->rx_ring_shadow_reg_dma);
+	return -ENOMEM;
+}
+
+static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
+{
+	struct tx_ring_desc *tx_ring_desc;
+	int i;
+	struct ob_mac_iocb_req *mac_iocb_ptr;
+
+	mac_iocb_ptr = tx_ring->wq_base;
+	tx_ring_desc = tx_ring->q;
+	for (i = 0; i < tx_ring->wq_len; i++) {
+		tx_ring_desc->index = i;
+		tx_ring_desc->skb = NULL;
+		tx_ring_desc->queue_entry = mac_iocb_ptr;
+		mac_iocb_ptr++;
+		tx_ring_desc++;
+	}
+	atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
+	atomic_set(&tx_ring->queue_stopped, 0);
+}
+
+static void ql_free_tx_resources(struct ql_adapter *qdev,
+				 struct tx_ring *tx_ring)
+{
+	if (tx_ring->wq_base) {
+		pci_free_consistent(qdev->pdev, tx_ring->wq_size,
+				    tx_ring->wq_base, tx_ring->wq_base_dma);
+		tx_ring->wq_base = NULL;
+	}
+	kfree(tx_ring->q);
+	tx_ring->q = NULL;
+}
+
+static int ql_alloc_tx_resources(struct ql_adapter *qdev,
+				 struct tx_ring *tx_ring)
+{
+	tx_ring->wq_base =
+	    pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
+				 &tx_ring->wq_base_dma);
+
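+	/* The work queue must be naturally aligned to its size;
+	 * treat a misaligned DMA address as an allocation failure.
+	 */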
+	if ((tx_ring->wq_base == NULL)
+	    || tx_ring->wq_base_dma & (tx_ring->wq_size - 1)) {
+		QPRINTK(qdev, IFUP, ERR, "tx_ring alloc failed.\n");
+		return -ENOMEM;
+	}
+	tx_ring->q =
+	    kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
+	if (tx_ring->q == NULL)
+		goto err;
+
+	return 0;
+err:
+	pci_free_consistent(qdev->pdev, tx_ring->wq_size,
+			    tx_ring->wq_base, tx_ring->wq_base_dma);
+	return -ENOMEM;
+}
+
+void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
+{
+	int i;
+	struct bq_desc *lbq_desc;
+
+	for (i = 0; i < rx_ring->lbq_len; i++) {
+		lbq_desc = &rx_ring->lbq[i];
+		if (lbq_desc->p.lbq_page) {
+			pci_unmap_page(qdev->pdev,
+				       pci_unmap_addr(lbq_desc, mapaddr),
+				       pci_unmap_len(lbq_desc, maplen),
+				       PCI_DMA_FROMDEVICE);
+
+			put_page(lbq_desc->p.lbq_page);
+			lbq_desc->p.lbq_page = NULL;
+		}
+		lbq_desc->bq->addr_lo = 0;
+		lbq_desc->bq->addr_hi = 0;
+	}
+}
+
+/*
+ * Allocate and map a page for each element of the lbq.
+ */
+static int ql_alloc_lbq_buffers(struct ql_adapter *qdev,
+				struct rx_ring *rx_ring)
+{
+	int i;
+	struct bq_desc *lbq_desc;
+	u64 map;
+	struct bq_element *bq = rx_ring->lbq_base;
+
+	for (i = 0; i < rx_ring->lbq_len; i++) {
+		lbq_desc = &rx_ring->lbq[i];
+		memset(lbq_desc, 0, sizeof(*lbq_desc));
+		lbq_desc->bq = bq;
+		lbq_desc->index = i;
+		lbq_desc->p.lbq_page = alloc_page(GFP_ATOMIC);
+		if (unlikely(!lbq_desc->p.lbq_page)) {
+			QPRINTK(qdev, IFUP, ERR, "failed alloc_page().\n");
+			goto mem_error;
+		} else {
+			map = pci_map_page(qdev->pdev,
+					   lbq_desc->p.lbq_page,
+					   0, PAGE_SIZE, PCI_DMA_FROMDEVICE);
+			if (pci_dma_mapping_error(qdev->pdev, map)) {
+				QPRINTK(qdev, IFUP, ERR,
+					"PCI mapping failed.\n");
+				goto mem_error;
+			}
+			pci_unmap_addr_set(lbq_desc, mapaddr, map);
+			pci_unmap_len_set(lbq_desc, maplen, PAGE_SIZE);
+			bq->addr_lo = cpu_to_le32(map);
+			bq->addr_hi = cpu_to_le32(map >> 32);
+		}
+		bq++;
+	}
+	return 0;
+mem_error:
+	ql_free_lbq_buffers(qdev, rx_ring);
+	return -ENOMEM;
+}
+
+void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
+{
+	int i;
+	struct bq_desc *sbq_desc;
+
+	for (i = 0; i < rx_ring->sbq_len; i++) {
+		sbq_desc = &rx_ring->sbq[i];
+		if (sbq_desc == NULL) {
+			QPRINTK(qdev, IFUP, ERR, "sbq_desc %d is NULL.\n", i);
+			return;
+		}
+		if (sbq_desc->p.skb) {
+			pci_unmap_single(qdev->pdev,
+					 pci_unmap_addr(sbq_desc, mapaddr),
+					 pci_unmap_len(sbq_desc, maplen),
+					 PCI_DMA_FROMDEVICE);
+			dev_kfree_skb(sbq_desc->p.skb);
+			sbq_desc->p.skb = NULL;
+		}
+		if (sbq_desc->bq == NULL) {
+			QPRINTK(qdev, IFUP, ERR, "sbq_desc->bq %d is NULL.\n",
+				i);
+			return;
+		}
+		sbq_desc->bq->addr_lo = 0;
+		sbq_desc->bq->addr_hi = 0;
+	}
+}
+
+/* Allocate and map an skb for each element of the sbq. */
+static int ql_alloc_sbq_buffers(struct ql_adapter *qdev,
+				struct rx_ring *rx_ring)
+{
+	int i;
+	struct bq_desc *sbq_desc;
+	struct sk_buff *skb;
+	u64 map;
+	struct bq_element *bq = rx_ring->sbq_base;
+
+	for (i = 0; i < rx_ring->sbq_len; i++) {
+		sbq_desc = &rx_ring->sbq[i];
+		memset(sbq_desc, 0, sizeof(*sbq_desc));
+		sbq_desc->index = i;
+		sbq_desc->bq = bq;
+		skb = netdev_alloc_skb(qdev->ndev, rx_ring->sbq_buf_size);
+		if (unlikely(!skb)) {
+			/* Better luck next round */
+			QPRINTK(qdev, IFUP, ERR,
+				"small buff alloc failed for %d bytes at index %d.\n",
+				rx_ring->sbq_buf_size, i);
+			goto mem_err;
+		}
+		skb_reserve(skb, QLGE_SB_PAD);
+		sbq_desc->p.skb = skb;
+		/*
+		 * Map only half the buffer, because the other half
+		 * may get some data copied to it when the
+		 * completion arrives.
+		 */
+		map = pci_map_single(qdev->pdev,
+				     skb->data,
+				     rx_ring->sbq_buf_size / 2,
+				     PCI_DMA_FROMDEVICE);
+		if (pci_dma_mapping_error(qdev->pdev, map)) {
+			QPRINTK(qdev, IFUP, ERR, "PCI mapping failed.\n");
+			goto mem_err;
+		}
+		pci_unmap_addr_set(sbq_desc, mapaddr, map);
+		pci_unmap_len_set(sbq_desc, maplen, rx_ring->sbq_buf_size / 2);
+		bq->addr_lo = cpu_to_le32(map);
+		bq->addr_hi = cpu_to_le32(map >> 32);
+		bq++;
+	}
+	return 0;
+mem_err:
+	ql_free_sbq_buffers(qdev, rx_ring);
+	return -ENOMEM;
+}
+
+static void ql_free_rx_resources(struct ql_adapter *qdev,
+				 struct rx_ring *rx_ring)
+{
+	if (rx_ring->sbq_len)
+		ql_free_sbq_buffers(qdev, rx_ring);
+	if (rx_ring->lbq_len)
+		ql_free_lbq_buffers(qdev, rx_ring);
+
+	/* Free the small buffer queue. */
+	if (rx_ring->sbq_base) {
+		pci_free_consistent(qdev->pdev,
+				    rx_ring->sbq_size,
+				    rx_ring->sbq_base, rx_ring->sbq_base_dma);
+		rx_ring->sbq_base = NULL;
+	}
+
+	/* Free the small buffer queue control blocks. */
+	kfree(rx_ring->sbq);
+	rx_ring->sbq = NULL;
+
+	/* Free the large buffer queue. */
+	if (rx_ring->lbq_base) {
+		pci_free_consistent(qdev->pdev,
+				    rx_ring->lbq_size,
+				    rx_ring->lbq_base, rx_ring->lbq_base_dma);
+		rx_ring->lbq_base = NULL;
+	}
+
+	/* Free the large buffer queue control blocks. */
+	kfree(rx_ring->lbq);
+	rx_ring->lbq = NULL;
+
+	/* Free the rx queue. */
+	if (rx_ring->cq_base) {
+		pci_free_consistent(qdev->pdev,
+				    rx_ring->cq_size,
+				    rx_ring->cq_base, rx_ring->cq_base_dma);
+		rx_ring->cq_base = NULL;
+	}
+}
+
+/* Allocate queues and buffers for this completion queue based
+ * on the values in the parameter structure. */
+static int ql_alloc_rx_resources(struct ql_adapter *qdev,
+				 struct rx_ring *rx_ring)
+{
+
+	/*
+	 * Allocate the completion queue for this rx_ring.
+	 */
+	rx_ring->cq_base =
+	    pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
+				 &rx_ring->cq_base_dma);
+
+	if (rx_ring->cq_base == NULL) {
+		QPRINTK(qdev, IFUP, ERR, "rx_ring alloc failed.\n");
+		return -ENOMEM;
+	}
+
+	if (rx_ring->sbq_len) {
+		/*
+		 * Allocate small buffer queue.
+		 */
+		rx_ring->sbq_base =
+		    pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
+					 &rx_ring->sbq_base_dma);
+
+		if (rx_ring->sbq_base == NULL) {
+			QPRINTK(qdev, IFUP, ERR,
+				"Small buffer queue allocation failed.\n");
+			goto err_mem;
+		}
+
+		/*
+		 * Allocate small buffer queue control blocks.
+		 */
+		rx_ring->sbq =
+		    kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
+			    GFP_KERNEL);
+		if (rx_ring->sbq == NULL) {
+			QPRINTK(qdev, IFUP, ERR,
+				"Small buffer queue control block allocation failed.\n");
+			goto err_mem;
+		}
+
+		if (ql_alloc_sbq_buffers(qdev, rx_ring)) {
+			QPRINTK(qdev, IFUP, ERR,
+				"Small buffer allocation failed.\n");
+			goto err_mem;
+		}
+	}
+
+	if (rx_ring->lbq_len) {
+		/*
+		 * Allocate large buffer queue.
+		 */
+		rx_ring->lbq_base =
+		    pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
+					 &rx_ring->lbq_base_dma);
+
+		if (rx_ring->lbq_base == NULL) {
+			QPRINTK(qdev, IFUP, ERR,
+				"Large buffer queue allocation failed.\n");
+			goto err_mem;
+		}
+		/*
+		 * Allocate large buffer queue control blocks.
+		 */
+		rx_ring->lbq =
+		    kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
+			    GFP_KERNEL);
+		if (rx_ring->lbq == NULL) {
+			QPRINTK(qdev, IFUP, ERR,
+				"Large buffer queue control block allocation failed.\n");
+			goto err_mem;
+		}
+
+		/*
+		 * Allocate the buffers.
+		 */
+		if (ql_alloc_lbq_buffers(qdev, rx_ring)) {
+			QPRINTK(qdev, IFUP, ERR,
+				"Large buffer allocation failed.\n");
+			goto err_mem;
+		}
+	}
+
+	return 0;
+
+err_mem:
+	ql_free_rx_resources(qdev, rx_ring);
+	return -ENOMEM;
+}
+
+static void ql_tx_ring_clean(struct ql_adapter *qdev)
+{
+	struct tx_ring *tx_ring;
+	struct tx_ring_desc *tx_ring_desc;
+	int i, j;
+
+	/*
+	 * Loop through all queues and free
+	 * any resources.
+	 */
+	for (j = 0; j < qdev->tx_ring_count; j++) {
+		tx_ring = &qdev->tx_ring[j];
+		for (i = 0; i < tx_ring->wq_len; i++) {
+			tx_ring_desc = &tx_ring->q[i];
+			if (tx_ring_desc && tx_ring_desc->skb) {
+				QPRINTK(qdev, IFDOWN, ERR,
+				"Freeing lost SKB %p, from queue %d, index %d.\n",
+					tx_ring_desc->skb, j,
+					tx_ring_desc->index);
+				ql_unmap_send(qdev, tx_ring_desc,
+					      tx_ring_desc->map_cnt);
+				dev_kfree_skb(tx_ring_desc->skb);
+				tx_ring_desc->skb = NULL;
+			}
+		}
+	}
+}
+
+static void ql_free_ring_cb(struct ql_adapter *qdev)
+{
+	kfree(qdev->ring_mem);
+}
+
+static int ql_alloc_ring_cb(struct ql_adapter *qdev)
+{
+	/* Allocate space for tx/rx ring control blocks. */
+	qdev->ring_mem_size =
+	    (qdev->tx_ring_count * sizeof(struct tx_ring)) +
+	    (qdev->rx_ring_count * sizeof(struct rx_ring));
+	qdev->ring_mem = kmalloc(qdev->ring_mem_size, GFP_KERNEL);
+	if (qdev->ring_mem == NULL) {
+		return -ENOMEM;
+	} else {
+		qdev->rx_ring = qdev->ring_mem;
+		qdev->tx_ring = qdev->ring_mem +
+		    (qdev->rx_ring_count * sizeof(struct rx_ring));
+	}
+	return 0;
+}
+
+static void ql_free_mem_resources(struct ql_adapter *qdev)
+{
+	int i;
+
+	for (i = 0; i < qdev->tx_ring_count; i++)
+		ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
+	for (i = 0; i < qdev->rx_ring_count; i++)
+		ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
+	ql_free_shadow_space(qdev);
+}
+
+static int ql_alloc_mem_resources(struct ql_adapter *qdev)
+{
+	int i;
+
+	/* Allocate space for our shadow registers and such. */
+	if (ql_alloc_shadow_space(qdev))
+		return -ENOMEM;
+
+	for (i = 0; i < qdev->rx_ring_count; i++) {
+		if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
+			QPRINTK(qdev, IFUP, ERR,
+				"RX resource allocation failed.\n");
+			goto err_mem;
+		}
+	}
+	/* Allocate tx queue resources */
+	for (i = 0; i < qdev->tx_ring_count; i++) {
+		if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
+			QPRINTK(qdev, IFUP, ERR,
+				"TX resource allocation failed.\n");
+			goto err_mem;
+		}
+	}
+	return 0;
+
+err_mem:
+	ql_free_mem_resources(qdev);
+	return -ENOMEM;
+}
+
+/* Set up the rx ring control block and pass it to the chip.
+ * The control block is defined as
+ * "Completion Queue Initialization Control Block", or cqicb.
+ */
+static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
+{
+	struct cqicb *cqicb = &rx_ring->cqicb;
+	void *shadow_reg = qdev->rx_ring_shadow_reg_area +
+	    (rx_ring->cq_id * sizeof(u64) * 4);
+	u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
+	    (rx_ring->cq_id * sizeof(u64) * 4);
+	void __iomem *doorbell_area =
+	    qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
+	int err = 0;
+	u16 bq_len;
+
+	/* Set up the shadow registers for this ring. */
+	rx_ring->prod_idx_sh_reg = shadow_reg;
+	rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
+	shadow_reg += sizeof(u64);
+	shadow_reg_dma += sizeof(u64);
+	rx_ring->lbq_base_indirect = shadow_reg;
+	rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
+	shadow_reg += sizeof(u64);
+	shadow_reg_dma += sizeof(u64);
+	rx_ring->sbq_base_indirect = shadow_reg;
+	rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
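+	/* Each completion queue owns four u64 shadow slots; the
+	 * three used above hold the producer index and the lbq/sbq
+	 * indirect base addresses, leaving the fourth reserved.
+	 */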
+
+	/* PCI doorbell mem area + 0x00 for consumer index register */
+	rx_ring->cnsmr_idx_db_reg = (u32 *) doorbell_area;
+	rx_ring->cnsmr_idx = 0;
+	rx_ring->curr_entry = rx_ring->cq_base;
+
+	/* PCI doorbell mem area + 0x04 for valid register */
+	rx_ring->valid_db_reg = doorbell_area + 0x04;
+
+	/* PCI doorbell mem area + 0x18 for large buffer consumer */
+	rx_ring->lbq_prod_idx_db_reg = (u32 *) (doorbell_area + 0x18);
+
+	/* PCI doorbell mem area + 0x1c */
+	rx_ring->sbq_prod_idx_db_reg = (u32 *) (doorbell_area + 0x1c);
+
+	memset((void *)cqicb, 0, sizeof(struct cqicb));
+	cqicb->msix_vect = rx_ring->irq;
+
+	cqicb->len = cpu_to_le16(rx_ring->cq_len | LEN_V | LEN_CPP_CONT);
+
+	cqicb->addr_lo = cpu_to_le32(rx_ring->cq_base_dma);
+	cqicb->addr_hi = cpu_to_le32((u64) rx_ring->cq_base_dma >> 32);
+
+	cqicb->prod_idx_addr_lo = cpu_to_le32(rx_ring->prod_idx_sh_reg_dma);
+	cqicb->prod_idx_addr_hi =
+	    cpu_to_le32((u64) rx_ring->prod_idx_sh_reg_dma >> 32);
+
+	/*
+	 * Set up the control block load flags.
+	 */
+	cqicb->flags = FLAGS_LC |	/* Load queue base address */
+	    FLAGS_LV |		/* Load MSI-X vector */
+	    FLAGS_LI;		/* Load irq delay values */
+	if (rx_ring->lbq_len) {
+		cqicb->flags |= FLAGS_LL;	/* Load lbq values */
+		*((u64 *) rx_ring->lbq_base_indirect) = rx_ring->lbq_base_dma;
+		cqicb->lbq_addr_lo =
+		    cpu_to_le32(rx_ring->lbq_base_indirect_dma);
+		cqicb->lbq_addr_hi =
+		    cpu_to_le32((u64) rx_ring->lbq_base_indirect_dma >> 32);
+		cqicb->lbq_buf_size = cpu_to_le32(rx_ring->lbq_buf_size);
+		bq_len = (u16) rx_ring->lbq_len;
+		cqicb->lbq_len = cpu_to_le16(bq_len);
+		rx_ring->lbq_prod_idx = rx_ring->lbq_len - 16;
+		rx_ring->lbq_curr_idx = 0;
+		rx_ring->lbq_clean_idx = rx_ring->lbq_prod_idx;
+		rx_ring->lbq_free_cnt = 16;
+	}
+	if (rx_ring->sbq_len) {
+		cqicb->flags |= FLAGS_LS;	/* Load sbq values */
+		*((u64 *) rx_ring->sbq_base_indirect) = rx_ring->sbq_base_dma;
+		cqicb->sbq_addr_lo =
+		    cpu_to_le32(rx_ring->sbq_base_indirect_dma);
+		cqicb->sbq_addr_hi =
+		    cpu_to_le32((u64) rx_ring->sbq_base_indirect_dma >> 32);
+		cqicb->sbq_buf_size =
+		    cpu_to_le16(((rx_ring->sbq_buf_size / 2) + 8) & 0xfffffff8);
+		bq_len = (u16) rx_ring->sbq_len;
+		cqicb->sbq_len = cpu_to_le16(bq_len);
+		rx_ring->sbq_prod_idx = rx_ring->sbq_len - 16;
+		rx_ring->sbq_curr_idx = 0;
+		rx_ring->sbq_clean_idx = rx_ring->sbq_prod_idx;
+		rx_ring->sbq_free_cnt = 16;
+	}
+	switch (rx_ring->type) {
+	case TX_Q:
+		/* If there's only one interrupt, then we use
+		 * worker threads to process the outbound
+		 * completion handling rx_rings.  We do this so
+		 * they can be run on multiple CPUs.  There is
+		 * room to play with this more, where we would only
+		 * run in a worker if there are more than x number
+		 * of outbound completions on the queue and more
+		 * than one queue active -- some threshold that
+		 * would indicate a benefit in spite of the cost
+		 * of a context switch.
+		 * With more than one interrupt the outbound
+		 * completions could be processed in the ISR, but
+		 * freeing the skb there triggers a WARN_ON when all
+		 * debug warnings are enabled, so a worker is used
+		 * in that case as well.
+		 */
+		INIT_DELAYED_WORK(&rx_ring->rx_work, ql_tx_clean);
+		cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
+		cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
+		break;
+	case DEFAULT_Q:
+		INIT_DELAYED_WORK(&rx_ring->rx_work, ql_rx_clean);
+		cqicb->irq_delay = 0;
+		cqicb->pkt_delay = 0;
+		break;
+	case RX_Q:
+		/* Inbound completion handling rx_rings run in
+		 * separate NAPI contexts.
+		 */
+		netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
+			       64);
+		cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
+		cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
+		break;
+	default:
+		QPRINTK(qdev, IFUP, DEBUG, "Invalid rx_ring->type = %d.\n",
+			rx_ring->type);
+	}
+	QPRINTK(qdev, IFUP, INFO, "Initializing rx work queue.\n");
+	err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
+			   CFG_LCQ, rx_ring->cq_id);
+	if (err) {
+		QPRINTK(qdev, IFUP, ERR, "Failed to load CQICB.\n");
+		return err;
+	}
+	QPRINTK(qdev, IFUP, INFO, "Successfully loaded CQICB.\n");
+	/*
+	 * Advance the producer index for the buffer queues.
+	 */
+	wmb();
+	if (rx_ring->lbq_len)
+		ql_write_db_reg(rx_ring->lbq_prod_idx,
+				rx_ring->lbq_prod_idx_db_reg);
+	if (rx_ring->sbq_len)
+		ql_write_db_reg(rx_ring->sbq_prod_idx,
+				rx_ring->sbq_prod_idx_db_reg);
+	return err;
+}
+
+static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
+{
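+	/* The wqicb is assumed to be the first element of the
+	 * tx_ring structure, so the cast below addresses it
+	 * directly.
+	 */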
+	struct wqicb *wqicb = (struct wqicb *)tx_ring;
+	void __iomem *doorbell_area =
+	    qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
+	void *shadow_reg = qdev->tx_ring_shadow_reg_area +
+	    (tx_ring->wq_id * sizeof(u64));
+	u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
+	    (tx_ring->wq_id * sizeof(u64));
+	int err = 0;
+
+	/*
+	 * Assign doorbell registers for this tx_ring.
+	 */
+	/* TX PCI doorbell mem area for tx producer index */
+	tx_ring->prod_idx_db_reg = (u32 *) doorbell_area;
+	tx_ring->prod_idx = 0;
+	/* TX PCI doorbell mem area + 0x04 */
+	tx_ring->valid_db_reg = doorbell_area + 0x04;
+
+	/*
+	 * Assign shadow registers for this tx_ring.
+	 */
+	tx_ring->cnsmr_idx_sh_reg = shadow_reg;
+	tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
+
+	wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
+	wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
+				   Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
+	wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
+	wqicb->rid = 0;
+	wqicb->addr_lo = cpu_to_le32(tx_ring->wq_base_dma);
+	wqicb->addr_hi = cpu_to_le32((u64) tx_ring->wq_base_dma >> 32);
+
+	wqicb->cnsmr_idx_addr_lo = cpu_to_le32(tx_ring->cnsmr_idx_sh_reg_dma);
+	wqicb->cnsmr_idx_addr_hi =
+	    cpu_to_le32((u64) tx_ring->cnsmr_idx_sh_reg_dma >> 32);
+
+	ql_init_tx_ring(qdev, tx_ring);
+
+	err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
+			   (u16) tx_ring->wq_id);
+	if (err) {
+		QPRINTK(qdev, IFUP, ERR, "Failed to load tx_ring.\n");
+		return err;
+	}
+	QPRINTK(qdev, IFUP, INFO, "Successfully loaded WQICB.\n");
+	return err;
+}
+
+static void ql_disable_msix(struct ql_adapter *qdev)
+{
+	if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
+		pci_disable_msix(qdev->pdev);
+		clear_bit(QL_MSIX_ENABLED, &qdev->flags);
+		kfree(qdev->msi_x_entry);
+		qdev->msi_x_entry = NULL;
+	} else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
+		pci_disable_msi(qdev->pdev);
+		clear_bit(QL_MSI_ENABLED, &qdev->flags);
+	}
+}
+
+static void ql_enable_msix(struct ql_adapter *qdev)
+{
+	int i;
+
+	qdev->intr_count = 1;
+	/* Get the MSIX vectors. */
+	if (irq_type == MSIX_IRQ) {
+		/* Try to alloc space for the msix struct,
+		 * if it fails then go to MSI/legacy.
+		 */
+		qdev->msi_x_entry = kcalloc(qdev->rx_ring_count,
+					    sizeof(struct msix_entry),
+					    GFP_KERNEL);
+		if (!qdev->msi_x_entry) {
+			irq_type = MSI_IRQ;
+			goto msi;
+		}
+
+		for (i = 0; i < qdev->rx_ring_count; i++)
+			qdev->msi_x_entry[i].entry = i;
+
+		if (!pci_enable_msix
+		    (qdev->pdev, qdev->msi_x_entry, qdev->rx_ring_count)) {
+			set_bit(QL_MSIX_ENABLED, &qdev->flags);
+			qdev->intr_count = qdev->rx_ring_count;
+			QPRINTK(qdev, IFUP, INFO,
+				"MSI-X Enabled, got %d vectors.\n",
+				qdev->intr_count);
+			return;
+		} else {
+			kfree(qdev->msi_x_entry);
+			qdev->msi_x_entry = NULL;
+			QPRINTK(qdev, IFUP, WARNING,
+				"MSI-X Enable failed, trying MSI.\n");
+			irq_type = MSI_IRQ;
+		}
+	}
+msi:
+	if (irq_type == MSI_IRQ) {
+		if (!pci_enable_msi(qdev->pdev)) {
+			set_bit(QL_MSI_ENABLED, &qdev->flags);
+			QPRINTK(qdev, IFUP, INFO,
+				"Running with MSI interrupts.\n");
+			return;
+		}
+	}
+	irq_type = LEG_IRQ;
+	spin_lock_init(&qdev->legacy_lock);
+	qdev->legacy_check = ql_legacy_check;
+	QPRINTK(qdev, IFUP, DEBUG, "Running with legacy interrupts.\n");
+}
+
+/*
+ * Here we build the intr_context structures based on
+ * our rx_ring count and intr vector count.
+ * The intr_context structure is used to hook each vector
+ * to possibly different handlers.
+ */
+static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
+{
+	int i = 0;
+	struct intr_context *intr_context = &qdev->intr_context[0];
+
+	ql_enable_msix(qdev);
+
+	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
+		/* Each rx_ring has its own intr_context since
+		 * we have separate vectors for each queue.
+		 * This is only true when MSI-X is enabled.
+		 */
+		for (i = 0; i < qdev->intr_count; i++, intr_context++) {
+			qdev->rx_ring[i].irq = i;
+			intr_context->intr = i;
+			intr_context->qdev = qdev;
+			/*
+			 * We set up each vector's enable/disable/read bits so
+			 * there's no bit/mask calculations in the critical path.
+			 */
+			intr_context->intr_en_mask =
+			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
+			    INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
+			    | i;
+			intr_context->intr_dis_mask =
+			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
+			    INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
+			    INTR_EN_IHD | i;
+			intr_context->intr_read_mask =
+			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
+			    INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
+			    i;
+
+			if (i == 0) {
+				/*
+				 * Default queue handles bcast/mcast plus
+				 * async events.  Needs buffers.
+				 */
+				intr_context->handler = qlge_isr;
+				sprintf(intr_context->name, "%s-default-queue",
+					qdev->ndev->name);
+			} else if (i < qdev->rss_ring_first_cq_id) {
+				/*
+				 * Outbound queue is for outbound completions only.
+				 */
+				intr_context->handler = qlge_msix_tx_isr;
+				sprintf(intr_context->name, "%s-txq-%d",
+					qdev->ndev->name, i);
+			} else {
+				/*
+				 * Inbound queues handle unicast frames only.
+				 */
+				intr_context->handler = qlge_msix_rx_isr;
+				sprintf(intr_context->name, "%s-rxq-%d",
+					qdev->ndev->name, i);
+			}
+		}
+	} else {
+		/*
+		 * All rx_rings use the same intr_context since
+		 * there is only one vector.
+		 */
+		intr_context->intr = 0;
+		intr_context->qdev = qdev;
+		/*
+		 * We set up each vector's enable/disable/read bits so
+		 * there's no bit/mask calculations in the critical path.
+		 */
+		intr_context->intr_en_mask =
+		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
+		intr_context->intr_dis_mask =
+		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
+		    INTR_EN_TYPE_DISABLE;
+		intr_context->intr_read_mask =
+		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
+		/*
+		 * Single interrupt means one handler for all rings.
+		 */
+		intr_context->handler = qlge_isr;
+		sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
+		for (i = 0; i < qdev->rx_ring_count; i++)
+			qdev->rx_ring[i].irq = 0;
+	}
+}
+
+static void ql_free_irq(struct ql_adapter *qdev)
+{
+	int i;
+	struct intr_context *intr_context = &qdev->intr_context[0];
+
+	for (i = 0; i < qdev->intr_count; i++, intr_context++) {
+		if (intr_context->hooked) {
+			if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
+				free_irq(qdev->msi_x_entry[i].vector,
+					 &qdev->rx_ring[i]);
+				QPRINTK(qdev, IFDOWN, ERR,
+					"freeing msix interrupt %d.\n", i);
+			} else {
+				free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
+				QPRINTK(qdev, IFDOWN, ERR,
+					"freeing msi interrupt %d.\n", i);
+			}
+		}
+	}
+	ql_disable_msix(qdev);
+}
+
+static int ql_request_irq(struct ql_adapter *qdev)
+{
+	int i;
+	int status = 0;
+	struct pci_dev *pdev = qdev->pdev;
+	struct intr_context *intr_context = &qdev->intr_context[0];
+
+	ql_resolve_queues_to_irqs(qdev);
+
+	for (i = 0; i < qdev->intr_count; i++, intr_context++) {
+		atomic_set(&intr_context->irq_cnt, 0);
+		if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
+			status = request_irq(qdev->msi_x_entry[i].vector,
+					     intr_context->handler,
+					     0,
+					     intr_context->name,
+					     &qdev->rx_ring[i]);
+			if (status) {
+				QPRINTK(qdev, IFUP, ERR,
+					"Failed request for MSIX interrupt %d.\n",
+					i);
+				goto err_irq;
+			} else {
+				QPRINTK(qdev, IFUP, INFO,
+					"Hooked intr %d, queue type %s%s%s, with name %s.\n",
+					i,
+					qdev->rx_ring[i].type ==
+					DEFAULT_Q ? "DEFAULT_Q" : "",
+					qdev->rx_ring[i].type ==
+					TX_Q ? "TX_Q" : "",
+					qdev->rx_ring[i].type ==
+					RX_Q ? "RX_Q" : "", intr_context->name);
+			}
+		} else {
+			QPRINTK(qdev, IFUP, DEBUG,
+				"trying msi or legacy interrupts.\n");
+			QPRINTK(qdev, IFUP, DEBUG,
+				"%s: irq = %d.\n", __func__, pdev->irq);
+			QPRINTK(qdev, IFUP, DEBUG,
+				"%s: context->name = %s.\n", __func__,
+			       intr_context->name);
+			QPRINTK(qdev, IFUP, DEBUG,
+				"%s: dev_id = 0x%p.\n", __func__,
+			       &qdev->rx_ring[0]);
+			status = request_irq(pdev->irq, qlge_isr,
+					     test_bit(QL_MSI_ENABLED,
+						      &qdev->flags)
+					     ? 0 : IRQF_SHARED,
+					     intr_context->name,
+					     &qdev->rx_ring[0]);
+			if (status)
+				goto err_irq;
+
+			QPRINTK(qdev, IFUP, ERR,
+				"Hooked intr %d, queue type %s%s%s, with name %s.\n",
+				i,
+				qdev->rx_ring[0].type ==
+				DEFAULT_Q ? "DEFAULT_Q" : "",
+				qdev->rx_ring[0].type == TX_Q ? "TX_Q" : "",
+				qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
+				intr_context->name);
+		}
+		intr_context->hooked = 1;
+	}
+	return status;
+err_irq:
+	QPRINTK(qdev, IFUP, ERR, "Failed to get the interrupts!\n");
+	ql_free_irq(qdev);
+	return status;
+}
+
+static int ql_start_rss(struct ql_adapter *qdev)
+{
+	struct ricb *ricb = &qdev->ricb;
+	int status = 0;
+	int i;
+	u8 *hash_id = (u8 *) ricb->hash_cq_id;
+
+	memset((void *)ricb, 0, sizeof(*ricb));
+
+	ricb->base_cq = qdev->rss_ring_first_cq_id | RSS_L4K;
+	ricb->flags =
+	    (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RI4 | RSS_RI6 | RSS_RT4 |
+	     RSS_RT6);
+	ricb->mask = cpu_to_le16(qdev->rss_ring_count - 1);
+
+	/*
+	 * Fill out the Indirection Table.
+	 */
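+	/* The 32 entries alternate between the first two RSS
+	 * queues (relative to base_cq).
+	 */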
+	for (i = 0; i < 32; i++)
+		hash_id[i] = i & 1;
+
+	/*
+	 * Random values for the IPv6 and IPv4 Hash Keys.
+	 */
+	get_random_bytes((void *)&ricb->ipv6_hash_key[0], 40);
+	get_random_bytes((void *)&ricb->ipv4_hash_key[0], 16);
+
+	QPRINTK(qdev, IFUP, INFO, "Initializing RSS.\n");
+
+	status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
+	if (status) {
+		QPRINTK(qdev, IFUP, ERR, "Failed to load RICB.\n");
+		return status;
+	}
+	QPRINTK(qdev, IFUP, INFO, "Successfully loaded RICB.\n");
+	return status;
+}
+
+/* Initialize the frame-to-queue routing. */
+static int ql_route_initialize(struct ql_adapter *qdev)
+{
+	int status = 0;
+	int i;
+
+	/* Clear all the entries in the routing table. */
+	for (i = 0; i < 16; i++) {
+		status = ql_set_routing_reg(qdev, i, 0, 0);
+		if (status) {
+			QPRINTK(qdev, IFUP, ERR,
+				"Failed to init routing register for CAM packets.\n");
+			return status;
+		}
+	}
+
+	status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1);
+	if (status) {
+		QPRINTK(qdev, IFUP, ERR,
+			"Failed to init routing register for error packets.\n");
+		return status;
+	}
+	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
+	if (status) {
+		QPRINTK(qdev, IFUP, ERR,
+			"Failed to init routing register for broadcast packets.\n");
+		return status;
+	}
+	/* If we have more than one inbound queue, then turn on RSS in the
+	 * routing block.
+	 */
+	if (qdev->rss_ring_count > 1) {
+		status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
+					RT_IDX_RSS_MATCH, 1);
+		if (status) {
+			QPRINTK(qdev, IFUP, ERR,
+				"Failed to init routing register for MATCH RSS packets.\n");
+			return status;
+		}
+	}
+
+	status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
+				    RT_IDX_CAM_HIT, 1);
+	if (status) {
+		QPRINTK(qdev, IFUP, ERR,
+			"Failed to init routing register for CAM packets.\n");
+		return status;
+	}
+	return status;
+}
+
+static int ql_adapter_initialize(struct ql_adapter *qdev)
+{
+	u32 value, mask;
+	int i;
+	int status = 0;
+
+	/*
+	 * Set up the System register to halt on errors.
+	 */
+	value = SYS_EFE | SYS_FAE;
+	mask = value << 16;
+	ql_write32(qdev, SYS, mask | value);
+
+	/* Set the default queue. */
+	value = NIC_RCV_CFG_DFQ;
+	mask = NIC_RCV_CFG_DFQ_MASK;
+	ql_write32(qdev, NIC_RCV_CFG, (mask | value));
+
+	/* Set the MPI interrupt to enabled. */
+	ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
+
+	/* Enable the function, set pagesize, enable error checking. */
+	value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
+	    FSC_EC | FSC_VM_PAGE_4K | FSC_SH;
+
+	/* Set/clear header splitting. */
+	mask = FSC_VM_PAGESIZE_MASK |
+	    FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
+	ql_write32(qdev, FSC, mask | value);
+
+	ql_write32(qdev, SPLT_HDR, SPLT_HDR_EP |
+		min(SMALL_BUFFER_SIZE, MAX_SPLIT_SIZE));
+
+	/* Start up the rx queues. */
+	for (i = 0; i < qdev->rx_ring_count; i++) {
+		status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
+		if (status) {
+			QPRINTK(qdev, IFUP, ERR,
+				"Failed to start rx ring[%d].\n", i);
+			return status;
+		}
+	}
+
+	/* If there is more than one inbound completion queue
+	 * then download a RICB to configure RSS.
+	 */
+	if (qdev->rss_ring_count > 1) {
+		status = ql_start_rss(qdev);
+		if (status) {
+			QPRINTK(qdev, IFUP, ERR, "Failed to start RSS.\n");
+			return status;
+		}
+	}
+
+	/* Start up the tx queues. */
+	for (i = 0; i < qdev->tx_ring_count; i++) {
+		status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
+		if (status) {
+			QPRINTK(qdev, IFUP, ERR,
+				"Failed to start tx ring[%d].\n", i);
+			return status;
+		}
+	}
+
+	status = ql_port_initialize(qdev);
+	if (status) {
+		QPRINTK(qdev, IFUP, ERR, "Failed to start port.\n");
+		return status;
+	}
+
+	status = ql_set_mac_addr_reg(qdev, (u8 *) qdev->ndev->perm_addr,
+				     MAC_ADDR_TYPE_CAM_MAC, qdev->func);
+	if (status) {
+		QPRINTK(qdev, IFUP, ERR, "Failed to init mac address.\n");
+		return status;
+	}
+
+	status = ql_route_initialize(qdev);
+	if (status) {
+		QPRINTK(qdev, IFUP, ERR, "Failed to init routing table.\n");
+		return status;
+	}
+
+	/* Start NAPI for the RSS queues. */
+	for (i = qdev->rss_ring_first_cq_id; i < qdev->rx_ring_count; i++) {
+		QPRINTK(qdev, IFUP, INFO, "Enabling NAPI for rx_ring[%d].\n",
+			i);
+		napi_enable(&qdev->rx_ring[i].napi);
+	}
+
+	return status;
+}
+
+/* Issue soft reset to chip. */
+static int ql_adapter_reset(struct ql_adapter *qdev)
+{
+	u32 value;
+	int max_wait_time;
+	int status = 0;
+	int resetCnt = 0;
+
+#define MAX_RESET_CNT   1
+issueReset:
+	resetCnt++;
+	QPRINTK(qdev, IFDOWN, DEBUG, "Issue soft reset to chip.\n");
+	ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
+	/* Wait for reset to complete. */
+	max_wait_time = 3;
+	QPRINTK(qdev, IFDOWN, DEBUG, "Wait %d seconds for reset to complete.\n",
+		max_wait_time);
+	do {
+		value = ql_read32(qdev, RST_FO);
+		if ((value & RST_FO_FR) == 0)
+			break;
+
+		ssleep(1);
+	} while ((--max_wait_time));
+	if (value & RST_FO_FR) {
+		QPRINTK(qdev, IFDOWN, ERR,
+			"Stuck in SoftReset:  FSC_SR:0x%08x\n", value);
+		if (resetCnt < MAX_RESET_CNT)
+			goto issueReset;
+	}
+	if (max_wait_time == 0) {
+		status = -ETIMEDOUT;
+		QPRINTK(qdev, IFDOWN, ERR,
+			"Timed out (ETIMEDOUT) resetting the chip!\n");
+	}
+
+	return status;
+}
+
+static void ql_display_dev_info(struct net_device *ndev)
+{
+	struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
+
+	QPRINTK(qdev, PROBE, INFO,
+		"Function #%d, NIC Roll %d, NIC Rev = %d, "
+		"XG Roll = %d, XG Rev = %d.\n",
+		qdev->func,
+		qdev->chip_rev_id & 0x0000000f,
+		qdev->chip_rev_id >> 4 & 0x0000000f,
+		qdev->chip_rev_id >> 8 & 0x0000000f,
+		qdev->chip_rev_id >> 12 & 0x0000000f);
+	QPRINTK(qdev, PROBE, INFO,
+		"MAC address %02x:%02x:%02x:%02x:%02x:%02x\n",
+		ndev->dev_addr[0], ndev->dev_addr[1],
+		ndev->dev_addr[2], ndev->dev_addr[3], ndev->dev_addr[4],
+		ndev->dev_addr[5]);
+}
+
+static int ql_adapter_down(struct ql_adapter *qdev)
+{
+	struct net_device *ndev = qdev->ndev;
+	int i, status = 0;
+	struct rx_ring *rx_ring;
+
+	netif_stop_queue(ndev);
+	netif_carrier_off(ndev);
+
+	cancel_delayed_work_sync(&qdev->asic_reset_work);
+	cancel_delayed_work_sync(&qdev->mpi_reset_work);
+	cancel_delayed_work_sync(&qdev->mpi_work);
+
+	/* The default queue at index 0 is always processed in
+	 * a workqueue.
+	 */
+	cancel_delayed_work_sync(&qdev->rx_ring[0].rx_work);
+
+	/* The rest of the rx_rings are processed in
+	 * a workqueue only if it's a single interrupt
+	 * environment (MSI/Legacy).
+	 */
+	for (i = 1; i < qdev->rx_ring_count; i++) {
+		rx_ring = &qdev->rx_ring[i];
+		/* Only the RSS rings use NAPI on multi irq
+		 * environment.  Outbound completion processing
+		 * is done in interrupt context.
+		 */
+		if (i >= qdev->rss_ring_first_cq_id) {
+			napi_disable(&rx_ring->napi);
+		} else {
+			cancel_delayed_work_sync(&rx_ring->rx_work);
+		}
+	}
+
+	clear_bit(QL_ADAPTER_UP, &qdev->flags);
+
+	ql_disable_interrupts(qdev);
+
+	ql_tx_ring_clean(qdev);
+
+	spin_lock(&qdev->hw_lock);
+	status = ql_adapter_reset(qdev);
+	if (status)
+		QPRINTK(qdev, IFDOWN, ERR, "reset(func #%d) FAILED!\n",
+			qdev->func);
+	spin_unlock(&qdev->hw_lock);
+	return status;
+}
+
+static int ql_adapter_up(struct ql_adapter *qdev)
+{
+	int err = 0;
+
+	spin_lock(&qdev->hw_lock);
+	err = ql_adapter_initialize(qdev);
+	if (err) {
+		QPRINTK(qdev, IFUP, INFO, "Unable to initialize adapter.\n");
+		spin_unlock(&qdev->hw_lock);
+		goto err_init;
+	}
+	spin_unlock(&qdev->hw_lock);
+	set_bit(QL_ADAPTER_UP, &qdev->flags);
+	ql_enable_interrupts(qdev);
+	ql_enable_all_completion_interrupts(qdev);
+	if ((ql_read32(qdev, STS) & qdev->port_init)) {
+		netif_carrier_on(qdev->ndev);
+		netif_start_queue(qdev->ndev);
+	}
+
+	return 0;
+err_init:
+	ql_adapter_reset(qdev);
+	return err;
+}
+
+static int ql_cycle_adapter(struct ql_adapter *qdev)
+{
+	int status;
+
+	status = ql_adapter_down(qdev);
+	if (status)
+		goto error;
+
+	status = ql_adapter_up(qdev);
+	if (status)
+		goto error;
+
+	return status;
+error:
+	QPRINTK(qdev, IFUP, ALERT,
+		"Driver up/down cycle failed, closing device\n");
+	rtnl_lock();
+	dev_close(qdev->ndev);
+	rtnl_unlock();
+	return status;
+}
+
+static void ql_release_adapter_resources(struct ql_adapter *qdev)
+{
+	ql_free_mem_resources(qdev);
+	ql_free_irq(qdev);
+}
+
+static int ql_get_adapter_resources(struct ql_adapter *qdev)
+{
+	int status = 0;
+
+	if (ql_alloc_mem_resources(qdev)) {
+		QPRINTK(qdev, IFUP, ERR, "Unable to allocate memory.\n");
+		return -ENOMEM;
+	}
+	status = ql_request_irq(qdev);
+	if (status)
+		goto err_irq;
+	return status;
+err_irq:
+	ql_free_mem_resources(qdev);
+	return status;
+}
+
+static int qlge_close(struct net_device *ndev)
+{
+	struct ql_adapter *qdev = netdev_priv(ndev);
+
+	/*
+	 * Wait for device to recover from a reset.
+	 * (Rarely happens, but possible.)
+	 */
+	while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
+		msleep(1);
+	ql_adapter_down(qdev);
+	ql_release_adapter_resources(qdev);
+	ql_free_ring_cb(qdev);
+	return 0;
+}
+
+static int ql_configure_rings(struct ql_adapter *qdev)
+{
+	int i;
+	struct rx_ring *rx_ring;
+	struct tx_ring *tx_ring;
+	int cpu_cnt = num_online_cpus();
+
+	/*
+	 * For each processor present we allocate one
+	 * rx_ring for outbound completions, and one
+	 * rx_ring for inbound completions.  Plus there is
+	 * always the one default queue.  For the CPU
+	 * counts we end up with the following rx_rings:
+	 * rx_ring count =
+	 *  one default queue +
+	 *  (CPU count * outbound completion rx_ring) +
+	 *  (CPU count * inbound (RSS) completion rx_ring)
+	 * To keep it simple we limit the total number of
+	 * queues to < 32, so we truncate the CPU count to 8.
+	 * This limitation can be removed when requested.
+	 */
+
+	if (cpu_cnt > 8)
+		cpu_cnt = 8;
+
+	/*
+	 * rx_ring[0] is always the default queue.
+	 */
+	/* Allocate outbound completion ring for each CPU. */
+	qdev->tx_ring_count = cpu_cnt;
+	/* Allocate inbound completion (RSS) ring for each CPU. */
+	qdev->rss_ring_count = cpu_cnt;
+	/* cq_id for the first inbound ring handler. */
+	qdev->rss_ring_first_cq_id = cpu_cnt + 1;
+	/*
+	 * qdev->rx_ring_count:
+	 * Total number of rx_rings.  This includes the one
+	 * default queue, a number of outbound completion
+	 * handler rx_rings, and the number of inbound
+	 * completion handler rx_rings.
+	 */
+	qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count + 1;
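+	/*
+	 * For example, with 4 online CPUs this gives 4 tx_rings,
+	 * 4 RSS rings, rss_ring_first_cq_id = 5, and
+	 * rx_ring_count = 4 + 4 + 1 = 9.
+	 */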
+
+	if (ql_alloc_ring_cb(qdev))
+		return -ENOMEM;
+
+	for (i = 0; i < qdev->tx_ring_count; i++) {
+		tx_ring = &qdev->tx_ring[i];
+		memset((void *)tx_ring, 0, sizeof(*tx_ring));
+		tx_ring->qdev = qdev;
+		tx_ring->wq_id = i;
+		tx_ring->wq_len = qdev->tx_ring_size;
+		tx_ring->wq_size =
+		    tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
+
+		/*
+		 * The completion queue IDs for the tx rings start
+		 * immediately after the default Q ID, which is zero.
+		 */
+		tx_ring->cq_id = i + 1;
+	}
+
+	for (i = 0; i < qdev->rx_ring_count; i++) {
+		rx_ring = &qdev->rx_ring[i];
+		memset((void *)rx_ring, 0, sizeof(*rx_ring));
+		rx_ring->qdev = qdev;
+		rx_ring->cq_id = i;
+		rx_ring->cpu = i % cpu_cnt;	/* CPU to run handler on. */
+		if (i == 0) {	/* Default queue at index 0. */
+			/*
+			 * Default queue handles bcast/mcast plus
+			 * async events.  Needs buffers.
+			 */
+			rx_ring->cq_len = qdev->rx_ring_size;
+			rx_ring->cq_size =
+			    rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
+			rx_ring->lbq_len = NUM_LARGE_BUFFERS;
+			rx_ring->lbq_size =
+			    rx_ring->lbq_len * sizeof(struct bq_element);
+			rx_ring->lbq_buf_size = LARGE_BUFFER_SIZE;
+			rx_ring->sbq_len = NUM_SMALL_BUFFERS;
+			rx_ring->sbq_size =
+			    rx_ring->sbq_len * sizeof(struct bq_element);
+			rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2;
+			rx_ring->type = DEFAULT_Q;
+		} else if (i < qdev->rss_ring_first_cq_id) {
+			/*
+			 * Outbound queue handles outbound completions only.
+			 */
+			/* outbound cq is same size as tx_ring it services. */
+			rx_ring->cq_len = qdev->tx_ring_size;
+			rx_ring->cq_size =
+			    rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
+			rx_ring->lbq_len = 0;
+			rx_ring->lbq_size = 0;
+			rx_ring->lbq_buf_size = 0;
+			rx_ring->sbq_len = 0;
+			rx_ring->sbq_size = 0;
+			rx_ring->sbq_buf_size = 0;
+			rx_ring->type = TX_Q;
+		} else {	/* Inbound completions (RSS) queues */
+			/*
+			 * Inbound queues handle unicast frames only.
+			 */
+			rx_ring->cq_len = qdev->rx_ring_size;
+			rx_ring->cq_size =
+			    rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
+			rx_ring->lbq_len = NUM_LARGE_BUFFERS;
+			rx_ring->lbq_size =
+			    rx_ring->lbq_len * sizeof(struct bq_element);
+			rx_ring->lbq_buf_size = LARGE_BUFFER_SIZE;
+			rx_ring->sbq_len = NUM_SMALL_BUFFERS;
+			rx_ring->sbq_size =
+			    rx_ring->sbq_len * sizeof(struct bq_element);
+			rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2;
+			rx_ring->type = RX_Q;
+		}
+	}
+	return 0;
+}
+
+static int qlge_open(struct net_device *ndev)
+{
+	int err = 0;
+	struct ql_adapter *qdev = netdev_priv(ndev);
+
+	err = ql_configure_rings(qdev);
+	if (err)
+		return err;
+
+	err = ql_get_adapter_resources(qdev);
+	if (err)
+		goto error_up;
+
+	err = ql_adapter_up(qdev);
+	if (err)
+		goto error_up;
+
+	return err;
+
+error_up:
+	ql_release_adapter_resources(qdev);
+	ql_free_ring_cb(qdev);
+	return err;
+}
+
+static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
+{
+	struct ql_adapter *qdev = netdev_priv(ndev);
+
+	if (ndev->mtu == 1500 && new_mtu == 9000) {
+		QPRINTK(qdev, IFUP, ERR, "Changing to jumbo MTU.\n");
+	} else if (ndev->mtu == 9000 && new_mtu == 1500) {
+		QPRINTK(qdev, IFUP, ERR, "Changing to normal MTU.\n");
+	} else if ((ndev->mtu == 1500 && new_mtu == 1500) ||
+		   (ndev->mtu == 9000 && new_mtu == 9000)) {
+		return 0;
+	} else
+		return -EINVAL;
+	ndev->mtu = new_mtu;
+	return 0;
+}
+
+static struct net_device_stats *qlge_get_stats(struct net_device *ndev)
+{
+	struct ql_adapter *qdev = netdev_priv(ndev);
+	return &qdev->stats;
+}
+
+static void qlge_set_multicast_list(struct net_device *ndev)
+{
+	struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
+	struct dev_mc_list *mc_ptr;
+	int i;
+
+	spin_lock(&qdev->hw_lock);
+	/*
+	 * Set or clear promiscuous mode if a
+	 * transition is taking place.
+	 */
+	if (ndev->flags & IFF_PROMISC) {
+		if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
+			if (ql_set_routing_reg
+			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
+				QPRINTK(qdev, HW, ERR,
+					"Failed to set promiscuous mode.\n");
+			} else {
+				set_bit(QL_PROMISCUOUS, &qdev->flags);
+			}
+		}
+	} else {
+		if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
+			if (ql_set_routing_reg
+			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
+				QPRINTK(qdev, HW, ERR,
+					"Failed to clear promiscuous mode.\n");
+			} else {
+				clear_bit(QL_PROMISCUOUS, &qdev->flags);
+			}
+		}
+	}
+
+	/*
+	 * Set or clear all multicast mode if a
+	 * transition is taking place.
+	 */
+	if ((ndev->flags & IFF_ALLMULTI) ||
+	    (ndev->mc_count > MAX_MULTICAST_ENTRIES)) {
+		if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
+			if (ql_set_routing_reg
+			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
+				QPRINTK(qdev, HW, ERR,
+					"Failed to set all-multi mode.\n");
+			} else {
+				set_bit(QL_ALLMULTI, &qdev->flags);
+			}
+		}
+	} else {
+		if (test_bit(QL_ALLMULTI, &qdev->flags)) {
+			if (ql_set_routing_reg
+			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
+				QPRINTK(qdev, HW, ERR,
+					"Failed to clear all-multi mode.\n");
+			} else {
+				clear_bit(QL_ALLMULTI, &qdev->flags);
+			}
+		}
+	}
+
+	if (ndev->mc_count) {
+		for (i = 0, mc_ptr = ndev->mc_list; mc_ptr;
+		     i++, mc_ptr = mc_ptr->next)
+			if (ql_set_mac_addr_reg(qdev, (u8 *) mc_ptr->dmi_addr,
+						MAC_ADDR_TYPE_MULTI_MAC, i)) {
+				QPRINTK(qdev, HW, ERR,
+					"Failed to load multicast address.\n");
+				goto exit;
+			}
+		if (ql_set_routing_reg
+		    (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
+			QPRINTK(qdev, HW, ERR,
+				"Failed to set multicast match mode.\n");
+		} else {
+			set_bit(QL_ALLMULTI, &qdev->flags);
+		}
+	}
+exit:
+	spin_unlock(&qdev->hw_lock);
+}
+
+static int qlge_set_mac_address(struct net_device *ndev, void *p)
+{
+	struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
+	struct sockaddr *addr = p;
+
+	if (netif_running(ndev))
+		return -EBUSY;
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
+
+	spin_lock(&qdev->hw_lock);
+	if (ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
+			MAC_ADDR_TYPE_CAM_MAC, qdev->func)) {/* Unicast */
+		QPRINTK(qdev, HW, ERR, "Failed to load MAC address.\n");
+		spin_unlock(&qdev->hw_lock);
+		return -1;
+	}
+	spin_unlock(&qdev->hw_lock);
+
+	return 0;
+}
+
+static void qlge_tx_timeout(struct net_device *ndev)
+{
+	struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
+	queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
+}
+
+static void ql_asic_reset_work(struct work_struct *work)
+{
+	struct ql_adapter *qdev =
+	    container_of(work, struct ql_adapter, asic_reset_work.work);
+	ql_cycle_adapter(qdev);
+}
+
+static void ql_get_board_info(struct ql_adapter *qdev)
+{
+	qdev->func =
+	    (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
+	if (qdev->func) {
+		qdev->xg_sem_mask = SEM_XGMAC1_MASK;
+		qdev->port_link_up = STS_PL1;
+		qdev->port_init = STS_PI1;
+		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
+		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
+	} else {
+		qdev->xg_sem_mask = SEM_XGMAC0_MASK;
+		qdev->port_link_up = STS_PL0;
+		qdev->port_init = STS_PI0;
+		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
+		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
+	}
+	qdev->chip_rev_id = ql_read32(qdev, REV_ID);
+}
+
+static void ql_release_all(struct pci_dev *pdev)
+{
+	struct net_device *ndev = pci_get_drvdata(pdev);
+	struct ql_adapter *qdev = netdev_priv(ndev);
+
+	if (qdev->workqueue) {
+		destroy_workqueue(qdev->workqueue);
+		qdev->workqueue = NULL;
+	}
+	if (qdev->q_workqueue) {
+		destroy_workqueue(qdev->q_workqueue);
+		qdev->q_workqueue = NULL;
+	}
+	if (qdev->reg_base)
+		iounmap((void *)qdev->reg_base);
+	if (qdev->doorbell_area)
+		iounmap(qdev->doorbell_area);
+	pci_release_regions(pdev);
+	pci_set_drvdata(pdev, NULL);
+}
+
+static int __devinit ql_init_device(struct pci_dev *pdev,
+				    struct net_device *ndev, int cards_found)
+{
+	struct ql_adapter *qdev = netdev_priv(ndev);
+	int pos, err = 0;
+	u16 val16;
+
+	memset((void *)qdev, 0, sizeof(*qdev));
+	err = pci_enable_device(pdev);
+	if (err) {
+		dev_err(&pdev->dev, "PCI device enable failed.\n");
+		return err;
+	}
+
+	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+	if (pos <= 0) {
+		dev_err(&pdev->dev, PFX "Cannot find PCI Express capability, "
+			"aborting.\n");
+		goto err_out;
+	} else {
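+		/* Enable PCIe error reporting (correctable, non-fatal,
+		 * fatal, and unsupported request) and disable no-snoop.
+		 */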
+		pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16);
+		val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
+		val16 |= (PCI_EXP_DEVCTL_CERE |
+			  PCI_EXP_DEVCTL_NFERE |
+			  PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE);
+		pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, val16);
+	}
+
+	err = pci_request_regions(pdev, DRV_NAME);
+	if (err) {
+		dev_err(&pdev->dev, "PCI region request failed.\n");
+		goto err_out;
+	}
+
+	pci_set_master(pdev);
+	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
+		set_bit(QL_DMA64, &qdev->flags);
+		err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
+	} else {
+		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+		if (!err)
+		       err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+	}
+
+	if (err) {
+		dev_err(&pdev->dev, "No usable DMA configuration.\n");
+		goto err_out;
+	}
+
+	pci_set_drvdata(pdev, ndev);
+	qdev->reg_base =
+	    ioremap_nocache(pci_resource_start(pdev, 1),
+			    pci_resource_len(pdev, 1));
+	if (!qdev->reg_base) {
+		dev_err(&pdev->dev, "Register mapping failed.\n");
+		err = -ENOMEM;
+		goto err_out;
+	}
+
+	qdev->doorbell_area_size = pci_resource_len(pdev, 3);
+	qdev->doorbell_area =
+	    ioremap_nocache(pci_resource_start(pdev, 3),
+			    pci_resource_len(pdev, 3));
+	if (!qdev->doorbell_area) {
+		dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
+		err = -ENOMEM;
+		goto err_out;
+	}
+
+	ql_get_board_info(qdev);
+	qdev->ndev = ndev;
+	qdev->pdev = pdev;
+	qdev->msg_enable = netif_msg_init(debug, default_msg);
+	spin_lock_init(&qdev->hw_lock);
+	spin_lock_init(&qdev->stats_lock);
+
+	/* make sure the EEPROM is good */
+	err = ql_get_flash_params(qdev);
+	if (err) {
+		dev_err(&pdev->dev, "Invalid FLASH.\n");
+		goto err_out;
+	}
+
+	if (!is_valid_ether_addr(qdev->flash.mac_addr))
+		goto err_out;
+
+	memcpy(ndev->dev_addr, qdev->flash.mac_addr, ndev->addr_len);
+	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
+
+	/* Set up the default ring sizes. */
+	qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
+	qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
+
+	/* Set up the coalescing parameters. */
+	qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
+	qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
+	qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
+	qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
+
+	/*
+	 * Set up the operating parameters.
+	 */
+	qdev->rx_csum = 1;
+
+	qdev->q_workqueue = create_workqueue(ndev->name);
+	qdev->workqueue = create_singlethread_workqueue(ndev->name);
+	INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
+	INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
+	INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
+
+	if (!cards_found) {
+		dev_info(&pdev->dev, "%s\n", DRV_STRING);
+		dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
+			 DRV_NAME, DRV_VERSION);
+	}
+	return 0;
+err_out:
+	ql_release_all(pdev);
+	pci_disable_device(pdev);
+	return err;
+}
+
+static int __devinit qlge_probe(struct pci_dev *pdev,
+				const struct pci_device_id *pci_entry)
+{
+	struct net_device *ndev = NULL;
+	struct ql_adapter *qdev = NULL;
+	static int cards_found = 0;
+	int err = 0;
+
+	ndev = alloc_etherdev(sizeof(struct ql_adapter));
+	if (!ndev)
+		return -ENOMEM;
+
+	err = ql_init_device(pdev, ndev, cards_found);
+	if (err < 0) {
+		free_netdev(ndev);
+		return err;
+	}
+
+	qdev = netdev_priv(ndev);
+	SET_NETDEV_DEV(ndev, &pdev->dev);
+	ndev->features = (0
+			  | NETIF_F_IP_CSUM
+			  | NETIF_F_SG
+			  | NETIF_F_TSO
+			  | NETIF_F_TSO6
+			  | NETIF_F_TSO_ECN
+			  | NETIF_F_HW_VLAN_TX
+			  | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER);
+
+	if (test_bit(QL_DMA64, &qdev->flags))
+		ndev->features |= NETIF_F_HIGHDMA;
+
+	/*
+	 * Set up net_device structure.
+	 */
+	ndev->tx_queue_len = qdev->tx_ring_size;
+	ndev->irq = pdev->irq;
+	ndev->open = qlge_open;
+	ndev->stop = qlge_close;
+	ndev->hard_start_xmit = qlge_send;
+	SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
+	ndev->change_mtu = qlge_change_mtu;
+	ndev->get_stats = qlge_get_stats;
+	ndev->set_multicast_list = qlge_set_multicast_list;
+	ndev->set_mac_address = qlge_set_mac_address;
+	ndev->tx_timeout = qlge_tx_timeout;
+	ndev->watchdog_timeo = 10 * HZ;
+	ndev->vlan_rx_register = ql_vlan_rx_register;
+	ndev->vlan_rx_add_vid = ql_vlan_rx_add_vid;
+	ndev->vlan_rx_kill_vid = ql_vlan_rx_kill_vid;
+	err = register_netdev(ndev);
+	if (err) {
+		dev_err(&pdev->dev, "net device registration failed.\n");
+		ql_release_all(pdev);
+		pci_disable_device(pdev);
+		return err;
+	}
+	netif_carrier_off(ndev);
+	netif_stop_queue(ndev);
+	ql_display_dev_info(ndev);
+	cards_found++;
+	return 0;
+}
+
+static void __devexit qlge_remove(struct pci_dev *pdev)
+{
+	struct net_device *ndev = pci_get_drvdata(pdev);
+	unregister_netdev(ndev);
+	ql_release_all(pdev);
+	pci_disable_device(pdev);
+	free_netdev(ndev);
+}
+
+/*
+ * This callback is called by the PCI subsystem whenever
+ * a PCI bus error is detected.
+ */
+static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
+					       enum pci_channel_state state)
+{
+	struct net_device *ndev = pci_get_drvdata(pdev);
+	struct ql_adapter *qdev = netdev_priv(ndev);
+
+	if (netif_running(ndev))
+		ql_adapter_down(qdev);
+
+	pci_disable_device(pdev);
+
+	/* Request a slot reset. */
+	return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/*
+ * This callback is called after the PCI bus has been reset.
+ * Basically, this tries to restart the card from scratch.
+ * This is a shortened version of the device probe/discovery code;
+ * it resembles the first half of the probe routine.
+ */
+static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
+{
+	struct net_device *ndev = pci_get_drvdata(pdev);
+	struct ql_adapter *qdev = netdev_priv(ndev);
+
+	if (pci_enable_device(pdev)) {
+		QPRINTK(qdev, IFUP, ERR,
+			"Cannot re-enable PCI device after reset.\n");
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+
+	pci_set_master(pdev);
+
+	netif_carrier_off(ndev);
+	netif_stop_queue(ndev);
+	ql_adapter_reset(qdev);
+
+	/* Make sure the EEPROM is good */
+	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
+
+	if (!is_valid_ether_addr(ndev->perm_addr)) {
+		QPRINTK(qdev, IFUP, ERR, "After reset, invalid MAC address.\n");
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+
+	return PCI_ERS_RESULT_RECOVERED;
+}
+
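+/*
+ * Called when traffic may resume after a slot reset: restore bus
+ * mastering, bring the adapter back up if the interface was running,
+ * and reattach the net device.
+ */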
+static void qlge_io_resume(struct pci_dev *pdev)
+{
+	struct net_device *ndev = pci_get_drvdata(pdev);
+	struct ql_adapter *qdev = netdev_priv(ndev);
+
+	pci_set_master(pdev);
+
+	if (netif_running(ndev)) {
+		if (ql_adapter_up(qdev)) {
+			QPRINTK(qdev, IFUP, ERR,
+				"Device initialization failed after reset.\n");
+			return;
+		}
+	}
+
+	netif_device_attach(ndev);
+}
+
+static struct pci_error_handlers qlge_err_handler = {
+	.error_detected = qlge_io_error_detected,
+	.slot_reset = qlge_io_slot_reset,
+	.resume = qlge_io_resume,
+};
+
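+/*
+ * Power-management suspend: detach the net device, bring the adapter
+ * down if it is running, then save PCI state and drop the device into
+ * the requested low-power state.
+ */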
+static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	struct net_device *ndev = pci_get_drvdata(pdev);
+	struct ql_adapter *qdev = netdev_priv(ndev);
+	int err;
+
+	netif_device_detach(ndev);
+
+	if (netif_running(ndev)) {
+		err = ql_adapter_down(qdev);
+		if (err)
+			return err;
+	}
+
+	err = pci_save_state(pdev);
+	if (err)
+		return err;
+
+	pci_disable_device(pdev);
+
+	pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+	return 0;
+}
+
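+/*
+ * Power-management resume: restore PCI state, disable wake events,
+ * bring the adapter back up if it was running, and reattach the net
+ * device.
+ */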
+static int qlge_resume(struct pci_dev *pdev)
+{
+	struct net_device *ndev = pci_get_drvdata(pdev);
+	struct ql_adapter *qdev = netdev_priv(ndev);
+	int err;
+
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+	err = pci_enable_device(pdev);
+	if (err) {
+		QPRINTK(qdev, IFUP, ERR, "Cannot enable PCI device from suspend\n");
+		return err;
+	}
+	pci_set_master(pdev);
+
+	pci_enable_wake(pdev, PCI_D3hot, 0);
+	pci_enable_wake(pdev, PCI_D3cold, 0);
+
+	if (netif_running(ndev)) {
+		err = ql_adapter_up(qdev);
+		if (err)
+			return err;
+	}
+
+	netif_device_attach(ndev);
+
+	return 0;
+}
+
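+/* Reuse the suspend path to quiesce the adapter for shutdown/reboot. */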
+static void qlge_shutdown(struct pci_dev *pdev)
+{
+	qlge_suspend(pdev, PMSG_SUSPEND);
+}
+
+static struct pci_driver qlge_driver = {
+	.name = DRV_NAME,
+	.id_table = qlge_pci_tbl,
+	.probe = qlge_probe,
+	.remove = __devexit_p(qlge_remove),
+#ifdef CONFIG_PM
+	.suspend = qlge_suspend,
+	.resume = qlge_resume,
+#endif
+	.shutdown = qlge_shutdown,
+	.err_handler = &qlge_err_handler
+};
+
+static int __init qlge_init_module(void)
+{
+	return pci_register_driver(&qlge_driver);
+}
+
+static void __exit qlge_exit(void)
+{
+	pci_unregister_driver(&qlge_driver);
+}
+
+module_init(qlge_init_module);
+module_exit(qlge_exit);
diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c
new file mode 100644
index 000000000000..24fe344bcf1f
--- /dev/null
+++ b/drivers/net/qlge/qlge_mpi.c
@@ -0,0 +1,150 @@
+#include "qlge.h"
+
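+/*
+ * Read a processor (mailbox) register.  Access is indirect: wait for
+ * PROC_ADDR to come ready, write the register offset with the read bit
+ * set, wait for ready again, then fetch the result from PROC_DATA.
+ */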
+static int ql_read_mbox_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
+{
+	int status;
+	/* wait for reg to come ready */
+	status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
+	if (status)
+		goto exit;
+	/* set up for reg read */
+	ql_write32(qdev, PROC_ADDR, reg | PROC_ADDR_R);
+	/* wait for reg to come ready */
+	status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
+	if (status)
+		goto exit;
+	/* get the data */
+	*data = ql_read32(qdev, PROC_DATA);
+exit:
+	return status;
+}
+
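+/*
+ * Read the outbound mailbox registers into mbcp->mbox_out[].  The
+ * processor register semaphore is taken across the reads and released
+ * (with a flush) when they complete.
+ */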
+int ql_get_mb_sts(struct ql_adapter *qdev, struct mbox_params *mbcp)
+{
+	int i, status;
+
+	status = ql_sem_spinlock(qdev, SEM_PROC_REG_MASK);
+	if (status)
+		return -EBUSY;
+	for (i = 0; i < mbcp->out_count; i++) {
+		status = ql_read_mbox_reg(qdev, qdev->mailbox_out + i,
+					  &mbcp->mbox_out[i]);
+		if (status) {
+			QPRINTK(qdev, DRV, ERR, "Failed mailbox read.\n");
+			break;
+		}
+	}
+	ql_sem_unlock(qdev, SEM_PROC_REG_MASK);	/* does flush too */
+	return status;
+}
+
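+/*
+ * Handle a link-up asynchronous event: capture the link status from
+ * the outbound mailboxes and, if the carrier is not already on, enable
+ * it and wake the transmit queue.
+ */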
+static void ql_link_up(struct ql_adapter *qdev, struct mbox_params *mbcp)
+{
+	mbcp->out_count = 2;
+
+	if (ql_get_mb_sts(qdev, mbcp))
+		goto exit;
+
+	qdev->link_status = mbcp->mbox_out[1];
+	QPRINTK(qdev, DRV, ERR, "Link Up.\n");
+	QPRINTK(qdev, DRV, INFO, "Link Status = 0x%.08x.\n", mbcp->mbox_out[1]);
+	if (!netif_carrier_ok(qdev->ndev)) {
+		QPRINTK(qdev, LINK, INFO, "Link is Up.\n");
+		netif_carrier_on(qdev->ndev);
+		netif_wake_queue(qdev->ndev);
+	}
+exit:
+	/* Clear the MPI firmware status. */
+	ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT);
+}
+
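+/*
+ * Handle a link-down asynchronous event: capture the link status and,
+ * if the carrier is currently on, turn it off and stop the transmit
+ * queue.
+ */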
+static void ql_link_down(struct ql_adapter *qdev, struct mbox_params *mbcp)
+{
+	mbcp->out_count = 3;
+
+	if (ql_get_mb_sts(qdev, mbcp)) {
+		QPRINTK(qdev, DRV, ERR, "Firmware did not initialize!\n");
+		goto exit;
+	}
+
+	if (netif_carrier_ok(qdev->ndev)) {
+		QPRINTK(qdev, LINK, INFO, "Link is Down.\n");
+		netif_carrier_off(qdev->ndev);
+		netif_stop_queue(qdev->ndev);
+	}
+	QPRINTK(qdev, DRV, ERR, "Link Down.\n");
+	QPRINTK(qdev, DRV, ERR, "Link Status = 0x%.08x.\n", mbcp->mbox_out[1]);
+exit:
+	/* Clear the MPI firmware status. */
+	ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT);
+}
+
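+/*
+ * Handle the firmware-initialization-complete event and log the
+ * firmware status and revision reported in the outbound mailboxes.
+ */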
+static void ql_init_fw_done(struct ql_adapter *qdev, struct mbox_params *mbcp)
+{
+	mbcp->out_count = 2;
+
+	if (ql_get_mb_sts(qdev, mbcp)) {
+		QPRINTK(qdev, DRV, ERR, "Firmware did not initialize!\n");
+		goto exit;
+	}
+	QPRINTK(qdev, DRV, ERR, "Firmware initialized!\n");
+	QPRINTK(qdev, DRV, ERR, "Firmware status = 0x%.08x.\n",
+		mbcp->mbox_out[0]);
+	QPRINTK(qdev, DRV, ERR, "Firmware Revision  = 0x%.08x.\n",
+		mbcp->mbox_out[1]);
+exit:
+	/* Clear the MPI firmware status. */
+	ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT);
+}
+
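+/*
+ * Deferred handler for MPI firmware events.  While the processor
+ * interrupt bit is set in the status register, read the outbound
+ * mailboxes and dispatch each asynchronous event, then re-enable the
+ * completion interrupt.
+ */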
+void ql_mpi_work(struct work_struct *work)
+{
+	struct ql_adapter *qdev =
+	    container_of(work, struct ql_adapter, mpi_work.work);
+	struct mbox_params mbc;
+	struct mbox_params *mbcp = &mbc;
+	mbcp->out_count = 1;
+
+	while (ql_read32(qdev, STS) & STS_PI) {
+		if (ql_get_mb_sts(qdev, mbcp)) {
+			QPRINTK(qdev, DRV, ERR,
+				"Could not read MPI, resetting ASIC!\n");
+			ql_queue_asic_error(qdev);
+		}
+
+		switch (mbcp->mbox_out[0]) {
+		case AEN_LINK_UP:
+			ql_link_up(qdev, mbcp);
+			break;
+		case AEN_LINK_DOWN:
+			ql_link_down(qdev, mbcp);
+			break;
+		case AEN_FW_INIT_DONE:
+			ql_init_fw_done(qdev, mbcp);
+			break;
+		case MB_CMD_STS_GOOD:
+			break;
+		case AEN_FW_INIT_FAIL:
+		case AEN_SYS_ERR:
+		case MB_CMD_STS_ERR:
+			ql_queue_fw_error(qdev);
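+			/* Fall through: clear the MPI firmware status too. */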
+		default:
+			/* Clear the MPI firmware status. */
+			ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT);
+			break;
+		}
+	}
+	ql_enable_completion_interrupt(qdev, 0);
+}
+
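+/*
+ * Reset the MPI processor: assert the reset bit in the control/status
+ * register, give the firmware time to quiesce, then deassert it.
+ */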
+void ql_mpi_reset_work(struct work_struct *work)
+{
+	struct ql_adapter *qdev =
+	    container_of(work, struct ql_adapter, mpi_reset_work.work);
+	QPRINTK(qdev, DRV, ERR,
+		"Enter, qdev = %p..\n", qdev);
+	ql_write32(qdev, CSR, CSR_CMD_SET_RST);
+	msleep(50);
+	ql_write32(qdev, CSR, CSR_CMD_CLR_RST);
+}
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index f1624b396754..6f4276d461c0 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -1411,6 +1411,8 @@
 #define PCI_DEVICE_ID_EICON_MAESTRAQ_U	0xe013
 #define PCI_DEVICE_ID_EICON_MAESTRAP	0xe014
 
+#define PCI_VENDOR_ID_CISCO		0x1137
+
 #define PCI_VENDOR_ID_ZIATECH		0x1138
 #define PCI_DEVICE_ID_ZIATECH_5550_HC	0x5550
  
@@ -2213,6 +2215,7 @@
 
 #define PCI_VENDOR_ID_ATTANSIC		0x1969
 #define PCI_DEVICE_ID_ATTANSIC_L1	0x1048
+#define PCI_DEVICE_ID_ATTANSIC_L2	0x2048
 
 #define PCI_VENDOR_ID_JMICRON		0x197B
 #define PCI_DEVICE_ID_JMICRON_JMB360	0x2360